-rw-r--r--.gitignore7
-rw-r--r--.gitmodules4
-rwxr-xr-x.travis.compiler.sh18
-rw-r--r--.travis.yml61
-rwxr-xr-xBUILD/SETUP.sh2
-rw-r--r--CMakeLists.txt17
-rw-r--r--CONTRIBUTING.md47
-rw-r--r--Docs/INSTALL-BINARY24
-rw-r--r--Docs/glibc-2.2.5.patch137
-rw-r--r--Docs/linuxthreads.txt19
-rw-r--r--Docs/sp-imp-spec.txt1100
-rw-r--r--KNOWN_BUGS.txt38
-rw-r--r--README.md29
-rw-r--r--VERSION6
-rw-r--r--client/client_priv.h3
-rw-r--r--client/mysql.cc49
-rw-r--r--client/mysql_upgrade.c18
-rw-r--r--client/mysqladmin.cc39
-rw-r--r--client/mysqlbinlog.cc32
-rw-r--r--client/mysqlcheck.c13
-rw-r--r--client/mysqldump.c30
-rw-r--r--client/mysqlimport.c23
-rw-r--r--client/mysqlshow.c23
-rw-r--r--client/mysqlslap.c21
-rw-r--r--client/mysqltest.cc54
-rw-r--r--cmake/aws_sdk.cmake95
-rw-r--r--cmake/build_configurations/mysql_release.cmake5
-rw-r--r--cmake/cpack_rpm.cmake2
-rw-r--r--cmake/make_dist.cmake.in8
-rw-r--r--cmake/os/Windows.cmake19
-rw-r--r--cmake/os/WindowsCache.cmake1
-rw-r--r--cmake/readline.cmake2
-rw-r--r--cmake/submodules.cmake8
-rw-r--r--cmake/wsrep.cmake37
-rw-r--r--config.h.cmake2
-rw-r--r--configure.cmake4
-rwxr-xr-xdebian/additions/debian-start.inc.sh2
-rwxr-xr-xdebian/autobake-deb.sh12
-rw-r--r--debian/changelog6
-rw-r--r--debian/control248
-rw-r--r--debian/libmariadbd-dev.install4
-rw-r--r--debian/mariadb-client-10.4.README.Debian (renamed from debian/mariadb-client-10.3.README.Debian)0
-rw-r--r--debian/mariadb-client-10.4.docs (renamed from debian/mariadb-client-10.3.docs)0
-rw-r--r--debian/mariadb-client-10.4.install (renamed from debian/mariadb-client-10.3.install)0
-rw-r--r--debian/mariadb-client-10.4.links (renamed from debian/mariadb-client-10.3.links)0
-rw-r--r--debian/mariadb-client-10.4.manpages (renamed from debian/mariadb-client-10.3.manpages)0
-rw-r--r--debian/mariadb-client-10.4.menu (renamed from debian/mariadb-client-10.3.menu)2
-rw-r--r--debian/mariadb-client-core-10.4.install (renamed from debian/mariadb-client-core-10.3.install)0
-rw-r--r--debian/mariadb-plugin-rocksdb.install2
-rw-r--r--debian/mariadb-plugin-tokudb.install2
-rw-r--r--debian/mariadb-server-10.3.config45
-rw-r--r--debian/mariadb-server-10.4.README.Debian (renamed from debian/mariadb-server-10.3.README.Debian)27
-rw-r--r--debian/mariadb-server-10.4.config14
-rw-r--r--debian/mariadb-server-10.4.dirs (renamed from debian/mariadb-server-10.3.dirs)0
-rw-r--r--debian/mariadb-server-10.4.install (renamed from debian/mariadb-server-10.3.install)7
-rw-r--r--debian/mariadb-server-10.4.logcheck.ignore.paranoid (renamed from debian/mariadb-server-10.3.logcheck.ignore.paranoid)0
-rw-r--r--debian/mariadb-server-10.4.logcheck.ignore.server (renamed from debian/mariadb-server-10.3.logcheck.ignore.server)0
-rw-r--r--debian/mariadb-server-10.4.logcheck.ignore.workstation (renamed from debian/mariadb-server-10.3.logcheck.ignore.workstation)0
-rw-r--r--debian/mariadb-server-10.4.mysql-server.logrotate (renamed from debian/mariadb-server-10.3.mysql-server.logrotate)0
-rw-r--r--debian/mariadb-server-10.4.mysql.default (renamed from debian/mariadb-server-10.3.mysql.default)0
-rw-r--r--debian/mariadb-server-10.4.mysql.init (renamed from debian/mariadb-server-10.3.mysql.init)2
-rw-r--r--debian/mariadb-server-10.4.postinst (renamed from debian/mariadb-server-10.3.postinst)103
-rw-r--r--debian/mariadb-server-10.4.postrm (renamed from debian/mariadb-server-10.3.postrm)4
-rw-r--r--debian/mariadb-server-10.4.preinst (renamed from debian/mariadb-server-10.3.preinst)5
-rw-r--r--debian/mariadb-server-10.4.prerm (renamed from debian/mariadb-server-10.3.prerm)0
-rw-r--r--debian/mariadb-server-10.4.py (renamed from debian/mariadb-server-10.3.py)4
-rw-r--r--debian/mariadb-server-10.4.templates (renamed from debian/mariadb-server-10.3.templates)8
-rw-r--r--debian/mariadb-server-10.4.triggers (renamed from debian/mariadb-server-10.3.triggers)0
-rw-r--r--debian/mariadb-server-core-10.4.install (renamed from debian/mariadb-server-core-10.3.install)0
-rw-r--r--debian/mariadb-test-data.install2
-rw-r--r--debian/not-installed18
-rw-r--r--debian/po/POTFILES.in2
-rw-r--r--debian/po/ar.po46
-rw-r--r--debian/po/ca.po94
-rw-r--r--debian/po/cs.po50
-rw-r--r--debian/po/da.po118
-rw-r--r--debian/po/de.po44
-rw-r--r--debian/po/es.po50
-rw-r--r--debian/po/eu.po48
-rw-r--r--debian/po/fr.po48
-rw-r--r--debian/po/gl.po46
-rw-r--r--debian/po/it.po48
-rw-r--r--debian/po/ja.po48
-rw-r--r--debian/po/nb.po44
-rw-r--r--debian/po/nl.po46
-rw-r--r--debian/po/pt.po46
-rw-r--r--debian/po/pt_BR.po50
-rw-r--r--debian/po/ro.po44
-rw-r--r--debian/po/ru.po48
-rw-r--r--debian/po/sv.po48
-rw-r--r--debian/po/templates.pot46
-rw-r--r--debian/po/tr.po48
-rwxr-xr-xdebian/rules8
-rw-r--r--extra/CMakeLists.txt3
-rw-r--r--extra/aws_sdk/CMakeLists.txt74
-rw-r--r--extra/innochecksum.cc275
-rw-r--r--extra/mariabackup/CMakeLists.txt3
-rw-r--r--extra/mariabackup/backup_mysql.cc120
-rw-r--r--extra/mariabackup/ds_local.cc8
-rw-r--r--extra/mariabackup/fil_cur.cc38
-rw-r--r--extra/mariabackup/fil_cur.h7
-rw-r--r--extra/mariabackup/read_filt.cc2
-rw-r--r--extra/mariabackup/read_filt.h2
-rw-r--r--extra/mariabackup/write_filt.cc12
-rw-r--r--extra/mariabackup/xtrabackup.cc173
-rw-r--r--extra/mariabackup/xtrabackup.h9
-rw-r--r--extra/my_print_defaults.c29
-rw-r--r--extra/perror.c6
-rw-r--r--extra/resolve_stack_dump.c2
-rw-r--r--include/aria_backup.h37
-rw-r--r--include/json_lib.h17
-rw-r--r--include/lf.h2
-rw-r--r--include/m_ctype.h9
-rw-r--r--include/m_string.h2
-rw-r--r--include/my_base.h3
-rw-r--r--include/my_compare.h1
-rw-r--r--include/my_counter.h49
-rw-r--r--include/my_global.h7
-rw-r--r--include/my_pthread.h14
-rw-r--r--include/my_sys.h16
-rw-r--r--include/my_time.h78
-rw-r--r--include/mysql/plugin.h2
-rw-r--r--include/mysql/plugin_audit.h.pp45
-rw-r--r--include/mysql/plugin_auth.h46
-rw-r--r--include/mysql/plugin_auth.h.pp49
-rw-r--r--include/mysql/plugin_encryption.h.pp45
-rw-r--r--include/mysql/plugin_ftparser.h.pp45
-rw-r--r--include/mysql/plugin_password_validation.h4
-rw-r--r--include/mysql/plugin_password_validation.h.pp49
-rw-r--r--include/mysql/service_json.h117
-rw-r--r--include/mysql/service_my_print_error.h9
-rw-r--r--include/mysql/service_wsrep.h321
-rw-r--r--include/mysql/services.h1
-rw-r--r--include/mysql_com.h11
-rw-r--r--include/mysql_embed.h1
-rw-r--r--include/service_versions.h1
-rw-r--r--include/thr_lock.h6
-rw-r--r--include/thread_pool_priv.h3
-rw-r--r--include/violite.h23
-rw-r--r--include/wsrep.h40
m--------- libmariadb | 0
-rw-r--r--libmysqld/CMakeLists.txt9
-rw-r--r--libmysqld/lib_sql.cc241
-rw-r--r--libmysqld/libmysql.c7
-rw-r--r--libservices/CMakeLists.txt1
-rw-r--r--libservices/json_service.c19
-rw-r--r-- man/comp_err.1 | 2
-rw-r--r-- man/galera_new_cluster.1 | 2
-rw-r--r-- man/galera_recovery.1 | 2
-rw-r--r-- man/innochecksum.1 | 2
-rw-r--r-- man/mariabackup.1 | 2
-rw-r--r-- man/mariadb-service-convert.1 | 2
-rw-r--r-- man/mbstream.1 | 2
-rw-r--r-- man/msql2mysql.1 | 2
-rw-r--r-- man/my_print_defaults.1 | 9
-rw-r--r-- man/my_safe_process.1 | 2
-rw-r--r-- man/myisam_ftdump.1 | 2
-rw-r--r-- man/myisamchk.1 | 2
-rw-r--r-- man/myisamlog.1 | 2
-rw-r--r-- man/myisampack.1 | 2
-rw-r--r-- man/mysql-stress-test.pl.1 | 2
-rw-r--r-- man/mysql-test-run.pl.1 | 2
-rw-r--r-- man/mysql.1 | 2
-rw-r--r-- man/mysql.server.1 | 2
-rw-r--r-- man/mysql_client_test.1 | 2
-rw-r--r-- man/mysql_config.1 | 2
-rw-r--r-- man/mysql_convert_table_format.1 | 2
-rw-r--r-- man/mysql_find_rows.1 | 2
-rw-r--r-- man/mysql_fix_extensions.1 | 2
-rw-r--r-- man/mysql_install_db.1 | 2
-rw-r--r-- man/mysql_ldb.1 | 2
-rw-r--r-- man/mysql_plugin.1 | 2
-rw-r--r-- man/mysql_secure_installation.1 | 2
-rw-r--r-- man/mysql_setpermission.1 | 2
-rw-r--r-- man/mysql_tzinfo_to_sql.1 | 2
-rw-r--r-- man/mysql_upgrade.1 | 2
-rw-r--r-- man/mysql_waitpid.1 | 2
-rw-r--r-- man/mysqlaccess.1 | 2
-rw-r--r-- man/mysqladmin.1 | 2
-rw-r--r-- man/mysqlbinlog.1 | 2
-rw-r--r-- man/mysqlcheck.1 | 2
-rw-r--r-- man/mysqld.8 | 2
-rw-r--r-- man/mysqld_multi.1 | 2
-rw-r--r-- man/mysqld_safe.1 | 2
-rw-r--r-- man/mysqld_safe_helper.1 | 2
-rw-r--r-- man/mysqldump.1 | 2
-rw-r--r-- man/mysqldumpslow.1 | 2
-rw-r--r-- man/mysqlhotcopy.1 | 2
-rw-r--r-- man/mysqlimport.1 | 2
-rw-r--r-- man/mysqlshow.1 | 2
-rw-r--r-- man/mysqlslap.1 | 2
-rw-r--r-- man/mysqltest.1 | 2
-rw-r--r-- man/perror.1 | 2
-rw-r--r-- man/replace.1 | 2
-rw-r--r-- man/resolve_stack_dump.1 | 2
-rw-r--r-- man/resolveip.1 | 2
-rw-r--r-- man/tokuft_logprint.1 | 2
-rw-r--r-- man/tokuftdump.1 | 2
-rw-r--r-- man/wsrep_sst_common.1 | 2
-rw-r--r-- man/wsrep_sst_mariabackup.1 | 2
-rw-r--r-- man/wsrep_sst_mysqldump.1 | 2
-rw-r--r-- man/wsrep_sst_rsync.1 | 2
-rw-r--r-- man/wsrep_sst_rsync_wan.1 | 2
-rw-r--r--mysql-test/include/add_anonymous_users.inc2
-rw-r--r--mysql-test/include/analyze-format.inc2
-rw-r--r--mysql-test/include/check-testcase.test63
-rw-r--r--mysql-test/include/check_ftwrl_incompatible.inc6
-rw-r--r--mysql-test/include/deadlock.inc2
-rw-r--r--mysql-test/include/default_mysqld.cnf1
-rw-r--r--mysql-test/include/diff_tables.inc2
-rw-r--r--mysql-test/include/explain_non_select.inc4
-rw-r--r--mysql-test/include/galera_cluster.inc6
-rw-r--r--mysql-test/include/galera_have_debug_sync.inc (renamed from mysql-test/suite/galera/include/galera_have_debug_sync.inc)0
-rw-r--r--mysql-test/include/galera_wait_sync_point.inc11
-rw-r--r--mysql-test/include/gis_keys.inc4
-rw-r--r--mysql-test/include/have_auth_named_pipe.inc13
-rw-r--r--mysql-test/include/have_unix_socket.inc5
-rw-r--r--mysql-test/include/have_unix_socket.opt1
-rw-r--r--mysql-test/include/have_wsrep_enabled.inc1
-rw-r--r--mysql-test/include/icp_tests.inc5
-rw-r--r--mysql-test/include/index_merge1.inc16
-rw-r--r--mysql-test/include/innodb_checksum_algorithm.combinations11
-rw-r--r--mysql-test/include/innodb_checksum_algorithm.inc1
-rw-r--r--mysql-test/include/install_plugin_if_exists.inc41
-rw-r--r--mysql-test/include/kill_galera.inc20
-rw-r--r--mysql-test/include/maria_empty_logs.inc12
-rw-r--r--mysql-test/include/maria_verify_recovery.inc12
-rw-r--r--mysql-test/include/mtr_check.sql3
-rw-r--r--mysql-test/include/start_mysqld.inc25
-rw-r--r--mysql-test/include/switch_to_mysql_global_priv.inc6
-rw-r--r--mysql-test/include/switch_to_mysql_user.inc56
-rw-r--r--mysql-test/include/system_db_struct.inc2
-rw-r--r--mysql-test/include/type_hrtime.inc6
-rw-r--r--mysql-test/include/wait_until_connected_again.inc19
-rw-r--r--mysql-test/include/wsrep_wait_disconnect.inc20
-rw-r--r--mysql-test/lib/My/ConfigFactory.pm21
-rw-r--r--mysql-test/lib/mtr_cases.pm1
-rw-r--r--mysql-test/main/1st.result2
-rw-r--r--mysql-test/main/alter_table.result1
-rw-r--r--mysql-test/main/alter_user.result36
-rw-r--r--mysql-test/main/alter_user.test10
-rw-r--r--mysql-test/main/analyze.result7
-rw-r--r--mysql-test/main/analyze_format_json.result14
-rw-r--r--mysql-test/main/analyze_stmt_orderby.result12
-rw-r--r--mysql-test/main/auth_rpl.result2
-rw-r--r--mysql-test/main/backup_aria.result158
-rw-r--r--mysql-test/main/backup_aria.test157
-rw-r--r--mysql-test/main/backup_interaction.result520
-rw-r--r--mysql-test/main/backup_interaction.test503
-rw-r--r--mysql-test/main/backup_lock.result219
-rw-r--r--mysql-test/main/backup_lock.test284
-rw-r--r--mysql-test/main/backup_lock_debug.result28
-rw-r--r--mysql-test/main/backup_lock_debug.test40
-rw-r--r--mysql-test/main/backup_locks.result46
-rw-r--r--mysql-test/main/backup_locks.test50
-rw-r--r--mysql-test/main/backup_priv.result40
-rw-r--r--mysql-test/main/backup_priv.test52
-rw-r--r--mysql-test/main/backup_stages.result335
-rw-r--r--mysql-test/main/backup_stages.test385
-rw-r--r--mysql-test/main/backup_syntax.result163
-rw-r--r--mysql-test/main/backup_syntax.test181
-rw-r--r--mysql-test/main/bootstrap.result15
-rw-r--r--mysql-test/main/bootstrap.test18
-rw-r--r--mysql-test/main/brackets.result455
-rw-r--r--mysql-test/main/brackets.test158
-rw-r--r--mysql-test/main/bug12427262.result6
-rw-r--r--mysql-test/main/cast.result15
-rw-r--r--mysql-test/main/column_compression_parts.result1
-rw-r--r--mysql-test/main/column_compression_parts.test2
-rw-r--r--mysql-test/main/connect-abstract.cnf9
-rw-r--r--mysql-test/main/connect-abstract.result5
-rw-r--r--mysql-test/main/connect-abstract.test6
-rw-r--r--mysql-test/main/connect.result30
-rw-r--r--mysql-test/main/connect.test38
-rw-r--r--mysql-test/main/create-big.result12
-rw-r--r--mysql-test/main/create-big.test12
-rw-r--r--mysql-test/main/create.result95
-rw-r--r--mysql-test/main/create.test91
-rw-r--r--mysql-test/main/create_drop_binlog.result4
-rw-r--r--mysql-test/main/create_drop_user.result24
-rw-r--r--mysql-test/main/create_drop_user.test8
-rw-r--r--mysql-test/main/create_or_replace.result51
-rw-r--r--mysql-test/main/create_or_replace.test22
-rw-r--r--mysql-test/main/create_select_tmp.result21
-rw-r--r--mysql-test/main/create_select_tmp.test14
-rw-r--r--mysql-test/main/create_user.result24
-rw-r--r--mysql-test/main/create_utf8.result89
-rw-r--r--mysql-test/main/create_utf8.test80
-rw-r--r--mysql-test/main/cte_nonrecursive.result40
-rw-r--r--mysql-test/main/cte_recursive.result12
-rw-r--r--mysql-test/main/ctype_binary.result4
-rw-r--r--mysql-test/main/ctype_cp1251.result4
-rw-r--r--mysql-test/main/ctype_gbk.result4
-rw-r--r--mysql-test/main/ctype_latin1.result4
-rw-r--r--mysql-test/main/ctype_ucs.result54
-rw-r--r--mysql-test/main/ctype_ucs.test30
-rw-r--r--mysql-test/main/ctype_upgrade.result8
-rw-r--r--mysql-test/main/ctype_utf16.result4
-rw-r--r--mysql-test/main/ctype_utf16_uca.result2
-rw-r--r--mysql-test/main/ctype_utf16le.result4
-rw-r--r--mysql-test/main/ctype_utf32.result4
-rw-r--r--mysql-test/main/ctype_utf32_uca.result2
-rw-r--r--mysql-test/main/ctype_utf8.result4
-rw-r--r--mysql-test/main/custom_aggregate_functions.result35
-rw-r--r--mysql-test/main/custom_aggregate_functions.test51
-rw-r--r--mysql-test/main/date_formats.result2
-rw-r--r--mysql-test/main/deadlock_innodb.result2
-rw-r--r--mysql-test/main/default.result2
-rw-r--r--mysql-test/main/delayed.result3
-rw-r--r--mysql-test/main/delete_use_source.result9
-rw-r--r--mysql-test/main/deprecated_features.result2
-rw-r--r--mysql-test/main/derived.result2
-rw-r--r--mysql-test/main/derived_cond_pushdown.result408
-rw-r--r--mysql-test/main/derived_cond_pushdown.test23
-rw-r--r--mysql-test/main/derived_opt.result2
-rw-r--r--mysql-test/main/derived_split_innodb.result5
-rw-r--r--mysql-test/main/derived_view.result8
-rw-r--r--mysql-test/main/disabled.def1
-rw-r--r--mysql-test/main/distinct.result6
-rw-r--r--mysql-test/main/drop.test20
-rw-r--r--mysql-test/main/dyncol.result4
-rw-r--r--mysql-test/main/empty_user_table.result11
-rw-r--r--mysql-test/main/empty_user_table.test22
-rw-r--r--mysql-test/main/endspace.result2
-rw-r--r--mysql-test/main/events_restart.result3
-rw-r--r--mysql-test/main/except.result20
-rw-r--r--mysql-test/main/except.test4
-rw-r--r--mysql-test/main/explain.result10
-rw-r--r--mysql-test/main/explain.test6
-rw-r--r--mysql-test/main/explain_json.result19
-rw-r--r--mysql-test/main/explain_non_select.result2
-rw-r--r--mysql-test/main/failed_auth_3909.result27
-rw-r--r--mysql-test/main/failed_auth_3909.test20
-rw-r--r--mysql-test/main/failed_auth_unixsocket.result10
-rw-r--r--mysql-test/main/failed_auth_unixsocket.test22
-rw-r--r--mysql-test/main/features.result1
-rw-r--r--mysql-test/main/flush.result21
-rw-r--r--mysql-test/main/flush.test48
-rw-r--r--mysql-test/main/flush_block_commit.test2
-rw-r--r--mysql-test/main/flush_block_commit_notembedded.test2
-rw-r--r--mysql-test/main/flush_read_lock.result72
-rw-r--r--mysql-test/main/flush_read_lock.test123
-rw-r--r--mysql-test/main/flush_read_lock_kill.test2
-rw-r--r--mysql-test/main/flush_ssl.result28
-rw-r--r--mysql-test/main/flush_ssl.test61
-rw-r--r--mysql-test/main/fulltext_order_by.result4
-rw-r--r--mysql-test/main/func_analyse.result55
-rw-r--r--mysql-test/main/func_analyse.test53
-rw-r--r--mysql-test/main/func_debug.result290
-rw-r--r--mysql-test/main/func_debug.test102
-rw-r--r--mysql-test/main/func_extract.result592
-rw-r--r--mysql-test/main/func_extract.test257
-rw-r--r--mysql-test/main/func_group.result21
-rw-r--r--mysql-test/main/func_group.test9
-rw-r--r--mysql-test/main/func_group_innodb.result14
-rw-r--r--mysql-test/main/func_group_innodb.test15
-rw-r--r--mysql-test/main/func_hybrid_type.result199
-rw-r--r--mysql-test/main/func_hybrid_type.test102
-rw-r--r--mysql-test/main/func_in.result49
-rw-r--r--mysql-test/main/func_in.test24
-rw-r--r--mysql-test/main/func_json.result88
-rw-r--r--mysql-test/main/func_json.test50
-rw-r--r--mysql-test/main/func_like.result4
-rw-r--r--mysql-test/main/func_misc.result26
-rw-r--r--mysql-test/main/func_misc.test30
-rw-r--r--mysql-test/main/func_sapdb.result6
-rw-r--r--mysql-test/main/func_str.result14
-rw-r--r--mysql-test/main/func_str.test16
-rw-r--r--mysql-test/main/func_time.result2600
-rw-r--r--mysql-test/main/func_time.test896
-rw-r--r--mysql-test/main/func_time_round.result1374
-rw-r--r--mysql-test/main/func_time_round.test461
-rw-r--r--mysql-test/main/get_diagnostics.result11
-rw-r--r--mysql-test/main/get_diagnostics.test7
-rw-r--r--mysql-test/main/gis-debug.result90
-rw-r--r--mysql-test/main/gis-debug.test36
-rw-r--r--mysql-test/main/gis-precise.result2
-rw-r--r--mysql-test/main/gis-rtree.result1
-rw-r--r--mysql-test/main/gis.result63
-rw-r--r--mysql-test/main/gis.test56
-rw-r--r--mysql-test/main/grant.result50
-rw-r--r--mysql-test/main/grant.test43
-rw-r--r--mysql-test/main/grant2.result114
-rw-r--r--mysql-test/main/grant2.test81
-rw-r--r--mysql-test/main/grant3.result24
-rw-r--r--mysql-test/main/grant4.result18
-rw-r--r--mysql-test/main/grant5.result81
-rw-r--r--mysql-test/main/grant5.test53
-rw-r--r--mysql-test/main/grant_4332.result6
-rw-r--r--mysql-test/main/grant_4332.test8
-rw-r--r--mysql-test/main/grant_lowercase.result6
-rw-r--r--mysql-test/main/grant_lowercase.test4
-rw-r--r--mysql-test/main/group_by.result19
-rw-r--r--mysql-test/main/group_by.test4
-rw-r--r--mysql-test/main/group_by_innodb.result1
-rw-r--r--mysql-test/main/group_min_max.result357
-rw-r--r--mysql-test/main/group_min_max.test38
-rw-r--r--mysql-test/main/group_min_max_innodb.result13
-rw-r--r--mysql-test/main/group_min_max_innodb.test12
-rw-r--r--mysql-test/main/handlersocket.result2
-rw-r--r--mysql-test/main/having.result29
-rw-r--r--mysql-test/main/having.test20
-rw-r--r--mysql-test/main/having_cond_pushdown.result4778
-rw-r--r--mysql-test/main/having_cond_pushdown.test1403
-rw-r--r--mysql-test/main/help.result8
-rw-r--r--mysql-test/main/help.test8
-rw-r--r--mysql-test/main/in_subq_cond_pushdown.result3889
-rw-r--r--mysql-test/main/in_subq_cond_pushdown.test862
-rw-r--r--mysql-test/main/index_intersect.result159
-rw-r--r--mysql-test/main/index_intersect.test35
-rw-r--r--mysql-test/main/index_intersect_innodb.result181
-rw-r--r--mysql-test/main/index_intersect_innodb.test9
-rw-r--r--mysql-test/main/index_merge_innodb.result7
-rw-r--r--mysql-test/main/index_merge_innodb.test2
-rw-r--r--mysql-test/main/index_merge_myisam.result145
-rw-r--r--mysql-test/main/index_merge_myisam.test25
-rw-r--r--mysql-test/main/information_schema-big.result2
-rw-r--r--mysql-test/main/information_schema.result25
-rw-r--r--mysql-test/main/information_schema.test9
-rw-r--r--mysql-test/main/information_schema_all_engines.result8
-rw-r--r--mysql-test/main/information_schema_db.result2
-rw-r--r--mysql-test/main/init_file_set_password-7656.result8
-rw-r--r--mysql-test/main/init_file_set_password-7656.test2
-rw-r--r--mysql-test/main/innodb_ext_key.result30
-rw-r--r--mysql-test/main/innodb_ext_key.test15
-rw-r--r--mysql-test/main/innodb_icp.result26
-rw-r--r--mysql-test/main/innodb_icp.test11
-rw-r--r--mysql-test/main/innodb_mysql_lock2.result14
-rw-r--r--mysql-test/main/intersect.result61
-rw-r--r--mysql-test/main/intersect.test31
-rw-r--r--mysql-test/main/invisible_field.result8
-rw-r--r--mysql-test/main/invisible_field_debug.result15
-rw-r--r--mysql-test/main/invisible_field_debug.test3
-rw-r--r--mysql-test/main/join.result20
-rw-r--r--mysql-test/main/join.test3
-rw-r--r--mysql-test/main/join_cache.result244
-rw-r--r--mysql-test/main/join_cache.test17
-rw-r--r--mysql-test/main/join_nested.result16
-rw-r--r--mysql-test/main/join_nested.test2
-rw-r--r--mysql-test/main/join_nested_jcl6.result20
-rw-r--r--mysql-test/main/join_outer.result13
-rw-r--r--mysql-test/main/join_outer_innodb.result32
-rw-r--r--mysql-test/main/join_outer_jcl6.result13
-rw-r--r--mysql-test/main/key.result8
-rw-r--r--mysql-test/main/key.test1
-rw-r--r--mysql-test/main/key_cache.result74
-rw-r--r--mysql-test/main/kill.result2
-rw-r--r--mysql-test/main/kill.test8
-rw-r--r--mysql-test/main/limit_rows_examined.result26
-rw-r--r--mysql-test/main/limit_rows_examined.test12
-rw-r--r--mysql-test/main/loaddata.result2
-rw-r--r--mysql-test/main/loaddata.test2
-rw-r--r--mysql-test/main/lock.result3
-rw-r--r--mysql-test/main/lock.test2
-rw-r--r--mysql-test/main/lock_multi.result20
-rw-r--r--mysql-test/main/lock_multi.test70
-rw-r--r--mysql-test/main/lock_sync.result18
-rw-r--r--mysql-test/main/lock_sync.test4
-rw-r--r--mysql-test/main/lock_user.result134
-rw-r--r--mysql-test/main/lock_user.test142
-rw-r--r--mysql-test/main/log_tables_upgrade.result4
-rw-r--r--mysql-test/main/long_unique.result1465
-rw-r--r--mysql-test/main/long_unique.test545
-rw-r--r--mysql-test/main/long_unique_bugs.result267
-rw-r--r--mysql-test/main/long_unique_bugs.test319
-rw-r--r--mysql-test/main/long_unique_debug.result579
-rw-r--r--mysql-test/main/long_unique_debug.test95
-rw-r--r--mysql-test/main/long_unique_innodb.result133
-rw-r--r--mysql-test/main/long_unique_innodb.test140
-rw-r--r--mysql-test/main/long_unique_update.result317
-rw-r--r--mysql-test/main/long_unique_update.test141
-rw-r--r--mysql-test/main/long_unique_using_hash.result54
-rw-r--r--mysql-test/main/long_unique_using_hash.test28
-rw-r--r--mysql-test/main/lowercase_fs_off.result4
-rw-r--r--mysql-test/main/max_password_errors.result45
-rw-r--r--mysql-test/main/max_password_errors.test64
-rw-r--r--mysql-test/main/mdev-504.result3
-rw-r--r--mysql-test/main/mdev-504.test4
-rw-r--r--mysql-test/main/mdev13607.result33
-rw-r--r--mysql-test/main/mdl.result74
-rw-r--r--mysql-test/main/mdl.test64
-rw-r--r--mysql-test/main/mdl_sync.result63
-rw-r--r--mysql-test/main/mdl_sync.test95
-rw-r--r--mysql-test/main/merge.result21
-rw-r--r--mysql-test/main/mix2_myisam.result1
-rw-r--r--mysql-test/main/mrr_icp_extra.result9
-rw-r--r--mysql-test/main/mrr_icp_extra.test1
-rw-r--r--mysql-test/main/multi_update.result8
-rw-r--r--mysql-test/main/myisam.result18
-rw-r--r--mysql-test/main/myisam.test8
-rw-r--r--mysql-test/main/myisam_debug.result2
-rw-r--r--mysql-test/main/myisam_explain_non_select_all.result596
-rw-r--r--mysql-test/main/myisam_icp.result26
-rw-r--r-- mysql-test/main/myisam_mrr,64bit.rdiff | 13
-rw-r--r--mysql-test/main/myisam_mrr.result4
-rw-r--r--mysql-test/main/myisam_mrr.test1
-rw-r--r--mysql-test/main/myisam_recover.result4
-rw-r--r--mysql-test/main/mysql_client_test.result24
-rw-r--r--mysql-test/main/mysql_install_db_win.result15
-rw-r--r--mysql-test/main/mysql_install_db_win.test24
-rw-r--r--mysql-test/main/mysql_protocols.result5
-rw-r--r--mysql-test/main/mysql_upgrade-6984.result8
-rw-r--r--mysql-test/main/mysql_upgrade-6984.test4
-rw-r--r--mysql-test/main/mysql_upgrade.result93
-rw-r--r--mysql-test/main/mysql_upgrade.test54
-rw-r--r--mysql-test/main/mysql_upgrade_no_innodb.result3
-rw-r--r--mysql-test/main/mysql_upgrade_noengine.result15
-rw-r--r--mysql-test/main/mysql_upgrade_noengine.test16
-rw-r--r--mysql-test/main/mysql_upgrade_ssl.result4
-rw-r--r--mysql-test/main/mysql_upgrade_view.result16
-rw-r--r--mysql-test/main/mysqlcheck-master.opt1
-rw-r--r--mysql-test/main/mysqlcheck.result25
-rw-r--r--mysql-test/main/mysqlcheck.test4
-rw-r--r-- mysql-test/main/mysqld--help,win.rdiff | 45
-rw-r--r--mysql-test/main/mysqld--help.result80
-rw-r--r--mysql-test/main/mysqldump.result8
-rw-r--r--mysql-test/main/mysqldump.test4
-rw-r--r--mysql-test/main/negation_elimination.result38
-rw-r--r--mysql-test/main/not_embedded_server.result4
-rw-r--r--mysql-test/main/not_embedded_server.test2
-rw-r--r--mysql-test/main/null.result18
-rw-r--r--mysql-test/main/null_key.result15
-rw-r--r--mysql-test/main/null_key.test4
-rw-r--r--mysql-test/main/olap.result10
-rw-r--r--mysql-test/main/old-mode.result41
-rw-r--r--mysql-test/main/old-mode.test29
-rw-r--r--mysql-test/main/opt_trace.result6037
-rw-r--r--mysql-test/main/opt_trace.test390
-rw-r--r--mysql-test/main/opt_trace_index_merge.result245
-rw-r--r--mysql-test/main/opt_trace_index_merge.test21
-rw-r--r--mysql-test/main/opt_trace_index_merge_innodb.result253
-rw-r--r--mysql-test/main/opt_trace_index_merge_innodb.test54
-rw-r--r--mysql-test/main/opt_trace_security.result386
-rw-r--r--mysql-test/main/opt_trace_security.test197
-rw-r--r--mysql-test/main/order_by.result189
-rw-r--r--mysql-test/main/order_by.test42
-rw-r--r--mysql-test/main/outfile_loaddata.result5
-rw-r--r--mysql-test/main/parser.result46
-rw-r--r--mysql-test/main/parser.test33
-rw-r--r--mysql-test/main/partition.result12
-rw-r--r--mysql-test/main/partition_binlog.result1
-rw-r--r--mysql-test/main/partition_explicit_prune.result18
-rw-r--r--mysql-test/main/partition_explicit_prune.test9
-rw-r--r--mysql-test/main/partition_innodb.result45
-rw-r--r--mysql-test/main/partition_innodb.test34
-rw-r--r--mysql-test/main/partition_pruning.result136
-rw-r--r--mysql-test/main/partition_pruning.test2
-rw-r--r--mysql-test/main/partition_range.result10
-rw-r--r--mysql-test/main/partition_range.test2
-rw-r--r--mysql-test/main/password_expiration.result259
-rw-r--r--mysql-test/main/password_expiration.test263
-rw-r--r--mysql-test/main/password_expiration_unix_socket.result8
-rw-r--r--mysql-test/main/password_expiration_unix_socket.test24
-rw-r--r--mysql-test/main/perror-win.result4
-rw-r--r--mysql-test/main/perror.result8
-rw-r--r--mysql-test/main/plugin.result68
-rw-r--r--mysql-test/main/plugin.test18
-rw-r--r--mysql-test/main/plugin_auth.result39
-rw-r--r--mysql-test/main/plugin_auth.test46
-rw-r--r--mysql-test/main/plugin_auth_qa.result116
-rw-r--r--mysql-test/main/plugin_auth_qa_1.result82
-rw-r--r--mysql-test/main/plugin_auth_qa_1.test14
-rw-r--r--mysql-test/main/plugin_auth_qa_2.result40
-rw-r--r--mysql-test/main/plugin_innodb.result3
-rw-r--r--mysql-test/main/plugin_innodb.test2
-rw-r--r--mysql-test/main/preload.result10
-rw-r--r--mysql-test/main/profiling.test2
-rw-r--r--mysql-test/main/ps.result90
-rw-r--r--mysql-test/main/ps.test32
-rw-r--r--mysql-test/main/ps_1general.result14
-rw-r--r--mysql-test/main/ps_2myisam.result2
-rw-r--r--mysql-test/main/ps_3innodb.result2
-rw-r--r--mysql-test/main/ps_4heap.result2
-rw-r--r--mysql-test/main/ps_5merge.result4
-rw-r--r--mysql-test/main/ps_ddl.result14
-rw-r--r--mysql-test/main/ps_ddl.test2
-rw-r--r--mysql-test/main/ps_ddl1.result2
-rw-r--r--mysql-test/main/ps_error.result1
-rw-r--r--mysql-test/main/query_cache.result44
-rw-r--r--mysql-test/main/query_cache.test27
-rw-r--r--mysql-test/main/range.result391
-rw-r--r--mysql-test/main/range.test127
-rw-r--r--mysql-test/main/range_innodb.result1
-rw-r--r--mysql-test/main/range_mrr_icp.result410
-rw-r--r--mysql-test/main/range_mrr_icp.test1
-rw-r--r--mysql-test/main/range_vs_index_merge.result126
-rw-r--r--mysql-test/main/range_vs_index_merge.test29
-rw-r--r--mysql-test/main/range_vs_index_merge_innodb.result134
-rw-r--r--mysql-test/main/range_vs_index_merge_innodb.test9
-rw-r--r--mysql-test/main/repair.result2
-rw-r--r--mysql-test/main/row.result36
-rw-r--r--mysql-test/main/rowid_filter.result2107
-rw-r--r--mysql-test/main/rowid_filter.test342
-rw-r--r--mysql-test/main/rowid_filter_innodb.result2165
-rw-r--r--mysql-test/main/rowid_filter_innodb.test68
-rw-r--r--mysql-test/main/secure_file_priv_win.result10
-rw-r--r--mysql-test/main/select.result141
-rw-r--r--mysql-test/main/select.test8
-rw-r--r--mysql-test/main/select_jcl6.result141
-rw-r--r--mysql-test/main/select_pkeycache.result141
-rw-r--r--mysql-test/main/select_safe.result9
-rw-r--r--mysql-test/main/selectivity.result18
-rw-r--r--mysql-test/main/selectivity.test2
-rw-r--r--mysql-test/main/selectivity_innodb.result6
-rw-r--r--mysql-test/main/selectivity_no_engine.result5
-rw-r--r--mysql-test/main/selectivity_no_engine.test1
-rw-r--r--mysql-test/main/set_password.result14
-rw-r--r--mysql-test/main/set_password.test2
-rw-r--r--mysql-test/main/set_statement.result17
-rw-r--r--mysql-test/main/set_statement.test16
-rw-r--r--mysql-test/main/shm-master.opt1
-rw-r--r--mysql-test/main/shm.result2170
-rw-r--r--mysql-test/main/shm.test47
-rw-r--r--mysql-test/main/show_check.result10
-rw-r--r--mysql-test/main/show_check.test2
-rw-r--r--mysql-test/main/show_create_user.result4
-rw-r--r--mysql-test/main/show_create_user.test2
-rw-r--r-- mysql-test/main/show_explain.cc | 0
-rw-r--r--mysql-test/main/show_explain.result2
-rw-r--r--mysql-test/main/show_explain.test3
-rw-r--r--mysql-test/main/show_grants_with_plugin-7985.result197
-rw-r--r--mysql-test/main/show_grants_with_plugin-7985.test160
-rw-r--r--mysql-test/main/signal.result6
-rw-r--r--mysql-test/main/signal.test13
-rw-r--r--mysql-test/main/signal_demo1.result7
-rw-r--r--mysql-test/main/skip_grants.result37
-rw-r--r--mysql-test/main/skip_grants.test43
-rw-r--r--mysql-test/main/sp-anchor-row-type-cursor.result6
-rw-r--r--mysql-test/main/sp-anchor-row-type-table.result6
-rw-r--r--mysql-test/main/sp-anchor-type.result2
-rw-r--r--mysql-test/main/sp-big.result2
-rw-r--r--mysql-test/main/sp-bugs.result14
-rw-r--r--mysql-test/main/sp-bugs.test21
-rw-r--r--mysql-test/main/sp-destruct.test28
-rw-r--r--mysql-test/main/sp-error.result20
-rw-r--r--mysql-test/main/sp-error.test19
-rw-r--r--mysql-test/main/sp-row.result6
-rw-r--r--mysql-test/main/sp-security.result36
-rw-r--r--mysql-test/main/sp-security.test24
-rw-r--r--mysql-test/main/sp.result87
-rw-r--r--mysql-test/main/sp.test37
-rw-r--r--mysql-test/main/sp_notembedded.result4
-rw-r--r--mysql-test/main/sp_notembedded.test10
-rw-r--r--mysql-test/main/sp_trans.result2
-rw-r--r--mysql-test/main/sp_trans_log.result2
-rw-r--r--mysql-test/main/sql_mode.result8
-rw-r--r--mysql-test/main/sql_mode.test4
-rw-r--r--mysql-test/main/ssl_cipher.result1
-rw-r--r--mysql-test/main/stat_tables.result99
-rw-r--r--mysql-test/main/stat_tables.test51
-rw-r--r--mysql-test/main/stat_tables_disabled.result13
-rw-r--r--mysql-test/main/stat_tables_disabled.test11
-rw-r--r--mysql-test/main/stat_tables_innodb.result105
-rw-r--r--mysql-test/main/stat_tables_innodb.test9
-rw-r--r--mysql-test/main/stat_tables_rbr.result4
-rw-r--r--mysql-test/main/stat_tables_rbr.test5
-rw-r--r--mysql-test/main/statistics.result122
-rw-r--r--mysql-test/main/statistics.test108
-rw-r--r--mysql-test/main/status.result8
-rw-r--r--mysql-test/main/str_to_datetime_457.result7
-rw-r--r--mysql-test/main/subselect.result116
-rw-r--r--mysql-test/main/subselect.test53
-rw-r--r--mysql-test/main/subselect2.result14
-rw-r--r--mysql-test/main/subselect_exists2in.result3
-rw-r--r--mysql-test/main/subselect_exists2in_costmat.result4
-rw-r--r--mysql-test/main/subselect_extra.result4
-rw-r--r--mysql-test/main/subselect_extra_no_semijoin.result4
-rw-r--r--mysql-test/main/subselect_innodb.result36
-rw-r--r--mysql-test/main/subselect_innodb.test32
-rw-r--r--mysql-test/main/subselect_mat.result91
-rw-r--r--mysql-test/main/subselect_mat_cost.result38
-rw-r--r--mysql-test/main/subselect_mat_cost.test4
-rw-r--r--mysql-test/main/subselect_mat_cost_bugs.result1
-rw-r--r--mysql-test/main/subselect_no_exists_to_in.result114
-rw-r--r--mysql-test/main/subselect_no_mat.result112
-rw-r--r--mysql-test/main/subselect_no_opts.result106
-rw-r--r--mysql-test/main/subselect_no_scache.result116
-rw-r--r--mysql-test/main/subselect_no_semijoin.result104
-rw-r--r--mysql-test/main/subselect_sj.result26
-rw-r--r--mysql-test/main/subselect_sj.test8
-rw-r--r--mysql-test/main/subselect_sj2.result73
-rw-r--r--mysql-test/main/subselect_sj2.test25
-rw-r--r--mysql-test/main/subselect_sj2_jcl6.result74
-rw-r--r--mysql-test/main/subselect_sj2_jcl6.test11
-rw-r--r--mysql-test/main/subselect_sj2_mat.result129
-rw-r--r--mysql-test/main/subselect_sj2_mat.test18
-rw-r--r--mysql-test/main/subselect_sj_jcl6.result30
-rw-r--r--mysql-test/main/subselect_sj_mat.result147
-rw-r--r--mysql-test/main/subselect_sj_mat.test21
-rw-r--r--mysql-test/main/system_mysql_db.result114
-rw-r--r--mysql-test/main/system_mysql_db_507.result (renamed from mysql-test/main/no_password_column-mdev-11170.result)68
-rw-r--r--mysql-test/main/system_mysql_db_507.test (renamed from mysql-test/main/no_password_column-mdev-11170.test)53
-rw-r--r--mysql-test/main/system_mysql_db_fix40123.result136
-rw-r--r--mysql-test/main/system_mysql_db_fix40123.test55
-rw-r--r--mysql-test/main/system_mysql_db_fix50030.result140
-rw-r--r--mysql-test/main/system_mysql_db_fix50030.test62
-rw-r--r--mysql-test/main/system_mysql_db_fix50117.result136
-rw-r--r--mysql-test/main/system_mysql_db_fix50117.test87
-rw-r--r--mysql-test/main/system_mysql_db_refs.result16
-rw-r--r--mysql-test/main/system_mysql_db_refs.test11
-rw-r--r--mysql-test/main/table_elim.result4
-rw-r--r--mysql-test/main/tc_heuristic_recover.result1
-rw-r--r--mysql-test/main/temp_table_frm.result6
-rw-r--r--mysql-test/main/temp_table_frm.test9
-rw-r--r--mysql-test/main/temporal_literal.result12
-rw-r--r--mysql-test/main/timezone2.result225
-rw-r--r--mysql-test/main/timezone2.test198
-rw-r--r--mysql-test/main/tmp_table_count-7586.result2
-rw-r--r--mysql-test/main/trigger.result6
-rw-r--r--mysql-test/main/trigger_notembedded.test2
-rw-r--r--mysql-test/main/truncate_coverage.result2
-rw-r--r--mysql-test/main/truncate_coverage.test4
-rw-r--r--mysql-test/main/type_bit.result28
-rw-r--r--mysql-test/main/type_bit.test19
-rw-r--r--mysql-test/main/type_blob.result50
-rw-r--r--mysql-test/main/type_blob.test41
-rw-r--r--mysql-test/main/type_date.result119
-rw-r--r--mysql-test/main/type_date.test66
-rw-r--r--mysql-test/main/type_date_round.result174
-rw-r--r--mysql-test/main/type_date_round.test113
-rw-r--r--mysql-test/main/type_datetime.result183
-rw-r--r--mysql-test/main/type_datetime.test81
-rw-r--r--mysql-test/main/type_datetime_round.result205
-rw-r--r--mysql-test/main/type_datetime_round.test147
-rw-r--r--mysql-test/main/type_decimal.result111
-rw-r--r--mysql-test/main/type_decimal.test64
-rw-r--r--mysql-test/main/type_float.result32
-rw-r--r--mysql-test/main/type_float.test21
-rw-r--r--mysql-test/main/type_hex_hybrid.result24
-rw-r--r--mysql-test/main/type_hex_hybrid.test21
-rw-r--r--mysql-test/main/type_int.result44
-rw-r--r--mysql-test/main/type_int.test31
-rw-r--r--mysql-test/main/type_interval.result83
-rw-r--r--mysql-test/main/type_interval.test54
-rw-r--r--mysql-test/main/type_json.result67
-rw-r--r--mysql-test/main/type_json.test35
-rw-r--r--mysql-test/main/type_temporal_innodb.result48
-rw-r--r--mysql-test/main/type_temporal_innodb.test15
-rw-r--r--mysql-test/main/type_temporal_mysql56_debug.result416
-rw-r--r--mysql-test/main/type_temporal_mysql56_debug.test107
-rw-r--r--mysql-test/main/type_time.result285
-rw-r--r--mysql-test/main/type_time.test160
-rw-r--r--mysql-test/main/type_time_hires.result7
-rw-r--r--mysql-test/main/type_time_round.result260
-rw-r--r--mysql-test/main/type_time_round.test184
-rw-r--r--mysql-test/main/type_timestamp.result257
-rw-r--r--mysql-test/main/type_timestamp.test234
-rw-r--r--mysql-test/main/type_timestamp_round.result191
-rw-r--r--mysql-test/main/type_timestamp_round.test160
-rw-r--r--mysql-test/main/type_varchar.result63
-rw-r--r--mysql-test/main/type_varchar.test31
-rw-r--r--mysql-test/main/type_varchar_mysql41.result113
-rw-r--r--mysql-test/main/type_varchar_mysql41.test59
-rw-r--r--mysql-test/main/type_year.result85
-rw-r--r--mysql-test/main/type_year.test57
-rw-r--r--mysql-test/main/udf.result105
-rw-r--r--mysql-test/main/udf.test66
-rw-r--r--mysql-test/main/udf_notembedded.result1
-rw-r--r--mysql-test/main/union.result41
-rw-r--r--mysql-test/main/union.test22
-rw-r--r--mysql-test/main/unique.result9
-rw-r--r--mysql-test/main/unique.test22
-rw-r--r--mysql-test/main/update.result8
-rw-r--r--mysql-test/main/update_innodb.result54
-rw-r--r--mysql-test/main/update_innodb.test57
-rw-r--r--mysql-test/main/update_use_source.result1
-rw-r--r--mysql-test/main/user_var.result4
-rw-r--r--mysql-test/main/userstat.result6
-rw-r--r--mysql-test/main/varbinary.result4
-rw-r--r--mysql-test/main/view.result62
-rw-r--r--mysql-test/main/view.test51
-rw-r--r--mysql-test/main/view_grant.result12
-rw-r--r--mysql-test/main/view_grant.test16
-rw-r--r--mysql-test/main/win.result11
-rw-r--r--mysql-test/main/win.test9
-rw-r--r--mysql-test/main/win_big-mdev-11697.result2
-rwxr-xr-xmysql-test/mysql-test-run.pl190
-rw-r--r-- mysql-test/std_data/binlog-header.binlog | bin 0 -> 256 bytes
-rw-r--r-- mysql-test/std_data/rpl/mysql-5.7.11-stm-temporal-round-binlog.000001 | bin 0 -> 514 bytes
-rw-r--r-- mysql-test/std_data/rpl/mysql-8.0.13-stm-temporal-round-binlog.000001 | bin 0 -> 892 bytes
-rw-r--r--mysql-test/suite/archive/disabled.def13
-rw-r--r--mysql-test/suite/archive/flush.result18
-rw-r--r--mysql-test/suite/archive/flush.test25
-rw-r--r--mysql-test/suite/archive/rnd_pos.result56
-rw-r--r--mysql-test/suite/archive/rnd_pos.test31
-rw-r--r--mysql-test/suite/binlog/include/binlog.test8
-rw-r--r--mysql-test/suite/binlog/r/binlog_index.result2
-rw-r--r--mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_frag.result2
-rw-r--r--mysql-test/suite/binlog/r/binlog_rotate_perf.result945
-rw-r--r--mysql-test/suite/binlog/r/binlog_row_binlog.result18
-rw-r--r--mysql-test/suite/binlog/r/binlog_stm_binlog.result16
-rw-r--r--mysql-test/suite/binlog/r/show_concurrent_rotate.result16
-rw-r--r--mysql-test/suite/binlog/t/binlog_mysqlbinlog_row_frag.test2
-rw-r--r--mysql-test/suite/binlog/t/binlog_rotate_perf.test102
-rw-r--r--mysql-test/suite/binlog/t/binlog_stm_binlog.test4
-rw-r--r--mysql-test/suite/binlog/t/show_concurrent_rotate.test24
-rw-r--r--mysql-test/suite/binlog_encryption/binlog_index.result2
-rw-r--r--mysql-test/suite/binlog_encryption/rpl_parallel.result1
-rw-r--r--mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.result2
-rw-r--r--mysql-test/suite/compat/mssql/parser.result87
-rw-r--r--mysql-test/suite/compat/mssql/parser.test68
-rw-r--r--mysql-test/suite/compat/oracle/r/custom_aggregate_functions.result136
-rw-r--r--mysql-test/suite/compat/oracle/r/plugin.result48
-rw-r--r--mysql-test/suite/compat/oracle/r/ps.result2
-rw-r--r--mysql-test/suite/compat/oracle/r/sp-package-innodb.result2
-rw-r--r--mysql-test/suite/compat/oracle/r/sp-package-mdl.result4
-rw-r--r--mysql-test/suite/compat/oracle/r/sp-package.result9
-rw-r--r--mysql-test/suite/compat/oracle/r/sp-row.result18
-rw-r--r--mysql-test/suite/compat/oracle/r/sp.result8
-rw-r--r--mysql-test/suite/compat/oracle/r/versioning.result8
-rw-r--r--mysql-test/suite/compat/oracle/t/custom_aggregate_functions.test170
-rw-r--r--mysql-test/suite/compat/oracle/t/plugin.test3
-rw-r--r--mysql-test/suite/compat/oracle/t/sp.test16
-rw-r--r--mysql-test/suite/compat/oracle/t/versioning.test10
-rw-r--r--mysql-test/suite/csv/flush.result25
-rw-r--r--mysql-test/suite/csv/flush.test30
-rw-r--r--mysql-test/suite/encryption/r/create_or_replace.result1
-rw-r--r--mysql-test/suite/encryption/r/debug_key_management.result10
-rw-r--r--mysql-test/suite/encryption/r/encrypt_and_grep.result3
-rw-r--r--mysql-test/suite/encryption/r/innochecksum.result7
-rw-r--r--mysql-test/suite/encryption/r/innodb-bad-key-change.result6
-rw-r--r--mysql-test/suite/encryption/r/innodb-bad-key-change2.result6
-rw-r--r--mysql-test/suite/encryption/r/innodb-bad-key-change3.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-bad-key-change4.result3
-rw-r--r--mysql-test/suite/encryption/r/innodb-compressed-blob.result3
-rw-r--r--mysql-test/suite/encryption/r/innodb-encryption-alter.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-encryption-disable.result3
-rw-r--r--mysql-test/suite/encryption/r/innodb-first-page-read.result96
-rw-r--r--mysql-test/suite/encryption/r/innodb-force-corrupt.result4
-rw-r--r--mysql-test/suite/encryption/r/innodb-key-rotation-disable.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-missing-key.result3
-rw-r--r--mysql-test/suite/encryption/r/innodb-page_encryption.result2
-rw-r--r--mysql-test/suite/encryption/r/innodb-page_encryption_compression.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-page_encryption_log_encryption.result2
-rw-r--r--mysql-test/suite/encryption/r/innodb-read-only.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb-redo-nokeys.result6
-rw-r--r--mysql-test/suite/encryption/r/innodb-remove-encryption.result2
-rw-r--r-- mysql-test/suite/encryption/r/innodb-spatial-index,full_crc32.rdiff | 36
-rw-r--r-- mysql-test/suite/encryption/r/innodb-spatial-index,strict_full_crc32.rdiff | 36
-rw-r--r--mysql-test/suite/encryption/r/innodb-spatial-index.result21
-rw-r--r--mysql-test/suite/encryption/r/innodb_encrypt_log.result2
-rw-r--r--mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result21
-rw-r--r--mysql-test/suite/encryption/r/innodb_encryption-page-compression.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb_encryption.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb_encryption_discard_import.result5
-rw-r--r--mysql-test/suite/encryption/r/innodb_encryption_row_compressed.result2
-rw-r--r--mysql-test/suite/encryption/r/innodb_encryption_tables.result1
-rw-r--r--mysql-test/suite/encryption/r/innodb_first_page.result2
-rw-r--r--mysql-test/suite/encryption/r/innodb_lotoftables.result54
-rw-r--r--mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result2
-rw-r--r--mysql-test/suite/encryption/r/innodb_page_encryption_key_change.result4
-rw-r--r--mysql-test/suite/encryption/t/corrupted_during_recovery.test1
-rw-r--r--mysql-test/suite/encryption/t/debug_key_management.test18
-rw-r--r--mysql-test/suite/encryption/t/innochecksum.test22
-rw-r--r--mysql-test/suite/encryption/t/innodb-first-page-read.test104
-rw-r--r--mysql-test/suite/encryption/t/innodb-force-corrupt.test2
-rw-r--r--mysql-test/suite/encryption/t/innodb-redo-nokeys.test3
-rw-r--r--mysql-test/suite/encryption/t/innodb-spatial-index.test41
-rw-r--r--mysql-test/suite/encryption/t/innodb_encrypt_key_rotation_age.test1
-rw-r--r--mysql-test/suite/encryption/t/innodb_lotoftables.test24
-rw-r--r--mysql-test/suite/encryption/t/innodb_page_encryption_key_change.test2
-rw-r--r--mysql-test/suite/engines/funcs/r/an_calendar.result4
-rw-r--r--mysql-test/suite/engines/funcs/r/an_number.result14
-rw-r--r--mysql-test/suite/engines/funcs/r/an_string.result10
-rw-r--r--mysql-test/suite/engines/funcs/r/tc_partition_analyze.result1
-rw-r--r--mysql-test/suite/federated/error_on_close-8313.result1
-rw-r--r--mysql-test/suite/federated/federatedx_create_handlers.result312
-rw-r--r--mysql-test/suite/federated/federatedx_create_handlers.test161
-rw-r--r--mysql-test/suite/federated/net_thd_crash-12725.result1
-rw-r--r--mysql-test/suite/federated/net_thd_crash-12951.result1
-rw-r--r--mysql-test/suite/funcs_1/r/innodb_func_view.result84
-rw-r--r--mysql-test/suite/funcs_1/r/is_check_constraints.result36
-rw-r--r--mysql-test/suite/funcs_1/r/is_columns_is.result8
-rw-r--r--mysql-test/suite/funcs_1/r/is_columns_is_embedded.result8
-rw-r--r--mysql-test/suite/funcs_1/r/is_columns_mysql.result237
-rw-r--r--mysql-test/suite/funcs_1/r/is_columns_mysql_embedded.result237
-rw-r--r--mysql-test/suite/funcs_1/r/is_key_column_usage.result6
-rw-r--r--mysql-test/suite/funcs_1/r/is_key_column_usage_embedded.result6
-rw-r--r--mysql-test/suite/funcs_1/r/is_routines_embedded.result6
-rw-r--r--mysql-test/suite/funcs_1/r/is_statistics.result6
-rw-r--r--mysql-test/suite/funcs_1/r/is_statistics_mysql.result6
-rw-r--r--mysql-test/suite/funcs_1/r/is_statistics_mysql_embedded.result12
-rw-r--r--mysql-test/suite/funcs_1/r/is_table_constraints.result4
-rw-r--r--mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result4
-rw-r--r--mysql-test/suite/funcs_1/r/is_table_constraints_mysql_embedded.result8
-rw-r--r--mysql-test/suite/funcs_1/r/is_tables_is.result50
-rw-r--r--mysql-test/suite/funcs_1/r/is_tables_is_embedded.result50
-rw-r--r--mysql-test/suite/funcs_1/r/is_tables_mysql.result80
-rw-r--r--mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result160
-rw-r--r--mysql-test/suite/funcs_1/r/is_user_privileges.result1872
-rw-r--r--mysql-test/suite/funcs_1/r/is_views.result1
-rw-r--r--mysql-test/suite/funcs_1/r/is_views_embedded.result1
-rw-r--r--mysql-test/suite/funcs_1/r/memory_func_view.result84
-rw-r--r--mysql-test/suite/funcs_1/r/myisam_func_view.result84
-rw-r--r--mysql-test/suite/funcs_1/r/storedproc.result3
-rw-r--r--mysql-test/suite/funcs_1/t/is_user_privileges.test22
-rw-r--r--mysql-test/suite/funcs_1/t/row_count_func.test2
-rw-r--r--mysql-test/suite/funcs_1/t/storedproc.test1
-rw-r--r--mysql-test/suite/galera/disabled.def16
-rw-r--r--mysql-test/suite/galera/galera_2nodes.cnf26
-rw-r--r--mysql-test/suite/galera/galera_2nodes_as_master.cnf18
-rw-r--r--mysql-test/suite/galera/galera_2nodes_as_slave.cnf51
-rw-r--r--mysql-test/suite/galera/galera_3nodes_as_slave.cnf62
-rw-r--r--mysql-test/suite/galera/galera_4nodes.cnf30
-rw-r--r--mysql-test/suite/galera/include/galera_base_port.inc8
-rw-r--r--mysql-test/suite/galera/include/galera_concurrent_test.inc90
-rw-r--r--mysql-test/suite/galera/include/galera_dump_sr_table.inc28
-rw-r--r--mysql-test/suite/galera/include/galera_load_provider.inc68
-rw-r--r--mysql-test/suite/galera/include/galera_sst_restore.inc2
-rw-r--r--mysql-test/suite/galera/include/galera_st_disconnect_slave.inc8
-rw-r--r--mysql-test/suite/galera/include/galera_unload_provider.inc8
-rw-r--r--mysql-test/suite/galera/r/GAL-382.result2
-rw-r--r--mysql-test/suite/galera/r/GAL-401.result2
-rw-r--r--mysql-test/suite/galera/r/GAL-480.result2
-rw-r--r--mysql-test/suite/galera/r/GCF-1081.result47
-rw-r--r--mysql-test/suite/galera/r/GCF-939.result13
-rw-r--r--mysql-test/suite/galera/r/MDEV-15443.result2
-rw-r--r--mysql-test/suite/galera/r/MDEV-16509.result75
-rw-r--r--mysql-test/suite/galera/r/MW-252.result2
-rw-r--r--mysql-test/suite/galera/r/MW-258.result2
-rw-r--r--mysql-test/suite/galera/r/MW-259.result2
-rw-r--r--mysql-test/suite/galera/r/MW-284.result4
-rw-r--r--mysql-test/suite/galera/r/MW-285.result2
-rw-r--r--mysql-test/suite/galera/r/MW-286.result21
-rw-r--r--mysql-test/suite/galera/r/MW-292.result27
-rw-r--r--mysql-test/suite/galera/r/MW-309.result2
-rw-r--r--mysql-test/suite/galera/r/MW-313.result2
-rw-r--r--mysql-test/suite/galera/r/MW-328A.result25
-rw-r--r--mysql-test/suite/galera/r/MW-328B.result2
-rw-r--r--mysql-test/suite/galera/r/MW-328C.result2
-rw-r--r--mysql-test/suite/galera/r/MW-328D.result2
-rw-r--r--mysql-test/suite/galera/r/MW-328E.result2
-rw-r--r--mysql-test/suite/galera/r/MW-329.result9
-rw-r--r--mysql-test/suite/galera/r/MW-336.result2
-rw-r--r--mysql-test/suite/galera/r/MW-357.result2
-rw-r--r--mysql-test/suite/galera/r/MW-360.result41
-rw-r--r--mysql-test/suite/galera/r/MW-369.result149
-rw-r--r--mysql-test/suite/galera/r/MW-388.result14
-rw-r--r--mysql-test/suite/galera/r/MW-402.result76
-rw-r--r--mysql-test/suite/galera/r/MW-416.result5
-rw-r--r--mysql-test/suite/galera/r/MW-44.result7
-rw-r--r--mysql-test/suite/galera/r/MW-86-wait1.result10
-rw-r--r--mysql-test/suite/galera/r/MW-86-wait8.result9
-rw-r--r--mysql-test/suite/galera/r/basic.result2
-rw-r--r--mysql-test/suite/galera/r/binlog_checksum.result2
-rw-r--r--mysql-test/suite/galera/r/create.result2
-rw-r--r--mysql-test/suite/galera/r/enforce_storage_engine.result2
-rw-r--r--mysql-test/suite/galera/r/enforce_storage_engine2.result2
-rw-r--r--mysql-test/suite/galera/r/ev51914.result2
-rw-r--r--mysql-test/suite/galera/r/fk.result2
-rw-r--r--mysql-test/suite/galera/r/galera#414.result2
-rw-r--r--mysql-test/suite/galera/r/galera#500.result8
-rw-r--r--mysql-test/suite/galera/r/galera#505.result2
-rw-r--r--mysql-test/suite/galera/r/galera_FK_duplicate_client_insert.result2
-rw-r--r--mysql-test/suite/galera/r/galera_admin.result4
-rw-r--r--mysql-test/suite/galera/r/galera_alter_engine_innodb.result2
-rw-r--r--mysql-test/suite/galera/r/galera_alter_engine_myisam.result2
-rw-r--r--mysql-test/suite/galera/r/galera_alter_table_force.result2
-rw-r--r--mysql-test/suite/galera/r/galera_applier_ftwrl_table.result2
-rw-r--r--mysql-test/suite/galera/r/galera_applier_ftwrl_table_alter.result2
-rw-r--r--mysql-test/suite/galera/r/galera_as_master.result4
-rw-r--r--mysql-test/suite/galera/r/galera_as_master_gtid.result44
-rw-r--r--mysql-test/suite/galera/r/galera_as_master_large.result2
-rw-r--r--mysql-test/suite/galera/r/galera_as_slave.result12
-rw-r--r--mysql-test/suite/galera/r/galera_as_slave_autoinc.result12
-rw-r--r--mysql-test/suite/galera/r/galera_as_slave_gtid.result12
-rw-r--r--mysql-test/suite/galera/r/galera_as_slave_gtid_replicate_do_db.result159
-rw-r--r--mysql-test/suite/galera/r/galera_as_slave_gtid_replicate_do_db_cc.result315
-rw-r--r--mysql-test/suite/galera/r/galera_as_slave_nonprim.result16
-rw-r--r--mysql-test/suite/galera/r/galera_autoinc_sst_mariabackup.result2
-rw-r--r--mysql-test/suite/galera/r/galera_bf_abort.result4
-rw-r--r--mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result2
-rw-r--r--mysql-test/suite/galera/r/galera_bf_abort_for_update.result6
-rw-r--r--mysql-test/suite/galera/r/galera_bf_abort_ftwrl.result2
-rw-r--r--mysql-test/suite/galera/r/galera_bf_abort_get_lock.result4
-rw-r--r--mysql-test/suite/galera/r/galera_bf_abort_group_commit.result685
-rw-r--r--mysql-test/suite/galera/r/galera_bf_abort_lock_table.result2
-rw-r--r--mysql-test/suite/galera/r/galera_bf_abort_shutdown.result12
-rw-r--r--mysql-test/suite/galera/r/galera_bf_abort_sleep.result4
-rw-r--r--mysql-test/suite/galera/r/galera_bf_background_statistics.result4
-rw-r--r--mysql-test/suite/galera/r/galera_bf_lock_wait.result2
-rw-r--r--mysql-test/suite/galera/r/galera_binlog_cache_size.result2
-rw-r--r--mysql-test/suite/galera/r/galera_binlog_checksum.result2
-rw-r--r--mysql-test/suite/galera/r/galera_binlog_event_max_size_max.result2
-rw-r--r--mysql-test/suite/galera/r/galera_binlog_event_max_size_min.result2
-rw-r--r--mysql-test/suite/galera/r/galera_binlog_row_image.result2
-rw-r--r--mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result2
-rw-r--r--mysql-test/suite/galera/r/galera_commit_empty.result15
-rw-r--r--mysql-test/suite/galera/r/galera_concurrent_ctas.result2
-rw-r--r--mysql-test/suite/galera/r/galera_create_function.result2
-rw-r--r--mysql-test/suite/galera/r/galera_create_procedure.result2
-rw-r--r--mysql-test/suite/galera/r/galera_create_table_as_select.result103
-rw-r--r--mysql-test/suite/galera/r/galera_create_table_like.result2
-rw-r--r--mysql-test/suite/galera/r/galera_create_trigger.result2
-rw-r--r--mysql-test/suite/galera/r/galera_ddl_multiline.result2
-rw-r--r--mysql-test/suite/galera/r/galera_defaults.result16
-rw-r--r--mysql-test/suite/galera/r/galera_delete_limit.result2
-rw-r--r--mysql-test/suite/galera/r/galera_desync_overlapped.result2
-rw-r--r--mysql-test/suite/galera/r/galera_drop_database.result2
-rw-r--r--mysql-test/suite/galera/r/galera_drop_multi.result2
-rw-r--r--mysql-test/suite/galera/r/galera_encrypt_tmp_files.result2
-rw-r--r--mysql-test/suite/galera/r/galera_enum.result4
-rw-r--r--mysql-test/suite/galera/r/galera_events.result2
-rw-r--r--mysql-test/suite/galera/r/galera_fk_cascade_delete.result2
-rw-r--r--mysql-test/suite/galera/r/galera_fk_cascade_update.result2
-rw-r--r--mysql-test/suite/galera/r/galera_fk_conflict.result4
-rw-r--r--mysql-test/suite/galera/r/galera_fk_mismatch.result2
-rw-r--r--mysql-test/suite/galera/r/galera_fk_multicolumn.result2
-rw-r--r--mysql-test/suite/galera/r/galera_fk_multitable.result2
-rw-r--r--mysql-test/suite/galera/r/galera_fk_no_pk.result2
-rw-r--r--mysql-test/suite/galera/r/galera_fk_selfreferential.result2
-rw-r--r--mysql-test/suite/galera/r/galera_fk_setnull.result2
-rw-r--r--mysql-test/suite/galera/r/galera_flush_local.result6
-rw-r--r--mysql-test/suite/galera/r/galera_forced_binlog_format.result20
-rw-r--r--mysql-test/suite/galera/r/galera_ftwrl.result2
-rw-r--r--mysql-test/suite/galera/r/galera_ftwrl_drain.result4
-rw-r--r--mysql-test/suite/galera/r/galera_fulltext.result2
-rw-r--r--mysql-test/suite/galera/r/galera_gcache_recover.result2
-rw-r--r--mysql-test/suite/galera/r/galera_gcache_recover_full_gcache.result4
-rw-r--r--mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result2
-rw-r--r--mysql-test/suite/galera/r/galera_gcs_fc_limit.result2
-rw-r--r--mysql-test/suite/galera/r/galera_gcs_fragment.result15
-rw-r--r--mysql-test/suite/galera/r/galera_gcs_max_packet_size.result2
-rw-r--r--mysql-test/suite/galera/r/galera_gra_log.result2
-rw-r--r--mysql-test/suite/galera/r/galera_gtid.result2
-rw-r--r--mysql-test/suite/galera/r/galera_gtid_slave.result26
-rw-r--r--mysql-test/suite/galera/r/galera_gtid_slave_sst_rsync.result106
-rw-r--r--mysql-test/suite/galera/r/galera_insert_ignore.result2
-rw-r--r--mysql-test/suite/galera/r/galera_insert_multi.result4
-rw-r--r--mysql-test/suite/galera/r/galera_ist_innodb_flush_logs.result2
-rw-r--r-- mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff | 6
-rw-r--r--mysql-test/suite/galera/r/galera_ist_mariabackup.result5
-rw-r--r--mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs.result2
-rw-r--r--mysql-test/suite/galera/r/galera_ist_mysqldump.result3
-rw-r--r--mysql-test/suite/galera/r/galera_ist_progress.result9
-rw-r--r--mysql-test/suite/galera/r/galera_ist_recv_bind.result2
-rw-r--r--mysql-test/suite/galera/r/galera_ist_restart_joiner.result3
-rw-r--r--mysql-test/suite/galera/r/galera_ist_rsync.result5
-rw-r--r--mysql-test/suite/galera/r/galera_kill_ddl.result2
-rw-r--r--mysql-test/suite/galera/r/galera_kill_largechanges.result2
-rw-r--r--mysql-test/suite/galera/r/galera_kill_smallchanges.result2
-rw-r--r--mysql-test/suite/galera/r/galera_last_committed_id.result38
-rw-r--r--mysql-test/suite/galera/r/galera_load_data.result2
-rw-r--r--mysql-test/suite/galera/r/galera_lock_table.result2
-rw-r--r--mysql-test/suite/galera/r/galera_log_bin.result4
-rw-r--r--mysql-test/suite/galera/r/galera_log_output_csv.result2
-rw-r--r--mysql-test/suite/galera/r/galera_many_columns.result4
-rw-r--r--mysql-test/suite/galera/r/galera_many_indexes.result6
-rw-r--r--mysql-test/suite/galera/r/galera_many_rows.result4
-rw-r--r--mysql-test/suite/galera/r/galera_many_tables_nopk.result4
-rw-r--r--mysql-test/suite/galera/r/galera_many_tables_pk.result4
-rw-r--r--mysql-test/suite/galera/r/galera_mdev_10812.result2
-rw-r--r--mysql-test/suite/galera/r/galera_mdev_13787.result2
-rw-r--r--mysql-test/suite/galera/r/galera_mdev_15611.result2
-rw-r--r--mysql-test/suite/galera/r/galera_mdl_race.result4
-rw-r--r--mysql-test/suite/galera/r/galera_multi_database.result2
-rw-r--r--mysql-test/suite/galera/r/galera_myisam_autocommit.result2
-rw-r--r--mysql-test/suite/galera/r/galera_myisam_transactions.result2
-rw-r--r--mysql-test/suite/galera/r/galera_nopk_bit.result4
-rw-r--r--mysql-test/suite/galera/r/galera_nopk_blob.result4
-rw-r--r--mysql-test/suite/galera/r/galera_nopk_large_varchar.result4
-rw-r--r--mysql-test/suite/galera/r/galera_nopk_unicode.result4
-rw-r--r--mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result8
-rw-r--r--mysql-test/suite/galera/r/galera_parallel_autoinc_largetrx.result20
-rw-r--r--mysql-test/suite/galera/r/galera_parallel_autoinc_manytrx.result4
-rw-r--r--mysql-test/suite/galera/r/galera_parallel_simple.result2
-rw-r--r--mysql-test/suite/galera/r/galera_pc_recovery.result37
-rw-r--r--mysql-test/suite/galera/r/galera_pk_bigint_signed.result4
-rw-r--r--mysql-test/suite/galera/r/galera_pk_bigint_unsigned.result4
-rw-r--r--mysql-test/suite/galera/r/galera_prepared_statement.result2
-rw-r--r--mysql-test/suite/galera/r/galera_query_cache.result2
-rw-r--r--mysql-test/suite/galera/r/galera_query_cache_sync_wait.result2
-rw-r--r--mysql-test/suite/galera/r/galera_read_only.result2
-rw-r--r--mysql-test/suite/galera/r/galera_repl_key_format_flat16.result2
-rw-r--r--mysql-test/suite/galera/r/galera_repl_max_ws_size.result4
-rw-r--r--mysql-test/suite/galera/r/galera_restart_nochanges.result2
-rw-r--r--mysql-test/suite/galera/r/galera_restart_on_unknown_option.result2
-rw-r--r--mysql-test/suite/galera/r/galera_roles.result2
-rw-r--r--mysql-test/suite/galera/r/galera_rsu_add_pk.result2
-rw-r--r--mysql-test/suite/galera/r/galera_rsu_drop_pk.result2
-rw-r--r--mysql-test/suite/galera/r/galera_rsu_error.result2
-rw-r--r--mysql-test/suite/galera/r/galera_rsu_simple.result2
-rw-r--r--mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sbr.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sbr_binlog.result2
-rw-r--r--mysql-test/suite/galera/r/galera_schema_dirty_reads.result2
-rw-r--r--mysql-test/suite/galera/r/galera_serializable.result8
-rw-r--r--mysql-test/suite/galera/r/galera_server.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sp_bf_abort.result356
-rw-r--r--mysql-test/suite/galera/r/galera_sp_insert_parallel.result41
-rw-r--r--mysql-test/suite/galera/r/galera_sql_log_bin_zero.result2
-rw-r--r--mysql-test/suite/galera/r/galera_ssl.result2
-rw-r--r--mysql-test/suite/galera/r/galera_ssl_compression.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mariabackup.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mariabackup_encrypt_with_key.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mariabackup_table_options.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff22
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mysqldump,release.rdiff18
-rw-r--r--mysql-test/suite/galera/r/galera_sst_mysqldump.result1
-rw-r--r--mysql-test/suite/galera/r/galera_sst_rsync.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff12
-rw-r--r--mysql-test/suite/galera/r/galera_sst_rsync2.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sst_rsync_data_dir.result2
-rw-r--r--mysql-test/suite/galera/r/galera_status_cluster.result2
-rw-r--r--mysql-test/suite/galera/r/galera_status_local_index.result2
-rw-r--r--mysql-test/suite/galera/r/galera_status_local_state.result2
-rw-r--r--mysql-test/suite/galera/r/galera_suspend_slave.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sync_wait_show.result2
-rw-r--r--mysql-test/suite/galera/r/galera_sync_wait_upto.result43
-rw-r--r--mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result2
-rw-r--r--mysql-test/suite/galera/r/galera_toi_ddl_error.result5
-rw-r--r--mysql-test/suite/galera/r/galera_toi_ddl_fk_insert.result2
-rw-r--r--mysql-test/suite/galera/r/galera_toi_ddl_fk_update.result2
-rw-r--r--mysql-test/suite/galera/r/galera_toi_ddl_locking.result33
-rw-r--r--mysql-test/suite/galera/r/galera_toi_ddl_nonconflicting.result2
-rw-r--r--mysql-test/suite/galera/r/galera_toi_ddl_sequential.result2
-rw-r--r--mysql-test/suite/galera/r/galera_toi_drop_database.result6
-rw-r--r--mysql-test/suite/galera/r/galera_toi_ftwrl.result2
-rw-r--r--mysql-test/suite/galera/r/galera_toi_lock_exclusive.result4
-rw-r--r--mysql-test/suite/galera/r/galera_toi_lock_shared.result2
-rw-r--r--mysql-test/suite/galera/r/galera_toi_truncate.result4
-rw-r--r--mysql-test/suite/galera/r/galera_transaction_read_only.result2
-rw-r--r--mysql-test/suite/galera/r/galera_transaction_replay.result96
-rw-r--r--mysql-test/suite/galera/r/galera_truncate.result2
-rw-r--r--mysql-test/suite/galera/r/galera_truncate_temporary.result2
-rw-r--r--mysql-test/suite/galera/r/galera_unicode_identifiers.result2
-rw-r--r--mysql-test/suite/galera/r/galera_unicode_pk.result6
-rw-r--r--mysql-test/suite/galera/r/galera_update_limit.result2
-rw-r--r--mysql-test/suite/galera/r/galera_v1_row_events.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_OSU_method.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_OSU_method2.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_auto_inc_control_off.result4
-rw-r--r--mysql-test/suite/galera/r/galera_var_auto_inc_control_on.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_certify_nonPK_off.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_cluster_address.result8
-rw-r--r--mysql-test/suite/galera/r/galera_var_desync_on.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_dirty_reads.result5
-rw-r--r--mysql-test/suite/galera/r/galera_var_fkchecks.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_gtid_domain_id.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_ignore_apply_errors.result186
-rw-r--r--mysql-test/suite/galera/r/galera_var_innodb_disallow_writes.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_load_data_splitting.result12
-rw-r--r--mysql-test/suite/galera/r/galera_var_log_bin.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_max_ws_rows.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_max_ws_size.result4
-rw-r--r--mysql-test/suite/galera/r/galera_var_mysql_replication_bundle.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_node_address.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_reject_queries.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_replicate_myisam_off.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_replicate_myisam_on.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_retry_autocommit.result28
-rw-r--r--mysql-test/suite/galera/r/galera_var_slave_threads.result60
-rw-r--r--mysql-test/suite/galera/r/galera_var_sst_auth.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_sync_wait.result2
-rw-r--r--mysql-test/suite/galera/r/galera_var_wsrep_on_off.result2
-rw-r--r--mysql-test/suite/galera/r/galera_wan.result2
-rw-r--r--mysql-test/suite/galera/r/galera_wan_restart_ist.result2
-rw-r--r--mysql-test/suite/galera/r/galera_wan_restart_sst.result2
-rw-r--r--mysql-test/suite/galera/r/galera_wsrep_desync_wsrep_on.result2
-rw-r--r--mysql-test/suite/galera/r/galera_wsrep_log_conficts.result4
-rw-r--r--mysql-test/suite/galera/r/galera_wsrep_new_cluster.result2
-rw-r--r--mysql-test/suite/galera/r/galera_wsrep_provider_options_syntax.result2
-rw-r--r--mysql-test/suite/galera/r/galera_wsrep_provider_unset_set.result2
-rw-r--r--mysql-test/suite/galera/r/galera_zero_length_column.result2
-rw-r--r--mysql-test/suite/galera/r/grant.result4
-rw-r--r--mysql-test/suite/galera/r/lp1276424.result2
-rw-r--r--mysql-test/suite/galera/r/lp1347768.result2
-rw-r--r--mysql-test/suite/galera/r/lp1376747-2.result2
-rw-r--r--mysql-test/suite/galera/r/lp1376747-3.result2
-rw-r--r--mysql-test/suite/galera/r/lp1376747-4.result2
-rw-r--r--mysql-test/suite/galera/r/lp1376747.result2
-rw-r--r--mysql-test/suite/galera/r/lp1438990.result2
-rw-r--r--mysql-test/suite/galera/r/lp959512.result2
-rw-r--r--mysql-test/suite/galera/r/mdev_10518.result2
-rw-r--r--mysql-test/suite/galera/r/mdev_18730.result27
-rw-r--r--mysql-test/suite/galera/r/mdev_9290.result2
-rw-r--r--mysql-test/suite/galera/r/mysql-wsrep#110.result2
-rw-r--r--mysql-test/suite/galera/r/mysql-wsrep#198.result2
-rw-r--r--mysql-test/suite/galera/r/mysql-wsrep#201.result2
-rw-r--r--mysql-test/suite/galera/r/mysql-wsrep#237.result4
-rw-r--r--mysql-test/suite/galera/r/mysql-wsrep#247.result2
-rw-r--r--mysql-test/suite/galera/r/mysql-wsrep#31.result2
-rw-r--r--mysql-test/suite/galera/r/mysql-wsrep#33.result6
-rw-r--r--mysql-test/suite/galera/r/mysql-wsrep#332.result42
-rw-r--r--mysql-test/suite/galera/r/mysql-wsrep#90.result2
-rw-r--r--mysql-test/suite/galera/r/partition.result16
-rw-r--r--mysql-test/suite/galera/r/pxc-421.result2
-rw-r--r--mysql-test/suite/galera/r/query_cache.result6
-rw-r--r--mysql-test/suite/galera/r/rename.result2
-rw-r--r--mysql-test/suite/galera/r/rpl_row_annotate.result8
-rw-r--r--mysql-test/suite/galera/r/sql_log_bin.result2
-rw-r--r--mysql-test/suite/galera/r/unique_key.result2
-rw-r--r--mysql-test/suite/galera/r/versioning_trx_id.result8
-rw-r--r--mysql-test/suite/galera/r/view.result2
-rw-r--r--mysql-test/suite/galera/r/wsrep_trx_fragment_size_sr.result15
-rw-r--r--mysql-test/suite/galera/suite.pm6
-rw-r--r--mysql-test/suite/galera/t/GAL-419.test4
-rw-r--r--mysql-test/suite/galera/t/GCF-1081.test72
-rw-r--r--mysql-test/suite/galera/t/GCF-939.test31
-rw-r--r--mysql-test/suite/galera/t/MDEV-16509.test141
-rw-r--r--mysql-test/suite/galera/t/MW-284.test2
-rw-r--r--mysql-test/suite/galera/t/MW-286.test50
-rw-r--r--mysql-test/suite/galera/t/MW-292.test50
-rw-r--r--mysql-test/suite/galera/t/MW-328A.test40
-rw-r--r--mysql-test/suite/galera/t/MW-329.test28
-rw-r--r--mysql-test/suite/galera/t/MW-336.test6
-rw-r--r--mysql-test/suite/galera/t/MW-360-master.opt2
-rw-r--r--mysql-test/suite/galera/t/MW-360.test100
-rw-r--r--mysql-test/suite/galera/t/MW-369.inc7
-rw-r--r--mysql-test/suite/galera/t/MW-369.test100
-rw-r--r--mysql-test/suite/galera/t/MW-388.test12
-rw-r--r--mysql-test/suite/galera/t/MW-402.test56
-rw-r--r--mysql-test/suite/galera/t/MW-416.test8
-rw-r--r--mysql-test/suite/galera/t/MW-86-wait1.test11
-rw-r--r--mysql-test/suite/galera/t/MW-86-wait8.test9
-rw-r--r--mysql-test/suite/galera/t/galera#500.test6
-rw-r--r--mysql-test/suite/galera/t/galera_applier_ftwrl_table_alter-master.opt2
-rw-r--r--mysql-test/suite/galera/t/galera_as_master.test2
-rw-r--r--mysql-test/suite/galera/t/galera_as_master_gtid.cnf2
-rw-r--r--mysql-test/suite/galera/t/galera_as_master_gtid.test24
-rw-r--r--mysql-test/suite/galera/t/galera_as_master_gtid_change_master.cnf2
-rw-r--r--mysql-test/suite/galera/t/galera_as_master_gtid_change_master.test2
-rw-r--r--mysql-test/suite/galera/t/galera_as_slave.test17
-rw-r--r--mysql-test/suite/galera/t/galera_as_slave_autoinc.test18
-rw-r--r--mysql-test/suite/galera/t/galera_as_slave_gtid.test21
-rw-r--r--mysql-test/suite/galera/t/galera_as_slave_gtid_replicate_do_db.cnf17
-rw-r--r--mysql-test/suite/galera/t/galera_as_slave_gtid_replicate_do_db.test150
-rw-r--r--mysql-test/suite/galera/t/galera_as_slave_gtid_replicate_do_db_cc.test176
-rw-r--r--mysql-test/suite/galera/t/galera_as_slave_nonprim.test26
-rw-r--r--mysql-test/suite/galera/t/galera_as_slave_preordered.test19
-rw-r--r--mysql-test/suite/galera/t/galera_as_slave_replication_bundle.test13
-rw-r--r--mysql-test/suite/galera/t/galera_autoinc_sst_mariabackup.cnf4
-rw-r--r--mysql-test/suite/galera/t/galera_autoinc_sst_mariabackup.test2
-rw-r--r--mysql-test/suite/galera/t/galera_bf_abort_group_commit.cnf15
-rw-r--r--mysql-test/suite/galera/t/galera_bf_abort_group_commit.test77
-rw-r--r--mysql-test/suite/galera/t/galera_bf_abort_shutdown.test33
-rw-r--r--mysql-test/suite/galera/t/galera_commit_empty.test35
-rw-r--r--mysql-test/suite/galera/t/galera_create_table_as_select.test145
-rw-r--r--mysql-test/suite/galera/t/galera_defaults.test2
-rw-r--r--mysql-test/suite/galera/t/galera_forced_binlog_format.test5
-rw-r--r--mysql-test/suite/galera/t/galera_ftwrl_drain.test4
-rw-r--r--mysql-test/suite/galera/t/galera_gcache_recover_full_gcache.test12
-rw-r--r--mysql-test/suite/galera/t/galera_gcs_fragment.test4
-rw-r--r--mysql-test/suite/galera/t/galera_gtid-master.opt2
-rw-r--r--mysql-test/suite/galera/t/galera_gtid_slave.test33
-rw-r--r--mysql-test/suite/galera/t/galera_gtid_slave_sst_rsync.test111
-rw-r--r--mysql-test/suite/galera/t/galera_ist_progress.test4
-rw-r--r--mysql-test/suite/galera/t/galera_ist_restart_joiner.test4
-rw-r--r--mysql-test/suite/galera/t/galera_kill_applier.test1
-rw-r--r--mysql-test/suite/galera/t/galera_last_committed_id.test68
-rw-r--r--mysql-test/suite/galera/t/galera_log_bin.test2
-rw-r--r--mysql-test/suite/galera/t/galera_migrate.cnf2
-rw-r--r--mysql-test/suite/galera/t/galera_parallel_apply_lock_table.test6
-rw-r--r--mysql-test/suite/galera/t/galera_parallel_autoinc_largetrx.test11
-rw-r--r--mysql-test/suite/galera/t/galera_parallel_autoinc_manytrx.test2
-rw-r--r--mysql-test/suite/galera/t/galera_parallel_simple.test2
-rw-r--r--mysql-test/suite/galera/t/galera_pc_recovery.test102
-rw-r--r--mysql-test/suite/galera/t/galera_sp_insert_parallel.test55
-rw-r--r--mysql-test/suite/galera/t/galera_split_brain.test6
-rw-r--r--mysql-test/suite/galera/t/galera_ssl_upgrade.test4
-rw-r--r--mysql-test/suite/galera/t/galera_sst_mariabackup.cnf2
-rw-r--r--mysql-test/suite/galera/t/galera_sst_mariabackup_data_dir.cnf2
-rw-r--r--mysql-test/suite/galera/t/galera_sst_mariabackup_encrypt_with_key.cnf2
-rw-r--r--mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf2
-rw-r--r--mysql-test/suite/galera/t/galera_sst_mysqldump.cnf2
-rw-r--r--mysql-test/suite/galera/t/galera_sst_mysqldump.test2
-rw-r--r--mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.cnf6
-rw-r--r--mysql-test/suite/galera/t/galera_sync_wait_upto-master.opt1
-rw-r--r--mysql-test/suite/galera/t/galera_sync_wait_upto.test115
-rw-r--r--mysql-test/suite/galera/t/galera_toi_ddl_error.test5
-rw-r--r--mysql-test/suite/galera/t/galera_toi_ddl_locking.test54
-rw-r--r--mysql-test/suite/galera/t/galera_transaction_replay.test201
-rw-r--r--mysql-test/suite/galera/t/galera_var_cluster_address.test11
-rw-r--r--mysql-test/suite/galera/t/galera_var_dirty_reads.test7
-rw-r--r--mysql-test/suite/galera/t/galera_var_ignore_apply_errors.test235
-rw-r--r--mysql-test/suite/galera/t/galera_var_innodb_disallow_writes.test9
-rw-r--r--mysql-test/suite/galera/t/galera_var_load_data_splitting.test3
-rw-r--r--mysql-test/suite/galera/t/galera_var_log_bin.cnf5
-rw-r--r--mysql-test/suite/galera/t/galera_var_retry_autocommit.test24
-rw-r--r--mysql-test/suite/galera/t/galera_var_slave_threads.cnf7
-rw-r--r--mysql-test/suite/galera/t/galera_var_slave_threads.test139
-rw-r--r--mysql-test/suite/galera/t/galera_vote_drop_temporary-master.opt1
-rw-r--r--mysql-test/suite/galera/t/galera_wsrep_new_cluster.test1
-rw-r--r--mysql-test/suite/galera/t/mdev_18730.test71
-rw-r--r--mysql-test/suite/galera/t/mysql-wsrep#198-master.opt1
-rw-r--r--mysql-test/suite/galera/t/mysql-wsrep#237.test4
-rw-r--r--mysql-test/suite/galera/t/mysql-wsrep#332.test2
-rw-r--r--mysql-test/suite/galera/t/partition.test16
-rw-r--r--mysql-test/suite/galera/t/rpl_row_annotate.test5
-rw-r--r--mysql-test/suite/galera/t/versioning_trx_id.test14
-rw-r--r--mysql-test/suite/galera/t/wsrep_trx_fragment_size_sr.test22
-rw-r--r--mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf37
-rw-r--r--mysql-test/suite/galera_3nodes/galera_3nodes.cnf14
-rw-r--r--mysql-test/suite/galera_3nodes/include/galera_resume.inc (renamed from mysql-test/include/galera_resume.inc)0
-rw-r--r--mysql-test/suite/galera_3nodes/r/GAL-501.result3
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_certification_ccc.result2
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_certification_double_failure.result5
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result4
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_garbd.result4
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_gtid_2_cluster.result96
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_innobackupex_backup.result17
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_ipv6_mariabackup.result4
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_ipv6_mariabackup_section.result8
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_ipv6_mysqldump.result3
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_ipv6_rsync.result2
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_ipv6_rsync_section.result5
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_ist_gcache_rollover.result2
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_load_data_ist.result36
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_parallel_apply_3nodes.result4
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_pc_bootstrap.result2
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_pc_weight.result2
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_safe_to_bootstrap.result6
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_var_dirty_reads2.result2
-rw-r--r--mysql-test/suite/galera_3nodes/r/galera_wsrep_schema.result82
-rw-r--r--mysql-test/suite/galera_3nodes/suite.pm4
-rw-r--r--mysql-test/suite/galera_3nodes/t/GAL-501.test6
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_certification_double_failure.test2
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test8
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_garbd.test47
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_gtid_2_cluster.test34
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_innobackupex_backup.test80
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.cnf5
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.test15
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup_section.test21
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.test6
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test2
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_load_data_ist.cnf4
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_load_data_ist.test124
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_parallel_apply_3nodes.test2
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_pc_weight.cnf8
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_pc_weight.test9
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_safe_to_bootstrap.test26
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_slave_options_ignore.test1
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_var_dirty_reads2.test3
-rw-r--r--mysql-test/suite/galera_3nodes/t/galera_wsrep_schema.test84
-rw-r--r--mysql-test/suite/galera_3nodes_sr/disabled.def7
-rw-r--r--mysql-test/suite/galera_3nodes_sr/galera_3nodes.cnf1
-rw-r--r--mysql-test/suite/galera_3nodes_sr/my.cnf1
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/GCF-336.result26
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/GCF-582.result23
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/GCF-606.result38
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/GCF-609.result20
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/GCF-810A.result256
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/GCF-810B.result100
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/GCF-810C.result177
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/GCF-817.result54
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/GCF-832.result25
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/galera_sr_isolate_master.result80
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/galera_sr_join_slave.result40
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_master.result34
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_after_apply.result54
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_after_apply_rollback.result59
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_after_apply_rollback2.result31
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_before_apply.result45
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/galera_sr_threeway_split.result117
-rw-r--r--mysql-test/suite/galera_3nodes_sr/r/galera_sr_threeway_split_no_primary.result85
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/GCF-336.test47
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/GCF-582.test39
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/GCF-606.test80
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/GCF-609.test30
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/GCF-810A.test137
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/GCF-810B.test49
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/GCF-810C.test70
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/GCF-817.test109
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/GCF-832.test43
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/galera_sr_isolate_master.test127
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/galera_sr_join_slave.test59
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_master.test58
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_after_apply.test81
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_after_apply_rollback.test80
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_after_apply_rollback2.test56
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_before_apply.test73
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/galera_sr_threeway_split.cnf5
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/galera_sr_threeway_split.test177
-rw-r--r--mysql-test/suite/galera_3nodes_sr/t/galera_sr_threeway_split_no_primary.test126
-rw-r--r--mysql-test/suite/galera_sr/disabled.def2
-rw-r--r--mysql-test/suite/galera_sr/galera_2nodes.cnf1
-rw-r--r--mysql-test/suite/galera_sr/my.cnf1
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-1008.result70
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-1018.result25
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-1018B.result12
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-1043A.result21
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-1043B.result21
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-1051.result46
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-1060.result21
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-437.result12
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-561.result50
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-571.result67
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-572.result37
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-580.result13
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-585.result28
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-597.result21
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-620.result18
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-623.result29
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-627.result26
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-845.result21
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-851.result30
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-867.result4
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-889.result25
-rw-r--r--mysql-test/suite/galera_sr/r/GCF-900.result21
-rw-r--r--mysql-test/suite/galera_sr/r/MDEV-18585.result36
-rw-r--r--mysql-test/suite/galera_sr/r/galera-features#56.result32
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_bf_abort.result555
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_blob.result23
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_cc_master.result66
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_cc_no_primary.result70
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_cc_slave.result61
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_concurrent.result36
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_conflict.result21
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_conflict_on_commit.result31
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_conflict_on_commit2.result28
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_conflict_with_rollback_master.result29
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_create_drop.result28
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_ddl_master.result48
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_ddl_schema.result23
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_ddl_slave.result50
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_ddl_unrelated.result42
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_dupkey_error.result46
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_fk_conflict.result39
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_gtid.result57
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_insert_select.result18
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_kill_all_nobootstrap.result31
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_kill_all_norecovery.result35
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_kill_all_pcrecovery.result32
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_kill_connection.result32
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_kill_query.result31
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_kill_slave.result54
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_large_fragment.result33
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_load_data.result14
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_load_data_splitting.result19
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_log_bin.result124
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_many_fragments.result33
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_myisam.result16
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_mysqldump_sst.result59
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_parallel_apply.result37
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_rollback.result42
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_rollback_retry.result33
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_rollback_savepoint.result42
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_rollback_statement.result33
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_shutdown_master.result32
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_shutdown_slave.result44
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_small_gcache.result15
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_table_contents.result178
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_transaction_replay.result121
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_unit_statements.result54
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_v1_row_events.result20
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_ws_size.result36
-rw-r--r--mysql-test/suite/galera_sr/r/galera_sr_ws_size2.result34
-rw-r--r--mysql-test/suite/galera_sr/r/galera_var_ignore_apply_errors_sr.result29
-rw-r--r--mysql-test/suite/galera_sr/r/mdev_18631.result21
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep#215.result137
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#136.result63
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#138.result24
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#14.result12
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#148.result39
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#15.result11
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#165.result1045
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#22.result35
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#27.result23
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#32.result27
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#35.result37
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#8.result39
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#9.result21
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#93.result18
-rw-r--r--mysql-test/suite/galera_sr/r/mysql-wsrep-features#96.result33
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-1008.inc36
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-1008.test18
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-1018.test39
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-1018B.test40
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-1043A.test13
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-1043B.test13
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-1051.test51
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-1060.test9
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-437.test21
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-561.test65
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-571.test54
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-572.test54
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-580.test27
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-585.test44
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-597.test29
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-620.test22
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-623.test31
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-627.test30
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-845.test30
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-851.test24
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-867.test42
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-889.test29
-rw-r--r--mysql-test/suite/galera_sr/t/GCF-900.test28
-rw-r--r--mysql-test/suite/galera_sr/t/MDEV-18585.cnf5
-rw-r--r--mysql-test/suite/galera_sr/t/MDEV-18585.test42
-rw-r--r--mysql-test/suite/galera_sr/t/galera-features#56.test55
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_bf_abort.inc145
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_bf_abort.test50
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_blob.test38
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_cc_master.test100
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_cc_no_primary.test85
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_cc_slave.test105
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_concurrent.test45
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_conflict.test45
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_conflict_on_commit.test45
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_conflict_on_commit2.test46
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_conflict_with_rollback_master.test44
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_create_drop.test33
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_ddl_master.test63
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_ddl_schema.test43
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_ddl_slave.test65
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_ddl_unrelated.test53
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_dupkey_error.test59
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_fk_conflict.test62
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_gtid-master.opt1
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_gtid.test46
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_insert_select.test33
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_kill_all_nobootstrap.test52
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_kill_all_norecovery.cnf8
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_kill_all_norecovery.test71
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_kill_all_pcrecovery.test54
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_kill_connection.test59
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_kill_query.test48
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_kill_slave.cnf4
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_kill_slave.test80
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_large_fragment-master.opt1
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_large_fragment.test58
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_load_data.test49
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_load_data_splitting.test50
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_log_bin-master.opt1
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_log_bin.test70
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_many_fragments.test53
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_myisam.test29
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_mysqldump_sst.cnf11
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_mysqldump_sst.test79
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_parallel_apply.test59
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_rollback.test76
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_rollback_retry.test55
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_rollback_savepoint.test51
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_rollback_statement.test59
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_shutdown_master.test53
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_shutdown_slave.test63
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_small_gcache.cnf6
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_small_gcache.test21
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_table_contents.test49
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_transaction_replay.test260
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_unit_statements.test54
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_v1_row_events-master.opt1
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_v1_row_events.test27
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_ws_size.test70
-rw-r--r--mysql-test/suite/galera_sr/t/galera_sr_ws_size2.test62
-rw-r--r--mysql-test/suite/galera_sr/t/galera_var_ignore_apply_errors_sr.test38
-rw-r--r--mysql-test/suite/galera_sr/t/mdev_18631.cnf23
-rw-r--r--mysql-test/suite/galera_sr/t/mdev_18631.test24
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep#215.test176
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#136-master.opt1
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#136.test41
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#138.test25
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#14.test21
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#148.test62
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#15.test17
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#165.inc109
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#165.test41
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#22.test47
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#27.test29
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#32-master.opt1
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#32.test44
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#35.test48
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#8.test63
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#9.test44
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#93.test29
-rw-r--r--mysql-test/suite/galera_sr/t/mysql-wsrep-features#96.test45
-rw-r--r--mysql-test/suite/gcol/inc/gcol_select.inc2
-rw-r--r--mysql-test/suite/gcol/r/gcol_bug20746926.result8
-rw-r--r--mysql-test/suite/gcol/r/gcol_bugfixes.result2
-rw-r--r--mysql-test/suite/gcol/r/gcol_keys_innodb.result6
-rw-r--r--mysql-test/suite/gcol/r/gcol_keys_myisam.result8
-rw-r--r--mysql-test/suite/gcol/r/gcol_rollback.result1
-rw-r--r--mysql-test/suite/gcol/r/gcol_select_innodb.result32
-rw-r--r--mysql-test/suite/gcol/r/gcol_select_myisam.result58
-rw-r--r--mysql-test/suite/gcol/r/gcol_view_innodb.result1
-rw-r--r--mysql-test/suite/gcol/r/gcol_view_myisam.result1
-rw-r--r--mysql-test/suite/gcol/r/innodb_virtual_fk_restart.result2
-rw-r--r--mysql-test/suite/gcol/r/innodb_virtual_rebuild.result16
-rw-r--r--mysql-test/suite/gcol/t/innodb_virtual_rebuild.test5
-rw-r--r--mysql-test/suite/handler/aria.result6
-rw-r--r--mysql-test/suite/handler/heap.result7
-rw-r--r--mysql-test/suite/handler/innodb.result6
-rw-r--r--mysql-test/suite/handler/interface.result6
-rw-r--r--mysql-test/suite/handler/interface.test6
-rw-r--r--mysql-test/suite/handler/myisam.result6
-rw-r--r--mysql-test/suite/heap/heap.result2
-rw-r--r--mysql-test/suite/heap/heap_btree.result18
-rw-r--r--mysql-test/suite/heap/heap_btree.test1
-rw-r--r--mysql-test/suite/heap/heap_hash.result13
-rw-r--r--mysql-test/suite/heap/heap_hash.test4
-rw-r--r--mysql-test/suite/innodb/r/101_compatibility.result3
-rw-r--r--mysql-test/suite/innodb/r/alter_copy.result3
-rw-r--r--mysql-test/suite/innodb/r/alter_crash.result4
-rw-r--r--mysql-test/suite/innodb/r/alter_foreign_crash.result1
-rw-r--r--mysql-test/suite/innodb/r/alter_kill.result6
-rw-r--r--mysql-test/suite/innodb/r/alter_missing_tablespace.result1
-rw-r--r--mysql-test/suite/innodb/r/alter_rename_files.result3
-rw-r--r--mysql-test/suite/innodb/r/alter_table.result15
-rw-r--r--mysql-test/suite/innodb/r/alter_varchar_change.result34
-rw-r--r--mysql-test/suite/innodb/r/analyze_table.result1
-rw-r--r--mysql-test/suite/innodb/r/autoinc_debug.result1
-rw-r--r--mysql-test/suite/innodb/r/autoinc_persist.result8
-rw-r--r--mysql-test/suite/innodb/r/doublewrite.result19
-rw-r--r--mysql-test/suite/innodb/r/drop_table_background.result1
-rw-r--r--mysql-test/suite/innodb/r/foreign_key.result1
-rw-r--r--mysql-test/suite/innodb/r/full_crc32_import.result129
-rw-r--r--mysql-test/suite/innodb/r/ibuf_not_empty.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb-32k-crash.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-32k.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-64k-crash.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-64k.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-alter-table.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-alter-tempfile.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-alter.result3
-rw-r--r--mysql-test/suite/innodb/r/innodb-autoinc-44030.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-autoinc.result3
-rw-r--r--mysql-test/suite/innodb/r/innodb-blob.result3
-rw-r--r--mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-fk.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb-get-fk.result4
-rw-r--r--mysql-test/suite/innodb/r/innodb-index-online.result11
-rw-r--r--mysql-test/suite/innodb/r/innodb-index.result6
-rw-r--r--mysql-test/suite/innodb/r/innodb-isolation.result6
-rw-r--r--mysql-test/suite/innodb/r/innodb-lock-schedule-algorithm.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-mdev-7513.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-page_compression_bzip2.result3
-rw-r--r--mysql-test/suite/innodb/r/innodb-page_compression_default.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-page_compression_lz4.result3
-rw-r--r--mysql-test/suite/innodb/r/innodb-page_compression_lzma.result3
-rw-r--r--mysql-test/suite/innodb/r/innodb-page_compression_lzo.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb-page_compression_snappy.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-page_compression_tables.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-page_compression_zip.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb-rollback.result10
-rw-r--r--mysql-test/suite/innodb/r/innodb-table-online.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb-wl5522,crc32.rdiff5
-rw-r--r--mysql-test/suite/innodb/r/innodb-wl5522,strict_crc32.rdiff5
-rw-r--r--mysql-test/suite/innodb/r/innodb-wl5522-debug.result6
-rw-r--r--mysql-test/suite/innodb/r/innodb-wl5522.result5
-rw-r--r--mysql-test/suite/innodb/r/innodb.result11
-rw-r--r--mysql-test/suite/innodb/r/innodb_28867993.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug12902967.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug14147491.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug14676111.result6
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug30423.result11
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug30919.result4
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug51920.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug53046.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug53756.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug57252.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug59641.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug60049.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug68148.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_defrag_stats.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb_defragment.result3
-rw-r--r--mysql-test/suite/innodb/r/innodb_defragment_fill_factor.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb_force_recovery.result6
-rw-r--r--mysql-test/suite/innodb/r/innodb_gis.result10
-rw-r--r--mysql-test/suite/innodb/r/innodb_max_recordsize_32k.result6
-rw-r--r--mysql-test/suite/innodb/r/innodb_max_recordsize_64k.result6
-rw-r--r--mysql-test/suite/innodb/r/innodb_mysql.result27
-rw-r--r--mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result3
-rw-r--r--mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result4
-rw-r--r--mysql-test/suite/innodb/r/innodb_stats.result10
-rw-r--r--mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_stats_drop_locked.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_stats_fetch.result7
-rw-r--r--mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result5
-rw-r--r--mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result4
-rw-r--r--mysql-test/suite/innodb/r/innodb_stats_persistent.result5
-rw-r--r--mysql-test/suite/innodb/r/innodb_stats_persistent_debug.result1
-rw-r--r--mysql-test/suite/innodb/r/instant_alter.result1190
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_bugs.result77
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_charset,redundant.rdiff10
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_charset.result1812
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_crash.result81
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_debug.result66
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_extend,utf8.rdiff29
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_extend.result bin 0 -> 8684 bytes
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_import.result26
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_index_rename.result178
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_limit,32k.rdiff9
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_limit,4k.rdiff9
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_limit,64k.rdiff9
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_limit,8k.rdiff9
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_limit.result46
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_null.result56
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_purge,release.rdiff18
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_purge.result46
-rw-r--r--mysql-test/suite/innodb/r/instant_alter_rollback.result28
-rw-r--r--mysql-test/suite/innodb/r/instant_auto_inc.result21
-rw-r--r--mysql-test/suite/innodb/r/instant_drop.result205
-rw-r--r--mysql-test/suite/innodb/r/log_alter_table.result3
-rw-r--r--mysql-test/suite/innodb/r/log_corruption.result21
-rw-r--r--mysql-test/suite/innodb/r/log_file.result15
-rw-r--r--mysql-test/suite/innodb/r/log_file_name.result11
-rw-r--r--mysql-test/suite/innodb/r/log_file_name_debug.result4
-rw-r--r--mysql-test/suite/innodb/r/log_file_size.result21
-rw-r--r--mysql-test/suite/innodb/r/monitor.result14
-rw-r--r--mysql-test/suite/innodb/r/purge_secondary.result1
-rw-r--r--mysql-test/suite/innodb/r/purge_thread_shutdown.result1
-rw-r--r--mysql-test/suite/innodb/r/read_only_recover_committed.result4
-rw-r--r--mysql-test/suite/innodb/r/read_only_recovery.result3
-rw-r--r--mysql-test/suite/innodb/r/readahead.result1
-rw-r--r--mysql-test/suite/innodb/r/recovery_shutdown.result3
-rw-r--r--mysql-test/suite/innodb/r/rename_table.result2
-rw-r--r--mysql-test/suite/innodb/r/rename_table_debug.result1
-rw-r--r--mysql-test/suite/innodb/r/restart.result4
-rw-r--r--mysql-test/suite/innodb/r/row_format_redundant.result5
-rw-r--r--mysql-test/suite/innodb/r/system_tables.result3
-rw-r--r--mysql-test/suite/innodb/r/table_flags.result4
-rw-r--r--mysql-test/suite/innodb/r/temporary_table.result7
-rw-r--r--mysql-test/suite/innodb/r/truncate_crash.result1
-rw-r--r--mysql-test/suite/innodb/r/truncate_missing.result1
-rw-r--r--mysql-test/suite/innodb/r/undo_truncate_recover.result2
-rw-r--r--mysql-test/suite/innodb/r/update_time.result1
-rw-r--r--mysql-test/suite/innodb/r/update_time_wl6658.result1
-rw-r--r--mysql-test/suite/innodb/r/xa_recovery.result2
-rw-r--r--mysql-test/suite/innodb/t/alter_kill.test36
-rw-r--r--mysql-test/suite/innodb/t/alter_rename_files.test2
-rw-r--r--mysql-test/suite/innodb/t/alter_table.test12
-rw-r--r--mysql-test/suite/innodb/t/alter_varchar_change.test24
-rw-r--r--mysql-test/suite/innodb/t/corrupted_during_recovery.test1
-rw-r--r--mysql-test/suite/innodb/t/doublewrite.combinations5
-rw-r--r--mysql-test/suite/innodb/t/doublewrite.test42
-rw-r--r--mysql-test/suite/innodb/t/full_crc32_import.opt1
-rw-r--r--mysql-test/suite/innodb/t/full_crc32_import.test137
-rw-r--r--mysql-test/suite/innodb/t/innodb-alter.test4
-rw-r--r--mysql-test/suite/innodb/t/innodb-index-online.test5
-rw-r--r--mysql-test/suite/innodb/t/innodb-index.test2
-rw-r--r--mysql-test/suite/innodb/t/innodb-rollback.test17
-rw-r--r--mysql-test/suite/innodb/t/innodb-wl5522-debug.test15
-rw-r--r--mysql-test/suite/innodb/t/innodb-wl5522.test15
-rw-r--r--mysql-test/suite/innodb/t/innodb.test5
-rw-r--r--mysql-test/suite/innodb/t/innodb_bulk_create_index_debug.test1
-rw-r--r--mysql-test/suite/innodb/t/innodb_stats.test4
-rw-r--r--mysql-test/suite/innodb/t/innodb_stats_fetch.test4
-rw-r--r--mysql-test/suite/innodb/t/instant_alter.test384
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_bugs.test82
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_charset.test538
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_crash.test78
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_debug.test71
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_extend.combinations5
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_extend.test249
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_import.test32
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_index_rename.test186
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_limit.test60
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_null.test57
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_purge.test75
-rw-r--r--mysql-test/suite/innodb/t/instant_alter_rollback.test24
-rw-r--r--mysql-test/suite/innodb/t/instant_auto_inc.test13
-rw-r--r--mysql-test/suite/innodb/t/instant_drop.test108
-rw-r--r--mysql-test/suite/innodb/t/log_data_file_size.test1
-rw-r--r--mysql-test/suite/innodb/t/purge_thread_shutdown.test4
-rw-r--r--mysql-test/suite/innodb/t/redo_log_during_checkpoint.test1
-rw-r--r--mysql-test/suite/innodb/t/restart.test2
-rw-r--r--mysql-test/suite/innodb/t/row_format_redundant.test15
-rw-r--r--mysql-test/suite/innodb/t/system_tables.test2
-rw-r--r--mysql-test/suite/innodb/t/table_flags.combinations5
-rw-r--r--mysql-test/suite/innodb/t/table_flags.opt1
-rw-r--r--mysql-test/suite/innodb/t/table_flags.test17
-rw-r--r--mysql-test/suite/innodb/t/undo_truncate.opt1
-rw-r--r--mysql-test/suite/innodb/t/undo_truncate.test6
-rw-r--r--mysql-test/suite/innodb/t/undo_truncate_recover.combinations5
-rw-r--r--mysql-test/suite/innodb/t/undo_truncate_recover.test16
-rw-r--r--mysql-test/suite/innodb_fts/r/crash_recovery.result3
-rw-r--r--mysql-test/suite/innodb_fts/r/fulltext_order_by.result4
-rw-r--r--mysql-test/suite/innodb_fts/r/innodb-fts-fic.result2
-rw-r--r--mysql-test/suite/innodb_fts/r/innodb-fts-stopword.result4
-rw-r--r--mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result4
-rw-r--r--mysql-test/suite/innodb_fts/r/innodb_fts_plugin.result2
-rw-r--r--mysql-test/suite/innodb_fts/r/innodb_fts_stopword_charset.result2
-rw-r--r--mysql-test/suite/innodb_fts/r/sync.result2
-rw-r--r--mysql-test/suite/innodb_fts/t/fulltext_table_evict.test1
-rw-r--r--mysql-test/suite/innodb_gis/r/0.result10
-rw-r--r--mysql-test/suite/innodb_gis/r/1.result10
-rw-r--r--mysql-test/suite/innodb_gis/r/alter_spatial_index.result7
-rw-r--r--mysql-test/suite/innodb_gis/r/create_spatial_index.result3
-rw-r--r--mysql-test/suite/innodb_gis/r/gis.result10
-rw-r--r--mysql-test/suite/innodb_gis/r/innodb_gis_rtree.result1
-rw-r--r--mysql-test/suite/innodb_gis/r/precise.result2
-rw-r--r--mysql-test/suite/innodb_gis/r/rt_precise.result1
-rw-r--r--mysql-test/suite/innodb_gis/r/rtree.result2
-rw-r--r--mysql-test/suite/innodb_gis/r/rtree_drop_index.result1
-rw-r--r--mysql-test/suite/innodb_gis/r/rtree_estimate.result7
-rw-r--r--mysql-test/suite/innodb_gis/r/rtree_recovery.result3
-rw-r--r--mysql-test/suite/innodb_gis/r/types.result1
-rw-r--r--mysql-test/suite/innodb_gis/t/alter_spatial_index.test3
-rw-r--r--mysql-test/suite/innodb_gis/t/kill_server.test1
-rw-r--r--mysql-test/suite/innodb_gis/t/rtree_compress2.test1
-rw-r--r--mysql-test/suite/innodb_gis/t/rtree_estimate.test1
-rw-r--r--mysql-test/suite/innodb_gis/t/rtree_split.test1
-rw-r--r--mysql-test/suite/innodb_zip/r/16k,full_crc32.rdiff16
-rw-r--r--mysql-test/suite/innodb_zip/r/16k,strict_full_crc32.rdiff16
-rw-r--r--mysql-test/suite/innodb_zip/r/16k.result6
-rw-r--r--mysql-test/suite/innodb_zip/r/8k,full_crc32.rdiff16
-rw-r--r--mysql-test/suite/innodb_zip/r/8k,strict_full_crc32.rdiff16
-rw-r--r--mysql-test/suite/innodb_zip/r/bug56680.result1
-rw-r--r--mysql-test/suite/innodb_zip/r/cmp_per_index.result1
-rw-r--r--mysql-test/suite/innodb_zip/r/index_large_prefix.result2
-rw-r--r--mysql-test/suite/innodb_zip/r/innochecksum.result5
-rw-r--r--mysql-test/suite/innodb_zip/r/innochecksum_3.result4
-rw-r--r--mysql-test/suite/innodb_zip/r/innodb-zip.result4
-rw-r--r--mysql-test/suite/innodb_zip/r/recover.result1
-rw-r--r--mysql-test/suite/innodb_zip/r/restart,full_crc32.rdiff188
-rw-r--r--mysql-test/suite/innodb_zip/r/restart,strict_full_crc32.rdiff188
-rw-r--r--mysql-test/suite/innodb_zip/r/restart.result5
-rw-r--r--mysql-test/suite/innodb_zip/r/wl5522_debug_zip.result6
-rw-r--r--mysql-test/suite/innodb_zip/r/wl5522_zip.result2
-rw-r--r--mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result6
-rw-r--r--mysql-test/suite/innodb_zip/t/16k.test4
-rw-r--r--mysql-test/suite/innodb_zip/t/8k.test2
-rw-r--r--mysql-test/suite/innodb_zip/t/innochecksum.combinations5
-rw-r--r--mysql-test/suite/innodb_zip/t/innochecksum.test22
-rw-r--r--mysql-test/suite/innodb_zip/t/innochecksum_2.test1
-rw-r--r--mysql-test/suite/innodb_zip/t/innochecksum_3.combinations5
-rw-r--r--mysql-test/suite/innodb_zip/t/innochecksum_3.opt1
-rw-r--r--mysql-test/suite/innodb_zip/t/restart.test2
-rw-r--r--mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test14
-rw-r--r--mysql-test/suite/large_tests/r/rpl_slave_net_timeout.result4
-rw-r--r--mysql-test/suite/maria/disabled.def13
-rw-r--r--mysql-test/suite/maria/icp.result16
-rw-r--r--mysql-test/suite/maria/maria-autozerofill.result1
-rw-r--r--mysql-test/suite/maria/maria-gis-rtree-dynamic.result1
-rw-r--r--mysql-test/suite/maria/maria-gis-rtree-trans.result1
-rw-r--r--mysql-test/suite/maria/maria-gis-rtree.result1
-rw-r--r--mysql-test/suite/maria/maria-recover.result2
-rw-r--r--mysql-test/suite/maria/maria-recover.test12
-rw-r--r--mysql-test/suite/maria/maria-ucs2.result2
-rw-r--r--mysql-test/suite/maria/maria.result17
-rw-r--r--mysql-test/suite/maria/maria.test11
-rw-r--r--mysql-test/suite/maria/maria3.result4
-rw-r--r--mysql-test/suite/maria/ps_maria.result2
-rw-r--r--mysql-test/suite/maria/system_tables.result14
-rw-r--r--mysql-test/suite/maria/system_tables.test27
-rw-r--r--mysql-test/suite/mariabackup/absolute_ibdata_paths.result2
-rw-r--r--mysql-test/suite/mariabackup/apply-log-only-incr.result3
-rw-r--r--mysql-test/suite/mariabackup/backup_ssl.result2
-rw-r--r--mysql-test/suite/mariabackup/compress_qpress.result3
-rw-r--r--mysql-test/suite/mariabackup/create_during_backup.result2
-rw-r--r--mysql-test/suite/mariabackup/create_with_data_directory_during_backup.result2
-rw-r--r--mysql-test/suite/mariabackup/data_directory.result2
-rw-r--r--mysql-test/suite/mariabackup/drop_table_during_backup.result2
-rw-r--r--mysql-test/suite/mariabackup/encrypted_page_compressed.result2
-rw-r--r--mysql-test/suite/mariabackup/encrypted_page_compressed.test1
-rw-r--r--mysql-test/suite/mariabackup/encrypted_page_corruption.opt1
-rw-r--r--mysql-test/suite/mariabackup/encrypted_page_corruption.result1
-rw-r--r--mysql-test/suite/mariabackup/full_backup.result2
-rw-r--r--mysql-test/suite/mariabackup/huge_lsn.combinations5
-rw-r--r--mysql-test/suite/mariabackup/huge_lsn.result3
-rw-r--r--mysql-test/suite/mariabackup/huge_lsn.test10
-rw-r--r--mysql-test/suite/mariabackup/include/restart_and_restore.inc1
-rw-r--r--mysql-test/suite/mariabackup/incremental_backup.result2
-rw-r--r--mysql-test/suite/mariabackup/incremental_ddl_before_backup.result2
-rw-r--r--mysql-test/suite/mariabackup/incremental_ddl_during_backup.result2
-rw-r--r--mysql-test/suite/mariabackup/incremental_encrypted.result2
-rw-r--r--mysql-test/suite/mariabackup/incremental_rocksdb.result2
-rw-r--r--mysql-test/suite/mariabackup/innodb_log_optimize_ddl.result2
-rw-r--r--mysql-test/suite/mariabackup/log_checksum_mismatch.result2
-rw-r--r--mysql-test/suite/mariabackup/mdev-14447.result1
-rw-r--r--mysql-test/suite/mariabackup/mdev-14447.test1
-rw-r--r--mysql-test/suite/mariabackup/missing_ibd.result1
-rw-r--r--mysql-test/suite/mariabackup/mlog_index_load.result2
-rw-r--r--mysql-test/suite/mariabackup/partition_datadir.result2
-rw-r--r--mysql-test/suite/mariabackup/recreate_table_during_backup.result2
-rw-r--r--mysql-test/suite/mariabackup/rename_during_backup.result2
-rw-r--r--mysql-test/suite/mariabackup/rename_during_mdl_lock.result2
-rw-r--r--mysql-test/suite/mariabackup/skip_innodb.result2
-rw-r--r--mysql-test/suite/mariabackup/system_versioning.result4
-rw-r--r--mysql-test/suite/mariabackup/unencrypted_page_compressed.result1
-rw-r--r--mysql-test/suite/mariabackup/xb_aws_key_management.result5
-rw-r--r--mysql-test/suite/mariabackup/xb_compressed_encrypted.result2
-rw-r--r--mysql-test/suite/mariabackup/xb_file_key_management.result2
-rw-r--r--mysql-test/suite/mariabackup/xb_fulltext_encrypted.result2
-rw-r--r--mysql-test/suite/mariabackup/xb_partition.result2
-rw-r--r--mysql-test/suite/mariabackup/xb_rocksdb.result3
-rw-r--r--mysql-test/suite/mariabackup/xb_rocksdb_datadir.result1
-rw-r--r--mysql-test/suite/mariabackup/xb_rocksdb_datadir_debug.result1
-rw-r--r--mysql-test/suite/mariabackup/xbstream.result2
-rw-r--r--mysql-test/suite/multi_source/gtid_ignore_duplicates.result8
-rw-r--r--mysql-test/suite/multi_source/gtid_ignore_duplicates.test8
-rw-r--r--mysql-test/suite/parts/r/optimizer.result4
-rw-r--r--mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result56
-rw-r--r--mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result16
-rw-r--r--mysql-test/suite/parts/r/partition_alter1_1_innodb.result32
-rw-r--r--mysql-test/suite/parts/r/partition_alter1_1_myisam.result16
-rw-r--r--mysql-test/suite/parts/r/partition_alter1_2_innodb.result80
-rw-r--r--mysql-test/suite/parts/r/partition_alter1_2_myisam.result32
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result40
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_1_2_innodb.result40
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_1_maria.result48
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_1_myisam.result48
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result40
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_2_2_innodb.result40
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_2_maria.result48
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_2_myisam.result48
-rw-r--r--mysql-test/suite/parts/r/partition_alter4_innodb.result216
-rw-r--r--mysql-test/suite/parts/r/partition_alter4_myisam.result216
-rw-r--r--mysql-test/suite/parts/r/partition_basic_innodb.result64
-rw-r--r--mysql-test/suite/parts/r/partition_basic_myisam.result32
-rw-r--r--mysql-test/suite/parts/r/partition_basic_symlink_myisam.result39
-rw-r--r--mysql-test/suite/parts/r/partition_engine_innodb.result11
-rw-r--r--mysql-test/suite/parts/r/partition_engine_myisam.result11
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc0_innodb.result1
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc0_memory.result1
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc0_myisam.result1
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc1_innodb.result1
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc1_memory.result1
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc1_myisam.result1
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc2_innodb.result1
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc2_memory.result1
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc2_myisam.result1
-rw-r--r--mysql-test/suite/parts/r/partition_recover_myisam.result12
-rw-r--r--mysql-test/suite/parts/r/partition_repair_myisam.result3
-rw-r--r--mysql-test/suite/parts/r/rpl_partition.result12
-rw-r--r--mysql-test/suite/perfschema/r/bad_option_1.result2
-rw-r--r--mysql-test/suite/perfschema/r/bad_option_2.result2
-rw-r--r--mysql-test/suite/perfschema/r/bad_option_3.result2
-rw-r--r--mysql-test/suite/perfschema/r/bad_option_4.result2
-rw-r--r--mysql-test/suite/perfschema/r/bad_option_5.result2
-rw-r--r--mysql-test/suite/perfschema/r/dml_handler.result2
-rw-r--r--mysql-test/suite/perfschema/r/dml_setup_instruments.result4
-rw-r--r--mysql-test/suite/perfschema/r/event_aggregate.result412
-rw-r--r--mysql-test/suite/perfschema/r/event_aggregate_no_a.result344
-rw-r--r--mysql-test/suite/perfschema/r/event_aggregate_no_a_no_h.result268
-rw-r--r--mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u.result272
-rw-r--r--mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u_no_h.result196
-rw-r--r--mysql-test/suite/perfschema/r/event_aggregate_no_h.result336
-rw-r--r--mysql-test/suite/perfschema/r/event_aggregate_no_u.result340
-rw-r--r--mysql-test/suite/perfschema/r/event_aggregate_no_u_no_h.result264
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_allow.result4
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_allow.result4
-rw-r--r--mysql-test/suite/perfschema/r/ortho_iter.result2
-rw-r--r--mysql-test/suite/perfschema/r/privilege_table_io.result15
-rw-r--r--mysql-test/suite/perfschema/r/selects.result2
-rw-r--r--mysql-test/suite/perfschema/r/server_init.result4
-rw-r--r--mysql-test/suite/perfschema/r/setup_instruments_defaults.result2
-rw-r--r--mysql-test/suite/perfschema/r/stage_mdl_global.result2
-rw-r--r--mysql-test/suite/perfschema/r/table_schema.result8
-rw-r--r--mysql-test/suite/perfschema/t/bad_option_1.test5
-rw-r--r--mysql-test/suite/perfschema/t/bad_option_2.test5
-rw-r--r--mysql-test/suite/perfschema/t/bad_option_3.test5
-rw-r--r--mysql-test/suite/perfschema/t/bad_option_4.test5
-rw-r--r--mysql-test/suite/perfschema/t/bad_option_5.test5
-rw-r--r--mysql-test/suite/perfschema/t/privilege_table_io.test1
-rw-r--r--mysql-test/suite/perfschema/t/server_init.test3
-rw-r--r--mysql-test/suite/perfschema/t/stage_mdl_global.test4
-rw-r--r--mysql-test/suite/period/create_triggers.inc38
-rw-r--r--mysql-test/suite/period/engines.combinations6
-rw-r--r--mysql-test/suite/period/engines.inc3
-rw-r--r--mysql-test/suite/period/r/alter.result176
-rw-r--r--mysql-test/suite/period/r/create.result98
-rw-r--r--mysql-test/suite/period/r/delete,myisam.rdiff18
-rw-r--r--mysql-test/suite/period/r/delete.result358
-rw-r--r--mysql-test/suite/period/r/update.result276
-rw-r--r--mysql-test/suite/period/r/versioning.result94
-rw-r--r--mysql-test/suite/period/t/alter.test133
-rw-r--r--mysql-test/suite/period/t/create.test81
-rw-r--r--mysql-test/suite/period/t/delete.test186
-rw-r--r--mysql-test/suite/period/t/update.test165
-rw-r--r--mysql-test/suite/period/t/versioning.test64
-rw-r--r--mysql-test/suite/plugins/r/audit_null.result6
-rw-r--r--mysql-test/suite/plugins/r/audit_null_debug.result4
-rw-r--r--mysql-test/suite/plugins/r/auth_ed25519.result31
-rw-r--r--mysql-test/suite/plugins/r/cracklib_password_check.result2
-rw-r--r--mysql-test/suite/plugins/r/feedback_plugin_load.result2
-rw-r--r--mysql-test/suite/plugins/r/feedback_plugin_send.result3
-rw-r--r--mysql-test/suite/plugins/r/max_password_errors_auth_named_pipe.result12
-rw-r--r--mysql-test/suite/plugins/r/max_password_errors_auth_socket.result12
-rw-r--r--mysql-test/suite/plugins/r/multiauth.result193
-rw-r--r--mysql-test/suite/plugins/r/pam.result7
-rw-r--r--mysql-test/suite/plugins/r/pam_v1.result25
-rw-r--r--mysql-test/suite/plugins/r/server_audit.result22
-rw-r--r--mysql-test/suite/plugins/r/show_all_plugins.result4
-rw-r--r--mysql-test/suite/plugins/r/simple_password_check.result9
-rw-r--r--mysql-test/suite/plugins/r/thread_pool_server_audit.result22
-rw-r--r--mysql-test/suite/plugins/r/unix_socket.result2
-rw-r--r--mysql-test/suite/plugins/suite.pm2
-rw-r--r--mysql-test/suite/plugins/t/audit_null_debug.test4
-rw-r--r--mysql-test/suite/plugins/t/auth_ed25519.test13
-rw-r--r--mysql-test/suite/plugins/t/feedback_plugin_load.test4
-rw-r--r--mysql-test/suite/plugins/t/max_password_errors_auth_named_pipe.opt1
-rw-r--r--mysql-test/suite/plugins/t/max_password_errors_auth_named_pipe.test22
-rw-r--r--mysql-test/suite/plugins/t/max_password_errors_auth_socket.opt1
-rw-r--r--mysql-test/suite/plugins/t/max_password_errors_auth_socket.test23
-rw-r--r--mysql-test/suite/plugins/t/multiauth.test196
-rw-r--r--mysql-test/suite/plugins/t/pam.test14
-rw-r--r--mysql-test/suite/plugins/t/pam_cleartext.test2
-rw-r--r--mysql-test/suite/plugins/t/pam_init.inc4
-rw-r--r--mysql-test/suite/plugins/t/pam_v1.test34
-rw-r--r--mysql-test/suite/plugins/t/simple_password_check.test8
-rw-r--r--mysql-test/suite/plugins/t/unix_socket.test17
-rw-r--r--mysql-test/suite/roles/create_and_drop_role.result6
-rw-r--r--mysql-test/suite/roles/create_and_drop_role_invalid_user_table.result1
-rw-r--r--mysql-test/suite/roles/create_and_drop_role_invalid_user_table.test7
-rw-r--r--mysql-test/suite/roles/default_create_user_not_role.result2
-rw-r--r--mysql-test/suite/roles/flush_roles-17898.result8
-rw-r--r--mysql-test/suite/roles/flush_roles-17898.test3
-rw-r--r--mysql-test/suite/roles/grant_revoke_current.result3
-rw-r--r--mysql-test/suite/roles/grant_revoke_current.test5
-rw-r--r--mysql-test/suite/roles/grant_role_auto_create_user.result2
-rw-r--r--mysql-test/suite/roles/i_s_applicable_roles_is_default.result1
-rw-r--r--mysql-test/suite/roles/i_s_applicable_roles_is_default.test2
-rw-r--r--mysql-test/suite/roles/none_public.result12
-rw-r--r--mysql-test/suite/roles/none_public.test6
-rw-r--r--mysql-test/suite/roles/prepare_stmt_with_role.result6
-rw-r--r--mysql-test/suite/roles/rename_user.result2
-rw-r--r--mysql-test/suite/roles/role_case_sensitive-10744.result2
-rw-r--r--mysql-test/suite/roles/set_default_role_clear.result8
-rw-r--r--mysql-test/suite/roles/set_default_role_for.result13
-rw-r--r--mysql-test/suite/roles/set_default_role_for.test3
-rw-r--r--mysql-test/suite/roles/set_default_role_invalid.result6
-rw-r--r--mysql-test/suite/roles/set_default_role_new_connection.result10
-rw-r--r--mysql-test/suite/roles/set_default_role_ps-6960.result3
-rw-r--r--mysql-test/suite/roles/set_default_role_ps-6960.test7
-rw-r--r--mysql-test/suite/roles/set_role-database-recursive.result2
-rw-r--r--mysql-test/suite/roles/set_role-database-simple.result2
-rw-r--r--mysql-test/suite/roles/set_role-recursive.result2
-rw-r--r--mysql-test/suite/roles/set_role-routine-simple.result2
-rw-r--r--mysql-test/suite/roles/set_role-simple.result2
-rw-r--r--mysql-test/suite/roles/set_role-table-column-priv.result2
-rw-r--r--mysql-test/suite/roles/set_role-table-simple.result2
-rw-r--r--mysql-test/suite/roles/show_grants.result2
-rw-r--r--mysql-test/suite/roles/show_grants_replicated.result2
-rw-r--r--mysql-test/suite/rpl/disabled.def1
-rw-r--r--mysql-test/suite/rpl/include/rpl_EE_err.test2
-rw-r--r--mysql-test/suite/rpl/include/rpl_mixed_dml.inc1
-rw-r--r--mysql-test/suite/rpl/include/rpl_row_001.test96
-rw-r--r--mysql-test/suite/rpl/include/rpl_row_annotate.test2
-rw-r--r--mysql-test/suite/rpl/include/rpl_row_delayed_ins.test2
-rw-r--r--mysql-test/suite/rpl/include/rpl_shutdown_wait_slaves.inc91
-rw-r--r--mysql-test/suite/rpl/r/password_expiration.result28
-rw-r--r--mysql-test/suite/rpl/r/rpl_EE_err.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_create_drop_user.result44
-rw-r--r--mysql-test/suite/rpl/r/rpl_create_if_not_exists.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_ddl.result12
-rw-r--r--mysql-test/suite/rpl/r/rpl_do_grant.result23
-rw-r--r--mysql-test/suite/rpl/r/rpl_drop_db.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_grant.result8
-rw-r--r--mysql-test/suite/rpl/r/rpl_gtid_ignored.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_gtid_mdev4484.result42
-rw-r--r--mysql-test/suite/rpl/r/rpl_gtid_stop_start.result10
-rw-r--r--mysql-test/suite/rpl/r/rpl_ignore_revoke.result10
-rw-r--r--mysql-test/suite/rpl/r/rpl_ignore_table.result9
-rw-r--r--mysql-test/suite/rpl/r/rpl_innodb_bug28430.result12
-rw-r--r--mysql-test/suite/rpl/r/rpl_innodb_bug30888.result4
-rw-r--r--mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result40
-rw-r--r--mysql-test/suite/rpl/r/rpl_mdev10863.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_mdev12179.result14
-rw-r--r--mysql-test/suite/rpl/r/rpl_misc_functions.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result1
-rw-r--r--mysql-test/suite/rpl/r/rpl_mysql57_stm_temporal_round.result22
-rw-r--r--mysql-test/suite/rpl/r/rpl_mysql80_stm_temporal_round.result23
-rw-r--r--mysql-test/suite/rpl/r/rpl_parallel.result1
-rw-r--r--mysql-test/suite/rpl/r/rpl_parallel_optimistic.result16
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_001.result42
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_annotate_do.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_annotate_dont.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_big_table_id,32bit.rdiff24
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_big_table_id.result12
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_delayed_ins.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result1
-rw-r--r--mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_shutdown_wait_semisync_slaves.result52
-rw-r--r--mysql-test/suite/rpl/r/rpl_shutdown_wait_slaves.result29
-rw-r--r--mysql-test/suite/rpl/r/rpl_stm_000001.result38
-rw-r--r--mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result1
-rw-r--r--mysql-test/suite/rpl/r/rpl_switch_stm_row_mixed.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_temporal_round.result50
-rw-r--r--mysql-test/suite/rpl/r/rpl_tmp_table_and_DDL.result22
-rw-r--r--mysql-test/suite/rpl/t/password_expiration.test53
-rw-r--r--mysql-test/suite/rpl/t/rpl_create_drop_user.test18
-rw-r--r--mysql-test/suite/rpl/t/rpl_do_grant.test29
-rw-r--r--mysql-test/suite/rpl/t/rpl_gtid_ignored.test2
-rw-r--r--mysql-test/suite/rpl/t/rpl_gtid_mdev4484.test70
-rw-r--r--mysql-test/suite/rpl/t/rpl_gtid_stop_start.test2
-rw-r--r--mysql-test/suite/rpl/t/rpl_ignore_table.test4
-rw-r--r--mysql-test/suite/rpl/t/rpl_mdev10863.test2
-rw-r--r--mysql-test/suite/rpl/t/rpl_mdev12179.test4
-rw-r--r--mysql-test/suite/rpl/t/rpl_mysql57_stm_temporal_round.test58
-rw-r--r--mysql-test/suite/rpl/t/rpl_mysql80_stm_temporal_round.test62
-rw-r--r--mysql-test/suite/rpl/t/rpl_parallel_optimistic.test42
-rw-r--r--mysql-test/suite/rpl/t/rpl_row_001.test48
-rw-r--r--mysql-test/suite/rpl/t/rpl_shutdown_wait_semisync_slaves.cnf16
-rw-r--r--mysql-test/suite/rpl/t/rpl_shutdown_wait_semisync_slaves.test46
-rw-r--r--mysql-test/suite/rpl/t/rpl_shutdown_wait_slaves.cnf16
-rw-r--r--mysql-test/suite/rpl/t/rpl_shutdown_wait_slaves.test11
-rw-r--r--mysql-test/suite/rpl/t/rpl_stm_000001.test24
-rw-r--r--mysql-test/suite/rpl/t/rpl_temporal_round.test35
-rw-r--r--mysql-test/suite/sql_sequence/read_only.result2
-rw-r--r--mysql-test/suite/sys_vars/r/histogram_size_basic.result12
-rw-r--r--mysql-test/suite/sys_vars/r/histogram_type_basic.result10
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_abort_loads.result3
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_at_shutdown_basic.result1
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result14
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_fil_make_page_dirty_debug_basic.result2
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_flush_method_func.result2
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_saved_page_number_debug_basic.result2
-rw-r--r--mysql-test/suite/sys_vars/r/max_seeks_for_key_func.result1
-rw-r--r--mysql-test/suite/sys_vars/r/myisam_stats_method_func.result7
-rw-r--r--mysql-test/suite/sys_vars/r/optimizer_switch_basic.result36
-rw-r--r--mysql-test/suite/sys_vars/r/optimizer_use_condition_selectivity_basic.result20
-rw-r--r--mysql-test/suite/sys_vars/r/secure_file_priv.result2
-rw-r--r--mysql-test/suite/sys_vars/r/shared_memory_base_name_basic.result21
-rw-r--r--mysql-test/suite/sys_vars/r/shared_memory_basic.result21
-rw-r--r--mysql-test/suite/sys_vars/r/slave_transaction_retry_errors.result10
-rw-r--r--mysql-test/suite/sys_vars/r/sql_mode_basic.result10
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_innodb.result4
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_server_embedded,32bit.rdiff328
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_server_embedded.result160
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff348
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result178
-rw-r--r--mysql-test/suite/sys_vars/r/sysvars_wsrep.result74
-rw-r--r--mysql-test/suite/sys_vars/r/tcp_nodelay.result5
-rw-r--r--mysql-test/suite/sys_vars/r/use_stat_tables_basic.result10
-rw-r--r--mysql-test/suite/sys_vars/r/wsrep_debug_basic.result22
-rw-r--r--mysql-test/suite/sys_vars/r/wsrep_load_data_splitting_basic.result16
-rw-r--r--mysql-test/suite/sys_vars/r/wsrep_provider_options_basic.result15
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_checksum_algorithm_basic.test6
-rw-r--r--mysql-test/suite/sys_vars/t/myisam_stats_method_func.test4
-rw-r--r--mysql-test/suite/sys_vars/t/shared_memory_base_name_basic.test25
-rw-r--r--mysql-test/suite/sys_vars/t/shared_memory_basic.test20
-rw-r--r--mysql-test/suite/sys_vars/t/sql_mode_basic.test8
-rw-r--r--mysql-test/suite/sys_vars/t/tcp_nodelay.test7
-rw-r--r--mysql-test/suite/sys_vars/t/wsrep_debug_basic.test10
-rw-r--r--mysql-test/suite/sys_vars/t/wsrep_provider_options_basic.test16
-rw-r--r--mysql-test/suite/unit/suite.pm2
-rw-r--r--mysql-test/suite/vcol/inc/vcol_select.inc2
-rw-r--r--mysql-test/suite/vcol/r/update.result4
-rw-r--r--mysql-test/suite/vcol/r/update_binlog.result8
-rw-r--r--mysql-test/suite/vcol/r/vcol_keys_myisam.result3
-rw-r--r--mysql-test/suite/vcol/r/vcol_misc.result28
-rw-r--r--mysql-test/suite/vcol/r/vcol_select_innodb.result16
-rw-r--r--mysql-test/suite/vcol/r/vcol_select_myisam.result32
-rw-r--r--mysql-test/suite/vcol/r/wrong_arena.result28
-rw-r--r--mysql-test/suite/vcol/t/update.test4
-rw-r--r--mysql-test/suite/vcol/t/vcol_keys_myisam.test2
-rw-r--r--mysql-test/suite/vcol/t/vcol_misc.test27
-rw-r--r--mysql-test/suite/versioning/r/alter.result4
-rw-r--r--mysql-test/suite/versioning/r/commit_id.result14
-rw-r--r--mysql-test/suite/versioning/r/create.result12
-rw-r--r--mysql-test/suite/versioning/r/cte.result2
-rw-r--r--mysql-test/suite/versioning/r/foreign.result8
-rw-r--r--mysql-test/suite/versioning/r/insert.result2
-rw-r--r--mysql-test/suite/versioning/r/load_data.result2
-rw-r--r--mysql-test/suite/versioning/r/online.result41
-rw-r--r--mysql-test/suite/versioning/r/partition.result24
-rw-r--r--mysql-test/suite/versioning/r/partition_innodb.result2
-rw-r--r--mysql-test/suite/versioning/r/select,trx_id.rdiff11
-rw-r--r--mysql-test/suite/versioning/r/select.result42
-rw-r--r--mysql-test/suite/versioning/r/select2,trx_id.rdiff16
-rw-r--r--mysql-test/suite/versioning/r/select2.result6
-rw-r--r--mysql-test/suite/versioning/r/sysvars.result15
-rw-r--r--mysql-test/suite/versioning/r/trx_id.result16
-rw-r--r--mysql-test/suite/versioning/t/alter.test2
-rw-r--r--mysql-test/suite/versioning/t/foreign.test6
-rw-r--r--mysql-test/suite/versioning/t/online.test58
-rw-r--r--mysql-test/suite/versioning/t/partition_innodb.test1
-rw-r--r--mysql-test/suite/versioning/t/select.test26
-rw-r--r--mysql-test/suite/versioning/t/sysvars.test10
-rw-r--r--mysql-test/suite/wsrep/disabled.def3
-rw-r--r--mysql-test/suite/wsrep/my.cnf4
-rw-r--r--mysql-test/suite/wsrep/r/mdev_7798.result1
-rw-r--r--mysql-test/suite/wsrep/r/wsrep-recover-v25,binlogon.rdiff32
-rw-r--r--mysql-test/suite/wsrep/r/wsrep-recover-v25.result27
-rw-r--r--mysql-test/suite/wsrep/r/wsrep-recover.result64
-rw-r--r--mysql-test/suite/wsrep/suite.pm4
-rw-r--r--mysql-test/suite/wsrep/t/binlog_format.cnf8
-rw-r--r--mysql-test/suite/wsrep/t/binlog_format.opt1
-rw-r--r--mysql-test/suite/wsrep/t/binlog_format.test1
-rw-r--r--mysql-test/suite/wsrep/t/mdev_10186.cnf6
-rw-r--r--mysql-test/suite/wsrep/t/mdev_10186.opt1
-rw-r--r--mysql-test/suite/wsrep/t/mdev_10186.test1
-rw-r--r--mysql-test/suite/wsrep/t/mdev_6832.cnf7
-rw-r--r--mysql-test/suite/wsrep/t/mdev_6832.opt1
-rw-r--r--mysql-test/suite/wsrep/t/mdev_6832.test1
-rw-r--r--mysql-test/suite/wsrep/t/mdev_7798.cnf7
-rw-r--r--mysql-test/suite/wsrep/t/mdev_7798.opt1
-rw-r--r--mysql-test/suite/wsrep/t/mdev_7798.test1
-rw-r--r--mysql-test/suite/wsrep/t/pool_of_threads.cnf8
-rw-r--r--mysql-test/suite/wsrep/t/pool_of_threads.opt1
-rw-r--r--mysql-test/suite/wsrep/t/wsrep-recover-v25.test64
-rw-r--r--mysql-test/suite/wsrep/t/wsrep-recover.cnf9
-rw-r--r--mysql-test/suite/wsrep/t/wsrep-recover.combinations4
-rw-r--r--mysql-test/suite/wsrep/t/wsrep-recover.test194
-rw-r--r--mysql-test/unstable-tests477
-rw-r--r--mysql-test/valgrind.supp20
-rw-r--r--mysys/errors.c4
-rw-r--r--mysys/mf_cache.c2
-rw-r--r--mysys/mf_iocache.c19
-rw-r--r--mysys/mf_keycache.c2
-rw-r--r--mysys/mf_tempfile.c35
-rw-r--r--mysys/my_alloc.c15
-rw-r--r--mysys/my_chsize.c2
-rw-r--r--mysys/my_copy.c4
-rw-r--r--mysys/my_delete.c6
-rw-r--r--mysys/my_fopen.c7
-rw-r--r--mysys/my_fstream.c8
-rw-r--r--mysys/my_getwd.c4
-rw-r--r--mysys/my_lib.c6
-rw-r--r--mysys/my_lock.c4
-rw-r--r--mysys/my_lockmem.c2
-rw-r--r--mysys/my_malloc.c5
-rw-r--r--mysys/my_mkdir.c2
-rw-r--r--mysys/my_once.c2
-rw-r--r--mysys/my_open.c4
-rw-r--r--mysys/my_pread.c6
-rw-r--r--mysys/my_read.c4
-rw-r--r--mysys/my_redel.c6
-rw-r--r--mysys/my_rename.c2
-rw-r--r--mysys/my_setuser.c8
-rw-r--r--mysys/my_static.c2
-rw-r--r--mysys/my_sync.c2
-rw-r--r--mysys/my_write.c2
-rw-r--r--mysys/thr_alarm.c6
-rw-r--r--pcre/pcre_dfa_exec.c1
-rw-r--r--pcre/pcre_exec.c2
-rw-r--r--pcre/pcregrep.c2
-rw-r--r--plugin/auth_ed25519/CMakeLists.txt4
-rw-r--r--plugin/auth_ed25519/server_ed25519.c54
-rw-r--r--plugin/auth_examples/dialog_examples.c6
-rw-r--r--plugin/auth_examples/qa_auth_interface.c3
-rw-r--r--plugin/auth_examples/qa_auth_server.c3
-rw-r--r--plugin/auth_examples/test_plugin.c6
-rw-r--r--plugin/auth_gssapi/gssapi_server.cc20
-rw-r--r--plugin/auth_gssapi/mysql-test/auth_gssapi/multiauth.result34
-rw-r--r--plugin/auth_gssapi/mysql-test/auth_gssapi/multiauth.test36
-rw-r--r--plugin/auth_gssapi/server_plugin.cc36
-rw-r--r--plugin/auth_gssapi/server_plugin.h2
-rw-r--r--plugin/auth_gssapi/sspi_server.cc17
-rw-r--r--plugin/auth_pam/CMakeLists.txt12
-rw-r--r--plugin/auth_pam/auth_pam.c295
-rw-r--r--plugin/auth_pam/auth_pam_base.c174
-rw-r--r--plugin/auth_pam/auth_pam_common.c51
-rw-r--r--plugin/auth_pam/auth_pam_tool.c115
-rw-r--r--plugin/auth_pam/auth_pam_tool.h81
-rw-r--r--plugin/auth_pam/auth_pam_v1.c86
-rw-r--r--plugin/auth_pam/testing/pam_mariadb_mtr.c10
-rw-r--r--plugin/auth_socket/CMakeLists.txt2
-rw-r--r--plugin/auth_socket/auth_socket.c3
-rw-r--r--plugin/aws_key_management/CMakeLists.txt178
-rw-r--r--plugin/aws_key_management/aws_key_management_plugin.cc28
-rw-r--r--plugin/cracklib_password_check/cracklib_password_check.c3
-rw-r--r--plugin/feedback/sender_thread.cc14
-rw-r--r--plugin/handler_socket/handlersocket/database.cpp6
-rw-r--r--plugin/metadata_lock_info/metadata_lock_info.cc29
-rw-r--r--plugin/metadata_lock_info/mysql-test/metadata_lock_info/r/global_read_lock.result3
-rw-r--r--plugin/query_response_time/query_response_time.cc39
-rw-r--r--plugin/server_audit/server_audit.c10
-rw-r--r--plugin/simple_password_check/simple_password_check.c4
-rw-r--r--plugin/wsrep_info/mysql-test/wsrep_info/my.cnf7
-rw-r--r--plugin/wsrep_info/mysql-test/wsrep_info/r/plugin.result10
-rw-r--r--plugin/wsrep_info/mysql-test/wsrep_info/suite.pm4
-rw-r--r--plugin/wsrep_info/plugin.cc87
-rw-r--r--scripts/CMakeLists.txt4
-rw-r--r--scripts/fill_help_tables.sql2
-rw-r--r--scripts/mysql_install_db.sh71
-rw-r--r--scripts/mysql_secure_installation.sh65
-rw-r--r--scripts/mysql_system_tables.sql122
-rw-r--r--scripts/mysql_system_tables_data.sql29
-rw-r--r--scripts/mysql_system_tables_fix.sql212
-rw-r--r--scripts/mysql_test_db.sql4
-rw-r--r--scripts/mysqld_safe.sh11
-rw-r--r--scripts/wsrep_sst_mysqldump.sh5
-rw-r--r--scripts/wsrep_sst_rsync.sh33
-rw-r--r--sql-common/client.c315
-rw-r--r--sql-common/my_time.c772
-rw-r--r--sql/CMakeLists.txt48
-rw-r--r--sql/MSG00001.binbin184 -> 36 bytes
-rw-r--r--sql/backup.cc385
-rw-r--r--sql/backup.h34
-rw-r--r--sql/compat56.cc21
-rw-r--r--sql/compat56.h9
-rw-r--r--sql/datadict.cc29
-rw-r--r--sql/datadict.h3
-rw-r--r--sql/derived_handler.cc127
-rw-r--r--sql/derived_handler.h85
-rw-r--r--sql/event_data_objects.cc40
-rw-r--r--sql/event_parse_data.cc18
-rw-r--r--sql/event_scheduler.cc24
-rw-r--r--sql/events.cc47
-rw-r--r--sql/field.cc1868
-rw-r--r--sql/field.h659
-rw-r--r--sql/field_conv.cc37
-rw-r--r--sql/filesort.cc155
-rw-r--r--sql/filesort.h3
-rw-r--r--sql/ha_partition.cc96
-rw-r--r--sql/ha_partition.h8
-rw-r--r--sql/handle_connections_win.cc560
-rw-r--r--sql/handle_connections_win.h20
-rw-r--r--sql/handler.cc973
-rw-r--r--sql/handler.h292
-rw-r--r--sql/init.cc4
-rw-r--r--sql/init.h1
-rw-r--r--sql/innodb_priv.h4
-rw-r--r--sql/item.cc1466
-rw-r--r--sql/item.h1667
-rw-r--r--sql/item_buff.cc25
-rw-r--r--sql/item_cmpfunc.cc864
-rw-r--r--sql/item_cmpfunc.h152
-rw-r--r--sql/item_create.cc211
-rw-r--r--sql/item_create.h15
-rw-r--r--sql/item_func.cc487
-rw-r--r--sql/item_func.h468
-rw-r--r--sql/item_geofunc.cc2
-rw-r--r--sql/item_geofunc.h2
-rw-r--r--sql/item_inetfunc.cc627
-rw-r--r--sql/item_inetfunc.h60
-rw-r--r--sql/item_jsonfunc.cc10
-rw-r--r--sql/item_row.cc4
-rw-r--r--sql/item_row.h30
-rw-r--r--sql/item_strfunc.cc147
-rw-r--r--sql/item_strfunc.h68
-rw-r--r--sql/item_subselect.cc102
-rw-r--r--sql/item_subselect.h30
-rw-r--r--sql/item_sum.cc244
-rw-r--r--sql/item_sum.h131
-rw-r--r--sql/item_timefunc.cc945
-rw-r--r--sql/item_timefunc.h706
-rw-r--r--sql/item_vers.cc9
-rw-r--r--sql/item_vers.h2
-rw-r--r--sql/item_windowfunc.cc21
-rw-r--r--sql/item_windowfunc.h24
-rw-r--r--sql/item_xmlfunc.cc129
-rw-r--r--sql/lex.h5
-rw-r--r--sql/lock.cc191
-rw-r--r--sql/log.cc393
-rw-r--r--sql/log.h31
-rw-r--r--sql/log_event.cc155
-rw-r--r--sql/log_event.h53
-rw-r--r--sql/mdl.cc626
-rw-r--r--sql/mdl.h123
-rw-r--r--sql/mem_root_array.h3
-rw-r--r--sql/message.h29
-rw-r--r--sql/message.mc3
-rw-r--r--sql/message.rc2
-rw-r--r--sql/multi_range_read.cc150
-rw-r--r--sql/my_decimal.cc52
-rw-r--r--sql/my_decimal.h137
-rw-r--r--sql/my_json_writer.cc85
-rw-r--r--sql/my_json_writer.h427
-rw-r--r--sql/mysql_install_db.cc40
-rw-r--r--sql/mysql_upgrade_service.cc2
-rw-r--r--sql/mysqld.cc2190
-rw-r--r--sql/mysqld.h48
-rw-r--r--sql/opt_range.cc1420
-rw-r--r--sql/opt_range.h8
-rw-r--r--sql/opt_subselect.cc1146
-rw-r--r--sql/opt_subselect.h8
-rw-r--r--sql/opt_sum.cc2
-rw-r--r--sql/opt_table_elimination.cc37
-rw-r--r--sql/opt_trace.cc698
-rw-r--r--sql/opt_trace.h208
-rw-r--r--sql/opt_trace_context.h87
-rw-r--r--sql/partition_info.h5
-rw-r--r--sql/procedure.h18
-rw-r--r--sql/protocol.cc331
-rw-r--r--sql/protocol.h39
-rw-r--r--sql/repl_failsafe.cc158
-rw-r--r--sql/repl_failsafe.h7
-rw-r--r--sql/rowid_filter.cc626
-rw-r--r--sql/rowid_filter.h468
-rw-r--r--sql/rpl_gtid.cc413
-rw-r--r--sql/rpl_gtid.h12
-rw-r--r--sql/rpl_mi.cc10
-rw-r--r--sql/rpl_mi.h6
-rw-r--r--sql/rpl_parallel.cc4
-rw-r--r--sql/rpl_record.cc11
-rw-r--r--sql/rpl_rli.cc87
-rw-r--r--sql/rpl_rli.h11
-rw-r--r--sql/select_handler.cc188
-rw-r--r--sql/select_handler.h72
-rw-r--r--sql/semisync_master_ack_receiver.cc3
-rw-r--r--sql/service_wsrep.cc272
-rw-r--r--sql/session_tracker.cc4
-rw-r--r--sql/set_var.cc2
-rw-r--r--sql/set_var.h9
-rw-r--r--sql/share/errmsg-utf8.txt45
-rw-r--r--sql/slave.cc220
-rw-r--r--sql/slave.h2
-rw-r--r--sql/sp.cc9
-rw-r--r--sql/sp.h3
-rw-r--r--sql/sp_head.cc163
-rw-r--r--sql/sp_head.h25
-rw-r--r--sql/sp_pcontext.h1
-rw-r--r--sql/sp_rcontext.cc14
-rw-r--r--sql/sql_acl.cc3975
-rw-r--r--sql/sql_admin.cc35
-rw-r--r--sql/sql_alloc.h3
-rw-r--r--sql/sql_alter.cc11
-rw-r--r--sql/sql_analyse.cc103
-rw-r--r--sql/sql_analyze_stmt.h79
-rw-r--r--sql/sql_array.h13
-rw-r--r--sql/sql_base.cc751
-rw-r--r--sql/sql_base.h36
-rw-r--r--sql/sql_basic_types.h310
-rw-r--r--sql/sql_binlog.cc6
-rw-r--r--sql/sql_cache.cc15
-rw-r--r--sql/sql_class.cc573
-rw-r--r--sql/sql_class.h593
-rw-r--r--sql/sql_cmd.h1
-rw-r--r--sql/sql_connect.cc32
-rw-r--r--sql/sql_const.h6
-rw-r--r--sql/sql_cte.cc28
-rw-r--r--sql/sql_cte.h26
-rw-r--r--sql/sql_db.cc2
-rw-r--r--sql/sql_delete.cc168
-rw-r--r--sql/sql_derived.cc432
-rw-r--r--sql/sql_derived.h1
-rw-r--r--sql/sql_do.cc2
-rw-r--r--sql/sql_error.cc2
-rw-r--r--sql/sql_error.h59
-rw-r--r--sql/sql_explain.cc171
-rw-r--r--sql/sql_explain.h46
-rw-r--r--sql/sql_handler.cc14
-rw-r--r--sql/sql_handler.h1
-rw-r--r--sql/sql_help.cc11
-rw-r--r--sql/sql_insert.cc273
-rw-r--r--sql/sql_lex.cc2596
-rw-r--r--sql/sql_lex.h698
-rw-r--r--sql/sql_list.h8
-rw-r--r--sql/sql_load.cc124
-rw-r--r--sql/sql_parse.cc1134
-rw-r--r--sql/sql_parse.h3
-rw-r--r--sql/sql_partition.cc24
-rw-r--r--sql/sql_partition_admin.cc4
-rw-r--r--sql/sql_plugin.cc106
-rw-r--r--sql/sql_plugin_services.ic62
-rw-r--r--sql/sql_prepare.cc188
-rw-r--r--sql/sql_priv.h27
-rw-r--r--sql/sql_profile.cc4
-rw-r--r--sql/sql_reload.cc59
-rw-r--r--sql/sql_repl.cc348
-rw-r--r--sql/sql_repl.h11
-rw-r--r--sql/sql_select.cc2254
-rw-r--r--sql/sql_select.h99
-rw-r--r--sql/sql_sequence.cc4
-rw-r--r--sql/sql_show.cc659
-rw-r--r--sql/sql_signal.cc2
-rw-r--r--sql/sql_sort.h1
-rw-r--r--sql/sql_statistics.cc205
-rw-r--r--sql/sql_statistics.h27
-rw-r--r--sql/sql_string.cc179
-rw-r--r--sql/sql_string.h956
-rw-r--r--sql/sql_table.cc1010
-rw-r--r--sql/sql_table.h4
-rw-r--r--sql/sql_test.cc27
-rw-r--r--sql/sql_test.h2
-rw-r--r--sql/sql_time.cc368
-rw-r--r--sql/sql_time.h119
-rw-r--r--sql/sql_trigger.cc28
-rw-r--r--sql/sql_truncate.cc4
-rw-r--r--sql/sql_tvc.cc11
-rw-r--r--sql/sql_type.cc2686
-rw-r--r--sql/sql_type.h3065
-rw-r--r--sql/sql_type_int.h50
-rw-r--r--sql/sql_type_json.cc55
-rw-r--r--sql/sql_type_json.h38
-rw-r--r--sql/sql_udf.cc3
-rw-r--r--sql/sql_udf.h15
-rw-r--r--sql/sql_union.cc20
-rw-r--r--sql/sql_update.cc206
-rw-r--r--sql/sql_view.cc65
-rw-r--r--sql/sql_yacc.yy2907
-rw-r--r--sql/sql_yacc_ora.yy2840
-rw-r--r--sql/structs.h93
-rw-r--r--sql/sys_vars.cc262
-rw-r--r--sql/sys_vars.ic9
-rw-r--r--sql/table.cc1476
-rw-r--r--sql/table.h207
-rw-r--r--sql/table_cache.cc34
-rw-r--r--sql/table_cache.h2
-rw-r--r--sql/temporary_tables.cc31
-rw-r--r--sql/threadpool.h7
-rw-r--r--sql/threadpool_common.cc2
-rw-r--r--sql/threadpool_generic.cc52
-rw-r--r--sql/threadpool_win.cc94
-rw-r--r--sql/transaction.cc391
-rw-r--r--sql/transaction.h8
-rw-r--r--sql/udf_example.c139
-rw-r--r--sql/udf_example.def7
-rw-r--r--sql/unireg.cc273
-rw-r--r--sql/unireg.h14
-rw-r--r--sql/vers_string.h6
-rw-r--r--sql/vers_utils.h39
-rw-r--r--sql/wsrep_applier.cc320
-rw-r--r--sql/wsrep_applier.h67
-rw-r--r--sql/wsrep_binlog.cc405
-rw-r--r--sql/wsrep_binlog.h63
-rw-r--r--sql/wsrep_check_opts.cc4
-rw-r--r--sql/wsrep_client_service.cc337
-rw-r--r--sql/wsrep_client_service.h63
-rw-r--r--sql/wsrep_client_state.h47
-rw-r--r--sql/wsrep_condition_variable.h54
-rw-r--r--sql/wsrep_dummy.cc119
-rw-r--r--sql/wsrep_high_priority_service.cc644
-rw-r--r--sql/wsrep_high_priority_service.h119
-rw-r--r--sql/wsrep_hton.cc659
-rw-r--r--sql/wsrep_mutex.h50
-rw-r--r--sql/wsrep_mysqld.cc2646
-rw-r--r--sql/wsrep_mysqld.h377
-rw-r--r--sql/wsrep_notify.cc77
-rw-r--r--sql/wsrep_plugin.cc53
-rw-r--r--sql/wsrep_priv.h24
-rw-r--r--sql/wsrep_schema.cc1366
-rw-r--r--sql/wsrep_schema.h144
-rw-r--r--sql/wsrep_server_service.cc334
-rw-r--r--sql/wsrep_server_service.h81
-rw-r--r--sql/wsrep_server_state.cc85
-rw-r--r--sql/wsrep_server_state.h68
-rw-r--r--sql/wsrep_sst.cc614
-rw-r--r--sql/wsrep_sst.h30
-rw-r--r--sql/wsrep_storage_service.cc213
-rw-r--r--sql/wsrep_storage_service.h48
-rw-r--r--sql/wsrep_thd.cc830
-rw-r--r--sql/wsrep_thd.h216
-rw-r--r--sql/wsrep_trans_observer.h513
-rw-r--r--sql/wsrep_types.h29
-rw-r--r--sql/wsrep_utils.cc104
-rw-r--r--sql/wsrep_utils.h76
-rw-r--r--sql/wsrep_var.cc339
-rw-r--r--sql/wsrep_var.h13
-rw-r--r--sql/wsrep_xid.cc90
-rw-r--r--sql/wsrep_xid.h12
-rw-r--r--sql/xa.cc867
-rw-r--r--sql/xa.h44
-rw-r--r--storage/archive/azio.c5
-rw-r--r--storage/archive/ha_archive.cc22
-rw-r--r--storage/archive/ha_archive.h3
-rw-r--r--storage/blackhole/ha_blackhole.h2
-rw-r--r--storage/connect/array.cpp2
-rw-r--r--storage/connect/ha_connect.cc99
-rw-r--r--storage/connect/ha_connect.h4
-rw-r--r--storage/connect/mysql-test/connect/r/mysql_index.result156
-rw-r--r--storage/connect/mysql-test/connect/r/part_file.result9
-rw-r--r--storage/connect/mysql-test/connect/t/mysql_index.test4
-rw-r--r--storage/connect/mysql-test/connect/t/part_file.test4
-rw-r--r--storage/connect/tabext.cpp6
-rw-r--r--storage/connect/tabmysql.cpp5
-rw-r--r--storage/connect/tabxcl.cpp3
-rw-r--r--storage/connect/value.cpp3
-rw-r--r--storage/csv/ha_tina.cc32
-rw-r--r--storage/csv/ha_tina.h2
-rw-r--r--storage/federated/ha_federated.h1
-rw-r--r--storage/federatedx/TODO30
-rw-r--r--storage/federatedx/federatedx_pushdown.cc293
-rw-r--r--storage/federatedx/federatedx_pushdown.h63
-rw-r--r--storage/federatedx/ha_federatedx.cc21
-rw-r--r--storage/federatedx/ha_federatedx.h11
-rw-r--r--storage/heap/ha_heap.h4
-rw-r--r--storage/innobase/CMakeLists.txt2
-rw-r--r--storage/innobase/btr/btr0btr.cc506
-rw-r--r--storage/innobase/btr/btr0bulk.cc17
-rw-r--r--storage/innobase/btr/btr0cur.cc679
-rw-r--r--storage/innobase/btr/btr0defragment.cc33
-rw-r--r--storage/innobase/btr/btr0pcur.cc38
-rw-r--r--storage/innobase/btr/btr0scrub.cc22
-rw-r--r--storage/innobase/btr/btr0sea.cc8
-rw-r--r--storage/innobase/buf/buf0buf.cc699
-rw-r--r--storage/innobase/buf/buf0checksum.cc36
-rw-r--r--storage/innobase/buf/buf0dblwr.cc197
-rw-r--r--storage/innobase/buf/buf0dump.cc12
-rw-r--r--storage/innobase/buf/buf0flu.cc257
-rw-r--r--storage/innobase/buf/buf0lru.cc70
-rw-r--r--storage/innobase/buf/buf0rea.cc125
-rw-r--r--storage/innobase/data/data0data.cc72
-rw-r--r--storage/innobase/data/data0type.cc66
-rw-r--r--storage/innobase/dict/dict0boot.cc54
-rw-r--r--storage/innobase/dict/dict0crea.cc68
-rw-r--r--storage/innobase/dict/dict0dict.cc273
-rw-r--r--storage/innobase/dict/dict0load.cc5
-rw-r--r--storage/innobase/dict/dict0mem.cc358
-rw-r--r--storage/innobase/dict/dict0stats.cc61
-rw-r--r--storage/innobase/dict/dict0stats_bg.cc4
-rw-r--r--storage/innobase/fil/fil0crypt.cc482
-rw-r--r--storage/innobase/fil/fil0fil.cc625
-rw-r--r--storage/innobase/fil/fil0pagecompress.cc540
-rw-r--r--storage/innobase/fsp/fsp0file.cc67
-rw-r--r--storage/innobase/fsp/fsp0fsp.cc494
-rw-r--r--storage/innobase/fsp/fsp0space.cc14
-rw-r--r--storage/innobase/fsp/fsp0sysspace.cc2
-rw-r--r--storage/innobase/fts/fts0fts.cc84
-rw-r--r--storage/innobase/fts/fts0que.cc8
-rw-r--r--storage/innobase/fut/fut0lst.cc68
-rw-r--r--storage/innobase/gis/gis0rtree.cc12
-rw-r--r--storage/innobase/gis/gis0sea.cc16
-rw-r--r--storage/innobase/ha/ha0ha.cc20
-rw-r--r--storage/innobase/handler/ha_innodb.cc791
-rw-r--r--storage/innobase/handler/ha_innodb.h37
-rw-r--r--storage/innobase/handler/handler0alter.cc2887
-rw-r--r--storage/innobase/handler/i_s.cc32
-rw-r--r--storage/innobase/ibuf/ibuf0ibuf.cc393
-rw-r--r--storage/innobase/include/btr0btr.h99
-rw-r--r--storage/innobase/include/btr0btr.ic14
-rw-r--r--storage/innobase/include/btr0bulk.h6
-rw-r--r--storage/innobase/include/btr0cur.h16
-rw-r--r--storage/innobase/include/btr0defragment.h6
-rw-r--r--storage/innobase/include/btr0types.h38
-rw-r--r--storage/innobase/include/buf0buddy.h5
-rw-r--r--storage/innobase/include/buf0buddy.ic10
-rw-r--r--storage/innobase/include/buf0buf.h261
-rw-r--r--storage/innobase/include/buf0buf.ic61
-rw-r--r--storage/innobase/include/buf0checksum.h15
-rw-r--r--storage/innobase/include/buf0flu.h29
-rw-r--r--storage/innobase/include/buf0flu.ic79
-rw-r--r--storage/innobase/include/buf0rea.h37
-rw-r--r--storage/innobase/include/buf0types.h11
-rw-r--r--storage/innobase/include/data0data.h27
-rw-r--r--storage/innobase/include/data0type.h98
-rw-r--r--storage/innobase/include/data0type.ic2
-rw-r--r--storage/innobase/include/db0err.h2
-rw-r--r--storage/innobase/include/dict0boot.h14
-rw-r--r--storage/innobase/include/dict0crea.h8
-rw-r--r--storage/innobase/include/dict0dict.h224
-rw-r--r--storage/innobase/include/dict0dict.ic181
-rw-r--r--storage/innobase/include/dict0mem.h401
-rw-r--r--storage/innobase/include/dict0priv.h12
-rw-r--r--storage/innobase/include/dict0priv.ic35
-rw-r--r--storage/innobase/include/dict0stats.h13
-rw-r--r--storage/innobase/include/fil0crypt.h63
-rw-r--r--storage/innobase/include/fil0fil.h454
-rw-r--r--storage/innobase/include/fil0fil.ic42
-rw-r--r--storage/innobase/include/fil0pagecompress.h18
-rw-r--r--storage/innobase/include/fsp0fsp.h136
-rw-r--r--storage/innobase/include/fsp0fsp.ic69
-rw-r--r--storage/innobase/include/fsp0pagecompress.h11
-rw-r--r--storage/innobase/include/fsp0space.h2
-rw-r--r--storage/innobase/include/fsp0types.h129
-rw-r--r--storage/innobase/include/fts0fts.h14
-rw-r--r--storage/innobase/include/fut0fut.h29
-rw-r--r--storage/innobase/include/fut0fut.ic68
-rw-r--r--storage/innobase/include/fut0lst.h58
-rw-r--r--storage/innobase/include/fut0lst.ic24
-rw-r--r--storage/innobase/include/ha_prototypes.h12
-rw-r--r--storage/innobase/include/ib0mutex.h127
-rw-r--r--storage/innobase/include/ibuf0ibuf.h73
-rw-r--r--storage/innobase/include/ibuf0ibuf.ic56
-rw-r--r--storage/innobase/include/log0crypt.h19
-rw-r--r--storage/innobase/include/log0log.h63
-rw-r--r--storage/innobase/include/log0log.ic6
-rw-r--r--storage/innobase/include/log0recv.h4
-rw-r--r--storage/innobase/include/mach0data.h3
-rw-r--r--storage/innobase/include/mem0mem.ic4
-rw-r--r--storage/innobase/include/mtr0log.h20
-rw-r--r--storage/innobase/include/mtr0mtr.ic2
-rw-r--r--storage/innobase/include/mtr0types.h15
-rw-r--r--storage/innobase/include/os0api.h20
-rw-r--r--storage/innobase/include/os0file.h50
-rw-r--r--storage/innobase/include/os0proc.h2
-rw-r--r--storage/innobase/include/os0thread.h2
-rw-r--r--storage/innobase/include/page0cur.ic6
-rw-r--r--storage/innobase/include/page0page.h31
-rw-r--r--storage/innobase/include/page0page.ic31
-rw-r--r--storage/innobase/include/page0size.h197
-rw-r--r--storage/innobase/include/page0types.h12
-rw-r--r--storage/innobase/include/page0zip.h60
-rw-r--r--storage/innobase/include/page0zip.ic33
-rw-r--r--storage/innobase/include/read0types.h34
-rw-r--r--storage/innobase/include/rem0rec.h153
-rw-r--r--storage/innobase/include/rem0rec.ic40
-rw-r--r--storage/innobase/include/row0ext.h9
-rw-r--r--storage/innobase/include/row0log.h2
-rw-r--r--storage/innobase/include/row0mysql.h13
-rw-r--r--storage/innobase/include/row0row.h21
-rw-r--r--storage/innobase/include/row0row.ic8
-rw-r--r--storage/innobase/include/row0trunc.h416
-rw-r--r--storage/innobase/include/row0undo.h15
-rw-r--r--storage/innobase/include/row0upd.h34
-rw-r--r--storage/innobase/include/row0upd.ic8
-rw-r--r--storage/innobase/include/srv0mon.h2
-rw-r--r--storage/innobase/include/srv0srv.h28
-rw-r--r--storage/innobase/include/sync0arr.ic3
-rw-r--r--storage/innobase/include/sync0policy.h522
-rw-r--r--storage/innobase/include/sync0policy.ic101
-rw-r--r--storage/innobase/include/sync0rw.h5
-rw-r--r--storage/innobase/include/sync0rw.ic87
-rw-r--r--storage/innobase/include/sync0types.h72
-rw-r--r--storage/innobase/include/trx0purge.h247
-rw-r--r--storage/innobase/include/trx0roll.h10
-rw-r--r--storage/innobase/include/trx0rseg.ic5
-rw-r--r--storage/innobase/include/trx0sys.h63
-rw-r--r--storage/innobase/include/trx0trx.h25
-rw-r--r--storage/innobase/include/trx0undo.h39
-rw-r--r--storage/innobase/include/trx0undo.ic39
-rw-r--r--storage/innobase/include/univ.i1
-rw-r--r--storage/innobase/include/ut0counter.h136
-rw-r--r--storage/innobase/include/ut0crc32.h6
-rw-r--r--storage/innobase/include/ut0mutex.h2
-rw-r--r--storage/innobase/include/ut0ut.h13
-rw-r--r--storage/innobase/innodb.cmake5
-rw-r--r--storage/innobase/lock/lock0lock.cc105
-rw-r--r--storage/innobase/lock/lock0wait.cc4
-rw-r--r--storage/innobase/log/log0crypt.cc147
-rw-r--r--storage/innobase/log/log0log.cc66
-rw-r--r--storage/innobase/log/log0recv.cc251
-rw-r--r--storage/innobase/mtr/mtr0log.cc117
-rw-r--r--storage/innobase/mtr/mtr0mtr.cc11
-rw-r--r--storage/innobase/os/os0event.cc2
-rw-r--r--storage/innobase/os/os0file.cc488
-rw-r--r--storage/innobase/os/os0proc.cc21
-rw-r--r--storage/innobase/os/os0thread.cc8
-rw-r--r--storage/innobase/page/page0cur.cc22
-rw-r--r--storage/innobase/page/page0page.cc39
-rw-r--r--storage/innobase/page/page0zip.cc185
-rw-r--r--storage/innobase/read/read0read.cc10
-rw-r--r--storage/innobase/rem/rem0cmp.cc2
-rw-r--r--storage/innobase/rem/rem0rec.cc527
-rw-r--r--storage/innobase/row/row0ext.cc32
-rw-r--r--storage/innobase/row/row0ftsort.cc4
-rw-r--r--storage/innobase/row/row0import.cc271
-rw-r--r--storage/innobase/row/row0ins.cc148
-rw-r--r--storage/innobase/row/row0log.cc75
-rw-r--r--storage/innobase/row/row0merge.cc27
-rw-r--r--storage/innobase/row/row0mysql.cc22
-rw-r--r--storage/innobase/row/row0purge.cc40
-rw-r--r--storage/innobase/row/row0quiesce.cc7
-rw-r--r--storage/innobase/row/row0row.cc398
-rw-r--r--storage/innobase/row/row0sel.cc189
-rw-r--r--storage/innobase/row/row0trunc.cc1961
-rw-r--r--storage/innobase/row/row0uins.cc232
-rw-r--r--storage/innobase/row/row0umod.cc171
-rw-r--r--storage/innobase/row/row0undo.cc194
-rw-r--r--storage/innobase/row/row0upd.cc207
-rw-r--r--storage/innobase/srv/srv0conc.cc34
-rw-r--r--storage/innobase/srv/srv0mon.cc18
-rw-r--r--storage/innobase/srv/srv0srv.cc124
-rw-r--r--storage/innobase/srv/srv0start.cc188
-rw-r--r--storage/innobase/sync/sync0arr.cc34
-rw-r--r--storage/innobase/sync/sync0rw.cc80
-rw-r--r--storage/innobase/trx/trx0purge.cc824
-rw-r--r--storage/innobase/trx/trx0rec.cc304
-rw-r--r--storage/innobase/trx/trx0roll.cc184
-rw-r--r--storage/innobase/trx/trx0rseg.cc110
-rw-r--r--storage/innobase/trx/trx0sys.cc9
-rw-r--r--storage/innobase/trx/trx0trx.cc64
-rw-r--r--storage/innobase/trx/trx0undo.cc63
-rw-r--r--storage/innobase/ut/ut0crc32.cc82
-rw-r--r--storage/innobase/ut/ut0new.cc1
-rw-r--r--storage/innobase/ut/ut0ut.cc2
-rw-r--r--storage/maria/CMakeLists.txt10
-rw-r--r--storage/maria/ha_maria.cc118
-rw-r--r--storage/maria/ma_backup.c281
-rw-r--r--storage/maria/ma_blockrec.c3
-rw-r--r--storage/maria/ma_check.c6
-rw-r--r--storage/maria/ma_checkpoint.h4
-rw-r--r--storage/maria/ma_control_file.c122
-rw-r--r--storage/maria/ma_control_file.h1
-rw-r--r--storage/maria/ma_create.c22
-rw-r--r--storage/maria/ma_crypt.c8
-rw-r--r--storage/maria/ma_extra.c5
-rw-r--r--storage/maria/ma_info.c2
-rw-r--r--storage/maria/ma_init.c2
-rw-r--r--storage/maria/ma_locking.c9
-rw-r--r--storage/maria/ma_loghandler.c26
-rw-r--r--storage/maria/ma_loghandler.h3
-rw-r--r--storage/maria/ma_open.c35
-rw-r--r--storage/maria/ma_pagecrc.c10
-rw-r--r--storage/maria/ma_recovery.c17
-rw-r--r--storage/maria/ma_sort.c2
-rw-r--r--storage/maria/ma_state.c25
-rw-r--r--storage/maria/ma_test2.c5
-rw-r--r--storage/maria/ma_trnman.h36
-rw-r--r--storage/maria/maria_def.h7
-rw-r--r--storage/maria/maria_read_log.c30
-rw-r--r--storage/maria/test_ma_backup.c449
-rw-r--r--storage/maria/trnman.c3
-rwxr-xr-xstorage/maria/unittest/ma_test_all-t7
-rw-r--r--storage/maria/unittest/ma_test_recovery.expected192
-rw-r--r--storage/mroonga/ha_mroonga.cpp26
-rw-r--r--storage/mroonga/lib/mrn_condition_converter.cpp29
-rw-r--r--storage/mroonga/vendor/groonga/vendor/plugins/CMakeLists.txt3
-rw-r--r--storage/myisam/TODO7
-rw-r--r--storage/myisam/ha_myisam.cc69
-rw-r--r--storage/myisam/ha_myisam.h2
-rw-r--r--storage/myisam/mi_check.c6
-rw-r--r--storage/myisam/mi_extra.c15
-rw-r--r--storage/myisam/mi_info.c2
-rw-r--r--storage/myisam/mi_key.c13
-rw-r--r--storage/myisam/mi_locking.c6
-rw-r--r--storage/myisam/mi_range.c72
-rw-r--r--storage/myisam/mi_rkey.c4
-rw-r--r--storage/myisam/mi_rnext.c4
-rw-r--r--storage/myisam/mi_rnext_same.c4
-rw-r--r--storage/myisam/mi_rprev.c4
-rw-r--r--storage/myisam/myisamdef.h12
-rw-r--r--storage/myisam/myisampack.c2
-rw-r--r--storage/myisam/sort.c2
-rw-r--r--storage/oqgraph/ha_oqgraph.cc2
-rw-r--r--storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.result2
-rw-r--r--storage/oqgraph/oqgraph_thunk.cc12
-rw-r--r--storage/perfschema/unittest/pfs_server_stubs.cc2
-rw-r--r--storage/rocksdb/ha_rocksdb.cc11
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result14
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result9
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted_rev.result3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/cardinality.result4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/max_open_files.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/misc.result6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result60
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/partition.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result7
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result3
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/select.result6
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/skip_validate_tmp_table.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/statistics.result4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result5
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result4
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/r/xa.result1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/cardinality.test2
-rw-r--r--storage/rocksdb/mysql-test/rocksdb/t/disabled.def1
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/r/mdev12179.result18
-rw-r--r--storage/rocksdb/mysql-test/rocksdb_rpl/t/mdev12179.test85
-rw-r--r--storage/sequence/mysql-test/sequence/simple.result4
-rw-r--r--storage/sequence/mysql-test/sequence/simple.test2
-rw-r--r--storage/sphinx/ha_sphinx.cc8
-rw-r--r--storage/spider/ha_spider.cc10
-rw-r--r--storage/spider/mysql-test/spider/bugfix/include/cp932_column_deinit.inc13
-rw-r--r--storage/spider/mysql-test/spider/bugfix/include/cp932_column_init.inc29
-rw-r--r--storage/spider/mysql-test/spider/bugfix/include/group_by_order_by_limit_deinit.inc14
-rw-r--r--storage/spider/mysql-test/spider/bugfix/include/group_by_order_by_limit_init.inc54
-rw-r--r--storage/spider/mysql-test/spider/bugfix/include/wait_timeout_deinit.inc18
-rw-r--r--storage/spider/mysql-test/spider/bugfix/include/wait_timeout_init.inc56
-rw-r--r--storage/spider/mysql-test/spider/bugfix/r/cp932_column.result84
-rw-r--r--storage/spider/mysql-test/spider/bugfix/r/group_by_order_by_limit.result117
-rw-r--r--storage/spider/mysql-test/spider/bugfix/r/slave_trx_isolation.result2
-rw-r--r--storage/spider/mysql-test/spider/bugfix/r/wait_timeout.result130
-rw-r--r--storage/spider/mysql-test/spider/bugfix/t/cp932_column.cnf3
-rw-r--r--storage/spider/mysql-test/spider/bugfix/t/cp932_column.test80
-rw-r--r--storage/spider/mysql-test/spider/bugfix/t/group_by_order_by_limit.cnf4
-rw-r--r--storage/spider/mysql-test/spider/bugfix/t/group_by_order_by_limit.test97
-rw-r--r--storage/spider/mysql-test/spider/bugfix/t/wait_timeout.cnf4
-rw-r--r--storage/spider/mysql-test/spider/bugfix/t/wait_timeout.test109
-rw-r--r--storage/spider/mysql-test/spider/feature/include/slave_transaction_retry_errors_deinit.inc10
-rw-r--r--storage/spider/mysql-test/spider/feature/include/slave_transaction_retry_errors_init.inc10
-rw-r--r--storage/spider/mysql-test/spider/feature/my.cnf2
-rw-r--r--storage/spider/mysql-test/spider/feature/my_1_1.cnf44
-rw-r--r--storage/spider/mysql-test/spider/feature/my_2_1.cnf56
-rw-r--r--storage/spider/mysql-test/spider/feature/my_2_2.cnf38
-rw-r--r--storage/spider/mysql-test/spider/feature/my_2_3.cnf8
-rw-r--r--storage/spider/mysql-test/spider/feature/my_3_1.cnf11
-rw-r--r--storage/spider/mysql-test/spider/feature/my_3_2.cnf9
-rw-r--r--storage/spider/mysql-test/spider/feature/my_3_3.cnf9
-rw-r--r--storage/spider/mysql-test/spider/feature/my_4_1.cnf9
-rw-r--r--storage/spider/mysql-test/spider/feature/r/slave_transaction_retry_errors.result22
-rw-r--r--storage/spider/mysql-test/spider/feature/suite.opt1
-rw-r--r--storage/spider/mysql-test/spider/feature/suite.pm12
-rw-r--r--storage/spider/mysql-test/spider/feature/t/slave_transaction_retry_errors.cnf4
-rw-r--r--storage/spider/mysql-test/spider/feature/t/slave_transaction_retry_errors.test9
-rw-r--r--storage/spider/mysql-test/spider/include/checksum_table_with_quick_mode_3_deinit.inc2
-rw-r--r--storage/spider/mysql-test/spider/r/direct_join.result2
-rw-r--r--storage/spider/mysql-test/spider/r/partition_fulltext.result6
-rw-r--r--storage/spider/mysql-test/spider/r/pushdown_not_like.result63
-rw-r--r--storage/spider/mysql-test/spider/r/show_system_tables.result18
-rw-r--r--storage/spider/mysql-test/spider/r/slave_trx_isolation.result2
-rw-r--r--storage/spider/mysql-test/spider/r/timestamp.result20
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_key_deinit.inc12
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_key_init.inc36
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_pkey_deinit.inc12
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_pkey_init.inc36
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/my.cnf3
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/my_1_1.cnf44
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/my_2_1.cnf56
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/r/direct_join_by_pkey_key.result94
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/r/direct_join_by_pkey_pkey.result94
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/suite.opt1
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/suite.pm12
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/t/direct_join_by_pkey_key.test82
-rw-r--r--storage/spider/mysql-test/spider/regression/e1121/t/direct_join_by_pkey_pkey.test82
-rw-r--r--storage/spider/mysql-test/spider/regression/e112122/include/group_by_order_by_limit_ok_deinit.inc14
-rw-r--r--storage/spider/mysql-test/spider/regression/e112122/include/group_by_order_by_limit_ok_init.inc54
-rw-r--r--storage/spider/mysql-test/spider/regression/e112122/my.cnf4
-rw-r--r--storage/spider/mysql-test/spider/regression/e112122/my_1_1.cnf44
-rw-r--r--storage/spider/mysql-test/spider/regression/e112122/my_2_1.cnf56
-rw-r--r--storage/spider/mysql-test/spider/regression/e112122/my_2_2.cnf38
-rw-r--r--storage/spider/mysql-test/spider/regression/e112122/r/group_by_order_by_limit_ok.result117
-rw-r--r--storage/spider/mysql-test/spider/regression/e112122/suite.opt1
-rw-r--r--storage/spider/mysql-test/spider/regression/e112122/suite.pm12
-rw-r--r--storage/spider/mysql-test/spider/regression/e112122/t/group_by_order_by_limit_ok.test97
-rw-r--r--storage/spider/mysql-test/spider/t/connect_child2_1.inc1
-rw-r--r--storage/spider/mysql-test/spider/t/connect_child2_2.inc1
-rw-r--r--storage/spider/mysql-test/spider/t/pushdown_not_like.test138
-rw-r--r--storage/spider/mysql-test/spider/t/test_deinit.inc2
-rw-r--r--storage/spider/spd_conn.cc19
-rw-r--r--storage/spider/spd_conn.h5
-rw-r--r--storage/spider/spd_db_conn.cc199
-rw-r--r--storage/spider/spd_db_conn.h5
-rw-r--r--storage/spider/spd_db_handlersocket.cc29
-rw-r--r--storage/spider/spd_db_handlersocket.h9
-rw-r--r--storage/spider/spd_db_include.h16
-rw-r--r--storage/spider/spd_db_mysql.cc111
-rw-r--r--storage/spider/spd_db_mysql.h9
-rw-r--r--storage/spider/spd_db_oracle.cc49
-rw-r--r--storage/spider/spd_db_oracle.h9
-rw-r--r--storage/spider/spd_environ.h2
-rw-r--r--storage/spider/spd_include.h5
-rw-r--r--storage/spider/spd_malloc.cc15
-rw-r--r--storage/spider/spd_param.cc52
-rw-r--r--storage/spider/spd_param.h9
-rw-r--r--storage/spider/spd_sys_table.cc1
-rw-r--r--storage/spider/spd_table.cc110
-rw-r--r--storage/spider/spd_table.h15
-rw-r--r--storage/spider/spd_trx.cc42
-rw-r--r--storage/spider/spd_trx.h15
-rw-r--r--storage/tokudb/PerconaFT/portability/toku_crash.cc2
-rw-r--r--storage/tokudb/ha_tokudb.cc30
-rw-r--r--storage/tokudb/ha_tokudb.h1
-rw-r--r--storage/tokudb/ha_tokudb_alter_56.cc2
-rw-r--r--storage/tokudb/ha_tokudb_update.cc6
-rw-r--r--storage/tokudb/mysql-test/rpl/r/rpl_tokudb_bug28430.result12
-rw-r--r--storage/tokudb/mysql-test/rpl/r/rpl_tokudb_bug30888.result4
-rw-r--r--storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result40
-rw-r--r--storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ft.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/include/cluster_key.inc73
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_add_drop.result5
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_add_index.result6
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_drop_index.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_drop_index_2.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_drop_pk.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_no_keys.result1
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_pk.result1
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_pk_2.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_pk_sk.result5
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_scale_percent.result1
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_sk.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_sk_2.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/card_unique_sk.result1
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/cluster_2968-2.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/cluster_create_table.result8
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/cluster_delete.result128
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/cluster_filter_key.result13
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/cluster_key.result165
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/cluster_query_plan.result8
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/cluster_tokudb_bug_993_2.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/cluster_update.result115
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/compressions.result1
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result1
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/mvcc-29.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/mvcc-30.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/mvcc-31.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result12
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/type_bit.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/type_blob.result6
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/type_datetime.result10
-rw-r--r--storage/tokudb/mysql-test/tokudb/r/type_varchar.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/card_add_drop.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/card_add_index.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/card_drop_index.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/card_drop_index_2.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/card_drop_pk.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/card_pk_2.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/card_pk_sk.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/card_scale_percent.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/card_sk.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/card_sk_2.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/cluster_create_table.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/cluster_delete.test50
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/cluster_filter_key.test8
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/cluster_query_plan.test8
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/cluster_update.test43
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test8
-rw-r--r--storage/tokudb/mysql-test/tokudb/t/type_blob.test2
-rw-r--r--storage/tokudb/mysql-test/tokudb_alter_table/r/hcad_with_lock_sps.result12
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/2970.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/5733_innodb.result8
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/5733_tokudb.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result2
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/bulk_fetch.result8
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2_pick.result4
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result7
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/frm_store.result1
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/frm_store2.result1
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/frm_store3.result1
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/r/index_read.result6
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/bulk_fetch.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash.test3
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_1.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_1_pick.test3
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_2.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_2_pick.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/db757_part_alter_analyze.test3
-rw-r--r--storage/tokudb/mysql-test/tokudb_bugs/t/index_read.test4
-rw-r--r--storage/tokudb/mysql-test/tokudb_mariadb/r/clustering.result17
-rw-r--r--storage/tokudb/mysql-test/tokudb_mariadb/t/clustering.test5
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result56
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result32
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result80
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result40
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result40
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result40
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result40
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result216
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result64
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result262
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_engine_tokudb.result11
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc0_tokudb.result1
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc10_tokudb.result1
-rw-r--r--storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc1_tokudb.result1
-rw-r--r--storage/tokudb/mysql-test/tokudb_rpl/r/mdev12179.result18
-rw-r--r--storage/tokudb/mysql-test/tokudb_rpl/t/mdev12179.test85
-rw-r--r--storage/tokudb/tokudb_dir_cmd.cc4
-rw-r--r--storage/tokudb/tokudb_sysvars.cc2
-rw-r--r--strings/ctype-uca.c1444
-rw-r--r--strings/ctype-uca.ic839
-rw-r--r--strings/ctype-ucs2.c145
-rw-r--r--strings/ctype-ucs2.h32
-rw-r--r--strings/ctype-unidata.h31
-rw-r--r--strings/ctype-utf16.h80
-rw-r--r--strings/ctype-utf32.h33
-rw-r--r--strings/ctype-utf8.c364
-rw-r--r--strings/ctype-utf8.h190
-rw-r--r--strings/json_lib.c249
-rw-r--r--strings/strcoll.ic267
-rw-r--r--support-files/CMakeLists.txt8
-rw-r--r--support-files/compiler_warnings.supp1
-rw-r--r--support-files/rpm/server-postin.sh4
-rw-r--r--support-files/rpm/server-posttrans.sh7
-rw-r--r--support-files/rpm/server-preun.sh18
-rw-r--r--support-files/rpm/server.cnf4
-rw-r--r--tests/mysql_client_fw.c12
-rw-r--r--tests/mysql_client_test.c89
-rw-r--r--unittest/strings/CMakeLists.txt2
-rw-r--r--unittest/strings/json-t.c103
-rw-r--r--vio/CMakeLists.txt2
-rw-r--r--vio/docs/TODO3
-rw-r--r--vio/vio.c61
-rw-r--r--vio/vio_priv.h8
-rw-r--r--vio/viopipe.c1
-rw-r--r--vio/vioshm.c217
-rw-r--r--vio/viosocket.c52
-rw-r--r--win/packaging/CMakeLists.txt69
-rw-r--r--win/packaging/CPackWixConfig.cmake2
-rw-r--r--win/packaging/ca/CMakeLists.txt7
-rw-r--r--win/packaging/ca/CustomAction.cpp2
-rw-r--r--win/packaging/create_msi.cmake11
-rw-r--r--win/packaging/extra.wxs.in14
-rw-r--r--win/packaging/mysql_server.wxs.in2
-rw-r--r--win/upgrade_wizard/CMakeLists.txt18
m---------wsrep-lib0
-rw-r--r--wsrep/CMakeLists.txt26
-rw-r--r--wsrep/wsrep_api.h1117
-rw-r--r--wsrep/wsrep_dummy.c413
-rw-r--r--wsrep/wsrep_gtid.c74
-rw-r--r--wsrep/wsrep_loader.c226
-rw-r--r--wsrep/wsrep_uuid.c83
3078 files changed, 164736 insertions, 60882 deletions
diff --git a/.gitignore b/.gitignore
index ca9ca17fc34..0fb30cc3184 100644
--- a/.gitignore
+++ b/.gitignore
@@ -99,6 +99,7 @@ pcre/pcre_chartables.c
pcre/pcregrep
pcre/pcretest
pcre/test*grep
+plugin/auth_pam/auth_pam_tool
plugin/aws_key_management/aws-sdk-cpp
plugin/aws_key_management/aws_sdk_cpp
plugin/aws_key_management/aws_sdk_cpp-prefix
@@ -181,6 +182,7 @@ storage/maria/ma_sp_test
storage/maria/ma_test1
storage/maria/ma_test2
storage/maria/ma_test3
+storage/maria/test_ma_backup
storage/myisam/mi_test1
storage/myisam/mi_test2
storage/myisam/mi_test3
@@ -193,6 +195,7 @@ storage/myisam/sp_test
storage/rocksdb/ldb
storage/rocksdb/myrocks_hotbackup
storage/rocksdb/mysql_ldb
+storage/rocksdb/myrocks_hotbackup
storage/rocksdb/rdb_source_revision.h
storage/rocksdb/sst_dump
storage/tokudb/PerconaFT/buildheader/db.h
@@ -516,3 +519,7 @@ compile_commands.json
# Visual Studio Code workspace
.vscode/
+
+# CLion and other JetBrains IDEs
+.idea
+
diff --git a/.gitmodules b/.gitmodules
index 6419657e501..61d4c06dd4e 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,3 +4,7 @@
[submodule "storage/rocksdb/rocksdb"]
path = storage/rocksdb/rocksdb
url = https://github.com/facebook/rocksdb.git
+[submodule "wsrep-lib"]
+ path = wsrep-lib
+ url = https://github.com/codership/wsrep-lib.git
+ branch = master
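
The new submodule entry above is only picked up once submodules are initialised. A minimal sketch, assuming the usual GitHub clone URL for the server repository (URL and directory name are illustrative only):

    # Clone the server together with its registered submodules (rocksdb, wsrep-lib, ...).
    git clone --recurse-submodules https://github.com/MariaDB/server.git mariadb-server
    cd mariadb-server

    # In an existing checkout, fetch just the newly added wsrep-lib submodule.
    git submodule update --init --recursive wsrep-lib
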
diff --git a/.travis.compiler.sh b/.travis.compiler.sh
index 6b8de374219..98d8197d371 100755
--- a/.travis.compiler.sh
+++ b/.travis.compiler.sh
@@ -22,19 +22,23 @@ if [[ "${TRAVIS_OS_NAME}" == 'linux' ]]; then
exclude_modules;
if which ccache ; then
CMAKE_OPT="${CMAKE_OPT} -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
- ccache --max-size=2200M
fi
if [[ "${CXX}" == 'clang++' ]]; then
- export CXX CC=${CXX/++/}
+ if [[ "${CC_VERSION}" == '6' ]]; then
+ export CXX=${CXX}-${CC_VERSION}.0
+ else
+ export CXX=${CXX}-${CC_VERSION}
+ fi
+ export CC=${CXX/++/}
+      # suppress excess warnings about unused include paths
+ export CFLAGS='-Wno-unused-command-line-argument'
+ export CXXFLAGS='-Wno-unused-command-line-argument'
elif [[ "${CXX}" == 'g++' ]]; then
export CXX=g++-${CC_VERSION}
export CC=gcc-${CC_VERSION}
fi
- if [[ ${CC_VERSION} == 6 ]]; then
- wget http://mirrors.kernel.org/ubuntu/pool/universe/p/percona-xtradb-cluster-galera-2.x/percona-xtradb-cluster-galera-2.x_165-0ubuntu1_amd64.deb ;
- ar vx percona-xtradb-cluster-galera-2.x_165-0ubuntu1_amd64.deb
- tar -xJvf data.tar.xz
- export WSREP_PROVIDER=$PWD/usr/lib/libgalera_smm.so
+ if [[ ${CC_VERSION} == 7 ]]; then
+ export WSREP_PROVIDER=/usr/lib/galera/libgalera_smm.so
MYSQL_TEST_SUITES="${MYSQL_TEST_SUITES},wsrep"
fi
fi
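
The selection logic above turns a bare CC/CXX name plus CC_VERSION into the versioned binaries shipped by the toolchain PPAs. A rough equivalent outside Travis, where the version number is only an example:

    # Select a versioned clang, mirroring .travis.compiler.sh.
    export CC_VERSION=7
    export CXX=clang++-${CC_VERSION}
    export CC=${CXX/++/}      # clang++-7 -> clang-7

    # Silence clang's extra warnings about unused -I arguments, as in the hunk above.
    export CFLAGS='-Wno-unused-command-line-argument'
    export CXXFLAGS='-Wno-unused-command-line-argument'

    ${CC} --version && ${CXX} --version
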
diff --git a/.travis.yml b/.travis.yml
index 45adb875b61..9c862d496c1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,7 +2,7 @@
# travis-ci.org definition
sudo: false
-dist: trusty
+dist: xenial
git:
depth: 2
@@ -25,17 +25,24 @@ cache:
env:
matrix:
- - CC_VERSION=5 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=main
- - CC_VERSION=5 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=archive,optimizer_unfixed_bugs,parts,sys_vars,unit,vcol,innodb,innodb_gis,innodb_zip,innodb_fts
- - CC_VERSION=5 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=rpl
- - CC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=binlog,binlog_encryption,encryption
- - CC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=rocksdb,versioning
- - CC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=csv,federated,funcs_1,funcs_2,gcol,handler,heap,json,maria,perfschema,plugins,multi_source,roles
+ - CC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=main
+ - CC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=archive,optimizer_unfixed_bugs,parts,sys_vars,unit,vcol,innodb,innodb_gis,innodb_zip,innodb_fts
+ - CC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=rpl
+ - CC_VERSION=7 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=binlog,binlog_encryption,encryption
+ - CC_VERSION=7 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=rocksdb,versioning
+ - CC_VERSION=7 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=csv,federated,funcs_1,funcs_2,gcol,handler,heap,json,maria,perfschema,plugins,multi_source,roles
+ - CC_VERSION=8 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=main
matrix:
exclude:
- os: osx
compiler: gcc
+ - os: osx
+ compiler: clang
+ env: CC_VERSION=8 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=main
+ - os: linux
+ compiler: clang
+ env: CC_VERSION=8 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=main
include:
- os: linux
compiler: gcc
@@ -79,53 +86,58 @@ matrix:
- uuid-dev
- devscripts
- fakeroot
+ - dh-systemd
+ - libsystemd-dev
+ - libzstd-dev
+ - unixodbc-dev
script:
- ${CC} --version ; ${CXX} --version
- # https://github.com/travis-ci/travis-ci/issues/7062 - /run/shm isn't writable or executable
- # in trusty containers
- - export MTR_MEM=/tmp
- env DEB_BUILD_OPTIONS="parallel=4" debian/autobake-deb.sh;
- - ccache --show-stats
# Until OSX becomes a bit more stable: MDEV-12435 MDEV-16213
allow_failures:
- os: osx
compiler: clang
- env: CC_VERSION=5 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=rpl
+ env: CC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=rpl
- os: osx
compiler: clang
- env: CC_VERSION=5 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=main
+ env: CC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=main
- os: osx
compiler: clang
- env: CC_VERSION=5 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=archive,optimizer_unfixed_bugs,parts,sys_vars,unit,vcol,innodb,innodb_gis,innodb_zip,innodb_fts
+ env: CC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=archive,optimizer_unfixed_bugs,parts,sys_vars,unit,vcol,innodb,innodb_gis,innodb_zip,innodb_fts
- os: osx
compiler: clang
- env: CC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=binlog,binlog_encryption,encryption
+ env: CC_VERSION=7 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=binlog,binlog_encryption,encryption
- os: osx
compiler: clang
- env: CC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=rocksdb,versioning
+ env: CC_VERSION=7 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=rocksdb,versioning
- os: osx
compiler: clang
- env: CC_VERSION=6 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=csv,federated,funcs_1,funcs_2,gcol,handler,heap,json,maria,perfschema,plugins,multi_source,roles
+ env: CC_VERSION=7 TYPE=RelWithDebInfo MYSQL_TEST_SUITES=csv,federated,funcs_1,funcs_2,gcol,handler,heap,json,maria,perfschema,plugins,multi_source,roles
addons:
apt:
sources:
- ubuntu-toolchain-r-test
- - llvm-toolchain-trusty-5.0
- - llvm-toolchain-trusty-6.0
+ - llvm-toolchain-xenial-6.0
+ - llvm-toolchain-xenial-7
+ - sourceline: 'deb [arch=amd64,arm64,i386,ppc64el] http://ftp.osuosl.org/pub/mariadb/repo/10.4/ubuntu xenial main'
+ key_url: 'http://keyserver.ubuntu.com/pks/lookup?search=0xF1656F24C74CD1D8&op=get'
packages: # make sure these include all compilers and all build dependencies (see list above)
- - gcc-5
- - g++-5
- gcc-6
- g++-6
- - clang-5.0
- - llvm-5.0-dev
+ - gcc-7
+ - g++-7
+ - gcc-8
+ - g++-8
- clang-6.0
- llvm-6.0-dev
+ - clang-7
+ - llvm-7-dev
- bison
- chrpath
- cmake
- gdb
+ - galera-4
- libaio-dev
- libboost-dev
- libcurl3-dev
@@ -165,6 +177,7 @@ before_script:
- df -h
- ccache --version
- ccache --show-stats
+ - ccache --max-size=5G
script:
# following modules are disabled after sourcing .travis.compiler.sh:
@@ -182,6 +195,8 @@ script:
--suite=${MYSQL_TEST_SUITES}
--skip-test-list=unstable-tests
--skip-test=binlog.binlog_unsafe
+
+after_script:
- ccache --show-stats
- df -h
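
The test step drives mysql-test-run.pl with the suites chosen through MYSQL_TEST_SUITES. A hedged sketch of the equivalent local invocation, assuming an already built source tree and reusing one of the matrix entries and the flags shown above:

    # Run one of the Travis suite combinations locally from a built source tree.
    cd mysql-test
    perl mysql-test-run.pl \
      --suite=binlog,binlog_encryption,encryption \
      --skip-test-list=unstable-tests \
      --skip-test=binlog.binlog_unsafe
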
diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh
index 7e6f0831423..6cf2a18404c 100755
--- a/BUILD/SETUP.sh
+++ b/BUILD/SETUP.sh
@@ -141,7 +141,7 @@ elif [ "x$warning_mode" = "xmaintainer" ]; then
debug_extra_cflags="-g3"
else
# Both C and C++ warnings
- warnings="-Wall -Wextra -Wunused -Wwrite-strings -Wno-uninitialized -Wno-strict-aliasing"
+ warnings="-Wall -Wextra -Wunused -Wwrite-strings -Wno-uninitialized -Wno-strict-aliasing -Wimplicit-fallthrough=2"
# For more warnings, uncomment the following line
# warnings="$warnings -Wshadow"
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6615106093d..7480b76b5ae 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -78,6 +78,14 @@ ELSE()
ENDIF()
PROJECT(${MYSQL_PROJECT_NAME})
+IF(CMAKE_VERSION VERSION_LESS "3.1")
+ IF(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++11")
+ ENDIF()
+ELSE()
+ SET(CMAKE_CXX_STANDARD 11)
+ENDIF()
+
SET(CPACK_PACKAGE_NAME "MariaDB")
SET(CPACK_PACKAGE_DESCRIPTION_SUMMARY "MariaDB: a very fast and robust SQL database server")
SET(CPACK_PACKAGE_URL "http://mariadb.org")
@@ -136,6 +144,7 @@ INCLUDE(misc)
INCLUDE(mysql_version)
INCLUDE(cpack_source_ignore_files)
INCLUDE(install_layout)
+INCLUDE(submodules)
INCLUDE(wsrep)
INCLUDE(cpack_rpm)
INCLUDE(cpack_deb)
@@ -385,7 +394,6 @@ ENDIF()
SET (MYSQLD_STATIC_PLUGIN_LIBS "" CACHE INTERNAL "")
-INCLUDE(submodules)
INCLUDE(mariadb_connector_c) # this does ADD_SUBDIRECTORY(libmariadb)
# Add storage engines and plugins.
@@ -413,7 +421,7 @@ IF(NOT WITHOUT_SERVER)
ENDIF(WITH_EMBEDDED_SERVER)
IF(WITH_WSREP)
- ADD_SUBDIRECTORY(wsrep)
+ ADD_SUBDIRECTORY(wsrep-lib)
ENDIF()
ADD_SUBDIRECTORY(mysql-test)
@@ -431,13 +439,10 @@ ENDIF()
INCLUDE(cmake/abi_check.cmake)
INCLUDE(cmake/tags.cmake)
-
-
-
-
INCLUDE(for_clients)
ADD_SUBDIRECTORY(scripts)
ADD_SUBDIRECTORY(support-files)
+ADD_SUBDIRECTORY(extra/aws_sdk)
IF(NOT CMAKE_CROSSCOMPILING)
SET(EXPORTED comp_err comp_sql factorial)
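
The new block near the top of CMakeLists.txt forces C++11: CMAKE_CXX_STANDARD on CMake 3.1 and later, with -std=gnu++11 as a fallback for older GNU toolchains. A minimal out-of-source configure-and-build sketch; the build directory, build type and job count are arbitrary:

    # Out-of-source configure; with CMake >= 3.1 the block above sets
    # CMAKE_CXX_STANDARD=11, older CMake falls back to -std=gnu++11 for GCC.
    mkdir -p build && cd build
    cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo
    make -j"$(nproc)"
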
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000000..64af450d29f
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,47 @@
+# How to contribute
+
+MariaDB Server has a vibrant community contributing in a wide range of areas. There are many valuable ways you can contribute to MariaDB.
+
+### Engage online with other community members
+---
+- [MariaDB on Zulip](https://mariadb.zulipchat.com/)
+- [maria-developers mailing list](http://launchpad.net/~maria-developers)
+- [maria-discuss mailing list](http://launchpad.net/~maria-discuss)
+- [maria-docs mailing list](http://launchpad.net/~maria-docs)
+- ircs://chat.freenode.net/maria ([see the IRC page on the Knowledge Base](https://mariadb.com/kb/en/meta/irc-chat-servers-and-zulip-instance/) for help with IRC).
+- The MariaDB Foundation and MariaDB Corporation have a presence on Reddit, Twitter, Facebook and Google Plus. See the [social media page](https://mariadb.com/kb/en/mariadb/social-media/).
+
+### Help document MariaDB
+----
+- Contribute towards [documenting MariaDB Server](https://mariadb.com/kb/en/meta/writing-editing-library-articles/) and its ecosystem by adding new content or improving existing content.
+- [Translate](https://mariadb.com/kb/en/meta/translating-library-articles/) existing documentation.
+
+### Help debug and develop MariaDB
+-----
+- [Report bugs](https://jira.mariadb.org/)
+- Test development versions
+- Write code to fix bugs or develop new features (see [Getting Started for Developers](https://mariadb.org/getting-started-for-developers)). See also the [list of beginner-friendly tasks](https://jira.mariadb.org/browse/MDEV-15736?jql=resolution%20%3D%20Unresolved%20AND%20labels%20%3D%20beginner-friendly%20ORDER%20BY%20updated%20DESC).
+- Help with code quality control
+- Participate in packaging for different Linux distributions
+
+### Sponsor or donate
+---
+You’re very welcome to support MariaDB Server as an individual, or talk your company into joining the Foundation as a sponsoring member. See the [Sponsor page](https://mariadb.org/donate/).
+
+### Events, meetups and conferences
+---
+- Attend an event
+ - [Events and Conferences page](https://mariadb.org/events/)
+ - [mariadb.meetup.com](http://mariadb.meetup.com/)
+
+### Live QA for beginner contributors
+----
+MariaDB has a dedicated time each week when we answer new contributor questions live on Zulip and IRC.
+From 8:00 to 10:00 UTC on Mondays, and 10:00 to 12:00 UTC on Thursdays, anyone can ask any questions they’d like,
+and a live developer will be available to assist.
+New contributors can ask questions any time, but we will provide immediate feedback during that interval.
+
+### Additional resources
+----
+ - [MariaDB Foundation](https://mariadb.org/)
+ - [Knowledge Base](https://mariadb.com/kb/en/)
diff --git a/Docs/INSTALL-BINARY b/Docs/INSTALL-BINARY
index 7ff33c7051e..2bd6daaea17 100644
--- a/Docs/INSTALL-BINARY
+++ b/Docs/INSTALL-BINARY
@@ -1,7 +1,9 @@
MariaDB and MySQL have identical install methods. In this document we
-describe how to install MariaDB; However all documentation at www.mysql.com
-also applies.
+describe how to install MariaDB.
+The full documentation for installing MariaDB can be found at
+https://mariadb.com/kb/en/library/binary-packages/
+However, most documentation at www.mysql.com also applies.
2.2. Installing MariaDB from Generic Binaries on Unix/Linux
@@ -33,7 +35,8 @@ also applies.
If you run into problems and need to file a bug report,
please report them to: http://mariadb.org/jira
- See the instructions in Section 1.6, "How to Report Bugs or Problems."
+ See the instructions at
+ https://mariadb.com/kb/en/mariadb-community-bug-reporting
The basic commands that you must execute to install and use a
MariaDB binary distribution are:
@@ -79,10 +82,9 @@ shell> useradd -g mysql mysql
is protected, you must perform the installation as root.)
shell> cd /usr/local
- 3. Obtain a distribution file using the instructions in Section
- 2.1.3, "How to Get MariaDB." For a given release, binary
- distributions for all platforms are built from the same MariaDB
- source distribution.
+ 3. Obtain a distribution file using the instructions at
+ https://mariadb.com/kb/en/library/where-to-download-mariadb/
+      The instructions below describe how to install MariaDB from a tar file.
4. Unpack the distribution, which creates the installation
directory. Then create a symbolic link to that directory:
@@ -149,8 +151,8 @@ shell> chown -R mysql data
machine, you can copy support-files/mysql.server to the
location where your system has its startup files. More
information can be found in the support-files/mysql.server
- script itself and in Section 2.13.1.2, "Starting and Stopping
- MariaDB Automatically."
+ script itself and at
+ https://mariadb.com/kb/en/starting-and-stopping-mariadb-automatically.
10. You can set up new accounts using the bin/mysql_setpermission
script if you install the DBI and DBD::mysql Perl modules. See
Section 4.6.14, "mysql_setpermission --- Interactively Set
@@ -181,8 +183,8 @@ shell> bin/mysqld_safe --user=mysql &
find some information in the host_name.err file in the data
directory.
- More information about mysqld_safe is given in Section 4.3.2,
- "mysqld_safe --- MySQL Server Startup Script."
+ More information about mysqld_safe can be found at
+ https://mariadb.com/kb/en/mysqld_safe
Note
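
Condensed from the surrounding instructions, a hedged sketch of the basic tar-file install; the archive name is a placeholder, and mysql_install_db is assumed to be present in the unpacked binary distribution:

    # Create the user and group the server will run as.
    groupadd mysql
    useradd -g mysql mysql

    # Unpack the binary tarball under /usr/local and create the usual symlink.
    cd /usr/local
    tar zxf /path/to/mariadb-VERSION-OS.tar.gz    # placeholder archive name
    ln -s mariadb-VERSION-OS mysql
    cd mysql

    # Initialise the data directory, hand it to the mysql user, then start the server.
    scripts/mysql_install_db --user=mysql
    chown -R mysql data
    bin/mysqld_safe --user=mysql &
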
diff --git a/Docs/glibc-2.2.5.patch b/Docs/glibc-2.2.5.patch
deleted file mode 100644
index ef5d40b6899..00000000000
--- a/Docs/glibc-2.2.5.patch
+++ /dev/null
@@ -1,137 +0,0 @@
-diff -r -c --exclude='*.info*' glibc-2.2.5.org/linuxthreads/internals.h glibc-2.2.5/linuxthreads/internals.h
-*** glibc-2.2.5.org/linuxthreads/internals.h Thu Nov 29 08:44:16 2001
---- glibc-2.2.5/linuxthreads/internals.h Tue May 21 10:51:53 2002
-***************
-*** 343,349 ****
- THREAD_SELF implementation is used, this must be a power of two and
- a multiple of PAGE_SIZE. */
- #ifndef STACK_SIZE
-! #define STACK_SIZE (2 * 1024 * 1024)
- #endif
-
- /* The initial size of the thread stack. Must be a multiple of PAGE_SIZE. */
---- 343,349 ----
- THREAD_SELF implementation is used, this must be a power of two and
- a multiple of PAGE_SIZE. */
- #ifndef STACK_SIZE
-! #define STACK_SIZE (128 * 1024)
- #endif
-
- /* The initial size of the thread stack. Must be a multiple of PAGE_SIZE. */
-diff -r -c --exclude='*.info*' glibc-2.2.5.org/linuxthreads/sysdeps/unix/sysv/linux/bits/local_lim.h glibc-2.2.5/linuxthreads/sysdeps/unix/sysv/linux/bits/local_lim.h
-*** glibc-2.2.5.org/linuxthreads/sysdeps/unix/sysv/linux/bits/local_lim.h Thu Jun 8 21:49:49 2000
---- glibc-2.2.5/linuxthreads/sysdeps/unix/sysv/linux/bits/local_lim.h Tue May 21 10:52:58 2002
-***************
-*** 64,70 ****
- /* The number of threads per process. */
- #define _POSIX_THREAD_THREADS_MAX 64
- /* This is the value this implementation supports. */
-! #define PTHREAD_THREADS_MAX 1024
-
- /* Maximum amount by which a process can descrease its asynchronous I/O
- priority level. */
---- 64,70 ----
- /* The number of threads per process. */
- #define _POSIX_THREAD_THREADS_MAX 64
- /* This is the value this implementation supports. */
-! #define PTHREAD_THREADS_MAX 4096
-
- /* Maximum amount by which a process can descrease its asynchronous I/O
- priority level. */
-diff -r -c --exclude='*.info*' glibc-2.2.5.org/nss/nsswitch.c glibc-2.2.5/nss/nsswitch.c
-*** glibc-2.2.5.org/nss/nsswitch.c Tue Jul 17 10:21:36 2001
---- glibc-2.2.5/nss/nsswitch.c Tue May 21 10:59:55 2002
-***************
-*** 496,501 ****
---- 496,502 ----
- {
- service_user *new_service;
- const char *name;
-+ int name_alloc_len;
-
- while (isspace (line[0]))
- ++line;
-***************
-*** 510,522 ****
- if (name == line)
- return result;
-
-
- new_service = (service_user *) malloc (sizeof (service_user)
-! + (line - name + 1));
- if (new_service == NULL)
- return result;
-
-! *((char *) __mempcpy (new_service->name, name, line - name)) = '\0';
-
- /* Set default actions. */
- new_service->actions[2 + NSS_STATUS_TRYAGAIN] = NSS_ACTION_CONTINUE;
---- 511,534 ----
- if (name == line)
- return result;
-
-+ name_alloc_len = line - name + 1;
-+
-+ #ifdef DO_STATIC_NSS
-+ if (!((name_alloc_len == 6 && strncmp(name,"files",5) == 0) ||
-+ (name_alloc_len == 4 && strncmp(name,"dns",3) == 0)))
-+ {
-+ name = (char*) "files";
-+ name_alloc_len = 6;
-+ }
-+ #endif
-
- new_service = (service_user *) malloc (sizeof (service_user)
-! + name_alloc_len);
- if (new_service == NULL)
- return result;
-
-! *((char *) __mempcpy (new_service->name, name, name_alloc_len-1)) = '\0';
-!
-
- /* Set default actions. */
- new_service->actions[2 + NSS_STATUS_TRYAGAIN] = NSS_ACTION_CONTINUE;
-diff -r -c --exclude='*.info*' glibc-2.2.5.org/time/Makefile glibc-2.2.5/time/Makefile
-*** glibc-2.2.5.org/time/Makefile Fri Aug 10 01:59:41 2001
---- glibc-2.2.5/time/Makefile Tue May 21 11:01:11 2002
-***************
-*** 37,44 ****
-
- include ../Rules
-
-! tz-cflags = -DTZDIR='"$(zonedir)"' \
-! -DTZDEFAULT='"$(localtime-file)"' \
- -DTZDEFRULES='"$(posixrules-file)"'
-
- CFLAGS-tzfile.c = $(tz-cflags)
---- 37,44 ----
-
- include ../Rules
-
-! tz-cflags = -DTZDIR='"/usr/share/zoneinfo/"' \
-! -DTZDEFAULT='"/etc/localtime"' \
- -DTZDEFRULES='"$(posixrules-file)"'
-
- CFLAGS-tzfile.c = $(tz-cflags)
-diff -r -c --exclude='*.info*' glibc-2.2.5.org/timezone/Makefile glibc-2.2.5/timezone/Makefile
-*** glibc-2.2.5.org/timezone/Makefile Thu Aug 30 00:45:25 2001
---- glibc-2.2.5/timezone/Makefile Tue May 21 11:01:57 2002
-***************
-*** 159,166 ****
-
- $(objpfx)zic: $(objpfx)scheck.o $(objpfx)ialloc.o
-
-! tz-cflags = -DTZDIR='"$(zonedir)"' \
-! -DTZDEFAULT='"$(localtime-file)"' \
- -DTZDEFRULES='"$(posixrules-file)"' \
- -DTM_GMTOFF=tm_gmtoff -DTM_ZONE=tm_zone
-
---- 159,166 ----
-
- $(objpfx)zic: $(objpfx)scheck.o $(objpfx)ialloc.o
-
-! tz-cflags = -DTZDIR='"/usr/share/zoneinfo/"' \
-! -DTZDEFAULT='"/etc/localtime"' \
- -DTZDEFRULES='"$(posixrules-file)"' \
- -DTM_GMTOFF=tm_gmtoff -DTM_ZONE=tm_zone
-
diff --git a/Docs/linuxthreads.txt b/Docs/linuxthreads.txt
deleted file mode 100644
index 552415fe794..00000000000
--- a/Docs/linuxthreads.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-[Note this information is obsolete]
-
-Notes on compiling glibc for the standard MySQL binary:
-
- - make sure you have gcc 2.95 and gmake 3.79 or newer
- - wget ftp://ftp.gnu.org/pub/gnu/glibc/glibc-2.2.5.tar.gz
- - wget ftp://ftp.gnu.org/pub/gnu/glibc/glibc-linuxthreads-2.2.5.tar.gz
- - tar zxvf glibc-2.2.5.tar.gz ; cd glibc-2.2.5 ;
- tar zxvf ../glibc-linuxthreads-2.2.5.tar.gz
- - patch -p1 < ~/bk/mysql/Docs/glibc-2.2.5.patch
- - ./configure --prefix=/usr/local/mysql-glibc --enable-static-nss \
- --disable-shared --enable-add-ons=linuxthreads --target=i386 \
- --host=i386-pc-linux-gnu
- - make
- - possible problems - if compiler is not properly installed, one can get
- "cpp: too many input" files error - easiest way to solve - SUSE RPM for gcc
- 2.95
- - surun make install
- - To build the binaries, run Build-tools/Do-linux-build
diff --git a/Docs/sp-imp-spec.txt b/Docs/sp-imp-spec.txt
deleted file mode 100644
index 52389ea50f4..00000000000
--- a/Docs/sp-imp-spec.txt
+++ /dev/null
@@ -1,1100 +0,0 @@
-
- Implementation specification for Stored Procedures
- ==================================================
-
-
-- How parsing and execution of queries work
-
- In order to execute a query, the function sql_parse.cc:mysql_parse() is
- called, which in turn calls the parser (yyparse()) with an updated Lex
- structure as the result. mysql_parse() then calls mysql_execute_command()
- which dispatches on the command code (in Lex) to the corresponding code for
- executing that particular query.
-
- There are three structures involved in the execution of a query which are of
- interest to the stored procedure implementation:
-
- - Lex (mentioned above) is the "compiled" query, that is the output from
- the parser and what is then interpreted to do the actual work.
- It constains an enum value (sql_command) which is the query type, and
- all the data collected by the parser needed for the execution (table
- names, fields, values, etc).
- - THD is the "run-time" state of a connection, containing all that is
- needed for a particular client connection, and, among other things, the
- Lex structure currently being executed.
- - Item_*: During parsing, all data is translated into "items", objects of
- the subclasses of "Item", such as Item_int, Item_real, Item_string, etc,
- for basic datatypes, and also various more specialized Item types for
- expressions to be evaluated (Item_func objects).
-
-
-- How to fit Stored Procedure into this scheme
-
- - An overview of the classes and files for stored procedures
- (More detailed APIs at the end of this file)
-
- - class sp_head (sp_head.{cc,h})
- This contains, among other things, an array of "instructions" and the
- method for executing the procedure.
-
- - class sp_pcontext (sp_pcontext.{cc,h}
- This is the parse context for the procedure. It's primarily used during
- parsing to keep track of local parameters, variables and labels, but
- it's also used at CALL time do find parameters mode (IN, OUT or INOUT)
- and type when setting up the runtime context.
-
- - class sp_instr (sp_head.{cc,h})
- This is the base class for "instructions", that is, what is generated
- by the parser. It turns out that we only need a minimum of 5 different
- sub classes:
- - sp_instr_stmt
- Execute a statement. This is the "call-out" any normal SQL statement,
- like a SELECT, INSERT etc. It contains the Lex structure for the
- statement in question.
- - sp_instr_set
- Set the value of a local variable (or parameter)
- - sp_instr_jump
- An unconditional jump.
- - sp_instr_jump_if_not
- Jump if condition is not true. It turns out that the negative test is
- most convenient when generating the code for the flow control
- constructs.
- - sp_instr_freturn
- Return a value from a FUNCTION and exit.
- For condition HANDLERs some special instructions are also needed, see
- that section below.
-
- - class sp_rcontext (sp_rcontext.h)
- This is the runtime context in the THD structure.
- It contains an array of items, the parameters and local variables for
- the currently executing stored procedure.
- This means that variable value lookup is in runtime is constant time,
- a simple index operation.
-
- - class Item_splocal (Item.{cc,h})
- This is a subclass of Item. Its sole purpose is to hide the fact that
- the real Item is actually in the current frame (runtime context).
- It contains the frame offset and defers all methods to the real Item
- in the frame. This is what the parser generates for local variables.
-
- - Utility functions (sp.{cc,h})
- This contains functions for creating, dropping and finding a stored
- procedure in the mysql.proc table (or the internal cache).
-
-
- - Parsing CREATE PROCEDURE ...
-
- When parsing a CREATE PROCEDURE the parser first initializes the
- sphead and spcont (runtime context) fields in the Lex.
- The sql_command code for the result of parsing a is
- SQLCOM_CREATE_PROCEDURE.
-
- The parsing of the parameter list and body is relatively
- straight-forward:
-
- - Parameters:
- name, type and mode (IN/OUT/INOUT) is pushed to spcont
- - Declared local variables:
- Same as parameters (mode is then IN)
- - Local Variable references:
- If an identifier is found in in spcont, an Item_splocal is created
- with the variable's frame index, otherwise an Item_field or Item_ref
- is created (as before).
- - Statements:
- The Lex in THD is replaced by a new Lex structure and the statement,
- is parsed as usual. A sp_instr_stmt is created, containing the new
- Lex, and added to the instructions in sphead.
- Afterwards, the procedure's Lex is restored in THD.
- - SET var:
- Setting a local variable generates a sp_instr_set instruction,
- containing the variable's frame offset, the expression (an Item),
- and the type.
- - Flow control:
- Flow control constructs like, IF, WHILE, etc, generate a conditional
- and unconditional jumps in the "obvious" way, but a few notes may
- be required:
- - Forward jumps: When jumping forward, the exact destination is not
- known at the time of the creation of the jump instruction. The
- sphead therefore contains list of instruction-label pairs for
- each forward reference. When the position later is known, the
- instructions in the list are updated with the correct location.
- - Loop constructs have optional labels. If a loop doesn't have a
- label, an anonymous label is generated to simplify the parsing.
- - There are two types of CASE. The "simple" case is implemented
- with an anonymous variable bound to the value to be tested.
-
-
- - A simple example
-
- Parsing the procedure:
-
- create procedure a(s char(16))
- begin
- declare x int;
- set x = 3;
- while x > 0 do
- set x = x-1;
- insert into db.tab values (x, s);
- end while;
- end
-
- would generate the following structures:
- ______
- thd: | | _________
- | lex -+--->| | ___________________
- |______| | spcont -+------------------->| "s",in,char(16):0 |
- | sphead -+------ |("x",in,int :1)|
- |_________| | |___________________|
- ____V__________________
- | m_name: "a" |
- | m_defstr: "create ..."|
- | m_instr: ... |
- |_______________________|
-
- Note that the contents of the spcont is changing during the parsing,
- at all times reflecting the state of the would-be runtime frame.
- The m_instr is an array of instructions:
-
- Pos. Instruction
- 0 sp_instr_set(1, '3')
- 1 sp_instr_jump_if_not(5, 'x>0')
- 2 sp_instr_set(1, 'x-1')
- 3 sp_instr_stmt('insert into ...')
- 4 sp_instr_jump(1)
- 5 <end>
-
- Here, '3', 'x>0', etc, represent the Items or Lex for the respective
- expressions or statements.
-
-
- - Parsing CREATE FUNCTION ...
-
- Creating a functions is essentially the same thing as for a PROCEDURE,
- with the addition that a FUNCTION has a return type and a RETURN
- statement, but no OUT or INOUT parameters.
-
- The main difference during parsing is that we store the result type
- in the sp_head. However, there are big differences when it comes to
- invoking a FUNCTION. (See below.)
-
-
- - Storing, caching, dropping...
-
- As seen above, the entired definition string, including the "CREATE
- PROCEDURE" (or "FUNCTION") is kept. The procedure definition string is
- stored in the table mysql.proc with the name and type as the key, the
- type being one of the enum ("procedure","function").
-
- A PROCEDURE is just stored in the mysql.proc table. A FUNCTION has an
- additional requirement. They will be called in expressions with the same
- syntax as UDFs, so UDFs and stored FUNCTIONs share the namespace. Thus,
- we must make sure that we do not have UDFs and FUNCTIONs with the same
- name (even if they are stored in different places).
-
- This means that we can reparse the procedure as many time as we want.
- The first time, the resulting Lex is used to store the procedure in
- the database (using the function sp.c:sp_create_procedure()).
-
- The simplest way would be to just leave it at that, and re-read the
- procedure from the database each time it is called. (And in fact, that's
- the way the earliest implementation will work.)
- However, this is not very efficient, and we can do better. The full
- implementation should work like this:
-
- 1) Upon creation time, parse and store the procedure. Note that we still
- need to parse it to catch syntax errors, but we can't check if called
- procedures exists for instance.
- 2) Upon first CALL, read from the database, parse it, and cache the
- resulting Lex in memory. This time we can do more error checking.
- 3) Upon subsequent CALLs, use the cached Lex.
-
- Note that this implies that the Lex structure with its sphead must be
- reentrant, that is, reusable and shareable between different threads
- and calls. The runtime state for a procedure is kept in the sp_rcontext
- in THD.
-
- The mechanisms of storing, finding, and dropping procedures are
- encapsulated in the files sp.{cc,h}.
-
-
- - CALLing a procedure
-
- A CALL is parsed just like any statement. The resulting Lex has the
- sql_command SQLCOM_CALL, the procedure's name and the parameters are
- pushed to the Lex' value_list.
-
- sql_parse.cc:mysql_execute_command() then uses sp.cc:sp_find() to
- get the sp_head for the procedure (which may have been read from the
- database or fetched from the in-memory cache) and calls the sp_head's
- method execute().
- Note: It's important that substatements called by the procedure do not
- do send_ok(). Fortunately, there is a flag in THD->net to disable
- this during CALLs. If a substatement fails, it will however send
- an error back to the client, so the CALL mechanism must return
- immediately and without sending an error.
-
- The sp_head::execute() method works as follows:
-
- 1) Keep a pointer to the old runtime context in THD (if any)
- 2) Create a new runtime context. The information about the required size
- is in sp_head's parse time context.
- 3) Push each parameter (from the CALL's Lex->value_list) to the new
- context. If it's an OUT or INOUT parameter, the parameter's offset
- in the caller's frame is set in the new context as well.
- 4) For each instruction, call its execute() method.
- The result is a pointer to the next instruction to execute (or NULL)
- if an error occurred.
- 5) On success, set the new values of the OUT and INOUT parameters in
- the caller's frame.
-
- - USE database
-
- Before executing the instruction we also keeps the current default
- database (if any). If this was changed during execution (i.e. a "USE"
- statement has been executed), we restore the current database to the
- original.
-
- This is the most useful way to handle USE in procedures. If we didn't,
- the caller would find himself in a different database after calling
- a function, which can be confusing.
- Restoring the database also gives full freedom to the procedure writer:
- - It's possible to write "general" procedures that are independent of
- the actual database name.
- - It's possible to write procedures that work on a particular database
- by calling USE, without having to use fully qualified table names
- everywhere (which doesn't help if you want to call other, "general",
- procedures anyway).
-
- - Evaluating Items
-
- There are three occasions where we need to evaluate an expression:
-
- - When SETing a variable
- - When CALLing a procedure
- - When testing an expression for a branch (in IF, WHILE, etc)
-
- The semantics in stored procedures is "call-by-value", so we have to
- evaluate any "func" Items at the point of the CALL or SET, otherwise
- we would get a kind of "lazy" evaluation with unexpected results with
- respect to OUT parameters for instance.
- For this the support function, sp_head.cc:eval_func_item() is needed.
-
-
- - Calling a FUNCTION
-
- Functions don't have an explicit call keyword like procedures. Instead,
- they appear in expressions with the conventional syntax "fun(arg, ...)".
- The problem is that we already have User Defined Functions (UDFs) which
- are called the same way. A UDF is detected by the lexical analyzer (not
- the parser!), in the find_keyword() function, and returns a UDF_*_FUNC
- or UDA_*_SUM token with the udf_func object as the yylval.
-
- So, stored functions must be handled in a simpilar way, and as a
- consequence, UDFs and functions must not have the same name.
-
- - Detecting and parsing a FUNCTION invocation
-
- The existence of UDFs are checked during the lexical analysis (in
- sql_lex.cc:find_keyword()). This has the drawback that they must
- exist before they are referred to, which was ok before SPs existed,
- but then it becomes a problem. The first implementation of SP FUNCTIONs
- will work the same way, but this should be fixed a.s.a.p. (This will
- required some reworking of the way UDFs are handled, which is why it's
- not done from the start.)
- For the time being, a FUNCTION is detected the same way, and returns
- the token SP_FUNC. During the parsing we only check for the *existence*
- of the function, we don't parse it, since wa can't call the parser
- recursively.
-
- When encountering a SP_FUNC with parameters in the expression parser,
- an instance of the new Item_func_sp class is created. Unlike UDFs, we
- don't have different classes for different return types, since we at
- this point don't know the type.
-
- - Collecting FUNCTIONs to invoke
-
- A FUNCTION differs from a PROCEDURE in one important aspect: Whereas a
- PROCEDURE is CALLed as statement by itself, a FUNCTION is invoked
- "on-the-fly" during the execution of *another* statement.
- This makes things a lot more complicated compared to CALL:
- - We can't read and parse the FUNCTION from the mysql.proc table at the
- point of invocation; the server requires that all tables used are
- opened and locked at the beginning of the query execution.
- One "obvious" solution would be to simply push "mysql.proc" to the list
- of tables used by the query, but this implies a "join" with this table
- if the query is a select, so it doesn't work (and we can't exclude this
- table easily; since a privileged used might in fact want to search
- the proc table).
- Another solution would of course be to allow the opening and closing
- of the mysql.proc table during a query execution, but this it not
- possible at the present.
-
- So, the solution is to collect the names of the referred FUNCTIONs during
- parsing in the lex.
- Then, before doing anything else in mysql_execute_command(), read all
- functions from the database an keep them in the THD, where the function
- sp_find_function() can find them during the execution.
- Note: Even with an in-memory cache, we must still make sure that the
- functions are indeed read and cached at this point.
- The code that read and cache functions from the database must also be
- invoked recursively for each read FUNCTION to make sure we have *all* the
- functions we need.
-
-
- - Parsing DROP PROCEDURE/FUNCTION
-
- The procedure name is pushed to Lex->value_list.
- The sql_command code for the result of parsing a is
- SQLCOM_DROP_PROCEDURE/SQLCOM_DROP_FUNCTION.
-
- Dropping is done by simply getting the procedure with the sp_find()
- function and calling sp_drop() (both in sp.{cc,h}).
-
- DROP PROCEDURE/FUNCTION also supports the non-standard "IF EXISTS",
- analogous to other DROP statements in MySQL.
-
-
- - Condition and Handlers
-
- Condition names are lexical entities and are kept in the parser context
- just like variables. But, condition are just "aliases" for SQLSTATE
- strings, or mysqld error codes (which is a non-standard extension in
- MySQL), and are only used during parsing.
-
- Handlers comes in three types, CONTINUE, EXIT and UNDO. The latter is
- like an EXIT handler with an implicit rollback, and is currently not
- implemented.
- The EXIT handler jumps to the end of its BEGIN-END block when finished.
- The CONTINUE handler returns to the statement following that which
- invoked the handler.
-
- The handlers in effect at any point is part of each thread's runtime
- state, so we need to push and pop handlers in the sp_rcontext during
- execution. We use special instructions for this:
- - sp_instr_hpush_jump
- Push a handler. The instruction contains the necessary information,
- like which conditions we handle and the location of the handler.
- The jump takes us to the location after the handler code.
- - sp_instr_hpop
- Pop the handlers of the current frame (which we are just leaving).
-
- It might seems strange to jump past the handlers like that, but there's
- no extra cost in doing this, and for technical reasons it's easiest for
- the parser to generate the handler instructions when they occur in the
- source.
-
- When an error occurs, one of the error routines is called and an error
- message is normally sent back to the client immediately.
- Catching a condition must be done in these error routines (there are
- quite a few) to prevent them from doing this. We do this by calling
- a method in the THD's sp_rcontext (if there is one). If a handler is
- found, this is recorded in the context and the routine returns without
- sending the error message.
- The execution loop (sp_head::execute()) checks for this after each
- statement and invokes the handler that has been found. If several
- errors or warnings occurs during one statement, only the first is
- caught, the rest are ignored.
-
- Invoking and returning from a handler is trivial in the EXIT case.
- We simply jump to it, and it will have an sp_instr_jump as its last
- instruction.
-
- Calling and returning from a CONTINUE handler poses some special
- problems. Since we need to return to the point after its invocation,
- we push the return location on a stack in the sp_rcontext (this is
- done by the execution loop). The handler then ends with a special
- instruction, sp_instr_hreturn, which returns to this location.
-
- CONTINUE handlers have one additional problem: They are parsed at
- the lexical level where they occur, so variable offsets will assume
- that it's actually called at that level. However, a handler might be
- invoked from a sub-block where additional local variables have been
- declared, which will then share the location of any local variables
- in the handler itself. So, when calling a CONTINUE handler, we need
- to save any local variables above the handler's frame offset, and
- restore them upon return. (This is not a problem for EXIT handlers,
- since they will leave the block anyway.)
- This is taken care of by the execution loop and the sp_instr_hreturn
- instruction.
-
- - Examples:
-
- - EXIT handler
- begin
- declare x int default 0;
-
- begin
- declare exit handler for 'XXXXX' set x = 1;
-
- (statement1);
- (statement2);
- end;
- (statement3);
- end
-
- Pos. Instruction
- 0 sp_instr_set(0, '0')
- 1 sp_instr_hpush_jump(4, 1) # location and frame size
- 2 sp_instr_set(0, '1')
- 3 sp_instr_jump(6)
- 4 sp_instr_stmt('statement1')
- 5 sp_instr_stmt('statement2')
- 6 sp_instr_hpop(1)
- 7 sp_instr_stmt('statement3')
-
- - CONTINUE handler
- create procedure hndlr1(val int)
- begin
- declare x int default 0;
- declare foo condition for 1146;
- declare continue handler for foo set x = 1;
-
- insert into t3 values ("hndlr1", val); # Non-existing table?
- if x>0 then
- insert into t1 values ("hndlr1", val); # This instead then
- end if;
- end|
-
- Pos. Instruction
- 0 sp_instr_set(1, '0')
- 1 sp_instr_hpush_jump(4, 2)
- 2 sp_instr_set(1, '1')
- 3 sp_instr_hreturn(2) # frame size
- 4 sp_instr_stmt('insert ... t3 ...')
- 5 sp_instr_jump_if_not(7, 'x>0')
- 6 sp_instr_stmt('insert ... t1 ...')
- 7 sp_instr_hpop(2)
-
-
- - Cursors
-
- For stored procedures to be really useful, you want to have cursors.
- MySQL doesn't yet have "real" cursor support (with API and ODBC support,
- allowing updating, arbitrary scrolling, etc), but a simple asensitive,
- non-scrolling, read-only cursor can be implemented in SPs using the
- class Protocol_cursor.
- This class intecepts the creation and sending of results sets and instead
- stores it in-memory, as MYSQL_FIELDS and MYSQL_ROWS (as in the client API).
-
- To support this, we need the usual name binding support in sp_pcontext
- (similar to variables and conditions) to keep track on declared cursor
- names, and a corresponding run-time mechanism in sp_rcontext.
- Cursors are lexically scoped like everything with a body or BEGIN/END
- block, so they are pushed and poped as usual (see conditions and variables
- above).
- The basic operations on a cursor are OPEN, FETCH and CLOSE, which will
- each have a corresponding instruction. In addition, we need instructions
- to push a new cursor (this will encapsulate the LEX of the SELECT statement
- of the cursor), and a pop instruction:
- - sp_instr_cpush
- Push a cursor to the sp_rcontext. This instruction contains the LEX
- for the select statement
- - sp_instr_cpop
- Pop a number of cursors from the sp_rcontext.
- - sp_instr_copen
- Open a cursor: This will execute the select and get the result set
- in a sepeate memroot.
- - sp_instr_cfetch
- Fetch the next row from the in-memory result set. The instruction
- contains a list of the variables (frame offsets) to set.
- - sp_instr_cclose
- Free the result set.
-
- A cursor is a separate class, sp_cursor (defined in sp_rcontex.h) which
- encapsulates the basic operations used by the above instructions.
- This class contains the LEX, Protocol_cursor object, and its memroot,
- as well as the cursor's current state.
- Compiling and executing is fairly straight-forward. sp_instr_copen is
- a subclass of sp_instr_stmt and uses its mechanism to execute a
- substatement.
-
- - Example:
-
- begin
- declare x int;
- declare c cursor for select a from t1;
-
- open c;
- fetch c into x;
- close c;
- end
-
- Pos. Instruction
- 0 sp_instr_cpush('select a from ...')
- 1 sp_instr_copen(0) # The 0'th cursor
- 2 sp_instr_cfetch(0) # Contains the variable list
- 3 sp_instr_cclose(0)
- 4 sp_instr_cpop(1)
-
-
-
- - The SP cache
-
- There are two ways to cache SPs:
-
- 1) one global cache, share by all threads/connections,
- 2) one cache per thread.
-
- There are pros and cons with both methods:
-
- 1) Pros: Save memory, each SP only read from table once,
- Cons: Needs locking (= serialization at access), requires thread-safe
- data structures,
- 2) Pros: Fast, no locking required (almost), limited thread-safe
- requirement,
- Cons: Uses more memory, each SP read from table once per thread.
-
- Unfortunately, we cannot use alternative 1 for the time being, as most
- of the data structures to be cached (lex and items) are not reentrant
- and thread-safe. (Things are modified at execution, we have THD pointers
- stored everywhere, etc.)
- This leaves us with alternative 2, one cache per thread; or actually
- two, since we keep FUNCTIONs and PROCEDUREs in separate caches.
- This is not that terrible; the only case when it will perform
- significantly worse than a global cache is when we have an application
- where new threads are connecting, calling a procedure, and disconnecting,
- over and over again.
-
- The cache implementation itself is simple and straightforward, a hashtable
- wrapped in a class and a C API (see APIs below).
-
- There is however one issue with multiple caches: dropping and altering
- procedures. Normally, this should be a very rare event in a running
- system; it's typically something you do during development and testing,
- so it's not unthinkable that we would simply ignore the issue and let
- any threads running with a cached version of an SP keep doing so until
- its disconnected.
- But assuming we want to keep the caches consistent with respect to drop
- and alter, it can be done:
-
- 1) A global counter is needed, initialized to 0 at start.
- 2) At each DROP or ALTER, increase the counter by one.
- 3) Each cache has its own copy of the counter, copied at the last read.
- 4) When looking up a name in the cache, first check if the global counter
- is larger than the local copy.
- If so, clear the cache and return "not found", and update the local
- counter; otherwise, lookup as usual.
-
- This minimizes the cost to a single brief lock for the access of an
- integer when operating normally. Only in the event of an actual drop or
- alter, is the cache cleared. This may seem to be drastic, but since we
- assume that this is a rare event, it's not a problem.
- It would of course be possible to have a much more fine-grained solution,
- keeping track of each SP, but the overhead of doing so is not worth the
- effort.
-
-
- - Class and function APIs
- This is an outline of the key types. Some types and other details
- in the actual files have been omitted for readability.
-
- - The parser context: sp_pcontext.h
-
- typedef enum
- {
- sp_param_in,
- sp_param_out,
- sp_param_inout
- } sp_param_mode_t;
-
- typedef struct
- {
- LEX_STRING name;
- enum enum_field_types type;
- sp_param_mode_t mode;
- uint offset; // Offset in current frame
- my_bool isset;
- } sp_pvar_t;
-
- typedef struct sp_cond_type
- {
- enum { number, state, warning, notfound, exception } type;
- char sqlstate[6];
- uint mysqlerr;
- } sp_cond_type_t;
-
- class sp_pcontext
- {
- sp_pcontext();
-
- // Return the maximum frame size
- uint max_framesize();
-
- // Return the current frame size
- uint current_framesize();
-
- // Return the number of parameters
- uint params();
-
- // Set the number of parameters to the current frame size
- void set_params();
-
- // Set type of the variable at offset 'i' in the frame
- void set_type(uint i, enum enum_field_types type);
-
- // Mark the i:th variable to "set" (i.e. having a value) with
- // 'val' true.
- void set_isset(uint i, my_bool val);
-
- // Push the variable 'name' to the frame.
- void push_var(LEX_STRING *name,
- enum enum_field_types type, sp_param_mode_t mode);
-
- // Pop 'num' variables from the frame.
- void pop_var(uint num = 1);
-
- // Find variable by name
- sp_pvar_t *find_pvar(LEX_STRING *name);
-
- // Find variable by index
- sp_pvar_t *find_pvar(uint i);
-
- // Push label 'name' of instruction index 'ip' to the label context
- sp_label_t *push_label(char *name, uint ip);
-
- // Find label 'name' in the context
- sp_label_t *find_label(char *name);
-
- // Return the last pushed label
- sp_label_t *last_label();
-
- // Return and remove the last pushed label.
- sp_label_t *pop_label();
-
- // Push a condition to the context
- void push_cond(LEX_STRING *name, sp_cond_type_t *val);
-
- // Pop a 'num' condition from the context
- void pop_cond(uint num);
-
- // Find a condition in the context
- sp_cond_type_t *find_cond(LEX_STRING *name);
-
- // Increase the handler count
- void add_handler();
-
- // Returns the handler count
- uint handlers();
-
- // Push a cursor
- void push_cursor(LEX_STRING *name);
-
- // Find a cursor
- my_bool find_cursor(LEX_STRING *name, uint *poff);
-
- // Pop 'num' cursors
- void pop_cursor(uint num);
-
- // Return the number of cursors
- uint cursors();
- }
-
-
- - The run-time context (call frame): sp_rcontext.h
-
- #define SP_HANDLER_NONE 0
- #define SP_HANDLER_EXIT 1
- #define SP_HANDLER_CONTINUE 2
- #define SP_HANDLER_UNDO 3
-
- typedef struct
- {
- struct sp_cond_type *cond;
- uint handler; // Location of handler
- int type;
- uint foffset; // Frame offset for the handlers declare level
- } sp_handler_t;
-
- class sp_rcontext
- {
- // 'fsize' is the max size of the context, 'hmax' the number of handlers,
- // 'cmax' the number of cursors
- sp_rcontext(uint fsize, uint hmax, , uint cmax);
-
- // Push value (parameter) 'i' to the frame
- void push_item(Item *i);
-
- // Set slot 'idx' to value 'i'
- void set_item(uint idx, Item *i);
-
- // Return the item in slot 'idx'
- Item *get_item(uint idx);
-
- // Set the "out" index 'oidx' for slot 'idx. If it's an IN slot,
- // use 'oidx' -1.
- void set_oindex(uint idx, int oidx);
-
- // Return the "out" index for slot 'idx'
- int get_oindex(uint idx);
-
- // Set the FUNCTION result
- void set_result(Item *i);
-
- // Get the FUNCTION result
- Item *get_result();
-
- // Push handler at location 'h' for condition 'cond'. 'f' is the
- // current variable frame size.
- void push_handler(sp_cond_type_t *cond, uint h, int type, uint f);
-
- // Pop 'count' handlers
- void pop_handlers(uint count);
-
- // Find a handler for this error. This sets the state for a found
- // handler in the context. If called repeatedly without clearing,
- // only the first call's state is kept.
- int find_handler(uint sql_errno);
-
- // Returns 1 if a handler has been found, with '*ip' and '*fp' set
- // to the handler location and frame size respectively.
- int found_handler(uint *ip, uint *fp);
-
- // Clear the found handler state.
- void clear_handler();
-
- // Push a return address for a CONTINUE handler
- void push_hstack(uint ip);
-
- // Pop the CONTINUE handler return stack
- uint pop_hstack();
-
- // Save variables from frame index 'fp' and up.
- void save_variables(uint fp);
-
- // Restore saved variables from to frame index 'fp' and up.
- void restore_variables(uint fp);
-
- // Push a cursor for the statement (lex)
- void push_cursor(LEX *lex);
-
- // Pop 'count' cursors
- void pop_cursors(uint count);
-
- // Pop all cursors
- void pop_all_cursors();
-
- // Get the 'i'th cursor
- sp_cursor *get_cursor(uint i);
-
- }
-
-
- - The procedure: sp_head.h
-
- #define TYPE_ENUM_FUNCTION 1
- #define TYPE_ENUM_PROCEDURE 2
-
- class sp_head
- {
- int m_type; // TYPE_ENUM_FUNCTION or TYPE_ENUM_PROCEDURE
-
- sp_head();
-
- void init(LEX_STRING *name, LEX *lex, LEX_STRING *comment, char suid);
-
- // Store this procedure in the database. This is a wrapper around
- // the function sp_create_procedure().
- int create(THD *);
-
- // Invoke a FUNCTION
- int
- execute_function(THD *thd, Item **args, uint argcount, Item **resp);
-
- // CALL a PROCEDURE
- int
- execute_procedure(THD *thd, List<Item> *args);
-
- // Add the instruction to this procedure.
- void add_instr(sp_instr *);
-
- // Returns the number of instructions.
- uint instructions();
-
- // Returns the last instruction
- sp_instr *last_instruction();
-
- // Resets lex in 'thd' and keeps a copy of the old one.
- void reset_lex(THD *);
-
- // Restores lex in 'thd' from our copy, but keeps some status from the
- // one in 'thd', like ptr, tables, fields, etc.
- void restore_lex(THD *);
-
- // Put the instruction on the backpatch list, associated with
- // the label.
- void push_backpatch(sp_instr *, struct sp_label *);
-
- // Update all instruction with this label in the backpatch list to
- // the current position.
- void backpatch(struct sp_label *);
-
- // Returns the SP name (with optional length in '*lenp').
- char *name(uint *lenp = 0);
-
- // Returns the result type for a function
- Item_result result();
-
- // Sets various attributes
- void sp_set_info(char *creator, uint creatorlen,
- longlong created, longlong modified,
- bool suid, char *comment, uint commentlen);
- }
-
-
- - Instructions
-
- - The base class:
- class sp_instr
- {
- // 'ip' is the index of this instruction
- sp_instr(uint ip);
-
- // Execute this instrution.
- // '*nextp' will be set to the index of the next instruction
- // to execute. (For most instruction this will be the
- // instruction following this one.)
- // Returns 0 on success, non-zero if some error occurred.
- virtual int execute(THD *, uint *nextp)
- }
-
- - Statement instruction:
- class sp_instr_stmt : public sp_instr
- {
- sp_instr_stmt(uint ip);
-
- int execute(THD *, uint *nextp);
-
- // Set the statement's Lex
- void set_lex(LEX *);
-
- // Return the statement's Lex
- LEX *get_lex();
- }
-
- - SET instruction:
- class sp_instr_set : public sp_instr
- {
- // 'offset' is the variable's frame offset, 'val' the value,
- // and 'type' the variable type.
- sp_instr_set(uint ip,
- uint offset, Item *val, enum enum_field_types type);
-
- int execute(THD *, uint *nextp);
- }
-
- - Unconditional jump
- class sp_instr_jump : public sp_instr
- {
- // No destination, must be set.
- sp_instr_jump(uint ip);
-
- // 'dest' is the destination instruction index.
- sp_instr_jump(uint ip, uint dest);
-
- int execute(THD *, uint *nextp);
-
- // Set the destination instruction 'dest'.
- void set_destination(uint dest);
- }
-
- - Conditional jump
- class sp_instr_jump_if_not : public sp_instr_jump
- {
- // Jump if 'i' evaluates to false. Destination not set yet.
- sp_instr_jump_if_not(uint ip, Item *i);
-
- // Jump to 'dest' if 'i' evaluates to false.
- sp_instr_jump_if_not(uint ip, Item *i, uint dest)
-
- int execute(THD *, uint *nextp);
- }
-
- - Return a function value
- class sp_instr_freturn : public sp_instr
- {
- // Return the value 'val'
- sp_instr_freturn(uint ip, Item *val, enum enum_field_types type);
-
- int execute(THD *thd, uint *nextp);
- }
-
- - Push a handler and jump
- class sp_instr_hpush_jump : public sp_instr_jump
- {
- // Push handler of type 'htype', with current frame size 'fp'
- sp_instr_hpush_jump(uint ip, int htype, uint fp);
-
- int execute(THD *thd, uint *nextp);
-
- // Add condition for this handler
- void add_condition(struct sp_cond_type *cond);
- }
-
- - Pops handlers
- class sp_instr_hpop : public sp_instr
- {
- // Pop 'count' handlers
- sp_instr_hpop(uint ip, uint count);
-
- int execute(THD *thd, uint *nextp);
- }
-
- - Return from a CONTINUE handler
- class sp_instr_hreturn : public sp_instr
- {
- // Return from handler, and restore variables to 'fp'.
- sp_instr_hreturn(uint ip, uint fp);
-
- int execute(THD *thd, uint *nextp);
- }
-
- - Push a CURSOR
- class sp_instr_cpush : public sp_instr_stmt
- {
- // Push a cursor for statement 'lex'
- sp_instr_cpush(uint ip, LEX *lex)
-
- int execute(THD *thd, uint *nextp);
- }
-
- - Pop CURSORs
- class sp_instr_cpop : public sp_instr_stmt
- {
- // Pop 'count' cursors
- sp_instr_cpop(uint ip, uint count)
-
- int execute(THD *thd, uint *nextp);
- }
-
- - Open a CURSOR
- class sp_instr_copen : public sp_instr_stmt
- {
- // Open the 'c'th cursor
- sp_instr_copen(uint ip, uint c);
-
- int execute(THD *thd, uint *nextp);
- }
-
- - Close a CURSOR
- class sp_instr_cclose : public sp_instr
- {
- // Close the 'c'th cursor
- sp_instr_cclose(uint ip, uint c);
-
- int execute(THD *thd, uint *nextp);
- }
-
- - Fetch a row with CURSOR
- class sp_instr_cfetch : public sp_instr
- {
- // Fetch next with the 'c'th cursor
- sp_instr_cfetch(uint ip, uint c);
-
- int execute(THD *thd, uint *nextp);
-
- // Add a target variable for the fetch
- void add_to_varlist(struct sp_pvar *var);
- }
-
-
- - Utility functions: sp.h
-
- #define SP_OK 0
- #define SP_KEY_NOT_FOUND -1
- #define SP_OPEN_TABLE_FAILED -2
- #define SP_WRITE_ROW_FAILED -3
- #define SP_DELETE_ROW_FAILED -4
- #define SP_GET_FIELD_FAILED -5
- #define SP_PARSE_ERROR -6
-
- // Finds a stored procedure given its name. Returns NULL if not found.
- sp_head *sp_find_procedure(THD *, LEX_STRING *name);
-
- // Store the procedure 'name' in the database. 'def' is the complete
- // definition string ("create procedure ...").
- int sp_create_procedure(THD *,
- char *name, uint namelen,
- char *def, uint deflen,
- char *comment, uint commentlen, bool suid);
-
- // Drop the procedure 'name' from the database.
- int sp_drop_procedure(THD *, char *name, uint namelen);
-
- // Finds a stored function given its name. Returns NULL if not found.
- sp_head *sp_find_function(THD *, LEX_STRING *name);
-
- // Store the function 'name' in the database. 'def' is the complete
- // definition string ("create function ...").
- int sp_create_function(THD *,
- char *name, uint namelen,
- char *def, uint deflen,
- char *comment, uint commentlen, bool suid);
-
- // Drop the function 'name' from the database.
- int sp_drop_function(THD *, char *name, uint namelen);
-
-
- - The cache: sp_cache.h
-
- /* Initialize the SP caching once at startup */
- void sp_cache_init();
-
- /* Clear the cache *cp and set *cp to NULL */
- void sp_cache_clear(sp_cache **cp);
-
- /* Insert an SP to cache. If **cp points to NULL, it's set to a
- new cache */
- void sp_cache_insert(sp_cache **cp, sp_head *sp);
-
- /* Lookup an SP in cache */
- sp_head *sp_cache_lookup(sp_cache **cp, char *name, uint namelen);
-
- /* Remove an SP from cache */
- void sp_cache_remove(sp_cache **cp, sp_head *sp);
-
-
- - The mysql.proc schema:
-
- CREATE TABLE proc (
- db char(64) binary DEFAULT '' NOT NULL,
- name char(64) DEFAULT '' NOT NULL,
- type enum('FUNCTION','PROCEDURE') NOT NULL,
- specific_name char(64) DEFAULT '' NOT NULL,
- language enum('SQL') DEFAULT 'SQL' NOT NULL,
- sql_data_access enum('CONTAINS_SQL') DEFAULT 'CONTAINS_SQL' NOT NULL,
- is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL,
- security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL,
- param_list blob DEFAULT '' NOT NULL,
- returns char(64) DEFAULT '' NOT NULL,
- body blob DEFAULT '' NOT NULL,
- definer char(77) binary DEFAULT '' NOT NULL,
- created timestamp,
- modified timestamp,
- sql_mode set(
- 'REAL_AS_FLOAT',
- 'PIPES_AS_CONCAT',
- 'ANSI_QUOTES',
- 'IGNORE_SPACE',
- 'IGNORE_BAD_TABLE_OPTIONS',
- 'ONLY_FULL_GROUP_BY',
- 'NO_UNSIGNED_SUBTRACTION',
- 'NO_DIR_IN_CREATE',
- 'POSTGRESQL',
- 'ORACLE',
- 'MSSQL',
- 'DB2',
- 'MAXDB',
- 'NO_KEY_OPTIONS',
- 'NO_TABLE_OPTIONS',
- 'NO_FIELD_OPTIONS',
- 'MYSQL323',
- 'MYSQL40',
- 'ANSI',
- 'NO_AUTO_VALUE_ON_ZERO'
- ) DEFAULT 0 NOT NULL,
- comment char(64) binary DEFAULT '' NOT NULL,
- PRIMARY KEY (db,name,type)
- ) comment='Stored Procedures';
-
- --
-
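
The removed specification describes how procedure bodies are compiled into arrays of sp_instr_* instructions, and its own worked example can still be tried against a running server. A sketch using the mysql command-line client, where the root account and the test schema (instead of the spec's db.tab) are assumptions:

    # Create and call the example procedure from the removed sp-imp-spec.txt.
    mysql --user=root --delimiter='//' test -e "
    CREATE TABLE IF NOT EXISTS tab (x INT, s CHAR(16))//
    CREATE PROCEDURE a(s CHAR(16))
    BEGIN
      DECLARE x INT;
      SET x = 3;
      WHILE x > 0 DO
        SET x = x - 1;
        INSERT INTO tab VALUES (x, s);
      END WHILE;
    END//
    CALL a('hello')//
    SELECT * FROM tab//
    "
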
diff --git a/KNOWN_BUGS.txt b/KNOWN_BUGS.txt
index 56c9102b0cd..af65c98590d 100644
--- a/KNOWN_BUGS.txt
+++ b/KNOWN_BUGS.txt
@@ -1,35 +1,15 @@
-This file should contain all known fatal bugs in Mariadb and the Aria
-storage engine for the last source or binary release. Minor bugs,
-extensions and feature requests, and bugs found since this release can
-be found in the MariaDB bugs database at: http://mariadb.org/jira and
-in the MySQL bugs databases at: http://bugs.mysql.com/ (category
-"Maria storage engine").
+This file should contain all known fatal bugs in MariaDB for the last
+source or binary release. Minor bugs, extensions and feature requests,
+and bugs found since this release can be found in the MariaDB bugs
+database at: https://jira.mariadb.org
There should not normally be any bugs which affect normal operations
in any MariaDB release. Still, there are always exceptions and edge
cases and that is what this file is for.
-If you have found a bug that is not listed here, please add it to
-http://mariadb.org/jira so we can either fix it for next release or in
-the worst case add it here for others to know!
+If you find a bug, please add it to https://jira.mariadb.org so
+that we can try to fix it for the next release. You can also add
+feature requests to JIRA.
-
-IMPORTANT:
-
-If you have been using the Maria (now Aria) storage engine with the
-MySQL-5.1-Maria-alpha build and are upgrading to a newer MariaDB you
-MUST run [m]aria_chk --recover on all your Aria tables. This is because
-we made an incompatible change with how the transaction id is stored
-and old transaction id's must be reset!
-
-cd mysql-data-directory
-aria_chk --recover */*.MAI
-
-Going forward, we will do our best to not introduce any incompatible
-changes in the data format for Aria tables. If this would be ever be
-needed, we will, if possible, support both the old and the new version
-to make upgrades as easy as possible.
-
-Note that for the MariaDB 5.1 release the Aria storage engine is
-classified as 'beta'; It should work, but use it with caution. Please
-report all bugs to http://mariadb.org/jira so we can fix them!
+The latest documentation for the MariaDB server can be found at:
+https://mariadb.com/kb
diff --git a/README.md b/README.md
index 681f75ca812..8d4a6e7cfab 100644
--- a/README.md
+++ b/README.md
@@ -1,7 +1,7 @@
Code status:
------------
-* [![Travis CI status](https://secure.travis-ci.org/MariaDB/server.png?branch=10.3)](https://travis-ci.org/MariaDB/server) travis-ci.org (10.3 branch)
+* [![Travis CI status](https://secure.travis-ci.org/MariaDB/server.png?branch=10.4)](https://travis-ci.org/MariaDB/server) travis-ci.org (10.4 branch)
* [![Appveyor CI status](https://ci.appveyor.com/api/projects/status/4u6pexmtpuf8jq66?svg=true)](https://ci.appveyor.com/project/rasmushoj/server) ci.appveyor.com
## MariaDB: drop-in replacement for MySQL
@@ -9,13 +9,13 @@ Code status:
MariaDB is designed as a drop-in replacement of MySQL(R) with more
features, new storage engines, fewer bugs, and better performance.
-MariaDB is brought to you by the MariaDB Foundation.
+MariaDB is brought to you by the MariaDB Foundation and the MariaDB Corporation.
Please read the CREDITS file for details about the MariaDB Foundation,
and who is developing MariaDB.
MariaDB is developed by many of the original developers of MySQL who
-now work for the MariaDB Foundation and the MariaDB Corporation, and by many people in
-the community.
+now work for the MariaDB Corporation and the MariaDB Foundation, and
+by many people in the community.
MySQL, which is the base of MariaDB, is a product and trademark of Oracle
Corporation, Inc. For a list of developers and other contributors,
@@ -24,28 +24,35 @@ list of active contributors.
A description of the MariaDB project and a manual can be found at:
-https://mariadb.org/
-
https://mariadb.com/kb/en/
https://mariadb.com/kb/en/mariadb-vs-mysql-features/
-https://mariadb.com/kb/en/mariadb-versus-mysql-features/
-
https://mariadb.com/kb/en/mariadb-versus-mysql-compatibility/
+https://mariadb.com/kb/en/library/new-and-old-releases/
+
+https://mariadb.org/
+
As MariaDB is a full replacement of MySQL, the MySQL manual at
http://dev.mysql.com/doc is generally applicable.
-Help:
+Help
-----
More help is available from the Maria Discuss mailing list
https://launchpad.net/~maria-discuss
and the #maria IRC channel on Freenode.
+Live QA for beginner contributors
+---------------------------------
+MariaDB has a dedicated time each week when we answer new contributor questions live on Zulip and IRC.
+From 8:00 to 10:00 UTC on Mondays, and 10:00 to 12:00 UTC on Thursdays,
+anyone can ask any questions they’d like, and a live developer will be available to assist.
+
+New contributors can ask questions at any time, but immediate feedback is only provided during those hours.
-License:
+License
--------
***************************************************************************
@@ -62,7 +69,7 @@ and COPYING.thirdparty files.
***************************************************************************
-Bug Reports:
+Bug Reports
------------
Bug and/or error reports regarding MariaDB should be submitted at:
diff --git a/VERSION b/VERSION
index a532e780f23..da1fcc97f1d 100644
--- a/VERSION
+++ b/VERSION
@@ -1,4 +1,4 @@
MYSQL_VERSION_MAJOR=10
-MYSQL_VERSION_MINOR=3
-MYSQL_VERSION_PATCH=15
-SERVER_MATURITY=stable
+MYSQL_VERSION_MINOR=4
+MYSQL_VERSION_PATCH=5
+SERVER_MATURITY=gamma
diff --git a/client/client_priv.h b/client/client_priv.h
index ada72187569..54fed943313 100644
--- a/client/client_priv.h
+++ b/client/client_priv.h
@@ -49,7 +49,7 @@ enum options_client
OPT_SSL_CIPHER, OPT_SHUTDOWN_TIMEOUT, OPT_LOCAL_INFILE,
OPT_DELETE_MASTER_LOGS, OPT_COMPACT,
OPT_PROMPT, OPT_IGN_LINES,OPT_TRANSACTION,OPT_MYSQL_PROTOCOL,
- OPT_SHARED_MEMORY_BASE_NAME, OPT_FRM, OPT_SKIP_OPTIMIZATION,
+ OPT_FRM, OPT_SKIP_OPTIMIZATION,
OPT_COMPATIBLE, OPT_RECONNECT, OPT_DELIMITER, OPT_SECURE_AUTH,
OPT_OPEN_FILES_LIMIT, OPT_SET_CHARSET, OPT_SERVER_ARG,
OPT_STOP_POSITION, OPT_START_DATETIME, OPT_STOP_DATETIME,
@@ -100,6 +100,7 @@ enum options_client
OPT_SKIP_ANNOTATE_ROWS_EVENTS,
OPT_SSL_CRL, OPT_SSL_CRLPATH,
OPT_PRINT_ROW_COUNT, OPT_PRINT_ROW_EVENT_POSITIONS,
+ OPT_SHUTDOWN_WAIT_FOR_SLAVES,
OPT_MAX_CLIENT_OPTION /* should be always the last */
};
diff --git a/client/mysql.cc b/client/mysql.cc
index ed1959e8b03..3ab3c1192b7 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2018, Oracle and/or its affiliates.
- Copyright (c) 2009, 2018, MariaDB Corporation
+ Copyright (c) 2009, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -160,6 +160,7 @@ static uint my_end_arg;
static char * opt_mysql_unix_port=0;
static int connect_flag=CLIENT_INTERACTIVE;
static my_bool opt_binary_mode= FALSE;
+static my_bool opt_connect_expired_password= FALSE;
static int interrupted_query= 0;
static char *current_host,*current_db,*current_user=0,*opt_password=0,
*current_prompt=0, *delimiter_str= 0,
@@ -196,10 +197,8 @@ static char delimiter[16]= DEFAULT_DELIMITER;
static uint delimiter_length= 1;
unsigned short terminal_width= 80;
-#ifdef HAVE_SMEM
-static char *shared_memory_base_name=0;
-#endif
static uint opt_protocol=0;
+static const char *opt_protocol_type= "";
static CHARSET_INFO *charset_info= &my_charset_latin1;
#include "sslopt-vars.h"
@@ -281,9 +280,9 @@ static COMMANDS commands[] = {
{ "edit", 'e', com_edit, 0, "Edit command with $EDITOR."},
#endif
{ "ego", 'G', com_ego, 0,
- "Send command to mysql server, display result vertically."},
+ "Send command to MariaDB server, display result vertically."},
{ "exit", 'q', com_quit, 0, "Exit mysql. Same as quit."},
- { "go", 'g', com_go, 0, "Send command to mysql server." },
+ { "go", 'g', com_go, 0, "Send command to MariaDB server." },
{ "help", 'h', com_help, 1, "Display this help." },
#ifdef USE_POPEN
{ "nopager",'n', com_nopager,0, "Disable pager, print to stdout." },
@@ -1340,9 +1339,6 @@ sig_handler mysql_end(int sig)
my_free(full_username);
my_free(part_username);
my_free(default_prompt);
-#ifdef HAVE_SMEM
- my_free(shared_memory_base_name);
-#endif
my_free(current_prompt);
while (embedded_server_arg_count > 1)
my_free(embedded_server_args[--embedded_server_arg_count]);
@@ -1373,10 +1369,6 @@ static bool do_connect(MYSQL *mysql, const char *host, const char *user,
#endif
if (opt_protocol)
mysql_options(mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
-#ifdef HAVE_SMEM
- if (shared_memory_base_name)
- mysql_options(mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
-#endif
if (opt_plugin_dir && *opt_plugin_dir)
mysql_options(mysql, MYSQL_PLUGIN_DIR, opt_plugin_dir);
@@ -1547,7 +1539,7 @@ static struct my_option my_long_options[] =
&ignore_spaces, &ignore_spaces, 0, GET_BOOL, NO_ARG, 0, 0,
0, 0, 0, 0},
{"init-command", OPT_INIT_COMMAND,
- "SQL Command to execute when connecting to MySQL server. Will "
+ "SQL Command to execute when connecting to MariaDB server. Will "
"automatically be re-executed when reconnecting.",
&opt_init_command, &opt_init_command, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -1609,11 +1601,12 @@ static struct my_option my_long_options[] =
"Get progress reports for long running commands (like ALTER TABLE)",
&opt_progress_reports, &opt_progress_reports, 0, GET_BOOL, NO_ARG, 1, 0,
0, 0, 0, 0},
- {"prompt", OPT_PROMPT, "Set the mysql prompt to this value.",
+ {"prompt", OPT_PROMPT, "Set the command line prompt to this value.",
&current_prompt, &current_prompt, 0, GET_STR_ALLOC,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe, memory).",
- 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+ {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe).",
+ &opt_protocol_type, &opt_protocol_type, 0, GET_STR, REQUIRED_ARG,
+ 0, 0, 0, 0, 0, 0},
{"quick", 'q',
"Don't cache result, print it row by row. This may slow down the server "
"if the output is suspended. Doesn't use history file.",
@@ -1626,11 +1619,6 @@ static struct my_option my_long_options[] =
&opt_reconnect, &opt_reconnect, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
{"silent", 's', "Be more silent. Print results with a tab as separator, "
"each row on new line.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
-#ifdef HAVE_SMEM
- {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
- "Base name of shared memory.", &shared_memory_base_name,
- &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#endif
{"socket", 'S', "The socket file to use for connection.",
&opt_mysql_unix_port, &opt_mysql_unix_port, 0, GET_STR_ALLOC,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -1701,6 +1689,11 @@ static struct my_option my_long_options[] =
"piped to mysql or loaded using the 'source' command). This is necessary "
"when processing output from mysqlbinlog that may contain blobs.",
&opt_binary_mode, &opt_binary_mode, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+ {"connect-expired-password", 0,
+ "Notify the server that this client is prepared to handle expired "
+ "password sandbox mode even if --batch was specified.",
+ &opt_connect_expired_password, &opt_connect_expired_password, 0, GET_BOOL,
+ NO_ARG, 0, 0, 0, 0, 0, 0},
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
@@ -1794,8 +1787,10 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
break;
case OPT_MYSQL_PROTOCOL:
#ifndef EMBEDDED_LIBRARY
- if ((opt_protocol= find_type_with_warning(argument, &sql_protocol_typelib,
- opt->name)) <= 0)
+ if (!argument[0])
+ opt_protocol= 0;
+ else if ((opt_protocol= find_type_with_warning(argument, &sql_protocol_typelib,
+ opt->name)) <= 0)
exit(1);
#endif
break;
@@ -1882,6 +1877,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
case 'W':
#ifdef __WIN__
opt_protocol = MYSQL_PROTOCOL_PIPE;
+ opt_protocol_type= "pipe";
#endif
break;
#include <sslopt-case.h>
@@ -3142,7 +3138,7 @@ com_help(String *buffer __attribute__((unused)),
put_info("\nGeneral information about MariaDB can be found at\n"
"http://mariadb.org\n", INFO_INFO);
- put_info("List of all MySQL commands:", INFO_INFO);
+ put_info("List of all client commands:", INFO_INFO);
if (!named_cmds)
put_info("Note that all text commands must be first on line and end with ';'",INFO_INFO);
for (i = 0; commands[i].name; i++)
@@ -4700,6 +4696,9 @@ sql_real_connect(char *host,char *database,char *user,char *password,
mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, default_charset);
+ my_bool can_handle_expired= opt_connect_expired_password || !status.batch;
+ mysql_options(&mysql, MYSQL_OPT_CAN_HANDLE_EXPIRED_PASSWORDS, &can_handle_expired);
+
if (!do_connect(&mysql, host, user, password, database,
connect_flag | CLIENT_MULTI_STATEMENTS))
{
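
For illustration only, a minimal invocation of the new --connect-expired-password option could look like the sketch below; the account name and the SET PASSWORD statement are placeholders, not part of this change. Without the option, a --batch client does not advertise that it can handle the expired-password sandbox mode, so such logins are normally refused by the server.

    # hypothetical account and password, shown only to illustrate the flag
    mysql --batch --connect-expired-password -u app_user -p \
          -e "SET PASSWORD = PASSWORD('new_password')"
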
diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c
index ef1630dd0e3..ee10996d678 100644
--- a/client/mysql_upgrade.c
+++ b/client/mysql_upgrade.c
@@ -111,7 +111,7 @@ static struct my_option my_long_options[]=
&opt_default_auth, &opt_default_auth, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"force", 'f', "Force execution of mysqlcheck even if mysql_upgrade "
- "has already been executed for the current version of MySQL.",
+ "has already been executed for the current version of MariaDB.",
&opt_force, &opt_force, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"host", 'h', "Connect to host.", 0,
0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -135,13 +135,8 @@ static struct my_option my_long_options[]=
"built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"protocol", OPT_MYSQL_PROTOCOL,
- "The protocol to use for connection (tcp, socket, pipe, memory).",
+ "The protocol to use for connection (tcp, socket, pipe).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#ifdef HAVE_SMEM
- {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
- "Base name of shared memory.", 0,
- 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#endif
{"silent", OPT_SILENT, "Print less information", &opt_silent,
&opt_silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"socket", 'S', "The socket file to use for connection.",
@@ -354,7 +349,6 @@ get_one_option(int optid, const struct my_option *opt,
case 'P': /* --port */
case 'S': /* --socket */
case OPT_MYSQL_PROTOCOL: /* --protocol */
- case OPT_SHARED_MEMORY_BASE_NAME: /* --shared-memory-base-name */
case OPT_PLUGIN_DIR: /* --plugin-dir */
case OPT_DEFAULT_AUTH: /* --default-auth */
add_one_option_cmd_line(&conn_args, opt, argument);
@@ -814,7 +808,7 @@ static my_bool is_mysql()
strstr(ds_events_struct.str, "IGNORE_BAD_TABLE_OPTIONS") != NULL)
ret= FALSE;
else
- verbose("MySQL upgrade detected");
+ verbose("MariaDB upgrade detected");
dynstr_free(&ds_events_struct);
return(ret);
@@ -884,10 +878,14 @@ static int run_mysqlcheck_fixnames(void)
static const char *expected_errors[]=
{
+ "ERROR 1051", /* Unknown table */
"ERROR 1060", /* Duplicate column name */
"ERROR 1061", /* Duplicate key name */
"ERROR 1054", /* Unknown column */
+ "ERROR 1146", /* Table does not exist */
"ERROR 1290", /* RR_OPTION_PREVENTS_STATEMENT */
+ "ERROR 1347", /* 'mysql.user' is not of type 'BASE TABLE' */
+ "ERROR 1348", /* Column 'Show_db_priv' is not updatable */
0
};
@@ -1198,7 +1196,7 @@ int main(int argc, char **argv)
*/
if (!opt_force && upgrade_already_done(0))
{
- printf("This installation of MySQL is already upgraded to %s, "
+ printf("This installation of MariaDB is already upgraded to %s, "
"use --force if you still need to run mysql_upgrade\n",
MYSQL_SERVER_VERSION);
goto end;
diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc
index b5e54be899a..5c9f9cd05f3 100644
--- a/client/mysqladmin.cc
+++ b/client/mysqladmin.cc
@@ -40,7 +40,8 @@ ulonglong last_values[MAX_MYSQL_VAR+100];
static int interval=0;
static my_bool option_force=0,interrupted=0,new_line=0,
opt_compress= 0, opt_local= 0, opt_relative= 0, opt_verbose= 0,
- opt_vertical= 0, tty_password= 0, opt_nobeep;
+ opt_vertical= 0, tty_password= 0, opt_nobeep,
+ opt_shutdown_wait_for_slaves= 0;
static my_bool debug_info_flag= 0, debug_check_flag= 0;
static uint tcp_port = 0, option_wait = 0, option_silent=0, nr_iterations;
static uint opt_count_iterations= 0, my_end_arg;
@@ -49,9 +50,6 @@ static char * unix_port=0;
static char *opt_plugin_dir= 0, *opt_default_auth= 0;
static bool sql_log_bin_off= false;
-#ifdef HAVE_SMEM
-static char *shared_memory_base_name=0;
-#endif
static uint opt_protocol=0;
static myf error_flags; /* flags to pass to my_printf_error, like ME_BELL */
@@ -185,18 +183,13 @@ static struct my_option my_long_options[] =
#endif
"built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").",
&tcp_port, &tcp_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe, memory).",
+ {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"relative", 'r',
"Show difference between current and previous values when used with -i. "
"Currently only works with extended-status.",
&opt_relative, &opt_relative, 0, GET_BOOL, NO_ARG, 0, 0, 0,
0, 0, 0},
-#ifdef HAVE_SMEM
- {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
- "Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name,
- 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#endif
{"silent", 's', "Silently exit if one can't connect to server.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"socket", 'S', "The socket file to use for connection.",
@@ -226,6 +219,11 @@ static struct my_option my_long_options[] =
{"shutdown_timeout", OPT_SHUTDOWN_TIMEOUT, "", &opt_shutdown_timeout,
&opt_shutdown_timeout, 0, GET_ULONG, REQUIRED_ARG,
SHUTDOWN_DEF_TIMEOUT, 0, 3600*12, 0, 1, 0},
+ {"wait_for_all_slaves", OPT_SHUTDOWN_WAIT_FOR_SLAVES,
+ "Defers shutdown until after all binlogged events have been sent to "
+ "all connected slaves", &opt_shutdown_wait_for_slaves,
+ &opt_shutdown_wait_for_slaves, 0, GET_BOOL, NO_ARG, 0, 0, 0,
+ 0, 0, 0},
{"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.",
&opt_plugin_dir, &opt_plugin_dir, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -367,10 +365,6 @@ int main(int argc,char *argv[])
#endif
if (opt_protocol)
mysql_options(&mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
-#ifdef HAVE_SMEM
- if (shared_memory_base_name)
- mysql_options(&mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
-#endif
mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, default_charset);
error_flags= (myf)(opt_nobeep ? 0 : ME_BELL);
@@ -496,9 +490,6 @@ err2:
mysql_library_end();
my_free(opt_password);
my_free(user);
-#ifdef HAVE_SMEM
- my_free(shared_memory_base_name);
-#endif
free_defaults(save_argv);
my_end(my_end_arg);
return error;
@@ -588,7 +579,7 @@ static my_bool sql_connect(MYSQL *mysql, uint wait)
if (!info)
{
info=1;
- fputs("Waiting for MySQL server to answer",stderr);
+ fputs("Waiting for MariaDB server to answer",stderr);
(void) fflush(stderr);
}
else
@@ -708,7 +699,17 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv)
!stat(pidfile, &pidfile_status))
last_modified= pidfile_status.st_mtime;
- if (mysql_shutdown(mysql, SHUTDOWN_DEFAULT))
+ if (opt_shutdown_wait_for_slaves)
+ {
+ sprintf(buff, "SHUTDOWN WAIT FOR ALL SLAVES");
+ if (mysql_query(mysql, buff))
+ {
+ my_printf_error(0, "%s failed; error: '%-.200s'",
+ error_flags, buff, mysql_error(mysql));
+ return -1;
+ }
+ }
+ else if (mysql_shutdown(mysql, SHUTDOWN_DEFAULT))
{
my_printf_error(0, "shutdown failed; error: '%s'", error_flags,
mysql_error(mysql));
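
A usage sketch for the new mysqladmin flag (credentials are placeholders); with it, the client issues SHUTDOWN WAIT FOR ALL SLAVES instead of calling mysql_shutdown() directly:

    # wait until all binlogged events have reached the connected slaves, then shut down
    mysqladmin --user=root --password --wait_for_all_slaves shutdown
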
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index 7060775112b..e31058f3477 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -125,9 +125,6 @@ static uint my_end_arg;
static const char* sock= 0;
static char *opt_plugindir= 0, *opt_default_auth= 0;
-#ifdef HAVE_SMEM
-static const char *shared_memory_base_name= 0;
-#endif
static char* user = 0;
static char* pass = 0;
static char *charset= 0;
@@ -1606,7 +1603,7 @@ static struct my_option my_options[] =
&opt_default_auth, &opt_default_auth, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"disable-log-bin", 'D', "Disable binary log. This is useful, if you "
- "enabled --to-last-log and are sending the output to the same MySQL server. "
+ "enabled --to-last-log and are sending the output to the same MariaDB server. "
"This way you could avoid an endless loop. You would also like to use it "
"when restoring after a crash to avoid duplication of the statements you "
"already have. NOTE: you will need a SUPER privilege to use this option.",
@@ -1649,9 +1646,9 @@ static struct my_option my_options[] =
&port, &port, 0, GET_INT, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
{"protocol", OPT_MYSQL_PROTOCOL,
- "The protocol to use for connection (tcp, socket, pipe, memory).",
+ "The protocol to use for connection (tcp, socket, pipe).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"read-from-remote-server", 'R', "Read binary logs from a MySQL server.",
+ {"read-from-remote-server", 'R', "Read binary logs from a MariaDB server.",
&remote_opt, &remote_opt, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0,
0, 0},
{"raw", 0, "Requires -R. Output raw binlog data instead of SQL "
@@ -1690,12 +1687,6 @@ static struct my_option my_options[] =
{"set-charset", OPT_SET_CHARSET,
"Add 'SET NAMES character_set' to the output.", &charset,
&charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#ifdef HAVE_SMEM
- {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
- "Base name of shared memory.", &shared_memory_base_name,
- &shared_memory_base_name,
- 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#endif
{"short-form", 's', "Just show regular queries: no extra info, no "
"row-based events and no row counts. This is mainly for testing only, "
"and should not be used to feed to the MariaDB server. "
@@ -1710,7 +1701,7 @@ static struct my_option my_options[] =
{"start-datetime", OPT_START_DATETIME,
"Start reading the binlog at first event having a datetime equal or "
"posterior to the argument; the argument must be a date and time "
- "in the local time zone, in any format accepted by the MySQL server "
+ "in the local time zone, in any format accepted by the MariaDB server "
"for DATETIME and TIMESTAMP types, for example: 2004-12-25 11:25:56 "
"(you should probably use quotes for your shell to set it properly).",
&start_datetime_str, &start_datetime_str,
@@ -1728,7 +1719,7 @@ static struct my_option my_options[] =
{"stop-datetime", OPT_STOP_DATETIME,
"Stop reading the binlog at first event having a datetime equal or "
"posterior to the argument; the argument must be a date and time "
- "in the local time zone, in any format accepted by the MySQL server "
+ "in the local time zone, in any format accepted by the MariaDB server "
"for DATETIME and TIMESTAMP types, for example: 2004-12-25 11:25:56 "
"(you should probably use quotes for your shell to set it properly).",
&stop_datetime_str, &stop_datetime_str,
@@ -1752,7 +1743,7 @@ static struct my_option my_options[] =
0, 0, 0, 0, 0, 0},
{"to-last-log", 't', "Requires -R. Will not stop at the end of the \
requested binlog but rather continue printing until the end of the last \
-binlog of the MySQL server. If you send the output to the same MySQL server, \
+binlog of the MariaDB server. If you send the output to the same MariaDB server, \
that may lead to an endless loop.",
&to_last_remote_log, &to_last_remote_log, 0, GET_BOOL,
NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -1898,7 +1889,7 @@ static void usage()
print_version();
puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
printf("\
-Dumps a MySQL binary log in a format usable for viewing or for piping to\n\
+Dumps a MariaDB binary log in a format usable for viewing or for piping to\n\
the mysql command line client.\n\n");
printf("Usage: %s [options] log-files\n", my_progname);
print_defaults("my",load_groups);
@@ -1916,7 +1907,7 @@ static my_time_t convert_str_to_timestamp(const char* str)
uint dummy_in_dst_time_gap;
/* We require a total specification (date AND time) */
- if (str_to_datetime(str, (uint) strlen(str), &l_time, 0, &status) ||
+ if (str_to_datetime_or_date(str, (uint) strlen(str), &l_time, 0, &status) ||
l_time.time_type != MYSQL_TIMESTAMP_DATETIME || status.warnings)
{
error("Incorrect date and time argument: %s", str);
@@ -2148,11 +2139,6 @@ static Exit_status safe_connect()
if (opt_protocol)
mysql_options(mysql, MYSQL_OPT_PROTOCOL, (char*) &opt_protocol);
-#ifdef HAVE_SMEM
- if (shared_memory_base_name)
- mysql_options(mysql, MYSQL_SHARED_MEMORY_BASE_NAME,
- shared_memory_base_name);
-#endif
mysql_options(mysql, MYSQL_OPT_CONNECT_ATTR_RESET, 0);
mysql_options4(mysql, MYSQL_OPT_CONNECT_ATTR_ADD,
"program_name", "mysqlbinlog");
@@ -2309,7 +2295,7 @@ static Exit_status check_master_version()
break;
default:
error("Could not find server version: "
- "Master reported unrecognized MySQL version '%s'.", row[0]);
+ "Master reported unrecognized MariaDB version '%s'.", row[0]);
goto err;
}
if (!glob_description_event || !glob_description_event->is_valid())
diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c
index a0cbf4d972f..e4ab18d52c4 100644
--- a/client/mysqlcheck.c
+++ b/client/mysqlcheck.c
@@ -55,7 +55,6 @@ static int first_error = 0;
static char *opt_skip_database;
DYNAMIC_ARRAY tables4repair, tables4rebuild, alter_table_cmds;
DYNAMIC_ARRAY views4repair;
-static char *shared_memory_base_name=0;
static uint opt_protocol=0;
enum operations { DO_CHECK=1, DO_REPAIR, DO_ANALYZE, DO_OPTIMIZE, DO_FIX_NAMES };
@@ -179,7 +178,7 @@ static struct my_option my_long_options[] =
"built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").",
&opt_mysql_port, &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0,
0},
- {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe, memory).",
+ {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"quick", 'q',
"If you are using this option with CHECK TABLE, it prevents the check from scanning the rows to check for wrong links. This is the fastest check. If you are using this option with REPAIR TABLE, it will try to repair only the index tree. This is the fastest repair method for a table.",
@@ -188,11 +187,6 @@ static struct my_option my_long_options[] =
{"repair", 'r',
"Can fix almost anything except unique keys that aren't unique.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
-#ifdef HAVE_SMEM
- {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
- "Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name,
- 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#endif
{"silent", 's', "Print only error messages.", &opt_silent,
&opt_silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"skip_database", 0, "Don't process the database specified as argument",
@@ -1113,8 +1107,6 @@ static int dbConnect(char *host, char *user, char *passwd)
#endif
if (opt_protocol)
mysql_options(&mysql_connection,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
- if (shared_memory_base_name)
- mysql_options(&mysql_connection,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
if (opt_plugin_dir && *opt_plugin_dir)
mysql_options(&mysql_connection, MYSQL_PLUGIN_DIR, opt_plugin_dir);
@@ -1254,8 +1246,7 @@ int main(int argc, char **argv)
delete_dynamic(&alter_table_cmds);
}
end1:
- my_free(opt_password);
- my_free(shared_memory_base_name);
+ my_free(opt_password);
mysql_library_end();
free_defaults(defaults_argv);
my_end(my_end_arg);
diff --git a/client/mysqldump.c b/client/mysqldump.c
index a2ed3c0504c..48eb2dfdb5e 100644
--- a/client/mysqldump.c
+++ b/client/mysqldump.c
@@ -159,9 +159,6 @@ static MYSQL_RES *routine_res, *routine_list_res;
FILE *md_result_file= 0;
FILE *stderror_file=0;
-#ifdef HAVE_SMEM
-static char *shared_memory_base_name=0;
-#endif
static uint opt_protocol= 0;
static char *opt_plugin_dir= 0, *opt_default_auth= 0;
@@ -257,10 +254,10 @@ static struct my_option my_long_options[] =
1, 0, 0, 0, 0, 0},
{"compatible", OPT_COMPATIBLE,
"Change the dump to be compatible with a given mode. By default tables "
- "are dumped in a format optimized for MySQL. Legal modes are: ansi, "
+ "are dumped in a format optimized for MariaDB. Legal modes are: ansi, "
"mysql323, mysql40, postgresql, oracle, mssql, db2, maxdb, no_key_options, "
"no_table_options, no_field_options. One can use several modes separated "
- "by commas. Note: Requires MySQL server version 4.1.0 or higher. "
+ "by commas. Note: Requires MariaDB server version 4.1.0 or higher. "
"This option is ignored with earlier server versions.",
&opt_compatible_mode_str, &opt_compatible_mode_str, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -277,7 +274,7 @@ static struct my_option my_long_options[] =
&opt_compress, &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0,
0, 0, 0},
{"create-options", 'a',
- "Include all MySQL specific create options.",
+ "Include all MariaDB specific create options.",
&create_options, &create_options, 0, GET_BOOL, NO_ARG, 1,
0, 0, 0, 0, 0},
{"databases", 'B',
@@ -472,7 +469,7 @@ static struct my_option my_long_options[] =
&opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0,
0},
{"protocol", OPT_MYSQL_PROTOCOL,
- "The protocol to use for connection (tcp, socket, pipe, memory).",
+ "The protocol to use for connection (tcp, socket, pipe).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"quick", 'q', "Don't buffer query, dump directly to stdout.",
&quick, &quick, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
@@ -494,11 +491,6 @@ static struct my_option my_long_options[] =
"Add 'SET NAMES default_character_set' to the output.",
&opt_set_charset, &opt_set_charset, 0, GET_BOOL, NO_ARG, 1,
0, 0, 0, 0, 0},
-#ifdef HAVE_SMEM
- {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
- "Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name,
- 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#endif
/*
Note that the combination --single-transaction --master-data
will give bullet-proof binlog position only if server >=4.1.3. That's the
@@ -650,7 +642,7 @@ static void usage(void)
{
print_version();
puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
- puts("Dumping structure and contents of MySQL databases and tables.");
+ puts("Dumping structure and contents of MariaDB databases and tables.");
short_usage_sub(stdout);
print_defaults("my",load_default_groups);
puts("");
@@ -708,7 +700,7 @@ static void write_header(FILE *sql_file, char *db_name)
else if (!opt_compact)
{
print_comment(sql_file, 0,
- "-- MySQL dump %s Distrib %s, for %s (%s)\n--\n",
+ "-- MariaDB dump %s Distrib %s, for %s (%s)\n--\n",
DUMP_VERSION, MYSQL_SERVER_VERSION, SYSTEM_TYPE,
MACHINE_TYPE);
print_comment(sql_file, 0, "-- Host: %s ",
@@ -1719,10 +1711,6 @@ static int connect_to_db(char *host, char *user,char *passwd)
#endif
if (opt_protocol)
mysql_options(&mysql_connection,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
-#ifdef HAVE_SMEM
- if (shared_memory_base_name)
- mysql_options(&mysql_connection,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
-#endif
mysql_options(&mysql_connection, MYSQL_SET_CHARSET_NAME, default_charset);
if (opt_plugin_dir && *opt_plugin_dir)
@@ -5527,7 +5515,7 @@ static int start_transaction(MYSQL *mysql_con)
if ((mysql_get_server_version(mysql_con) < 40100) && opt_master_data)
{
fprintf(stderr, "-- %s: the combination of --single-transaction and "
- "--master-data requires a MySQL server version of at least 4.1 "
+ "--master-data requires a MariaDB server version of at least 4.1 "
"(current server's version is %s). %s\n",
ignore_errors ? "Warning" : "Error",
mysql_con->server_version ? mysql_con->server_version : "unknown",
@@ -6255,10 +6243,6 @@ err:
if (opt_slave_data)
do_start_slave_sql(mysql);
-#ifdef HAVE_SMEM
- my_free(shared_memory_base_name);
-#endif
-
dbDisconnect(current_host);
if (!path)
write_footer(md_result_file);
diff --git a/client/mysqlimport.c b/client/mysqlimport.c
index c7432992d45..977e0e6ca1e 100644
--- a/client/mysqlimport.c
+++ b/client/mysqlimport.c
@@ -65,10 +65,6 @@ static longlong opt_ignore_lines= -1;
static char **argv_to_free;
-#ifdef HAVE_SMEM
-static char *shared_memory_base_name=0;
-#endif
-
static struct my_option my_long_options[] =
{
{"character-sets-dir", OPT_CHARSETS_DIR,
@@ -157,15 +153,10 @@ static struct my_option my_long_options[] =
&opt_mysql_port,
&opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0,
0},
- {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe, memory).",
+ {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"replace", 'r', "If duplicate unique key was found, replace old row.",
&replace, &replace, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
-#ifdef HAVE_SMEM
- {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
- "Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name,
- 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#endif
{"silent", 's', "Be more silent.", &silent, &silent, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"socket", 'S', "The socket file to use for connection.",
@@ -204,13 +195,14 @@ static void usage(void)
{
puts("Copyright 2000-2008 MySQL AB, 2008 Sun Microsystems, Inc.");
puts("Copyright 2008-2011 Oracle and Monty Program Ab.");
+ puts("Copyright 2012-2019 MariaDB Corporation Ab.");
print_version();
puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
printf("\
Loads tables from text files in various formats. The base name of the\n\
text file must be the name of the table that should be used.\n\
-If one uses sockets to connect to the MySQL server, the server will open and\n\
-read the text file directly. In other cases the client will open the text\n\
+If one uses sockets to connect to the MariaDB server, the server will open\n\
+and read the text file directly. In other cases the client will open the text\n\
file. The SQL command 'LOAD DATA INFILE' is used to import the rows.\n");
printf("\nUsage: %s [OPTIONS] database textfile...\n",my_progname);
@@ -458,10 +450,6 @@ static MYSQL *db_connect(char *host, char *database,
#endif
if (opt_protocol)
mysql_options(mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
-#ifdef HAVE_SMEM
- if (shared_memory_base_name)
- mysql_options(mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
-#endif
if (opt_plugin_dir && *opt_plugin_dir)
mysql_options(mysql, MYSQL_PLUGIN_DIR, opt_plugin_dir);
@@ -514,9 +502,6 @@ static void safe_exit(int error, MYSQL *mysql)
if (mysql)
mysql_close(mysql);
-#ifdef HAVE_SMEM
- my_free(shared_memory_base_name);
-#endif
free_defaults(argv_to_free);
mysql_library_end();
my_free(opt_password);
diff --git a/client/mysqlshow.c b/client/mysqlshow.c
index 484ca661d9e..a6108ef4fc1 100644
--- a/client/mysqlshow.c
+++ b/client/mysqlshow.c
@@ -39,9 +39,6 @@ static uint opt_verbose=0;
static char *default_charset= (char*) MYSQL_AUTODETECT_CHARSET_NAME;
static char *opt_plugin_dir= 0, *opt_default_auth= 0;
-#ifdef HAVE_SMEM
-static char *shared_memory_base_name=0;
-#endif
static uint opt_protocol=0;
static void get_options(int *argc,char ***argv);
@@ -131,10 +128,7 @@ int main(int argc, char **argv)
#endif
if (opt_protocol)
mysql_options(&mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
-#ifdef HAVE_SMEM
- if (shared_memory_base_name)
- mysql_options(&mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
-#endif
+
mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, default_charset);
if (opt_plugin_dir && *opt_plugin_dir)
@@ -177,9 +171,6 @@ error:
mysql_close(&mysql); /* Close & free connection */
my_free(opt_password);
mysql_server_end();
-#ifdef HAVE_SMEM
- my_free(shared_memory_base_name);
-#endif
free_defaults(defaults_argv);
my_end(my_end_arg);
exit(error ? 1 : 0);
@@ -243,14 +234,8 @@ static struct my_option my_long_options[] =
NO_ARG, 0, 0, 0, 0, 0, 0},
#endif
{"protocol", OPT_MYSQL_PROTOCOL,
- "The protocol to use for connection (tcp, socket, pipe, memory).",
+ "The protocol to use for connection (tcp, socket, pipe).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#ifdef HAVE_SMEM
- {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
- "Base name of shared memory.", &shared_memory_base_name,
- &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG,
- 0, 0, 0, 0, 0, 0},
-#endif
{"show-table-type", 't', "Show table type column.",
&opt_table_type, &opt_table_type, 0, GET_BOOL,
NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -283,7 +268,7 @@ static void usage(void)
{
print_version();
puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
- puts("Shows the structure of a MySQL database (databases, tables, and columns).\n");
+ puts("Shows the structure of a MariaDB database (databases, tables, and columns).\n");
printf("Usage: %s [OPTIONS] [database [table [column]]]\n",my_progname);
puts("\n\
If last argument contains a shell or SQL wildcard (*,?,% or _) then only\n\
@@ -673,7 +658,7 @@ list_table_status(MYSQL *mysql,const char *db,const char *wild)
fprintf(stderr,"%s: Cannot get status for db: %s, table: %s: %s\n",
my_progname,db,wild ? wild : "",mysql_error(mysql));
if (mysql_errno(mysql) == ER_PARSE_ERROR)
- fprintf(stderr,"This error probably means that your MySQL server doesn't support the\n\'show table status' command.\n");
+ fprintf(stderr,"This error probably means that your MariaDB server doesn't support the\n\'show table status' command.\n");
return 1;
}
diff --git a/client/mysqlslap.c b/client/mysqlslap.c
index 4cb6cbcc15b..8c79da88e41 100644
--- a/client/mysqlslap.c
+++ b/client/mysqlslap.c
@@ -98,9 +98,6 @@ TODO:
#define snprintf _snprintf
#endif
-#ifdef HAVE_SMEM
-static char *shared_memory_base_name=0;
-#endif
/* Global Thread counter */
uint thread_counter;
@@ -309,10 +306,6 @@ void set_mysql_connect_options(MYSQL *mysql)
#endif
if (opt_protocol)
mysql_options(mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
-#ifdef HAVE_SMEM
- if (shared_memory_base_name)
- mysql_options(mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
-#endif
mysql_options(mysql, MYSQL_SET_CHARSET_NAME, default_charset);
}
@@ -423,10 +416,6 @@ int main(int argc, char **argv)
statement_cleanup(pre_statements);
statement_cleanup(post_statements);
option_cleanup(engine_options);
-
-#ifdef HAVE_SMEM
- my_free(shared_memory_base_name);
-#endif
free_defaults(defaults_argv);
mysql_library_end();
my_end(my_end_arg);
@@ -634,7 +623,7 @@ static struct my_option my_long_options[] =
{"host", 'h', "Connect to host.", &host, &host, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"init-command", OPT_INIT_COMMAND,
- "SQL Command to execute when connecting to MySQL server. Will "
+ "SQL Command to execute when connecting to MariaDB server. Will "
"automatically be re-executed when reconnecting.",
&opt_init_command, &opt_init_command, 0,
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -689,17 +678,11 @@ static struct my_option my_long_options[] =
&pre_system, &pre_system,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"protocol", OPT_MYSQL_PROTOCOL,
- "The protocol to use for connection (tcp, socket, pipe, memory).",
+ "The protocol to use for connection (tcp, socket, pipe).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"query", 'q', "Query to run or file containing query to run.",
&user_supplied_query, &user_supplied_query,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#ifdef HAVE_SMEM
- {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
- "Base name of shared memory.", &shared_memory_base_name,
- &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG,
- 0, 0, 0, 0, 0, 0},
-#endif
{"silent", 's', "Run program in silent mode - no output.",
&opt_silent, &opt_silent, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index 89dcf6593b4..80d4d70be2c 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -107,7 +107,6 @@ enum {
static int record= 0, opt_sleep= -1;
static char *opt_db= 0, *opt_pass= 0;
const char *opt_user= 0, *opt_host= 0, *unix_sock= 0, *opt_basedir= "./";
-static char *shared_memory_base_name=0;
const char *opt_logdir= "";
const char *opt_prologue= 0, *opt_charsets_dir;
static int opt_port= 0;
@@ -5895,7 +5894,6 @@ do_handle_error:
<opts> - options to use for the connection
* SSL - use SSL if available
* COMPRESS - use compression if available
- * SHM - use shared memory if available
* PIPE - use named pipe if available
*/
@@ -5907,7 +5905,6 @@ void do_connect(struct st_command *command)
char *ssl_cipher __attribute__((unused))= 0;
my_bool con_ssl= 0, con_compress= 0;
my_bool con_pipe= 0;
- my_bool con_shm __attribute__ ((unused))= 0;
int read_timeout= 0;
int write_timeout= 0;
int connect_timeout= 0;
@@ -5923,9 +5920,6 @@ void do_connect(struct st_command *command)
static DYNAMIC_STRING ds_sock;
static DYNAMIC_STRING ds_options;
static DYNAMIC_STRING ds_default_auth;
-#ifdef HAVE_SMEM
- static DYNAMIC_STRING ds_shm;
-#endif
const struct command_arg connect_args[] = {
{ "connection name", ARG_STRING, TRUE, &ds_connection_name, "Name of the connection" },
{ "host", ARG_STRING, TRUE, &ds_host, "Host to connect to" },
@@ -5954,19 +5948,15 @@ void do_connect(struct st_command *command)
die("Illegal argument for port: '%s'", ds_port.str);
}
-#ifdef HAVE_SMEM
- /* Shared memory */
- init_dynamic_string(&ds_shm, ds_sock.str, 0, 0);
-#endif
-
/* Sock */
if (ds_sock.length)
{
/*
If the socket is specified just as a name without path
+ or an abstract socket indicator ('@'), then
append tmpdir in front
*/
- if (*ds_sock.str != FN_LIBCHAR)
+ if (*ds_sock.str != FN_LIBCHAR && *ds_sock.str != '@')
{
char buff[FN_REFLEN];
fn_format(buff, ds_sock.str, TMPDIR, "", 0);
@@ -6006,8 +5996,6 @@ void do_connect(struct st_command *command)
con_compress= 1;
else if (length == 4 && !strncmp(con_options, "PIPE", 4))
con_pipe= 1;
- else if (length == 3 && !strncmp(con_options, "SHM", 3))
- con_shm= 1;
else if (strncasecmp(con_options, "read_timeout=",
sizeof("read_timeout=")-1) == 0)
{
@@ -6117,22 +6105,6 @@ void do_connect(struct st_command *command)
(char*)&connect_timeout);
}
-#ifdef HAVE_SMEM
- if (con_shm)
- {
- uint protocol= MYSQL_PROTOCOL_MEMORY;
- if (!ds_shm.length)
- die("Missing shared memory base name");
- mysql_options(con_slot->mysql, MYSQL_SHARED_MEMORY_BASE_NAME, ds_shm.str);
- mysql_options(con_slot->mysql, MYSQL_OPT_PROTOCOL, &protocol);
- }
- else if (shared_memory_base_name)
- {
- mysql_options(con_slot->mysql, MYSQL_SHARED_MEMORY_BASE_NAME,
- shared_memory_base_name);
- }
-#endif
-
/* Use default db name */
if (ds_database.length == 0)
dynstr_set(&ds_database, opt_db);
@@ -6177,9 +6149,6 @@ void do_connect(struct st_command *command)
dynstr_free(&ds_sock);
dynstr_free(&ds_options);
dynstr_free(&ds_default_auth);
-#ifdef HAVE_SMEM
- dynstr_free(&ds_shm);
-#endif
free(csname);
DBUG_VOID_RETURN;
}
@@ -7071,7 +7040,7 @@ static struct my_option my_long_options[] =
GET_INT, REQUIRED_ARG, DEFAULT_MAX_CONN, 8, 5120, 0, 0, 0},
{"password", 'p', "Password to use when connecting to server.",
0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
- {"protocol", OPT_MYSQL_PROTOCOL, "The protocol of connection (tcp,socket,pipe,memory).",
+ {"protocol", OPT_MYSQL_PROTOCOL, "The protocol of connection (tcp,socket,pipe).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"port", 'P', "Port number to use for connection or 0 for default to, in "
"order of preference, my.cnf, $MYSQL_TCP_PORT, "
@@ -7104,10 +7073,6 @@ static struct my_option my_long_options[] =
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"server-file", 'F', "Read embedded server arguments from file.",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"shared-memory-base-name", 0,
- "Base name of shared memory.", &shared_memory_base_name,
- &shared_memory_base_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0,
- 0, 0, 0},
{"silent", 's', "Suppress all normal output. Synonym for --quiet.",
&silent, &silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"sleep", 'T', "Always sleep this many seconds on sleep commands.",
@@ -7168,7 +7133,7 @@ void usage()
{
print_version();
puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
- printf("Runs a test against the mysql server and compares output with a results file.\n\n");
+ printf("Runs a test against the MariaDB server and compares output with a results file.\n\n");
printf("Usage: %s [OPTIONS] [database] < test_file\n", my_progname);
print_defaults("my",load_default_groups);
puts("");
@@ -7926,7 +7891,7 @@ int append_warnings(DYNAMIC_STRING *ds, MYSQL* mysql)
static void handle_no_active_connection(struct st_command *command,
struct st_connection *cn, DYNAMIC_STRING *ds)
{
- handle_error(command, 2006, "MySQL server has gone away", "000000", ds);
+ handle_error(command, 2006, "MariaDB server has gone away", "000000", ds);
cn->pending= FALSE;
var_set_errno(2006);
}
@@ -9289,7 +9254,7 @@ int main(int argc, char **argv)
if (mysql_server_init(embedded_server_arg_count,
embedded_server_args,
(char**) embedded_server_groups))
- die("Can't initialize MySQL server");
+ die("Can't initialize MariaDB server");
server_initialized= 1;
if (cur_file == file_stack && cur_file->file == 0)
{
@@ -9347,11 +9312,6 @@ int main(int argc, char **argv)
}
#endif
-#ifdef HAVE_SMEM
- if (shared_memory_base_name)
- mysql_options(con->mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
-#endif
-
if (!(con->name = my_strdup("default", MYF(MY_WME))))
die("Out of memory");
mysql_options(con->mysql, MYSQL_OPT_NONBLOCK, 0);
@@ -10020,7 +9980,7 @@ void do_get_replace(struct st_command *command)
char *buff, *start;
char word_end_chars[256], *pos;
POINTER_ARRAY to_array, from_array;
- DBUG_ENTER("get_replace");
+ DBUG_ENTER("do_get_replace");
free_replace();
diff --git a/cmake/aws_sdk.cmake b/cmake/aws_sdk.cmake
new file mode 100644
index 00000000000..7abd9974c1e
--- /dev/null
+++ b/cmake/aws_sdk.cmake
@@ -0,0 +1,95 @@
+MACRO (SKIP_AWS_SDK MSG)
+ SET(${RETVAL} OFF PARENT_SCOPE)
+ SET(${REASON} ${MSG} PARENT_SCOPE)
+ RETURN()
+ENDMACRO()
+
+FUNCTION (CHECK_AWS_SDK RETVAL REASON)
+ # AWS_SDK_EXTERNAL_PROJECT must be ON
+ IF(NOT AWS_SDK_EXTERNAL_PROJECT)
+ SKIP_AWS_SDK("AWS_SDK_EXTERNAL_PROJECT is not ON")
+ ENDIF()
+ IF(NOT NOT_FOR_DISTRIBUTION)
+ SKIP_AWS_SDK("AWS SDK has Apache 2.0 License which is not complatible with GPLv2. Set -DNOT_FOR_DISTRIBUTION=ON if you need it")
+ ENDIF()
+ # Check compiler support
+ IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
+ EXECUTE_PROCESS(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
+ IF (GCC_VERSION VERSION_LESS 4.8)
+ SKIP_AWS_SDK("GCC VERSION too old (${GCC_VERSION}, required is 4.8 or later")
+ ENDIF()
+ ELSEIF (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+ IF ((CMAKE_CXX_COMPILER_VERSION AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.3) OR
+ (CLANG_VERSION_STRING AND CLANG_VERSION_STRING VERSION_LESS 3.3))
+ SKIP_AWS_SDK("Clang version too old, required is 3.3 or later")
+ ENDIF()
+ ELSEIF(MSVC)
+ IF (MSVC_VERSION LESS 1800)
+ SKIP_AWS_SDK("MSVC version too old, required is VS2015 or later")
+ ENDIF()
+ ELSE()
+ SKIP_AWS_SDK("Unsupported compiler")
+ ENDIF()
+
+ # Check OS support
+ IF (NOT(WIN32 OR APPLE OR (CMAKE_SYSTEM_NAME MATCHES "Linux")))
+ SKIP_AWS_SDK("OS unsupported by AWS SDK")
+ ENDIF()
+
+ # Build from source, using ExternalProject_Add
+ # AWS C++ SDK requires cmake 2.8.12
+ IF(CMAKE_VERSION VERSION_LESS "2.8.12")
+ SKIP_AWS_SDK("CMake is too old")
+ ENDIF()
+
+ IF(UNIX)
+ # Check libraries required for building the SDK
+ FIND_PACKAGE(CURL)
+ IF(NOT CURL_FOUND)
+ SKIP_AWS_SDK("AWS C++ SDK requires libcurl development package")
+ ENDIF()
+ FIND_PATH(UUID_INCLUDE_DIR uuid/uuid.h)
+ IF(NOT UUID_INCLUDE_DIR)
+ SKIP_AWS_SDK("AWS C++ SDK requires uuid development package")
+ ENDIF()
+ IF(NOT APPLE)
+ FIND_LIBRARY(UUID_LIBRARIES uuid)
+ IF(NOT UUID_LIBRARIES)
+ SKIP_AWS_SDK("AWS C++ SDK requires uuid development package")
+ ENDIF()
+ FIND_PACKAGE(OpenSSL)
+ IF(NOT OPENSSL_FOUND)
+ SKIP_AWS_SDK("AWS C++ SDK requires openssl development package")
+ ENDIF()
+ ENDIF()
+ ENDIF()
+ SET(${RETVAL} ON PARENT_SCOPE)
+ENDFUNCTION()
+
+
+# USE_AWS_SDK_LIBS(target sdk_component1 ... sdk_component_N)
+# Example usage
+# USE_AWS_SDK_LIBS(aws_key_management kms s3)
+FUNCTION(USE_AWS_SDK_LIBS)
+ SET(SDK_COMPONENTS ${ARGN})
+ LIST(GET SDK_COMPONENTS 0 target)
+ IF(NOT TARGET ${target})
+ MESSAGE(FATAL_ERROR "${target} is not a valid target")
+ ENDIF()
+ SET(NON_DISTRIBUTABLE_WARNING "Apache 2.0" CACHE INTERNAL "")
+ LIST(REMOVE_AT SDK_COMPONENTS 0)
+ FOREACH(comp ${SDK_COMPONENTS})
+ SET_PROPERTY(GLOBAL PROPERTY AWS_SDK_LIBS ${comp} APPEND)
+ TARGET_LINK_LIBRARIES(${target} aws-cpp-sdk-${comp})
+ ENDFOREACH()
+ TARGET_LINK_LIBRARIES(${target} aws-cpp-sdk-core)
+ TARGET_INCLUDE_DIRECTORIES(${target} PRIVATE ${PROJECT_BINARY_DIR}/extra/aws_sdk/aws_sdk_cpp/include)
+ # Link OS libraries that AWS SDK depends on
+ IF(WIN32)
+ TARGET_LINK_LIBRARIES(${target} bcrypt winhttp wininet userenv version)
+ ELSE()
+ FIND_PACKAGE(CURL REQUIRED)
+ FIND_PACKAGE(OpenSSL REQUIRED)
+ TARGET_LINK_LIBRARIES(${target} ${OPENSSL_LIBRARIES} ${CURL_LIBRARIES} ${UUID_LIBRARIES})
+ ENDIF()
+ENDFUNCTION()
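
Because the SDK is Apache 2.0 licensed, CHECK_AWS_SDK only succeeds when the build is explicitly marked as not for distribution. An illustrative configure line, assuming an out-of-source build directory and using the variables checked above:

    # enable building the AWS C++ SDK as an external project
    cmake .. -DNOT_FOR_DISTRIBUTION=ON -DAWS_SDK_EXTERNAL_PROJECT=ON
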
diff --git a/cmake/build_configurations/mysql_release.cmake b/cmake/build_configurations/mysql_release.cmake
index 9e685244a8c..91650e5e206 100644
--- a/cmake/build_configurations/mysql_release.cmake
+++ b/cmake/build_configurations/mysql_release.cmake
@@ -1,5 +1,5 @@
# Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
-# Copyright (c) 2011, 2018, MariaDB Corporation
+# Copyright (c) 2011, 2019, MariaDB Corporation.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -97,15 +97,18 @@ ELSEIF(RPM)
SET(WITH_ZLIB system CACHE STRING "")
SET(CHECKMODULE /usr/bin/checkmodule CACHE FILEPATH "")
SET(SEMODULE_PACKAGE /usr/bin/semodule_package CACHE FILEPATH "")
+ SET(PLUGIN_AUTH_SOCKET YES CACHE STRING "")
ELSEIF(DEB)
SET(WITH_SSL system CACHE STRING "")
SET(WITH_ZLIB system CACHE STRING "")
SET(WITH_LIBWRAP ON)
SET(HAVE_EMBEDDED_PRIVILEGE_CONTROL ON)
+ SET(PLUGIN_AUTH_SOCKET YES CACHE STRING "")
ELSE()
SET(WITH_SSL bundled CACHE STRING "")
SET(WITH_ZLIB bundled CACHE STRING "")
SET(WITH_JEMALLOC static CACHE STRING "")
+ SET(PLUGIN_AUTH_SOCKET STATIC CACHE STRING "")
ENDIF()
IF(NOT COMPILATION_COMMENT)
diff --git a/cmake/cpack_rpm.cmake b/cmake/cpack_rpm.cmake
index 895d5941c29..db377184499 100644
--- a/cmake/cpack_rpm.cmake
+++ b/cmake/cpack_rpm.cmake
@@ -179,7 +179,7 @@ SETA(CPACK_RPM_server_PACKAGE_REQUIRES
IF(WITH_WSREP)
SETA(CPACK_RPM_server_PACKAGE_REQUIRES
- "galera" "rsync" "lsof" "grep" "gawk" "iproute"
+ "galera-4" "rsync" "lsof" "grep" "gawk" "iproute"
"coreutils" "findutils" "tar")
ENDIF()
diff --git a/cmake/make_dist.cmake.in b/cmake/make_dist.cmake.in
index 6fad17137fd..8e77b700eb7 100644
--- a/cmake/make_dist.cmake.in
+++ b/cmake/make_dist.cmake.in
@@ -50,6 +50,14 @@ IF(GIT_EXECUTABLE)
IF(NOT RESULT EQUAL 0)
SET(GIT_EXECUTABLE)
ENDIF()
+ EXECUTE_PROCESS(
+ COMMAND "${GIT_EXECUTABLE}" submodule foreach "${GIT_EXECUTABLE} checkout-index --all --prefix=${PACKAGE_DIR}/wsrep-lib/$path/"
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/wsrep-lib
+ RESULT_VARIABLE RESULT
+ )
+ IF(NOT RESULT EQUAL 0)
+ SET(GIT_EXECUTABLE)
+ ENDIF()
ENDIF()
CONFIGURE_FILE(${CMAKE_BINARY_DIR}/include/source_revision.h
diff --git a/cmake/os/Windows.cmake b/cmake/os/Windows.cmake
index d7e748a6d71..44a9c2e823c 100644
--- a/cmake/os/Windows.cmake
+++ b/cmake/os/Windows.cmake
@@ -96,7 +96,8 @@ IF(MSVC)
# Disable mingw based pkg-config found in Strawberry perl
SET(PKG_CONFIG_EXECUTABLE 0 CACHE INTERNAL "")
- SET(MSVC_CRT_TYPE /MT CACHE STRING
+
+ SET(MSVC_CRT_TYPE /MD CACHE STRING
"Runtime library - specify runtime library for linking (/MT,/MTd,/MD,/MDd)"
)
SET(VALID_CRT_TYPES /MTd /MDd /MD /MT)
@@ -106,9 +107,7 @@ IF(MSVC)
IF(MSVC_CRT_TYPE MATCHES "/MD")
# Dynamic runtime (DLLs), need to install CRT libraries.
- SET(CMAKE_INSTALL_MFC_LIBRARIES TRUE)# upgrade wizard
SET(CMAKE_INSTALL_SYSTEM_RUNTIME_COMPONENT VCCRT)
- SET(CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS TRUE)
SET(CMAKE_INSTALL_UCRT_LIBRARIES TRUE)
IF(MSVC_CRT_TYPE STREQUAL "/MDd")
SET (CMAKE_INSTALL_DEBUG_LIBRARIES_ONLY TRUE)
@@ -277,3 +276,17 @@ ENDIF()
SET(FN_NO_CASE_SENSE 1)
SET(USE_SYMDIR 1)
+
+# Force the static C runtime for targets in the current directory
+# (useful to avoid depending on the MFC DLLs, e.g. in the installer)
+MACRO(FORCE_STATIC_CRT)
+ FOREACH(flag
+ CMAKE_C_FLAGS_RELEASE CMAKE_C_FLAGS_RELWITHDEBINFO
+ CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_DEBUG_INIT
+ CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELWITHDEBINFO
+ CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG_INIT
+ CMAKE_C_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_MINSIZEREL
+ )
+ STRING(REGEX REPLACE "/MD[d]?" "/MT" "${flag}" "${${flag}}" )
+ ENDFOREACH()
+ENDMACRO()
diff --git a/cmake/os/WindowsCache.cmake b/cmake/os/WindowsCache.cmake
index 2ba3aec16b4..f51015e436c 100644
--- a/cmake/os/WindowsCache.cmake
+++ b/cmake/os/WindowsCache.cmake
@@ -153,6 +153,7 @@ SET(HAVE_SIGSET CACHE INTERNAL "")
SET(HAVE_SIGTERM 1 CACHE INTERNAL "")
SET(HAVE_SIGTHREADMASK CACHE INTERNAL "")
SET(HAVE_SIGWAIT CACHE INTERNAL "")
+SET(HAVE_SIGWAITINFO CACHE INTERNAL "")
SET(HAVE_SIZEOF_CHARP TRUE CACHE INTERNAL "")
SET(SIZEOF_CHARP ${CMAKE_SIZEOF_VOID_P} CACHE INTERNAL "")
SET(HAVE_SIZEOF_IN6_ADDR TRUE CACHE INTERNAL "")
diff --git a/cmake/readline.cmake b/cmake/readline.cmake
index f1c6f62e311..f7a5291135c 100644
--- a/cmake/readline.cmake
+++ b/cmake/readline.cmake
@@ -134,7 +134,7 @@ MACRO (MYSQL_FIND_SYSTEM_READLINE)
SET(USE_NEW_READLINE_INTERFACE 1)
ELSE()
IF(NOT_FOR_DISTRIBUTION)
- SET(NON_DISTRIBUTABLE_WARNING "GPLv3")
+ SET(NON_DISTRIBUTABLE_WARNING "GPLv3" CACHE INTERNAL "")
SET(USE_NEW_READLINE_INTERFACE 1)
ELSE()
SET(USE_NEW_READLINE_INTERFACE 0)
diff --git a/cmake/submodules.cmake b/cmake/submodules.cmake
index 34d1f37c956..c8f7b3cc400 100644
--- a/cmake/submodules.cmake
+++ b/cmake/submodules.cmake
@@ -19,16 +19,16 @@ IF(GIT_EXECUTABLE AND EXISTS "${CMAKE_SOURCE_DIR}/.git")
SET(update_result 0)
ELSEIF (cmake_update_submodules MATCHES force)
MESSAGE(STATUS "Updating submodules (forced)")
- EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update --init --force
+ EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update --init --force --recursive
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
RESULT_VARIABLE update_result)
ELSEIF (cmake_update_submodules MATCHES yes)
- EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update --init
+ EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update --init --recursive
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
RESULT_VARIABLE update_result)
ELSE()
MESSAGE(STATUS "Updating submodules")
- EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update --init
+ EXECUTE_PROCESS(COMMAND "${GIT_EXECUTABLE}" submodule update --init --recursive
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
RESULT_VARIABLE update_result)
ENDIF()
@@ -36,6 +36,6 @@ ENDIF()
IF(update_result OR NOT EXISTS ${CMAKE_SOURCE_DIR}/libmariadb/CMakeLists.txt)
MESSAGE(FATAL_ERROR "No MariaDB Connector/C! Run
- ${GIT_EXECUTABLE} submodule update --init
+ ${GIT_EXECUTABLE} submodule update --init --recursive
Then restart the build.${SUBMODULE_UPDATE_CONFIG_MESSAGE}")
ENDIF()
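
Since wsrep-lib appears to carry its own nested submodule (the wsrep-API/v26 tree referenced in the wsrep.cmake change below), a plain --init is no longer sufficient; a fresh checkout needs the command from the error message above:

    # fetch nested submodules as well
    git submodule update --init --recursive
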
diff --git a/cmake/wsrep.cmake b/cmake/wsrep.cmake
index b272df51adb..603f1955e5f 100644
--- a/cmake/wsrep.cmake
+++ b/cmake/wsrep.cmake
@@ -24,20 +24,37 @@ ELSE()
ENDIF()
OPTION(WITH_WSREP "WSREP replication API (to use, e.g. Galera Replication library)" ${with_wsrep_default})
+OPTION(WITH_WSREP_ALL
+ "Build all components of WSREP (unit tests, sample programs)"
+ OFF)
-# Set the patch version
-SET(WSREP_PATCH_VERSION "24")
+IF(WITH_WSREP)
+ # Set the patch version
+ SET(WSREP_PATCH_VERSION "22")
-# Obtain wsrep API version
-FILE(STRINGS "${MySQL_SOURCE_DIR}/wsrep/wsrep_api.h" WSREP_API_VERSION
- LIMIT_COUNT 1 REGEX "WSREP_INTERFACE_VERSION")
-STRING(REGEX MATCH "([0-9]+)" WSREP_API_VERSION "${WSREP_API_VERSION}")
+ IF(NOT EXISTS "${CMAKE_SOURCE_DIR}/wsrep-lib/wsrep-API/v26/wsrep_api.h")
+ MESSAGE(FATAL_ERROR "No MariaDB wsrep-API code! Run
+ ${GIT_EXECUTABLE} submodule update --init --recursive
+Then restart the build.
+")
+ ENDIF()
+ # Obtain wsrep API version
+ FILE(STRINGS "${CMAKE_SOURCE_DIR}/wsrep-lib/wsrep-API/v26/wsrep_api.h" WSREP_API_VERSION
+ LIMIT_COUNT 1 REGEX "WSREP_INTERFACE_VERSION")
+ STRING(REGEX MATCH "([0-9]+)" WSREP_API_VERSION "${WSREP_API_VERSION}")
-SET(WSREP_VERSION "${WSREP_API_VERSION}.${WSREP_PATCH_VERSION}"
- CACHE INTERNAL "WSREP version")
+ SET(WSREP_VERSION "${WSREP_API_VERSION}.${WSREP_PATCH_VERSION}"
+ CACHE INTERNAL "WSREP version")
-SET(WSREP_PROC_INFO ${WITH_WSREP})
+ SET(WSREP_PROC_INFO ${WITH_WSREP})
-IF(WITH_WSREP)
SET(WSREP_PATCH_VERSION "wsrep_${WSREP_VERSION}")
+ IF(NOT WITH_WSREP_ALL)
+ SET(WSREP_LIB_WITH_UNIT_TESTS OFF CACHE BOOL
+ "Disable unit tests for wsrep-lib")
+ SET(WSREP_LIB_WITH_DBSIM OFF CACHE BOOL
+ "Disable building dbsim for wsrep-lib")
+ ENDIF()
+ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/wsrep-lib/include)
+ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/wsrep-lib/wsrep-API/v26)
ENDIF()
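
A sketch of the two configure variants this introduces (build directory and generator omitted, only the options defined above are assumed):

    # Galera/wsrep support without the wsrep-lib unit tests and dbsim
    cmake .. -DWITH_WSREP=ON
    # additionally build the wsrep-lib unit tests and the dbsim sample program
    cmake .. -DWITH_WSREP=ON -DWITH_WSREP_ALL=ON
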
diff --git a/config.h.cmake b/config.h.cmake
index 44d41a2f729..6eaba1e0d9f 100644
--- a/config.h.cmake
+++ b/config.h.cmake
@@ -184,7 +184,6 @@
#cmakedefine HAVE_PERROR 1
#cmakedefine HAVE_POLL 1
#cmakedefine HAVE_POSIX_FALLOCATE 1
-#cmakedefine HAVE_LINUX_FALLOC_H 1
#cmakedefine HAVE_FALLOC_PUNCH_HOLE_AND_KEEP_SIZE 1
#cmakedefine HAVE_PREAD 1
#cmakedefine HAVE_PAUSE_INSTRUCTION 1
@@ -219,6 +218,7 @@
#cmakedefine HAVE_SIGACTION 1
#cmakedefine HAVE_SIGTHREADMASK 1
#cmakedefine HAVE_SIGWAIT 1
+#cmakedefine HAVE_SIGWAITINFO 1
#cmakedefine HAVE_SLEEP 1
#cmakedefine HAVE_SNPRINTF 1
#cmakedefine HAVE_STPCPY 1
diff --git a/configure.cmake b/configure.cmake
index 3cfc4b31d4e..1404263e5a6 100644
--- a/configure.cmake
+++ b/configure.cmake
@@ -196,7 +196,6 @@ CHECK_INCLUDE_FILES (inttypes.h HAVE_INTTYPES_H)
CHECK_INCLUDE_FILES (langinfo.h HAVE_LANGINFO_H)
CHECK_INCLUDE_FILES (link.h HAVE_LINK_H)
CHECK_INCLUDE_FILES (linux/unistd.h HAVE_LINUX_UNISTD_H)
-CHECK_INCLUDE_FILES (linux/falloc.h HAVE_LINUX_FALLOC_H)
CHECK_INCLUDE_FILES (limits.h HAVE_LIMITS_H)
CHECK_INCLUDE_FILES (locale.h HAVE_LOCALE_H)
CHECK_INCLUDE_FILES (malloc.h HAVE_MALLOC_H)
@@ -260,7 +259,7 @@ SET(CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} -DPACKAGE=test) # b
CHECK_INCLUDE_FILES (bfd.h BFD_H_EXISTS)
IF(BFD_H_EXISTS)
IF(NOT_FOR_DISTRIBUTION)
- SET(NON_DISTRIBUTABLE_WARNING "GPLv3")
+ SET(NON_DISTRIBUTABLE_WARNING "GPLv3" CACHE INTERNAL "")
SET(HAVE_BFD_H 1)
ENDIF()
ENDIF()
@@ -400,6 +399,7 @@ CHECK_FUNCTION_EXISTS (setlocale HAVE_SETLOCALE)
CHECK_FUNCTION_EXISTS (sigaction HAVE_SIGACTION)
CHECK_FUNCTION_EXISTS (sigthreadmask HAVE_SIGTHREADMASK)
CHECK_FUNCTION_EXISTS (sigwait HAVE_SIGWAIT)
+CHECK_FUNCTION_EXISTS (sigwaitinfo HAVE_SIGWAITINFO)
CHECK_FUNCTION_EXISTS (sigset HAVE_SIGSET)
CHECK_FUNCTION_EXISTS (sleep HAVE_SLEEP)
CHECK_FUNCTION_EXISTS (snprintf HAVE_SNPRINTF)
diff --git a/debian/additions/debian-start.inc.sh b/debian/additions/debian-start.inc.sh
index 0640bf9c2a6..fa5b1299bdc 100755
--- a/debian/additions/debian-start.inc.sh
+++ b/debian/additions/debian-start.inc.sh
@@ -60,7 +60,7 @@ function upgrade_system_tables_if_necessary() {
# errors as the script is designed to be idempotent.
LC_ALL=C $MYUPGRADE \
2>&1 \
- | egrep -v '^(1|@had|ERROR (1054|1060|1061))' \
+ | egrep -v '^(1|@had|ERROR (1051|1054|1060|1061|1146|1347|1348))' \
| logger -p daemon.warn -i -t$0
}
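
The widened egrep pattern simply adds more error codes that repeated mysql_upgrade runs can emit harmlessly, so they no longer reach the daemon log. A quick, purely illustrative check of the filter (the sample message is hypothetical):

  echo 'ERROR 1146 at line 1: ...' \
    | egrep -v '^(1|@had|ERROR (1051|1054|1060|1061|1146|1347|1348))'
  # prints nothing: the line is suppressed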
diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh
index 4dba4de26b7..7d2301c7369 100755
--- a/debian/autobake-deb.sh
+++ b/debian/autobake-deb.sh
@@ -16,11 +16,11 @@ export DEB_BUILD_OPTIONS="nocheck $DEB_BUILD_OPTIONS"
if [[ $TRAVIS ]]
then
# On Travis-CI, the log must stay under 4MB so make the build less verbose
- sed -i -e '/Add support for verbose builds/,+2d' debian/rules
+ sed -i -e '/Add support for verbose builds/,/^$/d' debian/rules
# Don't include test suite package on Travis-CI to make the build time shorter
sed '/Package: mariadb-test-data/,/^$/d' -i debian/control
- sed '/Package: mariadb-test/,/^$/d' -i debian/control
+ sed '/Package: mariadb-test$/,/^$/d' -i debian/control
# Don't build the test package at all to save time and disk space
sed 's|DINSTALL_MYSQLTESTDIR=share/mysql/mysql-test|DINSTALL_MYSQLTESTDIR=false|' -i debian/rules
@@ -67,9 +67,9 @@ then
sed 's/ --with systemd//' -i debian/rules
sed '/systemd/d' -i debian/rules
sed '/\.service/d' -i debian/rules
- sed '/galera_new_cluster/d' -i debian/mariadb-server-10.3.install
- sed '/galera_recovery/d' -i debian/mariadb-server-10.3.install
- sed '/mariadb-service-convert/d' -i debian/mariadb-server-10.3.install
+ sed '/galera_new_cluster/d' -i debian/mariadb-server-10.4.install
+ sed '/galera_recovery/d' -i debian/mariadb-server-10.4.install
+ sed '/mariadb-service-convert/d' -i debian/mariadb-server-10.4.install
fi
# If libzstd-dev is not available (before Debian Stretch and Ubuntu Xenial)
@@ -113,7 +113,7 @@ then
sed -i -e "/Package: mariadb-plugin-mroonga/,/^$/d" debian/control
sed -i -e "/Package: mariadb-plugin-spider/,/^$/d" debian/control
sed -i -e "/Package: mariadb-plugin-oqgraph/,/^$/d" debian/control
- sed -i -e "/usr\/lib\/mysql\/plugin\/ha_sphinx.so/d" debian/mariadb-server-10.3.install
+ sed -i -e "/usr\/lib\/mysql\/plugin\/ha_sphinx.so/d" debian/mariadb-server-10.4.install
sed -i -e "/Package: libmariadbd-dev/,/^$/d" debian/control
fi
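
Two of the sed tweaks above are worth a note: the verbose-build removal now ends at the next blank line instead of a fixed two-line window, and the test-suite removal is anchored with $ so the range can only start at the exact 'Package: mariadb-test' stanza, never at a package whose name merely begins with mariadb-test (such as mariadb-test-data, handled on the previous line). Roughly, the difference is:

  # anchored: deletes only the mariadb-test stanza
  sed '/Package: mariadb-test$/,/^$/d' -i debian/control
  # unanchored: could also start the range at 'Package: mariadb-test-data'
  sed '/Package: mariadb-test/,/^$/d' -i debian/control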
diff --git a/debian/changelog b/debian/changelog
index dec49bd4282..053727796d0 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+mariadb-10.4 (1:10.4.0) unstable; urgency=low
+
+ * Initial release.
+
+ -- Otto Kekäläinen <otto@debian.org> Sun, 05 Aug 2018 00:06:30 -0500
+
mariadb-10.3 (1:10.3.0) unstable; urgency=low
* Bump epoch to supersede 1:10.1.29-6 in Debian
diff --git a/debian/control b/debian/control
index a2ab4970db1..2366cbc9fc2 100644
--- a/debian/control
+++ b/debian/control
@@ -1,4 +1,4 @@
-Source: mariadb-10.3
+Source: mariadb-10.4
Section: database
Priority: optional
Maintainer: MariaDB Developers <maria-developers@lists.launchpad.net>
@@ -38,6 +38,53 @@ Homepage: http://mariadb.org/
Vcs-Browser: https://github.com/MariaDB/server/
Vcs-Git: https://github.com/MariaDB/server.git
+Package: libmariadb3
+Architecture: any
+Multi-Arch: same
+Section: libs
+Depends: mariadb-common,
+ ${misc:Depends},
+ ${shlibs:Depends}
+Conflicts: libmariadbclient18 (<< 10.2.0),
+ mariadb-galera-server-10.0 (<< 10.0.5),
+ mariadb-galera-server-5.5 (<< 5.5.33),
+ mariadb-server-10.0 (<< 10.0.5),
+ mariadb-server-5.1,
+ mariadb-server-5.2,
+ mariadb-server-5.3,
+ mariadb-server-5.5 (<< 5.5.33)
+Breaks: libmariadbclient18 (<< ${source:Version})
+Replaces: libmariadbclient18 (<< ${source:Version})
+Description: MariaDB database client library
+ MariaDB is a fast, stable and true multi-user, multi-threaded SQL database
+ server. SQL (Structured Query Language) is the most popular database query
+ language in the world. The main goals of MariaDB are speed, robustness and
+ ease of use.
+ .
+ This package includes the client library.
+
+Package: libmariadb3-compat
+Architecture: any
+Section: libs
+Depends: libmariadb3,
+ mariadb-common,
+ ${misc:Depends},
+ ${shlibs:Depends}
+Breaks: libmysqlclient19,
+ libmysqlclient20
+Replaces: libmysqlclient19,
+ libmysqlclient20
+Provides: libmysqlclient19,
+ libmysqlclient20
+Description: MariaDB database client library MySQL compat package
+ MariaDB is a fast, stable and true multi-user, multi-threaded SQL database
+ server. SQL (Structured Query Language) is the most popular database query
+ language in the world. The main goals of MariaDB are speed, robustness and
+ ease of use.
+ .
+ This package includes the client runtime libraries that simulate and replace
+ the equivalents found in MySQL 5.6 and 5.7 (mysqlclient19 and 20).
+
Package: libmariadb-dev
Architecture: any
Multi-Arch: same
@@ -90,53 +137,6 @@ Description: MariaDB Connector/C, compatibility symlinks
This package includes compatibility symlinks to allow sources expecting the
MySQL client libraries to be built against MariaDB Connector/C.
-Package: libmariadb3
-Architecture: any
-Multi-Arch: same
-Section: libs
-Depends: mariadb-common,
- ${misc:Depends},
- ${shlibs:Depends}
-Conflicts: libmariadbclient18 (<< 10.2.0),
- mariadb-galera-server-10.0 (<< 10.0.5),
- mariadb-galera-server-5.5 (<< 5.5.33),
- mariadb-server-10.0 (<< 10.0.5),
- mariadb-server-5.1,
- mariadb-server-5.2,
- mariadb-server-5.3,
- mariadb-server-5.5 (<< 5.5.33)
-Breaks: libmariadbclient18 (<< ${source:Version})
-Replaces: libmariadbclient18 (<< ${source:Version})
-Description: MariaDB database client library
- MariaDB is a fast, stable and true multi-user, multi-threaded SQL database
- server. SQL (Structured Query Language) is the most popular database query
- language in the world. The main goals of MariaDB are speed, robustness and
- ease of use.
- .
- This package includes the client library.
-
-Package: libmariadb3-compat
-Architecture: any
-Section: libs
-Depends: libmariadb3,
- mariadb-common,
- ${misc:Depends},
- ${shlibs:Depends}
-Breaks: libmysqlclient19,
- libmysqlclient20
-Replaces: libmysqlclient19,
- libmysqlclient20
-Provides: libmysqlclient19,
- libmysqlclient20
-Description: MariaDB database client library MySQL compat package
- MariaDB is a fast, stable and true multi-user, multi-threaded SQL database
- server. SQL (Structured Query Language) is the most popular database query
- language in the world. The main goals of MariaDB are speed, robustness and
- ease of use.
- .
- This package includes the client runtime libraries that simulate and replace
- the equivalents found in MySQL 5.6 and 5.7 (mysqlclient19 and 20).
-
Package: libmariadbclient18
Section: libs
Architecture: any
@@ -169,6 +169,22 @@ Description: Virtual package to satisfy external libmysqlclient18 depends
This package provides compatibility symlinks so that binaries that expect to find
libmysqlclient.so.18 will automatically use libmariadb.so.3 instead.
+Package: libmariadbd19
+Architecture: any
+Multi-Arch: same
+Section: libs
+Depends: ${misc:Depends},
+ ${shlibs:Depends}
+Breaks: libmariadbd-dev (<< ${source:Version})
+Replaces: libmariadbd-dev (<< ${source:Version})
+Description: MariaDB embedded database, shared library
+ MariaDB is a fast, stable and true multi-user, multi-threaded SQL database
+ server. SQL (Structured Query Language) is the most popular database query
+ language in the world. The main goals of MariaDB are speed, robustness and
+ ease of use.
+ .
+ This package includes a shared library for embedded MariaDB applications.
+
Package: libmariadbd-dev
Architecture: any
Multi-Arch: same
@@ -189,22 +205,6 @@ Description: MariaDB embedded database, development files
.
This package includes the embedded server library development and header files.
-Package: libmariadbd19
-Architecture: any
-Multi-Arch: same
-Section: libs
-Depends: ${misc:Depends},
- ${shlibs:Depends}
-Breaks: libmariadbd-dev (<< ${source:Version})
-Replaces: libmariadbd-dev (<< ${source:Version})
-Description: MariaDB embedded database, shared library
- MariaDB is a fast, stable and true multi-user, multi-threaded SQL database
- server. SQL (Structured Query Language) is the most popular database query
- language in the world. The main goals of MariaDB are speed, robustness and
- ease of use.
- .
- This package includes a shared library for embedded MariaDB applications
-
Package: mysql-common
Architecture: all
Depends: ${misc:Depends},
@@ -232,7 +232,7 @@ Description: MariaDB database common files (e.g. /etc/mysql/conf.d/mariadb.cnf)
This package includes files needed by all versions of the client library
(e.g. /etc/mysql/conf.d/mariadb.cnf).
-Package: mariadb-client-core-10.3
+Package: mariadb-client-core-10.4
Architecture: any
Depends: mariadb-common (>= ${source:Version}),
${misc:Depends},
@@ -240,6 +240,7 @@ Depends: mariadb-common (>= ${source:Version}),
Conflicts: mariadb-client-10.0,
mariadb-client-10.1,
mariadb-client-10.2,
+ mariadb-client-10.3,
mariadb-client-5.1,
mariadb-client-5.2,
mariadb-client-5.3,
@@ -247,6 +248,7 @@ Conflicts: mariadb-client-10.0,
mariadb-client-core-10.0,
mariadb-client-core-10.1,
mariadb-client-core-10.2,
+ mariadb-client-core-10.3,
mariadb-client-core-5.1,
mariadb-client-core-5.2,
mariadb-client-core-5.3,
@@ -263,6 +265,7 @@ Conflicts: mariadb-client-10.0,
Replaces: mariadb-client-10.0,
mariadb-client-10.1,
mariadb-client-10.2,
+ mariadb-client-10.3,
mariadb-client-5.1,
mariadb-client-5.2,
mariadb-client-5.3,
@@ -270,6 +273,7 @@ Replaces: mariadb-client-10.0,
mariadb-client-core-10.0,
mariadb-client-core-10.1,
mariadb-client-core-10.2,
+ mariadb-client-core-10.3,
mariadb-client-core-5.1,
mariadb-client-core-5.2,
mariadb-client-core-5.3,
@@ -298,10 +302,10 @@ Description: MariaDB database core client binaries
.
This package includes the core client files, as used by Akonadi.
-Package: mariadb-client-10.3
+Package: mariadb-client-10.4
Architecture: any
Depends: debianutils (>=1.6),
- mariadb-client-core-10.3 (>= ${source:Version}),
+ mariadb-client-core-10.4 (>= ${source:Version}),
mariadb-common,
${misc:Depends},
${perl:Depends},
@@ -310,6 +314,7 @@ Conflicts: mariadb-client (<< ${source:Version}),
mariadb-client-10.0,
mariadb-client-10.1,
mariadb-client-10.2,
+ mariadb-client-10.3,
mariadb-client-5.1,
mariadb-client-5.2,
mariadb-client-5.3,
@@ -325,6 +330,7 @@ Replaces: mariadb-client (<< ${source:Version}),
mariadb-client-10.0,
mariadb-client-10.1,
mariadb-client-10.2,
+ mariadb-client-10.3,
mariadb-client-5.1,
mariadb-client-5.2,
mariadb-client-5.3,
@@ -356,13 +362,15 @@ Description: MariaDB database client binaries
This package includes the client binaries and the additional tools
innotop and mysqlreport.
-Package: mariadb-server-core-10.3
+Package: mariadb-server-core-10.4
Architecture: any
Depends: mariadb-common (>= ${source:Version}),
${misc:Depends},
${shlibs:Depends}
Conflicts: mariadb-server-core-10.0,
mariadb-server-core-10.1,
+ mariadb-server-core-10.2,
+ mariadb-server-core-10.3,
mariadb-server-core-5.1,
mariadb-server-core-5.2,
mariadb-server-core-5.3,
@@ -377,16 +385,17 @@ Conflicts: mariadb-server-core-10.0,
Breaks: mariadb-client-10.0,
mariadb-client-10.1,
mariadb-client-10.2,
- mariadb-client-10.3 (<< ${source:Version}),
- mariadb-server-10.3 (<< ${source:Version})
+ mariadb-client-10.3,
+ mariadb-server-10.3
Replaces: mariadb-client-10.0,
mariadb-client-10.1,
mariadb-client-10.2,
- mariadb-client-10.3 (<< ${source:Version}),
- mariadb-server-10.3 (<< ${source:Version}),
+ mariadb-client-10.3,
+ mariadb-server-10.3,
mariadb-server-core-10.0,
mariadb-server-core-10.1,
mariadb-server-core-10.2,
+ mariadb-server-core-10.3,
mariadb-server-core-5.1,
mariadb-server-core-5.2,
mariadb-server-core-5.3,
@@ -413,7 +422,7 @@ Description: MariaDB database core server files
.
This package includes the core server files, as used by Akonadi.
-Package: mariadb-server-10.3
+Package: mariadb-server-10.4
Architecture: any
Suggests: mailx,
mariadb-test,
@@ -423,14 +432,14 @@ Recommends: libhtml-template-perl
Pre-Depends: adduser (>= 3.40),
debconf,
mariadb-common (>= ${source:Version})
-Depends: galera-3 (>=25.3),
+Depends: galera-4 (>=26.4),
gawk,
iproute | iproute2,
libdbi-perl,
lsb-base (>= 3.0-10),
lsof,
- mariadb-client-10.3 (>= ${source:Version}),
- mariadb-server-core-10.3 (>= ${binary:Version}),
+ mariadb-client-10.4 (>= ${source:Version}),
+ mariadb-server-core-10.4 (>= ${binary:Version}),
passwd,
perl (>= 5.6),
psmisc,
@@ -442,6 +451,7 @@ Conflicts: mariadb-server (<< ${source:Version}),
mariadb-server-10.0,
mariadb-server-10.1,
mariadb-server-10.2,
+ mariadb-server-10.3,
mariadb-server-5.1,
mariadb-server-5.2,
mariadb-server-5.3,
@@ -464,6 +474,7 @@ Replaces: libmariadbclient-dev (<< 5.5.0),
mariadb-server-10.0,
mariadb-server-10.1,
mariadb-server-10.2,
+ mariadb-server-10.3,
mariadb-server-5.1,
mariadb-server-5.2,
mariadb-server-5.3,
@@ -492,11 +503,11 @@ Description: MariaDB database server binaries
Package: mariadb-server
Architecture: all
-Depends: mariadb-server-10.3 (>= ${source:Version}),
+Depends: mariadb-server-10.4 (>= ${source:Version}),
${misc:Depends}
Description: MariaDB database server (metapackage depending on the latest version)
This is an empty package that depends on the current "best" version of
- mariadb-server (currently mariadb-server-10.3), as determined by the MariaDB
+ mariadb-server (currently mariadb-server-10.4), as determined by the MariaDB
maintainers. Install this package if in doubt about which MariaDB
version you need. That will install the version recommended by the
package maintainers.
@@ -508,18 +519,34 @@ Description: MariaDB database server (metapackage depending on the latest versio
Package: mariadb-client
Architecture: all
-Depends: mariadb-client-10.3 (>= ${source:Version}),
+Depends: mariadb-client-10.4 (>= ${source:Version}),
${misc:Depends}
Description: MariaDB database client (metapackage depending on the latest version)
This is an empty package that depends on the current "best" version of
- mariadb-client (currently mariadb-client-10.3), as determined by the MariaDB
+ mariadb-client (currently mariadb-client-10.4), as determined by the MariaDB
maintainers. Install this package if in doubt about which MariaDB version
you want, as this is the one considered to be in the best shape.
+Package: mariadb-backup
+Architecture: any
+Breaks: mariadb-backup-10.1,
+ mariadb-backup-10.2
+Replaces: mariadb-backup-10.1,
+ mariadb-backup-10.2
+Depends: mariadb-client-core-10.4 (= ${binary:Version}),
+ ${misc:Depends},
+ ${shlibs:Depends}
+Description: Backup tool for MariaDB server
+ This backup tool is guaranteed to be compatible with MariaDB.
+ Based on Xtrabackup, but improved to work with MariaDB.
+ .
+ Please refer to the MariaDB Knowledge Base for more information on
+ how to use this tool.
+
Package: mariadb-plugin-connect
Architecture: any
Depends: libxml2,
- mariadb-server-10.3 (= ${binary:Version}),
+ mariadb-server-10.4 (= ${binary:Version}),
unixodbc,
${misc:Depends},
${shlibs:Depends}
@@ -537,8 +564,7 @@ Description: Connect storage engine for MariaDB
Package: mariadb-plugin-rocksdb
Architecture: amd64 arm64 mips64el ppc64el
-Depends: mariadb-server-10.3 (= ${binary:Version}),
- libzstd1,
+Depends: mariadb-server-10.4 (= ${binary:Version}),
${misc:Depends},
${shlibs:Depends}
Breaks: mariadb-rocksdb-engine-10.2,
@@ -554,7 +580,7 @@ Description: RocksDB storage engine for MariaDB
Package: mariadb-plugin-oqgraph
Architecture: any
Depends: libjudydebian1,
- mariadb-server-10.3 (= ${binary:Version}),
+ mariadb-server-10.4 (= ${binary:Version}),
${misc:Depends},
${shlibs:Depends}
Breaks: mariadb-oqgraph-engine-10.1,
@@ -570,18 +596,18 @@ Description: OQGraph storage engine for MariaDB
Package: mariadb-plugin-tokudb
Architecture: amd64
-Depends: mariadb-server-10.3 (= ${binary:Version}),
- libjemalloc1 (>= 3.0.0~) | libjemalloc2,
+Depends: libjemalloc1 (>= 3.0.0~) | libjemalloc2,
+ mariadb-server-10.4 (= ${binary:Version}),
${misc:Depends},
${shlibs:Depends}
Breaks: mariadb-server-10.0,
mariadb-server-10.1,
mariadb-server-10.2,
- mariadb-server-10.3 (<< ${source:Version})
+ mariadb-server-10.3
Replaces: mariadb-server-10.0,
mariadb-server-10.1,
mariadb-server-10.2,
- mariadb-server-10.3 (<< ${source:Version})
+ mariadb-server-10.3
Description: TokuDB storage engine for MariaDB
The TokuDB storage engine is for use in high-performance and write-intensive
environments, offering increased compression and better performance based
@@ -590,17 +616,17 @@ Description: TokuDB storage engine for MariaDB
Package: mariadb-plugin-mroonga
Architecture: any-alpha any-amd64 any-arm any-arm64 any-i386 any-ia64 any-mips64el any-mips64r6el any-mipsel any-mipsr6el any-nios2 any-powerpcel any-ppc64el any-sh3 any-sh4 any-tilegx
-Depends: mariadb-server-10.3 (= ${binary:Version}),
+Depends: mariadb-server-10.4 (= ${binary:Version}),
${misc:Depends},
${shlibs:Depends}
Breaks: mariadb-server-10.0,
mariadb-server-10.1,
mariadb-server-10.2,
- mariadb-server-10.3 (<< ${source:Version})
+ mariadb-server-10.3
Replaces: mariadb-server-10.0,
mariadb-server-10.1,
mariadb-server-10.2,
- mariadb-server-10.3 (<< ${source:Version})
+ mariadb-server-10.3
Description: Mroonga storage engine for MariaDB
Mroonga (formerly named Groonga Storage Engine) is a storage engine that
provides fast CJK-ready full text searching using column store.
@@ -608,17 +634,17 @@ Description: Mroonga storage engine for MariaDB
Package: mariadb-plugin-spider
Architecture: any
-Depends: mariadb-server-10.3 (= ${binary:Version}),
+Depends: mariadb-server-10.4 (= ${binary:Version}),
${misc:Depends},
${shlibs:Depends}
Breaks: mariadb-server-10.0,
mariadb-server-10.1,
mariadb-server-10.2,
- mariadb-server-10.3 (<< ${source:Version})
+ mariadb-server-10.3
Replaces: mariadb-server-10.0,
mariadb-server-10.1,
mariadb-server-10.2,
- mariadb-server-10.3 (<< ${source:Version})
+ mariadb-server-10.3
Description: Spider storage engine for MariaDB
The Spider storage engine with built-in sharding features. It supports
partitioning and xa transactions, and allows tables of different MariaDB
@@ -627,17 +653,17 @@ Description: Spider storage engine for MariaDB
Package: mariadb-plugin-cassandra
Architecture: any
-Depends: mariadb-server-10.3 (= ${binary:Version}),
+Depends: mariadb-server-10.4 (= ${binary:Version}),
${misc:Depends},
${shlibs:Depends}
Breaks: mariadb-server-10.0,
mariadb-server-10.1,
mariadb-server-10.2,
- mariadb-server-10.3 (<< ${source:Version})
+ mariadb-server-10.3
Replaces: mariadb-server-10.0,
mariadb-server-10.1,
mariadb-server-10.2,
- mariadb-server-10.3 (<< ${source:Version})
+ mariadb-server-10.3
Description: Cassandra storage engine for MariaDB
The Cassandra Storage Engine allows access to data in a Cassandra cluster from
MariaDB, combining the best of SQL and no-SQL worlds. Cassandra SE (storage
@@ -649,7 +675,7 @@ Description: Cassandra storage engine for MariaDB
Package: mariadb-plugin-gssapi-server
Architecture: any
Depends: libgssapi-krb5-2,
- mariadb-server-10.3,
+ mariadb-server-10.4 (= ${binary:Version}),
${misc:Depends},
${shlibs:Depends}
Breaks: mariadb-gssapi-server-10.1,
@@ -667,7 +693,7 @@ Description: GSSAPI authentication plugin for MariaDB server
Package: mariadb-plugin-gssapi-client
Architecture: any
Depends: libgssapi-krb5-2,
- mariadb-client-10.3,
+ mariadb-client-10.4 (= ${binary:Version}),
${misc:Depends},
${shlibs:Depends}
Breaks: mariadb-gssapi-client-10.1,
@@ -682,26 +708,10 @@ Description: GSSAPI authentication plugin for MariaDB client
.
This package contains the client parts.
-Package: mariadb-backup
-Architecture: any
-Breaks: mariadb-backup-10.1,
- mariadb-backup-10.2
-Replaces: mariadb-backup-10.1,
- mariadb-backup-10.2
-Depends: mariadb-client-core-10.3 (= ${binary:Version}),
- ${misc:Depends},
- ${shlibs:Depends}
-Description: Backup tool for MariaDB server
- This backup tool is guaranteed to be compatible with MariaDB.
- Based on Xtrabackup, but improved to work with MariaDB.
- .
- Plese refer to the MariaDB Knowledge Base on more information on
- how to use this tool.
-
Package: mariadb-plugin-cracklib-password-check
Architecture: any
Depends: libcrack2 (>= 2.9.0),
- mariadb-server-10.3,
+ mariadb-server-10.4 (= ${binary:Version}),
${misc:Depends},
${shlibs:Depends}
Description: CrackLib Password Validation Plugin for MariaDB
@@ -710,8 +720,8 @@ Description: CrackLib Password Validation Plugin for MariaDB
Package: mariadb-test
Architecture: any
-Depends: mariadb-client-10.3 (= ${binary:Version}),
- mariadb-server-10.3 (= ${binary:Version}),
+Depends: mariadb-client-10.4 (= ${binary:Version}),
+ mariadb-server-10.4 (= ${binary:Version}),
mariadb-test-data (= ${source:Version}),
${misc:Depends},
${shlibs:Depends}
@@ -719,6 +729,7 @@ Breaks: mariadb-server-5.5,
mariadb-test-10.0,
mariadb-test-10.1,
mariadb-test-10.2,
+ mariadb-test-10.3,
mariadb-test-5.5,
mysql-testsuite,
mysql-testsuite-5.5,
@@ -729,6 +740,7 @@ Replaces: mariadb-server-5.5,
mariadb-test-10.0,
mariadb-test-10.1,
mariadb-test-10.2,
+ mariadb-test-10.3,
mariadb-test-5.5,
mysql-testsuite,
mysql-testsuite-5.5,
diff --git a/debian/libmariadbd-dev.install b/debian/libmariadbd-dev.install
index 2c14af5ab31..13d961409cc 100644
--- a/debian/libmariadbd-dev.install
+++ b/debian/libmariadbd-dev.install
@@ -1,4 +1,4 @@
-usr/lib/*/libmysqld.a
usr/lib/*/libmariadbd.a
-usr/lib/*/libmysqld.so
usr/lib/*/libmariadbd.so
+usr/lib/*/libmysqld.a
+usr/lib/*/libmysqld.so
diff --git a/debian/mariadb-client-10.3.README.Debian b/debian/mariadb-client-10.4.README.Debian
index 64f0f509951..64f0f509951 100644
--- a/debian/mariadb-client-10.3.README.Debian
+++ b/debian/mariadb-client-10.4.README.Debian
diff --git a/debian/mariadb-client-10.3.docs b/debian/mariadb-client-10.4.docs
index c09092629c3..c09092629c3 100644
--- a/debian/mariadb-client-10.3.docs
+++ b/debian/mariadb-client-10.4.docs
diff --git a/debian/mariadb-client-10.3.install b/debian/mariadb-client-10.4.install
index 945bf77c689..945bf77c689 100644
--- a/debian/mariadb-client-10.3.install
+++ b/debian/mariadb-client-10.4.install
diff --git a/debian/mariadb-client-10.3.links b/debian/mariadb-client-10.4.links
index 4a504969246..4a504969246 100644
--- a/debian/mariadb-client-10.3.links
+++ b/debian/mariadb-client-10.4.links
diff --git a/debian/mariadb-client-10.3.manpages b/debian/mariadb-client-10.4.manpages
index 6f3e2bc188c..6f3e2bc188c 100644
--- a/debian/mariadb-client-10.3.manpages
+++ b/debian/mariadb-client-10.4.manpages
diff --git a/debian/mariadb-client-10.3.menu b/debian/mariadb-client-10.4.menu
index 1894442ca20..58f7ebfc45f 100644
--- a/debian/mariadb-client-10.3.menu
+++ b/debian/mariadb-client-10.4.menu
@@ -1,3 +1,3 @@
# According to /usr/share/menu/ policy 1.4, not /usr/share/doc/debian-policy/
-?package(mariadb-client-10.3):needs="text" section="Applications/Data Management"\
+?package(mariadb-client-10.4):needs="text" section="Applications/Data Management"\
title="Innotop" command="/usr/bin/innotop"
diff --git a/debian/mariadb-client-core-10.3.install b/debian/mariadb-client-core-10.4.install
index a2781309439..a2781309439 100644
--- a/debian/mariadb-client-core-10.3.install
+++ b/debian/mariadb-client-core-10.4.install
diff --git a/debian/mariadb-plugin-rocksdb.install b/debian/mariadb-plugin-rocksdb.install
index b9a6f7dc432..403c7f291b6 100644
--- a/debian/mariadb-plugin-rocksdb.install
+++ b/debian/mariadb-plugin-rocksdb.install
@@ -1,5 +1,5 @@
etc/mysql/conf.d/rocksdb.cnf etc/mysql/mariadb.conf.d
-usr/bin/mysql_ldb
usr/bin/myrocks_hotbackup
+usr/bin/mysql_ldb
usr/bin/sst_dump
usr/lib/mysql/plugin/ha_rocksdb.so
diff --git a/debian/mariadb-plugin-tokudb.install b/debian/mariadb-plugin-tokudb.install
index e8925c1d4f1..40dd0e78c65 100644
--- a/debian/mariadb-plugin-tokudb.install
+++ b/debian/mariadb-plugin-tokudb.install
@@ -3,6 +3,6 @@ etc/systemd/system/mariadb.service.d/tokudb.conf
usr/bin/tokuft_logprint
usr/bin/tokuftdump
usr/lib/mysql/plugin/ha_tokudb.so
-usr/share/doc/mariadb-server-10.3/README.md usr/share/doc/mariadb-plugin-tokudb/README.md
+usr/share/doc/mariadb-server-10.4/README.md usr/share/doc/mariadb-plugin-tokudb/README.md
usr/share/man/man1/tokuft_logprint.1
usr/share/man/man1/tokuftdump.1
diff --git a/debian/mariadb-server-10.3.config b/debian/mariadb-server-10.3.config
deleted file mode 100644
index 44640f2a441..00000000000
--- a/debian/mariadb-server-10.3.config
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-
-set -e
-
-. /usr/share/debconf/confmodule
-
-if [ -n "$DEBIAN_SCRIPT_DEBUG" ]; then set -v -x; DEBIAN_SCRIPT_TRACE=1; fi
-${DEBIAN_SCRIPT_TRACE:+ echo "#42#DEBUG# RUNNING $0 $*" 1>&2 }
-
-# Beware that there are two ypwhich one of them needs the 2>/dev/null!
-if test -n "`which ypwhich 2>/dev/null`" && ypwhich >/dev/null 2>&1; then
- db_input high mariadb-server-10.0/nis_warning || true
- db_go
-fi
-# only ask this question on fresh installs, during "reconfiguration" and when
-# not upgrading from an existing 5.0 installation.
-# there is also an additional check for empty root passwords in the
-# postinst script when the tools are available for us to use.
-if [ "$1" = "configure" ] && ([ -z "$2" ] && [ ! -e "/var/lib/mysql/debian-5.0.flag" ] ) || [ "$1" = "reconfigure" ]; then
- while :; do
- RET=""
- db_input high mysql-server/root_password || true
- db_go
- db_get mysql-server/root_password
- # if password isn't empty we ask for password verification
- if [ -z "$RET" ]; then
- db_fset mysql-server/root_password seen false
- db_fset mysql-server/root_password_again seen false
- break
- fi
- ROOT_PW="$RET"
- db_input high mysql-server/root_password_again || true
- db_go
- db_get mysql-server/root_password_again
- if [ "$RET" == "$ROOT_PW" ]; then
- ROOT_PW=''
- break
- fi
- db_fset mysql-server/password_mismatch seen false
- db_input critical mysql-server/password_mismatch
- db_set mysql-server/root_password ""
- db_set mysql-server/root_password_again ""
- db_go
- done
-fi
diff --git a/debian/mariadb-server-10.3.README.Debian b/debian/mariadb-server-10.4.README.Debian
index 1e8b86f867c..5a05f196042 100644
--- a/debian/mariadb-server-10.3.README.Debian
+++ b/debian/mariadb-server-10.4.README.Debian
@@ -2,12 +2,7 @@
=============================
You may never ever delete the mysql user "root". Although it has no password
set, the unix_socket plugin ensures that it can only be used locally as the root
-user. The credentials in /etc/mysql/debian.cnf specify the user are used by the
-init scripts to stop the server and perform logrotation. So in most of the
-time you can fix the situation by making sure that the /etc/mysql/debian.cnf
-file specifies the root user and no password.
-
-This used to be the debian-sys-maint user which is no longer used.
+user.
* WHAT TO DO AFTER UPGRADES:
============================
@@ -57,7 +52,13 @@ the DB server over the network:
sudo /usr/bin/mysql -e "GRANT ALL ON *.* TO 'USERNAME'@'%' IDENTIFIED BY 'password' WITH GRANT OPTION"
-Scripts should run as a user have the required grants and be identified via unix_socket.
+Scripts should run as a user that has the required grants and is authenticated via
+unix_socket.
+
+It is wise to run scripts as the "mysql" system user. Like root,
+mysql@localhost is created by default to have all privileges in MariaDB
+and to use unix_socket authentication. But scripts running under "mysql"
+won't have system-wide root so they won't be able to corrupt your system.
If you are too tired to type the password in every time and unix_socket auth
doesn't suit your needs, you can store it in the file $HOME/.my.cnf. It should
@@ -67,18 +68,6 @@ can read it. Every other configuration parameter can be stored there, too.
For more information see the MariaDB manual in /usr/share/doc/mariadb-doc or
https://mariadb.com/kb/en/configuring-mariadb-with-mycnf/.
-ATTENTION: It is necessary, that a ~/.my.cnf from root always contains a "user"
-line wherever there is a "password" line, else, the Debian maintenance
-scripts, that use /etc/mysql/debian.cnf, will use the username
-"root" but the password that is in root's .my.cnf. Also note,
-that every change you make in the /root/.my.cnf will affect the mysql cron
-script, too.
-
- # an example of $HOME/.my.cnf
- [client]
- user = your-mysql-username
- password = enter-your-good-new-password-here
-
* FURTHER NOTES ON REPLICATION
===============================
If the MySQL server is acting as a replication slave, you should not
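
The reworked README leans entirely on unix_socket authentication: both root@localhost and mysql@localhost are created with all privileges and authenticate by their Unix identity, so no password needs to be stored anywhere. A minimal sketch of a maintenance command run as the unprivileged mysql system user (the query itself is just an example):

  sudo -u mysql mysql -e 'SELECT CURRENT_USER()'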
diff --git a/debian/mariadb-server-10.4.config b/debian/mariadb-server-10.4.config
new file mode 100644
index 00000000000..1929c370d6d
--- /dev/null
+++ b/debian/mariadb-server-10.4.config
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -e
+
+. /usr/share/debconf/confmodule
+
+if [ -n "$DEBIAN_SCRIPT_DEBUG" ]; then set -v -x; DEBIAN_SCRIPT_TRACE=1; fi
+${DEBIAN_SCRIPT_TRACE:+ echo "#42#DEBUG# RUNNING $0 $*" 1>&2 }
+
+# Beware that there are two ypwhich implementations and one of them needs the 2>/dev/null!
+if test -n "`which ypwhich 2>/dev/null`" && ypwhich >/dev/null 2>&1; then
+ db_input high mariadb-server-10.0/nis_warning || true
+ db_go
+fi
diff --git a/debian/mariadb-server-10.3.dirs b/debian/mariadb-server-10.4.dirs
index 5057fe806c3..5057fe806c3 100644
--- a/debian/mariadb-server-10.3.dirs
+++ b/debian/mariadb-server-10.4.dirs
diff --git a/debian/mariadb-server-10.3.install b/debian/mariadb-server-10.4.install
index 9447446a6b5..a9cbbfc8a99 100644
--- a/debian/mariadb-server-10.3.install
+++ b/debian/mariadb-server-10.4.install
@@ -38,7 +38,8 @@ usr/bin/wsrep_sst_mysqldump
usr/bin/wsrep_sst_rsync
usr/lib/mysql/plugin/auth_ed25519.so
usr/lib/mysql/plugin/auth_pam.so
-usr/lib/mysql/plugin/auth_socket.so
+usr/lib/mysql/plugin/auth_pam_tool_dir/auth_pam_tool
+usr/lib/mysql/plugin/auth_pam_v1.so
usr/lib/mysql/plugin/disks.so
usr/lib/mysql/plugin/file_key_management.so
usr/lib/mysql/plugin/ha_archive.so
@@ -55,8 +56,8 @@ usr/lib/mysql/plugin/server_audit.so
usr/lib/mysql/plugin/simple_password_check.so
usr/lib/mysql/plugin/sql_errlog.so
usr/lib/mysql/plugin/wsrep_info.so
-usr/share/apport/package-hooks/source_mariadb-10.3.py
-usr/share/doc/mariadb-server-10.3/mysqld.sym.gz
+usr/share/apport/package-hooks/source_mariadb-10.4.py
+usr/share/doc/mariadb-server-10.4/mysqld.sym.gz
usr/share/man/man1/aria_chk.1
usr/share/man/man1/aria_dump_log.1
usr/share/man/man1/aria_ftdump.1
diff --git a/debian/mariadb-server-10.3.logcheck.ignore.paranoid b/debian/mariadb-server-10.4.logcheck.ignore.paranoid
index 00cc5c3e29d..00cc5c3e29d 100644
--- a/debian/mariadb-server-10.3.logcheck.ignore.paranoid
+++ b/debian/mariadb-server-10.4.logcheck.ignore.paranoid
diff --git a/debian/mariadb-server-10.3.logcheck.ignore.server b/debian/mariadb-server-10.4.logcheck.ignore.server
index a0b4792ecda..a0b4792ecda 100644
--- a/debian/mariadb-server-10.3.logcheck.ignore.server
+++ b/debian/mariadb-server-10.4.logcheck.ignore.server
diff --git a/debian/mariadb-server-10.3.logcheck.ignore.workstation b/debian/mariadb-server-10.4.logcheck.ignore.workstation
index a0b4792ecda..a0b4792ecda 100644
--- a/debian/mariadb-server-10.3.logcheck.ignore.workstation
+++ b/debian/mariadb-server-10.4.logcheck.ignore.workstation
diff --git a/debian/mariadb-server-10.3.mysql-server.logrotate b/debian/mariadb-server-10.4.mysql-server.logrotate
index 4111a276dc3..4111a276dc3 100644
--- a/debian/mariadb-server-10.3.mysql-server.logrotate
+++ b/debian/mariadb-server-10.4.mysql-server.logrotate
diff --git a/debian/mariadb-server-10.3.mysql.default b/debian/mariadb-server-10.4.mysql.default
index 146c5a87a84..146c5a87a84 100644
--- a/debian/mariadb-server-10.3.mysql.default
+++ b/debian/mariadb-server-10.4.mysql.default
diff --git a/debian/mariadb-server-10.3.mysql.init b/debian/mariadb-server-10.4.mysql.init
index 35a52d5d8db..bcc366e95b7 100644
--- a/debian/mariadb-server-10.3.mysql.init
+++ b/debian/mariadb-server-10.4.mysql.init
@@ -158,7 +158,7 @@ case "${1:-''}" in
if ! mysqld_status check_dead warn; then
log_end_msg 1
- log_failure_msg "Please stop MariaDB manually and read /usr/share/doc/mariadb-server-10.3/README.Debian.gz!"
+ log_failure_msg "Please stop MariaDB manually and read /usr/share/doc/mariadb-server-10.4/README.Debian.gz!"
exit -1
else
log_end_msg 0
diff --git a/debian/mariadb-server-10.3.postinst b/debian/mariadb-server-10.4.postinst
index 7874a691268..4532085169e 100644
--- a/debian/mariadb-server-10.3.postinst
+++ b/debian/mariadb-server-10.4.postinst
@@ -24,35 +24,6 @@ invoke() {
fi
}
-MYSQL_BOOTSTRAP="/usr/sbin/mysqld --bootstrap --user=mysql --disable-log-bin --skip-grant-tables --default-storage-engine=myisam"
-
-set_mysql_rootpw() {
- # forget we ever saw the password. don't use reset to keep the seen status
- db_set mysql-server/root_password ""
- db_set mysql-server/root_password_again ""
-
- tfile=`mktemp`
- if [ ! -f "$tfile" ]; then
- return 1
- fi
-
- # this avoids us having to call "test" or "[" on $rootpw
- cat << EOF > $tfile
-USE mysql;
-SET sql_log_bin=0;
-UPDATE user SET password=PASSWORD("$rootpw") WHERE user='root';
-FLUSH PRIVILEGES;
-EOF
- if grep -q 'PASSWORD("")' $tfile; then
- retval=0
- else
- $MYSQL_BOOTSTRAP <$tfile
- retval=$?
- fi
- rm -f $tfile
- return $retval
-}
-
case "$1" in
configure)
# This is needed because mysql_install_db removes the pid file in /var/run
@@ -135,7 +106,7 @@ EOF
# Clean up old flags before setting new one
rm -f $mysql_datadir/debian-*.flag
# Flag data dir to avoid downgrades
- touch $mysql_datadir/debian-10.3.flag
+ touch $mysql_datadir/debian-10.4.flag
# initiate databases. Output is not allowed by debconf :-(
# This will fail if we are upgrading an existing database; in this case
@@ -149,79 +120,25 @@ EOF
$ERR_LOGGER
set -e
-
# To avoid downgrades.
touch $mysql_statedir/debian-$VER.flag
- ## On every reconfiguration the maintenance user is recreated.
- #
- # - It is easier to regenerate the password every time but as people
- # use fancy rsync scripts and file alteration monitors, the existing
- # password is used and existing files not touched.
- # - The mysqld statement is like that in mysql_install_db because the
- # server is not already running. This has some implications:
- # - The amount of newlines and semicolons in the query is important!
- # - GRANT is not possible with --skip-grant-tables and "INSERT
- # (user,host..) VALUES" is not --ansi compliant
- # - The echo is just for readability. ash's buildin has no "-e" so use /bin/echo.
- # - The Super_priv, Show_db_priv, Create_tmp_table_priv and Lock_tables_priv
- # may not be present as old Woody 3.23 databases did not have it and the
- # admin might not already have run mysql_upgrade which adds them.
- # As the binlog cron scripts to need at least the Super_priv, I do first
- # the old query which always succeeds and then the new which may or may not.
-
- # recreate the credentials file if not present or without mysql_upgrade stanza
+ # On new installations the root user can connect via unix_socket.
+ # On upgrades, however, the maintenance scripts may still rely on the
+ # debian-sys-maint user and the credentials in /etc/mysql/debian.cnf.
+ # All tools use --defaults-file=/etc/mysql/debian.cnf, and while that
+ # is not needed for new installations, we keep passing the option for
+ # the sake of upgrades and thus need /etc/mysql/debian.cnf to exist,
+ # even if it is empty.
dc=$mysql_cfgdir/debian.cnf;
- if [ -e "$dc" -a -n "`fgrep mysql_upgrade $dc 2>/dev/null`" ]; then
- pass="`sed -n 's/^[ ]*password *= *// p' $dc | head -n 1`"
- else
- pass=`perl -e 'print map{("a".."z","A".."Z",0..9)[int(rand(62))]}(1..16)'`;
- if [ ! -d "$mysql_cfgdir" ]; then install -o 0 -g 0 -m 0755 -d $mysql_cfgdir; fi
- umask 066
+ if [ ! -e "$dc" ]; then
cat /dev/null > $dc
- umask 022
echo "# Automatically generated for Debian scripts. DO NOT TOUCH!" >>$dc
- echo "[client]" >>$dc
- echo "host = localhost" >>$dc
- echo "user = debian-sys-maint" >>$dc
- echo "password = $pass" >>$dc
- echo "socket = $mysql_rundir/mysqld.sock" >>$dc
- echo "[mysql_upgrade]" >>$dc
- echo "host = localhost" >>$dc
- echo "user = debian-sys-maint" >>$dc
- echo "password = $pass" >>$dc
- echo "socket = $mysql_rundir/mysqld.sock" >>$dc
- echo "basedir = /usr" >>$dc
fi
- # If this dir chmod go+w then the admin did it. But this file should not.
+ # Keep it only root-readable, as it always was
chown 0:0 $dc
chmod 0600 $dc
- replace_query=`/bin/echo -e \
- "USE mysql;\n" \
- "SET sql_mode='';\n" \
- "REPLACE INTO user SET " \
- " host='localhost', user='debian-sys-maint', password=password('$pass'), " \
- " Select_priv='Y', Insert_priv='Y', Update_priv='Y', Delete_priv='Y', " \
- " Create_priv='Y', Drop_priv='Y', Reload_priv='Y', Shutdown_priv='Y', " \
- " Process_priv='Y', File_priv='Y', Grant_priv='Y', References_priv='Y', " \
- " Index_priv='Y', Alter_priv='Y', Super_priv='Y', Show_db_priv='Y', "\
- " Create_tmp_table_priv='Y', Lock_tables_priv='Y', Execute_priv='Y', "\
- " Repl_slave_priv='Y', Repl_client_priv='Y', Create_view_priv='Y', "\
- " Show_view_priv='Y', Create_routine_priv='Y', Alter_routine_priv='Y', "\
- " Create_user_priv='Y', Event_priv='Y', Trigger_priv='Y',"\
- " ssl_cipher='', x509_issuer='', x509_subject='';"`;
-
- db_get mysql-server/root_password && rootpw="$RET"
- if ! set_mysql_rootpw; then
- db_input high mysql-server/error_setting_password || true
- db_go
- fi
-
- set +e
- echo "$replace_query" | $MYSQL_BOOTSTRAP 2>&1 | $ERR_LOGGER
- set -e
-
# If there is a real AppArmor profile, we reload it.
# If the default empty profile is installed, then we remove any old
# profile that may be loaded.
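
With the debian-sys-maint machinery gone, /etc/mysql/debian.cnf survives only as a (possibly empty) stub so that tooling which still passes it keeps working across upgrades. A hedged example of the style of invocation the maintainer scripts use (mysqladmin ping is illustrative; the preinst hunk further down removes a MYADMIN variable built the same way):

  mysqladmin --defaults-file=/etc/mysql/debian.cnf ping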
diff --git a/debian/mariadb-server-10.3.postrm b/debian/mariadb-server-10.4.postrm
index 6ab40df2e12..bb9ba8cec68 100644
--- a/debian/mariadb-server-10.3.postrm
+++ b/debian/mariadb-server-10.4.postrm
@@ -49,9 +49,9 @@ if [ "$1" = "purge" -a ! \( -x /usr/sbin/mysqld -o -L /usr/sbin/mysqld \) ]; the
rm -f /var/log/mysql.{log,err}{,.0,.[1234567].gz}
rm -rf /var/log/mysql
- db_input high mariadb-server-10.3/postrm_remove_databases || true
+ db_input high mariadb-server-10.4/postrm_remove_databases || true
db_go || true
- db_get mariadb-server-10.3/postrm_remove_databases || true
+ db_get mariadb-server-10.4/postrm_remove_databases || true
if [ "$RET" = "true" ]; then
# never remove the debian.cnf when the databases are still existing
# else we ran into big trouble on the next install!
diff --git a/debian/mariadb-server-10.3.preinst b/debian/mariadb-server-10.4.preinst
index cbfc4640c41..08e424bc712 100644
--- a/debian/mariadb-server-10.3.preinst
+++ b/debian/mariadb-server-10.4.preinst
@@ -13,7 +13,6 @@ if [ -n "$DEBIAN_SCRIPT_DEBUG" ]; then set -v -x; DEBIAN_SCRIPT_TRACE=1; fi
${DEBIAN_SCRIPT_TRACE:+ echo "#42#DEBUG# RUNNING $0 $*" 1>&2 }
export PATH=$PATH:/sbin:/usr/sbin:/bin:/usr/bin
-MYADMIN="/usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf"
mysql_datadir=/var/lib/mysql
mysql_upgradedir=/var/lib/mysql-upgrade
@@ -49,7 +48,7 @@ stop_server() {
################################ main() ##########################
-this_version=10.3
+this_version=10.4
max_upgradeable_version=5.7
# Check if a flag file is found that indicates a previous MariaDB or MySQL
@@ -113,7 +112,7 @@ fi
# Instead simply move the old datadir and create a new for this_version.
if [ ! -z "$downgrade_detected" ]
then
- db_input critical mariadb-server-10.3/old_data_directory_saved || true
+ db_input critical mariadb-server-10.4/old_data_directory_saved || true
db_go
echo "The file $mysql_datadir/debian-$found_version.flag indicates a" 1>&2
echo "version that cannot automatically be upgraded. Therefore the" 1>&2
diff --git a/debian/mariadb-server-10.3.prerm b/debian/mariadb-server-10.4.prerm
index 0371bbfc844..0371bbfc844 100644
--- a/debian/mariadb-server-10.3.prerm
+++ b/debian/mariadb-server-10.4.prerm
diff --git a/debian/mariadb-server-10.3.py b/debian/mariadb-server-10.4.py
index 99d3f0fd913..b44228daf8b 100644
--- a/debian/mariadb-server-10.3.py
+++ b/debian/mariadb-server-10.4.py
@@ -1,4 +1,4 @@
-'''apport package hook for mariadb-10.3
+'''apport package hook for mariadb-10.4
(c) 2009 Canonical Ltd.
Author: Mathias Gug <mathias.gug@canonical.com>
@@ -20,7 +20,7 @@ def _add_my_conf_files(report, filename):
continue
def add_info(report):
- attach_conffiles(report, 'mariadb-server-10.3', conffiles=None)
+ attach_conffiles(report, 'mariadb-server-10.4', conffiles=None)
key = 'Logs' + path_to_key('/var/log/daemon.log')
report[key] = ""
for line in read_file('/var/log/daemon.log').split('\n'):
diff --git a/debian/mariadb-server-10.3.templates b/debian/mariadb-server-10.4.templates
index 8ef29264678..a761ad76c6a 100644
--- a/debian/mariadb-server-10.3.templates
+++ b/debian/mariadb-server-10.4.templates
@@ -7,7 +7,7 @@
# Even minor modifications require translation updates and such
# changes should be coordinated with translators and reviewers.
-Template: mariadb-server-10.3/old_data_directory_saved
+Template: mariadb-server-10.4/old_data_directory_saved
Type: note
_Description: The old data directory will be saved at new location
A file named /var/lib/mysql/debian-*.flag exists on this system.
@@ -19,7 +19,7 @@ _Description: The old data directory will be saved at new location
.
Please manually export/import your data (e.g. with mysqldump) if needed.
-Template: mariadb-server-10.3/nis_warning
+Template: mariadb-server-10.4/nis_warning
Type: note
#flag:translate!:3,5
_Description: Important note for NIS/YP users
@@ -33,7 +33,7 @@ _Description: Important note for NIS/YP users
.
/var/lib/mysql: drwxr-xr-x mysql mysql
-Template: mariadb-server-10.3/postrm_remove_databases
+Template: mariadb-server-10.4/postrm_remove_databases
Type: boolean
Default: false
_Description: Remove all MariaDB databases?
@@ -66,7 +66,7 @@ _Description: Unable to set password for the MariaDB "root" user
.
You should check the account's password after the package installation.
.
- Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file
+ Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file
for more information.
Template: mysql-server/password_mismatch
diff --git a/debian/mariadb-server-10.3.triggers b/debian/mariadb-server-10.4.triggers
index d1f5f5e14f1..d1f5f5e14f1 100644
--- a/debian/mariadb-server-10.3.triggers
+++ b/debian/mariadb-server-10.4.triggers
diff --git a/debian/mariadb-server-core-10.3.install b/debian/mariadb-server-core-10.4.install
index 46c116b618d..46c116b618d 100644
--- a/debian/mariadb-server-core-10.3.install
+++ b/debian/mariadb-server-core-10.4.install
diff --git a/debian/mariadb-test-data.install b/debian/mariadb-test-data.install
index 9e5356d061f..26b69c2941b 100644
--- a/debian/mariadb-test-data.install
+++ b/debian/mariadb-test-data.install
@@ -1,7 +1,7 @@
usr/share/mysql/mysql-test/collections
usr/share/mysql/mysql-test/include
-usr/share/mysql/mysql-test/plugin
usr/share/mysql/mysql-test/main
+usr/share/mysql/mysql-test/plugin
usr/share/mysql/mysql-test/std_data
usr/share/mysql/mysql-test/suite
usr/share/mysql/mysql-test/unstable-tests
diff --git a/debian/not-installed b/debian/not-installed
index c151cf753e6..5341f6ca438 100644
--- a/debian/not-installed
+++ b/debian/not-installed
@@ -7,15 +7,15 @@ usr/lib/sysusers.d/sysusers.conf
usr/lib/tmpfiles.d/tmpfiles.conf
usr/lib/mysql/plugin/JavaWrappers.jar # These are only built if JNI/libjawt.so is installed from e.g. openjdk-8-jre-headless
usr/lib/mysql/plugin/JdbcInterface.jar # These are only built if JNI/libjawt.so is installed from e.g. openjdk-8-jre-headless
-usr/share/doc/mariadb-server-10.3/COPYING
-usr/share/doc/mariadb-server-10.3/COPYING.AGPLv3
-usr/share/doc/mariadb-server-10.3/COPYING.GPLv2
-usr/share/doc/mariadb-server-10.3/COPYING.thirdparty
-usr/share/doc/mariadb-server-10.3/CREDITS
-usr/share/doc/mariadb-server-10.3/EXCEPTIONS-CLIENT
-usr/share/doc/mariadb-server-10.3/INSTALL-BINARY
-usr/share/doc/mariadb-server-10.3/PATENTS
-usr/share/doc/mariadb-server-10.3/README-wsrep
+usr/share/doc/mariadb-server-10.4/COPYING
+usr/share/doc/mariadb-server-10.4/COPYING.AGPLv3
+usr/share/doc/mariadb-server-10.4/COPYING.GPLv2
+usr/share/doc/mariadb-server-10.4/COPYING.thirdparty
+usr/share/doc/mariadb-server-10.4/CREDITS
+usr/share/doc/mariadb-server-10.4/EXCEPTIONS-CLIENT
+usr/share/doc/mariadb-server-10.4/INSTALL-BINARY
+usr/share/doc/mariadb-server-10.4/PATENTS
+usr/share/doc/mariadb-server-10.4/README-wsrep
usr/share/groonga/COPYING
usr/share/groonga-normalizer-mysql/lgpl-2.0.txt
usr/share/groonga-normalizer-mysql/README.md
diff --git a/debian/po/POTFILES.in b/debian/po/POTFILES.in
index f2af9ac992f..9499932064b 100644
--- a/debian/po/POTFILES.in
+++ b/debian/po/POTFILES.in
@@ -1 +1 @@
-[type: gettext/rfc822deb] mariadb-server-10.3.templates
+[type: gettext/rfc822deb] mariadb-server-10.4.templates
diff --git a/debian/po/ar.po b/debian/po/ar.po
index 69ff80df4fa..c351e81bfbf 100644
--- a/debian/po/ar.po
+++ b/debian/po/ar.po
@@ -6,7 +6,7 @@
msgid ""
msgstr ""
"Project-Id-Version: templates\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2007-05-01 13:04+0300\n"
"Last-Translator: Ossama M. Khayat <okhayat@yahoo.com>\n"
@@ -27,13 +27,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -42,7 +42,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -50,20 +50,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "ملاحظة هامة لمستخدمي NIS/YP"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -71,7 +71,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid ""
#| "You should also check the permissions and the owner of the /var/lib/mysql "
@@ -83,13 +83,13 @@ msgstr "عليك أيضاً أن تقوم بالتأكد من صلاحيات
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "إزالة جميع قواعد بيانات MariaDB؟"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -97,7 +97,7 @@ msgstr "الدليل /var/lib/mysql الذي يحتوي قواعد بيانات
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -108,13 +108,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "كلمة المرور الجديدة لمستخد \"root\" الخاص بـMariaDB:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -124,7 +124,7 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid "If that field is left blank, the password will not be changed."
msgid "If this field is left blank, the password will not be changed."
@@ -132,7 +132,7 @@ msgstr "إن ترك الحقل فارغاً، فلن يتم تغيير كلمة
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
#, fuzzy
#| msgid "New password for the MariaDB \"root\" user:"
msgid "Repeat password for the MariaDB \"root\" user:"
@@ -140,13 +140,13 @@ msgstr "كلمة المرور الجديدة لمستخد \"root\" الخاص ب
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr "تعذر تعيين كلمة مرور للمستخدم \"root\" الخاص بـMariaDB."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -158,7 +158,7 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "You should check the account's password after tha package installation."
@@ -167,27 +167,27 @@ msgstr "يجب عليك التحقق من كلمة مرور الحساب عقب
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "Please read the /usr/share/doc/mysql-server-5.1/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
-"الرجاء قراءة المل٠/usr/share/doc/mariadb-server-10.3/README.Debian للمزيد "
+"الرجاء قراءة المل٠/usr/share/doc/mariadb-server-10.4/README.Debian للمزيد "
"من المعلومات."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
diff --git a/debian/po/ca.po b/debian/po/ca.po
index 7cc9bb340af..e2ba65054ba 100644
--- a/debian/po/ca.po
+++ b/debian/po/ca.po
@@ -5,7 +5,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-4.1\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2004-01-31 19:20GMT\n"
"Last-Translator: Aleix Badia i Bosch <abadia@ica.es>\n"
@@ -17,13 +17,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -32,7 +32,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -40,14 +40,14 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid "Important note for NIS/YP users!"
msgid "Important note for NIS/YP users"
@@ -55,7 +55,7 @@ msgstr "Nota important pels usuaris de NIS/YP"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -63,7 +63,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
@@ -71,13 +71,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -85,7 +85,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -94,13 +94,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -108,25 +108,25 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "If this field is left blank, the password will not be changed."
msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
msgid "Repeat password for the MariaDB \"root\" user:"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -135,43 +135,43 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
#, fuzzy
#~| msgid "Should MySQL start on boot?"
#~ msgid "Start the MariaDB server on boot?"
-#~ msgstr "Voleu que el MariaDB s'iniciï a l'arrencada ?"
+#~ msgstr "Voleu que el MariaDB s'inici� a l'arrencada ?"
#, fuzzy
#~ msgid ""
#~ "The MariaDB server can be launched automatically at boot time or manually "
#~ "with the '/etc/init.d/mysql start' command."
#~ msgstr ""
-#~ "El MariaDB es pot executar a l'arrencada o només si executeu manualment '/"
-#~ "etc/init.d/mysql start'. Seleccioneu 'sí' si voleu que s'inicialitzi "
-#~ "automàticament."
+#~ "El MariaDB es pot executar a l'arrencada o nom�s si executeu manualment '/"
+#~ "etc/init.d/mysql start'. Seleccioneu 's�' si voleu que s'inicialitzi "
+#~ "autom�ticament."
#, fuzzy
#~ msgid ""
@@ -180,7 +180,7 @@ msgstr ""
#~ "permissions (the uid/gid may be different)."
#~ msgstr ""
#~ "Per utilitzar la base de dades de MySQL heu d'afegir un usuari i grup "
-#~ "equivalent al següent i assegurar-vos que el directori /var/lib/mysql "
+#~ "equivalent al seg�ent i assegurar-vos que el directori /var/lib/mysql "
#~ "tingui els permisos correctes."
#~ msgid ""
@@ -203,13 +203,13 @@ msgstr ""
#~ msgid ""
#~ "MySQL will only install if you have a non-numeric hostname that is "
#~ "resolvable via the /etc/hosts file. E.g. if the \"hostname\" command "
-#~ "returns \"myhostname\" then there must be a line like \"10.3.0.1 "
+#~ "returns \"myhostname\" then there must be a line like \"10.4.0.1 "
#~ "myhostname\"."
#~ msgstr ""
-#~ "El MySQL només s'instal·la en cas de tenir un nom d'ordinador central que "
-#~ "no sigui numèric i que es pugui resoldre a través del fitxer /etc/hosts. "
+#~ "El MySQL nom�s s'instal�la en cas de tenir un nom d'ordinador central que "
+#~ "no sigui num�ric i que es pugui resoldre a trav�s del fitxer /etc/hosts. "
#~ "Ex. si l'ordre \"hostname\" retorna \"myhostname\", llavors hi ha d'haver "
-#~ "una línia com la següent \"10.3.0.1 myhostname\"."
+#~ "una l�nia com la seg�ent \"10.4.0.1 myhostname\"."
#, fuzzy
#~ msgid ""
@@ -217,7 +217,7 @@ msgstr ""
#~ "is used in the start/stop and cron scripts. Don't delete."
#~ msgstr ""
#~ "Es crea un nou usuari de mysql \"debian-sys-maint\". S'utilitza per les "
-#~ "seqüències d'inicialització i aturada del cron, no el suprimiu."
+#~ "seq��ncies d'inicialitzaci� i aturada del cron, no el suprimiu."
#, fuzzy
#~ msgid ""
@@ -226,8 +226,8 @@ msgstr ""
#~ "there, never only the password!"
#~ msgstr ""
#~ "Recordeu posar una contrasenya al superusuari del MySQL. Si utilitzeu un "
-#~ "fitxer /root/.my.cnf, escriviu sempre allà les línies \"user\" i "
-#~ "\"password\".; mai només la contrasenya. Per a més informació feu una "
+#~ "fitxer /root/.my.cnf, escriviu sempre all� les l�nies \"user\" i "
+#~ "\"password\".; mai nom�s la contrasenya. Per a m�s informaci� feu una "
#~ "ullada a /usr/share/doc/mysql-server/README.Debian."
#, fuzzy
@@ -241,12 +241,12 @@ msgstr ""
#~ "Networking is disabled by default for security reasons. You can enable it "
#~ "by commenting out the skip-networking option in /etc/mysql/my.cnf."
#~ msgstr ""
-#~ "La xarxa està inhabilitada per defecte per a raons de seguretat. La podeu "
-#~ "habilitar descomentant l'opció de skip-networking del fitxer /etc/mysql/"
+#~ "La xarxa est� inhabilitada per defecte per a raons de seguretat. La podeu "
+#~ "habilitar descomentant l'opci� de skip-networking del fitxer /etc/mysql/"
#~ "my.cnf."
#~ msgid "security and update notice"
-#~ msgstr "Avís de seguretat i actualització"
+#~ msgstr "Av�s de seguretat i actualitzaci�"
#~ msgid "Please run mysql_fix_privilege_tables !"
#~ msgstr "Executeu mysql_fix_privilege_tables"
@@ -274,18 +274,18 @@ msgstr ""
#~ "mysql_fix_privilege_tables script during this upgrade regardless of if "
#~ "the server is currently running or not!"
#~ msgstr ""
-#~ "Les últimes versions de MySQL tenen un sistema de privilegis més "
+#~ "Les �ltimes versions de MySQL tenen un sistema de privilegis m�s "
#~ "elaborat. Per utilitzar-lo cal afegir nous camps a les taules de la base "
-#~ "de dades \"mysql\". Aquesta tasca la realitza la seqüència "
-#~ "mysql_fix_privilege_tables durant l'actualització independentment de si "
-#~ "el servidor s'està executant o no!"
+#~ "de dades \"mysql\". Aquesta tasca la realitza la seq��ncia "
+#~ "mysql_fix_privilege_tables durant l'actualitzaci� independentment de si "
+#~ "el servidor s'est� executant o no!"
#~ msgid ""
#~ "This script is not supposed to give any user more rights that he had "
#~ "before, if you encounter such a case, please contact me."
#~ msgstr ""
-#~ "Aquesta seqüència no assigna privilegis d'usuari diferents als que ja "
-#~ "tenia, en cas que us trobéssiu en aquesta situació, poseu-vos en contacte "
+#~ "Aquesta seq��ncia no assigna privilegis d'usuari diferents als que ja "
+#~ "tenia, en cas que us trob�ssiu en aquesta situaci�, poseu-vos en contacte "
#~ "amb mi."
#~ msgid ""
@@ -295,7 +295,7 @@ msgstr ""
#~ msgstr ""
#~ "Voleu suprimir tots els continguts de /var/lib/mysql quan es purgui el "
#~ "paquet mysql-server amb l'ordre \"dpkg --purge mysql-server\". (ex. "
-#~ "suprimir-ho tot inclòs la configuració) ? (per defecte no)"
+#~ "suprimir-ho tot incl�s la configuraci�) ? (per defecte no)"
#~ msgid "Make MySQL reachable via network?"
#~ msgstr "Voleu fer accessible el MySQL via xarxa ?"
@@ -305,7 +305,7 @@ msgstr ""
#~ "necessary for use on a single computer and could be a security problem."
#~ msgstr ""
#~ "Voleu que el MySQL escolti a un port TCP accessible des de la xarxa ? "
-#~ "Aquesta opció no és imprescindible en ordinadors aïllats i podria "
+#~ "Aquesta opci� no �s imprescindible en ordinadors a�llats i podria "
#~ "provocar un problema de seguretat."
#~ msgid "Enable chroot mode?"
@@ -318,6 +318,6 @@ msgstr ""
#~ "files."
#~ msgstr ""
#~ "El MySQL es pot executar en una entorn tancat al directori /var/lib/"
-#~ "mysql_jail perquè els usuaris no puguin modificar cap fitxer fora del "
-#~ "directori.Aquesta opció també augmenta la seguretat envers els crackers, "
+#~ "mysql_jail perqu� els usuaris no puguin modificar cap fitxer fora del "
+#~ "directori.Aquesta opci� tamb� augmenta la seguretat envers els crackers, "
#~ "jaque no poden modificar els fitxers del sistema."
diff --git a/debian/po/cs.po b/debian/po/cs.po
index a991150fafb..c0d66613637 100644
--- a/debian/po/cs.po
+++ b/debian/po/cs.po
@@ -14,7 +14,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-5.1\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2007-05-01 13:01+0200\n"
"Last-Translator: Miroslav Kure <kurem@debian.cz>\n"
@@ -26,13 +26,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -41,7 +41,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -49,20 +49,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "Důležitá poznámka pro uživatele NIS/YP"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -70,7 +70,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid ""
#| "You should also check the permissions and the owner of the /var/lib/mysql "
@@ -83,13 +83,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "Odstranit všechny MariaDB databáze?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -99,7 +99,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -111,13 +111,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "Nové heslo MariaDB uživatele \"root\":"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -127,7 +127,7 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid "If that field is left blank, the password will not be changed."
msgid "If this field is left blank, the password will not be changed."
@@ -135,7 +135,7 @@ msgstr "Ponecháte-li pole prázdné, heslo se nezmění."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
#, fuzzy
#| msgid "New password for the MySQL \"root\" user:"
msgid "Repeat password for the MariaDB \"root\" user:"
@@ -143,13 +143,13 @@ msgstr "Nové heslo MariaDB uživatele \"root\":"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr "Nelze nastavit heslo MariaDB uživatele \"root\""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -161,7 +161,7 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "You should check the account's password after tha package installation."
@@ -170,26 +170,26 @@ msgstr "Po instalaci balíku byste měli heslo ověřit."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "Please read the /usr/share/doc/mysql-server-5.1/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
-"Více informací naleznete v /usr/share/doc/mariadb-server-10.3/README.Debian."
+"Více informací naleznete v /usr/share/doc/mariadb-server-10.4/README.Debian."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
@@ -310,13 +310,13 @@ msgstr ""
#~ msgid ""
#~ "MySQL will only install if you have a non-numeric hostname that is "
#~ "resolvable via the /etc/hosts file. E.g. if the \"hostname\" command "
-#~ "returns \"myhostname\" then there must be a line like \"10.3.0.1 "
+#~ "returns \"myhostname\" then there must be a line like \"10.4.0.1 "
#~ "myhostname\"."
#~ msgstr ""
#~ "MySQL se nainstaluje pouze v případě, že používáte nenumerické jméno "
#~ "poÄítaÄe, které se dá pÅ™eložit pÅ™es soubor /etc/hosts. NapÅ™. když příkaz "
#~ "\"hostname\" vrátí \"diamond\", tak v /etc/hosts musí existovat obdobný "
-#~ "řádek jako \"10.3.0.1 diamond\"."
+#~ "řádek jako \"10.4.0.1 diamond\"."
#~ msgid ""
#~ "A new mysql user \"debian-sys-maint\" will be created. This mysql account "
diff --git a/debian/po/da.po b/debian/po/da.po
index 7684daee58a..06d40cd58af 100644
--- a/debian/po/da.po
+++ b/debian/po/da.po
@@ -14,7 +14,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-4.1\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2007-05-30 22:41+0200\n"
"Last-Translator: Claus Hindsgaul <claus.hindsgaul@gmail.com>\n"
@@ -27,13 +27,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -42,7 +42,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -50,20 +50,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "Vigtig oplysning til NIS/YP-brugere"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -71,7 +71,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid ""
#| "You should also check the permissions and the owner of the /var/lib/mysql "
@@ -80,17 +80,17 @@ msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
msgstr ""
-"Du bør også tjekke filrettighederne og ejerskabet af mappen /var/lib/mysql:"
+"Du b�r ogs� tjekke filrettighederne og ejerskabet af mappen /var/lib/mysql:"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "Fjern alle MariaDB-databaser?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -100,43 +100,43 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
"the data should be kept."
msgstr ""
"Hvis du fjerner MariaDB-pakken for senere at installere en nyere version, "
-"eller hvis en anden mariadb-server-pakke allerede benytter den, bør dataene "
+"eller hvis en anden mariadb-server-pakke allerede benytter den, b�r dataene "
"bevares."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "Ny adgangskode for MariaDB's \"root\"-bruger:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
msgstr ""
-"Selvom det ikke kræves, anbefales det kraftigt, at du sætter en adgangskode "
+"Selvom det ikke kr�ves, anbefales det kraftigt, at du s�tter en adgangskode "
"for MariaDB's administrationsbruger \"root\"."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid "If that field is left blank, the password will not be changed."
msgid "If this field is left blank, the password will not be changed."
-msgstr "Hvis du lader dette felt stå tomt, vil adgangskoden ikke blive ændret."
+msgstr "Hvis du lader dette felt st� tomt, vil adgangskoden ikke blive �ndret."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
#, fuzzy
#| msgid "New password for the MySQL \"root\" user:"
msgid "Repeat password for the MariaDB \"root\" user:"
@@ -144,61 +144,61 @@ msgstr "Ny adgangskode for MariaDB's \"root\"-bruger:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
-msgstr "Kunne ikke sætte adgangskoden for MariaDB's \"root\"-bruger"
+msgstr "Kunne ikke s�tte adgangskoden for MariaDB's \"root\"-bruger"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
"because of a communication problem with the MariaDB server."
msgstr ""
"Der opstod en fejl, da adgangskoden for MariaDB's administrationsbruger blev "
-"forsøgt ændret. Dette kan være sket, fordi brugeren allerede har en "
+"fors�gt �ndret. Dette kan v�re sket, fordi brugeren allerede har en "
"adgangskode, eller fordi der var problemer med at kommunikere med MariaDB-"
"serveren."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
-msgstr "Du bør tjekke kontoens adgangskode efter pakkeinstallationen."
+msgstr "Du b�r tjekke kontoens adgangskode efter pakkeinstallationen."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "Please read the /usr/share/doc/mysql-server-5.1/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
-"Se filen /usr/share/doc/mariadb-server-10.3/README.Debian for yderligere "
+"Se filen /usr/share/doc/mariadb-server-10.4/README.Debian for yderligere "
"oplysninger."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
#~ msgid "Really proceed with downgrade?"
-#~ msgstr "Ønsker du virkelig at fortsætte nedgraderingen?"
+#~ msgstr "�nsker du virkelig at forts�tte nedgraderingen?"
#~ msgid "A file named /var/lib/mysql/debian-*.flag exists on this system."
#~ msgstr ""
-#~ "Der er en fil med navnet /var/lib/mysql/debian-*.flag på dette system."
+#~ "Der er en fil med navnet /var/lib/mysql/debian-*.flag p� dette system."
#, fuzzy
#~| msgid ""
@@ -208,7 +208,7 @@ msgstr ""
#~ "Such a file is an indication that a mariadb-server package with a higher "
#~ "version has been installed previously."
#~ msgstr ""
-#~ "Sådan en fil tyder på at der tidligere har været installeret en højere "
+#~ "S�dan en fil tyder p� at der tidligere har v�ret installeret en h�jere "
#~ "version af mariadb-server-pakken."
#~ msgid ""
@@ -232,7 +232,7 @@ msgstr ""
#~ "To use MariaDB, the following entries for users and groups should be "
#~ "added to the system:"
#~ msgstr ""
-#~ "Nedenstående linjer for brugere og grupper skal tilføjes dette system for "
+#~ "Nedenst�ende linjer for brugere og grupper skal tilf�jes dette system for "
#~ "at benytte MariaDB:"
#~ msgid "Cannot upgrade if ISAM tables are present!"
@@ -246,18 +246,18 @@ msgstr ""
#~ "mysql-server-4.1 gets removed nevertheless just reinstall it to convert "
#~ "those tables."
#~ msgstr ""
-#~ "Nyere versioner af MySQL kan ikke længere benytte det gamle ISAM-"
-#~ "tabelformat, og det er derfor nødvendigt at konvertere dine tabeller til "
+#~ "Nyere versioner af MySQL kan ikke l�ngere benytte det gamle ISAM-"
+#~ "tabelformat, og det er derfor n�dvendigt at konvertere dine tabeller til "
#~ "f.eks. MyISAM forud for opgraderingen med \"mysql_convert_table_format\" "
#~ "eller \"ALTER TABLE x ENGINE=MyISAM\". Installationen af mysql-server-5.1 "
#~ "afbrydes nu. Skulle din gamle mysql-server-4.1 alligevel bliver "
-#~ "afinstalleret, så geninstallér den blot og konverter tabellerne."
+#~ "afinstalleret, s� geninstall�r den blot og konverter tabellerne."
#~ msgid ""
#~ "Support MySQL connections from hosts running Debian \"sarge\" or older?"
#~ msgstr ""
-#~ "Understøt MySQL-forbindelser fra maskiner, der kører Debian \"Sarge\" "
-#~ "eller ældre?"
+#~ "Underst�t MySQL-forbindelser fra maskiner, der k�rer Debian \"Sarge\" "
+#~ "eller �ldre?"
#~ msgid ""
#~ "In old versions of MySQL clients on Debian, passwords were not stored "
@@ -265,10 +265,10 @@ msgstr ""
#~ "PHP) from hosts running Debian 3.1 Sarge will not be able to connect to "
#~ "recent accounts or accounts whose password have been changed."
#~ msgstr ""
-#~ "Gamle udgaver af MySQL-klienter på Debian gemte ikke adgangskoderne "
+#~ "Gamle udgaver af MySQL-klienter p� Debian gemte ikke adgangskoderne "
#~ "sikkert. Dette er blevet forbedret siden da, men klienter (f.eks. PHP) "
-#~ "fra maskiner, der kører Debian 3.1 Sarge vil ikke kunne forbinde til "
-#~ "nyere konti eller konti, hvis adgangskode er blevet ændret."
+#~ "fra maskiner, der k�rer Debian 3.1 Sarge vil ikke kunne forbinde til "
+#~ "nyere konti eller konti, hvis adgangskode er blevet �ndret."
#~ msgid ""
#~ "To use mysql you must install an equivalent user and group to the "
@@ -276,7 +276,7 @@ msgstr ""
#~ "permissions (the uid/gid may be different)."
#~ msgstr ""
#~ "For at kunne bruge mysql skal du installere en bruger og en gruppe, der "
-#~ "svarer til nedenstående, og sikre dig at /var/lib/mysql har de rigtige "
+#~ "svarer til nedenst�ende, og sikre dig at /var/lib/mysql har de rigtige "
#~ "adgangsrettigheder (uid/gid kan afvige)."
#~ msgid ""
@@ -295,14 +295,14 @@ msgstr ""
#~ msgid ""
#~ "If you do not provide a password no changes will be made to the account."
-#~ msgstr "Hvis du ikke angiver en adgangskode, vil kontoen ikke blive ændret."
+#~ msgstr "Hvis du ikke angiver en adgangskode, vil kontoen ikke blive �ndret."
#~ msgid ""
#~ "When installation finishes, you should verify that the account is "
#~ "properly protected with a password (see README.Debian for more "
#~ "information)."
#~ msgstr ""
-#~ "Når installationen afsluttes, bør du tjekke at kontoen er ordentligt "
+#~ "N�r installationen afsluttes, b�r du tjekke at kontoen er ordentligt "
#~ "beskyttet med en adgangskode (se README.Debian for yderligere "
#~ "oplysninger)."
@@ -314,12 +314,12 @@ msgstr ""
#~ "corrupted! This script also enhances the privilege tables but is not "
#~ "supposed to give any user more rights that he had before,"
#~ msgstr ""
-#~ "Du skal køre \"mysql_upgrade\" efter opgraderingen, da tabellerne eller "
-#~ "kan blive ødelagt! Dette script forbedrer også rettighedstabellerne, men "
+#~ "Du skal k�re \"mysql_upgrade\" efter opgraderingen, da tabellerne eller "
+#~ "kan blive �delagt! Dette script forbedrer ogs� rettighedstabellerne, men "
#~ "burde ikke give nogen bruger flere rettigheder, end han havde tidligere,"
#~ msgid "Please also read http://www.mysql.com/doc/en/Upgrade.html"
-#~ msgstr "Læs også http://www.mysql.com/doc/en/Upgrade.html"
+#~ msgstr "L�s ogs� http://www.mysql.com/doc/en/Upgrade.html"
#~ msgid "Install Hints"
#~ msgstr "Installationstips"
@@ -331,17 +331,17 @@ msgstr ""
#~ msgstr ""
#~ "Ved opgraderinger fra MySQL 3.23, der fulgte med Debian Woody, kan de "
#~ "symbolske /var/lib/mysql or /var/log/mysql blive fjernet ved et uheld, og "
-#~ "må genskabes manuelt."
+#~ "m� genskabes manuelt."
#~ msgid ""
#~ "MySQL will only install if you have a non-numeric hostname that is "
#~ "resolvable via the /etc/hosts file. E.g. if the \"hostname\" command "
-#~ "returns \"myhostname\" then there must be a line like \"10.3.0.1 "
+#~ "returns \"myhostname\" then there must be a line like \"10.4.0.1 "
#~ "myhostname\"."
#~ msgstr ""
-#~ "MySQL vil kun blive installeret, hvis du har et ikke-numerisk værtsnavn, "
-#~ "som kan slås op i filen /ets/hosts. Hvis f.eks. kommandoen \"hostname\" "
-#~ "svarer med \"mitvaertsnavn\", skal du have en linje a'la \"10.3.0.1 "
+#~ "MySQL vil kun blive installeret, hvis du har et ikke-numerisk v�rtsnavn, "
+#~ "som kan sl�s op i filen /ets/hosts. Hvis f.eks. kommandoen \"hostname\" "
+#~ "svarer med \"mitvaertsnavn\", skal du have en linje a'la \"10.4.0.1 "
#~ "mitvaertsnavn\" i /etc/hosts."
#~ msgid ""
@@ -356,8 +356,8 @@ msgstr ""
#~ "root/.my.cnf, always write the \"user\" and the \"password\" lines in "
#~ "there, never only the password!"
#~ msgstr ""
-#~ "Husk at sætte en ADGANGSKODE for MySQLs root-bruger! Hvis du bruger en /"
-#~ "etc/.my.cnf, så skriv altid \"user\"- og \"password\"-linjer ind her, "
+#~ "Husk at s�tte en ADGANGSKODE for MySQLs root-bruger! Hvis du bruger en /"
+#~ "etc/.my.cnf, s� skriv altid \"user\"- og \"password\"-linjer ind her, "
#~ "ikke kun adgangskoden!"
#~ msgid ""
@@ -365,7 +365,7 @@ msgstr ""
#~ "by all MySQL versions, not necessarily only the one you are about to "
#~ "purge?"
#~ msgstr ""
-#~ "Skal jeg fjerne hele mappetræet /var/lib/mysql, som benyttes af alle "
+#~ "Skal jeg fjerne hele mappetr�et /var/lib/mysql, som benyttes af alle "
#~ "MySQL-versioner, ikke kun den version, du er ved at slette?"
#~ msgid ""
@@ -373,7 +373,7 @@ msgstr ""
#~ "make use of it mysql_fix_privilege_tables must be executed manually. The "
#~ "script is not supposed to give any user more rights that he had before,"
#~ msgstr ""
-#~ "En sjælden gang imellem, f.eks. ved nye hovedversioner, sker det at "
-#~ "rettighedssystemet forbedres. For at gøre brug af dette, skal "
-#~ "mysql_fix_privilege_tables køres manuelt. Scriptet vil ikke give nogen "
+#~ "En sj�lden gang imellem, f.eks. ved nye hovedversioner, sker det at "
+#~ "rettighedssystemet forbedres. For at g�re brug af dette, skal "
+#~ "mysql_fix_privilege_tables k�res manuelt. Scriptet vil ikke give nogen "
#~ "bruger flere rettigheder, end vedkommende havde tidligere,"
diff --git a/debian/po/de.po b/debian/po/de.po
index 2e75376cc21..19e03ad88bd 100644
--- a/debian/po/de.po
+++ b/debian/po/de.po
@@ -15,7 +15,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-5.1_5.1.37-1_de\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2009-08-27 22:41+0200\n"
"Last-Translator: Thomas Mueller <thomas.mueller@tmit.eu>\n"
@@ -29,13 +29,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -44,7 +44,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -52,20 +52,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "Wichtige Anmerkung für NIS/YP-Benutzer!"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -75,7 +75,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
@@ -85,13 +85,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "Alle MariaDB-Datenbanken entfernen?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -101,7 +101,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -113,13 +113,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "Neues Passwort für den MariaDB »root«-Benutzer:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -129,25 +129,25 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "If this field is left blank, the password will not be changed."
msgstr "Wenn dieses Feld freigelassen wird, wird das Passwort nicht geändert."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
msgid "Repeat password for the MariaDB \"root\" user:"
msgstr "Wiederholen Sie das Passwort für den MariaDB-»root«-Benutzer:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr "Konnte für den MariaDB-»root«-Benutzer kein Passwort setzen"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -160,7 +160,7 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr ""
"Sie sollten das Passwort des administrativen Benutzers nach der "
@@ -168,13 +168,13 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "Please read the /usr/share/doc/mariadb-server-5.1/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
"Für weitere Informationen lesen Sie /usr/share/doc/mariadb-server-5.1/README."
@@ -182,13 +182,13 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr "Passwort-Eingabefehler"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
"Die beiden von Ihnen eingegebenen Passwörter sind nicht identisch. Bitte "
diff --git a/debian/po/es.po b/debian/po/es.po
index 927bf904be7..825cda108ab 100644
--- a/debian/po/es.po
+++ b/debian/po/es.po
@@ -40,7 +40,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-5.1_5.0.24-3\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2007-05-28 22:21+0200\n"
"Last-Translator: Javier Fernández-Sanguino <jfs@debian.org>\n"
@@ -52,13 +52,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -67,7 +67,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -75,20 +75,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "Nota importante para los usuarios de NIS/YP"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -96,7 +96,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid ""
#| "You should also check the permissions and the owner of the /var/lib/mysql "
@@ -110,13 +110,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "¿Desea eliminar todas las bases de datos MariaDB?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -126,7 +126,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -138,13 +138,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "Nueva contraseña para el usuario «root» de MariaDB:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -154,7 +154,7 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid "If that field is left blank, the password will not be changed."
msgid "If this field is left blank, the password will not be changed."
@@ -162,7 +162,7 @@ msgstr "No se modificará la contraseña si deja el espacio en blanco."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
#, fuzzy
#| msgid "New password for the MySQL \"root\" user:"
msgid "Repeat password for the MariaDB \"root\" user:"
@@ -170,13 +170,13 @@ msgstr "Nueva contraseña para el usuario «root» de MariaDB:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr "No se pudo fijar la contraseña para el usuario «root» de MariaDB"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -189,7 +189,7 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr ""
"Debería comprobar la contraseña de la cuenta después de la instalación del "
@@ -197,27 +197,27 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "Please read the /usr/share/doc/mysql-server-5.1/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
-"Consulte /usr/share/doc/mariadb-server-10.3/README.Debian para más "
+"Consulte /usr/share/doc/mariadb-server-10.4/README.Debian para más "
"información."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
@@ -354,13 +354,13 @@ msgstr ""
#~ msgid ""
#~ "MySQL will only install if you have a non-numeric hostname that is "
#~ "resolvable via the /etc/hosts file. E.g. if the \"hostname\" command "
-#~ "returns \"myhostname\" then there must be a line like \"10.3.0.1 "
+#~ "returns \"myhostname\" then there must be a line like \"10.4.0.1 "
#~ "myhostname\"."
#~ msgstr ""
#~ "Sólo se instalará MySQL si tiene un nombre de equipo que no sea una "
#~ "dirección IP y pueda resolverse a través del archivo /etc/hosts. Por "
#~ "ejemplo, si la orden «hostname» devuelve «MiNombreEquipo» entonces deberá "
-#~ "existir una línea «10.3.0.1 MiNombreEquipo» en dicho archivo."
+#~ "existir una línea «10.4.0.1 MiNombreEquipo» en dicho archivo."
#~ msgid ""
#~ "A new mysql user \"debian-sys-maint\" will be created. This mysql account "
diff --git a/debian/po/eu.po b/debian/po/eu.po
index 91b7f1e341d..4c22d656c79 100644
--- a/debian/po/eu.po
+++ b/debian/po/eu.po
@@ -6,7 +6,7 @@
msgid ""
msgstr ""
"Project-Id-Version: eu\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2009-07-29 11:59+0200\n"
"Last-Translator: Piarres Beobide <pi@beobide.net>\n"
@@ -20,13 +20,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -35,7 +35,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -43,20 +43,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "NIS/YP erabiltzaileentzat ohar garrantzitsua"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -64,7 +64,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid ""
#| "You should also check the permissions and the owner of the /var/lib/mysql "
@@ -78,13 +78,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "Ezabatu MariaDB datubase guztiak?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -93,7 +93,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -105,13 +105,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "MariaDB \"root\" erabiltzailearen pasahitz berria:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -121,7 +121,7 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid "If that field is left blank, the password will not be changed."
msgid "If this field is left blank, the password will not be changed."
@@ -129,19 +129,19 @@ msgstr "Eremua hau zurian utziaz gero ez da pasahitza aldatuko."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
msgid "Repeat password for the MariaDB \"root\" user:"
msgstr "Errepikatu MariaDB \"root\" erabiltzailearen pasahitza:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr "Ezin da MariaDB \"root\" erabiltzailearen pasahitza ezarri"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -153,34 +153,34 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr ""
"Kontuaren pasahitza egiaztatu beharko zenuke paketea instalatu aurretik."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
-#| "Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+#| "Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
-"Mesedez irakurri /usr/share/doc/mariadb-server-10.3/README.Debian fitxategia "
+"Mesedez irakurri /usr/share/doc/mariadb-server-10.4/README.Debian fitxategia "
"xehetasun gehiagorako."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr "Pasahitz sarrera errorea"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr "Idatzi dituzun bi pasahitzak ez dira berdina. Mesedez saiatu berriz."
diff --git a/debian/po/fr.po b/debian/po/fr.po
index 192d7de496e..46ae1e7ffbc 100644
--- a/debian/po/fr.po
+++ b/debian/po/fr.po
@@ -7,7 +7,7 @@
msgid ""
msgstr ""
"Project-Id-Version: fr\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2009-08-08 14:56+0200\n"
"Last-Translator: Christian Perrier <bubulle@debian.org>\n"
@@ -22,13 +22,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -37,7 +37,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -45,20 +45,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "Note importante pour les utilisateurs NIS/YP"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -68,7 +68,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
@@ -78,13 +78,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "Faut-il supprimer toutes les bases de données MariaDB ?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -94,7 +94,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -105,13 +105,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "Nouveau mot de passe du superutilisateur de MariaDB :"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -121,26 +121,26 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "If this field is left blank, the password will not be changed."
msgstr "Si ce champ est laissé vide, le mot de passe ne sera pas changé."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
msgid "Repeat password for the MariaDB \"root\" user:"
msgstr "Confirmation du mot de passe du superutilisateur de MariaDB :"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr ""
"Impossible de changer le mot de passe de l'utilisateur « root » de MariaDB"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -152,7 +152,7 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr ""
"Vous devriez vérifier le mot de passe de ce compte après l'installation du "
@@ -160,27 +160,27 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
-#| "Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+#| "Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
-"Veuillez consulter le fichier /usr/share/doc/mysql-server-10.3/README.Debian "
+"Veuillez consulter le fichier /usr/share/doc/mysql-server-10.4/README.Debian "
"pour plus d'informations."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr "Erreur de saisie du mot de passe"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
"Le mot de passe et sa confirmation ne sont pas identiques. Veuillez "
diff --git a/debian/po/gl.po b/debian/po/gl.po
index cc17ee481a8..948cd8fd839 100644
--- a/debian/po/gl.po
+++ b/debian/po/gl.po
@@ -5,7 +5,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-5.1\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2007-04-20 09:44+0200\n"
"Last-Translator: Jacobo Tarrio <jtarrio@debian.org>\n"
@@ -17,13 +17,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -32,7 +32,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -40,20 +40,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "Nota importante para os usuarios de NIS/YP"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -61,7 +61,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid ""
#| "You should also check the permissions and the owner of the /var/lib/mysql "
@@ -75,13 +75,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "¿Eliminar tódalas bases de datos de MariaDB?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -91,7 +91,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -103,13 +103,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "Novo contrasinal para o usuario \"root\" de MariaDB:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -119,7 +119,7 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid "If that field is left blank, the password will not be changed."
msgid "If this field is left blank, the password will not be changed."
@@ -127,7 +127,7 @@ msgstr "Se deixa o campo en branco, non se ha cambiar o contrasinal."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
#, fuzzy
#| msgid "New password for the MySQL \"root\" user:"
msgid "Repeat password for the MariaDB \"root\" user:"
@@ -135,13 +135,13 @@ msgstr "Novo contrasinal para o usuario \"root\" de MariaDB:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr "Non se puido establecer o contrasinal do usuario \"root\" de MariaDB"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -153,7 +153,7 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "You should check the account's password after tha package installation."
@@ -162,27 +162,27 @@ msgstr "Debería comprobar o contrasinal da conta trala instalación do paquete.
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "Please read the /usr/share/doc/mysql-server-5.1/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
-"Consulte o ficheiro /usr/share/doc/mariadb-server-10.3/README.Debian para "
+"Consulte o ficheiro /usr/share/doc/mariadb-server-10.4/README.Debian para "
"máis información."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
diff --git a/debian/po/it.po b/debian/po/it.po
index d1761b879e4..336bfe3fa43 100644
--- a/debian/po/it.po
+++ b/debian/po/it.po
@@ -6,7 +6,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-5.1 5.1.37 italian debconf templates\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2009-08-08 11:03+0200\n"
"Last-Translator: Luca Monducci <luca.mo@tiscali.it>\n"
@@ -18,13 +18,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -33,7 +33,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -41,20 +41,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "Nota importante per gli utenti NIS/YP"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -64,7 +64,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
@@ -74,13 +74,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "Eliminare tutti i database MariaDB?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -90,7 +90,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -102,13 +102,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "Nuova password per l'utente «root» di MariaDB:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -118,25 +118,25 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "If this field is left blank, the password will not be changed."
msgstr "Se questo campo è lasciato vuoto, la password non viene cambiata."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
msgid "Repeat password for the MariaDB \"root\" user:"
msgstr "Ripetere la password per l'utente «root» di MariaDB:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr "Impossibile impostare la password per l'utente «root» di MariaDB"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -149,34 +149,34 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr ""
"Al termine dell'installazione si deve verificare la password dell'account."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
-#| "Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+#| "Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
"Per maggiori informazioni si consulti il file /usr/share/doc/mariadb-"
-"server-10.3/README.Debian."
+"server-10.4/README.Debian."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr "Errore di inserimento della password"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr "Le due password inserite sono diverse. Riprovare."
diff --git a/debian/po/ja.po b/debian/po/ja.po
index 8ca7993a140..ba7bb52763c 100644
--- a/debian/po/ja.po
+++ b/debian/po/ja.po
@@ -15,7 +15,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-5.1 5.1.37-1\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2009-09-01 08:25+0900\n"
"Last-Translator: Hideki Yamane (Debian-JP) <henrich@debian.or.jp>\n"
@@ -27,13 +27,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -42,7 +42,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -50,20 +50,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "NIS/YP ユーザã¸ã®é‡è¦ãªæ³¨æ„"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -73,7 +73,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
@@ -81,13 +81,13 @@ msgstr "/var/lib/mysql の所有者権限をチェックする必要もありま
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "ã™ã¹ã¦ã® MariaDB データベースを削除ã—ã¾ã™ã‹?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -97,7 +97,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -109,13 +109,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "MariaDB ã® \"root\" ユーザã«å¯¾ã™ã‚‹æ–°ã—ã„パスワード:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -125,25 +125,25 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "If this field is left blank, the password will not be changed."
msgstr "ã“ã®å€¤ã‚’空ã®ã¾ã¾ã«ã—ã¦ãŠã„ãŸå ´åˆã¯ã€ãƒ‘スワードã¯å¤‰æ›´ã•ã‚Œã¾ã›ã‚“。"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
msgid "Repeat password for the MariaDB \"root\" user:"
msgstr "MariaDB ã® \"root\" ユーザã«å¯¾ã™ã‚‹æ–°ã—ã„パスワード:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr "MariaDB ã® \"root\" ユーザã®ãƒ‘スワードを設定ã§ãã¾ã›ã‚“"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -155,33 +155,33 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr ""
"パッケージã®ã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«å¾Œã€ã‚¢ã‚«ã‚¦ãƒ³ãƒˆã®ãƒ‘スワードを確èªã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
-#| "Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+#| "Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
-"詳細㯠/usr/share/doc/mariadb-server-10.3/README.Debian ã‚’å‚ç…§ã—ã¦ãã ã•ã„。"
+"詳細㯠/usr/share/doc/mariadb-server-10.4/README.Debian ã‚’å‚ç…§ã—ã¦ãã ã•ã„。"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr "パスワード入力エラー"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr "入力ã•ã‚ŒãŸäºŒã¤ã®ãƒ‘スワードãŒä¸€è‡´ã—ã¾ã›ã‚“。å†å…¥åŠ›ã—ã¦ãã ã•ã„。"
diff --git a/debian/po/nb.po b/debian/po/nb.po
index 06acfd8c25c..4a3e7514513 100644
--- a/debian/po/nb.po
+++ b/debian/po/nb.po
@@ -6,7 +6,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql_nb\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2007-02-18 12:13+0100\n"
"Last-Translator: Bjørn Steensrud <bjornst@powertech.no>\n"
@@ -19,13 +19,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -34,7 +34,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -42,14 +42,14 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid "Important note for NIS/YP users!"
msgid "Important note for NIS/YP users"
@@ -57,7 +57,7 @@ msgstr "Viktig merknad for NIS/YP-brukere!"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -65,7 +65,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
@@ -73,13 +73,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -87,7 +87,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
#, fuzzy
#| msgid ""
#| "The script is about to remove the data directory /var/lib/mysql. If it is "
@@ -104,7 +104,7 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid "New password for MySQL \"root\" user:"
msgid "New password for the MariaDB \"root\" user:"
@@ -112,7 +112,7 @@ msgstr "Nytt passord for MariaDBs «root»-bruker:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid ""
#| "It is highly recommended that you set a password for the MySQL "
@@ -126,13 +126,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "If this field is left blank, the password will not be changed."
msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
#, fuzzy
#| msgid "New password for MySQL \"root\" user:"
msgid "Repeat password for the MariaDB \"root\" user:"
@@ -140,7 +140,7 @@ msgstr "Nytt passord for MariaDBs «root»-bruker:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid "Unable to set password for MySQL \"root\" user"
msgid "Unable to set password for the MariaDB \"root\" user"
@@ -148,7 +148,7 @@ msgstr "Klarer ikke angi passord for MariaDBs «root»-bruker"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "It seems an error occurred while setting the password for the MySQL "
@@ -167,27 +167,27 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
diff --git a/debian/po/nl.po b/debian/po/nl.po
index 65112a9f0a0..8dea3504592 100644
--- a/debian/po/nl.po
+++ b/debian/po/nl.po
@@ -6,9 +6,9 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-5.1 5.0.30-1\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
-"PO-Revision-Date: 2006-02-19 10.30+0100\n"
+"PO-Revision-Date: 2006-02-19 10.40+0100\n"
"Last-Translator: Thijs Kinkhorst <thijs@debian.org>\n"
"Language-Team: Debian-Dutch <debian-l10n-dutch@lists.debian.org>\n"
"Language: \n"
@@ -18,13 +18,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -33,7 +33,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -41,14 +41,14 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid "Important note for NIS/YP users!"
msgid "Important note for NIS/YP users"
@@ -56,7 +56,7 @@ msgstr "Belangrijke opmerking voor gebruikers van NIS/YP!"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -64,7 +64,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
@@ -72,13 +72,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -86,7 +86,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
#, fuzzy
#| msgid ""
#| "The script is about to remove the data directory /var/lib/mysql. If it is "
@@ -104,7 +104,7 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid "New password for MySQL \"root\" user:"
msgid "New password for the MariaDB \"root\" user:"
@@ -112,7 +112,7 @@ msgstr "Nieuw wachtwoord voor de MariaDB \"root\"-gebruiker:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid ""
#| "It is highly recommended that you set a password for the MySQL "
@@ -126,13 +126,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "If this field is left blank, the password will not be changed."
msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
#, fuzzy
#| msgid "New password for MySQL \"root\" user:"
msgid "Repeat password for the MariaDB \"root\" user:"
@@ -140,7 +140,7 @@ msgstr "Nieuw wachtwoord voor de MariaDB \"root\"-gebruiker:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid "Unable to set password for MySQL \"root\" user"
msgid "Unable to set password for the MariaDB \"root\" user"
@@ -148,7 +148,7 @@ msgstr "Kan het wachtwoord voor de MariaDB \"root\"-gebruiker niet instellen"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "It seems an error occurred while setting the password for the MySQL "
@@ -167,27 +167,27 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
diff --git a/debian/po/pt.po b/debian/po/pt.po
index 106802ee87b..2d1834deda3 100644
--- a/debian/po/pt.po
+++ b/debian/po/pt.po
@@ -6,7 +6,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-5.1\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2007-05-05 21:01+0100\n"
"Last-Translator: Miguel Figueiredo <elmig@debianpt.org>\n"
@@ -18,13 +18,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -33,7 +33,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -41,20 +41,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "Nota importante para utilizadores de NIS/YP"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -62,7 +62,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid ""
#| "You should also check the permissions and the owner of the /var/lib/mysql "
@@ -75,13 +75,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "Remover todas as bases de dados MariaDB?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -91,7 +91,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -103,13 +103,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "Nova palavra-passe para o utilizador \"root\" do MariaDB:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -119,7 +119,7 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid "If that field is left blank, the password will not be changed."
msgid "If this field is left blank, the password will not be changed."
@@ -128,7 +128,7 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
#, fuzzy
#| msgid "New password for the MySQL \"root\" user:"
msgid "Repeat password for the MariaDB \"root\" user:"
@@ -136,7 +136,7 @@ msgstr "Nova palavra-passe para o utilizador \"root\" do MariaDB:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr ""
"Não foi possível definir a palavra-passe para o utilizador \"root\" do "
@@ -144,7 +144,7 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -157,7 +157,7 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "You should check the account's password after tha package installation."
@@ -167,27 +167,27 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "Please read the /usr/share/doc/mysql-server-5.1/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
"Para mais informação por favor leia o ficheiro /usr/share/doc/mariadb-"
-"server-10.3/README.Debian."
+"server-10.4/README.Debian."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
diff --git a/debian/po/pt_BR.po b/debian/po/pt_BR.po
index cf072b0cc7a..31c9239f217 100644
--- a/debian/po/pt_BR.po
+++ b/debian/po/pt_BR.po
@@ -8,7 +8,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-5.1\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2007-04-21 15:59-0300\n"
"Last-Translator: André Luís Lopes <andrelop@debian.org>\n"
@@ -21,13 +21,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -36,7 +36,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -44,20 +44,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "Aviso importante para usuários NIS/YP"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -65,7 +65,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid ""
#| "You should also check the permissions and the owner of the /var/lib/mysql "
@@ -78,13 +78,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "Remover todas as bases de dados do MariaDB?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -94,7 +94,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -106,13 +106,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "Nova senha para o usuário \"root\" do MariaDB:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -122,7 +122,7 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid "If that field is left blank, the password will not be changed."
msgid "If this field is left blank, the password will not be changed."
@@ -130,7 +130,7 @@ msgstr "Caso este campo seja deixado em branco, a senha não sera mudada."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
#, fuzzy
#| msgid "New password for the MySQL \"root\" user:"
msgid "Repeat password for the MariaDB \"root\" user:"
@@ -138,13 +138,13 @@ msgstr "Nova senha para o usuário \"root\" do MariaDB:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr "Impossível definir senha para o usuário \"root\" do MariaDB"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -157,7 +157,7 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "You should check the account's password after tha package installation."
@@ -166,27 +166,27 @@ msgstr "Você deverá checar a senha dessa conta após a instalação deste paco
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "Please read the /usr/share/doc/mysql-server-5.1/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
-"Por favor, leia o arquivo /usr/share/doc/mariadb-server-10.3/README.Debian "
+"Por favor, leia o arquivo /usr/share/doc/mariadb-server-10.4/README.Debian "
"para maiores informações."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
@@ -322,13 +322,13 @@ msgstr ""
#~ msgid ""
#~ "MySQL will only install if you have a non-numeric hostname that is "
#~ "resolvable via the /etc/hosts file. E.g. if the \"hostname\" command "
-#~ "returns \"myhostname\" then there must be a line like \"10.3.0.1 "
+#~ "returns \"myhostname\" then there must be a line like \"10.4.0.1 "
#~ "myhostname\"."
#~ msgstr ""
#~ "O MySQL será instalado somente caso você possua um nome de host NÃO "
#~ "NUMÉRICO que possa ser resolvido através do arquivo /etc/hosts, ou seja, "
#~ "caso o comando \"hostname\" retorne \"myhostname\", uma linha como "
-#~ "\"10.3.0.1 myhostname\" deverá existir no arquivo /etc/hosts."
+#~ "\"10.4.0.1 myhostname\" deverá existir no arquivo /etc/hosts."
#~ msgid ""
#~ "A new mysql user \"debian-sys-maint\" will be created. This mysql account "
diff --git a/debian/po/ro.po b/debian/po/ro.po
index ad2516da21e..328573c0357 100644
--- a/debian/po/ro.po
+++ b/debian/po/ro.po
@@ -6,7 +6,7 @@
msgid ""
msgstr ""
"Project-Id-Version: po-debconf://mysql-dfsg\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2006-12-20 21:27+0200\n"
"Last-Translator: stan ioan-eugen <stan.ieugen@gmail.com>\n"
@@ -19,13 +19,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -34,7 +34,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -42,14 +42,14 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid "Important note for NIS/YP users!"
msgid "Important note for NIS/YP users"
@@ -57,7 +57,7 @@ msgstr "Notă importantă pentru utilizatorii NIS/YP!"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -65,7 +65,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
@@ -73,13 +73,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -87,7 +87,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
#, fuzzy
#| msgid ""
#| "The script is about to remove the data directory /var/lib/mysql. If it is "
@@ -104,7 +104,7 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid "New password for MySQL \"root\" user:"
msgid "New password for the MariaDB \"root\" user:"
@@ -112,7 +112,7 @@ msgstr "Noua parolă pentru utilizatorul „root†al MariaDB:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
#, fuzzy
#| msgid ""
#| "It is highly recommended that you set a password for the MySQL "
@@ -126,13 +126,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "If this field is left blank, the password will not be changed."
msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
#, fuzzy
#| msgid "New password for MySQL \"root\" user:"
msgid "Repeat password for the MariaDB \"root\" user:"
@@ -140,7 +140,7 @@ msgstr "Noua parolă pentru utilizatorul „root†al MariaDB:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid "Unable to set password for MySQL \"root\" user"
msgid "Unable to set password for the MariaDB \"root\" user"
@@ -148,7 +148,7 @@ msgstr "Nu s-a putut stabili parola pentru utilizatorul „root†al MariaDB"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
#| "It seems an error occurred while setting the password for the MySQL "
@@ -167,27 +167,27 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
diff --git a/debian/po/ru.po b/debian/po/ru.po
index 2c0bc7fee1f..baa70378169 100644
--- a/debian/po/ru.po
+++ b/debian/po/ru.po
@@ -17,7 +17,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-5.1 5.1.37-1\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2009-08-06 20:27+0400\n"
"Last-Translator: Yuri Kozlov <yuray@komyakino.ru>\n"
@@ -32,13 +32,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -47,7 +47,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -55,20 +55,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "Важное замечание Ð´Ð»Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»ÐµÐ¹ NIS/YP"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -78,7 +78,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
@@ -86,13 +86,13 @@ msgstr "Также проверьте права доступа и владель
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "Удалить вÑе базы данных MariaDB?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -101,7 +101,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -113,13 +113,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "Ðовый пароль Ð´Ð»Ñ MariaDB Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ \"root\":"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -129,25 +129,25 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "If this field is left blank, the password will not be changed."
msgstr "ЕÑли оÑтавить поле пуÑтым, то пароль изменён не будет."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
msgid "Repeat password for the MariaDB \"root\" user:"
msgstr "Повторите ввод Ð¿Ð°Ñ€Ð¾Ð»Ñ Ð´Ð»Ñ MariaDB Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ \"root\":"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr "Ðевозможно задать пароль MariaDB пользователю \"root\""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -159,32 +159,32 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr "Проверьте пароль учётной запиÑи поÑле уÑтановки пакета."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
-#| "Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+#| "Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
-"ПодробноÑти Ñм. в файле /usr/share/doc/mariadb-server-10.3/README.Debian."
+"ПодробноÑти Ñм. в файле /usr/share/doc/mariadb-server-10.4/README.Debian."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr "Ошибка ввода паролÑ"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr "Два введённых Ð¿Ð°Ñ€Ð¾Ð»Ñ Ð½Ðµ одинаковы. Повторите ввод."
diff --git a/debian/po/sv.po b/debian/po/sv.po
index ce830bd2319..e1bfe1f6d1b 100644
--- a/debian/po/sv.po
+++ b/debian/po/sv.po
@@ -7,7 +7,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-5.1 5.0.21-3\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2009-09-08 21:42+0100\n"
"Last-Translator: Martin Bagge <brother@bsnet.se>\n"
@@ -21,13 +21,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -36,7 +36,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -44,20 +44,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr "Viktig information för NIS/YP-användare"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -67,7 +67,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
@@ -77,13 +77,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr "Ta bort alla MariaDB-databaser?"
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -93,7 +93,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -105,13 +105,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr "Nytt lösenord för MariaDBs \"root\"-användare:"
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -121,25 +121,25 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "If this field is left blank, the password will not be changed."
msgstr "Om detta fält lämnas tom kommer lösenordet inte att ändras."
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
msgid "Repeat password for the MariaDB \"root\" user:"
msgstr "Repetera lösenordet för MariaDBs \"root\"-användare:"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr "Kunde inte sätta lösenord för MariaDBs \"root\"-användare"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -152,33 +152,33 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr "Du bör kontrollera kontots lösenord efter installationen av paketet."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
#, fuzzy
#| msgid ""
-#| "Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+#| "Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
#| "more information."
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
-"Läs filen /usr/share/doc/mariadb-server-10.3/README.Debian för mer "
+"Läs filen /usr/share/doc/mariadb-server-10.4/README.Debian för mer "
"information."
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr "Fel vid inmatning av lösenord"
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr "De två lösenorden du angav stämde inte överrens. Prova igen."
diff --git a/debian/po/templates.pot b/debian/po/templates.pot
index b0e3ee82576..46a4487480e 100644
--- a/debian/po/templates.pot
+++ b/debian/po/templates.pot
@@ -6,8 +6,8 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: mariadb-10.3\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Project-Id-Version: mariadb-10.4\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
@@ -19,13 +19,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -34,7 +34,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -42,20 +42,20 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid "Important note for NIS/YP users"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -63,7 +63,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
@@ -71,13 +71,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -85,7 +85,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -94,13 +94,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -108,25 +108,25 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "If this field is left blank, the password will not be changed."
msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
msgid "Repeat password for the MariaDB \"root\" user:"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -135,26 +135,26 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
diff --git a/debian/po/tr.po b/debian/po/tr.po
index ab8542d376f..6695bdd8a96 100644
--- a/debian/po/tr.po
+++ b/debian/po/tr.po
@@ -5,7 +5,7 @@
msgid ""
msgstr ""
"Project-Id-Version: mysql-dfsg-4.1\n"
-"Report-Msgid-Bugs-To: mariadb-10.3@packages.debian.org\n"
+"Report-Msgid-Bugs-To: mariadb-10.4@packages.debian.org\n"
"POT-Creation-Date: 2016-10-08 01:26+0300\n"
"PO-Revision-Date: 2004-06-05 08:53+0300\n"
"Last-Translator: Gürkan Aslan <gurkan@iaslan.com>\n"
@@ -18,13 +18,13 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid "The old data directory will be saved at new location"
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"A file named /var/lib/mysql/debian-*.flag exists on this system. The number "
"indicates a database binary format version that cannot automatically be "
@@ -33,7 +33,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Therefore the previous data directory will be renamed to /var/lib/mysql-* "
"and a new data directory will be initialized at /var/lib/mysql."
@@ -41,14 +41,14 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:2001
+#: ../mariadb-server-10.4.templates:2001
msgid ""
"Please manually export/import your data (e.g. with mysqldump) if needed."
msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
#, fuzzy
#| msgid "Important note for NIS/YP users!"
msgid "Important note for NIS/YP users"
@@ -56,7 +56,7 @@ msgstr "NIS/YP kullanıcıları için önemli not!"
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"Using MariaDB under NIS/YP requires a mysql user account to be added on the "
"local system with:"
@@ -64,7 +64,7 @@ msgstr ""
#. Type: note
#. Description
-#: ../mariadb-server-10.3.templates:3001
+#: ../mariadb-server-10.4.templates:3001
msgid ""
"You should also check the permissions and ownership of the /var/lib/mysql "
"directory:"
@@ -72,13 +72,13 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid "Remove all MariaDB databases?"
msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"The /var/lib/mysql directory which contains the MariaDB databases is about "
"to be removed."
@@ -86,7 +86,7 @@ msgstr ""
#. Type: boolean
#. Description
-#: ../mariadb-server-10.3.templates:4001
+#: ../mariadb-server-10.4.templates:4001
msgid ""
"If you're removing the MariaDB package in order to later install a more "
"recent version or if a different mariadb-server package is already using it, "
@@ -95,13 +95,13 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "New password for the MariaDB \"root\" user:"
msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid ""
"While not mandatory, it is highly recommended that you set a password for "
"the MariaDB administrative \"root\" user."
@@ -109,25 +109,25 @@ msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:5001
+#: ../mariadb-server-10.4.templates:5001
msgid "If this field is left blank, the password will not be changed."
msgstr ""
#. Type: password
#. Description
-#: ../mariadb-server-10.3.templates:6001
+#: ../mariadb-server-10.4.templates:6001
msgid "Repeat password for the MariaDB \"root\" user:"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "Unable to set password for the MariaDB \"root\" user"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
"An error occurred while setting the password for the MariaDB administrative "
"user. This may have happened because the account already has a password, or "
@@ -136,27 +136,27 @@ msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid "You should check the account's password after the package installation."
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:7001
+#: ../mariadb-server-10.4.templates:7001
msgid ""
-"Please read the /usr/share/doc/mariadb-server-10.3/README.Debian file for "
+"Please read the /usr/share/doc/mariadb-server-10.4/README.Debian file for "
"more information."
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "Password input error"
msgstr ""
#. Type: error
#. Description
-#: ../mariadb-server-10.3.templates:8001
+#: ../mariadb-server-10.4.templates:8001
msgid "The two passwords you entered were not the same. Please try again."
msgstr ""
@@ -202,12 +202,12 @@ msgstr ""
#~ msgid ""
#~ "MySQL will only install if you have a non-numeric hostname that is "
#~ "resolvable via the /etc/hosts file. E.g. if the \"hostname\" command "
-#~ "returns \"myhostname\" then there must be a line like \"10.3.0.1 "
+#~ "returns \"myhostname\" then there must be a line like \"10.4.0.1 "
#~ "myhostname\"."
#~ msgstr ""
#~ "MySQL sadece /etc/hosts dosyası yoluyla çözülebilir NUMERİK OLMAYAN bir "
#~ "makine adına sahipseniz kurulacaktır. Örneğin, eğer \"hostname\" komutu "
-#~ "\"makinem\" ismini döndürüyorsa, bu dosya içinde \"10.3.0.1 makinem\" "
+#~ "\"makinem\" ismini döndürüyorsa, bu dosya içinde \"10.4.0.1 makinem\" "
#~ "gibi bir satır olmalıdır."
#, fuzzy
diff --git a/debian/rules b/debian/rules
index 93bfe01e55e..13d1343671a 100755
--- a/debian/rules
+++ b/debian/rules
@@ -116,8 +116,8 @@ override_dh_auto_install:
ifneq (,$(filter linux,$(DEB_HOST_ARCH_OS)))
# Copy systemd files to a location available for dh_installinit
- cp $(BUILDDIR)/support-files/mariadb.service debian/mariadb-server-10.3.mariadb.service
- cp $(BUILDDIR)/support-files/mariadb@.service debian/mariadb-server-10.3.mariadb@.service
+ cp $(BUILDDIR)/support-files/mariadb.service debian/mariadb-server-10.4.mariadb.service
+ cp $(BUILDDIR)/support-files/mariadb@.service debian/mariadb-server-10.4.mariadb@.service
endif
# make install
@@ -130,12 +130,12 @@ endif
# nm numeric soft is not enough, therefore extra sort in command
# to satisfy Debian reproducible build requirements
- nm --defined-only $(BUILDDIR)/sql/mysqld | LC_ALL=C sort | gzip -n -9 > $(TMP)/usr/share/doc/mariadb-server-10.3/mysqld.sym.gz
+ nm --defined-only $(BUILDDIR)/sql/mysqld | LC_ALL=C sort | gzip -n -9 > $(TMP)/usr/share/doc/mariadb-server-10.4/mysqld.sym.gz
# rename and install AppArmor profile
install -D -m 644 debian/apparmor-profile $(TMP)/etc/apparmor.d/usr.sbin.mysqld
# install Apport hook
- install -D -m 644 debian/mariadb-server-10.3.py $(TMP)/usr/share/apport/package-hooks/source_mariadb-10.3.py
+ install -D -m 644 debian/mariadb-server-10.4.py $(TMP)/usr/share/apport/package-hooks/source_mariadb-10.4.py
# Install libmariadbclient18 compatibility links
ln -s libmariadb.so.3 $(TMP)/usr/lib/$(DEB_HOST_MULTIARCH)/libmariadbclient.so
diff --git a/extra/CMakeLists.txt b/extra/CMakeLists.txt
index 05b89290a6d..cd099dedf63 100644
--- a/extra/CMakeLists.txt
+++ b/extra/CMakeLists.txt
@@ -72,9 +72,6 @@ IF(WITH_INNOBASE_STORAGE_ENGINE)
# We use the InnoDB code directly in case the code changes.
ADD_DEFINITIONS("-DUNIV_INNOCHECKSUM")
- IF(WITH_INNODB_BUG_ENDIAN_CRC32)
- ADD_DEFINITIONS(-DINNODB_BUG_ENDIAN_CRC32)
- ENDIF()
# Avoid generating Hardware Capabilities due to crc32 instructions
IF(CMAKE_SYSTEM_NAME MATCHES "SunOS" AND CMAKE_SYSTEM_PROCESSOR MATCHES "i386")
diff --git a/extra/aws_sdk/CMakeLists.txt b/extra/aws_sdk/CMakeLists.txt
new file mode 100644
index 00000000000..85a196dccce
--- /dev/null
+++ b/extra/aws_sdk/CMakeLists.txt
@@ -0,0 +1,74 @@
+OPTION(AWS_SDK_EXTERNAL_PROJECT "Allow downloading and building the AWS C++ SDK" OFF)
+INCLUDE(aws_sdk)
+INCLUDE(ExternalProject)
+
+GET_PROPERTY(SDK_LIBS GLOBAL PROPERTY AWS_SDK_LIBS)
+LIST(LENGTH SDK_LIBS SDK_LIBS_COUNT)
+IF(SDK_LIBS_COUNT EQUAL 0)
+ RETURN()
+ENDIF()
+
+CHECK_AWS_SDK(RETVAL REASON)
+IF(NOT RETVAL)
+ MESSAGE(FATAL_ERROR
+  "AWS C++ SDK will not be built (${REASON}), but a dependency on the following components was found: ${SDK_LIBS}.
+  Use the CHECK_AWS_SDK() function before trying to build with SDK components")
+ENDIF()
+
+
+SET(byproducts)
+
+FOREACH(lib ${SDK_LIBS} core)
+ SET(lib aws-cpp-sdk-${lib})
+ ADD_LIBRARY(${lib} STATIC IMPORTED GLOBAL)
+ ADD_DEPENDENCIES(${lib} aws_sdk_cpp)
+
+ SET (loc "${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}")
+ IF(CMAKE_VERSION VERSION_GREATER "3.1")
+ SET(byproducts ${byproducts} BUILD_BYPRODUCTS ${loc})
+ ENDIF()
+ SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc})
+ENDFOREACH()
+
+# To be compatible with older CMake, we use an older version of the SDK
+IF(CMAKE_VERSION VERSION_LESS "3.0")
+ SET(GIT_TAG "1.0.8")
+ELSE()
+ SET(GIT_TAG "1.2.11")
+ENDIF()
+
+IF(MSVC_CRT_TYPE MATCHES "/MD")
+ SET(FORCE_SHARED_CRT ON)
+ELSE()
+ SET(FORCE_SHARED_CRT OFF)
+ENDIF()
+
+LIST(REMOVE_DUPLICATES SDK_LIBS)
+STRING( REPLACE ";" "!" SDK_LIBS_STR "${SDK_LIBS}")
+#MESSAGE("SDK_LIBS_STR=${SDK_LIBS_STR}")
+
+ExternalProject_Add(
+ aws_sdk_cpp
+ GIT_REPOSITORY "https://github.com/awslabs/aws-sdk-cpp.git"
+ GIT_TAG ${GIT_TAG}
+ UPDATE_COMMAND ""
+ SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/aws-sdk-cpp"
+ LIST_SEPARATOR !
+ ${byproducts}
+ CMAKE_ARGS
+ -DBUILD_ONLY=${SDK_LIBS_STR}
+ -DBUILD_SHARED_LIBS=OFF
+ -DFORCE_SHARED_CRT=${FORCE_SHARED_CRT}
+ -DENABLE_TESTING=OFF
+ "-DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG} ${PIC_FLAG}"
+ "-DCMAKE_CXX_FLAGS_RELWITHDEBINFO=${CMAKE_CXX_FLAGS_RELWITHDEBINFO} ${PIC_FLAG}"
+ "-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE} ${PIC_FLAG}"
+ "-DCMAKE_CXX_FLAGS_MINSIZEREL=${CMAKE_CXX_FLAGS_MINSIZEREL} ${PIC_FLAG}"
+ "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
+ "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
+ ${EXTRA_SDK_CMAKE_FLAGS}
+ -DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp
+ -DCMAKE_INSTALL_LIBDIR=lib
+ TEST_COMMAND ""
+)
+SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE)
diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc
index 164a7727d68..77b1767c426 100644
--- a/extra/innochecksum.cc
+++ b/extra/innochecksum.cc
@@ -42,42 +42,18 @@
/* Only parts of these files are included from the InnoDB codebase.
The parts not included are excluded by #ifndef UNIV_INNOCHECKSUM. */
-typedef void fil_space_t;
-
-#include "page0size.h"
-
-#define FLST_BASE_NODE_SIZE (4 + 2 * FIL_ADDR_SIZE)
-#define FLST_NODE_SIZE (2 * FIL_ADDR_SIZE)
-#define FSEG_PAGE_DATA FIL_PAGE_DATA
-#define FSEG_HEADER_SIZE 10
-#define UT_BITS_IN_BYTES(b) (((b) + 7) / 8)
-
-#include "ut0ut.h"
-#include "ut0byte.h"
-#include "mtr0types.h"
#include "mach0data.h"
-#include "fsp0types.h"
-#include "rem0rec.h"
+#include "page0page.h"
#include "buf0checksum.h" /* buf_calc_page_*() */
#include "buf0buf.h" /* buf_page_is_corrupted */
-#include "fil0fil.h" /* FIL_* */
-#include "page0page.h" /* PAGE_* */
#include "page0zip.h" /* page_zip_*() */
#include "trx0undo.h" /* TRX_* */
-#include "fsp0fsp.h" /* fsp_flags_get_page_size() &
- fsp_flags_get_zip_size() */
#include "ut0crc32.h" /* ut_crc32_init() */
#include "fsp0pagecompress.h" /* fil_get_compression_alg_name */
#include "fil0crypt.h" /* fil_space_verify_crypt_checksum */
#include <string.h>
-#ifdef UNIV_NONINL
-# include "fsp0fsp.ic"
-# include "mach0data.ic"
-# include "ut0rnd.ic"
-#endif
-
#ifndef PRIuMAX
#define PRIuMAX "llu"
#endif
@@ -95,10 +71,8 @@ static my_bool per_page_details;
static ulint n_merge;
extern ulong srv_checksum_algorithm;
static ulint physical_page_size; /* Page size in bytes on disk. */
-static ulint logical_page_size; /* Page size when uncompressed. */
ulong srv_page_size;
ulong srv_page_size_shift;
-page_size_t univ_page_size(0, 0, false);
/* Current page number (0 based). */
unsigned long long cur_page_num;
/* Skip the checksum verification. */
@@ -300,29 +274,27 @@ void print_leaf_stats(
}
}
-/** Get the page size of the filespace from the filespace header.
-@param[in] buf buffer used to read the page.
-@return page size */
-static
-const page_size_t
-get_page_size(
- byte* buf)
+/** Init the page size for the tablespace.
+@param[in] buf buffer used to read the page */
+static void init_page_size(const byte* buf)
{
const unsigned flags = mach_read_from_4(buf + FIL_PAGE_DATA
+ FSP_SPACE_FLAGS);
+ if (fil_space_t::full_crc32(flags)) {
+ srv_page_size = fil_space_t::logical_size(flags);
+ physical_page_size = srv_page_size;
+ return;
+ }
+
const ulong ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags);
srv_page_size_shift = ssize
? UNIV_ZIP_SIZE_SHIFT_MIN - 1 + ssize
: UNIV_PAGE_SIZE_SHIFT_ORIG;
- srv_page_size = 1U << srv_page_size_shift;
-
- univ_page_size.copy_from(
- page_size_t(srv_page_size, srv_page_size, false));
-
- return(page_size_t(flags));
+ srv_page_size = fil_space_t::logical_size(flags);
+ physical_page_size = fil_space_t::physical_size(flags);
}
#ifdef _WIN32
@@ -454,19 +426,16 @@ ulint read_file(
/** Check if page is corrupted or not.
@param[in] buf page frame
-@param[in] page_size page size
@param[in] is_encrypted true if page0 contained cryp_data
with crypt_scheme encrypted
-@param[in] is_compressed true if page0 fsp_flags contained
- page compression flag
+@param[in] flags tablespace flags
@retval true if page is corrupted otherwise false. */
static
bool
is_page_corrupted(
byte* buf,
- const page_size_t& page_size,
bool is_encrypted,
- bool is_compressed)
+ ulint flags)
{
/* enable if page is corrupted. */
@@ -475,9 +444,12 @@ is_page_corrupted(
ulint logseq;
ulint logseqfield;
ulint page_type = mach_read_from_2(buf+FIL_PAGE_TYPE);
- uint key_version = mach_read_from_4(buf+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
+ uint key_version = buf_page_get_key_version(buf, flags);
ulint space_id = mach_read_from_4(
buf + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
+ ulint zip_size = fil_space_t::zip_size(flags);
+ ulint is_compressed = fil_space_t::is_compressed(flags);
+ const bool use_full_crc32 = fil_space_t::full_crc32(flags);
/* We can't trust the page type alone, thus we also take into account
fsp_flags or crypt_data on page 0 */
@@ -489,13 +461,15 @@ is_page_corrupted(
return (false);
}
- if (page_size.is_compressed()) {
+ if (!zip_size && (!is_compressed || !use_full_crc32)) {
/* check the stored log sequence numbers
for uncompressed tablespace. */
logseq = mach_read_from_4(buf + FIL_PAGE_LSN + 4);
- logseqfield = mach_read_from_4(
- buf + page_size.logical() -
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4);
+ logseqfield = use_full_crc32
+ ? mach_read_from_4(buf + srv_page_size
+ - FIL_PAGE_FCRC32_END_LSN)
+ : mach_read_from_4(buf + srv_page_size
+ - FIL_PAGE_END_LSN_OLD_CHKSUM + 4);
if (is_log_enabled) {
fprintf(log_file,
@@ -523,24 +497,22 @@ is_page_corrupted(
so if crypt checksum does not match we verify checksum using
normal method. */
if (is_encrypted && key_version != 0) {
- is_corrupted = !fil_space_verify_crypt_checksum(buf,
- page_size);
+ is_corrupted = use_full_crc32
+ ? buf_page_is_corrupted(true, buf, flags)
+ : !fil_space_verify_crypt_checksum(buf, zip_size);
+
if (is_corrupted && log_file) {
fprintf(log_file,
"Page " ULINTPF ":%llu may be corrupted;"
" key_version=%u\n",
- space_id, cur_page_num,
- mach_read_from_4(
- FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
- + buf));
+ space_id, cur_page_num, key_version);
}
} else {
is_corrupted = true;
}
if (is_corrupted) {
- is_corrupted = buf_page_is_corrupted(
- true, buf, page_size, NULL);
+ is_corrupted = buf_page_is_corrupted(true, buf, flags);
}
return(is_corrupted);
@@ -592,19 +564,13 @@ is_page_empty(
/********************************************************************//**
Rewrite the checksum for the page.
@param [in/out] page page buffer
-@param [in] physical_page_size page size in bytes on disk.
-@param [in] iscompressed Is compressed/Uncompressed Page.
+@param [in] flags tablespace flags
@retval true : do rewrite
@retval false : skip the rewrite as the stored checksum matches the
calculated one or the page is the doublewrite buffer.
*/
-
-bool
-update_checksum(
- byte* page,
- ulong physical_page_size,
- bool iscompressed)
+static bool update_checksum(byte* page, ulint flags)
{
ib_uint32_t checksum = 0;
byte stored1[4]; /* get FIL_PAGE_SPACE_OR_CHKSUM field checksum */
@@ -616,6 +582,9 @@ update_checksum(
return (false);
}
+ const bool use_full_crc32 = fil_space_t::full_crc32(flags);
+ const bool iscompressed = fil_space_t::zip_size(flags);
+
memcpy(stored1, page + FIL_PAGE_SPACE_OR_CHKSUM, 4);
memcpy(stored2, page + physical_page_size -
FIL_PAGE_END_LSN_OLD_CHKSUM, 4);
@@ -643,12 +612,26 @@ update_checksum(
" %u\n", cur_page_num, checksum);
}
+ } else if (use_full_crc32) {
+ ulint payload = buf_page_full_crc32_size(page, NULL, NULL)
+ - FIL_PAGE_FCRC32_CHECKSUM;
+ checksum = ut_crc32(page, payload);
+ byte* c = page + payload;
+ if (mach_read_from_4(c) == checksum) return false;
+ mach_write_to_4(c, checksum);
+ if (is_log_enabled) {
+ fprintf(log_file, "page::%llu; Updated checksum"
+ " = %u\n", cur_page_num, checksum);
+ }
+ return true;
} else {
/* page is uncompressed. */
/* Store the new formula checksum */
switch ((srv_checksum_algorithm_t) write_check) {
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
case SRV_CHECKSUM_ALGORITHM_CRC32:
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
checksum = buf_calc_page_crc32(page);
@@ -664,6 +647,7 @@ update_checksum(
case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
checksum = BUF_NO_CHECKSUM_MAGIC;
break;
+
/* no default so the compiler will emit a warning if new
enum is added and not handled here */
}
@@ -717,10 +701,8 @@ func_exit:
@param[in,out] file file pointer where content
have to be written
@param[in] buf file buffer read
-@param[in] compressed Enabled if tablespace is
- compressed.
+@param[in] flags tablespace flags
@param[in,out] pos current file position.
-@param[in] page_size page size in bytes on disk.
@retval true if successfully written
@retval false if a non-recoverable error occurred
@@ -731,13 +713,12 @@ write_file(
const char* filename,
FILE* file,
byte* buf,
- bool compressed,
- fpos_t* pos,
- ulong page_size)
+ ulint flags,
+ fpos_t* pos)
{
bool do_update;
- do_update = update_checksum(buf, page_size, compressed);
+ do_update = update_checksum(buf, flags);
if (file != stdin) {
if (do_update) {
@@ -757,8 +738,9 @@ write_file(
}
}
- if (page_size
- != fwrite(buf, 1, page_size, file == stdin ? stdout : file)) {
+ if (physical_page_size
+ != fwrite(buf, 1, physical_page_size,
+ file == stdin ? stdout : file)) {
fprintf(stderr, "Failed to write page::%llu to %s: %s\n",
cur_page_num, filename, strerror(errno));
@@ -781,7 +763,6 @@ Parse the page and collect/dump the information about page type
@param [in] page buffer page
@param [out] xdes extend descriptor page
@param [in] file file for diagnosis.
-@param [in] page_size page_size
@param [in] is_encrypted tablespace is encrypted
*/
void
@@ -789,7 +770,6 @@ parse_page(
const byte* page,
byte* xdes,
FILE* file,
- const page_size_t& page_size,
bool is_encrypted)
{
unsigned long long id;
@@ -848,8 +828,7 @@ parse_page(
}
size_range_id = (data_bytes * SIZE_RANGES_FOR_PAGE
- + page_size.logical() - 1) /
- page_size.logical();
+ + srv_page_size - 1) / srv_page_size;
if (size_range_id > SIZE_RANGES_FOR_PAGE + 1) {
/* data_bytes is bigger than logical_page_size */
@@ -868,7 +847,7 @@ parse_page(
it = index_ids.find(id);
per_index_stats &index = (it->second);
const byte* des = xdes + XDES_ARR_OFFSET
- + XDES_SIZE * ((page_no & (page_size.physical() - 1))
+ + XDES_SIZE * ((page_no & (physical_page_size - 1))
/ FSP_EXTENT_SIZE);
if (xdes_get_bit(des, XDES_FREE_BIT,
page_no % FSP_EXTENT_SIZE)) {
@@ -1031,7 +1010,7 @@ parse_page(
case FIL_PAGE_TYPE_FSP_HDR:
page_type.n_fil_page_type_fsp_hdr++;
- memcpy(xdes, page, page_size.physical());
+ memcpy(xdes, page, physical_page_size);
if (page_type_dump) {
fprintf(file, "#::%llu\t\t|\t\tFile Space "
"Header\t\t|\t%s\n", cur_page_num, str);
@@ -1040,7 +1019,7 @@ parse_page(
case FIL_PAGE_TYPE_XDES:
page_type.n_fil_page_type_xdes++;
- memcpy(xdes, page, page_size.physical());
+ memcpy(xdes, page, physical_page_size);
if (page_type_dump) {
fprintf(file, "#::%llu\t\t|\t\tExtent descriptor "
"page\t\t|\t%s\n", cur_page_num, str);
@@ -1355,6 +1334,13 @@ innochecksum_get_one_option(
srv_checksum_algorithm =
SRV_CHECKSUM_ALGORITHM_STRICT_NONE;
break;
+
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ srv_checksum_algorithm =
+ SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32;
+ break;
+
default:
return(true);
}
@@ -1409,18 +1395,13 @@ get_options(
/** Check from page 0 if table is encrypted.
@param[in] filename Filename
-@param[in] page_size page size
@param[in] page Page 0
@retval true if tablespace is encrypted, false if not
*/
-static
-bool check_encryption(
- const char* filename,
- const page_size_t& page_size,
- byte * page)
+static bool check_encryption(const char* filename, const byte* page)
{
- ulint offset = (FSP_HEADER_OFFSET + (XDES_ARR_OFFSET + XDES_SIZE *
- (page_size.physical()) / FSP_EXTENT_SIZE));
+ ulint offset = FSP_HEADER_OFFSET + XDES_ARR_OFFSET + XDES_SIZE *
+ physical_page_size / FSP_EXTENT_SIZE;
if (memcmp(page + offset, CRYPT_MAGIC, MAGIC_SZ) != 0) {
return false;
@@ -1453,30 +1434,21 @@ bool check_encryption(
return (type == CRYPT_SCHEME_1);
}
-/**
-Verify page checksum.
+/** Verify page checksum.
@param[in] buf page to verify
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] is_encrypted true if tablespace is encrypted
-@param[in] is_compressed true if tablespace is page compressed
@param[in,out] mismatch_count Number of pages failed in checksum verify
-@retval 0 if page checksum matches or 1 if it does not match
-*/
-static
-int verify_checksum(
- byte* buf,
- const page_size_t& page_size,
- bool is_encrypted,
- bool is_compressed,
- unsigned long long* mismatch_count)
+@param[in] flags tablespace flags
+@retval 0 if page checksum matches or 1 if it does not match */
+static int verify_checksum(
+ byte* buf,
+ bool is_encrypted,
+ unsigned long long* mismatch_count,
+ ulint flags)
{
int exit_status = 0;
- bool is_corrupted = false;
-
- is_corrupted = is_page_corrupted(
- buf, page_size, is_encrypted, is_compressed);
-
- if (is_corrupted) {
+ if (is_page_corrupted(buf, is_encrypted, flags)) {
fprintf(stderr, "Fail: page::%llu invalid\n",
cur_page_num);
@@ -1502,10 +1474,9 @@ int verify_checksum(
@param[in] filename File name
@param[in] fil_in File pointer
@param[in] buf page
-@param[in] page_size page size
@param[in] pos File position
@param[in] is_encrypted true if tablespace is encrypted
-@param[in] is_compressed true if tablespace is page compressed
+@param[in] flags tablespace flags
@retval 0 if checksum rewrite was successful, 1 if error was detected */
static
int
@@ -1513,25 +1484,16 @@ rewrite_checksum(
const char* filename,
FILE* fil_in,
byte* buf,
- const page_size_t& page_size,
fpos_t* pos,
- bool is_encrypted,
- bool is_compressed)
+ bool is_encrypted,
+ ulint flags)
{
- int exit_status = 0;
+ bool is_compressed = fil_space_t::is_compressed(flags);
+
/* Rewrite checksum. Note that for encrypted and
page compressed tables this is not currently supported. */
- if (do_write &&
- !is_encrypted &&
- !is_compressed
- && !write_file(filename, fil_in, buf,
- page_size.is_compressed(), pos,
- static_cast<ulong>(page_size.physical()))) {
-
- exit_status = 1;
- }
-
- return (exit_status);
+ return do_write && !is_encrypted && !is_compressed
+ && !write_file(filename, fil_in, buf, flags, pos);
}
int main(
@@ -1707,22 +1669,18 @@ int main(
/* Determine page size, zip_size and page compression
from fsp_flags and encryption metadata from page 0 */
- const page_size_t& page_size = get_page_size(buf);
+ init_page_size(buf);
ulint flags = mach_read_from_4(FSP_HEADER_OFFSET + FSP_SPACE_FLAGS + buf);
- ulint zip_size = page_size.is_compressed() ? page_size.logical() : 0;
- logical_page_size = page_size.is_compressed() ? zip_size : 0;
- physical_page_size = page_size.physical();
- bool is_compressed = FSP_FLAGS_HAS_PAGE_COMPRESSION(flags);
- if (page_size.physical() > UNIV_ZIP_SIZE_MIN) {
+ if (physical_page_size > UNIV_ZIP_SIZE_MIN) {
/* Read rest of the page 0 to determine crypt_data */
- bytes = read_file(buf, partial_page_read, page_size.physical(), fil_in);
- if (bytes != page_size.physical()) {
+ bytes = read_file(buf, partial_page_read, physical_page_size, fil_in);
+ if (bytes != physical_page_size) {
fprintf(stderr, "Error: Was not able to read the "
"rest of the page ");
fprintf(stderr, "of " ULINTPF " bytes. Bytes read was " ULINTPF "\n",
- page_size.physical() - UNIV_ZIP_SIZE_MIN, bytes);
+ physical_page_size - UNIV_ZIP_SIZE_MIN, bytes);
exit_status = 1;
goto my_exit;
@@ -1731,7 +1689,7 @@ int main(
}
/* Now that we have full page 0 in buffer, check encryption */
- bool is_encrypted = check_encryption(filename, page_size, buf);
+ bool is_encrypted = check_encryption(filename, buf);
/* Verify page 0 contents. Note that we can't allow
checksum mismatch on page 0, because that would mean we
@@ -1740,7 +1698,8 @@ int main(
unsigned long long tmp_allow_mismatches = allow_mismatches;
allow_mismatches = 0;
- exit_status = verify_checksum(buf, page_size, is_encrypted, is_compressed, &mismatch_count);
+ exit_status = verify_checksum(buf, is_encrypted,
+ &mismatch_count, flags);
if (exit_status) {
fprintf(stderr, "Error: Page 0 checksum mismatch, can't continue. \n");
@@ -1749,8 +1708,9 @@ int main(
allow_mismatches = tmp_allow_mismatches;
}
- if ((exit_status = rewrite_checksum(filename, fil_in, buf,
- page_size, &pos, is_encrypted, is_compressed))) {
+ if ((exit_status = rewrite_checksum(
+ filename, fil_in, buf,
+ &pos, is_encrypted, flags))) {
goto my_exit;
}
@@ -1773,10 +1733,10 @@ int main(
}
if (page_type_summary || page_type_dump) {
- parse_page(buf, xdes, fil_page_type, page_size, is_encrypted);
+ parse_page(buf, xdes, fil_page_type, is_encrypted);
}
- pages = (ulint) (size / page_size.physical());
+ pages = (ulint) (size / physical_page_size);
if (just_count) {
if (read_from_stdin) {
@@ -1813,12 +1773,9 @@ int main(
partial_page_read = false;
offset = (off_t) start_page
- * (off_t) page_size.physical();
-#ifdef _WIN32
- if (_fseeki64(fil_in, offset, SEEK_SET)) {
-#else
- if (fseeko(fil_in, offset, SEEK_SET)) {
-#endif /* _WIN32 */
+ * (off_t) physical_page_size;
+ if (IF_WIN(_fseeki64,fseeko)(fil_in, offset,
+ SEEK_SET)) {
perror("Error: Unable to seek to "
"necessary offset");
@@ -1850,8 +1807,7 @@ int main(
if partial_page_read is enabled. */
bytes = read_file(buf,
partial_page_read,
- static_cast<ulong>(
- page_size.physical()),
+ physical_page_size,
fil_in);
partial_page_read = false;
@@ -1876,8 +1832,7 @@ int main(
while (!feof(fil_in)) {
bytes = read_file(buf, partial_page_read,
- static_cast<ulong>(
- page_size.physical()), fil_in);
+ physical_page_size, fil_in);
partial_page_read = false;
if (!bytes && feof(fil_in)) {
@@ -1886,17 +1841,17 @@ int main(
if (ferror(fil_in)) {
fprintf(stderr, "Error reading " ULINTPF " bytes",
- page_size.physical());
+ physical_page_size);
perror(" ");
exit_status = 1;
goto my_exit;
}
- if (bytes != page_size.physical()) {
+ if (bytes != physical_page_size) {
fprintf(stderr, "Error: bytes read (" ULINTPF ") "
"doesn't match page size (" ULINTPF ")\n",
- bytes, page_size.physical());
+ bytes, physical_page_size);
exit_status = 1;
goto my_exit;
}
@@ -1921,13 +1876,15 @@ int main(
checksum verification.*/
if (!no_check
&& !skip_page
- && (exit_status = verify_checksum(buf, page_size,
- is_encrypted, is_compressed, &mismatch_count))) {
+ && (exit_status = verify_checksum(
+ buf, is_encrypted,
+ &mismatch_count, flags))) {
goto my_exit;
}
- if ((exit_status = rewrite_checksum(filename, fil_in, buf,
- page_size, &pos, is_encrypted, is_compressed))) {
+ if ((exit_status = rewrite_checksum(
+ filename, fil_in, buf,
+ &pos, is_encrypted, flags))) {
goto my_exit;
}
@@ -1941,7 +1898,7 @@ int main(
}
if (page_type_summary || page_type_dump) {
- parse_page(buf, xdes, fil_page_type, page_size, is_encrypted);
+ parse_page(buf, xdes, fil_page_type, is_encrypted);
}
/* do counter increase and progress printing */
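The reworked innochecksum above derives both the logical and the physical page size directly from the FSP_SPACE_FLAGS word on page 0 instead of carrying a page_size_t object. Below is a minimal standalone sketch of the ssize arithmetic behind init_page_size(), not MariaDB source; the constant values (16 KiB original page, 1 KiB minimum size) are assumptions for the sketch, and full-crc32 tablespaces take the separate fil_space_t::logical_size() path shown in the hunk.

#include <cstdio>

/* Assumed constants for this sketch; the real ones live in the InnoDB headers. */
static const unsigned UNIV_PAGE_SIZE_SHIFT_ORIG = 14; /* 16 KiB original page */
static const unsigned UNIV_ZIP_SIZE_SHIFT_MIN   = 10; /* 1 KiB minimum size */

/* ssize == 0 encodes the original 16 KiB page; a non-zero value n encodes
   a page size of 2^(UNIV_ZIP_SIZE_SHIFT_MIN - 1 + n) bytes, matching how
   init_page_size() computes srv_page_size_shift. */
static unsigned long page_size_from_ssize(unsigned ssize)
{
	unsigned shift = ssize ? UNIV_ZIP_SIZE_SHIFT_MIN - 1 + ssize
			       : UNIV_PAGE_SIZE_SHIFT_ORIG;
	return 1UL << shift;
}

int main()
{
	/* ssize values 3..7 correspond to 4K, 8K, 16K, 32K and 64K pages. */
	for (unsigned ssize : {0u, 3u, 4u, 5u, 6u, 7u})
		std::printf("ssize=%u -> %lu bytes\n",
			    ssize, page_size_from_ssize(ssize));
	return 0;
}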
diff --git a/extra/mariabackup/CMakeLists.txt b/extra/mariabackup/CMakeLists.txt
index 02f2d1a3690..623e7460ee4 100644
--- a/extra/mariabackup/CMakeLists.txt
+++ b/extra/mariabackup/CMakeLists.txt
@@ -40,9 +40,6 @@ IF(NOT HAVE_SYSTEM_REGEX)
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/pcre)
ENDIF()
-IF(WITH_WSREP)
- INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/wsrep)
-ENDIF()
ADD_DEFINITIONS(-UMYSQL_SERVER)
########################################################################
diff --git a/extra/mariabackup/backup_mysql.cc b/extra/mariabackup/backup_mysql.cc
index 831433b43e2..b0ee0420049 100644
--- a/extra/mariabackup/backup_mysql.cc
+++ b/extra/mariabackup/backup_mysql.cc
@@ -861,82 +861,6 @@ stop_query_killer()
}
-/*
-Killing connections that wait for MDL lock.
-If lock-ddl-per-table is used, there can be some DDL statements
-
-FLUSH TABLES would hang infinitely, if DDL statements are waiting for
-MDL lock, which mariabackup currently holds. Therefore we start killing
-those statements from a dedicated thread, until FLUSH TABLES WITH READ LOCK
-succeeds.
-*/
-
-static os_event_t mdl_killer_stop_event;
-static os_event_t mdl_killer_finished_event;
-
-static
-os_thread_ret_t
-DECLARE_THREAD(kill_mdl_waiters_thread(void *))
-{
- MYSQL *mysql;
- if ((mysql = xb_mysql_connect()) == NULL) {
- msg("Error: kill mdl waiters thread failed to connect");
- goto stop_thread;
- }
-
- for(;;){
- if (os_event_wait_time(mdl_killer_stop_event, 1000) == 0)
- break;
-
- MYSQL_RES *result = xb_mysql_query(mysql,
- "SELECT ID, COMMAND, INFO FROM INFORMATION_SCHEMA.PROCESSLIST "
- " WHERE State='Waiting for table metadata lock'",
- true, true);
- while (MYSQL_ROW row = mysql_fetch_row(result))
- {
- char query[64];
-
- if (row[1] && !strcmp(row[1], "Killed"))
- continue;
-
- msg("Killing MDL waiting %s ('%s') on connection %s",
- row[1], row[2], row[0]);
- snprintf(query, sizeof(query), "KILL QUERY %s", row[0]);
- if (mysql_query(mysql, query) && (mysql_errno(mysql) != ER_NO_SUCH_THREAD)) {
- die("failed to execute query %s: %s", query,mysql_error(mysql));
- }
- }
- mysql_free_result(result);
- }
-
- mysql_close(mysql);
-
-stop_thread:
- msg("Kill mdl waiters thread stopped");
- os_event_set(mdl_killer_finished_event);
- os_thread_exit();
- return os_thread_ret_t(0);
-}
-
-
-static void start_mdl_waiters_killer()
-{
- mdl_killer_stop_event = os_event_create(0);
- mdl_killer_finished_event = os_event_create(0);
- os_thread_create(kill_mdl_waiters_thread, 0, 0);
-}
-
-
-/* Tell MDL killer to stop and finish for its completion*/
-static void stop_mdl_waiters_killer()
-{
- os_event_set(mdl_killer_stop_event);
- os_event_wait(mdl_killer_finished_event);
-
- os_event_destroy(mdl_killer_stop_event);
- os_event_destroy(mdl_killer_finished_event);
-}
-
/*********************************************************************//**
Function acquires either a backup tables lock, if supported
by the server, or a global read lock (FLUSH TABLES WITH READ LOCK)
@@ -959,30 +883,6 @@ lock_tables(MYSQL *connection)
return(true);
}
- if (opt_lock_ddl_per_table) {
- start_mdl_waiters_killer();
- }
-
- if (!opt_lock_wait_timeout && !opt_kill_long_queries_timeout) {
-
- /* We do first a FLUSH TABLES. If a long update is running, the
- FLUSH TABLES will wait but will not stall the whole mysqld, and
- when the long update is done the FLUSH TABLES WITH READ LOCK
- will start and succeed quickly. So, FLUSH TABLES is to lower
- the probability of a stage where both mysqldump and most client
- connections are stalled. Of course, if a second long update
- starts between the two FLUSHes, we have that bad stall.
-
- Option lock_wait_timeout serve the same purpose and is not
- compatible with this trick.
- */
-
- msg("Executing FLUSH NO_WRITE_TO_BINLOG TABLES...");
-
- xb_mysql_query(connection,
- "FLUSH NO_WRITE_TO_BINLOG TABLES", false);
- }
-
if (opt_lock_wait_timeout) {
if (!wait_for_no_updates(connection, opt_lock_wait_timeout,
opt_lock_wait_threshold)) {
@@ -990,7 +890,7 @@ lock_tables(MYSQL *connection)
}
}
- msg("Executing FLUSH TABLES WITH READ LOCK...");
+ msg("Acquiring BACKUP LOCKS...");
if (opt_kill_long_queries_timeout) {
start_query_killer();
@@ -1001,11 +901,10 @@ lock_tables(MYSQL *connection)
"SET SESSION wsrep_causal_reads=0", false);
}
- xb_mysql_query(connection, "FLUSH TABLES WITH READ LOCK", false);
-
- if (opt_lock_ddl_per_table) {
- stop_mdl_waiters_killer();
- }
+ xb_mysql_query(connection, "BACKUP STAGE START", true);
+ //xb_mysql_query(connection, "BACKUP STAGE FLUSH", true);
+ //xb_mysql_query(connection, "BACKUP STAGE BLOCK_DDL", true);
+ xb_mysql_query(connection, "BACKUP STAGE BLOCK_COMMIT", true);
if (opt_kill_long_queries_timeout) {
stop_query_killer();
@@ -1047,13 +946,8 @@ unlock_all(MYSQL *connection)
os_thread_sleep(opt_debug_sleep_before_unlock * 1000);
}
- if (binlog_locked) {
- msg("Executing UNLOCK BINLOG");
- xb_mysql_query(connection, "UNLOCK BINLOG", false);
- }
-
- msg("Executing UNLOCK TABLES");
- xb_mysql_query(connection, "UNLOCK TABLES", false);
+ msg("Executing BACKUP STAGE END");
+ xb_mysql_query(connection, "BACKUP STAGE END", false);
msg("All tables unlocked");
}
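The backup_mysql.cc hunks above replace the FLUSH TABLES WITH READ LOCK / UNLOCK TABLES (and UNLOCK BINLOG) protocol, together with the dedicated thread that killed connections waiting for MDL, with the server-side BACKUP STAGE commands. A minimal sketch of the new sequence, assuming an already-connected handle and using the plain MySQL C API in place of mariabackup's xb_mysql_query() wrapper:

#include <mysql.h>
#include <cstdio>

static bool run(MYSQL *con, const char *stmt)
{
	if (mysql_query(con, stmt)) {
		std::fprintf(stderr, "'%s' failed: %s\n", stmt, mysql_error(con));
		return false;
	}
	return true;
}

/* Take the backup locks: START enters the backup stage machinery and
   BLOCK_COMMIT is the stage under which the consistent snapshot is taken
   (the FLUSH and BLOCK_DDL stages are left commented out in the patch too). */
bool backup_lock_sketch(MYSQL *con)
{
	return run(con, "BACKUP STAGE START")
	    && run(con, "BACKUP STAGE BLOCK_COMMIT");
}

/* Release them: a single BACKUP STAGE END replaces the former
   UNLOCK BINLOG / UNLOCK TABLES pair. */
bool backup_unlock_sketch(MYSQL *con)
{
	return run(con, "BACKUP STAGE END");
}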
diff --git a/extra/mariabackup/ds_local.cc b/extra/mariabackup/ds_local.cc
index 58353365cd6..90fb88ef381 100644
--- a/extra/mariabackup/ds_local.cc
+++ b/extra/mariabackup/ds_local.cc
@@ -64,7 +64,7 @@ local_init(const char *root)
{
char errbuf[MYSYS_STRERROR_SIZE];
my_strerror(errbuf, sizeof(errbuf),my_errno);
- my_error(EE_CANT_MKDIR, MYF(ME_BELL | ME_WAITTANG),
+ my_error(EE_CANT_MKDIR, MYF(ME_BELL),
root, my_errno,errbuf, my_errno);
return NULL;
}
@@ -96,7 +96,7 @@ local_open(ds_ctxt_t *ctxt, const char *path,
if (my_mkdir(dirpath, 0777, MYF(0)) < 0 && my_errno != EEXIST) {
char errbuf[MYSYS_STRERROR_SIZE];
my_strerror(errbuf, sizeof(errbuf), my_errno);
- my_error(EE_CANT_MKDIR, MYF(ME_BELL | ME_WAITTANG),
+ my_error(EE_CANT_MKDIR, MYF(ME_BELL),
dirpath, my_errno, errbuf);
return NULL;
}
@@ -178,7 +178,9 @@ static void init_ibd_data(ds_local_file_t *local_file, const uchar *buf, size_t
ulint flags = mach_read_from_4(&buf[FIL_PAGE_DATA + FSP_SPACE_FLAGS]);
ulint ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags);
local_file->pagesize= ssize == 0 ? UNIV_PAGE_SIZE_ORIG : ((UNIV_ZIP_SIZE_MIN >> 1) << ssize);
- local_file->compressed = (my_bool)FSP_FLAGS_HAS_PAGE_COMPRESSION(flags);
+ local_file->compressed = fil_space_t::full_crc32(flags)
+ ? fil_space_t::is_compressed(flags)
+ : bool(FSP_FLAGS_HAS_PAGE_COMPRESSION(flags));
#if defined(_WIN32) && (MYSQL_VERSION_ID > 100200)
/* Make compressed file sparse, on Windows.
diff --git a/extra/mariabackup/fil_cur.cc b/extra/mariabackup/fil_cur.cc
index 09f5763f7df..6b186319539 100644
--- a/extra/mariabackup/fil_cur.cc
+++ b/extra/mariabackup/fil_cur.cc
@@ -233,11 +233,11 @@ xb_fil_cur_open(
posix_fadvise(cursor->file, 0, 0, POSIX_FADV_SEQUENTIAL);
- const page_size_t page_size(node->space->flags);
- cursor->page_size = page_size;
+ cursor->page_size = node->space->physical_size();
+ cursor->zip_size = node->space->zip_size();
/* Allocate read buffer */
- cursor->buf_size = XB_FIL_CUR_PAGES * page_size.physical();
+ cursor->buf_size = XB_FIL_CUR_PAGES * cursor->page_size;
cursor->orig_buf = static_cast<byte *>
(malloc(cursor->buf_size + srv_page_size));
cursor->buf = static_cast<byte *>
@@ -252,18 +252,17 @@ xb_fil_cur_open(
if (!node->space->crypt_data
&& os_file_read(IORequestRead,
node->handle, cursor->buf, 0,
- page_size.physical()) == DB_SUCCESS) {
+ cursor->page_size) == DB_SUCCESS) {
mutex_enter(&fil_system.mutex);
if (!node->space->crypt_data) {
- node->space->crypt_data
- = fil_space_read_crypt_data(page_size,
- cursor->buf);
+ node->space->crypt_data = fil_space_read_crypt_data(
+ node->space->zip_size(), cursor->buf);
}
mutex_exit(&fil_system.mutex);
}
cursor->space_size = (ulint)(cursor->statinfo.st_size
- / page_size.physical());
+ / cursor->page_size);
cursor->read_filter = read_filter;
cursor->read_filter->init(&cursor->read_filter_ctxt, cursor,
@@ -278,7 +277,7 @@ static bool page_is_corrupted(const byte *page, ulint page_no,
{
byte tmp_frame[UNIV_PAGE_SIZE_MAX];
byte tmp_page[UNIV_PAGE_SIZE_MAX];
- const ulint page_size = cursor->page_size.physical();
+ const ulint page_size = cursor->page_size;
ulint page_type = mach_read_from_2(page + FIL_PAGE_TYPE);
/* We ignore the doublewrite buffer pages.*/
@@ -315,6 +314,10 @@ static bool page_is_corrupted(const byte *page, ulint page_no,
return false;
}
+ if (space->full_crc32()) {
+ return buf_page_is_corrupted(true, page, space->flags);
+ }
+
/* Validate encrypted pages. The first page is never encrypted.
In the system tablespace, the first page would be written with
FIL_PAGE_FILE_FLUSH_LSN at shutdown, and if the LSN exceeds
@@ -327,7 +330,7 @@ static bool page_is_corrupted(const byte *page, ulint page_no,
|| (space->crypt_data
&& space->crypt_data->type != CRYPT_SCHEME_UNENCRYPTED))) {
- if (!fil_space_verify_crypt_checksum(page, cursor->page_size))
+ if (!fil_space_verify_crypt_checksum(page, space->zip_size()))
return true;
/* Compressed encrypted need to be decrypted
@@ -348,7 +351,7 @@ static bool page_is_corrupted(const byte *page, ulint page_no,
if (page_type != FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED) {
return buf_page_is_corrupted(true, tmp_page,
- cursor->page_size, space);
+ space->flags);
}
}
@@ -358,19 +361,20 @@ static bool page_is_corrupted(const byte *page, ulint page_no,
if (page_type == FIL_PAGE_PAGE_COMPRESSED
|| page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED) {
- ulint decomp = fil_page_decompress(tmp_frame, tmp_page);
+ ulint decomp = fil_page_decompress(tmp_frame, tmp_page,
+ space->flags);
page_type = mach_read_from_2(tmp_page + FIL_PAGE_TYPE);
return (!decomp
|| (decomp != srv_page_size
- && cursor->page_size.is_compressed())
+ && cursor->zip_size)
|| page_type == FIL_PAGE_PAGE_COMPRESSED
|| page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED
|| buf_page_is_corrupted(true, tmp_page,
- cursor->page_size, space));
+ space->flags));
}
- return buf_page_is_corrupted(true, page, cursor->page_size, space);
+ return buf_page_is_corrupted(true, page, space->flags);
}
/************************************************************************
@@ -391,7 +395,7 @@ xb_fil_cur_read(
xb_fil_cur_result_t ret;
ib_int64_t offset;
ib_int64_t to_read;
- const ulint page_size = cursor->page_size.physical();
+ const ulint page_size = cursor->page_size;
xb_ad(!cursor->is_system() || page_size == srv_page_size);
cursor->read_filter->get_next_batch(&cursor->read_filter_ctxt,
@@ -461,7 +465,7 @@ read_retry:
"10 retries. File %s seems to be "
"corrupted.", cursor->abs_path);
ret = XB_FIL_CUR_ERROR;
- buf_page_print(page, cursor->page_size);
+ ut_print_buf(stderr, page, page_size);
break;
}
msg(cursor->thread_n, "Database page corruption detected at page "
diff --git a/extra/mariabackup/fil_cur.h b/extra/mariabackup/fil_cur.h
index ad023d93208..193c51d6c43 100644
--- a/extra/mariabackup/fil_cur.h
+++ b/extra/mariabackup/fil_cur.h
@@ -38,7 +38,9 @@ struct xb_fil_cur_t {
char abs_path[FN_REFLEN];
/*!< absolute file path */
MY_STAT statinfo; /*!< information about the file */
- page_size_t page_size; /*!< page size */
+ ulint zip_size; /*!< compressed page size in bytes or 0
+ for uncompressed pages */
+ ulint page_size; /*!< physical page size */
xb_read_filt_t* read_filter; /*!< read filter */
xb_read_filt_ctxt_t read_filter_ctxt;
/*!< read filter context */
@@ -57,9 +59,6 @@ struct xb_fil_cur_t {
ulint space_id; /*!< ID of tablespace */
ulint space_size; /*!< space size in pages */
- /** TODO: remove this default constructor */
- xb_fil_cur_t() : page_size(0), read_filter(0), read_filter_ctxt() {}
-
/** @return whether this is not a file-per-table tablespace */
bool is_system() const
{
diff --git a/extra/mariabackup/read_filt.cc b/extra/mariabackup/read_filt.cc
index a48591abf29..055056245ba 100644
--- a/extra/mariabackup/read_filt.cc
+++ b/extra/mariabackup/read_filt.cc
@@ -127,7 +127,7 @@ rf_bitmap_get_next_batch(
of pages */
{
ulint start_page_id;
- const ulint page_size = ctxt->page_size.physical();
+ const ulint page_size = ctxt->page_size;
start_page_id = (ulint)(ctxt->offset / page_size);
diff --git a/extra/mariabackup/read_filt.h b/extra/mariabackup/read_filt.h
index cebc714eed8..585662c7f9c 100644
--- a/extra/mariabackup/read_filt.h
+++ b/extra/mariabackup/read_filt.h
@@ -41,7 +41,7 @@ struct xb_read_filt_ctxt_t {
/* Move these to union if any other filters are added in future */
xb_page_bitmap_range *bitmap_range; /*!< changed page bitmap range
iterator for space_id */
- page_size_t page_size; /*!< page size */
+ ulint page_size; /*!< page size */
ulint filter_batch_end;/*!< the ending page id of the
current changed page block in
the bitmap */
diff --git a/extra/mariabackup/write_filt.cc b/extra/mariabackup/write_filt.cc
index 63b11850bfb..3332159f99b 100644
--- a/extra/mariabackup/write_filt.cc
+++ b/extra/mariabackup/write_filt.cc
@@ -75,8 +75,7 @@ wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
ctxt->cursor = cursor;
/* allocate buffer for incremental backup (4096 pages) */
- cp->delta_buf_size = (cursor->page_size.physical() / 4)
- * cursor->page_size.physical();
+ cp->delta_buf_size = (cursor->page_size / 4) * cursor->page_size;
cp->delta_buf = (unsigned char *)os_mem_alloc_large(&cp->delta_buf_size);
if (!cp->delta_buf) {
@@ -88,7 +87,8 @@ wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
/* write delta meta info */
snprintf(meta_name, sizeof(meta_name), "%s%s", dst_name,
XB_DELTA_INFO_SUFFIX);
- const xb_delta_info_t info(cursor->page_size, cursor->space_id);
+ const xb_delta_info_t info(cursor->page_size, cursor->zip_size,
+ cursor->space_id);
if (!xb_write_delta_metadata(meta_name, &info)) {
msg(cursor->thread_n,"Error: "
"failed to write meta info for %s",
@@ -116,8 +116,7 @@ wf_incremental_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile)
ulint i;
xb_fil_cur_t *cursor = ctxt->cursor;
byte *page;
- const ulint page_size
- = cursor->page_size.physical();
+ const ulint page_size = cursor->page_size;
xb_wf_incremental_ctxt_t *cp = &(ctxt->u.wf_incremental_ctxt);
for (i = 0, page = cursor->buf; i < cursor->buf_npages;
@@ -162,8 +161,7 @@ static my_bool
wf_incremental_finalize(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile)
{
xb_fil_cur_t *cursor = ctxt->cursor;
- const ulint page_size
- = cursor->page_size.physical();
+ const ulint page_size = cursor->page_size;
xb_wf_incremental_ctxt_t *cp = &(ctxt->u.wf_incremental_ctxt);
if (cp->npages != page_size / 4) {
diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index 8aa1743d20b..6a7f2e78ce5 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -469,9 +469,18 @@ DECLARE_THREAD(dbug_execute_in_new_connection)(void *arg)
dbug_thread_param_t *par= (dbug_thread_param_t *)arg;
int err = mysql_query(par->con, par->query);
int err_no = mysql_errno(par->con);
- DBUG_ASSERT(par->expect_err == err);
- if (err && par->expect_errno)
- DBUG_ASSERT(err_no == par->expect_errno);
+ if(par->expect_err != err)
+ {
+ msg("FATAL: dbug_execute_in_new_connection : mysql_query '%s' returns %d, instead of expected %d",
+ par->query, err, par->expect_err);
+ _exit(1);
+ }
+ if (err && par->expect_errno && par->expect_errno != err_no)
+ {
+ msg("FATAL: dbug_execute_in_new_connection: mysql_query '%s' returns mysql_errno %d, instead of expected %d",
+ par->query, err_no, par->expect_errno);
+ _exit(1);
+ }
mysql_close(par->con);
mysql_thread_end();
os_event_t done = par->done_event;
@@ -627,7 +636,6 @@ static void backup_file_op_fail(ulint space_id, const byte* flags,
const byte* name, ulint len,
const byte* new_name, ulint new_len)
{
- ut_a(opt_no_lock);
bool fail;
if (flags) {
msg("DDL tracking : create %zu \"%.*s\": %x",
@@ -648,6 +656,7 @@ static void backup_file_op_fail(ulint space_id, const byte* flags,
msg("DDL tracking : delete %zu \"%.*s\"", space_id, int(len), name);
}
if (fail) {
+ ut_a(opt_no_lock);
die("DDL operation detected in the late phase of backup."
"Backup is inconsistent. Remove --no-lock option to fix.");
}
@@ -668,9 +677,9 @@ static void backup_optimized_ddl_op(ulint space_id)
run with --no-lock. Usually aborts the backup.
*/
static void backup_optimized_ddl_op_fail(ulint space_id) {
- ut_a(opt_no_lock);
msg("DDL tracking : optimized DDL on space %zu", space_id);
if (ddl_tracker.tables_in_backup.find(space_id) != ddl_tracker.tables_in_backup.end()) {
+ ut_a(opt_no_lock);
msg("ERROR : Optimized DDL operation detected in the late phase of backup."
"Backup is inconsistent. Remove --no-lock option to fix.");
exit(EXIT_FAILURE);
@@ -678,16 +687,6 @@ static void backup_optimized_ddl_op_fail(ulint space_id) {
}
-/** Callback whenever MLOG_TRUNCATE happens. */
-static void backup_truncate_fail()
-{
- msg("mariabackup: Incompatible TRUNCATE operation detected.%s",
- opt_lock_ddl_per_table
- ? ""
- : " Use --lock-ddl-per-table to lock all tables before backup.");
-}
-
-
/*
Retrieve default data directory, to be used with --copy-back.
@@ -1690,7 +1689,7 @@ xb_get_one_option(int optid,
case OPT_INNODB_CHECKSUM_ALGORITHM:
- ut_a(srv_checksum_algorithm <= SRV_CHECKSUM_ALGORITHM_STRICT_NONE);
+ ut_a(srv_checksum_algorithm <= SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32);
ADD_PRINT_PARAM_OPT(innodb_checksum_algorithm_names[srv_checksum_algorithm]);
break;
@@ -1859,15 +1858,18 @@ static bool innodb_init_param()
msg("innodb_data_file_path = %s",
innobase_data_file_path);
- /* This is the first time univ_page_size is used.
- It was initialized to 16k pages before srv_page_size was set */
- univ_page_size.copy_from(
- page_size_t(srv_page_size, srv_page_size, false));
-
srv_sys_space.set_space_id(TRX_SYS_SPACE);
srv_sys_space.set_name("innodb_system");
srv_sys_space.set_path(srv_data_home);
- srv_sys_space.set_flags(FSP_FLAGS_PAGE_SSIZE());
+ switch (srv_checksum_algorithm) {
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
+ srv_sys_space.set_flags(FSP_FLAGS_FCRC32_MASK_MARKER
+ | FSP_FLAGS_FCRC32_PAGE_SSIZE());
+ break;
+ default:
+ srv_sys_space.set_flags(FSP_FLAGS_PAGE_SSIZE());
+ }
if (!srv_sys_space.parse_params(innobase_data_file_path, true)) {
goto error;
@@ -2161,8 +2163,7 @@ xb_read_delta_metadata(const char *filepath, xb_delta_info_t *info)
msg("page_size is required in %s", filepath);
r = FALSE;
} else {
- info->page_size = page_size_t(zip_size ? zip_size : page_size,
- page_size, zip_size != 0);
+ info->page_size = zip_size ? zip_size : page_size;
}
if (info->space_id == ULINT_UNDEFINED) {
@@ -2190,9 +2191,8 @@ xb_write_delta_metadata(const char *filename, const xb_delta_info_t *info)
"page_size = " ULINTPF "\n"
"zip_size = " ULINTPF " \n"
"space_id = " ULINTPF "\n",
- info->page_size.logical(),
- info->page_size.is_compressed()
- ? info->page_size.physical() : 0,
+ info->page_size,
+ info->zip_size,
info->space_id);
len = strlen(buf);
@@ -2375,6 +2375,18 @@ check_if_skip_table(
const char *ptr;
char *eptr;
+
+ dbname = NULL;
+ tbname = name;
+ while ((ptr = strchr(tbname, '/')) != NULL) {
+ dbname = tbname;
+ tbname = ptr + 1;
+ }
+
+ if (strncmp(tbname, tmp_file_prefix, tmp_file_prefix_length) == 0) {
+ return TRUE;
+ }
+
if (regex_exclude_list.empty() &&
regex_include_list.empty() &&
tables_include_hash == NULL &&
@@ -2384,13 +2396,6 @@ check_if_skip_table(
return(FALSE);
}
- dbname = NULL;
- tbname = name;
- while ((ptr = strchr(tbname, '/')) != NULL) {
- dbname = tbname;
- tbname = ptr + 1;
- }
-
if (dbname == NULL) {
return(FALSE);
}
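The check_if_skip_table() change above moves the database/table name split ahead of the include/exclude filter checks and adds an unconditional skip for temporary tables, so they are excluded even when no filters are configured. A standalone sketch of that early check; the "#sql" prefix is an assumption here standing in for tmp_file_prefix:

#include <cstdio>
#include <cstring>

/* Mirror of the early-return logic: keep only the part after the last '/'
   and skip the table if the remaining name carries the temporary prefix. */
static bool skip_temporary_table(const char *name)
{
	const char *tbname = name;
	while (const char *ptr = std::strchr(tbname, '/'))
		tbname = ptr + 1;
	return std::strncmp(tbname, "#sql", 4) == 0;
}

int main()
{
	std::printf("%d\n", skip_temporary_table("test/t1"));        /* 0 */
	std::printf("%d\n", skip_temporary_table("test/#sql-ib21")); /* 1 */
	return 0;
}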
@@ -2667,8 +2672,7 @@ static lsn_t xtrabackup_copy_log(lsn_t start_lsn, lsn_t end_lsn, bool last)
if (data_len == OS_FILE_LOG_BLOCK_SIZE) {
/* We got a full log block. */
scanned_lsn += data_len;
- } else if (data_len
- >= OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE
+ } else if (data_len >= log_sys.trailer_offset()
|| data_len <= LOG_BLOCK_HDR_SIZE) {
/* We got a garbage block (abrupt end of the log). */
msg(0,"garbage block: " LSN_PF ",%zu",scanned_lsn, data_len);
@@ -3101,7 +3105,7 @@ xb_load_single_table_tablespace(
ut_a(node_size != (os_offset_t) -1);
- n_pages = node_size / page_size_t(file->flags()).physical();
+ n_pages = node_size / fil_space_t::physical_size(file->flags());
space = fil_space_create(
name, file->space_id(), file->flags(),
@@ -3279,7 +3283,8 @@ static dberr_t xb_assign_undo_space_start()
bool ret;
dberr_t error = DB_SUCCESS;
ulint space;
- int n_retries = 5;
+ int n_retries = 5;
+ ulint fsp_flags;
if (srv_undo_tablespaces == 0) {
return error;
@@ -3296,6 +3301,15 @@ static dberr_t xb_assign_undo_space_start()
buf = static_cast<byte*>(ut_malloc_nokey(2U << srv_page_size_shift));
page = static_cast<byte*>(ut_align(buf, srv_page_size));
+ if (os_file_read(IORequestRead, file, page, 0, srv_page_size)
+ != DB_SUCCESS) {
+ msg("Reading first page failed.\n");
+ error = DB_ERROR;
+ goto func_exit;
+ }
+
+ fsp_flags = mach_read_from_4(
+ page + FSP_HEADER_OFFSET + FSP_SPACE_FLAGS);
retry:
if (os_file_read(IORequestRead, file, page,
TRX_SYS_PAGE_NO << srv_page_size_shift,
@@ -3306,7 +3320,7 @@ retry:
}
/* TRX_SYS page can't be compressed or encrypted. */
- if (buf_page_is_corrupted(false, page, univ_page_size)) {
+ if (buf_page_is_corrupted(false, page, fsp_flags)) {
if (n_retries--) {
os_thread_sleep(1000);
goto retry;
@@ -4219,7 +4233,6 @@ fail_before_log_copying_thread_start:
log_copy_scanned_lsn = checkpoint_lsn_start;
recv_sys->recovered_lsn = log_copy_scanned_lsn;
log_optimized_ddl_op = backup_optimized_ddl_op;
- log_truncate = backup_truncate_fail;
if (xtrabackup_copy_logfile())
goto fail_before_log_copying_thread_start;
@@ -4247,7 +4260,7 @@ fail_before_log_copying_thread_start:
DBUG_EXECUTE_IF("check_mdl_lock_works",
dbug_alter_thread_done =
dbug_start_query_thread("ALTER TABLE test.t ADD COLUMN mdl_lock_column int",
- "Waiting for table metadata lock", 1, ER_QUERY_INTERRUPTED););
+ "Waiting for table metadata lock", 0, 0););
}
datafiles_iter_t *it = datafiles_iter_new();
@@ -4465,6 +4478,7 @@ void backup_fix_ddl(void)
}
datafiles_iter_free(it);
+ DBUG_EXECUTE_IF("check_mdl_lock_works", DBUG_ASSERT(new_tables.size() == 0););
for (std::set<std::string>::iterator iter = new_tables.begin();
iter != new_tables.end(); iter++) {
const char *space_name = iter->c_str();
@@ -4576,16 +4590,17 @@ xb_space_create_file(
fsp_header_init_fields(page, space_id, flags);
mach_write_to_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, space_id);
- const page_size_t page_size(flags);
+ const ulint zip_size = fil_space_t::zip_size(flags);
- if (!page_size.is_compressed()) {
- buf_flush_init_for_writing(NULL, page, NULL, 0);
+ if (!zip_size) {
+ buf_flush_init_for_writing(
+ NULL, page, NULL, 0,
+ fil_space_t::full_crc32(flags));
ret = os_file_write(IORequestWrite, path, *file, page, 0,
srv_page_size);
} else {
page_zip_des_t page_zip;
- ulint zip_size = page_size.physical();
page_zip_set_size(&page_zip, zip_size);
page_zip.data = page + srv_page_size;
fprintf(stderr, "zip_size = " ULINTPF "\n", zip_size);
@@ -4596,7 +4611,7 @@ xb_space_create_file(
page_zip.m_end = page_zip.m_nonempty =
page_zip.n_blobs = 0;
- buf_flush_init_for_writing(NULL, page, &page_zip, 0);
+ buf_flush_init_for_writing(NULL, page, &page_zip, 0, false);
ret = os_file_write(IORequestWrite, path, *file,
page_zip.data, 0, zip_size);
@@ -4762,19 +4777,20 @@ exit:
}
/* No matching space found. create the new one. */
- const ulint flags = info.page_size.is_compressed()
- ? get_bit_shift(info.page_size.physical()
+ const ulint flags = info.zip_size
+ ? get_bit_shift(info.page_size
>> (UNIV_ZIP_SIZE_SHIFT_MIN - 1))
<< FSP_FLAGS_POS_ZIP_SSIZE
| FSP_FLAGS_MASK_POST_ANTELOPE
| FSP_FLAGS_MASK_ATOMIC_BLOBS
- | (info.page_size.logical() == UNIV_PAGE_SIZE_ORIG
+ | (srv_page_size == UNIV_PAGE_SIZE_ORIG
? 0
- : get_bit_shift(info.page_size.logical()
+ : get_bit_shift(srv_page_size
>> (UNIV_ZIP_SIZE_SHIFT_MIN - 1))
<< FSP_FLAGS_POS_PAGE_SSIZE)
: FSP_FLAGS_PAGE_SSIZE();
- ut_ad(page_size_t(flags).equals_to(info.page_size));
+ ut_ad(fil_space_t::zip_size(flags) == info.zip_size);
+ ut_ad(fil_space_t::physical_size(flags) == info.page_size);
if (fil_space_create(dest_space_name, info.space_id, flags,
FIL_TYPE_TABLESPACE, 0)) {
@@ -4811,7 +4827,7 @@ xtrabackup_apply_delta(
ulint page_in_buffer;
ulint incremental_buffers = 0;
- xb_delta_info_t info(univ_page_size, SRV_TMP_SPACE_ID);
+ xb_delta_info_t info(srv_page_size, 0, SRV_TMP_SPACE_ID);
ulint page_size;
ulint page_size_shift;
byte* incremental_buffer_base = NULL;
@@ -4850,7 +4866,7 @@ xtrabackup_apply_delta(
goto error;
}
- page_size = info.page_size.physical();
+ page_size = info.page_size;
page_size_shift = get_bit_shift(page_size);
msg("page size for %s is %zu bytes",
src_path, page_size);
@@ -4898,9 +4914,9 @@ xtrabackup_apply_delta(
/* first block of block cluster */
offset = ((incremental_buffers * (page_size / 4))
<< page_size_shift);
- success = os_file_read(IORequestRead, src_file,
- incremental_buffer, offset, page_size);
- if (success != DB_SUCCESS) {
+ if (os_file_read(IORequestRead, src_file,
+ incremental_buffer, offset, page_size)
+ != DB_SUCCESS) {
goto error;
}
@@ -4930,10 +4946,10 @@ xtrabackup_apply_delta(
ut_a(last_buffer || page_in_buffer == page_size / 4);
/* read whole of the cluster */
- success = os_file_read(IORequestRead, src_file,
- incremental_buffer,
- offset, page_in_buffer * page_size);
- if (success != DB_SUCCESS) {
+ if (os_file_read(IORequestRead, src_file,
+ incremental_buffer,
+ offset, page_in_buffer * page_size)
+ != DB_SUCCESS) {
goto error;
}
@@ -4979,9 +4995,9 @@ xtrabackup_apply_delta(
}
}
- success = os_file_write(IORequestWrite,
- dst_path, dst_file, buf, off, page_size);
- if (success != DB_SUCCESS) {
+ if (os_file_write(IORequestWrite,
+ dst_path, dst_file, buf, off,
+ page_size) != DB_SUCCESS) {
goto error;
}
}
@@ -5124,7 +5140,7 @@ xb_process_datadir(
handle_datadir_entry_func_t func) /*!<in: callback */
{
ulint ret;
- char dbpath[OS_FILE_MAX_PATH+1];
+ char dbpath[OS_FILE_MAX_PATH+2];
os_file_dir_t dir;
os_file_dir_t dbdir;
os_file_stat_t dbinfo;
@@ -5692,26 +5708,24 @@ int check_privilege(
}
-/******************************************************************//**
+/**
Check DB user privileges according to the intended actions.
Fetches DB user privileges, determines intended actions based on
command-line arguments and prints missing privileges.
-May terminate application with EXIT_FAILURE exit code.*/
-static void
-check_all_privileges()
+@return whether all the necessary privileges are granted */
+static bool check_all_privileges()
{
if (!mysql_connection) {
/* Not connected, no queries is going to be executed. */
- return;
+ return true;
}
/* Fetch effective privileges. */
std::list<std::string> granted_privileges;
- MYSQL_ROW row = 0;
MYSQL_RES* result = xb_mysql_query(mysql_connection, "SHOW GRANTS",
- true);
- while ((row = mysql_fetch_row(result))) {
+ true);
+ while (MYSQL_ROW row = mysql_fetch_row(result)) {
granted_privileges.push_back(*row);
}
mysql_free_result(result);
@@ -5724,13 +5738,9 @@ check_all_privileges()
check_result |= check_privilege(
granted_privileges,
"RELOAD", "*", "*");
- }
-
- if (!opt_no_lock)
- {
check_result |= check_privilege(
granted_privileges,
- "PROCESS", "*", "*");
+ "PROCESS", "*", "*");
}
/* KILL ... */
@@ -5754,7 +5764,6 @@ check_all_privileges()
}
if (check_result & PRIVILEGE_ERROR) {
- mysql_close(mysql_connection);
msg("Current privileges, as reported by 'SHOW GRANTS': ");
int n=1;
for (std::list<std::string>::const_iterator it = granted_privileges.begin();
@@ -5762,8 +5771,10 @@ check_all_privileges()
it++,n++) {
msg(" %d.%s", n, it->c_str());
}
- die("Insufficient privileges");
+ return false;
}
+
+ return true;
}
bool
@@ -5830,8 +5841,8 @@ xb_init()
if (!get_mysql_vars(mysql_connection)) {
return(false);
}
- if (opt_check_privileges) {
- check_all_privileges();
+ if (opt_check_privileges && !check_all_privileges()) {
+ return(false);
}
history_start_time = time(NULL);
diff --git a/extra/mariabackup/xtrabackup.h b/extra/mariabackup/xtrabackup.h
index 3dc0891f3b4..25943ea7d98 100644
--- a/extra/mariabackup/xtrabackup.h
+++ b/extra/mariabackup/xtrabackup.h
@@ -28,11 +28,12 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
struct xb_delta_info_t
{
- xb_delta_info_t(page_size_t page_size, ulint space_id)
- : page_size(page_size), space_id(space_id) {}
+ xb_delta_info_t(ulint page_size, ulint zip_size, ulint space_id)
+ : page_size(page_size), zip_size(zip_size), space_id(space_id) {}
- page_size_t page_size;
- ulint space_id;
+ ulint page_size;
+ ulint zip_size;
+ ulint space_id;
};
/* value of the --incremental option */
diff --git a/extra/my_print_defaults.c b/extra/my_print_defaults.c
index 07c95a79ddc..8eb0baa6eeb 100644
--- a/extra/my_print_defaults.c
+++ b/extra/my_print_defaults.c
@@ -41,24 +41,6 @@ const char *default_dbug_option="d:t:o,/tmp/my_print_defaults.trace";
static struct my_option my_long_options[] =
{
- /*
- NB: --config-file is troublesome, because get_defaults_options() doesn't
- know about it, but we pretend --config-file is like --defaults-file. In
- fact they behave differently: see the comments at the top of
- mysys/default.c for how --defaults-file should behave.
-
- This --config-file option behaves as:
- - If it has a directory name part (absolute or relative), then only this
- file is read; no error is given if the file doesn't exist
- - If the file has no directory name part, the standard locations are
- searched for a file of this name (and standard filename extensions are
- added if the file has no extension)
- */
- {"config-file", 'c', "Deprecated, please use --defaults-file instead. "
- "Name of config file to read; if no extension is given, default "
- "extension (e.g., .ini or .cnf) will be added",
- (char**) &config_file, (char**) &config_file, 0, GET_STR, REQUIRED_ARG,
- 0, 0, 0, 0, 0, 0},
#ifdef DBUG_OFF
{"debug", '#', "This is a non-debug version. Catch this and exit",
0,0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0},
@@ -66,8 +48,8 @@ static struct my_option my_long_options[] =
{"debug", '#', "Output debug log", (char**) &default_dbug_option,
(char**) &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
#endif
- {"defaults-file", 'c', "Like --config-file, except: if first option, "
- "then read this file only, do not read global or per-user config "
+ {"defaults-file", 'c',
+ "Read this file only, do not read global or per-user config "
"files; should be the first option",
(char**) &config_file, (char*) &config_file, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0},
@@ -80,11 +62,6 @@ static struct my_option my_long_options[] =
"In addition to the given groups, read also groups with this suffix",
(char**) &my_defaults_group_suffix, (char**) &my_defaults_group_suffix,
0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
- {"extra-file", 'e',
- "Deprecated. Synonym for --defaults-extra-file.",
- (void *)&my_defaults_extra_file,
- (void *)&my_defaults_extra_file, 0, GET_STR,
- REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"mysqld", 0, "Read the same set of groups that the mysqld binary does.",
&opt_mysqld, &opt_mysqld, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{"no-defaults", 'n', "Return an empty string (useful for scripts).",
@@ -107,7 +84,7 @@ static void cleanup_and_exit(int exit_code)
static void version()
{
- printf("%s Ver 1.6 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
+ printf("%s Ver 1.7 for %s at %s\n",my_progname,SYSTEM_TYPE, MACHINE_TYPE);
}
diff --git a/extra/perror.c b/extra/perror.c
index bd62401c8b2..49b1318cbb2 100644
--- a/extra/perror.c
+++ b/extra/perror.c
@@ -87,7 +87,7 @@ static void usage(void)
{
print_version();
puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
- printf("Print a description for a system error code or a MySQL error code.\n");
+ printf("Print a description for a system error code or a MariaDB error code.\n");
printf("If you want to get the error for a negative error code, you should use\n-- before the first error code to tell perror that there was no more options.\n\n");
printf("Usage: %s [OPTIONS] [ERRORCODE [ERRORCODE...]]\n",my_progname);
my_print_help(my_long_options);
@@ -336,7 +336,7 @@ int main(int argc,char *argv[])
{
found= 1;
if (verbose)
- printf("MySQL error code %3d: %s\n", code, msg);
+ printf("MariaDB error code %3d: %s\n", code, msg);
else
puts(msg);
}
@@ -344,7 +344,7 @@ int main(int argc,char *argv[])
{
found= 1;
if (verbose)
- printf("MySQL error code %3d (%s): %s\n", code, name, msg);
+ printf("MariaDB error code %3d (%s): %s\n", code, name, msg);
else
puts(msg);
}
diff --git a/extra/resolve_stack_dump.c b/extra/resolve_stack_dump.c
index dbd9941141d..78a32298dab 100644
--- a/extra/resolve_stack_dump.c
+++ b/extra/resolve_stack_dump.c
@@ -75,7 +75,7 @@ static void print_version(void)
static void usage()
{
print_version();
- printf("MySQL AB, by Sasha Pachev\n");
+ printf("MariaDB Corporation, originally created by Sasha Pachev\n");
printf("This software comes with ABSOLUTELY NO WARRANTY\n\n");
printf("Resolve numeric stack strace dump into symbols.\n\n");
printf("Usage: %s [OPTIONS] symbols-file [numeric-dump-file]\n",
diff --git a/include/aria_backup.h b/include/aria_backup.h
new file mode 100644
index 00000000000..1a1c437d0b9
--- /dev/null
+++ b/include/aria_backup.h
@@ -0,0 +1,37 @@
+/* Copyright (C) 2018 MariaDB corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
+
+/* Interfaces for doing backups of Aria tables */
+
+C_MODE_START
+
+typedef struct st_aria_table_capabilities
+{
+ my_off_t header_size;
+ ulong bitmap_pages_covered;
+ uint block_size;
+ uint keypage_header;
+ my_bool checksum;
+ my_bool transactional;
+ /* This is true if the table can be copied without any locks */
+ my_bool online_backup_safe;
+} ARIA_TABLE_CAPABILITIES;
+
+int aria_get_capabilities(File kfile, ARIA_TABLE_CAPABILITIES *cap);
+int aria_read_index(File kfile, ARIA_TABLE_CAPABILITIES *cap, ulonglong block,
+ uchar *buffer);
+int aria_read_data(File dfile, ARIA_TABLE_CAPABILITIES *cap, ulonglong block,
+ uchar *buffer, size_t *bytes_read);
+C_MODE_END
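
A minimal sketch of how a backup tool could query this new interface, assuming the usual mysys conventions (my_open() returning a File handle, MYF(MY_WME) for error reporting) and assuming a zero return from aria_get_capabilities() means success; the index-file name is illustrative:

  #include <my_global.h>
  #include <my_sys.h>
  #include <aria_backup.h>
  #include <stdio.h>

  /* Report whether one Aria table can be copied without locks (sketch). */
  static int report_aria_capabilities(const char *index_file_name)
  {
    ARIA_TABLE_CAPABILITIES cap;
    File kfile= my_open(index_file_name, O_RDONLY, MYF(MY_WME));
    if (kfile < 0)
      return 1;
    int res= aria_get_capabilities(kfile, &cap);   /* assumed: 0 == success */
    if (!res)
      printf("block_size=%u transactional=%d online_backup_safe=%d\n",
             cap.block_size, (int) cap.transactional,
             (int) cap.online_backup_safe);
    my_close(kfile, MYF(MY_WME));
    return res;
  }

If online_backup_safe is false, the caller is presumably expected to lock the table (or stop the server) before copying its blocks with aria_read_index()/aria_read_data().
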
diff --git a/include/json_lib.h b/include/json_lib.h
index fed85b516d9..b6add6d13a3 100644
--- a/include/json_lib.h
+++ b/include/json_lib.h
@@ -174,11 +174,11 @@ enum json_value_types
{
JSON_VALUE_OBJECT=1,
JSON_VALUE_ARRAY=2,
- JSON_VALUE_STRING,
- JSON_VALUE_NUMBER,
- JSON_VALUE_TRUE,
- JSON_VALUE_FALSE,
- JSON_VALUE_NULL
+ JSON_VALUE_STRING=3,
+ JSON_VALUE_NUMBER=4,
+ JSON_VALUE_TRUE=5,
+ JSON_VALUE_FALSE=6,
+ JSON_VALUE_NULL=7
};
@@ -423,10 +423,15 @@ int json_path_parts_compare(
int json_path_compare(const json_path_t *a, const json_path_t *b,
enum json_value_types vt);
+int json_valid(const char *js, size_t js_len, CHARSET_INFO *cs);
+
+int json_locate_key(const char *js, const char *js_end,
+ const char *kname,
+ const char **key_start, const char **key_end,
+ int *comma_pos);
#ifdef __cplusplus
}
#endif
#endif /* JSON_LIB_INCLUDED */
-
diff --git a/include/lf.h b/include/lf.h
index a9d7e9ee688..fe861a524dc 100644
--- a/include/lf.h
+++ b/include/lf.h
@@ -167,6 +167,8 @@ void *lf_hash_search_using_hash_value(LF_HASH *hash, LF_PINS *pins,
int lf_hash_delete(LF_HASH *hash, LF_PINS *pins, const void *key, uint keylen);
int lf_hash_iterate(LF_HASH *hash, LF_PINS *pins,
my_hash_walk_action action, void *argument);
+#define lf_hash_size(hash) \
+ my_atomic_load32_explicit(&(hash)->count, MY_MEMORY_ORDER_RELAXED)
/*
shortcut macros to access underlying pinbox functions from an LF_HASH
see lf_pinbox_get_pins() and lf_pinbox_put_pins()
diff --git a/include/m_ctype.h b/include/m_ctype.h
index a4aa5b51218..c6273590bbe 100644
--- a/include/m_ctype.h
+++ b/include/m_ctype.h
@@ -362,7 +362,6 @@ extern MY_COLLATION_HANDLER my_collation_8bit_bin_handler;
extern MY_COLLATION_HANDLER my_collation_8bit_simple_ci_handler;
extern MY_COLLATION_HANDLER my_collation_8bit_nopad_bin_handler;
extern MY_COLLATION_HANDLER my_collation_8bit_simple_nopad_ci_handler;
-extern MY_COLLATION_HANDLER my_collation_ucs2_uca_handler;
/* Some typedef to make it easy for C++ to make function pointers */
typedef int (*my_charset_conv_mb_wc)(CHARSET_INFO *, my_wc_t *,
@@ -872,14 +871,6 @@ size_t my_strnxfrm_mb_nopad(CHARSET_INFO *,
uchar *dst, size_t dstlen, uint nweights,
const uchar *src, size_t srclen, uint flags);
-size_t my_strnxfrm_unicode(CHARSET_INFO *,
- uchar *dst, size_t dstlen, uint nweights,
- const uchar *src, size_t srclen, uint flags);
-
-size_t my_strnxfrm_unicode_nopad(CHARSET_INFO *,
- uchar *dst, size_t dstlen, uint nweights,
- const uchar *src, size_t srclen, uint flags);
-
size_t my_strnxfrmlen_unicode(CHARSET_INFO *, size_t);
size_t my_strnxfrm_unicode_full_bin(CHARSET_INFO *,
diff --git a/include/m_string.h b/include/m_string.h
index d50da8770c3..c8b5774b08c 100644
--- a/include/m_string.h
+++ b/include/m_string.h
@@ -99,7 +99,7 @@ extern char *strmake(char *dst,const char *src,size_t length);
#define strmake_buf(D,S) strmake(D, S, sizeof(D) - 1)
#else
#define strmake_buf(D,S) ({ \
- typeof (D) __x __attribute__((unused)) = { 2 }; \
+ __typeof__ (D) __x __attribute__((unused)) = { 2 }; \
strmake(D, S, sizeof(D) - 1); \
})
#endif
diff --git a/include/my_base.h b/include/my_base.h
index c36072c0bfa..8a8237ce8b2 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -99,7 +99,8 @@ enum ha_key_alg {
HA_KEY_ALG_BTREE= 1, /* B-tree, default one */
HA_KEY_ALG_RTREE= 2, /* R-tree, for spatial searches */
HA_KEY_ALG_HASH= 3, /* HASH keys (HEAP tables) */
- HA_KEY_ALG_FULLTEXT= 4 /* FULLTEXT (MyISAM tables) */
+ HA_KEY_ALG_FULLTEXT= 4, /* FULLTEXT (MyISAM tables) */
+ HA_KEY_ALG_LONG_HASH= 5 /* long BLOB keys */
};
/* Storage media types */
diff --git a/include/my_compare.h b/include/my_compare.h
index 4387105aff8..0f48771d7a7 100644
--- a/include/my_compare.h
+++ b/include/my_compare.h
@@ -152,5 +152,6 @@ typedef enum icp_result {
} ICP_RESULT;
typedef ICP_RESULT (*index_cond_func_t)(void *param);
+typedef int (*rowid_filter_func_t)(void *param);
#endif /* _my_compare_h */
diff --git a/include/my_counter.h b/include/my_counter.h
new file mode 100644
index 00000000000..c5cbe296df0
--- /dev/null
+++ b/include/my_counter.h
@@ -0,0 +1,49 @@
+#ifndef MY_COUNTER_H_INCLUDED
+#define MY_COUNTER_H_INCLUDED
+/*
+ Copyright (C) 2018 MariaDB Foundation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include <atomic>
+
+
+template <typename Type> class Atomic_counter
+{
+ std::atomic<Type> m_counter;
+
+ Type add(Type i) { return m_counter.fetch_add(i, std::memory_order_relaxed); }
+ Type sub(Type i) { return m_counter.fetch_sub(i, std::memory_order_relaxed); }
+
+public:
+ Atomic_counter(const Atomic_counter<Type> &rhs)
+ { m_counter.store(rhs, std::memory_order_relaxed); }
+ Atomic_counter(Type val): m_counter(val) {}
+ Atomic_counter() {}
+
+ Type operator++(int) { return add(1); }
+ Type operator--(int) { return sub(1); }
+
+ Type operator++() { return add(1) + 1; }
+ Type operator--() { return sub(1) - 1; }
+
+ Type operator+=(const Type i) { return add(i) + i; }
+ Type operator-=(const Type i) { return sub(i) - i; }
+
+ operator Type() const { return m_counter.load(std::memory_order_relaxed); }
+ Type operator=(const Type val)
+ { m_counter.store(val, std::memory_order_relaxed); return val; }
+};
+#endif /* MY_COUNTER_H_INCLUDED */
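
A small standalone usage sketch (the thread-pool scenario is hypothetical): every operation maps to a std::memory_order_relaxed atomic, so the class is meant for counters that need atomicity but no ordering with other memory accesses.

  #include <thread>
  #include <vector>
  #include "my_counter.h"          /* the header added above */

  static Atomic_counter<unsigned> active_workers(0);

  static void work()
  {
    active_workers++;              /* fetch_add(1, memory_order_relaxed) */
    /* ... do something useful ... */
    active_workers--;              /* fetch_sub(1, memory_order_relaxed) */
  }

  int main()
  {
    std::vector<std::thread> pool;
    for (int i= 0; i < 4; i++)
      pool.emplace_back(work);
    for (auto &t : pool)
      t.join();
    return active_workers;         /* relaxed load via operator Type(); 0 here */
  }
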
diff --git a/include/my_global.h b/include/my_global.h
index 37f78eabb4f..69b91f8b870 100644
--- a/include/my_global.h
+++ b/include/my_global.h
@@ -133,11 +133,6 @@
#define F_UNLCK 3
#define F_TO_EOF 0x3FFFFFFF
-/* Shared memory and named pipe connections are supported. */
-#define HAVE_SMEM 1
-#define HAVE_NAMED_PIPE 1
-#define shared_memory_buffer_length 16000
-#define default_shared_memory_base_name "MYSQL"
#endif /* _WIN32*/
@@ -1189,8 +1184,6 @@ typedef struct { const char *dli_fname, dli_fbase; } Dl_info;
/* Things we don't need in the embedded version of MySQL */
/* TODO HF add #undef HAVE_VIO if we don't want client in embedded library */
-#undef HAVE_SMEM /* No shared memory */
-
#else
#define HAVE_REPLICATION
#define HAVE_EXTERNAL_CLIENT
diff --git a/include/my_pthread.h b/include/my_pthread.h
index 264125a8fe3..4d33d1abdd4 100644
--- a/include/my_pthread.h
+++ b/include/my_pthread.h
@@ -189,7 +189,19 @@ extern int my_pthread_create_detached;
int sigwait(sigset_t *set, int *sig);
#endif
-#define my_sigwait(A,B) sigwait((A),(B))
+static inline int my_sigwait(sigset_t *set, int *sig, int *code)
+{
+#ifdef HAVE_SIGWAITINFO
+ siginfo_t siginfo;
+ *sig= sigwaitinfo(set, &siginfo);
+ *code= siginfo.si_code;
+ return *sig < 0 ? errno : 0;
+#else
+#define SI_KERNEL 128
+ *code= 0;
+ return sigwait(set, sig);
+#endif
+}
#if defined(HAVE_SIGTHREADMASK) && !defined(HAVE_PTHREAD_SIGMASK)
#define pthread_sigmask(A,B,C) sigthreadmask((A),(B),(C))
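
For illustration, a dedicated signal-handling thread (with these signals blocked in all threads) could use the new wrapper as sketched below; the signal set is an example. On platforms with sigwaitinfo() the extra code output lets the caller tell, e.g., kernel-generated signals (SI_KERNEL) apart from ones sent with kill(), while the fallback path always reports 0.

  #include <my_global.h>
  #include <my_pthread.h>
  #include <signal.h>
  #include <stdio.h>

  /* Sketch: loop until SIGTERM arrives, logging each delivered signal. */
  static void signal_loop()
  {
    sigset_t set;
    sigemptyset(&set);
    sigaddset(&set, SIGTERM);
    sigaddset(&set, SIGHUP);

    for (;;)
    {
      int sig, code;
      if (my_sigwait(&set, &sig, &code))   /* non-zero means failure */
        break;
      printf("received signal %d (si_code %d)\n", sig, code);
      if (sig == SIGTERM)
        break;
    }
  }
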
diff --git a/include/my_sys.h b/include/my_sys.h
index ddaceff8708..6a300e9aca3 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -104,13 +104,12 @@ typedef struct my_aio_result {
#define MY_GIVE_INFO 2U /* Give time info about process*/
#define MY_DONT_FREE_DBUG 4U /* Do not call DBUG_END() in my_end() */
-#define ME_BELL 4U /* Ring bell then printing message */
-#define ME_WAITTANG 0 /* Wait for a user action */
-#define ME_NOREFRESH 64U /* Write the error message to error log */
-#define ME_NOINPUT 0 /* Don't use the input library */
-#define ME_JUST_INFO 1024U /**< not error but just info */
-#define ME_JUST_WARNING 2048U /**< not error but just warning */
-#define ME_FATALERROR 4096U /* Fatal statement error */
+#define ME_BELL 4U /* Ring bell then printing message */
+#define ME_ERROR_LOG 64 /**< write the error message to error log */
+#define ME_ERROR_LOG_ONLY 128 /**< write the error message to error log only */
+#define ME_NOTE 1024 /**< not error but just info */
+#define ME_WARNING 2048 /**< not error but just warning */
+#define ME_FATAL 4096 /**< fatal statement error */
/* Bits in last argument to fn_format */
#define MY_REPLACE_DIR 1U /* replace dir in name with 'dir' */
@@ -329,7 +328,7 @@ typedef struct st_record_cache /* Used when caching records */
enum file_type
{
UNOPEN = 0, FILE_BY_OPEN, FILE_BY_CREATE, STREAM_BY_FOPEN, STREAM_BY_FDOPEN,
- FILE_BY_MKSTEMP, FILE_BY_DUP
+ FILE_BY_O_TMPFILE, FILE_BY_MKSTEMP, FILE_BY_DUP
};
struct st_my_file_info
@@ -907,6 +906,7 @@ static inline char *safe_strdup_root(MEM_ROOT *root, const char *str)
}
extern char *strmake_root(MEM_ROOT *root,const char *str,size_t len);
extern void *memdup_root(MEM_ROOT *root,const void *str, size_t len);
+extern LEX_CSTRING safe_lexcstrdup_root(MEM_ROOT *root, const LEX_CSTRING str);
extern my_bool my_compress(uchar *, size_t *, size_t *);
extern my_bool my_uncompress(uchar *, size_t , size_t *);
extern uchar *my_compress_alloc(const uchar *packet, size_t *len,
diff --git a/include/my_time.h b/include/my_time.h
index cec168c6fd6..27011be8b47 100644
--- a/include/my_time.h
+++ b/include/my_time.h
@@ -57,22 +57,20 @@ extern uchar days_in_month[];
/* Flags to str_to_datetime */
-/*
- TIME_FUZZY_DATES is used for the result will only be used for comparison
- purposes. Conversion is as relaxed as possible.
-*/
-#define TIME_FUZZY_DATES 1U
-#define TIME_DATETIME_ONLY 2U
-#define TIME_TIME_ONLY 4U
-#define TIME_NO_ZERO_IN_DATE (1UL << 23) /* == MODE_NO_ZERO_IN_DATE */
-#define TIME_NO_ZERO_DATE (1UL << 24) /* == MODE_NO_ZERO_DATE */
-#define TIME_INVALID_DATES (1UL << 25) /* == MODE_INVALID_DATES */
+#define C_TIME_NO_ZERO_IN_DATE (1UL << 23) /* == MODE_NO_ZERO_IN_DATE */
+#define C_TIME_NO_ZERO_DATE (1UL << 24) /* == MODE_NO_ZERO_DATE */
+#define C_TIME_INVALID_DATES (1UL << 25) /* == MODE_INVALID_DATES */
#define MYSQL_TIME_WARN_TRUNCATED 1U
#define MYSQL_TIME_WARN_OUT_OF_RANGE 2U
+#define MYSQL_TIME_WARN_EDOM 4U
+#define MYSQL_TIME_WARN_ZERO_DATE 8U
#define MYSQL_TIME_NOTE_TRUNCATED 16U
-#define MYSQL_TIME_WARN_WARNINGS (MYSQL_TIME_WARN_TRUNCATED|MYSQL_TIME_WARN_OUT_OF_RANGE)
+#define MYSQL_TIME_WARN_WARNINGS (MYSQL_TIME_WARN_TRUNCATED|\
+ MYSQL_TIME_WARN_OUT_OF_RANGE|\
+ MYSQL_TIME_WARN_EDOM|\
+ MYSQL_TIME_WARN_ZERO_DATE)
#define MYSQL_TIME_WARN_NOTES (MYSQL_TIME_NOTE_TRUNCATED)
#define MYSQL_TIME_WARN_HAVE_WARNINGS(x) MY_TEST((x) & MYSQL_TIME_WARN_WARNINGS)
@@ -81,6 +79,16 @@ extern uchar days_in_month[];
/* Useful constants */
#define SECONDS_IN_24H 86400L
+/* Limits for the INTERVAL data type */
+
+ /* Number of hours between '0001-01-01 00h' and '9999-12-31 23h' */
+#define TIME_MAX_INTERVAL_HOUR 87649415
+#define TIME_MAX_INTERVAL_HOUR_CHAR_LENGTH 8
+
+/* Number of full days between '0001-01-01' and '9999-12-31'*/
+#define TIME_MAX_INTERVAL_DAY 3652058 /*87649415/24*/
+#define TIME_MAX_INTERVAL_DAY_CHAR_LENGTH 7
+
/* Limits for the TIME data type */
#define TIME_MAX_HOUR 838
#define TIME_MAX_MINUTE 59
@@ -100,35 +108,46 @@ typedef struct st_mysql_time_status
{
int warnings;
uint precision;
+ uint nanoseconds;
} MYSQL_TIME_STATUS;
static inline void my_time_status_init(MYSQL_TIME_STATUS *status)
{
status->warnings= 0;
status->precision= 0;
+ status->nanoseconds= 0;
}
my_bool check_date(const MYSQL_TIME *ltime, my_bool not_zero_date,
ulonglong flags, int *was_cut);
-my_bool str_to_time(const char *str, size_t length, MYSQL_TIME *l_time,
- ulonglong flag, MYSQL_TIME_STATUS *status);
-my_bool str_to_datetime(const char *str, size_t length, MYSQL_TIME *l_time,
- ulonglong flags, MYSQL_TIME_STATUS *status);
-longlong number_to_datetime(longlong nr, ulong sec_part, MYSQL_TIME *time_res,
- ulonglong flags, int *was_cut);
-
-static inline
-longlong double_to_datetime(double nr, MYSQL_TIME *ltime, ulonglong flags, int *cut)
-{
- if (nr < 0 || nr > LONGLONG_MAX)
- nr= (double)LONGLONG_MAX;
- return number_to_datetime((longlong) floor(nr),
- (ulong)((nr-floor(nr))*TIME_SECOND_PART_FACTOR),
- ltime, flags, cut);
-}
+my_bool str_to_DDhhmmssff(const char *str, size_t length, MYSQL_TIME *l_time,
+ ulong max_hour, MYSQL_TIME_STATUS *status);
+my_bool str_to_datetime_or_date_or_time(const char *str, size_t length,
+ MYSQL_TIME *to, ulonglong flag,
+ MYSQL_TIME_STATUS *status,
+ ulong time_max_hour,
+ ulong time_err_hour);
+my_bool
+str_to_datetime_or_date_or_interval_hhmmssff(const char *str, size_t length,
+ MYSQL_TIME *to, ulonglong flag,
+ MYSQL_TIME_STATUS *status,
+ ulong time_max_hour,
+ ulong time_err_hour);
+my_bool
+str_to_datetime_or_date_or_interval_day(const char *str, size_t length,
+ MYSQL_TIME *to, ulonglong flag,
+ MYSQL_TIME_STATUS *status,
+ ulong time_max_hour,
+ ulong time_err_hour);
+my_bool str_to_datetime_or_date(const char *str, size_t length, MYSQL_TIME *to,
+ ulonglong flags, MYSQL_TIME_STATUS *status);
+
+longlong number_to_datetime_or_date(longlong nr, ulong sec_part,
+ MYSQL_TIME *time_res,
+ ulonglong flags, int *was_cut);
+int number_to_time_only(my_bool neg, ulonglong nr, ulong sec_part,
+ ulong max_hour, MYSQL_TIME *to, int *was_cut);
-int number_to_time(my_bool neg, ulonglong nr, ulong sec_part,
- MYSQL_TIME *ltime, int *was_cut);
ulonglong TIME_to_ulonglong_datetime(const MYSQL_TIME *);
ulonglong TIME_to_ulonglong_date(const MYSQL_TIME *);
ulonglong TIME_to_ulonglong_time(const MYSQL_TIME *);
@@ -191,6 +210,7 @@ void set_zero_time(MYSQL_TIME *tm, enum enum_mysql_timestamp_type time_type);
#define MAX_DATE_STRING_REP_LENGTH 30
#define AUTO_SEC_PART_DIGITS DECIMAL_NOT_SPECIFIED
+int my_interval_DDhhmmssff_to_str(const MYSQL_TIME *, char *to, uint digits);
int my_time_to_str(const MYSQL_TIME *l_time, char *to, uint digits);
int my_date_to_str(const MYSQL_TIME *l_time, char *to);
int my_datetime_to_str(const MYSQL_TIME *l_time, char *to, uint digits);
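
As a hedged sketch of the renamed entry point (assuming, as with the previous str_to_datetime(), that a non-zero return signals a parse failure; the literal and the flag choice are illustrative):

  #include <my_global.h>
  #include <my_time.h>
  #include <string.h>
  #include <stdio.h>

  /* Parse a date/datetime literal and report warnings (sketch). */
  static void parse_datetime_example()
  {
    const char *str= "2019-02-03 10:20:30.123456";
    MYSQL_TIME tm;
    MYSQL_TIME_STATUS status;
    my_time_status_init(&status);        /* also zeroes the new nanoseconds field */

    if (str_to_datetime_or_date(str, strlen(str), &tm,
                                C_TIME_NO_ZERO_DATE | C_TIME_NO_ZERO_IN_DATE,
                                &status))
    {
      printf("parse error, warnings=0x%x\n", status.warnings);
      return;
    }
    printf("parsed type=%d warnings=0x%x precision=%u\n",
           (int) tm.time_type, status.warnings, status.precision);
  }
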
diff --git a/include/mysql/plugin.h b/include/mysql/plugin.h
index b122d888c6d..02a87c6db71 100644
--- a/include/mysql/plugin.h
+++ b/include/mysql/plugin.h
@@ -75,7 +75,7 @@ typedef struct st_mysql_xid MYSQL_XID;
#define MYSQL_PLUGIN_INTERFACE_VERSION 0x0104
/* MariaDB plugin interface version */
-#define MARIA_PLUGIN_INTERFACE_VERSION 0x010d
+#define MARIA_PLUGIN_INTERFACE_VERSION 0x010e
/*
The allowable types of plugins
diff --git a/include/mysql/plugin_audit.h.pp b/include/mysql/plugin_audit.h.pp
index 89f7dcc36c4..c5ae678e82a 100644
--- a/include/mysql/plugin_audit.h.pp
+++ b/include/mysql/plugin_audit.h.pp
@@ -374,6 +374,51 @@ extern struct thd_wait_service_st {
} *thd_wait_service;
void thd_wait_begin(void* thd, int wait_type);
void thd_wait_end(void* thd);
+enum json_types
+{
+ JSV_BAD_JSON=-1,
+ JSV_NOTHING=0,
+ JSV_OBJECT=1,
+ JSV_ARRAY=2,
+ JSV_STRING=3,
+ JSV_NUMBER=4,
+ JSV_TRUE=5,
+ JSV_FALSE=6,
+ JSV_NULL=7
+};
+extern struct json_service_st {
+ enum json_types (*json_type)(const char *js, const char *js_end,
+ const char **value, int *value_len);
+ enum json_types (*json_get_array_item)(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len);
+ enum json_types (*json_get_object_key)(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len);
+ enum json_types (*json_get_object_nkey)(const char *js,const char *js_end,
+ int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len);
+ int (*json_escape_string)(const char *str,const char *str_end,
+ char *json, char *json_end);
+ int (*json_unescape_json)(const char *json_str, const char *json_end,
+ char *res, char *res_end);
+} *json_service;
+enum json_types json_type(const char *js, const char *js_end,
+ const char **value, int *value_len);
+enum json_types json_get_array_item(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len);
+enum json_types json_get_object_key(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len);
+enum json_types json_get_object_nkey(const char *js,const char *js_end, int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len);
+int json_escape_string(const char *str,const char *str_end,
+ char *json, char *json_end);
+int json_unescape_json(const char *json_str, const char *json_end,
+ char *res, char *res_end);
struct st_mysql_xid {
long formatID;
long gtrid_length;
diff --git a/include/mysql/plugin_auth.h b/include/mysql/plugin_auth.h
index 638d9b57fd9..44e51c3fd0b 100644
--- a/include/mysql/plugin_auth.h
+++ b/include/mysql/plugin_auth.h
@@ -27,7 +27,7 @@
#include <mysql/plugin.h>
-#define MYSQL_AUTHENTICATION_INTERFACE_VERSION 0x0201
+#define MYSQL_AUTHENTICATION_INTERFACE_VERSION 0x0202
#include <mysql/plugin_auth_common.h>
@@ -60,7 +60,8 @@ typedef struct st_mysql_server_auth_info
/**
A corresponding column value from the mysql.user table for the
- matching account name
+ matching account name or the preprocessed value, if preprocess_hash
+ method is not NULL
*/
const char *auth_string;
@@ -130,6 +131,47 @@ struct st_mysql_auth
used for authorization.
*/
int (*authenticate_user)(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info);
+ /**
+ Create a password hash (or digest) out of a plain-text password
+
+ Used in SET PASSWORD, GRANT, and CREATE USER to convert user specified
+ plain-text password into a value that will be stored in mysql.user table.
+
+ @see preprocess_hash
+
+ @param password plain-text password
+ @param password_length plain-text password length
+ @param hash the digest will be stored there
+ @param hash_length in: hash buffer size
+ out: the actual length of the hash
+
+ @return 0 for ok, 1 for error
+
+ Can be NULL.
+ */
+ int (*hash_password)(const char *password, size_t password_length,
+ char *hash, size_t *hash_length);
+
+ /**
+ Prepare the password hash for authentication.
+
+ Password hash is stored in the authentication_string column of the
+ mysql.user table in a text form. If a plugin needs to preprocess the
+ value somehow before the authentication (e.g. convert from hex or base64
+ to binary), it can do it in this method. This way the conversion
+ will happen only once, not for every authentication attempt.
+
+ The value written to the out buffer will be cached and later made
+ available to the authenticate_user() method in the
+ MYSQL_SERVER_AUTH_INFO::auth_string[] buffer.
+
+ @return 0 for ok, 1 for error
+
+ Can be NULL, in this case the mysql.user.authentication_string value will
+ be given to the authenticate_user() method as is, unconverted.
+ */
+ int (*preprocess_hash)(const char *hash, size_t hash_length,
+ unsigned char *out, size_t *out_length);
};
#ifdef __cplusplus
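
A hedged illustration of the two new optional methods; everything below is hypothetical (the hex encoding merely stands in for whatever text-to-binary conversion a real plugin would perform), and the return convention follows the comments above (0 for ok, 1 for error):

  #include <mysql/plugin_auth.h>
  #include <stddef.h>

  static int unhex_digit(char c)
  {
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    return -1;
  }

  /* Hypothetical hash_password: store the password hex-encoded in mysql.user. */
  static int example_hash_password(const char *password, size_t password_length,
                                   char *hash, size_t *hash_length)
  {
    static const char hex[]= "0123456789abcdef";
    if (*hash_length < password_length * 2)
      return 1;
    for (size_t i= 0; i < password_length; i++)
    {
      hash[i * 2]=     hex[((unsigned char) password[i]) >> 4];
      hash[i * 2 + 1]= hex[((unsigned char) password[i]) & 0x0F];
    }
    *hash_length= password_length * 2;
    return 0;
  }

  /* Hypothetical preprocess_hash: convert the stored hex text back to binary
     once, so authenticate_user() sees the binary form in info->auth_string. */
  static int example_preprocess_hash(const char *hash, size_t hash_length,
                                     unsigned char *out, size_t *out_length)
  {
    if (hash_length % 2 || *out_length < hash_length / 2)
      return 1;
    for (size_t i= 0; i < hash_length; i+= 2)
    {
      int hi= unhex_digit(hash[i]), lo= unhex_digit(hash[i + 1]);
      if (hi < 0 || lo < 0)
        return 1;
      out[i / 2]= (unsigned char) ((hi << 4) | lo);
    }
    *out_length= hash_length / 2;
    return 0;
  }

Both function pointers would sit in the plugin's st_mysql_auth descriptor next to authenticate_user(); either may be left NULL, in which case the stored authentication_string is passed through unconverted, as described in the comments above.
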
diff --git a/include/mysql/plugin_auth.h.pp b/include/mysql/plugin_auth.h.pp
index e515699cad6..41cb7d075c4 100644
--- a/include/mysql/plugin_auth.h.pp
+++ b/include/mysql/plugin_auth.h.pp
@@ -374,6 +374,51 @@ extern struct thd_wait_service_st {
} *thd_wait_service;
void thd_wait_begin(void* thd, int wait_type);
void thd_wait_end(void* thd);
+enum json_types
+{
+ JSV_BAD_JSON=-1,
+ JSV_NOTHING=0,
+ JSV_OBJECT=1,
+ JSV_ARRAY=2,
+ JSV_STRING=3,
+ JSV_NUMBER=4,
+ JSV_TRUE=5,
+ JSV_FALSE=6,
+ JSV_NULL=7
+};
+extern struct json_service_st {
+ enum json_types (*json_type)(const char *js, const char *js_end,
+ const char **value, int *value_len);
+ enum json_types (*json_get_array_item)(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len);
+ enum json_types (*json_get_object_key)(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len);
+ enum json_types (*json_get_object_nkey)(const char *js,const char *js_end,
+ int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len);
+ int (*json_escape_string)(const char *str,const char *str_end,
+ char *json, char *json_end);
+ int (*json_unescape_json)(const char *json_str, const char *json_end,
+ char *res, char *res_end);
+} *json_service;
+enum json_types json_type(const char *js, const char *js_end,
+ const char **value, int *value_len);
+enum json_types json_get_array_item(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len);
+enum json_types json_get_object_key(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len);
+enum json_types json_get_object_nkey(const char *js,const char *js_end, int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len);
+int json_escape_string(const char *str,const char *str_end,
+ char *json, char *json_end);
+int json_unescape_json(const char *json_str, const char *json_end,
+ char *res, char *res_end);
struct st_mysql_xid {
long formatID;
long gtrid_length;
@@ -561,4 +606,8 @@ struct st_mysql_auth
int interface_version;
const char *client_auth_plugin;
int (*authenticate_user)(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info);
+ int (*hash_password)(const char *password, size_t password_length,
+ char *hash, size_t *hash_length);
+ int (*preprocess_hash)(const char *hash, size_t hash_length,
+ unsigned char *out, size_t *out_length);
};
diff --git a/include/mysql/plugin_encryption.h.pp b/include/mysql/plugin_encryption.h.pp
index 7defe0aec2c..6597decfbef 100644
--- a/include/mysql/plugin_encryption.h.pp
+++ b/include/mysql/plugin_encryption.h.pp
@@ -374,6 +374,51 @@ extern struct thd_wait_service_st {
} *thd_wait_service;
void thd_wait_begin(void* thd, int wait_type);
void thd_wait_end(void* thd);
+enum json_types
+{
+ JSV_BAD_JSON=-1,
+ JSV_NOTHING=0,
+ JSV_OBJECT=1,
+ JSV_ARRAY=2,
+ JSV_STRING=3,
+ JSV_NUMBER=4,
+ JSV_TRUE=5,
+ JSV_FALSE=6,
+ JSV_NULL=7
+};
+extern struct json_service_st {
+ enum json_types (*json_type)(const char *js, const char *js_end,
+ const char **value, int *value_len);
+ enum json_types (*json_get_array_item)(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len);
+ enum json_types (*json_get_object_key)(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len);
+ enum json_types (*json_get_object_nkey)(const char *js,const char *js_end,
+ int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len);
+ int (*json_escape_string)(const char *str,const char *str_end,
+ char *json, char *json_end);
+ int (*json_unescape_json)(const char *json_str, const char *json_end,
+ char *res, char *res_end);
+} *json_service;
+enum json_types json_type(const char *js, const char *js_end,
+ const char **value, int *value_len);
+enum json_types json_get_array_item(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len);
+enum json_types json_get_object_key(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len);
+enum json_types json_get_object_nkey(const char *js,const char *js_end, int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len);
+int json_escape_string(const char *str,const char *str_end,
+ char *json, char *json_end);
+int json_unescape_json(const char *json_str, const char *json_end,
+ char *res, char *res_end);
struct st_mysql_xid {
long formatID;
long gtrid_length;
diff --git a/include/mysql/plugin_ftparser.h.pp b/include/mysql/plugin_ftparser.h.pp
index a36f51e74e1..bd1cfc7b68b 100644
--- a/include/mysql/plugin_ftparser.h.pp
+++ b/include/mysql/plugin_ftparser.h.pp
@@ -374,6 +374,51 @@ extern struct thd_wait_service_st {
} *thd_wait_service;
void thd_wait_begin(void* thd, int wait_type);
void thd_wait_end(void* thd);
+enum json_types
+{
+ JSV_BAD_JSON=-1,
+ JSV_NOTHING=0,
+ JSV_OBJECT=1,
+ JSV_ARRAY=2,
+ JSV_STRING=3,
+ JSV_NUMBER=4,
+ JSV_TRUE=5,
+ JSV_FALSE=6,
+ JSV_NULL=7
+};
+extern struct json_service_st {
+ enum json_types (*json_type)(const char *js, const char *js_end,
+ const char **value, int *value_len);
+ enum json_types (*json_get_array_item)(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len);
+ enum json_types (*json_get_object_key)(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len);
+ enum json_types (*json_get_object_nkey)(const char *js,const char *js_end,
+ int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len);
+ int (*json_escape_string)(const char *str,const char *str_end,
+ char *json, char *json_end);
+ int (*json_unescape_json)(const char *json_str, const char *json_end,
+ char *res, char *res_end);
+} *json_service;
+enum json_types json_type(const char *js, const char *js_end,
+ const char **value, int *value_len);
+enum json_types json_get_array_item(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len);
+enum json_types json_get_object_key(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len);
+enum json_types json_get_object_nkey(const char *js,const char *js_end, int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len);
+int json_escape_string(const char *str,const char *str_end,
+ char *json, char *json_end);
+int json_unescape_json(const char *json_str, const char *json_end,
+ char *res, char *res_end);
struct st_mysql_xid {
long formatID;
long gtrid_length;
diff --git a/include/mysql/plugin_password_validation.h b/include/mysql/plugin_password_validation.h
index e2763483db6..699d42d6b50 100644
--- a/include/mysql/plugin_password_validation.h
+++ b/include/mysql/plugin_password_validation.h
@@ -42,8 +42,8 @@ struct st_mariadb_password_validation
Function provided by the plugin which should perform password validation
and return 0 if the password has passed the validation.
*/
- int (*validate_password)(MYSQL_CONST_LEX_STRING *username,
- MYSQL_CONST_LEX_STRING *password);
+ int (*validate_password)(const MYSQL_CONST_LEX_STRING *username,
+ const MYSQL_CONST_LEX_STRING *password);
};
#ifdef __cplusplus
diff --git a/include/mysql/plugin_password_validation.h.pp b/include/mysql/plugin_password_validation.h.pp
index 9701ad1b92f..2f9d2299c1f 100644
--- a/include/mysql/plugin_password_validation.h.pp
+++ b/include/mysql/plugin_password_validation.h.pp
@@ -374,6 +374,51 @@ extern struct thd_wait_service_st {
} *thd_wait_service;
void thd_wait_begin(void* thd, int wait_type);
void thd_wait_end(void* thd);
+enum json_types
+{
+ JSV_BAD_JSON=-1,
+ JSV_NOTHING=0,
+ JSV_OBJECT=1,
+ JSV_ARRAY=2,
+ JSV_STRING=3,
+ JSV_NUMBER=4,
+ JSV_TRUE=5,
+ JSV_FALSE=6,
+ JSV_NULL=7
+};
+extern struct json_service_st {
+ enum json_types (*json_type)(const char *js, const char *js_end,
+ const char **value, int *value_len);
+ enum json_types (*json_get_array_item)(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len);
+ enum json_types (*json_get_object_key)(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len);
+ enum json_types (*json_get_object_nkey)(const char *js,const char *js_end,
+ int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len);
+ int (*json_escape_string)(const char *str,const char *str_end,
+ char *json, char *json_end);
+ int (*json_unescape_json)(const char *json_str, const char *json_end,
+ char *res, char *res_end);
+} *json_service;
+enum json_types json_type(const char *js, const char *js_end,
+ const char **value, int *value_len);
+enum json_types json_get_array_item(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len);
+enum json_types json_get_object_key(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len);
+enum json_types json_get_object_nkey(const char *js,const char *js_end, int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len);
+int json_escape_string(const char *str,const char *str_end,
+ char *json, char *json_end);
+int json_unescape_json(const char *json_str, const char *json_end,
+ char *res, char *res_end);
struct st_mysql_xid {
long formatID;
long gtrid_length;
@@ -531,6 +576,6 @@ void thd_wakeup_subsequent_commits(void* thd, int wakeup_error);
struct st_mariadb_password_validation
{
int interface_version;
- int (*validate_password)(MYSQL_CONST_LEX_STRING *username,
- MYSQL_CONST_LEX_STRING *password);
+ int (*validate_password)(const MYSQL_CONST_LEX_STRING *username,
+ const MYSQL_CONST_LEX_STRING *password);
};
diff --git a/include/mysql/service_json.h b/include/mysql/service_json.h
new file mode 100644
index 00000000000..141b76279a5
--- /dev/null
+++ b/include/mysql/service_json.h
@@ -0,0 +1,117 @@
+/* Copyright (C) 2018 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
+
+#ifndef MYSQL_SERVICE_JSON
+#define MYSQL_SERVICE_JSON
+
+/**
+  @file
+  json service
+
+  Exports JSON parsing methods for plugins to use.
+
+  Functions of the service:
+  json_type - returns the type of the JSON argument,
+  and the parsed value if it is a scalar (not an object or an array)
+
+  json_get_array_item - expects a JSON array as an argument,
+  and returns the n_item-th item's type and value.
+  Returns the JSV_NOTHING type if the array is shorter
+  than n_item, and the actual length of the array in v_len.
+
+  json_get_object_key - expects a JSON object as an argument,
+  searches for a key in the object, and returns its type and value.
+  JSV_NOTHING if no such key is found; the number of keys
+  is returned in v_len.
+
+  json_get_object_nkey - expects a JSON object as an argument,
+  finds the n_key-th key in the object, and returns its name, type and value.
+  JSV_NOTHING if the object has fewer keys than n_key.
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum json_types
+{
+ JSV_BAD_JSON=-1,
+ JSV_NOTHING=0,
+ JSV_OBJECT=1,
+ JSV_ARRAY=2,
+ JSV_STRING=3,
+ JSV_NUMBER=4,
+ JSV_TRUE=5,
+ JSV_FALSE=6,
+ JSV_NULL=7
+};
+
+extern struct json_service_st {
+ enum json_types (*json_type)(const char *js, const char *js_end,
+ const char **value, int *value_len);
+ enum json_types (*json_get_array_item)(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len);
+ enum json_types (*json_get_object_key)(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len);
+ enum json_types (*json_get_object_nkey)(const char *js,const char *js_end,
+ int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len);
+ int (*json_escape_string)(const char *str,const char *str_end,
+ char *json, char *json_end);
+ int (*json_unescape_json)(const char *json_str, const char *json_end,
+ char *res, char *res_end);
+} *json_service;
+
+#ifdef MYSQL_DYNAMIC_PLUGIN
+
+#define json_type json_service->json_type
+#define json_get_array_item json_service->json_get_array_item
+#define json_get_object_key json_service->json_get_object_key
+#define json_get_object_nkey json_service->json_get_object_nkey
+#define json_escape_string json_service->json_escape_string
+#define json_unescape_json json_service->json_unescape_json
+
+#else
+
+enum json_types json_type(const char *js, const char *js_end,
+ const char **value, int *value_len);
+enum json_types json_get_array_item(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len);
+enum json_types json_get_object_key(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len);
+enum json_types json_get_object_nkey(const char *js,const char *js_end, int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len);
+int json_escape_string(const char *str,const char *str_end,
+ char *json, char *json_end);
+int json_unescape_json(const char *json_str, const char *json_end,
+ char *res, char *res_end);
+
+#endif /*MYSQL_DYNAMIC_PLUGIN*/
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*MYSQL_SERVICE_JSON */
+
+
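
A brief usage sketch from a plugin's point of view (the JSON text and key name are made up, and it is assumed that value points at the string contents for JSV_STRING); per the comment block above, JSV_NOTHING means the key was not found and value_len then carries the number of keys in the object:

  #include <mysql/service_json.h>
  #include <stdio.h>
  #include <string.h>

  /* Print the "name" member of a JSON object, if it is a string (sketch). */
  static void print_name_key(const char *js)
  {
    const char *js_end= js + strlen(js);
    const char *value;
    int value_len;

    enum json_types t= json_get_object_key(js, js_end, "name",
                                           &value, &value_len);
    if (t == JSV_STRING)
      printf("name = %.*s\n", value_len, value);
    else if (t == JSV_NOTHING)
      printf("no \"name\" key; the object has %d keys\n", value_len);
    else if (t == JSV_BAD_JSON)
      printf("malformed JSON\n");
  }

  /* e.g. print_name_key("{\"name\": \"mariadb\", \"port\": 3306}"); */

In a dynamically loaded plugin the same call compiles unchanged, since the macros above redirect it through json_service.
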
diff --git a/include/mysql/service_my_print_error.h b/include/mysql/service_my_print_error.h
index 5a71be74fd0..0046ba54f6d 100644
--- a/include/mysql/service_my_print_error.h
+++ b/include/mysql/service_my_print_error.h
@@ -32,10 +32,11 @@ extern "C" {
#include <stdlib.h>
#endif
-#define ME_ERROR_LOG 64 /* Write the message to the error log */
-#define ME_NOTE 1024 /* Not an error, just a note */
-#define ME_WARNING 2048 /* Not an error, just a warning */
-#define ME_FATAL 4096 /* Fatal statement error */
+#define ME_ERROR_LOG 64 /* Write the message to the error log */
+#define ME_ERROR_LOG_ONLY 128 /* Write the error message to error log only */
+#define ME_NOTE 1024 /* Not an error, just a note */
+#define ME_WARNING 2048 /* Not an error, just a warning */
+#define ME_FATAL 4096 /* Fatal statement error */
extern struct my_print_error_service_st {
void (*my_error_func)(unsigned int nr, unsigned long MyFlags, ...);
diff --git a/include/mysql/service_wsrep.h b/include/mysql/service_wsrep.h
index 48f98661619..d76643364b3 100644
--- a/include/mysql/service_wsrep.h
+++ b/include/mysql/service_wsrep.h
@@ -1,5 +1,20 @@
#ifndef MYSQL_SERVICE_WSREP_INCLUDED
+#define MYSQL_SERVICE_WSREP_INCLUDED
+
+enum Wsrep_service_key_type
+{
+ WSREP_SERVICE_KEY_SHARED,
+ WSREP_SERVICE_KEY_REFERENCE,
+ WSREP_SERVICE_KEY_UPDATE,
+ WSREP_SERVICE_KEY_EXCLUSIVE
+};
+
+#if (defined (MYSQL_DYNAMIC_PLUGIN) && defined(MYSQL_SERVICE_WSREP_DYNAMIC_INCLUDED)) || (!defined(MYSQL_DYNAMIC_PLUGIN) && defined(MYSQL_SERVICE_WSREP_STATIC_INCLUDED))
+
+#else
+
/* Copyright (c) 2015 MariaDB Corporation Ab
+ 2018 Codership Oy <info@codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -21,162 +36,99 @@
Interface to WSREP functionality in the server.
For engines that want to support galera.
*/
-
+#include <my_pthread.h>
#ifdef __cplusplus
-extern "C" {
#endif
-enum wsrep_conflict_state {
- NO_CONFLICT,
- MUST_ABORT,
- ABORTING,
- ABORTED,
- MUST_REPLAY,
- REPLAYING,
- RETRY_AUTOCOMMIT,
- CERT_FAILURE,
-};
-
-enum wsrep_exec_mode {
- /* Transaction processing before replication. */
- LOCAL_STATE,
- /* Slave thread applying write sets from other nodes or replaying thread. */
- REPL_RECV,
- /* Total-order-isolation mode. */
- TOTAL_ORDER,
- /*
- Transaction procession after it has been replicated in prepare stage and
- has passed certification.
- */
- LOCAL_COMMIT
-};
-
-enum wsrep_query_state {
- QUERY_IDLE,
- QUERY_EXEC,
- QUERY_COMMITTING,
- QUERY_EXITING,
- QUERY_ROLLINGBACK,
-};
-
-enum wsrep_trx_status {
- WSREP_TRX_OK,
- WSREP_TRX_CERT_FAIL, /* certification failure, must abort */
- WSREP_TRX_SIZE_EXCEEDED, /* trx size exceeded */
- WSREP_TRX_ERROR, /* native mysql error */
-};
-
struct xid_t;
-struct wsrep;
struct wsrep_ws_handle;
struct wsrep_buf;
+/* Must match to definition in sql/mysqld.h */
+typedef int64 query_id_t;
+
+
extern struct wsrep_service_st {
- struct wsrep * (*get_wsrep_func)();
- my_bool (*get_wsrep_certify_nonPK_func)();
- my_bool (*get_wsrep_debug_func)();
- my_bool (*get_wsrep_drupal_282555_workaround_func)();
my_bool (*get_wsrep_recovery_func)();
- my_bool (*get_wsrep_load_data_splitting_func)();
- my_bool (*get_wsrep_log_conflicts_func)();
- long (*get_wsrep_protocol_version_func)();
- my_bool (*wsrep_aborting_thd_contains_func)(THD *thd);
- void (*wsrep_aborting_thd_enqueue_func)(THD *thd);
- bool (*wsrep_consistency_check_func)(THD *thd);
- int (*wsrep_is_wsrep_xid_func)(const struct xid_t *xid);
+ bool (*wsrep_consistency_check_func)(MYSQL_THD thd);
+ int (*wsrep_is_wsrep_xid_func)(const void *xid);
long long (*wsrep_xid_seqno_func)(const struct xid_t *xid);
const unsigned char* (*wsrep_xid_uuid_func)(const struct xid_t *xid);
- void (*wsrep_lock_rollback_func)();
- int (*wsrep_on_func)(MYSQL_THD);
- void (*wsrep_post_commit_func)(THD* thd, bool all);
- bool (*wsrep_prepare_key_func)(const unsigned char*, size_t, const unsigned char*, size_t, struct wsrep_buf*, size_t*);
- enum wsrep_trx_status (*wsrep_run_wsrep_commit_func)(THD *thd, bool all);
- void (*wsrep_thd_LOCK_func)(THD *thd);
- void (*wsrep_thd_UNLOCK_func)(THD *thd);
- void (*wsrep_thd_awake_func)(THD *thd, my_bool signal);
- enum wsrep_conflict_state (*wsrep_thd_conflict_state_func)(MYSQL_THD, my_bool);
- const char * (*wsrep_thd_conflict_state_str_func)(THD *thd);
- enum wsrep_exec_mode (*wsrep_thd_exec_mode_func)(THD *thd);
- const char * (*wsrep_thd_exec_mode_str_func)(THD *thd);
- enum wsrep_conflict_state (*wsrep_thd_get_conflict_state_func)(MYSQL_THD);
- my_bool (*wsrep_thd_is_BF_func)(MYSQL_THD , my_bool);
- my_bool (*wsrep_thd_is_wsrep_func)(MYSQL_THD thd);
- char * (*wsrep_thd_query_func)(THD *thd);
- enum wsrep_query_state (*wsrep_thd_query_state_func)(THD *thd);
- const char * (*wsrep_thd_query_state_str_func)(THD *thd);
- int (*wsrep_thd_retry_counter_func)(THD *thd);
- void (*wsrep_thd_set_conflict_state_func)(THD *thd, enum wsrep_conflict_state state);
- bool (*wsrep_thd_ignore_table_func)(THD *thd);
- long long (*wsrep_thd_trx_seqno_func)(THD *thd);
- struct wsrep_ws_handle * (*wsrep_thd_ws_handle_func)(THD *thd);
+ my_bool (*wsrep_on_func)(const MYSQL_THD thd);
+ bool (*wsrep_prepare_key_for_innodb_func)(MYSQL_THD thd, const unsigned char*, size_t, const unsigned char*, size_t, struct wsrep_buf*, size_t*);
+ void (*wsrep_thd_LOCK_func)(const MYSQL_THD thd);
+ void (*wsrep_thd_UNLOCK_func)(const MYSQL_THD thd);
+ const char * (*wsrep_thd_query_func)(const MYSQL_THD thd);
+ int (*wsrep_thd_retry_counter_func)(const MYSQL_THD thd);
+ bool (*wsrep_thd_ignore_table_func)(MYSQL_THD thd);
+ long long (*wsrep_thd_trx_seqno_func)(const MYSQL_THD thd);
void (*wsrep_thd_auto_increment_variables_func)(THD *thd, unsigned long long *offset, unsigned long long *increment);
- void (*wsrep_set_load_multi_commit_func)(THD *thd, bool split);
- bool (*wsrep_is_load_multi_commit_func)(THD *thd);
- int (*wsrep_trx_is_aborting_func)(MYSQL_THD thd);
- int (*wsrep_trx_order_before_func)(MYSQL_THD, MYSQL_THD);
- void (*wsrep_unlock_rollback_func)();
+ my_bool (*wsrep_thd_is_aborting_func)(const MYSQL_THD thd);
void (*wsrep_set_data_home_dir_func)(const char *data_dir);
- my_bool (*wsrep_thd_is_applier_func)(MYSQL_THD);
+ my_bool (*wsrep_thd_is_BF_func)(const MYSQL_THD thd, my_bool sync);
+ my_bool (*wsrep_thd_is_local_func)(const MYSQL_THD thd);
+ void (*wsrep_thd_self_abort_func)(MYSQL_THD thd);
+ int (*wsrep_thd_append_key_func)(MYSQL_THD thd, const struct wsrep_key* key,
+ int n_keys, enum Wsrep_service_key_type);
+ const char* (*wsrep_thd_client_state_str_func)(const MYSQL_THD thd);
+ const char* (*wsrep_thd_client_mode_str_func)(const MYSQL_THD thd);
+ const char* (*wsrep_thd_transaction_state_str_func)(const MYSQL_THD thd);
+ query_id_t (*wsrep_thd_transaction_id_func)(const MYSQL_THD thd);
+ my_bool (*wsrep_thd_bf_abort_func)(const MYSQL_THD bf_thd,
+ MYSQL_THD victim_thd,
+ my_bool signal);
+ my_bool (*wsrep_thd_order_before_func)(const MYSQL_THD left, const MYSQL_THD right);
+ void (*wsrep_handle_SR_rollback_func)(MYSQL_THD BF_thd, MYSQL_THD victim_thd);
+ my_bool (*wsrep_thd_skip_locking_func)(const MYSQL_THD thd);
+ const char* (*wsrep_get_sr_table_name_func)();
+ my_bool (*wsrep_get_debug_func)();
+ void (*wsrep_commit_ordered_func)(MYSQL_THD thd);
+ my_bool (*wsrep_thd_is_applying_func)(const MYSQL_THD thd);
} *wsrep_service;
+#define MYSQL_SERVICE_WSREP_INCLUDED
+#endif
+
#ifdef MYSQL_DYNAMIC_PLUGIN
-#define get_wsrep() wsrep_service->get_wsrep_func()
-#define get_wsrep_certify_nonPK() wsrep_service->get_wsrep_certify_nonPK_func()
-#define get_wsrep_debug() wsrep_service->get_wsrep_debug_func()
-#define get_wsrep_drupal_282555_workaround() wsrep_service->get_wsrep_drupal_282555_workaround_func()
+
+#define MYSQL_SERVICE_WSREP_DYNAMIC_INCLUDED
#define get_wsrep_recovery() wsrep_service->get_wsrep_recovery_func()
-#define get_wsrep_load_data_splitting() wsrep_service->get_wsrep_load_data_splitting_func()
-#define get_wsrep_log_conflicts() wsrep_service->get_wsrep_log_conflicts_func()
-#define get_wsrep_protocol_version() wsrep_service->get_wsrep_protocol_version_func()
-#define wsrep_aborting_thd_contains(T) wsrep_service->wsrep_aborting_thd_contains_func(T)
-#define wsrep_aborting_thd_enqueue(T) wsrep_service->wsrep_aborting_thd_enqueue_func(T)
#define wsrep_consistency_check(T) wsrep_service->wsrep_consistency_check_func(T)
#define wsrep_is_wsrep_xid(X) wsrep_service->wsrep_is_wsrep_xid_func(X)
#define wsrep_xid_seqno(X) wsrep_service->wsrep_xid_seqno_func(X)
#define wsrep_xid_uuid(X) wsrep_service->wsrep_xid_uuid_func(X)
-#define wsrep_lock_rollback() wsrep_service->wsrep_lock_rollback_func()
#define wsrep_on(X) wsrep_service->wsrep_on_func(X)
-#define wsrep_post_commit(T,A) wsrep_service->wsrep_post_commit_func(T,A)
-#define wsrep_prepare_key(A,B,C,D,E,F) wsrep_service->wsrep_prepare_key_func(A,B,C,D,E,F)
-#define wsrep_run_wsrep_commit(T,A) wsrep_service->wsrep_run_wsrep_commit_func(T,A)
+#define wsrep_prepare_key_for_innodb(A,B,C,D,E,F,G) wsrep_service->wsrep_prepare_key_for_innodb_func(A,B,C,D,E,F,G)
#define wsrep_thd_LOCK(T) wsrep_service->wsrep_thd_LOCK_func(T)
#define wsrep_thd_UNLOCK(T) wsrep_service->wsrep_thd_UNLOCK_func(T)
-#define wsrep_thd_awake(T,S) wsrep_service->wsrep_thd_awake_func(T,S)
-#define wsrep_thd_conflict_state(T,S) wsrep_service->wsrep_thd_conflict_state_func(T,S)
-#define wsrep_thd_conflict_state_str(T) wsrep_service->wsrep_thd_conflict_state_str_func(T)
-#define wsrep_thd_exec_mode(T) wsrep_service->wsrep_thd_exec_mode_func(T)
-#define wsrep_thd_exec_mode_str(T) wsrep_service->wsrep_thd_exec_mode_str_func(T)
-#define wsrep_thd_get_conflict_state(T) wsrep_service->wsrep_thd_get_conflict_state_func(T)
-#define wsrep_thd_is_BF(T,S) wsrep_service->wsrep_thd_is_BF_func(T,S)
-#define wsrep_thd_is_wsrep(T) wsrep_service->wsrep_thd_is_wsrep_func(T)
#define wsrep_thd_query(T) wsrep_service->wsrep_thd_query_func(T)
-#define wsrep_thd_query_state(T) wsrep_service->wsrep_thd_query_state_func(T)
-#define wsrep_thd_query_state_str(T) wsrep_service->wsrep_thd_query_state_str_func(T)
#define wsrep_thd_retry_counter(T) wsrep_service->wsrep_thd_retry_counter_func(T)
-#define wsrep_thd_set_conflict_state(T,S) wsrep_service->wsrep_thd_set_conflict_state_func(T,S)
#define wsrep_thd_ignore_table(T) wsrep_service->wsrep_thd_ignore_table_func(T)
#define wsrep_thd_trx_seqno(T) wsrep_service->wsrep_thd_trx_seqno_func(T)
-#define wsrep_thd_ws_handle(T) wsrep_service->wsrep_thd_ws_handle_func(T)
#define wsrep_thd_auto_increment_variables(T,O,I) wsrep_service->wsrep_thd_auto_increment_variables_func(T,O,I)
-#define wsrep_set_load_multi_commit(T,S) wsrep_service->wsrep_set_load_multi_commit_func(T,S)
-#define wsrep_is_load_multi_commit(T) wsrep_service->wsrep_is_load_multi_commit_func(T)
-#define wsrep_trx_is_aborting(T) wsrep_service->wsrep_trx_is_aborting_func(T)
-#define wsrep_trx_order_before(T1,T2) wsrep_service->wsrep_trx_order_before_func(T1,T2)
-#define wsrep_unlock_rollback() wsrep_service->wsrep_unlock_rollback_func()
#define wsrep_set_data_home_dir(A) wsrep_service->wsrep_set_data_home_dir_func(A)
-#define wsrep_thd_is_applier(T) wsrep_service->wsrep_thd_is_applier_func(T)
-
-#define wsrep_debug get_wsrep_debug()
-#define wsrep_log_conflicts get_wsrep_log_conflicts()
-#define wsrep_certify_nonPK get_wsrep_certify_nonPK()
-#define wsrep_load_data_splitting get_wsrep_load_data_splitting()
-#define wsrep_drupal_282555_workaround get_wsrep_drupal_282555_workaround()
-#define wsrep_recovery get_wsrep_recovery()
-#define wsrep_protocol_version get_wsrep_protocol_version()
+#define wsrep_thd_is_BF(T,S) wsrep_service->wsrep_thd_is_BF_func(T,S)
+#define wsrep_thd_is_aborting(T) wsrep_service->wsrep_thd_is_aborting_func(T)
+#define wsrep_thd_is_local(T) wsrep_service->wsrep_thd_is_local_func(T)
+#define wsrep_thd_self_abort(T) wsrep_service->wsrep_thd_self_abort_func(T)
+#define wsrep_thd_append_key(T,W,N,K) wsrep_service->wsrep_thd_append_key_func(T,W,N,K)
+#define wsrep_thd_client_state_str(T) wsrep_service->wsrep_thd_client_state_str_func(T)
+#define wsrep_thd_client_mode_str(T) wsrep_service->wsrep_thd_client_mode_str_func(T)
+#define wsrep_thd_transaction_state_str(T) wsrep_service->wsrep_thd_transaction_state_str_func(T)
+#define wsrep_thd_transaction_id(T) wsrep_service->wsrep_thd_transaction_id_func(T)
+#define wsrep_thd_bf_abort(T,T2,S) wsrep_service->wsrep_thd_bf_abort_func(T,T2,S)
+#define wsrep_thd_order_before(L,R) wsrep_service->wsrep_thd_order_before_func(L,R)
+#define wsrep_handle_SR_rollback(B,V) wsrep_service->wsrep_handle_SR_rollback_func(B,V)
+#define wsrep_thd_skip_locking(T) wsrep_service->wsrep_thd_skip_locking_func(T)
+#define wsrep_get_sr_table_name() wsrep_service->wsrep_get_sr_table_name_func()
+#define wsrep_get_debug() wsrep_service->wsrep_get_debug_func()
+#define wsrep_commit_ordered(T) wsrep_service->wsrep_commit_ordered_func(T)
+#define wsrep_thd_is_applying(T) wsrep_service->wsrep_thd_is_applying_func(T)
#else
-extern my_bool wsrep_debug;
+#define MYSQL_SERVICE_WSREP_STATIC_INCLUDED
+extern ulong wsrep_debug;
extern my_bool wsrep_log_conflicts;
extern my_bool wsrep_certify_nonPK;
extern my_bool wsrep_load_data_splitting;
@@ -184,57 +136,86 @@ extern my_bool wsrep_drupal_282555_workaround;
extern my_bool wsrep_recovery;
extern long wsrep_protocol_version;
-bool wsrep_consistency_check(THD *thd);
-bool wsrep_prepare_key(const unsigned char* cache_key, size_t cache_key_len, const unsigned char* row_id, size_t row_id_len, struct wsrep_buf* key, size_t* key_len);
-char *wsrep_thd_query(THD *thd);
-const char *wsrep_thd_conflict_state_str(THD *thd);
-const char *wsrep_thd_exec_mode_str(THD *thd);
-const char *wsrep_thd_query_state_str(THD *thd);
-enum wsrep_conflict_state wsrep_thd_conflict_state(MYSQL_THD thd, my_bool sync);
-enum wsrep_conflict_state wsrep_thd_get_conflict_state(MYSQL_THD thd);
-enum wsrep_exec_mode wsrep_thd_exec_mode(THD *thd);
-enum wsrep_query_state wsrep_thd_query_state(THD *thd);
-enum wsrep_trx_status wsrep_run_wsrep_commit(THD *thd, bool all);
-int wsrep_is_wsrep_xid(const struct xid_t* xid);
-long long wsrep_xid_seqno(const struct xid_t* xid);
+extern "C" bool wsrep_consistency_check(MYSQL_THD thd);
+bool wsrep_prepare_key_for_innodb(MYSQL_THD thd, const unsigned char* cache_key, size_t cache_key_len, const unsigned char* row_id, size_t row_id_len, struct wsrep_buf* key, size_t* key_len);
+extern "C" const char *wsrep_thd_query(const MYSQL_THD thd);
+extern "C" int wsrep_is_wsrep_xid(const void* xid);
+extern "C" long long wsrep_xid_seqno(const struct xid_t* xid);
const unsigned char* wsrep_xid_uuid(const struct xid_t* xid);
-int wsrep_on(MYSQL_THD thd);
-int wsrep_thd_retry_counter(THD *thd);
-int wsrep_trx_is_aborting(MYSQL_THD thd);
-int wsrep_trx_order_before(MYSQL_THD thd1, MYSQL_THD thd2);
-long get_wsrep_protocol_version();
-long long wsrep_thd_trx_seqno(THD *thd);
-my_bool get_wsrep_certify_nonPK();
-my_bool get_wsrep_debug();
-my_bool get_wsrep_drupal_282555_workaround();
+extern "C" long long wsrep_thd_trx_seqno(const MYSQL_THD thd);
my_bool get_wsrep_recovery();
-my_bool get_wsrep_load_data_splitting();
-my_bool get_wsrep_log_conflicts();
-my_bool wsrep_aborting_thd_contains(THD *thd);
-my_bool wsrep_thd_is_BF(MYSQL_THD thd, my_bool sync);
-my_bool wsrep_thd_is_wsrep(MYSQL_THD thd);
-struct wsrep *get_wsrep();
-struct wsrep_ws_handle *wsrep_thd_ws_handle(THD *thd);
void wsrep_thd_auto_increment_variables(THD *thd, unsigned long long *offset, unsigned long long *increment);
-void wsrep_set_load_multi_commit(THD *thd, bool split);
-bool wsrep_is_load_multi_commit(THD *thd);
-void wsrep_aborting_thd_enqueue(THD *thd);
-void wsrep_lock_rollback();
-void wsrep_post_commit(THD* thd, bool all);
-void wsrep_thd_LOCK(THD *thd);
-void wsrep_thd_UNLOCK(THD *thd);
-void wsrep_thd_awake(THD *thd, my_bool signal);
-void wsrep_thd_set_conflict_state(THD *thd, enum wsrep_conflict_state state);
-bool wsrep_thd_ignore_table(THD *thd);
-void wsrep_unlock_rollback();
+bool wsrep_thd_ignore_table(MYSQL_THD thd);
void wsrep_set_data_home_dir(const char *data_dir);
-my_bool wsrep_thd_is_applier(MYSQL_THD thd);
-#endif
-#ifdef __cplusplus
-}
-#endif
+/* from mysql wsrep-lib */
+#include "my_global.h"
+#include "my_pthread.h"
+
+/* Return true if wsrep is enabled for a thd. This means that
+ wsrep is enabled globally and the thd has wsrep on */
+extern "C" my_bool wsrep_on(const MYSQL_THD thd);
+/* Lock thd wsrep lock */
+extern "C" void wsrep_thd_LOCK(const MYSQL_THD thd);
+/* Unlock thd wsrep lock */
+extern "C" void wsrep_thd_UNLOCK(const MYSQL_THD thd);
+
+/* Return thd client state string */
+extern "C" const char* wsrep_thd_client_state_str(const MYSQL_THD thd);
+/* Return thd client mode string */
+extern "C" const char* wsrep_thd_client_mode_str(const MYSQL_THD thd);
+/* Return thd transaction state string */
+extern "C" const char* wsrep_thd_transaction_state_str(const MYSQL_THD thd);
+
+/* Return current transaction id */
+extern "C" query_id_t wsrep_thd_transaction_id(const MYSQL_THD thd);
+/* Mark thd own transaction as aborted */
+extern "C" void wsrep_thd_self_abort(MYSQL_THD thd);
+/* Return true if thd is in replicating mode */
+extern "C" my_bool wsrep_thd_is_local(const MYSQL_THD thd);
+/* Return true if thd is in high priority mode */
+/* todo: rename to is_high_priority() */
+extern "C" my_bool wsrep_thd_is_applying(const MYSQL_THD thd);
+/* Return true if thd is in TOI mode */
+extern "C" my_bool wsrep_thd_is_toi(const MYSQL_THD thd);
+/* Return true if thd is in replicating TOI mode */
+extern "C" my_bool wsrep_thd_is_local_toi(const MYSQL_THD thd);
+/* Return true if thd is in RSU mode */
+extern "C" my_bool wsrep_thd_is_in_rsu(const MYSQL_THD thd);
+/* Return true if thd is in BF mode, either high_priority or TOI */
+extern "C" my_bool wsrep_thd_is_BF(const MYSQL_THD thd, my_bool sync);
+/* Return true if thd is streaming */
+extern "C" my_bool wsrep_thd_is_SR(const MYSQL_THD thd);
+extern "C" void wsrep_handle_SR_rollback(MYSQL_THD BF_thd, MYSQL_THD victim_thd);
+/* Return thd retry counter */
+extern "C" int wsrep_thd_retry_counter(const MYSQL_THD thd);
+/* BF abort victim_thd */
+extern "C" my_bool wsrep_thd_bf_abort(const MYSQL_THD bf_thd,
+ MYSQL_THD victim_thd,
+ my_bool signal);
+/* Return true if left thd is ordered before right thd */
+extern "C" my_bool wsrep_thd_order_before(const MYSQL_THD left, const MYSQL_THD right);
+/* Return true if thd should skip locking. This means that the thd
+ is operating on shared resource inside commit order critical section. */
+extern "C" my_bool wsrep_thd_skip_locking(const MYSQL_THD thd);
+/* Return true if thd is aborting */
+extern "C" my_bool wsrep_thd_is_aborting(const MYSQL_THD thd);
+
+struct wsrep_key;
+struct wsrep_key_array;
+extern "C" int wsrep_thd_append_key(MYSQL_THD thd,
+ const struct wsrep_key* key,
+ int n_keys,
+ enum Wsrep_service_key_type);
+
+extern const char* wsrep_sr_table_name_full;
+
+extern "C" const char* wsrep_get_sr_table_name();
+
+extern "C" my_bool wsrep_get_debug();
+
+extern "C" void wsrep_commit_ordered(MYSQL_THD thd);
+extern "C" my_bool wsrep_thd_is_applying(const MYSQL_THD thd);
-#define MYSQL_SERVICE_WSREP_INCLUDED
#endif
-
+#endif /* MYSQL_SERVICE_WSREP_INCLUDED */
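
The rewritten service_wsrep.h keeps the usual MariaDB service split: dynamic plugins reach wsrep through a table of function pointers hidden behind macros, while statically linked code gets plain extern "C" declarations. Below is a minimal self-contained sketch of that dual-path pattern; demo_thd, demo_service and DEMO_DYNAMIC_PLUGIN are hypothetical stand-ins, not symbols from the header.

    // Dual-path service dispatch, modeled on the #ifdef MYSQL_DYNAMIC_PLUGIN split above.
    #include <cstdio>

    struct demo_thd { bool local; };

    #ifdef DEMO_DYNAMIC_PLUGIN
    // Dynamic plugins call through a table of function pointers. In the real service
    // the server fills the table in; here it is populated locally so the sketch links.
    struct demo_service_st {
      bool (*thd_is_local_func)(const demo_thd*);
    };
    static bool demo_thd_is_local_impl(const demo_thd *thd) { return thd->local; }
    static demo_service_st demo_service_table = { demo_thd_is_local_impl };
    static demo_service_st *demo_service = &demo_service_table;
    #define demo_thd_is_local(T) demo_service->thd_is_local_func(T)
    #else
    // Statically linked code calls the function directly.
    extern "C" bool demo_thd_is_local(const demo_thd *thd) { return thd->local; }
    #endif

    int main() {
      demo_thd thd{true};
      std::printf("is_local: %d\n", demo_thd_is_local(&thd) ? 1 : 0);
      return 0;
    }
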
diff --git a/include/mysql/services.h b/include/mysql/services.h
index 6168c5ed8dc..6dc970df1e6 100644
--- a/include/mysql/services.h
+++ b/include/mysql/services.h
@@ -39,6 +39,7 @@ extern "C" {
#include <mysql/service_thd_specifics.h>
#include <mysql/service_thd_timezone.h>
#include <mysql/service_thd_wait.h>
+#include <mysql/service_json.h>
/*#include <mysql/service_wsrep.h>*/
#ifdef __cplusplus
diff --git a/include/mysql_com.h b/include/mysql_com.h
index 902c0ff2706..3b1dee91a6e 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -203,6 +203,8 @@ enum enum_indicator_type
#define VERS_UPDATE_UNVERSIONED_FLAG (1 << 29) /* column that doesn't support
system versioning when table
itself supports it*/
+#define LONG_UNIQUE_HASH_FIELD (1<< 30) /* This field will store hash for unique
+ column */
#define REFRESH_GRANT (1ULL << 0) /* Refresh grant tables */
#define REFRESH_LOG (1ULL << 1) /* Start on new log file */
@@ -231,6 +233,7 @@ enum enum_indicator_type
#define REFRESH_DES_KEY_FILE (1ULL << 18)
#define REFRESH_USER_RESOURCES (1ULL << 19)
#define REFRESH_FOR_EXPORT (1ULL << 20) /* FLUSH TABLES ... FOR EXPORT */
+#define REFRESH_SSL (1ULL << 21)
#define REFRESH_GENERIC (1ULL << 30)
#define REFRESH_FAST (1ULL << 31) /* Intern flag */
@@ -332,12 +335,8 @@ enum enum_indicator_type
CLIENT_DEPRECATE_EOF |\
CLIENT_CONNECT_ATTRS |\
MARIADB_CLIENT_COM_MULTI |\
- MARIADB_CLIENT_STMT_BULK_OPERATIONS)
-
-/*
- To be added later:
- CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS
-*/
+ MARIADB_CLIENT_STMT_BULK_OPERATIONS |\
+ CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS)
/*
Switch off the flags that are optional and depending on build flags
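
The new LONG_UNIQUE_HASH_FIELD, REFRESH_SSL and CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS entries follow the existing one-bit-per-option scheme, so options combine with | and are tested with &. A small illustration of that idiom, using local DEMO_* constants that only mirror the pattern (the authoritative values live in mysql_com.h):

    #include <cstdint>
    #include <cstdio>

    static const uint64_t DEMO_REFRESH_GRANT = 1ULL << 0;
    static const uint64_t DEMO_REFRESH_SSL   = 1ULL << 21;        // refresh bit added in this diff
    static const uint32_t DEMO_LONG_UNIQUE_HASH_FIELD = 1u << 30; // field flag added in this diff

    int main() {
      uint64_t refresh = DEMO_REFRESH_GRANT | DEMO_REFRESH_SSL;   // combine options
      if (refresh & DEMO_REFRESH_SSL)                             // test a single bit
        std::printf("reload TLS material\n");

      uint32_t field_flags = DEMO_LONG_UNIQUE_HASH_FIELD;
      std::printf("hash field: %d\n",
                  (int)((field_flags & DEMO_LONG_UNIQUE_HASH_FIELD) != 0));
      return 0;
    }
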
diff --git a/include/mysql_embed.h b/include/mysql_embed.h
index 12b18ff965e..3047593fb6c 100644
--- a/include/mysql_embed.h
+++ b/include/mysql_embed.h
@@ -25,7 +25,6 @@
/* TODO HF add #undef HAVE_VIO if we don't want client in embedded library */
#undef HAVE_DLOPEN /* No udf functions */
-#undef HAVE_SMEM /* No shared memory */
#endif /* EMBEDDED_LIBRARY */
#endif /* MYSQL_EMBED_INCLUDED */
diff --git a/include/service_versions.h b/include/service_versions.h
index 753d6444475..050012d30b9 100644
--- a/include/service_versions.h
+++ b/include/service_versions.h
@@ -42,3 +42,4 @@
#define VERSION_thd_timezone 0x0100
#define VERSION_thd_wait 0x0100
#define VERSION_wsrep 0x0202
+#define VERSION_json 0x0100
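
VERSION_json 0x0100 joins the other service versions, which are conventionally read as a major byte and a minor byte (0x0202 = 2.2). A hedged sketch of the compatibility check that convention implies; this illustrates the idea only and is not code from the server:

    #include <cstdio>

    // Assumed convention: majors must match, server minor must be >= plugin minor.
    static bool service_compatible(unsigned server_ver, unsigned plugin_built_against) {
      unsigned srv_major = server_ver >> 8,   plg_major = plugin_built_against >> 8;
      unsigned srv_minor = server_ver & 0xff, plg_minor = plugin_built_against & 0xff;
      return srv_major == plg_major && srv_minor >= plg_minor;
    }

    int main() {
      std::printf("%d\n", service_compatible(0x0100, 0x0100) ? 1 : 0); // json 1.0 vs 1.0 -> 1
      std::printf("%d\n", service_compatible(0x0202, 0x0201) ? 1 : 0); // wsrep 2.2 vs 2.1 -> 1
      std::printf("%d\n", service_compatible(0x0202, 0x0301) ? 1 : 0); // major mismatch -> 0
      return 0;
    }
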
diff --git a/include/thr_lock.h b/include/thr_lock.h
index e6451bf21c4..ee15fd7315d 100644
--- a/include/thr_lock.h
+++ b/include/thr_lock.h
@@ -168,9 +168,9 @@ void thr_set_lock_wait_callback(void (*before_wait)(void),
void (*after_wait)(void));
#ifdef WITH_WSREP
- typedef my_bool (* wsrep_thd_is_brute_force_fun)(void *, my_bool);
- typedef int (* wsrep_abort_thd_fun)(void *, void *, my_bool);
- typedef int (* wsrep_on_fun)(void *);
+ typedef my_bool (* wsrep_thd_is_brute_force_fun)(const MYSQL_THD, my_bool);
+ typedef my_bool(* wsrep_abort_thd_fun)(const MYSQL_THD, MYSQL_THD, my_bool);
+ typedef my_bool (* wsrep_on_fun)(const MYSQL_THD);
void wsrep_thr_lock_init(
wsrep_thd_is_brute_force_fun bf_fun, wsrep_abort_thd_fun abort_fun,
my_bool debug, my_bool convert_LOCK_to_trx, wsrep_on_fun on_fun);
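
The thr_lock callbacks now take typed THD handles instead of void*. A self-contained sketch of the registration pattern, with demo_* names standing in for MYSQL_THD and wsrep_thr_lock_init:

    #include <cstdio>

    struct demo_thd { bool brute_force; };
    typedef bool (*demo_is_bf_fun)(const demo_thd*, bool sync);
    typedef bool (*demo_abort_fun)(const demo_thd *bf, demo_thd *victim, bool signal);

    static demo_is_bf_fun g_is_bf = nullptr;
    static demo_abort_fun g_abort = nullptr;

    // The lock manager stores the callbacks and invokes them later when a
    // lock conflict involves a brute-force (BF) thread.
    static void demo_thr_lock_init(demo_is_bf_fun bf, demo_abort_fun abort_fun) {
      g_is_bf = bf;
      g_abort = abort_fun;
    }

    static bool my_is_bf(const demo_thd *thd, bool) { return thd->brute_force; }
    static bool my_abort(const demo_thd*, demo_thd *victim, bool) {
      victim->brute_force = false;   // pretend the victim was aborted
      return true;
    }

    int main() {
      demo_thr_lock_init(my_is_bf, my_abort);
      demo_thd bf{true}, victim{false};
      if (g_is_bf(&bf, true))
        g_abort(&bf, &victim, true);
      std::printf("victim aborted\n");
      return 0;
    }
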
diff --git a/include/thread_pool_priv.h b/include/thread_pool_priv.h
index ef3cd6f269c..7eb2800555a 100644
--- a/include/thread_pool_priv.h
+++ b/include/thread_pool_priv.h
@@ -61,9 +61,6 @@ void thd_set_mysys_var(THD *thd, st_my_thread_var *mysys_var);
my_socket thd_get_fd(THD *thd);
int thd_store_globals(THD* thd);
-THD *first_global_thread();
-THD *next_global_thread(THD *thd);
-
/* Print to the MySQL error log */
void sql_print_error(const char *format, ...);
diff --git a/include/violite.h b/include/violite.h
index 55f8328df47..74b53f41fdd 100644
--- a/include/violite.h
+++ b/include/violite.h
@@ -38,7 +38,7 @@ typedef struct st_vio Vio;
enum enum_vio_type
{
VIO_CLOSED, VIO_TYPE_TCPIP, VIO_TYPE_SOCKET, VIO_TYPE_NAMEDPIPE,
- VIO_TYPE_SSL, VIO_TYPE_SHARED_MEMORY
+ VIO_TYPE_SSL
};
/**
@@ -68,13 +68,6 @@ Vio* vio_new(my_socket sd, enum enum_vio_type type, uint flags);
Vio* mysql_socket_vio_new(MYSQL_SOCKET mysql_socket, enum enum_vio_type type, uint flags);
#ifdef __WIN__
Vio* vio_new_win32pipe(HANDLE hPipe);
-Vio* vio_new_win32shared_memory(HANDLE handle_file_map,
- HANDLE handle_map,
- HANDLE event_server_wrote,
- HANDLE event_server_read,
- HANDLE event_client_wrote,
- HANDLE event_client_read,
- HANDLE event_conn_closed);
#else
#define HANDLE void *
#endif /* __WIN__ */
@@ -89,6 +82,7 @@ size_t vio_write(Vio *vio, const uchar * buf, size_t size);
int vio_blocking(Vio *vio, my_bool onoff, my_bool *old_mode);
my_bool vio_is_blocking(Vio *vio);
/* setsockopt TCP_NODELAY at IPPROTO_TCP level, when possible */
+int vio_nodelay(Vio *vio, my_bool on);
int vio_fastsend(Vio *vio);
/* setsockopt SO_KEEPALIVE at SOL_SOCKET level, when possible */
int vio_keepalive(Vio *vio, my_bool onoff);
@@ -264,22 +258,9 @@ struct st_vio
#ifdef HAVE_OPENSSL
void *ssl_arg;
#endif
-#ifdef HAVE_SMEM
- HANDLE handle_file_map;
- char *handle_map;
- HANDLE event_server_wrote;
- HANDLE event_server_read;
- HANDLE event_client_wrote;
- HANDLE event_client_read;
- HANDLE event_conn_closed;
- size_t shared_memory_remain;
- char *shared_memory_pos;
-#endif /* HAVE_SMEM */
#ifdef _WIN32
HANDLE hPipe;
OVERLAPPED overlapped;
- DWORD read_timeout_ms;
- DWORD write_timeout_ms;
#endif
};
#endif /* vio_violite_h_ */
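
The new vio_nodelay() declaration sits next to vio_fastsend() and, per its comment, wraps the standard TCP_NODELAY option. A plain-POSIX sketch of what such a wrapper does, independent of the Vio struct:

    #include <cstdio>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <unistd.h>

    // Disable (on=true) or re-enable Nagle's algorithm on a TCP socket.
    static int demo_nodelay(int fd, bool on) {
      int flag = on ? 1 : 0;
      return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(flag));
    }

    int main() {
      int fd = socket(AF_INET, SOCK_STREAM, 0);
      if (fd < 0) { std::perror("socket"); return 1; }
      if (demo_nodelay(fd, true) != 0) std::perror("setsockopt");
      else std::puts("TCP_NODELAY set");
      close(fd);
      return 0;
    }
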
diff --git a/include/wsrep.h b/include/wsrep.h
index dc85670d60e..df8a88e1c69 100644
--- a/include/wsrep.h
+++ b/include/wsrep.h
@@ -13,11 +13,11 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
-#include <my_config.h>
-
#ifndef WSREP_INCLUDED
#define WSREP_INCLUDED
+#include <my_config.h>
+
#ifdef WITH_WSREP
#define IF_WSREP(A,B) A
#define DBUG_ASSERT_IF_WSREP(A) DBUG_ASSERT(A)
@@ -28,12 +28,14 @@
goto wsrep_error_label;
#define WSREP_TO_ISOLATION_BEGIN_ALTER(db_, table_, table_list_, alter_info_) \
- if (WSREP_ON && WSREP(thd) && wsrep_to_isolation_begin(thd, db_, table_, \
- table_list_, alter_info_)) \
+ if (WSREP(thd) && wsrep_thd_is_local(thd) && \
+ wsrep_to_isolation_begin(thd, db_, table_, \
+ table_list_, alter_info_)) \
goto wsrep_error_label;
-#define WSREP_TO_ISOLATION_END \
- if (WSREP_ON && (WSREP(thd) || (thd && thd->wsrep_exec_mode==TOTAL_ORDER))) \
+#define WSREP_TO_ISOLATION_END \
+ if ((WSREP(thd) && wsrep_thd_is_local_toi(thd)) || \
+ wsrep_thd_is_in_rsu(thd)) \
wsrep_to_isolation_end(thd);
/*
@@ -50,24 +52,26 @@
#define WSREP_WARN(...) WSREP_LOG(sql_print_warning, ##__VA_ARGS__)
#define WSREP_ERROR(...) WSREP_LOG(sql_print_error, ##__VA_ARGS__)
-#define WSREP_SYNC_WAIT(thd_, before_) \
- do { if (WSREP_CLIENT(thd_) && \
- wsrep_sync_wait(thd_, before_)) goto wsrep_error_label; } while(0)
-#define WSREP_ERROR_LABEL wsrep_error_label
-#else
+#define WSREP_SYNC_WAIT(thd_, before_) \
+ { if (WSREP_CLIENT(thd_) && \
+ wsrep_sync_wait(thd_, before_)) goto wsrep_error_label; }
+
+#else /* !WITH_WSREP */
+
+/* These macros are needed to compile MariaDB without WSREP support
+ * (e.g. embedded) */
+
#define IF_WSREP(A,B) B
-#define DBUG_ASSERT_IF_WSREP(A)
+//#define DBUG_ASSERT_IF_WSREP(A)
#define WSREP_DEBUG(...)
-#define WSREP_INFO(...)
-#define WSREP_WARN(...)
+//#define WSREP_INFO(...)
+//#define WSREP_WARN(...)
#define WSREP_ERROR(...)
-#define WSREP_TO_ISOLATION_BEGIN(db_, table_, table_list_)
+#define WSREP_TO_ISOLATION_BEGIN(db_, table_, table_list_) do { } while(0)
#define WSREP_TO_ISOLATION_BEGIN_ALTER(db_, table_, table_list_, alter_info_)
#define WSREP_TO_ISOLATION_END
#define WSREP_TO_ISOLATION_BEGIN_WRTCHK(db_, table_, table_list_)
-#define WSREP_SYNC_WAIT(thd_, before_) do { } while(0)
-#define WSREP_ERROR_LABEL goto wsrep_error_label; wsrep_error_label
-
+#define WSREP_SYNC_WAIT(thd_, before_)
#endif /* WITH_WSREP */
#endif /* WSREP_INCLUDED */
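
The reworked WSREP_TO_ISOLATION_BEGIN/END and WSREP_SYNC_WAIT macros keep the goto-to-error-label shape used throughout the server: DDL-style code brackets its work with a begin/end pair and bails out to a local error label if total order isolation cannot be entered. A toy illustration of that calling pattern; all names are hypothetical stand-ins for the real macros and thd:

    #include <cstdio>

    static bool demo_to_isolation_begin(bool replicate_ok) { return !replicate_ok; } // non-zero = error
    static void demo_to_isolation_end() { std::puts("TOI ended"); }

    static bool run_ddl(bool replicate_ok) {
      if (demo_to_isolation_begin(replicate_ok))
        goto wsrep_error_label;                 // same shape as WSREP_TO_ISOLATION_BEGIN
      std::puts("executing DDL under total order isolation");
      demo_to_isolation_end();                  // same shape as WSREP_TO_ISOLATION_END
      return true;
    wsrep_error_label:
      std::puts("could not enter total order isolation");
      return false;
    }

    int main() { return run_ddl(true) && !run_ddl(false) ? 0 : 1; }
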
diff --git a/libmariadb b/libmariadb
-Subproject b50871611764d282874ad095d6c021163d1fe35
+Subproject 1dd39fb9f7418f533da05ca1156aa8f60937b7e
diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt
index 98d5860d45e..baa5577bc39 100644
--- a/libmysqld/CMakeLists.txt
+++ b/libmysqld/CMakeLists.txt
@@ -78,7 +78,8 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
../sql/debug_sync.cc ../sql/opt_table_elimination.cc
../sql/sql_prepare.cc ../sql/sql_rename.cc ../sql/sql_repl.cc
../sql/sql_select.cc ../sql/sql_servers.cc
- ../sql/group_by_handler.cc
+ ../sql/group_by_handler.cc ../sql/derived_handler.cc
+ ../sql/select_handler.cc
../sql/sql_show.cc ../sql/sql_state.c
../sql/sql_statistics.cc ../sql/sql_string.cc
../sql/sql_tablespace.cc ../sql/sql_table.cc ../sql/sql_test.cc
@@ -109,6 +110,7 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
../sql/sql_analyze_stmt.cc ../sql/sql_analyze_stmt.h
../sql/compat56.cc
../sql/sql_type.cc ../sql/sql_type.h
+ ../sql/sql_type_json.cc
../sql/table_cache.cc ../sql/mf_iocache_encr.cc
../sql/item_inetfunc.cc
../sql/wsrep_dummy.cc ../sql/encryption.cc
@@ -117,10 +119,13 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
../sql/sql_sequence.cc ../sql/sql_sequence.h
../sql/ha_sequence.cc ../sql/ha_sequence.h
../sql/temporary_tables.cc
- ../sql/proxy_protocol.cc
+ ../sql/proxy_protocol.cc ../sql/backup.cc
../sql/sql_tvc.cc ../sql/sql_tvc.h
../sql/opt_split.cc
+ ../sql/rowid_filter.cc ../sql/rowid_filter.h
../sql/item_vers.cc
+ ../sql/opt_trace.cc
+ ../sql/xa.cc
${GEN_SOURCES}
${MYSYS_LIBWRAP_SOURCE}
)
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index 715f1dde5b5..8326b5da2df 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -432,11 +432,9 @@ int emb_unbuffered_fetch(MYSQL *mysql, char **row)
static void emb_free_embedded_thd(MYSQL *mysql)
{
THD *thd= (THD*)mysql->thd;
- mysql_mutex_lock(&LOCK_thread_count);
+ server_threads.erase(thd);
thd->clear_data_list();
thd->store_globals();
- thd->unlink();
- mysql_mutex_unlock(&LOCK_thread_count);
delete thd;
my_pthread_setspecific_ptr(THR_THD, 0);
mysql->thd=0;
@@ -711,10 +709,7 @@ void *create_embedded_thd(int client_flag)
thd->first_data= 0;
thd->data_tail= &thd->first_data;
bzero((char*) &thd->net, sizeof(thd->net));
-
- mysql_mutex_lock(&LOCK_thread_count);
- threads.append(thd);
- mysql_mutex_unlock(&LOCK_thread_count);
+ server_threads.insert(thd);
thd->mysys_var= 0;
thd->reset_globals();
return thd;
@@ -896,6 +891,20 @@ static char *dup_str_aux(MEM_ROOT *root, const char *from, uint length,
}
+static char *dup_str_aux(MEM_ROOT *root, const char *from,
+ CHARSET_INFO *fromcs, CHARSET_INFO *tocs)
+{
+ return dup_str_aux(root, from, (uint) strlen(from), fromcs, tocs);
+}
+
+
+static char *dup_str_aux(MEM_ROOT *root, const LEX_CSTRING &from,
+ CHARSET_INFO *fromcs, CHARSET_INFO *tocs)
+{
+ return dup_str_aux(root, from.str, (uint) from.length, fromcs, tocs);
+}
+
+
/*
creates new result and hooks it to the list
@@ -974,7 +983,7 @@ write_eof_packet(THD *thd, uint server_status, uint statement_warn_count)
1 if memory allocation failed
*/
-int Protocol::begin_dataset()
+bool Protocol::begin_dataset()
{
MYSQL_DATA *data= thd->alloc_new_dataset();
if (!data)
@@ -987,6 +996,19 @@ int Protocol::begin_dataset()
}
+bool Protocol::begin_dataset(THD *thd, uint numfields)
+{
+ if (begin_dataset())
+ return true;
+ MYSQL_DATA *data= thd->cur_data;
+ data->fields= field_count= numfields;
+ if (!(data->embedded_info->fields_list=
+ (MYSQL_FIELD*)alloc_root(&data->alloc, sizeof(MYSQL_FIELD)*field_count)))
+ return true;
+ return false;
+}
+
+
/*
remove last row of current recordset
@@ -1016,110 +1038,80 @@ void Protocol_text::remove_last_row()
}
+bool Protocol_text::store_field_metadata(const THD * thd,
+ const Send_field &server_field,
+ CHARSET_INFO *charset_for_protocol,
+ uint pos)
+{
+ CHARSET_INFO *cs= system_charset_info;
+ CHARSET_INFO *thd_cs= thd->variables.character_set_results;
+ MYSQL_DATA *data= thd->cur_data;
+ MEM_ROOT *field_alloc= &data->alloc;
+ MYSQL_FIELD *client_field= &thd->cur_data->embedded_info->fields_list[pos];
+ DBUG_ASSERT(server_field.is_sane());
+
+ client_field->db= dup_str_aux(field_alloc, server_field.db_name,
+ cs, thd_cs);
+ client_field->table= dup_str_aux(field_alloc, server_field.table_name,
+ cs, thd_cs);
+ client_field->name= dup_str_aux(field_alloc, server_field.col_name,
+ cs, thd_cs);
+ client_field->org_table= dup_str_aux(field_alloc, server_field.org_table_name,
+ cs, thd_cs);
+ client_field->org_name= dup_str_aux(field_alloc, server_field.org_col_name,
+ cs, thd_cs);
+ if (charset_for_protocol == &my_charset_bin || thd_cs == NULL)
+ {
+ /* No conversion */
+ client_field->charsetnr= charset_for_protocol->number;
+ client_field->length= server_field.length;
+ }
+ else
+ {
+ /* With conversion */
+ client_field->charsetnr= thd_cs->number;
+ client_field->length= server_field.max_octet_length(charset_for_protocol,
+ thd_cs);
+ }
+ client_field->type= server_field.type_handler()->type_code_for_protocol();
+ client_field->flags= (uint16) server_field.flags;
+ client_field->decimals= server_field.decimals;
+
+ client_field->db_length= strlen(client_field->db);
+ client_field->table_length= strlen(client_field->table);
+ client_field->name_length= strlen(client_field->name);
+ client_field->org_name_length= strlen(client_field->org_name);
+ client_field->org_table_length= strlen(client_field->org_table);
+
+ client_field->catalog= dup_str_aux(field_alloc, "def", 3, cs, thd_cs);
+ client_field->catalog_length= 3;
+
+ if (IS_NUM(client_field->type))
+ client_field->flags|= NUM_FLAG;
+
+ client_field->max_length= 0;
+ client_field->def= 0;
+ return false;
+}
+
+
bool Protocol::send_result_set_metadata(List<Item> *list, uint flags)
{
List_iterator_fast<Item> it(*list);
Item *item;
- MYSQL_FIELD *client_field;
- MEM_ROOT *field_alloc;
- CHARSET_INFO *thd_cs= thd->variables.character_set_results;
- CHARSET_INFO *cs= system_charset_info;
- MYSQL_DATA *data;
+ Protocol_text prot(thd);
DBUG_ENTER("send_result_set_metadata");
if (!thd->mysql) // bootstrap file handling
DBUG_RETURN(0);
- if (begin_dataset())
- goto err;
-
- data= thd->cur_data;
- data->fields= field_count= list->elements;
- field_alloc= &data->alloc;
-
- if (!(client_field= data->embedded_info->fields_list=
- (MYSQL_FIELD*)alloc_root(field_alloc, sizeof(MYSQL_FIELD)*field_count)))
+ if (begin_dataset(thd, list->elements))
goto err;
- while ((item= it++))
+ for (uint pos= 0 ; (item= it++); pos++)
{
- Send_field server_field;
- item->make_send_field(thd, &server_field);
-
- /* Keep things compatible for old clients */
- if (server_field.type == MYSQL_TYPE_VARCHAR)
- server_field.type= MYSQL_TYPE_VAR_STRING;
-
- client_field->db= dup_str_aux(field_alloc, server_field.db_name,
- strlen(server_field.db_name), cs, thd_cs);
- client_field->table= dup_str_aux(field_alloc, server_field.table_name,
- strlen(server_field.table_name), cs, thd_cs);
- client_field->name= dup_str_aux(field_alloc, server_field.col_name.str,
- server_field.col_name.length, cs, thd_cs);
- client_field->org_table= dup_str_aux(field_alloc, server_field.org_table_name,
- strlen(server_field.org_table_name), cs, thd_cs);
- client_field->org_name= dup_str_aux(field_alloc,
- server_field.org_col_name.str,
- server_field.org_col_name.length,
- cs, thd_cs);
- if (item->charset_for_protocol() == &my_charset_bin || thd_cs == NULL)
- {
- /* No conversion */
- client_field->charsetnr= item->charset_for_protocol()->number;
- client_field->length= server_field.length;
- }
- else
- {
- uint max_char_len;
- /* With conversion */
- client_field->charsetnr= thd_cs->number;
- max_char_len= (server_field.type >= (int) MYSQL_TYPE_TINY_BLOB &&
- server_field.type <= (int) MYSQL_TYPE_BLOB) ?
- server_field.length / item->collation.collation->mbminlen :
- server_field.length / item->collation.collation->mbmaxlen;
- client_field->length= char_to_byte_length_safe(max_char_len,
- thd_cs->mbmaxlen);
- }
- client_field->type= server_field.type;
- client_field->flags= (uint16) server_field.flags;
- client_field->decimals= server_field.decimals;
- if (server_field.type == MYSQL_TYPE_FLOAT ||
- server_field.type == MYSQL_TYPE_DOUBLE)
- set_if_smaller(client_field->decimals, FLOATING_POINT_DECIMALS);
-
- client_field->db_length= strlen(client_field->db);
- client_field->table_length= strlen(client_field->table);
- client_field->name_length= strlen(client_field->name);
- client_field->org_name_length= strlen(client_field->org_name);
- client_field->org_table_length= strlen(client_field->org_table);
-
- client_field->catalog= dup_str_aux(field_alloc, "def", 3, cs, thd_cs);
- client_field->catalog_length= 3;
-
- if (IS_NUM(client_field->type))
- client_field->flags|= NUM_FLAG;
-
- if (flags & (int) Protocol::SEND_DEFAULTS)
- {
- char buff[80];
- String tmp(buff, sizeof(buff), default_charset_info), *res;
-
- if (!(res=item->val_str(&tmp)))
- {
- client_field->def_length= 0;
- client_field->def= strmake_root(field_alloc, "",0);
- }
- else
- {
- client_field->def_length= res->length();
- client_field->def= strmake_root(field_alloc, res->ptr(),
- client_field->def_length);
- }
- }
- else
- client_field->def=0;
- client_field->max_length= 0;
- ++client_field;
+ if (prot.store_field_metadata(thd, item, pos))
+ goto err;
}
if (flags & SEND_EOF)
@@ -1132,6 +1124,55 @@ bool Protocol::send_result_set_metadata(List<Item> *list, uint flags)
DBUG_RETURN(1); /* purecov: inspected */
}
+
+static void
+list_fields_send_default(THD *thd, Protocol *p, Field *fld, uint pos)
+{
+ char buff[80];
+ String tmp(buff, sizeof(buff), default_charset_info), *res;
+ MYSQL_FIELD *client_field= &thd->cur_data->embedded_info->fields_list[pos];
+
+ if (fld->is_null() || !(res= fld->val_str(&tmp)))
+ {
+ client_field->def_length= 0;
+ client_field->def= strmake_root(&thd->cur_data->alloc, "", 0);
+ }
+ else
+ {
+ client_field->def_length= res->length();
+ client_field->def= strmake_root(&thd->cur_data->alloc, res->ptr(),
+ client_field->def_length);
+ }
+}
+
+
+bool Protocol::send_list_fields(List<Field> *list, const TABLE_LIST *table_list)
+{
+ DBUG_ENTER("send_result_set_metadata");
+ Protocol_text prot(thd);
+ List_iterator_fast<Field> it(*list);
+ Field *fld;
+
+ if (!thd->mysql) // bootstrap file handling
+ DBUG_RETURN(0);
+
+ if (begin_dataset(thd, list->elements))
+ goto err;
+
+ for (uint pos= 0 ; (fld= it++); pos++)
+ {
+ if (prot.store_field_metadata_for_list_fields(thd, fld, table_list, pos))
+ goto err;
+ list_fields_send_default(thd, this, fld, pos);
+ }
+
+ DBUG_RETURN(prepare_for_send(list->elements));
+err:
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ DBUG_RETURN(1);
+}
+
+
bool Protocol::write()
{
if (!thd->mysql) // bootstrap file handling
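
store_field_metadata() above centralizes the per-column decision that send_result_set_metadata() previously made inline: binary columns (or a NULL result charset) keep the server-side charset number and length, otherwise the client charset number and its maximum octet length are used. A simplified stand-alone sketch of that decision, with hypothetical types in place of CHARSET_INFO and Send_field:

    #include <cstdio>

    struct demo_charset { unsigned number; unsigned mbmaxlen; bool binary; };

    static void demo_field_meta(const demo_charset &col_cs, const demo_charset *result_cs,
                                unsigned char_length,
                                unsigned *out_charsetnr, unsigned *out_length) {
      if (col_cs.binary || result_cs == nullptr) {
        *out_charsetnr = col_cs.number;               // no conversion
        *out_length    = char_length * col_cs.mbmaxlen;
      } else {
        *out_charsetnr = result_cs->number;           // converted to the client charset
        *out_length    = char_length * result_cs->mbmaxlen;
      }
    }

    int main() {
      demo_charset latin1{8, 1, false}, utf8mb4{45, 4, false};  // numbers for illustration
      unsigned nr, len;
      demo_field_meta(latin1, &utf8mb4, 10, &nr, &len);
      std::printf("charsetnr=%u length=%u\n", nr, len);         // 45, 40
      return 0;
    }
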
diff --git a/libmysqld/libmysql.c b/libmysqld/libmysql.c
index b3ef96698ff..cd170b42b42 100644
--- a/libmysqld/libmysql.c
+++ b/libmysqld/libmysql.c
@@ -3215,7 +3215,8 @@ static void fetch_string_with_conversion(MYSQL_BIND *param, char *value, size_t
{
MYSQL_TIME *tm= (MYSQL_TIME *)buffer;
MYSQL_TIME_STATUS status;
- str_to_time(value, length, tm, 0, &status);
+ str_to_datetime_or_date_or_time(value, length, tm, 0, &status,
+ TIME_MAX_HOUR, UINT_MAX32);
err= status.warnings;
*param->error= MY_TEST(err);
break;
@@ -3226,7 +3227,7 @@ static void fetch_string_with_conversion(MYSQL_BIND *param, char *value, size_t
{
MYSQL_TIME *tm= (MYSQL_TIME *)buffer;
MYSQL_TIME_STATUS status;
- (void) str_to_datetime(value, length, tm, 0, &status);
+ (void) str_to_datetime_or_date(value, length, tm, 0, &status);
err= status.warnings;
*param->error= MY_TEST(err) && (param->buffer_type == MYSQL_TYPE_DATE &&
tm->time_type != MYSQL_TIMESTAMP_DATE);
@@ -3350,7 +3351,7 @@ static void fetch_long_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field,
case MYSQL_TYPE_DATETIME:
{
int error;
- value= number_to_datetime(value, 0, (MYSQL_TIME *) buffer, 0, &error);
+ value= number_to_datetime_or_date(value, 0, (MYSQL_TIME *) buffer, 0, &error);
*param->error= MY_TEST(error);
break;
}
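
The client-side fetch path now calls the *_or_date_or_time conversion helpers, which report problems through a warnings field in MYSQL_TIME_STATUS checked via MY_TEST(err). A toy illustration of that error-plus-warnings reporting style (not the real parser):

    #include <cstdio>

    struct demo_time { int hour, minute, second; };
    enum { DEMO_WARN_TRUNCATED = 1, DEMO_WARN_OUT_OF_RANGE = 2 };

    static int demo_str_to_time(const char *s, demo_time *out, int *warnings) {
      *warnings = 0;
      if (std::sscanf(s, "%d:%d:%d", &out->hour, &out->minute, &out->second) != 3) {
        *warnings |= DEMO_WARN_TRUNCATED;   // could not parse the whole value
        return 1;
      }
      if (out->minute > 59 || out->second > 59)
        *warnings |= DEMO_WARN_OUT_OF_RANGE;
      return *warnings != 0;
    }

    int main() {
      demo_time tm; int warn;
      int err = demo_str_to_time("10:62:05", &tm, &warn);
      std::printf("err=%d warnings=%d\n", err, warn);   // err=1 warnings=2
      return 0;
    }
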
diff --git a/libservices/CMakeLists.txt b/libservices/CMakeLists.txt
index e20be6d7a7c..b99be7106e1 100644
--- a/libservices/CMakeLists.txt
+++ b/libservices/CMakeLists.txt
@@ -37,6 +37,7 @@ SET(MYSQLSERVICES_SOURCES
thd_timezone_service.c
thd_wait_service.c
wsrep_service.c
+ json_service.c
)
ADD_CONVENIENCE_LIBRARY(mysqlservices ${MYSQLSERVICES_SOURCES})
diff --git a/libservices/json_service.c b/libservices/json_service.c
new file mode 100644
index 00000000000..96b3b3fa532
--- /dev/null
+++ b/libservices/json_service.c
@@ -0,0 +1,19 @@
+
+/* Copyright (c) 2018, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include <service_versions.h>
+SERVICE_VERSION json_service= (void*)VERSION_json;
diff --git a/man/comp_err.1 b/man/comp_err.1
index f530f8e5039..df8f4324158 100644
--- a/man/comp_err.1
+++ b/man/comp_err.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBCOMP_ERR\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBCOMP_ERR\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/galera_new_cluster.1 b/man/galera_new_cluster.1
index 134d289e94f..e3f7d6670a0 100644
--- a/man/galera_new_cluster.1
+++ b/man/galera_new_cluster.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBGALERA_NEW_CLUSTER\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBGALERA_NEW_CLUSTER\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/galera_recovery.1 b/man/galera_recovery.1
index a83a9e5d3cb..deb51f39cd9 100644
--- a/man/galera_recovery.1
+++ b/man/galera_recovery.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBGALERA_RECOVERY\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBGALERA_RECOVERY\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/innochecksum.1 b/man/innochecksum.1
index 8e144c6de34..20ec629d5cd 100644
--- a/man/innochecksum.1
+++ b/man/innochecksum.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBINNOCHECKSUM\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBINNOCHECKSUM\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mariabackup.1 b/man/mariabackup.1
index 865ff0b8111..f3f24db88cf 100644
--- a/man/mariabackup.1
+++ b/man/mariabackup.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMARIABACKUP\FR" "1" "29 March 2019" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMARIABACKUP\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mariadb-service-convert.1 b/man/mariadb-service-convert.1
index 448dbde5afd..a7c99f712e4 100644
--- a/man/mariadb-service-convert.1
+++ b/man/mariadb-service-convert.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMARIADB-SERVICE-CONVERT\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMARIADB-SERVICE-CONVERT\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mbstream.1 b/man/mbstream.1
index 5a3ab1e2867..7508aaf6326 100644
--- a/man/mbstream.1
+++ b/man/mbstream.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMBSTREAM\FR" "1" "29 March 2019" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMBSTREAM\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/msql2mysql.1 b/man/msql2mysql.1
index 4d36ce3893e..8d2b5867b2b 100644
--- a/man/msql2mysql.1
+++ b/man/msql2mysql.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMSQL2MYSQL\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMSQL2MYSQL\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/my_print_defaults.1 b/man/my_print_defaults.1
index b01d24b2e86..c374966a898 100644
--- a/man/my_print_defaults.1
+++ b/man/my_print_defaults.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMY_PRINT_DEFAULTS" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMY_PRINT_DEFAULTS" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
@@ -74,14 +74,14 @@ Display a help message and exit\&.
.\}
.\" my_print_defaults: config-file option
.\" config-file option: my_print_defaults
-\fB\-\-config\-file=\fR\fB\fIfile_name\fR\fR,
+\fB
.\" my_print_defaults: defaults-file option
.\" defaults-file option: my_print_defaults
\fB\-\-defaults\-file=\fR\fB\fIfile_name\fR\fR,
\fB\-c \fR\fB\fIfile_name\fR\fR
.sp
Read only the given option file\&. If no extension is given, default extension(.ini or .cnf) will
-be used\&. \fB\-\-config-file\fR is deprecated, use \fB\-\-defaults\-file\fR instead\&. If \fB\-\-defaults\-file\fR is
+be used\&. If \fB\-\-defaults\-file\fR is
the first option, then read this file only, do not read global or per\-user config files; should be the first option.
.RE
.sp
@@ -118,11 +118,10 @@ string is
\fB\-\-defaults\-extra\-file=\fR\fB\fIfile_name\fR\fR,
.\" my_print_defaults: extra-file option
.\" extra-file option: my_print_defaults
-\fB\-\-extra\-file=\fR\fB\fIfile_name\fR\fR,
\fB\-e \fR\fB\fIfile_name\fR\fR
.sp
Read this option file after the global option file but (on Unix) before the user option
-file\&. Should be the first option\&. \fB\-\-extra\-file\fR is deprecated, use \fB\-\-defaults\-extra\-file\fR\&.
+file\&. Should be the first option\&.
.RE
.sp
.RS 4
diff --git a/man/my_safe_process.1 b/man/my_safe_process.1
index 4809124be70..296f2e43997 100644
--- a/man/my_safe_process.1
+++ b/man/my_safe_process.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMY_SAFE_PROCESS\FR" "1" "29 March 2019" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMY_SAFE_PROCESS\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/myisam_ftdump.1 b/man/myisam_ftdump.1
index cc807d7872b..d2348e60e11 100644
--- a/man/myisam_ftdump.1
+++ b/man/myisam_ftdump.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYISAM_FTDUMP\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYISAM_FTDUMP\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/myisamchk.1 b/man/myisamchk.1
index ebfcdcbfcf4..d36b92c88a6 100644
--- a/man/myisamchk.1
+++ b/man/myisamchk.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYISAMCHK\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYISAMCHK\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/myisamlog.1 b/man/myisamlog.1
index 98b3f46d7d8..166d3c44a82 100644
--- a/man/myisamlog.1
+++ b/man/myisamlog.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYISAMLOG\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYISAMLOG\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/myisampack.1 b/man/myisampack.1
index 042461c018c..1394ac7a431 100644
--- a/man/myisampack.1
+++ b/man/myisampack.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYISAMPACK\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYISAMPACK\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql-stress-test.pl.1 b/man/mysql-stress-test.pl.1
index 4ab03bef077..e2754343bc8 100644
--- a/man/mysql-stress-test.pl.1
+++ b/man/mysql-stress-test.pl.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL\-STRESS\-TE" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL\-STRESS\-TE" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql-test-run.pl.1 b/man/mysql-test-run.pl.1
index b928ec66587..659e0651929 100644
--- a/man/mysql-test-run.pl.1
+++ b/man/mysql-test-run.pl.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL\-TEST\-RUN\" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL\-TEST\-RUN\" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql.1 b/man/mysql.1
index f925fee454b..df156a31397 100644
--- a/man/mysql.1
+++ b/man/mysql.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql.server.1 b/man/mysql.server.1
index 7adafc5cce1..09bacc41377 100644
--- a/man/mysql.server.1
+++ b/man/mysql.server.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL\&.SERVER\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL\&.SERVER\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql_client_test.1 b/man/mysql_client_test.1
index 609a6e0d3cb..80f9b4b02d1 100644
--- a/man/mysql_client_test.1
+++ b/man/mysql_client_test.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_CLIENT_TEST" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_CLIENT_TEST" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql_config.1 b/man/mysql_config.1
index 04bbe16cfdb..114ff8ff2f4 100644
--- a/man/mysql_config.1
+++ b/man/mysql_config.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_CONFIG\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_CONFIG\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql_convert_table_format.1 b/man/mysql_convert_table_format.1
index 3a69b4ae05a..17dc7c76638 100644
--- a/man/mysql_convert_table_format.1
+++ b/man/mysql_convert_table_format.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_CONVERT_TAB" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_CONVERT_TAB" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql_find_rows.1 b/man/mysql_find_rows.1
index 38c612981e3..913c32c8a0d 100644
--- a/man/mysql_find_rows.1
+++ b/man/mysql_find_rows.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_FIND_ROWS\F" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_FIND_ROWS\F" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql_fix_extensions.1 b/man/mysql_fix_extensions.1
index ae5ad8c78bf..822bbc0b1aa 100644
--- a/man/mysql_fix_extensions.1
+++ b/man/mysql_fix_extensions.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_FIX_EXTENSI" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_FIX_EXTENSI" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql_install_db.1 b/man/mysql_install_db.1
index 98b39871ea6..668aec1e5ee 100644
--- a/man/mysql_install_db.1
+++ b/man/mysql_install_db.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_INSTALL_DB\FR" "1" "4 April 2019" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_INSTALL_DB\FR" "1" "4 April 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql_ldb.1 b/man/mysql_ldb.1
index f29044a0aa6..fb60c382912 100644
--- a/man/mysql_ldb.1
+++ b/man/mysql_ldb.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_LDB\FR" "1" "29 March 2019" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_LDB\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql_plugin.1 b/man/mysql_plugin.1
index 23b94865f07..1fb5e362203 100644
--- a/man/mysql_plugin.1
+++ b/man/mysql_plugin.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_PLUGIN\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_PLUGIN\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
diff --git a/man/mysql_secure_installation.1 b/man/mysql_secure_installation.1
index 28b0e0bde97..b9a15a1a0c7 100644
--- a/man/mysql_secure_installation.1
+++ b/man/mysql_secure_installation.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_SECURE_INST" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_SECURE_INST" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql_setpermission.1 b/man/mysql_setpermission.1
index 7a0b373f62f..9f3307d0222 100644
--- a/man/mysql_setpermission.1
+++ b/man/mysql_setpermission.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_SETPERMISSI" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_SETPERMISSI" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql_tzinfo_to_sql.1 b/man/mysql_tzinfo_to_sql.1
index fb5fee606fe..450bc7b25f1 100644
--- a/man/mysql_tzinfo_to_sql.1
+++ b/man/mysql_tzinfo_to_sql.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_TZINFO_TO_S" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_TZINFO_TO_S" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql_upgrade.1 b/man/mysql_upgrade.1
index 4ea0f4e054d..e49a5a98c87 100644
--- a/man/mysql_upgrade.1
+++ b/man/mysql_upgrade.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_UPGRADE\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_UPGRADE\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysql_waitpid.1 b/man/mysql_waitpid.1
index 518c15db511..43ed82b2e19 100644
--- a/man/mysql_waitpid.1
+++ b/man/mysql_waitpid.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQL_WAITPID\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQL_WAITPID\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqlaccess.1 b/man/mysqlaccess.1
index b6ba346dd14..3856fea67c1 100644
--- a/man/mysqlaccess.1
+++ b/man/mysqlaccess.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLACCESS\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLACCESS\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqladmin.1 b/man/mysqladmin.1
index 321964e20d6..80ff3059a39 100644
--- a/man/mysqladmin.1
+++ b/man/mysqladmin.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLADMIN\FR" "1" "28 December 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLADMIN\FR" "1" "28 December 2017" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqlbinlog.1 b/man/mysqlbinlog.1
index 8c0920543cb..5eb1b3e4f9a 100644
--- a/man/mysqlbinlog.1
+++ b/man/mysqlbinlog.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLBINLOG\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLBINLOG\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqlcheck.1 b/man/mysqlcheck.1
index b2d39fbe567..f33de415e1d 100644
--- a/man/mysqlcheck.1
+++ b/man/mysqlcheck.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLCHECK\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLCHECK\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqld.8 b/man/mysqld.8
index 002adb97de2..ef11cc07920 100644
--- a/man/mysqld.8
+++ b/man/mysqld.8
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLD\FR" "8" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLD\FR" "8" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqld_multi.1 b/man/mysqld_multi.1
index f0de2181bf6..c89553cf4f9 100644
--- a/man/mysqld_multi.1
+++ b/man/mysqld_multi.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLD_MULTI\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLD_MULTI\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqld_safe.1 b/man/mysqld_safe.1
index 09bb72e331a..3b77f04099b 100644
--- a/man/mysqld_safe.1
+++ b/man/mysqld_safe.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLD_SAFE\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLD_SAFE\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqld_safe_helper.1 b/man/mysqld_safe_helper.1
index 1dd39df6276..68cd74152d5 100644
--- a/man/mysqld_safe_helper.1
+++ b/man/mysqld_safe_helper.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLD_SAFE_HELPER\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLD_SAFE_HELPER\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqldump.1 b/man/mysqldump.1
index e0dd15c0001..0ebaccf9c23 100644
--- a/man/mysqldump.1
+++ b/man/mysqldump.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLDUMP\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLDUMP\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqldumpslow.1 b/man/mysqldumpslow.1
index 3c9048e7394..f89b5f2d929 100644
--- a/man/mysqldumpslow.1
+++ b/man/mysqldumpslow.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLDUMPSLOW\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLDUMPSLOW\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqlhotcopy.1 b/man/mysqlhotcopy.1
index 61c6ee5b53c..040a4784f0c 100644
--- a/man/mysqlhotcopy.1
+++ b/man/mysqlhotcopy.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLHOTCOPY\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLHOTCOPY\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqlimport.1 b/man/mysqlimport.1
index 6273ec1145d..2a6addfc6a6 100644
--- a/man/mysqlimport.1
+++ b/man/mysqlimport.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLIMPORT\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLIMPORT\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqlshow.1 b/man/mysqlshow.1
index 7aa0ac9473a..e06f1fde2e8 100644
--- a/man/mysqlshow.1
+++ b/man/mysqlshow.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLSHOW\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLSHOW\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqlslap.1 b/man/mysqlslap.1
index 11f9173d737..3e589a28c36 100644
--- a/man/mysqlslap.1
+++ b/man/mysqlslap.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLSLAP\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLSLAP\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/mysqltest.1 b/man/mysqltest.1
index be04358ba8c..039f10d6f78 100644
--- a/man/mysqltest.1
+++ b/man/mysqltest.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBMYSQLTEST\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBMYSQLTEST\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/perror.1 b/man/perror.1
index 57614f2e807..06b20ff88af 100644
--- a/man/perror.1
+++ b/man/perror.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBPERROR\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBPERROR\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/replace.1 b/man/replace.1
index 6a573888c37..89662ae412f 100644
--- a/man/replace.1
+++ b/man/replace.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBREPLACE\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBREPLACE\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/resolve_stack_dump.1 b/man/resolve_stack_dump.1
index a1b7d061aeb..83a4964d95b 100644
--- a/man/resolve_stack_dump.1
+++ b/man/resolve_stack_dump.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBRESOLVE_STACK_DUM" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBRESOLVE_STACK_DUM" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/resolveip.1 b/man/resolveip.1
index b9dab4fb686..0e7a10ed4ed 100644
--- a/man/resolveip.1
+++ b/man/resolveip.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBRESOLVEIP\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBRESOLVEIP\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/tokuft_logprint.1 b/man/tokuft_logprint.1
index 0a9ff539b73..c97f7e19f69 100644
--- a/man/tokuft_logprint.1
+++ b/man/tokuft_logprint.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBTOKUFT_LOGPRINT\FR" "1" "29 March 2019" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBTOKUFT_LOGPRINT\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/tokuftdump.1 b/man/tokuftdump.1
index 8fe9e65c7e9..8ee274280d2 100644
--- a/man/tokuftdump.1
+++ b/man/tokuftdump.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBTOKUFTDUMP\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBTOKUFTDUMP\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/wsrep_sst_common.1 b/man/wsrep_sst_common.1
index df784e3fa19..606c668b004 100644
--- a/man/wsrep_sst_common.1
+++ b/man/wsrep_sst_common.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBWSREP_SST_COMMON\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBWSREP_SST_COMMON\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/wsrep_sst_mariabackup.1 b/man/wsrep_sst_mariabackup.1
index 1dadf705b96..225cc728fad 100644
--- a/man/wsrep_sst_mariabackup.1
+++ b/man/wsrep_sst_mariabackup.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBWSREP_SST_MARIABACKUP\FR" "1" "29 March 2019" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBWSREP_SST_MARIABACKUP\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/wsrep_sst_mysqldump.1 b/man/wsrep_sst_mysqldump.1
index c203d1bdc91..d6ab1a01b4c 100644
--- a/man/wsrep_sst_mysqldump.1
+++ b/man/wsrep_sst_mysqldump.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBWSREP_SST_MYSQLDUMP\FR" "1" "9 May 2017" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBWSREP_SST_MYSQLDUMP\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/wsrep_sst_rsync.1 b/man/wsrep_sst_rsync.1
index bbaeb64016e..ca3451a8998 100644
--- a/man/wsrep_sst_rsync.1
+++ b/man/wsrep_sst_rsync.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBWSREP_SST_RSYNC\FR" "1" "9 August 2018" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBWSREP_SST_RSYNC\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/man/wsrep_sst_rsync_wan.1 b/man/wsrep_sst_rsync_wan.1
index 00b5f7572f7..81bbf27305f 100644
--- a/man/wsrep_sst_rsync_wan.1
+++ b/man/wsrep_sst_rsync_wan.1
@@ -1,6 +1,6 @@
'\" t
.\"
-.TH "\FBWSREP_SST_RSYNC_WAN\FR" "1" "29 March 2019" "MariaDB 10\&.3" "MariaDB Database System"
+.TH "\FBWSREP_SST_RSYNC_WAN\FR" "1" "28 March 2019" "MariaDB 10\&.4" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
diff --git a/mysql-test/include/add_anonymous_users.inc b/mysql-test/include/add_anonymous_users.inc
index 635a80e3dee..86ce529d1e2 100644
--- a/mysql-test/include/add_anonymous_users.inc
+++ b/mysql-test/include/add_anonymous_users.inc
@@ -1,7 +1,7 @@
# Allow anonymous users to connect
disable_warnings;
disable_query_log;
-INSERT IGNORE INTO mysql.user (host, user) VALUES ('localhost','');
+INSERT IGNORE INTO mysql.global_priv (host, user) VALUES ('localhost','');
FLUSH PRIVILEGES;
enable_query_log;
enable_warnings;
diff --git a/mysql-test/include/analyze-format.inc b/mysql-test/include/analyze-format.inc
index 65e61b81582..08006e26c79 100644
--- a/mysql-test/include/analyze-format.inc
+++ b/mysql-test/include/analyze-format.inc
@@ -1,3 +1,3 @@
# The time on ANALYZE FORMAT=JSON is rather variable
---replace_regex /("(r_total_time_ms|r_buffer_size)": )[^, \n]*/\1"REPLACED"/
+--replace_regex /("(r_total_time_ms|r_buffer_size|r_filling_time_ms)": )[^, \n]*/\1"REPLACED"/
diff --git a/mysql-test/include/check-testcase.test b/mysql-test/include/check-testcase.test
index 44aa4472cba..191feae1628 100644
--- a/mysql-test/include/check-testcase.test
+++ b/mysql-test/include/check-testcase.test
@@ -81,6 +81,18 @@ if (!$tmp) {
SHOW SLAVE STATUS;
}
+#
+# Ensure that we don't get warnings from mysql.proc (used by check_mysqld)
+#
+
+--disable_query_log
+--disable_warnings
+--disable_result_log
+select count(*) from mysql.proc;
+--enable_result_log
+--enable_warnings
+--enable_query_log
+
call mtr.check_testcase();
let $datadir=`select @@datadir`;
@@ -91,5 +103,56 @@ cat_file $datadir.tempfiles.txt;
remove_file $datadir.tempfiles.txt;
list_files $datadir/mysql #sql*;
+#
+# Check that SHOW ENGINE INNODB STATUS does not show any active transactions
+# We do this only if wsrep provider is loaded, to avoid disturbing any non-Galera MTR tests
+#
+if (`SELECT COUNT(*)=1 FROM INFORMATION_SCHEMA.PLUGINS WHERE PLUGIN_NAME = 'wsrep' AND PLUGIN_STATUS='ACTIVE'`) {
+ if (`SELECT @@wsrep_on`) {
+ if (`SELECT COUNT(*) FROM information_schema.innodb_trx WHERE trx_mysql_thread_id != 0`) {
+ if ($before) {
+ --echo Before test start.
+ }
+ if (!$before) {
+ --echo After test end.
+ }
+ --echo There is one or more active InnoDB transaction(s) when there should be none. Dumping some diagnostics.
+
+ --let $status_locks = `SELECT @@innodb_status_output_locks`
+ --let $status_output = `SELECT @@innodb_status_output`
+ --enable_query_log
+ SET GLOBAL innodb_status_output_locks=ON;
+ SHOW ENGINE INNODB STATUS;
+ --disable_query_log
+ --eval SET GLOBAL innodb_status_output_locks=$status_locks;
+ --eval SET GLOBAL innodb_status_output=$status_output;
+ --enable_query_log
+
+ --vertical_results
+ if ($before) {
+ --replace_regex /$/ /
+ }
+ SELECT * FROM information_schema.processlist;
+
+ if ($before) {
+ --replace_regex /$/ /
+ }
+ SELECT * FROM information_schema.innodb_trx;
+
+ if ($before) {
+ --replace_regex /$/ /
+ }
+ SELECT * FROM information_schema.innodb_locks;
+
+ if ($before) {
+ --replace_regex /$/ /
+ }
+ SELECT * FROM information_schema.innodb_lock_waits;
+ --horizontal_results
+ --disable_query_log
+ }
+ }
+}
+
--enable_query_log
diff --git a/mysql-test/include/check_ftwrl_incompatible.inc b/mysql-test/include/check_ftwrl_incompatible.inc
index 4787a69ea9c..a7e87c3750b 100644
--- a/mysql-test/include/check_ftwrl_incompatible.inc
+++ b/mysql-test/include/check_ftwrl_incompatible.inc
@@ -68,8 +68,7 @@ connection $con_aux1;
--enable_query_log
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where (state = "Waiting for global read lock" or
- state = "Waiting for commit lock") and
+ where state = "Waiting for backup lock" and
info = "$statement";
--source include/wait_condition.inc
--disable_result_log
@@ -116,8 +115,7 @@ connection $con_aux2;
--enable_query_log
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where (state = "Waiting for global read lock" or
- state = "Waiting for commit lock") and
+ where state = "Waiting for backup lock" and
info = "flush tables with read lock";
--source include/wait_condition.inc
--disable_result_log
diff --git a/mysql-test/include/deadlock.inc b/mysql-test/include/deadlock.inc
index 2fa61f48624..7ac2a16fc44 100644
--- a/mysql-test/include/deadlock.inc
+++ b/mysql-test/include/deadlock.inc
@@ -94,7 +94,7 @@ insert into t2 values(0, 0), (1, 20), (2, 30);
commit;
connection con1;
-select a,b from t2 UNION SELECT id, x from t1 FOR UPDATE;
+select a,b from t2 UNION (SELECT id, x from t1 FOR UPDATE);
select * from t2;
select * from t1;
diff --git a/mysql-test/include/default_mysqld.cnf b/mysql-test/include/default_mysqld.cnf
index 69a2b58288b..edae06ee7be 100644
--- a/mysql-test/include/default_mysqld.cnf
+++ b/mysql-test/include/default_mysqld.cnf
@@ -107,6 +107,7 @@ loose-performance-schema-consumer-thread-instrumentation=ON
binlog-direct-non-transactional-updates
default-storage-engine=myisam
+use_stat_tables=preferably
loose-ssl-ca=@ENV.MYSQL_TEST_DIR/std_data/cacert.pem
loose-ssl-cert=@ENV.MYSQL_TEST_DIR/std_data/server-cert.pem
diff --git a/mysql-test/include/diff_tables.inc b/mysql-test/include/diff_tables.inc
index b5ee4db0e8f..a29156cfce9 100644
--- a/mysql-test/include/diff_tables.inc
+++ b/mysql-test/include/diff_tables.inc
@@ -167,7 +167,7 @@ while ($_dt_tables)
# the table to a file.
--let $_dt_outfile= `SELECT @@datadir`
--let $_dt_outfile= $_dt_outfile/diff_table-$_dt_connection-$_dt_database-$_dt_table
- eval SELECT * FROM $_dt_database.$_dt_table ORDER BY `$_dt_column_list` INTO OUTFILE '$_dt_outfile';
+ eval SELECT * INTO OUTFILE '$_dt_outfile' FROM $_dt_database.$_dt_table ORDER BY `$_dt_column_list`;
# Compare files.
if ($_dt_prev_outfile)
diff --git a/mysql-test/include/explain_non_select.inc b/mysql-test/include/explain_non_select.inc
index 57b96994d20..d22310c9813 100644
--- a/mysql-test/include/explain_non_select.inc
+++ b/mysql-test/include/explain_non_select.inc
@@ -158,7 +158,7 @@ CREATE TABLE t1 ( a int PRIMARY KEY );
--let $query = DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a
--let $select = SELECT * FROM t1 WHERE t1.a > 0 ORDER BY t1.a
--source include/explain_utils.inc
-INSERT INTO t1 VALUES (1), (2), (3);
+INSERT INTO t1 VALUES (1), (2), (3), (-1), (-2), (-3);
--let $query = DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a
--let $select = SELECT * FROM t1 WHERE t1.a > 0 ORDER BY t1.a
--source include/explain_utils.inc
@@ -640,7 +640,7 @@ DROP VIEW v1;
--echo #63
CREATE TABLE t1 (a INT, PRIMARY KEY(a));
-INSERT INTO t1 VALUES (1), (2), (3), (4), (5);
+INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9);
CREATE VIEW v1 (a) AS SELECT a FROM t1;
--let $query = DELETE FROM v1 WHERE a < 4
--let $select = SELECT * FROM v1 WHERE a < 4
diff --git a/mysql-test/include/galera_cluster.inc b/mysql-test/include/galera_cluster.inc
index c1834c3c26f..7f76ea59c7f 100644
--- a/mysql-test/include/galera_cluster.inc
+++ b/mysql-test/include/galera_cluster.inc
@@ -8,5 +8,11 @@
--let $galera_cluster_size = 2
--source include/galera_init.inc
+--source include/have_innodb.inc
+--source include/galera_wait_ready.inc
+--connection node_2
+--source include/galera_wait_ready.inc
--source include/have_innodb.inc
+
+--connection node_1
diff --git a/mysql-test/suite/galera/include/galera_have_debug_sync.inc b/mysql-test/include/galera_have_debug_sync.inc
index 7c0156052d8..7c0156052d8 100644
--- a/mysql-test/suite/galera/include/galera_have_debug_sync.inc
+++ b/mysql-test/include/galera_have_debug_sync.inc
diff --git a/mysql-test/include/galera_wait_sync_point.inc b/mysql-test/include/galera_wait_sync_point.inc
index cf3a4980186..c0951b220b4 100644
--- a/mysql-test/include/galera_wait_sync_point.inc
+++ b/mysql-test/include/galera_wait_sync_point.inc
@@ -1,6 +1,17 @@
--let $wait_timeout = 10
--let $wsrep_on_orig = `SELECT @@wsrep_on`
SET SESSION wsrep_on = 0;
+
+#
+# The following is only for debugging purposes and
+# should be commented out when the test works as planned.
+#
+#--sleep 1
+# SHOW PROCESSLIST;
+#SHOW STATUS LIKE 'wsrep_%';
+#--echo $galera_sync_point
+
--let $wait_condition = SELECT 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_debug_sync_waiters' AND VARIABLE_VALUE = '$galera_sync_point'
--source include/wait_condition.inc
--eval SET SESSION wsrep_on = $wsrep_on_orig
+
diff --git a/mysql-test/include/gis_keys.inc b/mysql-test/include/gis_keys.inc
index cc8ec68f7d1..388c7b45898 100644
--- a/mysql-test/include/gis_keys.inc
+++ b/mysql-test/include/gis_keys.inc
@@ -27,8 +27,8 @@ SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
# the "most rows covered" rule doesn't kick in anymore
# now EXPLAIN shows the index used on the table
# and we're getting the wrong result again
-INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(1 2)'));
-INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(1 2)'));
+INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(3 4)'));
+INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(3 4)'));
EXPLAIN
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
diff --git a/mysql-test/include/have_auth_named_pipe.inc b/mysql-test/include/have_auth_named_pipe.inc
new file mode 100644
index 00000000000..4f4bf37f11e
--- /dev/null
+++ b/mysql-test/include/have_auth_named_pipe.inc
@@ -0,0 +1,13 @@
+--source include/not_embedded.inc
+
+if (!$AUTH_NAMED_PIPE_SO) {
+ skip No auth_named_pipe plugin;
+}
+
+if (!$USERNAME) {
+ skip USER variable is undefined;
+}
+
+if (`SELECT count(*) <> 0 FROM mysql.user WHERE user = '$USERNAME'`) {
+ skip %USERNAME%=$USER which exists in mysql.user;
+}
diff --git a/mysql-test/include/have_unix_socket.inc b/mysql-test/include/have_unix_socket.inc
index 4246b138e9d..7be828ac384 100644
--- a/mysql-test/include/have_unix_socket.inc
+++ b/mysql-test/include/have_unix_socket.inc
@@ -1,7 +1,8 @@
--source include/not_embedded.inc
-if (!$AUTH_SOCKET_SO) {
- skip No unix_socket plugin;
+if (`select count(*) = 0 from information_schema.plugins where plugin_name = 'unix_socket' and plugin_status='active'`)
+{
+ --skip Needs unix_socket plugin
}
if (!$USER) {
diff --git a/mysql-test/include/have_unix_socket.opt b/mysql-test/include/have_unix_socket.opt
new file mode 100644
index 00000000000..460e3a26ae2
--- /dev/null
+++ b/mysql-test/include/have_unix_socket.opt
@@ -0,0 +1 @@
+--plugin-load-add=$AUTH_SOCKET_SO --loose-enable-unix-socket
diff --git a/mysql-test/include/have_wsrep_enabled.inc b/mysql-test/include/have_wsrep_enabled.inc
index 9287369c87c..7eb8b4372cf 100644
--- a/mysql-test/include/have_wsrep_enabled.inc
+++ b/mysql-test/include/have_wsrep_enabled.inc
@@ -1,7 +1,6 @@
# To be used in a test which requires wsrep plugin to be ACTIVE and enabled
# (i.e. wsrep_on=ON). It includes have_wsrep.inc.
---source include/have_wsrep.inc
--source include/have_innodb.inc
if (`SELECT COUNT(*)=0 FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME = 'wsrep_on' AND VARIABLE_VALUE='ON'`)
diff --git a/mysql-test/include/icp_tests.inc b/mysql-test/include/icp_tests.inc
index f29088887d0..1ff34a936c6 100644
--- a/mysql-test/include/icp_tests.inc
+++ b/mysql-test/include/icp_tests.inc
@@ -226,6 +226,7 @@ EXPLAIN
SELECT c1 FROM t3 WHERE c1 >= 'c-1004=w' and c1 <= 'c-1006=w' and i1 > 2;
SELECT c1 FROM t3 WHERE c1 >= 'c-1004=w' and c1 <= 'c-1006=w' and i1 > 2;
+--replace_column 9 100
EXPLAIN
SELECT c1 FROM t3 WHERE c1 >= 'c-1004=w' and c1 <= 'c-1006=w' or i1 > 2;
SELECT c1 FROM t3 WHERE c1 >= 'c-1004=w' and c1 <= 'c-1006=w' or i1 > 2;
@@ -633,6 +634,8 @@ CREATE TABLE t2 (
);
INSERT INTO t2 VALUES (4,1);
+ANALYZE TABLE t1,t2;
+
EXPLAIN
SELECT t1.d1, t2.pk, t2.i1 FROM t1 STRAIGHT_JOIN t2 ON t2.i1
WHERE t2.pk <> t1.d1 AND t2.pk = 4;
@@ -840,6 +843,8 @@ INSERT INTO t2 (g,h) VALUES
(3,'e'),(1,'u'),(4,'v'),(9,'u'),(6,'i'),(1,'x'),
(7,'f'),(5,'j'),(3,'e'),(1,'u'),(4,'v'),(9,'u');
+ANALYZE TABLE t1,t2;
+
SET @save_optimize_switch=@@optimizer_switch;
SET optimizer_switch='materialization=on';
diff --git a/mysql-test/include/index_merge1.inc b/mysql-test/include/index_merge1.inc
index 0fb94b96ab4..5498a607c3c 100644
--- a/mysql-test/include/index_merge1.inc
+++ b/mysql-test/include/index_merge1.inc
@@ -59,12 +59,12 @@ update t0 set key2=key1,key3=key1,key4=key1,key5=key1,key6=key1,key7=key1,key8=1
analyze table t0;
# 1. One index
-explain select * from t0 where key1 < 3 or key1 > 1020;
+explain select * from t0 where key1 < 3 or key1 > 920 and key1 < 924;
# 2. Simple cases
explain
-select * from t0 where key1 < 3 or key2 > 1020;
-select * from t0 where key1 < 3 or key2 > 1020;
+select * from t0 where key1 < 3 or key2 > 920 and key2 < 924;
+select * from t0 where key1 < 3 or key2 > 920 and key2 < 924;
select * from t0 where key1=1022; # MDEV-13535 no-key-read select after keyread
@@ -117,7 +117,6 @@ select * from t0 where (key1 < 3 or key2 < 6) and (key1 < 7 or key3 < 4);
select * from t0 where (key1 < 3 or key2 < 6) and (key1 < 7 or key3 < 4);
-
explain select * from t0 where
(key1 < 3 or key2 < 3) and (key3 < 4 or key4 < 4) and (key5 < 2 or key6 < 2);
@@ -148,6 +147,10 @@ select * from t0 where
key1 < 7;
# tree_or(List<SEL_IMERGE>, List<SEL_IMERGE>).
+select count(*) from t0 where
+ ((key1 < 4 or key2 < 4) and (key3 <5 or key5 < 4))
+ or
+ ((key5 < 5 or key6 < 6) and (key7 <7 or key8 < 4));
explain select * from t0 where
((key1 < 4 or key2 < 4) and (key3 <5 or key5 < 4))
or
@@ -480,6 +483,7 @@ create table t2(
) ENGINE=MEMORY DEFAULT CHARSET=latin1;
insert into t2 select * from t1;
+
--echo must use sort-union rather than union:
--replace_column 9 #
explain select * from t1 where a=4 or b=4;
@@ -490,9 +494,9 @@ select * from t1 ignore index(a,b) where a=4 or b=4;
--echo must use union, not sort-union:
--replace_column 9 #
-explain select * from t2 where a=4 or b=4;
+explain select * from t2 where a=2 or b=2;
--sorted_result
-select * from t2 where a=4 or b=4;
+select * from t2 where a=2 or b=2;
drop table t1, t2;
diff --git a/mysql-test/include/innodb_checksum_algorithm.combinations b/mysql-test/include/innodb_checksum_algorithm.combinations
new file mode 100644
index 00000000000..fd237e1190a
--- /dev/null
+++ b/mysql-test/include/innodb_checksum_algorithm.combinations
@@ -0,0 +1,11 @@
+[crc32]
+--innodb-checksum-algorithm=crc32
+
+[strict_crc32]
+--innodb-checksum-algorithm=strict_crc32
+
+[full_crc32]
+--innodb-checksum-algorithm=full_crc32
+
+[strict_full_crc32]
+--innodb-checksum-algorithm=strict_full_crc32
diff --git a/mysql-test/include/innodb_checksum_algorithm.inc b/mysql-test/include/innodb_checksum_algorithm.inc
new file mode 100644
index 00000000000..c841fece702
--- /dev/null
+++ b/mysql-test/include/innodb_checksum_algorithm.inc
@@ -0,0 +1 @@
+--source include/have_innodb.inc
diff --git a/mysql-test/include/install_plugin_if_exists.inc b/mysql-test/include/install_plugin_if_exists.inc
new file mode 100644
index 00000000000..93f968e8a39
--- /dev/null
+++ b/mysql-test/include/install_plugin_if_exists.inc
@@ -0,0 +1,41 @@
+--echo #
+--echo # MDEV-16294: INSTALL PLUGIN IF NOT EXISTS / UNINSTALL PLUGIN IF EXISTS
+--echo #
+--echo # INSTALL PLUGIN IF NOT EXISTS name SONAME library /
+--echo # UNINSTALL PLUGIN|SONAME IF EXISTS name
+--echo #
+
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+INSTALL PLUGIN IF NOT EXISTS example SONAME 'ha_example';
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+
+--replace_regex /\.dll/.so/
+--error ER_PLUGIN_INSTALLED
+INSTALL PLUGIN example SONAME 'ha_example';
+
+INSTALL PLUGIN IF NOT EXISTS example SONAME 'ha_example';
+SHOW WARNINGS;
+
+UNINSTALL PLUGIN IF EXISTS example;
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+
+UNINSTALL PLUGIN IF EXISTS example;
+SHOW WARNINGS;
+
+--error ER_SP_DOES_NOT_EXIST
+UNINSTALL PLUGIN example;
+
+INSTALL SONAME 'ha_example';
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+
+UNINSTALL SONAME IF EXISTS 'ha_example';
+
+--replace_regex /\.dll/.so/
+UNINSTALL SONAME IF EXISTS 'ha_example';
+--replace_regex /\.dll/.so/
+SHOW WARNINGS;
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+
+--replace_regex /\.dll/.so/
+--error ER_SP_DOES_NOT_EXIST
+UNINSTALL SONAME 'ha_example';
diff --git a/mysql-test/include/kill_galera.inc b/mysql-test/include/kill_galera.inc
new file mode 100644
index 00000000000..d7f665df6c7
--- /dev/null
+++ b/mysql-test/include/kill_galera.inc
@@ -0,0 +1,20 @@
+--echo Killing server ...
+
+# Write file to make mysql-test-run.pl expect the crash, but don't start it
+--let $_server_id= `SELECT @@server_id`
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect
+--exec echo "wait" > $_expect_file_name
+
+# Kill the connected server
+--disable_reconnect
+--let KILL_NODE_PIDFILE = `SELECT @@pid_file`
+
+--perl
+ my $pid_filename = $ENV{'KILL_NODE_PIDFILE'};
+ my $mysqld_pid = `cat $pid_filename`;
+ chomp($mysqld_pid);
+ system("kill -9 $mysqld_pid");
+ exit(0);
+EOF
+
+--source include/wait_until_disconnected.inc
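The new kill_galera.inc relies on mysql-test-run.pl's expect-file convention: writing "wait" into $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect tells the harness that the coming death of the server is intentional and that it must not restart the node on its own. A test would typically pair it with an explicit restart afterwards; a minimal sketch, assuming the test runs in the Galera suite (hypothetical fragment, not part of this patch):

    # kill -9 the connected node and wait for it to go away
    --source include/kill_galera.inc
    # the expect file is already in place; this asks MTR to bring the node back up
    --source include/start_mysqld.inc
    # wait until wsrep reports the restarted node as ready again
    --source include/galera_wait_ready.inc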
diff --git a/mysql-test/include/maria_empty_logs.inc b/mysql-test/include/maria_empty_logs.inc
index 78a08228caa..f1835c0d2c3 100644
--- a/mysql-test/include/maria_empty_logs.inc
+++ b/mysql-test/include/maria_empty_logs.inc
@@ -92,3 +92,15 @@ connection default;
--disable_query_log
eval use $default_db;
--enable_query_log
+
+#
+# Ensure that we don't get warnings from the mysql system tables (used by check_mysqld)
+# or the test running after this one.
+#
+--disable_query_log
+--disable_warnings
+--disable_result_log
+show table status from mysql;
+--enable_result_log
+--enable_warnings
+--enable_query_log
diff --git a/mysql-test/include/maria_verify_recovery.inc b/mysql-test/include/maria_verify_recovery.inc
index b0f95d2a94b..bb782d5f4cc 100644
--- a/mysql-test/include/maria_verify_recovery.inc
+++ b/mysql-test/include/maria_verify_recovery.inc
@@ -97,3 +97,15 @@ while ($mms_table_to_use)
connection default;
# the effect of "use" is lost after a restart so we are back into db "test"
use mysqltest;
+
+#
+# Ensure that we don't get warnings from mysql.proc (used by check_mysqld)
+#
+
+--disable_query_log
+--disable_warnings
+--disable_result_log
+select count(*) from mysql.proc;
+--enable_result_log
+--enable_warnings
+--enable_query_log
diff --git a/mysql-test/include/mtr_check.sql b/mysql-test/include/mtr_check.sql
index f2c0b70e192..b5673b2a313 100644
--- a/mysql-test/include/mtr_check.sql
+++ b/mysql-test/include/mtr_check.sql
@@ -85,7 +85,6 @@ BEGIN
mysql.help_category,
mysql.help_keyword,
mysql.help_relation,
- mysql.host,
mysql.plugin,
mysql.proc,
mysql.procs_priv,
@@ -96,7 +95,7 @@ BEGIN
mysql.time_zone_name,
mysql.time_zone_transition,
mysql.time_zone_transition_type,
- mysql.user;
+ mysql.global_priv;
-- verify that no plugin changed its disabled/enabled state
SELECT * FROM INFORMATION_SCHEMA.PLUGINS;
diff --git a/mysql-test/include/start_mysqld.inc b/mysql-test/include/start_mysqld.inc
index e31f26aad8c..b4fe116fe86 100644
--- a/mysql-test/include/start_mysqld.inc
+++ b/mysql-test/include/start_mysqld.inc
@@ -1,13 +1,37 @@
# Include this script only after using shutdown_mysqld.inc
# where $_expect_file_name was initialized.
# Write file to make mysql-test-run.pl start up the server again
+
+# restart_noprint defines how much is printed to the .result file:
+# if 0 (default) then '# restart' and the restart_parameters are printed
+# if 1 then '# restart' is printed, but not the content of restart_parameters
+# if 2 then nothing is printed
+
+if (!$restart_noprint)
+{
+ --let $restart_noprint=0
+}
+
if ($restart_parameters)
{
--exec echo "restart: $restart_parameters" > $_expect_file_name
+ if (!$restart_noprint)
+ {
+ --replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+ --exec echo "# restart: $restart_parameters"
+ }
+ if ($restart_noprint == 1)
+ {
+ --exec echo "# restart: with restart_parameters"
+ }
}
if (!$restart_parameters)
{
--exec echo "restart" > $_expect_file_name
+ if ($restart_noprint < 2)
+ {
+ --exec echo "# restart"
+ }
}
# Turn on reconnect
@@ -18,4 +42,3 @@ if (!$restart_parameters)
# Turn off reconnect again
--disable_reconnect
-
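The new $restart_noprint variable only controls what the restart echoes into the .result file; the restart itself is unchanged. A sketch of how a test might combine it with $restart_parameters (hypothetical fragment; the server option is made up for illustration):

    --source include/shutdown_mysqld.inc
    # hypothetical server option, any parameter works here
    --let $restart_parameters= --max-connections=42
    # the .result file will only show "# restart: with restart_parameters"
    --let $restart_noprint= 1
    --source include/start_mysqld.inc
    # back to the default, fully printed form for any later restart
    --let $restart_parameters=
    --let $restart_noprint= 0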
diff --git a/mysql-test/include/switch_to_mysql_global_priv.inc b/mysql-test/include/switch_to_mysql_global_priv.inc
new file mode 100644
index 00000000000..78df6642760
--- /dev/null
+++ b/mysql-test/include/switch_to_mysql_global_priv.inc
@@ -0,0 +1,6 @@
+disable_query_log;
+drop table mysql.user;
+rename table mysql.user_bak to mysql.user;
+rename table mysql.global_priv_bak to mysql.global_priv;
+flush privileges;
+enable_query_log;
diff --git a/mysql-test/include/switch_to_mysql_user.inc b/mysql-test/include/switch_to_mysql_user.inc
new file mode 100644
index 00000000000..f5801db6114
--- /dev/null
+++ b/mysql-test/include/switch_to_mysql_user.inc
@@ -0,0 +1,56 @@
+disable_query_log;
+rename table mysql.user to mysql.user_bak;
+CREATE TABLE mysql.user (
+ Host char(60) binary DEFAULT '' NOT NULL,
+ User char(80) binary DEFAULT '' NOT NULL,
+ Password char(41) character set latin1 collate latin1_bin DEFAULT '' NOT NULL,
+ Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Create_tablespace_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ Delete_history_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ ssl_type enum('','ANY','X509','SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL,
+ ssl_cipher BLOB NOT NULL,
+ x509_issuer BLOB NOT NULL,
+ x509_subject BLOB NOT NULL,
+ max_questions int(11) unsigned DEFAULT 0 NOT NULL,
+ max_updates int(11) unsigned DEFAULT 0 NOT NULL,
+ max_connections int(11) unsigned DEFAULT 0 NOT NULL,
+ max_user_connections int(11) DEFAULT 0 NOT NULL,
+ plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL,
+ authentication_string TEXT NOT NULL,
+ password_expired ENUM('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ is_role enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
+ default_role char(80) binary DEFAULT '' NOT NULL,
+ max_statement_time decimal(12,6) DEFAULT 0 NOT NULL,
+ PRIMARY KEY Host (Host,User)
+) engine=Aria transactional=1 CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
+insert mysql.user select * from mysql.user_bak;
+rename table mysql.global_priv to mysql.global_priv_bak;
+flush privileges;
+enable_query_log;
diff --git a/mysql-test/include/system_db_struct.inc b/mysql-test/include/system_db_struct.inc
index 9467c023dba..d043d209850 100644
--- a/mysql-test/include/system_db_struct.inc
+++ b/mysql-test/include/system_db_struct.inc
@@ -5,9 +5,9 @@
-- replace_result Tables_in_mysql Tables_in_db Tables_in_test Tables_in_db
show tables;
show create table db;
-show create table host;
show create table user;
show create table func;
+show create table global_priv;
show create table tables_priv;
show create table columns_priv;
show create table procs_priv;
diff --git a/mysql-test/include/type_hrtime.inc b/mysql-test/include/type_hrtime.inc
index 55fd3d665fe..d666dc63bba 100644
--- a/mysql-test/include/type_hrtime.inc
+++ b/mysql-test/include/type_hrtime.inc
@@ -55,6 +55,12 @@ select * from t1;
delete from t1 where a < 20110101;
select * from t1;
+if ($type == time)
+{
+delete from t1 where a is not null;
+select * from t1;
+}
+
#
# create ... select
#
diff --git a/mysql-test/include/wait_until_connected_again.inc b/mysql-test/include/wait_until_connected_again.inc
index 26168d10558..15a1e5bf847 100644
--- a/mysql-test/include/wait_until_connected_again.inc
+++ b/mysql-test/include/wait_until_connected_again.inc
@@ -11,7 +11,7 @@ let $counter= 5000;
let $mysql_errno= 9999;
while ($mysql_errno)
{
- --error 0,ER_SERVER_SHUTDOWN,ER_CONNECTION_KILLED,ER_LOCK_WAIT_TIMEOUT,2002,2006,2013
+ --error 0,ER_ACCESS_DENIED_ERROR,ER_SERVER_SHUTDOWN,ER_CONNECTION_KILLED,ER_LOCK_WAIT_TIMEOUT,2002,2006,2013
show status;
dec $counter;
@@ -19,13 +19,18 @@ while ($mysql_errno)
{
--die Server failed to restart
}
+ if (!$mysql_errno)
+ {
+ # WSREP: SHOW STATUS queries are allowed even if wsrep
+ # is not ready. Make sure wsrep is ready before
+ # returning from this script
+ source include/wait_wsrep_ready.inc;
+ }
+ if ($mysql_errno == 1045)
+ {
+ let $mysql_errno=0;
+ }
--sleep 0.1
}
--enable_query_log
--enable_result_log
-
-# WSREP: SHOW STATUS queries are allowed even if wsrep
-# is not ready. Make sure wsrep is ready before
-# returning from this script
-
---source include/wait_wsrep_ready.inc
diff --git a/mysql-test/include/wsrep_wait_disconnect.inc b/mysql-test/include/wsrep_wait_disconnect.inc
new file mode 100644
index 00000000000..740fc0d9426
--- /dev/null
+++ b/mysql-test/include/wsrep_wait_disconnect.inc
@@ -0,0 +1,20 @@
+let $wait_condition = SELECT 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready' AND VARIABLE_VALUE = 'OFF';
+# Since this script is called only AFTER the provider disconnects, we need to
+# allow queries in a non-primary component.
+#
+# We are also forced to use a hard-coded value for wsrep_sync_wait here because
+# we cannot issue a SELECT query to obtain the original value and then restore
+# it.
+disable_query_log;
+SET SESSION wsrep_sync_wait = 7;
+--let $restore_wsrep_on = `SHOW VARIABLES WHERE Variable_name = 'wsrep_on' AND Value = 'ON'`
+SET SESSION wsrep_on = OFF;
+
+--source include/wait_condition.inc
+
+if ($restore_wsrep_on != "")
+{
+ --eval SET SESSION wsrep_on = ON
+}
+SET SESSION wsrep_sync_wait = 15;
+enable_query_log;
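For readers decoding the hard-coded values above: per the Galera documentation, wsrep_sync_wait is a bitmask (1 = READ, 2 = UPDATE and DELETE, 4 = INSERT and REPLACE, 8 = SHOW), so the script waits with causality checks on everything except SHOW and then leaves the strictest setting behind:

    # 1 + 2 + 4: causality checks for reads and DML, but not SHOW, while waiting
    SET SESSION wsrep_sync_wait = 7;
    # 1 + 2 + 4 + 8: the strict value the script assumes the caller wants restored
    SET SESSION wsrep_sync_wait = 15;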
diff --git a/mysql-test/lib/My/ConfigFactory.pm b/mysql-test/lib/My/ConfigFactory.pm
index 830b49d431f..d481058902f 100644
--- a/mysql-test/lib/My/ConfigFactory.pm
+++ b/mysql-test/lib/My/ConfigFactory.pm
@@ -209,13 +209,6 @@ my @mysqld_rules=
{ 'bind-address' => \&fix_bind_address },
);
-if (IS_WINDOWS)
-{
- # For simplicity, we use the same names for shared memory and
- # named pipes.
- push(@mysqld_rules, {'shared-memory-base-name' => \&fix_socket});
-}
-
#
# Rules to run for [client] section
# - will be run in order listed here
@@ -281,19 +274,6 @@ sub post_check_client_group {
}
$config->insert($client_group_name, $name_to, $option->value())
}
-
- if (IS_WINDOWS)
- {
- if (! $self->{ARGS}->{embedded})
- {
- # Shared memory base may or may not be defined (e.g not defined in embedded)
- my $shm = $group_to_copy_from->option("shared-memory-base-name");
- if (defined $shm)
- {
- $config->insert($client_group_name,"shared-memory-base-name", $shm->value());
- }
- }
- }
}
@@ -340,7 +320,6 @@ sub post_check_embedded_group {
(
'log-error', # Embedded server writes stderr to mysqltest's log file
'slave-net-timeout', # Embedded server are not build with replication
- 'shared-memory-base-name', # No shared memory for embedded
);
foreach my $option ( $mysqld->options(), $first_mysqld->options() ) {
diff --git a/mysql-test/lib/mtr_cases.pm b/mysql-test/lib/mtr_cases.pm
index 61ce88f9a5a..3fe57987713 100644
--- a/mysql-test/lib/mtr_cases.pm
+++ b/mysql-test/lib/mtr_cases.pm
@@ -291,6 +291,7 @@ sub combinations_from_file($$)
@combs = ({ skip => $skip_combinations{$filename} });
} else {
return () if @::opt_combinations or not -f $filename;
+ return () if ::using_extern();
# Read combinations file in my.cnf format
mtr_verbose("Read combinations file $filename");
my $config= My::Config->new($filename);
diff --git a/mysql-test/main/1st.result b/mysql-test/main/1st.result
index cb2da3505f5..c630be1d61c 100644
--- a/mysql-test/main/1st.result
+++ b/mysql-test/main/1st.result
@@ -13,12 +13,12 @@ db
event
func
general_log
+global_priv
gtid_slave_pos
help_category
help_keyword
help_relation
help_topic
-host
index_stats
innodb_index_stats
innodb_table_stats
diff --git a/mysql-test/main/alter_table.result b/mysql-test/main/alter_table.result
index 7a88f9165da..5ae90459ff4 100644
--- a/mysql-test/main/alter_table.result
+++ b/mysql-test/main/alter_table.result
@@ -181,6 +181,7 @@ t1 0 a 2 b A 300 NULL NULL YES BTREE
t1 1 b 1 b A 100 NULL NULL YES BTREE
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
diff --git a/mysql-test/main/alter_user.result b/mysql-test/main/alter_user.result
index 76f811f18c7..cae864fa437 100644
--- a/mysql-test/main/alter_user.result
+++ b/mysql-test/main/alter_user.result
@@ -1,3 +1,4 @@
+select priv into @root_priv from mysql.global_priv where user='root' and host='localhost';
select * from mysql.user where user = 'root' and host = 'localhost';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
localhost root Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y 0 0 0 0 N N 0.000000
@@ -7,19 +8,19 @@ localhost root Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y
alter user CURRENT_USER;
select * from mysql.user where user = 'root' and host = 'localhost';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-localhost root Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y 0 0 0 0 N N 0.000000
+localhost root Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y 0 0 0 0 mysql_native_password N N 0.000000
alter user CURRENT_USER();
select * from mysql.user where user = 'root' and host = 'localhost';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-localhost root Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y 0 0 0 0 N N 0.000000
+localhost root Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y 0 0 0 0 mysql_native_password N N 0.000000
create user foo;
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N 0 0 0 0 N N 0.000000
+% foo N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N 0 0 0 0 mysql_native_password N N 0.000000
alter user foo;
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N 0 0 0 0 N N 0.000000
+% foo N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N 0 0 0 0 mysql_native_password N N 0.000000
# Test super privilege works correctly with a read only database.
SET @start_read_only = @@global.read_only;
SET GLOBAL read_only=1;
@@ -51,37 +52,43 @@ Note 1396 Operation ALTER USER failed for 'boo'
alter user foo identified by 'something';
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N 0 0 0 0 N N 0.000000
+% foo *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N 0 0 0 0 mysql_native_password *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N 0.000000
alter user foo identified by 'something2';
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *9CD58369E930E28C8996A89DB18B63294E6DC10C N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N 0 0 0 0 N N 0.000000
+% foo *9CD58369E930E28C8996A89DB18B63294E6DC10C N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N 0 0 0 0 mysql_native_password *9CD58369E930E28C8996A89DB18B63294E6DC10C N N 0.000000
alter user foo identified by password '*88C89BE093D4ECF72D039F62EBB7477EA1FD4D63';
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N 0 0 0 0 N N 0.000000
+% foo *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N 0 0 0 0 mysql_native_password *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N 0.000000
alter user foo identified with 'somecoolplugin';
+ERROR HY000: Operation ALTER USER failed for 'foo'@'%'
+show warnings;
+Level Code Message
+Error 1524 Plugin 'somecoolplugin' is not loaded
+Error 1396 Operation ALTER USER failed for 'foo'@'%'
+alter user foo identified with 'mysql_old_password';
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N 0 0 0 0 somecoolplugin N N 0.000000
-alter user foo identified with 'somecoolplugin' using 'somecoolpassphrase';
+% foo N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N 0 0 0 0 mysql_old_password N N 0.000000
+alter user foo identified with 'mysql_old_password' using '0123456789ABCDEF';
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N 0 0 0 0 somecoolplugin somecoolpassphrase N N 0.000000
+% foo 0123456789ABCDEF N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N 0 0 0 0 mysql_old_password 0123456789ABCDEF N N 0.000000
# Test ssl related altering.
alter user foo identified by 'something' require SSL;
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N ANY 0 0 0 0 N N 0.000000
+% foo *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N ANY 0 0 0 0 mysql_native_password *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N 0.000000
alter user foo identified by 'something' require X509;
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N X509 0 0 0 0 N N 0.000000
+% foo *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N X509 0 0 0 0 mysql_native_password *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N 0.000000
alter user foo identified by 'something'
require cipher 'text' issuer 'foo_issuer' subject 'foo_subject';
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N SPECIFIED text foo_issuer foo_subject 0 0 0 0 N N 0.000000
+% foo *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N SPECIFIED text foo_issuer foo_subject 0 0 0 0 mysql_native_password *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N 0.000000
# Test resource limits altering.
alter user foo with MAX_QUERIES_PER_HOUR 10
MAX_UPDATES_PER_HOUR 20
@@ -89,5 +96,6 @@ MAX_CONNECTIONS_PER_HOUR 30
MAX_USER_CONNECTIONS 40;
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N SPECIFIED text foo_issuer foo_subject 10 20 30 40 N N 0.000000
+% foo *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N N N N N N N N N N N N N N Y N N N N N N N N N Y N N N N SPECIFIED text foo_issuer foo_subject 10 20 30 40 mysql_native_password *88C89BE093D4ECF72D039F62EBB7477EA1FD4D63 N N 0.000000
drop user foo;
+update mysql.global_priv set priv=@root_priv where user='root' and host='localhost';
diff --git a/mysql-test/main/alter_user.test b/mysql-test/main/alter_user.test
index ca444f70a70..9ea98615272 100644
--- a/mysql-test/main/alter_user.test
+++ b/mysql-test/main/alter_user.test
@@ -1,5 +1,7 @@
--source include/not_embedded.inc
+select priv into @root_priv from mysql.global_priv where user='root' and host='localhost';
+
select * from mysql.user where user = 'root' and host = 'localhost';
--echo # Test syntax
--echo #
@@ -53,10 +55,14 @@ select * from mysql.user where user = 'foo';
alter user foo identified by password '*88C89BE093D4ECF72D039F62EBB7477EA1FD4D63';
select * from mysql.user where user = 'foo';
+--error ER_CANNOT_USER
alter user foo identified with 'somecoolplugin';
+show warnings;
+
+alter user foo identified with 'mysql_old_password';
select * from mysql.user where user = 'foo';
-alter user foo identified with 'somecoolplugin' using 'somecoolpassphrase';
+alter user foo identified with 'mysql_old_password' using '0123456789ABCDEF';
select * from mysql.user where user = 'foo';
--echo # Test ssl related altering.
@@ -77,3 +83,5 @@ alter user foo with MAX_QUERIES_PER_HOUR 10
MAX_USER_CONNECTIONS 40;
select * from mysql.user where user = 'foo';
drop user foo;
+
+update mysql.global_priv set priv=@root_priv where user='root' and host='localhost';
diff --git a/mysql-test/main/analyze.result b/mysql-test/main/analyze.result
index 9dff94ab08c..dd785abcd42 100644
--- a/mysql-test/main/analyze.result
+++ b/mysql-test/main/analyze.result
@@ -3,6 +3,7 @@ lock tables t1 write;
insert into t1 values(0);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
unlock tables;
check table t1;
@@ -15,6 +16,7 @@ lock tables t1 write;
delete from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
unlock tables;
check table t1;
@@ -25,6 +27,7 @@ create table t1 (a bigint);
insert into t1 values(0);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
check table t1;
Table Op Msg_type Msg_text
@@ -34,9 +37,13 @@ create table t1 (a mediumtext, fulltext key key1(a)) charset utf8 collate utf8_g
insert into t1 values ('hello');
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'a'
test.t1 analyze status OK
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'a'
test.t1 analyze status Table is already up to date
drop table t1;
CREATE TABLE t1 (a int);
diff --git a/mysql-test/main/analyze_format_json.result b/mysql-test/main/analyze_format_json.result
index 7991379bc55..c306c2fc1b3 100644
--- a/mysql-test/main/analyze_format_json.result
+++ b/mysql-test/main/analyze_format_json.result
@@ -143,7 +143,7 @@ ANALYZE
"attached_condition": "tbl2.b < 60"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "1Kb",
"join_type": "BNL",
"r_filtered": 100
}
@@ -181,7 +181,7 @@ ANALYZE
"attached_condition": "tbl2.b < 60"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "1Kb",
"join_type": "BNL",
"attached_condition": "tbl1.c > tbl2.c",
"r_filtered": 15.833
@@ -315,7 +315,7 @@ ANALYZE
"key_length": "4",
"used_key_parts": ["pk"],
"r_loops": 1,
- "rows": 11,
+ "rows": 10,
"r_rows": 10,
"r_total_time_ms": "REPLACED",
"filtered": 100,
@@ -340,7 +340,7 @@ ANALYZE
"key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["pk"],
- "rows": 11,
+ "rows": 10,
"r_rows": 10,
"r_filtered": 50,
"r_total_time_ms": "REPLACED",
@@ -625,7 +625,7 @@ ANALYZE
"attached_condition": "<in_optimizer>(t2.b,t2.b in (subquery#2))"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "65",
"join_type": "BNL",
"attached_condition": "<in_optimizer>(t2.b,t2.b in (subquery#2))",
"r_filtered": null
@@ -711,7 +711,7 @@ ANALYZE
"attached_condition": "t3.f3 in (1,2)"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "1",
"join_type": "BNL",
"r_filtered": null
},
@@ -742,7 +742,7 @@ ANALYZE
"r_filtered": null
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "65",
"join_type": "BNL",
"attached_condition": "t2.f2 = t3.f3",
"r_filtered": null
diff --git a/mysql-test/main/analyze_stmt_orderby.result b/mysql-test/main/analyze_stmt_orderby.result
index deb19d4d93a..23f4c39dd5b 100644
--- a/mysql-test/main/analyze_stmt_orderby.result
+++ b/mysql-test/main/analyze_stmt_orderby.result
@@ -62,7 +62,7 @@ ANALYZE
explain
update t2 set a=a+1 where a<10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 5 NULL 8 Using where; Using buffer
+1 SIMPLE t2 range a a 5 NULL 9 Using where; Using buffer
explain format=json
update t2 set a=a+1 where a<10;
EXPLAIN
@@ -78,7 +78,7 @@ EXPLAIN
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
- "rows": 8,
+ "rows": 9,
"attached_condition": "t2.a < 10"
}
}
@@ -100,7 +100,7 @@ ANALYZE
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
- "rows": 8,
+ "rows": 9,
"r_rows": 10,
"r_filtered": 100,
"r_total_time_ms": "REPLACED",
@@ -426,7 +426,7 @@ ANALYZE
"r_filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "65",
"join_type": "BNL",
"attached_condition": "t3.a = t0.a",
"r_filtered": 10
@@ -497,7 +497,7 @@ ANALYZE
"r_filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "119",
"join_type": "BNL",
"attached_condition": "t5.a = t6.a",
"r_filtered": 21.429
@@ -537,7 +537,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "119",
"join_type": "BNL",
"attached_condition": "t5.a = t6.a"
}
diff --git a/mysql-test/main/auth_rpl.result b/mysql-test/main/auth_rpl.result
index a3da7985da1..56f5f5e8f2b 100644
--- a/mysql-test/main/auth_rpl.result
+++ b/mysql-test/main/auth_rpl.result
@@ -15,7 +15,7 @@ connection master;
connection slave;
# Slave in-sync with master now.
SELECT user, plugin, authentication_string FROM mysql.user WHERE user LIKE 'plug_user';
-user plugin authentication_string
+User plugin authentication_string
plug_user test_plugin_server plug_user
# Cleanup (on slave).
include/stop_slave.inc
diff --git a/mysql-test/main/backup_aria.result b/mysql-test/main/backup_aria.result
new file mode 100644
index 00000000000..d537711404f
--- /dev/null
+++ b/mysql-test/main/backup_aria.result
@@ -0,0 +1,158 @@
+connect con1,localhost,root,,;
+SET SESSION lock_wait_timeout = 1;
+#-----------------------------------------------------------------------
+# Single-threaded tests
+#-----------------------------------------------------------------------
+# Show the fate and impact of some SELECT /HANDLER ... READ
+# sliding through the sequence.
+CREATE TABLE t_permanent_aria (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+INSERT INTO t_permanent_aria SET col1 = 1;
+BACKUP STAGE START;
+SELECT COUNT(*) FROM t_permanent_aria;
+COUNT(*)
+1
+HANDLER t_permanent_aria OPEN;
+HANDLER t_permanent_aria READ FIRST;
+col1
+1
+HANDLER t_permanent_aria CLOSE;
+BACKUP STAGE FLUSH;
+SELECT COUNT(*) FROM t_permanent_aria;
+COUNT(*)
+1
+HANDLER t_permanent_aria OPEN;
+HANDLER t_permanent_aria READ FIRST;
+col1
+1
+HANDLER t_permanent_aria CLOSE;
+BACKUP STAGE BLOCK_DDL;
+SELECT COUNT(*) FROM t_permanent_aria;
+COUNT(*)
+1
+HANDLER t_permanent_aria OPEN;
+HANDLER t_permanent_aria READ FIRST;
+col1
+1
+HANDLER t_permanent_aria CLOSE;
+BACKUP STAGE BLOCK_COMMIT;
+SELECT COUNT(*) FROM t_permanent_aria;
+COUNT(*)
+1
+HANDLER t_permanent_aria OPEN;
+HANDLER t_permanent_aria READ FIRST;
+col1
+1
+HANDLER t_permanent_aria CLOSE;
+BACKUP STAGE END;
+# In case the backup lock is taken by the current connection then
+# - DML modifying some permanent table is not allowed
+BACKUP STAGE START;
+SET AUTOCOMMIT = 0;
+INSERT INTO t_permanent_aria SET col1 = 1;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+SET AUTOCOMMIT = 1;
+INSERT INTO t_permanent_aria SET col1 = 1;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+# - DDL creating or renaming a permanent table or a procedure is not
+# allowed.
+# The latter tries to modify a permanent system table.
+CREATE TABLE throw_away (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+RENAME TABLE t_permanent_aria To throw_away;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+# - DDL creating a temporary table is allowed.
+CREATE TEMPORARY TABLE t_temporary_aria (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+# - DML modifying that temporary table is allowed.
+INSERT INTO t_temporary_aria SET col1 = 1;
+SELECT COUNT(*) FROM t_temporary_aria;
+COUNT(*)
+1
+BACKUP STAGE END;
+# Show the fate and impact of some auto committed INSERT into temporary
+# table sliding through the sequence.
+SET AUTOCOMMIT = 1;
+BACKUP STAGE START;
+INSERT INTO t_temporary_aria SET col1 = 1;
+BACKUP STAGE FLUSH;
+INSERT INTO t_temporary_aria SET col1 = 1;
+BACKUP STAGE BLOCK_DDL;
+INSERT INTO t_temporary_aria SET col1 = 1;
+BACKUP STAGE BLOCK_COMMIT;
+INSERT INTO t_temporary_aria SET col1 = 1;
+BACKUP STAGE END;
+SELECT COUNT(*) FROM t_temporary_aria;
+COUNT(*)
+5
+# Show the fate and impact of some DROP/CREATE TEMPORARY TABLE sliding
+# through the sequence.
+SET AUTOCOMMIT = 1;
+BACKUP STAGE START;
+DROP TEMPORARY TABLE t_temporary_aria;
+CREATE TEMPORARY TABLE t_temporary_aria (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+BACKUP STAGE FLUSH;
+DROP TEMPORARY TABLE t_temporary_aria;
+CREATE TEMPORARY TABLE t_temporary_aria (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+BACKUP STAGE BLOCK_DDL;
+DROP TEMPORARY TABLE t_temporary_aria;
+CREATE TEMPORARY TABLE t_temporary_aria (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+BACKUP STAGE BLOCK_COMMIT;
+DROP TEMPORARY TABLE t_temporary_aria;
+CREATE TEMPORARY TABLE t_temporary_aria (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+BACKUP STAGE END;
+# Show that even more DDL on the temporary table is allowed.
+BACKUP STAGE START;
+TRUNCATE t_temporary_aria;
+ALTER TABLE t_temporary_aria ADD COLUMN col2 INT;
+ALTER TABLE t_temporary_aria ADD KEY idx(col2);
+BACKUP STAGE END;
+DROP TABLE t_permanent_aria;
+#-----------------------------------------------------------------------
+# Show that non-transactional tables lock with BACKUP STAGE FLUSH
+#-----------------------------------------------------------------------
+set session lock_wait_timeout=default;
+create table t1 (a int) engine=aria transactional=0;
+insert into t1 values (1), (2);
+connection con1;
+backup stage start;
+backup stage flush;
+connection default;
+select * from t1;
+a
+1
+2
+SET STATEMENT lock_wait_timeout=0 FOR INSERT INTO t1 values (3);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+INSERT INTO t1 values (4);
+connection con1;
+backup stage end;
+connection default;
+select * from t1;
+a
+1
+2
+4
+drop table t1;
+#-----------------------------------------------------------------------
+# Show that transactional tables don't lock with BACKUP STAGE FLUSH
+#-----------------------------------------------------------------------
+set session lock_wait_timeout=default;
+create table t1 (a int) engine=aria transactional=1 page_checksum=1;
+insert into t1 values (1), (2);
+connection con1;
+backup stage start;
+backup stage flush;
+connection default;
+INSERT INTO t1 values (4);
+connection con1;
+backup stage end;
+connection default;
+select * from t1;
+a
+1
+2
+4
+drop table t1;
+#
+# Cleanup
+#
+disconnect con1;
diff --git a/mysql-test/main/backup_aria.test b/mysql-test/main/backup_aria.test
new file mode 100644
index 00000000000..7b741b829a1
--- /dev/null
+++ b/mysql-test/main/backup_aria.test
@@ -0,0 +1,157 @@
+########################################################################
+# Tests for Implement LOCK FOR BACKUP (MDEV-5336)
+########################################################################
+# Check a non-transactional table, i.e. ENGINE = Aria TRANSACTIONAL = 0.
+#
+
+--source include/not_embedded.inc
+# As the non-transactional engine we use Aria with TRANSACTIONAL = 0
+--source include/have_aria.inc
+
+# The following connection is used by several of the tests below
+connect (con1,localhost,root,,);
+
+SET SESSION lock_wait_timeout = 1;
+
+--echo #-----------------------------------------------------------------------
+--echo # Single-threaded tests
+--echo #-----------------------------------------------------------------------
+
+--echo # Show the fate and impact of some SELECT /HANDLER ... READ
+--echo # sliding through the sequence.
+CREATE TABLE t_permanent_aria (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+INSERT INTO t_permanent_aria SET col1 = 1;
+BACKUP STAGE START;
+SELECT COUNT(*) FROM t_permanent_aria;
+HANDLER t_permanent_aria OPEN;
+HANDLER t_permanent_aria READ FIRST;
+HANDLER t_permanent_aria CLOSE;
+BACKUP STAGE FLUSH;
+SELECT COUNT(*) FROM t_permanent_aria;
+HANDLER t_permanent_aria OPEN;
+HANDLER t_permanent_aria READ FIRST;
+HANDLER t_permanent_aria CLOSE;
+BACKUP STAGE BLOCK_DDL;
+SELECT COUNT(*) FROM t_permanent_aria;
+HANDLER t_permanent_aria OPEN;
+HANDLER t_permanent_aria READ FIRST;
+HANDLER t_permanent_aria CLOSE;
+BACKUP STAGE BLOCK_COMMIT;
+SELECT COUNT(*) FROM t_permanent_aria;
+HANDLER t_permanent_aria OPEN;
+HANDLER t_permanent_aria READ FIRST;
+HANDLER t_permanent_aria CLOSE;
+BACKUP STAGE END;
+
+--echo # If the backup lock is taken by the current connection, then
+--echo # - DML modifying some permanent table is not allowed
+BACKUP STAGE START;
+SET AUTOCOMMIT = 0;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+INSERT INTO t_permanent_aria SET col1 = 1;
+SET AUTOCOMMIT = 1;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+INSERT INTO t_permanent_aria SET col1 = 1;
+
+--echo # - DDL creating or renaming a permanent table or a procedure is not
+--echo # allowed.
+--echo # The latter tries to modify a permanent system table.
+
+--error ER_BACKUP_LOCK_IS_ACTIVE
+CREATE TABLE throw_away (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+RENAME TABLE t_permanent_aria To throw_away;
+--echo # - DDL creating a temporary table is allowed.
+CREATE TEMPORARY TABLE t_temporary_aria (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+--echo # - DML modifying that temporary table is allowed.
+INSERT INTO t_temporary_aria SET col1 = 1;
+SELECT COUNT(*) FROM t_temporary_aria;
+BACKUP STAGE END;
+
+--echo # Show the fate and impact of some auto committed INSERT into temporary
+--echo # table sliding through the sequence.
+SET AUTOCOMMIT = 1;
+BACKUP STAGE START;
+INSERT INTO t_temporary_aria SET col1 = 1;
+BACKUP STAGE FLUSH;
+INSERT INTO t_temporary_aria SET col1 = 1;
+BACKUP STAGE BLOCK_DDL;
+INSERT INTO t_temporary_aria SET col1 = 1;
+BACKUP STAGE BLOCK_COMMIT;
+INSERT INTO t_temporary_aria SET col1 = 1;
+BACKUP STAGE END;
+SELECT COUNT(*) FROM t_temporary_aria;
+
+--echo # Show the fate and impact of some DROP/CREATE TEMPORARY TABLE sliding
+--echo # through the sequence.
+
+SET AUTOCOMMIT = 1;
+BACKUP STAGE START;
+DROP TEMPORARY TABLE t_temporary_aria;
+CREATE TEMPORARY TABLE t_temporary_aria (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+BACKUP STAGE FLUSH;
+DROP TEMPORARY TABLE t_temporary_aria;
+CREATE TEMPORARY TABLE t_temporary_aria (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+BACKUP STAGE BLOCK_DDL;
+DROP TEMPORARY TABLE t_temporary_aria;
+CREATE TEMPORARY TABLE t_temporary_aria (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+BACKUP STAGE BLOCK_COMMIT;
+DROP TEMPORARY TABLE t_temporary_aria;
+CREATE TEMPORARY TABLE t_temporary_aria (col1 INT) ENGINE = Aria TRANSACTIONAL = 0;
+BACKUP STAGE END;
+--echo # Show that even more DDL on the temporary table is allowed.
+BACKUP STAGE START;
+TRUNCATE t_temporary_aria;
+ALTER TABLE t_temporary_aria ADD COLUMN col2 INT;
+ALTER TABLE t_temporary_aria ADD KEY idx(col2);
+BACKUP STAGE END;
+
+DROP TABLE t_permanent_aria;
+
+--echo #-----------------------------------------------------------------------
+--echo # Show that non-transactional tables lock with BACKUP STAGE FLUSH
+--echo #-----------------------------------------------------------------------
+
+set session lock_wait_timeout=default;
+create table t1 (a int) engine=aria transactional=0;
+insert into t1 values (1), (2);
+
+connection con1;
+backup stage start;
+backup stage flush;
+connection default;
+select * from t1;
+--error ER_LOCK_WAIT_TIMEOUT
+SET STATEMENT lock_wait_timeout=0 FOR INSERT INTO t1 values (3);
+--send INSERT INTO t1 values (4)
+connection con1;
+backup stage end;
+connection default;
+--reap # send
+select * from t1;
+drop table t1;
+
+--echo #-----------------------------------------------------------------------
+--echo # Show that transactional tables don't lock with BACKUP STAGE FLUSH
+--echo #-----------------------------------------------------------------------
+
+set session lock_wait_timeout=default;
+create table t1 (a int) engine=aria transactional=1 page_checksum=1;
+insert into t1 values (1), (2);
+
+connection con1;
+backup stage start;
+backup stage flush;
+connection default;
+INSERT INTO t1 values (4);
+connection con1;
+backup stage end;
+connection default;
+select * from t1;
+drop table t1;
+
+--echo #
+--echo # Cleanup
+--echo #
+
+disconnect con1;
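For orientation, the stage sequence exercised by the tests above is intended to be driven by an external backup tool from its own connection, copying files between the stages. A minimal sketch of that sequence follows; the copy steps are illustrative placeholders and not part of the test suite:

BACKUP STAGE START;          -- take the backup lock; DML on permanent tables in this connection is refused
-- bulk-copy the data files here
BACKUP STAGE FLUSH;          -- flush tables; writes to non-transactional tables (e.g. Aria TRANSACTIONAL=0) from other connections now block
BACKUP STAGE BLOCK_DDL;      -- new DDL on permanent tables blocks; open HANDLERs are closed
BACKUP STAGE BLOCK_COMMIT;   -- commits block, giving a consistent point at which to finish the copy
-- copy the remaining changes / logs here
BACKUP STAGE END;            -- release the backup lock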
diff --git a/mysql-test/main/backup_interaction.result b/mysql-test/main/backup_interaction.result
new file mode 100644
index 00000000000..20ba8fa0811
--- /dev/null
+++ b/mysql-test/main/backup_interaction.result
@@ -0,0 +1,520 @@
+#
+# Check backup and FTWRL
+#
+flush tables with read lock;
+backup stage start;
+ERROR HY000: Can't execute the query because you have a conflicting read lock
+unlock tables;
+backup stage start;
+flush tables with read lock;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+backup stage end;
+#
+# Check backup and FLUSH TABLES
+#
+flush tables;
+backup stage start;
+flush tables;
+backup stage end;
+#
+# Check BACKUP STAGE under lock tables
+#
+create table t1 (a int);
+lock table t1 write;
+backup stage start;
+ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
+backup stage end;
+ERROR HY000: You must start backup with "BACKUP STAGE START"
+unlock tables;
+lock table t1 read;
+backup stage start;
+ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
+backup stage end;
+ERROR HY000: You must start backup with "BACKUP STAGE START"
+unlock tables;
+#
+# Check lock tables under BACKUP STAGE
+#
+backup stage start;
+unlock tables;
+select lock_mode from information_schema.metadata_lock_info;
+lock_mode
+MDL_BACKUP_START
+lock table t1 write;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+lock table t1 read;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+unlock tables;
+backup stage end;
+drop table t1;
+#
+# Check setting readonly under BACKUP STAGE
+#
+backup stage start;
+set @@global.read_only=1;
+ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
+backup stage end;
+# also make sure going back from read-only mode is not allowed
+set @@global.read_only=1;
+backup stage start;
+set @@global.read_only=0;
+ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
+backup stage end;
+set @@global.read_only=0;
+#
+# Check BACKUP STAGE under read_only
+#
+set @@global.read_only=1;
+backup stage start;
+backup stage end;
+set @@global.read_only=0;
+#
+# Check that we can't create tables during backup
+#
+backup stage start;
+create table t1 (a int);
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+backup stage end;
+# also make sure we can't write to a table during backup
+create table t1(a INT);
+backup stage start;
+insert into t1 values(1);
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+insert delayed into t1 values(1);
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+update t1 set a=1;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+delete from t1;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+truncate table t1;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+drop table t1;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+backup stage end;
+drop table t1;
+#
+# BACKUP STAGE performs implicit commits
+#
+create table t1(a int) engine=InnoDB;
+begin;
+insert into t1 values(1);
+select lock_mode from information_schema.metadata_lock_info;
+lock_mode
+MDL_SHARED_WRITE
+backup stage start;
+select lock_mode from information_schema.metadata_lock_info;
+lock_mode
+MDL_BACKUP_START
+backup stage block_commit;
+commit;
+backup stage end;
+drop table t1;
+# Ensure that BACKUP STAGE ... does AUTOCOMMIT like most DDL.
+# Side effect:
+# Show the impact of not yet committed INSERT before sequence start
+# and ROLLBACK sliding through the sequence.
+CREATE TABLE t1 (col1 INT) ENGINE = InnoDB;
+SET AUTOCOMMIT = 0;
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+ROLLBACK;
+BACKUP STAGE END;
+#----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+ROLLBACK;
+BACKUP STAGE END;
+#----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+ROLLBACK;
+BACKUP STAGE END;
+#----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+ROLLBACK;
+BACKUP STAGE END;
+#----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+ROLLBACK;
+SELECT COUNT(*) = 5 AS expect_1 FROM t1;
+expect_1
+1
+# Show the impact of not yet committed INSERT before sequence start
+# and a COMMIT sliding through the sequence.
+SET AUTOCOMMIT = 0;
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+COMMIT;
+BACKUP STAGE END;
+#----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+COMMIT;
+BACKUP STAGE END;
+#----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+COMMIT;
+BACKUP STAGE END;
+#----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+COMMIT;
+BACKUP STAGE END;
+#----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+COMMIT;
+SELECT COUNT(*) = 10 AS expect_1 FROM t1;
+expect_1
+1
+DELETE FROM t1;
+COMMIT;
+drop table t1;
+#
+# CHECK: RO transaction under BACKUP STAGE is a potential deadlock
+# OTOH we most probably allow them under FTWRL as well
+#
+CREATE TABLE t1 (col1 INT) ENGINE = InnoDB;
+insert into t1 values (1);
+backup stage start;
+backup stage block_commit;
+begin;
+select * from t1;
+col1
+1
+select lock_mode from information_schema.metadata_lock_info;
+lock_mode
+MDL_BACKUP_WAIT_COMMIT
+MDL_SHARED_READ
+backup stage end;
+select lock_mode from information_schema.metadata_lock_info;
+lock_mode
+drop table t1;
+#
+# Check that handlers are closed by backup stage block_ddl
+#
+create table t1 (a int, key a (a));
+insert into t1 (a) values (1), (2), (3), (4), (5);
+handler t1 open;
+handler t1 read a prev;
+a
+5
+backup stage start;
+handler t1 read a prev;
+a
+4
+backup stage flush;
+backup stage block_ddl;
+handler t1 read a prev;
+a
+5
+backup stage block_commit;
+handler t1 read a prev;
+a
+4
+backup stage end;
+handler t1 close;
+drop table t1;
+# Show the fate and impact of some SELECT /HANDLER ... READ
+# sliding through the sequence.
+CREATE TABLE t1_innodb (col1 INT) ENGINE = InnoDB;
+INSERT INTO t1_innodb values (1),(2),(3);
+COMMIT;
+CREATE TABLE t1_myisam (col1 INT) ENGINE = MyISAM;
+INSERT INTO t1_myisam values (1),(2),(3);
+BACKUP STAGE START;
+SELECT COUNT(*) FROM t1_innodb;
+COUNT(*)
+3
+SELECT * FROM t1_innodb;
+col1
+1
+2
+3
+HANDLER t1_innodb OPEN;
+HANDLER t1_innodb READ FIRST;
+col1
+1
+HANDLER t1_innodb CLOSE;
+SELECT COUNT(*) FROM t1_myisam;
+COUNT(*)
+3
+HANDLER t1_myisam OPEN;
+HANDLER t1_myisam READ FIRST;
+col1
+1
+HANDLER t1_myisam CLOSE;
+BACKUP STAGE FLUSH;
+SELECT COUNT(*) FROM t1_innodb;
+COUNT(*)
+3
+HANDLER t1_innodb OPEN;
+HANDLER t1_innodb READ FIRST;
+col1
+1
+HANDLER t1_innodb CLOSE;
+SELECT COUNT(*) FROM t1_myisam;
+COUNT(*)
+3
+HANDLER t1_myisam OPEN;
+HANDLER t1_myisam READ FIRST;
+col1
+1
+HANDLER t1_myisam CLOSE;
+BACKUP STAGE BLOCK_DDL;
+SELECT COUNT(*) FROM t1_innodb;
+COUNT(*)
+3
+HANDLER t1_innodb OPEN;
+HANDLER t1_innodb READ FIRST;
+col1
+1
+HANDLER t1_innodb CLOSE;
+SELECT COUNT(*) FROM t1_myisam;
+COUNT(*)
+3
+HANDLER t1_myisam OPEN;
+HANDLER t1_myisam READ FIRST;
+col1
+1
+HANDLER t1_myisam CLOSE;
+BACKUP STAGE BLOCK_COMMIT;
+SELECT COUNT(*) FROM t1_innodb;
+COUNT(*)
+3
+HANDLER t1_innodb OPEN;
+HANDLER t1_innodb READ FIRST;
+col1
+1
+HANDLER t1_innodb CLOSE;
+SELECT COUNT(*) FROM t1_myisam;
+COUNT(*)
+3
+HANDLER t1_myisam OPEN;
+HANDLER t1_myisam READ FIRST;
+col1
+1
+HANDLER t1_myisam CLOSE;
+BACKUP STAGE END;
+drop table t1_innodb,t1_myisam;
+# Show the fate and impact of some SET GLOBAL tx_read_only = 1/0
+# sliding through the sequence.
+BACKUP STAGE START;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+BACKUP STAGE FLUSH;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+BACKUP STAGE BLOCK_DDL;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+BACKUP STAGE BLOCK_COMMIT;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+BACKUP STAGE END;
+# Show the fate and impact of some SET SESSION sql_log_bin = 0/1
+# sliding through the sequence.
+COMMIT;
+SET SESSION sql_log_bin = 1;
+BACKUP STAGE START;
+SET SESSION sql_log_bin = 0;
+SET SESSION sql_log_bin = 1;
+BACKUP STAGE FLUSH;
+SET SESSION sql_log_bin = 0;
+SET SESSION sql_log_bin = 1;
+BACKUP STAGE BLOCK_DDL;
+SET SESSION sql_log_bin = 0;
+SET SESSION sql_log_bin = 1;
+BACKUP STAGE BLOCK_COMMIT;
+SET SESSION sql_log_bin = 0;
+SET SESSION sql_log_bin = 1;
+BACKUP STAGE END;
+#----
+SET SESSION sql_log_bin = 0;
+BACKUP STAGE START;
+SET SESSION sql_log_bin = 1;
+SET SESSION sql_log_bin = 0;
+BACKUP STAGE FLUSH;
+SET SESSION sql_log_bin = 1;
+SET SESSION sql_log_bin = 0;
+BACKUP STAGE BLOCK_DDL;
+SET SESSION sql_log_bin = 1;
+SET SESSION sql_log_bin = 0;
+BACKUP STAGE BLOCK_COMMIT;
+SET SESSION sql_log_bin = 1;
+SET SESSION sql_log_bin = 0;
+BACKUP STAGE END;
+SET SESSION sql_log_bin = 1;
+#-----------------------------------------------------------------------
+# BACKUP STAGE statements are not allowed in stored routines
+#-----------------------------------------------------------------------
+CREATE TABLE t1 (col1 INT);
+CREATE PROCEDURE p1()
+BEGIN
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+END|
+ERROR 0A000: BACKUP STAGE is not allowed in stored procedures
+CREATE FUNCTION f1 (s CHAR(20)) RETURNS INT DETERMINISTIC
+BEGIN
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+RETURN 1;
+END|
+ERROR 0A000: BACKUP STAGE is not allowed in stored procedures
+CREATE TRIGGER tr1 BEFORE INSERT ON t1 FOR EACH ROW
+BEGIN
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+END|
+ERROR 0A000: BACKUP STAGE is not allowed in stored procedures
+DROP TABLE t1;
+#-----------------------------------------------------------------------
+# Check BACKUP status variables
+#-----------------------------------------------------------------------
+SET SESSION lock_wait_timeout = 1;
+FLUSH STATUS;
+# Show how the status variable 'Com_backup' changes after BACKUP STAGE ..
+SHOW STATUS LIKE 'Com_backup';
+Variable_name Value
+Com_backup 0
+BACKUP STAGE START;
+SHOW STATUS LIKE 'Com_backup';
+Variable_name Value
+Com_backup 1
+BACKUP STAGE START;
+ERROR HY000: Backup stage 'START' is same or before current backup stage 'START'
+SHOW STATUS LIKE 'Com_backup';
+Variable_name Value
+Com_backup 2
+BACKUP STAGE FLUSH;
+SHOW STATUS LIKE 'Com_backup';
+Variable_name Value
+Com_backup 3
+BACKUP STAGE BLOCK_DDL;
+SHOW STATUS LIKE 'Com_backup';
+Variable_name Value
+Com_backup 4
+BACKUP STAGE BLOCK_COMMIT;
+SHOW STATUS LIKE 'Com_backup';
+Variable_name Value
+Com_backup 5
+BACKUP STAGE END;
+# If the backup lock is taken by the current connection, then
+# - DML modifying some permanent table is not allowed
+CREATE TABLE t1_innodb (col1 INT) ENGINE = InnoDB;
+CREATE TABLE t1_myisam (col1 INT) ENGINE = MyISAM;
+BACKUP STAGE START;
+SET AUTOCOMMIT = 0;
+INSERT INTO t1_innodb SET col1 = 1;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+SET AUTOCOMMIT = 1;
+INSERT INTO t1_innodb SET col1 = 1;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+INSERT INTO t1_myisam SET col1 = 1;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+# - DDL creating or renaming a permanent table or a procedure etc.
+# is not allowed.
+CREATE TABLE throw_away (col1 INT) ENGINE = InnoDB;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+RENAME TABLE t1_innodb To throw_away;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+CREATE PROCEDURE p1() SELECT 13;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+CREATE PROCEDURE p1() SELECT 13;
+ERROR HY000: Can't execute the command as you have a BACKUP STAGE active
+BACKUP STAGE END;
+DROP TABLE t1_innodb;
+DROP TABLE t1_myisam;
+#
+# Creating and modifying TEMPORARY TABLES are allowed
+#
+BACKUP STAGE START;
+BACKUP STAGE BLOCK_DDL;
+CREATE TEMPORARY TABLE tmp (col1 INT);
+DROP TEMPORARY TABLE tmp;
+CREATE TEMPORARY TABLE t_temporary_innodb (col1 INT) ENGINE = InnoDB;
+CREATE TEMPORARY TABLE t_temporary_myisam (col1 INT) ENGINE = MyISAM;
+# - DML modifying that temporary table is allowed.
+INSERT INTO t_temporary_innodb SET col1 = 1;
+SELECT COUNT(*) FROM t_temporary_innodb;
+COUNT(*)
+1
+INSERT INTO t_temporary_myisam SET col1 = 1;
+SELECT COUNT(*) FROM t_temporary_myisam;
+COUNT(*)
+1
+BACKUP STAGE END;
+# Show the fate and impact of some auto committed INSERT into temporary
+# table sliding through the sequence.
+SET AUTOCOMMIT = 1;
+BACKUP STAGE START;
+INSERT INTO t_temporary_innodb SET col1 = 1;
+INSERT INTO t_temporary_myisam SET col1 = 1;
+BACKUP STAGE FLUSH;
+INSERT INTO t_temporary_innodb SET col1 = 1;
+INSERT INTO t_temporary_myisam SET col1 = 1;
+BACKUP STAGE BLOCK_DDL;
+INSERT INTO t_temporary_innodb SET col1 = 1;
+INSERT INTO t_temporary_myisam SET col1 = 1;
+BACKUP STAGE BLOCK_COMMIT;
+INSERT INTO t_temporary_innodb SET col1 = 1;
+INSERT INTO t_temporary_myisam SET col1 = 1;
+BACKUP STAGE END;
+SELECT COUNT(*) FROM t_temporary_innodb;
+COUNT(*)
+5
+# Show the fate and impact of some DROP/CREATE TEMPORARY TABLE sliding
+# through the sequence.
+SET AUTOCOMMIT = 1;
+BACKUP STAGE START;
+DROP TEMPORARY TABLE t_temporary_innodb;
+CREATE TEMPORARY TABLE t_temporary_innodb (col1 INT) ENGINE = InnoDB;
+BACKUP STAGE FLUSH;
+DROP TEMPORARY TABLE t_temporary_innodb;
+CREATE TEMPORARY TABLE t_temporary_innodb (col1 INT) ENGINE = InnoDB;
+BACKUP STAGE BLOCK_DDL;
+DROP TEMPORARY TABLE t_temporary_innodb;
+CREATE TEMPORARY TABLE t_temporary_innodb (col1 INT) ENGINE = InnoDB;
+BACKUP STAGE BLOCK_COMMIT;
+DROP TEMPORARY TABLE t_temporary_innodb;
+CREATE TEMPORARY TABLE t_temporary_innodb (col1 INT) ENGINE = InnoDB;
+BACKUP STAGE END;
+# Show that even more DDL on the temporary table is allowed.
+BACKUP STAGE START;
+TRUNCATE t_temporary_innodb;
+ALTER TABLE t_temporary_innodb ADD COLUMN col2 INT;
+ALTER TABLE t_temporary_innodb ADD KEY idx(col2);
+BACKUP STAGE END;
diff --git a/mysql-test/main/backup_interaction.test b/mysql-test/main/backup_interaction.test
new file mode 100644
index 00000000000..f5362417c98
--- /dev/null
+++ b/mysql-test/main/backup_interaction.test
@@ -0,0 +1,503 @@
+########################################################################
+# Tests how BACKUP STAGE interacts with other commands (MDEV-5336)
+########################################################################
+
+--source include/have_innodb.inc
+--source include/have_metadata_lock_info.inc
+
+--echo #
+--echo # Check backup and FTWRL
+--echo #
+
+flush tables with read lock;
+--error ER_CANT_UPDATE_WITH_READLOCK
+backup stage start;
+unlock tables;
+backup stage start;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+flush tables with read lock;
+backup stage end;
+
+--echo #
+--echo # Check backup and FLUSH TABLES
+--echo #
+
+flush tables;
+backup stage start;
+flush tables;
+backup stage end;
+
+--echo #
+--echo # Check BACKUP STAGE under lock tables
+--echo #
+
+create table t1 (a int);
+lock table t1 write;
+--error ER_LOCK_OR_ACTIVE_TRANSACTION
+backup stage start;
+--error ER_BACKUP_NOT_RUNNING
+backup stage end;
+unlock tables;
+
+lock table t1 read;
+--error ER_LOCK_OR_ACTIVE_TRANSACTION
+backup stage start;
+--error ER_BACKUP_NOT_RUNNING
+backup stage end;
+unlock tables;
+
+
+--echo #
+--echo # Check lock tables under BACKUP STAGE
+--echo #
+backup stage start;
+unlock tables;
+select lock_mode from information_schema.metadata_lock_info;
+
+--error ER_BACKUP_LOCK_IS_ACTIVE
+lock table t1 write;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+lock table t1 read;
+unlock tables;
+backup stage end;
+drop table t1;
+
+
+--echo #
+--echo # Check setting readonly under BACKUP STAGE
+--echo #
+backup stage start;
+--error ER_LOCK_OR_ACTIVE_TRANSACTION
+set @@global.read_only=1;
+backup stage end;
+
+--echo # also make sure going back from read-only mode is not allowed
+set @@global.read_only=1;
+backup stage start;
+--error ER_LOCK_OR_ACTIVE_TRANSACTION
+set @@global.read_only=0;
+backup stage end;
+set @@global.read_only=0;
+
+--echo #
+--echo # Check BACKUP STAGE under read_only
+--echo #
+
+set @@global.read_only=1;
+backup stage start;
+backup stage end;
+set @@global.read_only=0;
+
+--echo #
+--echo # Check that we can't create tables during backup
+--echo #
+
+backup stage start;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+create table t1 (a int);
+backup stage end;
+
+--echo # also make sure we can't write to a table during backup
+create table t1(a INT);
+backup stage start;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+insert into t1 values(1);
+--error ER_BACKUP_LOCK_IS_ACTIVE
+insert delayed into t1 values(1);
+--error ER_BACKUP_LOCK_IS_ACTIVE
+update t1 set a=1;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+delete from t1;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+truncate table t1;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+drop table t1;
+backup stage end;
+drop table t1;
+
+--echo #
+--echo # BACKUP STAGE performs implicit commits
+--echo #
+create table t1(a int) engine=InnoDB;
+begin;
+insert into t1 values(1);
+select lock_mode from information_schema.metadata_lock_info;
+backup stage start;
+select lock_mode from information_schema.metadata_lock_info;
+backup stage block_commit;
+commit;
+backup stage end;
+drop table t1;
+
+--echo # Ensure that BACKUP STAGE ... does AUTOCOMMIT like most DDL.
+--echo # Side effect:
+--echo # Show the impact of not yet committed INSERT before sequence start
+--echo # and ROLLBACK sliding through the sequence.
+
+CREATE TABLE t1 (col1 INT) ENGINE = InnoDB;
+SET AUTOCOMMIT = 0;
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+ROLLBACK;
+BACKUP STAGE END;
+--echo #----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+ROLLBACK;
+BACKUP STAGE END;
+--echo #----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+ROLLBACK;
+BACKUP STAGE END;
+--echo #----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+ROLLBACK;
+BACKUP STAGE END;
+--echo #----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+ROLLBACK;
+SELECT COUNT(*) = 5 AS expect_1 FROM t1;
+
+--echo # Show the impact of not yet committed INSERT before sequence start
+--echo # and a COMMIT sliding through the sequence.
+
+SET AUTOCOMMIT = 0;
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+COMMIT;
+BACKUP STAGE END;
+--echo #----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+COMMIT;
+BACKUP STAGE END;
+--echo #----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+COMMIT;
+BACKUP STAGE END;
+--echo #----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+COMMIT;
+BACKUP STAGE END;
+--echo #----
+INSERT INTO t1 SET col1 = 1;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+COMMIT;
+SELECT COUNT(*) = 10 AS expect_1 FROM t1;
+DELETE FROM t1;
+COMMIT;
+drop table t1;
+
+--echo #
+--echo # CHECK: RO transaction under BACKUP STAGE is a potential deadlock
+--echo # OTOH we most probably allow them under FTWRL as well
+--echo #
+
+CREATE TABLE t1 (col1 INT) ENGINE = InnoDB;
+insert into t1 values (1);
+backup stage start;
+backup stage block_commit;
+begin;
+select * from t1;
+select lock_mode from information_schema.metadata_lock_info;
+backup stage end;
+select lock_mode from information_schema.metadata_lock_info;
+drop table t1;
+
+--echo #
+--echo # Check that handlers are closed by backup stage block_ddl
+--echo #
+
+create table t1 (a int, key a (a));
+insert into t1 (a) values (1), (2), (3), (4), (5);
+handler t1 open;
+handler t1 read a prev;
+backup stage start;
+handler t1 read a prev;
+backup stage flush;
+backup stage block_ddl;
+handler t1 read a prev;
+backup stage block_commit;
+handler t1 read a prev;
+backup stage end;
+handler t1 close;
+drop table t1;
+
+--echo # Show the fate and impact of some SELECT /HANDLER ... READ
+--echo # sliding through the sequence.
+
+CREATE TABLE t1_innodb (col1 INT) ENGINE = InnoDB;
+INSERT INTO t1_innodb values (1),(2),(3);
+COMMIT;
+CREATE TABLE t1_myisam (col1 INT) ENGINE = MyISAM;
+INSERT INTO t1_myisam values (1),(2),(3);
+BACKUP STAGE START;
+SELECT COUNT(*) FROM t1_innodb;
+SELECT * FROM t1_innodb;
+HANDLER t1_innodb OPEN;
+HANDLER t1_innodb READ FIRST;
+HANDLER t1_innodb CLOSE;
+SELECT COUNT(*) FROM t1_myisam;
+HANDLER t1_myisam OPEN;
+HANDLER t1_myisam READ FIRST;
+HANDLER t1_myisam CLOSE;
+BACKUP STAGE FLUSH;
+SELECT COUNT(*) FROM t1_innodb;
+HANDLER t1_innodb OPEN;
+HANDLER t1_innodb READ FIRST;
+HANDLER t1_innodb CLOSE;
+SELECT COUNT(*) FROM t1_myisam;
+HANDLER t1_myisam OPEN;
+HANDLER t1_myisam READ FIRST;
+HANDLER t1_myisam CLOSE;
+BACKUP STAGE BLOCK_DDL;
+SELECT COUNT(*) FROM t1_innodb;
+HANDLER t1_innodb OPEN;
+HANDLER t1_innodb READ FIRST;
+HANDLER t1_innodb CLOSE;
+SELECT COUNT(*) FROM t1_myisam;
+HANDLER t1_myisam OPEN;
+HANDLER t1_myisam READ FIRST;
+HANDLER t1_myisam CLOSE;
+BACKUP STAGE BLOCK_COMMIT;
+SELECT COUNT(*) FROM t1_innodb;
+HANDLER t1_innodb OPEN;
+HANDLER t1_innodb READ FIRST;
+HANDLER t1_innodb CLOSE;
+SELECT COUNT(*) FROM t1_myisam;
+HANDLER t1_myisam OPEN;
+HANDLER t1_myisam READ FIRST;
+HANDLER t1_myisam CLOSE;
+BACKUP STAGE END;
+drop table t1_innodb,t1_myisam;
+
+--echo # Show the fate and impact of some SET GLOBAL tx_read_only = 1/0
+--echo # sliding through the sequence.
+
+BACKUP STAGE START;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+BACKUP STAGE FLUSH;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+BACKUP STAGE BLOCK_DDL;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+BACKUP STAGE BLOCK_COMMIT;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+BACKUP STAGE END;
+
+--echo # Show the fate and impact of some SET SESSION sql_log_bin = 0/1
+--echo # sliding through the sequence.
+COMMIT;
+SET SESSION sql_log_bin = 1;
+BACKUP STAGE START;
+SET SESSION sql_log_bin = 0;
+SET SESSION sql_log_bin = 1;
+BACKUP STAGE FLUSH;
+SET SESSION sql_log_bin = 0;
+SET SESSION sql_log_bin = 1;
+BACKUP STAGE BLOCK_DDL;
+SET SESSION sql_log_bin = 0;
+SET SESSION sql_log_bin = 1;
+BACKUP STAGE BLOCK_COMMIT;
+SET SESSION sql_log_bin = 0;
+SET SESSION sql_log_bin = 1;
+BACKUP STAGE END;
+
+--echo #----
+SET SESSION sql_log_bin = 0;
+BACKUP STAGE START;
+SET SESSION sql_log_bin = 1;
+SET SESSION sql_log_bin = 0;
+BACKUP STAGE FLUSH;
+SET SESSION sql_log_bin = 1;
+SET SESSION sql_log_bin = 0;
+BACKUP STAGE BLOCK_DDL;
+SET SESSION sql_log_bin = 1;
+SET SESSION sql_log_bin = 0;
+BACKUP STAGE BLOCK_COMMIT;
+SET SESSION sql_log_bin = 1;
+SET SESSION sql_log_bin = 0;
+BACKUP STAGE END;
+SET SESSION sql_log_bin = 1;
+
+--echo #-----------------------------------------------------------------------
+--echo # BACKUP STAGE statements are not allowed in stored routines
+--echo #-----------------------------------------------------------------------
+
+CREATE TABLE t1 (col1 INT);
+
+delimiter |;
+--error ER_SP_BADSTATEMENT
+CREATE PROCEDURE p1()
+BEGIN
+ BACKUP STAGE START;
+ BACKUP STAGE FLUSH;
+ BACKUP STAGE BLOCK_DDL;
+ BACKUP STAGE BLOCK_COMMIT;
+ BACKUP STAGE END;
+END|
+
+--error ER_SP_BADSTATEMENT
+CREATE FUNCTION f1 (s CHAR(20)) RETURNS INT DETERMINISTIC
+BEGIN
+ BACKUP STAGE START;
+ BACKUP STAGE FLUSH;
+ BACKUP STAGE BLOCK_DDL;
+ BACKUP STAGE BLOCK_COMMIT;
+ BACKUP STAGE END;
+ RETURN 1;
+END|
+
+--error ER_SP_BADSTATEMENT
+CREATE TRIGGER tr1 BEFORE INSERT ON t1 FOR EACH ROW
+BEGIN
+ BACKUP STAGE START;
+ BACKUP STAGE FLUSH;
+ BACKUP STAGE BLOCK_DDL;
+ BACKUP STAGE BLOCK_COMMIT;
+ BACKUP STAGE END;
+END|
+
+delimiter ;|
+DROP TABLE t1;
+
+--echo #-----------------------------------------------------------------------
+--echo # Check BACKUP status variables
+--echo #-----------------------------------------------------------------------
+
+SET SESSION lock_wait_timeout = 1;
+
+FLUSH STATUS;
+
+# MDEV-5336 introduces the status variable Com_backup
+--echo # Show how the status variable 'Com_backup' changes after BACKUP STAGE ..
+
+SHOW STATUS LIKE 'Com_backup';
+BACKUP STAGE START;
+SHOW STATUS LIKE 'Com_backup';
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE START;
+SHOW STATUS LIKE 'Com_backup';
+BACKUP STAGE FLUSH;
+SHOW STATUS LIKE 'Com_backup';
+BACKUP STAGE BLOCK_DDL;
+SHOW STATUS LIKE 'Com_backup';
+BACKUP STAGE BLOCK_COMMIT;
+SHOW STATUS LIKE 'Com_backup';
+BACKUP STAGE END;
+
+--echo # If the backup lock is taken by the current connection, then
+--echo # - DML modifying some permanent table is not allowed
+
+CREATE TABLE t1_innodb (col1 INT) ENGINE = InnoDB;
+CREATE TABLE t1_myisam (col1 INT) ENGINE = MyISAM;
+
+BACKUP STAGE START;
+SET AUTOCOMMIT = 0;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+INSERT INTO t1_innodb SET col1 = 1;
+SET AUTOCOMMIT = 1;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+INSERT INTO t1_innodb SET col1 = 1;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+INSERT INTO t1_myisam SET col1 = 1;
+--echo # - DDL creating or renaming a permanent table or a procedure etc.
+--echo # is not allowed.
+--error ER_BACKUP_LOCK_IS_ACTIVE
+CREATE TABLE throw_away (col1 INT) ENGINE = InnoDB;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+RENAME TABLE t1_innodb To throw_away;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+CREATE PROCEDURE p1() SELECT 13;
+--error ER_BACKUP_LOCK_IS_ACTIVE
+CREATE PROCEDURE p1() SELECT 13;
+BACKUP STAGE END;
+DROP TABLE t1_innodb;
+DROP TABLE t1_myisam;
+
+--echo #
+--echo # Creating and modifying TEMPORARY TABLES are allowed
+--echo #
+
+BACKUP STAGE START;
+BACKUP STAGE BLOCK_DDL;
+CREATE TEMPORARY TABLE tmp (col1 INT);
+DROP TEMPORARY TABLE tmp;
+
+CREATE TEMPORARY TABLE t_temporary_innodb (col1 INT) ENGINE = InnoDB;
+CREATE TEMPORARY TABLE t_temporary_myisam (col1 INT) ENGINE = MyISAM;
+--echo # - DML modifying that temporary table is allowed.
+INSERT INTO t_temporary_innodb SET col1 = 1;
+SELECT COUNT(*) FROM t_temporary_innodb;
+INSERT INTO t_temporary_myisam SET col1 = 1;
+SELECT COUNT(*) FROM t_temporary_myisam;
+BACKUP STAGE END;
+
+--echo # Show the fate and impact of some auto committed INSERT into temporary
+--echo # table sliding through the sequence.
+SET AUTOCOMMIT = 1;
+BACKUP STAGE START;
+INSERT INTO t_temporary_innodb SET col1 = 1;
+INSERT INTO t_temporary_myisam SET col1 = 1;
+BACKUP STAGE FLUSH;
+INSERT INTO t_temporary_innodb SET col1 = 1;
+INSERT INTO t_temporary_myisam SET col1 = 1;
+BACKUP STAGE BLOCK_DDL;
+INSERT INTO t_temporary_innodb SET col1 = 1;
+INSERT INTO t_temporary_myisam SET col1 = 1;
+BACKUP STAGE BLOCK_COMMIT;
+INSERT INTO t_temporary_innodb SET col1 = 1;
+INSERT INTO t_temporary_myisam SET col1 = 1;
+BACKUP STAGE END;
+SELECT COUNT(*) FROM t_temporary_innodb;
+
+--echo # Show the fate and impact of some DROP/CREATE TEMPORARY TABLE sliding
+--echo # through the sequence.
+SET AUTOCOMMIT = 1;
+BACKUP STAGE START;
+DROP TEMPORARY TABLE t_temporary_innodb;
+CREATE TEMPORARY TABLE t_temporary_innodb (col1 INT) ENGINE = InnoDB;
+BACKUP STAGE FLUSH;
+DROP TEMPORARY TABLE t_temporary_innodb;
+CREATE TEMPORARY TABLE t_temporary_innodb (col1 INT) ENGINE = InnoDB;
+BACKUP STAGE BLOCK_DDL;
+DROP TEMPORARY TABLE t_temporary_innodb;
+CREATE TEMPORARY TABLE t_temporary_innodb (col1 INT) ENGINE = InnoDB;
+BACKUP STAGE BLOCK_COMMIT;
+DROP TEMPORARY TABLE t_temporary_innodb;
+CREATE TEMPORARY TABLE t_temporary_innodb (col1 INT) ENGINE = InnoDB;
+BACKUP STAGE END;
+--echo # Show that even more DDL on the temporary table is allowed.
+BACKUP STAGE START;
+TRUNCATE t_temporary_innodb;
+ALTER TABLE t_temporary_innodb ADD COLUMN col2 INT;
+ALTER TABLE t_temporary_innodb ADD KEY idx(col2);
+BACKUP STAGE END;
diff --git a/mysql-test/main/backup_lock.result b/mysql-test/main/backup_lock.result
new file mode 100644
index 00000000000..95b2f520d90
--- /dev/null
+++ b/mysql-test/main/backup_lock.result
@@ -0,0 +1,219 @@
+#
+# Testing which locks we get from all stages
+#
+BACKUP STAGE START;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_START Backup lock
+BACKUP STAGE FLUSH;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_FLUSH Backup lock
+BACKUP STAGE BLOCK_DDL;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_WAIT_DDL Backup lock
+BACKUP STAGE BLOCK_COMMIT;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_WAIT_COMMIT Backup lock
+BACKUP STAGE END;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+#
+# testing BACKUP STAGE LOCK's
+#
+connect con1,localhost,root,,;
+connect con2,localhost,root,,;
+connection default;
+#
+# testing if BACKUP STAGE FLUSH causes deadlocks with ALTER TABLE
+#
+create table t1 (a int) engine=innodb;
+start transaction;
+insert into t1 values (1);
+connection con1;
+alter table t1 add column (j int), algorithm copy;
+connection con2;
+backup stage start;
+backup stage flush;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_DDL Backup lock
+MDL_BACKUP_FLUSH Backup lock
+MDL_SHARED_WRITE Table metadata lock test t1
+MDL_SHARED_UPGRADABLE Table metadata lock test t1
+MDL_INTENTION_EXCLUSIVE Schema metadata lock test
+SET STATEMENT max_statement_time=1 FOR backup stage block_ddl;
+ERROR 70100: Query execution was interrupted (max_statement_time exceeded)
+backup stage block_ddl;
+connection default;
+commit;
+SELECT * FROM t1;
+a
+1
+SET STATEMENT lock_wait_timeout=0 FOR INSERT INTO t1 values (2);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+INSERT INTO t1 values (2,0);;
+connection con2;
+backup stage end;
+connection con1;
+connection default;
+select * from t1;
+a j
+1 NULL
+2 0
+drop table t1;
+# Test with inline alter table, which doesn't block block_commit
+create table t1 (a int) engine=innodb;
+start transaction;
+insert into t1 values (1);
+connection con1;
+alter table t1 add column (j int);
+connection con2;
+backup stage start;
+backup stage flush;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_ALTER_COPY Backup lock
+MDL_BACKUP_FLUSH Backup lock
+MDL_SHARED_WRITE Table metadata lock test t1
+MDL_SHARED_UPGRADABLE Table metadata lock test t1
+MDL_INTENTION_EXCLUSIVE Schema metadata lock test
+backup stage block_ddl;
+backup stage block_commit;
+connection default;
+SELECT * FROM t1;
+a
+1
+commit;
+connection con2;
+backup stage end;
+connection con1;
+connection default;
+drop table t1;
+#
+# testing if BACKUP STAGE FLUSH causes deadlocks with DROP TABLE
+#
+create table t1 (a int) engine=innodb;
+start transaction;
+insert into t1 values (1);
+connection con1;
+SET STATEMENT lock_wait_timeout=0 FOR DROP TABLE t1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+DROP TABLE t1;
+connection con2;
+backup stage start;
+backup stage flush;
+SET STATEMENT lock_wait_timeout=0 FOR SELECT * FROM t1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+backup stage block_ddl;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_WAIT_DDL Backup lock
+MDL_SHARED_WRITE Table metadata lock test t1
+MDL_INTENTION_EXCLUSIVE Schema metadata lock test
+backup stage end;
+connection default;
+commit;
+connection con1;
+connection default;
+#
+# Check if backup stage block_ddl + concurrent drop table blocks select
+#
+create table t1 (a int) engine=innodb;
+backup stage start;
+backup stage block_ddl;
+connection con1;
+DROP TABLE t1;
+connection con2;
+connection con2;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_WAIT_DDL Backup lock
+SELECT * FROM t1;
+a
+connection default;
+backup stage end;
+connection con1;
+connection default;
+#
+# Check if backup stage block_ddl overrides ddl lock for drop table
+#
+create table t1 (a int) engine=innodb;
+start transaction;
+insert into t1 values (1);
+connection con1;
+SET STATEMENT lock_wait_timeout=0 FOR DROP TABLE t1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+DROP TABLE t1;
+connection con2;
+backup stage start;
+backup stage flush;
+backup stage block_ddl;
+connection default;
+commit;
+connection con2;
+backup stage end;
+connection con1;
+connection default;
+#
+# Check if BACKUP STAGE BLOCK_COMMIT blocks commit
+#
+create table t1 (a int) engine=innodb;
+start transaction;
+insert into t1 values (1);
+connection con1;
+backup stage start;
+backup stage block_commit;
+connection default;
+commit;
+connection con1;
+backup stage end;
+connection default;
+select * from t1;
+a
+1
+drop table t1;
+disconnect con1;
+disconnect con2;
+#
+# Test backup stage and flush tables
+#
+BACKUP STAGE START ;
+BACKUP STAGE BLOCK_DDL ;
+FLUSH TABLES;
+CREATE TEMPORARY TABLE t12345678_tmp (col1 INT);
+drop table t12345678_tmp;
+BACKUP STAGE END;
+#
+# Test BACKUP STAGES with lock timeouts
+#
+SET GLOBAL lock_wait_timeout=0;
+CREATE TABLE t_permanent_innodb (col1 INT) ENGINE = InnoDB;
+CREATE TABLE t_permanent_myisam (col1 INT) ENGINE = MyISAM;
+INSERT INTO t_permanent_innodb SET col1 = 1;
+INSERT INTO t_permanent_myisam SET col1 = 1;
+CREATE TABLE t_con1_innodb (col1 INT) ENGINE = InnoDB;
+CREATE TABLE t_con1_myisam (col1 INT) ENGINE = MyISAM;
+connect con1,localhost,root,,;
+SET AUTOCOMMIT = 0;
+connection default;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+connection con1;
+UPDATE t_permanent_innodb SET col1 = 8;
+UPDATE t_permanent_myisam SET col1 = 8;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+DROP TABLE t_con1_innodb;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+DROP TABLE t_con1_myisam;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection default;
+BACKUP STAGE END;
+DROP TABLE t_permanent_myisam, t_permanent_innodb;
+DROP TABLE t_con1_innodb, t_con1_myisam;
+disconnect con1;
+set global lock_wait_timeout=default;
diff --git a/mysql-test/main/backup_lock.test b/mysql-test/main/backup_lock.test
new file mode 100644
index 00000000000..d6db7a6364e
--- /dev/null
+++ b/mysql-test/main/backup_lock.test
@@ -0,0 +1,286 @@
+########################################################################
+# Tests BACKUP STAGE locking
+########################################################################
+
+--source include/have_innodb.inc
+--source include/have_metadata_lock_info.inc
+--source include/not_embedded.inc
+
+--echo #
+--echo # Testing which locks we get from all stages
+--echo #
+
+BACKUP STAGE START;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+BACKUP STAGE FLUSH;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+BACKUP STAGE BLOCK_DDL;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+BACKUP STAGE BLOCK_COMMIT;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+BACKUP STAGE END;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+
+--echo #
+--echo # testing BACKUP STAGE LOCK's
+--echo #
+
+# The following connections are used by several of the tests below
+connect (con1,localhost,root,,);
+connect (con2,localhost,root,,);
+connection default;
+
+--echo #
+--echo # testing if BACKUP STAGE FLUSH causes deadlocks with ALTER TABLE
+--echo #
+
+create table t1 (a int) engine=innodb;
+
+start transaction;
+# Acquires MDL lock
+insert into t1 values (1);
+
+connection con1;
+# Waits on MDL
+--send alter table t1 add column (j int), algorithm copy
+
+connection con2;
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table metadata lock";
+--source include/wait_condition.inc
+backup stage start;
+backup stage flush;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+#
+# Do first test with max_statement_time, other tests later are done with
+# lock_wait_timeout. This is mostly to ensure that both methods works
+#
+--error ER_STATEMENT_TIMEOUT
+SET STATEMENT max_statement_time=1 FOR backup stage block_ddl;
+--send backup stage block_ddl
+
+connection default;
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for backup lock";
+--source include/wait_condition.inc
+commit;
+# The following select works because alter table is waiting for DDL lock
+SELECT * FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+SET STATEMENT lock_wait_timeout=0 FOR INSERT INTO t1 values (2);
+--send INSERT INTO t1 values (2,0);
+connection con2;
+--reap # BLOCK_DDL
+backup stage end;
+connection con1;
+--reap # ALTER TABLE
+connection default;
+--reap # INSERT
+select * from t1;
+drop table t1;
+
+--echo # Test with inline alter table, which doesn't block block_commit
+
+create table t1 (a int) engine=innodb;
+
+start transaction;
+# Acquires MDL lock
+insert into t1 values (1);
+
+connection con1;
+# Waits on MDL
+--send alter table t1 add column (j int)
+
+connection con2;
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table metadata lock";
+--source include/wait_condition.inc
+backup stage start;
+backup stage flush;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+backup stage block_ddl;
+backup stage block_commit;
+connection default;
+SELECT * FROM t1;
+--send commit
+connection con2;
+let $wait_condition=
+  select count(*) = 1 from information_schema.processlist
+  where state = "Waiting for backup lock";
+--source include/wait_condition.inc
+backup stage end;
+connection con1;
+--reap # ALTER TABLE
+connection default;
+--reap # commit
+drop table t1;
+
+--echo #
+--echo # testing if BACKUP STAGE FLUSH causes deadlocks with DROP TABLE
+--echo #
+
+create table t1 (a int) engine=innodb;
+start transaction;
+# Acquires MDL lock
+insert into t1 values (1);
+
+connection con1;
+# Waits on MDL
+--error ER_LOCK_WAIT_TIMEOUT
+SET STATEMENT lock_wait_timeout=0 FOR DROP TABLE t1;
+--send DROP TABLE t1
+
+connection con2;
+backup stage start;
+backup stage flush;
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table metadata lock";
+--source include/wait_condition.inc
+--error ER_LOCK_WAIT_TIMEOUT
+SET STATEMENT lock_wait_timeout=0 FOR SELECT * FROM t1;
+
+backup stage block_ddl;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+backup stage end;
+
+connection default;
+commit;
+connection con1;
+--reap # DROP TABLE
+connection default;
+
+--echo #
+--echo # Check if backup stage block_ddl + concurrent drop table blocks select
+--echo #
+
+create table t1 (a int) engine=innodb;
+backup stage start;
+backup stage block_ddl;
+connection con1;
+--send DROP TABLE t1
+connection con2;
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for backup lock";
+--source include/wait_condition.inc
+connection con2;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+# Check that select's are not blocked
+SELECT * FROM t1;
+connection default;
+backup stage end;
+connection con1;
+--reap
+connection default;
+
+--echo #
+--echo # Check if backup stage block_ddl overrides ddl lock for drop table
+--echo #
+
+create table t1 (a int) engine=innodb;
+start transaction;
+# Acquires MDL lock
+insert into t1 values (1);
+
+connection con1;
+# Waits on MDL
+--error ER_LOCK_WAIT_TIMEOUT
+SET STATEMENT lock_wait_timeout=0 FOR DROP TABLE t1;
+--send DROP TABLE t1
+
+connection con2;
+backup stage start;
+backup stage flush;
+backup stage block_ddl;
+connection default;
+commit;
+connection con2;
+backup stage end;
+connection con1;
+--reap # DROP TABLE
+connection default;
+
+--echo #
+--echo # Check if BACKUP STAGE BLOCK_COMMIT blocks commit
+--echo #
+
+create table t1 (a int) engine=innodb;
+start transaction;
+# Acquires MDL lock
+insert into t1 values (1);
+
+connection con1;
+backup stage start;
+backup stage block_commit;
+connection default;
+--send commit
+connection con1;
+let $wait_condition=
+  select count(*) = 1 from information_schema.processlist
+  where state = "Waiting for backup lock";
+--source include/wait_condition.inc
+backup stage end;
+connection default;
+--reap # commit
+select * from t1;
+drop table t1;
+
+#
+# End of tests using con1 and con2
+#
+disconnect con1;
+disconnect con2;
+
+--echo #
+--echo # Test backup stage and flush tables
+--echo #
+
+BACKUP STAGE START ;
+BACKUP STAGE BLOCK_DDL ;
+FLUSH TABLES;
+CREATE TEMPORARY TABLE t12345678_tmp (col1 INT);
+drop table t12345678_tmp;
+BACKUP STAGE END;
+
+--echo #
+--echo # Test BACKUP STAGES with lock timeouts
+--echo #
+
+SET GLOBAL lock_wait_timeout=0;
+CREATE TABLE t_permanent_innodb (col1 INT) ENGINE = InnoDB;
+CREATE TABLE t_permanent_myisam (col1 INT) ENGINE = MyISAM;
+INSERT INTO t_permanent_innodb SET col1 = 1;
+
+INSERT INTO t_permanent_myisam SET col1 = 1;
+CREATE TABLE t_con1_innodb (col1 INT) ENGINE = InnoDB;
+CREATE TABLE t_con1_myisam (col1 INT) ENGINE = MyISAM;
+
+--connect(con1,localhost,root,,)
+SET AUTOCOMMIT = 0;
+
+--connection default
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+
+--connection con1
+UPDATE t_permanent_innodb SET col1 = 8;
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE t_permanent_myisam SET col1 = 8;
+--error ER_LOCK_WAIT_TIMEOUT
+DROP TABLE t_con1_innodb;
+
+--error ER_LOCK_WAIT_TIMEOUT
+DROP TABLE t_con1_myisam;
+
+--connection default
+BACKUP STAGE END;
+DROP TABLE t_permanent_myisam, t_permanent_innodb;
+DROP TABLE t_con1_innodb, t_con1_myisam;
+--disconnect con1
+set global lock_wait_timeout=default;
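The blocking scenarios above are observed through two information_schema tables; the same queries can be useful interactively when diagnosing a session that holds or waits for the backup lock (the WHERE filters shown are just one plausible way to narrow the output):

SELECT lock_mode, lock_type, table_schema, table_name
  FROM information_schema.metadata_lock_info
 WHERE lock_type = 'Backup lock';

SELECT id, user, command, state, info
  FROM information_schema.processlist
 WHERE state = 'Waiting for backup lock';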
diff --git a/mysql-test/main/backup_lock_debug.result b/mysql-test/main/backup_lock_debug.result
new file mode 100644
index 00000000000..8832d9cd3e7
--- /dev/null
+++ b/mysql-test/main/backup_lock_debug.result
@@ -0,0 +1,28 @@
+#
+# Make sure pending LOCK TABLES doesn't block BACKUP STAGE
+#
+CREATE TABLE t1(a INT);
+LOCK TABLE t1 READ;
+#
+connect con1,localhost,root,,;
+SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL ready';
+LOCK TABLE t1 WRITE;
+#
+connect con2,localhost,root,,;
+SET DEBUG_SYNC= 'now WAIT_FOR ready';
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE END;
+disconnect con2;
+#
+connection default;
+UNLOCK TABLES;
+#
+connection con1;
+UNLOCK TABLES;
+disconnect con1;
+#
+connection default;
+DROP TABLE t1;
+SET DEBUG_SYNC= 'RESET';
diff --git a/mysql-test/main/backup_lock_debug.test b/mysql-test/main/backup_lock_debug.test
new file mode 100644
index 00000000000..8cf492b3404
--- /dev/null
+++ b/mysql-test/main/backup_lock_debug.test
@@ -0,0 +1,40 @@
+########################################################################
+# Tests for BACKUP STAGE locking that requires debug.
+########################################################################
+
+--source include/have_debug_sync.inc
+
+--echo #
+--echo # Make sure pending LOCK TABLES doesn't block BACKUP STAGE
+--echo #
+CREATE TABLE t1(a INT);
+LOCK TABLE t1 READ;
+
+--echo #
+connect (con1,localhost,root,,);
+SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL ready';
+--send LOCK TABLE t1 WRITE
+
+--echo #
+connect (con2,localhost,root,,);
+SET DEBUG_SYNC= 'now WAIT_FOR ready';
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE END;
+disconnect con2;
+
+--echo #
+connection default;
+UNLOCK TABLES;
+
+--echo #
+connection con1;
+reap;
+UNLOCK TABLES;
+disconnect con1;
+
+--echo #
+connection default;
+DROP TABLE t1;
+SET DEBUG_SYNC= 'RESET';
diff --git a/mysql-test/main/backup_locks.result b/mysql-test/main/backup_locks.result
new file mode 100644
index 00000000000..a3a66937cf9
--- /dev/null
+++ b/mysql-test/main/backup_locks.result
@@ -0,0 +1,46 @@
+#
+# Test lock taken
+#
+BACKUP LOCK test.t1;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_SHARED_HIGH_PRIO Table metadata lock test t1
+BACKUP UNLOCK;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+BACKUP LOCK t1;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_SHARED_HIGH_PRIO Table metadata lock test t1
+BACKUP UNLOCK;
+BACKUP LOCK non_existing.t1;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_SHARED_HIGH_PRIO Table metadata lock non_existing t1
+BACKUP UNLOCK;
+#
+# Test that backup lock protects against ddl
+#
+connect con1,localhost,root,,;
+connection default;
+create table t1 (a int) engine=innodb;
+insert into t1 values (1);
+backup lock t1;
+select * from t1;
+a
+1
+connection con1;
+drop table t1;
+connection default;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_SHARED_HIGH_PRIO Table metadata lock test t1
+MDL_INTENTION_EXCLUSIVE Schema metadata lock test
+select * from t1;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+backup unlock;
+connection con1;
+connection default;
+disconnect con1;
+show tables;
+Tables_in_test
diff --git a/mysql-test/main/backup_locks.test b/mysql-test/main/backup_locks.test
new file mode 100644
index 00000000000..21b67100506
--- /dev/null
+++ b/mysql-test/main/backup_locks.test
@@ -0,0 +1,50 @@
+########################################################################
+# Tests BACKUP LOCK / BACKUP UNLOCK locking
+########################################################################
+
+--source include/have_innodb.inc
+--source include/have_metadata_lock_info.inc
+--source include/not_embedded.inc
+
+--echo #
+--echo # Test lock taken
+--echo #
+
+BACKUP LOCK test.t1;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+BACKUP UNLOCK;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+BACKUP LOCK t1;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+BACKUP UNLOCK;
+BACKUP LOCK non_existing.t1;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+BACKUP UNLOCK;
+
+--echo #
+--echo # Test that backup lock protects against ddl
+--echo #
+
+connect (con1,localhost,root,,);
+
+connection default;
+create table t1 (a int) engine=innodb;
+insert into t1 values (1);
+backup lock t1;
+select * from t1;
+connection con1;
+--send drop table t1
+connection default;
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table metadata lock";
+--source include/wait_condition.inc
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+--error ER_LOCK_DEADLOCK
+select * from t1;
+backup unlock;
+connection con1;
+--reap
+connection default;
+disconnect con1;
+show tables;
diff --git a/mysql-test/main/backup_priv.result b/mysql-test/main/backup_priv.result
new file mode 100644
index 00000000000..4169f58f40f
--- /dev/null
+++ b/mysql-test/main/backup_priv.result
@@ -0,0 +1,40 @@
+#
+# Test privileges for BACKUP STAGES
+#
+set sql_mode="";
+GRANT RELOAD ON *.* TO user1@localhost;
+GRANT CREATE, DROP ON *.* TO user2@localhost;
+connect con1, localhost, user1;
+BACKUP STAGE START;
+BACKUP STAGE END;
+# change_user must release backup lock
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+SELECT lock_mode FROM information_schema.metadata_lock_info WHERE lock_type='Backup lock';
+lock_mode
+MDL_BACKUP_FLUSH
+SELECT lock_mode FROM information_schema.metadata_lock_info WHERE lock_type='Backup lock';
+lock_mode
+disconnect con1;
+connection default;
+# A very low privileged user (user2) cannot acquire the backup lock
+connect con1, localhost, user2;
+BACKUP STAGE START;
+ERROR 42000: Access denied; you need (at least one of) the RELOAD privilege(s) for this operation
+BACKUP STAGE FLUSH;
+ERROR 42000: Access denied; you need (at least one of) the RELOAD privilege(s) for this operation
+BACKUP STAGE BLOCK_DDL;
+ERROR 42000: Access denied; you need (at least one of) the RELOAD privilege(s) for this operation
+BACKUP STAGE BLOCK_COMMIT;
+ERROR 42000: Access denied; you need (at least one of) the RELOAD privilege(s) for this operation
+BACKUP STAGE END;
+ERROR 42000: Access denied; you need (at least one of) the RELOAD privilege(s) for this operation
+disconnect con1;
+connection default;
+DROP USER user1@localhost, user2@localhost;
+#
+# Test using BACKUP STAGES in a SP
+#
+create procedure foo42()
+BACKUP STAGE START;
+ERROR 0A000: BACKUP STAGE is not allowed in stored procedures
diff --git a/mysql-test/main/backup_priv.test b/mysql-test/main/backup_priv.test
new file mode 100644
index 00000000000..93b69af0b67
--- /dev/null
+++ b/mysql-test/main/backup_priv.test
@@ -0,0 +1,52 @@
+--source include/have_innodb.inc
+--source include/not_embedded.inc
+--source include/have_metadata_lock_info.inc
+
+--echo #
+--echo # Test privileges for BACKUP STAGES
+--echo #
+
+set sql_mode="";
+
+GRANT RELOAD ON *.* TO user1@localhost;
+GRANT CREATE, DROP ON *.* TO user2@localhost;
+
+--connect(con1, localhost, user1)
+BACKUP STAGE START;
+BACKUP STAGE END;
+--echo # change_user must release backup lock
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+SELECT lock_mode FROM information_schema.metadata_lock_info WHERE lock_type='Backup lock';
+change_user user2;
+SELECT lock_mode FROM information_schema.metadata_lock_info WHERE lock_type='Backup lock';
+--disconnect con1
+--source include/wait_until_disconnected.inc
+--connection default
+
+--echo # A very low privileged user (-> con1) cannot acquire the backup lock
+
+--connect(con1, localhost, user2)
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+BACKUP STAGE START;
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+BACKUP STAGE FLUSH;
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+BACKUP STAGE BLOCK_DDL;
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+BACKUP STAGE BLOCK_COMMIT;
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+BACKUP STAGE END;
+--disconnect con1
+--source include/wait_until_disconnected.inc
+
+--connection default
+DROP USER user1@localhost, user2@localhost;
+
+--echo #
+--echo # Test using BACKUP STAGES in a SP
+--echo #
+
+--error ER_SP_BADSTATEMENT
+create procedure foo42()
+ BACKUP STAGE START;
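
As the test above checks, BACKUP STAGE is gated on the RELOAD privilege; a minimal sketch of provisioning a dedicated backup account (the user name backup_user is illustrative):

    GRANT RELOAD ON *.* TO backup_user@localhost;   # RELOAD alone is enough to run BACKUP STAGE START ... END
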
diff --git a/mysql-test/main/backup_stages.result b/mysql-test/main/backup_stages.result
new file mode 100644
index 00000000000..caea1fda0b9
--- /dev/null
+++ b/mysql-test/main/backup_stages.result
@@ -0,0 +1,335 @@
+#-----------------------------------------------------------------------
+# Multi-threaded tests
+#-----------------------------------------------------------------------
+# Show that only one connection can hold the backup lock.
+connection default;
+BACKUP STAGE START;
+connect con1,localhost,root,,;
+SET STATEMENT lock_wait_timeout=0 FOR BACKUP STAGE START;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+BACKUP STAGE START;
+connection default;
+# Show that connection con1 has to wait for the backup lock and how
+# this is represented in the processlist.
+SET @con1_id = <con1_id>;
+SELECT ID, USER, COMMAND, STATE, INFO, STAGE, MAX_STAGE, INFO_BINARY
+FROM information_schema.processlist WHERE id = @con1_id;
+ID USER COMMAND STATE INFO STAGE MAX_STAGE INFO_BINARY
+<con1_id> root Query Waiting for backup lock BACKUP STAGE START 0 0 BACKUP STAGE START
+BACKUP STAGE END;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_START Backup lock
+connection con1;
+# The connection default has released the backup lock.
+# And so the current connection con1 can reap its BACKUP STAGE START
+connect con2,localhost,root,,;
+# The connection con2 cannot continue the work of con1 by issuing the
+# next BACKUP STAGE FLUSH.
+BACKUP STAGE FLUSH;
+ERROR HY000: You must start backup with "BACKUP STAGE START"
+BACKUP STAGE START;
+connection default;
+SET @con2_id = <con2_id>;
+# Connection con2 waits for the backup lock held by con1.
+SELECT ID, USER, COMMAND, STATE, INFO, STAGE, MAX_STAGE, INFO_BINARY
+FROM information_schema.processlist WHERE id = @con2_id;
+ID USER COMMAND STATE INFO STAGE MAX_STAGE INFO_BINARY
+<con2_id> root Query Waiting for backup lock BACKUP STAGE START 0 0 BACKUP STAGE START
+disconnect con1;
+connection con2;
+# Connection con1 frees the backup lock it held by disconnecting.
+# So connection con2 gets the backup lock.
+connect con3,localhost,root,,;
+BACKUP STAGE START;
+connection default;
+SET @con3_id = <con3_id>;
+# Connection con3 waits for the backup lock held by con2.
+SELECT ID, USER, COMMAND, STATE, INFO, STAGE, MAX_STAGE, INFO_BINARY
+FROM information_schema.processlist WHERE id = @con3_id;
+ID USER COMMAND STATE INFO STAGE MAX_STAGE INFO_BINARY
+<con3_id> root Query Waiting for backup lock BACKUP STAGE START 0 0 BACKUP STAGE START
+KILL CONNECTION @con2_id;
+connection con3;
+# Connection con2 frees the backup lock it held by getting killed.
+# So connection con3 gets the backup lock.
+BACKUP STAGE END;
+disconnect con3;
+connection default;
+CREATE TABLE t_permanent_innodb (col1 INT) ENGINE = InnoDB;
+INSERT INTO t_permanent_innodb SET col1 = 1;
+CREATE TABLE t_permanent_myisam (col1 INT) ENGINE = MyISAM;
+INSERT INTO t_permanent_myisam SET col1 = 1;
+connect backup,localhost,root,,;
+connect con11,localhost,root,,;
+SET AUTOCOMMIT = 0;
+set session lock_wait_timeout=0;
+connect con12,localhost,root,,;
+SET AUTOCOMMIT = 1;
+# Between (connection backup) BACKUP STAGE START and FLUSH
+# no restrictions for concurrent sessions regarding DDL or DML
+# affecting transactional/non transactional permanent tables.
+connection backup;
+BACKUP STAGE START;
+connection con11;
+UPDATE t_permanent_innodb SET col1 = 2;
+UPDATE t_permanent_myisam SET col1 = 2;
+SELECT COUNT(*) FROM t_permanent_innodb;
+COUNT(*)
+1
+HANDLER t_permanent_innodb OPEN;
+HANDLER t_permanent_innodb READ FIRST;
+col1
+2
+HANDLER t_permanent_innodb CLOSE;
+SELECT COUNT(*) FROM t_permanent_myisam;
+COUNT(*)
+1
+HANDLER t_permanent_myisam OPEN;
+HANDLER t_permanent_myisam READ FIRST;
+col1
+2
+HANDLER t_permanent_myisam CLOSE;
+CREATE TABLE t_con1_innodb (col1 INT) ENGINE = InnoDB;
+CREATE TABLE t_con1_myisam (col1 INT) ENGINE = InnoDB;
+ALTER TABLE t_permanent_innodb ADD COLUMN col2 INT;
+ALTER TABLE t_permanent_myisam ADD COLUMN col2 INT;
+CREATE OR REPLACE VIEW v_some_view AS SELECT * FROM t_permanent_innodb;
+CREATE OR REPLACE VIEW v_some_view AS SELECT * FROM t_permanent_myisam;
+connection con12;
+UPDATE t_permanent_innodb SET col1 = 3;
+UPDATE t_permanent_myisam SET col1 = 3;
+# Between (connection backup) BACKUP STAGE FLUSH and BLOCK_DDL
+# concurrent sessions
+# - can change transactional permanent tables with DDL and DML
+# - can run DROP/CREATE transactional/non transactional TABLE
+# - cannot modify non transactional permanent tables with DDL or DML
+connection backup;
+BACKUP STAGE FLUSH;
+connection con11;
+UPDATE t_permanent_innodb SET col1 = 4;
+SET STATEMENT lock_wait_timeout=0 FOR UPDATE t_permanent_myisam SET col1 = 4;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SELECT COUNT(*) FROM t_permanent_innodb;
+COUNT(*)
+1
+HANDLER t_permanent_innodb OPEN;
+HANDLER t_permanent_innodb READ FIRST;
+col1 col2
+4 NULL
+HANDLER t_permanent_innodb CLOSE;
+SELECT COUNT(*) FROM t_permanent_myisam;
+COUNT(*)
+1
+HANDLER t_permanent_myisam OPEN;
+HANDLER t_permanent_myisam READ FIRST;
+col1 col2
+3 NULL
+HANDLER t_permanent_myisam CLOSE;
+DROP TABLE t_con1_innodb;
+DROP TABLE t_con1_myisam;
+CREATE TABLE t_con1_innodb (col1 INT) ENGINE = InnoDB;
+CREATE TABLE t_con1_myisam (col1 INT) ENGINE = InnoDB;
+ALTER TABLE t_permanent_innodb ADD COLUMN col3 INT;
+SET STATEMENT lock_wait_timeout=0 FOR ALTER TABLE t_permanent_myisam ADD COLUMN col3 INT;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+CREATE OR REPLACE VIEW v_some_view AS SELECT * FROM t_permanent_innodb;
+CREATE OR REPLACE VIEW v_some_view AS SELECT * FROM t_permanent_myisam;
+connection con12;
+UPDATE t_permanent_innodb SET col1 = 5;
+# Between (connection backup) BACKUP STAGE BLOCK_DDL and BLOCK_COMMIT
+# concurrent sessions
+# - can change transactional permanent tables with DML
+# - cannot run DDL
+# - cannot change non transactional permanent tables with DML
+connection backup;
+BACKUP STAGE BLOCK_DDL;
+connection con11;
+UPDATE t_permanent_innodb SET col1 = 6;
+UPDATE t_permanent_myisam SET col1 = 6;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SELECT COUNT(*) FROM t_permanent_innodb;
+COUNT(*)
+1
+HANDLER t_permanent_innodb OPEN;
+HANDLER t_permanent_innodb READ FIRST;
+col1 col2 col3
+6 NULL NULL
+HANDLER t_permanent_innodb CLOSE;
+SELECT COUNT(*) FROM t_permanent_myisam;
+COUNT(*)
+1
+HANDLER t_permanent_myisam OPEN;
+HANDLER t_permanent_myisam READ FIRST;
+col1 col2
+3 NULL
+HANDLER t_permanent_myisam CLOSE;
+DROP TABLE t_con1_innodb;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+CREATE TABLE throw_away (col1 INT) ENGINE = InnoDB;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+ALTER TABLE t_permanent_innodb ADD COLUMN col4 INT;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+CREATE OR REPLACE VIEW v_some_view AS SELECT * FROM t_permanent_innodb;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+CREATE OR REPLACE VIEW v_some_view AS SELECT * FROM t_permanent_myisam;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con12;
+UPDATE t_permanent_innodb SET col1 = 7;
+# Between (connection backup) BACKUP STAGE BLOCK_COMMIT and END
+# concurrent sessions
+# - can change transactional permanent tables with DML
+# - cannot run DDL
+# - cannot change non transactional permanent tables with DML
+connection backup;
+BACKUP STAGE BLOCK_COMMIT;
+connection con11;
+UPDATE t_permanent_innodb SET col1 = 8;
+UPDATE t_permanent_myisam SET col1 = 8;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SELECT COUNT(*) FROM t_permanent_innodb;
+COUNT(*)
+1
+HANDLER t_permanent_innodb OPEN;
+HANDLER t_permanent_innodb READ FIRST;
+col1 col2 col3
+8 NULL NULL
+HANDLER t_permanent_innodb CLOSE;
+SELECT COUNT(*) FROM t_permanent_myisam;
+COUNT(*)
+1
+HANDLER t_permanent_myisam OPEN;
+HANDLER t_permanent_myisam READ FIRST;
+col1 col2
+3 NULL
+HANDLER t_permanent_myisam CLOSE;
+DROP TABLE t_con1_innodb;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+DROP TABLE t_con1_myisam;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con12;
+SET STATEMENT lock_wait_timeout=0 FOR UPDATE t_permanent_innodb SET col1 = 9;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection backup;
+BACKUP STAGE END;
+connection con11;
+COMMIT;
+SELECT * FROM t_permanent_innodb ORDER BY col1;
+col1 col2 col3
+7 NULL NULL
+SELECT * FROM t_permanent_myisam ORDER BY col1;
+col1 col2
+3 NULL
+SET AUTOCOMMIT = 0;
+SET GLOBAL tx_read_only = 1;
+connection con12;
+BACKUP STAGE START;
+BACKUP STAGE END;
+SET GLOBAL tx_read_only = 0;
+DROP VIEW v_some_view;
+DROP TABLE t_con1_innodb;
+DROP TABLE t_con1_myisam;
+# Connection backup holds the backup lock and is at some stage.
+# Connection con11 tries to LOCK TABLES or to set read_only.
+connection backup;
+BACKUP STAGE START;
+connection con11;
+# Between BACKUP STAGE START and FLUSH:
+# No restrictions for other connections around LOCK TABLES or read-only.
+LOCK TABLES t_permanent_innodb READ;
+LOCK TABLES t_permanent_myisam READ;
+LOCK TABLES t_permanent_innodb WRITE;
+LOCK TABLES t_permanent_myisam WRITE;
+UNLOCK TABLES;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+connection backup;
+BACKUP STAGE FLUSH;
+connection con11;
+# Between BACKUP STAGE FLUSH and BLOCK_DDL:
+# Connection con11, which does not hold the backup lock, cannot
+# take LOCK TABLES ... WRITE on a non-transactional table.
+LOCK TABLES t_permanent_innodb READ;
+LOCK TABLES t_permanent_myisam READ;
+LOCK TABLES t_permanent_innodb WRITE;
+LOCK TABLES t_permanent_myisam WRITE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+UNLOCK TABLES;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+connection backup;
+BACKUP STAGE BLOCK_DDL;
+connection con11;
+# Between BACKUP STAGE BLOCK_DDL and BLOCK_COMMIT:
+# Connection con11, which does not hold the backup lock, cannot
+# take LOCK TABLES ... WRITE on a transactional or non-transactional table.
+LOCK TABLES t_permanent_innodb READ;
+LOCK TABLES t_permanent_myisam READ;
+LOCK TABLES t_permanent_innodb WRITE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+LOCK TABLES t_permanent_myisam WRITE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+UNLOCK TABLES;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+connection backup;
+BACKUP STAGE BLOCK_COMMIT;
+connection con11;
+# Between BACKUP STAGE BLOCK_COMMIT and END:
+# Connection con11, which does not hold the backup lock, cannot
+# take LOCK TABLES ... WRITE on a transactional or non-transactional table.
+LOCK TABLES t_permanent_innodb READ;
+LOCK TABLES t_permanent_myisam READ;
+LOCK TABLES t_permanent_innodb WRITE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+LOCK TABLES t_permanent_myisam WRITE;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+UNLOCK TABLES;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+connection backup;
+BACKUP STAGE END;
+DROP TABLE t_permanent_innodb;
+DROP TABLE t_permanent_myisam;
+#
+# Log tables
+#
+connection backup;
+SET @old_general_log = @@general_log;
+SET @old_slow_query_log = @@slow_query_log;
+SET @old_log_output = @@log_output;
+SET GLOBAL log_output = 'TABLE';
+SET GLOBAL general_log = ON;
+SET GLOBAL slow_query_log = ON;
+connection con11;
+SET @old_long_query_time = @@SESSION.long_query_time;
+SET SESSION long_query_time = 0;
+connection backup;
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+connection con11;
+SELECT 1;
+1
+1
+connection backup;
+SELECT 1;
+1
+1
+connection con11;
+SET SESSION long_query_time = @old_long_query_time;
+connection backup;
+BACKUP STAGE END;
+SET GLOBAL log_output = @old_log_output;
+SET GLOBAL slow_query_log = @old_slow_query_log;
+SET GLOBAL general_log = @old_general_log;
+#-----------------------------------------------------------------------
+# Cleanup
+#-----------------------------------------------------------------------
+SET GLOBAL lock_wait_timeout = <old_lock_wait_timeout>;
+disconnect con2;
+disconnect con11;
+disconnect con12;
+disconnect backup;
+connection default;
diff --git a/mysql-test/main/backup_stages.test b/mysql-test/main/backup_stages.test
new file mode 100644
index 00000000000..ba9c15a1d7e
--- /dev/null
+++ b/mysql-test/main/backup_stages.test
@@ -0,0 +1,385 @@
+########################################################################
+# Test what is locked in each stage for LOCK FOR BACKUP (MDEV-5336)
+########################################################################
+
+--source include/not_embedded.inc
+# A transactional engine
+--source include/have_innodb.inc
+--source include/have_metadata_lock_info.inc
+# MyISAM is always available as the non-transactional engine.
+
+# Save the initial number of concurrent sessions.
+--source include/count_sessions.inc
+
+let $old_lock_wait_timeout = `SELECT @@global.lock_wait_timeout`;
+
+--echo #-----------------------------------------------------------------------
+--echo # Multi-threaded tests
+--echo #-----------------------------------------------------------------------
+--echo # Show that only one connection can hold the backup lock.
+
+--connection default
+let $default_id= `SELECT CONNECTION_ID()`;
+BACKUP STAGE START;
+
+# con1, root high privileged user
+--connect(con1,localhost,root,,)
+let $con1_id= `SELECT CONNECTION_ID()`;
+--error ER_LOCK_WAIT_TIMEOUT
+SET STATEMENT lock_wait_timeout=0 FOR BACKUP STAGE START;
+send BACKUP STAGE START;
+--connection default
+
+--echo # Show that connection con1 has to wait for the backup lock and how
+--echo # this is represented in the processlist.
+
+--replace_result $con1_id <con1_id>
+
+eval SET @con1_id = $con1_id;
+# Output expected here is
+# ID USER COMMAND STATE INFO INFO_BINARY
+# <con1_id> root Query Waiting for backup lock BACKUP STAGE START BACKUP STAGE START
+let $wait_condition=
+ SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST
+ WHERE STATE = "Waiting for backup lock" and INFO = "BACKUP STAGE START";
+--source include/wait_condition.inc
+--replace_column 1 <con1_id>
+eval
+SELECT ID, USER, COMMAND, STATE, INFO, STAGE, MAX_STAGE, INFO_BINARY
+FROM information_schema.processlist WHERE id = @con1_id;
+# con1 uses @@global.lock_wait_timeout
+
+BACKUP STAGE END;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+
+--connection con1
+--echo # The connection default has released the backup lock.
+--echo # And so the current connection con1 can reap its BACKUP STAGE START
+--reap
+
+# con2, root high privileged user
+--connect(con2,localhost,root,,)
+let $con2_id= `SELECT CONNECTION_ID()`;
+--echo # The connection con2 cannot continue the work of con1 by issuing the
+--echo # next BACKUP STAGE FLUSH.
+--error ER_BACKUP_NOT_RUNNING
+BACKUP STAGE FLUSH;
+send
+BACKUP STAGE START;
+
+--connection default
+--replace_result $con2_id <con2_id>
+eval SET @con2_id = $con2_id;
+--echo # Connection con2 waits for the backup lock held by con1.
+--source include/wait_condition.inc
+--replace_column 1 <con2_id>
+eval
+SELECT ID, USER, COMMAND, STATE, INFO, STAGE, MAX_STAGE, INFO_BINARY
+FROM information_schema.processlist WHERE id = @con2_id;
+
+--disconnect con1
+
+--connection con2
+--echo # Connection con1 frees the backup lock it held by disconnecting.
+--echo # So connection con2 gets the backup lock.
+--reap
+
+--connect(con3,localhost,root,,)
+let $con3_id= `SELECT CONNECTION_ID()`;
+send
+BACKUP STAGE START;
+
+--connection default
+--replace_result $con3_id <con3_id>
+eval SET @con3_id = $con3_id;
+--echo # Connection con3 waits for the backup lock held by con2.
+--source include/wait_condition.inc
+--replace_column 1 <con3_id>
+eval
+SELECT ID, USER, COMMAND, STATE, INFO, STAGE, MAX_STAGE, INFO_BINARY
+FROM information_schema.processlist WHERE id = @con3_id;
+KILL CONNECTION @con2_id;
+
+--connection con3
+--echo # Connection con2 frees the backup lock it held by getting killed.
+--echo # So connection con3 gets the backup lock.
+--reap
+BACKUP STAGE END;
+disconnect con3;
+--connection default
+
+CREATE TABLE t_permanent_innodb (col1 INT) ENGINE = InnoDB;
+INSERT INTO t_permanent_innodb SET col1 = 1;
+CREATE TABLE t_permanent_myisam (col1 INT) ENGINE = MyISAM;
+INSERT INTO t_permanent_myisam SET col1 = 1;
+
+# backup : Try the BACKUP STAGE sequence
+# con11 : Try DDL + DML with AUTOCOMMIT = 0
+# con12 : Try DML with AUTOCOMMIT = 1
+--connect(backup,localhost,root,,)
+--connect(con11,localhost,root,,)
+SET AUTOCOMMIT = 0;
+set session lock_wait_timeout=0;
+--connect(con12,localhost,root,,)
+SET AUTOCOMMIT = 1;
+
+--echo # Between (connection backup) BACKUP STAGE START and FLUSH
+--echo # no restrictions for concurrent sessions regarding DDL or DML
+--echo # affecting transactional/non transactional permanent tables.
+
+--connection backup
+BACKUP STAGE START;
+--connection con11
+UPDATE t_permanent_innodb SET col1 = 2;
+UPDATE t_permanent_myisam SET col1 = 2;
+SELECT COUNT(*) FROM t_permanent_innodb;
+HANDLER t_permanent_innodb OPEN;
+HANDLER t_permanent_innodb READ FIRST;
+HANDLER t_permanent_innodb CLOSE;
+SELECT COUNT(*) FROM t_permanent_myisam;
+HANDLER t_permanent_myisam OPEN;
+HANDLER t_permanent_myisam READ FIRST;
+HANDLER t_permanent_myisam CLOSE;
+CREATE TABLE t_con1_innodb (col1 INT) ENGINE = InnoDB;
+CREATE TABLE t_con1_myisam (col1 INT) ENGINE = InnoDB;
+ALTER TABLE t_permanent_innodb ADD COLUMN col2 INT;
+ALTER TABLE t_permanent_myisam ADD COLUMN col2 INT;
+CREATE OR REPLACE VIEW v_some_view AS SELECT * FROM t_permanent_innodb;
+CREATE OR REPLACE VIEW v_some_view AS SELECT * FROM t_permanent_myisam;
+--connection con12
+UPDATE t_permanent_innodb SET col1 = 3;
+UPDATE t_permanent_myisam SET col1 = 3;
+
+--echo # Between (connection backup) BACKUP STAGE FLUSH and BLOCK_DDL
+--echo # concurrent sessions
+--echo # - can change transactional permanent tables with DDL and DML
+--echo # - can run DROP/CREATE transactional/non transactional TABLE
+--echo # - cannot modify non transactional permanent tables with DDL or DML
+
+--connection backup
+BACKUP STAGE FLUSH;
+--connection con11
+UPDATE t_permanent_innodb SET col1 = 4;
+--error ER_LOCK_WAIT_TIMEOUT
+SET STATEMENT lock_wait_timeout=0 FOR UPDATE t_permanent_myisam SET col1 = 4;
+SELECT COUNT(*) FROM t_permanent_innodb;
+HANDLER t_permanent_innodb OPEN;
+HANDLER t_permanent_innodb READ FIRST;
+HANDLER t_permanent_innodb CLOSE;
+SELECT COUNT(*) FROM t_permanent_myisam;
+HANDLER t_permanent_myisam OPEN;
+HANDLER t_permanent_myisam READ FIRST;
+HANDLER t_permanent_myisam CLOSE;
+DROP TABLE t_con1_innodb;
+DROP TABLE t_con1_myisam;
+CREATE TABLE t_con1_innodb (col1 INT) ENGINE = InnoDB;
+CREATE TABLE t_con1_myisam (col1 INT) ENGINE = InnoDB;
+ALTER TABLE t_permanent_innodb ADD COLUMN col3 INT;
+--error ER_LOCK_WAIT_TIMEOUT
+SET STATEMENT lock_wait_timeout=0 FOR ALTER TABLE t_permanent_myisam ADD COLUMN col3 INT;
+CREATE OR REPLACE VIEW v_some_view AS SELECT * FROM t_permanent_innodb;
+CREATE OR REPLACE VIEW v_some_view AS SELECT * FROM t_permanent_myisam;
+--connection con12
+UPDATE t_permanent_innodb SET col1 = 5;
+
+
+--echo # Between (connection backup) BACKUP STAGE BLOCK_DDL and BLOCK_COMMIT
+--echo # concurrent sessions
+--echo # - can change transactional permanent tables with DML
+--echo # - cannot run DDL
+--echo # - cannot change non transactional permanent tables with DML
+
+--connection backup
+BACKUP STAGE BLOCK_DDL;
+--connection con11
+
+UPDATE t_permanent_innodb SET col1 = 6;
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE t_permanent_myisam SET col1 = 6;
+SELECT COUNT(*) FROM t_permanent_innodb;
+HANDLER t_permanent_innodb OPEN;
+HANDLER t_permanent_innodb READ FIRST;
+HANDLER t_permanent_innodb CLOSE;
+SELECT COUNT(*) FROM t_permanent_myisam;
+HANDLER t_permanent_myisam OPEN;
+HANDLER t_permanent_myisam READ FIRST;
+HANDLER t_permanent_myisam CLOSE;
+--error ER_LOCK_WAIT_TIMEOUT
+DROP TABLE t_con1_innodb;
+--error ER_LOCK_WAIT_TIMEOUT
+CREATE TABLE throw_away (col1 INT) ENGINE = InnoDB;
+--error ER_LOCK_WAIT_TIMEOUT
+ALTER TABLE t_permanent_innodb ADD COLUMN col4 INT;
+--error ER_LOCK_WAIT_TIMEOUT
+CREATE OR REPLACE VIEW v_some_view AS SELECT * FROM t_permanent_innodb;
+--error ER_LOCK_WAIT_TIMEOUT
+CREATE OR REPLACE VIEW v_some_view AS SELECT * FROM t_permanent_myisam;
+--connection con12
+UPDATE t_permanent_innodb SET col1 = 7;
+
+
+--echo # Between (connection backup) BACKUP STAGE BLOCK_COMMIT and END
+--echo # concurrent sessions
+--echo # - can change transactional permanent tables with DML
+--echo # - cannot run DDL
+--echo # - cannot change non transactional permanent tables with DML
+
+--connection backup
+BACKUP STAGE BLOCK_COMMIT;
+--connection con11
+UPDATE t_permanent_innodb SET col1 = 8;
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE t_permanent_myisam SET col1 = 8;
+SELECT COUNT(*) FROM t_permanent_innodb;
+HANDLER t_permanent_innodb OPEN;
+HANDLER t_permanent_innodb READ FIRST;
+HANDLER t_permanent_innodb CLOSE;
+SELECT COUNT(*) FROM t_permanent_myisam;
+HANDLER t_permanent_myisam OPEN;
+HANDLER t_permanent_myisam READ FIRST;
+HANDLER t_permanent_myisam CLOSE;
+--error ER_LOCK_WAIT_TIMEOUT
+DROP TABLE t_con1_innodb;
+--error ER_LOCK_WAIT_TIMEOUT
+DROP TABLE t_con1_myisam;
+--connection con12
+--error ER_LOCK_WAIT_TIMEOUT
+SET STATEMENT lock_wait_timeout=0 FOR UPDATE t_permanent_innodb SET col1 = 9;
+
+--connection backup
+BACKUP STAGE END;
+--connection con11
+COMMIT;
+SELECT * FROM t_permanent_innodb ORDER BY col1;
+SELECT * FROM t_permanent_myisam ORDER BY col1;
+SET AUTOCOMMIT = 0;
+SET GLOBAL tx_read_only = 1;
+--connection con12
+BACKUP STAGE START;
+BACKUP STAGE END;
+SET GLOBAL tx_read_only = 0;
+DROP VIEW v_some_view;
+DROP TABLE t_con1_innodb;
+DROP TABLE t_con1_myisam;
+
+--echo # Connection backup holds the backup lock and is at some stage.
+--echo # Connection con11 tries to LOCK TABLES or to set read_only.
+
+--connection backup
+BACKUP STAGE START;
+--connection con11
+--echo # Between BACKUP STAGE START and FLUSH:
+--echo # No restrictions for other connections around LOCK TABLES or read-only.
+LOCK TABLES t_permanent_innodb READ;
+LOCK TABLES t_permanent_myisam READ;
+LOCK TABLES t_permanent_innodb WRITE;
+LOCK TABLES t_permanent_myisam WRITE;
+UNLOCK TABLES;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+--connection backup
+BACKUP STAGE FLUSH;
+--connection con11
+--echo # Between BACKUP STAGE FLUSH and BLOCK_DDL:
+--echo # Connection con11, which does not hold the backup lock, cannot
+--echo # take LOCK TABLES ... WRITE on a non-transactional table.
+LOCK TABLES t_permanent_innodb READ;
+LOCK TABLES t_permanent_myisam READ;
+LOCK TABLES t_permanent_innodb WRITE;
+--error ER_LOCK_WAIT_TIMEOUT
+LOCK TABLES t_permanent_myisam WRITE;
+UNLOCK TABLES;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+--connection backup
+BACKUP STAGE BLOCK_DDL;
+--connection con11
+--echo # Between BACKUP STAGE BLOCK_DDL and BLOCK_COMMIT:
+--echo # Connection con11, which does not hold the backup lock, cannot
+--echo # take LOCK TABLES ... WRITE on a transactional or non-transactional table.
+LOCK TABLES t_permanent_innodb READ;
+LOCK TABLES t_permanent_myisam READ;
+--error ER_LOCK_WAIT_TIMEOUT
+LOCK TABLES t_permanent_innodb WRITE;
+--error ER_LOCK_WAIT_TIMEOUT
+LOCK TABLES t_permanent_myisam WRITE;
+UNLOCK TABLES;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+--connection backup
+BACKUP STAGE BLOCK_COMMIT;
+--connection con11
+--echo # Between BACKUP STAGE BLOCK_COMMIT and END:
+--echo # Connection con11, which does not hold the backup lock, cannot
+--echo # take LOCK TABLES ... WRITE on a transactional or non-transactional table.
+LOCK TABLES t_permanent_innodb READ;
+LOCK TABLES t_permanent_myisam READ;
+--error ER_LOCK_WAIT_TIMEOUT
+LOCK TABLES t_permanent_innodb WRITE;
+--error ER_LOCK_WAIT_TIMEOUT
+LOCK TABLES t_permanent_myisam WRITE;
+UNLOCK TABLES;
+SET GLOBAL tx_read_only = 1;
+SET GLOBAL tx_read_only = 0;
+--connection backup
+BACKUP STAGE END;
+
+DROP TABLE t_permanent_innodb;
+DROP TABLE t_permanent_myisam;
+
+--echo #
+--echo # Log tables
+--echo #
+
+--connection backup
+
+SET @old_general_log = @@general_log;
+SET @old_slow_query_log = @@slow_query_log;
+SET @old_log_output = @@log_output;
+
+SET GLOBAL log_output = 'TABLE';
+SET GLOBAL general_log = ON;
+SET GLOBAL slow_query_log = ON;
+
+
+--connection con11
+SET @old_long_query_time = @@SESSION.long_query_time;
+SET SESSION long_query_time = 0;
+
+--connection backup
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+
+--connection con11
+SELECT 1;
+
+--connection backup
+SELECT 1;
+
+--connection con11
+SET SESSION long_query_time = @old_long_query_time;
+
+--connection backup
+BACKUP STAGE END;
+
+SET GLOBAL log_output = @old_log_output;
+SET GLOBAL slow_query_log = @old_slow_query_log;
+SET GLOBAL general_log = @old_general_log;
+
+--echo #-----------------------------------------------------------------------
+--echo # Cleanup
+--echo #-----------------------------------------------------------------------
+
+--replace_result $old_lock_wait_timeout <old_lock_wait_timeout>
+eval
+SET GLOBAL lock_wait_timeout = $old_lock_wait_timeout;
+
+--disconnect con2
+--disconnect con11
+--disconnect con12
+--disconnect backup
+
+--connection default
+--source include/wait_until_count_sessions.inc
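
A convenient way to observe the backup lock from another session, as these tests do, is the METADATA_LOCK_INFO plugin (loaded via have_metadata_lock_info.inc above):

    SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME
    FROM information_schema.metadata_lock_info
    WHERE LOCK_TYPE = 'Backup lock';   # e.g. MDL_BACKUP_FLUSH while a session sits in the FLUSH stage
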
diff --git a/mysql-test/main/backup_syntax.result b/mysql-test/main/backup_syntax.result
new file mode 100644
index 00000000000..4b8e74edd57
--- /dev/null
+++ b/mysql-test/main/backup_syntax.result
@@ -0,0 +1,163 @@
+#-----------------------------------------------------------------------
+# Basic syntax checks
+#-----------------------------------------------------------------------
+# Check existing BACKUP STAGE statements in the sequence to be used.
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+# Check invalid variants of BACKUP .... syntax.
+BACKUP LOG;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'LOG' at line 1
+BACKUP LOCK;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
+BACKUP STAGE;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
+BACKUP STAGE LOCK;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'LOCK' at line 1
+BACKUP STAGE not_existing;
+ERROR HY000: Unknown backup stage: 'not_existing'. Stage should be one of START, FLUSH, BLOCK_DDL, BLOCK_COMMIT or END
+#-----------------------------------------------------------------------
+# BACKUP STAGE statements in various orders.
+#-----------------------------------------------------------------------
+# All BACKUP STAGE statements != 'BACKUP STAGE START' expect that a
+# backup lock (generated by BACKUP STAGE START) already exists.
+#
+backup stage start;
+backup stage flush;
+backup stage start;
+ERROR HY000: Backup stage 'START' is same or before current backup stage 'FLUSH'
+backup stage start;
+ERROR HY000: Backup stage 'START' is same or before current backup stage 'FLUSH'
+backup stage block_commit;
+backup stage flush;
+ERROR HY000: Backup stage 'FLUSH' is same or before current backup stage 'BLOCK_COMMIT'
+backup stage flush;
+ERROR HY000: Backup stage 'FLUSH' is same or before current backup stage 'BLOCK_COMMIT'
+backup stage end;
+backup stage flush;
+ERROR HY000: You must start backup with "BACKUP STAGE START"
+BACKUP STAGE END;
+ERROR HY000: You must start backup with "BACKUP STAGE START"
+BACKUP STAGE BLOCK_COMMIT;
+ERROR HY000: You must start backup with "BACKUP STAGE START"
+BACKUP STAGE BLOCK_DDL;
+ERROR HY000: You must start backup with "BACKUP STAGE START"
+BACKUP STAGE FLUSH;
+ERROR HY000: You must start backup with "BACKUP STAGE START"
+# An orderly "give up" with 'BACKUP STAGE END', for whatever reason.
+# Provided a backup lock exists, a 'BACKUP STAGE END' is allowed in
+# every situation.
+BACKUP STAGE START;
+BACKUP STAGE END;
+#----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE END;
+#----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE END;
+# Orders with BACKUP STAGE FLUSH omitted.
+BACKUP STAGE START;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE END;
+#----
+BACKUP STAGE START;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+# Orders with BACKUP STAGE BLOCK_DDL omitted.
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+# Orders with BACKUP STAGE BLOCK_COMMIT omitted.
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE END;
+# Orders with doubled BACKUP STAGE statements.
+# We get an error but that seems to have no bad impact on the state.
+# And so we are allowed to go on with BACKUP STAGE statements.
+BACKUP STAGE START;
+BACKUP STAGE START;
+ERROR HY000: Backup stage 'START' is same or before current backup stage 'START'
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+#----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE FLUSH;
+ERROR HY000: Backup stage 'FLUSH' is same or before current backup stage 'FLUSH'
+BACKUP STAGE END;
+#----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_DDL;
+ERROR HY000: Backup stage 'BLOCK_DDL' is same or before current backup stage 'BLOCK_DDL'
+BACKUP STAGE END;
+#----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE BLOCK_COMMIT;
+ERROR HY000: Backup stage 'BLOCK_COMMIT' is same or before current backup stage 'BLOCK_COMMIT'
+BACKUP STAGE END;
+# Scrambled orders.
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE START;
+ERROR HY000: Backup stage 'START' is same or before current backup stage 'FLUSH'
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE START;
+ERROR HY000: Backup stage 'START' is same or before current backup stage 'BLOCK_DDL'
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE START;
+ERROR HY000: Backup stage 'START' is same or before current backup stage 'BLOCK_COMMIT'
+BACKUP STAGE END;
+#----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE FLUSH;
+ERROR HY000: Backup stage 'FLUSH' is same or before current backup stage 'BLOCK_DDL'
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE FLUSH;
+ERROR HY000: Backup stage 'FLUSH' is same or before current backup stage 'BLOCK_COMMIT'
+BACKUP STAGE END;
+#----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE BLOCK_DDL;
+ERROR HY000: Backup stage 'BLOCK_DDL' is same or before current backup stage 'BLOCK_COMMIT'
+BACKUP STAGE END;
+#----
+BACKUP STAGE START;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE FLUSH;
+ERROR HY000: Backup stage 'FLUSH' is same or before current backup stage 'BLOCK_DDL'
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+#----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE BLOCK_DDL;
+ERROR HY000: Backup stage 'BLOCK_DDL' is same or before current backup stage 'BLOCK_COMMIT'
+BACKUP STAGE END;
+#
+# Check Oracle syntax
+#
+set SQL_MODE=Oracle;
+backup stage start;
+backup stage end;
+set SQL_MODE=default;
diff --git a/mysql-test/main/backup_syntax.test b/mysql-test/main/backup_syntax.test
new file mode 100644
index 00000000000..f02c69bdd85
--- /dev/null
+++ b/mysql-test/main/backup_syntax.test
@@ -0,0 +1,181 @@
+########################################################################
+# Tests related to the syntax of BACKUP STAGE (MDEV-5336)
+########################################################################
+
+--source include/have_innodb.inc
+--source include/have_metadata_lock_info.inc
+
+--echo #-----------------------------------------------------------------------
+--echo # Basic syntax checks
+--echo #-----------------------------------------------------------------------
+
+--echo # Check existing BACKUP STAGE statements in the sequence to be used.
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+
+--echo # Check invalid variants of BACKUP .... syntax.
+--error ER_PARSE_ERROR
+BACKUP LOG;
+--error ER_PARSE_ERROR
+BACKUP LOCK;
+--error ER_PARSE_ERROR
+BACKUP STAGE;
+--error ER_PARSE_ERROR
+BACKUP STAGE LOCK;
+--error ER_BACKUP_UNKNOWN_STAGE
+BACKUP STAGE not_existing;
+
+--echo #-----------------------------------------------------------------------
+--echo # BACKUP STAGE statements in various orders.
+--echo #-----------------------------------------------------------------------
+--echo # All BACKUP STAGE statements != 'BACKUP STAGE START' expect that a
+--echo # backup lock (generated by BACKUP STAGE START) already exists.
+--echo #
+
+backup stage start;
+backup stage flush;
+--error ER_BACKUP_WRONG_STAGE
+backup stage start;
+--error ER_BACKUP_WRONG_STAGE
+backup stage start;
+backup stage block_commit;
+--error ER_BACKUP_WRONG_STAGE
+backup stage flush;
+--error ER_BACKUP_WRONG_STAGE
+backup stage flush;
+backup stage end;
+--error ER_BACKUP_NOT_RUNNING
+backup stage flush;
+--error ER_BACKUP_NOT_RUNNING
+BACKUP STAGE END;
+--error ER_BACKUP_NOT_RUNNING
+BACKUP STAGE BLOCK_COMMIT;
+--error ER_BACKUP_NOT_RUNNING
+BACKUP STAGE BLOCK_DDL;
+--error ER_BACKUP_NOT_RUNNING
+BACKUP STAGE FLUSH;
+
+--echo # An orderly "give up" with 'BACKUP STAGE END', for whatever reason.
+--echo # Provided a backup lock exists, a 'BACKUP STAGE END' is allowed in
+--echo # every situation.
+
+BACKUP STAGE START;
+BACKUP STAGE END;
+--echo #----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE END;
+--echo #----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE END;
+--echo # Orders with BACKUP STAGE FLUSH omitted.
+BACKUP STAGE START;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE END;
+--echo #----
+BACKUP STAGE START;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+--echo # Orders with BACKUP STAGE BLOCK_DDL omitted.
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+--echo # Orders with BACKUP STAGE BLOCK_COMMIT omitted.
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE END;
+
+--echo # Orders with doubled BACKUP STAGE statements.
+--echo # We get an error but that seems to have no bad impact on the state.
+--echo # And so we are allowed to go on with BACKUP STAGE statements.
+
+BACKUP STAGE START;
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+--echo #----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE FLUSH;
+BACKUP STAGE END;
+--echo #----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE END;
+--echo #----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+
+--echo # Scrambled orders.
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE START;
+BACKUP STAGE BLOCK_DDL;
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE START;
+BACKUP STAGE BLOCK_COMMIT;
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE START;
+BACKUP STAGE END;
+--echo #----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_COMMIT;
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE FLUSH;
+BACKUP STAGE END;
+--echo #----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE BLOCK_COMMIT;
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE END;
+--echo #----
+BACKUP STAGE START;
+BACKUP STAGE BLOCK_DDL;
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_COMMIT;
+BACKUP STAGE END;
+--echo #----
+BACKUP STAGE START;
+BACKUP STAGE FLUSH;
+BACKUP STAGE BLOCK_COMMIT;
+--error ER_BACKUP_WRONG_STAGE
+BACKUP STAGE BLOCK_DDL;
+BACKUP STAGE END;
+
+--echo #
+--echo # Check Oracle syntax
+--echo #
+
+set SQL_MODE=Oracle;
+backup stage start;
+backup stage end;
+set SQL_MODE=default;
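
Taken together, these tests pin down the stage sequence a backup tool drives from a single connection; an illustrative summary, with comments reflecting what backup_stages.test above demonstrates for concurrent sessions:

    BACKUP STAGE START;         # concurrent DDL and DML still unrestricted
    BACKUP STAGE FLUSH;         # DDL and DML on non-transactional tables now blocked
    BACKUP STAGE BLOCK_DDL;     # all DDL blocked; DML on transactional tables still allowed
    BACKUP STAGE BLOCK_COMMIT;  # commits blocked: autocommit InnoDB updates time out in the tests
    BACKUP STAGE END;           # backup lock released
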
diff --git a/mysql-test/main/bootstrap.result b/mysql-test/main/bootstrap.result
index 5cefc281996..96aec014093 100644
--- a/mysql-test/main/bootstrap.result
+++ b/mysql-test/main/bootstrap.result
@@ -1,10 +1,13 @@
drop table if exists t1;
+# Kill the server
+# restart
drop table t1;
+# Kill the server
+# restart
drop table t1;
ERROR 42S02: Unknown table 'test.t1'
-set @my_max_allowed_packet= @@max_allowed_packet;
-set global max_allowed_packet=64*@@max_allowed_packet;
-set global max_allowed_packet=@my_max_allowed_packet;
+# Kill the server
+# restart
drop table t1;
End of 5.1 tests
#
@@ -14,7 +17,11 @@ End of 5.1 tests
SELECT 'bug' as '' FROM INFORMATION_SCHEMA.ENGINES WHERE engine='innodb'
and SUPPORT='YES';
+# Kill the server
+# restart
End of 5.5 tests
+# Kill the server
+# restart
flush tables;
show create table t1;
Table Create Table
@@ -26,3 +33,5 @@ select * from mysql.plugin;
name dl
EXAMPLE ha_example.so
truncate table mysql.plugin;
+# Kill the server
+# restart
diff --git a/mysql-test/main/bootstrap.test b/mysql-test/main/bootstrap.test
index 414deede892..d75be403f13 100644
--- a/mysql-test/main/bootstrap.test
+++ b/mysql-test/main/bootstrap.test
@@ -1,3 +1,4 @@
+--source include/not_embedded.inc
#
# test mysqld in bootstrap mode
#
@@ -15,7 +16,9 @@ let $MYSQLD_BOOTSTRAP_CMD= $MYSQLD_BOOTSTRAP_CMD --datadir=$MYSQLD_DATADIR --def
use test;
CREATE TABLE t1(a int);
EOF
+--source include/kill_mysqld.inc
--exec $MYSQLD_BOOTSTRAP_CMD < $MYSQLTEST_VARDIR/tmp/bootstrap_test.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
+--source include/start_mysqld.inc
drop table t1;
remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_test.sql;
#
@@ -25,9 +28,11 @@ remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_test.sql;
use test;
CREATE TABLE t1;
EOF
+--source include/kill_mysqld.inc
--error 1
--exec $MYSQLD_BOOTSTRAP_CMD < $MYSQLTEST_VARDIR/tmp/bootstrap_error.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
# Table t1 should not exists
+--source include/start_mysqld.inc
--error 1051
drop table t1;
remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_error.sql;
@@ -35,17 +40,15 @@ remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_error.sql;
#
# Bootstrap with a query larger than 2*thd->net.max_packet
#
-set @my_max_allowed_packet= @@max_allowed_packet;
-set global max_allowed_packet=64*@@max_allowed_packet;
--disable_query_log
create table t1 select 2 as a, concat(repeat('MySQL', @@max_allowed_packet/10), ';') as b;
eval select * into outfile '$MYSQLTEST_VARDIR/tmp/long_query.sql' from t1;
--enable_query_log
+--source include/kill_mysqld.inc
--error 1
--exec $MYSQLD_BOOTSTRAP_CMD < $MYSQLTEST_VARDIR/tmp/long_query.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
remove_file $MYSQLTEST_VARDIR/tmp/long_query.sql;
-
-set global max_allowed_packet=@my_max_allowed_packet;
+--source include/start_mysqld.inc
drop table t1;
--echo End of 5.1 tests
@@ -62,8 +65,10 @@ SELECT 'bug' as '' FROM INFORMATION_SCHEMA.ENGINES WHERE engine='innodb'
#
# MDEV-13063 Server crashes in intern_plugin_lock or assertion `plugin_ptr->ref_count == 1' fails in plugin_init
#
+--source include/kill_mysqld.inc
--error 1
--exec $MYSQLD_BOOTSTRAP_CMD --myisam_recover_options=NONE
+--source include/start_mysqld.inc
--echo End of 5.5 tests
@@ -73,6 +78,7 @@ SELECT 'bug' as '' FROM INFORMATION_SCHEMA.ENGINES WHERE engine='innodb'
# Check that --bootstrap can install and uninstall plugins
#
let $PLUGIN_DIR=`select @@plugin_dir`;
+--source include/kill_mysqld.inc
--write_file $MYSQLTEST_VARDIR/tmp/install_plugin.sql
install soname 'ha_example';
uninstall plugin unusable;
@@ -90,6 +96,7 @@ create table t1(a int) engine=example charset=latin1;
EOF
--exec $MYSQLD_BOOTSTRAP_CMD --plugin-dir=$PLUGIN_DIR < $MYSQLTEST_VARDIR/tmp/bootstrap_plugins.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
--remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_plugins.sql
+--source include/start_mysqld.inc
flush tables;
show create table t1;
drop table t1;
@@ -104,6 +111,7 @@ truncate table mysql.plugin;
--write_file $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql
use test;
EOF
+--source include/kill_mysqld.inc
--exec $MYSQLD_BOOTSTRAP_CMD --ignore-db-dirs='some_dir' --ignore-db-dirs='some_dir' < $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
--remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql
@@ -115,3 +123,5 @@ use test;
EOF
--exec $MYSQLD_BOOTSTRAP_CMD --default-time-zone=Europe/Moscow < $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql >> $MYSQLTEST_VARDIR/tmp/bootstrap.log 2>&1
--remove_file $MYSQLTEST_VARDIR/tmp/bootstrap_9969.sql
+
+--source include/start_mysqld.inc
diff --git a/mysql-test/main/brackets.result b/mysql-test/main/brackets.result
new file mode 100644
index 00000000000..869afe56c9c
--- /dev/null
+++ b/mysql-test/main/brackets.result
@@ -0,0 +1,455 @@
+select 1 union ( select 2 union select 3);
+1
+1
+2
+3
+explain extended
+select 1 union ( select 2 union select 3);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+4 UNION <derived2> ALL NULL NULL NULL NULL 2 100.00
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+3 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
+NULL UNION RESULT <union1,4> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 /* select#1 */ select 1 AS `1` union /* select#4 */ select `__4`.`2` AS `2` from (/* select#2 */ select 2 AS `2` union /* select#3 */ select 3 AS `3`) `__4`
+select 1 union ( select 1 union select 1);
+1
+1
+explain extended
+select 1 union ( select 1 union select 1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+4 UNION <derived2> ALL NULL NULL NULL NULL 2 100.00
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+3 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
+NULL UNION RESULT <union1,4> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 /* select#1 */ select 1 AS `1` union /* select#4 */ select `__4`.`1` AS `1` from (/* select#2 */ select 1 AS `1` union /* select#3 */ select 1 AS `1`) `__4`
+select 1 union all ( select 1 union select 1);
+1
+1
+1
+explain extended
+select 1 union all ( select 1 union select 1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+4 UNION <derived2> ALL NULL NULL NULL NULL 2 100.00
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+3 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 /* select#1 */ select 1 AS `1` union all /* select#4 */ select `__4`.`1` AS `1` from (/* select#2 */ select 1 AS `1` union /* select#3 */ select 1 AS `1`) `__4`
+select 1 union ( select 1 union all select 1);
+1
+1
+explain extended
+select 1 union ( select 1 union all select 1);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+4 UNION <derived2> ALL NULL NULL NULL NULL 2 100.00
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+3 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union1,4> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 /* select#1 */ select 1 AS `1` union /* select#4 */ select `__4`.`1` AS `1` from (/* select#2 */ select 1 AS `1` union all /* select#3 */ select 1 AS `1`) `__4`
+select 1 union select 1 union all select 1;
+1
+1
+1
+explain extended
+select 1 union select 1 union all select 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+2 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+3 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union1,2,3> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 /* select#1 */ select 1 AS `1` union /* select#2 */ select 1 AS `1` union all /* select#3 */ select 1 AS `1`
+(select 1 as a) union (select 2) order by a;
+a
+1
+2
+explain extended
+(select 1 as a) union (select 2) order by a;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+2 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL NULL Using filesort
+Warnings:
+Note 1003 (/* select#1 */ select 1 AS `a`) union (/* select#2 */ select 2 AS `2`) order by `a`
+/* select#1 */ select 1 AS `a` union /* select#2 */ select 2 AS `2` order by `a`;
+a
+1
+2
+explain extended
+/* select#1 */ select 1 AS `a` union /* select#2 */ select 2 AS `2` order by `a`;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+2 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL NULL Using filesort
+Warnings:
+Note 1003 /* select#1 */ select 1 AS `a` union /* select#2 */ select 2 AS `2` order by `a`
+select 1 union ( select 1 union (select 1 union (select 1 union select 1)));
+1
+1
+explain extended all
+select 1 union ( select 1 union (select 1 union (select 1 union select 1)));
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+8 UNION <derived2> ALL NULL NULL NULL NULL 2 100.00
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+7 UNION <derived3> ALL NULL NULL NULL NULL 2 100.00
+3 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+6 UNION <derived4> ALL NULL NULL NULL NULL 2 100.00
+4 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+5 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union4,5> ALL NULL NULL NULL NULL NULL NULL
+NULL UNION RESULT <union3,6> ALL NULL NULL NULL NULL NULL NULL
+NULL UNION RESULT <union2,7> ALL NULL NULL NULL NULL NULL NULL
+NULL UNION RESULT <union1,8> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 /* select#1/0 Filter Select: select `1` AS `1` */ select 1 AS `1` union /* select#8/0 */ select `__8`.`1` AS `1` from (/* select#2/1 Filter Select: select `1` AS `1` */ select 1 AS `1` union /* select#7/1 */ select `__7`.`1` AS `1` from (/* select#3/2 Filter Select: select `1` AS `1` */ select 1 AS `1` union /* select#6/2 */ select `__6`.`1` AS `1` from (/* select#4/3 Filter Select: select `1` AS `1` */ select 1 AS `1` union /* select#5/3 */ select 1 AS `1`) `__6`) `__7`) `__8`
+#
+# MDEV-6341: INSERT ... SELECT UNION with parenthesis
+#
+create table t1 (a int, b int);
+insert into t1 (select 1,1 union select 2,2);
+select * from t1 order by 1;
+a b
+1 1
+2 2
+delete from t1;
+insert into t1 select 1,1 union select 2,2;
+select * from t1 order by 1;
+a b
+1 1
+2 2
+drop table t1;
+CREATE OR REPLACE TABLE t1 AS SELECT 1 AS a UNION SELECT 2;
+select * from t1 order by 1;
+a
+1
+2
+drop table t1;
+CREATE OR REPLACE TABLE t1 AS (SELECT 1 AS a UNION SELECT 2);
+select * from t1 order by 1;
+a
+1
+2
+drop table t1;
+CREATE OR REPLACE VIEW v1 AS (SELECT 1 AS a);
+show create view v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS (select 1 AS `a`) latin1 latin1_swedish_ci
+drop view v1;
+CREATE OR REPLACE VIEW v1 AS SELECT 1 AS a UNION SELECT 2;
+show create view v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select 1 AS `a` union select 2 AS `2` latin1 latin1_swedish_ci
+drop view v1;
+CREATE OR REPLACE VIEW v1 AS (SELECT 1 AS a UNION SELECT 2);
+show create view v1;
+View Create View character_set_client collation_connection
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select 1 AS `a` union select 2 AS `2` latin1 latin1_swedish_ci
+drop view v1;
+#
+# MDEV-10028: Syntax error on ((SELECT ...) UNION (SELECT ...))
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (10);
+INSERT INTO t1 VALUES (20);
+INSERT INTO t1 VALUES (30);
+((SELECT a FROM t1) UNION (SELECT a FROM t1));
+a
+10
+20
+30
+(SELECT * FROM t1 UNION SELECT * FROM t1);
+a
+10
+20
+30
+((SELECT a FROM t1) LIMIT 1);
+a
+10
+SELECT * FROM (SELECT 1 UNION (SELECT 2 UNION SELECT 3)) t1;
+1
+1
+2
+3
+DROP TABLE t1;
+#
+# test of several levels of ORDER BY / LIMIT
+#
+create table t1 (a int, b int);
+insert into t1 (a,b) values (1, 100), (2, 200), (3,30), (4,4);
+select a,b from t1 order by 1 limit 3;
+a b
+1 100
+2 200
+3 30
+(select a,b from t1 order by 1 limit 3) order by 2 limit 2;
+a b
+3 30
+1 100
+(select 10,1000 union select a,b from t1 order by 1 limit 3) order by 2 limit 2;
+10 1000
+3 30
+1 100
+((select a,b from t1 order by 1 limit 3) order by 2 limit 2) order by 1 limit 1;
+a b
+1 100
+((select a,b from t1 order by 1 limit 3) order by 2 limit 2) order by 1;
+a b
+1 100
+3 30
+drop table t1;
+#
+# MDEV-16359: union with 3 selects in brackets
+#
+select 1 union select 1 union select 1;
+1
+1
+(select 1 union select 1 union select 1);
+1
+1
+((select 1) union (select 1) union (select 1));
+1
+1
+#
+# MDEV-16357: union in brackets with tail
+# union with tail in brackets
+#
+CREATE TABLE t1 (a int);
+INSERT INTO t1 VALUES(1),(2),(3),(4);
+CREATE TABLE t2 (a int);
+INSERT INTO t2 VALUES (4),(5),(6),(7);
+(SELECT a FROM t1 UNION SELECT a FROM t2) LIMIT 1;
+a
+1
+(SELECT a FROM t1 UNION SELECT a FROM t2) ORDER BY a DESC;
+a
+7
+6
+5
+4
+3
+2
+1
+(SELECT a FROM t1 UNION SELECT a FROM t2 LIMIT 1);
+a
+1
+DROP TABLE t1,t2;
+#
+# MDEV-19324: ((SELECT ...) ORDER BY col ) LIMIT n
+#
+create table t1 (a int);
+insert into t1 values (10),(20),(30);
+select a from t1 order by a desc limit 1;
+a
+30
+explain extended select a from t1 order by a desc limit 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using filesort
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` order by `test`.`t1`.`a` desc limit 1
+explain format=json select a from t1 order by a desc limit 1;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "read_sorted_file": {
+ "filesort": {
+ "sort_key": "t1.a desc",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100
+ }
+ }
+ }
+ }
+}
+(select a from t1 order by a desc) limit 1;
+a
+30
+explain extended (select a from t1 order by a desc) limit 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using filesort
+Warnings:
+Note 1003 (select `test`.`t1`.`a` AS `a` from `test`.`t1` order by `test`.`t1`.`a` desc limit 1)
+explain format=json (select a from t1 order by a desc) limit 1;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "read_sorted_file": {
+ "filesort": {
+ "sort_key": "t1.a desc",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100
+ }
+ }
+ }
+ }
+}
+(select a from t1 where a=20 union select a from t1) order by a desc limit 1;
+a
+30
+explain extended (select a from t1 where a=20 union select a from t1) order by a desc limit 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+2 UNION t1 ALL NULL NULL NULL NULL 3 100.00
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL NULL Using filesort
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 20 union /* select#2 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` order by `a` desc limit 1
+explain format=json (select a from t1 where a=20 union select a from t1) order by a desc limit 1;
+EXPLAIN
+{
+ "query_block": {
+ "union_result": {
+ "table_name": "<union1,2>",
+ "access_type": "ALL",
+ "query_specifications": [
+ {
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t1.a = 20"
+ }
+ }
+ },
+ {
+ "query_block": {
+ "select_id": 2,
+ "operation": "UNION",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100
+ }
+ }
+ }
+ ]
+ }
+ }
+}
+((select a from t1 where a=20 union select a from t1) order by a desc) limit 1;
+a
+30
+explain extended ((select a from t1 where a=20 union select a from t1) order by a desc) limit 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+2 UNION t1 ALL NULL NULL NULL NULL 3 100.00
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL NULL Using filesort
+Warnings:
+Note 1003 (/* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 20) union /* select#2 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` order by `a` desc limit 1
+explain format=json ((select a from t1 where a=20 union select a from t1) order by a desc) limit 1;
+EXPLAIN
+{
+ "query_block": {
+ "union_result": {
+ "table_name": "<union1,2>",
+ "access_type": "ALL",
+ "query_specifications": [
+ {
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t1.a = 20"
+ }
+ }
+ },
+ {
+ "query_block": {
+ "select_id": 2,
+ "operation": "UNION",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100
+ }
+ }
+ }
+ ]
+ }
+ }
+}
+drop table t1;
+#
+# MDEV-19363: ((SELECT ...) ORDER BY col ) LIMIT n UNION ...
+#
+create table t1 (pk int);
+insert into t1 values (5),(4),(1),(2),(3);
+((select * from t1 order by pk) limit 2) union (select * from t1 where pk > 4);
+pk
+1
+2
+5
+explain extended ((select * from t1 order by pk) limit 2) union (select * from t1 where pk > 4);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 100.00 Using filesort
+2 UNION t1 ALL NULL NULL NULL NULL 5 100.00 Using where
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 (/* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` order by `test`.`t1`.`pk` limit 2) union (/* select#2 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` where `test`.`t1`.`pk` > 4)
+explain format=json ((select * from t1 order by pk) limit 2) union (select * from t1 where pk > 4);
+EXPLAIN
+{
+ "query_block": {
+ "union_result": {
+ "table_name": "<union1,2>",
+ "access_type": "ALL",
+ "query_specifications": [
+ {
+ "query_block": {
+ "select_id": 1,
+ "read_sorted_file": {
+ "filesort": {
+ "sort_key": "t1.pk",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100
+ }
+ }
+ }
+ }
+ },
+ {
+ "query_block": {
+ "select_id": 2,
+ "operation": "UNION",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.pk > 4"
+ }
+ }
+ }
+ ]
+ }
+ }
+}
+drop table t1;
+# End of 10.4 tests
diff --git a/mysql-test/main/brackets.test b/mysql-test/main/brackets.test
new file mode 100644
index 00000000000..cf1dcc56acc
--- /dev/null
+++ b/mysql-test/main/brackets.test
@@ -0,0 +1,158 @@
+select 1 union ( select 2 union select 3);
+explain extended
+select 1 union ( select 2 union select 3);
+select 1 union ( select 1 union select 1);
+explain extended
+select 1 union ( select 1 union select 1);
+select 1 union all ( select 1 union select 1);
+explain extended
+select 1 union all ( select 1 union select 1);
+select 1 union ( select 1 union all select 1);
+explain extended
+select 1 union ( select 1 union all select 1);
+select 1 union select 1 union all select 1;
+explain extended
+select 1 union select 1 union all select 1;
+
+(select 1 as a) union (select 2) order by a;
+explain extended
+(select 1 as a) union (select 2) order by a;
+/* select#1 */ select 1 AS `a` union /* select#2 */ select 2 AS `2` order by `a`;
+explain extended
+/* select#1 */ select 1 AS `a` union /* select#2 */ select 2 AS `2` order by `a`;
+
+select 1 union ( select 1 union (select 1 union (select 1 union select 1)));
+explain extended all
+select 1 union ( select 1 union (select 1 union (select 1 union select 1)));
+
+--echo #
+--echo # MDEV-6341: INSERT ... SELECT UNION with parenthesis
+--echo #
+create table t1 (a int, b int);
+insert into t1 (select 1,1 union select 2,2);
+select * from t1 order by 1;
+delete from t1;
+insert into t1 select 1,1 union select 2,2;
+select * from t1 order by 1;
+drop table t1;
+CREATE OR REPLACE TABLE t1 AS SELECT 1 AS a UNION SELECT 2;
+select * from t1 order by 1;
+drop table t1;
+CREATE OR REPLACE TABLE t1 AS (SELECT 1 AS a UNION SELECT 2);
+select * from t1 order by 1;
+drop table t1;
+CREATE OR REPLACE VIEW v1 AS (SELECT 1 AS a);
+show create view v1;
+drop view v1;
+CREATE OR REPLACE VIEW v1 AS SELECT 1 AS a UNION SELECT 2;
+show create view v1;
+drop view v1;
+CREATE OR REPLACE VIEW v1 AS (SELECT 1 AS a UNION SELECT 2);
+show create view v1;
+drop view v1;
+
+
+--echo #
+--echo # MDEV-10028: Syntax error on ((SELECT ...) UNION (SELECT ...))
+--echo #
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (10);
+INSERT INTO t1 VALUES (20);
+INSERT INTO t1 VALUES (30);
+
+((SELECT a FROM t1) UNION (SELECT a FROM t1));
+(SELECT * FROM t1 UNION SELECT * FROM t1);
+((SELECT a FROM t1) LIMIT 1);
+SELECT * FROM (SELECT 1 UNION (SELECT 2 UNION SELECT 3)) t1;
+DROP TABLE t1;
+
+--echo #
+--echo # test of several levels of ORDER BY / LIMIT
+--echo #
+create table t1 (a int, b int);
+insert into t1 (a,b) values (1, 100), (2, 200), (3,30), (4,4);
+
+select a,b from t1 order by 1 limit 3;
+(select a,b from t1 order by 1 limit 3) order by 2 limit 2;
+(select 10,1000 union select a,b from t1 order by 1 limit 3) order by 2 limit 2;
+((select a,b from t1 order by 1 limit 3) order by 2 limit 2) order by 1 limit 1;
+((select a,b from t1 order by 1 limit 3) order by 2 limit 2) order by 1;
+
+drop table t1;
+
+--echo #
+--echo # MDEV-16359: union with 3 selects in brackets
+--echo #
+
+select 1 union select 1 union select 1;
+(select 1 union select 1 union select 1);
+((select 1) union (select 1) union (select 1));
+
+--echo #
+--echo # MDEV-16357: union in brackets with tail
+--echo # union with tail in brackets
+--echo #
+
+CREATE TABLE t1 (a int);
+INSERT INTO t1 VALUES(1),(2),(3),(4);
+
+CREATE TABLE t2 (a int);
+INSERT INTO t2 VALUES (4),(5),(6),(7);
+
+(SELECT a FROM t1 UNION SELECT a FROM t2) LIMIT 1;
+(SELECT a FROM t1 UNION SELECT a FROM t2) ORDER BY a DESC;
+
+(SELECT a FROM t1 UNION SELECT a FROM t2 LIMIT 1);
+
+DROP TABLE t1,t2;
+
+--echo #
+--echo # MDEV-19324: ((SELECT ...) ORDER BY col ) LIMIT n
+--echo #
+
+create table t1 (a int);
+insert into t1 values (10),(20),(30);
+
+let $q1=
+select a from t1 order by a desc limit 1;
+eval $q1;
+eval explain extended $q1;
+eval explain format=json $q1;
+
+let $q2=
+(select a from t1 order by a desc) limit 1;
+eval $q2;
+eval explain extended $q2;
+eval explain format=json $q2;
+
+let $q1=
+(select a from t1 where a=20 union select a from t1) order by a desc limit 1;
+eval $q1;
+eval explain extended $q1;
+eval explain format=json $q1;
+
+let $q2=
+((select a from t1 where a=20 union select a from t1) order by a desc) limit 1;
+eval $q2;
+eval explain extended $q2;
+eval explain format=json $q2;
+
+drop table t1;
+
+--echo #
+--echo # MDEV-19363: ((SELECT ...) ORDER BY col ) LIMIT n UNION ...
+--echo #
+
+create table t1 (pk int);
+insert into t1 values (5),(4),(1),(2),(3);
+
+let $q=
+((select * from t1 order by pk) limit 2) union (select * from t1 where pk > 4);
+eval $q;
+eval explain extended $q;
+eval explain format=json $q;
+
+drop table t1;
+
+--echo # End of 10.4 tests
+
diff --git a/mysql-test/main/bug12427262.result b/mysql-test/main/bug12427262.result
index 8ec14efc45e..6e79ec3aa11 100644
--- a/mysql-test/main/bug12427262.result
+++ b/mysql-test/main/bug12427262.result
@@ -16,6 +16,8 @@ create table t10 (c1 int);
select Sum(ALL(COUNT_READ)) from performance_schema.file_summary_by_instance where FILE_NAME
like "%show_table_lw_db%" AND FILE_NAME like "%.frm%" AND EVENT_NAME='wait/io/file/sql/FRM'
into @count_read_before;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
show tables;
Tables_in_show_table_lw_db
t1
@@ -31,6 +33,8 @@ t9
select Sum(ALL(COUNT_READ)) from performance_schema.file_summary_by_instance where FILE_NAME
like "%show_table_lw_db%" AND FILE_NAME like "%.frm%" AND EVENT_NAME='wait/io/file/sql/FRM'
into @count_read_after;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select @count_read_after-@count_read_before;
@count_read_after-@count_read_before
0.00000000000000000000000000000000000000
@@ -49,6 +53,8 @@ t9 BASE TABLE
select Sum(ALL(COUNT_READ)) from performance_schema.file_summary_by_instance where FILE_NAME
like "%show_table_lw_db%" AND FILE_NAME like "%.frm%" AND EVENT_NAME='wait/io/file/sql/FRM'
into @count_read_after;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select @count_read_after-@count_read_before;
@count_read_after-@count_read_before
10.00000000000000000000000000000000000000
diff --git a/mysql-test/main/cast.result b/mysql-test/main/cast.result
index 1dc6bbbf9e7..17329cb596f 100644
--- a/mysql-test/main/cast.result
+++ b/mysql-test/main/cast.result
@@ -9,7 +9,6 @@ CAST(CAST(1-2 AS UNSIGNED) AS SIGNED INTEGER)
-1
Warnings:
Note 1105 Cast to unsigned converted negative integer to it's positive complement
-Note 1105 Cast to signed converted positive out-of-range integer to it's negative complement
select CAST('10 ' as unsigned integer);
CAST('10 ' as unsigned integer)
10
@@ -30,8 +29,6 @@ Note 1105 Cast to unsigned converted negative integer to it's positive complemen
select ~5, cast(~5 as signed);
~5 cast(~5 as signed)
18446744073709551610 -6
-Warnings:
-Note 1105 Cast to signed converted positive out-of-range integer to it's negative complement
explain extended select ~5, cast(~5 as signed);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used
@@ -40,8 +37,6 @@ Note 1003 select ~5 AS `~5`,cast(~5 as signed) AS `cast(~5 as signed)`
select cast(18446744073709551615 as signed);
cast(18446744073709551615 as signed)
-1
-Warnings:
-Note 1105 Cast to signed converted positive out-of-range integer to it's negative complement
select cast(5 as unsigned) -6.0;
cast(5 as unsigned) -6.0
-1.0
@@ -217,16 +212,12 @@ CAST(0xb3 as signed)
select CAST(0x8fffffffffffffff as signed);
CAST(0x8fffffffffffffff as signed)
-8070450532247928833
-Warnings:
-Note 1105 Cast to signed converted positive out-of-range integer to it's negative complement
select CAST(0xffffffffffffffff as unsigned);
CAST(0xffffffffffffffff as unsigned)
18446744073709551615
select CAST(0xfffffffffffffffe as signed);
CAST(0xfffffffffffffffe as signed)
-2
-Warnings:
-Note 1105 Cast to signed converted positive out-of-range integer to it's negative complement
select cast('-10a' as signed integer);
cast('-10a' as signed integer)
-10
@@ -322,7 +313,7 @@ select cast('' as time);
cast('' as time)
NULL
Warnings:
-Warning 1292 Truncated incorrect time value: ''
+Warning 1292 Incorrect time value: ''
select cast(NULL as DATE);
cast(NULL as DATE)
NULL
@@ -563,8 +554,6 @@ cast(18446744073709551615 as unsigned)
select cast(18446744073709551615 as signed);
cast(18446744073709551615 as signed)
-1
-Warnings:
-Note 1105 Cast to signed converted positive out-of-range integer to it's negative complement
select cast('18446744073709551615' as unsigned);
cast('18446744073709551615' as unsigned)
18446744073709551615
@@ -1294,5 +1283,3 @@ SET sql_mode=DEFAULT;
SELECT CAST(11068046444225730969 AS SIGNED);
CAST(11068046444225730969 AS SIGNED)
-7378697629483820647
-Warnings:
-Note 1105 Cast to signed converted positive out-of-range integer to it's negative complement
diff --git a/mysql-test/main/column_compression_parts.result b/mysql-test/main/column_compression_parts.result
index e0b54aec20c..bc3028826ee 100644
--- a/mysql-test/main/column_compression_parts.result
+++ b/mysql-test/main/column_compression_parts.result
@@ -12,6 +12,7 @@ INSERT INTO t1 VALUES (1,REPEAT('a',100)),(2,REPEAT('v',200)),(3,REPEAT('r',300)
INSERT INTO t1 VALUES (5,REPEAT('k',500)),(6,'April'),(7,7),(8,""),(9,"M"),(10,DEFAULT);
ALTER TABLE t1 ANALYZE PARTITION p1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
ALTER TABLE t1 CHECK PARTITION p2;
Table Op Msg_type Msg_text
diff --git a/mysql-test/main/column_compression_parts.test b/mysql-test/main/column_compression_parts.test
index 9a6f63d0cde..4c77a7308f7 100644
--- a/mysql-test/main/column_compression_parts.test
+++ b/mysql-test/main/column_compression_parts.test
@@ -179,4 +179,4 @@ ALTER TABLE t1 REORGANIZE PARTITION p2 INTO (PARTITION p22 VALUES LESS THAN (MAX
SHOW CREATE TABLE t1;
ALTER TABLE t1 REBUILD PARTITION p22;
-DROP TABLE t1; \ No newline at end of file
+DROP TABLE t1;
diff --git a/mysql-test/main/connect-abstract.cnf b/mysql-test/main/connect-abstract.cnf
new file mode 100644
index 00000000000..5798c4f2f2a
--- /dev/null
+++ b/mysql-test/main/connect-abstract.cnf
@@ -0,0 +1,9 @@
+
+!include include/default_my.cnf
+
+[mysqld.1]
+socket= @ENV.ABSTRACT_SOCKET
+
+# Using @OPT.port here for uniqueness
+[ENV]
+ABSTRACT_SOCKET= @mtr-test-abstract-socket-@OPT.port
diff --git a/mysql-test/main/connect-abstract.result b/mysql-test/main/connect-abstract.result
new file mode 100644
index 00000000000..68a9674dfaa
--- /dev/null
+++ b/mysql-test/main/connect-abstract.result
@@ -0,0 +1,5 @@
+connect con1,localhost,root,,test,,$ABSTRACT_SOCKET;
+select 1;
+1
+1
+disconnect con1;
diff --git a/mysql-test/main/connect-abstract.test b/mysql-test/main/connect-abstract.test
new file mode 100644
index 00000000000..0f212fe5a0d
--- /dev/null
+++ b/mysql-test/main/connect-abstract.test
@@ -0,0 +1,6 @@
+--source include/linux.inc
+--source include/not_embedded.inc
+
+connect(con1,localhost,root,,test,,$ABSTRACT_SOCKET);
+select 1;
+disconnect con1;
diff --git a/mysql-test/main/connect.result b/mysql-test/main/connect.result
index 92584d5d11d..89e10d221d2 100644
--- a/mysql-test/main/connect.result
+++ b/mysql-test/main/connect.result
@@ -1,5 +1,4 @@
SET global secure_auth=0;
-drop table if exists t1,t2;
connect con1,localhost,root,,mysql;
show tables;
Tables_in_mysql
@@ -9,12 +8,12 @@ db
event
func
general_log
+global_priv
gtid_slave_pos
help_category
help_keyword
help_relation
help_topic
-host
index_stats
innodb_index_stats
innodb_table_stats
@@ -57,12 +56,12 @@ db
event
func
general_log
+global_priv
gtid_slave_pos
help_category
help_keyword
help_relation
help_topic
-host
index_stats
innodb_index_stats
innodb_table_stats
@@ -100,11 +99,7 @@ ERROR 28000: Access denied for user 'test'@'localhost' (using password: YES)
connect(localhost,test,zorro,test,MASTER_PORT,MASTER_SOCKET);
connect fail_con,localhost,test,zorro,;
ERROR 28000: Access denied for user 'test'@'localhost' (using password: YES)
-select user,host,password,plugin,authentication_string from mysql.user where user='test';
-user host password plugin authentication_string
-test localhost *5FDFF3268A50F41C5D18D2CA2F754D7BDB9B3E59
-test 127.0.0.1 *5FDFF3268A50F41C5D18D2CA2F754D7BDB9B3E59
-update mysql.user set password=old_password("gambling2") where user=_binary"test";
+update mysql.user set plugin="", authentication_string="", password=old_password("gambling2") where user=_binary"test";
flush privileges;
show grants for test@localhost;
Grants for test@localhost
@@ -116,7 +111,6 @@ Grants for test@localhost
GRANT ALL PRIVILEGES ON *.* TO 'test'@'localhost' IDENTIFIED BY PASSWORD '2f27438961437573'
connect con10,localhost,test,gambling2,;
connect con5,localhost,test,gambling2,mysql;
-connection con5;
set password="";
set password='gambling3';
ERROR HY000: Password hash should be a 41-digit hexadecimal number
@@ -129,12 +123,12 @@ db
event
func
general_log
+global_priv_bak
gtid_slave_pos
help_category
help_keyword
help_relation
help_topic
-host
index_stats
innodb_index_stats
innodb_table_stats
@@ -154,6 +148,7 @@ time_zone_transition
time_zone_transition_type
transaction_registry
user
+user_bak
connect con6,localhost,test,gambling3,test;
show tables;
Tables_in_test
@@ -174,9 +169,7 @@ connect(localhost,test,zorro,test,MASTER_PORT,MASTER_SOCKET);
connect fail_con,localhost,test,zorro,;
ERROR 28000: Access denied for user 'test'@'localhost' (using password: YES)
delete from mysql.user where user=_binary"test";
-flush privileges;
connect con7,localhost,root,,test;
-connection con7;
create table t1 (id integer not null auto_increment primary key);
create temporary table t2(id integer not null auto_increment primary key);
set @id := 1;
@@ -317,12 +310,10 @@ SET GLOBAL event_scheduler = OFF;
# -- End of Bug#35074.
connect extracon,127.0.0.1,root,,test,$MASTER_EXTRA_PORT,;
-connection extracon;
SELECT 'Connection on extra port ok';
Connection on extra port ok
Connection on extra port ok
connect extracon2,127.0.0.1,root,,test,$MASTER_EXTRA_PORT,;
-connection extracon2;
SELECT 'Connection on extra port 2 ok';
Connection on extra port 2 ok
Connection on extra port 2 ok
@@ -353,7 +344,6 @@ connect(localhost,mysqltest_up1,foo,test,MASTER_PORT,MASTER_SOCKET);
connect pcon1,localhost,mysqltest_up1,foo,,$MASTER_MYPORT,;
ERROR 28000: Access denied for user 'mysqltest_up1'@'localhost' (using password: YES)
connect pcon2,localhost,mysqltest_up1,bar,,$MASTER_MYPORT,;
-connection pcon2;
select user(), current_user();
user() current_user()
mysqltest_up1@localhost mysqltest_up1@%
@@ -362,7 +352,6 @@ connect(localhost,mysqltest_up2,newpw,test,MASTER_PORT,MASTER_SOCKET);
connect pcon3,localhost,mysqltest_up2,newpw,,$MASTER_MYPORT,;
ERROR 28000: Access denied for user 'mysqltest_up2'@'localhost' (using password: YES)
connect pcon4,localhost,mysqltest_up2,oldpw,,$MASTER_MYPORT,;
-connection pcon4;
select user(), current_user();
user() current_user()
mysqltest_up2@localhost mysqltest_up2@%
@@ -379,17 +368,15 @@ update mysql.user set plugin='mysql_old_password' where user = 'mysqltest_up2';
select user, password, plugin, authentication_string from mysql.user
where user like 'mysqltest_up_';
user password plugin authentication_string
-mysqltest_up1 *E8D46CE25265E545D225A8A6F1BAF642FEBEE5CB mysql_native_password
-mysqltest_up2 09301740536db389 mysql_old_password
+mysqltest_up1 *E8D46CE25265E545D225A8A6F1BAF642FEBEE5CB mysql_native_password *E8D46CE25265E545D225A8A6F1BAF642FEBEE5CB
+mysqltest_up2 09301740536db389 mysql_old_password 09301740536db389
flush privileges;
connect pcon6,localhost,mysqltest_up1,bar,,$MASTER_MYPORT,;
-connection pcon6;
select user(), current_user();
user() current_user()
mysqltest_up1@localhost mysqltest_up1@%
disconnect pcon6;
connect pcon7,localhost,mysqltest_up2,oldpw,,$MASTER_MYPORT,;
-connection pcon7;
select user(), current_user();
user() current_user()
mysqltest_up2@localhost mysqltest_up2@%
@@ -398,7 +385,7 @@ connection default;
DROP USER mysqltest_up1@'%';
DROP USER mysqltest_up2@'%';
#
-# BUG#1010351: New "via" keyword in 5.2+ can't be used as identifier anymore
+# BUG#1010351: New "via" keyword in 5.2+ can't be used as identifier anymore
#
create table t1 (via int);
alter table t1 add key(via);
@@ -406,7 +393,6 @@ drop table t1;
create table t1 (col1 int);
alter table t1 add via int not null;
drop table t1;
-drop procedure if exists p1;
create procedure p1(x int)
foo: loop
if x = 0 then
diff --git a/mysql-test/main/connect.test b/mysql-test/main/connect.test
index 560f29e840d..b0461a5afac 100644
--- a/mysql-test/main/connect.test
+++ b/mysql-test/main/connect.test
@@ -14,10 +14,6 @@
SET global secure_auth=0;
---disable_warnings
-drop table if exists t1,t2;
---enable_warnings
-
#connect (con1,localhost,root,,"");
#show tables;
connect (con1,localhost,root,,mysql);
@@ -66,8 +62,8 @@ connect (fail_con,localhost,test,zorro,test2);
connect (fail_con,localhost,test,zorro,);
# check if old password version also works
-select user,host,password,plugin,authentication_string from mysql.user where user='test';
-update mysql.user set password=old_password("gambling2") where user=_binary"test";
+source include/switch_to_mysql_user.inc;
+update mysql.user set plugin="", authentication_string="", password=old_password("gambling2") where user=_binary"test";
flush privileges;
show grants for test@localhost;
update mysql.user set plugin='mysql_old_password' where user='test';
@@ -76,7 +72,6 @@ show grants for test@localhost;
connect (con10,localhost,test,gambling2,);
connect (con5,localhost,test,gambling2,mysql);
-connection con5;
set password="";
--error ER_PASSWD_LENGTH
set password='gambling3';
@@ -103,17 +98,15 @@ connect (fail_con,localhost,test,zorro,test2);
--error ER_ACCESS_DENIED_ERROR
connect (fail_con,localhost,test,zorro,);
-
+source include/switch_to_mysql_global_priv.inc;
# remove user 'test' so that other tests which may use 'test'
# do not depend on this test.
delete from mysql.user where user=_binary"test";
-flush privileges;
#
# Bug#12517 Clear user variables and replication events before
# closing temp tables in thread cleanup.
connect (con7,localhost,root,,test);
-connection con7;
let $connection_id= `select connection_id()`;
create table t1 (id integer not null auto_increment primary key);
create temporary table t2(id integer not null auto_increment primary key);
@@ -316,11 +309,9 @@ SET GLOBAL event_scheduler = OFF;
# Test connections to the extra port.
connect(extracon,127.0.0.1,root,,test,$MASTER_EXTRA_PORT,);
-connection extracon;
SELECT 'Connection on extra port ok';
connect(extracon2,127.0.0.1,root,,test,$MASTER_EXTRA_PORT,);
-connection extracon2;
SELECT 'Connection on extra port 2 ok';
--disable_abort_on_error
@@ -351,7 +342,6 @@ GRANT ALL ON test.* TO 'O1234567890123456789012345678901234567890123456789012345
FLUSH PRIVILEGES;
---replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
connect (con1,localhost,O1234567890123456789012345678901234567890123456789012345678901234567890123456789x,test123,test);
disconnect con1;
@@ -382,18 +372,14 @@ CREATE USER mysqltest_up2 IDENTIFIED VIA mysql_old_password using '09301740536db
--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
--error ER_ACCESS_DENIED_ERROR
connect(pcon1,localhost,mysqltest_up1,foo,,$MASTER_MYPORT,);
---replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
connect(pcon2,localhost,mysqltest_up1,bar,,$MASTER_MYPORT,);
-connection pcon2;
select user(), current_user();
disconnect pcon2;
--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
--error ER_ACCESS_DENIED_ERROR
connect(pcon3,localhost,mysqltest_up2,newpw,,$MASTER_MYPORT,);
---replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
connect(pcon4,localhost,mysqltest_up2,oldpw,,$MASTER_MYPORT,);
-connection pcon4;
select user(), current_user();
disconnect pcon4;
@@ -417,34 +403,32 @@ connection default;
#
# cannot connect when password is set and plugin=mysql_native_password
#
+source include/switch_to_mysql_user.inc;
update mysql.user set plugin='mysql_native_password' where user = 'mysqltest_up1';
update mysql.user set plugin='mysql_old_password' where user = 'mysqltest_up2';
select user, password, plugin, authentication_string from mysql.user
where user like 'mysqltest_up_';
-flush privileges;
+flush privileges;
---replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
connect(pcon6,localhost,mysqltest_up1,bar,,$MASTER_MYPORT,);
-connection pcon6;
select user(), current_user();
disconnect pcon6;
---replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
connect(pcon7,localhost,mysqltest_up2,oldpw,,$MASTER_MYPORT,);
-connection pcon7;
select user(), current_user();
disconnect pcon7;
connection default;
+source include/switch_to_mysql_global_priv.inc;
+
DROP USER mysqltest_up1@'%';
DROP USER mysqltest_up2@'%';
# Wait till all disconnects are completed
--source include/wait_until_count_sessions.inc
-
--echo #
---echo # BUG#1010351: New "via" keyword in 5.2+ can't be used as identifier anymore
+--echo # BUG#1010351: New "via" keyword in 5.2+ can't be used as identifier anymore
--echo #
create table t1 (via int);
alter table t1 add key(via);
@@ -454,10 +438,6 @@ create table t1 (col1 int);
alter table t1 add via int not null;
drop table t1;
---disable_warnings
-drop procedure if exists p1;
---enable_warnings
-
delimiter |;
create procedure p1(x int)
foo: loop
@@ -472,6 +452,4 @@ delimiter ;|
call p1(2);
drop procedure p1;
-
-
SET global secure_auth=default;
diff --git a/mysql-test/main/create-big.result b/mysql-test/main/create-big.result
index d041419443e..fc29b08ffb8 100644
--- a/mysql-test/main/create-big.result
+++ b/mysql-test/main/create-big.result
@@ -237,11 +237,11 @@ select @a;
@a
0
drop table t1;
-set debug_sync='create_table_select_before_check_if_exists SIGNAL parked WAIT_FOR go';
-create table if not exists t1 select 1 as i;;
+set debug_sync='create_table_before_check_if_exists SIGNAL parked WAIT_FOR go';
+create table if not exists t1 select 1 as i;
connection addconroot1;
set debug_sync='now WAIT_FOR parked';
-drop table t1;;
+drop table t1;
connection addconroot2;
set debug_sync='now SIGNAL go';
connection default;
@@ -249,11 +249,11 @@ connection addconroot1;
connection default;
create table t1 (i int);
set @a:=0;
-set debug_sync='create_table_select_before_check_if_exists SIGNAL parked WAIT_FOR go';
-create table if not exists t1 select 1 as i;;
+set debug_sync='create_table_before_check_if_exists SIGNAL parked WAIT_FOR go';
+create table if not exists t1 select 1 as i;
connection addconroot1;
set debug_sync='now WAIT_FOR parked';
-create trigger t1_bi before insert on t1 for each row set @a:=1;;
+create trigger t1_bi before insert on t1 for each row set @a:=1;
connection addconroot2;
set debug_sync='now SIGNAL go';
connection default;
diff --git a/mysql-test/main/create-big.test b/mysql-test/main/create-big.test
index 7f20a8b42af..82a4dd82a32 100644
--- a/mysql-test/main/create-big.test
+++ b/mysql-test/main/create-big.test
@@ -395,11 +395,11 @@ select @a;
drop table t1;
# Concurrent DROP TABLE
-set debug_sync='create_table_select_before_check_if_exists SIGNAL parked WAIT_FOR go';
---send create table if not exists t1 select 1 as i;
+set debug_sync='create_table_before_check_if_exists SIGNAL parked WAIT_FOR go';
+--send create table if not exists t1 select 1 as i
connection addconroot1;
set debug_sync='now WAIT_FOR parked';
---send drop table t1;
+--send drop table t1
connection addconroot2;
# Wait until the above DROP TABLE is blocked due to CREATE TABLE
let $wait_condition=
@@ -417,11 +417,11 @@ connection default;
# Concurrent CREATE TRIGGER
create table t1 (i int);
set @a:=0;
-set debug_sync='create_table_select_before_check_if_exists SIGNAL parked WAIT_FOR go';
---send create table if not exists t1 select 1 as i;
+set debug_sync='create_table_before_check_if_exists SIGNAL parked WAIT_FOR go';
+--send create table if not exists t1 select 1 as i
connection addconroot1;
set debug_sync='now WAIT_FOR parked';
---send create trigger t1_bi before insert on t1 for each row set @a:=1;
+--send create trigger t1_bi before insert on t1 for each row set @a:=1
connection addconroot2;
# Wait until the above DROP TABLE is blocked due to CREATE TABLE
let $wait_condition=
diff --git a/mysql-test/main/create.result b/mysql-test/main/create.result
index fd017c0967b..ea9014498e0 100644
--- a/mysql-test/main/create.result
+++ b/mysql-test/main/create.result
@@ -404,7 +404,7 @@ create table t3 like mysqltest.t3;
ERROR 42S01: Table 't3' already exists
create table non_existing_database.t1 like t1;
ERROR 42000: Unknown database 'non_existing_database'
-create table t3 like non_existing_table;
+create table t4 like non_existing_table;
ERROR 42S02: Table 'test.non_existing_table' doesn't exist
create temporary table t3 like t1;
ERROR 42S01: Table 't3' already exists
@@ -914,8 +914,12 @@ unlock tables;
lock table t1 read, t2 read;
create table t2 select * from t1;
ERROR HY000: Table 't2' was locked with a READ lock and can't be updated
+create table t3 select * from t1;
+ERROR HY000: Table 't3' was not locked with LOCK TABLES
create table if not exists t2 select * from t1;
ERROR HY000: Table 't2' was locked with a READ lock and can't be updated
+create table if not exists t3 select * from t1;
+ERROR HY000: Table 't3' was not locked with LOCK TABLES
unlock tables;
lock table t1 read, t2 write;
create table t2 select * from t1;
@@ -1053,95 +1057,6 @@ USE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
ERROR 42000: Incorrect database name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
SHOW CREATE DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
ERROR 42000: Incorrect database name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
-set names utf8;
-create database имя_базы_в_кодировке_утф8_длиной_больше_чем_45;
-use имя_базы_в_кодировке_утф8_длиной_больше_чем_45;
-select database();
-database()
-имя_базы_в_кодировке_утф8_длиной_больше_чем_45
-use test;
-select SCHEMA_NAME from information_schema.schemata
-where schema_name='имя_базы_в_кодировке_утф8_длиной_больше_чем_45';
-SCHEMA_NAME
-имя_базы_в_кодировке_утф8_длиной_больше_чем_45
-drop database имя_базы_в_кодировке_утф8_длиной_больше_чем_45;
-create table имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48
-(
-имя_поля_в_кодировке_утф8_длиной_больше_чем_45 int,
-index имя_индекса_в_кодировке_утф8_длиной_больше_чем_48 (имя_поля_в_кодировке_утф8_длиной_больше_чем_45)
-);
-create view имя_вью_кодировке_утф8_длиной_больше_чем_42 as
-select имя_поля_в_кодировке_утф8_длиной_больше_чем_45
-from имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
-select * from имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
-имя_поля_в_кодировке_утф8_длиной_больше_чем_45
-select TABLE_NAME from information_schema.tables where
-table_schema='test';
-TABLE_NAME
-имя_вью_кодировке_утф8_длиной_больше_чем_42
-имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48
-select COLUMN_NAME from information_schema.columns where
-table_schema='test';
-COLUMN_NAME
-имя_поля_в_кодировке_утф8_длиной_больше_чем_45
-имя_поля_в_кодировке_утф8_длиной_больше_чем_45
-select INDEX_NAME from information_schema.statistics where
-table_schema='test';
-INDEX_NAME
-имя_индекса_в_кодировке_утф8_длиной_больше_чем_48
-select TABLE_NAME from information_schema.views where
-table_schema='test';
-TABLE_NAME
-имя_вью_кодировке_утф8_длиной_больше_чем_42
-show create table имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
-Table Create Table
-имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48 CREATE TABLE `имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48` (
- `имя_поля_в_кодировке_утф8_длиной_больше_чем_45` int(11) DEFAULT NULL,
- KEY `имя_индекса_в_кодировке_утф8_длиной_больше_чем_48` (`имя_поля_в_кодировке_утф8_длиной_больше_чем_45`)
-) ENGINE=MyISAM DEFAULT CHARSET=latin1
-show create view имя_вью_кодировке_утф8_длиной_больше_чем_42;
-View Create View character_set_client collation_connection
-имя_вью_кодировке_утф8_длиной_больше_чем_42 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `имя_вью_кодировке_утф8_длиной_больше_чем_42` AS select `имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48`.`имя_поля_в_кодировке_утф8_длиной_больше_чем_45` AS `имя_поля_в_кодировке_утф8_длиной_больше_чем_45` from `имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48` utf8 utf8_general_ci
-create trigger имя_триггера_в_кодировке_утф8_длиной_больше_чем_49
-before insert on имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48 for each row set @a:=1;
-select TRIGGER_NAME from information_schema.triggers where
-trigger_schema='test';
-TRIGGER_NAME
-имя_триггера_в_кодировке_утф8_длиной_больше_чем_49
-drop trigger имя_триггера_в_кодировке_утф8_длиной_больше_чем_49;
-create trigger
-очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66
-before insert on имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48 for each row set @a:=1;
-ERROR 42000: Identifier name 'очень_очень_очень_очень_очень_очень_очень_очень_длинна' is too long
-drop trigger очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66;
-ERROR 42000: Identifier name 'очень_очень_очень_очень_очень_очень_очень_очень_длинна' is too long
-create procedure имя_процедуры_в_кодировке_утф8_длиной_больше_чем_50()
-begin
-end;
-select ROUTINE_NAME from information_schema.routines where
-routine_schema='test';
-ROUTINE_NAME
-имя_процедуры_в_кодировке_утф8_длиной_больше_чем_50
-drop procedure имя_процедуры_в_кодировке_утф8_длиной_больше_чем_50;
-create procedure очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66()
-begin
-end;
-ERROR 42000: Identifier name 'очень_очень_очень_очень_очень_очень_очень_очень_длинна' is too long
-create function имя_функции_в_кодировке_утф8_длиной_больше_чем_49()
-returns int
-return 0;
-select ROUTINE_NAME from information_schema.routines where
-routine_schema='test';
-ROUTINE_NAME
-имя_функции_в_кодировке_утф8_длиной_больше_чем_49
-drop function имя_функции_в_кодировке_утф8_длиной_больше_чем_49;
-create function очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66()
-returns int
-return 0;
-ERROR 42000: Identifier name 'очень_очень_очень_очень_очень_очень_очень_очень_длинна' is too long
-drop view имя_вью_кодировке_утф8_длиной_больше_чем_42;
-drop table имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
-set names default;
drop table if exists t1,t2,t3;
drop function if exists f1;
create function f1() returns int
diff --git a/mysql-test/main/create.test b/mysql-test/main/create.test
index af5c427852c..4bf6ce99504 100644
--- a/mysql-test/main/create.test
+++ b/mysql-test/main/create.test
@@ -323,7 +323,7 @@ create table t3 like mysqltest.t3;
--error 1049
create table non_existing_database.t1 like t1;
--error ER_NO_SUCH_TABLE
-create table t3 like non_existing_table;
+create table t4 like non_existing_table;
--error 1050
create temporary table t3 like t1;
drop table t1, t2, t3;
@@ -768,7 +768,6 @@ drop table t1;
--error ER_CANT_AGGREGATE_2COLLATIONS
create table t1 select coalesce('a' collate latin1_swedish_ci,'b' collate latin1_bin);
-
# Base vs temporary tables dillema (a.k.a. bug#24508 "Inconsistent
# results of CREATE TABLE ... SELECT when temporary table exists").
# In this situation we either have to create non-temporary table and
@@ -776,6 +775,7 @@ create table t1 select coalesce('a' collate latin1_swedish_ci,'b' collate latin1
# permanent table. After patch for Bug#47418, we create the base table and
# instert data into it, even though a temporary table exists with the same
# name.
+
create temporary table t1 (j int);
create table if not exists t1 select 1;
select * from t1;
@@ -783,7 +783,6 @@ drop temporary table t1;
select * from t1;
drop table t1;
-
#
# CREATE TABLE ... SELECT and LOCK TABLES
#
@@ -792,6 +791,7 @@ drop table t1;
# the server doesn't crash, hang and produces sensible errors.
# Includes test for bug #20662 "Infinite loop in CREATE TABLE
# IF NOT EXISTS ... SELECT with locked tables".
+
create table t1 (i int);
insert into t1 values (1), (2);
lock tables t1 read;
@@ -812,8 +812,12 @@ unlock tables;
lock table t1 read, t2 read;
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
create table t2 select * from t1;
+--error ER_TABLE_NOT_LOCKED
+create table t3 select * from t1;
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
create table if not exists t2 select * from t1;
+--error ER_TABLE_NOT_LOCKED
+create table if not exists t3 select * from t1;
unlock tables;
lock table t1 read, t2 write;
--error ER_TABLE_EXISTS_ERROR
@@ -991,87 +995,6 @@ USE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
SHOW CREATE DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
#
-# Bug#21432 Database/Table name limited to 64 bytes, not chars, problems with multi-byte
-#
-set names utf8;
-
-create database имя_базы_в_кодировке_утф8_длиной_больше_чем_45;
-use имя_базы_в_кодировке_утф8_длиной_больше_чем_45;
-select database();
-use test;
-
-select SCHEMA_NAME from information_schema.schemata
-where schema_name='имя_базы_в_кодировке_утф8_длиной_больше_чем_45';
-
-drop database имя_базы_в_кодировке_утф8_длиной_больше_чем_45;
-create table имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48
-(
- имя_поля_в_кодировке_утф8_длиной_больше_чем_45 int,
- index имя_индекса_в_кодировке_утф8_длиной_больше_чем_48 (имя_поля_в_кодировке_утф8_длиной_больше_чем_45)
-);
-
-create view имя_вью_кодировке_утф8_длиной_больше_чем_42 as
-select имя_поля_в_кодировке_утф8_длиной_больше_чем_45
-from имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
-
-# database, table, field, key, view
-select * from имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
-
---sorted_result
-select TABLE_NAME from information_schema.tables where
-table_schema='test';
-
-select COLUMN_NAME from information_schema.columns where
-table_schema='test';
-
-select INDEX_NAME from information_schema.statistics where
-table_schema='test';
-
-select TABLE_NAME from information_schema.views where
-table_schema='test';
-
-show create table имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
-show create view имя_вью_кодировке_утф8_длиной_больше_чем_42;
-
-create trigger имя_триггера_в_кодировке_утф8_длиной_больше_чем_49
-before insert on имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48 for each row set @a:=1;
-select TRIGGER_NAME from information_schema.triggers where
-trigger_schema='test';
-drop trigger имя_триггера_в_кодировке_утф8_длиной_больше_чем_49;
---error 1059
-create trigger
-очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66
-before insert on имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48 for each row set @a:=1;
---error 1059
-drop trigger очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66;
-
-create procedure имя_процедуры_в_кодировке_утф8_длиной_больше_чем_50()
-begin
-end;
-select ROUTINE_NAME from information_schema.routines where
-routine_schema='test';
-drop procedure имя_процедуры_в_кодировке_утф8_длиной_больше_чем_50;
---error 1059
-create procedure очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66()
-begin
-end;
-
-create function имя_функции_в_кодировке_утф8_длиной_больше_чем_49()
- returns int
-return 0;
-select ROUTINE_NAME from information_schema.routines where
-routine_schema='test';
-drop function имя_функции_в_кодировке_утф8_длиной_больше_чем_49;
---error 1059
-create function очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66()
- returns int
-return 0;
-
-drop view имя_вью_кодировке_утф8_длиной_больше_чем_42;
-drop table имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
-set names default;
-
-#
# Bug#21136 CREATE TABLE SELECT within CREATE TABLE SELECT causes server crash
#
diff --git a/mysql-test/main/create_drop_binlog.result b/mysql-test/main/create_drop_binlog.result
index be40fcc140a..b8f8b61c359 100644
--- a/mysql-test/main/create_drop_binlog.result
+++ b/mysql-test/main/create_drop_binlog.result
@@ -322,8 +322,6 @@ Log_name Pos Event_type Server_id End_log_pos Info
# # Gtid 1 # GTID #-#-#
# # Query 1 # use `test`; CREATE TABLE t1(a INT, b INT)
# # Gtid 1 # GTID #-#-#
-# # Query 1 # use `test`; CREATE TABLE IF NOT EXISTS t1(a INT, b INT)
-# # Gtid 1 # GTID #-#-#
# # Query 1 # use `test`; CREATE OR REPLACE INDEX i1 ON t1(a)
# # Gtid 1 # GTID #-#-#
# # Query 1 # use `test`; CREATE OR REPLACE INDEX i1 ON t1(a)
@@ -377,8 +375,6 @@ Log_name Pos Event_type Server_id End_log_pos Info
# # Gtid 1 # GTID #-#-#
# # Query 1 # use `test`; CREATE TABLE t1(a INT, b INT)
# # Gtid 1 # GTID #-#-#
-# # Query 1 # use `test`; CREATE TABLE IF NOT EXISTS t1(a INT, b INT)
-# # Gtid 1 # GTID #-#-#
# # Query 1 # use `test`; CREATE INDEX IF NOT EXISTS i1 ON t1(a)
# # Gtid 1 # GTID #-#-#
# # Query 1 # use `test`; CREATE INDEX IF NOT EXISTS i1 ON t1(a)
diff --git a/mysql-test/main/create_drop_user.result b/mysql-test/main/create_drop_user.result
index dd75d1c257a..67717f3e4e0 100644
--- a/mysql-test/main/create_drop_user.result
+++ b/mysql-test/main/create_drop_user.result
@@ -1,22 +1,22 @@
CREATE USER IF NOT EXISTS u1@localhost IDENTIFIED BY 'pw1';
-SELECT password FROM mysql.user WHERE user='u1';
-password
-*2B602296A79E0A8784ACC5C88D92E46588CCA3C3
+SELECT plugin,authentication_string FROM mysql.user WHERE user='u1';
+plugin authentication_string
+mysql_native_password *2B602296A79E0A8784ACC5C88D92E46588CCA3C3
CREATE USER IF NOT EXISTS u1@localhost IDENTIFIED BY 'pw2';
Warnings:
Note 1973 Can't create user 'u1'@'localhost'; it already exists
-SELECT password FROM mysql.user WHERE user='u1';
-password
-*2B602296A79E0A8784ACC5C88D92E46588CCA3C3
+SELECT plugin,authentication_string FROM mysql.user WHERE user='u1';
+plugin authentication_string
+mysql_native_password *2B602296A79E0A8784ACC5C88D92E46588CCA3C3
CREATE OR REPLACE USER u1@localhost IDENTIFIED BY 'pw3';
-SELECT password FROM mysql.user WHERE user='u1';
-password
-*77B4A70CEFD76DB9415F36D291E74C110D2738E0
+SELECT plugin,authentication_string FROM mysql.user WHERE user='u1';
+plugin authentication_string
+mysql_native_password *77B4A70CEFD76DB9415F36D291E74C110D2738E0
CREATE OR REPLACE USER IF NOT EXISTS u1@localhost IDENTIFIED BY 'pw4';
ERROR HY000: Incorrect usage of OR REPLACE and IF NOT EXISTS
-SELECT password FROM mysql.user WHERE user='u1';
-password
-*77B4A70CEFD76DB9415F36D291E74C110D2738E0
+SELECT plugin,authentication_string FROM mysql.user WHERE user='u1';
+plugin authentication_string
+mysql_native_password *77B4A70CEFD76DB9415F36D291E74C110D2738E0
DROP USER IF EXISTS u1@localhost;
DROP USER IF EXISTS u1@localhost;
Warnings:
diff --git a/mysql-test/main/create_drop_user.test b/mysql-test/main/create_drop_user.test
index 949782a2daf..234383fb4ca 100644
--- a/mysql-test/main/create_drop_user.test
+++ b/mysql-test/main/create_drop_user.test
@@ -1,17 +1,17 @@
--source include/not_embedded.inc
CREATE USER IF NOT EXISTS u1@localhost IDENTIFIED BY 'pw1';
-SELECT password FROM mysql.user WHERE user='u1';
+SELECT plugin,authentication_string FROM mysql.user WHERE user='u1';
CREATE USER IF NOT EXISTS u1@localhost IDENTIFIED BY 'pw2';
-SELECT password FROM mysql.user WHERE user='u1';
+SELECT plugin,authentication_string FROM mysql.user WHERE user='u1';
CREATE OR REPLACE USER u1@localhost IDENTIFIED BY 'pw3';
-SELECT password FROM mysql.user WHERE user='u1';
+SELECT plugin,authentication_string FROM mysql.user WHERE user='u1';
--error ER_WRONG_USAGE
CREATE OR REPLACE USER IF NOT EXISTS u1@localhost IDENTIFIED BY 'pw4';
-SELECT password FROM mysql.user WHERE user='u1';
+SELECT plugin,authentication_string FROM mysql.user WHERE user='u1';
DROP USER IF EXISTS u1@localhost;
DROP USER IF EXISTS u1@localhost;
diff --git a/mysql-test/main/create_or_replace.result b/mysql-test/main/create_or_replace.result
index 54bec5c3f9d..485091e5810 100644
--- a/mysql-test/main/create_or_replace.result
+++ b/mysql-test/main/create_or_replace.result
@@ -257,12 +257,13 @@ drop table if exists test.t1,mysqltest2.t2;
Warnings:
Note 1051 Unknown table 'test.t1'
Note 1051 Unknown table 'mysqltest2.t2'
-create table test.t1 (i int);
+create table test.t1 (i int) engine=myisam;
create table mysqltest2.t2 like test.t1;
lock table test.t1 write, mysqltest2.t2 write;
select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
-# MDL_INTENTION_EXCLUSIVE NULL Global read lock
+# MDL_BACKUP_DDL NULL Backup lock
+# MDL_BACKUP_DML NULL Backup lock
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock mysqltest2
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock test
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock mysqltest2 t2
@@ -274,7 +275,8 @@ Tables_in_test
t2
select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
-# MDL_INTENTION_EXCLUSIVE NULL Global read lock
+# MDL_BACKUP_DDL NULL Backup lock
+# MDL_BACKUP_DML NULL Backup lock
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock mysqltest2
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock test
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock mysqltest2 t2
@@ -289,7 +291,8 @@ create table mysqltest2.t2 like test.t1;
lock table test.t1 write, mysqltest2.t2 write;
select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
-# MDL_INTENTION_EXCLUSIVE NULL Global read lock
+# MDL_BACKUP_DDL NULL Backup lock
+# MDL_BACKUP_DML NULL Backup lock
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock mysqltest2
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock test
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock mysqltest2 t2
@@ -301,7 +304,8 @@ Tables_in_test
t2
select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
-# MDL_INTENTION_EXCLUSIVE NULL Global read lock
+# MDL_BACKUP_DDL NULL Backup lock
+# MDL_BACKUP_DML NULL Backup lock
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock mysqltest2
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock test
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock mysqltest2 t2
@@ -311,6 +315,31 @@ select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
create table t1 (i int);
drop table t1;
+create table test.t1 (i int) engine=innodb;
+create table mysqltest2.t2 like test.t1;
+lock table test.t1 write, mysqltest2.t2 write;
+select * from information_schema.metadata_lock_info;
+THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+# MDL_BACKUP_DDL NULL Backup lock
+# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock mysqltest2
+# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock test
+# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock mysqltest2 t2
+# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock test t1
+unlock tables;
+drop table test.t1,mysqltest2.t2;
+create table test.t1 (i int) engine=aria transactional=1 checksum=1;
+create table mysqltest2.t2 like test.t1;
+lock table test.t1 write, mysqltest2.t2 write;
+select * from information_schema.metadata_lock_info;
+THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+# MDL_BACKUP_DDL NULL Backup lock
+# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock mysqltest2
+# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock test
+# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock mysqltest2 t2
+# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock test t1
+unlock tables;
+drop table t1;
+create table test.t1 (i int);
drop database mysqltest2;
#
# Testing CREATE .. LIKE
@@ -398,28 +427,32 @@ create table t1 (a int);
lock table t1 write, t2 read;
select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
-# MDL_INTENTION_EXCLUSIVE NULL Global read lock
+# MDL_BACKUP_DDL NULL Backup lock
+# MDL_BACKUP_DML NULL Backup lock
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock test
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock test t1
# MDL_SHARED_READ NULL Table metadata lock test t2
create or replace table t1 (i int);
select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
-# MDL_INTENTION_EXCLUSIVE NULL Global read lock
+# MDL_BACKUP_DDL NULL Backup lock
+# MDL_BACKUP_DML NULL Backup lock
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock test
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock test t1
# MDL_SHARED_READ NULL Table metadata lock test t2
create or replace table t1 like t2;
select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
-# MDL_INTENTION_EXCLUSIVE NULL Global read lock
+# MDL_BACKUP_DDL NULL Backup lock
+# MDL_BACKUP_DML NULL Backup lock
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock test
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock test t1
# MDL_SHARED_READ NULL Table metadata lock test t2
create or replace table t1 select 1 as f1;
select * from information_schema.metadata_lock_info;
THREAD_ID LOCK_MODE LOCK_DURATION LOCK_TYPE TABLE_SCHEMA TABLE_NAME
-# MDL_INTENTION_EXCLUSIVE NULL Global read lock
+# MDL_BACKUP_DDL NULL Backup lock
+# MDL_BACKUP_DML NULL Backup lock
# MDL_INTENTION_EXCLUSIVE NULL Schema metadata lock test
# MDL_SHARED_NO_READ_WRITE NULL Table metadata lock test t1
# MDL_SHARED_READ NULL Table metadata lock test t2
diff --git a/mysql-test/main/create_or_replace.test b/mysql-test/main/create_or_replace.test
index 4b167663742..1b4994e811f 100644
--- a/mysql-test/main/create_or_replace.test
+++ b/mysql-test/main/create_or_replace.test
@@ -210,7 +210,7 @@ drop table t1,t3,t4;
create database mysqltest2;
drop table if exists test.t1,mysqltest2.t2;
-create table test.t1 (i int);
+create table test.t1 (i int) engine=myisam;
create table mysqltest2.t2 like test.t1;
lock table test.t1 write, mysqltest2.t2 write;
--replace_column 1 #
@@ -249,6 +249,26 @@ create or replace table mysqltest2.t2 (a int) select 1 as 'a', 2 as 'a';
select * from information_schema.metadata_lock_info;
create table t1 (i int);
drop table t1;
+
+create table test.t1 (i int) engine=innodb;
+create table mysqltest2.t2 like test.t1;
+lock table test.t1 write, mysqltest2.t2 write;
+--replace_column 1 #
+--sorted_result
+select * from information_schema.metadata_lock_info;
+unlock tables;
+drop table test.t1,mysqltest2.t2;
+
+create table test.t1 (i int) engine=aria transactional=1 checksum=1;
+create table mysqltest2.t2 like test.t1;
+lock table test.t1 write, mysqltest2.t2 write;
+--replace_column 1 #
+--sorted_result
+select * from information_schema.metadata_lock_info;
+unlock tables;
+drop table t1;
+
+create table test.t1 (i int);
drop database mysqltest2;
--echo #
diff --git a/mysql-test/main/create_select_tmp.result b/mysql-test/main/create_select_tmp.result
index f499e539baf..2842ab26c42 100644
--- a/mysql-test/main/create_select_tmp.result
+++ b/mysql-test/main/create_select_tmp.result
@@ -18,3 +18,24 @@ ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
select * from t2;
ERROR 42S02: Table 'test.t2' doesn't exist
drop table t1;
+set sql_mode='ignore_bad_table_options';
+create table t1 (
+f1 int invisible,
+f2 int comment 'a comment',
+f3 int foo="bar",
+f4 int check(f4 < 10),
+f5 int without system versioning
+) with system versioning as select 1 as f1,2 as f2,3 as f3,4 as f4,5 as f5;
+Warnings:
+Warning 1911 Unknown option 'foo'
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) INVISIBLE DEFAULT NULL,
+ `f2` int(11) DEFAULT NULL COMMENT 'a comment',
+ `f3` int(11) DEFAULT NULL `foo`='bar',
+ `f4` int(11) DEFAULT NULL CHECK (`f4` < 10),
+ `f5` int(11) DEFAULT NULL WITHOUT SYSTEM VERSIONING
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 WITH SYSTEM VERSIONING
+drop table t1;
+set sql_mode=default;
diff --git a/mysql-test/main/create_select_tmp.test b/mysql-test/main/create_select_tmp.test
index ef3315aed97..3ed885ea382 100644
--- a/mysql-test/main/create_select_tmp.test
+++ b/mysql-test/main/create_select_tmp.test
@@ -37,3 +37,17 @@ select * from t2;
drop table t1;
# End of 4.1 tests
+
+set sql_mode='ignore_bad_table_options';
+create table t1 (
+ f1 int invisible,
+ f2 int comment 'a comment',
+ f3 int foo="bar",
+ f4 int check(f4 < 10),
+ f5 int without system versioning
+) with system versioning as select 1 as f1,2 as f2,3 as f3,4 as f4,5 as f5;
+show create table t1;
+drop table t1;
+set sql_mode=default;
+
+# End of 10.4 tests
diff --git a/mysql-test/main/create_user.result b/mysql-test/main/create_user.result
index 8001b43221b..8bd0ca88335 100644
--- a/mysql-test/main/create_user.result
+++ b/mysql-test/main/create_user.result
@@ -1,57 +1,57 @@
create user foo;
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N 0 0 0 0 N N 0.000000
+% foo N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N 0 0 0 0 mysql_native_password N N 0.000000
drop user foo;
create user foo identified by 'password';
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N 0 0 0 0 N N 0.000000
+% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N 0 0 0 0 mysql_native_password *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N 0.000000
drop user foo;
create user foo identified by 'password' require SSL;
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N ANY 0 0 0 0 N N 0.000000
+% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N ANY 0 0 0 0 mysql_native_password *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N 0.000000
drop user foo;
create user foo identified by 'password' require X509;
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N X509 0 0 0 0 N N 0.000000
+% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N X509 0 0 0 0 mysql_native_password *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N 0.000000
drop user foo;
create user foo identified by 'password' require CIPHER 'cipher';
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED cipher 0 0 0 0 N N 0.000000
+% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED cipher 0 0 0 0 mysql_native_password *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N 0.000000
drop user foo;
create user foo identified by 'password' require ISSUER 'issuer';
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED issuer 0 0 0 0 N N 0.000000
+% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED issuer 0 0 0 0 mysql_native_password *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N 0.000000
drop user foo;
create user foo identified by 'password' require SUBJECT 'subject';
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED subject 0 0 0 0 N N 0.000000
+% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED subject 0 0 0 0 mysql_native_password *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N 0.000000
drop user foo;
create user foo identified by 'password' require CIPHER 'cipher'
SUBJECT 'subject';
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED cipher subject 0 0 0 0 N N 0.000000
+% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED cipher subject 0 0 0 0 mysql_native_password *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N 0.000000
drop user foo;
create user foo identified by 'password' require CIPHER 'cipher'
AND SUBJECT 'subject'
AND ISSUER 'issuer';
select * from mysql.user where user = 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED cipher issuer subject 0 0 0 0 N N 0.000000
+% foo *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED cipher issuer subject 0 0 0 0 mysql_native_password *2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19 N N 0.000000
drop user foo;
create user foo, foo2 identified by 'password' require CIPHER 'cipher'
AND SUBJECT 'subject'
AND ISSUER 'issuer';
select * from mysql.user where user like 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED cipher issuer subject 0 0 0 0 N N 0.000000
+% foo N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED cipher issuer subject 0 0 0 0 mysql_native_password N N 0.000000
#--warning ER_USER_CREATE_EXISTS
create user if not exists foo, foo2 identified by 'password2'
require CIPHER 'cipher2' AND SUBJECT 'subject2' AND ISSUER 'issuer2';
@@ -60,7 +60,7 @@ Note 1973 Can't create user 'foo'@'%'; it already exists
Note 1973 Can't create user 'foo2'@'%'; it already exists
select * from mysql.user where user like 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED cipher issuer subject 0 0 0 0 N N 0.000000
+% foo N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N SPECIFIED cipher issuer subject 0 0 0 0 mysql_native_password N N 0.000000
drop user foo, foo2;
create user foo with MAX_QUERIES_PER_HOUR 10
MAX_UPDATES_PER_HOUR 20
@@ -68,5 +68,5 @@ MAX_CONNECTIONS_PER_HOUR 30
MAX_USER_CONNECTIONS 40;
select * from mysql.user where user like 'foo';
Host User Password Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Reload_priv Shutdown_priv Process_priv File_priv Grant_priv References_priv Index_priv Alter_priv Show_db_priv Super_priv Create_tmp_table_priv Lock_tables_priv Execute_priv Repl_slave_priv Repl_client_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Create_user_priv Event_priv Trigger_priv Create_tablespace_priv Delete_history_priv ssl_type ssl_cipher x509_issuer x509_subject max_questions max_updates max_connections max_user_connections plugin authentication_string password_expired is_role default_role max_statement_time
-% foo N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N 10 20 30 40 N N 0.000000
+% foo N N N N N N N N N N N N N N N N N N N N N N N N N N N N N N 10 20 30 40 mysql_native_password N N 0.000000
drop user foo;
diff --git a/mysql-test/main/create_utf8.result b/mysql-test/main/create_utf8.result
new file mode 100644
index 00000000000..e1ccf7a08d7
--- /dev/null
+++ b/mysql-test/main/create_utf8.result
@@ -0,0 +1,89 @@
+set names utf8;
+create database имя_базы_в_кодировке_утф8_длиной_больше_чем_45;
+use имя_базы_в_кодировке_утф8_длиной_больше_чем_45;
+select database();
+database()
+имя_базы_в_кодировке_утф8_длиной_больше_чем_45
+use test;
+select SCHEMA_NAME from information_schema.schemata
+where schema_name='имя_базы_в_кодировке_утф8_длиной_больше_чем_45';
+SCHEMA_NAME
+имя_базы_в_кодировке_утф8_длиной_больше_чем_45
+drop database имя_базы_в_кодировке_утф8_длиной_больше_чем_45;
+create table имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48
+(
+имя_поля_в_кодировке_утф8_длиной_больше_чем_45 int,
+index имя_индекса_в_кодировке_утф8_длиной_больше_чем_48 (имя_поля_в_кодировке_утф8_длиной_больше_чем_45)
+);
+create view имя_вью_кодировке_утф8_длиной_больше_чем_42 as
+select имя_поля_в_кодировке_утф8_длиной_больше_чем_45
+from имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
+select * from имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
+имя_поля_в_кодировке_утф8_длиной_больше_чем_45
+select TABLE_NAME from information_schema.tables where
+table_schema='test';
+TABLE_NAME
+имя_вью_кодировке_утф8_длиной_больше_чем_42
+имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48
+select COLUMN_NAME from information_schema.columns where
+table_schema='test';
+COLUMN_NAME
+имя_поля_в_кодировке_утф8_длиной_больше_чем_45
+имя_поля_в_кодировке_утф8_длиной_больше_чем_45
+select INDEX_NAME from information_schema.statistics where
+table_schema='test';
+INDEX_NAME
+имя_индекса_в_кодировке_утф8_длиной_больше_чем_48
+select TABLE_NAME from information_schema.views where
+table_schema='test';
+TABLE_NAME
+имя_вью_кодировке_утф8_длиной_больше_чем_42
+show create table имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
+Table Create Table
+имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48 CREATE TABLE `имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48` (
+ `имя_поля_в_кодировке_утф8_длиной_больше_чем_45` int(11) DEFAULT NULL,
+ KEY `имя_индекса_в_кодировке_утф8_длиной_больше_чем_48` (`имя_поля_в_кодировке_утф8_длиной_больше_чем_45`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show create view имя_вью_кодировке_утф8_длиной_больше_чем_42;
+View Create View character_set_client collation_connection
+имя_вью_кодировке_утф8_длиной_больше_чем_42 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `имя_вью_кодировке_утф8_длиной_больше_чем_42` AS select `имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48`.`имя_поля_в_кодировке_утф8_длиной_больше_чем_45` AS `имя_поля_в_кодировке_утф8_длиной_больше_чем_45` from `имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48` utf8 utf8_general_ci
+create trigger имя_триггера_в_кодировке_утф8_длиной_больше_чем_49
+before insert on имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48 for each row set @a:=1;
+select TRIGGER_NAME from information_schema.triggers where
+trigger_schema='test';
+TRIGGER_NAME
+имя_триггера_в_кодировке_утф8_длиной_больше_чем_49
+drop trigger имя_триггера_в_кодировке_утф8_длиной_больше_чем_49;
+create trigger
+очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66
+before insert on имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48 for each row set @a:=1;
+ERROR 42000: Identifier name 'очень_очень_очень_очень_очень_очень_очень_очень_длинна' is too long
+drop trigger очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66;
+ERROR 42000: Identifier name 'очень_очень_очень_очень_очень_очень_очень_очень_длинна' is too long
+create procedure имя_процедуры_в_кодировке_утф8_длиной_больше_чем_50()
+begin
+end;
+select ROUTINE_NAME from information_schema.routines where
+routine_schema='test';
+ROUTINE_NAME
+имя_процедуры_в_кодировке_утф8_длиной_больше_чем_50
+drop procedure имя_процедуры_в_кодировке_утф8_длиной_больше_чем_50;
+create procedure очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66()
+begin
+end;
+ERROR 42000: Identifier name 'очень_очень_очень_очень_очень_очень_очень_очень_длинна' is too long
+create function имя_функции_в_кодировке_утф8_длиной_больше_чем_49()
+returns int
+return 0;
+select ROUTINE_NAME from information_schema.routines where
+routine_schema='test';
+ROUTINE_NAME
+имя_функции_в_кодировке_утф8_длиной_больше_чем_49
+drop function имя_функции_в_кодировке_утф8_длиной_больше_чем_49;
+create function очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66()
+returns int
+return 0;
+ERROR 42000: Identifier name 'очень_очень_очень_очень_очень_очень_очень_очень_длинна' is too long
+drop view имя_вью_кодировке_утф8_длиной_больше_чем_42;
+drop table имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
+set names default;
diff --git a/mysql-test/main/create_utf8.test b/mysql-test/main/create_utf8.test
new file mode 100644
index 00000000000..40e814834bc
--- /dev/null
+++ b/mysql-test/main/create_utf8.test
@@ -0,0 +1,80 @@
+#
+# Bug#21432 Database/Table name limited to 64 bytes, not chars, problems with multi-byte
+#
+set names utf8;
+
+create database имя_базы_в_кодировке_утф8_длиной_больше_чем_45;
+use имя_базы_в_кодировке_утф8_длиной_больше_чем_45;
+select database();
+use test;
+
+select SCHEMA_NAME from information_schema.schemata
+where schema_name='имя_базы_в_кодировке_утф8_длиной_больше_чем_45';
+
+drop database имя_базы_в_кодировке_утф8_длиной_больше_чем_45;
+create table имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48
+(
+ имя_поля_в_кодировке_утф8_длиной_больше_чем_45 int,
+ index имя_индекса_в_кодировке_утф8_длиной_больше_чем_48 (имя_поля_в_кодировке_утф8_длиной_больше_чем_45)
+);
+
+create view имя_вью_кодировке_утф8_длиной_больше_чем_42 as
+select имя_поля_в_кодировке_утф8_длиной_больше_чем_45
+from имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
+
+# database, table, field, key, view
+select * from имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
+
+--sorted_result
+select TABLE_NAME from information_schema.tables where
+table_schema='test';
+
+select COLUMN_NAME from information_schema.columns where
+table_schema='test';
+
+select INDEX_NAME from information_schema.statistics where
+table_schema='test';
+
+select TABLE_NAME from information_schema.views where
+table_schema='test';
+
+show create table имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
+show create view имя_вью_кодировке_утф8_длиной_больше_чем_42;
+
+create trigger имя_триггера_в_кодировке_утф8_длиной_больше_чем_49
+before insert on имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48 for each row set @a:=1;
+select TRIGGER_NAME from information_schema.triggers where
+trigger_schema='test';
+drop trigger имя_триггера_в_кодировке_утф8_длиной_больше_чем_49;
+--error 1059
+create trigger
+очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66
+before insert on имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48 for each row set @a:=1;
+--error 1059
+drop trigger очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66;
+
+create procedure имя_процедуры_в_кодировке_утф8_длиной_больше_чем_50()
+begin
+end;
+select ROUTINE_NAME from information_schema.routines where
+routine_schema='test';
+drop procedure имя_процедуры_в_кодировке_утф8_длиной_больше_чем_50;
+--error 1059
+create procedure очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66()
+begin
+end;
+
+create function имя_функции_в_кодировке_утф8_длиной_больше_чем_49()
+ returns int
+return 0;
+select ROUTINE_NAME from information_schema.routines where
+routine_schema='test';
+drop function имя_функции_в_кодировке_утф8_длиной_больше_чем_49;
+--error 1059
+create function очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66()
+ returns int
+return 0;
+
+drop view имя_вью_кодировке_утф8_длиной_больше_чем_42;
+drop table имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48;
+set names default;
diff --git a/mysql-test/main/cte_nonrecursive.result b/mysql-test/main/cte_nonrecursive.result
index d80d34ecc7f..2556fd4b06b 100644
--- a/mysql-test/main/cte_nonrecursive.result
+++ b/mysql-test/main/cte_nonrecursive.result
@@ -418,10 +418,10 @@ t2.c in (with t as (select * from t1 where t1.a<5)
select t2.c from t2,t where t2.c=t.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
+1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
-2 MATERIALIZED t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
+3 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
+3 MATERIALIZED t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
explain
select t1.a,t1.b from t1,t2
where t1.a>t2.c and
@@ -461,10 +461,10 @@ t.c in (with t as (select * from t1 where t1.a<5)
select t2.c from t2,t where t2.c=t.a);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1
+1 PRIMARY <subquery4> eq_ref distinct_key distinct_key 4 func 1
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
-3 MATERIALIZED t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
+4 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
+4 MATERIALIZED t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
explain
select t1.a,t1.b from t1, (select c from t2 where c >= 4) as t
where t1.a=t.c and
@@ -507,9 +507,9 @@ select t.a, count(*) from t1,t where t1.a=t.a group by t.a;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
1 PRIMARY t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 35 func 1
-3 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
-3 MATERIALIZED t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <subquery4> eq_ref distinct_key distinct_key 35 func 1
+4 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where
+4 MATERIALIZED t1 ALL NULL NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
explain
select t.a, count(*)
from t1,
@@ -597,8 +597,8 @@ explain
select * from v2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where
-1 PRIMARY <derived3> ref key0 key0 5 test.t2.c 2
-3 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.c 2
+2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using where; Using temporary; Using filesort
# with clause in the specification of a view that whose definition
# table alias for a with table
create view v3 as
@@ -863,8 +863,8 @@ SELECT * FROM (WITH a AS (SELECT * FROM t1) SELECT 1) AS t1;
1
EXPLAIN SELECT * FROM (WITH a AS (SELECT * FROM t1) SELECT 1) AS t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> system NULL NULL NULL NULL 1
-2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
+1 PRIMARY <derived3> system NULL NULL NULL NULL 1
+3 DERIVED NULL NULL NULL NULL NULL NULL NULL No tables used
DROP TABLE t1;
#
# MDEV-10058: Suspicious EXPLAIN output for a derived table + WITH + joined table
@@ -1116,17 +1116,17 @@ select * from cte_e as cte_e1 where a > 1
union
select * from cte_e as cte_e2;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 14 100.00 Using where
-2 DERIVED t1 ALL NULL NULL NULL NULL 7 100.00 Using where
+1 PRIMARY <derived4> ALL NULL NULL NULL NULL 14 100.00 Using where
+4 DERIVED t1 ALL NULL NULL NULL NULL 7 100.00 Using where
5 UNION t1 ALL NULL NULL NULL NULL 7 100.00 Using where
-NULL UNION RESULT <union2,5> ALL NULL NULL NULL NULL NULL NULL
-6 UNION <derived9> ALL NULL NULL NULL NULL 14 100.00
-9 DERIVED t1 ALL NULL NULL NULL NULL 7 100.00 Using where
+NULL UNION RESULT <union4,5> ALL NULL NULL NULL NULL NULL NULL
+6 UNION <derived11> ALL NULL NULL NULL NULL 14 100.00
+11 DERIVED t1 ALL NULL NULL NULL NULL 7 100.00 Using where
12 UNION t1 ALL NULL NULL NULL NULL 7 100.00 Using where
-NULL UNION RESULT <union9,12> ALL NULL NULL NULL NULL NULL NULL
+NULL UNION RESULT <union11,12> ALL NULL NULL NULL NULL NULL NULL
NULL UNION RESULT <union1,6> ALL NULL NULL NULL NULL NULL NULL
Warnings:
-Note 1003 with cte_e as (with cte_o as (with cte_i as (/* select#4 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 7)/* select#3 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 1)/* select#2 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7 and `test`.`t1`.`a` > 1 union /* select#5 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 4 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7 and `test`.`t1`.`a` > 1)/* select#1 */ select `cte_e1`.`a` AS `a` from `cte_e` `cte_e1` where `cte_e1`.`a` > 1 union /* select#6 */ select `cte_e2`.`a` AS `a` from (with cte_o as (with cte_i as (/* select#11 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 7)/* select#10 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 1)/* select#9 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7 union /* select#12 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 4 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7) `cte_e2`
+Note 1003 with cte_e as (with cte_o as (with cte_i as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 7)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 1)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7 and `test`.`t1`.`a` > 1 union select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 4 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7 and `test`.`t1`.`a` > 1)select `cte_e1`.`a` AS `a` from `cte_e` `cte_e1` where `cte_e1`.`a` > 1 union select `cte_e2`.`a` AS `a` from (with cte_o as (with cte_i as (select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 7)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 1)select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 3 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7 union select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 4 and `test`.`t1`.`a` > 1 and `test`.`t1`.`a` < 7) `cte_e2`
drop table t1;
#
# MDEV-13753: embedded CTE in a VIEW created in prepared statement
diff --git a/mysql-test/main/cte_recursive.result b/mysql-test/main/cte_recursive.result
index f2ae9929145..0b22da8f72a 100644
--- a/mysql-test/main/cte_recursive.result
+++ b/mysql-test/main/cte_recursive.result
@@ -1409,7 +1409,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "686",
"join_type": "BNL",
"attached_condition": "prev_gen.father = folks.`id` or prev_gen.mother = folks.`id`"
}
@@ -1636,16 +1636,16 @@ i div 4 - (i % 4) = ps.i div 4 - (ps.i % 4)
)
SELECT regexp_replace(board,concat('(',REPEAT('.', 4),')'),'\\1\n') n_queens FROM solutions WHERE n_queens = 4;
n_queens
--*--
----*
-*---
---*-
-
--*-
*---
---*
-*--
+-*--
+---*
+*---
+--*-
+
#
# MDEV-10883: execution of prepared statement from SELECT
# with recursive CTE that renames columns
diff --git a/mysql-test/main/ctype_binary.result b/mysql-test/main/ctype_binary.result
index 8059e02611e..758c456754c 100644
--- a/mysql-test/main/ctype_binary.result
+++ b/mysql-test/main/ctype_binary.result
@@ -2766,11 +2766,11 @@ KEY(date_column));
INSERT INTO t1 VALUES (1,'2010-09-01'),(2,'2010-10-01');
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range date_column date_column 4 NULL 1 Using index condition
+1 SIMPLE t1 range date_column date_column 4 NULL 2 Using index condition
ALTER TABLE t1 MODIFY date_column DATETIME DEFAULT NULL;
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range date_column date_column 6 NULL 1 Using index condition
+1 SIMPLE t1 range date_column date_column 6 NULL 2 Using index condition
DROP TABLE t1;
#
# Bug #31384 DATE_ADD() and DATE_SUB() return binary data
diff --git a/mysql-test/main/ctype_cp1251.result b/mysql-test/main/ctype_cp1251.result
index 2da53740619..548335af8ae 100644
--- a/mysql-test/main/ctype_cp1251.result
+++ b/mysql-test/main/ctype_cp1251.result
@@ -3175,11 +3175,11 @@ KEY(date_column));
INSERT INTO t1 VALUES (1,'2010-09-01'),(2,'2010-10-01');
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range date_column date_column 4 NULL 1 Using index condition
+1 SIMPLE t1 range date_column date_column 4 NULL 2 Using index condition
ALTER TABLE t1 MODIFY date_column DATETIME DEFAULT NULL;
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range date_column date_column 6 NULL 1 Using index condition
+1 SIMPLE t1 range date_column date_column 6 NULL 2 Using index condition
DROP TABLE t1;
#
# Bug #31384 DATE_ADD() and DATE_SUB() return binary data
diff --git a/mysql-test/main/ctype_gbk.result b/mysql-test/main/ctype_gbk.result
index de056dffe2a..9bf69584725 100644
--- a/mysql-test/main/ctype_gbk.result
+++ b/mysql-test/main/ctype_gbk.result
@@ -480,7 +480,11 @@ b MEDIUMTEXT CHARACTER SET big5);
INSERT INTO t1 VALUES
(REPEAT(0x1125,200000), REPEAT(0x1125,200000)), ('', ''), ('', '');
SELECT a FROM t1 GROUP BY 1 LIMIT 1 INTO @nullll;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT b FROM t1 GROUP BY 1 LIMIT 1 INTO @nullll;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
DROP TABLES t1;
End of 5.0 tests
#
diff --git a/mysql-test/main/ctype_latin1.result b/mysql-test/main/ctype_latin1.result
index 705c719405b..59a706a7fd8 100644
--- a/mysql-test/main/ctype_latin1.result
+++ b/mysql-test/main/ctype_latin1.result
@@ -3472,11 +3472,11 @@ KEY(date_column));
INSERT INTO t1 VALUES (1,'2010-09-01'),(2,'2010-10-01');
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range date_column date_column 4 NULL 1 Using index condition
+1 SIMPLE t1 range date_column date_column 4 NULL 2 Using index condition
ALTER TABLE t1 MODIFY date_column DATETIME DEFAULT NULL;
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range date_column date_column 6 NULL 1 Using index condition
+1 SIMPLE t1 range date_column date_column 6 NULL 2 Using index condition
DROP TABLE t1;
#
# Bug #31384 DATE_ADD() and DATE_SUB() return binary data
diff --git a/mysql-test/main/ctype_ucs.result b/mysql-test/main/ctype_ucs.result
index dee9da3ce87..a59a25db4e6 100644
--- a/mysql-test/main/ctype_ucs.result
+++ b/mysql-test/main/ctype_ucs.result
@@ -207,6 +207,8 @@ DROP TABLE t1;
# Problem # 1 (original report): wrong parsing of ucs2 data
SET character_set_connection=ucs2;
SELECT '00' UNION SELECT '10' INTO OUTFILE 'tmpp.txt';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE TABLE t1(a INT);
LOAD DATA INFILE 'tmpp.txt' INTO TABLE t1 CHARACTER SET ucs2
(@b) SET a=REVERSE(@b);
@@ -218,6 +220,8 @@ a
DROP TABLE t1;
# Problem # 2 : if you write and read ucs2 data to a file they're lost
SELECT '00' UNION SELECT '10' INTO OUTFILE 'tmpp2.txt' CHARACTER SET ucs2;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE TABLE t1(a INT);
LOAD DATA INFILE 'tmpp2.txt' INTO TABLE t1 CHARACTER SET ucs2
(@b) SET a=REVERSE(@b);
@@ -252,10 +256,10 @@ t1 CREATE TABLE `t1` (
insert into t1 values ("a"),("abc"),("abcd"),("hello"),("test");
explain select * from t1 where a like 'abc%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 23 NULL 1 Using where; Using index
+1 SIMPLE t1 range a a 23 NULL 2 Using where; Using index
explain select * from t1 where a like concat('abc','%');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 23 NULL 1 Using where; Using index
+1 SIMPLE t1 range a a 23 NULL 2 Using where; Using index
select * from t1 where a like "abc%";
a
abc
@@ -1553,7 +1557,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 23 NULL 1 Using where; Using index
EXPLAIN SELECT * FROM t1 WHERE a LIKE 'c%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 23 NULL 30 Using where; Using index
+1 SIMPLE t1 index a a 23 NULL 31 Using where; Using index
SELECT * FROM t1 WHERE a LIKE 'c%';
a
ca
@@ -1569,7 +1573,7 @@ ch
ALTER TABLE t1 MODIFY a VARCHAR(10) CHARACTER SET ucs2 COLLATE ucs2_croatian_ci;
EXPLAIN SELECT * FROM t1 WHERE a LIKE 'd%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 23 NULL 30 Using where; Using index
+1 SIMPLE t1 index a a 23 NULL 31 Using where; Using index
SELECT hex(concat('d',_ucs2 0x017E,'%'));
hex(concat('d',_ucs2 0x017E,'%'))
0064017E0025
@@ -4352,11 +4356,11 @@ KEY(date_column));
INSERT INTO t1 VALUES (1,'2010-09-01'),(2,'2010-10-01');
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range date_column date_column 4 NULL 1 Using index condition
+1 SIMPLE t1 range date_column date_column 4 NULL 2 Using index condition
ALTER TABLE t1 MODIFY date_column DATETIME DEFAULT NULL;
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range date_column date_column 6 NULL 1 Using index condition
+1 SIMPLE t1 range date_column date_column 6 NULL 2 Using index condition
DROP TABLE t1;
#
# Bug #31384 DATE_ADD() and DATE_SUB() return binary data
@@ -4571,7 +4575,7 @@ SELECT SEC_TO_TIME(CONVERT(900*24*60*60 USING ucs2));
SEC_TO_TIME(CONVERT(900*24*60*60 USING ucs2))
838:59:59.999999
Warnings:
-Warning 1292 Truncated incorrect time value: '77760000'
+Warning 1292 Truncated incorrect seconds value: '77760000'
#
# MDEV-13530 VARBINARY doesn't convert to to BLOB for sizes 65533, 65534 and 65535
#
@@ -6379,3 +6383,39 @@ DEALLOCATE PREPARE stmt;
#
# End of 10.2 tests
#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-17995 INET6_NTOA(ucs2_input) erroneously returns NULL
+#
+SELECT HEX(INET6_ATON('1::1')), HEX(INET6_ATON(CONVERT('1::1' USING ucs2)));
+HEX(INET6_ATON('1::1')) HEX(INET6_ATON(CONVERT('1::1' USING ucs2)))
+00010000000000000000000000000001 00010000000000000000000000000001
+#
+# MDEV-19184 Crash in IS_IPV6(_ucs2 0x0031)
+#
+SET NAMES utf8;
+SELECT IS_IPV6(_ucs2 0x0031);
+IS_IPV6(_ucs2 0x0031)
+0
+SELECT IS_IPV4(_ucs2 0x0031);
+IS_IPV4(_ucs2 0x0031)
+0
+SELECT IS_IPV6(_ucs2 0x003A003A);
+IS_IPV6(_ucs2 0x003A003A)
+1
+SELECT IS_IPV4(_ucs2 0x00310030002E0030002E0030002E0031);
+IS_IPV4(_ucs2 0x00310030002E0030002E0030002E0031)
+1
+SET NAMES utf8, collation_connection=ucs2_bin;
+SELECT IS_IPV6('::');
+IS_IPV6('::')
+1
+SELECT IS_IPV4('10.0.0.1');
+IS_IPV4('10.0.0.1')
+1
+SET NAMES utf8;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/ctype_ucs.test b/mysql-test/main/ctype_ucs.test
index d0d463c0340..7a772a092b1 100644
--- a/mysql-test/main/ctype_ucs.test
+++ b/mysql-test/main/ctype_ucs.test
@@ -1094,3 +1094,33 @@ DEALLOCATE PREPARE stmt;
--echo #
--echo # End of 10.2 tests
--echo #
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-17995 INET6_NTOA(ucs2_input) erroneously returns NULL
+--echo #
+
+SELECT HEX(INET6_ATON('1::1')), HEX(INET6_ATON(CONVERT('1::1' USING ucs2)));
+
+--echo #
+--echo # MDEV-19184 Crash in IS_IPV6(_ucs2 0x0031)
+--echo #
+
+SET NAMES utf8;
+SELECT IS_IPV6(_ucs2 0x0031);
+SELECT IS_IPV4(_ucs2 0x0031);
+
+SELECT IS_IPV6(_ucs2 0x003A003A);
+SELECT IS_IPV4(_ucs2 0x00310030002E0030002E0030002E0031);
+
+SET NAMES utf8, collation_connection=ucs2_bin;
+SELECT IS_IPV6('::');
+SELECT IS_IPV4('10.0.0.1');
+SET NAMES utf8;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/ctype_upgrade.result b/mysql-test/main/ctype_upgrade.result
index 5f0be66f8fb..9d19c3b5203 100644
--- a/mysql-test/main/ctype_upgrade.result
+++ b/mysql-test/main/ctype_upgrade.result
@@ -235,12 +235,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -258,9 +258,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
@@ -294,12 +294,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -317,9 +317,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
diff --git a/mysql-test/main/ctype_utf16.result b/mysql-test/main/ctype_utf16.result
index fb7ae1f62bb..1417e0ba752 100644
--- a/mysql-test/main/ctype_utf16.result
+++ b/mysql-test/main/ctype_utf16.result
@@ -133,10 +133,10 @@ t1 CREATE TABLE `t1` (
insert into t1 values ("a"),("abc"),("abcd"),("hello"),("test");
explain select * from t1 where a like 'abc%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 43 NULL 1 Using where; Using index
+1 SIMPLE t1 range a a 43 NULL 2 Using where; Using index
explain select * from t1 where a like concat('abc','%');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 43 NULL 1 Using where; Using index
+1 SIMPLE t1 range a a 43 NULL 2 Using where; Using index
select * from t1 where a like "abc%";
a
abc
diff --git a/mysql-test/main/ctype_utf16_uca.result b/mysql-test/main/ctype_utf16_uca.result
index 0cb9c4c74c1..17f52a91c7b 100644
--- a/mysql-test/main/ctype_utf16_uca.result
+++ b/mysql-test/main/ctype_utf16_uca.result
@@ -3999,7 +3999,7 @@ D801DC28 30D2 𐐨
ALTER TABLE t1 ADD KEY(c);
EXPLAIN SELECT hex(c) FROM t1 WHERE c LIKE 'a%' ORDER BY c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 43 NULL 2 Using where; Using index
+1 SIMPLE t1 range c c 43 NULL 3 Using where; Using index
SELECT hex(c), hex(weight_string(c)) FROM t1 WHERE c LIKE 'a%' ORDER BY c;
hex(c) hex(weight_string(c))
0061 120F
diff --git a/mysql-test/main/ctype_utf16le.result b/mysql-test/main/ctype_utf16le.result
index ba7a2383671..633809b51fa 100644
--- a/mysql-test/main/ctype_utf16le.result
+++ b/mysql-test/main/ctype_utf16le.result
@@ -136,10 +136,10 @@ t1 CREATE TABLE `t1` (
insert into t1 values ("a"),("abc"),("abcd"),("hello"),("test");
explain select * from t1 where a like 'abc%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 43 NULL 1 Using where; Using index
+1 SIMPLE t1 range a a 43 NULL 2 Using where; Using index
explain select * from t1 where a like concat('abc','%');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 43 NULL 1 Using where; Using index
+1 SIMPLE t1 range a a 43 NULL 2 Using where; Using index
select * from t1 where a like "abc%";
a
abc
diff --git a/mysql-test/main/ctype_utf32.result b/mysql-test/main/ctype_utf32.result
index 47e739df290..2da1f22f592 100644
--- a/mysql-test/main/ctype_utf32.result
+++ b/mysql-test/main/ctype_utf32.result
@@ -132,10 +132,10 @@ t1 CREATE TABLE `t1` (
insert into t1 values ("a"),("abc"),("abcd"),("hello"),("test");
explain select * from t1 where a like 'abc%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 43 NULL 1 Using where; Using index
+1 SIMPLE t1 range a a 43 NULL 2 Using where; Using index
explain select * from t1 where a like concat('abc','%');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 43 NULL 1 Using where; Using index
+1 SIMPLE t1 range a a 43 NULL 2 Using where; Using index
select * from t1 where a like "abc%";
a
abc
diff --git a/mysql-test/main/ctype_utf32_uca.result b/mysql-test/main/ctype_utf32_uca.result
index a112918c0c3..aa8d2002bee 100644
--- a/mysql-test/main/ctype_utf32_uca.result
+++ b/mysql-test/main/ctype_utf32_uca.result
@@ -4019,7 +4019,7 @@ hex(c) hex(weight_string(c)) c
ALTER TABLE t1 ADD KEY(c);
EXPLAIN SELECT hex(c) FROM t1 WHERE c LIKE 'a%' ORDER BY c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 43 NULL 2 Using where; Using index
+1 SIMPLE t1 range c c 43 NULL 3 Using where; Using index
SELECT hex(c), hex(weight_string(c)) FROM t1 WHERE c LIKE 'a%' ORDER BY c;
hex(c) hex(weight_string(c))
00000061 120F
diff --git a/mysql-test/main/ctype_utf8.result b/mysql-test/main/ctype_utf8.result
index 6a72f60a437..5f66e60bc65 100644
--- a/mysql-test/main/ctype_utf8.result
+++ b/mysql-test/main/ctype_utf8.result
@@ -5223,11 +5223,11 @@ KEY(date_column));
INSERT INTO t1 VALUES (1,'2010-09-01'),(2,'2010-10-01');
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range date_column date_column 4 NULL 1 Using index condition
+1 SIMPLE t1 range date_column date_column 4 NULL 2 Using index condition
ALTER TABLE t1 MODIFY date_column DATETIME DEFAULT NULL;
EXPLAIN SELECT * FROM t1 WHERE date_column BETWEEN '2010-09-01' AND '2010-10-01';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range date_column date_column 6 NULL 1 Using index condition
+1 SIMPLE t1 range date_column date_column 6 NULL 2 Using index condition
DROP TABLE t1;
#
# Bug #31384 DATE_ADD() and DATE_SUB() return binary data
diff --git a/mysql-test/main/custom_aggregate_functions.result b/mysql-test/main/custom_aggregate_functions.result
index 0a27334f58e..b98954b920e 100644
--- a/mysql-test/main/custom_aggregate_functions.result
+++ b/mysql-test/main/custom_aggregate_functions.result
@@ -37,7 +37,7 @@ set x=5;
fetch group next row;
return x+1;
end |
-ERROR HY000: Non-aggregate function contains aggregate specific instructions: (FETCH GROUP NEXT ROW)
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
create aggregate function f1(x INT) returns INT
begin
declare continue handler for not found return x;
@@ -1153,3 +1153,36 @@ i sum(i)
NULL 8
drop function agg_sum;
drop table t1;
+#
+# MDEV-18813 PROCEDURE and anonymous blocks silently ignore FETCH GROUP NEXT ROW
+#
+CREATE PROCEDURE p1()
+BEGIN
+FETCH GROUP NEXT ROW;
+END;
+$$
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+BEGIN NOT ATOMIC
+FETCH GROUP NEXT ROW;
+END;
+$$
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+CREATE DEFINER=root@localhost FUNCTION f1() RETURNS INT
+BEGIN
+FETCH GROUP NEXT ROW;
+RETURN 0;
+END;
+$$
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+CREATE TABLE t1 (a INT);
+CREATE TRIGGER tr1
+AFTER INSERT ON t1 FOR EACH ROW
+FETCH GROUP NEXT ROW;
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+DROP TABLE t1;
+CREATE EVENT ev1
+ON SCHEDULE EVERY 1 HOUR
+STARTS CURRENT_TIMESTAMP + INTERVAL 1 MONTH
+ENDS CURRENT_TIMESTAMP + INTERVAL 1 MONTH + INTERVAL 1 WEEK
+DO FETCH GROUP NEXT ROW;
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
diff --git a/mysql-test/main/custom_aggregate_functions.test b/mysql-test/main/custom_aggregate_functions.test
index ab799b48bdb..a10ea44af60 100644
--- a/mysql-test/main/custom_aggregate_functions.test
+++ b/mysql-test/main/custom_aggregate_functions.test
@@ -965,3 +965,54 @@ select i, sum(i) from t1 group by i with rollup;
# Cleanup
drop function agg_sum;
drop table t1;
+
+
+--echo #
+--echo # MDEV-18813 PROCEDURE and anonymous blocks silently ignore FETCH GROUP NEXT ROW
+--echo #
+
+
+DELIMITER $$;
+--error ER_NOT_AGGREGATE_FUNCTION
+CREATE PROCEDURE p1()
+BEGIN
+ FETCH GROUP NEXT ROW;
+END;
+$$
+DELIMITER ;$$
+
+
+DELIMITER $$;
+--error ER_NOT_AGGREGATE_FUNCTION
+BEGIN NOT ATOMIC
+ FETCH GROUP NEXT ROW;
+END;
+$$
+DELIMITER ;$$
+
+
+DELIMITER $$;
+--error ER_NOT_AGGREGATE_FUNCTION
+CREATE DEFINER=root@localhost FUNCTION f1() RETURNS INT
+BEGIN
+ FETCH GROUP NEXT ROW;
+ RETURN 0;
+END;
+$$
+DELIMITER ;$$
+
+
+CREATE TABLE t1 (a INT);
+--error ER_NOT_AGGREGATE_FUNCTION
+CREATE TRIGGER tr1
+ AFTER INSERT ON t1 FOR EACH ROW
+ FETCH GROUP NEXT ROW;
+DROP TABLE t1;
+
+
+--error ER_NOT_AGGREGATE_FUNCTION
+CREATE EVENT ev1
+ ON SCHEDULE EVERY 1 HOUR
+ STARTS CURRENT_TIMESTAMP + INTERVAL 1 MONTH
+ ENDS CURRENT_TIMESTAMP + INTERVAL 1 MONTH + INTERVAL 1 WEEK
+DO FETCH GROUP NEXT ROW;
diff --git a/mysql-test/main/date_formats.result b/mysql-test/main/date_formats.result
index 16dbdf7969c..463cce39520 100644
--- a/mysql-test/main/date_formats.result
+++ b/mysql-test/main/date_formats.result
@@ -442,7 +442,7 @@ f1 f2 f3
Warnings:
Warning 1292 Truncated incorrect datetime value: '2003-01-02 10:11:12.0012ABCD'
Warning 1292 Truncated incorrect time value: '-01:01:01.01 GGG'
-Warning 1292 Truncated incorrect time value: '1997-12-31 23:59:59.01XXXX'
+Warning 1292 Truncated incorrect datetime value: '1997-12-31 23:59:59.01XXXX'
select str_to_date("2003-04-05 g", "%Y-%m-%d") as f1,
str_to_date("2003-04-05 10:11:12.101010234567", "%Y-%m-%d %H:%i:%S.%f") as f2;
f1 f2
diff --git a/mysql-test/main/deadlock_innodb.result b/mysql-test/main/deadlock_innodb.result
index af78a6aa9d5..fca0ff6be0c 100644
--- a/mysql-test/main/deadlock_innodb.result
+++ b/mysql-test/main/deadlock_innodb.result
@@ -72,7 +72,7 @@ insert into t1 values(0, 0), (300, 300);
insert into t2 values(0, 0), (1, 20), (2, 30);
commit;
connection con1;
-select a,b from t2 UNION SELECT id, x from t1 FOR UPDATE;
+select a,b from t2 UNION (SELECT id, x from t1 FOR UPDATE);
a b
0 0
20 1
diff --git a/mysql-test/main/default.result b/mysql-test/main/default.result
index c214e529d72..73417d0d64e 100644
--- a/mysql-test/main/default.result
+++ b/mysql-test/main/default.result
@@ -2217,8 +2217,6 @@ t1 CREATE TABLE `t1` (
`b` bigint(20) DEFAULT (cast(`a` as signed))
) ENGINE=MyISAM DEFAULT CHARSET=latin1
INSERT INTO t1 (a) VALUES (0xFFFFFFFFFFFFFFFF);
-Warnings:
-Note 1105 Cast to signed converted positive out-of-range integer to it's negative complement
SELECT * FROM t1;
a b
18446744073709551615 -1
diff --git a/mysql-test/main/delayed.result b/mysql-test/main/delayed.result
index 613e214751e..ec36e3af46d 100644
--- a/mysql-test/main/delayed.result
+++ b/mysql-test/main/delayed.result
@@ -500,7 +500,8 @@ call mtr.add_suppression("Checking table");
insert delayed into t1 values (2,2);
Warnings:
Error 145 Table './test/t1' is marked as crashed and should be repaired
-Error 1034 1 client is using or hasn't closed the table properly
+Warning 1034 1 client is using or hasn't closed the table properly
+Note 1034 Table is fixed
insert delayed into t1 values (3,3);
flush tables t1;
select * from t1;
diff --git a/mysql-test/main/delete_use_source.result b/mysql-test/main/delete_use_source.result
index 08da8901528..0ce010eb415 100644
--- a/mysql-test/main/delete_use_source.result
+++ b/mysql-test/main/delete_use_source.result
@@ -6,6 +6,7 @@ insert t1 select 2,seq from seq_1_to_50;
insert t1 select 3,seq from seq_1_to_20;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
#
# Delete with limit (quick select - range acces)
@@ -47,8 +48,8 @@ rollback;
start transaction;
explain delete from v1 where (select count(*) from t1 b where b.c1=v1.c1) = 500 limit 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL c1 NULL NULL NULL 502 Using where
-2 DEPENDENT SUBQUERY b ref c1 c1 4 test.t1.c1 58 Using index
+1 PRIMARY t1 range c1 c1 4 NULL 502 Using where
+2 DEPENDENT SUBQUERY b ref c1 c1 4 test.t1.c1 167 Using index
delete from v1 where (select count(*) from t1 b where b.c1=v1.c1) = 500 limit 1;
affected rows: 1
delete from v1 where (select count(*) from t1 b where b.c1=v1.c1) = 500 limit 1;
@@ -63,8 +64,8 @@ rollback;
start transaction;
explain delete from v1 where (select count(*) from t1 b where b.c1=v1.c1) = 500;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL c1 NULL NULL NULL 502 Using where
-2 DEPENDENT SUBQUERY b ref c1 c1 4 test.t1.c1 58 Using index
+1 PRIMARY t1 ALL c1 NULL NULL NULL 670 Using where
+2 DEPENDENT SUBQUERY b ref c1 c1 4 test.t1.c1 167 Using index
delete from v1 where (select count(*) from t1 b where b.c1=v1.c1) = 500 ;
affected rows: 500
select count(*) from v1 where c1=0;
diff --git a/mysql-test/main/deprecated_features.result b/mysql-test/main/deprecated_features.result
index fc6c86d065d..2c77d745e2e 100644
--- a/mysql-test/main/deprecated_features.result
+++ b/mysql-test/main/deprecated_features.result
@@ -5,7 +5,7 @@ ERROR HY000: Unknown system variable 'table_type'
select @@table_type='MyISAM';
ERROR HY000: Unknown system variable 'table_type'
backup table t1 to 'data.txt';
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'backup table t1 to 'data.txt'' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'table t1 to 'data.txt'' at line 1
restore table t1 from 'data.txt';
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'restore table t1 from 'data.txt'' at line 1
show plugin;
diff --git a/mysql-test/main/derived.result b/mysql-test/main/derived.result
index 857246d68b4..abf087f891c 100644
--- a/mysql-test/main/derived.result
+++ b/mysql-test/main/derived.result
@@ -632,7 +632,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DERIVED t1 system NULL NULL NULL NULL 1 100.00
Warnings:
Note 1276 Field or reference 'sq.f2' of SELECT #3 was resolved in SELECT #1
-Note 1003 /* select#1 */ select 6 AS `f1` from <materialize> (/* select#4 */ select `test`.`t2`.`f3` from `test`.`t2` having `test`.`t2`.`f3` >= 8) semi join (`test`.`t2`) where `test`.`t2`.`f3` = 6 and `<subquery4>`.`f3` = 9
+Note 1003 /* select#1 */ select 6 AS `f1` from <materialize> (/* select#4 */ select `test`.`t2`.`f3` from `test`.`t2` having `test`.`t2`.`f3` >= 8) semi join (`test`.`t2`) where `<subquery4>`.`f3` = 9 and `test`.`t2`.`f3` = 6
DROP TABLE t2,t1;
#
# MDEV-9462: Out of memory using explain on 2 empty tables
diff --git a/mysql-test/main/derived_cond_pushdown.result b/mysql-test/main/derived_cond_pushdown.result
index 8086c4480f6..54068c4d6dd 100644
--- a/mysql-test/main/derived_cond_pushdown.result
+++ b/mysql-test/main/derived_cond_pushdown.result
@@ -136,7 +136,7 @@ EXPLAIN
"attached_condition": "v1.max_c > 214"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "t2.a > v1.a",
"materialized": {
@@ -291,7 +291,7 @@ EXPLAIN
"attached_condition": "v1.max_c > 400 or v1.max_c < 135"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.max_c > 400 and t2.a > v1.a or v1.max_c < 135 and t2.a < v1.a",
"materialized": {
@@ -359,7 +359,7 @@ EXPLAIN
"attached_condition": "v1.max_c > 300 or v1.max_c < 135"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.b = t2.b and v1.max_c > 300 and v1.avg_c > t2.d or v1.a = t2.a and v1.max_c < 135 and v1.max_c < t2.c",
"materialized": {
@@ -416,7 +416,7 @@ EXPLAIN
"attached_condition": "v1.a > 6"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "t2.b > v1.b",
"materialized": {
@@ -483,7 +483,7 @@ EXPLAIN
"attached_condition": "v2.b > 25"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "t2.a < v2.a",
"materialized": {
@@ -571,7 +571,7 @@ EXPLAIN
"attached_condition": "v1.a > 7 or v1.a < 2"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.a > 7 and t2.c < v1.max_c or v1.a < 2 and t2.b < v1.b",
"materialized": {
@@ -652,7 +652,7 @@ EXPLAIN
"attached_condition": "v2.a > 7 or v2.a > 5"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v2.a > 7 and t2.c < v2.max_c or v2.a > 5 and t2.b < v2.b",
"materialized": {
@@ -719,7 +719,7 @@ EXPLAIN
"attached_condition": "v1.a > 4 or v1.a < 2"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.a > 4 and v1.b > t2.b and v1.max_c = t2.d or v1.a < 2 and v1.max_c < t2.c and v1.max_c = t2.d",
"materialized": {
@@ -779,7 +779,7 @@ EXPLAIN
"attached_condition": "v1.a < 2 and v1.max_c > 400"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "t2.b > v1.b",
"materialized": {
@@ -991,7 +991,7 @@ EXPLAIN
"attached_condition": "v1.a > 7 and v1.max_c > 300 or v1.a < 4 and v1.max_c < 500"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.a > 7 and v1.max_c > 300 and t2.c < v1.max_c or v1.a < 4 and v1.max_c < 500 and t2.b < v1.b",
"materialized": {
@@ -1080,7 +1080,7 @@ EXPLAIN
"attached_condition": "v1.a < 2 and v1.max_c > 120 or v1.a > 7"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.a < 2 and v1.max_c > 120 or v1.a > 7",
"materialized": {
@@ -1158,7 +1158,7 @@ EXPLAIN
"attached_condition": "v1.a < 2 and v1.max_c > 120 or v1.a > 7"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.b = t2.b and v1.a < 2 and v1.max_c > 120 or v1.a > 7",
"materialized": {
@@ -1225,7 +1225,7 @@ EXPLAIN
"attached_condition": "v1.a < 2 and v1.max_c < 200 or v1.a > 4"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.a < 2 and v1.max_c < 200 and t2.c > v1.max_c and v1.max_c = t2.d or v1.max_c = t2.c and v1.a > 4 and t2.c < 500 and t2.b < v1.b",
"materialized": {
@@ -1302,7 +1302,7 @@ EXPLAIN
"attached_condition": "v1.max_c > 400 or v1.max_c < 135"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.max_c > 400 and t2.a > v1.a or v1.max_c < 135 and t2.a < v1.a",
"materialized": {
@@ -1345,7 +1345,7 @@ EXPLAIN
"attached_condition": "v1.max_c > 400 or v1.max_c < 135"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.max_c > 400 and t2.a > v1.a or v1.max_c < 135 and t2.a < v1.a",
"materialized": {
@@ -1684,7 +1684,7 @@ EXPLAIN
"attached_condition": "v1.a = 3 and v1.b = 3"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -1734,7 +1734,7 @@ EXPLAIN
"attached_condition": "v1.a = 1 and v1.b = 21"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -1807,7 +1807,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "220",
"join_type": "BNL",
"attached_condition": "t.b = v.b or v.max_c > 20"
}
@@ -1931,7 +1931,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.a = t2.a and t2.a < 4 or v1.max_c = t2.c and t2.c > 150",
"materialized": {
@@ -2061,7 +2061,7 @@ EXPLAIN
"attached_condition": "v1.a = 8 and v1.max_c = 404"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -2359,7 +2359,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "162",
"join_type": "BNL"
}
}
@@ -2471,7 +2471,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -2598,7 +2598,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.a = t2.a or v1.b = t2.b and (v1.a = 1 or v1.a = 6)",
"materialized": {
@@ -2693,7 +2693,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.a = 1 or v1.b = 21 or t2.a = 2",
"materialized": {
@@ -2761,7 +2761,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.a < t2.a or t2.a < 11",
"materialized": {
@@ -2943,7 +2943,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.b is not null",
"materialized": {
@@ -3038,7 +3038,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.a = t2.a or t2.c < 115",
"materialized": {
@@ -3067,7 +3067,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "incremental",
- "buffer_size": "256Kb",
+ "buffer_size": "4Kb",
"join_type": "BNL",
"attached_condition": "v1.a = t2.a and v2.a = t2.a or v2.b > 13 and t2.c < 115",
"materialized": {
@@ -3138,7 +3138,7 @@ EXPLAIN
"attached_condition": "v1.max_c < 300"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -3167,7 +3167,7 @@ EXPLAIN
"attached_condition": "v2.b < 50 or v2.b = 19"
},
"buffer_type": "incremental",
- "buffer_size": "256Kb",
+ "buffer_size": "4Kb",
"join_type": "BNL",
"attached_condition": "(v2.a = v1.a or v1.a = t2.a) and (v2.b < 50 or v2.b = 19)",
"materialized": {
@@ -3317,7 +3317,7 @@ EXPLAIN
"attached_condition": "v1.a = 1 and v1.b > 10"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.b is not null",
"materialized": {
@@ -3435,7 +3435,7 @@ EXPLAIN
"attached_condition": "t.a = 'b'"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "220",
"join_type": "BNL"
}
}
@@ -3544,7 +3544,7 @@ EXPLAIN
"attached_condition": "v2.max_c > 300"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "715",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -3574,7 +3574,7 @@ EXPLAIN
"attached_condition": "v1.max_c < 500"
},
"buffer_type": "incremental",
- "buffer_size": "256Kb",
+ "buffer_size": "9Kb",
"join_type": "BNL",
"attached_condition": "v1.a = v2.a or v1.a = t2.a",
"materialized": {
@@ -3790,7 +3790,7 @@ EXPLAIN
"attached_condition": "v1.avg_c < 400 or v1.a > 1"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "(v1.avg_c < 400 or v1.a > 1) and v1.a is not null and v1.b is not null",
"materialized": {
@@ -3935,7 +3935,7 @@ EXPLAIN
"attached_condition": "(v1.a = 1 or v1.max_c < 300) and v1.b > 25"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.a = 1 or v1.max_c < 300",
"materialized": {
@@ -4128,7 +4128,7 @@ EXPLAIN
"attached_condition": "v1.max_c > 100 and v1.a > 7"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -4229,7 +4229,7 @@ EXPLAIN
"attached_condition": "v1.b = 19 and v1.a < 5"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -4271,7 +4271,7 @@ EXPLAIN
"attached_condition": "v1.max_c > 400 or v1.avg_c > 270"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "(v1.max_c > 400 or v1.avg_c > 270) and v1.a < t2.a",
"materialized": {
@@ -4383,7 +4383,7 @@ EXPLAIN
"attached_condition": "v1.a = 1 or v1.a = 6"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "(v1.a = t2.a or v1.b = t2.b) and (v1.a = 1 or v1.a = 6)",
"materialized": {
@@ -4426,7 +4426,7 @@ EXPLAIN
"attached_condition": "v1.a > 3 and v1.b > 27 or v1.max_c > 550"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.a > 3 and v1.b > 27 or v1.max_c > 550",
"materialized": {
@@ -4540,7 +4540,7 @@ EXPLAIN
"attached_condition": "v1.a = 1 and (v1.max_c < 500 or v1.avg_c > 500)"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v1.max_c < 500 or v1.avg_c > 500",
"materialized": {
@@ -4584,7 +4584,7 @@ EXPLAIN
"attached_condition": "v2.b > 10"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v2.a < t2.b or v2.max_c > 200",
"materialized": {
@@ -4706,7 +4706,7 @@ EXPLAIN
"attached_condition": "v_union.a < 3 and v_union.c > 100"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -4814,7 +4814,7 @@ EXPLAIN
"attached_condition": "(v_union.a < 2 or v_union.c > 800) and v_union.b > 12"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "v_union.a < 2 or v_union.c > 800",
"materialized": {
@@ -4911,7 +4911,7 @@ EXPLAIN
"attached_condition": "v_union.a = 1 and v_union.c < 200"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -5078,7 +5078,7 @@ EXPLAIN
"attached_condition": "v1.a = 1"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -5108,7 +5108,7 @@ EXPLAIN
"attached_condition": "v_union.a = 1"
},
"buffer_type": "incremental",
- "buffer_size": "256Kb",
+ "buffer_size": "4Kb",
"join_type": "BNL",
"attached_condition": "v_union.c > 800 or v1.max_c > 200",
"materialized": {
@@ -5923,7 +5923,7 @@ EXPLAIN
"attached_condition": "v1.a > 5 and v1.b > 12"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "333",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -6306,7 +6306,7 @@ EXPLAIN
"attached_condition": "v4.b > 10 and v4.a > 1 or v4.b < 20"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "238",
"join_type": "BNL",
"attached_condition": "(v4.b > 10 and v4.a > 1 or v4.b < 20) and v4.a is not null",
"materialized": {
@@ -6451,7 +6451,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "333",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -6971,7 +6971,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "715",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -7766,7 +7766,7 @@ EXPLAIN
"attached_condition": "v2.pk > 2"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "119",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -7778,7 +7778,7 @@ EXPLAIN
"key": "PRIMARY",
"key_length": "4",
"used_key_parts": ["pk"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"index_condition": "t.pk > 2"
}
@@ -8218,12 +8218,10 @@ EXPLAIN
"query_block": {
"select_id": 1,
"table": {
- "table_name": "<subquery2>",
- "access_type": "system",
- "rows": 1,
- "filtered": 100,
- "materialized": {
- "unique": 1,
+ "message": "Impossible WHERE"
+ },
+ "subqueries": [
+ {
"query_block": {
"select_id": 2,
"table": {
@@ -8231,26 +8229,7 @@ EXPLAIN
}
}
}
- },
- "table": {
- "table_name": "<derived3>",
- "access_type": "ALL",
- "rows": 2,
- "filtered": 100,
- "attached_condition": "v1.c = NULL",
- "materialized": {
- "query_block": {
- "select_id": 3,
- "table": {
- "table_name": "t1",
- "access_type": "ALL",
- "rows": 2,
- "filtered": 100,
- "attached_condition": "t1.c = NULL"
- }
- }
- }
- }
+ ]
}
}
DROP VIEW v1;
@@ -8763,7 +8742,7 @@ EXPLAIN
"attached_condition": "v2.i2 = 1"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "65",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -8879,7 +8858,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "163",
"join_type": "BNL",
"attached_condition": "t1.id2 = vc.id2"
}
@@ -8952,12 +8931,12 @@ EXPLAIN
"attached_condition": "v1.a = 1 and v1.b = 1 and v1.max_c > 30"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "173",
"join_type": "BNL",
"materialized": {
"query_block": {
"select_id": 2,
- "having_condition": "max_c > 37 and max_c > 30 and t1.b = 1",
+ "having_condition": "t1.b = 1 and max_c > 37 and max_c > 30",
"table": {
"table_name": "t1",
"access_type": "ALL",
@@ -9027,12 +9006,12 @@ EXPLAIN
"attached_condition": "v1.a = 1 and v1.b = 1 and v1.d = 1 and v1.max_c > 30"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "173",
"join_type": "BNL",
"materialized": {
"query_block": {
"select_id": 2,
- "having_condition": "max_c > 37 and max_c > 30 and t1.b = 1",
+ "having_condition": "t1.b = 1 and max_c > 37 and max_c > 30",
"table": {
"table_name": "t1",
"access_type": "ALL",
@@ -9573,7 +9552,7 @@ EXPLAIN
"materialized": {
"query_block": {
"select_id": 2,
- "having_condition": "t1.a < 3 and a > 1",
+ "having_condition": "a > 1",
"filesort": {
"sort_key": "t1.a",
"temporary_table": {
@@ -9581,7 +9560,8 @@ EXPLAIN
"table_name": "t1",
"access_type": "ALL",
"rows": 3,
- "filtered": 100
+ "filtered": 100,
+ "attached_condition": "t1.a < 3"
}
}
}
@@ -9758,7 +9738,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "65",
"join_type": "BNL",
"attached_condition": "t1.a = dt1.a"
}
@@ -9817,7 +9797,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "119",
"join_type": "BNL",
"attached_condition": "t1.a = dt.a"
}
@@ -10269,9 +10249,9 @@ EXPLAIN INSERT INTO t1
SELECT * FROM ( SELECT t1.f FROM v1 JOIN t1 ) AS t WHERE f IS NOT NULL;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 144 Using where
-2 DERIVED <derived3> ALL NULL NULL NULL NULL 12
+2 DERIVED <derived4> ALL NULL NULL NULL NULL 12
2 DERIVED t1 ALL NULL NULL NULL NULL 12 Using where; Using join buffer (flat, BNL join)
-3 DERIVED t1 ALL NULL NULL NULL NULL 12
+4 DERIVED t1 ALL NULL NULL NULL NULL 12
EXPLAIN FORMAT=JSON INSERT INTO t1
SELECT * FROM ( SELECT t1.f FROM v1 JOIN t1 ) AS t WHERE f IS NOT NULL;
EXPLAIN
@@ -10288,13 +10268,13 @@ EXPLAIN
"query_block": {
"select_id": 2,
"table": {
- "table_name": "<derived3>",
+ "table_name": "<derived4>",
"access_type": "ALL",
"rows": 12,
"filtered": 100,
"materialized": {
"query_block": {
- "select_id": 3,
+ "select_id": 4,
"table": {
"table_name": "t1",
"access_type": "ALL",
@@ -10313,7 +10293,7 @@ EXPLAIN
"attached_condition": "t1.f is not null"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "64",
"join_type": "BNL"
}
}
@@ -10364,7 +10344,7 @@ EXPLAIN
"attached_condition": "t1.f is not null"
},
"table": {
- "table_name": "<derived3>",
+ "table_name": "<derived4>",
"access_type": "ref",
"possible_keys": ["key0"],
"key": "key0",
@@ -10375,7 +10355,7 @@ EXPLAIN
"filtered": 100,
"materialized": {
"query_block": {
- "select_id": 3,
+ "select_id": 4,
"table": {
"table_name": "t1",
"access_type": "ALL",
@@ -10488,7 +10468,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "65",
"join_type": "BNL",
"attached_condition": "t1.f2 = t.f2"
}
@@ -10668,7 +10648,7 @@ EXPLAIN
"attached_condition": "v1.a = 8"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "173",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -10756,7 +10736,7 @@ EXPLAIN
"attached_condition": "v1.a = 8"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "173",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -11246,7 +11226,7 @@ EXPLAIN
"attached_condition": "v1.a > 3 or v1.a = 1 and v1.c < 110"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "173",
"join_type": "BNL",
"attached_condition": "v1.c = t2.c and v1.a > 3 and t2.c > 110 or v1.a = 1 and v1.c < 110",
"materialized": {
@@ -11258,7 +11238,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
- "having_condition": "c < 300 and (t1.a > 3 and c > 110 or c < 110 and t1.a = 1)",
+ "having_condition": "c < 300 and (t1.a > 3 and c > 110 or t1.a = 1 and c < 110)",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -11277,7 +11257,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "INTERSECT",
- "having_condition": "c > 100 and (t1.a > 3 and c > 110 or c < 110 and t1.a = 1)",
+ "having_condition": "c > 100 and (t1.a > 3 and c > 110 or t1.a = 1 and c < 110)",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -11587,7 +11567,7 @@ EXPLAIN
"attached_condition": "v1.a = 6"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "173",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -11677,7 +11657,7 @@ EXPLAIN
"attached_condition": "v1.a = 6"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "173",
"join_type": "BNL",
"materialized": {
"query_block": {
@@ -12171,7 +12151,7 @@ EXPLAIN
"attached_condition": "v1.a > 1 or v1.a = 1 and v1.c > 500"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "173",
"join_type": "BNL",
"attached_condition": "v1.c = t2.c and v1.a > 1 and t2.c < 500 or v1.a = 1 and v1.c > 500",
"materialized": {
@@ -12183,7 +12163,7 @@ EXPLAIN
{
"query_block": {
"select_id": 2,
- "having_condition": "c > 200 and (t1.a > 1 and c < 500 or c > 500 and t1.a = 1)",
+ "having_condition": "c > 200 and (t1.a > 1 and c < 500 or t1.a = 1 and c > 500)",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -12202,7 +12182,7 @@ EXPLAIN
"query_block": {
"select_id": 3,
"operation": "EXCEPT",
- "having_condition": "c < 300 and (t1.a > 1 and c < 500 or c > 500 and t1.a = 1)",
+ "having_condition": "c < 300 and (t1.a > 1 and c < 500 or t1.a = 1 and c > 500)",
"filesort": {
"sort_key": "t1.a, t1.b",
"temporary_table": {
@@ -12455,7 +12435,7 @@ EXPLAIN
"access_type": "ALL",
"rows": 18,
"filtered": 100,
- "attached_condition": "__3.a > 5 and __3.c > 200",
+ "attached_condition": "__5.a > 5 and __5.c > 200",
"materialized": {
"query_block": {
"union_result": {
@@ -13081,7 +13061,7 @@ EXPLAIN
"access_type": "ALL",
"rows": 18,
"filtered": 100,
- "attached_condition": "__3.a > 4 and __3.c < 130",
+ "attached_condition": "__5.a > 4 and __5.c < 130",
"materialized": {
"query_block": {
"union_result": {
@@ -13227,7 +13207,7 @@ EXPLAIN
"access_type": "ALL",
"rows": 18,
"filtered": 100,
- "attached_condition": "__3.a > 4 and __3.c < 130",
+ "attached_condition": "__6.a > 4 and __6.c < 130",
"materialized": {
"query_block": {
"union_result": {
@@ -13984,16 +13964,16 @@ a b max_c a b c
1 21 345 3 21 231
select * from v1,t2 where (v1.b=t2.b) and (v1.a<5);
a b max_c a b c
-1 21 345 3 21 231
2 33 7 5 33 207
-2 33 7 8 33 117
-3 21 500 3 21 231
4 33 123 5 33 207
+2 33 7 8 33 117
4 33 123 8 33 117
+1 21 345 3 21 231
+3 21 500 3 21 231
explain select * from v1,t2 where (v1.b=t2.b) and (v1.a<5);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 9
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 9 Using where; Using join buffer (flat, BNL join)
2 DERIVED t3 range i1 i1 5 NULL 5 Using index condition
explain format=json select * from v1,t2 where (v1.b=t2.b) and (v1.a<5);
EXPLAIN
@@ -14001,23 +13981,11 @@ EXPLAIN
"query_block": {
"select_id": 1,
"table": {
- "table_name": "t2",
+ "table_name": "<derived2>",
"access_type": "ALL",
- "rows": 9,
- "filtered": 100
- },
- "block-nl-join": {
- "table": {
- "table_name": "<derived2>",
- "access_type": "ALL",
- "rows": 5,
- "filtered": 80,
- "attached_condition": "v1.a < 5"
- },
- "buffer_type": "flat",
- "buffer_size": "256Kb",
- "join_type": "BNL",
- "attached_condition": "v1.b = t2.b",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "v1.a < 5",
"materialized": {
"query_block": {
"select_id": 2,
@@ -14034,6 +14002,18 @@ EXPLAIN
}
}
}
+ },
+ "block-nl-join": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 9,
+ "filtered": 100
+ },
+ "buffer_type": "flat",
+ "buffer_size": "173",
+ "join_type": "BNL",
+ "attached_condition": "t2.b = v1.b"
}
}
}
@@ -14127,7 +14107,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "173",
"join_type": "BNL",
"attached_condition": "t2.b = v1.b"
}
@@ -14957,7 +14937,9 @@ insert into t2 values
insert into t2 select a+10, b+10, concat(c,'f') from t2;
analyze table t1,t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
set statement optimizer_switch='split_materialized=off' for select t1.a,t.s,t.m
from t1 join
@@ -14987,7 +14969,7 @@ where t1.b < 3;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 range idx_b idx_b 5 NULL 4 100.00 Using index condition; Using where
1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 100.00
-2 LATERAL DERIVED t2 ref idx_a idx_a 5 test.t1.a 2 100.00
+2 LATERAL DERIVED t2 ref idx_a idx_a 5 test.t1.a 1 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`t`.`s` AS `s`,`t`.`m` AS `m` from `test`.`t1` join (/* select#2 */ select `test`.`t2`.`a` AS `a`,sum(`test`.`t2`.`b`) AS `s`,min(`test`.`t2`.`c`) AS `m` from `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` group by `test`.`t2`.`a`) `t` where `t`.`a` = `test`.`t1`.`a` and `test`.`t1`.`b` < 3
explain format=json select t1.a,t.s,t.m
@@ -15033,7 +15015,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "rows": 1,
"filtered": 100
}
}
@@ -15095,11 +15077,11 @@ from t1 join
on t1.a=t.a
where t1.b <= 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL idx_b NULL NULL NULL 12 75.00 Using where
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 9 100.00
-2 DERIVED t2 ALL idx_a NULL NULL NULL 90 100.00 Using temporary; Using filesort
+1 PRIMARY t1 ALL idx_b NULL NULL NULL 12 83.33 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 100.00
+2 LATERAL DERIVED t2 ref idx_a idx_a 5 test.t1.a 1 100.00
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`t`.`s` AS `s`,`t`.`m` AS `m` from `test`.`t1` join (/* select#2 */ select `test`.`t2`.`a` AS `a`,sum(`test`.`t2`.`b`) AS `s`,min(`test`.`t2`.`b`) AS `m` from `test`.`t2` group by `test`.`t2`.`a`) `t` where `t`.`a` = `test`.`t1`.`a` and `test`.`t1`.`b` <= 5
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`t`.`s` AS `s`,`t`.`m` AS `m` from `test`.`t1` join (/* select#2 */ select `test`.`t2`.`a` AS `a`,sum(`test`.`t2`.`b`) AS `s`,min(`test`.`t2`.`b`) AS `m` from `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` group by `test`.`t2`.`a`) `t` where `t`.`a` = `test`.`t1`.`a` and `test`.`t1`.`b` <= 5
explain format=json select t1.a,t.s,t.m
from t1 join
(select a, sum(t2.b) as s, min(t2.b) as m from t2 group by t2.a) t
@@ -15114,7 +15096,7 @@ EXPLAIN
"access_type": "ALL",
"possible_keys": ["idx_b"],
"rows": 12,
- "filtered": 75,
+ "filtered": 83.333,
"attached_condition": "t1.b <= 5 and t1.a is not null"
},
"table": {
@@ -15125,22 +15107,22 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
- "rows": 9,
+ "rows": 2,
"filtered": 100,
"materialized": {
"query_block": {
"select_id": 2,
- "filesort": {
- "sort_key": "t2.a",
- "temporary_table": {
- "table": {
- "table_name": "t2",
- "access_type": "ALL",
- "possible_keys": ["idx_a"],
- "rows": 90,
- "filtered": 100
- }
- }
+ "outer_ref_condition": "t1.a is not null",
+ "table": {
+ "table_name": "t2",
+ "access_type": "ref",
+ "possible_keys": ["idx_a"],
+ "key": "idx_a",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t1.a"],
+ "rows": 1,
+ "filtered": 100
}
}
}
@@ -15199,11 +15181,11 @@ from t1 left join
(select a, max(t2.b) max, min(t2.b) min from t2 group by t2.a) t
on t1.a=t.a;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00
-1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 2 100.00 Using where
-2 LATERAL DERIVED t2 ref idx_a idx_a 5 test.t1.a 2 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 12 100.00
+1 PRIMARY <derived2> ref key0 key0 5 test.t1.a 9 100.00 Using where
+2 DERIVED t2 ALL idx_a NULL NULL NULL 90 100.00 Using temporary; Using filesort
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t1` left join (/* select#2 */ select `test`.`t2`.`a` AS `a`,max(`test`.`t2`.`b`) AS `max`,min(`test`.`t2`.`b`) AS `min` from `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` group by `test`.`t2`.`a`) `t` on(`t`.`a` = `test`.`t1`.`a` and `test`.`t1`.`a` is not null) where 1
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t1` left join (/* select#2 */ select `test`.`t2`.`a` AS `a`,max(`test`.`t2`.`b`) AS `max`,min(`test`.`t2`.`b`) AS `min` from `test`.`t2` group by `test`.`t2`.`a`) `t` on(`t`.`a` = `test`.`t1`.`a` and `test`.`t1`.`a` is not null) where 1
explain format=json select t1.a,t.max,t.min
from t1 left join
(select a, max(t2.b) max, min(t2.b) min from t2 group by t2.a) t
@@ -15216,7 +15198,7 @@ EXPLAIN
"table": {
"table_name": "t1",
"access_type": "ALL",
- "rows": 4,
+ "rows": 12,
"filtered": 100
},
"table": {
@@ -15227,23 +15209,23 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t1.a"],
- "rows": 2,
+ "rows": 9,
"filtered": 100,
"attached_condition": "trigcond(trigcond(t1.a is not null))",
"materialized": {
"query_block": {
"select_id": 2,
- "outer_ref_condition": "t1.a is not null",
- "table": {
- "table_name": "t2",
- "access_type": "ref",
- "possible_keys": ["idx_a"],
- "key": "idx_a",
- "key_length": "5",
- "used_key_parts": ["a"],
- "ref": ["test.t1.a"],
- "rows": 2,
- "filtered": 100
+ "filesort": {
+ "sort_key": "t2.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "possible_keys": ["idx_a"],
+ "rows": 90,
+ "filtered": 100
+ }
+ }
}
}
}
@@ -15264,7 +15246,9 @@ insert into t4 values
insert into t4 select a+10, b+10, concat(c,'f') from t4;
analyze table t3,t4;
Table Op Msg_type Msg_text
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
+test.t4 analyze status Engine-independent statistics collected
test.t4 analyze status OK
set statement optimizer_switch='split_materialized=off' for select t3.a,t3.c,t.max,t.min
from t3 join
@@ -15288,7 +15272,7 @@ from t3 join
on t3.a=t.a and t3.c=t.c
where t3.b > 15;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t3 range idx_b idx_b 5 NULL 3 100.00 Using index condition; Using where
+1 PRIMARY t3 range idx_b idx_b 5 NULL 2 100.00 Using index condition; Using where
1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 2 100.00
2 LATERAL DERIVED t4 ref idx idx 133 test.t3.a,test.t3.c 1 100.00
Warnings:
@@ -15309,7 +15293,7 @@ EXPLAIN
"key": "idx_b",
"key_length": "5",
"used_key_parts": ["b"],
- "rows": 3,
+ "rows": 2,
"filtered": 100,
"index_condition": "t3.b > 15",
"attached_condition": "t3.a is not null and t3.c is not null"
@@ -15366,7 +15350,7 @@ from t3 join
on t3.a=t.a and t3.c=t.c
where t3.b <= 15;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t3 ALL idx_b NULL NULL NULL 12 75.00 Using where
+1 PRIMARY t3 ALL idx_b NULL NULL NULL 12 83.33 Using where
1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 4 100.00
2 DERIVED t4 ALL idx NULL NULL NULL 40 100.00 Using temporary; Using filesort
Warnings:
@@ -15385,7 +15369,7 @@ EXPLAIN
"access_type": "ALL",
"possible_keys": ["idx_b"],
"rows": 12,
- "filtered": 75,
+ "filtered": 83.333,
"attached_condition": "t3.b <= 15 and t3.a is not null and t3.c is not null"
},
"table": {
@@ -15440,7 +15424,7 @@ from t3 join
on t3.a=t.a and t3.c=t.c
where t3.b > 15;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t3 range idx_b idx_b 5 NULL 3 100.00 Using index condition; Using where
+1 PRIMARY t3 range idx_b idx_b 5 NULL 2 100.00 Using index condition; Using where
1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 2 100.00
2 LATERAL DERIVED t4 ref idx idx 133 test.t3.a,test.t3.c 1 100.00
Warnings:
@@ -15461,7 +15445,7 @@ EXPLAIN
"key": "idx_b",
"key_length": "5",
"used_key_parts": ["b"],
- "rows": 3,
+ "rows": 2,
"filtered": 100,
"index_condition": "t3.b > 15",
"attached_condition": "t3.a is not null and t3.c is not null"
@@ -15518,7 +15502,7 @@ from t3 join
on t3.a=t.a and t3.c=t.c
where t3.b <= 15;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t3 ALL idx_b NULL NULL NULL 12 75.00 Using where
+1 PRIMARY t3 ALL idx_b NULL NULL NULL 12 83.33 Using where
1 PRIMARY <derived2> ref key0 key0 133 test.t3.a,test.t3.c 4 100.00
2 DERIVED t4 ALL idx NULL NULL NULL 40 100.00 Using temporary; Using filesort
Warnings:
@@ -15537,7 +15521,7 @@ EXPLAIN
"access_type": "ALL",
"possible_keys": ["idx_b"],
"rows": 12,
- "filtered": 75,
+ "filtered": 83.333,
"attached_condition": "t3.b <= 15 and t3.a is not null and t3.c is not null"
},
"table": {
@@ -15580,8 +15564,11 @@ insert into t4 select a+100, b+100, concat(c,'g') from t4;
insert into t4 select a+1000, b+1000, concat(c,'h') from t4;
analyze table t2,t3,t4;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
+test.t4 analyze status Engine-independent statistics collected
test.t4 analyze status OK
set statement optimizer_switch='split_materialized=off' for select t2.a,t2.b,t2.c,t.c as t_c,t.max,t.min
from t2, t3, (select c, max(b) max, min(b) min from t4 group by c) t
@@ -15602,9 +15589,9 @@ from t2, t3, (select c, max(b) max, min(b) min from t4 group by c) t
where t2.b between 80 and 85 and t2.c in ('y','z') and t2.a=t3.a and t3.c=t.c;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 range idx idx 133 NULL 2 100.00 Using index condition; Using where
-1 PRIMARY t3 ref idx_a idx_a 5 test.t2.a 2 100.00 Using where
+1 PRIMARY t3 ref idx_a idx_a 5 test.t2.a 1 100.00 Using where
1 PRIMARY <derived2> ref key0 key0 128 test.t3.c 2 100.00
-2 LATERAL DERIVED t4 ref idx_c idx_c 128 test.t3.c 3 100.00
+2 LATERAL DERIVED t4 ref idx_c idx_c 128 test.t3.c 2 100.00
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`t`.`c` AS `t_c`,`t`.`max` AS `max`,`t`.`min` AS `min` from `test`.`t2` join `test`.`t3` join (/* select#2 */ select `test`.`t4`.`c` AS `c`,max(`test`.`t4`.`b`) AS `max`,min(`test`.`t4`.`b`) AS `min` from `test`.`t4` where `test`.`t4`.`c` = `test`.`t3`.`c` group by `test`.`t4`.`c`) `t` where `test`.`t3`.`a` = `test`.`t2`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t2`.`b` between 80 and 85 and `test`.`t2`.`c` in ('y','z')
explain format=json select t2.a,t2.b,t2.c,t.c as t_c,t.max,t.min
@@ -15634,7 +15621,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"attached_condition": "t3.c is not null"
},
@@ -15660,7 +15647,7 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t3.c"],
- "rows": 3,
+ "rows": 2,
"filtered": 100
}
}
@@ -15748,8 +15735,8 @@ explain extended select t2.a,t2.b,t2.c,t.c as t_c,t.max,t.min
from t2, t3, (select c, max(b) max, min(b) min from t4 group by c) t
where t2.b < 40 and t2.a=t3.a and t3.c=t.c;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 90 100.00 Using where
-1 PRIMARY t3 ref idx_a idx_a 5 test.t2.a 2 100.00 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 90 63.28 Using where
+1 PRIMARY t3 ref idx_a idx_a 5 test.t2.a 1 100.00 Using where
1 PRIMARY <derived2> ref key0 key0 128 test.t3.c 10 100.00
2 DERIVED t4 ALL idx_c NULL NULL NULL 160 100.00 Using temporary; Using filesort
Warnings:
@@ -15765,7 +15752,7 @@ EXPLAIN
"table_name": "t2",
"access_type": "ALL",
"rows": 90,
- "filtered": 100,
+ "filtered": 63.281,
"attached_condition": "t2.b < 40 and t2.a is not null"
},
"table": {
@@ -15776,7 +15763,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"attached_condition": "t3.c is not null"
},
@@ -15849,9 +15836,9 @@ from t2, t3, (select c, b, sum(b) over (partition by c) from t4 ) t
where t2.b between 80 and 85 and t2.c in ('y','z') and t2.a=t3.a and t3.c=t.c;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 range idx idx 133 NULL 2 100.00 Using index condition; Using where
-1 PRIMARY t3 ref idx_a idx_a 5 test.t2.a 2 100.00 Using where
+1 PRIMARY t3 ref idx_a idx_a 5 test.t2.a 1 100.00 Using where
1 PRIMARY <derived2> ref key0 key0 128 test.t3.c 2 100.00
-2 LATERAL DERIVED t4 ref idx_c idx_c 128 test.t3.c 3 100.00 Using temporary
+2 LATERAL DERIVED t4 ref idx_c idx_c 128 test.t3.c 2 100.00 Using temporary
Warnings:
Note 1003 /* select#1 */ select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t3`.`c` AS `c`,`t`.`c` AS `c`,`t`.`b` AS `b`,`t`.`sum(b) over (partition by c)` AS `sum(b) over (partition by c)` from `test`.`t2` join `test`.`t3` join (/* select#2 */ select `test`.`t4`.`c` AS `c`,`test`.`t4`.`b` AS `b`,sum(`test`.`t4`.`b`) over ( partition by `test`.`t4`.`c`) AS `sum(b) over (partition by c)` from `test`.`t4` where `test`.`t4`.`c` = `test`.`t3`.`c`) `t` where `test`.`t3`.`a` = `test`.`t2`.`a` and `t`.`c` = `test`.`t3`.`c` and `test`.`t2`.`b` between 80 and 85 and `test`.`t2`.`c` in ('y','z')
explain format=json select *
@@ -15881,7 +15868,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"attached_condition": "t3.c is not null"
},
@@ -15914,7 +15901,7 @@ EXPLAIN
"key_length": "128",
"used_key_parts": ["c"],
"ref": ["test.t3.c"],
- "rows": 3,
+ "rows": 2,
"filtered": 100
}
}
@@ -16244,8 +16231,8 @@ explain extended select *
from t2, t3, (select c, b, sum(b) over (partition by c) from t4 ) t
where t2.b < 40 and t2.a=t3.a and t3.c=t.c;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 90 100.00 Using where
-1 PRIMARY t3 ref idx_a idx_a 5 test.t2.a 2 100.00 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 90 63.28 Using where
+1 PRIMARY t3 ref idx_a idx_a 5 test.t2.a 1 100.00 Using where
1 PRIMARY <derived2> ref key0 key0 128 test.t3.c 10 100.00
2 DERIVED t4 ALL idx_c NULL NULL NULL 160 100.00 Using temporary
Warnings:
@@ -16261,7 +16248,7 @@ EXPLAIN
"table_name": "t2",
"access_type": "ALL",
"rows": 90,
- "filtered": 100,
+ "filtered": 63.281,
"attached_condition": "t2.b < 40 and t2.a is not null"
},
"table": {
@@ -16272,7 +16259,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t2.a"],
- "rows": 2,
+ "rows": 1,
"filtered": 100,
"attached_condition": "t3.c is not null"
},
@@ -16328,9 +16315,13 @@ INSERT INTO t4 VALUES
(5,'zzz'),(9,'xyz'),(2,'yxz'),(5,'zxy'),(7,'zyx') ;
ANALYZE TABLE t1,t2,t3,t4;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
+test.t4 analyze status Engine-independent statistics collected
test.t4 analyze status OK
CREATE VIEW v1 AS
SELECT c FROM t3
@@ -16423,7 +16414,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "65",
"join_type": "BNL",
"attached_condition": "t2.i = t1.i and t2.i = t1.i"
}
@@ -16642,7 +16633,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DERIVED a2 eq_ref PRIMARY PRIMARY 4 a1.f 1 100.00 Using index
4 DERIVED t1 index PRIMARY PRIMARY 4 NULL 7 100.00 Using index; Using temporary; Using filesort
Warnings:
-Note 1003 /* select#1 */ select `s`.`f` AS `f`,`s`.`c` AS `c` from (/* select#2 */ select straight_join `a2`.`f` AS `f`,count(0) AS `c` from ((/* select#4 */ select `test`.`t1`.`f` AS `f`,count(0) AS `c` from `test`.`t1` group by `test`.`t1`.`f`)) `a1` join `test`.`t1` `a2` where `a2`.`f` = `a1`.`f` group by `a2`.`f`) `s`
+Note 1003 /* select#1 */ select `s`.`f` AS `f`,`s`.`c` AS `c` from (/* select#2 */ select straight_join `a2`.`f` AS `f`,count(0) AS `c` from (/* select#4 */ select `test`.`t1`.`f` AS `f`,count(0) AS `c` from `test`.`t1` group by `test`.`t1`.`f`) `a1` join `test`.`t1` `a2` where `a2`.`f` = `a1`.`f` group by `a2`.`f`) `s`
SELECT * FROM ( SELECT STRAIGHT_JOIN f, COUNT(*) as c FROM v1 GROUP BY f ) AS s;
f c
1 1
@@ -16672,8 +16663,11 @@ INSERT INTO t3 VALUES
(3), (4), (1), (8), (3);
ANALYZE tables t1,t2,t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
SELECT *
FROM t3,
@@ -16703,7 +16697,7 @@ WHERE t3.d = dt.b;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 5 100.00 Using where
1 PRIMARY <derived2> ref key0 key0 5 test.t3.d 2 100.00
-2 LATERAL DERIVED t1 ref idx_b idx_b 5 test.t3.d 2 100.00 Using index; Using temporary; Using filesort
+2 LATERAL DERIVED t1 ref idx_b idx_b 5 test.t3.d 1 100.00 Using index; Using temporary; Using filesort
2 LATERAL DERIVED t2 ALL NULL NULL NULL NULL 5 100.00 Using join buffer (flat, BNL join)
Warnings:
Note 1003 /* select#1 */ select `test`.`t3`.`d` AS `d`,`dt`.`b` AS `b`,`dt`.`c` AS `c` from `test`.`t3` join (/* select#2 */ select `test`.`t1`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`b` = `test`.`t3`.`d` group by `test`.`t1`.`b`,`test`.`t2`.`c`) `dt` where `dt`.`b` = `test`.`t3`.`d`
@@ -16749,3 +16743,25 @@ id username id userid logindate
set join_cache_level=default;
DROP TABLE t1,t2;
# End of 10.3 tests
+#
+# MDEV-18679: materialized view with SELECT S containing materialized
+# derived when impossible WHERE has been detected for S
+#
+create table t1 (pk int, f varchar(1));
+insert into t1 values
+(3,'y'), (1,'x'), (7,'z');
+create view v1 as
+select t1.f
+from t1, (select distinct * from t1) t
+where t.f = t1.f and 1 = 0
+group by t1.f;
+select * from v1;
+f
+explain select * from v1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY <derived2> system NULL NULL NULL NULL 0 Const row not found
+2 DERIVED NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+3 DERIVED t1 ALL NULL NULL NULL NULL 3 Using temporary
+drop view v1;
+drop table t1;
+# End of 10.4 tests
diff --git a/mysql-test/main/derived_cond_pushdown.test b/mysql-test/main/derived_cond_pushdown.test
index c6c8c26271f..ebbda849afa 100644
--- a/mysql-test/main/derived_cond_pushdown.test
+++ b/mysql-test/main/derived_cond_pushdown.test
@@ -3285,3 +3285,26 @@ set join_cache_level=default;
DROP TABLE t1,t2;
--echo # End of 10.3 tests
+
+--echo #
+--echo # MDEV-18679: materialized view with SELECT S containing materialized
+--echo # derived when impossible WHERE has been detected for S
+--echo #
+
+create table t1 (pk int, f varchar(1));
+insert into t1 values
+ (3,'y'), (1,'x'), (7,'z');
+
+create view v1 as
+select t1.f
+ from t1, (select distinct * from t1) t
+ where t.f = t1.f and 1 = 0
+group by t1.f;
+
+select * from v1;
+explain select * from v1;
+
+drop view v1;
+drop table t1;
+
+--echo # End of 10.4 tests
diff --git a/mysql-test/main/derived_opt.result b/mysql-test/main/derived_opt.result
index 48ac7e62653..c30f56d9925 100644
--- a/mysql-test/main/derived_opt.result
+++ b/mysql-test/main/derived_opt.result
@@ -534,7 +534,7 @@ LEFT JOIN
ON t2.id=t.id
WHERE t2.id < 3;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 3 Using index condition
+1 PRIMARY t2 range PRIMARY PRIMARY 4 NULL 2 Using index condition
1 PRIMARY <derived2> ref key0 key0 5 test.t2.id 2
2 DERIVED t1 ALL NULL NULL NULL NULL 8 Using temporary; Using filesort
set join_cache_level=default;
diff --git a/mysql-test/main/derived_split_innodb.result b/mysql-test/main/derived_split_innodb.result
index b9ed016429b..e8f9df5f80d 100644
--- a/mysql-test/main/derived_split_innodb.result
+++ b/mysql-test/main/derived_split_innodb.result
@@ -11,6 +11,7 @@ KEY n1_c1_n2 (n1,c1,n2)
INSERT INTO t1 VALUES (0, 2, 'a'), (1, 3, 'a');
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN SELECT t1.n1 FROM t1, (SELECT n1, n2 FROM t1 WHERE c1 = 'a' GROUP BY n1) as t
WHERE t.n1 = t1.n1 AND t.n2 = t1.n2 AND c1 = 'a' GROUP BY n1;
@@ -135,8 +136,8 @@ left join
on t1.f1=t.f1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t const f2 NULL NULL NULL 1 Impossible ON condition
-1 PRIMARY <derived2> const key1 NULL NULL NULL 1 Impossible ON condition
+1 PRIMARY <derived3> const key1 NULL NULL NULL 1 Impossible ON condition
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
-2 DERIVED t2 index NULL PRIMARY 4 NULL 3
+3 DERIVED t2 index NULL PRIMARY 4 NULL 3
drop view v1;
drop table t1,t2;
diff --git a/mysql-test/main/derived_view.result b/mysql-test/main/derived_view.result
index 30831e75341..28c3e2958e9 100644
--- a/mysql-test/main/derived_view.result
+++ b/mysql-test/main/derived_view.result
@@ -2939,21 +2939,23 @@ insert into t2 select b+10 from t2;
insert into t2 select b+10 from t2;
analyze table t1,t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
explain select a from t1 where a in (select b from t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
-1 PRIMARY t2 ref idx idx 5 test.t1.a 140 Using index; FirstMatch(t1)
+1 PRIMARY t2 ref idx idx 5 test.t1.a 139 Using index; FirstMatch(t1)
explain select * from (select a from t1 where a in (select b from t2)) t;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
-1 PRIMARY t2 ref idx idx 5 test.t1.a 140 Using index; FirstMatch(t1)
+1 PRIMARY t2 ref idx idx 5 test.t1.a 139 Using index; FirstMatch(t1)
create view v1 as select a from t1 where a in (select b from t2);
explain select * from v1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
-1 PRIMARY t2 ref idx idx 5 test.t1.a 140 Using index; FirstMatch(t1)
+1 PRIMARY t2 ref idx idx 5 test.t1.a 139 Using index; FirstMatch(t1)
drop view v1;
drop table t1,t2;
#
diff --git a/mysql-test/main/disabled.def b/mysql-test/main/disabled.def
index 93fff886791..43bf4f9ab76 100644
--- a/mysql-test/main/disabled.def
+++ b/mysql-test/main/disabled.def
@@ -19,3 +19,4 @@ innodb_bug12902967 : broken upstream
file_contents : MDEV-6526 these files are not installed anymore
max_statement_time : cannot possibly work, depends on timing
partition_open_files_limit : open_files_limit check broken by MDEV-18360
+join_cache : enable after MDEV-17752 is fixed
diff --git a/mysql-test/main/distinct.result b/mysql-test/main/distinct.result
index 237638468fa..8fcc45e740a 100644
--- a/mysql-test/main/distinct.result
+++ b/mysql-test/main/distinct.result
@@ -173,9 +173,9 @@ INSERT INTO t2 values (1),(2),(3);
INSERT INTO t3 VALUES (1,'1'),(2,'2'),(1,'1'),(2,'2');
explain SELECT distinct t3.a FROM t3,t2,t1 WHERE t3.a=t1.b AND t1.a=t2.a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 4 Using where; Using temporary
+1 SIMPLE t2 index a a 4 NULL 5 Using index; Using temporary
+1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 Using where
1 SIMPLE t3 ref a a 5 test.t1.b 2 Using index
-1 SIMPLE t2 index a a 4 NULL 5 Using where; Using index; Using join buffer (flat, BNL join)
SELECT distinct t3.a FROM t3,t2,t1 WHERE t3.a=t1.b AND t1.a=t2.a;
a
1
@@ -1061,7 +1061,7 @@ UNION
( SELECT DISTINCT 1 FROM t1 ORDER BY BENCHMARK(1, MIN(pk)) );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index NULL PRIMARY 4 NULL 2 Using index; Using temporary
-2 UNION t1 index NULL PRIMARY 4 NULL 2 Using index; Using temporary
+2 UNCACHEABLE UNION t1 index NULL PRIMARY 4 NULL 2 Using index; Using temporary
NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL
( SELECT DISTINCT 1 FROM t1 ORDER BY BENCHMARK(1, MIN(pk)) )
UNION
diff --git a/mysql-test/main/drop.test b/mysql-test/main/drop.test
index 6f506dd7215..fe8d39c0d7e 100644
--- a/mysql-test/main/drop.test
+++ b/mysql-test/main/drop.test
@@ -190,28 +190,28 @@ CREATE DATABASE mysql_test;
let $MYSQLD_DATADIR= `select @@datadir`;
--let $proc_frm = $MYSQLD_DATADIR/mysql/proc.frm
---let $proc_MYD = $MYSQLD_DATADIR/mysql/proc.MYD
---let $proc_MYI = $MYSQLD_DATADIR/mysql/proc.MYI
+--let $proc_MAD = $MYSQLD_DATADIR/mysql/proc.MAD
+--let $proc_MAI = $MYSQLD_DATADIR/mysql/proc.MAI
--let $copy_of_proc_frm = $MYSQLTEST_VARDIR/tmp/bug29958.copy.frm
---let $copy_of_proc_MYD = $MYSQLTEST_VARDIR/tmp/bug29958.copy.MYD
---let $copy_of_proc_MYI = $MYSQLTEST_VARDIR/tmp/bug29958.copy.MYI
+--let $copy_of_proc_MAD = $MYSQLTEST_VARDIR/tmp/bug29958.copy.MAD
+--let $copy_of_proc_MAI = $MYSQLTEST_VARDIR/tmp/bug29958.copy.MAI
--copy_file $proc_frm $copy_of_proc_frm
---copy_file $proc_MYD $copy_of_proc_MYD
---copy_file $proc_MYI $copy_of_proc_MYI
+--copy_file $proc_MAD $copy_of_proc_MAD
+--copy_file $proc_MAI $copy_of_proc_MAI
DROP TABLE mysql.proc;
DROP DATABASE mysql_test;
--copy_file $copy_of_proc_frm $proc_frm
---copy_file $copy_of_proc_MYD $proc_MYD
---copy_file $copy_of_proc_MYI $proc_MYI
+--copy_file $copy_of_proc_MAD $proc_MAD
+--copy_file $copy_of_proc_MAI $proc_MAI
--remove_file $copy_of_proc_frm
---remove_file $copy_of_proc_MYD
---remove_file $copy_of_proc_MYI
+--remove_file $copy_of_proc_MAD
+--remove_file $copy_of_proc_MAI
--echo
--echo # --
diff --git a/mysql-test/main/dyncol.result b/mysql-test/main/dyncol.result
index 7a5eeac67cc..cc9a94e74be 100644
--- a/mysql-test/main/dyncol.result
+++ b/mysql-test/main/dyncol.result
@@ -1028,12 +1028,12 @@ select column_get(column_create(1, "2011-02-32 8:46:06.23434" AS CHAR), 1 as tim
column_get(column_create(1, "2011-02-32 8:46:06.23434" AS CHAR), 1 as time)
NULL
Warnings:
-Warning 1292 Truncated incorrect time value: '2011-02-32 8:46:06.23434'
+Warning 1292 Incorrect time value: '2011-02-32 8:46:06.23434'
select column_get(column_create(1, "2011-13-01 8:46:06.23434" AS CHAR), 1 as time);
column_get(column_create(1, "2011-13-01 8:46:06.23434" AS CHAR), 1 as time)
NULL
Warnings:
-Warning 1292 Truncated incorrect time value: '2011-13-01 8:46:06.23434'
+Warning 1292 Incorrect time value: '2011-13-01 8:46:06.23434'
select column_get(column_create(1, "2011-02-30 8:46:06.23434" AS CHAR), 1 as time);
column_get(column_create(1, "2011-02-30 8:46:06.23434" AS CHAR), 1 as time)
08:46:06
diff --git a/mysql-test/main/empty_user_table.result b/mysql-test/main/empty_user_table.result
index 54a7fd4907b..924e4cd8ea5 100644
--- a/mysql-test/main/empty_user_table.result
+++ b/mysql-test/main/empty_user_table.result
@@ -1,9 +1,14 @@
-create table t1 as select * from mysql.user;
-truncate table mysql.user;
+create table t1 as select * from mysql.global_priv;
+truncate table mysql.global_priv;
flush privileges;
connect(localhost,u1,,test,MASTER_PORT,MASTER_SOCKET);
connect fail,localhost,u1;
Got one of the listed errors
-insert mysql.user select * from t1;
+insert mysql.global_priv select * from t1;
drop table t1;
flush privileges;
+truncate table mysql.user;
+flush privileges;
+connect(localhost,u1,,test,MASTER_PORT,MASTER_SOCKET);
+connect fail,localhost,u1;
+Got one of the listed errors
diff --git a/mysql-test/main/empty_user_table.test b/mysql-test/main/empty_user_table.test
index b54f2109e30..8a544fece83 100644
--- a/mysql-test/main/empty_user_table.test
+++ b/mysql-test/main/empty_user_table.test
@@ -4,8 +4,8 @@
source include/not_embedded.inc;
-create table t1 as select * from mysql.user;
-truncate table mysql.user;
+create table t1 as select * from mysql.global_priv;
+truncate table mysql.global_priv;
flush privileges;
# connecting via unix socket gives ER_ACCESS_DENIED_ERROR
@@ -14,7 +14,23 @@ flush privileges;
--error ER_ACCESS_DENIED_ERROR,ER_HOST_NOT_PRIVILEGED
connect (fail,localhost,u1);
-insert mysql.user select * from t1;
+insert mysql.global_priv select * from t1;
drop table t1;
flush privileges;
+#
+# same with mysql.user
+#
+
+source include/switch_to_mysql_user.inc;
+truncate table mysql.user;
+
+flush privileges;
+
+# connecting via unix socket gives ER_ACCESS_DENIED_ERROR
+# connecting via tcp/ip gives ER_HOST_NOT_PRIVILEGED
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+--error ER_ACCESS_DENIED_ERROR,ER_HOST_NOT_PRIVILEGED
+connect (fail,localhost,u1);
+
+source include/switch_to_mysql_global_priv.inc;
diff --git a/mysql-test/main/endspace.result b/mysql-test/main/endspace.result
index f9619db7e64..a0f53167148 100644
--- a/mysql-test/main/endspace.result
+++ b/mysql-test/main/endspace.result
@@ -145,8 +145,8 @@ teststring
teststring
select * from t1 where text1='teststring' or text1 >= 'teststring\t';
text1
-teststring
teststring
+teststring
select * from t1 order by text1;
text1
nothing
diff --git a/mysql-test/main/events_restart.result b/mysql-test/main/events_restart.result
index 0caac907f64..8c9c252018a 100644
--- a/mysql-test/main/events_restart.result
+++ b/mysql-test/main/events_restart.result
@@ -15,6 +15,7 @@ insert into event_like select * from mysql.event;
alter table mysql.event
change column body body longtext character set utf8 collate utf8_bin;
"Now we restart the server"
+# restart
use events_test;
select @@event_scheduler;
@@event_scheduler
@@ -75,6 +76,7 @@ events_test abc1 root@localhost SYSTEM RECURRING # 1 SECOND # # ENABLED 1 latin1
events_test abc2 root@localhost SYSTEM RECURRING # 1 SECOND # # ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
events_test abc3 root@localhost SYSTEM RECURRING # 1 SECOND # # ENABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
Now let's restart the server again
+# restart
use events_test;
select @@event_scheduler;
@@event_scheduler
@@ -95,6 +97,7 @@ SHOW EVENTS;
Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation
test e1 root@localhost SYSTEM RECURRING # 1 SECOND # # DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
"Now we restart the server"
+# restart
USE test;
SELECT @@event_scheduler;
@@event_scheduler
diff --git a/mysql-test/main/except.result b/mysql-test/main/except.result
index 594bb7118eb..9c5a3eaa93c 100644
--- a/mysql-test/main/except.result
+++ b/mysql-test/main/except.result
@@ -24,7 +24,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
3 EXCEPT t2 ALL NULL NULL NULL NULL 2 100.00
NULL EXCEPT RESULT <except2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
-Note 1003 /* select#1 */ select `a`.`a` AS `a`,`a`.`b` AS `b` from ((/* select#2 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`) except (/* select#3 */ select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2`)) `a`
+Note 1003 /* select#1 */ select `a`.`a` AS `a`,`a`.`b` AS `b` from (/* select#2 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` except (/* select#3 */ select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2`)) `a`
EXPLAIN format=json (select a,b from t1) except (select c,d from t2);
EXPLAIN
{
@@ -229,7 +229,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
3 EXCEPT t4 ALL NULL NULL NULL NULL 2 100.00 Using join buffer (flat, BNL join)
NULL EXCEPT RESULT <except2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
-Note 1003 /* select#1 */ select `a`.`a` AS `a`,`a`.`b` AS `b`,`a`.`e` AS `e`,`a`.`f` AS `f` from ((/* select#2 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t1` join `test`.`t3`) except (/* select#3 */ select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d`,`test`.`t4`.`g` AS `g`,`test`.`t4`.`h` AS `h` from `test`.`t2` join `test`.`t4`)) `a`
+Note 1003 /* select#1 */ select `a`.`a` AS `a`,`a`.`b` AS `b`,`a`.`e` AS `e`,`a`.`f` AS `f` from (/* select#2 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t1` join `test`.`t3` except (/* select#3 */ select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d`,`test`.`t4`.`g` AS `g`,`test`.`t4`.`h` AS `h` from `test`.`t2` join `test`.`t4`)) `a`
EXPLAIN format=json (select a,b,e,f from t1,t3) except (select c,d,g,h from t2,t4);
EXPLAIN
{
@@ -255,7 +255,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "119",
"join_type": "BNL"
}
}
@@ -278,7 +278,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "119",
"join_type": "BNL"
}
}
@@ -324,7 +324,7 @@ ANALYZE
"r_filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "119",
"join_type": "BNL",
"r_filtered": 100
}
@@ -358,7 +358,7 @@ ANALYZE
"r_filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "119",
"join_type": "BNL",
"r_filtered": 100
}
@@ -420,7 +420,7 @@ ANALYZE
"r_filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "119",
"join_type": "BNL",
"r_filtered": 100
}
@@ -454,7 +454,7 @@ ANALYZE
"r_filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "119",
"join_type": "BNL",
"r_filtered": 100
}
@@ -500,7 +500,7 @@ a
(select 1 from dual) except (select 1 from dual);
1
(select 1 from dual into @v) except (select 1 from dual);
-ERROR HY000: Incorrect usage of EXCEPT and INTO
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'into @v) except (select 1 from dual)' at line 1
select 1 from dual ORDER BY 1 except select 1 from dual;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'except select 1 from dual' at line 1
select 1 as a from dual union all select 1 from dual;
@@ -508,7 +508,7 @@ a
1
1
select 1 from dual except all select 1 from dual;
-ERROR HY000: Incorrect usage of EXCEPT and ALL
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'all select 1 from dual' at line 1
create table t1 (a int, b blob, a1 int, b1 blob) engine=MyISAM;
create table t2 (c int, d blob, c1 int, d1 blob) engine=MyISAM;
insert into t1 values (1,"ddd", 1, "sdfrrwwww"),(2, "fgh", 2, "dffggtt");
diff --git a/mysql-test/main/except.test b/mysql-test/main/except.test
index f88d9b29e35..32aa0b90544 100644
--- a/mysql-test/main/except.test
+++ b/mysql-test/main/except.test
@@ -60,13 +60,13 @@ drop tables t1,t2,t3,t4;
select 1 as a from dual except select 1 from dual;
(select 1 from dual) except (select 1 from dual);
---error ER_WRONG_USAGE
+--error ER_PARSE_ERROR
(select 1 from dual into @v) except (select 1 from dual);
--error ER_PARSE_ERROR
select 1 from dual ORDER BY 1 except select 1 from dual;
select 1 as a from dual union all select 1 from dual;
---error ER_WRONG_USAGE
+--error ER_PARSE_ERROR
select 1 from dual except all select 1 from dual;
diff --git a/mysql-test/main/explain.result b/mysql-test/main/explain.result
index f593e0dfaba..bc3c53d01d3 100644
--- a/mysql-test/main/explain.result
+++ b/mysql-test/main/explain.result
@@ -13,7 +13,7 @@ id str
3 foo
explain select * from t1 where str is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref str str 11 const 1 Using index condition
+1 SIMPLE t1 ref str str 11 const 2 Using index condition
explain select * from t1 where str="foo";
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const str str 11 const 1
@@ -311,9 +311,15 @@ INSERT INTO t1 VALUES(4, 1, 1, 1);
INSERT INTO t1 VALUES(3, 1, 1, 1);
INSERT INTO t1 VALUES(2, 1, 1, 1);
INSERT INTO t1 VALUES(1, 1, 1, 1);
+INSERT INTO t1 VALUES(5, 2, 1, 1);
+INSERT INTO t1 VALUES(6, 2, 1, 1);
+INSERT INTO t1 VALUES(7, 3, 1, 1);
+INSERT INTO t1 VALUES(9, 3, 1, 1);
+INSERT INTO t1 VALUES(10, 4, 1, 1);
+INSERT INTO t1 VALUES(11, 4, 1, 1);
EXPLAIN SELECT c1 FROM t1 WHERE c2 = 1 AND c4 = 1 AND c5 = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref c2,c2_2 c2 10 const,const 3 Using where
+1 SIMPLE t1 ref c2,c2_2 c2 10 const,const 4 Using where
DROP TABLE t1;
#
# Bug#56814 Explain + subselect + fulltext crashes server
diff --git a/mysql-test/main/explain.test b/mysql-test/main/explain.test
index d5be354c852..cf9f6be09ed 100644
--- a/mysql-test/main/explain.test
+++ b/mysql-test/main/explain.test
@@ -238,6 +238,12 @@ INSERT INTO t1 VALUES(4, 1, 1, 1);
INSERT INTO t1 VALUES(3, 1, 1, 1);
INSERT INTO t1 VALUES(2, 1, 1, 1);
INSERT INTO t1 VALUES(1, 1, 1, 1);
+INSERT INTO t1 VALUES(5, 2, 1, 1);
+INSERT INTO t1 VALUES(6, 2, 1, 1);
+INSERT INTO t1 VALUES(7, 3, 1, 1);
+INSERT INTO t1 VALUES(9, 3, 1, 1);
+INSERT INTO t1 VALUES(10, 4, 1, 1);
+INSERT INTO t1 VALUES(11, 4, 1, 1);
EXPLAIN SELECT c1 FROM t1 WHERE c2 = 1 AND c4 = 1 AND c5 = 1;
diff --git a/mysql-test/main/explain_json.result b/mysql-test/main/explain_json.result
index ef6b70aff71..b918e7614af 100644
--- a/mysql-test/main/explain_json.result
+++ b/mysql-test/main/explain_json.result
@@ -373,7 +373,7 @@ EXPLAIN
"attached_condition": "tbl2.b < 5"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "1Kb",
"join_type": "BNL",
"attached_condition": "tbl2.a = tbl1.a"
}
@@ -640,7 +640,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "1Kb",
"join_type": "BNL"
}
}
@@ -674,7 +674,7 @@ EXPLAIN
"first_match": "t2"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "141",
"join_type": "BNL",
"attached_condition": "t1.b = t2.b and t1.a = t2.a"
}
@@ -711,7 +711,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "206",
"join_type": "BNL",
"attached_condition": "t1.b = t2.b and t1.a = t2.a"
}
@@ -739,7 +739,7 @@ EXPLAIN
"key": "a",
"key_length": "5",
"used_key_parts": ["a"],
- "rows": 1,
+ "rows": 2,
"filtered": 100,
"index_condition": "t1.a < 3",
"mrr_type": "Rowid-ordered scan"
@@ -825,7 +825,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "65",
"join_type": "BNL",
"attached_condition": "t2.b <> outer_t1.a"
}
@@ -876,7 +876,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "1Kb",
"join_type": "BNL",
"attached_condition": "tbl2.b = tbl1.b"
}
@@ -1029,6 +1029,7 @@ create index idx_t1_1 on t1 (a1,a2,b,c);
create index idx_t1_2 on t1 (a1,a2,b);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
explain select count(distinct a1,a2,b) from t1 where (a2 >= 'b') and (b = 'a');
id select_type table type possible_keys key key_len ref rows Extra
@@ -1491,7 +1492,7 @@ EXPLAIN
"index_condition_bka": "t4.b + 1 <= t3.b + 1"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "400",
"join_type": "BKA",
"mrr_type": "Rowid-ordered scan"
}
@@ -1534,7 +1535,7 @@ ANALYZE
"index_condition_bka": "t4.b + 1 <= t3.b + 1"
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "400",
"join_type": "BKA",
"mrr_type": "Rowid-ordered scan",
"r_filtered": 100
diff --git a/mysql-test/main/explain_non_select.result b/mysql-test/main/explain_non_select.result
index 5a6b9f841c2..51414d0c196 100644
--- a/mysql-test/main/explain_non_select.result
+++ b/mysql-test/main/explain_non_select.result
@@ -229,7 +229,7 @@ INSERT INTO t1 VALUES (1),(2);
EXPLAIN UPDATE v1, mysql.user SET v1.a = v1.a + 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
-1 SIMPLE user index NULL PRIMARY 420 NULL 4 Using index
+1 SIMPLE global_priv index NULL PRIMARY 420 NULL 4 Using index
DROP TABLE t1;
DROP VIEW v1;
#
diff --git a/mysql-test/main/failed_auth_3909.result b/mysql-test/main/failed_auth_3909.result
index d0fd2c41221..19951415585 100644
--- a/mysql-test/main/failed_auth_3909.result
+++ b/mysql-test/main/failed_auth_3909.result
@@ -1,24 +1,17 @@
-optimize table mysql.user;
-Table Op Msg_type Msg_text
-mysql.user optimize status OK
-insert ignore mysql.user (user,plugin) values ('foo','bar'),('bar','bar'),('baz','bar');
-Warnings:
-Warning 1364 Field 'ssl_cipher' doesn't have a default value
-Warning 1364 Field 'x509_issuer' doesn't have a default value
-Warning 1364 Field 'x509_subject' doesn't have a default value
-Warning 1364 Field 'authentication_string' doesn't have a default value
-flush privileges;
+create user foo identified via mysql_old_password;
+create user bar identified via mysql_old_password;
+create user baz identified via mysql_old_password;
connect(localhost,u1,,test,MASTER_PORT,MASTER_SOCKET);
connect fail,localhost,u1;
-ERROR HY000: Plugin 'bar' is not loaded
+ERROR 28000: Access denied for user 'u1'@'localhost' (using password: NO)
connect(localhost,u2,,test,MASTER_PORT,MASTER_SOCKET);
connect fail,localhost,u2;
-ERROR 28000: Access denied for user 'u2'@'localhost' (using password: NO)
+ERROR HY000: Server is running in --secure-auth mode, but 'u2'@'localhost' has a password in the old format; please change the password to the new format
connect(localhost,u2,password,test,MASTER_PORT,MASTER_SOCKET);
connect fail,localhost,u2,password;
-ERROR 28000: Access denied for user 'u2'@'localhost' (using password: YES)
-ERROR HY000: Plugin 'bar' is not loaded
-ERROR 28000: Access denied for user 'u2'@'localhost' (using password: NO)
-ERROR 28000: Access denied for user 'u2'@'localhost' (using password: YES)
-delete from mysql.user where plugin = 'bar';
+ERROR HY000: Server is running in --secure-auth mode, but 'u2'@'localhost' has a password in the old format; please change the password to the new format
+ERROR 28000: Access denied for user 'u1'@'localhost' (using password: NO)
+ERROR HY000: Server is running in --secure-auth mode, but 'u2'@'localhost' has a password in the old format; please change the password to the new format
+ERROR HY000: Server is running in --secure-auth mode, but 'u2'@'localhost' has a password in the old format; please change the password to the new format
+delete from mysql.user where plugin = 'mysql_old_password';
flush privileges;
diff --git a/mysql-test/main/failed_auth_3909.test b/mysql-test/main/failed_auth_3909.test
index f72460691ea..25e45e49a0e 100644
--- a/mysql-test/main/failed_auth_3909.test
+++ b/mysql-test/main/failed_auth_3909.test
@@ -6,32 +6,32 @@ source include/not_embedded.inc;
# verify that for some failed login attempts (with wrong user names)
# the server requests a plugin
#
-optimize table mysql.user;
-insert ignore mysql.user (user,plugin) values ('foo','bar'),('bar','bar'),('baz','bar');
-flush privileges;
+create user foo identified via mysql_old_password;
+create user bar identified via mysql_old_password;
+create user baz identified via mysql_old_password;
--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
---error ER_PLUGIN_IS_NOT_LOADED
+--error ER_ACCESS_DENIED_ERROR
connect (fail,localhost,u1);
--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
---error ER_ACCESS_DENIED_ERROR
+--error ER_SERVER_IS_IN_SECURE_AUTH_MODE
connect (fail,localhost,u2);
--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
---error ER_ACCESS_DENIED_ERROR
+--error ER_SERVER_IS_IN_SECURE_AUTH_MODE
connect (fail,localhost,u2,password);
---error ER_PLUGIN_IS_NOT_LOADED
+--error ER_ACCESS_DENIED_ERROR
change_user u1;
---error ER_ACCESS_DENIED_ERROR
+--error ER_SERVER_IS_IN_SECURE_AUTH_MODE
change_user u2;
---error ER_ACCESS_DENIED_ERROR
+--error ER_SERVER_IS_IN_SECURE_AUTH_MODE
change_user u2,password;
-delete from mysql.user where plugin = 'bar';
+delete from mysql.user where plugin = 'mysql_old_password';
flush privileges;
diff --git a/mysql-test/main/failed_auth_unixsocket.result b/mysql-test/main/failed_auth_unixsocket.result
index 680d3b48a33..98057c02b74 100644
--- a/mysql-test/main/failed_auth_unixsocket.result
+++ b/mysql-test/main/failed_auth_unixsocket.result
@@ -1,13 +1,7 @@
-update mysql.user set plugin='unix_socket';
+update mysql.global_priv set priv=json_insert(priv, '$.plugin', 'unix_socket');
flush privileges;
connect(localhost,USER,,test,MASTER_PORT,MASTER_SOCKET);
-connect fail,localhost,$USER;
-ERROR HY000: Plugin 'unix_socket' is not loaded
-ERROR HY000: Plugin 'unix_socket' is not loaded
-install plugin unix_socket soname 'auth_socket.so';
-connect(localhost,USER,,test,MASTER_PORT,MASTER_SOCKET);
ERROR 28000: Access denied for user 'USER'@'localhost'
ERROR 28000: Access denied for user 'USER'@'localhost'
-update mysql.user set plugin='';
+update mysql.global_priv set priv=json_compact(json_remove(priv, '$.plugin'));
flush privileges;
-uninstall plugin unix_socket;
diff --git a/mysql-test/main/failed_auth_unixsocket.test b/mysql-test/main/failed_auth_unixsocket.test
index f7345f44698..0dcc050a463 100644
--- a/mysql-test/main/failed_auth_unixsocket.test
+++ b/mysql-test/main/failed_auth_unixsocket.test
@@ -4,33 +4,23 @@
# MDEV-3909 remote user enumeration
# unix_socket tests
#
-update mysql.user set plugin='unix_socket';
+update mysql.global_priv set priv=json_insert(priv, '$.plugin', 'unix_socket');
flush privileges;
---replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT $USER USER
---error ER_PLUGIN_IS_NOT_LOADED
-connect (fail,localhost,$USER);
-
---error ER_PLUGIN_IS_NOT_LOADED
-change_user $USER;
-
-eval install plugin unix_socket soname '$AUTH_SOCKET_SO';
-
# Make sure that the replace works, even if $USER is 'user' or something else
# that matches other parts of the error message.
+let $replace=Access denied for user '$USER';
+
--echo connect(localhost,USER,,test,MASTER_PORT,MASTER_SOCKET);
---let $replace=Access denied for user '$USER'
---replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT $replace "Access denied for user 'USER'"
+--replace_result $replace "Access denied for user 'USER'"
--disable_query_log
--error ER_ACCESS_DENIED_NO_PASSWORD_ERROR
connect (fail,localhost,$USER);
--enable_query_log
---replace_result $replace "Access denied for user 'USER'"
+--replace_result $replace "Access denied for user 'USER'"
--error ER_ACCESS_DENIED_NO_PASSWORD_ERROR
change_user $USER;
-update mysql.user set plugin='';
+update mysql.global_priv set priv=json_compact(json_remove(priv, '$.plugin'));
flush privileges;
-uninstall plugin unix_socket;
-
diff --git a/mysql-test/main/features.result b/mysql-test/main/features.result
index f31a6672549..beab7fb946f 100644
--- a/mysql-test/main/features.result
+++ b/mysql-test/main/features.result
@@ -3,6 +3,7 @@ set sql_mode="";
flush status;
show status like "feature%";
Variable_name Value
+Feature_application_time_periods 0
Feature_check_constraint 0
Feature_custom_aggregate_functions 0
Feature_delay_key_write 0
diff --git a/mysql-test/main/flush.result b/mysql-test/main/flush.result
index af8e327657b..8149ce29dec 100644
--- a/mysql-test/main/flush.result
+++ b/mysql-test/main/flush.result
@@ -364,16 +364,19 @@ flush table t1;
connection default;
# Let flush table sync in.
select * from t1;
+a
connection con1;
select * from t1;
a
unlock tables;
+connection default;
+select count(*) from information_schema.processlist where state = "Waiting for table metadata lock";
+count(*)
+1
+commit;
connection con2;
# Reaping 'flush table t1'...
connection default;
-# Reaping 'select * from t1'...
-a
-commit;
#
# Repeat the same test but with FLUSH TABLES
#
@@ -386,13 +389,10 @@ connection con1;
#
lock table t1 read;
connection con2;
-#
-# FLUSH TABLES expels the table definition from the cache.
-# Sending 'flush tables'...
flush tables;
connection default;
-# Let flush table sync in.
select * from t1;
+a
connection con1;
select * from t1;
a
@@ -400,8 +400,6 @@ unlock tables;
connection con2;
# Reaping 'flush tables'...
connection default;
-# Reaping 'select * from t1'...
-a
commit;
# Cleanup
connection con1;
@@ -472,10 +470,7 @@ create table t1 (i int);
create table t2 (i int);
handler t1 open;
connection con1;
-# Sending:
flush tables with read lock;
-connection con2;
-# Wait until FTWRL starts waiting for 't1' to be closed.
connection default;
# The below statement should not cause deadlock.
# Sending:
@@ -483,8 +478,6 @@ insert into t2 values (1);
connection con2;
# Wait until INSERT starts to wait for FTWRL to go away.
connection con1;
-# FTWRL should be able to continue now.
-# Reap FTWRL.
unlock tables;
connection default;
# Reap INSERT.
diff --git a/mysql-test/main/flush.test b/mysql-test/main/flush.test
index 51b5c48c137..17f9241a122 100644
--- a/mysql-test/main/flush.test
+++ b/mysql-test/main/flush.test
@@ -449,24 +449,20 @@ connection default;
--echo # Let flush table sync in.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for table flush"
+ where state = "Waiting for table metadata lock"
and info = "flush table t1";
--source include/wait_condition.inc
-send select * from t1;
+select * from t1;
connection con1;
-let $wait_condition=
- select count(*) = 1 from information_schema.processlist
- where state = "Waiting for table flush"
- and info = "select * from t1";
select * from t1;
unlock tables;
+connection default;
+select count(*) from information_schema.processlist where state = "Waiting for table metadata lock";
+commit;
connection con2;
--echo # Reaping 'flush table t1'...
reap;
connection default;
---echo # Reaping 'select * from t1'...
-reap;
-commit;
--echo #
--echo # Repeat the same test but with FLUSH TABLES
@@ -480,31 +476,16 @@ connection con1;
--echo #
lock table t1 read;
connection con2;
---echo #
---echo # FLUSH TABLES expels the table definition from the cache.
---echo # Sending 'flush tables'...
send flush tables;
connection default;
---echo # Let flush table sync in.
-let $wait_condition=
- select count(*) = 1 from information_schema.processlist
- where state = "Waiting for table flush"
- and info = "flush tables";
---source include/wait_condition.inc
-send select * from t1;
+select * from t1;
connection con1;
-let $wait_condition=
- select count(*) = 1 from information_schema.processlist
- where state = "Waiting for table flush"
- and info = "select * from t1";
select * from t1;
unlock tables;
connection con2;
--echo # Reaping 'flush tables'...
reap;
connection default;
---echo # Reaping 'select * from t1'...
-reap;
commit;
--echo # Cleanup
@@ -566,17 +547,7 @@ create table t2 (i int);
handler t1 open;
connection con1;
---echo # Sending:
---send flush tables with read lock
-
-connection con2;
---echo # Wait until FTWRL starts waiting for 't1' to be closed.
-let $wait_condition=
- select count(*) = 1 from information_schema.processlist
- where state = "Waiting for table flush"
- and info = "flush tables with read lock";
---source include/wait_condition.inc
-
+flush tables with read lock;
connection default;
--echo # The below statement should not cause deadlock.
--echo # Sending:
@@ -586,14 +557,11 @@ connection con2;
--echo # Wait until INSERT starts to wait for FTWRL to go away.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock"
+ where state = "Waiting for backup lock"
and info = "insert into t2 values (1)";
--source include/wait_condition.inc
connection con1;
---echo # FTWRL should be able to continue now.
---echo # Reap FTWRL.
---reap
unlock tables;
connection default;
diff --git a/mysql-test/main/flush_block_commit.test b/mysql-test/main/flush_block_commit.test
index 6a6120ce63f..0280aedf2ca 100644
--- a/mysql-test/main/flush_block_commit.test
+++ b/mysql-test/main/flush_block_commit.test
@@ -32,7 +32,7 @@ connection con2;
--echo # Wait until COMMIT gets blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for commit lock" and info = "COMMIT";
+ where state = "Waiting for backup lock" and info = "COMMIT";
--source include/wait_condition.inc
--echo # Verify that 'con1' was blocked and data did not move.
SELECT * FROM t1;
diff --git a/mysql-test/main/flush_block_commit_notembedded.test b/mysql-test/main/flush_block_commit_notembedded.test
index 3d894c5f16c..5be9e50e58b 100644
--- a/mysql-test/main/flush_block_commit_notembedded.test
+++ b/mysql-test/main/flush_block_commit_notembedded.test
@@ -46,7 +46,7 @@ begin;
connection con1;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "insert into t1 values (1)";
--source include/wait_condition.inc
unlock tables;
diff --git a/mysql-test/main/flush_read_lock.result b/mysql-test/main/flush_read_lock.result
index 55c31ae8d12..5e836dd0544 100644
--- a/mysql-test/main/flush_read_lock.result
+++ b/mysql-test/main/flush_read_lock.result
@@ -159,11 +159,10 @@ Success: FTWRL is blocked when 'alter event e1 comment 'test'' is active in anot
#
# 2) ANALYZE TABLE statement is compatible with FTWRL.
# See Bug#43336 ANALYZE and OPTIMIZE do not honour
-# --read-only for a discussion why.
+# --read-only as they update status tables.
#
-Success: Was able to run 'analyze table t1_base' under FTWRL.
-Success: Was able to run 'analyze table t1_base' with FTWRL active in another connection.
-Success: Was able to run FTWRL while 'analyze table t1_base' was active in another connection.
+Success: Was not able to run 'analyze table t1_base' under FTWRL.
+Success: 'analyze table t1_base' is blocked by FTWRL active in another connection.
#
# 3) BEGIN, ROLLBACK and COMMIT statements.
# BEGIN and ROLLBACK are compatible with FTWRL.
@@ -652,6 +651,7 @@ connection default;
# 14.2) FLUSH TABLES <list> WITH READ LOCK is not blocked by
# active FTWRL. But since the latter keeps tables open
# FTWRL is blocked by FLUSH TABLES <list> WITH READ LOCK.
+# Fixed by MDEV-5336
flush tables with read lock;
# FT <list> WRL is allowed under FTWRL at the moment.
# It does not make much sense though.
@@ -668,12 +668,9 @@ connection default;
flush tables t1_base, t2_base with read lock;
connection con1;
flush tables with read lock;
-connection con2;
-# Wait until FTWRL is blocked.
connection default;
unlock tables;
connection con1;
-# Reap FTWRL.
unlock tables;
connection default;
#
@@ -1411,10 +1408,8 @@ set autocommit= 1;
# 39.1.a) ANALYZE TABLE for transactional table is incompatible with
# FTWRL.
flush tables with read lock;
-# Implicit commits are allowed under FTWRL.
analyze table t3_trans;
-Table Op Msg_type Msg_text
-test.t3_trans analyze status OK
+ERROR HY000: Can't execute the query because you have a conflicting read lock
unlock tables;
#
connection con1;
@@ -1427,6 +1422,7 @@ unlock tables;
connection default;
# Reap ANALYZE TABLE
Table Op Msg_type Msg_text
+test.t3_trans analyze status Engine-independent statistics collected
test.t3_trans analyze status OK
#
# 39.1.b) CHECK TABLE for transactional table is compatible with FTWRL.
@@ -1462,7 +1458,7 @@ Success: Was able to run 'repair table t3_temp_trans' with FTWRL active in anoth
Success: Was able to run FTWRL while 'repair table t3_temp_trans' was active in another connection.
#
# And ANALYZE TABLE:
-Success: Was able to run 'analyze table t3_temp_trans' under FTWRL.
+Error: Wasn't able to run 'analyze table t3_temp_trans' under FTWRL!
Success: Was able to run 'analyze table t3_temp_trans' with FTWRL active in another connection.
Success: Was able to run FTWRL while 'analyze table t3_temp_trans' was active in another connection.
#
@@ -1677,3 +1673,57 @@ disconnect con1;
disconnect con2;
disconnect con3;
set global sql_mode=default;
+#
+# Deadlock between FTWRL under open handler and DDL/LOCK TABLES
+#
+CREATE TABLE t1(a INT);
+HANDLER t1 OPEN;
+#
+connect con1,localhost,root,,;
+SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL ready';
+LOCK TABLE t1 WRITE;
+#
+# we need to do it in a separate connection,
+# because SET DEBUG_SYNC calls open_tables()/mysql_ha_flush() :(
+connect con2,localhost,root,,;
+SET DEBUG_SYNC= 'now WAIT_FOR ready';
+disconnect con2;
+#
+connection default;
+FLUSH TABLES WITH READ LOCK;
+UNLOCK TABLES;
+HANDLER t1 CLOSE;
+#
+connection con1;
+UNLOCK TABLES;
+disconnect con1;
+#
+connection default;
+DROP TABLE t1;
+SET DEBUG_SYNC= 'RESET';
+#
+# Make sure pending LOCK TABLES doesn't block FTWRL
+#
+CREATE TABLE t1(a INT);
+LOCK TABLE t1 READ;
+#
+connect con1,localhost,root,,;
+SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL ready';
+LOCK TABLE t1 WRITE;
+#
+connect con2,localhost,root,,;
+SET DEBUG_SYNC= 'now WAIT_FOR ready';
+FLUSH TABLES WITH READ LOCK;
+UNLOCK TABLES;
+disconnect con2;
+#
+connection default;
+UNLOCK TABLES;
+#
+connection con1;
+UNLOCK TABLES;
+disconnect con1;
+#
+connection default;
+DROP TABLE t1;
+SET DEBUG_SYNC= 'RESET';
diff --git a/mysql-test/main/flush_read_lock.test b/mysql-test/main/flush_read_lock.test
index 4a9752ae9f1..786c1747c20 100644
--- a/mysql-test/main/flush_read_lock.test
+++ b/mysql-test/main/flush_read_lock.test
@@ -199,11 +199,13 @@ let $cleanup_stmt1= alter event e1 comment '';
--echo #
--echo # 2) ANALYZE TABLE statement is compatible with FTWRL.
--echo # See Bug#43336 ANALYZE and OPTIMIZE do not honour
---echo # --read-only for a discussion why.
+--echo # --read-only as they update status tables.
--echo #
+let $skip_3rd_check= 1;
let $statement= analyze table t1_base;
let $cleanup_stmt= ;
---source include/check_ftwrl_compatible.inc
+--source include/check_ftwrl_incompatible.inc
+let $skip_3rd_check= ;
--echo #
--echo # 3) BEGIN, ROLLBACK and COMMIT statements.
@@ -259,7 +261,7 @@ connection $con_aux1;
--echo # Wait until COMMIT is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for commit lock" and
+ where state = "Waiting for backup lock" and
info = "commit";
--source include/wait_condition.inc
unlock tables;
@@ -281,7 +283,7 @@ connection $con_aux2;
--echo # Wait until FTWRL is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for commit lock" and
+ where state = "Waiting for backup lock" and
info = "flush tables with read lock";
--source include/wait_condition.inc
set debug_sync='now SIGNAL go';
@@ -565,7 +567,7 @@ connection $con_aux1;
--echo # Check that EXECUTE is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "insert into t1_base values (1)";
--source include/wait_condition.inc
unlock tables;
@@ -582,7 +584,7 @@ connection $con_aux2;
--echo # Wait until FTWRL is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "flush tables with read lock";
--source include/wait_condition.inc
set debug_sync='now SIGNAL go';
@@ -800,6 +802,8 @@ connection default;
--echo # 14.2) FLUSH TABLES <list> WITH READ LOCK is not blocked by
--echo # active FTWRL. But since the latter keeps tables open
--echo # FTWRL is blocked by FLUSH TABLES <list> WITH READ LOCK.
+--echo # Fixed by MDEV-5336
+
flush tables with read lock;
--echo # FT <list> WRL is allowed under FTWRL at the moment.
--echo # It does not make much sense though.
@@ -815,19 +819,10 @@ unlock tables;
connection default;
flush tables t1_base, t2_base with read lock;
connection $con_aux1;
---send flush tables with read lock
-connection $con_aux2;
---echo # Wait until FTWRL is blocked.
-let $wait_condition=
- select count(*) = 1 from information_schema.processlist
- where state = "Waiting for table flush" and
- info = "flush tables with read lock";
---source include/wait_condition.inc
+flush tables with read lock;
connection default;
unlock tables;
connection $con_aux1;
---echo # Reap FTWRL.
---reap
unlock tables;
connection default;
@@ -1018,7 +1013,7 @@ connection $con_aux1;
--echo # Check that LOCK TABLES WRITE is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "lock tables t1_base write";
--source include/wait_condition.inc
unlock tables;
@@ -1062,7 +1057,7 @@ connection $con_aux1;
--echo # Check that OPTIMIZE TABLE is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "optimize table t1_base";
--source include/wait_condition.inc
unlock tables;
@@ -1228,7 +1223,7 @@ connection $con_aux1;
--echo # Check that REPAIR TABLE is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "repair table t1_base";
--source include/wait_condition.inc
unlock tables;
@@ -1427,7 +1422,7 @@ connection $con_aux1;
--echo # Wait until SET AUTOCOMMIT=1 is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for commit lock" and
+ where state = "Waiting for backup lock" and
info = "set autocommit= 1";
--source include/wait_condition.inc
unlock tables;
@@ -1449,7 +1444,7 @@ connection $con_aux2;
--echo # Wait until FTWRL is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for commit lock" and
+ where state = "Waiting for backup lock" and
info = "flush tables with read lock";
--source include/wait_condition.inc
set debug_sync='now SIGNAL go';
@@ -1628,7 +1623,7 @@ connection $con_aux1;
--echo # Wait until XA COMMIT is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for commit lock" and
+ where state = "Waiting for backup lock" and
info = "xa commit 'test1'";
--source include/wait_condition.inc
unlock tables;
@@ -1652,7 +1647,7 @@ connection $con_aux2;
--echo # Wait until FTWRL is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for commit lock" and
+ where state = "Waiting for backup lock" and
info = "flush tables with read lock";
--source include/wait_condition.inc
set debug_sync='now SIGNAL go';
@@ -1719,7 +1714,7 @@ set autocommit= 1;
--echo # 39.1.a) ANALYZE TABLE for transactional table is incompatible with
--echo # FTWRL.
flush tables with read lock;
---echo # Implicit commits are allowed under FTWRL.
+--error ER_CANT_UPDATE_WITH_READLOCK
analyze table t3_trans;
unlock tables;
--echo #
@@ -1731,7 +1726,7 @@ connection $con_aux1;
--echo # Check that ANALYZE TABLE is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for commit lock" and
+ where state = "Waiting for backup lock" and
info = "analyze table t3_trans";
--source include/wait_condition.inc
unlock tables;
@@ -1806,7 +1801,7 @@ connection $con_aux1;
--echo # Check that CHECK TABLE is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for commit lock" and
+ where state = "Waiting for backup lock" and
info = "check table t1_base";
--source include/wait_condition.inc
unlock tables;
@@ -1824,7 +1819,7 @@ connection $con_aux1;
--echo # Check that ALTER TABLE is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for commit lock" and
+ where state = "Waiting for backup lock" and
info = "alter table t1_temp add column c1 int";
--source include/wait_condition.inc
unlock tables;
@@ -1877,7 +1872,7 @@ connection $con_aux2;
--echo # Wait until FTWRL is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "flush tables with read lock";
--source include/wait_condition.inc
--echo # Try to run another INSERT and see that it is blocked.
@@ -1886,7 +1881,7 @@ connection con3;
--echo # Wait until new INSERT is blocked.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "insert into t2_base values (1)";
--echo # Unblock INSERT in the first connection.
set debug_sync='now SIGNAL go';
@@ -2022,3 +2017,73 @@ set global sql_mode=default;
# Check that all connections opened by test cases in this file are really
# gone so execution of other tests won't be affected by their presence.
--source include/wait_until_count_sessions.inc
+
+
+--echo #
+--echo # Deadlock between FTWRL under open handler and DDL/LOCK TABLES
+--echo #
+CREATE TABLE t1(a INT);
+HANDLER t1 OPEN;
+
+--echo #
+connect (con1,localhost,root,,);
+SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL ready';
+--send LOCK TABLE t1 WRITE
+
+--echo #
+--echo # we need to do it in a separate connection,
+--echo # because SET DEBUG_SYNC calls open_tables()/mysql_ha_flush() :(
+connect (con2,localhost,root,,);
+SET DEBUG_SYNC= 'now WAIT_FOR ready';
+disconnect con2;
+
+--echo #
+connection default;
+FLUSH TABLES WITH READ LOCK;
+UNLOCK TABLES;
+HANDLER t1 CLOSE;
+
+--echo #
+connection con1;
+reap;
+UNLOCK TABLES;
+disconnect con1;
+
+--echo #
+connection default;
+DROP TABLE t1;
+SET DEBUG_SYNC= 'RESET';
+
+
+--echo #
+--echo # Make sure pending LOCK TABLES doesn't block FTWRL
+--echo #
+CREATE TABLE t1(a INT);
+LOCK TABLE t1 READ;
+
+--echo #
+connect (con1,localhost,root,,);
+SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL ready';
+--send LOCK TABLE t1 WRITE
+
+--echo #
+connect (con2,localhost,root,,);
+SET DEBUG_SYNC= 'now WAIT_FOR ready';
+FLUSH TABLES WITH READ LOCK;
+UNLOCK TABLES;
+disconnect con2;
+
+--echo #
+connection default;
+UNLOCK TABLES;
+
+--echo #
+connection con1;
+reap;
+UNLOCK TABLES;
+disconnect con1;
+
+--echo #
+connection default;
+DROP TABLE t1;
+SET DEBUG_SYNC= 'RESET';
diff --git a/mysql-test/main/flush_read_lock_kill.test b/mysql-test/main/flush_read_lock_kill.test
index d83e5b3f1df..bd3efd7bdc4 100644
--- a/mysql-test/main/flush_read_lock_kill.test
+++ b/mysql-test/main/flush_read_lock_kill.test
@@ -51,7 +51,7 @@ SELECT ((@id := kill_id) - kill_id) FROM t1 LIMIT 1;
--echo # to active COMMIT
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for commit lock"
+ where state = "Waiting for backup lock"
and info = "flush tables with read lock";
--source include/wait_condition.inc
diff --git a/mysql-test/main/flush_ssl.result b/mysql-test/main/flush_ssl.result
new file mode 100644
index 00000000000..ab2944d2bc5
--- /dev/null
+++ b/mysql-test/main/flush_ssl.result
@@ -0,0 +1,28 @@
+# Kill the server
+# restart: --ssl-key=MYSQLTEST_VARDIR/tmp/ssl_key.pem --ssl-cert=MYSQLTEST_VARDIR/tmp/ssl_cert.pem
+connect ssl_con,localhost,root,,,,,SSL;
+SELECT VARIABLE_VALUE INTO @ssl_not_after FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_server_not_after';
+# Use a different certificate ("Not after" certificate field changed)
+FLUSH SSL;
+# Check new certificate used by new connection
+Result
+OK
+# Check that the existing SSL connection still works and uses the old certificate, even after the new one is loaded by FLUSH SSL
+connection ssl_con;
+SELECT IF(VARIABLE_VALUE=@ssl_not_after,'OK','FAIL') as Result FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_server_not_after';
+Result
+OK
+disconnect ssl_con;
+connection default;
+SELECT VARIABLE_NAME NAME, VARIABLE_VALUE VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME in ('Ssl_accepts', 'Ssl_finished_accepts');
+NAME VALUE
+SSL_ACCEPTS 1
+SSL_FINISHED_ACCEPTS 1
+FLUSH SSL;
+SELECT VARIABLE_NAME NAME, VARIABLE_VALUE VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME in ('Ssl_accepts', 'Ssl_finished_accepts');
+NAME VALUE
+SSL_ACCEPTS 0
+SSL_FINISHED_ACCEPTS 0
+# Cleanup
+# Kill the server
+# restart
diff --git a/mysql-test/main/flush_ssl.test b/mysql-test/main/flush_ssl.test
new file mode 100644
index 00000000000..e7bd57b156a
--- /dev/null
+++ b/mysql-test/main/flush_ssl.test
@@ -0,0 +1,61 @@
+# MDEV-16266 Reload SSL certificate
+# This test reloads server SSL certs with FLUSH SSL, and checks that
+# 1. old SSL connections (that existed before FLUSH) still work and use the old certificate
+# 2. new SSL connections use the new certificate
+# 3. if FLUSH SSL runs into an error, SSL is still functioning
+# Status variable Ssl_server_not_after is used to tell the old certificate from the new one.
+
+
+source include/have_ssl_communication.inc;
+
+# Restart server with cert. files located in temp directory
+# We are going to remove / replace them within the test,
+# so we can't use the ones in std_data directly.
+
+let $ssl_cert=$MYSQLTEST_VARDIR/tmp/ssl_cert.pem;
+let $ssl_key=$MYSQLTEST_VARDIR/tmp/ssl_key.pem;
+
+copy_file $MYSQL_TEST_DIR/std_data/server-key.pem $ssl_key;
+copy_file $MYSQL_TEST_DIR/std_data/server-cert.pem $ssl_cert;
+
+let $restart_parameters=--ssl-key=$ssl_key --ssl-cert=$ssl_cert;
+--source include/kill_mysqld.inc
+--source include/start_mysqld.inc
+
+connect ssl_con,localhost,root,,,,,SSL;
+SELECT VARIABLE_VALUE INTO @ssl_not_after FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_server_not_after';
+let $ssl_not_after=`SELECT @ssl_not_after`;
+
+remove_file $ssl_cert;
+remove_file $ssl_key;
+
+--echo # Use a different certificate ("Not after" certificate field changed)
+copy_file $MYSQL_TEST_DIR/std_data/server-new-key.pem $ssl_key;
+copy_file $MYSQL_TEST_DIR/std_data/server-new-cert.pem $ssl_cert;
+
+FLUSH SSL;
+
+--echo # Check new certificate used by new connection
+exec $MYSQL --ssl -e "SELECT IF(VARIABLE_VALUE <> '$ssl_not_after', 'OK', 'FAIL') as Result FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_server_not_after'";
+
+--echo # Check that the existing SSL connection still works and uses the old certificate, even after the new one is loaded by FLUSH SSL
+connection ssl_con;
+SELECT IF(VARIABLE_VALUE=@ssl_not_after,'OK','FAIL') as Result FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME='Ssl_server_not_after';
+
+disconnect ssl_con;
+connection default;
+
+SELECT VARIABLE_NAME NAME, VARIABLE_VALUE VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME in ('Ssl_accepts', 'Ssl_finished_accepts');
+FLUSH SSL;
+# Check that accepts are zeroed by FLUSH SSL.
+SELECT VARIABLE_NAME NAME, VARIABLE_VALUE VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME in ('Ssl_accepts', 'Ssl_finished_accepts');
+
+--echo # Cleanup
+remove_file $ssl_cert;
+remove_file $ssl_key;
+# restart with the usual SSL setup
+let $restart_parameters=;
+--source include/kill_mysqld.inc
+--source include/start_mysqld.inc
+
+
diff --git a/mysql-test/main/fulltext_order_by.result b/mysql-test/main/fulltext_order_by.result
index c2f57c6f9c2..a350a55c75d 100644
--- a/mysql-test/main/fulltext_order_by.result
+++ b/mysql-test/main/fulltext_order_by.result
@@ -126,7 +126,7 @@ group by
a.text, b.id, b.betreff
order by
match(b.betreff) against ('+abc' in boolean mode) desc;
-ERROR 42000: Table 'b' from one of the SELECTs cannot be used in field list
+ERROR 42000: Table 'b' from one of the SELECTs cannot be used in ORDER clause
select a.text, b.id, b.betreff
from
t2 a inner join t3 b on a.id = b.forum inner join
@@ -142,7 +142,7 @@ where
match(c.beitrag) against ('+abc' in boolean mode)
order by
match(b.betreff) against ('+abc' in boolean mode) desc;
-ERROR 42000: Table 'b' from one of the SELECTs cannot be used in field list
+ERROR 42000: Table 'b' from one of the SELECTs cannot be used in ORDER clause
select a.text, b.id, b.betreff
from
t2 a inner join t3 b on a.id = b.forum inner join
diff --git a/mysql-test/main/func_analyse.result b/mysql-test/main/func_analyse.result
index 1e78e603bca..1cb9e3c9ad8 100644
--- a/mysql-test/main/func_analyse.result
+++ b/mysql-test/main/func_analyse.result
@@ -153,13 +153,19 @@ End of 5.1 tests
# Start of 10.2 tests
#
(SELECT 1 FROM DUAL PROCEDURE ANALYSE());
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE())' at line 1
+((SELECT 1 FROM DUAL PROCEDURE ANALYSE()));
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE()))' at line 1
+(SELECT 1 FROM DUAL) PROCEDURE ANALYSE();
Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype
1 1 1 1 1 0 0 1.0000 0.0000 ENUM('1') NOT NULL
-((SELECT 1 FROM DUAL PROCEDURE ANALYSE()));
+((SELECT 1 FROM DUAL)) PROCEDURE ANALYSE();
Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype
1 1 1 1 1 0 0 1.0000 0.0000 ENUM('1') NOT NULL
+create table t1 (a int);
SELECT * FROM t1 UNION SELECT * FROM t1 PROCEDURE analyse();
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE analyse()' at line 1
+ERROR 42000: Incorrect usage/placement of 'PROCEDURE'
+drop table t1;
#
# MDEV-10030 sql_yacc.yy: Split table_expression and remove PROCEDURE from create_select, select_paren_derived, select_derived2, query_specification
#
@@ -171,3 +177,48 @@ SELECT (SELECT 1 FROM t1 PROCEDURE ANALYSE()) FROM t2;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE()) FROM t2' at line 1
SELECT ((SELECT 1 FROM t1 PROCEDURE ANALYSE())) FROM t2;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE())) FROM t2' at line 1
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16309 Split ::create_tmp_field() into virtual methods in Item
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+BEGIN NOT ATOMIC
+DECLARE rec ROW(Field_name TEXT,
+Min_value TEXT,
+Max_value TEXT,
+Min_length TEXT,
+Max_length TEXT,
+Empties_or_zeros TEXT,
+Nulls TEXT,
+Avg_value_or_avg_length TEXT,
+Std TEXT,
+Optimal_fieldtype TEXT);
+DECLARE c CURSOR FOR SELECT * FROM t1 PROCEDURE analyse();
+OPEN c;
+FETCH c INTO rec;
+CLOSE c;
+SELECT rec.field_name,
+rec.Min_value, rec.Max_value,
+rec.Min_length, rec. Max_length,
+rec.Empties_or_zeros, rec.Nulls,
+rec.Avg_value_or_avg_length, rec.Std,
+rec.Optimal_fieldtype;
+END;
+$$
+rec.field_name test.t1.a
+rec.Min_value 1
+rec.Max_value 3
+rec.Min_length 1
+rec. Max_length 1
+rec.Empties_or_zeros 0
+rec.Nulls 0
+rec.Avg_value_or_avg_length 2.0000
+rec.Std 0.8165
+rec.Optimal_fieldtype ENUM('1','2','3') NOT NULL
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/func_analyse.test b/mysql-test/main/func_analyse.test
index d99f5c0fa9a..3c8be90d6e2 100644
--- a/mysql-test/main/func_analyse.test
+++ b/mysql-test/main/func_analyse.test
@@ -161,12 +161,17 @@ DROP TABLE t1, t2;
--echo #
--echo # Start of 10.2 tests
--echo #
+--error ER_PARSE_ERROR
(SELECT 1 FROM DUAL PROCEDURE ANALYSE());
+--error ER_PARSE_ERROR
((SELECT 1 FROM DUAL PROCEDURE ANALYSE()));
+(SELECT 1 FROM DUAL) PROCEDURE ANALYSE();
+((SELECT 1 FROM DUAL)) PROCEDURE ANALYSE();
-# TODO:
---error ER_PARSE_ERROR
+create table t1 (a int);
+--error ER_CANT_USE_OPTION_HERE
SELECT * FROM t1 UNION SELECT * FROM t1 PROCEDURE analyse();
+drop table t1;
--echo #
--echo # MDEV-10030 sql_yacc.yy: Split table_expression and remove PROCEDURE from create_select, select_paren_derived, select_derived2, query_specification
@@ -181,3 +186,47 @@ SELECT * FROM t1 NATURAL JOIN (SELECT * FROM t2 PROCEDURE ANALYSE());
SELECT (SELECT 1 FROM t1 PROCEDURE ANALYSE()) FROM t2;
--error ER_PARSE_ERROR
SELECT ((SELECT 1 FROM t1 PROCEDURE ANALYSE())) FROM t2;
+
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-16309 Split ::create_tmp_field() into virtual methods in Item
+--echo #
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+--vertical_results
+DELIMITER $$;
+BEGIN NOT ATOMIC
+ DECLARE rec ROW(Field_name TEXT,
+ Min_value TEXT,
+ Max_value TEXT,
+ Min_length TEXT,
+ Max_length TEXT,
+ Empties_or_zeros TEXT,
+ Nulls TEXT,
+ Avg_value_or_avg_length TEXT,
+ Std TEXT,
+ Optimal_fieldtype TEXT);
+ DECLARE c CURSOR FOR SELECT * FROM t1 PROCEDURE analyse();
+ OPEN c;
+ FETCH c INTO rec;
+ CLOSE c;
+ SELECT rec.field_name,
+ rec.Min_value, rec.Max_value,
+ rec.Min_length, rec. Max_length,
+ rec.Empties_or_zeros, rec.Nulls,
+ rec.Avg_value_or_avg_length, rec.Std,
+ rec.Optimal_fieldtype;
+END;
+$$
+DELIMITER ;$$
+--horizontal_results
+DROP TABLE t1;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/func_debug.result b/mysql-test/main/func_debug.result
index e2bf0ca8df3..47bbced730b 100644
--- a/mysql-test/main/func_debug.result
+++ b/mysql-test/main/func_debug.result
@@ -1565,12 +1565,16 @@ A NULL
Warnings:
Note 1105 DBUG: [0] arg=2 handler=0 (longblob)
Note 1105 DBUG: types_compatible=yes bisect=no
+Note 1105 DBUG: [0] arg=2 handler=0 (longblob)
+Note 1105 DBUG: types_compatible=yes bisect=no
SELECT a,NULL AS b FROM t1 GROUP BY a HAVING 'A' IN (a,b);
a b
A NULL
Warnings:
Note 1105 DBUG: [0] arg=1 handler=0 (longblob)
Note 1105 DBUG: types_compatible=yes bisect=no
+Note 1105 DBUG: [0] arg=1 handler=0 (longblob)
+Note 1105 DBUG: types_compatible=yes bisect=no
SELECT a,NULL AS b FROM t1 GROUP BY a HAVING 'A' IN (b,'A',10);
a b
A NULL
@@ -1594,6 +1598,9 @@ Warnings:
Note 1105 DBUG: [0] arg=2 handler=0 (longblob)
Note 1105 DBUG: [1] arg=3 handler=1 (double)
Note 1105 DBUG: types_compatible=no bisect=no
+Note 1105 DBUG: [0] arg=2 handler=0 (longblob)
+Note 1105 DBUG: [1] arg=3 handler=1 (double)
+Note 1105 DBUG: types_compatible=no bisect=no
Warning 1292 Truncated incorrect DOUBLE value: 'A'
SELECT a,NULL AS b FROM t1 GROUP BY a HAVING 'A' IN (a,b,10);
a b
@@ -1602,6 +1609,9 @@ Warnings:
Note 1105 DBUG: [0] arg=1 handler=0 (longblob)
Note 1105 DBUG: [1] arg=3 handler=1 (double)
Note 1105 DBUG: types_compatible=no bisect=no
+Note 1105 DBUG: [0] arg=1 handler=0 (longblob)
+Note 1105 DBUG: [1] arg=3 handler=1 (double)
+Note 1105 DBUG: types_compatible=no bisect=no
Warning 1292 Truncated incorrect DOUBLE value: 'A'
DROP TABLE t1;
#
@@ -1674,3 +1684,283 @@ Warnings:
Note 1105 DBUG: Item_subselect::exec (select max(`test`.`t1`.`a`) from `test`.`t1`)
DROP TABLE t1;
SET SESSION debug_dbug="-d,Item_subselect";
+#
+# MDEV-16408 Remove tests for Item::type() in Item_basic_value::eq()
+#
+SET SESSION debug_dbug="+d,Item_basic_value";
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+SELECT * FROM t1 WHERE a BETWEEN 1 AND 1.0;
+a
+1
+Warnings:
+Note 1105 bin_eq=0 a=(int)1 b=(decimal)1.0
+SELECT * FROM t1 WHERE a BETWEEN 1 AND 1;
+a
+1
+Warnings:
+Note 1105 bin_eq=1 a=(int)1 b=(int)1
+SELECT * FROM t1 WHERE a BETWEEN 0 AND 1;
+a
+1
+Warnings:
+Note 1105 bin_eq=0 a=(int)0 b=(int)1
+SELECT * FROM t1 WHERE a BETWEEN 0 AND -1;
+a
+Warnings:
+Note 1105 bin_eq=0 a=(int)0 b=(int)-1
+SELECT * FROM t1 WHERE a BETWEEN -1 AND -1;
+a
+Warnings:
+Note 1105 bin_eq=1 a=(int)-1 b=(int)-1
+SELECT * FROM t1 WHERE a BETWEEN -0000000000000001 AND -1;
+a
+Warnings:
+Note 1105 bin_eq=1 a=(bigint)-1 b=(int)-1
+SELECT * FROM t1 WHERE a BETWEEN -1 AND 18446744073709551615;
+a
+1
+2
+3
+Warnings:
+Note 1105 bin_eq=0 a=(int)-1 b=(bigint)18446744073709551615
+SELECT * FROM t1 WHERE a BETWEEN -1 AND 18446744073709551616;
+a
+1
+2
+3
+Warnings:
+Note 1105 bin_eq=0 a=(int)-1 b=(decimal)18446744073709551616
+SELECT * FROM t1 WHERE a BETWEEN 1e2 AND 100e0;
+a
+Warnings:
+Note 1105 bin_eq=1 a=(double)1e2 b=(double)100e0
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN 1 AND ?' USING 1;
+a
+1
+Warnings:
+Note 1105 bin_eq=1 a=(int)1 b=(int)1
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN -1 AND ?' USING 18446744073709551615;
+a
+1
+2
+3
+Warnings:
+Note 1105 bin_eq=0 a=(int)-1 b=(bigint)18446744073709551615
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN -1 AND ?' USING 18446744073709551616;
+a
+1
+2
+3
+Warnings:
+Note 1105 bin_eq=0 a=(int)-1 b=(decimal)18446744073709551616
+DROP TABLE t1;
+CREATE TABLE t1 (a VARCHAR(10));
+INSERT INTO t1 VALUES ('0'),('1'),('2');
+SELECT * FROM t1 WHERE a BETWEEN '0' AND '0';
+a
+0
+Warnings:
+Note 1105 eq=1 a=(varchar)'0' b=(varchar)'0'
+SELECT * FROM t1 WHERE a BETWEEN '0' AND ' 0';
+a
+Warnings:
+Note 1105 eq=0 a=(varchar)'0' b=(varchar)' 0'
+SELECT * FROM t1 WHERE a BETWEEN '0' AND '0 ';
+a
+0
+Warnings:
+Note 1105 eq=1 a=(varchar)'0' b=(varchar)'0 '
+DROP TABLE t1;
+SET SESSION debug_dbug="-d,Item_basic_value";
+#
+# MDEV-11362 True condition elimination does not work for DECIMAL and temporal dynamic SQL parameters
+#
+SET SESSION debug_dbug="+d,Item_basic_value";
+CREATE TABLE t1 (a DECIMAL(10,3));
+INSERT INTO t1 VALUES (1),(2),(3);
+SELECT * FROM t1 WHERE a BETWEEN 1.0 AND 1.0;
+a
+1.000
+Warnings:
+Note 1105 bin_eq=1 a=(decimal)1.0 b=(decimal)1.0
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN 1.0 AND ?' USING 1.0;
+a
+1.000
+Warnings:
+Note 1105 bin_eq=1 a=(decimal)1.0 b=(decimal)1.0
+DROP TABLE t1;
+CREATE TABLE t1 (a TIME);
+INSERT INTO t1 VALUES ('00:00:00'),('00:00:01');
+SELECT * FROM t1 WHERE a BETWEEN TIME'00:00:00' AND TIME'00:00:00';
+a
+00:00:00
+Warnings:
+Note 1105 bin_eq=1 a=(time)TIME'00:00:00' b=(time)TIME'00:00:00'
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN TIME''00:00:00'' AND ?' USING TIME'00:00:00';
+a
+00:00:00
+Warnings:
+Note 1105 bin_eq=1 a=(time)TIME'00:00:00' b=(time)TIME'00:00:00'
+DROP TABLE t1;
+CREATE TABLE t1 (a DATE);
+INSERT INTO t1 VALUES ('2001-01-01'),('2001-01-02');
+SELECT * FROM t1 WHERE a BETWEEN DATE'2001-01-01' AND DATE'2001-01-01';
+a
+2001-01-01
+Warnings:
+Note 1105 bin_eq=1 a=(date)DATE'2001-01-01' b=(date)DATE'2001-01-01'
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN DATE''2001-01-01'' AND ?' USING DATE'2001-01-01';
+a
+2001-01-01
+Warnings:
+Note 1105 bin_eq=1 a=(date)DATE'2001-01-01' b=(date)DATE'2001-01-01'
+DROP TABLE t1;
+CREATE TABLE t1 (a DATETIME);
+INSERT INTO t1 VALUES ('2001-01-01 00:00:00'),('2001-01-01 00:00:00');
+SELECT * FROM t1 WHERE a BETWEEN TIMESTAMP'2001-01-01 00:00:00' AND TIMESTAMP'2001-01-01 00:00:00';
+a
+2001-01-01 00:00:00
+2001-01-01 00:00:00
+Warnings:
+Note 1105 bin_eq=1 a=(datetime)TIMESTAMP'2001-01-01 00:00:00' b=(datetime)TIMESTAMP'2001-01-01 00:00:00'
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN TIMESTAMP''2001-01-01 00:00:00'' AND ?' USING TIMESTAMP'2001-01-01 00:00:00';
+a
+2001-01-01 00:00:00
+2001-01-01 00:00:00
+Warnings:
+Note 1105 bin_eq=1 a=(datetime)TIMESTAMP'2001-01-01 00:00:00' b=(datetime)TIMESTAMP'2001-01-01 00:00:00'
+DROP TABLE t1;
+SET SESSION debug_dbug="-d,Item_basic_value";
+#
+# MDEV-16426 Optimizer erroneously treats equal constants of different formats as same
+#
+SET SESSION debug_dbug="+d,Item_basic_value";
+CREATE TABLE t1 (a VARCHAR(10));
+INSERT INTO t1 VALUES ('a'),('b'),('c');
+SELECT * FROM t1 WHERE a BETWEEN 'a' AND 0x61;
+a
+a
+Warnings:
+Note 1105 eq=0 a=(varchar)'a' b=(hex_hybrid)0x61
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN ''a'' AND ?' USING 0x61;
+a
+a
+Warnings:
+Note 1105 eq=0 a=(varchar)'a' b=(hex_hybrid)'a'
+DROP TABLE t1;
+SET SESSION debug_dbug="-d,Item_basic_value";
+#
+# MDEV-16454 Bad results for IN with ROW
+#
+SET SESSION debug_dbug="+d,cmp_item";
+SET SESSION debug_dbug="+d,Item_func_in";
+SET SESSION debug_dbug="+d,Predicant_to_list_comparator";
+SELECT (18446744073709551615,0) IN ((18446744073709551614,0),(-1,0));
+(18446744073709551615,0) IN ((18446744073709551614,0),(-1,0))
+0
+Warnings:
+Note 1105 DBUG: [0] arg=1 handler=0 (row)
+Note 1105 DBUG: [1] arg=2 handler=0 (row)
+Note 1105 DBUG: ROW(3 args) level=0
+Note 1105 DBUG: [0,0] handler=bigint
+Note 1105 DBUG: [0,1] handler=bigint
+Note 1105 DBUG: [0,2] handler=int
+Note 1105 DBUG: => handler=decimal
+Note 1105 DBUG: [1,0] handler=int
+Note 1105 DBUG: [1,1] handler=int
+Note 1105 DBUG: [1,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: types_compatible=yes bisect=yes
+SELECT (1,(0,0)) IN ((1,(1,0)),(0,(0,0)));
+(1,(0,0)) IN ((1,(1,0)),(0,(0,0)))
+0
+Warnings:
+Note 1105 DBUG: [0] arg=1 handler=0 (row)
+Note 1105 DBUG: [1] arg=2 handler=0 (row)
+Note 1105 DBUG: ROW(3 args) level=0
+Note 1105 DBUG: [0,0] handler=int
+Note 1105 DBUG: [0,1] handler=int
+Note 1105 DBUG: [0,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: [1,0] handler=row
+Note 1105 DBUG: [1,1] handler=row
+Note 1105 DBUG: [1,2] handler=row
+Note 1105 DBUG: => handler=row
+Note 1105 DBUG: ROW(3 args) level=1
+Note 1105 DBUG: [0,0] handler=int
+Note 1105 DBUG: [0,1] handler=int
+Note 1105 DBUG: [0,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: [1,0] handler=int
+Note 1105 DBUG: [1,1] handler=int
+Note 1105 DBUG: [1,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: types_compatible=yes bisect=yes
+SELECT (1,(0,0),3) IN ((1,(1,0),3),(0,(0,0),3));
+(1,(0,0),3) IN ((1,(1,0),3),(0,(0,0),3))
+0
+Warnings:
+Note 1105 DBUG: [0] arg=1 handler=0 (row)
+Note 1105 DBUG: [1] arg=2 handler=0 (row)
+Note 1105 DBUG: ROW(3 args) level=0
+Note 1105 DBUG: [0,0] handler=int
+Note 1105 DBUG: [0,1] handler=int
+Note 1105 DBUG: [0,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: [1,0] handler=row
+Note 1105 DBUG: [1,1] handler=row
+Note 1105 DBUG: [1,2] handler=row
+Note 1105 DBUG: => handler=row
+Note 1105 DBUG: ROW(3 args) level=1
+Note 1105 DBUG: [0,0] handler=int
+Note 1105 DBUG: [0,1] handler=int
+Note 1105 DBUG: [0,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: [1,0] handler=int
+Note 1105 DBUG: [1,1] handler=int
+Note 1105 DBUG: [1,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: [2,0] handler=int
+Note 1105 DBUG: [2,1] handler=int
+Note 1105 DBUG: [2,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: types_compatible=yes bisect=yes
+SELECT '0x' IN (0);
+'0x' IN (0)
+1
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: '0x'
+SELECT '0x' IN (0,1);
+'0x' IN (0,1)
+1
+Warnings:
+Note 1105 DBUG: [0] arg=1 handler=0 (double)
+Note 1105 DBUG: [1] arg=2 handler=0 (double)
+Note 1105 DBUG: types_compatible=yes bisect=yes
+Warning 1292 Truncated incorrect DOUBLE value: '0x'
+SELECT ('0x',1) IN ((0,1));
+('0x',1) IN ((0,1))
+1
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: '0x'
+SELECT ('0x',1) IN ((0,1),(1,1));
+('0x',1) IN ((0,1),(1,1))
+1
+Warnings:
+Note 1105 DBUG: [0] arg=1 handler=0 (row)
+Note 1105 DBUG: [1] arg=2 handler=0 (row)
+Note 1105 DBUG: ROW(3 args) level=0
+Note 1105 DBUG: [0,0] handler=varchar
+Note 1105 DBUG: [0,1] handler=int
+Note 1105 DBUG: [0,2] handler=int
+Note 1105 DBUG: => handler=double
+Note 1105 DBUG: [1,0] handler=int
+Note 1105 DBUG: [1,1] handler=int
+Note 1105 DBUG: [1,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: types_compatible=yes bisect=yes
+Warning 1292 Truncated incorrect DOUBLE value: '0x'
+SET SESSION debug_dbug="-d,Predicant_to_list_comparator";
+SET SESSION debug_dbug="-d,Item_func_in";
+SET SESSION debug_dbug="-d,cmp_item";
diff --git a/mysql-test/main/func_debug.test b/mysql-test/main/func_debug.test
index 8dd01650ed3..9ae24035e20 100644
--- a/mysql-test/main/func_debug.test
+++ b/mysql-test/main/func_debug.test
@@ -488,3 +488,105 @@ EXPLAIN SELECT * FROM t1 WHERE a IN (1,2,(SELECT MAX(a) FROM t1));
SELECT * FROM t1 WHERE a IN (1,2,(SELECT MAX(a) FROM t1));
DROP TABLE t1;
SET SESSION debug_dbug="-d,Item_subselect";
+
+
+--echo #
+--echo # MDEV-16408 Remove tests for Item::type() in Item_basic_value::eq()
+--echo #
+
+SET SESSION debug_dbug="+d,Item_basic_value";
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+SELECT * FROM t1 WHERE a BETWEEN 1 AND 1.0;
+SELECT * FROM t1 WHERE a BETWEEN 1 AND 1;
+SELECT * FROM t1 WHERE a BETWEEN 0 AND 1;
+SELECT * FROM t1 WHERE a BETWEEN 0 AND -1;
+SELECT * FROM t1 WHERE a BETWEEN -1 AND -1;
+SELECT * FROM t1 WHERE a BETWEEN -0000000000000001 AND -1;
+SELECT * FROM t1 WHERE a BETWEEN -1 AND 18446744073709551615;
+SELECT * FROM t1 WHERE a BETWEEN -1 AND 18446744073709551616;
+SELECT * FROM t1 WHERE a BETWEEN 1e2 AND 100e0;
+
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN 1 AND ?' USING 1;
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN -1 AND ?' USING 18446744073709551615;
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN -1 AND ?' USING 18446744073709551616;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a VARCHAR(10));
+INSERT INTO t1 VALUES ('0'),('1'),('2');
+SELECT * FROM t1 WHERE a BETWEEN '0' AND '0';
+SELECT * FROM t1 WHERE a BETWEEN '0' AND ' 0';
+SELECT * FROM t1 WHERE a BETWEEN '0' AND '0 ';
+DROP TABLE t1;
+
+SET SESSION debug_dbug="-d,Item_basic_value";
+
+
+--echo #
+--echo # MDEV-11362 True condition elimination does not work for DECIMAL and temporal dynamic SQL parameters
+--echo #
+
+SET SESSION debug_dbug="+d,Item_basic_value";
+
+CREATE TABLE t1 (a DECIMAL(10,3));
+INSERT INTO t1 VALUES (1),(2),(3);
+SELECT * FROM t1 WHERE a BETWEEN 1.0 AND 1.0;
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN 1.0 AND ?' USING 1.0;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a TIME);
+INSERT INTO t1 VALUES ('00:00:00'),('00:00:01');
+SELECT * FROM t1 WHERE a BETWEEN TIME'00:00:00' AND TIME'00:00:00';
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN TIME''00:00:00'' AND ?' USING TIME'00:00:00';
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DATE);
+INSERT INTO t1 VALUES ('2001-01-01'),('2001-01-02');
+SELECT * FROM t1 WHERE a BETWEEN DATE'2001-01-01' AND DATE'2001-01-01';
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN DATE''2001-01-01'' AND ?' USING DATE'2001-01-01';
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DATETIME);
+INSERT INTO t1 VALUES ('2001-01-01 00:00:00'),('2001-01-01 00:00:00');
+SELECT * FROM t1 WHERE a BETWEEN TIMESTAMP'2001-01-01 00:00:00' AND TIMESTAMP'2001-01-01 00:00:00';
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN TIMESTAMP''2001-01-01 00:00:00'' AND ?' USING TIMESTAMP'2001-01-01 00:00:00';
+DROP TABLE t1;
+
+SET SESSION debug_dbug="-d,Item_basic_value";
+
+
+--echo #
+--echo # MDEV-16426 Optimizer erroneously treats equal constants of different formats as same
+--echo #
+
+SET SESSION debug_dbug="+d,Item_basic_value";
+
+CREATE TABLE t1 (a VARCHAR(10));
+INSERT INTO t1 VALUES ('a'),('b'),('c');
+SELECT * FROM t1 WHERE a BETWEEN 'a' AND 0x61;
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE a BETWEEN ''a'' AND ?' USING 0x61;
+DROP TABLE t1;
+
+SET SESSION debug_dbug="-d,Item_basic_value";
+
+
+--echo #
+--echo # MDEV-16454 Bad results for IN with ROW
+--echo #
+
+SET SESSION debug_dbug="+d,cmp_item";
+SET SESSION debug_dbug="+d,Item_func_in";
+SET SESSION debug_dbug="+d,Predicant_to_list_comparator";
+
+SELECT (18446744073709551615,0) IN ((18446744073709551614,0),(-1,0));
+SELECT (1,(0,0)) IN ((1,(1,0)),(0,(0,0)));
+SELECT (1,(0,0),3) IN ((1,(1,0),3),(0,(0,0),3));
+
+SELECT '0x' IN (0);
+SELECT '0x' IN (0,1);
+SELECT ('0x',1) IN ((0,1));
+SELECT ('0x',1) IN ((0,1),(1,1));
+
+SET SESSION debug_dbug="-d,Predicant_to_list_comparator";
+SET SESSION debug_dbug="-d,Item_func_in";
+SET SESSION debug_dbug="-d,cmp_item";
diff --git a/mysql-test/main/func_extract.result b/mysql-test/main/func_extract.result
new file mode 100644
index 00000000000..30ec0460d61
--- /dev/null
+++ b/mysql-test/main/func_extract.result
@@ -0,0 +1,592 @@
+#
+# MDEV-17385 MICROSECOND() returns confusing results with an out-of-range TIME-alike argument
+#
+CREATE TABLE t1 (v VARCHAR(64), ll BIGINT, t TIME, dt DATETIME, d DATE);
+CREATE TABLE t2 AS SELECT
+EXTRACT(DAY FROM t),
+EXTRACT(DAY_HOUR FROM t),
+EXTRACT(DAY_MINUTE FROM t),
+EXTRACT(DAY_SECOND FROM t),
+EXTRACT(DAY_MICROSECOND FROM t),
+EXTRACT(DAY FROM d),
+EXTRACT(DAY_HOUR FROM d),
+EXTRACT(DAY_MINUTE FROM d),
+EXTRACT(DAY_SECOND FROM d),
+EXTRACT(DAY_MICROSECOND FROM d),
+EXTRACT(DAY FROM v),
+EXTRACT(DAY_HOUR FROM v),
+EXTRACT(DAY_MINUTE FROM v),
+EXTRACT(DAY_SECOND FROM v),
+EXTRACT(DAY_MICROSECOND FROM v),
+EXTRACT(DAY FROM ll),
+EXTRACT(DAY_HOUR FROM ll),
+EXTRACT(DAY_MINUTE FROM ll),
+EXTRACT(DAY_SECOND FROM ll),
+EXTRACT(DAY_MICROSECOND FROM ll)
+FROM t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `EXTRACT(DAY FROM t)` int(3) DEFAULT NULL,
+ `EXTRACT(DAY_HOUR FROM t)` int(5) DEFAULT NULL,
+ `EXTRACT(DAY_MINUTE FROM t)` int(7) DEFAULT NULL,
+ `EXTRACT(DAY_SECOND FROM t)` int(9) DEFAULT NULL,
+ `EXTRACT(DAY_MICROSECOND FROM t)` bigint(15) DEFAULT NULL,
+ `EXTRACT(DAY FROM d)` int(3) DEFAULT NULL,
+ `EXTRACT(DAY_HOUR FROM d)` int(5) DEFAULT NULL,
+ `EXTRACT(DAY_MINUTE FROM d)` int(7) DEFAULT NULL,
+ `EXTRACT(DAY_SECOND FROM d)` int(9) DEFAULT NULL,
+ `EXTRACT(DAY_MICROSECOND FROM d)` bigint(15) DEFAULT NULL,
+ `EXTRACT(DAY FROM v)` int(8) DEFAULT NULL,
+ `EXTRACT(DAY_HOUR FROM v)` int(10) DEFAULT NULL,
+ `EXTRACT(DAY_MINUTE FROM v)` bigint(12) DEFAULT NULL,
+ `EXTRACT(DAY_SECOND FROM v)` bigint(14) DEFAULT NULL,
+ `EXTRACT(DAY_MICROSECOND FROM v)` bigint(20) DEFAULT NULL,
+ `EXTRACT(DAY FROM ll)` int(8) DEFAULT NULL,
+ `EXTRACT(DAY_HOUR FROM ll)` int(10) DEFAULT NULL,
+ `EXTRACT(DAY_MINUTE FROM ll)` bigint(12) DEFAULT NULL,
+ `EXTRACT(DAY_SECOND FROM ll)` bigint(14) DEFAULT NULL,
+ `EXTRACT(DAY_MICROSECOND FROM ll)` bigint(20) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t2;
+DROP TABLE t1;
+CREATE TABLE t1 (a VARCHAR(64), b DECIMAL(32,9));
+INSERT INTO t1 VALUES
+('9999-12-31 23:59:59.123456', 99991231235959.123456),
+('2001-01-01 10:20:30.123456', 20010101102030.123456),
+('4294967296:59:59.123456', 42949672965959.123456),
+('4294967295:59:59.123456', 42949672955959.123456),
+('87649416:59:59.123456', 876494165959.123456),
+('87649415:59:59.123456', 876494155959.123456),
+('87649414:59:59.123456', 876494145959.123456),
+('9999:59:59.123456', 99995959.123456),
+('9999:01:01.123456', 99990101.123456),
+('9999:01:01', 99990101),
+('0.999999', 0.999999),
+('0.99999', 0.99999),
+('0.9999', 0.9999),
+('0.999', 0.999),
+('0.99', 0.99),
+('0.9', 0.9),
+('000000',0);
+# Summary:
+# Check that FUNC(varchar) and FUNC(decimal) give equal results
+# Expect empty sets
+SELECT a, b, EXTRACT(DAY_HOUR FROM a), EXTRACT(DAY_HOUR FROM b) FROM t1 WHERE NOT (EXTRACT(DAY_HOUR FROM a)<=>EXTRACT(DAY_HOUR FROM b));
+a b EXTRACT(DAY_HOUR FROM a) EXTRACT(DAY_HOUR FROM b)
+SELECT a, b, EXTRACT(DAY FROM a), EXTRACT(DAY FROM b) FROM t1 WHERE NOT (EXTRACT(DAY FROM a)<=>EXTRACT(DAY FROM b));
+a b EXTRACT(DAY FROM a) EXTRACT(DAY FROM b)
+SELECT a, b, EXTRACT(HOUR FROM a), EXTRACT(HOUR FROM b) FROM t1 WHERE NOT (EXTRACT(HOUR FROM a)<=>EXTRACT(HOUR FROM b));
+a b EXTRACT(HOUR FROM a) EXTRACT(HOUR FROM b)
+SELECT a, b, EXTRACT(MINUTE FROM a), EXTRACT(MINUTE FROM b) FROM t1 WHERE NOT (EXTRACT(MINUTE FROM a)<=>EXTRACT(MINUTE FROM b));
+a b EXTRACT(MINUTE FROM a) EXTRACT(MINUTE FROM b)
+SELECT a, b, EXTRACT(SECOND FROM a), EXTRACT(SECOND FROM b) FROM t1 WHERE NOT (EXTRACT(SECOND FROM a)<=>EXTRACT(SECOND FROM b));
+a b EXTRACT(SECOND FROM a) EXTRACT(SECOND FROM b)
+SELECT a, b, EXTRACT(MICROSECOND FROM a), EXTRACT(MICROSECOND FROM b) FROM t1 WHERE NOT (EXTRACT(MICROSECOND FROM a)<=>EXTRACT(MICROSECOND FROM b));
+a b EXTRACT(MICROSECOND FROM a) EXTRACT(MICROSECOND FROM b)
+# Detailed results
+SELECT
+a,
+CAST(a AS INTERVAL DAY_SECOND(6)) AS cidm,
+EXTRACT(DAY FROM a) * 24 + EXTRACT(HOUR FROM a) AS dh,
+EXTRACT(DAY_HOUR FROM a),
+EXTRACT(DAY FROM a),
+EXTRACT(HOUR FROM a),
+EXTRACT(MINUTE FROM a),
+EXTRACT(SECOND FROM a),
+EXTRACT(MICROSECOND FROM a)
+FROM t1;
+a cidm dh EXTRACT(DAY_HOUR FROM a) EXTRACT(DAY FROM a) EXTRACT(HOUR FROM a) EXTRACT(MINUTE FROM a) EXTRACT(SECOND FROM a) EXTRACT(MICROSECOND FROM a)
+9999-12-31 23:59:59.123456 NULL 767 3123 31 23 59 59 123456
+2001-01-01 10:20:30.123456 NULL 34 110 1 10 20 30 123456
+4294967296:59:59.123456 NULL NULL NULL NULL NULL NULL NULL NULL
+4294967295:59:59.123456 NULL NULL NULL NULL NULL NULL NULL NULL
+87649416:59:59.123456 NULL NULL NULL NULL NULL NULL NULL NULL
+87649415:59:59.123456 3652058 23:59:59.123456 87649415 365205823 3652058 23 59 59 123456
+87649414:59:59.123456 3652058 22:59:59.123456 87649414 365205822 3652058 22 59 59 123456
+9999:59:59.123456 416 15:59:59.123456 9999 41615 416 15 59 59 123456
+9999:01:01.123456 416 15:01:01.123456 9999 41615 416 15 1 1 123456
+9999:01:01 416 15:01:01.000000 9999 41615 416 15 1 1 0
+0.999999 00:00:00.999999 0 0 0 0 0 0 999999
+0.99999 00:00:00.999990 0 0 0 0 0 0 999990
+0.9999 00:00:00.999900 0 0 0 0 0 0 999900
+0.999 00:00:00.999000 0 0 0 0 0 0 999000
+0.99 00:00:00.990000 0 0 0 0 0 0 990000
+0.9 00:00:00.900000 0 0 0 0 0 0 900000
+000000 00:00:00.000000 0 0 0 0 0 0 0
+Warnings:
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '9999-12-31 23:59:59.123456'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '2001-01-01 10:20:30.123456'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967296:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967296:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967296:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967296:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967296:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967296:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967296:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967296:59:59.123456'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967295:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967295:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967295:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967295:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967295:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967295:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967295:59:59.123456'
+Warning 1292 Incorrect interval value: '4294967295:59:59.123456'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59.123456'
+Warning 1292 Incorrect interval value: '87649416:59:59.123456'
+Warning 1292 Incorrect interval value: '87649416:59:59.123456'
+Warning 1292 Incorrect interval value: '87649416:59:59.123456'
+Warning 1292 Incorrect interval value: '87649416:59:59.123456'
+Warning 1292 Incorrect interval value: '87649416:59:59.123456'
+Warning 1292 Incorrect interval value: '87649416:59:59.123456'
+Warning 1292 Incorrect interval value: '87649416:59:59.123456'
+Warning 1292 Incorrect interval value: '87649416:59:59.123456'
+SELECT
+b,
+CAST(b AS INTERVAL DAY_SECOND(6)) AS cidm,
+EXTRACT(DAY FROM b) * 24 + EXTRACT(HOUR FROM b) AS dh,
+EXTRACT(DAY_HOUR FROM b),
+EXTRACT(DAY FROM b),
+EXTRACT(HOUR FROM b),
+EXTRACT(MINUTE FROM b),
+EXTRACT(SECOND FROM b),
+EXTRACT(MICROSECOND FROM b)
+FROM t1;
+b cidm dh EXTRACT(DAY_HOUR FROM b) EXTRACT(DAY FROM b) EXTRACT(HOUR FROM b) EXTRACT(MINUTE FROM b) EXTRACT(SECOND FROM b) EXTRACT(MICROSECOND FROM b)
+99991231235959.123456000 NULL 767 3123 31 23 59 59 123456
+20010101102030.123456000 NULL 34 110 1 10 20 30 123456
+42949672965959.123456000 NULL NULL NULL NULL NULL NULL NULL NULL
+42949672955959.123456000 NULL NULL NULL NULL NULL NULL NULL NULL
+876494165959.123456000 NULL NULL NULL NULL NULL NULL NULL NULL
+876494155959.123456000 3652058 23:59:59.123456 87649415 365205823 3652058 23 59 59 123456
+876494145959.123456000 3652058 22:59:59.123456 87649414 365205822 3652058 22 59 59 123456
+99995959.123456000 416 15:59:59.123456 9999 41615 416 15 59 59 123456
+99990101.123456000 416 15:01:01.123456 9999 41615 416 15 1 1 123456
+99990101.000000000 416 15:01:01.000000 9999 41615 416 15 1 1 0
+0.999999000 00:00:00.999999 0 0 0 0 0 0 999999
+0.999990000 00:00:00.999990 0 0 0 0 0 0 999990
+0.999900000 00:00:00.999900 0 0 0 0 0 0 999900
+0.999000000 00:00:00.999000 0 0 0 0 0 0 999000
+0.990000000 00:00:00.990000 0 0 0 0 0 0 990000
+0.900000000 00:00:00.900000 0 0 0 0 0 0 900000
+0.000000000 00:00:00.000000 0 0 0 0 0 0 0
+Warnings:
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '99991231235959.123456000'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '20010101102030.123456000'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '42949672965959.123456000'
+Warning 1292 Incorrect interval value: '42949672965959.123456000' for column `test`.`t1`.`b` at row 3
+Warning 1292 Incorrect interval value: '42949672965959.123456000' for column `test`.`t1`.`b` at row 3
+Warning 1292 Incorrect interval value: '42949672965959.123456000' for column `test`.`t1`.`b` at row 3
+Warning 1292 Incorrect interval value: '42949672965959.123456000' for column `test`.`t1`.`b` at row 3
+Warning 1292 Incorrect interval value: '42949672965959.123456000' for column `test`.`t1`.`b` at row 3
+Warning 1292 Incorrect interval value: '42949672965959.123456000' for column `test`.`t1`.`b` at row 3
+Warning 1292 Incorrect interval value: '42949672965959.123456000' for column `test`.`t1`.`b` at row 3
+Warning 1292 Incorrect interval value: '42949672965959.123456000' for column `test`.`t1`.`b` at row 3
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '42949672955959.123456000'
+Warning 1292 Incorrect interval value: '42949672955959.123456000' for column `test`.`t1`.`b` at row 4
+Warning 1292 Incorrect interval value: '42949672955959.123456000' for column `test`.`t1`.`b` at row 4
+Warning 1292 Incorrect interval value: '42949672955959.123456000' for column `test`.`t1`.`b` at row 4
+Warning 1292 Incorrect interval value: '42949672955959.123456000' for column `test`.`t1`.`b` at row 4
+Warning 1292 Incorrect interval value: '42949672955959.123456000' for column `test`.`t1`.`b` at row 4
+Warning 1292 Incorrect interval value: '42949672955959.123456000' for column `test`.`t1`.`b` at row 4
+Warning 1292 Incorrect interval value: '42949672955959.123456000' for column `test`.`t1`.`b` at row 4
+Warning 1292 Incorrect interval value: '42949672955959.123456000' for column `test`.`t1`.`b` at row 4
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '876494165959.123456000'
+Warning 1292 Incorrect interval value: '876494165959.123456000' for column `test`.`t1`.`b` at row 5
+Warning 1292 Incorrect interval value: '876494165959.123456000' for column `test`.`t1`.`b` at row 5
+Warning 1292 Incorrect interval value: '876494165959.123456000' for column `test`.`t1`.`b` at row 5
+Warning 1292 Incorrect interval value: '876494165959.123456000' for column `test`.`t1`.`b` at row 5
+Warning 1292 Incorrect interval value: '876494165959.123456000' for column `test`.`t1`.`b` at row 5
+Warning 1292 Incorrect interval value: '876494165959.123456000' for column `test`.`t1`.`b` at row 5
+Warning 1292 Incorrect interval value: '876494165959.123456000' for column `test`.`t1`.`b` at row 5
+Warning 1292 Incorrect interval value: '876494165959.123456000' for column `test`.`t1`.`b` at row 5
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '876494155959.123456000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '876494145959.123456000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '99995959.123456000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '99990101.123456000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '99990101.000000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.999999000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.999990000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.999900000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.999000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.990000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.900000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.000000000'
+DROP TABLE t1;
+# Special case: DAY + TIME
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES ('9999-01-01');
+SELECT a,
+EXTRACT(DAY_HOUR FROM a),
+EXTRACT(DAY_MINUTE FROM a),
+EXTRACT(DAY_SECOND FROM a),
+EXTRACT(DAY_MICROSECOND FROM a),
+EXTRACT(DAY FROM a),
+EXTRACT(HOUR FROM a),
+EXTRACT(MINUTE FROM a),
+EXTRACT(SECOND FROM a),
+EXTRACT(MICROSECOND FROM a)
+FROM t1;
+a EXTRACT(DAY_HOUR FROM a) EXTRACT(DAY_MINUTE FROM a) EXTRACT(DAY_SECOND FROM a) EXTRACT(DAY_MICROSECOND FROM a) EXTRACT(DAY FROM a) EXTRACT(HOUR FROM a) EXTRACT(MINUTE FROM a) EXTRACT(SECOND FROM a) EXTRACT(MICROSECOND FROM a)
+9999-01-01 100 10000 1000000 1000000000000 1 0 0 0 0
+DROP TABLE t1;
+# Bad values
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES ('');
+SELECT a,
+CAST(a AS INTERVAL DAY_SECOND(6)) AS cidm,
+EXTRACT(DAY_HOUR FROM a),
+EXTRACT(DAY_MINUTE FROM a),
+EXTRACT(DAY_SECOND FROM a),
+EXTRACT(DAY_MICROSECOND FROM a),
+EXTRACT(DAY FROM a),
+EXTRACT(HOUR FROM a),
+EXTRACT(MINUTE FROM a),
+EXTRACT(SECOND FROM a),
+EXTRACT(MICROSECOND FROM a)
+FROM t1;
+a cidm EXTRACT(DAY_HOUR FROM a) EXTRACT(DAY_MINUTE FROM a) EXTRACT(DAY_SECOND FROM a) EXTRACT(DAY_MICROSECOND FROM a) EXTRACT(DAY FROM a) EXTRACT(HOUR FROM a) EXTRACT(MINUTE FROM a) EXTRACT(SECOND FROM a) EXTRACT(MICROSECOND FROM a)
+ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+Warnings:
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: ''
+Warning 1292 Incorrect interval value: ''
+Warning 1292 Incorrect interval value: ''
+Warning 1292 Incorrect interval value: ''
+Warning 1292 Incorrect interval value: ''
+Warning 1292 Incorrect interval value: ''
+Warning 1292 Incorrect interval value: ''
+Warning 1292 Incorrect interval value: ''
+Warning 1292 Incorrect interval value: ''
+Warning 1292 Incorrect interval value: ''
+DROP TABLE t1;
+# Backward compatibility
+# This still parses as DATETIME
+SELECT EXTRACT(YEAR FROM '2001/02/03 10:20:30');
+EXTRACT(YEAR FROM '2001/02/03 10:20:30')
+2001
+SELECT EXTRACT(MONTH FROM '2001/02/03 10:20:30');
+EXTRACT(MONTH FROM '2001/02/03 10:20:30')
+2
+SELECT EXTRACT(DAY FROM '2001/02/03 10:20:30');
+EXTRACT(DAY FROM '2001/02/03 10:20:30')
+3
+SELECT EXTRACT(YEAR FROM '01/02/03 10:20:30');
+EXTRACT(YEAR FROM '01/02/03 10:20:30')
+2001
+SELECT EXTRACT(MONTH FROM '01/02/03 10:20:30');
+EXTRACT(MONTH FROM '01/02/03 10:20:30')
+2
+SELECT EXTRACT(DAY FROM '01/02/03 10:20:30');
+EXTRACT(DAY FROM '01/02/03 10:20:30')
+3
+SELECT EXTRACT(YEAR FROM '01:02:03 10:20:30');
+EXTRACT(YEAR FROM '01:02:03 10:20:30')
+2001
+SELECT EXTRACT(MONTH FROM '01:02:03 10:20:30');
+EXTRACT(MONTH FROM '01:02:03 10:20:30')
+2
+SELECT EXTRACT(DAY FROM '01:02:03 10:20:30');
+EXTRACT(DAY FROM '01:02:03 10:20:30')
+3
+# This still parses as DATETIME and returns NULL
+SELECT EXTRACT(YEAR FROM "2011-02-32 8:46:06.23434");
+EXTRACT(YEAR FROM "2011-02-32 8:46:06.23434")
+NULL
+Warnings:
+Warning 1292 Incorrect datetime value: '2011-02-32 8:46:06.23434'
+SELECT EXTRACT(MONTH FROM "2011-02-32 8:46:06.23434");
+EXTRACT(MONTH FROM "2011-02-32 8:46:06.23434")
+NULL
+Warnings:
+Warning 1292 Incorrect datetime value: '2011-02-32 8:46:06.23434'
+SELECT EXTRACT(DAY FROM "2011-02-32 8:46:06.23434");
+EXTRACT(DAY FROM "2011-02-32 8:46:06.23434")
+NULL
+Warnings:
+Warning 1292 Incorrect interval value: '2011-02-32 8:46:06.23434'
+SELECT EXTRACT(HOUR FROM "2011-02-32 8:46:06.23434");
+EXTRACT(HOUR FROM "2011-02-32 8:46:06.23434")
+NULL
+Warnings:
+Warning 1292 Incorrect interval value: '2011-02-32 8:46:06.23434'
+# This still parses as DATE
+SELECT EXTRACT(YEAR FROM '2001/02/03');
+EXTRACT(YEAR FROM '2001/02/03')
+2001
+SELECT EXTRACT(MONTH FROM '2001/02/03');
+EXTRACT(MONTH FROM '2001/02/03')
+2
+SELECT EXTRACT(DAY FROM '2001/02/03');
+EXTRACT(DAY FROM '2001/02/03')
+3
+SELECT EXTRACT(YEAR FROM '01/02/03');
+EXTRACT(YEAR FROM '01/02/03')
+2001
+SELECT EXTRACT(MONTH FROM '01/02/03');
+EXTRACT(MONTH FROM '01/02/03')
+2
+SELECT EXTRACT(DAY FROM '01/02/03');
+EXTRACT(DAY FROM '01/02/03')
+3
+SELECT EXTRACT(YEAR FROM '01-02-03');
+EXTRACT(YEAR FROM '01-02-03')
+2001
+SELECT EXTRACT(MONTH FROM '01-02-03');
+EXTRACT(MONTH FROM '01-02-03')
+2
+SELECT EXTRACT(DAY FROM '01-02-03');
+EXTRACT(DAY FROM '01-02-03')
+3
+SELECT EXTRACT(YEAR FROM '1-2-3');
+EXTRACT(YEAR FROM '1-2-3')
+1
+SELECT EXTRACT(MONTH FROM '1-2-3');
+EXTRACT(MONTH FROM '1-2-3')
+2
+SELECT EXTRACT(DAY FROM '1-2-3');
+EXTRACT(DAY FROM '1-2-3')
+3
+SELECT EXTRACT(HOUR FROM '1-2-3');
+EXTRACT(HOUR FROM '1-2-3')
+0
+SELECT EXTRACT(DAY FROM '2024-01-03 garbage /////');
+EXTRACT(DAY FROM '2024-01-03 garbage /////')
+3
+Warnings:
+Warning 1292 Truncated incorrect date value: '2024-01-03 garbage /////'
+SELECT EXTRACT(DAY FROM '24-01-03 garbage /////');
+EXTRACT(DAY FROM '24-01-03 garbage /////')
+3
+Warnings:
+Warning 1292 Truncated incorrect date value: '24-01-03 garbage /////'
+SELECT EXTRACT(DAY FROM '01-02-03');
+EXTRACT(DAY FROM '01-02-03')
+3
+SELECT EXTRACT(DAY FROM '24:02:03T');
+EXTRACT(DAY FROM '24:02:03T')
+3
+SELECT EXTRACT(DAY FROM '24-02-03');
+EXTRACT(DAY FROM '24-02-03')
+3
+SELECT EXTRACT(DAY FROM '24/02/03');
+EXTRACT(DAY FROM '24/02/03')
+3
+SELECT EXTRACT(DAY FROM '11111');
+EXTRACT(DAY FROM '11111')
+1
+SELECT TIME('2001-01-01T'), TIME('2001-01-01T ');
+TIME('2001-01-01T') TIME('2001-01-01T ')
+00:00:00 00:00:00
+SELECT TIME('2001/01/01T'), TIME('2001/01/01T ');
+TIME('2001/01/01T') TIME('2001/01/01T ')
+00:00:00 00:00:00
+SELECT TIME('2001:01:01T'), TIME('2001:01:01T ');
+TIME('2001:01:01T') TIME('2001:01:01T ')
+00:00:00 00:00:00
+SELECT EXTRACT(DAY FROM '2001-01-01T'), EXTRACT(DAY FROM '2001-01-01T ');
+EXTRACT(DAY FROM '2001-01-01T') EXTRACT(DAY FROM '2001-01-01T ')
+1 1
+SELECT EXTRACT(DAY FROM '2001/01/01T'), EXTRACT(DAY FROM '2001/01/01T ');
+EXTRACT(DAY FROM '2001/01/01T') EXTRACT(DAY FROM '2001/01/01T ')
+1 1
+SELECT EXTRACT(DAY FROM '2001:01:01T'), EXTRACT(DAY FROM '2001:01:01T ');
+EXTRACT(DAY FROM '2001:01:01T') EXTRACT(DAY FROM '2001:01:01T ')
+1 1
+SELECT TIME('2001:01:01T'), TIME('2001:01:01T ');
+TIME('2001:01:01T') TIME('2001:01:01T ')
+00:00:00 00:00:00
+SELECT EXTRACT(HOUR FROM '2001-01-01T'), EXTRACT(HOUR FROM '2001-01-01T ');
+EXTRACT(HOUR FROM '2001-01-01T') EXTRACT(HOUR FROM '2001-01-01T ')
+0 0
+SELECT EXTRACT(HOUR FROM '2001/01/01T'), EXTRACT(HOUR FROM '2001/01/01T ');
+EXTRACT(HOUR FROM '2001/01/01T') EXTRACT(HOUR FROM '2001/01/01T ')
+0 0
+SELECT EXTRACT(HOUR FROM '2001:01:01T'), EXTRACT(HOUR FROM '2001:01:01T ');
+EXTRACT(HOUR FROM '2001:01:01T') EXTRACT(HOUR FROM '2001:01:01T ')
+0 0
+# This still parses as DATE and returns NULL (without trying TIME)
+SELECT EXTRACT(DAY FROM '100000:02:03T');
+EXTRACT(DAY FROM '100000:02:03T')
+NULL
+Warnings:
+Warning 1292 Incorrect interval value: '100000:02:03T'
+SELECT EXTRACT(DAY FROM '100000/02/03');
+EXTRACT(DAY FROM '100000/02/03')
+NULL
+Warnings:
+Warning 1292 Incorrect interval value: '100000/02/03'
+SELECT EXTRACT(DAY FROM '100000-02-03');
+EXTRACT(DAY FROM '100000-02-03')
+NULL
+Warnings:
+Warning 1292 Incorrect interval value: '100000-02-03'
+SELECT EXTRACT(DAY FROM '1111');
+EXTRACT(DAY FROM '1111')
+NULL
+Warnings:
+Warning 1292 Incorrect interval value: '1111'
+SELECT EXTRACT(DAY FROM '111');
+EXTRACT(DAY FROM '111')
+NULL
+Warnings:
+Warning 1292 Incorrect interval value: '111'
+SELECT EXTRACT(DAY FROM '11');
+EXTRACT(DAY FROM '11')
+NULL
+Warnings:
+Warning 1292 Incorrect interval value: '11'
+SELECT EXTRACT(DAY FROM '1');
+EXTRACT(DAY FROM '1')
+NULL
+Warnings:
+Warning 1292 Incorrect interval value: '1'
+# This still parses as TIME
+SELECT EXTRACT(HOUR FROM '11111');
+EXTRACT(HOUR FROM '11111')
+1
+SELECT EXTRACT(HOUR FROM '1111');
+EXTRACT(HOUR FROM '1111')
+0
+SELECT EXTRACT(HOUR FROM '111');
+EXTRACT(HOUR FROM '111')
+0
+SELECT EXTRACT(HOUR FROM '11');
+EXTRACT(HOUR FROM '11')
+0
+SELECT EXTRACT(HOUR FROM '1');
+EXTRACT(HOUR FROM '1')
+0
+SELECT TIME('01:02:03:');
+TIME('01:02:03:')
+01:02:03
+Warnings:
+Warning 1292 Truncated incorrect time value: '01:02:03:'
+SELECT TIME('01:02:03-');
+TIME('01:02:03-')
+01:02:03
+Warnings:
+Warning 1292 Truncated incorrect time value: '01:02:03-'
+SELECT TIME('01:02:03;');
+TIME('01:02:03;')
+01:02:03
+Warnings:
+Warning 1292 Truncated incorrect time value: '01:02:03;'
+SELECT TIME('01:02:03/');
+TIME('01:02:03/')
+01:02:03
+Warnings:
+Warning 1292 Truncated incorrect time value: '01:02:03/'
+SELECT EXTRACT(HOUR FROM '01:02:03:');
+EXTRACT(HOUR FROM '01:02:03:')
+1
+Warnings:
+Warning 1292 Truncated incorrect time value: '01:02:03:'
+SELECT EXTRACT(HOUR FROM '01:02:03-');
+EXTRACT(HOUR FROM '01:02:03-')
+1
+Warnings:
+Warning 1292 Truncated incorrect time value: '01:02:03-'
+SELECT EXTRACT(HOUR FROM '01:02:03;');
+EXTRACT(HOUR FROM '01:02:03;')
+1
+Warnings:
+Warning 1292 Truncated incorrect time value: '01:02:03;'
+SELECT EXTRACT(HOUR FROM '01:02:03/');
+EXTRACT(HOUR FROM '01:02:03/')
+1
+Warnings:
+Warning 1292 Truncated incorrect time value: '01:02:03/'
+# Backward compatibility preserved for YEAR and MONTH only
+# (behavior has changed for DAY, see below)
+SELECT EXTRACT(YEAR FROM '01:02:03');
+EXTRACT(YEAR FROM '01:02:03')
+2001
+SELECT EXTRACT(MONTH FROM '01:02:03');
+EXTRACT(MONTH FROM '01:02:03')
+2
+SELECT EXTRACT(YEAR FROM '24:01:03 garbage /////');
+EXTRACT(YEAR FROM '24:01:03 garbage /////')
+2024
+Warnings:
+Warning 1292 Truncated incorrect date value: '24:01:03 garbage /////'
+SELECT EXTRACT(MONTH FROM '24:01:03 garbage /////');
+EXTRACT(MONTH FROM '24:01:03 garbage /////')
+1
+Warnings:
+Warning 1292 Truncated incorrect date value: '24:01:03 garbage /////'
+# This still parses as TIME 00:20:01
+SELECT TIME('2001/01/01');
+TIME('2001/01/01')
+00:20:01
+Warnings:
+Warning 1292 Truncated incorrect time value: '2001/01/01'
+SELECT TIME('2001-01-01');
+TIME('2001-01-01')
+00:20:01
+Warnings:
+Warning 1292 Truncated incorrect time value: '2001-01-01'
+# This still parses as TIME and overflows to '838:59:59'
+SELECT TIME('2001:01:01');
+TIME('2001:01:01')
+838:59:59
+Warnings:
+Warning 1292 Truncated incorrect time value: '2001:01:01'
+# This used to parse as DATE, now parses as TIME interval
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2024:01:03 garbage /////'),
+('24:01:03 garbage /////'),
+('01:01:03 garbage /////'),
+('2024:02:03'),
+('100000:02:03'),
+('24:02:03'),
+('01:02:03'),
+('01:02:03:'),
+('01:02:03-'),
+('01:02:03;'),
+('01:02:03/'),
+('20 10:20:30');
+SELECT
+EXTRACT(DAY FROM a),
+EXTRACT(DAY_SECOND FROM a), a,
+CAST(a AS INTERVAL DAY_SECOND(6)) AS cidm
+FROM t1;
+EXTRACT(DAY FROM a) EXTRACT(DAY_SECOND FROM a) a cidm
+84 84080103 2024:01:03 garbage ///// NULL
+1 1000103 24:01:03 garbage ///// NULL
+0 10103 01:01:03 garbage ///// NULL
+84 84080203 2024:02:03 84 08:02:03.000000
+4166 4166160203 100000:02:03 4166 16:02:03.000000
+1 1000203 24:02:03 1 00:02:03.000000
+0 10203 01:02:03 01:02:03.000000
+0 10203 01:02:03: 01:02:03.000000
+0 10203 01:02:03- NULL
+0 10203 01:02:03; 01:02:03.000000
+0 10203 01:02:03/ 01:02:03.000000
+20 20102030 20 10:20:30 20 10:20:30.000000
+Warnings:
+Warning 1292 Truncated incorrect time value: '2024:01:03 garbage /////'
+Warning 1292 Truncated incorrect time value: '2024:01:03 garbage /////'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '2024:01:03 garbage /////'
+Warning 1292 Truncated incorrect time value: '24:01:03 garbage /////'
+Warning 1292 Truncated incorrect time value: '24:01:03 garbage /////'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '24:01:03 garbage /////'
+Warning 1292 Truncated incorrect time value: '01:01:03 garbage /////'
+Warning 1292 Truncated incorrect time value: '01:01:03 garbage /////'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '01:01:03 garbage /////'
+Warning 1292 Truncated incorrect time value: '01:02:03:'
+Warning 1292 Truncated incorrect time value: '01:02:03:'
+Warning 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '01:02:03:'
+Warning 1292 Truncated incorrect time value: '01:02:03-'
+Warning 1292 Truncated incorrect time value: '01:02:03-'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '01:02:03-'
+Warning 1292 Truncated incorrect time value: '01:02:03;'
+Warning 1292 Truncated incorrect time value: '01:02:03;'
+Warning 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '01:02:03;'
+Warning 1292 Truncated incorrect time value: '01:02:03/'
+Warning 1292 Truncated incorrect time value: '01:02:03/'
+Warning 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '01:02:03/'
+DROP TABLE t1;
diff --git a/mysql-test/main/func_extract.test b/mysql-test/main/func_extract.test
new file mode 100644
index 00000000000..edc99b9c00c
--- /dev/null
+++ b/mysql-test/main/func_extract.test
@@ -0,0 +1,257 @@
+--echo #
+--echo # MDEV-17385 MICROSECOND() returns confusing results with an out-of-range TIME-alike argument
+--echo #
+
+CREATE TABLE t1 (v VARCHAR(64), ll BIGINT, t TIME, dt DATETIME, d DATE);
+CREATE TABLE t2 AS SELECT
+ EXTRACT(DAY FROM t),
+ EXTRACT(DAY_HOUR FROM t),
+ EXTRACT(DAY_MINUTE FROM t),
+ EXTRACT(DAY_SECOND FROM t),
+ EXTRACT(DAY_MICROSECOND FROM t),
+ EXTRACT(DAY FROM d),
+ EXTRACT(DAY_HOUR FROM d),
+ EXTRACT(DAY_MINUTE FROM d),
+ EXTRACT(DAY_SECOND FROM d),
+ EXTRACT(DAY_MICROSECOND FROM d),
+ EXTRACT(DAY FROM v),
+ EXTRACT(DAY_HOUR FROM v),
+ EXTRACT(DAY_MINUTE FROM v),
+ EXTRACT(DAY_SECOND FROM v),
+ EXTRACT(DAY_MICROSECOND FROM v),
+ EXTRACT(DAY FROM ll),
+ EXTRACT(DAY_HOUR FROM ll),
+ EXTRACT(DAY_MINUTE FROM ll),
+ EXTRACT(DAY_SECOND FROM ll),
+ EXTRACT(DAY_MICROSECOND FROM ll)
+FROM t1;
+SHOW CREATE TABLE t2;
+DROP TABLE t2;
+DROP TABLE t1;
+
+
+CREATE TABLE t1 (a VARCHAR(64), b DECIMAL(32,9));
+INSERT INTO t1 VALUES
+('9999-12-31 23:59:59.123456', 99991231235959.123456),
+('2001-01-01 10:20:30.123456', 20010101102030.123456),
+('4294967296:59:59.123456', 42949672965959.123456),
+('4294967295:59:59.123456', 42949672955959.123456),
+('87649416:59:59.123456', 876494165959.123456),
+('87649415:59:59.123456', 876494155959.123456),
+('87649414:59:59.123456', 876494145959.123456),
+('9999:59:59.123456', 99995959.123456),
+('9999:01:01.123456', 99990101.123456),
+('9999:01:01', 99990101),
+('0.999999', 0.999999),
+('0.99999', 0.99999),
+('0.9999', 0.9999),
+('0.999', 0.999),
+('0.99', 0.99),
+('0.9', 0.9),
+('000000',0);
+
+--echo # Summary:
+--echo # Check that FUNC(varchar) and FUNC(decimal) give equal results
+--echo # Expect empty sets
+--disable_warnings
+SELECT a, b, EXTRACT(DAY_HOUR FROM a), EXTRACT(DAY_HOUR FROM b) FROM t1 WHERE NOT (EXTRACT(DAY_HOUR FROM a)<=>EXTRACT(DAY_HOUR FROM b));
+SELECT a, b, EXTRACT(DAY FROM a), EXTRACT(DAY FROM b) FROM t1 WHERE NOT (EXTRACT(DAY FROM a)<=>EXTRACT(DAY FROM b));
+SELECT a, b, EXTRACT(HOUR FROM a), EXTRACT(HOUR FROM b) FROM t1 WHERE NOT (EXTRACT(HOUR FROM a)<=>EXTRACT(HOUR FROM b));
+SELECT a, b, EXTRACT(MINUTE FROM a), EXTRACT(MINUTE FROM b) FROM t1 WHERE NOT (EXTRACT(MINUTE FROM a)<=>EXTRACT(MINUTE FROM b));
+SELECT a, b, EXTRACT(SECOND FROM a), EXTRACT(SECOND FROM b) FROM t1 WHERE NOT (EXTRACT(SECOND FROM a)<=>EXTRACT(SECOND FROM b));
+SELECT a, b, EXTRACT(MICROSECOND FROM a), EXTRACT(MICROSECOND FROM b) FROM t1 WHERE NOT (EXTRACT(MICROSECOND FROM a)<=>EXTRACT(MICROSECOND FROM b));
+--enable_warnings
+
+--echo # Detailed results
+SELECT
+ a,
+ CAST(a AS INTERVAL DAY_SECOND(6)) AS cidm,
+ EXTRACT(DAY FROM a) * 24 + EXTRACT(HOUR FROM a) AS dh,
+ EXTRACT(DAY_HOUR FROM a),
+ EXTRACT(DAY FROM a),
+ EXTRACT(HOUR FROM a),
+ EXTRACT(MINUTE FROM a),
+ EXTRACT(SECOND FROM a),
+ EXTRACT(MICROSECOND FROM a)
+FROM t1;
+SELECT
+ b,
+ CAST(b AS INTERVAL DAY_SECOND(6)) AS cidm,
+ EXTRACT(DAY FROM b) * 24 + EXTRACT(HOUR FROM b) AS dh,
+ EXTRACT(DAY_HOUR FROM b),
+ EXTRACT(DAY FROM b),
+ EXTRACT(HOUR FROM b),
+ EXTRACT(MINUTE FROM b),
+ EXTRACT(SECOND FROM b),
+ EXTRACT(MICROSECOND FROM b)
+FROM t1;
+DROP TABLE t1;
+
+--echo # Special case: DAY + TIME
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES ('9999-01-01');
+SELECT a,
+ EXTRACT(DAY_HOUR FROM a),
+ EXTRACT(DAY_MINUTE FROM a),
+ EXTRACT(DAY_SECOND FROM a),
+ EXTRACT(DAY_MICROSECOND FROM a),
+ EXTRACT(DAY FROM a),
+ EXTRACT(HOUR FROM a),
+ EXTRACT(MINUTE FROM a),
+ EXTRACT(SECOND FROM a),
+ EXTRACT(MICROSECOND FROM a)
+FROM t1;
+DROP TABLE t1;
+
+--echo # Bad values
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES ('');
+SELECT a,
+ CAST(a AS INTERVAL DAY_SECOND(6)) AS cidm,
+ EXTRACT(DAY_HOUR FROM a),
+ EXTRACT(DAY_MINUTE FROM a),
+ EXTRACT(DAY_SECOND FROM a),
+ EXTRACT(DAY_MICROSECOND FROM a),
+ EXTRACT(DAY FROM a),
+ EXTRACT(HOUR FROM a),
+ EXTRACT(MINUTE FROM a),
+ EXTRACT(SECOND FROM a),
+ EXTRACT(MICROSECOND FROM a)
+FROM t1;
+DROP TABLE t1;
+
+
+--echo # Backward compatibility
+
+--echo # This still parses as DATETIME
+SELECT EXTRACT(YEAR FROM '2001/02/03 10:20:30');
+SELECT EXTRACT(MONTH FROM '2001/02/03 10:20:30');
+SELECT EXTRACT(DAY FROM '2001/02/03 10:20:30');
+
+SELECT EXTRACT(YEAR FROM '01/02/03 10:20:30');
+SELECT EXTRACT(MONTH FROM '01/02/03 10:20:30');
+SELECT EXTRACT(DAY FROM '01/02/03 10:20:30');
+
+SELECT EXTRACT(YEAR FROM '01:02:03 10:20:30');
+SELECT EXTRACT(MONTH FROM '01:02:03 10:20:30');
+SELECT EXTRACT(DAY FROM '01:02:03 10:20:30');
+
+--echo # This still parses as DATETIME and returns NULL
+
+SELECT EXTRACT(YEAR FROM "2011-02-32 8:46:06.23434");
+SELECT EXTRACT(MONTH FROM "2011-02-32 8:46:06.23434");
+SELECT EXTRACT(DAY FROM "2011-02-32 8:46:06.23434");
+SELECT EXTRACT(HOUR FROM "2011-02-32 8:46:06.23434");
+
+--echo # This still parses as DATE
+
+SELECT EXTRACT(YEAR FROM '2001/02/03');
+SELECT EXTRACT(MONTH FROM '2001/02/03');
+SELECT EXTRACT(DAY FROM '2001/02/03');
+
+SELECT EXTRACT(YEAR FROM '01/02/03');
+SELECT EXTRACT(MONTH FROM '01/02/03');
+SELECT EXTRACT(DAY FROM '01/02/03');
+
+SELECT EXTRACT(YEAR FROM '01-02-03');
+SELECT EXTRACT(MONTH FROM '01-02-03');
+SELECT EXTRACT(DAY FROM '01-02-03');
+
+SELECT EXTRACT(YEAR FROM '1-2-3');
+SELECT EXTRACT(MONTH FROM '1-2-3');
+SELECT EXTRACT(DAY FROM '1-2-3');
+SELECT EXTRACT(HOUR FROM '1-2-3');
+
+SELECT EXTRACT(DAY FROM '2024-01-03 garbage /////');
+SELECT EXTRACT(DAY FROM '24-01-03 garbage /////');
+SELECT EXTRACT(DAY FROM '01-02-03');
+
+SELECT EXTRACT(DAY FROM '24:02:03T');
+SELECT EXTRACT(DAY FROM '24-02-03');
+SELECT EXTRACT(DAY FROM '24/02/03');
+
+SELECT EXTRACT(DAY FROM '11111');
+
+SELECT TIME('2001-01-01T'), TIME('2001-01-01T ');
+SELECT TIME('2001/01/01T'), TIME('2001/01/01T ');
+SELECT TIME('2001:01:01T'), TIME('2001:01:01T ');
+
+SELECT EXTRACT(DAY FROM '2001-01-01T'), EXTRACT(DAY FROM '2001-01-01T ');
+SELECT EXTRACT(DAY FROM '2001/01/01T'), EXTRACT(DAY FROM '2001/01/01T ');
+SELECT EXTRACT(DAY FROM '2001:01:01T'), EXTRACT(DAY FROM '2001:01:01T ');
+
+
+SELECT TIME('2001:01:01T'), TIME('2001:01:01T ');
+SELECT EXTRACT(HOUR FROM '2001-01-01T'), EXTRACT(HOUR FROM '2001-01-01T ');
+SELECT EXTRACT(HOUR FROM '2001/01/01T'), EXTRACT(HOUR FROM '2001/01/01T ');
+SELECT EXTRACT(HOUR FROM '2001:01:01T'), EXTRACT(HOUR FROM '2001:01:01T ');
+
+--echo # This still parses as DATE and returns NULL (without trying TIME)
+SELECT EXTRACT(DAY FROM '100000:02:03T');
+SELECT EXTRACT(DAY FROM '100000/02/03');
+SELECT EXTRACT(DAY FROM '100000-02-03');
+
+SELECT EXTRACT(DAY FROM '1111');
+SELECT EXTRACT(DAY FROM '111');
+SELECT EXTRACT(DAY FROM '11');
+SELECT EXTRACT(DAY FROM '1');
+
+
+--echo # This still parses as TIME
+
+SELECT EXTRACT(HOUR FROM '11111');
+SELECT EXTRACT(HOUR FROM '1111');
+SELECT EXTRACT(HOUR FROM '111');
+SELECT EXTRACT(HOUR FROM '11');
+SELECT EXTRACT(HOUR FROM '1');
+
+SELECT TIME('01:02:03:');
+SELECT TIME('01:02:03-');
+SELECT TIME('01:02:03;');
+SELECT TIME('01:02:03/');
+
+SELECT EXTRACT(HOUR FROM '01:02:03:');
+SELECT EXTRACT(HOUR FROM '01:02:03-');
+SELECT EXTRACT(HOUR FROM '01:02:03;');
+SELECT EXTRACT(HOUR FROM '01:02:03/');
+
+--echo # Backward compatibility preserved for YEAR and MONTH only
+--echo # (behavior has changed for DAY, see below)
+SELECT EXTRACT(YEAR FROM '01:02:03');
+SELECT EXTRACT(MONTH FROM '01:02:03');
+
+SELECT EXTRACT(YEAR FROM '24:01:03 garbage /////');
+SELECT EXTRACT(MONTH FROM '24:01:03 garbage /////');
+
+--echo # This still parses as TIME 00:20:01
+
+SELECT TIME('2001/01/01');
+SELECT TIME('2001-01-01');
+
+--echo # This still parses as TIME and overflows to '838:59:59'
+SELECT TIME('2001:01:01');
+
+
+--echo # This used to parse as DATE, now parses as TIME interval
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2024:01:03 garbage /////'),
+('24:01:03 garbage /////'),
+('01:01:03 garbage /////'),
+('2024:02:03'),
+('100000:02:03'),
+('24:02:03'),
+('01:02:03'),
+('01:02:03:'),
+('01:02:03-'),
+('01:02:03;'),
+('01:02:03/'),
+('20 10:20:30');
+
+SELECT
+ EXTRACT(DAY FROM a),
+ EXTRACT(DAY_SECOND FROM a), a,
+ CAST(a AS INTERVAL DAY_SECOND(6)) AS cidm
+FROM t1;
+DROP TABLE t1;
diff --git a/mysql-test/main/func_group.result b/mysql-test/main/func_group.result
index f65ad43cf71..c6daf2335a9 100644
--- a/mysql-test/main/func_group.result
+++ b/mysql-test/main/func_group.result
@@ -612,12 +612,12 @@ id select_type table type possible_keys key key_len ref rows Extra
explain
select max(a3) from t1 where a2 < 2 and a3 < 'SEA';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range k1 k1 3 NULL 6 Using where; Using index
+1 SIMPLE t1 range k1 k1 3 NULL 7 Using where; Using index
explain
-select max(t1.a3), min(t2.a2) from t1, t2 where t1.a2 = 2 and t1.a3 < 'MIN' and t2.a3 > 'CA';
+select max(t1.a3), min(t2.a2) from t1, t2 where t1.a2 = 2 and t1.a3 < 'DEN' and t2.a3 >= 'LA';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range k1 k1 7 NULL 1 Using where; Using index
-1 SIMPLE t2 range k1 k1 3 NULL 4 Using where; Using index; Using join buffer (flat, BNL join)
+1 SIMPLE t2 range k1 k1 3 NULL 2 Using where; Using index; Using join buffer (flat, BNL join)
explain
select min(a4 - 0.01) from t1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -653,7 +653,7 @@ id select_type table type possible_keys key key_len ref rows Extra
explain
select concat(min(t1.a1),min(t2.a4)) from t1, t2 where t2.a4 <> 'AME';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range k2 k2 4 NULL 6 Using where; Using index
+1 SIMPLE t2 index k2 k2 4 NULL 7 Using where; Using index
1 SIMPLE t1 index NULL PRIMARY 3 NULL 15 Using index; Using join buffer (flat, BNL join)
drop table t1, t2;
create table t1 (a char(10));
@@ -1333,10 +1333,11 @@ SELECT MIN(a), MIN(b) FROM t1;
MIN(a) MIN(b)
NULL 1
CREATE TABLE t2( a INT, b INT, c INT, KEY(a, b) );
-INSERT INTO t2 ( a, b, c ) VALUES ( 1, NULL, 2 ), ( 1, 3, 4 ), ( 1, 4, 4 );
+INSERT INTO t2 ( a, b, c ) VALUES
+( 1, NULL, 2 ), ( 1, 3, 4 ), ( 1, 4, 4 ), ( 2, NULL, 2 ), ( 2, 3, 4 ), ( 2, 4, 4 );
EXPLAIN SELECT MIN(b), MIN(c) FROM t2 WHERE a = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref a a 5 const 2
+1 SIMPLE t2 ref a a 5 const 3
SELECT MIN(b), MIN(c) FROM t2 WHERE a = 1;
MIN(b) MIN(c)
3 2
@@ -1839,12 +1840,12 @@ End of 5.1 tests
# Bug #904345: MIN/MAX optimization with constant FALSE condition
#
CREATE TABLE t1 (a int NOT NULL, KEY(a));
-INSERT INTO t1 VALUES (10), (8), (11), (7), (15), (12), (9);
+INSERT INTO t1 VALUES (10), (8), (11), (7), (15), (12), (9), (13), (15), (17);
CREATE TABLE t2 (a int, b int);
INSERT INTO t2 VALUES
(8,2), (6,9), (8,4), (5,3), (9,1);
EXPLAIN EXTENDED
-SELECT MAX(a) FROM t1 WHERE (1,2) IN (SELECT 3,4) AND a<10;
+SELECT MAX(a) FROM t1 WHERE (1,2) IN (SELECT 3,4) AND a<8;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
@@ -1857,7 +1858,7 @@ EXPLAIN EXTENDED
SELECT MAX(a) FROM t1 WHERE (1,2) IN (SELECT a,b FROM t2 WHERE b<5) and a<10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func,func 1 100.00
-1 PRIMARY t1 range a a 4 NULL 4 100.00 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 range a a 4 NULL 3 100.00 Using where; Using index; Using join buffer (flat, BNL join)
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
Warnings:
Note 1003 select max(`test`.`t1`.`a`) AS `MAX(a)` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`a` = 1 and `test`.`t2`.`b` = 2 and `test`.`t1`.`a` < 10
@@ -1867,7 +1868,7 @@ NULL
EXPLAIN EXTENDED
SELECT MAX(a) FROM t1 WHERE RAND()*0<>0 AND a<10;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range a a 4 NULL 4 100.00 Using where; Using index
+1 SIMPLE t1 range a a 4 NULL 3 100.00 Using where; Using index
Warnings:
Note 1003 select max(`test`.`t1`.`a`) AS `MAX(a)` from `test`.`t1` where rand() * 0 <> 0 and `test`.`t1`.`a` < 10
SELECT MAX(a) FROM t1 WHERE RAND()*0<>0 AND a<10;
diff --git a/mysql-test/main/func_group.test b/mysql-test/main/func_group.test
index d847f7cd5f7..bc2d6e9047d 100644
--- a/mysql-test/main/func_group.test
+++ b/mysql-test/main/func_group.test
@@ -363,7 +363,7 @@ select min(a1) from t1 where a1 != 'KKK';
explain
select max(a3) from t1 where a2 < 2 and a3 < 'SEA';
explain
-select max(t1.a3), min(t2.a2) from t1, t2 where t1.a2 = 2 and t1.a3 < 'MIN' and t2.a3 > 'CA';
+select max(t1.a3), min(t2.a2) from t1, t2 where t1.a2 = 2 and t1.a3 < 'DEN' and t2.a3 >= 'LA';
explain
select min(a4 - 0.01) from t1;
@@ -849,7 +849,8 @@ EXPLAIN SELECT MIN(a), MIN(b) FROM t1;
SELECT MIN(a), MIN(b) FROM t1;
CREATE TABLE t2( a INT, b INT, c INT, KEY(a, b) );
-INSERT INTO t2 ( a, b, c ) VALUES ( 1, NULL, 2 ), ( 1, 3, 4 ), ( 1, 4, 4 );
+INSERT INTO t2 ( a, b, c ) VALUES
+ ( 1, NULL, 2 ), ( 1, 3, 4 ), ( 1, 4, 4 ), ( 2, NULL, 2 ), ( 2, 3, 4 ), ( 2, 4, 4 );
EXPLAIN SELECT MIN(b), MIN(c) FROM t2 WHERE a = 1;
SELECT MIN(b), MIN(c) FROM t2 WHERE a = 1;
@@ -1168,14 +1169,14 @@ drop table t1;
--echo #
CREATE TABLE t1 (a int NOT NULL, KEY(a));
-INSERT INTO t1 VALUES (10), (8), (11), (7), (15), (12), (9);
+INSERT INTO t1 VALUES (10), (8), (11), (7), (15), (12), (9), (13), (15), (17);
CREATE TABLE t2 (a int, b int);
INSERT INTO t2 VALUES
(8,2), (6,9), (8,4), (5,3), (9,1);
EXPLAIN EXTENDED
-SELECT MAX(a) FROM t1 WHERE (1,2) IN (SELECT 3,4) AND a<10;
+SELECT MAX(a) FROM t1 WHERE (1,2) IN (SELECT 3,4) AND a<8;
SELECT MAX(a) FROM t1 WHERE (1,2) IN (SELECT 3,4) AND a<10;
EXPLAIN EXTENDED
diff --git a/mysql-test/main/func_group_innodb.result b/mysql-test/main/func_group_innodb.result
index e149997af4f..a4c9b574585 100644
--- a/mysql-test/main/func_group_innodb.result
+++ b/mysql-test/main/func_group_innodb.result
@@ -1,3 +1,8 @@
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
create table t1 (USR_ID integer not null, MAX_REQ integer not null, constraint PK_SEA_USER primary key (USR_ID)) engine=InnoDB;
insert into t1 values (1, 3);
select count(*) + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ + MAX_REQ - MAX_REQ from t1 group by MAX_REQ;
@@ -237,14 +242,16 @@ SET storage_engine=@old_engine;
CREATE TABLE t1(a BLOB, b VARCHAR(255) CHARSET LATIN1, c INT,
KEY(b, c, a(765))) ENGINE=INNODB;
INSERT INTO t1(a, b, c) VALUES
-('', 'a', 0), ('', 'a', null), ('', 'a', 0), ('', 'a', null), ('', 'a', 0);
+('', 'a', 0), ('', 'a', null), ('', 'a', 0), ('', 'a', null), ('', 'a', 0),
+('', 'a', 1), ('', 'a', 1), ('', 'a', 2), ('', 'a', 2), ('', 'a', 3),
+('', 'a', 3), ('', 'a', 4), ('', 'a', 4), ('', 'a', 5), ('', 'a', 5);
ANALYZE TABLE t1;
SELECT MIN(c) FROM t1 GROUP BY b;
MIN(c)
0
EXPLAIN SELECT MIN(c) FROM t1 GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL b 263 NULL 3 Using index for group-by
+1 SIMPLE t1 range NULL b 263 NULL 2 Using index for group-by
DROP TABLE t1;
#
# MDEV-17589: Stack-buffer-overflow with indexed varchar (utf8) field
@@ -271,3 +278,6 @@ MIN(x.v1)
NULL
drop table t1;
End of 5.5 tests
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/func_group_innodb.test b/mysql-test/main/func_group_innodb.test
index c4914b97641..6141b4d85ed 100644
--- a/mysql-test/main/func_group_innodb.test
+++ b/mysql-test/main/func_group_innodb.test
@@ -4,6 +4,13 @@
--source include/have_innodb.inc
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
+
--disable_warnings
create table t1 (USR_ID integer not null, MAX_REQ integer not null, constraint PK_SEA_USER primary key (USR_ID)) engine=InnoDB;
--enable_warnings
@@ -181,7 +188,9 @@ SET storage_engine=@old_engine;
CREATE TABLE t1(a BLOB, b VARCHAR(255) CHARSET LATIN1, c INT,
KEY(b, c, a(765))) ENGINE=INNODB;
INSERT INTO t1(a, b, c) VALUES
-('', 'a', 0), ('', 'a', null), ('', 'a', 0), ('', 'a', null), ('', 'a', 0);
+('', 'a', 0), ('', 'a', null), ('', 'a', 0), ('', 'a', null), ('', 'a', 0),
+('', 'a', 1), ('', 'a', 1), ('', 'a', 2), ('', 'a', 2), ('', 'a', 3),
+('', 'a', 3), ('', 'a', 4), ('', 'a', 4), ('', 'a', 5), ('', 'a', 5);
-- disable_result_log
ANALYZE TABLE t1;
@@ -212,3 +221,7 @@ SELECT MIN(x.v1) FROM (SELECT t1.* FROM t1 WHERE t1.v1 >= 'p') x;
drop table t1;
--echo End of 5.5 tests
+
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/func_hybrid_type.result b/mysql-test/main/func_hybrid_type.result
index 109f7a35038..91f3949d456 100644
--- a/mysql-test/main/func_hybrid_type.result
+++ b/mysql-test/main/func_hybrid_type.result
@@ -3786,8 +3786,8 @@ LEAST(20010001,TIMESTAMP'2001-01-01 00:00:00') AS i4;
Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
def s1 10 10 0 Y 128 0 63
def s2 10 10 0 Y 128 0 63
-def s3 12 26 0 Y 128 6 63
-def s4 12 26 0 Y 128 6 63
+def s3 12 26 0 Y 128 0 63
+def s4 12 26 0 Y 128 0 63
def i1 10 10 0 Y 128 0 63
def i2 10 10 0 Y 128 0 63
def i3 12 19 0 Y 128 0 63
@@ -3830,8 +3830,8 @@ Table Create Table
t1 CREATE TABLE `t1` (
`s1` date DEFAULT NULL,
`s2` date DEFAULT NULL,
- `s3` datetime(6) DEFAULT NULL,
- `s4` datetime(6) DEFAULT NULL,
+ `s3` datetime DEFAULT NULL,
+ `s4` datetime DEFAULT NULL,
`i1` date DEFAULT NULL,
`i2` date DEFAULT NULL,
`i3` datetime DEFAULT NULL,
@@ -3870,10 +3870,10 @@ SELECT LEAST(999,TIME'10:20:30') AS c1;
c1
NULL
Warnings:
-Warning 1292 Incorrect datetime value: '999'
+Warning 1292 Incorrect time value: '999'
CREATE TABLE t1 AS SELECT LEAST(999,TIME'10:20:30') AS c1;
Warnings:
-Warning 1292 Incorrect datetime value: '999'
+Warning 1292 Incorrect time value: '999'
SELECT * FROM t1;
c1
NULL
@@ -3887,3 +3887,190 @@ SET sql_mode=DEFAULT;
#
# End of 10.3 tests
#
+#
+# MDEV-17325 NULL-ability problems with LEAST() in combination with NO_ZERO_DATE and NO_ZERO_IN_DATE
+#
+SET sql_mode='NO_ZERO_DATE,NO_ZERO_IN_DATE';
+SELECT
+LEAST('0000-00-00',DATE'2001-01-01') AS s1,
+LEAST('0001-00-01',DATE'2001-01-01') AS s2,
+LEAST('0000-00-00',TIMESTAMP'2001-01-01 00:00:00') AS s3,
+LEAST('0001-00-01',TIMESTAMP'2001-01-01 00:00:00') AS s4,
+LEAST(0,DATE'2001-01-01') AS i1,
+LEAST(20010001,DATE'2001-01-01') AS i2,
+LEAST(0,TIMESTAMP'2001-01-01 00:00:00') AS i3,
+LEAST(20010001,TIMESTAMP'2001-01-01 00:00:00') AS i4;
+Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
+def s1 10 10 0 Y 128 0 63
+def s2 10 10 0 Y 128 0 63
+def s3 12 26 0 Y 128 0 63
+def s4 12 26 0 Y 128 0 63
+def i1 10 10 0 Y 128 0 63
+def i2 10 10 0 Y 128 0 63
+def i3 12 19 0 Y 128 0 63
+def i4 12 19 0 Y 128 0 63
+s1 s2 s3 s4 i1 i2 i3 i4
+NULL NULL NULL NULL NULL NULL NULL NULL
+Warnings:
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0001-00-01'
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00'
+Warning 1292 Incorrect datetime value: '0001-00-01 00:00:00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '2001-00-01'
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00'
+Warning 1292 Incorrect datetime value: '2001-00-01 00:00:00'
+SET sql_mode='NO_ZERO_DATE,NO_ZERO_IN_DATE';
+CREATE TABLE t1 AS SELECT
+LEAST('0000-00-00',DATE'2001-01-01') AS s1,
+LEAST('0001-00-01',DATE'2001-01-01') AS s2,
+LEAST('0000-00-00',TIMESTAMP'2001-01-01 00:00:00') AS s3,
+LEAST('0001-00-01',TIMESTAMP'2001-01-01 00:00:00') AS s4,
+LEAST(0,DATE'2001-01-01') AS i1,
+LEAST(20010001,DATE'2001-01-01') AS i2,
+LEAST(0,TIMESTAMP'2001-01-01 00:00:00') AS i3,
+LEAST(20010001,TIMESTAMP'2001-01-01 00:00:00') AS i4;
+Warnings:
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0001-00-01'
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00'
+Warning 1292 Incorrect datetime value: '0001-00-01 00:00:00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '2001-00-01'
+Warning 1292 Incorrect datetime value: '0000-00-00 00:00:00'
+Warning 1292 Incorrect datetime value: '2001-00-01 00:00:00'
+SELECT * FROM t1;
+s1 s2 s3 s4 i1 i2 i3 i4
+NULL NULL NULL NULL NULL NULL NULL NULL
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `s1` date DEFAULT NULL,
+ `s2` date DEFAULT NULL,
+ `s3` datetime DEFAULT NULL,
+ `s4` datetime DEFAULT NULL,
+ `i1` date DEFAULT NULL,
+ `i2` date DEFAULT NULL,
+ `i3` datetime DEFAULT NULL,
+ `i4` datetime DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+SET timestamp=UNIX_TIMESTAMP('2001-01-01 10:20:30');
+CREATE TABLE t1 AS SELECT LEAST(CURRENT_DATE,CURRENT_TIME) AS c1;
+SELECT * FROM t1;
+c1
+2001-01-01 00:00:00
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` datetime NOT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+SET old_mode=ZERO_DATE_TIME_CAST;
+CREATE TABLE t1 AS SELECT LEAST(CURRENT_DATE,CURRENT_TIME) AS c1;
+Warnings:
+Warning 1292 Incorrect datetime value: '0000-00-00 10:20:30'
+SELECT * FROM t1;
+c1
+NULL
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` datetime DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+SET old_mode=DEFAULT;
+SET timestamp=DEFAULT;
+SET sql_mode=DEFAULT;
+SET sql_mode='';
+SELECT LEAST(999,TIME'10:20:30') AS c1;
+c1
+NULL
+Warnings:
+Warning 1292 Incorrect time value: '999'
+CREATE TABLE t1 AS SELECT LEAST(999,TIME'10:20:30') AS c1;
+Warnings:
+Warning 1292 Incorrect time value: '999'
+SELECT * FROM t1;
+c1
+NULL
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` time DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+#
+# MDEV-17318 CAST(LEAST(zero_date,non_zero_date) AS numeric_data_type) returns a wrong result
+#
+SET sql_mode='NO_ZERO_DATE,NO_ZERO_IN_DATE';
+SELECT
+LEAST('0000-00-00',DATE'2001-01-01') AS c0,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS CHAR) AS string,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DATE) AS date,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DATETIME) AS datetime,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS TIME) AS time,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DECIMAL) AS dc,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DOUBLE) AS dbl,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS SIGNED) AS sint,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS UNSIGNED) AS uint;
+c0 string date datetime time dc dbl sint uint
+NULL NULL NULL NULL NULL NULL NULL NULL NULL
+Warnings:
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+CREATE TABLE t1 AS SELECT
+LEAST('0000-00-00',DATE'2001-01-01') AS c0,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS CHAR) AS string,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DATE) AS date,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DATETIME) AS datetime,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS TIME) AS time,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DECIMAL) AS dc,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DOUBLE) AS dbl,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS SIGNED) AS sint,
+CAST(LEAST('0000-00-00',DATE'2001-01-01') AS UNSIGNED) AS uint;
+Warnings:
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+Warning 1292 Incorrect datetime value: '0000-00-00'
+SELECT * FROM t1;
+c0 string date datetime time dc dbl sint uint
+NULL NULL NULL NULL NULL NULL NULL NULL NULL
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c0` date DEFAULT NULL,
+ `string` varchar(10) DEFAULT NULL,
+ `date` date DEFAULT NULL,
+ `datetime` datetime DEFAULT NULL,
+ `time` time DEFAULT NULL,
+ `dc` decimal(10,0) DEFAULT NULL,
+ `dbl` double DEFAULT NULL,
+ `sint` bigint(10) DEFAULT NULL,
+ `uint` bigint(20) unsigned DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+#
+# MDEV-17330 Wrong result for 0 + LEAST(TIME'-10:00:00',TIME'10:00:00')
+#
+SELECT 0 + LEAST(TIME'-10:00:00',TIME'10:00:00') AS c;
+c
+-100000
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/func_hybrid_type.test b/mysql-test/main/func_hybrid_type.test
index 223ae4b2166..020f4ce93bd 100644
--- a/mysql-test/main/func_hybrid_type.test
+++ b/mysql-test/main/func_hybrid_type.test
@@ -693,3 +693,105 @@ SET sql_mode=DEFAULT;
--echo #
--echo # End of 10.3 tests
--echo #
+
+--echo #
+--echo # MDEV-17325 NULL-ability problems with LEAST() in combination with NO_ZERO_DATE and NO_ZERO_IN_DATE
+--echo #
+
+SET sql_mode='NO_ZERO_DATE,NO_ZERO_IN_DATE';
+
+--disable_ps_protocol
+--enable_metadata
+SELECT
+ LEAST('0000-00-00',DATE'2001-01-01') AS s1,
+ LEAST('0001-00-01',DATE'2001-01-01') AS s2,
+ LEAST('0000-00-00',TIMESTAMP'2001-01-01 00:00:00') AS s3,
+ LEAST('0001-00-01',TIMESTAMP'2001-01-01 00:00:00') AS s4,
+ LEAST(0,DATE'2001-01-01') AS i1,
+ LEAST(20010001,DATE'2001-01-01') AS i2,
+ LEAST(0,TIMESTAMP'2001-01-01 00:00:00') AS i3,
+ LEAST(20010001,TIMESTAMP'2001-01-01 00:00:00') AS i4;
+--disable_metadata
+--enable_ps_protocol
+
+SET sql_mode='NO_ZERO_DATE,NO_ZERO_IN_DATE';
+CREATE TABLE t1 AS SELECT
+ LEAST('0000-00-00',DATE'2001-01-01') AS s1,
+ LEAST('0001-00-01',DATE'2001-01-01') AS s2,
+ LEAST('0000-00-00',TIMESTAMP'2001-01-01 00:00:00') AS s3,
+ LEAST('0001-00-01',TIMESTAMP'2001-01-01 00:00:00') AS s4,
+ LEAST(0,DATE'2001-01-01') AS i1,
+ LEAST(20010001,DATE'2001-01-01') AS i2,
+ LEAST(0,TIMESTAMP'2001-01-01 00:00:00') AS i3,
+ LEAST(20010001,TIMESTAMP'2001-01-01 00:00:00') AS i4;
+SELECT * FROM t1;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+SET timestamp=UNIX_TIMESTAMP('2001-01-01 10:20:30');
+
+# A TIME always converts to a non-NULL DATETIME with the new CAST style
+# Expect a NOT NULL column
+CREATE TABLE t1 AS SELECT LEAST(CURRENT_DATE,CURRENT_TIME) AS c1;
+SELECT * FROM t1;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+# A TIME can convert to a NULL DATETIME with old CAST style
+# Expect a NULL-able column
+SET old_mode=ZERO_DATE_TIME_CAST;
+CREATE TABLE t1 AS SELECT LEAST(CURRENT_DATE,CURRENT_TIME) AS c1;
+SELECT * FROM t1;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+SET old_mode=DEFAULT;
+SET timestamp=DEFAULT;
+
+SET sql_mode=DEFAULT;
+
+SET sql_mode='';
+SELECT LEAST(999,TIME'10:20:30') AS c1;
+CREATE TABLE t1 AS SELECT LEAST(999,TIME'10:20:30') AS c1;
+SELECT * FROM t1;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+
+--echo #
+--echo # MDEV-17318 CAST(LEAST(zero_date,non_zero_date) AS numeric_data_type) returns a wrong result
+--echo #
+SET sql_mode='NO_ZERO_DATE,NO_ZERO_IN_DATE';
+SELECT
+ LEAST('0000-00-00',DATE'2001-01-01') AS c0,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS CHAR) AS string,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DATE) AS date,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DATETIME) AS datetime,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS TIME) AS time,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DECIMAL) AS dc,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DOUBLE) AS dbl,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS SIGNED) AS sint,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS UNSIGNED) AS uint;
+CREATE TABLE t1 AS SELECT
+ LEAST('0000-00-00',DATE'2001-01-01') AS c0,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS CHAR) AS string,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DATE) AS date,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DATETIME) AS datetime,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS TIME) AS time,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DECIMAL) AS dc,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS DOUBLE) AS dbl,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS SIGNED) AS sint,
+ CAST(LEAST('0000-00-00',DATE'2001-01-01') AS UNSIGNED) AS uint;
+SELECT * FROM t1;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+
+--echo #
+--echo # MDEV-17330 Wrong result for 0 + LEAST(TIME'-10:00:00',TIME'10:00:00')
+--echo #
+SELECT 0 + LEAST(TIME'-10:00:00',TIME'10:00:00') AS c;
+
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/func_in.result b/mysql-test/main/func_in.result
index 65313148bf8..79f5f555681 100644
--- a/mysql-test/main/func_in.result
+++ b/mysql-test/main/func_in.result
@@ -241,7 +241,7 @@ insert into t2 select C.a*2+1, 'yes' from t1 C;
explain
select * from t2 where a NOT IN (0, 2,4,6,8,10,12,14,16,18);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 5 NULL 12 Using index condition
+1 SIMPLE t2 range a a 5 NULL 11 Using index condition
select * from t2 where a NOT IN (0, 2,4,6,8,10,12,14,16,18);
a filler
1 yes
@@ -271,7 +271,7 @@ select * from t2 where a NOT IN (
'2006-04-25 10:00:00','2006-04-25 10:02:00','2006-04-25 10:04:00',
'2006-04-25 10:06:00', '2006-04-25 10:08:00');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 6 NULL 12 Using index condition
+1 SIMPLE t2 range a a 6 NULL 11 Using index condition
select * from t2 where a NOT IN (
'2006-04-25 10:00:00','2006-04-25 10:02:00','2006-04-25 10:04:00',
'2006-04-25 10:06:00', '2006-04-25 10:08:00');
@@ -295,7 +295,7 @@ insert into t2 values ('fon', '1'), ('fop','1'), ('barbaq','1'),
('barbas','1'), ('bazbazbay', '1'),('zz','1');
explain select * from t2 where a not in('foo','barbar', 'bazbazbaz');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 13 NULL 7 Using index condition
+1 SIMPLE t2 range a a 13 NULL 6 Using index condition
drop table t2;
create table t2 (a decimal(10,5), filler char(200), key(a));
insert into t2 select 345.67890, 'no' from t1 A, t1 B;
@@ -306,7 +306,7 @@ insert into t2 values (0, '1'), (22334.123,'1'), (33333,'1'),
explain
select * from t2 where a not in (345.67890, 43245.34, 64224.56344);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 7 NULL 7 Using index condition
+1 SIMPLE t2 range a a 7 NULL 6 Using index condition
select * from t2 where a not in (345.67890, 43245.34, 64224.56344);
a filler
0.00000 1
@@ -481,7 +481,7 @@ SELECT * FROM t4 WHERE a IN ('1972-02-06','19772-07-29');
a
1972-02-06
Warnings:
-Warning 1292 Incorrect datetime value: '19772-07-29'
+Warning 1292 Truncated incorrect datetime value: '19772-07-29'
DROP TABLE t1,t2,t3,t4;
CREATE TABLE t1 (id int not null);
INSERT INTO t1 VALUES (1),(2);
@@ -717,11 +717,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN SELECT * FROM t1 WHERE c_timestamp
IN ('2009-09-01 00:00:01', '2009-09-01 00:00:02', '2009-09-01 00:00:03');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c_timestamp c_timestamp 4 NULL 3 Using index condition
+1 SIMPLE t1 ALL c_timestamp NULL NULL NULL 20 Using where
EXPLAIN SELECT * FROM t1 WHERE c_timestamp
IN (NULL, '2009-09-01 00:00:01', '2009-09-01 00:00:02', '2009-09-01 00:00:03');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c_timestamp c_timestamp 4 NULL 3 Using index condition
+1 SIMPLE t1 ALL c_timestamp NULL NULL NULL 20 Using where
EXPLAIN SELECT * FROM t1 WHERE c_timestamp IN (NULL);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
@@ -909,3 +909,38 @@ Warnings:
Warning 1292 Truncated incorrect time value: ''
Warning 1292 Truncated incorrect time value: ''
Warning 1292 Truncated incorrect time value: ''
+#
+# End of 10.3 tests
+#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16454 Bad results for IN with ROW
+#
+SELECT (18446744073709551615,0) IN ((18446744073709551614,0),(-1,0));
+(18446744073709551615,0) IN ((18446744073709551614,0),(-1,0))
+0
+SELECT '0x' IN (0);
+'0x' IN (0)
+1
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: '0x'
+SELECT '0x' IN (0,1);
+'0x' IN (0,1)
+1
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: '0x'
+SELECT ('0x',1) IN ((0,1));
+('0x',1) IN ((0,1))
+1
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: '0x'
+SELECT ('0x',1) IN ((0,1),(1,1));
+('0x',1) IN ((0,1),(1,1))
+1
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: '0x'
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/func_in.test b/mysql-test/main/func_in.test
index b99fad159c2..fb6f2036f20 100644
--- a/mysql-test/main/func_in.test
+++ b/mysql-test/main/func_in.test
@@ -690,3 +690,27 @@ SELECT
TIME'00:00:00'='' AS c1_true,
TIME'00:00:00' IN ('', TIME'10:20:30') AS c2_true,
TIME'00:00:00' NOT IN ('', TIME'10:20:30') AS c3_false;
+
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-16454 Bad results for IN with ROW
+--echo #
+SELECT (18446744073709551615,0) IN ((18446744073709551614,0),(-1,0));
+
+SELECT '0x' IN (0);
+SELECT '0x' IN (0,1);
+SELECT ('0x',1) IN ((0,1));
+SELECT ('0x',1) IN ((0,1),(1,1));
+
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result
index 6222d5f1feb..aa5685b19fa 100644
--- a/mysql-test/main/func_json.result
+++ b/mysql-test/main/func_json.result
@@ -871,3 +871,91 @@ json_length json_depnth
#
# End of 10.3 tests
#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16351 JSON_OBJECT() treats hybrid functions with boolean arguments as numbers
+#
+SELECT
+JSON_OBJECT("cond", true) AS j1,
+JSON_OBJECT("cond", COALESCE(true, false)) j2,
+JSON_OBJECT("cond", COALESCE(COALESCE(true, false))) j3;
+j1 {"cond": true}
+j2 {"cond": true}
+j3 {"cond": true}
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+SELECT JSON_OBJECT('x',(SELECT MAX(a)=4 FROM t1));
+JSON_OBJECT('x',(SELECT MAX(a)=4 FROM t1))
+{"x": false}
+SELECT JSON_OBJECT('x',(SELECT MAX(a)=3 FROM t1));
+JSON_OBJECT('x',(SELECT MAX(a)=3 FROM t1))
+{"x": true}
+SELECT JSON_OBJECT('x',(SELECT MAX(a)=2 FROM t1));
+JSON_OBJECT('x',(SELECT MAX(a)=2 FROM t1))
+{"x": false}
+SELECT JSON_OBJECT('x',MAX(a=4)) FROM t1;
+JSON_OBJECT('x',MAX(a=4))
+{"x": false}
+SELECT JSON_OBJECT('x',MAX(a=3)) FROM t1;
+JSON_OBJECT('x',MAX(a=3))
+{"x": true}
+SELECT JSON_OBJECT('x',MAX(a=2)) FROM t1;
+JSON_OBJECT('x',MAX(a=2))
+{"x": true}
+SELECT JSON_OBJECT('x',(SELECT MAX(a=4) FROM t1));
+JSON_OBJECT('x',(SELECT MAX(a=4) FROM t1))
+{"x": false}
+SELECT JSON_OBJECT('x',(SELECT MAX(a=3) FROM t1));
+JSON_OBJECT('x',(SELECT MAX(a=3) FROM t1))
+{"x": true}
+SELECT JSON_OBJECT('x',(SELECT MAX(a=2) FROM t1));
+JSON_OBJECT('x',(SELECT MAX(a=2) FROM t1))
+{"x": true}
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a)=4 FROM t1))='{"x": true}' THEN a END;
+a
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a)=4 FROM t1))='{"x": false}' THEN a END;
+a
+1
+2
+3
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a)=3 FROM t1))='{"x": true}' THEN a END;
+a
+1
+2
+3
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a)=3 FROM t1))='{"x": false}' THEN a END;
+a
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a)=2 FROM t1))='{"x": true}' THEN a END;
+a
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a)=2 FROM t1))='{"x": false}' THEN a END;
+a
+1
+2
+3
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=4) FROM t1))='{"x": true}' THEN a END;
+a
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=4) FROM t1))='{"x": false}' THEN a END;
+a
+1
+2
+3
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=3) FROM t1))='{"x": true}' THEN a END;
+a
+1
+2
+3
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=3) FROM t1))='{"x": false}' THEN a END;
+a
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=2) FROM t1))='{"x": true}' THEN a END;
+a
+1
+2
+3
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=2) FROM t1))='{"x": false}' THEN a END;
+a
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test
index bcf0fdfe3fc..ea0be8fd757 100644
--- a/mysql-test/main/func_json.test
+++ b/mysql-test/main/func_json.test
@@ -518,3 +518,53 @@ SELECT
--echo #
--echo # End of 10.3 tests
--echo #
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-16351 JSON_OBJECT() treats hybrid functions with boolean arguments as numbers
+--echo #
+
+--vertical_results
+SELECT
+ JSON_OBJECT("cond", true) AS j1,
+ JSON_OBJECT("cond", COALESCE(true, false)) j2,
+ JSON_OBJECT("cond", COALESCE(COALESCE(true, false))) j3;
+--horizontal_results
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+SELECT JSON_OBJECT('x',(SELECT MAX(a)=4 FROM t1));
+SELECT JSON_OBJECT('x',(SELECT MAX(a)=3 FROM t1));
+SELECT JSON_OBJECT('x',(SELECT MAX(a)=2 FROM t1));
+
+SELECT JSON_OBJECT('x',MAX(a=4)) FROM t1;
+SELECT JSON_OBJECT('x',MAX(a=3)) FROM t1;
+SELECT JSON_OBJECT('x',MAX(a=2)) FROM t1;
+
+SELECT JSON_OBJECT('x',(SELECT MAX(a=4) FROM t1));
+SELECT JSON_OBJECT('x',(SELECT MAX(a=3) FROM t1));
+SELECT JSON_OBJECT('x',(SELECT MAX(a=2) FROM t1));
+
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a)=4 FROM t1))='{"x": true}' THEN a END;
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a)=4 FROM t1))='{"x": false}' THEN a END;
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a)=3 FROM t1))='{"x": true}' THEN a END;
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a)=3 FROM t1))='{"x": false}' THEN a END;
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a)=2 FROM t1))='{"x": true}' THEN a END;
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a)=2 FROM t1))='{"x": false}' THEN a END;
+
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=4) FROM t1))='{"x": true}' THEN a END;
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=4) FROM t1))='{"x": false}' THEN a END;
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=3) FROM t1))='{"x": true}' THEN a END;
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=3) FROM t1))='{"x": false}' THEN a END;
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=2) FROM t1))='{"x": true}' THEN a END;
+SELECT * FROM t1 WHERE CASE WHEN JSON_OBJECT('x', (SELECT MAX(a=2) FROM t1))='{"x": false}' THEN a END;
+
+
+DROP TABLE t1;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
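
Note on the func_json changes above (MDEV-16351): with this patch, JSON_OBJECT() serialises boolean-valued hybrid expressions (COALESCE over TRUE/FALSE, MAX of a comparison, scalar subqueries returning a comparison) as JSON true/false rather than as 1/0. A minimal sketch of the behaviour the new tests encode, assuming a server with this change applied:

  CREATE TABLE t1 (a INT);
  INSERT INTO t1 VALUES (1),(2),(3);
  -- MAX(a) is 3, so the comparison is true and is rendered as a JSON boolean
  SELECT JSON_OBJECT('x', (SELECT MAX(a)=3 FROM t1));   -- {"x": true}
  SELECT JSON_OBJECT('x', (SELECT MAX(a)=2 FROM t1));   -- {"x": false}
  DROP TABLE t1;
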
diff --git a/mysql-test/main/func_like.result b/mysql-test/main/func_like.result
index 06a549e94b2..a937037167c 100644
--- a/mysql-test/main/func_like.result
+++ b/mysql-test/main/func_like.result
@@ -3,12 +3,12 @@ create table t1 (a varchar(10), key(a));
insert into t1 values ("a"),("abc"),("abcd"),("hello"),("test");
explain extended select * from t1 where a like 'abc%';
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 index a a 13 NULL 5 20.00 Using where; Using index
+1 SIMPLE t1 index a a 13 NULL 5 40.00 Using where; Using index
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` like 'abc%'
explain extended select * from t1 where a like concat('abc','%');
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 index a a 13 NULL 5 20.00 Using where; Using index
+1 SIMPLE t1 index a a 13 NULL 5 40.00 Using where; Using index
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` like <cache>(concat('abc','%'))
select * from t1 where a like "abc%";
diff --git a/mysql-test/main/func_misc.result b/mysql-test/main/func_misc.result
index 49f08356471..1d284e45545 100644
--- a/mysql-test/main/func_misc.result
+++ b/mysql-test/main/func_misc.result
@@ -1590,3 +1590,29 @@ SELECT * FROM t1;
c1 c2
18446744073709551615 18446744073709551615
DROP TABLE t1;
+#
+# End of 10.3 tests
+#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16309 Split ::create_tmp_field() into virtual methods in Item
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+BEGIN NOT ATOMIC
+DECLARE a TEXT;
+DECLARE c CURSOR FOR SELECT NAME_CONST('x','y') FROM t1;
+OPEN c;
+FETCH c INTO a;
+CLOSE c;
+SELECT a;
+END;
+$$
+a
+y
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/func_misc.test b/mysql-test/main/func_misc.test
index e4a2cd22f69..331293a9c95 100644
--- a/mysql-test/main/func_misc.test
+++ b/mysql-test/main/func_misc.test
@@ -1226,3 +1226,33 @@ CREATE TABLE t1 AS SELECT 18446744073709551615 AS c1, name_const('a',18446744073
SHOW CREATE TABLE t1;
SELECT * FROM t1;
DROP TABLE t1;
+--echo #
+--echo # End of 10.3 tests
+--echo #
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-16309 Split ::create_tmp_field() into virtual methods in Item
+--echo #
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+DELIMITER $$;
+BEGIN NOT ATOMIC
+ DECLARE a TEXT;
+ DECLARE c CURSOR FOR SELECT NAME_CONST('x','y') FROM t1;
+ OPEN c;
+ FETCH c INTO a;
+ CLOSE c;
+ SELECT a;
+END;
+$$
+DELIMITER ;$$
+DROP TABLE t1;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
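
Note on the func_misc changes above (MDEV-16309): the new test fetches NAME_CONST('x','y') through a cursor into a TEXT variable and expects 'y' back. The DELIMITER $$; / DELIMITER ;$$ lines are mysqltest syntax for switching the statement delimiter around the compound block; a sketch of the equivalent input for the interactive mysql client, assuming the same t1 table:

  DELIMITER $$
  BEGIN NOT ATOMIC
    DECLARE a TEXT;
    DECLARE c CURSOR FOR SELECT NAME_CONST('x','y') FROM t1;
    OPEN c;
    FETCH c INTO a;
    CLOSE c;
    SELECT a;  -- expected to return 'y', per the result file above
  END;
  $$
  DELIMITER ;
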
diff --git a/mysql-test/main/func_sapdb.result b/mysql-test/main/func_sapdb.result
index 5b9743fb33f..27f1d74bc41 100644
--- a/mysql-test/main/func_sapdb.result
+++ b/mysql-test/main/func_sapdb.result
@@ -107,9 +107,13 @@ subtime("1997-12-31 23:59:59.000001", "1 1:1:1.000002")
select addtime("1997-12-31 23:59:59.999999", "1998-01-01 01:01:01.999999");
addtime("1997-12-31 23:59:59.999999", "1998-01-01 01:01:01.999999")
NULL
+Warnings:
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '1998-01-01 01:01:01.999999'
select subtime("1997-12-31 23:59:59.999999", "1998-01-01 01:01:01.999999");
subtime("1997-12-31 23:59:59.999999", "1998-01-01 01:01:01.999999")
NULL
+Warnings:
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '1998-01-01 01:01:01.999999'
select subtime("01:00:00.999999", "02:00:00.999998");
subtime("01:00:00.999999", "02:00:00.999998")
-00:59:59.999999
@@ -180,7 +184,7 @@ select time("1997-12-31 25:59:59.000001");
time("1997-12-31 25:59:59.000001")
NULL
Warnings:
-Warning 1292 Truncated incorrect time value: '1997-12-31 25:59:59.000001'
+Warning 1292 Incorrect time value: '1997-12-31 25:59:59.000001'
select microsecond("1997-12-31 23:59:59.000001");
microsecond("1997-12-31 23:59:59.000001")
1
diff --git a/mysql-test/main/func_str.result b/mysql-test/main/func_str.result
index f1da28d10be..ecc8a4b353b 100644
--- a/mysql-test/main/func_str.result
+++ b/mysql-test/main/func_str.result
@@ -1423,7 +1423,7 @@ SELECT * FROM t1 INNER JOIN t2 ON code=id
WHERE id='a12' AND (LENGTH(code)=5 OR code < 'a00');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 const PRIMARY PRIMARY 12 const 1 100.00 Using index
-1 SIMPLE t1 ref code code 13 const 3 100.00 Using where; Using index
+1 SIMPLE t1 ref code code 13 const 4 100.00 Using where; Using index
Warnings:
Note 1003 select `test`.`t1`.`code` AS `code`,'a12' AS `id` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`code` = 'a12' and octet_length(`test`.`t1`.`code`) = 5
DROP TABLE t1,t2;
@@ -5032,3 +5032,15 @@ DROP TABLE t1;
#
# End of 10.3 tests
#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-18205 Assertion `str_length < len' failed in Binary_string::realloc_raw
+#
+SELECT GROUP_CONCAT( UpdateXML( '<a>new year</a>', '/a', '2019-01-01 00:00:00' ), ENCODE('text','pass') ) AS f;
+f
+2019-01-01 00:00:00F}^i
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/func_str.test b/mysql-test/main/func_str.test
index 64aca38c115..51b71cc9bda 100644
--- a/mysql-test/main/func_str.test
+++ b/mysql-test/main/func_str.test
@@ -1996,3 +1996,19 @@ DROP TABLE t1;
--echo #
--echo # End of 10.3 tests
--echo #
+
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-18205 Assertion `str_length < len' failed in Binary_string::realloc_raw
+--echo #
+
+SELECT GROUP_CONCAT( UpdateXML( '<a>new year</a>', '/a', '2019-01-01 00:00:00' ), ENCODE('text','pass') ) AS f;
+
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/func_time.result b/mysql-test/main/func_time.result
index 51f882aeafc..8546a7bc84b 100644
--- a/mysql-test/main/func_time.result
+++ b/mysql-test/main/func_time.result
@@ -26,7 +26,7 @@ select sec_to_time('9001.1'), sec_to_time('1234567890123.123');
sec_to_time('9001.1') sec_to_time('1234567890123.123')
02:30:01.100000 838:59:59.999999
Warnings:
-Warning 1292 Truncated incorrect time value: '1234567890123.123'
+Warning 1292 Truncated incorrect seconds value: '1234567890123.123000'
select sec_to_time(-9001.1), sec_to_time(-9001.1) / 1,
sec_to_time(-9001.1) / 1e0, sec_to_time(-9001) div 1;
sec_to_time(-9001.1) sec_to_time(-9001.1) / 1 sec_to_time(-9001.1) / 1e0 sec_to_time(-9001) div 1
@@ -35,13 +35,13 @@ select sec_to_time(90011e-1), sec_to_time(1234567890123e30);
sec_to_time(90011e-1) sec_to_time(1234567890123e30)
02:30:01.100000 838:59:59.999999
Warnings:
-Warning 1292 Truncated incorrect time value: '1.234567890123e42'
+Warning 1292 Truncated incorrect seconds value: '1.234567890123e42'
select sec_to_time(1234567890123), sec_to_time('99999999999999999999999999999');
sec_to_time(1234567890123) sec_to_time('99999999999999999999999999999')
838:59:59 838:59:59.999999
Warnings:
-Warning 1292 Truncated incorrect time value: '1234567890123'
-Warning 1292 Truncated incorrect time value: '99999999999999999999999999999'
+Warning 1292 Truncated incorrect seconds value: '1234567890123'
+Warning 1292 Truncated incorrect seconds value: '99999999999999999999999999999'
select now()-curdate()*1000000-curtime();
now()-curdate()*1000000-curtime()
0
@@ -367,9 +367,7 @@ extract(DAY_MINUTE FROM "02 10:11:12")
21011
select extract(DAY_SECOND FROM "225 10:11:12");
extract(DAY_SECOND FROM "225 10:11:12")
-34225959
-Warnings:
-Warning 1292 Truncated incorrect time value: '225 10:11:12'
+225101112
select extract(HOUR FROM "1999-01-02 10:11:12");
extract(HOUR FROM "1999-01-02 10:11:12")
10
@@ -584,6 +582,8 @@ from_unixtime(2147483647)
select from_unixtime(2147483648);
from_unixtime(2147483648)
NULL
+Warnings:
+Warning 1292 Truncated incorrect unixtime value: '2147483648'
select from_unixtime(0);
from_unixtime(0)
1970-01-01 03:00:00
@@ -593,6 +593,8 @@ unix_timestamp(from_unixtime(2147483647))
select unix_timestamp(from_unixtime(2147483648));
unix_timestamp(from_unixtime(2147483648))
NULL
+Warnings:
+Warning 1292 Truncated incorrect unixtime value: '2147483648'
select unix_timestamp('2039-01-20 01:00:00');
unix_timestamp('2039-01-20 01:00:00')
NULL
@@ -937,7 +939,7 @@ f1
select f1 from t1 where cast("2006-1-1" as date) between f1 and cast('zzz' as date);
f1
Warnings:
-Warning 1292 Incorrect datetime value: 'zzz'
+Warning 1292 Truncated incorrect datetime value: 'zzz'
select f1 from t1 where makedate(2006,1) between date(f1) and date(f3);
f1
2006-01-01
@@ -960,17 +962,17 @@ SELECT SEC_TO_TIME(3300000);
SEC_TO_TIME(3300000)
838:59:59
Warnings:
-Warning 1292 Truncated incorrect time value: '3300000'
+Warning 1292 Truncated incorrect seconds value: '3300000'
SELECT SEC_TO_TIME(3300000)+0;
SEC_TO_TIME(3300000)+0
8385959
Warnings:
-Warning 1292 Truncated incorrect time value: '3300000'
+Warning 1292 Truncated incorrect seconds value: '3300000'
SELECT SEC_TO_TIME(3600 * 4294967296);
SEC_TO_TIME(3600 * 4294967296)
838:59:59
Warnings:
-Warning 1292 Truncated incorrect time value: '15461882265600'
+Warning 1292 Truncated incorrect seconds value: '15461882265600'
SELECT TIME_TO_SEC('916:40:00');
TIME_TO_SEC('916:40:00')
3020399
@@ -1019,6 +1021,8 @@ NULL
SELECT MAKETIME(0, 0, 4294967296);
MAKETIME(0, 0, 4294967296)
NULL
+Warnings:
+Warning 1292 Truncated incorrect seconds value: '4294967296'
SELECT MAKETIME(CAST(-1 AS UNSIGNED), 0, 0);
MAKETIME(CAST(-1 AS UNSIGNED), 0, 0)
838:59:59
@@ -1027,9 +1031,7 @@ Note 1105 Cast to unsigned converted negative integer to it's positive complemen
Warning 1292 Truncated incorrect time value: '18446744073709551615:00:00'
SELECT EXTRACT(HOUR FROM '10000:02:03');
EXTRACT(HOUR FROM '10000:02:03')
-22
-Warnings:
-Warning 1292 Truncated incorrect time value: '10000:02:03'
+16
CREATE TABLE t1(f1 TIME);
INSERT IGNORE INTO t1 VALUES('916:00:00 a');
Warnings:
@@ -1044,8 +1046,7 @@ SEC_TO_TIME(CAST(-1 AS UNSIGNED))
838:59:59
Warnings:
Note 1105 Cast to unsigned converted negative integer to it's positive complement
-Note 1105 Cast to unsigned converted negative integer to it's positive complement
-Warning 1292 Truncated incorrect time value: '18446744073709551615'
+Warning 1292 Truncated incorrect seconds value: '18446744073709551615'
SET NAMES latin1;
SET character_set_results = NULL;
SHOW VARIABLES LIKE 'character_set_results';
@@ -1058,6 +1059,7 @@ fmtddate field2
Sep-4 12:00AM abcd
DROP TABLE testBug8868;
SET NAMES DEFAULT;
+SET TIMESTAMP=UNIX_TIMESTAMP('2001-01-01 11:22:33');
CREATE TABLE t1 (
a TIMESTAMP
);
@@ -1066,7 +1068,11 @@ SELECT 1 FROM t1 ORDER BY MAKETIME(1, 1, a);
1
1
1
+Warnings:
+Warning 1292 Truncated incorrect seconds value: '20010101112233'
+Warning 1292 Truncated incorrect seconds value: '20010101112233'
DROP TABLE t1;
+SET TIMESTAMP=DEFAULT;
(select time_format(timediff(now(), DATE_SUB(now(),INTERVAL 5 DAY)),'%H') As H)
union
(select time_format(timediff(now(), DATE_SUB(now(),INTERVAL 5 DAY)),'%H') As H);
@@ -1095,7 +1101,7 @@ week(20061108), week(20061108.01), week(20061108085411.000002);
isnull(week(now() + 0)) isnull(week(now() + 0.2)) week(20061108) week(20061108.01) week(20061108085411.000002)
0 0 45 45 45
Warnings:
-Warning 1292 Truncated incorrect datetime value: '20061108.01'
+Note 1292 Truncated incorrect date value: '20061108.01'
End of 4.1 tests
select time_format('100:00:00', '%H %k %h %I %l');
time_format('100:00:00', '%H %k %h %I %l')
@@ -1230,7 +1236,7 @@ str_to_date("1997-00-04 22:23:00","%Y-%m-%D") + interval 10 minute
NULL
Warnings:
Warning 1292 Truncated incorrect date value: '1997-00-04 22:23:00'
-Warning 1292 Incorrect datetime value: '1997-00-04'
+Warning 1292 Incorrect datetime value: '1997-00-04 00:00:00'
create table t1 (field DATE);
insert into t1 values ('2006-11-06');
select * from t1 where field < '2006-11-06 04:08:36.0';
@@ -1372,9 +1378,9 @@ SELECT COUNT(*) FROM t1 GROUP BY TIME_TO_SEC(a);
COUNT(*)
2
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: ''
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: ''
DROP TABLE t1;
#
# Bug#11766112 59151:UNINITIALIZED VALUES IN EXTRACT_DATE_TIME WITH STR_TO_DATE(SPACE(..) ...
@@ -1716,7 +1722,6 @@ min(timestampadd(month, 1>'', from_days('%Z')))
NULL
Warnings:
Warning 1292 Truncated incorrect INTEGER value: '%Z'
-Warning 1292 Truncated incorrect DOUBLE value: ''
Warning 1292 Incorrect datetime value: '0000-00-00'
SET timestamp=UNIX_TIMESTAMP('2001-01-01 00:00:00');
create table t1(a time);
@@ -1726,7 +1731,7 @@ select 1 from t1 where 1 < some (select cast(a as datetime) from t1);
1
1
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
drop table t1;
SET timestamp=DEFAULT;
#
@@ -1833,7 +1838,7 @@ select cast('131415.123e0' as time);
cast('131415.123e0' as time)
NULL
Warnings:
-Warning 1292 Truncated incorrect time value: '131415.123e0'
+Warning 1292 Incorrect time value: '131415.123e0'
select cast('2010-01-02 03:04:05' as datetime) between null and '2010-01-02 03:04:04';
cast('2010-01-02 03:04:05' as datetime) between null and '2010-01-02 03:04:04'
0
@@ -1909,7 +1914,7 @@ select least(1, f1) from t1;
least(1, f1)
0000-00-00 00:00:00
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
drop table t1;
SET timestamp=UNIX_TIMESTAMP('2014-04-14 10:10:10');
select now() > coalesce(time('21:43:24'), date('2010-05-03'));
@@ -2510,7 +2515,7 @@ TIMESTAMP('2001-01-01','10:10:10.12345'),
TIMESTAMP('2001-01-01','10:10:10.123456'),
TIMESTAMP('2001-01-01','10:10:10.1234567');
Warnings:
-Note 1292 Truncated incorrect time value: '10:10:10.1234567'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '10:10:10.1234567'
SHOW COLUMNS FROM t1;
Field Type Null Key Default Extra
TIMESTAMP('2001-01-01','10:10:10') datetime YES NULL
@@ -2631,7 +2636,7 @@ SELECT DATE_ADD('2001-01-01 10:20:30',INTERVAL 250000000000.0 SECOND) AS c1, DAT
c1 c2
9923-03-10 22:47:10.0 NULL
Warnings:
-Warning 1292 Truncated incorrect DECIMAL value: '2000000000000000000.0'
+Warning 1292 Truncated incorrect seconds value: '2000000000000000000.0'
#
# MDEV-4838 Wrong metadata for DATE_ADD('string', INVERVAL)
#
@@ -2790,13 +2795,12 @@ DO TO_DAYS(SEC_TO_TIME(MAKEDATE('',RAND(~('')))));
Warnings:
Warning 1292 Truncated incorrect INTEGER value: ''
Warning 1292 Truncated incorrect INTEGER value: ''
-Warning 1292 Truncated incorrect INTEGER value: ''
-Warning 1292 Truncated incorrect time value: '20000101'
+Warning 1292 Truncated incorrect seconds value: '20000101'
SELECT SEC_TO_TIME(MAKEDATE(0,RAND(~0)));
SEC_TO_TIME(MAKEDATE(0,RAND(~0)))
838:59:59
Warnings:
-Warning 1292 Truncated incorrect time value: '20000101'
+Warning 1292 Truncated incorrect seconds value: '20000101'
SELECT PERIOD_DIFF(2018, AES_ENCRYPT('Rae Bareli', 'Rae Bareli'));
PERIOD_DIFF(2018, AES_ENCRYPT('Rae Bareli', 'Rae Bareli'))
24257
@@ -3132,7 +3136,7 @@ def EXTRACT(YEAR_MONTH FROM a) 3 6 6 Y 32896 0 63
def EXTRACT(QUARTER FROM a) 3 2 1 Y 32896 0 63
def EXTRACT(MONTH FROM a) 3 2 2 Y 32896 0 63
def EXTRACT(WEEK FROM a) 3 2 2 Y 32896 0 63
-def EXTRACT(DAY FROM a) 3 2 2 Y 32896 0 63
+def EXTRACT(DAY FROM a) 3 3 2 Y 32896 0 63
def EXTRACT(DAY_HOUR FROM a) 3 5 4 Y 32896 0 63
def EXTRACT(DAY_MINUTE FROM a) 3 7 6 Y 32896 0 63
def EXTRACT(DAY_SECOND FROM a) 3 9 8 Y 32896 0 63
@@ -3222,7 +3226,7 @@ t2 CREATE TABLE `t2` (
`EXTRACT(QUARTER FROM a)` int(2) DEFAULT NULL,
`EXTRACT(MONTH FROM a)` int(2) DEFAULT NULL,
`EXTRACT(WEEK FROM a)` int(2) DEFAULT NULL,
- `EXTRACT(DAY FROM a)` int(2) DEFAULT NULL,
+ `EXTRACT(DAY FROM a)` int(3) DEFAULT NULL,
`EXTRACT(DAY_HOUR FROM a)` int(5) DEFAULT NULL,
`EXTRACT(DAY_MINUTE FROM a)` int(7) DEFAULT NULL,
`EXTRACT(DAY_SECOND FROM a)` int(9) DEFAULT NULL,
@@ -3271,7 +3275,7 @@ def EXTRACT(YEAR_MONTH FROM a) 3 6 1 Y 32896 0 63
def EXTRACT(QUARTER FROM a) 3 2 1 Y 32896 0 63
def EXTRACT(MONTH FROM a) 3 2 1 Y 32896 0 63
def EXTRACT(WEEK FROM a) 3 2 9 Y 32896 0 63
-def EXTRACT(DAY FROM a) 3 2 2 Y 32896 0 63
+def EXTRACT(DAY FROM a) 3 3 3 Y 32896 0 63
def EXTRACT(DAY_HOUR FROM a) 3 5 5 Y 32896 0 63
def EXTRACT(DAY_MINUTE FROM a) 3 7 7 Y 32896 0 63
def EXTRACT(DAY_SECOND FROM a) 3 9 9 Y 32896 0 63
@@ -3292,7 +3296,7 @@ EXTRACT(YEAR_MONTH FROM a) 0
EXTRACT(QUARTER FROM a) 0
EXTRACT(MONTH FROM a) 0
EXTRACT(WEEK FROM a) 613566757
-EXTRACT(DAY FROM a) 34
+EXTRACT(DAY FROM a) -34
EXTRACT(DAY_HOUR FROM a) -3422
EXTRACT(DAY_MINUTE FROM a) -342259
EXTRACT(DAY_SECOND FROM a) -34225959
@@ -3358,7 +3362,7 @@ EXTRACT(YEAR_MONTH FROM a) 0
EXTRACT(QUARTER FROM a) 0
EXTRACT(MONTH FROM a) 0
EXTRACT(WEEK FROM a) 613566757
-EXTRACT(DAY FROM a) 34
+EXTRACT(DAY FROM a) -34
EXTRACT(DAY_HOUR FROM a) -3422
EXTRACT(DAY_MINUTE FROM a) -342259
EXTRACT(DAY_SECOND FROM a) -34225959
@@ -3403,7 +3407,7 @@ t2 CREATE TABLE `t2` (
`EXTRACT(QUARTER FROM a)` int(2) DEFAULT NULL,
`EXTRACT(MONTH FROM a)` int(2) DEFAULT NULL,
`EXTRACT(WEEK FROM a)` int(2) DEFAULT NULL,
- `EXTRACT(DAY FROM a)` int(2) DEFAULT NULL,
+ `EXTRACT(DAY FROM a)` int(3) DEFAULT NULL,
`EXTRACT(DAY_HOUR FROM a)` int(5) DEFAULT NULL,
`EXTRACT(DAY_MINUTE FROM a)` int(7) DEFAULT NULL,
`EXTRACT(DAY_SECOND FROM a)` int(9) DEFAULT NULL,
@@ -3475,7 +3479,7 @@ NULL
NULL
NULL
Warnings:
-Warning 1292 Incorrect datetime value: '18446744073709551615'
+Warning 1292 Truncated incorrect datetime value: '18446744073709551615'
CREATE TABLE t2 (pk int default 1, a1 date);
INSERT INTO t2 VALUES (4,NULL);
CREATE view v2 as SELECT default(t1.pk), default(t2.pk), t1.pk from t1,t2;
@@ -3653,6 +3657,11 @@ ADDTIME(TIME'10:20:30', DATE'2001-01-01') AS c3,
ADDTIME(TIME'10:20:30', COALESCE(DATE'2001-01-01',TIMESTAMP'2001-01-01 00:00:00')) AS c4;
c1 c2 c3 c4
NULL NULL NULL NULL
+Warnings:
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '2001-01-01 00:00:00'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '2001-01-01 00:00:00'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '2001-01-01'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '2001-01-01 00:00:00'
SELECT
HOUR(TIMESTAMP'0000-00-01 10:00:00') AS h0,
TIME_TO_SEC(TIMESTAMP'0000-00-01 10:00:00') AS tts0,
@@ -3773,13 +3782,21 @@ SET @sav_slow_query_log= @@session.slow_query_log;
SET @@session.slow_query_log= ON;
SELECT current_timestamp(6),fn_sleep_before_now() INTO @ts_cur, @ts_func;
SELECT a FROM t_ts LIMIT 1 into @ts_func;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT a FROM t_trig LIMIT 1 into @ts_trig;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
DELETE FROM t_ts;
DELETE FROM t_trig;
SET @@session.slow_query_log= OFF;
SELECT current_timestamp(6),fn_sleep_before_now() INTO @ts_cur, @func_ts;
SELECT a FROM t_ts LIMIT 1 into @ts_func;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT a FROM t_trig LIMIT 1 into @ts_trig;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SET @@session.slow_query_log= @sav_slow_query_log;
DROP FUNCTION fn_sleep_before_now;
DROP TRIGGER trg_insert_t_ts;
@@ -3819,3 +3836,2512 @@ c1 c2 c3
#
# End of 10.3 tests
#
+#
+# MDEV-14032 SEC_TO_TIME executes side effect two times
+#
+SET @a=10000000;
+SELECT SEC_TO_TIME(@a:=@a+1);
+SEC_TO_TIME(@a:=@a+1)
+838:59:59
+Warnings:
+Warning 1292 Truncated incorrect seconds value: '10000001'
+SELECT @a;
+@a
+10000001
+CREATE TABLE t1 (a TEXT);
+CREATE FUNCTION f1() RETURNS INT
+BEGIN
+INSERT INTO t1 VALUES ('f1 was called');
+RETURN 10000000;
+END;
+$$
+SELECT SEC_TO_TIME(f1());
+SEC_TO_TIME(f1())
+838:59:59
+Warnings:
+Warning 1292 Truncated incorrect seconds value: '10000000'
+SELECT * FROM t1;
+a
+f1 was called
+DROP TABLE t1;
+DROP FUNCTION f1;
+#
+# MDEV-17351 MICROSECOND(XXX(int_number_out_of_range)) erroneously returns 999999
+#
+# Reject anything that's parsed as DATETIME or DATE
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2001-01-01 10:20:30'),
+('01-01-01 10:20:30'),
+('2001-01-01 '),
+('20010101102030'),
+('010101102030');
+SELECT ADDTIME(DATE'2001-01-01',a), a FROM t1;
+ADDTIME(DATE'2001-01-01',a) a
+NULL 2001-01-01 10:20:30
+NULL 01-01-01 10:20:30
+NULL 2001-01-01
+NULL 20010101102030
+NULL 010101102030
+Warnings:
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '2001-01-01 10:20:30'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '01-01-01 10:20:30'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '2001-01-01 '
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '20010101102030'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '010101102030'
+DROP TABLE t1;
+# GREATEST(decimal, time)
+SELECT
+GREATEST(8395959, TIME'00:00:00') AS c0,
+GREATEST(8395959.0, TIME'00:00:00') AS c1,
+GREATEST(8395959.00, TIME'00:00:00') AS c2,
+GREATEST(8395959.000, TIME'00:00:00') AS c3,
+GREATEST(8395959.0000, TIME'00:00:00') AS c4,
+GREATEST(8395959.00000, TIME'00:00:00') AS c5,
+GREATEST(8395959.000000, TIME'00:00:00') AS c6,
+GREATEST(8395959.0000000, TIME'00:00:00') AS c7;
+c0 838:59:59
+c1 838:59:59.9
+c2 838:59:59.99
+c3 838:59:59.999
+c4 838:59:59.9999
+c5 838:59:59.99999
+c6 838:59:59.999999
+c7 838:59:59.999999
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.0'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.00'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.0000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.00000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.000000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.0000000'
+SELECT
+MICROSECOND(GREATEST(8395959, TIME'00:00:00')) AS c0,
+MICROSECOND(GREATEST(8395959.0, TIME'00:00:00')) AS c1,
+MICROSECOND(GREATEST(8395959.00, TIME'00:00:00')) AS c2,
+MICROSECOND(GREATEST(8395959.000, TIME'00:00:00')) AS c3,
+MICROSECOND(GREATEST(8395959.0000, TIME'00:00:00')) AS c4,
+MICROSECOND(GREATEST(8395959.00000, TIME'00:00:00')) AS c5,
+MICROSECOND(GREATEST(8395959.000000, TIME'00:00:00')) AS c6,
+MICROSECOND(GREATEST(8395959.0000000, TIME'00:00:00')) AS c7;
+c0 0
+c1 900000
+c2 990000
+c3 999000
+c4 999900
+c5 999990
+c6 999999
+c7 999999
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.0'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.00'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.0000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.00000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.000000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959.0000000'
+SELECT
+CAST(GREATEST(8395959, TIME'00:00:00') AS SIGNED) AS ci,
+CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,0)) AS c0,
+CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,1)) AS c1,
+CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,2)) AS c2,
+CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,3)) AS c3,
+CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,4)) AS c4,
+CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,5)) AS c5,
+CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,6)) AS c6,
+CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,7)) AS c7;
+ci 8385959
+c0 8385959
+c1 8385959.0
+c2 8385959.00
+c3 8385959.000
+c4 8385959.0000
+c5 8385959.00000
+c6 8385959.000000
+c7 8385959.0000000
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+SELECT
+GREATEST(8395959, TIME'00:00:00') AS ci,
+GREATEST(8395959, TIME'00:00:00')+0 AS c0,
+GREATEST(8395959, TIME'00:00:00')+0.0 AS c1,
+GREATEST(8395959, TIME'00:00:00')+0.00 AS c2,
+GREATEST(8395959, TIME'00:00:00')+0.000 AS c3,
+GREATEST(8395959, TIME'00:00:00')+0.0000 AS c4,
+GREATEST(8395959, TIME'00:00:00')+0.00000 AS c5,
+GREATEST(8395959, TIME'00:00:00')+0.000000 AS c6,
+GREATEST(8395959, TIME'00:00:00')+0.0000000 AS c7;
+ci 838:59:59
+c0 8385959
+c1 8385959.0
+c2 8385959.00
+c3 8385959.000
+c4 8385959.0000
+c5 8385959.00000
+c6 8385959.000000
+c7 8385959.0000000
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '8395959'
+# GREATEST(string, time)
+SELECT
+GREATEST('839:59:59', TIME'00:00:00') AS ci,
+GREATEST('839:59:59.0', TIME'00:00:00') AS c1,
+GREATEST('839:59:59.00', TIME'00:00:00') AS c2,
+GREATEST('839:59:59.000', TIME'00:00:00') AS c3,
+GREATEST('839:59:59.0000', TIME'00:00:00') AS c4,
+GREATEST('839:59:59.00000', TIME'00:00:00') AS c5,
+GREATEST('839:59:59.000000', TIME'00:00:00') AS c6,
+GREATEST('839:59:59.0000000', TIME'00:00:00') AS c7;
+ci 838:59:59
+c1 838:59:59.9
+c2 838:59:59.99
+c3 838:59:59.999
+c4 838:59:59.9999
+c5 838:59:59.99999
+c6 838:59:59.999999
+c7 838:59:59.999999
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.0'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.00'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.0000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.00000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.000000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.0000000'
+SELECT
+MICROSECOND(GREATEST('839:59:59', TIME'00:00:00')) AS ci,
+MICROSECOND(GREATEST('839:59:59.0', TIME'00:00:00')) AS c1,
+MICROSECOND(GREATEST('839:59:59.00', TIME'00:00:00')) AS c2,
+MICROSECOND(GREATEST('839:59:59.000', TIME'00:00:00')) AS c3,
+MICROSECOND(GREATEST('839:59:59.0000', TIME'00:00:00')) AS c4,
+MICROSECOND(GREATEST('839:59:59.00000', TIME'00:00:00')) AS c5,
+MICROSECOND(GREATEST('839:59:59.000000', TIME'00:00:00')) AS c6,
+MICROSECOND(GREATEST('839:59:59.0000000', TIME'00:00:00')) AS c7;
+ci 0
+c1 900000
+c2 990000
+c3 999000
+c4 999900
+c5 999990
+c6 999999
+c7 999999
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.0'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.00'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.0000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.00000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.000000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59.0000000'
+SELECT
+CAST(GREATEST('839:59:59', TIME'00:00:00') AS SIGNED) AS ci,
+CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,0)) AS c0,
+CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,1)) AS c1,
+CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,2)) AS c2,
+CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,3)) AS c3,
+CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,4)) AS c4,
+CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,5)) AS c5,
+CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,6)) AS c6,
+CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,7)) AS c7;
+ci 8385959
+c0 8385959
+c1 8385959.0
+c2 8385959.00
+c3 8385959.000
+c4 8385959.0000
+c5 8385959.00000
+c6 8385959.000000
+c7 8385959.0000000
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+SELECT
+GREATEST('839:59:59', TIME'00:00:00') AS ci,
+GREATEST('839:59:59', TIME'00:00:00')+0 AS c0,
+GREATEST('839:59:59', TIME'00:00:00')+0.0 AS c1,
+GREATEST('839:59:59', TIME'00:00:00')+0.00 AS c2,
+GREATEST('839:59:59', TIME'00:00:00')+0.000 AS c3,
+GREATEST('839:59:59', TIME'00:00:00')+0.0000 AS c4,
+GREATEST('839:59:59', TIME'00:00:00')+0.00000 AS c5,
+GREATEST('839:59:59', TIME'00:00:00')+0.000000 AS c6,
+GREATEST('839:59:59', TIME'00:00:00')+0.0000000 AS c7;
+ci 838:59:59
+c0 8385959
+c1 8385959.0
+c2 8385959.00
+c3 8385959.000
+c4 8385959.0000
+c5 8385959.00000
+c6 8385959.000000
+c7 8385959.0000000
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+# ADDTIME(datetime, decimal)
+SELECT
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS c0,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.0) AS c1,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.00) AS c2,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.000) AS c3,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.0000) AS c4,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.00000) AS c5,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.000000) AS c6,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.0000000) AS c7;
+c0 2001-02-04 23:59:59
+c1 2001-02-04 23:59:59.0
+c2 2001-02-04 23:59:59.00
+c3 2001-02-04 23:59:59.000
+c4 2001-02-04 23:59:59.0000
+c5 2001-02-04 23:59:59.00000
+c6 2001-02-04 23:59:59.000000
+c7 2001-02-04 23:59:59.000000
+Warnings:
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '8395959.0000000'
+SELECT
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)) AS c0,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.0)) AS c1,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.00)) AS c2,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.000)) AS c3,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.0000)) AS c4,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.00000)) AS c5,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.000000)) AS c6,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.0000000)) AS c7;
+c0 0
+c1 0
+c2 0
+c3 0
+c4 0
+c5 0
+c6 0
+c7 0
+Warnings:
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '8395959.0000000'
+SELECT
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS SIGNED) AS ci,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,0)) AS c0,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,1)) AS c1,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,2)) AS c2,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,3)) AS c3,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,4)) AS c4,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,5)) AS c5,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,6)) AS c6,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,7)) AS c7;
+ci 20010204235959
+c0 20010204235959
+c1 20010204235959.0
+c2 20010204235959.00
+c3 20010204235959.000
+c4 20010204235959.0000
+c5 20010204235959.00000
+c6 20010204235959.000000
+c7 20010204235959.0000000
+SELECT
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS ci,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0 AS c0,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.0 AS c1,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.00 AS c2,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.000 AS c3,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.0000 AS c4,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.00000 AS c5,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.000000 AS c6,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.0000000 AS c7;
+ci 2001-02-04 23:59:59
+c0 20010204235959
+c1 20010204235959.0
+c2 20010204235959.00
+c3 20010204235959.000
+c4 20010204235959.0000
+c5 20010204235959.00000
+c6 20010204235959.000000
+c7 20010204235959.0000000
+# ADDTIME(datetime, string)
+SELECT
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS c0,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.0') AS c1,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.00') AS c2,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.000') AS c3,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.0000') AS c4,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.00000') AS c5,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.000000') AS c6,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.0000000') AS c7;
+c0 2001-02-04 23:59:59
+c1 2001-02-04 23:59:59.0
+c2 2001-02-04 23:59:59.00
+c3 2001-02-04 23:59:59.000
+c4 2001-02-04 23:59:59.0000
+c5 2001-02-04 23:59:59.00000
+c6 2001-02-04 23:59:59.000000
+c7 2001-02-04 23:59:59.000000
+Warnings:
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '839:59:59.0000000'
+SELECT
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')) AS c0,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.0')) AS c1,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.00')) AS c2,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.000')) AS c3,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.0000')) AS c4,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.00000')) AS c5,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.000000')) AS c6,
+MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.0000000')) AS c7;
+c0 0
+c1 0
+c2 0
+c3 0
+c4 0
+c5 0
+c6 0
+c7 0
+Warnings:
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '839:59:59.0000000'
+SELECT
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS SIGNED) AS ci,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,0)) AS c0,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,1)) AS c1,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,2)) AS c2,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,3)) AS c3,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,4)) AS c4,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,5)) AS c5,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,6)) AS c6,
+CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,7)) AS c7;
+ci 20010204235959
+c0 20010204235959
+c1 20010204235959.0
+c2 20010204235959.00
+c3 20010204235959.000
+c4 20010204235959.0000
+c5 20010204235959.00000
+c6 20010204235959.000000
+c7 20010204235959.0000000
+SELECT
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS ci,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0 AS c0,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.0 AS c1,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.00 AS c2,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.000 AS c3,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.0000 AS c4,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.00000 AS c5,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.000000 AS c6,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.0000000 AS c7;
+ci 2001-02-04 23:59:59
+c0 20010204235959
+c1 20010204235959.0
+c2 20010204235959.00
+c3 20010204235959.000
+c4 20010204235959.0000
+c5 20010204235959.00000
+c6 20010204235959.000000
+c7 20010204235959.0000000
+# ADDTIME(time, decimal)
+SELECT
+ADDTIME(TIME'00:00:00', 8395959) AS c0,
+ADDTIME(TIME'00:00:00', 8395959.0) AS c1,
+ADDTIME(TIME'00:00:00', 8395959.00) AS c2,
+ADDTIME(TIME'00:00:00', 8395959.000) AS c3,
+ADDTIME(TIME'00:00:00', 8395959.0000) AS c4,
+ADDTIME(TIME'00:00:00', 8395959.00000) AS c5,
+ADDTIME(TIME'00:00:00', 8395959.000000) AS c6,
+ADDTIME(TIME'00:00:00', 8395959.0000000) AS c7;
+c0 838:59:59
+c1 838:59:59.9
+c2 838:59:59.99
+c3 838:59:59.999
+c4 838:59:59.9999
+c5 838:59:59.99999
+c6 838:59:59.999999
+c7 838:59:59.999999
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '8395959.0000000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+SELECT
+MICROSECOND(ADDTIME(TIME'00:00:00', 8395959)) AS c0,
+MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.0)) AS c1,
+MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.00)) AS c2,
+MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.000)) AS c3,
+MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.0000)) AS c4,
+MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.00000)) AS c5,
+MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.000000)) AS c6,
+MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.0000000)) AS c7;
+c0 0
+c1 900000
+c2 990000
+c3 999000
+c4 999900
+c5 999990
+c6 999999
+c7 999999
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '8395959.0000000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+SELECT
+CAST(ADDTIME(TIME'00:00:00', 8395959) AS SIGNED) AS ci,
+CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,0)) AS c0,
+CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,1)) AS c1,
+CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,2)) AS c2,
+CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,3)) AS c3,
+CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,4)) AS c4,
+CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,5)) AS c5,
+CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,6)) AS c6,
+CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,7)) AS c7;
+ci 8385959
+c0 8385959
+c1 8385959.0
+c2 8385959.00
+c3 8385959.000
+c4 8385959.0000
+c5 8385959.00000
+c6 8385959.000000
+c7 8385959.0000000
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+SELECT
+ADDTIME(TIME'00:00:00', 8395959) AS ci,
+ADDTIME(TIME'00:00:00', 8395959)+0 AS c0,
+ADDTIME(TIME'00:00:00', 8395959)+0.0 AS c1,
+ADDTIME(TIME'00:00:00', 8395959)+0.00 AS c2,
+ADDTIME(TIME'00:00:00', 8395959)+0.000 AS c3,
+ADDTIME(TIME'00:00:00', 8395959)+0.0000 AS c4,
+ADDTIME(TIME'00:00:00', 8395959)+0.00000 AS c5,
+ADDTIME(TIME'00:00:00', 8395959)+0.000000 AS c6,
+ADDTIME(TIME'00:00:00', 8395959)+0.0000000 AS c7;
+ci 838:59:59
+c0 8385959
+c1 8385959.0
+c2 8385959.00
+c3 8385959.000
+c4 8385959.0000
+c5 8385959.00000
+c6 8385959.000000
+c7 8385959.0000000
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+# ADDTIME(time,string)
+SELECT
+ADDTIME(TIME'00:00:00', '839:59:59') AS c0,
+ADDTIME(TIME'00:00:00', '839:59:59.0') AS c1,
+ADDTIME(TIME'00:00:00', '839:59:59.00') AS c2,
+ADDTIME(TIME'00:00:00', '839:59:59.000') AS c3,
+ADDTIME(TIME'00:00:00', '839:59:59.0000') AS c4,
+ADDTIME(TIME'00:00:00', '839:59:59.00000') AS c5,
+ADDTIME(TIME'00:00:00', '839:59:59.000000') AS c6,
+ADDTIME(TIME'00:00:00', '839:59:59.0000000') AS c7;
+c0 838:59:59
+c1 838:59:59.9
+c2 838:59:59.99
+c3 838:59:59.999
+c4 838:59:59.9999
+c5 838:59:59.99999
+c6 838:59:59.999999
+c7 838:59:59.999999
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '839:59:59.0000000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+SELECT
+MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59')) AS c0,
+MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.0')) AS c1,
+MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.00')) AS c2,
+MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.000')) AS c3,
+MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.0000')) AS c4,
+MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.00000')) AS c5,
+MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.000000')) AS c6,
+MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.0000000')) AS c7;
+c0 0
+c1 900000
+c2 990000
+c3 999000
+c4 999900
+c5 999990
+c6 999999
+c7 999999
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '839:59:59.0000000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+SELECT
+CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS SIGNED) AS ci,
+CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,0)) AS c0,
+CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,1)) AS c1,
+CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,2)) AS c2,
+CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,3)) AS c3,
+CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,4)) AS c4,
+CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,5)) AS c5,
+CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,6)) AS c6,
+CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,7)) AS c7;
+ci 8385959
+c0 8385959
+c1 8385959.0
+c2 8385959.00
+c3 8385959.000
+c4 8385959.0000
+c5 8385959.00000
+c6 8385959.000000
+c7 8385959.0000000
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+SELECT
+ADDTIME(TIME'00:00:00', '839:59:59') AS ci,
+ADDTIME(TIME'00:00:00', '839:59:59')+0 AS c0,
+ADDTIME(TIME'00:00:00', '839:59:59')+0.0 AS c1,
+ADDTIME(TIME'00:00:00', '839:59:59')+0.00 AS c2,
+ADDTIME(TIME'00:00:00', '839:59:59')+0.000 AS c3,
+ADDTIME(TIME'00:00:00', '839:59:59')+0.0000 AS c4,
+ADDTIME(TIME'00:00:00', '839:59:59')+0.00000 AS c5,
+ADDTIME(TIME'00:00:00', '839:59:59')+0.000000 AS c6,
+ADDTIME(TIME'00:00:00', '839:59:59')+0.0000000 AS c7;
+ci 838:59:59
+c0 8385959
+c1 8385959.0
+c2 8385959.00
+c3 8385959.000
+c4 8385959.0000
+c5 8385959.00000
+c6 8385959.000000
+c7 8385959.0000000
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+# ADDTIME(int,int)
+SELECT
+ADDTIME(0, 8395959) AS c,
+MICROSECOND(ADDTIME(0, 8395959)) AS cm,
+CAST(ADDTIME(0, 8395959) AS SIGNED) AS cs_fixme_mdev_17384,
+CAST(ADDTIME(0, 8395959) AS DECIMAL(30,0)) AS cd300;
+c 838:59:59
+cm 0
+cs_fixme_mdev_17384 838
+cd300 8385959
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect INTEGER value: '838:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+SELECT
+ADDTIME(20010101000000, 8395959) AS c,
+MICROSECOND(ADDTIME(20010101000000, 8395959)) AS cm,
+CAST(ADDTIME(20010101000000, 8395959) AS SIGNED) AS cs_fixme_mdev_17384,
+CAST(ADDTIME(20010101000000, 8395959) AS DECIMAL(30,0)) AS cd300;
+c 2001-02-04 23:59:59
+cm 0
+cs_fixme_mdev_17384 2001
+cd300 20010204235959
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect INTEGER value: '2001-02-04 23:59:59'
+# ADDTIME(decimal,int)
+# 8385960 in cd300 is correct: addtime returns '838:59:59.9'
+# which is further *rounded* to a decimals(30,0)
+SELECT
+ADDTIME(0.0, 8395959) AS c,
+MICROSECOND(ADDTIME(0.0, 8395959)) AS cm,
+CAST(ADDTIME(0.0, 8395959) AS SIGNED) AS cs_fixme_mdev_17384,
+CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,0)) AS cd300;
+c 838:59:59.9
+cm 900000
+cs_fixme_mdev_17384 838
+cd300 8385960
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect INTEGER value: '838:59:59.9'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+SELECT
+ADDTIME(20010101000000.0, 8395959) AS c,
+MICROSECOND(ADDTIME(20010101000000.0, 8395959)) AS cm,
+CAST(ADDTIME(20010101000000.0, 8395959) AS SIGNED) AS cs_fixme_mdev_17384,
+CAST(ADDTIME(20010101000000.0, 8395959) AS DECIMAL(30,0)) AS cd300;
+c 2001-02-04 23:59:59.0
+cm 0
+cs_fixme_mdev_17384 2001
+cd300 20010204235959
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect INTEGER value: '2001-02-04 23:59:59.0'
+# ADDTIME(decimal,decimal)
+SELECT
+ADDTIME(0.0, 8395959.0) AS c1,
+ADDTIME(0.0, 8395959.00) AS c2,
+ADDTIME(0.0, 8395959.000) AS c3,
+ADDTIME(0.0, 8395959.0000) AS c4,
+ADDTIME(0.0, 8395959.00000) AS c5,
+ADDTIME(0.0, 8395959.000000) AS c6,
+ADDTIME(0.0, 8395959.0000000) AS c7;
+c1 838:59:59.9
+c2 838:59:59.99
+c3 838:59:59.999
+c4 838:59:59.9999
+c5 838:59:59.99999
+c6 838:59:59.999999
+c7 838:59:59.999999
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '8395959.0000000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+SELECT
+MICROSECOND(ADDTIME(0.0, 8395959.0)) AS c1,
+MICROSECOND(ADDTIME(0.0, 8395959.00)) AS c2,
+MICROSECOND(ADDTIME(0.0, 8395959.000)) AS c3,
+MICROSECOND(ADDTIME(0.0, 8395959.0000)) AS c4,
+MICROSECOND(ADDTIME(0.0, 8395959.00000)) AS c5,
+MICROSECOND(ADDTIME(0.0, 8395959.000000)) AS c6,
+MICROSECOND(ADDTIME(0.0, 8395959.0000000)) AS c7;
+c1 900000
+c2 990000
+c3 999000
+c4 999900
+c5 999990
+c6 999999
+c7 999999
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '8395959.0000000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+# 8385960 in c1 is correct: addtime returns '838:59:59.9'
+# which is further *rounded* to a decimals(30,0)
+SELECT
+CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,0)) AS c0,
+CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,1)) AS c1,
+CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,2)) AS c2,
+CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,3)) AS c3,
+CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,4)) AS c4,
+CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,5)) AS c5,
+CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,6)) AS c6,
+CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,7)) AS c7;
+c0 8385960
+c1 8385959.9
+c2 8385959.90
+c3 8385959.900
+c4 8385959.9000
+c5 8385959.90000
+c6 8385959.900000
+c7 8385959.9000000
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+SELECT
+ADDTIME(0.0, 8395959)+0 AS c0,
+ADDTIME(0.0, 8395959)+0.0 AS c1,
+ADDTIME(0.0, 8395959)+0.00 AS c2,
+ADDTIME(0.0, 8395959)+0.000 AS c3,
+ADDTIME(0.0, 8395959)+0.0000 AS c4,
+ADDTIME(0.0, 8395959)+0.00000 AS c5,
+ADDTIME(0.0, 8395959)+0.000000 AS c6,
+ADDTIME(0.0, 8395959)+0.0000000 AS c7;
+c0 8385959.9
+c1 8385959.9
+c2 8385959.90
+c3 8385959.900
+c4 8385959.9000
+c5 8385959.90000
+c6 8385959.900000
+c7 8385959.9000000
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '839:59:59'
+# TIMESTAMP(string,decimal)
+SELECT
+TIMESTAMP('2001-01-01', 8395959) AS ci,
+TIMESTAMP('2001-01-01', 8395959.0) AS c1,
+TIMESTAMP('2001-01-01', 8395959.00) AS c2,
+TIMESTAMP('2001-01-01', 8395959.000) AS c3,
+TIMESTAMP('2001-01-01', 8395959.0000) AS c4,
+TIMESTAMP('2001-01-01', 8395959.00000) AS c5,
+TIMESTAMP('2001-01-01', 8395959.000000) AS c6,
+TIMESTAMP('2001-01-01', 8395959.0000000) AS c7;
+ci 2001-02-04 23:59:59
+c1 2001-02-04 23:59:59.0
+c2 2001-02-04 23:59:59.00
+c3 2001-02-04 23:59:59.000
+c4 2001-02-04 23:59:59.0000
+c5 2001-02-04 23:59:59.00000
+c6 2001-02-04 23:59:59.000000
+c7 2001-02-04 23:59:59.000000
+Warnings:
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '8395959.0000000'
+SELECT
+MICROSECOND(TIMESTAMP('2001-01-01', 8395959)) AS ci,
+MICROSECOND(TIMESTAMP('2001-01-01', 8395959.0)) AS c1,
+MICROSECOND(TIMESTAMP('2001-01-01', 8395959.00)) AS c2,
+MICROSECOND(TIMESTAMP('2001-01-01', 8395959.000)) AS c3,
+MICROSECOND(TIMESTAMP('2001-01-01', 8395959.0000)) AS c4,
+MICROSECOND(TIMESTAMP('2001-01-01', 8395959.00000)) AS c5,
+MICROSECOND(TIMESTAMP('2001-01-01', 8395959.000000)) AS c6,
+MICROSECOND(TIMESTAMP('2001-01-01', 8395959.0000000)) AS c7;
+ci 0
+c1 0
+c2 0
+c3 0
+c4 0
+c5 0
+c6 0
+c7 0
+Warnings:
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '8395959.0000000'
+SELECT
+CAST(TIMESTAMP('2001-01-01', 8395959) AS SIGNED) AS ci,
+CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,0)) AS c0,
+CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,1)) AS c1,
+CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,2)) AS c2,
+CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,3)) AS c3,
+CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,4)) AS c4,
+CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,5)) AS c5,
+CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,6)) AS c6;
+ci 20010204235959
+c0 20010204235959
+c1 20010204235959.0
+c2 20010204235959.00
+c3 20010204235959.000
+c4 20010204235959.0000
+c5 20010204235959.00000
+c6 20010204235959.000000
+SELECT
+TIMESTAMP('2001-01-01', 8395959) AS ci,
+TIMESTAMP('2001-01-01', 8395959)+0 AS c0,
+TIMESTAMP('2001-01-01', 8395959)+0.0 AS c1,
+TIMESTAMP('2001-01-01', 8395959)+0.00 AS c2,
+TIMESTAMP('2001-01-01', 8395959)+0.000 AS c3,
+TIMESTAMP('2001-01-01', 8395959)+0.0000 AS c4,
+TIMESTAMP('2001-01-01', 8395959)+0.00000 AS c5,
+TIMESTAMP('2001-01-01', 8395959)+0.000000 AS c6,
+TIMESTAMP('2001-01-01', 8395959)+0.0000000 AS c7;
+ci 2001-02-04 23:59:59
+c0 20010204235959
+c1 20010204235959.0
+c2 20010204235959.00
+c3 20010204235959.000
+c4 20010204235959.0000
+c5 20010204235959.00000
+c6 20010204235959.000000
+c7 20010204235959.0000000
+# TIMESTAMP(string,string)
+SELECT
+TIMESTAMP('2001-01-01', '839:59:59') AS ci,
+TIMESTAMP('2001-01-01', '839:59:59.0') AS c1,
+TIMESTAMP('2001-01-01', '839:59:59.00') AS c2,
+TIMESTAMP('2001-01-01', '839:59:59.000') AS c3,
+TIMESTAMP('2001-01-01', '839:59:59.0000') AS c4,
+TIMESTAMP('2001-01-01', '839:59:59.00000') AS c5,
+TIMESTAMP('2001-01-01', '839:59:59.000000') AS c6,
+TIMESTAMP('2001-01-01', '839:59:59.0000000') AS c7;
+ci 2001-02-04 23:59:59
+c1 2001-02-04 23:59:59.0
+c2 2001-02-04 23:59:59.00
+c3 2001-02-04 23:59:59.000
+c4 2001-02-04 23:59:59.0000
+c5 2001-02-04 23:59:59.00000
+c6 2001-02-04 23:59:59.000000
+c7 2001-02-04 23:59:59.000000
+Warnings:
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '839:59:59.0000000'
+SELECT
+MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59')) AS ci,
+MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.0')) AS c1,
+MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.00')) AS c2,
+MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.000')) AS c3,
+MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.0000')) AS c4,
+MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.00000')) AS c5,
+MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.000000')) AS c6,
+MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.0000000')) AS c7;
+ci 0
+c1 0
+c2 0
+c3 0
+c4 0
+c5 0
+c6 0
+c7 0
+Warnings:
+Level Note
+Code 1292
+Message Truncated incorrect INTERVAL DAY TO SECOND value: '839:59:59.0000000'
+SELECT
+CAST(TIMESTAMP('2001-01-01', '839:59:59') AS SIGNED) AS ci,
+CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,0)) AS c0,
+CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,1)) AS c1,
+CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,2)) AS c2,
+CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,3)) AS c3,
+CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,4)) AS c4,
+CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,5)) AS c5,
+CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,6)) AS c6;
+ci 20010204235959
+c0 20010204235959
+c1 20010204235959.0
+c2 20010204235959.00
+c3 20010204235959.000
+c4 20010204235959.0000
+c5 20010204235959.00000
+c6 20010204235959.000000
+SELECT
+TIMESTAMP('2001-01-01', '839:59:59') AS ci,
+TIMESTAMP('2001-01-01', '839:59:59')+0 AS c0,
+TIMESTAMP('2001-01-01', '839:59:59')+0.0 AS c1,
+TIMESTAMP('2001-01-01', '839:59:59')+0.00 AS c2,
+TIMESTAMP('2001-01-01', '839:59:59')+0.000 AS c3,
+TIMESTAMP('2001-01-01', '839:59:59')+0.0000 AS c4,
+TIMESTAMP('2001-01-01', '839:59:59')+0.00000 AS c5,
+TIMESTAMP('2001-01-01', '839:59:59')+0.000000 AS c6,
+TIMESTAMP('2001-01-01', '839:59:59')+0.0000000 AS c7;
+ci 2001-02-04 23:59:59
+c0 20010204235959
+c1 20010204235959.0
+c2 20010204235959.00
+c3 20010204235959.000
+c4 20010204235959.0000
+c5 20010204235959.00000
+c6 20010204235959.000000
+c7 20010204235959.0000000
+# Corner cases for TIMESTAMP(timestamp,xxx)
+# HOUR is outside of the supported INTERVAL DAY TO SECOND range
+# Expect NULL with INTERVAL warnings
+CREATE TABLE t1 (a VARCHAR(64), b VARCHAR(64));
+INSERT INTO t1 VALUES ('4294967296:00:00', '178956970 16:00:00');
+INSERT INTO t1 VALUES ('4294967295:59:59', '178956970 15:59:59');
+INSERT INTO t1 VALUES ('4294967294:59:59', '178956970 14:59:59');
+INSERT INTO t1 VALUES ('87649416:00:00', '3652059 00:00:00');
+SELECT TIMESTAMP('0001-01-01 00:00:00', a) AS ta, TIMESTAMP('0001-01-01 00:00:00', b) AS tb FROM t1;
+ta tb
+NULL NULL
+NULL NULL
+NULL NULL
+NULL NULL
+Warnings:
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '4294967296:00:00'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '178956970 16:00:00'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '178956970 15:59:59'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '4294967294:59:59'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '178956970 14:59:59'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '87649416:00:00'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '3652059 00:00:00'
+DROP TABLE t1;
+CREATE TABLE t1 (a VARCHAR(64), b VARCHAR(64));
+INSERT INTO t1 VALUES ('-4294967296:00:00', '-178956970 16:00:00');
+INSERT INTO t1 VALUES ('-4294967295:59:59', '-178956970 15:59:59');
+INSERT INTO t1 VALUES ('-4294967294:59:59', '-178956970 14:59:59');
+INSERT INTO t1 VALUES ('-87649416:00:00', '-3652059 00:00:00');
+SELECT TIMESTAMP('9999-12-31 23:59:59', a) AS ta, TIMESTAMP('9999-12-31 23:59:59.999999', b) AS tb FROM t1;
+ta tb
+NULL NULL
+NULL NULL
+NULL NULL
+NULL NULL
+Warnings:
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '-4294967296:00:00'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '-178956970 16:00:00'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '-4294967295:59:59'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '-178956970 15:59:59'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '-4294967294:59:59'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '-178956970 14:59:59'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '-87649416:00:00'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '-3652059 00:00:00'
+DROP TABLE t1;
+# HOUR is OK
+# Expect max or near-max DATETIME value + no INTERVAL warnings
+CREATE TABLE t1 (a VARCHAR(64), b VARCHAR(64));
+INSERT INTO t1 VALUES ('87649415:59:59.999999', '3652058 23:59:59.999999');
+INSERT INTO t1 VALUES ('87649415:59:59', '3652058 23:59:59');
+SELECT TIMESTAMP('0001-01-01 00:00:00', a) AS ta, TIMESTAMP('0001-01-01 00:00:00', b) AS tb FROM t1;
+ta tb
+9999-12-31 23:59:59.999999 9999-12-31 23:59:59.999999
+9999-12-31 23:59:59.000000 9999-12-31 23:59:59.000000
+DROP TABLE t1;
+# HOUR is OK
+# Expect near '0001-01-01 00:00:00' DATETIME value + no INTERVAL warnings
+CREATE TABLE t1 (a VARCHAR(64), b VARCHAR(64));
+INSERT INTO t1 VALUES ('-87649415:59:59.999999', '-3652058 23:59:59.999999');
+INSERT INTO t1 VALUES ('-87649415:59:59', '-3652058 23:59:59');
+SELECT TIMESTAMP('9999-12-31 23:59:59', a) AS ta, TIMESTAMP('9999-12-31 23:59:59.999999', b) AS tb FROM t1;
+ta tb
+NULL 0001-01-01 00:00:00.000000
+0001-01-01 00:00:00.000000 0001-01-01 00:00:00.999999
+DROP TABLE t1;
+# HOUR is OK
+# Expect NULL on datetime arithmetic overflow + no INTERVAL warnings
+CREATE TABLE t1 (a VARCHAR(64), b VARCHAR(64));
+INSERT INTO t1 VALUES ('-00:00:00.000001', '-0 00:00:00.000001');
+SELECT TIMESTAMP('0001-01-01 00:00:00', a) AS ta, TIMESTAMP('0001-01-01 00:00:00', b) AS tb FROM t1;
+ta tb
+NULL NULL
+DROP TABLE t1;
+# Corner cases for ADDTIME(timestamp,xxx)
+# HOUR is outside of UINT_MAX32 range
+# Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+SELECT
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959) AS ci,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0 AS c0,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.0 AS c1,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.00 AS c2,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.000 AS c3,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.0000 AS c4,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.00000 AS c5,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.000000 AS c6,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+SELECT
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59') AS ci,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0 AS c0,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.0 AS c1,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.00 AS c2,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.000 AS c3,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.0000 AS c4,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.00000 AS c5,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.000000 AS c6,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+# HOUR UINT_MAX32
+# Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+SELECT
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959) AS ci,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0 AS c0,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.0 AS c1,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.00 AS c2,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.000 AS c3,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.0000 AS c4,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.00000 AS c5,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.000000 AS c6,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+SELECT
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59') AS ci,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0 AS c0,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.0 AS c1,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.00 AS c2,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.000 AS c3,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.0000 AS c4,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.00000 AS c5,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.000000 AS c6,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+# HOUR is max_useful_hour()+1
+# Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+SELECT
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959) AS ci,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0 AS c0,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.0 AS c1,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.00 AS c2,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.000 AS c3,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.0000 AS c4,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.00000 AS c5,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.000000 AS c6,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+SELECT
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59') AS ci,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0 AS c0,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.0 AS c1,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.00 AS c2,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.000 AS c3,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.0000 AS c4,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.00000 AS c5,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.000000 AS c6,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+# HOUR is max_useful_hour()
+# Expect NULL (calc_time_diff overflows) + no INTERVAL warnings
+SELECT
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959) AS ci,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0 AS c0,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.0 AS c1,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.00 AS c2,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.000 AS c3,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.0000 AS c4,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.00000 AS c5,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.000000 AS c6,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+SELECT
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59') AS ci,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0 AS c0,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.0 AS c1,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.00 AS c2,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.000 AS c3,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.0000 AS c4,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.00000 AS c5,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.000000 AS c6,
+ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+# HOUR is max_useful_hour()
+# Expect non-NULL + no warnings
+SELECT
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959) AS ci,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0 AS c0,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.0 AS c1,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.00 AS c2,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.000 AS c3,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.0000 AS c4,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.00000 AS c5,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.000000 AS c6,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.0000000 AS c7;
+ci 9999-12-31 23:59:59
+c0 99991231235959
+c1 99991231235959.0
+c2 99991231235959.00
+c3 99991231235959.000
+c4 99991231235959.0000
+c5 99991231235959.00000
+c6 99991231235959.000000
+c7 99991231235959.0000000
+SELECT
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59') AS ci,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0 AS c0,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.0 AS c1,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.00 AS c2,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.000 AS c3,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.0000 AS c4,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.00000 AS c5,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.000000 AS c6,
+ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.0000000 AS c7;
+ci 9999-12-31 23:59:59
+c0 99991231235959
+c1 99991231235959.0
+c2 99991231235959.00
+c3 99991231235959.000
+c4 99991231235959.0000
+c5 99991231235959.00000
+c6 99991231235959.000000
+c7 99991231235959.0000000
+# Corner cases for ADDTIME(time,xxx)
+# HOUR outside of UINT32 range
+# Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+SELECT
+ADDTIME(TIME'00:00:00', 42949672965959) AS ci,
+ADDTIME(TIME'00:00:00', 42949672965959)+0 AS c0,
+ADDTIME(TIME'00:00:00', 42949672965959)+0.0 AS c1,
+ADDTIME(TIME'00:00:00', 42949672965959)+0.00 AS c2,
+ADDTIME(TIME'00:00:00', 42949672965959)+0.000 AS c3,
+ADDTIME(TIME'00:00:00', 42949672965959)+0.0000 AS c4,
+ADDTIME(TIME'00:00:00', 42949672965959)+0.00000 AS c5,
+ADDTIME(TIME'00:00:00', 42949672965959)+0.000000 AS c6,
+ADDTIME(TIME'00:00:00', 42949672965959)+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672965959'
+SELECT
+ADDTIME(TIME'00:00:00', '4294967296:59:59') AS ci,
+ADDTIME(TIME'00:00:00', '4294967296:59:59')+0 AS c0,
+ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.0 AS c1,
+ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.00 AS c2,
+ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.000 AS c3,
+ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.0000 AS c4,
+ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.00000 AS c5,
+ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.000000 AS c6,
+ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967296:59:59'
+# HOUR is UINT_MAX32 (outside of INTERVAL DAY TO SECOND range)
+# Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+SELECT
+ADDTIME(TIME'00:00:00', 42949672955959) AS ci,
+ADDTIME(TIME'00:00:00', 42949672955959)+0 AS c0,
+ADDTIME(TIME'00:00:00', 42949672955959)+0.0 AS c1,
+ADDTIME(TIME'00:00:00', 42949672955959)+0.00 AS c2,
+ADDTIME(TIME'00:00:00', 42949672955959)+0.000 AS c3,
+ADDTIME(TIME'00:00:00', 42949672955959)+0.0000 AS c4,
+ADDTIME(TIME'00:00:00', 42949672955959)+0.00000 AS c5,
+ADDTIME(TIME'00:00:00', 42949672955959)+0.000000 AS c6,
+ADDTIME(TIME'00:00:00', 42949672955959)+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '42949672955959'
+SELECT
+ADDTIME(TIME'00:00:00', '4294967295:59:59') AS ci,
+ADDTIME(TIME'00:00:00', '4294967295:59:59')+0 AS c0,
+ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.0 AS c1,
+ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.00 AS c2,
+ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.000 AS c3,
+ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.0000 AS c4,
+ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.00000 AS c5,
+ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.000000 AS c6,
+ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '4294967295:59:59'
+# HOUR is max_useful_hour()+1 (outside of INTERVAL DAY TO SECOND range)
+# Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+SELECT
+ADDTIME(TIME'00:00:00', 876494165959) AS ci,
+ADDTIME(TIME'00:00:00', 876494165959)+0 AS c0,
+ADDTIME(TIME'00:00:00', 876494165959)+0.0 AS c1,
+ADDTIME(TIME'00:00:00', 876494165959)+0.00 AS c2,
+ADDTIME(TIME'00:00:00', 876494165959)+0.000 AS c3,
+ADDTIME(TIME'00:00:00', 876494165959)+0.0000 AS c4,
+ADDTIME(TIME'00:00:00', 876494165959)+0.00000 AS c5,
+ADDTIME(TIME'00:00:00', 876494165959)+0.000000 AS c6,
+ADDTIME(TIME'00:00:00', 876494165959)+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+SELECT
+ADDTIME(TIME'00:00:00', '87649416:59:59') AS ci,
+ADDTIME(TIME'00:00:00', '87649416:59:59')+0 AS c0,
+ADDTIME(TIME'00:00:00', '87649416:59:59')+0.0 AS c1,
+ADDTIME(TIME'00:00:00', '87649416:59:59')+0.00 AS c2,
+ADDTIME(TIME'00:00:00', '87649416:59:59')+0.000 AS c3,
+ADDTIME(TIME'00:00:00', '87649416:59:59')+0.0000 AS c4,
+ADDTIME(TIME'00:00:00', '87649416:59:59')+0.00000 AS c5,
+ADDTIME(TIME'00:00:00', '87649416:59:59')+0.000000 AS c6,
+ADDTIME(TIME'00:00:00', '87649416:59:59')+0.0000000 AS c7;
+ci NULL
+c0 NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+c7 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+# HOUR is max_useful_hour()+1 (outside of INTERVAL DAY TO SECOND range)
+# Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+SELECT
+ADDTIME(TIME'-838:59:59', 876494165959) AS ci,
+ADDTIME(TIME'-838:59:59.9', 876494165959) AS c1,
+ADDTIME(TIME'-838:59:59.99', 876494165959) AS c2,
+ADDTIME(TIME'-838:59:59.999', 876494165959) AS c3,
+ADDTIME(TIME'-838:59:59.9999', 876494165959) AS c4,
+ADDTIME(TIME'-838:59:59.99999', 876494165959) AS c5,
+ADDTIME(TIME'-838:59:59.999999', 876494165959) AS c6;
+ci NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '876494165959'
+SELECT
+ADDTIME(TIME'-838:59:59', '87649416:59:59') AS ci,
+ADDTIME(TIME'-838:59:59.9', '87649416:59:59') AS c1,
+ADDTIME(TIME'-838:59:59.99', '87649416:59:59') AS c2,
+ADDTIME(TIME'-838:59:59.999', '87649416:59:59') AS c3,
+ADDTIME(TIME'-838:59:59.9999', '87649416:59:59') AS c4,
+ADDTIME(TIME'-838:59:59.99999', '87649416:59:59') AS c5,
+ADDTIME(TIME'-838:59:59.999999', '87649416:59:59') AS c6;
+ci NULL
+c1 NULL
+c2 NULL
+c3 NULL
+c4 NULL
+c5 NULL
+c6 NULL
+Warnings:
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+Level Warning
+Code 1292
+Message Incorrect INTERVAL DAY TO SECOND value: '87649416:59:59'
+SELECT
+ADDTIME(TIME'-838:59:59.9999999', '87649416:59:59') AS c7;
+c7 NULL
+# HOUR is max_useful_hour() (inside INTERVAL DAY TO SECOND range)
+# Expect max TIME(0) + zero fraction + TIME warnings + no INTERVAL warnings
+SELECT
+ADDTIME(TIME'00:00:00', 876494155959) AS ci,
+ADDTIME(TIME'00:00:00', 876494155959)+0 AS c0,
+ADDTIME(TIME'00:00:00', 876494155959)+0.0 AS c1,
+ADDTIME(TIME'00:00:00', 876494155959)+0.00 AS c2,
+ADDTIME(TIME'00:00:00', 876494155959)+0.000 AS c3,
+ADDTIME(TIME'00:00:00', 876494155959)+0.0000 AS c4,
+ADDTIME(TIME'00:00:00', 876494155959)+0.00000 AS c5,
+ADDTIME(TIME'00:00:00', 876494155959)+0.000000 AS c6,
+ADDTIME(TIME'00:00:00', 876494155959)+0.0000000 AS c7;
+ci 838:59:59
+c0 8385959
+c1 8385959.0
+c2 8385959.00
+c3 8385959.000
+c4 8385959.0000
+c5 8385959.00000
+c6 8385959.000000
+c7 8385959.0000000
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+SELECT
+ADDTIME(TIME'00:00:00', '87649415:59:59') AS ci,
+ADDTIME(TIME'00:00:00', '87649415:59:59')+0 AS c0,
+ADDTIME(TIME'00:00:00', '87649415:59:59')+0.0 AS c1,
+ADDTIME(TIME'00:00:00', '87649415:59:59')+0.00 AS c2,
+ADDTIME(TIME'00:00:00', '87649415:59:59')+0.000 AS c3,
+ADDTIME(TIME'00:00:00', '87649415:59:59')+0.0000 AS c4,
+ADDTIME(TIME'00:00:00', '87649415:59:59')+0.00000 AS c5,
+ADDTIME(TIME'00:00:00', '87649415:59:59')+0.000000 AS c6,
+ADDTIME(TIME'00:00:00', '87649415:59:59')+0.0000000 AS c7;
+ci 838:59:59
+c0 8385959
+c1 8385959.0
+c2 8385959.00
+c3 8385959.000
+c4 8385959.0000
+c5 8385959.00000
+c6 8385959.000000
+c7 8385959.0000000
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87649415:59:59'
+# HOUR is max_useful_hour() (inside INTERVAL DAY TO SECOND range)
+# Expect max TIME(N) + TIME warnings + no INTERVAL warnings
+SELECT
+ADDTIME(TIME'-838:59:59', 876494155959) AS ci,
+ADDTIME(TIME'-838:59:59.9', 876494155959) AS c1,
+ADDTIME(TIME'-838:59:59.99', 876494155959) AS c2,
+ADDTIME(TIME'-838:59:59.999', 876494155959) AS c3,
+ADDTIME(TIME'-838:59:59.9999', 876494155959) AS c4,
+ADDTIME(TIME'-838:59:59.99999', 876494155959) AS c5,
+ADDTIME(TIME'-838:59:59.999999', 876494155959) AS c6;
+ci 838:59:59
+c1 838:59:59.9
+c2 838:59:59.99
+c3 838:59:59.999
+c4 838:59:59.9999
+c5 838:59:59.99999
+c6 838:59:59.999999
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648577:00:00'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648576:59:59.100000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648576:59:59.010000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648576:59:59.001000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648576:59:59.000100'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648576:59:59.000010'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648576:59:59.000001'
+SELECT
+ADDTIME(TIME'-838:59:59', '87649415:59:59') AS ci,
+ADDTIME(TIME'-838:59:59.9', '87649415:59:59') AS c1,
+ADDTIME(TIME'-838:59:59.99', '87649415:59:59') AS c2,
+ADDTIME(TIME'-838:59:59.999', '87649415:59:59') AS c3,
+ADDTIME(TIME'-838:59:59.9999', '87649415:59:59') AS c4,
+ADDTIME(TIME'-838:59:59.99999', '87649415:59:59') AS c5,
+ADDTIME(TIME'-838:59:59.999999', '87649415:59:59') AS c6;
+ci 838:59:59
+c1 838:59:59.9
+c2 838:59:59.99
+c3 838:59:59.999
+c4 838:59:59.9999
+c5 838:59:59.99999
+c6 838:59:59.999999
+Warnings:
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648577:00:00'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648576:59:59.100000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648576:59:59.010000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648576:59:59.001000'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648576:59:59.000100'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648576:59:59.000010'
+Level Warning
+Code 1292
+Message Truncated incorrect time value: '87648576:59:59.000001'
+SELECT
+ADDTIME(TIME'-838:59:59.9999999', '87649415:59:59') AS c7;
+c7 838:59:59.999999
+#
+# MDEV-17400 The result of TIME('42949672965959-01') depends on architecture
+#
+SELECT TIME('42949672955959-01'), TIME('42949672965959-01');
+TIME('42949672955959-01') TIME('42949672965959-01')
+NULL NULL
+Warnings:
+Warning 1292 Incorrect time value: '42949672955959-01'
+Warning 1292 Incorrect time value: '42949672965959-01'
+SELECT TIME('18446744073709551615-01'), TIME('18446744073709551616-01');
+TIME('18446744073709551615-01') TIME('18446744073709551616-01')
+NULL NULL
+Warnings:
+Warning 1292 Incorrect time value: '18446744073709551615-01'
+Warning 1292 Incorrect time value: '18446744073709551616-01'
+#
+# MDEV-17434 EXTRACT(DAY FROM negative_time) returns wrong result
+#
+CREATE TABLE t1 (a TIME(6));
+INSERT INTO t1 VALUES ('-24:10:10.10');
+SELECT
+EXTRACT(MINUTE FROM a),
+EXTRACT(SECOND FROM a),
+EXTRACT(MICROSECOND FROM a),
+EXTRACT(DAY FROM a),
+EXTRACT(DAY_HOUR FROM a),
+EXTRACT(DAY_MINUTE FROM a),
+EXTRACT(DAY_SECOND FROM a),
+EXTRACT(DAY_MICROSECOND FROM a)
+FROM t1;
+EXTRACT(MINUTE FROM a) EXTRACT(SECOND FROM a) EXTRACT(MICROSECOND FROM a) EXTRACT(DAY FROM a) EXTRACT(DAY_HOUR FROM a) EXTRACT(DAY_MINUTE FROM a) EXTRACT(DAY_SECOND FROM a) EXTRACT(DAY_MICROSECOND FROM a)
+-10 -10 -100000 -1 -100 -10010 -1001010 -1001010100000
+CREATE TABLE t2 AS
+SELECT
+EXTRACT(MINUTE FROM a),
+EXTRACT(SECOND FROM a),
+EXTRACT(MICROSECOND FROM a),
+EXTRACT(DAY FROM a),
+EXTRACT(DAY_HOUR FROM a),
+EXTRACT(DAY_MINUTE FROM a),
+EXTRACT(DAY_SECOND FROM a),
+EXTRACT(DAY_MICROSECOND FROM a)
+FROM t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `EXTRACT(MINUTE FROM a)` int(3) DEFAULT NULL,
+ `EXTRACT(SECOND FROM a)` int(3) DEFAULT NULL,
+ `EXTRACT(MICROSECOND FROM a)` int(7) DEFAULT NULL,
+ `EXTRACT(DAY FROM a)` int(3) DEFAULT NULL,
+ `EXTRACT(DAY_HOUR FROM a)` int(5) DEFAULT NULL,
+ `EXTRACT(DAY_MINUTE FROM a)` int(7) DEFAULT NULL,
+ `EXTRACT(DAY_SECOND FROM a)` int(9) DEFAULT NULL,
+ `EXTRACT(DAY_MICROSECOND FROM a)` bigint(15) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t2;
+DROP TABLE t1;
+#
+# MDEV-17478 Wrong result for TIME('+100:20:30')
+#
+SELECT TIME('+100:20:30');
+TIME('+100:20:30')
+100:20:30
+#
+# MDEV-17477 Wrong result for TIME('-2001-01-01 10:20:30')
+#
+SELECT TIME('-2001-01-01 10:20:30');
+TIME('-2001-01-01 10:20:30')
+NULL
+Warnings:
+Warning 1292 Incorrect time value: '-2001-01-01 10:20:30'
+SELECT TIME('2001-01-01') AS c1, TIME('2001-01-01 ') AS c2;
+c1 c2
+00:20:01 00:20:01
+Warnings:
+Warning 1292 Truncated incorrect time value: '2001-01-01'
+Warning 1292 Truncated incorrect time value: '2001-01-01 '
+SELECT TIME('0001:01:01 '), TIME('0001:01:01 ');
+TIME('0001:01:01 ') TIME('0001:01:01 ')
+01:01:01 01:01:01
+SELECT TIME('1 2'), TIME('1 2 ');
+TIME('1 2') TIME('1 2 ')
+00:00:01 00:00:01
+Warnings:
+Warning 1292 Truncated incorrect time value: '1 2'
+Warning 1292 Truncated incorrect time value: '1 2 '
+SELECT TIME('2001-01-01T'), TIME('2001-01-01T ');
+TIME('2001-01-01T') TIME('2001-01-01T ')
+00:00:00 00:00:00
+SELECT TIME('901-01-01T1'), TIME('901-01-01T10');
+TIME('901-01-01T1') TIME('901-01-01T10')
+01:00:00 10:00:00
+SELECT TIME('091-01-01T1'), TIME('091-01-01T10');
+TIME('091-01-01T1') TIME('091-01-01T10')
+01:00:00 10:00:00
+SELECT TIME('0001:01:01x'), TIME('0001:01:01xx');
+TIME('0001:01:01x') TIME('0001:01:01xx')
+01:01:01 01:01:01
+Warnings:
+Warning 1292 Truncated incorrect time value: '0001:01:01x'
+Warning 1292 Truncated incorrect time value: '0001:01:01xx'
+SELECT TIME('0001:01:01.'), TIME('0001:01:01..');
+TIME('0001:01:01.') TIME('0001:01:01..')
+01:01:01 01:01:01
+Warnings:
+Warning 1292 Truncated incorrect time value: '0001:01:01..'
+SELECT TIME('0001:01:01-'), TIME('0001:01:01--');
+TIME('0001:01:01-') TIME('0001:01:01--')
+01:01:01 01:01:01
+Warnings:
+Warning 1292 Truncated incorrect time value: '0001:01:01-'
+Warning 1292 Truncated incorrect time value: '0001:01:01--'
+SELECT TIME('-xxx'), TIME('-xxxxxxxxxxxxxxxxxxxx');
+TIME('-xxx') TIME('-xxxxxxxxxxxxxxxxxxxx')
+NULL NULL
+Warnings:
+Warning 1292 Incorrect time value: '-xxx'
+Warning 1292 Incorrect time value: '-xxxxxxxxxxxxxxxxxxxx'
+SELECT TIME('- '), TIME('- ');
+TIME('- ') TIME('- ')
+NULL NULL
+Warnings:
+Warning 1292 Incorrect time value: '- '
+Warning 1292 Incorrect time value: '- '
+SELECT TIME('-'), TIME('-');
+TIME('-') TIME('-')
+NULL NULL
+Warnings:
+Warning 1292 Incorrect time value: '-'
+Warning 1292 Incorrect time value: '-'
+SELECT TIME('1-1-1 1:1:1'), TIME('1-1-1 1:1:1.0');
+TIME('1-1-1 1:1:1') TIME('1-1-1 1:1:1.0')
+01:01:01 01:01:01.0
+SELECT TIME('1-1-1 1:2:3'), TIME('1-1-1 1:2:3.0');
+TIME('1-1-1 1:2:3') TIME('1-1-1 1:2:3.0')
+01:02:03 01:02:03.0
+SELECT
+CAST('20050326112233 garbage' as datetime),
+CAST('20050326 garbage' as date),
+CAST('50326 garbage' as time);
+CAST('20050326112233 garbage' as datetime) CAST('20050326 garbage' as date) CAST('50326 garbage' as time)
+2005-03-26 11:22:33 2005-03-26 05:03:26
+Warnings:
+Warning 1292 Truncated incorrect datetime value: '20050326112233 garbage'
+Warning 1292 Truncated incorrect date value: '20050326 garbage'
+Warning 1292 Truncated incorrect time value: '50326 garbage'
+SELECT TIME('- 01:00:00'), TIME('- 1 01:00:00');
+TIME('- 01:00:00') TIME('- 1 01:00:00')
+-01:00:00 -25:00:00
+#
+# MDEV-17854 Assertion `decimals <= 6' failed in my_time_fraction_remainder on SELECT with NULLIF and FROM_UNIXTIME on incorrect time
+#
+SET time_zone='+00:00';
+SELECT NULLIF(FROM_UNIXTIME('foo'), '2012-12-12 21:10:14');
+NULLIF(FROM_UNIXTIME('foo'), '2012-12-12 21:10:14')
+1970-01-01 00:00:00
+Warnings:
+Warning 1292 Truncated incorrect DECIMAL value: 'foo'
+Warning 1292 Truncated incorrect DECIMAL value: 'foo'
+SET time_zone=DEFAULT;
+#
+# MDEV-18402 Assertion `sec.sec() <= 59' failed in Item_func_maketime::get_date
+#
+SELECT MAKETIME('01', '01', LEAST( -100, NULL ));
+MAKETIME('01', '01', LEAST( -100, NULL ))
+NULL
+SELECT CONCAT(MAKETIME('01', '01', LEAST( -100, NULL )));
+CONCAT(MAKETIME('01', '01', LEAST( -100, NULL )))
+NULL
diff --git a/mysql-test/main/func_time.test b/mysql-test/main/func_time.test
index 232cb16939e..5760553e0dd 100644
--- a/mysql-test/main/func_time.test
+++ b/mysql-test/main/func_time.test
@@ -564,12 +564,15 @@ SET NAMES DEFAULT;
# Bug #31160: MAKETIME() crashes server when returning NULL in ORDER BY using
# filesort
#
+SET TIMESTAMP=UNIX_TIMESTAMP('2001-01-01 11:22:33');
CREATE TABLE t1 (
a TIMESTAMP
);
INSERT INTO t1 VALUES (now()), (now());
SELECT 1 FROM t1 ORDER BY MAKETIME(1, 1, a);
DROP TABLE t1;
+SET TIMESTAMP=DEFAULT;
+
#
# Bug #19844 time_format in Union truncates values
#
@@ -2274,3 +2277,896 @@ SELECT
--echo #
--echo # End of 10.3 tests
--echo #
+
+--echo #
+--echo # MDEV-14032 SEC_TO_TIME executes side effect two times
+--echo #
+
+SET @a=10000000;
+SELECT SEC_TO_TIME(@a:=@a+1);
+SELECT @a;
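+# The assignment inside SEC_TO_TIME() is the side effect under test: evaluated
+# exactly once, it should leave @a at 10000001, while the double evaluation
+# reported in MDEV-14032 would leave it at 10000002.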
+
+CREATE TABLE t1 (a TEXT);
+DELIMITER $$;
+CREATE FUNCTION f1() RETURNS INT
+BEGIN
+ INSERT INTO t1 VALUES ('f1 was called');
+ RETURN 10000000;
+END;
+$$
+DELIMITER ;$$
+SELECT SEC_TO_TIME(f1());
+SELECT * FROM t1;
+DROP TABLE t1;
+DROP FUNCTION f1;
+
+--echo #
+--echo # MDEV-17351 MICROSECOND(XXX(int_number_out_of_range)) erroneously returns 999999
+--echo #
+
+--echo # Reject anything that's parsed as DATETIME or DATE
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2001-01-01 10:20:30'),
+('01-01-01 10:20:30'),
+('2001-01-01 '),
+('20010101102030'),
+('010101102030');
+SELECT ADDTIME(DATE'2001-01-01',a), a FROM t1;
+DROP TABLE t1;
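+# All five values above are parsed as DATE or DATETIME rather than as a time
+# interval, so the second ADDTIME() argument is expected to be rejected
+# (presumably NULL plus a warning) rather than being misread as a time value.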
+
+
+--vertical_results
+
+--echo # GREATEST(decimal, time)
+
+SELECT
+ GREATEST(8395959, TIME'00:00:00') AS c0,
+ GREATEST(8395959.0, TIME'00:00:00') AS c1,
+ GREATEST(8395959.00, TIME'00:00:00') AS c2,
+ GREATEST(8395959.000, TIME'00:00:00') AS c3,
+ GREATEST(8395959.0000, TIME'00:00:00') AS c4,
+ GREATEST(8395959.00000, TIME'00:00:00') AS c5,
+ GREATEST(8395959.000000, TIME'00:00:00') AS c6,
+ GREATEST(8395959.0000000, TIME'00:00:00') AS c7;
+
+SELECT
+ MICROSECOND(GREATEST(8395959, TIME'00:00:00')) AS c0,
+ MICROSECOND(GREATEST(8395959.0, TIME'00:00:00')) AS c1,
+ MICROSECOND(GREATEST(8395959.00, TIME'00:00:00')) AS c2,
+ MICROSECOND(GREATEST(8395959.000, TIME'00:00:00')) AS c3,
+ MICROSECOND(GREATEST(8395959.0000, TIME'00:00:00')) AS c4,
+ MICROSECOND(GREATEST(8395959.00000, TIME'00:00:00')) AS c5,
+ MICROSECOND(GREATEST(8395959.000000, TIME'00:00:00')) AS c6,
+ MICROSECOND(GREATEST(8395959.0000000, TIME'00:00:00')) AS c7;
+
+SELECT
+ CAST(GREATEST(8395959, TIME'00:00:00') AS SIGNED) AS ci,
+ CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,0)) AS c0,
+ CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,1)) AS c1,
+ CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,2)) AS c2,
+ CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,3)) AS c3,
+ CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,4)) AS c4,
+ CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,5)) AS c5,
+ CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,6)) AS c6,
+ CAST(GREATEST(8395959, TIME'00:00:00') AS DECIMAL(30,7)) AS c7;
+
+SELECT
+ GREATEST(8395959, TIME'00:00:00') AS ci,
+ GREATEST(8395959, TIME'00:00:00')+0 AS c0,
+ GREATEST(8395959, TIME'00:00:00')+0.0 AS c1,
+ GREATEST(8395959, TIME'00:00:00')+0.00 AS c2,
+ GREATEST(8395959, TIME'00:00:00')+0.000 AS c3,
+ GREATEST(8395959, TIME'00:00:00')+0.0000 AS c4,
+ GREATEST(8395959, TIME'00:00:00')+0.00000 AS c5,
+ GREATEST(8395959, TIME'00:00:00')+0.000000 AS c6,
+ GREATEST(8395959, TIME'00:00:00')+0.0000000 AS c7;
+
+
+
+--echo # GREATEST(string, time)
+
+SELECT
+ GREATEST('839:59:59', TIME'00:00:00') AS ci,
+ GREATEST('839:59:59.0', TIME'00:00:00') AS c1,
+ GREATEST('839:59:59.00', TIME'00:00:00') AS c2,
+ GREATEST('839:59:59.000', TIME'00:00:00') AS c3,
+ GREATEST('839:59:59.0000', TIME'00:00:00') AS c4,
+ GREATEST('839:59:59.00000', TIME'00:00:00') AS c5,
+ GREATEST('839:59:59.000000', TIME'00:00:00') AS c6,
+ GREATEST('839:59:59.0000000', TIME'00:00:00') AS c7;
+
+SELECT
+ MICROSECOND(GREATEST('839:59:59', TIME'00:00:00')) AS ci,
+ MICROSECOND(GREATEST('839:59:59.0', TIME'00:00:00')) AS c1,
+ MICROSECOND(GREATEST('839:59:59.00', TIME'00:00:00')) AS c2,
+ MICROSECOND(GREATEST('839:59:59.000', TIME'00:00:00')) AS c3,
+ MICROSECOND(GREATEST('839:59:59.0000', TIME'00:00:00')) AS c4,
+ MICROSECOND(GREATEST('839:59:59.00000', TIME'00:00:00')) AS c5,
+ MICROSECOND(GREATEST('839:59:59.000000', TIME'00:00:00')) AS c6,
+ MICROSECOND(GREATEST('839:59:59.0000000', TIME'00:00:00')) AS c7;
+
+SELECT
+ CAST(GREATEST('839:59:59', TIME'00:00:00') AS SIGNED) AS ci,
+ CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,0)) AS c0,
+ CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,1)) AS c1,
+ CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,2)) AS c2,
+ CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,3)) AS c3,
+ CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,4)) AS c4,
+ CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,5)) AS c5,
+ CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,6)) AS c6,
+ CAST(GREATEST('839:59:59', TIME'00:00:00') AS DECIMAL(30,7)) AS c7;
+
+SELECT
+ GREATEST('839:59:59', TIME'00:00:00') AS ci,
+ GREATEST('839:59:59', TIME'00:00:00')+0 AS c0,
+ GREATEST('839:59:59', TIME'00:00:00')+0.0 AS c1,
+ GREATEST('839:59:59', TIME'00:00:00')+0.00 AS c2,
+ GREATEST('839:59:59', TIME'00:00:00')+0.000 AS c3,
+ GREATEST('839:59:59', TIME'00:00:00')+0.0000 AS c4,
+ GREATEST('839:59:59', TIME'00:00:00')+0.00000 AS c5,
+ GREATEST('839:59:59', TIME'00:00:00')+0.000000 AS c6,
+ GREATEST('839:59:59', TIME'00:00:00')+0.0000000 AS c7;
+
+
+--echo # ADDTIME(datetime, decimal)
+
+SELECT
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS c0,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.0) AS c1,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.00) AS c2,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.000) AS c3,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.0000) AS c4,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.00000) AS c5,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.000000) AS c6,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.0000000) AS c7;
+
+SELECT
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)) AS c0,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.0)) AS c1,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.00)) AS c2,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.000)) AS c3,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.0000)) AS c4,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.00000)) AS c5,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.000000)) AS c6,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959.0000000)) AS c7;
+
+SELECT
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS SIGNED) AS ci,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,0)) AS c0,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,1)) AS c1,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,2)) AS c2,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,3)) AS c3,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,4)) AS c4,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,5)) AS c5,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,6)) AS c6,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS DECIMAL(30,7)) AS c7;
+
+SELECT
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959) AS ci,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0 AS c0,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.0 AS c1,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.00 AS c2,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.000 AS c3,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.0000 AS c4,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.00000 AS c5,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.000000 AS c6,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 8395959)+0.0000000 AS c7;
+
+--echo # ADDTIME(datetime, string)
+
+SELECT
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS c0,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.0') AS c1,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.00') AS c2,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.000') AS c3,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.0000') AS c4,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.00000') AS c5,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.000000') AS c6,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.0000000') AS c7;
+
+SELECT
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')) AS c0,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.0')) AS c1,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.00')) AS c2,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.000')) AS c3,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.0000')) AS c4,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.00000')) AS c5,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.000000')) AS c6,
+ MICROSECOND(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59.0000000')) AS c7;
+
+SELECT
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS SIGNED) AS ci,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,0)) AS c0,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,1)) AS c1,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,2)) AS c2,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,3)) AS c3,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,4)) AS c4,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,5)) AS c5,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,6)) AS c6,
+ CAST(ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS DECIMAL(30,7)) AS c7;
+
+SELECT
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59') AS ci,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0 AS c0,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.0 AS c1,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.00 AS c2,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.000 AS c3,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.0000 AS c4,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.00000 AS c5,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.000000 AS c6,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '839:59:59')+0.0000000 AS c7;
+
+--echo # ADDTIME(time, decimal)
+
+SELECT
+ ADDTIME(TIME'00:00:00', 8395959) AS c0,
+ ADDTIME(TIME'00:00:00', 8395959.0) AS c1,
+ ADDTIME(TIME'00:00:00', 8395959.00) AS c2,
+ ADDTIME(TIME'00:00:00', 8395959.000) AS c3,
+ ADDTIME(TIME'00:00:00', 8395959.0000) AS c4,
+ ADDTIME(TIME'00:00:00', 8395959.00000) AS c5,
+ ADDTIME(TIME'00:00:00', 8395959.000000) AS c6,
+ ADDTIME(TIME'00:00:00', 8395959.0000000) AS c7;
+
+SELECT
+ MICROSECOND(ADDTIME(TIME'00:00:00', 8395959)) AS c0,
+ MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.0)) AS c1,
+ MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.00)) AS c2,
+ MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.000)) AS c3,
+ MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.0000)) AS c4,
+ MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.00000)) AS c5,
+ MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.000000)) AS c6,
+ MICROSECOND(ADDTIME(TIME'00:00:00', 8395959.0000000)) AS c7;
+
+SELECT
+ CAST(ADDTIME(TIME'00:00:00', 8395959) AS SIGNED) AS ci,
+ CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,0)) AS c0,
+ CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,1)) AS c1,
+ CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,2)) AS c2,
+ CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,3)) AS c3,
+ CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,4)) AS c4,
+ CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,5)) AS c5,
+ CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,6)) AS c6,
+ CAST(ADDTIME(TIME'00:00:00', 8395959) AS DECIMAL(30,7)) AS c7;
+
+SELECT
+ ADDTIME(TIME'00:00:00', 8395959) AS ci,
+ ADDTIME(TIME'00:00:00', 8395959)+0 AS c0,
+ ADDTIME(TIME'00:00:00', 8395959)+0.0 AS c1,
+ ADDTIME(TIME'00:00:00', 8395959)+0.00 AS c2,
+ ADDTIME(TIME'00:00:00', 8395959)+0.000 AS c3,
+ ADDTIME(TIME'00:00:00', 8395959)+0.0000 AS c4,
+ ADDTIME(TIME'00:00:00', 8395959)+0.00000 AS c5,
+ ADDTIME(TIME'00:00:00', 8395959)+0.000000 AS c6,
+ ADDTIME(TIME'00:00:00', 8395959)+0.0000000 AS c7;
+
+--echo # ADDTIME(time,string)
+
+SELECT
+ ADDTIME(TIME'00:00:00', '839:59:59') AS c0,
+ ADDTIME(TIME'00:00:00', '839:59:59.0') AS c1,
+ ADDTIME(TIME'00:00:00', '839:59:59.00') AS c2,
+ ADDTIME(TIME'00:00:00', '839:59:59.000') AS c3,
+ ADDTIME(TIME'00:00:00', '839:59:59.0000') AS c4,
+ ADDTIME(TIME'00:00:00', '839:59:59.00000') AS c5,
+ ADDTIME(TIME'00:00:00', '839:59:59.000000') AS c6,
+ ADDTIME(TIME'00:00:00', '839:59:59.0000000') AS c7;
+
+SELECT
+ MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59')) AS c0,
+ MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.0')) AS c1,
+ MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.00')) AS c2,
+ MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.000')) AS c3,
+ MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.0000')) AS c4,
+ MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.00000')) AS c5,
+ MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.000000')) AS c6,
+ MICROSECOND(ADDTIME(TIME'00:00:00', '839:59:59.0000000')) AS c7;
+
+SELECT
+ CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS SIGNED) AS ci,
+ CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,0)) AS c0,
+ CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,1)) AS c1,
+ CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,2)) AS c2,
+ CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,3)) AS c3,
+ CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,4)) AS c4,
+ CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,5)) AS c5,
+ CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,6)) AS c6,
+ CAST(ADDTIME(TIME'00:00:00', '839:59:59') AS DECIMAL(30,7)) AS c7;
+
+SELECT
+ ADDTIME(TIME'00:00:00', '839:59:59') AS ci,
+ ADDTIME(TIME'00:00:00', '839:59:59')+0 AS c0,
+ ADDTIME(TIME'00:00:00', '839:59:59')+0.0 AS c1,
+ ADDTIME(TIME'00:00:00', '839:59:59')+0.00 AS c2,
+ ADDTIME(TIME'00:00:00', '839:59:59')+0.000 AS c3,
+ ADDTIME(TIME'00:00:00', '839:59:59')+0.0000 AS c4,
+ ADDTIME(TIME'00:00:00', '839:59:59')+0.00000 AS c5,
+ ADDTIME(TIME'00:00:00', '839:59:59')+0.000000 AS c6,
+ ADDTIME(TIME'00:00:00', '839:59:59')+0.0000000 AS c7;
+
+--echo # ADDTIME(int,int)
+SELECT
+ ADDTIME(0, 8395959) AS c,
+ MICROSECOND(ADDTIME(0, 8395959)) AS cm,
+ CAST(ADDTIME(0, 8395959) AS SIGNED) AS cs_fixme_mdev_17384,
+ CAST(ADDTIME(0, 8395959) AS DECIMAL(30,0)) AS cd300;
+
+SELECT
+ ADDTIME(20010101000000, 8395959) AS c,
+ MICROSECOND(ADDTIME(20010101000000, 8395959)) AS cm,
+ CAST(ADDTIME(20010101000000, 8395959) AS SIGNED) AS cs_fixme_mdev_17384,
+ CAST(ADDTIME(20010101000000, 8395959) AS DECIMAL(30,0)) AS cd300;
+
+--echo # ADDTIME(decimal,int)
+--echo # 8385960 in cd300 is correct: addtime returns '838:59:59.9'
+--echo # which is further *rounded* to DECIMAL(30,0)
+SELECT
+ ADDTIME(0.0, 8395959) AS c,
+ MICROSECOND(ADDTIME(0.0, 8395959)) AS cm,
+ CAST(ADDTIME(0.0, 8395959) AS SIGNED) AS cs_fixme_mdev_17384,
+ CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,0)) AS cd300;
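+# A quick worked check for cd300 above: '838:59:59.9' converts to the number
+# 8385959.9 (hhmmss.f form), and rounding 8385959.9 to a scale of 0 gives
+# 8385960, matching the *rounded* value mentioned in the comment above.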
+
+SELECT
+ ADDTIME(20010101000000.0, 8395959) AS c,
+ MICROSECOND(ADDTIME(20010101000000.0, 8395959)) AS cm,
+ CAST(ADDTIME(20010101000000.0, 8395959) AS SIGNED) AS cs_fixme_mdev_17384,
+ CAST(ADDTIME(20010101000000.0, 8395959) AS DECIMAL(30,0)) AS cd300;
+
+
+--echo # ADDTIME(decimal,decimal)
+
+SELECT
+ ADDTIME(0.0, 8395959.0) AS c1,
+ ADDTIME(0.0, 8395959.00) AS c2,
+ ADDTIME(0.0, 8395959.000) AS c3,
+ ADDTIME(0.0, 8395959.0000) AS c4,
+ ADDTIME(0.0, 8395959.00000) AS c5,
+ ADDTIME(0.0, 8395959.000000) AS c6,
+ ADDTIME(0.0, 8395959.0000000) AS c7;
+
+SELECT
+ MICROSECOND(ADDTIME(0.0, 8395959.0)) AS c1,
+ MICROSECOND(ADDTIME(0.0, 8395959.00)) AS c2,
+ MICROSECOND(ADDTIME(0.0, 8395959.000)) AS c3,
+ MICROSECOND(ADDTIME(0.0, 8395959.0000)) AS c4,
+ MICROSECOND(ADDTIME(0.0, 8395959.00000)) AS c5,
+ MICROSECOND(ADDTIME(0.0, 8395959.000000)) AS c6,
+ MICROSECOND(ADDTIME(0.0, 8395959.0000000)) AS c7;
+
+--echo # 8385960 in c1 is correct: addtime returns '838:59:59.9'
+--echo # which is further *rounded* to DECIMAL(30,0)
+SELECT
+ CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,0)) AS c0,
+ CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,1)) AS c1,
+ CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,2)) AS c2,
+ CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,3)) AS c3,
+ CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,4)) AS c4,
+ CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,5)) AS c5,
+ CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,6)) AS c6,
+ CAST(ADDTIME(0.0, 8395959) AS DECIMAL(30,7)) AS c7;
+
+SELECT
+ ADDTIME(0.0, 8395959)+0 AS c0,
+ ADDTIME(0.0, 8395959)+0.0 AS c1,
+ ADDTIME(0.0, 8395959)+0.00 AS c2,
+ ADDTIME(0.0, 8395959)+0.000 AS c3,
+ ADDTIME(0.0, 8395959)+0.0000 AS c4,
+ ADDTIME(0.0, 8395959)+0.00000 AS c5,
+ ADDTIME(0.0, 8395959)+0.000000 AS c6,
+ ADDTIME(0.0, 8395959)+0.0000000 AS c7;
+
+
+--echo # TIMESTAMP(string,decimal)
+
+SELECT
+ TIMESTAMP('2001-01-01', 8395959) AS ci,
+ TIMESTAMP('2001-01-01', 8395959.0) AS c1,
+ TIMESTAMP('2001-01-01', 8395959.00) AS c2,
+ TIMESTAMP('2001-01-01', 8395959.000) AS c3,
+ TIMESTAMP('2001-01-01', 8395959.0000) AS c4,
+ TIMESTAMP('2001-01-01', 8395959.00000) AS c5,
+ TIMESTAMP('2001-01-01', 8395959.000000) AS c6,
+ TIMESTAMP('2001-01-01', 8395959.0000000) AS c7;
+
+SELECT
+ MICROSECOND(TIMESTAMP('2001-01-01', 8395959)) AS ci,
+ MICROSECOND(TIMESTAMP('2001-01-01', 8395959.0)) AS c1,
+ MICROSECOND(TIMESTAMP('2001-01-01', 8395959.00)) AS c2,
+ MICROSECOND(TIMESTAMP('2001-01-01', 8395959.000)) AS c3,
+ MICROSECOND(TIMESTAMP('2001-01-01', 8395959.0000)) AS c4,
+ MICROSECOND(TIMESTAMP('2001-01-01', 8395959.00000)) AS c5,
+ MICROSECOND(TIMESTAMP('2001-01-01', 8395959.000000)) AS c6,
+ MICROSECOND(TIMESTAMP('2001-01-01', 8395959.0000000)) AS c7;
+
+SELECT
+ CAST(TIMESTAMP('2001-01-01', 8395959) AS SIGNED) AS ci,
+ CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,0)) AS c0,
+ CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,1)) AS c1,
+ CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,2)) AS c2,
+ CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,3)) AS c3,
+ CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,4)) AS c4,
+ CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,5)) AS c5,
+ CAST(TIMESTAMP('2001-01-01', 8395959) AS DECIMAL(30,6)) AS c6;
+
+SELECT
+ TIMESTAMP('2001-01-01', 8395959) AS ci,
+ TIMESTAMP('2001-01-01', 8395959)+0 AS c0,
+ TIMESTAMP('2001-01-01', 8395959)+0.0 AS c1,
+ TIMESTAMP('2001-01-01', 8395959)+0.00 AS c2,
+ TIMESTAMP('2001-01-01', 8395959)+0.000 AS c3,
+ TIMESTAMP('2001-01-01', 8395959)+0.0000 AS c4,
+ TIMESTAMP('2001-01-01', 8395959)+0.00000 AS c5,
+ TIMESTAMP('2001-01-01', 8395959)+0.000000 AS c6,
+ TIMESTAMP('2001-01-01', 8395959)+0.0000000 AS c7;
+
+--echo # TIMESTAMP(string,string)
+
+SELECT
+ TIMESTAMP('2001-01-01', '839:59:59') AS ci,
+ TIMESTAMP('2001-01-01', '839:59:59.0') AS c1,
+ TIMESTAMP('2001-01-01', '839:59:59.00') AS c2,
+ TIMESTAMP('2001-01-01', '839:59:59.000') AS c3,
+ TIMESTAMP('2001-01-01', '839:59:59.0000') AS c4,
+ TIMESTAMP('2001-01-01', '839:59:59.00000') AS c5,
+ TIMESTAMP('2001-01-01', '839:59:59.000000') AS c6,
+ TIMESTAMP('2001-01-01', '839:59:59.0000000') AS c7;
+
+SELECT
+ MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59')) AS ci,
+ MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.0')) AS c1,
+ MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.00')) AS c2,
+ MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.000')) AS c3,
+ MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.0000')) AS c4,
+ MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.00000')) AS c5,
+ MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.000000')) AS c6,
+ MICROSECOND(TIMESTAMP('2001-01-01', '839:59:59.0000000')) AS c7;
+
+SELECT
+ CAST(TIMESTAMP('2001-01-01', '839:59:59') AS SIGNED) AS ci,
+ CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,0)) AS c0,
+ CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,1)) AS c1,
+ CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,2)) AS c2,
+ CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,3)) AS c3,
+ CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,4)) AS c4,
+ CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,5)) AS c5,
+ CAST(TIMESTAMP('2001-01-01', '839:59:59') AS DECIMAL(30,6)) AS c6;
+
+SELECT
+ TIMESTAMP('2001-01-01', '839:59:59') AS ci,
+ TIMESTAMP('2001-01-01', '839:59:59')+0 AS c0,
+ TIMESTAMP('2001-01-01', '839:59:59')+0.0 AS c1,
+ TIMESTAMP('2001-01-01', '839:59:59')+0.00 AS c2,
+ TIMESTAMP('2001-01-01', '839:59:59')+0.000 AS c3,
+ TIMESTAMP('2001-01-01', '839:59:59')+0.0000 AS c4,
+ TIMESTAMP('2001-01-01', '839:59:59')+0.00000 AS c5,
+ TIMESTAMP('2001-01-01', '839:59:59')+0.000000 AS c6,
+ TIMESTAMP('2001-01-01', '839:59:59')+0.0000000 AS c7;
+
+--horizontal_results
+
+--echo # Corner cases for TIMESTAMP(timestamp,xxx)
+
+--echo # HOUR is outside of the supported INTERVAL DAY TO SECOND range
+--echo # Expect NULL with INTERVAL warnings
+CREATE TABLE t1 (a VARCHAR(64), b VARCHAR(64));
+INSERT INTO t1 VALUES ('4294967296:00:00', '178956970 16:00:00');
+INSERT INTO t1 VALUES ('4294967295:59:59', '178956970 15:59:59');
+INSERT INTO t1 VALUES ('4294967294:59:59', '178956970 14:59:59');
+INSERT INTO t1 VALUES ('87649416:00:00', '3652059 00:00:00');
+SELECT TIMESTAMP('0001-01-01 00:00:00', a) AS ta, TIMESTAMP('0001-01-01 00:00:00', b) AS tb FROM t1;
+DROP TABLE t1;
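+# Quick arithmetic behind the pairs above: 4294967296 = 2^32 = UINT_MAX32 + 1,
+# and 4294967296 hours = 178956970 days 16 hours (178956970 * 24 = 4294967280,
+# remainder 16).  Likewise 87649416 hours = 3652059 days exactly
+# (3652059 * 24 = 87649416), i.e. HOUR is max_useful_hour()+1 in the terms used below.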
+
+CREATE TABLE t1 (a VARCHAR(64), b VARCHAR(64));
+INSERT INTO t1 VALUES ('-4294967296:00:00', '-178956970 16:00:00');
+INSERT INTO t1 VALUES ('-4294967295:59:59', '-178956970 15:59:59');
+INSERT INTO t1 VALUES ('-4294967294:59:59', '-178956970 14:59:59');
+INSERT INTO t1 VALUES ('-87649416:00:00', '-3652059 00:00:00');
+SELECT TIMESTAMP('9999-12-31 23:59:59', a) AS ta, TIMESTAMP('9999-12-31 23:59:59.999999', b) AS tb FROM t1;
+DROP TABLE t1;
+
+--echo # HOUR is OK
+--echo # Expect max or near-max DATETIME value + no INTERVAL warnings
+CREATE TABLE t1 (a VARCHAR(64), b VARCHAR(64));
+INSERT INTO t1 VALUES ('87649415:59:59.999999', '3652058 23:59:59.999999');
+INSERT INTO t1 VALUES ('87649415:59:59', '3652058 23:59:59');
+SELECT TIMESTAMP('0001-01-01 00:00:00', a) AS ta, TIMESTAMP('0001-01-01 00:00:00', b) AS tb FROM t1;
+DROP TABLE t1;
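+# Arithmetic behind '87649415:59:59': 3652058 days * 24 + 23 = 87649415 hours,
+# so adding it to '0001-01-01 00:00:00' reaches the DATETIME maximum
+# '9999-12-31 23:59:59', which is why 87649415 is called max_useful_hour() below.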
+
+--echo # HOUR is OK
+--echo # Expect near '0001-01-01 00:00:00' DATETIME value + no INTERVAL warnings
+CREATE TABLE t1 (a VARCHAR(64), b VARCHAR(64));
+INSERT INTO t1 VALUES ('-87649415:59:59.999999', '-3652058 23:59:59.999999');
+INSERT INTO t1 VALUES ('-87649415:59:59', '-3652058 23:59:59');
+SELECT TIMESTAMP('9999-12-31 23:59:59', a) AS ta, TIMESTAMP('9999-12-31 23:59:59.999999', b) AS tb FROM t1;
+DROP TABLE t1;
+
+--echo # HOUR is OK
+--echo # Expect NULL on datetime arithmetic overflow + no INTERVAL warnings
+CREATE TABLE t1 (a VARCHAR(64), b VARCHAR(64));
+INSERT INTO t1 VALUES ('-00:00:00.000001', '-0 00:00:00.000001');
+SELECT TIMESTAMP('0001-01-01 00:00:00', a) AS ta, TIMESTAMP('0001-01-01 00:00:00', b) AS tb FROM t1;
+DROP TABLE t1;
+
+
+--echo # Corner cases for ADDTIME(timestamp,xxx)
+--vertical_results
+
+--echo # HOUR is outside of UINT_MAX32 range
+--echo # Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+
+SELECT
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959) AS ci,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0 AS c0,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.0 AS c1,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.00 AS c2,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.000 AS c3,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.0000 AS c4,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.00000 AS c5,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.000000 AS c6,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672965959)+0.0000000 AS c7;
+
+SELECT
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59') AS ci,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0 AS c0,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.0 AS c1,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.00 AS c2,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.000 AS c3,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.0000 AS c4,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.00000 AS c5,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.000000 AS c6,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967296:59:59')+0.0000000 AS c7;
+
+## TODO: add '0001-01-01 00:00:00'
+
+--echo # HOUR is UINT_MAX32
+--echo # Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+SELECT
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959) AS ci,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0 AS c0,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.0 AS c1,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.00 AS c2,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.000 AS c3,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.0000 AS c4,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.00000 AS c5,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.000000 AS c6,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 42949672955959)+0.0000000 AS c7;
+
+SELECT
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59') AS ci,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0 AS c0,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.0 AS c1,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.00 AS c2,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.000 AS c3,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.0000 AS c4,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.00000 AS c5,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.000000 AS c6,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '4294967295:59:59')+0.0000000 AS c7;
+
+--echo # HOUR is max_useful_hour()+1
+--echo # Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+SELECT
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959) AS ci,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0 AS c0,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.0 AS c1,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.00 AS c2,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.000 AS c3,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.0000 AS c4,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.00000 AS c5,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.000000 AS c6,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494165959)+0.0000000 AS c7;
+
+SELECT
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59') AS ci,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0 AS c0,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.0 AS c1,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.00 AS c2,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.000 AS c3,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.0000 AS c4,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.00000 AS c5,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.000000 AS c6,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649416:59:59')+0.0000000 AS c7;
+
+--echo # HOUR is max_useful_hour()
+--echo # Expect NULL (calc_time_diff overflows) + no INTERVAL warnings
+SELECT
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959) AS ci,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0 AS c0,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.0 AS c1,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.00 AS c2,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.000 AS c3,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.0000 AS c4,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.00000 AS c5,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.000000 AS c6,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', 876494155959)+0.0000000 AS c7;
+
+SELECT
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59') AS ci,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0 AS c0,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.0 AS c1,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.00 AS c2,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.000 AS c3,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.0000 AS c4,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.00000 AS c5,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.000000 AS c6,
+ ADDTIME(TIMESTAMP'2001-01-01 00:00:00', '87649415:59:59')+0.0000000 AS c7;
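+# The same interval is accepted below when added to '0001-01-01 00:00:00';
+# from '2001-01-01 00:00:00' the sum would land roughly 2000 years past
+# '9999-12-31 23:59:59', hence NULL here without any INTERVAL warning.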
+
+--echo # HOUR is max_useful_hour()
+--echo # Expect non-NULL + no warnings
+SELECT
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959) AS ci,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0 AS c0,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.0 AS c1,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.00 AS c2,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.000 AS c3,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.0000 AS c4,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.00000 AS c5,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.000000 AS c6,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', 876494155959)+0.0000000 AS c7;
+
+SELECT
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59') AS ci,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0 AS c0,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.0 AS c1,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.00 AS c2,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.000 AS c3,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.0000 AS c4,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.00000 AS c5,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.000000 AS c6,
+ ADDTIME(TIMESTAMP'0001-01-01 00:00:00', '87649415:59:59')+0.0000000 AS c7;
+--horizontal_results
+
+
+--echo # Corner cases for ADDTIME(time,xxx)
+--vertical_results
+
+--echo # HOUR is outside of UINT_MAX32 range
+--echo # Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+SELECT
+ ADDTIME(TIME'00:00:00', 42949672965959) AS ci,
+ ADDTIME(TIME'00:00:00', 42949672965959)+0 AS c0,
+ ADDTIME(TIME'00:00:00', 42949672965959)+0.0 AS c1,
+ ADDTIME(TIME'00:00:00', 42949672965959)+0.00 AS c2,
+ ADDTIME(TIME'00:00:00', 42949672965959)+0.000 AS c3,
+ ADDTIME(TIME'00:00:00', 42949672965959)+0.0000 AS c4,
+ ADDTIME(TIME'00:00:00', 42949672965959)+0.00000 AS c5,
+ ADDTIME(TIME'00:00:00', 42949672965959)+0.000000 AS c6,
+ ADDTIME(TIME'00:00:00', 42949672965959)+0.0000000 AS c7;
+
+SELECT
+ ADDTIME(TIME'00:00:00', '4294967296:59:59') AS ci,
+ ADDTIME(TIME'00:00:00', '4294967296:59:59')+0 AS c0,
+ ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.0 AS c1,
+ ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.00 AS c2,
+ ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.000 AS c3,
+ ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.0000 AS c4,
+ ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.00000 AS c5,
+ ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.000000 AS c6,
+ ADDTIME(TIME'00:00:00', '4294967296:59:59')+0.0000000 AS c7;
+
+--echo # HOUR is UINT_MAX32 (outside of INTERVAL DAY TO SECOND range)
+--echo # Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+SELECT
+ ADDTIME(TIME'00:00:00', 42949672955959) AS ci,
+ ADDTIME(TIME'00:00:00', 42949672955959)+0 AS c0,
+ ADDTIME(TIME'00:00:00', 42949672955959)+0.0 AS c1,
+ ADDTIME(TIME'00:00:00', 42949672955959)+0.00 AS c2,
+ ADDTIME(TIME'00:00:00', 42949672955959)+0.000 AS c3,
+ ADDTIME(TIME'00:00:00', 42949672955959)+0.0000 AS c4,
+ ADDTIME(TIME'00:00:00', 42949672955959)+0.00000 AS c5,
+ ADDTIME(TIME'00:00:00', 42949672955959)+0.000000 AS c6,
+ ADDTIME(TIME'00:00:00', 42949672955959)+0.0000000 AS c7;
+
+SELECT
+ ADDTIME(TIME'00:00:00', '4294967295:59:59') AS ci,
+ ADDTIME(TIME'00:00:00', '4294967295:59:59')+0 AS c0,
+ ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.0 AS c1,
+ ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.00 AS c2,
+ ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.000 AS c3,
+ ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.0000 AS c4,
+ ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.00000 AS c5,
+ ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.000000 AS c6,
+ ADDTIME(TIME'00:00:00', '4294967295:59:59')+0.0000000 AS c7;
+
+--echo # HOUR is max_useful_hour()+1 (outside of INTERVAL DAY TO SECOND range)
+--echo # Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+
+SELECT
+ ADDTIME(TIME'00:00:00', 876494165959) AS ci,
+ ADDTIME(TIME'00:00:00', 876494165959)+0 AS c0,
+ ADDTIME(TIME'00:00:00', 876494165959)+0.0 AS c1,
+ ADDTIME(TIME'00:00:00', 876494165959)+0.00 AS c2,
+ ADDTIME(TIME'00:00:00', 876494165959)+0.000 AS c3,
+ ADDTIME(TIME'00:00:00', 876494165959)+0.0000 AS c4,
+ ADDTIME(TIME'00:00:00', 876494165959)+0.00000 AS c5,
+ ADDTIME(TIME'00:00:00', 876494165959)+0.000000 AS c6,
+ ADDTIME(TIME'00:00:00', 876494165959)+0.0000000 AS c7;
+
+SELECT
+ ADDTIME(TIME'00:00:00', '87649416:59:59') AS ci,
+ ADDTIME(TIME'00:00:00', '87649416:59:59')+0 AS c0,
+ ADDTIME(TIME'00:00:00', '87649416:59:59')+0.0 AS c1,
+ ADDTIME(TIME'00:00:00', '87649416:59:59')+0.00 AS c2,
+ ADDTIME(TIME'00:00:00', '87649416:59:59')+0.000 AS c3,
+ ADDTIME(TIME'00:00:00', '87649416:59:59')+0.0000 AS c4,
+ ADDTIME(TIME'00:00:00', '87649416:59:59')+0.00000 AS c5,
+ ADDTIME(TIME'00:00:00', '87649416:59:59')+0.000000 AS c6,
+ ADDTIME(TIME'00:00:00', '87649416:59:59')+0.0000000 AS c7;
+
+--echo # HOUR is max_useful_hour()+1 (outside of INTERVAL DAY TO SECOND range)
+--echo # Expect NULL + "Incorrect INTERVAL DAY TO SECOND value"
+
+SELECT
+ ADDTIME(TIME'-838:59:59', 876494165959) AS ci,
+ ADDTIME(TIME'-838:59:59.9', 876494165959) AS c1,
+ ADDTIME(TIME'-838:59:59.99', 876494165959) AS c2,
+ ADDTIME(TIME'-838:59:59.999', 876494165959) AS c3,
+ ADDTIME(TIME'-838:59:59.9999', 876494165959) AS c4,
+ ADDTIME(TIME'-838:59:59.99999', 876494165959) AS c5,
+ ADDTIME(TIME'-838:59:59.999999', 876494165959) AS c6;
+
+SELECT
+ ADDTIME(TIME'-838:59:59', '87649416:59:59') AS ci,
+ ADDTIME(TIME'-838:59:59.9', '87649416:59:59') AS c1,
+ ADDTIME(TIME'-838:59:59.99', '87649416:59:59') AS c2,
+ ADDTIME(TIME'-838:59:59.999', '87649416:59:59') AS c3,
+ ADDTIME(TIME'-838:59:59.9999', '87649416:59:59') AS c4,
+ ADDTIME(TIME'-838:59:59.99999', '87649416:59:59') AS c5,
+ ADDTIME(TIME'-838:59:59.999999', '87649416:59:59') AS c6;
+
+# This does not give a warning about nanosecond truncation in --ps runs
+# so disable warnings
+--disable_warnings
+SELECT
+ ADDTIME(TIME'-838:59:59.9999999', '87649416:59:59') AS c7;
+--enable_warnings
+
+--echo # HOUR is max_useful_hour() (inside INTERVAL DAY TO SECOND range)
+--echo # Expect max TIME(0) + zero fraction + TIME warnings + no INTERVAL warnings
+SELECT
+ ADDTIME(TIME'00:00:00', 876494155959) AS ci,
+ ADDTIME(TIME'00:00:00', 876494155959)+0 AS c0,
+ ADDTIME(TIME'00:00:00', 876494155959)+0.0 AS c1,
+ ADDTIME(TIME'00:00:00', 876494155959)+0.00 AS c2,
+ ADDTIME(TIME'00:00:00', 876494155959)+0.000 AS c3,
+ ADDTIME(TIME'00:00:00', 876494155959)+0.0000 AS c4,
+ ADDTIME(TIME'00:00:00', 876494155959)+0.00000 AS c5,
+ ADDTIME(TIME'00:00:00', 876494155959)+0.000000 AS c6,
+ ADDTIME(TIME'00:00:00', 876494155959)+0.0000000 AS c7;
+
+SELECT
+ ADDTIME(TIME'00:00:00', '87649415:59:59') AS ci,
+ ADDTIME(TIME'00:00:00', '87649415:59:59')+0 AS c0,
+ ADDTIME(TIME'00:00:00', '87649415:59:59')+0.0 AS c1,
+ ADDTIME(TIME'00:00:00', '87649415:59:59')+0.00 AS c2,
+ ADDTIME(TIME'00:00:00', '87649415:59:59')+0.000 AS c3,
+ ADDTIME(TIME'00:00:00', '87649415:59:59')+0.0000 AS c4,
+ ADDTIME(TIME'00:00:00', '87649415:59:59')+0.00000 AS c5,
+ ADDTIME(TIME'00:00:00', '87649415:59:59')+0.000000 AS c6,
+ ADDTIME(TIME'00:00:00', '87649415:59:59')+0.0000000 AS c7;
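+# The interval itself is valid here, but the TIME result is clamped to the
+# TIME maximum '838:59:59' (hence the expected max TIME(0) value and the
+# TIME warnings, with no INTERVAL warning).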
+
+
+--echo # HOUR is max_useful_hour() (inside INTERVAL DAY TO SECOND range)
+--echo # Expect max TIME(N) + TIME warnings + no INTERVAL warnings
+
+SELECT
+ ADDTIME(TIME'-838:59:59', 876494155959) AS ci,
+ ADDTIME(TIME'-838:59:59.9', 876494155959) AS c1,
+ ADDTIME(TIME'-838:59:59.99', 876494155959) AS c2,
+ ADDTIME(TIME'-838:59:59.999', 876494155959) AS c3,
+ ADDTIME(TIME'-838:59:59.9999', 876494155959) AS c4,
+ ADDTIME(TIME'-838:59:59.99999', 876494155959) AS c5,
+ ADDTIME(TIME'-838:59:59.999999', 876494155959) AS c6;
+
+SELECT
+ ADDTIME(TIME'-838:59:59', '87649415:59:59') AS ci,
+ ADDTIME(TIME'-838:59:59.9', '87649415:59:59') AS c1,
+ ADDTIME(TIME'-838:59:59.99', '87649415:59:59') AS c2,
+ ADDTIME(TIME'-838:59:59.999', '87649415:59:59') AS c3,
+ ADDTIME(TIME'-838:59:59.9999', '87649415:59:59') AS c4,
+ ADDTIME(TIME'-838:59:59.99999', '87649415:59:59') AS c5,
+ ADDTIME(TIME'-838:59:59.999999', '87649415:59:59') AS c6;
+
+# This does not give a warning about nanosecond truncation in --ps runs
+# so disable warnings
+--disable_warnings
+SELECT
+ ADDTIME(TIME'-838:59:59.9999999', '87649415:59:59') AS c7;
+--enable_warnings
+
+--horizontal_results
+
+
+--echo #
+--echo # MDEV-17400 The result of TIME('42949672965959-01') depends on architecture
+--echo #
+
+SELECT TIME('42949672955959-01'), TIME('42949672965959-01');
+SELECT TIME('18446744073709551615-01'), TIME('18446744073709551616-01');
+
+--echo #
+--echo # MDEV-17434 EXTRACT(DAY FROM negative_time) returns wrong result
+--echo #
+
+CREATE TABLE t1 (a TIME(6));
+INSERT INTO t1 VALUES ('-24:10:10.10');
+SELECT
+ EXTRACT(MINUTE FROM a),
+ EXTRACT(SECOND FROM a),
+ EXTRACT(MICROSECOND FROM a),
+ EXTRACT(DAY FROM a),
+ EXTRACT(DAY_HOUR FROM a),
+ EXTRACT(DAY_MINUTE FROM a),
+ EXTRACT(DAY_SECOND FROM a),
+ EXTRACT(DAY_MICROSECOND FROM a)
+FROM t1;
+CREATE TABLE t2 AS
+SELECT
+ EXTRACT(MINUTE FROM a),
+ EXTRACT(SECOND FROM a),
+ EXTRACT(MICROSECOND FROM a),
+ EXTRACT(DAY FROM a),
+ EXTRACT(DAY_HOUR FROM a),
+ EXTRACT(DAY_MINUTE FROM a),
+ EXTRACT(DAY_SECOND FROM a),
+ EXTRACT(DAY_MICROSECOND FROM a)
+FROM t1;
+SHOW CREATE TABLE t2;
+DROP TABLE t2;
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-17478 Wrong result for TIME('+100:20:30')
+--echo #
+
+SELECT TIME('+100:20:30');
+
+--echo #
+--echo # MDEV-17477 Wrong result for TIME('-2001-01-01 10:20:30')
+--echo #
+
+SELECT TIME('-2001-01-01 10:20:30');
+SELECT TIME('2001-01-01') AS c1, TIME('2001-01-01 ') AS c2;
+SELECT TIME('0001:01:01 '), TIME('0001:01:01 ');
+SELECT TIME('1 2'), TIME('1 2 ');
+
+SELECT TIME('2001-01-01T'), TIME('2001-01-01T ');
+SELECT TIME('901-01-01T1'), TIME('901-01-01T10');
+SELECT TIME('091-01-01T1'), TIME('091-01-01T10');
+
+SELECT TIME('0001:01:01x'), TIME('0001:01:01xx');
+SELECT TIME('0001:01:01.'), TIME('0001:01:01..');
+SELECT TIME('0001:01:01-'), TIME('0001:01:01--');
+SELECT TIME('0001:01:01-'), TIME('0001:01:01--');
+
+SELECT TIME('-xxx'), TIME('-xxxxxxxxxxxxxxxxxxxx');
+SELECT TIME('- '), TIME('- ');
+SELECT TIME('-'), TIME('-');
+SELECT TIME('1-1-1 1:1:1'), TIME('1-1-1 1:1:1.0');
+SELECT TIME('1-1-1 1:2:3'), TIME('1-1-1 1:2:3.0');
+
+SELECT
+ CAST('20050326112233 garbage' as datetime),
+ CAST('20050326 garbage' as date),
+ CAST('50326 garbage' as time);
+
+SELECT TIME('- 01:00:00'), TIME('- 1 01:00:00');
+
+
+--echo #
+--echo # MDEV-17854 Assertion `decimals <= 6' failed in my_time_fraction_remainder on SELECT with NULLIF and FROM_UNIXTIME on incorrect time
+--echo #
+
+SET time_zone='+00:00';
+SELECT NULLIF(FROM_UNIXTIME('foo'), '2012-12-12 21:10:14');
+SET time_zone=DEFAULT;
+
+
+--echo #
+--echo # MDEV-18402 Assertion `sec.sec() <= 59' failed in Item_func_maketime::get_date
+--echo #
+
+SELECT MAKETIME('01', '01', LEAST( -100, NULL ));
+SELECT CONCAT(MAKETIME('01', '01', LEAST( -100, NULL )));
diff --git a/mysql-test/main/func_time_round.result b/mysql-test/main/func_time_round.result
new file mode 100644
index 00000000000..b335cf393ba
--- /dev/null
+++ b/mysql-test/main/func_time_round.result
@@ -0,0 +1,1374 @@
+SET sql_mode=IF(@@version LIKE '%MariaDB%', 'TIME_ROUND_FRACTIONAL', '');
+SET @default_sql_mode=@@sql_mode;
+CREATE TABLE t1_datetime_in_varchar (id SERIAL, a VARCHAR(64));
+INSERT INTO t1_datetime_in_varchar (a) VALUES
+('2000-12-31 23:59:59'),
+('2000-12-31 23:59:59.9'),
+('2000-12-31 23:59:59.99'),
+('2000-12-31 23:59:59.999'),
+('2000-12-31 23:59:59.9999'),
+('2000-12-31 23:59:59.99999'),
+('2000-12-31 23:59:59.999999'),
+('2000-12-31 23:59:59.9999999');
+CREATE TABLE t1_datetime_in_decimal (id SERIAL, a DECIMAL(38,10));
+INSERT INTO t1_datetime_in_decimal (a) VALUES
+(20001231235959),
+(20001231235959.9),
+(20001231235959.99),
+(20001231235959.999),
+(20001231235959.9999),
+(20001231235959.99999),
+(20001231235959.999999),
+(20001231235959.9999999);
+CREATE TABLE t1_time_in_varchar (id SERIAL, a VARCHAR(64));
+INSERT INTO t1_time_in_varchar (a) VALUES
+('00:00:00'),
+('00:00:00.9'),
+('00:00:00.99'),
+('00:00:00.999'),
+('00:00:00.9999'),
+('00:00:00.99999'),
+('00:00:00.999999'),
+('00:00:00.9999999');
+INSERT INTO t1_time_in_varchar (a) VALUES
+('837:59:59.9999999'),
+('838:59:59'),
+('838:59:59.9'),
+('838:59:59.99'),
+('838:59:59.999'),
+('838:59:59.9999'),
+('838:59:59.99999'),
+('838:59:59.999999'),
+('838:59:59.9999999'),
+('839:59:59.9999999'),
+('87649414:59:59.999999'),
+('87649414:59:59.9999999'),
+('87649415:59:59.999999'),
+('87649415:59:59.9999999');
+CREATE TABLE t1_time_in_decimal (id SERIAL, a DECIMAL(38,10));
+INSERT INTO t1_time_in_decimal (a) VALUES
+(0),
+(0.9),
+(0.99),
+(0.999),
+(0.9999),
+(0.99999),
+(0.999999),
+(0.9999999);
+INSERT INTO t1_time_in_decimal (a) VALUES
+(8375959.9999999),
+(8385959),
+(8385959.9),
+(8385959.99),
+(8385959.999),
+(8385959.9999),
+(8385959.99999),
+(8385959.999999),
+(8385959.9999999),
+(8395959.9999999),
+(876494145959.999999),
+(876494145959.9999999),
+(876494155959.999999),
+(876494155959.9999999);
+#
+# TIME: LEAST/GREATEST
+#
+SELECT GREATEST(TIME'00:00:00', a) FROM t1_time_in_varchar;
+GREATEST(TIME'00:00:00', a)
+00:00:00.000000
+00:00:00.900000
+00:00:00.990000
+00:00:00.999000
+00:00:00.999900
+00:00:00.999990
+00:00:00.999999
+00:00:01.000000
+838:00:00.000000
+838:59:59.000000
+838:59:59.900000
+838:59:59.990000
+838:59:59.999000
+838:59:59.999900
+838:59:59.999990
+838:59:59.999999
+838:59:59.999999
+838:59:59.999999
+838:59:59.999999
+838:59:59.999999
+838:59:59.999999
+838:59:59.999999
+Warnings:
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '837:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '838:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '839:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '87649414:59:59.999999'
+Warning 1292 Truncated incorrect time value: '87649414:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '87649415:59:59.999999'
+Warning 1292 Truncated incorrect time value: '87649415:59:59.9999999'
+SELECT GREATEST(TIME'00:00:00', a) FROM t1_time_in_decimal;
+GREATEST(TIME'00:00:00', a)
+00:00:00.000000
+00:00:00.900000
+00:00:00.990000
+00:00:00.999000
+00:00:00.999900
+00:00:00.999990
+00:00:00.999999
+00:00:01.000000
+838:00:00.000000
+838:59:59.000000
+838:59:59.900000
+838:59:59.990000
+838:59:59.999000
+838:59:59.999900
+838:59:59.999990
+838:59:59.999999
+838:59:59.999999
+838:59:59.999999
+NULL
+NULL
+NULL
+NULL
+Warnings:
+Warning 1292 Incorrect time value: '8385959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 17
+Warning 1292 Incorrect time value: '8395959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 18
+Warning 1292 Incorrect time value: '876494145959.9999990000' for column `test`.`t1_time_in_decimal`.`a` at row 19
+Warning 1292 Incorrect time value: '876494145959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 20
+Warning 1292 Incorrect time value: '876494155959.9999990000' for column `test`.`t1_time_in_decimal`.`a` at row 21
+Warning 1292 Incorrect time value: '876494155959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 22
+SELECT GREATEST(TIME'00:00:00', '00:00:00.0000004');
+GREATEST(TIME'00:00:00', '00:00:00.0000004')
+00:00:00.000000
+Warnings:
+Note 1292 Truncated incorrect time value: '00:00:00.0000004'
+SELECT GREATEST(TIME'00:00:00', 0.0000004);
+GREATEST(TIME'00:00:00', 0.0000004)
+00:00:00.000000
+SELECT GREATEST(TIME'00:00:00', '00:00:00.0000005');
+GREATEST(TIME'00:00:00', '00:00:00.0000005')
+00:00:00.000001
+Warnings:
+Note 1292 Truncated incorrect time value: '00:00:00.0000005'
+SELECT GREATEST(TIME'00:00:00', 0.0000005);
+GREATEST(TIME'00:00:00', 0.0000005)
+00:00:00.000001
+#
+# Functions with a single TIME input, conversion from DATETIME-in-VARCHAR
+#
+SELECT SECOND(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+SECOND(a) CAST(a AS TIME(6)) a
+59 23:59:59.000000 2000-12-31 23:59:59
+59 23:59:59.900000 2000-12-31 23:59:59.9
+59 23:59:59.990000 2000-12-31 23:59:59.99
+59 23:59:59.999000 2000-12-31 23:59:59.999
+59 23:59:59.999900 2000-12-31 23:59:59.9999
+59 23:59:59.999990 2000-12-31 23:59:59.99999
+59 23:59:59.999999 2000-12-31 23:59:59.999999
+0 00:00:00.000000 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT MINUTE(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+MINUTE(a) CAST(a AS TIME(6)) a
+59 23:59:59.000000 2000-12-31 23:59:59
+59 23:59:59.900000 2000-12-31 23:59:59.9
+59 23:59:59.990000 2000-12-31 23:59:59.99
+59 23:59:59.999000 2000-12-31 23:59:59.999
+59 23:59:59.999900 2000-12-31 23:59:59.9999
+59 23:59:59.999990 2000-12-31 23:59:59.99999
+59 23:59:59.999999 2000-12-31 23:59:59.999999
+0 00:00:00.000000 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT HOUR(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+HOUR(a) CAST(a AS TIME(6)) a
+23 23:59:59.000000 2000-12-31 23:59:59
+23 23:59:59.900000 2000-12-31 23:59:59.9
+23 23:59:59.990000 2000-12-31 23:59:59.99
+23 23:59:59.999000 2000-12-31 23:59:59.999
+23 23:59:59.999900 2000-12-31 23:59:59.9999
+23 23:59:59.999990 2000-12-31 23:59:59.99999
+23 23:59:59.999999 2000-12-31 23:59:59.999999
+0 00:00:00.000000 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT EXTRACT(SECOND FROM a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+EXTRACT(SECOND FROM a) CAST(a AS TIME(6)) a
+59 23:59:59.000000 2000-12-31 23:59:59
+59 23:59:59.900000 2000-12-31 23:59:59.9
+59 23:59:59.990000 2000-12-31 23:59:59.99
+59 23:59:59.999000 2000-12-31 23:59:59.999
+59 23:59:59.999900 2000-12-31 23:59:59.9999
+59 23:59:59.999990 2000-12-31 23:59:59.99999
+59 23:59:59.999999 2000-12-31 23:59:59.999999
+0 00:00:00.000000 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT EXTRACT(MINUTE FROM a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+EXTRACT(MINUTE FROM a) CAST(a AS TIME(6)) a
+59 23:59:59.000000 2000-12-31 23:59:59
+59 23:59:59.900000 2000-12-31 23:59:59.9
+59 23:59:59.990000 2000-12-31 23:59:59.99
+59 23:59:59.999000 2000-12-31 23:59:59.999
+59 23:59:59.999900 2000-12-31 23:59:59.9999
+59 23:59:59.999990 2000-12-31 23:59:59.99999
+59 23:59:59.999999 2000-12-31 23:59:59.999999
+0 00:00:00.000000 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT EXTRACT(HOUR FROM a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+EXTRACT(HOUR FROM a) CAST(a AS TIME(6)) a
+23 23:59:59.000000 2000-12-31 23:59:59
+23 23:59:59.900000 2000-12-31 23:59:59.9
+23 23:59:59.990000 2000-12-31 23:59:59.99
+23 23:59:59.999000 2000-12-31 23:59:59.999
+23 23:59:59.999900 2000-12-31 23:59:59.9999
+23 23:59:59.999990 2000-12-31 23:59:59.99999
+23 23:59:59.999999 2000-12-31 23:59:59.999999
+0 00:00:00.000000 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT TIME_TO_SEC(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+TIME_TO_SEC(a) CAST(a AS TIME(6)) a
+86399.000000 23:59:59.000000 2000-12-31 23:59:59
+86399.900000 23:59:59.900000 2000-12-31 23:59:59.9
+86399.990000 23:59:59.990000 2000-12-31 23:59:59.99
+86399.999000 23:59:59.999000 2000-12-31 23:59:59.999
+86399.999900 23:59:59.999900 2000-12-31 23:59:59.9999
+86399.999990 23:59:59.999990 2000-12-31 23:59:59.99999
+86399.999999 23:59:59.999999 2000-12-31 23:59:59.999999
+0.000000 00:00:00.000000 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+#
+# Functions with a single TIME input, conversion from DATETIME-in-DECIMAL
+#
+SELECT SECOND(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+SECOND(a) CAST(a AS TIME(6)) a
+59 23:59:59.000000 20001231235959.0000000000
+59 23:59:59.900000 20001231235959.9000000000
+59 23:59:59.990000 20001231235959.9900000000
+59 23:59:59.999000 20001231235959.9990000000
+59 23:59:59.999900 20001231235959.9999000000
+59 23:59:59.999990 20001231235959.9999900000
+59 23:59:59.999999 20001231235959.9999990000
+0 00:00:00.000000 20001231235959.9999999000
+SELECT MINUTE(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+MINUTE(a) CAST(a AS TIME(6)) a
+59 23:59:59.000000 20001231235959.0000000000
+59 23:59:59.900000 20001231235959.9000000000
+59 23:59:59.990000 20001231235959.9900000000
+59 23:59:59.999000 20001231235959.9990000000
+59 23:59:59.999900 20001231235959.9999000000
+59 23:59:59.999990 20001231235959.9999900000
+59 23:59:59.999999 20001231235959.9999990000
+0 00:00:00.000000 20001231235959.9999999000
+SELECT HOUR(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+HOUR(a) CAST(a AS TIME(6)) a
+23 23:59:59.000000 20001231235959.0000000000
+23 23:59:59.900000 20001231235959.9000000000
+23 23:59:59.990000 20001231235959.9900000000
+23 23:59:59.999000 20001231235959.9990000000
+23 23:59:59.999900 20001231235959.9999000000
+23 23:59:59.999990 20001231235959.9999900000
+23 23:59:59.999999 20001231235959.9999990000
+0 00:00:00.000000 20001231235959.9999999000
+SELECT EXTRACT(SECOND FROM a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+EXTRACT(SECOND FROM a) CAST(a AS TIME(6)) a
+59 23:59:59.000000 20001231235959.0000000000
+59 23:59:59.900000 20001231235959.9000000000
+59 23:59:59.990000 20001231235959.9900000000
+59 23:59:59.999000 20001231235959.9990000000
+59 23:59:59.999900 20001231235959.9999000000
+59 23:59:59.999990 20001231235959.9999900000
+59 23:59:59.999999 20001231235959.9999990000
+0 00:00:00.000000 20001231235959.9999999000
+SELECT EXTRACT(MINUTE FROM a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+EXTRACT(MINUTE FROM a) CAST(a AS TIME(6)) a
+59 23:59:59.000000 20001231235959.0000000000
+59 23:59:59.900000 20001231235959.9000000000
+59 23:59:59.990000 20001231235959.9900000000
+59 23:59:59.999000 20001231235959.9990000000
+59 23:59:59.999900 20001231235959.9999000000
+59 23:59:59.999990 20001231235959.9999900000
+59 23:59:59.999999 20001231235959.9999990000
+0 00:00:00.000000 20001231235959.9999999000
+SELECT EXTRACT(HOUR FROM a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+EXTRACT(HOUR FROM a) CAST(a AS TIME(6)) a
+23 23:59:59.000000 20001231235959.0000000000
+23 23:59:59.900000 20001231235959.9000000000
+23 23:59:59.990000 20001231235959.9900000000
+23 23:59:59.999000 20001231235959.9990000000
+23 23:59:59.999900 20001231235959.9999000000
+23 23:59:59.999990 20001231235959.9999900000
+23 23:59:59.999999 20001231235959.9999990000
+0 00:00:00.000000 20001231235959.9999999000
+SELECT TIME_TO_SEC(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+TIME_TO_SEC(a) CAST(a AS TIME(6)) a
+86399.000000 23:59:59.000000 20001231235959.0000000000
+86399.900000 23:59:59.900000 20001231235959.9000000000
+86399.990000 23:59:59.990000 20001231235959.9900000000
+86399.999000 23:59:59.999000 20001231235959.9990000000
+86399.999900 23:59:59.999900 20001231235959.9999000000
+86399.999990 23:59:59.999990 20001231235959.9999900000
+86399.999999 23:59:59.999999 20001231235959.9999990000
+0.000000 00:00:00.000000 20001231235959.9999999000
+#
+# Functions with a single TIME interval input, conversion from TIME-interval-in-VARCHAR
+#
+SELECT
+EXTRACT(DAY FROM a),
+EXTRACT(HOUR FROM a),
+EXTRACT(MINUTE FROM a),
+EXTRACT(SECOND FROM a),
+EXTRACT(MICROSECOND FROM a),
+CAST(a AS INTERVAL DAY_SECOND(6)),
+a
+FROM t1_time_in_varchar ORDER BY id;
+EXTRACT(DAY FROM a) EXTRACT(HOUR FROM a) EXTRACT(MINUTE FROM a) EXTRACT(SECOND FROM a) EXTRACT(MICROSECOND FROM a) CAST(a AS INTERVAL DAY_SECOND(6)) a
+0 0 0 0 0 00:00:00.000000 00:00:00
+0 0 0 0 900000 00:00:00.900000 00:00:00.9
+0 0 0 0 990000 00:00:00.990000 00:00:00.99
+0 0 0 0 999000 00:00:00.999000 00:00:00.999
+0 0 0 0 999900 00:00:00.999900 00:00:00.9999
+0 0 0 0 999990 00:00:00.999990 00:00:00.99999
+0 0 0 0 999999 00:00:00.999999 00:00:00.999999
+0 0 0 1 0 00:00:01.000000 00:00:00.9999999
+34 22 0 0 0 34 22:00:00.000000 837:59:59.9999999
+34 22 59 59 0 34 22:59:59.000000 838:59:59
+34 22 59 59 900000 34 22:59:59.900000 838:59:59.9
+34 22 59 59 990000 34 22:59:59.990000 838:59:59.99
+34 22 59 59 999000 34 22:59:59.999000 838:59:59.999
+34 22 59 59 999900 34 22:59:59.999900 838:59:59.9999
+34 22 59 59 999990 34 22:59:59.999990 838:59:59.99999
+34 22 59 59 999999 34 22:59:59.999999 838:59:59.999999
+34 23 0 0 0 34 23:00:00.000000 838:59:59.9999999
+35 0 0 0 0 35 00:00:00.000000 839:59:59.9999999
+3652058 22 59 59 999999 3652058 22:59:59.999999 87649414:59:59.999999
+3652058 23 0 0 0 3652058 23:00:00.000000 87649414:59:59.9999999
+3652058 23 59 59 999999 3652058 23:59:59.999999 87649415:59:59.999999
+3652058 23 59 59 999999 3652058 23:59:59.999999 87649415:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '837:59:59.9999999'
+Note 1292 Truncated incorrect time value: '837:59:59.9999999'
+Note 1292 Truncated incorrect time value: '837:59:59.9999999'
+Note 1292 Truncated incorrect time value: '837:59:59.9999999'
+Note 1292 Truncated incorrect time value: '837:59:59.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '837:59:59.9999999'
+Note 1292 Truncated incorrect time value: '838:59:59.9999999'
+Note 1292 Truncated incorrect time value: '838:59:59.9999999'
+Note 1292 Truncated incorrect time value: '838:59:59.9999999'
+Note 1292 Truncated incorrect time value: '838:59:59.9999999'
+Note 1292 Truncated incorrect time value: '838:59:59.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '838:59:59.9999999'
+Note 1292 Truncated incorrect time value: '839:59:59.9999999'
+Note 1292 Truncated incorrect time value: '839:59:59.9999999'
+Note 1292 Truncated incorrect time value: '839:59:59.9999999'
+Note 1292 Truncated incorrect time value: '839:59:59.9999999'
+Note 1292 Truncated incorrect time value: '839:59:59.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '839:59:59.9999999'
+Note 1292 Truncated incorrect time value: '87649414:59:59.9999999'
+Note 1292 Truncated incorrect time value: '87649414:59:59.9999999'
+Note 1292 Truncated incorrect time value: '87649414:59:59.9999999'
+Note 1292 Truncated incorrect time value: '87649414:59:59.9999999'
+Note 1292 Truncated incorrect time value: '87649414:59:59.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '87649414:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '87649415:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '87649415:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '87649415:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '87649415:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '87649415:59:59.9999999'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '87649415:59:59.9999999'
+SELECT
+TIME_TO_SEC(a),
+CAST(a AS TIME(6)),
+a
+FROM t1_time_in_varchar ORDER BY id;
+TIME_TO_SEC(a) CAST(a AS TIME(6)) a
+0.000000 00:00:00.000000 00:00:00
+0.900000 00:00:00.900000 00:00:00.9
+0.990000 00:00:00.990000 00:00:00.99
+0.999000 00:00:00.999000 00:00:00.999
+0.999900 00:00:00.999900 00:00:00.9999
+0.999990 00:00:00.999990 00:00:00.99999
+0.999999 00:00:00.999999 00:00:00.999999
+1.000000 00:00:01.000000 00:00:00.9999999
+3016800.000000 838:00:00.000000 837:59:59.9999999
+3020399.000000 838:59:59.000000 838:59:59
+3020399.900000 838:59:59.900000 838:59:59.9
+3020399.990000 838:59:59.990000 838:59:59.99
+3020399.999000 838:59:59.999000 838:59:59.999
+3020399.999900 838:59:59.999900 838:59:59.9999
+3020399.999990 838:59:59.999990 838:59:59.99999
+3020399.999999 838:59:59.999999 838:59:59.999999
+3020399.999999 838:59:59.999999 838:59:59.9999999
+3020399.999999 838:59:59.999999 839:59:59.9999999
+3020399.999999 838:59:59.999999 87649414:59:59.999999
+3020399.999999 838:59:59.999999 87649414:59:59.9999999
+3020399.999999 838:59:59.999999 87649415:59:59.999999
+3020399.999999 838:59:59.999999 87649415:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '837:59:59.9999999'
+Note 1292 Truncated incorrect time value: '837:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '838:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '838:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '839:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '839:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '87649414:59:59.999999'
+Warning 1292 Truncated incorrect time value: '87649414:59:59.999999'
+Warning 1292 Truncated incorrect time value: '87649414:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '87649414:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '87649415:59:59.999999'
+Warning 1292 Truncated incorrect time value: '87649415:59:59.999999'
+Warning 1292 Truncated incorrect time value: '87649415:59:59.9999999'
+Warning 1292 Truncated incorrect time value: '87649415:59:59.9999999'
+#
+# Functions with a single TIME interval input, conversion from TIME-interval-in-DECIMAL
+#
+SELECT
+EXTRACT(DAY FROM a),
+EXTRACT(HOUR FROM a),
+EXTRACT(MINUTE FROM a),
+EXTRACT(SECOND FROM a),
+EXTRACT(MICROSECOND FROM a),
+CAST(a AS INTERVAL DAY_SECOND(6)),
+a
+FROM t1_time_in_decimal ORDER BY id;
+EXTRACT(DAY FROM a) EXTRACT(HOUR FROM a) EXTRACT(MINUTE FROM a) EXTRACT(SECOND FROM a) EXTRACT(MICROSECOND FROM a) CAST(a AS INTERVAL DAY_SECOND(6)) a
+0 0 0 0 0 00:00:00.000000 0.0000000000
+0 0 0 0 900000 00:00:00.900000 0.9000000000
+0 0 0 0 990000 00:00:00.990000 0.9900000000
+0 0 0 0 999000 00:00:00.999000 0.9990000000
+0 0 0 0 999900 00:00:00.999900 0.9999000000
+0 0 0 0 999990 00:00:00.999990 0.9999900000
+0 0 0 0 999999 00:00:00.999999 0.9999990000
+0 0 0 1 0 00:00:01.000000 0.9999999000
+34 22 0 0 0 34 22:00:00.000000 8375959.9999999000
+34 22 59 59 0 34 22:59:59.000000 8385959.0000000000
+34 22 59 59 900000 34 22:59:59.900000 8385959.9000000000
+34 22 59 59 990000 34 22:59:59.990000 8385959.9900000000
+34 22 59 59 999000 34 22:59:59.999000 8385959.9990000000
+34 22 59 59 999900 34 22:59:59.999900 8385959.9999000000
+34 22 59 59 999990 34 22:59:59.999990 8385959.9999900000
+34 22 59 59 999999 34 22:59:59.999999 8385959.9999990000
+34 23 0 0 0 34 23:00:00.000000 8385959.9999999000
+35 0 0 0 0 35 00:00:00.000000 8395959.9999999000
+3652058 22 59 59 999999 3652058 22:59:59.999999 876494145959.9999990000
+3652058 23 0 0 0 3652058 23:00:00.000000 876494145959.9999999000
+3652058 23 59 59 999999 3652058 23:59:59.999999 876494155959.9999990000
+3652058 23 59 59 999999 3652058 23:59:59.999999 876494155959.9999999000
+Warnings:
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.0000000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.9000000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.9900000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.9990000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.9999000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.9999900000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.9999990000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.9999999000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '8375959.9999999000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '8385959.0000000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '8385959.9000000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '8385959.9900000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '8385959.9990000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '8385959.9999000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '8385959.9999900000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '8385959.9999990000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '8385959.9999999000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '8395959.9999999000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '876494145959.9999990000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '876494145959.9999999000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '876494155959.9999990000'
+Warning 1292 Incorrect time value: '876494155959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 22
+Warning 1292 Incorrect time value: '876494155959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 22
+Warning 1292 Incorrect time value: '876494155959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 22
+Warning 1292 Incorrect time value: '876494155959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 22
+Warning 1292 Incorrect time value: '876494155959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 22
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '876494155959.9999999000'
+SELECT
+TIME_TO_SEC(a),
+CAST(a AS TIME(6)),
+a
+FROM t1_time_in_decimal ORDER BY id;
+TIME_TO_SEC(a) CAST(a AS TIME(6)) a
+0.000000 00:00:00.000000 0.0000000000
+0.900000 00:00:00.900000 0.9000000000
+0.990000 00:00:00.990000 0.9900000000
+0.999000 00:00:00.999000 0.9990000000
+0.999900 00:00:00.999900 0.9999000000
+0.999990 00:00:00.999990 0.9999900000
+0.999999 00:00:00.999999 0.9999990000
+1.000000 00:00:01.000000 0.9999999000
+3016800.000000 838:00:00.000000 8375959.9999999000
+3020399.000000 838:59:59.000000 8385959.0000000000
+3020399.900000 838:59:59.900000 8385959.9000000000
+3020399.990000 838:59:59.990000 8385959.9900000000
+3020399.999000 838:59:59.999000 8385959.9990000000
+3020399.999900 838:59:59.999900 8385959.9999000000
+3020399.999990 838:59:59.999990 8385959.9999900000
+3020399.999999 838:59:59.999999 8385959.9999990000
+3020399.999999 838:59:59.999999 8385959.9999999000
+3020399.999999 838:59:59.999999 8395959.9999999000
+NULL NULL 876494145959.9999990000
+NULL NULL 876494145959.9999999000
+NULL NULL 876494155959.9999990000
+NULL NULL 876494155959.9999999000
+Warnings:
+Warning 1292 Incorrect time value: '8385959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 17
+Warning 1292 Incorrect time value: '8385959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 17
+Warning 1292 Incorrect time value: '8395959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 18
+Warning 1292 Incorrect time value: '8395959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 18
+Warning 1292 Incorrect time value: '876494145959.9999990000' for column `test`.`t1_time_in_decimal`.`a` at row 19
+Warning 1292 Incorrect time value: '876494145959.9999990000' for column `test`.`t1_time_in_decimal`.`a` at row 19
+Warning 1292 Incorrect time value: '876494145959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 20
+Warning 1292 Incorrect time value: '876494145959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 20
+Warning 1292 Incorrect time value: '876494155959.9999990000' for column `test`.`t1_time_in_decimal`.`a` at row 21
+Warning 1292 Incorrect time value: '876494155959.9999990000' for column `test`.`t1_time_in_decimal`.`a` at row 21
+Warning 1292 Incorrect time value: '876494155959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 22
+Warning 1292 Incorrect time value: '876494155959.9999999000' for column `test`.`t1_time_in_decimal`.`a` at row 22
+#
+# Functions with a single DATE input, conversion from DATETIME-in-VARCHAR
+#
+SELECT QUARTER(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+QUARTER(a) CAST(a AS DATE) a
+4 2000-12-31 2000-12-31 23:59:59
+4 2000-12-31 2000-12-31 23:59:59.9
+4 2000-12-31 2000-12-31 23:59:59.99
+4 2000-12-31 2000-12-31 23:59:59.999
+4 2000-12-31 2000-12-31 23:59:59.9999
+4 2000-12-31 2000-12-31 23:59:59.99999
+4 2000-12-31 2000-12-31 23:59:59.999999
+1 2000-12-31 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT DAY(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+DAY(a) CAST(a AS DATE) a
+31 2000-12-31 2000-12-31 23:59:59
+31 2000-12-31 2000-12-31 23:59:59.9
+31 2000-12-31 2000-12-31 23:59:59.99
+31 2000-12-31 2000-12-31 23:59:59.999
+31 2000-12-31 2000-12-31 23:59:59.9999
+31 2000-12-31 2000-12-31 23:59:59.99999
+31 2000-12-31 2000-12-31 23:59:59.999999
+1 2000-12-31 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT MONTH(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+MONTH(a) CAST(a AS DATE) a
+12 2000-12-31 2000-12-31 23:59:59
+12 2000-12-31 2000-12-31 23:59:59.9
+12 2000-12-31 2000-12-31 23:59:59.99
+12 2000-12-31 2000-12-31 23:59:59.999
+12 2000-12-31 2000-12-31 23:59:59.9999
+12 2000-12-31 2000-12-31 23:59:59.99999
+12 2000-12-31 2000-12-31 23:59:59.999999
+1 2000-12-31 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT YEAR(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+YEAR(a) CAST(a AS DATE) a
+2000 2000-12-31 2000-12-31 23:59:59
+2000 2000-12-31 2000-12-31 23:59:59.9
+2000 2000-12-31 2000-12-31 23:59:59.99
+2000 2000-12-31 2000-12-31 23:59:59.999
+2000 2000-12-31 2000-12-31 23:59:59.9999
+2000 2000-12-31 2000-12-31 23:59:59.99999
+2000 2000-12-31 2000-12-31 23:59:59.999999
+2001 2000-12-31 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT DAYNAME(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+DAYNAME(a) CAST(a AS DATE) a
+Sunday 2000-12-31 2000-12-31 23:59:59
+Sunday 2000-12-31 2000-12-31 23:59:59.9
+Sunday 2000-12-31 2000-12-31 23:59:59.99
+Sunday 2000-12-31 2000-12-31 23:59:59.999
+Sunday 2000-12-31 2000-12-31 23:59:59.9999
+Sunday 2000-12-31 2000-12-31 23:59:59.99999
+Sunday 2000-12-31 2000-12-31 23:59:59.999999
+Monday 2000-12-31 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT MONTHNAME(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+MONTHNAME(a) CAST(a AS DATE) a
+December 2000-12-31 2000-12-31 23:59:59
+December 2000-12-31 2000-12-31 23:59:59.9
+December 2000-12-31 2000-12-31 23:59:59.99
+December 2000-12-31 2000-12-31 23:59:59.999
+December 2000-12-31 2000-12-31 23:59:59.9999
+December 2000-12-31 2000-12-31 23:59:59.99999
+December 2000-12-31 2000-12-31 23:59:59.999999
+January 2000-12-31 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT LAST_DAY(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+LAST_DAY(a) CAST(a AS DATE) a
+2000-12-31 2000-12-31 2000-12-31 23:59:59
+2000-12-31 2000-12-31 2000-12-31 23:59:59.9
+2000-12-31 2000-12-31 2000-12-31 23:59:59.99
+2000-12-31 2000-12-31 2000-12-31 23:59:59.999
+2000-12-31 2000-12-31 2000-12-31 23:59:59.9999
+2000-12-31 2000-12-31 2000-12-31 23:59:59.99999
+2000-12-31 2000-12-31 2000-12-31 23:59:59.999999
+2000-12-31 2000-12-31 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT TO_DAYS(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+TO_DAYS(a) CAST(a AS DATE) a
+730850 2000-12-31 2000-12-31 23:59:59
+730850 2000-12-31 2000-12-31 23:59:59.9
+730850 2000-12-31 2000-12-31 23:59:59.99
+730850 2000-12-31 2000-12-31 23:59:59.999
+730850 2000-12-31 2000-12-31 23:59:59.9999
+730850 2000-12-31 2000-12-31 23:59:59.99999
+730850 2000-12-31 2000-12-31 23:59:59.999999
+730851 2000-12-31 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT DAYOFYEAR(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+DAYOFYEAR(a) CAST(a AS DATE) a
+366 2000-12-31 2000-12-31 23:59:59
+366 2000-12-31 2000-12-31 23:59:59.9
+366 2000-12-31 2000-12-31 23:59:59.99
+366 2000-12-31 2000-12-31 23:59:59.999
+366 2000-12-31 2000-12-31 23:59:59.9999
+366 2000-12-31 2000-12-31 23:59:59.99999
+366 2000-12-31 2000-12-31 23:59:59.999999
+1 2000-12-31 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+CREATE TABLE t1 (a VARCHAR(32));
+INSERT INTO t1 VALUES
+('2002-01-05 23:59:59'),
+('2002-01-05 23:59:59.999999'),
+('2002-01-05 23:59:59.9999999');
+SELECT YEARWEEK(a), a FROM t1;
+YEARWEEK(a) a
+200152 2002-01-05 23:59:59
+200152 2002-01-05 23:59:59.999999
+200201 2002-01-05 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2002-01-05 23:59:59.9999999'
+SELECT WEEK(a), a FROM t1;
+WEEK(a) a
+0 2002-01-05 23:59:59
+0 2002-01-05 23:59:59.999999
+1 2002-01-05 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2002-01-05 23:59:59.9999999'
+SELECT WEEKDAY(a), a FROM t1;
+WEEKDAY(a) a
+5 2002-01-05 23:59:59
+5 2002-01-05 23:59:59.999999
+6 2002-01-05 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2002-01-05 23:59:59.9999999'
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(32,9));
+INSERT INTO t1 VALUES
+(20020105235959),
+(20020105235959.999999),
+(20020105235959.9999999);
+SELECT YEARWEEK(a), a FROM t1;
+YEARWEEK(a) a
+200152 20020105235959.000000000
+200152 20020105235959.999999000
+200201 20020105235959.999999900
+SELECT WEEK(a), a FROM t1;
+WEEK(a) a
+0 20020105235959.000000000
+0 20020105235959.999999000
+1 20020105235959.999999900
+SELECT WEEKDAY(a), a FROM t1;
+WEEKDAY(a) a
+5 20020105235959.000000000
+5 20020105235959.999999000
+6 20020105235959.999999900
+DROP TABLE t1;
+#
+# Functions with a single DATE input, conversion from DATETIME-in-DECIMAL
+#
+SELECT QUARTER(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+QUARTER(a) CAST(a AS DATE) a
+4 2000-12-31 20001231235959.0000000000
+4 2000-12-31 20001231235959.9000000000
+4 2000-12-31 20001231235959.9900000000
+4 2000-12-31 20001231235959.9990000000
+4 2000-12-31 20001231235959.9999000000
+4 2000-12-31 20001231235959.9999900000
+4 2000-12-31 20001231235959.9999990000
+1 2000-12-31 20001231235959.9999999000
+SELECT DAY(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+DAY(a) CAST(a AS DATE) a
+31 2000-12-31 20001231235959.0000000000
+31 2000-12-31 20001231235959.9000000000
+31 2000-12-31 20001231235959.9900000000
+31 2000-12-31 20001231235959.9990000000
+31 2000-12-31 20001231235959.9999000000
+31 2000-12-31 20001231235959.9999900000
+31 2000-12-31 20001231235959.9999990000
+1 2000-12-31 20001231235959.9999999000
+SELECT MONTH(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+MONTH(a) CAST(a AS DATE) a
+12 2000-12-31 20001231235959.0000000000
+12 2000-12-31 20001231235959.9000000000
+12 2000-12-31 20001231235959.9900000000
+12 2000-12-31 20001231235959.9990000000
+12 2000-12-31 20001231235959.9999000000
+12 2000-12-31 20001231235959.9999900000
+12 2000-12-31 20001231235959.9999990000
+1 2000-12-31 20001231235959.9999999000
+SELECT YEAR(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+YEAR(a) CAST(a AS DATE) a
+2000 2000-12-31 20001231235959.0000000000
+2000 2000-12-31 20001231235959.9000000000
+2000 2000-12-31 20001231235959.9900000000
+2000 2000-12-31 20001231235959.9990000000
+2000 2000-12-31 20001231235959.9999000000
+2000 2000-12-31 20001231235959.9999900000
+2000 2000-12-31 20001231235959.9999990000
+2001 2000-12-31 20001231235959.9999999000
+SELECT DAYNAME(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+DAYNAME(a) CAST(a AS DATE) a
+Sunday 2000-12-31 20001231235959.0000000000
+Sunday 2000-12-31 20001231235959.9000000000
+Sunday 2000-12-31 20001231235959.9900000000
+Sunday 2000-12-31 20001231235959.9990000000
+Sunday 2000-12-31 20001231235959.9999000000
+Sunday 2000-12-31 20001231235959.9999900000
+Sunday 2000-12-31 20001231235959.9999990000
+Monday 2000-12-31 20001231235959.9999999000
+SELECT MONTHNAME(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+MONTHNAME(a) CAST(a AS DATE) a
+December 2000-12-31 20001231235959.0000000000
+December 2000-12-31 20001231235959.9000000000
+December 2000-12-31 20001231235959.9900000000
+December 2000-12-31 20001231235959.9990000000
+December 2000-12-31 20001231235959.9999000000
+December 2000-12-31 20001231235959.9999900000
+December 2000-12-31 20001231235959.9999990000
+January 2000-12-31 20001231235959.9999999000
+SELECT YEARWEEK(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+YEARWEEK(a) CAST(a AS DATE) a
+200053 2000-12-31 20001231235959.0000000000
+200053 2000-12-31 20001231235959.9000000000
+200053 2000-12-31 20001231235959.9900000000
+200053 2000-12-31 20001231235959.9990000000
+200053 2000-12-31 20001231235959.9999000000
+200053 2000-12-31 20001231235959.9999900000
+200053 2000-12-31 20001231235959.9999990000
+200053 2000-12-31 20001231235959.9999999000
+SELECT LAST_DAY(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+LAST_DAY(a) CAST(a AS DATE) a
+2000-12-31 2000-12-31 20001231235959.0000000000
+2000-12-31 2000-12-31 20001231235959.9000000000
+2000-12-31 2000-12-31 20001231235959.9900000000
+2000-12-31 2000-12-31 20001231235959.9990000000
+2000-12-31 2000-12-31 20001231235959.9999000000
+2000-12-31 2000-12-31 20001231235959.9999900000
+2000-12-31 2000-12-31 20001231235959.9999990000
+2000-12-31 2000-12-31 20001231235959.9999999000
+SELECT TO_DAYS(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+TO_DAYS(a) CAST(a AS DATE) a
+730850 2000-12-31 20001231235959.0000000000
+730850 2000-12-31 20001231235959.9000000000
+730850 2000-12-31 20001231235959.9900000000
+730850 2000-12-31 20001231235959.9990000000
+730850 2000-12-31 20001231235959.9999000000
+730850 2000-12-31 20001231235959.9999900000
+730850 2000-12-31 20001231235959.9999990000
+730851 2000-12-31 20001231235959.9999999000
+SELECT DAYOFYEAR(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+DAYOFYEAR(a) CAST(a AS DATE) a
+366 2000-12-31 20001231235959.0000000000
+366 2000-12-31 20001231235959.9000000000
+366 2000-12-31 20001231235959.9900000000
+366 2000-12-31 20001231235959.9990000000
+366 2000-12-31 20001231235959.9999000000
+366 2000-12-31 20001231235959.9999900000
+366 2000-12-31 20001231235959.9999990000
+1 2000-12-31 20001231235959.9999999000
+SELECT DAYOFMONTH(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+DAYOFMONTH(a) CAST(a AS DATE) a
+31 2000-12-31 20001231235959.0000000000
+31 2000-12-31 20001231235959.9000000000
+31 2000-12-31 20001231235959.9900000000
+31 2000-12-31 20001231235959.9990000000
+31 2000-12-31 20001231235959.9999000000
+31 2000-12-31 20001231235959.9999900000
+31 2000-12-31 20001231235959.9999990000
+1 2000-12-31 20001231235959.9999999000
+#
+# Functions with a single DATETIME input, conversion from DATETIME-in-VARCHAR
+#
+SELECT TO_SECONDS(a), CAST(a AS DATETIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+TO_SECONDS(a) CAST(a AS DATETIME(6)) a
+63145526399 2000-12-31 23:59:59.000000 2000-12-31 23:59:59
+63145526399 2000-12-31 23:59:59.900000 2000-12-31 23:59:59.9
+63145526399 2000-12-31 23:59:59.990000 2000-12-31 23:59:59.99
+63145526399 2000-12-31 23:59:59.999000 2000-12-31 23:59:59.999
+63145526399 2000-12-31 23:59:59.999900 2000-12-31 23:59:59.9999
+63145526399 2000-12-31 23:59:59.999990 2000-12-31 23:59:59.99999
+63145526399 2000-12-31 23:59:59.999999 2000-12-31 23:59:59.999999
+63145526400 2001-01-01 00:00:00.000000 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SET time_zone='+00:00';
+SELECT UNIX_TIMESTAMP(a), CAST(a AS DATETIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+UNIX_TIMESTAMP(a) CAST(a AS DATETIME(6)) a
+978307199.000000 2000-12-31 23:59:59.000000 2000-12-31 23:59:59
+978307199.900000 2000-12-31 23:59:59.900000 2000-12-31 23:59:59.9
+978307199.990000 2000-12-31 23:59:59.990000 2000-12-31 23:59:59.99
+978307199.999000 2000-12-31 23:59:59.999000 2000-12-31 23:59:59.999
+978307199.999900 2000-12-31 23:59:59.999900 2000-12-31 23:59:59.9999
+978307199.999990 2000-12-31 23:59:59.999990 2000-12-31 23:59:59.99999
+978307199.999999 2000-12-31 23:59:59.999999 2000-12-31 23:59:59.999999
+978307200.000000 2001-01-01 00:00:00.000000 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SET time_zone=DEFAULT;
+SELECT CONVERT_TZ(a, '+00:00','+00:00'), CAST(a AS DATETIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+CONVERT_TZ(a, '+00:00','+00:00') CAST(a AS DATETIME(6)) a
+2000-12-31 23:59:59.000000 2000-12-31 23:59:59.000000 2000-12-31 23:59:59
+2000-12-31 23:59:59.900000 2000-12-31 23:59:59.900000 2000-12-31 23:59:59.9
+2000-12-31 23:59:59.990000 2000-12-31 23:59:59.990000 2000-12-31 23:59:59.99
+2000-12-31 23:59:59.999000 2000-12-31 23:59:59.999000 2000-12-31 23:59:59.999
+2000-12-31 23:59:59.999900 2000-12-31 23:59:59.999900 2000-12-31 23:59:59.9999
+2000-12-31 23:59:59.999990 2000-12-31 23:59:59.999990 2000-12-31 23:59:59.99999
+2000-12-31 23:59:59.999999 2000-12-31 23:59:59.999999 2000-12-31 23:59:59.999999
+2001-01-01 00:00:00.000000 2001-01-01 00:00:00.000000 2000-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+#
+# Functions with a single DATETIME input, conversion from DATETIME-in-DECIMAL
+#
+SELECT TO_SECONDS(a), CAST(a AS DATETIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+TO_SECONDS(a) CAST(a AS DATETIME(6)) a
+63145526399 2000-12-31 23:59:59.000000 20001231235959.0000000000
+63145526399 2000-12-31 23:59:59.900000 20001231235959.9000000000
+63145526399 2000-12-31 23:59:59.990000 20001231235959.9900000000
+63145526399 2000-12-31 23:59:59.999000 20001231235959.9990000000
+63145526399 2000-12-31 23:59:59.999900 20001231235959.9999000000
+63145526399 2000-12-31 23:59:59.999990 20001231235959.9999900000
+63145526399 2000-12-31 23:59:59.999999 20001231235959.9999990000
+63145526400 2001-01-01 00:00:00.000000 20001231235959.9999999000
+SET time_zone='+00:00';
+SELECT UNIX_TIMESTAMP(a), CAST(a AS DATETIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+UNIX_TIMESTAMP(a) CAST(a AS DATETIME(6)) a
+978307199.000000 2000-12-31 23:59:59.000000 20001231235959.0000000000
+978307199.900000 2000-12-31 23:59:59.900000 20001231235959.9000000000
+978307199.990000 2000-12-31 23:59:59.990000 20001231235959.9900000000
+978307199.999000 2000-12-31 23:59:59.999000 20001231235959.9990000000
+978307199.999900 2000-12-31 23:59:59.999900 20001231235959.9999000000
+978307199.999990 2000-12-31 23:59:59.999990 20001231235959.9999900000
+978307199.999999 2000-12-31 23:59:59.999999 20001231235959.9999990000
+978307200.000000 2001-01-01 00:00:00.000000 20001231235959.9999999000
+SET time_zone=DEFAULT;
+SELECT CONVERT_TZ(a, '+00:00','+00:00'), CAST(a AS DATETIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+CONVERT_TZ(a, '+00:00','+00:00') CAST(a AS DATETIME(6)) a
+2000-12-31 23:59:59.000000 2000-12-31 23:59:59.000000 20001231235959.0000000000
+2000-12-31 23:59:59.900000 2000-12-31 23:59:59.900000 20001231235959.9000000000
+2000-12-31 23:59:59.990000 2000-12-31 23:59:59.990000 20001231235959.9900000000
+2000-12-31 23:59:59.999000 2000-12-31 23:59:59.999000 20001231235959.9990000000
+2000-12-31 23:59:59.999900 2000-12-31 23:59:59.999900 20001231235959.9999000000
+2000-12-31 23:59:59.999990 2000-12-31 23:59:59.999990 20001231235959.9999900000
+2000-12-31 23:59:59.999999 2000-12-31 23:59:59.999999 20001231235959.9999990000
+2001-01-01 00:00:00.000000 2001-01-01 00:00:00.000000 20001231235959.9999999000
+DROP TABLE t1_datetime_in_varchar;
+DROP TABLE t1_datetime_in_decimal;
+DROP TABLE t1_time_in_varchar;
+DROP TABLE t1_time_in_decimal;
+#
+# Functions that construct DATETIME
+#
+SET time_zone='+00:00';
+CREATE TABLE t1_unix_timestamp (id SERIAL, a DECIMAL(30,10));
+INSERT INTO t1_unix_timestamp (a) VALUES
+(980639999),
+(980639999.9),
+(980639999.999999),
+(980639999.9999999),
+(2147483647),
+(2147483647.9),
+(2147483647.999999),
+(2147483647.9999999);
+SELECT a, FROM_UNIXTIME(a) FROM t1_unix_timestamp ORDER BY id;
+a FROM_UNIXTIME(a)
+980639999.0000000000 2001-01-27 23:59:59.000000
+980639999.9000000000 2001-01-27 23:59:59.900000
+980639999.9999990000 2001-01-27 23:59:59.999999
+980639999.9999999000 2001-01-28 00:00:00.000000
+2147483647.0000000000 2038-01-19 03:14:07.000000
+2147483647.9000000000 2038-01-19 03:14:07.900000
+2147483647.9999990000 2038-01-19 03:14:07.999999
+2147483647.9999999000 NULL
+DROP TABLE t1_unix_timestamp;
+SET time_zone=DEFAULT;
+#
+# Functions that construct TIME
+#
+CREATE TABLE t1_sec (id SERIAL, a DECIMAL(38,10));
+INSERT INTO t1_sec (a) VALUES
+(59),
+(59.9),
+(59.999999),
+(59.9999999),
+(3020398),
+(3020398.999999),
+(3020398.9999999),
+(3020399),
+(3020399.999999),
+(3020399.9999999),
+(9223372036854775807),
+(9223372036854775807.9),
+(9223372036854775807.999999),
+(9223372036854775807.9999999),
+(18446744073709551615),
+(18446744073709551615.9),
+(18446744073709551615.999999),
+(18446744073709551615.9999999);
+SELECT a, SEC_TO_TIME(a) FROM t1_sec ORDER BY id;
+a SEC_TO_TIME(a)
+59.0000000000 00:00:59.000000
+59.9000000000 00:00:59.900000
+59.9999990000 00:00:59.999999
+59.9999999000 00:01:00.000000
+3020398.0000000000 838:59:58.000000
+3020398.9999990000 838:59:58.999999
+3020398.9999999000 838:59:59.000000
+3020399.0000000000 838:59:59.000000
+3020399.9999990000 838:59:59.999999
+3020399.9999999000 838:59:59.999999
+9223372036854775807.0000000000 838:59:59.999999
+9223372036854775807.9000000000 838:59:59.999999
+9223372036854775807.9999990000 838:59:59.999999
+9223372036854775807.9999999000 838:59:59.999999
+18446744073709551615.0000000000 838:59:59.999999
+18446744073709551615.9000000000 838:59:59.999999
+18446744073709551615.9999990000 838:59:59.999999
+18446744073709551615.9999999000 838:59:59.999999
+Warnings:
+Warning 1292 Truncated incorrect seconds value: '3020400'
+Warning 1292 Truncated incorrect seconds value: '9223372036854775807.0000000000'
+Warning 1292 Truncated incorrect seconds value: '9223372036854775807.9000000000'
+Warning 1292 Truncated incorrect seconds value: '9223372036854775807.9999990000'
+Warning 1292 Truncated incorrect seconds value: '9223372036854775807.9999999000'
+Warning 1292 Truncated incorrect seconds value: '18446744073709551615.0000000000'
+Warning 1292 Truncated incorrect seconds value: '18446744073709551615.9000000000'
+Warning 1292 Truncated incorrect seconds value: '18446744073709551615.9999990000'
+Warning 1292 Truncated incorrect seconds value: '18446744073709551615.9999999000'
+DROP TABLE t1_sec;
+CREATE TABLE t1_sec (id SERIAL, a DECIMAL(38,10));
+INSERT INTO t1_sec (a) VALUES
+(0),
+(0.9),
+(0.999999),
+(0.9999999);
+SELECT a, MAKETIME(0, 0, a) FROM t1_sec ORDER BY id;
+a MAKETIME(0, 0, a)
+0.0000000000 00:00:00.000000
+0.9000000000 00:00:00.900000
+0.9999990000 00:00:00.999999
+0.9999999000 00:00:01.000000
+DROP TABLE t1_sec;
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2017-12-31 23:59:59'),
+('2017-12-31 23:59:59.9'),
+('2017-12-31 23:59:59.999999'),
+('2017-12-31 23:59:59.9999999');
+SELECT
+'----',
+a,
+DATE_FORMAT(a, '%Y') AS yyyy,
+DATE_FORMAT(a, '%Y-%m-%d') AS d,
+DATE_FORMAT(a, '%H:%i:%s') AS t0,
+DATE_FORMAT(a, '%H:%i:%s.%f') AS t6,
+DATE_FORMAT(a, '%Y-%m-%d %H:%i:%s') AS dt0,
+DATE_FORMAT(a, '%Y-%m-%d %H:%i:%s.%f') AS dt6
+FROM t1;
+---- ----
+a 2017-12-31 23:59:59
+yyyy 2017
+d 2017-12-31
+t0 23:59:59
+t6 23:59:59.000000
+dt0 2017-12-31 23:59:59
+dt6 2017-12-31 23:59:59.000000
+---- ----
+a 2017-12-31 23:59:59.9
+yyyy 2017
+d 2017-12-31
+t0 23:59:59
+t6 23:59:59.900000
+dt0 2017-12-31 23:59:59
+dt6 2017-12-31 23:59:59.900000
+---- ----
+a 2017-12-31 23:59:59.999999
+yyyy 2017
+d 2017-12-31
+t0 23:59:59
+t6 23:59:59.999999
+dt0 2017-12-31 23:59:59
+dt6 2017-12-31 23:59:59.999999
+---- ----
+a 2017-12-31 23:59:59.9999999
+yyyy 2018
+d 2018-01-01
+t0 00:00:00
+t6 00:00:00.000000
+dt0 2018-01-01 00:00:00
+dt6 2018-01-01 00:00:00.000000
+Warnings:
+Level Note
+Code 1292
+Message Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Level Note
+Code 1292
+Message Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Level Note
+Code 1292
+Message Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Level Note
+Code 1292
+Message Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Level Note
+Code 1292
+Message Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Level Note
+Code 1292
+Message Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(32,9));
+INSERT INTO t1 VALUES
+(20171231235959),
+(20171231235959.9),
+(20171231235959.999999),
+(20171231235959.9999999);
+SELECT
+'----',
+a,
+DATE_FORMAT(a, '%Y') AS yyyy,
+DATE_FORMAT(a, '%Y-%m-%d') AS d,
+DATE_FORMAT(a, '%H:%i:%s') AS t0,
+DATE_FORMAT(a, '%H:%i:%s.%f') AS t6,
+DATE_FORMAT(a, '%Y-%m-%d %H:%i:%s') AS dt0,
+DATE_FORMAT(a, '%Y-%m-%d %H:%i:%s.%f') AS dt6
+FROM t1;
+---- ----
+a 20171231235959.000000000
+yyyy 2017
+d 2017-12-31
+t0 23:59:59
+t6 23:59:59.000000
+dt0 2017-12-31 23:59:59
+dt6 2017-12-31 23:59:59.000000
+---- ----
+a 20171231235959.900000000
+yyyy 2017
+d 2017-12-31
+t0 23:59:59
+t6 23:59:59.900000
+dt0 2017-12-31 23:59:59
+dt6 2017-12-31 23:59:59.900000
+---- ----
+a 20171231235959.999999000
+yyyy 2017
+d 2017-12-31
+t0 23:59:59
+t6 23:59:59.999999
+dt0 2017-12-31 23:59:59
+dt6 2017-12-31 23:59:59.999999
+---- ----
+a 20171231235959.999999900
+yyyy 2018
+d 2018-01-01
+t0 00:00:00
+t6 00:00:00.000000
+dt0 2018-01-01 00:00:00
+dt6 2018-01-01 00:00:00.000000
+DROP TABLE t1;
+#
+# Functions with two temporal parameters, for which MySQL rounds nanoseconds in both parameters
+#
+CREATE TABLE t1 (a VARCHAR(64));
+CREATE TABLE t2 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2017-12-31 23:59:59'),
+('2017-12-31 23:59:59.9'),
+('2017-12-31 23:59:59.999999'),
+('2017-12-31 23:59:59.9999999');
+INSERT INTO t2 VALUES
+('00:00:00'),
+('00:00:00.9'),
+('00:00:00.999999'),
+('00:00:00.9999999');
+SELECT TIMESTAMP(t1.a, t2.a), t1.a, t2.a FROM t1,t2 ORDER BY t1.a, t2.a;
+TIMESTAMP(t1.a, t2.a) a a
+2017-12-31 23:59:59.000000 2017-12-31 23:59:59 00:00:00
+2017-12-31 23:59:59.900000 2017-12-31 23:59:59 00:00:00.9
+2017-12-31 23:59:59.999999 2017-12-31 23:59:59 00:00:00.999999
+2018-01-01 00:00:00.000000 2017-12-31 23:59:59 00:00:00.9999999
+2017-12-31 23:59:59.900000 2017-12-31 23:59:59.9 00:00:00
+2018-01-01 00:00:00.800000 2017-12-31 23:59:59.9 00:00:00.9
+2018-01-01 00:00:00.899999 2017-12-31 23:59:59.9 00:00:00.999999
+2018-01-01 00:00:00.900000 2017-12-31 23:59:59.9 00:00:00.9999999
+2017-12-31 23:59:59.999999 2017-12-31 23:59:59.999999 00:00:00
+2018-01-01 00:00:00.899999 2017-12-31 23:59:59.999999 00:00:00.9
+2018-01-01 00:00:00.999998 2017-12-31 23:59:59.999999 00:00:00.999999
+2018-01-01 00:00:00.999999 2017-12-31 23:59:59.999999 00:00:00.9999999
+2018-01-01 00:00:00.000000 2017-12-31 23:59:59.9999999 00:00:00
+2018-01-01 00:00:00.900000 2017-12-31 23:59:59.9999999 00:00:00.9
+2018-01-01 00:00:00.999999 2017-12-31 23:59:59.9999999 00:00:00.999999
+2018-01-01 00:00:01.000000 2017-12-31 23:59:59.9999999 00:00:00.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '00:00:00.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '00:00:00.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '00:00:00.9999999'
+Note 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '00:00:00.9999999'
+SELECT ADDTIME(t1.a, t2.a), t1.a, t2.a FROM t1,t2 ORDER BY t1.a, t2.a;
+ADDTIME(t1.a, t2.a) a a
+2017-12-31 23:59:59 2017-12-31 23:59:59 00:00:00
+2017-12-31 23:59:59.900000 2017-12-31 23:59:59 00:00:00.9
+2017-12-31 23:59:59.999999 2017-12-31 23:59:59 00:00:00.999999
+2018-01-01 00:00:00 2017-12-31 23:59:59 00:00:00.9999999
+2017-12-31 23:59:59.900000 2017-12-31 23:59:59.9 00:00:00
+2018-01-01 00:00:00.800000 2017-12-31 23:59:59.9 00:00:00.9
+2018-01-01 00:00:00.899999 2017-12-31 23:59:59.9 00:00:00.999999
+2018-01-01 00:00:00.900000 2017-12-31 23:59:59.9 00:00:00.9999999
+2017-12-31 23:59:59.999999 2017-12-31 23:59:59.999999 00:00:00
+2018-01-01 00:00:00.899999 2017-12-31 23:59:59.999999 00:00:00.9
+2018-01-01 00:00:00.999998 2017-12-31 23:59:59.999999 00:00:00.999999
+2018-01-01 00:00:00.999999 2017-12-31 23:59:59.999999 00:00:00.9999999
+2018-01-01 00:00:00 2017-12-31 23:59:59.9999999 00:00:00
+2018-01-01 00:00:00.900000 2017-12-31 23:59:59.9999999 00:00:00.9
+2018-01-01 00:00:00.999999 2017-12-31 23:59:59.9999999 00:00:00.999999
+2018-01-01 00:00:01 2017-12-31 23:59:59.9999999 00:00:00.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '00:00:00.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '00:00:00.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '00:00:00.9999999'
+Note 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '00:00:00.9999999'
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a VARCHAR(64));
+CREATE TABLE t2 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('23:59:59'),
+('23:59:59.9'),
+('23:59:59.999999'),
+('23:59:59.9999999');
+INSERT INTO t2 VALUES
+('00:00:00'),
+('00:00:00.9'),
+('00:00:00.999999'),
+('00:00:00.9999999');
+SELECT TIMEDIFF(t1.a, t2.a), t1.a, t2.a FROM t1,t2 ORDER BY t1.a, t2.a;
+TIMEDIFF(t1.a, t2.a) a a
+23:59:59.000000 23:59:59 00:00:00
+23:59:58.100000 23:59:59 00:00:00.9
+23:59:58.000001 23:59:59 00:00:00.999999
+23:59:58.000000 23:59:59 00:00:00.9999999
+23:59:59.900000 23:59:59.9 00:00:00
+23:59:59.000000 23:59:59.9 00:00:00.9
+23:59:58.900001 23:59:59.9 00:00:00.999999
+23:59:58.900000 23:59:59.9 00:00:00.9999999
+23:59:59.999999 23:59:59.999999 00:00:00
+23:59:59.099999 23:59:59.999999 00:00:00.9
+23:59:59.000000 23:59:59.999999 00:00:00.999999
+23:59:58.999999 23:59:59.999999 00:00:00.9999999
+24:00:00.000000 23:59:59.9999999 00:00:00
+23:59:59.100000 23:59:59.9999999 00:00:00.9
+23:59:59.000001 23:59:59.9999999 00:00:00.999999
+23:59:59.000000 23:59:59.9999999 00:00:00.9999999
+Warnings:
+Note 1292 Truncated incorrect time value: '23:59:59.9999999'
+Note 1292 Truncated incorrect time value: '23:59:59.9999999'
+Note 1292 Truncated incorrect time value: '23:59:59.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '23:59:59.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a VARCHAR(64));
+CREATE TABLE t2 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2001-12-31 23:59:59'),
+('2001-12-31 23:59:59.9'),
+('2001-12-31 23:59:59.999999'),
+('2001-12-31 23:59:59.9999999');
+INSERT INTO t2 VALUES
+('2001-12-31 23:59:59'),
+('2001-12-31 23:59:59.9'),
+('2001-12-31 23:59:59.999999'),
+('2001-12-31 23:59:59.9999999');
+SELECT TIMESTAMPDIFF(MICROSECOND,t1.a, t2.a), t1.a, t2.a FROM t1,t2 ORDER BY t1.a, t2.a;
+TIMESTAMPDIFF(MICROSECOND,t1.a, t2.a) a a
+0 2001-12-31 23:59:59 2001-12-31 23:59:59
+900000 2001-12-31 23:59:59 2001-12-31 23:59:59.9
+999999 2001-12-31 23:59:59 2001-12-31 23:59:59.999999
+1000000 2001-12-31 23:59:59 2001-12-31 23:59:59.9999999
+-900000 2001-12-31 23:59:59.9 2001-12-31 23:59:59
+0 2001-12-31 23:59:59.9 2001-12-31 23:59:59.9
+99999 2001-12-31 23:59:59.9 2001-12-31 23:59:59.999999
+100000 2001-12-31 23:59:59.9 2001-12-31 23:59:59.9999999
+-999999 2001-12-31 23:59:59.999999 2001-12-31 23:59:59
+-99999 2001-12-31 23:59:59.999999 2001-12-31 23:59:59.9
+0 2001-12-31 23:59:59.999999 2001-12-31 23:59:59.999999
+1 2001-12-31 23:59:59.999999 2001-12-31 23:59:59.9999999
+-1000000 2001-12-31 23:59:59.9999999 2001-12-31 23:59:59
+-100000 2001-12-31 23:59:59.9999999 2001-12-31 23:59:59.9
+-1 2001-12-31 23:59:59.9999999 2001-12-31 23:59:59.999999
+0 2001-12-31 23:59:59.9999999 2001-12-31 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2001-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2001-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2001-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2001-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2001-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2001-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2001-12-31 23:59:59.9999999'
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a VARCHAR(64));
+CREATE TABLE t2 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('23:59:59'),
+('23:59:59.9'),
+('23:59:59.999999'),
+('23:59:59.9999999');
+INSERT INTO t2 VALUES
+('00:00:00'),
+('00:00:00.9'),
+('00:00:00.999999'),
+('00:00:00.9999999');
+SELECT TIMEDIFF(t1.a, t2.a), t1.a, t2.a FROM t1,t2 ORDER BY t1.a, t2.a;
+TIMEDIFF(t1.a, t2.a) a a
+23:59:59.000000 23:59:59 00:00:00
+23:59:58.100000 23:59:59 00:00:00.9
+23:59:58.000001 23:59:59 00:00:00.999999
+23:59:58.000000 23:59:59 00:00:00.9999999
+23:59:59.900000 23:59:59.9 00:00:00
+23:59:59.000000 23:59:59.9 00:00:00.9
+23:59:58.900001 23:59:59.9 00:00:00.999999
+23:59:58.900000 23:59:59.9 00:00:00.9999999
+23:59:59.999999 23:59:59.999999 00:00:00
+23:59:59.099999 23:59:59.999999 00:00:00.9
+23:59:59.000000 23:59:59.999999 00:00:00.999999
+23:59:58.999999 23:59:59.999999 00:00:00.9999999
+24:00:00.000000 23:59:59.9999999 00:00:00
+23:59:59.100000 23:59:59.9999999 00:00:00.9
+23:59:59.000001 23:59:59.9999999 00:00:00.999999
+23:59:59.000000 23:59:59.9999999 00:00:00.9999999
+Warnings:
+Note 1292 Truncated incorrect time value: '23:59:59.9999999'
+Note 1292 Truncated incorrect time value: '23:59:59.9999999'
+Note 1292 Truncated incorrect time value: '23:59:59.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+Note 1292 Truncated incorrect time value: '23:59:59.9999999'
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+DROP TABLE t1, t2;
+#
+# STR_TO_DATE behaviour is questionable in MySQL 5.6 (MySQL Bug #92474)
+#
+# It truncates nanoseconds, but this may change in the future.
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2017-12-31 23:59:59'),
+('2017-12-31 23:59:59.9'),
+('2017-12-31 23:59:59.999999'),
+('2017-12-31 23:59:59.9999999');
+SELECT
+a,
+STR_TO_DATE(a, '%Y-%m-%d %H:%i:%s') AS c0,
+STR_TO_DATE(a, '%Y-%m-%d %H:%i:%s.%f') AS c6
+FROM t1;
+a c0 c6
+2017-12-31 23:59:59 2017-12-31 23:59:59 2017-12-31 23:59:59.000000
+2017-12-31 23:59:59.9 2017-12-31 23:59:59 2017-12-31 23:59:59.900000
+2017-12-31 23:59:59.999999 2017-12-31 23:59:59 2017-12-31 23:59:59.999999
+2017-12-31 23:59:59.9999999 2017-12-31 23:59:59 2017-12-31 23:59:59.999999
+Warnings:
+Warning 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9'
+Warning 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.999999'
+Warning 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Warning 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+DROP TABLE t1;
+#
+# DATE_ADD behaviour is questionable in MySQL 5.6 (MySQL Bug#92473)
+# It rounds nanoseconds in the first argument, but truncates nanoseconds in the second argument.
+# This may change in the future, to round both arguments.
+#
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2017-12-31 23:59:59'),
+('2017-12-31 23:59:59.9'),
+('2017-12-31 23:59:59.999999'),
+('2017-12-31 23:59:59.9999999');
+CREATE TABLE t2 (b DECIMAL(32,9));
+INSERT INTO t2 VALUES
+(0),
+(0.9),
+(0.999999),
+(0.9999999);
+SELECT a, b, DATE_ADD(a, INTERVAL b SECOND) FROM t1,t2 ORDER BY a,b;
+a b DATE_ADD(a, INTERVAL b SECOND)
+2017-12-31 23:59:59 0.000000000 2017-12-31 23:59:59.000000
+2017-12-31 23:59:59 0.900000000 2017-12-31 23:59:59.900000
+2017-12-31 23:59:59 0.999999000 2017-12-31 23:59:59.999999
+2017-12-31 23:59:59 0.999999900 2017-12-31 23:59:59.999999
+2017-12-31 23:59:59.9 0.000000000 2017-12-31 23:59:59.900000
+2017-12-31 23:59:59.9 0.900000000 2018-01-01 00:00:00.800000
+2017-12-31 23:59:59.9 0.999999000 2018-01-01 00:00:00.899999
+2017-12-31 23:59:59.9 0.999999900 2018-01-01 00:00:00.899999
+2017-12-31 23:59:59.999999 0.000000000 2017-12-31 23:59:59.999999
+2017-12-31 23:59:59.999999 0.900000000 2018-01-01 00:00:00.899999
+2017-12-31 23:59:59.999999 0.999999000 2018-01-01 00:00:00.999998
+2017-12-31 23:59:59.999999 0.999999900 2018-01-01 00:00:00.999998
+2017-12-31 23:59:59.9999999 0.000000000 2018-01-01 00:00:00.000000
+2017-12-31 23:59:59.9999999 0.900000000 2018-01-01 00:00:00.900000
+2017-12-31 23:59:59.9999999 0.999999000 2018-01-01 00:00:00.999999
+2017-12-31 23:59:59.9999999 0.999999900 2018-01-01 00:00:00.999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2017-12-31 23:59:59.9999999'
+DROP TABLE t1, t2;
diff --git a/mysql-test/main/func_time_round.test b/mysql-test/main/func_time_round.test
new file mode 100644
index 00000000000..12d3a50a10f
--- /dev/null
+++ b/mysql-test/main/func_time_round.test
@@ -0,0 +1,461 @@
+SET sql_mode=IF(@@version LIKE '%MariaDB%', 'TIME_ROUND_FRACTIONAL', '');
+SET @default_sql_mode=@@sql_mode;
+
+CREATE TABLE t1_datetime_in_varchar (id SERIAL, a VARCHAR(64));
+INSERT INTO t1_datetime_in_varchar (a) VALUES
+('2000-12-31 23:59:59'),
+('2000-12-31 23:59:59.9'),
+('2000-12-31 23:59:59.99'),
+('2000-12-31 23:59:59.999'),
+('2000-12-31 23:59:59.9999'),
+('2000-12-31 23:59:59.99999'),
+('2000-12-31 23:59:59.999999'),
+('2000-12-31 23:59:59.9999999');
+
+CREATE TABLE t1_datetime_in_decimal (id SERIAL, a DECIMAL(38,10));
+INSERT INTO t1_datetime_in_decimal (a) VALUES
+(20001231235959),
+(20001231235959.9),
+(20001231235959.99),
+(20001231235959.999),
+(20001231235959.9999),
+(20001231235959.99999),
+(20001231235959.999999),
+(20001231235959.9999999);
+
+
+CREATE TABLE t1_time_in_varchar (id SERIAL, a VARCHAR(64));
+INSERT INTO t1_time_in_varchar (a) VALUES
+('00:00:00'),
+('00:00:00.9'),
+('00:00:00.99'),
+('00:00:00.999'),
+('00:00:00.9999'),
+('00:00:00.99999'),
+('00:00:00.999999'),
+('00:00:00.9999999');
+INSERT INTO t1_time_in_varchar (a) VALUES
+('837:59:59.9999999'),
+('838:59:59'),
+('838:59:59.9'),
+('838:59:59.99'),
+('838:59:59.999'),
+('838:59:59.9999'),
+('838:59:59.99999'),
+('838:59:59.999999'),
+('838:59:59.9999999'),
+('839:59:59.9999999'),
+('87649414:59:59.999999'),
+('87649414:59:59.9999999'),
+('87649415:59:59.999999'),
+('87649415:59:59.9999999');
+
+
+CREATE TABLE t1_time_in_decimal (id SERIAL, a DECIMAL(38,10));
+INSERT INTO t1_time_in_decimal (a) VALUES
+(0),
+(0.9),
+(0.99),
+(0.999),
+(0.9999),
+(0.99999),
+(0.999999),
+(0.9999999);
+INSERT INTO t1_time_in_decimal (a) VALUES
+(8375959.9999999),
+(8385959),
+(8385959.9),
+(8385959.99),
+(8385959.999),
+(8385959.9999),
+(8385959.99999),
+(8385959.999999),
+(8385959.9999999),
+(8395959.9999999),
+(876494145959.999999),
+(876494145959.9999999),
+(876494155959.999999),
+(876494155959.9999999);
+
+--echo #
+--echo # TIME: LEAST/GREATEST
+--echo #
+
+SELECT GREATEST(TIME'00:00:00', a) FROM t1_time_in_varchar;
+SELECT GREATEST(TIME'00:00:00', a) FROM t1_time_in_decimal;
+
+SELECT GREATEST(TIME'00:00:00', '00:00:00.0000004');
+SELECT GREATEST(TIME'00:00:00', 0.0000004);
+
+SELECT GREATEST(TIME'00:00:00', '00:00:00.0000005');
+SELECT GREATEST(TIME'00:00:00', 0.0000005);
+
+
+--echo #
+--echo # Functions with a single TIME input, conversion from DATETIME-in-VARCHAR
+--echo #
+
+SELECT SECOND(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+SELECT MINUTE(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+SELECT HOUR(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+
+SELECT EXTRACT(SECOND FROM a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+SELECT EXTRACT(MINUTE FROM a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+SELECT EXTRACT(HOUR FROM a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+
+SELECT TIME_TO_SEC(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+
+
+--echo #
+--echo # Functions with a single TIME input, conversion from DATETIME-in-DECIMAL
+--echo #
+
+
+SELECT SECOND(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+SELECT MINUTE(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+SELECT HOUR(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+
+SELECT EXTRACT(SECOND FROM a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+SELECT EXTRACT(MINUTE FROM a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+SELECT EXTRACT(HOUR FROM a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+
+SELECT TIME_TO_SEC(a), CAST(a AS TIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+
+
+--echo #
+--echo # Functions with a single TIME interval input, conversion from TIME-interval-in-VARCHAR
+--echo #
+
+SELECT
+ EXTRACT(DAY FROM a),
+ EXTRACT(HOUR FROM a),
+ EXTRACT(MINUTE FROM a),
+ EXTRACT(SECOND FROM a),
+ EXTRACT(MICROSECOND FROM a),
+ CAST(a AS INTERVAL DAY_SECOND(6)),
+ a
+FROM t1_time_in_varchar ORDER BY id;
+
+SELECT
+ TIME_TO_SEC(a),
+ CAST(a AS TIME(6)),
+ a
+FROM t1_time_in_varchar ORDER BY id;
+
+--echo #
+--echo # Functions with a single TIME interval input, conversion from TIME-interval-in-DECIMAL
+--echo #
+
+SELECT
+ EXTRACT(DAY FROM a),
+ EXTRACT(HOUR FROM a),
+ EXTRACT(MINUTE FROM a),
+ EXTRACT(SECOND FROM a),
+ EXTRACT(MICROSECOND FROM a),
+ CAST(a AS INTERVAL DAY_SECOND(6)),
+ a
+FROM t1_time_in_decimal ORDER BY id;
+
+SELECT
+ TIME_TO_SEC(a),
+ CAST(a AS TIME(6)),
+ a
+FROM t1_time_in_decimal ORDER BY id;
+
+
+--echo #
+--echo # Functions with a single DATE input, conversion from DATETIME-in-VARCHAR
+--echo #
+
+SELECT QUARTER(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+SELECT DAY(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+SELECT MONTH(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+SELECT YEAR(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+
+SELECT DAYNAME(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+SELECT MONTHNAME(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+
+SELECT LAST_DAY(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+SELECT TO_DAYS(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+SELECT DAYOFYEAR(a), CAST(a AS DATE), a FROM t1_datetime_in_varchar ORDER BY id;
+
+
+CREATE TABLE t1 (a VARCHAR(32));
+INSERT INTO t1 VALUES
+('2002-01-05 23:59:59'),
+('2002-01-05 23:59:59.999999'),
+('2002-01-05 23:59:59.9999999');
+SELECT YEARWEEK(a), a FROM t1;
+SELECT WEEK(a), a FROM t1;
+SELECT WEEKDAY(a), a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DECIMAL(32,9));
+INSERT INTO t1 VALUES
+(20020105235959),
+(20020105235959.999999),
+(20020105235959.9999999);
+SELECT YEARWEEK(a), a FROM t1;
+SELECT WEEK(a), a FROM t1;
+SELECT WEEKDAY(a), a FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # Functions with a single DATE input, conversion from DATETIME-in-DECIMAL
+--echo #
+
+SELECT QUARTER(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+SELECT DAY(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+SELECT MONTH(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+SELECT YEAR(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+
+SELECT DAYNAME(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+SELECT MONTHNAME(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+SELECT YEARWEEK(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+
+SELECT LAST_DAY(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+SELECT TO_DAYS(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+SELECT DAYOFYEAR(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+SELECT DAYOFMONTH(a), CAST(a AS DATE), a FROM t1_datetime_in_decimal ORDER BY id;
+
+
+--echo #
+--echo # Functions with a single DATETIME input, conversion from DATETIME-in-VARCHAR
+--echo #
+
+SELECT TO_SECONDS(a), CAST(a AS DATETIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+
+SET time_zone='+00:00';
+SELECT UNIX_TIMESTAMP(a), CAST(a AS DATETIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+SET time_zone=DEFAULT;
+
+SELECT CONVERT_TZ(a, '+00:00','+00:00'), CAST(a AS DATETIME(6)), a FROM t1_datetime_in_varchar ORDER BY id;
+
+
+--echo #
+--echo # Functions with a single DATETIME input, conversion from DATETIME-in-DECIMAL
+--echo #
+
+SELECT TO_SECONDS(a), CAST(a AS DATETIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+
+SET time_zone='+00:00';
+SELECT UNIX_TIMESTAMP(a), CAST(a AS DATETIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+SET time_zone=DEFAULT;
+
+SELECT CONVERT_TZ(a, '+00:00','+00:00'), CAST(a AS DATETIME(6)), a FROM t1_datetime_in_decimal ORDER BY id;
+
+DROP TABLE t1_datetime_in_varchar;
+DROP TABLE t1_datetime_in_decimal;
+DROP TABLE t1_time_in_varchar;
+DROP TABLE t1_time_in_decimal;
+
+
+--echo #
+--echo # Functions that construct DATETIME
+--echo #
+
+SET time_zone='+00:00';
+CREATE TABLE t1_unix_timestamp (id SERIAL, a DECIMAL(30,10));
+INSERT INTO t1_unix_timestamp (a) VALUES
+(980639999),
+(980639999.9),
+(980639999.999999),
+(980639999.9999999),
+(2147483647),
+(2147483647.9),
+(2147483647.999999),
+(2147483647.9999999);
+SELECT a, FROM_UNIXTIME(a) FROM t1_unix_timestamp ORDER BY id;
+DROP TABLE t1_unix_timestamp;
+SET time_zone=DEFAULT;
+
+
+--echo #
+--echo # Functions that construct TIME
+--echo #
+
+CREATE TABLE t1_sec (id SERIAL, a DECIMAL(38,10));
+INSERT INTO t1_sec (a) VALUES
+(59),
+(59.9),
+(59.999999),
+(59.9999999),
+(3020398),
+(3020398.999999),
+(3020398.9999999),
+(3020399),
+(3020399.999999),
+(3020399.9999999),
+(9223372036854775807),
+(9223372036854775807.9),
+(9223372036854775807.999999),
+(9223372036854775807.9999999),
+(18446744073709551615),
+(18446744073709551615.9),
+(18446744073709551615.999999),
+(18446744073709551615.9999999);
+SELECT a, SEC_TO_TIME(a) FROM t1_sec ORDER BY id;
+DROP TABLE t1_sec;
+
+
+CREATE TABLE t1_sec (id SERIAL, a DECIMAL(38,10));
+INSERT INTO t1_sec (a) VALUES
+(0),
+(0.9),
+(0.999999),
+(0.9999999);
+SELECT a, MAKETIME(0, 0, a) FROM t1_sec ORDER BY id;
+DROP TABLE t1_sec;
+
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2017-12-31 23:59:59'),
+('2017-12-31 23:59:59.9'),
+('2017-12-31 23:59:59.999999'),
+('2017-12-31 23:59:59.9999999');
+--vertical_results
+SELECT
+ '----',
+ a,
+ DATE_FORMAT(a, '%Y') AS yyyy,
+ DATE_FORMAT(a, '%Y-%m-%d') AS d,
+ DATE_FORMAT(a, '%H:%i:%s') AS t0,
+ DATE_FORMAT(a, '%H:%i:%s.%f') AS t6,
+ DATE_FORMAT(a, '%Y-%m-%d %H:%i:%s') AS dt0,
+ DATE_FORMAT(a, '%Y-%m-%d %H:%i:%s.%f') AS dt6
+FROM t1;
+--horizontal_results
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DECIMAL(32,9));
+INSERT INTO t1 VALUES
+(20171231235959),
+(20171231235959.9),
+(20171231235959.999999),
+(20171231235959.9999999);
+--vertical_results
+SELECT
+ '----',
+ a,
+ DATE_FORMAT(a, '%Y') AS yyyy,
+ DATE_FORMAT(a, '%Y-%m-%d') AS d,
+ DATE_FORMAT(a, '%H:%i:%s') AS t0,
+ DATE_FORMAT(a, '%H:%i:%s.%f') AS t6,
+ DATE_FORMAT(a, '%Y-%m-%d %H:%i:%s') AS dt0,
+ DATE_FORMAT(a, '%Y-%m-%d %H:%i:%s.%f') AS dt6
+FROM t1;
+--horizontal_results
+DROP TABLE t1;
+
+
+--echo #
+--echo # Functions with two temporal parameters, for which MySQL rounds nanoseconds in both parameters
+--echo #
+
+CREATE TABLE t1 (a VARCHAR(64));
+CREATE TABLE t2 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2017-12-31 23:59:59'),
+('2017-12-31 23:59:59.9'),
+('2017-12-31 23:59:59.999999'),
+('2017-12-31 23:59:59.9999999');
+INSERT INTO t2 VALUES
+('00:00:00'),
+('00:00:00.9'),
+('00:00:00.999999'),
+('00:00:00.9999999');
+
+SELECT TIMESTAMP(t1.a, t2.a), t1.a, t2.a FROM t1,t2 ORDER BY t1.a, t2.a;
+SELECT ADDTIME(t1.a, t2.a), t1.a, t2.a FROM t1,t2 ORDER BY t1.a, t2.a;
+
+DROP TABLE t1, t2;
+
+
+CREATE TABLE t1 (a VARCHAR(64));
+CREATE TABLE t2 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('23:59:59'),
+('23:59:59.9'),
+('23:59:59.999999'),
+('23:59:59.9999999');
+INSERT INTO t2 VALUES
+('00:00:00'),
+('00:00:00.9'),
+('00:00:00.999999'),
+('00:00:00.9999999');
+SELECT TIMEDIFF(t1.a, t2.a), t1.a, t2.a FROM t1,t2 ORDER BY t1.a, t2.a;
+DROP TABLE t1, t2;
+
+
+CREATE TABLE t1 (a VARCHAR(64));
+CREATE TABLE t2 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2001-12-31 23:59:59'),
+('2001-12-31 23:59:59.9'),
+('2001-12-31 23:59:59.999999'),
+('2001-12-31 23:59:59.9999999');
+INSERT INTO t2 VALUES
+('2001-12-31 23:59:59'),
+('2001-12-31 23:59:59.9'),
+('2001-12-31 23:59:59.999999'),
+('2001-12-31 23:59:59.9999999');
+SELECT TIMESTAMPDIFF(MICROSECOND,t1.a, t2.a), t1.a, t2.a FROM t1,t2 ORDER BY t1.a, t2.a;
+DROP TABLE t1, t2;
+
+CREATE TABLE t1 (a VARCHAR(64));
+CREATE TABLE t2 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('23:59:59'),
+('23:59:59.9'),
+('23:59:59.999999'),
+('23:59:59.9999999');
+INSERT INTO t2 VALUES
+('00:00:00'),
+('00:00:00.9'),
+('00:00:00.999999'),
+('00:00:00.9999999');
+SELECT TIMEDIFF(t1.a, t2.a), t1.a, t2.a FROM t1,t2 ORDER BY t1.a, t2.a;
+DROP TABLE t1, t2;
+
+
+--echo #
+--echo # STR_TO_DATE behaviour is questionable in MySQL 5.6 (MySQL Bug #92474)
+--echo #
+
+--echo # It truncates nanoseconds, but this may change in the future.
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2017-12-31 23:59:59'),
+('2017-12-31 23:59:59.9'),
+('2017-12-31 23:59:59.999999'),
+('2017-12-31 23:59:59.9999999');
+
+SELECT
+ a,
+ STR_TO_DATE(a, '%Y-%m-%d %H:%i:%s') AS c0,
+ STR_TO_DATE(a, '%Y-%m-%d %H:%i:%s.%f') AS c6
+FROM t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # DATE_ADD behaviour is questionable in MySQL 5.6 (MySQL Bug#92473)
+--echo # It rounds nanoseconds in the first argument, but truncates nanoseconds in the second argument.
+--echo # This may change in the future, to round both arguments.
+--echo #
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES
+('2017-12-31 23:59:59'),
+('2017-12-31 23:59:59.9'),
+('2017-12-31 23:59:59.999999'),
+('2017-12-31 23:59:59.9999999');
+CREATE TABLE t2 (b DECIMAL(32,9));
+INSERT INTO t2 VALUES
+(0),
+(0.9),
+(0.999999),
+(0.9999999);
+SELECT a, b, DATE_ADD(a, INTERVAL b SECOND) FROM t1,t2 ORDER BY a,b;
+DROP TABLE t1, t2;
diff --git a/mysql-test/main/get_diagnostics.result b/mysql-test/main/get_diagnostics.result
index 732be7c0283..6944103c805 100644
--- a/mysql-test/main/get_diagnostics.result
+++ b/mysql-test/main/get_diagnostics.result
@@ -133,7 +133,7 @@ DROP PROCEDURE p1;
GET DIAGNOSTICS CONDITION;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
GET DIAGNOSTICS CONDITION a;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
+ERROR 42S22: Unknown column 'a' in 'field list'
GET DIAGNOSTICS CONDITION 1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
GET DIAGNOSTICS CONDITION 1 @var;
@@ -212,9 +212,9 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
GET DIAGNOSTICS CONDITION (1) @var = CLASS_ORIGIN;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(1) @var = CLASS_ORIGIN' at line 1
GET DIAGNOSTICS CONDITION p1() @var = CLASS_ORIGIN;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '() @var = CLASS_ORIGIN' at line 1
+ERROR 42S22: Unknown column 'p1' in 'field list'
GET DIAGNOSTICS CONDITION ABS(2) @var = CLASS_ORIGIN;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '(2) @var = CLASS_ORIGIN' at line 1
+ERROR 42S22: Unknown column 'ABS' in 'field list'
GET DIAGNOSTICS CONDITION 1.1 @var = CLASS_ORIGIN;
GET DIAGNOSTICS CONDITION "1" @var = CLASS_ORIGIN;
SELECT COUNT(max_questions) INTO @var FROM mysql.user;
@@ -226,10 +226,7 @@ Warnings:
Error 1758 Invalid condition number
Error 1758 Invalid condition number
GET DIAGNOSTICS CONDITION a @var = CLASS_ORIGIN;
-Warnings:
-Error 1758 Invalid condition number
-Error 1758 Invalid condition number
-Error 1054 Unknown column 'a' in 'field list'
+ERROR 42S22: Unknown column 'a' in 'field list'
SELECT COUNT(max_questions) INTO @var FROM mysql.user;
SET @cond = 1;
GET DIAGNOSTICS CONDITION @cond @var1 = CLASS_ORIGIN;
diff --git a/mysql-test/main/get_diagnostics.test b/mysql-test/main/get_diagnostics.test
index a30bad72136..1553eb500b7 100644
--- a/mysql-test/main/get_diagnostics.test
+++ b/mysql-test/main/get_diagnostics.test
@@ -169,7 +169,7 @@ DROP PROCEDURE p1;
--error ER_PARSE_ERROR
GET DIAGNOSTICS CONDITION;
---error ER_PARSE_ERROR
+--error ER_BAD_FIELD_ERROR
GET DIAGNOSTICS CONDITION a;
--error ER_PARSE_ERROR
GET DIAGNOSTICS CONDITION 1;
@@ -271,9 +271,9 @@ GET DIAGNOSTICS CONDITION 1+1 @var = CLASS_ORIGIN;
GET DIAGNOSTICS CONDITION ? @var = CLASS_ORIGIN;
--error ER_PARSE_ERROR
GET DIAGNOSTICS CONDITION (1) @var = CLASS_ORIGIN;
---error ER_PARSE_ERROR
+--error ER_BAD_FIELD_ERROR
GET DIAGNOSTICS CONDITION p1() @var = CLASS_ORIGIN;
---error ER_PARSE_ERROR
+--error ER_BAD_FIELD_ERROR
GET DIAGNOSTICS CONDITION ABS(2) @var = CLASS_ORIGIN;
# Unfortunate side effects...
@@ -285,6 +285,7 @@ SELECT COUNT(max_questions) INTO @var FROM mysql.user;
GET DIAGNOSTICS CONDITION 9999 @var = CLASS_ORIGIN;
GET DIAGNOSTICS CONDITION NULL @var = CLASS_ORIGIN;
+--error ER_BAD_FIELD_ERROR
GET DIAGNOSTICS CONDITION a @var = CLASS_ORIGIN;
# Reset warnings
diff --git a/mysql-test/main/gis-debug.result b/mysql-test/main/gis-debug.result
index be4145f2236..2daa810db0d 100644
--- a/mysql-test/main/gis-debug.result
+++ b/mysql-test/main/gis-debug.result
@@ -405,3 +405,93 @@ ERROR HY000: Illegal parameter data types varchar and geometry for operation '/'
CREATE TABLE t1 AS SELECT '0' MOD POINT(0,0) LIMIT 0;
ERROR HY000: Illegal parameter data types varchar and geometry for operation 'MOD'
SET debug_dbug='-d,num_op';
+#
+# End of 10.3 tests
+#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16454 Bad results for IN with ROW
+#
+SET SESSION debug_dbug="+d,cmp_item";
+SET SESSION debug_dbug="+d,Item_func_in";
+SET SESSION debug_dbug="+d,Predicant_to_list_comparator";
+SELECT (POINT(1,1),0) IN ((POINT(1,1),0),((POINT(1,1)),1));
+(POINT(1,1),0) IN ((POINT(1,1),0),((POINT(1,1)),1))
+1
+Warnings:
+Note 1105 DBUG: [0] arg=1 handler=0 (row)
+Note 1105 DBUG: [1] arg=2 handler=0 (row)
+Note 1105 DBUG: ROW(3 args) level=0
+Note 1105 DBUG: [0,0] handler=geometry
+Note 1105 DBUG: [0,1] handler=geometry
+Note 1105 DBUG: [0,2] handler=geometry
+Note 1105 DBUG: => handler=geometry
+Note 1105 DBUG: [1,0] handler=int
+Note 1105 DBUG: [1,1] handler=int
+Note 1105 DBUG: [1,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: types_compatible=yes bisect=no
+SELECT (1,(POINT(1,1),0)) IN ((1,(POINT(1,1),0)),(0,(POINT(1,1),0)));
+(1,(POINT(1,1),0)) IN ((1,(POINT(1,1),0)),(0,(POINT(1,1),0)))
+1
+Warnings:
+Note 1105 DBUG: [0] arg=1 handler=0 (row)
+Note 1105 DBUG: [1] arg=2 handler=0 (row)
+Note 1105 DBUG: ROW(3 args) level=0
+Note 1105 DBUG: [0,0] handler=int
+Note 1105 DBUG: [0,1] handler=int
+Note 1105 DBUG: [0,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: [1,0] handler=row
+Note 1105 DBUG: [1,1] handler=row
+Note 1105 DBUG: [1,2] handler=row
+Note 1105 DBUG: => handler=row
+Note 1105 DBUG: ROW(3 args) level=1
+Note 1105 DBUG: [0,0] handler=geometry
+Note 1105 DBUG: [0,1] handler=geometry
+Note 1105 DBUG: [0,2] handler=geometry
+Note 1105 DBUG: => handler=geometry
+Note 1105 DBUG: [1,0] handler=int
+Note 1105 DBUG: [1,1] handler=int
+Note 1105 DBUG: [1,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: types_compatible=yes bisect=no
+SELECT (1,0) IN ((POINT(1,1),0),(0,0));
+ERROR HY000: Illegal parameter data types int and geometry for operation 'in'
+SHOW WARNINGS;
+Level Code Message
+Note 1105 DBUG: [0] arg=1 handler=0 (row)
+Note 1105 DBUG: [1] arg=2 handler=0 (row)
+Note 1105 DBUG: ROW(3 args) level=0
+Note 1105 DBUG: [0,0] handler=int
+Note 1105 DBUG: [0,1] handler=geometry
+Note 1105 DBUG: [0,2] handler=int
+Error 4078 Illegal parameter data types int and geometry for operation 'in'
+SELECT (1,(0,0)) IN ((1,(POINT(1,1),0)),(0,(0,0)));
+ERROR HY000: Illegal parameter data types int and geometry for operation 'in'
+SHOW WARNINGS;
+Level Code Message
+Note 1105 DBUG: [0] arg=1 handler=0 (row)
+Note 1105 DBUG: [1] arg=2 handler=0 (row)
+Note 1105 DBUG: ROW(3 args) level=0
+Note 1105 DBUG: [0,0] handler=int
+Note 1105 DBUG: [0,1] handler=int
+Note 1105 DBUG: [0,2] handler=int
+Note 1105 DBUG: => handler=bigint
+Note 1105 DBUG: [1,0] handler=row
+Note 1105 DBUG: [1,1] handler=row
+Note 1105 DBUG: [1,2] handler=row
+Note 1105 DBUG: => handler=row
+Note 1105 DBUG: ROW(3 args) level=1
+Note 1105 DBUG: [0,0] handler=int
+Note 1105 DBUG: [0,1] handler=geometry
+Note 1105 DBUG: [0,2] handler=int
+Error 4078 Illegal parameter data types int and geometry for operation 'in'
+SET SESSION debug_dbug="-d,Predicant_to_list_comparator";
+SET SESSION debug_dbug="-d,Item_func_in";
+SET SESSION debug_dbug="-d,cmp_item";
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/gis-debug.test b/mysql-test/main/gis-debug.test
index 588bc706370..dd64ce0f04c 100644
--- a/mysql-test/main/gis-debug.test
+++ b/mysql-test/main/gis-debug.test
@@ -111,3 +111,39 @@ CREATE TABLE t1 AS SELECT '0'/POINT(0,0) LIMIT 0;
CREATE TABLE t1 AS SELECT '0' MOD POINT(0,0) LIMIT 0;
SET debug_dbug='-d,num_op';
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
+
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-16454 Bad results for IN with ROW
+--echo #
+
+SET SESSION debug_dbug="+d,cmp_item";
+SET SESSION debug_dbug="+d,Item_func_in";
+SET SESSION debug_dbug="+d,Predicant_to_list_comparator";
+
+SELECT (POINT(1,1),0) IN ((POINT(1,1),0),((POINT(1,1)),1));
+SELECT (1,(POINT(1,1),0)) IN ((1,(POINT(1,1),0)),(0,(POINT(1,1),0)));
+
+--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION
+SELECT (1,0) IN ((POINT(1,1),0),(0,0));
+SHOW WARNINGS;
+
+--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION
+SELECT (1,(0,0)) IN ((1,(POINT(1,1),0)),(0,(0,0)));
+SHOW WARNINGS;
+
+SET SESSION debug_dbug="-d,Predicant_to_list_comparator";
+SET SESSION debug_dbug="-d,Item_func_in";
+SET SESSION debug_dbug="-d,cmp_item";
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/gis-precise.result b/mysql-test/main/gis-precise.result
index 32b536b6e7f..4e4161c34ec 100644
--- a/mysql-test/main/gis-precise.result
+++ b/mysql-test/main/gis-precise.result
@@ -223,7 +223,7 @@ st_u
MULTIPOLYGON(((525400 18370,525000.9677614468 183300,525400 183300,525400 18370)),((525000 183300,525000 183700,525000.9677614468 183300,525000 183300)),((525265.58 183481.95,525263.95 183484.75,525260.7 183491.55,525276.79 183500,525278.39 183500.84,525278.63 183500.97,525280.98 183502.26,525283.17 183503.47,525289.11 183506.62,525296.42 183510.31,525296.57 183510.39,525298.67 183511.53,525302.81 183513.8,525304.5 183510.83,525307.85 183504.95,525304.45 183504.25,525301.75 183509.35,525283.55 183500,525282.2 183499.3,525282.3 183499.1,525280.35 183498.2,525275.5 183495.7,525276.5 183493.45,525278.97 183488.73,525265.58 183481.95),(525266.99 183484.33,525263.26 183491.55,525266.15 183493.04,525269.88 183485.82,525266.99 183484.33),(525272.06 183488.37,525268.94 183494.51,525271.94 183496.03,525275.06 183489.89,525272.06 183488.37)))
SET @a=0x0000000001030000000200000005000000000000000000000000000000000000000000000000002440000000000000000000000000000024400000000000002440000000000000000000000000000024400000000000000000000000000000000000000000000000000000F03F000000000000F03F0000000000000040000000000000F03F00000000000000400000000000000040000000000000F03F0000000000000040000000000000F03F000000000000F03F;
SELECT ASTEXT(TOUCHES(@a, GEOMFROMTEXT('point(0 0)'))) t;
-ERROR HY000: Illegal parameter data type int for operation 'st_astext'
+ERROR HY000: Illegal parameter data type boolean for operation 'st_astext'
SELECT astext(ST_UNION (
PolyFromText('POLYGON(( 2 2 ,3 2,2 7,2 2),( 0 0,8 2,1 9,0 0))'),
ExteriorRing( Envelope( MultiLineStringFromText('MULTILINESTRING((3 4,5 3),(3 0,0 5))')))));
diff --git a/mysql-test/main/gis-rtree.result b/mysql-test/main/gis-rtree.result
index 5d4708dd111..2ef8757339e 100644
--- a/mysql-test/main/gis-rtree.result
+++ b/mysql-test/main/gis-rtree.result
@@ -744,6 +744,7 @@ Table Op Msg_type Msg_text
test.t1 check status OK
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
drop table t1;
CREATE TABLE t1 (
diff --git a/mysql-test/main/gis.result b/mysql-test/main/gis.result
index 0ee502d8f87..191955504c2 100644
--- a/mysql-test/main/gis.result
+++ b/mysql-test/main/gis.result
@@ -964,29 +964,29 @@ id select_type table type possible_keys key key_len ref rows Extra
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
1
-INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(1 2)'));
-INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(1 2)'));
+INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(3 4)'));
+INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(3 4)'));
EXPLAIN
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
EXPLAIN
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref p p 28 const # Using where
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
EXPLAIN
SELECT COUNT(*) FROM t2 IGNORE INDEX(p) WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where
SELECT COUNT(*) FROM t2 IGNORE INDEX(p) WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
DROP TABLE t1, t2;
End of 5.0 tests
#
@@ -4936,3 +4936,56 @@ ERROR HY000: Illegal parameter data type geometry for operation 'is_used_lock'
#
# End of 10.3 tests
#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16351 JSON_OBJECT() treats hybrid functions with boolean arguments as numbers
+#
+SELECT ST_SRID(TRUE);
+ERROR HY000: Illegal parameter data type boolean for operation 'srid'
+SELECT ST_SRID(COALESCE(TRUE,TRUE));
+ERROR HY000: Illegal parameter data type boolean for operation 'srid'
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+SELECT ST_SRID((SELECT MAX(a)>3 FROM t1));
+ERROR HY000: Illegal parameter data type boolean for operation 'srid'
+SELECT ST_SRID(MAX(a>3)) FROM t1;
+ERROR HY000: Illegal parameter data type boolean for operation 'srid'
+SELECT ST_SRID((SELECT MAX(a>3) FROM t1));
+ERROR HY000: Illegal parameter data type boolean for operation 'srid'
+DROP TABLE t1;
+#
+# Mixing GEOMETRY with HEX hybrid
+#
+SELECT 0x60=POINT(1,1), POINT(1,1)=0x60;
+0x60=POINT(1,1) POINT(1,1)=0x60
+0 0
+CREATE TABLE t1 AS SELECT
+COALESCE(0x60,POINT(1,1)),
+COALESCE(POINT(1,1),0x60),
+LEAST(0x60,POINT(1,1)),
+LEAST(POINT(1,1),0x60);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `COALESCE(0x60,POINT(1,1))` longblob DEFAULT NULL,
+ `COALESCE(POINT(1,1),0x60)` longblob DEFAULT NULL,
+ `LEAST(0x60,POINT(1,1))` longblob DEFAULT NULL,
+ `LEAST(POINT(1,1),0x60)` longblob DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+SELECT 0x60+POINT(1,1);
+ERROR HY000: Illegal parameter data types bigint and geometry for operation '+'
+SELECT POINT(1,1)+0x60;
+ERROR HY000: Illegal parameter data types geometry and bigint for operation '+'
+#
+# MDEV-16454 Bad results for IN with ROW
+#
+SELECT (1,0) IN ((POINT(1,1),0),(0,0));
+ERROR HY000: Illegal parameter data types int and geometry for operation 'in'
+SELECT (1,(0,0)) IN ((1,(POINT(1,1),0)),(0,(0,0)));
+ERROR HY000: Illegal parameter data types int and geometry for operation 'in'
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/gis.test b/mysql-test/main/gis.test
index fbc130fcb7e..3bc77ec50df 100644
--- a/mysql-test/main/gis.test
+++ b/mysql-test/main/gis.test
@@ -2609,7 +2609,6 @@ SELECT ST_SRID(1);
--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION
SELECT ST_SRID('test');
-
--echo # Item_bool_func_args_geometry
--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION
@@ -3004,3 +3003,58 @@ SELECT IS_USED_LOCK(POINT(1,1));
--echo #
--echo # End of 10.3 tests
--echo #
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-16351 JSON_OBJECT() treats hybrid functions with boolean arguments as numbers
+--echo #
+
+--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION
+SELECT ST_SRID(TRUE);
+--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION
+SELECT ST_SRID(COALESCE(TRUE,TRUE));
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION
+SELECT ST_SRID((SELECT MAX(a)>3 FROM t1));
+--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION
+SELECT ST_SRID(MAX(a>3)) FROM t1;
+--error ER_ILLEGAL_PARAMETER_DATA_TYPE_FOR_OPERATION
+SELECT ST_SRID((SELECT MAX(a>3) FROM t1));
+DROP TABLE t1;
+
+--echo #
+--echo # Mixing GEOMETRY with HEX hybrid
+--echo #
+
+SELECT 0x60=POINT(1,1), POINT(1,1)=0x60;
+
+CREATE TABLE t1 AS SELECT
+ COALESCE(0x60,POINT(1,1)),
+ COALESCE(POINT(1,1),0x60),
+ LEAST(0x60,POINT(1,1)),
+ LEAST(POINT(1,1),0x60);
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION
+SELECT 0x60+POINT(1,1);
+--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION
+SELECT POINT(1,1)+0x60;
+
+--echo #
+--echo # MDEV-16454 Bad results for IN with ROW
+--echo #
+
+--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION
+SELECT (1,0) IN ((POINT(1,1),0),(0,0));
+--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION
+SELECT (1,(0,0)) IN ((1,(POINT(1,1),0)),(0,(0,0)));
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/grant.result b/mysql-test/main/grant.result
index 31ea932445d..fad874d7d64 100644
--- a/mysql-test/main/grant.result
+++ b/mysql-test/main/grant.result
@@ -2,6 +2,7 @@ set GLOBAL sql_mode="";
set LOCAL sql_mode="";
SET @old_log_bin_trust_function_creators= @@global.log_bin_trust_function_creators;
SET GLOBAL log_bin_trust_function_creators = 1;
+select priv into @root_priv from mysql.global_priv where user='root' and host='localhost';
drop table if exists t1;
drop database if exists mysqltest;
connect master,localhost,root,,;
@@ -58,7 +59,7 @@ max_questions 0
max_updates 0
max_connections 0
max_user_connections 0
-plugin
+plugin mysql_native_password
authentication_string
password_expired N
is_role N
@@ -134,7 +135,7 @@ max_questions 10
max_updates 0
max_connections 0
max_user_connections 0
-plugin
+plugin mysql_native_password
authentication_string
password_expired N
is_role N
@@ -186,7 +187,7 @@ max_questions 10
max_updates 20
max_connections 30
max_user_connections 0
-plugin
+plugin mysql_native_password
authentication_string
password_expired N
is_role N
@@ -298,12 +299,7 @@ ERROR HY000: Incorrect usage of DB GRANT and GLOBAL PRIVILEGES
select 1;
1
1
-insert into mysql.user (host, user) values ('localhost', 'test11');
-Warnings:
-Warning 1364 Field 'ssl_cipher' doesn't have a default value
-Warning 1364 Field 'x509_issuer' doesn't have a default value
-Warning 1364 Field 'x509_subject' doesn't have a default value
-Warning 1364 Field 'authentication_string' doesn't have a default value
+insert into mysql.global_priv (host, user) values ('localhost', 'test11');
insert into mysql.db (host, db, user, select_priv) values
('localhost', 'a%', 'test11', 'Y'), ('localhost', 'ab%', 'test11', 'Y');
alter table mysql.db order by db asc;
@@ -842,9 +838,6 @@ drop database db27515;
End of 4.1 tests
use test;
create table t1 (a int);
-create table t2 as select * from mysql.user where user='';
-delete from mysql.user where user='';
-flush privileges;
create user mysqltest_8@'';
create user mysqltest_8@host8;
create user mysqltest_8@'';
@@ -854,7 +847,7 @@ ERROR HY000: Operation CREATE USER failed for 'mysqltest_8'@'%'
create user mysqltest_8@host8;
ERROR HY000: Operation CREATE USER failed for 'mysqltest_8'@'host8'
select user, QUOTE(host) from mysql.user where user="mysqltest_8";
-user QUOTE(host)
+User QUOTE(host)
mysqltest_8 '%'
mysqltest_8 'host8'
Schema privileges
@@ -1044,9 +1037,6 @@ ERROR 42000: There is no such grant defined for user 'mysqltest_8' on host '%'
drop user mysqltest_8@host8;
show grants for mysqltest_8@host8;
ERROR 42000: There is no such grant defined for user 'mysqltest_8' on host 'host8'
-insert into mysql.user select * from t2;
-flush privileges;
-drop table t2;
drop table t1;
connection master;
CREATE DATABASE mysqltest3;
@@ -1440,6 +1430,8 @@ declare tmp varchar(30);
select col1 from test limit 1 into tmp;
return '1';
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create view v1 as select test.* from test where test.col1=test_function();
grant update (col1) on v1 to 'greg'@'localhost';
drop user 'greg'@'localhost';
@@ -1451,7 +1443,6 @@ CURRENT_USER()
root@localhost
SET PASSWORD FOR CURRENT_USER() = PASSWORD("admin");
SET PASSWORD FOR CURRENT_USER() = PASSWORD("");
-update mysql.user set plugin='';
# Bug#57952
@@ -1923,7 +1914,7 @@ DROP DATABASE db2;
grant usage on Foo.* to myuser@Localhost identified by 'foo';
grant select on Foo.* to myuser@localhost;
select host,user from mysql.user where User='myuser';
-host user
+Host User
localhost myuser
revoke select on Foo.* from myuser@localhost;
delete from mysql.user where User='myuser';
@@ -2731,7 +2722,7 @@ DROP USER untrusted@localhost;
DROP DATABASE secret;
set GLOBAL sql_mode=default;
#
-# Start of 10.2 tests
+# End of 10.1 tests
#
#
# MDEV-10134 Add full support for DEFAULT
@@ -2760,3 +2751,24 @@ DROP USER dummy@localhost;
#
# End of 10.2 tests
#
+#
+# MDEV-17932 : Assertion upon double RENAME USER
+#
+CREATE USER foo@localhost;
+CREATE USER bar2@localhost;
+RENAME USER foo@localhost TO bar1@localhost, bar1@localhost TO bar3@localhost;
+DROP USER bar2@localhost;
+DROP USER bar3@localhost;
+#
+# MDEV-17946 : Unsorted acl_dbs after RENAME USER
+#
+CREATE USER foo;
+GRANT SELECT ON test.* TO foo;
+RENAME USER '' TO 'name';
+GRANT UPDATE ON test.* TO foo;
+RENAME USER 'name' to '';
+DROP USER foo;
+#
+# End of 10.4 tests
+#
+update mysql.global_priv set priv=@root_priv where user='root' and host='localhost';
diff --git a/mysql-test/main/grant.test b/mysql-test/main/grant.test
index f54c4bd981d..8ae64c9ad4d 100644
--- a/mysql-test/main/grant.test
+++ b/mysql-test/main/grant.test
@@ -10,6 +10,7 @@ set GLOBAL sql_mode="";
set LOCAL sql_mode="";
SET @old_log_bin_trust_function_creators= @@global.log_bin_trust_function_creators;
SET GLOBAL log_bin_trust_function_creators = 1;
+select priv into @root_priv from mysql.global_priv where user='root' and host='localhost';
# Cleanup
--disable_warnings
@@ -131,7 +132,7 @@ select 1; # To test that the previous command didn't cause problems
#
# Bug#4898 User privileges depending on ORDER BY Settings of table db
#
-insert into mysql.user (host, user) values ('localhost', 'test11');
+insert into mysql.global_priv (host, user) values ('localhost', 'test11');
insert into mysql.db (host, db, user, select_priv) values
('localhost', 'a%', 'test11', 'Y'), ('localhost', 'ab%', 'test11', 'Y');
alter table mysql.db order by db asc;
@@ -581,12 +582,6 @@ drop database db27515;
use test;
create table t1 (a int);
-# Backup anonymous users and remove them. (They get in the way of
-# the one we test with here otherwise.)
-create table t2 as select * from mysql.user where user='';
-delete from mysql.user where user='';
-flush privileges;
-
# Create some users with different hostnames
create user mysqltest_8@'';
create user mysqltest_8@host8;
@@ -700,10 +695,6 @@ drop user mysqltest_8@host8;
--error ER_NONEXISTING_GRANT
show grants for mysqltest_8@host8;
-# Restore the anonymous users.
-insert into mysql.user select * from t2;
-flush privileges;
-drop table t2;
drop table t1;
#
@@ -1265,9 +1256,6 @@ SELECT CURRENT_USER();
SET PASSWORD FOR CURRENT_USER() = PASSWORD("admin");
SET PASSWORD FOR CURRENT_USER() = PASSWORD("");
-#cleanup after MDEV-16238
-update mysql.user set plugin='';
-
#
# Bug#57952: privilege change is not taken into account by EXECUTE.
#
@@ -2233,7 +2221,7 @@ set GLOBAL sql_mode=default;
--source include/wait_until_count_sessions.inc
--echo #
---echo # Start of 10.2 tests
+--echo # End of 10.1 tests
--echo #
--echo #
@@ -2258,3 +2246,28 @@ DROP USER dummy@localhost;
--echo #
--echo # End of 10.2 tests
--echo #
+
+--echo #
+--echo # MDEV-17932 : Assertion upon double RENAME USER
+--echo #
+CREATE USER foo@localhost;
+CREATE USER bar2@localhost;
+RENAME USER foo@localhost TO bar1@localhost, bar1@localhost TO bar3@localhost;
+DROP USER bar2@localhost;
+DROP USER bar3@localhost;
+
+--echo #
+--echo # MDEV-17946 : Unsorted acl_dbs after RENAME USER
+--echo #
+CREATE USER foo;
+GRANT SELECT ON test.* TO foo;
+RENAME USER '' TO 'name';
+GRANT UPDATE ON test.* TO foo;
+RENAME USER 'name' to '';
+DROP USER foo;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
+
+update mysql.global_priv set priv=@root_priv where user='root' and host='localhost';
diff --git a/mysql-test/main/grant2.result b/mysql-test/main/grant2.result
index ffb41c1b5f8..5d168a04455 100644
--- a/mysql-test/main/grant2.result
+++ b/mysql-test/main/grant2.result
@@ -1,3 +1,4 @@
+select priv into @root_priv from mysql.global_priv where user='root' and host='localhost';
set GLOBAL sql_mode="";
set LOCAL sql_mode="";
SET NAMES binary;
@@ -181,19 +182,19 @@ grant select on *.* to 'mysqltest_2';
grant insert on test.* to 'mysqltest_2';
grant update on test.t1 to 'mysqltest_2';
grant update (c2) on test.t2 to 'mysqltest_2';
-select host,user,password from mysql.user where user like 'mysqltest_%' order by host,user,password;
-host user password
-% mysqltest_1
-% mysqltest_2 *BD447CBA355AF58578D3AE33BA2E2CD388BA08D1
-% mysqltest_3 fffffffffffffffffffffffffffffffffffffffff
-select host,db,user from mysql.db where user like 'mysqltest_%' order by host,db,user;
+select host,user,password,plugin,authentication_string from mysql.user where user like 'mysqltest_%';
+Host User Password plugin authentication_string
+% mysqltest_1 mysql_native_password
+% mysqltest_2 *BD447CBA355AF58578D3AE33BA2E2CD388BA08D1 mysql_native_password *BD447CBA355AF58578D3AE33BA2E2CD388BA08D1
+% mysqltest_3 fffffffffffffffffffffffffffffffffffffffff mysql_native_password fffffffffffffffffffffffffffffffffffffffff
+select host,db,user from mysql.db where user like 'mysqltest_%';
host db user
% test mysqltest_2
-select host,db,user,table_name from mysql.tables_priv where user like 'mysqltest_%' order by host,db,user,table_name;
+select host,db,user,table_name from mysql.tables_priv where user like 'mysqltest_%';
host db user table_name
% test mysqltest_2 t1
% test mysqltest_2 t2
-select host,db,user,table_name,column_name from mysql.columns_priv where user like 'mysqltest_%' order by host,db,user,table_name,column_name;
+select host,db,user,table_name,column_name from mysql.columns_priv where user like 'mysqltest_%';
host db user table_name column_name
% test mysqltest_2 t2 c2
show grants for 'mysqltest_1';
@@ -206,35 +207,35 @@ GRANT INSERT ON "test".* TO 'mysqltest_2'@'%'
GRANT UPDATE (c2) ON "test"."t2" TO 'mysqltest_2'@'%'
GRANT UPDATE ON "test"."t1" TO 'mysqltest_2'@'%'
drop user 'mysqltest_1';
-select host,user,password from mysql.user where user like 'mysqltest_%' order by host,user,password;
-host user password
-% mysqltest_2 *BD447CBA355AF58578D3AE33BA2E2CD388BA08D1
-% mysqltest_3 fffffffffffffffffffffffffffffffffffffffff
-select host,db,user from mysql.db where user like 'mysqltest_%' order by host,db,user;
+select host,user,password,plugin,authentication_string from mysql.user where user like 'mysqltest_%';
+Host User Password plugin authentication_string
+% mysqltest_2 *BD447CBA355AF58578D3AE33BA2E2CD388BA08D1 mysql_native_password *BD447CBA355AF58578D3AE33BA2E2CD388BA08D1
+% mysqltest_3 fffffffffffffffffffffffffffffffffffffffff mysql_native_password fffffffffffffffffffffffffffffffffffffffff
+select host,db,user from mysql.db where user like 'mysqltest_%';
host db user
% test mysqltest_2
-select host,db,user,table_name from mysql.tables_priv where user like 'mysqltest_%' order by host,db,user,table_name;
+select host,db,user,table_name from mysql.tables_priv where user like 'mysqltest_%';
host db user table_name
% test mysqltest_2 t1
% test mysqltest_2 t2
-select host,db,user,table_name,column_name from mysql.columns_priv where user like 'mysqltest_%' order by host,db,user,table_name,column_name;
+select host,db,user,table_name,column_name from mysql.columns_priv where user like 'mysqltest_%';
host db user table_name column_name
% test mysqltest_2 t2 c2
show grants for 'mysqltest_1';
ERROR 42000: There is no such grant defined for user 'mysqltest_1' on host '%'
rename user 'mysqltest_2' to 'mysqltest_1';
-select host,user,password from mysql.user where user like 'mysqltest_%' order by host,user,password;
-host user password
-% mysqltest_1 *BD447CBA355AF58578D3AE33BA2E2CD388BA08D1
-% mysqltest_3 fffffffffffffffffffffffffffffffffffffffff
-select host,db,user from mysql.db where user like 'mysqltest_%' order by host,db,user;
+select host,user,password,plugin,authentication_string from mysql.user where user like 'mysqltest_%' ;
+Host User Password plugin authentication_string
+% mysqltest_1 *BD447CBA355AF58578D3AE33BA2E2CD388BA08D1 mysql_native_password *BD447CBA355AF58578D3AE33BA2E2CD388BA08D1
+% mysqltest_3 fffffffffffffffffffffffffffffffffffffffff mysql_native_password fffffffffffffffffffffffffffffffffffffffff
+select host,db,user from mysql.db where user like 'mysqltest_%' ;
host db user
% test mysqltest_1
-select host,db,user,table_name from mysql.tables_priv where user like 'mysqltest_%' order by host,db,user,table_name;
+select host,db,user,table_name from mysql.tables_priv where user like 'mysqltest_%' ;
host db user table_name
% test mysqltest_1 t1
% test mysqltest_1 t2
-select host,db,user,table_name,column_name from mysql.columns_priv where user like 'mysqltest_%' order by host,db,user,table_name,column_name;
+select host,db,user,table_name,column_name from mysql.columns_priv where user like 'mysqltest_%' ;
host db user table_name column_name
% test mysqltest_1 t2 c2
show grants for 'mysqltest_1';
@@ -254,21 +255,21 @@ ERROR 42000: There is no such grant defined for user 'mysqltest_1' on host '%'
revoke all privileges, grant option from 'mysqltest_1';
ERROR HY000: Can't revoke all privileges for one or more of the requested users
drop user 'mysqltest_1';
-select host,db,user from mysql.db where user = 'mysqltest_1' order by host,db,user;
+select host,db,user from mysql.db where user = 'mysqltest_1' ;
host db user
insert into mysql.tables_priv set host='%', db='test', user='mysqltest_1', table_name='t1';
flush privileges;
show grants for 'mysqltest_1';
ERROR 42000: There is no such grant defined for user 'mysqltest_1' on host '%'
drop user 'mysqltest_1';
-select host,db,user,table_name from mysql.tables_priv where user = 'mysqltest_1' order by host,db,user,table_name;
+select host,db,user,table_name from mysql.tables_priv where user = 'mysqltest_1' ;
host db user table_name
insert into mysql.columns_priv set host='%', db='test', user='mysqltest_1', table_name='t1', column_name='c1';
flush privileges;
show grants for 'mysqltest_1';
ERROR 42000: There is no such grant defined for user 'mysqltest_1' on host '%'
drop user 'mysqltest_1';
-select host,db,user,table_name,column_name from mysql.columns_priv where user = 'mysqltest_1' order by host,db,user,table_name,column_name;
+select host,db,user,table_name,column_name from mysql.columns_priv where user = 'mysqltest_1' ;
host db user table_name column_name
create user 'mysqltest_1', 'mysqltest_2', 'mysqltest_3';
drop user 'mysqltest_1', 'mysqltest_2', 'mysqltest_3';
@@ -310,7 +311,7 @@ create user mysqltest_2@localhost;
grant create user on *.* to mysqltest_2@localhost;
connect user3,localhost,mysqltest_2,,;
connection user3;
-select host,user,password from mysql.user where user like 'mysqltest_%' order by host,user,password;
+select host,user,password,plugin,authentication_string from mysql.user where user like 'mysqltest_%' ;
ERROR 42000: SELECT command denied to user 'mysqltest_2'@'localhost' for table 'user'
create user mysqltest_A@'%';
rename user mysqltest_A@'%' to mysqltest_B@'%';
@@ -326,14 +327,9 @@ show grants;
Grants for mysqltest_3@localhost
GRANT USAGE ON *.* TO 'mysqltest_3'@'localhost'
GRANT INSERT, UPDATE, DELETE ON `mysql`.* TO 'mysqltest_3'@'localhost'
-select host,user,password from mysql.user where user like 'mysqltest_%' order by host,user,password;
+select host,user,password,plugin,authentication_string from mysql.user where user like 'mysqltest_%' ;
ERROR 42000: SELECT command denied to user 'mysqltest_3'@'localhost' for table 'user'
-insert into mysql.user set host='%', user='mysqltest_B';
-Warnings:
-Warning 1364 Field 'ssl_cipher' doesn't have a default value
-Warning 1364 Field 'x509_issuer' doesn't have a default value
-Warning 1364 Field 'x509_subject' doesn't have a default value
-Warning 1364 Field 'authentication_string' doesn't have a default value
+insert into mysql.global_priv set host='%', user='mysqltest_B';
create user mysqltest_A@'%';
rename user mysqltest_B@'%' to mysqltest_C@'%';
drop user mysqltest_C@'%';
@@ -373,7 +369,7 @@ set password = password('changed');
disconnect b12302;
connection default;
select host, length(authentication_string) from mysql.user where user like 'mysqltest\_1';
-host length(authentication_string)
+Host length(authentication_string)
127.0.0.1 41
revoke all on mysqltest_1.* from mysqltest_1@'127.0.0.1';
delete from mysql.user where user like 'mysqltest\_1';
@@ -388,7 +384,7 @@ set password = password('changed');
disconnect b12302_2;
connection default;
select host, length(authentication_string) from mysql.user where user like 'mysqltest\_1';
-host length(authentication_string)
+Host length(authentication_string)
127.0.0.0/255.0.0.0 41
revoke all on mysqltest_1.* from mysqltest_1@'127.0.0.0/255.0.0.0';
delete from mysql.user where user like 'mysqltest\_1';
@@ -428,15 +424,10 @@ disconnect con2root;
disconnect con3root;
create database TESTDB;
create table t2(a int);
-create temporary table t1 as select * from mysql.user;
-delete from mysql.user where host='localhost';
-INSERT INTO mysql.user (host, user, password) VALUES
-('%','mysqltest_1',password('password'));
-Warnings:
-Warning 1364 Field 'ssl_cipher' doesn't have a default value
-Warning 1364 Field 'x509_issuer' doesn't have a default value
-Warning 1364 Field 'x509_subject' doesn't have a default value
-Warning 1364 Field 'authentication_string' doesn't have a default value
+create temporary table t1 as select * from mysql.global_priv;
+delete from mysql.global_priv where host='localhost';
+INSERT INTO mysql.global_priv (host, user, priv) VALUES
+('%','mysqltest_1',json_object('authentication_string', password('password')));
INSERT INTO mysql.db (host, db, user, select_priv) VALUES
('%','TESTDB','mysqltest_1','Y');
FLUSH PRIVILEGES;
@@ -445,9 +436,9 @@ create database TEStdb;
Got one of the listed errors
connection default;
disconnect con1;
-delete from mysql.user;
+delete from mysql.global_priv;
delete from mysql.db where host='%' and user='mysqltest_1' and db='TESTDB';
-insert into mysql.user select * from t1;
+insert into mysql.global_priv select * from t1;
drop table t1, t2;
drop database TESTDB;
flush privileges;
@@ -462,6 +453,8 @@ INSERT INTO t2 VALUES (1);
DROP FUNCTION IF EXISTS f2;
CREATE FUNCTION f2 () RETURNS INT
BEGIN DECLARE v INT; SELECT s1 FROM t2 INTO v; RETURN v; END//
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT f2();
f2()
1
@@ -553,27 +546,18 @@ End of 5.0 tests
USE mysql;
SELECT LEFT(CURRENT_USER(),INSTR(CURRENT_USER(),'@')-1) INTO @u;
SELECT MID(CURRENT_USER(),INSTR(CURRENT_USER(),'@')+1) INTO @h;
-SELECT password FROM user WHERE user=@u AND host=@h INTO @pwd;
-SELECT user,host,password,insert_priv FROM user WHERE user=@u AND host=@h;
-user host password insert_priv
-root localhost Y
-UPDATE user SET insert_priv='N' WHERE user=@u AND host=@h;
-SELECT user,host,password,insert_priv FROM user WHERE user=@u AND host=@h;
-user host password insert_priv
-root localhost N
+SELECT user,host,password,plugin,authentication_string,insert_priv FROM user WHERE user=@u AND host=@h;
+User Host Password plugin authentication_string Insert_priv
+root localhost Y
GRANT INSERT ON *.* TO CURRENT_USER();
-SELECT user,host,password,insert_priv FROM user WHERE user=@u AND host=@h;
-user host password insert_priv
-root localhost Y
-UPDATE user SET insert_priv='N' WHERE user=@u AND host=@h;
+SELECT user,host,password,plugin,authentication_string,insert_priv FROM user WHERE user=@u AND host=@h;
+User Host Password plugin authentication_string Insert_priv
+root localhost mysql_native_password Y
GRANT INSERT ON *.* TO CURRENT_USER() IDENTIFIED BY 'keksdose';
-SELECT user,host,password,insert_priv FROM user WHERE user=@u AND host=@h;
-user host password insert_priv
-root localhost *0BB7188CF0DE9B403BA66E9DD810D82652D002EB Y
-UPDATE user SET password=@pwd WHERE user=@u AND host=@h;
-SELECT user,host,password,insert_priv FROM user WHERE user=@u AND host=@h;
-user host password insert_priv
-root localhost Y
+SELECT user,host,password,plugin,authentication_string,insert_priv FROM user WHERE user=@u AND host=@h;
+User Host Password plugin authentication_string Insert_priv
+root localhost *0BB7188CF0DE9B403BA66E9DD810D82652D002EB mysql_native_password *0BB7188CF0DE9B403BA66E9DD810D82652D002EB Y
+UPDATE global_priv SET priv=@root_priv;
FLUSH PRIVILEGES;
USE test;
End of 5.1 tests
diff --git a/mysql-test/main/grant2.test b/mysql-test/main/grant2.test
index 1f7450df6c1..b33d7d63992 100644
--- a/mysql-test/main/grant2.test
+++ b/mysql-test/main/grant2.test
@@ -4,7 +4,7 @@
# Save the initial number of concurrent sessions
--source include/count_sessions.inc
-
+select priv into @root_priv from mysql.global_priv where user='root' and host='localhost';
set GLOBAL sql_mode="";
set LOCAL sql_mode="";
SET NAMES binary;
@@ -231,28 +231,40 @@ grant select on *.* to 'mysqltest_2';
grant insert on test.* to 'mysqltest_2';
grant update on test.t1 to 'mysqltest_2';
grant update (c2) on test.t2 to 'mysqltest_2';
-select host,user,password from mysql.user where user like 'mysqltest_%' order by host,user,password;
-select host,db,user from mysql.db where user like 'mysqltest_%' order by host,db,user;
-select host,db,user,table_name from mysql.tables_priv where user like 'mysqltest_%' order by host,db,user,table_name;
-select host,db,user,table_name,column_name from mysql.columns_priv where user like 'mysqltest_%' order by host,db,user,table_name,column_name;
+--sorted_result
+select host,user,password,plugin,authentication_string from mysql.user where user like 'mysqltest_%';
+--sorted_result
+select host,db,user from mysql.db where user like 'mysqltest_%';
+--sorted_result
+select host,db,user,table_name from mysql.tables_priv where user like 'mysqltest_%';
+--sorted_result
+select host,db,user,table_name,column_name from mysql.columns_priv where user like 'mysqltest_%';
show grants for 'mysqltest_1';
show grants for 'mysqltest_2';
#
# Drop
drop user 'mysqltest_1';
-select host,user,password from mysql.user where user like 'mysqltest_%' order by host,user,password;
-select host,db,user from mysql.db where user like 'mysqltest_%' order by host,db,user;
-select host,db,user,table_name from mysql.tables_priv where user like 'mysqltest_%' order by host,db,user,table_name;
-select host,db,user,table_name,column_name from mysql.columns_priv where user like 'mysqltest_%' order by host,db,user,table_name,column_name;
+--sorted_result
+select host,user,password,plugin,authentication_string from mysql.user where user like 'mysqltest_%';
+--sorted_result
+select host,db,user from mysql.db where user like 'mysqltest_%';
+--sorted_result
+select host,db,user,table_name from mysql.tables_priv where user like 'mysqltest_%';
+--sorted_result
+select host,db,user,table_name,column_name from mysql.columns_priv where user like 'mysqltest_%';
--error ER_NONEXISTING_GRANT
show grants for 'mysqltest_1';
#
# Rename
rename user 'mysqltest_2' to 'mysqltest_1';
-select host,user,password from mysql.user where user like 'mysqltest_%' order by host,user,password;
-select host,db,user from mysql.db where user like 'mysqltest_%' order by host,db,user;
-select host,db,user,table_name from mysql.tables_priv where user like 'mysqltest_%' order by host,db,user,table_name;
-select host,db,user,table_name,column_name from mysql.columns_priv where user like 'mysqltest_%' order by host,db,user,table_name,column_name;
+--sorted_result
+select host,user,password,plugin,authentication_string from mysql.user where user like 'mysqltest_%' ;
+--sorted_result
+select host,db,user from mysql.db where user like 'mysqltest_%' ;
+--sorted_result
+select host,db,user,table_name from mysql.tables_priv where user like 'mysqltest_%' ;
+--sorted_result
+select host,db,user,table_name,column_name from mysql.columns_priv where user like 'mysqltest_%' ;
show grants for 'mysqltest_1';
drop user 'mysqltest_1', 'mysqltest_3';
--error ER_CANNOT_USER
@@ -269,7 +281,8 @@ show grants for 'mysqltest_1';
--error ER_REVOKE_GRANTS
revoke all privileges, grant option from 'mysqltest_1';
drop user 'mysqltest_1';
-select host,db,user from mysql.db where user = 'mysqltest_1' order by host,db,user;
+--sorted_result
+select host,db,user from mysql.db where user = 'mysqltest_1' ;
#
# Add a stray record
insert into mysql.tables_priv set host='%', db='test', user='mysqltest_1', table_name='t1';
@@ -277,7 +290,8 @@ flush privileges;
--error ER_NONEXISTING_GRANT
show grants for 'mysqltest_1';
drop user 'mysqltest_1';
-select host,db,user,table_name from mysql.tables_priv where user = 'mysqltest_1' order by host,db,user,table_name;
+--sorted_result
+select host,db,user,table_name from mysql.tables_priv where user = 'mysqltest_1' ;
#
# Add a stray record
insert into mysql.columns_priv set host='%', db='test', user='mysqltest_1', table_name='t1', column_name='c1';
@@ -285,7 +299,8 @@ flush privileges;
--error ER_NONEXISTING_GRANT
show grants for 'mysqltest_1';
drop user 'mysqltest_1';
-select host,db,user,table_name,column_name from mysql.columns_priv where user = 'mysqltest_1' order by host,db,user,table_name,column_name;
+--sorted_result
+select host,db,user,table_name,column_name from mysql.columns_priv where user = 'mysqltest_1' ;
#
# Handle multi user lists
create user 'mysqltest_1', 'mysqltest_2', 'mysqltest_3';
@@ -330,7 +345,7 @@ grant create user on *.* to mysqltest_2@localhost;
connect (user3,localhost,mysqltest_2,,);
connection user3;
--error ER_TABLEACCESS_DENIED_ERROR
-select host,user,password from mysql.user where user like 'mysqltest_%' order by host,user,password;
+select host,user,password,plugin,authentication_string from mysql.user where user like 'mysqltest_%' ;
create user mysqltest_A@'%';
rename user mysqltest_A@'%' to mysqltest_B@'%';
drop user mysqltest_B@'%';
@@ -345,8 +360,8 @@ connect (user4,localhost,mysqltest_3,,);
connection user4;
show grants;
--error ER_TABLEACCESS_DENIED_ERROR
-select host,user,password from mysql.user where user like 'mysqltest_%' order by host,user,password;
-insert into mysql.user set host='%', user='mysqltest_B';
+select host,user,password,plugin,authentication_string from mysql.user where user like 'mysqltest_%' ;
+insert into mysql.global_priv set host='%', user='mysqltest_B';
create user mysqltest_A@'%';
rename user mysqltest_B@'%' to mysqltest_C@'%';
drop user mysqltest_C@'%';
@@ -466,10 +481,10 @@ disconnect con3root;
create database TESTDB;
create table t2(a int);
-create temporary table t1 as select * from mysql.user;
-delete from mysql.user where host='localhost';
-INSERT INTO mysql.user (host, user, password) VALUES
-('%','mysqltest_1',password('password'));
+create temporary table t1 as select * from mysql.global_priv;
+delete from mysql.global_priv where host='localhost';
+INSERT INTO mysql.global_priv (host, user, priv) VALUES
+('%','mysqltest_1',json_object('authentication_string', password('password')));
INSERT INTO mysql.db (host, db, user, select_priv) VALUES
('%','TESTDB','mysqltest_1','Y');
FLUSH PRIVILEGES;
@@ -485,9 +500,9 @@ create database TEStdb;
# Clean-up
connection default;
disconnect con1;
-delete from mysql.user;
+delete from mysql.global_priv;
delete from mysql.db where host='%' and user='mysqltest_1' and db='TESTDB';
-insert into mysql.user select * from t1;
+insert into mysql.global_priv select * from t1;
drop table t1, t2;
drop database TESTDB;
flush privileges;
@@ -642,27 +657,19 @@ DROP DATABASE db1;
USE mysql;
SELECT LEFT(CURRENT_USER(),INSTR(CURRENT_USER(),'@')-1) INTO @u;
SELECT MID(CURRENT_USER(),INSTR(CURRENT_USER(),'@')+1) INTO @h;
-SELECT password FROM user WHERE user=@u AND host=@h INTO @pwd;
# show current privs.
-SELECT user,host,password,insert_priv FROM user WHERE user=@u AND host=@h;
-
-# toggle INSERT
-UPDATE user SET insert_priv='N' WHERE user=@u AND host=@h;
-SELECT user,host,password,insert_priv FROM user WHERE user=@u AND host=@h;
+SELECT user,host,password,plugin,authentication_string,insert_priv FROM user WHERE user=@u AND host=@h;
# show that GRANT ... TO CURRENT_USER() no longer crashes
GRANT INSERT ON *.* TO CURRENT_USER();
-SELECT user,host,password,insert_priv FROM user WHERE user=@u AND host=@h;
-UPDATE user SET insert_priv='N' WHERE user=@u AND host=@h;
+SELECT user,host,password,plugin,authentication_string,insert_priv FROM user WHERE user=@u AND host=@h;
# show that GRANT ... TO CURRENT_USER() IDENTIFIED BY ... works now
GRANT INSERT ON *.* TO CURRENT_USER() IDENTIFIED BY 'keksdose';
-SELECT user,host,password,insert_priv FROM user WHERE user=@u AND host=@h;
-
-UPDATE user SET password=@pwd WHERE user=@u AND host=@h;
-SELECT user,host,password,insert_priv FROM user WHERE user=@u AND host=@h;
+SELECT user,host,password,plugin,authentication_string,insert_priv FROM user WHERE user=@u AND host=@h;
+UPDATE global_priv SET priv=@root_priv;
FLUSH PRIVILEGES;
USE test;
diff --git a/mysql-test/main/grant3.result b/mysql-test/main/grant3.result
index 4c99fb02f19..1bc6e7572c5 100644
--- a/mysql-test/main/grant3.result
+++ b/mysql-test/main/grant3.result
@@ -29,7 +29,7 @@ grant select on test.* to CUser@localhost;
grant select on test.* to CUser@LOCALHOST;
flush privileges;
SELECT user, host FROM mysql.user where user = 'CUser' order by 1,2;
-user host
+User Host
CUser localhost
SELECT user, host, db, select_priv FROM mysql.db where user = 'CUser' order by 1,2;
user host db select_priv
@@ -37,14 +37,14 @@ CUser localhost test Y
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'CUser'@'LOCALHOST';
flush privileges;
SELECT user, host FROM mysql.user where user = 'CUser' order by 1,2;
-user host
+User Host
CUser localhost
SELECT user, host, db, select_priv FROM mysql.db where user = 'CUser' order by 1,2;
user host db select_priv
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'CUser'@'localhost';
flush privileges;
SELECT user, host FROM mysql.user where user = 'CUser' order by 1,2;
-user host
+User Host
CUser localhost
SELECT user, host, db, select_priv FROM mysql.db where user = 'CUser' order by 1,2;
user host db select_priv
@@ -56,7 +56,7 @@ grant select on test.t1 to CUser@localhost;
grant select on test.t1 to CUser@LOCALHOST;
flush privileges;
SELECT user, host FROM mysql.user where user = 'CUser' order by 1,2;
-user host
+User Host
CUser localhost
SELECT user, host, db, Table_name, Table_priv, Column_priv FROM mysql.tables_priv where user = 'CUser' order by 1,2;
user host db Table_name Table_priv Column_priv
@@ -64,14 +64,14 @@ CUser localhost test t1 Select
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'CUser'@'LOCALHOST';
flush privileges;
SELECT user, host FROM mysql.user where user = 'CUser' order by 1,2;
-user host
+User Host
CUser localhost
SELECT user, host, db, Table_name, Table_priv, Column_priv FROM mysql.tables_priv where user = 'CUser' order by 1,2;
user host db Table_name Table_priv Column_priv
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'CUser'@'localhost';
flush privileges;
SELECT user, host FROM mysql.user where user = 'CUser' order by 1,2;
-user host
+User Host
CUser localhost
SELECT user, host, db, Table_name, Table_priv, Column_priv FROM mysql.tables_priv where user = 'CUser' order by 1,2;
user host db Table_name Table_priv Column_priv
@@ -82,7 +82,7 @@ grant select(a) on test.t1 to CUser@localhost;
grant select(a) on test.t1 to CUser@LOCALHOST;
flush privileges;
SELECT user, host FROM mysql.user where user = 'CUser' order by 1,2;
-user host
+User Host
CUser localhost
SELECT user, host, db, Table_name, Table_priv, Column_priv FROM mysql.tables_priv where user = 'CUser' order by 1,2;
user host db Table_name Table_priv Column_priv
@@ -90,14 +90,14 @@ CUser localhost test t1 Select
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'CUser'@'LOCALHOST';
flush privileges;
SELECT user, host FROM mysql.user where user = 'CUser' order by 1,2;
-user host
+User Host
CUser localhost
SELECT user, host, db, Table_name, Table_priv, Column_priv FROM mysql.tables_priv where user = 'CUser' order by 1,2;
user host db Table_name Table_priv Column_priv
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'CUser'@'localhost';
flush privileges;
SELECT user, host FROM mysql.user where user = 'CUser' order by 1,2;
-user host
+User Host
CUser localhost
SELECT user, host, db, Table_name, Table_priv, Column_priv FROM mysql.tables_priv where user = 'CUser' order by 1,2;
user host db Table_name Table_priv Column_priv
@@ -109,7 +109,7 @@ grant select on test.* to CUser2@localhost;
grant select on test.* to CUser2@LOCALHOST;
flush privileges;
SELECT user, host FROM mysql.user where user = 'CUser2' order by 1,2;
-user host
+User Host
CUser2 localhost
SELECT user, host, db, select_priv FROM mysql.db where user = 'CUser2' order by 1,2;
user host db select_priv
@@ -117,7 +117,7 @@ CUser2 localhost test Y
REVOKE SELECT ON test.* FROM 'CUser2'@'LOCALHOST';
flush privileges;
SELECT user, host FROM mysql.user where user = 'CUser2' order by 1,2;
-user host
+User Host
CUser2 localhost
SELECT user, host, db, select_priv FROM mysql.db where user = 'CUser2' order by 1,2;
user host db select_priv
@@ -125,7 +125,7 @@ REVOKE SELECT ON test.* FROM 'CUser2'@'localhost';
ERROR 42000: There is no such grant defined for user 'CUser2' on host 'localhost'
flush privileges;
SELECT user, host FROM mysql.user where user = 'CUser2' order by 1,2;
-user host
+User Host
CUser2 localhost
SELECT user, host, db, select_priv FROM mysql.db where user = 'CUser2' order by 1,2;
user host db select_priv
diff --git a/mysql-test/main/grant4.result b/mysql-test/main/grant4.result
index c3db2e03e3e..9aad70205ec 100644
--- a/mysql-test/main/grant4.result
+++ b/mysql-test/main/grant4.result
@@ -198,15 +198,15 @@ grant select on test.* to foo6 identified by password '2222222222222222';
grant select on test.* to foo7 identified via mysql_native_password using '11111111111111111111111111111111111111111';
grant select on test.* to foo8 identified via mysql_old_password using '2222222222222222';
select user,password,plugin,authentication_string from mysql.user where user like 'foo%';
-user password plugin authentication_string
-foo1 11111111111111111111111111111111111111111
-foo2 2222222222222222
-foo3 11111111111111111111111111111111111111111
-foo4 2222222222222222
-foo5 11111111111111111111111111111111111111111
-foo6 2222222222222222
-foo7 11111111111111111111111111111111111111111
-foo8 2222222222222222
+User Password plugin authentication_string
+foo1 11111111111111111111111111111111111111111 mysql_native_password 11111111111111111111111111111111111111111
+foo2 2222222222222222 mysql_old_password 2222222222222222
+foo3 11111111111111111111111111111111111111111 mysql_native_password 11111111111111111111111111111111111111111
+foo4 2222222222222222 mysql_old_password 2222222222222222
+foo5 11111111111111111111111111111111111111111 mysql_native_password 11111111111111111111111111111111111111111
+foo6 2222222222222222 mysql_old_password 2222222222222222
+foo7 11111111111111111111111111111111111111111 mysql_native_password 11111111111111111111111111111111111111111
+foo8 2222222222222222 mysql_old_password 2222222222222222
drop user foo1;
drop user foo2;
drop user foo3;
diff --git a/mysql-test/main/grant5.result b/mysql-test/main/grant5.result
index c0fecf0c369..c35e8201582 100644
--- a/mysql-test/main/grant5.result
+++ b/mysql-test/main/grant5.result
@@ -21,7 +21,84 @@ drop role foo;
CREATE TABLE t1 (a INT);
LOCK TABLE t1 WRITE;
REVOKE EXECUTE ON PROCEDURE sp FROM u;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'procs_priv' was not locked with LOCK TABLES
REVOKE PROCESS ON *.* FROM u;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'db' was not locked with LOCK TABLES
DROP TABLE t1;
+create user u1@h identified with 'mysql_native_password' using 'pwd';
+ERROR HY000: Password hash should be a 41-digit hexadecimal number
+create user u1@h identified with 'mysql_native_password' using password('pwd');
+create user u2@h identified with 'mysql_native_password' using '*975B2CD4FF9AE554FE8AD33168FBFC326D2021DD';
+create user u3@h identified with 'mysql_native_password';
+set password for u3@h = 'pwd';
+ERROR HY000: Password hash should be a 41-digit hexadecimal number
+set password for u3@h = password('pwd');
+create user u4@h identified with 'mysql_native_password';
+set password for u4@h = '*975B2CD4FF9AE554FE8AD33168FBFC326D2021DD';
+create user u5@h identified with 'mysql_old_password' using 'pwd';
+ERROR HY000: Password hash should be a 16-digit hexadecimal number
+create user u5@h identified with 'mysql_old_password' using password('pwd');
+create user u6@h identified with 'mysql_old_password' using '78a302dd267f6044';
+create user u7@h identified with 'mysql_old_password';
+set password for u7@h = 'pwd';
+ERROR HY000: Password hash should be a 41-digit hexadecimal number
+set password for u7@h = old_password('pwd');
+create user u8@h identified with 'mysql_old_password';
+set password for u8@h = '78a302dd267f6044';
+select user,host,plugin,authentication_string from mysql.user where host='h';
+User Host plugin authentication_string
+u1 h mysql_native_password *975B2CD4FF9AE554FE8AD33168FBFC326D2021DD
+u2 h mysql_native_password *975B2CD4FF9AE554FE8AD33168FBFC326D2021DD
+u3 h mysql_native_password *975B2CD4FF9AE554FE8AD33168FBFC326D2021DD
+u4 h mysql_native_password *975B2CD4FF9AE554FE8AD33168FBFC326D2021DD
+u5 h mysql_old_password 78a302dd267f6044
+u6 h mysql_old_password 78a302dd267f6044
+u7 h mysql_old_password 78a302dd267f6044
+u8 h mysql_old_password 78a302dd267f6044
+update mysql.global_priv set priv=json_set(priv, '$.authentication_string', 'bad') where user='u1';
+update mysql.global_priv set priv=json_set(priv, '$.authentication_string', 'bad') where user='u5';
+update mysql.global_priv set priv=json_set(priv, '$.plugin', 'nonexistent') where user='u8';
+flush privileges;
+show create user u1@h;
+CREATE USER for u1@h
+CREATE USER 'u1'@'h' IDENTIFIED BY PASSWORD 'bad'
+show create user u2@h;
+CREATE USER for u2@h
+CREATE USER 'u2'@'h' IDENTIFIED BY PASSWORD '*975B2CD4FF9AE554FE8AD33168FBFC326D2021DD'
+show create user u3@h;
+CREATE USER for u3@h
+CREATE USER 'u3'@'h' IDENTIFIED BY PASSWORD '*975B2CD4FF9AE554FE8AD33168FBFC326D2021DD'
+show create user u4@h;
+CREATE USER for u4@h
+CREATE USER 'u4'@'h' IDENTIFIED BY PASSWORD '*975B2CD4FF9AE554FE8AD33168FBFC326D2021DD'
+show create user u5@h;
+CREATE USER for u5@h
+CREATE USER 'u5'@'h' IDENTIFIED BY PASSWORD 'bad'
+show create user u6@h;
+CREATE USER for u6@h
+CREATE USER 'u6'@'h' IDENTIFIED BY PASSWORD '78a302dd267f6044'
+show create user u7@h;
+CREATE USER for u7@h
+CREATE USER 'u7'@'h' IDENTIFIED BY PASSWORD '78a302dd267f6044'
+show create user u8@h;
+CREATE USER for u8@h
+CREATE USER 'u8'@'h' IDENTIFIED VIA nonexistent USING '78a302dd267f6044'
+grant select on *.* to u1@h;
+grant select on *.* to u2@h;
+grant select on *.* to u3@h;
+grant select on *.* to u4@h;
+grant select on *.* to u5@h;
+grant select on *.* to u6@h;
+grant select on *.* to u7@h;
+grant select on *.* to u8@h;
+select user,select_priv,plugin,authentication_string from mysql.user where user like 'u_';
+User Select_priv plugin authentication_string
+u1 Y mysql_native_password bad
+u2 Y mysql_native_password *975B2CD4FF9AE554FE8AD33168FBFC326D2021DD
+u3 Y mysql_native_password *975B2CD4FF9AE554FE8AD33168FBFC326D2021DD
+u4 Y mysql_native_password *975B2CD4FF9AE554FE8AD33168FBFC326D2021DD
+u5 Y mysql_old_password bad
+u6 Y mysql_old_password 78a302dd267f6044
+u7 Y mysql_old_password 78a302dd267f6044
+u8 Y nonexistent 78a302dd267f6044
+drop user u1@h, u2@h, u3@h, u4@h, u5@h, u6@h, u7@h, u8@h;
diff --git a/mysql-test/main/grant5.test b/mysql-test/main/grant5.test
index 649bba7d1ca..cc673754461 100644
--- a/mysql-test/main/grant5.test
+++ b/mysql-test/main/grant5.test
@@ -33,3 +33,56 @@ REVOKE EXECUTE ON PROCEDURE sp FROM u;
--error ER_TABLE_NOT_LOCKED
REVOKE PROCESS ON *.* FROM u;
DROP TABLE t1;
+
+#
+# MDEV-12321 authentication plugin: SET PASSWORD support
+#
+error ER_PASSWD_LENGTH;
+create user u1@h identified with 'mysql_native_password' using 'pwd';
+create user u1@h identified with 'mysql_native_password' using password('pwd');
+let p=`select password('pwd')`;
+eval create user u2@h identified with 'mysql_native_password' using '$p';
+create user u3@h identified with 'mysql_native_password';
+error ER_PASSWD_LENGTH;
+set password for u3@h = 'pwd';
+set password for u3@h = password('pwd');
+create user u4@h identified with 'mysql_native_password';
+eval set password for u4@h = '$p';
+error ER_PASSWD_LENGTH;
+create user u5@h identified with 'mysql_old_password' using 'pwd';
+create user u5@h identified with 'mysql_old_password' using password('pwd');
+let p=`select old_password('pwd')`;
+eval create user u6@h identified with 'mysql_old_password' using '$p';
+create user u7@h identified with 'mysql_old_password';
+error ER_PASSWD_LENGTH;
+set password for u7@h = 'pwd';
+set password for u7@h = old_password('pwd');
+create user u8@h identified with 'mysql_old_password';
+eval set password for u8@h = '$p';
+sorted_result;
+select user,host,plugin,authentication_string from mysql.user where host='h';
+# test with invalid entries
+update mysql.global_priv set priv=json_set(priv, '$.authentication_string', 'bad') where user='u1';
+update mysql.global_priv set priv=json_set(priv, '$.authentication_string', 'bad') where user='u5';
+update mysql.global_priv set priv=json_set(priv, '$.plugin', 'nonexistent') where user='u8';
+flush privileges;
+show create user u1@h;
+show create user u2@h;
+show create user u3@h;
+show create user u4@h;
+show create user u5@h;
+show create user u6@h;
+show create user u7@h;
+show create user u8@h;
+grant select on *.* to u1@h;
+grant select on *.* to u2@h;
+grant select on *.* to u3@h;
+grant select on *.* to u4@h;
+grant select on *.* to u5@h;
+grant select on *.* to u6@h;
+grant select on *.* to u7@h;
+grant select on *.* to u8@h;
+select user,select_priv,plugin,authentication_string from mysql.user where user like 'u_';
+
+# but they still can be dropped
+drop user u1@h, u2@h, u3@h, u4@h, u5@h, u6@h, u7@h, u8@h;
diff --git a/mysql-test/main/grant_4332.result b/mysql-test/main/grant_4332.result
index b62ca1a20cc..af6b23088f5 100644
--- a/mysql-test/main/grant_4332.result
+++ b/mysql-test/main/grant_4332.result
@@ -1,5 +1,5 @@
-set GLOBAL sql_mode="";
-set LOCAL sql_mode="";
+set global sql_mode="";
+set local sql_mode="";
alter table mysql.user modify User char(16) binary not null default '';
alter table mysql.db modify User char(16) binary not null default '';
alter table mysql.tables_priv modify User char(16) binary not null default '';
@@ -32,4 +32,4 @@ Catalog Database Table Table_alias Column Column_alias Type Length Max length Is
def user() 253 141 14 N 1 39 8
user()
root@localhost
-set GLOBAL sql_mode=default;
+set global sql_mode=default;
diff --git a/mysql-test/main/grant_4332.test b/mysql-test/main/grant_4332.test
index 41e0b822f98..f723e4afe44 100644
--- a/mysql-test/main/grant_4332.test
+++ b/mysql-test/main/grant_4332.test
@@ -6,8 +6,9 @@
#
--source include/not_embedded.inc
-set GLOBAL sql_mode="";
-set LOCAL sql_mode="";
+--source include/switch_to_mysql_user.inc
+set global sql_mode="";
+set local sql_mode="";
alter table mysql.user modify User char(16) binary not null default '';
alter table mysql.db modify User char(16) binary not null default '';
@@ -42,4 +43,5 @@ flush privileges;
select user();
--disable_metadata
-set GLOBAL sql_mode=default;
+set global sql_mode=default;
+--source include/switch_to_mysql_global_priv.inc
diff --git a/mysql-test/main/grant_lowercase.result b/mysql-test/main/grant_lowercase.result
index d4a1667595e..a87f74721bc 100644
--- a/mysql-test/main/grant_lowercase.result
+++ b/mysql-test/main/grant_lowercase.result
@@ -9,17 +9,11 @@ connection default;
disconnect conn1;
drop user user1@localhost;
call mtr.add_suppression("Incorrect database name");
-alter table mysql.host modify Db varchar(200);
alter table mysql.db modify User char(16) default "", modify Db varchar(200) default "";
-insert mysql.host set db=concat('=>', repeat(_utf8 'й', 200));
-Warnings:
-Warning 1265 Data truncated for column 'Db' at row 1
insert mysql.db set db=concat('=>', repeat(_utf8 'й', 200));
Warnings:
Warning 1265 Data truncated for column 'Db' at row 1
flush privileges;
-delete from mysql.host where db like '=>%';
delete from mysql.db where db like '=>%';
-alter table mysql.host modify Db char(64) default "";
alter table mysql.db modify Db char(64) default "", modify User char(80) default "";
flush privileges;
diff --git a/mysql-test/main/grant_lowercase.test b/mysql-test/main/grant_lowercase.test
index e31a72b890e..ffd1d4682a0 100644
--- a/mysql-test/main/grant_lowercase.test
+++ b/mysql-test/main/grant_lowercase.test
@@ -18,13 +18,9 @@ drop user user1@localhost;
# in acl_load()
call mtr.add_suppression("Incorrect database name");
-alter table mysql.host modify Db varchar(200);
alter table mysql.db modify User char(16) default "", modify Db varchar(200) default "";
-insert mysql.host set db=concat('=>', repeat(_utf8 'й', 200));
insert mysql.db set db=concat('=>', repeat(_utf8 'й', 200));
flush privileges; # shouldn't crash here
-delete from mysql.host where db like '=>%';
delete from mysql.db where db like '=>%';
-alter table mysql.host modify Db char(64) default "";
alter table mysql.db modify Db char(64) default "", modify User char(80) default "";
flush privileges;
diff --git a/mysql-test/main/group_by.result b/mysql-test/main/group_by.result
index a6da0e70247..2eaec8a08b4 100644
--- a/mysql-test/main/group_by.result
+++ b/mysql-test/main/group_by.result
@@ -536,17 +536,17 @@ a b
select t1.a,t2.b from t1,t2 where t1.a=t2.a group by t1.a,t2.b ORDER BY NULL;
a b
1 3
+1 1
3 1
2 2
-1 1
explain select t1.a,t2.b from t1,t2 where t1.a=t2.a group by t1.a,t2.b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using temporary; Using filesort
-1 SIMPLE t2 ALL a NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ALL a NULL NULL NULL 4 Using temporary; Using filesort
+1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
explain select t1.a,t2.b from t1,t2 where t1.a=t2.a group by t1.a,t2.b ORDER BY NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using temporary
-1 SIMPLE t2 ALL a NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t2 ALL a NULL NULL NULL 4 Using temporary
+1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
drop table t1,t2;
create table t1 (a int, b int);
insert into t1 values (1, 4),(10, 40),(1, 4),(10, 43),(1, 4),(10, 41),(1, 4),(10, 43),(1, 4);
@@ -1330,6 +1330,7 @@ INSERT INTO t1 SELECT a + 64,b FROM t1;
INSERT INTO t1 SELECT a + 128,b FROM t1 limit 16;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN SELECT a FROM t1 WHERE a < 2;
id select_type table type possible_keys key key_len ref rows Extra
@@ -2429,7 +2430,12 @@ a int,
b varchar(1),
KEY (b,a)
);
-INSERT INTO t1 VALUES (1,NULL),(0,'a'),(1,NULL),(0,'a');
+INSERT INTO t1 VALUES
+(1,NULL),(0,'a'),(1,NULL),(0,'a'), (1,'a'),(0,'a'),(2,'a'),(1,'a');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
EXPLAIN SELECT SQL_BUFFER_RESULT MIN(a), b FROM t1 WHERE t1.b = 'a' GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
@@ -2749,6 +2755,7 @@ create table t2 (c int, col1 int, key(c));
insert into t2 select t1.a, 100000 from t1;
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
explain
select
diff --git a/mysql-test/main/group_by.test b/mysql-test/main/group_by.test
index c8b28828369..6686166e00e 100644
--- a/mysql-test/main/group_by.test
+++ b/mysql-test/main/group_by.test
@@ -1545,7 +1545,9 @@ CREATE TABLE t1 (
b varchar(1),
KEY (b,a)
);
-INSERT INTO t1 VALUES (1,NULL),(0,'a'),(1,NULL),(0,'a');
+INSERT INTO t1 VALUES
+ (1,NULL),(0,'a'),(1,NULL),(0,'a'), (1,'a'),(0,'a'),(2,'a'),(1,'a');
+ANALYZE TABLE t1;
let $query=
SELECT SQL_BUFFER_RESULT MIN(a), b FROM t1 WHERE t1.b = 'a' GROUP BY b;
diff --git a/mysql-test/main/group_by_innodb.result b/mysql-test/main/group_by_innodb.result
index 034866b63d5..f935d4da45b 100644
--- a/mysql-test/main/group_by_innodb.result
+++ b/mysql-test/main/group_by_innodb.result
@@ -151,6 +151,7 @@ KEY idx3 (a, b)
INSERT INTO t1 (a, b) SELECT t01.a, t02.a FROM t0 t01, t0 t02;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN SELECT DISTINCT a, MAX(b) FROM t1 WHERE a >= 0 GROUP BY a,a;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/mysql-test/main/group_min_max.result b/mysql-test/main/group_min_max.result
index cfdf9ef9865..59e3fc65501 100644
--- a/mysql-test/main/group_min_max.result
+++ b/mysql-test/main/group_min_max.result
@@ -40,6 +40,7 @@ create index idx_t1_1 on t1 (a1,a2,b,c);
create index idx_t1_2 on t1 (a1,a2,b);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
drop table if exists t2;
create table t2 (
@@ -68,6 +69,7 @@ create index idx_t2_1 on t2 (a1,a2,b,c);
create index idx_t2_2 on t2 (a1,a2,b);
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
drop table if exists t3;
create table t3 (
@@ -130,6 +132,7 @@ create index idx_t3_1 on t3 (a1,a2,b,c);
create index idx_t3_2 on t3 (a1,a2,b);
analyze table t3;
Table Op Msg_type Msg_text
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
explain select a1, min(a2) from t1 group by a1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -317,7 +320,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 147 NULL 14 Using where; Using index for group-by
explain select a1, max(c) from t1 where a1 in ('a','b','d') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 147 NULL 14 Using where; Using index for group-by
+1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 147 NULL 13 Using where; Using index for group-by
explain select a1,a2,b, max(c) from t2 where a1 < 'd' group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range idx_t2_0,idx_t2_1,idx_t2_2 idx_t2_1 146 NULL # Using where; Using index for group-by
@@ -684,10 +687,10 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL idx_t1_1 147 NULL 9 Using where; Using index for group-by
explain select a1,a2,b,max(c),min(c) from t2 where (a2 = 'a') and (b = 'b') group by a1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL idx_t2_1 163 NULL 5 Using where; Using index for group-by
+1 SIMPLE t2 range NULL idx_t2_1 163 NULL 6 Using where; Using index for group-by
explain select a1,max(c),min(c) from t2 where (a2 = 'a') and (b = 'b') group by a1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL idx_t2_1 163 NULL 5 Using where; Using index for group-by
+1 SIMPLE t2 range NULL idx_t2_1 163 NULL 6 Using where; Using index for group-by
explain select a1,a2,b, max(c) from t2 where (b = 'b') group by a1,a2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range NULL idx_t2_1 146 NULL 10 Using where; Using index for group-by
@@ -804,10 +807,10 @@ b h212 e212
c h312 e312
explain select a1,a2,b,min(c) from t2 where (a2 = 'a') and b is NULL group by a1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL idx_t2_1 163 NULL 5 Using where; Using index for group-by
+1 SIMPLE t2 range NULL idx_t2_1 163 NULL 6 Using where; Using index for group-by
explain select a1,a2,b,max(c) from t2 where (a2 = 'a') and b is NULL group by a1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL idx_t2_1 146 NULL 5 Using where; Using index for group-by
+1 SIMPLE t2 range NULL idx_t2_1 146 NULL 6 Using where; Using index for group-by
explain select a1,a2,b,min(c) from t2 where b is NULL group by a1,a2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range NULL idx_t2_1 163 NULL 10 Using where; Using index for group-by
@@ -1653,7 +1656,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL idx_t1_1 163 NULL 17 Using where; Using index for group-by
explain select a1,a2,b from t1 where (a1 > 'a') and (a2 > 'a') and (b = 'c') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 147 NULL 14 Using where; Using index for group-by
+1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_2 147 NULL 13 Using where; Using index for group-by
explain select a1,a2,b from t2 where (a1 >= 'c' or a2 < 'b') and (b > 'a') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range idx_t2_0,idx_t2_1,idx_t2_2 idx_t2_1 146 NULL # Using where; Using index for group-by
@@ -1662,7 +1665,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range NULL idx_t2_1 146 NULL # Using where; Using index for group-by
explain select a1,a2,b,c from t2 where (a2 >= 'b') and (b = 'a') and (c = 'i121') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 index NULL idx_t2_1 163 NULL # Using where; Using index
+1 SIMPLE t2 range NULL idx_t2_1 163 NULL # Using where; Using index for group-by
explain select a1,a2,b from t2 where (a1 > 'a') and (a2 > 'a') and (b = 'c') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range idx_t2_0,idx_t2_1,idx_t2_2 idx_t2_1 146 NULL # Using where; Using index for group-by
@@ -1713,12 +1716,12 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL idx_t1_1 147 NULL 17 Using where; Using index for group-by
explain extended select distinct a1,a2,b,c from t1 where (a2 >= 'b') and (b = 'a') and (c = 'i121');
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 index NULL idx_t1_1 163 NULL 128 50.78 Using where; Using index
+1 SIMPLE t1 index NULL idx_t1_1 163 NULL 128 0.38 Using where; Using index
Warnings:
Note 1003 select distinct `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2`,`test`.`t1`.`b` AS `b`,`test`.`t1`.`c` AS `c` from `test`.`t1` where `test`.`t1`.`b` = 'a' and `test`.`t1`.`c` = 'i121' and `test`.`t1`.`a2` >= 'b'
explain select distinct a1,a2,b from t1 where (a1 > 'a') and (a2 > 'a') and (b = 'c');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 147 NULL 14 Using where; Using index for group-by
+1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_2 147 NULL 13 Using where; Using index for group-by
explain select distinct b from t1 where (a2 >= 'b') and (b = 'a');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL idx_t1_2 147 NULL 128 Using where; Using index
@@ -1730,7 +1733,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range NULL idx_t2_1 146 NULL # Using where; Using index for group-by
explain extended select distinct a1,a2,b,c from t2 where (a2 >= 'b') and (b = 'a') and (c = 'i121');
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 index NULL idx_t2_1 163 NULL 164 50.61 Using where; Using index
+1 SIMPLE t2 index NULL idx_t2_1 163 NULL 164 0.30 Using where; Using index
Warnings:
Note 1003 select distinct `test`.`t2`.`a1` AS `a1`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` where `test`.`t2`.`b` = 'a' and `test`.`t2`.`c` = 'i121' and `test`.`t2`.`a2` >= 'b'
explain select distinct a1,a2,b from t2 where (a1 > 'a') and (a2 > 'a') and (b = 'c');
@@ -1864,7 +1867,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL idx_t1_1 163 NULL 17 Using where; Using index for group-by
explain select distinct a1,a2,b from t1 where (a1 > 'a') and (a2 > 'a') and (b = 'c') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 147 NULL 14 Using where; Using index for group-by
+1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_2 147 NULL 13 Using where; Using index for group-by
explain select distinct b from t1 where (a2 >= 'b') and (b = 'a') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL idx_t1_1 147 NULL 17 Using where; Using index for group-by; Using temporary; Using filesort
@@ -1876,7 +1879,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range NULL idx_t2_1 146 NULL # Using where; Using index for group-by
explain select distinct a1,a2,b,c from t2 where (a2 >= 'b') and (b = 'a') and (c = 'i121') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 index NULL idx_t2_1 163 NULL # Using where; Using index
+1 SIMPLE t2 range NULL idx_t2_1 163 NULL # Using where; Using index for group-by
explain select distinct a1,a2,b from t2 where (a1 > 'a') and (a2 > 'a') and (b = 'c') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range idx_t2_0,idx_t2_1,idx_t2_2 idx_t2_1 146 NULL # Using where; Using index for group-by
@@ -1959,7 +1962,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL idx_t1_1 163 NULL 65 Using where; Using index for group-by (scanning)
explain extended select count(distinct a1,a2,b) from t1 where (a1 > 'a') and (a2 > 'a') and (b = 'c');
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 147 NULL 14 100.00 Using where; Using index for group-by
+1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_2 147 NULL 13 100.00 Using where; Using index for group-by
Warnings:
Note 1003 select count(distinct `test`.`t1`.`a1`,`test`.`t1`.`a2`,`test`.`t1`.`b`) AS `count(distinct a1,a2,b)` from `test`.`t1` where `test`.`t1`.`b` = 'c' and `test`.`t1`.`a1` > 'a' and `test`.`t1`.`a2` > 'a'
explain select count(distinct b) from t1 where (a2 >= 'b') and (b = 'a');
@@ -1967,7 +1970,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL idx_t1_2 147 NULL 128 Using where; Using index
explain extended select 98 + count(distinct a1,a2,b) from t1 where (a1 > 'a') and (a2 > 'a');
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 147 NULL 14 100.00 Using where; Using index for group-by
+1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_2 147 NULL 13 100.00 Using where; Using index for group-by
Warnings:
Note 1003 select 98 + count(distinct `test`.`t1`.`a1`,`test`.`t1`.`a2`,`test`.`t1`.`b`) AS `98 + count(distinct a1,a2,b)` from `test`.`t1` where `test`.`t1`.`a1` > 'a' and `test`.`t1`.`a2` > 'a'
select count(distinct a1,a2,b) from t1 where (a2 >= 'b') and (b = 'a');
@@ -2075,19 +2078,19 @@ id select_type table type possible_keys key key_len ref rows Extra
explain extended select a1,a2,min(b),max(b) from t1
where (a1 = 'b' or a1 = 'd' or a1 = 'a' or a1 = 'c') and (a2 > 'a') and (c > 'a111') group by a1,a2;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 130 NULL 76 85.53 Using where; Using index
+1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 130 NULL 77 99.22 Using where; Using index
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2`,min(`test`.`t1`.`b`) AS `min(b)`,max(`test`.`t1`.`b`) AS `max(b)` from `test`.`t1` where (`test`.`t1`.`a1` = 'b' or `test`.`t1`.`a1` = 'd' or `test`.`t1`.`a1` = 'a' or `test`.`t1`.`a1` = 'c') and `test`.`t1`.`a2` > 'a' and `test`.`t1`.`c` > 'a111' group by `test`.`t1`.`a1`,`test`.`t1`.`a2`
explain extended select a1,a2,b,min(c),max(c) from t1
where (a1 = 'b' or a1 = 'd' or a1 = 'a' or a1 = 'c') and (a2 > 'a') and (d > 'xy2') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL idx_t1_0,idx_t1_1,idx_t1_2 NULL NULL NULL 128 50.78 Using where; Using temporary; Using filesort
+1 SIMPLE t1 ALL idx_t1_0,idx_t1_1,idx_t1_2 NULL NULL NULL 128 45.12 Using where; Using temporary; Using filesort
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2`,`test`.`t1`.`b` AS `b`,min(`test`.`t1`.`c`) AS `min(c)`,max(`test`.`t1`.`c`) AS `max(c)` from `test`.`t1` where (`test`.`t1`.`a1` = 'b' or `test`.`t1`.`a1` = 'd' or `test`.`t1`.`a1` = 'a' or `test`.`t1`.`a1` = 'c') and `test`.`t1`.`a2` > 'a' and `test`.`t1`.`d` > 'xy2' group by `test`.`t1`.`a1`,`test`.`t1`.`a2`,`test`.`t1`.`b`
explain extended select a1,a2,b,c from t1
where (a1 = 'b' or a1 = 'd' or a1 = 'a' or a1 = 'c') and (a2 > 'a') and (d > 'xy2') group by a1,a2,b,c;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL idx_t1_0,idx_t1_1,idx_t1_2 NULL NULL NULL 128 50.78 Using where; Using temporary; Using filesort
+1 SIMPLE t1 ALL idx_t1_0,idx_t1_1,idx_t1_2 NULL NULL NULL 128 45.12 Using where; Using temporary; Using filesort
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2`,`test`.`t1`.`b` AS `b`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`a1` = 'b' or `test`.`t1`.`a1` = 'd' or `test`.`t1`.`a1` = 'a' or `test`.`t1`.`a1` = 'c') and `test`.`t1`.`a2` > 'a' and `test`.`t1`.`d` > 'xy2' group by `test`.`t1`.`a1`,`test`.`t1`.`a2`,`test`.`t1`.`b`,`test`.`t1`.`c`
explain select a1,a2,b,max(c),min(c) from t2 where (a2 = 'a') and (b = 'b') or (b < 'b') group by a1;
@@ -2095,7 +2098,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL idx_t2_1 163 NULL 164 Using where; Using index
explain extended select a1,a2,b from t1 where (a1 = 'b' or a1 = 'd' or a1 = 'a' or a1 = 'c') and (a2 > 'a') and (c > 'a111') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 130 NULL 76 85.53 Using where; Using index
+1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_1 130 NULL 77 99.22 Using where; Using index
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2`,`test`.`t1`.`b` AS `b` from `test`.`t1` where (`test`.`t1`.`a1` = 'b' or `test`.`t1`.`a1` = 'd' or `test`.`t1`.`a1` = 'a' or `test`.`t1`.`a1` = 'c') and `test`.`t1`.`a2` > 'a' and `test`.`t1`.`c` > 'a111' group by `test`.`t1`.`a1`,`test`.`t1`.`a2`,`test`.`t1`.`b`
explain select a1,a2,min(b),c from t2 where (a2 = 'a') and (c = 'a111') group by a1;
@@ -2119,12 +2122,12 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL idx_t1_2 147 NULL 128 Using index
explain extended select a1,a2,count(a2) from t1 where (a1 > 'a') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 index idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_2 147 NULL 128 75.00 Using where; Using index
+1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_2 65 NULL 101 95.05 Using where; Using index
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2`,count(`test`.`t1`.`a2`) AS `count(a2)` from `test`.`t1` where `test`.`t1`.`a1` > 'a' group by `test`.`t1`.`a1`,`test`.`t1`.`a2`,`test`.`t1`.`b`
explain extended select sum(ord(a1)) from t1 where (a1 > 'a') group by a1,a2,b;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 index idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_2 147 NULL 128 75.00 Using where; Using index
+1 SIMPLE t1 range idx_t1_0,idx_t1_1,idx_t1_2 idx_t1_2 65 NULL 101 95.05 Using where; Using index
Warnings:
Note 1003 select sum(ord(`test`.`t1`.`a1`)) AS `sum(ord(a1))` from `test`.`t1` where `test`.`t1`.`a1` > 'a' group by `test`.`t1`.`a1`,`test`.`t1`.`a2`,`test`.`t1`.`b`
create table t4 as select distinct a1, a2, b, c from t1;
@@ -2225,7 +2228,7 @@ a
BB
EXPLAIN SELECT a FROM t1 WHERE a='AA' GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref PRIMARY PRIMARY 7 const 3 Using where; Using index
+1 SIMPLE t1 ref PRIMARY PRIMARY 7 const 4 Using where; Using index
EXPLAIN SELECT a FROM t1 WHERE a='BB' GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref PRIMARY PRIMARY 7 const 1 Using where; Using index
@@ -2281,9 +2284,16 @@ INSERT INTO t1 (a) VALUES
(''), ('CENTRAL'), ('EASTERN'), ('GREATER LONDON'),
('NORTH CENTRAL'), ('NORTH EAST'), ('NORTH WEST'), ('SCOTLAND'),
('SOUTH EAST'), ('SOUTH WEST'), ('WESTERN');
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
EXPLAIN SELECT DISTINCT a,a FROM t1 ORDER BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 66 NULL 6 Using index for group-by
+1 SIMPLE t1 range NULL a 66 NULL 12 Using index for group-by
SELECT DISTINCT a,a FROM t1 ORDER BY a;
a a
@@ -2342,10 +2352,16 @@ id2 id3 id5 id4 id3 id6 id5 id1
1 1 1 1 1 1 1 1
DROP TABLE t1,t2,t3,t4,t5,t6;
CREATE TABLE t1 (a int, b int, KEY (a,b), KEY b (b));
-INSERT INTO t1 VALUES (1,1),(1,2),(1,0),(1,3);
+INSERT INTO t1 VALUES
+(1,1),(1,2),(1,0),(1,3),
+(1,-1),(1,-2),(1,-3),(1,-4);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
explain SELECT MAX(b), a FROM t1 WHERE b < 2 AND a = 1 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a,b a 10 NULL 1 Using where; Using index for group-by
+1 SIMPLE t1 range a,b a 10 NULL 2 Using where; Using index for group-by
SELECT MAX(b), a FROM t1 WHERE b < 2 AND a = 1 GROUP BY a;
MAX(b) a
1 1
@@ -2364,9 +2380,13 @@ DROP TABLE t1,t2;
CREATE TABLE t1 (a INT, b INT, INDEX (a,b));
INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3), (1,4), (1,5),
(2,2), (2,3), (2,1), (3,1), (4,1), (4,2), (4,3), (4,4), (4,5), (4,6);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
EXPLAIN SELECT max(b), a FROM t1 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 5 NULL 8 Using index for group-by
+1 SIMPLE t1 index NULL a 10 NULL 15 Using index
FLUSH STATUS;
SELECT max(b), a FROM t1 GROUP BY a;
max(b) a
@@ -2376,18 +2396,18 @@ max(b) a
6 4
SHOW STATUS LIKE 'handler_read__e%';
Variable_name Value
-Handler_read_key 8
-Handler_read_next 0
+Handler_read_key 0
+Handler_read_next 15
Handler_read_retry 0
EXPLAIN SELECT max(b), a FROM t1 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 5 NULL 8 Using index for group-by
+1 SIMPLE t1 index NULL a 10 NULL 15 Using index
FLUSH STATUS;
CREATE TABLE t2 SELECT max(b), a FROM t1 GROUP BY a;
SHOW STATUS LIKE 'handler_read__e%';
Variable_name Value
-Handler_read_key 8
-Handler_read_next 0
+Handler_read_key 0
+Handler_read_next 15
Handler_read_retry 0
FLUSH STATUS;
SELECT * FROM (SELECT max(b), a FROM t1 GROUP BY a) b;
@@ -2398,8 +2418,8 @@ max(b) a
6 4
SHOW STATUS LIKE 'handler_read__e%';
Variable_name Value
-Handler_read_key 8
-Handler_read_next 0
+Handler_read_key 0
+Handler_read_next 15
Handler_read_retry 0
FLUSH STATUS;
(SELECT max(b), a FROM t1 GROUP BY a) UNION
@@ -2411,61 +2431,61 @@ max(b) a
6 4
SHOW STATUS LIKE 'handler_read__e%';
Variable_name Value
-Handler_read_key 16
-Handler_read_next 0
+Handler_read_key 0
+Handler_read_next 30
Handler_read_retry 0
EXPLAIN (SELECT max(b), a FROM t1 GROUP BY a) UNION
(SELECT max(b), a FROM t1 GROUP BY a);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 range NULL a 5 NULL 8 Using index for group-by
-2 UNION t1 range NULL a 5 NULL 8 Using index for group-by
+1 PRIMARY t1 index NULL a 10 NULL 15 Using index
+2 UNION t1 index NULL a 10 NULL 15 Using index
NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL
EXPLAIN SELECT (SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) x
FROM t1 AS t1_outer;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1_outer index NULL a 10 NULL 15 Using index
-2 SUBQUERY t1 range NULL a 5 NULL 8 Using index for group-by
+2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE EXISTS
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1_outer index NULL a 10 NULL 15 Using index
-2 SUBQUERY t1 index NULL a 10 NULL 15 Using index
+2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) > 12;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t1 range NULL a 5 NULL 8 Using index for group-by
+2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE
a IN (SELECT max(b) FROM t1 GROUP BY a HAVING a < 2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 8
-1 PRIMARY t1_outer ref a a 5 <subquery2>.max(b) 2 Using index
-2 MATERIALIZED t1 range NULL a 5 NULL 8 Using index for group-by
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
+1 PRIMARY t1_outer ref a a 5 <subquery2>.max(b) 3 Using index
+2 MATERIALIZED t1 range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT 1 FROM t1 AS t1_outer GROUP BY a HAVING
a > (SELECT max(b) FROM t1 GROUP BY a HAVING a < 2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1_outer range NULL a 5 NULL 8 Using index for group-by
-2 SUBQUERY t1 range NULL a 5 NULL 8 Using index for group-by
+1 PRIMARY t1_outer index NULL a 10 NULL 15 Using index
+2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT 1 FROM t1 AS t1_outer1 JOIN t1 AS t1_outer2
ON t1_outer1.a = (SELECT max(b) FROM t1 GROUP BY a HAVING a < 2)
AND t1_outer1.b = t1_outer2.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1_outer1 ref a a 5 const 1 Using where; Using index
1 PRIMARY t1_outer2 index NULL a 10 NULL 15 Using where; Using index; Using join buffer (flat, BNL join)
-2 SUBQUERY t1 range NULL a 5 NULL 8 Using index for group-by
+2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
EXPLAIN SELECT (SELECT (SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) x
FROM t1 AS t1_outer) x2 FROM t1 AS t1_outer2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1_outer2 index NULL a 10 NULL 15 Using index
2 SUBQUERY t1_outer index NULL a 10 NULL 15 Using index
-3 SUBQUERY t1 range NULL a 5 NULL 8 Using index for group-by
+3 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
CREATE TABLE t3 LIKE t1;
FLUSH STATUS;
INSERT INTO t3 SELECT a,MAX(b) FROM t1 GROUP BY a;
SHOW STATUS LIKE 'handler_read__e%';
Variable_name Value
-Handler_read_key 8
-Handler_read_next 0
+Handler_read_key 5
+Handler_read_next 15
Handler_read_retry 0
DELETE FROM t3;
FLUSH STATUS;
@@ -2473,15 +2493,15 @@ INSERT INTO t3 SELECT 1, (SELECT MAX(b) FROM t1 GROUP BY a HAVING a < 2)
FROM t1 LIMIT 1;
SHOW STATUS LIKE 'handler_read__e%';
Variable_name Value
-Handler_read_key 8
-Handler_read_next 0
+Handler_read_key 0
+Handler_read_next 15
Handler_read_retry 0
FLUSH STATUS;
DELETE FROM t3 WHERE (SELECT MAX(b) FROM t1 GROUP BY a HAVING a < 2) > 10000;
SHOW STATUS LIKE 'handler_read__e%';
Variable_name Value
-Handler_read_key 8
-Handler_read_next 0
+Handler_read_key 0
+Handler_read_next 15
Handler_read_retry 0
FLUSH STATUS;
DELETE FROM t3 WHERE (SELECT (SELECT MAX(b) FROM t1 GROUP BY a HAVING a < 2) x
@@ -2489,17 +2509,21 @@ FROM t1) > 10000;
ERROR 21000: Subquery returns more than 1 row
SHOW STATUS LIKE 'handler_read__e%';
Variable_name Value
-Handler_read_key 8
-Handler_read_next 1
+Handler_read_key 0
+Handler_read_next 16
Handler_read_retry 0
DROP TABLE t1,t2,t3;
CREATE TABLE t1 (a int, INDEX idx(a));
INSERT INTO t1 VALUES
(4), (2), (1), (2), (4), (2), (1), (4),
(4), (2), (1), (2), (2), (4), (1), (4);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
EXPLAIN SELECT DISTINCT(a) FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL idx 5 NULL 9 Using index for group-by
+1 SIMPLE t1 range NULL idx 5 NULL 4 Using index for group-by
SELECT DISTINCT(a) FROM t1;
a
1
@@ -2507,7 +2531,7 @@ a
4
EXPLAIN SELECT SQL_BIG_RESULT DISTINCT(a) FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL idx 5 NULL 9 Using index for group-by
+1 SIMPLE t1 range NULL idx 5 NULL 4 Using index for group-by
SELECT SQL_BIG_RESULT DISTINCT(a) FROM t1;
a
1
@@ -2515,19 +2539,23 @@ a
4
DROP TABLE t1;
CREATE TABLE t1 (a INT, b INT);
-INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3);
+INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3), (1,4), (1,5);
INSERT INTO t1 SELECT a + 1, b FROM t1;
INSERT INTO t1 SELECT a + 2, b FROM t1;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
EXPLAIN
SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 12 Using temporary; Using filesort
+1 SIMPLE t1 ALL NULL NULL NULL NULL 20 Using temporary; Using filesort
SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC;
a MIN(b) MAX(b)
-4 1 3
-3 1 3
-2 1 3
-1 1 3
+4 1 5
+3 1 5
+2 1 5
+1 1 5
CREATE INDEX break_it ON t1 (a, b);
EXPLAIN
SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a;
@@ -2535,30 +2563,30 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL break_it 10 NULL 7 Using index for group-by
SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a;
a MIN(b) MAX(b)
-1 1 3
-2 1 3
-3 1 3
-4 1 3
+1 1 5
+2 1 5
+3 1 5
+4 1 5
EXPLAIN
SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range NULL break_it 10 NULL 7 Using index for group-by; Using temporary; Using filesort
SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC;
a MIN(b) MAX(b)
-4 1 3
-3 1 3
-2 1 3
-1 1 3
+4 1 5
+3 1 5
+2 1 5
+1 1 5
EXPLAIN
SELECT a, MIN(b), MAX(b), AVG(b) FROM t1 GROUP BY a ORDER BY a DESC;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL break_it 10 NULL 12 Using index
+1 SIMPLE t1 index NULL break_it 10 NULL 20 Using index
SELECT a, MIN(b), MAX(b), AVG(b) FROM t1 GROUP BY a ORDER BY a DESC;
a MIN(b) MAX(b) AVG(b)
-4 1 3 2.0000
-3 1 3 2.0000
-2 1 3 2.0000
-1 1 3 2.0000
+4 1 5 3.0000
+3 1 5 3.0000
+2 1 5 3.0000
+1 1 5 3.0000
DROP TABLE t1;
create table t1 (a int, b int, key (a,b), key `index` (a,b)) engine=MyISAM;
Warnings:
@@ -2644,9 +2672,13 @@ INSERT INTO t1 VALUES (1, 1, 1, 1), (1, 1, 1, 2), (1, 1, 1, 3), (1, 1, 1, 4);
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT a,b,c+1,d FROM t1;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
EXPLAIN SELECT DISTINCT c FROM t1 WHERE d=4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL foo 10 NULL 9 Using where; Using index for group-by
+1 SIMPLE t1 range NULL foo 10 NULL 3 Using where; Using index for group-by
SELECT DISTINCT c FROM t1 WHERE d=4;
c
1
@@ -2660,12 +2692,16 @@ CREATE TABLE t (a INT, b INT, INDEX (a,b));
INSERT INTO t VALUES (2,0), (2,0), (2,1), (2,1);
INSERT INTO t SELECT * FROM t;
INSERT INTO t SELECT * FROM t;
+ANALYZE TABLE t;
+Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
+test.t analyze status OK
# test MIN
#should use range with index for group by
EXPLAIN
SELECT a, MIN(b) FROM t WHERE b <> 0 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t range NULL a 10 NULL 9 Using where; Using index for group-by
+1 SIMPLE t range NULL a 10 NULL 2 Using where; Using index for group-by
#should return 1 row
SELECT a, MIN(b) FROM t WHERE b <> 0 GROUP BY a;
a MIN(b)
@@ -2675,7 +2711,7 @@ a MIN(b)
EXPLAIN
SELECT a, MAX(b) FROM t WHERE b <> 1 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t range NULL a 10 NULL 9 Using where; Using index for group-by
+1 SIMPLE t range NULL a 10 NULL 2 Using where; Using index for group-by
#should return 1 row
SELECT a, MAX(b) FROM t WHERE b <> 1 GROUP BY a;
a MAX(b)
@@ -2686,7 +2722,7 @@ INSERT INTO t SELECT a, 2 FROM t;
EXPLAIN
SELECT a, MAX(b) FROM t WHERE b > 0 AND b < 2 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t range NULL a 10 NULL 9 Using where; Using index for group-by
+1 SIMPLE t range NULL a 10 NULL 2 Using where; Using index for group-by
#should return 1 row
SELECT a, MAX(b) FROM t WHERE b > 0 AND b < 2 GROUP BY a;
a MAX(b)
@@ -2719,6 +2755,7 @@ CREATE TABLE t1(a INT NOT NULL, b INT NOT NULL, KEY (b));
INSERT INTO t1 VALUES(1,1),(2,1);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SELECT 1 AS c, b FROM t1 WHERE b IN (1,2) GROUP BY c, b;
c b
@@ -3295,10 +3332,16 @@ INSERT INTO t1 VALUES (0,99),(9,99),(4,0),(7,0),(99,0),(7,0),(8,0),(99,0),(1,0);
INSERT INTO t1 VALUES (0,99),(9,99),(4,0),(7,0),(99,0),(7,0),(8,0),(99,0),(1,0);
CREATE TABLE t2 (c int) ;
INSERT INTO t2 VALUES (0),(1);
+ANALYZE TABLE t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
EXPLAIN
SELECT MIN(a), b FROM t1 WHERE a > 0 GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL b 10 NULL 10 Using where; Using index for group-by
+1 SIMPLE t1 range NULL b 10 NULL 3 Using where; Using index for group-by
SELECT MIN(a), b FROM t1 WHERE a > 0 GROUP BY b;
MIN(a) b
1 0
@@ -3306,7 +3349,7 @@ MIN(a) b
EXPLAIN
SELECT MIN(a), b FROM t1 WHERE a > ( SELECT c FROM t2 WHERE c = 0 ) GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 range NULL b 10 NULL 10 Using where; Using index for group-by
+1 PRIMARY t1 range NULL b 10 NULL 3 Using where; Using index for group-by
2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where
SELECT MIN(a), b FROM t1 WHERE a > ( SELECT c FROM t2 WHERE c = 0 ) GROUP BY b;
MIN(a) b
@@ -3330,54 +3373,68 @@ End of 5.3 tests
#
CREATE TABLE t1 (a INT, b INT, c INT, KEY (a,b));
INSERT INTO t1 VALUES (1,1,1), (1,2,1), (1,3,1), (1,4,1);
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT a, b + 4, 1 FROM t1;
+INSERT INTO t1 SELECT a, b + 8, 1 FROM t1;
INSERT INTO t1 SELECT a + 1, b, 1 FROM t1;
CREATE TABLE t2 (a INT, b INT, c INT, d INT, e INT, f INT, KEY (a,b,c));
-INSERT INTO t2 VALUES (1,1,1,1,1,1), (1,2,1,1,1,1), (1,3,1,1,1,1),
-(1,4,1,1,1,1);
+INSERT INTO t2 VALUES
+(1,1,1,1,1,1), (1,2,1,1,1,1), (1,3,1,1,1,1), (1,4,1,1,1,1);
+INSERT INTO t2 SELECT * FROM t2;
+INSERT INTO t2 SELECT * FROM t2;
+INSERT INTO t2 SELECT * FROM t2;
+INSERT INTO t2 SELECT * FROM t2;
INSERT INTO t2 SELECT a, b + 4, c,d,e,f FROM t2;
INSERT INTO t2 SELECT a + 1, b, c,d,e,f FROM t2;
+ANALYZE TABLE t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
EXPLAIN SELECT COUNT(DISTINCT a) FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 5 NULL 9 Using index for group-by
+1 SIMPLE t1 range NULL a 5 NULL 3 Using index for group-by
SELECT COUNT(DISTINCT a) FROM t1;
COUNT(DISTINCT a)
2
EXPLAIN SELECT COUNT(DISTINCT a,b) FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 10 NULL 9 Using index for group-by
+1 SIMPLE t1 range NULL a 10 NULL 33 Using index for group-by
SELECT COUNT(DISTINCT a,b) FROM t1;
COUNT(DISTINCT a,b)
-16
+32
EXPLAIN SELECT COUNT(DISTINCT b,a) FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 10 NULL 9 Using index for group-by
+1 SIMPLE t1 range NULL a 10 NULL 33 Using index for group-by
SELECT COUNT(DISTINCT b,a) FROM t1;
COUNT(DISTINCT b,a)
-16
+32
EXPLAIN SELECT COUNT(DISTINCT b) FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL a 10 NULL 16 Using index
+1 SIMPLE t1 index NULL a 10 NULL 256 Using index
SELECT COUNT(DISTINCT b) FROM t1;
COUNT(DISTINCT b)
-8
+16
EXPLAIN SELECT COUNT(DISTINCT a) FROM t1 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 5 NULL 9 Using index for group-by
+1 SIMPLE t1 range NULL a 5 NULL 3 Using index for group-by
SELECT COUNT(DISTINCT a) FROM t1 GROUP BY a;
COUNT(DISTINCT a)
1
1
EXPLAIN SELECT COUNT(DISTINCT b) FROM t1 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 10 NULL 9 Using index for group-by
+1 SIMPLE t1 range NULL a 10 NULL 33 Using index for group-by
SELECT COUNT(DISTINCT b) FROM t1 GROUP BY a;
COUNT(DISTINCT b)
-8
-8
+16
+16
EXPLAIN SELECT COUNT(DISTINCT a) FROM t1 GROUP BY b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL a 10 NULL 16 Using index; Using filesort
+1 SIMPLE t1 index NULL a 10 NULL 256 Using index; Using filesort
SELECT COUNT(DISTINCT a) FROM t1 GROUP BY b;
COUNT(DISTINCT a)
2
@@ -3388,96 +3445,103 @@ COUNT(DISTINCT a)
2
2
2
+2
+2
+2
+2
+2
+2
+2
+2
EXPLAIN SELECT DISTINCT COUNT(DISTINCT a) FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL a 10 NULL 16 Using index
+1 SIMPLE t1 index NULL a 10 NULL 256 Using index
SELECT DISTINCT COUNT(DISTINCT a) FROM t1;
COUNT(DISTINCT a)
2
EXPLAIN SELECT COUNT(DISTINCT a, b + 0) FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL a 10 NULL 16 Using index
+1 SIMPLE t1 index NULL a 10 NULL 256 Using index
SELECT COUNT(DISTINCT a, b + 0) FROM t1;
COUNT(DISTINCT a, b + 0)
-16
+32
EXPLAIN SELECT COUNT(DISTINCT a) FROM t1 HAVING COUNT(DISTINCT b) < 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL a 10 NULL 16 Using index
+1 SIMPLE t1 index NULL a 10 NULL 256 Using index
SELECT COUNT(DISTINCT a) FROM t1 HAVING COUNT(DISTINCT b) < 10;
COUNT(DISTINCT a)
-2
EXPLAIN SELECT COUNT(DISTINCT a) FROM t1 HAVING COUNT(DISTINCT c) < 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 16
+1 SIMPLE t1 ALL NULL NULL NULL NULL 256
SELECT COUNT(DISTINCT a) FROM t1 HAVING COUNT(DISTINCT c) < 10;
COUNT(DISTINCT a)
2
EXPLAIN SELECT 1 FROM t1 HAVING COUNT(DISTINCT a) < 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 5 NULL 9 Using index for group-by
+1 SIMPLE t1 range NULL a 5 NULL 3 Using index for group-by
SELECT 1 FROM t1 HAVING COUNT(DISTINCT a) < 10;
1
1
EXPLAIN SELECT 1 FROM t1 GROUP BY a HAVING COUNT(DISTINCT b) > 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 10 NULL 9 Using index for group-by
+1 SIMPLE t1 range NULL a 10 NULL 33 Using index for group-by
SELECT 1 FROM t1 GROUP BY a HAVING COUNT(DISTINCT b) > 1;
1
1
1
EXPLAIN SELECT COUNT(DISTINCT t1_1.a) FROM t1 t1_1, t1 t1_2 GROUP BY t1_1.a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1_1 index NULL a 10 NULL 16 Using index; Using temporary; Using filesort
-1 SIMPLE t1_2 index NULL a 10 NULL 16 Using index; Using join buffer (flat, BNL join)
+1 SIMPLE t1_1 index NULL a 10 NULL 256 Using index; Using temporary; Using filesort
+1 SIMPLE t1_2 index NULL a 10 NULL 256 Using index; Using join buffer (flat, BNL join)
SELECT COUNT(DISTINCT t1_1.a) FROM t1 t1_1, t1 t1_2 GROUP BY t1_1.a;
COUNT(DISTINCT t1_1.a)
1
1
EXPLAIN SELECT COUNT(DISTINCT a), 12 FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 5 NULL 9 Using index for group-by
+1 SIMPLE t1 range NULL a 5 NULL 3 Using index for group-by
SELECT COUNT(DISTINCT a), 12 FROM t1;
COUNT(DISTINCT a) 12
2 12
EXPLAIN SELECT COUNT(DISTINCT a, b, c) FROM t2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL a 15 NULL 9 Using index for group-by
+1 SIMPLE t2 range NULL a 15 NULL 17 Using index for group-by
SELECT COUNT(DISTINCT a, b, c) FROM t2;
COUNT(DISTINCT a, b, c)
16
EXPLAIN SELECT COUNT(DISTINCT a), SUM(DISTINCT a), AVG(DISTINCT a) FROM t2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL a 5 NULL 9 Using index for group-by
+1 SIMPLE t2 range NULL a 5 NULL 3 Using index for group-by
SELECT COUNT(DISTINCT a), SUM(DISTINCT a), AVG(DISTINCT a) FROM t2;
COUNT(DISTINCT a) SUM(DISTINCT a) AVG(DISTINCT a)
2 3 1.5000
EXPLAIN SELECT COUNT(DISTINCT a), SUM(DISTINCT a), AVG(DISTINCT f) FROM t2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 16
+1 SIMPLE t2 ALL NULL NULL NULL NULL 256
SELECT COUNT(DISTINCT a), SUM(DISTINCT a), AVG(DISTINCT f) FROM t2;
COUNT(DISTINCT a) SUM(DISTINCT a) AVG(DISTINCT f)
2 3 1.0000
EXPLAIN SELECT COUNT(DISTINCT a, b), COUNT(DISTINCT b, a) FROM t2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL a 10 NULL 9 Using index for group-by
+1 SIMPLE t2 range NULL a 10 NULL 17 Using index for group-by
SELECT COUNT(DISTINCT a, b), COUNT(DISTINCT b, a) FROM t2;
COUNT(DISTINCT a, b) COUNT(DISTINCT b, a)
16 16
EXPLAIN SELECT COUNT(DISTINCT a, b), COUNT(DISTINCT b, f) FROM t2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 16
+1 SIMPLE t2 ALL NULL NULL NULL NULL 256
SELECT COUNT(DISTINCT a, b), COUNT(DISTINCT b, f) FROM t2;
COUNT(DISTINCT a, b) COUNT(DISTINCT b, f)
16 8
EXPLAIN SELECT COUNT(DISTINCT a, b), COUNT(DISTINCT b, d) FROM t2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 16
+1 SIMPLE t2 ALL NULL NULL NULL NULL 256
SELECT COUNT(DISTINCT a, b), COUNT(DISTINCT b, d) FROM t2;
COUNT(DISTINCT a, b) COUNT(DISTINCT b, d)
16 8
EXPLAIN SELECT a, c, COUNT(DISTINCT c, a, b) FROM t2 GROUP BY a, b, c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL a 15 NULL 9 Using index for group-by
+1 SIMPLE t2 range NULL a 15 NULL 17 Using index for group-by
SELECT a, c, COUNT(DISTINCT c, a, b) FROM t2 GROUP BY a, b, c;
a c COUNT(DISTINCT c, a, b)
1 1 1
@@ -3499,7 +3563,7 @@ a c COUNT(DISTINCT c, a, b)
EXPLAIN SELECT COUNT(DISTINCT c, a, b) FROM t2
WHERE a > 5 AND b BETWEEN 10 AND 20 GROUP BY a, b, c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 15 NULL 1 Using where; Using index for group-by
+1 SIMPLE t2 range a a 5 NULL 1 Using where; Using index
SELECT COUNT(DISTINCT c, a, b) FROM t2
WHERE a > 5 AND b BETWEEN 10 AND 20 GROUP BY a, b, c;
COUNT(DISTINCT c, a, b)
@@ -3512,47 +3576,47 @@ GROUP BY b;
COUNT(DISTINCT b) SUM(DISTINCT b)
EXPLAIN SELECT a, COUNT(DISTINCT b), SUM(DISTINCT b) FROM t2 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL a 10 NULL 9 Using index for group-by
+1 SIMPLE t2 range NULL a 10 NULL 17 Using index for group-by
SELECT a, COUNT(DISTINCT b), SUM(DISTINCT b) FROM t2 GROUP BY a;
a COUNT(DISTINCT b) SUM(DISTINCT b)
1 8 36
2 8 36
EXPLAIN SELECT COUNT(DISTINCT b), SUM(DISTINCT b) FROM t2 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL a 10 NULL 9 Using index for group-by
+1 SIMPLE t2 range NULL a 10 NULL 17 Using index for group-by
SELECT COUNT(DISTINCT b), SUM(DISTINCT b) FROM t2 GROUP BY a;
COUNT(DISTINCT b) SUM(DISTINCT b)
8 36
8 36
EXPLAIN SELECT COUNT(DISTINCT a, b) FROM t2 WHERE c = 13 AND d = 42;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 16 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 256 Using where
SELECT COUNT(DISTINCT a, b) FROM t2 WHERE c = 13 AND d = 42;
COUNT(DISTINCT a, b)
0
EXPLAIN SELECT a, COUNT(DISTINCT a), SUM(DISTINCT a) FROM t2
WHERE b = 13 AND c = 42 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL a 15 NULL 9 Using where; Using index for group-by
+1 SIMPLE t2 range NULL a 15 NULL 3 Using where; Using index for group-by
SELECT a, COUNT(DISTINCT a), SUM(DISTINCT a) FROM t2
WHERE b = 13 AND c = 42 GROUP BY a;
a COUNT(DISTINCT a) SUM(DISTINCT a)
EXPLAIN SELECT COUNT(DISTINCT a, b), SUM(DISTINCT a) FROM t2 WHERE b = 42;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 index NULL a 15 NULL 16 Using where; Using index
+1 SIMPLE t2 index NULL a 15 NULL 256 Using where; Using index
SELECT COUNT(DISTINCT a, b), SUM(DISTINCT a) FROM t2 WHERE b = 42;
COUNT(DISTINCT a, b) SUM(DISTINCT a)
0 NULL
EXPLAIN SELECT SUM(DISTINCT a), MAX(b) FROM t2 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 index NULL a 15 NULL 16 Using index
+1 SIMPLE t2 index NULL a 15 NULL 256 Using index
SELECT SUM(DISTINCT a), MAX(b) FROM t2 GROUP BY a;
SUM(DISTINCT a) MAX(b)
1 8
2 8
EXPLAIN SELECT 42 * (a + c + COUNT(DISTINCT c, a, b)) FROM t2 GROUP BY a, b, c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range NULL a 15 NULL 9 Using index for group-by
+1 SIMPLE t2 range NULL a 15 NULL 17 Using index for group-by
SELECT 42 * (a + c + COUNT(DISTINCT c, a, b)) FROM t2 GROUP BY a, b, c;
42 * (a + c + COUNT(DISTINCT c, a, b))
126
@@ -3573,7 +3637,7 @@ SELECT 42 * (a + c + COUNT(DISTINCT c, a, b)) FROM t2 GROUP BY a, b, c;
168
EXPLAIN SELECT (SUM(DISTINCT a) + MAX(b)) FROM t2 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 index NULL a 15 NULL 16 Using index
+1 SIMPLE t2 index NULL a 15 NULL 256 Using index
SELECT (SUM(DISTINCT a) + MAX(b)) FROM t2 GROUP BY a;
(SUM(DISTINCT a) + MAX(b))
9
@@ -3609,6 +3673,7 @@ CREATE TABLE t (a INT, b INT, KEY(a,b));
INSERT INTO t VALUES (1,1), (2,2), (3,3), (4,4), (1,0), (3,2), (4,5);
ANALYZE TABLE t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
SELECT a, SUM(DISTINCT a), MIN(b) FROM t GROUP BY a;
a SUM(DISTINCT a) MIN(b)
@@ -3682,8 +3747,13 @@ b c
drop table faulty;
CREATE TABLE t1 (a INT, b INT);
INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3);
+INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT a + 1, b FROM t1;
INSERT INTO t1 SELECT a + 2, b FROM t1;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
CREATE INDEX break_it ON t1 (a, b);
EXPLAIN
SELECT distinct a, b FROM t1 where a = '3' ORDER BY b;
@@ -3739,24 +3809,29 @@ DROP TABLE t1;
CREATE TABLE t1 (a INT, b INT,c INT DEFAULT 0, INDEX (a,b));
INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3), (1,4), (1,5),
(2,2), (2,3), (2,1), (3,1), (4,1), (4,2), (4,3), (4,4), (4,5), (4,6);
+INSERT INTO t1 SELECT * FROM t1;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
set @save_optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity;
set @save_use_stat_tables= @@use_stat_tables;
set @@optimizer_use_condition_selectivity=4;
set @@use_stat_tables=PREFERABLY;
explain extended SELECT a FROM t1 AS t1_outer WHERE a IN (SELECT max(b) FROM t1 GROUP BY a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 8 100.00
-1 PRIMARY t1_outer ref a a 5 <subquery2>.max(b) 2 100.00 Using index
-2 MATERIALIZED t1 range NULL a 5 NULL 8 100.00 Using index for group-by
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 5 100.00
+1 PRIMARY t1_outer ref a a 5 <subquery2>.max(b) 7 100.00 Using index
+2 MATERIALIZED t1 range NULL a 5 NULL 5 100.00 Using index for group-by
Warnings:
Note 1003 /* select#1 */ select `test`.`t1_outer`.`a` AS `a` from <materialize> (/* select#2 */ select max(`test`.`t1`.`b`) from `test`.`t1` group by `test`.`t1`.`a`) join `test`.`t1` `t1_outer` where `test`.`t1_outer`.`a` = `<subquery2>`.`max(b)`
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
set @@use_stat_tables=@save_use_stat_tables;
explain extended SELECT a FROM t1 AS t1_outer WHERE a IN (SELECT max(b) FROM t1 GROUP BY a);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 8 100.00
-1 PRIMARY t1_outer ref a a 5 <subquery2>.max(b) 2 100.00 Using index
-2 MATERIALIZED t1 range NULL a 5 NULL 8 100.00 Using index for group-by
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 5 100.00
+1 PRIMARY t1_outer ref a a 5 <subquery2>.max(b) 7 100.00 Using index
+2 MATERIALIZED t1 range NULL a 5 NULL 5 100.00 Using index for group-by
Warnings:
Note 1003 /* select#1 */ select `test`.`t1_outer`.`a` AS `a` from <materialize> (/* select#2 */ select max(`test`.`t1`.`b`) from `test`.`t1` group by `test`.`t1`.`a`) join `test`.`t1` `t1_outer` where `test`.`t1_outer`.`a` = `<subquery2>`.`max(b)`
drop table t1;
@@ -3786,15 +3861,19 @@ INSERT INTO t1 VALUES (4,'2001-01-01');
INSERT INTO t1 VALUES (4,'2001-01-02');
INSERT INTO t1 VALUES (4,'2001-01-03');
INSERT INTO t1 VALUES (4,'2001-01-04');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>='2001-01-04' GROUP BY id;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL id 8 NULL 9 Using where; Using index for group-by
+1 SIMPLE t1 range NULL id 8 NULL 5 Using where; Using index for group-by
EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104.0 GROUP BY id;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL id 8 NULL 9 Using where; Using index for group-by
+1 SIMPLE t1 range NULL id 8 NULL 5 Using where; Using index for group-by
EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL id 8 NULL 9 Using where; Using index for group-by
+1 SIMPLE t1 range NULL id 8 NULL 5 Using where; Using index for group-by
SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>='2001-01-04' GROUP BY id;
id MIN(a) MAX(a)
1 2001-01-04 2001-01-04
@@ -3835,6 +3914,12 @@ INSERT INTO t1 VALUES (4,'2001-01-01');
INSERT INTO t1 VALUES (4,'2001-01-02');
INSERT INTO t1 VALUES (4,'2001-01-03');
INSERT INTO t1 VALUES (4,' 2001-01-04');
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN ' 2001-01-04' AND '2001-01-05' GROUP BY id;
id MIN(a) MAX(a)
1 2001-01-04 2001-01-03
@@ -3890,19 +3975,19 @@ id MIN(a) MAX(a)
4 2001-01-04 2001-01-04
EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN ' 2001-01-04' AND '2001-01-05' GROUP BY id;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL id 27 NULL 9 Using where; Using index for group-by
+1 SIMPLE t1 range NULL id 27 NULL 10 Using where; Using index for group-by
EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN '2001-01-04' AND '2001-01-05' GROUP BY id;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL id 27 NULL 9 Using where; Using index for group-by
+1 SIMPLE t1 range NULL id 27 NULL 10 Using where; Using index for group-by
EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN DATE'2001-01-04' AND DATE'2001-01-05' GROUP BY id;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL id 27 NULL 16 Using where; Using index
+1 SIMPLE t1 index NULL id 27 NULL 64 Using where; Using index
EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN DATE'2001-01-04' AND '2001-01-05' GROUP BY id;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL id 27 NULL 16 Using where; Using index
+1 SIMPLE t1 index NULL id 27 NULL 64 Using where; Using index
EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN '2001-01-04' AND DATE'2001-01-05' GROUP BY id;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL id 27 NULL 16 Using where; Using index
+1 SIMPLE t1 index NULL id 27 NULL 64 Using where; Using index
DROP TABLE t1;
#
# MIN() optimization didn't work correctly with BETWEEN when using too
diff --git a/mysql-test/main/group_min_max.test b/mysql-test/main/group_min_max.test
index e8245dd2898..b32a39b1988 100644
--- a/mysql-test/main/group_min_max.test
+++ b/mysql-test/main/group_min_max.test
@@ -835,6 +835,10 @@ INSERT INTO t1 (a) VALUES
(''), ('CENTRAL'), ('EASTERN'), ('GREATER LONDON'),
('NORTH CENTRAL'), ('NORTH EAST'), ('NORTH WEST'), ('SCOTLAND'),
('SOUTH EAST'), ('SOUTH WEST'), ('WESTERN');
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+ANALYZE TABLE t1;
EXPLAIN SELECT DISTINCT a,a FROM t1 ORDER BY a;
SELECT DISTINCT a,a FROM t1 ORDER BY a;
@@ -893,7 +897,10 @@ DROP TABLE t1,t2,t3,t4,t5,t6;
# Bug#22342: No results returned for query using max and group by
#
CREATE TABLE t1 (a int, b int, KEY (a,b), KEY b (b));
-INSERT INTO t1 VALUES (1,1),(1,2),(1,0),(1,3);
+INSERT INTO t1 VALUES
+ (1,1),(1,2),(1,0),(1,3),
+ (1,-1),(1,-2),(1,-3),(1,-4);
+ANALYZE TABLE t1;
explain SELECT MAX(b), a FROM t1 WHERE b < 2 AND a = 1 GROUP BY a;
SELECT MAX(b), a FROM t1 WHERE b < 2 AND a = 1 GROUP BY a;
@@ -912,6 +919,7 @@ DROP TABLE t1,t2;
CREATE TABLE t1 (a INT, b INT, INDEX (a,b));
INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3), (1,4), (1,5),
(2,2), (2,3), (2,1), (3,1), (4,1), (4,2), (4,3), (4,4), (4,5), (4,6);
+ANALYZE TABLE t1;
EXPLAIN SELECT max(b), a FROM t1 GROUP BY a;
FLUSH STATUS;
SELECT max(b), a FROM t1 GROUP BY a;
@@ -975,6 +983,7 @@ CREATE TABLE t1 (a int, INDEX idx(a));
INSERT INTO t1 VALUES
(4), (2), (1), (2), (4), (2), (1), (4),
(4), (2), (1), (2), (2), (4), (1), (4);
+ANALYZE TABLE t1;
EXPLAIN SELECT DISTINCT(a) FROM t1;
SELECT DISTINCT(a) FROM t1;
@@ -988,9 +997,10 @@ DROP TABLE t1;
#
CREATE TABLE t1 (a INT, b INT);
-INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3);
+INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3), (1,4), (1,5);
INSERT INTO t1 SELECT a + 1, b FROM t1;
INSERT INTO t1 SELECT a + 2, b FROM t1;
+ANALYZE TABLE t1;
EXPLAIN
SELECT a, MIN(b), MAX(b) FROM t1 GROUP BY a ORDER BY a DESC;
@@ -1044,6 +1054,7 @@ INSERT INTO t1 VALUES (1, 1, 1, 1), (1, 1, 1, 2), (1, 1, 1, 3), (1, 1, 1, 4);
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT a,b,c+1,d FROM t1;
+ANALYZE TABLE t1;
#Should be non-empty
EXPLAIN SELECT DISTINCT c FROM t1 WHERE d=4;
@@ -1060,6 +1071,7 @@ CREATE TABLE t (a INT, b INT, INDEX (a,b));
INSERT INTO t VALUES (2,0), (2,0), (2,1), (2,1);
INSERT INTO t SELECT * FROM t;
INSERT INTO t SELECT * FROM t;
+ANALYZE TABLE t;
--echo # test MIN
--echo #should use range with index for group by
@@ -1262,6 +1274,7 @@ INSERT INTO t1 VALUES (0,99),(9,99),(4,0),(7,0),(99,0),(7,0),(8,0),(99,0),(1,0);
INSERT INTO t1 VALUES (0,99),(9,99),(4,0),(7,0),(99,0),(7,0),(8,0),(99,0),(1,0);
CREATE TABLE t2 (c int) ;
INSERT INTO t2 VALUES (0),(1);
+ANALYZE TABLE t1,t2;
EXPLAIN
SELECT MIN(a), b FROM t1 WHERE a > 0 GROUP BY b;
@@ -1284,13 +1297,22 @@ drop table t1, t2;
CREATE TABLE t1 (a INT, b INT, c INT, KEY (a,b));
INSERT INTO t1 VALUES (1,1,1), (1,2,1), (1,3,1), (1,4,1);
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT a, b + 4, 1 FROM t1;
+INSERT INTO t1 SELECT a, b + 8, 1 FROM t1;
INSERT INTO t1 SELECT a + 1, b, 1 FROM t1;
CREATE TABLE t2 (a INT, b INT, c INT, d INT, e INT, f INT, KEY (a,b,c));
-INSERT INTO t2 VALUES (1,1,1,1,1,1), (1,2,1,1,1,1), (1,3,1,1,1,1),
- (1,4,1,1,1,1);
+INSERT INTO t2 VALUES
+ (1,1,1,1,1,1), (1,2,1,1,1,1), (1,3,1,1,1,1), (1,4,1,1,1,1);
+INSERT INTO t2 SELECT * FROM t2;
+INSERT INTO t2 SELECT * FROM t2;
+INSERT INTO t2 SELECT * FROM t2;
+INSERT INTO t2 SELECT * FROM t2;
INSERT INTO t2 SELECT a, b + 4, c,d,e,f FROM t2;
INSERT INTO t2 SELECT a + 1, b, c,d,e,f FROM t2;
+ANALYZE TABLE t1,t2;
EXPLAIN SELECT COUNT(DISTINCT a) FROM t1;
SELECT COUNT(DISTINCT a) FROM t1;
@@ -1479,8 +1501,10 @@ drop table faulty;
CREATE TABLE t1 (a INT, b INT);
INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3);
+INSERT INTO t1 SELECT * FROM t1;
INSERT INTO t1 SELECT a + 1, b FROM t1;
INSERT INTO t1 SELECT a + 2, b FROM t1;
+ANALYZE TABLE t1;
CREATE INDEX break_it ON t1 (a, b);
@@ -1527,6 +1551,8 @@ DROP TABLE t1;
CREATE TABLE t1 (a INT, b INT,c INT DEFAULT 0, INDEX (a,b));
INSERT INTO t1 (a, b) VALUES (1,1), (1,2), (1,3), (1,4), (1,5),
(2,2), (2,3), (2,1), (3,1), (4,1), (4,2), (4,3), (4,4), (4,5), (4,6);
+INSERT INTO t1 SELECT * FROM t1;
+ANALYZE TABLE t1;
set @save_optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity;
set @save_use_stat_tables= @@use_stat_tables;
set @@optimizer_use_condition_selectivity=4;
@@ -1566,6 +1592,7 @@ INSERT INTO t1 VALUES (4,'2001-01-01');
INSERT INTO t1 VALUES (4,'2001-01-02');
INSERT INTO t1 VALUES (4,'2001-01-03');
INSERT INTO t1 VALUES (4,'2001-01-04');
+ANALYZE TABLE t1;
EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>='2001-01-04' GROUP BY id;
EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104.0 GROUP BY id;
EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id;
@@ -1595,6 +1622,9 @@ INSERT INTO t1 VALUES (4,'2001-01-01');
INSERT INTO t1 VALUES (4,'2001-01-02');
INSERT INTO t1 VALUES (4,'2001-01-03');
INSERT INTO t1 VALUES (4,' 2001-01-04');
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+ANALYZE TABLE t1;
SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN ' 2001-01-04' AND '2001-01-05' GROUP BY id;
SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN '2001-01-04' AND '2001-01-05' GROUP BY id;
SELECT id,MIN(a),MAX(a) FROM t1 WHERE a BETWEEN DATE'2001-01-04' AND DATE'2001-01-05' GROUP BY id;
diff --git a/mysql-test/main/group_min_max_innodb.result b/mysql-test/main/group_min_max_innodb.result
index 311032bc453..3586ad5237f 100644
--- a/mysql-test/main/group_min_max_innodb.result
+++ b/mysql-test/main/group_min_max_innodb.result
@@ -1,3 +1,8 @@
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
drop view if exists v1;
drop table if exists t1,t4;
create table t4 (
@@ -41,6 +46,7 @@ create index idx12672_1 on t4 (a1,a2,b,c);
create index idx12672_2 on t4 (a1,a2,b);
analyze table t4;
Table Op Msg_type Msg_text
+test.t4 analyze status Engine-independent statistics collected
test.t4 analyze status OK
select distinct a1 from t4 where pk_col not in (1,2,3,4);
a1
@@ -194,7 +200,7 @@ EXPLAIN SELECT c1, i1, max(i2) FROM t2
WHERE (c1 = 'C' OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 )
GROUP BY c1,i1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range k2 k2 5 NULL 60 Using where; Using index
+1 SIMPLE t2 range k2 k2 9 NULL 60 Using where; Using index for group-by
SELECT c1, i1, max(i2) FROM t2
WHERE (c1 = 'C' OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 )
GROUP BY c1,i1;
@@ -205,7 +211,7 @@ EXPLAIN SELECT c1, i1, max(i2) FROM t2
WHERE (((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 ))
GROUP BY c1,i1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range k2 k2 5 NULL 60 Using where; Using index
+1 SIMPLE t2 range k2 k2 9 NULL 60 Using where; Using index for group-by
SELECT c1, i1, max(i2) FROM t2
WHERE (((c1 = 'C' AND i1 < 40) OR ( c1 = 'F' AND i1 < 35)) AND ( i2 = 17 ))
GROUP BY c1,i1;
@@ -302,3 +308,6 @@ NULL bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
NULL aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
NULL aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
drop table t1,t2;
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/group_min_max_innodb.test b/mysql-test/main/group_min_max_innodb.test
index 91e0bd3279f..87a6e320887 100644
--- a/mysql-test/main/group_min_max_innodb.test
+++ b/mysql-test/main/group_min_max_innodb.test
@@ -6,6 +6,13 @@
--source include/have_innodb.inc
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
+
--disable_warnings
drop view if exists v1;
drop table if exists t1,t4;
@@ -243,3 +250,8 @@ CREATE TABLE t2 (`voter_id` int(10) unsigned NOT NULL DEFAULT '0',
insert into t2 values (1,repeat("a",1000)),(2,repeat("a",1000)),(3,repeat("b",1000)),(4,repeat("c",1000)),(4,repeat("b",1000));
SELECT GROUP_CONCAT(t1.language_id SEPARATOR ',') AS `translation_resources`, `d`.`serialized_c` FROM t2 AS `d` LEFT JOIN t1 ON `d`.`voter_id` = t1.`voter_id` GROUP BY `d`.`voter_id` ORDER BY 10-d.voter_id+RAND()*0;
drop table t1,t2;
+
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
+
diff --git a/mysql-test/main/handlersocket.result b/mysql-test/main/handlersocket.result
index 1b3fc573548..8ef9b289cd0 100644
--- a/mysql-test/main/handlersocket.result
+++ b/mysql-test/main/handlersocket.result
@@ -5,7 +5,7 @@ plugin_version 1.0
plugin_status ACTIVE
plugin_type DAEMON
plugin_library handlersocket.so
-plugin_library_version 1.13
+plugin_library_version 1.14
plugin_author higuchi dot akira at dena dot jp
plugin_description Direct access into InnoDB
plugin_license BSD
diff --git a/mysql-test/main/having.result b/mysql-test/main/having.result
index f37cc48772e..837940a55ef 100644
--- a/mysql-test/main/having.result
+++ b/mysql-test/main/having.result
@@ -470,9 +470,9 @@ WHERE table2.f1 = 2
GROUP BY table1.f1, table2.f2
HAVING (table2.f2 = 8 AND table1.f1 >= 6);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
Warnings:
-Note 1003 select `test`.`table1`.`f1` AS `f1`,7 AS `f2` from `test`.`t1` `table1` join `test`.`t1` `table2` where `test`.`table1`.`f3` = 9 group by `test`.`table1`.`f1`,7 having 0
+Note 1003 select 0 AS `f1`,7 AS `f2` from `test`.`t1` `table1` join `test`.`t1` `table2` where 0 group by 0,7 having 1
EXPLAIN EXTENDED
SELECT table1.f1, table2.f2
FROM t1 AS table1
@@ -481,9 +481,9 @@ WHERE table2.f1 = 2
GROUP BY table1.f1, table2.f2
HAVING (table2.f2 = 8);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING noticed after reading const tables
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
Warnings:
-Note 1003 select `test`.`table1`.`f1` AS `f1`,7 AS `f2` from `test`.`t1` `table1` join `test`.`t1` `table2` where `test`.`table1`.`f3` = 9 group by `test`.`table1`.`f1`,7 having 0
+Note 1003 select `test`.`table1`.`f1` AS `f1`,7 AS `f2` from `test`.`t1` `table1` join `test`.`t1` `table2` where 0 group by `test`.`table1`.`f1`,7 having 1
DROP TABLE t1;
#
# Bug#52336 Segfault / crash in 5.1 copy_fields (param=0x9872980) at sql_select.cc:15355
@@ -631,7 +631,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL f10 4 NULL 2 100.00 Using index
2 DEPENDENT SUBQUERY t3 ALL NULL NULL NULL NULL 2 100.00 Using where
Warnings:
-Note 1003 /* select#1 */ select min(`test`.`t1`.`f10`) AS `field1` from `test`.`t1` where <expr_cache><7>(<in_optimizer>(7,<exists>(/* select#2 */ select `test`.`t3`.`f3` from `test`.`t3` where <cache>(7) = `test`.`t3`.`f3`))) having <cache>(`field1`) < 's'
+Note 1003 /* select#1 */ select min(`test`.`t1`.`f10`) AS `field1` from `test`.`t1` where <expr_cache><7>(<in_optimizer>(7,<exists>(/* select#2 */ select `test`.`t3`.`f3` from `test`.`t3` where <cache>(7) = `test`.`t3`.`f3`))) having `field1` < 's'
set optimizer_switch=@save_optimizer_switch;
drop table t1,t2,t3;
End of 5.2 tests
@@ -835,6 +835,7 @@ INSERT INTO t1 VALUES(10, 10), (11, 11), (12, 12), (12, 13),(14, 15), (15, 16),
(16, 17), (17, 17);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SELECT t, next_seq_value() r FROM t1 FORCE INDEX(t)
GROUP BY t HAVING r = 1 ORDER BY t1.u;
@@ -846,3 +847,21 @@ t r
DROP TABLE t1;
DROP FUNCTION next_seq_value;
DROP TABLE series;
+# End of 10.3 tests
+#
+# MDEV-18681: AND formula in HAVING with several occurrences
+# of the same field f in different conjuncts + f=constant
+#
+CREATE TABLE t1 (pk int, f varchar(1));
+INSERT INTO t1 VALUES (2,'x'), (7,'y');
+CREATE TABLE t2 (pk int);
+INSERT INTO t2 VALUES (2), (3);
+SELECT t.f
+FROM (SELECT t1.* FROM (t1 JOIN t2 ON (t2.pk = t1.pk))) t
+HAVING t.f != 112 AND t.f = 'x' AND t.f != 'a';
+f
+x
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: 'x'
+DROP TABLE t1,t2;
+# End of 10.4 tests
diff --git a/mysql-test/main/having.test b/mysql-test/main/having.test
index 179af14559f..ed86b41a2c3 100644
--- a/mysql-test/main/having.test
+++ b/mysql-test/main/having.test
@@ -890,3 +890,23 @@ SELECT t, next_seq_value() r FROM t1 FORCE INDEX(t)
DROP TABLE t1;
DROP FUNCTION next_seq_value;
DROP TABLE series;
+
+--echo # End of 10.3 tests
+
+--echo #
+--echo # MDEV-18681: AND formula in HAVING with several occurrences
+--echo # of the same field f in different conjuncts + f=constant
+--echo #
+
+CREATE TABLE t1 (pk int, f varchar(1));
+INSERT INTO t1 VALUES (2,'x'), (7,'y');
+CREATE TABLE t2 (pk int);
+INSERT INTO t2 VALUES (2), (3);
+
+SELECT t.f
+FROM (SELECT t1.* FROM (t1 JOIN t2 ON (t2.pk = t1.pk))) t
+HAVING t.f != 112 AND t.f = 'x' AND t.f != 'a';
+
+DROP TABLE t1,t2;
+
+--echo # End of 10.4 tests
diff --git a/mysql-test/main/having_cond_pushdown.result b/mysql-test/main/having_cond_pushdown.result
new file mode 100644
index 00000000000..82a4813b156
--- /dev/null
+++ b/mysql-test/main/having_cond_pushdown.result
@@ -0,0 +1,4778 @@
+CREATE TABLE t1(a INT, b INT, c INT);
+CREATE TABLE t2(x INT, y INT);
+INSERT INTO t1 VALUES (1,14,3), (2,13,2), (1,22,1), (3,13,4), (3,14,2);
+INSERT INTO t2 VALUES (2,13),(5,22),(3,14),(1,22);
+CREATE VIEW v1
+AS SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a;
+CREATE FUNCTION f1() RETURNS INT RETURN 3;
+# conjunctive subformula
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>2);
+a MAX(t1.b)
+3 14
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>2);
+a MAX(t1.b)
+3 14
+explain SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 2"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>2)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 2"
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : using equality
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2);
+a MAX(t1.b)
+2 13
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2);
+a MAX(t1.b)
+2 13
+explain SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 2"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a=2)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 2"
+ }
+ }
+}
+# extracted AND formula
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (t1.a<4);
+a MAX(t1.b)
+2 13
+3 14
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (t1.a<4);
+a MAX(t1.b)
+2 13
+3 14
+explain SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (t1.a<4);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (t1.a<4);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.a < 4"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>1) AND (t1.a<4)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.a < 4"
+ }
+ }
+ }
+ }
+}
+# extracted OR formula
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) OR (a IN (SELECT 3));
+a MAX(t1.b)
+2 13
+3 14
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) OR (a IN (SELECT 3));
+a MAX(t1.b)
+2 13
+3 14
+explain SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) OR (a IN (SELECT 3));
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+Warnings:
+Note 1249 Select 2 was reduced during optimization
+explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) OR (a IN (SELECT 3));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 or t1.a = 3"
+ }
+ }
+ }
+ }
+}
+Warnings:
+Note 1249 Select 2 was reduced during optimization
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>1) OR (a IN (SELECT 3))
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 or t1.a = 3"
+ }
+ }
+ }
+ }
+}
+Warnings:
+Note 1249 Select 3 was reduced during optimization
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING ((t1.a>2) AND (MAX(t1.b)>13)) OR ((t1.a<3) AND (MIN(t1.c)>1));
+a MAX(t1.b) MIN(t1.c)
+2 13 2
+3 14 2
+SELECT t1.a,MAX(t1.b),MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING ((t1.a>2) AND (MAX(t1.b)>13)) OR ((t1.a<3) AND (MIN(t1.c)>1));
+a MAX(t1.b) MIN(t1.c)
+2 13 2
+3 14 2
+explain SELECT t1.a,MAX(t1.b),MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING ((t1.a>2) AND (MAX(t1.b)>13)) OR ((t1.a<3) AND (MIN(t1.c)>1));
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING ((t1.a>2) AND (MAX(t1.b)>13)) OR ((t1.a<3) AND (MIN(t1.c)>1));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a > 2 and max(t1.b) > 13 or t1.a < 3 and min(t1.c) > 1",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 2 or t1.a < 3"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a>2) OR (t1.a<3)
+GROUP BY t1.a
+HAVING ((t1.a>2) AND (MAX(t1.b)>13)) OR ((t1.a<3) AND (MIN(t1.c)>1));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a > 2 and max(t1.b) > 13 or t1.a < 3 and min(t1.c) > 1",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 2 or t1.a < 3"
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : no aggregation formula pushdown
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.a)<3);
+a MAX(t1.b)
+2 13
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.a)<3);
+a MAX(t1.b)
+2 13
+explain SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.a)<3);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.a)<3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.a) < 3",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>1)
+GROUP BY t1.a
+HAVING (MAX(t1.a)<3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.a) < 3",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.b)>13);
+a MAX(t1.b)
+3 14
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.b)>13);
+a MAX(t1.b)
+3 14
+explain SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.b)>13);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.b)>13);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.b) > 13",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>1)
+GROUP BY t1.a
+HAVING (MAX(t1.b)>13);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.b) > 13",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=3) AND (MAX(t1.a)=3);
+a MAX(t1.b)
+3 14
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=3) AND (MAX(t1.a)=3);
+a MAX(t1.b)
+3 14
+explain SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=3) AND (MAX(t1.a)=3);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=3) AND (MAX(t1.a)=3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.a) = 3",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 3"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a=3)
+GROUP BY t1.a
+HAVING (MAX(t1.a)=3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.a) = 3",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 3"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2) AND (MAX(t1.b)>12);
+a MAX(t1.b)
+2 13
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2) AND (MAX(t1.b)>12);
+a MAX(t1.b)
+2 13
+explain SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2) AND (MAX(t1.b)>12);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2) AND (MAX(t1.b)>12);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.b) > 12",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 2"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a=2)
+GROUP BY t1.a
+HAVING (MAX(t1.b)>12);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.b) > 12",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 2"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.b)=13);
+a MAX(t1.b)
+2 13
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.b)=13);
+a MAX(t1.b)
+2 13
+explain SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.b)=13);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.b)=13);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.b) = 13",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>1)
+GROUP BY t1.a
+HAVING (MAX(t1.b)=13);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.b) = 13",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MIN(t1.c)<3);
+a MIN(t1.c)
+2 2
+3 2
+SELECT t1.a,MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MIN(t1.c)<3);
+a MIN(t1.c)
+2 2
+3 2
+explain SELECT t1.a,MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MIN(t1.c)<3);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MIN(t1.c)<3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "min(t1.c) < 3",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MIN(t1.c)
+FROM t1
+WHERE (t1.a>1)
+GROUP BY t1.a
+HAVING (MIN(t1.c)<3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "min(t1.c) < 3",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2) AND (MAX(t1.b)=13) AND (MIN(t1.c)=2);
+a MAX(t1.b) MIN(t1.c)
+2 13 2
+SELECT t1.a,MAX(t1.b),MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2) AND (MAX(t1.b)=13) AND (MIN(t1.c)=2);
+a MAX(t1.b) MIN(t1.c)
+2 13 2
+explain SELECT t1.a,MAX(t1.b),MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2) AND (MAX(t1.b)=13) AND (MIN(t1.c)=2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b),MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2) AND (MAX(t1.b)=13) AND (MIN(t1.c)=2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.b) = 13 and min(t1.c) = 2",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 2"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MIN(t1.c)
+FROM t1
+WHERE (t1.a=2)
+GROUP BY t1.a
+HAVING (MAX(t1.b)=13) AND (MIN(t1.c)=2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.b) = 13 and min(t1.c) = 2",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 2"
+ }
+ }
+}
+# conjunctive subformula : no stored function pushdown
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (a=test.f1());
+a MAX(t1.b)
+3 14
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (a=test.f1());
+a MAX(t1.b)
+3 14
+explain SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (a=test.f1());
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (a=test.f1());
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a = test.f1()",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>1)
+GROUP BY t1.a
+HAVING (a=test.f1());
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a = test.f1()",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushdown into derived table WHERE clause
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT v1.a
+FROM t2,v1
+WHERE (t2.x=v1.a)
+GROUP BY v1.a
+HAVING (v1.a>1);
+a
+2
+3
+SELECT v1.a
+FROM t2,v1
+WHERE (t2.x=v1.a)
+GROUP BY v1.a
+HAVING (v1.a>1);
+a
+2
+3
+explain SELECT v1.a
+FROM t2,v1
+WHERE (t2.x=v1.a)
+GROUP BY v1.a
+HAVING (v1.a>1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where; Using temporary; Using filesort
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.x 2
+2 DERIVED t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT v1.a
+FROM t2,v1
+WHERE (t2.x=v1.a)
+GROUP BY v1.a
+HAVING (v1.a>1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "v1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 4,
+ "filtered": 100,
+ "attached_condition": "t2.x > 1 and t2.x is not null"
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t2.x"],
+ "rows": 2,
+ "filtered": 100,
+ "materialized": {
+ "query_block": {
+ "select_id": 2,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT v1.a
+FROM t2,v1
+WHERE (t2.x=v1.a) AND (v1.a>1)
+GROUP BY v1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "v1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 4,
+ "filtered": 100,
+ "attached_condition": "t2.x > 1 and t2.x is not null"
+ },
+ "table": {
+ "table_name": "<derived3>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t2.x"],
+ "rows": 2,
+ "filtered": 100,
+ "materialized": {
+ "query_block": {
+ "select_id": 3,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushdown into derived table HAVING clause
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT v1.a,v1.c
+FROM t2,v1
+WHERE (t2.x=v1.a)
+GROUP BY v1.c
+HAVING (v1.c>2);
+a c
+1 3
+3 4
+SELECT v1.a,v1.c
+FROM t2,v1
+WHERE (t2.x=v1.a)
+GROUP BY v1.c
+HAVING (v1.c>2);
+a c
+1 3
+3 4
+explain SELECT v1.a,v1.c
+FROM t2,v1
+WHERE (t2.x=v1.a)
+GROUP BY v1.c
+HAVING (v1.c>2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 4 Using where; Using temporary; Using filesort
+1 PRIMARY <derived2> ref key0 key0 5 test.t2.x 2 Using where
+2 DERIVED t1 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort
+explain format=json SELECT v1.a,v1.c
+FROM t2,v1
+WHERE (t2.x=v1.a)
+GROUP BY v1.c
+HAVING (v1.c>2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "v1.c",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 4,
+ "filtered": 100,
+ "attached_condition": "t2.x is not null"
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t2.x"],
+ "rows": 2,
+ "filtered": 100,
+ "attached_condition": "v1.c > 2",
+ "materialized": {
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "t1.c > 2",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT v1.a,v1.c
+FROM t2,v1
+WHERE (t2.x=v1.a) AND (v1.c>2)
+GROUP BY v1.c;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "v1.c",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 4,
+ "filtered": 100,
+ "attached_condition": "t2.x is not null"
+ },
+ "table": {
+ "table_name": "<derived3>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t2.x"],
+ "rows": 2,
+ "filtered": 100,
+ "attached_condition": "v1.c > 2",
+ "materialized": {
+ "query_block": {
+ "select_id": 3,
+ "having_condition": "t1.c > 2",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushdown into materialized IN subquery
+# WHERE clause
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT * FROM t1
+WHERE
+(t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.a
+HAVING (t1.a>1);
+a b c
+2 13 2
+3 14 2
+SELECT * FROM t1
+WHERE
+(t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.a
+HAVING (t1.a>1);
+a b c
+2 13 2
+3 14 2
+explain SELECT * FROM t1
+WHERE
+(t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.a
+HAVING (t1.a>1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.b 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where; Using temporary
+explain format=json SELECT * FROM t1
+WHERE
+(t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.a
+HAVING (t1.a>1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["x", "MAX(t2.y)"],
+ "ref": ["test.t1.a", "test.t1.b"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 4,
+ "filtered": 100,
+ "attached_condition": "t2.x < 5 and t2.x > 1"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT * FROM t1
+WHERE
+(t1.a>1) AND
+(t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null"
+ },
+ "table": {
+ "table_name": "<subquery3>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["x", "MAX(t2.y)"],
+ "ref": ["test.t1.a", "test.t1.b"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 3,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 4,
+ "filtered": 100,
+ "attached_condition": "t2.x < 5 and t2.x > 1"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushdown into materialized IN subquery
+# HAVING clause
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT * FROM t1
+WHERE
+(t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.b
+HAVING (t1.b<14);
+a b c
+2 13 2
+SELECT * FROM t1
+WHERE
+(t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.b
+HAVING (t1.b<14);
+a b c
+2 13 2
+explain SELECT * FROM t1
+WHERE
+(t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.b
+HAVING (t1.b<14);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.b 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 4 Using where; Using temporary
+explain format=json SELECT * FROM t1
+WHERE
+(t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.b
+HAVING (t1.b<14);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b < 14 and t1.a is not null and t1.b is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["x", "MAX(t2.y)"],
+ "ref": ["test.t1.a", "test.t1.b"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "`MAX(t2.y)` < 14",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 4,
+ "filtered": 100,
+ "attached_condition": "t2.x < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT * FROM t1
+WHERE
+(t1.b<14) AND
+(t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.b;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b < 14 and t1.a is not null and t1.b is not null"
+ },
+ "table": {
+ "table_name": "<subquery3>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["x", "MAX(t2.y)"],
+ "ref": ["test.t1.a", "test.t1.b"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 3,
+ "having_condition": "`MAX(t2.y)` < 14",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 4,
+ "filtered": 100,
+ "attached_condition": "t2.x < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# non-standard allowed queries
+# conjunctive subformula
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.c=2) AND (t1.a>1);
+a MAX(t1.b) c
+2 13 2
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.c=2) AND (t1.a>1);
+a MAX(t1.b) c
+2 13 2
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.c=2) AND (t1.a>1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.c=2) AND (t1.a>1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.c = 2",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c FROM t1
+WHERE (t1.a>1)
+GROUP BY t1.a
+HAVING (t1.c=2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.c = 2",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT MAX(t1.a),t1.a,t1.b,t1.c
+FROM t1
+GROUP BY t1.b
+HAVING (t1.a=2) AND (t1.b=13) AND (t1.c=2);
+MAX(t1.a) a b c
+3 2 13 2
+SELECT MAX(t1.a),t1.a,t1.b,t1.c
+FROM t1
+GROUP BY t1.b
+HAVING (t1.a=2) AND (t1.b=13) AND (t1.c=2);
+MAX(t1.a) a b c
+3 2 13 2
+explain SELECT MAX(t1.a),t1.a,t1.b,t1.c
+FROM t1
+GROUP BY t1.b
+HAVING (t1.a=2) AND (t1.b=13) AND (t1.c=2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT MAX(t1.a),t1.a,t1.b,t1.c
+FROM t1
+GROUP BY t1.b
+HAVING (t1.a=2) AND (t1.b=13) AND (t1.c=2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a = 2 and t1.c = 2",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b = 13"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT MAX(t1.a),t1.a,t1.b,t1.c
+FROM t1
+WHERE (t1.b=13)
+GROUP BY t1.b
+HAVING (t1.a=2) AND (t1.c=2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a = 2 and t1.c = 2",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b = 13"
+ }
+ }
+}
+# extracted AND formula : using equalities
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=t1.c) AND (t1.c>1);
+a MAX(t1.b) c
+2 13 2
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=t1.c) AND (t1.c>1);
+a MAX(t1.b) c
+2 13 2
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=t1.c) AND (t1.c>1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=t1.c) AND (t1.c>1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = t1.a and t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b) FROM t1
+WHERE (t1.a=t1.c) AND (t1.a>1)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = t1.a and t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=t1.c) AND (t1.c=2);
+a MAX(t1.b) c
+2 13 2
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=t1.c) AND (t1.c=2);
+a MAX(t1.b) c
+2 13 2
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=t1.c) AND (t1.c=2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=t1.c) AND (t1.c=2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 2 and t1.c = 2"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c) AND (t1.a=2)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 2 and t1.c = 2"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING ((t1.a=t1.c) AND (t1.a>1)) OR ((t1.a<3) AND (t1.c>3));
+a MAX(t1.b) c
+2 13 2
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING ((t1.a=t1.c) AND (t1.a>1)) OR ((t1.a<3) AND (t1.c>3));
+a MAX(t1.b) c
+2 13 2
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING ((t1.a=t1.c) AND (t1.a>1)) OR ((t1.a<3) AND (t1.c>3));
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING ((t1.a=t1.c) AND (t1.a>1)) OR ((t1.a<3) AND (t1.c>3));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.c = t1.a and t1.a > 1 or t1.a < 3 and t1.c > 3",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = t1.a and t1.a > 1 or t1.a < 3"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a=t1.c) AND (t1.a>1)) OR (t1.a<3)
+GROUP BY t1.a
+HAVING ((t1.a=t1.c) AND (t1.a>1)) OR ((t1.a<3) AND (t1.c>3));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.c = t1.a and t1.a > 1 or t1.a < 3 and t1.c > 3",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = t1.a and t1.a > 1 or t1.a < 3"
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushdown using WHERE multiple equalities
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (t1.c<3);
+a MAX(t1.b) c
+1 22 1
+2 13 2
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (t1.c<3);
+a MAX(t1.b) c
+1 22 1
+2 13 2
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (t1.c<3);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (t1.c<3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = t1.a and t1.a < 3"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c) AND (t1.c<3)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = t1.a and t1.a < 3"
+ }
+ }
+ }
+ }
+}
+# extracted AND-formula : pushdown using WHERE multiple equalities
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (t1.a>1) AND (t1.c<3);
+a MAX(t1.b) c
+2 13 2
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (t1.a>1) AND (t1.c<3);
+a MAX(t1.b) c
+2 13 2
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (t1.a>1) AND (t1.c<3);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (t1.a>1) AND (t1.c<3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = t1.a and t1.a > 1 and t1.a < 3"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c) AND (t1.a>1) AND (t1.c<3)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = t1.a and t1.a > 1 and t1.a < 3"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4)) AND (t1.a<2);
+a MAX(t1.b) c
+1 22 1
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4)) AND (t1.a<2);
+a MAX(t1.b) c
+1 22 1
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4)) AND (t1.a<2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4)) AND (t1.a<2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a > 1 and max(t1.c) < 3 or t1.c < 4",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = t1.a and (t1.a > 1 or t1.a < 4) and t1.a < 2"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c) AND (((t1.a>1) OR (t1.c<4)) AND (t1.a<2))
+GROUP BY t1.a
+HAVING ((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a > 1 and max(t1.c) < 3 or t1.c < 4",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = t1.a and (t1.a > 1 or t1.a < 4) and t1.a < 2"
+ }
+ }
+ }
+ }
+}
+# extracted OR-formula : pushdown using WHERE multiple equalities
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING ((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4);
+a MAX(t1.b) c
+1 22 1
+2 13 2
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING ((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4);
+a MAX(t1.b) c
+1 22 1
+2 13 2
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING ((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING ((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a > 1 and max(t1.c) < 3 or t1.c < 4",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = t1.a and (t1.a > 1 or t1.a < 4)"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c) AND ((t1.a>1) OR (t1.c<4))
+GROUP BY t1.a
+HAVING ((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a > 1 and max(t1.c) < 3 or t1.c < 4",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = t1.a and (t1.a > 1 or t1.a < 4)"
+ }
+ }
+ }
+ }
+}
+DROP TABLE t1,t2;
+DROP VIEW v1;
+DROP FUNCTION f1;
+#
+# MDEV-18668: pushdown from HAVING into impossible WHERE
+#
+CREATE TABLE t1 (a INT, b INT);
+INSERT INTO t1 VALUES (1,1),(2,2);
+SELECT a FROM t1 WHERE b = 1 AND b = 2 GROUP BY a HAVING a <= 3;
+a
+EXPLAIN
+SELECT a FROM t1 WHERE b = 1 AND b = 2 GROUP BY a HAVING a <= 3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+DROP TABLE t1;
+#
+# MDEV-18769: unfixed OR condition pushed from HAVING into WHERE
+#
+CREATE TABLE t1(a INT, b INT, c INT);
+CREATE TABLE t3(a INT, b INT, c INT, d INT);
+INSERT INTO t1 VALUES (1,14,3), (2,13,2), (1,22,1), (3,13,4), (3,14,2);
+INSERT INTO t3 VALUES (1,2,16,1), (1,3,11,2), (2,3,10,2);
+# nothing to push
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING t1.b = 13 AND MAX(t1.c) > 2;
+a b MAX(t1.c)
+3 13 4
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING t1.b = 13 AND MAX(t1.c) > 2;
+a b MAX(t1.c)
+3 13 4
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING t1.b = 13 AND MAX(t1.c) > 2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using temporary; Using filesort
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING t1.b = 13 AND MAX(t1.c) > 2;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.b = 13 and max(t1.c) > 2",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING t1.b = 13 AND MAX(t1.c) > 2;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.b = 13 and max(t1.c) > 2",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100
+ }
+ }
+ }
+ }
+}
+# extracted AND formula
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14);
+a b MAX(t1.c)
+2 13 2
+3 13 4
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14);
+a b MAX(t1.c)
+2 13 2
+3 13 4
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a = 1 or t1.b > 10) and t1.b < 14"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 1 OR t1.b > 10) AND (t1.b < 14)
+GROUP BY t1.a,t1.b;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a = 1 or t1.b > 10) and t1.b < 14"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR t1.b > 15);
+a b MAX(t1.c)
+1 22 1
+2 13 2
+3 13 4
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR t1.b > 15);
+a b MAX(t1.c)
+1 22 1
+2 13 2
+3 13 4
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR t1.b > 15);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR t1.b > 15);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a = 1 or t1.b > 10) and (t1.b < 14 or t1.b > 15)"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR t1.b > 15)
+GROUP BY t1.a,t1.b;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a = 1 or t1.b > 10) and (t1.b < 14 or t1.b > 15)"
+ }
+ }
+ }
+ }
+}
+# extracted AND formula : equality in the inner AND formula
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR (t1.b > 15 AND t1.a = 2));
+a b MAX(t1.c)
+2 13 2
+3 13 4
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR (t1.b > 15 AND t1.a = 2));
+a b MAX(t1.c)
+2 13 2
+3 13 4
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR (t1.b > 15 AND t1.a = 2));
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR (t1.b > 15 AND t1.a = 2));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a = 1 or t1.b > 10) and (t1.b < 14 or t1.a = 2 and t1.b > 15)"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR (t1.b > 15 AND t1.a = 2))
+GROUP BY t1.a,t1.b;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a = 1 or t1.b > 10) and (t1.b < 14 or t1.a = 2 and t1.b > 15)"
+ }
+ }
+ }
+ }
+}
+# extracted OR formula
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2) OR (t1.b = 13 AND t1.a > 2);
+a b MAX(t1.c)
+1 14 3
+1 22 1
+3 13 4
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2) OR (t1.b = 13 AND t1.a > 2);
+a b MAX(t1.c)
+1 14 3
+1 22 1
+3 13 4
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2) OR (t1.b = 13 AND t1.a > 2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2) OR (t1.b = 13 AND t1.a > 2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a < 2 or t1.b = 13 and t1.a > 2"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a < 2) OR (t1.b = 13 AND t1.a > 2)
+GROUP BY t1.a,t1.b;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a < 2 or t1.b = 13 and t1.a > 2"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND t1.b = 13);
+a b MAX(t1.c)
+1 14 3
+3 13 4
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND t1.b = 13);
+a b MAX(t1.c)
+1 14 3
+3 13 4
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND t1.b = 13);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND t1.b = 13);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b = 14 and t1.a < 2 or t1.b = 13 and t1.a > 2"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND t1.b = 13)
+GROUP BY t1.a,t1.b;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b = 14 and t1.a < 2 or t1.b = 13 and t1.a > 2"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND (t1.b = 13 OR t1.b = 14));
+a b MAX(t1.c)
+1 14 3
+3 13 4
+3 14 2
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND (t1.b = 13 OR t1.b = 14));
+a b MAX(t1.c)
+1 14 3
+3 13 4
+3 14 2
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND (t1.b = 13 OR t1.b = 14));
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND (t1.b = 13 OR t1.b = 14));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b = 14 and t1.a < 2 or t1.a > 2 and (t1.b = 13 or t1.b = 14)"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND (t1.b = 13 OR t1.b = 14))
+GROUP BY t1.a,t1.b;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.b",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b = 14 and t1.a < 2 or t1.a > 2 and (t1.b = 13 or t1.b = 14)"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a < 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+a b MAX(t1.c)
+1 14 3
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a < 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+a b MAX(t1.c)
+1 14 3
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a < 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a < 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a < 2 and max(t1.c) = 2 or max(t1.c) > 2 and (t1.a = 1 or t1.a = 2)",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a < 2 or t1.a = 1 or t1.a = 2"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a < 2) OR (t1.a = 1 OR t1.a = 2)
+GROUP BY t1.a
+HAVING (t1.a < 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a < 2 and max(t1.c) = 2 or max(t1.c) > 2 and (t1.a = 1 or t1.a = 2)",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a < 2 or t1.a = 1 or t1.a = 2"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a = 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+a b MAX(t1.c)
+1 14 3
+2 13 2
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a = 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+a b MAX(t1.c)
+1 14 3
+2 13 2
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a = 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a = 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a = 2 and max(t1.c) = 2 or max(t1.c) > 2 and (t1.a = 1 or t1.a = 2)",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 2 or t1.a = 1 or t1.a = 2"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 2) OR (t1.a = 1 OR t1.a = 2)
+GROUP BY t1.a
+HAVING (t1.a = 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.a = 2 and max(t1.c) = 2 or max(t1.c) > 2 and (t1.a = 1 or t1.a = 2)",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 2 or t1.a = 1 or t1.a = 2"
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : equality pushdown
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a = 1) AND (MAX(t1.c) = 3);
+a b MAX(t1.c)
+1 14 3
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a = 1) AND (MAX(t1.c) = 3);
+a b MAX(t1.c)
+1 14 3
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a = 1) AND (MAX(t1.c) = 3);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a = 1) AND (MAX(t1.c) = 3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.c) = 3",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (MAX(t1.c) = 3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.c) = 3",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1"
+ }
+ }
+}
+# conjunctive subformula : equalities pushdown
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 1) AND (t1.c = 3) AND MAX(t1.b = 14);
+a b MAX(t1.c)
+1 14 3
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 1) AND (t1.c = 3) AND MAX(t1.b = 14);
+a b MAX(t1.c)
+1 14 3
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 1) AND (t1.c = 3) AND MAX(t1.b = 14);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 1) AND (t1.c = 3) AND MAX(t1.b = 14);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.b = 14)",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c = 3"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 3)
+GROUP BY t1.a,t1.c
+HAVING (MAX(t1.b) = 14);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.b) = 14",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c = 3"
+ }
+ }
+}
+# conjunctive subformula : multiple equality consists of
+# two equalities pushdown
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 1) AND (t1.c = 1) AND MAX(t1.b = 14);
+a b MAX(t1.c)
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 1) AND (t1.c = 1) AND MAX(t1.b = 14);
+a b MAX(t1.c)
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 1) AND (t1.c = 1) AND MAX(t1.b = 14);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 1) AND (t1.c = 1) AND MAX(t1.b = 14);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.b = 14)",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c = 1"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 1)
+GROUP BY t1.a,t1.c
+HAVING (MAX(t1.b) = 14);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "max(t1.b) = 14",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c = 1"
+ }
+ }
+}
+#
+# Pushdown from HAVING into non-empty WHERE
+#
+# inequality : inequality in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2)
+GROUP BY t1.a
+HAVING (t1.a < 3);
+a b MAX(t1.c)
+1 14 3
+2 13 2
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2)
+GROUP BY t1.a
+HAVING (t1.a < 3);
+a b MAX(t1.c)
+1 14 3
+2 13 2
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2)
+GROUP BY t1.a
+HAVING (t1.a < 3);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2)
+GROUP BY t1.a
+HAVING (t1.a < 3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b > 2 and t1.a < 3"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2) AND (t1.a < 3)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b > 2 and t1.a < 3"
+ }
+ }
+ }
+ }
+}
+# equality : inequality in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2)
+GROUP BY t1.a
+HAVING (t1.a = 3);
+a b MAX(t1.c)
+3 13 4
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2)
+GROUP BY t1.a
+HAVING (t1.a = 3);
+a b MAX(t1.c)
+3 13 4
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2)
+GROUP BY t1.a
+HAVING (t1.a = 3);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2)
+GROUP BY t1.a
+HAVING (t1.a = 3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 3 and t1.b > 2"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2) AND (t1.a = 3)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 3 and t1.b > 2"
+ }
+ }
+}
+# inequality : equality in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14)
+GROUP BY t1.a
+HAVING (t1.a < 3);
+a b MAX(t1.c)
+1 14 3
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14)
+GROUP BY t1.a
+HAVING (t1.a < 3);
+a b MAX(t1.c)
+1 14 3
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14)
+GROUP BY t1.a
+HAVING (t1.a < 3);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14)
+GROUP BY t1.a
+HAVING (t1.a < 3);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b = 14 and t1.a < 3"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14) AND (t1.a < 3)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b = 14 and t1.a < 3"
+ }
+ }
+ }
+ }
+}
+# equality : equality in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+a b MAX(t1.c)
+1 14 3
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+a b MAX(t1.c)
+1 14 3
+explain SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.b = 14"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14) AND (t1.a = 1)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.b = 14 and t1.a = 1"
+ }
+ }
+}
+# equality : equality in WHERE, impossible WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+a MAX(t1.c)
+SELECT t1.a,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+a MAX(t1.c)
+explain SELECT t1.a,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+explain format=json SELECT t1.a,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 3) AND (t1.a = 1)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+# equality : equality in WHERE (equal through constant)
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c = 1)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+a MAX(t1.b) c
+1 22 1
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c = 1)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+a MAX(t1.b) c
+1 22 1
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c = 1)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c = 1)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c = 1"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c = 1) AND (t1.a = 1)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c = 1 and t1.a = 1"
+ }
+ }
+}
+# inequality : AND formula in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a > 1);
+a MAX(t1.b) c
+2 13 2
+3 14 2
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a > 1);
+a MAX(t1.b) c
+2 13 2
+3 14 2
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a > 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a > 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c > 0 and t1.c < 3 and t1.a > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3) AND (t1.a > 1)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.c > 0 and t1.c < 3 and t1.a > 1"
+ }
+ }
+ }
+ }
+}
+# equality : AND formula in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+a MAX(t1.b) c
+1 22 1
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+a MAX(t1.b) c
+1 22 1
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c > 0 and t1.c < 3"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3) AND (t1.a = 1)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c > 0 and t1.c < 3"
+ }
+ }
+}
+# equality : AND formula in WHERE, impossible WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+a MAX(t1.b) c
+1 22 1
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+a MAX(t1.b) c
+1 22 1
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "const_condition": "1",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c < 3"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 0) AND (t1.c < 3) AND (t1.a = 1)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c < 3"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a = 0) AND (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+a MAX(t1.b)
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a = 0) AND (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+a MAX(t1.b)
+explain SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a = 0) AND (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a = 0) AND (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a = 0) AND (t1.a = 3) AND (t1.a = 1)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.b = 2) AND (t3.d = 1)
+GROUP BY t3.a,t3.b,t3.d
+HAVING (t3.a = 1);
+a b MAX(t3.c) d
+1 2 16 1
+SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.b = 2) AND (t3.d = 1)
+GROUP BY t3.a,t3.b,t3.d
+HAVING (t3.a = 1);
+a b MAX(t3.c) d
+1 2 16 1
+explain SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.b = 2) AND (t3.d = 1)
+GROUP BY t3.a,t3.b,t3.d
+HAVING (t3.a = 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where
+explain format=json SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.b = 2) AND (t3.d = 1)
+GROUP BY t3.a,t3.b,t3.d
+HAVING (t3.a = 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t3.b = 2 and t3.d = 1 and t3.a = 1"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.b = 2) AND (t3.d = 1) AND (t3.a = 1)
+GROUP BY t3.a,t3.b,t3.d;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t3.b = 2 and t3.d = 1 and t3.a = 1"
+ }
+ }
+}
+# inequality : OR formula in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 2);
+a MAX(t1.b) c
+1 22 1
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 2);
+a MAX(t1.b) c
+1 22 1
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a > 1 or t1.c < 3) and t1.a < 2"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a > 1) OR (t1.c < 3)) AND (t1.a < 2)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a > 1 or t1.c < 3) and t1.a < 2"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a = 1) OR (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 2);
+a MAX(t1.b)
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a = 1) OR (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 2);
+a MAX(t1.b)
+explain SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a = 1) OR (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a = 1) OR (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE ((t1.a = 1) OR (t1.a = 3)) AND (t1.a = 2)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+# AND formula : inequality in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.a > 0);
+a MAX(t1.b) c
+2 13 2
+3 14 4
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.a > 0);
+a MAX(t1.b) c
+2 13 2
+3 14 4
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.a > 0);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.a > 0);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.a < 4 and t1.a > 0"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) AND (t1.a < 4) AND (t1.a > 0)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.a < 4 and t1.a > 0"
+ }
+ }
+ }
+ }
+}
+# AND formula : equality in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.a > 0);
+a MAX(t1.b) c
+1 22 3
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.a > 0);
+a MAX(t1.b) c
+1 22 3
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.a > 0);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.a > 0);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "const_condition": "1 and 1",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a < 4) AND (t1.a > 0)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1"
+ }
+ }
+}
+# OR formula : inequality in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) OR (t1.a > 0);
+a MAX(t1.b) c
+2 13 2
+3 14 4
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) OR (t1.a > 0);
+a MAX(t1.b) c
+2 13 2
+3 14 4
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) OR (t1.a > 0);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) OR (t1.a > 0);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and (t1.a < 4 or t1.a > 0)"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) AND ((t1.a < 4) OR (t1.a > 0))
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and (t1.a < 4 or t1.a > 0)"
+ }
+ }
+ }
+ }
+}
+# OR formula : equality in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) OR (t1.a > 0);
+a MAX(t1.b) c
+1 22 3
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) OR (t1.a > 0);
+a MAX(t1.b) c
+1 22 3
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) OR (t1.a > 0);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) OR (t1.a > 0);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "const_condition": "1",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND ((t1.a < 4) OR (t1.a > 0))
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1"
+ }
+ }
+}
+# AND formula : AND formula in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.c > 1);
+a MAX(t1.b) c
+2 13 2
+3 14 2
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.c > 1);
+a MAX(t1.b) c
+2 13 2
+3 14 2
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.c > 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.c > 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.c > 1",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.c < 3 and t1.a < 4"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a > 1) AND (t1.c < 3)) AND
+(t1.a < 4)
+GROUP BY t1.a
+HAVING (t1.c > 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.c > 1",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.c < 3 and t1.a < 4"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c < 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+a MAX(t1.b) c
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c < 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+a MAX(t1.b) c
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c < 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c < 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "const_condition": "1",
+ "filesort": {
+ "sort_key": "t1.c",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c < 3 and t1.c > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a = 1) AND (t1.c < 3)) AND
+((t1.a < 4) AND (t1.c > 1))
+GROUP BY t1.a,t1.c;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.c",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c < 3 and t1.c > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+a MAX(t1.b) c
+1 14 3
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+a MAX(t1.b) c
+1 14 3
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "const_condition": "1 and 1",
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c = 3"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a = 1) AND (t1.c = 3)) AND
+((t1.a < 4) AND (t1.c > 1))
+GROUP BY t1.a,t1.c;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c = 3"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1)
+GROUP BY t3.a,t3.b
+HAVING (t3.b = 2) AND (t3.d > 0);
+a b MAX(t3.c) d
+1 2 16 1
+SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1)
+GROUP BY t3.a,t3.b
+HAVING (t3.b = 2) AND (t3.d > 0);
+a b MAX(t3.c) d
+1 2 16 1
+explain SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1)
+GROUP BY t3.a,t3.b
+HAVING (t3.b = 2) AND (t3.d > 0);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where
+explain format=json SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1)
+GROUP BY t3.a,t3.b
+HAVING (t3.b = 2) AND (t3.d > 0);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t3.d > 0",
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t3.a = 1 and t3.d = 1 and t3.b = 2"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1) AND
+(t3.b = 2)
+GROUP BY t3.a,t3.b
+HAVING (t3.d > 0);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t3.d > 0",
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t3.a = 1 and t3.d = 1 and t3.b = 2"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1)
+GROUP BY t3.a,t3.b,t3.d
+HAVING (t3.b = 2) AND (t3.d > 0);
+a b MAX(t3.c) d
+1 2 16 1
+SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1)
+GROUP BY t3.a,t3.b,t3.d
+HAVING (t3.b = 2) AND (t3.d > 0);
+a b MAX(t3.c) d
+1 2 16 1
+explain SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1)
+GROUP BY t3.a,t3.b,t3.d
+HAVING (t3.b = 2) AND (t3.d > 0);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where
+explain format=json SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1)
+GROUP BY t3.a,t3.b,t3.d
+HAVING (t3.b = 2) AND (t3.d > 0);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "const_condition": "1",
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t3.a = 1 and t3.d = 1 and t3.b = 2"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1) AND
+(t3.b = 2) AND (t3.d > 0)
+GROUP BY t3.a,t3.b,t3.d;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t3.a = 1 and t3.d = 1 and t3.b = 2"
+ }
+ }
+}
+# AND formula : OR formula in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.c > 1);
+a MAX(t1.b) c
+2 13 2
+3 14 4
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.c > 1);
+a MAX(t1.b) c
+2 13 2
+3 14 4
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.c > 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.c > 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.c > 1",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a > 1 or t1.c < 3) and t1.a < 4"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a > 1) OR (t1.c < 3)) AND
+(t1.a < 4)
+GROUP BY t1.a
+HAVING (t1.c > 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "having_condition": "t1.c > 1",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a > 1 or t1.c < 3) and t1.a < 4"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+a MAX(t1.b) c
+2 13 2
+3 14 2
+3 13 4
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+a MAX(t1.b) c
+2 13 2
+3 14 2
+3 13 4
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.c",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a > 1 or t1.c < 3) and t1.a < 4 and t1.c > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a > 1) OR (t1.c < 3)) AND
+(t1.a < 4) AND (t1.c > 1)
+GROUP BY t1.a,t1.c;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.c",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a > 1 or t1.c < 3) and t1.a < 4 and t1.c > 1"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) OR (t1.a = 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 4) OR (t1.c > 1);
+a MAX(t1.b) c
+1 14 3
+3 14 2
+3 13 4
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) OR (t1.a = 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 4) OR (t1.c > 1);
+a MAX(t1.b) c
+1 14 3
+3 14 2
+3 13 4
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) OR (t1.a = 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 4) OR (t1.c > 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where; Using temporary; Using filesort
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) OR (t1.a = 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 4) OR (t1.c > 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.c",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a = 1 or t1.a = 3) and (t1.a = 4 or t1.c > 1)"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a = 1) OR (t1.a = 3)) AND
+((t1.a = 4) OR (t1.c > 1))
+GROUP BY t1.a,t1.c;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.a, t1.c",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "(t1.a = 1 or t1.a = 3) and (t1.a = 4 or t1.c > 1)"
+ }
+ }
+ }
+ }
+}
+# equality : pushdown through equality in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 1);
+a MAX(t1.b) c
+1 22 1
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 1);
+a MAX(t1.b) c
+1 22 1
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 1);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c = 1"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c) AND (t1.c = 1)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c = 1"
+ }
+ }
+}
+# OR formula : pushdown through equality
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 1) OR (t1.c = 2);
+a MAX(t1.b) c
+1 22 1
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 1) OR (t1.c = 2);
+a MAX(t1.b) c
+1 22 1
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 1) OR (t1.c = 2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 1) OR (t1.c = 2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c = 1"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c) AND
+((t1.c = 1) OR (t1.c = 2))
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 5,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.c = 1"
+ }
+ }
+}
+# OR formula : pushdown through equality, impossible WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 3) OR (t1.c = 2);
+a MAX(t1.b) c
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 3) OR (t1.c = 2);
+a MAX(t1.b) c
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 3) OR (t1.c = 2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 3) OR (t1.c = 2);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c) AND
+((t1.c = 3) OR (t1.c = 2))
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+# AND formula : pushdown through equality, impossible WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a,t1.c
+HAVING (t1.c = 3) AND (t1.a > 2) AND (t1.a = t1.c);
+a MAX(t1.b) c
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a,t1.c
+HAVING (t1.c = 3) AND (t1.a > 2) AND (t1.a = t1.c);
+a MAX(t1.b) c
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a,t1.c
+HAVING (t1.c = 3) AND (t1.a > 2) AND (t1.a = t1.c);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a,t1.c
+HAVING (t1.c = 3) AND (t1.a > 2) AND (t1.a = t1.c);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 3) AND
+(t1.a > 2) AND (t1.a = t1.c)
+GROUP BY t1.a,t1.c;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.c = 3) AND (t1.a > 2) AND (t1.a = t1.c);
+a MAX(t1.b) c
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.c = 3) AND (t1.a > 2) AND (t1.a = t1.c);
+a MAX(t1.b) c
+explain SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.c = 3) AND (t1.a > 2) AND (t1.a = t1.c);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.c = 3) AND (t1.a > 2) AND (t1.a = t1.c);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 3) AND
+(t1.a > 2) AND (t1.a = t1.c)
+GROUP BY t1.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Impossible WHERE"
+ }
+ }
+}
+# AND formula with OR subformula : AND condition in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t3.a,MAX(t3.b),t3.c,t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2))
+GROUP BY t3.a
+HAVING (t3.a = t3.d) AND ((t3.d = 1) OR (t3.d > 1));
+a MAX(t3.b) c d
+SELECT t3.a,MAX(t3.b),t3.c,t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2))
+GROUP BY t3.a
+HAVING (t3.a = t3.d) AND ((t3.d = 1) OR (t3.d > 1));
+a MAX(t3.b) c d
+explain SELECT t3.a,MAX(t3.b),t3.c,t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2))
+GROUP BY t3.a
+HAVING (t3.a = t3.d) AND ((t3.d = 1) OR (t3.d > 1));
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using temporary; Using filesort
+explain format=json SELECT t3.a,MAX(t3.b),t3.c,t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2))
+GROUP BY t3.a
+HAVING (t3.a = t3.d) AND ((t3.d = 1) OR (t3.d > 1));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t3.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t3.d = t3.a and t3.a > 1 and (t3.c = 3 or t3.c < 2) and (t3.a = 1 or t3.a > 1)"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t3.a,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2)) AND
+(t3.a = t3.d) AND ((t3.d = 1) OR (t3.d > 1))
+GROUP BY t3.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t3.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t3.d = t3.a and t3.a > 1 and (t3.c = 3 or t3.c < 2) and (t3.a = 1 or t3.a > 1)"
+ }
+ }
+ }
+ }
+}
+# AND formula with OR subformula : AND condition in WHERE
+set statement optimizer_switch='condition_pushdown_from_having=off' for SELECT t3.a,MAX(t3.b),t3.c,t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2))
+GROUP BY t3.a
+HAVING (t3.a = t3.d) AND (((t3.d = t3.c) AND (t3.c < 15)) OR (t3.d > 1));
+a MAX(t3.b) c d
+SELECT t3.a,MAX(t3.b),t3.c,t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2))
+GROUP BY t3.a
+HAVING (t3.a = t3.d) AND (((t3.d = t3.c) AND (t3.c < 15)) OR (t3.d > 1));
+a MAX(t3.b) c d
+explain SELECT t3.a,MAX(t3.b),t3.c,t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2))
+GROUP BY t3.a
+HAVING (t3.a = t3.d) AND (((t3.d = t3.c) AND (t3.c < 15)) OR (t3.d > 1));
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using temporary; Using filesort
+explain format=json SELECT t3.a,MAX(t3.b),t3.c,t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2))
+GROUP BY t3.a
+HAVING (t3.a = t3.d) AND (((t3.d = t3.c) AND (t3.c < 15)) OR (t3.d > 1));
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t3.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t3.d = t3.a and t3.a > 1 and (t3.c = 3 or t3.c < 2) and (t3.c = t3.a and t3.c < 15 or t3.a > 1)"
+ }
+ }
+ }
+ }
+}
+set statement optimizer_switch='condition_pushdown_from_having=off' for explain format=json SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2)) AND
+(t3.a = t3.d) AND (((t3.d = t3.c) AND (t3.c < 15)) OR (t3.d > 1))
+GROUP BY t3.a;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t3.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 3,
+ "filtered": 100,
+ "attached_condition": "t3.d = t3.a and t3.a > 1 and (t3.c = 3 or t3.c < 2) and (t3.c = t3.a and t3.a < 15 or t3.a > 1)"
+ }
+ }
+ }
+ }
+}
+# prepare statement
+PREPARE stmt1 from "
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a = 1)
+";
+execute stmt1;
+a MAX(t1.b) c
+1 22 3
+execute stmt1;
+a MAX(t1.b) c
+1 22 3
+deallocate prepare stmt1;
+DROP TABLE t1,t3;
+#
+# MDEV-19185: pushdown constant function with subquery
+#
+CREATE TABLE t1 (pk INT, c1 VARCHAR(64));
+INSERT INTO t1 VALUES (1,'bbb'),(2,'aaa'),(3,'ccc');
+CREATE VIEW v1 AS SELECT * FROM t1;
+SELECT pk
+FROM t1
+GROUP BY pk
+HAVING (1 NOT IN (SELECT COUNT(t1.c1) FROM (v1, t1)));
+pk
+1
+2
+3
+DROP TABLE t1;
+DROP VIEW v1;
+#
+# MDEV-19186: temporary table defined with view field in HAVING
+#
+CREATE TABLE t1 (pk INT, x VARCHAR(10));
+INSERT INTO t1 VALUES (1,'y'),(2,'s'),(3,'aaa');
+CREATE VIEW v1 AS SELECT * FROM t1;
+CREATE TABLE t2 (pk INT, x VARCHAR(10));
+INSERT INTO t2 VALUES (1,'aa'),(2,'t'),(3,'bb');
+CREATE TABLE tmp1
+SELECT v1.pk
+FROM t2,v1
+WHERE v1.x = t2.x
+GROUP BY v1.pk
+HAVING (v1.pk = 1);
+DROP TABLE t1,t2,tmp1;
+DROP VIEW v1;
+#
+# MDEV-19164: pushdown of condition with cached items
+#
+create table t1 (d1 date);
+insert into t1 values (null),('1971-03-06'),('1993-06-05'),('1998-07-08');
+select d1 from t1
+group by d1
+having d1 between (inet_aton('1978-04-27')) and '2018-08-26';
+d1
+explain extended select d1 from t1
+group by d1
+having d1 between (inet_aton('1978-04-27')) and '2018-08-26';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4 100.00 Using where; Using temporary; Using filesort
+Warnings:
+Note 1003 select `test`.`t1`.`d1` AS `d1` from `test`.`t1` where `test`.`t1`.`d1` between <cache>(inet_aton('1978-04-27')) and <cache>('2018-08-26') group by `test`.`t1`.`d1` having 1
+explain format=json select d1 from t1
+group by d1
+having d1 between (inet_aton('1978-04-27')) and '2018-08-26';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.d1",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 4,
+ "filtered": 100,
+ "attached_condition": "t1.d1 between <cache>(inet_aton('1978-04-27')) and <cache>('2018-08-26')"
+ }
+ }
+ }
+ }
+}
+delete from t1;
+insert into t1 values ('2018-01-15'),('2018-02-20');
+select d1 from t1
+group by d1
+having d1 not between 0 AND exp(0);
+d1
+2018-01-15
+2018-02-20
+Warnings:
+Warning 1292 Truncated incorrect datetime value: '1'
+explain extended select d1 from t1
+group by d1
+having d1 not between 0 AND exp(0);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where; Using temporary; Using filesort
+Warnings:
+Note 1003 select `test`.`t1`.`d1` AS `d1` from `test`.`t1` where `test`.`t1`.`d1` not between <cache>(0) and <cache>(exp(0)) group by `test`.`t1`.`d1` having 1
+explain format=json select d1 from t1
+group by d1
+having d1 not between 0 AND exp(0);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "filesort": {
+ "sort_key": "t1.d1",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 2,
+ "filtered": 100,
+ "attached_condition": "t1.d1 not between <cache>(0) and <cache>(exp(0))"
+ }
+ }
+ }
+ }
+}
+drop table t1;
+#
+# MDEV-19245: Impossible WHERE should be noticed earlier
+# after HAVING pushdown
+#
+CREATE TABLE t1 (a INT, b INT, c INT);
+INSERT INTO t1 VALUES (1,2,1),(3,2,2),(5,6,4),(3,4,1);
+EXPLAIN SELECT t1.a,MAX(t1.b) FROM t1
+WHERE t1.a > 3 GROUP BY t1.a HAVING t1.a = 3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+EXPLAIN SELECT t1.a,MAX(t1.b) FROM t1
+WHERE t1.a = 3 GROUP BY t1.a HAVING t1.a > 3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+EXPLAIN SELECT t1.a,MAX(t1.b) FROM t1
+WHERE t1.a > 3 AND t1.a = 3 GROUP BY t1.a ;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+EXPLAIN SELECT t1.a,MAX(t1.b) FROM t1
+WHERE (t1.a < 2 OR t1.c > 1) GROUP BY t1.a HAVING t1.a = 3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4 Using where
+EXPLAIN SELECT t1.a,MAX(t1.b) FROM t1
+WHERE t1.a = 3 GROUP BY t1.a HAVING (t1.a < 2 OR t1.a > 3);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+EXPLAIN SELECT t1.a,MAX(t1.b) FROM t1
+WHERE t1.a = 3 AND (t1.a < 2 OR t1.a > 3) GROUP BY t1.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+EXPLAIN SELECT t1.a,MAX(t1.b),t1.c FROM t1
+WHERE (t1.a < 2 AND t1.c > 1) GROUP BY t1.a HAVING t1.a = 3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+EXPLAIN SELECT t1.a,MAX(t1.b),t1.c FROM t1
+WHERE t1.a = 3 GROUP BY t1.a HAVING (t1.a < 2 AND t1.c > 1);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+EXPLAIN SELECT t1.a,MAX(t1.b),t1.c FROM t1
+WHERE t1.a = 3 AND (t1.a < 2 AND t1.b > 3) GROUP BY t1.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+DROP TABLE t1;
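The result file above covers pushdown of HAVING conditions into WHERE, which the tests switch on and off through the condition_pushdown_from_having optimizer_switch flag. As a minimal sketch of how to observe the effect by hand (demo_t, grp_col and val_col are made-up names, not taken from the tests), the pushed predicate is expected to move from HAVING into the table's attached_condition in EXPLAIN FORMAT=JSON:

CREATE TABLE demo_t (grp_col INT, val_col INT);
INSERT INTO demo_t VALUES (1,10),(1,20),(2,30),(3,40);

-- Switch on (the unprefixed queries above run with this default): grp_col > 1
-- from HAVING should be checked while scanning demo_t, i.e. inside
-- "attached_condition" of the table node.
SET STATEMENT optimizer_switch='condition_pushdown_from_having=on' FOR
EXPLAIN FORMAT=JSON
SELECT grp_col, MAX(val_col) FROM demo_t GROUP BY grp_col HAVING grp_col > 1;

-- Switch off: the same predicate stays in HAVING and is evaluated only after
-- grouping, as in the no-pushdown variants above.
SET STATEMENT optimizer_switch='condition_pushdown_from_having=off' FOR
EXPLAIN FORMAT=JSON
SELECT grp_col, MAX(val_col) FROM demo_t GROUP BY grp_col HAVING grp_col > 1;

DROP TABLE demo_t;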
diff --git a/mysql-test/main/having_cond_pushdown.test b/mysql-test/main/having_cond_pushdown.test
new file mode 100644
index 00000000000..f1bf70627f6
--- /dev/null
+++ b/mysql-test/main/having_cond_pushdown.test
@@ -0,0 +1,1403 @@
+let $no_pushdown=
+ set statement optimizer_switch='condition_pushdown_from_having=off' for;
+
+CREATE TABLE t1(a INT, b INT, c INT);
+CREATE TABLE t2(x INT, y INT);
+
+INSERT INTO t1 VALUES (1,14,3), (2,13,2), (1,22,1), (3,13,4), (3,14,2);
+INSERT INTO t2 VALUES (2,13),(5,22),(3,14),(1,22);
+
+CREATE VIEW v1
+AS SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a;
+
+CREATE FUNCTION f1() RETURNS INT RETURN 3;
+
+--echo # conjunctive subformula
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>2);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>2)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # conjunctive subformula : using equality
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a=2)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # extracted AND formula
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (t1.a<4);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>1) AND (t1.a<4)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # extracted OR formula
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) OR (a IN (SELECT 3));
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>1) OR (a IN (SELECT 3))
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b),MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING ((t1.a>2) AND (MAX(t1.b)>13)) OR ((t1.a<3) AND (MIN(t1.c)>1));
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a>2) OR (t1.a<3)
+GROUP BY t1.a
+HAVING ((t1.a>2) AND (MAX(t1.b)>13)) OR ((t1.a<3) AND (MIN(t1.c)>1));
+eval $no_pushdown explain format=json $query;
+
+--echo # conjunctive subformula : no aggregation formula pushdown
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.a)<3);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>1)
+GROUP BY t1.a
+HAVING (MAX(t1.a)<3);
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.b)>13);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>1)
+GROUP BY t1.a
+HAVING (MAX(t1.b)>13);
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=3) AND (MAX(t1.a)=3);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a=3)
+GROUP BY t1.a
+HAVING (MAX(t1.a)=3);
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2) AND (MAX(t1.b)>12);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a=2)
+GROUP BY t1.a
+HAVING (MAX(t1.b)>12);
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MAX(t1.b)=13);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>1)
+GROUP BY t1.a
+HAVING (MAX(t1.b)=13);
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (MIN(t1.c)<3);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MIN(t1.c)
+FROM t1
+WHERE (t1.a>1)
+GROUP BY t1.a
+HAVING (MIN(t1.c)<3);
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b),MIN(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=2) AND (MAX(t1.b)=13) AND (MIN(t1.c)=2);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MIN(t1.c)
+FROM t1
+WHERE (t1.a=2)
+GROUP BY t1.a
+HAVING (MAX(t1.b)=13) AND (MIN(t1.c)=2);
+eval $no_pushdown explain format=json $query;
+
+--echo # conjunctive subformula : no stored function pushdown
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a>1) AND (a=test.f1());
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a>1)
+GROUP BY t1.a
+HAVING (a=test.f1());
+eval $no_pushdown explain format=json $query;
+
+--echo # conjunctive subformula : pushdown into derived table WHERE clause
+let $query=
+SELECT v1.a
+FROM t2,v1
+WHERE (t2.x=v1.a)
+GROUP BY v1.a
+HAVING (v1.a>1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT v1.a
+FROM t2,v1
+WHERE (t2.x=v1.a) AND (v1.a>1)
+GROUP BY v1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # conjunctive subformula : pushdown into derived table HAVING clause
+let $query=
+SELECT v1.a,v1.c
+FROM t2,v1
+WHERE (t2.x=v1.a)
+GROUP BY v1.c
+HAVING (v1.c>2);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT v1.a,v1.c
+FROM t2,v1
+WHERE (t2.x=v1.a) AND (v1.c>2)
+GROUP BY v1.c;
+eval $no_pushdown explain format=json $query;
+
+--echo # conjunctive subformula : pushdown into materialized IN subquery
+--echo # WHERE clause
+let $query=
+SELECT * FROM t1
+WHERE
+ (t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.a
+HAVING (t1.a>1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT * FROM t1
+WHERE
+ (t1.a>1) AND
+ (t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # conjunctive subformula : pushdown into materialized IN subquery
+--echo # HAVING clause
+let $query=
+SELECT * FROM t1
+WHERE
+ (t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.b
+HAVING (t1.b<14);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT * FROM t1
+WHERE
+ (t1.b<14) AND
+ (t1.a,t1.b) IN (SELECT t2.x,MAX(t2.y) FROM t2 WHERE t2.x<5 GROUP BY t2.x)
+GROUP BY t1.b;
+eval $no_pushdown explain format=json $query;
+
+--echo # non-standard allowed queries
+--echo # conjunctive subformula
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.c=2) AND (t1.a>1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c FROM t1
+WHERE (t1.a>1)
+GROUP BY t1.a
+HAVING (t1.c=2);
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT MAX(t1.a),t1.a,t1.b,t1.c
+FROM t1
+GROUP BY t1.b
+HAVING (t1.a=2) AND (t1.b=13) AND (t1.c=2);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT MAX(t1.a),t1.a,t1.b,t1.c
+FROM t1
+WHERE (t1.b=13)
+GROUP BY t1.b
+HAVING (t1.a=2) AND (t1.c=2);
+eval $no_pushdown explain format=json $query;
+
+--echo # extracted AND formula : using equalities
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=t1.c) AND (t1.c>1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b) FROM t1
+WHERE (t1.a=t1.c) AND (t1.a>1)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a=t1.c) AND (t1.c=2);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c) AND (t1.a=2)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING ((t1.a=t1.c) AND (t1.a>1)) OR ((t1.a<3) AND (t1.c>3));
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a=t1.c) AND (t1.a>1)) OR (t1.a<3)
+GROUP BY t1.a
+HAVING ((t1.a=t1.c) AND (t1.a>1)) OR ((t1.a<3) AND (t1.c>3));
+eval $no_pushdown explain format=json $query;
+
+--echo # conjunctive subformula : pushdown using WHERE multiple equalities
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (t1.c<3);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c) AND (t1.c<3)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # extracted AND-formula : pushdown using WHERE multiple equalities
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (t1.a>1) AND (t1.c<3);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c) AND (t1.a>1) AND (t1.c<3)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING (((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4)) AND (t1.a<2);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c) AND (((t1.a>1) OR (t1.c<4)) AND (t1.a<2))
+GROUP BY t1.a
+HAVING ((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4);
+eval $no_pushdown explain format=json $query;
+
+--echo # extracted OR-formula : pushdown using WHERE multiple equalities
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c)
+GROUP BY t1.a
+HAVING ((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a=t1.c) AND ((t1.a>1) OR (t1.c<4))
+GROUP BY t1.a
+HAVING ((t1.a>1) AND (MAX(t1.c)<3)) OR (t1.c<4);
+eval $no_pushdown explain format=json $query;
+
+DROP TABLE t1,t2;
+DROP VIEW v1;
+DROP FUNCTION f1;
+
+--echo #
+--echo # MDEV-18668: pushdown from HAVING into impossible WHERE
+--echo #
+
+CREATE TABLE t1 (a INT, b INT);
+INSERT INTO t1 VALUES (1,1),(2,2);
+
+SELECT a FROM t1 WHERE b = 1 AND b = 2 GROUP BY a HAVING a <= 3;
+EXPLAIN
+SELECT a FROM t1 WHERE b = 1 AND b = 2 GROUP BY a HAVING a <= 3;
+
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-18769: unfixed OR condition pushed from HAVING into WHERE
+--echo #
+
+CREATE TABLE t1(a INT, b INT, c INT);
+CREATE TABLE t3(a INT, b INT, c INT, d INT);
+
+INSERT INTO t1 VALUES (1,14,3), (2,13,2), (1,22,1), (3,13,4), (3,14,2);
+INSERT INTO t3 VALUES (1,2,16,1), (1,3,11,2), (2,3,10,2);
+
+--echo # nothing to push
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING t1.b = 13 AND MAX(t1.c) > 2;
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING t1.b = 13 AND MAX(t1.c) > 2;
+eval $no_pushdown explain format=json $query;
+
+--echo # extracted AND formula
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 1 OR t1.b > 10) AND (t1.b < 14)
+GROUP BY t1.a,t1.b;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR t1.b > 15);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR t1.b > 15)
+GROUP BY t1.a,t1.b;
+eval $no_pushdown explain format=json $query;
+
+--echo # extracted AND formula : equality in the inner AND formula
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR (t1.b > 15 AND t1.a = 2));
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 1 OR t1.b > 10) AND (t1.b < 14 OR (t1.b > 15 AND t1.a = 2))
+GROUP BY t1.a,t1.b;
+eval $no_pushdown explain format=json $query;
+
+--echo # extracted OR formula
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2) OR (t1.b = 13 AND t1.a > 2);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a < 2) OR (t1.b = 13 AND t1.a > 2)
+GROUP BY t1.a,t1.b;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND t1.b = 13);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND t1.b = 13)
+GROUP BY t1.a,t1.b;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.b
+HAVING (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND (t1.b = 13 OR t1.b = 14));
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a < 2 AND t1.b = 14) OR (t1.a > 2 AND (t1.b = 13 OR t1.b = 14))
+GROUP BY t1.a,t1.b;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a < 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a < 2) OR (t1.a = 1 OR t1.a = 2)
+GROUP BY t1.a
+HAVING (t1.a < 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a = 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 2) OR (t1.a = 1 OR t1.a = 2)
+GROUP BY t1.a
+HAVING (t1.a = 2 AND MAX(t1.c) = 2) OR (MAX(t1.c) > 2 AND (t1.a = 1 OR t1.a = 2));
+eval $no_pushdown explain format=json $query;
+
+--echo # conjunctive subformula : equality pushdown
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a = 1) AND (MAX(t1.c) = 3);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (MAX(t1.c) = 3);
+eval $no_pushdown explain format=json $query;
+
+--echo # conjunctive subformula : equalities pushdown
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 1) AND (t1.c = 3) AND MAX(t1.b = 14);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 3)
+GROUP BY t1.a,t1.c
+HAVING (MAX(t1.b) = 14);
+eval $no_pushdown explain format=json $query;
+
+--echo # conjunctive subformula : multiple equality consists of
+--echo # two equalities pushdown
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 1) AND (t1.c = 1) AND MAX(t1.b = 14);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 1)
+GROUP BY t1.a,t1.c
+HAVING (MAX(t1.b) = 14);
+eval $no_pushdown explain format=json $query;
+
+--echo #
+--echo # Pushdown from HAVING into non-empty WHERE
+--echo #
+
+--echo # inequality : inequality in WHERE
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2)
+GROUP BY t1.a
+HAVING (t1.a < 3);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2) AND (t1.a < 3)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # equality : inequality in WHERE
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2)
+GROUP BY t1.a
+HAVING (t1.a = 3);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b > 2) AND (t1.a = 3)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # inequality : equality in WHERE
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14)
+GROUP BY t1.a
+HAVING (t1.a < 3);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14) AND (t1.a < 3)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # equality : equality in WHERE
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,t1.b,MAX(t1.c)
+FROM t1
+WHERE (t1.b = 14) AND (t1.a = 1)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # equality : equality in WHERE, impossible WHERE
+let $query=
+SELECT t1.a,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.c)
+FROM t1
+WHERE (t1.a = 3) AND (t1.a = 1)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # equality : equality in WHERE (equal through constant)
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c = 1)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c = 1) AND (t1.a = 1)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # inequality : AND formula in WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a > 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3) AND (t1.a > 1)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # equality : AND formula in WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.c > 0) AND (t1.c < 3) AND (t1.a = 1)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # equality : AND formula in WHERE, impossible WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 0) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 0) AND (t1.c < 3) AND (t1.a = 1)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a = 0) AND (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a = 0) AND (t1.a = 3) AND (t1.a = 1)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.b = 2) AND (t3.d = 1)
+GROUP BY t3.a,t3.b,t3.d
+HAVING (t3.a = 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.b = 2) AND (t3.d = 1) AND (t3.a = 1)
+GROUP BY t3.a,t3.b,t3.d;
+eval $no_pushdown explain format=json $query;
+
+--echo # inequality : OR formula in WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 2);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a > 1) OR (t1.c < 3)) AND (t1.a < 2)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE (t1.a = 1) OR (t1.a = 3)
+GROUP BY t1.a
+HAVING (t1.a = 2);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b)
+FROM t1
+WHERE ((t1.a = 1) OR (t1.a = 3)) AND (t1.a = 2)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # AND formula : inequality in WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.a > 0);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) AND (t1.a < 4) AND (t1.a > 0)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # AND formula : equality in WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.a > 0);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a < 4) AND (t1.a > 0)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # OR formula : inequality in WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) OR (t1.a > 0);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) AND ((t1.a < 4) OR (t1.a > 0))
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # OR formula : equality in WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.a < 4) OR (t1.a > 0);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND ((t1.a < 4) OR (t1.a > 0))
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # AND formula : AND formula in WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) AND (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.c > 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a > 1) AND (t1.c < 3)) AND
+ (t1.a < 4)
+GROUP BY t1.a
+HAVING (t1.c > 1);
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c < 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a = 1) AND (t1.c < 3)) AND
+ ((t1.a < 4) AND (t1.c > 1))
+GROUP BY t1.a,t1.c;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a = 1) AND (t1.c = 3)) AND
+ ((t1.a < 4) AND (t1.c > 1))
+GROUP BY t1.a,t1.c;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1)
+GROUP BY t3.a,t3.b
+HAVING (t3.b = 2) AND (t3.d > 0);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1) AND
+ (t3.b = 2)
+GROUP BY t3.a,t3.b
+HAVING (t3.d > 0);
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1)
+GROUP BY t3.a,t3.b,t3.d
+HAVING (t3.b = 2) AND (t3.d > 0);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a = 1) AND (t3.d = 1) AND
+ (t3.b = 2) AND (t3.d > 0)
+GROUP BY t3.a,t3.b,t3.d;
+eval $no_pushdown explain format=json $query;
+
+--echo # AND formula : OR formula in WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a
+HAVING (t1.a < 4) AND (t1.c > 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a > 1) OR (t1.c < 3)) AND
+ (t1.a < 4)
+GROUP BY t1.a
+HAVING (t1.c > 1);
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a > 1) OR (t1.c < 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a < 4) AND (t1.c > 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a > 1) OR (t1.c < 3)) AND
+ (t1.a < 4) AND (t1.c > 1)
+GROUP BY t1.a,t1.c;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) OR (t1.a = 3)
+GROUP BY t1.a,t1.c
+HAVING (t1.a = 4) OR (t1.c > 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE ((t1.a = 1) OR (t1.a = 3)) AND
+ ((t1.a = 4) OR (t1.c > 1))
+GROUP BY t1.a,t1.c;
+eval $no_pushdown explain format=json $query;
+
+--echo # equality : pushdown through equality in WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 1);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c) AND (t1.c = 1)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # OR formula : pushdown through equality
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 1) OR (t1.c = 2);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c) AND
+ ((t1.c = 1) OR (t1.c = 2))
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # OR formula : pushdown through equality, impossible WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c)
+GROUP BY t1.a
+HAVING (t1.c = 3) OR (t1.c = 2);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.a = t1.c) AND
+ ((t1.c = 3) OR (t1.c = 2))
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # AND formula : pushdown through equality, impossible WHERE
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a,t1.c
+HAVING (t1.c = 3) AND (t1.a > 2) AND (t1.a = t1.c);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 3) AND
+ (t1.a > 2) AND (t1.a = t1.c)
+GROUP BY t1.a,t1.c;
+eval $no_pushdown explain format=json $query;
+
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1)
+GROUP BY t1.a
+HAVING (t1.c = 3) AND (t1.a > 2) AND (t1.a = t1.c);
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+WHERE (t1.a = 1) AND (t1.c = 3) AND
+ (t1.a > 2) AND (t1.a = t1.c)
+GROUP BY t1.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # AND formula with OR subformula : AND condition in WHERE
+let $query=
+SELECT t3.a,MAX(t3.b),t3.c,t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2))
+GROUP BY t3.a
+HAVING (t3.a = t3.d) AND ((t3.d = 1) OR (t3.d > 1));
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t3.a,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2)) AND
+ (t3.a = t3.d) AND ((t3.d = 1) OR (t3.d > 1))
+GROUP BY t3.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # AND formula with OR subformula : AND condition in WHERE
+let $query=
+SELECT t3.a,MAX(t3.b),t3.c,t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2))
+GROUP BY t3.a
+HAVING (t3.a = t3.d) AND (((t3.d = t3.c) AND (t3.c < 15)) OR (t3.d > 1));
+eval $no_pushdown $query;
+eval $query;
+eval explain $query;
+eval explain format=json $query;
+let $query=
+SELECT t3.a,t3.b,MAX(t3.c),t3.d
+FROM t3
+WHERE (t3.a > 1) AND ((t3.c = 3) OR (t3.c < 2)) AND
+ (t3.a = t3.d) AND (((t3.d = t3.c) AND (t3.c < 15)) OR (t3.d > 1))
+GROUP BY t3.a;
+eval $no_pushdown explain format=json $query;
+
+--echo # prepare statement
+PREPARE stmt1 from "
+SELECT t1.a,MAX(t1.b),t1.c
+FROM t1
+GROUP BY t1.a
+HAVING (t1.a = 1)
+";
+execute stmt1;
+execute stmt1;
+deallocate prepare stmt1;
+
+DROP TABLE t1,t3;
+
+
+--echo #
+--echo # MDEV-19185: pushdown constant function with subquery
+--echo #
+
+CREATE TABLE t1 (pk INT, c1 VARCHAR(64));
+INSERT INTO t1 VALUES (1,'bbb'),(2,'aaa'),(3,'ccc');
+CREATE VIEW v1 AS SELECT * FROM t1;
+
+SELECT pk
+FROM t1
+GROUP BY pk
+HAVING (1 NOT IN (SELECT COUNT(t1.c1) FROM (v1, t1)));
+
+DROP TABLE t1;
+DROP VIEW v1;
+
+
+--echo #
+--echo # MDEV-19186: temporary table defined with view field in HAVING
+--echo #
+
+CREATE TABLE t1 (pk INT, x VARCHAR(10));
+INSERT INTO t1 VALUES (1,'y'),(2,'s'),(3,'aaa');
+CREATE VIEW v1 AS SELECT * FROM t1;
+
+CREATE TABLE t2 (pk INT, x VARCHAR(10));
+INSERT INTO t2 VALUES (1,'aa'),(2,'t'),(3,'bb');
+
+CREATE TABLE tmp1
+SELECT v1.pk
+FROM t2,v1
+WHERE v1.x = t2.x
+GROUP BY v1.pk
+HAVING (v1.pk = 1);
+
+DROP TABLE t1,t2,tmp1;
+DROP VIEW v1;
+
+--echo #
+--echo # MDEV-19164: pushdown of condition with cached items
+--echo #
+
+create table t1 (d1 date);
+insert into t1 values (null),('1971-03-06'),('1993-06-05'),('1998-07-08');
+
+let $q1=
+select d1 from t1
+ group by d1
+ having d1 between (inet_aton('1978-04-27')) and '2018-08-26';
+
+eval $q1;
+eval explain extended $q1;
+eval explain format=json $q1;
+
+delete from t1;
+insert into t1 values ('2018-01-15'),('2018-02-20');
+
+let $q2=
+select d1 from t1
+ group by d1
+ having d1 not between 0 AND exp(0);
+
+eval $q2;
+eval explain extended $q2;
+eval explain format=json $q2;
+
+drop table t1;
+
+--echo #
+--echo # MDEV-19245: Impossible WHERE should be noticed earlier
+--echo # after HAVING pushdown
+--echo #
+
+CREATE TABLE t1 (a INT, b INT, c INT);
+INSERT INTO t1 VALUES (1,2,1),(3,2,2),(5,6,4),(3,4,1);
+
+EXPLAIN SELECT t1.a,MAX(t1.b) FROM t1
+WHERE t1.a > 3 GROUP BY t1.a HAVING t1.a = 3;
+EXPLAIN SELECT t1.a,MAX(t1.b) FROM t1
+WHERE t1.a = 3 GROUP BY t1.a HAVING t1.a > 3;
+EXPLAIN SELECT t1.a,MAX(t1.b) FROM t1
+WHERE t1.a > 3 AND t1.a = 3 GROUP BY t1.a ;
+
+EXPLAIN SELECT t1.a,MAX(t1.b) FROM t1
+WHERE (t1.a < 2 OR t1.c > 1) GROUP BY t1.a HAVING t1.a = 3;
+EXPLAIN SELECT t1.a,MAX(t1.b) FROM t1
+WHERE t1.a = 3 GROUP BY t1.a HAVING (t1.a < 2 OR t1.a > 3);
+EXPLAIN SELECT t1.a,MAX(t1.b) FROM t1
+WHERE t1.a = 3 AND (t1.a < 2 OR t1.a > 3) GROUP BY t1.a;
+
+EXPLAIN SELECT t1.a,MAX(t1.b),t1.c FROM t1
+WHERE (t1.a < 2 AND t1.c > 1) GROUP BY t1.a HAVING t1.a = 3;
+EXPLAIN SELECT t1.a,MAX(t1.b),t1.c FROM t1
+WHERE t1.a = 3 GROUP BY t1.a HAVING (t1.a < 2 AND t1.c > 1);
+EXPLAIN SELECT t1.a,MAX(t1.b),t1.c FROM t1
+WHERE t1.a = 3 AND (t1.a < 2 AND t1.b > 3) GROUP BY t1.a;
+
+DROP TABLE t1;
diff --git a/mysql-test/main/help.result b/mysql-test/main/help.result
index 66850e75dc1..130aacf6644 100644
--- a/mysql-test/main/help.result
+++ b/mysql-test/main/help.result
@@ -255,10 +255,10 @@ help 'impossible_category_1';
source_category_name name is_it_category
impossible_category_1 impossible_function_1 N
impossible_category_1 impossible_function_2 N
-alter table mysql.help_relation engine=myisam;
-alter table mysql.help_keyword engine=myisam;
-alter table mysql.help_topic engine=myisam;
-alter table mysql.help_category engine=myisam;
+alter table mysql.help_relation engine=aria;
+alter table mysql.help_keyword engine=aria;
+alter table mysql.help_topic engine=aria;
+alter table mysql.help_category engine=aria;
delete from mysql.help_topic where help_topic_id=@topic1_id;
delete from mysql.help_topic where help_topic_id=@topic2_id;
delete from mysql.help_topic where help_topic_id=@topic3_id;
diff --git a/mysql-test/main/help.test b/mysql-test/main/help.test
index 802f24f80a9..725eec2771f 100644
--- a/mysql-test/main/help.test
+++ b/mysql-test/main/help.test
@@ -98,10 +98,10 @@ help 'impossible_function_1';
help 'impossible_category_1';
##############
-alter table mysql.help_relation engine=myisam;
-alter table mysql.help_keyword engine=myisam;
-alter table mysql.help_topic engine=myisam;
-alter table mysql.help_category engine=myisam;
+alter table mysql.help_relation engine=aria;
+alter table mysql.help_keyword engine=aria;
+alter table mysql.help_topic engine=aria;
+alter table mysql.help_category engine=aria;
delete from mysql.help_topic where help_topic_id=@topic1_id;
delete from mysql.help_topic where help_topic_id=@topic2_id;
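The new in_subq_cond_pushdown.result file below targets the related condition_pushdown_for_subquery flag: an outer WHERE condition on the columns returned by a materialized IN subquery can be moved into the subquery itself. A minimal sketch under the same assumptions (outer_t and inner_t are illustrative names, not from the tests); with the flag on, the outer bound m < 25 is expected to reappear as a having_condition on MAX(inner_t.m) inside the subquery's query_block of EXPLAIN FORMAT=JSON:

CREATE TABLE outer_t (k INT, m INT);
CREATE TABLE inner_t (k INT, m INT);
INSERT INTO outer_t VALUES (1,10),(2,30);
INSERT INTO inner_t VALUES (1,10),(1,40),(2,30);

-- The outer m < 25 bound applies to MAX(inner_t.m), so with the flag on it
-- can be checked inside the materialized subquery rather than only on the
-- outer rows.
SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=on' FOR
EXPLAIN FORMAT=JSON
SELECT * FROM outer_t
WHERE outer_t.m < 25 AND
      (outer_t.k, outer_t.m) IN
      (SELECT inner_t.k, MAX(inner_t.m) FROM inner_t GROUP BY inner_t.k);

DROP TABLE outer_t, inner_t;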
diff --git a/mysql-test/main/in_subq_cond_pushdown.result b/mysql-test/main/in_subq_cond_pushdown.result
new file mode 100644
index 00000000000..eef320d2d04
--- /dev/null
+++ b/mysql-test/main/in_subq_cond_pushdown.result
@@ -0,0 +1,3889 @@
+CREATE TABLE t1 (a INT, b INT, c INT, d INT);
+CREATE TABLE t2 (e INT, f INT, g INT);
+CREATE TABLE t3 (x INT, y INT);
+INSERT INTO t1 VALUES
+(1,1,18,1), (2,1,25,1), (1,3,40,1), (2,3,40,4),
+(4,2,24,4), (3,2,23,1), (1,2,40,2), (3,4,17,2),
+(5,5,65,1), (2,3,70,3), (1,4,35,3), (2,3,25,3),
+(2,2,40,4), (1,4,55,1), (5,3,72,4), (1,2,70,5);
+INSERT INTO t2 VALUES
+(1,2,38), (2,3,15), (1,3,40), (1,4,35),
+(2,2,70), (3,4,23), (5,5,12), (5,4,17),
+(3,3,17), (4,2,24), (2,5,25), (5,1,65);
+INSERT INTO t3 VALUES
+(1,25), (1,18), (2,15), (4,24),
+(1,35), (3,23), (3,17), (2,15);
+CREATE VIEW v1 AS
+(
+SELECT t3.x AS v1_x, t3.y AS v1_y FROM t3 WHERE t3.x<=3
+);
+CREATE VIEW v2 AS
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.e
+HAVING max_g>25
+);
+# conjunctive subformula : pushing into HAVING
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.c<25 AND
+(t1.a,t1.c) IN (SELECT t2.e,MAX(t2.g) FROM t2 WHERE t2.e<5 GROUP BY t2.e);
+a b c d
+4 2 24 4
+3 2 23 1
+SELECT * FROM t1
+WHERE t1.c<25 AND
+(t1.a,t1.c) IN (SELECT t2.e,MAX(t2.g) FROM t2 WHERE t2.e<5 GROUP BY t2.e);
+a b c d
+4 2 24 4
+3 2 23 1
+EXPLAIN SELECT * FROM t1
+WHERE t1.c<25 AND
+(t1.a,t1.c) IN (SELECT t2.e,MAX(t2.g) FROM t2 WHERE t2.e<5 GROUP BY t2.e);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.c<25 AND
+(t1.a,t1.c) IN (SELECT t2.e,MAX(t2.g) FROM t2 WHERE t2.e<5 GROUP BY t2.e);
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.c < 25 and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "`MAX(t2.g)` < 25",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# extracted AND formula : pushing into HAVING
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.c>55 AND t1.b<4 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+2 3 70 3
+SELECT * FROM t1
+WHERE t1.c>55 AND t1.b<4 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+2 3 70 3
+EXPLAIN SELECT * FROM t1
+WHERE t1.c>55 AND t1.b<4 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.c>55 AND t1.b<4 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.c > 55 and t1.b < 4 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "`MAX(t2.g)` > 55 and t2.f < 4",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# extracted OR formula : pushing into HAVING
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.c>60 OR t1.c<25) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+4 2 24 4
+2 3 70 3
+SELECT * FROM t1
+WHERE (t1.c>60 OR t1.c<25) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+4 2 24 4
+2 3 70 3
+EXPLAIN SELECT * FROM t1
+WHERE (t1.c>60 OR t1.c<25) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.c>60 OR t1.c<25) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "(t1.c > 60 or t1.c < 25) and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "`MAX(t2.g)` > 60 or `MAX(t2.g)` < 25",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# extracted AND-OR formula : pushing into HAVING
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE ((t1.c>60 OR t1.c<25) AND t1.b>2) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+2 3 70 3
+SELECT * FROM t1
+WHERE ((t1.c>60 OR t1.c<25) AND t1.b>2) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+2 3 70 3
+EXPLAIN SELECT * FROM t1
+WHERE ((t1.c>60 OR t1.c<25) AND t1.b>2) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE ((t1.c>60 OR t1.c<25) AND t1.b>2) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "(t1.c > 60 or t1.c < 25) and t1.b > 2 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "(`MAX(t2.g)` > 60 or `MAX(t2.g)` < 25) and t2.f > 2",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into HAVING
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.d>3) AND t1.b>1) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+4 2 24 4
+1 2 40 2
+SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.d>3) AND t1.b>1) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+4 2 24 4
+1 2 40 2
+EXPLAIN SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.d>3) AND t1.b>1) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.d>3) AND t1.b>1) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "(t1.a < 2 or t1.d > 3) and t1.b > 1 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "t2.f > 1",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# using view IN subquery definition : pushing into HAVING
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.c>20 AND
+(t1.a,t1.c) IN
+(
+SELECT v1_x,MAX(v1_y)
+FROM v1
+WHERE v1_x>1
+GROUP BY v1_x
+)
+;
+a b c d
+3 2 23 1
+SELECT * FROM t1
+WHERE t1.c>20 AND
+(t1.a,t1.c) IN
+(
+SELECT v1_x,MAX(v1_y)
+FROM v1
+WHERE v1_x>1
+GROUP BY v1_x
+)
+;
+a b c d
+3 2 23 1
+EXPLAIN SELECT * FROM t1
+WHERE t1.c>20 AND
+(t1.a,t1.c) IN
+(
+SELECT v1_x,MAX(v1_y)
+FROM v1
+WHERE v1_x>1
+GROUP BY v1_x
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t3 ALL NULL NULL NULL NULL 8 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.c>20 AND
+(t1.a,t1.c) IN
+(
+SELECT v1_x,MAX(v1_y)
+FROM v1
+WHERE v1_x>1
+GROUP BY v1_x
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.c > 20 and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["v1_x", "MAX(v1_y)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "`MAX(v1_y)` > 20",
+ "temporary_table": {
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 8,
+ "filtered": 100,
+ "attached_condition": "t3.x > 1 and t3.x <= 3"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# using equality : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1,v1
+WHERE t1.c>20 AND t1.c=v1_y AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d v1_x v1_y
+3 2 23 1 3 23
+SELECT * FROM t1,v1
+WHERE t1.c>20 AND t1.c=v1_y AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d v1_x v1_y
+3 2 23 1 3 23
+EXPLAIN SELECT * FROM t1,v1
+WHERE t1.c>20 AND t1.c=v1_y AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t3 ALL NULL NULL NULL NULL 8 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t3.y 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1,v1
+WHERE t1.c>20 AND t1.c=v1_y AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 8,
+ "filtered": 100,
+ "attached_condition": "t3.y > 20 and t3.x <= 3 and t3.y is not null"
+ },
+ "block-nl-join": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100
+ },
+ "buffer_type": "flat",
+ "buffer_size": "119",
+ "join_type": "BNL",
+ "attached_condition": "t1.c = t3.y and t1.a is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t3.y"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "`MAX(t2.g)` > 20",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.a<2 AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 3 40 1
+1 2 40 2
+SELECT * FROM t1
+WHERE t1.a<2 AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 3 40 1
+1 2 40 2
+EXPLAIN SELECT * FROM t1
+WHERE t1.a<2 AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.a<2 AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a < 2 and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.e < 2"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# extracted AND formula : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.a>2 AND t1.a<5 AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+4 2 24 4
+3 2 23 1
+SELECT * FROM t1
+WHERE t1.a>2 AND t1.a<5 AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+4 2 24 4
+3 2 23 1
+EXPLAIN SELECT * FROM t1
+WHERE t1.a>2 AND t1.a<5 AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.a>2 AND t1.a<5 AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a > 2 and t1.a < 5 and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.e > 2 and t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# extracted OR formula : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.a<2 OR t1.a>=4) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 3 40 1
+4 2 24 4
+1 2 40 2
+SELECT * FROM t1
+WHERE (t1.a<2 OR t1.a>=4) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 3 40 1
+4 2 24 4
+1 2 40 2
+EXPLAIN SELECT * FROM t1
+WHERE (t1.a<2 OR t1.a>=4) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.a<2 OR t1.a>=4) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "(t1.a < 2 or t1.a >= 4) and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and (t2.e < 2 or t2.e >= 4)"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# extracted AND-OR formula : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.a=5) AND t1.b>3) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+a b c d
+1 4 35 3
+SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.a=5) AND t1.b>3) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+a b c d
+1 4 35 3
+EXPLAIN SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.a=5) AND t1.b>3) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.a=5) AND t1.b>3) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "(t1.a < 2 or t1.a = 5) and t1.b > 3 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and (t2.e < 2 or t2.e = 5) and t2.f > 3"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# extracted AND-OR formula : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.a=5) AND t1.b>3) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+a b c d
+1 4 35 3
+SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.a=5) AND t1.b>3) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+a b c d
+1 4 35 3
+EXPLAIN SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.a=5) AND t1.b>3) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.a=5) AND t1.b>3) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "(t1.a < 2 or t1.a = 5) and t1.b > 3 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and (t2.e < 2 or t2.e = 5) and t2.f > 3"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE ((t1.b<3 OR t1.d>2) AND t1.a<2) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 2 40 2
+SELECT * FROM t1
+WHERE ((t1.b<3 OR t1.d>2) AND t1.a<2) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 2 40 2
+EXPLAIN SELECT * FROM t1
+WHERE ((t1.b<3 OR t1.d>2) AND t1.a<2) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE ((t1.b<3 OR t1.d>2) AND t1.a<2) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "(t1.b < 3 or t1.d > 2) and t1.a < 2 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.e < 2"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# using equalities : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.d=1 AND t1.a=t1.d AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 3 40 1
+SELECT * FROM t1
+WHERE t1.d=1 AND t1.a=t1.d AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 3 40 1
+EXPLAIN SELECT * FROM t1
+WHERE t1.d=1 AND t1.a=t1.d AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 const,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.d=1 AND t1.a=t1.d AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a = 1 and t1.d = 1 and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["const", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e = 1"
+ }
+ }
+ }
+ }
+ }
+}
+# using equality : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.d>1 AND t1.a=t1.d AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+4 2 24 4
+SELECT * FROM t1
+WHERE t1.d>1 AND t1.a=t1.d AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+4 2 24 4
+EXPLAIN SELECT * FROM t1
+WHERE t1.d>1 AND t1.a=t1.d AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.d>1 AND t1.a=t1.d AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.d = t1.a and t1.a > 1 and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.e > 1"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# using view IN subquery definition : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.a<3 AND
+(t1.a,t1.c) IN
+(
+SELECT v1_x,MAX(v1_y)
+FROM v1
+WHERE v1_x>1
+GROUP BY v1_x
+)
+;
+a b c d
+SELECT * FROM t1
+WHERE t1.a<3 AND
+(t1.a,t1.c) IN
+(
+SELECT v1_x,MAX(v1_y)
+FROM v1
+WHERE v1_x>1
+GROUP BY v1_x
+)
+;
+a b c d
+EXPLAIN SELECT * FROM t1
+WHERE t1.a<3 AND
+(t1.a,t1.c) IN
+(
+SELECT v1_x,MAX(v1_y)
+FROM v1
+WHERE v1_x>1
+GROUP BY v1_x
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t3 ALL NULL NULL NULL NULL 8 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.a<3 AND
+(t1.a,t1.c) IN
+(
+SELECT v1_x,MAX(v1_y)
+FROM v1
+WHERE v1_x>1
+GROUP BY v1_x
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a < 3 and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["v1_x", "MAX(v1_y)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 8,
+ "filtered": 100,
+ "attached_condition": "t3.x > 1 and t3.x <= 3 and t3.x < 3"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# using equality : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1,v1
+WHERE t1.a=v1_x AND v1_x<2 AND v1_y>30 AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d v1_x v1_y
+1 3 40 1 1 35
+1 2 40 2 1 35
+SELECT * FROM t1,v1
+WHERE t1.a=v1_x AND v1_x<2 AND v1_y>30 AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d v1_x v1_y
+1 3 40 1 1 35
+1 2 40 2 1 35
+EXPLAIN SELECT * FROM t1,v1
+WHERE t1.a=v1_x AND v1_x<2 AND v1_y>30 AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t3 ALL NULL NULL NULL NULL 8 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t3.x,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1,v1
+WHERE t1.a=v1_x AND v1_x<2 AND v1_y>30 AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 8,
+ "filtered": 100,
+ "attached_condition": "t3.x < 2 and t3.y > 30 and t3.x <= 3 and t3.x is not null"
+ },
+ "block-nl-join": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100
+ },
+ "buffer_type": "flat",
+ "buffer_size": "119",
+ "join_type": "BNL",
+ "attached_condition": "t1.a = t3.x and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t3.x", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.e <= 3"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into WHERE
+# extracted OR formula : pushing into HAVING
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE ((t1.b<3 OR t1.b=4) AND t1.a<3) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 2 40 2
+SELECT * FROM t1
+WHERE ((t1.b<3 OR t1.b=4) AND t1.a<3) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 2 40 2
+EXPLAIN SELECT * FROM t1
+WHERE ((t1.b<3 OR t1.b=4) AND t1.a<3) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE ((t1.b<3 OR t1.b=4) AND t1.a<3) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "(t1.b < 3 or t1.b = 4) and t1.a < 3 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "t2.f < 3 or t2.f = 4",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.e < 3"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula using addition : pushing into HAVING
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.a+t1.c>41) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+2 3 70 3
+SELECT * FROM t1
+WHERE (t1.a+t1.c>41) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+2 3 70 3
+EXPLAIN SELECT * FROM t1
+WHERE (t1.a+t1.c>41) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.a+t1.c>41) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a + t1.c > 41 and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "t2.e + `MAX(t2.g)` > 41",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula using subtraction : pushing into HAVING
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.c-t1.a<35) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+4 2 24 4
+3 2 23 1
+SELECT * FROM t1
+WHERE (t1.c-t1.a<35) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+4 2 24 4
+3 2 23 1
+EXPLAIN SELECT * FROM t1
+WHERE (t1.c-t1.a<35) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.c-t1.a<35) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.c - t1.a < 35 and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "`MAX(t2.g)` - t2.e < 35",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula using multiplication : pushing into HAVING
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.c*t1.a>100) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+2 3 70 3
+SELECT * FROM t1
+WHERE (t1.c*t1.a>100) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+2 3 70 3
+EXPLAIN SELECT * FROM t1
+WHERE (t1.c*t1.a>100) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.c*t1.a>100) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.c * t1.a > 100 and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "`MAX(t2.g)` * t2.e > 100",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula using division : pushing into HAVING
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.c/t1.a>30) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 3 40 1
+1 2 40 2
+2 3 70 3
+SELECT * FROM t1
+WHERE (t1.c/t1.a>30) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 3 40 1
+1 2 40 2
+2 3 70 3
+EXPLAIN SELECT * FROM t1
+WHERE (t1.c/t1.a>30) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.c/t1.a>30) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.c / t1.a > 30 and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "`MAX(t2.g)` / t2.e > 30",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula using BETWEEN : pushing into HAVING
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.c BETWEEN 50 AND 100) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+2 3 70 3
+SELECT * FROM t1
+WHERE (t1.c BETWEEN 50 AND 100) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+2 3 70 3
+EXPLAIN SELECT * FROM t1
+WHERE (t1.c BETWEEN 50 AND 100) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.c BETWEEN 50 AND 100) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.c between 50 and 100 and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "`MAX(t2.g)` between 50 and 100",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula using addition : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.a+t1.b > 5) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+a b c d
+4 2 24 4
+SELECT * FROM t1
+WHERE (t1.a+t1.b > 5) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+a b c d
+4 2 24 4
+EXPLAIN SELECT * FROM t1
+WHERE (t1.a+t1.b > 5) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.a+t1.b > 5) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a + t1.b > 5 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.e + t2.f > 5"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula using subtraction : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.a-t1.b > 0) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+a b c d
+4 2 24 4
+SELECT * FROM t1
+WHERE (t1.a-t1.b > 0) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+a b c d
+4 2 24 4
+EXPLAIN SELECT * FROM t1
+WHERE (t1.a-t1.b > 0) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.a-t1.b > 0) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a - t1.b > 0 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.e - t2.f > 0"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula using multiplication : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.a*t1.b > 6) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+a b c d
+4 2 24 4
+SELECT * FROM t1
+WHERE (t1.a*t1.b > 6) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+a b c d
+4 2 24 4
+EXPLAIN SELECT * FROM t1
+WHERE (t1.a*t1.b > 6) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.a*t1.b > 6) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a * t1.b > 6 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.e * t2.f > 6"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula using division : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.b/t1.a > 2) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+a b c d
+1 3 40 1
+1 4 35 3
+SELECT * FROM t1
+WHERE (t1.b/t1.a > 2) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+a b c d
+1 3 40 1
+1 4 35 3
+EXPLAIN SELECT * FROM t1
+WHERE (t1.b/t1.a > 2) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.b/t1.a > 2) AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e,t2.f
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.b / t1.a > 2 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.f / t2.e > 2"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula using BETWEEN : pushing into WHERE
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.a BETWEEN 1 AND 3) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 3 40 1
+3 2 23 1
+1 2 40 2
+2 3 70 3
+SELECT * FROM t1
+WHERE (t1.a BETWEEN 1 AND 3) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+a b c d
+1 3 40 1
+3 2 23 1
+1 2 40 2
+2 3 70 3
+EXPLAIN SELECT * FROM t1
+WHERE (t1.a BETWEEN 1 AND 3) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.a,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.a BETWEEN 1 AND 3) AND
+(t1.a,t1.c) IN
+(
+SELECT t2.e,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a between 1 and 3 and t1.a is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["e", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.e between 1 and 3"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into HAVING of the IN subquery
+# conjunctive subformula : pushing into WHERE of the view from the IN subquery
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.c>3 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT v2.e,MAX(v2.f),v2.max_g
+FROM v2
+WHERE v2.e<5
+GROUP BY v2.e
+)
+;
+a b c d
+1 2 40 2
+2 3 70 3
+SELECT * FROM t1
+WHERE t1.c>3 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT v2.e,MAX(v2.f),v2.max_g
+FROM v2
+WHERE v2.e<5
+GROUP BY v2.e
+)
+;
+a b c d
+1 2 40 2
+2 3 70 3
+EXPLAIN SELECT * FROM t1
+WHERE t1.c>3 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT v2.e,MAX(v2.f),v2.max_g
+FROM v2
+WHERE v2.e<5
+GROUP BY v2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 12 Using where; Using temporary
+3 DERIVED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary; Using filesort
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.c>3 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT v2.e,MAX(v2.f),v2.max_g
+FROM v2
+WHERE v2.e<5
+GROUP BY v2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.c > 3 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "MAX(v2.f)", "max_g"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "v2.max_g > 3",
+ "temporary_table": {
+ "table": {
+ "table_name": "<derived3>",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "v2.e < 5",
+ "materialized": {
+ "query_block": {
+ "select_id": 3,
+ "having_condition": "max_g > 25",
+ "filesort": {
+ "sort_key": "t2.e",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into WHERE of the IN subquery
+# conjunctive subformula : pushing into WHERE of the view
+# from the IN subquery
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT v2.e,MAX(v2.f),v2.max_g
+FROM v2
+WHERE v2.e<5
+GROUP BY v2.e
+)
+;
+a b c d
+2 3 70 3
+SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT v2.e,MAX(v2.f),v2.max_g
+FROM v2
+WHERE v2.e<5
+GROUP BY v2.e
+)
+;
+a b c d
+2 3 70 3
+EXPLAIN SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT v2.e,MAX(v2.f),v2.max_g
+FROM v2
+WHERE v2.e<5
+GROUP BY v2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 12 Using where; Using temporary
+3 DERIVED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary; Using filesort
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT v2.e,MAX(v2.f),v2.max_g
+FROM v2
+WHERE v2.e<5
+GROUP BY v2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "MAX(v2.f)", "max_g"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "<derived3>",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "v2.e < 5 and v2.e > 1",
+ "materialized": {
+ "query_block": {
+ "select_id": 3,
+ "having_condition": "max_g > 25",
+ "filesort": {
+ "sort_key": "t2.e",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.e > 1"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into WHERE and HAVING
+# of the IN subquery
+# conjunctive subformula : pushing into WHERE of the view
+# from the IN subquery
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.a>1 AND t1.c<100 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT v2.e,MAX(v2.f),v2.max_g
+FROM v2
+WHERE v2.e<5
+GROUP BY v2.e
+)
+;
+a b c d
+2 3 70 3
+SELECT * FROM t1
+WHERE t1.a>1 AND t1.c<100 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT v2.e,MAX(v2.f),v2.max_g
+FROM v2
+WHERE v2.e<5
+GROUP BY v2.e
+)
+;
+a b c d
+2 3 70 3
+EXPLAIN SELECT * FROM t1
+WHERE t1.a>1 AND t1.c<100 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT v2.e,MAX(v2.f),v2.max_g
+FROM v2
+WHERE v2.e<5
+GROUP BY v2.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 12 Using where; Using temporary
+3 DERIVED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary; Using filesort
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.a>1 AND t1.c<100 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT v2.e,MAX(v2.f),v2.max_g
+FROM v2
+WHERE v2.e<5
+GROUP BY v2.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.c < 100 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "MAX(v2.f)", "max_g"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "v2.max_g < 100",
+ "temporary_table": {
+ "table": {
+ "table_name": "<derived3>",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "v2.e < 5 and v2.e > 1",
+ "materialized": {
+ "query_block": {
+ "select_id": 3,
+ "having_condition": "max_g > 25",
+ "filesort": {
+ "sort_key": "t2.e",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.e > 1"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into WHERE of the IN subquery
+# extracted AND formula : pushing into HAVING of the derived table
+# from the IN subquery
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+FROM
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.f
+HAVING max_g>25
+) as d_tab
+WHERE d_tab.e<5
+GROUP BY d_tab.e
+)
+;
+a b c d
+2 3 40 4
+SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+FROM
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.f
+HAVING max_g>25
+) as d_tab
+WHERE d_tab.e<5
+GROUP BY d_tab.e
+)
+;
+a b c d
+2 3 40 4
+EXPLAIN SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+FROM
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.f
+HAVING max_g>25
+) as d_tab
+WHERE d_tab.e<5
+GROUP BY d_tab.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 12 Using where; Using temporary
+3 DERIVED t2 ALL NULL NULL NULL NULL 12 Using temporary; Using filesort
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+FROM
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.f
+HAVING max_g>25
+) as d_tab
+WHERE d_tab.e<5
+GROUP BY d_tab.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "MAX(d_tab.f)", "max_g"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "<derived3>",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "d_tab.e < 5 and d_tab.e > 1",
+ "materialized": {
+ "query_block": {
+ "select_id": 3,
+ "having_condition": "max_g > 25 and t2.e < 5 and t2.e > 1",
+ "filesort": {
+ "sort_key": "t2.f",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into HAVING of the derived table
+# conjunctive subformula : pushing into WHERE of the IN subquery from
+# the derived table
+SELECT *
+FROM t3,
+(
+SELECT t1.a,t1.b,max(t1.c) as max_c
+FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.e<5
+GROUP BY t2.e
+)
+GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.b>2;
+x y a b max_c
+2 15 2 3 70
+2 15 2 3 70
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+FROM
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.f
+HAVING max_g>25
+) as d_tab
+WHERE d_tab.e<5
+GROUP BY d_tab.e
+)
+;
+a b c d
+2 3 40 4
+SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+FROM
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.f
+HAVING max_g>25
+) as d_tab
+WHERE d_tab.e<5
+GROUP BY d_tab.e
+)
+;
+a b c d
+2 3 40 4
+EXPLAIN SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+FROM
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.f
+HAVING max_g>25
+) as d_tab
+WHERE d_tab.e<5
+GROUP BY d_tab.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 12 Using where; Using temporary
+3 DERIVED t2 ALL NULL NULL NULL NULL 12 Using temporary; Using filesort
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+FROM
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.f
+HAVING max_g>25
+) as d_tab
+WHERE d_tab.e<5
+GROUP BY d_tab.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "MAX(d_tab.f)", "max_g"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "<derived3>",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "d_tab.e < 5 and d_tab.e > 1",
+ "materialized": {
+ "query_block": {
+ "select_id": 3,
+ "having_condition": "max_g > 25 and t2.e < 5 and t2.e > 1",
+ "filesort": {
+ "sort_key": "t2.f",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into WHERE of the derived table
+# extracted AND formula : pushing into WHERE of the IN subquery from
+# the derived table
+SELECT *
+FROM t3,
+(
+SELECT t1.a,t1.b,max(t1.c) as max_c
+FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+GROUP BY t2.e
+HAVING t2.f<5
+)
+GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5;
+x y a b max_c
+2 15 2 3 70
+4 24 4 2 24
+2 15 2 3 70
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+FROM
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.f
+HAVING max_g>25
+) as d_tab
+WHERE d_tab.e<5
+GROUP BY d_tab.e
+)
+;
+a b c d
+2 3 40 4
+SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+FROM
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.f
+HAVING max_g>25
+) as d_tab
+WHERE d_tab.e<5
+GROUP BY d_tab.e
+)
+;
+a b c d
+2 3 40 4
+EXPLAIN SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+FROM
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.f
+HAVING max_g>25
+) as d_tab
+WHERE d_tab.e<5
+GROUP BY d_tab.e
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+2 MATERIALIZED <derived3> ALL NULL NULL NULL NULL 12 Using where; Using temporary
+3 DERIVED t2 ALL NULL NULL NULL NULL 12 Using temporary; Using filesort
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+FROM
+(
+SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+FROM t2
+GROUP BY t2.f
+HAVING max_g>25
+) as d_tab
+WHERE d_tab.e<5
+GROUP BY d_tab.e
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "MAX(d_tab.f)", "max_g"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "temporary_table": {
+ "table": {
+ "table_name": "<derived3>",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "d_tab.e < 5 and d_tab.e > 1",
+ "materialized": {
+ "query_block": {
+ "select_id": 3,
+ "having_condition": "max_g > 25 and t2.e < 5 and t2.e > 1",
+ "filesort": {
+ "sort_key": "t2.f",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into WHERE and HAVING
+# of the derived table
+# extracted AND formula : pushing into WHERE of the IN subquery
+# from the derived table
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT *
+FROM t3,
+(
+SELECT t1.a,t1.b,max(t1.c) as max_c
+FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+GROUP BY t2.e
+HAVING t2.f<5
+)
+GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5 AND d_tab.max_c<70;
+x y a b max_c
+4 24 4 2 24
+SELECT *
+FROM t3,
+(
+SELECT t1.a,t1.b,max(t1.c) as max_c
+FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+GROUP BY t2.e
+HAVING t2.f<5
+)
+GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5 AND d_tab.max_c<70;
+x y a b max_c
+4 24 4 2 24
+EXPLAIN SELECT *
+FROM t3,
+(
+SELECT t1.a,t1.b,max(t1.c) as max_c
+FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+GROUP BY t2.e
+HAVING t2.f<5
+)
+GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5 AND d_tab.max_c<70;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t3 ALL NULL NULL NULL NULL 8 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t3.x 2 Using where
+2 DERIVED t1 ALL NULL NULL NULL NULL 16 Using where; Using temporary; Using filesort
+2 DERIVED <subquery3> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+3 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT *
+FROM t3,
+(
+SELECT t1.a,t1.b,max(t1.c) as max_c
+FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+GROUP BY t2.e
+HAVING t2.f<5
+)
+GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5 AND d_tab.max_c<70;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 8,
+ "filtered": 100,
+ "attached_condition": "t3.x < 5 and t3.x is not null"
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t3.x"],
+ "rows": 2,
+ "filtered": 100,
+ "attached_condition": "d_tab.max_c < 70",
+ "materialized": {
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "max_c < 70",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.a < 5 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery3>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 3,
+ "having_condition": "t2.f < 5",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e > 1 and t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into WHERE of the derived table
+# conjunctive subformula : pushing into HAVING of the IN subquery from
+# the derived table
+SELECT *
+FROM t3,
+(
+SELECT t1.a,t1.b,max(t1.c) as max_c
+FROM t1
+WHERE (t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+WHERE t2.f<4
+GROUP BY t2.f
+)
+GROUP BY t1.a
+HAVING t1.b<5
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5;
+x y a b max_c
+1 25 1 2 70
+1 18 1 2 70
+2 15 2 3 40
+1 35 1 2 70
+2 15 2 3 40
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT *
+FROM t3,
+(
+SELECT t1.a,t1.b,max(t1.c) as max_c
+FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+GROUP BY t2.e
+HAVING t2.f<5
+)
+GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5 AND d_tab.max_c<70;
+x y a b max_c
+4 24 4 2 24
+SELECT *
+FROM t3,
+(
+SELECT t1.a,t1.b,max(t1.c) as max_c
+FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+GROUP BY t2.e
+HAVING t2.f<5
+)
+GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5 AND d_tab.max_c<70;
+x y a b max_c
+4 24 4 2 24
+EXPLAIN SELECT *
+FROM t3,
+(
+SELECT t1.a,t1.b,max(t1.c) as max_c
+FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+GROUP BY t2.e
+HAVING t2.f<5
+)
+GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5 AND d_tab.max_c<70;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t3 ALL NULL NULL NULL NULL 8 Using where
+1 PRIMARY <derived2> ref key0 key0 5 test.t3.x 2 Using where
+2 DERIVED t1 ALL NULL NULL NULL NULL 16 Using where; Using temporary; Using filesort
+2 DERIVED <subquery3> eq_ref distinct_key distinct_key 12 test.t1.a,test.t1.b,test.t1.c 1
+3 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT *
+FROM t3,
+(
+SELECT t1.a,t1.b,max(t1.c) as max_c
+FROM t1
+WHERE t1.a>1 AND
+(t1.a,t1.b,t1.c) IN
+(
+SELECT t2.e,t2.f,MAX(t2.g)
+FROM t2
+GROUP BY t2.e
+HAVING t2.f<5
+)
+GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5 AND d_tab.max_c<70;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 8,
+ "filtered": 100,
+ "attached_condition": "t3.x < 5 and t3.x is not null"
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "5",
+ "used_key_parts": ["a"],
+ "ref": ["test.t3.x"],
+ "rows": 2,
+ "filtered": 100,
+ "attached_condition": "d_tab.max_c < 70",
+ "materialized": {
+ "query_block": {
+ "select_id": 2,
+ "having_condition": "max_c < 70",
+ "filesort": {
+ "sort_key": "t1.a",
+ "temporary_table": {
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.a > 1 and t1.a < 5 and t1.a is not null and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery3>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["e", "f", "MAX(t2.g)"],
+ "ref": ["test.t1.a", "test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 3,
+ "having_condition": "t2.f < 5",
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e > 1 and t2.e < 5"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into WHERE
+# using WINDOW FUNCTIONS : using MAX function
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.b>1) AND
+(t1.b, t1.c) IN
+(
+SELECT t2.f, MAX(t2.g) OVER (PARTITION BY t2.f)
+FROM t2
+WHERE t2.e<5
+)
+;
+a b c d
+1 3 40 1
+2 3 40 4
+1 4 35 3
+1 2 70 5
+SELECT * FROM t1
+WHERE (t1.b>1) AND
+(t1.b, t1.c) IN
+(
+SELECT t2.f, MAX(t2.g) OVER (PARTITION BY t2.f)
+FROM t2
+WHERE t2.e<5
+)
+;
+a b c d
+1 3 40 1
+2 3 40 4
+1 4 35 3
+1 2 70 5
+EXPLAIN SELECT * FROM t1
+WHERE (t1.b>1) AND
+(t1.b, t1.c) IN
+(
+SELECT t2.f, MAX(t2.g) OVER (PARTITION BY t2.f)
+FROM t2
+WHERE t2.e<5
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 test.t1.b,test.t1.c 1
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.b>1) AND
+(t1.b, t1.c) IN
+(
+SELECT t2.f, MAX(t2.g) OVER (PARTITION BY t2.f)
+FROM t2
+WHERE t2.e<5
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.b > 1 and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "8",
+ "used_key_parts": ["f", "MAX(t2.g) OVER (PARTITION BY t2.f)"],
+ "ref": ["test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "window_functions_computation": {
+ "sorts": {
+ "filesort": {
+ "sort_key": "t2.f"
+ }
+ },
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.f > 1"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+# conjunctive subformula : pushing into WHERE
+# using WINDOW FUNCTIONS : using SUM function
+SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR SELECT * FROM t1
+WHERE (t1.b>1) AND
+(t1.b, t1.c) IN
+(
+SELECT t2.f, CAST(SUM(t2.g) OVER (PARTITION BY t2.f) AS INT)
+FROM t2
+WHERE t2.e<5
+)
+;
+a b c d
+5 3 72 4
+SELECT * FROM t1
+WHERE (t1.b>1) AND
+(t1.b, t1.c) IN
+(
+SELECT t2.f, CAST(SUM(t2.g) OVER (PARTITION BY t2.f) AS INT)
+FROM t2
+WHERE t2.e<5
+)
+;
+a b c d
+5 3 72 4
+EXPLAIN SELECT * FROM t1
+WHERE (t1.b>1) AND
+(t1.b, t1.c) IN
+(
+SELECT t2.f, CAST(SUM(t2.g) OVER (PARTITION BY t2.f) AS INT)
+FROM t2
+WHERE t2.e<5
+)
+;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 12 test.t1.b,test.t1.c 1 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 12 Using where; Using temporary
+EXPLAIN FORMAT=JSON SELECT * FROM t1
+WHERE (t1.b>1) AND
+(t1.b, t1.c) IN
+(
+SELECT t2.f, CAST(SUM(t2.g) OVER (PARTITION BY t2.f) AS INT)
+FROM t2
+WHERE t2.e<5
+)
+;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "ALL",
+ "rows": 16,
+ "filtered": 100,
+ "attached_condition": "t1.b > 1 and t1.b is not null and t1.c is not null"
+ },
+ "table": {
+ "table_name": "<subquery2>",
+ "access_type": "eq_ref",
+ "possible_keys": ["distinct_key"],
+ "key": "distinct_key",
+ "key_length": "12",
+ "used_key_parts": ["f", "CAST(SUM(t2.g) OVER (PARTITION BY t2.f) AS INT)"],
+ "ref": ["test.t1.b", "test.t1.c"],
+ "rows": 1,
+ "filtered": 100,
+ "attached_condition": "t1.c = `<subquery2>`.`CAST(SUM(t2.g) OVER (PARTITION BY t2.f) AS INT)`",
+ "materialized": {
+ "unique": 1,
+ "query_block": {
+ "select_id": 2,
+ "window_functions_computation": {
+ "sorts": {
+ "filesort": {
+ "sort_key": "t2.f"
+ }
+ },
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 12,
+ "filtered": 100,
+ "attached_condition": "t2.e < 5 and t2.f > 1"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+DROP TABLE t1,t2,t3;
+DROP VIEW v1,v2;
+#
+# MDEV-16721: IN-subquery defined with the AUTO-INCREMENT column
+# and used with the ZEROFILL column
+#
+CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY);
+CREATE TABLE t2 (b INT ZEROFILL);
+INSERT INTO t2 VALUES (2), (3);
+SELECT *
+FROM t2
+WHERE t2.b IN (SELECT MIN(t1.a) from t1);
+b
+DROP TABLE t1, t2;
+#
+# MDEV-16730: server crash caused by pushdown into the derived table
+# of a condition that joins the IN subquery and the parent select
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1), (2), (3);
+SELECT *
+FROM (SELECT DISTINCT * FROM t1) AS tbl
+WHERE tbl.a IN
+(
+SELECT COUNT(t1.a)
+FROM t1
+WHERE (t1.a!=1)
+);
+a
+2
+DROP TABLE t1;
+#
+# MDEV-16727: assertion failure caused by an improperly saved list
+# of multiple equalities
+#
+CREATE TABLE t1 (a varchar(1));
+INSERT INTO t1 VALUES ('x'), ('y'), ('z');
+CREATE TABLE t2 (b varchar(1));
+INSERT INTO t2 VALUES ('x');
+CREATE TABLE t3 (c varchar(1));
+INSERT INTO t3 VALUES ('y');
+CREATE TABLE t4 (d varchar(1));
+INSERT INTO t4 VALUES ('x'), ('z');
+SELECT * FROM t1
+JOIN t2 ON (t1.a=t2.b)
+LEFT JOIN t3 ON (t1.a=t3.c)
+WHERE (t1.a) IN
+(
+SELECT t4.d
+FROM t4
+ORDER BY t4.d
+);
+a b c
+x x NULL
+DROP TABLE t1,t2,t3,t4;
+#
+# MDEV-17360: IN subquery predicate with outer reference in the left part
+# that refers to a field of a mergeable derived table
+#
+CREATE TABLE t1 (id1 int) ENGINE=MYISAM;
+INSERT INTO t1 VALUES (1814),(0),(NULL),(1);
+CREATE TABLE t2 (id2 int) ENGINE=MYISAM;
+SELECT 1 AS r FROM t2,t1,(SELECT * FROM t1) dt1
+WHERE NOT EXISTS (SELECT id2 FROM t2
+WHERE dt1.id1 IN (SELECT t2.id2 FROM t2
+HAVING t2.id2 >= 1));
+r
+DROP TABLE t1,t2;
+#
+# MDEV-17027: IN subquery predicate with outer reference in the left part
+# conjoined with an equality predicate
+#
+CREATE TABLE t1 (pk int, i1 int, v1 varchar(1));
+INSERT INTO t1 VALUES (3,2,'x'), (1,1,'y'), (4,2,'z');
+CREATE TABLE t2 (pk int, i1 int, v1 varchar(1));
+INSERT INTO t2 VALUES (5,2,'x'), (7,1,'x');
+CREATE TABLE t3 (pk int, i1 int, v1 varchar(1));
+INSERT INTO t3 VALUES (8,2,'x'), (7,1,'z');
+SELECT t3.i1 FROM t3
+WHERE EXISTS ( SELECT t2.v1 FROM t1,t2
+WHERE t1.v1 = t2.v1 AND
+t3.i1 IN (SELECT t.i1 FROM t1 as t
+GROUP BY i1 HAVING t.i1 < 3));
+i1
+2
+1
+DROP TABLE t1,t2,t3;
diff --git a/mysql-test/main/in_subq_cond_pushdown.test b/mysql-test/main/in_subq_cond_pushdown.test
new file mode 100644
index 00000000000..7763201cda1
--- /dev/null
+++ b/mysql-test/main/in_subq_cond_pushdown.test
@@ -0,0 +1,862 @@
+LET $no_pushdown=
+ SET STATEMENT optimizer_switch='condition_pushdown_for_subquery=off' FOR;
+
+CREATE TABLE t1 (a INT, b INT, c INT, d INT);
+CREATE TABLE t2 (e INT, f INT, g INT);
+CREATE TABLE t3 (x INT, y INT);
+
+INSERT INTO t1 VALUES
+(1,1,18,1), (2,1,25,1), (1,3,40,1), (2,3,40,4),
+(4,2,24,4), (3,2,23,1), (1,2,40,2), (3,4,17,2),
+(5,5,65,1), (2,3,70,3), (1,4,35,3), (2,3,25,3),
+(2,2,40,4), (1,4,55,1), (5,3,72,4), (1,2,70,5);
+
+INSERT INTO t2 VALUES
+(1,2,38), (2,3,15), (1,3,40), (1,4,35),
+(2,2,70), (3,4,23), (5,5,12), (5,4,17),
+(3,3,17), (4,2,24), (2,5,25), (5,1,65);
+
+INSERT INTO t3 VALUES
+(1,25), (1,18), (2,15), (4,24),
+(1,35), (3,23), (3,17), (2,15);
+
+CREATE VIEW v1 AS
+(
+ SELECT t3.x AS v1_x, t3.y AS v1_y FROM t3 WHERE t3.x<=3
+);
+
+CREATE VIEW v2 AS
+(
+ SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+ FROM t2
+ GROUP BY t2.e
+ HAVING max_g>25
+);
+
+--echo # conjunctive subformula : pushing into HAVING
+LET $query=
+SELECT * FROM t1
+WHERE t1.c<25 AND
+ (t1.a,t1.c) IN (SELECT t2.e,MAX(t2.g) FROM t2 WHERE t2.e<5 GROUP BY t2.e);
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # extracted AND formula : pushing into HAVING
+LET $query=
+SELECT * FROM t1
+WHERE t1.c>55 AND t1.b<4 AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # extracted OR formula : pushing into HAVING
+LET $query=
+SELECT * FROM t1
+WHERE (t1.c>60 OR t1.c<25) AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # extracted AND-OR formula : pushing into HAVING
+LET $query=
+SELECT * FROM t1
+WHERE ((t1.c>60 OR t1.c<25) AND t1.b>2) AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into HAVING
+LET $query=
+SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.d>3) AND t1.b>1) AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # using view IN subquery definition : pushing into HAVING
+LET $query=
+SELECT * FROM t1
+WHERE t1.c>20 AND
+ (t1.a,t1.c) IN
+ (
+ SELECT v1_x,MAX(v1_y)
+ FROM v1
+ WHERE v1_x>1
+ GROUP BY v1_x
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # using equality : pushing into WHERE
+LET $query=
+SELECT * FROM t1,v1
+WHERE t1.c>20 AND t1.c=v1_y AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE t1.a<2 AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # extracted AND formula : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE t1.a>2 AND t1.a<5 AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # extracted OR formula : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE (t1.a<2 OR t1.a>=4) AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # extracted AND-OR formula : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.a=5) AND t1.b>3) AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e,t2.f
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # extracted AND-OR formula : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE ((t1.a<2 OR t1.a=5) AND t1.b>3) AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e,t2.f
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE ((t1.b<3 OR t1.d>2) AND t1.a<2) AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # using equalities : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE t1.d=1 AND t1.a=t1.d AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # using equality : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE t1.d>1 AND t1.a=t1.d AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # using view IN subquery definition : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE t1.a<3 AND
+ (t1.a,t1.c) IN
+ (
+ SELECT v1_x,MAX(v1_y)
+ FROM v1
+ WHERE v1_x>1
+ GROUP BY v1_x
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # using equality : pushing into WHERE
+LET $query=
+SELECT * FROM t1,v1
+WHERE t1.a=v1_x AND v1_x<2 AND v1_y>30 AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into WHERE
+--echo # extracted OR formula : pushing into HAVING
+LET $query=
+SELECT * FROM t1
+WHERE ((t1.b<3 OR t1.b=4) AND t1.a<3) AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula using addition : pushing into HAVING
+LET $query=
+SELECT * FROM t1
+WHERE (t1.a+t1.c>41) AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula using subtraction : pushing into HAVING
+LET $query=
+SELECT * FROM t1
+WHERE (t1.c-t1.a<35) AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula using multiplication : pushing into HAVING
+LET $query=
+SELECT * FROM t1
+WHERE (t1.c*t1.a>100) AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula using division : pushing into HAVING
+LET $query=
+SELECT * FROM t1
+WHERE (t1.c/t1.a>30) AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula using BETWEEN : pushing into HAVING
+LET $query=
+SELECT * FROM t1
+WHERE (t1.c BETWEEN 50 AND 100) AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula using addition : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE (t1.a+t1.b > 5) AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e,t2.f
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula using subtraction : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE (t1.a-t1.b > 0) AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e,t2.f
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula using multiplication : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE (t1.a*t1.b > 6) AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e,t2.f
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula using division : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE (t1.b/t1.a > 2) AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e,t2.f
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula using BETWEEN : pushing into WHERE
+LET $query=
+SELECT * FROM t1
+WHERE (t1.a BETWEEN 1 AND 3) AND
+ (t1.a,t1.c) IN
+ (
+ SELECT t2.e,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into HAVING of the IN subquery
+--echo # conjunctive subformula : pushing into WHERE of the view from the IN subquery
+LET $query=
+SELECT * FROM t1
+WHERE t1.c>3 AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT v2.e,MAX(v2.f),v2.max_g
+ FROM v2
+ WHERE v2.e<5
+ GROUP BY v2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into WHERE of the IN subquery
+--echo # conjunctive subformula : pushing into WHERE of the view
+--echo # from the IN subquery
+LET $query=
+SELECT * FROM t1
+WHERE t1.a>1 AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT v2.e,MAX(v2.f),v2.max_g
+ FROM v2
+ WHERE v2.e<5
+ GROUP BY v2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into WHERE and HAVING
+--echo # of the IN subquery
+--echo # conjunctive subformula : pushing into WHERE of the view
+--echo # from the IN subquery
+LET $query=
+SELECT * FROM t1
+WHERE t1.a>1 AND t1.c<100 AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT v2.e,MAX(v2.f),v2.max_g
+ FROM v2
+ WHERE v2.e<5
+ GROUP BY v2.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into WHERE of the IN subquery
+--echo # extracted AND formula : pushing into HAVING of the derived table
+--echo # from the IN subquery
+LET $query=
+SELECT * FROM t1
+WHERE t1.a>1 AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT d_tab.e,MAX(d_tab.f),d_tab.max_g
+ FROM
+ (
+ SELECT t2.e, t2.f, MAX(t2.g) AS max_g
+ FROM t2
+ GROUP BY t2.f
+ HAVING max_g>25
+ ) as d_tab
+ WHERE d_tab.e<5
+ GROUP BY d_tab.e
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into HAVING of the derived table
+--echo # conjunctive subformula : pushing into WHERE of the IN subquery from
+--echo # the derived table
+SELECT *
+FROM t3,
+(
+ SELECT t1.a,t1.b,max(t1.c) as max_c
+ FROM t1
+ WHERE t1.a>1 AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.e<5
+ GROUP BY t2.e
+ )
+ GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.b>2;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into WHERE of the derived table
+--echo # extracted AND formula : pushing into WHERE of the IN subquery from
+--echo # the derived table
+SELECT *
+FROM t3,
+(
+ SELECT t1.a,t1.b,max(t1.c) as max_c
+ FROM t1
+ WHERE t1.a>1 AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ GROUP BY t2.e
+ HAVING t2.f<5
+ )
+ GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into WHERE and HAVING
+--echo # of the derived table
+--echo # extracted AND formula : pushing into WHERE of the IN subquery
+--echo # from the derived table
+LET $query=
+SELECT *
+FROM t3,
+(
+ SELECT t1.a,t1.b,max(t1.c) as max_c
+ FROM t1
+ WHERE t1.a>1 AND
+ (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ GROUP BY t2.e
+ HAVING t2.f<5
+ )
+ GROUP BY t1.a
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5 AND d_tab.max_c<70;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into WHERE of the derived table
+--echo # conjunctive subformula : pushing into HAVING of the IN subquery from
+--echo # the derived table
+SELECT *
+FROM t3,
+(
+ SELECT t1.a,t1.b,max(t1.c) as max_c
+ FROM t1
+ WHERE (t1.a,t1.b,t1.c) IN
+ (
+ SELECT t2.e,t2.f,MAX(t2.g)
+ FROM t2
+ WHERE t2.f<4
+ GROUP BY t2.f
+ )
+ GROUP BY t1.a
+ HAVING t1.b<5
+) AS d_tab
+WHERE d_tab.a=t3.x AND d_tab.a<5;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into WHERE
+--echo # using WINDOW FUNCTIONS : using MAX function
+LET $query=
+SELECT * FROM t1
+WHERE (t1.b>1) AND
+ (t1.b, t1.c) IN
+ (
+ SELECT t2.f, MAX(t2.g) OVER (PARTITION BY t2.f)
+ FROM t2
+ WHERE t2.e<5
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+--echo # conjunctive subformula : pushing into WHERE
+--echo # using WINDOW FUNCTIONS : using SUM function
+LET $query=
+SELECT * FROM t1
+WHERE (t1.b>1) AND
+ (t1.b, t1.c) IN
+ (
+ SELECT t2.f, CAST(SUM(t2.g) OVER (PARTITION BY t2.f) AS INT)
+ FROM t2
+ WHERE t2.e<5
+ )
+;
+
+EVAL $no_pushdown $query;
+EVAL $query;
+EVAL EXPLAIN $query;
+EVAL EXPLAIN FORMAT=JSON $query;
+
+DROP TABLE t1,t2,t3;
+DROP VIEW v1,v2;
+
+--echo #
+--echo # MDEV-16721: IN-subquery defined with the AUTO-INCREMENT column
+--echo # and used with the ZEROFILL column
+--echo #
+
+CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY);
+CREATE TABLE t2 (b INT ZEROFILL);
+
+INSERT INTO t2 VALUES (2), (3);
+
+SELECT *
+FROM t2
+WHERE t2.b IN (SELECT MIN(t1.a) from t1);
+
+DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-16730: server crash caused by pushdown into the derived table
+--echo # of a condition that joins the IN subquery and the parent select
+--echo #
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1), (2), (3);
+
+SELECT *
+FROM (SELECT DISTINCT * FROM t1) AS tbl
+WHERE tbl.a IN
+(
+ SELECT COUNT(t1.a)
+ FROM t1
+ WHERE (t1.a!=1)
+);
+
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-16727: assertion failure caused by an improperly saved list
+--echo # of multiple equalities
+--echo #
+
+CREATE TABLE t1 (a varchar(1));
+INSERT INTO t1 VALUES ('x'), ('y'), ('z');
+
+CREATE TABLE t2 (b varchar(1));
+INSERT INTO t2 VALUES ('x');
+
+CREATE TABLE t3 (c varchar(1));
+INSERT INTO t3 VALUES ('y');
+
+CREATE TABLE t4 (d varchar(1));
+INSERT INTO t4 VALUES ('x'), ('z');
+
+SELECT * FROM t1
+JOIN t2 ON (t1.a=t2.b)
+LEFT JOIN t3 ON (t1.a=t3.c)
+WHERE (t1.a) IN
+(
+ SELECT t4.d
+ FROM t4
+ ORDER BY t4.d
+);
+
+DROP TABLE t1,t2,t3,t4;
+
+--echo #
+--echo # MDEV-17360: IN subquery predicate with outer reference in the left part
+--echo # that refers to a field of a mergeable derived table
+--echo #
+
+CREATE TABLE t1 (id1 int) ENGINE=MYISAM;
+INSERT INTO t1 VALUES (1814),(0),(NULL),(1);
+
+CREATE TABLE t2 (id2 int) ENGINE=MYISAM;
+
+SELECT 1 AS r FROM t2,t1,(SELECT * FROM t1) dt1
+ WHERE NOT EXISTS (SELECT id2 FROM t2
+ WHERE dt1.id1 IN (SELECT t2.id2 FROM t2
+ HAVING t2.id2 >= 1));
+
+DROP TABLE t1,t2;
+
+--echo #
+--echo # MDEV-17027: IN subquery predicate with outer reference in the left part
+--echo # conjoined with an equality predicate
+--echo #
+
+CREATE TABLE t1 (pk int, i1 int, v1 varchar(1));
+INSERT INTO t1 VALUES (3,2,'x'), (1,1,'y'), (4,2,'z');
+
+CREATE TABLE t2 (pk int, i1 int, v1 varchar(1));
+INSERT INTO t2 VALUES (5,2,'x'), (7,1,'x');
+
+CREATE TABLE t3 (pk int, i1 int, v1 varchar(1));
+INSERT INTO t3 VALUES (8,2,'x'), (7,1,'z');
+
+SELECT t3.i1 FROM t3
+ WHERE EXISTS ( SELECT t2.v1 FROM t1,t2
+ WHERE t1.v1 = t2.v1 AND
+ t3.i1 IN (SELECT t.i1 FROM t1 as t
+ GROUP BY i1 HAVING t.i1 < 3));
+
+DROP TABLE t1,t2,t3;
diff --git a/mysql-test/main/index_intersect.result b/mysql-test/main/index_intersect.result
index 7a0633d4dc8..bb2478c8c46 100644
--- a/mysql-test/main/index_intersect.result
+++ b/mysql-test/main/index_intersect.result
@@ -38,6 +38,7 @@ SELECT COUNT(*) FROM CountryLanguage;
COUNT(*)
984
CREATE INDEX Name ON City(Name);
+SET SESSION optimizer_switch='rowid_filter=off';
SET SESSION optimizer_switch='index_merge_sort_intersection=on';
SELECT COUNT(*) FROM City;
COUNT(*)
@@ -357,6 +358,9 @@ COUNT(*)
SELECT COUNT(*) FROM City WHERE Country LIKE 'B%';
COUNT(*)
339
+SELECT COUNT(*) FROM City WHERE Country LIKE 'J%';
+COUNT(*)
+256
EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'M' AND 'N' AND Population > 1000000 AND Country LIKE 'C%';
@@ -364,7 +368,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge Population,Country,Name Name,Population 35,4 NULL # Using sort_intersect(Name,Population); Using where
EXPLAIN
SELECT * FROM City
-WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'B%';
+WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'J%';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge Population,Country,Name Population,Country 4,3 NULL # Using sort_intersect(Population,Country); Using where
EXPLAIN
@@ -383,15 +387,13 @@ ID Name Country Population
1810 Montréal CAN 1016376
2259 Medellín COL 1861265
SELECT * FROM City USE INDEX ()
-WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'B%';
+WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'J%';
ID Name Country Population
-217 Guarulhos BRA 1095874
-218 Goiânia BRA 1056330
+1541 Hiroshima JPN 1119117
SELECT * FROM City
-WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'B%';
+WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'J%';
ID Name Country Population
-217 Guarulhos BRA 1095874
-218 Goiânia BRA 1056330
+1541 Hiroshima JPN 1119117
SELECT * FROM City USE INDEX ()
WHERE Name BETWEEN 'G' AND 'K' AND Population > 500000 AND Country LIKE 'C%';
ID Name Country Population
@@ -465,9 +467,9 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City range PRIMARY,Population,Country Population 4 NULL # Using index condition; Using where
EXPLAIN
SELECT * FROM City
-WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country Country,Population 3,4 NULL # Using sort_intersect(Country,Population); Using where
+1 SIMPLE City range PRIMARY,Population,Country Population 4 NULL # Using index condition; Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 2001 AND 2500 AND Population > 300000 AND Country LIKE 'H%';
@@ -494,33 +496,11 @@ WHERE ID BETWEEN 501 AND 1000 AND Population > 700000 AND Country LIKE 'C%';
ID Name Country Population
554 Santiago de Chile CHL 4703954
SELECT * FROM City USE INDEX ()
-WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
ID Name Country Population
-1 Kabul AFG 1780000
-126 Yerevan ARM 1248700
-130 Sydney AUS 3276207
-131 Melbourne AUS 2865329
-132 Brisbane AUS 1291117
-133 Perth AUS 1096829
-144 Baku AZE 1787800
-56 Luanda AGO 2022000
-69 Buenos Aires ARG 2982146
-70 La Matanza ARG 1266461
-71 Córdoba ARG 1157507
SELECT * FROM City
-WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
ID Name Country Population
-1 Kabul AFG 1780000
-126 Yerevan ARM 1248700
-130 Sydney AUS 3276207
-131 Melbourne AUS 2865329
-132 Brisbane AUS 1291117
-133 Perth AUS 1096829
-144 Baku AZE 1787800
-56 Luanda AGO 2022000
-69 Buenos Aires ARG 2982146
-70 La Matanza ARG 1266461
-71 Córdoba ARG 1157507
SELECT * FROM City USE INDEX ()
WHERE ID BETWEEN 2001 AND 2500 AND Population > 300000 AND Country LIKE 'H%';
ID Name Country Population
@@ -726,7 +706,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge Population,Name Population,Name 4,35 NULL # Using sort_intersect(Population,Name); Using where
EXPLAIN
SELECT * FROM City
-WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'B%';
+WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'J%';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge Population,Country,Name Population,Country 4,3 NULL # Using sort_intersect(Population,Country); Using where
EXPLAIN
@@ -736,9 +716,9 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City range Population,Country,Name Name 35 NULL # Using index condition; Using where
EXPLAIN
SELECT * FROM City
-WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country Country,Population 3,4 NULL # Using sort_intersect(Country,Population); Using where
+1 SIMPLE City range PRIMARY,Population,Country Population 4 NULL # Using index condition; Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 3001 AND 4000 AND Population > 600000
@@ -778,10 +758,9 @@ ID Name Country Population
766 Manila PHL 1581082
942 Medan IDN 1843919
SELECT * FROM City
-WHERE Name BETWEEN 'G' AND 'J' AND Population > 700000 AND Country LIKE 'B%';
+WHERE Name BETWEEN 'G' AND 'J' AND Population > 700000 AND Country LIKE 'J%';
ID Name Country Population
-217 Guarulhos BRA 1095874
-218 Goiânia BRA 1056330
+1541 Hiroshima JPN 1119117
SELECT * FROM City
WHERE Name BETWEEN 'G' AND 'J' AND Population > 500000 AND Country LIKE 'C%';
ID Name Country Population
@@ -794,19 +773,8 @@ ID Name Country Population
1937 Huainan CHN 700000
1950 Hegang CHN 520000
SELECT * FROM City
-WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
ID Name Country Population
-1 Kabul AFG 1780000
-56 Luanda AGO 2022000
-69 Buenos Aires ARG 2982146
-70 La Matanza ARG 1266461
-71 Córdoba ARG 1157507
-126 Yerevan ARM 1248700
-130 Sydney AUS 3276207
-131 Melbourne AUS 2865329
-132 Brisbane AUS 1291117
-133 Perth AUS 1096829
-144 Baku AZE 1787800
SELECT * FROM City
WHERE ID BETWEEN 3001 AND 4000 AND Population > 600000
AND Country BETWEEN 'S' AND 'Z';
@@ -888,12 +856,12 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge Population,CountryID,CountryName Population,CountryID 4,3 NULL # Using sort_intersect(Population,CountryID); Using where
EXPLAIN
SELECT * FROM City
-WHERE Country='CHN' AND Population > 1500000;
+WHERE Country='USA' AND Population > 1000000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge Population,CountryID,CountryName Population,CountryID 4,3 NULL # Using sort_intersect(Population,CountryID); Using where
EXPLAIN
SELECT * FROM City
-WHERE Country='CHN' AND Population > 1500000 AND Name LIKE 'C%';
+WHERE Country='USA' AND Population > 1500000 AND Name LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge Population,Name,CountryID,CountryName CountryName,Population 38,4 NULL # Using sort_intersect(CountryName,Population); Using where
SELECT * FROM City USE INDEX ()
@@ -931,73 +899,37 @@ ID Name Country Population
2698 Maputo MOZ 1018938
2710 Rangoon (Yangon) MMR 3361700
SELECT * FROM City USE INDEX ()
-WHERE Country='CHN' AND Population > 1500000;
+WHERE Country='USA' AND Population > 1000000;
ID Name Country Population
-1890 Shanghai CHN 9696300
-1891 Peking CHN 7472000
-1892 Chongqing CHN 6351600
-1893 Tianjin CHN 5286800
-1894 Wuhan CHN 4344600
-1895 Harbin CHN 4289800
-1896 Shenyang CHN 4265200
-1897 Kanton [Guangzhou] CHN 4256300
-1898 Chengdu CHN 3361500
-1899 Nanking [Nanjing] CHN 2870300
-1900 Changchun CHN 2812000
-1901 Xi´an CHN 2761400
-1902 Dalian CHN 2697000
-1903 Qingdao CHN 2596000
-1904 Jinan CHN 2278100
-1905 Hangzhou CHN 2190500
-1906 Zhengzhou CHN 2107200
-1907 Shijiazhuang CHN 2041500
-1908 Taiyuan CHN 1968400
-1909 Kunming CHN 1829500
-1910 Changsha CHN 1809800
-1911 Nanchang CHN 1691600
-1912 Fuzhou CHN 1593800
-1913 Lanzhou CHN 1565800
+3793 New York USA 8008278
+3794 Los Angeles USA 3694820
+3795 Chicago USA 2896016
+3796 Houston USA 1953631
+3797 Philadelphia USA 1517550
+3798 Phoenix USA 1321045
+3799 San Diego USA 1223400
+3800 Dallas USA 1188580
+3801 San Antonio USA 1144646
SELECT * FROM City
-WHERE Country='CHN' AND Population > 1500000;
+WHERE Country='USA' AND Population > 1000000;
ID Name Country Population
-1890 Shanghai CHN 9696300
-1891 Peking CHN 7472000
-1892 Chongqing CHN 6351600
-1893 Tianjin CHN 5286800
-1894 Wuhan CHN 4344600
-1895 Harbin CHN 4289800
-1896 Shenyang CHN 4265200
-1897 Kanton [Guangzhou] CHN 4256300
-1898 Chengdu CHN 3361500
-1899 Nanking [Nanjing] CHN 2870300
-1900 Changchun CHN 2812000
-1901 Xi´an CHN 2761400
-1902 Dalian CHN 2697000
-1903 Qingdao CHN 2596000
-1904 Jinan CHN 2278100
-1905 Hangzhou CHN 2190500
-1906 Zhengzhou CHN 2107200
-1907 Shijiazhuang CHN 2041500
-1908 Taiyuan CHN 1968400
-1909 Kunming CHN 1829500
-1910 Changsha CHN 1809800
-1911 Nanchang CHN 1691600
-1912 Fuzhou CHN 1593800
-1913 Lanzhou CHN 1565800
+3793 New York USA 8008278
+3794 Los Angeles USA 3694820
+3795 Chicago USA 2896016
+3796 Houston USA 1953631
+3797 Philadelphia USA 1517550
+3798 Phoenix USA 1321045
+3799 San Diego USA 1223400
+3800 Dallas USA 1188580
+3801 San Antonio USA 1144646
SELECT * FROM City USE INDEX ()
-WHERE Country='CHN' AND Population > 1500000 AND Name LIKE 'C%';
+WHERE Country='USA' AND Population > 1500000 AND Name LIKE 'C%';
ID Name Country Population
-1892 Chongqing CHN 6351600
-1898 Chengdu CHN 3361500
-1900 Changchun CHN 2812000
-1910 Changsha CHN 1809800
+3795 Chicago USA 2896016
SELECT * FROM City
-WHERE Country='CHN' AND Population > 1500000 AND Name LIKE 'C%';
+WHERE Country='USA' AND Population > 1500000 AND Name LIKE 'C%';
ID Name Country Population
-1892 Chongqing CHN 6351600
-1898 Chengdu CHN 3361500
-1900 Changchun CHN 2812000
-1910 Changsha CHN 1809800
+3795 Chicago USA 2896016
EXPLAIN
SELECT * FROM City, Country
WHERE City.Name LIKE 'C%' AND City.Population > 1000000 AND
@@ -1041,3 +973,4 @@ f1 f4 f5
998 a 0
DROP TABLE t1;
SET SESSION optimizer_switch='index_merge_sort_intersection=on';
+SET SESSION optimizer_switch='rowid_filter=default';
diff --git a/mysql-test/main/index_intersect.test b/mysql-test/main/index_intersect.test
index 1be963cb9e5..c77eccc6335 100644
--- a/mysql-test/main/index_intersect.test
+++ b/mysql-test/main/index_intersect.test
@@ -33,6 +33,7 @@ ANALYZE TABLE City;
--enable_result_log
--enable_query_log
+SET SESSION optimizer_switch='rowid_filter=off';
SET SESSION optimizer_switch='index_merge_sort_intersection=on';
SELECT COUNT(*) FROM City;
@@ -117,6 +118,7 @@ SELECT COUNT(*) FROM City WHERE Population > 1000000;
SELECT COUNT(*) FROM City WHERE Population > 500000;
SELECT COUNT(*) FROM City WHERE Country LIKE 'C%';
SELECT COUNT(*) FROM City WHERE Country LIKE 'B%';
+SELECT COUNT(*) FROM City WHERE Country LIKE 'J%';
# The pattern of the WHERE condition used in the following 3 queries is
@@ -134,7 +136,7 @@ SELECT * FROM City
--replace_column 9 #
EXPLAIN
SELECT * FROM City
- WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'B%';
+ WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'J%';
--replace_column 7 # 9 #
--replace_result Population,Country,Name Population,Name,Country
@@ -156,11 +158,11 @@ SELECT * FROM City
SELECT * FROM City USE INDEX ()
- WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'B%';
+ WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'J%';
--sorted_result
SELECT * FROM City
- WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'B%';
+ WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'J%';
SELECT * FROM City USE INDEX ()
@@ -207,7 +209,7 @@ SELECT * FROM City
--replace_column 9 #
EXPLAIN
SELECT * FROM City
- WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+ WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
--replace_column 9 #
EXPLAIN
@@ -241,10 +243,10 @@ SELECT * FROM City
--sorted_result
SELECT * FROM City USE INDEX ()
- WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+ WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
--sorted_result
SELECT * FROM City
- WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+ WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
SELECT * FROM City USE INDEX ()
@@ -293,7 +295,7 @@ SELECT * FROM City WHERE
--replace_column 9 #
EXPLAIN
SELECT * FROM City
- WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'B%';
+ WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'J%';
--replace_column 9 #
EXPLAIN
@@ -304,7 +306,7 @@ SELECT * FROM City
--replace_column 9 #
EXPLAIN
SELECT * FROM City
- WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+ WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
--replace_column 9 #
--replace_result PRIMARY,Country,Population PRIMARY,Population,Country 4,7,4 4,4,7
@@ -326,7 +328,7 @@ SELECT * FROM City WHERE
--sorted_result
SELECT * FROM City
- WHERE Name BETWEEN 'G' AND 'J' AND Population > 700000 AND Country LIKE 'B%';
+ WHERE Name BETWEEN 'G' AND 'J' AND Population > 700000 AND Country LIKE 'J%';
--sorted_result
SELECT * FROM City
@@ -334,7 +336,7 @@ SELECT * FROM City
SELECT * FROM City
- WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+ WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
--sorted_result
SELECT * FROM City
WHERE ID BETWEEN 3001 AND 4000 AND Population > 600000
@@ -371,12 +373,12 @@ SELECT * FROM City
--replace_column 9 #
EXPLAIN
SELECT * FROM City
- WHERE Country='CHN' AND Population > 1500000;
+ WHERE Country='USA' AND Population > 1000000;
--replace_column 9 #
EXPLAIN
SELECT * FROM City
- WHERE Country='CHN' AND Population > 1500000 AND Name LIKE 'C%';
+ WHERE Country='USA' AND Population > 1500000 AND Name LIKE 'C%';
# Check that the previous 3 plans return the right results when executed
@@ -390,18 +392,18 @@ SELECT * FROM City
--sorted_result
SELECT * FROM City USE INDEX ()
- WHERE Country='CHN' AND Population > 1500000;
+ WHERE Country='USA' AND Population > 1000000;
--sorted_result
SELECT * FROM City
- WHERE Country='CHN' AND Population > 1500000;
+ WHERE Country='USA' AND Population > 1000000;
SELECT * FROM City USE INDEX ()
- WHERE Country='CHN' AND Population > 1500000 AND Name LIKE 'C%';
+ WHERE Country='USA' AND Population > 1500000 AND Name LIKE 'C%';
--sorted_result
SELECT * FROM City
- WHERE Country='CHN' AND Population > 1500000 AND Name LIKE 'C%';
+ WHERE Country='USA' AND Population > 1500000 AND Name LIKE 'C%';
#
@@ -459,3 +461,4 @@ WHERE (f1 < 535 OR f1 > 985) AND ( f4='r' OR f4 LIKE 'a%' ) ;
DROP TABLE t1;
SET SESSION optimizer_switch='index_merge_sort_intersection=on';
+SET SESSION optimizer_switch='rowid_filter=default';
diff --git a/mysql-test/main/index_intersect_innodb.result b/mysql-test/main/index_intersect_innodb.result
index 15244098170..854bcd75e5c 100644
--- a/mysql-test/main/index_intersect_innodb.result
+++ b/mysql-test/main/index_intersect_innodb.result
@@ -1,4 +1,9 @@
SET SESSION STORAGE_ENGINE='InnoDB';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
DROP TABLE IF EXISTS t1,t2,t3,t4;
DROP DATABASE IF EXISTS world;
set names utf8;
@@ -39,6 +44,7 @@ SELECT COUNT(*) FROM CountryLanguage;
COUNT(*)
984
CREATE INDEX Name ON City(Name);
+SET SESSION optimizer_switch='rowid_filter=off';
SET SESSION optimizer_switch='index_merge_sort_intersection=on';
SELECT COUNT(*) FROM City;
COUNT(*)
@@ -80,7 +86,7 @@ EXPLAIN
SELECT * FROM City
WHERE Name LIKE 'M%' AND Population > 7000000;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Name Population,Name 4,35 NULL # Using sort_intersect(Population,Name); Using where
+1 SIMPLE City range Population,Name Population 4 NULL # Using index condition; Using where
SELECT * FROM City USE INDEX ()
WHERE Name LIKE 'C%' AND Population > 1000000;
ID Name Country Population
@@ -335,8 +341,8 @@ ID Name Country Population
SELECT * FROM City
WHERE Name LIKE 'M%' AND Population > 7000000;
ID Name Country Population
-1024 Mumbai (Bombay) IND 10500000
3580 Moscow RUS 8389200
+1024 Mumbai (Bombay) IND 10500000
SELECT COUNT(*) FROM City WHERE Name BETWEEN 'M' AND 'N';
COUNT(*)
301
@@ -358,16 +364,19 @@ COUNT(*)
SELECT COUNT(*) FROM City WHERE Country LIKE 'B%';
COUNT(*)
339
+SELECT COUNT(*) FROM City WHERE Country LIKE 'J%';
+COUNT(*)
+256
EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'M' AND 'N' AND Population > 1000000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Country,Name Population,Name,Country 4,35,3 NULL # Using sort_intersect(Population,Name,Country); Using where
+1 SIMPLE City index_merge Population,Country,Name Population,Name 4,35 NULL # Using sort_intersect(Population,Name); Using where
EXPLAIN
SELECT * FROM City
-WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'B%';
+WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'J%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Country,Name Population,Country,Name 4,3,35 NULL # Using sort_intersect(Population,Country,Name); Using where
+1 SIMPLE City index_merge Population,Country,Name Population,Country 4,3 NULL # Using sort_intersect(Population,Country); Using where
EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'G' AND 'K' AND Population > 500000 AND Country LIKE 'C%';
@@ -384,15 +393,13 @@ ID Name Country Population
1810 Montréal CAN 1016376
2259 Medellín COL 1861265
SELECT * FROM City USE INDEX ()
-WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'B%';
+WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'J%';
ID Name Country Population
-217 Guarulhos BRA 1095874
-218 Goiânia BRA 1056330
+1541 Hiroshima JPN 1119117
SELECT * FROM City
-WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'B%';
+WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'J%';
ID Name Country Population
-217 Guarulhos BRA 1095874
-218 Goiânia BRA 1056330
+1541 Hiroshima JPN 1119117
SELECT * FROM City USE INDEX ()
WHERE Name BETWEEN 'G' AND 'K' AND Population > 500000 AND Country LIKE 'C%';
ID Name Country Population
@@ -466,9 +473,9 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Country,Population 4,7,4 NULL # Using sort_intersect(PRIMARY,Country,Population); Using where
EXPLAIN
SELECT * FROM City
-WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population,Country 4,4,7 NULL # Using sort_intersect(PRIMARY,Population,Country); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Country,Population 4,7,4 NULL # Using sort_intersect(PRIMARY,Country,Population); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 2001 AND 2500 AND Population > 300000 AND Country LIKE 'H%';
@@ -479,7 +486,7 @@ SELECT * FROM City
WHERE ID BETWEEN 3701 AND 4000 AND Population > 1000000
AND Country BETWEEN 'S' AND 'Z';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Country,Population 4,7,4 NULL # Using sort_intersect(PRIMARY,Country,Population); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population 4,4 NULL # Using sort_intersect(PRIMARY,Population); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 3001 AND 4000 AND Population > 600000
@@ -495,33 +502,11 @@ WHERE ID BETWEEN 501 AND 1000 AND Population > 700000 AND Country LIKE 'C%';
ID Name Country Population
554 Santiago de Chile CHL 4703954
SELECT * FROM City USE INDEX ()
-WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
ID Name Country Population
-1 Kabul AFG 1780000
-126 Yerevan ARM 1248700
-130 Sydney AUS 3276207
-131 Melbourne AUS 2865329
-132 Brisbane AUS 1291117
-133 Perth AUS 1096829
-144 Baku AZE 1787800
-56 Luanda AGO 2022000
-69 Buenos Aires ARG 2982146
-70 La Matanza ARG 1266461
-71 Córdoba ARG 1157507
SELECT * FROM City
-WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
ID Name Country Population
-1 Kabul AFG 1780000
-126 Yerevan ARM 1248700
-130 Sydney AUS 3276207
-131 Melbourne AUS 2865329
-132 Brisbane AUS 1291117
-133 Perth AUS 1096829
-144 Baku AZE 1787800
-56 Luanda AGO 2022000
-69 Buenos Aires ARG 2982146
-70 La Matanza ARG 1266461
-71 Córdoba ARG 1157507
SELECT * FROM City USE INDEX ()
WHERE ID BETWEEN 2001 AND 2500 AND Population > 300000 AND Country LIKE 'H%';
ID Name Country Population
@@ -727,19 +712,19 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge Population,Name Population,Name 4,35 NULL # Using sort_intersect(Population,Name); Using where
EXPLAIN
SELECT * FROM City
-WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'B%';
+WHERE Name BETWEEN 'G' AND 'J' AND Population > 1000000 AND Country LIKE 'J%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Country,Name Population,Country,Name 4,3,35 NULL # Using sort_intersect(Population,Country,Name); Using where
+1 SIMPLE City index_merge Population,Country,Name Population,Country 4,3 NULL # Using sort_intersect(Population,Country); Using where
EXPLAIN
SELECT * FROM City
WHERE Name BETWEEN 'G' AND 'J' AND Population > 500000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Country,Name Name,Population,Country 35,4,3 NULL # Using sort_intersect(Name,Population,Country); Using where
+1 SIMPLE City index_merge Population,Country,Name Name,Population 35,4 NULL # Using sort_intersect(Name,Population); Using where
EXPLAIN
SELECT * FROM City
-WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Population,Country 4,4,7 NULL # Using sort_intersect(PRIMARY,Population,Country); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country PRIMARY,Country,Population 4,7,4 NULL # Using sort_intersect(PRIMARY,Country,Population); Using where
EXPLAIN
SELECT * FROM City
WHERE ID BETWEEN 3001 AND 4000 AND Population > 600000
@@ -779,10 +764,9 @@ ID Name Country Population
766 Manila PHL 1581082
942 Medan IDN 1843919
SELECT * FROM City
-WHERE Name BETWEEN 'G' AND 'J' AND Population > 700000 AND Country LIKE 'B%';
+WHERE Name BETWEEN 'G' AND 'J' AND Population > 700000 AND Country LIKE 'J%';
ID Name Country Population
-217 Guarulhos BRA 1095874
-218 Goiânia BRA 1056330
+1541 Hiroshima JPN 1119117
SELECT * FROM City
WHERE Name BETWEEN 'G' AND 'J' AND Population > 500000 AND Country LIKE 'C%';
ID Name Country Population
@@ -795,19 +779,8 @@ ID Name Country Population
1937 Huainan CHN 700000
1950 Hegang CHN 520000
SELECT * FROM City
-WHERE ID BETWEEN 1 AND 500 AND Population > 1000000 AND Country LIKE 'A%';
+WHERE ID BETWEEN 1 AND 500 AND Population > 700000 AND Country LIKE 'C%';
ID Name Country Population
-1 Kabul AFG 1780000
-56 Luanda AGO 2022000
-69 Buenos Aires ARG 2982146
-70 La Matanza ARG 1266461
-71 Córdoba ARG 1157507
-126 Yerevan ARM 1248700
-130 Sydney AUS 3276207
-131 Melbourne AUS 2865329
-132 Brisbane AUS 1291117
-133 Perth AUS 1096829
-144 Baku AZE 1787800
SELECT * FROM City
WHERE ID BETWEEN 3001 AND 4000 AND Population > 600000
AND Country BETWEEN 'S' AND 'Z';
@@ -889,12 +862,12 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge Population,CountryID,CountryName Population,CountryID 4,3 NULL # Using sort_intersect(Population,CountryID); Using where
EXPLAIN
SELECT * FROM City
-WHERE Country='CHN' AND Population > 1500000;
+WHERE Country='USA' AND Population > 1000000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge Population,CountryID,CountryName Population,CountryID 4,3 NULL # Using sort_intersect(Population,CountryID); Using where
EXPLAIN
SELECT * FROM City
-WHERE Country='CHN' AND Population > 1500000 AND Name LIKE 'C%';
+WHERE Country='USA' AND Population > 1500000 AND Name LIKE 'C%';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge Population,Name,CountryID,CountryName CountryName,Population 38,4 NULL # Using sort_intersect(CountryName,Population); Using where
SELECT * FROM City USE INDEX ()
@@ -932,73 +905,37 @@ ID Name Country Population
2698 Maputo MOZ 1018938
2710 Rangoon (Yangon) MMR 3361700
SELECT * FROM City USE INDEX ()
-WHERE Country='CHN' AND Population > 1500000;
+WHERE Country='USA' AND Population > 1000000;
ID Name Country Population
-1890 Shanghai CHN 9696300
-1891 Peking CHN 7472000
-1892 Chongqing CHN 6351600
-1893 Tianjin CHN 5286800
-1894 Wuhan CHN 4344600
-1895 Harbin CHN 4289800
-1896 Shenyang CHN 4265200
-1897 Kanton [Guangzhou] CHN 4256300
-1898 Chengdu CHN 3361500
-1899 Nanking [Nanjing] CHN 2870300
-1900 Changchun CHN 2812000
-1901 Xi´an CHN 2761400
-1902 Dalian CHN 2697000
-1903 Qingdao CHN 2596000
-1904 Jinan CHN 2278100
-1905 Hangzhou CHN 2190500
-1906 Zhengzhou CHN 2107200
-1907 Shijiazhuang CHN 2041500
-1908 Taiyuan CHN 1968400
-1909 Kunming CHN 1829500
-1910 Changsha CHN 1809800
-1911 Nanchang CHN 1691600
-1912 Fuzhou CHN 1593800
-1913 Lanzhou CHN 1565800
+3793 New York USA 8008278
+3794 Los Angeles USA 3694820
+3795 Chicago USA 2896016
+3796 Houston USA 1953631
+3797 Philadelphia USA 1517550
+3798 Phoenix USA 1321045
+3799 San Diego USA 1223400
+3800 Dallas USA 1188580
+3801 San Antonio USA 1144646
SELECT * FROM City
-WHERE Country='CHN' AND Population > 1500000;
+WHERE Country='USA' AND Population > 1000000;
ID Name Country Population
-1890 Shanghai CHN 9696300
-1891 Peking CHN 7472000
-1892 Chongqing CHN 6351600
-1893 Tianjin CHN 5286800
-1894 Wuhan CHN 4344600
-1895 Harbin CHN 4289800
-1896 Shenyang CHN 4265200
-1897 Kanton [Guangzhou] CHN 4256300
-1898 Chengdu CHN 3361500
-1899 Nanking [Nanjing] CHN 2870300
-1900 Changchun CHN 2812000
-1901 Xi´an CHN 2761400
-1902 Dalian CHN 2697000
-1903 Qingdao CHN 2596000
-1904 Jinan CHN 2278100
-1905 Hangzhou CHN 2190500
-1906 Zhengzhou CHN 2107200
-1907 Shijiazhuang CHN 2041500
-1908 Taiyuan CHN 1968400
-1909 Kunming CHN 1829500
-1910 Changsha CHN 1809800
-1911 Nanchang CHN 1691600
-1912 Fuzhou CHN 1593800
-1913 Lanzhou CHN 1565800
+3793 New York USA 8008278
+3794 Los Angeles USA 3694820
+3795 Chicago USA 2896016
+3796 Houston USA 1953631
+3797 Philadelphia USA 1517550
+3798 Phoenix USA 1321045
+3799 San Diego USA 1223400
+3800 Dallas USA 1188580
+3801 San Antonio USA 1144646
SELECT * FROM City USE INDEX ()
-WHERE Country='CHN' AND Population > 1500000 AND Name LIKE 'C%';
+WHERE Country='USA' AND Population > 1500000 AND Name LIKE 'C%';
ID Name Country Population
-1892 Chongqing CHN 6351600
-1898 Chengdu CHN 3361500
-1900 Changchun CHN 2812000
-1910 Changsha CHN 1809800
+3795 Chicago USA 2896016
SELECT * FROM City
-WHERE Country='CHN' AND Population > 1500000 AND Name LIKE 'C%';
+WHERE Country='USA' AND Population > 1500000 AND Name LIKE 'C%';
ID Name Country Population
-1892 Chongqing CHN 6351600
-1898 Chengdu CHN 3361500
-1900 Changchun CHN 2812000
-1910 Changsha CHN 1809800
+3795 Chicago USA 2896016
EXPLAIN
SELECT * FROM City, Country
WHERE City.Name LIKE 'C%' AND City.Population > 1000000 AND
@@ -1042,4 +979,8 @@ f1 f4 f5
998 a 0
DROP TABLE t1;
SET SESSION optimizer_switch='index_merge_sort_intersection=on';
+SET SESSION optimizer_switch='rowid_filter=default';
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/main/index_intersect_innodb.test b/mysql-test/main/index_intersect_innodb.test
index f2e44cb3947..637056ad795 100644
--- a/mysql-test/main/index_intersect_innodb.test
+++ b/mysql-test/main/index_intersect_innodb.test
@@ -2,6 +2,15 @@
SET SESSION STORAGE_ENGINE='InnoDB';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
--source index_intersect.test
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/main/index_merge_innodb.result b/mysql-test/main/index_merge_innodb.result
index f27bb263c6f..a25fe928a24 100644
--- a/mysql-test/main/index_merge_innodb.result
+++ b/mysql-test/main/index_merge_innodb.result
@@ -1,5 +1,6 @@
set @optimizer_switch_save= @@optimizer_switch;
set optimizer_switch='index_merge_sort_intersection=off';
+set optimizer_switch='rowid_filter=off';
#---------------- Index merge test 2 -------------------------------------------
SET SESSION STORAGE_ENGINE = InnoDB;
drop table if exists t1,t2;
@@ -106,6 +107,7 @@ insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b)
select key1a, key1b, key2a, key2b, key3a, key3b from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select count(*) from t1;
count(*)
@@ -316,6 +318,7 @@ update t1 set key2=key1,key3=key1;
insert into t1 select 10000+key1, 10000+key2,10000+key3 from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain select * from t1 where (key3 > 30 and key3<35) or (key2 >32 and key2 < 40);
id select_type table type possible_keys key key_len ref rows Extra
@@ -704,8 +707,8 @@ SELECT COUNT(*) FROM
(SELECT * FROM t1 FORCE INDEX(primary,idx)
WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 6145
-2 DERIVED t1 index_merge PRIMARY,idx idx,PRIMARY 5,4 NULL 6145 Using sort_union(idx,PRIMARY); Using where
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL #
+2 DERIVED t1 index_merge PRIMARY,idx idx,PRIMARY 5,4 NULL # Using sort_union(idx,PRIMARY); Using where
SELECT COUNT(*) FROM
(SELECT * FROM t1 FORCE INDEX(primary,idx)
WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
diff --git a/mysql-test/main/index_merge_innodb.test b/mysql-test/main/index_merge_innodb.test
index f959db3039a..7f9d6ac6eb1 100644
--- a/mysql-test/main/index_merge_innodb.test
+++ b/mysql-test/main/index_merge_innodb.test
@@ -20,6 +20,7 @@ let $merge_table_support= 0;
set @optimizer_switch_save= @@optimizer_switch;
set optimizer_switch='index_merge_sort_intersection=off';
+set optimizer_switch='rowid_filter=off';
# The first two tests are disabled because of non deterministic explain output.
# If include/index_merge1.inc can be enabled for InnoDB and all other
@@ -74,6 +75,7 @@ set optimizer_switch='derived_merge=off,derived_with_keys=off';
# We have to use FORCE INDEX here as Innodb gives inconsistent estimates
# which causes different query plans.
+--replace_column 9 #
EXPLAIN
SELECT COUNT(*) FROM
(SELECT * FROM t1 FORCE INDEX(primary,idx)
diff --git a/mysql-test/main/index_merge_myisam.result b/mysql-test/main/index_merge_myisam.result
index 37f5e15a5f7..3d31f8d3dfa 100644
--- a/mysql-test/main/index_merge_myisam.result
+++ b/mysql-test/main/index_merge_myisam.result
@@ -1,4 +1,5 @@
set @optimizer_switch_save= @@optimizer_switch;
+set optimizer_switch='rowid_filter=off';
set optimizer_switch='index_merge_sort_intersection=off';
#---------------- Index merge test 1 -------------------------------------------
SET SESSION STORAGE_ENGINE = MyISAM;
@@ -18,28 +19,28 @@ alter table t0 add key8 int not null, add index i8(key8);
update t0 set key2=key1,key3=key1,key4=key1,key5=key1,key6=key1,key7=key1,key8=1024-key1;
analyze table t0;
Table Op Msg_type Msg_text
+test.t0 analyze status Engine-independent statistics collected
test.t0 analyze status OK
-explain select * from t0 where key1 < 3 or key1 > 1020;
+explain select * from t0 where key1 < 3 or key1 > 920 and key1 < 924;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 range i1 i1 4 NULL 78 Using index condition; Using where
+1 SIMPLE t0 range i1 i1 4 NULL 5 Using index condition; Using where
explain
-select * from t0 where key1 < 3 or key2 > 1020;
+select * from t0 where key1 < 3 or key2 > 920 and key2 < 924;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 45 Using sort_union(i1,i2); Using where
-select * from t0 where key1 < 3 or key2 > 1020;
+1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 5 Using sort_union(i1,i2); Using where
+select * from t0 where key1 < 3 or key2 > 920 and key2 < 924;
key1 key2 key3 key4 key5 key6 key7 key8
1 1 1 1 1 1 1 1023
2 2 2 2 2 2 2 1022
-1021 1021 1021 1021 1021 1021 1021 3
-1022 1022 1022 1022 1022 1022 1022 2
-1023 1023 1023 1023 1023 1023 1023 1
-1024 1024 1024 1024 1024 1024 1024 0
+921 921 921 921 921 921 921 103
+922 922 922 922 922 922 922 102
+923 923 923 923 923 923 923 101
select * from t0 where key1=1022;
key1 key2 key3 key4 key5 key6 key7 key8
1022 1022 1022 1022 1022 1022 1022 2
explain select * from t0 where key1 < 3 or key2 <4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 7 Using sort_union(i1,i2); Using where
+1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 5 Using sort_union(i1,i2); Using where
explain
select * from t0 where (key1 > 30 and key1<35) or (key2 >32 and key2 < 40);
id select_type table type possible_keys key key_len ref rows Extra
@@ -63,7 +64,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 ref i1,i2,i3 i3 4 const 1 Using where
explain select * from t0 use index (i1,i2) where (key1 < 3 or key2 <4) and key3 = 50;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 7 Using sort_union(i1,i2); Using where
+1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 5 Using sort_union(i1,i2); Using where
explain select * from t0 where (key1 > 1 or key2 > 2);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 ALL i1,i2 NULL NULL NULL 1024 Using where
@@ -74,7 +75,7 @@ explain
select * from t0 where key1<3 or key2<3 or (key1>5 and key1<8) or
(key1>10 and key1<12) or (key2>100 and key2<110);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 17 Using sort_union(i1,i2); Using where
+1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 15 Using sort_union(i1,i2); Using where
explain select * from t0 where key2 = 45 or key1 <=> null;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t0 range i1,i2 i2 4 NULL 1 Using index condition
@@ -93,19 +94,19 @@ id select_type table type possible_keys key key_len ref rows Extra
explain select key1 from t0 where (key1 <=> null) or (key2 < 5) or
(key3=10) or (key4 <=> null);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2,i3,i4 i2,i3 4,4 NULL 6 Using sort_union(i2,i3); Using where
+1 SIMPLE t0 index_merge i1,i2,i3,i4 i2,i3 4,4 NULL 5 Using sort_union(i2,i3); Using where
explain select key1 from t0 where (key1 <=> null) or (key1 < 5) or
(key3=10) or (key4 <=> null);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i3,i4 i1,i3 4,4 NULL 6 Using sort_union(i1,i3); Using where
+1 SIMPLE t0 index_merge i1,i3,i4 i1,i3 4,4 NULL 5 Using sort_union(i1,i3); Using where
explain select * from t0 where
(key1 < 3 or key2 < 3) and (key3 < 4 or key4 < 4) and (key5 < 5 or key6 < 5);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2,i3,i4,i5,i6 i1,i2 4,4 NULL 6 Using sort_union(i1,i2); Using where
+1 SIMPLE t0 index_merge i1,i2,i3,i4,i5,i6 i1,i2 4,4 NULL 4 Using sort_union(i1,i2); Using where
explain
select * from t0 where (key1 < 3 or key2 < 6) and (key1 < 7 or key3 < 4);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL 9 Using sort_union(i1,i2); Using where
+1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL 7 Using sort_union(i1,i2); Using where
select * from t0 where (key1 < 3 or key2 < 6) and (key1 < 7 or key3 < 4);
key1 key2 key3 key4 key5 key6 key7 key8
1 1 1 1 1 1 1 1023
@@ -116,15 +117,15 @@ key1 key2 key3 key4 key5 key6 key7 key8
explain select * from t0 where
(key1 < 3 or key2 < 3) and (key3 < 4 or key4 < 4) and (key5 < 2 or key6 < 2);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2,i3,i4,i5,i6 i1,i2 4,4 NULL 6 Using sort_union(i1,i2); Using where
+1 SIMPLE t0 index_merge i1,i2,i3,i4,i5,i6 i5,i6 4,4 NULL 2 Using sort_union(i5,i6); Using where
explain select * from t0 where
(key1 < 3 or key2 < 3) and (key3 < 100);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL 6 Using sort_union(i1,i2); Using where
+1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL 4 Using sort_union(i1,i2); Using where
explain select * from t0 where
(key1 < 3 or key2 < 3) and (key3 < 1000);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL 6 Using sort_union(i1,i2); Using where
+1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL 4 Using sort_union(i1,i2); Using where
explain select * from t0 where
((key1 < 4 or key2 < 4) and (key2 <5 or key3 < 4))
or
@@ -136,7 +137,7 @@ explain select * from t0 where
or
key1 < 7;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL 10 Using sort_union(i1,i2); Using where
+1 SIMPLE t0 index_merge i1,i2,i3 i1,i2 4,4 NULL 8 Using sort_union(i1,i2); Using where
select * from t0 where
((key1 < 4 or key2 < 4) and (key2 <5 or key3 < 4))
or
@@ -148,30 +149,36 @@ key1 key2 key3 key4 key5 key6 key7 key8
4 4 4 4 4 4 4 1020
5 5 5 5 5 5 5 1019
6 6 6 6 6 6 6 1018
+select count(*) from t0 where
+((key1 < 4 or key2 < 4) and (key3 <5 or key5 < 4))
+or
+((key5 < 5 or key6 < 6) and (key7 <7 or key8 < 4));
+count(*)
+5
explain select * from t0 where
((key1 < 4 or key2 < 4) and (key3 <5 or key5 < 4))
or
((key5 < 5 or key6 < 6) and (key7 <7 or key8 < 4));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7,i8 i1,i2,i5,i6 4,4,4,4 NULL 19 Using sort_union(i1,i2,i5,i6); Using where
+1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7,i8 i1,i2,i5,i6 4,4,4,4 NULL 15 Using sort_union(i1,i2,i5,i6); Using where
explain select * from t0 where
((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4))
or
((key7 <7 or key8 < 4) and (key5 < 5 or key6 < 6));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7,i8 i3,i5,i7,i8 4,4,4,4 NULL 20 Using sort_union(i3,i5,i7,i8); Using where
+1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7,i8 i3,i5,i7,i8 4,4,4,4 NULL 16 Using sort_union(i3,i5,i7,i8); Using where
explain select * from t0 where
((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4))
or
((key3 <7 or key5 < 2) and (key5 < 5 or key6 < 6));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2,i3,i5,i6 i3,i5 4,4 NULL 11 Using sort_union(i3,i5); Using where
+1 SIMPLE t0 index_merge i1,i2,i3,i5,i6 i3,i5 4,4 NULL 9 Using sort_union(i3,i5); Using where
explain select * from t0 where
((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4))
or
(((key3 <7 and key7 < 6) or key5 < 2) and (key5 < 5 or key6 < 6));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7 i3,i5 4,4 NULL 11 Using sort_union(i3,i5); Using where
+1 SIMPLE t0 index_merge i1,i2,i3,i5,i6,i7 i3,i5 4,4 NULL 9 Using sort_union(i3,i5); Using where
explain select * from t0 where
((key3 <5 or key5 < 4) and (key1 < 4 or key2 < 4))
or
@@ -197,7 +204,7 @@ key1 key2 key3 key4 key5 key6 key7 key8
explain
select * from t0 where key1 < 5 or key8 < 4 order by key1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i8 i1,i8 4,4 NULL 9 Using sort_union(i1,i8); Using where; Using filesort
+1 SIMPLE t0 index_merge i1,i8 i1,i8 4,4 NULL 7 Using sort_union(i1,i8); Using where; Using filesort
create table t2 like t0;
insert into t2 select * from t0;
alter table t2 add index i1_3(key1, key3);
@@ -213,7 +220,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index i1_3,i2_3 i321 12 NULL 1024 Using where; Using index
explain select key7 from t2 where key1 <100 or key2 < 100;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 index_merge i1_3,i2_3 i1_3,i2_3 4,4 NULL 188 Using sort_union(i1_3,i2_3); Using where
+1 SIMPLE t2 index_merge i1_3,i2_3 i1_3,i2_3 4,4 NULL 186 Using sort_union(i1_3,i2_3); Using where
create table t4 (
key1a int not null,
key1b int not null,
@@ -238,10 +245,10 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 index_merge i1a,i1b i1a,i1b 4,4 NULL 2 Using sort_union(i1a,i1b); Using where
explain select * from t4 where key2 = 1 and (key2_1 = 1 or key3 = 5);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ref i2_1,i2_2 i2_1 4 const 10 Using where
+1 SIMPLE t4 ref i2_1,i2_2 i2_1 4 const 9 Using where
explain select * from t4 where key2 = 1 and (key2_1 = 1 or key2_2 = 5);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ref i2_1,i2_2 i2_1 4 const 10 Using where
+1 SIMPLE t4 ref i2_1,i2_2 i2_1 4 const 9 Using where
explain select * from t4 where key2_1 = 1 or key2_2 = 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t4 ALL NULL NULL NULL NULL 1024 Using where
@@ -282,13 +289,13 @@ id select_type table type possible_keys key key_len ref rows Extra
explain select * from t0,t1 where t0.key1 < 3 and
(t1.key1 = t0.key1 or t1.key8 = t0.key1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 range i1 i1 4 NULL 3 Using index condition
+1 SIMPLE t0 range i1 i1 4 NULL 2 Using index condition
1 SIMPLE t1 ALL i1,i8 NULL NULL NULL 1024 Range checked for each record (index map: 0x81)
explain select * from t1 where key1=3 or key2=4
union select * from t1 where key1<4 or key3=5;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index_merge i1,i2 i1,i2 4,4 NULL 2 Using union(i1,i2); Using where
-2 UNION t1 index_merge i1,i3 i1,i3 4,4 NULL 5 Using sort_union(i1,i3); Using where
+2 UNION t1 index_merge i1,i3 i1,i3 4,4 NULL 4 Using sort_union(i1,i3); Using where
NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL
set @tmp_optimizer_switch=@@optimizer_switch;
set optimizer_switch='derived_merge=off,derived_with_keys=off';
@@ -329,7 +336,7 @@ key1 key2 key3 key4 key5 key6 key7 key8 key9 keyA keyB keyC
1016 1016 1016 1016 1016 1016 1016 8 1016 1016 1016 1016
explain select * from t0 where key1 < 3 or key2 < 4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 7 Using sort_union(i1,i2); Using where
+1 SIMPLE t0 index_merge i1,i2 i1,i2 4,4 NULL 5 Using sort_union(i1,i2); Using where
select * from t0 where key1 < 3 or key2 < 4;
key1 key2 key3 key4 key5 key6 key7 key8
1 1 1 1 1 1 1 1023
@@ -357,8 +364,8 @@ from t0 as A force index(i1,i2), t0 as B force index (i1,i2)
where (A.key1 < 500000 or A.key2 < 3)
and (B.key1 < 500000 or B.key2 < 3);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE A index_merge i1,i2 i1,i2 4,4 NULL 1013 Using sort_union(i1,i2); Using where
-1 SIMPLE B index_merge i1,i2 i1,i2 4,4 NULL 1013 Using sort_union(i1,i2); Using where; Using join buffer (flat, BNL join)
+1 SIMPLE A index_merge i1,i2 i1,i2 4,4 NULL 1010 Using sort_union(i1,i2); Using where
+1 SIMPLE B index_merge i1,i2 i1,i2 4,4 NULL 1010 Using sort_union(i1,i2); Using where; Using join buffer (flat, BNL join)
select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5)
from t0 as A force index(i1,i2), t0 as B force index (i1,i2)
where (A.key1 < 500000 or A.key2 < 3)
@@ -371,8 +378,8 @@ from t0 as A force index(i1,i2), t0 as B force index (i1,i2)
where (A.key1 = 1 or A.key2 = 1)
and (B.key1 = 1 or B.key2 = 1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE A index_merge i1,i2 i1,i2 4,4 NULL 1020 Using union(i1,i2); Using where
-1 SIMPLE B index_merge i1,i2 i1,i2 4,4 NULL 1020 Using union(i1,i2); Using where; Using join buffer (flat, BNL join)
+1 SIMPLE A index_merge i1,i2 i1,i2 4,4 NULL 1021 Using union(i1,i2); Using where
+1 SIMPLE B index_merge i1,i2 i1,i2 4,4 NULL 1021 Using union(i1,i2); Using where; Using join buffer (flat, BNL join)
select max(A.key1 + B.key1 + A.key2 + B.key2 + A.key3 + B.key3 + A.key4 + B.key4 + A.key5 + B.key5)
from t0 as A force index(i1,i2), t0 as B force index (i1,i2)
where (A.key1 = 1 or A.key2 = 1)
@@ -515,18 +522,13 @@ a filler b
4 zz 4
5 qq 4
must use union, not sort-union:
-explain select * from t2 where a=4 or b=4;
+explain select * from t2 where a=2 or b=2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index_merge a,b a,b 5,5 NULL # Using union(a,b); Using where
-select * from t2 where a=4 or b=4;
+select * from t2 where a=2 or b=2;
a filler b
-4 4 0
-4 5 0
-4 filler 4
-4 filler 4
-4 qq 5
-4 zz 4
-5 qq 4
+2 filler 2
+2 filler 2
drop table t1, t2;
CREATE TABLE t1 (a varchar(8), b set('a','b','c','d','e','f','g','h'),
KEY b(b), KEY a(a));
@@ -705,19 +707,19 @@ select key1,key2,filler1 from t1 where key2=100 and key2=200;
key1 key2 filler1
explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 152 Using union(intersect(key1,key2),intersect(key3,key4)); Using where
+1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 154 Using union(intersect(key1,key2),intersect(key3,key4)); Using where
select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100;
key1 key2 key3 key4 filler1
-1 -1 100 100 key4-key3
delete from t1 where key3=100 and key4=100;
explain select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 152 Using union(intersect(key1,key2),intersect(key3,key4)); Using where
+1 SIMPLE t1 index_merge key1,key2,key3,key4 key1,key2,key3,key4 5,5,5,5 NULL 154 Using union(intersect(key1,key2),intersect(key3,key4)); Using where
select key1,key2,key3,key4,filler1 from t1 where key1=100 and key2=100 or key3=100 and key4=100;
key1 key2 key3 key4 filler1
explain select key1,key2 from t1 where key1=100 and key2=100;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 76 Using intersect(key1,key2); Using where; Using index
+1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 77 Using intersect(key1,key2); Using where; Using index
select key1,key2 from t1 where key1=100 and key2=100;
key1 key2
insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-1');
@@ -725,7 +727,7 @@ insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key
insert into t1 (key1, key2, key3, key4, filler1) values (100, 100, 200, 200,'key1-key2-key3-key4-3');
explain select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge key1,key2,key3,key4 key3,key1,key2,key4 5,5,5,5 NULL 136 Using union(key3,intersect(key1,key2),key4); Using where
+1 SIMPLE t1 index_merge key1,key2,key3,key4 key3,key1,key2,key4 5,5,5,5 NULL 137 Using union(key3,intersect(key1,key2),key4); Using where
select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200;
key1 key2 key3 key4 filler1
100 100 200 200 key1-key2-key3-key4-3
@@ -734,7 +736,7 @@ key1 key2 key3 key4 filler1
insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, -1, 200,'key4');
explain select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge key1,key2,key3,key4 key3,key1,key2,key4 5,5,5,5 NULL 146 Using union(key3,intersect(key1,key2),key4); Using where
+1 SIMPLE t1 index_merge key1,key2,key3,key4 key3,key1,key2,key4 5,5,5,5 NULL 148 Using union(key3,intersect(key1,key2),key4); Using where
select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200;
key1 key2 key3 key4 filler1
100 100 200 200 key1-key2-key3-key4-3
@@ -744,7 +746,7 @@ key1 key2 key3 key4 filler1
insert into t1 (key1, key2, key3, key4, filler1) values (-1, -1, 200, -1,'key3');
explain select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge key1,key2,key3,key4 key3,key1,key2,key4 5,5,5,5 NULL 156 Using union(key3,intersect(key1,key2),key4); Using where
+1 SIMPLE t1 index_merge key1,key2,key3,key4 key3,key1,key2,key4 5,5,5,5 NULL 159 Using union(key3,intersect(key1,key2),key4); Using where
select key1,key2,key3,key4,filler1 from t1 where key3=200 or (key1=100 and key2=100) or key4=200;
key1 key2 key3 key4 filler1
100 100 200 200 key1-key2-key3-key4-3
@@ -760,7 +762,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a,stb_swt1a_2b,stb_swt1b,st_b st_a,st_b 4,4 NULL 3515 Using intersect(st_a,st_b); Using where; Using index
explain select st_a from t1 ignore index (st_a) where st_a=1 and st_b=1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,stb_swt1a_2b,stb_swt1b,st_b st_b 4 const 15093 Using where
+1 SIMPLE t1 ref sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,stb_swt1a_2b,stb_swt1b,st_b st_b 4 const 15094 Using where
explain select * from t1 where st_a=1 and swt1a=1 and swt2a=1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref sta_swt12a,sta_swt1a,sta_swt2a,sta_swt21a,st_a sta_swt21a 12 const,const,const 971
@@ -850,7 +852,7 @@ INDEX i2(key2)
);
explain select * from t1 where key1 < 5 or key2 > 197;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 10 Using sort_union(i1,i2); Using where
+1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 8 Using sort_union(i1,i2); Using where
select * from t1 where key1 < 5 or key2 > 197;
key1 key2
0 200
@@ -860,7 +862,7 @@ key1 key2
4 196
explain select * from t1 where key1 < 3 or key2 > 195;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 10 Using sort_union(i1,i2); Using where
+1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 8 Using sort_union(i1,i2); Using where
select * from t1 where key1 < 3 or key2 > 195;
key1 key2
0 200
@@ -876,7 +878,7 @@ update t1 set str1='aaa', str2='bbb', str3=concat(key2, '-', key1 div 2, '_' ,if
alter table t1 add primary key (str1, zeroval, str2, str3);
explain select * from t1 where key1 < 5 or key2 > 197;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 10 Using sort_union(i1,i2); Using where
+1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 8 Using sort_union(i1,i2); Using where
select * from t1 where key1 < 5 or key2 > 197;
key1 key2 str1 zeroval str2 str3
0 200 aaa 0 bbb 200-0_a
@@ -886,7 +888,7 @@ key1 key2 str1 zeroval str2 str3
4 196 aaa 0 bbb 196-2_a
explain select * from t1 where key1 < 3 or key2 > 195;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 10 Using sort_union(i1,i2); Using where
+1 SIMPLE t1 index_merge i1,i2 i1,i2 4,4 NULL 8 Using sort_union(i1,i2); Using where
select * from t1 where key1 < 3 or key2 > 195;
key1 key2 str1 zeroval str2 str3
0 200 aaa 0 bbb 200-0_a
@@ -944,6 +946,7 @@ insert into t1 (key1a, key1b, key2a, key2b, key3a, key3b)
select key1a, key1b, key2a, key2b, key3a, key3b from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select count(*) from t1;
count(*)
@@ -1154,6 +1157,7 @@ update t1 set key2=key1,key3=key1;
insert into t1 select 10000+key1, 10000+key2,10000+key3 from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain select * from t1 where (key3 > 30 and key3<35) or (key2 >32 and key2 < 40);
id select_type table type possible_keys key key_len ref rows Extra
@@ -1447,7 +1451,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref key1,pktail4bad pktail4bad 4 const 82 Using where
explain select * from t1 where pktail5bad=1 and key1=10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref key1,pktail5bad pktail5bad 4 const 70 Using where
+1 SIMPLE t1 ref key1,pktail5bad pktail5bad 4 const 69 Using where
explain select pk1,pk2,key1,key2 from t1 where key1 = 10 and key2=10 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge key1,key2 key1,key2 4,4 NULL 1 Using intersect(key1,key2); Using where
@@ -1501,7 +1505,7 @@ EXPLAIN SELECT t1.f1 FROM t1
WHERE (SELECT COUNT(*) FROM t2 WHERE t2.f3 = 'h' AND t2.f2 = t1.f1) = 0 AND t1.f1 = 2;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
-2 SUBQUERY t2 ref f2,f3 f2 5 const 1 Using where
+2 SUBQUERY t2 ref f2,f3 f2 5 const 2 Using where
DROP TABLE t1,t2;
create table t0 (a int);
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
@@ -1518,12 +1522,12 @@ explain select * from t1 where a=1 or b=1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge a,b a,b 5,5 NULL 2 Using union(a,b); Using where
This should use ALL:
-set optimizer_switch='default,index_merge=off';
+set optimizer_switch='default,index_merge=off,rowid_filter=off';
explain select * from t1 where a=1 or b=1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL a,b NULL NULL NULL 1000 Using where
This should use sort-union:
-set optimizer_switch='default,index_merge_union=off';
+set optimizer_switch='default,index_merge_union=off,rowid_filter=off';
explain select * from t1 where a=1 or b=1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge a,b a,b 5,5 NULL 2 Using sort_union(a,b); Using where
@@ -1533,17 +1537,17 @@ explain select * from t1 where a<1 or b <1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge a,b a,b 5,5 NULL 38 Using sort_union(a,b); Using where
This should use ALL:
-set optimizer_switch='default,index_merge_sort_union=off';
+set optimizer_switch='default,index_merge_sort_union=off,rowid_filter=off';
explain select * from t1 where a<1 or b <1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL a,b NULL NULL NULL 1000 Using where
This should use ALL:
-set optimizer_switch='default,index_merge=off';
+set optimizer_switch='default,index_merge=off,rowid_filter=off';
explain select * from t1 where a<1 or b <1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL a,b NULL NULL NULL 1000 Using where
This will use sort-union:
-set optimizer_switch='default,index_merge_union=off';
+set optimizer_switch='default,index_merge_union=off,rowid_filter=off';
explain select * from t1 where a<1 or b <1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge a,b a,b 5,5 NULL 38 Using sort_union(a,b); Using where
@@ -1555,7 +1559,7 @@ explain select * from t1 where (a=3 or b in (1,2)) and (c=3 or d=4);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge a,b,c,d a,b 5,5 NULL 3 Using sort_union(a,b); Using where
And if we disable sort_union, union:
-set optimizer_switch='default,index_merge_sort_union=off';
+set optimizer_switch='default,index_merge_sort_union=off,rowid_filter=off';
explain select * from t1 where (a=3 or b in (1,2)) and (c=3 or d=4);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge a,b,c,d c,d 5,5 NULL 100 Using union(c,d); Using where
@@ -1574,22 +1578,22 @@ explain select * from t1 where a=10 and b=10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge a,b a,b 5,5 NULL 1 Using intersect(a,b); Using where
No intersect when index_merge is disabled:
-set optimizer_switch='default,index_merge=off';
+set optimizer_switch='default,index_merge=off,rowid_filter=off';
explain select * from t1 where a=10 and b=10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref a,b a 5 const 49 Using where
No intersect if it is disabled:
-set optimizer_switch='default,index_merge_sort_intersection=off,index_merge_intersection=off';
+set optimizer_switch='default,index_merge_sort_intersection=off,index_merge_intersection=off,rowid_filter=off';
explain select * from t1 where a=10 and b=10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref a,b a 5 const 49 Using where
Do intersect when union was disabled
-set optimizer_switch='default,index_merge_union=off';
+set optimizer_switch='default,index_merge_union=off,rowid_filter=off';
explain select * from t1 where a=10 and b=10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge a,b a,b 5,5 NULL 1 Using intersect(a,b); Using where
Do intersect when sort_union was disabled
-set optimizer_switch='default,index_merge_sort_union=off';
+set optimizer_switch='default,index_merge_sort_union=off,rowid_filter=off';
explain select * from t1 where a=10 and b=10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge a,b a,b 5,5 NULL 1 Using intersect(a,b); Using where
@@ -1599,13 +1603,13 @@ explain select * from t1 where a=10 and b=10 or c=10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge a,b,c a,b,c 5,5,5 NULL 6 Using union(intersect(a,b),c); Using where
Should be only union left:
-set optimizer_switch='default,index_merge_intersection=off';
+set optimizer_switch='default,index_merge_intersection=off,rowid_filter=off';
explain select * from t1 where a=10 and b=10 or c=10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge a,b,c a,c 5,5 NULL 54 Using union(a,c); Using where
This will switch to sort-union (intersection will be gone, too,
that's a known limitation:
-set optimizer_switch='default,index_merge_union=off';
+set optimizer_switch='default,index_merge_union=off,rowid_filter=off';
explain select * from t1 where a=10 and b=10 or c=10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge a,b,c a,c 5,5 NULL 54 Using sort_union(a,c); Using where
@@ -1650,7 +1654,7 @@ SELECT * FROM t1 FORCE INDEX ( PRIMARY, population_rate, area_rate, code )
WHERE pk = 1 OR population_rate = 1 OR ( area_rate IN ( 1,2 ) OR area_rate IS NULL )
AND (population_rate = 25 OR area_rate BETWEEN 2 AND 25 OR code BETWEEN 'MA' AND 'TX');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge PRIMARY,code,population_rate,area_rate PRIMARY,population_rate,area_rate 4,5,5 NULL 2 Using sort_union(PRIMARY,population_rate,area_rate); Using where
+1 SIMPLE t1 index_merge PRIMARY,code,population_rate,area_rate PRIMARY,population_rate,area_rate,code 4,5,5,3 NULL 2 Using sort_union(PRIMARY,population_rate,area_rate,code); Using where
SELECT * FROM t1 FORCE INDEX ( PRIMARY, population_rate, area_rate, code )
WHERE pk = 1 OR population_rate = 1 OR ( area_rate IN ( 1,2 ) OR area_rate IS NULL )
AND (population_rate = 25 OR area_rate BETWEEN 2 AND 25 OR code BETWEEN 'MA' AND 'TX');
@@ -1748,6 +1752,7 @@ alter table t0 add key8 int not null, add index i8(key8);
update t0 set key2=key1,key3=key1,key8=1024-key1;
analyze table t0;
Table Op Msg_type Msg_text
+test.t0 analyze status Engine-independent statistics collected
test.t0 analyze status OK
set @optimizer_switch_save=@@optimizer_switch;
set optimizer_switch='derived_merge=off,derived_with_keys=off';
diff --git a/mysql-test/main/index_merge_myisam.test b/mysql-test/main/index_merge_myisam.test
index 75beb9bd883..c3ac7fd32e5 100644
--- a/mysql-test/main/index_merge_myisam.test
+++ b/mysql-test/main/index_merge_myisam.test
@@ -16,6 +16,7 @@ let $merge_table_support= 1;
set @optimizer_switch_save= @@optimizer_switch;
+set optimizer_switch='rowid_filter=off';
set optimizer_switch='index_merge_sort_intersection=off';
--source include/index_merge1.inc
@@ -39,11 +40,11 @@ from t0 A, t0 B, t0 C;
explain select * from t1 where a=1 or b=1;
--echo This should use ALL:
-set optimizer_switch='default,index_merge=off';
+set optimizer_switch='default,index_merge=off,rowid_filter=off';
explain select * from t1 where a=1 or b=1;
--echo This should use sort-union:
-set optimizer_switch='default,index_merge_union=off';
+set optimizer_switch='default,index_merge_union=off,rowid_filter=off';
explain select * from t1 where a=1 or b=1;
--echo This will use sort-union:
@@ -51,16 +52,16 @@ set optimizer_switch=default;
explain select * from t1 where a<1 or b <1;
--echo This should use ALL:
-set optimizer_switch='default,index_merge_sort_union=off';
+set optimizer_switch='default,index_merge_sort_union=off,rowid_filter=off';
explain select * from t1 where a<1 or b <1;
--echo This should use ALL:
-set optimizer_switch='default,index_merge=off';
+set optimizer_switch='default,index_merge=off,rowid_filter=off';
explain select * from t1 where a<1 or b <1;
--echo This will use sort-union:
-set optimizer_switch='default,index_merge_union=off';
+set optimizer_switch='default,index_merge_union=off,rowid_filter=off';
explain select * from t1 where a<1 or b <1;
alter table t1 add d int, add key(d);
@@ -71,7 +72,7 @@ set optimizer_switch=default;
explain select * from t1 where (a=3 or b in (1,2)) and (c=3 or d=4);
--echo And if we disable sort_union, union:
-set optimizer_switch='default,index_merge_sort_union=off';
+set optimizer_switch='default,index_merge_sort_union=off,rowid_filter=off';
explain select * from t1 where (a=3 or b in (1,2)) and (c=3 or d=4);
drop table t1;
@@ -92,19 +93,19 @@ set optimizer_switch=default;
explain select * from t1 where a=10 and b=10;
--echo No intersect when index_merge is disabled:
-set optimizer_switch='default,index_merge=off';
+set optimizer_switch='default,index_merge=off,rowid_filter=off';
explain select * from t1 where a=10 and b=10;
--echo No intersect if it is disabled:
-set optimizer_switch='default,index_merge_sort_intersection=off,index_merge_intersection=off';
+set optimizer_switch='default,index_merge_sort_intersection=off,index_merge_intersection=off,rowid_filter=off';
explain select * from t1 where a=10 and b=10;
--echo Do intersect when union was disabled
-set optimizer_switch='default,index_merge_union=off';
+set optimizer_switch='default,index_merge_union=off,rowid_filter=off';
explain select * from t1 where a=10 and b=10;
--echo Do intersect when sort_union was disabled
-set optimizer_switch='default,index_merge_sort_union=off';
+set optimizer_switch='default,index_merge_sort_union=off,rowid_filter=off';
explain select * from t1 where a=10 and b=10;
# Now take union-of-intersection and see how we can disable parts of it
@@ -113,12 +114,12 @@ set optimizer_switch=default;
explain select * from t1 where a=10 and b=10 or c=10;
--echo Should be only union left:
-set optimizer_switch='default,index_merge_intersection=off';
+set optimizer_switch='default,index_merge_intersection=off,rowid_filter=off';
explain select * from t1 where a=10 and b=10 or c=10;
--echo This will switch to sort-union (intersection will be gone, too,
--echo that's a known limitation:
-set optimizer_switch='default,index_merge_union=off';
+set optimizer_switch='default,index_merge_union=off,rowid_filter=off';
explain select * from t1 where a=10 and b=10 or c=10;
set optimizer_switch=default;
diff --git a/mysql-test/main/information_schema-big.result b/mysql-test/main/information_schema-big.result
index d4aa6deb2e2..b135621acb7 100644
--- a/mysql-test/main/information_schema-big.result
+++ b/mysql-test/main/information_schema-big.result
@@ -36,6 +36,7 @@ GLOBAL_VARIABLES VARIABLE_NAME
INDEX_STATISTICS TABLE_SCHEMA
KEY_CACHES KEY_CACHE_NAME
KEY_COLUMN_USAGE CONSTRAINT_SCHEMA
+OPTIMIZER_TRACE QUERY
PARAMETERS SPECIFIC_SCHEMA
PARTITIONS TABLE_SCHEMA
PLUGINS PLUGIN_NAME
@@ -94,6 +95,7 @@ GLOBAL_VARIABLES VARIABLE_NAME
INDEX_STATISTICS TABLE_SCHEMA
KEY_CACHES KEY_CACHE_NAME
KEY_COLUMN_USAGE CONSTRAINT_SCHEMA
+OPTIMIZER_TRACE QUERY
PARAMETERS SPECIFIC_SCHEMA
PARTITIONS TABLE_SCHEMA
PLUGINS PLUGIN_NAME
diff --git a/mysql-test/main/information_schema.result b/mysql-test/main/information_schema.result
index bca22b4885e..b4d2d065d4a 100644
--- a/mysql-test/main/information_schema.result
+++ b/mysql-test/main/information_schema.result
@@ -67,6 +67,7 @@ GLOBAL_VARIABLES
INDEX_STATISTICS
KEY_CACHES
KEY_COLUMN_USAGE
+OPTIMIZER_TRACE
PARAMETERS
PARTITIONS
PLUGINS
@@ -96,12 +97,12 @@ db
event
func
general_log
+global_priv
gtid_slave_pos
help_category
help_keyword
help_relation
help_topic
-host
index_stats
plugin
proc
@@ -564,6 +565,7 @@ create view v2 (c) as select a from t1 WITH LOCAL CHECK OPTION;
create view v3 (c) as select a from t1 WITH CASCADED CHECK OPTION;
select * from information_schema.views;
TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE CHARACTER_SET_CLIENT COLLATION_CONNECTION ALGORITHM
+def mysql user select `mysql`.`global_priv`.`Host` AS `Host`,`mysql`.`global_priv`.`User` AS `User`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`mysql`.`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,'N' AS `password_expired`,elt(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`mysql`.`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `mysql`.`global_priv` NONE YES root@localhost DEFINER latin1 latin1_swedish_ci UNDEFINED
def test v1 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER latin1 latin1_swedish_ci UNDEFINED
def test v2 select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER latin1 latin1_swedish_ci UNDEFINED
def test v3 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER latin1 latin1_swedish_ci UNDEFINED
@@ -594,6 +596,7 @@ information_schema.tables;
s1
10
11
+NULL
drop table t1;
SHOW CREATE TABLE INFORMATION_SCHEMA.character_sets;
Table Create Table
@@ -665,7 +668,7 @@ proc body longblob
proc definer char(141)
proc created timestamp
proc modified timestamp
-proc sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT')
+proc sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL')
proc comment text
proc character_set_client char(32)
proc collation_connection char(32)
@@ -825,7 +828,7 @@ NULL
select table_type from information_schema.tables
where table_schema="mysql" and table_name="user";
table_type
-BASE TABLE
+VIEW
show open tables where `table` like "user";
Database Table In_use Name_locked
mysql user 0 0
@@ -862,6 +865,8 @@ information_schema COLUMNS COLUMN_DEFAULT
information_schema COLUMNS COLUMN_TYPE
information_schema COLUMNS GENERATION_EXPRESSION
information_schema EVENTS EVENT_DEFINITION
+information_schema OPTIMIZER_TRACE QUERY
+information_schema OPTIMIZER_TRACE TRACE
information_schema PARAMETERS DTD_IDENTIFIER
information_schema PARTITIONS PARTITION_EXPRESSION
information_schema PARTITIONS SUBPARTITION_EXPRESSION
@@ -875,6 +880,14 @@ information_schema SYSTEM_VARIABLES ENUM_VALUE_LIST
information_schema TRIGGERS ACTION_CONDITION
information_schema TRIGGERS ACTION_STATEMENT
information_schema VIEWS VIEW_DEFINITION
+mysql global_priv Priv
+mysql user Password
+mysql user ssl_cipher
+mysql user x509_issuer
+mysql user x509_subject
+mysql user plugin
+mysql user authentication_string
+mysql user default_role
select table_name, column_name, data_type from information_schema.columns
where data_type = 'datetime' and table_name not like 'innodb_%'
order by binary table_name, ordinal_position;
@@ -1254,7 +1267,7 @@ CREATE VIEW v1 AS SELECT 1 FROM t1
WHERE f3 = (SELECT func2 ());
SELECT func1();
func1()
-1
+2
DROP TABLE t1;
DROP VIEW v1;
DROP FUNCTION func1;
@@ -1405,6 +1418,7 @@ create table t2 (f1 int, f2 int);
create view v2 as select f1+1 as a, f2 as b from t2;
select table_name, is_updatable from information_schema.views order by table_name;
table_name is_updatable
+user YES
v1 NO
v2 YES
delete from v1;
@@ -1958,8 +1972,9 @@ lock table t1 read;
connect con1, localhost, root,,;
connection con1;
flush tables;
+flush tables t1;
connection default;
-select * from information_schema.views;
+select * from information_schema.views where table_schema='test';
TABLE_CATALOG def
TABLE_SCHEMA test
TABLE_NAME v1
diff --git a/mysql-test/main/information_schema.test b/mysql-test/main/information_schema.test
index 2b318f5f1aa..95b6f253058 100644
--- a/mysql-test/main/information_schema.test
+++ b/mysql-test/main/information_schema.test
@@ -1622,15 +1622,16 @@ alter table t1 change b c int;
lock table t1 read;
connect(con1, localhost, root,,);
connection con1;
-send flush tables;
+flush tables;
+send flush tables t1;
connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for table flush" and
- info = "flush tables";
+ where state = "Waiting for table metadata lock" and
+ info = "flush tables t1";
--source include/wait_condition.inc
--vertical_results
-select * from information_schema.views;
+select * from information_schema.views where table_schema='test';
--horizontal_results
unlock tables;
diff --git a/mysql-test/main/information_schema_all_engines.result b/mysql-test/main/information_schema_all_engines.result
index 2916858b5a6..9ba4d20c76d 100644
--- a/mysql-test/main/information_schema_all_engines.result
+++ b/mysql-test/main/information_schema_all_engines.result
@@ -43,6 +43,7 @@ INNODB_TABLESPACES_SCRUBBING
INNODB_TRX
KEY_CACHES
KEY_COLUMN_USAGE
+OPTIMIZER_TRACE
PARAMETERS
PARTITIONS
PLUGINS
@@ -123,6 +124,7 @@ INNODB_TABLESPACES_SCRUBBING SPACE
INNODB_TRX trx_id
KEY_CACHES KEY_CACHE_NAME
KEY_COLUMN_USAGE CONSTRAINT_SCHEMA
+OPTIMIZER_TRACE QUERY
PARAMETERS SPECIFIC_SCHEMA
PARTITIONS TABLE_SCHEMA
PLUGINS PLUGIN_NAME
@@ -203,6 +205,7 @@ INNODB_TABLESPACES_SCRUBBING SPACE
INNODB_TRX trx_id
KEY_CACHES KEY_CACHE_NAME
KEY_COLUMN_USAGE CONSTRAINT_SCHEMA
+OPTIMIZER_TRACE QUERY
PARAMETERS SPECIFIC_SCHEMA
PARTITIONS TABLE_SCHEMA
PLUGINS PLUGIN_NAME
@@ -288,6 +291,7 @@ INNODB_TABLESPACES_SCRUBBING information_schema.INNODB_TABLESPACES_SCRUBBING 1
INNODB_TRX information_schema.INNODB_TRX 1
KEY_CACHES information_schema.KEY_CACHES 1
KEY_COLUMN_USAGE information_schema.KEY_COLUMN_USAGE 1
+OPTIMIZER_TRACE information_schema.OPTIMIZER_TRACE 1
PARAMETERS information_schema.PARAMETERS 1
PARTITIONS information_schema.PARTITIONS 1
PLUGINS information_schema.PLUGINS 1
@@ -358,6 +362,7 @@ Database: information_schema
| INNODB_TRX |
| KEY_CACHES |
| KEY_COLUMN_USAGE |
+| OPTIMIZER_TRACE |
| PARAMETERS |
| PARTITIONS |
| PLUGINS |
@@ -428,6 +433,7 @@ Database: INFORMATION_SCHEMA
| INNODB_TRX |
| KEY_CACHES |
| KEY_COLUMN_USAGE |
+| OPTIMIZER_TRACE |
| PARAMETERS |
| PARTITIONS |
| PLUGINS |
@@ -459,5 +465,5 @@ Wildcard: inf_rmation_schema
| information_schema |
SELECT table_schema, count(*) FROM information_schema.TABLES WHERE table_schema IN ('mysql', 'INFORMATION_SCHEMA', 'test', 'mysqltest') GROUP BY TABLE_SCHEMA;
table_schema count(*)
-information_schema 65
+information_schema 66
mysql 31
diff --git a/mysql-test/main/information_schema_db.result b/mysql-test/main/information_schema_db.result
index 45ade65c502..0d98a10050d 100644
--- a/mysql-test/main/information_schema_db.result
+++ b/mysql-test/main/information_schema_db.result
@@ -26,6 +26,8 @@ declare ret_val int;
select max(f1) from t1 into ret_val;
return ret_val;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create view v1 as select f1 from t1 where f1 = func1(f1);
create function func2() returns int return 1;
use mbase;
diff --git a/mysql-test/main/init_file_set_password-7656.result b/mysql-test/main/init_file_set_password-7656.result
index e5b3fc75706..fba65b036fd 100644
--- a/mysql-test/main/init_file_set_password-7656.result
+++ b/mysql-test/main/init_file_set_password-7656.result
@@ -1,8 +1,8 @@
create user foo@localhost;
select user,host,password from mysql.user where user='foo';
-user host password
+User Host Password
foo localhost
-select user,host,password from mysql.user where user='foo';
-user host password
-foo localhost *94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29
+select user,host,password,plugin,authentication_string from mysql.user where user='foo';
+User Host Password plugin authentication_string
+foo localhost *94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29 mysql_native_password *94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29
drop user foo@localhost;
diff --git a/mysql-test/main/init_file_set_password-7656.test b/mysql-test/main/init_file_set_password-7656.test
index ecee3924355..c695d01b675 100644
--- a/mysql-test/main/init_file_set_password-7656.test
+++ b/mysql-test/main/init_file_set_password-7656.test
@@ -21,6 +21,6 @@ EOF
--exec echo "restart:--init-file=$MYSQLTEST_VARDIR/init.file " > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/wait_until_connected_again.inc
-select user,host,password from mysql.user where user='foo';
+select user,host,password,plugin,authentication_string from mysql.user where user='foo';
drop user foo@localhost;
diff --git a/mysql-test/main/innodb_ext_key.result b/mysql-test/main/innodb_ext_key.result
index 7a994730738..b4572174fcb 100644
--- a/mysql-test/main/innodb_ext_key.result
+++ b/mysql-test/main/innodb_ext_key.result
@@ -1,6 +1,11 @@
DROP TABLE IF EXISTS t1,t2,t3,t4;
DROP DATABASE IF EXISTS dbt3_s001;
SET SESSION STORAGE_ENGINE='InnoDB';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
CREATE DATABASE dbt3_s001;
use dbt3_s001;
set @save_ext_key_optimizer_switch=@@optimizer_switch;
@@ -819,8 +824,8 @@ set optimizer_switch='extended_keys=off';
explain
select * from t1, t2 where t2.a=t1.a and t2.b < 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 10
-1 SIMPLE t2 eq_ref a a 4 test.t1.a 1 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL #
+1 SIMPLE t2 eq_ref a a 4 test.t1.a # Using where
flush status;
select * from t1, t2 where t2.a=t1.a and t2.b < 2;
a pk a b
@@ -841,8 +846,8 @@ set optimizer_switch='extended_keys=on';
explain
select * from t1, t2 where t2.a=t1.a and t2.b < 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 10
-1 SIMPLE t2 eq_ref a a 4 test.t1.a 1 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL #
+1 SIMPLE t2 eq_ref a a 4 test.t1.a # Using where
flush status;
select * from t1, t2 where t2.a=t1.a and t2.b < 2;
a pk a b
@@ -872,7 +877,9 @@ alter table t3 add primary key (pk1, pk2);
alter table t3 add key (col1, col2);
analyze table t1,t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
set optimizer_switch='extended_keys=off';
explain
@@ -1027,10 +1034,16 @@ create table t1 (a bigint not null unique auto_increment, b varchar(10), primary
create table t2 (a bigint not null unique auto_increment, b varchar(10), primary key (a), key (b(2))) engine = innodb default character set utf8;
insert into t1 (b) values (null), (null), (null);
insert into t2 (b) values (null), (null), (null);
+analyze table t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
set optimizer_switch='extended_keys=on';
explain select a from t1 where b is null order by a desc limit 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index b PRIMARY 8 NULL 3 Using where
+1 SIMPLE t1 index b PRIMARY 8 NULL 2 Using where
select a from t1 where b is null order by a desc limit 2;
a
3
@@ -1172,7 +1185,7 @@ EXPLAIN
"key_length": "3070",
"used_key_parts": ["f2", "pk1"],
"rows": 1,
- "filtered": 100,
+ "filtered": 50,
"index_condition": "t1.pk1 <= 5 and t1.pk2 <= 5 and t1.f2 = 'abc'",
"attached_condition": "t1.f1 <= '3'"
}
@@ -1202,10 +1215,13 @@ EXPLAIN
"key_length": "3011",
"used_key_parts": ["pk1", "f2", "pk2"],
"rows": 1,
- "filtered": 100,
+ "filtered": 50,
"index_condition": "t1.f2 <= 5 and t1.pk2 <= 5 and t1.pk1 = 'abc'",
"attached_condition": "t1.f1 <= '3'"
}
}
}
drop table t1;
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/innodb_ext_key.test b/mysql-test/main/innodb_ext_key.test
index 4104ac5f787..d2f426662b4 100644
--- a/mysql-test/main/innodb_ext_key.test
+++ b/mysql-test/main/innodb_ext_key.test
@@ -7,6 +7,13 @@ DROP DATABASE IF EXISTS dbt3_s001;
SET SESSION STORAGE_ENGINE='InnoDB';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
+
CREATE DATABASE dbt3_s001;
use dbt3_s001;
@@ -490,6 +497,7 @@ select
from t1 A, t1 B;
set optimizer_switch='extended_keys=off';
+--replace_column 9 #
explain
select * from t1, t2 where t2.a=t1.a and t2.b < 2;
flush status;
@@ -497,6 +505,7 @@ select * from t1, t2 where t2.a=t1.a and t2.b < 2;
show status like 'handler_read%';
set optimizer_switch='extended_keys=on';
+--replace_column 9 #
explain
select * from t1, t2 where t2.a=t1.a and t2.b < 2;
flush status;
@@ -674,6 +683,8 @@ create table t2 (a bigint not null unique auto_increment, b varchar(10), primary
insert into t1 (b) values (null), (null), (null);
insert into t2 (b) values (null), (null), (null);
+analyze table t1,t2;
+
set optimizer_switch='extended_keys=on';
explain select a from t1 where b is null order by a desc limit 2;
select a from t1 where b is null order by a desc limit 2;
@@ -822,3 +833,7 @@ INSERT INTO t1 VALUES (1,2,'2','abc'),(2,3,'3','def');
explain format= json
select * from t1 force index(k1) where f2 <= 5 and pk2 <=5 and pk1 = 'abc' and f1 <= '3';
drop table t1;
+
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/innodb_icp.result b/mysql-test/main/innodb_icp.result
index 9ba98ba5b13..07d317925cd 100644
--- a/mysql-test/main/innodb_icp.result
+++ b/mysql-test/main/innodb_icp.result
@@ -1,5 +1,10 @@
set @save_storage_engine= @@storage_engine;
set storage_engine=InnoDB;
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
set @innodb_icp_tmp=@@optimizer_switch;
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
#
@@ -590,6 +595,12 @@ i1 INTEGER NOT NULL,
PRIMARY KEY (pk)
);
INSERT INTO t2 VALUES (4,1);
+ANALYZE TABLE t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
EXPLAIN
SELECT t1.d1, t2.pk, t2.i1 FROM t1 STRAIGHT_JOIN t2 ON t2.i1
WHERE t2.pk <> t1.d1 AND t2.pk = 4;
@@ -679,7 +690,7 @@ EXPLAIN
SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
HAVING t1.c != 5 ORDER BY t1.c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 Using where; Using filesort
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where; Using filesort
1 SIMPLE t2 ref a a 515 test.t1.a 1 Using where
SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
HAVING t1.c != 5 ORDER BY t1.c;
@@ -690,7 +701,7 @@ EXPLAIN
SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
HAVING t1.c != 5 ORDER BY t1.c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 Using where; Using filesort
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where; Using filesort
1 SIMPLE t2 ref a a 515 test.t1.a 1 Using where
SELECT t1.b, t1.c FROM t1, t2 WHERE t1.a = t2.a AND t1.b != 0
HAVING t1.c != 5 ORDER BY t1.c;
@@ -795,6 +806,12 @@ INSERT INTO t2 (g,h) VALUES
(0,'p'),(0,'f'),(0,'p'),(7,'d'),(7,'f'),(5,'j'),
(3,'e'),(1,'u'),(4,'v'),(9,'u'),(6,'i'),(1,'x'),
(7,'f'),(5,'j'),(3,'e'),(1,'u'),(4,'v'),(9,'u');
+ANALYZE TABLE t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
SET @save_optimize_switch=@@optimizer_switch;
SET optimizer_switch='materialization=on';
EXPLAIN
@@ -804,7 +821,7 @@ AND (EXISTS (SELECT * FROM t1, t2 WHERE a = f AND h <= t.e AND a > t.b)
OR a = 0 AND h < 'z' );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t ALL PRIMARY,c NULL NULL NULL 64 Using where
-1 PRIMARY t2 ref g g 5 test.t.c 9 Using where
+1 PRIMARY t2 ref g g 5 test.t.c 18 Using where
2 DEPENDENT SUBQUERY t1 index PRIMARY d 3 NULL 64 Using where; Using index
2 DEPENDENT SUBQUERY t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 Using where
SELECT COUNT(*) FROM t1 AS t, t2
@@ -947,3 +964,6 @@ pk key1 key2
drop table t1, t2;
set optimizer_switch=@innodb_icp_tmp;
set storage_engine= @save_storage_engine;
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/innodb_icp.test b/mysql-test/main/innodb_icp.test
index acb8238e01f..d192ce044df 100644
--- a/mysql-test/main/innodb_icp.test
+++ b/mysql-test/main/innodb_icp.test
@@ -7,6 +7,13 @@
set @save_storage_engine= @@storage_engine;
set storage_engine=InnoDB;
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
+
set @innodb_icp_tmp=@@optimizer_switch;
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
@@ -76,3 +83,7 @@ drop table t1, t2;
set optimizer_switch=@innodb_icp_tmp;
set storage_engine= @save_storage_engine;
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
+
diff --git a/mysql-test/main/innodb_mysql_lock2.result b/mysql-test/main/innodb_mysql_lock2.result
index df97b32a41c..608cbec88c5 100644
--- a/mysql-test/main/innodb_mysql_lock2.result
+++ b/mysql-test/main/innodb_mysql_lock2.result
@@ -57,6 +57,8 @@ declare j int;
select i from t1 where i = 1 into j;
return j;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f2() returns int
begin
declare k int;
@@ -64,6 +66,8 @@ select i from t1 where i = 1 into k;
insert into t2 values (k + 5);
return 0;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f3() returns int
begin
return (select i from t1 where i = 3);
@@ -87,12 +91,16 @@ declare k int;
select i from v1 where i = 1 into k;
return k;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f7() returns int
begin
declare k int;
select j from v2 where j = 1 into k;
return k;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f8() returns int
begin
declare k int;
@@ -100,6 +108,8 @@ select i from v1 where i = 1 into k;
insert into t2 values (k+5);
return k;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f9() returns int
begin
update v2 set j=j+10 where j=1;
@@ -129,6 +139,8 @@ create procedure p2(inout p int)
begin
select i from t1 where i = 1 into p;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f14() returns int
begin
declare k int;
@@ -148,6 +160,8 @@ declare k int;
select i from t1 where i=1 into k;
set new.l= k+1;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create trigger t4_bu before update on t4 for each row
begin
if (select i from t1 where i=1) then
diff --git a/mysql-test/main/intersect.result b/mysql-test/main/intersect.result
index 66c7addfd36..bd88243f151 100644
--- a/mysql-test/main/intersect.result
+++ b/mysql-test/main/intersect.result
@@ -37,7 +37,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
4 INTERSECT t3 ALL NULL NULL NULL NULL 3 100.00
NULL INTERSECT RESULT <intersect2,3,4> ALL NULL NULL NULL NULL NULL NULL
Warnings:
-Note 1003 /* select#1 */ select `a`.`a` AS `a`,`a`.`b` AS `b` from ((/* select#2 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`) intersect (/* select#3 */ select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2`) intersect (/* select#4 */ select `test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t3`)) `a`
+Note 1003 /* select#1 */ select `a`.`a` AS `a`,`a`.`b` AS `b` from (/* select#2 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` intersect (/* select#3 */ select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2`) intersect (/* select#4 */ select `test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t3`)) `a`
EXPLAIN format=json (select a,b from t1) intersect (select c,d from t2) intersect (select e,f from t3);
EXPLAIN
{
@@ -278,7 +278,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
3 INTERSECT t3 ALL NULL NULL NULL NULL 3 100.00 Using join buffer (flat, BNL join)
NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
-Note 1003 /* select#1 */ select `a`.`a` AS `a`,`a`.`b` AS `b` from ((/* select#2 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`) intersect (/* select#3 */ select `test`.`t2`.`c` AS `c`,`test`.`t3`.`e` AS `e` from `test`.`t2` join `test`.`t3`)) `a`
+Note 1003 /* select#1 */ select `a`.`a` AS `a`,`a`.`b` AS `b` from (/* select#2 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` intersect (/* select#3 */ select `test`.`t2`.`c` AS `c`,`test`.`t3`.`e` AS `e` from `test`.`t2` join `test`.`t3`)) `a`
EXPLAIN format=json (select a,b from t1) intersect (select c,e from t2,t3);
EXPLAIN
{
@@ -316,7 +316,7 @@ EXPLAIN
"filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "65",
"join_type": "BNL"
}
}
@@ -380,7 +380,7 @@ ANALYZE
"r_filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "65",
"join_type": "BNL",
"r_filtered": 100
}
@@ -459,7 +459,7 @@ ANALYZE
"r_filtered": 100
},
"buffer_type": "flat",
- "buffer_size": "256Kb",
+ "buffer_size": "65",
"join_type": "BNL",
"r_filtered": 100
}
@@ -497,7 +497,7 @@ a
1
1
(select 1 from dual into @v) intersect (select 1 from dual);
-ERROR HY000: Incorrect usage of INTERSECT and INTO
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'into @v) intersect (select 1 from dual)' at line 1
select 1 from dual ORDER BY 1 intersect select 1 from dual;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'intersect select 1 from dual' at line 1
select 1 as a from dual union all select 1 from dual;
@@ -505,7 +505,7 @@ a
1
1
select 1 from dual intersect all select 1 from dual;
-ERROR HY000: Incorrect usage of INTERSECT and ALL
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'all select 1 from dual' at line 1
create table t1 (a int, b blob, a1 int, b1 blob);
create table t2 (c int, d blob, c1 int, d1 blob);
insert into t1 values (1,"ddd", 1, "sdfrrwwww"),(2, "fgh", 2, "dffggtt");
@@ -599,14 +599,14 @@ explain extended
(select a,b from t1) union (select c,d from t2) intersect (select e,f from t3) union (select 4,4);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 100.00
-3 UNION <derived2> ALL NULL NULL NULL NULL 2 100.00
+5 UNION <derived2> ALL NULL NULL NULL NULL 2 100.00
2 DERIVED t2 ALL NULL NULL NULL NULL 2 100.00
-4 INTERSECT t3 ALL NULL NULL NULL NULL 2 100.00
-NULL INTERSECT RESULT <intersect2,4> ALL NULL NULL NULL NULL NULL NULL
-5 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-NULL UNION RESULT <union1,3,5> ALL NULL NULL NULL NULL NULL NULL
+3 INTERSECT t3 ALL NULL NULL NULL NULL 2 100.00
+NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL NULL
+4 UNION NULL NULL NULL NULL NULL NULL NULL NULL No tables used
+NULL UNION RESULT <union1,5,4> ALL NULL NULL NULL NULL NULL NULL
Warnings:
-Note 1003 (/* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`) union /* select#3 */ select `__3`.`c` AS `c`,`__3`.`d` AS `d` from ((/* select#2 */ select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2`) intersect (/* select#4 */ select `test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t3`)) `__3` union (/* select#5 */ select 4 AS `4`,4 AS `4`)
+Note 1003 (/* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`) union /* select#5 */ select `__5`.`c` AS `c`,`__5`.`d` AS `d` from ((/* select#2 */ select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2`) intersect (/* select#3 */ select `test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t3`)) `__5` union (/* select#4 */ select 4 AS `4`,4 AS `4`)
set SQL_MODE=ORACLE;
(select a,b from t1) union (select c,d from t2) intersect (select e,f from t3) union (select 4,4);
a b
@@ -720,7 +720,7 @@ a b
drop procedure p1;
show create view v1;
View Create View character_set_client collation_connection
-v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS (select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`) union select `__3`.`c` AS `c`,`__3`.`d` AS `d` from ((select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2`) intersect (select `test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t3`)) `__3` union (select 4 AS `4`,4 AS `4`) latin1 latin1_swedish_ci
+v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS (select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1`) union select `__6`.`c` AS `c`,`__6`.`d` AS `d` from (select `test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` intersect (select `test`.`t3`.`e` AS `e`,`test`.`t3`.`f` AS `f` from `test`.`t3`)) `__6` union (select 4 AS `4`,4 AS `4`) latin1 latin1_swedish_ci
drop view v1;
drop tables t1,t2,t3;
#
@@ -833,3 +833,36 @@ c1
3
drop table t12,t13,t234;
# End of 10.3 tests
+#
+# MDEV-18701: Wrong result from query that uses INTERSECT after UNION ALL
+#
+create table t1 (a int);
+insert into t1 values (3), (1), (7), (3), (2), (7), (4);
+create table t2 (a int);
+insert into t2 values (4), (5), (9), (1), (8), (9);
+create table t3 (a int);
+insert into t3 values (8), (1), (8), (2), (3), (7), (2);
+select * from t1 where a > 4
+union all
+select * from t2 where a < 5
+intersect
+select * from t3 where a < 5;
+a
+7
+7
+1
+explain extended
+select * from t1 where a > 4
+union all
+select * from t2 where a < 5
+intersect
+select * from t3 where a < 5;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 7 100.00 Using where
+4 UNION <derived2> ALL NULL NULL NULL NULL 6 100.00
+2 DERIVED t2 ALL NULL NULL NULL NULL 6 100.00 Using where
+3 INTERSECT t3 ALL NULL NULL NULL NULL 7 100.00 Using where
+NULL INTERSECT RESULT <intersect2,3> ALL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 4 union all /* select#4 */ select `__4`.`a` AS `a` from (/* select#2 */ select `test`.`t2`.`a` AS `a` from `test`.`t2` where `test`.`t2`.`a` < 5 intersect /* select#3 */ select `test`.`t3`.`a` AS `a` from `test`.`t3` where `test`.`t3`.`a` < 5) `__4`
+drop table t1,t2,t3;
diff --git a/mysql-test/main/intersect.test b/mysql-test/main/intersect.test
index fb5e991a24c..616a833ea3c 100644
--- a/mysql-test/main/intersect.test
+++ b/mysql-test/main/intersect.test
@@ -59,13 +59,13 @@ drop tables t1,t2,t3;
select 1 as a from dual intersect select 1 from dual;
(select 1 from dual) intersect (select 1 from dual);
---error ER_WRONG_USAGE
+--error ER_PARSE_ERROR
(select 1 from dual into @v) intersect (select 1 from dual);
--error ER_PARSE_ERROR
select 1 from dual ORDER BY 1 intersect select 1 from dual;
select 1 as a from dual union all select 1 from dual;
---error ER_WRONG_USAGE
+--error ER_PARSE_ERROR
select 1 from dual intersect all select 1 from dual;
@@ -321,3 +321,30 @@ select * from t13 union select * from t234 intersect select * from t12;
drop table t12,t13,t234;
--echo # End of 10.3 tests
+
+--echo #
+--echo # MDEV-18701: Wrong result from query that uses INTERSECT after UNION ALL
+--echo #
+
+create table t1 (a int);
+insert into t1 values (3), (1), (7), (3), (2), (7), (4);
+create table t2 (a int);
+insert into t2 values (4), (5), (9), (1), (8), (9);
+create table t3 (a int);
+insert into t3 values (8), (1), (8), (2), (3), (7), (2);
+
+
+select * from t1 where a > 4
+union all
+select * from t2 where a < 5
+intersect
+select * from t3 where a < 5;
+
+explain extended
+select * from t1 where a > 4
+union all
+select * from t2 where a < 5
+intersect
+select * from t3 where a < 5;
+
+drop table t1,t2,t3;
diff --git a/mysql-test/main/invisible_field.result b/mysql-test/main/invisible_field.result
index 876a80814e5..36e62645ef2 100644
--- a/mysql-test/main/invisible_field.result
+++ b/mysql-test/main/invisible_field.result
@@ -404,8 +404,8 @@ b int(11) YES NULL
c int(11) YES NULL
explain select * from t1,t2 where t1.b = t2.c and t1.c = t2.b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 10
-1 SIMPLE t1 ALL b,c NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t1 ALL b,c NULL NULL NULL 10
+1 SIMPLE t2 ALL NULL NULL NULL NULL 10 Using where; Using join buffer (flat, BNL join)
select * from t1,t2 where t1.b = t2.c and t1.c = t2.b;
a a b c
1 1 1 1
@@ -559,6 +559,8 @@ DROP TABLE t1;
create or replace table t1 (a int, b int invisible);
insert into t1 values (1),(2);
select * from t1 into outfile 'f';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
load data infile 'f' into table t1;
select a,b from t1;
a b
@@ -589,6 +591,8 @@ a b
truncate table t1;
insert into t1(a,b) values (1,1),(2,2);
select a,b from t1 into outfile 'a';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
load data infile 'a' into table t1(a,b);
select a,b from t1;
a b
diff --git a/mysql-test/main/invisible_field_debug.result b/mysql-test/main/invisible_field_debug.result
index 0ea8ab12de8..32eb3a274d8 100644
--- a/mysql-test/main/invisible_field_debug.result
+++ b/mysql-test/main/invisible_field_debug.result
@@ -331,6 +331,9 @@ insert into t1 values(1,1,1);
insert into t1 values(2,2,2);
insert into t1 values(3,3,3);
insert into t1 values(4,4,4);
+insert into t1 values(5,5,5);
+insert into t1 values(6,6,6);
+insert into t1 values(7,7,7);
set debug_dbug= "+d,test_completely_invisible,test_invisible_index";
select invisible, a ,b from t1 order by b;
invisible a b
@@ -338,9 +341,12 @@ invisible a b
9 2 2
9 3 3
9 4 4
+9 5 5
+9 6 6
+9 7 7
explain select * from t1 where invisible =9;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref invisible invisible 5 const 3
+1 SIMPLE t1 ALL invisible NULL NULL NULL 7 Using where
alter table t1 add x int default 3;
select invisible, a ,b from t1;
invisible a b
@@ -348,6 +354,9 @@ invisible a b
9 2 2
9 3 3
9 4 4
+9 5 5
+9 6 6
+9 7 7
set debug_dbug=@old_debug;
Show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -359,11 +368,11 @@ drop index invisible on t1;
ERROR 42000: Can't DROP INDEX `invisible`; check that it exists
explain select * from t1 where invisible =9;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref invisible invisible 5 const 3
+1 SIMPLE t1 ALL invisible NULL NULL NULL 7 Using where
create index invisible on t1(c);
explain select * from t1 where invisible =9;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref invisible_2 invisible_2 5 const 3
+1 SIMPLE t1 ALL invisible_2 NULL NULL NULL 7 Using where
show indexes in t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 1 b 1 b A NULL NULL NULL YES BTREE
diff --git a/mysql-test/main/invisible_field_debug.test b/mysql-test/main/invisible_field_debug.test
index 86252512386..77e65cf7a6b 100644
--- a/mysql-test/main/invisible_field_debug.test
+++ b/mysql-test/main/invisible_field_debug.test
@@ -251,6 +251,9 @@ insert into t1 values(1,1,1);
insert into t1 values(2,2,2);
insert into t1 values(3,3,3);
insert into t1 values(4,4,4);
+insert into t1 values(5,5,5);
+insert into t1 values(6,6,6);
+insert into t1 values(7,7,7);
set debug_dbug= "+d,test_completely_invisible,test_invisible_index";
select invisible, a ,b from t1 order by b;
explain select * from t1 where invisible =9;
diff --git a/mysql-test/main/join.result b/mysql-test/main/join.result
index cc8e174c8e6..47c3e78116f 100644
--- a/mysql-test/main/join.result
+++ b/mysql-test/main/join.result
@@ -260,8 +260,8 @@ PRIMARY KEY (id)
INSERT INTO t2 VALUES (1,'s1'),(2,'s2'),(3,'s3'),(4,'s4'),(5,'s5');
select t1.*, t2.* from t1, t2 where t2.id=t1.t2_id limit 2;
t1_id t2_id type cost_unit min_value max_value t3_id item_id id name
-22 1 Percent Cost 100 -1 6 291 1 s1
-23 1 Percent Cost 100 -1 21 291 1 s1
+12 5 Percent Cost -1 0 -1 -1 5 s5
+14 4 Percent Cost -1 0 -1 -1 4 s4
drop table t1,t2;
CREATE TABLE t1 (
siteid varchar(25) NOT NULL default '',
@@ -755,10 +755,10 @@ ERROR 42S22: Unknown column 't1.b' in 'on clause'
select
statistics.TABLE_NAME, statistics.COLUMN_NAME, statistics.TABLE_CATALOG, statistics.TABLE_SCHEMA, statistics.NON_UNIQUE, statistics.INDEX_SCHEMA, statistics.INDEX_NAME, statistics.SEQ_IN_INDEX, statistics.COLLATION, statistics.SUB_PART, statistics.PACKED, statistics.NULLABLE, statistics.INDEX_TYPE, statistics.COMMENT,
columns.TABLE_CATALOG, columns.TABLE_SCHEMA, columns.COLUMN_DEFAULT, columns.IS_NULLABLE, columns.DATA_TYPE, columns.CHARACTER_MAXIMUM_LENGTH, columns.CHARACTER_OCTET_LENGTH, columns.NUMERIC_PRECISION, columns.NUMERIC_SCALE, columns.CHARACTER_SET_NAME, columns.COLLATION_NAME, columns.COLUMN_TYPE, columns.COLUMN_KEY, columns.EXTRA, columns.COLUMN_COMMENT
-from information_schema.statistics join information_schema.columns using(table_name,column_name) where table_name='user';
+from information_schema.statistics join information_schema.columns using(table_name,column_name) where table_name='global_priv';
TABLE_NAME COLUMN_NAME TABLE_CATALOG TABLE_SCHEMA NON_UNIQUE INDEX_SCHEMA INDEX_NAME SEQ_IN_INDEX COLLATION SUB_PART PACKED NULLABLE INDEX_TYPE COMMENT TABLE_CATALOG TABLE_SCHEMA COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA COLUMN_COMMENT
-user Host def mysql 0 mysql PRIMARY 1 A NULL NULL BTREE def mysql '' NO char 60 180 NULL NULL utf8 utf8_bin char(60) PRI
-user User def mysql 0 mysql PRIMARY 2 A NULL NULL BTREE def mysql '' NO char 80 240 NULL NULL utf8 utf8_bin char(80) PRI
+global_priv Host def mysql 0 mysql PRIMARY 1 A NULL NULL BTREE def mysql '' NO char 60 180 NULL NULL utf8 utf8_bin char(60) PRI
+global_priv User def mysql 0 mysql PRIMARY 2 A NULL NULL BTREE def mysql '' NO char 80 240 NULL NULL utf8 utf8_bin char(80) PRI
Warnings:
Warning 1286 Unknown storage engine 'InnoDB'
Warning 1286 Unknown storage engine 'InnoDB'
@@ -1268,13 +1268,19 @@ id select_type table type possible_keys key key_len ref rows Extra
INSERT INTO t1 VALUES (3,'b'),(4,NULL),(5,'c'),(6,'cc'),(7,'d'),
(8,'dd'),(9,'e'),(10,'ee');
INSERT INTO t2 VALUES (2,NULL);
+ANALYZE TABLE t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
FLUSH STATUS;
SELECT * FROM t1 JOIN t2 ON t1.v = t2.v WHERE t2.v IS NULL ORDER BY 1;
pk v pk v
SHOW STATUS LIKE 'Handler_read_%';
Variable_name Value
Handler_read_first 0
-Handler_read_key 1
+Handler_read_key 14
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
@@ -1484,7 +1490,7 @@ DROP TABLE t1,t2,t3,t4,t5;
# MDEV-4752: Segfault during parsing of illegal query
#
SELECT * FROM t5 JOIN (t1 JOIN t2 UNION SELECT * FROM t3 JOIN t4);
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT * FROM t3 JOIN t4)' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT * FROM t3 JOIN t4)' at line 1
#
# MDEV-4959: join of const table with NULL fields
#
diff --git a/mysql-test/main/join.test b/mysql-test/main/join.test
index 3d2a02e2346..6b0481d859b 100644
--- a/mysql-test/main/join.test
+++ b/mysql-test/main/join.test
@@ -555,7 +555,7 @@ select * from v1a join v1b on t1.b = t2.b;
select
statistics.TABLE_NAME, statistics.COLUMN_NAME, statistics.TABLE_CATALOG, statistics.TABLE_SCHEMA, statistics.NON_UNIQUE, statistics.INDEX_SCHEMA, statistics.INDEX_NAME, statistics.SEQ_IN_INDEX, statistics.COLLATION, statistics.SUB_PART, statistics.PACKED, statistics.NULLABLE, statistics.INDEX_TYPE, statistics.COMMENT,
columns.TABLE_CATALOG, columns.TABLE_SCHEMA, columns.COLUMN_DEFAULT, columns.IS_NULLABLE, columns.DATA_TYPE, columns.CHARACTER_MAXIMUM_LENGTH, columns.CHARACTER_OCTET_LENGTH, columns.NUMERIC_PRECISION, columns.NUMERIC_SCALE, columns.CHARACTER_SET_NAME, columns.COLLATION_NAME, columns.COLUMN_TYPE, columns.COLUMN_KEY, columns.EXTRA, columns.COLUMN_COMMENT
- from information_schema.statistics join information_schema.columns using(table_name,column_name) where table_name='user';
+ from information_schema.statistics join information_schema.columns using(table_name,column_name) where table_name='global_priv';
drop table t1;
drop table t2;
@@ -955,6 +955,7 @@ EXPLAIN SELECT * FROM t1 JOIN t2 ON t1.v = t2.v;
INSERT INTO t1 VALUES (3,'b'),(4,NULL),(5,'c'),(6,'cc'),(7,'d'),
(8,'dd'),(9,'e'),(10,'ee');
INSERT INTO t2 VALUES (2,NULL);
+ANALYZE TABLE t1,t2;
FLUSH STATUS;
SELECT * FROM t1 JOIN t2 ON t1.v = t2.v WHERE t2.v IS NULL ORDER BY 1;
SHOW STATUS LIKE 'Handler_read_%';
diff --git a/mysql-test/main/join_cache.result b/mysql-test/main/join_cache.result
index 23396d22876..999ed500a84 100644
--- a/mysql-test/main/join_cache.result
+++ b/mysql-test/main/join_cache.result
@@ -6,6 +6,11 @@ set optimizer_switch='semijoin=on,firstmatch=on,loosescan=on';
set @@optimizer_switch='semijoin_with_cache=on';
set @@optimizer_switch='outer_join_with_cache=on';
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
set @local_join_cache_test_optimizer_switch_default=@@optimizer_switch;
set names utf8;
CREATE DATABASE world;
@@ -79,9 +84,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
+1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where
+1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where; Using join buffer (flat, BNL join)
1 SIMPLE CountryLanguage ALL NULL NULL NULL NULL 984 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where; Using join buffer (flat, BNL join)
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -157,9 +162,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
-1 SIMPLE CountryLanguage ALL NULL NULL NULL NULL 984 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where
+1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE CountryLanguage ALL NULL NULL NULL NULL 984 Using where; Using join buffer (incremental, BNL join)
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -236,8 +241,8 @@ CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
-1 SIMPLE CountryLanguage hash_ALL NULL #hash#$hj 3 world.Country.Code 984 Using where; Using join buffer (flat, BNLH join)
1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE CountryLanguage hash_ALL NULL #hash#$hj 3 world.Country.Code 984 Using where; Using join buffer (flat, BNLH join)
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -314,8 +319,8 @@ CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
-1 SIMPLE CountryLanguage hash_ALL NULL #hash#$hj 3 world.Country.Code 984 Using where; Using join buffer (flat, BNLH join)
-1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (incremental, BNLH join)
+1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE CountryLanguage hash_ALL NULL #hash#$hj 3 world.Country.Code 984 Using where; Using join buffer (incremental, BNLH join)
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -402,7 +407,7 @@ ON City.Country=Country.Code AND City.Population > 5000000
WHERE Country.Name LIKE 'C%' AND Country.Population > 10000000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
-1 SIMPLE City hash_range City_Population #hash#$hj:City_Population 3:4 world.Country.Code 25 Using where; Rowid-ordered scan; Using join buffer (flat, BNLH join)
+1 SIMPLE City hash_range City_Population #hash#$hj:City_Population 3:4 world.Country.Code 24 Using where; Rowid-ordered scan; Using join buffer (flat, BNLH join)
SELECT Country.Name, Country.Population, City.Name, City.Population
FROM Country LEFT JOIN City
ON City.Country=Country.Code AND City.Population > 5000000
@@ -497,9 +502,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
+1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where
1 SIMPLE CountryLanguage ALL NULL NULL NULL NULL 984 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where; Using join buffer (flat, BNL join)
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -575,9 +580,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
+1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where
1 SIMPLE CountryLanguage ALL NULL NULL NULL NULL 984 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where; Using join buffer (incremental, BNL join)
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -653,9 +658,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
-1 SIMPLE CountryLanguage hash_ALL NULL #hash#$hj 3 world.Country.Code 984 Using where; Using join buffer (flat, BNLH join)
-1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage hash_ALL NULL #hash#$hj 3 world.City.Country 984 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE Country hash_ALL NULL #hash#$hj 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join)
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -731,9 +736,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE Country ALL NULL NULL NULL NULL 239 Using where
-1 SIMPLE CountryLanguage hash_ALL NULL #hash#$hj 3 world.Country.Code 984 Using where; Using join buffer (flat, BNLH join)
-1 SIMPLE City hash_ALL NULL #hash#$hj 3 world.Country.Code 4079 Using where; Using join buffer (incremental, BNLH join)
+1 SIMPLE City ALL NULL NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage hash_ALL NULL #hash#$hj 3 world.City.Country 984 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE Country hash_ALL NULL #hash#$hj 3 world.City.Country 239 Using where; Using join buffer (incremental, BNLH join)
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -848,9 +853,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CountryLanguage ALL PRIMARY,Percentage NULL NULL NULL 984 Using where
-1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.CountryLanguage.Country 239 Using where; Using join buffer (flat, BNLH join)
-1 SIMPLE City hash_ALL Country #hash#Country 3 world.CountryLanguage.Country 4079 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join)
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -1048,9 +1053,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CountryLanguage ALL PRIMARY,Percentage NULL NULL NULL 984 Using where
-1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.CountryLanguage.Country 239 Using where; Using join buffer (flat, BNLH join)
-1 SIMPLE City hash_ALL Country #hash#Country 3 world.CountryLanguage.Country 4079 Using where; Using join buffer (incremental, BNLH join)
+1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (incremental, BNLH join)
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -1278,7 +1283,7 @@ WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 SIMPLE City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -1307,9 +1312,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CountryLanguage ALL PRIMARY,Percentage NULL NULL NULL 984 Using where
-1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE City ref Country Country 3 world.CountryLanguage.Country 18 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -1318,41 +1323,41 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
Name Name Language
-La Matanza Argentina Spanish
Lomas de Zamora Argentina Spanish
+La Matanza Argentina Spanish
Lauro de Freitas Brazil Portuguese
Los Angeles Chile Spanish
Las Palmas de Gran Canaria Spain Spanish
-L´Hospitalet de Llobregat Spain Spanish
Lleida (Lérida) Spain Spanish
+L´Hospitalet de Llobregat Spain Spanish
Liupanshui China Chinese
-Lianyungang China Chinese
Liangcheng China Chinese
+Lianyungang China Chinese
Lengshuijiang China Chinese
Lázaro Cárdenas Mexico Spanish
Lagos de Moreno Mexico Spanish
Las Margaritas Mexico Spanish
Lashio (Lasho) Myanmar Burmese
Lalitapur Nepal Nepali
-Ludwigshafen am Rhein Germany German
Leverkusen Germany German
+Ludwigshafen am Rhein Germany German
Luchou Taiwan Min
Lungtan Taiwan Min
Lower Hutt New Zealand English
Los Teques Venezuela Spanish
Leninsk-Kuznetski Russian Federation Russian
-Los Angeles United States English
Long Beach United States English
Lexington-Fayette United States English
Louisville United States English
Little Rock United States English
+Los Angeles United States English
EXPLAIN
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 PRIMARY City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 PRIMARY City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
@@ -1475,7 +1480,7 @@ WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 SIMPLE City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -1504,9 +1509,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CountryLanguage ALL PRIMARY,Percentage NULL NULL NULL 984 Using where
-1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE City ref Country Country 3 world.CountryLanguage.Country 18 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -1515,41 +1520,41 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
Name Name Language
-La Matanza Argentina Spanish
Lomas de Zamora Argentina Spanish
+La Matanza Argentina Spanish
Lauro de Freitas Brazil Portuguese
Los Angeles Chile Spanish
Las Palmas de Gran Canaria Spain Spanish
-L´Hospitalet de Llobregat Spain Spanish
Lleida (Lérida) Spain Spanish
+L´Hospitalet de Llobregat Spain Spanish
Liupanshui China Chinese
-Lianyungang China Chinese
Liangcheng China Chinese
Lengshuijiang China Chinese
-Lázaro Cárdenas Mexico Spanish
-Lagos de Moreno Mexico Spanish
+Lianyungang China Chinese
Las Margaritas Mexico Spanish
+Lagos de Moreno Mexico Spanish
+Lázaro Cárdenas Mexico Spanish
Lashio (Lasho) Myanmar Burmese
Lalitapur Nepal Nepali
-Ludwigshafen am Rhein Germany German
Leverkusen Germany German
+Ludwigshafen am Rhein Germany German
Luchou Taiwan Min
Lungtan Taiwan Min
Lower Hutt New Zealand English
Los Teques Venezuela Spanish
Leninsk-Kuznetski Russian Federation Russian
-Los Angeles United States English
-Long Beach United States English
-Lexington-Fayette United States English
-Louisville United States English
Little Rock United States English
+Louisville United States English
+Lexington-Fayette United States English
+Long Beach United States English
+Los Angeles United States English
EXPLAIN
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 PRIMARY City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 PRIMARY City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
@@ -1672,7 +1677,7 @@ WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 SIMPLE City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -1701,9 +1706,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CountryLanguage ALL PRIMARY,Percentage NULL NULL NULL 984 Using where
-1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
-1 SIMPLE City ref Country Country 3 world.CountryLanguage.Country 18 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -1716,37 +1721,37 @@ La Matanza Argentina Spanish
Lomas de Zamora Argentina Spanish
Lauro de Freitas Brazil Portuguese
Los Angeles Chile Spanish
+Lleida (Lérida) Spain Spanish
Las Palmas de Gran Canaria Spain Spanish
L´Hospitalet de Llobregat Spain Spanish
-Lleida (Lérida) Spain Spanish
-Liupanshui China Chinese
Lianyungang China Chinese
Liangcheng China Chinese
Lengshuijiang China Chinese
-Lázaro Cárdenas Mexico Spanish
+Liupanshui China Chinese
Lagos de Moreno Mexico Spanish
Las Margaritas Mexico Spanish
+Lázaro Cárdenas Mexico Spanish
Lashio (Lasho) Myanmar Burmese
Lalitapur Nepal Nepali
-Ludwigshafen am Rhein Germany German
Leverkusen Germany German
+Ludwigshafen am Rhein Germany German
Luchou Taiwan Min
Lungtan Taiwan Min
Lower Hutt New Zealand English
Los Teques Venezuela Spanish
Leninsk-Kuznetski Russian Federation Russian
-Los Angeles United States English
-Long Beach United States English
Lexington-Fayette United States English
Louisville United States English
+Los Angeles United States English
Little Rock United States English
+Long Beach United States English
EXPLAIN
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 PRIMARY City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 PRIMARY City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
@@ -1869,7 +1874,7 @@ WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 SIMPLE City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -1898,9 +1903,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CountryLanguage ALL PRIMARY,Percentage NULL NULL NULL 984 Using where
-1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
-1 SIMPLE City ref Country Country 3 world.CountryLanguage.Country 18 Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -1913,28 +1918,28 @@ La Matanza Argentina Spanish
Lomas de Zamora Argentina Spanish
Lauro de Freitas Brazil Portuguese
Los Angeles Chile Spanish
+Lleida (Lérida) Spain Spanish
Las Palmas de Gran Canaria Spain Spanish
L´Hospitalet de Llobregat Spain Spanish
-Lleida (Lérida) Spain Spanish
-Liupanshui China Chinese
Lianyungang China Chinese
Liangcheng China Chinese
Lengshuijiang China Chinese
-Lázaro Cárdenas Mexico Spanish
+Liupanshui China Chinese
Lagos de Moreno Mexico Spanish
+Lázaro Cárdenas Mexico Spanish
Las Margaritas Mexico Spanish
Lashio (Lasho) Myanmar Burmese
Lalitapur Nepal Nepali
-Ludwigshafen am Rhein Germany German
Leverkusen Germany German
+Ludwigshafen am Rhein Germany German
Luchou Taiwan Min
Lungtan Taiwan Min
Lower Hutt New Zealand English
Los Teques Venezuela Spanish
Leninsk-Kuznetski Russian Federation Russian
+Lexington-Fayette United States English
Los Angeles United States English
Long Beach United States English
-Lexington-Fayette United States English
Louisville United States English
Little Rock United States English
EXPLAIN
@@ -1943,7 +1948,7 @@ WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AN
City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 PRIMARY City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 PRIMARY City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
@@ -2099,9 +2104,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CountryLanguage ALL PRIMARY,Percentage NULL NULL NULL 984 Using where
-1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.CountryLanguage.Country 239 Using where; Using join buffer (flat, BNLH join)
-1 SIMPLE City hash_ALL Country #hash#Country 3 world.CountryLanguage.Country 4079 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (flat, BNLH join)
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -2203,9 +2208,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CountryLanguage ALL PRIMARY,Percentage NULL NULL NULL 984 Using where
-1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.CountryLanguage.Country 239 Using where; Using join buffer (flat, BNLH join)
-1 SIMPLE City hash_ALL Country #hash#Country 3 world.CountryLanguage.Country 4079 Using where; Using join buffer (incremental, BNLH join)
+1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage hash_ALL PRIMARY,Percentage #hash#PRIMARY 3 world.City.Country 984 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE Country hash_ALL PRIMARY #hash#PRIMARY 3 world.City.Country 239 Using where; Using join buffer (incremental, BNLH join)
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -2278,7 +2283,7 @@ WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 SIMPLE City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -2307,9 +2312,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CountryLanguage ALL PRIMARY,Percentage NULL NULL NULL 984 Using where
-1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE City ref Country Country 3 world.CountryLanguage.Country 18 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -2352,7 +2357,7 @@ WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AN
City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 PRIMARY City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 PRIMARY City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
@@ -2382,7 +2387,7 @@ WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 SIMPLE City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -2411,9 +2416,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CountryLanguage ALL PRIMARY,Percentage NULL NULL NULL 984 Using where
-1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE City ref Country Country 3 world.CountryLanguage.Country 18 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -2456,7 +2461,7 @@ WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AN
City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 PRIMARY City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 PRIMARY City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
@@ -2486,7 +2491,7 @@ WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 SIMPLE City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -2515,9 +2520,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CountryLanguage ALL PRIMARY,Percentage NULL NULL NULL 984 Using where
-1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
-1 SIMPLE City ref Country Country 3 world.CountryLanguage.Country 18 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -2534,12 +2539,12 @@ Las Palmas de Gran Canaria Spain Spanish
L´Hospitalet de Llobregat Spain Spanish
Lleida (Lérida) Spain Spanish
Liupanshui China Chinese
-Lianyungang China Chinese
Liangcheng China Chinese
+Lianyungang China Chinese
Lengshuijiang China Chinese
Lázaro Cárdenas Mexico Spanish
-Lagos de Moreno Mexico Spanish
Las Margaritas Mexico Spanish
+Lagos de Moreno Mexico Spanish
Lashio (Lasho) Myanmar Burmese
Lalitapur Nepal Nepali
Ludwigshafen am Rhein Germany German
@@ -2549,9 +2554,9 @@ Lungtan Taiwan Min
Lower Hutt New Zealand English
Los Teques Venezuela Spanish
Leninsk-Kuznetski Russian Federation Russian
+Lexington-Fayette United States English
Los Angeles United States English
Long Beach United States English
-Lexington-Fayette United States English
Louisville United States English
Little Rock United States English
EXPLAIN
@@ -2560,7 +2565,7 @@ WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AN
City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 PRIMARY City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 PRIMARY City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
@@ -2590,7 +2595,7 @@ WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 SIMPLE City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name FROM City,Country
WHERE City.Country=Country.Code AND
Country.Name LIKE 'L%' AND City.Population > 100000;
@@ -2619,9 +2624,9 @@ City.Name LIKE 'L%' AND Country.Population > 3000000 AND
CountryLanguage.Percentage > 50 AND
LENGTH(Language) < LENGTH(City.Name) - 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CountryLanguage ALL PRIMARY,Percentage NULL NULL NULL 984 Using where
-1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
-1 SIMPLE City ref Country Country 3 world.CountryLanguage.Country 18 Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE City ALL Country NULL NULL NULL 4079 Using where
+1 SIMPLE CountryLanguage ref PRIMARY,Percentage PRIMARY 3 world.City.Country 4 Using index condition(BKA); Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 SIMPLE Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using join buffer (incremental, BKAH join); Key-ordered Rowid-ordered scan
SELECT City.Name, Country.Name, CountryLanguage.Language
FROM City,Country,CountryLanguage
WHERE City.Country=Country.Code AND
@@ -2664,7 +2669,7 @@ WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AN
City.Population > 100000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country range PRIMARY,Name Name 52 NULL 10 Using index condition; Rowid-ordered scan
-1 PRIMARY City ref Population,Country Country 3 world.Country.Code 18 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
+1 PRIMARY City ref Population,Country Country 3 world.Country.Code 17 Using where; Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT Name FROM City
WHERE City.Country IN (SELECT Code FROM Country WHERE Country.Name LIKE 'L%') AND
City.Population > 100000;
@@ -3037,13 +3042,13 @@ t6.formattypeid IN (2) AND (t3.formatid IN (31, 8, 76)) AND
t1.metaid = t2.metaid AND t1.affiliateid = '2';
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t6 system PRIMARY NULL NULL NULL 1
-1 SIMPLE t1 ref t1_affiliateid,t1_metaid t1_affiliateid 4 const 1
-1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.metaid 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE t7 ref PRIMARY PRIMARY 4 test.t1.metaid 1 Using index
-1 SIMPLE t8 eq_ref PRIMARY PRIMARY 4 test.t7.artistid 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t5 ref PRIMARY,t5_formattypeid t5_formattypeid 4 const 1
+1 SIMPLE t1 ref t1_affiliateid,t1_metaid t1_affiliateid 4 const 2 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.metaid 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t3 ref t3_metaid,t3_formatid,t3_metaidformatid t3_metaidformatid 4 test.t1.metaid 1 Using index condition; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t4 eq_ref PRIMARY,t4_formatclassid,t4_formats_idx PRIMARY 4 test.t3.formatid 1 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE t5 eq_ref PRIMARY,t5_formattypeid PRIMARY 4 test.t4.formatclassid 1 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t7 ref PRIMARY PRIMARY 4 test.t1.metaid 1 Using index
+1 SIMPLE t8 eq_ref PRIMARY PRIMARY 4 test.t7.artistid 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t9 index PRIMARY,t9_subgenreid,t9_metaid PRIMARY 8 NULL 2 Using where; Using index; Using join buffer (incremental, BNL join)
1 SIMPLE t10 eq_ref PRIMARY,t10_genreid PRIMARY 4 test.t9.subgenreid 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t11 eq_ref PRIMARY PRIMARY 4 test.t10.genreid 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
@@ -3160,7 +3165,7 @@ Warning 1292 Truncated incorrect join_buffer_size value: '32'
set join_cache_level=8;
EXPLAIN SELECT * FROM t1,t2 WHERE t1.a=t2.a AND t1.b >= 30;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL idx NULL NULL NULL 7 Using where
+1 SIMPLE t1 range idx idx 5 NULL 3 Using index condition; Using where; Rowid-ordered scan
1 SIMPLE t2 ref idx idx 5 test.t1.a 2 Using join buffer (flat, BKAH join); Key-ordered Rowid-ordered scan
SELECT * FROM t1,t2 WHERE t1.a=t2.a AND t1.b >= 30;
a b a b
@@ -3665,8 +3670,8 @@ where t2.b=t1.b and t3.d=t1.d and t4.c=t1.c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 7 Using where
1 SIMPLE t2 ref idx idx 5 test.t1.b 1
-1 SIMPLE t3 ref idx idx 5 test.t1.d 1
1 SIMPLE t4 ref idx idx 5 test.t1.c 1
+1 SIMPLE t3 ref idx idx 5 test.t1.d 1
select t1.a, t1.b, t1.c, t1.d, t2.e, t3.f, t4.g from t1,t2,t3,t4
where t2.b=t1.b and t3.d=t1.d and t4.c=t1.c;
a b c d e f g
@@ -3678,8 +3683,8 @@ where t2.b=t1.b and t3.d=t1.d and t4.c=t1.c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 7 Using where
1 SIMPLE t2 ref idx idx 5 test.t1.b 1 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE t3 ref idx idx 5 test.t1.d 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t4 ref idx idx 5 test.t1.c 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t3 ref idx idx 5 test.t1.d 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
select t1.a, t1.b, t1.c, t1.d, t2.e, t3.f, t4.g from t1,t2,t3,t4
where t2.b=t1.b and t3.d=t1.d and t4.c=t1.c;
a b c d e f g
@@ -5095,7 +5100,7 @@ SET SESSION join_cache_level = 1;
EXPLAIN
SELECT * FROM t1,t2 WHERE t1.a < 3 and t2.a IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 Using where; Using index
1 SIMPLE t2 ref idx idx 5 const 4 Using index condition
SELECT * FROM t1,t2 WHERE t1.a < 3 and t2.a IS NULL;
a a b
@@ -5111,7 +5116,7 @@ SET SESSION join_cache_level = 4;
EXPLAIN
SELECT * FROM t1,t2 WHERE t1.a < 3 and t2.a IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 Using where; Using index
1 SIMPLE t2 hash_range idx #hash#idx:idx 5:5 const 4 Using index condition; Using where; Rowid-ordered scan; Using join buffer (flat, BNLH join)
SELECT * FROM t1,t2 WHERE t1.a < 3 and t2.a IS NULL;
a a b
@@ -5414,7 +5419,7 @@ ORDER BY t2.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1
1 PRIMARY t3 system NULL NULL NULL NULL 1
-1 PRIMARY t2 range a,c a 5 NULL 1 Using index condition; Using where; Using filesort
+1 PRIMARY t2 range a,c a 5 NULL 2 Using index condition; Using where; Using filesort
1 PRIMARY t4 ref c c 5 test.t2.c 2 Using where; Start temporary; End temporary
SELECT * FROM t1,t2
WHERE t2.c IN (SELECT c FROM t3,t4 WHERE t4.a < 10) AND
@@ -5432,7 +5437,7 @@ ORDER BY t2.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1 Using temporary; Using filesort
1 PRIMARY t3 system NULL NULL NULL NULL 1
-1 PRIMARY t2 range a,c a 5 NULL 1 Using index condition; Using where
+1 PRIMARY t2 range a,c a 5 NULL 2 Using index condition; Using where
1 PRIMARY t4 ref c c 5 test.t2.c 2 Using where; Start temporary; End temporary
SELECT * FROM t1,t2
WHERE t2.c IN (SELECT c FROM t3,t4 WHERE t4.a < 10) AND
@@ -5451,7 +5456,7 @@ ORDER BY t2.b;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system NULL NULL NULL NULL 1 Using temporary; Using filesort
1 PRIMARY t3 system NULL NULL NULL NULL 1
-1 PRIMARY t2 range a,c a 5 NULL 1 Using index condition; Using where
+1 PRIMARY t2 range a,c a 5 NULL 2 Using index condition; Using where
1 PRIMARY t4 ref c c 5 test.t2.c 2 Using where; Start temporary; End temporary
SELECT * FROM t1,t2
WHERE t2.c IN (SELECT c FROM t3,t4 WHERE t4.a < 10) AND
@@ -5934,6 +5939,10 @@ CREATE TABLE t2 (i1 int, v1 varchar(1), KEY v1 (v1,i1)) ENGINE=InnoDB;
INSERT INTO t2 VALUES
(NULL,'x'),(1,'x'),(3,'x'),(5,'x'),(8,'x'),(48,'x'),
(228,'x'),(3,'y'),(1,'z'),(9,'z');
+ANALYZE TABLE t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+test.t2 analyze status OK
CREATE TABLE temp
SELECT t1.i1 AS f1, t1.v1 AS f2 FROM (t2 JOIN t1 ON (t1.v1 = t2.v1));
SELECT * FROM temp
@@ -6033,12 +6042,12 @@ drop table t1;
SET join_cache_level = 3;
# The following should have
# - table order PROFILING,user,
-# - table user accessed with hash_ALL:
+# - table db accessed with hash_ALL:
explain
-SELECT * FROM INFORMATION_SCHEMA.PROFILING, mysql.user WHERE password_expired = PAGE_FAULTS_MINOR;
+SELECT * FROM INFORMATION_SCHEMA.PROFILING, mysql.db WHERE Select_priv = PAGE_FAULTS_MINOR;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE PROFILING ALL NULL NULL NULL NULL NULL Using where
-1 SIMPLE user hash_ALL NULL #hash#$hj 1 information_schema.PROFILING.PAGE_FAULTS_MINOR 4 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE db hash_ALL NULL #hash#$hj 1 information_schema.PROFILING.PAGE_FAULTS_MINOR 2 Using where; Using join buffer (flat, BNLH join)
set join_cache_level=default;
create table t1 (c1 date not null, key (c1)) engine=innodb;
insert t1 values ('2017-12-27');
@@ -6050,3 +6059,6 @@ f2
drop table t1, t2;
set join_buffer_size = default;
set @@optimizer_switch=@save_optimizer_switch;
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/join_cache.test b/mysql-test/main/join_cache.test
index df89fc30dee..b69be56a435 100644
--- a/mysql-test/main/join_cache.test
+++ b/mysql-test/main/join_cache.test
@@ -10,6 +10,13 @@ set @@optimizer_switch='semijoin_with_cache=on';
set @@optimizer_switch='outer_join_with_cache=on';
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
+
set @local_join_cache_test_optimizer_switch_default=@@optimizer_switch;
set names utf8;
@@ -3886,6 +3893,8 @@ INSERT INTO t2 VALUES
(NULL,'x'),(1,'x'),(3,'x'),(5,'x'),(8,'x'),(48,'x'),
(228,'x'),(3,'y'),(1,'z'),(9,'z');
+ANALYZE TABLE t1,t2;
+
CREATE TABLE temp
SELECT t1.i1 AS f1, t1.v1 AS f2 FROM (t2 JOIN t1 ON (t1.v1 = t2.v1));
@@ -3970,9 +3979,9 @@ drop table t1;
SET join_cache_level = 3;
--echo # The following should have
--echo # - table order PROFILING,user,
---echo # - table user accessed with hash_ALL:
+--echo # - table db accessed with hash_ALL:
explain
-SELECT * FROM INFORMATION_SCHEMA.PROFILING, mysql.user WHERE password_expired = PAGE_FAULTS_MINOR;
+SELECT * FROM INFORMATION_SCHEMA.PROFILING, mysql.db WHERE Select_priv = PAGE_FAULTS_MINOR;
set join_cache_level=default;
@@ -3990,3 +3999,7 @@ set join_buffer_size = default;
# The following command must be the last one the file
set @@optimizer_switch=@save_optimizer_switch;
+
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/join_nested.result b/mysql-test/main/join_nested.result
index 708c72fffb5..4db32c3fad3 100644
--- a/mysql-test/main/join_nested.result
+++ b/mysql-test/main/join_nested.result
@@ -1062,9 +1062,9 @@ t0.b=t1.b AND
(t8.b=t9.b OR t8.c IS NULL) AND
(t9.a=1);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t0 ref idx_a idx_a 5 const 1 100.00 Using where
-1 SIMPLE t1 ref idx_b idx_b 5 test.t0.b 2 100.00
+1 SIMPLE t0 ref idx_a idx_a 5 const 2 100.00
1 SIMPLE t9 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t1 ALL idx_b NULL NULL NULL 7 100.00 Using where; Using join buffer (flat, BNL join)
1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 Using where
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where
1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 2 100.00 Using where
@@ -1110,13 +1110,13 @@ t0.b=t1.b AND
(t9.a=1);
a b a b a b a b a b a b a b a b a b a b
1 2 2 2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 1
+1 2 2 2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 2
1 2 3 2 4 2 1 2 3 2 3 1 6 2 1 1 NULL NULL 1 1
1 2 3 2 4 2 1 2 3 2 3 3 NULL NULL NULL NULL NULL NULL 1 1
1 2 3 2 4 2 1 2 4 2 3 1 6 2 1 1 NULL NULL 1 1
1 2 3 2 4 2 1 2 4 2 3 3 NULL NULL NULL NULL NULL NULL 1 1
1 2 3 2 5 3 NULL NULL NULL NULL 3 1 6 2 1 1 NULL NULL 1 1
1 2 3 2 5 3 NULL NULL NULL NULL 3 3 NULL NULL NULL NULL NULL NULL 1 1
-1 2 2 2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 2
1 2 3 2 4 2 1 2 3 2 3 1 6 2 1 1 NULL NULL 1 2
1 2 3 2 4 2 1 2 3 2 2 2 6 2 2 2 0 2 1 2
1 2 3 2 4 2 1 2 3 2 3 3 NULL NULL NULL NULL NULL NULL 1 2
@@ -1150,7 +1150,7 @@ a b a b
4 2 2 2
5 3 NULL NULL
SELECT t2.a,t2.b,t3.a,t3.b
-FROM t2 LEFT JOIN (t3) ON t2.b=t3.b
+FROM t2 LEFT JOIN t3 ON t2.b=t3.b
WHERE t2.a = 4 OR (t2.a > 4 AND t3.a IS NULL);
a b a b
4 2 1 2
@@ -1209,13 +1209,13 @@ INSERT INTO t3 VALUES (0), (1), (2), (3), (4), (5);
EXPLAIN SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON c < 3 and b = c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL a 5 NULL 21 Using index
-1 SIMPLE t3 index c c 5 NULL 6 Using where; Using index
-1 SIMPLE t2 ref b b 5 test.t3.c 2 Using index
+1 SIMPLE t2 range b b 5 NULL 3 Using where; Using index
+1 SIMPLE t3 ref c c 5 test.t2.b 2 Using index
EXPLAIN SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON b < 3 and b = c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL a 5 NULL 21 Using index
-1 SIMPLE t3 index c c 5 NULL 6 Using where; Using index
-1 SIMPLE t2 ref b b 5 test.t3.c 2 Using index
+1 SIMPLE t2 range b b 5 NULL 3 Using where; Using index
+1 SIMPLE t3 ref c c 5 test.t2.b 2 Using index
SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON b < 3 and b = c;
a b c
NULL 0 0
diff --git a/mysql-test/main/join_nested.test b/mysql-test/main/join_nested.test
index e60b7827f75..77d0e4154c1 100644
--- a/mysql-test/main/join_nested.test
+++ b/mysql-test/main/join_nested.test
@@ -683,7 +683,7 @@ SELECT t2.a,t2.b,t3.a,t3.b
WHERE t2.a = 4 OR (t2.a > 4 AND t3.a IS NULL);
SELECT t2.a,t2.b,t3.a,t3.b
- FROM t2 LEFT JOIN (t3) ON t2.b=t3.b
+ FROM t2 LEFT JOIN t3 ON t2.b=t3.b
WHERE t2.a = 4 OR (t2.a > 4 AND t3.a IS NULL);
ALTER TABLE t3
diff --git a/mysql-test/main/join_nested_jcl6.result b/mysql-test/main/join_nested_jcl6.result
index 2f8e1712672..67534b24e32 100644
--- a/mysql-test/main/join_nested_jcl6.result
+++ b/mysql-test/main/join_nested_jcl6.result
@@ -1073,9 +1073,9 @@ t0.b=t1.b AND
(t8.b=t9.b OR t8.c IS NULL) AND
(t9.a=1);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t0 ref idx_a idx_a 5 const 1 100.00 Using where
-1 SIMPLE t1 ref idx_b idx_b 5 test.t0.b 2 100.00 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE t9 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE t0 ref idx_a idx_a 5 const 2 100.00
+1 SIMPLE t9 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE t1 ALL idx_b NULL NULL NULL 7 100.00 Using where; Using join buffer (incremental, BNL join)
1 SIMPLE t2 ALL NULL NULL NULL NULL 8 100.00 Using where; Using join buffer (incremental, BNL join)
1 SIMPLE t3 ALL NULL NULL NULL NULL 2 100.00 Using where; Using join buffer (incremental, BNL join)
1 SIMPLE t4 ref idx_b idx_b 5 test.t2.b 2 100.00 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
@@ -1161,7 +1161,7 @@ a b a b
4 2 2 2
5 3 NULL NULL
SELECT t2.a,t2.b,t3.a,t3.b
-FROM t2 LEFT JOIN (t3) ON t2.b=t3.b
+FROM t2 LEFT JOIN t3 ON t2.b=t3.b
WHERE t2.a = 4 OR (t2.a > 4 AND t3.a IS NULL);
a b a b
4 2 1 2
@@ -1220,13 +1220,13 @@ INSERT INTO t3 VALUES (0), (1), (2), (3), (4), (5);
EXPLAIN SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON c < 3 and b = c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL a 5 NULL 21 Using index
-1 SIMPLE t3 index c c 5 NULL 6 Using where; Using index
-1 SIMPLE t2 ref b b 5 test.t3.c 2 Using index
+1 SIMPLE t2 range b b 5 NULL 3 Using where; Using index
+1 SIMPLE t3 ref c c 5 test.t2.b 2 Using index
EXPLAIN SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON b < 3 and b = c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL a 5 NULL 21 Using index
-1 SIMPLE t3 index c c 5 NULL 6 Using where; Using index
-1 SIMPLE t2 ref b b 5 test.t3.c 2 Using index
+1 SIMPLE t2 range b b 5 NULL 3 Using where; Using index
+1 SIMPLE t3 ref c c 5 test.t2.b 2 Using index
SELECT a, b, c FROM t1 LEFT JOIN (t2, t3) ON b < 3 and b = c;
a b c
NULL 0 0
@@ -2002,8 +2002,8 @@ ON t6.b >= 2 AND t5.b=t7.b AND
(t8.a > 0 OR t8.c IS NULL) AND t6.a>0 AND t7.a>0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t5 ALL NULL NULL NULL NULL 3
-1 SIMPLE t7 ref PRIMARY,b_i b_i 5 test.t5.b 2 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
-1 SIMPLE t6 ALL PRIMARY,b_i NULL NULL NULL 7 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE t7 ref|filter PRIMARY,b_i b_i|PRIMARY 5|4 test.t5.b 2 (29%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
+1 SIMPLE t6 range PRIMARY,b_i PRIMARY 4 NULL 3 Using where; Rowid-ordered scan; Using join buffer (incremental, BNL join)
1 SIMPLE t8 ref b_i b_i 5 test.t5.b 2 Using where; Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
SELECT t5.a,t5.b,t6.a,t6.b,t7.a,t7.b,t8.a,t8.b
FROM t5
diff --git a/mysql-test/main/join_outer.result b/mysql-test/main/join_outer.result
index 6b671a0759f..05991b56aba 100644
--- a/mysql-test/main/join_outer.result
+++ b/mysql-test/main/join_outer.result
@@ -1740,7 +1740,7 @@ from t1,t2
where t2.pk=t1.pk+1000 and t1.pk>1000
group by t2.pk;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 index PRIMARY PRIMARY 4 NULL 2 100.00 Using where; Using index; Using temporary; Using filesort
+1 PRIMARY t1 index PRIMARY PRIMARY 4 NULL 2 50.00 Using where; Using index; Using temporary; Using filesort
1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 func 1 100.00 Using where; Using index
2 DEPENDENT SUBQUERY t3 eq_ref PRIMARY PRIMARY 4 func 1 100.00 Using where; Using index
2 DEPENDENT SUBQUERY t4 eq_ref PRIMARY PRIMARY 4 test.t3.pk 1 100.00 Using index
@@ -1790,8 +1790,11 @@ insert into t3 values (11, 100), (33, 301), (44, 402), (11, 102), (11, 101);
insert into t3 values (22, 100), (53, 301), (64, 402), (22, 102), (22, 101);
analyze table t1,t2,t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
flush status;
select sum(t3.b) from t1 left join t3 on t3.a=t1.a and t1.a is not null;
@@ -1800,7 +1803,7 @@ sum(t3.b)
show status like "handler_read%";
Variable_name Value
Handler_read_first 0
-Handler_read_key 4
+Handler_read_key 13
Handler_read_last 0
Handler_read_next 5
Handler_read_prev 0
@@ -1815,7 +1818,7 @@ sum(t3.b)
show status like "handler_read%";
Variable_name Value
Handler_read_first 0
-Handler_read_key 4
+Handler_read_key 7
Handler_read_last 0
Handler_read_next 5
Handler_read_prev 0
@@ -2549,7 +2552,7 @@ test.t3 analyze status OK
explain extended select * from t1 left join t3 on t1.a=t3.b and t3.a<5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 100.00
-1 SIMPLE t3 ALL NULL NULL NULL NULL 1000 0.99 Using where
+1 SIMPLE t3 ALL NULL NULL NULL NULL 1000 1.96 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b` from `test`.`t1` left join `test`.`t3` on(`test`.`t3`.`b` = `test`.`t1`.`a` and `test`.`t3`.`a` < 5) where 1
# t3.filtered must less than 100%, too:
@@ -2557,7 +2560,7 @@ explain extended select * from t1 left join (t3 join t2) on t1.a=t3.b and t3.a<5
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 100.00
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00
-1 SIMPLE t3 ALL NULL NULL NULL NULL 1000 0.99 Using where
+1 SIMPLE t3 ALL NULL NULL NULL NULL 1000 1.96 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t2`.`a` AS `a` from `test`.`t1` left join (`test`.`t3` join `test`.`t2`) on(`test`.`t3`.`b` = `test`.`t1`.`a` and `test`.`t3`.`a` < 5) where 1
drop table t1,t2,t3;
diff --git a/mysql-test/main/join_outer_innodb.result b/mysql-test/main/join_outer_innodb.result
index 6f3fb09329d..9026a32e356 100644
--- a/mysql-test/main/join_outer_innodb.result
+++ b/mysql-test/main/join_outer_innodb.result
@@ -433,21 +433,21 @@ left join t16 on t15.o1 = t16.p1
where t1.a10 = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL a4,a6,a5,a7 NULL NULL NULL 3 Using where
-1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1 Using index
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.b2 1 Using where; Using index
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 4 test.t1.a2 1 Using index
-1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t4.d1 1 Using where
-1 SIMPLE t6 eq_ref PRIMARY PRIMARY 4 test.t1.a3 1 Using where; Using index
1 SIMPLE t8 eq_ref PRIMARY PRIMARY 1 test.t1.a4 1 Using index
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 1 test.t1.a7 1
-1 SIMPLE t9 ref PRIMARY PRIMARY 1 test.t1.a4 1
1 SIMPLE t11 eq_ref PRIMARY PRIMARY 4 test.t1.a5 1
1 SIMPLE t12 eq_ref PRIMARY PRIMARY 4 test.t11.k3 1 Using where
1 SIMPLE l2 eq_ref PRIMARY PRIMARY 4 test.t11.k4 1 Using where
1 SIMPLE t13 ref PRIMARY,m3 PRIMARY 4 test.t1.a1 1 Using where; Using index
+1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1 Using index
+1 SIMPLE t9 ref PRIMARY PRIMARY 1 test.t1.a4 1
1 SIMPLE l4 eq_ref PRIMARY PRIMARY 4 test.t13.m2 1 Using where; Using index
1 SIMPLE m2 ref PRIMARY,m3 PRIMARY 4 test.t1.a1 1 Using where; Using index
-1 SIMPLE l3 eq_ref PRIMARY PRIMARY 4 test.m2.m2 1 Using where
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.b2 1 Using where; Using index
+1 SIMPLE t4 eq_ref PRIMARY PRIMARY 4 test.t1.a2 1 Using index
+1 SIMPLE t5 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE l3 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE t7 eq_ref PRIMARY PRIMARY 1 test.t1.a7 1
+1 SIMPLE t6 eq_ref PRIMARY PRIMARY 4 test.t1.a3 1 Using where; Using index
1 SIMPLE t14 eq_ref PRIMARY PRIMARY 2 test.t1.a8 1 Using where
1 SIMPLE t15 eq_ref PRIMARY PRIMARY 2 test.t1.a9 1 Using where; Using index
1 SIMPLE t16 ref PRIMARY PRIMARY 2 test.t15.o1 1 Using where
@@ -455,21 +455,21 @@ id select_type table type possible_keys key key_len ref rows Extra
explain select * from v1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL a4,a6,a5,a7 NULL NULL NULL 3 Using where
-1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1 Using index
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.b2 1 Using where; Using index
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 4 test.t1.a2 1 Using index
-1 SIMPLE t5 eq_ref PRIMARY PRIMARY 4 test.t4.d1 1 Using where
-1 SIMPLE t6 eq_ref PRIMARY PRIMARY 4 test.t1.a3 1 Using where; Using index
1 SIMPLE t8 eq_ref PRIMARY PRIMARY 1 test.t1.a4 1 Using index
-1 SIMPLE t7 eq_ref PRIMARY PRIMARY 1 test.t1.a7 1
-1 SIMPLE t9 ref PRIMARY PRIMARY 1 test.t1.a4 1
1 SIMPLE t11 eq_ref PRIMARY PRIMARY 4 test.t1.a5 1
1 SIMPLE t12 eq_ref PRIMARY PRIMARY 4 test.t11.k3 1 Using where
1 SIMPLE l2 eq_ref PRIMARY PRIMARY 4 test.t11.k4 1 Using where
1 SIMPLE t13 ref PRIMARY,m3 PRIMARY 4 test.t1.a1 1 Using where; Using index
+1 SIMPLE t2 ref PRIMARY PRIMARY 4 test.t1.a1 1 Using index
+1 SIMPLE t9 ref PRIMARY PRIMARY 1 test.t1.a4 1
1 SIMPLE l4 eq_ref PRIMARY PRIMARY 4 test.t13.m2 1 Using where; Using index
1 SIMPLE m2 ref PRIMARY,m3 PRIMARY 4 test.t1.a1 1 Using where; Using index
-1 SIMPLE l3 eq_ref PRIMARY PRIMARY 4 test.m2.m2 1 Using where
+1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.b2 1 Using where; Using index
+1 SIMPLE t4 eq_ref PRIMARY PRIMARY 4 test.t1.a2 1 Using index
+1 SIMPLE t5 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE l3 ALL PRIMARY NULL NULL NULL 3 Using where; Using join buffer (incremental, BNL join)
+1 SIMPLE t7 eq_ref PRIMARY PRIMARY 1 test.t1.a7 1
+1 SIMPLE t6 eq_ref PRIMARY PRIMARY 4 test.t1.a3 1 Using where; Using index
1 SIMPLE t14 eq_ref PRIMARY PRIMARY 2 test.t1.a8 1 Using where
1 SIMPLE t15 eq_ref PRIMARY PRIMARY 2 test.t1.a9 1 Using where; Using index
1 SIMPLE t16 ref PRIMARY PRIMARY 2 test.t15.o1 1 Using where
diff --git a/mysql-test/main/join_outer_jcl6.result b/mysql-test/main/join_outer_jcl6.result
index f46f3e2df21..399d6306fa0 100644
--- a/mysql-test/main/join_outer_jcl6.result
+++ b/mysql-test/main/join_outer_jcl6.result
@@ -1751,7 +1751,7 @@ from t1,t2
where t2.pk=t1.pk+1000 and t1.pk>1000
group by t2.pk;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 index PRIMARY PRIMARY 4 NULL 2 100.00 Using where; Using index; Using temporary; Using filesort
+1 PRIMARY t1 index PRIMARY PRIMARY 4 NULL 2 50.00 Using where; Using index; Using temporary; Using filesort
1 PRIMARY t2 eq_ref PRIMARY PRIMARY 4 func 1 100.00 Using where; Using index
2 DEPENDENT SUBQUERY t3 eq_ref PRIMARY PRIMARY 4 func 1 100.00 Using where; Using index
2 DEPENDENT SUBQUERY t4 eq_ref PRIMARY PRIMARY 4 test.t3.pk 1 100.00 Using index
@@ -1801,8 +1801,11 @@ insert into t3 values (11, 100), (33, 301), (44, 402), (11, 102), (11, 101);
insert into t3 values (22, 100), (53, 301), (64, 402), (22, 102), (22, 101);
analyze table t1,t2,t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
flush status;
select sum(t3.b) from t1 left join t3 on t3.a=t1.a and t1.a is not null;
@@ -1811,7 +1814,7 @@ sum(t3.b)
show status like "handler_read%";
Variable_name Value
Handler_read_first 0
-Handler_read_key 4
+Handler_read_key 13
Handler_read_last 0
Handler_read_next 5
Handler_read_prev 0
@@ -1826,7 +1829,7 @@ sum(t3.b)
show status like "handler_read%";
Variable_name Value
Handler_read_first 0
-Handler_read_key 4
+Handler_read_key 7
Handler_read_last 0
Handler_read_next 5
Handler_read_prev 0
@@ -2560,7 +2563,7 @@ test.t3 analyze status OK
explain extended select * from t1 left join t3 on t1.a=t3.b and t3.a<5;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 100.00
-1 SIMPLE t3 hash_ALL NULL #hash#$hj 5 test.t1.a 1000 0.99 Using where; Using join buffer (flat, BNLH join)
+1 SIMPLE t3 hash_ALL NULL #hash#$hj 5 test.t1.a 1000 1.96 Using where; Using join buffer (flat, BNLH join)
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b` from `test`.`t1` left join `test`.`t3` on(`test`.`t3`.`b` = `test`.`t1`.`a` and `test`.`t3`.`a` < 5 and `test`.`t1`.`a` is not null) where 1
# t3.filtered must less than 100%, too:
@@ -2568,7 +2571,7 @@ explain extended select * from t1 left join (t3 join t2) on t1.a=t3.b and t3.a<5
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 100.00
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 100.00 Using where
-1 SIMPLE t3 ALL NULL NULL NULL NULL 1000 0.99 Using where
+1 SIMPLE t3 ALL NULL NULL NULL NULL 1000 1.96 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t3`.`a` AS `a`,`test`.`t3`.`b` AS `b`,`test`.`t2`.`a` AS `a` from `test`.`t1` left join (`test`.`t3` join `test`.`t2`) on(`test`.`t3`.`b` = `test`.`t1`.`a` and `test`.`t3`.`a` < 5 and `test`.`t1`.`a` is not null) where 1
drop table t1,t2,t3;
diff --git a/mysql-test/main/key.result b/mysql-test/main/key.result
index ba1124aaa14..f341c4be2c6 100644
--- a/mysql-test/main/key.result
+++ b/mysql-test/main/key.result
@@ -216,7 +216,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 Using index
explain select 1 from t1 where id =2 or id=3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 7 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 Using where; Using index
explain select name from t1 where id =2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
@@ -603,11 +603,15 @@ INSERT INTO t1 (a, b)
VALUES
(1,1), (1,2), (1,3), (1,4), (1,5),
(2,2), (2,3), (2,1), (3,1), (4,1), (4,2), (4,3), (4,4), (4,5), (4,6);
+ANALYZE table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) > 12;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t1 range NULL a 5 NULL 8 Using index for group-by
+2 SUBQUERY t1 range a a 5 NULL 2 Using where; Using index for group-by
SELECT 1 as RES FROM t1 AS t1_outer WHERE
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) > 12;
RES
diff --git a/mysql-test/main/key.test b/mysql-test/main/key.test
index 0695dc2eecb..ccaef163d08 100644
--- a/mysql-test/main/key.test
+++ b/mysql-test/main/key.test
@@ -555,6 +555,7 @@ INSERT INTO t1 (a, b)
VALUES
(1,1), (1,2), (1,3), (1,4), (1,5),
(2,2), (2,3), (2,1), (3,1), (4,1), (4,2), (4,3), (4,4), (4,5), (4,6);
+ANALYZE table t1;
EXPLAIN SELECT 1 FROM t1 AS t1_outer WHERE
(SELECT max(b) FROM t1 GROUP BY a HAVING a < 2) > 12;
SELECT 1 as RES FROM t1 AS t1_outer WHERE
diff --git a/mysql-test/main/key_cache.result b/mysql-test/main/key_cache.result
index 1146ae8bbfa..36c75ad4a5d 100644
--- a/mysql-test/main/key_cache.result
+++ b/mysql-test/main/key_cache.result
@@ -440,25 +440,25 @@ VARIABLE_NAME VARIABLE_VALUE
KEY_BLOCKS_NOT_FLUSHED 0
KEY_BLOCKS_USED 4
KEY_BLOCKS_WARM 0
-KEY_READ_REQUESTS 22
+KEY_READ_REQUESTS 24
KEY_READS 0
KEY_WRITE_REQUESTS 26
KEY_WRITES 6
select variable_value into @key_blocks_unused from information_schema.session_status where variable_name = 'Key_blocks_unused';
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default NULL NULL 2097152 1024 4 # 0 22 0 26 6
+default NULL NULL 2097152 1024 4 # 0 24 0 26 6
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
delete from t2 where a='zzzz';
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default NULL NULL 2097152 1024 4 # 0 29 0 32 9
+default NULL NULL 2097152 1024 4 # 0 32 0 32 9
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
delete from t1;
delete from t2;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default NULL NULL 2097152 1024 4 # 0 29 0 32 9
+default NULL NULL 2097152 1024 4 # 0 32 0 32 9
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
set global key_cache_segments=2;
select @@key_cache_segments;
@@ -488,7 +488,7 @@ VARIABLE_NAME VARIABLE_VALUE
KEY_BLOCKS_NOT_FLUSHED 0
KEY_BLOCKS_USED 4
KEY_BLOCKS_WARM 0
-KEY_READ_REQUESTS 22
+KEY_READ_REQUESTS 24
KEY_READS 0
KEY_WRITE_REQUESTS 26
KEY_WRITES 6
@@ -497,13 +497,13 @@ variable_value < @key_blocks_unused
1
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 2097152 1024 4 # 0 22 0 26 6
+default 2 NULL 2097152 1024 4 # 0 24 0 26 6
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
delete from t1;
delete from t2;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 2097152 1024 4 # 0 22 0 26 6
+default 2 NULL 2097152 1024 4 # 0 24 0 26 6
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
set global key_cache_segments=1;
select @@key_cache_segments;
@@ -533,7 +533,7 @@ VARIABLE_NAME VARIABLE_VALUE
KEY_BLOCKS_NOT_FLUSHED 0
KEY_BLOCKS_USED 4
KEY_BLOCKS_WARM 0
-KEY_READ_REQUESTS 22
+KEY_READ_REQUESTS 24
KEY_READS 0
KEY_WRITE_REQUESTS 26
KEY_WRITES 6
@@ -542,13 +542,13 @@ variable_value = @key_blocks_unused
1
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 1 NULL 2097152 1024 4 # 0 22 0 26 6
+default 1 NULL 2097152 1024 4 # 0 24 0 26 6
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
delete from t1;
delete from t2;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 1 NULL 2097152 1024 4 # 0 22 0 26 6
+default 1 NULL 2097152 1024 4 # 0 24 0 26 6
small NULL NULL 1048576 1024 1 # 0 1 0 2 1
flush tables;
flush status;
@@ -586,7 +586,7 @@ update t1 set p=3 where p=1;
update t2 set i=2 where i=1;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 4 # 0 22 0 26 6
+default 2 NULL 32768 1024 4 # 0 24 0 26 6
small NULL NULL 1048576 1024 1 # 0 0 0 0 0
insert into t1(a) select a from t1;
insert into t1(a) select a from t1;
@@ -606,7 +606,7 @@ insert into t2(i,a) select i,a from t2;
insert into t2(i,a) select i,a from t2;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 6733 # 3684 103
+default 2 NULL 32768 1024 # # 0 6735 # 3684 103
small NULL NULL 1048576 1024 # # 0 0 # 0 0
select * from t1 where p between 1010 and 1020 ;
p a
@@ -625,7 +625,7 @@ p i a
1020 3 zzzz
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 6750 # 3684 103
+default 2 NULL 32768 1024 # # 0 6756 # 3684 103
small NULL NULL 1048576 1024 # # 0 0 # 0 0
flush tables;
flush status;
@@ -633,7 +633,7 @@ update t1 set a='zzzz' where a='qqqq';
update t2 set i=1 where i=2;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3076 18 1552 18
+default 2 NULL 32768 1024 # # 0 3078 18 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
set global keycache1.key_buffer_size=256*1024;
select @@keycache1.key_buffer_size;
@@ -645,7 +645,7 @@ select @@keycache1.key_cache_segments;
7
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3076 18 1552 18
+default 2 NULL 32768 1024 # # 0 3078 18 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
keycache1 7 NULL 262143 2048 # # 0 0 0 0 0
select * from information_schema.key_caches where key_cache_name like "key%"
@@ -662,7 +662,7 @@ select p from t1 where p between 1010 and 1020;
p
explain select i from t2 where p between 1010 and 1020;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 28 Using index condition
+1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 9 Using index condition
select i from t2 where p between 1010 and 1020;
i
1
@@ -685,13 +685,13 @@ count(*)
256
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
-keycache1 7 NULL 262143 2048 # # 0 14 3 0 0
+keycache1 7 NULL 262143 2048 # # 0 18 3 0 0
select * from information_schema.key_caches where key_cache_name like "key%"
and segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-keycache1 7 NULL 262143 2048 3 # 0 14 3 0 0
+keycache1 7 NULL 262143 2048 3 # 0 18 3 0 0
cache index t2 in keycache1;
Table Op Msg_type Msg_text
test.t2 assign_to_keycache status OK
@@ -699,7 +699,7 @@ update t2 set p=p+3000, i=2 where a='qqqq';
select * from information_schema.key_caches where key_cache_name like "key%"
and segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-keycache1 7 NULL 262143 2048 25 # 0 2082 25 1071 19
+keycache1 7 NULL 262143 2048 25 # 0 2088 25 1071 19
set global keycache2.key_buffer_size=1024*1024;
cache index t2 in keycache2;
Table Op Msg_type Msg_text
@@ -712,7 +712,7 @@ keycache2 NULL NULL 1048576 1024 6 # 0 6 6 3 3
select * from information_schema.key_caches where key_cache_name like "key%"
and segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-keycache1 7 NULL 262143 2048 25 # 0 2082 25 1071 19
+keycache1 7 NULL 262143 2048 25 # 0 2088 25 1071 19
keycache2 NULL NULL 1048576 1024 6 # 0 6 6 3 3
cache index t2 in keycache1;
Table Op Msg_type Msg_text
@@ -728,7 +728,7 @@ p i a
1019 1 yyyy
explain select p from t2 where p between 1010 and 1020;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 7 Using where; Using index
+1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 8 Using where; Using index
select p from t2 where p between 1010 and 1020;
p
1010
@@ -739,92 +739,92 @@ p
1019
explain select i from t2 where a='yyyy' and i=3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref k1,k2 k1 5 const 188 Using where
+1 SIMPLE t2 ref k1,k2 k1 5 const 189 Using where
select i from t2 where a='yyyy' and i=3;
i
3
explain select a from t2 where a='yyyy' and i=3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref k1,k2 k1 5 const 188 Using where
+1 SIMPLE t2 ref k1,k2 k1 5 const 189 Using where
select a from t2 where a='yyyy' and i=3 ;
a
yyyy
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
-keycache1 7 NULL 262143 2048 # # 0 3201 43 1594 30
+keycache1 7 NULL 262143 2048 # # 0 3231 43 1594 30
keycache2 NULL NULL 1048576 1024 # # 0 6 6 3 3
set global keycache1.key_cache_block_size=2*1024;
insert into t2 values (7000, 3, 'yyyy');
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
keycache1 7 NULL 262143 2048 # # 0 6 6 3 3
keycache2 NULL NULL 1048576 1024 # # 0 6 6 3 3
set global keycache1.key_cache_block_size=8*1024;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
keycache1 3 NULL 262143 8192 # # 0 0 0 0 0
keycache2 NULL NULL 1048576 1024 # # 0 6 6 3 3
insert into t2 values (8000, 3, 'yyyy');
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
keycache1 3 NULL 262143 8192 # # 0 6 5 3 3
keycache2 NULL NULL 1048576 1024 # # 0 6 6 3 3
set global keycache1.key_buffer_size=64*1024;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
keycache2 NULL NULL 1048576 1024 # # 0 6 6 3 3
set global keycache1.key_cache_block_size=2*1024;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
keycache1 3 NULL 65535 2048 # # 0 0 0 0 0
keycache2 NULL NULL 1048576 1024 # # 0 6 6 3 3
set global keycache1.key_cache_block_size=8*1024;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
keycache2 NULL NULL 1048576 1024 # # 0 6 6 3 3
set global keycache1.key_buffer_size=0;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
keycache2 NULL NULL 1048576 1024 # # 0 6 6 3 3
set global keycache1.key_cache_block_size=8*1024;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
keycache2 NULL NULL 1048576 1024 # # 0 6 6 3 3
set global keycache1.key_buffer_size=0;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
keycache2 NULL NULL 1048576 1024 # # 0 6 6 3 3
set global keycache1.key_buffer_size=128*1024;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
keycache1 1 NULL 131072 8192 # # 0 0 0 0 0
keycache2 NULL NULL 1048576 1024 # # 0 6 6 3 3
set global keycache1.key_cache_block_size=1024;
select * from information_schema.key_caches where segment_number is null;
KEY_CACHE_NAME SEGMENTS SEGMENT_NUMBER FULL_SIZE BLOCK_SIZE USED_BLOCKS UNUSED_BLOCKS DIRTY_BLOCKS READ_REQUESTS READS WRITE_REQUESTS WRITES
-default 2 NULL 32768 1024 # # 0 3172 24 1552 18
+default 2 NULL 32768 1024 # # 0 3178 24 1552 18
small NULL NULL 1048576 1024 # # 0 0 0 0 0
keycache1 7 NULL 131068 1024 # # 0 0 0 0 0
keycache2 NULL NULL 1048576 1024 # # 0 6 6 3 3
diff --git a/mysql-test/main/kill.result b/mysql-test/main/kill.result
index dc1cb9252da..4775d111b79 100644
--- a/mysql-test/main/kill.result
+++ b/mysql-test/main/kill.result
@@ -324,7 +324,7 @@ connection blocker;
lock tables t1 read;
connection ddl;
# Let us mark locked table t1 as old
-flush tables;
+flush tables t1;
connection dml;
select * from t1;
connection default;
diff --git a/mysql-test/main/kill.test b/mysql-test/main/kill.test
index b6000ffced1..059d8d40b11 100644
--- a/mysql-test/main/kill.test
+++ b/mysql-test/main/kill.test
@@ -538,18 +538,18 @@ connection blocker;
lock tables t1 read;
connection ddl;
--echo # Let us mark locked table t1 as old
---send flush tables
+--send flush tables t1
connection dml;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for table flush" and
- info = "flush tables";
+ where state = "Waiting for table metadata lock" and
+ info = "flush tables t1";
--source include/wait_condition.inc
--send select * from t1
connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for table flush" and
+ where state = "Waiting for table metadata lock" and
info = "select * from t1";
--source include/wait_condition.inc
--replace_result $ID2 ID2
diff --git a/mysql-test/main/limit_rows_examined.result b/mysql-test/main/limit_rows_examined.result
index 8458e063d97..0711e10ddde 100644
--- a/mysql-test/main/limit_rows_examined.result
+++ b/mysql-test/main/limit_rows_examined.result
@@ -49,10 +49,11 @@ explain
select * from t1i, t2i where c1 = c2 LIMIT ROWS EXAMINED 6;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1i index PRIMARY PRIMARY 2 NULL 4 Using index
-1 SIMPLE t2i index PRIMARY PRIMARY 2 NULL 4 Using where; Using index; Using join buffer (flat, BNL join)
+1 SIMPLE t2i eq_ref PRIMARY PRIMARY 2 test.t1i.c1 1 Using index
select * from t1i, t2i where c1 = c2 LIMIT ROWS EXAMINED 6;
c1 c2
bb bb
+cc cc
Warnings:
Warning 1931 Query execution was interrupted. The query examined at least 7 rows, which exceeds LIMIT ROWS EXAMINED (6). The query result may be incomplete
set @@join_cache_level=6;
@@ -69,10 +70,11 @@ explain
select * from t1i, t2i where c1 = c2 LIMIT ROWS EXAMINED 6;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1i index PRIMARY PRIMARY 2 NULL 4 Using index
-1 SIMPLE t2i index PRIMARY PRIMARY 2 NULL 4 Using where; Using index; Using join buffer (flat, BNL join)
+1 SIMPLE t2i eq_ref PRIMARY PRIMARY 2 test.t1i.c1 1 Using index
select * from t1i, t2i where c1 = c2 LIMIT ROWS EXAMINED 6;
c1 c2
bb bb
+cc cc
Warnings:
Warning 1931 Query execution was interrupted. The query examined at least 7 rows, which exceeds LIMIT ROWS EXAMINED (6). The query result may be incomplete
Mix LIMIT ROWS EXAMINED with LIMIT
@@ -254,12 +256,13 @@ where c1 IN (select * from t2i where c2 > ' ')
LIMIT ROWS EXAMINED 6;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1i index PRIMARY PRIMARY 2 NULL 4 Using where; Using index
-1 PRIMARY t2i index PRIMARY PRIMARY 2 NULL 4 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t2i eq_ref PRIMARY PRIMARY 2 test.t1i.c1 1 Using index
select * from t1i
where c1 IN (select * from t2i where c2 > ' ')
LIMIT ROWS EXAMINED 6;
c1
bb
+cc
Warnings:
Warning 1931 Query execution was interrupted. The query examined at least 7 rows, which exceeds LIMIT ROWS EXAMINED (6). The query result may be incomplete
Subqueries with IN-TO-EXISTS
@@ -836,7 +839,7 @@ WHERE c = (SELECT MAX(b) FROM t2)
LIMIT ROWS EXAMINED 3;
(SELECT MAX(c) FROM t1, t2)
Warnings:
-Warning 1931 Query execution was interrupted. The query examined at least 10 rows, which exceeds LIMIT ROWS EXAMINED (3). The query result may be incomplete
+Warning 1931 Query execution was interrupted. The query examined at least 12 rows, which exceeds LIMIT ROWS EXAMINED (3). The query result may be incomplete
drop table t1, t2;
MDEV-178: LIMIT ROWS EXAMINED: Assertion `0' failed in net_end_statement(THD*) on the
@@ -861,3 +864,18 @@ Warnings:
Warning 1931 Query execution was interrupted. The query examined at least 22 rows, which exceeds LIMIT ROWS EXAMINED (21). The query result may be incomplete
drop view v;
drop table t1, t2;
+#
+# MDEV-18117: Crash with Explain extended when using limit rows examined
+#
+create table t1 (c1 char(2));
+create table t2 (c2 char(2));
+insert into t1 values ('bb'), ('cc'), ('aa'), ('dd');
+insert into t2 values ('bb'), ('cc'), ('dd'), ('ff');
+explain extended
+select * from t1, t2 where c1 = c2 LIMIT ROWS EXAMINED 2;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4 100.00
+1 SIMPLE t2 ALL NULL NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
+Warnings:
+Note 1003 select `test`.`t1`.`c1` AS `c1`,`test`.`t2`.`c2` AS `c2` from `test`.`t1` join `test`.`t2` where `test`.`t2`.`c2` = `test`.`t1`.`c1`
+drop table t1,t2;
diff --git a/mysql-test/main/limit_rows_examined.test b/mysql-test/main/limit_rows_examined.test
index 815394aec5c..528bb8c7b3d 100644
--- a/mysql-test/main/limit_rows_examined.test
+++ b/mysql-test/main/limit_rows_examined.test
@@ -576,3 +576,15 @@ EXECUTE ps;
drop view v;
drop table t1, t2;
+
+--echo #
+--echo # MDEV-18117: Crash with Explain extended when using limit rows examined
+--echo #
+
+create table t1 (c1 char(2));
+create table t2 (c2 char(2));
+insert into t1 values ('bb'), ('cc'), ('aa'), ('dd');
+insert into t2 values ('bb'), ('cc'), ('dd'), ('ff');
+explain extended
+select * from t1, t2 where c1 = c2 LIMIT ROWS EXAMINED 2;
+drop table t1,t2;
diff --git a/mysql-test/main/loaddata.result b/mysql-test/main/loaddata.result
index f09e6d70258..b7d51a13c85 100644
--- a/mysql-test/main/loaddata.result
+++ b/mysql-test/main/loaddata.result
@@ -623,7 +623,7 @@ CREATE TABLE t1 (a INT, b INT, PRIMARY KEY (a), UNIQUE(b));
INSERT INTO t1 VALUES (1,1);
CREATE TABLE t2 (c INT);
CREATE VIEW v AS SELECT t1.* FROM t1 JOIN t2;
-SELECT a, b FROM t1 INTO OUTFILE '15645.data';
+SELECT a, b INTO OUTFILE '15645.data' FROM t1;
LOAD DATA INFILE '15645.data' IGNORE INTO TABLE v (a,b);
ERROR HY000: Incorrect usage of Multi-table VIEW and LOAD
LOAD DATA INFILE '15645.data' REPLACE INTO TABLE v (a,b);
diff --git a/mysql-test/main/loaddata.test b/mysql-test/main/loaddata.test
index 3a5bcad1685..8fb1877bbc5 100644
--- a/mysql-test/main/loaddata.test
+++ b/mysql-test/main/loaddata.test
@@ -714,7 +714,7 @@ CREATE TABLE t1 (a INT, b INT, PRIMARY KEY (a), UNIQUE(b));
INSERT INTO t1 VALUES (1,1);
CREATE TABLE t2 (c INT);
CREATE VIEW v AS SELECT t1.* FROM t1 JOIN t2;
-SELECT a, b FROM t1 INTO OUTFILE '15645.data';
+SELECT a, b INTO OUTFILE '15645.data' FROM t1;
--error ER_WRONG_USAGE
LOAD DATA INFILE '15645.data' IGNORE INTO TABLE v (a,b);
--error ER_WRONG_USAGE
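The loaddata change above only moves the INTO OUTFILE clause ahead of FROM, presumably so the result file stays free of the new deprecation warning for the trailing-INTO form quoted later in lock_sync.result. A minimal sketch of the two spellings; the table, data and output file name here are illustrative, not taken from the test, and SELECT ... INTO OUTFILE still needs the FILE privilege and a writable target directory.

    create table t1 (a int, b int);
    insert into t1 values (1,1);

    -- Trailing-INTO spelling (the form the 1287 warning discourages):
    --   select a, b from t1 into outfile 'demo_15645.data';

    -- Spelling used by the updated test, with INTO OUTFILE ahead of FROM:
    select a, b into outfile 'demo_15645.data' from t1;

    drop table t1;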
diff --git a/mysql-test/main/lock.result b/mysql-test/main/lock.result
index 339cfcaa441..6edb86bfa3f 100644
--- a/mysql-test/main/lock.result
+++ b/mysql-test/main/lock.result
@@ -457,6 +457,7 @@ connection default;
LOCK TABLE t1 WRITE;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
connection con2;
LOCK TABLE t2 WRITE;
@@ -499,7 +500,7 @@ connect con1,localhost,root,,test;
LOCK TABLE t2 WRITE;
SET lock_wait_timeout= 1;
FLUSH TABLES;
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+FLUSH TABLES t2;
UNLOCK TABLES;
disconnect con1;
connection default;
diff --git a/mysql-test/main/lock.test b/mysql-test/main/lock.test
index ff77b4991c0..8a59f4082b1 100644
--- a/mysql-test/main/lock.test
+++ b/mysql-test/main/lock.test
@@ -609,8 +609,8 @@ LOCK TABLE t1 READ;
--connect (con1,localhost,root,,test)
LOCK TABLE t2 WRITE;
SET lock_wait_timeout= 1;
---error ER_LOCK_WAIT_TIMEOUT
FLUSH TABLES;
+FLUSH TABLES t2;
# Cleanup
UNLOCK TABLES;
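The lock.result/lock.test hunks above encode a behaviour split: a bare FLUSH TABLES no longer has to wait on table locks held by other sessions, while FLUSH TABLES <table> still needs an exclusive metadata lock on that table. A minimal two-session sketch of what the updated test expects; the session labels and table name are illustrative, and the exact error on timeout is an assumption based on the 1-second lock_wait_timeout used in the test.

    -- session 1
    create table t2 (a int);
    lock table t2 write;

    -- session 2
    set lock_wait_timeout = 1;
    flush tables;      -- returns without waiting on session 1's lock
    flush tables t2;   -- needs an exclusive MDL on t2, so it blocks here and,
                       -- with the 1-second timeout, should fail with a lock
                       -- wait timeout while session 1 keeps its WRITE lock

    -- session 1
    unlock tables;
    drop table t2;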
diff --git a/mysql-test/main/lock_multi.result b/mysql-test/main/lock_multi.result
index e702c458c0c..30cd63e60ed 100644
--- a/mysql-test/main/lock_multi.result
+++ b/mysql-test/main/lock_multi.result
@@ -114,22 +114,21 @@ unlock tables;
drop table t1;
connection locker;
USE mysql;
-LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE;
+LOCK TABLES columns_priv WRITE, db WRITE, user WRITE;
FLUSH TABLES;
connection reader;
USE mysql;
-SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1;
+SELECT global_priv.host FROM global_priv, db WHERE global_priv.user = db.user LIMIT 1;
connection locker;
-OPTIMIZE TABLES columns_priv, db, host, user;
+OPTIMIZE TABLES columns_priv, db, global_priv;
Table Op Msg_type Msg_text
mysql.columns_priv optimize status OK
mysql.db optimize status OK
-mysql.host optimize status OK
-mysql.user optimize status OK
+mysql.global_priv optimize status OK
UNLOCK TABLES;
connection reader;
-Select_priv
-N
+host
+localhost
USE test;
connection locker;
use test;
@@ -219,6 +218,7 @@ connection con2;
unlock tables;
connection con3;
a
+connection con4;
connection default;
disconnect con5;
disconnect con4;
@@ -248,6 +248,7 @@ flush table t2;
connection default;
unlock tables;
connection con1;
+connection con2;
#
# LOCK TABLES .. WRITE
#
@@ -300,7 +301,7 @@ connection default;
alter table t1 add column j int;
connect insert,localhost,root,,test,,;
connection insert;
-insert into t1 values (1,2);;
+insert into t1 values (1,2);
connection default;
unlock tables;
connection flush;
@@ -532,8 +533,9 @@ connect con3, localhost, root;
connection default;
LOCK TABLE t1 READ;
connection con3;
-# Sending:
FLUSH TABLES;
+# Sending:
+FLUSH TABLES t1;
connection con2;
SELECT * FROM t1;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
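The lock_multi edits above follow the 10.4 privilege-table layout: the rows formerly kept in mysql.user and mysql.host now live in mysql.global_priv, so the test locks, queries and optimizes that table instead. A small read-only sketch of the new table; whether JSON_VALUE returns 0 or NULL for accounts whose lock flag was never set is an assumption, not something the hunks above pin down.

    use mysql;

    -- One row per account; per-account attributes live in the JSON Priv column.
    select host, user from global_priv limit 5;

    -- Attributes such as the account-locked flag are plain JSON values
    -- (0/1 once set; possibly NULL for accounts never touched by ACCOUNT LOCK).
    select host, user, json_value(Priv, '$.account_locked') as locked
    from global_priv;

    -- Maintenance statements now address global_priv rather than user/host:
    optimize table columns_priv, db, global_priv;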
diff --git a/mysql-test/main/lock_multi.test b/mysql-test/main/lock_multi.test
index a945bcdbb74..5cc7219b01d 100644
--- a/mysql-test/main/lock_multi.test
+++ b/mysql-test/main/lock_multi.test
@@ -184,25 +184,25 @@ drop table t1;
connection locker;
USE mysql;
-LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE;
+LOCK TABLES columns_priv WRITE, db WRITE, user WRITE;
FLUSH TABLES;
#
connection reader;
USE mysql;
# Note: This must be a multi-table select, otherwise the deadlock will not occur
send
-SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1;
+SELECT global_priv.host FROM global_priv, db WHERE global_priv.user = db.user LIMIT 1;
#
connection locker;
# Sleep a bit till the select of connection reader is in work and hangs
let $wait_condition=
SELECT COUNT(*) = 1 FROM information_schema.processlist
WHERE state = "Waiting for table metadata lock" AND info =
- "SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1";
+ "SELECT global_priv.host FROM global_priv, db WHERE global_priv.user = db.user LIMIT 1";
--source include/wait_condition.inc
# Make test case independent from earlier grants.
--replace_result "Table is already up to date" "OK"
-OPTIMIZE TABLES columns_priv, db, host, user;
+OPTIMIZE TABLES columns_priv, db, global_priv;
UNLOCK TABLES;
#
connection reader;
@@ -229,7 +229,7 @@ connection writer;
# Sleep a bit till the flush of connection locker is in work and hangs
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "FLUSH TABLES WITH READ LOCK";
--source include/wait_condition.inc
# This must not block.
@@ -261,7 +261,7 @@ connection writer;
# Sleep a bit till the flush of connection locker is in work and hangs
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "FLUSH TABLES WITH READ LOCK";
--source include/wait_condition.inc
--error ER_TABLE_NOT_LOCKED
@@ -298,10 +298,10 @@ DROP DATABASE mysqltest_1;
# When fixed: Reject dropping db because of the read lock.
connection con1;
# Wait a bit so that the session con2 is in state
-# "Waiting for global read lock"
+# "Waiting for backup lock"
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock"
+ where state = "Waiting for backup lock"
and info = "DROP DATABASE mysqltest_1";
--source include/wait_condition.inc
--error ER_CANT_UPDATE_WITH_READLOCK
@@ -377,7 +377,7 @@ send flush tables with read lock;
connection con5;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "flush tables with read lock";
--source include/wait_condition.inc
--echo # global read lock is taken
@@ -386,14 +386,19 @@ send select * from t2 for update;
connection con5;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "select * from t2 for update";
--source include/wait_condition.inc
--echo # waiting for release of read lock
connection con4;
--echo # would hang and later cause a deadlock
-flush tables t2;
+--send flush tables t2
connection con1;
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table metadata lock" and
+ info = "flush tables t2";
+--source include/wait_condition.inc
--echo # clean up
unlock tables;
connection con2;
@@ -401,6 +406,8 @@ connection con2;
unlock tables;
connection con3;
--reap
+connection con4;
+--reap
connection default;
disconnect con5;
disconnect con4;
@@ -432,16 +439,23 @@ send update t2 set a = 1;
connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "update t2 set a = 1";
--source include/wait_condition.inc
--echo # statement is waiting for release of read lock
connection con2;
-flush table t2;
+--send flush table t2
connection default;
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table metadata lock" and
+ info = "flush table t2";
+--source include/wait_condition.inc
unlock tables;
connection con1;
--reap
+connection con2;
+--reap
--echo #
--echo # LOCK TABLES .. WRITE
@@ -454,7 +468,7 @@ send lock tables t2 write;
connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "lock tables t2 write";
--source include/wait_condition.inc
--echo # statement is waiting for release of read lock
@@ -542,7 +556,7 @@ connection flush;
connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "flush tables with read lock";
--source include/wait_condition.inc
alter table t1 add column j int;
@@ -550,14 +564,14 @@ connect (insert,localhost,root,,test,,);
connection insert;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "flush tables with read lock";
--source include/wait_condition.inc
---send insert into t1 values (1,2);
+--send insert into t1 values (1,2)
connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for table metadata lock" and
info = "insert into t1 values (1,2)";
--source include/wait_condition.inc
unlock tables;
@@ -565,7 +579,7 @@ connection flush;
--reap
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock" and
+ where state = "Waiting for backup lock" and
info = "insert into t1 values (1,2)";
--source include/wait_condition.inc
select * from t1;
@@ -598,12 +612,12 @@ connection flush;
connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock";
+ where state = "Waiting for backup lock";
--source include/wait_condition.inc
flush tables;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock";
+ where state = "Waiting for backup lock";
--source include/wait_condition.inc
unlock tables;
connection flush;
@@ -664,12 +678,12 @@ connection flush;
connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock";
+ where state = "Waiting for backup lock";
--source include/wait_condition.inc
flush tables;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock";
+ where state = "Waiting for backup lock";
--source include/wait_condition.inc
drop table t1;
connection flush;
@@ -931,13 +945,19 @@ connection default;
LOCK TABLE t1 READ;
connection con3;
+
+# first test that flush tables doesn't block
+FLUSH TABLES;
+
+# Check the FLUSH TABLES t1 waits until table lock is released
+
--echo # Sending:
---send FLUSH TABLES
+--send FLUSH TABLES t1
connection con2;
let $wait_condition=
SELECT COUNT(*) = 1 FROM information_schema.processlist
- WHERE state = "Waiting for table flush" AND info = "FLUSH TABLES";
+ WHERE state = "Waiting for table metadata lock" AND info = "FLUSH TABLES t1";
--source include/wait_condition.inc
--error ER_LOCK_WAIT_TIMEOUT
SELECT * FROM t1;
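Several of the lock_multi.test edits above apply the same mysqltest pattern: turn a formerly synchronous statement into --send, wait until the processlist shows it parked on the expected lock state, then --reap it once the blocking session releases its locks. A condensed sketch of that pattern, with connection and table names as placeholders:

    --connection con2
    --send flush table t2

    --connection default
    # Wait until the FLUSH is visibly blocked on the metadata lock.
    let $wait_condition=
      select count(*) = 1 from information_schema.processlist
      where state = "Waiting for table metadata lock" and
            info = "flush table t2";
    --source include/wait_condition.inc

    unlock tables;

    --connection con2
    --reap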
diff --git a/mysql-test/main/lock_sync.result b/mysql-test/main/lock_sync.result
index 7b61c5994b6..bbdc1d43ba5 100644
--- a/mysql-test/main/lock_sync.result
+++ b/mysql-test/main/lock_sync.result
@@ -67,6 +67,8 @@ declare j int;
select i from t1 where i = 1 into j;
return j;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f2() returns int
begin
declare k int;
@@ -74,6 +76,8 @@ select i from t1 where i = 1 into k;
insert into t2 values (k + 5);
return 0;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f3() returns int
begin
return (select i from t1 where i = 3);
@@ -97,12 +101,16 @@ declare k int;
select i from v1 where i = 1 into k;
return k;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f7() returns int
begin
declare k int;
select j from v2 where j = 1 into k;
return k;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f8() returns int
begin
declare k int;
@@ -110,6 +118,8 @@ select i from v1 where i = 1 into k;
insert into t2 values (k+5);
return k;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f9() returns int
begin
update v2 set j=j+10 where j=1;
@@ -139,6 +149,8 @@ create procedure p2(inout p int)
begin
select i from t1 where i = 1 into p;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function f14() returns int
begin
declare k int;
@@ -166,6 +178,8 @@ select i from t1 where i = 1 into j;
call p3;
return 1;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create procedure p3()
begin
create temporary table if not exists temp1 (a int);
@@ -178,6 +192,8 @@ declare k int;
select i from t1 where i=1 into k;
set new.l= k+1;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create trigger t4_bu before update on t4 for each row
begin
if (select i from t1 where i=1) then
@@ -766,7 +782,7 @@ SET DEBUG_SYNC= 'now WAIT_FOR opened';
SET DEBUG_SYNC= 'now SIGNAL dropped';
SET DEBUG_SYNC= 'now WAIT_FOR opened';
# Sending:
-FLUSH TABLES;
+FLUSH TABLES t1;
connection default;
# Waiting for FLUSH TABLES to be blocked.
SET DEBUG_SYNC= 'now SIGNAL dropped';
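The new warnings recorded in the lock_sync.result hunks above come from the trailing 'SELECT ... FROM ... INTO var' form inside the stored routines; the message itself recommends placing INTO ahead of FROM. A minimal sketch of the two spellings in a function body; the table and function names are illustrative, not from the test.

    delimiter |
    create table t (i int)|

    create function f_old() returns int
    begin
      declare j int;
      select i from t where i = 1 into j;   -- trailing INTO: warning 1287 at CREATE time
      return j;
    end|

    create function f_new() returns int
    begin
      declare j int;
      select i into j from t where i = 1;   -- the spelling the warning recommends
      return j;
    end|
    delimiter ;

    drop function f_old;
    drop function f_new;
    drop table t;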
diff --git a/mysql-test/main/lock_sync.test b/mysql-test/main/lock_sync.test
index af8435f7fbb..1a8cd7bdbd3 100644
--- a/mysql-test/main/lock_sync.test
+++ b/mysql-test/main/lock_sync.test
@@ -974,12 +974,12 @@ SET DEBUG_SYNC= 'now WAIT_FOR opened';
SET DEBUG_SYNC= 'now SIGNAL dropped';
SET DEBUG_SYNC= 'now WAIT_FOR opened';
--echo # Sending:
---send FLUSH TABLES
+--send FLUSH TABLES t1
connection default;
--echo # Waiting for FLUSH TABLES to be blocked.
let $wait_condition= SELECT COUNT(*)=1 FROM information_schema.processlist
- WHERE state= 'Waiting for table flush' AND info= 'FLUSH TABLES';
+ WHERE state= 'Waiting for table metadata lock' AND info= 'FLUSH TABLES t1';
--source include/wait_condition.inc
SET DEBUG_SYNC= 'now SIGNAL dropped';
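The lock_sync.test edit above swaps the bare FLUSH TABLES for FLUSH TABLES t1 and accordingly waits on 'Waiting for table metadata lock' instead of 'Waiting for table flush'. Outside mysqltest, the same check is just a processlist query; a minimal sketch, issued from a third session while the FLUSH is still blocked (the expected state is taken from the wait condition above).

    select id, state, info
    from information_schema.processlist
    where info = 'FLUSH TABLES t1';
    -- Expected while another session still holds a lock on t1:
    --   state = 'Waiting for table metadata lock'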
diff --git a/mysql-test/main/lock_user.result b/mysql-test/main/lock_user.result
new file mode 100644
index 00000000000..a8740e8ad37
--- /dev/null
+++ b/mysql-test/main/lock_user.result
@@ -0,0 +1,134 @@
+create user user1@localhost;
+create user user2@localhost;
+#
+# Only privileged users should be able to lock/unlock.
+#
+alter user user1@localhost account lock;
+alter user user1@localhost account unlock;
+create user user3@localhost account lock;
+drop user user3@localhost;
+connect con1,localhost,user1;
+connection con1;
+alter user user2@localhost account lock;
+ERROR 42000: Access denied; you need (at least one of) the CREATE USER privilege(s) for this operation
+disconnect con1;
+connection default;
+#
+# ALTER USER USER1 ACCOUNT LOCK should deny the connection of user1,
+# but it should allow user2 to connect.
+#
+alter user user1@localhost account lock;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Access denied, this account is locked
+connect con2,localhost,user2;
+disconnect con2;
+connection default;
+alter user user1@localhost account unlock;
+#
+# Passing an incorrect user should return an error unless
+# IF EXISTS is used
+#
+alter user inexistentUser@localhost account lock;
+ERROR HY000: Operation ALTER USER failed for 'inexistentUser'@'localhost'
+alter if exists user inexistentUser@localhost account lock;
+Warnings:
+Error 1133 Can't find any matching row in the user table
+Note 1396 Operation ALTER USER failed for 'inexistentUser'@'localhost'
+#
+# Passing an existing user to CREATE should not be allowed
+# and it should not change the locking state of the current user
+#
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost'
+create user user1@localhost account lock;
+ERROR HY000: Operation CREATE USER failed for 'user1'@'localhost'
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost'
+#
+# Passing multiple users should lock them all
+#
+alter user user1@localhost, user2@localhost account lock;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Access denied, this account is locked
+connect(localhost,user2,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con2,localhost,user2;
+ERROR HY000: Access denied, this account is locked
+alter user user1@localhost, user2@localhost account unlock;
+#
+# The locking state is preserved after acl reload
+#
+alter user user1@localhost account lock;
+flush privileges;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Access denied, this account is locked
+alter user user1@localhost account unlock;
+#
+# JSON functions on global_priv reflect the locking state of an account
+#
+alter user user1@localhost account lock;
+select host, user, JSON_VALUE(Priv, '$.account_locked') from mysql.global_priv where user='user1';
+host user JSON_VALUE(Priv, '$.account_locked')
+localhost user1 1
+alter user user1@localhost account unlock;
+select host, user, JSON_VALUE(Priv, '$.account_locked') from mysql.global_priv where user='user1';
+host user JSON_VALUE(Priv, '$.account_locked')
+localhost user1 0
+#
+# SHOW CREATE USER correctly displays the locking state of a user
+#
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost'
+alter user user1@localhost account lock;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' ACCOUNT LOCK
+alter user user1@localhost account unlock;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost'
+create user newuser@localhost account lock;
+show create user newuser@localhost;
+CREATE USER for newuser@localhost
+CREATE USER 'newuser'@'localhost' ACCOUNT LOCK
+drop user newuser@localhost;
+#
+# Users should be able to lock themselves
+#
+grant CREATE USER on *.* to user1@localhost;
+connect con1,localhost,user1;
+connection con1;
+alter user user1@localhost account lock;
+disconnect con1;
+connection default;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Access denied, this account is locked
+alter user user1@localhost account unlock;
+#
+# Users should be able to unlock themselves if the connections
+# had been established before the accounts were locked
+#
+grant CREATE USER on *.* to user1@localhost;
+connect con1,localhost,user1;
+alter user user1@localhost account lock;
+connection con1;
+alter user user1@localhost account unlock;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost'
+disconnect con1;
+connection default;
+#
+# COM_CHANGE_USER should return an error if the destination
+# account is locked
+#
+alter user user1@localhost account lock;
+ERROR HY000: Access denied, this account is locked
+drop user user1@localhost;
+drop user user2@localhost;
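lock_user.result above is a new expected-result file covering account locking end to end. A condensed sketch of the core statements it exercises; the user name is illustrative.

    create user demo@localhost;

    alter user demo@localhost account lock;
    show create user demo@localhost;   -- ... ACCOUNT LOCK
    -- New connections as demo@localhost are now refused with
    -- ERROR HY000: Access denied, this account is locked

    -- The flag is stored in the account's JSON privilege document:
    select json_value(Priv, '$.account_locked')
    from mysql.global_priv where user = 'demo';

    alter user demo@localhost account unlock;
    drop user demo@localhost;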
diff --git a/mysql-test/main/lock_user.test b/mysql-test/main/lock_user.test
new file mode 100644
index 00000000000..366c34ecea8
--- /dev/null
+++ b/mysql-test/main/lock_user.test
@@ -0,0 +1,142 @@
+#
+# Test user account locking
+#
+
+--source include/not_embedded.inc
+
+create user user1@localhost;
+create user user2@localhost;
+
+--echo #
+--echo # Only privileged users should be able to lock/unlock.
+--echo #
+alter user user1@localhost account lock;
+alter user user1@localhost account unlock;
+create user user3@localhost account lock;
+drop user user3@localhost;
+
+connect(con1,localhost,user1);
+connection con1;
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+alter user user2@localhost account lock;
+disconnect con1;
+connection default;
+
+--echo #
+--echo # ALTER USER USER1 ACCOUNT LOCK should deny the connection of user1,
+--echo # but it should allow user2 to connect.
+--echo #
+
+alter user user1@localhost account lock;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_ACCOUNT_HAS_BEEN_LOCKED
+connect(con1,localhost,user1);
+connect(con2,localhost,user2);
+disconnect con2;
+connection default;
+alter user user1@localhost account unlock;
+
+--echo #
+--echo # Passing an incorrect user should return an error unless
+--echo # IF EXISTS is used
+--echo #
+
+--error ER_CANNOT_USER
+alter user inexistentUser@localhost account lock;
+
+alter if exists user inexistentUser@localhost account lock;
+
+--echo #
+--echo # Passing an existing user to CREATE should not be allowed
+--echo # and it should not change the locking state of the current user
+--echo #
+
+show create user user1@localhost;
+--error ER_CANNOT_USER
+create user user1@localhost account lock;
+show create user user1@localhost;
+
+--echo #
+--echo # Passing multiple users should lock them all
+--echo #
+
+alter user user1@localhost, user2@localhost account lock;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_ACCOUNT_HAS_BEEN_LOCKED
+connect(con1,localhost,user1);
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_ACCOUNT_HAS_BEEN_LOCKED
+connect(con2,localhost,user2);
+alter user user1@localhost, user2@localhost account unlock;
+
+--echo #
+--echo # The locking state is preserved after acl reload
+--echo #
+
+alter user user1@localhost account lock;
+flush privileges;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_ACCOUNT_HAS_BEEN_LOCKED
+connect(con1,localhost,user1);
+alter user user1@localhost account unlock;
+
+--echo #
+--echo # JSON functions on global_priv reflect the locking state of an account
+--echo #
+
+alter user user1@localhost account lock;
+select host, user, JSON_VALUE(Priv, '$.account_locked') from mysql.global_priv where user='user1';
+alter user user1@localhost account unlock;
+select host, user, JSON_VALUE(Priv, '$.account_locked') from mysql.global_priv where user='user1';
+
+--echo #
+--echo # SHOW CREATE USER correctly displays the locking state of a user
+--echo #
+
+show create user user1@localhost;
+alter user user1@localhost account lock;
+show create user user1@localhost;
+alter user user1@localhost account unlock;
+show create user user1@localhost;
+create user newuser@localhost account lock;
+show create user newuser@localhost;
+drop user newuser@localhost;
+
+--echo #
+--echo # Users should be able to lock themselves
+--echo #
+grant CREATE USER on *.* to user1@localhost;
+connect(con1,localhost,user1);
+connection con1;
+alter user user1@localhost account lock;
+disconnect con1;
+connection default;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_ACCOUNT_HAS_BEEN_LOCKED
+connect(con1,localhost,user1);
+alter user user1@localhost account unlock;
+
+--echo #
+--echo # Users should be able to unlock themselves if the connections
+--echo # had been established before the accounts were locked
+--echo #
+grant CREATE USER on *.* to user1@localhost;
+connect(con1,localhost,user1);
+alter user user1@localhost account lock;
+connection con1;
+alter user user1@localhost account unlock;
+show create user user1@localhost;
+disconnect con1;
+connection default;
+
+--echo #
+--echo # COM_CHANGE_USER should return an error if the destination
+--echo # account is locked
+--echo #
+alter user user1@localhost account lock;
+--error ER_ACCOUNT_HAS_BEEN_LOCKED
+--change_user user1
+
+drop user user1@localhost;
+drop user user2@localhost;
+
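The .test counterpart above also pins down the error handling around locking. A minimal sketch of those edge cases, mirroring the errors recorded in lock_user.result; the user names here are illustrative.

    -- Locking a user that does not exist is an error ...
    alter user no_such_user@localhost account lock;
    -- ERROR HY000: Operation ALTER USER failed for 'no_such_user'@'localhost'

    -- ... unless IF EXISTS downgrades it to a warning/note pair.
    alter if exists user no_such_user@localhost account lock;

    -- An account can also be created locked from the start:
    create user parked@localhost account lock;
    show create user parked@localhost;   -- CREATE USER 'parked'@'localhost' ACCOUNT LOCK
    drop user parked@localhost;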
diff --git a/mysql-test/main/log_tables_upgrade.result b/mysql-test/main/log_tables_upgrade.result
index 8f822d56020..8d7b08a11bd 100644
--- a/mysql-test/main/log_tables_upgrade.result
+++ b/mysql-test/main/log_tables_upgrade.result
@@ -19,12 +19,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -43,9 +43,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
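The reordered mysql_upgrade output above (mysql.global_priv checked with the base tables, mysql.user reported under 'Fixing views') reflects that the user table is now represented as a view over global_priv. One way to confirm that on an upgraded server is sketched below; this query is an assumption about how to verify it, not part of the test.

    select table_name, table_type
    from information_schema.tables
    where table_schema = 'mysql'
      and table_name in ('user', 'global_priv');
    -- Expected on 10.4+: global_priv is a BASE TABLE, user is a VIEW.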
diff --git a/mysql-test/main/long_unique.result b/mysql-test/main/long_unique.result
new file mode 100644
index 00000000000..3843ff4aff0
--- /dev/null
+++ b/mysql-test/main/long_unique.result
@@ -0,0 +1,1465 @@
+#Structure of tests
+#First we will check all option for
+#table containing single unique column
+#table containing keys like unique(a,b,c,d) etc
+#then table containing 2 blob unique etc
+set @allowed_packet= @@max_allowed_packet;
+#table with single long blob column;
+create table t1(a blob unique );
+insert into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890);
+#blob with primary key not allowed
+create table t2(a blob,primary key(a(10000)));
+ERROR 42000: Specified key was too long; max key length is 1000 bytes
+create table t3(a varchar(10000) primary key);
+ERROR 42000: Specified key was too long; max key length is 1000 bytes
+insert into t1 values(2);
+ERROR 23000: Duplicate entry '2' for key 'a'
+#table structure;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table t1
+Non_unique 0
+Key_name a
+Seq_in_index 1
+Column_name a
+Collation A
+Cardinality NULL
+Sub_part NULL
+Packed NULL
+Null YES
+Index_type HASH
+Comment
+Index_comment
+
+MyISAM file: DATADIR/test/t1
+Record format: Packed
+Character set: latin1_swedish_ci (8)
+Data records: 10 Deleted blocks: 0
+Recordlength: 12
+
+table description:
+Key Start Len Index Type
+1 12 8 multip. ulonglong NULL
+select TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,ORDINAL_POSITION,COLUMN_DEFAULT,IS_NULLABLE,DATA_TYPE,CHARACTER_MAXIMUM_LENGTH,CHARACTER_OCTET_LENGTH,NUMERIC_PRECISION,NUMERIC_SCALE,DATETIME_PRECISION,CHARACTER_SET_NAME,COLLATION_NAME,COLUMN_TYPE,COLUMN_KEY,EXTRA,COLUMN_COMMENT,IS_GENERATED,GENERATION_EXPRESSION from information_schema.columns where table_schema = 'test' and table_name = 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
+def test t1 a 1 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob UNI NEVER NULL
+select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME NON_UNIQUE INDEX_SCHEMA INDEX_NAME SEQ_IN_INDEX COLUMN_NAME COLLATION CARDINALITY SUB_PART PACKED NULLABLE INDEX_TYPE COMMENT INDEX_COMMENT
+def test t1 0 test a 1 a A NULL NULL NULL YES HASH
+select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
+CONSTRAINT_CATALOG def
+CONSTRAINT_SCHEMA test
+CONSTRAINT_NAME a
+TABLE_CATALOG def
+TABLE_SCHEMA test
+TABLE_NAME t1
+COLUMN_NAME a
+ORDINAL_POSITION 1
+POSITION_IN_UNIQUE_CONSTRAINT NULL
+REFERENCED_TABLE_SCHEMA NULL
+REFERENCED_TABLE_NAME NULL
+REFERENCED_COLUMN_NAME NULL
+# table select we should not be able to see db_row_hash_column;
+select * from t1 order by a;
+a
+NULL
+NULL
+1
+123456789034567890
+123456789034567891
+2
+3
+56
+maria
+sachin
+select db_row_hash_1 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
+#duplicate entry test;
+insert into t1 values(2);
+ERROR 23000: Duplicate entry '2' for key 'a'
+insert into t1 values('sachin');
+ERROR 23000: Duplicate entry 'sachin' for key 'a'
+insert into t1 values(123456789034567891);
+ERROR 23000: Duplicate entry '123456789034567891' for key 'a'
+select * from t1 order by a;
+a
+NULL
+NULL
+1
+123456789034567890
+123456789034567891
+2
+3
+56
+maria
+sachin
+insert into t1 values(11),(22),(33);
+insert into t1 values(12),(22);
+ERROR 23000: Duplicate entry '22' for key 'a'
+select * from t1 order by a;
+a
+NULL
+NULL
+1
+11
+12
+123456789034567890
+123456789034567891
+2
+22
+3
+33
+56
+maria
+sachin
+insert into t1 values(repeat('s',4000*10)),(repeat('s',4001*10));
+insert into t1 values(repeat('m',4000*10)),(repeat('m',4000*10));
+ERROR 23000: Duplicate entry 'mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm' for key 'a'
+insert into t1 values(repeat('m',4001)),(repeat('m',4002));
+truncate table t1;
+insert into t1 values(1),(2),(3),(4),(5),(8),(7);
+
+MyISAM file: DATADIR/test/t1
+Record format: Packed
+Character set: latin1_swedish_ci (8)
+Data records: 7 Deleted blocks: 0
+Recordlength: 12
+
+table description:
+Key Start Len Index Type
+1 12 8 multip. ulonglong NULL
+#now some alter commands;
+alter table t1 add column b int;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+b int(11) YES NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values(1,2);
+ERROR 23000: Duplicate entry '1' for key 'a'
+insert into t1 values(2,2);
+ERROR 23000: Duplicate entry '2' for key 'a'
+select db_row_hash_1 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
+#now try to change db_row_hash_1 column;
+alter table t1 drop column db_row_hash_1;
+ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
+alter table t1 add column d int , add column e int , drop column db_row_hash_1;
+ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
+alter table t1 modify column db_row_hash_1 int ;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 add column a int , add column b int, modify column db_row_hash_1 int ;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 change column db_row_hash_1 dsds int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 add column asd int, change column db_row_hash_1 dsds int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 drop column b , add column c int;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+#now add some column with name db_row_hash;
+alter table t1 add column db_row_hash_1 int unique;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values(45,1,55),(46,1,55);
+ERROR 23000: Duplicate entry '55' for key 'db_row_hash_1'
+insert into t1 values(45,1,55),(45,1,55);
+ERROR 23000: Duplicate entry '45' for key 'a'
+alter table t1 add column db_row_hash_2 int, add column db_row_hash_3 int;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+c int(11) YES NULL
+db_row_hash_1 int(11) YES UNI NULL
+db_row_hash_2 int(11) YES NULL
+db_row_hash_3 int(11) YES NULL
+#this should also drop the unique index ;
+alter table t1 drop column a;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+#add column with unique index on blob ;
+alter table t1 add column a blob unique;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+# try to change the blob unique name;
+alter table t1 change column a aa blob ;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ `aa` blob DEFAULT NULL,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `a` (`aa`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 a 1 aa A NULL NULL NULL YES HASH
+# try to change the blob unique datatype;
+#this will change index to b tree;
+alter table t1 modify column aa int ;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ `aa` int(11) DEFAULT NULL,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `a` (`aa`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 a 1 aa A NULL NULL NULL YES BTREE
+alter table t1 add column clm blob unique;
+#try changing the name ;
+alter table t1 change column clm clm_changed blob;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ `aa` int(11) DEFAULT NULL,
+ `clm_changed` blob DEFAULT NULL,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `a` (`aa`),
+ UNIQUE KEY `clm` (`clm_changed`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 a 1 aa A NULL NULL NULL YES BTREE
+t1 0 clm 1 clm_changed A NULL NULL NULL YES HASH
+#now drop the unique key;
+alter table t1 drop key clm;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ `aa` int(11) DEFAULT NULL,
+ `clm_changed` blob DEFAULT NULL,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `a` (`aa`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 a 1 aa A NULL NULL NULL YES BTREE
+drop table t1;
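The block above establishes the basic contract of a long unique key: UNIQUE on a blob is backed by a hidden hash column, shown as USING HASH in SHOW CREATE TABLE, while duplicate detection still compares the full values and the hash column itself stays invisible. A minimal sketch of that observable behaviour; the table name and value lengths are illustrative.

    create table demo (a blob unique);
    show create table demo;                         -- UNIQUE KEY `a` (`a`) USING HASH

    insert into demo values (repeat('x', 40000));
    insert into demo values (repeat('x', 40000));   -- rejected: Duplicate entry ... for key 'a'
    insert into demo values (repeat('x', 40001));   -- longer value, accepted

    select db_row_hash_1 from demo;                 -- ERROR 42S22: unknown column; the
                                                    -- internal hash column is not exposed
    drop table demo;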
+create table t1 (a TEXT CHARSET latin1 COLLATE latin1_german2_ci unique);
+desc t1;
+Field Type Null Key Default Extra
+a text YES UNI NULL
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+insert into t1 values ('ae');
+insert into t1 values ('AE');
+ERROR 23000: Duplicate entry 'AE' for key 'a'
+insert into t1 values ('Ä');
+drop table t1;
+create table t1 (a int primary key, b blob unique);
+desc t1;
+Field Type Null Key Default Extra
+a int(11) NO PRI NULL
+b blob YES UNI NULL
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A 0 NULL NULL BTREE
+t1 0 b 1 b A NULL NULL NULL YES HASH
+insert into t1 values(1,1),(2,2),(3,3);
+insert into t1 values(1,1);
+ERROR 23000: Duplicate entry '1' for key 'b'
+insert into t1 values(7,1);
+ERROR 23000: Duplicate entry '1' for key 'b'
+drop table t1;
+#table with multiple long blob column and varchar text column ;
+create table t1(a blob unique, b int , c blob unique , d text unique , e varchar(3000) unique);
+insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555),
+('sachin',341,'fdf','gfgfgfg','hghgr'),('maria',345,'frter','dasd','utyuty'),
+(123456789034567891,353534,53453453453456,64565464564564,45435345345345),
+(123456789034567890,43545,657567567567,78967657567567,657567567567567676);
+#table structure;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+b int(11) YES NULL
+c blob YES UNI NULL
+d text YES UNI NULL
+e varchar(3000) YES UNI NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `c` (`c`) USING HASH,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 d 1 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+
+MyISAM file: DATADIR/test/t1
+Record format: Packed
+Character set: latin1_swedish_ci (8)
+Data records: 8 Deleted blocks: 0
+Recordlength: 3040
+
+table description:
+Key Start Len Index Type
+1 3039 8 multip. ulonglong NULL
+2 3047 8 multip. ulonglong NULL
+3 3055 8 multip. ulonglong NULL
+4 3063 8 multip. ulonglong NULL
+select TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,ORDINAL_POSITION,COLUMN_DEFAULT,IS_NULLABLE,DATA_TYPE,CHARACTER_MAXIMUM_LENGTH,CHARACTER_OCTET_LENGTH,NUMERIC_PRECISION,NUMERIC_SCALE,DATETIME_PRECISION,CHARACTER_SET_NAME,COLLATION_NAME,COLUMN_TYPE,COLUMN_KEY,EXTRA,COLUMN_COMMENT,IS_GENERATED,GENERATION_EXPRESSION from information_schema.columns where table_schema = 'test' and table_name = 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
+def test t1 a 1 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob UNI NEVER NULL
+def test t1 b 2 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) NEVER NULL
+def test t1 c 3 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob UNI NEVER NULL
+def test t1 d 4 NULL YES text 65535 65535 NULL NULL NULL latin1 latin1_swedish_ci text UNI NEVER NULL
+def test t1 e 5 NULL YES varchar 3000 3000 NULL NULL NULL latin1 latin1_swedish_ci varchar(3000) UNI NEVER NULL
+select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME NON_UNIQUE INDEX_SCHEMA INDEX_NAME SEQ_IN_INDEX COLUMN_NAME COLLATION CARDINALITY SUB_PART PACKED NULLABLE INDEX_TYPE COMMENT INDEX_COMMENT
+def test t1 0 test a 1 a A NULL NULL NULL YES HASH
+def test t1 0 test c 1 c A NULL NULL NULL YES HASH
+def test t1 0 test d 1 d A NULL NULL NULL YES HASH
+def test t1 0 test e 1 e A NULL NULL NULL YES HASH
+select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
+CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
+def test a def test t1 a 1 NULL NULL NULL NULL
+def test c def test t1 c 1 NULL NULL NULL NULL
+def test d def test t1 d 1 NULL NULL NULL NULL
+def test e def test t1 e 1 NULL NULL NULL NULL
+#table select we should not be able to see db_row_hash_1 column;
+select * from t1 order by a;
+a b c d e
+1 2 3 4 5
+123456789034567890 43545 657567567567 78967657567567 657567567567567676
+123456789034567891 353534 53453453453456 64565464564564 45435345345345
+2 11 22 33 44
+3111 222 333 444 555
+5611 2222 3333 4444 5555
+maria 345 frter dasd utyuty
+sachin 341 fdf gfgfgfg hghgr
+select db_row_hash_1 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
+select db_row_hash_2 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_2' in 'field list'
+select db_row_hash_3 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_3' in 'field list'
+#duplicate entry test;
+insert into t1 values(21,2,3,42,51);
+ERROR 23000: Duplicate entry '3' for key 'c'
+insert into t1 values('sachin',null,null,null,null);
+ERROR 23000: Duplicate entry 'sachin' for key 'a'
+insert into t1 values(1234567890345671890,4353451,6575675675617,789676575675617,657567567567567676);
+ERROR 23000: Duplicate entry '657567567567567676' for key 'e'
+select * from t1 order by a;
+a b c d e
+1 2 3 4 5
+123456789034567890 43545 657567567567 78967657567567 657567567567567676
+123456789034567891 353534 53453453453456 64565464564564 45435345345345
+2 11 22 33 44
+3111 222 333 444 555
+5611 2222 3333 4444 5555
+maria 345 frter dasd utyuty
+sachin 341 fdf gfgfgfg hghgr
+insert into t1 values(repeat('s',4000*10),100,repeat('s',4000*10),repeat('s',4000*10),
+repeat('s',400)),(repeat('s',4001*10),1000,repeat('s',4001*10),repeat('s',4001*10),
+repeat('s',2995));
+insert into t1 values(repeat('m',4000*11),10,repeat('s',4000*11),repeat('s',4000*11),repeat('s',2995));
+ERROR 23000: Duplicate entry 'ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss' for key 'e'
+truncate table t1;
+insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555);
+#now some alter commands;
+alter table t1 add column f int;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+b int(11) YES NULL
+c blob YES UNI NULL
+d text YES UNI NULL
+e varchar(3000) YES UNI NULL
+f int(11) YES NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `c` (`c`) USING HASH,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+#unique key should not break;
+insert into t1 values(1,2,3,4,5,6);
+ERROR 23000: Duplicate entry '1' for key 'a'
+select db_row_hash_1 , db_row_hash_2, db_row_hash_3 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
+#now try to change db_row_hash_1 column;
+alter table t1 drop column db_row_hash_1, drop column db_row_hash_2, drop column db_row_hash_3;
+ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
+alter table t1 add column dg int , add column ef int , drop column db_row_hash_1;
+ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
+alter table t1 modify column db_row_hash_1 int , modify column db_row_hash_2 int, modify column db_row_hash_3 int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 add column ar int , add column rb int, modify column db_row_hash_1 int , modify column db_row_hash_3 int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 change column db_row_hash_1 dsds int , change column db_row_hash_2 dfdf int , change column db_row_hash_3 gdfg int ;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 add column asd int, drop column a, change column db_row_hash_1 dsds int, change db_row_hash_3 fdfdfd int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 drop column b , add column g int;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `c` (`c`) USING HASH,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+#now add some column with name db_row_hash;
+alter table t1 add column db_row_hash_1 int unique;
+alter table t1 add column db_row_hash_2 int unique;
+alter table t1 add column db_row_hash_3 int unique;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_3` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `c` (`c`) USING HASH,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`) USING HASH,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
+ UNIQUE KEY `db_row_hash_3` (`db_row_hash_3`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+alter table t1 add column db_row_hash_7 int, add column db_row_hash_5 int , add column db_row_hash_4 int ;
+alter table t1 drop column db_row_hash_7,drop column db_row_hash_3, drop column db_row_hash_4;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+c blob YES UNI NULL
+d text YES UNI NULL
+e varchar(3000) YES UNI NULL
+f int(11) YES NULL
+g int(11) YES NULL
+db_row_hash_1 int(11) YES UNI NULL
+db_row_hash_2 int(11) YES UNI NULL
+db_row_hash_5 int(11) YES NULL
+#this should not break anything;
+insert into t1 values(1,2,3,4,5,6,23,5,6);
+ERROR 23000: Duplicate entry '1' for key 'a'
+#this should also drop the unique index;
+alter table t1 drop column a, drop column c;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_5` int(11) DEFAULT NULL,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`) USING HASH,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 d 1 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
+#add column with unique index on blob;
+alter table t1 add column a blob unique;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_5` int(11) DEFAULT NULL,
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`) USING HASH,
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 d 1 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
+t1 0 a 1 a A NULL NULL NULL YES HASH
+#try to change the blob unique column name;
+#this will change index to b tree;
+alter table t1 modify column a int , modify column e int;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `d` text DEFAULT NULL,
+ `e` int(11) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_5` int(11) DEFAULT NULL,
+ `a` int(11) DEFAULT NULL,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`),
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
+ UNIQUE KEY `a` (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 d 1 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
+t1 0 a 1 a A NULL NULL NULL YES BTREE
+alter table t1 add column clm1 blob unique,add column clm2 blob unique;
+#try changing the name;
+alter table t1 change column clm1 clm_changed1 blob, change column clm2 clm_changed2 blob;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `d` text DEFAULT NULL,
+ `e` int(11) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_5` int(11) DEFAULT NULL,
+ `a` int(11) DEFAULT NULL,
+ `clm_changed1` blob DEFAULT NULL,
+ `clm_changed2` blob DEFAULT NULL,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`),
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
+ UNIQUE KEY `a` (`a`),
+ UNIQUE KEY `clm1` (`clm_changed1`) USING HASH,
+ UNIQUE KEY `clm2` (`clm_changed2`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 d 1 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
+t1 0 a 1 a A NULL NULL NULL YES BTREE
+t1 0 clm1 1 clm_changed1 A NULL NULL NULL YES HASH
+t1 0 clm2 1 clm_changed2 A NULL NULL NULL YES HASH
+#now drop the unique key;
+alter table t1 drop key clm1, drop key clm2;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `d` text DEFAULT NULL,
+ `e` int(11) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ `db_row_hash_5` int(11) DEFAULT NULL,
+ `a` int(11) DEFAULT NULL,
+ `clm_changed1` blob DEFAULT NULL,
+ `clm_changed2` blob DEFAULT NULL,
+ UNIQUE KEY `d` (`d`) USING HASH,
+ UNIQUE KEY `e` (`e`),
+ UNIQUE KEY `db_row_hash_1` (`db_row_hash_1`),
+ UNIQUE KEY `db_row_hash_2` (`db_row_hash_2`),
+ UNIQUE KEY `a` (`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 d 1 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_1 1 db_row_hash_1 A NULL NULL NULL YES BTREE
+t1 0 db_row_hash_2 1 db_row_hash_2 A NULL NULL NULL YES BTREE
+t1 0 a 1 a A NULL NULL NULL YES BTREE
+drop table t1;
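One detail from the ALTER TABLE exercises above is worth calling out before the multi-column case that follows: when a hash-backed unique column is changed to a short datatype, the key silently becomes an ordinary BTREE unique. A minimal sketch; the table name is illustrative.

    create table demo (a blob unique);
    show keys from demo;                    -- Index_type: HASH
    alter table demo modify column a int;
    show keys from demo;                    -- Index_type: BTREE (plain unique now)
    drop table demo;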
+#now the table with key on multiple columns; the ultimate test;
+create table t1(a blob, b int , c varchar(2000) , d text , e varchar(3000) , f longblob , g int , h text ,
+unique(a,b,c), unique(c,d,e),unique(e,f,g,h), unique(b,d,g,h));
+insert into t1 values(1,1,1,1,1,1,1,1),(2,2,2,2,2,2,2,2),(3,3,3,3,3,3,3,3),(4,4,4,4,4,4,4,4),(5,5,5,5,5,5,5,5),
+('maria',6,'maria','maria','maria','maria',6,'maria'),('mariadb',7,'mariadb','mariadb','mariadb','mariadb',8,'mariadb')
+,(null,null,null,null,null,null,null,null),(null,null,null,null,null,null,null,null);
+#table structure;
+desc t1;
+Field Type Null Key Default Extra
+a blob YES MUL NULL
+b int(11) YES MUL NULL
+c varchar(2000) YES MUL NULL
+d text YES NULL
+e varchar(3000) YES MUL NULL
+f longblob YES NULL
+g int(11) YES NULL
+h text YES NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` varchar(2000) DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+
+MyISAM file: DATADIR/test/t1
+Record format: Packed
+Character set: latin1_swedish_ci (8)
+Data records: 9 Deleted blocks: 0
+Recordlength: 5059
+
+table description:
+Key Start Len Index Type
+1 5057 8 multip. ulonglong NULL
+2 5065 8 multip. ulonglong NULL
+3 5073 8 multip. ulonglong NULL
+4 5081 8 multip. ulonglong NULL
+select TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,ORDINAL_POSITION,COLUMN_DEFAULT,IS_NULLABLE,DATA_TYPE,CHARACTER_MAXIMUM_LENGTH,CHARACTER_OCTET_LENGTH,NUMERIC_PRECISION,NUMERIC_SCALE,DATETIME_PRECISION,CHARACTER_SET_NAME,COLLATION_NAME,COLUMN_TYPE,COLUMN_KEY,EXTRA,COLUMN_COMMENT,IS_GENERATED,GENERATION_EXPRESSION from information_schema.columns where table_schema = 'test' and table_name = 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION
+def test t1 a 1 NULL YES blob 65535 65535 NULL NULL NULL NULL NULL blob MUL NEVER NULL
+def test t1 b 2 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) MUL NEVER NULL
+def test t1 c 3 NULL YES varchar 2000 2000 NULL NULL NULL latin1 latin1_swedish_ci varchar(2000) MUL NEVER NULL
+def test t1 d 4 NULL YES text 65535 65535 NULL NULL NULL latin1 latin1_swedish_ci text NEVER NULL
+def test t1 e 5 NULL YES varchar 3000 3000 NULL NULL NULL latin1 latin1_swedish_ci varchar(3000) MUL NEVER NULL
+def test t1 f 6 NULL YES longblob 4294967295 4294967295 NULL NULL NULL NULL NULL longblob NEVER NULL
+def test t1 g 7 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) NEVER NULL
+def test t1 h 8 NULL YES text 65535 65535 NULL NULL NULL latin1 latin1_swedish_ci text NEVER NULL
+select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
+TABLE_CATALOG TABLE_SCHEMA TABLE_NAME NON_UNIQUE INDEX_SCHEMA INDEX_NAME SEQ_IN_INDEX COLUMN_NAME COLLATION CARDINALITY SUB_PART PACKED NULLABLE INDEX_TYPE COMMENT INDEX_COMMENT
+def test t1 0 test a 1 a A NULL NULL NULL YES HASH
+def test t1 0 test a 2 b A NULL NULL NULL YES HASH
+def test t1 0 test a 3 c A NULL NULL NULL YES HASH
+def test t1 0 test c 1 c A NULL NULL NULL YES HASH
+def test t1 0 test c 2 d A NULL NULL NULL YES HASH
+def test t1 0 test c 3 e A NULL NULL NULL YES HASH
+def test t1 0 test e 1 e A NULL NULL NULL YES HASH
+def test t1 0 test e 2 f A NULL NULL NULL YES HASH
+def test t1 0 test e 3 g A NULL NULL NULL YES HASH
+def test t1 0 test e 4 h A NULL NULL NULL YES HASH
+def test t1 0 test b 1 b A NULL NULL NULL YES HASH
+def test t1 0 test b 2 d A NULL NULL NULL YES HASH
+def test t1 0 test b 3 g A NULL NULL NULL YES HASH
+def test t1 0 test b 4 h A NULL NULL NULL YES HASH
+select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
+CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME
+def test a def test t1 a 1 NULL NULL NULL NULL
+def test a def test t1 b 2 NULL NULL NULL NULL
+def test a def test t1 c 3 NULL NULL NULL NULL
+def test c def test t1 c 1 NULL NULL NULL NULL
+def test c def test t1 d 2 NULL NULL NULL NULL
+def test c def test t1 e 3 NULL NULL NULL NULL
+def test e def test t1 e 1 NULL NULL NULL NULL
+def test e def test t1 f 2 NULL NULL NULL NULL
+def test e def test t1 g 3 NULL NULL NULL NULL
+def test e def test t1 h 4 NULL NULL NULL NULL
+def test b def test t1 b 1 NULL NULL NULL NULL
+def test b def test t1 d 2 NULL NULL NULL NULL
+def test b def test t1 g 3 NULL NULL NULL NULL
+def test b def test t1 h 4 NULL NULL NULL NULL
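In the information_schema output above the hash-backed keys are reported under their user-visible names (a, c, e, b) with INDEX_TYPE = HASH, while the hidden DB_ROW_HASH_* fields never show up in information_schema.columns. That makes them easy to enumerate with an ordinary metadata query, for example:
    -- list the unique keys on test.t1 that are backed by the long-unique hash
    select index_name,
           group_concat(column_name order by seq_in_index) as key_columns
    from information_schema.statistics
    where table_schema = 'test'
      and table_name = 't1'
      and non_unique = 0
      and index_type = 'HASH'
    group by index_name;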
+# on table select we should not be able to see the db_row_hash_1 column;
+select * from t1 order by a;
+a b c d e f g h
+NULL NULL NULL NULL NULL NULL NULL NULL
+NULL NULL NULL NULL NULL NULL NULL NULL
+1 1 1 1 1 1 1 1
+2 2 2 2 2 2 2 2
+3 3 3 3 3 3 3 3
+4 4 4 4 4 4 4 4
+5 5 5 5 5 5 5 5
+maria 6 maria maria maria maria 6 maria
+mariadb 7 mariadb mariadb mariadb mariadb 8 mariadb
+select db_row_hash_1 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
+select db_row_hash_2 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_2' in 'field list'
+select db_row_hash_3 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_3' in 'field list'
+#duplicate entry test;
+#duplicate key entries;
+insert into t1 values(1,1,1,0,0,0,0,0);
+ERROR 23000: Duplicate entry '1-1-1' for key 'a'
+insert into t1 values(0,0,1,1,1,0,0,0);
+ERROR 23000: Duplicate entry '1-1-1' for key 'c'
+insert into t1 values(0,0,0,0,1,1,1,1);
+ERROR 23000: Duplicate entry '1-1-1-1' for key 'e'
+insert into t1 values(1,1,1,1,1,0,0,0);
+ERROR 23000: Duplicate entry '1-1-1' for key 'a'
+insert into t1 values(0,0,0,0,1,1,1,1);
+ERROR 23000: Duplicate entry '1-1-1-1' for key 'e'
+insert into t1 values(1,1,1,1,1,1,1,1);
+ERROR 23000: Duplicate entry '1-1-1' for key 'a'
+select db_row_hash_1,db_row_hash_2,db_row_hash_3,db_row_hash_4,db_row_hash_5 from t1;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 'field list'
+alter table t1 drop column db_row_hash_1, drop column db_row_hash_2, drop column db_row_hash_3;
+ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
+alter table t1 add column dg int , add column ef int , drop column db_row_hash_1;
+ERROR 42000: Can't DROP COLUMN `db_row_hash_1`; check that it exists
+alter table t1 modify column db_row_hash_1 int , modify column db_row_hash_2 int, modify column db_row_hash_3 int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 add column ar int , add column rb int, modify column db_row_hash_1 int , modify column db_row_hash_3 int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 change column db_row_hash_1 dsds int , change column db_row_hash_2 dfdf int , change column db_row_hash_3 gdfg int ;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+alter table t1 add column asd int, drop column a, change column db_row_hash_1 dsds int, change db_row_hash_3 fdfdfd int;
+ERROR 42S22: Unknown column 'db_row_hash_1' in 't1'
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` varchar(2000) DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+# add column named db_row_hash_*;
+alter table t1 add column db_row_hash_7 int , add column db_row_hash_5 int,
+add column db_row_hash_1 int, add column db_row_hash_2 int;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` varchar(2000) DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ `db_row_hash_7` int(11) DEFAULT NULL,
+ `db_row_hash_5` int(11) DEFAULT NULL,
+ `db_row_hash_1` int(11) DEFAULT NULL,
+ `db_row_hash_2` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+alter table t1 drop column db_row_hash_7 , drop column db_row_hash_5 ,
+drop column db_row_hash_1, drop column db_row_hash_2 ;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` varchar(2000) DEFAULT NULL,
+ `d` text DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+#try to change column names;
+alter table t1 change column a aa blob , change column b bb blob , change column d dd blob;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `aa` blob DEFAULT NULL,
+ `bb` blob DEFAULT NULL,
+ `c` varchar(2000) DEFAULT NULL,
+ `dd` blob DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`aa`,`bb`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`dd`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`bb`,`dd`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 aa A NULL NULL NULL YES HASH
+t1 0 a 2 bb A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 dd A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 bb A NULL NULL NULL YES HASH
+t1 0 b 2 dd A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+alter table t1 change column aa a blob , change column bb b blob , change column dd d blob;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` blob DEFAULT NULL,
+ `c` varchar(2000) DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+#now we will change the data types to int/varchar within the key length limit so that the hash index is no longer required;
+#on key a_b_c;
+alter table t1 modify column a varchar(20) , modify column b varchar(20) , modify column c varchar(20);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` varchar(20) DEFAULT NULL,
+ `b` varchar(20) DEFAULT NULL,
+ `c` varchar(20) DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`),
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES BTREE
+t1 0 a 2 b A NULL NULL NULL YES BTREE
+t1 0 a 3 c A NULL NULL NULL YES BTREE
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
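Shrinking a, b and c to varchar(20) brings the combined key length under the engine limit, so the unique key `a` above is converted into an ordinary BTREE index, while the three keys that still contain blob or oversized varchar columns keep USING HASH. The same effect on a one-column table (sketch_t is a hypothetical name):
    create table sketch_t (x blob, unique (x));   -- too long for a btree key: USING HASH
    show create table sketch_t;
    alter table sketch_t modify x varchar(20);    -- short enough now: plain unique (BTREE)
    show create table sketch_t;
    drop table sketch_t;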
+#change it back;
+alter table t1 modify column a blob , modify column b blob , modify column c blob;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` blob DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `c` (`c`,`d`,`e`) USING HASH,
+ UNIQUE KEY `e` (`e`,`f`,`g`,`h`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+t1 0 c 3 e A NULL NULL NULL YES HASH
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 f A NULL NULL NULL YES HASH
+t1 0 e 3 g A NULL NULL NULL YES HASH
+t1 0 e 4 h A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+#try to delete a blob column used in a unique key;
+truncate table t1;
+#now try to delete keys;
+alter table t1 drop key c, drop key e;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` blob DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ `e` varchar(3000) DEFAULT NULL,
+ `f` longblob DEFAULT NULL,
+ `g` int(11) DEFAULT NULL,
+ `h` text DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`b`,`c`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`,`g`,`h`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 b 3 g A NULL NULL NULL YES HASH
+t1 0 b 4 h A NULL NULL NULL YES HASH
+drop table t1;
+#now alter a table containing some data; basically some tests with ignore;
+create table t1 (a blob);
+insert into t1 values(1),(2),(3);
+#normal alter table;
+alter table t1 add unique key(a);
+alter table t1 drop key a;
+truncate table t1;
+insert into t1 values(1),(1),(2),(2),(3);
+alter table t1 add unique key(a);
+ERROR 23000: Duplicate entry '1' for key 'a'
+alter ignore table t1 add unique key(a);
+select * from t1 order by a;
+a
+1
+2
+3
+insert into t1 values(1);
+ERROR 23000: Duplicate entry '1' for key 'a'
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+drop table t1;
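As the block above shows, a plain ALTER TABLE ... ADD UNIQUE fails with ER_DUP_ENTRY when the existing rows already contain duplicates, whereas ALTER IGNORE keeps the first row of each duplicate group and discards the rest. A minimal standalone sketch (sketch_dup is a hypothetical table):
    create table sketch_dup (a blob);
    insert into sketch_dup values ('x'),('x'),('y');
    alter ignore table sketch_dup add unique (a);   -- silently drops the second 'x'
    select count(*) from sketch_dup;                -- 2
    drop table sketch_dup;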
+#Now with multiple keys;
+create table t1(a blob , b blob, c blob , d blob , e int);
+insert into t1 values (1,1,1,1,1);
+insert into t1 values (1,1,1,1,1);
+insert into t1 values (2,1,1,1,1);
+insert into t1 values (2,2,2,2,2);
+insert into t1 values (3,3,4,4,4);
+insert into t1 values (4,4,4,4,4);
+alter table t1 add unique key(a,c), add unique key(b,d), add unique key(e);
+ERROR 23000: Duplicate entry '1-1' for key 'a'
+alter ignore table t1 add unique key(a,c), add unique key(b,d), add unique key(e);
+select * from t1 order by a;
+a b c d e
+1 1 1 1 1
+2 2 2 2 2
+3 3 4 4 4
+insert into t1 values (1,12,1,13,14);
+ERROR 23000: Duplicate entry '1-1' for key 'a'
+insert into t1 values (12,1,14,1,14);
+ERROR 23000: Duplicate entry '1-1' for key 'b'
+insert into t1 values (13,12,13,14,4);
+ERROR 23000: Duplicate entry '4' for key 'e'
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `b` blob DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ `e` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`,`c`) USING HASH,
+ UNIQUE KEY `b` (`b`,`d`) USING HASH,
+ UNIQUE KEY `e` (`e`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 c A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 d A NULL NULL NULL YES HASH
+t1 0 e 1 e A 0 NULL NULL YES BTREE
+drop table t1;
+#visibility of db_row_hash
+create table t1 (a blob unique , b blob unique);
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+b blob YES UNI NULL
+insert into t1 values(1,19);
+insert into t1 values(2,29);
+insert into t1 values(3,39);
+insert into t1 values(4,49);
+create table t2 (DB_ROW_HASH_1 int, DB_ROW_HASH_2 int);
+insert into t2 values(11,1);
+insert into t2 values(22,2);
+insert into t2 values(33,3);
+insert into t2 values(44,4);
+select * from t1 order by a;
+a b
+1 19
+2 29
+3 39
+4 49
+select * from t2 order by DB_ROW_HASH_1;
+DB_ROW_HASH_1 DB_ROW_HASH_2
+11 1
+22 2
+33 3
+44 4
+select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1;
+ERROR 42S22: Unknown column 'DB_ROW_HASH_1' in 'field list'
+#bug
+select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2;
+DB_ROW_HASH_1 DB_ROW_HASH_2
+11 1
+11 1
+11 1
+11 1
+22 2
+22 2
+22 2
+22 2
+33 3
+33 3
+33 3
+33 3
+44 4
+44 4
+44 4
+44 4
+select * from t1 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
+ERROR 42S22: Unknown column 'DB_ROW_HASH_1' in 'IN/ALL/ANY subquery'
+select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
+DB_ROW_HASH_1 DB_ROW_HASH_2
+11 1
+22 2
+33 3
+44 4
+11 1
+22 2
+33 3
+44 4
+11 1
+22 2
+33 3
+44 4
+11 1
+22 2
+33 3
+44 4
+select * from t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t1);
+DB_ROW_HASH_1 DB_ROW_HASH_2
+11 1
+22 2
+33 3
+44 4
+select DB_ROW_HASH_1 from t1,t2 where t1.DB_ROW_HASH_1 = t2.DB_ROW_HASH_2;
+ERROR 42S22: Unknown column 't1.DB_ROW_HASH_1' in 'where clause'
+select DB_ROW_HASH_1 from t1 inner join t2 on t1.a = t2.DB_ROW_HASH_2;
+DB_ROW_HASH_1
+11
+22
+33
+44
+drop table t1,t2;
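The selects above demonstrate name resolution around the hidden fields: t1's DB_ROW_HASH_* columns are never selectable on their own, but once t2 defines real columns with those names, unqualified references in a two-table select resolve to t2, while the explicitly qualified t1.DB_ROW_HASH_1 still fails. Only a reference qualified with the table that really owns the column is unambiguous, e.g. (s1/s2 are hypothetical stand-ins for t1/t2):
    create table s1 (a blob unique);
    create table s2 (DB_ROW_HASH_1 int);
    insert into s1 values ('x');
    insert into s2 values (1);
    select s2.DB_ROW_HASH_1 from s1 join s2;   -- resolves to the real column in s2
    drop table s1, s2;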
+#very long blob entry;
+SET @@GLOBAL.max_allowed_packet=67108864;
+connect 'newcon', localhost, root,,;
+connection newcon;
+show variables like 'max_allowed_packet';
+Variable_name Value
+max_allowed_packet 67108864
+create table t1(a longblob unique, b longblob , c longblob , unique(b,c));
+desc t1;
+Field Type Null Key Default Extra
+a longblob YES UNI NULL
+b longblob YES MUL NULL
+c longblob YES NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` longblob DEFAULT NULL,
+ `b` longblob DEFAULT NULL,
+ `c` longblob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `b` (`b`,`c`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 c A NULL NULL NULL YES HASH
+insert into t1 values(concat(repeat('sachin',10000000),'1'),concat(repeat('sachin',10000000),'1'),
+concat(repeat('sachin',10000000),'1'));
+insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
+concat(repeat('sachin',10000000),'1'));
+insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
+concat(repeat('sachin',10000000),'4'));
+ERROR 23000: Duplicate entry 'sachinsachinsachinsachinsachinsachinsachinsachinsachinsachinsach' for key 'a'
+insert into t1 values(concat(repeat('sachin',10000000),'3'),concat(repeat('sachin',10000000),'1'),
+concat(repeat('sachin',10000000),'1'));
+ERROR 23000: Duplicate entry 'sachinsachinsachinsachinsachinsachinsachinsachinsachinsachinsach' for key 'b'
+drop table t1;
+#long unique keys with different key lengths
+create table t1(a blob, unique(a(3000)));
+desc t1;
+Field Type Null Key Default Extra
+a blob YES UNI NULL
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL 3000 NULL YES HASH
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`(3000)) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 value(concat(repeat('s',3000),'1'));
+insert into t1 value(concat(repeat('s',3000),'2'));
+ERROR 23000: Duplicate entry 'ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss' for key 'a'
+insert into t1 value(concat(repeat('a',3000),'2'));
+drop table t1;
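With an explicit prefix length only that prefix is hashed, so unique(a(3000)) above behaves like a unique constraint on the first 3000 bytes: values that differ only beyond the prefix are rejected as duplicates, while values with a different prefix are accepted. The effect is easier to see with a very short prefix (sketch_pfx is a hypothetical table):
    create table sketch_pfx (a blob, unique (a(3)) using hash);
    insert into sketch_pfx values ('abcdef');
    insert into sketch_pfx values ('abcxyz');   -- ER_DUP_ENTRY: same 3-byte prefix 'abc'
    insert into sketch_pfx values ('xyzabc');   -- different prefix, accepted
    drop table sketch_pfx;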
+create table t1(a varchar(4000), b longblob , c varchar(5000), d longblob,
+unique(a(3500), b), unique(c(4500), d));
+desc t1;
+Field Type Null Key Default Extra
+a varchar(4000) YES MUL NULL
+b longblob YES NULL
+c varchar(5000) YES MUL NULL
+d longblob YES NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` varchar(4000) DEFAULT NULL,
+ `b` longblob DEFAULT NULL,
+ `c` varchar(5000) DEFAULT NULL,
+ `d` longblob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`(3500),`b`) USING HASH,
+ UNIQUE KEY `c` (`c`(4500),`d`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL 3500 NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL 4500 NULL YES HASH
+t1 0 c 2 d A NULL NULL NULL YES HASH
+drop table t1;
+disconnect newcon;
+connection default;
+SET @@GLOBAL.max_allowed_packet=4194304;
+#ext bug
+create table t1(a int primary key, b blob unique, c int, d blob , index(c));
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` blob DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ UNIQUE KEY `b` (`b`) USING HASH,
+ KEY `c` (`c`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values(1,23,1,33);
+insert into t1 values(2,23,1,33);
+ERROR 23000: Duplicate entry '23' for key 'b'
+drop table t1;
+create table t2 (a blob unique , c int , index(c));
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` blob DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ KEY `c` (`c`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t2 values(1,1);
+insert into t2 values(2,1);
+drop table t2;
+#not null test
+create table t1(a blob unique not null);
+desc t1;
+Field Type Null Key Default Extra
+a blob NO UNI NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob NOT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values(1);
+insert into t1 values(3);
+insert into t1 values(1);
+ERROR 23000: Duplicate entry '1' for key 'a'
+drop table t1;
+create table t1(a int primary key, b blob unique , c blob unique not null);
+insert into t1 values(1,1,1);
+insert into t1 values(2,1,2);
+ERROR 23000: Duplicate entry '1' for key 'b'
+insert into t1 values(3,3,1);
+ERROR 23000: Duplicate entry '1' for key 'c'
+drop table t1;
+create table t1 (a blob unique not null, b blob not null, c blob not null, unique(b,c));
+desc t1;
+Field Type Null Key Default Extra
+a blob NO UNI NULL
+b blob NO MUL NULL
+c blob NO NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob NOT NULL,
+ `b` blob NOT NULL,
+ `c` blob NOT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `b` (`b`,`c`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values (1, 2, 3);
+insert into t1 values (2, 1, 3);
+insert into t1 values (2, 1, 3);
+ERROR 23000: Duplicate entry '2' for key 'a'
+drop table t1;
+#partition
+create table t1(a blob unique) partition by hash(a);
+ERROR HY000: A BLOB field is not allowed in partition function
+#key length > 2^16 -1
+create table t1(a blob, unique(a(65536)));
+ERROR 42000: Specified key part was too long; max key part length is 65535 bytes
+create table t1(a blob, unique(a(65535)));
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`(65535)) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
+#64 indexes
+create table t1 ( a63 blob unique, a62 blob unique, a61 blob unique, a60 blob unique, a59 blob unique, a58 blob unique, a57 blob unique, a56 blob unique, a55 blob unique, a54 blob unique, a53 blob unique, a52 blob unique, a51 blob unique, a50 blob unique, a49 blob unique, a48 blob unique, a47 blob unique, a46 blob unique, a45 blob unique, a44 blob unique, a43 blob unique, a42 blob unique, a41 blob unique, a40 blob unique, a39 blob unique, a38 blob unique, a37 blob unique, a36 blob unique, a35 blob unique, a34 blob unique, a33 blob unique, a32 blob unique, a31 blob unique, a30 blob unique, a29 blob unique, a28 blob unique, a27 blob unique, a26 blob unique, a25 blob unique, a24 blob unique, a23 blob unique, a22 blob unique, a21 blob unique, a20 blob unique, a19 blob unique, a18 blob unique, a17 blob unique, a16 blob unique, a15 blob unique, a14 blob unique, a13 blob unique, a12 blob unique, a11 blob unique, a10 blob unique, a9 blob unique, a8 blob unique, a7 blob unique, a6 blob unique, a5 blob unique, a4 blob unique, a3 blob unique, a2 blob unique, a1 blob unique, a blob unique);;
+insert into t1 values( 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);;
+insert into t1 values( 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);;
+ERROR 23000: Duplicate entry '63' for key 'a63'
+insert into t1 values( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63);;
+insert into t1 values( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63);;
+ERROR 23000: Duplicate entry '0' for key 'a63'
+drop table t1;
+create table t1(a blob , key(a));
+Warnings:
+Note 1071 Specified key was too long; max key length is 1000 bytes
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ KEY `a` (`a`(1000))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
+create table t1(a blob);
+alter table t1 add index(a);
+Warnings:
+Note 1071 Specified key was too long; max key length is 1000 bytes
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ KEY `a` (`a`(1000))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
+create table t1(a text, key(a));
+Warnings:
+Note 1071 Specified key was too long; max key length is 1000 bytes
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` text DEFAULT NULL,
+ KEY `a` (`a`(1000))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
+create table t1(a varchar(4000));
+alter table t1 add index(a);
+Warnings:
+Warning 1071 Specified key was too long; max key length is 1000 bytes
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` varchar(4000) DEFAULT NULL,
+ KEY `a` (`a`(1000))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
+create table t1 (pk int, a int, b int, primary key(pk), key(pk,a));
+alter table t1 modify a text;
+ERROR 42000: Specified key was too long; max key length is 1000 bytes
+alter table t1 modify a varchar(1000);
+ERROR 42000: Specified key was too long; max key length is 1000 bytes
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk` int(11) NOT NULL,
+ `a` int(11) DEFAULT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`pk`),
+ KEY `pk` (`pk`,`a`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
+set @@GLOBAL.max_allowed_packet= @allowed_packet;
diff --git a/mysql-test/main/long_unique.test b/mysql-test/main/long_unique.test
new file mode 100644
index 00000000000..a6bc68f54dc
--- /dev/null
+++ b/mysql-test/main/long_unique.test
@@ -0,0 +1,545 @@
+let datadir=`select @@datadir`;
+--source include/have_partition.inc
+
+#
+# MDEV-371 Unique indexes for blobs
+#
+
+--echo #Structure of tests
+--echo #First we will check all options for
+--echo #table containing single unique column
+--echo #table containing keys like unique(a,b,c,d) etc
+--echo #then table containing 2 blob unique etc
+set @allowed_packet= @@max_allowed_packet;
+--echo #table with single long blob column;
+create table t1(a blob unique );
+insert into t1 values(1),(2),(3),(56),('sachin'),('maria'),(123456789034567891),(null),(null),(123456789034567890);
+--echo #blob with primary key not allowed
+--error ER_TOO_LONG_KEY
+create table t2(a blob,primary key(a(10000)));
+--error ER_TOO_LONG_KEY
+create table t3(a varchar(10000) primary key);
+
+--error ER_DUP_ENTRY
+insert into t1 values(2);
+--echo #table structure;
+desc t1;
+show create table t1;
+query_vertical show keys from t1;
+replace_result $datadir DATADIR;
+exec $MYISAMCHK -d $datadir/test/t1;
+select TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,ORDINAL_POSITION,COLUMN_DEFAULT,IS_NULLABLE,DATA_TYPE,CHARACTER_MAXIMUM_LENGTH,CHARACTER_OCTET_LENGTH,NUMERIC_PRECISION,NUMERIC_SCALE,DATETIME_PRECISION,CHARACTER_SET_NAME,COLLATION_NAME,COLUMN_TYPE,COLUMN_KEY,EXTRA,COLUMN_COMMENT,IS_GENERATED,GENERATION_EXPRESSION from information_schema.columns where table_schema = 'test' and table_name = 't1';
+select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
+query_vertical select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
+--echo # on table select we should not be able to see the db_row_hash_1 column;
+select * from t1 order by a;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_1 from t1;
+--echo #duplicate entry test;
+--error ER_DUP_ENTRY
+insert into t1 values(2);
+--error ER_DUP_ENTRY
+insert into t1 values('sachin');
+--error ER_DUP_ENTRY
+insert into t1 values(123456789034567891);
+select * from t1 order by a;
+insert into t1 values(11),(22),(33);
+--error ER_DUP_ENTRY
+insert into t1 values(12),(22);
+select * from t1 order by a;
+insert into t1 values(repeat('s',4000*10)),(repeat('s',4001*10));
+--error ER_DUP_ENTRY
+insert into t1 values(repeat('m',4000*10)),(repeat('m',4000*10));
+insert into t1 values(repeat('m',4001)),(repeat('m',4002));
+truncate table t1;
+insert into t1 values(1),(2),(3),(4),(5),(8),(7);
+replace_result $datadir DATADIR;
+exec $MYISAMCHK -d $datadir/test/t1;
+--echo #now some alter commands;
+alter table t1 add column b int;
+desc t1;
+show create table t1;
+--error ER_DUP_ENTRY
+insert into t1 values(1,2);
+--error ER_DUP_ENTRY
+insert into t1 values(2,2);
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_1 from t1;
+--echo #now try to change db_row_hash_1 column;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t1 drop column db_row_hash_1;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t1 add column d int , add column e int , drop column db_row_hash_1;
+--error ER_BAD_FIELD_ERROR
+alter table t1 modify column db_row_hash_1 int ;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column a int , add column b int, modify column db_row_hash_1 int ;
+--error ER_BAD_FIELD_ERROR
+alter table t1 change column db_row_hash_1 dsds int;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column asd int, change column db_row_hash_1 dsds int;
+alter table t1 drop column b , add column c int;
+show create table t1;
+
+--echo #now add some column with name db_row_hash;
+alter table t1 add column db_row_hash_1 int unique;
+show create table t1;
+--error ER_DUP_ENTRY
+insert into t1 values(45,1,55),(46,1,55);
+--error ER_DUP_ENTRY
+insert into t1 values(45,1,55),(45,1,55);
+alter table t1 add column db_row_hash_2 int, add column db_row_hash_3 int;
+desc t1;
+--echo #this should also drop the unique index ;
+alter table t1 drop column a;
+show create table t1;
+show keys from t1;
+--echo #add column with unique index on blob ;
+alter table t1 add column a blob unique;
+show create table t1;
+--echo # try to change the blob unique name;
+alter table t1 change column a aa blob ;
+show create table t1;
+show keys from t1;
+--echo # try to change the blob unique datatype;
+--echo #this will change index to b tree;
+alter table t1 modify column aa int ;
+show create table t1;
+show keys from t1;
+alter table t1 add column clm blob unique;
+--echo #try changing the name ;
+alter table t1 change column clm clm_changed blob;
+show create table t1;
+show keys from t1;
+--echo #now drop the unique key;
+alter table t1 drop key clm;
+show create table t1;
+show keys from t1;
+drop table t1;
+
+create table t1 (a TEXT CHARSET latin1 COLLATE latin1_german2_ci unique);
+desc t1;
+show keys from t1;
+insert into t1 values ('ae');
+--error ER_DUP_ENTRY
+insert into t1 values ('AE');
+insert into t1 values ('Ä');
+drop table t1;
+create table t1 (a int primary key, b blob unique);
+desc t1;
+show keys from t1;
+insert into t1 values(1,1),(2,2),(3,3);
+--error ER_DUP_ENTRY
+insert into t1 values(1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(7,1);
+drop table t1;
+
+--echo #table with multiple long blob columns and varchar/text columns;
+create table t1(a blob unique, b int , c blob unique , d text unique , e varchar(3000) unique);
+insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555),
+('sachin',341,'fdf','gfgfgfg','hghgr'),('maria',345,'frter','dasd','utyuty'),
+(123456789034567891,353534,53453453453456,64565464564564,45435345345345),
+(123456789034567890,43545,657567567567,78967657567567,657567567567567676);
+
+--echo #table structure;
+desc t1;
+show create table t1;
+show keys from t1;
+replace_result $datadir DATADIR;
+exec $MYISAMCHK -d $datadir/test/t1;
+select TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,ORDINAL_POSITION,COLUMN_DEFAULT,IS_NULLABLE,DATA_TYPE,CHARACTER_MAXIMUM_LENGTH,CHARACTER_OCTET_LENGTH,NUMERIC_PRECISION,NUMERIC_SCALE,DATETIME_PRECISION,CHARACTER_SET_NAME,COLLATION_NAME,COLUMN_TYPE,COLUMN_KEY,EXTRA,COLUMN_COMMENT,IS_GENERATED,GENERATION_EXPRESSION from information_schema.columns where table_schema = 'test' and table_name = 't1';
+select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
+select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
+--echo #on table select we should not be able to see the db_row_hash_1 column;
+select * from t1 order by a;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_1 from t1;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_2 from t1;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_3 from t1;
+--echo #duplicate entry test;
+--error ER_DUP_ENTRY
+insert into t1 values(21,2,3,42,51);
+--error ER_DUP_ENTRY
+insert into t1 values('sachin',null,null,null,null);
+--error ER_DUP_ENTRY
+insert into t1 values(1234567890345671890,4353451,6575675675617,789676575675617,657567567567567676);
+select * from t1 order by a;
+insert into t1 values(repeat('s',4000*10),100,repeat('s',4000*10),repeat('s',4000*10),
+repeat('s',400)),(repeat('s',4001*10),1000,repeat('s',4001*10),repeat('s',4001*10),
+repeat('s',2995));
+--error ER_DUP_ENTRY
+insert into t1 values(repeat('m',4000*11),10,repeat('s',4000*11),repeat('s',4000*11),repeat('s',2995));
+truncate table t1;
+insert into t1 values(1,2,3,4,5),(2,11,22,33,44),(3111,222,333,444,555),(5611,2222,3333,4444,5555);
+--echo #now some alter commands;
+alter table t1 add column f int;
+desc t1;
+show create table t1;
+--echo #unique key should not break;
+--error ER_DUP_ENTRY
+insert into t1 values(1,2,3,4,5,6);
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_1 , db_row_hash_2, db_row_hash_3 from t1;
+--echo #now try to change db_row_hash_1 column;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t1 drop column db_row_hash_1, drop column db_row_hash_2, drop column db_row_hash_3;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t1 add column dg int , add column ef int , drop column db_row_hash_1;
+--error ER_BAD_FIELD_ERROR
+alter table t1 modify column db_row_hash_1 int , modify column db_row_hash_2 int, modify column db_row_hash_3 int;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column ar int , add column rb int, modify column db_row_hash_1 int , modify column db_row_hash_3 int;
+--error ER_BAD_FIELD_ERROR
+alter table t1 change column db_row_hash_1 dsds int , change column db_row_hash_2 dfdf int , change column db_row_hash_3 gdfg int ;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column asd int, drop column a, change column db_row_hash_1 dsds int, change db_row_hash_3 fdfdfd int;
+alter table t1 drop column b , add column g int;
+show create table t1;
+
+--echo #now add some column with name db_row_hash;
+alter table t1 add column db_row_hash_1 int unique;
+alter table t1 add column db_row_hash_2 int unique;
+alter table t1 add column db_row_hash_3 int unique;
+show create table t1;
+
+alter table t1 add column db_row_hash_7 int, add column db_row_hash_5 int , add column db_row_hash_4 int ;
+alter table t1 drop column db_row_hash_7,drop column db_row_hash_3, drop column db_row_hash_4;
+desc t1;
+--echo #this should not break anything;
+--error ER_DUP_ENTRY
+insert into t1 values(1,2,3,4,5,6,23,5,6);
+--echo #this should also drop the unique index;
+alter table t1 drop column a, drop column c;
+show create table t1;
+show keys from t1;
+--echo #add column with unique index on blob;
+alter table t1 add column a blob unique;
+show create table t1;
+show keys from t1;
+--echo #try to change the blob unique column datatype;
+--echo #this will change index to b tree;
+alter table t1 modify column a int , modify column e int;
+show create table t1;
+show keys from t1;
+alter table t1 add column clm1 blob unique,add column clm2 blob unique;
+--echo #try changing the name;
+alter table t1 change column clm1 clm_changed1 blob, change column clm2 clm_changed2 blob;
+show create table t1;
+show keys from t1;
+--echo #now drop the unique key;
+alter table t1 drop key clm1, drop key clm2;
+show create table t1;
+show keys from t1;
+drop table t1;
+--echo #now a table with keys on multiple columns; the ultimate test;
+create table t1(a blob, b int , c varchar(2000) , d text , e varchar(3000) , f longblob , g int , h text ,
+ unique(a,b,c), unique(c,d,e),unique(e,f,g,h), unique(b,d,g,h));
+
+insert into t1 values(1,1,1,1,1,1,1,1),(2,2,2,2,2,2,2,2),(3,3,3,3,3,3,3,3),(4,4,4,4,4,4,4,4),(5,5,5,5,5,5,5,5),
+('maria',6,'maria','maria','maria','maria',6,'maria'),('mariadb',7,'mariadb','mariadb','mariadb','mariadb',8,'mariadb')
+,(null,null,null,null,null,null,null,null),(null,null,null,null,null,null,null,null);
+
+--echo #table structure;
+desc t1;
+show create table t1;
+show keys from t1;
+replace_result $datadir DATADIR;
+exec $MYISAMCHK -d $datadir/test/t1;
+select TABLE_CATALOG,TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,ORDINAL_POSITION,COLUMN_DEFAULT,IS_NULLABLE,DATA_TYPE,CHARACTER_MAXIMUM_LENGTH,CHARACTER_OCTET_LENGTH,NUMERIC_PRECISION,NUMERIC_SCALE,DATETIME_PRECISION,CHARACTER_SET_NAME,COLLATION_NAME,COLUMN_TYPE,COLUMN_KEY,EXTRA,COLUMN_COMMENT,IS_GENERATED,GENERATION_EXPRESSION from information_schema.columns where table_schema = 'test' and table_name = 't1';
+select * from information_schema.statistics where table_schema = 'test' and table_name = 't1';
+select * from information_schema.key_column_usage where table_schema= 'test' and table_name= 't1';
+--echo # on table select we should not be able to see the db_row_hash_1 column;
+select * from t1 order by a;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_1 from t1;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_2 from t1;
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_3 from t1;
+--echo #duplicate entry test;
+--echo #duplicate key entries;
+--error ER_DUP_ENTRY
+insert into t1 values(1,1,1,0,0,0,0,0);
+--error ER_DUP_ENTRY
+insert into t1 values(0,0,1,1,1,0,0,0);
+--error ER_DUP_ENTRY
+insert into t1 values(0,0,0,0,1,1,1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(1,1,1,1,1,0,0,0);
+--error ER_DUP_ENTRY
+insert into t1 values(0,0,0,0,1,1,1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(1,1,1,1,1,1,1,1);
+--error ER_BAD_FIELD_ERROR
+select db_row_hash_1,db_row_hash_2,db_row_hash_3,db_row_hash_4,db_row_hash_5 from t1;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t1 drop column db_row_hash_1, drop column db_row_hash_2, drop column db_row_hash_3;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t1 add column dg int , add column ef int , drop column db_row_hash_1;
+--error ER_BAD_FIELD_ERROR
+alter table t1 modify column db_row_hash_1 int , modify column db_row_hash_2 int, modify column db_row_hash_3 int;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column ar int , add column rb int, modify column db_row_hash_1 int , modify column db_row_hash_3 int;
+--error ER_BAD_FIELD_ERROR
+alter table t1 change column db_row_hash_1 dsds int , change column db_row_hash_2 dfdf int , change column db_row_hash_3 gdfg int ;
+--error ER_BAD_FIELD_ERROR
+alter table t1 add column asd int, drop column a, change column db_row_hash_1 dsds int, change db_row_hash_3 fdfdfd int;
+
+show create table t1;
+--echo # add column named db_row_hash_*;
+alter table t1 add column db_row_hash_7 int , add column db_row_hash_5 int,
+ add column db_row_hash_1 int, add column db_row_hash_2 int;
+show create table t1;
+show keys from t1;
+alter table t1 drop column db_row_hash_7 , drop column db_row_hash_5 ,
+ drop column db_row_hash_1, drop column db_row_hash_2 ;
+show create table t1;
+show keys from t1;
+
+--echo #try to change column names;
+alter table t1 change column a aa blob , change column b bb blob , change column d dd blob;
+show create table t1;
+show keys from t1;
+alter table t1 change column aa a blob , change column bb b blob , change column dd d blob;
+show create table t1;
+show keys from t1;
+
+--echo #now we will change the data types to int/varchar within the key length limit so that the hash index is no longer required;
+--echo #on key a_b_c;
+alter table t1 modify column a varchar(20) , modify column b varchar(20) , modify column c varchar(20);
+show create table t1;
+show keys from t1;
+--echo #change it back;
+alter table t1 modify column a blob , modify column b blob , modify column c blob;
+show create table t1;
+show keys from t1;
+
+--echo #try to delete a blob column used in a unique key;
+truncate table t1;
+## this feature removed in 10.2
+#alter table t1 drop column a, drop column b, drop column c;
+#show create table t1;
+#show keys from t1;
+--echo #now try to delete keys;
+alter table t1 drop key c, drop key e;
+show create table t1;
+show keys from t1;
+drop table t1;
+
+--echo #now alter a table containing some data; basically some tests with ignore;
+create table t1 (a blob);
+insert into t1 values(1),(2),(3);
+--echo #normal alter table;
+alter table t1 add unique key(a);
+alter table t1 drop key a;
+truncate table t1;
+insert into t1 values(1),(1),(2),(2),(3);
+--error ER_DUP_ENTRY
+alter table t1 add unique key(a);
+alter ignore table t1 add unique key(a);
+select * from t1 order by a;
+--error ER_DUP_ENTRY
+insert into t1 values(1);
+show create table t1;
+show keys from t1;
+drop table t1;
+
+--echo #Now with multiple keys;
+create table t1(a blob , b blob, c blob , d blob , e int);
+insert into t1 values (1,1,1,1,1);
+insert into t1 values (1,1,1,1,1);
+insert into t1 values (2,1,1,1,1);
+insert into t1 values (2,2,2,2,2);
+insert into t1 values (3,3,4,4,4);
+insert into t1 values (4,4,4,4,4);
+--error ER_DUP_ENTRY
+alter table t1 add unique key(a,c), add unique key(b,d), add unique key(e);
+alter ignore table t1 add unique key(a,c), add unique key(b,d), add unique key(e);
+select * from t1 order by a;
+--error ER_DUP_ENTRY
+insert into t1 values (1,12,1,13,14);
+--error ER_DUP_ENTRY
+insert into t1 values (12,1,14,1,14);
+--error ER_DUP_ENTRY
+insert into t1 values (13,12,13,14,4);
+show create table t1;
+show keys from t1;
+drop table t1;
+
+--echo #visibility of db_row_hash
+create table t1 (a blob unique , b blob unique);
+desc t1;
+insert into t1 values(1,19);
+insert into t1 values(2,29);
+insert into t1 values(3,39);
+insert into t1 values(4,49);
+create table t2 (DB_ROW_HASH_1 int, DB_ROW_HASH_2 int);
+insert into t2 values(11,1);
+insert into t2 values(22,2);
+insert into t2 values(33,3);
+insert into t2 values(44,4);
+select * from t1 order by a;
+select * from t2 order by DB_ROW_HASH_1;
+--error ER_BAD_FIELD_ERROR
+select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1;
+--echo #bug
+select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2;
+--error ER_BAD_FIELD_ERROR
+select * from t1 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
+select DB_ROW_HASH_1, DB_ROW_HASH_2 from t1,t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t2);
+select * from t2 where DB_ROW_HASH_1 in (select DB_ROW_HASH_1 from t1);
+--error ER_BAD_FIELD_ERROR
+select DB_ROW_HASH_1 from t1,t2 where t1.DB_ROW_HASH_1 = t2.DB_ROW_HASH_2;
+select DB_ROW_HASH_1 from t1 inner join t2 on t1.a = t2.DB_ROW_HASH_2;
+drop table t1,t2;
+
+--echo #very long blob entry;
+SET @@GLOBAL.max_allowed_packet=67108864;
+
+connect ('newcon', localhost, root,,);
+--connection newcon
+show variables like 'max_allowed_packet';
+create table t1(a longblob unique, b longblob , c longblob , unique(b,c));
+desc t1;
+show create table t1;
+show keys from t1;
+insert into t1 values(concat(repeat('sachin',10000000),'1'),concat(repeat('sachin',10000000),'1'),
+concat(repeat('sachin',10000000),'1'));
+insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
+concat(repeat('sachin',10000000),'1'));
+--error ER_DUP_ENTRY
+insert into t1 values(concat(repeat('sachin',10000000),'2'),concat(repeat('sachin',10000000),'2'),
+concat(repeat('sachin',10000000),'4'));
+--error ER_DUP_ENTRY
+insert into t1 values(concat(repeat('sachin',10000000),'3'),concat(repeat('sachin',10000000),'1'),
+concat(repeat('sachin',10000000),'1'));
+drop table t1;
+
+--echo #long unique keys with different key lengths
+create table t1(a blob, unique(a(3000)));
+desc t1;
+show keys from t1;
+show create table t1;
+insert into t1 value(concat(repeat('s',3000),'1'));
+--error ER_DUP_ENTRY
+insert into t1 value(concat(repeat('s',3000),'2'));
+insert into t1 value(concat(repeat('a',3000),'2'));
+drop table t1;
+
+create table t1(a varchar(4000), b longblob , c varchar(5000), d longblob,
+unique(a(3500), b), unique(c(4500), d));
+desc t1;
+show create table t1;
+show keys from t1;
+drop table t1;
+disconnect newcon;
+--connection default
+SET @@GLOBAL.max_allowed_packet=4194304;
+--echo #ext bug
+create table t1(a int primary key, b blob unique, c int, d blob , index(c));
+show create table t1;
+insert into t1 values(1,23,1,33);
+--error ER_DUP_ENTRY
+insert into t1 values(2,23,1,33);
+drop table t1;
+create table t2 (a blob unique , c int , index(c));
+show create table t2;
+insert into t2 values(1,1);
+insert into t2 values(2,1);
+drop table t2;
+--echo #not null test
+create table t1(a blob unique not null);
+desc t1;
+show create table t1;
+insert into t1 values(1);
+insert into t1 values(3);
+--error ER_DUP_ENTRY
+insert into t1 values(1);
+drop table t1;
+create table t1(a int primary key, b blob unique , c blob unique not null);
+insert into t1 values(1,1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(2,1,2);
+--error ER_DUP_ENTRY
+insert into t1 values(3,3,1);
+drop table t1;
+create table t1 (a blob unique not null, b blob not null, c blob not null, unique(b,c));
+desc t1;
+show create table t1;
+insert into t1 values (1, 2, 3);
+insert into t1 values (2, 1, 3);
+--error ER_DUP_ENTRY
+insert into t1 values (2, 1, 3);
+drop table t1;
+
+--echo #partition
+--error ER_BLOB_FIELD_IN_PART_FUNC_ERROR
+create table t1(a blob unique) partition by hash(a);
+--echo #key length > 2^16 -1
+--error ER_TOO_LONG_KEYPART
+create table t1(a blob, unique(a(65536)));
+create table t1(a blob, unique(a(65535)));
+show create table t1;
+drop table t1;
+
+--echo #64 indexes
+--let $create_table=create table t1 (
+--let $insert_data_1=insert into t1 values(
+--let $insert_data_2=insert into t1 values(
+--let $count= 63
+--let $index= 0
+while ($count)
+{
+ --let $create_table=$create_table a$count blob unique,
+ --let $insert_data_1=$insert_data_1 $count,
+ --let $insert_data_2=$insert_data_2 $index,
+ --dec $count
+ --inc $index
+}
+--let $create_table=$create_table a blob unique);
+--let $insert_data_1=$insert_data_1 0);
+--let $insert_data_2=$insert_data_2 63);
+
+--eval $create_table
+--eval $insert_data_1
+--error ER_DUP_ENTRY
+--eval $insert_data_1
+--eval $insert_data_2
+--error ER_DUP_ENTRY
+--eval $insert_data_2
+drop table t1;
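The let/while loop above expands into one 64-column CREATE TABLE in which every column carries its own hash-backed unique key, plus two 64-value INSERTs whose value order is reversed relative to each other; repeating either insert then has to trip one of the 64 hash keys. Scaled down to four columns, the generated statements have this shape (a sketch, not the literal expansion):
    create table sketch_small (a3 blob unique, a2 blob unique, a1 blob unique, a blob unique);
    insert into sketch_small values (3, 2, 1, 0);
    insert into sketch_small values (3, 2, 1, 0);   -- ER_DUP_ENTRY: every column repeats
    insert into sketch_small values (0, 1, 2, 3);   -- same values reversed: no key collides
    drop table sketch_small;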
+
+#
+# non-unique long indexes are automatically shortened
+#
+create table t1(a blob , key(a));
+show create table t1;
+drop table t1;
+create table t1(a blob);
+alter table t1 add index(a);
+show create table t1;
+drop table t1;
+
+create table t1(a text, key(a));
+show create table t1;
+drop table t1;
+create table t1(a varchar(4000));
+alter table t1 add index(a);
+show create table t1;
+drop table t1;
+
+#
+# somewhat inconsistently, the following is an error
+#
+create table t1 (pk int, a int, b int, primary key(pk), key(pk,a));
+--error ER_TOO_LONG_KEY
+alter table t1 modify a text;
+--error ER_TOO_LONG_KEY
+alter table t1 modify a varchar(1000);
+show create table t1;
+drop table t1;
+
+set @@GLOBAL.max_allowed_packet= @allowed_packet;
diff --git a/mysql-test/main/long_unique_bugs.result b/mysql-test/main/long_unique_bugs.result
new file mode 100644
index 00000000000..33496c4e20d
--- /dev/null
+++ b/mysql-test/main/long_unique_bugs.result
@@ -0,0 +1,267 @@
+create table t1 (a int, b int, c int, d int, e int);
+insert into t1 () values
+(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
+(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
+(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
+(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
+(),(),(),();
+select * into outfile 'load.data' from t1;
+create temporary table tmp (a varchar(1024), b int, c int, d int, e linestring, unique (e));
+load data infile 'load.data' into table tmp;
+delete from tmp;
+drop table t1;
+create table t1 (b blob) engine=innodb;
+alter table t1 add unique (b);
+alter table t1 force;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` blob DEFAULT NULL,
+ UNIQUE KEY `b` (`b`) USING HASH
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+drop table t1;
+create table t1 (pk int, b blob, primary key(pk), unique(b)) engine=myisam;
+insert into t1 values (1,'foo');
+replace into t1 (pk) values (1);
+alter table t1 force;
+replace into t1 (pk) values (1);
+drop table t1;
+create table t1 (t time, unique(t)) engine=innodb;
+insert into t1 values (null),(null);
+alter ignore table t1 modify t text not null default '';
+Warnings:
+Warning 1265 Data truncated for column 't' at row 1
+Warning 1265 Data truncated for column 't' at row 2
+drop table t1;
+create table t1 ( pk int, f text, primary key (pk), unique(f)) with system versioning;
+insert into t1 values (1,'foo');
+update t1 set f = 'bar';
+select * from t1;
+pk f
+1 bar
+update t1 set f = 'foo';
+select * from t1;
+pk f
+1 foo
+select pk, f, row_end > DATE'2030-01-01' from t1 for system_time all;
+pk f row_end > DATE'2030-01-01'
+1 foo 1
+1 foo 0
+1 bar 0
+drop table t1;
+create temporary table t1 (f blob, unique(f)) engine=innodb;
+insert into t1 values (1);
+replace into t1 values (1);
+drop table t1;
+create table t (b blob, unique(b)) engine=myisam;
+insert into t values ('foo');
+replace into t values ('foo');
+drop table t;
+CREATE TABLE t1 (f INT, x BLOB, UNIQUE (x));
+INSERT INTO t1 VALUES (1,'foo');
+ALTER TABLE t1 DROP x, ALGORITHM=INPLACE;
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY
+UPDATE t1 SET x = 'bar';
+DROP TABLE t1;
+create table t1(a blob unique , b blob);
+insert into t1 values(1,1),(2,1);
+alter table t1 add unique(b);
+ERROR 23000: Duplicate entry '1' for key 'b'
+show keys from t1;;
+Table t1
+Non_unique 0
+Key_name a
+Seq_in_index 1
+Column_name a
+Collation A
+Cardinality NULL
+Sub_part NULL
+Packed NULL
+Null YES
+Index_type HASH
+Comment
+Index_comment
+insert into t1 values(1,1);
+ERROR 23000: Duplicate entry '1' for key 'a'
+DROP TABLE t1;
+CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=MyISAM;
+ALTER TABLE t1 DROP x;
+ERROR 42000: Can't DROP COLUMN `x`; check that it exists
+UPDATE t1 SET b = 0 WHERE a = 'foo';
+DROP TABLE t1;
+CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=InnoDB;
+ALTER TABLE t1 DROP x;
+ERROR 42000: Can't DROP COLUMN `x`; check that it exists
+UPDATE t1 SET b = 0 WHERE a = 'foo';
+DROP TABLE t1;
+CREATE TEMPORARY TABLE t1 (f BLOB, UNIQUE(f)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+ALTER TABLE t1 ADD KEY (f);
+ERROR HY000: Index column size too large. The maximum column size is 767 bytes
+TRUNCATE TABLE t1;
+SELECT * FROM t1 WHERE f LIKE 'foo';
+f
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, UNIQUE ind USING HASH (a)) ENGINE=InnoDB;
+ALTER TABLE t1 CHANGE COLUMN IF EXISTS b a INT;
+Warnings:
+Note 1054 Unknown column 'b' in 't1'
+DROP TABLE t1;
+CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB;
+ALTER TABLE t1 DROP x;
+ERROR 42000: Can't DROP COLUMN `x`; check that it exists
+SELECT * FROM t1 WHERE f LIKE 'foo';
+f
+DROP TABLE t1;
+CREATE TABLE t1 (pk INT, PRIMARY KEY USING HASH (pk)) ENGINE=InnoDB;
+show keys from t1;;
+Table t1
+Non_unique 0
+Key_name PRIMARY
+Seq_in_index 1
+Column_name pk
+Collation A
+Cardinality 0
+Sub_part NULL
+Packed NULL
+Null
+Index_type BTREE
+Comment
+Index_comment
+ALTER TABLE t1 ADD INDEX (pk);
+DROP TABLE t1;
+CREATE TABLE t1 (b int, a varchar(4000));
+INSERT INTO t1 VALUES (1, 2),(2,3),(3,4);
+ALTER TABLE t1 ADD UNIQUE INDEX (a);
+SELECT * FROM t1;
+b a
+1 2
+2 3
+3 4
+SELECT a FROM t1;
+a
+2
+3
+4
+drop table t1;
+CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB;
+ALTER TABLE t1 DROP KEY f, ADD INDEX idx1(f), ALGORITHM=INSTANT;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: ADD INDEX. Try ALGORITHM=NOCOPY
+ALTER TABLE t1 ADD KEY idx2(f);
+Warnings:
+Warning 1071 Specified key was too long; max key length is 3072 bytes
+DROP TABLE t1;
+CREATE TABLE t1(a blob , b blob , unique(a,b));
+alter table t1 drop column b;
+ERROR 42000: Key column 'b' doesn't exist in table
+insert into t1 values(1,1);
+insert into t1 values(1,1);
+ERROR 23000: Duplicate entry '1-1' for key 'a'
+alter table t1 add column c int;
+drop table t1;
+create table t1(a blob , b blob as (a) unique);
+insert into t1 values(1, default);
+insert into t1 values(1, default);
+ERROR 23000: Duplicate entry '1' for key 'b'
+drop table t1;
+create table t1(a blob, b blob, c blob as (left(a, 5000)) virtual, d blob as (left(b, 5000)) persistent, unique(a,b(4000)));
+insert into t1(a,b) values(10,11);
+insert into t1(a,b) values(10,11);
+ERROR 23000: Duplicate entry '10-11' for key 'a'
+insert into t1(a,b) values(2,2);
+insert into t1(a,b) values(2,3);
+insert into t1(a,b) values(3,2);
+drop table t1;
+CREATE TABLE t1 (
+a CHAR(128),
+b CHAR(128) AS (a),
+c DATETIME,
+UNIQUE(c,b(64))
+) ENGINE=InnoDB;
+ALTER TABLE t1 MODIFY COLUMN c VARCHAR(4096);
+drop table t1;
+CREATE TABLE t1 (
+a CHAR(128),
+b CHAR(128) AS (a),
+c varchar(5000),
+UNIQUE(c,b(64))
+) ENGINE=InnoDB;
+drop table t1;
+CREATE TABLE t1 (data VARCHAR(4), unique(data) using hash) with system versioning;
+INSERT INTO t1 VALUES ('A');
+SELECT * INTO OUTFILE 'load.data' from t1;
+LOAD DATA INFILE 'load.data' INTO TABLE t1;
+ERROR 23000: Duplicate entry 'A' for key 'data'
+select * from t1;
+data
+A
+DROP TABLE t1;
+CREATE TABLE t1 (data VARCHAR(7961)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES ('f'), ('o'), ('o');
+SELECT * INTO OUTFILE 'load.data' from t1;
+ALTER IGNORE TABLE t1 ADD UNIQUE INDEX (data);
+SELECT * FROM t1;
+data
+f
+o
+ALTER TABLE t1 ADD SYSTEM VERSIONING ;
+SELECT * FROM t1;
+data
+f
+o
+REPLACE INTO t1 VALUES ('f'), ('o'), ('o');
+SELECT * FROM t1;
+data
+f
+o
+LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1;
+SELECT * FROM t1;
+data
+f
+o
+DROP TABLE t1;
+create table t1 (
+c char(10) character set utf8mb4,
+unique key a using hash (c(1))
+) engine=myisam;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c` char(10) CHARACTER SET utf8mb4 DEFAULT NULL,
+ UNIQUE KEY `a` (`c`(1)) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert into t1 values ('б');
+insert into t1 values ('бб');
+ERROR 23000: Duplicate entry 'Ð' for key 'a'
+insert into t1 values ('ббб');
+ERROR 23000: Duplicate entry 'Ð' for key 'a'
+drop table t1;
+CREATE TABLE t1 (a INT, UNIQUE USING HASH (a)) PARTITION BY HASH (a) PARTITIONS 2;
+INSERT INTO t1 VALUES (2);
+REPLACE INTO t1 VALUES (2);
+DROP TABLE t1;
+set innodb_lock_wait_timeout= 10;
+CREATE TABLE t1 (
+id int primary key,
+f INT unique
+) ENGINE=InnoDB;
+CREATE TABLE t2 (
+id int primary key,
+a blob unique
+) ENGINE=InnoDB;
+START TRANSACTION;
+connect con1,localhost,root,,test;
+connection con1;
+set innodb_lock_wait_timeout= 10;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1,1)/*1*/;
+connection default;
+INSERT INTO t2 VALUES (2, 1)/*2*/ ;
+connection con1;
+INSERT INTO t2 VALUES (3, 1)/*3*/;
+connection default;
+INSERT IGNORE INTO t1 VALUES (4, 1)/*4*/;
+connection con1;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+disconnect con1;
+connection default;
+DROP TABLE t1, t2;
diff --git a/mysql-test/main/long_unique_bugs.test b/mysql-test/main/long_unique_bugs.test
new file mode 100644
index 00000000000..dc78f6c7067
--- /dev/null
+++ b/mysql-test/main/long_unique_bugs.test
@@ -0,0 +1,319 @@
+--source include/have_innodb.inc
+--source include/have_partition.inc
+
+#
+# MDEV-18707 Server crash in my_hash_sort_bin, ASAN heap-use-after-free in Field::is_null, server hang, corrupted double-linked list
+#
+create table t1 (a int, b int, c int, d int, e int);
+insert into t1 () values
+(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
+(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
+(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
+(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),
+(),(),(),();
+select * into outfile 'load.data' from t1;
+create temporary table tmp (a varchar(1024), b int, c int, d int, e linestring, unique (e));
+load data infile 'load.data' into table tmp;
+delete from tmp;
+drop table t1;
+--let $datadir= `SELECT @@datadir`
+--remove_file $datadir/test/load.data
+
+#
+# MDEV-18712 InnoDB indexes are inconsistent with what defined in .frm for table after rebuilding table with index on blob
+#
+create table t1 (b blob) engine=innodb;
+alter table t1 add unique (b);
+alter table t1 force;
+show create table t1;
+drop table t1;
+
+#
+# MDEV-18713 Assertion `strcmp(share->unique_file_name,filename) || share->last_version' failed in test_if_reopen upon REPLACE into table with key on blob
+#
+create table t1 (pk int, b blob, primary key(pk), unique(b)) engine=myisam;
+insert into t1 values (1,'foo');
+replace into t1 (pk) values (1);
+alter table t1 force;
+replace into t1 (pk) values (1);
+drop table t1;
+
+#
+# MDEV-18722 Assertion `templ->mysql_null_bit_mask' failed in row_sel_store_mysql_rec upon modifying indexed column into blob
+#
+create table t1 (t time, unique(t)) engine=innodb;
+insert into t1 values (null),(null);
+alter ignore table t1 modify t text not null default '';
+drop table t1;
+
+#
+# MDEV-18720 Assertion `inited==NONE' failed in ha_index_init upon update on versioned table with key on blob
+#
+create table t1 ( pk int, f text, primary key (pk), unique(f)) with system versioning;
+insert into t1 values (1,'foo');
+update t1 set f = 'bar';
+select * from t1;
+update t1 set f = 'foo';
+select * from t1;
+select pk, f, row_end > DATE'2030-01-01' from t1 for system_time all;
+drop table t1;
+
+#
+# MDEV-18747 InnoDB: Failing assertion: table->get_ref_count() == 0 upon dropping temporary table with unique blob
+#
+create temporary table t1 (f blob, unique(f)) engine=innodb;
+insert into t1 values (1);
+replace into t1 values (1);
+drop table t1;
+
+#
+# MDEV-18748 REPLACE doesn't work with unique blobs on MyISAM table
+#
+create table t (b blob, unique(b)) engine=myisam;
+insert into t values ('foo');
+replace into t values ('foo');
+drop table t;
+
+#
+# MDEV-18790 Server crash in fields_in_hash_keyinfo after unsuccessful attempt to drop BLOB with long index
+#
+CREATE TABLE t1 (f INT, x BLOB, UNIQUE (x));
+INSERT INTO t1 VALUES (1,'foo');
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
+ALTER TABLE t1 DROP x, ALGORITHM=INPLACE;
+UPDATE t1 SET x = 'bar';
+DROP TABLE t1;
+
+#
+# MDEV-18799 Long unique does not work after failed alter table
+#
+create table t1(a blob unique , b blob);
+insert into t1 values(1,1),(2,1);
+--error ER_DUP_ENTRY
+alter table t1 add unique(b);
+--query_vertical show keys from t1;
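+# After the failed ALTER the original long unique key on `a` must keep
+# working: the duplicate insert below is still expected to be rejected.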
+--error ER_DUP_ENTRY
+insert into t1 values(1,1);
+DROP TABLE t1;
+
+#
+# MDEV-18792 ASAN unknown-crash in _mi_pack_key upon UPDATE after failed ALTER on a table with long BLOB key
+#
+CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=MyISAM;
+--error ER_CANT_DROP_FIELD_OR_KEY
+ALTER TABLE t1 DROP x;
+UPDATE t1 SET b = 0 WHERE a = 'foo';
+DROP TABLE t1;
+
+#
+# MDEV-18793 Assertion `0' failed in row_sel_convert_mysql_key_to_innobase, ASAN unknown-crash in
+# row_mysql_store_col_in_innobase_format, warning " InnoDB: Using a partial-field key prefix in search"
+#
+CREATE TABLE t1 (a TEXT, b INT, UNIQUE(a)) ENGINE=InnoDB;
+--error ER_CANT_DROP_FIELD_OR_KEY
+ALTER TABLE t1 DROP x;
+UPDATE t1 SET b = 0 WHERE a = 'foo';
+DROP TABLE t1;
+
+#
+# MDEV-18795 InnoDB: Failing assertion: field->prefix_len > 0 upon DML on table with BLOB index
+#
+CREATE TEMPORARY TABLE t1 (f BLOB, UNIQUE(f)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+--error ER_INDEX_COLUMN_TOO_LONG
+ALTER TABLE t1 ADD KEY (f);
+TRUNCATE TABLE t1;
+SELECT * FROM t1 WHERE f LIKE 'foo';
+DROP TABLE t1;
+
+#
+# MDEV-18798 InnoDB: No matching column for `DB_ROW_HASH_1`and server crash in
+# ha_innobase::commit_inplace_alter_table upon ALTER on table with UNIQUE key
+#
+CREATE TABLE t1 (a INT, UNIQUE ind USING HASH (a)) ENGINE=InnoDB;
+ALTER TABLE t1 CHANGE COLUMN IF EXISTS b a INT;
+DROP TABLE t1;
+
+#
+# MDEV-18801 InnoDB: Failing assertion: field->col->mtype == type or ASAN heap-buffer-overflow
+# in row_sel_convert_mysql_key_to_innobase upon SELECT on table with long index
+#
+CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB;
+--error ER_CANT_DROP_FIELD_OR_KEY
+ALTER TABLE t1 DROP x;
+SELECT * FROM t1 WHERE f LIKE 'foo';
+DROP TABLE t1;
+
+#
+# MDEV-18800 Server crash in instant_alter_column_possible or
+# Assertion `!pk->has_virtual()' failed in instant_alter_column_possible upon adding key
+#
+CREATE TABLE t1 (pk INT, PRIMARY KEY USING HASH (pk)) ENGINE=InnoDB;
+--query_vertical show keys from t1;
+ALTER TABLE t1 ADD INDEX (pk);
+DROP TABLE t1;
+
+#
+# MDEV-18922 Alter on long unique varchar column makes result null
+#
+CREATE TABLE t1 (b int, a varchar(4000));
+INSERT INTO t1 VALUES (1, 2),(2,3),(3,4);
+ALTER TABLE t1 ADD UNIQUE INDEX (a);
+SELECT * FROM t1;
+SELECT a FROM t1;
+drop table t1;
+
+#
+# MDEV-18809 Server crash in fields_in_hash_keyinfo or Assertion `key_info->key_part->field->flags
+# & (1<< 30)' failed in setup_keyinfo_hash
+#
+CREATE TABLE t1 (f VARCHAR(4096), UNIQUE(f)) ENGINE=InnoDB;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+ALTER TABLE t1 DROP KEY f, ADD INDEX idx1(f), ALGORITHM=INSTANT;
+ALTER TABLE t1 ADD KEY idx2(f);
+DROP TABLE t1;
+CREATE TABLE t1(a blob , b blob , unique(a,b));
+--error ER_KEY_COLUMN_DOES_NOT_EXITS
+alter table t1 drop column b;
+insert into t1 values(1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(1,1);
+alter table t1 add column c int;
+drop table t1;
+
+#
+# MDEV-18889 Long unique on virtual fields crashes server
+#
+create table t1(a blob , b blob as (a) unique);
+insert into t1 values(1, default);
+--error ER_DUP_ENTRY
+insert into t1 values(1, default);
+drop table t1;
+
+create table t1(a blob, b blob, c blob as (left(a, 5000)) virtual, d blob as (left(b, 5000)) persistent, unique(a,b(4000)));
+insert into t1(a,b) values(10,11);
+--error ER_DUP_ENTRY
+insert into t1(a,b) values(10,11);
+insert into t1(a,b) values(2,2);
+insert into t1(a,b) values(2,3);
+insert into t1(a,b) values(3,2);
+drop table t1;
+
+#
+# MDEV-18888 Server crashes in Item_field::register_field_in_read_map upon MODIFY COLUMN
+#
+CREATE TABLE t1 (
+ a CHAR(128),
+ b CHAR(128) AS (a),
+ c DATETIME,
+ UNIQUE(c,b(64))
+) ENGINE=InnoDB;
+ALTER TABLE t1 MODIFY COLUMN c VARCHAR(4096);
+drop table t1;
+CREATE TABLE t1 (
+ a CHAR(128),
+ b CHAR(128) AS (a),
+ c varchar(5000),
+ UNIQUE(c,b(64))
+) ENGINE=InnoDB;
+drop table t1;
+
+#
+# MDEV-18967 Load data in system version with long unique does not work
+#
+CREATE TABLE t1 (data VARCHAR(4), unique(data) using hash) with system versioning;
+INSERT INTO t1 VALUES ('A');
+SELECT * INTO OUTFILE 'load.data' from t1;
+--error ER_DUP_ENTRY
+LOAD DATA INFILE 'load.data' INTO TABLE t1;
+select * from t1;
+DROP TABLE t1;
+--let $datadir= `select @@datadir`
+--remove_file $datadir/test/load.data
+
+#
+# MDEV-18901 Wrong results after ADD UNIQUE INDEX(blob_column)
+#
+--source include/have_innodb.inc
+CREATE TABLE t1 (data VARCHAR(7961)) ENGINE=InnoDB;
+
+INSERT INTO t1 VALUES ('f'), ('o'), ('o');
+SELECT * INTO OUTFILE 'load.data' from t1;
+
+ALTER IGNORE TABLE t1 ADD UNIQUE INDEX (data);
+SELECT * FROM t1;
+ALTER TABLE t1 ADD SYSTEM VERSIONING ;
+SELECT * FROM t1;
+REPLACE INTO t1 VALUES ('f'), ('o'), ('o');
+SELECT * FROM t1;
+# This should be equivalent to the REPLACE above
+LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+--let $datadir= `select @@datadir`
+--remove_file $datadir/test/load.data
+
+#
+# MDEV-18953 Hash index on partial char field not working
+#
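+# The unique hash is computed over the c(1) prefix only, so values sharing the
+# same first character ('б', 'бб', 'ббб') are expected to be treated as duplicates.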
+create table t1 (
+ c char(10) character set utf8mb4,
+ unique key a using hash (c(1))
+) engine=myisam;
+show create table t1;
+insert into t1 values ('б');
+--error ER_DUP_ENTRY
+insert into t1 values ('бб');
+--error ER_DUP_ENTRY
+insert into t1 values ('ббб');
+drop table t1;
+
+#
+# MDEV-18904 Assertion `m_part_spec.start_part >= m_part_spec.end_part' failed in ha_partition::index_read_idx_map
+#
+CREATE TABLE t1 (a INT, UNIQUE USING HASH (a)) PARTITION BY HASH (a) PARTITIONS 2;
+INSERT INTO t1 VALUES (2);
+REPLACE INTO t1 VALUES (2);
+DROP TABLE t1;
+
+#
+# MDEV-18820 Assertion `lock_table_has(trx, index->table, LOCK_IX)' failed in lock_rec_insert_check_and_lock upon INSERT into table with blob key
+#
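+# con1 holds the unique row f=1 in t1 and default holds the unique hash entry
+# for a=1 in t2; con1's insert into t2 and default's INSERT IGNORE into t1 then
+# wait on each other, so a deadlock is expected and con1's pending insert is
+# chosen as the victim.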
+
+--source include/have_innodb.inc
+set innodb_lock_wait_timeout= 10;
+
+CREATE TABLE t1 (
+ id int primary key,
+ f INT unique
+) ENGINE=InnoDB;
+
+CREATE TABLE t2 (
+ id int primary key,
+ a blob unique
+) ENGINE=InnoDB;
+
+START TRANSACTION;
+
+--connect (con1,localhost,root,,test)
+
+--connection con1
+set innodb_lock_wait_timeout= 10;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1,1)/*1*/;
+
+--connection default
+INSERT INTO t2 VALUES (2, 1)/*2*/ ;
+
+--connection con1
+--send
+ INSERT INTO t2 VALUES (3, 1)/*3*/;
+
+--connection default
+INSERT IGNORE INTO t1 VALUES (4, 1)/*4*/;
+
+--connection con1
+--error ER_LOCK_DEADLOCK
+--reap
+--disconnect con1
+--connection default
+DROP TABLE t1, t2;
diff --git a/mysql-test/main/long_unique_debug.result b/mysql-test/main/long_unique_debug.result
new file mode 100644
index 00000000000..fb56a9d024b
--- /dev/null
+++ b/mysql-test/main/long_unique_debug.result
@@ -0,0 +1,579 @@
+#In this test case we check what happens in the case of a hash collision
+SET debug_dbug="d,same_long_unique_hash";
+create table t1(a blob unique);
+FLUSH STATUS;
+insert into t1 values('xyz');
+insert into t1 values('abc');
+insert into t1 values('sachin');
+insert into t1 values('sachin');
+ERROR 23000: Duplicate entry 'sachin' for key 'a'
+insert into t1 values('maria');
+insert into t1 values('maria');
+ERROR 23000: Duplicate entry 'maria' for key 'a'
+drop table t1;
+SHOW STATUS LIKE 'handler_read_next';
+Variable_name Value
+Handler_read_next 11
+SET debug_dbug="";
+create table t1(a blob unique);
+FLUSH STATUS;
+insert into t1 values('xyz');
+insert into t1 values('abc');
+insert into t1 values('sachin');
+insert into t1 values('sachin');
+ERROR 23000: Duplicate entry 'sachin' for key 'a'
+insert into t1 values('maria');
+insert into t1 values('maria');
+ERROR 23000: Duplicate entry 'maria' for key 'a'
+drop table t1;
+SHOW STATUS LIKE 'handler_read_next';
+Variable_name Value
+Handler_read_next 0
+SET debug_dbug="d,same_long_unique_hash";
+create table t1(a blob unique, b blob unique);
+insert into t1 values('xyz', 11);
+insert into t1 values('abc', 22);
+insert into t1 values('sachin', 1);
+insert into t1 values('sachin', 4);
+ERROR 23000: Duplicate entry 'sachin' for key 'a'
+insert into t1 values('maria', 2);
+insert into t1 values('maria', 3);
+ERROR 23000: Duplicate entry 'maria' for key 'a'
+drop table t1;
+create table t1(a blob , b blob , unique(a,b));
+insert into t1 values('xyz', 11);
+insert into t1 values('abc', 22);
+insert into t1 values('sachin', 1);
+insert into t1 values('sachin', 1);
+ERROR 23000: Duplicate entry 'sachin-1' for key 'a'
+insert into t1 values('maria', 2);
+insert into t1 values('maria', 2);
+ERROR 23000: Duplicate entry 'maria-2' for key 'a'
+drop table t1;
+##Internal State of long unique tables
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1 ( a blob unique);
+Warnings:
+Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...
+
+Printing Table keyinfo
+
+table->s->reclength 19
+table->s->fields 2
+
+table->key_info[0] user_defined_key_parts = 1
+table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[0] flags & HA_NOSAME = 0
+
+table->s->key_info[0] user_defined_key_parts = 1
+table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[0] flags & HA_NOSAME = 1
+
+Printing table->key_info[0].key_part[0] info
+key_part->offset = 11
+key_part->field_name = DB_ROW_HASH_1
+key_part->length = 8
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 0
+key_part->null_bit = 1
+key_part->null_offset = 0
+
+Printing table->fields
+
+table->field[0]->field_name a
+table->field[0]->offset = 1
+table->field[0]->field_length = 10
+table->field[0]->null_pos wrt to record 0 = 0
+table->field[0]->null_bit_pos = 1
+
+table->field[1]->field_name DB_ROW_HASH_1
+table->field[1]->offset = 11
+table->field[1]->field_length = 8
+table->field[1]->null_pos wrt to record 0 = 0
+table->field[1]->null_bit_pos = 2
+
+SET debug_dbug="";
+drop table t1;
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1 ( a blob unique, b blob unique , c blob unique);
+Warnings:
+Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...
+
+Printing Table keyinfo
+
+table->s->reclength 55
+table->s->fields 6
+
+table->key_info[0] user_defined_key_parts = 1
+table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[0] flags & HA_NOSAME = 0
+
+table->s->key_info[0] user_defined_key_parts = 1
+table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[0] flags & HA_NOSAME = 1
+
+Printing table->key_info[0].key_part[0] info
+key_part->offset = 31
+key_part->field_name = DB_ROW_HASH_1
+key_part->length = 8
+key_part->null_bit = 8
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 0
+key_part->null_bit = 1
+key_part->null_offset = 0
+
+table->key_info[1] user_defined_key_parts = 1
+table->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[1] flags & HA_NOSAME = 0
+
+table->s->key_info[1] user_defined_key_parts = 1
+table->s->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[1] flags & HA_NOSAME = 1
+
+Printing table->key_info[1].key_part[0] info
+key_part->offset = 39
+key_part->field_name = DB_ROW_HASH_2
+key_part->length = 8
+key_part->null_bit = 16
+key_part->null_offset = 0
+
+Printing share->key_info[1].key_part[0] info
+key_part->offset = 11
+key_part->field_name = b
+key_part->length = 0
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+table->key_info[2] user_defined_key_parts = 1
+table->key_info[2] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[2] flags & HA_NOSAME = 0
+
+table->s->key_info[2] user_defined_key_parts = 1
+table->s->key_info[2] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[2] flags & HA_NOSAME = 1
+
+Printing table->key_info[2].key_part[0] info
+key_part->offset = 47
+key_part->field_name = DB_ROW_HASH_3
+key_part->length = 8
+key_part->null_bit = 32
+key_part->null_offset = 0
+
+Printing share->key_info[2].key_part[0] info
+key_part->offset = 21
+key_part->field_name = c
+key_part->length = 0
+key_part->null_bit = 4
+key_part->null_offset = 0
+
+Printing table->fields
+
+table->field[0]->field_name a
+table->field[0]->offset = 1
+table->field[0]->field_length = 10
+table->field[0]->null_pos wrt to record 0 = 0
+table->field[0]->null_bit_pos = 1
+
+table->field[1]->field_name b
+table->field[1]->offset = 11
+table->field[1]->field_length = 10
+table->field[1]->null_pos wrt to record 0 = 0
+table->field[1]->null_bit_pos = 2
+
+table->field[2]->field_name c
+table->field[2]->offset = 21
+table->field[2]->field_length = 10
+table->field[2]->null_pos wrt to record 0 = 0
+table->field[2]->null_bit_pos = 4
+
+table->field[3]->field_name DB_ROW_HASH_1
+table->field[3]->offset = 31
+table->field[3]->field_length = 8
+table->field[3]->null_pos wrt to record 0 = 0
+table->field[3]->null_bit_pos = 8
+
+table->field[4]->field_name DB_ROW_HASH_2
+table->field[4]->offset = 39
+table->field[4]->field_length = 8
+table->field[4]->null_pos wrt to record 0 = 0
+table->field[4]->null_bit_pos = 16
+
+table->field[5]->field_name DB_ROW_HASH_3
+table->field[5]->offset = 47
+table->field[5]->field_length = 8
+table->field[5]->null_pos wrt to record 0 = 0
+table->field[5]->null_bit_pos = 32
+
+SET debug_dbug="";
+drop table t1;
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1 ( a blob , b blob , c blob , d blob , unique (a,b), unique(c, d));
+Warnings:
+Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...
+
+Printing Table keyinfo
+
+table->s->reclength 57
+table->s->fields 6
+
+table->key_info[0] user_defined_key_parts = 1
+table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[0] flags & HA_NOSAME = 0
+
+table->s->key_info[0] user_defined_key_parts = 2
+table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[0] flags & HA_NOSAME = 1
+
+Printing table->key_info[0].key_part[0] info
+key_part->offset = 41
+key_part->field_name = DB_ROW_HASH_1
+key_part->length = 8
+key_part->null_bit = 16
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 0
+key_part->null_bit = 1
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[1] info
+key_part->offset = 11
+key_part->field_name = b
+key_part->length = 0
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+table->key_info[1] user_defined_key_parts = 1
+table->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[1] flags & HA_NOSAME = 0
+
+table->s->key_info[1] user_defined_key_parts = 2
+table->s->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[1] flags & HA_NOSAME = 1
+
+Printing table->key_info[1].key_part[0] info
+key_part->offset = 49
+key_part->field_name = DB_ROW_HASH_2
+key_part->length = 8
+key_part->null_bit = 32
+key_part->null_offset = 0
+
+Printing share->key_info[1].key_part[0] info
+key_part->offset = 21
+key_part->field_name = c
+key_part->length = 0
+key_part->null_bit = 4
+key_part->null_offset = 0
+
+Printing share->key_info[1].key_part[1] info
+key_part->offset = 31
+key_part->field_name = d
+key_part->length = 0
+key_part->null_bit = 8
+key_part->null_offset = 0
+
+Printing table->fields
+
+table->field[0]->field_name a
+table->field[0]->offset = 1
+table->field[0]->field_length = 10
+table->field[0]->null_pos wrt to record 0 = 0
+table->field[0]->null_bit_pos = 1
+
+table->field[1]->field_name b
+table->field[1]->offset = 11
+table->field[1]->field_length = 10
+table->field[1]->null_pos wrt to record 0 = 0
+table->field[1]->null_bit_pos = 2
+
+table->field[2]->field_name c
+table->field[2]->offset = 21
+table->field[2]->field_length = 10
+table->field[2]->null_pos wrt to record 0 = 0
+table->field[2]->null_bit_pos = 4
+
+table->field[3]->field_name d
+table->field[3]->offset = 31
+table->field[3]->field_length = 10
+table->field[3]->null_pos wrt to record 0 = 0
+table->field[3]->null_bit_pos = 8
+
+table->field[4]->field_name DB_ROW_HASH_1
+table->field[4]->offset = 41
+table->field[4]->field_length = 8
+table->field[4]->null_pos wrt to record 0 = 0
+table->field[4]->null_bit_pos = 16
+
+table->field[5]->field_name DB_ROW_HASH_2
+table->field[5]->offset = 49
+table->field[5]->field_length = 8
+table->field[5]->null_pos wrt to record 0 = 0
+table->field[5]->null_bit_pos = 32
+
+SET debug_dbug="";
+drop table t1;
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1(a int primary key, b blob unique , c blob unique not null);
+Warnings:
+Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...
+
+Printing Table keyinfo
+
+table->s->reclength 41
+table->s->fields 5
+
+table->key_info[0] user_defined_key_parts = 1
+table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 0
+table->key_info[0] flags & HA_NOSAME = 1
+
+table->s->key_info[0] user_defined_key_parts = 1
+table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 0
+table->s->key_info[0] flags & HA_NOSAME = 1
+
+Printing table->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 4
+key_part->null_bit = 0
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 4
+key_part->null_bit = 0
+key_part->null_offset = 0
+
+table->key_info[1] user_defined_key_parts = 1
+table->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[1] flags & HA_NOSAME = 0
+
+table->s->key_info[1] user_defined_key_parts = 1
+table->s->key_info[1] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[1] flags & HA_NOSAME = 1
+
+Printing table->key_info[1].key_part[0] info
+key_part->offset = 25
+key_part->field_name = DB_ROW_HASH_1
+key_part->length = 8
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+Printing share->key_info[1].key_part[0] info
+key_part->offset = 5
+key_part->field_name = b
+key_part->length = 0
+key_part->null_bit = 1
+key_part->null_offset = 0
+
+table->key_info[2] user_defined_key_parts = 1
+table->key_info[2] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[2] flags & HA_NOSAME = 0
+
+table->s->key_info[2] user_defined_key_parts = 1
+table->s->key_info[2] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[2] flags & HA_NOSAME = 1
+
+Printing table->key_info[2].key_part[0] info
+key_part->offset = 33
+key_part->field_name = DB_ROW_HASH_2
+key_part->length = 8
+key_part->null_bit = 0
+key_part->null_offset = 0
+
+Printing share->key_info[2].key_part[0] info
+key_part->offset = 15
+key_part->field_name = c
+key_part->length = 0
+key_part->null_bit = 0
+key_part->null_offset = 0
+
+Printing table->fields
+
+table->field[0]->field_name a
+table->field[0]->offset = 1
+table->field[0]->field_length = 4
+table->field[0]->null_pos wrt to record 0 = -1
+table->field[0]->null_bit_pos = 0
+
+table->field[1]->field_name b
+table->field[1]->offset = 5
+table->field[1]->field_length = 10
+table->field[1]->null_pos wrt to record 0 = 0
+table->field[1]->null_bit_pos = 1
+
+table->field[2]->field_name c
+table->field[2]->offset = 15
+table->field[2]->field_length = 10
+table->field[2]->null_pos wrt to record 0 = -1
+table->field[2]->null_bit_pos = 0
+
+table->field[3]->field_name DB_ROW_HASH_1
+table->field[3]->offset = 25
+table->field[3]->field_length = 8
+table->field[3]->null_pos wrt to record 0 = 0
+table->field[3]->null_bit_pos = 2
+
+table->field[4]->field_name DB_ROW_HASH_2
+table->field[4]->offset = 33
+table->field[4]->field_length = 8
+table->field[4]->null_pos wrt to record 0 = -1
+table->field[4]->null_bit_pos = 0
+
+SET debug_dbug="";
+drop table t1;
+##Using hash
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1(a int ,b int , c int, unique(a, b, c) using hash);
+Warnings:
+Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...
+
+Printing Table keyinfo
+
+table->s->reclength 21
+table->s->fields 4
+
+table->key_info[0] user_defined_key_parts = 1
+table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->key_info[0] flags & HA_NOSAME = 0
+
+table->s->key_info[0] user_defined_key_parts = 3
+table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 1
+table->s->key_info[0] flags & HA_NOSAME = 1
+
+Printing table->key_info[0].key_part[0] info
+key_part->offset = 13
+key_part->field_name = DB_ROW_HASH_1
+key_part->length = 8
+key_part->null_bit = 16
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 4
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[1] info
+key_part->offset = 5
+key_part->field_name = b
+key_part->length = 4
+key_part->null_bit = 4
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[2] info
+key_part->offset = 9
+key_part->field_name = c
+key_part->length = 4
+key_part->null_bit = 8
+key_part->null_offset = 0
+
+Printing table->fields
+
+table->field[0]->field_name a
+table->field[0]->offset = 1
+table->field[0]->field_length = 4
+table->field[0]->null_pos wrt to record 0 = 0
+table->field[0]->null_bit_pos = 2
+
+table->field[1]->field_name b
+table->field[1]->offset = 5
+table->field[1]->field_length = 4
+table->field[1]->null_pos wrt to record 0 = 0
+table->field[1]->null_bit_pos = 4
+
+table->field[2]->field_name c
+table->field[2]->offset = 9
+table->field[2]->field_length = 4
+table->field[2]->null_pos wrt to record 0 = 0
+table->field[2]->null_bit_pos = 8
+
+table->field[3]->field_name DB_ROW_HASH_1
+table->field[3]->offset = 13
+table->field[3]->field_length = 8
+table->field[3]->null_pos wrt to record 0 = 0
+table->field[3]->null_bit_pos = 16
+
+SET debug_dbug="";
+drop table t1;
+##Using hash with the memory engine, so no long unique column is created
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1(a int ,b int , c int, unique(a, b, c) using hash) engine=memory;
+Warnings:
+Note 1 Printing Table state, It will print table fields, fields->offset,field->null_bit, field->null_pos and key_info ...
+
+Printing Table keyinfo
+
+table->s->reclength 13
+table->s->fields 3
+
+table->key_info[0] user_defined_key_parts = 3
+table->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 0
+table->key_info[0] flags & HA_NOSAME = 1
+
+table->s->key_info[0] user_defined_key_parts = 3
+table->s->key_info[0] algorithm == HA_KEY_ALG_LONG_HASH = 0
+table->s->key_info[0] flags & HA_NOSAME = 1
+
+Printing table->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 4
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[0] info
+key_part->offset = 1
+key_part->field_name = a
+key_part->length = 4
+key_part->null_bit = 2
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[1] info
+key_part->offset = 5
+key_part->field_name = b
+key_part->length = 4
+key_part->null_bit = 4
+key_part->null_offset = 0
+
+Printing share->key_info[0].key_part[2] info
+key_part->offset = 9
+key_part->field_name = c
+key_part->length = 4
+key_part->null_bit = 8
+key_part->null_offset = 0
+
+Printing table->fields
+
+table->field[0]->field_name a
+table->field[0]->offset = 1
+table->field[0]->field_length = 4
+table->field[0]->null_pos wrt to record 0 = 0
+table->field[0]->null_bit_pos = 2
+
+table->field[1]->field_name b
+table->field[1]->offset = 5
+table->field[1]->field_length = 4
+table->field[1]->null_pos wrt to record 0 = 0
+table->field[1]->null_bit_pos = 4
+
+table->field[2]->field_name c
+table->field[2]->offset = 9
+table->field[2]->field_length = 4
+table->field[2]->null_pos wrt to record 0 = 0
+table->field[2]->null_bit_pos = 8
+
+SET debug_dbug="";
+drop table t1;
diff --git a/mysql-test/main/long_unique_debug.test b/mysql-test/main/long_unique_debug.test
new file mode 100644
index 00000000000..560f6499be6
--- /dev/null
+++ b/mysql-test/main/long_unique_debug.test
@@ -0,0 +1,95 @@
+--source include/have_debug.inc
+
+#
+# MDEV-371 Unique indexes for blobs
+#
+
+--echo #In this test case we check what happens in the case of a hash collision
+
+SET debug_dbug="d,same_long_unique_hash";
+create table t1(a blob unique);
+
+FLUSH STATUS;
+insert into t1 values('xyz');
+insert into t1 values('abc');
+insert into t1 values('sachin');
+--error ER_DUP_ENTRY
+insert into t1 values('sachin');
+insert into t1 values('maria');
+--error ER_DUP_ENTRY
+insert into t1 values('maria');
+drop table t1;
+SHOW STATUS LIKE 'handler_read_next';
+
+SET debug_dbug="";
+create table t1(a blob unique);
+FLUSH STATUS;
+
+insert into t1 values('xyz');
+insert into t1 values('abc');
+insert into t1 values('sachin');
+--error ER_DUP_ENTRY
+insert into t1 values('sachin');
+insert into t1 values('maria');
+--error ER_DUP_ENTRY
+insert into t1 values('maria');
+drop table t1;
+SHOW STATUS LIKE 'handler_read_next';
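+# In the first block the injected d,same_long_unique_hash makes every value
+# hash to the same bucket, so the duplicate check has to read and compare the
+# colliding rows (Handler_read_next = 11 above); here, without the injection,
+# the hashes differ and Handler_read_next is expected to stay 0.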
+
+SET debug_dbug="d,same_long_unique_hash";
+create table t1(a blob unique, b blob unique);
+
+insert into t1 values('xyz', 11);
+insert into t1 values('abc', 22);
+insert into t1 values('sachin', 1);
+--error ER_DUP_ENTRY
+insert into t1 values('sachin', 4);
+insert into t1 values('maria', 2);
+--error ER_DUP_ENTRY
+insert into t1 values('maria', 3);
+drop table t1;
+
+create table t1(a blob , b blob , unique(a,b));
+
+insert into t1 values('xyz', 11);
+insert into t1 values('abc', 22);
+insert into t1 values('sachin', 1);
+--error ER_DUP_ENTRY
+insert into t1 values('sachin', 1);
+insert into t1 values('maria', 2);
+--error ER_DUP_ENTRY
+insert into t1 values('maria', 2);
+drop table t1;
+
+--echo ##Internal State of long unique tables
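+# The dump below is expected to show one hidden 8-byte DB_ROW_HASH_n key part
+# per long unique key in table->key_info, while table->s->key_info keeps the
+# user-defined blob columns of the key.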
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1 ( a blob unique);
+SET debug_dbug="";
+drop table t1;
+
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1 ( a blob unique, b blob unique , c blob unique);
+SET debug_dbug="";
+drop table t1;
+
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1 ( a blob , b blob , c blob , d blob , unique (a,b), unique(c, d));
+SET debug_dbug="";
+drop table t1;
+
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1(a int primary key, b blob unique , c blob unique not null);
+SET debug_dbug="";
+drop table t1;
+
+--echo ##Using hash
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1(a int ,b int , c int, unique(a, b, c) using hash);
+SET debug_dbug="";
+drop table t1;
+
+--echo ##Using hash with the memory engine, so no long unique column is created
+SET debug_dbug="d,print_long_unique_internal_state";
+create table t1(a int ,b int , c int, unique(a, b, c) using hash) engine=memory;
+SET debug_dbug="";
+drop table t1;
diff --git a/mysql-test/main/long_unique_innodb.result b/mysql-test/main/long_unique_innodb.result
new file mode 100644
index 00000000000..cb8c3ea4858
--- /dev/null
+++ b/mysql-test/main/long_unique_innodb.result
@@ -0,0 +1,133 @@
+create table t1(a blob unique) engine= InnoDB;
+insert into t1 values('RUC');
+insert into t1 values ('RUC');
+ERROR 23000: Duplicate entry 'RUC' for key 'a'
+drop table t1;
+create table t1 (a blob unique , c int unique) engine=innodb;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ `c` int(11) DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH,
+ UNIQUE KEY `c` (`c`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+drop table t1;
+#test for concurrent insert of long unique in innodb
+create table t1(a blob unique) engine= InnoDB;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` blob DEFAULT NULL,
+ UNIQUE KEY `a` (`a`) USING HASH
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+connect 'con1', localhost, root,,;
+connect 'con2', localhost, root,,;
+connection con1;
+set innodb_lock_wait_timeout= 2;
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+insert into t1 values('RUC');
+connection con2;
+set innodb_lock_wait_timeout= 2;
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+insert into t1 values ('RUC');
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con1;
+commit;
+set transaction isolation level READ COMMITTED;
+start transaction;
+insert into t1 values('RC');
+connection con2;
+commit;
+set transaction isolation level READ COMMITTED;
+start transaction;
+insert into t1 values ('RC');
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+commit;
+connection con1;
+commit;
+set transaction isolation level REPEATABLE READ;
+start transaction;
+insert into t1 values('RR');
+connection con2;
+commit;
+set transaction isolation level REPEATABLE READ;
+start transaction;
+insert into t1 values ('RR');
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection con1;
+commit;
+set transaction isolation level SERIALIZABLE;
+start transaction;
+insert into t1 values('S');
+connection con2;
+commit;
+set transaction isolation level SERIALIZABLE;
+start transaction;
+insert into t1 values ('S');
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+commit;
+connection con1;
+commit;
+select * from t1;
+a
+RUC
+RC
+RR
+S
+drop table t1;
+create table t1(a blob unique) engine=Innodb;
+connection con1;
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+insert into t1 values('RUC');
+connection con2;
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+insert into t1 values ('RUC');;
+connection con1;
+rollback;
+connection con2;
+commit;
+connection con1;
+set transaction isolation level READ COMMITTED;
+start transaction;
+insert into t1 values('RC');
+connection con2;
+set transaction isolation level READ COMMITTED;
+start transaction;
+insert into t1 values ('RC');;
+connection con1;
+rollback;
+connection con2;
+commit;
+connection con1;
+set transaction isolation level REPEATABLE READ;
+start transaction;
+insert into t1 values('RR');
+connection con2;
+set transaction isolation level REPEATABLE READ;
+start transaction;
+insert into t1 values ('RR');;
+connection con1;
+rollback;
+connection con2;
+commit;
+connection con1;
+set transaction isolation level SERIALIZABLE;
+start transaction;
+insert into t1 values('S');
+connection con2;
+set transaction isolation level SERIALIZABLE;
+start transaction;
+insert into t1 values ('S');;
+connection con1;
+rollback;
+connection con2;
+commit;
+connection default;
+drop table t1;
+disconnect con1;
+disconnect con2;
diff --git a/mysql-test/main/long_unique_innodb.test b/mysql-test/main/long_unique_innodb.test
new file mode 100644
index 00000000000..aac68cd2271
--- /dev/null
+++ b/mysql-test/main/long_unique_innodb.test
@@ -0,0 +1,140 @@
+--source include/have_innodb.inc
+
+#
+# MDEV-371 Unique indexes for blobs
+#
+
+create table t1(a blob unique) engine= InnoDB;
+insert into t1 values('RUC');
+--error ER_DUP_ENTRY
+insert into t1 values ('RUC');
+drop table t1;
+
+create table t1 (a blob unique , c int unique) engine=innodb;
+show create table t1;
+drop table t1;
+
+--echo #test for concurrent insert of long unique in innodb
+create table t1(a blob unique) engine= InnoDB;
+show create table t1;
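+# Two connections insert the same value under each isolation level; the second
+# insert is expected to block on the unique-hash check of the uncommitted row
+# and time out (innodb_lock_wait_timeout=2) while the first transaction stays open.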
+connect ('con1', localhost, root,,);
+connect ('con2', localhost, root,,);
+
+--connection con1
+set innodb_lock_wait_timeout= 2;
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+insert into t1 values('RUC');
+--connection con2
+set innodb_lock_wait_timeout= 2;
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t1 values ('RUC');
+
+--connection con1
+commit;
+set transaction isolation level READ COMMITTED;
+start transaction;
+insert into t1 values('RC');
+--connection con2
+commit;
+set transaction isolation level READ COMMITTED;
+start transaction;
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t1 values ('RC');
+commit;
+
+--connection con1
+commit;
+set transaction isolation level REPEATABLE READ;
+start transaction;
+insert into t1 values('RR');
+--connection con2
+commit;
+set transaction isolation level REPEATABLE READ;
+start transaction;
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t1 values ('RR');
+
+--connection con1
+commit;
+set transaction isolation level SERIALIZABLE;
+start transaction;
+insert into t1 values('S');
+--connection con2
+commit;
+set transaction isolation level SERIALIZABLE;
+start transaction;
+--error ER_LOCK_WAIT_TIMEOUT
+insert into t1 values ('S');
+commit;
+
+--connection con1
+commit;
+
+select * from t1;
+drop table t1;
+
+create table t1(a blob unique) engine=Innodb;
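+# Same pattern, but here the first transaction rolls back, so the blocked
+# insert in con2 should succeed once the conflicting lock is released.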
+
+--connection con1
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+insert into t1 values('RUC');
+--connection con2
+set transaction isolation level READ UNCOMMITTED;
+start transaction;
+--send insert into t1 values ('RUC');
+--connection con1
+rollback;
+--connection con2
+--reap
+commit;
+
+--connection con1
+set transaction isolation level READ COMMITTED;
+start transaction;
+insert into t1 values('RC');
+--connection con2
+set transaction isolation level READ COMMITTED;
+start transaction;
+--send insert into t1 values ('RC');
+--connection con1
+rollback;
+--connection con2
+--reap
+commit;
+
+--connection con1
+set transaction isolation level REPEATABLE READ;
+start transaction;
+insert into t1 values('RR');
+--connection con2
+set transaction isolation level REPEATABLE READ;
+start transaction;
+--send insert into t1 values ('RR');
+--connection con1
+rollback;
+--connection con2
+--reap
+commit;
+
+--connection con1
+set transaction isolation level SERIALIZABLE;
+start transaction;
+insert into t1 values('S');
+--connection con2
+set transaction isolation level SERIALIZABLE;
+start transaction;
+--send insert into t1 values ('S');
+--connection con1
+rollback;
+--connection con2
+--reap
+commit;
+
+connection default;
+drop table t1;
+disconnect con1;
+disconnect con2;
diff --git a/mysql-test/main/long_unique_update.result b/mysql-test/main/long_unique_update.result
new file mode 100644
index 00000000000..60a4fb46558
--- /dev/null
+++ b/mysql-test/main/long_unique_update.result
@@ -0,0 +1,317 @@
+#structure of tests;
+#1 test of a table containing a single unique blob column;
+#2 test of a table also containing another unique int/varchar etc. column;
+#3 test of a table containing multiple unique blob columns like unique(a),unique(b);
+#4 test of a table containing multiple multi-column unique blob keys like unique(a,b...),unique(c,d....);
+#structure of each test;
+#test if update works;
+#test update for duplicate entry;
+#test update for no change in keys;
+#test update for ignore;
+#test 1
+create table t1 (a blob unique);
+show keys from t1;
+Table t1
+Non_unique 0
+Key_name a
+Seq_in_index 1
+Column_name a
+Collation A
+Cardinality NULL
+Sub_part NULL
+Packed NULL
+Null YES
+Index_type HASH
+Comment
+Index_comment
+insert into t1 values(1),(2),(3),(4),(5);
+select * from t1;
+a
+1
+2
+3
+4
+5
+update t1 set a=11 where a=5;
+update t1 set a=a+20 where a=1;
+select * from t1;
+a
+21
+2
+3
+4
+11
+update t1 set a=3 where a=2;
+ERROR 23000: Duplicate entry '3' for key 'a'
+update t1 set a=4 where a=3;
+ERROR 23000: Duplicate entry '4' for key 'a'
+#no change in blob key
+update t1 set a=3 where a=3;
+update t1 set a=2 where a=2;
+select* from t1;
+a
+21
+2
+3
+4
+11
+#IGNORE;
+update ignore t1 set a=3 where a=2;
+update ignore t1 set a=4 where a=3;
+select * from t1;
+a
+21
+2
+3
+4
+11
+drop table t1;
+#test 2;
+create table t1 (a int primary key, b blob unique , c int unique );
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A 0 NULL NULL BTREE
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES BTREE
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+select * from t1 limit 3;
+a b c
+1 1 1
+2 2 2
+3 3 3
+update t1 set b=34 where a=1;
+update t1 set b=a+c+b+34 where b=2;
+update t1 set b=a+10+b where c=3;
+select * from t1;
+a b c
+1 34 1
+2 40 2
+3 16 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+truncate table t1;
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+update t1 set b=4 where a=3;
+ERROR 23000: Duplicate entry '4' for key 'b'
+update t1 set b=a+1 where b=3;
+ERROR 23000: Duplicate entry '4' for key 'b'
+update t1 set b=a+1 where c=3;
+ERROR 23000: Duplicate entry '4' for key 'b'
+#no change in blob key
+update t1 set b=3 where a=3;
+update t1 set b=2 where b=2;
+update t1 set b=5 where c=5;
+select* from t1;
+a b c
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+#IGNORE;
+update ignore t1 set b=3 where a=2;
+update ignore t1 set b=4 where b=3;
+update ignore t1 set b=5 where c=3;
+select * from t1;
+a b c
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+drop table t1;
+#test 3;
+create table t1 (a blob unique, b blob unique , c blob unique);
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 c 1 c A NULL NULL NULL YES HASH
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+select * from t1 limit 3;
+a b c
+1 1 1
+2 2 2
+3 3 3
+update t1 set b=34 where a=1;
+update t1 set b=a+c+b+34 where b=2;
+update t1 set b=a+10+b where c=3;
+select * from t1;
+a b c
+1 34 1
+2 40 2
+3 16 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+truncate table t1;
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+update t1 set b=4 where a=3;
+ERROR 23000: Duplicate entry '4' for key 'b'
+update t1 set b=a+1 where b=3;
+ERROR 23000: Duplicate entry '4' for key 'b'
+update t1 set b=a+1 where c=3;
+ERROR 23000: Duplicate entry '4' for key 'b'
+#no change in blob key
+update t1 set b=3 where a=3;
+update t1 set b=2 where b=2;
+update t1 set b=5 where c=5;
+select* from t1;
+a b c
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 5 5
+6 6 6
+7 7 7
+#IGNORE;
+update ignore t1 set b=3 where a=2;
+update ignore t1 set b=4 where b=3;
+update ignore t1 set b=5 where c=3;
+update ignore t1 set b=b+3 where a>1 or b>1 or c>1;
+select * from t1;
+a b c
+1 1 1
+2 2 2
+3 3 3
+4 4 4
+5 8 5
+6 9 6
+7 10 7
+update ignore t1 set b=b+5 where a>1 and b<5 and c<a+b;
+select * from t1;
+a b c
+1 1 1
+2 7 2
+3 3 3
+4 4 4
+5 8 5
+6 9 6
+7 10 7
+drop table t1;
+#test 4 ultimate test;
+create table t1 (a int primary key , b int, c blob , d blob , e varchar(2000), f int , g text,
+unique (b,c), unique (b,f),unique(e,g),unique(a,b,c,d,e,f,g));
+desc t1;
+Field Type Null Key Default Extra
+a int(11) NO PRI NULL
+b int(11) YES MUL NULL
+c blob YES NULL
+d blob YES NULL
+e varchar(2000) YES MUL NULL
+f int(11) YES NULL
+g text YES NULL
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ `d` blob DEFAULT NULL,
+ `e` varchar(2000) DEFAULT NULL,
+ `f` int(11) DEFAULT NULL,
+ `g` text DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ UNIQUE KEY `b` (`b`,`c`) USING HASH,
+ UNIQUE KEY `b_2` (`b`,`f`),
+ UNIQUE KEY `e` (`e`,`g`) USING HASH,
+ UNIQUE KEY `a` (`a`,`b`,`c`,`d`,`e`,`f`,`g`) USING HASH
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 PRIMARY 1 a A 0 NULL NULL BTREE
+t1 0 b 1 b A NULL NULL NULL YES HASH
+t1 0 b 2 c A NULL NULL NULL YES HASH
+t1 0 b_2 1 b A NULL NULL NULL YES BTREE
+t1 0 b_2 2 f A NULL NULL NULL YES BTREE
+t1 0 e 1 e A NULL NULL NULL YES HASH
+t1 0 e 2 g A NULL NULL NULL YES HASH
+t1 0 a 1 a A NULL NULL NULL HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+t1 0 a 4 d A NULL NULL NULL YES HASH
+t1 0 a 5 e A NULL NULL NULL YES HASH
+t1 0 a 6 f A NULL NULL NULL YES HASH
+t1 0 a 7 g A NULL NULL NULL YES HASH
+insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
+(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
+select * from t1 limit 3;
+a b c d e f g
+1 1 1 1 1 1 1
+2 2 2 2 2 2 2
+3 3 3 3 3 3 3
+#key b_c
+update t1 set b=2 ,c=2 where a=1;
+ERROR 23000: Duplicate entry '2-2' for key 'b'
+update t1 set b=b+34, c=c+34 where e=1 and g=1 ;
+update t1 set b=35, c=35 where e=1 and g=1 ;
+update t1 set b=b+1, c=c+1 where a>0;
+ERROR 23000: Duplicate entry '3-3' for key 'b'
+update ignore t1 set b=b+1, c=c+1 where a>0;
+select * from t1 ;
+a b c d e f g
+1 37 37 1 1 1 1
+2 2 2 2 2 2 2
+3 3 3 3 3 3 3
+4 4 4 4 4 4 4
+5 5 5 5 5 5 5
+6 6 6 6 6 6 6
+7 7 7 7 7 7 7
+8 8 8 8 8 8 8
+9 10 10 9 9 9 9
+truncate table t1;
+insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
+(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
+#key b_f no hash key
+update t1 set b=2 , f=2 where a=1;
+ERROR 23000: Duplicate entry '2-2' for key 'b_2'
+update t1 set b=b+33, f=f+33 where e=1 and g=1;
+update t1 set b=34, f=34 where e=1 and g=1 ;
+update t1 set b=b+1, f=f+1 where a>0;
+ERROR 23000: Duplicate entry '3-3' for key 'b_2'
+update ignore t1 set b=b+1, f=f+1 where a>0;
+select * from t1 ;
+a b c d e f g
+1 36 1 1 1 36 1
+2 2 2 2 2 2 2
+3 3 3 3 3 3 3
+4 4 4 4 4 4 4
+5 5 5 5 5 5 5
+6 6 6 6 6 6 6
+7 7 7 7 7 7 7
+8 8 8 8 8 8 8
+9 10 9 9 9 10 9
+truncate table t1;
+insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
+(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
+#key e_g
+update t1 set e=2 , g=2 where a=1;
+ERROR 23000: Duplicate entry '2-2' for key 'e'
+update t1 set e=e+34, g=g+34 where a=1;
+update t1 set e=34, g=34 where e=1 and g=1 ;
+select * from t1 where a=1;
+a b c d e f g
+1 1 1 1 35 1 35
+update t1 set e=e+1, g=g+1 where a>0;
+ERROR 23000: Duplicate entry '3-3' for key 'e'
+update ignore t1 set e=e+1, g=g+1 where a>0;
+select * from t1 ;
+a b c d e f g
+1 1 1 1 37 1 37
+2 2 2 2 2 2 2
+3 3 3 3 3 3 3
+4 4 4 4 4 4 4
+5 5 5 5 5 5 5
+6 6 6 6 6 6 6
+7 7 7 7 7 7 7
+8 8 8 8 8 8 8
+9 9 9 9 10 9 10
+drop table t1;
diff --git a/mysql-test/main/long_unique_update.test b/mysql-test/main/long_unique_update.test
new file mode 100644
index 00000000000..98c3aaefe17
--- /dev/null
+++ b/mysql-test/main/long_unique_update.test
@@ -0,0 +1,141 @@
+#
+# MDEV-371 Unique indexes for blobs
+#
+--echo #structure of tests;
+--echo #1 test of a table containing a single unique blob column;
+--echo #2 test of a table also containing another unique int/varchar etc. column;
+--echo #3 test of a table containing multiple unique blob columns like unique(a),unique(b);
+--echo #4 test of a table containing multiple multi-column unique blob keys like unique(a,b...),unique(c,d....);
+--echo #structure of each test;
+--echo #test if update works;
+--echo #test update for duplicate entry;
+--echo #test update for no change in keys;
+--echo #test update for ignore;
+
+--echo #test 1
+create table t1 (a blob unique);
+query_vertical show keys from t1;
+insert into t1 values(1),(2),(3),(4),(5);
+select * from t1;
+update t1 set a=11 where a=5;
+update t1 set a=a+20 where a=1;
+select * from t1;
+--error ER_DUP_ENTRY
+update t1 set a=3 where a=2;
+--error ER_DUP_ENTRY
+update t1 set a=4 where a=3;
+--echo #no change in blob key
+update t1 set a=3 where a=3;
+update t1 set a=2 where a=2;
+select* from t1;
+--echo #IGNORE;
+update ignore t1 set a=3 where a=2;
+update ignore t1 set a=4 where a=3;
+select * from t1;
+drop table t1;
+
+--echo #test 2;
+create table t1 (a int primary key, b blob unique , c int unique );
+show keys from t1;
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+select * from t1 limit 3;
+update t1 set b=34 where a=1;
+update t1 set b=a+c+b+34 where b=2;
+update t1 set b=a+10+b where c=3;
+select * from t1;
+truncate table t1;
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+--error ER_DUP_ENTRY
+update t1 set b=4 where a=3;
+--error ER_DUP_ENTRY
+update t1 set b=a+1 where b=3;
+--error ER_DUP_ENTRY
+update t1 set b=a+1 where c=3;
+--echo #no change in blob key
+update t1 set b=3 where a=3;
+update t1 set b=2 where b=2;
+update t1 set b=5 where c=5;
+select* from t1;
+--echo #IGNORE;
+update ignore t1 set b=3 where a=2;
+update ignore t1 set b=4 where b=3;
+update ignore t1 set b=5 where c=3;
+select * from t1;
+drop table t1;
+
+--echo #test 3;
+create table t1 (a blob unique, b blob unique , c blob unique);
+show keys from t1;
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+select * from t1 limit 3;
+update t1 set b=34 where a=1;
+update t1 set b=a+c+b+34 where b=2;
+update t1 set b=a+10+b where c=3;
+select * from t1;
+truncate table t1;
+insert into t1 values(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7);
+--error ER_DUP_ENTRY
+update t1 set b=4 where a=3;
+--error ER_DUP_ENTRY
+update t1 set b=a+1 where b=3;
+--error ER_DUP_ENTRY
+update t1 set b=a+1 where c=3;
+--echo #no change in blob key
+update t1 set b=3 where a=3;
+update t1 set b=2 where b=2;
+update t1 set b=5 where c=5;
+select* from t1;
+--echo #IGNORE;
+update ignore t1 set b=3 where a=2;
+update ignore t1 set b=4 where b=3;
+update ignore t1 set b=5 where c=3;
+update ignore t1 set b=b+3 where a>1 or b>1 or c>1;
+select * from t1;
+update ignore t1 set b=b+5 where a>1 and b<5 and c<a+b;
+select * from t1;
+drop table t1;
+
+--echo #test 4 ultimate test;
+create table t1 (a int primary key , b int, c blob , d blob , e varchar(2000), f int , g text,
+unique (b,c), unique (b,f),unique(e,g),unique(a,b,c,d,e,f,g));
+desc t1;
+show create table t1;
+show keys from t1;
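+# Keys that include blob or long varchar/text columns, i.e. (b,c), (e,g) and
+# (a,b,c,d,e,f,g), are expected to appear as USING HASH in SHOW CREATE TABLE,
+# while (b,f) remains an ordinary BTREE unique key.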
+insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
+(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
+select * from t1 limit 3;
+--echo #key b_c
+--error ER_DUP_ENTRY
+update t1 set b=2 ,c=2 where a=1;
+update t1 set b=b+34, c=c+34 where e=1 and g=1 ;
+update t1 set b=35, c=35 where e=1 and g=1 ;
+--error ER_DUP_ENTRY
+update t1 set b=b+1, c=c+1 where a>0;
+update ignore t1 set b=b+1, c=c+1 where a>0;
+select * from t1 ;
+truncate table t1;
+insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
+(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
+--echo #key b_f no hash key
+--error ER_DUP_ENTRY
+update t1 set b=2 , f=2 where a=1;
+update t1 set b=b+33, f=f+33 where e=1 and g=1;
+update t1 set b=34, f=34 where e=1 and g=1 ;
+--error ER_DUP_ENTRY
+update t1 set b=b+1, f=f+1 where a>0;
+update ignore t1 set b=b+1, f=f+1 where a>0;
+select * from t1 ;
+truncate table t1;
+insert into t1 values(1,1,1,1,1,1,1),(2,2,2,2,2,2,2),(3,3,3,3,3,3,3),(4,4,4,4,4,4,4),
+(5,5,5,5,5,5,5),(6,6,6,6,6,6,6),(7,7,7,7,7,7,7),(8,8,8,8,8,8,8),(9,9,9,9,9,9,9);
+--echo #key e_g
+--error ER_DUP_ENTRY
+update t1 set e=2 , g=2 where a=1;
+update t1 set e=e+34, g=g+34 where a=1;
+update t1 set e=34, g=34 where e=1 and g=1 ;
+select * from t1 where a=1;
+--error ER_DUP_ENTRY
+update t1 set e=e+1, g=g+1 where a>0;
+update ignore t1 set e=e+1, g=g+1 where a>0;
+select * from t1 ;
+drop table t1;
diff --git a/mysql-test/main/long_unique_using_hash.result b/mysql-test/main/long_unique_using_hash.result
new file mode 100644
index 00000000000..987e11294ec
--- /dev/null
+++ b/mysql-test/main/long_unique_using_hash.result
@@ -0,0 +1,54 @@
+create table t1(a blob , unique(a) using hash);
+show keys from t1;;
+Table t1
+Non_unique 0
+Key_name a
+Seq_in_index 1
+Column_name a
+Collation A
+Cardinality NULL
+Sub_part NULL
+Packed NULL
+Null YES
+Index_type HASH
+Comment
+Index_comment
+drop table t1;
+create table t1(a blob , unique(a) using btree);
+ERROR 42000: Specified key was too long; max key length is 1000 bytes
+create table t1(a int , unique(a) using hash);
+show keys from t1;;
+Table t1
+Non_unique 0
+Key_name a
+Seq_in_index 1
+Column_name a
+Collation A
+Cardinality NULL
+Sub_part NULL
+Packed NULL
+Null YES
+Index_type HASH
+Comment
+Index_comment
+drop table t1;
+create table t1(a int ,b int , c int, unique(a, b, c) using hash);
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a A NULL NULL NULL YES HASH
+t1 0 a 2 b A NULL NULL NULL YES HASH
+t1 0 a 3 c A NULL NULL NULL YES HASH
+insert into t1 values(1,1,1);
+insert into t1 values(1,1,1);
+ERROR 23000: Duplicate entry '1-1-1' for key 'a'
+drop table t1;
+create table t1(a int ,b int , c int, unique(a, b, c) using hash) engine=memory;
+show keys from t1;
+Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
+t1 0 a 1 a NULL NULL NULL NULL YES HASH
+t1 0 a 2 b NULL NULL NULL NULL YES HASH
+t1 0 a 3 c NULL 0 NULL NULL YES HASH
+insert into t1 values(1,1,1);
+insert into t1 values(1,1,1);
+ERROR 23000: Duplicate entry '1-1-1' for key 'a'
+drop table t1;
diff --git a/mysql-test/main/long_unique_using_hash.test b/mysql-test/main/long_unique_using_hash.test
new file mode 100644
index 00000000000..1e19cd66b02
--- /dev/null
+++ b/mysql-test/main/long_unique_using_hash.test
@@ -0,0 +1,28 @@
+#
+# MDEV-371 Unique indexes for blobs
+#
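+# UNIQUE ... USING HASH accepts a blob of arbitrary length, while USING BTREE
+# on a blob is rejected below (max key length is 1000 bytes).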
+
+create table t1(a blob , unique(a) using hash);
+--query_vertical show keys from t1;
+drop table t1;
+
+--error ER_TOO_LONG_KEY
+create table t1(a blob , unique(a) using btree);
+
+create table t1(a int , unique(a) using hash);
+--query_vertical show keys from t1;
+drop table t1;
+
+create table t1(a int ,b int , c int, unique(a, b, c) using hash);
+show keys from t1;
+insert into t1 values(1,1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(1,1,1);
+drop table t1;
+
+create table t1(a int ,b int , c int, unique(a, b, c) using hash) engine=memory;
+show keys from t1;
+insert into t1 values(1,1,1);
+--error ER_DUP_ENTRY
+insert into t1 values(1,1,1);
+drop table t1;
diff --git a/mysql-test/main/lowercase_fs_off.result b/mysql-test/main/lowercase_fs_off.result
index f2a8ec14641..6ff8c1b7f93 100644
--- a/mysql-test/main/lowercase_fs_off.result
+++ b/mysql-test/main/lowercase_fs_off.result
@@ -94,12 +94,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats
Error : Unknown storage engine 'InnoDB'
@@ -123,7 +123,6 @@ mysql.time_zone_transition_type OK
mysql.transaction_registry
Error : Unknown storage engine 'InnoDB'
error : Corrupt
-mysql.user OK
Repairing tables
mysql.innodb_index_stats
@@ -137,6 +136,7 @@ Error : Unknown storage engine 'InnoDB'
error : Corrupt
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
diff --git a/mysql-test/main/max_password_errors.result b/mysql-test/main/max_password_errors.result
new file mode 100644
index 00000000000..020761b4f2e
--- /dev/null
+++ b/mysql-test/main/max_password_errors.result
@@ -0,0 +1,45 @@
+set @old_max_password_errors=@@max_password_errors;
+set global max_password_errors=2;
+create user u identified by 'good_pass';
+connect(localhost,u,bas_pass,test,MASTER_PORT,MASTER_SOCKET);
+connect con1, localhost, u, bas_pass;
+ERROR 28000: Access denied for user 'u'@'localhost' (using password: YES)
+connect(localhost,u,bad_pass,test,MASTER_PORT,MASTER_SOCKET);
+connect con1, localhost, u, bad_pass;
+ERROR 28000: Access denied for user 'u'@'localhost' (using password: YES)
+connect(localhost,u,good_pass,test,MASTER_PORT,MASTER_SOCKET);
+connect con1, localhost, u, good_pass;
+ERROR HY000: User is blocked because of too many credential errors; unblock with 'FLUSH PRIVILEGES'
+connect(localhost,u,bad_pass,test,MASTER_PORT,MASTER_SOCKET);
+connect con1, localhost, u, bad_pass;
+ERROR HY000: User is blocked because of too many credential errors; unblock with 'FLUSH PRIVILEGES'
+FLUSH PRIVILEGES;
+connect con1, localhost, u, good_pass;
+disconnect con1;
+connect(localhost,u,bad_pass,test,MASTER_PORT,MASTER_SOCKET);
+connect con1, localhost, u, bad_pass;
+ERROR 28000: Access denied for user 'u'@'localhost' (using password: YES)
+connect con1, localhost, u, good_pass;
+disconnect con1;
+connect(localhost,u,bad_pass,test,MASTER_PORT,MASTER_SOCKET);
+connect con1, localhost, u, bad_pass;
+ERROR 28000: Access denied for user 'u'@'localhost' (using password: YES)
+connect con1, localhost, u, good_pass;
+ERROR 28000: Access denied for user 'u'@'localhost' (using password: YES)
+ERROR 28000: Access denied for user 'u'@'localhost' (using password: YES)
+ERROR HY000: User is blocked because of too many credential errors; unblock with 'FLUSH PRIVILEGES'
+disconnect con1;
+connection default;
+FLUSH PRIVILEGES;
+connect(localhost,root,bas_pass,test,MASTER_PORT,MASTER_SOCKET);
+connect con1, localhost, root, bas_pass;
+ERROR 28000: Access denied for user 'root'@'localhost' (using password: YES)
+connect(localhost,root,bad_pass,test,MASTER_PORT,MASTER_SOCKET);
+connect con1, localhost, root, bad_pass;
+ERROR 28000: Access denied for user 'root'@'localhost' (using password: YES)
+connect con1, localhost, u, good_pass;
+disconnect con1;
+connection default;
+DROP USER u;
+FLUSH PRIVILEGES;
+set global max_password_errors=@old_max_password_errors;
diff --git a/mysql-test/main/max_password_errors.test b/mysql-test/main/max_password_errors.test
new file mode 100644
index 00000000000..1debca0258d
--- /dev/null
+++ b/mysql-test/main/max_password_errors.test
@@ -0,0 +1,64 @@
+--source include/not_embedded.inc
+set @old_max_password_errors=@@max_password_errors;
+set global max_password_errors=2;
+create user u identified by 'good_pass';
+
+# Test that the user is blocked after 'max_password_errors' bad passwords

+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_ACCESS_DENIED_ERROR;
+connect(con1, localhost, u, bas_pass);
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_ACCESS_DENIED_ERROR;
+connect (con1, localhost, u, bad_pass);
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_USER_IS_BLOCKED;
+connect(con1, localhost, u, good_pass);
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_USER_IS_BLOCKED;
+connect(con1, localhost, u, bad_pass);
+
+
+# Test that FLUSH PRIVILEGES clears the error
+FLUSH PRIVILEGES;
+connect (con1, localhost, u, good_pass);
+disconnect con1;
+
+# Test that good login clears the error
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_ACCESS_DENIED_ERROR;
+connect (con1, localhost, u, bad_pass);
+connect (con1, localhost, u, good_pass);
+disconnect con1;
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_ACCESS_DENIED_ERROR;
+connect (con1, localhost, u, bad_pass);
+connect (con1, localhost, u, good_pass);
+
+# Test the behavior of change_user
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_ACCESS_DENIED_ERROR;
+change_user u,bad_pass;
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_ACCESS_DENIED_ERROR;
+change_user u,bad_pass;
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_USER_IS_BLOCKED;
+change_user u,good_pass;
+disconnect con1;
+
+connection default;
+FLUSH PRIVILEGES;
+
+# Test that root@localhost is not blocked by password errors
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_ACCESS_DENIED_ERROR;
+connect(con1, localhost, root, bas_pass);
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_ACCESS_DENIED_ERROR;
+connect (con1, localhost, root, bad_pass);
+connect (con1, localhost, u, good_pass);
+disconnect con1;
+connection default;
+DROP USER u;
+FLUSH PRIVILEGES;
+set global max_password_errors=@old_max_password_errors; \ No newline at end of file
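
These two files add coverage for the new max_password_errors global: once an account (root@localhost excepted) accumulates that many consecutive authentication failures, further logins are refused until FLUSH PRIVILEGES, and a successful login also resets the counter. A rough administrative sketch, with an illustrative user name:

  set global max_password_errors = 2;
  create user app_user identified by 'good_pass';
  -- two attempts with a wrong password:  ERROR 28000: Access denied ...
  -- third attempt, even with the correct password:
  --   ERROR HY000: User is blocked because of too many credential errors;
  --   unblock with 'FLUSH PRIVILEGES'
  flush privileges;   -- clears the counter; root@localhost is never blocked this way
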
diff --git a/mysql-test/main/mdev-504.result b/mysql-test/main/mdev-504.result
index 9b8b6795e0f..e34e57be6ed 100644
--- a/mysql-test/main/mdev-504.result
+++ b/mysql-test/main/mdev-504.result
@@ -1,3 +1,4 @@
+set @save_use_stat_tables=@@global.use_stat_tables;
SET GLOBAL net_write_timeout = 900;
CREATE TABLE A (
pk INTEGER AUTO_INCREMENT PRIMARY KEY,
@@ -20,5 +21,5 @@ connection default;
DROP TABLE A;
DROP PROCEDURE p_analyze;
DROP FUNCTION rnd3;
-SET GLOBAL use_stat_tables = DEFAULT;
+SET GLOBAL use_stat_tables = @save_use_stat_tables;
SET GLOBAL net_write_timeout = DEFAULT;
diff --git a/mysql-test/main/mdev-504.test b/mysql-test/main/mdev-504.test
index 551c21c37d0..277b5a038a0 100644
--- a/mysql-test/main/mdev-504.test
+++ b/mysql-test/main/mdev-504.test
@@ -1,6 +1,8 @@
--source include/not_valgrind.inc
--source include/no_protocol.inc
+set @save_use_stat_tables=@@global.use_stat_tables;
+
SET GLOBAL net_write_timeout = 900;
CREATE TABLE A (
@@ -76,5 +78,5 @@ while ($trial)
DROP TABLE A;
DROP PROCEDURE p_analyze;
DROP FUNCTION rnd3;
-SET GLOBAL use_stat_tables = DEFAULT;
+SET GLOBAL use_stat_tables = @save_use_stat_tables;
SET GLOBAL net_write_timeout = DEFAULT;
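
The mdev-504 change is test hygiene rather than new behaviour: instead of resetting use_stat_tables to DEFAULT, the test now restores whatever value the run started with. The usual pattern for a global a test has to touch:

  set @save_use_stat_tables = @@global.use_stat_tables;
  set global use_stat_tables = COMPLEMENTARY;   -- whatever the test needs (illustrative)
  -- ... test body ...
  set global use_stat_tables = @save_use_stat_tables;
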
diff --git a/mysql-test/main/mdev13607.result b/mysql-test/main/mdev13607.result
index 08848bc645b..43fc473c84d 100644
--- a/mysql-test/main/mdev13607.result
+++ b/mysql-test/main/mdev13607.result
@@ -14,8 +14,11 @@ CREATE TABLE t3 (id INT) ENGINE=InnoDB;
INSERT INTO t3 VALUES (1),(2);
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
explain SELECT * FROM
(SELECT p1.* FROM t1 p1 NATURAL JOIN t2 r1 NATURAL JOIN t3 d1 NATURAL JOIN t1 p2 NATURAL JOIN t2 r2 NATURAL JOIN t3 d2 NATURAL JOIN t1 p3 NATURAL JOIN t2 r3 NATURAL JOIN t3 d3 NATURAL JOIN t1 p4 NATURAL JOIN t2 r4 NATURAL JOIN t3 d4 NATURAL JOIN t1 p5 NATURAL JOIN t2 r5 NATURAL JOIN t3 d5 NATURAL JOIN t1 p6 NATURAL JOIN t2 r6 NATURAL JOIN t3 d6 NATURAL JOIN t1 p7 NATURAL JOIN t2 r7 NATURAL JOIN t3 d7 NATURAL JOIN t1 p8 NATURAL JOIN t2 r8 NATURAL JOIN t3 d8 NATURAL JOIN t1 p9 ) gp_1
@@ -76,21 +79,21 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY p7 ALL NULL NULL NULL NULL 50 Using where; Using join buffer (incremental, BNL join)
1 PRIMARY p8 ALL NULL NULL NULL NULL 50 Using where; Using join buffer (incremental, BNL join)
1 PRIMARY p9 ALL NULL NULL NULL NULL 50 Using where; Using join buffer (incremental, BNL join)
-1 PRIMARY <derived3> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived4> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived5> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived6> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived7> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived8> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived9> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived10> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived11> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived12> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived13> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived14> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived15> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived16> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
-1 PRIMARY <derived17> ALL NULL NULL NULL NULL -1127208515966861312 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived3> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived4> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived5> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived6> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived7> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived8> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived9> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived10> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived11> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived12> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived13> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived14> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived15> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived16> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
+1 PRIMARY <derived17> ALL NULL NULL NULL NULL 17319535557742690304 Using join buffer (incremental, BNL join)
17 DERIVED r1 ALL NULL NULL NULL NULL 2
17 DERIVED d1 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
17 DERIVED r2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (incremental, BNL join)
diff --git a/mysql-test/main/mdl.result b/mysql-test/main/mdl.result
index d93bfd5c729..883f35674c0 100644
--- a/mysql-test/main/mdl.result
+++ b/mysql-test/main/mdl.result
@@ -6,17 +6,85 @@
# failed in MDL_context::upgrade_shared_lock
#
CREATE TABLE t1(a INT) ENGINE=InnoDB;
+CREATE TABLE t3(a INT) ENGINE=myisam;
LOCK TABLES t1 WRITE CONCURRENT, t1 AS t2 READ;
SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
-MDL_INTENTION_EXCLUSIVE Global read lock
+MDL_BACKUP_TRANS_DML Backup lock
MDL_SHARED_NO_READ_WRITE Table metadata lock test t1
UNLOCK TABLES;
LOCK TABLES t1 AS t2 READ, t1 WRITE CONCURRENT;
SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
-MDL_INTENTION_EXCLUSIVE Global read lock
+MDL_BACKUP_TRANS_DML Backup lock
MDL_SHARED_WRITE Table metadata lock test t1
MDL_SHARED_READ_ONLY Table metadata lock test t1
UNLOCK TABLES;
-DROP TABLE t1;
+LOCK TABLES t1 WRITE CONCURRENT, t3 WRITE;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_DDL Backup lock
+MDL_BACKUP_DML Backup lock
+MDL_SHARED_WRITE Table metadata lock test t1
+MDL_SHARED_NO_READ_WRITE Table metadata lock test t3
+MDL_INTENTION_EXCLUSIVE Schema metadata lock test
+UNLOCK TABLES;
+LOCK TABLES t3 WRITE, t1 WRITE CONCURRENT;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_DDL Backup lock
+MDL_BACKUP_DML Backup lock
+MDL_SHARED_WRITE Table metadata lock test t1
+MDL_SHARED_NO_READ_WRITE Table metadata lock test t3
+MDL_INTENTION_EXCLUSIVE Schema metadata lock test
+UNLOCK TABLES;
+LOCK TABLES t1 WRITE, mysql.user WRITE;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_DDL Backup lock
+MDL_SHARED_NO_READ_WRITE Table metadata lock mysql user
+MDL_SHARED_NO_READ_WRITE Table metadata lock test t1
+MDL_INTENTION_EXCLUSIVE Schema metadata lock mysql
+MDL_SHARED_NO_READ_WRITE Table metadata lock mysql global_priv
+MDL_INTENTION_EXCLUSIVE Schema metadata lock test
+UNLOCK TABLES;
+LOCK TABLES mysql.general_log WRITE;
+ERROR HY000: You can't use locks with log tables
+LOCK TABLES t1 WRITE,information_schema.tables READ;
+UNLOCK TABLES;
+DROP TABLE t1,t3;
+#
+# Check MDL locks taken for different kinds of tables by open
+#
+CREATE TABLE t1(a INT) ENGINE=InnoDB;
+CREATE TABLE t3(a INT) ENGINE=myisam;
+connect locker,localhost,root,,;
+connection default;
+FLUSH TABLES WITH READ LOCK;
+connection locker;
+insert into t1 values (1);
+connection default;
+connection default;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_FTWRL2 Backup lock
+MDL_SHARED_WRITE Table metadata lock test t1
+unlock tables;
+connection locker;
+unlock tables;
+connection default;
+FLUSH TABLES WITH READ LOCK;
+connection locker;
+insert into t3 values (2);
+connection default;
+connection default;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_FTWRL2 Backup lock
+MDL_SHARED_WRITE Table metadata lock test t3
+unlock tables;
+connection locker;
+unlock tables;
+connection default;
+disconnect locker;
+DROP TABLE t1,t3;
diff --git a/mysql-test/main/mdl.test b/mysql-test/main/mdl.test
index b90c74a8f7f..23a862f5212 100644
--- a/mysql-test/main/mdl.test
+++ b/mysql-test/main/mdl.test
@@ -10,10 +10,72 @@
--echo #
CREATE TABLE t1(a INT) ENGINE=InnoDB;
+CREATE TABLE t3(a INT) ENGINE=myisam;
LOCK TABLES t1 WRITE CONCURRENT, t1 AS t2 READ;
SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
UNLOCK TABLES;
LOCK TABLES t1 AS t2 READ, t1 WRITE CONCURRENT;
SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
UNLOCK TABLES;
-DROP TABLE t1;
+LOCK TABLES t1 WRITE CONCURRENT, t3 WRITE;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+UNLOCK TABLES;
+LOCK TABLES t3 WRITE, t1 WRITE CONCURRENT;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+UNLOCK TABLES;
+LOCK TABLES t1 WRITE, mysql.user WRITE;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+UNLOCK TABLES;
+--error ER_CANT_LOCK_LOG_TABLE
+LOCK TABLES mysql.general_log WRITE;
+# The following may work in embedded server
+--error 0,ER_DBACCESS_DENIED_ERROR
+LOCK TABLES t1 WRITE,information_schema.tables READ;
+UNLOCK TABLES;
+DROP TABLE t1,t3;
+
+--echo #
+--echo # Check MDL locks taken for different kinds of tables by open
+--echo #
+
+CREATE TABLE t1(a INT) ENGINE=InnoDB;
+CREATE TABLE t3(a INT) ENGINE=myisam;
+connect (locker,localhost,root,,);
+connection default;
+
+FLUSH TABLES WITH READ LOCK;
+connection locker;
+--send insert into t1 values (1)
+connection default;
+# Wait till the above insert gets blocked on the backup lock.
+let $wait_condition=
+ select count(*) > 0 from information_schema.processlist
+ where state = "Waiting for backup lock";
+--source include/wait_condition.inc
+connection default;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+unlock tables;
+connection locker;
+--reap
+unlock tables;
+connection default;
+
+FLUSH TABLES WITH READ LOCK;
+connection locker;
+--send insert into t3 values (2)
+connection default;
+# Wait till the above insert gets blocked on the backup lock.
+let $wait_condition=
+ select count(*) > 0 from information_schema.processlist
+ where state = "Waiting for backup lock";
+--source include/wait_condition.inc
+connection default;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+unlock tables;
+connection locker;
+--reap
+unlock tables;
+connection default;
+
+disconnect locker;
+DROP TABLE t1,t3;
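
The expanded mdl test records the move from the old global read lock to the finer-grained backup locks: LOCK TABLES and FTWRL now surface MDL_BACKUP_* entries in metadata_lock_info. One of the cases above, replayed by hand (the metadata_lock_info plugin has to be loaded):

  create table t1 (a int) engine=InnoDB;
  lock tables t1 write concurrent, t1 as t2 read;
  select lock_mode, lock_type, table_schema, table_name
    from information_schema.metadata_lock_info;
  --  MDL_BACKUP_TRANS_DML       Backup lock
  --  MDL_SHARED_NO_READ_WRITE   Table metadata lock   test   t1
  unlock tables;
  drop table t1;
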
diff --git a/mysql-test/main/mdl_sync.result b/mysql-test/main/mdl_sync.result
index 3880fc5ef91..5203fdddb2d 100644
--- a/mysql-test/main/mdl_sync.result
+++ b/mysql-test/main/mdl_sync.result
@@ -2146,10 +2146,11 @@ flush tables t1, t2 with read lock;
connection con1;
# Wait till FLUSH TABLES <list> WITH READ LOCK stops.
set debug_sync='now WAIT_FOR parked';
+flush tables;
# Start a statement which will flush all tables and thus
# invalidate table t1 open by FLUSH TABLES <list> WITH READ LOCK.
# Sending:
-flush tables;
+flush tables t1;
connection default;
# Wait till the above FLUSH TABLES blocks.
# Resume FLUSH TABLES <list> WITH READ LOCK, so it tries to open t2
@@ -2513,6 +2514,12 @@ connection con2;
SET DEBUG_SYNC= 'now WAIT_FOR table_opened';
# Check that FLUSH must wait to get the GRL
# and let DROP PROCEDURE continue
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_DDL Backup lock
+MDL_EXCLUSIVE Stored procedure metadata lock test p1
+MDL_INTENTION_EXCLUSIVE Schema metadata lock test
+MDL_SHARED_WRITE Table metadata lock mysql proc
SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL grlwait';
FLUSH TABLES WITH READ LOCK;
connection default;
@@ -2527,6 +2534,43 @@ connection con2;
UNLOCK TABLES;
connection default;
SET DEBUG_SYNC= 'RESET';
+#
+# UPDATE should wait for FTWRL with non-transactional table second
+#
+create table t1 (a int) engine=myisam;
+create table t2 (a int) engine=innodb;
+insert into t1 values (1);
+insert into t2 values (1);
+SET DEBUG_SYNC= 'after_open_table_mdl_shared SIGNAL table_opened WAIT_FOR grlwait execute 2';
+update t1,t2 set t1.a=2,t2.a=3;
+connection con2;
+SET DEBUG_SYNC= 'now WAIT_FOR table_opened';
+SET DEBUG_SYNC= 'now SIGNAL grlwait';
+SET DEBUG_SYNC= 'now WAIT_FOR table_opened';
+SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL grlwait';
+FLUSH TABLES WITH READ LOCK;
+connection default;
+# Reaping UPDATE
+connection con2;
+UNLOCK TABLES;
+connection default;
+SET DEBUG_SYNC= 'RESET';
+SET DEBUG_SYNC= 'after_open_table_mdl_shared SIGNAL table_opened WAIT_FOR grlwait execute 2';
+update t2,t1 set t1.a=2,t2.a=3;
+connection con2;
+SET DEBUG_SYNC= 'now WAIT_FOR table_opened';
+SET DEBUG_SYNC= 'now SIGNAL grlwait';
+SET DEBUG_SYNC= 'now WAIT_FOR table_opened';
+SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL grlwait';
+FLUSH TABLES WITH READ LOCK;
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+LOCK_MODE LOCK_TYPE TABLE_SCHEMA TABLE_NAME
+MDL_BACKUP_FTWRL2 Backup lock
+unlock tables;
+connection default;
+# Reaping UPDATE
+SET DEBUG_SYNC= 'RESET';
+drop table t1,t2;
disconnect con2;
#
# Bug#50786 Assertion `thd->mdl_context.trans_sentinel() == __null'
@@ -2534,7 +2578,6 @@ disconnect con2;
#
# Supress warnings written to the log file
call mtr.add_suppression("Wait on a lock was aborted due to a pending exclusive lock");
-DROP TABLE IF EXISTS t1, t2;
connect con1,localhost,root;
connect con2,localhost,root;
connect con3,localhost,root;
@@ -3055,7 +3098,7 @@ disconnect con3;
#
CREATE TABLE t1(a INT) ENGINE=InnoDB;
SET debug_sync='open_tables_after_open_and_process_table SIGNAL ready WAIT_FOR go';
-SELECT * FROM t1;
+INSERT INTO t1 values (1);
connect con1,localhost,root,,;
SET debug_sync='now WAIT_FOR ready';
SET lock_wait_timeout=1;
@@ -3063,7 +3106,21 @@ FLUSH TABLES WITH READ LOCK;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
SET debug_sync='now SIGNAL go';
connection default;
+# After MDEV-5536, SELECT will not block FLUSH TABLES
+SET debug_sync='RESET';
+SET debug_sync='open_tables_after_open_and_process_table SIGNAL ready WAIT_FOR go';
+SELECT * FROM t1;
+connection con1;
+SET debug_sync='now WAIT_FOR ready';
+SET lock_wait_timeout=1;
+FLUSH TABLES WITH READ LOCK;
+SET debug_sync='now SIGNAL go';
+connection default;
a
+1
+connection con1;
+unlock tables;
+connection default;
SET debug_sync='RESET';
DROP TABLE t1;
disconnect con1;
diff --git a/mysql-test/main/mdl_sync.test b/mysql-test/main/mdl_sync.test
index fbecd6bf547..2a1e488ab44 100644
--- a/mysql-test/main/mdl_sync.test
+++ b/mysql-test/main/mdl_sync.test
@@ -2,6 +2,7 @@
# We need the Debug Sync Facility.
#
--source include/have_debug_sync.inc
+--source include/have_metadata_lock_info.inc
# We need InnoDB tables for some of the tests.
--source include/have_innodb.inc
@@ -2690,17 +2691,20 @@ connection con1;
--echo # Wait till FLUSH TABLES <list> WITH READ LOCK stops.
set debug_sync='now WAIT_FOR parked';
+# Simple flush tables should not block
+flush tables;
+
--echo # Start a statement which will flush all tables and thus
--echo # invalidate table t1 open by FLUSH TABLES <list> WITH READ LOCK.
--echo # Sending:
-send flush tables;
+send flush tables t1;
connection default;
--echo # Wait till the above FLUSH TABLES blocks.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for table flush" and
- info = "flush tables";
+ where state = "Waiting for table metadata lock" and
+ info = "flush tables t1";
--source include/wait_condition.inc
--echo # Resume FLUSH TABLES <list> WITH READ LOCK, so it tries to open t2
@@ -3245,6 +3249,7 @@ connection con2;
SET DEBUG_SYNC= 'now WAIT_FOR table_opened';
--echo # Check that FLUSH must wait to get the GRL
--echo # and let DROP PROCEDURE continue
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL grlwait';
--send FLUSH TABLES WITH READ LOCK
@@ -3261,12 +3266,63 @@ connection con2;
--echo # Reaping FTWRL.
--reap
UNLOCK TABLES;
+connection default;
+SET DEBUG_SYNC= 'RESET';
+
+--echo #
+--echo # UPDATE should wait for FTWRL with non-transactional table second
+--echo #
+
+create table t1 (a int) engine=myisam;
+create table t2 (a int) engine=innodb;
+insert into t1 values (1);
+insert into t2 values (1);
+
+SET DEBUG_SYNC= 'after_open_table_mdl_shared SIGNAL table_opened WAIT_FOR grlwait execute 2';
+--send update t1,t2 set t1.a=2,t2.a=3
+
+connection con2;
+SET DEBUG_SYNC= 'now WAIT_FOR table_opened';
+SET DEBUG_SYNC= 'now SIGNAL grlwait';
+SET DEBUG_SYNC= 'now WAIT_FOR table_opened';
+SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL grlwait';
+FLUSH TABLES WITH READ LOCK;
+
+connection default;
+--echo # Reaping UPDATE
+--reap
+
+connection con2;
+UNLOCK TABLES;
connection default;
SET DEBUG_SYNC= 'RESET';
-disconnect con2;
+# This will cause a wait: the lock on the InnoDB table t2 is acquired first, but FTWRL
+# then makes the lock request for the MyISAM table t1 wait
+
+SET DEBUG_SYNC= 'after_open_table_mdl_shared SIGNAL table_opened WAIT_FOR grlwait execute 2';
+--send update t2,t1 set t1.a=2,t2.a=3
+
+connection con2;
+SET DEBUG_SYNC= 'now WAIT_FOR table_opened';
+SET DEBUG_SYNC= 'now SIGNAL grlwait';
+SET DEBUG_SYNC= 'now WAIT_FOR table_opened';
+SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL grlwait';
+FLUSH TABLES WITH READ LOCK;
+let $wait_condition= SELECT COUNT(*)=1 FROM information_schema.metadata_lock_info;
+--source include/wait_condition.inc
+SELECT LOCK_MODE, LOCK_TYPE, TABLE_SCHEMA, TABLE_NAME FROM information_schema.metadata_lock_info;
+
+unlock tables;
+
+connection default;
+--echo # Reaping UPDATE
+--reap
+SET DEBUG_SYNC= 'RESET';
+drop table t1,t2;
+disconnect con2;
--echo #
--echo # Bug#50786 Assertion `thd->mdl_context.trans_sentinel() == __null'
@@ -3275,9 +3331,6 @@ disconnect con2;
--echo # Supress warnings written to the log file
call mtr.add_suppression("Wait on a lock was aborted due to a pending exclusive lock");
---disable_warnings
-DROP TABLE IF EXISTS t1, t2;
---enable_warnings
connect (con1,localhost,root);
connect (con2,localhost,root);
@@ -3966,7 +4019,7 @@ connection con2;
connection default;
let $wait_condition=SELECT COUNT(*)=1 FROM information_schema.processlist
- WHERE state='Waiting for global read lock'
+ WHERE state='Waiting for backup lock'
AND info='CREATE TABLE db1.t2(a INT)';
--source include/wait_condition.inc
UNLOCK TABLES;
@@ -3984,7 +4037,7 @@ connection con2;
connection default;
let $wait_condition=SELECT COUNT(*)=1 FROM information_schema.processlist
- WHERE state='Waiting for global read lock'
+ WHERE state='Waiting for backup lock'
AND info='ALTER DATABASE db1 DEFAULT CHARACTER SET utf8';
--source include/wait_condition.inc
UNLOCK TABLES;
@@ -4079,9 +4132,10 @@ disconnect con3;
--echo # MDEV-12620 - set lock_wait_timeout = 1;flush tables with read lock;
--echo # lock not released after timeout
--echo #
+
CREATE TABLE t1(a INT) ENGINE=InnoDB;
SET debug_sync='open_tables_after_open_and_process_table SIGNAL ready WAIT_FOR go';
-send SELECT * FROM t1;
+send INSERT INTO t1 values (1);
connect (con1,localhost,root,,);
SET debug_sync='now WAIT_FOR ready';
@@ -4093,12 +4147,31 @@ SET debug_sync='now SIGNAL go';
connection default;
reap;
+
+--echo # After MDEV-5536, SELECT will not block FLUSH TABLES
+
+SET debug_sync='RESET';
+SET debug_sync='open_tables_after_open_and_process_table SIGNAL ready WAIT_FOR go';
+send SELECT * FROM t1;
+
+connection con1;
+SET debug_sync='now WAIT_FOR ready';
+# lock_wait_timeout should be 0 in 10.3, so that we don't have to wait at all
+SET lock_wait_timeout=1;
+FLUSH TABLES WITH READ LOCK;
+SET debug_sync='now SIGNAL go';
+
+connection default;
+reap;
+connection con1;
+unlock tables;
+connection default;
+
SET debug_sync='RESET';
DROP TABLE t1;
disconnect con1;
-
# Check that all connections opened by test cases in this file are really
# gone so execution of other tests won't be affected by their presence.
--source include/wait_until_count_sessions.inc
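
The last block of mdl_sync changes rests on MDEV-5536: a statement that only reads no longer makes FLUSH TABLES WITH READ LOCK wait, while a pending write still does. Stripped of the debug_sync plumbing, the two-connection picture is roughly:

  -- connection 1, paused inside open_tables
  insert into t1 values (1);
  -- connection 2
  set lock_wait_timeout = 1;
  flush tables with read lock;   -- ER_LOCK_WAIT_TIMEOUT while the INSERT is pending
  -- if connection 1 runs SELECT * FROM t1 instead, the FTWRL above succeeds
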
diff --git a/mysql-test/main/merge.result b/mysql-test/main/merge.result
index ff6bdf4a07e..7891ffa1723 100644
--- a/mysql-test/main/merge.result
+++ b/mysql-test/main/merge.result
@@ -38,7 +38,7 @@ insert into t1 select NULL,message from t2;
create table t3 (a int not null, b char(20), key(a)) engine=MERGE UNION=(test.t1,test.t2);
explain select * from t3 where a < 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range a a 4 NULL 18 Using where
+1 SIMPLE t3 range a a 4 NULL 17 Using where
explain select * from t3 where a > 10 and a < 20;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 range a a 4 NULL 17 Using where
@@ -669,15 +669,15 @@ KEY files (fileset_id,fileset_root_id)
EXPLAIN SELECT * FROM t2 IGNORE INDEX (files) WHERE fileset_id = 2
AND file_code BETWEEN '0000000115' AND '0000000120' LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range PRIMARY PRIMARY 35 NULL 5 Using where
+1 SIMPLE t2 range PRIMARY PRIMARY 35 NULL 6 Using where
EXPLAIN SELECT * FROM t2 WHERE fileset_id = 2
AND file_code BETWEEN '0000000115' AND '0000000120' LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range PRIMARY,files PRIMARY 35 NULL 5 Using where
+1 SIMPLE t2 range PRIMARY,files PRIMARY 35 NULL 6 Using where
EXPLAIN SELECT * FROM t1 WHERE fileset_id = 2
AND file_code BETWEEN '0000000115' AND '0000000120' LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,files PRIMARY 35 NULL 5 Using index condition
+1 SIMPLE t1 range PRIMARY,files PRIMARY 35 NULL 6 Using index condition
EXPLAIN SELECT * FROM t2 WHERE fileset_id = 2
AND file_code = '0000000115' LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -742,7 +742,7 @@ insert into t1 (a,b,c) values (1,1,0),(1,2,0);
insert into t2 (a,b,c) values (1,1,1),(1,2,1);
explain select a,b,c from t3 force index (a) where a=1 order by a,b,c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ref a a 5 const 2 Using where; Using index
+1 SIMPLE t3 ref a a 5 const 4 Using where; Using index
select a,b,c from t3 force index (a) where a=1 order by a,b,c;
a b c
1 1 0
@@ -751,7 +751,7 @@ a b c
1 2 1
explain select a,b,c from t3 force index (a) where a=1 order by a desc, b desc, c desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ref a a 5 const 2 Using where; Using index
+1 SIMPLE t3 ref a a 5 const 4 Using where; Using index
select a,b,c from t3 force index (a) where a=1 order by a desc, b desc, c desc;
a b c
1 2 1
@@ -2117,6 +2117,7 @@ CREATE TABLE t1(a INT, KEY(a));
INSERT INTO t1 VALUES(0),(1),(2),(3),(4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CREATE TABLE m1(a INT, KEY(a)) ENGINE=MERGE UNION=(t1);
SELECT CARDINALITY FROM INFORMATION_SCHEMA.STATISTICS WHERE TABLE_SCHEMA='test' AND TABLE_NAME='m1';
@@ -2811,6 +2812,8 @@ CREATE TABLE tm1 (c1 INT) ENGINE=MRG_MYISAM UNION=(t1)
INSERT_METHOD=LAST;
CREATE TRIGGER tm1_ai AFTER INSERT ON tm1
FOR EACH ROW SELECT max(c1) FROM t1 INTO @var;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
LOCK TABLE tm1 WRITE, t1 WRITE;
INSERT INTO tm1 VALUES (1);
SELECT * FROM tm1;
@@ -2835,6 +2838,8 @@ CREATE TABLE tm1 (c1 INT) ENGINE=MRG_MYISAM UNION=(t1,t2,t3,t4,t5)
INSERT_METHOD=LAST;
CREATE TRIGGER t2_au AFTER UPDATE ON t2
FOR EACH ROW SELECT MAX(c1) FROM t1 INTO @var;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE FUNCTION f1() RETURNS INT
RETURN (SELECT MAX(c1) FROM t4);
LOCK TABLE tm1 WRITE, t1 WRITE, t2 WRITE, t3 WRITE, t4 WRITE, t5 WRITE;
@@ -3814,11 +3819,15 @@ CREATE TABLE tmerge (f1 INT) ENGINE=MERGE UNION=(t1);
PREPARE stmt FROM "ANALYZE TABLE tmerge, t1";
EXECUTE stmt;
Table Op Msg_type Msg_text
+test.tmerge analyze status Engine-independent statistics collected
test.tmerge analyze note The storage engine for the table doesn't support analyze
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
EXECUTE stmt;
Table Op Msg_type Msg_text
+test.tmerge analyze status Engine-independent statistics collected
test.tmerge analyze note The storage engine for the table doesn't support analyze
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
DEALLOCATE PREPARE stmt;
DROP TABLE t1, tmerge;
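
Most of the remaining .result churn in this series, merge.result included, is the extra line ANALYZE TABLE now prints when it also gathers engine-independent statistics:

  create table t1 (a int, key(a));
  insert into t1 values (0),(1),(2),(3),(4);
  analyze table t1;
  -- Table    Op       Msg_type  Msg_text
  -- test.t1  analyze  status    Engine-independent statistics collected
  -- test.t1  analyze  status    OK
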
diff --git a/mysql-test/main/mix2_myisam.result b/mysql-test/main/mix2_myisam.result
index 34764466d2a..5acec2616fa 100644
--- a/mysql-test/main/mix2_myisam.result
+++ b/mysql-test/main/mix2_myisam.result
@@ -211,6 +211,7 @@ create index skr on t1 (a);
insert into t1 values (3,""), (4,"testing");
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
diff --git a/mysql-test/main/mrr_icp_extra.result b/mysql-test/main/mrr_icp_extra.result
index 95788b393dd..49acd7bde20 100644
--- a/mysql-test/main/mrr_icp_extra.result
+++ b/mysql-test/main/mrr_icp_extra.result
@@ -1,6 +1,7 @@
call mtr.add_suppression("Can't find record in .*");
set @mrr_icp_extra_tmp=@@optimizer_switch;
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+set optimizer_switch='rowid_filter=off';
SET NAMES latin1;
CREATE TABLE t1
(s1 char(10) COLLATE latin1_german1_ci,
@@ -82,7 +83,7 @@ a b
4 NULL
explain select * from t1 where b=2 or b is null order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref_or_null b b 5 const 3 Using index condition; Using where; Using filesort
+1 SIMPLE t1 ref_or_null b b 5 const 4 Using index condition; Using where; Using filesort
select * from t1 where b=2 or b is null order by a;
a b
3 NULL
@@ -103,10 +104,10 @@ KEY StringField (FieldKey,StringVal(32))
INSERT INTO t1 VALUES ('0',3,'0'),('0',2,'1'),('0',1,'2'),('1',2,'1'),('1',1,'3'), ('1',0,'2'),('2',3,'0'),('2',2,'1'),('2',1,'2'),('2',3,'0'),('2',2,'1'),('2',1,'2'),('3',2,'1'),('3',1,'2'),('3','3','3');
EXPLAIN SELECT * FROM t1 IGNORE INDEX (LongField, StringField) WHERE FieldKey > '2' ORDER BY LongVal;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range FieldKey FieldKey 38 NULL 4 Using index condition; Rowid-ordered scan; Using filesort
+1 SIMPLE t1 range FieldKey FieldKey 38 NULL 3 Using index condition; Rowid-ordered scan; Using filesort
EXPLAIN SELECT * FROM t1 IGNORE INDEX (FieldKey, LongField) WHERE FieldKey > '2' ORDER BY LongVal;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range StringField StringField 38 NULL 4 Using where; Using filesort
+1 SIMPLE t1 range StringField StringField 38 NULL 3 Using where; Using filesort
SELECT * FROM t1 WHERE FieldKey > '2' ORDER BY LongVal;
FieldKey LongVal StringVal
3 1 2
@@ -124,7 +125,7 @@ Table Op Msg_type Msg_text
test.t1 optimize status OK
explain select * from t1 force index (a) where a=0 or a=2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 4 NULL 4 Using index condition; Using where; Rowid-ordered scan
+1 SIMPLE t1 range a a 4 NULL 5 Using index condition; Using where; Rowid-ordered scan
select * from t1 force index (a) where a=0 or a=2;
a b c
0 NULL 0
diff --git a/mysql-test/main/mrr_icp_extra.test b/mysql-test/main/mrr_icp_extra.test
index 75ddc85b984..38306f54ed5 100644
--- a/mysql-test/main/mrr_icp_extra.test
+++ b/mysql-test/main/mrr_icp_extra.test
@@ -4,6 +4,7 @@ call mtr.add_suppression("Can't find record in .*");
set @mrr_icp_extra_tmp=@@optimizer_switch;
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+set optimizer_switch='rowid_filter=off';
SET NAMES latin1;
CREATE TABLE t1
(s1 char(10) COLLATE latin1_german1_ci,
diff --git a/mysql-test/main/multi_update.result b/mysql-test/main/multi_update.result
index c40de47668a..d2a33619900 100644
--- a/mysql-test/main/multi_update.result
+++ b/mysql-test/main/multi_update.result
@@ -603,11 +603,11 @@ CREATE TABLE t1 (f1 DATE);
INSERT INTO t1 VALUES('2001-01-01');
UPDATE IGNORE (SELECT 1 FROM t1 WHERE f1 = (SELECT f1() FROM t1)) x, t1 SET f1 = 1;
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
CREATE view v1 as SELECT f1() FROM t1;
UPDATE IGNORE (SELECT 1 FROM t1 WHERE f1 = (select * from v1)) x, t1 SET f1 = 1;
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
DROP VIEW v1;
DROP FUNCTION f1;
DROP TABLE t1;
@@ -998,8 +998,8 @@ a b c a b c
set optimizer_switch='firstmatch=off';
explain update t1, t2 set t2.c=1 where t1.a=t2.a and t1.b in (select b from t3 where t3.c< t2.c) order by t2.c, t1.c limit 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 ALL NULL NULL NULL NULL 10 Using temporary; Using filesort
-1 PRIMARY t1 ALL a NULL NULL NULL 10 Using where
+1 PRIMARY t2 ALL NULL NULL NULL NULL 10 Using where; Using temporary; Using filesort
+1 PRIMARY t1 ref a a 5 test.t2.a 1
1 PRIMARY t3 ALL NULL NULL NULL NULL 10 Using where; Start temporary; End temporary
update t1, t2 set t2.c=1 where t1.a=t2.a and t1.b in (select b from t3 where t3.c<=t2.c) order by t2.c, t1.c limit 5;
select * from t2;
diff --git a/mysql-test/main/myisam.result b/mysql-test/main/myisam.result
index 4864ef0bb13..8c464f2f312 100644
--- a/mysql-test/main/myisam.result
+++ b/mysql-test/main/myisam.result
@@ -395,7 +395,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL a NULL NULL NULL 5 Using where
explain select * from t1 force index (a) where a=0 or a=2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 4 NULL 4 Using index condition; Using where
+1 SIMPLE t1 range a a 4 NULL 5 Using index condition; Using where
explain select * from t1 where c=1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref c,c_2 c 5 const 1
@@ -641,7 +641,7 @@ create table t1 ( a tinytext, b char(1), index idx (a(1),b) );
insert into t1 values (null,''), (null,'');
explain select count(*) from t1 where a is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx idx 4 const 1 Using where
+1 SIMPLE t1 ref idx idx 4 const 2 Using where
select count(*) from t1 where a is null;
count(*)
2
@@ -676,6 +676,7 @@ insert into t1 values (0),(1),(2),(3),(4);
insert into t1 select NULL from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -689,6 +690,8 @@ show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 1 a 1 a A 10 NULL NULL YES BTREE
set myisam_stats_method=nulls_equal;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
show variables like 'myisam_stats_method';
Variable_name Value
myisam_stats_method NULLS_EQUAL
@@ -696,6 +699,7 @@ insert into t1 values (11);
delete from t1 where a=11;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -708,6 +712,7 @@ test.t1 check status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 1 a 1 a A 5 NULL NULL YES BTREE
+set @@use_stat_tables = @save_use_stat_tables;
set myisam_stats_method=DEFAULT;
show variables like 'myisam_stats_method';
Variable_name Value
@@ -716,6 +721,7 @@ insert into t1 values (11);
delete from t1 where a=11;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -733,6 +739,8 @@ set myisam_stats_method=nulls_ignored;
show variables like 'myisam_stats_method';
Variable_name Value
myisam_stats_method NULLS_IGNORED
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table t1 (
a char(3), b char(4), c char(5), d char(6),
key(a,b,c,d)
@@ -743,6 +751,7 @@ insert into t1 values ('bce','def1', 'yuu', NULL);
insert into t1 values ('bce','def2', NULL, 'quux');
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -753,6 +762,7 @@ t1 1 a 4 d A 4 NULL NULL YES BTREE
delete from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -762,6 +772,7 @@ t1 1 a 3 c A 0 NULL NULL YES BTREE
t1 1 a 4 d A 0 NULL NULL YES BTREE
set myisam_stats_method=DEFAULT;
drop table t1;
+set @@use_stat_tables = @save_use_stat_tables;
create table t1(
cip INT NOT NULL,
time TIME NOT NULL,
@@ -1801,6 +1812,7 @@ create table t1 (a int, key(a));
insert into t1 values (1),(2),(3),(4),(NULL),(NULL),(NULL),(NULL);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -2545,7 +2557,7 @@ INSERT INTO t1 VALUES
('0'),('0'),('0'),('0'),('0'),('0'),('0');
Warnings:
Error 1034 myisam_sort_buffer_size is too small. X
-Error 1034 Number of rows changed from 0 to 157
+Warning 1034 Number of rows changed from 0 to 157
SET myisam_sort_buffer_size=@@global.myisam_sort_buffer_size;
INSERT INTO t1 VALUES('1');
SELECT * FROM t1, t1 AS a1 WHERE t1.a=1 AND a1.a=1;
diff --git a/mysql-test/main/myisam.test b/mysql-test/main/myisam.test
index 63f5f6672f2..f58aee6795c 100644
--- a/mysql-test/main/myisam.test
+++ b/mysql-test/main/myisam.test
@@ -641,6 +641,9 @@ show index from t1;
# Set nulls to be equal:
set myisam_stats_method=nulls_equal;
+
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
show variables like 'myisam_stats_method';
insert into t1 values (11);
delete from t1 where a=11;
@@ -655,6 +658,7 @@ check table t1;
show index from t1;
# Set nulls back to be equal
+set @@use_stat_tables = @save_use_stat_tables;
set myisam_stats_method=DEFAULT;
show variables like 'myisam_stats_method';
insert into t1 values (11);
@@ -675,6 +679,9 @@ drop table t1;
set myisam_stats_method=nulls_ignored;
show variables like 'myisam_stats_method';
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
create table t1 (
a char(3), b char(4), c char(5), d char(6),
key(a,b,c,d)
@@ -691,6 +698,7 @@ show index from t1;
set myisam_stats_method=DEFAULT;
drop table t1;
+set @@use_stat_tables = @save_use_stat_tables;
# BUG#13814 - key value packed incorrectly for TINYBLOBs
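
The myisam test wraps its myisam_stats_method sections in use_stat_tables=COMPLEMENTARY, presumably so the cardinalities SHOW INDEX reports still come from MyISAM itself and therefore reflect the stats method under test. The pattern, as used above:

  set @save_use_stat_tables = @@use_stat_tables;
  set @@use_stat_tables = COMPLEMENTARY;   -- prefer engine statistics for this section
  set myisam_stats_method = nulls_equal;
  analyze table t1;
  show index from t1;                      -- cardinality computed with NULLs treated as equal
  set myisam_stats_method = DEFAULT;
  set @@use_stat_tables = @save_use_stat_tables;
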
diff --git a/mysql-test/main/myisam_debug.result b/mysql-test/main/myisam_debug.result
index 6232e3eac0e..9cba8968116 100644
--- a/mysql-test/main/myisam_debug.result
+++ b/mysql-test/main/myisam_debug.result
@@ -23,6 +23,8 @@ SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST
WHERE STATE = 'wait_in_enable_indexes' AND
INFO = "INSERT INTO t1(id) SELECT id FROM t2"
INTO @thread_id;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
KILL QUERY @thread_id;
CHECK TABLE t1;
Table Op Msg_type Msg_text
diff --git a/mysql-test/main/myisam_explain_non_select_all.result b/mysql-test/main/myisam_explain_non_select_all.result
index 09e662f5d6a..e11438be48d 100644
--- a/mysql-test/main/myisam_explain_non_select_all.result
+++ b/mysql-test/main/myisam_explain_non_select_all.result
@@ -7,6 +7,8 @@ INSERT INTO t1 VALUES (1), (2), (3);
# query: UPDATE t1 SET a = 10 WHERE a < 10
# select: SELECT * FROM t1 WHERE a < 10
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a = 10 WHERE a < 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
@@ -17,6 +19,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 2
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a < 10;
@@ -26,11 +29,16 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 10
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 2
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 4
# Status of testing query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 4
Handler_update 3
@@ -42,6 +50,8 @@ INSERT INTO t1 VALUES (1), (2), (3);
# query: DELETE FROM t1 WHERE a < 10
# select: SELECT * FROM t1 WHERE a < 10
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE a < 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
@@ -52,6 +62,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 2
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a < 10;
@@ -61,12 +72,17 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 10
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 2
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 4
# Status of testing query execution:
Variable_name Value
Handler_delete 3
+Handler_read_key 2
Handler_read_rnd_next 4
DROP TABLE t1;
@@ -77,6 +93,8 @@ INSERT INTO t1 VALUES (1), (2), (3);
# query: DELETE FROM t1 USING t1 WHERE a = 1
# select: SELECT * FROM t1 WHERE a = 1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 USING t1 WHERE a = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
@@ -87,6 +105,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 2
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a = 1;
@@ -96,12 +115,17 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 2
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 4
# Status of testing query execution:
Variable_name Value
Handler_delete 1
+Handler_read_key 2
Handler_read_rnd_next 4
DROP TABLE t1;
@@ -114,6 +138,8 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1, t2 SET t1.a = 10 WHERE t1.a = 1
# select: SELECT * FROM t1, t2 WHERE t1.a = 1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1, t2 SET t1.a = 10 WHERE t1.a = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
@@ -126,6 +152,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 3 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1, t2 WHERE t1.a = 1;
@@ -136,11 +163,16 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 8
# Status of testing query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 8
Handler_update 1
@@ -154,6 +186,8 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 t11, (SELECT * FROM t2) t12 SET t11.a = 10 WHERE t11.a = 1
# select: SELECT * FROM t1 t11, (SELECT * FROM t2) t12 WHERE t11.a = 1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 t11, (SELECT * FROM t2) t12 SET t11.a = 10 WHERE t11.a = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t11 ALL NULL NULL NULL NULL 3 Using where
@@ -168,6 +202,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 t11, (SELECT * FROM t2) t12 WHERE t11.a = 1;
@@ -178,11 +213,16 @@ Warnings:
Note 1003 select `test`.`t11`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` `t11` join `test`.`t2` where `test`.`t11`.`a` = 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 8
# Status of testing query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 12
Handler_update 1
@@ -196,6 +236,8 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 SET a = 10 WHERE 1 IN (SELECT 1 FROM t2 WHERE t2.b < 3)
# select: SELECT * FROM t1 WHERE 1 IN (SELECT 1 FROM t2 WHERE t2.b < 3)
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a = 10 WHERE 1 IN (SELECT 1 FROM t2 WHERE t2.b < 3);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
@@ -208,6 +250,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 1
FLUSH STATUS;
FLUSH TABLES;
@@ -220,12 +263,16 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` < 3
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 5
Handler_read_rnd_next 8
# Status of testing query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 5
Handler_update 3
@@ -239,6 +286,8 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 SET a = 10 WHERE a IN (SELECT b FROM t2 WHERE t1.a < 3)
# select: SELECT * FROM t1 WHERE a IN (SELECT b FROM t2 WHERE t1.a < 3)
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a = 10 WHERE a IN (SELECT b FROM t2 WHERE t1.a < 3);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
@@ -253,6 +302,7 @@ Warnings:
Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a IN (SELECT b FROM t2 WHERE t1.a < 3);
@@ -264,11 +314,16 @@ Note 1276 Field or reference 'test.t1.a' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b` = `test`.`t1`.`a` and `test`.`t1`.`a` < 3
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 8
# Status of testing query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 7
Handler_update 2
@@ -282,6 +337,8 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT b FROM t2 WHERE t2.b < 3)
# select: SELECT * FROM t1, t2 WHERE a IN (SELECT b FROM t2 WHERE t2.b < 3)
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT b FROM t2 WHERE t2.b < 3);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
@@ -298,6 +355,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1, t2 WHERE a IN (SELECT b FROM t2 WHERE t2.b < 3);
@@ -310,13 +368,16 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` semi join (`test`.`t2`) join `test`.`t2` where `test`.`t2`.`b` < 3
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 3
+Handler_read_key 7
Handler_read_rnd_next 12
# Status of testing query execution:
Variable_name Value
-Handler_read_key 3
+Handler_read_key 7
Handler_read_rnd_next 16
Handler_update 2
@@ -330,6 +391,8 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 t11, (SELECT * FROM t2) t12 SET t11.a = t11.a + 10
# select: SELECT * FROM t1 t11, (SELECT * FROM t2) t12
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 t11, (SELECT * FROM t2) t12 SET t11.a = t11.a + 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t11 ALL NULL NULL NULL NULL 3
@@ -344,6 +407,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 t11, (SELECT * FROM t2) t12;
@@ -354,11 +418,16 @@ Warnings:
Note 1003 select `test`.`t11`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` `t11` join `test`.`t2`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 8
# Status of testing query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd 3
Handler_read_rnd_deleted 1
Handler_read_rnd_next 24
@@ -374,6 +443,8 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 t11, (SELECT 1 FROM DUAL) t12 SET t11.a = t11.a + 10
# select: SELECT * FROM t1 t11, (SELECT 1 FROM DUAL) t12
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 t11, (SELECT 1 FROM DUAL) t12 SET t11.a = t11.a + 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY <derived2> system NULL NULL NULL NULL 1
@@ -388,6 +459,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DERIVED NULL NULL NULL NULL NULL NULL NULL NULL No tables used
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 1
FLUSH STATUS;
FLUSH TABLES;
@@ -400,12 +472,17 @@ Warnings:
Note 1003 /* select#1 */ select `test`.`t11`.`a` AS `a`,1 AS `1` from `test`.`t1` `t11`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 1
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 5
# Status of testing query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd 3
Handler_read_rnd_next 9
Handler_update 3
@@ -420,6 +497,8 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 t11, (SELECT * FROM t2) t12 SET t11.a = 10 WHERE t11.a > 1
# select: SELECT * FROM t1 t11, (SELECT * FROM t2) t12 WHERE t11.a > 1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 t11, (SELECT * FROM t2) t12 SET t11.a = 10 WHERE t11.a > 1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t11 ALL NULL NULL NULL NULL 3 Using where
@@ -434,6 +513,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 t11, (SELECT * FROM t2) t12 WHERE t11.a > 1;
@@ -444,11 +524,16 @@ Warnings:
Note 1003 select `test`.`t11`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` `t11` join `test`.`t2` where `test`.`t11`.`a` > 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 8
# Status of testing query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 16
Handler_update 2
@@ -460,6 +545,8 @@ INSERT INTO t1 VALUES (1), (2), (3);
# query: DELETE FROM t1 WHERE a > 1 LIMIT 1
# select: SELECT * FROM t1 WHERE a > 1 LIMIT 1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE a > 1 LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where
@@ -470,6 +557,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 2
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a > 1 LIMIT 1;
@@ -479,12 +567,17 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 1 limit 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 2
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 2
# Status of testing query execution:
Variable_name Value
Handler_delete 1
+Handler_read_key 2
Handler_read_rnd_next 2
DROP TABLE t1;
@@ -495,6 +588,8 @@ INSERT INTO t1 VALUES (1), (2), (3);
# query: DELETE FROM t1 WHERE 0
# select: SELECT * FROM t1 WHERE 0
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
@@ -505,6 +600,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 2
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE 0;
@@ -514,10 +610,15 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 0
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 2
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 2
# Status of testing query execution:
Variable_name Value
+Handler_read_key 2
DROP TABLE t1;
#13
@@ -527,6 +628,8 @@ INSERT INTO t1 VALUES (1), (2), (3);
# query: DELETE FROM t1 USING t1 WHERE 0
# select: SELECT * FROM t1 WHERE 0
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 USING t1 WHERE 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
@@ -537,6 +640,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 2
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE 0;
@@ -546,10 +650,15 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 0
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 2
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 2
# Status of testing query execution:
Variable_name Value
+Handler_read_key 2
DROP TABLE t1;
#14
@@ -559,6 +668,8 @@ INSERT INTO t1 VALUES (3, 3), (7, 7);
# query: DELETE FROM t1 WHERE a = 3
# select: SELECT * FROM t1 WHERE a = 3
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE a = 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 1 Using where
@@ -569,6 +680,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range a a 5 NULL 1 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 5
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a = 3;
@@ -578,14 +690,16 @@ Warnings:
Note 1003 select 3 AS `a`,3 AS `b` from `test`.`t1` where 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
-Handler_read_key 1
+Handler_read_key 6
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 6
# Status of testing query execution:
Variable_name Value
Handler_delete 1
-Handler_read_key 1
+Handler_read_key 6
DROP TABLE t1;
#15
@@ -595,6 +709,8 @@ INSERT INTO t1 VALUES (3, 3), (7, 7);
# query: DELETE FROM t1 WHERE a < 3
# select: SELECT * FROM t1 WHERE a < 3
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE a < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 1 Using where
@@ -605,6 +721,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range a a 5 NULL 1 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 5
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a < 3;
@@ -614,12 +731,15 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` < 3
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 5
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 6
# Status of testing query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 6
DROP TABLE t1;
#16
@@ -628,16 +748,19 @@ CREATE TABLE t1 ( a int PRIMARY KEY );
# query: DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a
# select: SELECT * FROM t1 WHERE t1.a > 0 ORDER BY t1.a
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 1 Using where; Using filesort
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 100.00 Using where
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 1 100.00 Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 3
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE t1.a > 0 ORDER BY t1.a;
@@ -647,19 +770,27 @@ Warnings:
Note 1003 select NULL AS `a` from `test`.`t1` where 0 order by NULL
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 3
Handler_read_rnd_next 1
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 3
Handler_read_rnd_next 1
# Status of testing query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 3
+Handler_read_rnd_next 1
+Sort_scan 1
-INSERT INTO t1 VALUES (1), (2), (3);
+INSERT INTO t1 VALUES (1), (2), (3), (-1), (-2), (-3);
#
# query: DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a
# select: SELECT * FROM t1 WHERE t1.a > 0 ORDER BY t1.a
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE t1.a > 0 ORDER BY t1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where
@@ -670,6 +801,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 3
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE t1.a > 0 ORDER BY t1.a;
@@ -679,14 +811,17 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 0 order by `test`.`t1`.`a`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 3
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 4
Handler_read_next 3
# Status of testing query execution:
Variable_name Value
Handler_delete 3
-Handler_read_key 1
+Handler_read_key 4
Handler_read_next 3
DROP TABLE t1;
@@ -697,6 +832,8 @@ INSERT INTO t1 VALUES (4),(3),(1),(2);
# query: DELETE FROM t1 WHERE (@a:= a) ORDER BY a LIMIT 1
# select: SELECT * FROM t1 WHERE (@a:= a) ORDER BY a LIMIT 1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE (@a:= a) ORDER BY a LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 4 NULL 1 Using where
@@ -707,6 +844,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 index NULL PRIMARY 4 NULL 1 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 3
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE (@a:= a) ORDER BY a LIMIT 1;
@@ -716,13 +854,18 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where @a:=`test`.`t1`.`a` order by `test`.`t1`.`a` limit 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 3
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_first 1
+Handler_read_key 3
# Status of testing query execution:
Variable_name Value
Handler_delete 1
Handler_read_first 1
+Handler_read_key 3
DROP TABLE t1;
#18
@@ -733,6 +876,8 @@ UPDATE t1 SET a = c, b = c;
# query: DELETE FROM t1 ORDER BY a ASC, b ASC LIMIT 1
# select: SELECT * FROM t1 ORDER BY a ASC, b ASC LIMIT 1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 ORDER BY a ASC, b ASC LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 Using filesort
@@ -743,6 +888,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 10 100.00 Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 7
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 ORDER BY a ASC, b ASC LIMIT 1;
@@ -752,8 +898,12 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t1`.`c` AS `c` from `test`.`t1` order by `test`.`t1`.`a`,`test`.`t1`.`b` limit 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 7
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 7
Handler_read_rnd_next 11
Sort_priority_queue_sorts 1
Sort_rows 1
@@ -761,6 +911,7 @@ Sort_scan 1
# Status of testing query execution:
Variable_name Value
Handler_delete 1
+Handler_read_key 7
Handler_read_rnd 1
Handler_read_rnd_next 11
Sort_rows 10
@@ -778,6 +929,8 @@ INSERT INTO t3 VALUES (1,1), (2,1), (1,3);
# query: DELETE t1,t2,t3 FROM t1,t2,t3 WHERE a1=a2 AND b2=a3 AND b1=b3
# select: SELECT * FROM t1,t2,t3 WHERE a1=a2 AND b2=a3 AND b1=b3
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE t1,t2,t3 FROM t1,t2,t3 WHERE a1=a2 AND b2=a3 AND b1=b3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3
@@ -792,6 +945,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t3 eq_ref PRIMARY PRIMARY 8 test.t2.b2,test.t1.b1 1 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 13
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1,t2,t3 WHERE a1=a2 AND b2=a3 AND b1=b3;
@@ -803,15 +957,18 @@ Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2`,`test`.`t3`.`a3` AS `a3`,`test`.`t3`.`b3` AS `b3` from `test`.`t1` join `test`.`t2` join `test`.`t3` where `test`.`t2`.`a2` = `test`.`t1`.`a1` and `test`.`t3`.`a3` = `test`.`t2`.`b2` and `test`.`t3`.`b3` = `test`.`t1`.`b1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 13
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 6
+Handler_read_key 19
Handler_read_next 3
Handler_read_rnd_next 4
# Status of testing query execution:
Variable_name Value
Handler_delete 8
-Handler_read_key 6
+Handler_read_key 19
Handler_read_next 3
Handler_read_rnd 5
Handler_read_rnd_next 4
@@ -826,6 +983,8 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 SET a = 10 WHERE a IN (SELECT a FROM t2)
# select: SELECT * FROM t1 WHERE a IN (SELECT a FROM t2)
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a = 10 WHERE a IN (SELECT a FROM t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
@@ -838,6 +997,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 3 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a IN (SELECT a FROM t2);
@@ -849,12 +1009,16 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join (`test`.`t2`) where 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 3
+Handler_read_key 7
Handler_read_rnd_next 8
# Status of testing query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 10
Handler_update 3
@@ -869,6 +1033,8 @@ SET @save_optimizer_switch= @@optimizer_switch;
# query: DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2)
# select: SELECT * FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2)
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
@@ -881,6 +1047,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2);
@@ -891,13 +1058,17 @@ Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`>(<in_optimizer>(`test`.`t1`.`a1`,<exists>(/* select#2 */ select `test`.`t2`.`a2` from `test`.`t2` where `test`.`t2`.`a2` > 2 and <cache>(`test`.`t1`.`a1`) = `test`.`t2`.`a2`)))
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 5
+Handler_read_key 9
Handler_read_rnd_next 30
# Status of testing query execution:
Variable_name Value
Handler_delete 3
+Handler_read_key 4
Handler_read_rnd_next 30
SET @@optimizer_switch= @save_optimizer_switch;
@@ -907,6 +1078,8 @@ INSERT INTO t1 VALUES (1), (2), (3), (4), (5);
# query: DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2)
# select: SELECT * FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2)
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
@@ -919,6 +1092,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a1 IN (SELECT a2 FROM t2 WHERE a2 > 2);
@@ -929,12 +1103,17 @@ Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`a2` > 2 and `test`.`t1`.`a1` = `test`.`t2`.`a2`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 12
# Status of testing query execution:
Variable_name Value
Handler_delete 3
+Handler_read_key 4
Handler_read_rnd_next 30
DROP TABLE t1, t2;
@@ -945,6 +1124,8 @@ INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3), (4, 4), (5, 5);
# query: UPDATE t1 SET i = 10
# select: SELECT * FROM t1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET i = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5
@@ -955,6 +1136,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 5 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 3
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1;
@@ -964,11 +1146,16 @@ Warnings:
Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`j` AS `j` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 3
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 3
Handler_read_rnd_next 6
# Status of testing query execution:
Variable_name Value
+Handler_read_key 3
Handler_read_rnd_next 6
Handler_update 5
@@ -980,6 +1167,8 @@ INSERT INTO t1 VALUES (1, 1), (2, 2), (3, 3), (4, 4), (5, 5);
# query: DELETE FROM t1
# select: SELECT * FROM t1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL 5 Deleting all rows
@@ -990,6 +1179,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE NULL NULL NULL NULL NULL NULL 5 NULL Deleting all rows
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 3
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1;
@@ -999,11 +1189,16 @@ Warnings:
Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`j` AS `j` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 3
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 3
Handler_read_rnd_next 6
# Status of testing query execution:
Variable_name Value
+Handler_read_key 3
DROP TABLE t1;
#24
@@ -1018,6 +1213,8 @@ INSERT INTO t2 (a, b, c) SELECT t1.i, t1.i, t1.i FROM t1, t1 x1, t1 x2;
# query: DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL a 15 NULL 5 Using where
@@ -1028,6 +1225,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 8
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
@@ -1037,14 +1235,19 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 8
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_first 1
+Handler_read_key 8
Handler_read_next 4
# Status of testing query execution:
Variable_name Value
Handler_delete 5
Handler_read_first 1
+Handler_read_key 8
Handler_read_next 4
DROP TABLE t1, t2;
@@ -1056,6 +1259,8 @@ CREATE TABLE t2 (i INT);
# query: INSERT INTO t2 SELECT * FROM t1
# select: SELECT * FROM t1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN INSERT INTO t2 SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3
@@ -1066,6 +1271,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1;
@@ -1075,11 +1281,16 @@ Warnings:
Note 1003 select `test`.`t1`.`i` AS `i` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 2
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 4
# Status of testing query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 4
Handler_write 3
@@ -1092,6 +1303,8 @@ CREATE TABLE t2 (i INT);
# query: REPLACE INTO t2 SELECT * FROM t1
# select: SELECT * FROM t1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN REPLACE INTO t2 SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3
@@ -1102,6 +1315,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1;
@@ -1111,11 +1325,16 @@ Warnings:
Note 1003 select `test`.`t1`.`i` AS `i` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 2
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 4
# Status of testing query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 4
Handler_write 3
@@ -1136,8 +1355,10 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 2
# Status of testing query execution:
Variable_name Value
+Handler_read_key 2
Handler_write 1
DROP TABLE t1;
@@ -1157,8 +1378,10 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 2
# Status of testing query execution:
Variable_name Value
+Handler_read_key 2
Handler_write 1
DROP TABLE t1;
@@ -1171,6 +1394,8 @@ INSERT INTO t1 (i) VALUES (10),(11),(12),(13),(14),(15),(16),(17),(18),(19),
# query: DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
# select: SELECT * FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 5 Using where
@@ -1181,6 +1406,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
@@ -1190,14 +1416,17 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`i` AS `i` from `test`.`t1` where `test`.`t1`.`i` > 10 and `test`.`t1`.`i` <= 18 order by `test`.`t1`.`i` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 5
Handler_read_next 4
# Status of testing query execution:
Variable_name Value
Handler_delete 5
-Handler_read_key 1
+Handler_read_key 5
Handler_read_next 4
DROP TABLE t1;
@@ -1210,6 +1439,8 @@ INSERT INTO t1 (i) VALUES (10),(11),(12),(13),(14),(15),(16),(17),(18),(19),
# query: DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
# select: SELECT * FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -1220,6 +1451,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
@@ -1229,8 +1461,12 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`i` AS `i` from `test`.`t1` where `test`.`t1`.`i` > 10 and `test`.`t1`.`i` <= 18 order by `test`.`t1`.`i` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 27
Sort_priority_queue_sorts 1
Sort_rows 5
@@ -1238,6 +1474,7 @@ Sort_scan 1
# Status of testing query execution:
Variable_name Value
Handler_delete 5
+Handler_read_key 4
Handler_read_rnd 5
Handler_read_rnd_next 27
Sort_rows 8
@@ -1255,6 +1492,8 @@ INSERT INTO t2 (a, b, c) SELECT i, i, i FROM t1;
# query: DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -1265,6 +1504,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 8
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
@@ -1274,8 +1514,12 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 8
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 8
Handler_read_rnd_next 27
Sort_priority_queue_sorts 1
Sort_rows 1
@@ -1283,6 +1527,7 @@ Sort_scan 1
# Status of testing query execution:
Variable_name Value
Handler_delete 1
+Handler_read_key 8
Handler_read_rnd 1
Handler_read_rnd_next 27
Sort_rows 1
@@ -1301,6 +1546,8 @@ INSERT INTO t2 (a, b, c) SELECT t1.i, t1.i, t1.i FROM t1, t1 x1, t1 x2;
# query: DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL a 15 NULL 5 Using where
@@ -1311,6 +1558,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 8
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
@@ -1320,14 +1568,19 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 8
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_first 1
+Handler_read_key 8
Handler_read_next 4
# Status of testing query execution:
Variable_name Value
Handler_delete 5
Handler_read_first 1
+Handler_read_key 8
Handler_read_next 4
DROP TABLE t1, t2;
@@ -1342,6 +1595,8 @@ INSERT INTO t2 SELECT i, i, i, i FROM t1;
# query: DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -1352,6 +1607,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 8
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
@@ -1361,8 +1617,12 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 8
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 8
Handler_read_rnd_next 27
Sort_priority_queue_sorts 1
Sort_rows 1
@@ -1370,6 +1630,7 @@ Sort_scan 1
# Status of testing query execution:
Variable_name Value
Handler_delete 1
+Handler_read_key 8
Handler_read_rnd 1
Handler_read_rnd_next 27
Sort_rows 1
@@ -1388,6 +1649,8 @@ INSERT INTO t2 SELECT i, i, i, i FROM t1;
# query: DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -1398,6 +1661,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 8
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
@@ -1407,8 +1671,12 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 8
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 8
Handler_read_rnd 1
Handler_read_rnd_next 27
Sort_priority_queue_sorts 1
@@ -1417,6 +1685,7 @@ Sort_scan 1
# Status of testing query execution:
Variable_name Value
Handler_delete 1
+Handler_read_key 8
Handler_read_rnd 1
Handler_read_rnd_next 27
Sort_rows 1
@@ -1435,6 +1704,8 @@ INSERT INTO t2 (key1, key2) SELECT i, i FROM t1;
# query: DELETE FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1
# select: SELECT * FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index_merge key1,key2 key1,key2 5,5 NULL 7 Using sort_union(key1,key2); Using where; Using filesort
@@ -1445,6 +1716,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index_merge key1,key2 key1,key2 5,5 NULL 7 100.00 Using sort_union(key1,key2); Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 6
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1;
@@ -1454,9 +1726,12 @@ Warnings:
Note 1003 select `test`.`t2`.`i` AS `i`,`test`.`t2`.`key1` AS `key1`,`test`.`t2`.`key2` AS `key2` from `test`.`t2` where `test`.`t2`.`key1` < 13 or `test`.`t2`.`key2` < 14 order by `test`.`t2`.`key1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 6
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 2
+Handler_read_key 8
Handler_read_next 7
Handler_read_rnd 4
Sort_range 1
@@ -1464,7 +1739,7 @@ Sort_rows 4
# Status of testing query execution:
Variable_name Value
Handler_delete 4
-Handler_read_key 2
+Handler_read_key 8
Handler_read_next 7
Handler_read_rnd 8
Sort_range 1
@@ -1482,6 +1757,8 @@ INSERT INTO t2 (i) SELECT i FROM t1;
# query: DELETE FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5
# select: SELECT * FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 Using where
@@ -1492,6 +1769,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5;
@@ -1501,14 +1779,17 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`i` AS `i` from `test`.`t2` where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` desc limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 5
Handler_read_prev 4
# Status of testing query execution:
Variable_name Value
Handler_delete 5
-Handler_read_key 1
+Handler_read_key 5
Handler_read_prev 4
DROP TABLE t1, t2;
@@ -1523,6 +1804,8 @@ INSERT INTO t2 SELECT i, i, i FROM t1;
# query: DELETE FROM t2 ORDER BY a, b DESC LIMIT 5
# select: SELECT * FROM t2 ORDER BY a, b DESC LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 ORDER BY a, b DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using filesort
@@ -1533,6 +1816,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 6
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 ORDER BY a, b DESC LIMIT 5;
@@ -1542,8 +1826,12 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` order by `test`.`t2`.`a`,`test`.`t2`.`b` desc limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 6
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 6
Handler_read_rnd_next 27
Sort_priority_queue_sorts 1
Sort_rows 5
@@ -1551,6 +1839,7 @@ Sort_scan 1
# Status of testing query execution:
Variable_name Value
Handler_delete 5
+Handler_read_key 6
Handler_read_rnd 5
Handler_read_rnd_next 27
Sort_rows 26
@@ -1569,6 +1858,8 @@ INSERT INTO t2 (a, b) SELECT t1.i, t1.i FROM t1, t1 x1, t1 x2;
# query: DELETE FROM t2 ORDER BY a DESC, b DESC LIMIT 5
# select: SELECT * FROM t2 ORDER BY a DESC, b DESC LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t2 ORDER BY a DESC, b DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL a 6 NULL 5
@@ -1579,6 +1870,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index NULL a 6 NULL 5 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 6
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 ORDER BY a DESC, b DESC LIMIT 5;
@@ -1588,13 +1880,18 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` order by `test`.`t2`.`a` desc,`test`.`t2`.`b` desc limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 6
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 6
Handler_read_last 1
Handler_read_prev 4
# Status of testing query execution:
Variable_name Value
Handler_delete 5
+Handler_read_key 6
Handler_read_last 1
Handler_read_prev 4
@@ -1610,6 +1907,8 @@ INSERT INTO t2 (i) SELECT i FROM t1;
# query: UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
# select: SELECT * FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 Using where; Using buffer
@@ -1620,6 +1919,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where; Using buffer
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
@@ -1629,13 +1929,16 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`i` AS `i` from `test`.`t2` where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 5
Handler_read_next 4
# Status of testing query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 5
Handler_read_next 4
Handler_read_rnd 5
Handler_update 5
@@ -1652,6 +1955,8 @@ INSERT INTO t2 (i) SELECT i FROM t1;
# query: UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
# select: SELECT * FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -1662,6 +1967,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i LIMIT 5;
@@ -1671,14 +1977,19 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`i` AS `i` from `test`.`t2` where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 27
Sort_priority_queue_sorts 1
Sort_rows 5
Sort_scan 1
# Status of testing query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd 5
Handler_read_rnd_next 27
Handler_update 5
@@ -1698,6 +2009,8 @@ INSERT INTO t2 (a, b, c) SELECT i, i, i FROM t1;
# query: UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -1708,6 +2021,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 8
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
@@ -1717,14 +2031,19 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 8
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 8
Handler_read_rnd_next 27
Sort_priority_queue_sorts 1
Sort_rows 1
Sort_scan 1
# Status of testing query execution:
Variable_name Value
+Handler_read_key 8
Handler_read_rnd 1
Handler_read_rnd_next 27
Handler_update 1
@@ -1745,6 +2064,8 @@ INSERT INTO t2 (a, b, c) SELECT t1.i, t1.i, t1.i FROM t1, t1 x1, t1 x2;
# query: UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL a 15 NULL 5 Using where; Using buffer
@@ -1755,6 +2076,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index NULL a 15 NULL 5 100.00 Using where; Using buffer
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 8
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
@@ -1764,13 +2086,18 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 8
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_first 1
+Handler_read_key 8
Handler_read_next 4
# Status of testing query execution:
Variable_name Value
Handler_read_first 1
+Handler_read_key 8
Handler_read_next 4
Handler_read_rnd 5
Handler_update 5
@@ -1787,6 +2114,8 @@ INSERT INTO t2 SELECT i, i, i, i FROM t1;
# query: UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -1797,6 +2126,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 8
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
@@ -1806,14 +2136,19 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 8
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 8
Handler_read_rnd_next 27
Sort_priority_queue_sorts 1
Sort_rows 1
Sort_scan 1
# Status of testing query execution:
Variable_name Value
+Handler_read_key 8
Handler_read_rnd 1
Handler_read_rnd_next 27
Sort_priority_queue_sorts 1
@@ -1833,6 +2168,8 @@ INSERT INTO t2 SELECT i, i, i, i FROM t1;
# query: UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5
# select: SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET d = 10 WHERE b = 10 ORDER BY a, c LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using where; Using filesort
@@ -1843,6 +2180,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 8
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE b = 10 ORDER BY a, c LIMIT 5;
@@ -1852,8 +2190,12 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c`,`test`.`t2`.`d` AS `d` from `test`.`t2` where `test`.`t2`.`b` = 10 order by `test`.`t2`.`a`,`test`.`t2`.`c` limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 8
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 8
Handler_read_rnd 1
Handler_read_rnd_next 27
Sort_priority_queue_sorts 1
@@ -1861,6 +2203,7 @@ Sort_rows 1
Sort_scan 1
# Status of testing query execution:
Variable_name Value
+Handler_read_key 8
Handler_read_rnd 1
Handler_read_rnd_next 27
Sort_priority_queue_sorts 1
@@ -1880,6 +2223,8 @@ INSERT INTO t2 (key1, key2) SELECT i, i FROM t1;
# query: UPDATE t2 SET i = 123 WHERE key1 < 13 or key2 < 14 ORDER BY key1
# select: SELECT * FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET i = 123 WHERE key1 < 13 or key2 < 14 ORDER BY key1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index_merge key1,key2 key1,key2 5,5 NULL 7 Using sort_union(key1,key2); Using where; Using filesort
@@ -1890,6 +2235,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index_merge key1,key2 key1,key2 5,5 NULL 7 100.00 Using sort_union(key1,key2); Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 6
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE key1 < 13 or key2 < 14 ORDER BY key1;
@@ -1899,16 +2245,19 @@ Warnings:
Note 1003 select `test`.`t2`.`i` AS `i`,`test`.`t2`.`key1` AS `key1`,`test`.`t2`.`key2` AS `key2` from `test`.`t2` where `test`.`t2`.`key1` < 13 or `test`.`t2`.`key2` < 14 order by `test`.`t2`.`key1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 6
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 2
+Handler_read_key 8
Handler_read_next 7
Handler_read_rnd 4
Sort_range 1
Sort_rows 4
# Status of testing query execution:
Variable_name Value
-Handler_read_key 2
+Handler_read_key 8
Handler_read_next 7
Handler_read_rnd 8
Handler_update 4
@@ -1927,6 +2276,8 @@ INSERT INTO t2 (i) SELECT i FROM t1;
# query: UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5
# select: SELECT * FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET a = 10 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 Using where; Using buffer
@@ -1937,6 +2288,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 5 100.00 Using where; Using buffer
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 WHERE i > 10 AND i <= 18 ORDER BY i DESC LIMIT 5;
@@ -1946,13 +2298,16 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`i` AS `i` from `test`.`t2` where `test`.`t2`.`i` > 10 and `test`.`t2`.`i` <= 18 order by `test`.`t2`.`i` desc limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 5
Handler_read_prev 4
# Status of testing query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 5
Handler_read_prev 4
Handler_read_rnd 5
Handler_update 5
@@ -1969,6 +2324,8 @@ INSERT INTO t2 SELECT i, i, i FROM t1;
# query: UPDATE t2 SET c = 10 ORDER BY a, b DESC LIMIT 5
# select: SELECT * FROM t2 ORDER BY a, b DESC LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET c = 10 ORDER BY a, b DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 Using filesort
@@ -1979,6 +2336,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 26 100.00 Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 6
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 ORDER BY a, b DESC LIMIT 5;
@@ -1988,14 +2346,19 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` order by `test`.`t2`.`a`,`test`.`t2`.`b` desc limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 6
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 6
Handler_read_rnd_next 27
Sort_priority_queue_sorts 1
Sort_rows 5
Sort_scan 1
# Status of testing query execution:
Variable_name Value
+Handler_read_key 6
Handler_read_rnd 5
Handler_read_rnd_next 27
Handler_update 4
@@ -2016,6 +2379,8 @@ INSERT INTO t2 (a, b) SELECT t1.i, t1.i FROM t1, t1 x1, t1 x2;
# query: UPDATE t2 SET c = 10 ORDER BY a DESC, b DESC LIMIT 5
# select: SELECT * FROM t2 ORDER BY a DESC, b DESC LIMIT 5
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t2 SET c = 10 ORDER BY a DESC, b DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL a 6 NULL 5 Using buffer
@@ -2026,6 +2391,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 index NULL a 6 NULL 5 100.00 Using buffer
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 6
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2 ORDER BY a DESC, b DESC LIMIT 5;
@@ -2035,12 +2401,17 @@ Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b`,`test`.`t2`.`c` AS `c` from `test`.`t2` order by `test`.`t2`.`a` desc,`test`.`t2`.`b` desc limit 5
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 6
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 6
Handler_read_last 1
Handler_read_prev 4
# Status of testing query execution:
Variable_name Value
+Handler_read_key 6
Handler_read_last 1
Handler_read_prev 4
Handler_read_rnd 5
@@ -2060,6 +2431,8 @@ INSERT INTO t1 VALUES (1,'y',1), (2,'n',2), (3,'y',3), (4,'n',4);
# query: UPDATE t1 SET c2 = 0 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2
# select: SELECT * FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET c2 = 0 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c1_idx c1_idx 2 NULL 2 Using where; Using filesort
@@ -2070,6 +2443,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range c1_idx c1_idx 2 NULL 2 100.00 Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 6
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2;
@@ -2079,16 +2453,19 @@ Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1_idx` AS `c1_idx`,`test`.`t1`.`c2` AS `c2` from `test`.`t1` where `test`.`t1`.`c1_idx` = 'y' order by `test`.`t1`.`pk` desc limit 2
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 6
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 7
Handler_read_next 2
Sort_priority_queue_sorts 1
Sort_range 1
Sort_rows 2
# Status of testing query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 7
Handler_read_next 2
Handler_read_rnd 2
Handler_update 2
@@ -2100,6 +2477,8 @@ Sort_rows 2
# query: DELETE FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2
# select: SELECT * FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c1_idx c1_idx 2 NULL 2 Using where; Using filesort
@@ -2110,6 +2489,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range c1_idx c1_idx 2 NULL 2 100.00 Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 6
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE c1_idx = 'y' ORDER BY pk DESC LIMIT 2;
@@ -2119,9 +2499,12 @@ Warnings:
Note 1003 select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`c1_idx` AS `c1_idx`,`test`.`t1`.`c2` AS `c2` from `test`.`t1` where `test`.`t1`.`c1_idx` = 'y' order by `test`.`t1`.`pk` desc limit 2
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 6
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 7
Handler_read_next 2
Sort_priority_queue_sorts 1
Sort_range 1
@@ -2129,7 +2512,7 @@ Sort_rows 2
# Status of testing query execution:
Variable_name Value
Handler_delete 2
-Handler_read_key 1
+Handler_read_key 7
Handler_read_next 2
Handler_read_rnd 2
Sort_range 1
@@ -2143,32 +2526,38 @@ INSERT INTO t1 VALUES (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(
# query: UPDATE t1 SET a=a+10 WHERE a > 34
# select: SELECT * FROM t1 WHERE a > 34
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a=a+10 WHERE a > 34;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where; Using buffer
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 Using where; Using buffer
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED UPDATE t1 SET a=a+10 WHERE a > 34;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using buffer
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 100.00 Using where; Using buffer
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 3
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a > 34;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 100.00 Using where; Using index
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` > 34
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 3
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 4
Handler_read_next 2
# Status of testing query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 4
Handler_read_next 2
Handler_read_rnd 2
Handler_update 2
@@ -2182,6 +2571,8 @@ INSERT INTO t1 VALUES (1, 1, 10), (2, 2, 20);
# query: UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10
# select: SELECT * FROM t1 LEFT JOIN t2 ON t1.c1 = t2.c1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 system NULL NULL NULL NULL 0 Const row not found
@@ -2194,6 +2585,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 7
Handler_read_rnd_next 1
FLUSH STATUS;
FLUSH TABLES;
@@ -2205,18 +2597,25 @@ Warnings:
Note 1003 select `test`.`t1`.`c1` AS `c1`,`test`.`t1`.`c2` AS `c2`,`test`.`t1`.`c3` AS `c3`,NULL AS `c1`,NULL AS `c2` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 7
Handler_read_rnd_next 1
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 7
Handler_read_rnd_next 4
# Status of testing query execution:
Variable_name Value
+Handler_read_key 7
Handler_read_rnd_next 4
#
# query: UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10 WHERE t1.c3 = 10
# select: SELECT * FROM t1 LEFT JOIN t2 ON t1.c1 = t2.c1 WHERE t1.c3 = 10
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 LEFT JOIN t2 ON t1.c1 = t2.c1 SET t2.c2 = 10 WHERE t1.c3 = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 system NULL NULL NULL NULL 0 Const row not found
@@ -2229,6 +2628,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 7
Handler_read_rnd_next 1
FLUSH STATUS;
FLUSH TABLES;
@@ -2240,12 +2640,17 @@ Warnings:
Note 1003 select `test`.`t1`.`c1` AS `c1`,`test`.`t1`.`c2` AS `c2`,`test`.`t1`.`c3` AS `c3`,NULL AS `c1`,NULL AS `c2` from `test`.`t1` where `test`.`t1`.`c3` = 10
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 7
Handler_read_rnd_next 1
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 7
Handler_read_rnd_next 4
# Status of testing query execution:
Variable_name Value
+Handler_read_key 7
Handler_read_rnd_next 4
DROP TABLE t1, t2;
@@ -2259,6 +2664,8 @@ INSERT INTO t2 VALUES(1,1),(2,2);
# query: UPDATE t1 SET t1.f2=(SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1)
# select: SELECT (SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1) FROM t1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET t1.f2=(SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2
@@ -2273,6 +2680,7 @@ Warnings:
Note 1276 Field or reference 'test.t1.f1' of SELECT #2 was resolved in SELECT #1
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 7
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT (SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1) FROM t1;
@@ -2284,12 +2692,16 @@ Note 1276 Field or reference 'test.t1.f1' of SELECT #2 was resolved in SELECT #1
Note 1003 /* select#1 */ select <expr_cache><`test`.`t1`.`f1`>((/* select#2 */ select max(`test`.`t2`.`f4`) from `test`.`t2` where `test`.`t2`.`f3` = `test`.`t1`.`f1`)) AS `(SELECT MAX(t2.f4) FROM t2 WHERE t2.f3=t1.f1)` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 7
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 2
+Handler_read_key 9
Handler_read_rnd_next 9
# Status of testing query execution:
Variable_name Value
+Handler_read_key 7
Handler_read_rnd_next 9
Handler_update 2
@@ -2325,6 +2737,8 @@ CREATE VIEW v1 AS SELECT t11.a, t12.a AS b FROM t1 t11, t1 t12;
# query: UPDATE v1 SET a = 1 WHERE a > 0
# select: SELECT * FROM v1 WHERE a > 0
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE v1 SET a = 1 WHERE a > 0;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t11 ALL NULL NULL NULL NULL 2 Using where
@@ -2337,6 +2751,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t12 ALL NULL NULL NULL NULL 2 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 2
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM v1 WHERE a > 0;
@@ -2347,11 +2762,16 @@ Warnings:
Note 1003 select `test`.`t11`.`a` AS `a`,`test`.`t12`.`a` AS `b` from `test`.`t1` `t11` join `test`.`t1` `t12` where `test`.`t11`.`a` > 0
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 2
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 6
# Status of testing query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd 1
Handler_read_rnd_deleted 1
Handler_read_rnd_next 8
@@ -2360,6 +2780,8 @@ Handler_read_rnd_next 8
# query: UPDATE t1, v1 SET v1.a = 1 WHERE t1.a = v1.a
# select: SELECT * FROM t1, v1 WHERE t1.a = v1.a
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1, v1 SET v1.a = 1 WHERE t1.a = v1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
@@ -2374,6 +2796,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t12 ALL NULL NULL NULL NULL 2 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 2
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1, v1 WHERE t1.a = v1.a;
@@ -2385,11 +2808,16 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t11`.`a` AS `a`,`test`.`t12`.`a` AS `b` from `test`.`t1` join `test`.`t1` `t11` join `test`.`t1` `t12` where `test`.`t11`.`a` = `test`.`t1`.`a`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 2
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 9
# Status of testing query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd 2
Handler_read_rnd_deleted 1
Handler_read_rnd_next 18
@@ -2399,12 +2827,14 @@ DROP TABLE t1;
DROP VIEW v1;
#63
CREATE TABLE t1 (a INT, PRIMARY KEY(a));
-INSERT INTO t1 VALUES (1), (2), (3), (4), (5);
+INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9);
CREATE VIEW v1 (a) AS SELECT a FROM t1;
#
# query: DELETE FROM v1 WHERE a < 4
# select: SELECT * FROM v1 WHERE a < 4
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE FROM v1 WHERE a < 4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where
@@ -2415,6 +2845,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 3
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM v1 WHERE a < 4;
@@ -2424,14 +2855,19 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` < 4
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 3
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_first 1
+Handler_read_key 3
Handler_read_next 3
# Status of testing query execution:
Variable_name Value
Handler_delete 3
Handler_read_first 1
+Handler_read_key 3
Handler_read_next 3
DROP TABLE t1;
@@ -2446,6 +2882,8 @@ CREATE VIEW v1 (a,c) AS SELECT a, b+1 FROM t1;
# query: DELETE v1 FROM t2, v1 WHERE t2.x = v1.a
# select: SELECT * FROM t2, v1 WHERE t2.x = v1.a
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE v1 FROM t2, v1 WHERE t2.x = v1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 4 Using where
@@ -2458,6 +2896,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.x 1 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 6
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2, v1 WHERE t2.x = v1.a;
@@ -2468,14 +2907,17 @@ Warnings:
Note 1003 select `test`.`t2`.`x` AS `x`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` + 1 AS `c` from `test`.`t2` join `test`.`t1` where `test`.`t1`.`a` = `test`.`t2`.`x`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 6
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 4
+Handler_read_key 10
Handler_read_rnd_next 5
# Status of testing query execution:
Variable_name Value
Handler_delete 4
-Handler_read_key 4
+Handler_read_key 10
Handler_read_rnd 4
Handler_read_rnd_next 5
@@ -2491,6 +2933,8 @@ CREATE VIEW v1 (a,c) AS SELECT a, b+1 FROM t1;
# query: DELETE v1 FROM t2, v1 WHERE t2.x = v1.a
# select: SELECT * FROM t2, v1 WHERE t2.x = v1.a
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN DELETE v1 FROM t2, v1 WHERE t2.x = v1.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 4 Using where
@@ -2503,6 +2947,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 4 test.t2.x 1 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 6
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t2, v1 WHERE t2.x = v1.a;
@@ -2513,14 +2958,17 @@ Warnings:
Note 1003 select `test`.`t2`.`x` AS `x`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` + 1 AS `c` from `test`.`t2` join `test`.`t1` where `test`.`t1`.`a` = `test`.`t2`.`x`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 6
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 4
+Handler_read_key 10
Handler_read_rnd_next 5
# Status of testing query execution:
Variable_name Value
Handler_delete 4
-Handler_read_key 4
+Handler_read_key 10
Handler_read_rnd 4
Handler_read_rnd_next 5
@@ -2543,6 +2991,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 INSERT t1 ALL NULL NULL NULL NULL NULL 100.00 NULL
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 2
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT NULL;
@@ -2556,6 +3005,7 @@ Variable_name Value
Variable_name Value
# Status of testing query execution:
Variable_name Value
+Handler_read_key 2
Handler_write 1
DROP TABLE t1;
@@ -2569,6 +3019,8 @@ CREATE VIEW v1 (x) AS SELECT b FROM t2;
# query: INSERT INTO v1 SELECT * FROM t1
# select: SELECT * FROM t1
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN INSERT INTO v1 SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 system NULL NULL NULL NULL 0 Const row not found
@@ -2579,6 +3031,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 system NULL NULL NULL NULL 0 0.00 Const row not found
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 1
FLUSH STATUS;
FLUSH TABLES;
@@ -2589,12 +3042,17 @@ Warnings:
Note 1003 select NULL AS `a` from `test`.`t1`
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 1
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 2
Handler_read_rnd_next 1
# Status of testing query execution:
Variable_name Value
+Handler_read_key 4
Handler_read_rnd_next 1
DROP TABLE t1, t2;
@@ -2614,6 +3072,8 @@ INSERT INTO t2 VALUES (1), (2), (3);
# query: UPDATE t1 SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x)
# select: SELECT * FROM t1 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x)
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
@@ -2628,6 +3088,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
3 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
@@ -2640,16 +3101,19 @@ Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a` from `test`.`t1` semi join ((/* select#3 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x`) where 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 3
+Handler_read_key 7
Handler_read_rnd_next 10
Sort_priority_queue_sorts 1
Sort_rows 3
Sort_scan 1
# Status of testing query execution:
Variable_name Value
-Handler_read_key 3
+Handler_read_key 7
Handler_read_rnd_next 8
Handler_update 1
Sort_priority_queue_sorts 1
@@ -2660,6 +3124,8 @@ Sort_scan 1
# query: UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x)
# select: SELECT * FROM t1, t2 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x)
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1, t2 SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
@@ -2678,6 +3144,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
3 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00 Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1, t2 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
@@ -2691,16 +3158,19 @@ Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` semi join ((/* select#3 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x`) join `test`.`t2` where 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 3
+Handler_read_key 7
Handler_read_rnd_next 10
Sort_priority_queue_sorts 1
Sort_rows 3
Sort_scan 1
# Status of testing query execution:
Variable_name Value
-Handler_read_key 3
+Handler_read_key 7
Handler_read_rnd_next 10
Sort_priority_queue_sorts 1
Sort_rows 3
@@ -2710,6 +3180,8 @@ Sort_scan 1
# query: UPDATE t1, (SELECT * FROM t2) y SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x)
# select: SELECT * FROM t1, (SELECT * FROM t2) y WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x)
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1, (SELECT * FROM t2) y SET a = 10 WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3
@@ -2730,6 +3202,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DERIVED t2 ALL NULL NULL NULL NULL 3 100.00
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 4
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT * FROM t1, (SELECT * FROM t2) y WHERE a IN (SELECT * FROM (SELECT b FROM t2 ORDER BY b LIMIT 2,2) x);
@@ -2743,16 +3216,19 @@ Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` semi join ((/* select#4 */ select `test`.`t2`.`b` AS `b` from `test`.`t2` order by `test`.`t2`.`b` limit 2,2) `x`) join `test`.`t2` where 1
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 4
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
-Handler_read_key 3
+Handler_read_key 7
Handler_read_rnd_next 10
Sort_priority_queue_sorts 1
Sort_rows 3
Sort_scan 1
# Status of testing query execution:
Variable_name Value
-Handler_read_key 3
+Handler_read_key 7
Handler_read_rnd_next 10
Sort_priority_queue_sorts 1
Sort_rows 3
@@ -2792,6 +3268,8 @@ INSERT INTO t1 VALUES (1), (2), (3), (4), (5);
# query: UPDATE t1 SET a=a+1 WHERE a>10
# select: SELECT a t1 FROM t1 WHERE a>10
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a=a+1 WHERE a>10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where; Using buffer
@@ -2802,6 +3280,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 100.00 Using where; Using buffer
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 3
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT a t1 FROM t1 WHERE a>10;
@@ -2811,19 +3290,25 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `t1` from `test`.`t1` where `test`.`t1`.`a` > 10
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 3
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
Handler_read_first 1
+Handler_read_key 3
Handler_read_next 5
# Status of testing query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 4
# used key is modified & Using filesort
#
# query: UPDATE t1 SET a=a+1 WHERE a>10 ORDER BY a+20
# select: SELECT a t1 FROM t1 WHERE a>10 ORDER BY a+20
#
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
EXPLAIN UPDATE t1 SET a=a+1 WHERE a>10 ORDER BY a+20;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using where; Using filesort
@@ -2834,6 +3319,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 100.00 Using where; Using filesort
# Status of EXPLAIN EXTENDED query
Variable_name Value
+Handler_read_key 3
FLUSH STATUS;
FLUSH TABLES;
EXPLAIN EXTENDED SELECT a t1 FROM t1 WHERE a>10 ORDER BY a+20;
@@ -2843,13 +3329,17 @@ Warnings:
Note 1003 select `test`.`t1`.`a` AS `t1` from `test`.`t1` where `test`.`t1`.`a` > 10 order by `test`.`t1`.`a` + 20
# Status of EXPLAIN EXTENDED "equivalent" SELECT query execution
Variable_name Value
+Handler_read_key 3
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Status of "equivalent" SELECT query execution:
Variable_name Value
+Handler_read_key 3
Handler_read_rnd_next 6
Sort_scan 1
# Status of testing query execution:
Variable_name Value
-Handler_read_key 1
+Handler_read_key 4
Sort_range 1
DROP TABLE t1;
diff --git a/mysql-test/main/myisam_icp.result b/mysql-test/main/myisam_icp.result
index 2048205528d..577a0df12b0 100644
--- a/mysql-test/main/myisam_icp.result
+++ b/mysql-test/main/myisam_icp.result
@@ -407,7 +407,7 @@ WHERE (pk BETWEEN 4 AND 5 OR pk < 2) AND c1 < 240
ORDER BY c1
LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,k1 k1 5 NULL 4 Using where
+1 SIMPLE t1 range|filter PRIMARY,k1 PRIMARY|k1 4|5 NULL 3 (50%) Using index condition; Using where; Rowid-ordered scan; Using filesort; Using rowid filter
DROP TABLE t1;
#
#
@@ -505,8 +505,8 @@ SELECT c2 FROM t1 JOIN t2 ON t1.c1 = t2.c1
WHERE (t2.pk <= 4 AND t1.pk IN (2,1)) OR
(t1.pk > 1 AND t2.pk BETWEEN 6 AND 6);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 2 Using index condition; Using where; Rowid-ordered scan
-1 SIMPLE t2 range PRIMARY PRIMARY 4 NULL 2 Using index condition; Using where; Rowid-ordered scan; Using join buffer (flat, BNL join)
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 1 Using index condition; Using where; Rowid-ordered scan
+1 SIMPLE t2 ALL PRIMARY NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
SELECT c2 FROM t1 JOIN t2 ON t1.c1 = t2.c1
WHERE (t2.pk <= 4 AND t1.pk IN (2,1)) OR
(t1.pk > 1 AND t2.pk BETWEEN 6 AND 6);
@@ -588,6 +588,12 @@ i1 INTEGER NOT NULL,
PRIMARY KEY (pk)
);
INSERT INTO t2 VALUES (4,1);
+ANALYZE TABLE t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
EXPLAIN
SELECT t1.d1, t2.pk, t2.i1 FROM t1 STRAIGHT_JOIN t2 ON t2.i1
WHERE t2.pk <> t1.d1 AND t2.pk = 4;
@@ -793,6 +799,12 @@ INSERT INTO t2 (g,h) VALUES
(0,'p'),(0,'f'),(0,'p'),(7,'d'),(7,'f'),(5,'j'),
(3,'e'),(1,'u'),(4,'v'),(9,'u'),(6,'i'),(1,'x'),
(7,'f'),(5,'j'),(3,'e'),(1,'u'),(4,'v'),(9,'u');
+ANALYZE TABLE t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
SET @save_optimize_switch=@@optimizer_switch;
SET optimizer_switch='materialization=on';
EXPLAIN
@@ -802,7 +814,7 @@ AND (EXISTS (SELECT * FROM t1, t2 WHERE a = f AND h <= t.e AND a > t.b)
OR a = 0 AND h < 'z' );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t ALL PRIMARY,c NULL NULL NULL 64 Using where
-1 PRIMARY t2 ref g g 5 test.t.c 19 Using where
+1 PRIMARY t2 ref g g 5 test.t.c 18 Using where
2 DEPENDENT SUBQUERY t1 index PRIMARY PRIMARY 4 NULL 64 Using where; Using index
2 DEPENDENT SUBQUERY t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 Using where
SELECT COUNT(*) FROM t1 AS t, t2
@@ -913,7 +925,7 @@ SET SESSION optimizer_switch='index_condition_pushdown=off';
EXPLAIN
SELECT a, MIN(c) FROM t1 WHERE b = 'x' AND c > 'x' GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx idx 4 const 1 Using where; Using temporary; Using filesort
+1 SIMPLE t1 ref idx idx 4 const 2 Using where; Using temporary; Using filesort
SELECT a, MIN(c) FROM t1 WHERE b = 'x' AND c > 'x' GROUP BY a;
a MIN(c)
5 y
@@ -921,7 +933,7 @@ SET SESSION optimizer_switch='index_condition_pushdown=on';
EXPLAIN
SELECT a, MIN(c) FROM t1 WHERE b = 'x' AND c > 'x' GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx idx 4 const 1 Using index condition; Using where; Using temporary; Using filesort
+1 SIMPLE t1 ref idx idx 4 const 2 Using index condition; Using where; Using temporary; Using filesort
SELECT a, MIN(c) FROM t1 WHERE b = 'x' AND c > 'x' GROUP BY a;
a MIN(c)
5 y
@@ -976,7 +988,7 @@ set optimizer_switch='mrr=off';
# Must not use ICP:
explain select * from t1 where a between 5 and 8 order by a desc, col desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 39 Using where
+1 SIMPLE t1 range a a 5 NULL 40 Using where
set optimizer_switch= @tmp_10000051;
# Must not use ICP:
explain select * from t1 where a=3 and col > 500 order by a desc, col desc;
diff --git a/mysql-test/main/myisam_mrr,64bit.rdiff b/mysql-test/main/myisam_mrr,64bit.rdiff
new file mode 100644
index 00000000000..82f6dfabb65
--- /dev/null
+++ b/mysql-test/main/myisam_mrr,64bit.rdiff
@@ -0,0 +1,13 @@
+--- main/myisam_mrr.result 2019-02-04 13:47:00.000000000 +0530
++++ main/myisam_mrr,64bit.reject 2019-02-04 13:50:01.000000000 +0530
+@@ -617,8 +617,8 @@
+ show status like 'handler_mrr%';
+ Variable_name Value
+ Handler_mrr_init 1
+-Handler_mrr_key_refills 0
+-Handler_mrr_rowid_refills 0
++Handler_mrr_key_refills 1
++Handler_mrr_rowid_refills 1
+ set join_buffer_size=10;
+ explain select sum(t1.b) from t0,t1 where t0.a=t1.a;
+ id select_type table type possible_keys key key_len ref rows Extra
diff --git a/mysql-test/main/myisam_mrr.result b/mysql-test/main/myisam_mrr.result
index 511e6a8d01f..95aa93eb40e 100644
--- a/mysql-test/main/myisam_mrr.result
+++ b/mysql-test/main/myisam_mrr.result
@@ -210,7 +210,7 @@ NULL NULL NULL NULL-1
explain
select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 range idx1 idx1 29 NULL 21 Using index condition; Using where; Rowid-ordered scan
+1 SIMPLE t4 range idx1 idx1 29 NULL 20 Using index condition; Using where; Rowid-ordered scan
select * from t4 where (a ='b-1' or a='bb-1') and b IS NULL and (c='c-1' or c='cc-2');
a b c filler
b-1 NULL c-1 NULL-15
@@ -575,7 +575,7 @@ insert into t1
select A.a+10*B.a+100*C.a+1000*D.a, 123,'filler' from t0 A, t0 B, t0 C, t0 D;
explain select sum(b) from t1 where a < 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 8 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 range a a 5 NULL 9 Using index condition; Rowid-ordered scan
# This should show one MRR scan and no re-fills:
flush status;
select sum(b) from t1 where a < 10;
diff --git a/mysql-test/main/myisam_mrr.test b/mysql-test/main/myisam_mrr.test
index b15fdf60ce7..60d96c602ef 100644
--- a/mysql-test/main/myisam_mrr.test
+++ b/mysql-test/main/myisam_mrr.test
@@ -12,6 +12,7 @@ set @mrr_buffer_size_save= @@mrr_buffer_size;
set mrr_buffer_size=79;
-- source include/mrr_tests.inc
+-- source include/word_size.inc
set @@mrr_buffer_size= @mrr_buffer_size_save;
diff --git a/mysql-test/main/myisam_recover.result b/mysql-test/main/myisam_recover.result
index 92df67b42d1..3a2c424c8b2 100644
--- a/mysql-test/main/myisam_recover.result
+++ b/mysql-test/main/myisam_recover.result
@@ -87,7 +87,7 @@ a
6
Warnings:
Error 145 Table 't1' is marked as crashed and should be repaired
-Error 1034 Number of rows changed from 3 to 6
+Warning 1034 Number of rows changed from 3 to 6
#
# Cleanup
#
@@ -139,7 +139,7 @@ a
4
Warnings:
Error 145 Table 't1' is marked as crashed and should be repaired
-Error 1034 Number of rows changed from 1 to 2
+Warning 1034 Number of rows changed from 1 to 2
connect con2, localhost, root;
ALTER TABLE t2 ADD val INT;
connection default;
diff --git a/mysql-test/main/mysql_client_test.result b/mysql-test/main/mysql_client_test.result
index 6ecf03946ab..06794a0e5ec 100644
--- a/mysql-test/main/mysql_client_test.result
+++ b/mysql-test/main/mysql_client_test.result
@@ -135,7 +135,7 @@ EXPALIN number of fields: 10
- 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39
- 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39
- - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0
+ - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 9: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39
EXPALIN JSON number of fields: 1
- 0: name: 'EXPLAIN'/''; table: ''/''; db: ''; catalog: 'def'; length: 234; max_length: 0; type: 253; decimals: 39
@@ -148,8 +148,8 @@ ANALYZE number of fields: 13
- 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39
- 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39
- - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0
- - 9: name: 'r_rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2
+ - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
+ - 9: name: 'r_rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 10: name: 'filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2
- 11: name: 'r_filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2
- 12: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39
@@ -164,7 +164,7 @@ EXPALIN INSERT number of fields: 10
- 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39
- 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39
- - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0
+ - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 9: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39
EXPALIN JSON INSERT number of fields: 1
- 0: name: 'EXPLAIN'/''; table: ''/''; db: ''; catalog: 'def'; length: 234; max_length: 0; type: 253; decimals: 39
@@ -177,8 +177,8 @@ ANALYZE INSERT number of fields: 13
- 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39
- 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39
- - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0
- - 9: name: 'r_rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2
+ - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
+ - 9: name: 'r_rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 10: name: 'filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2
- 11: name: 'r_filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2
- 12: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39
@@ -193,7 +193,7 @@ EXPALIN UPDATE number of fields: 10
- 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39
- 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39
- - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0
+ - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 9: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39
EXPALIN JSON UPDATE number of fields: 1
- 0: name: 'EXPLAIN'/''; table: ''/''; db: ''; catalog: 'def'; length: 234; max_length: 0; type: 253; decimals: 39
@@ -206,8 +206,8 @@ ANALYZE UPDATE number of fields: 13
- 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39
- 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39
- - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0
- - 9: name: 'r_rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2
+ - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
+ - 9: name: 'r_rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 10: name: 'filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2
- 11: name: 'r_filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2
- 12: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39
@@ -222,7 +222,7 @@ EXPALIN DELETE number of fields: 10
- 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39
- 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39
- - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0
+ - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 9: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39
EXPALIN JSON DELETE number of fields: 1
- 0: name: 'EXPLAIN'/''; table: ''/''; db: ''; catalog: 'def'; length: 234; max_length: 0; type: 253; decimals: 39
@@ -235,8 +235,8 @@ ANALYZE DELETE number of fields: 13
- 5: name: 'key'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 6: name: 'key_len'/''; table: ''/''; db: ''; catalog: 'def'; length: 12288; max_length: 0; type: 253; decimals: 39
- 7: name: 'ref'/''; table: ''/''; db: ''; catalog: 'def'; length: 6144; max_length: 0; type: 253; decimals: 39
- - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 10; max_length: 0; type: 8; decimals: 0
- - 9: name: 'r_rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2
+ - 8: name: 'rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
+ - 9: name: 'r_rows'/''; table: ''/''; db: ''; catalog: 'def'; length: 192; max_length: 0; type: 253; decimals: 39
- 10: name: 'filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2
- 11: name: 'r_filtered'/''; table: ''/''; db: ''; catalog: 'def'; length: 4; max_length: 0; type: 5; decimals: 2
- 12: name: 'Extra'/''; table: ''/''; db: ''; catalog: 'def'; length: 765; max_length: 0; type: 253; decimals: 39
diff --git a/mysql-test/main/mysql_install_db_win.result b/mysql-test/main/mysql_install_db_win.result
new file mode 100644
index 00000000000..950ff868035
--- /dev/null
+++ b/mysql-test/main/mysql_install_db_win.result
@@ -0,0 +1,15 @@
+Running bootstrap
+Removing default user
+Allowing remote access for user root
+Setting root password
+Creating my.ini file
+Creation of the database was successful
+# Kill the server
+# restart: --datadir=MYSQLTEST_VARDIR/tmp/ddir --loose-innodb
+connect root,localhost,root,foo;
+SELECT @@datadir;
+@@datadir
+DATADIR/
+# Kill the server
+connection default;
+# restart
diff --git a/mysql-test/main/mysql_install_db_win.test b/mysql-test/main/mysql_install_db_win.test
new file mode 100644
index 00000000000..7bf62903219
--- /dev/null
+++ b/mysql-test/main/mysql_install_db_win.test
@@ -0,0 +1,24 @@
+--source include/windows.inc
+
+# Create database in tmp directory using mysql_install_db.exe,
+# and start server from this directory.
+let $ddir= $MYSQLTEST_VARDIR/tmp/ddir;
+exec $MYSQL_INSTALL_DB_EXE --datadir=$ddir --password=foo -R;
+
+--source include/kill_mysqld.inc
+let $restart_parameters=--datadir=$ddir --loose-innodb;
+--source include/start_mysqld.inc
+
+connect root,localhost,root,foo;
+# Smoke test - check that we're actually using datadir
+# we've created (i.e. restart_parameters worked)
+--replace_result $ddir DATADIR
+SELECT @@datadir;
+
+# restart in the original datadir again
+--source include/kill_mysqld.inc
+rmdir $ddir;
+let $restart_parameters=;
+connection default;
+--source include/start_mysqld.inc
+
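
Note (review): the new mysql_install_db_win test bootstraps a fresh Windows datadir with mysql_install_db.exe (--datadir, --password, -R), restarts the server on that directory and verifies @@datadir. A small follow-up check in the same mysqltest style is sketched below; the idea that -R leaves a remotely usable root account is inferred only from the "Allowing remote access for user root" line in the result file above, so treat the expected 'root'@'%' row as an assumption.

  # hedged sketch: inspect the accounts the bootstrap created
  connect check_root,localhost,root,foo;
  # with -R there should presumably be a remote root entry as well
  select user, host from mysql.user where user = 'root' order by host;
  disconnect check_root;
  connection default;
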
diff --git a/mysql-test/main/mysql_protocols.result b/mysql-test/main/mysql_protocols.result
index c6207c4f4f5..746e4a46100 100644
--- a/mysql-test/main/mysql_protocols.result
+++ b/mysql-test/main/mysql_protocols.result
@@ -5,6 +5,7 @@ TCP
SOCKET
ok
ERROR 2047 (HY000): Wrong or unknown protocol
-ERROR 2047 (HY000): Wrong or unknown protocol
+Unknown option to protocol: MEMORY
+Alternatives are: 'TCP','SOCKET','PIPE'
Unknown option to protocol: NullS
-Alternatives are: 'TCP','SOCKET','PIPE','MEMORY'
+Alternatives are: 'TCP','SOCKET','PIPE'
diff --git a/mysql-test/main/mysql_upgrade-6984.result b/mysql-test/main/mysql_upgrade-6984.result
index 6c711b4847f..ad48004a2a3 100644
--- a/mysql-test/main/mysql_upgrade-6984.result
+++ b/mysql-test/main/mysql_upgrade-6984.result
@@ -1,4 +1,4 @@
-update mysql.user set password=password("foo") where user='root';
+update mysql.global_priv set priv=json_set(priv, '$.plugin', 'mysql_native_password', '$.authentication_string', password('foo'));
Phase 1/7: Checking and upgrading mysql database
Processing databases
mysql
@@ -7,12 +7,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats
Error : Unknown storage engine 'InnoDB'
@@ -36,7 +36,6 @@ mysql.time_zone_transition_type OK
mysql.transaction_registry
Error : Unknown storage engine 'InnoDB'
error : Corrupt
-mysql.user OK
Repairing tables
mysql.innodb_index_stats
@@ -50,6 +49,7 @@ Error : Unknown storage engine 'InnoDB'
error : Corrupt
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
@@ -63,6 +63,6 @@ test
Phase 7/7: Running 'FLUSH PRIVILEGES'
OK
connect con1,localhost,root,foo,,,;
-update mysql.user set password='' where user='root';
+update mysql.global_priv set priv=json_compact(json_remove(priv, '$.plugin', '$.authentication_string'));
flush privileges;
set global event_scheduler=OFF;
diff --git a/mysql-test/main/mysql_upgrade-6984.test b/mysql-test/main/mysql_upgrade-6984.test
index 9bbfbeb3f87..ad2b95314b0 100644
--- a/mysql-test/main/mysql_upgrade-6984.test
+++ b/mysql-test/main/mysql_upgrade-6984.test
@@ -11,13 +11,13 @@
# In this setup MYSQL_UPGRADE cannot continue after issuing FLUSH PRIVILEGES
#
-update mysql.user set password=password("foo") where user='root';
+update mysql.global_priv set priv=json_set(priv, '$.plugin', 'mysql_native_password', '$.authentication_string', password('foo'));
--exec $MYSQL_UPGRADE
connect(con1,localhost,root,foo,,,);
-update mysql.user set password='' where user='root';
+update mysql.global_priv set priv=json_compact(json_remove(priv, '$.plugin', '$.authentication_string'));
flush privileges;
# Load event table
set global event_scheduler=OFF;
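
Note (review): throughout these upgrade tests the old direct updates of mysql.user are replaced by JSON edits of mysql.global_priv, the 10.4 table behind the mysql.user view; its priv column is a JSON document carrying plugin, authentication_string and the other per-account attributes. A hedged sketch of the same manipulation follows; json_value is used only for inspection and is not part of the test, and the WHERE clauses are added for safety (the test itself updates every row).

  # read back the authentication settings of an account
  select user, host,
         json_value(priv, '$.plugin')                as plugin,
         json_value(priv, '$.authentication_string') as auth_string
  from mysql.global_priv where user = 'root';
  # set a native password, as the test does
  update mysql.global_priv
     set priv = json_set(priv, '$.plugin', 'mysql_native_password',
                               '$.authentication_string', password('foo'))
   where user = 'root';
  # and remove it again
  update mysql.global_priv
     set priv = json_compact(json_remove(priv, '$.plugin', '$.authentication_string'))
   where user = 'root';
  flush privileges;
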
diff --git a/mysql-test/main/mysql_upgrade.result b/mysql-test/main/mysql_upgrade.result
index 08efe0e8bc8..bd70c71e041 100644
--- a/mysql-test/main/mysql_upgrade.result
+++ b/mysql-test/main/mysql_upgrade.result
@@ -8,12 +8,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -31,9 +31,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
@@ -47,7 +47,7 @@ test
Phase 7/7: Running 'FLUSH PRIVILEGES'
OK
Run it again - should say already completed
-This installation of MySQL is already upgraded to VERSION, use --force if you still need to run mysql_upgrade
+This installation of MariaDB is already upgraded to VERSION, use --force if you still need to run mysql_upgrade
Force should run it regardless of whether it has been run before
Phase 1/7: Checking and upgrading mysql database
Processing databases
@@ -57,12 +57,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -80,9 +80,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
@@ -106,12 +106,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -129,9 +129,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
@@ -160,12 +160,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -183,9 +183,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
@@ -215,12 +215,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -238,9 +238,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
@@ -273,12 +273,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -296,9 +296,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
@@ -316,7 +316,7 @@ Grants for user3@%
GRANT USAGE ON *.* TO 'user3'@'%'
GRANT ALL PRIVILEGES ON `roelt`.`test2` TO 'user3'@'%'
DROP USER 'user3'@'%';
-End of 5.1 tests
+# End of 5.1 tests
The --upgrade-system-tables option was used, user tables won't be touched.
Phase 1/7: Checking and upgrading mysql database
Processing databases
@@ -326,12 +326,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -349,7 +349,6 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views... Skipped
Phase 4/7: Running 'mysql_fix_privilege_tables'
@@ -371,12 +370,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -394,9 +393,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
@@ -437,12 +436,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -460,9 +459,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
@@ -506,7 +505,7 @@ length(table_name)
79
79
drop table extralongname_extralongname_extralongname_extralongname_ext;
-End of 10.0 tests
+# End of 10.0 tests
set sql_mode=default;
# Dropping the previously created mysql_upgrade_info file..
create table test.t1(a int) engine=MyISAM;
@@ -520,12 +519,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -543,9 +542,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
@@ -570,18 +569,19 @@ t1 CREATE TABLE `t1` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
DROP TABLE test.t1;
SET GLOBAL enforce_storage_engine=NULL;
-End of 10.1 tests
-Start of 10.3 tests
+# End of 10.1 tests
+# End of 10.2 tests
#
# Ensure that mysql_upgrade correctly sets truncate_versioning_priv
# on upgrade from 10.2
#
-flush privileges;
+drop view mysql.user_bak;
CREATE USER 'user3'@'%';
GRANT USAGE ON *.* TO 'user3'@'%';
GRANT ALL PRIVILEGES ON `roelt`.`test2` TO 'user3'@'%';
alter table mysql.user drop column Delete_history_priv;
alter table mysql.db drop column Delete_history_priv;
+# restart
Run mysql_upgrade with all privileges on a user
flush privileges;
SHOW GRANTS FOR 'user3'@'%';
@@ -590,3 +590,44 @@ GRANT USAGE ON *.* TO 'user3'@'%'
GRANT ALL PRIVILEGES ON `roelt`.`test2` TO 'user3'@'%'
DROP USER 'user3'@'%';
update mysql.db set Delete_history_priv='Y' where db like 'test%';
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
+# End of 10.3 tests
+drop view mysql.user_bak;
+create user 'user3'@'localhost' identified with mysql_native_password as password('a_password');
+show create user user3@localhost;
+CREATE USER for user3@localhost
+CREATE USER 'user3'@'localhost' IDENTIFIED BY PASSWORD '*5DC1D11F45824A9DD613961F05C1EC1E7A1601AA'
+update mysql.user set password=authentication_string, authentication_string='' where user='user3';
+select password,plugin,authentication_string from mysql.user where user='user3';
+password plugin authentication_string
+*5DC1D11F45824A9DD613961F05C1EC1E7A1601AA mysql_native_password
+flush privileges;
+show create user user3@localhost;
+CREATE USER for user3@localhost
+CREATE USER 'user3'@'localhost' IDENTIFIED BY PASSWORD '*5DC1D11F45824A9DD613961F05C1EC1E7A1601AA' PASSWORD EXPIRE NEVER
+connect con1,localhost,user3,a_password;
+select current_user();
+current_user()
+user3@localhost
+disconnect con1;
+connection default;
+# mysql_upgrade --force --silent 2>&1
+show create user user3@localhost;
+CREATE USER for user3@localhost
+CREATE USER 'user3'@'localhost' IDENTIFIED BY PASSWORD '*5DC1D11F45824A9DD613961F05C1EC1E7A1601AA'
+connect con1,localhost,user3,a_password;
+select current_user();
+current_user()
+user3@localhost
+disconnect con1;
+connection default;
+drop user user3@localhost;
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
+drop view mysql.user_bak;
+drop table mysql.innodb_index_stats, mysql.innodb_table_stats;
+# mysql_upgrade --force --silent 2>&1
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
+# End of 10.4 tests
diff --git a/mysql-test/main/mysql_upgrade.test b/mysql-test/main/mysql_upgrade.test
index 0171fe6c7ba..3dfe38701aa 100644
--- a/mysql-test/main/mysql_upgrade.test
+++ b/mysql-test/main/mysql_upgrade.test
@@ -106,8 +106,7 @@ SHOW GRANTS FOR 'user3'@'%';
DROP USER 'user3'@'%';
---echo End of 5.1 tests
-
+--echo # End of 5.1 tests
#
# Test the --upgrade-system-tables option
@@ -192,7 +191,7 @@ create table extralongname_extralongname_extralongname_extralongname_ext (
select length(table_name) from mysql.innodb_table_stats;
drop table extralongname_extralongname_extralongname_extralongname_ext;
---echo End of 10.0 tests
+--echo # End of 10.0 tests
set sql_mode=default;
@@ -218,16 +217,17 @@ DROP TABLE test.t1;
--remove_file $MYSQLD_DATADIR/mysql_upgrade_info
SET GLOBAL enforce_storage_engine=NULL;
---echo End of 10.1 tests
+--echo # End of 10.1 tests
---echo Start of 10.3 tests
+--echo # End of 10.2 tests
--echo #
--echo # Ensure that mysql_upgrade correctly sets truncate_versioning_priv
--echo # on upgrade from 10.2
--echo #
-flush privileges;
+--source include/switch_to_mysql_user.inc
+drop view mysql.user_bak;
CREATE USER 'user3'@'%';
GRANT USAGE ON *.* TO 'user3'@'%';
GRANT ALL PRIVILEGES ON `roelt`.`test2` TO 'user3'@'%';
@@ -241,3 +241,45 @@ flush privileges;
SHOW GRANTS FOR 'user3'@'%';
DROP USER 'user3'@'%';
update mysql.db set Delete_history_priv='Y' where db like 'test%';
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
+
+--echo # End of 10.3 tests
+
+--source include/switch_to_mysql_user.inc
+drop view mysql.user_bak;
+create user 'user3'@'localhost' identified with mysql_native_password as password('a_password');
+show create user user3@localhost;
+update mysql.user set password=authentication_string, authentication_string='' where user='user3';
+select password,plugin,authentication_string from mysql.user where user='user3';
+flush privileges;
+show create user user3@localhost;
+connect con1,localhost,user3,a_password;
+select current_user();
+disconnect con1;
+connection default;
+--echo # mysql_upgrade --force --silent 2>&1
+--exec $MYSQL_UPGRADE --force --silent 2>&1
+--remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+show create user user3@localhost;
+connect con1,localhost,user3,a_password;
+select current_user();
+disconnect con1;
+connection default;
+drop user user3@localhost;
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
+
+#
+# MDEV-18275 Live upgrade from 5.5 does not work: InnoDB stat tables are used before creation
+#
+--source include/switch_to_mysql_user.inc
+drop view mysql.user_bak;
+drop table mysql.innodb_index_stats, mysql.innodb_table_stats;
+--echo # mysql_upgrade --force --silent 2>&1
+--exec $MYSQL_UPGRADE --force --silent 2>&1
+--remove_file $MYSQLD_DATADIR/mysql_upgrade_info
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
+
+--echo # End of 10.4 tests
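
Note (review): the repeated pattern in the 10.3/10.4 blocks above (source switch_to_mysql_user.inc, drop the mysql.user_bak view, mangle the tables to look like an older version, run mysql_upgrade, then restore mysql.global_priv from mysql.global_priv_bak) suggests the include swaps the 10.4 privilege storage for an old-style mysql.user table while keeping backups. The contents of include/switch_to_mysql_user.inc are not part of this diff, so the cycle below is only a reconstruction from the statements that are visible:

  # assumed effect of the include: global_priv backed up as global_priv_bak,
  # the mysql.user view replaced by an old-style table (old view kept as user_bak)
  --source include/switch_to_mysql_user.inc
  drop view mysql.user_bak;
  # ... simulate the old layout, then run the upgrade:
  # --exec $MYSQL_UPGRADE --force --silent 2>&1
  drop table mysql.global_priv;
  rename table mysql.global_priv_bak to mysql.global_priv;
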
diff --git a/mysql-test/main/mysql_upgrade_no_innodb.result b/mysql-test/main/mysql_upgrade_no_innodb.result
index 8e051bb7c16..35b55bb45e0 100644
--- a/mysql-test/main/mysql_upgrade_no_innodb.result
+++ b/mysql-test/main/mysql_upgrade_no_innodb.result
@@ -7,12 +7,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats
Error : Unknown storage engine 'InnoDB'
@@ -36,7 +36,6 @@ mysql.time_zone_transition_type OK
mysql.transaction_registry
Error : Unknown storage engine 'InnoDB'
error : Corrupt
-mysql.user OK
Repairing tables
mysql.innodb_index_stats
diff --git a/mysql-test/main/mysql_upgrade_noengine.result b/mysql-test/main/mysql_upgrade_noengine.result
index 7b3b1610ee0..79ad04e1849 100644
--- a/mysql-test/main/mysql_upgrade_noengine.result
+++ b/mysql-test/main/mysql_upgrade_noengine.result
@@ -1,3 +1,4 @@
+drop view mysql.user_bak;
install soname 'ha_blackhole';
install soname 'ha_archive';
create table t1 (a int) engine=blackhole;
@@ -61,12 +62,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv_bak OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -141,6 +142,9 @@ Warnings:
Level Warning
Code 1286
Message Unknown storage engine 'ARCHIVE'
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
+drop view mysql.user_bak;
alter table mysql.user drop column default_role, drop column max_statement_time;
Phase 1/7: Checking and upgrading mysql database
Processing databases
@@ -150,12 +154,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv_bak OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -230,6 +234,9 @@ Warnings:
Level Warning
Code 1286
Message Unknown storage engine 'ARCHIVE'
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
+drop view mysql.user_bak;
alter table mysql.user drop column default_role, drop column max_statement_time;
Phase 1/7: Checking and upgrading mysql database
Processing databases
@@ -239,12 +246,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv_bak OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -306,3 +313,5 @@ table_comment
drop table t1, t2;
uninstall plugin blackhole;
uninstall plugin archive;
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
diff --git a/mysql-test/main/mysql_upgrade_noengine.test b/mysql-test/main/mysql_upgrade_noengine.test
index 1d65e7ffa1c..e3c3e718591 100644
--- a/mysql-test/main/mysql_upgrade_noengine.test
+++ b/mysql-test/main/mysql_upgrade_noengine.test
@@ -11,6 +11,9 @@ if (!$HA_ARCHIVE_SO) {
skip Need Archive plugin;
}
+source include/switch_to_mysql_user.inc;
+drop view mysql.user_bak;
+
let $datadir= `select @@datadir`;
install soname 'ha_blackhole';
@@ -33,6 +36,11 @@ exec $MYSQL_UPGRADE 2>&1;
select table_catalog, table_schema, table_name, table_type, engine, row_format, table_rows, data_length, table_comment from information_schema.tables where table_schema='test' and table_name='t1';
select table_catalog, table_schema, table_name, table_type, engine, row_format, table_rows, data_length, table_comment from information_schema.tables where table_schema='test' and table_name='t2';
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
+source include/switch_to_mysql_user.inc;
+drop view mysql.user_bak;
+
# pretend it's an upgrade from 10.0
alter table mysql.user drop column default_role, drop column max_statement_time;
@@ -47,6 +55,11 @@ exec $MYSQL_UPGRADE 2>&1;
select table_catalog, table_schema, table_name, table_type, engine, row_format, table_rows, data_length, table_comment from information_schema.tables where table_schema='test' and table_name='t1';
select table_catalog, table_schema, table_name, table_type, engine, row_format, table_rows, data_length, table_comment from information_schema.tables where table_schema='test' and table_name='t2';
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
+source include/switch_to_mysql_user.inc;
+drop view mysql.user_bak;
+
alter table mysql.user drop column default_role, drop column max_statement_time;
remove_file $datadir/mysql_upgrade_info;
@@ -60,3 +73,6 @@ drop table t1, t2;
remove_file $datadir/mysql_upgrade_info;
uninstall plugin blackhole;
uninstall plugin archive;
+
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
diff --git a/mysql-test/main/mysql_upgrade_ssl.result b/mysql-test/main/mysql_upgrade_ssl.result
index 172a1401cdb..ff911e9f54b 100644
--- a/mysql-test/main/mysql_upgrade_ssl.result
+++ b/mysql-test/main/mysql_upgrade_ssl.result
@@ -9,12 +9,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -32,9 +32,9 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
Phase 4/7: Running 'mysql_fix_privilege_tables'
Phase 5/7: Fixing table and database names
Phase 6/7: Checking and upgrading tables
diff --git a/mysql-test/main/mysql_upgrade_view.result b/mysql-test/main/mysql_upgrade_view.result
index 813138b57a8..d22298c6ed0 100644
--- a/mysql-test/main/mysql_upgrade_view.result
+++ b/mysql-test/main/mysql_upgrade_view.result
@@ -71,12 +71,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats
Error : Unknown storage engine 'InnoDB'
@@ -100,7 +100,6 @@ mysql.time_zone_transition_type OK
mysql.transaction_registry
Error : Unknown storage engine 'InnoDB'
error : Corrupt
-mysql.user OK
Repairing tables
mysql.innodb_index_stats
@@ -114,6 +113,7 @@ Error : Unknown storage engine 'InnoDB'
error : Corrupt
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views
+mysql.user OK
test.v1 OK
test.v1badcheck OK
test.v2 OK
@@ -211,7 +211,7 @@ v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VI
show create view v4;
View Create View character_set_client collation_connection
v4 CREATE ALGORITHM=TEMPTABLE DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v4` AS select `t1`.`a` AS `a` from `t1` latin1 latin1_swedish_ci
-MySQL upgrade detected
+MariaDB upgrade detected
Phase 1/7: Checking and upgrading mysql database
Processing databases
mysql
@@ -221,12 +221,12 @@ mysql.db OK
mysql.ev_bk OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats
Error : Unknown storage engine 'InnoDB'
@@ -250,7 +250,6 @@ mysql.time_zone_transition_type OK
mysql.transaction_registry
Error : Unknown storage engine 'InnoDB'
error : Corrupt
-mysql.user OK
Repairing tables
mysql.innodb_index_stats
@@ -264,6 +263,7 @@ Error : Unknown storage engine 'InnoDB'
error : Corrupt
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views from mysql
+mysql.user OK
test.v1 OK
test.v2 OK
test.v3 OK
@@ -336,7 +336,7 @@ drop view v1,v2,v3,v4;
rename table mysql.event to mysql.ev_bk;
flush tables;
The --upgrade-system-tables option was used, user tables won't be touched.
-MySQL upgrade detected
+MariaDB upgrade detected
Phase 1/7: Checking and upgrading mysql database
Processing databases
mysql
@@ -346,12 +346,12 @@ mysql.db OK
mysql.ev_bk OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats
Error : Unknown storage engine 'InnoDB'
@@ -375,7 +375,6 @@ mysql.time_zone_transition_type OK
mysql.transaction_registry
Error : Unknown storage engine 'InnoDB'
error : Corrupt
-mysql.user OK
Repairing tables
mysql.innodb_index_stats
@@ -389,6 +388,7 @@ Error : Unknown storage engine 'InnoDB'
error : Corrupt
Phase 2/7: Installing used storage engines... Skipped
Phase 3/7: Fixing views from mysql
+mysql.user OK
test.v1 OK
test.v2 OK
test.v3 OK
diff --git a/mysql-test/main/mysqlcheck-master.opt b/mysql-test/main/mysqlcheck-master.opt
new file mode 100644
index 00000000000..38190455bbf
--- /dev/null
+++ b/mysql-test/main/mysqlcheck-master.opt
@@ -0,0 +1 @@
+--use-stat-tables=NEVER
diff --git a/mysql-test/main/mysqlcheck.result b/mysql-test/main/mysqlcheck.result
index 7259b68be50..02ab2af9fc1 100644
--- a/mysql-test/main/mysqlcheck.result
+++ b/mysql-test/main/mysqlcheck.result
@@ -9,12 +9,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -32,7 +32,6 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
mtr.global_suppressions Table is already up to date
mtr.test_suppressions Table is already up to date
mysql.column_stats OK
@@ -40,12 +39,12 @@ mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats
note : Table does not support optimize, doing recreate + analyze instead
@@ -69,18 +68,17 @@ mysql.time_zone_transition_type OK
mysql.transaction_registry
note : Table does not support optimize, doing recreate + analyze instead
status : OK
-mysql.user OK
mysql.column_stats OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
+mysql.global_priv OK
mysql.gtid_slave_pos OK
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
-mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -98,18 +96,17 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.transaction_registry OK
-mysql.user OK
mysql.column_stats Table is already up to date
mysql.columns_priv Table is already up to date
mysql.db Table is already up to date
mysql.event Table is already up to date
mysql.func Table is already up to date
+mysql.global_priv Table is already up to date
mysql.gtid_slave_pos Table is already up to date
mysql.help_category Table is already up to date
mysql.help_keyword Table is already up to date
mysql.help_relation Table is already up to date
mysql.help_topic Table is already up to date
-mysql.host Table is already up to date
mysql.index_stats Table is already up to date
mysql.innodb_index_stats
note : Table does not support optimize, doing recreate + analyze instead
@@ -133,7 +130,6 @@ mysql.time_zone_transition_type Table is already up to date
mysql.transaction_registry
note : Table does not support optimize, doing recreate + analyze instead
status : OK
-mysql.user Table is already up to date
create table t1 (a int) engine=myisam;
create view v1 as select * from t1;
test.t1 OK
@@ -433,12 +429,12 @@ mysql.columns_priv Table is already up to date
mysql.db Table is already up to date
mysql.event Table is already up to date
mysql.func Table is already up to date
+mysql.global_priv Table is already up to date
mysql.gtid_slave_pos Table is already up to date
mysql.help_category Table is already up to date
mysql.help_keyword Table is already up to date
mysql.help_relation Table is already up to date
mysql.help_topic Table is already up to date
-mysql.host Table is already up to date
mysql.index_stats Table is already up to date
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
@@ -456,7 +452,6 @@ mysql.time_zone_name Table is already up to date
mysql.time_zone_transition Table is already up to date
mysql.time_zone_transition_type Table is already up to date
mysql.transaction_registry OK
-mysql.user Table is already up to date
mysqltest1.t1
warning : Table is marked as crashed
warning : Size of datafile is: 4 Should be: 0
@@ -473,11 +468,11 @@ drop database mysqltest1;
#
create table t1(a int);
insert into t1 (a) values (1), (2), (3);
-select * from mysql.column_stats;
-db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+select db_name,table_name,column_name,min_value,max_value,nulls_ratio,avg_length,avg_frequency,hist_size,hist_type from mysql.column_stats order by db_name,table_name;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type
test.t1 Engine-independent statistics collected
status : OK
-select * from mysql.column_stats where db_name = 'test' and table_name = 't1';
-db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
-test t1 a 1 3 0.0000 4.0000 1.0000 0 NULL NULL
+select db_name,table_name,column_name,min_value,max_value,nulls_ratio,avg_length,avg_frequency,hist_size,hist_type from mysql.column_stats where db_name = 'test' and table_name = 't1' order by db_name,table_name;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type
+test t1 a 1 3 0.0000 4.0000 1.0000 254 DOUBLE_PREC_HB
drop table t1;
diff --git a/mysql-test/main/mysqlcheck.test b/mysql-test/main/mysqlcheck.test
index 779ea8d13d4..d1d57023b5a 100644
--- a/mysql-test/main/mysqlcheck.test
+++ b/mysql-test/main/mysqlcheck.test
@@ -403,7 +403,7 @@ drop database mysqltest1;
--echo #
create table t1(a int);
insert into t1 (a) values (1), (2), (3);
-select * from mysql.column_stats;
+select db_name,table_name,column_name,min_value,max_value,nulls_ratio,avg_length,avg_frequency,hist_size,hist_type from mysql.column_stats order by db_name,table_name;
--exec $MYSQL_CHECK --analyze test t1 --persistent
-select * from mysql.column_stats where db_name = 'test' and table_name = 't1';
+select db_name,table_name,column_name,min_value,max_value,nulls_ratio,avg_length,avg_frequency,hist_size,hist_type from mysql.column_stats where db_name = 'test' and table_name = 't1' order by db_name,table_name;
drop table t1;
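
Note (review): the mysqlcheck test now passes --persistent with --analyze so the collected statistics land in mysql.column_stats, and it selects an explicit column list, presumably because the binary histogram column (254-byte DOUBLE_PREC_HB by default now) is not stable enough to diff. A server-side equivalent, as a hedged sketch using ANALYZE TABLE ... PERSISTENT:

  create table t1 (a int);
  insert into t1 (a) values (1), (2), (3);
  analyze table t1 persistent for all;
  select db_name, table_name, column_name, min_value, max_value,
         nulls_ratio, avg_length, avg_frequency, hist_size, hist_type
  from mysql.column_stats
  where db_name = 'test' and table_name = 't1';
  drop table t1;
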
diff --git a/mysql-test/main/mysqld--help,win.rdiff b/mysql-test/main/mysqld--help,win.rdiff
index 89d2cd36112..6aba3acab28 100644
--- a/mysql-test/main/mysqld--help,win.rdiff
+++ b/mysql-test/main/mysqld--help,win.rdiff
@@ -1,6 +1,6 @@
---- a/mysql-test/r/mysqld--help.result
-+++ b/mysql-test/r/mysqld--help.result
-@@ -381,7 +381,6 @@ The following options may be given as the first argument:
+--- mysqld--help.result 2018-08-12 00:10:13.694793500 +0100
++++ mysqld--help,win.reject 2018-08-16 20:31:08.353317200 +0100
+@@ -386,7 +386,6 @@
The number of segments in a key cache
-L, --language=name Client error messages in given language. May be given as
a full path. Deprecated. Use --lc-messages-dir instead.
@@ -8,7 +8,7 @@
--lc-messages=name Set the language used for the error messages.
-L, --lc-messages-dir=name
Directory where error messages are
-@@ -603,6 +602,7 @@ The following options may be given as the first argument:
+@@ -608,6 +607,7 @@
Use MySQL-5.6 (instead of MariaDB-5.3) format for TIME,
DATETIME, TIMESTAMP columns.
(Defaults to on; use --skip-mysql56-temporal-format to disable.)
@@ -16,17 +16,7 @@
--net-buffer-length=#
Buffer length for TCP/IP and socket communication
--net-read-timeout=#
-@@ -1048,6 +1048,9 @@ The following options may be given as the first argument:
- characteristics (isolation level, read only/read
- write,snapshot - but not any work done / data modified
- within the transaction).
-+ --shared-memory Enable the shared memory
-+ --shared-memory-base-name=name
-+ Base name of shared memory
- --show-slave-auth-info
- Show user and password in SHOW SLAVE HOSTS on this
- master.
-@@ -1171,6 +1174,10 @@ The following options may be given as the first argument:
+@@ -1188,6 +1188,10 @@
Log slow queries to given log file. Defaults logging to
'hostname'-slow.log. Must be enabled to activate other
slow log options
@@ -37,7 +27,7 @@
--socket=name Socket file to use for connection
--sort-buffer-size=#
Each thread that needs to do a sort allocates a buffer of
-@@ -1190,6 +1197,7 @@ The following options may be given as the first argument:
+@@ -1207,6 +1211,7 @@
EMPTY_STRING_IS_NULL, SIMULTANEOUS_ASSIGNMENT
--stack-trace Print a symbolic stack trace on failure
(Defaults to on; use --skip-stack-trace to disable.)
@@ -45,7 +35,7 @@
--standard-compliant-cte
Allow only CTEs compliant to SQL standard
(Defaults to on; use --skip-standard-compliant-cte to disable.)
-@@ -1257,6 +1265,11 @@ The following options may be given as the first argument:
+@@ -1277,6 +1282,11 @@
--thread-pool-max-threads=#
Maximum allowed number of worker threads in the thread
pool
@@ -57,7 +47,7 @@
--thread-pool-oversubscribe=#
How many additional active worker threads in a group are
allowed.
-@@ -1295,8 +1308,8 @@ The following options may be given as the first argument:
+@@ -1315,8 +1325,8 @@
automatically convert it to an on-disk MyISAM or Aria
table.
-t, --tmpdir=name Path for temporary files. Several paths may be specified,
@@ -68,7 +58,7 @@
--transaction-alloc-block-size=#
Allocation block size for transactions to be stored in
binary log
-@@ -1430,7 +1443,6 @@ key-cache-block-size 1024
+@@ -1451,7 +1461,6 @@
key-cache-division-limit 100
key-cache-file-hash-size 512
key-cache-segments 0
@@ -76,7 +66,7 @@
lc-messages en_US
lc-messages-dir MYSQL_SHAREDIR/
lc-time-names en_US
-@@ -1502,6 +1514,7 @@ myisam-sort-buffer-size 134216704
+@@ -1523,6 +1532,7 @@
myisam-stats-method NULLS_UNEQUAL
myisam-use-mmap FALSE
mysql56-temporal-format TRUE
@@ -84,16 +74,7 @@
net-buffer-length 16384
net-read-timeout 30
net-retry-count 10
-@@ -1612,6 +1625,8 @@ session-track-schema TRUE
- session-track-state-change FALSE
- session-track-system-variables autocommit,character_set_client,character_set_connection,character_set_results,time_zone
- session-track-transaction-info OFF
-+shared-memory FALSE
-+shared-memory-base-name MYSQL
- show-slave-auth-info FALSE
- silent-startup FALSE
- skip-grant-tables TRUE
-@@ -1638,6 +1653,7 @@ slave-transaction-retry-interval 0
+@@ -1660,6 +1670,7 @@
slave-type-conversions
slow-launch-time 2
slow-query-log FALSE
@@ -101,7 +82,7 @@
sort-buffer-size 2097152
sql-mode STRICT_TRANS_TABLES,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
stack-trace TRUE
-@@ -1652,9 +1668,9 @@ sync-relay-log 10000
+@@ -1674,9 +1685,9 @@
sync-relay-log-info 10000
sysdate-is-now FALSE
system-versioning-alter-history ERROR
@@ -113,7 +94,7 @@
table-open-cache-instances 8
tc-heuristic-recover OFF
tcp-keepalive-interval 0
-@@ -1663,6 +1679,8 @@ tcp-keepalive-time 0
+@@ -1686,6 +1697,8 @@
thread-cache-size 151
thread-pool-idle-timeout 60
thread-pool-max-threads 65536
diff --git a/mysql-test/main/mysqld--help.result b/mysql-test/main/mysqld--help.result
index 1c7e9cd839d..150e2af1fbc 100644
--- a/mysql-test/main/mysqld--help.result
+++ b/mysql-test/main/mysqld--help.result
@@ -15,6 +15,10 @@ The following specify which files/extra groups are read (specified before remain
--alter-algorithm[=name]
Specify the alter table algorithm. One of: DEFAULT, COPY,
INPLACE, NOCOPY, INSTANT
+ --analyze-sample-percentage=#
+ Percentage of rows from the table ANALYZE TABLE will
+ sample to collect table statistics. Set to 0 to let
+ MariaDB decide what percentage of rows to sample.
-a, --ansi Use ANSI SQL syntax instead of MySQL syntax. This mode
will also set transaction isolation level 'serializable'.
--auto-increment-increment[=#]
@@ -184,6 +188,12 @@ The following specify which files/extra groups are read (specified before remain
--deadlock-timeout-short=#
Short timeout for the two-step deadlock detection (in
microseconds)
+ --default-password-lifetime=#
+ This defines the global password expiration policy. 0
+ means automatic password expiration is disabled. If the
+ value is a positive integer N, the passwords must be
+ changed every N days. This behavior can be overridden
+ using the password expiration options in ALTER USER.
--default-regex-flags=name
Default flags for the regex library. Any combination of:
DOTALL, DUPNAMES, EXTENDED, EXTRA, MULTILINE, UNGREEDY
@@ -220,6 +230,11 @@ The following specify which files/extra groups are read (specified before remain
handling INSERT DELAYED. If the queue becomes full, any
client that does INSERT DELAYED will wait until there is
room in the queue again
+ --disconnect-on-expired-password
+ This variable controls how the server handles clients
+ that are not aware of the sandbox mode. If enabled, the
+ server disconnects the client, otherwise the server puts
+ the client in a sandbox mode.
--div-precision-increment=#
Precision of the result of '/' operator will be increased
on that value
@@ -294,6 +309,15 @@ The following specify which files/extra groups are read (specified before remain
--group-concat-max-len=#
The maximum length of the result of function
GROUP_CONCAT()
+ --gtid-cleanup-batch-size=#
+ Normally does not need tuning. How many old rows must
+ accumulate in the mysql.gtid_slave_pos table before a
+ background job will be run to delete them. Can be
+ increased to reduce number of commits if using many
+ different engines with --gtid_pos_auto_engines, or to
+ reduce CPU overhead if using a huge number of different
+ gtid_domain_ids. Can be decreased to reduce number of old
+ rows in the table.
--gtid-domain-id=# Used with global transaction ID to identify logically
independent replication streams. When events can
propagate through multiple parallel paths (for example
@@ -537,6 +561,10 @@ The following specify which files/extra groups are read (specified before remain
The maximum BLOB length to send to server from
mysql_send_long_data API. Deprecated option; use
max_allowed_packet instead.
+ --max-password-errors=#
+ If there is more than this number of failed connect
+ attempts due to invalid password, user will be blocked
+ from further connections until FLUSH_PRIVILEGES.
--max-prepared-stmt-count=#
Maximum number of prepared statements in the server
--max-recursive-iterations[=#]
@@ -546,6 +574,8 @@ The following specify which files/extra groups are read (specified before remain
relay log will be rotated automatically when the size
exceeds this value. If 0 at startup, it's set to
max_binlog_size
+ --max-rowid-filter-size=#
+ The maximum size of the container of a rowid filter
--max-seeks-for-key=#
Limit assumed max number of seeks when looking up rows
based on a key
@@ -678,7 +708,15 @@ The following specify which files/extra groups are read (specified before remain
join_cache_hashed, join_cache_bka,
optimize_join_buffer_size, table_elimination,
extended_keys, exists_to_in, orderby_uses_equalities,
- condition_pushdown_for_derived, split_materialized
+ condition_pushdown_for_derived, split_materialized,
+ condition_pushdown_for_subquery, rowid_filter,
+ condition_pushdown_from_having
+ --optimizer-trace=name
+ Controls tracing of the Optimizer:
+ optimizer_trace=option=val[,option=val...], where option
+ is one of {enabled} and val is one of {on, off, default}
+ --optimizer-trace-max-mem-size=#
+ Maximum allowed size of an optimizer trace
--optimizer-use-condition-selectivity=#
Controls selectivity of which conditions the optimizer
takes into account to calculate cardinality of a partial
@@ -1169,8 +1207,10 @@ The following specify which files/extra groups are read (specified before remain
--slave-transaction-retry-errors=name
Tells the slave thread to retry transaction for
replication when a query event returns an error from the
- provided list. Deadlock and elapsed lock wait timeout
- errors are automatically added to this list
+ provided list. Deadlock error, elapsed lock wait timeout,
+ net read error, net read timeout, net write error, net
+ write timeout, connect error and 2 types of lost
+ connection error are automatically added to this list
--slave-transaction-retry-interval=#
Interval of the slave SQL thread will retry a transaction
in case it failed with a deadlock or elapsed lock wait
@@ -1208,7 +1248,8 @@ The following specify which files/extra groups are read (specified before remain
ERROR_FOR_DIVISION_BY_ZERO, TRADITIONAL,
NO_AUTO_CREATE_USER, HIGH_NOT_PRECEDENCE,
NO_ENGINE_SUBSTITUTION, PAD_CHAR_TO_FULL_LENGTH,
- EMPTY_STRING_IS_NULL, SIMULTANEOUS_ASSIGNMENT
+ EMPTY_STRING_IS_NULL, SIMULTANEOUS_ASSIGNMENT,
+ TIME_ROUND_FRACTIONAL
--sql-safe-updates If set to 1, UPDATEs and DELETEs need either a key in the
WHERE clause, or a LIMIT clause, or else they will
aborted. Prevents the common mistake of accidentally
@@ -1273,6 +1314,9 @@ The following specify which files/extra groups are read (specified before remain
first TCP keep-alive packet is sent.If set to 0, system
dependent default is used. (Automatically configured
unless set explicitly)
+ --tcp-nodelay Set option TCP_NODELAY (disable Nagle's algorithm) on
+ socket
+ (Defaults to on; use --skip-tcp-nodelay to disable.)
--thread-cache-size=#
How many threads we should keep in a cache for reuse.
These are freed after 5 minutes of idle time
@@ -1344,7 +1388,8 @@ The following specify which files/extra groups are read (specified before remain
(usually get from GUI tools)
--use-stat-tables=name
Specifies how to use system statistics tables. One of:
- NEVER, COMPLEMENTARY, PREFERABLY
+ NEVER, COMPLEMENTARY, PREFERABLY,
+ COMPLEMENTARY_FOR_QUERIES, PREFERABLY_FOR_QUERIES
-u, --user=name Run mysqld daemon as user.
--userstat Enables statistics gathering for USER_STATISTICS,
CLIENT_STATISTICS, INDEX_STATISTICS and TABLE_STATISTICS
@@ -1358,6 +1403,7 @@ The following specify which files/extra groups are read (specified before remain
Variables (--variable-name=value)
allow-suspicious-udfs FALSE
alter-algorithm DEFAULT
+analyze-sample-percentage 100
auto-increment-increment 1
auto-increment-offset 1
autocommit TRUE
@@ -1395,6 +1441,7 @@ deadlock-search-depth-long 15
deadlock-search-depth-short 4
deadlock-timeout-long 50000000
deadlock-timeout-short 10000
+default-password-lifetime 0
default-regex-flags
default-storage-engine myisam
default-time-zone (No default value)
@@ -1404,12 +1451,13 @@ delay-key-write ON
delayed-insert-limit 100
delayed-insert-timeout 300
delayed-queue-size 1000
+disconnect-on-expired-password FALSE
div-precision-increment 4
encrypt-binlog FALSE
encrypt-tmp-disk-tables FALSE
encrypt-tmp-files FALSE
enforce-storage-engine (No default value)
-eq-range-index-dive-limit 0
+eq-range-index-dive-limit 200
event-scheduler OFF
expensive-subquery-limit 100
expire-logs-days 0
@@ -1429,13 +1477,14 @@ gdb FALSE
general-log FALSE
getopt-prefix-matching FALSE
group-concat-max-len 1048576
+gtid-cleanup-batch-size 64
gtid-domain-id 0
gtid-ignore-duplicates FALSE
gtid-pos-auto-engines
gtid-strict-mode FALSE
help TRUE
-histogram-size 0
-histogram-type SINGLE_PREC_HB
+histogram-size 254
+histogram-type DOUBLE_PREC_HB
host-cache-size 279
idle-readonly-transaction-timeout 0
idle-transaction-timeout 0
@@ -1502,9 +1551,11 @@ max-heap-table-size 16777216
max-join-size 18446744073709551615
max-length-for-sort-data 1024
max-long-data-size 16777216
+max-password-errors 18446744073709551615
max-prepared-stmt-count 16382
max-recursive-iterations 18446744073709551615
max-relay-log-size 1073741824
+max-rowid-filter-size 131072
max-seeks-for-key 18446744073709551615
max-session-mem-used 9223372036854775807
max-sort-length 1024
@@ -1541,8 +1592,10 @@ old-style-user-limits FALSE
optimizer-prune-level 1
optimizer-search-depth 62
optimizer-selectivity-sampling-limit 100
-optimizer-switch index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
-optimizer-use-condition-selectivity 1
+optimizer-switch index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
+optimizer-trace
+optimizer-trace-max-mem-size 1048576
+optimizer-use-condition-selectivity 4
performance-schema FALSE
performance-schema-accounts-size -1
performance-schema-consumer-events-stages-current FALSE
@@ -1579,7 +1632,7 @@ performance-schema-max-rwlock-instances -1
performance-schema-max-socket-classes 10
performance-schema-max-socket-instances -1
performance-schema-max-stage-classes 160
-performance-schema-max-statement-classes 200
+performance-schema-max-statement-classes 202
performance-schema-max-table-handles -1
performance-schema-max-table-instances -1
performance-schema-max-thread-classes 50
@@ -1661,7 +1714,7 @@ slave-run-triggers-for-rbr NO
slave-skip-errors OFF
slave-sql-verify-checksum TRUE
slave-transaction-retries 10
-slave-transaction-retry-errors 1213,1205
+slave-transaction-retry-errors 1158,1159,1160,1161,1205,1213,1429,2013,12701
slave-transaction-retry-interval 0
slave-type-conversions
slow-launch-time 2
@@ -1689,6 +1742,7 @@ tc-heuristic-recover OFF
tcp-keepalive-interval 0
tcp-keepalive-probes 0
tcp-keepalive-time 0
+tcp-nodelay TRUE
thread-cache-size 151
thread-pool-idle-timeout 60
thread-pool-max-threads 65536
@@ -1707,7 +1761,7 @@ transaction-isolation REPEATABLE-READ
transaction-prealloc-size 4096
transaction-read-only FALSE
updatable-views-with-limit YES
-use-stat-tables NEVER
+use-stat-tables PREFERABLY_FOR_QUERIES
userstat FALSE
verbose TRUE
wait-timeout 28800
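
Note (review): among the options added to the help output above is the new optimizer trace (--optimizer-trace, --optimizer-trace-max-mem-size). A hedged usage sketch follows; only the option syntax appears in the help text, so the INFORMATION_SCHEMA.OPTIMIZER_TRACE table and the throwaway opt_demo table are assumptions of this sketch.

  set optimizer_trace='enabled=on';
  create table opt_demo (a int, key(a));
  select * from opt_demo where a > 10;
  # the trace of the last traced statement is expected in information_schema
  select query, trace from information_schema.optimizer_trace;
  set optimizer_trace='enabled=off';
  drop table opt_demo;
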
diff --git a/mysql-test/main/mysqldump.result b/mysql-test/main/mysqldump.result
index 556245df9cd..41a719c6a94 100644
--- a/mysql-test/main/mysqldump.result
+++ b/mysql-test/main/mysqldump.result
@@ -2750,6 +2750,8 @@ CREATE PROCEDURE bug9056_proc2(OUT a INT)
BEGIN
select sum(id) from t1 into a;
END //
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
set sql_mode='ansi';
create procedure `a'b` () select 1;
set sql_mode='';
@@ -5161,16 +5163,16 @@ USE BUG52792;
SET NAMES utf8;
CREATE FUNCTION `straße` ( c1 CHAR(20))
RETURNS CHAR(50) DETERMINISTIC
-RETURN CONCAT(']]>, ', s, '!');
+RETURN CONCAT(']]>, ', c1, '!');
<?xml version="1.0"?>
<mysqldump xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<database name="BUG52792">
<routines>
<routine Function="straße" sql_mode="" character_set_client="utf8" collation_connection="utf8_general_ci" Database_Collation="latin1_swedish_ci">
<![CDATA[
-CREATE DEFINER=`root`@`localhost` FUNCTION `straße`( c1 CHAR(20)) RETURNS char(50) CHARSET latin1
+CREATE DEFINER=`root`@`localhost` FUNCTION `straße`(c1 CHAR(20)) RETURNS char(50) CHARSET latin1
DETERMINISTIC
-RETURN CONCAT(']]]]><![CDATA[>, ', s, '!')
+RETURN CONCAT(']]]]><![CDATA[>, ', c1, '!')
]]>
</routine>
</routines>
diff --git a/mysql-test/main/mysqldump.test b/mysql-test/main/mysqldump.test
index dbd32f3e74a..e2d9cc74d32 100644
--- a/mysql-test/main/mysqldump.test
+++ b/mysql-test/main/mysqldump.test
@@ -707,7 +707,7 @@ drop table t1;
--echo # Bug#15328 Segmentation fault occurred if my.cnf is invalid for escape sequence
--echo #
---exec $MYSQL_MY_PRINT_DEFAULTS --config-file=$MYSQL_TEST_DIR/std_data/bug15328.cnf mysqldump
+--exec $MYSQL_MY_PRINT_DEFAULTS --defaults-file=$MYSQL_TEST_DIR/std_data/bug15328.cnf mysqldump
--echo #
--echo # Bug#19025 mysqldump doesn't correctly dump "auto_increment = [int]"
@@ -2362,7 +2362,7 @@ USE BUG52792;
SET NAMES utf8;
CREATE FUNCTION `straße` ( c1 CHAR(20))
RETURNS CHAR(50) DETERMINISTIC
-RETURN CONCAT(']]>, ', s, '!');
+RETURN CONCAT(']]>, ', c1, '!');
--exec $MYSQL_DUMP --character-sets-dir=$CHARSETSDIR --skip-comments --default-character-set=utf8 --compatible=mysql323 -R -X BUG52792
diff --git a/mysql-test/main/negation_elimination.result b/mysql-test/main/negation_elimination.result
index 1b08baee60a..6e40074c317 100644
--- a/mysql-test/main/negation_elimination.result
+++ b/mysql-test/main/negation_elimination.result
@@ -4,7 +4,7 @@ insert into t1 values (NULL), (0), (1), (2), (3), (4), (5), (6), (7), (8), (9),
(10), (11), (12), (13), (14), (15), (16), (17), (18), (19);
explain select * from t1 where not(not(a));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 20 Using where; Using index
+1 SIMPLE t1 index a a 5 NULL 21 Using where; Using index
select * from t1 where not(not(a));
a
1
@@ -28,7 +28,7 @@ a
19
explain select * from t1 where not(not(not(a > 10)));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 10 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 11 Using where; Using index
select * from t1 where not(not(not(a > 10)));
a
0
@@ -44,7 +44,7 @@ a
10
explain select * from t1 where not(not(not(a < 5) and not(a > 10)));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 5 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 6 Using where; Using index
select * from t1 where not(not(not(a < 5) and not(a > 10)));
a
5
@@ -55,7 +55,7 @@ a
10
explain select * from t1 where not(a = 10);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 19 Using where; Using index
+1 SIMPLE t1 index a a 5 NULL 21 Using where; Using index
select * from t1 where not(a = 10);
a
0
@@ -85,7 +85,7 @@ a
1
explain select * from t1 where not(a < 10);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 11 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 10 Using where; Using index
select * from t1 where not(a < 10);
a
10
@@ -100,7 +100,7 @@ a
19
explain select * from t1 where not(a >= 10);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 9 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 10 Using where; Using index
select * from t1 where not(a >= 10);
a
0
@@ -115,7 +115,7 @@ a
9
explain select * from t1 where not(a > 10);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 10 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 11 Using where; Using index
select * from t1 where not(a > 10);
a
0
@@ -131,7 +131,7 @@ a
10
explain select * from t1 where not(a <= 10);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 10 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 9 Using where; Using index
select * from t1 where not(a <= 10);
a
11
@@ -145,7 +145,7 @@ a
19
explain select * from t1 where not(a is null);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 20 Using where; Using index
+1 SIMPLE t1 index a a 5 NULL 21 Using where; Using index
select * from t1 where not(a is null);
a
0
@@ -176,7 +176,7 @@ a
NULL
explain select * from t1 where not(a < 5 or a > 15);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 10 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 11 Using where; Using index
select * from t1 where not(a < 5 or a > 15);
a
5
@@ -192,7 +192,7 @@ a
15
explain select * from t1 where not(a < 15 and a > 5);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 12 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 11 Using where; Using index
select * from t1 where not(a < 15 and a > 5);
a
0
@@ -208,7 +208,7 @@ a
19
explain select * from t1 where a = 2 or not(a < 10);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 12 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 11 Using where; Using index
select * from t1 where a = 2 or not(a < 10);
a
2
@@ -224,7 +224,7 @@ a
19
explain select * from t1 where a > 5 and not(a > 10);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 4 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 5 Using where; Using index
select * from t1 where a > 5 and not(a > 10);
a
6
@@ -255,7 +255,7 @@ a
19
explain select * from t1 where a = 2 or not(a < 5 or a > 15);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 11 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 12 Using where; Using index
select * from t1 where a = 2 or not(a < 5 or a > 15);
a
2
@@ -272,7 +272,7 @@ a
15
explain select * from t1 where a = 7 or not(a < 15 and a > 5);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 13 Using where; Using index
+1 SIMPLE t1 index a a 5 NULL 21 Using where; Using index
select * from t1 where a = 7 or not(a < 15 and a > 5);
a
0
@@ -289,7 +289,7 @@ a
19
explain select * from t1 where NULL or not(a < 15 and a > 5);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 12 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 11 Using where; Using index
select * from t1 where NULL or not(a < 15 and a > 5);
a
0
@@ -327,7 +327,7 @@ a
0
explain select * from t1 where not((a < 5 or a < 10) and (not(a > 16) or a > 17));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 11 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 10 Using where; Using index
select * from t1 where not((a < 5 or a < 10) and (not(a > 16) or a > 17));
a
10
@@ -362,7 +362,7 @@ a
19
explain select * from t1 where ((a between 5 and 15) and (not(a like 10)));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 10 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 11 Using where; Using index
select * from t1 where ((a between 5 and 15) and (not(a like 10)));
a
5
@@ -500,7 +500,7 @@ NULL NULL
3 1
explain extended select a, not(not(a)), not(a <= 2 and not(a)), not(a not like "1"), not (a not in (1,2)), not(a != 2) from t1 where not(not(a)) having not(not(a));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range a a 5 NULL 4 100.00 Using where; Using index
+1 SIMPLE t1 index a a 5 NULL 5 80.00 Using where; Using index
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`a` <> 0 AS `not(not(a))`,`test`.`t1`.`a` > 2 or `test`.`t1`.`a` <> 0 AS `not(a <= 2 and not(a))`,`test`.`t1`.`a` like '1' AS `not(a not like "1")`,`test`.`t1`.`a` in (1,2) AS `not (a not in (1,2))`,`test`.`t1`.`a` = 2 AS `not(a != 2)` from `test`.`t1` where `test`.`t1`.`a` <> 0 having `test`.`t1`.`a` <> 0
drop table t1;
diff --git a/mysql-test/main/not_embedded_server.result b/mysql-test/main/not_embedded_server.result
index ba68ca8619b..bc794ce48c8 100644
--- a/mysql-test/main/not_embedded_server.result
+++ b/mysql-test/main/not_embedded_server.result
@@ -1,4 +1,4 @@
-call mtr.add_suppression("Can't open and lock privilege tables: Table 'user' was not locked with LOCK TABLES");
+call mtr.add_suppression("Can't open and lock privilege tables: Table 'db' was not locked with LOCK TABLES");
SHOW VARIABLES like 'slave_skip_errors';
Variable_name Value
slave_skip_errors OFF
@@ -16,7 +16,7 @@ DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (c1 INT);
LOCK TABLES t1 READ;
FLUSH PRIVILEGES;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'db' was not locked with LOCK TABLES
UNLOCK TABLES;
DROP TABLE t1;
#
diff --git a/mysql-test/main/not_embedded_server.test b/mysql-test/main/not_embedded_server.test
index b2cbdba6850..20f75db25d5 100644
--- a/mysql-test/main/not_embedded_server.test
+++ b/mysql-test/main/not_embedded_server.test
@@ -6,7 +6,7 @@
# End of 4.1 tests
-call mtr.add_suppression("Can't open and lock privilege tables: Table 'user' was not locked with LOCK TABLES");
+call mtr.add_suppression("Can't open and lock privilege tables: Table 'db' was not locked with LOCK TABLES");
#
# Bug#43835: SHOW VARIABLES does not include 0 for slave_skip_errors
diff --git a/mysql-test/main/null.result b/mysql-test/main/null.result
index 2fa89a2d001..fc29f68baf0 100644
--- a/mysql-test/main/null.result
+++ b/mysql-test/main/null.result
@@ -187,7 +187,7 @@ Warnings:
Warning 1265 Data truncated for column 'i' at row 513
explain select * from t1 where i=2 or i is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref i i 4 const 7 Using index
+1 SIMPLE t1 ref i i 4 const 8 Using index
select count(*) from t1 where i=2 or i is null;
count(*)
9
@@ -515,9 +515,9 @@ NULLIF(TIMESTAMP'2001-01-01 00:00:00',1e0),
NULLIF(TIMESTAMP'2001-01-01 00:00:00','2001-01-01'),
NULLIF(TIMESTAMP'2001-01-01 00:00:00',TIME'00:00:00');
Warnings:
-Warning 1292 Incorrect datetime value: '1'
-Warning 1292 Incorrect datetime value: '1.0'
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1.0'
+Warning 1292 Truncated incorrect datetime value: '1'
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -536,9 +536,9 @@ NULLIF(DATE'2001-01-01',1e0),
NULLIF(DATE'2001-01-01','2001-01-01'),
NULLIF(DATE'2001-01-01',TIME'00:00:00');
Warnings:
-Warning 1292 Incorrect datetime value: '1'
-Warning 1292 Incorrect datetime value: '1.0'
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1.0'
+Warning 1292 Truncated incorrect datetime value: '1'
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1640,8 +1640,8 @@ CASE WHEN TIMESTAMP'2001-01-01 00:00:00'=1 THEN NULL
ELSE TIMESTAMP'2001-01-01 00:00:00'
END AS b;
Warnings:
-Warning 1292 Incorrect datetime value: '1'
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
DROP TABLE t1;
#
# MDEV-8785 Wrong results for EXPLAIN EXTENDED...WHERE NULLIF(latin1_col, _utf8'a' COLLATE utf8_bin) IS NOT NULL
diff --git a/mysql-test/main/null_key.result b/mysql-test/main/null_key.result
index 4b4a4b80dee..8eca97d5e70 100644
--- a/mysql-test/main/null_key.result
+++ b/mysql-test/main/null_key.result
@@ -18,9 +18,9 @@ id select_type table type possible_keys key key_len ref rows Extra
explain select * from t1 where a<=>b limit 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL a 9 NULL 12 Using where; Using index
-explain select * from t1 where (a is null or a > 0 and a < 3) and b < 5 limit 3;
+explain select * from t1 where (a is null or a > 0 and a < 2) and b < 5 limit 3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a,b a 9 NULL 3 Using where; Using index
+1 SIMPLE t1 range a,b a 9 NULL 2 Using where; Using index
explain select * from t1 where (a is null or a = 7) and b=7;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref_or_null a,b a 9 const,const 2 Using where; Using index
@@ -257,10 +257,11 @@ uniq_id int(10) unsigned default NULL,
PRIMARY KEY (id)
) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,NULL),(2,NULL),(3,1),(4,2),(5,NULL),(6,NULL),(7,3),(8,4),(9,NULL),(10,NULL);
+INSERT INTO t1 VALUES (11,5),(12,6),(13,7),(14,8),(15,9);
INSERT INTO t2 VALUES (1,NULL),(2,NULL),(3,1),(4,2),(5,NULL),(6,NULL),(7,3),(8,4),(9,NULL),(10,NULL);
explain select id from t1 where uniq_id is null;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1 idx1 5 const 5 Using index condition
+1 SIMPLE t1 ref idx1 idx1 5 const 6 Using index condition
explain select id from t1 where uniq_id =1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const idx1 idx1 5 const 1
@@ -290,6 +291,11 @@ id uniq_id
4 2
7 3
8 4
+11 5
+12 6
+13 7
+14 8
+15 9
SELECT * FROM t2 ORDER BY uniq_id, id;
id uniq_id
3 1
@@ -399,8 +405,11 @@ INSERT INTO t3 SELECT * FROM t4;
INSERT INTO t3 VALUES (2), (3);
ANALYZE table t1, t2, t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
SELECT COUNT(*) FROM t3;
COUNT(*)
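The recurring extra "Engine-independent statistics collected" status row in these ANALYZE TABLE hunks (and, presumably, many of the shifted row estimates around them) comes from MariaDB's engine-independent table statistics. A minimal sketch, not part of this patch, assuming the documented use_stat_tables session variable and the mysql.column_stats table, and using a hypothetical throwaway table t_demo, of how the collected statistics can be inspected or bypassed when comparing plans:

  -- Illustrative only; t_demo is a hypothetical example table, not from the patch.
  CREATE TABLE t_demo (a INT, KEY(a));
  INSERT INTO t_demo VALUES (1),(2),(3);
  ANALYZE TABLE t_demo;                      -- prints the "Engine-independent statistics collected" row
  SELECT db_name, table_name, column_name, min_value, max_value
  FROM mysql.column_stats
  WHERE db_name = DATABASE() AND table_name = 't_demo';
  SET SESSION use_stat_tables = 'never';     -- have the optimizer ignore the collected statistics
  EXPLAIN SELECT * FROM t_demo WHERE a > 1;
  DROP TABLE t_demo;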
diff --git a/mysql-test/main/null_key.test b/mysql-test/main/null_key.test
index c3ebc6f5b94..a5781cc8524 100644
--- a/mysql-test/main/null_key.test
+++ b/mysql-test/main/null_key.test
@@ -12,7 +12,7 @@ explain select * from t1 where a is null and b = 2;
explain select * from t1 where a is null and b = 7;
explain select * from t1 where a=2 and b = 2;
explain select * from t1 where a<=>b limit 2;
-explain select * from t1 where (a is null or a > 0 and a < 3) and b < 5 limit 3;
+explain select * from t1 where (a is null or a > 0 and a < 2) and b < 5 limit 3;
explain select * from t1 where (a is null or a = 7) and b=7;
explain select * from t1 where (a is null or a = 7) and b=7 order by a;
explain select * from t1 where (a is null and b>a) or a is null and b=7 limit 2;
@@ -103,8 +103,8 @@ CREATE TABLE t2 (
) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,NULL),(2,NULL),(3,1),(4,2),(5,NULL),(6,NULL),(7,3),(8,4),(9,NULL),(10,NULL);
+INSERT INTO t1 VALUES (11,5),(12,6),(13,7),(14,8),(15,9);
INSERT INTO t2 VALUES (1,NULL),(2,NULL),(3,1),(4,2),(5,NULL),(6,NULL),(7,3),(8,4),(9,NULL),(10,NULL);
-
#
# Check IS NULL optimization
#
diff --git a/mysql-test/main/olap.result b/mysql-test/main/olap.result
index b7681179be0..93eda747d83 100644
--- a/mysql-test/main/olap.result
+++ b/mysql-test/main/olap.result
@@ -834,15 +834,15 @@ a
1
1
Warnings:
+Warning 1292 Truncated incorrect datetime value: ''
Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
Warning 1292 Incorrect datetime value: ''
-Warning 1292 Incorrect datetime value: ''
-Warning 1292 Incorrect datetime value: ''
-Warning 1292 Incorrect datetime value: ''
-Warning 1292 Incorrect datetime value: ''
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
Warning 1292 Incorrect datetime value: ''
DROP TABLE t1;
#
diff --git a/mysql-test/main/old-mode.result b/mysql-test/main/old-mode.result
index 73ad613048a..e0a3412bbdf 100644
--- a/mysql-test/main/old-mode.result
+++ b/mysql-test/main/old-mode.result
@@ -180,3 +180,44 @@ a unix_timestamp(a)
2010-10-31 02:25:26 1288481126
drop table t1, t2;
set time_zone=DEFAULT;
+#
+# MDEV-13995 MAX(timestamp) returns a wrong result near DST change
+#
+SET global mysql56_temporal_format=false;
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP(0));
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526) /*summer time in Moscow*/);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526+3599) /*winter time in Moscow*/);
+SET time_zone='Europe/Moscow';
+SELECT a, COALESCE(a), UNIX_TIMESTAMP(a) FROM t1;
+a COALESCE(a) UNIX_TIMESTAMP(a)
+2010-10-31 02:25:26 2010-10-31 02:25:26 1288477526
+2010-10-31 02:25:25 2010-10-31 02:25:25 1288481125
+SELECT MIN(a), UNIX_TIMESTAMP(MIN(a)) AS a FROM t1;
+MIN(a) a
+2010-10-31 02:25:26 1288477526
+SELECT MAX(a), UNIX_TIMESTAMP(MAX(a)) AS a FROM t1;
+MAX(a) a
+2010-10-31 02:25:25 1288481125
+SELECT t1.a, UNIX_TIMESTAMP(t1.a), t2.a, UNIX_TIMESTAMP(t2.a) FROM t1 t1, t1 t2 WHERE t1.a=t2.a;
+a UNIX_TIMESTAMP(t1.a) a UNIX_TIMESTAMP(t2.a)
+2010-10-31 02:25:26 1288477526 2010-10-31 02:25:26 1288477526
+2010-10-31 02:25:25 1288481125 2010-10-31 02:25:25 1288481125
+ALTER TABLE t1 MODIFY a TIMESTAMP(1);
+SELECT a, COALESCE(a), UNIX_TIMESTAMP(a) FROM t1;
+a COALESCE(a) UNIX_TIMESTAMP(a)
+2010-10-31 02:25:26.0 2010-10-31 02:25:26.0 1288477526.0
+2010-10-31 02:25:25.0 2010-10-31 02:25:25.0 1288481125.0
+SELECT MIN(a), UNIX_TIMESTAMP(MIN(a)) AS a FROM t1;
+MIN(a) a
+2010-10-31 02:25:26.0 1288477526.0
+SELECT MAX(a), UNIX_TIMESTAMP(MAX(a)) AS a FROM t1;
+MAX(a) a
+2010-10-31 02:25:25.0 1288481125.0
+SELECT t1.a, UNIX_TIMESTAMP(t1.a), t2.a, UNIX_TIMESTAMP(t2.a) FROM t1 t1, t1 t2 WHERE t1.a=t2.a;
+a UNIX_TIMESTAMP(t1.a) a UNIX_TIMESTAMP(t2.a)
+2010-10-31 02:25:26.0 1288477526.0 2010-10-31 02:25:26.0 1288477526.0
+2010-10-31 02:25:25.0 1288481125.0 2010-10-31 02:25:25.0 1288481125.0
+DROP TABLE t1;
+SET time_zone=DEFAULT;
+SET global mysql56_temporal_format=true;
diff --git a/mysql-test/main/old-mode.test b/mysql-test/main/old-mode.test
index e092ee78a2e..a09de1cf87d 100644
--- a/mysql-test/main/old-mode.test
+++ b/mysql-test/main/old-mode.test
@@ -120,3 +120,32 @@ insert t2 select a from t1;
select a, unix_timestamp(a) from t2;
drop table t1, t2;
set time_zone=DEFAULT;
+
+
+--echo #
+--echo # MDEV-13995 MAX(timestamp) returns a wrong result near DST change
+--echo #
+
+# This tests:
+# Field_timestamp::val_native()
+# Field_timestamp_hires::val_native()
+# Type_handler_timestamp_common::type_handler_for_native_format()
+
+SET global mysql56_temporal_format=false;
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP(0));
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526) /*summer time in Moscow*/);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526+3599) /*winter time in Moscow*/);
+SET time_zone='Europe/Moscow';
+SELECT a, COALESCE(a), UNIX_TIMESTAMP(a) FROM t1;
+SELECT MIN(a), UNIX_TIMESTAMP(MIN(a)) AS a FROM t1;
+SELECT MAX(a), UNIX_TIMESTAMP(MAX(a)) AS a FROM t1;
+SELECT t1.a, UNIX_TIMESTAMP(t1.a), t2.a, UNIX_TIMESTAMP(t2.a) FROM t1 t1, t1 t2 WHERE t1.a=t2.a;
+ALTER TABLE t1 MODIFY a TIMESTAMP(1);
+SELECT a, COALESCE(a), UNIX_TIMESTAMP(a) FROM t1;
+SELECT MIN(a), UNIX_TIMESTAMP(MIN(a)) AS a FROM t1;
+SELECT MAX(a), UNIX_TIMESTAMP(MAX(a)) AS a FROM t1;
+SELECT t1.a, UNIX_TIMESTAMP(t1.a), t2.a, UNIX_TIMESTAMP(t2.a) FROM t1 t1, t1 t2 WHERE t1.a=t2.a;
+DROP TABLE t1;
+SET time_zone=DEFAULT;
+SET global mysql56_temporal_format=true;
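The point of the MDEV-13995 case above is visible in its expected output: the two inserted rows are 3599 seconds apart, yet after the Moscow autumn DST change the later instant ('2010-10-31 02:25:25', UNIX time 1288481125) has a smaller wall-clock value than the earlier one ('2010-10-31 02:25:26', UNIX time 1288477526), so MIN/MAX computed on the displayed DATETIME would order the two instants backwards; the expected results verify that the aggregation happens on the native (UNIX-time) representation instead. A minimal sketch, not part of the patch, that simply replays the two instants from the test to make the reversal visible:

  -- Illustrative only; the constants and time zones are the ones used in the test above.
  SET time_zone = '+00:00';
  SELECT FROM_UNIXTIME(1288477526) AS utc_early, FROM_UNIXTIME(1288477526 + 3599) AS utc_late;
  SET time_zone = 'Europe/Moscow';
  -- The later instant now formats as the smaller local wall-clock value (repeated hour after DST ends).
  SELECT FROM_UNIXTIME(1288477526) AS msk_early, FROM_UNIXTIME(1288477526 + 3599) AS msk_late;
  SET time_zone = DEFAULT;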
diff --git a/mysql-test/main/opt_trace.result b/mysql-test/main/opt_trace.result
new file mode 100644
index 00000000000..12d4c713886
--- /dev/null
+++ b/mysql-test/main/opt_trace.result
@@ -0,0 +1,6037 @@
+SELECT table_name, column_name FROM information_schema.columns where table_name="OPTIMIZER_TRACE";
+table_name column_name
+OPTIMIZER_TRACE QUERY
+OPTIMIZER_TRACE TRACE
+OPTIMIZER_TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE
+OPTIMIZER_TRACE INSUFFICIENT_PRIVILEGES
+show variables like 'optimizer_trace';
+Variable_name Value
+optimizer_trace enabled=off
+set optimizer_trace="enabled=on";
+show variables like 'optimizer_trace';
+Variable_name Value
+optimizer_trace enabled=on
+set optimizer_trace="enabled=off";
+create table t1 (a int, b int);
+insert into t1 values (1,2),(2,3);
+create table t2 (b int);
+insert into t2 values (1),(2);
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+analyze table t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+create function f1 (a int) returns INT
+return 1;
+create view v1 as select * from t1 where t1.a=1;
+create view v2 as select * from t1 where t1.a=1 group by t1.b;
+set optimizer_trace="enabled=on";
+# Mergeable views/derived tables
+select * from v1;
+a b
+1 2
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+select * from v1 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "view": {
+ "table": "v1",
+ "select_id": 2,
+ "algorithm": "merged"
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": 2,
+ "steps": [
+ {
+ "expanded_query": "/* select#2 */ select t1.a AS a,t1.b AS b from t1 where t1.a = 1"
+ }
+ ]
+ }
+ },
+ {
+ "expanded_query": "/* select#1 */ select t1.a AS a,t1.b AS b from v1"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.a = 1",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "multiple equal(1, t1.a)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "multiple equal(1, t1.a)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "multiple equal(1, t1.a)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "selectivity_for_indexes": [],
+ "selectivity_for_columns": [
+ {
+ "column_name": "a",
+ "selectivity_from_histogram": 0.5
+ }
+ ],
+ "cond_selectivity": 0.5
+ },
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 2,
+ "cost": 2.0044
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 1,
+ "cost": 2.2044,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t1.a = 1",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": "t1.a = 1"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+select * from (select * from t1 where t1.a=1)q;
+a b
+1 2
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+select * from (select * from t1 where t1.a=1)q {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "derived": {
+ "table": "q",
+ "select_id": 2,
+ "algorithm": "merged"
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": 2,
+ "steps": [
+ {
+ "expanded_query": "/* select#2 */ select t1.a AS a,t1.b AS b from t1 where t1.a = 1"
+ }
+ ]
+ }
+ },
+ {
+ "expanded_query": "/* select#1 */ select t1.a AS a,t1.b AS b from (/* select#2 */ select t1.a AS a,t1.b AS b from t1 where t1.a = 1) q"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.a = 1",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "multiple equal(1, t1.a)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "multiple equal(1, t1.a)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "multiple equal(1, t1.a)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "selectivity_for_indexes": [],
+ "selectivity_for_columns": [
+ {
+ "column_name": "a",
+ "selectivity_from_histogram": 0.5
+ }
+ ],
+ "cond_selectivity": 0.5
+ },
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 2,
+ "cost": 2.0044
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 1,
+ "cost": 2.2044,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t1.a = 1",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": "t1.a = 1"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+# Non-Mergeable views
+select * from v2;
+a b
+1 2
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+select * from v2 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "view": {
+ "table": "v2",
+ "select_id": 2,
+ "algorithm": "materialized"
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": 2,
+ "steps": [
+ {
+ "expanded_query": "/* select#2 */ select t1.a AS a,t1.b AS b from t1 where t1.a = 1 group by t1.b"
+ }
+ ]
+ }
+ },
+ {
+ "expanded_query": "/* select#1 */ select v2.a AS a,v2.b AS b from v2"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "join_optimization": {
+ "select_id": 2,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.a = 1",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "multiple equal(1, t1.a)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "multiple equal(1, t1.a)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "multiple equal(1, t1.a)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "selectivity_for_indexes": [],
+ "selectivity_for_columns": [
+ {
+ "column_name": "a",
+ "selectivity_from_histogram": 0.5
+ }
+ ],
+ "cond_selectivity": 0.5
+ },
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 2,
+ "cost": 2.0044
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 1,
+ "cost": 2.2044,
+ "chosen": true,
+ "use_tmp_table": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t1.a = 1",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": "t1.a = 1"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "<derived2>",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "<derived2>",
+ "table_scan": {
+ "rows": 2,
+ "cost": 2
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "<derived2>",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 2,
+ "cost": 2,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": null,
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "<derived2>",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": [
+ {
+ "join_execution": {
+ "select_id": 2,
+ "steps": []
+ }
+ }
+ ]
+ }
+ }
+ ]
+} 0 0
+drop table t1,t2;
+drop view v1,v2;
+drop function f1;
+create table t1(a int, b int);
+insert into t1 values (0,0),(1,1),(2,1),(3,2),(4,3),
+(5,3),(6,3),(7,3),(8,3),(9,3);
+create table t2(a int, b int);
+insert into t2 values (0,0),(1,1),(2,1),(3,2),(4,3),
+(5,3),(6,3),(7,3),(8,3),(9,3);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+ANALYZE TABLE t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+create view v1 as select a from t1 group by b;
+create view v2 as select a from t2;
+# Mergeable view
+explain select * from v2 ;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t2 ALL NULL NULL NULL NULL 10
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from v2 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "view": {
+ "table": "v2",
+ "select_id": 2,
+ "algorithm": "merged"
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": 2,
+ "steps": [
+ {
+ "expanded_query": "/* select#2 */ select t2.a AS a from t2"
+ }
+ ]
+ }
+ },
+ {
+ "expanded_query": "/* select#1 */ select t2.a AS a from v2"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "table_dependencies": [
+ {
+ "table": "t2",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t2",
+ "table_scan": {
+ "rows": 10,
+ "cost": 2.022
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 10,
+ "cost": 2.022,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": null,
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t2",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+# Non-Mergeable view
+explain select * from v1 ;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 10
+2 DERIVED t1 ALL NULL NULL NULL NULL 10 Using temporary; Using filesort
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from v1 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "view": {
+ "table": "v1",
+ "select_id": 2,
+ "algorithm": "materialized"
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": 2,
+ "steps": [
+ {
+ "expanded_query": "/* select#2 */ select t1.a AS a from t1 group by t1.b"
+ }
+ ]
+ }
+ },
+ {
+ "expanded_query": "/* select#1 */ select v1.a AS a from v1"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "join_optimization": {
+ "select_id": 2,
+ "steps": [
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 10,
+ "cost": 2.022
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 10,
+ "cost": 2.022,
+ "chosen": true,
+ "use_tmp_table": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": null,
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "<derived2>",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "<derived2>",
+ "table_scan": {
+ "rows": 10,
+ "cost": 10
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "<derived2>",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 10,
+ "cost": 10,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": null,
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "<derived2>",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": [
+ {
+ "join_execution": {
+ "select_id": 2,
+ "steps": []
+ }
+ }
+ ]
+ }
+ }
+ ]
+} 0 0
+drop table t1,t2;
+drop view v1,v2;
+#
+# print the ref_optimizer_key_uses array
+#
+create table t0 (a int);
+INSERT INTO t0 VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (a int, b int, c int, key(a));
+insert into t1 select A.a*10 + B.a, A.a*10 + B.a, A.a*10 + B.a from t0 A, t0 B;
+create table t2(a int, b int, c int , key(a));
+insert into t2 select A.a*10 + B.a, A.a*10 + B.a, A.a*10 + B.a from t0 A, t0 B;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+analyze table t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
+explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL a NULL NULL NULL 100 Using where
+1 SIMPLE t2 ref a a 5 test.t1.b 1 Using where
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select t1.a AS a,t1.b AS b,t1.c AS c,t2.a AS a,t2.b AS b,t2.c AS c from t1 join t2 where t1.a = t2.b + 2 and t2.a = t1.b"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.a = t2.b + 2 and t2.a = t1.b",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "t1.a = t2.b + 2 and multiple equal(t2.a, t1.b)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "t1.a = t2.b + 2 and multiple equal(t2.a, t1.b)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "t1.a = t2.b + 2 and multiple equal(t2.a, t1.b)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t2",
+ "row_may_be_null": false,
+ "map_bit": 1,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": [
+ {
+ "table": "t1",
+ "field": "a",
+ "equals": "t2.b + 2",
+ "null_rejecting": false
+ },
+ {
+ "table": "t2",
+ "field": "a",
+ "equals": "t1.b",
+ "null_rejecting": true
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 100,
+ "cost": 2.3174
+ }
+ },
+ {
+ "table": "t2",
+ "table_scan": {
+ "rows": 100,
+ "cost": 2.3174
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 100,
+ "cost": 2.3174,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t1"],
+ "table": "t2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "ref",
+ "index": "a",
+ "used_range_estimates": false,
+ "cause": "not available",
+ "rows": 1,
+ "cost": 200,
+ "chosen": true
+ },
+ {
+ "access_type": "scan",
+ "resulting_rows": 100,
+ "cost": 2.3174,
+ "chosen": false
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": [],
+ "table": "t2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 100,
+ "cost": 2.3174,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t2"],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "ref",
+ "index": "a",
+ "used_range_estimates": false,
+ "cause": "not available",
+ "rows": 1,
+ "cost": 200,
+ "chosen": true
+ },
+ {
+ "access_type": "scan",
+ "resulting_rows": 100,
+ "cost": 2.3174,
+ "chosen": false
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t2.a = t1.b and t1.a = t2.b + 2",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": "t1.b is not null"
+ },
+ {
+ "table": "t2",
+ "attached": "t1.a = t2.b + 2"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+drop table t1,t2,t0;
+#
+# group_by min max optimization
+#
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a INT NOT NULL, KEY(a));
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+EXPLAIN SELECT DISTINCT a FROM t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range NULL a 4 NULL 5 Using index for group-by
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+EXPLAIN SELECT DISTINCT a FROM t1 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select distinct t1.a AS a from t1"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 65536,
+ "cost": 13255
+ },
+ "potential_range_indexes": [
+ {
+ "index": "PRIMARY",
+ "usable": false,
+ "cause": "not applicable"
+ },
+ {
+ "index": "a",
+ "usable": true,
+ "key_parts": ["a"]
+ }
+ ],
+ "best_covering_index_scan": {
+ "index": "a",
+ "cost": 4812.5,
+ "chosen": true
+ },
+ "group_index_range": {
+ "distinct_query": true,
+ "potential_group_range_indexes": [
+ {
+ "index": "a",
+ "covering": true,
+ "rows": 5,
+ "cost": 6.75
+ }
+ ]
+ },
+ "best_group_range_summary": {
+ "type": "index_group",
+ "index": "a",
+ "min_max_arg": null,
+ "min_aggregate": false,
+ "max_aggregate": false,
+ "distinct_aggregate": false,
+ "rows": 5,
+ "cost": 6.75,
+ "key_parts_used_for_access": ["a"],
+ "ranges": [],
+ "chosen": true
+ },
+ "chosen_range_access_summary": {
+ "range_access_plan": {
+ "type": "index_group",
+ "index": "a",
+ "min_max_arg": null,
+ "min_aggregate": false,
+ "max_aggregate": false,
+ "distinct_aggregate": false,
+ "rows": 5,
+ "cost": 6.75,
+ "key_parts_used_for_access": ["a"],
+ "ranges": []
+ },
+ "rows_for_plan": 5,
+ "cost_for_plan": 6.75,
+ "chosen": true
+ }
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "range",
+ "resulting_rows": 5,
+ "cost": 6.75,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": null,
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+drop table t1;
+#
+# With group by , where clause and MIN/MAX function
+#
+CREATE TABLE t1 (a INT, b INT, c int, d int, KEY(a,b,c,d));
+INSERT INTO t1 VALUES (1,1,1,1), (2,2,2,2), (3,3,3,3), (4,4,4,4), (1,0,1,1), (3,2,3,3), (4,5,4,4);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL a 20 NULL 7 Using where; Using index
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select min(t1.d) AS `MIN(d)` from t1 where t1.b = 2 and t1.c = 3 group by t1.a"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.b = 2 and t1.c = 3",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "multiple equal(2, t1.b) and multiple equal(3, t1.c)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "multiple equal(2, t1.b) and multiple equal(3, t1.c)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "multiple equal(2, t1.b) and multiple equal(3, t1.c)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 7,
+ "cost": 5.5291
+ },
+ "potential_range_indexes": [
+ {
+ "index": "a",
+ "usable": true,
+ "key_parts": ["a", "b", "c", "d"]
+ }
+ ],
+ "best_covering_index_scan": {
+ "index": "a",
+ "cost": 1.3869,
+ "chosen": true
+ },
+ "setup_range_conditions": [],
+ "group_index_range": {
+ "potential_group_range_indexes": [
+ {
+ "index": "a",
+ "covering": true,
+ "ranges": ["2 <= b <= 2 AND 3 <= c <= 3"],
+ "rows": 8,
+ "cost": 2.2
+ }
+ ]
+ },
+ "best_group_range_summary": {
+ "type": "index_group",
+ "index": "a",
+ "min_max_arg": "d",
+ "min_aggregate": true,
+ "max_aggregate": false,
+ "distinct_aggregate": false,
+ "rows": 8,
+ "cost": 2.2,
+ "key_parts_used_for_access": ["a", "b", "c"],
+ "ranges": ["2 <= b <= 2 AND 3 <= c <= 3"],
+ "chosen": false,
+ "cause": "cost"
+ },
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union": []
+ }
+ }
+ },
+ {
+ "selectivity_for_indexes": [],
+ "selectivity_for_columns": [
+ {
+ "column_name": "b",
+ "selectivity_from_histogram": 0.2891
+ },
+ {
+ "column_name": "c",
+ "selectivity_from_histogram": 0.2891
+ }
+ ],
+ "cond_selectivity": 0.0836
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 0.5849,
+ "cost": 3.3121,
+ "chosen": true,
+ "use_tmp_table": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t1.b = 2 and t1.c = 3",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": "t1.b = 2 and t1.c = 3"
+ }
+ ]
+ }
+ },
+ {
+ "reconsidering_access_paths_for_index_ordering": {
+ "clause": "GROUP BY",
+ "fanout": 1,
+ "read_time": 3.3131,
+ "table": "t1",
+ "rows_estimation": 7,
+ "possible_keys": [
+ {
+ "index": "a",
+ "can_resolve_order": true,
+ "updated_limit": 7,
+ "index_scan_time": 7,
+ "records": 7,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+DROP TABLE t1;
+CREATE TABLE t1 (id INT NOT NULL, a DATE, KEY(id,a));
+INSERT INTO t1 values (1,'2001-01-01'),(1,'2001-01-02'),
+(1,'2001-01-03'),(1,'2001-01-04'),
+(2,'2001-01-01'),(2,'2001-01-02'),
+(2,'2001-01-03'),(2,'2001-01-04'),
+(3,'2001-01-01'),(3,'2001-01-02'),
+(3,'2001-01-03'),(3,'2001-01-04'),
+(4,'2001-01-01'),(4,'2001-01-02'),
+(4,'2001-01-03'),(4,'2001-01-04');
+set optimizer_trace='enabled=on';
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL id 8 NULL 16 Using where; Using index
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select t1.`id` AS `id`,min(t1.a) AS `MIN(a)`,max(t1.a) AS `MAX(a)` from t1 where t1.a >= 20010104e0 group by t1.`id`"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.a >= 20010104e0",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "t1.a >= 20010104e0"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "t1.a >= 20010104e0"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "t1.a >= 20010104e0"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 16,
+ "cost": 7.3313
+ },
+ "potential_range_indexes": [
+ {
+ "index": "id",
+ "usable": true,
+ "key_parts": ["id", "a"]
+ }
+ ],
+ "best_covering_index_scan": {
+ "index": "id",
+ "cost": 1.8468,
+ "chosen": true
+ },
+ "setup_range_conditions": [],
+ "group_index_range": {
+ "potential_group_range_indexes": [
+ {
+ "index": "id",
+ "covering": true,
+ "ranges": ["0x24a20f <= a"],
+ "rows": 9,
+ "cost": 2.35
+ }
+ ]
+ },
+ "best_group_range_summary": {
+ "type": "index_group",
+ "index": "id",
+ "min_max_arg": "a",
+ "min_aggregate": true,
+ "max_aggregate": true,
+ "distinct_aggregate": false,
+ "rows": 9,
+ "cost": 2.35,
+ "key_parts_used_for_access": ["id"],
+ "ranges": ["0x24a20f <= a"],
+ "chosen": false,
+ "cause": "cost"
+ },
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union": []
+ }
+ }
+ },
+ {
+ "selectivity_for_indexes": [],
+ "selectivity_for_columns": [],
+ "cond_selectivity": 1
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 16,
+ "cost": 2.0312,
+ "chosen": true,
+ "use_tmp_table": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t1.a >= 20010104e0",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": "t1.a >= 20010104e0"
+ }
+ ]
+ }
+ },
+ {
+ "reconsidering_access_paths_for_index_ordering": {
+ "clause": "GROUP BY",
+ "fanout": 1,
+ "read_time": 2.0322,
+ "table": "t1",
+ "rows_estimation": 9,
+ "possible_keys": [
+ {
+ "index": "id",
+ "can_resolve_order": true,
+ "updated_limit": 16,
+ "index_scan_time": 16,
+ "records": 16,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index NULL id 8 NULL 16 Using where; Using index
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select t1.`id` AS `id`,t1.a AS a from t1 where t1.a = 20010104e0 group by t1.`id`"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.a = 20010104e0",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "t1.a = 20010104e0"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "t1.a = 20010104e0"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "t1.a = 20010104e0"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 16,
+ "cost": 7.3313
+ },
+ "potential_range_indexes": [
+ {
+ "index": "id",
+ "usable": true,
+ "key_parts": ["id", "a"]
+ }
+ ],
+ "best_covering_index_scan": {
+ "index": "id",
+ "cost": 1.8468,
+ "chosen": true
+ },
+ "setup_range_conditions": [],
+ "group_index_range": {
+ "potential_group_range_indexes": [
+ {
+ "index": "id",
+ "covering": true,
+ "ranges": ["0x24a20f <= a <= 0x24a20f"],
+ "rows": 9,
+ "cost": 2.35
+ }
+ ]
+ },
+ "best_group_range_summary": {
+ "type": "index_group",
+ "index": "id",
+ "min_max_arg": null,
+ "min_aggregate": false,
+ "max_aggregate": false,
+ "distinct_aggregate": false,
+ "rows": 9,
+ "cost": 2.35,
+ "key_parts_used_for_access": ["id", "a"],
+ "ranges": ["0x24a20f <= a <= 0x24a20f"],
+ "chosen": false,
+ "cause": "cost"
+ },
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union": []
+ }
+ }
+ },
+ {
+ "selectivity_for_indexes": [],
+ "selectivity_for_columns": [],
+ "cond_selectivity": 1
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 16,
+ "cost": 2.0312,
+ "chosen": true,
+ "use_tmp_table": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t1.a = 20010104e0",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": "t1.a = 20010104e0"
+ }
+ ]
+ }
+ },
+ {
+ "reconsidering_access_paths_for_index_ordering": {
+ "clause": "GROUP BY",
+ "fanout": 1,
+ "read_time": 2.0322,
+ "table": "t1",
+ "rows_estimation": 9,
+ "possible_keys": [
+ {
+ "index": "id",
+ "can_resolve_order": true,
+ "updated_limit": 16,
+ "index_scan_time": 16,
+ "records": 16,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+drop table t1;
+#
+# Late ORDER BY optimization
+#
+create table ten(a int);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table one_k(a int primary key);
+insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C;
+create table t1 (
+pk int not null,
+a int,
+b int,
+c int,
+filler char(100),
+KEY a_a(c),
+KEY a_c(a,c),
+KEY a_b(a,b)
+);
+insert into t1
+select a, a,a,a, 'filler-dataaa' from test.one_k;
+update t1 set a=1 where pk between 0 and 180;
+update t1 set b=2 where pk between 0 and 20;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+set optimizer_trace='enabled=on';
+explain select * from t1 where a=1 and b=2 order by c limit 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 range a_c,a_b a_c 5 NULL 180 Using where
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from t1 where a=1 and b=2 order by c limit 1 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select t1.pk AS pk,t1.a AS a,t1.b AS b,t1.c AS c,t1.filler AS filler from t1 where t1.a = 1 and t1.b = 2 order by t1.c limit 1"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.a = 1 and t1.b = 2",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "multiple equal(1, t1.a) and multiple equal(2, t1.b)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "multiple equal(1, t1.a) and multiple equal(2, t1.b)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "multiple equal(1, t1.a) and multiple equal(2, t1.b)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": [
+ {
+ "table": "t1",
+ "field": "a",
+ "equals": "1",
+ "null_rejecting": false
+ },
+ {
+ "table": "t1",
+ "field": "a",
+ "equals": "1",
+ "null_rejecting": false
+ },
+ {
+ "table": "t1",
+ "field": "b",
+ "equals": "2",
+ "null_rejecting": false
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 1000,
+ "cost": 232.66
+ },
+ "potential_range_indexes": [
+ {
+ "index": "a_a",
+ "usable": false,
+ "cause": "not applicable"
+ },
+ {
+ "index": "a_c",
+ "usable": true,
+ "key_parts": ["a", "c"]
+ },
+ {
+ "index": "a_b",
+ "usable": true,
+ "key_parts": ["a", "b"]
+ }
+ ],
+ "setup_range_conditions": [],
+ "group_index_range": {
+ "chosen": false,
+ "cause": "no group by or distinct"
+ },
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [
+ {
+ "index": "a_c",
+ "ranges": ["1 <= a <= 1"],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 180,
+ "cost": 231.72,
+ "chosen": true
+ },
+ {
+ "index": "a_b",
+ "ranges": ["1 <= a <= 1 AND 2 <= b <= 2"],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 21,
+ "cost": 27.445,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union": []
+ },
+ "chosen_range_access_summary": {
+ "range_access_plan": {
+ "type": "range_scan",
+ "index": "a_b",
+ "rows": 21,
+ "ranges": ["1 <= a <= 1 AND 2 <= b <= 2"]
+ },
+ "rows_for_plan": 21,
+ "cost_for_plan": 27.445,
+ "chosen": true
+ }
+ }
+ },
+ {
+ "selectivity_for_indexes": [
+ {
+ "index_name": "a_b",
+ "selectivity_from_index": 0.021
+ }
+ ],
+ "selectivity_for_columns": [
+ {
+ "column_name": "a",
+ "selectivity_from_histogram": 0.1797
+ },
+ {
+ "column_name": "b",
+ "selectivity_from_histogram": 0.0156
+ }
+ ],
+ "cond_selectivity": 0.021
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "ref",
+ "index": "a_c",
+ "used_range_estimates": true,
+ "rows": 180,
+ "cost": 92,
+ "chosen": true
+ },
+ {
+ "access_type": "ref",
+ "index": "a_b",
+ "used_range_estimates": true,
+ "rows": 21,
+ "cost": 22,
+ "chosen": true
+ },
+ {
+ "type": "scan",
+ "chosen": false,
+ "cause": "cost"
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t1.a = 1 and t1.b = 2",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ }
+ ]
+ }
+ },
+ {
+ "reconsidering_access_paths_for_index_ordering": {
+ "clause": "ORDER BY",
+ "fanout": 1,
+ "read_time": 22.001,
+ "table": "t1",
+ "rows_estimation": 21,
+ "possible_keys": [
+ {
+ "index": "a_a",
+ "can_resolve_order": true,
+ "updated_limit": 47,
+ "index_scan_time": 47,
+ "usable": false,
+ "cause": "cost"
+ },
+ {
+ "index": "a_c",
+ "can_resolve_order": true,
+ "updated_limit": 47,
+ "range_scan_time": 4.324,
+ "index_scan_time": 4.324,
+ "records": 180,
+ "chosen": true
+ },
+ {
+ "index": "a_b",
+ "can_resolve_order": false,
+ "cause": "not usable index for the query"
+ }
+ ]
+ }
+ },
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 1000,
+ "cost": 1202
+ },
+ "potential_range_indexes": [
+ {
+ "index": "a_a",
+ "usable": false,
+ "cause": "not applicable"
+ },
+ {
+ "index": "a_c",
+ "usable": true,
+ "key_parts": ["a", "c"]
+ },
+ {
+ "index": "a_b",
+ "usable": false,
+ "cause": "not applicable"
+ }
+ ],
+ "setup_range_conditions": [],
+ "group_index_range": {
+ "chosen": false,
+ "cause": "no group by or distinct"
+ },
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [
+ {
+ "index": "a_c",
+ "ranges": ["1 <= a <= 1"],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 180,
+ "cost": 231.72,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union": []
+ },
+ "chosen_range_access_summary": {
+ "range_access_plan": {
+ "type": "range_scan",
+ "index": "a_c",
+ "rows": 180,
+ "ranges": ["1 <= a <= 1"]
+ },
+ "rows_for_plan": 180,
+ "cost_for_plan": 231.72,
+ "chosen": true
+ }
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+drop table t1,ten,one_k;
+#
+# TABLE ELIMINATION
+#
+create table t1 (a int);
+insert into t1 values (0),(1),(2),(3);
+create table t0 as select * from t1;
+create table t2 (a int primary key, b int)
+as select a, a as b from t1 where a in (1,2);
+create table t3 (a int primary key, b int)
+as select a, a as b from t1 where a in (1,3);
+set optimizer_trace='enabled=on';
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+analyze table t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+analyze table t3;
+Table Op Msg_type Msg_text
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
+# table t2 should be eliminated
+explain
+select t1.a from t1 left join t2 on t1.a=t2.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain
+select t1.a from t1 left join t2 on t1.a=t2.a {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select t1.a AS a from (t1 left join t2 on(t1.a = t2.a))"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t2",
+ "row_may_be_null": true,
+ "map_bit": 1,
+ "depends_on_map_bits": ["0"]
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": [
+ {
+ "table": "t2",
+ "field": "a",
+ "equals": "t1.a",
+ "null_rejecting": true
+ }
+ ]
+ },
+ {
+ "eliminated_tables": ["t2"]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 4,
+ "cost": 2.0068
+ }
+ },
+ {
+ "table": "t2",
+ "rows": 1,
+ "cost": 1,
+ "table_type": "const"
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": ["t2"],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 4,
+ "cost": 2.0068,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "condition_on_constant_tables": "1"
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "1",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+# no tables should be eliminated
+explain select * from t1 left join t2 on t2.a=t1.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4
+1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 Using where
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from t1 left join t2 on t2.a=t1.a {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select t1.a AS a,t2.a AS a,t2.b AS b from (t1 left join t2 on(t2.a = t1.a))"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t2",
+ "row_may_be_null": true,
+ "map_bit": 1,
+ "depends_on_map_bits": ["0"]
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": [
+ {
+ "table": "t2",
+ "field": "a",
+ "equals": "t1.a",
+ "null_rejecting": true
+ }
+ ]
+ },
+ {
+ "eliminated_tables": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 4,
+ "cost": 2.0068
+ }
+ },
+ {
+ "table": "t2",
+ "table_scan": {
+ "rows": 2,
+ "cost": 2.0044
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 4,
+ "cost": 2.0068,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t1"],
+ "table": "t2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "eq_ref",
+ "index": "PRIMARY",
+ "rows": 1,
+ "cost": 4,
+ "chosen": true
+ },
+ {
+ "access_type": "scan",
+ "resulting_rows": 2,
+ "cost": 8.0176,
+ "chosen": false
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "condition_on_constant_tables": "1"
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "1",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ },
+ {
+ "table": "t2",
+ "attached": "trigcond(trigcond(t1.a is not null))"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+# multiple tables are eliminated
+explain select t1.a from t1 left join (t2 join t3 on t2.b=t3.b) on t2.a=t1.a and t3.a=t1.a;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 4
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select t1.a from t1 left join (t2 join t3 on t2.b=t3.b) on t2.a=t1.a and t3.a=t1.a {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select t1.a AS a from (t1 left join (t2 join t3 on(t2.b = t3.b)) on(t2.a = t1.a and t3.a = t1.a))"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t2",
+ "row_may_be_null": true,
+ "map_bit": 1,
+ "depends_on_map_bits": ["0"]
+ },
+ {
+ "table": "t3",
+ "row_may_be_null": true,
+ "map_bit": 2,
+ "depends_on_map_bits": ["0"]
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": [
+ {
+ "table": "t2",
+ "field": "a",
+ "equals": "t1.a",
+ "null_rejecting": true
+ },
+ {
+ "table": "t2",
+ "field": "a",
+ "equals": "t3.a",
+ "null_rejecting": true
+ },
+ {
+ "table": "t3",
+ "field": "a",
+ "equals": "t2.a",
+ "null_rejecting": true
+ },
+ {
+ "table": "t3",
+ "field": "a",
+ "equals": "t1.a",
+ "null_rejecting": true
+ }
+ ]
+ },
+ {
+ "eliminated_tables": ["t3", "t2"]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 4,
+ "cost": 2.0068
+ }
+ },
+ {
+ "table": "t2",
+ "rows": 1,
+ "cost": 1,
+ "table_type": "const"
+ },
+ {
+ "table": "t3",
+ "rows": 1,
+ "cost": 1,
+ "table_type": "const"
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": ["t3", "t2"],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 4,
+ "cost": 2.0068,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "condition_on_constant_tables": "1"
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "1",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+drop table t0, t1, t2, t3;
+#
+# IN subquery to semi-join is traced
+#
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1(a int, b int);
+insert into t1 values (0,0),(1,1),(2,2);
+create table t2 as select * from t1;
+create table t11(a int, b int);
+create table t10 (pk int, a int);
+insert into t10 select a,a from t0;
+create table t12 like t10;
+insert into t12 select * from t10;
+analyze table t1,t10;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t10 analyze status Engine-independent statistics collected
+test.t10 analyze status OK
+set optimizer_trace='enabled=on';
+explain extended select * from t1 where a in (select pk from t10);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 100.00
+2 MATERIALIZED t10 ALL NULL NULL NULL NULL 10 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` semi join (`test`.`t10`) where 1
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain extended select * from t1 where a in (select pk from t10) {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 2,
+ "steps": [
+ {
+ "transformation": {
+ "select_id": 2,
+ "from": "IN (SELECT)",
+ "to": "materialization",
+ "sjm_scan_allowed": true,
+ "possible": true
+ }
+ },
+ {
+ "transformation": {
+ "select_id": 2,
+ "from": "IN (SELECT)",
+ "to": "semijoin",
+ "chosen": true
+ }
+ },
+ {
+ "expanded_query": "/* select#2 */ select t10.pk from t10"
+ }
+ ]
+ }
+ },
+ {
+ "expanded_query": "/* select#1 */ select t1.a AS a,t1.b AS b from t1 where t1.a in (/* select#2 */ select t10.pk from t10)"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "transformation": {
+ "select_id": 2,
+ "from": "IN (SELECT)",
+ "to": "semijoin",
+ "converted_to_semi_join": true
+ }
+ },
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "1 and t1.a = t10.pk",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "1 and multiple equal(t1.a, t10.pk)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "1 and multiple equal(t1.a, t10.pk)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "multiple equal(t1.a, t10.pk)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t10",
+ "row_may_be_null": false,
+ "map_bit": 1,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0066
+ }
+ },
+ {
+ "table": "t10",
+ "table_scan": {
+ "rows": 10,
+ "cost": 2.022
+ }
+ }
+ ]
+ },
+ {
+ "execution_plan_for_potential_materialization": {
+ "steps": [
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t10",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 10,
+ "cost": 2.022,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0066,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t1"],
+ "table": "t10",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 10,
+ "cost": 2.022,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": [],
+ "table": "t10",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 10,
+ "cost": 2.022,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "fix_semijoin_strategies_for_picked_join_order": [
+ {
+ "semi_join_strategy": "sj_materialize",
+ "join_order": [
+ {
+ "table": "t10"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "condition_on_constant_tables": "1"
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "1",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ },
+ {
+ "table": "t10",
+ "attached": null
+ },
+ {
+ "table": "<subquery2>",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+drop table t0,t1,t11,t10,t12,t2;
+#
+# Selectivities for columns and indexes.
+#
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (
+pk int,
+a int,
+b int,
+key pk(pk),
+key pk_a(pk,a),
+key pk_a_b(pk,a,b));
+insert into t1 select a,a,a from t0;
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a,b) INDEXES ();
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+set @save_use_stat_tables= @@use_stat_tables;
+set @@optimizer_use_condition_selectivity=4;
+set @@use_stat_tables= PREFERABLY;
+set optimizer_trace='enabled=on';
+explain select * from t1 where pk = 2 and a=5 and b=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref pk,pk_a,pk_a_b pk_a_b 15 const,const,const 1 Using index
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from t1 where pk = 2 and a=5 and b=1 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select t1.pk AS pk,t1.a AS a,t1.b AS b from t1 where t1.pk = 2 and t1.a = 5 and t1.b = 1"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.pk = 2 and t1.a = 5 and t1.b = 1",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "multiple equal(2, t1.pk) and multiple equal(5, t1.a) and multiple equal(1, t1.b)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "multiple equal(2, t1.pk) and multiple equal(5, t1.a) and multiple equal(1, t1.b)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "multiple equal(2, t1.pk) and multiple equal(5, t1.a) and multiple equal(1, t1.b)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": [
+ {
+ "table": "t1",
+ "field": "pk",
+ "equals": "2",
+ "null_rejecting": false
+ },
+ {
+ "table": "t1",
+ "field": "pk",
+ "equals": "2",
+ "null_rejecting": false
+ },
+ {
+ "table": "t1",
+ "field": "a",
+ "equals": "5",
+ "null_rejecting": false
+ },
+ {
+ "table": "t1",
+ "field": "pk",
+ "equals": "2",
+ "null_rejecting": false
+ },
+ {
+ "table": "t1",
+ "field": "a",
+ "equals": "5",
+ "null_rejecting": false
+ },
+ {
+ "table": "t1",
+ "field": "b",
+ "equals": "1",
+ "null_rejecting": false
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 10,
+ "cost": 6.1317
+ },
+ "potential_range_indexes": [
+ {
+ "index": "pk",
+ "usable": true,
+ "key_parts": ["pk"]
+ },
+ {
+ "index": "pk_a",
+ "usable": true,
+ "key_parts": ["pk", "a"]
+ },
+ {
+ "index": "pk_a_b",
+ "usable": true,
+ "key_parts": ["pk", "a", "b"]
+ }
+ ],
+ "best_covering_index_scan": {
+ "index": "pk_a_b",
+ "cost": 1.5429,
+ "chosen": true
+ },
+ "setup_range_conditions": [],
+ "group_index_range": {
+ "chosen": false,
+ "cause": "no group by or distinct"
+ },
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [
+ {
+ "index": "pk",
+ "ranges": ["2 <= pk <= 2"],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 1,
+ "cost": 2.3773,
+ "chosen": false,
+ "cause": "cost"
+ },
+ {
+ "index": "pk_a",
+ "ranges": ["2 <= pk <= 2 AND 5 <= a <= 5"],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 1,
+ "cost": 2.3783,
+ "chosen": false,
+ "cause": "cost"
+ },
+ {
+ "index": "pk_a_b",
+ "ranges": ["2 <= pk <= 2 AND 5 <= a <= 5 AND 1 <= b <= 1"],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 1,
+ "cost": 1.1793,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect": {
+ "intersecting_indexes": [
+ {
+ "index": "pk",
+ "index_scan_cost": 1.0023,
+ "cumulated_index_scan_cost": 1.0023,
+ "disk_sweep_cost": 0.9008,
+ "cumulative_total_cost": 1.9031,
+ "usable": true,
+ "matching_rows_now": 1,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ },
+ {
+ "index": "pk_a",
+ "usable": false,
+ "cause": "does not reduce cost of intersect"
+ },
+ {
+ "index": "pk_a_b",
+ "usable": false,
+ "cause": "does not reduce cost of intersect"
+ }
+ ],
+ "clustered_pk": {
+ "clustered_pk_added_to_intersect": false,
+ "cause": "no clustered pk index"
+ },
+ "chosen": false,
+ "cause": "cost"
+ },
+ "analyzing_index_merge_union": []
+ },
+ "chosen_range_access_summary": {
+ "range_access_plan": {
+ "type": "range_scan",
+ "index": "pk_a_b",
+ "rows": 1,
+ "ranges": ["2 <= pk <= 2 AND 5 <= a <= 5 AND 1 <= b <= 1"]
+ },
+ "rows_for_plan": 1,
+ "cost_for_plan": 1.1793,
+ "chosen": true
+ }
+ }
+ },
+ {
+ "selectivity_for_indexes": [
+ {
+ "index_name": "pk_a_b",
+ "selectivity_from_index": 0.1
+ }
+ ],
+ "selectivity_for_columns": [
+ {
+ "column_name": "a",
+ "selectivity_from_histogram": 0.1
+ },
+ {
+ "column_name": "b",
+ "selectivity_from_histogram": 0.1
+ }
+ ],
+ "cond_selectivity": 0.1
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "ref",
+ "index": "pk",
+ "used_range_estimates": true,
+ "rows": 1,
+ "cost": 2,
+ "chosen": true
+ },
+ {
+ "access_type": "ref",
+ "index": "pk_a",
+ "used_range_estimates": true,
+ "rows": 1,
+ "cost": 2,
+ "chosen": false,
+ "cause": "cost"
+ },
+ {
+ "access_type": "ref",
+ "index": "pk_a_b",
+ "used_range_estimates": true,
+ "rows": 1,
+ "cost": 1.0043,
+ "chosen": true
+ },
+ {
+ "type": "scan",
+ "chosen": false,
+ "cause": "cost"
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t1.pk = 2 and t1.a = 5 and t1.b = 1",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+set @@use_stat_tables= @save_use_stat_tables;
+drop table t0,t1;
+set optimizer_trace="enabled=off";
+#
+# Tests added to show that sub-statements are not traced
+#
+create table t1(a int);
+insert into t1 values (1),(2),(3),(4);
+create table t2(a int);
+insert into t2 values (1),(2),(3),(4);
+create function f1(a int) returns int
+begin
+declare a int default 0;
+set a= a+ (select count(*) from t2);
+return a;
+end|
+create function f2(a int) returns int
+begin
+declare a int default 0;
+select count(*) from t2 into a;
+return a;
+end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+set optimizer_trace='enabled=on';
+select f1(a) from t1;
+f1(a)
+4
+4
+4
+4
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+select f1(a) from t1 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select f1(t1.a) AS `f1(a)` from t1"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 4,
+ "cost": 2.0068
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 4,
+ "cost": 2.0068,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": null,
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+select f2(a) from t1;
+f2(a)
+4
+4
+4
+4
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+select f2(a) from t1 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select f2(t1.a) AS `f2(a)` from t1"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 4,
+ "cost": 2.0068
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 4,
+ "cost": 2.0068,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": null,
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+drop table t1,t2;
+drop function f1;
+drop function f2;
+set optimizer_trace='enabled=off';
+#
+# MDEV-18489: Limit the memory used by the optimizer trace
+#
+create table t1 (a int);
+insert into t1 values (1),(2);
+set optimizer_trace='enabled=on';
+set @save_optimizer_trace_max_mem_size= @@optimizer_trace_max_mem_size;
+select * from t1;
+a
+1
+2
+select length(trace) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+length(trace)
+1754
+set optimizer_trace_max_mem_size=100;
+select * from t1;
+a
+1
+2
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+select * from t1 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ 1654 0
+set optimizer_trace_max_mem_size=0;
+select * from t1;
+a
+1
+2
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+select * from t1 1754 0
+drop table t1;
+set optimizer_trace='enabled=off';
+set @@optimizer_trace_max_mem_size= @save_optimizer_trace_max_mem_size;
+#
+# MDEV-18527: Optimizer trace for DELETE query shows table:null
+#
+create table ten(a int);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t0 (a int, b int);
+insert into t0 select a,a from ten;
+alter table t0 add key(a);
+set optimizer_trace=1;
+explain delete from t0 where t0.a<3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 range a a 5 NULL 3 Using where
+select * from information_schema.optimizer_trace;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain delete from t0 where t0.a<3 {
+ "steps": [
+ {
+ "table": "t0",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 10,
+ "cost": 6.122
+ },
+ "potential_range_indexes": [
+ {
+ "index": "a",
+ "usable": true,
+ "key_parts": ["a"]
+ }
+ ],
+ "setup_range_conditions": [],
+ "group_index_range": {
+ "chosen": false,
+ "cause": "no join"
+ },
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [
+ {
+ "index": "a",
+ "ranges": ["NULL < a < 3"],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 3,
+ "cost": 5.007,
+ "chosen": true
+ }
+ ],
+ "analyzing_index_merge_union": []
+ },
+ "chosen_range_access_summary": {
+ "range_access_plan": {
+ "type": "range_scan",
+ "index": "a",
+ "rows": 3,
+ "ranges": ["NULL < a < 3"]
+ },
+ "rows_for_plan": 3,
+ "cost_for_plan": 5.007,
+ "chosen": true
+ }
+ }
+ }
+ ]
+} 0 0
+drop table ten,t0;
+set optimizer_trace='enabled=off';
+#
+# MDEV-18528: Optimizer trace support for multi-table UPDATE and DELETE
+#
+set optimizer_trace=1;
+create table ten(a int);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t0 (a int, b int);
+insert into t0 select a,a from ten;
+alter table t0 add key(a);
+create table t1 like t0;
+insert into t1 select * from t0;
+explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t0 range a a 5 NULL 3 Using where
+1 SIMPLE t1 ref a a 5 test.t0.a 1
+select * from information_schema.optimizer_trace;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select NULL AS `NULL` from t0 join t1 where t0.a = t1.a and t1.a < 3"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t0.a = t1.a and t1.a < 3",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "t1.a < 3 and multiple equal(t0.a, t1.a)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "t1.a < 3 and multiple equal(t0.a, t1.a)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "t1.a < 3 and multiple equal(t0.a, t1.a)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t0",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 1,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": [
+ {
+ "table": "t0",
+ "field": "a",
+ "equals": "t1.a",
+ "null_rejecting": true
+ },
+ {
+ "table": "t1",
+ "field": "a",
+ "equals": "t0.a",
+ "null_rejecting": true
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t0",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 10,
+ "cost": 6.122
+ },
+ "potential_range_indexes": [
+ {
+ "index": "a",
+ "usable": true,
+ "key_parts": ["a"]
+ }
+ ],
+ "best_covering_index_scan": {
+ "index": "a",
+ "cost": 1.5234,
+ "chosen": true
+ },
+ "setup_range_conditions": [],
+ "group_index_range": {
+ "chosen": false,
+ "cause": "not single_table"
+ },
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [
+ {
+ "index": "a",
+ "ranges": ["NULL < a < 3"],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 3,
+ "cost": 1.407,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union": []
+ },
+ "chosen_range_access_summary": {
+ "range_access_plan": {
+ "type": "range_scan",
+ "index": "a",
+ "rows": 3,
+ "ranges": ["NULL < a < 3"]
+ },
+ "rows_for_plan": 3,
+ "cost_for_plan": 1.407,
+ "chosen": true
+ }
+ }
+ },
+ {
+ "selectivity_for_indexes": [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 0.3
+ }
+ ],
+ "selectivity_for_columns": [],
+ "cond_selectivity": 0.3
+ },
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 10,
+ "cost": 6.122
+ },
+ "potential_range_indexes": [
+ {
+ "index": "a",
+ "usable": true,
+ "key_parts": ["a"]
+ }
+ ],
+ "best_covering_index_scan": {
+ "index": "a",
+ "cost": 1.5234,
+ "chosen": true
+ },
+ "setup_range_conditions": [],
+ "group_index_range": {
+ "chosen": false,
+ "cause": "not single_table"
+ },
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [
+ {
+ "index": "a",
+ "ranges": ["NULL < a < 3"],
+ "rowid_ordered": false,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 3,
+ "cost": 1.407,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_index_merge_union": []
+ },
+ "chosen_range_access_summary": {
+ "range_access_plan": {
+ "type": "range_scan",
+ "index": "a",
+ "rows": 3,
+ "ranges": ["NULL < a < 3"]
+ },
+ "rows_for_plan": 3,
+ "cost_for_plan": 1.407,
+ "chosen": true
+ }
+ }
+ },
+ {
+ "selectivity_for_indexes": [
+ {
+ "index_name": "a",
+ "selectivity_from_index": 0.3
+ }
+ ],
+ "selectivity_for_columns": [],
+ "cond_selectivity": 0.3
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t0",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "range",
+ "resulting_rows": 3,
+ "cost": 1.407,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t0"],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "ref",
+ "index": "a",
+ "used_range_estimates": false,
+ "cause": "not better than ref estimates",
+ "rows": 1,
+ "cost": 3.007,
+ "chosen": true
+ },
+ {
+ "type": "scan",
+ "chosen": false,
+ "cause": "cost"
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "range",
+ "resulting_rows": 3,
+ "cost": 1.407,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t1"],
+ "table": "t0",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "ref",
+ "index": "a",
+ "used_range_estimates": false,
+ "cause": "not better than ref estimates",
+ "rows": 2,
+ "cost": 3.014,
+ "chosen": true
+ },
+ {
+ "type": "scan",
+ "chosen": false,
+ "cause": "cost"
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t1.a = t0.a and t0.a < 3",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t0",
+ "attached": "t0.a < 3 and t0.a is not null"
+ },
+ {
+ "table": "t1",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+drop table ten,t0,t1;
+set optimizer_trace='enabled=off';
+#
+# Merged to Materialized for derived tables
+#
+set optimizer_trace=1;
+create table t1 (a int);
+insert into t1 values (1),(2),(3);
+explain select * from (select rand() from t1)q;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3
+2 DERIVED t1 ALL NULL NULL NULL NULL 3
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from (select rand() from t1)q {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "derived": {
+ "table": "q",
+ "select_id": 2,
+ "algorithm": "merged"
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": 2,
+ "steps": [
+ {
+ "expanded_query": "/* select#2 */ select rand() AS `rand()` from t1"
+ }
+ ]
+ }
+ },
+ {
+ "expanded_query": "/* select#1 */ select rand() AS `rand()` from (/* select#2 */ select rand() AS `rand()` from t1) q"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "derived": {
+ "table": "q",
+ "select_id": 2,
+ "algorithm": "materialized",
+ "cause": "Random function in the select"
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 2,
+ "steps": [
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0051
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": null,
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "<derived2>",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "<derived2>",
+ "table_scan": {
+ "rows": 3,
+ "cost": 3
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "<derived2>",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 3,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": null,
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "<derived2>",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": [
+ {
+ "join_execution": {
+ "select_id": 2,
+ "steps": []
+ }
+ }
+ ]
+ }
+ }
+ ]
+} 0 0
+drop table t1;
+set optimizer_trace='enabled=off';
+#
+# Semi-join nest
+#
+set optimizer_trace=1;
+create table t1 (a int);
+insert into t1 values (1),(2),(3);
+create table t2(a int);
+insert into t2 values (1),(2),(3),(1),(2),(3),(1),(2),(3);
+set @save_optimizer_switch= @@optimizer_switch;
+explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_inner_2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
+2 MATERIALIZED t_inner_1 ALL NULL NULL NULL NULL 3
+2 MATERIALIZED t_inner_2 ALL NULL NULL NULL NULL 3 Using join buffer (flat, BNL join)
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_inner_2) {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 2,
+ "steps": [
+ {
+ "transformation": {
+ "select_id": 2,
+ "from": "IN (SELECT)",
+ "to": "materialization",
+ "sjm_scan_allowed": true,
+ "possible": true
+ }
+ },
+ {
+ "transformation": {
+ "select_id": 2,
+ "from": "IN (SELECT)",
+ "to": "semijoin",
+ "chosen": true
+ }
+ },
+ {
+ "expanded_query": "/* select#2 */ select t_inner_1.a from t1 t_inner_1 join t1 t_inner_2"
+ }
+ ]
+ }
+ },
+ {
+ "expanded_query": "/* select#1 */ select t1.a AS a from t1 where t1.a in (/* select#2 */ select t_inner_1.a from t1 t_inner_1 join t1 t_inner_2)"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "transformation": {
+ "select_id": 2,
+ "from": "IN (SELECT)",
+ "to": "semijoin",
+ "converted_to_semi_join": true
+ }
+ },
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "1 and t1.a = t_inner_1.a",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "1 and multiple equal(t1.a, t_inner_1.a)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "1 and multiple equal(t1.a, t_inner_1.a)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "multiple equal(t1.a, t_inner_1.a)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t_inner_1",
+ "row_may_be_null": false,
+ "map_bit": 1,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t_inner_2",
+ "row_may_be_null": false,
+ "map_bit": 2,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0051
+ }
+ },
+ {
+ "table": "t_inner_1",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0051
+ }
+ },
+ {
+ "table": "t_inner_2",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0051
+ }
+ }
+ ]
+ },
+ {
+ "execution_plan_for_potential_materialization": {
+ "steps": [
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t_inner_1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t_inner_1"],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t1"],
+ "table": "t_inner_1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t1", "t_inner_1"],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t1"],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "fix_semijoin_strategies_for_picked_join_order": [
+ {
+ "semi_join_strategy": "sj_materialize",
+ "join_order": [
+ {
+ "table": "t_inner_1"
+ },
+ {
+ "table": "t_inner_2"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "condition_on_constant_tables": "1"
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "1",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ },
+ {
+ "table": "t_inner_1",
+ "attached": null
+ },
+ {
+ "table": "t_inner_2",
+ "attached": null
+ },
+ {
+ "table": "<subquery2>",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+# with Firstmatch, mostly for tracing fix_semijoin_strategies_for_picked_join_order
+set optimizer_switch='materialization=off';
+explain select * from t1 t_outer_1,t2 t_outer_2 where t_outer_1.a in (select t_inner_1.a from t2 t_inner_2, t1 t_inner_1) and
+t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t_outer_1 ALL NULL NULL NULL NULL 3
+1 PRIMARY t_inner_1 ALL NULL NULL NULL NULL 3 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t_inner_2 ALL NULL NULL NULL NULL 9 FirstMatch(t_outer_1); Using join buffer (incremental, BNL join)
+1 PRIMARY t_outer_2 ALL NULL NULL NULL NULL 9 Using join buffer (incremental, BNL join)
+1 PRIMARY t_inner_4 ALL NULL NULL NULL NULL 3 Using join buffer (incremental, BNL join)
+1 PRIMARY t_inner_3 ALL NULL NULL NULL NULL 9 Using where; FirstMatch(t_outer_2); Using join buffer (incremental, BNL join)
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from t1 t_outer_1,t2 t_outer_2 where t_outer_1.a in (select t_inner_1.a from t2 t_inner_2, t1 t_inner_1) and
+t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 2,
+ "steps": [
+ {
+ "transformation": {
+ "select_id": 2,
+ "from": "IN (SELECT)",
+ "to": "materialization",
+ "sjm_scan_allowed": true,
+ "possible": true
+ }
+ },
+ {
+ "transformation": {
+ "select_id": 2,
+ "from": "IN (SELECT)",
+ "to": "semijoin",
+ "chosen": true
+ }
+ },
+ {
+ "expanded_query": "/* select#2 */ select t_inner_1.a from t2 t_inner_2 join t1 t_inner_1"
+ }
+ ]
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": 3,
+ "steps": [
+ {
+ "transformation": {
+ "select_id": 3,
+ "from": "IN (SELECT)",
+ "to": "materialization",
+ "sjm_scan_allowed": true,
+ "possible": true
+ }
+ },
+ {
+ "transformation": {
+ "select_id": 3,
+ "from": "IN (SELECT)",
+ "to": "semijoin",
+ "chosen": true
+ }
+ },
+ {
+ "expanded_query": "/* select#3 */ select t_inner_3.a from t2 t_inner_3 join t1 t_inner_4"
+ }
+ ]
+ }
+ },
+ {
+ "expanded_query": "/* select#1 */ select t_outer_1.a AS a,t_outer_2.a AS a from t1 t_outer_1 join t2 t_outer_2 where t_outer_1.a in (/* select#2 */ select t_inner_1.a from t2 t_inner_2 join t1 t_inner_1) and t_outer_2.a in (/* select#3 */ select t_inner_3.a from t2 t_inner_3 join t1 t_inner_4)"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "transformation": {
+ "select_id": 2,
+ "from": "IN (SELECT)",
+ "to": "semijoin",
+ "converted_to_semi_join": true
+ }
+ },
+ {
+ "transformation": {
+ "select_id": 3,
+ "from": "IN (SELECT)",
+ "to": "semijoin",
+ "converted_to_semi_join": true
+ }
+ },
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "1 and 1 and t_outer_1.a = t_inner_1.a and t_outer_2.a = t_inner_3.a",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "1 and 1 and multiple equal(t_outer_1.a, t_inner_1.a) and multiple equal(t_outer_2.a, t_inner_3.a)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "1 and 1 and multiple equal(t_outer_1.a, t_inner_1.a) and multiple equal(t_outer_2.a, t_inner_3.a)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "multiple equal(t_outer_1.a, t_inner_1.a) and multiple equal(t_outer_2.a, t_inner_3.a)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t_outer_1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t_outer_2",
+ "row_may_be_null": false,
+ "map_bit": 1,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t_inner_2",
+ "row_may_be_null": false,
+ "map_bit": 2,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t_inner_1",
+ "row_may_be_null": false,
+ "map_bit": 3,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t_inner_3",
+ "row_may_be_null": false,
+ "map_bit": 4,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t_inner_4",
+ "row_may_be_null": false,
+ "map_bit": 5,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t_outer_1",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0051
+ }
+ },
+ {
+ "table": "t_outer_2",
+ "table_scan": {
+ "rows": 9,
+ "cost": 2.0154
+ }
+ },
+ {
+ "table": "t_inner_2",
+ "table_scan": {
+ "rows": 9,
+ "cost": 2.0154
+ }
+ },
+ {
+ "table": "t_inner_1",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0051
+ }
+ },
+ {
+ "table": "t_inner_3",
+ "table_scan": {
+ "rows": 9,
+ "cost": 2.0154
+ }
+ },
+ {
+ "table": "t_inner_4",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0051
+ }
+ }
+ ]
+ },
+ {
+ "execution_plan_for_potential_materialization": {
+ "steps": []
+ }
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t_outer_1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t_outer_1"],
+ "table": "t_inner_1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_2",
+ "t_outer_2"
+ ],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_2",
+ "t_outer_2",
+ "t_inner_4"
+ ],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_2",
+ "t_outer_2"
+ ],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_2",
+ "t_inner_4"
+ ],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_2",
+ "t_inner_4",
+ "t_outer_2"
+ ],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_2",
+ "t_inner_4"
+ ],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_outer_2"],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_outer_2"],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_outer_2",
+ "t_inner_4"
+ ],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ },
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_outer_2",
+ "t_inner_4"
+ ],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_outer_2"],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_4"],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_4",
+ "t_outer_2"
+ ],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ },
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_4",
+ "t_outer_2"
+ ],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_4"],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_4"],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_3"],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_3",
+ "t_outer_2"
+ ],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ },
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_3",
+ "t_outer_2"
+ ],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_3"],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_3",
+ "t_inner_4"
+ ],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ },
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_3",
+ "t_inner_4"
+ ],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_3"],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1"],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": ["t_outer_1"],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": ["t_outer_1"],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": ["t_outer_1"],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "fix_semijoin_strategies_for_picked_join_order": [
+ {
+ "semi_join_strategy": "firstmatch",
+ "join_order": [
+ {
+ "table": "t_inner_4"
+ },
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 162.42,
+ "chosen": true
+ }
+ ]
+ },
+ {
+ "table": "t_inner_3"
+ },
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 489.74,
+ "chosen": true
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "semi_join_strategy": "firstmatch",
+ "join_order": [
+ {
+ "table": "t_inner_1"
+ },
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 18.046,
+ "chosen": true
+ }
+ ]
+ },
+ {
+ "table": "t_inner_2"
+ },
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 54.415,
+ "chosen": true
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t_inner_1.a = t_outer_1.a and t_inner_3.a = t_outer_2.a",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t_outer_1",
+ "attached": null
+ },
+ {
+ "table": "t_inner_1",
+ "attached": "t_inner_1.a = t_outer_1.a"
+ },
+ {
+ "table": "t_inner_2",
+ "attached": null
+ },
+ {
+ "table": "t_outer_2",
+ "attached": null
+ },
+ {
+ "table": "t_inner_4",
+ "attached": null
+ },
+ {
+ "table": "t_inner_3",
+ "attached": "t_inner_3.a = t_outer_2.a"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+set optimizer_switch='materialization=on';
+explain select * from t1 t_outer_1,t2 t_outer_2 where t_outer_1.a in (select t_inner_1.a from t2 t_inner_2, t1 t_inner_1) and
+t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t_outer_1 ALL NULL NULL NULL NULL 3
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
+1 PRIMARY t_outer_2 ALL NULL NULL NULL NULL 9 Using join buffer (flat, BNL join)
+1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1
+2 MATERIALIZED t_inner_1 ALL NULL NULL NULL NULL 3
+2 MATERIALIZED t_inner_2 ALL NULL NULL NULL NULL 9 Using join buffer (flat, BNL join)
+3 MATERIALIZED t_inner_4 ALL NULL NULL NULL NULL 3
+3 MATERIALIZED t_inner_3 ALL NULL NULL NULL NULL 9 Using join buffer (flat, BNL join)
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from t1 t_outer_1,t2 t_outer_2 where t_outer_1.a in (select t_inner_1.a from t2 t_inner_2, t1 t_inner_1) and
+t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4) {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 2,
+ "steps": [
+ {
+ "transformation": {
+ "select_id": 2,
+ "from": "IN (SELECT)",
+ "to": "materialization",
+ "sjm_scan_allowed": true,
+ "possible": true
+ }
+ },
+ {
+ "transformation": {
+ "select_id": 2,
+ "from": "IN (SELECT)",
+ "to": "semijoin",
+ "chosen": true
+ }
+ },
+ {
+ "expanded_query": "/* select#2 */ select t_inner_1.a from t2 t_inner_2 join t1 t_inner_1"
+ }
+ ]
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": 3,
+ "steps": [
+ {
+ "transformation": {
+ "select_id": 3,
+ "from": "IN (SELECT)",
+ "to": "materialization",
+ "sjm_scan_allowed": true,
+ "possible": true
+ }
+ },
+ {
+ "transformation": {
+ "select_id": 3,
+ "from": "IN (SELECT)",
+ "to": "semijoin",
+ "chosen": true
+ }
+ },
+ {
+ "expanded_query": "/* select#3 */ select t_inner_3.a from t2 t_inner_3 join t1 t_inner_4"
+ }
+ ]
+ }
+ },
+ {
+ "expanded_query": "/* select#1 */ select t_outer_1.a AS a,t_outer_2.a AS a from t1 t_outer_1 join t2 t_outer_2 where t_outer_1.a in (/* select#2 */ select t_inner_1.a from t2 t_inner_2 join t1 t_inner_1) and t_outer_2.a in (/* select#3 */ select t_inner_3.a from t2 t_inner_3 join t1 t_inner_4)"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "transformation": {
+ "select_id": 2,
+ "from": "IN (SELECT)",
+ "to": "semijoin",
+ "converted_to_semi_join": true
+ }
+ },
+ {
+ "transformation": {
+ "select_id": 3,
+ "from": "IN (SELECT)",
+ "to": "semijoin",
+ "converted_to_semi_join": true
+ }
+ },
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "1 and 1 and t_outer_1.a = t_inner_1.a and t_outer_2.a = t_inner_3.a",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "1 and 1 and multiple equal(t_outer_1.a, t_inner_1.a) and multiple equal(t_outer_2.a, t_inner_3.a)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "1 and 1 and multiple equal(t_outer_1.a, t_inner_1.a) and multiple equal(t_outer_2.a, t_inner_3.a)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "multiple equal(t_outer_1.a, t_inner_1.a) and multiple equal(t_outer_2.a, t_inner_3.a)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t_outer_1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t_outer_2",
+ "row_may_be_null": false,
+ "map_bit": 1,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t_inner_2",
+ "row_may_be_null": false,
+ "map_bit": 2,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t_inner_1",
+ "row_may_be_null": false,
+ "map_bit": 3,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t_inner_3",
+ "row_may_be_null": false,
+ "map_bit": 4,
+ "depends_on_map_bits": []
+ },
+ {
+ "table": "t_inner_4",
+ "row_may_be_null": false,
+ "map_bit": 5,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t_outer_1",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0051
+ }
+ },
+ {
+ "table": "t_outer_2",
+ "table_scan": {
+ "rows": 9,
+ "cost": 2.0154
+ }
+ },
+ {
+ "table": "t_inner_2",
+ "table_scan": {
+ "rows": 9,
+ "cost": 2.0154
+ }
+ },
+ {
+ "table": "t_inner_1",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0051
+ }
+ },
+ {
+ "table": "t_inner_3",
+ "table_scan": {
+ "rows": 9,
+ "cost": 2.0154
+ }
+ },
+ {
+ "table": "t_inner_4",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0051
+ }
+ }
+ ]
+ },
+ {
+ "execution_plan_for_potential_materialization": {
+ "steps": [
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t_inner_1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t_inner_1"],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t_inner_4"],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t_outer_1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t_outer_1"],
+ "table": "t_inner_1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_2",
+ "t_outer_2"
+ ],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_2",
+ "t_outer_2",
+ "t_inner_4"
+ ],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_2",
+ "t_outer_2"
+ ],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "rest_of_plan": [
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_2",
+ "t_inner_4"
+ ],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ },
+ {
+ "plan_prefix": [
+ "t_outer_1",
+ "t_inner_1",
+ "t_inner_2",
+ "t_inner_4"
+ ],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1", "t_inner_2"],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": ["t_outer_1", "t_inner_1"],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_cost": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": ["t_outer_1"],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": ["t_outer_1"],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": ["t_outer_1"],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": ["t_outer_1"],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_outer_2",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_4",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ },
+ {
+ "plan_prefix": [],
+ "table": "t_inner_3",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 9,
+ "cost": 2.0154,
+ "chosen": true
+ }
+ ]
+ },
+ "pruned_by_heuristic": true
+ }
+ ]
+ },
+ {
+ "fix_semijoin_strategies_for_picked_join_order": [
+ {
+ "semi_join_strategy": "sj_materialize",
+ "join_order": [
+ {
+ "table": "t_inner_4"
+ },
+ {
+ "table": "t_inner_3"
+ }
+ ]
+ },
+ {
+ "semi_join_strategy": "sj_materialize",
+ "join_order": [
+ {
+ "table": "t_inner_1"
+ },
+ {
+ "table": "t_inner_2"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "condition_on_constant_tables": "1"
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "1",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t_outer_1",
+ "attached": null
+ },
+ {
+ "table": "t_inner_1",
+ "attached": null
+ },
+ {
+ "table": "t_inner_2",
+ "attached": null
+ },
+ {
+ "table": "<subquery2>",
+ "attached": null
+ },
+ {
+ "table": "t_outer_2",
+ "attached": null
+ },
+ {
+ "table": "t_inner_4",
+ "attached": null
+ },
+ {
+ "table": "t_inner_3",
+ "attached": null
+ },
+ {
+ "table": "<subquery3>",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+set @@optimizer_switch= @save_optimizer_switch;
+drop table t1,t2;
+#
+# MDEV-18942: Json_writer::add_bool: Conditional jump or move depends on uninitialised value upon
+# fulltext search under optimizer trace
+#
+CREATE TABLE t1 (f VARCHAR(255), FULLTEXT(f));
+CREATE VIEW v1 AS SELECT * FROM t1;
+INSERT INTO t1 VALUES ('fooba'),('abcde'),('xyzab');
+SET optimizer_trace = 'enabled=on';
+SELECT COUNT(*) FROM v1 WHERE MATCH (f) AGAINST ('fooba');
+COUNT(*)
+1
+DROP VIEW v1;
+DROP TABLE t1;
+set optimizer_trace='enabled=off';
diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test
new file mode 100644
index 00000000000..4ec7c338acd
--- /dev/null
+++ b/mysql-test/main/opt_trace.test
@@ -0,0 +1,390 @@
+--source include/not_embedded.inc
+SELECT table_name, column_name FROM information_schema.columns where table_name="OPTIMIZER_TRACE";
+show variables like 'optimizer_trace';
+set optimizer_trace="enabled=on";
+show variables like 'optimizer_trace';
+set optimizer_trace="enabled=off";
+create table t1 (a int, b int);
+insert into t1 values (1,2),(2,3);
+
+create table t2 (b int);
+insert into t2 values (1),(2);
+
+analyze table t1;
+analyze table t2;
+create function f1 (a int) returns INT
+return 1;
+
+create view v1 as select * from t1 where t1.a=1;
+create view v2 as select * from t1 where t1.a=1 group by t1.b;
+set optimizer_trace="enabled=on";
+
+--echo # Mergeable views/derived tables
+select * from v1;
+select * from information_schema.OPTIMIZER_TRACE;
+select * from (select * from t1 where t1.a=1)q;
+select * from information_schema.OPTIMIZER_TRACE;
+
+--echo # Non-Mergeable views
+select * from v2;
+select * from information_schema.OPTIMIZER_TRACE;
+
+drop table t1,t2;
+drop view v1,v2;
+drop function f1;
+
+create table t1(a int, b int);
+insert into t1 values (0,0),(1,1),(2,1),(3,2),(4,3),
+(5,3),(6,3),(7,3),(8,3),(9,3);
+create table t2(a int, b int);
+insert into t2 values (0,0),(1,1),(2,1),(3,2),(4,3),
+(5,3),(6,3),(7,3),(8,3),(9,3);
+
+ANALYZE TABLE t1;
+ANALYZE TABLE t2;
+
+create view v1 as select a from t1 group by b;
+create view v2 as select a from t2;
+
+--echo # Mergeable view
+explain select * from v2 ;
+select * from information_schema.OPTIMIZER_TRACE;
+
+--echo # Non-Mergeable view
+explain select * from v1 ;
+select * from information_schema.OPTIMIZER_TRACE;
+drop table t1,t2;
+drop view v1,v2;
+
+--echo #
+--echo # print ref-keyuse array
+--echo #
+
+create table t0 (a int);
+INSERT INTO t0 VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1 (a int, b int, c int, key(a));
+insert into t1 select A.a*10 + B.a, A.a*10 + B.a, A.a*10 + B.a from t0 A, t0 B;
+
+create table t2(a int, b int, c int , key(a));
+insert into t2 select A.a*10 + B.a, A.a*10 + B.a, A.a*10 + B.a from t0 A, t0 B;
+
+analyze table t1;
+analyze table t2;
+
+explain select * from t1,t2 where t1.a=t2.b+2 and t2.a= t1.b;
+select * from information_schema.OPTIMIZER_TRACE;
+drop table t1,t2,t0;
+
+--echo #
+--echo # group_by min max optimization
+--echo #
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a INT NOT NULL, KEY(a));
+--disable_query_log
+INSERT INTO t1(a) VALUES (1), (2), (3), (4);
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+INSERT INTO t1(a) SELECT a FROM t1;
+--enable_query_log
+
+analyze table t1;
+EXPLAIN SELECT DISTINCT a FROM t1;
+select * from information_schema.OPTIMIZER_TRACE;
+drop table t1;
+
+--echo #
+--echo # With group by, where clause and MIN/MAX function
+--echo #
+CREATE TABLE t1 (a INT, b INT, c int, d int, KEY(a,b,c,d));
+INSERT INTO t1 VALUES (1,1,1,1), (2,2,2,2), (3,3,3,3), (4,4,4,4), (1,0,1,1), (3,2,3,3), (4,5,4,4);
+ANALYZE TABLE t1;
+EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a;
+select * from information_schema.OPTIMIZER_TRACE;
+DROP TABLE t1;
+
+CREATE TABLE t1 (id INT NOT NULL, a DATE, KEY(id,a));
+INSERT INTO t1 values (1,'2001-01-01'),(1,'2001-01-02'),
+ (1,'2001-01-03'),(1,'2001-01-04'),
+ (2,'2001-01-01'),(2,'2001-01-02'),
+ (2,'2001-01-03'),(2,'2001-01-04'),
+ (3,'2001-01-01'),(3,'2001-01-02'),
+ (3,'2001-01-03'),(3,'2001-01-04'),
+ (4,'2001-01-01'),(4,'2001-01-02'),
+ (4,'2001-01-03'),(4,'2001-01-04');
+set optimizer_trace='enabled=on';
+EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id;
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+EXPLAIN SELECT * FROM t1 WHERE a = 20010104e0 GROUP BY id;
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+drop table t1;
+
+--echo #
+--echo # Late ORDER BY optimization
+--echo #
+
+create table ten(a int);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table one_k(a int primary key);
+insert into one_k select A.a + B.a* 10 + C.a * 100 from ten A, ten B, ten C;
+create table t1 (
+ pk int not null,
+ a int,
+ b int,
+ c int,
+ filler char(100),
+ KEY a_a(c),
+ KEY a_c(a,c),
+ KEY a_b(a,b)
+);
+
+insert into t1
+select a, a,a,a, 'filler-dataaa' from test.one_k;
+update t1 set a=1 where pk between 0 and 180;
+update t1 set b=2 where pk between 0 and 20;
+analyze table t1;
+set optimizer_trace='enabled=on';
+explain select * from t1 where a=1 and b=2 order by c limit 1;
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+drop table t1,ten,one_k;
+
+--echo #
+--echo # TABLE ELIMINATION
+--echo #
+
+create table t1 (a int);
+insert into t1 values (0),(1),(2),(3);
+create table t0 as select * from t1;
+
+create table t2 (a int primary key, b int)
+ as select a, a as b from t1 where a in (1,2);
+
+create table t3 (a int primary key, b int)
+ as select a, a as b from t1 where a in (1,3);
+
+set optimizer_trace='enabled=on';
+
+analyze table t1;
+analyze table t2;
+analyze table t3;
+
+--echo # table t2 should be eliminated
+explain
+select t1.a from t1 left join t2 on t1.a=t2.a;
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+--echo # no tables should be eliminated
+explain select * from t1 left join t2 on t2.a=t1.a;
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+--echo # multiple tables are eliminated
+explain select t1.a from t1 left join (t2 join t3 on t2.b=t3.b) on t2.a=t1.a and t3.a=t1.a;
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+drop table t0, t1, t2, t3;
+
+--echo #
+--echo # IN subquery to semi-join is traced
+--echo #
+
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1(a int, b int);
+insert into t1 values (0,0),(1,1),(2,2);
+create table t2 as select * from t1;
+
+create table t11(a int, b int);
+
+create table t10 (pk int, a int);
+insert into t10 select a,a from t0;
+create table t12 like t10;
+insert into t12 select * from t10;
+
+analyze table t1,t10;
+
+set optimizer_trace='enabled=on';
+explain extended select * from t1 where a in (select pk from t10);
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+drop table t0,t1,t11,t10,t12,t2;
+
+--echo #
+--echo # Selectivities for columns and indexes.
+--echo #
+
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+
+create table t1 (
+pk int,
+a int,
+b int,
+key pk(pk),
+key pk_a(pk,a),
+key pk_a_b(pk,a,b));
+insert into t1 select a,a,a from t0;
+
+ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a,b) INDEXES ();
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+set @save_use_stat_tables= @@use_stat_tables;
+set @@optimizer_use_condition_selectivity=4;
+set @@use_stat_tables= PREFERABLY;
+set optimizer_trace='enabled=on';
+explain select * from t1 where pk = 2 and a=5 and b=1;
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+set @@use_stat_tables= @save_use_stat_tables;
+drop table t0,t1;
+set optimizer_trace="enabled=off";
+
+--echo #
+--echo # Tests added to show that sub-statements are not traced
+--echo #
+
+create table t1(a int);
+insert into t1 values (1),(2),(3),(4);
+create table t2(a int);
+insert into t2 values (1),(2),(3),(4);
+delimiter |;
+create function f1(a int) returns int
+begin
+ declare a int default 0;
+ set a= a+ (select count(*) from t2);
+ return a;
+end|
+
+create function f2(a int) returns int
+begin
+ declare a int default 0;
+ select count(*) from t2 into a;
+ return a;
+end|
+
+delimiter ;|
+set optimizer_trace='enabled=on';
+select f1(a) from t1;
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+select f2(a) from t1;
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+drop table t1,t2;
+drop function f1;
+drop function f2;
+set optimizer_trace='enabled=off';
+
+--echo #
+--echo # MDEV-18489: Limit the memory used by the optimizer trace
+--echo #
+
+create table t1 (a int);
+insert into t1 values (1),(2);
+
+set optimizer_trace='enabled=on';
+set @save_optimizer_trace_max_mem_size= @@optimizer_trace_max_mem_size;
+select * from t1;
+select length(trace) from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+set optimizer_trace_max_mem_size=100;
+select * from t1;
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+set optimizer_trace_max_mem_size=0;
+select * from t1;
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+drop table t1;
+set optimizer_trace='enabled=off';
+set @@optimizer_trace_max_mem_size= @save_optimizer_trace_max_mem_size;
+
+--echo #
+--echo # MDEV-18527: Optimizer trace for DELETE query shows table:null
+--echo #
+
+create table ten(a int);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t0 (a int, b int);
+insert into t0 select a,a from ten;
+alter table t0 add key(a);
+
+set optimizer_trace=1;
+explain delete from t0 where t0.a<3;
+select * from information_schema.optimizer_trace;
+drop table ten,t0;
+set optimizer_trace='enabled=off';
+
+--echo #
+--echo # MDEV-18528: Optimizer trace support for multi-table UPDATE and DELETE
+--echo #
+
+set optimizer_trace=1;
+create table ten(a int);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t0 (a int, b int);
+insert into t0 select a,a from ten;
+alter table t0 add key(a);
+create table t1 like t0;
+insert into t1 select * from t0;
+explain delete t0,t1 from t0, t1 where t0.a=t1.a and t1.a<3;
+select * from information_schema.optimizer_trace;
+drop table ten,t0,t1;
+set optimizer_trace='enabled=off';
+
+--echo #
+--echo # Merged to Materialized for derived tables
+--echo #
+
+set optimizer_trace=1;
+create table t1 (a int);
+insert into t1 values (1),(2),(3);
+explain select * from (select rand() from t1)q;
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+drop table t1;
+set optimizer_trace='enabled=off';
+
+--echo #
+--echo # Semi-join nest
+--echo #
+
+set optimizer_trace=1;
+create table t1 (a int);
+insert into t1 values (1),(2),(3);
+create table t2(a int);
+insert into t2 values (1),(2),(3),(1),(2),(3),(1),(2),(3);
+set @save_optimizer_switch= @@optimizer_switch;
+explain select * from t1 where a in (select t_inner_1.a from t1 t_inner_1, t1 t_inner_2);
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+
+--echo # with Firstmatch, mostly for tracing fix_semijoin_strategies_for_picked_join_order
+
+set optimizer_switch='materialization=off';
+explain select * from t1 t_outer_1,t2 t_outer_2 where t_outer_1.a in (select t_inner_1.a from t2 t_inner_2, t1 t_inner_1) and
+ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4);
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+set optimizer_switch='materialization=on';
+explain select * from t1 t_outer_1,t2 t_outer_2 where t_outer_1.a in (select t_inner_1.a from t2 t_inner_2, t1 t_inner_1) and
+ t_outer_2.a in (select t_inner_3.a from t2 t_inner_3, t1 t_inner_4);
+select * from INFORMATION_SCHEMA.OPTIMIZER_TRACE;
+set @@optimizer_switch= @save_optimizer_switch;
+drop table t1,t2;
+
+--echo #
+--echo # MDEV-18942: Json_writer::add_bool: Conditional jump or move depends on uninitialised value upon
+--echo # fulltext search under optimizer trace
+--echo #
+
+CREATE TABLE t1 (f VARCHAR(255), FULLTEXT(f));
+CREATE VIEW v1 AS SELECT * FROM t1;
+INSERT INTO t1 VALUES ('fooba'),('abcde'),('xyzab');
+SET optimizer_trace = 'enabled=on';
+SELECT COUNT(*) FROM v1 WHERE MATCH (f) AGAINST ('fooba');
+DROP VIEW v1;
+DROP TABLE t1;
+
+set optimizer_trace='enabled=off';
diff --git a/mysql-test/main/opt_trace_index_merge.result b/mysql-test/main/opt_trace_index_merge.result
new file mode 100644
index 00000000000..50daef815d6
--- /dev/null
+++ b/mysql-test/main/opt_trace_index_merge.result
@@ -0,0 +1,245 @@
+set @tmp_opt_switch= @@optimizer_switch;
+set optimizer_switch='index_merge_sort_intersection=on';
+set optimizer_trace='enabled=on';
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (a int, b int, c int, filler char(100),
+key(a), key(b), key(c));
+insert into t1 select
+A.a * B.a*10 + C.a*100,
+A.a * B.a*10 + C.a*100,
+A.a,
+'filler'
+from t0 A, t0 B, t0 C;
+This should use union:
+explain select * from t1 where a=1 or b=1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 index_merge a,b a,b 5,5 NULL 2 Using union(a,b); Using where
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from t1 where a=1 or b=1 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select t1.a AS a,t1.b AS b,t1.c AS c,t1.filler AS filler from t1 where t1.a = 1 or t1.b = 1"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.a = 1 or t1.b = 1",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "multiple equal(1, t1.a) or multiple equal(1, t1.b)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "multiple equal(1, t1.a) or multiple equal(1, t1.b)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "multiple equal(1, t1.a) or multiple equal(1, t1.b)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": []
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 1000,
+ "cost": 231.69
+ },
+ "potential_range_indexes": [
+ {
+ "index": "a",
+ "usable": true,
+ "key_parts": ["a"]
+ },
+ {
+ "index": "b",
+ "usable": true,
+ "key_parts": ["b"]
+ },
+ {
+ "index": "c",
+ "usable": false,
+ "cause": "not applicable"
+ }
+ ],
+ "setup_range_conditions": [],
+ "group_index_range": {
+ "chosen": false,
+ "cause": "no group by or distinct"
+ },
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ },
+ "analyzing_sort_intersect": {},
+ "analyzing_index_merge_union": [
+ {
+ "indexes_to_merge": [
+ {
+ "range_scan_alternatives": [
+ {
+ "index": "a",
+ "ranges": ["1 <= a <= 1"],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 1,
+ "cost": 1.1773,
+ "chosen": true
+ }
+ ],
+ "index_to_merge": "a",
+ "cumulated_cost": 1.1773
+ },
+ {
+ "range_scan_alternatives": [
+ {
+ "index": "b",
+ "ranges": ["1 <= b <= 1"],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": true,
+ "rows": 1,
+ "cost": 1.1773,
+ "chosen": true
+ }
+ ],
+ "index_to_merge": "b",
+ "cumulated_cost": 2.3547
+ }
+ ],
+ "cost_of_reading_ranges": 2.3547,
+ "use_roworder_union": true,
+ "cause": "always cheaper than non roworder retrieval",
+ "analyzing_roworder_scans": [
+ {
+ "type": "range_scan",
+ "index": "a",
+ "rows": 1,
+ "ranges": ["1 <= a <= 1"],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ }
+ },
+ {
+ "type": "range_scan",
+ "index": "b",
+ "rows": 1,
+ "ranges": ["1 <= b <= 1"],
+ "analyzing_roworder_intersect": {
+ "cause": "too few roworder scans"
+ }
+ }
+ ],
+ "index_roworder_union_cost": 4.1484,
+ "members": 2,
+ "chosen": true
+ }
+ ]
+ },
+ "chosen_range_access_summary": {
+ "range_access_plan": {
+ "type": "index_roworder_union",
+ "union_of": [
+ {
+ "type": "range_scan",
+ "index": "a",
+ "rows": 1,
+ "ranges": ["1 <= a <= 1"]
+ },
+ {
+ "type": "range_scan",
+ "index": "b",
+ "rows": 1,
+ "ranges": ["1 <= b <= 1"]
+ }
+ ]
+ },
+ "rows_for_plan": 2,
+ "cost_for_plan": 4.1484,
+ "chosen": true
+ }
+ }
+ },
+ {
+ "selectivity_for_indexes": [],
+ "selectivity_for_columns": [],
+ "cond_selectivity": 0.002
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "range",
+ "resulting_rows": 2,
+ "cost": 4.1484,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t1.a = 1 or t1.b = 1",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": "t1.a = 1 or t1.b = 1"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+drop table t0,t1;
+set optimizer_trace="enabled=off";
+set @@optimizer_switch= @tmp_opt_switch;
diff --git a/mysql-test/main/opt_trace_index_merge.test b/mysql-test/main/opt_trace_index_merge.test
new file mode 100644
index 00000000000..d5efaf81db5
--- /dev/null
+++ b/mysql-test/main/opt_trace_index_merge.test
@@ -0,0 +1,21 @@
+--source include/not_embedded.inc
+set @tmp_opt_switch= @@optimizer_switch;
+set optimizer_switch='index_merge_sort_intersection=on';
+set optimizer_trace='enabled=on';
+create table t0 (a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (a int, b int, c int, filler char(100),
+ key(a), key(b), key(c));
+insert into t1 select
+ A.a * B.a*10 + C.a*100,
+ A.a * B.a*10 + C.a*100,
+ A.a,
+ 'filler'
+from t0 A, t0 B, t0 C;
+
+--echo This should use union:
+explain select * from t1 where a=1 or b=1;
+select * from information_schema.OPTIMIZER_TRACE;
+drop table t0,t1;
+set optimizer_trace="enabled=off";
+set @@optimizer_switch= @tmp_opt_switch;
diff --git a/mysql-test/main/opt_trace_index_merge_innodb.result b/mysql-test/main/opt_trace_index_merge_innodb.result
new file mode 100644
index 00000000000..94e9d4f58cc
--- /dev/null
+++ b/mysql-test/main/opt_trace_index_merge_innodb.result
@@ -0,0 +1,253 @@
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
+create table t1
+(
+pk1 int not null,
+pk2 int not null,
+key1 int not null,
+key2 int not null,
+key (key1),
+key (key2),
+primary key (pk1, pk2)
+)engine=Innodb;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+set optimizer_trace="enabled=on";
+set @tmp_index_merge_ror_cpk=@@optimizer_switch;
+set optimizer_switch='extended_keys=off';
+explain select * from t1 where pk1 != 0 and key1 = 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref PRIMARY,key1 key1 4 const 1 Using index condition
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+explain select * from t1 where pk1 != 0 and key1 = 1 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select t1.pk1 AS pk1,t1.pk2 AS pk2,t1.key1 AS key1,t1.key2 AS key2 from t1 where t1.pk1 <> 0 and t1.key1 = 1"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "condition_processing": {
+ "condition": "WHERE",
+ "original_condition": "t1.pk1 <> 0 and t1.key1 = 1",
+ "steps": [
+ {
+ "transformation": "equality_propagation",
+ "resulting_condition": "t1.pk1 <> 0 and multiple equal(1, t1.key1)"
+ },
+ {
+ "transformation": "constant_propagation",
+ "resulting_condition": "t1.pk1 <> 0 and multiple equal(1, t1.key1)"
+ },
+ {
+ "transformation": "trivial_condition_removal",
+ "resulting_condition": "t1.pk1 <> 0 and multiple equal(1, t1.key1)"
+ }
+ ]
+ }
+ },
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "ref_optimizer_key_uses": [
+ {
+ "table": "t1",
+ "field": "key1",
+ "equals": "1",
+ "null_rejecting": false
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "range_analysis": {
+ "table_scan": {
+ "rows": 1000,
+ "cost": 206.1
+ },
+ "potential_range_indexes": [
+ {
+ "index": "PRIMARY",
+ "usable": true,
+ "key_parts": ["pk1", "pk2"]
+ },
+ {
+ "index": "key1",
+ "usable": true,
+ "key_parts": ["key1"]
+ },
+ {
+ "index": "key2",
+ "usable": false,
+ "cause": "not applicable"
+ }
+ ],
+ "setup_range_conditions": [],
+ "group_index_range": {
+ "chosen": false,
+ "cause": "no group by or distinct"
+ },
+ "analyzing_range_alternatives": {
+ "range_scan_alternatives": [
+ {
+ "index": "PRIMARY",
+ "ranges": ["pk1 < 0", "0 < pk1"],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 1000,
+ "cost": 206.39,
+ "chosen": false,
+ "cause": "cost"
+ },
+ {
+ "index": "key1",
+ "ranges": ["1 <= key1 <= 1"],
+ "rowid_ordered": true,
+ "using_mrr": false,
+ "index_only": false,
+ "rows": 1,
+ "cost": 2.3751,
+ "chosen": true
+ }
+ ],
+ "analyzing_roworder_intersect": {
+ "intersecting_indexes": [
+ {
+ "index": "key1",
+ "index_scan_cost": 1.0001,
+ "cumulated_index_scan_cost": 1.0001,
+ "disk_sweep_cost": 1.0014,
+ "cumulative_total_cost": 2.0015,
+ "usable": true,
+ "matching_rows_now": 1,
+ "intersect_covering_with_this_index": false,
+ "chosen": true
+ }
+ ],
+ "clustered_pk": {
+ "clustered_pk_added_to_intersect": false,
+ "cause": "cost"
+ },
+ "chosen": false,
+ "cause": "too few indexes to merge"
+ },
+ "analyzing_index_merge_union": []
+ },
+ "chosen_range_access_summary": {
+ "range_access_plan": {
+ "type": "range_scan",
+ "index": "key1",
+ "rows": 1,
+ "ranges": ["1 <= key1 <= 1"]
+ },
+ "rows_for_plan": 1,
+ "cost_for_plan": 2.3751,
+ "chosen": true
+ }
+ }
+ },
+ {
+ "selectivity_for_indexes": [
+ {
+ "index_name": "PRIMARY",
+ "selectivity_from_index": 1
+ },
+ {
+ "index_name": "key1",
+ "selectivity_from_index": 0.001
+ }
+ ],
+ "selectivity_for_columns": [],
+ "cond_selectivity": 0.001
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "ref",
+ "index": "key1",
+ "used_range_estimates": true,
+ "rows": 1,
+ "cost": 2,
+ "chosen": true
+ },
+ {
+ "type": "scan",
+ "chosen": false,
+ "cause": "cost"
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": "t1.key1 = 1 and t1.pk1 <> 0",
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": "t1.pk1 <> 0"
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+drop table t1;
+set @@optimizer_switch= @tmp_index_merge_ror_cpk;
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
+#
+# MDEV-18962: ASAN heap-buffer-overflow in Single_line_formatting_helper::on_add_str with optimizer trace
+#
+CREATE TABLE t1 (a date not null, b time, key(a), key(b)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES ('1991-09-09','00:00:00'),('2032-08-24','02:22:24');
+SET SESSION optimizer_trace = 'enabled=on';
+SELECT * FROM t1 WHERE b IS NULL AND a = '2000-01-01';
+a b
+DROP TABLE t1;
+set optimizer_trace="enabled=off";
diff --git a/mysql-test/main/opt_trace_index_merge_innodb.test b/mysql-test/main/opt_trace_index_merge_innodb.test
new file mode 100644
index 00000000000..42d8c57144c
--- /dev/null
+++ b/mysql-test/main/opt_trace_index_merge_innodb.test
@@ -0,0 +1,54 @@
+--source include/not_embedded.inc
+--source include/have_innodb.inc
+
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
+
+create table t1
+(
+ pk1 int not null,
+ pk2 int not null,
+ key1 int not null,
+ key2 int not null,
+ key (key1),
+ key (key2),
+ primary key (pk1, pk2)
+)engine=Innodb;
+
+--disable_query_log
+let $1=1000;
+while ($1)
+{
+ eval insert into t1 values (1+$1/10,$1 mod 100,$1,$1/100);
+ dec $1;
+}
+--enable_query_log
+analyze table t1;
+
+set optimizer_trace="enabled=on";
+set @tmp_index_merge_ror_cpk=@@optimizer_switch;
+set optimizer_switch='extended_keys=off';
+explain select * from t1 where pk1 != 0 and key1 = 1;
+select * from information_schema.OPTIMIZER_TRACE;
+drop table t1;
+set @@optimizer_switch= @tmp_index_merge_ror_cpk;
+
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
+
+--echo #
+--echo # MDEV-18962: ASAN heap-buffer-overflow in Single_line_formatting_helper::on_add_str with optimizer trace
+--echo #
+
+CREATE TABLE t1 (a date not null, b time, key(a), key(b)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES ('1991-09-09','00:00:00'),('2032-08-24','02:22:24');
+SET SESSION optimizer_trace = 'enabled=on';
+SELECT * FROM t1 WHERE b IS NULL AND a = '2000-01-01';
+DROP TABLE t1;
+
+set optimizer_trace="enabled=off";
diff --git a/mysql-test/main/opt_trace_security.result b/mysql-test/main/opt_trace_security.result
new file mode 100644
index 00000000000..9f5bacd6aa7
--- /dev/null
+++ b/mysql-test/main/opt_trace_security.result
@@ -0,0 +1,386 @@
+create database db1;
+use db1;
+create table t1(a int);
+insert into t1 values (1),(2),(3);
+create table t2(a int);
+CREATE USER 'foo'@'%';
+CREATE USER 'bar'@'%';
+create definer=foo SQL SECURITY definer view db1.v1 as select * from db1.t1;
+create definer=foo function f1 (a int) returns INT SQL SECURITY DEFINER
+BEGIN
+insert into t2 select * from t1;
+return a+1;
+END|
+set optimizer_trace="enabled=on";
+select * from db1.t1;
+ERROR 42000: SELECT command denied to user 'foo'@'localhost' for table 't1'
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+ 0 1
+set optimizer_trace="enabled=off";
+grant select(a) on db1.t1 to 'foo'@'%';
+set optimizer_trace="enabled=on";
+select * from db1.t1;
+a
+1
+2
+3
+# INSUFFICIENT PRIVILEGES should be set to 1
+# Trace and Query should be empty
+# We need the SELECT privilege on the table db1.t1
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+ 0 1
+set optimizer_trace="enabled=off";
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+grant select on db1.t1 to 'foo'@'%';
+grant select on db1.t2 to 'foo'@'%';
+set optimizer_trace="enabled=on";
+#
+# SELECT privilege on the table db1.t1
+# The trace would be present.
+#
+select * from db1.t1;
+a
+1
+2
+3
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+select * from db1.t1 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "expanded_query": "select db1.t1.a AS a from t1"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0051
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": null,
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+set optimizer_trace="enabled=off";
+grant select on db1.v1 to 'foo'@'%';
+grant show view on db1.v1 to 'foo'@'%';
+grant select on db1.v1 to 'bar'@'%';
+grant show view on db1.v1 to 'bar'@'%';
+select current_user();
+current_user()
+foo@%
+set optimizer_trace="enabled=on";
+select * from db1.v1;
+a
+1
+2
+3
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+select * from db1.v1 {
+ "steps": [
+ {
+ "join_preparation": {
+ "select_id": 1,
+ "steps": [
+ {
+ "view": {
+ "table": "v1",
+ "select_id": 2,
+ "algorithm": "merged"
+ }
+ },
+ {
+ "join_preparation": {
+ "select_id": 2,
+ "steps": [
+ {
+ "expanded_query": "/* select#2 */ select db1.t1.a AS a from t1"
+ }
+ ]
+ }
+ },
+ {
+ "expanded_query": "/* select#1 */ select db1.t1.a AS a from v1"
+ }
+ ]
+ }
+ },
+ {
+ "join_optimization": {
+ "select_id": 1,
+ "steps": [
+ {
+ "table_dependencies": [
+ {
+ "table": "t1",
+ "row_may_be_null": false,
+ "map_bit": 0,
+ "depends_on_map_bits": []
+ }
+ ]
+ },
+ {
+ "rows_estimation": [
+ {
+ "table": "t1",
+ "table_scan": {
+ "rows": 3,
+ "cost": 2.0051
+ }
+ }
+ ]
+ },
+ {
+ "considered_execution_plans": [
+ {
+ "plan_prefix": [],
+ "table": "t1",
+ "best_access_path": {
+ "considered_access_paths": [
+ {
+ "access_type": "scan",
+ "resulting_rows": 3,
+ "cost": 2.0051,
+ "chosen": true
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "attaching_conditions_to_tables": {
+ "original_condition": null,
+ "attached_conditions_computation": [],
+ "attached_conditions_summary": [
+ {
+ "table": "t1",
+ "attached": null
+ }
+ ]
+ }
+ }
+ ]
+ }
+ },
+ {
+ "join_execution": {
+ "select_id": 1,
+ "steps": []
+ }
+ }
+ ]
+} 0 0
+set optimizer_trace="enabled=off";
+select current_user();
+current_user()
+bar@%
+set optimizer_trace="enabled=on";
+select * from db1.v1;
+a
+1
+2
+3
+#
+# INSUFFICIENT PRIVILEGES should be set to 1
+# Trace and Query should be empty
+# Privileges for the underlying tables of the
+# view should also be present for the current user
+#
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+ 0 1
+set optimizer_trace="enabled=off";
+grant execute on function db1.f1 to 'foo'@'%';
+grant execute on function db1.f1 to 'bar'@'%';
+grant select on db1.t1 to 'bar'@'%';
+grant insert on db1.t2 to 'foo'@'%';
+select current_user();
+current_user()
+foo@%
+set optimizer_trace="enabled=on";
+select db1.f1(a) from db1.t1;
+db1.f1(a)
+2
+3
+4
+select INSUFFICIENT_PRIVILEGES from information_schema.OPTIMIZER_TRACE;
+INSUFFICIENT_PRIVILEGES
+0
+set optimizer_trace="enabled=off";
+select current_user();
+current_user()
+bar@%
+set optimizer_trace="enabled=on";
+#
+# The trace should be empty, because the current user
+# does not have the INSERT privilege on table t2, which is
+# used in the function f1
+#
+select db1.f1(a) from db1.t1;
+db1.f1(a)
+2
+3
+4
+select * from information_schema.OPTIMIZER_TRACE;
+QUERY TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE INSUFFICIENT_PRIVILEGES
+ 0 1
+set optimizer_trace="enabled=off";
+select current_user();
+current_user()
+root@localhost
+REVOKE ALL PRIVILEGES, GRANT OPTION FROM foo;
+drop user if exists foo;
+drop user if exists bar;
+drop table db1.t1, db1.t2;
+drop database db1;
+#
+# Privilege checking for optimizer trace across connections
+#
+connection default;
+create database db1;
+use db1;
+create table t1(a int);
+insert into t1 values (1),(2),(3);
+create table t2(a int);
+CREATE USER 'foo'@'localhost';
+CREATE USER 'bar'@'localhost';
+grant all on *.* to foo@localhost with grant option;
+grant all on *.* to bar@localhost with grant option;
+connect con_foo,localhost, foo,, db1;
+connection default;
+connect con_bar,localhost, bar,, db1;
+connection default;
+create definer=foo@localhost SQL SECURITY definer view db1.v1 as select * from db1.t1;
+create function f1 (a int) returns INT SQL SECURITY DEFINER
+BEGIN
+insert into t2 select * from t1;
+return a+1;
+END|
+grant execute on function f1 to bar@localhost;
+connection con_foo;
+set optimizer_trace='enabled=on';
+select * from db1.t1;
+a
+1
+2
+3
+#
+# Test that security context changes are allowed when, and only
+# when, the invoker has all global privileges.
+#
+select query, INSUFFICIENT_PRIVILEGES from information_schema.OPTIMIZER_TRACE;
+query INSUFFICIENT_PRIVILEGES
+select * from db1.t1 0
+set optimizer_trace='enabled=off';
+connection con_bar;
+set optimizer_trace='enabled=on';
+select f1(a) from db1.t1;
+f1(a)
+2
+3
+4
+select query, INSUFFICIENT_PRIVILEGES from information_schema.OPTIMIZER_TRACE;
+query INSUFFICIENT_PRIVILEGES
+select f1(a) from db1.t1 0
+set optimizer_trace='enabled=off';
+connection default;
+revoke shutdown on *.* from foo@localhost;
+disconnect con_foo;
+connect con_foo, localhost, foo,, db1;
+connection con_foo;
+set optimizer_trace='enabled=on';
+select f1(a) from db1.t1;
+f1(a)
+2
+3
+4
+#
+# Check that security context changes are allowed only when the invoker
+# has all global privileges. The SHUTDOWN privilege has been revoked from
+# this user, so INSUFFICIENT PRIVILEGES should be set to 1.
+#
+select query, INSUFFICIENT_PRIVILEGES from information_schema.OPTIMIZER_TRACE;
+query INSUFFICIENT_PRIVILEGES
+ 1
+set optimizer_trace='enabled=off';
+connection default;
+select current_user();
+current_user()
+root@localhost
+select * from db1.v1;
+a
+1
+2
+3
+drop user foo@localhost, bar@localhost;
+drop view db1.v1;
+drop table db1.t1;
+drop database db1;
+set optimizer_trace="enabled=off";
diff --git a/mysql-test/main/opt_trace_security.test b/mysql-test/main/opt_trace_security.test
new file mode 100644
index 00000000000..9fa49190990
--- /dev/null
+++ b/mysql-test/main/opt_trace_security.test
@@ -0,0 +1,197 @@
+--source include/not_embedded.inc
+create database db1;
+use db1;
+create table t1(a int);
+insert into t1 values (1),(2),(3);
+create table t2(a int);
+
+CREATE USER 'foo'@'%';
+CREATE USER 'bar'@'%';
+
+create definer=foo SQL SECURITY definer view db1.v1 as select * from db1.t1;
+
+delimiter |;
+create definer=foo function f1 (a int) returns INT SQL SECURITY DEFINER
+BEGIN
+ insert into t2 select * from t1;
+ return a+1;
+END|
+delimiter ;|
+
+--change_user foo
+set optimizer_trace="enabled=on";
+--error 1142
+select * from db1.t1;
+select * from information_schema.OPTIMIZER_TRACE;
+set optimizer_trace="enabled=off";
+
+--change_user root
+grant select(a) on db1.t1 to 'foo'@'%';
+
+--change_user foo
+set optimizer_trace="enabled=on";
+select * from db1.t1;
+
+--echo # INSUFFICIENT PRIVILEGES should be set to 1
+--echo # Trace and Query should be empty
+--echo # We need the SELECT privilege on the table db1.t1
+
+select * from information_schema.OPTIMIZER_TRACE;
+set optimizer_trace="enabled=off";
+
+--change_user root
+select * from information_schema.OPTIMIZER_TRACE;
+grant select on db1.t1 to 'foo'@'%';
+grant select on db1.t2 to 'foo'@'%';
+
+--change_user foo
+set optimizer_trace="enabled=on";
+
+--echo #
+--echo # SELECT privilege on the table db1.t1
+--echo # The trace would be present.
+--echo #
+select * from db1.t1;
+select * from information_schema.OPTIMIZER_TRACE;
+set optimizer_trace="enabled=off";
+
+--change_user root
+
+grant select on db1.v1 to 'foo'@'%';
+grant show view on db1.v1 to 'foo'@'%';
+
+grant select on db1.v1 to 'bar'@'%';
+grant show view on db1.v1 to 'bar'@'%';
+
+--change_user foo
+select current_user();
+set optimizer_trace="enabled=on";
+select * from db1.v1;
+select * from information_schema.OPTIMIZER_TRACE;
+set optimizer_trace="enabled=off";
+
+--change_user bar
+select current_user();
+set optimizer_trace="enabled=on";
+select * from db1.v1;
+--echo #
+--echo # INSUFFICIENT PRIVILEGES should be set to 1
+--echo # Trace and Query should be empty
+--echo # Privileges for the underlying tables of the
+--echo # view should also be present for the current user
+--echo #
+select * from information_schema.OPTIMIZER_TRACE;
+set optimizer_trace="enabled=off";
+
+--change_user root
+grant execute on function db1.f1 to 'foo'@'%';
+grant execute on function db1.f1 to 'bar'@'%';
+
+grant select on db1.t1 to 'bar'@'%';
+grant insert on db1.t2 to 'foo'@'%';
+
+--change_user foo
+select current_user();
+set optimizer_trace="enabled=on";
+
+select db1.f1(a) from db1.t1;
+select INSUFFICIENT_PRIVILEGES from information_schema.OPTIMIZER_TRACE;
+set optimizer_trace="enabled=off";
+
+--change_user bar
+select current_user();
+set optimizer_trace="enabled=on";
+--echo #
+--echo # The trace should be empty, because the current user
+--echo # does not have the INSERT privilege on table t2, which is
+--echo # used in the function f1
+--echo #
+select db1.f1(a) from db1.t1;
+select * from information_schema.OPTIMIZER_TRACE;
+set optimizer_trace="enabled=off";
+
+--change_user root
+select current_user();
+REVOKE ALL PRIVILEGES, GRANT OPTION FROM foo;
+
+--change_user root
+drop user if exists foo;
+drop user if exists bar;
+drop table db1.t1, db1.t2;
+drop database db1;
+
+
+--echo #
+--echo # Privilege checking for optimizer trace across connections
+--echo #
+
+connection default;
+create database db1;
+use db1;
+create table t1(a int);
+insert into t1 values (1),(2),(3);
+create table t2(a int);
+
+CREATE USER 'foo'@'localhost';
+CREATE USER 'bar'@'localhost';
+grant all on *.* to foo@localhost with grant option;
+grant all on *.* to bar@localhost with grant option;
+#grant select on db1.t1 to bar@localhost;
+#grant insert on db1.t2 to bar@localhost;
+
+connect (con_foo,localhost, foo,, db1);
+connection default;
+connect (con_bar,localhost, bar,, db1);
+connection default;
+create definer=foo@localhost SQL SECURITY definer view db1.v1 as select * from db1.t1;
+
+delimiter |;
+create function f1 (a int) returns INT SQL SECURITY DEFINER
+BEGIN
+ insert into t2 select * from t1;
+ return a+1;
+END|
+delimiter ;|
+
+grant execute on function f1 to bar@localhost;
+
+connection con_foo;
+set optimizer_trace='enabled=on';
+select * from db1.t1;
+--echo #
+--echo # Test that security context changes are allowed when, and only
+--echo # when, the invoker has all global privileges.
+--echo #
+select query, INSUFFICIENT_PRIVILEGES from information_schema.OPTIMIZER_TRACE;
+set optimizer_trace='enabled=off';
+
+connection con_bar;
+set optimizer_trace='enabled=on';
+select f1(a) from db1.t1;
+select query, INSUFFICIENT_PRIVILEGES from information_schema.OPTIMIZER_TRACE;
+set optimizer_trace='enabled=off';
+
+connection default;
+revoke shutdown on *.* from foo@localhost;
+disconnect con_foo;
+connect (con_foo, localhost, foo,, db1);
+
+connection con_foo;
+set optimizer_trace='enabled=on';
+select f1(a) from db1.t1;
+--echo #
+--echo # Check that security context changes are allowed only when the invoker
+--echo # has all global privileges. The SHUTDOWN privilege has been revoked from
+--echo # this user, so INSUFFICIENT PRIVILEGES should be set to 1.
+--echo #
+select query, INSUFFICIENT_PRIVILEGES from information_schema.OPTIMIZER_TRACE;
+set optimizer_trace='enabled=off';
+
+connection default;
+select current_user();
+select * from db1.v1;
+drop user foo@localhost, bar@localhost;
+drop view db1.v1;
+drop table db1.t1;
+drop database db1;
+set optimizer_trace="enabled=off";
diff --git a/mysql-test/main/order_by.result b/mysql-test/main/order_by.result
index db096acb162..4e8f8bfb17d 100644
--- a/mysql-test/main/order_by.result
+++ b/mysql-test/main/order_by.result
@@ -294,82 +294,109 @@ member_id nickname voornaam
drop table t1;
create table t1 (a int not null, b int, c varchar(10), key (a, b, c));
insert into t1 values (1, NULL, NULL), (1, NULL, 'b'), (1, 1, NULL), (1, 1, 'b'), (1, 1, 'b'), (2, 1, 'a'), (2, 1, 'b'), (2, 2, 'a'), (2, 2, 'b'), (2, 3, 'c'),(1,3,'b');
+insert into t1 select * from t1;
explain select * from t1 where (a = 1 and b is null and c = 'b') or (a > 2) order by a desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 22 NULL 2 Using where; Using index
+1 SIMPLE t1 range a a 22 NULL 3 Using where; Using index
select * from t1 where (a = 1 and b is null and c = 'b') or (a > 2) order by a desc;
a b c
1 NULL b
+1 NULL b
explain select * from t1 where a >= 1 and a < 3 order by a desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 4 NULL 10 Using where; Using index
+1 SIMPLE t1 range a a 4 NULL 22 Using where; Using index
select * from t1 where a >= 1 and a < 3 order by a desc;
a b c
2 3 c
+2 3 c
+2 2 b
2 2 b
2 2 a
+2 2 a
+2 1 b
2 1 b
2 1 a
+2 1 a
1 3 b
+1 3 b
+1 1 b
+1 1 b
1 1 b
1 1 b
1 1 NULL
+1 1 NULL
+1 NULL b
1 NULL b
1 NULL NULL
+1 NULL NULL
explain select * from t1 where a = 1 order by a desc, b desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref a a 4 const 5 Using where; Using index
+1 SIMPLE t1 ref a a 4 const 12 Using where; Using index
select * from t1 where a = 1 order by a desc, b desc;
a b c
1 3 b
+1 3 b
+1 1 b
+1 1 b
1 1 b
1 1 b
1 1 NULL
+1 1 NULL
+1 NULL b
1 NULL b
1 NULL NULL
+1 NULL NULL
explain select * from t1 where a = 1 and b is null order by a desc, b desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref a a 9 const,const 2 Using where; Using index; Using filesort
+1 SIMPLE t1 ref a a 9 const,const 4 Using where; Using index; Using filesort
select * from t1 where a = 1 and b is null order by a desc, b desc;
a b c
1 NULL NULL
+1 NULL NULL
+1 NULL b
1 NULL b
explain select * from t1 where a >= 1 and a < 3 and b >0 order by a desc,b desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 9 NULL 8 Using where; Using index
+1 SIMPLE t1 range a a 9 NULL 18 Using where; Using index
explain select * from t1 where a = 2 and b >0 order by a desc,b desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 9 NULL 5 Using where; Using index
+1 SIMPLE t1 range a a 9 NULL 10 Using where; Using index
explain select * from t1 where a = 2 and b is null order by a desc,b desc;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref a a 9 const,const 1 Using where; Using index; Using filesort
explain select * from t1 where a = 2 and (b is null or b > 0) order by a
desc,b desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 9 NULL 6 Using where; Using index
+1 SIMPLE t1 range a a 9 NULL 11 Using where; Using index
explain select * from t1 where a = 2 and b > 0 order by a desc,b desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 9 NULL 5 Using where; Using index
+1 SIMPLE t1 range a a 9 NULL 10 Using where; Using index
explain select * from t1 where a = 2 and b < 2 order by a desc,b desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 9 NULL 2 Using where; Using index
+1 SIMPLE t1 range a a 9 NULL 4 Using where; Using index
explain select * from t1 where a = 1 order by b desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref a a 4 const 5 Using where; Using index
+1 SIMPLE t1 ref a a 4 const 12 Using where; Using index
explain select * from t1 where a = 2 and b > 0 order by a desc,b desc,b,a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 9 NULL 5 Using where; Using index
+1 SIMPLE t1 range a a 9 NULL 10 Using where; Using index
explain select * from t1 where a = 2 and b < 2 order by a desc,a,b desc,a,b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 9 NULL 2 Using where; Using index
+1 SIMPLE t1 range a a 9 NULL 4 Using where; Using index
select * from t1 where a = 1 order by b desc;
a b c
1 3 b
+1 3 b
+1 1 b
+1 1 b
1 1 b
1 1 b
1 1 NULL
+1 1 NULL
+1 NULL b
1 NULL b
1 NULL NULL
+1 NULL NULL
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
alter table t1 modify b int not null, modify c varchar(10) not null;
Warnings:
@@ -377,91 +404,146 @@ Warning 1265 Data truncated for column 'b' at row 1
Warning 1265 Data truncated for column 'c' at row 1
Warning 1265 Data truncated for column 'b' at row 2
Warning 1265 Data truncated for column 'c' at row 3
+Warning 1265 Data truncated for column 'b' at row 12
+Warning 1265 Data truncated for column 'c' at row 12
+Warning 1265 Data truncated for column 'b' at row 13
+Warning 1265 Data truncated for column 'c' at row 14
explain select * from t1 order by a, b, c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL a 20 NULL 11 Using index
+1 SIMPLE t1 index NULL a 20 NULL 22 Using index
select * from t1 order by a, b, c;
a b c
1 0
+1 0
+1 0 b
1 0 b
1 1
+1 1
1 1 b
1 1 b
+1 1 b
+1 1 b
+1 3 b
1 3 b
2 1 a
+2 1 a
+2 1 b
2 1 b
2 2 a
+2 2 a
2 2 b
+2 2 b
+2 3 c
2 3 c
explain select * from t1 order by a desc, b desc, c desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL a 20 NULL 11 Using index
+1 SIMPLE t1 index NULL a 20 NULL 22 Using index
select * from t1 order by a desc, b desc, c desc;
a b c
2 3 c
+2 3 c
2 2 b
+2 2 b
+2 2 a
2 2 a
2 1 b
+2 1 b
2 1 a
+2 1 a
+1 3 b
1 3 b
1 1 b
1 1 b
+1 1 b
+1 1 b
+1 1
1 1
1 0 b
+1 0 b
+1 0
1 0
explain select * from t1 where (a = 1 and b = 1 and c = 'b') or (a > 2) order by a desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 20 NULL 3 Using where; Using index
+1 SIMPLE t1 range a a 20 NULL 5 Using where; Using index
select * from t1 where (a = 1 and b = 1 and c = 'b') or (a > 2) order by a desc;
a b c
1 1 b
1 1 b
+1 1 b
+1 1 b
explain select * from t1 where a < 2 and b <= 1 order by a desc, b desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 4 NULL 6 Using where; Using index
+1 SIMPLE t1 range a a 4 NULL 12 Using where; Using index
select * from t1 where a < 2 and b <= 1 order by a desc, b desc;
a b c
1 1 b
1 1 b
+1 1 b
+1 1 b
1 1
+1 1
+1 0 b
1 0 b
1 0
+1 0
select count(*) from t1 where a < 5 and b > 0;
count(*)
-9
+18
select * from t1 where a < 5 and b > 0 order by a desc,b desc;
a b c
2 3 c
+2 3 c
2 2 b
+2 2 b
+2 2 a
2 2 a
2 1 b
+2 1 b
2 1 a
+2 1 a
+1 3 b
1 3 b
1 1 b
1 1 b
+1 1 b
+1 1 b
+1 1
1 1
explain select * from t1 where a between 1 and 3 and b <= 1 order by a desc, b desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 8 NULL 10 Using where; Using index
+1 SIMPLE t1 range a a 8 NULL 22 Using where; Using index
select * from t1 where a between 1 and 3 and b <= 1 order by a desc, b desc;
a b c
2 1 b
+2 1 b
2 1 a
+2 1 a
+1 1 b
1 1 b
1 1 b
+1 1 b
+1 1
1 1
1 0 b
+1 0 b
+1 0
1 0
explain select * from t1 where a between 0 and 1 order by a desc, b desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 range a a 4 NULL 12 Using where; Using index
select * from t1 where a between 0 and 1 order by a desc, b desc;
a b c
1 3 b
+1 3 b
+1 1 b
1 1 b
1 1 b
+1 1 b
+1 1
1 1
1 0 b
+1 0 b
+1 0
1 0
drop table t1;
CREATE TABLE t1 (
@@ -621,10 +703,10 @@ DS-MRR: use two IGNORE INDEX queries, otherwise we get cost races, because
DS-MRR: records_in_range/read_time return the same numbers for all three indexes
EXPLAIN SELECT * FROM t1 IGNORE INDEX (LongField, StringField) WHERE FieldKey > '2' ORDER BY LongVal;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range FieldKey FieldKey 38 NULL 4 Using index condition; Using filesort
+1 SIMPLE t1 range FieldKey FieldKey 38 NULL 3 Using index condition; Using filesort
EXPLAIN SELECT * FROM t1 IGNORE INDEX (FieldKey, LongField) WHERE FieldKey > '2' ORDER BY LongVal;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range StringField StringField 38 NULL 4 Using where; Using filesort
+1 SIMPLE t1 range StringField StringField 38 NULL 3 Using where; Using filesort
SELECT * FROM t1 WHERE FieldKey > '2' ORDER BY LongVal;
FieldKey LongVal StringVal
3 1 2
@@ -632,7 +714,7 @@ FieldKey LongVal StringVal
3 3 3
EXPLAIN SELECT * FROM t1 WHERE FieldKey > '2' ORDER BY FieldKey, LongVal;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range FieldKey,LongField,StringField LongField 38 NULL 4 Using where
+1 SIMPLE t1 range FieldKey,LongField,StringField LongField 38 NULL 3 Using index condition
SELECT * FROM t1 WHERE FieldKey > '2' ORDER BY FieldKey, LongVal;
FieldKey LongVal StringVal
3 1 2
@@ -663,7 +745,7 @@ a b
4 NULL
explain select * from t1 where b=2 or b is null order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref_or_null b b 5 const 3 Using index condition; Using where; Using filesort
+1 SIMPLE t1 ref_or_null b b 5 const 4 Using index condition; Using where; Using filesort
select * from t1 where b=2 or b is null order by a;
a b
3 NULL
@@ -784,8 +866,6 @@ key `wnid` (`wnid`)
) engine=myisam default charset=latin1;
insert into t1 (`sid`, `wnid`) values
('10100','01019000000'),('37986','01019000000'),('37987','01019010000'),
-('39560','01019090000'),('37989','01019000000'),('37990','01019011000'),
-('37991','01019011000'),('37992','01019019000'),('37993','01019030000'),
('37994','01019090000'),('475','02070000000'),('25253','02071100000'),
('25255','02071100000'),('25256','02071110000'),('25258','02071130000'),
('25259','02071190000'),('25260','02071200000'),('25261','02071210000'),
@@ -800,18 +880,12 @@ insert into t1 (`sid`, `wnid`) values
('25295','02071491000'),('25296','02071491000'),('25297','02071499000');
explain select * from t1 where wnid like '0101%' order by wnid;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range wnid14,wnid wnid 13 NULL 10 Using where
+1 SIMPLE t1 range wnid14,wnid wnid 13 NULL 4 Using where
select * from t1 where wnid like '0101%' order by wnid;
sid wnid
10100 01019000000
37986 01019000000
-37989 01019000000
37987 01019010000
-37990 01019011000
-37991 01019011000
-37992 01019019000
-37993 01019030000
-39560 01019090000
37994 01019090000
drop table t1;
CREATE TABLE t1 (a int);
@@ -2851,7 +2925,7 @@ explain
select b, count(*) num_cnt from t1
where a > 9750 group by b order by num_cnt;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx1 idx1 5 NULL 502 Using where; Using index; Using temporary; Using filesort
+1 SIMPLE t1 range idx1 idx1 5 NULL 503 Using where; Using index; Using temporary; Using filesort
flush status;
select b, count(*) num_cnt from t1
where a > 9750 group by b order by num_cnt;
@@ -2870,7 +2944,7 @@ explain
select b, count(*) num_cnt from t1
where a > 9750 group by b order by num_cnt limit 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx1 idx1 5 NULL 502 Using where; Using index; Using temporary; Using filesort
+1 SIMPLE t1 range idx1 idx1 5 NULL 503 Using where; Using index; Using temporary; Using filesort
flush status;
select b, count(*) num_cnt from t1
where a > 9750 group by b order by num_cnt limit 1;
@@ -3034,15 +3108,17 @@ KEY id_234_date (id2,id3,id4,date)
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
# t2 has a "good" index declaration order
INSERT INTO t1 (id2,id3,id4) VALUES (1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,0,1),(1,2,1),(1,3,1);
+INSERT INTO t1 (id2,id3,id4) VALUES (1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,0,1),(1,2,1),(1,3,1);
+INSERT INTO t2 (id2,id3,id4) VALUES (1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,0,1),(1,2,1),(1,3,1);
INSERT INTO t2 (id2,id3,id4) VALUES (1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,0,1),(1,2,1),(1,3,1);
# The following two must both use id_23_date and no "using filesort":
EXPLAIN SELECT id1 FROM t1 WHERE id2=1 AND id3=1 ORDER BY date DESC LIMIT 0,4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range id_234_date,id_23_date id_23_date 2 NULL 3 Using where
+1 SIMPLE t1 range id_234_date,id_23_date id_23_date 2 NULL 8 Using where
# See above query
EXPLAIN SELECT id1 FROM t2 WHERE id2=1 AND id3=1 ORDER BY date DESC LIMIT 0,4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref id_23_date,id_234_date id_23_date 2 const,const 3 Using where
+1 SIMPLE t2 ref id_23_date,id_234_date id_23_date 2 const,const 8 Using where
drop table t1,t2;
#
# MDEV-8989: ORDER BY optimizer ignores equality propagation
@@ -3246,9 +3322,9 @@ WHERE books.library_id = 8663 AND
books.scheduled_for_removal=0 )
ORDER BY wings.id;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 1 100.00 Using temporary; Using filesort
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2 100.00 Using temporary; Using filesort
1 PRIMARY wings eq_ref PRIMARY PRIMARY 4 test.books.wings_id 1 100.00
-2 MATERIALIZED books ref library_idx library_idx 4 const 1 100.00 Using where
+2 MATERIALIZED books ref library_idx library_idx 4 const 2 100.00 Using where
Warnings:
Note 1003 select `test`.`wings`.`id` AS `wing_id`,`test`.`wings`.`department_id` AS `department_id` from `test`.`wings` semi join (`test`.`books`) where `test`.`books`.`library_id` = 8663 and `test`.`books`.`scheduled_for_removal` = 0 and `test`.`wings`.`id` = `test`.`books`.`wings_id` order by `test`.`wings`.`id`
set optimizer_switch= @save_optimizer_switch;
@@ -3266,3 +3342,40 @@ NULLIF(GROUP_CONCAT(v1), null)
C
B
DROP TABLE t1;
+#
+# MDEV-17761: Odd optimizer choice with ORDER BY LIMIT and condition selectivity
+#
+create table t1(a int);
+insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t2(a int);
+insert into t2 select A.a + B.a* 10 + C.a * 100 from t1 A, t1 B, t1 C;
+create table t3(a int);
+insert into t3 select A.a + 1000 *B.a from t2 A, t1 B;
+create table t4 (
+a int,
+b int,
+c int,
+filler1 char(255),
+filler2 char(255),
+key(a)
+);
+insert into t4 select a,a,a, a,a from t3;
+set @tmp_h=@@histogram_size, @tmp_ht=@@histogram_type, @tmp_u=@@use_stat_tables,
+@tmp_o=@@optimizer_use_condition_selectivity;
+set histogram_size=100, histogram_type='single_prec_hb';
+set use_stat_tables=preferably;
+set optimizer_use_condition_selectivity=4;
+analyze table t4 persistent for columns(b) indexes ();
+Table Op Msg_type Msg_text
+test.t4 analyze status Engine-independent statistics collected
+test.t4 analyze status Table is already up to date
+# rows must be around 1200, not 600:
+explain extended
+select * from t4 where b < 5000 order by a limit 600;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t4 index NULL a 5 NULL 1188 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t4`.`a` AS `a`,`test`.`t4`.`b` AS `b`,`test`.`t4`.`c` AS `c`,`test`.`t4`.`filler1` AS `filler1`,`test`.`t4`.`filler2` AS `filler2` from `test`.`t4` where `test`.`t4`.`b` < 5000 order by `test`.`t4`.`a` limit 600
+set histogram_size=@tmp_h, histogram_type=@tmp_ht, use_stat_tables=@tmp_u,
+optimizer_use_condition_selectivity=@tmp_o;
+drop table t1,t2,t3,t4;
diff --git a/mysql-test/main/order_by.test b/mysql-test/main/order_by.test
index d67c67de89c..425ba8f916c 100644
--- a/mysql-test/main/order_by.test
+++ b/mysql-test/main/order_by.test
@@ -246,6 +246,7 @@ drop table t1;
create table t1 (a int not null, b int, c varchar(10), key (a, b, c));
insert into t1 values (1, NULL, NULL), (1, NULL, 'b'), (1, 1, NULL), (1, 1, 'b'), (1, 1, 'b'), (2, 1, 'a'), (2, 1, 'b'), (2, 2, 'a'), (2, 2, 'b'), (2, 3, 'c'),(1,3,'b');
+insert into t1 select * from t1;
explain select * from t1 where (a = 1 and b is null and c = 'b') or (a > 2) order by a desc;
select * from t1 where (a = 1 and b is null and c = 'b') or (a > 2) order by a desc;
@@ -551,8 +552,6 @@ create table t1 (
insert into t1 (`sid`, `wnid`) values
('10100','01019000000'),('37986','01019000000'),('37987','01019010000'),
-('39560','01019090000'),('37989','01019000000'),('37990','01019011000'),
-('37991','01019011000'),('37992','01019019000'),('37993','01019030000'),
('37994','01019090000'),('475','02070000000'),('25253','02071100000'),
('25255','02071100000'),('25256','02071110000'),('25258','02071130000'),
('25259','02071190000'),('25260','02071200000'),('25261','02071210000'),
@@ -2039,6 +2038,8 @@ CREATE TABLE t2 (
--echo # t2 has a "good" index declaration order
INSERT INTO t1 (id2,id3,id4) VALUES (1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,0,1),(1,2,1),(1,3,1);
+INSERT INTO t1 (id2,id3,id4) VALUES (1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,0,1),(1,2,1),(1,3,1);
+INSERT INTO t2 (id2,id3,id4) VALUES (1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,0,1),(1,2,1),(1,3,1);
INSERT INTO t2 (id2,id3,id4) VALUES (1,1,1),(1,1,1),(1,1,1),(1,1,1),(1,0,1),(1,2,1),(1,3,1);
--echo # The following two must both use id_23_date and no "using filesort":
@@ -2188,6 +2189,7 @@ set optimizer_switch= @save_optimizer_switch;
DROP TABLE books, wings;
+
--echo #
--echo # MDEV-17796: query with DISTINCT, GROUP BY and ORDER BY
--echo #
@@ -2201,3 +2203,39 @@ GROUP BY id
ORDER BY id+1 DESC;
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-17761: Odd optimizer choice with ORDER BY LIMIT and condition selectivity
+--echo #
+create table t1(a int);
+insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t2(a int);
+insert into t2 select A.a + B.a* 10 + C.a * 100 from t1 A, t1 B, t1 C;
+create table t3(a int);
+insert into t3 select A.a + 1000 *B.a from t2 A, t1 B;
+
+create table t4 (
+ a int,
+ b int,
+ c int,
+ filler1 char(255),
+ filler2 char(255),
+ key(a)
+);
+insert into t4 select a,a,a, a,a from t3;
+
+set @tmp_h=@@histogram_size, @tmp_ht=@@histogram_type, @tmp_u=@@use_stat_tables,
+ @tmp_o=@@optimizer_use_condition_selectivity;
+set histogram_size=100, histogram_type='single_prec_hb';
+set use_stat_tables=preferably;
+set optimizer_use_condition_selectivity=4;
+analyze table t4 persistent for columns(b) indexes ();
+
+--echo # rows must be around 1200, not 600:
+explain extended
+select * from t4 where b < 5000 order by a limit 600;
+
+set histogram_size=@tmp_h, histogram_type=@tmp_ht, use_stat_tables=@tmp_u,
+ optimizer_use_condition_selectivity=@tmp_o;
+
+drop table t1,t2,t3,t4;
diff --git a/mysql-test/main/outfile_loaddata.result b/mysql-test/main/outfile_loaddata.result
index 1449cb19453..4356f8b113e 100644
--- a/mysql-test/main/outfile_loaddata.result
+++ b/mysql-test/main/outfile_loaddata.result
@@ -124,16 +124,19 @@ ERROR 42000: Field separator argument is not what is expected; check the manual
# LOAD DATA rises error or has unpredictable result -- to be fixed later
SELECT * FROM t1 INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' FIELDS ENCLOSED BY 'ÑŠ';
Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1638 Non-ASCII separator arguments are not fully supported
LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' INTO TABLE t2 CHARACTER SET binary FIELDS ENCLOSED BY 'ÑŠ';
ERROR 42000: Field separator argument is not what is expected; check the manual
SELECT * FROM t1 INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' FIELDS ESCAPED BY 'ÑŠ';
Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1638 Non-ASCII separator arguments are not fully supported
LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' INTO TABLE t2 CHARACTER SET binary FIELDS ESCAPED BY 'ÑŠ';
ERROR 42000: Field separator argument is not what is expected; check the manual
SELECT * FROM t1 INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' FIELDS TERMINATED BY 'ÑŠ';
Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1638 Non-ASCII separator arguments are not fully supported
##################################################
1ÑŠABC-áâ÷ÑŠDEF-ÂÃÄ
@@ -157,6 +160,7 @@ a b c
2 NULL NULL
SELECT * FROM t1 INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' LINES STARTING BY 'ÑŠ';
Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1638 Non-ASCII separator arguments are not fully supported
##################################################
ÑŠ1 ABC-áâ÷ DEF-ÂÃÄ
@@ -172,6 +176,7 @@ a b c
2 NULL NULL
SELECT * FROM t1 INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/t1.txt' LINES TERMINATED BY 'ÑŠ';
Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1638 Non-ASCII separator arguments are not fully supported
##################################################
1 ABC-áâ÷ DEF-ÂÃÄÑŠ2 \N \NÑŠ##################################################
diff --git a/mysql-test/main/parser.result b/mysql-test/main/parser.result
index 2ed1032f6cf..34c119a322e 100644
--- a/mysql-test/main/parser.result
+++ b/mysql-test/main/parser.result
@@ -705,6 +705,9 @@ FOR UPDATE;
1
1
SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1
+PROCEDURE ANALYSE();
+ERROR HY000: Can't use ORDER clause with this procedure
+SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1
PROCEDURE ANALYSE() FOR UPDATE;
ERROR HY000: Can't use ORDER clause with this procedure
SELECT 1 FROM
@@ -734,7 +737,7 @@ SELECT 1 FROM t1
UNION
SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1
PROCEDURE ANALYSE() FOR UPDATE;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE() FOR UPDATE' at line 4
+ERROR 42000: Incorrect usage/placement of 'PROCEDURE'
SELECT 1 FROM DUAL PROCEDURE ANALYSE()
UNION
SELECT 1 FROM t1;
@@ -754,15 +757,18 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
# "FOR UPDATE" tests
SELECT 1 FROM t1 UNION SELECT 1 FROM t1 ORDER BY 1 LIMIT 1;
1
-SELECT 1 FROM t1 FOR UPDATE UNION SELECT 1 FROM t1 ORDER BY 1 LIMIT 1;
+(SELECT 1 FROM t1 FOR UPDATE) UNION SELECT 1 FROM t1 ORDER BY 1 LIMIT 1;
1
SELECT 1 FROM t1 UNION SELECT 1 FROM t1 ORDER BY 1 LIMIT 1 FOR UPDATE;
1
# "INTO" clause tests
SELECT 1 FROM t1 INTO @var17727401;
Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1329 No data - zero rows fetched, selected, or processed
SELECT 1 FROM DUAL INTO @var17727401;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT 1 INTO @var17727401;
SELECT 1 INTO @var17727401 FROM t1;
Warnings:
@@ -778,6 +784,7 @@ Warnings:
Warning 1329 No data - zero rows fetched, selected, or processed
SELECT 1 FROM t1 WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1 LIMIT 1 INTO @var17727401;
Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1329 No data - zero rows fetched, selected, or processed
SELECT 1 FROM t1 WHERE 1 INTO @var17727401 GROUP BY 1 HAVING 1 ORDER BY 1 LIMIT 1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GROUP BY 1 HAVING 1 ORDER BY 1 LIMIT 1' at line 1
@@ -794,31 +801,20 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
SELECT 1 FROM t1 INTO @var17727401 UNION SELECT 1 FROM t1 INTO t1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 FROM t1 INTO t1' at line 1
(SELECT 1 FROM t1 INTO @var17727401) UNION (SELECT 1 FROM t1 INTO t1);
-ERROR HY000: Incorrect usage of UNION and INTO
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'INTO @var17727401) UNION (SELECT 1 FROM t1 INTO t1)' at line 1
SELECT 1 FROM t1 UNION SELECT 1 FROM t1 INTO @var17727401;
Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
Warning 1329 No data - zero rows fetched, selected, or processed
SELECT 1 INTO @var17727401 FROM t1 PROCEDURE ANALYSE();
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE()' at line 1
SELECT 1 FROM t1 PROCEDURE ANALYSE() INTO @var17727401;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'INTO @var17727401' at line 1
# ORDER and LIMIT clause combinations
-(SELECT 1 FROM t1 ORDER BY 1) ORDER BY 1;
-1
-(SELECT 1 FROM t1 LIMIT 1) LIMIT 1;
-1
-((SELECT 1 FROM t1 ORDER BY 1) ORDER BY 1) ORDER BY 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ORDER BY 1) ORDER BY 1' at line 1
-((SELECT 1 FROM t1 LIMIT 1) LIMIT 1) LIMIT 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'LIMIT 1) LIMIT 1' at line 1
-(SELECT 1 FROM t1 ORDER BY 1) LIMIT 1;
-1
-(SELECT 1 FROM t1 LIMIT 1) ORDER BY 1;
-1
((SELECT 1 FROM t1 ORDER BY 1) LIMIT 1) ORDER BY 1);
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'LIMIT 1) ORDER BY 1)' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1
((SELECT 1 FROM t1 LIMIT 1) ORDER BY 1) LIMIT 1);
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ORDER BY 1) LIMIT 1)' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1
SELECT 1 FROM t1 UNION SELECT 1 FROM t1 ORDER BY 1;
1
SELECT (SELECT 1 FROM t1 UNION SELECT 1 FROM t1 ORDER BY 1);
@@ -1265,19 +1261,27 @@ CREATE TABLE t1 (i INT);
(SELECT * FROM t1 PROCEDURE ANALYSE(10, 10))
UNION
(SELECT * FROM t1 PROCEDURE ANALYSE(10, 10));
-ERROR HY000: Incorrect usage of UNION and SELECT ... PROCEDURE ANALYSE()
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE(10, 10))
+UNION
+(SELECT * FROM t1 PROCEDURE ANALYSE(10, 10))' at line 1
(SELECT * FROM t1 PROCEDURE ANALYSE(10, 10))
UNION
SELECT * FROM t1 PROCEDURE ANALYSE(10, 10);
-ERROR HY000: Incorrect usage of UNION and SELECT ... PROCEDURE ANALYSE()
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE(10, 10))
+UNION
+SELECT * FROM t1 PROCEDURE ANALYSE(10, 10)' at line 1
(SELECT * FROM t1 PROCEDURE ANALYSE(10, 10))
UNION
(SELECT 1);
-ERROR HY000: Incorrect usage of UNION and SELECT ... PROCEDURE ANALYSE()
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE(10, 10))
+UNION
+(SELECT 1)' at line 1
(SELECT * FROM t1 PROCEDURE ANALYSE(10, 10))
UNION
SELECT 1;
-ERROR HY000: Incorrect usage of UNION and SELECT ... PROCEDURE ANALYSE()
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'PROCEDURE ANALYSE(10, 10))
+UNION
+SELECT 1' at line 1
SELECT * FROM t1 PROCEDURE ANALYSE(10, 10)
UNION
(SELECT * FROM t1 PROCEDURE ANALYSE(10, 10));
diff --git a/mysql-test/main/parser.test b/mysql-test/main/parser.test
index 040678a51df..3a7c4f6467e 100644
--- a/mysql-test/main/parser.test
+++ b/mysql-test/main/parser.test
@@ -826,6 +826,9 @@ SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1
--error ER_ORDER_WITH_PROC
SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1
+ PROCEDURE ANALYSE();
+--error ER_ORDER_WITH_PROC
+SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1
PROCEDURE ANALYSE() FOR UPDATE;
SELECT 1 FROM
@@ -851,7 +854,7 @@ UNION
SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1
FOR UPDATE;
---error ER_PARSE_ERROR
+--error ER_CANT_USE_OPTION_HERE
SELECT 1 FROM t1
UNION
SELECT 1 FROM DUAL WHERE 1 GROUP BY 1 HAVING 1 ORDER BY 1
@@ -876,7 +879,7 @@ UNION
--echo # "FOR UPDATE" tests
SELECT 1 FROM t1 UNION SELECT 1 FROM t1 ORDER BY 1 LIMIT 1;
-SELECT 1 FROM t1 FOR UPDATE UNION SELECT 1 FROM t1 ORDER BY 1 LIMIT 1;
+(SELECT 1 FROM t1 FOR UPDATE) UNION SELECT 1 FROM t1 ORDER BY 1 LIMIT 1;
SELECT 1 FROM t1 UNION SELECT 1 FROM t1 ORDER BY 1 LIMIT 1 FOR UPDATE;
@@ -916,7 +919,7 @@ SELECT EXISTS(SELECT 1 FROM t1 INTO @var17727401);
--error ER_PARSE_ERROR
SELECT 1 FROM t1 INTO @var17727401 UNION SELECT 1 FROM t1 INTO t1;
---error ER_WRONG_USAGE
+--error ER_PARSE_ERROR
(SELECT 1 FROM t1 INTO @var17727401) UNION (SELECT 1 FROM t1 INTO t1);
SELECT 1 FROM t1 UNION SELECT 1 FROM t1 INTO @var17727401;
@@ -931,16 +934,16 @@ SELECT 1 FROM t1 PROCEDURE ANALYSE() INTO @var17727401;
# Limited support for (SELECT ...) ORDER/LIMIT:
-(SELECT 1 FROM t1 ORDER BY 1) ORDER BY 1;
-(SELECT 1 FROM t1 LIMIT 1) LIMIT 1;
+# (SELECT 1 FROM t1 ORDER BY 1) ORDER BY 1;
+# (SELECT 1 FROM t1 LIMIT 1) LIMIT 1;
---error ER_PARSE_ERROR
-((SELECT 1 FROM t1 ORDER BY 1) ORDER BY 1) ORDER BY 1;
---error ER_PARSE_ERROR
-((SELECT 1 FROM t1 LIMIT 1) LIMIT 1) LIMIT 1;
+#--error ER_PARSE_ERROR
+# ((SELECT 1 FROM t1 ORDER BY 1) ORDER BY 1) ORDER BY 1;
+#--error ER_PARSE_ERROR
+# ((SELECT 1 FROM t1 LIMIT 1) LIMIT 1) LIMIT 1;
-(SELECT 1 FROM t1 ORDER BY 1) LIMIT 1;
-(SELECT 1 FROM t1 LIMIT 1) ORDER BY 1;
+# (SELECT 1 FROM t1 ORDER BY 1) LIMIT 1;
+# (SELECT 1 FROM t1 LIMIT 1) ORDER BY 1;
--error ER_PARSE_ERROR
((SELECT 1 FROM t1 ORDER BY 1) LIMIT 1) ORDER BY 1);
@@ -1276,22 +1279,22 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1 (i INT);
---error ER_WRONG_USAGE
+--error ER_PARSE_ERROR
(SELECT * FROM t1 PROCEDURE ANALYSE(10, 10))
UNION
(SELECT * FROM t1 PROCEDURE ANALYSE(10, 10));
---error ER_WRONG_USAGE
+--error ER_PARSE_ERROR
(SELECT * FROM t1 PROCEDURE ANALYSE(10, 10))
UNION
SELECT * FROM t1 PROCEDURE ANALYSE(10, 10);
---error ER_WRONG_USAGE
+--error ER_PARSE_ERROR
(SELECT * FROM t1 PROCEDURE ANALYSE(10, 10))
UNION
(SELECT 1);
---error ER_WRONG_USAGE
+--error ER_PARSE_ERROR
(SELECT * FROM t1 PROCEDURE ANALYSE(10, 10))
UNION
SELECT 1;
diff --git a/mysql-test/main/partition.result b/mysql-test/main/partition.result
index 064817446b0..5a7795394ec 100644
--- a/mysql-test/main/partition.result
+++ b/mysql-test/main/partition.result
@@ -286,10 +286,11 @@ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_par
t1 1 a 1 a A NULL NULL NULL YES BTREE
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show indexes from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t1 1 a 1 a A 1 NULL NULL YES BTREE
+t1 1 a 1 a A 2 NULL NULL YES BTREE
drop table t1;
create table t1 (a int)
partition by hash (a);
@@ -1029,6 +1030,7 @@ PARTITION BY LIST (a)
(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20));
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
drop table t1;
create table t1
@@ -1824,6 +1826,7 @@ Table Op Msg_type Msg_text
test.t1 check status OK
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze note The storage engine for the table doesn't support analyze
drop table t1;
drop procedure if exists mysqltest_1;
@@ -2060,6 +2063,7 @@ ALTER TABLE t1 ANALYZE PARTITION p1 EXTENDED;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'EXTENDED' at line 1
ALTER TABLE t1 ANALYZE PARTITION p1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
ALTER TABLE t1 CHECK PARTITION p1;
Table Op Msg_type Msg_text
@@ -2357,7 +2361,7 @@ b c
EXPLAIN
SELECT b, c FROM t1 WHERE b = 1 GROUP BY b, c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range bc bc 10 NULL 7 Using where; Using index for group-by
+1 SIMPLE t1 range bc bc 10 NULL 8 Using where; Using index for group-by
DROP TABLE t1;
#
# Bug #45807: crash accessing partitioned table and sql_mode
@@ -2733,7 +2737,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p1,p2 ALL NULL NULL NULL NULL 400 Using where
explain extended select * from t2 where b=5;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1000 19.80 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 1000 19.61 Using where
Warnings:
Note 1003 select `test`.`t2`.`part_key` AS `part_key`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where `test`.`t2`.`b` = 5
explain partitions select * from t2 where b=5;
@@ -2741,7 +2745,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2,p3,p4 ALL NULL NULL NULL NULL 1000 Using where
explain extended select * from t2 partition(p0) where b=1;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 200 19.80 Using where
+1 SIMPLE t2 ALL NULL NULL NULL NULL 200 19.61 Using where
Warnings:
Note 1003 select `test`.`t2`.`part_key` AS `part_key`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` PARTITION (`p0`) where `test`.`t2`.`b` = 1
set @@use_stat_tables= @save_use_stat_tables;
diff --git a/mysql-test/main/partition_binlog.result b/mysql-test/main/partition_binlog.result
index 21eca8f1c00..3e6100b51d8 100644
--- a/mysql-test/main/partition_binlog.result
+++ b/mysql-test/main/partition_binlog.result
@@ -27,6 +27,7 @@ Table Op Msg_type Msg_text
test.t1 repair error Error in list of partitions to test.t1
ALTER TABLE t1 ANALYZE PARTITION p0;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
ALTER TABLE t1 CHECK PARTITION p0;
Table Op Msg_type Msg_text
diff --git a/mysql-test/main/partition_explicit_prune.result b/mysql-test/main/partition_explicit_prune.result
index 951b21db3e1..d9db35a249a 100644
--- a/mysql-test/main/partition_explicit_prune.result
+++ b/mysql-test/main/partition_explicit_prune.result
@@ -233,6 +233,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
VARIABLE_NAME VARIABLE_VALUE
HANDLER_COMMIT 1
+HANDLER_READ_KEY 6
HANDLER_TMP_WRITE 24
HANDLER_WRITE 2
# Should be 1 commit
@@ -315,6 +316,7 @@ a b
1000 `p100-99999`(-subp6)
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SET @save_innodb_stats_on_metadata=@@global.innodb_stats_on_metadata;
SET @@global.innodb_stats_on_metadata=ON;
@@ -348,6 +350,7 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
VARIABLE_NAME VARIABLE_VALUE
HANDLER_COMMIT 1
HANDLER_READ_FIRST 1
+HANDLER_READ_KEY 8
HANDLER_TMP_WRITE 24
# Should be 1 commit
# 4 locks (1 ha_partition + 1 ha_innobase) x 2 (lock/unlock)
@@ -690,6 +693,8 @@ a b
-21 REPLACEd by REPLACE
FLUSH STATUS;
SELECT * FROM t1 PARTITION (pNeg, `p10-99`) INTO OUTFILE 'loadtest.txt';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
VARIABLE_NAME VARIABLE_VALUE
@@ -772,6 +777,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
VARIABLE_NAME VARIABLE_VALUE
HANDLER_COMMIT 1
+HANDLER_READ_KEY 8
HANDLER_READ_RND_NEXT 2
HANDLER_TMP_WRITE 24
HANDLER_UPDATE 2
@@ -1056,6 +1062,7 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
VARIABLE_NAME VARIABLE_VALUE
HANDLER_COMMIT 1
HANDLER_READ_FIRST 5
+HANDLER_READ_KEY 6
HANDLER_READ_NEXT 5
HANDLER_TMP_WRITE 24
HANDLER_WRITE 10
@@ -1075,6 +1082,7 @@ SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS
WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
VARIABLE_NAME VARIABLE_VALUE
HANDLER_READ_FIRST 5
+HANDLER_READ_KEY 6
HANDLER_ROLLBACK 1
HANDLER_TMP_WRITE 24
HANDLER_WRITE 1
@@ -1102,6 +1110,7 @@ WHERE VARIABLE_NAME LIKE 'HANDLER_%' AND VARIABLE_VALUE > 0;
VARIABLE_NAME VARIABLE_VALUE
HANDLER_COMMIT 1
HANDLER_READ_FIRST 5
+HANDLER_READ_KEY 6
HANDLER_READ_NEXT 5
HANDLER_TMP_WRITE 24
HANDLER_WRITE 10
@@ -1889,3 +1898,12 @@ SELECT * FROM t1 PARTITION (p0);
i
UNLOCK TABLES;
DROP TABLE t1, t2;
+#
+# MDEV-18982: INSERT using explicit partition pruning with column list
+#
+create table t1 (a int) partition by hash(a);
+insert into t1 partition (p0) (a) values (1);
+select * from t1;
+a
+1
+drop table t1;
diff --git a/mysql-test/main/partition_explicit_prune.test b/mysql-test/main/partition_explicit_prune.test
index b8b6e480ce9..a516527c055 100644
--- a/mysql-test/main/partition_explicit_prune.test
+++ b/mysql-test/main/partition_explicit_prune.test
@@ -877,3 +877,12 @@ UNLOCK TABLES;
# Cleanup
DROP TABLE t1, t2;
+
+--echo #
+--echo # MDEV-18982: INSERT using explicit partition pruning with column list
+--echo #
+
+create table t1 (a int) partition by hash(a);
+insert into t1 partition (p0) (a) values (1);
+select * from t1;
+drop table t1;
diff --git a/mysql-test/main/partition_innodb.result b/mysql-test/main/partition_innodb.result
index cdfe619cb29..605ac38384e 100644
--- a/mysql-test/main/partition_innodb.result
+++ b/mysql-test/main/partition_innodb.result
@@ -1,6 +1,11 @@
call mtr.add_suppression("Deadlock found when trying to get lock; try restarting transaction");
set global default_storage_engine='innodb';
set session default_storage_engine='innodb';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
drop table if exists t1, t2;
#
# Bug#13694811: THE OPTIMIZER WRONGLY USES THE FIRST
@@ -24,12 +29,16 @@ INSERT INTO t1 VALUES (0, 'Mod Zero'), (1, 'One'), (2, 'Two'), (3, 'Three'),
(20, '0'), (21, '1'), (22, '2'), (23, '3'),
(4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9');
INSERT INTO t1 SELECT a + 30, b FROM t1 WHERE a >= 0;
+INSERT INTO t1 SELECT a + 60, b FROM t1 WHERE a >= 0;
+INSERT INTO t1 SELECT a + 120, b FROM t1 WHERE a >= 0;
+INSERT INTO t1 SELECT a + 240, b FROM t1 WHERE a >= 0;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN SELECT b FROM t1 WHERE b between 'L' and 'N' AND a > -100;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,b b 67 NULL 34 Using where; Using index
+1 SIMPLE t1 range PRIMARY,b b 67 NULL 90 Using where; Using index
DROP TABLE t1;
#
# Bug#13007154: Crash in keys_to_use_for_scanning with ORDER BY
@@ -571,6 +580,33 @@ DROP TABLE t1;
# Bug#57985 "ONLINE/FAST ALTER PARTITION can fail and leave the
# table unusable".
#
+CREATE TABLE t1 (a bigint not null, b int not null, PRIMARY KEY (a))
+ENGINE = InnoDB PARTITION BY KEY(a) PARTITIONS 2;
+INSERT INTO t1 values (0,1), (1,2);
+# The below ALTER should fail. It should leave the
+# table in its original, non-corrupted, usable state.
+ALTER TABLE t1 ADD UNIQUE KEY (b);
+ERROR HY000: A UNIQUE INDEX must include all columns in the table's partitioning function
+# The below statements should succeed, as ALTER should
+# have left table intact.
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` bigint(20) NOT NULL,
+ `b` int(11) NOT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ PARTITION BY KEY (`a`)
+PARTITIONS 2
+SELECT * FROM t1;
+a b
+1 2
+0 1
+DROP TABLE t1;
+#
+# Bug#57985 "ONLINE/FAST ALTER PARTITION can fail and leave the
+# table unusable".
+#
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (a bigint not null, b int not null, PRIMARY KEY (a))
ENGINE = InnoDB PARTITION BY KEY(a) PARTITIONS 2;
@@ -663,7 +699,7 @@ EXPLAIN SELECT * FROM t1 WHERE col1 = 1 AND col2 = 2
AND col3 BETWEEN '2013-03-08 00:00:00' AND '2013-03-12 12:00:00'
GROUP BY 1, 2, 3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,col1,col2 PRIMARY 5 NULL # Using where; Using filesort
+1 SIMPLE t1 ref PRIMARY,col1,col2 col1 8 const # Using where; Using filesort
SELECT * FROM t1 USE INDEX () WHERE col1 = 1 AND col2 = 2
AND col3 BETWEEN '2013-03-08 00:00:00' AND '2013-03-12 12:00:00'
GROUP BY 1, 2, 3;
@@ -696,9 +732,11 @@ insert into t1 select 10+A.a + 10*B.a + 100*C.a + 1000*D.a,
10+A.a + 10*B.a + 100*C.a + 1000*D.a,
2000 + A.a + 10*B.a + 100*C.a + 1000*D.a
from t2 A, t2 B, t2 C ,t2 D;
+set statement optimizer_switch='rowid_filter=off' for
explain select * from t1 where a=1 and b=2 and pk between 1 and 999999 ;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge PRIMARY,a,b b,a 4,4 NULL # Using intersect(b,a); Using where; Using index
+set statement optimizer_switch='rowid_filter=off' for
create temporary table t3 as
select * from t1 where a=1 and b=2 and pk between 1 and 999 ;
select count(*) from t3;
@@ -954,6 +992,9 @@ test_jfg test_jfg11
test_jfg test_jfg12#P#p1000
test_jfg test_jfg12#P#pmax
DROP DATABASE test_jfg;
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
create table t1 (a int) engine=innodb;
create table t2 (
b int,
diff --git a/mysql-test/main/partition_innodb.test b/mysql-test/main/partition_innodb.test
index 57d644d293d..ae0ce59fabc 100644
--- a/mysql-test/main/partition_innodb.test
+++ b/mysql-test/main/partition_innodb.test
@@ -7,6 +7,13 @@ call mtr.add_suppression("Deadlock found when trying to get lock; try restarting
set global default_storage_engine='innodb';
set session default_storage_engine='innodb';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
+
--disable_warnings
drop table if exists t1, t2;
--enable_warnings
@@ -38,6 +45,9 @@ INSERT INTO t1 VALUES (0, 'Mod Zero'), (1, 'One'), (2, 'Two'), (3, 'Three'),
(20, '0'), (21, '1'), (22, '2'), (23, '3'),
(4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9');
INSERT INTO t1 SELECT a + 30, b FROM t1 WHERE a >= 0;
+INSERT INTO t1 SELECT a + 60, b FROM t1 WHERE a >= 0;
+INSERT INTO t1 SELECT a + 120, b FROM t1 WHERE a >= 0;
+INSERT INTO t1 SELECT a + 240, b FROM t1 WHERE a >= 0;
ANALYZE TABLE t1;
EXPLAIN SELECT b FROM t1 WHERE b between 'L' and 'N' AND a > -100;
DROP TABLE t1;
@@ -655,6 +665,25 @@ OPTIMIZE TABLE t1;
SET SESSION sql_mode = @old_mode;
DROP TABLE t1;
+
+--echo #
+--echo # Bug#57985 "ONLINE/FAST ALTER PARTITION can fail and leave the
+--echo # table unusable".
+--echo #
+CREATE TABLE t1 (a bigint not null, b int not null, PRIMARY KEY (a))
+ ENGINE = InnoDB PARTITION BY KEY(a) PARTITIONS 2;
+INSERT INTO t1 values (0,1), (1,2);
+--echo # The below ALTER should fail. It should leave the
+--echo # table in its original, non-corrupted, usable state.
+--error ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF
+ALTER TABLE t1 ADD UNIQUE KEY (b);
+--echo # The below statements should succeed, as ALTER should
+--echo # have left table intact.
+SHOW CREATE TABLE t1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+
--echo #
--echo # Bug#57985 "ONLINE/FAST ALTER PARTITION can fail and leave the
--echo # table unusable".
@@ -772,8 +801,10 @@ insert into t1 select 10+A.a + 10*B.a + 100*C.a + 1000*D.a,
# This should show index_merge, using intersect
--replace_column 9 #
+set statement optimizer_switch='rowid_filter=off' for
explain select * from t1 where a=1 and b=2 and pk between 1 and 999999 ;
# 794 rows in output
+set statement optimizer_switch='rowid_filter=off' for
create temporary table t3 as
select * from t1 where a=1 and b=2 and pk between 1 and 999 ;
select count(*) from t3;
@@ -1048,6 +1079,9 @@ database_name = 'test_jfg';
DROP DATABASE test_jfg;
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
#
# MDEV-17755 Assertion `!table || (!table->read_set || bitmap_is_set(table->read_set, field_index) || (!(ptr >= table->record[0] && ptr < table->record[0] + table->s->reclength)))' failed in Field_bit::val_int upon SELECT with JOIN, partitions, indexed virtual column
#
diff --git a/mysql-test/main/partition_pruning.result b/mysql-test/main/partition_pruning.result
index 00bd24101d6..15767556f37 100644
--- a/mysql-test/main/partition_pruning.result
+++ b/mysql-test/main/partition_pruning.result
@@ -18,7 +18,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
# # # # # # # # # 3 #
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < 7;
id select_type table partitions type possible_keys key key_len ref rows Extra
-# # # # # # # # # 9 #
+# # # # # # # # # 10 #
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 1;
id select_type table partitions type possible_keys key key_len ref rows Extra
# # # # # # # # # 3 #
@@ -105,7 +105,7 @@ a
6
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < 7;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max range PRIMARY PRIMARY 4 NULL 9 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max index PRIMARY PRIMARY 4 NULL 10 Using where; Using index
SELECT * FROM t1 WHERE a <= 1;
a
-1
@@ -168,7 +168,7 @@ a
6
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 6;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max range PRIMARY PRIMARY 4 NULL 9 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max index PRIMARY PRIMARY 4 NULL 10 Using where; Using index
SELECT * FROM t1 WHERE a <= 7;
a
-1
@@ -182,7 +182,7 @@ a
7
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 7;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max range PRIMARY PRIMARY 4 NULL 9 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,p5,max index PRIMARY PRIMARY 4 NULL 10 Using where; Using index
SELECT * FROM t1 WHERE a = 1;
a
1
@@ -424,7 +424,7 @@ a
5
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < 6;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,max range PRIMARY PRIMARY 4 NULL 8 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,max index PRIMARY PRIMARY 4 NULL 9 Using where; Using index
SELECT * FROM t1 WHERE a <= 1;
a
-1
@@ -474,7 +474,7 @@ a
5
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 5;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,max range PRIMARY PRIMARY 4 NULL 8 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,max index PRIMARY PRIMARY 4 NULL 9 Using where; Using index
SELECT * FROM t1 WHERE a <= 6;
a
-1
@@ -487,7 +487,7 @@ a
6
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= 6;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0,p1,p2,p3,p4,max range PRIMARY PRIMARY 4 NULL 8 Using where; Using index
+1 SIMPLE t1 p0,p1,p2,p3,p4,max index PRIMARY PRIMARY 4 NULL 9 Using where; Using index
SELECT * FROM t1 WHERE a = 1;
a
1
@@ -744,41 +744,41 @@ a
1001-01-01
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p2001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p2001-01-01 index a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p1001-01-01 system a NULL NULL NULL 1
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL ref a a 4 const 1 Using index
# Disabling warnings for the invalid date
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 index a a 4 NULL 7 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 index a a 4 NULL 7 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL,p2001-01-01 index a a 4 NULL 4 Using where; Using index
@@ -790,16 +790,16 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL ref a a 4 const 1 Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01,p2001-01-01 index a a 4 NULL 7 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p1001-01-01,p2001-01-01 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 pNULL,p0001-01-01,p1001-01-01 index a a 4 NULL 6 Using where; Using index
# test without index
ALTER TABLE t1 DROP KEY a;
SELECT * FROM t1 WHERE a < '1001-01-01';
@@ -1073,41 +1073,41 @@ a
1001-01-01
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p2001-01-01,pNULL range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 p2001-01-01,pNULL index a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p1001-01-01 system a NULL NULL NULL 1
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL ref a a 4 const 1 Using index
# Disabling warnings for the invalid date
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p2001-01-01,pNULL index a a 4 NULL 4 Using where; Using index
@@ -1119,16 +1119,16 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL ref a a 4 const 1 Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL,p1001-01-01 index a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p1001-01-01 index a a 4 NULL 5 Using where; Using index
# test without index
ALTER TABLE t1 DROP KEY a;
SELECT * FROM t1 WHERE a < '1001-01-01';
@@ -1402,41 +1402,41 @@ a
1001-01-01
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p2001-01-01,pNULL range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 p2001-01-01,pNULL index a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p1001-01-01 system a NULL NULL NULL 1
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a > '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p2001-01-01,pNULL,p1001-01-01 index a a 4 NULL 5 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = '1001-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL ref a a 4 const 1 Using index
# Disabling warnings for the invalid date
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a < '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a <= '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a >= '1999-02-31';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p2001-01-01,pNULL index a a 4 NULL 4 Using where; Using index
@@ -1448,16 +1448,16 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL ref a a 4 const 1 Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1002-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 5 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0000-00-00' AND '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 range a a 4 NULL 4 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p0000-01-02,p1001-01-01 index a a 4 NULL 6 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-02' AND '1002-00-00';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 pNULL,p1001-01-01 index a a 4 NULL 4 Using where; Using index
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a BETWEEN '0001-01-01' AND '1001-01-01';
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p0001-01-01,pNULL,p1001-01-01 range a a 4 NULL 3 Using where; Using index
+1 SIMPLE t1 p0001-01-01,pNULL,p1001-01-01 index a a 4 NULL 5 Using where; Using index
# test without index
ALTER TABLE t1 DROP KEY a;
SELECT * FROM t1 WHERE a < '1001-01-01';
@@ -2670,17 +2670,19 @@ create table t1 (a int not null, b int not null, key(a), key(b))
partition by hash(a) partitions 4;
insert into t1 values (1,1),(2,2),(3,3),(4,4);
insert into t1 values (5,5),(6,6),(7,7),(8,8);
+insert into t1 values (9,9),(10,10),(11,11),(12,12);
+insert into t1 values (13,13),(14,14),(15,15),(16,16);
explain partitions
select * from t1 X, t1 Y
where X.b = Y.b and (X.a=1 or X.a=2) and (Y.a=2 or Y.a=3);
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE X p1,p2 range a,b a 4 NULL 4 Using where
-1 SIMPLE Y p2,p3 ref a,b b 4 test.X.b 2 Using where
+1 SIMPLE X p1,p2 ALL a,b NULL NULL NULL 8 Using where
+1 SIMPLE Y p2,p3 ALL a,b NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
explain partitions
select * from t1 X, t1 Y where X.a = Y.a and (X.a=1 or X.a=2);
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE X p1,p2 range a a 4 NULL 4 Using where
-1 SIMPLE Y p1,p2 ref a a 4 test.X.a 2
+1 SIMPLE X p1,p2 ALL a NULL NULL NULL 8 Using where
+1 SIMPLE Y p1,p2 ALL a NULL NULL NULL 8 Using where; Using join buffer (flat, BNL join)
drop table t1;
create table t1 (a int) partition by hash(a) partitions 20;
insert into t1 values (1),(2),(3);
@@ -2867,15 +2869,15 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2 ALL NULL NULL NULL NULL 510 Using where
explain partitions select * from t2 where b = 4;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 p0,p1,p2,p3,p4 ref b b 5 const 76
+1 SIMPLE t2 p0,p1,p2,p3,p4 ref b b 5 const 77
explain extended select * from t2 where b = 6;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ref b b 5 const 76 100.00
+1 SIMPLE t2 ref b b 5 const 77 100.00
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where `test`.`t2`.`b` = 6
explain partitions select * from t2 where b = 6;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 p0,p1,p2,p3,p4 ref b b 5 const 76
+1 SIMPLE t2 p0,p1,p2,p3,p4 ref b b 5 const 77
explain extended select * from t2 where b in (1,3,5);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t2 ALL b NULL NULL NULL 910 40.66 Using where
@@ -2886,7 +2888,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2,p3,p4 ALL b NULL NULL NULL 910 Using where
explain extended select * from t2 where b in (2,4,6);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL b NULL NULL NULL 910 25.05 Using where
+1 SIMPLE t2 ALL b NULL NULL NULL 910 25.38 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where `test`.`t2`.`b` in (2,4,6)
explain partitions select * from t2 where b in (2,4,6);
@@ -2894,7 +2896,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2,p3,p4 ALL b NULL NULL NULL 910 Using where
explain extended select * from t2 where b in (7,8,9);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL b NULL NULL NULL 910 36.70 Using where
+1 SIMPLE t2 ALL b NULL NULL NULL 910 36.81 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where `test`.`t2`.`b` in (7,8,9)
explain partitions select * from t2 where b in (7,8,9);
@@ -2902,7 +2904,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2,p3,p4 ALL b NULL NULL NULL 910 Using where
explain extended select * from t2 where b > 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL b NULL NULL NULL 910 44.84 Using where
+1 SIMPLE t2 ALL b NULL NULL NULL 910 44.62 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where `test`.`t2`.`b` > 5
explain partitions select * from t2 where b > 5;
@@ -2910,7 +2912,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2,p3,p4 ALL b NULL NULL NULL 910 Using where
explain extended select * from t2 where b > 5 and b < 8;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL b NULL NULL NULL 910 22.09 Using where
+1 SIMPLE t2 ALL b NULL NULL NULL 910 22.20 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where `test`.`t2`.`b` > 5 and `test`.`t2`.`b` < 8
explain partitions select * from t2 where b > 5 and b < 8;
@@ -2918,15 +2920,15 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t2 p0,p1,p2,p3,p4 ALL b NULL NULL NULL 910 Using where
explain extended select * from t2 where b > 5 and b < 7;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 range b b 5 NULL 76 100.00 Using where
+1 SIMPLE t2 range b b 5 NULL 77 100.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where `test`.`t2`.`b` > 5 and `test`.`t2`.`b` < 7
explain partitions select * from t2 where b > 5 and b < 7;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 p0,p1,p2,p3,p4 range b b 5 NULL 76 Using where
+1 SIMPLE t2 p0,p1,p2,p3,p4 range b b 5 NULL 77 Using where
explain extended select * from t2 where b > 0 and b < 5;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL b NULL NULL NULL 910 41.65 Using where
+1 SIMPLE t2 ALL b NULL NULL NULL 910 41.87 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t2` where `test`.`t2`.`b` > 0 and `test`.`t2`.`b` < 5
explain partitions select * from t2 where b > 0 and b < 5;
@@ -3376,16 +3378,16 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p1 const PRIMARY PRIMARY 8 const,const 1
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 >= 1;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where
+1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 3 Using where
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 > 1;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 < 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p1 range PRIMARY PRIMARY 8 NULL 1 Using where
+1 SIMPLE t1 p1 range PRIMARY PRIMARY 8 NULL 2 Using where
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 1 AND c2 <= 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where
+1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 3 Using where
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where
@@ -3394,7 +3396,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p2 const PRIMARY PRIMARY 8 const,const 1
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 >= 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p2 range PRIMARY PRIMARY 8 NULL 1 Using where
+1 SIMPLE t1 p2 range PRIMARY PRIMARY 8 NULL 2 Using where
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 > 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p2 range PRIMARY PRIMARY 8 NULL 1 Using where
@@ -3403,7 +3405,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 <= 4;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 2 Using where
+1 SIMPLE t1 p1,p2 range PRIMARY PRIMARY 8 NULL 3 Using where
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE c1 = 2 AND c2 = 4;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 p2 const PRIMARY PRIMARY 8 const,const 1
@@ -3463,7 +3465,7 @@ select * from t1
where company_id = 1000
and dept_id in (select dept_id from t2 where COMPANY_ID = 1000);
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 p_1000 ref PRIMARY PRIMARY 8 const 2 Using index
+1 PRIMARY t2 p_1000 ref PRIMARY PRIMARY 8 const 3 Using index
1 PRIMARY t1 p_1000 ALL PRIMARY NULL NULL NULL 6 Using where; Using join buffer (flat, BNL join)
drop table t1,t2;
#
diff --git a/mysql-test/main/partition_pruning.test b/mysql-test/main/partition_pruning.test
index 2879b0eae6c..ecf794ca214 100644
--- a/mysql-test/main/partition_pruning.test
+++ b/mysql-test/main/partition_pruning.test
@@ -892,6 +892,8 @@ create table t1 (a int not null, b int not null, key(a), key(b))
partition by hash(a) partitions 4;
insert into t1 values (1,1),(2,2),(3,3),(4,4);
insert into t1 values (5,5),(6,6),(7,7),(8,8);
+insert into t1 values (9,9),(10,10),(11,11),(12,12);
+insert into t1 values (13,13),(14,14),(15,15),(16,16);
explain partitions
select * from t1 X, t1 Y
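The extra INSERTs above shift the optimizer's cost trade-off: with 16 rows spread over 4 hash partitions, scanning the pruned partitions with a block-nested-loop join buffer becomes cheaper than the previous range/ref plans, which is why the expected EXPLAIN output changes. A minimal sketch of how HASH partitioning drives the pruning seen in those plans; the standalone EXPLAIN below is illustrative and not part of the patch:

    CREATE TABLE t1 (a INT NOT NULL, b INT NOT NULL, KEY(a), KEY(b))
    PARTITION BY HASH(a) PARTITIONS 4;
    -- HASH(a) places a row in partition MOD(a, 4), so a=1 lands in p1 and a=2 in p2;
    -- a disjunction over those values prunes the scan to exactly p1,p2:
    EXPLAIN PARTITIONS SELECT * FROM t1 WHERE a = 1 OR a = 2;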
diff --git a/mysql-test/main/partition_range.result b/mysql-test/main/partition_range.result
index 7ae029d488c..48b1ce87555 100644
--- a/mysql-test/main/partition_range.result
+++ b/mysql-test/main/partition_range.result
@@ -958,13 +958,19 @@ INSERT INTO t1 SELECT a + 10, b + 10 FROM t1;
INSERT INTO t1 SELECT a + 20, b + 20 FROM t1;
INSERT INTO t1 SELECT a + 40, b + 40 FROM t1;
INSERT INTO t2 SELECT * FROM t1;
+ANALYZE TABLE t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
# plans should be identical
EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10,100) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 2 Using where; Using index
EXPLAIN SELECT a, MAX(b) FROM t2 WHERE a IN (10,100) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 5 NULL 2 Using where; Using index for group-by
+1 SIMPLE t2 range a a 5 NULL 2 Using where; Using index
FLUSH status;
SELECT a, MAX(b) FROM t1 WHERE a IN (10, 100) GROUP BY a;
a MAX(b)
@@ -980,5 +986,5 @@ a MAX(b)
# Should be no more than 4 reads.
SHOW status LIKE 'handler_read_key';
Variable_name Value
-Handler_read_key 4
+Handler_read_key 2
DROP TABLE t1, t2;
diff --git a/mysql-test/main/partition_range.test b/mysql-test/main/partition_range.test
index 3ff03248d88..7f637f83ed9 100644
--- a/mysql-test/main/partition_range.test
+++ b/mysql-test/main/partition_range.test
@@ -957,6 +957,8 @@ INSERT INTO t1 SELECT a + 40, b + 40 FROM t1;
INSERT INTO t2 SELECT * FROM t1;
+ANALYZE TABLE t1,t2;
+
--echo # plans should be identical
EXPLAIN SELECT a, MAX(b) FROM t1 WHERE a IN (10,100) GROUP BY a;
EXPLAIN SELECT a, MAX(b) FROM t2 WHERE a IN (10,100) GROUP BY a;
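The ANALYZE TABLE added above collects engine-independent statistics for both tables so their row estimates line up and the two plans can be compared as identical; the test then checks the actual work done through handler counters. A minimal sketch of that FLUSH STATUS / SHOW STATUS pattern; the exact counts depend on the data and statistics, so treat them as indicative only:

    FLUSH STATUS;
    SELECT a, MAX(b) FROM t1 WHERE a IN (10, 100) GROUP BY a;
    -- a low Handler_read_key value means the MAX(b) was resolved from the index
    SHOW STATUS LIKE 'Handler_read_key';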
diff --git a/mysql-test/main/password_expiration.result b/mysql-test/main/password_expiration.result
new file mode 100644
index 00000000000..73c436aecd2
--- /dev/null
+++ b/mysql-test/main/password_expiration.result
@@ -0,0 +1,259 @@
+#
+# Only privileged users should be able to expire passwords
+#
+create user user1@localhost;
+alter user user1@localhost password expire;
+create user user2@localhost;
+connect con2,localhost,user2;
+connection con2;
+alter user user1@localhost password expire;
+ERROR 42000: Access denied; you need (at least one of) the CREATE USER privilege(s) for this operation
+disconnect con2;
+connection default;
+drop user user1@localhost;
+drop user user2@localhost;
+#
+# disconnect_on_expired_password=ON should deny a client's connection
+# when the password is expired, or put the client in sandbox mode if OFF
+#
+create user user1@localhost password expire;
+set global disconnect_on_expired_password=ON;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Your password has expired. To log in you must change it using a client that supports expired passwords
+set global disconnect_on_expired_password=OFF;
+connect con1,localhost,user1;
+connection con1;
+select 1;
+ERROR HY000: You must SET PASSWORD before executing this statement
+disconnect con1;
+connection default;
+drop user user1@localhost;
+#
+# connect-expired-password option passed to the client should override
+# the behavior of the disconnect_on_expired_password server system variable.
+#
+create user user1@localhost password expire;
+set global disconnect_on_expired_password=ON;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Your password has expired. To log in you must change it using a client that supports expired passwords
+drop user user1@localhost;
+#
+# Manually expiring a password should have immediate effect
+#
+create user user1@localhost;
+alter user user1@localhost password expire;
+set global disconnect_on_expired_password=ON;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Your password has expired. To log in you must change it using a client that supports expired passwords
+drop user user1@localhost;
+#
+# Sandbox mode should only allow change password statements
+#
+create user user1@localhost password expire;
+grant create user on *.* to user1@localhost;
+set global disconnect_on_expired_password=OFF;
+connect con1,localhost,user1;
+connection con1;
+select 1;
+ERROR HY000: You must SET PASSWORD before executing this statement
+set password=password('');
+select 1;
+1
+1
+disconnect con1;
+connection default;
+drop user user1@localhost;
+#
+# Passwords are still expired after acl reload
+#
+set global disconnect_on_expired_password=ON;
+create user user1@localhost password expire;
+flush privileges;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Your password has expired. To log in you must change it using a client that supports expired passwords
+drop user user1@localhost;
+#
+# JSON functions on global_priv reflect the correct state
+# of the password expiration columns
+#
+create user user1@localhost password expire;
+select host, user, JSON_VALUE(Priv, '$.password_last_changed') from mysql.global_priv where user='user1';
+host user JSON_VALUE(Priv, '$.password_last_changed')
+localhost user1 0
+alter user user1@localhost password expire never;
+select host, user, JSON_VALUE(Priv, '$.password_lifetime') from mysql.global_priv where user='user1';
+host user JSON_VALUE(Priv, '$.password_lifetime')
+localhost user1 0
+alter user user1@localhost password expire default;
+select host, user, JSON_VALUE(Priv, '$.password_lifetime') from mysql.global_priv where user='user1';
+host user JSON_VALUE(Priv, '$.password_lifetime')
+localhost user1 -1
+alter user user1@localhost password expire interval 123 day;
+select host, user, JSON_VALUE(Priv, '$.password_lifetime') from mysql.global_priv where user='user1';
+host user JSON_VALUE(Priv, '$.password_lifetime')
+localhost user1 123
+drop user user1@localhost;
+#
+# SHOW CREATE USER correctly displays the password expiration state of a user
+#
+create user user1@localhost;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost'
+alter user user1@localhost password expire;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE
+set password for user1@localhost= password('');
+alter user user1@localhost password expire default;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost'
+alter user user1@localhost password expire never;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE NEVER
+alter user user1@localhost password expire interval 123 day;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE INTERVAL 123 DAY
+alter user user1@localhost password expire;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE
+set password for user1@localhost= password('');
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE INTERVAL 123 DAY
+drop user user1@localhost;
+#
+# Incorrect INTERVAL values should be rejected
+#
+create user user1@localhost password expire interval 0 day;
+ERROR HY000: Incorrect DAY value: '0'
+#
+# Password expiration fields are loaded properly on 10.3 tables
+#
+create user user1@localhost;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost'
+flush privileges;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE NEVER
+alter user user1@localhost password expire;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE
+flush privileges;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE
+set password for user1@localhost= password('');
+alter user user1@localhost password expire default;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost'
+flush privileges;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE NEVER
+alter user user1@localhost password expire never;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE NEVER
+flush privileges;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE NEVER
+alter user user1@localhost password expire interval 123 day;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE INTERVAL 123 DAY
+flush privileges;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE NEVER
+alter user user1@localhost password expire;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE
+flush privileges;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' PASSWORD EXPIRE
+set global disconnect_on_expired_password=ON;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Your password has expired. To log in you must change it using a client that supports expired passwords
+set global disconnect_on_expired_password=OFF;
+connect con1,localhost,user1;
+connection con1;
+select 1;
+ERROR HY000: You must SET PASSWORD before executing this statement
+set password=password('');
+select 1;
+1
+1
+disconnect con1;
+connection default;
+drop user user1@localhost;
+set global disconnect_on_expired_password=default;
+set global default_password_lifetime=default;
+#
+# PASSWORD EXPIRE DEFAULT should use the default_password_lifetime
+# system var to set the number of days till expiration
+#
+set global disconnect_on_expired_password= ON;
+set global default_password_lifetime= 2;
+create user user1@localhost password expire default;
+set @tstamp_expired= UNIX_TIMESTAMP(NOW() - INTERVAL 3 DAY);
+update mysql.global_priv set
+priv=json_set(priv, '$.password_last_changed', @tstamp_expired)
+where user='user1';
+flush privileges;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Your password has expired. To log in you must change it using a client that supports expired passwords
+drop user user1@localhost;
+#
+# PASSWORD EXPIRE INTERVAL should expire a client's password after
+# X days and not before
+#
+set global disconnect_on_expired_password= ON;
+create user user1@localhost password expire interval 2 day;
+connect con1,localhost,user1;
+disconnect con1;
+connection default;
+set @tstamp_expired= UNIX_TIMESTAMP(NOW() - INTERVAL 3 DAY);
+update mysql.global_priv set
+priv=json_set(priv, '$.password_last_changed', @tstamp_expired)
+where user='user1';
+flush privileges;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Your password has expired. To log in you must change it using a client that supports expired passwords
+drop user user1@localhost;
+#
+# PASSWORD EXPIRE NEVER should override the other policies and never
+# expire a client's password
+#
+set global disconnect_on_expired_password= ON;
+create user user1@localhost password expire interval 2 day;
+alter user user1@localhost password expire never;
+set @tstamp_expired= UNIX_TIMESTAMP() - 3;
+update mysql.global_priv set
+priv=json_set(priv, '$.password_last_changed', @tstamp_expired)
+where user='user1';
+flush privileges;
+connect con1,localhost,user1;
+disconnect con1;
+connection default;
+drop user user1@localhost;
+set global disconnect_on_expired_password= default;
+set global default_password_lifetime= default;
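The JSON_VALUE checks above show where 10.4 keeps the expiration metadata: inside the Priv JSON document of mysql.global_priv. A minimal sketch for auditing a single account, with the account name taken from the test and otherwise illustrative:

    SELECT host, user,
           JSON_VALUE(Priv, '$.password_lifetime')     AS lifetime_days,
           JSON_VALUE(Priv, '$.password_last_changed') AS last_changed
    FROM mysql.global_priv
    WHERE user = 'user1';
    -- lifetime_days: -1 follows default_password_lifetime, 0 never expires,
    -- N > 0 expires N days after last_changed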
diff --git a/mysql-test/main/password_expiration.test b/mysql-test/main/password_expiration.test
new file mode 100644
index 00000000000..b7d1269537a
--- /dev/null
+++ b/mysql-test/main/password_expiration.test
@@ -0,0 +1,263 @@
+#
+# Test password expiration
+#
+
+--source include/not_embedded.inc
+
+--echo #
+--echo # Only privileged users should be able to expire passwords
+--echo #
+create user user1@localhost;
+alter user user1@localhost password expire;
+
+create user user2@localhost;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+connect(con2,localhost,user2);
+connection con2;
+--error ER_SPECIFIC_ACCESS_DENIED_ERROR
+alter user user1@localhost password expire;
+
+disconnect con2;
+connection default;
+drop user user1@localhost;
+drop user user2@localhost;
+
+--echo #
+--echo # disconnect_on_expired_password=ON should deny a client's connection
+--echo # when the password is expired, or put the client in sandbox mode if OFF
+--echo #
+create user user1@localhost password expire;
+set global disconnect_on_expired_password=ON;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_MUST_CHANGE_PASSWORD_LOGIN
+connect(con1,localhost,user1);
+
+# should allow the client to enter sandbox mode
+set global disconnect_on_expired_password=OFF;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+connect(con1,localhost,user1);
+connection con1;
+--error ER_MUST_CHANGE_PASSWORD
+select 1;
+disconnect con1;
+connection default;
+drop user user1@localhost;
+
+--echo #
+--echo # connect-expired-password option passed to the client should override
+--echo # the behavior of the disconnect_on_expired_password server system variable.
+--echo #
+create user user1@localhost password expire;
+set global disconnect_on_expired_password=ON;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_MUST_CHANGE_PASSWORD_LOGIN
+connect(con1,localhost,user1);
+
+--exec $MYSQL --connect-expired-password -u user1 -e "set password=password('');"
+drop user user1@localhost;
+
+--echo #
+--echo # Manually expiring a password should have immediate effect
+--echo #
+create user user1@localhost;
+alter user user1@localhost password expire;
+set global disconnect_on_expired_password=ON;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_MUST_CHANGE_PASSWORD_LOGIN
+connect(con1,localhost,user1);
+drop user user1@localhost;
+
+--echo #
+--echo # Sandbox mode should only allow change password statements
+--echo #
+create user user1@localhost password expire;
+grant create user on *.* to user1@localhost;
+set global disconnect_on_expired_password=OFF;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+connect(con1,localhost,user1);
+connection con1;
+--error ER_MUST_CHANGE_PASSWORD
+select 1;
+set password=password('');
+select 1;
+disconnect con1;
+connection default;
+
+drop user user1@localhost;
+
+--echo #
+--echo # Passwords are still expired after acl reload
+--echo #
+set global disconnect_on_expired_password=ON;
+create user user1@localhost password expire;
+flush privileges;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_MUST_CHANGE_PASSWORD_LOGIN
+connect(con1,localhost,user1);
+drop user user1@localhost;
+
+--echo #
+--echo # JSON functions on global_priv reflect the correct state
+--echo # of the password expiration columns
+--echo #
+
+create user user1@localhost password expire;
+select host, user, JSON_VALUE(Priv, '$.password_last_changed') from mysql.global_priv where user='user1';
+alter user user1@localhost password expire never;
+select host, user, JSON_VALUE(Priv, '$.password_lifetime') from mysql.global_priv where user='user1';
+alter user user1@localhost password expire default;
+select host, user, JSON_VALUE(Priv, '$.password_lifetime') from mysql.global_priv where user='user1';
+alter user user1@localhost password expire interval 123 day;
+select host, user, JSON_VALUE(Priv, '$.password_lifetime') from mysql.global_priv where user='user1';
+drop user user1@localhost;
+
+--echo #
+--echo # SHOW CREATE USER correctly displays the password expiration state of a user
+--echo #
+
+create user user1@localhost;
+show create user user1@localhost;
+alter user user1@localhost password expire;
+show create user user1@localhost;
+set password for user1@localhost= password('');
+alter user user1@localhost password expire default;
+show create user user1@localhost;
+alter user user1@localhost password expire never;
+show create user user1@localhost;
+alter user user1@localhost password expire interval 123 day;
+show create user user1@localhost;
+alter user user1@localhost password expire;
+show create user user1@localhost;
+set password for user1@localhost= password('');
+show create user user1@localhost;
+drop user user1@localhost;
+
+--echo #
+--echo # Incorrect INTERVAL values should be rejected
+--echo #
+--error ER_WRONG_VALUE
+create user user1@localhost password expire interval 0 day;
+
+--echo #
+--echo # Password expiration fields are loaded properly on 10.3 tables
+--echo #
+--source include/switch_to_mysql_user.inc
+create user user1@localhost;
+show create user user1@localhost;
+flush privileges;
+show create user user1@localhost;
+
+alter user user1@localhost password expire;
+show create user user1@localhost;
+flush privileges;
+show create user user1@localhost;
+set password for user1@localhost= password('');
+
+alter user user1@localhost password expire default;
+show create user user1@localhost;
+flush privileges;
+show create user user1@localhost;
+
+alter user user1@localhost password expire never;
+show create user user1@localhost;
+flush privileges;
+show create user user1@localhost;
+
+alter user user1@localhost password expire interval 123 day;
+show create user user1@localhost;
+flush privileges;
+show create user user1@localhost;
+
+alter user user1@localhost password expire;
+show create user user1@localhost;
+flush privileges;
+show create user user1@localhost;
+
+set global disconnect_on_expired_password=ON;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_MUST_CHANGE_PASSWORD_LOGIN
+connect(con1,localhost,user1);
+
+set global disconnect_on_expired_password=OFF;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+connect(con1,localhost,user1);
+connection con1;
+--error ER_MUST_CHANGE_PASSWORD
+select 1;
+set password=password('');
+select 1;
+disconnect con1;
+connection default;
+drop user user1@localhost;
+
+set global disconnect_on_expired_password=default;
+set global default_password_lifetime=default;
+--source include/switch_to_mysql_global_priv.inc
+
+#
+# Test password expiration INTERVAL and default_password_lifetime options
+#
+
+--echo #
+--echo # PASSWORD EXPIRE DEFAULT should use the default_password_lifetime
+--echo # system var to set the number of days till expiration
+--echo #
+set global disconnect_on_expired_password= ON;
+set global default_password_lifetime= 2;
+create user user1@localhost password expire default;
+
+set @tstamp_expired= UNIX_TIMESTAMP(NOW() - INTERVAL 3 DAY);
+update mysql.global_priv set
+ priv=json_set(priv, '$.password_last_changed', @tstamp_expired)
+ where user='user1';
+flush privileges;
+
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_MUST_CHANGE_PASSWORD_LOGIN
+connect(con1,localhost,user1);
+drop user user1@localhost;
+
+--echo #
+--echo # PASSWORD EXPIRE INTERVAL should expire a client's password after
+--echo # X days and not before
+--echo #
+set global disconnect_on_expired_password= ON;
+create user user1@localhost password expire interval 2 day;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+connect(con1,localhost,user1);
+disconnect con1;
+connection default;
+
+set @tstamp_expired= UNIX_TIMESTAMP(NOW() - INTERVAL 3 DAY);
+update mysql.global_priv set
+ priv=json_set(priv, '$.password_last_changed', @tstamp_expired)
+ where user='user1';
+flush privileges;
+
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_MUST_CHANGE_PASSWORD_LOGIN
+connect(con1,localhost,user1);
+drop user user1@localhost;
+
+--echo #
+--echo # PASSWORD EXPIRE NEVER should override the other policies and never
+--echo # expire a client's password
+--echo #
+set global disconnect_on_expired_password= ON;
+create user user1@localhost password expire interval 2 day;
+alter user user1@localhost password expire never;
+
+set @tstamp_expired= UNIX_TIMESTAMP() - 3;
+update mysql.global_priv set
+ priv=json_set(priv, '$.password_last_changed', @tstamp_expired)
+ where user='user1';
+flush privileges;
+
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+connect(con1,localhost,user1);
+disconnect con1;
+connection default;
+drop user user1@localhost;
+
+set global disconnect_on_expired_password= default;
+set global default_password_lifetime= default;
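Taken together, the three policy blocks above imply a precedence order: a per-user INTERVAL overrides the global default, and NEVER overrides both. A minimal sketch of how the policies combine; the account name 'app'@'localhost' is illustrative, not from the patch:

    SET GLOBAL default_password_lifetime = 2;                       -- global policy, in days
    CREATE USER 'app'@'localhost' PASSWORD EXPIRE DEFAULT;          -- follows the 2-day global policy
    ALTER USER 'app'@'localhost' PASSWORD EXPIRE INTERVAL 123 DAY;  -- per-user override
    ALTER USER 'app'@'localhost' PASSWORD EXPIRE NEVER;             -- wins over both of the above
    ALTER USER 'app'@'localhost' PASSWORD EXPIRE;                   -- manual expiry, effective immediately
    DROP USER 'app'@'localhost';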
diff --git a/mysql-test/main/password_expiration_unix_socket.result b/mysql-test/main/password_expiration_unix_socket.result
new file mode 100644
index 00000000000..5feee17f205
--- /dev/null
+++ b/mysql-test/main/password_expiration_unix_socket.result
@@ -0,0 +1,8 @@
+#
+# A password cannot expire if there is no password
+#
+create user USER identified via unix_socket;
+alter user USER password expire;
+1
+1
+drop user USER;
diff --git a/mysql-test/main/password_expiration_unix_socket.test b/mysql-test/main/password_expiration_unix_socket.test
new file mode 100644
index 00000000000..f2579aaf18f
--- /dev/null
+++ b/mysql-test/main/password_expiration_unix_socket.test
@@ -0,0 +1,24 @@
+#
+# Test password expiration
+#
+
+--source include/not_embedded.inc
+--source include/have_unix_socket.inc
+
+--echo #
+--echo # A password cannot expire if there is no password
+--echo #
+
+--let $replace=create user $USER
+--replace_result $replace "create user USER"
+--eval create user $USER identified via unix_socket
+
+--let $replace=alter user $USER
+--replace_result $replace "alter user USER"
+--eval alter user $USER password expire
+
+--exec $MYSQL -u $USER -e 'select 1'
+
+--let $replace=drop user $USER
+--replace_result $replace "drop user USER"
+--eval drop user $USER
diff --git a/mysql-test/main/perror-win.result b/mysql-test/main/perror-win.result
index 139b566757f..0e9d2e48a99 100644
--- a/mysql-test/main/perror-win.result
+++ b/mysql-test/main/perror-win.result
@@ -1,7 +1,7 @@
-MySQL error code 150: Foreign key constraint is incorrectly formed
+MariaDB error code 150: Foreign key constraint is incorrectly formed
Win32 error code 150: System trace information was not specified in your CONFIG.SYS file, or tracing is disallowed.
OS error code 23: Too many open files in system
Win32 error code 23: Data error (cyclic redundancy check).
-MySQL error code 1062 (ER_DUP_ENTRY): Duplicate entry '%-.192s' for key %d
+MariaDB error code 1062 (ER_DUP_ENTRY): Duplicate entry '%-.192s' for key %d
Win32 error code 1062: The service has not been started.
Illegal error code: 30000
diff --git a/mysql-test/main/perror.result b/mysql-test/main/perror.result
index 46554442721..ad2106524e5 100644
--- a/mysql-test/main/perror.result
+++ b/mysql-test/main/perror.result
@@ -1,6 +1,6 @@
Illegal error code: 10000
-MySQL error code 1062 (ER_DUP_ENTRY): Duplicate entry '%-.192s' for key %d
-MySQL error code 1408 (ER_STARTUP): %s: ready for connections.
+MariaDB error code 1062 (ER_DUP_ENTRY): Duplicate entry '%-.192s' for key %d
+MariaDB error code 1408 (ER_STARTUP): %s: ready for connections.
Version: '%s' socket: '%s' port: %d %s
-MySQL error code 1459 (ER_TABLE_NEEDS_UPGRADE): Upgrade required. Please do "REPAIR %s %`s" or dump/reload to fix it!
-MySQL error code 1461 (ER_MAX_PREPARED_STMT_COUNT_REACHED): Can't create more than max_prepared_stmt_count statements (current value: %u)
+MariaDB error code 1459 (ER_TABLE_NEEDS_UPGRADE): Upgrade required. Please do "REPAIR %s %`s" or dump/reload to fix it!
+MariaDB error code 1461 (ER_MAX_PREPARED_STMT_COUNT_REACHED): Can't create more than max_prepared_stmt_count statements (current value: %u)
diff --git a/mysql-test/main/plugin.result b/mysql-test/main/plugin.result
index 3a141a25b5c..49342705289 100644
--- a/mysql-test/main/plugin.result
+++ b/mysql-test/main/plugin.result
@@ -12,7 +12,7 @@ PLUGIN_STATUS ACTIVE
PLUGIN_TYPE STORAGE ENGINE
PLUGIN_TYPE_VERSION #
PLUGIN_LIBRARY ha_example.so
-PLUGIN_LIBRARY_VERSION 1.13
+PLUGIN_LIBRARY_VERSION 1.14
PLUGIN_AUTHOR Brian Aker, MySQL AB
PLUGIN_DESCRIPTION Example storage engine
PLUGIN_LICENSE GPL
@@ -25,7 +25,7 @@ PLUGIN_STATUS ACTIVE
PLUGIN_TYPE DAEMON
PLUGIN_TYPE_VERSION #
PLUGIN_LIBRARY ha_example.so
-PLUGIN_LIBRARY_VERSION 1.13
+PLUGIN_LIBRARY_VERSION 1.14
PLUGIN_AUTHOR Sergei Golubchik
PLUGIN_DESCRIPTION Unusable Daemon
PLUGIN_LICENSE GPL
@@ -64,7 +64,7 @@ PLUGIN_STATUS DELETED
PLUGIN_TYPE STORAGE ENGINE
PLUGIN_TYPE_VERSION #
PLUGIN_LIBRARY ha_example.so
-PLUGIN_LIBRARY_VERSION 1.13
+PLUGIN_LIBRARY_VERSION 1.14
PLUGIN_AUTHOR Brian Aker, MySQL AB
PLUGIN_DESCRIPTION Example storage engine
PLUGIN_LICENSE GPL
@@ -331,3 +331,65 @@ UNINSTALL PLUGIN example;
RENAME TABLE t1 TO t2;
ERROR 42S02: Table 'test.t1' doesn't exist
DROP TABLE t1;
+#
+# MDEV-16294: INSTALL PLUGIN IF NOT EXISTS / UNINSTALL PLUGIN IF EXISTS
+#
+# INSTALL PLUGIN IF NOT EXISTS name SONAME 'library' /
+# UNINSTALL PLUGIN|SONAME IF EXISTS name
+#
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+PLUGIN_NAME PLUGIN_STATUS PLUGIN_TYPE
+INSTALL PLUGIN IF NOT EXISTS example SONAME 'ha_example';
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+PLUGIN_NAME PLUGIN_STATUS PLUGIN_TYPE
+EXAMPLE ACTIVE STORAGE ENGINE
+INSTALL PLUGIN example SONAME 'ha_example';
+ERROR HY000: Plugin 'example' already installed
+INSTALL PLUGIN IF NOT EXISTS example SONAME 'ha_example';
+Warnings:
+Note 1968 Plugin 'example' already installed
+SHOW WARNINGS;
+Level Code Message
+Note 1968 Plugin 'example' already installed
+UNINSTALL PLUGIN IF EXISTS example;
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+PLUGIN_NAME PLUGIN_STATUS PLUGIN_TYPE
+UNINSTALL PLUGIN IF EXISTS example;
+Warnings:
+Note 1305 PLUGIN example does not exist
+SHOW WARNINGS;
+Level Code Message
+Note 1305 PLUGIN example does not exist
+UNINSTALL PLUGIN example;
+ERROR 42000: PLUGIN example does not exist
+INSTALL SONAME 'ha_example';
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+PLUGIN_NAME PLUGIN_STATUS PLUGIN_TYPE
+EXAMPLE ACTIVE STORAGE ENGINE
+UNUSABLE ACTIVE DAEMON
+UNINSTALL SONAME IF EXISTS 'ha_example';
+UNINSTALL SONAME IF EXISTS 'ha_example';
+Warnings:
+Note 1305 SONAME ha_example.so does not exist
+SHOW WARNINGS;
+Level Code Message
+Note 1305 SONAME ha_example.so does not exist
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+PLUGIN_NAME PLUGIN_STATUS PLUGIN_TYPE
+UNINSTALL SONAME 'ha_example';
+ERROR 42000: SONAME ha_example.so does not exist
+#
+# Make sure temporary tables maintain plugin references properly
+#
+INSTALL PLUGIN example SONAME 'ha_example';
+CREATE TEMPORARY TABLE t1(a INT) ENGINE=example;
+UNINSTALL PLUGIN example;
+Warnings:
+Warning 1620 Plugin is busy and will be uninstalled on shutdown
+INSTALL PLUGIN example SONAME 'ha_example';
+ERROR HY000: Plugin 'example' already installed
+DROP TABLE t1;
+INSTALL PLUGIN example SONAME 'ha_example';
+CREATE TEMPORARY TABLE t1(a INT) ENGINE=example;
+DROP TABLE t1;
+UNINSTALL PLUGIN example;
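The IF NOT EXISTS / IF EXISTS forms exercised above make plugin provisioning idempotent: a repeated call degrades to a Note instead of failing. A minimal sketch of the pattern, using the same ha_example plugin as the test:

    INSTALL PLUGIN IF NOT EXISTS example SONAME 'ha_example';
    INSTALL PLUGIN IF NOT EXISTS example SONAME 'ha_example';  -- second call: Note 1968, already installed
    UNINSTALL PLUGIN IF EXISTS example;
    UNINSTALL PLUGIN IF EXISTS example;                         -- second call: Note 1305, does not exist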
diff --git a/mysql-test/main/plugin.test b/mysql-test/main/plugin.test
index 68c4d5afd64..0990cb206d8 100644
--- a/mysql-test/main/plugin.test
+++ b/mysql-test/main/plugin.test
@@ -265,3 +265,21 @@ UNINSTALL PLUGIN example;
--error ER_NO_SUCH_TABLE
RENAME TABLE t1 TO t2;
DROP TABLE t1;
+
+--source include/install_plugin_if_exists.inc
+
+
+--echo #
+--echo # Make sure temporary tables maintain plugin references properly
+--echo #
+INSTALL PLUGIN example SONAME 'ha_example';
+CREATE TEMPORARY TABLE t1(a INT) ENGINE=example;
+UNINSTALL PLUGIN example;
+--error ER_PLUGIN_INSTALLED
+INSTALL PLUGIN example SONAME 'ha_example';
+DROP TABLE t1;
+
+INSTALL PLUGIN example SONAME 'ha_example';
+CREATE TEMPORARY TABLE t1(a INT) ENGINE=example;
+DROP TABLE t1;
+UNINSTALL PLUGIN example;
diff --git a/mysql-test/main/plugin_auth.result b/mysql-test/main/plugin_auth.result
index e470cc010c0..8472891cd44 100644
--- a/mysql-test/main/plugin_auth.result
+++ b/mysql-test/main/plugin_auth.result
@@ -11,6 +11,8 @@ SELECT plugin,authentication_string FROM mysql.user WHERE User='plug';
plugin authentication_string
test_plugin_server plug_dest
## test plugin auth
+connect(localhost,plug,plug_dest,test,MYSQL_PORT,MYSQL_SOCK);
+connect plug_con,localhost,plug,plug_dest;
ERROR 28000: Access denied for user 'plug'@'localhost' (using password: YES)
GRANT PROXY ON plug_dest TO plug;
test proxies_priv columns
@@ -30,9 +32,8 @@ proxies_priv CREATE TABLE `proxies_priv` (
`Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
PRIMARY KEY (`Host`,`User`,`Proxied_host`,`Proxied_user`),
KEY `Grantor` (`Grantor`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='User proxy privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='User proxy privileges'
connect plug_con,localhost,plug,plug_dest;
-connection plug_con;
select USER(),CURRENT_USER();
USER() CURRENT_USER()
plug@localhost plug_dest@%
@@ -41,11 +42,18 @@ SET PASSWORD = PASSWORD('plug_dest');
connection default;
disconnect plug_con;
## test bad credentials
+connect(localhost,plug,bad_credentials,test,MYSQL_PORT,MYSQL_SOCK);
+connect plug_con,localhost,plug,bad_credentials;
ERROR 28000: Access denied for user 'plug'@'localhost' (using password: YES)
-## test bad default plugin : should get CR_AUTH_PLUGIN_CANNOT_LOAD
+## test bad default plugin : nothing bad happens, as that plugin wasn't required by the server
+connect plug_con_wrongp,localhost,plug,plug_dest,,,,,wrong_plugin_name;
+select USER(),CURRENT_USER();
+USER() CURRENT_USER()
+plug@localhost plug_dest@%
+connection default;
+disconnect plug_con_wrongp;
## test correct default plugin
connect plug_con_rightp,localhost,plug,plug_dest,,,,,auth_test_plugin;
-connection plug_con_rightp;
select USER(),CURRENT_USER();
USER() CURRENT_USER()
plug@localhost plug_dest@%
@@ -60,7 +68,6 @@ DROP USER grant_user;
CREATE USER `Ÿ` IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
GRANT PROXY ON plug_dest TO `Ÿ`;
connect non_ascii,localhost,Ÿ,plug_dest;
-connection non_ascii;
select USER(),CURRENT_USER();
USER() CURRENT_USER()
Ÿ@localhost plug_dest@%
@@ -74,7 +81,6 @@ GRANT ALL PRIVILEGES ON test_grant_db.* TO new_grant_user
IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
GRANT PROXY ON plug_dest TO new_grant_user;
connect plug_con_grant,localhost,new_grant_user,plug_dest;
-connection plug_con_grant;
select USER(),CURRENT_USER();
USER() CURRENT_USER()
new_grant_user@localhost plug_dest@%
@@ -91,7 +97,6 @@ connect(localhost,new_grant_user,plug_dest,test,MYSQL_PORT,MYSQL_SOCK);
connect plug_con_grant_deny,localhost,new_grant_user,plug_dest;
ERROR 28000: Access denied for user 'new_grant_user'@'localhost' (using password: YES)
connect plug_con_grant,localhost,new_grant_user,new_password;
-connection plug_con_grant;
select USER(),CURRENT_USER();
USER() CURRENT_USER()
new_grant_user@localhost new_grant_user@%
@@ -133,8 +138,6 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
REVOKE PROXY ON grant_plug_dest FROM grant_plug;
ERROR 42000: There is no such grant defined for user 'grant_plug' on host '%'
connect grant_plug_dest_con,localhost,grant_plug_dest,grant_plug_dest_passwd;
-connection grant_plug_dest_con;
-in grant_plug_dest_con
## testing what an ordinary user can grant
this should fail : no rights to grant all
GRANT PROXY ON ''@'%%' TO grant_plug;
@@ -158,7 +161,6 @@ this should fail : can't create users
GRANT PROXY ON grant_plug_dest TO grant_plug@localhost;
ERROR 42000: You are not allowed to create a user with GRANT
connection default;
-in default connection
disconnect grant_plug_dest_con;
# test what root can grant
should work : root has PROXY to all users
@@ -170,12 +172,9 @@ WITH GRANT OPTION;
need USAGE : PROXY doesn't contain it.
GRANT USAGE on *.* TO proxy_admin;
connect proxy_admin_con,localhost,proxy_admin,test;
-connection proxy_admin_con;
-in proxy_admin_con;
should work : proxy_admin has proxy to ''@'%%'
GRANT PROXY ON future_user TO grant_plug;
connection default;
-in default connection
disconnect proxy_admin_con;
SHOW GRANTS FOR grant_plug;
Grants for grant_plug@%
@@ -221,13 +220,10 @@ SELECT @@LOCAL.proxy_user;
@@LOCAL.proxy_user
NULL
connect plug_con,localhost,plug,plug_dest;
-connection plug_con;
-# in connection plug_con
SELECT @@LOCAL.proxy_user;
@@LOCAL.proxy_user
'plug'@'%'
connection default;
-# in connection default
disconnect plug_con;
## cleanup
DROP USER plug;
@@ -253,13 +249,10 @@ SELECT @@LOCAL.external_user;
@@LOCAL.external_user
NULL
connect plug_con,localhost,plug,plug_dest;
-connection plug_con;
-# in connection plug_con
SELECT @@LOCAL.external_user;
@@LOCAL.external_user
plug_dest
connection default;
-# in connection default
disconnect plug_con;
## cleanup
DROP USER plug;
@@ -299,6 +292,7 @@ DROP DATABASE shared;
CALL mtr.add_suppression("Missing system table mysql.proxies_priv.");
DROP TABLE mysql.proxies_priv;
# Must come back with mysql.proxies_priv absent.
+# restart
SELECT * FROM mysql.proxies_priv;
ERROR 42S02: Table 'mysql.proxies_priv' doesn't exist
CREATE USER u1@localhost;
@@ -315,7 +309,6 @@ REVOKE PROXY ON u2@localhost FROM u1@localhost;
ERROR 28000: Access denied for user 'root'@'localhost'
# go try granting proxy on itself, so that it will need the table
connect proxy_granter_con,localhost,u2,;
-connection proxy_granter_con;
GRANT PROXY ON u2@localhost TO u1@localhost;
ERROR 42S02: Table 'mysql.proxies_priv' doesn't exist
REVOKE PROXY ON u2@localhost FROM u1@localhost;
@@ -355,13 +348,13 @@ CREATE USER uplain@localhost IDENTIFIED WITH 'cleartext_plugin_server'
## test plugin auth
ERROR 28000: Access denied for user 'uplain'@'localhost' (using password: YES)
connect cleartext_con,localhost,uplain,cleartext_test;
-connection cleartext_con;
select USER(),CURRENT_USER();
USER() CURRENT_USER()
uplain@localhost uplain@localhost
connection default;
disconnect cleartext_con;
DROP USER uplain@localhost;
+drop view mysql.user_bak;
#
# Bug #59038 : mysql.user.authentication_string column
# causes configuration wizard to fail
@@ -443,6 +436,8 @@ ORDER BY COLUMN_NAME;
IS_NULLABLE COLUMN_NAME
NO authentication_string
NO plugin
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
#
# Bug # 11766641: 59792: BIN/MYSQL -UUNKNOWN -PUNKNOWN
# .-> USING PASSWORD: NO
@@ -471,7 +466,6 @@ CREATE USER bug12818542_dest@localhost
IDENTIFIED BY 'bug12818542_dest_passwd';
GRANT PROXY ON bug12818542_dest@localhost TO bug12818542@localhost;
connect bug12818542_con,localhost,bug12818542,bug12818542_dest;
-connection bug12818542_con;
SELECT USER(),CURRENT_USER();
USER() CURRENT_USER()
bug12818542@localhost bug12818542_dest@localhost
@@ -479,7 +473,6 @@ SET PASSWORD = PASSWORD('bruhaha');
connection default;
disconnect bug12818542_con;
connect bug12818542_con2,localhost,bug12818542,bug12818542_dest;
-connection bug12818542_con2;
SELECT USER(),CURRENT_USER();
USER() CURRENT_USER()
bug12818542@localhost bug12818542_dest@localhost
diff --git a/mysql-test/main/plugin_auth.test b/mysql-test/main/plugin_auth.test
index c3c18b7e427..30e4fa6e0ad 100644
--- a/mysql-test/main/plugin_auth.test
+++ b/mysql-test/main/plugin_auth.test
@@ -15,10 +15,9 @@ CREATE USER plug_dest IDENTIFIED BY 'plug_dest_passwd';
SELECT plugin,authentication_string FROM mysql.user WHERE User='plug';
--echo ## test plugin auth
---disable_query_log
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
--error ER_ACCESS_DENIED_ERROR : this should fail : no grant
connect(plug_con,localhost,plug,plug_dest);
---enable_query_log
GRANT PROXY ON plug_dest TO plug;
--echo test proxies_priv columns
@@ -28,8 +27,6 @@ SELECT * FROM mysql.proxies_priv WHERE user !='root';
SHOW CREATE TABLE mysql.proxies_priv;
connect(plug_con,localhost,plug,plug_dest);
-
-connection plug_con;
select USER(),CURRENT_USER();
--echo ## test SET PASSWORD
@@ -40,22 +37,18 @@ connection default;
disconnect plug_con;
--echo ## test bad credentials
---disable_query_log
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
--error ER_ACCESS_DENIED_ERROR
connect(plug_con,localhost,plug,bad_credentials);
---enable_query_log
---echo ## test bad default plugin : should get CR_AUTH_PLUGIN_CANNOT_LOAD
---disable_result_log
---disable_query_log
---error 2059
+--echo ## test bad default plugin : nothing bad happens, as that plugin wasn't required by the server
connect(plug_con_wrongp,localhost,plug,plug_dest,,,,,wrong_plugin_name);
---enable_query_log
---enable_result_log
+select USER(),CURRENT_USER();
+connection default;
+disconnect plug_con_wrongp;
--echo ## test correct default plugin
connect(plug_con_rightp,localhost,plug,plug_dest,,,,,auth_test_plugin);
-connection plug_con_rightp;
select USER(),CURRENT_USER();
connection default;
disconnect plug_con_rightp;
@@ -72,7 +65,6 @@ CREATE USER `Ÿ` IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
GRANT PROXY ON plug_dest TO `Ÿ`;
connect(non_ascii,localhost,Ÿ,plug_dest);
-connection non_ascii;
select USER(),CURRENT_USER();
connection default;
@@ -90,7 +82,6 @@ GRANT ALL PRIVILEGES ON test_grant_db.* TO new_grant_user
GRANT PROXY ON plug_dest TO new_grant_user;
connect(plug_con_grant,localhost,new_grant_user,plug_dest);
-connection plug_con_grant;
select USER(),CURRENT_USER();
USE test_grant_db;
CREATE TABLE t1 (a INT);
@@ -108,7 +99,6 @@ GRANT ALL PRIVILEGES ON test_grant_db.* TO new_grant_user
connect(plug_con_grant_deny,localhost,new_grant_user,plug_dest);
connect(plug_con_grant,localhost,new_grant_user,new_password);
-connection plug_con_grant;
select USER(),CURRENT_USER();
USE test_grant_db;
CREATE TABLE t1 (a INT);
@@ -166,8 +156,6 @@ GRANT ALL SELECT,PROXY ON grant_plug_dest TO grant_plug;
REVOKE PROXY ON grant_plug_dest FROM grant_plug;
connect(grant_plug_dest_con,localhost,grant_plug_dest,grant_plug_dest_passwd);
-connection grant_plug_dest_con;
---echo in grant_plug_dest_con
--echo ## testing what an ordinary user can grant
--echo this should fail : no rights to grant all
@@ -209,7 +197,6 @@ REVOKE PROXY ON grant_plug_dest@localhost FROM grant_plug;
GRANT PROXY ON grant_plug_dest TO grant_plug@localhost;
connection default;
---echo in default connection
disconnect grant_plug_dest_con;
--echo # test what root can grant
@@ -226,14 +213,11 @@ GRANT PROXY ON ''@'%%' TO proxy_admin IDENTIFIED BY 'test'
GRANT USAGE on *.* TO proxy_admin;
connect (proxy_admin_con,localhost,proxy_admin,test);
-connection proxy_admin_con;
---echo in proxy_admin_con;
--echo should work : proxy_admin has proxy to ''@'%%'
GRANT PROXY ON future_user TO grant_plug;
connection default;
---echo in default connection
disconnect proxy_admin_con;
SHOW GRANTS FOR grant_plug;
@@ -275,11 +259,8 @@ SET LOCAL proxy_user = 'test';
SELECT @@LOCAL.proxy_user;
connect(plug_con,localhost,plug,plug_dest);
-connection plug_con;
---echo # in connection plug_con
SELECT @@LOCAL.proxy_user;
connection default;
---echo # in connection default
disconnect plug_con;
--echo ## cleanup
@@ -304,11 +285,8 @@ SET LOCAL external_user = 'test';
SELECT @@LOCAL.external_user;
connect(plug_con,localhost,plug,plug_dest);
-connection plug_con;
---echo # in connection plug_con
SELECT @@LOCAL.external_user;
connection default;
---echo # in connection default
disconnect plug_con;
--echo ## cleanup
@@ -382,7 +360,6 @@ REVOKE PROXY ON u2@localhost FROM u1@localhost;
--echo # go try granting proxy on itself, so that it will need the table
connect(proxy_granter_con,localhost,u2,);
-connection proxy_granter_con;
--error ER_NO_SUCH_TABLE
GRANT PROXY ON u2@localhost TO u1@localhost;
@@ -438,13 +415,16 @@ connect(cleartext_fail_con,localhost,uplain,cleartext_test2);
--enable_query_log
connect(cleartext_con,localhost,uplain,cleartext_test);
-connection cleartext_con;
select USER(),CURRENT_USER();
connection default;
disconnect cleartext_con;
DROP USER uplain@localhost;
+# prepare for two tests that use mysql.user table
+source include/switch_to_mysql_user.inc;
+drop view mysql.user_bak;
+
--echo #
--echo # Bug #59038 : mysql.user.authentication_string column
--echo # causes configuration wizard to fail
@@ -502,7 +482,6 @@ SELECT IS_NULLABLE, COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE
TABLE_SCHEMA='mysql'
ORDER BY COLUMN_NAME;
-
--echo #
--echo # Bug #11936829: diff. between mysql.user (authentication_string)
--echo # in fresh and upgraded 5.5.11
@@ -524,7 +503,8 @@ SELECT IS_NULLABLE, COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS
ORDER BY COLUMN_NAME;
let $datadir= `select @@datadir`;
remove_file $datadir/mysql_upgrade_info;
-
+drop table mysql.global_priv;
+rename table mysql.global_priv_bak to mysql.global_priv;
--echo #
--echo # Bug # 11766641: 59792: BIN/MYSQL -UUNKNOWN -PUNKNOWN
@@ -567,7 +547,6 @@ CREATE USER bug12818542_dest@localhost
GRANT PROXY ON bug12818542_dest@localhost TO bug12818542@localhost;
connect(bug12818542_con,localhost,bug12818542,bug12818542_dest);
-connection bug12818542_con;
SELECT USER(),CURRENT_USER();
SET PASSWORD = PASSWORD('bruhaha');
@@ -576,7 +555,6 @@ connection default;
disconnect bug12818542_con;
connect(bug12818542_con2,localhost,bug12818542,bug12818542_dest);
-connection bug12818542_con2;
SELECT USER(),CURRENT_USER();
connection default;
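Much of the file above revolves around proxy users: an account that authenticates through a plugin is mapped onto a second account whose privileges it then exercises. A minimal sketch of that setup, using the same test_plugin_server plugin as the test and run as a privileged user:

    CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
    CREATE USER plug_dest IDENTIFIED BY 'plug_dest_passwd';
    GRANT PROXY ON plug_dest TO plug;
    -- a client logging in as 'plug' now acts with plug_dest's privileges:
    SELECT USER(), CURRENT_USER();   -- plug@localhost, plug_dest@%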
diff --git a/mysql-test/main/plugin_auth_qa.result b/mysql-test/main/plugin_auth_qa.result
index 4f274c45971..e00ac1a4c96 100644
--- a/mysql-test/main/plugin_auth_qa.result
+++ b/mysql-test/main/plugin_auth_qa.result
@@ -1,38 +1,38 @@
set sql_mode="";
CREATE DATABASE test_user_db;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
========== test 1.1 ======================================================
CREATE USER plug IDENTIFIED WITH test_plugin_server;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server
DROP USER plug;
GRANT ALL PRIVILEGES ON test_user_db.* TO plug IDENTIFIED WITH test_plugin_server;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server
REVOKE ALL PRIVILEGES ON test_user_db.* FROM plug;
DROP USER plug;
CREATE USER plug IDENTIFIED WITH 'test_plugin_server';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server
DROP USER plug;
GRANT ALL PRIVILEGES ON test_user_db.* TO plug IDENTIFIED WITH 'test_plugin_server';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server
REVOKE ALL PRIVILEGES ON test_user_db.* FROM plug;
DROP USER plug;
CREATE USER plug IDENTIFIED WITH test_plugin_server AS '';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server
DROP USER plug;
GRANT ALL PRIVILEGES ON test_user_db.* TO plug IDENTIFIED WITH test_plugin_server AS '';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server
REVOKE ALL PRIVILEGES ON test_user_db.* FROM plug;
DROP USER plug;
@@ -100,63 +100,63 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
========== test 1.1.1.6/1.1.2.5 ============================
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server plug_dest
CREATE USER plug_dest IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server plug_dest
-plug_dest
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
DROP USER plug, plug_dest;
CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server plug_dest
DROP USER plug;
CREATE USER plug_dest IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-plug_dest
+User plugin authentication_string
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
DROP USER plug_dest;
GRANT ALL PRIVILEGES ON test_user_db.* TO plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server plug_dest
CREATE USER plug_dest IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server plug_dest
-plug_dest
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
DROP USER plug, plug_dest;
GRANT ALL PRIVILEGES ON test_user_db.* TO plug IDENTIFIED WITH test_plugin_server AS 'plug_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server plug_dest
DROP USER plug;
CREATE USER plug_dest IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-plug_dest
+User plugin authentication_string
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
DROP USER plug_dest;
CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server plug_dest
GRANT ALL PRIVILEGES ON test_user_db.* TO plug_dest IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server plug_dest
-plug_dest
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
DROP USER plug, plug_dest;
CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server plug_dest
DROP USER plug;
GRANT ALL PRIVILEGES ON test_user_db.* TO plug_dest IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-plug_dest
+User plugin authentication_string
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
DROP USER plug_dest;
CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
GRANT ALL PRIVILEGES ON test_user_db.* TO plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
@@ -170,12 +170,12 @@ ERROR HY000: Operation CREATE USER failed for 'plug'@'%'
DROP USER plug;
CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
SELECT user,plugin,authentication_string,password FROM mysql.user WHERE user != 'root';
-user plugin authentication_string password
+User plugin authentication_string Password
plug test_plugin_server plug_dest
GRANT ALL PRIVILEGES ON test_user_db.* TO plug IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string,password FROM mysql.user WHERE user != 'root';
-user plugin authentication_string password
-plug *939AEE68989794C0F408277411C26055CDF41119
+User plugin authentication_string Password
+plug mysql_native_password *939AEE68989794C0F408277411C26055CDF41119 *939AEE68989794C0F408277411C26055CDF41119
DROP USER plug;
GRANT ALL PRIVILEGES ON test_user_db.* TO plug IDENTIFIED WITH test_plugin_server AS 'plug_dest';
CREATE USER plug IDENTIFIED BY 'plug_dest_passwd';
@@ -184,18 +184,18 @@ DROP USER plug;
CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
CREATE USER plug_dest IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
SELECT user,plugin,authentication_string,password FROM mysql.user WHERE user != 'root';
-user plugin authentication_string password
+User plugin authentication_string Password
plug test_plugin_server plug_dest
plug_dest test_plugin_server plug_dest
DROP USER plug,plug_dest;
CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
SELECT user,plugin,authentication_string,password FROM mysql.user WHERE user != 'root';
-user plugin authentication_string password
+User plugin authentication_string Password
plug test_plugin_server plug_dest
GRANT ALL PRIVILEGES ON test_user_db.* TO plug_dest
IDENTIFIED WITH test_plugin_server AS 'plug_dest';
SELECT user,plugin,authentication_string,password FROM mysql.user WHERE user != 'root';
-user plugin authentication_string password
+User plugin authentication_string Password
plug test_plugin_server plug_dest
plug_dest test_plugin_server plug_dest
DROP USER plug,plug_dest;
@@ -203,24 +203,24 @@ DROP USER plug,plug_dest;
SET NAMES utf8;
CREATE USER plüg IDENTIFIED WITH 'test_plugin_server' AS 'plüg_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plüg test_plugin_server plüg_dest
DROP USER plüg;
CREATE USER plüg_dest IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-plüg_dest
+User plugin authentication_string
+plüg_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
DROP USER plüg_dest;
SET NAMES ascii;
CREATE USER 'plüg' IDENTIFIED WITH 'test_plugin_server' AS 'plüg_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
pl??g test_plugin_server pl??g_dest
DROP USER 'plüg';
CREATE USER 'plüg_dest' IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-pl??g_dest
+User plugin authentication_string
+pl??g_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
DROP USER 'plüg_dest';
SET NAMES latin1;
========== test 1.1.1.5 ====================================
@@ -228,36 +228,36 @@ CREATE USER 'plüg' IDENTIFIED WITH 'test_plügin_server' AS 'plüg_dest';
ERROR HY000: Plugin 'test_plügin_server' is not loaded
CREATE USER 'plug' IDENTIFIED WITH 'test_plugin_server' AS 'plüg_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server plüg_dest
DROP USER 'plug';
CREATE USER 'plüg_dest' IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-plüg_dest
+User plugin authentication_string
+plüg_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
DROP USER 'plüg_dest';
SET NAMES utf8;
CREATE USER plüg IDENTIFIED WITH 'test_plügin_server' AS 'plüg_dest';
ERROR HY000: Plugin 'test_plügin_server' is not loaded
CREATE USER 'plüg' IDENTIFIED WITH 'test_plugin_server' AS 'plüg_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plüg test_plugin_server plüg_dest
DROP USER 'plüg';
CREATE USER 'plüg_dest' IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-plüg_dest
+User plugin authentication_string
+plüg_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
DROP USER 'plüg_dest';
CREATE USER plüg IDENTIFIED WITH test_plugin_server AS 'plüg_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plüg test_plugin_server plüg_dest
DROP USER plüg;
CREATE USER plüg_dest IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-plüg_dest
+User plugin authentication_string
+plüg_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
DROP USER plüg_dest;
========== test 1.1.1.2/1.1.2.2=============================
SET @auth_name= 'test_plugin_server';
@@ -271,13 +271,13 @@ CREATE USER plug IDENTIFIED WITH 'hh''s_test_plugin_server' AS 'plug_dest';
ERROR HY000: Plugin 'hh's_test_plugin_server' is not loaded
CREATE USER plug IDENTIFIED WITH 'test_plugin_server' AS 'hh''s_plug_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
plug test_plugin_server hh's_plug_dest
DROP USER plug;
CREATE USER 'hh''s_plug_dest' IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-hh's_plug_dest
+User plugin authentication_string
+hh's_plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
DROP USER 'hh''s_plug_dest';
========== test 1.1.1.4 ====================================
CREATE USER plug IDENTIFIED WITH hh''s_test_plugin_server AS 'plug_dest';
@@ -285,21 +285,21 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
========== test 1.1.3.1 ====================================
GRANT INSERT ON test_user_db.* TO grant_user IDENTIFIED WITH test_plugin_server AS 'plug_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
grant_user test_plugin_server plug_dest
CREATE USER plug_dest;
DROP USER plug_dest;
GRANT ALL PRIVILEGES ON test_user_db.* TO plug_dest;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
grant_user test_plugin_server plug_dest
-plug_dest
+plug_dest mysql_native_password
DROP USER grant_user,plug_dest;
set @save_sql_mode= @@sql_mode;
SET @@sql_mode=no_auto_create_user;
GRANT INSERT ON test_user_db.* TO grant_user IDENTIFIED WITH test_plugin_server AS 'plug_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
grant_user test_plugin_server plug_dest
CREATE USER plug_dest;
DROP USER plug_dest;
@@ -308,19 +308,19 @@ ERROR 28000: Can't find any matching row in the user table
DROP USER grant_user;
GRANT INSERT ON test_user_db.* TO grant_user IDENTIFIED WITH test_plugin_server AS 'plug_dest';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
grant_user test_plugin_server plug_dest
CREATE USER plug_dest IDENTIFIED BY 'plug_dest_passwd';
SELECT user,plugin,authentication_string,password FROM mysql.user WHERE user != 'root';
-user plugin authentication_string password
+User plugin authentication_string Password
grant_user test_plugin_server plug_dest
-plug_dest *939AEE68989794C0F408277411C26055CDF41119
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119 *939AEE68989794C0F408277411C26055CDF41119
DROP USER plug_dest;
GRANT ALL PRIVILEGES ON test_user_db.* TO plug_dest IDENTIFIED BY 'plug_user_passwd';
SELECT user,plugin,authentication_string,password FROM mysql.user WHERE user != 'root';
-user plugin authentication_string password
+User plugin authentication_string Password
grant_user test_plugin_server plug_dest
-plug_dest *560881EB651416CEF77314D07D55EDCD5FC1BD6D
+plug_dest mysql_native_password *560881EB651416CEF77314D07D55EDCD5FC1BD6D *560881EB651416CEF77314D07D55EDCD5FC1BD6D
DROP USER grant_user,plug_dest;
set @@sql_mode= @save_sql_mode;
DROP DATABASE test_user_db;
diff --git a/mysql-test/main/plugin_auth_qa_1.result b/mysql-test/main/plugin_auth_qa_1.result
index 42a7b1491c0..d2d902cb6a6 100644
--- a/mysql-test/main/plugin_auth_qa_1.result
+++ b/mysql-test/main/plugin_auth_qa_1.result
@@ -1,6 +1,6 @@
CREATE DATABASE test_user_db;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
========== test 1.1.3.2 ====================================
CREATE USER plug_user IDENTIFIED WITH test_plugin_server AS 'plug_dest';
CREATE USER plug_dest IDENTIFIED BY 'plug_dest_passwd';
@@ -19,8 +19,8 @@ IDENTIFIED WITH test_plugin_server AS 'plug_dest';
GRANT ALL PRIVILEGES ON test_user_db.* TO plug_dest IDENTIFIED BY 'plug_dest_passwd';
GRANT PROXY ON plug_dest TO plug_user;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-plug_dest
+User plugin authentication_string
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
plug_user test_plugin_server plug_dest
1)
current_user()
@@ -70,8 +70,8 @@ ERROR 1045 (28000): Access denied for user 'plug_user'@'localhost' (using passwo
GRANT PROXY ON new_dest TO plug_user;
ERROR 1045 (28000): Access denied for user 'plug_user'@'localhost' (using password: YES)
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-new_dest
+User plugin authentication_string
+new_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
plug_user test_plugin_server plug_dest
DROP USER plug_user,new_dest;
CREATE USER plug_user
@@ -88,8 +88,8 @@ ERROR 1045 (28000): Access denied for user 'plug_user'@'localhost' (using passwo
GRANT PROXY ON new_dest TO plug_user;
ERROR 1045 (28000): Access denied for user 'plug_user'@'localhost' (using password: YES)
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-new_dest
+User plugin authentication_string
+new_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
plug_user test_plugin_server plug_dest
DROP USER plug_user,new_dest;
CREATE USER plug_user
@@ -109,15 +109,15 @@ USER() CURRENT_USER()
new_user@localhost plug_dest@%
connection default;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
new_user test_plugin_server plug_dest
-plug_dest
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
disconnect plug_user;
-UPDATE mysql.user SET user='plug_user' WHERE user='new_user';
+UPDATE mysql.global_priv SET user='plug_user' WHERE user='new_user';
FLUSH PRIVILEGES;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-plug_dest
+User plugin authentication_string
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
plug_user test_plugin_server plug_dest
DROP USER plug_dest,plug_user;
========== test 1.3 ========================================
@@ -132,35 +132,35 @@ plug_user@localhost plug_dest@%
connection default;
disconnect plug_user;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-plug_dest
+User plugin authentication_string
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
plug_user test_plugin_server plug_dest
-UPDATE mysql.user SET user='new_user' WHERE user='plug_user';
+UPDATE mysql.global_priv SET user='new_user' WHERE user='plug_user';
FLUSH PRIVILEGES;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
new_user test_plugin_server plug_dest
-plug_dest
-UPDATE mysql.user SET authentication_string='new_dest' WHERE user='new_user';
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
+UPDATE mysql.global_priv SET priv=JSON_SET(priv, '$.authentication_string', 'new_dest') WHERE user='new_user';
FLUSH PRIVILEGES;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
new_user test_plugin_server new_dest
-plug_dest
-UPDATE mysql.user SET plugin='new_plugin_server' WHERE user='new_user';
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
+UPDATE mysql.global_priv SET priv=JSON_SET(priv, '$.plugin', 'new_plugin_server') WHERE user='new_user';
FLUSH PRIVILEGES;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
new_user new_plugin_server new_dest
-plug_dest
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
ERROR HY000: Plugin 'new_plugin_server' is not loaded
-UPDATE mysql.user SET plugin='test_plugin_server' WHERE user='new_user';
-UPDATE mysql.user SET USER='new_dest' WHERE user='plug_dest';
+UPDATE mysql.global_priv SET priv=JSON_SET(priv, '$.plugin', 'test_plugin_server') WHERE user='new_user';
+UPDATE mysql.global_priv SET user='new_dest' WHERE user='plug_dest';
FLUSH PRIVILEGES;
GRANT PROXY ON new_dest TO new_user;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-new_dest
+User plugin authentication_string
+new_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
new_user test_plugin_server new_dest
connect plug_user,localhost,new_user,new_dest;
select USER(),CURRENT_USER();
@@ -168,14 +168,14 @@ USER() CURRENT_USER()
new_user@localhost new_dest@%
connection default;
disconnect plug_user;
-UPDATE mysql.user SET USER='plug_dest' WHERE user='new_dest';
+UPDATE mysql.global_priv SET user='plug_dest' WHERE user='new_dest';
FLUSH PRIVILEGES;
CREATE USER new_dest IDENTIFIED BY 'new_dest_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-new_dest
+User plugin authentication_string
+new_dest mysql_native_password *01422E86A6FFF84618914AF149F9AEF64B84170A
new_user test_plugin_server new_dest
-plug_dest
+plug_dest mysql_native_password *939AEE68989794C0F408277411C26055CDF41119
GRANT ALL PRIVILEGES ON test.* TO new_user;
connect plug_user,localhost,new_dest,new_dest_passwd;
select USER(),CURRENT_USER();
@@ -188,9 +188,9 @@ DROP USER new_user,new_dest,plug_dest;
CREATE USER ''@'%%' IDENTIFIED WITH test_plugin_server AS 'proxied_user';
CREATE USER proxied_user IDENTIFIED BY 'proxied_user_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
test_plugin_server proxied_user
-proxied_user
+proxied_user mysql_native_password *D7A51428CD38DB3C5293B9321DA1228BFB1611DD
connect proxy_con,localhost,proxied_user,proxied_user_passwd;
SELECT USER(),CURRENT_USER();
USER() CURRENT_USER()
@@ -224,9 +224,9 @@ GRANT ALL PRIVILEGES ON test_user_db.* TO ''@'%%'
IDENTIFIED WITH test_plugin_server AS 'proxied_user';
CREATE USER proxied_user IDENTIFIED BY 'proxied_user_passwd';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
test_plugin_server proxied_user
-proxied_user
+proxied_user mysql_native_password *D7A51428CD38DB3C5293B9321DA1228BFB1611DD
connect proxy_con,localhost,proxied_user,proxied_user_passwd;
SELECT USER(),CURRENT_USER();
USER() CURRENT_USER()
@@ -266,13 +266,13 @@ GRANT PROXY ON proxied_user_3 TO ''@'%%';
GRANT PROXY ON proxied_user_4 TO ''@'%%';
GRANT PROXY ON proxied_user_5 TO ''@'%%';
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
+User plugin authentication_string
test_plugin_server proxied_user
-proxied_user_1
-proxied_user_2
-proxied_user_3
-proxied_user_4
-proxied_user_5
+proxied_user_1 mysql_native_password *551D5A5177FCC3340F7D2FB0F4D8D1EEA7F7EF71
+proxied_user_2 mysql_native_password *3D948F77C6A988AFDCA9755AB2A6724362557220
+proxied_user_3 mysql_native_password *41A18925D237DEE738C76581153990B037F462E3
+proxied_user_4 mysql_native_password *F990073A9B96FF535C2D0721406042B8751E593F
+proxied_user_5 mysql_native_password *5AA915C5D0B5B1336336FD2BF7768BC09FD1F5B2
connect proxy_con_1,localhost,proxied_user_1,'proxied_user_1_pwd';
connect proxy_con_2,localhost,proxied_user_2,proxied_user_2_pwd;
connect proxy_con_3,localhost,proxied_user_3,proxied_user_3_pwd;
diff --git a/mysql-test/main/plugin_auth_qa_1.test b/mysql-test/main/plugin_auth_qa_1.test
index b0b8ffb3544..fb577fc178f 100644
--- a/mysql-test/main/plugin_auth_qa_1.test
+++ b/mysql-test/main/plugin_auth_qa_1.test
@@ -110,7 +110,7 @@ connection default;
--sorted_result
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
disconnect plug_user;
-UPDATE mysql.user SET user='plug_user' WHERE user='new_user';
+UPDATE mysql.global_priv SET user='plug_user' WHERE user='new_user';
FLUSH PRIVILEGES;
--sorted_result
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
@@ -128,15 +128,15 @@ connection default;
disconnect plug_user;
--sorted_result
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-UPDATE mysql.user SET user='new_user' WHERE user='plug_user';
+UPDATE mysql.global_priv SET user='new_user' WHERE user='plug_user';
FLUSH PRIVILEGES;
--sorted_result
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-UPDATE mysql.user SET authentication_string='new_dest' WHERE user='new_user';
+UPDATE mysql.global_priv SET priv=JSON_SET(priv, '$.authentication_string', 'new_dest') WHERE user='new_user';
FLUSH PRIVILEGES;
--sorted_result
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-UPDATE mysql.user SET plugin='new_plugin_server' WHERE user='new_user';
+UPDATE mysql.global_priv SET priv=JSON_SET(priv, '$.plugin', 'new_plugin_server') WHERE user='new_user';
FLUSH PRIVILEGES;
--sorted_result
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
@@ -144,8 +144,8 @@ SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
--error ER_PLUGIN_IS_NOT_LOADED
connect(plug_user,localhost,new_user,new_dest);
--enable_query_log
-UPDATE mysql.user SET plugin='test_plugin_server' WHERE user='new_user';
-UPDATE mysql.user SET USER='new_dest' WHERE user='plug_dest';
+UPDATE mysql.global_priv SET priv=JSON_SET(priv, '$.plugin', 'test_plugin_server') WHERE user='new_user';
+UPDATE mysql.global_priv SET user='new_dest' WHERE user='plug_dest';
FLUSH PRIVILEGES;
GRANT PROXY ON new_dest TO new_user;
--sorted_result
@@ -154,7 +154,7 @@ connect(plug_user,localhost,new_user,new_dest);
select USER(),CURRENT_USER();
connection default;
disconnect plug_user;
-UPDATE mysql.user SET USER='plug_dest' WHERE user='new_dest';
+UPDATE mysql.global_priv SET user='plug_dest' WHERE user='new_dest';
FLUSH PRIVILEGES;
CREATE USER new_dest IDENTIFIED BY 'new_dest_passwd';
--sorted_result
diff --git a/mysql-test/main/plugin_auth_qa_2.result b/mysql-test/main/plugin_auth_qa_2.result
index fa88530be6a..e71132e2bc9 100644
--- a/mysql-test/main/plugin_auth_qa_2.result
+++ b/mysql-test/main/plugin_auth_qa_2.result
@@ -6,8 +6,8 @@ CREATE USER qa_test_1_dest IDENTIFIED BY 'dest_passwd';
GRANT ALL PRIVILEGES ON test_user_db.* TO qa_test_1_dest identified by 'dest_passwd';
GRANT PROXY ON qa_test_1_dest TO qa_test_1_user;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-qa_test_1_dest
+User plugin authentication_string
+qa_test_1_dest mysql_native_password *DFCACE76914AD7BD801FC1A1ECF6562272621A22
qa_test_1_user qa_auth_interface qa_test_1_dest
SELECT @@proxy_user;
@@proxy_user
@@ -19,8 +19,8 @@ exec MYSQL -h localhost -P MASTER_MYPORT -u qa_test_1_user --password=qa_test_1_
current_user() user() @@local.proxy_user @@local.external_user
qa_test_1_user@% qa_test_1_user@localhost NULL NULL
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-qa_test_1_dest
+User plugin authentication_string
+qa_test_1_dest mysql_native_password *DFCACE76914AD7BD801FC1A1ECF6562272621A22
qa_test_1_user qa_auth_interface qa_test_1_dest
DROP USER qa_test_1_user;
DROP USER qa_test_1_dest;
@@ -32,9 +32,9 @@ GRANT ALL PRIVILEGES ON test_user_db.* TO qa_test_2_dest identified by 'dest_pas
GRANT PROXY ON qa_test_2_dest TO qa_test_2_user;
GRANT PROXY ON authenticated_as TO qa_test_2_user;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-authenticated_as
-qa_test_2_dest
+User plugin authentication_string
+authenticated_as mysql_native_password *DFCACE76914AD7BD801FC1A1ECF6562272621A22
+qa_test_2_dest mysql_native_password *DFCACE76914AD7BD801FC1A1ECF6562272621A22
qa_test_2_user qa_auth_interface qa_test_2_dest
SELECT @@proxy_user;
@@proxy_user
@@ -46,9 +46,9 @@ exec MYSQL -h localhost -P MASTER_MYPORT -u qa_test_2_user --password=qa_test_2_
current_user() user() @@local.proxy_user @@local.external_user
authenticated_as@% user_name@localhost 'qa_test_2_user'@'%' externaluser
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-authenticated_as
-qa_test_2_dest
+User plugin authentication_string
+authenticated_as mysql_native_password *DFCACE76914AD7BD801FC1A1ECF6562272621A22
+qa_test_2_dest mysql_native_password *DFCACE76914AD7BD801FC1A1ECF6562272621A22
qa_test_2_user qa_auth_interface qa_test_2_dest
DROP USER qa_test_2_user;
DROP USER qa_test_2_dest;
@@ -82,9 +82,9 @@ GRANT ALL PRIVILEGES ON test_user_db.* TO ''@'localhost' identified by 'dest_pas
GRANT PROXY ON qa_test_5_dest TO qa_test_5_user;
GRANT PROXY ON qa_test_5_dest TO ''@'localhost';
SELECT user,plugin,authentication_string,password FROM mysql.user WHERE user != 'root';
-user plugin authentication_string password
- *DFCACE76914AD7BD801FC1A1ECF6562272621A22
-qa_test_5_dest *DFCACE76914AD7BD801FC1A1ECF6562272621A22
+User plugin authentication_string Password
+ mysql_native_password *DFCACE76914AD7BD801FC1A1ECF6562272621A22 *DFCACE76914AD7BD801FC1A1ECF6562272621A22
+qa_test_5_dest mysql_native_password *DFCACE76914AD7BD801FC1A1ECF6562272621A22 *DFCACE76914AD7BD801FC1A1ECF6562272621A22
qa_test_5_user qa_auth_interface qa_test_5_dest
exec MYSQL -h localhost -P MASTER_MYPORT --user=qa_test_5_user --password=qa_test_5_dest test_user_db -e "SELECT current_user(),user(),@@local.proxy_user,@@local.external_user;" 2>&1
ERROR 1045 (28000): Access denied for user 'qa_test_5_user'@'localhost' (using password: YES)
@@ -97,22 +97,22 @@ CREATE USER qa_test_6_dest IDENTIFIED BY 'dest_passwd';
GRANT ALL PRIVILEGES ON test_user_db.* TO qa_test_6_dest identified by 'dest_passwd';
GRANT PROXY ON qa_test_6_dest TO qa_test_6_user;
SELECT user,plugin,authentication_string,password FROM mysql.user WHERE user != 'root';
-user plugin authentication_string password
-qa_test_6_dest *DFCACE76914AD7BD801FC1A1ECF6562272621A22
+User plugin authentication_string Password
+qa_test_6_dest mysql_native_password *DFCACE76914AD7BD801FC1A1ECF6562272621A22 *DFCACE76914AD7BD801FC1A1ECF6562272621A22
qa_test_6_user qa_auth_interface qa_test_6_dest
exec MYSQL -h localhost -P MASTER_MYPORT --user=qa_test_6_user --password=qa_test_6_dest test_user_db -e "SELECT current_user(),user(),@@local.proxy_user,@@local.external_user;" 2>&1
ERROR 1045 (28000): Access denied for user 'qa_test_6_user'@'localhost' (using password: YES)
GRANT PROXY ON qa_test_6_dest TO root IDENTIFIED WITH qa_auth_interface AS 'qa_test_6_dest';
SELECT user,plugin,authentication_string,password FROM mysql.user WHERE user != 'root';
-user plugin authentication_string password
-qa_test_6_dest *DFCACE76914AD7BD801FC1A1ECF6562272621A22
+User plugin authentication_string Password
+qa_test_6_dest mysql_native_password *DFCACE76914AD7BD801FC1A1ECF6562272621A22 *DFCACE76914AD7BD801FC1A1ECF6562272621A22
qa_test_6_user qa_auth_interface qa_test_6_dest
exec MYSQL -h localhost -P MASTER_MYPORT --user=root --password=qa_test_6_dest test_user_db -e "SELECT current_user(),user(),@@local.proxy_user,@@local.external_user;" 2>&1
ERROR 1045 (28000): Access denied for user 'root'@'localhost' (using password: YES)
REVOKE PROXY ON qa_test_6_dest FROM root;
SELECT user,plugin,authentication_string FROM mysql.user WHERE user != 'root';
-user plugin authentication_string
-qa_test_6_dest
+User plugin authentication_string
+qa_test_6_dest mysql_native_password *DFCACE76914AD7BD801FC1A1ECF6562272621A22
qa_test_6_user qa_auth_interface qa_test_6_dest
exec MYSQL -h localhost -P MASTER_MYPORT --user=root --password=qa_test_6_dest test_user_db -e "SELECT current_user(),user(),@@local.proxy_user,@@local.external_user;" 2>&1
ERROR 1045 (28000): Access denied for user 'root'@'localhost' (using password: YES)
@@ -120,7 +120,7 @@ DROP USER qa_test_6_user;
DROP USER qa_test_6_dest;
DELETE FROM mysql.user WHERE user='root' AND plugin='qa_auth_interface';
SELECT user,plugin,authentication_string,password FROM mysql.user WHERE user != 'root';
-user plugin authentication_string password
+User plugin authentication_string Password
=== Test of the --default_auth option for clients ====
CREATE USER qa_test_11_user IDENTIFIED WITH qa_auth_interface AS 'qa_test_11_dest';
CREATE USER qa_test_11_dest IDENTIFIED BY 'dest_passwd';
diff --git a/mysql-test/main/plugin_innodb.result b/mysql-test/main/plugin_innodb.result
index 48510ad8745..c55bfd779c2 100644
--- a/mysql-test/main/plugin_innodb.result
+++ b/mysql-test/main/plugin_innodb.result
@@ -3,9 +3,10 @@ create table t1(a int) engine=example;
drop table t1;
alter table mysql.plugin engine=innodb;
restart
+# restart
create table t1(a int) engine=example;
select * from t1;
a
drop table t1;
-alter table mysql.plugin engine=myisam;
+alter table mysql.plugin engine=aria;
uninstall plugin example;
diff --git a/mysql-test/main/plugin_innodb.test b/mysql-test/main/plugin_innodb.test
index fb5dd84b997..5700486b218 100644
--- a/mysql-test/main/plugin_innodb.test
+++ b/mysql-test/main/plugin_innodb.test
@@ -22,6 +22,6 @@ create table t1(a int) engine=example;
select * from t1;
drop table t1;
-alter table mysql.plugin engine=myisam;
+alter table mysql.plugin engine=aria;
uninstall plugin example;
diff --git a/mysql-test/main/preload.result b/mysql-test/main/preload.result
index 7ed0c62f33a..529c7ac2690 100644
--- a/mysql-test/main/preload.result
+++ b/mysql-test/main/preload.result
@@ -55,14 +55,14 @@ count(*)
4181
show status like "key_read%";
Variable_name Value
-Key_read_requests 294
+Key_read_requests 297
Key_reads 60
select count(*) from t1 where b = 'test1';
count(*)
4181
show status like "key_read%";
Variable_name Value
-Key_read_requests 588
+Key_read_requests 594
Key_reads 60
flush tables;
flush status;
@@ -81,7 +81,7 @@ count(*)
4181
show status like "key_read%";
Variable_name Value
-Key_read_requests 1068
+Key_read_requests 1071
Key_reads 774
flush tables;
flush status;
@@ -105,7 +105,7 @@ count(*)
4181
show status like "key_read%";
Variable_name Value
-Key_read_requests 311
+Key_read_requests 314
Key_reads 75
flush tables;
flush status;
@@ -133,7 +133,7 @@ count(*)
2584
show status like "key_read%";
Variable_name Value
-Key_read_requests 1266
+Key_read_requests 1272
Key_reads 821
flush tables;
flush status;
diff --git a/mysql-test/main/profiling.test b/mysql-test/main/profiling.test
index 1e1aada7c15..912e4f69798 100644
--- a/mysql-test/main/profiling.test
+++ b/mysql-test/main/profiling.test
@@ -60,6 +60,7 @@ select '012345678900123456789001234567890012345678900123456789001234567890012345
--replace_column 2 #
show profiles;
+--disable_ps_protocol
--disable_result_log
###--replace_column 2 # 3 # 4 #
show profile for query 15;
@@ -90,6 +91,7 @@ show profile all for query 0 limit 0;
###--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 11 # 12 # 13 # 16 #
show profile all for query 15;
###--replace_column 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 11 # 12 # 13 # 16 #
+--enable_ps_protocol
select * from information_schema.profiling;
select query_id, state, duration from information_schema.profiling;
diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result
index b0c7c7d9847..c9f89b94e41 100644
--- a/mysql-test/main/ps.result
+++ b/mysql-test/main/ps.result
@@ -244,6 +244,8 @@ prepare stmt1 from "insert into t1 select i from t1";
execute stmt1;
execute stmt1;
prepare stmt1 from "select * from t1 into outfile '<MYSQLTEST_VARDIR>/tmp/f1.txt'";
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt1;
deallocate prepare stmt1;
drop table t1;
@@ -1379,9 +1381,11 @@ test.t1 optimize status Table is already up to date
prepare stmt from "analyze table t1";
execute stmt;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
execute stmt;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
prepare stmt from "repair table t1, t2, t3";
execute stmt;
@@ -1408,13 +1412,19 @@ test.t3 optimize status Table is already up to date
prepare stmt from "analyze table t1, t2, t3";
execute stmt;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
execute stmt;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
prepare stmt from "repair table t1, t4, t3";
execute stmt;
@@ -1447,11 +1457,13 @@ execute stmt;
Table Op Msg_type Msg_text
test.t4 analyze Error Table 'test.t4' doesn't exist
test.t4 analyze status Operation failed
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
execute stmt;
Table Op Msg_type Msg_text
test.t4 analyze Error Table 'test.t4' doesn't exist
test.t4 analyze status Operation failed
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
deallocate prepare stmt;
drop table t1, t2, t3;
@@ -1518,6 +1530,7 @@ Field Type Null Key Default Extra
i int(11) YES NULL
execute stmt_analyze;
Table Op Msg_type Msg_text
+mysqltest_long_database_name_to_thrash_heap.t1 analyze status Engine-independent statistics collected
mysqltest_long_database_name_to_thrash_heap.t1 analyze status Table is already up to date
execute stmt_optimize;
Table Op Msg_type Msg_text
@@ -2132,12 +2145,13 @@ Database Table In_use Name_locked
mysql general_log 0 0
select Host, User from mysql.user limit 0;
Host User
-select Host, Db from mysql.host limit 0;
-Host Db
show open tables from mysql;
Database Table In_use Name_locked
+mysql column_stats 0 0
mysql general_log 0 0
-mysql host 0 0
+mysql global_priv 0 0
+mysql index_stats 0 0
+mysql table_stats 0 0
mysql user 0 0
call proc_1();
show open tables from mysql;
@@ -2145,12 +2159,13 @@ Database Table In_use Name_locked
mysql general_log 0 0
select Host, User from mysql.user limit 0;
Host User
-select Host, Db from mysql.host limit 0;
-Host Db
show open tables from mysql;
Database Table In_use Name_locked
+mysql column_stats 0 0
mysql general_log 0 0
-mysql host 0 0
+mysql global_priv 0 0
+mysql index_stats 0 0
+mysql table_stats 0 0
mysql user 0 0
call proc_1();
show open tables from mysql;
@@ -2158,12 +2173,13 @@ Database Table In_use Name_locked
mysql general_log 0 0
select Host, User from mysql.user limit 0;
Host User
-select Host, Db from mysql.host limit 0;
-Host Db
show open tables from mysql;
Database Table In_use Name_locked
+mysql column_stats 0 0
mysql general_log 0 0
-mysql host 0 0
+mysql global_priv 0 0
+mysql index_stats 0 0
+mysql table_stats 0 0
mysql user 0 0
call proc_1();
show open tables from mysql;
@@ -2171,12 +2187,13 @@ Database Table In_use Name_locked
mysql general_log 0 0
select Host, User from mysql.user limit 0;
Host User
-select Host, Db from mysql.host limit 0;
-Host Db
show open tables from mysql;
Database Table In_use Name_locked
+mysql column_stats 0 0
mysql general_log 0 0
-mysql host 0 0
+mysql global_priv 0 0
+mysql index_stats 0 0
+mysql table_stats 0 0
mysql user 0 0
flush tables;
create function func_1() returns int begin flush tables; return 1; end|
@@ -2189,12 +2206,13 @@ drop procedure proc_1;
flush tables;
select Host, User from mysql.user limit 0;
Host User
-select Host, Db from mysql.host limit 0;
-Host Db
show open tables from mysql;
Database Table In_use Name_locked
+mysql column_stats 0 0
mysql general_log 0 0
-mysql host 0 0
+mysql global_priv 0 0
+mysql index_stats 0 0
+mysql table_stats 0 0
mysql user 0 0
prepare abc from "flush tables";
execute abc;
@@ -2203,12 +2221,13 @@ Database Table In_use Name_locked
mysql general_log 0 0
select Host, User from mysql.user limit 0;
Host User
-select Host, Db from mysql.host limit 0;
-Host Db
show open tables from mysql;
Database Table In_use Name_locked
+mysql column_stats 0 0
mysql general_log 0 0
-mysql host 0 0
+mysql global_priv 0 0
+mysql index_stats 0 0
+mysql table_stats 0 0
mysql user 0 0
execute abc;
show open tables from mysql;
@@ -2216,12 +2235,13 @@ Database Table In_use Name_locked
mysql general_log 0 0
select Host, User from mysql.user limit 0;
Host User
-select Host, Db from mysql.host limit 0;
-Host Db
show open tables from mysql;
Database Table In_use Name_locked
+mysql column_stats 0 0
mysql general_log 0 0
-mysql host 0 0
+mysql global_priv 0 0
+mysql index_stats 0 0
+mysql table_stats 0 0
mysql user 0 0
execute abc;
show open tables from mysql;
@@ -2229,12 +2249,13 @@ Database Table In_use Name_locked
mysql general_log 0 0
select Host, User from mysql.user limit 0;
Host User
-select Host, Db from mysql.host limit 0;
-Host Db
show open tables from mysql;
Database Table In_use Name_locked
+mysql column_stats 0 0
mysql general_log 0 0
-mysql host 0 0
+mysql global_priv 0 0
+mysql index_stats 0 0
+mysql table_stats 0 0
mysql user 0 0
flush tables;
deallocate prepare abc;
@@ -4642,7 +4663,7 @@ EXECUTE IMMEDIATE 'SELECT ? FROM DUAL' USING (SELECT 1);
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1)' at line 1
CREATE FUNCTION f1() RETURNS VARCHAR(10) RETURN 'test';
EXECUTE IMMEDIATE 'SELECT ? FROM DUAL' USING f1();
-ERROR 42000: EXECUTE..USING does not support subqueries or stored functions
+ERROR 42000: EXECUTE IMMEDIATE does not support subqueries or stored functions
DROP FUNCTION f1;
#
# DDL
@@ -5243,6 +5264,7 @@ CREATE PROCEDURE p1(tn VARCHAR(32))
EXECUTE IMMEDIATE CONCAT('ANALYZE TABLE ',tn);
CALL p1('t1');
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
DROP PROCEDURE p1;
DROP TABLE t1;
@@ -5366,3 +5388,21 @@ drop table t1;
#
# End of 10.2 tests
#
+#
+# MDEV-19263: Server crashes in mysql_handle_single_derived
+# upon 2nd execution of PS
+#
+CREATE TABLE t1 (f INT);
+CREATE VIEW v1 AS SELECT * FROM t1;
+CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW INSERT INTO v1 SELECT * FROM x;
+PREPARE stmt FROM "INSERT INTO v1 VALUES (1)";
+EXECUTE stmt;
+ERROR 42S02: Table 'test.x' doesn't exist
+EXECUTE stmt;
+ERROR 42S02: Table 'test.x' doesn't exist
+DEALLOCATE PREPARE stmt;
+DROP VIEW v1;
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/ps.test b/mysql-test/main/ps.test
index 2b8a05094a6..4254c7c41eb 100644
--- a/mysql-test/main/ps.test
+++ b/mysql-test/main/ps.test
@@ -2228,28 +2228,24 @@ flush tables;
--sorted_result
show open tables from mysql;
select Host, User from mysql.user limit 0;
-select Host, Db from mysql.host limit 0;
--sorted_result
show open tables from mysql;
call proc_1();
--sorted_result
show open tables from mysql;
select Host, User from mysql.user limit 0;
-select Host, Db from mysql.host limit 0;
--sorted_result
show open tables from mysql;
call proc_1();
--sorted_result
show open tables from mysql;
select Host, User from mysql.user limit 0;
-select Host, Db from mysql.host limit 0;
--sorted_result
show open tables from mysql;
call proc_1();
--sorted_result
show open tables from mysql;
select Host, User from mysql.user limit 0;
-select Host, Db from mysql.host limit 0;
--sorted_result
show open tables from mysql;
flush tables;
@@ -2269,7 +2265,6 @@ drop procedure proc_1;
--disable_ps_protocol
flush tables;
select Host, User from mysql.user limit 0;
-select Host, Db from mysql.host limit 0;
--sorted_result
show open tables from mysql;
--enable_ps_protocol
@@ -2279,21 +2274,18 @@ execute abc;
--sorted_result
show open tables from mysql;
select Host, User from mysql.user limit 0;
-select Host, Db from mysql.host limit 0;
--sorted_result
show open tables from mysql;
execute abc;
--sorted_result
show open tables from mysql;
select Host, User from mysql.user limit 0;
-select Host, Db from mysql.host limit 0;
--sorted_result
show open tables from mysql;
execute abc;
--sorted_result
show open tables from mysql;
select Host, User from mysql.user limit 0;
-select Host, Db from mysql.host limit 0;
--sorted_result
show open tables from mysql;
flush tables;
@@ -4833,3 +4825,27 @@ drop table t1;
--echo #
--echo # End of 10.2 tests
--echo #
+
+--echo #
+--echo # MDEV-19263: Server crashes in mysql_handle_single_derived
+--echo # upon 2nd execution of PS
+--echo #
+
+CREATE TABLE t1 (f INT);
+CREATE VIEW v1 AS SELECT * FROM t1;
+CREATE TRIGGER tr BEFORE INSERT ON t1 FOR EACH ROW INSERT INTO v1 SELECT * FROM x;
+PREPARE stmt FROM "INSERT INTO v1 VALUES (1)";
+
+--error ER_NO_SUCH_TABLE
+EXECUTE stmt;
+--error ER_NO_SUCH_TABLE
+EXECUTE stmt;
+
+# Cleanup
+DEALLOCATE PREPARE stmt;
+DROP VIEW v1;
+DROP TABLE t1;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/ps_1general.result b/mysql-test/main/ps_1general.result
index 035372a1359..2ef5571cdca 100644
--- a/mysql-test/main/ps_1general.result
+++ b/mysql-test/main/ps_1general.result
@@ -451,7 +451,7 @@ def possible_keys 253 4_OR_8_K 0 Y 0 39 8
def key 253 64 0 Y 0 39 8
def key_len 253 4_OR_8_K 0 Y 0 39 8
def ref 253 2048 0 Y 0 39 8
-def rows 8 10 1 Y 32928 0 63
+def rows 253 64 1 Y 0 39 8
def Extra 253 255 14 N 1 39 8
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 4 Using filesort
@@ -462,15 +462,15 @@ Catalog Database Table Table_alias Column Column_alias Type Length Max length Is
def id 8 3 1 Y 32928 0 63
def select_type 253 19 6 N 1 39 8
def table 253 64 2 Y 0 39 8
-def type 253 10 5 Y 0 39 8
+def type 253 10 3 Y 0 39 8
def possible_keys 253 4_OR_8_K 7 Y 0 39 8
-def key 253 64 7 Y 0 39 8
-def key_len 253 4_OR_8_K 1 Y 0 39 8
+def key 253 64 0 Y 0 39 8
+def key_len 253 4_OR_8_K 0 Y 0 39 8
def ref 253 2048 0 Y 0 39 8
-def rows 8 10 1 Y 32928 0 63
-def Extra 253 255 37 N 1 39 8
+def rows 253 64 1 Y 0 39 8
+def Extra 253 255 27 N 1 39 8
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using index condition; Using filesort
+1 SIMPLE t1 ALL PRIMARY NULL NULL NULL 4 Using where; Using filesort
drop table if exists t2;
create table t2 (id smallint, name varchar(20)) ;
prepare stmt1 from ' insert into t2 values(?, ?) ' ;
diff --git a/mysql-test/main/ps_2myisam.result b/mysql-test/main/ps_2myisam.result
index 3906875da92..c85abaad051 100644
--- a/mysql-test/main/ps_2myisam.result
+++ b/mysql-test/main/ps_2myisam.result
@@ -1161,7 +1161,7 @@ def possible_keys 253 4_OR_8_K 0 Y 0 39 8
def key 253 64 0 Y 0 39 8
def key_len 253 4_OR_8_K 0 Y 0 39 8
def ref 253 2048 0 Y 0 39 8
-def rows 8 10 1 Y 32928 0 63
+def rows 253 64 1 Y 0 39 8
def Extra 253 255 0 N 1 39 8
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t9 ALL NULL NULL NULL NULL 2
diff --git a/mysql-test/main/ps_3innodb.result b/mysql-test/main/ps_3innodb.result
index 9f5c8956dd8..53f736f41a3 100644
--- a/mysql-test/main/ps_3innodb.result
+++ b/mysql-test/main/ps_3innodb.result
@@ -1161,7 +1161,7 @@ def possible_keys 253 4_OR_8_K 0 Y 0 39 8
def key 253 64 0 Y 0 39 8
def key_len 253 4_OR_8_K 0 Y 0 39 8
def ref 253 2048 0 Y 0 39 8
-def rows 8 10 1 Y 32928 0 63
+def rows 253 64 1 Y 0 39 8
def Extra 253 255 0 N 1 39 8
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t9 ALL NULL NULL NULL NULL 2
diff --git a/mysql-test/main/ps_4heap.result b/mysql-test/main/ps_4heap.result
index 46b4d9c58e5..0cf1ed13cba 100644
--- a/mysql-test/main/ps_4heap.result
+++ b/mysql-test/main/ps_4heap.result
@@ -1162,7 +1162,7 @@ def possible_keys 253 4_OR_8_K 0 Y 0 39 8
def key 253 64 0 Y 0 39 8
def key_len 253 4_OR_8_K 0 Y 0 39 8
def ref 253 2048 0 Y 0 39 8
-def rows 8 10 1 Y 32928 0 63
+def rows 253 64 1 Y 0 39 8
def Extra 253 255 0 N 1 39 8
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t9 ALL NULL NULL NULL NULL 2
diff --git a/mysql-test/main/ps_5merge.result b/mysql-test/main/ps_5merge.result
index cc2f0f88b2a..49155394e2a 100644
--- a/mysql-test/main/ps_5merge.result
+++ b/mysql-test/main/ps_5merge.result
@@ -1205,7 +1205,7 @@ def possible_keys 253 4_OR_8_K 0 Y 0 39 8
def key 253 64 0 Y 0 39 8
def key_len 253 4_OR_8_K 0 Y 0 39 8
def ref 253 2048 0 Y 0 39 8
-def rows 8 10 1 Y 32928 0 63
+def rows 253 64 1 Y 0 39 8
def Extra 253 255 0 N 1 39 8
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t9 ALL NULL NULL NULL NULL 2
@@ -4573,7 +4573,7 @@ def possible_keys 253 4_OR_8_K 0 Y 0 39 8
def key 253 64 0 Y 0 39 8
def key_len 253 4_OR_8_K 0 Y 0 39 8
def ref 253 2048 0 Y 0 39 8
-def rows 8 10 1 Y 32928 0 63
+def rows 253 64 1 Y 0 39 8
def Extra 253 255 0 N 1 39 8
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t9 ALL NULL NULL NULL NULL 2
diff --git a/mysql-test/main/ps_ddl.result b/mysql-test/main/ps_ddl.result
index 68acf50aee1..c5c5b5a9ac0 100644
--- a/mysql-test/main/ps_ddl.result
+++ b/mysql-test/main/ps_ddl.result
@@ -20,6 +20,8 @@ else
select '' as "SUCCESS";
end if;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
set @reprepare_count= 0;
flush status;
=====================================================================
@@ -767,11 +769,12 @@ deallocate prepare stmt;
#
# Test 3: View referencing an Information schema table
#
-create view t1 as select table_name from information_schema.views;
+create view t1 as select table_name from information_schema.views order by table_name;
prepare stmt from "select * from t1";
execute stmt;
table_name
t1
+user
call p_verify_reprepare_count(0);
SUCCESS
@@ -779,6 +782,7 @@ create temporary table t1 (a int);
execute stmt;
table_name
t1
+user
call p_verify_reprepare_count(0);
SUCCESS
@@ -1071,6 +1075,8 @@ call p1(x);
return x;
end|
create procedure p1(out x int) select max(a) from t1 into x;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
prepare stmt from "select * from v1";
execute stmt;
f1()
@@ -1083,6 +1089,8 @@ SUCCESS
drop procedure p1;
create procedure p1(out x int) select max(a) from t2 into x;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# XXX: used to be a bug. The prelocked list was not invalidated
# and we kept opening table t1, whereas the procedure
# is now referring to table t2
@@ -1399,6 +1407,7 @@ SUCCESS
prepare stmt from "analyze table t1";
execute stmt;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
drop table t1;
create table t1 (a1 int, a2 int);
@@ -1406,14 +1415,17 @@ insert into t1 values (1, 10), (2, 20), (3, 30);
# t1 has changed, and it's not a problem
execute stmt;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
alter table t1 add column b varchar(50) default NULL;
execute stmt;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
alter table t1 drop column b;
execute stmt;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
call p_verify_reprepare_count(0);
SUCCESS
diff --git a/mysql-test/main/ps_ddl.test b/mysql-test/main/ps_ddl.test
index 90226d379bf..dafb5b5dea3 100644
--- a/mysql-test/main/ps_ddl.test
+++ b/mysql-test/main/ps_ddl.test
@@ -699,7 +699,7 @@ deallocate prepare stmt;
--echo #
--echo # Test 3: View referencing an Information schema table
--echo #
-create view t1 as select table_name from information_schema.views;
+create view t1 as select table_name from information_schema.views order by table_name;
prepare stmt from "select * from t1";
execute stmt;
diff --git a/mysql-test/main/ps_ddl1.result b/mysql-test/main/ps_ddl1.result
index 667cbed8a7a..5178ee64f16 100644
--- a/mysql-test/main/ps_ddl1.result
+++ b/mysql-test/main/ps_ddl1.result
@@ -20,6 +20,8 @@ else
select '' as "SUCCESS";
end if;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
set @reprepare_count= 0;
flush status;
drop table if exists t1;
diff --git a/mysql-test/main/ps_error.result b/mysql-test/main/ps_error.result
index 448832dc7a8..ad178f86915 100644
--- a/mysql-test/main/ps_error.result
+++ b/mysql-test/main/ps_error.result
@@ -31,6 +31,7 @@ Warnings:
Warning 1292 Truncated incorrect DOUBLE value: 'foo'
EXECUTE stmt;
ERROR 22007: Truncated incorrect DOUBLE value: 'foo'
+# restart
SELECT 'All done';
All done
All done
diff --git a/mysql-test/main/query_cache.result b/mysql-test/main/query_cache.result
index 9c010cbffc7..f5d8b5eb461 100644
--- a/mysql-test/main/query_cache.result
+++ b/mysql-test/main/query_cache.result
@@ -645,9 +645,13 @@ show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
select * from t1 into outfile "query_cache.out.file";
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select * from t1 into outfile "query_cache.out.file";
ERROR HY000: File 'query_cache.out.file' already exists
select * from t1 limit 1 into dumpfile "query_cache.dump.file";
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
@@ -963,19 +967,19 @@ INSERT INTO t1 VALUES ('20050326');
INSERT INTO t1 VALUES ('20050325');
SELECT COUNT(*) FROM t1 WHERE date BETWEEN '20050326' AND '20050327 invalid';
COUNT(*)
-0
+1
Warnings:
-Warning 1292 Incorrect datetime value: '20050327 invalid'
+Warning 1292 Truncated incorrect date value: '20050327 invalid'
SELECT COUNT(*) FROM t1 WHERE date BETWEEN '20050326' AND '20050328 invalid';
COUNT(*)
-0
+1
Warnings:
-Warning 1292 Incorrect datetime value: '20050328 invalid'
+Warning 1292 Truncated incorrect date value: '20050328 invalid'
SELECT COUNT(*) FROM t1 WHERE date BETWEEN '20050326' AND '20050327 invalid';
COUNT(*)
-0
+1
Warnings:
-Warning 1292 Incorrect datetime value: '20050327 invalid'
+Warning 1292 Truncated incorrect date value: '20050327 invalid'
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
@@ -1100,6 +1104,8 @@ Declare var1 int;
select max(a) from t1 into var1;
return var1;
end//
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create procedure `p1`()
begin
select a, f1() from t1;
@@ -1858,17 +1864,17 @@ DROP TABLE t1;
SET GLOBAL query_cache_size= default;
CREATE TABLE t1( a INT );
SET @v = ( SELECT SQL_CACHE 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '1 )' at line 1
+ERROR 42000: Incorrect usage/placement of 'SQL_CACHE'
SET @v = ( SELECT SQL_NO_CACHE 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '1 )' at line 1
+ERROR 42000: Incorrect usage/placement of 'SQL_NO_CACHE'
SELECT a FROM t1 WHERE a IN ( SELECT SQL_CACHE a FROM t1 );
-ERROR 42S22: Unknown column 'SQL_CACHE' in 'field list'
+ERROR 42000: Incorrect usage/placement of 'SQL_CACHE'
SELECT a FROM t1 WHERE a IN ( SELECT SQL_NO_CACHE a FROM t1 );
-ERROR 42S22: Unknown column 'SQL_NO_CACHE' in 'field list'
+ERROR 42000: Incorrect usage/placement of 'SQL_NO_CACHE'
SELECT ( SELECT SQL_CACHE a FROM t1 );
-ERROR 42S22: Unknown column 'SQL_CACHE' in 'field list'
+ERROR 42000: Incorrect usage/placement of 'SQL_CACHE'
SELECT ( SELECT SQL_NO_CACHE a FROM t1 );
-ERROR 42S22: Unknown column 'SQL_NO_CACHE' in 'field list'
+ERROR 42000: Incorrect usage/placement of 'SQL_NO_CACHE'
SELECT SQL_CACHE * FROM t1;
a
SELECT SQL_NO_CACHE * FROM t1;
@@ -1878,18 +1884,18 @@ ERROR 42000: Incorrect usage/placement of 'SQL_CACHE'
SELECT * FROM t1 UNION SELECT SQL_NO_CACHE * FROM t1;
ERROR 42000: Incorrect usage/placement of 'SQL_NO_CACHE'
SELECT * FROM t1 WHERE a IN (SELECT SQL_CACHE a FROM t1);
-ERROR 42S22: Unknown column 'SQL_CACHE' in 'field list'
+ERROR 42000: Incorrect usage/placement of 'SQL_CACHE'
SELECT * FROM t1 WHERE a IN (SELECT a FROM t1 UNION SELECT SQL_CACHE a FROM t1);
-ERROR 42S22: Unknown column 'SQL_CACHE' in 'field list'
+ERROR 42000: Incorrect usage/placement of 'SQL_CACHE'
SELECT * FROM t1 UNION SELECT SQL_NO_CACHE * FROM t1;
ERROR 42000: Incorrect usage/placement of 'SQL_NO_CACHE'
SELECT * FROM t1 WHERE a IN (SELECT SQL_NO_CACHE a FROM t1);
-ERROR 42S22: Unknown column 'SQL_NO_CACHE' in 'field list'
+ERROR 42000: Incorrect usage/placement of 'SQL_NO_CACHE'
SELECT * FROM t1 WHERE a IN
(SELECT a FROM t1 UNION SELECT SQL_NO_CACHE a FROM t1);
-ERROR 42S22: Unknown column 'SQL_NO_CACHE' in 'field list'
+ERROR 42000: Incorrect usage/placement of 'SQL_NO_CACHE'
SELECT SQL_CACHE SQL_NO_CACHE * FROM t1;
-ERROR HY000: Incorrect usage of SQL_CACHE and SQL_NO_CACHE
+ERROR HY000: Incorrect usage of SQL_NO_CACHE and SQL_CACHE
SELECT SQL_NO_CACHE SQL_CACHE * FROM t1;
ERROR HY000: Incorrect usage of SQL_NO_CACHE and SQL_CACHE
SELECT SQL_CACHE * FROM t1 UNION SELECT SQL_CACHE * FROM t1;
@@ -1902,10 +1908,10 @@ SELECT SQL_NO_CACHE * FROM t1 UNION SELECT SQL_NO_CACHE * FROM t1;
ERROR 42000: Incorrect usage/placement of 'SQL_NO_CACHE'
SELECT SQL_CACHE * FROM t1 WHERE a IN
(SELECT SQL_NO_CACHE a FROM t1);
-ERROR 42S22: Unknown column 'SQL_NO_CACHE' in 'field list'
+ERROR 42000: Incorrect usage/placement of 'SQL_NO_CACHE'
SELECT SQL_CACHE * FROM t1 WHERE a IN
(SELECT a FROM t1 UNION SELECT SQL_NO_CACHE a FROM t1);
-ERROR 42S22: Unknown column 'SQL_NO_CACHE' in 'field list'
+ERROR 42000: Incorrect usage/placement of 'SQL_NO_CACHE'
DROP TABLE t1;
End of 5.1 tests
#
diff --git a/mysql-test/main/query_cache.test b/mysql-test/main/query_cache.test
index 1b1e24bc6f4..389aa0de2fa 100644
--- a/mysql-test/main/query_cache.test
+++ b/mysql-test/main/query_cache.test
@@ -1534,22 +1534,21 @@ SET GLOBAL query_cache_size= default;
#
CREATE TABLE t1( a INT );
---error ER_PARSE_ERROR
+--error ER_CANT_USE_OPTION_HERE
SET @v = ( SELECT SQL_CACHE 1 );
---error ER_PARSE_ERROR
+--error ER_CANT_USE_OPTION_HERE
SET @v = ( SELECT SQL_NO_CACHE 1 );
#
-# Keywords 'SQL_CACHE' and 'SQL_NO_CACHE' are allowed as column names.
-# Hence the error messages are not intuitive.
+# Keywords 'SQL_CACHE' and 'SQL_NO_CACHE'.
#
---error ER_BAD_FIELD_ERROR
+--error ER_CANT_USE_OPTION_HERE
SELECT a FROM t1 WHERE a IN ( SELECT SQL_CACHE a FROM t1 );
---error ER_BAD_FIELD_ERROR
+--error ER_CANT_USE_OPTION_HERE
SELECT a FROM t1 WHERE a IN ( SELECT SQL_NO_CACHE a FROM t1 );
---error ER_BAD_FIELD_ERROR
+--error ER_CANT_USE_OPTION_HERE
SELECT ( SELECT SQL_CACHE a FROM t1 );
---error ER_BAD_FIELD_ERROR
+--error ER_CANT_USE_OPTION_HERE
SELECT ( SELECT SQL_NO_CACHE a FROM t1 );
SELECT SQL_CACHE * FROM t1;
@@ -1560,16 +1559,16 @@ SELECT SQL_NO_CACHE * FROM t1;
SELECT * FROM t1 UNION SELECT SQL_CACHE * FROM t1;
--error ER_CANT_USE_OPTION_HERE
SELECT * FROM t1 UNION SELECT SQL_NO_CACHE * FROM t1;
---error ER_BAD_FIELD_ERROR
+--error ER_CANT_USE_OPTION_HERE
SELECT * FROM t1 WHERE a IN (SELECT SQL_CACHE a FROM t1);
---error ER_BAD_FIELD_ERROR
+--error ER_CANT_USE_OPTION_HERE
SELECT * FROM t1 WHERE a IN (SELECT a FROM t1 UNION SELECT SQL_CACHE a FROM t1);
--error ER_CANT_USE_OPTION_HERE
SELECT * FROM t1 UNION SELECT SQL_NO_CACHE * FROM t1;
---error ER_BAD_FIELD_ERROR
+--error ER_CANT_USE_OPTION_HERE
SELECT * FROM t1 WHERE a IN (SELECT SQL_NO_CACHE a FROM t1);
---error ER_BAD_FIELD_ERROR
+--error ER_CANT_USE_OPTION_HERE
SELECT * FROM t1 WHERE a IN
(SELECT a FROM t1 UNION SELECT SQL_NO_CACHE a FROM t1);
--error ER_WRONG_USAGE
@@ -1584,10 +1583,10 @@ SELECT SQL_CACHE * FROM t1 UNION SELECT SQL_NO_CACHE * FROM t1;
SELECT SQL_NO_CACHE * FROM t1 UNION SELECT SQL_CACHE * FROM t1;
--error ER_CANT_USE_OPTION_HERE
SELECT SQL_NO_CACHE * FROM t1 UNION SELECT SQL_NO_CACHE * FROM t1;
---error ER_BAD_FIELD_ERROR
+--error ER_CANT_USE_OPTION_HERE
SELECT SQL_CACHE * FROM t1 WHERE a IN
(SELECT SQL_NO_CACHE a FROM t1);
---error ER_BAD_FIELD_ERROR
+--error ER_CANT_USE_OPTION_HERE
SELECT SQL_CACHE * FROM t1 WHERE a IN
(SELECT a FROM t1 UNION SELECT SQL_NO_CACHE a FROM t1);
diff --git a/mysql-test/main/range.result b/mysql-test/main/range.result
index 32e0cf2868c..9a2d99e2f82 100644
--- a/mysql-test/main/range.result
+++ b/mysql-test/main/range.result
@@ -1,3 +1,8 @@
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
CREATE TABLE t1 (
event_date date DEFAULT '0000-00-00' NOT NULL,
type int(11) DEFAULT '0' NOT NULL,
@@ -244,7 +249,7 @@ id select_type table type possible_keys key key_len ref rows Extra
explain select count(*) from t1 where x in (1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref x x 5 const 1 Using index
-explain select count(*) from t1 where x in (1,2);
+explain select count(*) from t1 where x in (1,2,3,4);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index x x 5 NULL 9 Using where; Using index
drop table t1;
@@ -275,7 +280,7 @@ INSERT INTO t1 VALUES
(33,5),(33,5),(33,5),(33,5),(34,5),(35,5);
EXPLAIN SELECT * FROM t1 WHERE a IN(1,2) AND b=5;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a,b a 5 NULL 2 Using index condition; Using where
+1 SIMPLE t1 ref|filter a,b b|a 5|5 const 15 (5%) Using where; Using rowid filter
SELECT * FROM t1 WHERE a IN(1,2) AND b=5;
a b
DROP TABLE t1;
@@ -416,23 +421,25 @@ count(*)
1026
analyze table t1,t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
explain select * from t1, t2 where t1.uid=t2.uid AND t1.uid > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uid_index uid_index 4 NULL 112 Using index condition
+1 SIMPLE t1 range uid_index uid_index 4 NULL 111 Using index condition
1 SIMPLE t2 ref uid_index uid_index 4 test.t1.uid 38
explain select * from t1, t2 where t1.uid=t2.uid AND t2.uid > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uid_index uid_index 4 NULL 112 Using index condition
+1 SIMPLE t1 range uid_index uid_index 4 NULL 111 Using index condition
1 SIMPLE t2 ref uid_index uid_index 4 test.t1.uid 38
explain select * from t1, t2 where t1.uid=t2.uid AND t1.uid != 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uid_index uid_index 4 NULL 113 Using index condition
+1 SIMPLE t1 range uid_index uid_index 4 NULL 112 Using index condition
1 SIMPLE t2 ref uid_index uid_index 4 test.t1.uid 38
explain select * from t1, t2 where t1.uid=t2.uid AND t2.uid != 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uid_index uid_index 4 NULL 113 Using index condition
+1 SIMPLE t1 range uid_index uid_index 4 NULL 112 Using index condition
1 SIMPLE t2 ref uid_index uid_index 4 test.t1.uid 38
select * from t1, t2 where t1.uid=t2.uid AND t1.uid > 0;
id name uid id name uid
@@ -715,7 +722,7 @@ WHERE
v.oxrootid ='d8c4177d09f8b11f5.52725521' AND
s.oxleft > v.oxleft AND s.oxleft < v.oxright;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE v ref OXLEFT,OXRIGHT,OXROOTID OXROOTID 34 const 5 Using index condition
+1 SIMPLE v ref OXLEFT,OXRIGHT,OXROOTID OXROOTID 34 const 6 Using index condition
1 SIMPLE s ALL OXLEFT NULL NULL NULL 12 Range checked for each record (index map: 0x4)
SELECT s.oxid FROM t1 v, t1 s
WHERE
@@ -892,12 +899,13 @@ INSERT INTO t1 VALUES
(43,'A'), (44,'A'), (45,'A'), (46,'A'), (47,'A'), (48,'A'),
(49,'A'), (50,'A'), (51,'A'), (52,'A'), (53,'C'), (54,'C'),
(55,'C'), (56,'C'), (57,'C'), (58,'C'), (59,'C'), (60,'C');
+INSERT INTO t1(status) SELECT status FROM t1;
EXPLAIN SELECT * FROM t1 WHERE status <> 'A' AND status <> 'B';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range status status 23 NULL 11 Using index condition
+1 SIMPLE t1 range status status 23 NULL 18 Using index condition
EXPLAIN SELECT * FROM t1 WHERE status NOT IN ('A','B');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range status status 23 NULL 11 Using index condition
+1 SIMPLE t1 range status status 23 NULL 18 Using index condition
SELECT * FROM t1 WHERE status <> 'A' AND status <> 'B';
id status
53 C
@@ -908,6 +916,14 @@ id status
58 C
59 C
60 C
+113 C
+114 C
+115 C
+116 C
+117 C
+118 C
+119 C
+120 C
SELECT * FROM t1 WHERE status NOT IN ('A','B');
id status
53 C
@@ -918,18 +934,26 @@ id status
58 C
59 C
60 C
+113 C
+114 C
+115 C
+116 C
+117 C
+118 C
+119 C
+120 C
EXPLAIN SELECT status FROM t1 WHERE status <> 'A' AND status <> 'B';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range status status 23 NULL 11 Using where; Using index
+1 SIMPLE t1 range status status 23 NULL 18 Using where; Using index
EXPLAIN SELECT status FROM t1 WHERE status NOT IN ('A','B');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range status status 23 NULL 11 Using where; Using index
+1 SIMPLE t1 range status status 23 NULL 18 Using where; Using index
EXPLAIN SELECT * FROM t1 WHERE status NOT BETWEEN 'A' AND 'B';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range status status 23 NULL 10 Using index condition
+1 SIMPLE t1 range status status 23 NULL 17 Using index condition
EXPLAIN SELECT * FROM t1 WHERE status < 'A' OR status > 'B';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range status status 23 NULL 10 Using index condition; Using where
+1 SIMPLE t1 range status status 23 NULL 17 Using index condition; Using where
SELECT * FROM t1 WHERE status NOT BETWEEN 'A' AND 'B';
id status
53 C
@@ -940,6 +964,14 @@ id status
58 C
59 C
60 C
+113 C
+114 C
+115 C
+116 C
+117 C
+118 C
+119 C
+120 C
SELECT * FROM t1 WHERE status < 'A' OR status > 'B';
id status
53 C
@@ -950,6 +982,14 @@ id status
58 C
59 C
60 C
+113 C
+114 C
+115 C
+116 C
+117 C
+118 C
+119 C
+120 C
DROP TABLE t1;
CREATE TABLE t1 (a int, b int, primary key(a,b));
INSERT INTO t1 VALUES
@@ -957,16 +997,16 @@ INSERT INTO t1 VALUES
CREATE VIEW v1 as SELECT a,b FROM t1 WHERE b=3;
EXPLAIN SELECT a,b FROM t1 WHERE a < 2 and b=3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where; Using index
EXPLAIN SELECT a,b FROM v1 WHERE a < 2 and b=3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where; Using index
EXPLAIN SELECT a,b FROM t1 WHERE a < 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where; Using index
EXPLAIN SELECT a,b FROM v1 WHERE a < 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where; Using index
SELECT a,b FROM t1 WHERE a < 2 and b=3;
a b
1 3
@@ -1006,10 +1046,10 @@ INSERT INTO `t1` VALUES
,(13,2),(14,2),(15,3),(16,3),(17,3),(18,3),(19,3);
explain select * from t1 where a in (3,4) and b in (1,2,3);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 8 NULL # Using where; Using index
+1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL # Using where; Using index
explain select * from v1 where a in (3,4) and b in (1,2,3);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 8 NULL # Using where; Using index
+1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL # Using where; Using index
explain select * from t1 where a between 3 and 4 and b between 1 and 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 8 NULL # Using where; Using index
@@ -1065,10 +1105,10 @@ id b c
0 3 4
EXPLAIN SELECT * FROM t1 WHERE b<=3 AND 3<=c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx1,idx2 idx2 4 NULL 3 Using index condition; Using where
+1 SIMPLE t1 range idx1,idx2 idx2 4 NULL 2 Using index condition; Using where
EXPLAIN SELECT * FROM t1 WHERE 3 BETWEEN b AND c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx1,idx2 idx2 4 NULL 3 Using where
+1 SIMPLE t1 range idx1,idx2 idx2 4 NULL 2 Using where
SELECT * FROM t1 WHERE 0 < b OR 0 > c;
id b c
0 3 4
@@ -1079,10 +1119,10 @@ id b c
0 3 4
EXPLAIN SELECT * FROM t1 WHERE 0 < b OR 0 > c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge idx1,idx2 idx1,idx2 4,4 NULL 4 Using sort_union(idx1,idx2); Using where
+1 SIMPLE t1 index_merge idx1,idx2 idx1,idx2 4,4 NULL 3 Using sort_union(idx1,idx2); Using where
EXPLAIN SELECT * FROM t1 WHERE 0 NOT BETWEEN b AND c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge idx1,idx2 idx1,idx2 4,4 NULL 4 Using sort_union(idx1,idx2); Using where
+1 SIMPLE t1 index_merge idx1,idx2 idx1,idx2 4,4 NULL 3 Using sort_union(idx1,idx2); Using where
DROP TABLE t1;
CREATE TABLE t1 (
item char(20) NOT NULL default '',
@@ -1097,11 +1137,11 @@ INSERT INTO t1 VALUES
('A2','2005-12-01 08:00:00',1000);
EXPLAIN SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-01 24:00:00';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref PRIMARY PRIMARY 20 const 2 Using index condition
+1 SIMPLE t1 ref PRIMARY PRIMARY 20 const 3 Using index condition
SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-01 24:00:00';
item started price
Warnings:
-Warning 1292 Incorrect datetime value: '2005-12-01 24:00:00'
+Warning 1292 Truncated incorrect datetime value: '2005-12-01 24:00:00'
SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-02 00:00:00';
item started price
A1 2005-11-01 08:00:00 1000.000
@@ -1113,7 +1153,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-01 24:00:00';
item started price
Warnings:
-Warning 1292 Incorrect datetime value: '2005-12-01 24:00:00'
+Warning 1292 Truncated incorrect datetime value: '2005-12-01 24:00:00'
SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-02 00:00:00';
item started price
A1 2005-11-01 08:00:00 1000.000
@@ -1141,7 +1181,7 @@ CREATE TABLE t1 (
a varchar(32), index (a)
) DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
INSERT INTO t1 VALUES
-('B'), ('A'), ('A'), ('C'), ('B'), ('A'), ('A');
+('B'), ('A'), ('A'), ('C'), ('B'), ('A'), ('A'), ('C'), ('A');
SELECT a FROM t1 WHERE a='b' OR a='B';
a
B
@@ -1201,13 +1241,15 @@ Z
In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)
explain select * from t2 where a=1000 and b<11;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref a a 5 const 502 Using index condition
+1 SIMPLE t2 ref a a 5 const 503 Using index condition
drop table t1, t2;
CREATE TABLE t1( a INT, b INT, KEY( a, b ) );
CREATE TABLE t2( a INT, b INT, KEY( a, b ) );
CREATE TABLE t3( a INT, b INT, KEY( a, b ) );
-INSERT INTO t1( a, b )
-VALUES (0, 1), (1, 2), (1, 4), (2, 3), (5, 0), (9, 7);
+INSERT INTO t1( a, b ) VALUES
+(0, 1), (1, 2), (1, 4), (2, 3), (5, 0), (9, 7),
+(10, 11), (11,12), (11, 14), (12, 13), (15, 10), (19, 17),
+(20, 21), (21,22), (21, 24), (22, 23), (25, 20), (29, 27);
INSERT INTO t2( a, b )
VALUES ( 1, 1), ( 2, 1), ( 3, 1), ( 4, 1), ( 5, 1),
( 6, 1), ( 7, 1), ( 8, 1), ( 9, 1), (10, 1),
@@ -1218,65 +1260,67 @@ INSERT INTO t2 SELECT a, 3 FROM t2 WHERE b = 1;
INSERT INTO t2 SELECT -1, -1 FROM t2;
INSERT INTO t2 SELECT -1, -1 FROM t2;
INSERT INTO t2 SELECT -1, -1 FROM t2;
-INSERT INTO t3
-VALUES (1, 0), (2, 0), (3, 0), (4, 0), (5, 0),
-(6, 0), (7, 0), (8, 0), (9, 0), (10, 0);
+INSERT INTO t3 VALUES
+(1, 0), (2, 0), (3, 0), (4, 0), (5, 0),
+(6, 0), (7, 0), (8, 0), (9, 0), (10, 0),
+(11, 0), (12, 0), (13, 0), (14, 0), (15, 0),
+(16, 0), (17, 0), (18, 0), (19, 0), (20, 0);
INSERT INTO t3 SELECT * FROM t3 WHERE a = 10;
INSERT INTO t3 SELECT * FROM t3 WHERE a = 10;
SELECT * FROM t1 WHERE
-3 <= a AND a < 5 OR
-5 < a AND b = 3 OR
-3 <= a;
+23 <= a AND a < 25 OR
+25 < a AND b = 23 OR
+23 <= a;
a b
-5 0
-9 7
+25 20
+29 27
EXPLAIN
SELECT * FROM t1 WHERE
-3 <= a AND a < 5 OR
-5 < a AND b = 3 OR
-3 <= a;
+23 <= a AND a < 25 OR
+25 < a AND b = 23 OR
+23 <= a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 2 Using where; Using index
SELECT * FROM t1 WHERE
-3 <= a AND a < 5 OR
-5 <= a AND b = 3 OR
-3 <= a;
+23 <= a AND a < 25 OR
+25 <= a AND b = 23 OR
+23 <= a;
a b
-5 0
-9 7
+25 20
+29 27
EXPLAIN
SELECT * FROM t1 WHERE
-3 <= a AND a < 5 OR
-5 <= a AND b = 3 OR
-3 <= a;
+23 <= a AND a < 25 OR
+25 <= a AND b = 23 OR
+23 <= a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 4 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index
SELECT * FROM t1 WHERE
-3 <= a AND a <= 5 OR
-5 <= a AND b = 3 OR
-3 <= a;
+23 <= a AND a <= 25 OR
+25 <= a AND b = 23 OR
+23 <= a;
a b
-5 0
-9 7
+25 20
+29 27
EXPLAIN
SELECT * FROM t1 WHERE
-3 <= a AND a <= 5 OR
-5 <= a AND b = 3 OR
-3 <= a;
+23 <= a AND a <= 25 OR
+25 <= a AND b = 23 OR
+23 <= a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 2 Using where; Using index
SELECT * FROM t1 WHERE
-3 <= a AND a <= 5 OR
-3 <= a;
+23 <= a AND a <= 25 OR
+23 <= a;
a b
-5 0
-9 7
+25 20
+29 27
EXPLAIN
SELECT * FROM t1 WHERE
-3 <= a AND a <= 5 OR
-3 <= a;
+23 <= a AND a <= 25 OR
+23 <= a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 2 Using where; Using index
SELECT * FROM t2 WHERE
5 <= a AND a < 10 AND b = 1 OR
15 <= a AND a < 20 AND b = 3
@@ -1315,7 +1359,7 @@ SELECT * FROM t2 WHERE
OR
1 <= a AND b = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 10 NULL 50 Using where; Using index
+1 SIMPLE t2 range a a 10 NULL 49 Using where; Using index
SELECT * FROM t2 WHERE
5 <= a AND a < 10 AND b = 2 OR
15 <= a AND a < 20 AND b = 3
@@ -1359,7 +1403,7 @@ SELECT * FROM t2 WHERE
OR
1 <= a AND b = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 10 NULL 50 Using where; Using index
+1 SIMPLE t2 range a a 10 NULL 49 Using where; Using index
SELECT * FROM t3 WHERE
5 <= a AND a < 10 AND b = 3 OR
a < 5 OR
@@ -1380,7 +1424,7 @@ SELECT * FROM t3 WHERE
a < 5 OR
a < 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range a a 5 NULL 8 Using where; Using index
+1 SIMPLE t3 range a a 5 NULL 9 Using where; Using index
DROP TABLE t1, t2, t3;
#
# Bug #47123: Endless 100% CPU loop with STRAIGHT_JOIN
@@ -1398,10 +1442,16 @@ CREATE TABLE t2 ( a DATETIME, KEY ( a ) );
# Make optimizer choose range scan
INSERT INTO t1 VALUES ('2009-09-22'), ('2009-09-22'), ('2009-09-22');
INSERT INTO t1 VALUES ('2009-09-23'), ('2009-09-23'), ('2009-09-23');
+INSERT INTO t1 VALUES ('2009-09-20'), ('2009-09-20'), ('2009-09-20');
+INSERT INTO t1 VALUES ('2009-09-21'), ('2009-09-21'), ('2009-09-21');
INSERT INTO t2 VALUES ('2009-09-22 12:00:00'), ('2009-09-22 12:00:00'),
('2009-09-22 12:00:00');
INSERT INTO t2 VALUES ('2009-09-23 12:00:00'), ('2009-09-23 12:00:00'),
('2009-09-23 12:00:00');
+INSERT INTO t2 VALUES ('2009-09-20 12:00:00'), ('2009-09-20 12:00:00'),
+('2009-09-20 12:00:00');
+INSERT INTO t2 VALUES ('2009-09-21 12:00:00'), ('2009-09-21 12:00:00'),
+('2009-09-21 12:00:00');
# DATE vs DATE
EXPLAIN
SELECT * FROM t1 WHERE a >= '2009/09/23';
@@ -1563,7 +1613,7 @@ str_to_date('2007-10-00', '%Y-%m-%d') >= '' AND
str_to_date('2007-10-00', '%Y-%m-%d') <= '2007/10/20'
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT str_to_date('2007-20-00', '%Y-%m-%d') >= '2007/10/20' AND
str_to_date('2007-20-00', '%Y-%m-%d') <= '';
str_to_date('2007-20-00', '%Y-%m-%d') >= '2007/10/20' AND
@@ -1578,7 +1628,7 @@ SELECT str_to_date('2007-10-00', '%Y-%m-%d') BETWEEN '' AND '2007/10/20';
str_to_date('2007-10-00', '%Y-%m-%d') BETWEEN '' AND '2007/10/20'
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT str_to_date('2007-20-00', '%Y-%m-%d') BETWEEN '2007/10/20' AND '';
str_to_date('2007-20-00', '%Y-%m-%d') BETWEEN '2007/10/20' AND ''
NULL
@@ -1656,7 +1706,8 @@ DROP TABLE t1;
#
CREATE TABLE t1(pk INT PRIMARY KEY, i4 INT);
CREATE UNIQUE INDEX i4_uq ON t1(i4);
-INSERT INTO t1 VALUES (1,10), (2,20), (3,30);
+INSERT INTO t1 VALUES
+(1,10), (2,20), (3,30), (4,40), (5,50), (6,60), (7,70), (8,80);
EXPLAIN
SELECT * FROM t1 WHERE i4 BETWEEN 10 AND 10;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1672,14 +1723,13 @@ SELECT * FROM t1 WHERE 10 BETWEEN i4 AND i4;
pk i4
1 10
EXPLAIN
-SELECT * FROM t1 WHERE 10 BETWEEN 10 AND i4;
+SELECT * FROM t1 WHERE 70 BETWEEN 70 AND i4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range i4_uq i4_uq 5 NULL 3 Using index condition
-SELECT * FROM t1 WHERE 10 BETWEEN 10 AND i4;
+1 SIMPLE t1 range i4_uq i4_uq 5 NULL 2 Using index condition
+SELECT * FROM t1 WHERE 70 BETWEEN 70 AND i4;
pk i4
-1 10
-2 20
-3 30
+7 70
+8 80
EXPLAIN
SELECT * FROM t1 WHERE 10 BETWEEN i4 AND 10;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1690,12 +1740,17 @@ pk i4
EXPLAIN
SELECT * FROM t1 WHERE 10 BETWEEN 10 AND 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 3
+1 SIMPLE t1 ALL NULL NULL NULL NULL 8
SELECT * FROM t1 WHERE 10 BETWEEN 10 AND 10;
pk i4
1 10
2 20
3 30
+4 40
+5 50
+6 60
+7 70
+8 80
EXPLAIN
SELECT * FROM t1 WHERE 10 BETWEEN 11 AND 11;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1715,14 +1770,13 @@ id select_type table type possible_keys key key_len ref rows Extra
SELECT * FROM t1 WHERE i4 BETWEEN 100 AND 0;
pk i4
EXPLAIN
-SELECT * FROM t1 WHERE i4 BETWEEN 10 AND 99999999999999999;
+SELECT * FROM t1 WHERE i4 BETWEEN 70 AND 99999999999999999;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range i4_uq i4_uq 5 NULL 2 Using index condition
-SELECT * FROM t1 WHERE i4 BETWEEN 10 AND 99999999999999999;
+SELECT * FROM t1 WHERE i4 BETWEEN 70 AND 99999999999999999;
pk i4
-1 10
-2 20
-3 30
+7 70
+8 80
EXPLAIN
SELECT * FROM t1 WHERE i4 BETWEEN 999999999999999 AND 30;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1732,7 +1786,7 @@ pk i4
EXPLAIN
SELECT * FROM t1 WHERE i4 BETWEEN 10 AND '20';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range i4_uq i4_uq 5 NULL 1 Using index condition
+1 SIMPLE t1 range i4_uq i4_uq 5 NULL 2 Using index condition
SELECT * FROM t1 WHERE i4 BETWEEN 10 AND '20';
pk i4
1 10
@@ -1740,14 +1794,14 @@ pk i4
EXPLAIN
SELECT * FROM t1, t1 as t2 WHERE t2.pk BETWEEN t1.i4 AND t1.i4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL i4_uq NULL NULL NULL 3
+1 SIMPLE t1 ALL i4_uq NULL NULL NULL 8
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.i4 1 Using index condition
SELECT * FROM t1, t1 as t2 WHERE t2.pk BETWEEN t1.i4 AND t1.i4;
pk i4 pk i4
EXPLAIN
SELECT * FROM t1, t1 as t2 WHERE t1.i4 BETWEEN t2.pk AND t2.pk;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL i4_uq NULL NULL NULL 3
+1 SIMPLE t1 ALL i4_uq NULL NULL NULL 8
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.i4 1 Using index condition
SELECT * FROM t1, t1 as t2 WHERE t1.i4 BETWEEN t2.pk AND t2.pk;
pk i4 pk i4
@@ -1886,6 +1940,7 @@ alter table t1 add key2 int not null, add index i2(key2);
update t1 set key2=key1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
create table t2 (a int);
insert into t2 values (1),(2),(3),(4),(5),(6),(7),(8);
@@ -1893,19 +1948,23 @@ insert into t2 select a+16 from t2;
insert into t2 select a+32 from t2;
insert into t2 select a+64 from t2;
explain
-select count(*) from t2 left join t1 on (t1.key1 < 3 or t1.key1 > 1020) and t1.key2 < 1000;
+select count(*) from t2 left join t1
+on (t1.key1 < 3 or t1.key1 between 920 and 930) and t1.key2 < 1000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 64
-1 SIMPLE t1 range i1,i2 i1 4 NULL 78 Using where; Using join buffer (flat, BNL join)
-select count(*) from t2 left join t1 on (t1.key1 < 3 or t1.key1 > 1020) and t1.key2 < 1000;
+1 SIMPLE t1 range i1,i2 i1 4 NULL 12 Using where; Using join buffer (flat, BNL join)
+select count(*) from t2 left join t1
+on (t1.key1 < 3 or t1.key1 between 920 and 930) and t1.key2 < 1000;
count(*)
-128
+832
explain
-select count(*) from t2 left join t1 on (t1.key1 < 3 or t1.key1 > 1020) and t1.key2 < t2.a;
+select count(*) from t2 left join t1
+on (t1.key1 < 3 or t1.key1 between 920 and 930) and t1.key2 < t2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 64
-1 SIMPLE t1 range i1,i2 i1 4 NULL 78 Using where; Using join buffer (flat, BNL join)
-select count(*) from t2 left join t1 on (t1.key1 < 3 or t1.key1 > 1020) and t1.key2 < t2.a;
+1 SIMPLE t1 range i1,i2 i1 4 NULL 12 Using where; Using join buffer (flat, BNL join)
+select count(*) from t2 left join t1
+on (t1.key1 < 3 or t1.key1 between 920 and 930) and t1.key2 < t2.a;
count(*)
126
drop table t1,t2;
@@ -1919,7 +1978,7 @@ insert into t1 values (0,0,0), (2,2,0), (1,1,1), (2,2,1);
explain
select * from t1 force index (idx) where a >=1 and c <= 1 and a=b and b > 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx idx 5 NULL 3 Using where; Using index
+1 SIMPLE t1 index idx idx 15 NULL 4 Using where; Using index
select * from t1 force index (idx) where a >=1 and c <= 1 and a=b and b > 1;
a b c
2 2 0
@@ -1949,7 +2008,7 @@ INSERT INTO t100(I,J) VALUES(8,26);
EXPLAIN SELECT * FROM t100 WHERE I <> 6 OR (I <> 8 AND J = 5);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t100 range I I 10 NULL 4 Using index condition; Using where
+1 SIMPLE t100 range I I 10 NULL 3 Using index condition; Using where
SELECT * FROM t100 WHERE I <> 6 OR (I <> 8 AND J = 5);
K I J
@@ -2084,6 +2143,7 @@ insert into t2 select * from t2;
insert into t2 values (0, 0, 0, 0), (1, 1, 1, 1);
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
select a, b from t2 where (a, b) in ((0, 0), (1, 1));
a b
@@ -2272,10 +2332,10 @@ CREATE TABLE t1 (a INT, b INT, KEY(a));
INSERT INTO t1 (a) VALUES (10),(10),(10),(10),(10),(10),(10),(10),(10),(10),(70);
EXPLAIN SELECT * FROM t1 WHERE a<>10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 3 Using index condition
+1 SIMPLE t1 range a a 5 NULL 2 Using index condition
EXPLAIN SELECT * FROM t1 WHERE 10<>a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 3 Using index condition
+1 SIMPLE t1 range a a 5 NULL 2 Using index condition
SELECT * FROM t1 WHERE a<>10;
a b
70 NULL
@@ -2333,7 +2393,7 @@ insert into t1 values
# range access to t1 by 2-component keys for index idx
explain select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx idx 10 NULL 7 Using where
+1 SIMPLE t1 range idx idx 10 NULL 6 Using where
explain format=json select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7));
EXPLAIN
{
@@ -2346,7 +2406,7 @@ EXPLAIN
"key": "idx",
"key_length": "10",
"used_key_parts": ["a", "b"],
- "rows": 7,
+ "rows": 6,
"filtered": 100,
"attached_condition": "(t1.a,t1.b) in (<cache>((2,3)),<cache>((3,3)),<cache>((8,8)),<cache>((7,7)))"
}
@@ -2473,7 +2533,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["d"],
"rows": 3,
- "filtered": 100,
+ "filtered": 60,
"index_condition": "t2.d is not null",
"attached_condition": "(t2.d,t2.e) in (<cache>((3,3)),<cache>((7,7)),<cache>((2,2)))"
},
@@ -2529,8 +2589,8 @@ insert into t2 values
explain select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx idx 5 NULL 6 Using index condition
-1 SIMPLE t2 ref idx1,idx2 idx1 5 test.t1.a 12 Using where
+1 SIMPLE t2 range|filter idx1,idx2 idx1|idx2 5|5 NULL 8 (14%) Using index condition; Using where; Using rowid filter
+1 SIMPLE t1 ref idx idx 5 test.t2.d 8
explain format=json select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1;
EXPLAIN
@@ -2538,27 +2598,35 @@ EXPLAIN
"query_block": {
"select_id": 1,
"table": {
- "table_name": "t1",
+ "table_name": "t2",
"access_type": "range",
- "possible_keys": ["idx"],
- "key": "idx",
+ "possible_keys": ["idx1", "idx2"],
+ "key": "idx1",
"key_length": "5",
- "used_key_parts": ["a"],
- "rows": 6,
- "filtered": 100,
- "index_condition": "t1.a is not null"
+ "used_key_parts": ["d"],
+ "rowid_filter": {
+ "range": {
+ "key": "idx2",
+ "used_key_parts": ["e"]
+ },
+ "rows": 15,
+ "selectivity_pct": 14.423
+ },
+ "rows": 8,
+ "filtered": 14.423,
+ "index_condition": "t2.d is not null",
+ "attached_condition": "(t2.d,t2.e) in (<cache>((3,3)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1"
},
"table": {
- "table_name": "t2",
+ "table_name": "t1",
"access_type": "ref",
- "possible_keys": ["idx1", "idx2"],
- "key": "idx1",
+ "possible_keys": ["idx"],
+ "key": "idx",
"key_length": "5",
- "used_key_parts": ["d"],
- "ref": ["test.t1.a"],
- "rows": 12,
- "filtered": 100,
- "attached_condition": "(t1.a,t2.e) in (<cache>((3,3)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1"
+ "used_key_parts": ["a"],
+ "ref": ["test.t2.d"],
+ "rows": 8,
+ "filtered": 100
}
}
}
@@ -2566,16 +2634,16 @@ select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1;
a b c d e f
3 2 uuuw 3 3 i
-3 2 uuuw 3 3 i
-3 2 uuua 3 3 i
3 2 uuua 3 3 i
3 3 zzzz 3 3 i
-3 3 zzzz 3 3 i
-3 3 zyxw 3 3 i
3 3 zyxw 3 3 i
3 3 zzza 3 3 i
-3 3 zzza 3 3 i
3 3 zyxa 3 3 i
+3 2 uuuw 3 3 i
+3 2 uuua 3 3 i
+3 3 zzzz 3 3 i
+3 3 zyxw 3 3 i
+3 3 zzza 3 3 i
3 3 zyxa 3 3 i
7 7 xxxyy 7 7 h
7 7 xxxya 7 7 h
@@ -2586,16 +2654,16 @@ where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1";
execute stmt;
a b c d e f
3 2 uuuw 3 3 i
-3 2 uuuw 3 3 i
3 2 uuua 3 3 i
-3 2 uuua 3 3 i
-3 3 zzzz 3 3 i
3 3 zzzz 3 3 i
3 3 zyxw 3 3 i
-3 3 zyxw 3 3 i
-3 3 zzza 3 3 i
3 3 zzza 3 3 i
3 3 zyxa 3 3 i
+3 2 uuuw 3 3 i
+3 2 uuua 3 3 i
+3 3 zzzz 3 3 i
+3 3 zyxw 3 3 i
+3 3 zzza 3 3 i
3 3 zyxa 3 3 i
7 7 xxxyy 7 7 h
7 7 xxxya 7 7 h
@@ -2604,16 +2672,16 @@ a b c d e f
execute stmt;
a b c d e f
3 2 uuuw 3 3 i
-3 2 uuuw 3 3 i
-3 2 uuua 3 3 i
3 2 uuua 3 3 i
3 3 zzzz 3 3 i
-3 3 zzzz 3 3 i
3 3 zyxw 3 3 i
-3 3 zyxw 3 3 i
-3 3 zzza 3 3 i
3 3 zzza 3 3 i
3 3 zyxa 3 3 i
+3 2 uuuw 3 3 i
+3 2 uuua 3 3 i
+3 3 zzzz 3 3 i
+3 3 zyxw 3 3 i
+3 3 zzza 3 3 i
3 3 zyxa 3 3 i
7 7 xxxyy 7 7 h
7 7 xxxya 7 7 h
@@ -2626,7 +2694,7 @@ insert into t1 select * from t1;
explain select * from t1,t2
where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range idx1,idx2 idx2 5 NULL 6 Using where
+1 SIMPLE t2 range|filter idx1,idx2 idx1|idx2 5|5 NULL 7 (7%) Using index condition; Using where; Using rowid filter
1 SIMPLE t1 ref idx idx 5 test.t2.d 11
explain format=json select * from t1,t2
where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1;
@@ -2638,12 +2706,21 @@ EXPLAIN
"table_name": "t2",
"access_type": "range",
"possible_keys": ["idx1", "idx2"],
- "key": "idx2",
+ "key": "idx1",
"key_length": "5",
- "used_key_parts": ["e"],
- "rows": 6,
- "filtered": 100,
- "attached_condition": "(t2.d,t2.e) in (<cache>((4,4)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1 and t2.d is not null"
+ "used_key_parts": ["d"],
+ "rowid_filter": {
+ "range": {
+ "key": "idx2",
+ "used_key_parts": ["e"]
+ },
+ "rows": 7,
+ "selectivity_pct": 6.7308
+ },
+ "rows": 7,
+ "filtered": 6.7308,
+ "index_condition": "t2.d is not null",
+ "attached_condition": "(t2.d,t2.e) in (<cache>((4,4)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1"
},
"table": {
"table_name": "t1",
@@ -2828,7 +2905,7 @@ explain select * from t1,t2
where a = d and (a,2) in ((2,2),(7,7),(8,8)) and
length(c) = 1 and length(f) = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx idx 5 NULL 13 Using index condition; Using where
+1 SIMPLE t1 range idx idx 5 NULL 12 Using index condition; Using where
1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where
explain format=json select * from t1,t2
where a = d and (a,2) in ((2,2),(7,7),(8,8)) and
@@ -2844,7 +2921,7 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
- "rows": 13,
+ "rows": 12,
"filtered": 100,
"index_condition": "t1.a is not null",
"attached_condition": "(t1.a,2) in (<cache>((2,2)),<cache>((7,7)),<cache>((8,8))) and octet_length(t1.c) = 1"
@@ -2904,7 +2981,7 @@ where id = 1 and a = d and
length(c) = 1 and length(f) = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 const PRIMARY PRIMARY 4 const 1
-1 SIMPLE t1 range idx idx 5 NULL 13 Using index condition; Using where
+1 SIMPLE t1 range idx idx 5 NULL 12 Using index condition; Using where
1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where
explain format=json select * from t1,t2,t3
where id = 1 and a = d and
@@ -2932,7 +3009,7 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
- "rows": 13,
+ "rows": 12,
"filtered": 100,
"index_condition": "t1.a is not null",
"attached_condition": "(t1.a,1 + 1) in (<cache>((2,2)),<cache>((7,7)),<cache>((8,8))) and octet_length(t1.c) = 1"
@@ -2993,6 +3070,7 @@ insert into t1 select a+15, concat(b,'yy') from t1;
insert into t1 select a+100, concat(b,'xx') from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select cast(count(a)/count(distinct a) as unsigned) as rec_per_key from t1;
rec_per_key
@@ -3011,7 +3089,7 @@ a b
set eq_range_index_dive_limit=2;
explain select * from t1 where a in (8, 15, 31, 1, 9);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx idx 5 NULL 10 Using index condition
+1 SIMPLE t1 range idx idx 5 NULL 5 Using index condition
select * from t1 where a in (8, 15, 31, 1, 9);
a b
1 yy
@@ -3022,5 +3100,28 @@ a b
set eq_range_index_dive_limit=default;
drop table t1;
#
+# MDEV-18551: New defaults for eq_range_index_dive_limit
+#
+create table ten(a int);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1(a int, key(a));
+insert into t1 select A.a + B.a*10 + C.a*100 from ten A, ten B,ten C;
+insert into t1 select 1 from ten A, ten B,ten C;
+create table t2(a int, key(a));
+insert into t2 select A.a + B.a*10 + C.a*100 from ten A, ten B,ten C where A.a + B.a*10 + C.a*100 < 199;
+# expected type=range, rows=1487 , reason=using index dives
+analyze SELECT * FROM t1 where a in (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198);
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t1 range a a 5 NULL 1487 1199.00 100.00 100.00 Using where; Using index
+insert into t2 values (200),(201);
+# expected type=range, rows=201 , reason=using index statistics
+analyze SELECT * FROM t1 where a in (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,200,201);
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t1 range a a 5 NULL 201 1201.00 100.00 100.00 Using where; Using index
+drop table t1,ten,t2;
+#
# End of 10.2 tests
#
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/range.test b/mysql-test/main/range.test
index bd2299bac5f..76d893f56b7 100644
--- a/mysql-test/main/range.test
+++ b/mysql-test/main/range.test
@@ -3,6 +3,13 @@
#
--source include/have_innodb.inc
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
+
CREATE TABLE t1 (
event_date date DEFAULT '0000-00-00' NOT NULL,
type int(11) DEFAULT '0' NOT NULL,
@@ -188,7 +195,7 @@ explain select * from t1, t1 t2 where t1.y = 2 and t2.x between 0 and t1.y;
explain select * from t1, t1 t2 where t1.y = 2 and t2.x >= 0 and t2.x <= t1.y;
# testing IN
explain select count(*) from t1 where x in (1);
-explain select count(*) from t1 where x in (1,2);
+explain select count(*) from t1 where x in (1,2,3,4);
drop table t1;
#
@@ -745,6 +752,7 @@ INSERT INTO t1 VALUES
(43,'A'), (44,'A'), (45,'A'), (46,'A'), (47,'A'), (48,'A'),
(49,'A'), (50,'A'), (51,'A'), (52,'A'), (53,'C'), (54,'C'),
(55,'C'), (56,'C'), (57,'C'), (58,'C'), (59,'C'), (60,'C');
+INSERT INTO t1(status) SELECT status FROM t1;
EXPLAIN SELECT * FROM t1 WHERE status <> 'A' AND status <> 'B';
EXPLAIN SELECT * FROM t1 WHERE status NOT IN ('A','B');
@@ -950,7 +958,7 @@ CREATE TABLE t1 (
) DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
INSERT INTO t1 VALUES
- ('B'), ('A'), ('A'), ('C'), ('B'), ('A'), ('A');
+ ('B'), ('A'), ('A'), ('C'), ('B'), ('A'), ('A'), ('C'), ('A');
SELECT a FROM t1 WHERE a='b' OR a='B';
EXPLAIN SELECT a FROM t1 WHERE a='b' OR a='B';
@@ -1040,8 +1048,10 @@ CREATE TABLE t2( a INT, b INT, KEY( a, b ) );
CREATE TABLE t3( a INT, b INT, KEY( a, b ) );
-INSERT INTO t1( a, b )
-VALUES (0, 1), (1, 2), (1, 4), (2, 3), (5, 0), (9, 7);
+INSERT INTO t1( a, b ) VALUES
+ (0, 1), (1, 2), (1, 4), (2, 3), (5, 0), (9, 7),
+ (10, 11), (11,12), (11, 14), (12, 13), (15, 10), (19, 17),
+ (20, 21), (21,22), (21, 24), (22, 23), (25, 20), (29, 27);
INSERT INTO t2( a, b )
VALUES ( 1, 1), ( 2, 1), ( 3, 1), ( 4, 1), ( 5, 1),
@@ -1057,9 +1067,11 @@ INSERT INTO t2 SELECT -1, -1 FROM t2;
INSERT INTO t2 SELECT -1, -1 FROM t2;
INSERT INTO t2 SELECT -1, -1 FROM t2;
-INSERT INTO t3
-VALUES (1, 0), (2, 0), (3, 0), (4, 0), (5, 0),
- (6, 0), (7, 0), (8, 0), (9, 0), (10, 0);
+INSERT INTO t3 VALUES
+ (1, 0), (2, 0), (3, 0), (4, 0), (5, 0),
+ (6, 0), (7, 0), (8, 0), (9, 0), (10, 0),
+ (11, 0), (12, 0), (13, 0), (14, 0), (15, 0),
+ (16, 0), (17, 0), (18, 0), (19, 0), (20, 0);
# To make range scan compelling to the optimizer
INSERT INTO t3 SELECT * FROM t3 WHERE a = 10;
@@ -1071,47 +1083,47 @@ INSERT INTO t3 SELECT * FROM t3 WHERE a = 10;
# With one exception, they are independent of Problem#2.
#
SELECT * FROM t1 WHERE
-3 <= a AND a < 5 OR
-5 < a AND b = 3 OR
-3 <= a;
+23 <= a AND a < 25 OR
+25 < a AND b = 23 OR
+23 <= a;
EXPLAIN
SELECT * FROM t1 WHERE
-3 <= a AND a < 5 OR
-5 < a AND b = 3 OR
-3 <= a;
+23 <= a AND a < 25 OR
+25 < a AND b = 23 OR
+23 <= a;
# Query below: Tests both Problem#1 and Problem#2 (EXPLAIN differs as well)
SELECT * FROM t1 WHERE
-3 <= a AND a < 5 OR
-5 <= a AND b = 3 OR
-3 <= a;
+23 <= a AND a < 25 OR
+25 <= a AND b = 23 OR
+23 <= a;
EXPLAIN
SELECT * FROM t1 WHERE
-3 <= a AND a < 5 OR
-5 <= a AND b = 3 OR
-3 <= a;
+23 <= a AND a < 25 OR
+25 <= a AND b = 23 OR
+23 <= a;
SELECT * FROM t1 WHERE
-3 <= a AND a <= 5 OR
-5 <= a AND b = 3 OR
-3 <= a;
+23 <= a AND a <= 25 OR
+25 <= a AND b = 23 OR
+23 <= a;
EXPLAIN
SELECT * FROM t1 WHERE
-3 <= a AND a <= 5 OR
-5 <= a AND b = 3 OR
-3 <= a;
+23 <= a AND a <= 25 OR
+25 <= a AND b = 23 OR
+23 <= a;
SELECT * FROM t1 WHERE
-3 <= a AND a <= 5 OR
-3 <= a;
+23 <= a AND a <= 25 OR
+23 <= a;
EXPLAIN
SELECT * FROM t1 WHERE
-3 <= a AND a <= 5 OR
-3 <= a;
+23 <= a AND a <= 25 OR
+23 <= a;
#
# Problem#2 Test queries.
@@ -1175,11 +1187,17 @@ CREATE TABLE t2 ( a DATETIME, KEY ( a ) );
--echo # Make optimizer choose range scan
INSERT INTO t1 VALUES ('2009-09-22'), ('2009-09-22'), ('2009-09-22');
INSERT INTO t1 VALUES ('2009-09-23'), ('2009-09-23'), ('2009-09-23');
+INSERT INTO t1 VALUES ('2009-09-20'), ('2009-09-20'), ('2009-09-20');
+INSERT INTO t1 VALUES ('2009-09-21'), ('2009-09-21'), ('2009-09-21');
INSERT INTO t2 VALUES ('2009-09-22 12:00:00'), ('2009-09-22 12:00:00'),
('2009-09-22 12:00:00');
INSERT INTO t2 VALUES ('2009-09-23 12:00:00'), ('2009-09-23 12:00:00'),
('2009-09-23 12:00:00');
+INSERT INTO t2 VALUES ('2009-09-20 12:00:00'), ('2009-09-20 12:00:00'),
+ ('2009-09-20 12:00:00');
+INSERT INTO t2 VALUES ('2009-09-21 12:00:00'), ('2009-09-21 12:00:00'),
+ ('2009-09-21 12:00:00');
--echo # DATE vs DATE
--replace_column 1 X 2 X 3 X 7 X 8 X 9 X 10 X
@@ -1320,7 +1338,9 @@ DROP TABLE t1;
CREATE TABLE t1(pk INT PRIMARY KEY, i4 INT);
CREATE UNIQUE INDEX i4_uq ON t1(i4);
-INSERT INTO t1 VALUES (1,10), (2,20), (3,30);
+INSERT INTO t1 VALUES
+ (1,10), (2,20), (3,30), (4,40), (5,50), (6,60), (7,70), (8,80);
+
EXPLAIN
SELECT * FROM t1 WHERE i4 BETWEEN 10 AND 10;
@@ -1331,8 +1351,8 @@ SELECT * FROM t1 WHERE 10 BETWEEN i4 AND i4;
SELECT * FROM t1 WHERE 10 BETWEEN i4 AND i4;
EXPLAIN
-SELECT * FROM t1 WHERE 10 BETWEEN 10 AND i4;
-SELECT * FROM t1 WHERE 10 BETWEEN 10 AND i4;
+SELECT * FROM t1 WHERE 70 BETWEEN 70 AND i4;
+SELECT * FROM t1 WHERE 70 BETWEEN 70 AND i4;
EXPLAIN
SELECT * FROM t1 WHERE 10 BETWEEN i4 AND 10;
@@ -1355,8 +1375,8 @@ SELECT * FROM t1 WHERE i4 BETWEEN 100 AND 0;
SELECT * FROM t1 WHERE i4 BETWEEN 100 AND 0;
EXPLAIN
-SELECT * FROM t1 WHERE i4 BETWEEN 10 AND 99999999999999999;
-SELECT * FROM t1 WHERE i4 BETWEEN 10 AND 99999999999999999;
+SELECT * FROM t1 WHERE i4 BETWEEN 70 AND 99999999999999999;
+SELECT * FROM t1 WHERE i4 BETWEEN 70 AND 99999999999999999;
EXPLAIN
SELECT * FROM t1 WHERE i4 BETWEEN 999999999999999 AND 30;
@@ -1479,11 +1499,15 @@ insert into t2 select a+32 from t2;
insert into t2 select a+64 from t2;
explain
-select count(*) from t2 left join t1 on (t1.key1 < 3 or t1.key1 > 1020) and t1.key2 < 1000;
-select count(*) from t2 left join t1 on (t1.key1 < 3 or t1.key1 > 1020) and t1.key2 < 1000;
+select count(*) from t2 left join t1
+ on (t1.key1 < 3 or t1.key1 between 920 and 930) and t1.key2 < 1000;
+select count(*) from t2 left join t1
+ on (t1.key1 < 3 or t1.key1 between 920 and 930) and t1.key2 < 1000;
explain
-select count(*) from t2 left join t1 on (t1.key1 < 3 or t1.key1 > 1020) and t1.key2 < t2.a;
-select count(*) from t2 left join t1 on (t1.key1 < 3 or t1.key1 > 1020) and t1.key2 < t2.a;
+select count(*) from t2 left join t1
+ on (t1.key1 < 3 or t1.key1 between 920 and 930) and t1.key2 < t2.a;
+select count(*) from t2 left join t1
+ on (t1.key1 < 3 or t1.key1 between 920 and 930) and t1.key2 < t2.a;
drop table t1,t2;
@@ -2051,5 +2075,32 @@ set eq_range_index_dive_limit=default;
drop table t1;
--echo #
+--echo # MDEV-18551: New defaults for eq_range_index_dive_limit
+--echo #
+
+create table ten(a int);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1(a int, key(a));
+insert into t1 select A.a + B.a*10 + C.a*100 from ten A, ten B,ten C;
+insert into t1 select 1 from ten A, ten B,ten C;
+
+create table t2(a int, key(a));
+insert into t2 select A.a + B.a*10 + C.a*100 from ten A, ten B,ten C where A.a + B.a*10 + C.a*100 < 199;
+
+let $a= `select group_concat(a) from t2`;
+
+--echo # expected type=range, rows=1487 , reason=using index dives
+eval analyze SELECT * FROM t1 where a in ($a);
+insert into t2 values (200),(201);
+let $a= `select group_concat(a) from t2`;
+--echo # expected type=range, rows=201 , reason=using index statistics
+eval analyze SELECT * FROM t1 where a in ($a);
+drop table t1,ten,t2;
+
+--echo #
--echo # End of 10.2 tests
--echo #
+
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/range_innodb.result b/mysql-test/main/range_innodb.result
index 6572b248911..30161a2711d 100644
--- a/mysql-test/main/range_innodb.result
+++ b/mysql-test/main/range_innodb.result
@@ -30,6 +30,7 @@ repeat('0123456789', 10)
from t1;
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
# The following must not use "Range checked for each record":
explain select * from t0 left join t2 on t2.a <t0.a and t2.b between 50 and 250;
diff --git a/mysql-test/main/range_mrr_icp.result b/mysql-test/main/range_mrr_icp.result
index 6b5bf33239f..5cda4111b6d 100644
--- a/mysql-test/main/range_mrr_icp.result
+++ b/mysql-test/main/range_mrr_icp.result
@@ -1,5 +1,11 @@
set @mrr_icp_extra_tmp=@@optimizer_switch;
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+set optimizer_switch='rowid_filter=off';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
CREATE TABLE t1 (
event_date date DEFAULT '0000-00-00' NOT NULL,
type int(11) DEFAULT '0' NOT NULL,
@@ -246,7 +252,7 @@ id select_type table type possible_keys key key_len ref rows Extra
explain select count(*) from t1 where x in (1);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref x x 5 const 1 Using index
-explain select count(*) from t1 where x in (1,2);
+explain select count(*) from t1 where x in (1,2,3,4);
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index x x 5 NULL 9 Using where; Using index
drop table t1;
@@ -418,23 +424,25 @@ count(*)
1026
analyze table t1,t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
explain select * from t1, t2 where t1.uid=t2.uid AND t1.uid > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uid_index uid_index 4 NULL 112 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 range uid_index uid_index 4 NULL 111 Using index condition; Rowid-ordered scan
1 SIMPLE t2 ref uid_index uid_index 4 test.t1.uid 38
explain select * from t1, t2 where t1.uid=t2.uid AND t2.uid > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uid_index uid_index 4 NULL 112 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 range uid_index uid_index 4 NULL 111 Using index condition; Rowid-ordered scan
1 SIMPLE t2 ref uid_index uid_index 4 test.t1.uid 38
explain select * from t1, t2 where t1.uid=t2.uid AND t1.uid != 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uid_index uid_index 4 NULL 113 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 range uid_index uid_index 4 NULL 112 Using index condition; Rowid-ordered scan
1 SIMPLE t2 ref uid_index uid_index 4 test.t1.uid 38
explain select * from t1, t2 where t1.uid=t2.uid AND t2.uid != 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uid_index uid_index 4 NULL 113 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 range uid_index uid_index 4 NULL 112 Using index condition; Rowid-ordered scan
1 SIMPLE t2 ref uid_index uid_index 4 test.t1.uid 38
select * from t1, t2 where t1.uid=t2.uid AND t1.uid > 0;
id name uid id name uid
@@ -717,7 +725,7 @@ WHERE
v.oxrootid ='d8c4177d09f8b11f5.52725521' AND
s.oxleft > v.oxleft AND s.oxleft < v.oxright;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE v ref OXLEFT,OXRIGHT,OXROOTID OXROOTID 34 const 5 Using index condition
+1 SIMPLE v ref OXLEFT,OXRIGHT,OXROOTID OXROOTID 34 const 6 Using index condition
1 SIMPLE s ALL OXLEFT NULL NULL NULL 12 Range checked for each record (index map: 0x4)
SELECT s.oxid FROM t1 v, t1 s
WHERE
@@ -894,12 +902,13 @@ INSERT INTO t1 VALUES
(43,'A'), (44,'A'), (45,'A'), (46,'A'), (47,'A'), (48,'A'),
(49,'A'), (50,'A'), (51,'A'), (52,'A'), (53,'C'), (54,'C'),
(55,'C'), (56,'C'), (57,'C'), (58,'C'), (59,'C'), (60,'C');
+INSERT INTO t1(status) SELECT status FROM t1;
EXPLAIN SELECT * FROM t1 WHERE status <> 'A' AND status <> 'B';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range status status 23 NULL 11 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 range status status 23 NULL 18 Using index condition; Rowid-ordered scan
EXPLAIN SELECT * FROM t1 WHERE status NOT IN ('A','B');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range status status 23 NULL 11 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 range status status 23 NULL 18 Using index condition; Rowid-ordered scan
SELECT * FROM t1 WHERE status <> 'A' AND status <> 'B';
id status
53 C
@@ -910,6 +919,14 @@ id status
58 C
59 C
60 C
+113 C
+114 C
+115 C
+116 C
+117 C
+118 C
+119 C
+120 C
SELECT * FROM t1 WHERE status NOT IN ('A','B');
id status
53 C
@@ -920,18 +937,26 @@ id status
58 C
59 C
60 C
+113 C
+114 C
+115 C
+116 C
+117 C
+118 C
+119 C
+120 C
EXPLAIN SELECT status FROM t1 WHERE status <> 'A' AND status <> 'B';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range status status 23 NULL 11 Using where; Using index
+1 SIMPLE t1 range status status 23 NULL 18 Using where; Using index
EXPLAIN SELECT status FROM t1 WHERE status NOT IN ('A','B');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range status status 23 NULL 11 Using where; Using index
+1 SIMPLE t1 range status status 23 NULL 18 Using where; Using index
EXPLAIN SELECT * FROM t1 WHERE status NOT BETWEEN 'A' AND 'B';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range status status 23 NULL 10 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 range status status 23 NULL 17 Using index condition; Rowid-ordered scan
EXPLAIN SELECT * FROM t1 WHERE status < 'A' OR status > 'B';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range status status 23 NULL 10 Using index condition; Using where; Rowid-ordered scan
+1 SIMPLE t1 range status status 23 NULL 17 Using index condition; Using where; Rowid-ordered scan
SELECT * FROM t1 WHERE status NOT BETWEEN 'A' AND 'B';
id status
53 C
@@ -942,6 +967,14 @@ id status
58 C
59 C
60 C
+113 C
+114 C
+115 C
+116 C
+117 C
+118 C
+119 C
+120 C
SELECT * FROM t1 WHERE status < 'A' OR status > 'B';
id status
53 C
@@ -952,6 +985,14 @@ id status
58 C
59 C
60 C
+113 C
+114 C
+115 C
+116 C
+117 C
+118 C
+119 C
+120 C
DROP TABLE t1;
CREATE TABLE t1 (a int, b int, primary key(a,b));
INSERT INTO t1 VALUES
@@ -959,16 +1000,16 @@ INSERT INTO t1 VALUES
CREATE VIEW v1 as SELECT a,b FROM t1 WHERE b=3;
EXPLAIN SELECT a,b FROM t1 WHERE a < 2 and b=3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where; Using index
EXPLAIN SELECT a,b FROM v1 WHERE a < 2 and b=3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where; Using index
EXPLAIN SELECT a,b FROM t1 WHERE a < 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where; Using index
EXPLAIN SELECT a,b FROM v1 WHERE a < 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using where; Using index
+1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 3 Using where; Using index
SELECT a,b FROM t1 WHERE a < 2 and b=3;
a b
1 3
@@ -1008,10 +1049,10 @@ INSERT INTO `t1` VALUES
,(13,2),(14,2),(15,3),(16,3),(17,3),(18,3),(19,3);
explain select * from t1 where a in (3,4) and b in (1,2,3);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 8 NULL # Using where; Using index
+1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL # Using where; Using index
explain select * from v1 where a in (3,4) and b in (1,2,3);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 8 NULL # Using where; Using index
+1 SIMPLE t1 index PRIMARY PRIMARY 8 NULL # Using where; Using index
explain select * from t1 where a between 3 and 4 and b between 1 and 2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 8 NULL # Using where; Using index
@@ -1067,10 +1108,10 @@ id b c
0 3 4
EXPLAIN SELECT * FROM t1 WHERE b<=3 AND 3<=c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx1,idx2 idx2 4 NULL 3 Using index condition; Using where; Rowid-ordered scan
+1 SIMPLE t1 range idx1,idx2 idx2 4 NULL 2 Using index condition; Using where; Rowid-ordered scan
EXPLAIN SELECT * FROM t1 WHERE 3 BETWEEN b AND c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx1,idx2 idx2 4 NULL 3 Using where; Rowid-ordered scan
+1 SIMPLE t1 range idx1,idx2 idx2 4 NULL 2 Using where; Rowid-ordered scan
SELECT * FROM t1 WHERE 0 < b OR 0 > c;
id b c
0 3 4
@@ -1081,10 +1122,10 @@ id b c
0 3 4
EXPLAIN SELECT * FROM t1 WHERE 0 < b OR 0 > c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge idx1,idx2 idx1,idx2 4,4 NULL 4 Using sort_union(idx1,idx2); Using where
+1 SIMPLE t1 index_merge idx1,idx2 idx1,idx2 4,4 NULL 3 Using sort_union(idx1,idx2); Using where
EXPLAIN SELECT * FROM t1 WHERE 0 NOT BETWEEN b AND c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge idx1,idx2 idx1,idx2 4,4 NULL 4 Using sort_union(idx1,idx2); Using where
+1 SIMPLE t1 index_merge idx1,idx2 idx1,idx2 4,4 NULL 3 Using sort_union(idx1,idx2); Using where
DROP TABLE t1;
CREATE TABLE t1 (
item char(20) NOT NULL default '',
@@ -1099,11 +1140,11 @@ INSERT INTO t1 VALUES
('A2','2005-12-01 08:00:00',1000);
EXPLAIN SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-01 24:00:00';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref PRIMARY PRIMARY 20 const 2 Using index condition
+1 SIMPLE t1 ref PRIMARY PRIMARY 20 const 3 Using index condition
SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-01 24:00:00';
item started price
Warnings:
-Warning 1292 Incorrect datetime value: '2005-12-01 24:00:00'
+Warning 1292 Truncated incorrect datetime value: '2005-12-01 24:00:00'
SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-02 00:00:00';
item started price
A1 2005-11-01 08:00:00 1000.000
@@ -1115,7 +1156,7 @@ id select_type table type possible_keys key key_len ref rows Extra
SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-01 24:00:00';
item started price
Warnings:
-Warning 1292 Incorrect datetime value: '2005-12-01 24:00:00'
+Warning 1292 Truncated incorrect datetime value: '2005-12-01 24:00:00'
SELECT * FROM t1 WHERE item='A1' AND started<='2005-12-02 00:00:00';
item started price
A1 2005-11-01 08:00:00 1000.000
@@ -1143,7 +1184,7 @@ CREATE TABLE t1 (
a varchar(32), index (a)
) DEFAULT CHARSET=latin1 COLLATE=latin1_bin;
INSERT INTO t1 VALUES
-('B'), ('A'), ('A'), ('C'), ('B'), ('A'), ('A');
+('B'), ('A'), ('A'), ('C'), ('B'), ('A'), ('A'), ('C'), ('A');
SELECT a FROM t1 WHERE a='b' OR a='B';
a
B
@@ -1203,13 +1244,15 @@ Z
In following EXPLAIN the access method should be ref, #rows~=500 (and not 2)
explain select * from t2 where a=1000 and b<11;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref a a 5 const 502 Using index condition
+1 SIMPLE t2 ref a a 5 const 503 Using index condition
drop table t1, t2;
CREATE TABLE t1( a INT, b INT, KEY( a, b ) );
CREATE TABLE t2( a INT, b INT, KEY( a, b ) );
CREATE TABLE t3( a INT, b INT, KEY( a, b ) );
-INSERT INTO t1( a, b )
-VALUES (0, 1), (1, 2), (1, 4), (2, 3), (5, 0), (9, 7);
+INSERT INTO t1( a, b ) VALUES
+(0, 1), (1, 2), (1, 4), (2, 3), (5, 0), (9, 7),
+(10, 11), (11,12), (11, 14), (12, 13), (15, 10), (19, 17),
+(20, 21), (21,22), (21, 24), (22, 23), (25, 20), (29, 27);
INSERT INTO t2( a, b )
VALUES ( 1, 1), ( 2, 1), ( 3, 1), ( 4, 1), ( 5, 1),
( 6, 1), ( 7, 1), ( 8, 1), ( 9, 1), (10, 1),
@@ -1220,65 +1263,67 @@ INSERT INTO t2 SELECT a, 3 FROM t2 WHERE b = 1;
INSERT INTO t2 SELECT -1, -1 FROM t2;
INSERT INTO t2 SELECT -1, -1 FROM t2;
INSERT INTO t2 SELECT -1, -1 FROM t2;
-INSERT INTO t3
-VALUES (1, 0), (2, 0), (3, 0), (4, 0), (5, 0),
-(6, 0), (7, 0), (8, 0), (9, 0), (10, 0);
+INSERT INTO t3 VALUES
+(1, 0), (2, 0), (3, 0), (4, 0), (5, 0),
+(6, 0), (7, 0), (8, 0), (9, 0), (10, 0),
+(11, 0), (12, 0), (13, 0), (14, 0), (15, 0),
+(16, 0), (17, 0), (18, 0), (19, 0), (20, 0);
INSERT INTO t3 SELECT * FROM t3 WHERE a = 10;
INSERT INTO t3 SELECT * FROM t3 WHERE a = 10;
SELECT * FROM t1 WHERE
-3 <= a AND a < 5 OR
-5 < a AND b = 3 OR
-3 <= a;
+23 <= a AND a < 25 OR
+25 < a AND b = 23 OR
+23 <= a;
a b
-5 0
-9 7
+25 20
+29 27
EXPLAIN
SELECT * FROM t1 WHERE
-3 <= a AND a < 5 OR
-5 < a AND b = 3 OR
-3 <= a;
+23 <= a AND a < 25 OR
+25 < a AND b = 23 OR
+23 <= a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 2 Using where; Using index
SELECT * FROM t1 WHERE
-3 <= a AND a < 5 OR
-5 <= a AND b = 3 OR
-3 <= a;
+23 <= a AND a < 25 OR
+25 <= a AND b = 23 OR
+23 <= a;
a b
-5 0
-9 7
+25 20
+29 27
EXPLAIN
SELECT * FROM t1 WHERE
-3 <= a AND a < 5 OR
-5 <= a AND b = 3 OR
-3 <= a;
+23 <= a AND a < 25 OR
+25 <= a AND b = 23 OR
+23 <= a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 4 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index
SELECT * FROM t1 WHERE
-3 <= a AND a <= 5 OR
-5 <= a AND b = 3 OR
-3 <= a;
+23 <= a AND a <= 25 OR
+25 <= a AND b = 23 OR
+23 <= a;
a b
-5 0
-9 7
+25 20
+29 27
EXPLAIN
SELECT * FROM t1 WHERE
-3 <= a AND a <= 5 OR
-5 <= a AND b = 3 OR
-3 <= a;
+23 <= a AND a <= 25 OR
+25 <= a AND b = 23 OR
+23 <= a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 2 Using where; Using index
SELECT * FROM t1 WHERE
-3 <= a AND a <= 5 OR
-3 <= a;
+23 <= a AND a <= 25 OR
+23 <= a;
a b
-5 0
-9 7
+25 20
+29 27
EXPLAIN
SELECT * FROM t1 WHERE
-3 <= a AND a <= 5 OR
-3 <= a;
+23 <= a AND a <= 25 OR
+23 <= a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 3 Using where; Using index
+1 SIMPLE t1 range a a 5 NULL 2 Using where; Using index
SELECT * FROM t2 WHERE
5 <= a AND a < 10 AND b = 1 OR
15 <= a AND a < 20 AND b = 3
@@ -1317,7 +1362,7 @@ SELECT * FROM t2 WHERE
OR
1 <= a AND b = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 10 NULL 50 Using where; Using index
+1 SIMPLE t2 range a a 10 NULL 49 Using where; Using index
SELECT * FROM t2 WHERE
5 <= a AND a < 10 AND b = 2 OR
15 <= a AND a < 20 AND b = 3
@@ -1361,7 +1406,7 @@ SELECT * FROM t2 WHERE
OR
1 <= a AND b = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 10 NULL 50 Using where; Using index
+1 SIMPLE t2 range a a 10 NULL 49 Using where; Using index
SELECT * FROM t3 WHERE
5 <= a AND a < 10 AND b = 3 OR
a < 5 OR
@@ -1382,7 +1427,7 @@ SELECT * FROM t3 WHERE
a < 5 OR
a < 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range a a 5 NULL 8 Using where; Using index
+1 SIMPLE t3 range a a 5 NULL 9 Using where; Using index
DROP TABLE t1, t2, t3;
#
# Bug #47123: Endless 100% CPU loop with STRAIGHT_JOIN
@@ -1400,10 +1445,16 @@ CREATE TABLE t2 ( a DATETIME, KEY ( a ) );
# Make optimizer choose range scan
INSERT INTO t1 VALUES ('2009-09-22'), ('2009-09-22'), ('2009-09-22');
INSERT INTO t1 VALUES ('2009-09-23'), ('2009-09-23'), ('2009-09-23');
+INSERT INTO t1 VALUES ('2009-09-20'), ('2009-09-20'), ('2009-09-20');
+INSERT INTO t1 VALUES ('2009-09-21'), ('2009-09-21'), ('2009-09-21');
INSERT INTO t2 VALUES ('2009-09-22 12:00:00'), ('2009-09-22 12:00:00'),
('2009-09-22 12:00:00');
INSERT INTO t2 VALUES ('2009-09-23 12:00:00'), ('2009-09-23 12:00:00'),
('2009-09-23 12:00:00');
+INSERT INTO t2 VALUES ('2009-09-20 12:00:00'), ('2009-09-20 12:00:00'),
+('2009-09-20 12:00:00');
+INSERT INTO t2 VALUES ('2009-09-21 12:00:00'), ('2009-09-21 12:00:00'),
+('2009-09-21 12:00:00');
# DATE vs DATE
EXPLAIN
SELECT * FROM t1 WHERE a >= '2009/09/23';
@@ -1565,7 +1616,7 @@ str_to_date('2007-10-00', '%Y-%m-%d') >= '' AND
str_to_date('2007-10-00', '%Y-%m-%d') <= '2007/10/20'
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT str_to_date('2007-20-00', '%Y-%m-%d') >= '2007/10/20' AND
str_to_date('2007-20-00', '%Y-%m-%d') <= '';
str_to_date('2007-20-00', '%Y-%m-%d') >= '2007/10/20' AND
@@ -1580,7 +1631,7 @@ SELECT str_to_date('2007-10-00', '%Y-%m-%d') BETWEEN '' AND '2007/10/20';
str_to_date('2007-10-00', '%Y-%m-%d') BETWEEN '' AND '2007/10/20'
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT str_to_date('2007-20-00', '%Y-%m-%d') BETWEEN '2007/10/20' AND '';
str_to_date('2007-20-00', '%Y-%m-%d') BETWEEN '2007/10/20' AND ''
NULL
@@ -1658,7 +1709,8 @@ DROP TABLE t1;
#
CREATE TABLE t1(pk INT PRIMARY KEY, i4 INT);
CREATE UNIQUE INDEX i4_uq ON t1(i4);
-INSERT INTO t1 VALUES (1,10), (2,20), (3,30);
+INSERT INTO t1 VALUES
+(1,10), (2,20), (3,30), (4,40), (5,50), (6,60), (7,70), (8,80);
EXPLAIN
SELECT * FROM t1 WHERE i4 BETWEEN 10 AND 10;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1674,14 +1726,13 @@ SELECT * FROM t1 WHERE 10 BETWEEN i4 AND i4;
pk i4
1 10
EXPLAIN
-SELECT * FROM t1 WHERE 10 BETWEEN 10 AND i4;
+SELECT * FROM t1 WHERE 70 BETWEEN 70 AND i4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range i4_uq i4_uq 5 NULL 3 Using index condition; Rowid-ordered scan
-SELECT * FROM t1 WHERE 10 BETWEEN 10 AND i4;
+1 SIMPLE t1 range i4_uq i4_uq 5 NULL 2 Using index condition; Rowid-ordered scan
+SELECT * FROM t1 WHERE 70 BETWEEN 70 AND i4;
pk i4
-1 10
-2 20
-3 30
+7 70
+8 80
EXPLAIN
SELECT * FROM t1 WHERE 10 BETWEEN i4 AND 10;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1692,12 +1743,17 @@ pk i4
EXPLAIN
SELECT * FROM t1 WHERE 10 BETWEEN 10 AND 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 3
+1 SIMPLE t1 ALL NULL NULL NULL NULL 8
SELECT * FROM t1 WHERE 10 BETWEEN 10 AND 10;
pk i4
1 10
2 20
3 30
+4 40
+5 50
+6 60
+7 70
+8 80
EXPLAIN
SELECT * FROM t1 WHERE 10 BETWEEN 11 AND 11;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1717,14 +1773,13 @@ id select_type table type possible_keys key key_len ref rows Extra
SELECT * FROM t1 WHERE i4 BETWEEN 100 AND 0;
pk i4
EXPLAIN
-SELECT * FROM t1 WHERE i4 BETWEEN 10 AND 99999999999999999;
+SELECT * FROM t1 WHERE i4 BETWEEN 70 AND 99999999999999999;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range i4_uq i4_uq 5 NULL 2 Using index condition; Rowid-ordered scan
-SELECT * FROM t1 WHERE i4 BETWEEN 10 AND 99999999999999999;
+SELECT * FROM t1 WHERE i4 BETWEEN 70 AND 99999999999999999;
pk i4
-1 10
-2 20
-3 30
+7 70
+8 80
EXPLAIN
SELECT * FROM t1 WHERE i4 BETWEEN 999999999999999 AND 30;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1734,7 +1789,7 @@ pk i4
EXPLAIN
SELECT * FROM t1 WHERE i4 BETWEEN 10 AND '20';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range i4_uq i4_uq 5 NULL 1 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 range i4_uq i4_uq 5 NULL 2 Using index condition; Rowid-ordered scan
SELECT * FROM t1 WHERE i4 BETWEEN 10 AND '20';
pk i4
1 10
@@ -1742,14 +1797,14 @@ pk i4
EXPLAIN
SELECT * FROM t1, t1 as t2 WHERE t2.pk BETWEEN t1.i4 AND t1.i4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL i4_uq NULL NULL NULL 3
+1 SIMPLE t1 ALL i4_uq NULL NULL NULL 8
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.i4 1 Using index condition
SELECT * FROM t1, t1 as t2 WHERE t2.pk BETWEEN t1.i4 AND t1.i4;
pk i4 pk i4
EXPLAIN
SELECT * FROM t1, t1 as t2 WHERE t1.i4 BETWEEN t2.pk AND t2.pk;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL i4_uq NULL NULL NULL 3
+1 SIMPLE t1 ALL i4_uq NULL NULL NULL 8
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.i4 1 Using index condition
SELECT * FROM t1, t1 as t2 WHERE t1.i4 BETWEEN t2.pk AND t2.pk;
pk i4 pk i4
@@ -1888,6 +1943,7 @@ alter table t1 add key2 int not null, add index i2(key2);
update t1 set key2=key1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
create table t2 (a int);
insert into t2 values (1),(2),(3),(4),(5),(6),(7),(8);
@@ -1895,19 +1951,23 @@ insert into t2 select a+16 from t2;
insert into t2 select a+32 from t2;
insert into t2 select a+64 from t2;
explain
-select count(*) from t2 left join t1 on (t1.key1 < 3 or t1.key1 > 1020) and t1.key2 < 1000;
+select count(*) from t2 left join t1
+on (t1.key1 < 3 or t1.key1 between 920 and 930) and t1.key2 < 1000;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 64
-1 SIMPLE t1 range i1,i2 i1 4 NULL 78 Using where; Rowid-ordered scan; Using join buffer (flat, BNL join)
-select count(*) from t2 left join t1 on (t1.key1 < 3 or t1.key1 > 1020) and t1.key2 < 1000;
+1 SIMPLE t1 range i1,i2 i1 4 NULL 12 Using where; Rowid-ordered scan; Using join buffer (flat, BNL join)
+select count(*) from t2 left join t1
+on (t1.key1 < 3 or t1.key1 between 920 and 930) and t1.key2 < 1000;
count(*)
-128
+832
explain
-select count(*) from t2 left join t1 on (t1.key1 < 3 or t1.key1 > 1020) and t1.key2 < t2.a;
+select count(*) from t2 left join t1
+on (t1.key1 < 3 or t1.key1 between 920 and 930) and t1.key2 < t2.a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 64
-1 SIMPLE t1 range i1,i2 i1 4 NULL 78 Using where; Rowid-ordered scan; Using join buffer (flat, BNL join)
-select count(*) from t2 left join t1 on (t1.key1 < 3 or t1.key1 > 1020) and t1.key2 < t2.a;
+1 SIMPLE t1 range i1,i2 i1 4 NULL 12 Using where; Rowid-ordered scan; Using join buffer (flat, BNL join)
+select count(*) from t2 left join t1
+on (t1.key1 < 3 or t1.key1 between 920 and 930) and t1.key2 < t2.a;
count(*)
126
drop table t1,t2;
@@ -1921,7 +1981,7 @@ insert into t1 values (0,0,0), (2,2,0), (1,1,1), (2,2,1);
explain
select * from t1 force index (idx) where a >=1 and c <= 1 and a=b and b > 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx idx 5 NULL 3 Using where; Using index
+1 SIMPLE t1 index idx idx 15 NULL 4 Using where; Using index
select * from t1 force index (idx) where a >=1 and c <= 1 and a=b and b > 1;
a b c
2 2 0
@@ -1951,7 +2011,7 @@ INSERT INTO t100(I,J) VALUES(8,26);
EXPLAIN SELECT * FROM t100 WHERE I <> 6 OR (I <> 8 AND J = 5);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t100 range I I 10 NULL 4 Using index condition; Using where; Rowid-ordered scan
+1 SIMPLE t100 range I I 10 NULL 3 Using index condition; Using where; Rowid-ordered scan
SELECT * FROM t100 WHERE I <> 6 OR (I <> 8 AND J = 5);
K I J
@@ -2086,6 +2146,7 @@ insert into t2 select * from t2;
insert into t2 values (0, 0, 0, 0), (1, 1, 1, 1);
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
select a, b from t2 where (a, b) in ((0, 0), (1, 1));
a b
@@ -2274,10 +2335,10 @@ CREATE TABLE t1 (a INT, b INT, KEY(a));
INSERT INTO t1 (a) VALUES (10),(10),(10),(10),(10),(10),(10),(10),(10),(10),(70);
EXPLAIN SELECT * FROM t1 WHERE a<>10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 3 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 range a a 5 NULL 2 Using index condition; Rowid-ordered scan
EXPLAIN SELECT * FROM t1 WHERE 10<>a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 3 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 range a a 5 NULL 2 Using index condition; Rowid-ordered scan
SELECT * FROM t1 WHERE a<>10;
a b
70 NULL
@@ -2335,7 +2396,7 @@ insert into t1 values
# range access to t1 by 2-component keys for index idx
explain select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx idx 10 NULL 7 Using where; Rowid-ordered scan
+1 SIMPLE t1 range idx idx 10 NULL 6 Using where; Rowid-ordered scan
explain format=json select * from t1 where (a,b) IN ((2, 3),(3,3),(8,8),(7,7));
EXPLAIN
{
@@ -2348,7 +2409,7 @@ EXPLAIN
"key": "idx",
"key_length": "10",
"used_key_parts": ["a", "b"],
- "rows": 7,
+ "rows": 6,
"filtered": 100,
"attached_condition": "(t1.a,t1.b) in (<cache>((2,3)),<cache>((3,3)),<cache>((8,8)),<cache>((7,7)))",
"mrr_type": "Rowid-ordered scan"
@@ -2478,7 +2539,7 @@ EXPLAIN
"key_length": "5",
"used_key_parts": ["d"],
"rows": 3,
- "filtered": 100,
+ "filtered": 60,
"index_condition": "t2.d is not null",
"attached_condition": "(t2.d,t2.e) in (<cache>((3,3)),<cache>((7,7)),<cache>((2,2)))",
"mrr_type": "Rowid-ordered scan"
@@ -2535,8 +2596,8 @@ insert into t2 values
explain select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx idx 5 NULL 6 Using index condition; Rowid-ordered scan
-1 SIMPLE t2 ref idx1,idx2 idx1 5 test.t1.a 12 Using where
+1 SIMPLE t2 range idx1,idx2 idx1 5 NULL 8 Using index condition; Using where; Rowid-ordered scan
+1 SIMPLE t1 ref idx idx 5 test.t2.d 8
explain format=json select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1;
EXPLAIN
@@ -2544,88 +2605,88 @@ EXPLAIN
"query_block": {
"select_id": 1,
"table": {
- "table_name": "t1",
+ "table_name": "t2",
"access_type": "range",
- "possible_keys": ["idx"],
- "key": "idx",
+ "possible_keys": ["idx1", "idx2"],
+ "key": "idx1",
"key_length": "5",
- "used_key_parts": ["a"],
- "rows": 6,
- "filtered": 100,
- "index_condition": "t1.a is not null",
+ "used_key_parts": ["d"],
+ "rows": 8,
+ "filtered": 14.423,
+ "index_condition": "t2.d is not null",
+ "attached_condition": "(t2.d,t2.e) in (<cache>((3,3)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1",
"mrr_type": "Rowid-ordered scan"
},
"table": {
- "table_name": "t2",
+ "table_name": "t1",
"access_type": "ref",
- "possible_keys": ["idx1", "idx2"],
- "key": "idx1",
+ "possible_keys": ["idx"],
+ "key": "idx",
"key_length": "5",
- "used_key_parts": ["d"],
- "ref": ["test.t1.a"],
- "rows": 12,
- "filtered": 100,
- "attached_condition": "(t1.a,t2.e) in (<cache>((3,3)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1"
+ "used_key_parts": ["a"],
+ "ref": ["test.t2.d"],
+ "rows": 8,
+ "filtered": 100
}
}
}
select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1;
a b c d e f
+3 2 uuuw 3 3 i
+3 2 uuua 3 3 i
3 3 zzzz 3 3 i
-3 3 zzzz 3 3 i
-7 8 xxxxx 7 7 h
-7 7 xxxyy 7 7 h
3 3 zyxw 3 3 i
-3 3 zyxw 3 3 i
-3 2 uuuw 3 3 i
-3 2 uuuw 3 3 i
-3 3 zzza 3 3 i
3 3 zzza 3 3 i
-7 8 xxxxa 7 7 h
-7 7 xxxya 7 7 h
3 3 zyxa 3 3 i
-3 3 zyxa 3 3 i
-3 2 uuua 3 3 i
+7 7 xxxyy 7 7 h
+7 7 xxxya 7 7 h
+7 8 xxxxx 7 7 h
+7 8 xxxxa 7 7 h
+3 2 uuuw 3 3 i
3 2 uuua 3 3 i
+3 3 zzzz 3 3 i
+3 3 zyxw 3 3 i
+3 3 zzza 3 3 i
+3 3 zyxa 3 3 i
prepare stmt from "select * from t1,t2
where a = d and (a,e) in ((3,3),(7,7),(8,8)) and length(f) = 1";
execute stmt;
a b c d e f
+3 2 uuuw 3 3 i
+3 2 uuua 3 3 i
3 3 zzzz 3 3 i
-3 3 zzzz 3 3 i
-7 8 xxxxx 7 7 h
-7 7 xxxyy 7 7 h
-3 3 zyxw 3 3 i
3 3 zyxw 3 3 i
-3 2 uuuw 3 3 i
-3 2 uuuw 3 3 i
3 3 zzza 3 3 i
-3 3 zzza 3 3 i
-7 8 xxxxa 7 7 h
-7 7 xxxya 7 7 h
-3 3 zyxa 3 3 i
3 3 zyxa 3 3 i
+7 7 xxxyy 7 7 h
+7 7 xxxya 7 7 h
+7 8 xxxxx 7 7 h
+7 8 xxxxa 7 7 h
+3 2 uuuw 3 3 i
3 2 uuua 3 3 i
-3 2 uuua 3 3 i
+3 3 zzzz 3 3 i
+3 3 zyxw 3 3 i
+3 3 zzza 3 3 i
+3 3 zyxa 3 3 i
execute stmt;
a b c d e f
+3 2 uuuw 3 3 i
+3 2 uuua 3 3 i
3 3 zzzz 3 3 i
-3 3 zzzz 3 3 i
-7 8 xxxxx 7 7 h
-7 7 xxxyy 7 7 h
3 3 zyxw 3 3 i
-3 3 zyxw 3 3 i
-3 2 uuuw 3 3 i
-3 2 uuuw 3 3 i
-3 3 zzza 3 3 i
3 3 zzza 3 3 i
-7 8 xxxxa 7 7 h
-7 7 xxxya 7 7 h
3 3 zyxa 3 3 i
-3 3 zyxa 3 3 i
-3 2 uuua 3 3 i
+7 7 xxxyy 7 7 h
+7 7 xxxya 7 7 h
+7 8 xxxxx 7 7 h
+7 8 xxxxa 7 7 h
+3 2 uuuw 3 3 i
3 2 uuua 3 3 i
+3 3 zzzz 3 3 i
+3 3 zyxw 3 3 i
+3 3 zzza 3 3 i
+3 3 zyxa 3 3 i
deallocate prepare stmt;
insert into t1 select * from t1;
# join order: (t2,t1) with ref access of t1
@@ -2633,7 +2694,7 @@ insert into t1 select * from t1;
explain select * from t1,t2
where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range idx1,idx2 idx2 5 NULL 6 Using where; Rowid-ordered scan
+1 SIMPLE t2 range idx1,idx2 idx1 5 NULL 7 Using index condition; Using where; Rowid-ordered scan
1 SIMPLE t1 ref idx idx 5 test.t2.d 11
explain format=json select * from t1,t2
where a = d and (a,e) in ((4,4),(7,7),(8,8)) and length(f) = 1;
@@ -2645,12 +2706,13 @@ EXPLAIN
"table_name": "t2",
"access_type": "range",
"possible_keys": ["idx1", "idx2"],
- "key": "idx2",
+ "key": "idx1",
"key_length": "5",
- "used_key_parts": ["e"],
- "rows": 6,
- "filtered": 100,
- "attached_condition": "(t2.d,t2.e) in (<cache>((4,4)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1 and t2.d is not null",
+ "used_key_parts": ["d"],
+ "rows": 7,
+ "filtered": 6.7308,
+ "index_condition": "t2.d is not null",
+ "attached_condition": "(t2.d,t2.e) in (<cache>((4,4)),<cache>((7,7)),<cache>((8,8))) and octet_length(t2.f) = 1",
"mrr_type": "Rowid-ordered scan"
},
"table": {
@@ -2838,7 +2900,7 @@ explain select * from t1,t2
where a = d and (a,2) in ((2,2),(7,7),(8,8)) and
length(c) = 1 and length(f) = 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx idx 5 NULL 13 Using index condition; Using where; Rowid-ordered scan
+1 SIMPLE t1 range idx idx 5 NULL 12 Using index condition; Using where; Rowid-ordered scan
1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where
explain format=json select * from t1,t2
where a = d and (a,2) in ((2,2),(7,7),(8,8)) and
@@ -2854,7 +2916,7 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
- "rows": 13,
+ "rows": 12,
"filtered": 100,
"index_condition": "t1.a is not null",
"attached_condition": "(t1.a,2) in (<cache>((2,2)),<cache>((7,7)),<cache>((8,8))) and octet_length(t1.c) = 1",
@@ -2915,7 +2977,7 @@ where id = 1 and a = d and
length(c) = 1 and length(f) = 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t3 const PRIMARY PRIMARY 4 const 1
-1 SIMPLE t1 range idx idx 5 NULL 13 Using index condition; Using where; Rowid-ordered scan
+1 SIMPLE t1 range idx idx 5 NULL 12 Using index condition; Using where; Rowid-ordered scan
1 SIMPLE t2 ref idx3 idx3 5 test.t1.a 3 Using where
explain format=json select * from t1,t2,t3
where id = 1 and a = d and
@@ -2943,7 +3005,7 @@ EXPLAIN
"key": "idx",
"key_length": "5",
"used_key_parts": ["a"],
- "rows": 13,
+ "rows": 12,
"filtered": 100,
"index_condition": "t1.a is not null",
"attached_condition": "(t1.a,1 + 1) in (<cache>((2,2)),<cache>((7,7)),<cache>((8,8))) and octet_length(t1.c) = 1",
@@ -3005,6 +3067,7 @@ insert into t1 select a+15, concat(b,'yy') from t1;
insert into t1 select a+100, concat(b,'xx') from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select cast(count(a)/count(distinct a) as unsigned) as rec_per_key from t1;
rec_per_key
@@ -3023,7 +3086,7 @@ a b
set eq_range_index_dive_limit=2;
explain select * from t1 where a in (8, 15, 31, 1, 9);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx idx 5 NULL 10 Using index condition; Rowid-ordered scan
+1 SIMPLE t1 range idx idx 5 NULL 5 Using index condition; Rowid-ordered scan
select * from t1 where a in (8, 15, 31, 1, 9);
a b
1 yy
@@ -3034,6 +3097,29 @@ a b
set eq_range_index_dive_limit=default;
drop table t1;
#
+# MDEV-18551: New defaults for eq_range_index_dive_limit
+#
+create table ten(a int);
+insert into ten values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1(a int, key(a));
+insert into t1 select A.a + B.a*10 + C.a*100 from ten A, ten B,ten C;
+insert into t1 select 1 from ten A, ten B,ten C;
+create table t2(a int, key(a));
+insert into t2 select A.a + B.a*10 + C.a*100 from ten A, ten B,ten C where A.a + B.a*10 + C.a*100 < 199;
+# expected type=range, rows=1487 , reason=using index dives
+analyze SELECT * FROM t1 where a in (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198);
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t1 range a a 5 NULL 1487 1199.00 100.00 100.00 Using where; Using index
+insert into t2 values (200),(201);
+# expected type=range, rows=201 , reason=using index statistics
+analyze SELECT * FROM t1 where a in (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,200,201);
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t1 range a a 5 NULL 201 1201.00 100.00 100.00 Using where; Using index
+drop table t1,ten,t2;
+#
# End of 10.2 tests
#
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
set optimizer_switch=@mrr_icp_extra_tmp;
diff --git a/mysql-test/main/range_mrr_icp.test b/mysql-test/main/range_mrr_icp.test
index 29e7af321db..4c6983c742f 100644
--- a/mysql-test/main/range_mrr_icp.test
+++ b/mysql-test/main/range_mrr_icp.test
@@ -1,5 +1,6 @@
set @mrr_icp_extra_tmp=@@optimizer_switch;
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+set optimizer_switch='rowid_filter=off';
--source range.test
diff --git a/mysql-test/main/range_vs_index_merge.result b/mysql-test/main/range_vs_index_merge.result
index bc46a4fdd0b..65ac003b427 100644
--- a/mysql-test/main/range_vs_index_merge.result
+++ b/mysql-test/main/range_vs_index_merge.result
@@ -64,7 +64,7 @@ Country IN ('CAN', 'ARG') AND ID BETWEEN 120 AND 130 OR
Country <= 'ALB' AND Name LIKE 'L%' OR
ID BETWEEN 3807 AND 3810;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,PRIMARY,Country 35,4,3 NULL 31 Using sort_union(Name,PRIMARY,Country); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,PRIMARY,Country 35,4,3 NULL 30 Using sort_union(Name,PRIMARY,Country); Using where
EXPLAIN
SELECT * FROM City
WHERE (Population > 101000 AND Population < 115000);
@@ -175,7 +175,7 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN
SELECT * FROM City WHERE (Name < 'Bb');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range Name Name 35 NULL 208 Using index condition
+1 SIMPLE City range Name Name 35 NULL 207 Using index condition
EXPLAIN
SELECT * FROM City WHERE (Country > 'A' AND Country < 'B');
id select_type table type possible_keys key key_len ref rows Extra
@@ -327,11 +327,11 @@ ID Name Country Population
EXPLAIN
SELECT * FROM City WHERE (ID < 10) OR (ID BETWEEN 100 AND 110);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range PRIMARY PRIMARY 4 NULL 21 Using index condition; Using where
+1 SIMPLE City range PRIMARY PRIMARY 4 NULL 20 Using index condition; Using where
EXPLAIN
SELECT * FROM City WHERE (ID < 200) OR (ID BETWEEN 100 AND 200);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range PRIMARY PRIMARY 4 NULL 201 Using index condition; Using where
+1 SIMPLE City range PRIMARY PRIMARY 4 NULL 200 Using index condition; Using where
EXPLAIN
SELECT * FROM City WHERE (ID < 600) OR (ID BETWEEN 900 AND 1500);
id select_type table type possible_keys key key_len ref rows Extra
@@ -339,11 +339,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN
SELECT * FROM City WHERE Country > 'A' AND Country < 'ARG';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range Country Country 3 NULL 19 Using index condition
+1 SIMPLE City range Country Country 3 NULL 20 Using index condition
EXPLAIN
SELECT * FROM City WHERE Name LIKE 'H%' OR Name LIKE 'P%' ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range Name Name 35 NULL 222 Using index condition; Using where
+1 SIMPLE City range Name Name 35 NULL 223 Using index condition; Using where
EXPLAIN
SELECT * FROM City WHERE Name LIKE 'Ha%' OR Name LIKE 'Pa%' ;
id select_type table type possible_keys key key_len ref rows Extra
@@ -354,21 +354,21 @@ WHERE ((ID < 10) AND (Name LIKE 'H%' OR (Country > 'A' AND Country < 'ARG')))
OR ((ID BETWEEN 100 AND 110) AND
(Name LIKE 'P%' OR (Population > 103000 AND Population < 104000)));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range PRIMARY,Population,Country,Name PRIMARY 4 NULL 21 Using index condition; Using where
+1 SIMPLE City range PRIMARY,Population,Country,Name PRIMARY 4 NULL 20 Using index condition; Using where
EXPLAIN
SELECT * FROM City
WHERE ((ID < 800) AND (Name LIKE 'Ha%' OR (Country > 'A' AND Country < 'ARG')))
OR ((ID BETWEEN 900 AND 1500) AND
-(Name LIKE 'Pa%' OR (Population > 103000 AND Population < 104000)));
+(Name LIKE 'Pa%' OR (Population > 103000 AND Population < 105000)));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,Country,Population 35,3,4 NULL 128 Using sort_union(Name,Country,Population); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,Country,Population 35,3,4 NULL 151 Using sort_union(Name,Country,Population); Using where
EXPLAIN
SELECT * FROM City
WHERE ((ID < 200) AND (Name LIKE 'Ha%' OR (Country > 'A' AND Country < 'ARG')))
OR ((ID BETWEEN 100 AND 200) AND
-(Name LIKE 'Pa%' OR (Population > 103000 AND Population < 104000)));
+(Name LIKE 'Pa%' OR (Population > 103200 AND Population < 104000)));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,Country,Population 35,3,4 NULL 128 Using sort_union(Name,Country,Population); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,Country,Population 35,3,4 NULL 124 Using sort_union(Name,Country,Population); Using where
SELECT * FROM City USE INDEX ()
WHERE ((ID < 10) AND (Name LIKE 'H%' OR (Country > 'A' AND Country < 'ARG')))
OR ((ID BETWEEN 100 AND 110) AND
@@ -584,11 +584,11 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN
SELECT * FROM City WHERE Country < 'C';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range Country Country 3 NULL 436 Using index condition
+1 SIMPLE City range Country Country 3 NULL 435 Using index condition
EXPLAIN
SELECT * FROM City WHERE Country < 'AGO';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range Country Country 3 NULL 6 Using index condition
+1 SIMPLE City range Country Country 3 NULL 5 Using index condition
EXPLAIN
SELECT * FROM City WHERE Name BETWEEN 'P' AND 'S';
id select_type table type possible_keys key key_len ref rows Extra
@@ -616,7 +616,7 @@ WHERE ((Population > 101000 AND Population < 102000) AND
((ID BETWEEN 3400 AND 3800) AND
(Country < 'AGO' OR Name LIKE 'Pa%'));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country,Name Country,Name,Population 3,35,4 NULL 84 Using sort_union(Country,Name,Population); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country,Name Country,Name,Population 3,35,4 NULL 83 Using sort_union(Country,Name,Population); Using where
EXPLAIN
SELECT * FROM City
WHERE ((Population > 101000 AND Population < 110000) AND
@@ -624,7 +624,7 @@ WHERE ((Population > 101000 AND Population < 110000) AND
((ID BETWEEN 3790 AND 3800) AND
(Country < 'C' OR Name LIKE 'P%'));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country,Name Country,Name,PRIMARY 3,35,4 NULL 56 Using sort_union(Country,Name,PRIMARY); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country,Name Country,Name,PRIMARY 3,35,4 NULL 55 Using sort_union(Country,Name,PRIMARY); Using where
SELECT * FROM City USE INDEX ()
WHERE ((Population > 101000 AND Population < 102000) AND
(Country < 'C' OR Name BETWEEN 'P' AND 'S')) OR
@@ -679,7 +679,7 @@ CREATE INDEX CountryPopulation ON City(Country,Population);
EXPLAIN
SELECT * FROM City WHERE Name LIKE 'Pas%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range Name Name 35 NULL 5 Using index condition
+1 SIMPLE City range Name Name 35 NULL 4 Using index condition
EXPLAIN
SELECT * FROM City WHERE Name LIKE 'P%';
id select_type table type possible_keys key key_len ref rows Extra
@@ -695,19 +695,19 @@ id select_type table type possible_keys key key_len ref rows Extra
EXPLAIN
SELECT * FROM City WHERE Country='FIN';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City ref Country,CountryPopulation Country 3 const 6 Using index condition
+1 SIMPLE City ref Country,CountryPopulation Country 3 const 7 Using index condition
EXPLAIN
SELECT * FROM City
WHERE ((Population > 101000 AND Population < 103000) OR Name LIKE 'Pas%')
AND Country='USA';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Population,Country,Name,CountryPopulation CountryPopulation,Name 7,35 NULL 15 Using sort_union(CountryPopulation,Name); Using where
+1 SIMPLE City index_merge Population,Country,Name,CountryPopulation CountryPopulation,Name 7,35 NULL 14 Using sort_union(CountryPopulation,Name); Using where
EXPLAIN
SELECT * FROM City
WHERE ((Population > 101000 AND Population < 103000) OR Name LIKE 'P%')
AND Country='FIN';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City ref Population,Country,Name,CountryPopulation Country 3 const 6 Using index condition; Using where
+1 SIMPLE City ref Population,Country,Name,CountryPopulation Country 3 const 7 Using index condition; Using where
SELECT * FROM City
WHERE ((Population > 101000 AND Population < 103000) OR Name LIKE 'Pas%')
AND Country='USA';
@@ -1079,7 +1079,7 @@ EXPLAIN SELECT Name, Country, Population FROM City WHERE
(Name='Samara' AND Country='RUS') OR
(Name='Seattle' AND Country='USA');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge Country,CountryPopulation,CountryName,CityName CountryName,CityName 38,35 NULL 28 Using sort_union(CountryName,CityName); Using where
+1 SIMPLE City range Country,CountryPopulation,CountryName,CityName CountryName 38 NULL 28 Using index condition; Using where
SELECT Name, Country, Population FROM City WHERE
(Name='Manila' AND Country='PHL') OR
(Name='Addis Abeba' AND Country='ETH') OR
@@ -1109,33 +1109,33 @@ SELECT Name, Country, Population FROM City WHERE
(Name='Samara' AND Country='RUS') OR
(Name='Seattle' AND Country='USA');
Name Country Population
+Toronto CAN 688275
+Vancouver CAN 514008
+Basel CHE 166700
+Peking CHN 7472000
+Praha CZE 1181126
+Dresden DEU 476668
Addis Abeba ETH 2495000
-Manila PHL 1581082
+Paris FRA 2125246
Jakarta IDN 9604900
-Delhi IND 7206704
Bangalore IND 2660088
+Delhi IND 7206704
Teheran IRN 6758845
Roma ITA 2643581
Venezia ITA 277305
Tokyo JPN 7980230
-Toronto CAN 688275
-Vancouver CAN 514008
-Peking CHN 7472000
Seoul KOR 9981619
Kaunas LTU 412639
Rabat MAR 623457
Tijuana MEX 1212232
Lagos NGA 1518000
-Paris FRA 2125246
-Dresden DEU 476668
+Manila PHL 1581082
+Samara RUS 1156100
Dakar SEN 785071
-Basel CHE 166700
-Praha CZE 1181126
Ankara TUR 3038159
Lugansk UKR 469000
-Caracas VEN 1975294
-Samara RUS 1156100
Seattle USA 563374
+Caracas VEN 1975294
set optimizer_switch='index_merge=off';
EXPLAIN SELECT Name, Country, Population FROM City WHERE
(Name='Manila' AND Country='PHL') OR
@@ -1166,7 +1166,7 @@ EXPLAIN SELECT Name, Country, Population FROM City WHERE
(Name='Samara' AND Country='RUS') OR
(Name='Seattle' AND Country='USA');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range Country,CountryPopulation,CountryName,CityName CountryName 38 NULL 29 Using index condition; Using where
+1 SIMPLE City range Country,CountryPopulation,CountryName,CityName CountryName 38 NULL 28 Using index condition; Using where
SELECT Name, Country, Population FROM City WHERE
(Name='Manila' AND Country='PHL') OR
(Name='Addis Abeba' AND Country='ETH') OR
@@ -1343,6 +1343,38 @@ Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
+set @tmp_mdev585=@@optimizer_use_condition_selectivity;
+set optimizer_use_condition_selectivity=1;
+EXPLAIN
+SELECT * FROM City
+WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'H'))
+AND (Population >= 100000 AND Population < 120000)
+ORDER BY Population LIMIT 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE City range Country,Name,Population Population 4 NULL # Using where
+FLUSH STATUS;
+SELECT * FROM City
+WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'H'))
+AND (Population >= 100000 AND Population < 120000)
+ORDER BY Population LIMIT 5;
+ID Name Country Population
+519 Worthing GBR 100000
+638 al-Arish EGY 100447
+518 Basildon GBR 100924
+707 Marbella ESP 101144
+3792 Tartu EST 101246
+SHOW STATUS LIKE 'Handler_read_%';
+Variable_name Value
+Handler_read_first 0
+Handler_read_key 1
+Handler_read_last 0
+Handler_read_next 59
+Handler_read_prev 0
+Handler_read_retry 0
+Handler_read_rnd 0
+Handler_read_rnd_deleted 0
+Handler_read_rnd_next 0
+set optimizer_use_condition_selectivity=@tmp_mdev585;
set optimizer_switch='index_merge=off';
EXPLAIN
SELECT * FROM City
@@ -1430,6 +1462,7 @@ insert into t1(account_id, login, home_state, work_state)
select 1, 'pw', 'ak', 'ak' from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select count(*) from t1 where account_id = 1;
count(*)
@@ -1491,6 +1524,7 @@ insert into t1 (c2, c3, c4, c5, cp)
select c2, c3, c4, c5, cp from t1 where cp = 4;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain
select * from t1 where (c2=1 and c3=1) or (c4=2 and c5=1);
@@ -1543,6 +1577,7 @@ insert into t1 (c2,c3,c4) select c2,c3,3 from t1 where c2 != 'a';
insert into t1 (c2,c3,c4) select c2,c3,4 from t1 where c2 != 'a';
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select count(*) from t1 where (c2='e' OR c3='q');
count(*)
@@ -1582,6 +1617,7 @@ update t1 set c1=lpad(id+1000, 12, ' '), c2=lpad(id+10000, 15, ' ');
alter table t1 add unique index (c1), add unique index (c2), add index (c3);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
explain
select * from t1 where (c1=' 100000' or c2=' 2000000');
@@ -1620,7 +1656,7 @@ SELECT COUNT(*) FROM t1
WHERE c = 'i' OR b IN ( 'Arkansas' , 'd' , 'pdib' , 'can' ) OR
(pk BETWEEN 120 AND 79 + 255 OR a IN ( 4 , 179 , 1 ) ) AND a > 8 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge PRIMARY,idx1,idx2,idx3 idx3,idx2,PRIMARY,idx1 67,13,4,3 NULL 8 Using sort_union(idx3,idx2,PRIMARY,idx1); Using where
+1 SIMPLE t1 index_merge PRIMARY,idx1,idx2,idx3 idx3,idx2,PRIMARY,idx1 67,13,4,3 NULL 9 Using sort_union(idx3,idx2,PRIMARY,idx1); Using where
DROP TABLE t1;
CREATE TABLE t1 (
f1 int, f2 int, f3 int, f4 int, f5 int,
@@ -1639,6 +1675,10 @@ f1 int, f2 int, f3 int, f4 int,
PRIMARY KEY (f1), KEY (f3), KEY (f4)
);
INSERT INTO t1 VALUES (9,0,2,6), (9930,0,0,NULL);
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
SET SESSION optimizer_switch='index_merge_intersection=off';
SET SESSION optimizer_switch='index_merge_sort_union=off';
SET SESSION optimizer_switch='index_merge_union=off';
@@ -1659,7 +1699,7 @@ SELECT * FROM t1 FORCE KEY (PRIMARY,f3,f4)
WHERE ( f3 = 1 OR f1 = 7 ) AND f1 < 10
OR f3 BETWEEN 2 AND 2 AND ( f3 = 1 OR f4 != 1 );
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY,f3,f4 NULL NULL NULL 2 Using where
+1 SIMPLE t1 index_merge PRIMARY,f3,f4 f3,PRIMARY,f3 5,4,5 NULL 3 Using union(f3,PRIMARY,f3); Using where
SELECT * FROM t1 FORCE KEY (PRIMARY,f3,f4)
WHERE ( f3 = 1 OR f1 = 7 ) AND f1 < 10
OR f3 BETWEEN 2 AND 2 AND ( f3 = 1 OR f4 != 1 );
@@ -1670,6 +1710,10 @@ INSERT INTO t1 VALUES
(95,0,5,6), (9935,0,5,5), (96,0,6,6), (9936,0,6,6),
(97,0,7,6), (9937,0,7,7), (98,0,8,6), (9938,0,8,8),
(99,0,9,6), (9939,0,9,9);
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
SET SESSION optimizer_switch='index_merge_union=off';
EXPLAIN
SELECT * FROM t1 FORCE KEY (PRIMARY,f3,f4)
@@ -1713,6 +1757,10 @@ PRIMARY KEY(b), INDEX idx1(d), INDEX idx2(d,b,c)
INSERT INTO t1 VALUES
(0,58,7,7),(0,63,2,0),(0,64,186,8),(0,65,1,-2), (0,71,190,-3),
(0,72,321,-7),(0,73,0,3),(0,74,5,25),(0,75,5,3);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
SET SESSION optimizer_switch='index_merge_sort_union=off';
EXPLAIN
SELECT * FROM t1
@@ -1762,7 +1810,7 @@ EXPLAIN
SELECT * FROM t1
WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR a!=2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL PRIMARY,idx NULL NULL NULL 2 Using where
+1 SIMPLE t1 range PRIMARY,idx PRIMARY 0 NULL 2 Using index condition; Using where
SELECT * FROM t1
WHERE a BETWEEN 4 AND 5 AND b IN (255,4) OR a IN (2,14,25) OR a!=2;
a b
@@ -1790,12 +1838,16 @@ INSERT INTO t1 VALUES
(7,'Pennsylvania','Harrisburg'),
(8,'Virginia','Richmond')
;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
EXPLAIN
SELECT * FROM t1 FORCE KEY (state,capital)
WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND id != 9
OR ( capital >= 'Topeka' OR state = 'Kansas' ) AND state != 'Texas';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range state,capital state 71 NULL 12 Using index condition; Using where
+1 SIMPLE t1 range state,capital state 71 NULL 8 Using index condition; Using where
SELECT * FROM t1 FORCE KEY (state,capital)
WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND id != 9
OR ( capital >= 'Topeka' OR state = 'Kansas' ) AND state != 'Texas';
@@ -1886,6 +1938,6 @@ Country='POL' AND Name IN ('Warszawa', 'Wroclaw') OR
Country='NOR' AND Name IN ('Oslo', 'Bergen') OR
Country='ITA' AND Name IN ('Napoli', 'Venezia');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range CountryName,Name CountryName 38 NULL 22 Using index condition; Using where
+1 SIMPLE City range CountryName,Name CountryName 38 NULL 20 Using index condition; Using where
DROP DATABASE world;
set session optimizer_switch='index_merge_sort_intersection=default';
diff --git a/mysql-test/main/range_vs_index_merge.test b/mysql-test/main/range_vs_index_merge.test
index 84b87579e85..2c43027d757 100644
--- a/mysql-test/main/range_vs_index_merge.test
+++ b/mysql-test/main/range_vs_index_merge.test
@@ -229,13 +229,13 @@ EXPLAIN
SELECT * FROM City
WHERE ((ID < 800) AND (Name LIKE 'Ha%' OR (Country > 'A' AND Country < 'ARG')))
OR ((ID BETWEEN 900 AND 1500) AND
- (Name LIKE 'Pa%' OR (Population > 103000 AND Population < 104000)));
+ (Name LIKE 'Pa%' OR (Population > 103000 AND Population < 105000)));
EXPLAIN
SELECT * FROM City
WHERE ((ID < 200) AND (Name LIKE 'Ha%' OR (Country > 'A' AND Country < 'ARG')))
OR ((ID BETWEEN 100 AND 200) AND
- (Name LIKE 'Pa%' OR (Population > 103000 AND Population < 104000)));
+ (Name LIKE 'Pa%' OR (Population > 103200 AND Population < 104000)));
# The following 6 queries check that the plans
@@ -718,6 +718,23 @@ SELECT * FROM City
ORDER BY Population LIMIT 5;
SHOW STATUS LIKE 'Handler_read_%';
+set @tmp_mdev585=@@optimizer_use_condition_selectivity;
+set optimizer_use_condition_selectivity=1;
+--replace_column 9 #
+EXPLAIN
+SELECT * FROM City
+ WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'H'))
+ AND (Population >= 100000 AND Population < 120000)
+ORDER BY Population LIMIT 5;
+
+FLUSH STATUS;
+SELECT * FROM City
+ WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'H'))
+ AND (Population >= 100000 AND Population < 120000)
+ORDER BY Population LIMIT 5;
+SHOW STATUS LIKE 'Handler_read_%';
+
+set optimizer_use_condition_selectivity=@tmp_mdev585;
set optimizer_switch='index_merge=off';
@@ -1069,6 +1086,8 @@ CREATE TABLE t1 (
INSERT INTO t1 VALUES (9,0,2,6), (9930,0,0,NULL);
+analyze table t1;
+
SET SESSION optimizer_switch='index_merge_intersection=off';
SET SESSION optimizer_switch='index_merge_sort_union=off';
@@ -1101,6 +1120,8 @@ INSERT INTO t1 VALUES
(97,0,7,6), (9937,0,7,7), (98,0,8,6), (9938,0,8,8),
(99,0,9,6), (9939,0,9,9);
+analyze table t1;
+
SET SESSION optimizer_switch='index_merge_union=off';
EXPLAIN
@@ -1158,6 +1179,8 @@ INSERT INTO t1 VALUES
(0,58,7,7),(0,63,2,0),(0,64,186,8),(0,65,1,-2), (0,71,190,-3),
(0,72,321,-7),(0,73,0,3),(0,74,5,25),(0,75,5,3);
+ANALYZE TABLE t1;
+
SET SESSION optimizer_switch='index_merge_sort_union=off';
EXPLAIN
SELECT * FROM t1
@@ -1230,6 +1253,8 @@ INSERT INTO t1 VALUES
(7,'Pennsylvania','Harrisburg'),
(8,'Virginia','Richmond')
;
+
+ANALYZE TABLE t1;
EXPLAIN
SELECT * FROM t1 FORCE KEY (state,capital)
diff --git a/mysql-test/main/range_vs_index_merge_innodb.result b/mysql-test/main/range_vs_index_merge_innodb.result
index ce90f522d6e..061fcab15b4 100644
--- a/mysql-test/main/range_vs_index_merge_innodb.result
+++ b/mysql-test/main/range_vs_index_merge_innodb.result
@@ -1,4 +1,9 @@
SET SESSION STORAGE_ENGINE='InnoDB';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
DROP TABLE IF EXISTS t1,t2,t3,t4;
DROP DATABASE IF EXISTS world;
set names utf8;
@@ -360,16 +365,16 @@ EXPLAIN
SELECT * FROM City
WHERE ((ID < 800) AND (Name LIKE 'Ha%' OR (Country > 'A' AND Country < 'ARG')))
OR ((ID BETWEEN 900 AND 1500) AND
-(Name LIKE 'Pa%' OR (Population > 103000 AND Population < 104000)));
+(Name LIKE 'Pa%' OR (Population > 103000 AND Population < 105000)));
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,Country,PRIMARY 39,3,4 NULL 683 Using sort_union(Name,Country,PRIMARY); Using where
EXPLAIN
SELECT * FROM City
WHERE ((ID < 200) AND (Name LIKE 'Ha%' OR (Country > 'A' AND Country < 'ARG')))
OR ((ID BETWEEN 100 AND 200) AND
-(Name LIKE 'Pa%' OR (Population > 103000 AND Population < 104000)));
+(Name LIKE 'Pa%' OR (Population > 103200 AND Population < 104000)));
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,Population,PRIMARY 39,4,4 NULL 307 Using sort_union(Name,Population,PRIMARY); Using where
+1 SIMPLE City index_merge PRIMARY,Population,Country,Name Name,Population,PRIMARY 39,4,4 NULL 302 Using sort_union(Name,Population,PRIMARY); Using where
SELECT * FROM City USE INDEX ()
WHERE ((ID < 10) AND (Name LIKE 'H%' OR (Country > 'A' AND Country < 'ARG')))
OR ((ID BETWEEN 100 AND 110) AND
@@ -1080,7 +1085,7 @@ EXPLAIN SELECT Name, Country, Population FROM City WHERE
(Name='Samara' AND Country='RUS') OR
(Name='Seattle' AND Country='USA');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range Country,CountryPopulation,CountryName,CityName CountryName 38 NULL 27 Using index condition; Using where
+1 SIMPLE City index_merge Country,CountryPopulation,CountryName,CityName CountryName,CityName 38,35 NULL 27 Using sort_union(CountryName,CityName); Using where
SELECT Name, Country, Population FROM City WHERE
(Name='Manila' AND Country='PHL') OR
(Name='Addis Abeba' AND Country='ETH') OR
@@ -1110,33 +1115,33 @@ SELECT Name, Country, Population FROM City WHERE
(Name='Samara' AND Country='RUS') OR
(Name='Seattle' AND Country='USA');
Name Country Population
-Toronto CAN 688275
-Vancouver CAN 514008
-Basel CHE 166700
-Peking CHN 7472000
-Praha CZE 1181126
-Dresden DEU 476668
Addis Abeba ETH 2495000
-Paris FRA 2125246
+Manila PHL 1581082
Jakarta IDN 9604900
-Bangalore IND 2660088
Delhi IND 7206704
+Bangalore IND 2660088
Teheran IRN 6758845
Roma ITA 2643581
Venezia ITA 277305
Tokyo JPN 7980230
+Toronto CAN 688275
+Vancouver CAN 514008
+Peking CHN 7472000
Seoul KOR 9981619
Kaunas LTU 412639
Rabat MAR 623457
Tijuana MEX 1212232
Lagos NGA 1518000
-Manila PHL 1581082
-Samara RUS 1156100
+Paris FRA 2125246
+Dresden DEU 476668
Dakar SEN 785071
+Basel CHE 166700
+Praha CZE 1181126
Ankara TUR 3038159
Lugansk UKR 469000
-Seattle USA 563374
Caracas VEN 1975294
+Samara RUS 1156100
+Seattle USA 563374
set optimizer_switch='index_merge=off';
EXPLAIN SELECT Name, Country, Population FROM City WHERE
(Name='Manila' AND Country='PHL') OR
@@ -1167,7 +1172,7 @@ EXPLAIN SELECT Name, Country, Population FROM City WHERE
(Name='Samara' AND Country='RUS') OR
(Name='Seattle' AND Country='USA');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE City range Country,CountryPopulation,CountryName,CityName CountryName 38 NULL 27 Using index condition; Using where
+1 SIMPLE City range Country,CountryPopulation,CountryName,CityName CityName 35 NULL 28 Using index condition; Using where
SELECT Name, Country, Population FROM City WHERE
(Name='Manila' AND Country='PHL') OR
(Name='Addis Abeba' AND Country='ETH') OR
@@ -1197,33 +1202,33 @@ SELECT Name, Country, Population FROM City WHERE
(Name='Samara' AND Country='RUS') OR
(Name='Seattle' AND Country='USA');
Name Country Population
-Toronto CAN 688275
-Vancouver CAN 514008
-Basel CHE 166700
-Peking CHN 7472000
-Praha CZE 1181126
-Dresden DEU 476668
Addis Abeba ETH 2495000
-Paris FRA 2125246
-Jakarta IDN 9604900
+Ankara TUR 3038159
Bangalore IND 2660088
+Basel CHE 166700
+Caracas VEN 1975294
+Dakar SEN 785071
Delhi IND 7206704
-Teheran IRN 6758845
-Roma ITA 2643581
-Venezia ITA 277305
-Tokyo JPN 7980230
-Seoul KOR 9981619
+Dresden DEU 476668
+Jakarta IDN 9604900
Kaunas LTU 412639
-Rabat MAR 623457
-Tijuana MEX 1212232
Lagos NGA 1518000
+Lugansk UKR 469000
Manila PHL 1581082
+Paris FRA 2125246
+Peking CHN 7472000
+Praha CZE 1181126
+Rabat MAR 623457
+Roma ITA 2643581
Samara RUS 1156100
-Dakar SEN 785071
-Ankara TUR 3038159
-Lugansk UKR 469000
Seattle USA 563374
-Caracas VEN 1975294
+Seoul KOR 9981619
+Teheran IRN 6758845
+Tijuana MEX 1212232
+Tokyo JPN 7980230
+Toronto CAN 688275
+Vancouver CAN 514008
+Venezia ITA 277305
set optimizer_switch=@save_optimizer_switch;
#
# Bug mdev-585: range vs index-merge with ORDER BY ... LIMIT n
@@ -1344,6 +1349,38 @@ Handler_read_retry 0
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 0
+set @tmp_mdev585=@@optimizer_use_condition_selectivity;
+set optimizer_use_condition_selectivity=1;
+EXPLAIN
+SELECT * FROM City
+WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'H'))
+AND (Population >= 100000 AND Population < 120000)
+ORDER BY Population LIMIT 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE City range Country,Name,Population Population 4 NULL # Using where
+FLUSH STATUS;
+SELECT * FROM City
+WHERE ((Name > 'Ca' AND Name < 'Cf') OR (Country > 'E' AND Country < 'H'))
+AND (Population >= 100000 AND Population < 120000)
+ORDER BY Population LIMIT 5;
+ID Name Country Population
+519 Worthing GBR 100000
+638 al-Arish EGY 100447
+518 Basildon GBR 100924
+707 Marbella ESP 101144
+3792 Tartu EST 101246
+SHOW STATUS LIKE 'Handler_read_%';
+Variable_name Value
+Handler_read_first 0
+Handler_read_key 1
+Handler_read_last 0
+Handler_read_next 59
+Handler_read_prev 0
+Handler_read_retry 0
+Handler_read_rnd 0
+Handler_read_rnd_deleted 0
+Handler_read_rnd_next 0
+set optimizer_use_condition_selectivity=@tmp_mdev585;
set optimizer_switch='index_merge=off';
EXPLAIN
SELECT * FROM City
@@ -1431,6 +1468,7 @@ insert into t1(account_id, login, home_state, work_state)
select 1, 'pw', 'ak', 'ak' from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select count(*) from t1 where account_id = 1;
count(*)
@@ -1492,6 +1530,7 @@ insert into t1 (c2, c3, c4, c5, cp)
select c2, c3, c4, c5, cp from t1 where cp = 4;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain
select * from t1 where (c2=1 and c3=1) or (c4=2 and c5=1);
@@ -1544,6 +1583,7 @@ insert into t1 (c2,c3,c4) select c2,c3,3 from t1 where c2 != 'a';
insert into t1 (c2,c3,c4) select c2,c3,4 from t1 where c2 != 'a';
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select count(*) from t1 where (c2='e' OR c3='q');
count(*)
@@ -1583,6 +1623,7 @@ update t1 set c1=lpad(id+1000, 12, ' '), c2=lpad(id+10000, 15, ' ');
alter table t1 add unique index (c1), add unique index (c2), add index (c3);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain
select * from t1 where (c1=' 100000' or c2=' 2000000');
@@ -1640,6 +1681,10 @@ f1 int, f2 int, f3 int, f4 int,
PRIMARY KEY (f1), KEY (f3), KEY (f4)
);
INSERT INTO t1 VALUES (9,0,2,6), (9930,0,0,NULL);
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
SET SESSION optimizer_switch='index_merge_intersection=off';
SET SESSION optimizer_switch='index_merge_sort_union=off';
SET SESSION optimizer_switch='index_merge_union=off';
@@ -1671,6 +1716,10 @@ INSERT INTO t1 VALUES
(95,0,5,6), (9935,0,5,5), (96,0,6,6), (9936,0,6,6),
(97,0,7,6), (9937,0,7,7), (98,0,8,6), (9938,0,8,8),
(99,0,9,6), (9939,0,9,9);
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
SET SESSION optimizer_switch='index_merge_union=off';
EXPLAIN
SELECT * FROM t1 FORCE KEY (PRIMARY,f3,f4)
@@ -1714,6 +1763,10 @@ PRIMARY KEY(b), INDEX idx1(d), INDEX idx2(d,b,c)
INSERT INTO t1 VALUES
(0,58,7,7),(0,63,2,0),(0,64,186,8),(0,65,1,-2), (0,71,190,-3),
(0,72,321,-7),(0,73,0,3),(0,74,5,25),(0,75,5,3);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
SET SESSION optimizer_switch='index_merge_sort_union=off';
EXPLAIN
SELECT * FROM t1
@@ -1751,7 +1804,7 @@ SELECT * FROM t1
WHERE t1.a>300 AND t1.c!=0 AND t1.b>=350 AND t1.b<=400 AND
(t1.c=0 OR t1.a=500);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,idx PRIMARY 4 NULL 1 Using where
+1 SIMPLE t1 index PRIMARY,idx idx 10 NULL 2 Using where; Using index
SELECT * FROM t1
WHERE t1.a>300 AND t1.c!=0 AND t1.b>=350 AND t1.b<=400 AND
(t1.c=0 OR t1.a=500);
@@ -1791,12 +1844,16 @@ INSERT INTO t1 VALUES
(7,'Pennsylvania','Harrisburg'),
(8,'Virginia','Richmond')
;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
EXPLAIN
SELECT * FROM t1 FORCE KEY (state,capital)
WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND id != 9
OR ( capital >= 'Topeka' OR state = 'Kansas' ) AND state != 'Texas';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range state,capital state 71 NULL 12 Using index condition; Using where
+1 SIMPLE t1 range state,capital state 71 NULL 8 Using index condition; Using where
SELECT * FROM t1 FORCE KEY (state,capital)
WHERE ( state = 'Alabama' OR state >= 'Colorado' ) AND id != 9
OR ( capital >= 'Topeka' OR state = 'Kansas' ) AND state != 'Texas';
@@ -1890,4 +1947,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE City range CountryName,Name CountryName 38 NULL 20 Using index condition; Using where
DROP DATABASE world;
set session optimizer_switch='index_merge_sort_intersection=default';
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/main/range_vs_index_merge_innodb.test b/mysql-test/main/range_vs_index_merge_innodb.test
index 31870ccd9c6..5117ee3668a 100644
--- a/mysql-test/main/range_vs_index_merge_innodb.test
+++ b/mysql-test/main/range_vs_index_merge_innodb.test
@@ -2,6 +2,15 @@
SET SESSION STORAGE_ENGINE='InnoDB';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
--source range_vs_index_merge.test
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/main/repair.result b/mysql-test/main/repair.result
index 75d7525ee71..18a7cf509c4 100644
--- a/mysql-test/main/repair.result
+++ b/mysql-test/main/repair.result
@@ -78,7 +78,7 @@ INSERT INTO t1 VALUES
('0'),('0'),('0'),('0'),('0'),('0'),('0');
Warnings:
Error 1034 myisam_sort_buffer_size is too small. X
-Error 1034 Number of rows changed from 0 to 157
+Warning 1034 Number of rows changed from 0 to 157
SET myisam_repair_threads=2;
REPAIR TABLE t1;
Table Op Msg_type Msg_text
diff --git a/mysql-test/main/row.result b/mysql-test/main/row.result
index 7483f37970f..40d3e2640f0 100644
--- a/mysql-test/main/row.result
+++ b/mysql-test/main/row.result
@@ -15,28 +15,50 @@ select row('a',1.5,3) IN (row(1,2,3), row('a',1.5,3), row('a','a','a'));
row('a',1.5,3) IN (row(1,2,3), row('a',1.5,3), row('a','a','a'))
1
Warnings:
-Warning 1292 Truncated incorrect DECIMAL value: 'a'
-Warning 1292 Truncated incorrect INTEGER value: 'a'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
select row('a',0,3) IN (row(3,2,3), row('a','a','3'), row(1,3,3));
row('a',0,3) IN (row(3,2,3), row('a','a','3'), row(1,3,3))
1
Warnings:
-Warning 1292 Truncated incorrect INTEGER value: 'a'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
select row('a',0,3) IN (row(3,2,3), row('a','0','3'), row(1,3,3));
row('a',0,3) IN (row(3,2,3), row('a','0','3'), row(1,3,3))
1
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
select row('a',1.5,3) IN (row(3,NULL,3), row('a',1.5,3), row(1,3,3));
row('a',1.5,3) IN (row(3,NULL,3), row('a',1.5,3), row(1,3,3))
1
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
select row('b',1.5,3) IN (row(3,NULL,3), row('a',1.5,3), row(1,3,3));
row('b',1.5,3) IN (row(3,NULL,3), row('a',1.5,3), row(1,3,3))
-0
+1
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: 'b'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
select row('b',1.5,3) IN (row('b',NULL,3), row('a',1.5,3), row(1,3,3));
row('b',1.5,3) IN (row('b',NULL,3), row('a',1.5,3), row(1,3,3))
-NULL
+1
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: 'b'
+Warning 1292 Truncated incorrect DOUBLE value: 'b'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
select row('b',1.5,3) IN (row('b',NULL,4), row('a',1.5,3), row(1,3,3));
row('b',1.5,3) IN (row('b',NULL,4), row('a',1.5,3), row(1,3,3))
-0
+1
+Warnings:
+Warning 1292 Truncated incorrect DOUBLE value: 'b'
+Warning 1292 Truncated incorrect DOUBLE value: 'b'
+Warning 1292 Truncated incorrect DOUBLE value: 'a'
select (1,2,(3,4)) IN ((3,2,(3,4)), (1,2,(3,4)));
(1,2,(3,4)) IN ((3,2,(3,4)), (1,2,(3,4)))
1
@@ -461,7 +483,7 @@ select * from t1,t2 where (a,b) = (c,d);
a b c d
abc 1 abc 1
select host,user from mysql.user where (host,user) = ('localhost','test');
-host user
+Host User
drop table t1,t2;
#
# Bug#52124 memory leaks like a sieve in datetime, timestamp, time, date fields + warnings
diff --git a/mysql-test/main/rowid_filter.result b/mysql-test/main/rowid_filter.result
new file mode 100644
index 00000000000..efe914faba7
--- /dev/null
+++ b/mysql-test/main/rowid_filter.result
@@ -0,0 +1,2107 @@
+DROP DATABASE IF EXISTS dbt3_s001;
+CREATE DATABASE dbt3_s001;
+use dbt3_s001;
+CREATE INDEX i_l_quantity ON lineitem(l_quantity);
+CREATE INDEX i_o_totalprice ON orders(o_totalprice);
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables=preferably;
+ANALYZE TABLE lineitem, orders;
+show create table lineitem;
+Table Create Table
+lineitem CREATE TABLE `lineitem` (
+ `l_orderkey` int(11) NOT NULL DEFAULT 0,
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL DEFAULT 0,
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ PRIMARY KEY (`l_orderkey`,`l_linenumber`),
+ KEY `i_l_shipdate` (`l_shipDATE`),
+ KEY `i_l_suppkey_partkey` (`l_partkey`,`l_suppkey`),
+ KEY `i_l_partkey` (`l_partkey`),
+ KEY `i_l_suppkey` (`l_suppkey`),
+ KEY `i_l_receiptdate` (`l_receiptDATE`),
+ KEY `i_l_orderkey` (`l_orderkey`),
+ KEY `i_l_orderkey_quantity` (`l_orderkey`,`l_quantity`),
+ KEY `i_l_commitdate` (`l_commitDATE`),
+ KEY `i_l_quantity` (`l_quantity`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show create table orders;
+Table Create Table
+orders CREATE TABLE `orders` (
+ `o_orderkey` int(11) NOT NULL,
+ `o_custkey` int(11) DEFAULT NULL,
+ `o_orderstatus` char(1) DEFAULT NULL,
+ `o_totalprice` double DEFAULT NULL,
+ `o_orderDATE` date DEFAULT NULL,
+ `o_orderpriority` char(15) DEFAULT NULL,
+ `o_clerk` char(15) DEFAULT NULL,
+ `o_shippriority` int(11) DEFAULT NULL,
+ `o_comment` varchar(79) DEFAULT NULL,
+ PRIMARY KEY (`o_orderkey`),
+ KEY `i_o_orderdate` (`o_orderDATE`),
+ KEY `i_o_custkey` (`o_custkey`),
+ KEY `i_o_totalprice` (`o_totalprice`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+set optimizer_use_condition_selectivity=2;
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (11%) Using index condition; Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": ["i_l_shipdate", "i_l_quantity"],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_quantity",
+ "used_key_parts": ["l_quantity"]
+ },
+ "rows": 662,
+ "selectivity_pct": 11.024
+ },
+ "rows": 509,
+ "filtered": 11.024,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 45"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (11%) 60.00 (3%) 11.02 100.00 Using index condition; Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": ["i_l_shipdate", "i_l_quantity"],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_quantity",
+ "used_key_parts": ["l_quantity"]
+ },
+ "rows": 662,
+ "selectivity_pct": 11.024,
+ "r_rows": 605,
+ "r_selectivity_pct": 3.6855,
+ "r_buffer_size": "REPLACED",
+ "r_filling_time_ms": "REPLACED"
+ },
+ "r_loops": 1,
+ "rows": 509,
+ "r_rows": 60,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 11.024,
+ "r_filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 45"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+l_orderkey l_linenumber l_shipdate l_quantity
+1121 5 1997-04-27 47
+1121 6 1997-04-21 50
+1441 7 1997-06-07 50
+1443 1 1997-02-05 47
+1473 1 1997-05-05 50
+1568 2 1997-04-06 46
+1632 1 1997-01-25 47
+1632 3 1997-01-29 47
+1954 7 1997-06-04 49
+1959 1 1997-05-05 46
+2151 3 1997-01-20 49
+2177 5 1997-05-10 46
+2369 2 1997-01-02 47
+2469 3 1997-01-11 48
+2469 6 1997-03-03 49
+2470 2 1997-06-02 50
+260 1 1997-03-24 50
+288 2 1997-04-19 49
+289 4 1997-03-14 48
+3009 1 1997-03-19 48
+3105 3 1997-02-28 48
+3106 2 1997-02-27 49
+3429 1 1997-04-08 48
+3490 2 1997-06-27 50
+3619 1 1997-01-22 49
+3619 3 1997-01-31 46
+3969 3 1997-05-29 46
+4005 4 1997-01-31 49
+4036 1 1997-06-21 46
+4066 4 1997-02-17 49
+4098 1 1997-01-26 46
+422 3 1997-06-21 46
+4258 3 1997-01-02 46
+4421 2 1997-04-21 46
+4421 3 1997-05-25 46
+4453 3 1997-05-29 48
+4484 7 1997-03-17 50
+4609 3 1997-02-11 46
+484 1 1997-03-06 49
+484 3 1997-01-24 50
+484 5 1997-03-05 48
+485 1 1997-03-28 50
+4868 1 1997-04-29 47
+4868 3 1997-04-23 49
+4934 1 1997-05-20 48
+4967 1 1997-05-27 50
+5090 2 1997-04-05 46
+5152 2 1997-03-10 50
+5158 4 1997-04-10 49
+5606 3 1997-03-11 46
+5606 7 1997-02-01 46
+5762 4 1997-03-02 47
+581 3 1997-02-27 49
+5829 5 1997-01-31 49
+5831 4 1997-02-24 46
+5895 2 1997-04-27 47
+5895 3 1997-03-15 49
+5952 1 1997-06-30 49
+705 1 1997-04-18 46
+836 3 1997-03-21 46
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_shipdate 4 NULL 509 Using index condition; Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": ["i_l_shipdate", "i_l_quantity"],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rows": 509,
+ "filtered": 11.024,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 45"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_shipdate 4 NULL 509 510.00 11.02 11.76 Using index condition; Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": ["i_l_shipdate", "i_l_quantity"],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "r_loops": 1,
+ "rows": 509,
+ "r_rows": 510,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 11.024,
+ "r_filtered": 11.765,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 45"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+l_orderkey l_linenumber l_shipdate l_quantity
+1121 5 1997-04-27 47
+1121 6 1997-04-21 50
+1441 7 1997-06-07 50
+1443 1 1997-02-05 47
+1473 1 1997-05-05 50
+1568 2 1997-04-06 46
+1632 1 1997-01-25 47
+1632 3 1997-01-29 47
+1954 7 1997-06-04 49
+1959 1 1997-05-05 46
+2151 3 1997-01-20 49
+2177 5 1997-05-10 46
+2369 2 1997-01-02 47
+2469 3 1997-01-11 48
+2469 6 1997-03-03 49
+2470 2 1997-06-02 50
+260 1 1997-03-24 50
+288 2 1997-04-19 49
+289 4 1997-03-14 48
+3009 1 1997-03-19 48
+3105 3 1997-02-28 48
+3106 2 1997-02-27 49
+3429 1 1997-04-08 48
+3490 2 1997-06-27 50
+3619 1 1997-01-22 49
+3619 3 1997-01-31 46
+3969 3 1997-05-29 46
+4005 4 1997-01-31 49
+4036 1 1997-06-21 46
+4066 4 1997-02-17 49
+4098 1 1997-01-26 46
+422 3 1997-06-21 46
+4258 3 1997-01-02 46
+4421 2 1997-04-21 46
+4421 3 1997-05-25 46
+4453 3 1997-05-29 48
+4484 7 1997-03-17 50
+4609 3 1997-02-11 46
+484 1 1997-03-06 49
+484 3 1997-01-24 50
+484 5 1997-03-05 48
+485 1 1997-03-28 50
+4868 1 1997-04-29 47
+4868 3 1997-04-23 49
+4934 1 1997-05-20 48
+4967 1 1997-05-27 50
+5090 2 1997-04-05 46
+5152 2 1997-03-10 50
+5158 4 1997-04-10 49
+5606 3 1997-03-11 46
+5606 7 1997-02-01 46
+5762 4 1997-03-02 47
+581 3 1997-02-27 49
+5829 5 1997-01-31 49
+5831 4 1997-02-24 46
+5895 2 1997-04-27 47
+5895 3 1997-03-15 49
+5952 1 1997-06-30 49
+705 1 1997-04-18 46
+836 3 1997-03-21 46
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 Using index condition
+1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rows": 98,
+ "filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_o_totalprice",
+ "used_key_parts": ["o_totalprice"]
+ },
+ "rows": 81,
+ "selectivity_pct": 5.4
+ },
+ "rows": 1,
+ "filtered": 5.4,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 98.00 100.00 100.00 Using index condition
+1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (5%) 0.11 (10%) 5.40 100.00 Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "r_loops": 1,
+ "rows": 98,
+ "r_rows": 98,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_o_totalprice",
+ "used_key_parts": ["o_totalprice"]
+ },
+ "rows": 81,
+ "selectivity_pct": 5.4,
+ "r_rows": 71,
+ "r_selectivity_pct": 10.417,
+ "r_buffer_size": "REPLACED",
+ "r_filling_time_ms": "REPLACED"
+ },
+ "r_loops": 98,
+ "rows": 1,
+ "r_rows": 0.1122,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 5.4,
+ "r_filtered": 100,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+o_orderkey l_linenumber l_shipdate o_totalprice
+1156 3 1997-01-24 217682.81
+1156 4 1997-01-18 217682.81
+1156 6 1997-01-27 217682.81
+1156 7 1997-01-01 217682.81
+2180 2 1997-01-03 208481.57
+2180 3 1997-01-03 208481.57
+3619 1 1997-01-22 222274.54
+3619 3 1997-01-31 222274.54
+3619 6 1997-01-25 222274.54
+484 3 1997-01-24 219920.62
+5606 6 1997-01-11 219959.08
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 Using index condition
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rows": 98,
+ "filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rows": 1,
+ "filtered": 5.4,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 98.00 100.00 100.00 Using index condition
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 5.40 11.22 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "r_loops": 1,
+ "rows": 98,
+ "r_rows": 98,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "r_loops": 98,
+ "rows": 1,
+ "r_rows": 1,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 5.4,
+ "r_filtered": 11.224,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+o_orderkey l_linenumber l_shipdate o_totalprice
+1156 3 1997-01-24 217682.81
+1156 4 1997-01-18 217682.81
+1156 6 1997-01-27 217682.81
+1156 7 1997-01-01 217682.81
+2180 2 1997-01-03 208481.57
+2180 3 1997-01-03 208481.57
+3619 1 1997-01-22 222274.54
+3619 3 1997-01-31 222274.54
+3619 6 1997-01-25 222274.54
+484 3 1997-01-24 219920.62
+5606 6 1997-01-11 219959.08
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (11%) Using index condition; Using where; Using rowid filter
+1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (10%) Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity",
+ "i_l_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_quantity",
+ "used_key_parts": ["l_quantity"]
+ },
+ "rows": 662,
+ "selectivity_pct": 11.024
+ },
+ "rows": 509,
+ "filtered": 11.024,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 45"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_o_totalprice",
+ "used_key_parts": ["o_totalprice"]
+ },
+ "rows": 152,
+ "selectivity_pct": 10.133
+ },
+ "rows": 1,
+ "filtered": 10.133,
+ "attached_condition": "orders.o_totalprice between 180000 and 230000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 509 (11%) 60.00 (3%) 11.02 100.00 Using index condition; Using where; Using rowid filter
+1 SIMPLE orders eq_ref|filter PRIMARY,i_o_totalprice PRIMARY|i_o_totalprice 4|9 dbt3_s001.lineitem.l_orderkey 1 (10%) 0.27 (25%) 10.13 100.00 Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity",
+ "i_l_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_quantity",
+ "used_key_parts": ["l_quantity"]
+ },
+ "rows": 662,
+ "selectivity_pct": 11.024,
+ "r_rows": 605,
+ "r_selectivity_pct": 3.6855,
+ "r_buffer_size": "REPLACED",
+ "r_filling_time_ms": "REPLACED"
+ },
+ "r_loops": 1,
+ "rows": 509,
+ "r_rows": 60,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 11.024,
+ "r_filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 45"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_o_totalprice",
+ "used_key_parts": ["o_totalprice"]
+ },
+ "rows": 152,
+ "selectivity_pct": 10.133,
+ "r_rows": 144,
+ "r_selectivity_pct": 25.424,
+ "r_buffer_size": "REPLACED",
+ "r_filling_time_ms": "REPLACED"
+ },
+ "r_loops": 60,
+ "rows": 1,
+ "r_rows": 0.2667,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 10.133,
+ "r_filtered": 100,
+ "attached_condition": "orders.o_totalprice between 180000 and 230000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+o_orderkey l_linenumber l_shipdate l_quantity o_totalprice
+1632 1 1997-01-25 47 183286.33
+1632 3 1997-01-29 47 183286.33
+2177 5 1997-05-10 46 183493.42
+2469 3 1997-01-11 48 192074.23
+2469 6 1997-03-03 49 192074.23
+3619 1 1997-01-22 49 222274.54
+3619 3 1997-01-31 46 222274.54
+484 1 1997-03-06 49 219920.62
+484 3 1997-01-24 50 219920.62
+484 5 1997-03-05 48 219920.62
+4934 1 1997-05-20 48 180478.16
+5606 3 1997-03-11 46 219959.08
+5606 7 1997-02-01 46 219959.08
+5829 5 1997-01-31 49 183734.56
+5895 2 1997-04-27 47 201419.83
+5895 3 1997-03-15 49 201419.83
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate 4 NULL 509 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity",
+ "i_l_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rows": 509,
+ "filtered": 11.024,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 45"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rows": 1,
+ "filtered": 10.133,
+ "attached_condition": "orders.o_totalprice between 180000 and 230000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity i_l_shipdate 4 NULL 509 510.00 11.02 11.76 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 10.13 26.67 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity",
+ "i_l_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "r_loops": 1,
+ "rows": 509,
+ "r_rows": 510,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 11.024,
+ "r_filtered": 11.765,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 45"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "r_loops": 60,
+ "rows": 1,
+ "r_rows": 1,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 10.133,
+ "r_filtered": 26.667,
+ "attached_condition": "orders.o_totalprice between 180000 and 230000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+o_orderkey l_linenumber l_shipdate l_quantity o_totalprice
+1632 1 1997-01-25 47 183286.33
+1632 3 1997-01-29 47 183286.33
+2177 5 1997-05-10 46 183493.42
+2469 3 1997-01-11 48 192074.23
+2469 6 1997-03-03 49 192074.23
+3619 1 1997-01-22 49 222274.54
+3619 3 1997-01-31 46 222274.54
+484 1 1997-03-06 49 219920.62
+484 3 1997-01-24 50 219920.62
+484 5 1997-03-05 48 219920.62
+4934 1 1997-05-20 48 180478.16
+5606 3 1997-03-11 46 219959.08
+5606 7 1997-02-01 46 219959.08
+5829 5 1997-01-31 49 183734.56
+5895 2 1997-04-27 47 201419.83
+5895 3 1997-03-15 49 201419.83
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 81 Using index condition
+1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "rows": 81,
+ "filtered": 100,
+ "index_condition": "orders.o_totalprice between 200000 and 230000"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
+ },
+ "rows": 509,
+ "selectivity_pct": 8.4763
+ },
+ "rows": 4,
+ "filtered": 8.4763,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 81 71.00 100.00 100.00 Using index condition
+1 SIMPLE lineitem ref|filter PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY|i_l_shipdate 4|4 dbt3_s001.orders.o_orderkey 4 (8%) 0.52 (7%) 8.48 100.00 Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "r_loops": 1,
+ "rows": 81,
+ "r_rows": 71,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "index_condition": "orders.o_totalprice between 200000 and 230000"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_shipdate",
+ "used_key_parts": ["l_shipDATE"]
+ },
+ "rows": 509,
+ "selectivity_pct": 8.4763,
+ "r_rows": 510,
+ "r_selectivity_pct": 7.7731,
+ "r_buffer_size": "REPLACED",
+ "r_filling_time_ms": "REPLACED"
+ },
+ "r_loops": 71,
+ "rows": 4,
+ "r_rows": 0.5211,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 8.4763,
+ "r_filtered": 100,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+o_orderkey l_linenumber l_shipdate o_totalprice
+1156 3 1997-01-24 217682.81
+1156 4 1997-01-18 217682.81
+1156 6 1997-01-27 217682.81
+1156 7 1997-01-01 217682.81
+1890 1 1997-04-02 202364.58
+1890 3 1997-02-09 202364.58
+1890 4 1997-04-08 202364.58
+1890 5 1997-04-15 202364.58
+1890 6 1997-02-13 202364.58
+2180 2 1997-01-03 208481.57
+2180 3 1997-01-03 208481.57
+3619 1 1997-01-22 222274.54
+3619 3 1997-01-31 222274.54
+3619 4 1997-03-18 222274.54
+3619 6 1997-01-25 222274.54
+453 1 1997-06-30 216826.73
+453 2 1997-06-30 216826.73
+484 1 1997-03-06 219920.62
+484 2 1997-04-09 219920.62
+484 3 1997-01-24 219920.62
+484 4 1997-04-29 219920.62
+484 5 1997-03-05 219920.62
+484 6 1997-04-06 219920.62
+5606 2 1997-02-23 219959.08
+5606 3 1997-03-11 219959.08
+5606 4 1997-02-06 219959.08
+5606 6 1997-01-11 219959.08
+5606 7 1997-02-01 219959.08
+5859 2 1997-05-15 210643.96
+5859 5 1997-05-28 210643.96
+5859 6 1997-06-15 210643.96
+5895 1 1997-04-05 201419.83
+5895 2 1997-04-27 201419.83
+5895 3 1997-03-15 201419.83
+5895 4 1997-03-03 201419.83
+5895 5 1997-04-30 201419.83
+5895 6 1997-04-19 201419.83
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 81 Using index condition
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "rows": 81,
+ "filtered": 100,
+ "index_condition": "orders.o_totalprice between 200000 and 230000"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 8.4763,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 81 71.00 100.00 100.00 Using index condition
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.70 8.48 7.77 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "r_loops": 1,
+ "rows": 81,
+ "r_rows": 71,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "index_condition": "orders.o_totalprice between 200000 and 230000"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 71,
+ "rows": 4,
+ "r_rows": 6.7042,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 8.4763,
+ "r_filtered": 7.7731,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+o_orderkey l_linenumber l_shipdate o_totalprice
+1156 3 1997-01-24 217682.81
+1156 4 1997-01-18 217682.81
+1156 6 1997-01-27 217682.81
+1156 7 1997-01-01 217682.81
+1890 1 1997-04-02 202364.58
+1890 3 1997-02-09 202364.58
+1890 4 1997-04-08 202364.58
+1890 5 1997-04-15 202364.58
+1890 6 1997-02-13 202364.58
+2180 2 1997-01-03 208481.57
+2180 3 1997-01-03 208481.57
+3619 1 1997-01-22 222274.54
+3619 3 1997-01-31 222274.54
+3619 4 1997-03-18 222274.54
+3619 6 1997-01-25 222274.54
+453 1 1997-06-30 216826.73
+453 2 1997-06-30 216826.73
+484 1 1997-03-06 219920.62
+484 2 1997-04-09 219920.62
+484 3 1997-01-24 219920.62
+484 4 1997-04-29 219920.62
+484 5 1997-03-05 219920.62
+484 6 1997-04-06 219920.62
+5606 2 1997-02-23 219959.08
+5606 3 1997-03-11 219959.08
+5606 4 1997-02-06 219959.08
+5606 6 1997-01-11 219959.08
+5606 7 1997-02-01 219959.08
+5859 2 1997-05-15 210643.96
+5859 5 1997-05-28 210643.96
+5859 6 1997-06-15 210643.96
+5895 1 1997-04-05 201419.83
+5895 2 1997-04-27 201419.83
+5895 3 1997-03-15 201419.83
+5895 4 1997-03-03 201419.83
+5895 5 1997-04-30 201419.83
+5895 6 1997-04-19 201419.83
+#
+# MDEV-18413: find constraint correlated indexes
+#
+ALTER TABLE lineitem ADD CONSTRAINT l_date CHECK(l_shipdate < l_receiptdate);
+# Filter on l_shipdate is not used because it participates in
+# the same constraint as l_receiptdate.
+# Access is made on l_receiptdate.
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 18 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_receiptdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_receiptdate",
+ "key_length": "4",
+ "used_key_parts": ["l_receiptDATE"],
+ "rows": 18,
+ "filtered": 0.5662,
+ "index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rows": 1,
+ "filtered": 8.7333,
+ "attached_condition": "orders.o_totalprice between 200000 and 250000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 18 18.00 0.57 38.89 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 8.73 14.29 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_receiptdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_receiptdate",
+ "key_length": "4",
+ "used_key_parts": ["l_receiptDATE"],
+ "r_loops": 1,
+ "rows": 18,
+ "r_rows": 18,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 0.5662,
+ "r_filtered": 38.889,
+ "index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "r_loops": 7,
+ "rows": 1,
+ "r_rows": 1,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 8.7333,
+ "r_filtered": 14.286,
+ "attached_condition": "orders.o_totalprice between 200000 and 250000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+l_shipdate l_receiptdate o_totalprice
+1996-10-07 1996-10-08 202623.92
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 18 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_receiptdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_receiptdate",
+ "key_length": "4",
+ "used_key_parts": ["l_receiptDATE"],
+ "rows": 18,
+ "filtered": 0.5662,
+ "index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rows": 1,
+ "filtered": 8.7333,
+ "attached_condition": "orders.o_totalprice between 200000 and 250000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 18 18.00 0.57 38.89 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 8.73 14.29 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_receiptdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_receiptdate",
+ "key_length": "4",
+ "used_key_parts": ["l_receiptDATE"],
+ "r_loops": 1,
+ "rows": 18,
+ "r_rows": 18,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 0.5662,
+ "r_filtered": 38.889,
+ "index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "r_loops": 7,
+ "rows": 1,
+ "r_rows": 1,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 8.7333,
+ "r_filtered": 14.286,
+ "attached_condition": "orders.o_totalprice between 200000 and 250000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+l_shipdate l_receiptdate o_totalprice
+1996-10-07 1996-10-08 202623.92
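The plans above illustrate the MDEV-18413 rule: because l_shipdate and l_receiptdate appear in the same CHECK constraint, range access goes through i_l_receiptdate and no rowid filter is built from i_l_shipdate. A minimal sketch of the same pattern, using hypothetical names (t1, d1, d2, i_d1, i_d2, d_order) that are not part of the test suite; the actual plan still depends on loaded data and collected statistics.

CREATE TABLE t1 (
  id INT PRIMARY KEY,
  d1 DATE,                  -- plays the role of l_shipdate
  d2 DATE,                  -- plays the role of l_receiptdate
  KEY i_d1 (d1),
  KEY i_d2 (d2)
) ENGINE=MyISAM;
ALTER TABLE t1 ADD CONSTRAINT d_order CHECK (d1 < d2);
-- After loading rows and running ANALYZE TABLE (as in the dbt3_s001 setup
-- above), range access is expected on i_d2, and no rowid filter is expected
-- to be built from i_d1, since d1 and d2 share the same CHECK constraint.
SET STATEMENT optimizer_switch='rowid_filter=on' FOR
EXPLAIN SELECT id FROM t1
WHERE d1 BETWEEN '1996-10-01' AND '1996-10-10'
AND d2 BETWEEN '1996-10-05' AND '1996-10-10';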
+ALTER TABLE orders ADD COLUMN o_totaldiscount double;
+UPDATE orders SET o_totaldiscount = o_totalprice*(o_custkey/1000);
+CREATE INDEX i_o_totaldiscount on orders(o_totaldiscount);
+ALTER TABLE orders ADD CONSTRAINT o_price CHECK(o_totalprice > o_totaldiscount);
+# Filter on o_totalprice is not used because it participates in
+# the same constraint as o_totaldiscount.
+# Access is made on o_totaldiscount.
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice", "i_o_totaldiscount"],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "rows": 39,
+ "filtered": 3.2667,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 3.0475,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 3.27 2.44 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice", "i_o_totaldiscount"],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "r_loops": 1,
+ "rows": 39,
+ "r_rows": 41,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 3.2667,
+ "r_filtered": 2.439,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 6,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 3.0475,
+ "r_filtered": 66.667,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+o_totaldiscount o_totalprice l_shipdate
+18016.04288 219707.84 1996-10-02
+18016.04288 219707.84 1996-10-17
+18016.04288 219707.84 1996-11-04
+18016.04288 219707.84 1996-11-14
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice", "i_o_totaldiscount"],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "rows": 39,
+ "filtered": 3.2667,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 3.0475,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 3.27 2.44 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice", "i_o_totaldiscount"],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "r_loops": 1,
+ "rows": 39,
+ "r_rows": 41,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 3.2667,
+ "r_filtered": 2.439,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 6,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 3.0475,
+ "r_filtered": 66.667,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+o_totaldiscount o_totalprice l_shipdate
+18016.04288 219707.84 1996-10-02
+18016.04288 219707.84 1996-10-17
+18016.04288 219707.84 1996-11-04
+18016.04288 219707.84 1996-11-14
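As a hypothetical counter-check that is not part of the recorded test: earlier in this file i_o_totalprice does supply a rowid filter for the join queries, so removing the o_price constraint should make the two indexes independent again and a filter on i_o_totalprice may be reconsidered. The statements below are a sketch under the assumption that ALTER TABLE ... DROP CONSTRAINT is available in the server version being tested; the query shape is illustrative only.

-- Drop the correlating constraint and re-check the single-table plan.
ALTER TABLE orders DROP CONSTRAINT o_price;
SET STATEMENT optimizer_switch='rowid_filter=on' FOR
EXPLAIN SELECT o_totaldiscount, o_totalprice
FROM orders
WHERE o_totaldiscount BETWEEN 18000 AND 20000
AND o_totalprice BETWEEN 200000 AND 220000;
-- Restore the constraint so the view-based queries below see the same schema.
ALTER TABLE orders ADD CONSTRAINT o_price CHECK(o_totalprice > o_totaldiscount);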
+CREATE VIEW v1 AS
+SELECT * FROM orders
+WHERE o_orderdate BETWEEN '1992-12-01' AND '1997-01-01';
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_o_orderdate",
+ "i_o_totalprice",
+ "i_o_totaldiscount"
+ ],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "rows": 39,
+ "filtered": 1.9905,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 3.0475,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 1.99 2.44 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_o_orderdate",
+ "i_o_totalprice",
+ "i_o_totaldiscount"
+ ],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "r_loops": 1,
+ "rows": 39,
+ "r_rows": 41,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 1.9905,
+ "r_filtered": 2.439,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 6,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 3.0475,
+ "r_filtered": 66.667,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+o_totaldiscount o_totalprice l_shipdate
+18016.04288 219707.84 1996-10-02
+18016.04288 219707.84 1996-10-17
+18016.04288 219707.84 1996-11-04
+18016.04288 219707.84 1996-11-14
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_o_orderdate",
+ "i_o_totalprice",
+ "i_o_totaldiscount"
+ ],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "rows": 39,
+ "filtered": 1.9905,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 3.0475,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 39 41.00 1.99 2.44 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_o_orderdate",
+ "i_o_totalprice",
+ "i_o_totaldiscount"
+ ],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "r_loops": 1,
+ "rows": 39,
+ "r_rows": 41,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 1.9905,
+ "r_filtered": 2.439,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 6,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 3.0475,
+ "r_filtered": 66.667,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+o_totaldiscount o_totalprice l_shipdate
+18016.04288 219707.84 1996-10-02
+18016.04288 219707.84 1996-10-17
+18016.04288 219707.84 1996-11-04
+18016.04288 219707.84 1996-11-14
+ALTER TABLE lineitem DROP CONSTRAINT l_date;
+ALTER TABLE orders DROP CONSTRAINT o_price;
+ALTER TABLE orders DROP COLUMN o_totaldiscount;
+DROP VIEW v1;
+DROP DATABASE dbt3_s001;
+use test;
+#
+# MDEV-18816: potential range filter for one join table with
+# impossible WHERE for another
+#
+create table t1 (
+pk int not null primary key, c2 varchar(10) , i1 int,key (c2)
+) engine=myisam;
+insert into t1 values (1,'a',-5),(2,'a',null);
+create table t2 (
+pk int, i1 int, c1 varchar(30) , key c1 (c1(30)), key i1 (i1)
+) engine=myisam;
+insert into t2 values
+(1,-5,'a'),(2,null,'a'),(3,null,'a'),(4,null,'a'),(5,5,'a'),(6,null,'a'),
+(7,4,'a'),(8,55,'a'),(9,null,'a'),(10,null,'a'),(11,null,'a'),(12,-5,'a'),
+(13,-5,'a'),(14,null,'a'),(15,null,'a'),(16,-5,'a'),(17,-5,'a');
+select 1
+from t1
+left join
+t2 join t1 as t1_a on t2.i1 = t1_a.pk
+on t1.c2 = t2.c1
+where t1_a.pk is null and t1_a.i1 != 3;
+1
+explain extended select 1
+from t1
+left join
+t2 join t1 as t1_a on t2.i1 = t1_a.pk
+on t1.c2 = t2.c1
+where t1_a.pk is null and t1_a.i1 != 3;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+Warnings:
+Note 1003 select 1 AS `1` from `test`.`t1` join `test`.`t2` join `test`.`t1` `t1_a` where 0
+drop table t1,t2;
+#
+# MDEV-18640: TABLE::prune_range_rowid_filters: Conditional jump or
+# move depends on uninitialized value
+#
+CREATE TABLE t1 (
+pk INT, i INT, PRIMARY KEY (pk), KEY (pk,i)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,10), (7,70), (2,20);
+SELECT * FROM t1 WHERE pk < 5;
+pk i
+1 10
+2 20
+DROP TABLE t1;
+#
+# MDEV-18956: Possible rowid filter for subquery for which
+# in_to_exists strategy has been chosen
+#
+CREATE TABLE t1 (pk int) engine=myisam ;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (
+pk int auto_increment PRIMARY KEY,
+i1 int, i2 int, c2 varchar(1),
+KEY (i1), KEY (i2)
+) engine=myisam;
+INSERT INTO t2 VALUES
+(1,8,6,'t'),(2,5,7,'i'),(3,4,4,'h'),(4,207,38,'d'),(5,183,206,'b'),
+(6,7,null,'o'),(7,1,2,'j'),(8,17,36,'s'),(9,4,5,'q'),(10,0,6,'l'),
+(11,1,9,'j'),(12,5,6,'y'),(13,null,0,'i'),(14,7,7,'x'),(15,5,2,'u');
+SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
+pk
+EXPLAIN EXTENDED
+SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING
+2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL no matching row in const table
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` having 0
+DROP TABLE t1,t2;
+#
+# MDEV-19255: rowid range filter built for range condition
+# that uses an expensive subquery
+#
+CREATE TABLE t1 (
+pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES
+(10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'),
+(17,1,'f'),(18,5,'v'),(19,3,'f'),(20,2,'q'),(21,8,'y'),(22,0,'a'),(23,9,'w'),
+(24,3,'e'),(25,1,'b'),(26,9,'r'),(27,2,'k'),(28,5,'c'),(29,3,'k'),(30,9,'b'),
+(31,8,'j'),(32,1,'t'),(33,8,'n'),(34,3,'z'),(35,0,'u'),(36,3,'a'),(37,3,'g'),
+(38,1,'f'),(39,6,'p'),(40,6,'m'),(41,6,'t'),(42,7,'i'),(43,4,'h'),(44,3,'d'),
+(45,2,'b'),(46,1,'o'),(47,2,'j'),(48,6,'s'),(49,5,'q'),(50,6,'l'),(51,9,'j'),
+(52,6,'y'),(53,0,'i'),(54,7,'x'),(55,2,'u'),(56,6,'t'),(57,4,'b'),(58,5,'m'),
+(59,4,'x'),(60,8,'x'),(61,6,'v'),(62,8,'m'),(63,4,'j'),(64,8,'z'),(65,2,'a'),
+(66,9,'i'),(67,4,'g'),(68,8,'h'),(69,1,'p'),(70,8,'a'),(71,0,'x'),(72,2,'s'),
+(73,6,'k'),(74,0,'m'),(75,6,'e'),(76,9,'y'),(77,7,'d'),(78,7,'w'),(79,6,'y'),
+(80,9,'s'),(81,9,'x'),(82,6,'l'),(83,9,'f'),(84,8,'x'),(85,1,'p'),(86,7,'y'),
+(87,6,'p'),(88,1,'g'),(89,3,'c'),(90,5,'h'),(91,3,'p'),(92,2,'b'),(93,1,NULL),
+(94,3,NULL),(95,2,'y'),(96,7,'s'),(97,7,'x'),(98,6,'i'),(99,9,'t'),(100,5,'j'),
+(101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'),
+(107,8,'z'),(108,3,'k'),(109,65,NULL);
+CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1,1,'x');
+INSERT INTO t2 SELECT * FROM t1;
+SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 )
+WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
+pk1 a1 b1 pk2 a2 b2
+65 2 a 109 65 NULL
+EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 )
+WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 101 100.00 Using where
+1 PRIMARY t1 eq_ref|filter PRIMARY,b1 PRIMARY|b1 4|4 test.t2.a2 1 (87%) 87.00 Using where; Using rowid filter
+2 SUBQUERY t2 range PRIMARY PRIMARY 4 NULL 1 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`pk1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t2`.`a2` <> `test`.`t2`.`pk2`
+EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 )
+WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 101,
+ "filtered": 100,
+ "attached_condition": "t2.a2 <> t2.pk2 and t2.a2 is not null"
+ },
+ "table": {
+ "table_name": "t1",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "b1"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["pk1"],
+ "ref": ["test.t2.a2"],
+ "rowid_filter": {
+ "range": {
+ "key": "b1",
+ "used_key_parts": ["b1"]
+ },
+ "rows": 87,
+ "selectivity_pct": 87
+ },
+ "rows": 1,
+ "filtered": 87,
+ "attached_condition": "t1.b1 <= (subquery#2)"
+ },
+ "subqueries": [
+ {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "t2",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["pk2"],
+ "rows": 1,
+ "filtered": 100,
+ "index_condition": "t2.pk2 <= 1"
+ }
+ }
+ }
+ ]
+ }
+}
+DROP TABLE t1,t2;
+set @@use_stat_tables=@save_use_stat_tables;
diff --git a/mysql-test/main/rowid_filter.test b/mysql-test/main/rowid_filter.test
new file mode 100644
index 00000000000..6f26e81db92
--- /dev/null
+++ b/mysql-test/main/rowid_filter.test
@@ -0,0 +1,342 @@
+--disable_warnings
+DROP DATABASE IF EXISTS dbt3_s001;
+--enable_warnings
+
+CREATE DATABASE dbt3_s001;
+
+use dbt3_s001;
+
+--disable_query_log
+--disable_result_log
+--disable_warnings
+--source include/dbt3_s001.inc
+--enable_warnings
+--enable_result_log
+--enable_query_log
+
+CREATE INDEX i_l_quantity ON lineitem(l_quantity);
+
+CREATE INDEX i_o_totalprice ON orders(o_totalprice);
+
+set @save_use_stat_tables= @@use_stat_tables;
+
+set @@use_stat_tables=preferably;
+
+--disable_result_log
+--disable_warnings
+ANALYZE TABLE lineitem, orders;
+--enable_warnings
+--enable_result_log
+
+show create table lineitem;
+show create table orders;
+
+set optimizer_use_condition_selectivity=2;
+
+let $with_filter=
+set statement optimizer_switch='rowid_filter=on' for;
+
+let $without_filter=
+set statement optimizer_switch='rowid_filter=off' for;
+
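+# Note: "SET STATEMENT <var>=<val> FOR <query>" limits the setting to that one
+# statement, so every query below is executed twice with identical text and only
+# the rowid_filter flag differing. An equivalent session-level toggle would look
+# like the following (illustrative sketch only, not used by this test):
+#
+#   SET @saved_switch = @@optimizer_switch;
+#   SET optimizer_switch = 'rowid_filter=on';
+#   EXPLAIN SELECT ... ;
+#   SET optimizer_switch = @saved_switch;
+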
+let $q1=
+SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+ l_quantity > 45;
+
+eval $with_filter EXPLAIN $q1;
+eval $with_filter EXPLAIN FORMAT=JSON $q1;
+eval $with_filter ANALYZE $q1;
+--source include/analyze-format.inc
+eval $with_filter ANALYZE FORMAT=JSON $q1;
+--sorted_result
+eval $with_filter $q1;
+
+eval $without_filter EXPLAIN $q1;
+eval $without_filter EXPLAIN FORMAT=JSON $q1;
+eval $without_filter ANALYZE $q1;
+--source include/analyze-format.inc
+eval $without_filter ANALYZE FORMAT=JSON $q1;
+--sorted_result
+eval $without_filter $q1;
+
+let $q2=
+SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+ FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+ o_totalprice between 200000 and 230000;
+
+eval $with_filter EXPLAIN $q2;
+eval $with_filter EXPLAIN FORMAT=JSON $q2;
+eval $with_filter ANALYZE $q2;
+--source include/analyze-format.inc
+eval $with_filter ANALYZE FORMAT=JSON $q2;
+--sorted_result
+eval $with_filter $q2;
+
+eval $without_filter EXPLAIN $q2;
+eval $without_filter EXPLAIN FORMAT=JSON $q2;
+eval $without_filter ANALYZE $q2;
+--source include/analyze-format.inc
+eval $without_filter ANALYZE FORMAT=JSON $q2;
+--sorted_result
+eval $without_filter $q2;
+
+let $q3=
+SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+ FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+ l_quantity > 45 AND
+ o_totalprice between 180000 and 230000;
+
+eval $with_filter EXPLAIN $q3;
+eval $with_filter EXPLAIN FORMAT=JSON $q3;
+eval $with_filter ANALYZE $q3;
+--source include/analyze-format.inc
+eval $with_filter ANALYZE FORMAT=JSON $q3;
+--sorted_result
+eval $with_filter $q3;
+
+eval $without_filter EXPLAIN $q3;
+eval $without_filter EXPLAIN FORMAT=JSON $q3;
+eval $without_filter ANALYZE $q3;
+--source include/analyze-format.inc
+eval $without_filter ANALYZE FORMAT=JSON $q3;
+--sorted_result
+eval $without_filter $q3;
+
+let $q4=
+SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+ FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+ WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+ o_totalprice between 200000 and 230000;
+
+eval $with_filter EXPLAIN $q4;
+eval $with_filter EXPLAIN FORMAT=JSON $q4;
+eval $with_filter ANALYZE $q4;
+--source include/analyze-format.inc
+eval $with_filter ANALYZE FORMAT=JSON $q4;
+--sorted_result
+eval $with_filter $q4;
+
+eval $without_filter EXPLAIN $q4;
+eval $without_filter EXPLAIN FORMAT=JSON $q4;
+eval $without_filter ANALYZE $q4;
+--source include/analyze-format.inc
+eval $without_filter ANALYZE FORMAT=JSON $q4;
+--sorted_result
+eval $without_filter $q4;
+
+--echo #
+--echo # MDEV-18413: find constraint correlated indexes
+--echo #
+
+ALTER TABLE lineitem ADD CONSTRAINT l_date CHECK(l_shipdate < l_receiptdate);
+
+--echo # Filter on l_shipdate is not used because it participates in
+--echo # the same constraint as l_receiptdate.
+--echo # Access is made on l_receiptdate.
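+# (Illustrative note: the l_date CHECK constraint added above ties l_shipdate to
+# l_receiptdate, so the optimizer treats range conditions on the two columns as
+# correlated; a rowid filter built from i_l_shipdate on top of the
+# i_l_receiptdate range access is therefore presumed to add little extra
+# selectivity and is skipped.)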
+let $q5=
+SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+ l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+ l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+ o_totalprice BETWEEN 200000 AND 250000;
+
+eval $with_filter EXPLAIN $q5;
+eval $with_filter EXPLAIN FORMAT=JSON $q5;
+eval $with_filter ANALYZE $q5;
+--source include/analyze-format.inc
+eval $with_filter ANALYZE FORMAT=JSON $q5;
+--sorted_result
+eval $with_filter $q5;
+
+eval $without_filter EXPLAIN $q5;
+eval $without_filter EXPLAIN FORMAT=JSON $q5;
+eval $without_filter ANALYZE $q5;
+--source include/analyze-format.inc
+eval $without_filter ANALYZE FORMAT=JSON $q5;
+--sorted_result
+eval $without_filter $q5;
+
+ALTER TABLE orders ADD COLUMN o_totaldiscount double;
+UPDATE orders SET o_totaldiscount = o_totalprice*(o_custkey/1000);
+CREATE INDEX i_o_totaldiscount on orders(o_totaldiscount);
+
+ALTER TABLE orders ADD CONSTRAINT o_price CHECK(o_totalprice > o_totaldiscount);
+
+--echo # Filter on o_totalprice is not used because it participates in
+--echo # the same constraint as o_totaldiscount.
+--echo # Access is made on o_totaldiscount.
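+# (Same idea as above: the o_price CHECK constraint correlates o_totalprice with
+# o_totaldiscount, so a rowid filter built from i_o_totalprice on top of the
+# i_o_totaldiscount range access is presumed redundant and is not used.)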
+let $q6=
+SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+ o_totaldiscount BETWEEN 18000 AND 20000 AND
+ o_totalprice BETWEEN 200000 AND 220000 AND
+ l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+
+eval $with_filter EXPLAIN $q6;
+eval $with_filter EXPLAIN FORMAT=JSON $q6;
+eval $with_filter ANALYZE $q6;
+--source include/analyze-format.inc
+eval $with_filter ANALYZE FORMAT=JSON $q6;
+--sorted_result
+eval $with_filter $q6;
+
+eval $without_filter EXPLAIN $q6;
+eval $without_filter EXPLAIN FORMAT=JSON $q6;
+eval $without_filter ANALYZE $q6;
+--source include/analyze-format.inc
+eval $without_filter ANALYZE FORMAT=JSON $q6;
+--sorted_result
+eval $without_filter $q6;
+
+CREATE VIEW v1 AS
+SELECT * FROM orders
+WHERE o_orderdate BETWEEN '1992-12-01' AND '1997-01-01';
+
+let $q7=
+SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+ o_totaldiscount BETWEEN 18000 AND 20000 AND
+ o_totalprice BETWEEN 200000 AND 220000 AND
+ l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+
+eval $with_filter EXPLAIN $q7;
+eval $with_filter EXPLAIN FORMAT=JSON $q7;
+eval $with_filter ANALYZE $q7;
+--source include/analyze-format.inc
+eval $with_filter ANALYZE FORMAT=JSON $q7;
+--sorted_result
+eval $with_filter $q7;
+
+eval $without_filter EXPLAIN $q7;
+eval $without_filter EXPLAIN FORMAT=JSON $q7;
+eval $without_filter ANALYZE $q7;
+--source include/analyze-format.inc
+eval $without_filter ANALYZE FORMAT=JSON $q7;
+--sorted_result
+eval $without_filter $q7;
+
+ALTER TABLE lineitem DROP CONSTRAINT l_date;
+ALTER TABLE orders DROP CONSTRAINT o_price;
+ALTER TABLE orders DROP COLUMN o_totaldiscount;
+DROP VIEW v1;
+
+DROP DATABASE dbt3_s001;
+
+use test;
+
+--echo #
+--echo # MDEV-18816: potential range filter for one join table with
+--echo # impossible WHERE for another
+--echo #
+
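+# The WHERE below ("t1_a.pk is null and t1_a.i1 != 3") can only be satisfied by a
+# NULL-complemented row of the left join, and for such a row t1_a.i1 is NULL as
+# well, so the condition is contradictory and the expected plan is
+# "Impossible WHERE noticed after reading const tables". The test guards the
+# interaction between a potential range filter for the other join table and this
+# early-detected impossible WHERE.
+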
+create table t1 (
+ pk int not null primary key, c2 varchar(10) , i1 int,key (c2)
+) engine=myisam;
+insert into t1 values (1,'a',-5),(2,'a',null);
+
+create table t2 (
+ pk int, i1 int, c1 varchar(30) , key c1 (c1(30)), key i1 (i1)
+) engine=myisam;
+insert into t2 values
+ (1,-5,'a'),(2,null,'a'),(3,null,'a'),(4,null,'a'),(5,5,'a'),(6,null,'a'),
+ (7,4,'a'),(8,55,'a'),(9,null,'a'),(10,null,'a'),(11,null,'a'),(12,-5,'a'),
+ (13,-5,'a'),(14,null,'a'),(15,null,'a'),(16,-5,'a'),(17,-5,'a');
+
+let $q=
+select 1
+ from t1
+ left join
+ t2 join t1 as t1_a on t2.i1 = t1_a.pk
+ on t1.c2 = t2.c1
+where t1_a.pk is null and t1_a.i1 != 3;
+
+eval $q;
+eval explain extended $q;
+
+drop table t1,t2;
+
+--echo #
+--echo # MDEV-18640: TABLE::prune_range_rowid_filters: Conditional jump or
+--echo # move depends on uninitialized value
+--echo #
+
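+# The MDEV title quotes a Valgrind-style warning ("Conditional jump or move
+# depends on uninitialized value" in TABLE::prune_range_rowid_filters), so the
+# regression check here is simply that the range query below runs cleanly and
+# returns the expected rows.
+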
+CREATE TABLE t1 (
+ pk INT, i INT, PRIMARY KEY (pk), KEY (pk,i)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,10), (7,70), (2,20);
+
+SELECT * FROM t1 WHERE pk < 5;
+
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-18956: Possible rowid filter for subquery for which
+--echo # in_to_exists strategy has been chosen
+--echo #
+
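+# With the in_to_exists strategy the predicate "(7, 9) IN (SELECT ...)" is
+# rewritten into a correlated EXISTS that is re-evaluated per outer row, so no
+# rowid filter should be built for the subquery table. For this particular data
+# set the expected plan degenerates to "Impossible HAVING" /
+# "no matching row in const table".
+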
+CREATE TABLE t1 (pk int) engine=myisam ;
+INSERT INTO t1 VALUES (1),(2);
+
+CREATE TABLE t2 (
+ pk int auto_increment PRIMARY KEY,
+ i1 int, i2 int, c2 varchar(1),
+ KEY (i1), KEY (i2)
+) engine=myisam;
+
+INSERT INTO t2 VALUES
+ (1,8,6,'t'),(2,5,7,'i'),(3,4,4,'h'),(4,207,38,'d'),(5,183,206,'b'),
+ (6,7,null,'o'),(7,1,2,'j'),(8,17,36,'s'),(9,4,5,'q'),(10,0,6,'l'),
+ (11,1,9,'j'),(12,5,6,'y'),(13,null,0,'i'),(14,7,7,'x'),(15,5,2,'u');
+
+SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
+EXPLAIN EXTENDED
+SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
+
+DROP TABLE t1,t2;
+
+--echo #
+--echo # MDEV-19255: rowid range filter built for range condition
+--echo # that uses an expensive subquery
+--echo #
+
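+# The range condition on b1 below compares against a (potentially expensive)
+# scalar subquery, "b1 <= (SELECT MAX(b2) FROM t2 WHERE pk2 <= 1)"; the expected
+# plan in rowid_filter.result still combines eq_ref access on t1 with a rowid
+# filter built from key b1 (shown as "eq_ref|filter ... (87%)").
+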
+CREATE TABLE t1 (
+ pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES
+(10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'),
+(17,1,'f'),(18,5,'v'),(19,3,'f'),(20,2,'q'),(21,8,'y'),(22,0,'a'),(23,9,'w'),
+(24,3,'e'),(25,1,'b'),(26,9,'r'),(27,2,'k'),(28,5,'c'),(29,3,'k'),(30,9,'b'),
+(31,8,'j'),(32,1,'t'),(33,8,'n'),(34,3,'z'),(35,0,'u'),(36,3,'a'),(37,3,'g'),
+(38,1,'f'),(39,6,'p'),(40,6,'m'),(41,6,'t'),(42,7,'i'),(43,4,'h'),(44,3,'d'),
+(45,2,'b'),(46,1,'o'),(47,2,'j'),(48,6,'s'),(49,5,'q'),(50,6,'l'),(51,9,'j'),
+(52,6,'y'),(53,0,'i'),(54,7,'x'),(55,2,'u'),(56,6,'t'),(57,4,'b'),(58,5,'m'),
+(59,4,'x'),(60,8,'x'),(61,6,'v'),(62,8,'m'),(63,4,'j'),(64,8,'z'),(65,2,'a'),
+(66,9,'i'),(67,4,'g'),(68,8,'h'),(69,1,'p'),(70,8,'a'),(71,0,'x'),(72,2,'s'),
+(73,6,'k'),(74,0,'m'),(75,6,'e'),(76,9,'y'),(77,7,'d'),(78,7,'w'),(79,6,'y'),
+(80,9,'s'),(81,9,'x'),(82,6,'l'),(83,9,'f'),(84,8,'x'),(85,1,'p'),(86,7,'y'),
+(87,6,'p'),(88,1,'g'),(89,3,'c'),(90,5,'h'),(91,3,'p'),(92,2,'b'),(93,1,NULL),
+(94,3,NULL),(95,2,'y'),(96,7,'s'),(97,7,'x'),(98,6,'i'),(99,9,'t'),(100,5,'j'),
+(101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'),
+(107,8,'z'),(108,3,'k'),(109,65,NULL);
+
+CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1,1,'x');
+INSERT INTO t2 SELECT * FROM t1;
+
+let $q=
+SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 )
+ WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
+
+eval $q;
+eval EXPLAIN EXTENDED $q;
+eval EXPLAIN FORMAT=JSON $q;
+
+DROP TABLE t1,t2;
+
+set @@use_stat_tables=@save_use_stat_tables;
diff --git a/mysql-test/main/rowid_filter_innodb.result b/mysql-test/main/rowid_filter_innodb.result
new file mode 100644
index 00000000000..54c7e03f13a
--- /dev/null
+++ b/mysql-test/main/rowid_filter_innodb.result
@@ -0,0 +1,2165 @@
+SET SESSION STORAGE_ENGINE='InnoDB';
+DROP DATABASE IF EXISTS dbt3_s001;
+CREATE DATABASE dbt3_s001;
+use dbt3_s001;
+CREATE INDEX i_l_quantity ON lineitem(l_quantity);
+CREATE INDEX i_o_totalprice ON orders(o_totalprice);
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables=preferably;
+ANALYZE TABLE lineitem, orders;
+show create table lineitem;
+Table Create Table
+lineitem CREATE TABLE `lineitem` (
+ `l_orderkey` int(11) NOT NULL DEFAULT 0,
+ `l_partkey` int(11) DEFAULT NULL,
+ `l_suppkey` int(11) DEFAULT NULL,
+ `l_linenumber` int(11) NOT NULL DEFAULT 0,
+ `l_quantity` double DEFAULT NULL,
+ `l_extendedprice` double DEFAULT NULL,
+ `l_discount` double DEFAULT NULL,
+ `l_tax` double DEFAULT NULL,
+ `l_returnflag` char(1) DEFAULT NULL,
+ `l_linestatus` char(1) DEFAULT NULL,
+ `l_shipDATE` date DEFAULT NULL,
+ `l_commitDATE` date DEFAULT NULL,
+ `l_receiptDATE` date DEFAULT NULL,
+ `l_shipinstruct` char(25) DEFAULT NULL,
+ `l_shipmode` char(10) DEFAULT NULL,
+ `l_comment` varchar(44) DEFAULT NULL,
+ PRIMARY KEY (`l_orderkey`,`l_linenumber`),
+ KEY `i_l_shipdate` (`l_shipDATE`),
+ KEY `i_l_suppkey_partkey` (`l_partkey`,`l_suppkey`),
+ KEY `i_l_partkey` (`l_partkey`),
+ KEY `i_l_suppkey` (`l_suppkey`),
+ KEY `i_l_receiptdate` (`l_receiptDATE`),
+ KEY `i_l_orderkey` (`l_orderkey`),
+ KEY `i_l_orderkey_quantity` (`l_orderkey`,`l_quantity`),
+ KEY `i_l_commitdate` (`l_commitDATE`),
+ KEY `i_l_quantity` (`l_quantity`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+show create table orders;
+Table Create Table
+orders CREATE TABLE `orders` (
+ `o_orderkey` int(11) NOT NULL,
+ `o_custkey` int(11) DEFAULT NULL,
+ `o_orderstatus` char(1) DEFAULT NULL,
+ `o_totalprice` double DEFAULT NULL,
+ `o_orderDATE` date DEFAULT NULL,
+ `o_orderpriority` char(15) DEFAULT NULL,
+ `o_clerk` char(15) DEFAULT NULL,
+ `o_shippriority` int(11) DEFAULT NULL,
+ `o_comment` varchar(79) DEFAULT NULL,
+ PRIMARY KEY (`o_orderkey`),
+ KEY `i_o_orderdate` (`o_orderDATE`),
+ KEY `i_o_custkey` (`o_custkey`),
+ KEY `i_o_totalprice` (`o_totalprice`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+set optimizer_use_condition_selectivity=2;
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 510 (10%) Using index condition; Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": ["i_l_shipdate", "i_l_quantity"],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_quantity",
+ "used_key_parts": ["l_quantity"]
+ },
+ "rows": 605,
+ "selectivity_pct": 10.075
+ },
+ "rows": 510,
+ "filtered": 10.075,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 45"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range|filter i_l_shipdate,i_l_quantity i_l_shipdate|i_l_quantity 4|9 NULL 510 (10%) 60.00 (11%) 10.07 100.00 Using index condition; Using where; Using rowid filter
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": ["i_l_shipdate", "i_l_quantity"],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rowid_filter": {
+ "range": {
+ "key": "i_l_quantity",
+ "used_key_parts": ["l_quantity"]
+ },
+ "rows": 605,
+ "selectivity_pct": 10.075,
+ "r_rows": 605,
+ "r_selectivity_pct": 11.765,
+ "r_buffer_size": "REPLACED",
+ "r_filling_time_ms": "REPLACED"
+ },
+ "r_loops": 1,
+ "rows": 510,
+ "r_rows": 60,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 10.075,
+ "r_filtered": 100,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 45"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+l_orderkey l_linenumber l_shipdate l_quantity
+1121 5 1997-04-27 47
+1121 6 1997-04-21 50
+1441 7 1997-06-07 50
+1443 1 1997-02-05 47
+1473 1 1997-05-05 50
+1568 2 1997-04-06 46
+1632 1 1997-01-25 47
+1632 3 1997-01-29 47
+1954 7 1997-06-04 49
+1959 1 1997-05-05 46
+2151 3 1997-01-20 49
+2177 5 1997-05-10 46
+2369 2 1997-01-02 47
+2469 3 1997-01-11 48
+2469 6 1997-03-03 49
+2470 2 1997-06-02 50
+260 1 1997-03-24 50
+288 2 1997-04-19 49
+289 4 1997-03-14 48
+3009 1 1997-03-19 48
+3105 3 1997-02-28 48
+3106 2 1997-02-27 49
+3429 1 1997-04-08 48
+3490 2 1997-06-27 50
+3619 1 1997-01-22 49
+3619 3 1997-01-31 46
+3969 3 1997-05-29 46
+4005 4 1997-01-31 49
+4036 1 1997-06-21 46
+4066 4 1997-02-17 49
+4098 1 1997-01-26 46
+422 3 1997-06-21 46
+4258 3 1997-01-02 46
+4421 2 1997-04-21 46
+4421 3 1997-05-25 46
+4453 3 1997-05-29 48
+4484 7 1997-03-17 50
+4609 3 1997-02-11 46
+484 1 1997-03-06 49
+484 3 1997-01-24 50
+484 5 1997-03-05 48
+485 1 1997-03-28 50
+4868 1 1997-04-29 47
+4868 3 1997-04-23 49
+4934 1 1997-05-20 48
+4967 1 1997-05-27 50
+5090 2 1997-04-05 46
+5152 2 1997-03-10 50
+5158 4 1997-04-10 49
+5606 3 1997-03-11 46
+5606 7 1997-02-01 46
+5762 4 1997-03-02 47
+581 3 1997-02-27 49
+5829 5 1997-01-31 49
+5831 4 1997-02-24 46
+5895 2 1997-04-27 47
+5895 3 1997-03-15 49
+5952 1 1997-06-30 49
+705 1 1997-04-18 46
+836 3 1997-03-21 46
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_shipdate 4 NULL 510 Using index condition; Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": ["i_l_shipdate", "i_l_quantity"],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rows": 510,
+ "filtered": 10.075,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 45"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range i_l_shipdate,i_l_quantity i_l_shipdate 4 NULL 510 510.00 10.07 11.76 Using index condition; Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": ["i_l_shipdate", "i_l_quantity"],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "r_loops": 1,
+ "rows": 510,
+ "r_rows": 510,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 10.075,
+ "r_filtered": 11.765,
+ "index_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'",
+ "attached_condition": "lineitem.l_quantity > 45"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT l_orderkey, l_linenumber, l_shipdate, l_quantity FROM lineitem
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45;
+l_orderkey l_linenumber l_shipdate l_quantity
+1121 5 1997-04-27 47
+1121 6 1997-04-21 50
+1441 7 1997-06-07 50
+1443 1 1997-02-05 47
+1473 1 1997-05-05 50
+1568 2 1997-04-06 46
+1632 1 1997-01-25 47
+1632 3 1997-01-29 47
+1954 7 1997-06-04 49
+1959 1 1997-05-05 46
+2151 3 1997-01-20 49
+2177 5 1997-05-10 46
+2369 2 1997-01-02 47
+2469 3 1997-01-11 48
+2469 6 1997-03-03 49
+2470 2 1997-06-02 50
+260 1 1997-03-24 50
+288 2 1997-04-19 49
+289 4 1997-03-14 48
+3009 1 1997-03-19 48
+3105 3 1997-02-28 48
+3106 2 1997-02-27 49
+3429 1 1997-04-08 48
+3490 2 1997-06-27 50
+3619 1 1997-01-22 49
+3619 3 1997-01-31 46
+3969 3 1997-05-29 46
+4005 4 1997-01-31 49
+4036 1 1997-06-21 46
+4066 4 1997-02-17 49
+4098 1 1997-01-26 46
+422 3 1997-06-21 46
+4258 3 1997-01-02 46
+4421 2 1997-04-21 46
+4421 3 1997-05-25 46
+4453 3 1997-05-29 48
+4484 7 1997-03-17 50
+4609 3 1997-02-11 46
+484 1 1997-03-06 49
+484 3 1997-01-24 50
+484 5 1997-03-05 48
+485 1 1997-03-28 50
+4868 1 1997-04-29 47
+4868 3 1997-04-23 49
+4934 1 1997-05-20 48
+4967 1 1997-05-27 50
+5090 2 1997-04-05 46
+5152 2 1997-03-10 50
+5158 4 1997-04-10 49
+5606 3 1997-03-11 46
+5606 7 1997-02-01 46
+5762 4 1997-03-02 47
+581 3 1997-02-27 49
+5829 5 1997-01-31 49
+5831 4 1997-02-24 46
+5895 2 1997-04-27 47
+5895 3 1997-03-15 49
+5952 1 1997-06-30 49
+705 1 1997-04-18 46
+836 3 1997-03-21 46
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 Using where; Using index
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rows": 98,
+ "filtered": 100,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'",
+ "using_index": true
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rows": 1,
+ "filtered": 4.7333,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 98.00 100.00 100.00 Using where; Using index
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 4.73 11.22 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "r_loops": 1,
+ "rows": 98,
+ "r_rows": 98,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'",
+ "using_index": true
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "r_loops": 98,
+ "rows": 1,
+ "r_rows": 1,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 4.7333,
+ "r_filtered": 11.224,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+o_orderkey l_linenumber l_shipdate o_totalprice
+1156 3 1997-01-24 217682.81
+1156 4 1997-01-18 217682.81
+1156 6 1997-01-27 217682.81
+1156 7 1997-01-01 217682.81
+2180 2 1997-01-03 208481.57
+2180 3 1997-01-03 208481.57
+3619 1 1997-01-22 222274.54
+3619 3 1997-01-31 222274.54
+3619 6 1997-01-25 222274.54
+484 3 1997-01-24 219920.62
+5606 6 1997-01-11 219959.08
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 Using where; Using index
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "rows": 98,
+ "filtered": 100,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'",
+ "using_index": true
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rows": 1,
+ "filtered": 4.7333,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity i_l_shipdate 4 NULL 98 98.00 100.00 100.00 Using where; Using index
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 4.73 11.22 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_shipdate",
+ "key_length": "4",
+ "used_key_parts": ["l_shipDATE"],
+ "r_loops": 1,
+ "rows": 98,
+ "r_rows": 98,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-01-31'",
+ "using_index": true
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "r_loops": 98,
+ "rows": 1,
+ "r_rows": 1,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 4.7333,
+ "r_filtered": 11.224,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-01-31' AND
+o_totalprice between 200000 and 230000;
+o_orderkey l_linenumber l_shipdate o_totalprice
+1156 3 1997-01-24 217682.81
+1156 4 1997-01-18 217682.81
+1156 6 1997-01-27 217682.81
+1156 7 1997-01-01 217682.81
+2180 2 1997-01-03 208481.57
+2180 3 1997-01-03 208481.57
+3619 1 1997-01-22 222274.54
+3619 3 1997-01-31 222274.54
+3619 6 1997-01-25 222274.54
+484 3 1997-01-24 219920.62
+5606 6 1997-01-11 219959.08
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 144 Using where; Using index
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "rows": 144,
+ "filtered": 100,
+ "attached_condition": "orders.o_totalprice between 180000 and 230000",
+ "using_index": true
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity",
+ "i_l_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 0.8557,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30' and lineitem.l_quantity > 45"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 144 144.00 100.00 100.00 Using where; Using index
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.62 0.86 1.68 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "r_loops": 1,
+ "rows": 144,
+ "r_rows": 144,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "orders.o_totalprice between 180000 and 230000",
+ "using_index": true
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity",
+ "i_l_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 144,
+ "rows": 4,
+ "r_rows": 6.625,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 0.8557,
+ "r_filtered": 1.6771,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30' and lineitem.l_quantity > 45"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+o_orderkey l_linenumber l_shipdate l_quantity o_totalprice
+1632 1 1997-01-25 47 183286.33
+1632 3 1997-01-29 47 183286.33
+2177 5 1997-05-10 46 183493.42
+2469 3 1997-01-11 48 192074.23
+2469 6 1997-03-03 49 192074.23
+3619 1 1997-01-22 49 222274.54
+3619 3 1997-01-31 46 222274.54
+484 1 1997-03-06 49 219920.62
+484 3 1997-01-24 50 219920.62
+484 5 1997-03-05 48 219920.62
+4934 1 1997-05-20 48 180478.16
+5606 3 1997-03-11 46 219959.08
+5606 7 1997-02-01 46 219959.08
+5829 5 1997-01-31 49 183734.56
+5895 2 1997-04-27 47 201419.83
+5895 3 1997-03-15 49 201419.83
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 144 Using where; Using index
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "rows": 144,
+ "filtered": 100,
+ "attached_condition": "orders.o_totalprice between 180000 and 230000",
+ "using_index": true
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity",
+ "i_l_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 0.8557,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30' and lineitem.l_quantity > 45"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 144 144.00 100.00 100.00 Using where; Using index
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity,i_l_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.62 0.86 1.68 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "r_loops": 1,
+ "rows": 144,
+ "r_rows": 144,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "orders.o_totalprice between 180000 and 230000",
+ "using_index": true
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity",
+ "i_l_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 144,
+ "rows": 4,
+ "r_rows": 6.625,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 0.8557,
+ "r_filtered": 1.6771,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30' and lineitem.l_quantity > 45"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_orderkey, l_linenumber, l_shipdate, l_quantity, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+l_quantity > 45 AND
+o_totalprice between 180000 and 230000;
+o_orderkey l_linenumber l_shipdate l_quantity o_totalprice
+1632 1 1997-01-25 47 183286.33
+1632 3 1997-01-29 47 183286.33
+2177 5 1997-05-10 46 183493.42
+2469 3 1997-01-11 48 192074.23
+2469 6 1997-03-03 49 192074.23
+3619 1 1997-01-22 49 222274.54
+3619 3 1997-01-31 46 222274.54
+484 1 1997-03-06 49 219920.62
+484 3 1997-01-24 50 219920.62
+484 5 1997-03-05 48 219920.62
+4934 1 1997-05-20 48 180478.16
+5606 3 1997-03-11 46 219959.08
+5606 7 1997-02-01 46 219959.08
+5829 5 1997-01-31 49 183734.56
+5895 2 1997-04-27 47 201419.83
+5895 3 1997-03-15 49 201419.83
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 71 Using where; Using index
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "rows": 71,
+ "filtered": 100,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000",
+ "using_index": true
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 8.4929,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 71 71.00 100.00 100.00 Using where; Using index
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.70 8.49 7.77 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "r_loops": 1,
+ "rows": 71,
+ "r_rows": 71,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000",
+ "using_index": true
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 71,
+ "rows": 4,
+ "r_rows": 6.7042,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 8.4929,
+ "r_filtered": 7.7731,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+o_orderkey l_linenumber l_shipdate o_totalprice
+1156 3 1997-01-24 217682.81
+1156 4 1997-01-18 217682.81
+1156 6 1997-01-27 217682.81
+1156 7 1997-01-01 217682.81
+1890 1 1997-04-02 202364.58
+1890 3 1997-02-09 202364.58
+1890 4 1997-04-08 202364.58
+1890 5 1997-04-15 202364.58
+1890 6 1997-02-13 202364.58
+2180 2 1997-01-03 208481.57
+2180 3 1997-01-03 208481.57
+3619 1 1997-01-22 222274.54
+3619 3 1997-01-31 222274.54
+3619 4 1997-03-18 222274.54
+3619 6 1997-01-25 222274.54
+453 1 1997-06-30 216826.73
+453 2 1997-06-30 216826.73
+484 1 1997-03-06 219920.62
+484 2 1997-04-09 219920.62
+484 3 1997-01-24 219920.62
+484 4 1997-04-29 219920.62
+484 5 1997-03-05 219920.62
+484 6 1997-04-06 219920.62
+5606 2 1997-02-23 219959.08
+5606 3 1997-03-11 219959.08
+5606 4 1997-02-06 219959.08
+5606 6 1997-01-11 219959.08
+5606 7 1997-02-01 219959.08
+5859 2 1997-05-15 210643.96
+5859 5 1997-05-28 210643.96
+5859 6 1997-06-15 210643.96
+5895 1 1997-04-05 201419.83
+5895 2 1997-04-27 201419.83
+5895 3 1997-03-15 201419.83
+5895 4 1997-03-03 201419.83
+5895 5 1997-04-30 201419.83
+5895 6 1997-04-19 201419.83
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 71 Using where; Using index
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "rows": 71,
+ "filtered": 100,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000",
+ "using_index": true
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 8.4929,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice i_o_totalprice 9 NULL 71 71.00 100.00 100.00 Using where; Using index
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.70 8.49 7.77 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "i_o_totalprice",
+ "key_length": "9",
+ "used_key_parts": ["o_totalprice"],
+ "r_loops": 1,
+ "rows": 71,
+ "r_rows": 71,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "attached_condition": "orders.o_totalprice between 200000 and 230000",
+ "using_index": true
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 71,
+ "rows": 4,
+ "r_rows": 6.7042,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 8.4929,
+ "r_filtered": 7.7731,
+ "attached_condition": "lineitem.l_shipDATE between '1997-01-01' and '1997-06-30'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_orderkey, l_linenumber, l_shipdate, o_totalprice
+FROM orders JOIN lineitem ON o_orderkey=l_orderkey
+WHERE l_shipdate BETWEEN '1997-01-01' AND '1997-06-30' AND
+o_totalprice between 200000 and 230000;
+o_orderkey l_linenumber l_shipdate o_totalprice
+1156 3 1997-01-24 217682.81
+1156 4 1997-01-18 217682.81
+1156 6 1997-01-27 217682.81
+1156 7 1997-01-01 217682.81
+1890 1 1997-04-02 202364.58
+1890 3 1997-02-09 202364.58
+1890 4 1997-04-08 202364.58
+1890 5 1997-04-15 202364.58
+1890 6 1997-02-13 202364.58
+2180 2 1997-01-03 208481.57
+2180 3 1997-01-03 208481.57
+3619 1 1997-01-22 222274.54
+3619 3 1997-01-31 222274.54
+3619 4 1997-03-18 222274.54
+3619 6 1997-01-25 222274.54
+453 1 1997-06-30 216826.73
+453 2 1997-06-30 216826.73
+484 1 1997-03-06 219920.62
+484 2 1997-04-09 219920.62
+484 3 1997-01-24 219920.62
+484 4 1997-04-29 219920.62
+484 5 1997-03-05 219920.62
+484 6 1997-04-06 219920.62
+5606 2 1997-02-23 219959.08
+5606 3 1997-03-11 219959.08
+5606 4 1997-02-06 219959.08
+5606 6 1997-01-11 219959.08
+5606 7 1997-02-01 219959.08
+5859 2 1997-05-15 210643.96
+5859 5 1997-05-28 210643.96
+5859 6 1997-06-15 210643.96
+5895 1 1997-04-05 201419.83
+5895 2 1997-04-27 201419.83
+5895 3 1997-03-15 201419.83
+5895 4 1997-03-03 201419.83
+5895 5 1997-04-30 201419.83
+5895 6 1997-04-19 201419.83
+#
+# MDEV-18413: find constraint correlated indexes
+#
+ALTER TABLE lineitem ADD CONSTRAINT l_date CHECK(l_shipdate < l_receiptdate);
+# Filter on l_shipdate is not used because it participates in
+# the same constraint as l_receiptdate.
+# Access is made on l_receiptdate.
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 18 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_receiptdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_receiptdate",
+ "key_length": "4",
+ "used_key_parts": ["l_receiptDATE"],
+ "rows": 18,
+ "filtered": 0.5662,
+ "index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rows": 1,
+ "filtered": 5.6667,
+ "attached_condition": "orders.o_totalprice between 200000 and 250000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 18 18.00 0.57 38.89 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 5.67 14.29 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_receiptdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_receiptdate",
+ "key_length": "4",
+ "used_key_parts": ["l_receiptDATE"],
+ "r_loops": 1,
+ "rows": 18,
+ "r_rows": 18,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 0.5662,
+ "r_filtered": 38.889,
+ "index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "r_loops": 7,
+ "rows": 1,
+ "r_rows": 1,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 5.6667,
+ "r_filtered": 14.286,
+ "attached_condition": "orders.o_totalprice between 200000 and 250000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+l_shipdate l_receiptdate o_totalprice
+1996-10-07 1996-10-08 202623.92
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 18 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_receiptdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_receiptdate",
+ "key_length": "4",
+ "used_key_parts": ["l_receiptDATE"],
+ "rows": 18,
+ "filtered": 0.5662,
+ "index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "rows": 1,
+ "filtered": 5.6667,
+ "attached_condition": "orders.o_totalprice between 200000 and 250000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE lineitem range PRIMARY,i_l_shipdate,i_l_receiptdate,i_l_orderkey,i_l_orderkey_quantity i_l_receiptdate 4 NULL 18 18.00 0.57 38.89 Using index condition; Using where
+1 SIMPLE orders eq_ref PRIMARY,i_o_totalprice PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 1.00 5.67 14.29 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_receiptdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "i_l_receiptdate",
+ "key_length": "4",
+ "used_key_parts": ["l_receiptDATE"],
+ "r_loops": 1,
+ "rows": 18,
+ "r_rows": 18,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 0.5662,
+ "r_filtered": 38.889,
+ "index_condition": "lineitem.l_receiptDATE between '1996-10-05' and '1996-10-10'",
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-10-10'"
+ },
+ "table": {
+ "table_name": "orders",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "i_o_totalprice"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["o_orderkey"],
+ "ref": ["dbt3_s001.lineitem.l_orderkey"],
+ "r_loops": 7,
+ "rows": 1,
+ "r_rows": 1,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 5.6667,
+ "r_filtered": 14.286,
+ "attached_condition": "orders.o_totalprice between 200000 and 250000"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT l_shipdate, l_receiptdate, o_totalprice
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-10-10' AND
+l_receiptdate BETWEEN '1996-10-05' AND '1996-10-10' AND
+o_totalprice BETWEEN 200000 AND 250000;
+l_shipdate l_receiptdate o_totalprice
+1996-10-07 1996-10-08 202623.92
+ALTER TABLE orders ADD COLUMN o_totaldiscount double;
+UPDATE orders SET o_totaldiscount = o_totalprice*(o_custkey/1000);
+CREATE INDEX i_o_totaldiscount on orders(o_totaldiscount);
+ALTER TABLE orders ADD CONSTRAINT o_price CHECK(o_totalprice > o_totaldiscount);
+# Filter on o_totalprice is not used because it participates in
+# the same constraint as o_totaldiscount.
+# Access is made on o_totaldiscount.
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 41 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice", "i_o_totaldiscount"],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "rows": 41,
+ "filtered": 3.3333,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 3.0475,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 41 41.00 3.33 2.44 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice", "i_o_totaldiscount"],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "r_loops": 1,
+ "rows": 41,
+ "r_rows": 41,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 3.3333,
+ "r_filtered": 2.439,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 6,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 3.0475,
+ "r_filtered": 66.667,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+o_totaldiscount o_totalprice l_shipdate
+18016.04288 219707.84 1996-10-02
+18016.04288 219707.84 1996-10-17
+18016.04288 219707.84 1996-11-04
+18016.04288 219707.84 1996-11-14
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 41 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice", "i_o_totaldiscount"],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "rows": 41,
+ "filtered": 3.3333,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 3.0475,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 41 41.00 3.33 2.44 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY", "i_o_totalprice", "i_o_totaldiscount"],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "r_loops": 1,
+ "rows": 41,
+ "r_rows": 41,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 3.3333,
+ "r_filtered": 2.439,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 6,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 3.0475,
+ "r_filtered": 66.667,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM orders, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+o_totaldiscount o_totalprice l_shipdate
+18016.04288 219707.84 1996-10-02
+18016.04288 219707.84 1996-10-17
+18016.04288 219707.84 1996-11-04
+18016.04288 219707.84 1996-11-14
+CREATE VIEW v1 AS
+SELECT * FROM orders
+WHERE o_orderdate BETWEEN '1992-12-01' AND '1997-01-01';
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 41 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=on' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_o_orderdate",
+ "i_o_totalprice",
+ "i_o_totaldiscount"
+ ],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "rows": 41,
+ "filtered": 2.0711,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 3.0475,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for ANALYZE SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 41 41.00 2.07 2.44 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
+set statement optimizer_switch='rowid_filter=on' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_o_orderdate",
+ "i_o_totalprice",
+ "i_o_totaldiscount"
+ ],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "r_loops": 1,
+ "rows": 41,
+ "r_rows": 41,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 2.0711,
+ "r_filtered": 2.439,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 6,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 3.0475,
+ "r_filtered": 66.667,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=on' for SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+o_totaldiscount o_totalprice l_shipdate
+18016.04288 219707.84 1996-10-02
+18016.04288 219707.84 1996-10-17
+18016.04288 219707.84 1996-11-04
+18016.04288 219707.84 1996-11-14
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 41 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
+set statement optimizer_switch='rowid_filter=off' for EXPLAIN FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_o_orderdate",
+ "i_o_totalprice",
+ "i_o_totaldiscount"
+ ],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "rows": 41,
+ "filtered": 2.0711,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "rows": 4,
+ "filtered": 3.0475,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for ANALYZE SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_totalprice,i_o_totaldiscount i_o_totaldiscount 9 NULL 41 41.00 2.07 2.44 Using index condition; Using where
+1 SIMPLE lineitem ref PRIMARY,i_l_shipdate,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 6.00 3.05 66.67 Using where
+set statement optimizer_switch='rowid_filter=off' for ANALYZE FORMAT=JSON SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "orders",
+ "access_type": "range",
+ "possible_keys": [
+ "PRIMARY",
+ "i_o_orderdate",
+ "i_o_totalprice",
+ "i_o_totaldiscount"
+ ],
+ "key": "i_o_totaldiscount",
+ "key_length": "9",
+ "used_key_parts": ["o_totaldiscount"],
+ "r_loops": 1,
+ "rows": 41,
+ "r_rows": 41,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 2.0711,
+ "r_filtered": 2.439,
+ "index_condition": "orders.o_totaldiscount between 18000 and 20000",
+ "attached_condition": "orders.o_totalprice between 200000 and 220000 and orders.o_orderDATE between '1992-12-01' and '1997-01-01'"
+ },
+ "table": {
+ "table_name": "lineitem",
+ "access_type": "ref",
+ "possible_keys": [
+ "PRIMARY",
+ "i_l_shipdate",
+ "i_l_orderkey",
+ "i_l_orderkey_quantity"
+ ],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["l_orderkey"],
+ "ref": ["dbt3_s001.orders.o_orderkey"],
+ "r_loops": 1,
+ "rows": 4,
+ "r_rows": 6,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 3.0475,
+ "r_filtered": 66.667,
+ "attached_condition": "lineitem.l_shipDATE between '1996-10-01' and '1996-12-01'"
+ }
+ }
+}
+set statement optimizer_switch='rowid_filter=off' for SELECT o_totaldiscount, o_totalprice, l_shipdate
+FROM v1, lineitem
+WHERE o_orderkey=l_orderkey AND
+o_totaldiscount BETWEEN 18000 AND 20000 AND
+o_totalprice BETWEEN 200000 AND 220000 AND
+l_shipdate BETWEEN '1996-10-01' AND '1996-12-01';
+o_totaldiscount o_totalprice l_shipdate
+18016.04288 219707.84 1996-10-02
+18016.04288 219707.84 1996-10-17
+18016.04288 219707.84 1996-11-04
+18016.04288 219707.84 1996-11-14
+ALTER TABLE lineitem DROP CONSTRAINT l_date;
+ALTER TABLE orders DROP CONSTRAINT o_price;
+ALTER TABLE orders DROP COLUMN o_totaldiscount;
+DROP VIEW v1;
+DROP DATABASE dbt3_s001;
+use test;
+#
+# MDEV-18816: potential range filter for one join table with
+# impossible WHERE for another
+#
+create table t1 (
+pk int not null primary key, c2 varchar(10) , i1 int,key (c2)
+) engine=myisam;
+insert into t1 values (1,'a',-5),(2,'a',null);
+create table t2 (
+pk int, i1 int, c1 varchar(30) , key c1 (c1(30)), key i1 (i1)
+) engine=myisam;
+insert into t2 values
+(1,-5,'a'),(2,null,'a'),(3,null,'a'),(4,null,'a'),(5,5,'a'),(6,null,'a'),
+(7,4,'a'),(8,55,'a'),(9,null,'a'),(10,null,'a'),(11,null,'a'),(12,-5,'a'),
+(13,-5,'a'),(14,null,'a'),(15,null,'a'),(16,-5,'a'),(17,-5,'a');
+select 1
+from t1
+left join
+t2 join t1 as t1_a on t2.i1 = t1_a.pk
+on t1.c2 = t2.c1
+where t1_a.pk is null and t1_a.i1 != 3;
+1
+explain extended select 1
+from t1
+left join
+t2 join t1 as t1_a on t2.i1 = t1_a.pk
+on t1.c2 = t2.c1
+where t1_a.pk is null and t1_a.i1 != 3;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+Warnings:
+Note 1003 select 1 AS `1` from `test`.`t1` join `test`.`t2` join `test`.`t1` `t1_a` where 0
+drop table t1,t2;
+#
+# MDEV-18640: TABLE::prune_range_rowid_filters: Conditional jump or
+# move depends on uninitialized value
+#
+CREATE TABLE t1 (
+pk INT, i INT, PRIMARY KEY (pk), KEY (pk,i)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,10), (7,70), (2,20);
+SELECT * FROM t1 WHERE pk < 5;
+pk i
+1 10
+2 20
+DROP TABLE t1;
+#
+# MDEV-18956: Possible rowid filter for subquery for which
+# in_to_exists strategy has been chosen
+#
+CREATE TABLE t1 (pk int) engine=myisam ;
+INSERT INTO t1 VALUES (1),(2);
+CREATE TABLE t2 (
+pk int auto_increment PRIMARY KEY,
+i1 int, i2 int, c2 varchar(1),
+KEY (i1), KEY (i2)
+) engine=myisam;
+INSERT INTO t2 VALUES
+(1,8,6,'t'),(2,5,7,'i'),(3,4,4,'h'),(4,207,38,'d'),(5,183,206,'b'),
+(6,7,null,'o'),(7,1,2,'j'),(8,17,36,'s'),(9,4,5,'q'),(10,0,6,'l'),
+(11,1,9,'j'),(12,5,6,'y'),(13,null,0,'i'),(14,7,7,'x'),(15,5,2,'u');
+SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
+pk
+EXPLAIN EXTENDED
+SELECT * FROM t1 HAVING (7, 9) IN (SELECT t2.i1, t2.i2 FROM t2 WHERE t2.i1 = 3);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING
+2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL NULL no matching row in const table
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`pk` AS `pk` from `test`.`t1` having 0
+DROP TABLE t1,t2;
+#
+# MDEV-19255: rowid range filter built for range condition
+# that uses an expensive subquery
+#
+CREATE TABLE t1 (
+pk1 INT PRIMARY KEY, a1 INT, b1 VARCHAR(1), KEY(b1)
+) ENGINE=MyISAM;
+INSERT INTO t1 VALUES
+(10,0,'z'),(11,3,'j'),(12,8,'f'),(13,8,'p'),(14,6,'w'),(15,0,'c'),(16,1,'j'),
+(17,1,'f'),(18,5,'v'),(19,3,'f'),(20,2,'q'),(21,8,'y'),(22,0,'a'),(23,9,'w'),
+(24,3,'e'),(25,1,'b'),(26,9,'r'),(27,2,'k'),(28,5,'c'),(29,3,'k'),(30,9,'b'),
+(31,8,'j'),(32,1,'t'),(33,8,'n'),(34,3,'z'),(35,0,'u'),(36,3,'a'),(37,3,'g'),
+(38,1,'f'),(39,6,'p'),(40,6,'m'),(41,6,'t'),(42,7,'i'),(43,4,'h'),(44,3,'d'),
+(45,2,'b'),(46,1,'o'),(47,2,'j'),(48,6,'s'),(49,5,'q'),(50,6,'l'),(51,9,'j'),
+(52,6,'y'),(53,0,'i'),(54,7,'x'),(55,2,'u'),(56,6,'t'),(57,4,'b'),(58,5,'m'),
+(59,4,'x'),(60,8,'x'),(61,6,'v'),(62,8,'m'),(63,4,'j'),(64,8,'z'),(65,2,'a'),
+(66,9,'i'),(67,4,'g'),(68,8,'h'),(69,1,'p'),(70,8,'a'),(71,0,'x'),(72,2,'s'),
+(73,6,'k'),(74,0,'m'),(75,6,'e'),(76,9,'y'),(77,7,'d'),(78,7,'w'),(79,6,'y'),
+(80,9,'s'),(81,9,'x'),(82,6,'l'),(83,9,'f'),(84,8,'x'),(85,1,'p'),(86,7,'y'),
+(87,6,'p'),(88,1,'g'),(89,3,'c'),(90,5,'h'),(91,3,'p'),(92,2,'b'),(93,1,NULL),
+(94,3,NULL),(95,2,'y'),(96,7,'s'),(97,7,'x'),(98,6,'i'),(99,9,'t'),(100,5,'j'),
+(101,0,'u'),(102,7,'r'),(103,2,'x'),(104,8,'e'),(105,8,'i'),(106,5,'q'),
+(107,8,'z'),(108,3,'k'),(109,65,NULL);
+CREATE TABLE t2 (pk2 INT PRIMARY KEY, a2 INT, b2 VARCHAR(1)) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1,1,'x');
+INSERT INTO t2 SELECT * FROM t1;
+SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 )
+WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
+pk1 a1 b1 pk2 a2 b2
+65 2 a 109 65 NULL
+EXPLAIN EXTENDED SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 )
+WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 101 100.00 Using where
+1 PRIMARY t1 eq_ref|filter PRIMARY,b1 PRIMARY|b1 4|4 test.t2.a2 1 (87%) 87.00 Using where; Using rowid filter
+2 SUBQUERY t2 range PRIMARY PRIMARY 4 NULL 1 100.00 Using index condition
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`pk1` AS `pk1`,`test`.`t1`.`a1` AS `a1`,`test`.`t1`.`b1` AS `b1`,`test`.`t2`.`pk2` AS `pk2`,`test`.`t2`.`a2` AS `a2`,`test`.`t2`.`b2` AS `b2` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`pk1` = `test`.`t2`.`a2` and `test`.`t1`.`b1` <= (/* select#2 */ select max(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`pk2` <= 1) and `test`.`t2`.`a2` <> `test`.`t2`.`pk2`
+EXPLAIN FORMAT=JSON SELECT * FROM t1 INNER JOIN t2 ON ( pk1 <> pk2 AND pk1 = a2 )
+WHERE b1 <= ( SELECT MAX(b2) FROM t2 WHERE pk2 <= 1 );
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "rows": 101,
+ "filtered": 100,
+ "attached_condition": "t2.a2 <> t2.pk2 and t2.a2 is not null"
+ },
+ "table": {
+ "table_name": "t1",
+ "access_type": "eq_ref",
+ "possible_keys": ["PRIMARY", "b1"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["pk1"],
+ "ref": ["test.t2.a2"],
+ "rowid_filter": {
+ "range": {
+ "key": "b1",
+ "used_key_parts": ["b1"]
+ },
+ "rows": 87,
+ "selectivity_pct": 87
+ },
+ "rows": 1,
+ "filtered": 87,
+ "attached_condition": "t1.b1 <= (subquery#2)"
+ },
+ "subqueries": [
+ {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "table_name": "t2",
+ "access_type": "range",
+ "possible_keys": ["PRIMARY"],
+ "key": "PRIMARY",
+ "key_length": "4",
+ "used_key_parts": ["pk2"],
+ "rows": 1,
+ "filtered": 100,
+ "index_condition": "t2.pk2 <= 1"
+ }
+ }
+ }
+ ]
+ }
+}
+DROP TABLE t1,t2;
+set @@use_stat_tables=@save_use_stat_tables;
+#
+# MDEV-18755: possible RORI-plan and possible plan with range filter
+#
+create table t1 (
+pk int not null primary key, f1 varchar(10), f2 varchar(30), a int(10),
+key (f1), key (f2)
+) engine=innodb;
+insert into t1 values
+(2,'a','a',2),(3,'a','a',null),(4,'a','a',55),(5,'a','a',4),(6,'a','a',0),
+(7,'a','a',1),(8,'a','a',4),(9,'a','a',null),(10,'a','a',0),(11,'a','a',0),
+(12,'a','a',null),(13,'a','a',49778),(14,'a','a',6),(15,'a','a',3),
+(16,'a','a',233),(17,'a','a',-1),(18,'a','a',5),(19,'a','a',-1),
+(20,'a','a',null),(21,'a','a',0),(22,'a','a',null),(23,'a','a',53840),
+(24,'a','a',null),(25,'a','a',null),(26,'a','a',5),(27,'a','a',43454),
+(28,'a','a',0),(29,'a','a',0),(30,'a','a',null),(59,'a','a',null),
+(60,'a','a',null),(61,'a','a',-1),(62,'a','a',null),(63,'a','a',0),
+(64,'a','a',14468),(65,'a','a',0),(66,'a','a',28),(67,'a','a',null),
+(68,'a','a',14983),(69,'a','a',null),(70,'a','a',3),(71,'a','a',null),
+(72,'a','a',null),(73,'a','a',237),(74,'a','a',2),(75,'a','a',0),
+(76,'a','a',6),(77,'a','a',5),(78,'a','a',0),(79,'a','a',1),(80,'a','a',-1),
+(81,'a','a',20),(82,'a','a',0),(83,'a','a',0),(84,'a','a',null),
+(85,'a','a',-1),(86,'a','a',5),(87,'a','a',null),(88,'a','a',160),
+(89,null,null,null),(90,'a','a',14785),(91,'a','a',0),(92,'a','a',null);
+( select * from t1
+where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a')))
+union
+( select * from t1
+where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a')));
+pk f1 f2 a
+explain ( select * from t1
+where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a')))
+union
+( select * from t1
+where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a')));
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t1 index_merge f1,f2 f1,f2 13,33 NULL 1 Using intersect(f1,f2); Using where
+2 UNION t1 index_merge f1,f2 f1,f2 13,33 NULL 1 Using intersect(f1,f2); Using where
+NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL
+explain format=json ( select * from t1
+where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a')))
+union
+( select * from t1
+where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a')));
+EXPLAIN
+{
+ "query_block": {
+ "union_result": {
+ "table_name": "<union1,2>",
+ "access_type": "ALL",
+ "query_specifications": [
+ {
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t1",
+ "access_type": "index_merge",
+ "possible_keys": ["f1", "f2"],
+ "key_length": "13,33",
+ "index_merge": {
+ "intersect": {
+ "range": {
+ "key": "f1",
+ "used_key_parts": ["f1"]
+ },
+ "range": {
+ "key": "f2",
+ "used_key_parts": ["f2"]
+ }
+ }
+ },
+ "rows": 1,
+ "filtered": 1.5873,
+ "attached_condition": "t1.f1 is null and t1.f2 is null and (t1.f2 between 'a' and 'z' or t1.f1 = 'a')"
+ }
+ }
+ },
+ {
+ "query_block": {
+ "select_id": 2,
+ "operation": "UNION",
+ "table": {
+ "table_name": "t1",
+ "access_type": "index_merge",
+ "possible_keys": ["f1", "f2"],
+ "key_length": "13,33",
+ "index_merge": {
+ "intersect": {
+ "range": {
+ "key": "f1",
+ "used_key_parts": ["f1"]
+ },
+ "range": {
+ "key": "f2",
+ "used_key_parts": ["f2"]
+ }
+ }
+ },
+ "rows": 1,
+ "filtered": 1.5873,
+ "attached_condition": "t1.f1 is null and t1.f2 is null and (t1.f2 between 'a' and 'z' or t1.f1 = 'a')"
+ }
+ }
+ }
+ ]
+ }
+ }
+}
+drop table t1;
+#
+# MDEV-19195: possible RORI-plan and possible plan with range filter
+# for a non-first joined table
+#
+create table t1 (id int not null primary key) engine=innodb;
+insert into t1 values (2),(1);
+create table t2 (y int,x int,index (x),index (y)) engine=innodb;
+insert into t2 values
+(4,1),(4,777),(2,1),(2,888),(111,1),(222,1),(333,345),(444,1),
+(555,555),(666,1);
+select * from t1 join t2 on t1.id = t2.x where t2.y = 2 and t1.id = 1;
+id y x
+1 2 1
+explain extended select * from t1 join t2 on t1.id = t2.x where t2.y = 2 and t1.id = 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 100.00 Using index
+1 SIMPLE t2 index_merge x,y y,x 5,5 NULL 1 100.00 Using intersect(y,x); Using where; Using index
+Warnings:
+Note 1003 select 1 AS `id`,`test`.`t2`.`y` AS `y`,`test`.`t2`.`x` AS `x` from `test`.`t1` join `test`.`t2` where `test`.`t2`.`y` = 2 and `test`.`t2`.`x` = 1
+drop table t1, t2;
+SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/main/rowid_filter_innodb.test b/mysql-test/main/rowid_filter_innodb.test
new file mode 100644
index 00000000000..173ba15f10d
--- /dev/null
+++ b/mysql-test/main/rowid_filter_innodb.test
@@ -0,0 +1,68 @@
+--source include/have_innodb.inc
+
+SET SESSION STORAGE_ENGINE='InnoDB';
+
+--source rowid_filter.test
+
+--echo #
+--echo # MDEV-18755: possible RORI-plan and possible plan with range filter
+--echo #
+
+create table t1 (
+ pk int not null primary key, f1 varchar(10), f2 varchar(30), a int(10),
+ key (f1), key (f2)
+) engine=innodb;
+
+insert into t1 values
+ (2,'a','a',2),(3,'a','a',null),(4,'a','a',55),(5,'a','a',4),(6,'a','a',0),
+ (7,'a','a',1),(8,'a','a',4),(9,'a','a',null),(10,'a','a',0),(11,'a','a',0),
+ (12,'a','a',null),(13,'a','a',49778),(14,'a','a',6),(15,'a','a',3),
+ (16,'a','a',233),(17,'a','a',-1),(18,'a','a',5),(19,'a','a',-1),
+ (20,'a','a',null),(21,'a','a',0),(22,'a','a',null),(23,'a','a',53840),
+ (24,'a','a',null),(25,'a','a',null),(26,'a','a',5),(27,'a','a',43454),
+ (28,'a','a',0),(29,'a','a',0),(30,'a','a',null),(59,'a','a',null),
+ (60,'a','a',null),(61,'a','a',-1),(62,'a','a',null),(63,'a','a',0),
+ (64,'a','a',14468),(65,'a','a',0),(66,'a','a',28),(67,'a','a',null),
+ (68,'a','a',14983),(69,'a','a',null),(70,'a','a',3),(71,'a','a',null),
+ (72,'a','a',null),(73,'a','a',237),(74,'a','a',2),(75,'a','a',0),
+ (76,'a','a',6),(77,'a','a',5),(78,'a','a',0),(79,'a','a',1),(80,'a','a',-1),
+ (81,'a','a',20),(82,'a','a',0),(83,'a','a',0),(84,'a','a',null),
+ (85,'a','a',-1),(86,'a','a',5),(87,'a','a',null),(88,'a','a',160),
+ (89,null,null,null),(90,'a','a',14785),(91,'a','a',0),(92,'a','a',null);
+
+let $q=
+( select * from t1
+ where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a')))
+ union
+( select * from t1
+ where (f1 is null and f2 is null) and (f2 between 'a' and 'z' or f1 in ('a')));
+
+eval $q;
+eval explain $q;
+eval explain format=json $q;
+
+drop table t1;
+
+--echo #
+--echo # MDEV-19195: possible RORI-plan and possible plan with range filter
+--echo # for a non-first joined table
+--echo #
+
+create table t1 (id int not null primary key) engine=innodb;
+insert into t1 values (2),(1);
+
+create table t2 (y int,x int,index (x),index (y)) engine=innodb;
+insert into t2 values
+ (4,1),(4,777),(2,1),(2,888),(111,1),(222,1),(333,345),(444,1),
+ (555,555),(666,1);
+
+let $q=
+select * from t1 join t2 on t1.id = t2.x where t2.y = 2 and t1.id = 1;
+
+eval $q;
+eval explain extended $q;
+
+drop table t1, t2;
+
+
+SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/main/secure_file_priv_win.result b/mysql-test/main/secure_file_priv_win.result
index d456c24d20c..af402ae9537 100644
--- a/mysql-test/main/secure_file_priv_win.result
+++ b/mysql-test/main/secure_file_priv_win.result
@@ -1,6 +1,8 @@
CREATE TABLE t1 (c1 longtext);
INSERT INTO t1 values ('a');
SELECT * FROM t1 INTO OUTFILE 'MYSQL_TMP_DIR/B11764517.tmp';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
show global variables like 'secure_file_priv';
Variable_name Value
secure_file_priv MYSQL_TMP_DIR/
@@ -32,7 +34,15 @@ ERROR HY000: The MariaDB server is running with the --secure-file-priv option so
SELECT * FROM t1 INTO OUTFILE 'MYSQL_TMP_DIR\\..a..\\..\\..\\B11764517-2.tmp';
ERROR HY000: The MariaDB server is running with the --secure-file-priv option so it cannot execute this statement
SELECT * FROM t1 INTO OUTFILE 'MYSQL_TMP_DIR\\B11764517-2.tmp';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT * FROM t1 INTO OUTFILE 'MYSQL_TMP_DIR/B11764517-3.tmp';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT * FROM t1 INTO OUTFILE 'MYSQL_TMP_DIR_UCASE/B11764517-4.tmp';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT * FROM t1 INTO OUTFILE 'MYSQL_TMP_DIR_LCASE/B11764517-5.tmp';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
DROP TABLE t1;
diff --git a/mysql-test/main/select.result b/mysql-test/main/select.result
index 9b6a570717b..a527459657a 100644
--- a/mysql-test/main/select.result
+++ b/mysql-test/main/select.result
@@ -2118,8 +2118,8 @@ INSERT INTO t2 VALUES (1,3,10,'2002-06-01 08:00:00',35),(1,3,1010,'2002-06-01 12
SELECT a.gvid, (SUM(CASE b.sampletid WHEN 140 THEN b.samplevalue ELSE 0 END)) as the_success,(SUM(CASE b.sampletid WHEN 141 THEN b.samplevalue ELSE 0 END)) as the_fail,(SUM(CASE b.sampletid WHEN 142 THEN b.samplevalue ELSE 0 END)) as the_size,(SUM(CASE b.sampletid WHEN 143 THEN b.samplevalue ELSE 0 END)) as the_time FROM t1 a, t2 b WHERE a.hmid = b.hmid AND a.volid = b.volid AND b.sampletime >= 'wrong-date-value' AND b.sampletime < 'wrong-date-value' AND b.sampletid IN (140, 141, 142, 143) GROUP BY a.gvid;
gvid the_success the_fail the_size the_time
Warnings:
-Warning 1292 Incorrect datetime value: 'wrong-date-value'
-Warning 1292 Incorrect datetime value: 'wrong-date-value'
+Warning 1292 Truncated incorrect datetime value: 'wrong-date-value'
+Warning 1292 Truncated incorrect datetime value: 'wrong-date-value'
SELECT a.gvid, (SUM(CASE b.sampletid WHEN 140 THEN b.samplevalue ELSE 0 END)) as the_success,(SUM(CASE b.sampletid WHEN 141 THEN b.samplevalue ELSE 0 END)) as the_fail,(SUM(CASE b.sampletid WHEN 142 THEN b.samplevalue ELSE 0 END)) as the_size,(SUM(CASE b.sampletid WHEN 143 THEN b.samplevalue ELSE 0 END)) as the_time FROM t1 a, t2 b WHERE a.hmid = b.hmid AND a.volid = b.volid AND b.sampletime >= NULL AND b.sampletime < NULL AND b.sampletid IN (140, 141, 142, 143) GROUP BY a.gvid;
gvid the_success the_fail the_size the_time
DROP TABLE t1,t2;
@@ -2786,7 +2786,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index key1 key1 5 NULL 4 Using where; Using index
explain select min(key1) from t1 where key1 >= 0.3762 and rand() + 0.5 >= 0.5;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range key1 key1 5 NULL 3 Using where; Using index
+1 SIMPLE t1 index key1 key1 5 NULL 4 Using where; Using index
select max(key1) from t1 where key1 <= 0.6158;
max(key1)
0.6158000230789185
@@ -3408,7 +3408,7 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
CREATE TABLE t1 (sku int PRIMARY KEY, pr int);
CREATE TABLE t2 (sku int PRIMARY KEY, sppr int, name varchar(255));
INSERT INTO t1 VALUES
-(10, 10), (20, 10), (30, 20), (40, 30), (50, 10), (60, 10);
+(10, 10), (20, 10), (30, 20), (40, 30), (50, 10), (60, 10), (70, 10);
INSERT INTO t2 VALUES
(10, 10, 'aaa'), (20, 10, 'bbb'), (30, 10, 'ccc'), (40, 20, 'ddd'),
(50, 10, 'eee'), (60, 20, 'fff'), (70, 20, 'ggg'), (80, 30, 'hhh');
@@ -3453,6 +3453,7 @@ create table t2 (a int, b int, c int, e int, primary key(a,b,c));
insert into t2 select A.a, B.a, C.a, C.a from t1 A, t1 B, t1 C;
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
select 'In next EXPLAIN, B.rows must be exactly 10:' Z;
Z
@@ -3474,13 +3475,13 @@ INSERT INTO t2 VALUES
EXPLAIN
SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition
-1 SIMPLE t2 ref c c 5 test.t1.a 2
+1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where
+1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter
EXPLAIN
SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6 AND a > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Using where
-1 SIMPLE t2 ref c c 5 test.t1.a 2
+1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where
+1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter
DROP TABLE t1, t2;
create table t1 (
a int unsigned not null auto_increment primary key,
@@ -3522,7 +3523,7 @@ INSERT INTO t2 VALUES
EXPLAIN SELECT b FROM t1, t2 WHERE b=c AND a=1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
-1 SIMPLE t2 ref idx idx 4 const 7 Using index
+1 SIMPLE t2 ref idx idx 4 const 8 Using index
EXPLAIN SELECT b FROM t1, t2 WHERE b=c AND a=4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
@@ -3533,14 +3534,14 @@ INSERT INTO t1 VALUES (1,2), (2,NULL), (3,2);
CREATE TABLE t2 (b int, c INT, INDEX idx1(b));
INSERT INTO t2 VALUES (2,1), (3,2);
CREATE TABLE t3 (d int, e int, INDEX idx1(d));
-INSERT INTO t3 VALUES (2,10), (2,20), (1,30), (2,40), (2,50);
+INSERT INTO t3 VALUES (2,10), (2,20), (1,30), (2,40), (2,50), (1,60), (3,70), (1,80), (3,90);
EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON t2.b=t1.a INNER JOIN t3 ON t3.d=t1.id
WHERE t1.id=2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 const idx1 NULL NULL NULL 1
-1 SIMPLE t3 ref idx1 idx1 5 const 3
+1 SIMPLE t3 ref idx1 idx1 5 const 4
SELECT * FROM t1 LEFT JOIN t2 ON t2.b=t1.a INNER JOIN t3 ON t3.d=t1.id
WHERE t1.id=2;
id a b c d e
@@ -3569,7 +3570,7 @@ EXPLAIN SELECT t2.*
FROM t1 JOIN t2 ON t2.fk=t1.pk
WHERE t2.fk < 'c' AND t2.pk=t1.fk;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 12 NULL 3 Using index condition; Using where
+1 SIMPLE t1 range PRIMARY PRIMARY 12 NULL 2 Using index condition; Using where
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 18 test.t1.fk 1 Using where
EXPLAIN SELECT t2.*
FROM t1 JOIN t2 ON t2.fk=t1.pk
@@ -3616,7 +3617,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where
-1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
+1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
EXPLAIN
SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND
@@ -3624,7 +3625,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where
-1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
+1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
EXPLAIN
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@@ -3698,6 +3699,10 @@ INSERT INTO t1 SELECT * FROM t1 WHERE ID1_with_null IS NULL;
INSERT INTO t1 SELECT * FROM t1 WHERE ID2_with_null IS NULL;
INSERT INTO t1 SELECT * FROM t1 WHERE ID1_with_null IS NULL;
INSERT INTO t1 SELECT * FROM t1 WHERE ID2_with_null IS NULL;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
SELECT COUNT(*) FROM t1 WHERE ID1_with_null IS NULL AND ID2_with_null=3;
COUNT(*)
24
@@ -3713,45 +3718,47 @@ COUNT(*)
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null=3 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null=3 AND ID2_with_null=3 IS NULL ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
DROP INDEX idx1 ON t1;
CREATE UNIQUE INDEX idx1 ON t1(ID1_with_null,ID2_with_null);
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null=3 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null=3 AND ID2_with_null IS NULL ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND
(ID2_with_null=1 OR ID2_with_null=2);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter
DROP TABLE t1;
CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts));
INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00");
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CREATE TABLE t2 (a INT, dt1 DATETIME, dt2 DATETIME, PRIMARY KEY (a));
INSERT INTO t2 VALUES (30, "2006-01-01 00:00:00", "2999-12-31 00:00:00");
INSERT INTO t2 SELECT a+1,dt1,dt2 FROM t2;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON (t1.a=t2.a) WHERE t1.a=30
@@ -3759,7 +3766,7 @@ AND t1.ts BETWEEN t2.dt1 AND t2.dt2
AND t1.ts BETWEEN "2006-01-01" AND "2006-12-31";
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 const PRIMARY PRIMARY 4 const 1
-1 SIMPLE t1 range ts ts 4 NULL 1 Using index condition; Using where
+1 SIMPLE t1 range ts ts 4 NULL 2 Using index condition; Using where
SELECT * FROM t1 LEFT JOIN t2 ON (t1.a=t2.a) WHERE t1.a=30
AND t1.ts BETWEEN t2.dt1 AND t2.dt2
AND t1.ts BETWEEN "2006-01-01" AND "2006-12-31";
@@ -3801,6 +3808,7 @@ INSERT INTO t1 VALUES ('ccc'), ('bb'), ('cc '), ('aa '), ('aa');
INSERT INTO t2 VALUES ('bb',1), ('aa',2), ('cc ',3);
INSERT INTO t2 VALUES (concat('cc ', 0x06), 4);
INSERT INTO t2 VALUES ('cc',5), ('bb ',6), ('cc ',7);
+INSERT INTO t2 VALUES ('bb ',8), ('aa',9), ('aa',10), ('bb',11);
SELECT * FROM t2;
name n
bb 1
@@ -3810,27 +3818,39 @@ cc  4
cc 5
bb 6
cc 7
+bb 8
+aa 9
+aa 10
+bb 11
SELECT * FROM t2 ORDER BY name;
name n
aa 2
+aa 10
+aa 9
bb 1
+bb 8
bb 6
+bb 11
cc  4
-cc 3
-cc 5
cc 7
+cc 5
+cc 3
SELECT name, LENGTH(name), n FROM t2 ORDER BY name;
name LENGTH(name) n
aa 2 2
+aa 2 10
+aa 2 9
bb 2 1
+bb 3 8
bb 3 6
+bb 2 11
cc  4 4
-cc 5 3
-cc 2 5
cc 3 7
+cc 2 5
+cc 5 3
EXPLAIN SELECT name, LENGTH(name), n FROM t2 WHERE name='cc ';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref name name 6 const 3 Using where
+1 SIMPLE t2 ref name name 6 const 4 Using where
SELECT name, LENGTH(name), n FROM t2 WHERE name='cc ';
name LENGTH(name) n
cc 5 3
@@ -3838,7 +3858,7 @@ cc 2 5
cc 3 7
EXPLAIN SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range name name 6 NULL 3 Using where
+1 SIMPLE t2 range name name 6 NULL 4 Using where
SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%';
name LENGTH(name) n
cc 5 3
@@ -3847,7 +3867,7 @@ cc 2 5
cc 3 7
EXPLAIN SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%' ORDER BY name;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range name name 6 NULL 3 Using where; Using filesort
+1 SIMPLE t2 range name name 6 NULL 4 Using where; Using filesort
SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%' ORDER BY name;
name LENGTH(name) n
cc  4 4
@@ -3863,11 +3883,17 @@ name name n
ccc NULL NULL
bb bb 1
bb bb 6
+bb bb 8
+bb bb 11
cc cc 3
cc cc 5
cc cc 7
aa aa 2
+aa aa 9
+aa aa 10
aa aa 2
+aa aa 9
+aa aa 10
DROP TABLE t1,t2;
CREATE TABLE t1 (name text);
CREATE TABLE t2 (name text, n int, KEY (name(3)));
@@ -3875,6 +3901,7 @@ INSERT INTO t1 VALUES ('ccc'), ('bb'), ('cc '), ('aa '), ('aa');
INSERT INTO t2 VALUES ('bb',1), ('aa',2), ('cc ',3);
INSERT INTO t2 VALUES (concat('cc ', 0x06), 4);
INSERT INTO t2 VALUES ('cc',5), ('bb ',6), ('cc ',7);
+INSERT INTO t2 VALUES ('bb ',8), ('aa',9), ('aa',10), ('bb',11);
SELECT * FROM t2;
name n
bb 1
@@ -3884,11 +3911,19 @@ cc  4
cc 5
bb 6
cc 7
+bb 8
+aa 9
+aa 10
+bb 11
SELECT * FROM t2 ORDER BY name;
name n
aa 2
+aa 9
+aa 10
bb 1
bb 6
+bb 8
+bb 11
cc  4
cc 3
cc 5
@@ -3896,15 +3931,19 @@ cc 7
SELECT name, LENGTH(name), n FROM t2 ORDER BY name;
name LENGTH(name) n
aa 2 2
+aa 2 9
+aa 2 10
bb 2 1
bb 3 6
+bb 3 8
+bb 2 11
cc  4 4
cc 5 3
cc 2 5
cc 3 7
EXPLAIN SELECT name, LENGTH(name), n FROM t2 WHERE name='cc ';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref name name 6 const 3 Using where
+1 SIMPLE t2 ref name name 6 const 4 Using where
SELECT name, LENGTH(name), n FROM t2 WHERE name='cc ';
name LENGTH(name) n
cc 5 3
@@ -3912,7 +3951,7 @@ cc 2 5
cc 3 7
EXPLAIN SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range name name 6 NULL 3 Using where
+1 SIMPLE t2 range name name 6 NULL 4 Using where
SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%';
name LENGTH(name) n
cc 5 3
@@ -3921,7 +3960,7 @@ cc 2 5
cc 3 7
EXPLAIN SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%' ORDER BY name;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range name name 6 NULL 3 Using where; Using filesort
+1 SIMPLE t2 range name name 6 NULL 4 Using where; Using filesort
SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%' ORDER BY name;
name LENGTH(name) n
cc  4 4
@@ -3937,11 +3976,17 @@ name name n
ccc NULL NULL
bb bb 1
bb bb 6
+bb bb 8
+bb bb 11
cc cc 3
cc cc 5
cc cc 7
aa aa 2
+aa aa 9
+aa aa 10
aa aa 2
+aa aa 9
+aa aa 10
DROP TABLE t1,t2;
CREATE TABLE t1 (
access_id int NOT NULL default '0',
@@ -4111,7 +4156,7 @@ select str_to_date('2007-10-09','%Y-%m-%d') <= '2007/10/2000:00:00 GMT-6';
str_to_date('2007-10-09','%Y-%m-%d') <= '2007/10/2000:00:00 GMT-6'
0
Warnings:
-Warning 1292 Incorrect datetime value: '2007/10/2000:00:00 GMT-6'
+Warning 1292 Truncated incorrect datetime value: '2007/10/2000:00:00 GMT-6'
select str_to_date('2007-10-01','%Y-%m-%d') = '2007-10-1 00:00:00 GMT-6';
str_to_date('2007-10-01','%Y-%m-%d') = '2007-10-1 00:00:00 GMT-6'
1
@@ -4198,7 +4243,7 @@ select str_to_date('2007-10-00','%Y-%m-%d') between '' and '2007/10/20';
str_to_date('2007-10-00','%Y-%m-%d') between '' and '2007/10/20'
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
select str_to_date('','%Y-%m-%d') between '2007/10/01' and '2007/10/20';
str_to_date('','%Y-%m-%d') between '2007/10/01' and '2007/10/20'
0
@@ -4212,22 +4257,22 @@ select str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '';
str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = ''
0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
select str_to_date('1','%Y-%m-%d') = '1';
str_to_date('1','%Y-%m-%d') = '1'
0
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
select str_to_date('1','%Y-%m-%d') = '1';
str_to_date('1','%Y-%m-%d') = '1'
0
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
select str_to_date('','%Y-%m-%d') = '';
str_to_date('','%Y-%m-%d') = ''
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
select str_to_date('2000-01-01','%Y-%m-%d') between '1000-01-01' and '2001-01-01';
str_to_date('2000-01-01','%Y-%m-%d') between '1000-01-01' and '2001-01-01'
1
@@ -4645,6 +4690,8 @@ WHERE int_key IN (SELECT 1 FROM t1)
HAVING date_nokey = '10:41:7'
ORDER BY date_key;
date_nokey
+Warnings:
+Warning 1292 Truncated incorrect datetime value: '10:41:7'
DROP TABLE t1,t2;
CREATE TABLE t1 (a INT NOT NULL, b INT);
INSERT INTO t1 VALUES (1, 1);
@@ -5211,7 +5258,7 @@ INSERT INTO `CC` VALUES
EXPLAIN SELECT `varchar_nokey` G1 FROM CC WHERE `int_nokey` AND `int_key` <= 4
HAVING G1 ORDER BY `varchar_key` LIMIT 6 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CC range int_key int_key 4 NULL 10 Using index condition; Using where; Using filesort
+1 SIMPLE CC range int_key int_key 4 NULL 9 Using index condition; Using where; Using filesort
SELECT `varchar_nokey` G1 FROM CC WHERE `int_nokey` AND `int_key` <= 4
HAVING G1 ORDER BY `varchar_key` LIMIT 6 ;
G1
@@ -5282,30 +5329,30 @@ SELECT * FROM t1 HAVING f1 = 'zz';
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'zz'
+Warning 1292 Truncated incorrect datetime value: 'zz'
SELECT * FROM t1 HAVING f1 <= 'aa' ;
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'aa'
+Warning 1292 Truncated incorrect datetime value: 'aa'
SELECT * FROM t1 HAVING f1 = 'zz' AND f1 <= 'aa' ;
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'zz'
-Warning 1292 Incorrect datetime value: 'aa'
+Warning 1292 Truncated incorrect datetime value: 'zz'
+Warning 1292 Truncated incorrect datetime value: 'aa'
SELECT * FROM t1 WHERE f1 = 'zz' AND f1 <= 'aa' ;
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'zz'
-Warning 1292 Incorrect datetime value: 'aa'
+Warning 1292 Truncated incorrect datetime value: 'zz'
+Warning 1292 Truncated incorrect datetime value: 'aa'
SELECT * FROM v1 HAVING f1 = 'zz' AND f1 <= 'aa' ;
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'zz'
-Warning 1292 Incorrect datetime value: 'aa'
+Warning 1292 Truncated incorrect datetime value: 'zz'
+Warning 1292 Truncated incorrect datetime value: 'aa'
DROP TABLE t1;
DROP VIEW v1;
#
diff --git a/mysql-test/main/select.test b/mysql-test/main/select.test
index 52ef4aa2111..0d43dfd55b9 100644
--- a/mysql-test/main/select.test
+++ b/mysql-test/main/select.test
@@ -2915,7 +2915,7 @@ CREATE TABLE t1 (sku int PRIMARY KEY, pr int);
CREATE TABLE t2 (sku int PRIMARY KEY, sppr int, name varchar(255));
INSERT INTO t1 VALUES
- (10, 10), (20, 10), (30, 20), (40, 30), (50, 10), (60, 10);
+ (10, 10), (20, 10), (30, 20), (40, 30), (50, 10), (60, 10), (70, 10);
INSERT INTO t2 VALUES
(10, 10, 'aaa'), (20, 10, 'bbb'), (30, 10, 'ccc'), (40, 20, 'ddd'),
@@ -3048,7 +3048,7 @@ CREATE TABLE t2 (b int, c INT, INDEX idx1(b));
INSERT INTO t2 VALUES (2,1), (3,2);
CREATE TABLE t3 (d int, e int, INDEX idx1(d));
-INSERT INTO t3 VALUES (2,10), (2,20), (1,30), (2,40), (2,50);
+INSERT INTO t3 VALUES (2,10), (2,20), (1,30), (2,40), (2,50), (1,60), (3,70), (1,80), (3,90);
EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON t2.b=t1.a INNER JOIN t3 ON t3.d=t1.id
@@ -3230,6 +3230,8 @@ INSERT INTO t1 SELECT * FROM t1 WHERE ID2_with_null IS NULL;
INSERT INTO t1 SELECT * FROM t1 WHERE ID1_with_null IS NULL;
INSERT INTO t1 SELECT * FROM t1 WHERE ID2_with_null IS NULL;
+ANALYZE TABLE t1;
+
SELECT COUNT(*) FROM t1 WHERE ID1_with_null IS NULL AND ID2_with_null=3;
SELECT COUNT(*) FROM t1 WHERE ID1_with_null=3 AND ID2_with_null IS NULL;
SELECT COUNT(*) FROM t1 WHERE ID1_with_null IS NULL AND ID2_with_null IS NULL;
@@ -3311,6 +3313,7 @@ INSERT INTO t1 VALUES ('ccc'), ('bb'), ('cc '), ('aa '), ('aa');
INSERT INTO t2 VALUES ('bb',1), ('aa',2), ('cc ',3);
INSERT INTO t2 VALUES (concat('cc ', 0x06), 4);
INSERT INTO t2 VALUES ('cc',5), ('bb ',6), ('cc ',7);
+INSERT INTO t2 VALUES ('bb ',8), ('aa',9), ('aa',10), ('bb',11);
SELECT * FROM t2;
SELECT * FROM t2 ORDER BY name;
SELECT name, LENGTH(name), n FROM t2 ORDER BY name;
@@ -3332,6 +3335,7 @@ INSERT INTO t1 VALUES ('ccc'), ('bb'), ('cc '), ('aa '), ('aa');
INSERT INTO t2 VALUES ('bb',1), ('aa',2), ('cc ',3);
INSERT INTO t2 VALUES (concat('cc ', 0x06), 4);
INSERT INTO t2 VALUES ('cc',5), ('bb ',6), ('cc ',7);
+INSERT INTO t2 VALUES ('bb ',8), ('aa',9), ('aa',10), ('bb',11);
SELECT * FROM t2;
SELECT * FROM t2 ORDER BY name;
SELECT name, LENGTH(name), n FROM t2 ORDER BY name;
diff --git a/mysql-test/main/select_jcl6.result b/mysql-test/main/select_jcl6.result
index d78fad15da1..c1e9e9d3ad5 100644
--- a/mysql-test/main/select_jcl6.result
+++ b/mysql-test/main/select_jcl6.result
@@ -2129,8 +2129,8 @@ INSERT INTO t2 VALUES (1,3,10,'2002-06-01 08:00:00',35),(1,3,1010,'2002-06-01 12
SELECT a.gvid, (SUM(CASE b.sampletid WHEN 140 THEN b.samplevalue ELSE 0 END)) as the_success,(SUM(CASE b.sampletid WHEN 141 THEN b.samplevalue ELSE 0 END)) as the_fail,(SUM(CASE b.sampletid WHEN 142 THEN b.samplevalue ELSE 0 END)) as the_size,(SUM(CASE b.sampletid WHEN 143 THEN b.samplevalue ELSE 0 END)) as the_time FROM t1 a, t2 b WHERE a.hmid = b.hmid AND a.volid = b.volid AND b.sampletime >= 'wrong-date-value' AND b.sampletime < 'wrong-date-value' AND b.sampletid IN (140, 141, 142, 143) GROUP BY a.gvid;
gvid the_success the_fail the_size the_time
Warnings:
-Warning 1292 Incorrect datetime value: 'wrong-date-value'
-Warning 1292 Incorrect datetime value: 'wrong-date-value'
+Warning 1292 Truncated incorrect datetime value: 'wrong-date-value'
+Warning 1292 Truncated incorrect datetime value: 'wrong-date-value'
SELECT a.gvid, (SUM(CASE b.sampletid WHEN 140 THEN b.samplevalue ELSE 0 END)) as the_success,(SUM(CASE b.sampletid WHEN 141 THEN b.samplevalue ELSE 0 END)) as the_fail,(SUM(CASE b.sampletid WHEN 142 THEN b.samplevalue ELSE 0 END)) as the_size,(SUM(CASE b.sampletid WHEN 143 THEN b.samplevalue ELSE 0 END)) as the_time FROM t1 a, t2 b WHERE a.hmid = b.hmid AND a.volid = b.volid AND b.sampletime >= NULL AND b.sampletime < NULL AND b.sampletid IN (140, 141, 142, 143) GROUP BY a.gvid;
gvid the_success the_fail the_size the_time
DROP TABLE t1,t2;
@@ -2797,7 +2797,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index key1 key1 5 NULL 4 Using where; Using index
explain select min(key1) from t1 where key1 >= 0.3762 and rand() + 0.5 >= 0.5;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range key1 key1 5 NULL 3 Using where; Using index
+1 SIMPLE t1 index key1 key1 5 NULL 4 Using where; Using index
select max(key1) from t1 where key1 <= 0.6158;
max(key1)
0.6158000230789185
@@ -3419,7 +3419,7 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
CREATE TABLE t1 (sku int PRIMARY KEY, pr int);
CREATE TABLE t2 (sku int PRIMARY KEY, sppr int, name varchar(255));
INSERT INTO t1 VALUES
-(10, 10), (20, 10), (30, 20), (40, 30), (50, 10), (60, 10);
+(10, 10), (20, 10), (30, 20), (40, 30), (50, 10), (60, 10), (70, 10);
INSERT INTO t2 VALUES
(10, 10, 'aaa'), (20, 10, 'bbb'), (30, 10, 'ccc'), (40, 20, 'ddd'),
(50, 10, 'eee'), (60, 20, 'fff'), (70, 20, 'ggg'), (80, 30, 'hhh');
@@ -3464,6 +3464,7 @@ create table t2 (a int, b int, c int, e int, primary key(a,b,c));
insert into t2 select A.a, B.a, C.a, C.a from t1 A, t1 B, t1 C;
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
select 'In next EXPLAIN, B.rows must be exactly 10:' Z;
Z
@@ -3485,13 +3486,13 @@ INSERT INTO t2 VALUES
EXPLAIN
SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Rowid-ordered scan
-1 SIMPLE t2 ref c c 5 test.t1.a 2 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where
+1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
EXPLAIN
SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6 AND a > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Using where; Rowid-ordered scan
-1 SIMPLE t2 ref c c 5 test.t1.a 2 Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where
+1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
DROP TABLE t1, t2;
create table t1 (
a int unsigned not null auto_increment primary key,
@@ -3533,7 +3534,7 @@ INSERT INTO t2 VALUES
EXPLAIN SELECT b FROM t1, t2 WHERE b=c AND a=1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
-1 SIMPLE t2 ref idx idx 4 const 7 Using index
+1 SIMPLE t2 ref idx idx 4 const 8 Using index
EXPLAIN SELECT b FROM t1, t2 WHERE b=c AND a=4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
@@ -3544,14 +3545,14 @@ INSERT INTO t1 VALUES (1,2), (2,NULL), (3,2);
CREATE TABLE t2 (b int, c INT, INDEX idx1(b));
INSERT INTO t2 VALUES (2,1), (3,2);
CREATE TABLE t3 (d int, e int, INDEX idx1(d));
-INSERT INTO t3 VALUES (2,10), (2,20), (1,30), (2,40), (2,50);
+INSERT INTO t3 VALUES (2,10), (2,20), (1,30), (2,40), (2,50), (1,60), (3,70), (1,80), (3,90);
EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON t2.b=t1.a INNER JOIN t3 ON t3.d=t1.id
WHERE t1.id=2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 const idx1 NULL NULL NULL 1
-1 SIMPLE t3 ref idx1 idx1 5 const 3
+1 SIMPLE t3 ref idx1 idx1 5 const 4
SELECT * FROM t1 LEFT JOIN t2 ON t2.b=t1.a INNER JOIN t3 ON t3.d=t1.id
WHERE t1.id=2;
id a b c d e
@@ -3580,7 +3581,7 @@ EXPLAIN SELECT t2.*
FROM t1 JOIN t2 ON t2.fk=t1.pk
WHERE t2.fk < 'c' AND t2.pk=t1.fk;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 12 NULL 3 Using index condition; Using where; Rowid-ordered scan
+1 SIMPLE t1 range PRIMARY PRIMARY 12 NULL 2 Using index condition; Using where; Rowid-ordered scan
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 18 test.t1.fk 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
EXPLAIN SELECT t2.*
FROM t1 JOIN t2 ON t2.fk=t1.pk
@@ -3627,7 +3628,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan
-1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
EXPLAIN
SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND
@@ -3635,7 +3636,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where; Rowid-ordered scan
-1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan
+1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using join buffer (flat, BKA join); Key-ordered Rowid-ordered scan; Using rowid filter
EXPLAIN
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@@ -3709,6 +3710,10 @@ INSERT INTO t1 SELECT * FROM t1 WHERE ID1_with_null IS NULL;
INSERT INTO t1 SELECT * FROM t1 WHERE ID2_with_null IS NULL;
INSERT INTO t1 SELECT * FROM t1 WHERE ID1_with_null IS NULL;
INSERT INTO t1 SELECT * FROM t1 WHERE ID2_with_null IS NULL;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
SELECT COUNT(*) FROM t1 WHERE ID1_with_null IS NULL AND ID2_with_null=3;
COUNT(*)
24
@@ -3724,45 +3729,47 @@ COUNT(*)
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null=3 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null=3 AND ID2_with_null=3 IS NULL ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
DROP INDEX idx1 ON t1;
CREATE UNIQUE INDEX idx1 ON t1(ID1_with_null,ID2_with_null);
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null=3 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null=3 AND ID2_with_null IS NULL ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND
(ID2_with_null=1 OR ID2_with_null=2);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter
DROP TABLE t1;
CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts));
INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00");
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CREATE TABLE t2 (a INT, dt1 DATETIME, dt2 DATETIME, PRIMARY KEY (a));
INSERT INTO t2 VALUES (30, "2006-01-01 00:00:00", "2999-12-31 00:00:00");
INSERT INTO t2 SELECT a+1,dt1,dt2 FROM t2;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON (t1.a=t2.a) WHERE t1.a=30
@@ -3770,7 +3777,7 @@ AND t1.ts BETWEEN t2.dt1 AND t2.dt2
AND t1.ts BETWEEN "2006-01-01" AND "2006-12-31";
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 const PRIMARY PRIMARY 4 const 1
-1 SIMPLE t1 range ts ts 4 NULL 1 Using index condition; Using where; Rowid-ordered scan
+1 SIMPLE t1 range ts ts 4 NULL 2 Using index condition; Using where; Rowid-ordered scan
SELECT * FROM t1 LEFT JOIN t2 ON (t1.a=t2.a) WHERE t1.a=30
AND t1.ts BETWEEN t2.dt1 AND t2.dt2
AND t1.ts BETWEEN "2006-01-01" AND "2006-12-31";
@@ -3812,6 +3819,7 @@ INSERT INTO t1 VALUES ('ccc'), ('bb'), ('cc '), ('aa '), ('aa');
INSERT INTO t2 VALUES ('bb',1), ('aa',2), ('cc ',3);
INSERT INTO t2 VALUES (concat('cc ', 0x06), 4);
INSERT INTO t2 VALUES ('cc',5), ('bb ',6), ('cc ',7);
+INSERT INTO t2 VALUES ('bb ',8), ('aa',9), ('aa',10), ('bb',11);
SELECT * FROM t2;
name n
bb 1
@@ -3821,27 +3829,39 @@ cc  4
cc 5
bb 6
cc 7
+bb 8
+aa 9
+aa 10
+bb 11
SELECT * FROM t2 ORDER BY name;
name n
aa 2
+aa 10
+aa 9
bb 1
+bb 8
bb 6
+bb 11
cc  4
-cc 3
-cc 5
cc 7
+cc 5
+cc 3
SELECT name, LENGTH(name), n FROM t2 ORDER BY name;
name LENGTH(name) n
aa 2 2
+aa 2 10
+aa 2 9
bb 2 1
+bb 3 8
bb 3 6
+bb 2 11
cc  4 4
-cc 5 3
-cc 2 5
cc 3 7
+cc 2 5
+cc 5 3
EXPLAIN SELECT name, LENGTH(name), n FROM t2 WHERE name='cc ';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref name name 6 const 3 Using where
+1 SIMPLE t2 ref name name 6 const 4 Using where
SELECT name, LENGTH(name), n FROM t2 WHERE name='cc ';
name LENGTH(name) n
cc 5 3
@@ -3849,7 +3869,7 @@ cc 2 5
cc 3 7
EXPLAIN SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range name name 6 NULL 3 Using where
+1 SIMPLE t2 range name name 6 NULL 4 Using where
SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%';
name LENGTH(name) n
cc 5 3
@@ -3858,7 +3878,7 @@ cc 2 5
cc 3 7
EXPLAIN SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%' ORDER BY name;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range name name 6 NULL 3 Using where; Using filesort
+1 SIMPLE t2 range name name 6 NULL 4 Using where; Using filesort
SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%' ORDER BY name;
name LENGTH(name) n
cc  4 4
@@ -3874,11 +3894,17 @@ name name n
ccc NULL NULL
bb bb 1
bb bb 6
+bb bb 8
+bb bb 11
cc cc 3
cc cc 5
cc cc 7
aa aa 2
+aa aa 9
+aa aa 10
aa aa 2
+aa aa 9
+aa aa 10
DROP TABLE t1,t2;
CREATE TABLE t1 (name text);
CREATE TABLE t2 (name text, n int, KEY (name(3)));
@@ -3886,6 +3912,7 @@ INSERT INTO t1 VALUES ('ccc'), ('bb'), ('cc '), ('aa '), ('aa');
INSERT INTO t2 VALUES ('bb',1), ('aa',2), ('cc ',3);
INSERT INTO t2 VALUES (concat('cc ', 0x06), 4);
INSERT INTO t2 VALUES ('cc',5), ('bb ',6), ('cc ',7);
+INSERT INTO t2 VALUES ('bb ',8), ('aa',9), ('aa',10), ('bb',11);
SELECT * FROM t2;
name n
bb 1
@@ -3895,11 +3922,19 @@ cc  4
cc 5
bb 6
cc 7
+bb 8
+aa 9
+aa 10
+bb 11
SELECT * FROM t2 ORDER BY name;
name n
aa 2
+aa 9
+aa 10
bb 1
bb 6
+bb 8
+bb 11
cc  4
cc 3
cc 5
@@ -3907,15 +3942,19 @@ cc 7
SELECT name, LENGTH(name), n FROM t2 ORDER BY name;
name LENGTH(name) n
aa 2 2
+aa 2 9
+aa 2 10
bb 2 1
bb 3 6
+bb 3 8
+bb 2 11
cc  4 4
cc 5 3
cc 2 5
cc 3 7
EXPLAIN SELECT name, LENGTH(name), n FROM t2 WHERE name='cc ';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref name name 6 const 3 Using where
+1 SIMPLE t2 ref name name 6 const 4 Using where
SELECT name, LENGTH(name), n FROM t2 WHERE name='cc ';
name LENGTH(name) n
cc 5 3
@@ -3923,7 +3962,7 @@ cc 2 5
cc 3 7
EXPLAIN SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range name name 6 NULL 3 Using where
+1 SIMPLE t2 range name name 6 NULL 4 Using where
SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%';
name LENGTH(name) n
cc 5 3
@@ -3932,7 +3971,7 @@ cc 2 5
cc 3 7
EXPLAIN SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%' ORDER BY name;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range name name 6 NULL 3 Using where; Using filesort
+1 SIMPLE t2 range name name 6 NULL 4 Using where; Using filesort
SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%' ORDER BY name;
name LENGTH(name) n
cc  4 4
@@ -3948,11 +3987,17 @@ name name n
ccc NULL NULL
bb bb 1
bb bb 6
+bb bb 8
+bb bb 11
cc cc 3
cc cc 5
cc cc 7
aa aa 2
+aa aa 9
+aa aa 10
aa aa 2
+aa aa 9
+aa aa 10
DROP TABLE t1,t2;
CREATE TABLE t1 (
access_id int NOT NULL default '0',
@@ -4122,7 +4167,7 @@ select str_to_date('2007-10-09','%Y-%m-%d') <= '2007/10/2000:00:00 GMT-6';
str_to_date('2007-10-09','%Y-%m-%d') <= '2007/10/2000:00:00 GMT-6'
0
Warnings:
-Warning 1292 Incorrect datetime value: '2007/10/2000:00:00 GMT-6'
+Warning 1292 Truncated incorrect datetime value: '2007/10/2000:00:00 GMT-6'
select str_to_date('2007-10-01','%Y-%m-%d') = '2007-10-1 00:00:00 GMT-6';
str_to_date('2007-10-01','%Y-%m-%d') = '2007-10-1 00:00:00 GMT-6'
1
@@ -4209,7 +4254,7 @@ select str_to_date('2007-10-00','%Y-%m-%d') between '' and '2007/10/20';
str_to_date('2007-10-00','%Y-%m-%d') between '' and '2007/10/20'
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
select str_to_date('','%Y-%m-%d') between '2007/10/01' and '2007/10/20';
str_to_date('','%Y-%m-%d') between '2007/10/01' and '2007/10/20'
0
@@ -4223,22 +4268,22 @@ select str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '';
str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = ''
0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
select str_to_date('1','%Y-%m-%d') = '1';
str_to_date('1','%Y-%m-%d') = '1'
0
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
select str_to_date('1','%Y-%m-%d') = '1';
str_to_date('1','%Y-%m-%d') = '1'
0
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
select str_to_date('','%Y-%m-%d') = '';
str_to_date('','%Y-%m-%d') = ''
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
select str_to_date('2000-01-01','%Y-%m-%d') between '1000-01-01' and '2001-01-01';
str_to_date('2000-01-01','%Y-%m-%d') between '1000-01-01' and '2001-01-01'
1
@@ -4656,6 +4701,8 @@ WHERE int_key IN (SELECT 1 FROM t1)
HAVING date_nokey = '10:41:7'
ORDER BY date_key;
date_nokey
+Warnings:
+Warning 1292 Truncated incorrect datetime value: '10:41:7'
DROP TABLE t1,t2;
CREATE TABLE t1 (a INT NOT NULL, b INT);
INSERT INTO t1 VALUES (1, 1);
@@ -5222,7 +5269,7 @@ INSERT INTO `CC` VALUES
EXPLAIN SELECT `varchar_nokey` G1 FROM CC WHERE `int_nokey` AND `int_key` <= 4
HAVING G1 ORDER BY `varchar_key` LIMIT 6 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CC range int_key int_key 4 NULL 10 Using index condition; Using where; Rowid-ordered scan; Using filesort
+1 SIMPLE CC range int_key int_key 4 NULL 9 Using index condition; Using where; Rowid-ordered scan; Using filesort
SELECT `varchar_nokey` G1 FROM CC WHERE `int_nokey` AND `int_key` <= 4
HAVING G1 ORDER BY `varchar_key` LIMIT 6 ;
G1
@@ -5293,30 +5340,30 @@ SELECT * FROM t1 HAVING f1 = 'zz';
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'zz'
+Warning 1292 Truncated incorrect datetime value: 'zz'
SELECT * FROM t1 HAVING f1 <= 'aa' ;
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'aa'
+Warning 1292 Truncated incorrect datetime value: 'aa'
SELECT * FROM t1 HAVING f1 = 'zz' AND f1 <= 'aa' ;
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'zz'
-Warning 1292 Incorrect datetime value: 'aa'
+Warning 1292 Truncated incorrect datetime value: 'zz'
+Warning 1292 Truncated incorrect datetime value: 'aa'
SELECT * FROM t1 WHERE f1 = 'zz' AND f1 <= 'aa' ;
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'zz'
-Warning 1292 Incorrect datetime value: 'aa'
+Warning 1292 Truncated incorrect datetime value: 'zz'
+Warning 1292 Truncated incorrect datetime value: 'aa'
SELECT * FROM v1 HAVING f1 = 'zz' AND f1 <= 'aa' ;
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'zz'
-Warning 1292 Incorrect datetime value: 'aa'
+Warning 1292 Truncated incorrect datetime value: 'zz'
+Warning 1292 Truncated incorrect datetime value: 'aa'
DROP TABLE t1;
DROP VIEW v1;
#
diff --git a/mysql-test/main/select_pkeycache.result b/mysql-test/main/select_pkeycache.result
index 9b6a570717b..a527459657a 100644
--- a/mysql-test/main/select_pkeycache.result
+++ b/mysql-test/main/select_pkeycache.result
@@ -2118,8 +2118,8 @@ INSERT INTO t2 VALUES (1,3,10,'2002-06-01 08:00:00',35),(1,3,1010,'2002-06-01 12
SELECT a.gvid, (SUM(CASE b.sampletid WHEN 140 THEN b.samplevalue ELSE 0 END)) as the_success,(SUM(CASE b.sampletid WHEN 141 THEN b.samplevalue ELSE 0 END)) as the_fail,(SUM(CASE b.sampletid WHEN 142 THEN b.samplevalue ELSE 0 END)) as the_size,(SUM(CASE b.sampletid WHEN 143 THEN b.samplevalue ELSE 0 END)) as the_time FROM t1 a, t2 b WHERE a.hmid = b.hmid AND a.volid = b.volid AND b.sampletime >= 'wrong-date-value' AND b.sampletime < 'wrong-date-value' AND b.sampletid IN (140, 141, 142, 143) GROUP BY a.gvid;
gvid the_success the_fail the_size the_time
Warnings:
-Warning 1292 Incorrect datetime value: 'wrong-date-value'
-Warning 1292 Incorrect datetime value: 'wrong-date-value'
+Warning 1292 Truncated incorrect datetime value: 'wrong-date-value'
+Warning 1292 Truncated incorrect datetime value: 'wrong-date-value'
SELECT a.gvid, (SUM(CASE b.sampletid WHEN 140 THEN b.samplevalue ELSE 0 END)) as the_success,(SUM(CASE b.sampletid WHEN 141 THEN b.samplevalue ELSE 0 END)) as the_fail,(SUM(CASE b.sampletid WHEN 142 THEN b.samplevalue ELSE 0 END)) as the_size,(SUM(CASE b.sampletid WHEN 143 THEN b.samplevalue ELSE 0 END)) as the_time FROM t1 a, t2 b WHERE a.hmid = b.hmid AND a.volid = b.volid AND b.sampletime >= NULL AND b.sampletime < NULL AND b.sampletid IN (140, 141, 142, 143) GROUP BY a.gvid;
gvid the_success the_fail the_size the_time
DROP TABLE t1,t2;
@@ -2786,7 +2786,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index key1 key1 5 NULL 4 Using where; Using index
explain select min(key1) from t1 where key1 >= 0.3762 and rand() + 0.5 >= 0.5;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range key1 key1 5 NULL 3 Using where; Using index
+1 SIMPLE t1 index key1 key1 5 NULL 4 Using where; Using index
select max(key1) from t1 where key1 <= 0.6158;
max(key1)
0.6158000230789185
@@ -3408,7 +3408,7 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
CREATE TABLE t1 (sku int PRIMARY KEY, pr int);
CREATE TABLE t2 (sku int PRIMARY KEY, sppr int, name varchar(255));
INSERT INTO t1 VALUES
-(10, 10), (20, 10), (30, 20), (40, 30), (50, 10), (60, 10);
+(10, 10), (20, 10), (30, 20), (40, 30), (50, 10), (60, 10), (70, 10);
INSERT INTO t2 VALUES
(10, 10, 'aaa'), (20, 10, 'bbb'), (30, 10, 'ccc'), (40, 20, 'ddd'),
(50, 10, 'eee'), (60, 20, 'fff'), (70, 20, 'ggg'), (80, 30, 'hhh');
@@ -3453,6 +3453,7 @@ create table t2 (a int, b int, c int, e int, primary key(a,b,c));
insert into t2 select A.a, B.a, C.a, C.a from t1 A, t1 B, t1 C;
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
select 'In next EXPLAIN, B.rows must be exactly 10:' Z;
Z
@@ -3474,13 +3475,13 @@ INSERT INTO t2 VALUES
EXPLAIN
SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition
-1 SIMPLE t2 ref c c 5 test.t1.a 2
+1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where
+1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter
EXPLAIN
SELECT a, c, d, f FROM t1,t2 WHERE a=c AND b BETWEEN 4 AND 6 AND a > 0;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,b b 5 NULL 3 Using index condition; Using where
-1 SIMPLE t2 ref c c 5 test.t1.a 2
+1 SIMPLE t2 ALL c NULL NULL NULL 18 Using where
+1 SIMPLE t1 eq_ref|filter PRIMARY,b PRIMARY|b 4|5 test.t2.c 1 (30%) Using where; Using rowid filter
DROP TABLE t1, t2;
create table t1 (
a int unsigned not null auto_increment primary key,
@@ -3522,7 +3523,7 @@ INSERT INTO t2 VALUES
EXPLAIN SELECT b FROM t1, t2 WHERE b=c AND a=1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
-1 SIMPLE t2 ref idx idx 4 const 7 Using index
+1 SIMPLE t2 ref idx idx 4 const 8 Using index
EXPLAIN SELECT b FROM t1, t2 WHERE b=c AND a=4;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
@@ -3533,14 +3534,14 @@ INSERT INTO t1 VALUES (1,2), (2,NULL), (3,2);
CREATE TABLE t2 (b int, c INT, INDEX idx1(b));
INSERT INTO t2 VALUES (2,1), (3,2);
CREATE TABLE t3 (d int, e int, INDEX idx1(d));
-INSERT INTO t3 VALUES (2,10), (2,20), (1,30), (2,40), (2,50);
+INSERT INTO t3 VALUES (2,10), (2,20), (1,30), (2,40), (2,50), (1,60), (3,70), (1,80), (3,90);
EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON t2.b=t1.a INNER JOIN t3 ON t3.d=t1.id
WHERE t1.id=2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 const idx1 NULL NULL NULL 1
-1 SIMPLE t3 ref idx1 idx1 5 const 3
+1 SIMPLE t3 ref idx1 idx1 5 const 4
SELECT * FROM t1 LEFT JOIN t2 ON t2.b=t1.a INNER JOIN t3 ON t3.d=t1.id
WHERE t1.id=2;
id a b c d e
@@ -3569,7 +3570,7 @@ EXPLAIN SELECT t2.*
FROM t1 JOIN t2 ON t2.fk=t1.pk
WHERE t2.fk < 'c' AND t2.pk=t1.fk;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 12 NULL 3 Using index condition; Using where
+1 SIMPLE t1 range PRIMARY PRIMARY 12 NULL 2 Using index condition; Using where
1 SIMPLE t2 eq_ref PRIMARY PRIMARY 18 test.t1.fk 1 Using where
EXPLAIN SELECT t2.*
FROM t1 JOIN t2 ON t2.fk=t1.pk
@@ -3616,7 +3617,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si si 5 NULL 4 Using index condition; Using where
-1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
+1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
EXPLAIN
SELECT t3.a FROM t1,t2,t3
WHERE t1.id = 8 AND t2.i BETWEEN t1.b AND t1.e AND
@@ -3624,7 +3625,7 @@ t3.a=t2.a AND t3.c IN ('bb','ee') ;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
1 SIMPLE t2 range si,ai si 5 NULL 4 Using index condition; Using where
-1 SIMPLE t3 eq_ref PRIMARY,ci PRIMARY 4 test.t2.a 1 Using where
+1 SIMPLE t3 eq_ref|filter PRIMARY,ci PRIMARY|ci 4|5 test.t2.a 1 (30%) Using where; Using rowid filter
EXPLAIN
SELECT t3.a FROM t1,t2 FORCE INDEX (si),t3
WHERE t1.id = 8 AND (t2.i=t1.b OR t2.i=t1.e) AND t3.a=t2.a AND
@@ -3698,6 +3699,10 @@ INSERT INTO t1 SELECT * FROM t1 WHERE ID1_with_null IS NULL;
INSERT INTO t1 SELECT * FROM t1 WHERE ID2_with_null IS NULL;
INSERT INTO t1 SELECT * FROM t1 WHERE ID1_with_null IS NULL;
INSERT INTO t1 SELECT * FROM t1 WHERE ID2_with_null IS NULL;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
SELECT COUNT(*) FROM t1 WHERE ID1_with_null IS NULL AND ID2_with_null=3;
COUNT(*)
24
@@ -3713,45 +3718,47 @@ COUNT(*)
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null=3 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null=3 AND ID2_with_null=3 IS NULL ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
DROP INDEX idx1 ON t1;
CREATE UNIQUE INDEX idx1 ON t1(ID1_with_null,ID2_with_null);
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null=3 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null=3 AND ID2_with_null IS NULL ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND ID2_with_null IS NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref idx1,idx2 idx2 4 const 2 Using where
EXPLAIN SELECT * FROM t1
WHERE ID_better=1 AND ID1_with_null IS NULL AND
(ID2_with_null=1 OR ID2_with_null=2);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref idx1,idx2 idx2 4 const 1 Using where
+1 SIMPLE t1 ref|filter idx1,idx2 idx1|idx2 5|4 const 2 (1%) Using index condition; Using where; Using rowid filter
DROP TABLE t1;
CREATE TABLE t1 (a INT, ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, KEY ts(ts));
INSERT INTO t1 VALUES (30,"2006-01-03 23:00:00"), (31,"2006-01-03 23:00:00");
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CREATE TABLE t2 (a INT, dt1 DATETIME, dt2 DATETIME, PRIMARY KEY (a));
INSERT INTO t2 VALUES (30, "2006-01-01 00:00:00", "2999-12-31 00:00:00");
INSERT INTO t2 SELECT a+1,dt1,dt2 FROM t2;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
EXPLAIN
SELECT * FROM t1 LEFT JOIN t2 ON (t1.a=t2.a) WHERE t1.a=30
@@ -3759,7 +3766,7 @@ AND t1.ts BETWEEN t2.dt1 AND t2.dt2
AND t1.ts BETWEEN "2006-01-01" AND "2006-12-31";
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 const PRIMARY PRIMARY 4 const 1
-1 SIMPLE t1 range ts ts 4 NULL 1 Using index condition; Using where
+1 SIMPLE t1 range ts ts 4 NULL 2 Using index condition; Using where
SELECT * FROM t1 LEFT JOIN t2 ON (t1.a=t2.a) WHERE t1.a=30
AND t1.ts BETWEEN t2.dt1 AND t2.dt2
AND t1.ts BETWEEN "2006-01-01" AND "2006-12-31";
@@ -3801,6 +3808,7 @@ INSERT INTO t1 VALUES ('ccc'), ('bb'), ('cc '), ('aa '), ('aa');
INSERT INTO t2 VALUES ('bb',1), ('aa',2), ('cc ',3);
INSERT INTO t2 VALUES (concat('cc ', 0x06), 4);
INSERT INTO t2 VALUES ('cc',5), ('bb ',6), ('cc ',7);
+INSERT INTO t2 VALUES ('bb ',8), ('aa',9), ('aa',10), ('bb',11);
SELECT * FROM t2;
name n
bb 1
@@ -3810,27 +3818,39 @@ cc  4
cc 5
bb 6
cc 7
+bb 8
+aa 9
+aa 10
+bb 11
SELECT * FROM t2 ORDER BY name;
name n
aa 2
+aa 10
+aa 9
bb 1
+bb 8
bb 6
+bb 11
cc  4
-cc 3
-cc 5
cc 7
+cc 5
+cc 3
SELECT name, LENGTH(name), n FROM t2 ORDER BY name;
name LENGTH(name) n
aa 2 2
+aa 2 10
+aa 2 9
bb 2 1
+bb 3 8
bb 3 6
+bb 2 11
cc  4 4
-cc 5 3
-cc 2 5
cc 3 7
+cc 2 5
+cc 5 3
EXPLAIN SELECT name, LENGTH(name), n FROM t2 WHERE name='cc ';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref name name 6 const 3 Using where
+1 SIMPLE t2 ref name name 6 const 4 Using where
SELECT name, LENGTH(name), n FROM t2 WHERE name='cc ';
name LENGTH(name) n
cc 5 3
@@ -3838,7 +3858,7 @@ cc 2 5
cc 3 7
EXPLAIN SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range name name 6 NULL 3 Using where
+1 SIMPLE t2 range name name 6 NULL 4 Using where
SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%';
name LENGTH(name) n
cc 5 3
@@ -3847,7 +3867,7 @@ cc 2 5
cc 3 7
EXPLAIN SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%' ORDER BY name;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range name name 6 NULL 3 Using where; Using filesort
+1 SIMPLE t2 range name name 6 NULL 4 Using where; Using filesort
SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%' ORDER BY name;
name LENGTH(name) n
cc  4 4
@@ -3863,11 +3883,17 @@ name name n
ccc NULL NULL
bb bb 1
bb bb 6
+bb bb 8
+bb bb 11
cc cc 3
cc cc 5
cc cc 7
aa aa 2
+aa aa 9
+aa aa 10
aa aa 2
+aa aa 9
+aa aa 10
DROP TABLE t1,t2;
CREATE TABLE t1 (name text);
CREATE TABLE t2 (name text, n int, KEY (name(3)));
@@ -3875,6 +3901,7 @@ INSERT INTO t1 VALUES ('ccc'), ('bb'), ('cc '), ('aa '), ('aa');
INSERT INTO t2 VALUES ('bb',1), ('aa',2), ('cc ',3);
INSERT INTO t2 VALUES (concat('cc ', 0x06), 4);
INSERT INTO t2 VALUES ('cc',5), ('bb ',6), ('cc ',7);
+INSERT INTO t2 VALUES ('bb ',8), ('aa',9), ('aa',10), ('bb',11);
SELECT * FROM t2;
name n
bb 1
@@ -3884,11 +3911,19 @@ cc  4
cc 5
bb 6
cc 7
+bb 8
+aa 9
+aa 10
+bb 11
SELECT * FROM t2 ORDER BY name;
name n
aa 2
+aa 9
+aa 10
bb 1
bb 6
+bb 8
+bb 11
cc  4
cc 3
cc 5
@@ -3896,15 +3931,19 @@ cc 7
SELECT name, LENGTH(name), n FROM t2 ORDER BY name;
name LENGTH(name) n
aa 2 2
+aa 2 9
+aa 2 10
bb 2 1
bb 3 6
+bb 3 8
+bb 2 11
cc  4 4
cc 5 3
cc 2 5
cc 3 7
EXPLAIN SELECT name, LENGTH(name), n FROM t2 WHERE name='cc ';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref name name 6 const 3 Using where
+1 SIMPLE t2 ref name name 6 const 4 Using where
SELECT name, LENGTH(name), n FROM t2 WHERE name='cc ';
name LENGTH(name) n
cc 5 3
@@ -3912,7 +3951,7 @@ cc 2 5
cc 3 7
EXPLAIN SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range name name 6 NULL 3 Using where
+1 SIMPLE t2 range name name 6 NULL 4 Using where
SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%';
name LENGTH(name) n
cc 5 3
@@ -3921,7 +3960,7 @@ cc 2 5
cc 3 7
EXPLAIN SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%' ORDER BY name;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range name name 6 NULL 3 Using where; Using filesort
+1 SIMPLE t2 range name name 6 NULL 4 Using where; Using filesort
SELECT name , LENGTH(name), n FROM t2 WHERE name LIKE 'cc%' ORDER BY name;
name LENGTH(name) n
cc  4 4
@@ -3937,11 +3976,17 @@ name name n
ccc NULL NULL
bb bb 1
bb bb 6
+bb bb 8
+bb bb 11
cc cc 3
cc cc 5
cc cc 7
aa aa 2
+aa aa 9
+aa aa 10
aa aa 2
+aa aa 9
+aa aa 10
DROP TABLE t1,t2;
CREATE TABLE t1 (
access_id int NOT NULL default '0',
@@ -4111,7 +4156,7 @@ select str_to_date('2007-10-09','%Y-%m-%d') <= '2007/10/2000:00:00 GMT-6';
str_to_date('2007-10-09','%Y-%m-%d') <= '2007/10/2000:00:00 GMT-6'
0
Warnings:
-Warning 1292 Incorrect datetime value: '2007/10/2000:00:00 GMT-6'
+Warning 1292 Truncated incorrect datetime value: '2007/10/2000:00:00 GMT-6'
select str_to_date('2007-10-01','%Y-%m-%d') = '2007-10-1 00:00:00 GMT-6';
str_to_date('2007-10-01','%Y-%m-%d') = '2007-10-1 00:00:00 GMT-6'
1
@@ -4198,7 +4243,7 @@ select str_to_date('2007-10-00','%Y-%m-%d') between '' and '2007/10/20';
str_to_date('2007-10-00','%Y-%m-%d') between '' and '2007/10/20'
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
select str_to_date('','%Y-%m-%d') between '2007/10/01' and '2007/10/20';
str_to_date('','%Y-%m-%d') between '2007/10/01' and '2007/10/20'
0
@@ -4212,22 +4257,22 @@ select str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '';
str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = ''
0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
select str_to_date('1','%Y-%m-%d') = '1';
str_to_date('1','%Y-%m-%d') = '1'
0
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
select str_to_date('1','%Y-%m-%d') = '1';
str_to_date('1','%Y-%m-%d') = '1'
0
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
select str_to_date('','%Y-%m-%d') = '';
str_to_date('','%Y-%m-%d') = ''
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
select str_to_date('2000-01-01','%Y-%m-%d') between '1000-01-01' and '2001-01-01';
str_to_date('2000-01-01','%Y-%m-%d') between '1000-01-01' and '2001-01-01'
1
@@ -4645,6 +4690,8 @@ WHERE int_key IN (SELECT 1 FROM t1)
HAVING date_nokey = '10:41:7'
ORDER BY date_key;
date_nokey
+Warnings:
+Warning 1292 Truncated incorrect datetime value: '10:41:7'
DROP TABLE t1,t2;
CREATE TABLE t1 (a INT NOT NULL, b INT);
INSERT INTO t1 VALUES (1, 1);
@@ -5211,7 +5258,7 @@ INSERT INTO `CC` VALUES
EXPLAIN SELECT `varchar_nokey` G1 FROM CC WHERE `int_nokey` AND `int_key` <= 4
HAVING G1 ORDER BY `varchar_key` LIMIT 6 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE CC range int_key int_key 4 NULL 10 Using index condition; Using where; Using filesort
+1 SIMPLE CC range int_key int_key 4 NULL 9 Using index condition; Using where; Using filesort
SELECT `varchar_nokey` G1 FROM CC WHERE `int_nokey` AND `int_key` <= 4
HAVING G1 ORDER BY `varchar_key` LIMIT 6 ;
G1
@@ -5282,30 +5329,30 @@ SELECT * FROM t1 HAVING f1 = 'zz';
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'zz'
+Warning 1292 Truncated incorrect datetime value: 'zz'
SELECT * FROM t1 HAVING f1 <= 'aa' ;
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'aa'
+Warning 1292 Truncated incorrect datetime value: 'aa'
SELECT * FROM t1 HAVING f1 = 'zz' AND f1 <= 'aa' ;
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'zz'
-Warning 1292 Incorrect datetime value: 'aa'
+Warning 1292 Truncated incorrect datetime value: 'zz'
+Warning 1292 Truncated incorrect datetime value: 'aa'
SELECT * FROM t1 WHERE f1 = 'zz' AND f1 <= 'aa' ;
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'zz'
-Warning 1292 Incorrect datetime value: 'aa'
+Warning 1292 Truncated incorrect datetime value: 'zz'
+Warning 1292 Truncated incorrect datetime value: 'aa'
SELECT * FROM v1 HAVING f1 = 'zz' AND f1 <= 'aa' ;
f1
0000-00-00
Warnings:
-Warning 1292 Incorrect datetime value: 'zz'
-Warning 1292 Incorrect datetime value: 'aa'
+Warning 1292 Truncated incorrect datetime value: 'zz'
+Warning 1292 Truncated incorrect datetime value: 'aa'
DROP TABLE t1;
DROP VIEW v1;
#
diff --git a/mysql-test/main/select_safe.result b/mysql-test/main/select_safe.result
index 60364b8c4ec..649e2dc484e 100644
--- a/mysql-test/main/select_safe.result
+++ b/mysql-test/main/select_safe.result
@@ -62,19 +62,20 @@ a b
5 a
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
insert into t1 values (null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a");
insert into t1 values (null,"b"),(null,"b"),(null,"c"),(null,"c"),(null,"d"),(null,"d"),(null,"e"),(null,"e"),(null,"a"),(null,"e");
insert into t1 values (null,"x"),(null,"x"),(null,"y"),(null,"y"),(null,"z"),(null,"z"),(null,"v"),(null,"v"),(null,"a"),(null,"v");
explain select STRAIGHT_JOIN * from t1,t1 as t2 where t1.b=t2.b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL b NULL NULL NULL 41 Using where
-1 SIMPLE t2 ref b b 21 test.t1.b 6
+1 SIMPLE t1 ALL b NULL NULL NULL 11
+1 SIMPLE t2 ALL b NULL NULL NULL 11 Using where; Using join buffer (flat, BNL join)
set MAX_SEEKS_FOR_KEY=1;
explain select STRAIGHT_JOIN * from t1,t1 as t2 where t1.b=t2.b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL b NULL NULL NULL 41 Using where
-1 SIMPLE t2 ref b b 21 test.t1.b 6
+1 SIMPLE t1 ALL b NULL NULL NULL 11
+1 SIMPLE t2 ALL b NULL NULL NULL 11 Using where; Using join buffer (flat, BNL join)
SET MAX_SEEKS_FOR_KEY=DEFAULT;
drop table t1;
create table t1 (a int);
diff --git a/mysql-test/main/selectivity.result b/mysql-test/main/selectivity.result
index 00907235ecc..087faef284d 100644
--- a/mysql-test/main/selectivity.result
+++ b/mysql-test/main/selectivity.result
@@ -10,6 +10,8 @@ set use_stat_tables='preferably';
set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
set @save_histogram_size=@@histogram_size;
set @save_histogram_type=@@histogram_type;
+set histogram_size=0;
+set histogram_type='single_prec_hb';
set optimizer_use_condition_selectivity=3;
create table t1 (a int);
insert into t1 values
@@ -141,9 +143,9 @@ order by s_suppkey;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY supplier ALL PRIMARY NULL NULL NULL 10 100.00 Using filesort
1 PRIMARY <derived3> ref key0 key0 5 dbt3_s001.supplier.s_suppkey 10 100.00 Using where
-3 DERIVED lineitem range i_l_shipdate,i_l_suppkey i_l_shipdate 4 NULL 268 100.00 Using where; Using temporary; Using filesort
-2 SUBQUERY <derived4> ALL NULL NULL NULL NULL 268 100.00
-4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 268 100.00 Using where; Using temporary; Using filesort
+3 DERIVED lineitem range i_l_shipdate,i_l_suppkey i_l_shipdate 4 NULL 269 100.00 Using where; Using temporary; Using filesort
+2 SUBQUERY <derived4> ALL NULL NULL NULL NULL 269 100.00
+4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 269 100.00 Using where; Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_suppkey` AS `s_suppkey`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`revenue0`.`total_revenue` AS `total_revenue` from `dbt3_s001`.`supplier` join `dbt3_s001`.`revenue0` where `revenue0`.`supplier_no` = `dbt3_s001`.`supplier`.`s_suppkey` and `revenue0`.`total_revenue` = (/* select#2 */ select max(`revenue0`.`total_revenue`) from `dbt3_s001`.`revenue0`) order by `dbt3_s001`.`supplier`.`s_suppkey`
select s_suppkey, s_name, s_address, s_phone, total_revenue
@@ -162,9 +164,9 @@ order by s_suppkey;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY supplier ALL PRIMARY NULL NULL NULL 10 100.00 Using filesort
1 PRIMARY <derived3> ref key0 key0 5 dbt3_s001.supplier.s_suppkey 10 100.00 Using where
-3 DERIVED lineitem range i_l_shipdate,i_l_suppkey i_l_shipdate 4 NULL 268 100.00 Using where; Using temporary; Using filesort
-2 SUBQUERY <derived4> ALL NULL NULL NULL NULL 268 100.00
-4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 268 100.00 Using where; Using temporary; Using filesort
+3 DERIVED lineitem range i_l_shipdate,i_l_suppkey i_l_shipdate 4 NULL 269 100.00 Using where; Using temporary; Using filesort
+2 SUBQUERY <derived4> ALL NULL NULL NULL NULL 269 100.00
+4 DERIVED lineitem range i_l_shipdate i_l_shipdate 4 NULL 269 100.00 Using where; Using temporary; Using filesort
Warnings:
Note 1003 /* select#1 */ select `dbt3_s001`.`supplier`.`s_suppkey` AS `s_suppkey`,`dbt3_s001`.`supplier`.`s_name` AS `s_name`,`dbt3_s001`.`supplier`.`s_address` AS `s_address`,`dbt3_s001`.`supplier`.`s_phone` AS `s_phone`,`revenue0`.`total_revenue` AS `total_revenue` from `dbt3_s001`.`supplier` join `dbt3_s001`.`revenue0` where `revenue0`.`supplier_no` = `dbt3_s001`.`supplier`.`s_suppkey` and `revenue0`.`total_revenue` = (/* select#2 */ select max(`revenue0`.`total_revenue`) from `dbt3_s001`.`revenue0`) order by `dbt3_s001`.`supplier`.`s_suppkey`
select s_suppkey, s_name, s_address, s_phone, total_revenue
@@ -1369,14 +1371,14 @@ test.t2 analyze status Table is already up to date
explain extended
select * from t1 straight_join t2 where t1.a=t2.a and t1.a<10;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 0.99 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 1.96 Using where
1 SIMPLE t2 ref a a 5 test.t1.a 10 100.00
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` straight_join `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` and `test`.`t1`.`a` < 10
explain extended
select * from t1 straight_join t2 where t1.a=t2.a and t2.a<10;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 0.99 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 1.96 Using where
1 SIMPLE t2 ref a a 5 test.t1.a 10 100.00
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` straight_join `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` and `test`.`t1`.`a` < 10
diff --git a/mysql-test/main/selectivity.test b/mysql-test/main/selectivity.test
index 3df49456332..eb3f6e2893a 100644
--- a/mysql-test/main/selectivity.test
+++ b/mysql-test/main/selectivity.test
@@ -14,6 +14,8 @@ set use_stat_tables='preferably';
set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
set @save_histogram_size=@@histogram_size;
set @save_histogram_type=@@histogram_type;
+set histogram_size=0;
+set histogram_type='single_prec_hb';
# check that statistics on nulls is used
diff --git a/mysql-test/main/selectivity_innodb.result b/mysql-test/main/selectivity_innodb.result
index 93917065722..b6fed1bb41d 100644
--- a/mysql-test/main/selectivity_innodb.result
+++ b/mysql-test/main/selectivity_innodb.result
@@ -13,6 +13,8 @@ set use_stat_tables='preferably';
set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
set @save_histogram_size=@@histogram_size;
set @save_histogram_type=@@histogram_type;
+set histogram_size=0;
+set histogram_type='single_prec_hb';
set optimizer_use_condition_selectivity=3;
create table t1 (a int);
insert into t1 values
@@ -1379,14 +1381,14 @@ test.t2 analyze status OK
explain extended
select * from t1 straight_join t2 where t1.a=t2.a and t1.a<10;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 0.99 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 1.96 Using where
1 SIMPLE t2 ref a a 5 test.t1.a 10 100.00
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` straight_join `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` and `test`.`t1`.`a` < 10
explain extended
select * from t1 straight_join t2 where t1.a=t2.a and t2.a<10;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 0.99 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 1000 1.96 Using where
1 SIMPLE t2 ref a a 5 test.t1.a 10 100.00
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t2`.`a` AS `a`,`test`.`t2`.`b` AS `b` from `test`.`t1` straight_join `test`.`t2` where `test`.`t2`.`a` = `test`.`t1`.`a` and `test`.`t1`.`a` < 10
diff --git a/mysql-test/main/selectivity_no_engine.result b/mysql-test/main/selectivity_no_engine.result
index 7fc3c6e9909..743dcd04695 100644
--- a/mysql-test/main/selectivity_no_engine.result
+++ b/mysql-test/main/selectivity_no_engine.result
@@ -161,7 +161,7 @@ Note 1003 select `test`.`t1`.`key1` AS `key1`,`test`.`t1`.`col1` AS `col1` from
# Must show 100%, not 10%
explain extended select * from t1 where key1=2;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 ref key1 key1 5 const 98 100.00
+1 SIMPLE t1 ref key1 key1 5 const 99 100.00
Warnings:
Note 1003 select `test`.`t1`.`key1` AS `key1`,`test`.`t1`.`col1` AS `col1` from `test`.`t1` where `test`.`t1`.`key1` = 2
drop table t0, t1;
@@ -231,11 +231,12 @@ f2 varchar(1024),
KEY (f1,f2(255))
);
INSERT INTO t2 VALUES ('foo','baz','qux'),('bar','baz','qux');
+INSERT INTO t2 VALUES ('foo','bazz','qux'),('bar','bazz','qux');
set optimizer_use_condition_selectivity=2;
explain
select * from t1,t2 where t1.id = t2.t1_id and t2.f2='qux' and t2.f1='baz';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref f1 f1 325 const,const 1 Using index condition; Using where
+1 SIMPLE t2 ref f1 f1 325 const,const 2 Using index condition; Using where
1 SIMPLE t1 eq_ref PRIMARY PRIMARY 122 test.t2.t1_id 1
select * from t1,t2 where t1.id = t2.t1_id and t2.f2='qux' and t2.f1='baz';
id dt t1_id f1 f2
diff --git a/mysql-test/main/selectivity_no_engine.test b/mysql-test/main/selectivity_no_engine.test
index 2a31c01ed97..270e719e74c 100644
--- a/mysql-test/main/selectivity_no_engine.test
+++ b/mysql-test/main/selectivity_no_engine.test
@@ -181,6 +181,7 @@ CREATE TABLE t2 (
);
INSERT INTO t2 VALUES ('foo','baz','qux'),('bar','baz','qux');
+INSERT INTO t2 VALUES ('foo','bazz','qux'),('bar','bazz','qux');
set optimizer_use_condition_selectivity=2;
explain
diff --git a/mysql-test/main/set_password.result b/mysql-test/main/set_password.result
index 733d9c96187..bb1124e09d4 100644
--- a/mysql-test/main/set_password.result
+++ b/mysql-test/main/set_password.result
@@ -8,12 +8,12 @@ create user oldpass@localhost identified by password '378b243e220ca493';
create user oldpassold@localhost identified with 'mysql_old_password';
set password for oldpassold@localhost = '378b243e220ca493';
select user, host, password, plugin, authentication_string from mysql.user where user != 'root';
-user host password plugin authentication_string
-natauth localhost *94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29
-newpass localhost *94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29
+User Host Password plugin authentication_string
+natauth localhost *94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29 mysql_native_password *94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29
+newpass localhost *94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29 mysql_native_password *94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29
newpassnat localhost *94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29 mysql_native_password *94BDCEBE19083CE2A1F959FD02F964C7AF4CFC29
-oldauth localhost 378b243e220ca493
-oldpass localhost 378b243e220ca493
+oldauth localhost 378b243e220ca493 mysql_old_password 378b243e220ca493
+oldpass localhost 378b243e220ca493 mysql_old_password 378b243e220ca493
oldpassold localhost 378b243e220ca493 mysql_old_password 378b243e220ca493
connect con,localhost,natauth,test,;
select current_user();
@@ -85,7 +85,7 @@ set password for oldauth@localhost = PASSWORD('test2');
set password for oldpass@localhost = PASSWORD('test2');
set password for oldpassold@localhost = PASSWORD('test2');
select user, host, password, plugin, authentication_string from mysql.user where user != 'root';
-user host password plugin authentication_string
+User Host Password plugin authentication_string
natauth localhost *7CEB3FDE5F7A9C4CE5FBE610D7D8EDA62EBE5F4E mysql_native_password *7CEB3FDE5F7A9C4CE5FBE610D7D8EDA62EBE5F4E
newpass localhost *7CEB3FDE5F7A9C4CE5FBE610D7D8EDA62EBE5F4E mysql_native_password *7CEB3FDE5F7A9C4CE5FBE610D7D8EDA62EBE5F4E
newpassnat localhost *7CEB3FDE5F7A9C4CE5FBE610D7D8EDA62EBE5F4E mysql_native_password *7CEB3FDE5F7A9C4CE5FBE610D7D8EDA62EBE5F4E
@@ -173,7 +173,7 @@ disconnect foo;
connection default;
select user,host,password,plugin,authentication_string from mysql.user where user='foo';
user host password plugin authentication_string
-foo localhost *E8D46CE25265E545D225A8A6F1BAF642FEBEE5CB mysql_native_password *E8D46CE25265E545D225A8A6F1BAF642FEBEE5CB
+foo localhost mysql_native_password *E8D46CE25265E545D225A8A6F1BAF642FEBEE5CB
set password for 'foo'@'localhost' = '';
select user,host,password,plugin,authentication_string from mysql.user where user='foo';
user host password plugin authentication_string
diff --git a/mysql-test/main/set_password.test b/mysql-test/main/set_password.test
index fc1ecb5ef5c..c67dc22dc81 100644
--- a/mysql-test/main/set_password.test
+++ b/mysql-test/main/set_password.test
@@ -132,6 +132,7 @@ set global secure_auth=default;
#
# MDEV-16238 root/localhost authn prioritizes authentication_string over Password
#
+--source include/switch_to_mysql_user.inc
create user foo@localhost identified with mysql_native_password;
update mysql.user set authentication_string=password('foo'), plugin='mysql_native_password' where user='foo' and host='localhost';
set password for 'foo'@'localhost' = password('bar');
@@ -145,3 +146,4 @@ select user,host,password,plugin,authentication_string from mysql.user where use
set password for 'foo'@'localhost' = '';
select user,host,password,plugin,authentication_string from mysql.user where user='foo';
drop user foo@localhost;
+--source include/switch_to_mysql_global_priv.inc
diff --git a/mysql-test/main/set_statement.result b/mysql-test/main/set_statement.result
index c34e1171899..f3dc0b35ea6 100644
--- a/mysql-test/main/set_statement.result
+++ b/mysql-test/main/set_statement.result
@@ -1225,3 +1225,20 @@ set @rnd=1;
select @rnd;
@rnd
0
+create table t (a int);
+SET sql_mode=ORACLE;
+SET STATEMENT myisam_sort_buffer_size=800000 FOR OPTIMIZE TABLE t;
+Table Op Msg_type Msg_text
+test.t optimize status Table is already up to date
+SET sql_mode=default;
+SET STATEMENT myisam_sort_buffer_size=800000 FOR OPTIMIZE TABLE t;
+Table Op Msg_type Msg_text
+test.t optimize status Table is already up to date
+drop table t;
+#
+# MDEV-18358: Server crash when using SET STATEMENT max_statement_time
+#
+SET sql_mode=ORACLE;
+SET STATEMENT max_statement_time=30 FOR DELETE FROM mysql.user where user = 'unknown';
+SET sql_mode=default;
+SET STATEMENT max_statement_time=30 FOR DELETE FROM mysql.user where user = 'unknown';
diff --git a/mysql-test/main/set_statement.test b/mysql-test/main/set_statement.test
index cc361553cfd..0687644abc6 100644
--- a/mysql-test/main/set_statement.test
+++ b/mysql-test/main/set_statement.test
@@ -1130,3 +1130,19 @@ while ($1)
--enable_query_log
--echo # @rnd should be 0
select @rnd;
+
+create table t (a int);
+SET sql_mode=ORACLE;
+SET STATEMENT myisam_sort_buffer_size=800000 FOR OPTIMIZE TABLE t;
+SET sql_mode=default;
+SET STATEMENT myisam_sort_buffer_size=800000 FOR OPTIMIZE TABLE t;
+drop table t;
+
+
+--echo #
+--echo # MDEV-18358: Server crash when using SET STATEMENT max_statement_time
+--echo #
+SET sql_mode=ORACLE;
+SET STATEMENT max_statement_time=30 FOR DELETE FROM mysql.user where user = 'unknown';
+SET sql_mode=default;
+SET STATEMENT max_statement_time=30 FOR DELETE FROM mysql.user where user = 'unknown';
diff --git a/mysql-test/main/shm-master.opt b/mysql-test/main/shm-master.opt
deleted file mode 100644
index d71395213b1..00000000000
--- a/mysql-test/main/shm-master.opt
+++ /dev/null
@@ -1 +0,0 @@
---skip-grant-tables --loose-shared-memory-base-name=HeyMrBaseNameXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX$MTR_BUILD_THREAD --loose-shared-memory=1
diff --git a/mysql-test/main/shm.result b/mysql-test/main/shm.result
deleted file mode 100644
index 65187b6b19b..00000000000
--- a/mysql-test/main/shm.result
+++ /dev/null
@@ -1,2170 +0,0 @@
-connect shm_con,localhost,root,,,,$shm_name,SHM;
-drop table if exists t1,t2,t3,t4;
-CREATE TABLE t1 (
-Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
-Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
-);
-INSERT INTO t1 VALUES (9410,9412);
-select period from t1;
-period
-9410
-select * from t1;
-Period Varor_period
-9410 9412
-select t1.* from t1;
-Period Varor_period
-9410 9412
-CREATE TABLE t2 (
-auto int not null auto_increment,
-fld1 int(6) unsigned zerofill DEFAULT '000000' NOT NULL,
-companynr tinyint(2) unsigned zerofill DEFAULT '00' NOT NULL,
-fld3 char(30) DEFAULT '' NOT NULL,
-fld4 char(35) DEFAULT '' NOT NULL,
-fld5 char(35) DEFAULT '' NOT NULL,
-fld6 char(4) DEFAULT '' NOT NULL,
-UNIQUE fld1 (fld1),
-KEY fld3 (fld3),
-PRIMARY KEY (auto)
-);
-select t2.fld3 from t2 where companynr = 58 and fld3 like "%imaginable%";
-fld3
-imaginable
-select fld3 from t2 where fld3 like "%cultivation" ;
-fld3
-cultivation
-select t2.fld3,companynr from t2 where companynr = 57+1 order by fld3;
-fld3 companynr
-concoct 58
-druggists 58
-engrossing 58
-Eurydice 58
-exclaimers 58
-ferociousness 58
-hopelessness 58
-Huey 58
-imaginable 58
-judges 58
-merging 58
-ostrich 58
-peering 58
-Phelps 58
-presumes 58
-Ruth 58
-sentences 58
-Shylock 58
-straggled 58
-synergy 58
-thanking 58
-tying 58
-unlocks 58
-select fld3,companynr from t2 where companynr = 58 order by fld3;
-fld3 companynr
-concoct 58
-druggists 58
-engrossing 58
-Eurydice 58
-exclaimers 58
-ferociousness 58
-hopelessness 58
-Huey 58
-imaginable 58
-judges 58
-merging 58
-ostrich 58
-peering 58
-Phelps 58
-presumes 58
-Ruth 58
-sentences 58
-Shylock 58
-straggled 58
-synergy 58
-thanking 58
-tying 58
-unlocks 58
-select fld3 from t2 order by fld3 desc limit 10;
-fld3
-youthfulness
-yelped
-Wotan
-workers
-Witt
-witchcraft
-Winsett
-Willy
-willed
-wildcats
-select fld3 from t2 order by fld3 desc limit 5;
-fld3
-youthfulness
-yelped
-Wotan
-workers
-Witt
-select fld3 from t2 order by fld3 desc limit 5,5;
-fld3
-witchcraft
-Winsett
-Willy
-willed
-wildcats
-select t2.fld3 from t2 where fld3 = 'honeysuckle';
-fld3
-honeysuckle
-select t2.fld3 from t2 where fld3 LIKE 'honeysuckl_';
-fld3
-honeysuckle
-select t2.fld3 from t2 where fld3 LIKE 'hon_ysuckl_';
-fld3
-honeysuckle
-select t2.fld3 from t2 where fld3 LIKE 'honeysuckle%';
-fld3
-honeysuckle
-select t2.fld3 from t2 where fld3 LIKE 'h%le';
-fld3
-honeysuckle
-select t2.fld3 from t2 where fld3 LIKE 'honeysuckle_';
-fld3
-select t2.fld3 from t2 where fld3 LIKE 'don_t_find_me_please%';
-fld3
-explain select t2.fld3 from t2 where fld3 = 'honeysuckle';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref fld3 fld3 30 const 1 Using where; Using index
-explain select fld3 from t2 ignore index (fld3) where fld3 = 'honeysuckle';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-explain select fld3 from t2 use index (fld1) where fld3 = 'honeysuckle';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-explain select fld3 from t2 use index (fld3) where fld3 = 'honeysuckle';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref fld3 fld3 30 const 1 Using where; Using index
-explain select fld3 from t2 use index (fld1,fld3) where fld3 = 'honeysuckle';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ref fld3 fld3 30 const 1 Using where; Using index
-explain select fld3 from t2 ignore index (fld3,not_used);
-ERROR 42000: Key 'not_used' doesn't exist in table 't2'
-explain select fld3 from t2 use index (not_used);
-ERROR 42000: Key 'not_used' doesn't exist in table 't2'
-select t2.fld3 from t2 where fld3 >= 'honeysuckle' and fld3 <= 'honoring' order by fld3;
-fld3
-honeysuckle
-honoring
-explain select t2.fld3 from t2 where fld3 >= 'honeysuckle' and fld3 <= 'honoring' order by fld3;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range fld3 fld3 30 NULL 2 Using where; Using index
-select fld1,fld3 from t2 where fld3="Colombo" or fld3 = "nondecreasing" order by fld3;
-fld1 fld3
-148504 Colombo
-068305 Colombo
-000000 nondecreasing
-select fld1,fld3 from t2 where companynr = 37 and fld3 = 'appendixes';
-fld1 fld3
-232605 appendixes
-1232605 appendixes
-1232606 appendixes
-1232607 appendixes
-1232608 appendixes
-1232609 appendixes
-select fld1 from t2 where fld1=250501 or fld1="250502";
-fld1
-250501
-250502
-explain select fld1 from t2 where fld1=250501 or fld1="250502";
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range fld1 fld1 4 NULL 2 Using where; Using index
-select fld1 from t2 where fld1=250501 or fld1=250502 or fld1 >= 250505 and fld1 <= 250601 or fld1 between 250501 and 250502;
-fld1
-250501
-250502
-250505
-250601
-explain select fld1 from t2 where fld1=250501 or fld1=250502 or fld1 >= 250505 and fld1 <= 250601 or fld1 between 250501 and 250502;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range fld1 fld1 4 NULL 4 Using where; Using index
-select fld1,fld3 from t2 where companynr = 37 and fld3 like 'f%';
-fld1 fld3
-012001 flanking
-013602 foldout
-013606 fingerings
-018007 fanatic
-018017 featherweight
-018054 fetters
-018103 flint
-018104 flopping
-036002 funereal
-038017 fetched
-038205 firearm
-058004 Fenton
-088303 feminine
-186002 freakish
-188007 flurried
-188505 fitting
-198006 furthermore
-202301 Fitzpatrick
-208101 fiftieth
-208113 freest
-218008 finishers
-218022 feed
-218401 faithful
-226205 foothill
-226209 furnishings
-228306 forthcoming
-228311 fated
-231315 freezes
-232102 forgivably
-238007 filial
-238008 fixedly
-select fld3 from t2 where fld3 like "L%" and fld3 = "ok";
-fld3
-select fld3 from t2 where (fld3 like "C%" and fld3 = "Chantilly");
-fld3
-Chantilly
-select fld1,fld3 from t2 where fld1 like "25050%";
-fld1 fld3
-250501 poisoning
-250502 Iraqis
-250503 heaving
-250504 population
-250505 bomb
-select fld1,fld3 from t2 where fld1 like "25050_";
-fld1 fld3
-250501 poisoning
-250502 Iraqis
-250503 heaving
-250504 population
-250505 bomb
-select distinct companynr from t2;
-companynr
-00
-37
-36
-50
-58
-29
-40
-53
-65
-41
-34
-68
-select distinct companynr from t2 order by companynr;
-companynr
-00
-29
-34
-36
-37
-40
-41
-50
-53
-58
-65
-68
-select distinct companynr from t2 order by companynr desc;
-companynr
-68
-65
-58
-53
-50
-41
-40
-37
-36
-34
-29
-00
-select distinct t2.fld3,period from t2,t1 where companynr=37 and fld3 like "O%";
-fld3 period
-obliterates 9410
-offload 9410
-opaquely 9410
-organizer 9410
-overestimating 9410
-overlay 9410
-select distinct fld3 from t2 where companynr = 34 order by fld3;
-fld3
-absentee
-accessed
-ahead
-alphabetic
-Asiaticizations
-attitude
-aye
-bankruptcies
-belays
-Blythe
-bomb
-boulevard
-bulldozes
-cannot
-caressing
-charcoal
-checksumming
-chess
-clubroom
-colorful
-cosy
-creator
-crying
-Darius
-diffusing
-duality
-Eiffel
-Epiphany
-Ernestine
-explorers
-exterminated
-famine
-forked
-Gershwins
-heaving
-Hodges
-Iraqis
-Italianization
-Lagos
-landslide
-libretto
-Majorca
-mastering
-narrowed
-occurred
-offerers
-Palestine
-Peruvianizes
-pharmaceutic
-poisoning
-population
-Pygmalion
-rats
-realest
-recording
-regimented
-retransmitting
-reviver
-rouses
-scars
-sicker
-sleepwalk
-stopped
-sugars
-translatable
-uncles
-unexpected
-uprisings
-versatility
-vest
-select distinct fld3 from t2 limit 10;
-fld3
-abates
-abiding
-Abraham
-abrogating
-absentee
-abut
-accessed
-accruing
-accumulating
-accuracies
-select distinct fld3 from t2 having fld3 like "A%" limit 10;
-fld3
-abates
-abiding
-Abraham
-abrogating
-absentee
-abut
-accessed
-accruing
-accumulating
-accuracies
-select distinct substring(fld3,1,3) from t2 where fld3 like "A%";
-substring(fld3,1,3)
-aba
-abi
-Abr
-abs
-abu
-acc
-acq
-acu
-Ade
-adj
-Adl
-adm
-Ado
-ads
-adv
-aer
-aff
-afi
-afl
-afo
-agi
-ahe
-aim
-air
-Ald
-alg
-ali
-all
-alp
-alr
-ama
-ame
-amm
-ana
-and
-ane
-Ang
-ani
-Ann
-Ant
-api
-app
-aqu
-Ara
-arc
-Arm
-arr
-Art
-Asi
-ask
-asp
-ass
-ast
-att
-aud
-Aug
-aut
-ave
-avo
-awe
-aye
-Azt
-select distinct substring(fld3,1,3) as a from t2 having a like "A%" order by a limit 10;
-a
-aba
-abi
-Abr
-abs
-abu
-acc
-acq
-acu
-Ade
-adj
-select distinct substring(fld3,1,3) from t2 where fld3 like "A%" limit 10;
-substring(fld3,1,3)
-aba
-abi
-Abr
-abs
-abu
-acc
-acq
-acu
-Ade
-adj
-select distinct substring(fld3,1,3) as a from t2 having a like "A%" limit 10;
-a
-aba
-abi
-Abr
-abs
-abu
-acc
-acq
-acu
-Ade
-adj
-create table t3 (
-period int not null,
-name char(32) not null,
-companynr int not null,
-price double(11,0),
-price2 double(11,0),
-key (period),
-key (name)
-);
-create temporary table tmp engine = myisam select * from t3;
-insert into t3 select * from tmp;
-insert into tmp select * from t3;
-insert into t3 select * from tmp;
-insert into tmp select * from t3;
-insert into t3 select * from tmp;
-insert into tmp select * from t3;
-insert into t3 select * from tmp;
-insert into tmp select * from t3;
-insert into t3 select * from tmp;
-insert into tmp select * from t3;
-insert into t3 select * from tmp;
-insert into tmp select * from t3;
-insert into t3 select * from tmp;
-insert into tmp select * from t3;
-insert into t3 select * from tmp;
-insert into tmp select * from t3;
-insert into t3 select * from tmp;
-alter table t3 add t2nr int not null auto_increment primary key first;
-drop table tmp;
-SET BIG_TABLES=1;
-select distinct concat(fld3," ",fld3) as namn from t2,t3 where t2.fld1=t3.t2nr order by namn limit 10;
-namn
-Abraham Abraham
-abrogating abrogating
-admonishing admonishing
-Adolph Adolph
-afield afield
-aging aging
-ammonium ammonium
-analyzable analyzable
-animals animals
-animized animized
-SET BIG_TABLES=0;
-select distinct concat(fld3," ",fld3) from t2,t3 where t2.fld1=t3.t2nr order by fld3 limit 10;
-concat(fld3," ",fld3)
-Abraham Abraham
-abrogating abrogating
-admonishing admonishing
-Adolph Adolph
-afield afield
-aging aging
-ammonium ammonium
-analyzable analyzable
-animals animals
-animized animized
-select distinct fld5 from t2 limit 10;
-fld5
-neat
-Steinberg
-jarring
-tinily
-balled
-persist
-attainments
-fanatic
-measures
-rightfulness
-select distinct fld3,count(*) from t2 group by companynr,fld3 limit 10;
-fld3 count(*)
-affixed 1
-and 1
-annoyers 1
-Anthony 1
-assayed 1
-assurers 1
-attendants 1
-bedlam 1
-bedpost 1
-boasted 1
-SET BIG_TABLES=1;
-select distinct fld3,count(*) from t2 group by companynr,fld3 limit 10;
-fld3 count(*)
-affixed 1
-and 1
-annoyers 1
-Anthony 1
-assayed 1
-assurers 1
-attendants 1
-bedlam 1
-bedpost 1
-boasted 1
-SET BIG_TABLES=0;
-select distinct fld3,repeat("a",length(fld3)),count(*) from t2 group by companynr,fld3 limit 100,10;
-fld3 repeat("a",length(fld3)) count(*)
-circus aaaaaa 1
-cited aaaaa 1
-Colombo aaaaaaa 1
-congresswoman aaaaaaaaaaaaa 1
-contrition aaaaaaaaaa 1
-corny aaaaa 1
-cultivation aaaaaaaaaaa 1
-definiteness aaaaaaaaaaaa 1
-demultiplex aaaaaaaaaaa 1
-disappointing aaaaaaaaaaaaa 1
-select distinct companynr,rtrim(space(512+companynr)) from t3 order by 1,2;
-companynr rtrim(space(512+companynr))
-37
-78
-101
-154
-311
-447
-512
-select distinct fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2nr order by fld3;
-fld3
-explain select t3.t2nr,fld3 from t2,t3 where t2.companynr = 34 and t2.fld1=t3.t2nr order by t3.t2nr,fld3;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL fld1 NULL NULL NULL 1199 Using where; Using temporary; Using filesort
-1 SIMPLE t3 eq_ref PRIMARY PRIMARY 4 test.t2.fld1 1 Using where; Using index
-explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL period NULL NULL NULL 41810 Using filesort
-1 SIMPLE t3 ref period period 4 test.t1.period 4181
-explain select * from t3 as t1,t3 where t1.period=t3.period order by t3.period limit 10;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 index period period 4 NULL 1
-1 SIMPLE t1 ref period period 4 test.t3.period 4181
-explain select * from t3 as t1,t3 where t1.period=t3.period order by t1.period limit 10;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index period period 4 NULL 1
-1 SIMPLE t3 ref period period 4 test.t1.period 4181
-select period from t1;
-period
-9410
-select period from t1 where period=1900;
-period
-select fld3,period from t1,t2 where fld1 = 011401 order by period;
-fld3 period
-breaking 9410
-select fld3,period from t2,t3 where t2.fld1 = 011401 and t2.fld1=t3.t2nr and t3.period=1001;
-fld3 period
-breaking 1001
-explain select fld3,period from t2,t3 where t2.fld1 = 011401 and t3.t2nr=t2.fld1 and 1001 = t3.period;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 const fld1 fld1 4 const 1
-1 SIMPLE t3 const PRIMARY,period PRIMARY 4 const 1
-select fld3,period from t2,t1 where companynr*10 = 37*10;
-fld3 period
-breaking 9410
-Romans 9410
-intercepted 9410
-bewilderingly 9410
-astound 9410
-admonishing 9410
-sumac 9410
-flanking 9410
-combed 9410
-subjective 9410
-scatterbrain 9410
-Eulerian 9410
-Kane 9410
-overlay 9410
-perturb 9410
-goblins 9410
-annihilates 9410
-Wotan 9410
-snatching 9410
-concludes 9410
-laterally 9410
-yelped 9410
-grazing 9410
-Baird 9410
-celery 9410
-misunderstander 9410
-handgun 9410
-foldout 9410
-mystic 9410
-succumbed 9410
-Nabisco 9410
-fingerings 9410
-aging 9410
-afield 9410
-ammonium 9410
-boat 9410
-intelligibility 9410
-Augustine 9410
-teethe 9410
-dreaded 9410
-scholastics 9410
-audiology 9410
-wallet 9410
-parters 9410
-eschew 9410
-quitter 9410
-neat 9410
-Steinberg 9410
-jarring 9410
-tinily 9410
-balled 9410
-persist 9410
-attainments 9410
-fanatic 9410
-measures 9410
-rightfulness 9410
-capably 9410
-impulsive 9410
-starlet 9410
-terminators 9410
-untying 9410
-announces 9410
-featherweight 9410
-pessimist 9410
-daughter 9410
-decliner 9410
-lawgiver 9410
-stated 9410
-readable 9410
-attrition 9410
-cascade 9410
-motors 9410
-interrogate 9410
-pests 9410
-stairway 9410
-dopers 9410
-testicle 9410
-Parsifal 9410
-leavings 9410
-postulation 9410
-squeaking 9410
-contrasted 9410
-leftover 9410
-whiteners 9410
-erases 9410
-Punjab 9410
-Merritt 9410
-Quixotism 9410
-sweetish 9410
-dogging 9410
-scornfully 9410
-bellow 9410
-bills 9410
-cupboard 9410
-sureties 9410
-puddings 9410
-fetters 9410
-bivalves 9410
-incurring 9410
-Adolph 9410
-pithed 9410
-Miles 9410
-trimmings 9410
-tragedies 9410
-skulking 9410
-flint 9410
-flopping 9410
-relaxing 9410
-offload 9410
-suites 9410
-lists 9410
-animized 9410
-multilayer 9410
-standardizes 9410
-Judas 9410
-vacuuming 9410
-dentally 9410
-humanness 9410
-inch 9410
-Weissmuller 9410
-irresponsibly 9410
-luckily 9410
-culled 9410
-medical 9410
-bloodbath 9410
-subschema 9410
-animals 9410
-Micronesia 9410
-repetitions 9410
-Antares 9410
-ventilate 9410
-pityingly 9410
-interdependent 9410
-Graves 9410
-neonatal 9410
-chafe 9410
-honoring 9410
-realtor 9410
-elite 9410
-funereal 9410
-abrogating 9410
-sorters 9410
-Conley 9410
-lectured 9410
-Abraham 9410
-Hawaii 9410
-cage 9410
-hushes 9410
-Simla 9410
-reporters 9410
-Dutchman 9410
-descendants 9410
-groupings 9410
-dissociate 9410
-coexist 9410
-Beebe 9410
-Taoism 9410
-Connally 9410
-fetched 9410
-checkpoints 9410
-rusting 9410
-galling 9410
-obliterates 9410
-traitor 9410
-resumes 9410
-analyzable 9410
-terminator 9410
-gritty 9410
-firearm 9410
-minima 9410
-Selfridge 9410
-disable 9410
-witchcraft 9410
-betroth 9410
-Manhattanize 9410
-imprint 9410
-peeked 9410
-swelling 9410
-interrelationships 9410
-riser 9410
-Gandhian 9410
-peacock 9410
-bee 9410
-kanji 9410
-dental 9410
-scarf 9410
-chasm 9410
-insolence 9410
-syndicate 9410
-alike 9410
-imperial 9410
-convulsion 9410
-railway 9410
-validate 9410
-normalizes 9410
-comprehensive 9410
-chewing 9410
-denizen 9410
-schemer 9410
-chronicle 9410
-Kline 9410
-Anatole 9410
-partridges 9410
-brunch 9410
-recruited 9410
-dimensions 9410
-Chicana 9410
-announced 9410
-praised 9410
-employing 9410
-linear 9410
-quagmire 9410
-western 9410
-relishing 9410
-serving 9410
-scheduling 9410
-lore 9410
-eventful 9410
-arteriole 9410
-disentangle 9410
-cured 9410
-Fenton 9410
-avoidable 9410
-drains 9410
-detectably 9410
-husky 9410
-impelling 9410
-undoes 9410
-evened 9410
-squeezes 9410
-destroyer 9410
-rudeness 9410
-beaner 9410
-boorish 9410
-Everhart 9410
-encompass 9410
-mushrooms 9410
-Alison 9410
-externally 9410
-pellagra 9410
-cult 9410
-creek 9410
-Huffman 9410
-Majorca 9410
-governing 9410
-gadfly 9410
-reassigned 9410
-intentness 9410
-craziness 9410
-psychic 9410
-squabbled 9410
-burlesque 9410
-capped 9410
-extracted 9410
-DiMaggio 9410
-exclamation 9410
-subdirectory 9410
-Gothicism 9410
-feminine 9410
-metaphysically 9410
-sanding 9410
-Miltonism 9410
-freakish 9410
-index 9410
-straight 9410
-flurried 9410
-denotative 9410
-coming 9410
-commencements 9410
-gentleman 9410
-gifted 9410
-Shanghais 9410
-sportswriting 9410
-sloping 9410
-navies 9410
-leaflet 9410
-shooter 9410
-Joplin 9410
-babies 9410
-assails 9410
-admiring 9410
-swaying 9410
-Goldstine 9410
-fitting 9410
-Norwalk 9410
-analogy 9410
-deludes 9410
-cokes 9410
-Clayton 9410
-exhausts 9410
-causality 9410
-sating 9410
-icon 9410
-throttles 9410
-communicants 9410
-dehydrate 9410
-priceless 9410
-publicly 9410
-incidentals 9410
-commonplace 9410
-mumbles 9410
-furthermore 9410
-cautioned 9410
-parametrized 9410
-registration 9410
-sadly 9410
-positioning 9410
-babysitting 9410
-eternal 9410
-hoarder 9410
-congregates 9410
-rains 9410
-workers 9410
-sags 9410
-unplug 9410
-garage 9410
-boulder 9410
-specifics 9410
-Teresa 9410
-Winsett 9410
-convenient 9410
-buckboards 9410
-amenities 9410
-resplendent 9410
-sews 9410
-participated 9410
-Simon 9410
-certificates 9410
-Fitzpatrick 9410
-Evanston 9410
-misted 9410
-textures 9410
-save 9410
-count 9410
-rightful 9410
-chaperone 9410
-Lizzy 9410
-clenched 9410
-effortlessly 9410
-accessed 9410
-beaters 9410
-Hornblower 9410
-vests 9410
-indulgences 9410
-infallibly 9410
-unwilling 9410
-excrete 9410
-spools 9410
-crunches 9410
-overestimating 9410
-ineffective 9410
-humiliation 9410
-sophomore 9410
-star 9410
-rifles 9410
-dialysis 9410
-arriving 9410
-indulge 9410
-clockers 9410
-languages 9410
-Antarctica 9410
-percentage 9410
-ceiling 9410
-specification 9410
-regimented 9410
-ciphers 9410
-pictures 9410
-serpents 9410
-allot 9410
-realized 9410
-mayoral 9410
-opaquely 9410
-hostess 9410
-fiftieth 9410
-incorrectly 9410
-decomposition 9410
-stranglings 9410
-mixture 9410
-electroencephalography 9410
-similarities 9410
-charges 9410
-freest 9410
-Greenberg 9410
-tinting 9410
-expelled 9410
-warm 9410
-smoothed 9410
-deductions 9410
-Romano 9410
-bitterroot 9410
-corset 9410
-securing 9410
-environing 9410
-cute 9410
-Crays 9410
-heiress 9410
-inform 9410
-avenge 9410
-universals 9410
-Kinsey 9410
-ravines 9410
-bestseller 9410
-equilibrium 9410
-extents 9410
-relatively 9410
-pressure 9410
-critiques 9410
-befouled 9410
-rightfully 9410
-mechanizing 9410
-Latinizes 9410
-timesharing 9410
-Aden 9410
-embassies 9410
-males 9410
-shapelessly 9410
-mastering 9410
-Newtonian 9410
-finishers 9410
-abates 9410
-teem 9410
-kiting 9410
-stodgy 9410
-feed 9410
-guitars 9410
-airships 9410
-store 9410
-denounces 9410
-Pyle 9410
-Saxony 9410
-serializations 9410
-Peruvian 9410
-taxonomically 9410
-kingdom 9410
-stint 9410
-Sault 9410
-faithful 9410
-Ganymede 9410
-tidiness 9410
-gainful 9410
-contrary 9410
-Tipperary 9410
-tropics 9410
-theorizers 9410
-renew 9410
-already 9410
-terminal 9410
-Hegelian 9410
-hypothesizer 9410
-warningly 9410
-journalizing 9410
-nested 9410
-Lars 9410
-saplings 9410
-foothill 9410
-labeled 9410
-imperiously 9410
-reporters 9410
-furnishings 9410
-precipitable 9410
-discounts 9410
-excises 9410
-Stalin 9410
-despot 9410
-ripeness 9410
-Arabia 9410
-unruly 9410
-mournfulness 9410
-boom 9410
-slaughter 9410
-Sabine 9410
-handy 9410
-rural 9410
-organizer 9410
-shipyard 9410
-civics 9410
-inaccuracy 9410
-rules 9410
-juveniles 9410
-comprised 9410
-investigations 9410
-stabilizes 9410
-seminaries 9410
-Hunter 9410
-sporty 9410
-test 9410
-weasels 9410
-CERN 9410
-tempering 9410
-afore 9410
-Galatean 9410
-techniques 9410
-error 9410
-veranda 9410
-severely 9410
-Cassites 9410
-forthcoming 9410
-guides 9410
-vanish 9410
-lied 9410
-sawtooth 9410
-fated 9410
-gradually 9410
-widens 9410
-preclude 9410
-evenhandedly 9410
-percentage 9410
-disobedience 9410
-humility 9410
-gleaning 9410
-petted 9410
-bloater 9410
-minion 9410
-marginal 9410
-apiary 9410
-measures 9410
-precaution 9410
-repelled 9410
-primary 9410
-coverings 9410
-Artemia 9410
-navigate 9410
-spatial 9410
-Gurkha 9410
-meanwhile 9410
-Melinda 9410
-Butterfield 9410
-Aldrich 9410
-previewing 9410
-glut 9410
-unaffected 9410
-inmate 9410
-mineral 9410
-impending 9410
-meditation 9410
-ideas 9410
-miniaturizes 9410
-lewdly 9410
-title 9410
-youthfulness 9410
-creak 9410
-Chippewa 9410
-clamored 9410
-freezes 9410
-forgivably 9410
-reduce 9410
-McGovern 9410
-Nazis 9410
-epistle 9410
-socializes 9410
-conceptions 9410
-Kevin 9410
-uncovering 9410
-chews 9410
-appendixes 9410
-appendixes 9410
-appendixes 9410
-appendixes 9410
-appendixes 9410
-appendixes 9410
-raining 9410
-infest 9410
-compartment 9410
-minting 9410
-ducks 9410
-roped 9410
-waltz 9410
-Lillian 9410
-repressions 9410
-chillingly 9410
-noncritical 9410
-lithograph 9410
-spongers 9410
-parenthood 9410
-posed 9410
-instruments 9410
-filial 9410
-fixedly 9410
-relives 9410
-Pandora 9410
-watering 9410
-ungrateful 9410
-secures 9410
-poison 9410
-dusted 9410
-encompasses 9410
-presentation 9410
-Kantian 9410
-select fld3,period,price,price2 from t2,t3 where t2.fld1=t3.t2nr and period >= 1001 and period <= 1002 and t2.companynr = 37 order by fld3,period, price;
-fld3 period price price2
-admonishing 1002 28357832 8723648
-analyzable 1002 28357832 8723648
-annihilates 1001 5987435 234724
-Antares 1002 28357832 8723648
-astound 1001 5987435 234724
-audiology 1001 5987435 234724
-Augustine 1002 28357832 8723648
-Baird 1002 28357832 8723648
-bewilderingly 1001 5987435 234724
-breaking 1001 5987435 234724
-Conley 1001 5987435 234724
-dentally 1002 28357832 8723648
-dissociate 1002 28357832 8723648
-elite 1001 5987435 234724
-eschew 1001 5987435 234724
-Eulerian 1001 5987435 234724
-flanking 1001 5987435 234724
-foldout 1002 28357832 8723648
-funereal 1002 28357832 8723648
-galling 1002 28357832 8723648
-Graves 1001 5987435 234724
-grazing 1001 5987435 234724
-groupings 1001 5987435 234724
-handgun 1001 5987435 234724
-humility 1002 28357832 8723648
-impulsive 1002 28357832 8723648
-inch 1001 5987435 234724
-intelligibility 1001 5987435 234724
-jarring 1001 5987435 234724
-lawgiver 1001 5987435 234724
-lectured 1002 28357832 8723648
-Merritt 1002 28357832 8723648
-neonatal 1001 5987435 234724
-offload 1002 28357832 8723648
-parters 1002 28357832 8723648
-pityingly 1002 28357832 8723648
-puddings 1002 28357832 8723648
-Punjab 1001 5987435 234724
-quitter 1002 28357832 8723648
-realtor 1001 5987435 234724
-relaxing 1001 5987435 234724
-repetitions 1001 5987435 234724
-resumes 1001 5987435 234724
-Romans 1002 28357832 8723648
-rusting 1001 5987435 234724
-scholastics 1001 5987435 234724
-skulking 1002 28357832 8723648
-stated 1002 28357832 8723648
-suites 1002 28357832 8723648
-sureties 1001 5987435 234724
-testicle 1002 28357832 8723648
-tinily 1002 28357832 8723648
-tragedies 1001 5987435 234724
-trimmings 1001 5987435 234724
-vacuuming 1001 5987435 234724
-ventilate 1001 5987435 234724
-wallet 1001 5987435 234724
-Weissmuller 1002 28357832 8723648
-Wotan 1002 28357832 8723648
-select t2.fld1,fld3,period,price,price2 from t2,t3 where t2.fld1>= 18201 and t2.fld1 <= 18811 and t2.fld1=t3.t2nr and period = 1001 and t2.companynr = 37;
-fld1 fld3 period price price2
-018201 relaxing 1001 5987435 234724
-018601 vacuuming 1001 5987435 234724
-018801 inch 1001 5987435 234724
-018811 repetitions 1001 5987435 234724
-create table t4 (
-companynr tinyint(2) unsigned zerofill NOT NULL default '00',
-companyname char(30) NOT NULL default '',
-PRIMARY KEY (companynr),
-UNIQUE KEY companyname(companyname)
-) ENGINE=MyISAM MAX_ROWS=50 PACK_KEYS=1 COMMENT='companynames';
-select STRAIGHT_JOIN t2.companynr,companyname from t4,t2 where t2.companynr=t4.companynr group by t2.companynr;
-companynr companyname
-00 Unknown
-29 company 1
-34 company 2
-36 company 3
-37 company 4
-40 company 5
-41 company 6
-50 company 11
-53 company 7
-58 company 8
-65 company 9
-68 company 10
-select SQL_SMALL_RESULT t2.companynr,companyname from t4,t2 where t2.companynr=t4.companynr group by t2.companynr;
-companynr companyname
-00 Unknown
-29 company 1
-34 company 2
-36 company 3
-37 company 4
-40 company 5
-41 company 6
-50 company 11
-53 company 7
-58 company 8
-65 company 9
-68 company 10
-select * from t1,t1 t12;
-Period Varor_period Period Varor_period
-9410 9412 9410 9412
-select t2.fld1,t22.fld1 from t2,t2 t22 where t2.fld1 >= 250501 and t2.fld1 <= 250505 and t22.fld1 >= 250501 and t22.fld1 <= 250505;
-fld1 fld1
-250501 250501
-250502 250501
-250503 250501
-250504 250501
-250505 250501
-250501 250502
-250502 250502
-250503 250502
-250504 250502
-250505 250502
-250501 250503
-250502 250503
-250503 250503
-250504 250503
-250505 250503
-250501 250504
-250502 250504
-250503 250504
-250504 250504
-250505 250504
-250501 250505
-250502 250505
-250503 250505
-250504 250505
-250505 250505
-SET @save_optimizer_switch=@@optimizer_switch;
-SET optimizer_switch='outer_join_with_cache=off';
-insert into t2 (fld1, companynr) values (999999,99);
-select t2.companynr,companyname from t2 left join t4 using (companynr) where t4.companynr is null;
-companynr companyname
-99 NULL
-select count(*) from t2 left join t4 using (companynr) where t4.companynr is not null;
-count(*)
-1199
-explain select t2.companynr,companyname from t2 left join t4 using (companynr) where t4.companynr is null;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1200
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1 Using where; Not exists
-explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr is null;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL NULL NULL NULL NULL 12
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1200 Using where; Not exists
-select companynr,companyname from t2 left join t4 using (companynr) where companynr is null;
-companynr companyname
-select count(*) from t2 left join t4 using (companynr) where companynr is not null;
-count(*)
-1200
-explain select companynr,companyname from t2 left join t4 using (companynr) where companynr is null;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-explain select companynr,companyname from t4 left join t2 using (companynr) where companynr is null;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-delete from t2 where fld1=999999;
-explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
-explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
-explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 and t4.companynr > 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-1 SIMPLE t4 eq_ref PRIMARY PRIMARY 1 test.t2.companynr 1
-explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 and companynr > 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr is null;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL NULL NULL NULL NULL 12
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-explain select t2.companynr,companyname from t4 left join t2 using (companynr) where t2.companynr > 0 or t2.companynr < 0 or t4.companynr > 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-explain select t2.companynr,companyname from t4 left join t2 using (companynr) where ifnull(t2.companynr,1)>0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL NULL NULL NULL NULL 12
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr is null;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-explain select companynr,companyname from t4 left join t2 using (companynr) where companynr > 0 or companynr < 0 or companynr > 0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL PRIMARY NULL NULL NULL 12 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-explain select companynr,companyname from t4 left join t2 using (companynr) where ifnull(companynr,1)>0;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 ALL NULL NULL NULL NULL 12 Using where
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where
-SET optimizer_switch=@save_optimizer_switch;
-select distinct t2.companynr,t4.companynr from t2,t4 where t2.companynr=t4.companynr+1;
-companynr companynr
-37 36
-41 40
-explain select distinct t2.companynr,t4.companynr from t2,t4 where t2.companynr=t4.companynr+1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t4 index NULL PRIMARY 1 NULL 12 Using index; Using temporary
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 Using where; Using join buffer (flat, BNL join)
-select t2.fld1,t2.companynr,fld3,period from t3,t2 where t2.fld1 = 38208 and t2.fld1=t3.t2nr and period = 1008 or t2.fld1 = 38008 and t2.fld1 =t3.t2nr and period = 1008;
-fld1 companynr fld3 period
-038008 37 reporters 1008
-038208 37 Selfridge 1008
-select t2.fld1,t2.companynr,fld3,period from t3,t2 where (t2.fld1 = 38208 or t2.fld1 = 38008) and t2.fld1=t3.t2nr and period>=1008 and period<=1009;
-fld1 companynr fld3 period
-038008 37 reporters 1008
-038208 37 Selfridge 1008
-select t2.fld1,t2.companynr,fld3,period from t3,t2 where (t3.t2nr = 38208 or t3.t2nr = 38008) and t2.fld1=t3.t2nr and period>=1008 and period<=1009;
-fld1 companynr fld3 period
-038008 37 reporters 1008
-038208 37 Selfridge 1008
-select period from t1 where (((period > 0) or period < 10000 or (period = 1900)) and (period=1900 and period <= 1901) or (period=1903 and (period=1903)) and period>=1902) or ((period=1904 or period=1905) or (period=1906 or period>1907)) or (period=1908 and period = 1909);
-period
-9410
-select period from t1 where ((period > 0 and period < 1) or (((period > 0 and period < 100) and (period > 10)) or (period > 10)) or (period > 0 and (period > 5 or period > 6)));
-period
-9410
-select a.fld1 from t2 as a,t2 b where ((a.fld1 = 250501 and a.fld1=b.fld1) or a.fld1=250502 or a.fld1=250503 or (a.fld1=250505 and a.fld1<=b.fld1 and b.fld1>=a.fld1)) and a.fld1=b.fld1;
-fld1
-250501
-250502
-250503
-250505
-select fld1 from t2 where fld1 in (250502,98005,98006,250503,250605,250606) and fld1 >=250502 and fld1 not in (250605,250606);
-fld1
-250502
-250503
-select fld1 from t2 where fld1 between 250502 and 250504;
-fld1
-250502
-250503
-250504
-select fld3 from t2 where (((fld3 like "_%L%" ) or (fld3 like "%ok%")) and ( fld3 like "L%" or fld3 like "G%")) and fld3 like "L%" ;
-fld3
-label
-labeled
-labeled
-landslide
-laterally
-leaflet
-lewdly
-Lillian
-luckily
-select count(*) from t1;
-count(*)
-1
-select companynr,count(*),sum(fld1) from t2 group by companynr;
-companynr count(*) sum(fld1)
-00 82 10355753
-29 95 14473298
-34 70 17788966
-36 215 22786296
-37 588 83602098
-40 37 6618386
-41 52 12816335
-50 11 1595438
-53 4 793210
-58 23 2254293
-65 10 2284055
-68 12 3097288
-select companynr,count(*) from t2 group by companynr order by companynr desc limit 5;
-companynr count(*)
-68 12
-65 10
-58 23
-53 4
-50 11
-select count(*),min(fld4),max(fld4),sum(fld1),avg(fld1),std(fld1),variance(fld1) from t2 where companynr = 34 and fld4<>"";
-count(*) min(fld4) max(fld4) sum(fld1) avg(fld1) std(fld1) variance(fld1)
-70 absentee vest 17788966 254128.0857 3272.5940 10709871.3069
-explain extended select count(*),min(fld4),max(fld4),sum(fld1),avg(fld1),std(fld1),variance(fld1) from t2 where companynr = 34 and fld4<>"";
-id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199 100.00 Using where
-Warnings:
-Note 1003 select count(0) AS `count(*)`,min(`test`.`t2`.`fld4`) AS `min(fld4)`,max(`test`.`t2`.`fld4`) AS `max(fld4)`,sum(`test`.`t2`.`fld1`) AS `sum(fld1)`,avg(`test`.`t2`.`fld1`) AS `avg(fld1)`,std(`test`.`t2`.`fld1`) AS `std(fld1)`,variance(`test`.`t2`.`fld1`) AS `variance(fld1)` from `test`.`t2` where `test`.`t2`.`companynr` = 34 and `test`.`t2`.`fld4` <> ''
-select companynr,count(*),min(fld4),max(fld4),sum(fld1),avg(fld1),std(fld1),variance(fld1) from t2 group by companynr limit 3;
-companynr count(*) min(fld4) max(fld4) sum(fld1) avg(fld1) std(fld1) variance(fld1)
-00 82 Anthony windmills 10355753 126289.6707 115550.9757 13352027981.7087
-29 95 abut wetness 14473298 152350.5053 8368.5480 70032594.9026
-34 70 absentee vest 17788966 254128.0857 3272.5940 10709871.3069
-select companynr,t2nr,count(price),sum(price),min(price),max(price),avg(price) from t3 where companynr = 37 group by companynr,t2nr limit 10;
-companynr t2nr count(price) sum(price) min(price) max(price) avg(price)
-37 1 1 5987435 5987435 5987435 5987435.0000
-37 2 1 28357832 28357832 28357832 28357832.0000
-37 3 1 39654943 39654943 39654943 39654943.0000
-37 11 1 5987435 5987435 5987435 5987435.0000
-37 12 1 28357832 28357832 28357832 28357832.0000
-37 13 1 39654943 39654943 39654943 39654943.0000
-37 21 1 5987435 5987435 5987435 5987435.0000
-37 22 1 28357832 28357832 28357832 28357832.0000
-37 23 1 39654943 39654943 39654943 39654943.0000
-37 31 1 5987435 5987435 5987435 5987435.0000
-select /*! SQL_SMALL_RESULT */ companynr,t2nr,count(price),sum(price),min(price),max(price),avg(price) from t3 where companynr = 37 group by companynr,t2nr limit 10;
-companynr t2nr count(price) sum(price) min(price) max(price) avg(price)
-37 1 1 5987435 5987435 5987435 5987435.0000
-37 2 1 28357832 28357832 28357832 28357832.0000
-37 3 1 39654943 39654943 39654943 39654943.0000
-37 11 1 5987435 5987435 5987435 5987435.0000
-37 12 1 28357832 28357832 28357832 28357832.0000
-37 13 1 39654943 39654943 39654943 39654943.0000
-37 21 1 5987435 5987435 5987435 5987435.0000
-37 22 1 28357832 28357832 28357832 28357832.0000
-37 23 1 39654943 39654943 39654943 39654943.0000
-37 31 1 5987435 5987435 5987435 5987435.0000
-select companynr,count(price),sum(price),min(price),max(price),avg(price) from t3 group by companynr ;
-companynr count(price) sum(price) min(price) max(price) avg(price)
-37 12543 309394878010 5987435 39654943 24666736.6667
-78 8362 414611089292 726498 98439034 49582766.0000
-101 4181 3489454238 834598 834598 834598.0000
-154 4181 4112197254950 983543950 983543950 983543950.0000
-311 4181 979599938 234298 234298 234298.0000
-447 4181 9929180954 2374834 2374834 2374834.0000
-512 4181 3288532102 786542 786542 786542.0000
-select distinct mod(companynr,10) from t4 group by companynr;
-mod(companynr,10)
-0
-9
-4
-6
-7
-1
-3
-8
-5
-select distinct 1 from t4 group by companynr;
-1
-1
-select count(distinct fld1) from t2;
-count(distinct fld1)
-1199
-select companynr,count(distinct fld1) from t2 group by companynr;
-companynr count(distinct fld1)
-00 82
-29 95
-34 70
-36 215
-37 588
-40 37
-41 52
-50 11
-53 4
-58 23
-65 10
-68 12
-select companynr,count(*) from t2 group by companynr;
-companynr count(*)
-00 82
-29 95
-34 70
-36 215
-37 588
-40 37
-41 52
-50 11
-53 4
-58 23
-65 10
-68 12
-select companynr,count(distinct concat(fld1,repeat(65,1000))) from t2 group by companynr;
-companynr count(distinct concat(fld1,repeat(65,1000)))
-00 82
-29 95
-34 70
-36 215
-37 588
-40 37
-41 52
-50 11
-53 4
-58 23
-65 10
-68 12
-select companynr,count(distinct concat(fld1,repeat(65,200))) from t2 group by companynr;
-companynr count(distinct concat(fld1,repeat(65,200)))
-00 82
-29 95
-34 70
-36 215
-37 588
-40 37
-41 52
-50 11
-53 4
-58 23
-65 10
-68 12
-select companynr,count(distinct floor(fld1/100)) from t2 group by companynr;
-companynr count(distinct floor(fld1/100))
-00 47
-29 35
-34 14
-36 69
-37 108
-40 16
-41 11
-50 9
-53 1
-58 1
-65 1
-68 1
-select companynr,count(distinct concat(repeat(65,1000),floor(fld1/100))) from t2 group by companynr;
-companynr count(distinct concat(repeat(65,1000),floor(fld1/100)))
-00 47
-29 35
-34 14
-36 69
-37 108
-40 16
-41 11
-50 9
-53 1
-58 1
-65 1
-68 1
-select sum(fld1),fld3 from t2 where fld3="Romans" group by fld1 limit 10;
-sum(fld1) fld3
-11402 Romans
-select name,count(*) from t3 where name='cloakroom' group by name;
-name count(*)
-cloakroom 4181
-select name,count(*) from t3 where name='cloakroom' and price>10 group by name;
-name count(*)
-cloakroom 4181
-select count(*) from t3 where name='cloakroom' and price2=823742;
-count(*)
-4181
-select name,count(*) from t3 where name='cloakroom' and price2=823742 group by name;
-name count(*)
-cloakroom 4181
-select name,count(*) from t3 where name >= "extramarital" and price <= 39654943 group by name;
-name count(*)
-extramarital 4181
-gazer 4181
-gems 4181
-Iranizes 4181
-spates 4181
-tucked 4181
-violinist 4181
-select t2.fld3,count(*) from t2,t3 where t2.fld1=158402 and t3.name=t2.fld3 group by t3.name;
-fld3 count(*)
-spates 4181
-select companynr|0,companyname from t4 group by 1;
-companynr|0 companyname
-0 Unknown
-29 company 1
-34 company 2
-36 company 3
-37 company 4
-40 company 5
-41 company 6
-50 company 11
-53 company 7
-58 company 8
-65 company 9
-68 company 10
-select t2.companynr,companyname,count(*) from t2,t4 where t2.companynr=t4.companynr group by t2.companynr order by companyname;
-companynr companyname count(*)
-29 company 1 95
-68 company 10 12
-50 company 11 11
-34 company 2 70
-36 company 3 215
-37 company 4 588
-40 company 5 37
-41 company 6 52
-53 company 7 4
-58 company 8 23
-65 company 9 10
-00 Unknown 82
-select t2.fld1,count(*) from t2,t3 where t2.fld1=158402 and t3.name=t2.fld3 group by t3.name;
-fld1 count(*)
-158402 4181
-select sum(Period)/count(*) from t1;
-sum(Period)/count(*)
-9410.0000
-select companynr,count(price) as "count",sum(price) as "sum" ,abs(sum(price)/count(price)-avg(price)) as "diff",(0+count(price))*companynr as func from t3 group by companynr;
-companynr count sum diff func
-37 12543 309394878010 0.0000 464091
-78 8362 414611089292 0.0000 652236
-101 4181 3489454238 0.0000 422281
-154 4181 4112197254950 0.0000 643874
-311 4181 979599938 0.0000 1300291
-447 4181 9929180954 0.0000 1868907
-512 4181 3288532102 0.0000 2140672
-select companynr,sum(price)/count(price) as avg from t3 group by companynr having avg > 70000000 order by avg;
-companynr avg
-154 983543950.0000
-select companynr,count(*) from t2 group by companynr order by 2 desc;
-companynr count(*)
-37 588
-36 215
-29 95
-00 82
-34 70
-41 52
-40 37
-58 23
-68 12
-50 11
-65 10
-53 4
-select companynr,count(*) from t2 where companynr > 40 group by companynr order by 2 desc;
-companynr count(*)
-41 52
-58 23
-68 12
-50 11
-65 10
-53 4
-select t2.fld4,t2.fld1,count(price),sum(price),min(price),max(price),avg(price) from t3,t2 where t3.companynr = 37 and t2.fld1 = t3.t2nr group by fld1,t2.fld4;
-fld4 fld1 count(price) sum(price) min(price) max(price) avg(price)
-teethe 000001 1 5987435 5987435 5987435 5987435.0000
-dreaded 011401 1 5987435 5987435 5987435 5987435.0000
-scholastics 011402 1 28357832 28357832 28357832 28357832.0000
-audiology 011403 1 39654943 39654943 39654943 39654943.0000
-wallet 011501 1 5987435 5987435 5987435 5987435.0000
-parters 011701 1 5987435 5987435 5987435 5987435.0000
-eschew 011702 1 28357832 28357832 28357832 28357832.0000
-quitter 011703 1 39654943 39654943 39654943 39654943.0000
-neat 012001 1 5987435 5987435 5987435 5987435.0000
-Steinberg 012003 1 39654943 39654943 39654943 39654943.0000
-balled 012301 1 5987435 5987435 5987435 5987435.0000
-persist 012302 1 28357832 28357832 28357832 28357832.0000
-attainments 012303 1 39654943 39654943 39654943 39654943.0000
-capably 012501 1 5987435 5987435 5987435 5987435.0000
-impulsive 012602 1 28357832 28357832 28357832 28357832.0000
-starlet 012603 1 39654943 39654943 39654943 39654943.0000
-featherweight 012701 1 5987435 5987435 5987435 5987435.0000
-pessimist 012702 1 28357832 28357832 28357832 28357832.0000
-daughter 012703 1 39654943 39654943 39654943 39654943.0000
-lawgiver 013601 1 5987435 5987435 5987435 5987435.0000
-stated 013602 1 28357832 28357832 28357832 28357832.0000
-readable 013603 1 39654943 39654943 39654943 39654943.0000
-testicle 013801 1 5987435 5987435 5987435 5987435.0000
-Parsifal 013802 1 28357832 28357832 28357832 28357832.0000
-leavings 013803 1 39654943 39654943 39654943 39654943.0000
-squeaking 013901 1 5987435 5987435 5987435 5987435.0000
-contrasted 016001 1 5987435 5987435 5987435 5987435.0000
-leftover 016201 1 5987435 5987435 5987435 5987435.0000
-whiteners 016202 1 28357832 28357832 28357832 28357832.0000
-erases 016301 1 5987435 5987435 5987435 5987435.0000
-Punjab 016302 1 28357832 28357832 28357832 28357832.0000
-Merritt 016303 1 39654943 39654943 39654943 39654943.0000
-sweetish 018001 1 5987435 5987435 5987435 5987435.0000
-dogging 018002 1 28357832 28357832 28357832 28357832.0000
-scornfully 018003 1 39654943 39654943 39654943 39654943.0000
-fetters 018012 1 28357832 28357832 28357832 28357832.0000
-bivalves 018013 1 39654943 39654943 39654943 39654943.0000
-skulking 018021 1 5987435 5987435 5987435 5987435.0000
-flint 018022 1 28357832 28357832 28357832 28357832.0000
-flopping 018023 1 39654943 39654943 39654943 39654943.0000
-Judas 018032 1 28357832 28357832 28357832 28357832.0000
-vacuuming 018033 1 39654943 39654943 39654943 39654943.0000
-medical 018041 1 5987435 5987435 5987435 5987435.0000
-bloodbath 018042 1 28357832 28357832 28357832 28357832.0000
-subschema 018043 1 39654943 39654943 39654943 39654943.0000
-interdependent 018051 1 5987435 5987435 5987435 5987435.0000
-Graves 018052 1 28357832 28357832 28357832 28357832.0000
-neonatal 018053 1 39654943 39654943 39654943 39654943.0000
-sorters 018061 1 5987435 5987435 5987435 5987435.0000
-epistle 018062 1 28357832 28357832 28357832 28357832.0000
-Conley 018101 1 5987435 5987435 5987435 5987435.0000
-lectured 018102 1 28357832 28357832 28357832 28357832.0000
-Abraham 018103 1 39654943 39654943 39654943 39654943.0000
-cage 018201 1 5987435 5987435 5987435 5987435.0000
-hushes 018202 1 28357832 28357832 28357832 28357832.0000
-Simla 018402 1 28357832 28357832 28357832 28357832.0000
-reporters 018403 1 39654943 39654943 39654943 39654943.0000
-coexist 018601 1 5987435 5987435 5987435 5987435.0000
-Beebe 018602 1 28357832 28357832 28357832 28357832.0000
-Taoism 018603 1 39654943 39654943 39654943 39654943.0000
-Connally 018801 1 5987435 5987435 5987435 5987435.0000
-fetched 018802 1 28357832 28357832 28357832 28357832.0000
-checkpoints 018803 1 39654943 39654943 39654943 39654943.0000
-gritty 018811 1 5987435 5987435 5987435 5987435.0000
-firearm 018812 1 28357832 28357832 28357832 28357832.0000
-minima 019101 1 5987435 5987435 5987435 5987435.0000
-Selfridge 019102 1 28357832 28357832 28357832 28357832.0000
-disable 019103 1 39654943 39654943 39654943 39654943.0000
-witchcraft 019201 1 5987435 5987435 5987435 5987435.0000
-betroth 030501 1 5987435 5987435 5987435 5987435.0000
-Manhattanize 030502 1 28357832 28357832 28357832 28357832.0000
-imprint 030503 1 39654943 39654943 39654943 39654943.0000
-swelling 031901 1 5987435 5987435 5987435 5987435.0000
-interrelationships 036001 1 5987435 5987435 5987435 5987435.0000
-riser 036002 1 28357832 28357832 28357832 28357832.0000
-bee 038001 1 5987435 5987435 5987435 5987435.0000
-kanji 038002 1 28357832 28357832 28357832 28357832.0000
-dental 038003 1 39654943 39654943 39654943 39654943.0000
-railway 038011 1 5987435 5987435 5987435 5987435.0000
-validate 038012 1 28357832 28357832 28357832 28357832.0000
-normalizes 038013 1 39654943 39654943 39654943 39654943.0000
-Kline 038101 1 5987435 5987435 5987435 5987435.0000
-Anatole 038102 1 28357832 28357832 28357832 28357832.0000
-partridges 038103 1 39654943 39654943 39654943 39654943.0000
-recruited 038201 1 5987435 5987435 5987435 5987435.0000
-dimensions 038202 1 28357832 28357832 28357832 28357832.0000
-Chicana 038203 1 39654943 39654943 39654943 39654943.0000
-select t3.companynr,fld3,sum(price) from t3,t2 where t2.fld1 = t3.t2nr and t3.companynr = 512 group by companynr,fld3;
-companynr fld3 sum(price)
-512 boat 786542
-512 capably 786542
-512 cupboard 786542
-512 decliner 786542
-512 descendants 786542
-512 dopers 786542
-512 erases 786542
-512 Micronesia 786542
-512 Miles 786542
-512 skies 786542
-select t2.companynr,count(*),min(fld3),max(fld3),sum(price),avg(price) from t2,t3 where t3.companynr >= 30 and t3.companynr <= 58 and t3.t2nr = t2.fld1 and 1+1=2 group by t2.companynr;
-companynr count(*) min(fld3) max(fld3) sum(price) avg(price)
-00 1 Omaha Omaha 5987435 5987435.0000
-36 1 dubbed dubbed 28357832 28357832.0000
-37 83 Abraham Wotan 1908978016 22999735.1325
-50 2 scribbled tapestry 68012775 34006387.5000
-select t3.companynr+0,t3.t2nr,fld3,sum(price) from t3,t2 where t2.fld1 = t3.t2nr and t3.companynr = 37 group by 1,t3.t2nr,fld3,fld3,fld3,fld3,fld3 order by fld1;
-t3.companynr+0 t2nr fld3 sum(price)
-37 1 Omaha 5987435
-37 11401 breaking 5987435
-37 11402 Romans 28357832
-37 11403 intercepted 39654943
-37 11501 bewilderingly 5987435
-37 11701 astound 5987435
-37 11702 admonishing 28357832
-37 11703 sumac 39654943
-37 12001 flanking 5987435
-37 12003 combed 39654943
-37 12301 Eulerian 5987435
-37 12302 dubbed 28357832
-37 12303 Kane 39654943
-37 12501 annihilates 5987435
-37 12602 Wotan 28357832
-37 12603 snatching 39654943
-37 12701 grazing 5987435
-37 12702 Baird 28357832
-37 12703 celery 39654943
-37 13601 handgun 5987435
-37 13602 foldout 28357832
-37 13603 mystic 39654943
-37 13801 intelligibility 5987435
-37 13802 Augustine 28357832
-37 13803 teethe 39654943
-37 13901 scholastics 5987435
-37 16001 audiology 5987435
-37 16201 wallet 5987435
-37 16202 parters 28357832
-37 16301 eschew 5987435
-37 16302 quitter 28357832
-37 16303 neat 39654943
-37 18001 jarring 5987435
-37 18002 tinily 28357832
-37 18003 balled 39654943
-37 18012 impulsive 28357832
-37 18013 starlet 39654943
-37 18021 lawgiver 5987435
-37 18022 stated 28357832
-37 18023 readable 39654943
-37 18032 testicle 28357832
-37 18033 Parsifal 39654943
-37 18041 Punjab 5987435
-37 18042 Merritt 28357832
-37 18043 Quixotism 39654943
-37 18051 sureties 5987435
-37 18052 puddings 28357832
-37 18053 tapestry 39654943
-37 18061 trimmings 5987435
-37 18062 humility 28357832
-37 18101 tragedies 5987435
-37 18102 skulking 28357832
-37 18103 flint 39654943
-37 18201 relaxing 5987435
-37 18202 offload 28357832
-37 18402 suites 28357832
-37 18403 lists 39654943
-37 18601 vacuuming 5987435
-37 18602 dentally 28357832
-37 18603 humanness 39654943
-37 18801 inch 5987435
-37 18802 Weissmuller 28357832
-37 18803 irresponsibly 39654943
-37 18811 repetitions 5987435
-37 18812 Antares 28357832
-37 19101 ventilate 5987435
-37 19102 pityingly 28357832
-37 19103 interdependent 39654943
-37 19201 Graves 5987435
-37 30501 neonatal 5987435
-37 30502 scribbled 28357832
-37 30503 chafe 39654943
-37 31901 realtor 5987435
-37 36001 elite 5987435
-37 36002 funereal 28357832
-37 38001 Conley 5987435
-37 38002 lectured 28357832
-37 38003 Abraham 39654943
-37 38011 groupings 5987435
-37 38012 dissociate 28357832
-37 38013 coexist 39654943
-37 38101 rusting 5987435
-37 38102 galling 28357832
-37 38103 obliterates 39654943
-37 38201 resumes 5987435
-37 38202 analyzable 28357832
-37 38203 terminator 39654943
-select sum(price) from t3,t2 where t2.fld1 = t3.t2nr and t3.companynr = 512 and t3.t2nr = 38008 and t2.fld1 = 38008 or t2.fld1= t3.t2nr and t3.t2nr = 38008 and t2.fld1 = 38008;
-sum(price)
-234298
-select t2.fld1,sum(price) from t3,t2 where t2.fld1 = t3.t2nr and t3.companynr = 512 and t3.t2nr = 38008 and t2.fld1 = 38008 or t2.fld1 = t3.t2nr and t3.t2nr = 38008 and t2.fld1 = 38008 or t3.t2nr = t2.fld1 and t2.fld1 = 38008 group by t2.fld1;
-fld1 sum(price)
-038008 234298
-explain select fld3 from t2 where 1>2 or 2>3;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-explain select fld3 from t2 where fld1=fld1;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 ALL NULL NULL NULL NULL 1199
-select companynr,fld1 from t2 HAVING fld1=250501 or fld1=250502;
-companynr fld1
-34 250501
-34 250502
-select companynr,fld1 from t2 WHERE fld1>=250501 HAVING fld1<=250502;
-companynr fld1
-34 250501
-34 250502
-select companynr,count(*) as count,sum(fld1) as sum from t2 group by companynr having count > 40 and sum/count >= 120000;
-companynr count sum
-00 82 10355753
-29 95 14473298
-34 70 17788966
-37 588 83602098
-41 52 12816335
-select companynr from t2 group by companynr having count(*) > 40 and sum(fld1)/count(*) >= 120000 ;
-companynr
-00
-29
-34
-37
-41
-select t2.companynr,companyname,count(*) from t2,t4 where t2.companynr=t4.companynr group by companyname having t2.companynr >= 40;
-companynr companyname count(*)
-68 company 10 12
-50 company 11 11
-40 company 5 37
-41 company 6 52
-53 company 7 4
-58 company 8 23
-65 company 9 10
-select count(*) from t2;
-count(*)
-1199
-select count(*) from t2 where fld1 < 098024;
-count(*)
-387
-select min(fld1) from t2 where fld1>= 098024;
-min(fld1)
-98024
-select max(fld1) from t2 where fld1>= 098024;
-max(fld1)
-1232609
-select count(*) from t3 where price2=76234234;
-count(*)
-4181
-select count(*) from t3 where companynr=512 and price2=76234234;
-count(*)
-4181
-explain select min(fld1),max(fld1),count(*) from t2;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away
-select min(fld1),max(fld1),count(*) from t2;
-min(fld1) max(fld1) count(*)
-0 1232609 1199
-select min(t2nr),max(t2nr) from t3 where t2nr=2115 and price2=823742;
-min(t2nr) max(t2nr)
-2115 2115
-select count(*),min(t2nr),max(t2nr) from t3 where name='spates' and companynr=78;
-count(*) min(t2nr) max(t2nr)
-4181 4 41804
-select t2nr,count(*) from t3 where name='gems' group by t2nr limit 20;
-t2nr count(*)
-9 1
-19 1
-29 1
-39 1
-49 1
-59 1
-69 1
-79 1
-89 1
-99 1
-109 1
-119 1
-129 1
-139 1
-149 1
-159 1
-169 1
-179 1
-189 1
-199 1
-select max(t2nr) from t3 where price=983543950;
-max(t2nr)
-41807
-select t1.period from t3 = t1 limit 1;
-period
-1001
-select t1.period from t1 as t1 limit 1;
-period
-9410
-select t1.period as "Nuvarande period" from t1 as t1 limit 1;
-Nuvarande period
-9410
-select period as ok_period from t1 limit 1;
-ok_period
-9410
-select period as ok_period from t1 group by ok_period limit 1;
-ok_period
-9410
-select 1+1 as summa from t1 group by summa limit 1;
-summa
-2
-select period as "Nuvarande period" from t1 group by "Nuvarande period" limit 1;
-Nuvarande period
-9410
-show tables;
-Tables_in_test
-t1
-t2
-t3
-t4
-show tables from test like "s%";
-Tables_in_test (s%)
-show tables from test like "t?";
-Tables_in_test (t?)
-show full columns from t2;
-Field Type Collation Null Key Default Extra Privileges Comment
-auto int(11) NULL NO PRI NULL auto_increment #
-fld1 int(6) unsigned zerofill NULL NO UNI 000000 #
-companynr tinyint(2) unsigned zerofill NULL NO 00 #
-fld3 char(30) latin1_swedish_ci NO MUL #
-fld4 char(35) latin1_swedish_ci NO #
-fld5 char(35) latin1_swedish_ci NO #
-fld6 char(4) latin1_swedish_ci NO #
-show full columns from t2 from test like 'f%';
-Field Type Collation Null Key Default Extra Privileges Comment
-fld1 int(6) unsigned zerofill NULL NO UNI 000000 #
-fld3 char(30) latin1_swedish_ci NO MUL #
-fld4 char(35) latin1_swedish_ci NO #
-fld5 char(35) latin1_swedish_ci NO #
-fld6 char(4) latin1_swedish_ci NO #
-show full columns from t2 from test like 's%';
-Field Type Collation Null Key Default Extra Privileges Comment
-show keys from t2;
-Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
-t2 0 PRIMARY 1 auto A 1199 NULL NULL BTREE
-t2 0 fld1 1 fld1 A 1199 NULL NULL BTREE
-t2 1 fld3 1 fld3 A NULL NULL NULL BTREE
-drop table t4, t3, t2, t1;
-CREATE TABLE t1 (
-cont_nr int(11) NOT NULL auto_increment,
-ver_nr int(11) NOT NULL default '0',
-aufnr int(11) NOT NULL default '0',
-username varchar(50) NOT NULL default '',
-hdl_nr int(11) NOT NULL default '0',
-eintrag date NOT NULL default '0000-00-00',
-st_klasse varchar(40) NOT NULL default '',
-st_wert varchar(40) NOT NULL default '',
-st_zusatz varchar(40) NOT NULL default '',
-st_bemerkung varchar(255) NOT NULL default '',
-kunden_art varchar(40) NOT NULL default '',
-mcbs_knr int(11) default NULL,
-mcbs_aufnr int(11) NOT NULL default '0',
-schufa_status char(1) default '?',
-bemerkung text,
-wirknetz text,
-wf_igz int(11) NOT NULL default '0',
-tarifcode varchar(80) default NULL,
-recycle char(1) default NULL,
-sim varchar(30) default NULL,
-mcbs_tpl varchar(30) default NULL,
-emp_nr int(11) NOT NULL default '0',
-laufzeit int(11) default NULL,
-hdl_name varchar(30) default NULL,
-prov_hdl_nr int(11) NOT NULL default '0',
-auto_wirknetz varchar(50) default NULL,
-auto_billing varchar(50) default NULL,
-touch timestamp NOT NULL,
-kategorie varchar(50) default NULL,
-kundentyp varchar(20) NOT NULL default '',
-sammel_rech_msisdn varchar(30) NOT NULL default '',
-p_nr varchar(9) NOT NULL default '',
-suffix char(3) NOT NULL default '',
-PRIMARY KEY (cont_nr),
-KEY idx_aufnr(aufnr),
-KEY idx_hdl_nr(hdl_nr),
-KEY idx_st_klasse(st_klasse),
-KEY ver_nr(ver_nr),
-KEY eintrag_idx(eintrag),
-KEY emp_nr_idx(emp_nr),
-KEY wf_igz(wf_igz),
-KEY touch(touch),
-KEY hdl_tag(eintrag,hdl_nr),
-KEY prov_hdl_nr(prov_hdl_nr),
-KEY mcbs_aufnr(mcbs_aufnr),
-KEY kundentyp(kundentyp),
-KEY p_nr(p_nr,suffix)
-) ENGINE=MyISAM;
-INSERT INTO t1 VALUES (3359356,405,3359356,'Mustermann Musterfrau',52500,'2000-05-20','workflow','Auftrag erledigt','Originalvertrag eingegangen und geprüft','','privat',1485525,2122316,'+','','N',1909160,'MobilComSuper92000D2',NULL,NULL,'MS9ND2',3,24,'MobilCom Shop Koeln',52500,NULL,'auto',20010202105916,'Mobilfunk','PP','','','');
-INSERT INTO t1 VALUES (3359357,468,3359357,'Mustermann Musterfrau',7001,'2000-05-20','workflow','Auftrag erledigt','Originalvertrag eingegangen und geprüft','','privat',1503580,2139699,'+','','P',1909171,'MobilComSuper9D1T10SFreisprech(Akquise)',NULL,NULL,'MS9NS1',327,24,'MobilCom Intern',7003,NULL,'auto',20010202105916,'Mobilfunk','PP','','','');
-INSERT INTO t1 VALUES (3359358,407,3359358,'Mustermann Musterfrau',7001,'2000-05-20','workflow','Auftrag erledigt','Originalvertrag eingegangen und geprüft','','privat',1501358,2137473,'N','','N',1909159,'MobilComSuper92000D2',NULL,NULL,'MS9ND2',325,24,'MobilCom Intern',7003,NULL,'auto',20010202105916,'Mobilfunk','PP','','','');
-INSERT INTO t1 VALUES (3359359,468,3359359,'Mustermann Musterfrau',7001,'2000-05-20','workflow','Auftrag erledigt','Originalvertrag eingegangen und geprüft','','privat',1507831,2143894,'+','','P',1909162,'MobilComSuper9D1T10SFreisprech(Akquise)',NULL,NULL,'MS9NS1',327,24,'MobilCom Intern',7003,NULL,'auto',20010202105916,'Mobilfunk','PP','','','');
-INSERT INTO t1 VALUES (3359360,0,0,'Mustermann Musterfrau',29674907,'2000-05-20','workflow','Auftrag erledigt','Originalvertrag eingegangen und geprüft','','privat',1900169997,2414578,'+',NULL,'N',1909148,'',NULL,NULL,'RV99066_2',20,NULL,'POS',29674907,NULL,NULL,20010202105916,'Mobilfunk','','','97317481','007');
-INSERT INTO t1 VALUES (3359361,406,3359361,'Mustermann Musterfrau',7001,'2000-05-20','workflow','Auftrag storniert','','(7001-84):Storno, Kd. möchte nicht mehr','privat',NULL,0,'+','','P',1909150,'MobilComSuper92000D1(Akquise)',NULL,NULL,'MS9ND1',325,24,'MobilCom Intern',7003,NULL,'auto',20010202105916,'Mobilfunk','PP','','','');
-INSERT INTO t1 VALUES (3359362,406,3359362,'Mustermann Musterfrau',7001,'2000-05-20','workflow','Auftrag erledigt','Originalvertrag eingegangen und geprüft','','privat',1509984,2145874,'+','','P',1909154,'MobilComSuper92000D1(Akquise)',NULL,NULL,'MS9ND1',327,24,'MobilCom Intern',7003,NULL,'auto',20010202105916,'Mobilfunk','PP','','','');
-SELECT ELT(FIELD(kundentyp,'PP','PPA','PG','PGA','FK','FKA','FP','FPA','K','KA','V','VA',''), 'Privat (Private Nutzung)','Privat (Private Nutzung) Sitz im Ausland','Privat (geschaeftliche Nutzung)','Privat (geschaeftliche Nutzung) Sitz im Ausland','Firma (Kapitalgesellschaft)','Firma (Kapitalgesellschaft) Sitz im Ausland','Firma (Personengesellschaft)','Firma (Personengesellschaft) Sitz im Ausland','oeff. rechtl. Koerperschaft','oeff. rechtl. Koerperschaft Sitz im Ausland','Eingetragener Verein','Eingetragener Verein Sitz im Ausland','Typ unbekannt') AS Kundentyp ,kategorie FROM t1 WHERE hdl_nr < 2000000 AND kategorie IN ('Prepaid','Mobilfunk') AND st_klasse = 'Workflow' GROUP BY kundentyp ORDER BY kategorie;
-Kundentyp kategorie
-Privat (Private Nutzung) Mobilfunk
-Warnings:
-Warning 1052 Column 'kundentyp' in group statement is ambiguous
-drop table t1;
-connection default;
-disconnect shm_con;
-mysqld is alive
-SET @max_allowed_packet= @@global.max_allowed_packet;
-SET @net_buffer_length= @@global.net_buffer_length;
-SET GLOBAL max_allowed_packet= 1024;
-Warnings:
-Warning 1708 The value of 'max_allowed_packet' should be no less than the value of 'net_buffer_length'
-SET GLOBAL net_buffer_length= 1024;
-ERROR 1153 (08S01) at line 1: Got a packet bigger than 'max_allowed_packet' bytes
-SET GLOBAL max_allowed_packet= @max_allowed_packet;
-SET GLOBAL net_buffer_length= @net_buffer_length;
-End of 5.0 tests.
diff --git a/mysql-test/main/shm.test b/mysql-test/main/shm.test
deleted file mode 100644
index 346ce49476e..00000000000
--- a/mysql-test/main/shm.test
+++ /dev/null
@@ -1,47 +0,0 @@
-# We currently only have shm support on windows, so in order
-# to optimize things we skip this test on all other platforms
---source include/windows.inc
-
-# thread pool causes different results
--- source include/not_threadpool.inc
-
-# Only run this test if shared memory is avaliable
-let $shm= query_get_value("SHOW VARIABLES LIKE 'shared_memory'", Value, 1);
-if ($shm != ON){
- skip No shm support;
-}
-let $shm_name= query_get_value("SHOW GLOBAL VARIABLES LIKE 'shared_memory_base_name'", Value, 1);
-
-# Connect using SHM for testing
-connect(shm_con,localhost,root,,,,$shm_name,SHM);
-
-# Source select test case
--- source include/common-tests.inc
-
-connection default;
-disconnect shm_con;
-
-#
-# Bug #24924: shared-memory-base-name that is too long causes buffer overflow
-#
---exec $MYSQLADMIN --user=root --host=127.0.0.1 --port=$MASTER_MYPORT --shared-memory-base-name=HeyMrBaseNameXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ping
-
-#
-# Bug #33899: Deadlock in mysql_real_query with shared memory connections
-#
-
-let $stmt= `SELECT REPEAT('a', 2048)`;
-
-SET @max_allowed_packet= @@global.max_allowed_packet;
-SET @net_buffer_length= @@global.net_buffer_length;
-
-SET GLOBAL max_allowed_packet= 1024;
-SET GLOBAL net_buffer_length= 1024;
-
---error 1
---exec echo SELECT '$stmt'| $MYSQL --protocol=memory --shared-memory-base-name=$shm_name 2>&1
-
-SET GLOBAL max_allowed_packet= @max_allowed_packet;
-SET GLOBAL net_buffer_length= @net_buffer_length;
-
---echo End of 5.0 tests.
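For reference, a minimal SQL sketch of the max_allowed_packet / net_buffer_length interplay that the deleted Bug #33899 block exercised (the saved-value variable names are illustrative; the oversized statement has to be sent from a fresh client connection, which is omitted here):

    SET @saved_map = @@global.max_allowed_packet;
    SET @saved_nbl = @@global.net_buffer_length;
    SET GLOBAL max_allowed_packet = 1024;  -- warns that it should be no less than net_buffer_length
    SET GLOBAL net_buffer_length  = 1024;
    -- A new connection sending a statement larger than 1024 bytes now fails with
    -- error 1153: Got a packet bigger than 'max_allowed_packet' bytes
    SET GLOBAL max_allowed_packet = @saved_map;
    SET GLOBAL net_buffer_length  = @saved_nbl;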
diff --git a/mysql-test/main/show_check.result b/mysql-test/main/show_check.result
index 6a7afb80fae..9205eee1cf7 100644
--- a/mysql-test/main/show_check.result
+++ b/mysql-test/main/show_check.result
@@ -162,8 +162,9 @@ Catalog Database Table Table_alias Column Column_alias Type Length Max length Is
def Table 253 128 7 Y 0 39 8
def Op 253 10 7 Y 0 39 8
def Msg_type 253 10 6 Y 0 39 8
-def Msg_text 250 393216 2 Y 0 39 8
+def Msg_text 250 393216 39 Y 0 39 8
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -261,7 +262,10 @@ create table t1(n int);
insert into t1 values (1);
show open tables;
Database Table In_use Name_locked
+mysql column_stats 0 0
mysql general_log 0 0
+mysql index_stats 0 0
+mysql table_stats 0 0
test t1 0 0
drop table t1;
create table t1 (a int not null, b VARCHAR(10), INDEX (b) ) AVG_ROW_LENGTH=10 CHECKSUM=1 COMMENT="test" ENGINE=MYISAM MIN_ROWS=10 MAX_ROWS=100 PACK_KEYS=1 DELAY_KEY_WRITE=1 ROW_FORMAT=fixed;
@@ -757,11 +761,11 @@ View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache current_timestamp() AS `NOW()` binary binary
DROP VIEW v1;
CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE NOW();
-ERROR HY000: Incorrect usage of SQL_CACHE and SQL_NO_CACHE
+ERROR HY000: Incorrect usage of SQL_NO_CACHE and SQL_CACHE
CREATE VIEW v1 AS SELECT SQL_NO_CACHE SQL_CACHE NOW();
ERROR HY000: Incorrect usage of SQL_NO_CACHE and SQL_CACHE
CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE SQL_CACHE NOW();
-ERROR HY000: Incorrect usage of SQL_CACHE and SQL_NO_CACHE
+ERROR HY000: Option 'SQL_CACHE' used twice in statement
CREATE PROCEDURE p1()
BEGIN
SET @s= 'CREATE VIEW v1 AS SELECT SQL_CACHE 1';
diff --git a/mysql-test/main/show_check.test b/mysql-test/main/show_check.test
index d9025bc8d10..18826f31fed 100644
--- a/mysql-test/main/show_check.test
+++ b/mysql-test/main/show_check.test
@@ -564,7 +564,7 @@ CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE NOW();
--error ER_WRONG_USAGE
CREATE VIEW v1 AS SELECT SQL_NO_CACHE SQL_CACHE NOW();
---error ER_WRONG_USAGE
+--error ER_DUP_ARGUMENT
CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE SQL_CACHE NOW();
# Check CREATE VIEW in a prepared statement in a procedure.
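For clarity, the two error paths changed above, shown side by side as a hedged illustration rather than as part of the test:

    CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE NOW();
    -- still ER_WRONG_USAGE: Incorrect usage of SQL_NO_CACHE and SQL_CACHE
    CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE SQL_CACHE NOW();
    -- now ER_DUP_ARGUMENT: Option 'SQL_CACHE' used twice in statement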
diff --git a/mysql-test/main/show_create_user.result b/mysql-test/main/show_create_user.result
index 63013eca074..1205b658b6e 100644
--- a/mysql-test/main/show_create_user.result
+++ b/mysql-test/main/show_create_user.result
@@ -10,10 +10,10 @@ create user foo2@test identified by 'password';
show create user foo2@test;
CREATE USER for foo2@test
CREATE USER 'foo2'@'test' IDENTIFIED BY PASSWORD '*2470C0C06DEE42FD1618BB99005ADCA2EC9D1E19'
-alter user foo2@test identified with 'someplugin' as 'somepassword';
+alter user foo2@test identified with 'mysql_old_password' as '0123456789ABCDEF';
show create user foo2@test;
CREATE USER for foo2@test
-CREATE USER 'foo2'@'test' IDENTIFIED VIA someplugin USING 'somepassword'
+CREATE USER 'foo2'@'test' IDENTIFIED BY PASSWORD '0123456789ABCDEF'
create user foo3@test require SSL;
show create user foo3@test;
CREATE USER for foo3@test
diff --git a/mysql-test/main/show_create_user.test b/mysql-test/main/show_create_user.test
index a10c8aeeda6..03852b5abbc 100644
--- a/mysql-test/main/show_create_user.test
+++ b/mysql-test/main/show_create_user.test
@@ -9,7 +9,7 @@ show create user foo@test;
create user foo2@test identified by 'password';
show create user foo2@test;
-alter user foo2@test identified with 'someplugin' as 'somepassword';
+alter user foo2@test identified with 'mysql_old_password' as '0123456789ABCDEF';
show create user foo2@test;
create user foo3@test require SSL;
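A hedged standalone rendering of the changed statement, showing why the test now relies on the built-in mysql_old_password plugin instead of a non-existent one (the user name here is illustrative):

    CREATE USER demo@test;
    ALTER USER demo@test IDENTIFIED WITH 'mysql_old_password' AS '0123456789ABCDEF';
    SHOW CREATE USER demo@test;
    -- reported as: CREATE USER 'demo'@'test' IDENTIFIED BY PASSWORD '0123456789ABCDEF'
    DROP USER demo@test;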
diff --git a/mysql-test/main/show_explain.cc b/mysql-test/main/show_explain.cc
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/mysql-test/main/show_explain.cc
diff --git a/mysql-test/main/show_explain.result b/mysql-test/main/show_explain.result
index 0819ae5ba37..32364d0ec89 100644
--- a/mysql-test/main/show_explain.result
+++ b/mysql-test/main/show_explain.result
@@ -641,7 +641,7 @@ SELECT 'test' FROM t1 WHERE a=1;
connection default;
show explain for $thr2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref a a 5 const 1 Using index
+1 SIMPLE t1 ref a a 5 const 2 Using index
Warnings:
Note 1003 SELECT 'test' FROM t1 WHERE a=1
connection con1;
diff --git a/mysql-test/main/show_explain.test b/mysql-test/main/show_explain.test
index 6615ff66737..6647ca07eba 100644
--- a/mysql-test/main/show_explain.test
+++ b/mysql-test/main/show_explain.test
@@ -4,6 +4,7 @@
--source include/have_debug.inc
--source include/have_innodb.inc
+
--disable_warnings
drop table if exists t0, t1, t2, t3, t4;
drop view if exists v1;
@@ -328,6 +329,7 @@ connection con1;
set debug_dbug='+d,show_explain_probe_join_exec_start';
set @show_explain_probe_select_id=1;
+
send select distinct a from t0;
connection default;
--source include/wait_condition.inc
@@ -1202,3 +1204,4 @@ drop table t0,t1,t2;
connection default;
disconnect con1;
set debug_sync='RESET';
+
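For readers unfamiliar with the feature exercised above, SHOW EXPLAIN FOR <connection_id> prints the plan of a statement currently running in another connection; a hedged sketch with an illustrative connection id:

    -- connection A
    SELECT CONNECTION_ID();             -- suppose it returns 42
    SELECT 'test' FROM t1 WHERE a = 1;  -- statement whose plan is being inspected
    -- connection B
    SHOW EXPLAIN FOR 42;                -- same columns as EXPLAIN, plus a Note warning carrying the query text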
diff --git a/mysql-test/main/show_grants_with_plugin-7985.result b/mysql-test/main/show_grants_with_plugin-7985.result
deleted file mode 100644
index bb475b7cba9..00000000000
--- a/mysql-test/main/show_grants_with_plugin-7985.result
+++ /dev/null
@@ -1,197 +0,0 @@
-call mtr.add_suppression("password and an authentication plugin");
-#
-# Create a user with mysql_native_password plugin.
-# The user has no password or auth_string set.
-#
-create user u1;
-GRANT SELECT ON mysql.* to u1 IDENTIFIED VIA mysql_native_password;
-select user, host, password, plugin, authentication_string from mysql.user where user = 'u1';
-user host password plugin authentication_string
-u1 %
-#
-# The user's grants should show no password at all.
-#
-show grants for u1;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-#
-# Test to see if connecting with no password is succesful.
-#
-connect con1, localhost, u1,,;
-show grants;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-disconnect con1;
-connection default;
-#
-# Test after flushing privileges.
-#
-flush privileges;
-connect con1, localhost, u1,,;
-show grants;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-disconnect con1;
-connection default;
-#
-# Now add a mysql_native password string in authentication_string.
-#
-GRANT SELECT ON mysql.* to u1 IDENTIFIED VIA mysql_native_password
-USING '*7AFEFD08B6B720E781FB000CAA418F54FA662626';
-select user, host, password, plugin, authentication_string from mysql.user where user = 'u1';
-user host password plugin authentication_string
-u1 % *7AFEFD08B6B720E781FB000CAA418F54FA662626
-#
-# Test to see if connecting with password is succesful.
-#
-connect con1, localhost, u1,'SOMETHING',;
-show grants;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%' IDENTIFIED BY PASSWORD '*7AFEFD08B6B720E781FB000CAA418F54FA662626'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-disconnect con1;
-connection default;
-#
-# Test after flushing privileges.
-#
-flush privileges;
-connect con1, localhost, u1,'SOMETHING',;
-show grants;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%' IDENTIFIED BY PASSWORD '*7AFEFD08B6B720E781FB000CAA418F54FA662626'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-disconnect con1;
-connection default;
-#
-# Now we also set a password for the user.
-#
-set password for u1 = PASSWORD('SOMETHINGELSE');
-select user, host, password, plugin, authentication_string from mysql.user where user = 'u1';
-user host password plugin authentication_string
-u1 % *054B7BBD2B9A553DA560520DCD3F76DA2D81B7C6 mysql_native_password *054B7BBD2B9A553DA560520DCD3F76DA2D81B7C6
-#
-# Here we should use the password field, as that primes over
-# the authentication_string field.
-#
-show grants for u1;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%' IDENTIFIED BY PASSWORD '*054B7BBD2B9A553DA560520DCD3F76DA2D81B7C6'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-#
-# Logging in with the user's password should work.
-#
-connect con1, localhost, u1,'SOMETHINGELSE',;
-show grants;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%' IDENTIFIED BY PASSWORD '*054B7BBD2B9A553DA560520DCD3F76DA2D81B7C6'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-disconnect con1;
-connection default;
-#
-# Reload privileges and test logging in again.
-#
-flush privileges;
-show grants for u1;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%' IDENTIFIED BY PASSWORD '*054B7BBD2B9A553DA560520DCD3F76DA2D81B7C6'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-#
-# Here we connect via the user's password again.
-#
-connect con1, localhost, u1,'SOMETHINGELSE',;
-show grants;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%' IDENTIFIED BY PASSWORD '*054B7BBD2B9A553DA560520DCD3F76DA2D81B7C6'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-disconnect con1;
-connection default;
-#
-# Now we remove the authentication plugin password, flush privileges and
-# try again.
-#
-update mysql.user set password=authentication_string, plugin='', authentication_string='' where user='u1';
-select user, host, password, plugin, authentication_string from mysql.user where user = 'u1';
-user host password plugin authentication_string
-u1 % *054B7BBD2B9A553DA560520DCD3F76DA2D81B7C6
-flush privileges;
-show grants for u1;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%' IDENTIFIED BY PASSWORD '*054B7BBD2B9A553DA560520DCD3F76DA2D81B7C6'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-#
-# Here we connect via the user's password.
-#
-connect con1, localhost, u1,'SOMETHINGELSE',;
-select user, host, password, plugin, authentication_string from mysql.user where user = 'u1';
-user host password plugin authentication_string
-u1 % *054B7BBD2B9A553DA560520DCD3F76DA2D81B7C6
-disconnect con1;
-connection default;
-#
-# Try and set a wrong auth_string password, with mysql_native_password.
-# Make sure it fails.
-#
-GRANT USAGE ON *.* TO u1 IDENTIFIED VIA mysql_native_password USING 'asd';
-ERROR HY000: Password hash should be a 41-digit hexadecimal number
-#
-# Now set a correct password.
-#
-GRANT SELECT ON mysql.* to u1 IDENTIFIED VIA mysql_native_password
-USING '*7AFEFD08B6B720E781FB000CAA418F54FA662626';
-show grants for u1;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%' IDENTIFIED BY PASSWORD '*7AFEFD08B6B720E781FB000CAA418F54FA662626'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-#
-# Test if the user can now use that password instead.
-#
-connect con1, localhost, u1,'SOMETHING',;
-show grants;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%' IDENTIFIED BY PASSWORD '*7AFEFD08B6B720E781FB000CAA418F54FA662626'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-disconnect con1;
-#
-# Test if the user can now use that password instead, after flushing privileges;
-#
-connection default;
-flush privileges;
-connect con1, localhost, u1,'SOMETHING',;
-show grants;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%' IDENTIFIED BY PASSWORD '*7AFEFD08B6B720E781FB000CAA418F54FA662626'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-disconnect con1;
-connection default;
-#
-# Clear all passwords from the user.
-#
-set password for u1 = '';
-select user, host, password, plugin, authentication_string from mysql.user where user = 'u1';
-user host password plugin authentication_string
-u1 % mysql_native_password
-#
-# Test no password connect.
-#
-connect con1, localhost, u1,,;
-show grants;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-disconnect con1;
-connection default;
-#
-# Test no password connect, after flushing privileges.
-#
-flush privileges;
-connect con1, localhost, u1,,;
-show grants;
-Grants for u1@%
-GRANT USAGE ON *.* TO 'u1'@'%'
-GRANT SELECT ON `mysql`.* TO 'u1'@'%'
-disconnect con1;
-connection default;
-drop user u1;
diff --git a/mysql-test/main/show_grants_with_plugin-7985.test b/mysql-test/main/show_grants_with_plugin-7985.test
deleted file mode 100644
index 85952870254..00000000000
--- a/mysql-test/main/show_grants_with_plugin-7985.test
+++ /dev/null
@@ -1,160 +0,0 @@
---source include/not_embedded.inc
-call mtr.add_suppression("password and an authentication plugin");
-
---echo #
---echo # Create a user with mysql_native_password plugin.
---echo # The user has no password or auth_string set.
---echo #
-
-create user u1;
-GRANT SELECT ON mysql.* to u1 IDENTIFIED VIA mysql_native_password;
-select user, host, password, plugin, authentication_string from mysql.user where user = 'u1';
-
---echo #
---echo # The user's grants should show no password at all.
---echo #
-show grants for u1;
---echo #
---echo # Test to see if connecting with no password is succesful.
---echo #
---connect (con1, localhost, u1,,)
-show grants;
---disconnect con1
-
---connection default
---echo #
---echo # Test after flushing privileges.
---echo #
-flush privileges;
---connect (con1, localhost, u1,,)
-show grants;
---disconnect con1
-
---connection default
---echo #
---echo # Now add a mysql_native password string in authentication_string.
---echo #
-# Password string is SOMETHING
-GRANT SELECT ON mysql.* to u1 IDENTIFIED VIA mysql_native_password
-USING '*7AFEFD08B6B720E781FB000CAA418F54FA662626';
-select user, host, password, plugin, authentication_string from mysql.user where user = 'u1';
---echo #
---echo # Test to see if connecting with password is succesful.
---echo #
---connect (con1, localhost, u1,'SOMETHING',)
-show grants;
---disconnect con1
-
---connection default
---echo #
---echo # Test after flushing privileges.
---echo #
-flush privileges;
---connect (con1, localhost, u1,'SOMETHING',)
-show grants;
---disconnect con1
---connection default
-
---echo #
---echo # Now we also set a password for the user.
---echo #
-set password for u1 = PASSWORD('SOMETHINGELSE');
-select user, host, password, plugin, authentication_string from mysql.user where user = 'u1';
-
---echo #
---echo # Here we should use the password field, as that primes over
---echo # the authentication_string field.
---echo #
-show grants for u1;
-
---echo #
---echo # Logging in with the user's password should work.
---echo #
---connect (con1, localhost, u1,'SOMETHINGELSE',)
-show grants;
---disconnect con1
---connection default
---echo #
---echo # Reload privileges and test logging in again.
---echo #
-flush privileges;
-show grants for u1;
---echo #
---echo # Here we connect via the user's password again.
---echo #
---connect (con1, localhost, u1,'SOMETHINGELSE',)
-show grants;
---disconnect con1
---connection default
-
---echo #
---echo # Now we remove the authentication plugin password, flush privileges and
---echo # try again.
---echo #
-update mysql.user set password=authentication_string, plugin='', authentication_string='' where user='u1';
-select user, host, password, plugin, authentication_string from mysql.user where user = 'u1';
-flush privileges;
-show grants for u1;
---echo #
---echo # Here we connect via the user's password.
---echo #
---connect (con1, localhost, u1,'SOMETHINGELSE',)
-select user, host, password, plugin, authentication_string from mysql.user where user = 'u1';
---disconnect con1
---connection default
-
---echo #
---echo # Try and set a wrong auth_string password, with mysql_native_password.
---echo # Make sure it fails.
---echo #
---error ER_PASSWD_LENGTH
-GRANT USAGE ON *.* TO u1 IDENTIFIED VIA mysql_native_password USING 'asd';
---echo #
---echo # Now set a correct password.
---echo #
-GRANT SELECT ON mysql.* to u1 IDENTIFIED VIA mysql_native_password
-USING '*7AFEFD08B6B720E781FB000CAA418F54FA662626';
-show grants for u1;
-
---echo #
---echo # Test if the user can now use that password instead.
---echo #
---connect (con1, localhost, u1,'SOMETHING',)
-show grants;
---disconnect con1
-
---echo #
---echo # Test if the user can now use that password instead, after flushing privileges;
---echo #
---connection default
-flush privileges;
-
---connect (con1, localhost, u1,'SOMETHING',)
-show grants;
---disconnect con1
---connection default
-
---echo #
---echo # Clear all passwords from the user.
---echo #
-set password for u1 = '';
-select user, host, password, plugin, authentication_string from mysql.user where user = 'u1';
-
---echo #
---echo # Test no password connect.
---echo #
---connect (con1, localhost, u1,,)
-show grants;
---disconnect con1
---connection default
-
---echo #
---echo # Test no password connect, after flushing privileges.
---echo #
-flush privileges;
---connect (con1, localhost, u1,,)
-show grants;
---disconnect con1
---connection default
-
-drop user u1;
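A condensed, hedged recap of the precedence rule the removed test documented (the hash is the test's own value for PASSWORD('SOMETHING')):

    CREATE USER u1;
    GRANT SELECT ON mysql.* TO u1 IDENTIFIED VIA mysql_native_password
      USING '*7AFEFD08B6B720E781FB000CAA418F54FA662626';  -- hash of 'SOMETHING'
    SET PASSWORD FOR u1 = PASSWORD('SOMETHINGELSE');
    -- SHOW GRANTS FOR u1 now reports the Password column hash, which takes
    -- precedence over authentication_string when both are set.
    DROP USER u1;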
diff --git a/mysql-test/main/signal.result b/mysql-test/main/signal.result
index 215f7db8b0d..e329c58a47e 100644
--- a/mysql-test/main/signal.result
+++ b/mysql-test/main/signal.result
@@ -2285,17 +2285,13 @@ begin
DECLARE foo CONDITION FOR SQLSTATE '12345';
SIGNAL foo SET MYSQL_ERRNO = `65`; /* illegal */
end $$
-call test_signal $$
ERROR 42S22: Unknown column '65' in 'field list'
-drop procedure test_signal $$
create procedure test_signal()
begin
DECLARE foo CONDITION FOR SQLSTATE '12345';
SIGNAL foo SET MYSQL_ERRNO = `A`; /* illegal */
end $$
-call test_signal $$
ERROR 42S22: Unknown column 'A' in 'field list'
-drop procedure test_signal $$
create procedure test_signal()
begin
DECLARE foo CONDITION FOR SQLSTATE '12345';
@@ -2346,9 +2342,7 @@ DECLARE foo CONDITION FOR SQLSTATE '12345';
SIGNAL foo SET MYSQL_ERRNO = 1000,
MESSAGE_TEXT = `Hello`;
end $$
-call test_signal $$
ERROR 42S22: Unknown column 'Hello' in 'field list'
-drop procedure test_signal $$
create procedure test_signal()
begin
DECLARE foo CONDITION FOR SQLSTATE '12345';
diff --git a/mysql-test/main/signal.test b/mysql-test/main/signal.test
index e4dcb5a71cf..5b40863b0e6 100644
--- a/mysql-test/main/signal.test
+++ b/mysql-test/main/signal.test
@@ -2546,25 +2546,21 @@ end $$
call test_signal $$
drop procedure test_signal $$
+-- error ER_BAD_FIELD_ERROR
create procedure test_signal()
begin
DECLARE foo CONDITION FOR SQLSTATE '12345';
SIGNAL foo SET MYSQL_ERRNO = `65`; /* illegal */
end $$
--- error ER_BAD_FIELD_ERROR
-call test_signal $$
-drop procedure test_signal $$
+-- error ER_BAD_FIELD_ERROR
create procedure test_signal()
begin
DECLARE foo CONDITION FOR SQLSTATE '12345';
SIGNAL foo SET MYSQL_ERRNO = `A`; /* illegal */
end $$
--- error ER_BAD_FIELD_ERROR
-call test_signal $$
-drop procedure test_signal $$
create procedure test_signal()
begin
@@ -2620,6 +2616,7 @@ end $$
call test_signal $$
drop procedure test_signal $$
+-- error ER_BAD_FIELD_ERROR
create procedure test_signal()
begin
DECLARE foo CONDITION FOR SQLSTATE '12345';
@@ -2627,10 +2624,6 @@ begin
MESSAGE_TEXT = `Hello`;
end $$
--- error ER_BAD_FIELD_ERROR
-call test_signal $$
-drop procedure test_signal $$
-
create procedure test_signal()
begin
DECLARE foo CONDITION FOR SQLSTATE '12345';
diff --git a/mysql-test/main/signal_demo1.result b/mysql-test/main/signal_demo1.result
index 752f23a48d6..d919f48404f 100644
--- a/mysql-test/main/signal_demo1.result
+++ b/mysql-test/main/signal_demo1.result
@@ -75,6 +75,9 @@ end;
end case;
end
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create procedure check_pk_inventory(in id integer)
begin
declare x integer;
@@ -92,6 +95,8 @@ MYSQL_ERRNO = 10000;
end if;
end
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create procedure check_pk_order(in id integer)
begin
declare x integer;
@@ -108,6 +113,8 @@ MYSQL_ERRNO = 10000;
end if;
end
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create trigger po_order_bi before insert on po_order
for each row
begin
diff --git a/mysql-test/main/skip_grants.result b/mysql-test/main/skip_grants.result
index de263074b61..5ecb783ac43 100644
--- a/mysql-test/main/skip_grants.result
+++ b/mysql-test/main/skip_grants.result
@@ -1,14 +1,4 @@
use test;
-DROP VIEW IF EXISTS v1;
-DROP VIEW IF EXISTS v2;
-DROP VIEW IF EXISTS v3;
-DROP TABLE IF EXISTS t1;
-DROP PROCEDURE IF EXISTS p1;
-DROP PROCEDURE IF EXISTS p2;
-DROP PROCEDURE IF EXISTS p3;
-DROP FUNCTION IF EXISTS f1;
-DROP FUNCTION IF EXISTS f2;
-DROP FUNCTION IF EXISTS f3;
CREATE TABLE t1(c INT);
CREATE TRIGGER t1_bi BEFORE INSERT ON t1
FOR EACH ROW
@@ -95,3 +85,30 @@ Acl_role_grants 0
Acl_roles 0
Acl_table_grants 0
Acl_users 0
+show create user root@localhost;
+ERROR HY000: The MariaDB server is running with the --skip-grant-tables option so it cannot execute this statement
+insert mysql.global_priv values ('foo', 'bar', '{}');
+insert mysql.global_priv values ('baz', 'baz', '{"plugin":"baz"}');
+set password for bar@foo = password("pass word");
+ERROR HY000: The MariaDB server is running with the --skip-grant-tables option so it cannot execute this statement
+flush privileges;
+show create user root@localhost;
+CREATE USER for root@localhost
+CREATE USER 'root'@'localhost'
+show create user bar@foo;
+CREATE USER for bar@foo
+CREATE USER 'bar'@'foo'
+show create user baz@baz;
+CREATE USER for baz@baz
+CREATE USER 'baz'@'baz' IDENTIFIED VIA baz
+set password for bar@foo = password("pass word");
+show create user bar@foo;
+CREATE USER for bar@foo
+CREATE USER 'bar'@'foo' IDENTIFIED BY PASSWORD '*EDBBEA7F4E7B5D8B0BC8D7AC5D1936FB7DA10611'
+alter user baz@baz identified with mysql_native_password as password("baz");
+show create user baz@baz;
+CREATE USER for baz@baz
+CREATE USER 'baz'@'baz' IDENTIFIED BY PASSWORD '*E52096EF8EB0240275A7FE9E069101C33F98CF07'
+drop user bar@foo;
+drop user baz@baz;
+# restart
diff --git a/mysql-test/main/skip_grants.test b/mysql-test/main/skip_grants.test
index 5f79404e7e4..ccad3c2d13f 100644
--- a/mysql-test/main/skip_grants.test
+++ b/mysql-test/main/skip_grants.test
@@ -15,26 +15,6 @@ use test;
# - BUG#13504: creation view with DEFINER clause if --skip-grant-tables
#
-# Prepare.
-
---disable_warnings
-
-DROP VIEW IF EXISTS v1;
-DROP VIEW IF EXISTS v2;
-DROP VIEW IF EXISTS v3;
-
-DROP TABLE IF EXISTS t1;
-
-DROP PROCEDURE IF EXISTS p1;
-DROP PROCEDURE IF EXISTS p2;
-DROP PROCEDURE IF EXISTS p3;
-
-DROP FUNCTION IF EXISTS f1;
-DROP FUNCTION IF EXISTS f2;
-DROP FUNCTION IF EXISTS f3;
-
---enable_warnings
-
# Test case.
CREATE TABLE t1(c INT);
@@ -137,3 +117,26 @@ select no_such_function(1);
# MDEV-8280 crash in 'show global status' with --skip-grant-tables
#
show global status like 'Acl%';
+
+#
+# MDEV-18297
+# How to reset a forgotten root password
+#
+--error ER_OPTION_PREVENTS_STATEMENT
+show create user root@localhost;
+insert mysql.global_priv values ('foo', 'bar', '{}');
+insert mysql.global_priv values ('baz', 'baz', '{"plugin":"baz"}');
+--error ER_OPTION_PREVENTS_STATEMENT
+set password for bar@foo = password("pass word");
+flush privileges;
+show create user root@localhost;
+show create user bar@foo;
+show create user baz@baz;
+set password for bar@foo = password("pass word");
+show create user bar@foo;
+alter user baz@baz identified with mysql_native_password as password("baz");
+show create user baz@baz;
+drop user bar@foo;
+drop user baz@baz;
+# need to restart the server to restore the --skip-grant state
+--source include/restart_mysqld.inc
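The MDEV-18297 block above amounts to the forgotten-root-password recovery flow; a hedged sketch against a server started with --skip-grant-tables (the new password is illustrative):

    SHOW CREATE USER root@localhost;   -- fails with ER_OPTION_PREVENTS_STATEMENT
    FLUSH PRIVILEGES;                  -- loads the grant tables despite --skip-grant-tables
    SET PASSWORD FOR root@localhost = PASSWORD('new_password');
    SHOW CREATE USER root@localhost;   -- now succeeds and shows the updated password hash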
diff --git a/mysql-test/main/sp-anchor-row-type-cursor.result b/mysql-test/main/sp-anchor-row-type-cursor.result
index add771c534c..64359988a3d 100644
--- a/mysql-test/main/sp-anchor-row-type-cursor.result
+++ b/mysql-test/main/sp-anchor-row-type-cursor.result
@@ -936,6 +936,8 @@ SELECT rec1.a, rec1.b;
END;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: The used SELECT statements have a different number of columns
DROP TABLE t1;
@@ -955,6 +957,8 @@ SELECT rec1.a, rec1.b;
END;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: Operand should contain 2 column(s)
DROP TABLE t1;
@@ -972,6 +976,8 @@ SELECT rec1.a, rec1.b;
END;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
rec1.a rec1.b
10 b10
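The warning 1287 added throughout these result files is triggered by placing INTO after the FROM clause; a hedged illustration of the deprecated and recommended spellings (table and variable names are illustrative):

    SELECT a, b FROM t1 INTO @va, @vb;  -- deprecated placement, raises warning 1287
    SELECT a, b INTO @va, @vb FROM t1;  -- form recommended by the warning text, no warning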
diff --git a/mysql-test/main/sp-anchor-row-type-table.result b/mysql-test/main/sp-anchor-row-type-table.result
index 6abf1e18315..b51ee5bf55d 100644
--- a/mysql-test/main/sp-anchor-row-type-table.result
+++ b/mysql-test/main/sp-anchor-row-type-table.result
@@ -606,6 +606,8 @@ SELECT 10,'a','b' FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: The used SELECT statements have a different number of columns
DROP TABLE t1;
@@ -622,6 +624,8 @@ SELECT 10,'a' FROM t1 INTO rec1, rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: Operand should contain 2 column(s)
DROP TABLE t1;
@@ -638,6 +642,8 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
rec1.a rec1.b
10 b10
diff --git a/mysql-test/main/sp-anchor-type.result b/mysql-test/main/sp-anchor-type.result
index 2b61854d514..f7674735821 100644
--- a/mysql-test/main/sp-anchor-type.result
+++ b/mysql-test/main/sp-anchor-type.result
@@ -957,6 +957,8 @@ SELECT * FROM t1 INTO v_a, v_b, v_c;
SELECT v_a, v_b, v_c;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
v_a v_b v_c
1 b1 2001-01-01 10:20:30.123
diff --git a/mysql-test/main/sp-big.result b/mysql-test/main/sp-big.result
index e12136eb36d..0a07a3aa7a8 100644
--- a/mysql-test/main/sp-big.result
+++ b/mysql-test/main/sp-big.result
@@ -77,6 +77,8 @@ select count(*) as cnt from (select id1 from t1 force index (primary) where id1
set id1_cond = id1_cond + 1;
end while;
end//
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert t1 select seq, seq, 1, 1, seq, seq, seq from seq_1_to_2000;
set @before=unix_timestamp();
call select_test();
diff --git a/mysql-test/main/sp-bugs.result b/mysql-test/main/sp-bugs.result
index a699cd11c5a..3ab3d19ccfb 100644
--- a/mysql-test/main/sp-bugs.result
+++ b/mysql-test/main/sp-bugs.result
@@ -60,20 +60,6 @@ ERROR HY000: Trigger does not exist
DROP TABLE t1;
DROP PROCEDURE p1;
#
-# Bug#50423: Crash on second call of a procedure dropping a trigger
-#
-DROP TABLE IF EXISTS t1;
-DROP TRIGGER IF EXISTS tr1;
-DROP PROCEDURE IF EXISTS p1;
-CREATE TABLE t1 (f1 INTEGER);
-CREATE TRIGGER tr1 BEFORE INSERT ON t1 FOR EACH ROW SET @aux = 1;
-CREATE PROCEDURE p1 () DROP TRIGGER tr1;
-CALL p1 ();
-CALL p1 ();
-ERROR HY000: Trigger does not exist
-DROP TABLE t1;
-DROP PROCEDURE p1;
-#
# Bug#54375: Error in stored procedure leaves connection
# in different default schema
#
diff --git a/mysql-test/main/sp-bugs.test b/mysql-test/main/sp-bugs.test
index 2dd70d28249..6695b05b72d 100644
--- a/mysql-test/main/sp-bugs.test
+++ b/mysql-test/main/sp-bugs.test
@@ -81,27 +81,6 @@ DROP TABLE t1;
DROP PROCEDURE p1;
--echo #
---echo # Bug#50423: Crash on second call of a procedure dropping a trigger
---echo #
-
---disable_warnings
-DROP TABLE IF EXISTS t1;
-DROP TRIGGER IF EXISTS tr1;
-DROP PROCEDURE IF EXISTS p1;
---enable_warnings
-
-CREATE TABLE t1 (f1 INTEGER);
-CREATE TRIGGER tr1 BEFORE INSERT ON t1 FOR EACH ROW SET @aux = 1;
-CREATE PROCEDURE p1 () DROP TRIGGER tr1;
-
-CALL p1 ();
---error ER_TRG_DOES_NOT_EXIST
-CALL p1 ();
-
-DROP TABLE t1;
-DROP PROCEDURE p1;
-
---echo #
--echo # Bug#54375: Error in stored procedure leaves connection
--echo # in different default schema
--echo #
diff --git a/mysql-test/main/sp-destruct.test b/mysql-test/main/sp-destruct.test
index 8870df29299..12299fa230b 100644
--- a/mysql-test/main/sp-destruct.test
+++ b/mysql-test/main/sp-destruct.test
@@ -17,8 +17,8 @@ call mtr.add_suppression("Stored routine .test...bug14233_[123].: invalid value
let $MYSQLD_DATADIR= `select @@datadir`;
flush table mysql.proc;
--copy_file $MYSQLD_DATADIR/mysql/proc.frm $MYSQLTEST_VARDIR/tmp/proc.frm
---copy_file $MYSQLD_DATADIR/mysql/proc.MYD $MYSQLTEST_VARDIR/tmp/proc.MYD
---copy_file $MYSQLD_DATADIR/mysql/proc.MYI $MYSQLTEST_VARDIR/tmp/proc.MYI
+--copy_file $MYSQLD_DATADIR/mysql/proc.MAD $MYSQLTEST_VARDIR/tmp/proc.MAD
+--copy_file $MYSQLD_DATADIR/mysql/proc.MAI $MYSQLTEST_VARDIR/tmp/proc.MAI
use test;
@@ -70,8 +70,8 @@ flush table mysql.proc;
# Drop the mysql.proc table
--remove_file $MYSQLD_DATADIR/mysql/proc.frm
---remove_file $MYSQLD_DATADIR/mysql/proc.MYD
---remove_file $MYSQLD_DATADIR/mysql/proc.MYI
+--remove_file $MYSQLD_DATADIR/mysql/proc.MAD
+--remove_file $MYSQLD_DATADIR/mysql/proc.MAI
--error ER_NO_SUCH_TABLE
call bug14233();
--error ER_NO_SUCH_TABLE
@@ -81,11 +81,11 @@ insert into t1 values (0);
# Restore mysql.proc
--copy_file $MYSQLTEST_VARDIR/tmp/proc.frm $MYSQLD_DATADIR/mysql/proc.frm
---copy_file $MYSQLTEST_VARDIR/tmp/proc.MYD $MYSQLD_DATADIR/mysql/proc.MYD
---copy_file $MYSQLTEST_VARDIR/tmp/proc.MYI $MYSQLD_DATADIR/mysql/proc.MYI
+--copy_file $MYSQLTEST_VARDIR/tmp/proc.MAD $MYSQLD_DATADIR/mysql/proc.MAD
+--copy_file $MYSQLTEST_VARDIR/tmp/proc.MAI $MYSQLD_DATADIR/mysql/proc.MAI
--remove_file $MYSQLTEST_VARDIR/tmp/proc.frm
---remove_file $MYSQLTEST_VARDIR/tmp/proc.MYD
---remove_file $MYSQLTEST_VARDIR/tmp/proc.MYI
+--remove_file $MYSQLTEST_VARDIR/tmp/proc.MAD
+--remove_file $MYSQLTEST_VARDIR/tmp/proc.MAI
flush table mysql.proc;
flush privileges;
@@ -233,8 +233,8 @@ drop database if exists mysqltest;
flush table mysql.proc;
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file $MYSQLD_DATADIR/mysql/proc.frm $MYSQLTEST_VARDIR/tmp/proc.frm
---copy_file $MYSQLD_DATADIR/mysql/proc.MYD $MYSQLTEST_VARDIR/tmp/proc.MYD
---copy_file $MYSQLD_DATADIR/mysql/proc.MYI $MYSQLTEST_VARDIR/tmp/proc.MYI
+--copy_file $MYSQLD_DATADIR/mysql/proc.MAD $MYSQLTEST_VARDIR/tmp/proc.MAD
+--copy_file $MYSQLD_DATADIR/mysql/proc.MAI $MYSQLTEST_VARDIR/tmp/proc.MAI
create database mysqltest;
--echo # Corrupt mysql.proc to make it unusable by current version of server.
@@ -245,11 +245,11 @@ drop database mysqltest;
--echo # Restore mysql.proc.
drop table mysql.proc;
--copy_file $MYSQLTEST_VARDIR/tmp/proc.frm $MYSQLD_DATADIR/mysql/proc.frm
---copy_file $MYSQLTEST_VARDIR/tmp/proc.MYD $MYSQLD_DATADIR/mysql/proc.MYD
---copy_file $MYSQLTEST_VARDIR/tmp/proc.MYI $MYSQLD_DATADIR/mysql/proc.MYI
+--copy_file $MYSQLTEST_VARDIR/tmp/proc.MAD $MYSQLD_DATADIR/mysql/proc.MAD
+--copy_file $MYSQLTEST_VARDIR/tmp/proc.MAI $MYSQLD_DATADIR/mysql/proc.MAI
--remove_file $MYSQLTEST_VARDIR/tmp/proc.frm
---remove_file $MYSQLTEST_VARDIR/tmp/proc.MYD
---remove_file $MYSQLTEST_VARDIR/tmp/proc.MYI
+--remove_file $MYSQLTEST_VARDIR/tmp/proc.MAD
+--remove_file $MYSQLTEST_VARDIR/tmp/proc.MAI
--echo #
diff --git a/mysql-test/main/sp-error.result b/mysql-test/main/sp-error.result
index 1a32665c886..3d4e7895fa4 100644
--- a/mysql-test/main/sp-error.result
+++ b/mysql-test/main/sp-error.result
@@ -1,5 +1,7 @@
drop table if exists t1, t2;
SELECT * FROM mysql.proc INTO OUTFILE 'MYSQLTEST_VARDIR/tmp/proc.txt';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
delete from mysql.proc;
create procedure syntaxerror(t int)|
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
@@ -12,6 +14,8 @@ create table t3 ( x int )|
insert into t3 values (2), (3)|
create procedure bad_into(out param int)
select x from t3 into param|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
call bad_into(@x)|
ERROR 42000: Result consisted of more than one row
drop procedure bad_into|
@@ -439,6 +443,9 @@ create procedure nodb.bug3339() begin end|
ERROR 42000: Unknown database 'nodb'
create procedure bug2653_1(a int, out b int)
set b = aa|
+call bug2653_1(1, @b)|
+ERROR 42S22: Unknown column 'aa' in 'field list'
+drop procedure bug2653_1|
create procedure bug2653_2(a int, out b int)
begin
if aa < 0 then
@@ -447,12 +454,7 @@ else
set b = a;
end if;
end|
-call bug2653_1(1, @b)|
ERROR 42S22: Unknown column 'aa' in 'field list'
-call bug2653_2(2, @b)|
-ERROR 42S22: Unknown column 'aa' in 'field list'
-drop procedure bug2653_1|
-drop procedure bug2653_2|
create procedure bug4344() drop procedure bug4344|
ERROR HY000: Can't drop or alter a PROCEDURE from within another stored routine
create procedure bug4344() drop function bug4344|
@@ -1067,6 +1069,7 @@ IF bug13037_foo THEN
SELECT 1;
END IF;
END|
+ERROR 42S22: Unknown column 'bug13037_foo' in 'field list'
CREATE PROCEDURE bug13037_p2()
BEGIN
SET @bug13037_foo = bug13037_bar;
@@ -1076,19 +1079,14 @@ BEGIN
SELECT bug13037_foo;
END|
-CALL bug13037_p1();
-ERROR 42S22: Unknown column 'bug13037_foo' in 'field list'
CALL bug13037_p2();
ERROR 42S22: Unknown column 'bug13037_bar' in 'field list'
CALL bug13037_p3();
ERROR 42S22: Unknown column 'bug13037_foo' in 'field list'
-CALL bug13037_p1();
-ERROR 42S22: Unknown column 'bug13037_foo' in 'field list'
CALL bug13037_p2();
ERROR 42S22: Unknown column 'bug13037_bar' in 'field list'
CALL bug13037_p3();
ERROR 42S22: Unknown column 'bug13037_foo' in 'field list'
-DROP PROCEDURE bug13037_p1;
DROP PROCEDURE bug13037_p2;
DROP PROCEDURE bug13037_p3;
create database mysqltest1;
@@ -2848,6 +2846,8 @@ DECLARE v VARCHAR(5) DEFAULT -1;
SELECT b FROM t1 WHERE a = 2 INTO v;
RETURN v;
END|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Here we check that the NOT_FOUND condition raised in f1()
# is not visible in the outer function (f2), i.e. the continue
diff --git a/mysql-test/main/sp-error.test b/mysql-test/main/sp-error.test
index 0e16948f438..475d677b820 100644
--- a/mysql-test/main/sp-error.test
+++ b/mysql-test/main/sp-error.test
@@ -608,6 +608,12 @@ create procedure nodb.bug3339() begin end|
create procedure bug2653_1(a int, out b int)
set b = aa|
+--error ER_BAD_FIELD_ERROR
+call bug2653_1(1, @b)|
+
+drop procedure bug2653_1|
+
+--error ER_BAD_FIELD_ERROR
create procedure bug2653_2(a int, out b int)
begin
if aa < 0 then
@@ -617,13 +623,6 @@ begin
end if;
end|
---error 1054
-call bug2653_1(1, @b)|
---error 1054
-call bug2653_2(2, @b)|
-
-drop procedure bug2653_1|
-drop procedure bug2653_2|
#
# BUG#4344
@@ -1507,6 +1506,7 @@ DROP PROCEDURE IF EXISTS bug13037_p3;
delimiter |;
+--error ER_BAD_FIELD_ERROR
CREATE PROCEDURE bug13037_p1()
BEGIN
IF bug13037_foo THEN
@@ -1529,20 +1529,15 @@ delimiter ;|
--echo
--error 1054
-CALL bug13037_p1();
---error 1054
CALL bug13037_p2();
--error 1054
CALL bug13037_p3();
--error 1054
-CALL bug13037_p1();
---error 1054
CALL bug13037_p2();
--error 1054
CALL bug13037_p3();
-DROP PROCEDURE bug13037_p1;
DROP PROCEDURE bug13037_p2;
DROP PROCEDURE bug13037_p3;
diff --git a/mysql-test/main/sp-row.result b/mysql-test/main/sp-row.result
index da8258d9ec4..4b87798e0bb 100644
--- a/mysql-test/main/sp-row.result
+++ b/mysql-test/main/sp-row.result
@@ -2135,6 +2135,8 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: The used SELECT statements have a different number of columns
DROP TABLE t1;
@@ -2149,6 +2151,8 @@ SELECT * FROM t1 INTO rec1, rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: Operand should contain 2 column(s)
DROP TABLE t1;
@@ -2163,6 +2167,8 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
rec1.a rec1.b
10 b10
diff --git a/mysql-test/main/sp-security.result b/mysql-test/main/sp-security.result
index 4487528210f..662d52902d0 100644
--- a/mysql-test/main/sp-security.result
+++ b/mysql-test/main/sp-security.result
@@ -506,9 +506,9 @@ DROP DATABASE mysqltest;
GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow';
GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO
user19857@localhost;
-SELECT Host,User,Password FROM mysql.user WHERE User='user19857';
-Host User Password
-localhost user19857 *82DC221D557298F6CE9961037DB1C90604792F5C
+SELECT Host,User,Plugin,Authentication_string FROM mysql.user WHERE User='user19857';
+Host User plugin authentication_string
+localhost user19857 mysql_native_password *82DC221D557298F6CE9961037DB1C90604792F5C
connect mysqltest_2_con,localhost,user19857,meow,test;
connection mysqltest_2_con;
USE test;
@@ -533,9 +533,9 @@ connection mysqltest_2_con;
DROP PROCEDURE IF EXISTS test.sp19857;
connection con1root;
disconnect mysqltest_2_con;
-SELECT Host,User,Password FROM mysql.user WHERE User='user19857';
-Host User Password
-localhost user19857 *82DC221D557298F6CE9961037DB1C90604792F5C
+SELECT Host,User,Plugin,Authentication_string FROM mysql.user WHERE User='user19857';
+Host User plugin authentication_string
+localhost user19857 mysql_native_password *82DC221D557298F6CE9961037DB1C90604792F5C
DROP USER user19857@localhost;
disconnect con1root;
connection default;
@@ -711,9 +711,7 @@ disconnect con2;
DROP USER user2@localhost;
DROP DATABASE db1;
create user foo@local_ost;
-create user foo@`local\_ost`;
-update mysql.user set plugin='foobar' where host='local\\_ost';
-flush privileges;
+create user foo@`local\_ost` identified via mysql_old_password using '0123456789ABCDEF';
create database foodb;
grant create routine on foodb.* to foo@local_ost;
connect con1,localhost,foo;
@@ -767,21 +765,23 @@ connection default;
disconnect conn1;
drop user bug12602983_user@localhost;
drop database mysqltest_db;
+create user u1@localhost;
+grant all privileges on *.* to u1@localhost with grant option;
+connect u1, localhost, u1;
set password=password('foobar');
create procedure sp1() select 1;
show grants;
-Grants for root@localhost
-GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY PASSWORD '*9B500343BC52E2911172EB52AE5CF4847604C6E5' WITH GRANT OPTION
-GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION
+Grants for u1@localhost
+GRANT ALL PRIVILEGES ON *.* TO 'u1'@'localhost' IDENTIFIED BY PASSWORD '*9B500343BC52E2911172EB52AE5CF4847604C6E5' WITH GRANT OPTION
grant execute on procedure sp1 to current_user() identified by 'barfoo';
show grants;
-Grants for root@localhost
-GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY PASSWORD '*343915A8181B5728EADBDC73E1F7E6B0C3998483' WITH GRANT OPTION
-GRANT EXECUTE ON PROCEDURE `test`.`sp1` TO 'root'@'localhost'
-GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION
+Grants for u1@localhost
+GRANT ALL PRIVILEGES ON *.* TO 'u1'@'localhost' IDENTIFIED BY PASSWORD '*343915A8181B5728EADBDC73E1F7E6B0C3998483' WITH GRANT OPTION
+GRANT EXECUTE ON PROCEDURE `test`.`sp1` TO 'u1'@'localhost'
drop procedure sp1;
-set password='';
-update mysql.user set plugin='';
+disconnect u1;
+connection default;
+drop user u1@localhost;
#
# MDEV-13396 Unexpected "alter routine comand defined" during CREATE OR REPLACE PROCEDURE
#
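
Note on the sp-security changes above: the checks switch from the old mysql.user Password column to Plugin and Authentication_string, which matches the 10.4-era move of privilege storage into mysql.global_priv (mysql.user is kept as a compatibility view), and the foo@`local\_ost` account is now created with IDENTIFIED VIA mysql_old_password instead of patching mysql.user directly. A minimal sketch of inspecting the same information on such a server, using a throwaway account and assuming the Priv column of mysql.global_priv stores the plugin and authentication_string keys as JSON:

    -- hypothetical account, for illustration only
    CREATE USER demo_u@localhost IDENTIFIED BY 'demo_pw';
    -- via the compatibility view, as the updated test does
    SELECT Host, User, Plugin, Authentication_string
      FROM mysql.user WHERE User = 'demo_u';
    -- via the underlying table (assumed JSON layout)
    SELECT Host, User,
           JSON_VALUE(Priv, '$.plugin')                AS plugin,
           JSON_VALUE(Priv, '$.authentication_string') AS auth_string
      FROM mysql.global_priv WHERE User = 'demo_u';
    DROP USER demo_u@localhost;
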
diff --git a/mysql-test/main/sp-security.test b/mysql-test/main/sp-security.test
index 4f645ce2cd3..7a87fae4097 100644
--- a/mysql-test/main/sp-security.test
+++ b/mysql-test/main/sp-security.test
@@ -742,7 +742,7 @@ DROP DATABASE mysqltest;
GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow';
GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO
user19857@localhost;
-SELECT Host,User,Password FROM mysql.user WHERE User='user19857';
+SELECT Host,User,Plugin,Authentication_string FROM mysql.user WHERE User='user19857';
--connect (mysqltest_2_con,localhost,user19857,meow,test)
--connection mysqltest_2_con
@@ -770,7 +770,7 @@ DROP PROCEDURE IF EXISTS test.sp19857;
--disconnect mysqltest_2_con
-SELECT Host,User,Password FROM mysql.user WHERE User='user19857';
+SELECT Host,User,Plugin,Authentication_string FROM mysql.user WHERE User='user19857';
DROP USER user19857@localhost;
@@ -977,16 +977,7 @@ DROP DATABASE db1;
# Bug#27407480: AUTOMATIC_SP_PRIVILEGES REQUIRES NEED THE INSERT PRIVILEGES FOR MYSQL.USER TABLE
#
create user foo@local_ost;
-#
-# Create a user with an authentification plugin 'foobar'.
-# Instead of using a normal "CREATE USER <user> IDENTIFIED VIA <plugin>"
-# we do CREATE (without VIA) followed by UPDATE and FLUSH.
-# This is to avoid installing a real plugin and thus avoid the test dependency.
-# We won't login under this user in the below test, so this is fine.
-#
-create user foo@`local\_ost`;
-update mysql.user set plugin='foobar' where host='local\\_ost';
-flush privileges;
+create user foo@`local\_ost` identified via mysql_old_password using '0123456789ABCDEF';
create database foodb;
grant create routine on foodb.* to foo@local_ost;
connect con1,localhost,foo;
@@ -1044,15 +1035,18 @@ drop database mysqltest_db;
# Wait till all disconnects are completed
--source include/wait_until_count_sessions.inc
+create user u1@localhost;
+grant all privileges on *.* to u1@localhost with grant option;
+connect u1, localhost, u1;
set password=password('foobar');
create procedure sp1() select 1;
show grants;
grant execute on procedure sp1 to current_user() identified by 'barfoo';
show grants;
drop procedure sp1;
-set password='';
-#cleanup after MDEV-16238
-update mysql.user set plugin='';
+disconnect u1;
+connection default;
+drop user u1@localhost;
--echo #
--echo # MDEV-13396 Unexpected "alter routine comand defined" during CREATE OR REPLACE PROCEDURE
diff --git a/mysql-test/main/sp.result b/mysql-test/main/sp.result
index b7022559dc5..3129a2e165c 100644
--- a/mysql-test/main/sp.result
+++ b/mysql-test/main/sp.result
@@ -314,10 +314,12 @@ delete from t1|
drop procedure b|
drop procedure if exists b2|
create procedure b2(x int)
-repeat(select 1 into outfile 'b2');
+repeat(select 1) into outfile 'b2';
insert into test.t1 values (repeat("b2",3), x);
set x = x-1;
until x = 0 end repeat|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
drop procedure b2|
drop procedure if exists c|
create procedure c(x int)
@@ -2291,9 +2293,11 @@ create procedure bug3843()
analyze table t1|
call bug3843()|
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
call bug3843()|
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
select 1+2|
1+2
@@ -2440,7 +2444,7 @@ drop procedure if exists bug4904|
create procedure bug4904()
begin
declare continue handler for sqlstate 'HY000' begin end;
-create table t2 as select * from t3;
+create table not_existing_table as select * from t3;
end|
call bug4904()|
ERROR 42S02: Table 'test.t3' doesn't exist
@@ -4179,6 +4183,7 @@ select v, isnull(v);
end if;
end;
end|
+ERROR 42S22: Unknown column 'undefined_var' in 'field list'
create procedure bug14643_2()
begin
declare continue handler for sqlexception select 'boo' as 'Handler';
@@ -4190,18 +4195,7 @@ select 2;
end case;
select undefined_var;
end|
-call bug14643_1()|
-Handler
-boo
-v isnull(v)
-NULL 1
-call bug14643_2()|
-Handler
-boo
-Handler
-boo
-drop procedure bug14643_1|
-drop procedure bug14643_2|
+ERROR 42S22: Unknown column 'undefined_var' in 'field list'
drop procedure if exists bug14304|
drop table if exists t3, t4|
create table t3(a int primary key auto_increment)|
@@ -4231,9 +4225,7 @@ create procedure bug14376()
begin
declare x int default x;
end|
-call bug14376()|
ERROR 42S22: Unknown column 'x' in 'field list'
-drop procedure bug14376|
create procedure bug14376()
begin
declare x int default 42;
@@ -4290,6 +4282,9 @@ select i as 'A local variable in a nested compound statement takes precedence o
end;
end;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
call bug5967("a - stored procedure parameter")|
a
a - stored procedure parameter
@@ -4338,8 +4333,11 @@ test.t3 optimize status OK
test.v1 optimize Error 'test.v1' is not of type 'BASE TABLE'
test.v1 optimize status Operation failed
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
test.v1 analyze Error 'test.v1' is not of type 'BASE TABLE'
test.v1 analyze status Operation failed
@@ -4357,8 +4355,11 @@ test.t3 optimize status OK
test.v1 optimize Error 'test.v1' is not of type 'BASE TABLE'
test.v1 optimize status Operation failed
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
test.v1 analyze Error 'test.v1' is not of type 'BASE TABLE'
test.v1 analyze status Operation failed
@@ -4376,8 +4377,11 @@ test.t3 optimize status OK
test.v1 optimize Error 'test.v1' is not of type 'BASE TABLE'
test.v1 optimize status Operation failed
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
test.v1 analyze Error 'test.v1' is not of type 'BASE TABLE'
test.v1 analyze status Operation failed
@@ -4476,6 +4480,7 @@ select 'no' as 'v';
end if;
select 'done' as 'End';
end|
+ERROR 42S22: Unknown column 'v' in 'field list'
create procedure bug14498_2()
begin
declare continue handler for sqlexception select 'error' as 'Handler';
@@ -4484,6 +4489,7 @@ select 'yes' as 'v';
end while;
select 'done' as 'End';
end|
+ERROR 42S22: Unknown column 'v' in 'field list'
create procedure bug14498_3()
begin
declare continue handler for sqlexception select 'error' as 'Handler';
@@ -4492,6 +4498,7 @@ select 'maybe' as 'v';
until v end repeat;
select 'done' as 'End';
end|
+ERROR 42S22: Unknown column 'v' in 'field list'
create procedure bug14498_4()
begin
declare continue handler for sqlexception select 'error' as 'Handler';
@@ -4505,6 +4512,7 @@ select '?' as 'v';
end case;
select 'done' as 'End';
end|
+ERROR 42S22: Unknown column 'v' in 'field list'
create procedure bug14498_5()
begin
declare continue handler for sqlexception select 'error' as 'Handler';
@@ -4518,38 +4526,7 @@ select '?' as 'v';
end case;
select 'done' as 'End';
end|
-call bug14498_1()|
-Handler
-error
-End
-done
-call bug14498_2()|
-Handler
-error
-End
-done
-call bug14498_3()|
-v
-maybe
-Handler
-error
-End
-done
-call bug14498_4()|
-Handler
-error
-End
-done
-call bug14498_5()|
-Handler
-error
-End
-done
-drop procedure bug14498_1|
-drop procedure bug14498_2|
-drop procedure bug14498_3|
-drop procedure bug14498_4|
-drop procedure bug14498_5|
+ERROR 42S22: Unknown column 'v' in 'field list'
drop table if exists t3|
drop procedure if exists bug15231_1|
drop procedure if exists bug15231_2|
@@ -5797,6 +5774,8 @@ end;
select 1 from no_such_view limit 1 into x;
return x;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create function func_8407_b() returns int
begin
declare x int default 0;
@@ -7748,7 +7727,7 @@ UPDATE t1 SET a = '+' WHERE daynum=tdn();
SHOW STATUS LIKE '%Handler_read%';
Variable_name Value
Handler_read_first 0
-Handler_read_key 2
+Handler_read_key 9
Handler_read_last 0
Handler_read_next 4097
Handler_read_prev 0
@@ -8405,14 +8384,12 @@ DECLARE name VARCHAR(10);
SET name="hello";
call p1(name2);
END|
+ERROR 42S22: Unknown column 'name2' in 'field list'
call p2();
a
hello
-call p3();
-ERROR 42S22: Unknown column 'name2' in 'field list'
drop procedure p1;
drop procedure p2;
-drop procedure p3;
#
# MDEV-15328: MariaDB 10.2.13 Crashes upon CALL PROCEDURE PARAM
# LAST_INSERT_ID ()
@@ -8642,11 +8619,11 @@ RETURN a = timestamp'2038-01-19 03:14:07.999999'
END
$$
SELECT f1(e) FROM t1;
-ERROR 22007: Truncated incorrect DOUBLE value: '2001-01-01 10:20:30'
+ERROR 22007: Truncated incorrect DOUBLE value: '2001-01-01 10:20:30.000000'
SELECT f2(e) FROM t1;
-ERROR 22007: Truncated incorrect DOUBLE value: '2001-01-01 10:20:30'
+ERROR 22007: Truncated incorrect DOUBLE value: '2001-01-01 10:20:30.000000'
SELECT f3(e) FROM t1;
-ERROR 22007: Truncated incorrect DOUBLE value: '2001-01-01 10:20:30'
+ERROR 22007: Truncated incorrect DOUBLE value: '2001-01-01 10:20:30.000000'
DROP FUNCTION f1;
DROP FUNCTION f2;
DROP FUNCTION f3;
diff --git a/mysql-test/main/sp.test b/mysql-test/main/sp.test
index 743f0f6781e..7f841ccd0b4 100644
--- a/mysql-test/main/sp.test
+++ b/mysql-test/main/sp.test
@@ -440,7 +440,7 @@ drop procedure b|
drop procedure if exists b2|
--enable_warnings
create procedure b2(x int)
-repeat(select 1 into outfile 'b2');
+repeat(select 1) into outfile 'b2';
insert into test.t1 values (repeat("b2",3), x);
set x = x-1;
until x = 0 end repeat|
@@ -2918,7 +2918,7 @@ create procedure bug4904()
begin
declare continue handler for sqlstate 'HY000' begin end;
- create table t2 as select * from t3;
+ create table not_existing_table as select * from t3;
end|
-- error 1146
@@ -5040,6 +5040,7 @@ drop procedure if exists bug14643_1|
drop procedure if exists bug14643_2|
--enable_warnings
+--error ER_BAD_FIELD_ERROR
create procedure bug14643_1()
begin
declare continue handler for sqlexception select 'boo' as 'Handler';
@@ -5055,6 +5056,7 @@ begin
end;
end|
+--error ER_BAD_FIELD_ERROR
create procedure bug14643_2()
begin
declare continue handler for sqlexception select 'boo' as 'Handler';
@@ -5069,11 +5071,6 @@ begin
select undefined_var;
end|
-call bug14643_1()|
-call bug14643_2()|
-
-drop procedure bug14643_1|
-drop procedure bug14643_2|
#
# BUG#14304: auto_increment field incorrect set in SP
@@ -5114,15 +5111,12 @@ drop table t3, t4|
drop procedure if exists bug14376|
--enable_warnings
+--error ER_BAD_FIELD_ERROR
create procedure bug14376()
begin
declare x int default x;
end|
-# Not the error we want, but that's what we got for now...
---error ER_BAD_FIELD_ERROR
-call bug14376()|
-drop procedure bug14376|
create procedure bug14376()
begin
@@ -5344,6 +5338,7 @@ drop procedure if exists bug14498_4|
drop procedure if exists bug14498_5|
--enable_warnings
+--error ER_BAD_FIELD_ERROR
create procedure bug14498_1()
begin
declare continue handler for sqlexception select 'error' as 'Handler';
@@ -5356,6 +5351,7 @@ begin
select 'done' as 'End';
end|
+--error ER_BAD_FIELD_ERROR
create procedure bug14498_2()
begin
declare continue handler for sqlexception select 'error' as 'Handler';
@@ -5366,6 +5362,7 @@ begin
select 'done' as 'End';
end|
+--error ER_BAD_FIELD_ERROR
create procedure bug14498_3()
begin
declare continue handler for sqlexception select 'error' as 'Handler';
@@ -5376,6 +5373,7 @@ begin
select 'done' as 'End';
end|
+--error ER_BAD_FIELD_ERROR
create procedure bug14498_4()
begin
declare continue handler for sqlexception select 'error' as 'Handler';
@@ -5391,6 +5389,7 @@ begin
select 'done' as 'End';
end|
+--error ER_BAD_FIELD_ERROR
create procedure bug14498_5()
begin
declare continue handler for sqlexception select 'error' as 'Handler';
@@ -5406,17 +5405,6 @@ begin
select 'done' as 'End';
end|
-call bug14498_1()|
-call bug14498_2()|
-call bug14498_3()|
-call bug14498_4()|
-call bug14498_5()|
-
-drop procedure bug14498_1|
-drop procedure bug14498_2|
-drop procedure bug14498_3|
-drop procedure bug14498_4|
-drop procedure bug14498_5|
#
# BUG#15231: Stored procedure bug with not found condition handler
@@ -9913,6 +9901,8 @@ BEGIN
SET name="hello";
call p1(name);
END|
+
+--error ER_BAD_FIELD_ERROR
CREATE OR REPLACE PROCEDURE p3 ()
BEGIN
DECLARE name VARCHAR(10);
@@ -9923,11 +9913,8 @@ END|
DELIMITER ;|
call p2();
---error ER_BAD_FIELD_ERROR
-call p3();
drop procedure p1;
drop procedure p2;
-drop procedure p3;
--echo #
--echo # MDEV-15328: MariaDB 10.2.13 Crashes upon CALL PROCEDURE PARAM
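
Note on the sp.result/sp.test hunks above: the ER_BAD_FIELD_ERROR expectations move from the CALL statements to the CREATE PROCEDURE statements, so a body that references an identifier which is neither a declared routine variable nor resolvable as a column appears to be rejected already when the routine is created rather than when it is first executed. A minimal sketch of that behaviour, with a hypothetical procedure name:

    DELIMITER $$
    -- per the relocated --error directives, the server now reports
    -- ERROR 1054 (42S22): Unknown column 'undefined_var' in 'field list'
    -- at CREATE time instead of at CALL time
    CREATE PROCEDURE demo_bad_field()
    BEGIN
      SELECT undefined_var;
    END$$
    DELIMITER ;
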
diff --git a/mysql-test/main/sp_notembedded.result b/mysql-test/main/sp_notembedded.result
index d337d50e18a..23461867500 100644
--- a/mysql-test/main/sp_notembedded.result
+++ b/mysql-test/main/sp_notembedded.result
@@ -231,8 +231,6 @@ CREATE PROCEDURE p1(i INT) BEGIN END;
disconnect con1;
connection default;
DROP PROCEDURE p1;
-DELETE FROM mysql.user WHERE User='mysqltest_1';
-FLUSH PRIVILEGES;
#
# Bug#44521 Prepared Statement: CALL p() - crashes: `! thd->main_da.is_sent' failed et.al.
#
@@ -324,7 +322,7 @@ DROP EVENT teste_bug11763507;
# -- End of 5.1 tests
# ------------------------------------------------------------------
grant create routine on test.* to foo1@localhost identified by 'foo';
-update mysql.user set password = replace(password, '*', '-') where user='foo1';
+update mysql.user set authentication_string = replace(authentication_string, '*', '-') where user='foo1';
connect foo,localhost,foo1,foo;
show grants;
Grants for foo1@localhost
diff --git a/mysql-test/main/sp_notembedded.test b/mysql-test/main/sp_notembedded.test
index 1b481767fbe..29901c1221b 100644
--- a/mysql-test/main/sp_notembedded.test
+++ b/mysql-test/main/sp_notembedded.test
@@ -305,6 +305,7 @@ set session low_priority_updates=default;
#
# Bug#44798 MySQL engine crashes when creating stored procedures with execute_priv=N
#
+--source include/switch_to_mysql_user.inc
INSERT IGNORE INTO mysql.user (Host, User, Password, Select_priv, Insert_priv, Update_priv,
Delete_priv, Create_priv, Drop_priv, Reload_priv, Shutdown_priv, Process_priv, File_priv,
Grant_priv, References_priv, Index_priv, Alter_priv, Show_db_priv, Super_priv,
@@ -323,10 +324,7 @@ CREATE PROCEDURE p1(i INT) BEGIN END;
disconnect con1;
connection default;
DROP PROCEDURE p1;
-
-DELETE FROM mysql.user WHERE User='mysqltest_1';
-FLUSH PRIVILEGES;
-
+--source include/switch_to_mysql_global_priv.inc
--echo #
--echo # Bug#44521 Prepared Statement: CALL p() - crashes: `! thd->main_da.is_sent' failed et.al.
@@ -464,8 +462,9 @@ DROP EVENT teste_bug11763507;
# A case of SHOW GRANTS
# (creating a new procedure changes the password)
#
+--source include/switch_to_mysql_user.inc
grant create routine on test.* to foo1@localhost identified by 'foo';
-update mysql.user set password = replace(password, '*', '-') where user='foo1';
+update mysql.user set authentication_string = replace(authentication_string, '*', '-') where user='foo1';
--connect (foo,localhost,foo1,foo)
show grants;
--connection default
@@ -479,6 +478,7 @@ show grants;
--disconnect foo
drop procedure spfoo;
drop user foo1@localhost;
+--source include/switch_to_mysql_global_priv.inc
#
# Restore global concurrent_insert value. Keep in the end of the test file.
diff --git a/mysql-test/main/sp_trans.result b/mysql-test/main/sp_trans.result
index deb0d6797fe..cee2a7e5188 100644
--- a/mysql-test/main/sp_trans.result
+++ b/mysql-test/main/sp_trans.result
@@ -506,6 +506,8 @@ insert into t3 select a from t3;
select count(*)*255 from t3 into table_size;
until table_size > max_table_size*2 end repeat;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
call bug14210_fill_table()|
drop procedure bug14210_fill_table|
create table t4 like t3|
diff --git a/mysql-test/main/sp_trans_log.result b/mysql-test/main/sp_trans_log.result
index b72e8332fad..adc9eafc370 100644
--- a/mysql-test/main/sp_trans_log.result
+++ b/mysql-test/main/sp_trans_log.result
@@ -11,6 +11,8 @@ insert into t1 values (null);
select count(*) from t1 into @a;
return @a;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
reset master;
insert into t2 values (bug23333(),1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
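
Note on the "Warning 1287" lines added to sp.result, sp_trans.result and sp_trans_log.result above: they record the deprecation of placing INTO after the FROM clause of a query; the warning text itself names the preferred spelling. A minimal sketch of the two forms, on a throwaway table:

    CREATE TABLE into_demo (a INT);
    INSERT INTO into_demo VALUES (1),(2),(3);
    -- deprecated form: '<select expression> INTO <destination>' at the end of the statement
    SELECT COUNT(*) FROM into_demo INTO @n;
    -- preferred form, as the warning suggests:
    -- SELECT <select list> INTO <destination> FROM ...
    SELECT COUNT(*) INTO @n FROM into_demo;
    SELECT @n;
    DROP TABLE into_demo;
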
diff --git a/mysql-test/main/sql_mode.result b/mysql-test/main/sql_mode.result
index 238bae2efd8..25a90703bf5 100644
--- a/mysql-test/main/sql_mode.result
+++ b/mysql-test/main/sql_mode.result
@@ -476,10 +476,14 @@ select @@sql_mode;
@@sql_mode
REAL_AS_FLOAT,PIPES_AS_CONCAT,ANSI_QUOTES,IGNORE_SPACE,NO_TABLE_OPTIONS,ANSI
set sql_mode=2147483648*2*2*2;
-ERROR 42000: Variable 'sql_mode' can't be set to the value of '17179869184'
select @@sql_mode;
@@sql_mode
-REAL_AS_FLOAT,PIPES_AS_CONCAT,ANSI_QUOTES,IGNORE_SPACE,NO_TABLE_OPTIONS,ANSI
+TIME_ROUND_FRACTIONAL
+set sql_mode=2147483648*2*2*2*2;
+ERROR 42000: Variable 'sql_mode' can't be set to the value of '34359738368'
+select @@sql_mode;
+@@sql_mode
+TIME_ROUND_FRACTIONAL
set sql_mode=PAD_CHAR_TO_FULL_LENGTH;
create table t1 (a int auto_increment primary key, b char(5));
insert into t1 (b) values('a'),('b\t'),('c ');
diff --git a/mysql-test/main/sql_mode.test b/mysql-test/main/sql_mode.test
index 8cf50f73f6f..97f5cf42791 100644
--- a/mysql-test/main/sql_mode.test
+++ b/mysql-test/main/sql_mode.test
@@ -263,8 +263,10 @@ set sql_mode=4194304;
select @@sql_mode;
set sql_mode=16384+(65536*4);
select @@sql_mode;
+set sql_mode=2147483648*2*2*2;
+select @@sql_mode;
--error 1231
-set sql_mode=2147483648*2*2*2; # that mode does not exist
+set sql_mode=2147483648*2*2*2*2; # that mode does not exist
select @@sql_mode;
#
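
Note on the sql_mode hunks above: the bit value 17179869184 (2147483648*2*2*2) is no longer rejected and reads back as TIME_ROUND_FRACTIONAL, while the "can't be set" error moves to the next bit, 34359738368. A minimal check of the new mode, assuming a server that has it:

    SET @saved_mode = @@sql_mode;
    SET sql_mode = 2147483648*2*2*2;         -- numeric form used in the test
    SELECT @@sql_mode;                       -- expected: TIME_ROUND_FRACTIONAL (per the result hunk)
    SET sql_mode = 'TIME_ROUND_FRACTIONAL';  -- the symbolic name sets the same bit
    SET sql_mode = @saved_mode;
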
diff --git a/mysql-test/main/ssl_cipher.result b/mysql-test/main/ssl_cipher.result
index 266c9f9322a..930d384eda9 100644
--- a/mysql-test/main/ssl_cipher.result
+++ b/mysql-test/main/ssl_cipher.result
@@ -56,6 +56,7 @@ grant usage on mysqltest.* to mysqltest_1@localhost require cipher "AES256-SHA";
Variable_name Value
Ssl_cipher AES256-SHA
drop user mysqltest_1@localhost;
+# restart: --ssl-cipher=AES128-SHA
connect ssl_con,localhost,root,,,,,SSL;
SHOW STATUS LIKE 'Ssl_cipher';
Variable_name Value
diff --git a/mysql-test/main/stat_tables.result b/mysql-test/main/stat_tables.result
index 7fd0b5902ec..642dc1fc8f3 100644
--- a/mysql-test/main/stat_tables.result
+++ b/mysql-test/main/stat_tables.result
@@ -6,6 +6,8 @@ select @@session.use_stat_tables;
COMPLEMENTARY
set @save_use_stat_tables=@@use_stat_tables;
set use_stat_tables='preferably';
+set @tmp_stt_hs=@@histogram_size, @tmp_stt_ht=@@histogram_type;
+set histogram_size=0, histogram_type='single_prec_hb';
DROP DATABASE IF EXISTS dbt3_s001;
CREATE DATABASE dbt3_s001;
use dbt3_s001;
@@ -64,12 +66,12 @@ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
group by n_name
order by revenue desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_custkey i_o_orderdate 4 NULL 179 Using where; Using temporary; Using filesort
-1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
-1 SIMPLE nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1
-1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using temporary; Using filesort
+1 SIMPLE nation ref PRIMARY,i_n_regionkey i_n_regionkey 5 dbt3_s001.region.r_regionkey 5
+1 SIMPLE supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1
+1 SIMPLE customer ref PRIMARY,i_c_nationkey i_c_nationkey 5 dbt3_s001.nation.n_nationkey 6
+1 SIMPLE orders ref|filter PRIMARY,i_o_orderdate,i_o_custkey i_o_custkey|i_o_orderdate 5|4 dbt3_s001.customer.c_custkey 15 (12%) Using where; Using rowid filter
1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
-1 SIMPLE supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.lineitem.l_suppkey 1 Using where
select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
from customer, orders, lineitem, supplier, nation, region
where c_custkey = o_custkey and l_orderkey = o_orderkey
@@ -80,7 +82,7 @@ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
group by n_name
order by revenue desc;
n_name revenue
-PERU 321915.8715
+PERU 321915.87150000007
ARGENTINA 69817.1451
set optimizer_switch=@save_optimizer_switch;
delete from mysql.index_stats;
@@ -171,12 +173,12 @@ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
group by n_name
order by revenue desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_custkey i_o_orderdate 4 NULL 179 Using where; Using temporary; Using filesort
-1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
-1 SIMPLE nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1
-1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using temporary; Using filesort
+1 SIMPLE nation ref PRIMARY,i_n_regionkey i_n_regionkey 5 dbt3_s001.region.r_regionkey 5
+1 SIMPLE supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1
+1 SIMPLE customer ref PRIMARY,i_c_nationkey i_c_nationkey 5 dbt3_s001.nation.n_nationkey 6
+1 SIMPLE orders ref|filter PRIMARY,i_o_orderdate,i_o_custkey i_o_custkey|i_o_orderdate 5|4 dbt3_s001.customer.c_custkey 15 (12%) Using where; Using rowid filter
1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
-1 SIMPLE supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.lineitem.l_suppkey 1 Using where
select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
from customer, orders, lineitem, supplier, nation, region
where c_custkey = o_custkey and l_orderkey = o_orderkey
@@ -187,7 +189,7 @@ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
group by n_name
order by revenue desc;
n_name revenue
-PERU 321915.8715
+PERU 321915.87150000007
ARGENTINA 69817.1451
set optimizer_switch=@save_optimizer_switch;
EXPLAIN select o_year,
@@ -207,14 +209,14 @@ and o_orderdate between date '1995-01-01' and date '1996-12-31'
group by o_year
order by o_year;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE orders ALL PRIMARY,i_o_orderdate,i_o_custkey NULL NULL NULL 1500 Using where; Using temporary; Using filesort
-1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
-1 SIMPLE n1 eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1
-1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
-1 SIMPLE part eq_ref PRIMARY PRIMARY 4 dbt3_s001.lineitem.l_partkey 1 Using where
+1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using temporary; Using filesort
+1 SIMPLE part ALL PRIMARY NULL NULL NULL 200 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey_partkey 5 dbt3_s001.part.p_partkey 30 Using index condition
1 SIMPLE supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.lineitem.l_suppkey 1 Using where
1 SIMPLE n2 eq_ref PRIMARY PRIMARY 4 dbt3_s001.supplier.s_nationkey 1
+1 SIMPLE orders eq_ref PRIMARY,i_o_orderdate,i_o_custkey PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1
+1 SIMPLE n1 ref PRIMARY,i_n_regionkey i_n_regionkey 5 dbt3_s001.region.r_regionkey 5 Using where
select o_year,
sum(case when nation = 'UNITED STATES' then volume else 0 end) /
sum(volume) as mkt_share
@@ -332,7 +334,7 @@ and o_orderkey=l_orderkey and p_partkey=l_partkey;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE orders ref PRIMARY,i_o_orderdate i_o_orderdate 4 const 1
1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
-1 SIMPLE part eq_ref PRIMARY,i_p_retailprice PRIMARY 4 dbt3_s001.lineitem.l_partkey 1 Using where
+1 SIMPLE part eq_ref|filter PRIMARY,i_p_retailprice PRIMARY|i_p_retailprice 4|9 dbt3_s001.lineitem.l_partkey 1 (1%) Using where; Using rowid filter
select o_orderkey, p_partkey
from part, lineitem, orders
where p_retailprice > 1100 and o_orderdate='1997-01-01'
@@ -587,7 +589,7 @@ explain
SELECT * FROM INFORMATION_SCHEMA.PROFILING, mysql.user;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE PROFILING ALL NULL NULL NULL NULL NULL
-1 SIMPLE user ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
+1 SIMPLE global_priv ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
set use_stat_tables=@save_use_stat_tables;
#
@@ -624,6 +626,7 @@ SELECT MAX(pk) FROM t1;
MAX(pk)
NULL
DROP TABLE t1;
+set use_stat_tables=@save_use_stat_tables;
#
# MDEV-17605: SHOW INDEXES with use_stat_tables='preferably'
#
@@ -752,3 +755,59 @@ set @@sql_mode= @save_sql_mode;
set use_stat_tables=@save_use_stat_tables;
set @@histogram_size= @save_histogram_size;
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+#
+# MDEV-17255: New optimizer defaults and ANALYZE TABLE
+#
+create table t1 (a int, b int);
+insert into t1(a,b) values (1,2),(1,3),(1,4),(1,5),(2,6),(2,7),(3,8),(3,9),(3,9),(4,10);
+set use_stat_tables= preferably_for_queries;
+#
+# with use_stat_tables= PREFERABLY_FOR_QUERIES
+# analyze table t1 will not collect statistics
+#
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select * from mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+analyze
+select * from t1 where a = 1 and b=3;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 100.00 10.00 Using where
+#
+# with use_stat_tables= PREFERABLY_FOR_QUERIES
+# analyze table t1 will collect statistics if we use PERSISTENT
+# for columns, indexes or everything
+#
+analyze table t1 persistent for columns (a) indexes ();
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+select * from mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+test t1 a 1 4 0.0000 4.0000 2.5000 0 NULL NULL
+# filtered shows that we used the data from stat tables
+analyze
+select * from t1 where a = 1 and b=3;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 25.00 10.00 Using where
+#
+# with use_stat_tables= PREFERABLY
+# analyze table t1 will collect statistics
+#
+set use_stat_tables=PREFERABLY;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+select * from mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+test t1 a 1 4 0.0000 4.0000 2.5000 0 NULL NULL
+test t1 b 2 10 0.0000 4.0000 1.1111 0 NULL NULL
+# filtered shows that we used the data from stat tables
+analyze
+select * from t1 where a=1 and b=3;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 2.78 10.00 Using where
+drop table t1;
+set histogram_size=@tmp_stt_hs, histogram_type=@tmp_stt_ht;
diff --git a/mysql-test/main/stat_tables.test b/mysql-test/main/stat_tables.test
index 93caa47ce79..d845bcd95e7 100644
--- a/mysql-test/main/stat_tables.test
+++ b/mysql-test/main/stat_tables.test
@@ -6,6 +6,8 @@ select @@session.use_stat_tables;
set @save_use_stat_tables=@@use_stat_tables;
set use_stat_tables='preferably';
+set @tmp_stt_hs=@@histogram_size, @tmp_stt_ht=@@histogram_type;
+set histogram_size=0, histogram_type='single_prec_hb';
--disable_warnings
DROP DATABASE IF EXISTS dbt3_s001;
@@ -191,7 +193,7 @@ FLUSH TABLE t1;
SET use_stat_tables='never';
EXPLAIN SELECT * FROM t1;
---move_file $MYSQLTEST_VARDIR/mysqld.1/data/mysql/table_stats.MYD $MYSQLTEST_VARDIR/mysqld.1/data/mysql/table_stats.MYD.save
+--move_file $MYSQLTEST_VARDIR/mysqld.1/data/mysql/table_stats.MAD $MYSQLTEST_VARDIR/mysqld.1/data/mysql/table_stats.MAD.save
FLUSH TABLES;
SET use_stat_tables='preferably';
@@ -200,7 +202,7 @@ EXPLAIN SELECT * FROM t1;
--enable_warnings
# Cleanup
---move_file $MYSQLTEST_VARDIR/mysqld.1/data/mysql/table_stats.MYD.save $MYSQLTEST_VARDIR/mysqld.1/data/mysql/table_stats.MYD
+--move_file $MYSQLTEST_VARDIR/mysqld.1/data/mysql/table_stats.MAD.save $MYSQLTEST_VARDIR/mysqld.1/data/mysql/table_stats.MAD
DROP TABLE t1;
set use_stat_tables=@save_use_stat_tables;
@@ -400,6 +402,8 @@ CREATE OR REPLACE TABLE t1 (pk INT PRIMARY KEY, t TEXT);
SELECT MAX(pk) FROM t1;
DROP TABLE t1;
+set use_stat_tables=@save_use_stat_tables;
+
--echo #
@@ -497,3 +501,46 @@ set @@sql_mode= @save_sql_mode;
set use_stat_tables=@save_use_stat_tables;
set @@histogram_size= @save_histogram_size;
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+
+--echo #
+--echo # MDEV-17255: New optimizer defaults and ANALYZE TABLE
+--echo #
+
+create table t1 (a int, b int);
+insert into t1(a,b) values (1,2),(1,3),(1,4),(1,5),(2,6),(2,7),(3,8),(3,9),(3,9),(4,10);
+set use_stat_tables= preferably_for_queries;
+--echo #
+--echo # with use_stat_tables= PREFERABLY_FOR_QUERIES
+--echo # analyze table t1 will not collect statistics
+--echo #
+
+analyze table t1;
+select * from mysql.column_stats;
+analyze
+select * from t1 where a = 1 and b=3;
+
+--echo #
+--echo # with use_stat_tables= PREFERABLY_FOR_QUERIES
+--echo # analyze table t1 will collect statistics if we use PERSISTENT
+--echo # for columns, indexes or everything
+--echo #
+
+analyze table t1 persistent for columns (a) indexes ();
+select * from mysql.column_stats;
+--echo # filtered shows that we used the data from stat tables
+analyze
+select * from t1 where a = 1 and b=3;
+
+--echo #
+--echo # with use_stat_tables= PREFERABLY
+--echo # analyze table t1 will collect statistics
+--echo #
+
+set use_stat_tables=PREFERABLY;
+analyze table t1;
+select * from mysql.column_stats;
+--echo # filtered shows that we used the data from stat tables
+analyze
+select * from t1 where a=1 and b=3;
+drop table t1;
+set histogram_size=@tmp_stt_hs, histogram_type=@tmp_stt_ht;
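
Note on the MDEV-17255 block added to stat_tables.result/stat_tables.test above: it encodes how use_stat_tables interacts with ANALYZE TABLE, namely that under PREFERABLY_FOR_QUERIES a plain ANALYZE does not refresh mysql.column_stats, an explicit PERSISTENT FOR clause does, and under PREFERABLY a plain ANALYZE collects the statistics again. A condensed sketch of the same sequence on a throwaway table:

    CREATE TABLE eits_demo (a INT, b INT);
    INSERT INTO eits_demo VALUES (1,2),(1,3),(2,4),(3,5);
    SET use_stat_tables = PREFERABLY_FOR_QUERIES;
    ANALYZE TABLE eits_demo;                                        -- no new rows in mysql.column_stats
    ANALYZE TABLE eits_demo PERSISTENT FOR COLUMNS (a) INDEXES ();  -- collects stats for column a only
    SET use_stat_tables = PREFERABLY;
    ANALYZE TABLE eits_demo;                                        -- collects stats for all columns
    SELECT db_name, table_name, column_name
      FROM mysql.column_stats WHERE table_name = 'eits_demo';
    DROP TABLE eits_demo;
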
diff --git a/mysql-test/main/stat_tables_disabled.result b/mysql-test/main/stat_tables_disabled.result
index f57abc34e0c..c974bba5a4b 100644
--- a/mysql-test/main/stat_tables_disabled.result
+++ b/mysql-test/main/stat_tables_disabled.result
@@ -1,10 +1,15 @@
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
SET SESSION STORAGE_ENGINE='InnoDB';
select @@global.use_stat_tables;
@@global.use_stat_tables
-NEVER
+PREFERABLY
select @@session.use_stat_tables;
@@session.use_stat_tables
-NEVER
+PREFERABLY
set @save_use_stat_tables=@@use_stat_tables;
set use_stat_tables='preferably';
DROP DATABASE IF EXISTS dbt3_s001;
@@ -12,6 +17,7 @@ CREATE DATABASE dbt3_s001;
use dbt3_s001;
set @save_optimizer_switch=@@optimizer_switch;
set optimizer_switch='extended_keys=off';
+set optimizer_switch='rowid_filter=off';
#
# Bug mdev-503: optimizer ignores setting use_stat_tables='preferably'
#
@@ -68,3 +74,6 @@ DROP DATABASE dbt3_s001;
use test;
set use_stat_tables=@save_use_stat_tables;
SET SESSION STORAGE_ENGINE=DEFAULT;
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/stat_tables_disabled.test b/mysql-test/main/stat_tables_disabled.test
index c9d923f903b..427cf4874bc 100644
--- a/mysql-test/main/stat_tables_disabled.test
+++ b/mysql-test/main/stat_tables_disabled.test
@@ -1,3 +1,9 @@
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
--source include/have_innodb.inc
SET SESSION STORAGE_ENGINE='InnoDB';
@@ -19,6 +25,7 @@ use dbt3_s001;
set @save_optimizer_switch=@@optimizer_switch;
set optimizer_switch='extended_keys=off';
+set optimizer_switch='rowid_filter=off';
--disable_query_log
--disable_result_log
@@ -76,3 +83,7 @@ set use_stat_tables=@save_use_stat_tables;
SET SESSION STORAGE_ENGINE=DEFAULT;
+
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
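
Note on the stat_tables_disabled changes above (the same pattern appears in stat_tables_innodb below): the test now saves the global InnoDB statistics settings, forces persistent statistics with a large sample size for the duration of the test, and restores the old values at the end, presumably to keep row estimates and EXPLAIN plans stable. The save/set/restore pattern, extracted as a standalone sketch:

    SET @saved_persistent   = @@GLOBAL.innodb_stats_persistent;
    SET @saved_sample_pages = @@GLOBAL.innodb_stats_persistent_sample_pages;
    SET GLOBAL innodb_stats_persistent = 1;
    SET GLOBAL innodb_stats_persistent_sample_pages = 100;
    -- ... run the statistics-sensitive statements here ...
    SET GLOBAL innodb_stats_persistent = @saved_persistent;
    SET GLOBAL innodb_stats_persistent_sample_pages = @saved_sample_pages;
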
diff --git a/mysql-test/main/stat_tables_innodb.result b/mysql-test/main/stat_tables_innodb.result
index 2bc69c24104..9d0ea179755 100644
--- a/mysql-test/main/stat_tables_innodb.result
+++ b/mysql-test/main/stat_tables_innodb.result
@@ -1,6 +1,11 @@
SET SESSION STORAGE_ENGINE='InnoDB';
set @save_optimizer_switch_for_stat_tables_test=@@optimizer_switch;
set optimizer_switch='extended_keys=on';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
select @@global.use_stat_tables;
@@global.use_stat_tables
COMPLEMENTARY
@@ -9,6 +14,8 @@ select @@session.use_stat_tables;
COMPLEMENTARY
set @save_use_stat_tables=@@use_stat_tables;
set use_stat_tables='preferably';
+set @tmp_stt_hs=@@histogram_size, @tmp_stt_ht=@@histogram_type;
+set histogram_size=0, histogram_type='single_prec_hb';
DROP DATABASE IF EXISTS dbt3_s001;
CREATE DATABASE dbt3_s001;
use dbt3_s001;
@@ -67,11 +74,11 @@ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
group by n_name
order by revenue desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_custkey i_o_orderdate 4 NULL 213 Using where; Using temporary; Using filesort
-1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
-1 SIMPLE nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1
-1 SIMPLE supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.customer.c_nationkey 1 Using index
-1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using temporary; Using filesort
+1 SIMPLE nation ref PRIMARY,i_n_regionkey i_n_regionkey 5 dbt3_s001.region.r_regionkey 5
+1 SIMPLE supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 Using index
+1 SIMPLE customer ref PRIMARY,i_c_nationkey i_c_nationkey 5 dbt3_s001.nation.n_nationkey 6 Using index
+1 SIMPLE orders ref|filter PRIMARY,i_o_orderdate,i_o_custkey i_o_custkey|i_o_orderdate 5|4 dbt3_s001.customer.c_custkey 15 (14%) Using where; Using rowid filter
1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
from customer, orders, lineitem, supplier, nation, region
@@ -83,7 +90,7 @@ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
group by n_name
order by revenue desc;
n_name revenue
-PERU 321915.8715
+PERU 321915.87150000007
ARGENTINA 69817.1451
set optimizer_switch=@save_optimizer_switch;
delete from mysql.index_stats;
@@ -198,11 +205,11 @@ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
group by n_name
order by revenue desc;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE orders range PRIMARY,i_o_orderdate,i_o_custkey i_o_orderdate 4 NULL 213 Using where; Using temporary; Using filesort
-1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
-1 SIMPLE nation eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1
-1 SIMPLE supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.customer.c_nationkey 1 Using index
-1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using temporary; Using filesort
+1 SIMPLE nation ref PRIMARY,i_n_regionkey i_n_regionkey 5 dbt3_s001.region.r_regionkey 5
+1 SIMPLE supplier ref PRIMARY,i_s_nationkey i_s_nationkey 5 dbt3_s001.nation.n_nationkey 1 Using index
+1 SIMPLE customer ref PRIMARY,i_c_nationkey i_c_nationkey 5 dbt3_s001.nation.n_nationkey 6 Using index
+1 SIMPLE orders ref|filter PRIMARY,i_o_orderdate,i_o_custkey i_o_custkey|i_o_orderdate 5|4 dbt3_s001.customer.c_custkey 15 (14%) Using where; Using rowid filter
1 SIMPLE lineitem ref PRIMARY,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
select n_name, sum(l_extendedprice * (1 - l_discount)) as revenue
from customer, orders, lineitem, supplier, nation, region
@@ -214,7 +221,7 @@ and r_name = 'AMERICA' and o_orderdate >= date '1995-01-01'
group by n_name
order by revenue desc;
n_name revenue
-PERU 321915.8715
+PERU 321915.87150000007
ARGENTINA 69817.1451
set optimizer_switch=@save_optimizer_switch;
EXPLAIN select o_year,
@@ -234,14 +241,14 @@ and o_orderdate between date '1995-01-01' and date '1996-12-31'
group by o_year
order by o_year;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE orders ALL PRIMARY,i_o_orderdate,i_o_custkey NULL NULL NULL 1500 Using where; Using temporary; Using filesort
-1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
-1 SIMPLE n1 eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1
-1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
-1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 dbt3_s001.orders.o_orderkey 4 Using where
-1 SIMPLE part eq_ref PRIMARY PRIMARY 4 dbt3_s001.lineitem.l_partkey 1 Using where
+1 SIMPLE region ALL PRIMARY NULL NULL NULL 5 Using where; Using temporary; Using filesort
+1 SIMPLE part ALL PRIMARY NULL NULL NULL 200 Using where; Using join buffer (flat, BNL join)
+1 SIMPLE lineitem ref PRIMARY,i_l_suppkey_partkey,i_l_partkey,i_l_suppkey,i_l_orderkey,i_l_orderkey_quantity i_l_suppkey_partkey 5 dbt3_s001.part.p_partkey 30 Using index condition
1 SIMPLE supplier eq_ref PRIMARY,i_s_nationkey PRIMARY 4 dbt3_s001.lineitem.l_suppkey 1 Using where
1 SIMPLE n2 eq_ref PRIMARY PRIMARY 4 dbt3_s001.supplier.s_nationkey 1
+1 SIMPLE orders eq_ref PRIMARY,i_o_orderdate,i_o_custkey PRIMARY 4 dbt3_s001.lineitem.l_orderkey 1 Using where
+1 SIMPLE customer eq_ref PRIMARY,i_c_nationkey PRIMARY 4 dbt3_s001.orders.o_custkey 1 Using where
+1 SIMPLE n1 eq_ref PRIMARY,i_n_regionkey PRIMARY 4 dbt3_s001.customer.c_nationkey 1 Using where
select o_year,
sum(case when nation = 'UNITED STATES' then volume else 0 end) /
sum(volume) as mkt_share
@@ -614,7 +621,7 @@ explain
SELECT * FROM INFORMATION_SCHEMA.PROFILING, mysql.user;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE PROFILING ALL NULL NULL NULL NULL NULL
-1 SIMPLE user ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
+1 SIMPLE global_priv ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
set use_stat_tables=@save_use_stat_tables;
#
@@ -651,6 +658,7 @@ SELECT MAX(pk) FROM t1;
MAX(pk)
NULL
DROP TABLE t1;
+set use_stat_tables=@save_use_stat_tables;
#
# MDEV-17605: SHOW INDEXES with use_stat_tables='preferably'
#
@@ -779,5 +787,64 @@ set @@sql_mode= @save_sql_mode;
set use_stat_tables=@save_use_stat_tables;
set @@histogram_size= @save_histogram_size;
set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+#
+# MDEV-17255: New optimizer defaults and ANALYZE TABLE
+#
+create table t1 (a int, b int);
+insert into t1(a,b) values (1,2),(1,3),(1,4),(1,5),(2,6),(2,7),(3,8),(3,9),(3,9),(4,10);
+set use_stat_tables= preferably_for_queries;
+#
+# with use_stat_tables= PREFERABLY_FOR_QUERIES
+# analyze table t1 will not collect statistics
+#
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
+select * from mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+analyze
+select * from t1 where a = 1 and b=3;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 100.00 10.00 Using where
+#
+# with use_stat_tables= PREFERABLY_FOR_QUERIES
+# analyze table t1 will collect statistics if we use PERSISTENT
+# for columns, indexes or everything
+#
+analyze table t1 persistent for columns (a) indexes ();
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+select * from mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+test t1 a 1 4 0.0000 4.0000 2.5000 0 NULL NULL
+# filtered shows that we used the data from stat tables
+analyze
+select * from t1 where a = 1 and b=3;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 25.00 10.00 Using where
+#
+# with use_stat_tables= PREFERABLY
+# analyze table t1 will collect statistics
+#
+set use_stat_tables=PREFERABLY;
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+select * from mysql.column_stats;
+db_name table_name column_name min_value max_value nulls_ratio avg_length avg_frequency hist_size hist_type histogram
+test t1 a 1 4 0.0000 4.0000 2.5000 0 NULL NULL
+test t1 b 2 10 0.0000 4.0000 1.1111 0 NULL NULL
+# filtered shows that we used the data from stat tables
+analyze
+select * from t1 where a=1 and b=3;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 10 10.00 2.78 10.00 Using where
+drop table t1;
+set histogram_size=@tmp_stt_hs, histogram_type=@tmp_stt_ht;
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
set optimizer_switch=@save_optimizer_switch_for_stat_tables_test;
SET SESSION STORAGE_ENGINE=DEFAULT;
diff --git a/mysql-test/main/stat_tables_innodb.test b/mysql-test/main/stat_tables_innodb.test
index 04e81de8f9d..5a97ad89363 100644
--- a/mysql-test/main/stat_tables_innodb.test
+++ b/mysql-test/main/stat_tables_innodb.test
@@ -5,7 +5,16 @@ SET SESSION STORAGE_ENGINE='InnoDB';
set @save_optimizer_switch_for_stat_tables_test=@@optimizer_switch;
set optimizer_switch='extended_keys=on';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
--source stat_tables.test
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
set optimizer_switch=@save_optimizer_switch_for_stat_tables_test;
diff --git a/mysql-test/main/stat_tables_rbr.result b/mysql-test/main/stat_tables_rbr.result
index 7ae7ade4398..130d1f6da9a 100644
--- a/mysql-test/main/stat_tables_rbr.result
+++ b/mysql-test/main/stat_tables_rbr.result
@@ -1,15 +1,15 @@
#
# Bug mdev-463: assertion failure when running ANALYZE with RBR on
#
-SET GLOBAL use_stat_tables = PREFERABLY;
connect con1,localhost,root,,;
CREATE TABLE t1 (i INT) ENGINE=InnoDB;
+set use_stat_tables= PREFERABLY;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
-SET GLOBAL use_stat_tables = DEFAULT;
+SET use_stat_tables = DEFAULT;
disconnect con1;
connection default;
SET use_stat_tables = PREFERABLY;
diff --git a/mysql-test/main/stat_tables_rbr.test b/mysql-test/main/stat_tables_rbr.test
index 29f7c4e6622..1b6a9603743 100644
--- a/mysql-test/main/stat_tables_rbr.test
+++ b/mysql-test/main/stat_tables_rbr.test
@@ -6,15 +6,14 @@
--echo # Bug mdev-463: assertion failure when running ANALYZE with RBR on
--echo #
-SET GLOBAL use_stat_tables = PREFERABLY;
-
--connect (con1,localhost,root,,)
CREATE TABLE t1 (i INT) ENGINE=InnoDB;
+set use_stat_tables= PREFERABLY;
ANALYZE TABLE t1;
# Cleanup
DROP TABLE t1;
-SET GLOBAL use_stat_tables = DEFAULT;
+SET use_stat_tables = DEFAULT;
--disconnect con1
--connection default
diff --git a/mysql-test/main/statistics.result b/mysql-test/main/statistics.result
index 34a17cf049c..787f0194532 100644
--- a/mysql-test/main/statistics.result
+++ b/mysql-test/main/statistics.result
@@ -1,5 +1,7 @@
drop table if exists t1,t2;
set @save_use_stat_tables=@@use_stat_tables;
+set @save_hist_size=@@histogram_size, @save_hist_type=@@histogram_type;
+set histogram_size=0, histogram_type='single_prec_hb';
DELETE FROM mysql.table_stats;
DELETE FROM mysql.column_stats;
DELETE FROM mysql.index_stats;
@@ -246,7 +248,7 @@ test t1 e 0.01 0.112 0.2250 6.2000 8 DOUBLE_PREC_HB 000005056464E1E1
test t1 f 1 5 0.2000 6.4000 8 DOUBLE_PREC_HB FF3FFF7FFFBFFFBF
DELETE FROM mysql.column_stats;
set histogram_size= 0;
-set histogram_type=default;
+set histogram_type='single_prec_hb';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
@@ -1504,8 +1506,8 @@ hist_size 254
hist_type DOUBLE_PREC_HB
hex(histogram) 1F00A1002B023002350238023F02430249024E02520258025D02630268026E02720276027B02800285028C02920297029D02A102A802AC02B402BC02C402CC02D302DA02E302EA02F102F802010305030C03120319031F03290333033D0343034F03590363036D037803840390039A03A603B303C303D103E003F203020412042404330440045304600472047F049104A204B804C804DE04F2040A0526053F0558056F058E05B305D905F4051306380667068406AB06DA06020731075C079407C507F8072E085E08A508DF0824096909CC092E0A760AD50A400BA90B150CAD0C310D240E130F0E103B11B9126B14F0166B192F1CB71FFF240630483FC567
decode_histogram(hist_type,histogram) 0.00047,0.00198,0.00601,0.00008,0.00008,0.00005,0.00011,0.00006,0.00009,0.00008,0.00006,0.00009,0.00008,0.00009,0.00008,0.00009,0.00006,0.00006,0.00008,0.00008,0.00008,0.00011,0.00009,0.00008,0.00009,0.00006,0.00011,0.00006,0.00012,0.00012,0.00012,0.00012,0.00011,0.00011,0.00014,0.00011,0.00011,0.00011,0.00014,0.00006,0.00011,0.00009,0.00011,0.00009,0.00015,0.00015,0.00015,0.00009,0.00018,0.00015,0.00015,0.00015,0.00017,0.00018,0.00018,0.00015,0.00018,0.00020,0.00024,0.00021,0.00023,0.00027,0.00024,0.00024,0.00027,0.00023,0.00020,0.00029,0.00020,0.00027,0.00020,0.00027,0.00026,0.00034,0.00024,0.00034,0.00031,0.00037,0.00043,0.00038,0.00038,0.00035,0.00047,0.00056,0.00058,0.00041,0.00047,0.00056,0.00072,0.00044,0.00060,0.00072,0.00061,0.00072,0.00066,0.00085,0.00075,0.00078,0.00082,0.00073,0.00108,0.00089,0.00105,0.00105,0.00151,0.00150,0.00110,0.00145,0.00163,0.00160,0.00165,0.00232,0.00201,0.00371,0.00365,0.00383,0.00459,0.00583,0.00662,0.00984,0.00969,0.01080,0.01379,0.02063,0.04308,0.05960,0.15816,0.59464
-set histogram_type=default;
-set histogram_size=default;
+set histogram_type='single_prec_hb';
+set histogram_size=0;
use test;
DROP DATABASE world;
SELECT UPPER(db_name), UPPER(table_name), cardinality
@@ -1600,8 +1602,8 @@ hist_size, hist_type, HEX(histogram)
FROM mysql.column_stats;
db_name table_name column_name min_value max_value nulls_ratio avg_frequency hist_size hist_type HEX(histogram)
test t1 a 1 5 0.0000 1.0000 10 DOUBLE_PREC_HB 0000FF3FFF7FFFBFFFFF
-set histogram_size=default;
-set histogram_type=default;
+set histogram_size=0;
+set histogram_type='single_prec_hb';
drop table t1;
#
# Bug mdev-4369: histogram for a column with many distinct values
@@ -1641,7 +1643,7 @@ hist_size, hist_type, HEX(histogram)
FROM mysql.column_stats;
db_name table_name column_name min_value max_value nulls_ratio avg_frequency hist_size hist_type HEX(histogram)
test t2 id 1 1024 0.0000 8.0000 63 SINGLE_PREC_HB 03070B0F13171B1F23272B2F33373B3F43474B4F53575B5F63676B6F73777B7F83878B8F93979B9FA3A7ABAFB3B7BBBFC3C7CBCFD3D7DBDFE3E7EBEFF3F7FB
-set histogram_size=default;
+set histogram_size=0;
drop table t1, t2;
set use_stat_tables=@save_use_stat_tables;
#
@@ -1757,3 +1759,111 @@ DROP TABLE t1;
#
# End of 10.2 tests
#
+set histogram_size=@save_hist_size, histogram_type=@save_hist_type;
+#
+# Start of 10.4 tests
+#
+#
+# Test analyze_sample_percentage system variable.
+#
+set @save_use_stat_tables=@@use_stat_tables;
+set @save_analyze_sample_percentage=@@analyze_sample_percentage;
+set @save_hist_size=@@histogram_size;
+set session rand_seed1=42;
+set session rand_seed2=62;
+set use_stat_tables=PREFERABLY;
+set histogram_size=10;
+CREATE TABLE t1 (id int);
+INSERT INTO t1 (id) VALUES (1), (1), (1), (1), (1), (1), (1);
+INSERT INTO t1 (id) SELECT id FROM t1;
+INSERT INTO t1 SELECT id+1 FROM t1;
+INSERT INTO t1 SELECT id+2 FROM t1;
+INSERT INTO t1 SELECT id+4 FROM t1;
+INSERT INTO t1 SELECT id+8 FROM t1;
+INSERT INTO t1 SELECT id+16 FROM t1;
+INSERT INTO t1 SELECT id+32 FROM t1;
+INSERT INTO t1 SELECT id+64 FROM t1;
+INSERT INTO t1 SELECT id+128 FROM t1;
+INSERT INTO t1 SELECT id+256 FROM t1;
+INSERT INTO t1 SELECT id+512 FROM t1;
+INSERT INTO t1 SELECT id+1024 FROM t1;
+INSERT INTO t1 SELECT id+2048 FROM t1;
+INSERT INTO t1 SELECT id+4096 FROM t1;
+INSERT INTO t1 SELECT id+9192 FROM t1;
+#
+# This query should show a full table scan analysis.
+#
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+select table_name, column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency,
+DECODE_HISTOGRAM(hist_type, histogram)
+from mysql.column_stats;
+table_name column_name min_value max_value nulls_ratio avg_length avg_frequency DECODE_HISTOGRAM(hist_type, histogram)
+t1 id 1 17384 0.0000 4.0000 14.0000 0.15705,0.15711,0.21463,0.15705,0.15711,0.15706
+set analyze_sample_percentage=0.1;
+#
+# This query will show an inaccurate avg_frequency value.
+#
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+select table_name, column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency,
+DECODE_HISTOGRAM(hist_type, histogram)
+from mysql.column_stats;
+table_name column_name min_value max_value nulls_ratio avg_length avg_frequency DECODE_HISTOGRAM(hist_type, histogram)
+t1 id 111 17026 0.0000 4.0000 10.4739 0.13649,0.14922,0.16921,0.21141,0.18355,0.15012
+#
+# This query will show a better avg_frequency value.
+#
+set analyze_sample_percentage=25;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+select table_name, column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency,
+DECODE_HISTOGRAM(hist_type, histogram)
+from mysql.column_stats;
+table_name column_name min_value max_value nulls_ratio avg_length avg_frequency DECODE_HISTOGRAM(hist_type, histogram)
+t1 id 1 17384 0.0000 4.0000 14.0401 0.15566,0.15590,0.15729,0.21538,0.15790,0.15787
+set analyze_sample_percentage=0;
+#
+# Test self adjusting sampling level.
+#
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+select table_name, column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency,
+DECODE_HISTOGRAM(hist_type, histogram)
+from mysql.column_stats;
+table_name column_name min_value max_value nulls_ratio avg_length avg_frequency DECODE_HISTOGRAM(hist_type, histogram)
+t1 id 1 17384 0.0000 4.0000 13.9812 0.15860,0.15767,0.21515,0.15573,0.15630,0.15654
+#
+# Test record estimation is working properly.
+#
+select count(*) from t1;
+count(*)
+229376
+explain select * from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 229060
+set analyze_sample_percentage=100;
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status Table is already up to date
+select table_name, column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency,
+DECODE_HISTOGRAM(hist_type, histogram)
+from mysql.column_stats;
+table_name column_name min_value max_value nulls_ratio avg_length avg_frequency DECODE_HISTOGRAM(hist_type, histogram)
+t1 id 1 17384 0.0000 4.0000 14.0000 0.15705,0.15711,0.21463,0.15705,0.15711,0.15706
+explain select * from t1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 229376
+drop table t1;
+set analyze_sample_percentage=@save_analyze_sample_percentage;
+set histogram_size=@save_hist_size;
+set use_stat_tables=@save_use_stat_tables;
diff --git a/mysql-test/main/statistics.test b/mysql-test/main/statistics.test
index b2e544064b0..880f2987b50 100644
--- a/mysql-test/main/statistics.test
+++ b/mysql-test/main/statistics.test
@@ -5,7 +5,8 @@ drop table if exists t1,t2;
--enable_warnings
set @save_use_stat_tables=@@use_stat_tables;
-
+set @save_hist_size=@@histogram_size, @save_hist_type=@@histogram_type;
+set histogram_size=0, histogram_type='single_prec_hb';
DELETE FROM mysql.table_stats;
--sorted_result
DELETE FROM mysql.column_stats;
@@ -196,7 +197,7 @@ SELECT db_name, table_name, column_name,
DELETE FROM mysql.column_stats;
set histogram_size= 0;
-set histogram_type=default;
+set histogram_type='single_prec_hb';
ANALYZE TABLE t1;
@@ -657,8 +658,8 @@ FLUSH TABLES;
--query_vertical select UPPER(db_name),UPPER(table_name),UPPER(column_name),min_value,max_value,nulls_ratio,avg_length,avg_frequency,hist_size,hist_type,hex(histogram),decode_histogram(hist_type,histogram) from mysql.column_stats where UPPER(db_name)='WORLD' and UPPER(table_name)='COUNTRYLANGUAGE' and UPPER(column_name) = 'PERCENTAGE';
--query_vertical select UPPER(db_name),UPPER(table_name),UPPER(column_name),min_value,max_value,nulls_ratio,avg_length,avg_frequency,hist_size,hist_type,hex(histogram),decode_histogram(hist_type,histogram) from mysql.column_stats where UPPER(db_name)='WORLD' and UPPER(table_name)='CITY' and UPPER(column_name) = 'POPULATION';
-set histogram_type=default;
-set histogram_size=default;
+set histogram_type='single_prec_hb';
+set histogram_size=0;
use test;
DROP DATABASE world;
@@ -732,8 +733,8 @@ select db_name, table_name, column_name,
hist_size, hist_type, HEX(histogram)
FROM mysql.column_stats;
-set histogram_size=default;
-set histogram_type=default;
+set histogram_size=0;
+set histogram_type='single_prec_hb';
drop table t1;
@@ -776,7 +777,7 @@ select db_name, table_name, column_name,
hist_size, hist_type, HEX(histogram)
FROM mysql.column_stats;
-set histogram_size=default;
+set histogram_size=0;
drop table t1, t2;
@@ -898,3 +899,96 @@ DROP TABLE t1;
--echo #
--echo # End of 10.2 tests
--echo #
+set histogram_size=@save_hist_size, histogram_type=@save_hist_type;
+
+
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # Test analyze_sample_percentage system variable.
+--echo #
+set @save_use_stat_tables=@@use_stat_tables;
+set @save_analyze_sample_percentage=@@analyze_sample_percentage;
+set @save_hist_size=@@histogram_size;
+
+set session rand_seed1=42;
+set session rand_seed2=62;
+
+set use_stat_tables=PREFERABLY;
+set histogram_size=10;
+
+CREATE TABLE t1 (id int);
+INSERT INTO t1 (id) VALUES (1), (1), (1), (1), (1), (1), (1);
+INSERT INTO t1 (id) SELECT id FROM t1;
+INSERT INTO t1 SELECT id+1 FROM t1;
+INSERT INTO t1 SELECT id+2 FROM t1;
+INSERT INTO t1 SELECT id+4 FROM t1;
+INSERT INTO t1 SELECT id+8 FROM t1;
+INSERT INTO t1 SELECT id+16 FROM t1;
+INSERT INTO t1 SELECT id+32 FROM t1;
+INSERT INTO t1 SELECT id+64 FROM t1;
+INSERT INTO t1 SELECT id+128 FROM t1;
+INSERT INTO t1 SELECT id+256 FROM t1;
+INSERT INTO t1 SELECT id+512 FROM t1;
+INSERT INTO t1 SELECT id+1024 FROM t1;
+INSERT INTO t1 SELECT id+2048 FROM t1;
+INSERT INTO t1 SELECT id+4096 FROM t1;
+INSERT INTO t1 SELECT id+9192 FROM t1;
+
+--echo #
+--echo # This query should show a full table scan analysis.
+--echo #
+ANALYZE TABLE t1;
+select table_name, column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency,
+ DECODE_HISTOGRAM(hist_type, histogram)
+from mysql.column_stats;
+
+set analyze_sample_percentage=0.1;
+
+--echo #
+--echo # This query will show an inaccurate avg_frequency value.
+--echo #
+ANALYZE TABLE t1;
+select table_name, column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency,
+ DECODE_HISTOGRAM(hist_type, histogram)
+from mysql.column_stats;
+
+--echo #
+--echo # This query will show a better avg_frequency value.
+--echo #
+set analyze_sample_percentage=25;
+ANALYZE TABLE t1;
+select table_name, column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency,
+ DECODE_HISTOGRAM(hist_type, histogram)
+from mysql.column_stats;
+
+
+set analyze_sample_percentage=0;
+--echo #
+--echo # Test self-adjusting sampling level.
+--echo #
+ANALYZE TABLE t1;
+select table_name, column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency,
+ DECODE_HISTOGRAM(hist_type, histogram)
+from mysql.column_stats;
+--echo #
+--echo # Test record estimation is working properly.
+--echo #
+select count(*) from t1;
+explain select * from t1;
+
+set analyze_sample_percentage=100;
+ANALYZE TABLE t1;
+select table_name, column_name, min_value, max_value, nulls_ratio, avg_length, avg_frequency,
+ DECODE_HISTOGRAM(hist_type, histogram)
+from mysql.column_stats;
+explain select * from t1;
+
+
+drop table t1;
+set analyze_sample_percentage=@save_analyze_sample_percentage;
+set histogram_size=@save_hist_size;
+set use_stat_tables=@save_use_stat_tables;
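A minimal usage sketch (not part of the patch, and assuming only the semantics exercised by the test above: 100 forces a full-table scan, small values trade avg_frequency accuracy for speed, and 0 lets the server pick the sampling level itself; table and column names are illustrative):

set session use_stat_tables=PREFERABLY;
set session histogram_size=10;
set session analyze_sample_percentage=25;   -- sample roughly 25% of the rows
analyze table t1;                           -- collect engine-independent stats from the sample
select column_name, avg_frequency, decode_histogram(hist_type, histogram)
from mysql.column_stats
where db_name=database() and table_name='t1';
set session analyze_sample_percentage=0;    -- 0 = let the server choose the sampling level
analyze table t1;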
diff --git a/mysql-test/main/status.result b/mysql-test/main/status.result
index 688d8acee1a..37c551328f0 100644
--- a/mysql-test/main/status.result
+++ b/mysql-test/main/status.result
@@ -324,7 +324,7 @@ Handler_mrr_key_refills 0
Handler_mrr_rowid_refills 0
Handler_prepare 0
Handler_read_first 0
-Handler_read_key 4
+Handler_read_key 9
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
@@ -403,9 +403,9 @@ Table_open_cache_overflows 0
SHOW STATUS LIKE 'Table_open_cache%';
Variable_name Value
Table_open_cache_active_instances 1
-Table_open_cache_hits 30
-Table_open_cache_misses 15
-Table_open_cache_overflows 5
+Table_open_cache_hits 72
+Table_open_cache_misses 18
+Table_open_cache_overflows 8
FLUSH TABLES;
FLUSH STATUS;
SET @@global.table_open_cache= @old_table_open_cache;
diff --git a/mysql-test/main/str_to_datetime_457.result b/mysql-test/main/str_to_datetime_457.result
index 4fd0d00691c..e365c303c81 100644
--- a/mysql-test/main/str_to_datetime_457.result
+++ b/mysql-test/main/str_to_datetime_457.result
@@ -1,6 +1,6 @@
select cast('01:02:03 ' as time), cast('01:02:03 ' as time);
cast('01:02:03 ' as time) cast('01:02:03 ' as time)
-01:02:03 00:00:00
+01:02:03 01:02:03
select cast('2002-011-012' as date), cast('2002.11.12' as date), cast('2002.011.012' as date);
cast('2002-011-012' as date) cast('2002.11.12' as date) cast('2002.011.012' as date)
2002-11-12 2002-11-12 2002-11-12
@@ -17,10 +17,7 @@ Warnings:
Warning 1292 Incorrect datetime value: '0'
select extract(hour from '100000:02:03'), extract(hour from '100000:02:03 ');
extract(hour from '100000:02:03') extract(hour from '100000:02:03 ')
-NULL NULL
-Warnings:
-Warning 1292 Truncated incorrect time value: '100000:02:03'
-Warning 1292 Truncated incorrect time value: '100000:02:03 '
+16 16
#
# backward compatibility craziness
#
diff --git a/mysql-test/main/subselect.result b/mysql-test/main/subselect.result
index 2f7c5039c67..3bd23a49cdb 100644
--- a/mysql-test/main/subselect.result
+++ b/mysql-test/main/subselect.result
@@ -179,7 +179,8 @@ select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1);
a b
1 7
2 7
-(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1)) union (select * from t4 order by a limit 2) limit 3;
+(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1))
+union (select * from t4 order by a limit 2) order by a limit 3;
a b
1 7
2 7
@@ -343,7 +344,7 @@ patient_uq clinic_uq
explain extended select * from t6 where exists (select * from t7 where uq = clinic_uq);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t7 index PRIMARY PRIMARY 4 NULL 2 100.00 Using index
-1 PRIMARY t6 ALL i1 NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t6 ALL i1 NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1276 Field or reference 'test.t6.clinic_uq' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t6`.`patient_uq` AS `patient_uq`,`test`.`t6`.`clinic_uq` AS `clinic_uq` from `test`.`t7` join `test`.`t6` where `test`.`t6`.`clinic_uq` = `test`.`t7`.`uq`
@@ -1440,7 +1441,7 @@ a
explain extended select * from t2 where t2.a in (select a from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 index PRIMARY PRIMARY 4 NULL 4 75.00 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00 Using index
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a`
select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
@@ -1450,7 +1451,7 @@ a
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
@@ -1460,8 +1461,8 @@ a
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 100.00 Using index
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00
+1 PRIMARY t3 index PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t3` join `test`.`t2` where `test`.`t3`.`a` = `test`.`t1`.`b` and `test`.`t1`.`a` = `test`.`t2`.`a`
drop table t1, t2, t3;
@@ -1629,7 +1630,7 @@ Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<expr_cache><`test`.`t
explain extended select s1, s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2') from t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL s1 6 NULL 3 100.00 Using index
-2 SUBQUERY t2 index_subquery s1 s1 6 func 2 100.00 Using index; Using where; Full scan on NULL key
+2 SUBQUERY t2 index_subquery s1 s1 6 func 2 50.00 Using index; Using where; Full scan on NULL key
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<expr_cache><`test`.`t1`.`s1`>(<in_optimizer>(`test`.`t1`.`s1`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`s1`) in t2 on s1 checking NULL where `test`.`t2`.`s1` < 'a2' having trigcond(`test`.`t2`.`s1` is null))))) AS `s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2')` from `test`.`t1`
drop table t1,t2;
@@ -3102,13 +3103,18 @@ create table t1(a int, primary key (a));
insert into t1 values (10);
create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
+insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
explain SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 2 Using where
+2 SUBQUERY t2 range b b 40 NULL 3 Using where
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
@@ -3120,7 +3126,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 2 Using index condition
+2 SUBQUERY t2 range b b 40 NULL 3 Using index condition
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;
@@ -3707,7 +3713,7 @@ ORDER BY t1.t DESC LIMIT 1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 system NULL NULL NULL NULL 1
1 PRIMARY t1 index NULL PRIMARY 13 NULL 11 Using where; Using index
-2 SUBQUERY t1 range PRIMARY PRIMARY 13 NULL 5 Using where; Using index
+2 SUBQUERY t1 range PRIMARY PRIMARY 13 NULL 6 Using where; Using index
SELECT * FROM t1,t2
WHERE t1.t = (SELECT t1.t FROM t1
WHERE t1.t < t2.t AND t1.i2=1 AND t2.i1=t1.i1
@@ -3726,7 +3732,7 @@ SELECT sql_no_cache * FROM t1 WHERE NOT EXISTS
i
SELECT * FROM t1
WHERE NOT EXISTS (((SELECT i FROM t1) UNION (SELECT i FROM t1)));
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION (SELECT i FROM t1)))' at line 2
+i
explain select ((select t11.i from t1 t11) union (select t12.i from t1 t12))
from t1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'union (select t12.i from t1 t12))
@@ -4976,7 +4982,7 @@ UNIQUE KEY b (b,c),
KEY c (c),
KEY b_2 (b)
);
-INSERT INTO t3 VALUES (1,1,1), (2,32,1);
+INSERT INTO t3 VALUES (1,1,1), (2,32,1), (3,33,1), (4,34,2);
explain
SELECT t1.a, (SELECT 1 FROM t2 WHERE t2.b=t3.c AND t2.c=t1.a ORDER BY t2.d LIMIT 1) AS incorrect FROM t1, t3 WHERE t3.b=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
@@ -5186,35 +5192,23 @@ a 1
1 1
2 1
SELECT * FROM t1 JOIN ((SELECT 1 UNION SELECT 1)) ON 1;
-ERROR 42000: Every derived table must have its own alias
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a UNION SELECT 1) ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1) ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1) ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a UNION SELECT 1)) ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1)) ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1)) ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a) t1a ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 't1a ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') t1a ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a)) t1a ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 't1a ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')) t1a ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a) ON 1;
-a a
-1 1
-2 1
-1 2
-2 2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a)) ON 1;
-a a
-1 1
-2 1
-1 2
-2 2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')) ON 1' at line 1
SELECT * FROM (t1 t1a);
-a
-1
-2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1
SELECT * FROM ((t1 t1a));
-a
-1
-2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '))' at line 1
SELECT * FROM t1 JOIN (SELECT 1 t1a) alias ON 1;
a t1a
1 1
@@ -5289,11 +5283,14 @@ SELECT ( SELECT a FROM t1 WHERE a = 1 UNION SELECT 1 ), a FROM t1;
SELECT * FROM t2 WHERE (a, b) IN (SELECT a, b FROM t2);
a b
SELECT 1 UNION ( SELECT 1 UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
+1
+1
( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1' at line 1
+1
+1
SELECT ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) )
+1
SELECT ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1' at line 1
SELECT ( SELECT 1 UNION SELECT 1 UNION SELECT 1 );
@@ -5303,9 +5300,9 @@ SELECT ((SELECT 1 UNION SELECT 1 UNION SELECT 1));
((SELECT 1 UNION SELECT 1 UNION SELECT 1))
1
SELECT * FROM ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: Every derived table must have its own alias
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 ) a;
1
1
@@ -5313,19 +5310,25 @@ SELECT * FROM ( SELECT 1 UNION SELECT 1 UNION SELECT 1 ) a;
1
1
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ALL ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ANY ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a IN ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
SELECT * FROM t1 WHERE a = ALL ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1 )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ANY ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1 )' at line 1
+a
+1
SELECT * FROM t1 WHERE a IN ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION SELECT 1 UNION SELECT 1 );
@@ -7021,8 +7024,8 @@ SELECT * FROM t1
WHERE a = (SELECT MAX(b) FROM t2 WHERE c IN (SELECT MAX(d) FROM t3)) OR a = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index a a 5 NULL 2 Using where; Using index
-2 SUBQUERY <subquery3> ALL distinct_key NULL NULL NULL 1
-2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <subquery3> eq_ref distinct_key distinct_key 4 test.t2.c 1
3 MATERIALIZED t3 ALL NULL NULL NULL NULL 2
SELECT * FROM t1
WHERE a = (SELECT MAX(b) FROM t2 WHERE c IN (SELECT MAX(d) FROM t3)) OR a = 10;
@@ -7081,12 +7084,6 @@ call procedure2();
ControlRev
NULL
drop procedure procedure2;
-SELECT
-(SELECT user FROM mysql.user
-WHERE h.host in (SELECT host FROM mysql.user)
-) AS sq
-FROM mysql.host h GROUP BY h.host;
-sq
#
# MDEV-7846:Server crashes in Item_subselect::fix
#_fields or fails with Thread stack overrun
@@ -7299,3 +7296,18 @@ pk i c pk i c
1 10 foo 1 10 foo
DROP TABLE t;
# End of 10.2 tests
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16861 Split Item::update_null_value() into a new virtual method in Type_handler
+#
+SELECT ROW(1,2) = EXISTS (SELECT 1);
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+SELECT ROW(1,2) = 1 IN (SELECT 1 UNION SELECT 2);
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+SELECT ROW(1,2) = (1 = ANY (SELECT 1 UNION SELECT 2));
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/subselect.test b/mysql-test/main/subselect.test
index 189dde382dd..dd80b789516 100644
--- a/mysql-test/main/subselect.test
+++ b/mysql-test/main/subselect.test
@@ -99,7 +99,8 @@ select (select a from t3), a from t2;
select * from t2 where t2.a=(select a from t1);
insert into t3 values (6),(7),(3);
select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1);
-(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1)) union (select * from t4 order by a limit 2) limit 3;
+(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1))
+union (select * from t4 order by a limit 2) order by a limit 3;
(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1)) union (select * from t4 where t4.b=(select max(t2.a)*4 from t2) order by a);
explain extended (select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1)) union (select * from t4 where t4.b=(select max(t2.a)*4 from t2) order by a);
select (select a from t3 where a<t2.a*4 order by 1 desc limit 1), a from t2;
@@ -2025,6 +2026,8 @@ insert into t1 values (10);
create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
+insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
+analyze table t1;
explain SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
@@ -2604,8 +2607,6 @@ SELECT sql_no_cache * FROM t1 WHERE NOT EXISTS
(SELECT i FROM t1)
);
-#TODO:not supported
---error ER_PARSE_ERROR
SELECT * FROM t1
WHERE NOT EXISTS (((SELECT i FROM t1) UNION (SELECT i FROM t1)));
@@ -3879,7 +3880,7 @@ CREATE TABLE t3 (
KEY b_2 (b)
);
-INSERT INTO t3 VALUES (1,1,1), (2,32,1);
+INSERT INTO t3 VALUES (1,1,1), (2,32,1), (3,33,1), (4,34,2);
explain
SELECT t1.a, (SELECT 1 FROM t2 WHERE t2.b=t3.c AND t2.c=t1.a ORDER BY t2.d LIMIT 1) AS incorrect FROM t1, t3 WHERE t3.b=t1.a;
@@ -4313,7 +4314,7 @@ SELECT * FROM (SELECT 1 a UNION SELECT 1 a ORDER BY a LIMIT 1) t1a;
# aliases after.
#
SELECT * FROM t1 JOIN (SELECT 1 UNION SELECT 1) alias ON 1;
---error ER_DERIVED_MUST_HAVE_ALIAS
+--error ER_PARSE_ERROR
SELECT * FROM t1 JOIN ((SELECT 1 UNION SELECT 1)) ON 1;
--error ER_PARSE_ERROR
SELECT * FROM t1 JOIN (t1 t1a UNION SELECT 1) ON 1;
@@ -4324,10 +4325,14 @@ SELECT * FROM t1 JOIN (t1 t1a) t1a ON 1;
--error ER_PARSE_ERROR
SELECT * FROM t1 JOIN ((t1 t1a)) t1a ON 1;
+--error ER_PARSE_ERROR
SELECT * FROM t1 JOIN (t1 t1a) ON 1;
+--error ER_PARSE_ERROR
SELECT * FROM t1 JOIN ((t1 t1a)) ON 1;
+--error ER_PARSE_ERROR
SELECT * FROM (t1 t1a);
+--error ER_PARSE_ERROR
SELECT * FROM ((t1 t1a));
SELECT * FROM t1 JOIN (SELECT 1 t1a) alias ON 1;
@@ -4391,12 +4396,9 @@ SELECT * FROM t2 WHERE (a, b) IN (SELECT a, b FROM t2);
# Make sure the parser does not allow nested UNIONs anywhere
---error ER_PARSE_ERROR
SELECT 1 UNION ( SELECT 1 UNION SELECT 1 );
---error ER_PARSE_ERROR
( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
---error ER_PARSE_ERROR
SELECT ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
--error ER_PARSE_ERROR
SELECT ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
@@ -4405,25 +4407,19 @@ SELECT ((SELECT 1 UNION SELECT 1 UNION SELECT 1));
--error ER_PARSE_ERROR
SELECT * FROM ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
---error ER_DERIVED_MUST_HAVE_ALIAS
+--error ER_PARSE_ERROR
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 ) a;
SELECT * FROM ( SELECT 1 UNION SELECT 1 UNION SELECT 1 ) a;
---error ER_PARSE_ERROR
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
---error ER_PARSE_ERROR
SELECT * FROM t1 WHERE a = ALL ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
---error ER_PARSE_ERROR
SELECT * FROM t1 WHERE a = ANY ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
---error ER_PARSE_ERROR
SELECT * FROM t1 WHERE a IN ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
--error ER_PARSE_ERROR
SELECT * FROM t1 WHERE a = ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
---error ER_PARSE_ERROR
SELECT * FROM t1 WHERE a = ALL ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
---error ER_PARSE_ERROR
SELECT * FROM t1 WHERE a = ANY ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
--error ER_PARSE_ERROR
SELECT * FROM t1 WHERE a IN ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
@@ -5933,13 +5929,6 @@ call procedure2();
drop procedure procedure2;
-SELECT
- (SELECT user FROM mysql.user
- WHERE h.host in (SELECT host FROM mysql.user)
- ) AS sq
-FROM mysql.host h GROUP BY h.host;
-
-
--echo #
--echo # MDEV-7846:Server crashes in Item_subselect::fix
--echo #_fields or fails with Thread stack overrun
@@ -6166,3 +6155,23 @@ SELECT * FROM t t1 RIGHT JOIN t t2 ON (t2.pk = t1.pk)
DROP TABLE t;
--echo # End of 10.2 tests
+
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-16861 Split Item::update_null_value() into a new virtual method in Type_handler
+--echo #
+
+--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION
+SELECT ROW(1,2) = EXISTS (SELECT 1);
+--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION
+SELECT ROW(1,2) = 1 IN (SELECT 1 UNION SELECT 2);
+--error ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION
+SELECT ROW(1,2) = (1 = ANY (SELECT 1 UNION SELECT 2));
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
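For context on the MDEV-16861 cases above, a rough sketch (assuming nothing beyond what the error text in the results states): EXISTS and the IN/ANY predicates evaluate to a single boolean, so equating them with the two-column ROW(1,2) pairs a row operand with a boolean one, which the server reports as ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION, while comparisons of matching kinds remain valid:

select exists (select 1);               -- boolean scalar result, allowed
select row(1,2) = row(1,2);             -- row compared with a row of the same degree, allowed
select row(1,2) = exists (select 1);    -- row compared with a boolean:
                                        -- ER_ILLEGAL_PARAMETER_DATA_TYPES2_FOR_OPERATION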
diff --git a/mysql-test/main/subselect2.result b/mysql-test/main/subselect2.result
index cae0f2286c1..0e71f22e52e 100644
--- a/mysql-test/main/subselect2.result
+++ b/mysql-test/main/subselect2.result
@@ -132,7 +132,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where
1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where
1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where
-1 PRIMARY t3 eq_ref PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX PRIMARY 34 test.t3.PARENTID 1 Using where
+1 PRIMARY t3 ref|filter PRIMARY,FFOLDERID_IDX,CMFLDRPARNT_IDX FFOLDERID_IDX|CMFLDRPARNT_IDX 34|35 test.t3.PARENTID 1 (29%) Using where; Using rowid filter
drop table t1, t2, t3, t4;
CREATE TABLE t1 (a int(10) , PRIMARY KEY (a)) Engine=InnoDB;
INSERT INTO t1 VALUES (1),(2);
@@ -286,8 +286,8 @@ WHERE date < '2012-12-12 12:12:12'
ORDER BY mirror_date ASC
) AS calculated_result;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2
-2 DERIVED t1 range date date 6 NULL 2 Using index condition; Using where; Rowid-ordered scan; Using filesort
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3
+2 DERIVED t1 ALL date NULL NULL NULL 3 Using where; Using filesort
SELECT * FROM (
SELECT node_uid, date, mirror_date, @result := 0 AS result
FROM t1
@@ -309,8 +309,8 @@ WHERE date < '2012-12-12 12:12:12'
ORDER BY mirror_date ASC
) AS calculated_result;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 2
-2 DERIVED t1 range date date 6 NULL 2 Using index condition; Using where; Using filesort
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL 3
+2 DERIVED t1 ALL date NULL NULL NULL 3 Using where; Using filesort
SELECT * FROM (
SELECT node_uid, date, mirror_date, @result := 0 AS result
FROM t1
@@ -413,6 +413,6 @@ f1 test
1 1
2 1
Warnings:
-Warning 1292 Incorrect datetime value: '1'
-Warning 1292 Incorrect datetime value: '2'
+Warning 1292 Truncated incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '2'
DROP TABLE t1,t2;
diff --git a/mysql-test/main/subselect_exists2in.result b/mysql-test/main/subselect_exists2in.result
index 36ca0bf82f2..029ac22a486 100644
--- a/mysql-test/main/subselect_exists2in.result
+++ b/mysql-test/main/subselect_exists2in.result
@@ -142,6 +142,7 @@ create index idx_t1_1 on t1 (a1,a2,b,c);
create index idx_t1_2 on t1 (a1,a2,b);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
create table t2 (
a1 char(64), a2 char(64) not null, b char(16), c char(16), d char(16), dummy char(64) default ' '
@@ -169,6 +170,7 @@ create index idx_t2_1 on t2 (a1,a2,b,c);
create index idx_t2_2 on t2 (a1,a2,b);
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
create table t3 (
a1 char(1), a2 char(1), b char(1), c char(4) not null, d char(3), dummy char(1) default ' '
@@ -230,6 +232,7 @@ create index idx_t3_1 on t3 (a1,a2,b,c);
create index idx_t3_2 on t3 (a1,a2,b);
analyze table t3;
Table Op Msg_type Msg_text
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
explain select a1,a2,b,c,min(c), max(c) from t1
where exists ( select * from t2
diff --git a/mysql-test/main/subselect_exists2in_costmat.result b/mysql-test/main/subselect_exists2in_costmat.result
index a46996d9bad..1c9574aafd3 100644
--- a/mysql-test/main/subselect_exists2in_costmat.result
+++ b/mysql-test/main/subselect_exists2in_costmat.result
@@ -37,6 +37,8 @@ create index Language on CountryLanguage(Language);
create index CityName on City(Name);
alter table City change population population int(11) null default 0;
select max(id) from City into @max_city_id;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert into City values (@max_city_id + 1,'Kilifarevo','BGR',NULL);
SELECT COUNT(*) FROM Country;
COUNT(*)
@@ -63,7 +65,7 @@ Name LIKE 'L%') AND
surfacearea > 1000000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country ALL Name,SurfaceArea NULL NULL NULL 239 Using where
-2 MATERIALIZED City ALL Population,Country NULL NULL NULL 4080 Using where
+2 MATERIALIZED City ALL Population,Country NULL NULL NULL 4079 Using where
SELECT Name FROM Country
WHERE (EXISTS (select 1 from City where City.Population > 100000 and
Code = Country) OR
diff --git a/mysql-test/main/subselect_extra.result b/mysql-test/main/subselect_extra.result
index dbcf00268c2..c654fdfca13 100644
--- a/mysql-test/main/subselect_extra.result
+++ b/mysql-test/main/subselect_extra.result
@@ -132,6 +132,7 @@ create index idx_t1_1 on t1 (a1,a2,b,c);
create index idx_t1_2 on t1 (a1,a2,b);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
create table t2 (
a1 char(64), a2 char(64) not null, b char(16), c char(16), d char(16), dummy char(64) default ' '
@@ -159,6 +160,7 @@ create index idx_t2_1 on t2 (a1,a2,b,c);
create index idx_t2_2 on t2 (a1,a2,b);
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
create table t3 (
a1 char(1), a2 char(1), b char(1), c char(4) not null, d char(3), dummy char(1) default ' '
@@ -220,6 +222,7 @@ create index idx_t3_1 on t3 (a1,a2,b,c);
create index idx_t3_2 on t3 (a1,a2,b);
analyze table t3;
Table Op Msg_type Msg_text
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
explain select a1,a2,b,c,min(c), max(c) from t1
where exists ( select * from t2
@@ -302,6 +305,7 @@ INSERT INTO t1 SELECT a + 64,b FROM t1;
INSERT INTO t1 SELECT a + 128,b FROM t1 limit 16;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN SELECT 1 FROM t1 WHERE a IN
(SELECT a FROM t1 USE INDEX (i2) IGNORE INDEX (i2));
diff --git a/mysql-test/main/subselect_extra_no_semijoin.result b/mysql-test/main/subselect_extra_no_semijoin.result
index 49a1431eb9b..faeaf75c590 100644
--- a/mysql-test/main/subselect_extra_no_semijoin.result
+++ b/mysql-test/main/subselect_extra_no_semijoin.result
@@ -136,6 +136,7 @@ create index idx_t1_1 on t1 (a1,a2,b,c);
create index idx_t1_2 on t1 (a1,a2,b);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
create table t2 (
a1 char(64), a2 char(64) not null, b char(16), c char(16), d char(16), dummy char(64) default ' '
@@ -163,6 +164,7 @@ create index idx_t2_1 on t2 (a1,a2,b,c);
create index idx_t2_2 on t2 (a1,a2,b);
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
create table t3 (
a1 char(1), a2 char(1), b char(1), c char(4) not null, d char(3), dummy char(1) default ' '
@@ -224,6 +226,7 @@ create index idx_t3_1 on t3 (a1,a2,b,c);
create index idx_t3_2 on t3 (a1,a2,b);
analyze table t3;
Table Op Msg_type Msg_text
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status Table is already up to date
explain select a1,a2,b,c,min(c), max(c) from t1
where exists ( select * from t2
@@ -306,6 +309,7 @@ INSERT INTO t1 SELECT a + 64,b FROM t1;
INSERT INTO t1 SELECT a + 128,b FROM t1 limit 16;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN SELECT 1 FROM t1 WHERE a IN
(SELECT a FROM t1 USE INDEX (i2) IGNORE INDEX (i2));
diff --git a/mysql-test/main/subselect_innodb.result b/mysql-test/main/subselect_innodb.result
index ec7f2c0a3d5..8e09be9b705 100644
--- a/mysql-test/main/subselect_innodb.result
+++ b/mysql-test/main/subselect_innodb.result
@@ -458,7 +458,7 @@ EXPLAIN
SELECT * FROM t1 WHERE EXISTS ( SELECT b FROM t2, t3 GROUP BY b HAVING b != 3 );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
-2 SUBQUERY t2 index NULL PRIMARY 4 NULL 1 Using index; Using temporary; Using filesort
+2 SUBQUERY t2 index PRIMARY PRIMARY 4 NULL 1 Using where; Using index; Using temporary; Using filesort
2 SUBQUERY t3 ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
SELECT * FROM t1 WHERE EXISTS ( SELECT b FROM t2, t3 GROUP BY b HAVING b != 3 );
a
@@ -561,6 +561,7 @@ update t2 set key2=key1;
alter table t2 add key(key2);
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
flush tables;
# Table tsubq must use 'ref' + Using filesort (not 'index' w/o filesort)
@@ -615,3 +616,36 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
Warnings:
Note 1003 select `test`.`t1`.`f1` AS `f1`,`test`.`t2`.`f2` AS `f2`,`test`.`t3`.`f3` AS `f3` from `test`.`t1` join `test`.`t2` semi join (`test`.`t4`) join `test`.`t3` where `test`.`t4`.`f4` = 1 and `test`.`t1`.`f1` >= `test`.`t2`.`f2`
DROP TABLE t1,t2,t3,t4;
+#
+# MDEV-17362: SIGSEGV in JOIN::optimize_inner or Assertion `fixed == 0'
+# failed in Item_equal::fix_fields, server crashes after 2nd execution
+# of PS
+#
+create table t1 (a int, b int);
+create table t2 (x int, y int);
+insert into t1 values (1,1),(2,2);
+insert into t2 values (1,1),(2,2),(2,3);
+# here we can see condition pushdown (see HAVING):
+prepare stmt from "
+explain extended
+SELECT * FROM t1
+WHERE a = b
+ AND (a,b) IN (SELECT t2.x, COUNT(t2.y) FROM t2 WHERE 1=2 GROUP BY t2.x);";
+execute stmt;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+2 MATERIALIZED NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+Warnings:
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (/* select#2 */ select `test`.`t2`.`x`,count(`test`.`t2`.`y`) from `test`.`t2` where 0 group by `test`.`t2`.`x` having `COUNT(t2.y)` = `test`.`t2`.`x`) join `test`.`t1` where 0
+# here re-execution of the pushdown does not crash:
+prepare stmt from "
+SELECT * FROM t1
+WHERE a = b
+ AND (a,b) IN (SELECT t2.x, COUNT(t2.y) FROM t2 WHERE 1=2 GROUP BY t2.x);";
+execute stmt;
+a b
+execute stmt;
+a b
+execute stmt;
+a b
+drop table t1,t2;
diff --git a/mysql-test/main/subselect_innodb.test b/mysql-test/main/subselect_innodb.test
index 544bcd994ed..b8d12d04a5e 100644
--- a/mysql-test/main/subselect_innodb.test
+++ b/mysql-test/main/subselect_innodb.test
@@ -611,3 +611,35 @@ FROM t1
DROP TABLE t1,t2,t3,t4;
+--echo #
+--echo # MDEV-17362: SIGSEGV in JOIN::optimize_inner or Assertion `fixed == 0'
+--echo # failed in Item_equal::fix_fields, server crashes after 2nd execution
+--echo # of PS
+--echo #
+
+create table t1 (a int, b int);
+create table t2 (x int, y int);
+
+insert into t1 values (1,1),(2,2);
+insert into t2 values (1,1),(2,2),(2,3);
+
+--echo # here we can see condition pushdown (see HAVING):
+prepare stmt from "
+explain extended
+SELECT * FROM t1
+WHERE a = b
+ AND (a,b) IN (SELECT t2.x, COUNT(t2.y) FROM t2 WHERE 1=2 GROUP BY t2.x);";
+
+execute stmt;
+
+--echo # here re-execution of the pushdown does not crash:
+prepare stmt from "
+SELECT * FROM t1
+WHERE a = b
+ AND (a,b) IN (SELECT t2.x, COUNT(t2.y) FROM t2 WHERE 1=2 GROUP BY t2.x);";
+
+execute stmt;
+execute stmt;
+execute stmt;
+
+drop table t1,t2;
diff --git a/mysql-test/main/subselect_mat.result b/mysql-test/main/subselect_mat.result
index 3ee904b8d9f..482833dc967 100644
--- a/mysql-test/main/subselect_mat.result
+++ b/mysql-test/main/subselect_mat.result
@@ -39,6 +39,20 @@ create index it3i3 on t3i (c1, c2);
insert into t1i select * from t1;
insert into t2i select * from t2;
insert into t3i select * from t3;
+analyze table t1,t2,t3,t1i,t2i,t3i;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
+test.t1i analyze status Engine-independent statistics collected
+test.t1i analyze status Table is already up to date
+test.t2i analyze status Engine-independent statistics collected
+test.t2i analyze status Table is already up to date
+test.t3i analyze status Engine-independent statistics collected
+test.t3i analyze status Table is already up to date
set @@optimizer_switch='materialization=on,in_to_exists=off,firstmatch=off';
/******************************************************************************
* Simple tests.
@@ -48,7 +62,7 @@ explain extended
select * from t1 where a1 in (select b1 from t2 where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 99.22 Using where
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`>(<in_optimizer>(`test`.`t1`.`a1`,`test`.`t1`.`a1` in ( <materialize> (/* select#2 */ select `test`.`t2`.`b1` from `test`.`t2` where `test`.`t2`.`b1` > '0' ), <primary_index_lookup>(`test`.`t1`.`a1` in <temporary table> on distinct_key where `test`.`t1`.`a1` = `<subquery2>`.`b1`))))
select * from t1 where a1 in (select b1 from t2 where b1 > '0');
@@ -59,7 +73,7 @@ explain extended
select * from t1 where a1 in (select b1 from t2 where b1 > '0' group by b1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 99.22 Using where
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`>(<in_optimizer>(`test`.`t1`.`a1`,`test`.`t1`.`a1` in ( <materialize> (/* select#2 */ select `test`.`t2`.`b1` from `test`.`t2` where `test`.`t2`.`b1` > '0' ), <primary_index_lookup>(`test`.`t1`.`a1` in <temporary table> on distinct_key where `test`.`t1`.`a1` = `<subquery2>`.`b1`))))
select * from t1 where a1 in (select b1 from t2 where b1 > '0' group by b1);
@@ -70,7 +84,7 @@ explain extended
select * from t1 where (a1, a2) in (select b1, b2 from t2 where b1 > '0' group by b1, b2);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 99.22 Using where
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),(`test`.`t1`.`a1`,`test`.`t1`.`a2`) in ( <materialize> (/* select#2 */ select `test`.`t2`.`b1`,`test`.`t2`.`b2` from `test`.`t2` where `test`.`t2`.`b1` > '0' ), <primary_index_lookup>(`test`.`t1`.`a1` in <temporary table> on distinct_key where `test`.`t1`.`a1` = `<subquery2>`.`b1` and `test`.`t1`.`a2` = `<subquery2>`.`b2`))))
select * from t1 where (a1, a2) in (select b1, b2 from t2 where b1 > '0' group by b1, b2);
@@ -81,7 +95,7 @@ explain extended
select * from t1 where (a1, a2) in (select b1, min(b2) from t2 where b1 > '0' group by b1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where; Using temporary
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 99.22 Using where; Using temporary
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),(`test`.`t1`.`a1`,`test`.`t1`.`a2`) in ( <materialize> (/* select#2 */ select `test`.`t2`.`b1`,min(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`b1` > '0' group by `test`.`t2`.`b1` ), <primary_index_lookup>(`test`.`t1`.`a1` in <temporary table> on distinct_key where `test`.`t1`.`a1` = `<subquery2>`.`b1` and `test`.`t1`.`a2` = `<subquery2>`.`min(b2)`))))
select * from t1 where (a1, a2) in (select b1, min(b2) from t2 where b1 > '0' group by b1);
@@ -125,7 +139,7 @@ explain extended
select * from t1i where (a1, a2) in (select b1, max(b2) from t2i where b1 > '0' group by b1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1i index NULL # # # 3 100.00 #
-2 MATERIALIZED t2i range it2i1,it2i3 # # # 3 100.00 #
+2 MATERIALIZED t2i range it2i1,it2i3 # # # 5 100.00 #
Warnings:
Note 1003 /* select#1 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` where <expr_cache><`test`.`t1i`.`a1`,`test`.`t1i`.`a2`>(<in_optimizer>((`test`.`t1i`.`a1`,`test`.`t1i`.`a2`),(`test`.`t1i`.`a1`,`test`.`t1i`.`a2`) in ( <materialize> (/* select#2 */ select `test`.`t2i`.`b1`,max(`test`.`t2i`.`b2`) from `test`.`t2i` where `test`.`t2i`.`b1` > '0' group by `test`.`t2i`.`b1` ), <primary_index_lookup>(`test`.`t1i`.`a1` in <temporary table> on distinct_key where `test`.`t1i`.`a1` = `<subquery2>`.`b1` and `test`.`t1i`.`a2` = `<subquery2>`.`max(b2)`))))
select * from t1i where (a1, a2) in (select b1, max(b2) from t2i where b1 > '0' group by b1);
@@ -136,34 +150,52 @@ explain extended
select * from t1i where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0' group by b1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1i index NULL # # # 3 100.00 #
-2 MATERIALIZED t2i range it2i1,it2i3 # # # 3 100.00 #
+2 MATERIALIZED t2i range it2i1,it2i3 # # # 5 100.00 #
Warnings:
Note 1003 /* select#1 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` where <expr_cache><`test`.`t1i`.`a1`,`test`.`t1i`.`a2`>(<in_optimizer>((`test`.`t1i`.`a1`,`test`.`t1i`.`a2`),(`test`.`t1i`.`a1`,`test`.`t1i`.`a2`) in ( <materialize> (/* select#2 */ select `test`.`t2i`.`b1`,min(`test`.`t2i`.`b2`) from `test`.`t2i` where `test`.`t2i`.`b1` > '0' group by `test`.`t2i`.`b1` ), <primary_index_lookup>(`test`.`t1i`.`a1` in <temporary table> on distinct_key where `test`.`t1i`.`a1` = `<subquery2>`.`b1` and `test`.`t1i`.`a2` = `<subquery2>`.`min(b2)`))))
select * from t1i where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0' group by b1);
a1 a2
1 - 01 2 - 01
1 - 02 2 - 02
+create table t2i_c like t2i;
+insert into t2i_c select * from t2i;
+insert into t2i_c select * from t2i;
+insert into t2i_c select * from t2i;
+insert into t2i_c select * from t2i;
+analyze table t2i_c;
+Table Op Msg_type Msg_text
+test.t2i_c analyze status Engine-independent statistics collected
+test.t2i_c analyze status OK
+show create table t2i_c;
+Table Create Table
+t2i_c CREATE TABLE `t2i_c` (
+ `b1` char(8) DEFAULT NULL,
+ `b2` char(8) DEFAULT NULL,
+ KEY `it2i1` (`b1`),
+ KEY `it2i2` (`b2`),
+ KEY `it2i3` (`b1`,`b2`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
explain extended
-select * from t1 where (a1, a2) in (select b1, max(b2) from t2i group by b1);
+select * from t1 where (a1, a2) in (select b1, max(b2) from t2i_c group by b1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-2 MATERIALIZED t2i range NULL it2i3 9 NULL 3 100.00 Using index for group-by
+2 MATERIALIZED t2i_c range NULL it2i3 9 NULL 4 100.00 Using index for group-by
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),(`test`.`t1`.`a1`,`test`.`t1`.`a2`) in ( <materialize> (/* select#2 */ select `test`.`t2i`.`b1`,max(`test`.`t2i`.`b2`) from `test`.`t2i` group by `test`.`t2i`.`b1` ), <primary_index_lookup>(`test`.`t1`.`a1` in <temporary table> on distinct_key where `test`.`t1`.`a1` = `<subquery2>`.`b1` and `test`.`t1`.`a2` = `<subquery2>`.`max(b2)`))))
-select * from t1 where (a1, a2) in (select b1, max(b2) from t2i group by b1);
+Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),(`test`.`t1`.`a1`,`test`.`t1`.`a2`) in ( <materialize> (/* select#2 */ select `test`.`t2i_c`.`b1`,max(`test`.`t2i_c`.`b2`) from `test`.`t2i_c` group by `test`.`t2i_c`.`b1` ), <primary_index_lookup>(`test`.`t1`.`a1` in <temporary table> on distinct_key where `test`.`t1`.`a1` = `<subquery2>`.`b1` and `test`.`t1`.`a2` = `<subquery2>`.`max(b2)`))))
+select * from t1 where (a1, a2) in (select b1, max(b2) from t2i_c group by b1);
a1 a2
1 - 01 2 - 01
1 - 02 2 - 02
-prepare st1 from "explain select * from t1 where (a1, a2) in (select b1, max(b2) from t2i group by b1)";
+prepare st1 from "explain select * from t1 where (a1, a2) in (select b1, max(b2) from t2i_c group by b1)";
execute st1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
-2 MATERIALIZED t2i range NULL it2i3 9 NULL 3 Using index for group-by
+2 MATERIALIZED t2i_c range NULL it2i3 9 NULL 4 Using index for group-by
execute st1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
-2 MATERIALIZED t2i range NULL it2i3 9 NULL 3 Using index for group-by
-prepare st2 from "select * from t1 where (a1, a2) in (select b1, max(b2) from t2i group by b1)";
+2 MATERIALIZED t2i_c range NULL it2i3 9 NULL 4 Using index for group-by
+prepare st2 from "select * from t1 where (a1, a2) in (select b1, max(b2) from t2i_c group by b1)";
execute st2;
a1 a2
1 - 01 2 - 01
@@ -172,11 +204,12 @@ execute st2;
a1 a2
1 - 01 2 - 01
1 - 02 2 - 02
+drop table t2i_c;
explain extended
select * from t1 where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0' group by b1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-2 MATERIALIZED t2i range it2i1,it2i3 it2i3 18 NULL 3 100.00 Using where; Using index for group-by
+2 MATERIALIZED t2i range it2i1,it2i3 it2i3 9 NULL 5 100.00 Using where; Using index
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),(`test`.`t1`.`a1`,`test`.`t1`.`a2`) in ( <materialize> (/* select#2 */ select `test`.`t2i`.`b1`,min(`test`.`t2i`.`b2`) from `test`.`t2i` where `test`.`t2i`.`b1` > '0' group by `test`.`t2i`.`b1` ), <primary_index_lookup>(`test`.`t1`.`a1` in <temporary table> on distinct_key where `test`.`t1`.`a1` = `<subquery2>`.`b1` and `test`.`t1`.`a2` = `<subquery2>`.`min(b2)`))))
select * from t1 where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0' group by b1);
@@ -289,7 +322,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
3 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
4 MATERIALIZED t2i index it2i2 it2i3 18 NULL 5 100.00 Using where; Using index
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 99.22 Using where
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),(`test`.`t1`.`a1`,`test`.`t1`.`a2`) in ( <materialize> (/* select#2 */ select `test`.`t2`.`b1`,`test`.`t2`.`b2` from `test`.`t2` where `test`.`t2`.`b1` > '0' ), <primary_index_lookup>(`test`.`t1`.`a1` in <temporary table> on distinct_key where `test`.`t1`.`a1` = `<subquery2>`.`b1` and `test`.`t1`.`a2` = `<subquery2>`.`b2`)))) and <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),(`test`.`t1`.`a1`,`test`.`t1`.`a2`) in ( <materialize> (/* select#3 */ select `test`.`t3`.`c1`,`test`.`t3`.`c2` from `test`.`t3` where <expr_cache><`test`.`t3`.`c1`,`test`.`t3`.`c2`>(<in_optimizer>((`test`.`t3`.`c1`,`test`.`t3`.`c2`),(`test`.`t3`.`c1`,`test`.`t3`.`c2`) in ( <materialize> (/* select#4 */ select `test`.`t2i`.`b1`,`test`.`t2i`.`b2` from `test`.`t2i` where `test`.`t2i`.`b2` > '0' ), <primary_index_lookup>(`test`.`t3`.`c1` in <temporary table> on distinct_key where `test`.`t3`.`c1` = `<subquery4>`.`b1` and `test`.`t3`.`c2` = `<subquery4>`.`b2`)))) ), <primary_index_lookup>(`test`.`t1`.`a1` in <temporary table> on distinct_key where `test`.`t1`.`a1` = `<subquery3>`.`c1` and `test`.`t1`.`a2` = `<subquery3>`.`c2`))))
select * from t1
@@ -419,8 +452,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
4 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
5 MATERIALIZED t2i index it2i2 it2i3 18 NULL 5 100.00 Using where; Using index
-2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 3 99.22 Using where
+3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 5 99.22 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select `test`.`t1`.`a1`,`test`.`t1`.`a2` from `test`.`t1` where `test`.`t1`.`a1` > '0' and <cache>(`test`.`t1`.`a1`) = `test`.`t1`.`a1` and <cache>(`test`.`t1`.`a2`) = `test`.`t1`.`a2` union /* select#3 */ select `test`.`t2`.`b1`,`test`.`t2`.`b2` from `test`.`t2` where `test`.`t2`.`b1` < '9' and <cache>(`test`.`t1`.`a1`) = `test`.`t2`.`b1` and <cache>(`test`.`t1`.`a2`) = `test`.`t2`.`b2`))) and <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),(`test`.`t1`.`a1`,`test`.`t1`.`a2`) in ( <materialize> (/* select#4 */ select `test`.`t3`.`c1`,`test`.`t3`.`c2` from `test`.`t3` where <expr_cache><`test`.`t3`.`c1`,`test`.`t3`.`c2`>(<in_optimizer>((`test`.`t3`.`c1`,`test`.`t3`.`c2`),(`test`.`t3`.`c1`,`test`.`t3`.`c2`) in ( <materialize> (/* select#5 */ select `test`.`t2i`.`b1`,`test`.`t2i`.`b2` from `test`.`t2i` where `test`.`t2i`.`b2` > '0' ), <primary_index_lookup>(`test`.`t3`.`c1` in <temporary table> on distinct_key where `test`.`t3`.`c1` = `<subquery5>`.`b1` and `test`.`t3`.`c2` = `<subquery5>`.`b2`)))) ), <primary_index_lookup>(`test`.`t1`.`a1` in <temporary table> on distinct_key where `test`.`t1`.`a1` = `<subquery4>`.`c1` and `test`.`t1`.`a2` = `<subquery4>`.`c2`))))
@@ -442,8 +475,8 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
4 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
5 MATERIALIZED t2i index it2i2 it2i3 18 NULL 5 100.00 Using where; Using index
-2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 3 99.22 Using where
+3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 5 99.22 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2`,`test`.`t3`.`c1` AS `c1`,`test`.`t3`.`c2` AS `c2` from `test`.`t1` join `test`.`t3` where `test`.`t3`.`c1` = `test`.`t1`.`a1` and <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select `test`.`t1`.`a1`,`test`.`t1`.`a2` from `test`.`t1` where `test`.`t1`.`a1` > '0' and <cache>(`test`.`t1`.`a1`) = `test`.`t1`.`a1` and <cache>(`test`.`t1`.`a2`) = `test`.`t1`.`a2` union /* select#3 */ select `test`.`t2`.`b1`,`test`.`t2`.`b2` from `test`.`t2` where `test`.`t2`.`b1` < '9' and <cache>(`test`.`t1`.`a1`) = `test`.`t2`.`b1` and <cache>(`test`.`t1`.`a2`) = `test`.`t2`.`b2`))) and <expr_cache><`test`.`t3`.`c1`,`test`.`t3`.`c2`>(<in_optimizer>((`test`.`t3`.`c1`,`test`.`t3`.`c2`),(`test`.`t3`.`c1`,`test`.`t3`.`c2`) in ( <materialize> (/* select#4 */ select `test`.`t3`.`c1`,`test`.`t3`.`c2` from `test`.`t3` where <expr_cache><`test`.`t3`.`c1`,`test`.`t3`.`c2`>(<in_optimizer>((`test`.`t3`.`c1`,`test`.`t3`.`c2`),(`test`.`t3`.`c1`,`test`.`t3`.`c2`) in ( <materialize> (/* select#5 */ select `test`.`t2i`.`b1`,`test`.`t2i`.`b2` from `test`.`t2i` where `test`.`t2i`.`b2` > '0' ), <primary_index_lookup>(`test`.`t3`.`c1` in <temporary table> on distinct_key where `test`.`t3`.`c1` = `<subquery5>`.`b1` and `test`.`t3`.`c2` = `<subquery5>`.`b2`)))) ), <primary_index_lookup>(`test`.`t3`.`c1` in <temporary table> on distinct_key where `test`.`t3`.`c1` = `<subquery4>`.`c1` and `test`.`t3`.`c2` = `<subquery4>`.`c2`))))
@@ -464,8 +497,8 @@ select * from t3
where c1 in (select a1 from t1 where a1 > '0' UNION select b1 from t2 where b1 < '9');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 4 100.00 Using where
-2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 3 99.22 Using where
+3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 5 99.22 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1003 /* select#1 */ select `test`.`t3`.`c1` AS `c1`,`test`.`t3`.`c2` AS `c2` from `test`.`t3` where <expr_cache><`test`.`t3`.`c1`>(<in_optimizer>(`test`.`t3`.`c1`,<exists>(/* select#2 */ select `test`.`t1`.`a1` from `test`.`t1` where `test`.`t1`.`a1` > '0' and <cache>(`test`.`t3`.`c1`) = `test`.`t1`.`a1` union /* select#3 */ select `test`.`t2`.`b1` from `test`.`t2` where `test`.`t2`.`b1` < '9' and <cache>(`test`.`t3`.`c1`) = `test`.`t2`.`b1`)))
@@ -485,7 +518,7 @@ where (c1, c2) in (select b1, b2 from t2i where b2 > '0' or b2 = a2));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
5 DEPENDENT SUBQUERY t3c ALL NULL NULL NULL NULL 4 100.00 Using where
-6 DEPENDENT SUBQUERY t2i index_subquery it2i1,it2i2,it2i3 it2i3 18 func,func 2 100.00 Using index; Using where
+6 DEPENDENT SUBQUERY t2i index_subquery it2i1,it2i2,it2i3 it2i3 18 func,func 1 100.00 Using index; Using where
2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 100.00 Using where
4 MATERIALIZED t3b ALL NULL NULL NULL NULL 4 100.00 Using where
3 DEPENDENT SUBQUERY t3a ALL NULL NULL NULL NULL 4 100.00 Using where
@@ -679,7 +712,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
2 DEPENDENT SUBQUERY t1_16 ALL NULL NULL NULL NULL 3 100.00 Using where
3 DEPENDENT SUBQUERY t2_16 ALL NULL NULL NULL NULL 3 100.00 Using where
3 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 5 100.00 Using where; Using join buffer (flat, BNL join)
-4 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
+4 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 99.22 Using where
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` where <expr_cache><concat(`test`.`t1`.`a1`,'x')>(<in_optimizer>(concat(`test`.`t1`.`a1`,'x'),<exists>(/* select#2 */ select left(`test`.`t1_16`.`a1`,8) from `test`.`t1_16` where <expr_cache><`test`.`t1_16`.`a1`,`test`.`t1_16`.`a2`>(<in_optimizer>((`test`.`t1_16`.`a1`,`test`.`t1_16`.`a2`),<exists>(/* select#3 */ select `test`.`t2_16`.`b1`,`test`.`t2_16`.`b2` from `test`.`t2_16` join `test`.`t2` where `test`.`t2`.`b2` = substr(`test`.`t2_16`.`b2`,1,6) and <expr_cache><`test`.`t2`.`b1`>(<in_optimizer>(`test`.`t2`.`b1`,`test`.`t2`.`b1` in ( <materialize> (/* select#4 */ select `test`.`t3`.`c1` from `test`.`t3` where `test`.`t3`.`c2` > '0' ), <primary_index_lookup>(`test`.`t2`.`b1` in <temporary table> on distinct_key where `test`.`t2`.`b1` = `<subquery4>`.`c1`)))) and <cache>(`test`.`t1_16`.`a1`) = `test`.`t2_16`.`b1` and <cache>(`test`.`t1_16`.`a2`) = `test`.`t2_16`.`b2`))) and <cache>(concat(`test`.`t1`.`a1`,'x')) = left(`test`.`t1_16`.`a1`,8))))
drop table t1_16, t2_16, t3_16;
@@ -1901,7 +1934,7 @@ INSERT INTO t2 values(1),(2);
EXPLAIN SELECT * FROM t1 WHERE a IN (SELECT * FROM t2 GROUP BY a HAVING a > 1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using temporary
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using where; Using temporary
flush status;
CREATE TABLE t3 SELECT * FROM t1 WHERE a IN (SELECT * FROM t2 GROUP BY a HAVING a > 1);
SHOW STATUS LIKE 'Created_tmp_tables';
@@ -1925,7 +1958,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (/* select#2 */ select max(`test`.`t2`.`c`) from `test`.`t2`) join `test`.`t1` where `test`.`t1`.`b` = 7 and `test`.`t1`.`a` = `<subquery2>`.`MAX(c)` and (<cache>(/*always not null*/ 1 is null) or `<subquery2>`.`MAX(c)` = 7)
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (/* select#2 */ select max(`test`.`t2`.`c`) from `test`.`t2` having `MAX(c)` is null or `MAX(c)` = 7) join `test`.`t1` where `test`.`t1`.`b` = 7 and `test`.`t1`.`a` = `<subquery2>`.`MAX(c)` and (<cache>(/*always not null*/ 1 is null) or `<subquery2>`.`MAX(c)` = 7)
SELECT * FROM t1
WHERE a IN (SELECT MAX(c) FROM t2) AND b=7 AND (a IS NULL OR a=b);
a b
@@ -2179,11 +2212,11 @@ drop database mysqltest4;
# (both 1st and further executions)
CREATE TABLE t1 (a INT NOT NULL) ENGINE=MyISAM;
INSERT INTO t1 VALUES (0),(8);
-SELECT a FROM (SELECT DISTINCT * FROM t1) AS sq WHERE a IN (SELECT MIN(t2.a) FROM (t1 AS t2));
+SELECT a FROM (SELECT DISTINCT * FROM t1) AS sq WHERE a IN (SELECT MIN(t2.a) FROM t1 AS t2);
a
0
PREPARE stmt FROM "
-SELECT a FROM (SELECT DISTINCT * FROM t1) AS sq WHERE a IN (SELECT MIN(t2.a) FROM (t1 AS t2))
+SELECT a FROM (SELECT DISTINCT * FROM t1) AS sq WHERE a IN (SELECT MIN(t2.a) FROM t1 AS t2)
";
execute stmt;
a
@@ -2526,7 +2559,7 @@ select * from t1 where (a,b) in (select max(a),b from t2 group by b);
show status where Variable_name like 'Handler_read%' or Variable_name like 'Handler_%write%';
Variable_name Value
Handler_read_first 0
-Handler_read_key 5000
+Handler_read_key 5004
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
diff --git a/mysql-test/main/subselect_mat_cost.result b/mysql-test/main/subselect_mat_cost.result
index a9f980fff5d..9b0578b62b1 100644
--- a/mysql-test/main/subselect_mat_cost.result
+++ b/mysql-test/main/subselect_mat_cost.result
@@ -40,6 +40,8 @@ create index Language on CountryLanguage(Language);
create index CityName on City(Name);
alter table City change population population int(11) null default 0;
select max(id) from City into @max_city_id;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert into City values (@max_city_id + 1,'Kilifarevo','BGR',NULL);
SELECT COUNT(*) FROM Country;
COUNT(*)
@@ -65,7 +67,7 @@ Name LIKE 'L%') AND
surfacearea > 1000000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country ALL Name,SurfaceArea NULL NULL NULL 239 Using where
-2 MATERIALIZED City ALL Population,Country NULL NULL NULL 4080 Using where
+2 MATERIALIZED City ALL Population,Country NULL NULL NULL 4079 Using where
SELECT Name FROM Country
WHERE (Code IN (select Country from City where City.Population > 100000) OR
Name LIKE 'L%') AND
@@ -111,7 +113,7 @@ Name LIKE 'L%') AND
surfacearea > 10*1000000;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country range Name,SurfaceArea SurfaceArea 4 NULL 5 Using index condition; Using where; Rowid-ordered scan
-2 DEPENDENT SUBQUERY City index_subquery Population,Country Country 3 func 18 Using where
+2 DEPENDENT SUBQUERY City index_subquery Population,Country Country 3 func 17 Using where
SELECT Name FROM Country
WHERE (Code IN (select Country from City where City.Population > 100000) OR
Name LIKE 'L%') AND
@@ -133,7 +135,7 @@ Country.SurfaceArea < 3000 AND Country.SurfaceArea > 10 AND
City.name LIKE '%Island%');
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country ALL PRIMARY,SurfaceArea NULL NULL NULL 239 Using where
-1 PRIMARY City ref Country Country 3 world.Country.Code 18 Using where
+1 PRIMARY City ref Country Country 3 world.Country.Code 17 Using where
2 MATERIALIZED CountryLanguage ALL Percentage,Language NULL NULL NULL 984 Using where
SELECT *
FROM Country, City
@@ -158,7 +160,7 @@ Country.SurfaceArea < 3000 AND Country.SurfaceArea > 10 AND
Country.name LIKE '%Island%');
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country ALL PRIMARY,SurfaceArea NULL NULL NULL 239 Using where
-1 PRIMARY City ref Country Country 3 world.Country.Code 18
+1 PRIMARY City ref Country Country 3 world.Country.Code 17
2 DEPENDENT SUBQUERY CountryLanguage index_subquery Percentage,Language Language 30 func 2 Using where
SELECT *
FROM Country, City
@@ -203,7 +205,7 @@ OR
(select Country, Language from CountryLanguage));
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country ALL PRIMARY,SurfaceArea NULL NULL NULL 239 Using where
-1 PRIMARY City ref Country Country 3 world.Country.Code 18 Using where
+1 PRIMARY City ref Country Country 3 world.Country.Code 17 Using where
3 MATERIALIZED CountryLanguage index PRIMARY,Language PRIMARY 33 NULL 984 Using index
2 DEPENDENT SUBQUERY CountryLanguage unique_subquery PRIMARY,Percentage,Language PRIMARY 33 func,func 1 Using where
SELECT City.Name, Country.Name
@@ -232,7 +234,7 @@ select count(*) from City
where City.id not in (select capital from Country
where capital is not null and population < 100000);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY City index NULL PRIMARY 4 NULL 4080 Using where; Using index
+1 PRIMARY City index NULL PRIMARY 4 NULL 4079 Using where; Using index
2 MATERIALIZED Country ALL NULL NULL NULL NULL 239 Using where
Q2.2e:
@@ -246,7 +248,7 @@ WHERE Code NOT IN (SELECT Country FROM CountryLanguage WHERE Language = 'English
AND CountryLanguage.Language = 'French'
AND Code = Country;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY CountryLanguage ref PRIMARY,Language Language 30 const 20 Using index condition
+1 PRIMARY CountryLanguage ref PRIMARY,Language Language 30 const 19 Using index condition
1 PRIMARY Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using where
2 DEPENDENT SUBQUERY CountryLanguage unique_subquery PRIMARY,Language PRIMARY 33 func,const 1 Using index; Using where
SELECT Country.Name
@@ -278,6 +280,7 @@ Q2.2m:
Countries that speak French OR Spanish, but do not speak English
MATERIALIZATION because the outer query filters less rows than Q5-a,
so there are more lookups.
+set statement optimizer_switch='rowid_filter=off' for
EXPLAIN
SELECT Country.Name
FROM Country, CountryLanguage
@@ -285,9 +288,10 @@ WHERE Code NOT IN (SELECT Country FROM CountryLanguage WHERE Language = 'English
AND (CountryLanguage.Language = 'French' OR CountryLanguage.Language = 'Spanish')
AND Code = Country;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY CountryLanguage range PRIMARY,Language Language 30 NULL 45 Using index condition; Using where; Rowid-ordered scan
+1 PRIMARY CountryLanguage range PRIMARY,Language Language 30 NULL 44 Using index condition; Using where; Rowid-ordered scan
1 PRIMARY Country eq_ref PRIMARY PRIMARY 3 world.CountryLanguage.Country 1 Using where
-2 MATERIALIZED CountryLanguage ref PRIMARY,Language Language 30 const 47 Using index condition
+3 MATERIALIZED CountryLanguage ref PRIMARY,Language Language 30 const 48 Using index condition
+set statement optimizer_switch='rowid_filter=off' for
SELECT Country.Name
FROM Country, CountryLanguage
WHERE Code NOT IN (SELECT Country FROM CountryLanguage WHERE Language = 'English')
@@ -347,7 +351,7 @@ where (Language, Country) NOT IN
FROM City LEFT JOIN Country ON (Country = Code and City.Population < 10000))
AND Language IN ('English','Spanish');
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY CountryLanguage range Language Language 30 NULL 72 Using index condition; Using where; Rowid-ordered scan
+1 PRIMARY CountryLanguage range Language Language 30 NULL 73 Using index condition; Using where; Rowid-ordered scan
2 DEPENDENT SUBQUERY City ref CityName CityName 35 func 1 Using index condition
2 DEPENDENT SUBQUERY Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using where; Using index
select count(*)
@@ -371,7 +375,7 @@ FROM City LEFT JOIN Country ON (Country = Code)
HAVING City.Name LIKE "Santa%");
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY CountryLanguage index NULL PRIMARY 33 NULL 984 Using where; Using index
-2 MATERIALIZED City ALL NULL NULL NULL NULL 4080
+2 MATERIALIZED City ALL NULL NULL NULL NULL 4079
2 MATERIALIZED Country eq_ref PRIMARY PRIMARY 3 world.City.Country 1 Using index
select count(*)
from CountryLanguage
@@ -429,7 +433,7 @@ capital is null);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country ALL NULL NULL NULL NULL 239 Using where
1 PRIMARY City eq_ref PRIMARY PRIMARY 4 world.Country.Capital 1 Using where
-2 MATERIALIZED City index NULL CityName 35 NULL 4080 Using index
+2 MATERIALIZED City index NULL CityName 35 NULL 4079 Using index
select * from Country, City
where capital = id and
(City.name in (SELECT name FROM City
@@ -450,7 +454,7 @@ WHERE Country.Code NOT IN
(SELECT Country FROM City GROUP BY Name HAVING COUNT(Name) = 1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country ALL NULL NULL NULL NULL 239 Using where
-2 MATERIALIZED City ALL NULL NULL NULL NULL 4080 Using temporary
+2 MATERIALIZED City ALL NULL NULL NULL NULL 4079 Using temporary
SELECT Name
FROM Country
WHERE Country.Code NOT IN
@@ -479,7 +483,7 @@ select Name, City.id in (select capital from Country where capital is not null)
from City
where City.population > 10000000;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY City range Population Population 5 NULL 4 Using index condition; Rowid-ordered scan
+1 PRIMARY City range Population Population 5 NULL 3 Using index condition; Rowid-ordered scan
2 MATERIALIZED Country ALL NULL NULL NULL NULL 239 Using where
select Name, City.id in (select capital from Country where capital is not null) as is_capital
from City
@@ -495,7 +499,7 @@ select Name, City.id in (select capital from Country where capital is not null)
from City
where City.population > 10000000;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY City range Population Population 5 NULL 4 Using index condition; Rowid-ordered scan
+1 PRIMARY City range Population Population 5 NULL 3 Using index condition; Rowid-ordered scan
2 SUBQUERY Country index_subquery CountryCapital CountryCapital 5 func 2 Using index; Using where
select Name, City.id in (select capital from Country where capital is not null) as is_capital
from City
@@ -513,7 +517,7 @@ GROUP BY City.Name
HAVING City.Name IN (select Name from Country where population < 1000000);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY Country index PRIMARY PRIMARY 3 NULL 239 Using index; Using temporary; Using filesort
-1 PRIMARY City ref Country Country 3 world.Country.Code 18
+1 PRIMARY City ref Country Country 3 world.Country.Code 17
2 MATERIALIZED Country ALL Name NULL NULL NULL 239 Using where
SELECT City.Name, City.Population
FROM City JOIN Country ON City.Country = Country.Code
@@ -538,7 +542,7 @@ SELECT Name, round(Population/1000)
FROM City
WHERE Country = "IND" AND Population < 100000);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY City ALL NULL NULL NULL NULL 4080 Using where
+1 PRIMARY City ALL NULL NULL NULL NULL 4079 Using where
2 DEPENDENT SUBQUERY City ref Population,Country,CityName CityName 35 func 1 Using where
3 DEPENDENT UNION City ref Population,Country,CityName CityName 35 func 1 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL
diff --git a/mysql-test/main/subselect_mat_cost.test b/mysql-test/main/subselect_mat_cost.test
index 5a1fb550ca4..5f44d0d2bf4 100644
--- a/mysql-test/main/subselect_mat_cost.test
+++ b/mysql-test/main/subselect_mat_cost.test
@@ -205,6 +205,9 @@ WHERE Code NOT IN (SELECT Country FROM CountryLanguage WHERE Language = 'English
-- echo Countries that speak French OR Spanish, but do not speak English
-- echo MATERIALIZATION because the outer query filters less rows than Q5-a,
-- echo so there are more lookups.
+
+
+set statement optimizer_switch='rowid_filter=off' for
EXPLAIN
SELECT Country.Name
FROM Country, CountryLanguage
@@ -212,6 +215,7 @@ WHERE Code NOT IN (SELECT Country FROM CountryLanguage WHERE Language = 'English
AND (CountryLanguage.Language = 'French' OR CountryLanguage.Language = 'Spanish')
AND Code = Country;
+set statement optimizer_switch='rowid_filter=off' for
SELECT Country.Name
FROM Country, CountryLanguage
WHERE Code NOT IN (SELECT Country FROM CountryLanguage WHERE Language = 'English')
diff --git a/mysql-test/main/subselect_mat_cost_bugs.result b/mysql-test/main/subselect_mat_cost_bugs.result
index 6377ae556d2..2c696ed36fd 100644
--- a/mysql-test/main/subselect_mat_cost_bugs.result
+++ b/mysql-test/main/subselect_mat_cost_bugs.result
@@ -310,6 +310,7 @@ create index key3 on t2 (kp2);
SET @@optimizer_switch='materialization=off,semijoin=off,in_to_exists=on';
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
explain
select c1 from t1 where c1 in (select kp1 from t2 where kp2 = 10 and c2 = 4) or c1 > 7;
diff --git a/mysql-test/main/subselect_no_exists_to_in.result b/mysql-test/main/subselect_no_exists_to_in.result
index f8fd29aced9..bacba844deb 100644
--- a/mysql-test/main/subselect_no_exists_to_in.result
+++ b/mysql-test/main/subselect_no_exists_to_in.result
@@ -183,7 +183,8 @@ select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1);
a b
1 7
2 7
-(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1)) union (select * from t4 order by a limit 2) limit 3;
+(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1))
+union (select * from t4 order by a limit 2) order by a limit 3;
a b
1 7
2 7
@@ -1444,7 +1445,7 @@ a
explain extended select * from t2 where t2.a in (select a from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 index PRIMARY PRIMARY 4 NULL 4 75.00 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00 Using index
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a`
select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
@@ -1454,7 +1455,7 @@ a
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
@@ -1464,8 +1465,8 @@ a
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 100.00 Using index
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00
+1 PRIMARY t3 index PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t3` join `test`.`t2` where `test`.`t3`.`a` = `test`.`t1`.`b` and `test`.`t1`.`a` = `test`.`t2`.`a`
drop table t1, t2, t3;
@@ -1633,7 +1634,7 @@ Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<expr_cache><`test`.`t
explain extended select s1, s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2') from t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL s1 6 NULL 3 100.00 Using index
-2 SUBQUERY t2 index_subquery s1 s1 6 func 2 100.00 Using index; Using where; Full scan on NULL key
+2 SUBQUERY t2 index_subquery s1 s1 6 func 2 50.00 Using index; Using where; Full scan on NULL key
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<expr_cache><`test`.`t1`.`s1`>(<in_optimizer>(`test`.`t1`.`s1`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`s1`) in t2 on s1 checking NULL where `test`.`t2`.`s1` < 'a2' having trigcond(`test`.`t2`.`s1` is null))))) AS `s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2')` from `test`.`t1`
drop table t1,t2;
@@ -3105,13 +3106,18 @@ create table t1(a int, primary key (a));
insert into t1 values (10);
create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
+insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
explain SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 2 Using where
+2 SUBQUERY t2 range b b 40 NULL 3 Using where
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
@@ -3123,7 +3129,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 2 Using index condition
+2 SUBQUERY t2 range b b 40 NULL 3 Using index condition
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;
@@ -3710,7 +3716,7 @@ ORDER BY t1.t DESC LIMIT 1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 system NULL NULL NULL NULL 1
1 PRIMARY t1 index NULL PRIMARY 13 NULL 11 Using where; Using index
-2 SUBQUERY t1 range PRIMARY PRIMARY 13 NULL 5 Using where; Using index
+2 SUBQUERY t1 range PRIMARY PRIMARY 13 NULL 6 Using where; Using index
SELECT * FROM t1,t2
WHERE t1.t = (SELECT t1.t FROM t1
WHERE t1.t < t2.t AND t1.i2=1 AND t2.i1=t1.i1
@@ -3729,7 +3735,7 @@ SELECT sql_no_cache * FROM t1 WHERE NOT EXISTS
i
SELECT * FROM t1
WHERE NOT EXISTS (((SELECT i FROM t1) UNION (SELECT i FROM t1)));
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION (SELECT i FROM t1)))' at line 2
+i
explain select ((select t11.i from t1 t11) union (select t12.i from t1 t12))
from t1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'union (select t12.i from t1 t12))
@@ -4978,7 +4984,7 @@ UNIQUE KEY b (b,c),
KEY c (c),
KEY b_2 (b)
);
-INSERT INTO t3 VALUES (1,1,1), (2,32,1);
+INSERT INTO t3 VALUES (1,1,1), (2,32,1), (3,33,1), (4,34,2);
explain
SELECT t1.a, (SELECT 1 FROM t2 WHERE t2.b=t3.c AND t2.c=t1.a ORDER BY t2.d LIMIT 1) AS incorrect FROM t1, t3 WHERE t3.b=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
@@ -5188,35 +5194,23 @@ a 1
1 1
2 1
SELECT * FROM t1 JOIN ((SELECT 1 UNION SELECT 1)) ON 1;
-ERROR 42000: Every derived table must have its own alias
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a UNION SELECT 1) ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1) ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1) ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a UNION SELECT 1)) ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1)) ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1)) ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a) t1a ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 't1a ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') t1a ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a)) t1a ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 't1a ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')) t1a ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a) ON 1;
-a a
-1 1
-2 1
-1 2
-2 2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a)) ON 1;
-a a
-1 1
-2 1
-1 2
-2 2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')) ON 1' at line 1
SELECT * FROM (t1 t1a);
-a
-1
-2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1
SELECT * FROM ((t1 t1a));
-a
-1
-2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '))' at line 1
SELECT * FROM t1 JOIN (SELECT 1 t1a) alias ON 1;
a t1a
1 1
@@ -5291,11 +5285,14 @@ SELECT ( SELECT a FROM t1 WHERE a = 1 UNION SELECT 1 ), a FROM t1;
SELECT * FROM t2 WHERE (a, b) IN (SELECT a, b FROM t2);
a b
SELECT 1 UNION ( SELECT 1 UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
+1
+1
( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1' at line 1
+1
+1
SELECT ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) )
+1
SELECT ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1' at line 1
SELECT ( SELECT 1 UNION SELECT 1 UNION SELECT 1 );
@@ -5305,9 +5302,9 @@ SELECT ((SELECT 1 UNION SELECT 1 UNION SELECT 1));
((SELECT 1 UNION SELECT 1 UNION SELECT 1))
1
SELECT * FROM ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: Every derived table must have its own alias
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 ) a;
1
1
@@ -5315,19 +5312,25 @@ SELECT * FROM ( SELECT 1 UNION SELECT 1 UNION SELECT 1 ) a;
1
1
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ALL ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ANY ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a IN ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
SELECT * FROM t1 WHERE a = ALL ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1 )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ANY ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1 )' at line 1
+a
+1
SELECT * FROM t1 WHERE a IN ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION SELECT 1 UNION SELECT 1 );
@@ -7021,8 +7024,8 @@ SELECT * FROM t1
WHERE a = (SELECT MAX(b) FROM t2 WHERE c IN (SELECT MAX(d) FROM t3)) OR a = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index a a 5 NULL 2 Using where; Using index
-2 SUBQUERY <subquery3> ALL distinct_key NULL NULL NULL 1
-2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <subquery3> eq_ref distinct_key distinct_key 4 test.t2.c 1
3 MATERIALIZED t3 ALL NULL NULL NULL NULL 2
SELECT * FROM t1
WHERE a = (SELECT MAX(b) FROM t2 WHERE c IN (SELECT MAX(d) FROM t3)) OR a = 10;
@@ -7081,12 +7084,6 @@ call procedure2();
ControlRev
NULL
drop procedure procedure2;
-SELECT
-(SELECT user FROM mysql.user
-WHERE h.host in (SELECT host FROM mysql.user)
-) AS sq
-FROM mysql.host h GROUP BY h.host;
-sq
#
# MDEV-7846:Server crashes in Item_subselect::fix
#_fields or fails with Thread stack overrun
@@ -7299,6 +7296,21 @@ pk i c pk i c
1 10 foo 1 10 foo
DROP TABLE t;
# End of 10.2 tests
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16861 Split Item::update_null_value() into a new virtual method in Type_handler
+#
+SELECT ROW(1,2) = EXISTS (SELECT 1);
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+SELECT ROW(1,2) = 1 IN (SELECT 1 UNION SELECT 2);
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+SELECT ROW(1,2) = (1 = ANY (SELECT 1 UNION SELECT 2));
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+#
+# End of 10.4 tests
+#
set optimizer_switch=default;
select @@optimizer_switch like '%exists_to_in=off%';
@@optimizer_switch like '%exists_to_in=off%'
diff --git a/mysql-test/main/subselect_no_mat.result b/mysql-test/main/subselect_no_mat.result
index 89f236a5a60..a5b1d95cae1 100644
--- a/mysql-test/main/subselect_no_mat.result
+++ b/mysql-test/main/subselect_no_mat.result
@@ -186,7 +186,8 @@ select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1);
a b
1 7
2 7
-(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1)) union (select * from t4 order by a limit 2) limit 3;
+(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1))
+union (select * from t4 order by a limit 2) order by a limit 3;
a b
1 7
2 7
@@ -350,7 +351,7 @@ patient_uq clinic_uq
explain extended select * from t6 where exists (select * from t7 where uq = clinic_uq);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t7 index PRIMARY PRIMARY 4 NULL 2 100.00 Using index
-1 PRIMARY t6 ALL i1 NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t6 ALL i1 NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1276 Field or reference 'test.t6.clinic_uq' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t6`.`patient_uq` AS `patient_uq`,`test`.`t6`.`clinic_uq` AS `clinic_uq` from `test`.`t7` join `test`.`t6` where `test`.`t6`.`clinic_uq` = `test`.`t7`.`uq`
@@ -1447,7 +1448,7 @@ a
explain extended select * from t2 where t2.a in (select a from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 index PRIMARY PRIMARY 4 NULL 4 75.00 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00 Using index
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a`
select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
@@ -1457,7 +1458,7 @@ a
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
@@ -1467,8 +1468,8 @@ a
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 100.00 Using index
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00
+1 PRIMARY t3 index PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t3` join `test`.`t2` where `test`.`t3`.`a` = `test`.`t1`.`b` and `test`.`t1`.`a` = `test`.`t2`.`a`
drop table t1, t2, t3;
@@ -1636,7 +1637,7 @@ Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<expr_cache><`test`.`t
explain extended select s1, s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2') from t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL s1 6 NULL 3 100.00 Using index
-2 SUBQUERY t2 index_subquery s1 s1 6 func 2 100.00 Using index; Using where; Full scan on NULL key
+2 SUBQUERY t2 index_subquery s1 s1 6 func 2 50.00 Using index; Using where; Full scan on NULL key
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<expr_cache><`test`.`t1`.`s1`>(<in_optimizer>(`test`.`t1`.`s1`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`s1`) in t2 on s1 checking NULL where `test`.`t2`.`s1` < 'a2' having trigcond(`test`.`t2`.`s1` is null))))) AS `s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2')` from `test`.`t1`
drop table t1,t2;
@@ -3107,13 +3108,18 @@ create table t1(a int, primary key (a));
insert into t1 values (10);
create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
+insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
explain SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 2 Using where
+2 SUBQUERY t2 range b b 40 NULL 3 Using where
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
@@ -3125,7 +3131,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 2 Using index condition
+2 SUBQUERY t2 range b b 40 NULL 3 Using index condition
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;
@@ -3710,7 +3716,7 @@ ORDER BY t1.t DESC LIMIT 1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 system NULL NULL NULL NULL 1
1 PRIMARY t1 index NULL PRIMARY 13 NULL 11 Using where; Using index
-2 SUBQUERY t1 range PRIMARY PRIMARY 13 NULL 5 Using where; Using index
+2 SUBQUERY t1 range PRIMARY PRIMARY 13 NULL 6 Using where; Using index
SELECT * FROM t1,t2
WHERE t1.t = (SELECT t1.t FROM t1
WHERE t1.t < t2.t AND t1.i2=1 AND t2.i1=t1.i1
@@ -3729,7 +3735,7 @@ SELECT sql_no_cache * FROM t1 WHERE NOT EXISTS
i
SELECT * FROM t1
WHERE NOT EXISTS (((SELECT i FROM t1) UNION (SELECT i FROM t1)));
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION (SELECT i FROM t1)))' at line 2
+i
explain select ((select t11.i from t1 t11) union (select t12.i from t1 t12))
from t1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'union (select t12.i from t1 t12))
@@ -4976,7 +4982,7 @@ UNIQUE KEY b (b,c),
KEY c (c),
KEY b_2 (b)
);
-INSERT INTO t3 VALUES (1,1,1), (2,32,1);
+INSERT INTO t3 VALUES (1,1,1), (2,32,1), (3,33,1), (4,34,2);
explain
SELECT t1.a, (SELECT 1 FROM t2 WHERE t2.b=t3.c AND t2.c=t1.a ORDER BY t2.d LIMIT 1) AS incorrect FROM t1, t3 WHERE t3.b=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
@@ -5186,35 +5192,23 @@ a 1
1 1
2 1
SELECT * FROM t1 JOIN ((SELECT 1 UNION SELECT 1)) ON 1;
-ERROR 42000: Every derived table must have its own alias
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a UNION SELECT 1) ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1) ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1) ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a UNION SELECT 1)) ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1)) ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1)) ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a) t1a ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 't1a ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') t1a ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a)) t1a ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 't1a ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')) t1a ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a) ON 1;
-a a
-1 1
-2 1
-1 2
-2 2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a)) ON 1;
-a a
-1 1
-2 1
-1 2
-2 2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')) ON 1' at line 1
SELECT * FROM (t1 t1a);
-a
-1
-2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1
SELECT * FROM ((t1 t1a));
-a
-1
-2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '))' at line 1
SELECT * FROM t1 JOIN (SELECT 1 t1a) alias ON 1;
a t1a
1 1
@@ -5289,11 +5283,14 @@ SELECT ( SELECT a FROM t1 WHERE a = 1 UNION SELECT 1 ), a FROM t1;
SELECT * FROM t2 WHERE (a, b) IN (SELECT a, b FROM t2);
a b
SELECT 1 UNION ( SELECT 1 UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
+1
+1
( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1' at line 1
+1
+1
SELECT ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) )
+1
SELECT ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1' at line 1
SELECT ( SELECT 1 UNION SELECT 1 UNION SELECT 1 );
@@ -5303,9 +5300,9 @@ SELECT ((SELECT 1 UNION SELECT 1 UNION SELECT 1));
((SELECT 1 UNION SELECT 1 UNION SELECT 1))
1
SELECT * FROM ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: Every derived table must have its own alias
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 ) a;
1
1
@@ -5313,19 +5310,25 @@ SELECT * FROM ( SELECT 1 UNION SELECT 1 UNION SELECT 1 ) a;
1
1
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ALL ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ANY ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a IN ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
SELECT * FROM t1 WHERE a = ALL ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1 )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ANY ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1 )' at line 1
+a
+1
SELECT * FROM t1 WHERE a IN ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION SELECT 1 UNION SELECT 1 );
@@ -7074,12 +7077,6 @@ call procedure2();
ControlRev
NULL
drop procedure procedure2;
-SELECT
-(SELECT user FROM mysql.user
-WHERE h.host in (SELECT host FROM mysql.user)
-) AS sq
-FROM mysql.host h GROUP BY h.host;
-sq
#
# MDEV-7846:Server crashes in Item_subselect::fix
#_fields or fails with Thread stack overrun
@@ -7292,6 +7289,21 @@ pk i c pk i c
1 10 foo 1 10 foo
DROP TABLE t;
# End of 10.2 tests
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16861 Split Item::update_null_value() into a new virtual method in Type_handler
+#
+SELECT ROW(1,2) = EXISTS (SELECT 1);
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+SELECT ROW(1,2) = 1 IN (SELECT 1 UNION SELECT 2);
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+SELECT ROW(1,2) = (1 = ANY (SELECT 1 UNION SELECT 2));
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+#
+# End of 10.4 tests
+#
set optimizer_switch=default;
select @@optimizer_switch like '%materialization=on%';
@@optimizer_switch like '%materialization=on%'
diff --git a/mysql-test/main/subselect_no_opts.result b/mysql-test/main/subselect_no_opts.result
index 348cffb9c53..0ea16d89849 100644
--- a/mysql-test/main/subselect_no_opts.result
+++ b/mysql-test/main/subselect_no_opts.result
@@ -182,7 +182,8 @@ select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1);
a b
1 7
2 7
-(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1)) union (select * from t4 order by a limit 2) limit 3;
+(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1))
+union (select * from t4 order by a limit 2) order by a limit 3;
a b
1 7
2 7
@@ -1632,7 +1633,7 @@ Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<in_optimizer>(`test`.
explain extended select s1, s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2') from t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL s1 6 NULL 3 100.00 Using index
-2 SUBQUERY t2 index_subquery s1 s1 6 func 2 100.00 Using index; Using where; Full scan on NULL key
+2 SUBQUERY t2 index_subquery s1 s1 6 func 2 50.00 Using index; Using where; Full scan on NULL key
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<in_optimizer>(`test`.`t1`.`s1`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`s1`) in t2 on s1 checking NULL where `test`.`t2`.`s1` < 'a2' having trigcond(`test`.`t2`.`s1` is null)))) AS `s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2')` from `test`.`t1`
drop table t1,t2;
@@ -3103,13 +3104,18 @@ create table t1(a int, primary key (a));
insert into t1 values (10);
create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
+insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
explain SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 2 Using where
+2 SUBQUERY t2 range b b 40 NULL 3 Using where
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
@@ -3121,7 +3127,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 2 Using index condition
+2 SUBQUERY t2 range b b 40 NULL 3 Using index condition
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;
@@ -3706,7 +3712,7 @@ ORDER BY t1.t DESC LIMIT 1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 system NULL NULL NULL NULL 1
1 PRIMARY t1 index NULL PRIMARY 13 NULL 11 Using where; Using index
-2 SUBQUERY t1 range PRIMARY PRIMARY 13 NULL 5 Using where; Using index
+2 SUBQUERY t1 range PRIMARY PRIMARY 13 NULL 6 Using where; Using index
SELECT * FROM t1,t2
WHERE t1.t = (SELECT t1.t FROM t1
WHERE t1.t < t2.t AND t1.i2=1 AND t2.i1=t1.i1
@@ -3725,7 +3731,7 @@ SELECT sql_no_cache * FROM t1 WHERE NOT EXISTS
i
SELECT * FROM t1
WHERE NOT EXISTS (((SELECT i FROM t1) UNION (SELECT i FROM t1)));
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION (SELECT i FROM t1)))' at line 2
+i
explain select ((select t11.i from t1 t11) union (select t12.i from t1 t12))
from t1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'union (select t12.i from t1 t12))
@@ -4972,7 +4978,7 @@ UNIQUE KEY b (b,c),
KEY c (c),
KEY b_2 (b)
);
-INSERT INTO t3 VALUES (1,1,1), (2,32,1);
+INSERT INTO t3 VALUES (1,1,1), (2,32,1), (3,33,1), (4,34,2);
explain
SELECT t1.a, (SELECT 1 FROM t2 WHERE t2.b=t3.c AND t2.c=t1.a ORDER BY t2.d LIMIT 1) AS incorrect FROM t1, t3 WHERE t3.b=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
@@ -5182,35 +5188,23 @@ a 1
1 1
2 1
SELECT * FROM t1 JOIN ((SELECT 1 UNION SELECT 1)) ON 1;
-ERROR 42000: Every derived table must have its own alias
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a UNION SELECT 1) ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1) ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1) ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a UNION SELECT 1)) ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1)) ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1)) ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a) t1a ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 't1a ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') t1a ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a)) t1a ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 't1a ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')) t1a ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a) ON 1;
-a a
-1 1
-2 1
-1 2
-2 2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a)) ON 1;
-a a
-1 1
-2 1
-1 2
-2 2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')) ON 1' at line 1
SELECT * FROM (t1 t1a);
-a
-1
-2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1
SELECT * FROM ((t1 t1a));
-a
-1
-2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '))' at line 1
SELECT * FROM t1 JOIN (SELECT 1 t1a) alias ON 1;
a t1a
1 1
@@ -5285,11 +5279,14 @@ SELECT ( SELECT a FROM t1 WHERE a = 1 UNION SELECT 1 ), a FROM t1;
SELECT * FROM t2 WHERE (a, b) IN (SELECT a, b FROM t2);
a b
SELECT 1 UNION ( SELECT 1 UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
+1
+1
( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1' at line 1
+1
+1
SELECT ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) )
+1
SELECT ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1' at line 1
SELECT ( SELECT 1 UNION SELECT 1 UNION SELECT 1 );
@@ -5299,9 +5296,9 @@ SELECT ((SELECT 1 UNION SELECT 1 UNION SELECT 1));
((SELECT 1 UNION SELECT 1 UNION SELECT 1))
1
SELECT * FROM ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: Every derived table must have its own alias
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 ) a;
1
1
@@ -5309,19 +5306,25 @@ SELECT * FROM ( SELECT 1 UNION SELECT 1 UNION SELECT 1 ) a;
1
1
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ALL ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ANY ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a IN ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
SELECT * FROM t1 WHERE a = ALL ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1 )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ANY ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1 )' at line 1
+a
+1
SELECT * FROM t1 WHERE a IN ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION SELECT 1 UNION SELECT 1 );
@@ -7012,8 +7015,8 @@ SELECT * FROM t1
WHERE a = (SELECT MAX(b) FROM t2 WHERE c IN (SELECT MAX(d) FROM t3)) OR a = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index a a 5 NULL 2 Using where; Using index
-2 SUBQUERY <subquery3> ALL distinct_key NULL NULL NULL 1
-2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <subquery3> eq_ref distinct_key distinct_key 4 test.t2.c 1
3 MATERIALIZED t3 ALL NULL NULL NULL NULL 2
SELECT * FROM t1
WHERE a = (SELECT MAX(b) FROM t2 WHERE c IN (SELECT MAX(d) FROM t3)) OR a = 10;
@@ -7072,12 +7075,6 @@ call procedure2();
ControlRev
NULL
drop procedure procedure2;
-SELECT
-(SELECT user FROM mysql.user
-WHERE h.host in (SELECT host FROM mysql.user)
-) AS sq
-FROM mysql.host h GROUP BY h.host;
-sq
#
# MDEV-7846:Server crashes in Item_subselect::fix
#_fields or fails with Thread stack overrun
@@ -7290,4 +7287,19 @@ pk i c pk i c
1 10 foo 1 10 foo
DROP TABLE t;
# End of 10.2 tests
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16861 Split Item::update_null_value() into a new virtual method in Type_handler
+#
+SELECT ROW(1,2) = EXISTS (SELECT 1);
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+SELECT ROW(1,2) = 1 IN (SELECT 1 UNION SELECT 2);
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+SELECT ROW(1,2) = (1 = ANY (SELECT 1 UNION SELECT 2));
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+#
+# End of 10.4 tests
+#
set @optimizer_switch_for_subselect_test=null;
diff --git a/mysql-test/main/subselect_no_scache.result b/mysql-test/main/subselect_no_scache.result
index 230c645b261..196af2dc372 100644
--- a/mysql-test/main/subselect_no_scache.result
+++ b/mysql-test/main/subselect_no_scache.result
@@ -185,7 +185,8 @@ select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1);
a b
1 7
2 7
-(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1)) union (select * from t4 order by a limit 2) limit 3;
+(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1))
+union (select * from t4 order by a limit 2) order by a limit 3;
a b
1 7
2 7
@@ -349,7 +350,7 @@ patient_uq clinic_uq
explain extended select * from t6 where exists (select * from t7 where uq = clinic_uq);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t7 index PRIMARY PRIMARY 4 NULL 2 100.00 Using index
-1 PRIMARY t6 ALL i1 NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t6 ALL i1 NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1276 Field or reference 'test.t6.clinic_uq' of SELECT #2 was resolved in SELECT #1
Note 1003 select `test`.`t6`.`patient_uq` AS `patient_uq`,`test`.`t6`.`clinic_uq` AS `clinic_uq` from `test`.`t7` join `test`.`t6` where `test`.`t6`.`clinic_uq` = `test`.`t7`.`uq`
@@ -1446,7 +1447,7 @@ a
explain extended select * from t2 where t2.a in (select a from t1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 index PRIMARY PRIMARY 4 NULL 4 75.00 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00 Using index
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a`
select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
@@ -1456,7 +1457,7 @@ a
explain extended select * from t2 where t2.a in (select a from t1 where t1.b <> 30);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00 Using where
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t2` where `test`.`t1`.`a` = `test`.`t2`.`a` and `test`.`t1`.`b` <> 30
select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
@@ -1466,8 +1467,8 @@ a
explain extended select * from t2 where t2.a in (select t1.a from t1,t3 where t1.b=t3.a);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2 index PRIMARY PRIMARY 4 NULL 4 100.00 Using index
-1 PRIMARY t1 ALL PRIMARY NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 100.00 Using index
+1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.a 1 100.00
+1 PRIMARY t3 index PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t2`.`a` AS `a` from `test`.`t1` join `test`.`t3` join `test`.`t2` where `test`.`t3`.`a` = `test`.`t1`.`b` and `test`.`t1`.`a` = `test`.`t2`.`a`
drop table t1, t2, t3;
@@ -1635,7 +1636,7 @@ Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<in_optimizer>(`test`.
explain extended select s1, s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2') from t1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 index NULL s1 6 NULL 3 100.00 Using index
-2 SUBQUERY t2 index_subquery s1 s1 6 func 2 100.00 Using index; Using where; Full scan on NULL key
+2 SUBQUERY t2 index_subquery s1 s1 6 func 2 50.00 Using index; Using where; Full scan on NULL key
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`s1` AS `s1`,!<in_optimizer>(`test`.`t1`.`s1`,<exists>(<index_lookup>(<cache>(`test`.`t1`.`s1`) in t2 on s1 checking NULL where `test`.`t2`.`s1` < 'a2' having trigcond(`test`.`t2`.`s1` is null)))) AS `s1 NOT IN (SELECT s1 FROM t2 WHERE s1 < 'a2')` from `test`.`t1`
drop table t1,t2;
@@ -3108,13 +3109,18 @@ create table t1(a int, primary key (a));
insert into t1 values (10);
create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
+insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
explain SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 2 Using where
+2 SUBQUERY t2 range b b 40 NULL 3 Using where
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
@@ -3126,7 +3132,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 2 Using index condition
+2 SUBQUERY t2 range b b 40 NULL 3 Using index condition
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;
@@ -3713,7 +3719,7 @@ ORDER BY t1.t DESC LIMIT 1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 system NULL NULL NULL NULL 1
1 PRIMARY t1 index NULL PRIMARY 13 NULL 11 Using where; Using index
-2 SUBQUERY t1 range PRIMARY PRIMARY 13 NULL 5 Using where; Using index
+2 SUBQUERY t1 range PRIMARY PRIMARY 13 NULL 6 Using where; Using index
SELECT * FROM t1,t2
WHERE t1.t = (SELECT t1.t FROM t1
WHERE t1.t < t2.t AND t1.i2=1 AND t2.i1=t1.i1
@@ -3732,7 +3738,7 @@ SELECT sql_no_cache * FROM t1 WHERE NOT EXISTS
i
SELECT * FROM t1
WHERE NOT EXISTS (((SELECT i FROM t1) UNION (SELECT i FROM t1)));
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION (SELECT i FROM t1)))' at line 2
+i
explain select ((select t11.i from t1 t11) union (select t12.i from t1 t12))
from t1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'union (select t12.i from t1 t12))
@@ -4982,7 +4988,7 @@ UNIQUE KEY b (b,c),
KEY c (c),
KEY b_2 (b)
);
-INSERT INTO t3 VALUES (1,1,1), (2,32,1);
+INSERT INTO t3 VALUES (1,1,1), (2,32,1), (3,33,1), (4,34,2);
explain
SELECT t1.a, (SELECT 1 FROM t2 WHERE t2.b=t3.c AND t2.c=t1.a ORDER BY t2.d LIMIT 1) AS incorrect FROM t1, t3 WHERE t3.b=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
@@ -5192,35 +5198,23 @@ a 1
1 1
2 1
SELECT * FROM t1 JOIN ((SELECT 1 UNION SELECT 1)) ON 1;
-ERROR 42000: Every derived table must have its own alias
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a UNION SELECT 1) ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1) ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1) ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a UNION SELECT 1)) ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1)) ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1)) ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a) t1a ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 't1a ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') t1a ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a)) t1a ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 't1a ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')) t1a ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a) ON 1;
-a a
-1 1
-2 1
-1 2
-2 2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a)) ON 1;
-a a
-1 1
-2 1
-1 2
-2 2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')) ON 1' at line 1
SELECT * FROM (t1 t1a);
-a
-1
-2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1
SELECT * FROM ((t1 t1a));
-a
-1
-2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '))' at line 1
SELECT * FROM t1 JOIN (SELECT 1 t1a) alias ON 1;
a t1a
1 1
@@ -5295,11 +5289,14 @@ SELECT ( SELECT a FROM t1 WHERE a = 1 UNION SELECT 1 ), a FROM t1;
SELECT * FROM t2 WHERE (a, b) IN (SELECT a, b FROM t2);
a b
SELECT 1 UNION ( SELECT 1 UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
+1
+1
( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1' at line 1
+1
+1
SELECT ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) )
+1
SELECT ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1' at line 1
SELECT ( SELECT 1 UNION SELECT 1 UNION SELECT 1 );
@@ -5309,9 +5306,9 @@ SELECT ((SELECT 1 UNION SELECT 1 UNION SELECT 1));
((SELECT 1 UNION SELECT 1 UNION SELECT 1))
1
SELECT * FROM ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: Every derived table must have its own alias
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 ) a;
1
1
@@ -5319,19 +5316,25 @@ SELECT * FROM ( SELECT 1 UNION SELECT 1 UNION SELECT 1 ) a;
1
1
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ALL ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ANY ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a IN ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
SELECT * FROM t1 WHERE a = ALL ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1 )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ANY ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1 )' at line 1
+a
+1
SELECT * FROM t1 WHERE a IN ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION SELECT 1 UNION SELECT 1 );
@@ -7027,8 +7030,8 @@ SELECT * FROM t1
WHERE a = (SELECT MAX(b) FROM t2 WHERE c IN (SELECT MAX(d) FROM t3)) OR a = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index a a 5 NULL 2 Using where; Using index
-2 SUBQUERY <subquery3> ALL distinct_key NULL NULL NULL 1
-2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <subquery3> eq_ref distinct_key distinct_key 4 test.t2.c 1
3 MATERIALIZED t3 ALL NULL NULL NULL NULL 2
SELECT * FROM t1
WHERE a = (SELECT MAX(b) FROM t2 WHERE c IN (SELECT MAX(d) FROM t3)) OR a = 10;
@@ -7087,12 +7090,6 @@ call procedure2();
ControlRev
NULL
drop procedure procedure2;
-SELECT
-(SELECT user FROM mysql.user
-WHERE h.host in (SELECT host FROM mysql.user)
-) AS sq
-FROM mysql.host h GROUP BY h.host;
-sq
#
# MDEV-7846:Server crashes in Item_subselect::fix
#_fields or fails with Thread stack overrun
@@ -7305,6 +7302,21 @@ pk i c pk i c
1 10 foo 1 10 foo
DROP TABLE t;
# End of 10.2 tests
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16861 Split Item::update_null_value() into a new virtual method in Type_handler
+#
+SELECT ROW(1,2) = EXISTS (SELECT 1);
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+SELECT ROW(1,2) = 1 IN (SELECT 1 UNION SELECT 2);
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+SELECT ROW(1,2) = (1 = ANY (SELECT 1 UNION SELECT 2));
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+#
+# End of 10.4 tests
+#
set optimizer_switch=default;
select @@optimizer_switch like '%subquery_cache=on%';
@@optimizer_switch like '%subquery_cache=on%'
diff --git a/mysql-test/main/subselect_no_semijoin.result b/mysql-test/main/subselect_no_semijoin.result
index e58a01d1983..c590a5d3450 100644
--- a/mysql-test/main/subselect_no_semijoin.result
+++ b/mysql-test/main/subselect_no_semijoin.result
@@ -182,7 +182,8 @@ select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1);
a b
1 7
2 7
-(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1)) union (select * from t4 order by a limit 2) limit 3;
+(select * from t2 where t2.b=(select a from t3 order by 1 desc limit 1))
+union (select * from t4 order by a limit 2) order by a limit 3;
a b
1 7
2 7
@@ -3103,13 +3104,18 @@ create table t1(a int, primary key (a));
insert into t1 values (10);
create table t2 (a int primary key, b varchar(32), c int, unique key b(c, b));
insert into t2(a, c, b) values (1,10,'359'), (2,10,'35988'), (3,10,'35989');
+insert into t2(a, c, b) values (4,10,'360'), (5,10,'35998'), (6,10,'35999');
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
explain SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 2 Using where
+2 SUBQUERY t2 range b b 40 NULL 3 Using where
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c DESC, t2.b DESC LIMIT 1) WHERE t1.a = 10;
@@ -3121,7 +3127,7 @@ ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
1 PRIMARY r const PRIMARY PRIMARY 4 const 1
-2 SUBQUERY t2 range b b 40 NULL 2 Using index condition
+2 SUBQUERY t2 range b b 40 NULL 3 Using index condition
SELECT sql_no_cache t1.a, r.a, r.b FROM t1 LEFT JOIN t2 r
ON r.a = (SELECT t2.a FROM t2 WHERE t2.c = t1.a AND t2.b <= '359899'
ORDER BY t2.c, t2.b LIMIT 1) WHERE t1.a = 10;
@@ -3706,7 +3712,7 @@ ORDER BY t1.t DESC LIMIT 1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t2 system NULL NULL NULL NULL 1
1 PRIMARY t1 index NULL PRIMARY 13 NULL 11 Using where; Using index
-2 SUBQUERY t1 range PRIMARY PRIMARY 13 NULL 5 Using where; Using index
+2 SUBQUERY t1 range PRIMARY PRIMARY 13 NULL 6 Using where; Using index
SELECT * FROM t1,t2
WHERE t1.t = (SELECT t1.t FROM t1
WHERE t1.t < t2.t AND t1.i2=1 AND t2.i1=t1.i1
@@ -3725,7 +3731,7 @@ SELECT sql_no_cache * FROM t1 WHERE NOT EXISTS
i
SELECT * FROM t1
WHERE NOT EXISTS (((SELECT i FROM t1) UNION (SELECT i FROM t1)));
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION (SELECT i FROM t1)))' at line 2
+i
explain select ((select t11.i from t1 t11) union (select t12.i from t1 t12))
from t1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'union (select t12.i from t1 t12))
@@ -4972,7 +4978,7 @@ UNIQUE KEY b (b,c),
KEY c (c),
KEY b_2 (b)
);
-INSERT INTO t3 VALUES (1,1,1), (2,32,1);
+INSERT INTO t3 VALUES (1,1,1), (2,32,1), (3,33,1), (4,34,2);
explain
SELECT t1.a, (SELECT 1 FROM t2 WHERE t2.b=t3.c AND t2.c=t1.a ORDER BY t2.d LIMIT 1) AS incorrect FROM t1, t3 WHERE t3.b=t1.a;
id select_type table type possible_keys key key_len ref rows Extra
@@ -5182,35 +5188,23 @@ a 1
1 1
2 1
SELECT * FROM t1 JOIN ((SELECT 1 UNION SELECT 1)) ON 1;
-ERROR 42000: Every derived table must have its own alias
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a UNION SELECT 1) ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1) ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1) ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a UNION SELECT 1)) ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'SELECT 1)) ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1)) ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a) t1a ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 't1a ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') t1a ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a)) t1a ON 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 't1a ON 1' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')) t1a ON 1' at line 1
SELECT * FROM t1 JOIN (t1 t1a) ON 1;
-a a
-1 1
-2 1
-1 2
-2 2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ') ON 1' at line 1
SELECT * FROM t1 JOIN ((t1 t1a)) ON 1;
-a a
-1 1
-2 1
-1 2
-2 2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')) ON 1' at line 1
SELECT * FROM (t1 t1a);
-a
-1
-2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ')' at line 1
SELECT * FROM ((t1 t1a));
-a
-1
-2
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '))' at line 1
SELECT * FROM t1 JOIN (SELECT 1 t1a) alias ON 1;
a t1a
1 1
@@ -5285,11 +5279,14 @@ SELECT ( SELECT a FROM t1 WHERE a = 1 UNION SELECT 1 ), a FROM t1;
SELECT * FROM t2 WHERE (a, b) IN (SELECT a, b FROM t2);
a b
SELECT 1 UNION ( SELECT 1 UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
+1
+1
( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1' at line 1
+1
+1
SELECT ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) )
+1
SELECT ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1' at line 1
SELECT ( SELECT 1 UNION SELECT 1 UNION SELECT 1 );
@@ -5299,9 +5296,9 @@ SELECT ((SELECT 1 UNION SELECT 1 UNION SELECT 1));
((SELECT 1 UNION SELECT 1 UNION SELECT 1))
1
SELECT * FROM ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: Every derived table must have its own alias
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
SELECT * FROM ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 ) a;
1
1
@@ -5309,19 +5306,25 @@ SELECT * FROM ( SELECT 1 UNION SELECT 1 UNION SELECT 1 ) a;
1
1
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ALL ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ANY ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a IN ( SELECT 1 UNION ( SELECT 1 UNION SELECT 1 ) );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
SELECT * FROM t1 WHERE a = ALL ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1 )' at line 1
+a
+1
SELECT * FROM t1 WHERE a = ANY ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 ) UNION SELECT 1 )' at line 1
+a
+1
SELECT * FROM t1 WHERE a IN ( ( SELECT 1 UNION SELECT 1 ) UNION SELECT 1 );
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT 1 )' at line 1
SELECT * FROM t1 WHERE a = ( SELECT 1 UNION SELECT 1 UNION SELECT 1 );
@@ -7012,8 +7015,8 @@ SELECT * FROM t1
WHERE a = (SELECT MAX(b) FROM t2 WHERE c IN (SELECT MAX(d) FROM t3)) OR a = 10;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index a a 5 NULL 2 Using where; Using index
-2 SUBQUERY <subquery3> ALL distinct_key NULL NULL NULL 1
-2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+2 SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where
+2 SUBQUERY <subquery3> eq_ref distinct_key distinct_key 4 test.t2.c 1
3 MATERIALIZED t3 ALL NULL NULL NULL NULL 2
SELECT * FROM t1
WHERE a = (SELECT MAX(b) FROM t2 WHERE c IN (SELECT MAX(d) FROM t3)) OR a = 10;
@@ -7072,12 +7075,6 @@ call procedure2();
ControlRev
NULL
drop procedure procedure2;
-SELECT
-(SELECT user FROM mysql.user
-WHERE h.host in (SELECT host FROM mysql.user)
-) AS sq
-FROM mysql.host h GROUP BY h.host;
-sq
#
# MDEV-7846:Server crashes in Item_subselect::fix
#_fields or fails with Thread stack overrun
@@ -7290,5 +7287,20 @@ pk i c pk i c
1 10 foo 1 10 foo
DROP TABLE t;
# End of 10.2 tests
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16861 Split Item::update_null_value() into a new virtual method in Type_handler
+#
+SELECT ROW(1,2) = EXISTS (SELECT 1);
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+SELECT ROW(1,2) = 1 IN (SELECT 1 UNION SELECT 2);
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+SELECT ROW(1,2) = (1 = ANY (SELECT 1 UNION SELECT 2));
+ERROR HY000: Illegal parameter data types row and boolean for operation '='
+#
+# End of 10.4 tests
+#
set @optimizer_switch_for_subselect_test=null;
set @join_cache_level_for_subselect_test=NULL;
diff --git a/mysql-test/main/subselect_sj.result b/mysql-test/main/subselect_sj.result
index 454a09771f6..2907536df02 100644
--- a/mysql-test/main/subselect_sj.result
+++ b/mysql-test/main/subselect_sj.result
@@ -204,7 +204,7 @@ a b a b
insert into t1 select (A.a + 10 * B.a),1 from t0 A, t0 B;
explain extended select * from t1 where a in (select pk from t10 where pk<3);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t10 range PRIMARY PRIMARY 4 NULL 4 100.00 Using where; Using index
+1 PRIMARY t10 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index
1 PRIMARY t1 ALL NULL NULL NULL NULL 103 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t10` join `test`.`t1` where `test`.`t1`.`a` = `test`.`t10`.`pk` and `test`.`t10`.`pk` < 3
@@ -729,7 +729,7 @@ SELECT int_key FROM ot1
WHERE int_nokey IN (SELECT it2.int_key
FROM it1 LEFT JOIN it2 ON it2.datetime_key);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 11
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 10
1 PRIMARY ot1 ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join)
2 MATERIALIZED it1 index NULL int_key 4 NULL 2 Using index
2 MATERIALIZED it2 ALL int_key,datetime_key NULL NULL NULL 20 Using where
@@ -1675,7 +1675,7 @@ CREATE TABLE t3 ( f11 int) ;
INSERT IGNORE INTO t3 VALUES (0);
SELECT alias1.f11 AS field2
FROM ( t3 AS alias2 JOIN t1 AS alias3 ON alias3.f10 = 1)
-LEFT JOIN ( t2 AS alias1 ) ON alias3.f11 = 1
+LEFT JOIN t2 AS alias1 ON alias3.f11 = 1
WHERE alias2.f11 IN ( SELECT f11 FROM t2 )
GROUP BY field2 ;
field2
@@ -3079,9 +3079,11 @@ set optimizer_switch= @tmp_mdev6859;
set @tmp_mdev12675=@@optimizer_switch;
set optimizer_switch=default;
create table t1 (a int) engine=myisam;
-insert into t1 values (5),(3),(2),(7),(2),(5),(1);
+insert into t1 values (3),(2),(7),(2),(1);
create table t2 (b int, index idx(b)) engine=myisam;
-insert into t2 values (2),(3),(2),(1),(3),(4);
+insert into t2 values (2),(3),(2),(1),(3),(4),(1),(2),(1),(2);
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
insert into t2 select b+10 from t2;
insert into t2 select b+10 from t2;
insert into t2 select b+10 from t2;
@@ -3095,20 +3097,22 @@ insert into t2 select b+10 from t2;
insert into t2 select b+10 from t2;
analyze table t1,t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
explain
select a from t1, t2 where b between 1 and 2 and a in (select b from t2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
-1 PRIMARY t2 ref idx idx 5 test.t1.a 256 Using index; FirstMatch(t1)
-1 PRIMARY t2 range idx idx 5 NULL 2 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 1462 Using index; FirstMatch(t1)
+1 PRIMARY t2 range idx idx 5 NULL 5 Using where; Using index; Using join buffer (flat, BNL join)
explain
select a from t1 join t2 on b between 1 and 2 and a in (select b from t2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
-1 PRIMARY t2 ref idx idx 5 test.t1.a 256 Using index; FirstMatch(t1)
-1 PRIMARY t2 range idx idx 5 NULL 2 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 1462 Using index; FirstMatch(t1)
+1 PRIMARY t2 range idx idx 5 NULL 5 Using where; Using index; Using join buffer (flat, BNL join)
drop table t1,t2;
set optimizer_switch= @tmp_mdev12675;
#
diff --git a/mysql-test/main/subselect_sj.test b/mysql-test/main/subselect_sj.test
index e4e31691843..22c9b2bf0cf 100644
--- a/mysql-test/main/subselect_sj.test
+++ b/mysql-test/main/subselect_sj.test
@@ -1462,7 +1462,7 @@ INSERT IGNORE INTO t3 VALUES (0);
SELECT alias1.f11 AS field2
FROM ( t3 AS alias2 JOIN t1 AS alias3 ON alias3.f10 = 1)
-LEFT JOIN ( t2 AS alias1 ) ON alias3.f11 = 1
+LEFT JOIN t2 AS alias1 ON alias3.f11 = 1
WHERE alias2.f11 IN ( SELECT f11 FROM t2 )
GROUP BY field2 ;
@@ -2782,9 +2782,11 @@ set optimizer_switch= @tmp_mdev6859;
set @tmp_mdev12675=@@optimizer_switch;
set optimizer_switch=default;
create table t1 (a int) engine=myisam;
-insert into t1 values (5),(3),(2),(7),(2),(5),(1);
+insert into t1 values (3),(2),(7),(2),(1);
create table t2 (b int, index idx(b)) engine=myisam;
-insert into t2 values (2),(3),(2),(1),(3),(4);
+insert into t2 values (2),(3),(2),(1),(3),(4),(1),(2),(1),(2);
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
insert into t2 select b+10 from t2;
insert into t2 select b+10 from t2;
insert into t2 select b+10 from t2;
diff --git a/mysql-test/main/subselect_sj2.result b/mysql-test/main/subselect_sj2.result
index 948be5766a2..a127c18280e 100644
--- a/mysql-test/main/subselect_sj2.result
+++ b/mysql-test/main/subselect_sj2.result
@@ -1,3 +1,8 @@
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
set @subselect_sj2_tmp= @@optimizer_switch;
set optimizer_switch='semijoin=on,firstmatch=on,loosescan=on';
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
@@ -67,6 +72,14 @@ primary key(pk1, pk2, pk3)
) engine=innodb;
insert into t3 select a,a, a,a,a from t0;
insert into t3 select a,a, a+100,a+100,a+100 from t0;
+analyze table t1,t2,t3;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
explain select * from t3 where b in (select a from t1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL b NULL NULL NULL 20
@@ -119,7 +132,7 @@ set max_heap_table_size= @save_max_heap_table_size;
explain select * from t1 where a in (select b from t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
-1 PRIMARY t2 ref b b 5 test.t1.a 2 Using index; FirstMatch(t1)
+1 PRIMARY t2 ref b b 5 test.t1.a 1 Using index; FirstMatch(t1)
select * from t1;
a b
1 1
@@ -717,9 +730,8 @@ alter table t3 add primary key(id), add key(a);
The following must use loose index scan over t3, key a:
explain select count(a) from t2 where a in ( SELECT a FROM t3);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index a a 5 NULL 1000 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t3 index a a 5 NULL 30000 Using index
+1 PRIMARY t2 index a a 5 NULL 1000 Using where; Using index
+1 PRIMARY t3 ref a a 5 test.t2.a 30 Using index; FirstMatch(t2)
select count(a) from t2 where a in ( SELECT a FROM t3);
count(a)
1000
@@ -803,10 +815,10 @@ explain
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias1 const PRIMARY PRIMARY 4 const 1 Using index
-1 PRIMARY alias2 index f12 f12 7 NULL 1 Using index; LooseScan
-1 PRIMARY t1 index NULL PRIMARY 4 NULL 2 Using index; FirstMatch(alias2)
-1 PRIMARY t3 ALL NULL NULL NULL NULL 7 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY alias1 const PRIMARY PRIMARY 4 const # Using index
+1 PRIMARY alias2 index f12 f12 7 NULL # Using index; LooseScan
+1 PRIMARY t1 index NULL PRIMARY 4 NULL # Using index; FirstMatch(alias2)
+1 PRIMARY t3 ALL NULL NULL NULL NULL # Using where; Using join buffer (flat, BNL join)
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
f12
@@ -922,6 +934,12 @@ INSERT INTO t2 VALUES
(6,'u',6),(7,'m',7),(8,'k',8),(9,'o',9),(10,'w',1),
(11,'m',2),(12,'q',3),(13,'m',4),(14,'d',5),
(15,'g',6),(16,'x',7),(17,'f',8);
+analyze table t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
explain
SELECT * FROM t1 WHERE b IN (
SELECT d FROM t2, t1
@@ -953,6 +971,10 @@ INSERT INTO t1 VALUES
('y','y'),('t','t'),('d','d'),('s','s'),('r','r'),
('m','m'),('b','b'),('x','x'),('g','g'),('p','p'),
('q','q'),('w','w'),('d','d'),('e','e');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
CREATE ALGORITHM=TEMPTABLE VIEW v1 AS SELECT * FROM t1;
# This query returned 6 rows instead of 19
SELECT * FROM v1
@@ -986,6 +1008,10 @@ y y
CREATE TABLE t2 (a VARCHAR(1), b VARCHAR(1) NOT NULL, KEY(a)) ENGINE=InnoDB;
INSERT INTO t2 SELECT * FROM t1;
INSERT INTO t2 SELECT * FROM t1;
+ANALYZE TABLE t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
EXPLAIN
SELECT * FROM t2
WHERE (a, a) IN (SELECT alias2.b, alias2.a FROM t1 AS alias1, t1 AS alias2
@@ -1081,11 +1107,11 @@ WHERE alias5.b = alias4.b
AND ( alias5.b >= alias3.b OR alias5.c != alias3.c )
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias3 ALL PRIMARY NULL NULL NULL 19 Using where
-1 PRIMARY alias4 ref PRIMARY,c c 4 test.alias3.d 1 Using index
-1 PRIMARY alias5 eq_ref PRIMARY PRIMARY 4 test.alias4.b 1 Using where; FirstMatch(alias3)
-1 PRIMARY alias2 ALL NULL NULL NULL NULL 14 Using join buffer (flat, BNL join)
-1 PRIMARY alias1 ALL NULL NULL NULL NULL 14 Using join buffer (flat, BNL join)
+1 PRIMARY alias3 ALL PRIMARY NULL NULL NULL #
+1 PRIMARY alias5 index PRIMARY c 4 NULL # Using where; Using index
+1 PRIMARY alias4 eq_ref PRIMARY,c PRIMARY 4 test.alias5.b # Using where; FirstMatch(alias3)
+1 PRIMARY alias1 ALL NULL NULL NULL NULL # Using join buffer (flat, BNL join)
+1 PRIMARY alias2 ALL NULL NULL NULL NULL # Using join buffer (flat, BNL join)
SELECT COUNT(*) FROM t1 AS alias1, t1 AS alias2, t2 AS alias3
WHERE alias3.d IN (
SELECT alias4.c FROM t2 AS alias4, t2 AS alias5
@@ -1102,11 +1128,11 @@ WHERE alias5.b = alias4.b
AND ( alias5.b >= alias3.b OR alias3.c != alias5.c )
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias3 ALL PRIMARY NULL NULL NULL 19 Using where
-1 PRIMARY alias4 ref PRIMARY,c c 4 test.alias3.d 1 Using index
-1 PRIMARY alias5 eq_ref PRIMARY PRIMARY 4 test.alias4.b 1 Using where; FirstMatch(alias3)
-1 PRIMARY alias2 ALL NULL NULL NULL NULL 14 Using join buffer (flat, BNL join)
-1 PRIMARY alias1 ALL NULL NULL NULL NULL 14 Using join buffer (flat, BNL join)
+1 PRIMARY alias3 ALL PRIMARY NULL NULL NULL #
+1 PRIMARY alias5 index PRIMARY c 4 NULL # Using where; Using index
+1 PRIMARY alias4 eq_ref PRIMARY,c PRIMARY 4 test.alias5.b # Using where; FirstMatch(alias3)
+1 PRIMARY alias1 ALL NULL NULL NULL NULL # Using join buffer (flat, BNL join)
+1 PRIMARY alias2 ALL NULL NULL NULL NULL # Using join buffer (flat, BNL join)
SELECT COUNT(*) FROM t1 AS alias1, t1 AS alias2, t2 AS alias3
WHERE alias3.d IN (
SELECT alias4.c FROM t2 AS alias4, t2 AS alias5
@@ -1232,6 +1258,14 @@ INSERT IGNORE INTO t2 (t2id, t1idref) SELECT t1id, t1id FROM t1;
INSERT IGNORE INTO t1 VALUES (200001, 'a');
INSERT IGNORE INTO t2 (t2id, t1idref) VALUES (200011, 200001),(200012, 200001),(200013, 200001);
INSERT IGNORE INTO t3 VALUES (1, 200011, 1), (1, 200012, 2), (1, 200013, 3);
+ANALYZE TABLE t1,t2,t3;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
set @tmp7474= @@optimizer_search_depth;
SET SESSION optimizer_search_depth = 1;
SELECT SQL_NO_CACHE
@@ -1332,4 +1366,7 @@ a pk b
DROP TABLE t1,t2,t3;
DROP VIEW v3;
# This must be the last in the file:
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
set optimizer_switch=@subselect_sj2_tmp;
diff --git a/mysql-test/main/subselect_sj2.test b/mysql-test/main/subselect_sj2.test
index a948b086a85..2b4f619a615 100644
--- a/mysql-test/main/subselect_sj2.test
+++ b/mysql-test/main/subselect_sj2.test
@@ -1,6 +1,13 @@
#
# DuplicateElimination strategy test
#
+
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
--source include/have_innodb.inc
set @subselect_sj2_tmp= @@optimizer_switch;
@@ -57,6 +64,8 @@ create table t3 (
insert into t3 select a,a, a,a,a from t0;
insert into t3 select a,a, a+100,a+100,a+100 from t0;
+analyze table t1,t2,t3;
+
explain select * from t3 where b in (select a from t1);
select * from t3 where b in (select a from t1);
@@ -986,6 +995,8 @@ CREATE TABLE t3 (f12 varchar(1) NOT NULL) ENGINE=InnoDB;
INSERT INTO t3 VALUES ('r'),('s'),('t'),('v'),('w'),('x'),('y');
--echo # The following must use LooseScan but not join buffering
+
+--replace_column 9 #
explain
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
@@ -1113,6 +1124,8 @@ INSERT INTO t2 VALUES
(11,'m',2),(12,'q',3),(13,'m',4),(14,'d',5),
(15,'g',6),(16,'x',7),(17,'f',8);
+analyze table t1,t2;
+
explain
SELECT * FROM t1 WHERE b IN (
SELECT d FROM t2, t1
@@ -1139,6 +1152,8 @@ INSERT INTO t1 VALUES
('m','m'),('b','b'),('x','x'),('g','g'),('p','p'),
('q','q'),('w','w'),('d','d'),('e','e');
+ANALYZE TABLE t1;
+
CREATE ALGORITHM=TEMPTABLE VIEW v1 AS SELECT * FROM t1;
--echo # This query returned 6 rows instead of 19
@@ -1155,6 +1170,9 @@ WHERE ( a, a ) IN (
CREATE TABLE t2 (a VARCHAR(1), b VARCHAR(1) NOT NULL, KEY(a)) ENGINE=InnoDB;
INSERT INTO t2 SELECT * FROM t1;
INSERT INTO t2 SELECT * FROM t1;
+
+ANALYZE TABLE t2;
+
EXPLAIN
SELECT * FROM t2
WHERE (a, a) IN (SELECT alias2.b, alias2.a FROM t1 AS alias1, t1 AS alias2
@@ -1207,6 +1225,7 @@ INSERT INTO t2 VALUES
(13,'b','b'),(14,'x','x'),(15,'g','g'),(16,'p','p'),
(17,'q','q'),(18,'w','w'),(19,'d','d');
+--replace_column 9 #
EXPLAIN
SELECT COUNT(*) FROM t1 AS alias1, t1 AS alias2, t2 AS alias3
WHERE alias3.d IN (
@@ -1226,6 +1245,7 @@ WHERE alias3.d IN (
# Do the same EXPLAIN SELECT and SELECT
# with "alias3.c != alias5.c" instead of "alias5.c != alias3.c"
+--replace_column 9 #
EXPLAIN
SELECT COUNT(*) FROM t1 AS alias1, t1 AS alias2, t2 AS alias3
WHERE alias3.d IN (
@@ -1379,6 +1399,8 @@ INSERT IGNORE INTO t1 VALUES (200001, 'a');
INSERT IGNORE INTO t2 (t2id, t1idref) VALUES (200011, 200001),(200012, 200001),(200013, 200001);
INSERT IGNORE INTO t3 VALUES (1, 200011, 1), (1, 200012, 2), (1, 200013, 3);
+ANALYZE TABLE t1,t2,t3;
+
set @tmp7474= @@optimizer_search_depth;
SET SESSION optimizer_search_depth = 1;
@@ -1465,4 +1487,7 @@ DROP TABLE t1,t2,t3;
DROP VIEW v3;
--echo # This must be the last in the file:
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
set optimizer_switch=@subselect_sj2_tmp;
diff --git a/mysql-test/main/subselect_sj2_jcl6.result b/mysql-test/main/subselect_sj2_jcl6.result
index 2955307eb86..56c11e8c9af 100644
--- a/mysql-test/main/subselect_sj2_jcl6.result
+++ b/mysql-test/main/subselect_sj2_jcl6.result
@@ -9,6 +9,11 @@ Variable_name Value
join_cache_level 6
set @optimizer_switch_for_subselect_sj2_test=@@optimizer_switch;
set @join_cache_level_for_subselect_sj2_test=@@join_cache_level;
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
set @subselect_sj2_tmp= @@optimizer_switch;
set optimizer_switch='semijoin=on,firstmatch=on,loosescan=on';
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
@@ -78,6 +83,14 @@ primary key(pk1, pk2, pk3)
) engine=innodb;
insert into t3 select a,a, a,a,a from t0;
insert into t3 select a,a, a+100,a+100,a+100 from t0;
+analyze table t1,t2,t3;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
explain select * from t3 where b in (select a from t1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL b NULL NULL NULL 20
@@ -818,10 +831,10 @@ explain
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias1 const PRIMARY PRIMARY 4 const 1 Using index
-1 PRIMARY alias2 index f12 f12 7 NULL 1 Using index; LooseScan
-1 PRIMARY t1 index NULL PRIMARY 4 NULL 2 Using index; FirstMatch(alias2)
-1 PRIMARY t3 ALL NULL NULL NULL NULL 7 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY alias1 const PRIMARY PRIMARY 4 const # Using index
+1 PRIMARY alias2 index f12 f12 7 NULL # Using index; LooseScan
+1 PRIMARY t1 index NULL PRIMARY 4 NULL # Using index; FirstMatch(alias2)
+1 PRIMARY t3 ALL NULL NULL NULL NULL # Using where; Using join buffer (flat, BNL join)
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
f12
@@ -937,6 +950,12 @@ INSERT INTO t2 VALUES
(6,'u',6),(7,'m',7),(8,'k',8),(9,'o',9),(10,'w',1),
(11,'m',2),(12,'q',3),(13,'m',4),(14,'d',5),
(15,'g',6),(16,'x',7),(17,'f',8);
+analyze table t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
explain
SELECT * FROM t1 WHERE b IN (
SELECT d FROM t2, t1
@@ -968,6 +987,10 @@ INSERT INTO t1 VALUES
('y','y'),('t','t'),('d','d'),('s','s'),('r','r'),
('m','m'),('b','b'),('x','x'),('g','g'),('p','p'),
('q','q'),('w','w'),('d','d'),('e','e');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
CREATE ALGORITHM=TEMPTABLE VIEW v1 AS SELECT * FROM t1;
# This query returned 6 rows instead of 19
SELECT * FROM v1
@@ -1001,6 +1024,10 @@ y y
CREATE TABLE t2 (a VARCHAR(1), b VARCHAR(1) NOT NULL, KEY(a)) ENGINE=InnoDB;
INSERT INTO t2 SELECT * FROM t1;
INSERT INTO t2 SELECT * FROM t1;
+ANALYZE TABLE t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
EXPLAIN
SELECT * FROM t2
WHERE (a, a) IN (SELECT alias2.b, alias2.a FROM t1 AS alias1, t1 AS alias2
@@ -1096,11 +1123,11 @@ WHERE alias5.b = alias4.b
AND ( alias5.b >= alias3.b OR alias5.c != alias3.c )
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias3 ALL PRIMARY NULL NULL NULL 19 Using where
-1 PRIMARY alias4 ref PRIMARY,c c 4 test.alias3.d 1 Using index
-1 PRIMARY alias5 eq_ref PRIMARY PRIMARY 4 test.alias4.b 1 Using where; FirstMatch(alias3)
-1 PRIMARY alias2 ALL NULL NULL NULL NULL 14 Using join buffer (flat, BNL join)
-1 PRIMARY alias1 ALL NULL NULL NULL NULL 14 Using join buffer (incremental, BNL join)
+1 PRIMARY alias3 ALL PRIMARY NULL NULL NULL #
+1 PRIMARY alias5 index PRIMARY c 4 NULL # Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY alias4 eq_ref PRIMARY,c PRIMARY 4 test.alias5.b # Using where; FirstMatch(alias3); Using join buffer (incremental, BKA join); Key-ordered scan
+1 PRIMARY alias1 ALL NULL NULL NULL NULL # Using join buffer (incremental, BNL join)
+1 PRIMARY alias2 ALL NULL NULL NULL NULL # Using join buffer (incremental, BNL join)
SELECT COUNT(*) FROM t1 AS alias1, t1 AS alias2, t2 AS alias3
WHERE alias3.d IN (
SELECT alias4.c FROM t2 AS alias4, t2 AS alias5
@@ -1117,11 +1144,11 @@ WHERE alias5.b = alias4.b
AND ( alias5.b >= alias3.b OR alias3.c != alias5.c )
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias3 ALL PRIMARY NULL NULL NULL 19 Using where
-1 PRIMARY alias4 ref PRIMARY,c c 4 test.alias3.d 1 Using index
-1 PRIMARY alias5 eq_ref PRIMARY PRIMARY 4 test.alias4.b 1 Using where; FirstMatch(alias3)
-1 PRIMARY alias2 ALL NULL NULL NULL NULL 14 Using join buffer (flat, BNL join)
-1 PRIMARY alias1 ALL NULL NULL NULL NULL 14 Using join buffer (incremental, BNL join)
+1 PRIMARY alias3 ALL PRIMARY NULL NULL NULL #
+1 PRIMARY alias5 index PRIMARY c 4 NULL # Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY alias4 eq_ref PRIMARY,c PRIMARY 4 test.alias5.b # Using where; FirstMatch(alias3); Using join buffer (incremental, BKA join); Key-ordered scan
+1 PRIMARY alias1 ALL NULL NULL NULL NULL # Using join buffer (incremental, BNL join)
+1 PRIMARY alias2 ALL NULL NULL NULL NULL # Using join buffer (incremental, BNL join)
SELECT COUNT(*) FROM t1 AS alias1, t1 AS alias2, t2 AS alias3
WHERE alias3.d IN (
SELECT alias4.c FROM t2 AS alias4, t2 AS alias5
@@ -1247,6 +1274,14 @@ INSERT IGNORE INTO t2 (t2id, t1idref) SELECT t1id, t1id FROM t1;
INSERT IGNORE INTO t1 VALUES (200001, 'a');
INSERT IGNORE INTO t2 (t2id, t1idref) VALUES (200011, 200001),(200012, 200001),(200013, 200001);
INSERT IGNORE INTO t3 VALUES (1, 200011, 1), (1, 200012, 2), (1, 200013, 3);
+ANALYZE TABLE t1,t2,t3;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
set @tmp7474= @@optimizer_search_depth;
SET SESSION optimizer_search_depth = 1;
SELECT SQL_NO_CACHE
@@ -1347,7 +1382,15 @@ a pk b
DROP TABLE t1,t2,t3;
DROP VIEW v3;
# This must be the last in the file:
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
set optimizer_switch=@subselect_sj2_tmp;
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
#
# Bug #898073: potential incremental join cache for semijoin
#
@@ -1440,6 +1483,9 @@ set join_cache_level=default;
show variables like 'join_cache_level';
Variable_name Value
join_cache_level 2
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
set @@optimizer_switch=@save_optimizer_switch_jcl6;
set @optimizer_switch_for_subselect_sj2_test=NULL;
set @join_cache_level_subselect_sj2_test=NULL;
diff --git a/mysql-test/main/subselect_sj2_jcl6.test b/mysql-test/main/subselect_sj2_jcl6.test
index 7ff08716230..9be6102a5f9 100644
--- a/mysql-test/main/subselect_sj2_jcl6.test
+++ b/mysql-test/main/subselect_sj2_jcl6.test
@@ -16,6 +16,13 @@ set @join_cache_level_for_subselect_sj2_test=@@join_cache_level;
--source subselect_sj2.test
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
+
--echo #
--echo # Bug #898073: potential incremental join cache for semijoin
--echo #
@@ -107,6 +114,10 @@ DROP TABLE t1,t2;
set join_cache_level=default;
show variables like 'join_cache_level';
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
+
set @@optimizer_switch=@save_optimizer_switch_jcl6;
set @optimizer_switch_for_subselect_sj2_test=NULL;
set @join_cache_level_subselect_sj2_test=NULL;
diff --git a/mysql-test/main/subselect_sj2_mat.result b/mysql-test/main/subselect_sj2_mat.result
index 884451d7dff..73f682755da 100644
--- a/mysql-test/main/subselect_sj2_mat.result
+++ b/mysql-test/main/subselect_sj2_mat.result
@@ -1,5 +1,10 @@
set optimizer_switch='materialization=on';
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
set @subselect_sj2_tmp= @@optimizer_switch;
set optimizer_switch='semijoin=on,firstmatch=on,loosescan=on';
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
@@ -69,6 +74,14 @@ primary key(pk1, pk2, pk3)
) engine=innodb;
insert into t3 select a,a, a,a,a from t0;
insert into t3 select a,a, a+100,a+100,a+100 from t0;
+analyze table t1,t2,t3;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
explain select * from t3 where b in (select a from t1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t3 ALL b NULL NULL NULL 20
@@ -121,7 +134,7 @@ set max_heap_table_size= @save_max_heap_table_size;
explain select * from t1 where a in (select b from t2);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
-1 PRIMARY t2 ref b b 5 test.t1.a 2 Using index; FirstMatch(t1)
+1 PRIMARY t2 ref b b 5 test.t1.a 1 Using index; FirstMatch(t1)
select * from t1;
a b
1 1
@@ -719,9 +732,8 @@ alter table t3 add primary key(id), add key(a);
The following must use loose index scan over t3, key a:
explain select count(a) from t2 where a in ( SELECT a FROM t3);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t2 index a a 5 NULL 1000 Using index
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
-2 MATERIALIZED t3 index a a 5 NULL 30000 Using index
+1 PRIMARY t2 index a a 5 NULL 1000 Using where; Using index
+1 PRIMARY t3 ref a a 5 test.t2.a 30 Using index; FirstMatch(t2)
select count(a) from t2 where a in ( SELECT a FROM t3);
count(a)
1000
@@ -805,10 +817,10 @@ explain
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias1 const PRIMARY PRIMARY 4 const 1 Using index
-1 PRIMARY alias2 index f12 f12 7 NULL 1 Using index; LooseScan
-1 PRIMARY t1 index NULL PRIMARY 4 NULL 2 Using index; FirstMatch(alias2)
-1 PRIMARY t3 ALL NULL NULL NULL NULL 7 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY alias1 const PRIMARY PRIMARY 4 const # Using index
+1 PRIMARY alias2 index f12 f12 7 NULL # Using index; LooseScan
+1 PRIMARY t1 index NULL PRIMARY 4 NULL # Using index; FirstMatch(alias2)
+1 PRIMARY t3 ALL NULL NULL NULL NULL # Using where; Using join buffer (flat, BNL join)
SELECT * FROM t3
WHERE f12 IN (SELECT alias2.f12 FROM t1 AS alias1, t2 AS alias2, t1 WHERE alias1.f13 = 24);
f12
@@ -924,6 +936,12 @@ INSERT INTO t2 VALUES
(6,'u',6),(7,'m',7),(8,'k',8),(9,'o',9),(10,'w',1),
(11,'m',2),(12,'q',3),(13,'m',4),(14,'d',5),
(15,'g',6),(16,'x',7),(17,'f',8);
+analyze table t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
explain
SELECT * FROM t1 WHERE b IN (
SELECT d FROM t2, t1
@@ -955,6 +973,10 @@ INSERT INTO t1 VALUES
('y','y'),('t','t'),('d','d'),('s','s'),('r','r'),
('m','m'),('b','b'),('x','x'),('g','g'),('p','p'),
('q','q'),('w','w'),('d','d'),('e','e');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
CREATE ALGORITHM=TEMPTABLE VIEW v1 AS SELECT * FROM t1;
# This query returned 6 rows instead of 19
SELECT * FROM v1
@@ -988,6 +1010,10 @@ y y
CREATE TABLE t2 (a VARCHAR(1), b VARCHAR(1) NOT NULL, KEY(a)) ENGINE=InnoDB;
INSERT INTO t2 SELECT * FROM t1;
INSERT INTO t2 SELECT * FROM t1;
+ANALYZE TABLE t2;
+Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
EXPLAIN
SELECT * FROM t2
WHERE (a, a) IN (SELECT alias2.b, alias2.a FROM t1 AS alias1, t1 AS alias2
@@ -1083,11 +1109,11 @@ WHERE alias5.b = alias4.b
AND ( alias5.b >= alias3.b OR alias5.c != alias3.c )
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias3 ALL PRIMARY NULL NULL NULL 19 Using where
-1 PRIMARY alias4 ref PRIMARY,c c 4 test.alias3.d 1 Using index
-1 PRIMARY alias5 eq_ref PRIMARY PRIMARY 4 test.alias4.b 1 Using where; FirstMatch(alias3)
-1 PRIMARY alias2 ALL NULL NULL NULL NULL 14 Using join buffer (flat, BNL join)
-1 PRIMARY alias1 ALL NULL NULL NULL NULL 14 Using join buffer (flat, BNL join)
+1 PRIMARY alias3 ALL PRIMARY NULL NULL NULL #
+1 PRIMARY alias5 index PRIMARY c 4 NULL # Using where; Using index
+1 PRIMARY alias4 eq_ref PRIMARY,c PRIMARY 4 test.alias5.b # Using where; FirstMatch(alias3)
+1 PRIMARY alias1 ALL NULL NULL NULL NULL # Using join buffer (flat, BNL join)
+1 PRIMARY alias2 ALL NULL NULL NULL NULL # Using join buffer (flat, BNL join)
SELECT COUNT(*) FROM t1 AS alias1, t1 AS alias2, t2 AS alias3
WHERE alias3.d IN (
SELECT alias4.c FROM t2 AS alias4, t2 AS alias5
@@ -1104,11 +1130,11 @@ WHERE alias5.b = alias4.b
AND ( alias5.b >= alias3.b OR alias3.c != alias5.c )
);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY alias3 ALL PRIMARY NULL NULL NULL 19 Using where
-1 PRIMARY alias4 ref PRIMARY,c c 4 test.alias3.d 1 Using index
-1 PRIMARY alias5 eq_ref PRIMARY PRIMARY 4 test.alias4.b 1 Using where; FirstMatch(alias3)
-1 PRIMARY alias2 ALL NULL NULL NULL NULL 14 Using join buffer (flat, BNL join)
-1 PRIMARY alias1 ALL NULL NULL NULL NULL 14 Using join buffer (flat, BNL join)
+1 PRIMARY alias3 ALL PRIMARY NULL NULL NULL #
+1 PRIMARY alias5 index PRIMARY c 4 NULL # Using where; Using index
+1 PRIMARY alias4 eq_ref PRIMARY,c PRIMARY 4 test.alias5.b # Using where; FirstMatch(alias3)
+1 PRIMARY alias1 ALL NULL NULL NULL NULL # Using join buffer (flat, BNL join)
+1 PRIMARY alias2 ALL NULL NULL NULL NULL # Using join buffer (flat, BNL join)
SELECT COUNT(*) FROM t1 AS alias1, t1 AS alias2, t2 AS alias3
WHERE alias3.d IN (
SELECT alias4.c FROM t2 AS alias4, t2 AS alias5
@@ -1234,6 +1260,14 @@ INSERT IGNORE INTO t2 (t2id, t1idref) SELECT t1id, t1id FROM t1;
INSERT IGNORE INTO t1 VALUES (200001, 'a');
INSERT IGNORE INTO t2 (t2id, t1idref) VALUES (200011, 200001),(200012, 200001),(200013, 200001);
INSERT IGNORE INTO t3 VALUES (1, 200011, 1), (1, 200012, 2), (1, 200013, 3);
+ANALYZE TABLE t1,t2,t3;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
set @tmp7474= @@optimizer_search_depth;
SET SESSION optimizer_search_depth = 1;
SELECT SQL_NO_CACHE
@@ -1334,7 +1368,15 @@ a pk b
DROP TABLE t1,t2,t3;
DROP VIEW v3;
# This must be the last in the file:
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
set optimizer_switch=@subselect_sj2_tmp;
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+@@innodb_stats_persistent_sample_pages;
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
set optimizer_switch=default;
select @@optimizer_switch like '%materialization=on%';
@@optimizer_switch like '%materialization=on%'
@@ -1419,7 +1461,7 @@ WHERE t1.cat_id = t3.cat_id AND
t3.cat_id IN (SELECT cat_id FROM t2) AND
t3.sack_id = 33479 AND t3.kit_id = 6;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 ref PRIMARY PRIMARY 5 const,const 4 Using index
+1 PRIMARY t3 ref PRIMARY PRIMARY 5 const,const 5 Using index
1 PRIMARY t2 ref cat_id cat_id 4 test.t3.cat_id 2 Using where; Using index; FirstMatch(t3)
1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t2.cat_id 1 Using where; Using index
SELECT count(*) FROM t1, t3
@@ -1435,7 +1477,7 @@ WHERE t1.cat_id = t3.cat_id AND
t3.cat_id IN (SELECT cat_id FROM t4) AND
t3.sack_id = 33479 AND t3.kit_id = 6;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 ref PRIMARY PRIMARY 5 const,const 4 Using index
+1 PRIMARY t3 ref PRIMARY PRIMARY 5 const,const 5 Using index
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t3.cat_id 1 Using index
2 MATERIALIZED t4 index cat_id cat_id 4 NULL 19 Using index
@@ -1451,7 +1493,7 @@ WHERE t1.cat_id = t3.cat_id AND
t3.cat_id IN (SELECT cat_id FROM t2) AND
t3.sack_id = 33479 AND t3.kit_id = 6;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 ref PRIMARY PRIMARY 5 const,const 4 Using index
+1 PRIMARY t3 ref PRIMARY PRIMARY 5 const,const 5 Using index
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 Using where
1 PRIMARY t1 eq_ref PRIMARY PRIMARY 4 test.t3.cat_id 1 Using index
2 MATERIALIZED t2 index cat_id cat_id 4 NULL 19 Using index
@@ -1692,6 +1734,10 @@ insert into t1(`id`,`local_name`) values
(11,'Rollover - Internet Payday'),
(12,'AL Monthly Installment'),
(13,'AL Semi-Monthly Installment');
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
explain
SELECT SQL_NO_CACHE t.id
FROM t1 t
@@ -1704,7 +1750,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t index PRIMARY PRIMARY 4 NULL 13 Using where; Using index
2 MATERIALIZED <subquery3> ALL distinct_key NULL NULL NULL 8
2 MATERIALIZED A ALL PRIMARY NULL NULL NULL 13 Using where; Using join buffer (flat, BNL join)
-3 MATERIALIZED B ALL PRIMARY NULL NULL NULL 13 Using where
+3 MATERIALIZED B range PRIMARY PRIMARY 4 NULL 8 Using where
SELECT SQL_NO_CACHE t.id
FROM t1 t
WHERE (
@@ -1841,6 +1887,19 @@ CREATE TABLE t5 (id_product int) ENGINE=MyISAM;
INSERT INTO `t5` VALUES
(652),(668),(669),(670),(671),(673),(674),(675),(676),
(677),(679),(680),(681),(682),(683),(684),(685),(686);
+ANALYZE TABLE t1,t2,t3,t,t5;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
+test.t analyze Error Table 'test.t' doesn't exist
+test.t analyze status Operation failed
+test.t5 analyze status Engine-independent statistics collected
+test.t5 analyze status OK
+set optimizer_switch='rowid_filter=off';
explain
SELECT * FROM t3
JOIN t4 ON (t4.id_product = t3.id_product AND t4.id_shop = 1)
@@ -1853,18 +1912,22 @@ AND t3.id_product IN (SELECT id_product FROM t2 t2_3 WHERE t2_3.id_t2 = 18 OR t2
AND t3.id_product IN (SELECT id_product FROM t2 t2_4 WHERE t2_4.id_t2 = 34 OR t2_4.id_t2 = 23)
AND t3.id_product IN (SELECT id_product FROM t2 t2_5 WHERE t2_5.id_t2 = 29 OR t2_5.id_t2 = 28 OR t2_5.id_t2 = 26);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 index NULL PRIMARY 8 NULL 73 Using index
-1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t1.id_product 1 Using index
+1 PRIMARY <subquery3> ALL distinct_key NULL NULL NULL 12
+1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t2_2.id_product 1 Using where; Using index
1 PRIMARY <subquery5> eq_ref distinct_key distinct_key 4 func 1 Using where
-1 PRIMARY <subquery4> eq_ref distinct_key distinct_key 4 func 1 Using where
-1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 func 1 Using where
-1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 Using where
-1 PRIMARY t4 eq_ref PRIMARY PRIMARY 8 test.t1.id_product,const 1 Using where; Using index
-1 PRIMARY <subquery6> eq_ref distinct_key distinct_key 4 func 1 Using where
1 PRIMARY t5 ALL NULL NULL NULL NULL 18 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY <subquery6> eq_ref distinct_key distinct_key 4 func 1 Using where
+1 PRIMARY t4 eq_ref PRIMARY PRIMARY 8 test.t3.id_product,const 1 Using where; Using index
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1 Using where
+1 PRIMARY <subquery4> eq_ref distinct_key distinct_key 4 func 1 Using where
+1 PRIMARY t1 index NULL PRIMARY 8 NULL 73 Using where; Using index; Using join buffer (flat, BNL join)
+3 MATERIALIZED t2_2 ref id_t2,id_product id_t2 5 const 12 Using where
5 MATERIALIZED t2_4 range id_t2,id_product id_t2 5 NULL 18 Using index condition; Using where
-4 MATERIALIZED t2_3 range id_t2,id_product id_t2 5 NULL 32 Using index condition; Using where
-3 MATERIALIZED t2_2 ref id_t2,id_product id_t2 5 const 12
-2 MATERIALIZED t2_1 ref id_t2,id_product id_t2 5 const 50
-6 MATERIALIZED t2_5 range id_t2,id_product id_t2 5 NULL 30 Using index condition; Using where
+6 MATERIALIZED t2_5 range id_t2,id_product id_t2 5 NULL 31 Using index condition; Using where
+2 MATERIALIZED t2_1 ref id_t2,id_product id_t2 5 const 51
+4 MATERIALIZED t2_3 range id_t2,id_product id_t2 5 NULL 33 Using index condition; Using where
+set optimizer_switch='rowid_filter=default';
drop table t1,t2,t3,t4,t5;
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+@innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/subselect_sj2_mat.test b/mysql-test/main/subselect_sj2_mat.test
index 6ae687aa99a..6eeaceb82b0 100644
--- a/mysql-test/main/subselect_sj2_mat.test
+++ b/mysql-test/main/subselect_sj2_mat.test
@@ -5,6 +5,12 @@ set optimizer_switch='materialization=on';
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
--source subselect_sj2.test
+set @innodb_stats_persistent_save= @@innodb_stats_persistent;
+set @innodb_stats_persistent_sample_pages_save=
+ @@innodb_stats_persistent_sample_pages;
+
+set global innodb_stats_persistent= 1;
+set global innodb_stats_persistent_sample_pages=100;
set optimizer_switch=default;
select @@optimizer_switch like '%materialization=on%';
@@ -329,6 +335,8 @@ insert into t1(`id`,`local_name`) values
(12,'AL Monthly Installment'),
(13,'AL Semi-Monthly Installment');
+ANALYZE TABLE t1;
+
explain
SELECT SQL_NO_CACHE t.id
FROM t1 t
@@ -467,6 +475,10 @@ CREATE TABLE t5 (id_product int) ENGINE=MyISAM;
INSERT INTO `t5` VALUES
(652),(668),(669),(670),(671),(673),(674),(675),(676),
(677),(679),(680),(681),(682),(683),(684),(685),(686);
+
+ANALYZE TABLE t1,t2,t3,t,t5;
+
+set optimizer_switch='rowid_filter=off';
explain
SELECT * FROM t3
@@ -480,4 +492,10 @@ AND t3.id_product IN (SELECT id_product FROM t2 t2_3 WHERE t2_3.id_t2 = 18 OR t2
AND t3.id_product IN (SELECT id_product FROM t2 t2_4 WHERE t2_4.id_t2 = 34 OR t2_4.id_t2 = 23)
AND t3.id_product IN (SELECT id_product FROM t2 t2_5 WHERE t2_5.id_t2 = 29 OR t2_5.id_t2 = 28 OR t2_5.id_t2 = 26);
+set optimizer_switch='rowid_filter=default';
+
drop table t1,t2,t3,t4,t5;
+
+set global innodb_stats_persistent= @innodb_stats_persistent_save;
+set global innodb_stats_persistent_sample_pages=
+ @innodb_stats_persistent_sample_pages_save;
diff --git a/mysql-test/main/subselect_sj_jcl6.result b/mysql-test/main/subselect_sj_jcl6.result
index fc279b05ac2..697a2ae36b6 100644
--- a/mysql-test/main/subselect_sj_jcl6.result
+++ b/mysql-test/main/subselect_sj_jcl6.result
@@ -217,7 +217,7 @@ a b a b
insert into t1 select (A.a + 10 * B.a),1 from t0 A, t0 B;
explain extended select * from t1 where a in (select pk from t10 where pk<3);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t10 range PRIMARY PRIMARY 4 NULL 4 100.00 Using where; Using index
+1 PRIMARY t10 range PRIMARY PRIMARY 4 NULL 3 100.00 Using where; Using index
1 PRIMARY t1 ALL NULL NULL NULL NULL 103 100.00 Using where; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t10` join `test`.`t1` where `test`.`t1`.`a` = `test`.`t10`.`pk` and `test`.`t10`.`pk` < 3
@@ -742,7 +742,7 @@ SELECT int_key FROM ot1
WHERE int_nokey IN (SELECT it2.int_key
FROM it1 LEFT JOIN it2 ON it2.datetime_key);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 11
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 10
1 PRIMARY ot1 ALL NULL NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join)
2 MATERIALIZED it1 index NULL int_key 4 NULL 2 Using index
2 MATERIALIZED it2 ALL int_key,datetime_key NULL NULL NULL 20 Using where; Using join buffer (flat, BNL join)
@@ -1688,7 +1688,7 @@ CREATE TABLE t3 ( f11 int) ;
INSERT IGNORE INTO t3 VALUES (0);
SELECT alias1.f11 AS field2
FROM ( t3 AS alias2 JOIN t1 AS alias3 ON alias3.f10 = 1)
-LEFT JOIN ( t2 AS alias1 ) ON alias3.f11 = 1
+LEFT JOIN t2 AS alias1 ON alias3.f11 = 1
WHERE alias2.f11 IN ( SELECT f11 FROM t2 )
GROUP BY field2 ;
field2
@@ -3093,9 +3093,11 @@ set optimizer_switch= @tmp_mdev6859;
set @tmp_mdev12675=@@optimizer_switch;
set optimizer_switch=default;
create table t1 (a int) engine=myisam;
-insert into t1 values (5),(3),(2),(7),(2),(5),(1);
+insert into t1 values (3),(2),(7),(2),(1);
create table t2 (b int, index idx(b)) engine=myisam;
-insert into t2 values (2),(3),(2),(1),(3),(4);
+insert into t2 values (2),(3),(2),(1),(3),(4),(1),(2),(1),(2);
+insert into t2 select b+10 from t2;
+insert into t2 select b+10 from t2;
insert into t2 select b+10 from t2;
insert into t2 select b+10 from t2;
insert into t2 select b+10 from t2;
@@ -3109,20 +3111,22 @@ insert into t2 select b+10 from t2;
insert into t2 select b+10 from t2;
analyze table t1,t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
explain
select a from t1, t2 where b between 1 and 2 and a in (select b from t2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
-1 PRIMARY t2 ref idx idx 5 test.t1.a 256 Using index; FirstMatch(t1)
-1 PRIMARY t2 range idx idx 5 NULL 2 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 1462 Using index; FirstMatch(t1)
+1 PRIMARY t2 range idx idx 5 NULL 5 Using where; Using index; Using join buffer (flat, BNL join)
explain
select a from t1 join t2 on b between 1 and 2 and a in (select b from t2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 7 Using where
-1 PRIMARY t2 ref idx idx 5 test.t1.a 256 Using index; FirstMatch(t1)
-1 PRIMARY t2 range idx idx 5 NULL 2 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t2 ref idx idx 5 test.t1.a 1462 Using index; FirstMatch(t1)
+1 PRIMARY t2 range idx idx 5 NULL 5 Using where; Using index; Using join buffer (flat, BNL join)
drop table t1,t2;
set optimizer_switch= @tmp_mdev12675;
#
@@ -3419,7 +3423,7 @@ EXPLAIN
SELECT a FROM t1 t WHERE a IN (SELECT b FROM t1, t2 WHERE b = a)
GROUP BY a HAVING a != 'z';
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t index idx_a idx_a 4 NULL 3 Using index
+1 PRIMARY t range idx_a idx_a 4 NULL 3 Using where; Using index
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using where
2 MATERIALIZED t1 ref idx_a idx_a 4 test.t2.b 2 Using index
@@ -3433,7 +3437,7 @@ EXPLAIN
SELECT a FROM t1 t WHERE a IN (SELECT b FROM t1, t2 WHERE b = a)
GROUP BY a HAVING a != 'z';
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t index idx_a idx_a 4 NULL 3 Using index
+1 PRIMARY t range idx_a idx_a 4 NULL 3 Using where; Using index
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 func 1
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using where
2 MATERIALIZED t1 ref idx_a idx_a 4 test.t2.b 2 Using index
diff --git a/mysql-test/main/subselect_sj_mat.result b/mysql-test/main/subselect_sj_mat.result
index 85c314e5fde..3fc8f9afd3e 100644
--- a/mysql-test/main/subselect_sj_mat.result
+++ b/mysql-test/main/subselect_sj_mat.result
@@ -38,6 +38,20 @@ create index it3i3 on t3i (c1, c2);
insert into t1i select * from t1;
insert into t2i select * from t2;
insert into t3i select * from t3;
+analyze table t1,t2,t3,t1i,t2i,t3i;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
+test.t3 analyze status OK
+test.t1i analyze status Engine-independent statistics collected
+test.t1i analyze status Table is already up to date
+test.t2i analyze status Engine-independent statistics collected
+test.t2i analyze status Table is already up to date
+test.t3i analyze status Engine-independent statistics collected
+test.t3i analyze status Table is already up to date
set @@optimizer_switch='materialization=on,in_to_exists=off,firstmatch=off';
/******************************************************************************
* Simple tests.
@@ -46,9 +60,9 @@ set @@optimizer_switch='materialization=on,in_to_exists=off,firstmatch=off';
explain extended
select * from t1 where a1 in (select b1 from t2 where b1 > '0');
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 99.22
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 99.22 Using where
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b1` > '0'
select * from t1 where a1 in (select b1 from t2 where b1 > '0');
@@ -58,9 +72,9 @@ a1 a2
explain extended
select * from t1 where a1 in (select b1 from t2 where b1 > '0' group by b1);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 99.22
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 8 func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 99.22 Using where
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b1` > '0'
select * from t1 where a1 in (select b1 from t2 where b1 > '0' group by b1);
@@ -70,9 +84,9 @@ a1 a2
explain extended
select * from t1 where (a1, a2) in (select b1, b2 from t2 where b1 > '0' group by b1, b2);
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 99.22
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 16 func,func 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 99.22 Using where
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t2`) where `test`.`t2`.`b1` > '0'
select * from t1 where (a1, a2) in (select b1, b2 from t2 where b1 > '0' group by b1, b2);
@@ -84,7 +98,7 @@ select * from t1 where (a1, a2) in (select b1, min(b2) from t2 where b1 > '0' gr
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 16 test.t1.a1,test.t1.a2 1 100.00
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where; Using temporary
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 99.22 Using where; Using temporary
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from <materialize> (/* select#2 */ select `test`.`t2`.`b1`,min(`test`.`t2`.`b2`) from `test`.`t2` where `test`.`t2`.`b1` > '0' group by `test`.`t2`.`b1`) join `test`.`t1` where `<subquery2>`.`b1` = `test`.`t1`.`a1` and `<subquery2>`.`min(b2)` = `test`.`t1`.`a2`
select * from t1 where (a1, a2) in (select b1, min(b2) from t2 where b1 > '0' group by b1);
@@ -130,7 +144,7 @@ select * from t1i where (a1, a2) in (select b1, max(b2) from t2i where b1 > '0'
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1i index it1i1,it1i2,it1i3 # # # 3 100.00 #
1 PRIMARY <subquery2> eq_ref distinct_key # # # 1 100.00 #
-2 MATERIALIZED t2i range it2i1,it2i3 # # # 3 100.00 #
+2 MATERIALIZED t2i range it2i1,it2i3 # # # 5 100.00 #
Warnings:
Note 1003 /* select#1 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from <materialize> (/* select#2 */ select `test`.`t2i`.`b1`,max(`test`.`t2i`.`b2`) from `test`.`t2i` where `test`.`t2i`.`b1` > '0' group by `test`.`t2i`.`b1`) join `test`.`t1i` where `<subquery2>`.`b1` = `test`.`t1i`.`a1` and `<subquery2>`.`max(b2)` = `test`.`t1i`.`a2`
select * from t1i where (a1, a2) in (select b1, max(b2) from t2i where b1 > '0' group by b1);
@@ -142,37 +156,55 @@ select * from t1i where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0'
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1i index it1i1,it1i2,it1i3 # # # 3 100.00 #
1 PRIMARY <subquery2> eq_ref distinct_key # # # 1 100.00 #
-2 MATERIALIZED t2i range it2i1,it2i3 # # # 3 100.00 #
+2 MATERIALIZED t2i range it2i1,it2i3 # # # 5 100.00 #
Warnings:
Note 1003 /* select#1 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from <materialize> (/* select#2 */ select `test`.`t2i`.`b1`,min(`test`.`t2i`.`b2`) from `test`.`t2i` where `test`.`t2i`.`b1` > '0' group by `test`.`t2i`.`b1`) join `test`.`t1i` where `<subquery2>`.`b1` = `test`.`t1i`.`a1` and `<subquery2>`.`min(b2)` = `test`.`t1i`.`a2`
select * from t1i where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0' group by b1);
a1 a2
1 - 01 2 - 01
1 - 02 2 - 02
+create table t2i_c like t2i;
+insert into t2i_c select * from t2i;
+insert into t2i_c select * from t2i;
+insert into t2i_c select * from t2i;
+insert into t2i_c select * from t2i;
+analyze table t2i_c;
+Table Op Msg_type Msg_text
+test.t2i_c analyze status Engine-independent statistics collected
+test.t2i_c analyze status OK
+show create table t2i_c;
+Table Create Table
+t2i_c CREATE TABLE `t2i_c` (
+ `b1` char(8) DEFAULT NULL,
+ `b2` char(8) DEFAULT NULL,
+ KEY `it2i1` (`b1`),
+ KEY `it2i2` (`b2`),
+ KEY `it2i3` (`b1`,`b2`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
explain extended
-select * from t1 where (a1, a2) in (select b1, max(b2) from t2i group by b1);
+select * from t1 where (a1, a2) in (select b1, max(b2) from t2i_c group by b1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 16 test.t1.a1,test.t1.a2 1 100.00
-2 MATERIALIZED t2i range NULL it2i3 9 NULL 3 100.00 Using index for group-by
+2 MATERIALIZED t2i_c range NULL it2i3 9 NULL 4 100.00 Using index for group-by
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from <materialize> (/* select#2 */ select `test`.`t2i`.`b1`,max(`test`.`t2i`.`b2`) from `test`.`t2i` group by `test`.`t2i`.`b1`) join `test`.`t1` where `<subquery2>`.`b1` = `test`.`t1`.`a1` and `<subquery2>`.`max(b2)` = `test`.`t1`.`a2`
-select * from t1 where (a1, a2) in (select b1, max(b2) from t2i group by b1);
+Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from <materialize> (/* select#2 */ select `test`.`t2i_c`.`b1`,max(`test`.`t2i_c`.`b2`) from `test`.`t2i_c` group by `test`.`t2i_c`.`b1`) join `test`.`t1` where `<subquery2>`.`b1` = `test`.`t1`.`a1` and `<subquery2>`.`max(b2)` = `test`.`t1`.`a2`
+select * from t1 where (a1, a2) in (select b1, max(b2) from t2i_c group by b1);
a1 a2
1 - 01 2 - 01
1 - 02 2 - 02
-prepare st1 from "explain select * from t1 where (a1, a2) in (select b1, max(b2) from t2i group by b1)";
+prepare st1 from "explain select * from t1 where (a1, a2) in (select b1, max(b2) from t2i_c group by b1)";
execute st1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 16 test.t1.a1,test.t1.a2 1
-2 MATERIALIZED t2i range NULL it2i3 9 NULL 3 Using index for group-by
+2 MATERIALIZED t2i_c range NULL it2i3 9 NULL 4 Using index for group-by
execute st1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 Using where
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 16 test.t1.a1,test.t1.a2 1
-2 MATERIALIZED t2i range NULL it2i3 9 NULL 3 Using index for group-by
-prepare st2 from "select * from t1 where (a1, a2) in (select b1, max(b2) from t2i group by b1)";
+2 MATERIALIZED t2i_c range NULL it2i3 9 NULL 4 Using index for group-by
+prepare st2 from "select * from t1 where (a1, a2) in (select b1, max(b2) from t2i_c group by b1)";
execute st2;
a1 a2
1 - 01 2 - 01
@@ -181,12 +213,13 @@ execute st2;
a1 a2
1 - 01 2 - 01
1 - 02 2 - 02
+drop table t2i_c;
explain extended
select * from t1 where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0' group by b1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 16 test.t1.a1,test.t1.a2 1 100.00
-2 MATERIALIZED t2i range it2i1,it2i3 it2i3 18 NULL 3 100.00 Using where; Using index for group-by
+2 MATERIALIZED t2i range it2i1,it2i3 it2i3 9 NULL 5 100.00 Using where; Using index
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from <materialize> (/* select#2 */ select `test`.`t2i`.`b1`,min(`test`.`t2i`.`b2`) from `test`.`t2i` where `test`.`t2i`.`b1` > '0' group by `test`.`t2i`.`b1`) join `test`.`t1` where `<subquery2>`.`b1` = `test`.`t1`.`a1` and `<subquery2>`.`min(b2)` = `test`.`t1`.`a2`
select * from t1 where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0' group by b1);
@@ -298,12 +331,12 @@ where (a1, a2) in (select b1, b2 from t2 where b1 > '0') and
(a1, a2) in (select c1, c2 from t3
where (c1, c2) in (select b1, b2 from t2i where b2 > '0'));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 98.44
1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 16 func,func 1 100.00
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 16 func,func 1 100.00
-3 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
-3 MATERIALIZED t2i index it2i1,it2i2,it2i3 it2i3 18 NULL 5 80.00 Using where; Using index; Using join buffer (flat, BNL join)
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+3 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 98.44 Using where
+3 MATERIALIZED t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t3.c1,test.t3.c2 1 100.00 Using index
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 98.44 Using where
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t2`) semi join (`test`.`t2i` join `test`.`t3`) where `test`.`t2i`.`b1` = `test`.`t3`.`c1` and `test`.`t2i`.`b2` = `test`.`t3`.`c2` and `test`.`t2`.`b1` > '0' and `test`.`t3`.`c2` > '0'
select * from t1
@@ -322,7 +355,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t2i index it2i1,it2i2,it2i3 # # # 5 50.00 #
1 PRIMARY t1i ref it1i1,it1i2,it1i3 # # # 1 100.00 #
1 PRIMARY t3i ref it3i1,it3i2,it3i3 # # # 1 100.00 #
-1 PRIMARY t2i ref it2i1,it2i2,it2i3 # # # 2 100.00 #
+1 PRIMARY t2i ref it2i1,it2i2,it2i3 # # # 1 100.00 #
Warnings:
Note 1003 select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` semi join (`test`.`t2i`) semi join (`test`.`t2i` join `test`.`t3i`) where `test`.`t1i`.`a1` = `test`.`t2i`.`b1` and `test`.`t3i`.`c1` = `test`.`t2i`.`b1` and `test`.`t2i`.`b1` = `test`.`t2i`.`b1` and `test`.`t1i`.`a2` = `test`.`t2i`.`b2` and `test`.`t3i`.`c2` = `test`.`t2i`.`b2` and `test`.`t2i`.`b2` = `test`.`t2i`.`b2` and `test`.`t2i`.`b1` > '0' and `test`.`t2i`.`b2` > '0'
select * from t1i
@@ -340,12 +373,12 @@ b2 in (select c2 from t3 where c2 LIKE '%03')) and
(a1, a2) in (select c1, c2 from t3
where (c1, c2) in (select b1, b2 from t2i where b2 > '0'));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 99.22
1 PRIMARY <subquery5> eq_ref distinct_key distinct_key 16 func,func 1 100.00
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 16 func,func 1 100.00
-5 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
-5 MATERIALIZED t2i index it2i1,it2i2,it2i3 it2i3 18 NULL 5 80.00 Using where; Using index; Using join buffer (flat, BNL join)
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+5 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 99.22 Using where
+5 MATERIALIZED t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t3.c1,test.t3.c2 1 100.00 Using index
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 99.22 Using where
4 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
3 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
Warnings:
@@ -366,11 +399,11 @@ b2 in (select c2 from t3 t3b where c2 LIKE '%03')) and
(a1, a2) in (select c1, c2 from t3 t3c
where (c1, c2) in (select b1, b2 from t2i where b2 > '0'));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 99.22
1 PRIMARY <subquery5> eq_ref distinct_key distinct_key 16 func,func 1 100.00
-1 PRIMARY t2 ALL NULL NULL NULL NULL 5 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
-5 MATERIALIZED t3c ALL NULL NULL NULL NULL 4 100.00 Using where
-5 MATERIALIZED t2i index it2i1,it2i2,it2i3 it2i3 18 NULL 5 80.00 Using where; Using index; Using join buffer (flat, BNL join)
+1 PRIMARY t2 ALL NULL NULL NULL NULL 5 99.22 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
+5 MATERIALIZED t3c ALL NULL NULL NULL NULL 4 99.22 Using where
+5 MATERIALIZED t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t3c.c1,test.t3c.c2 1 100.00 Using index
4 MATERIALIZED t3b ALL NULL NULL NULL NULL 4 100.00 Using where
3 DEPENDENT SUBQUERY t3a ALL NULL NULL NULL NULL 4 100.00 Using where
Warnings:
@@ -399,18 +432,18 @@ where (a1, a2) in (select b1, b2 from t2i where b1 > '0') and
(a1, a2) in (select c1, c2 from t3i
where (c1, c2) in (select b1, b2 from t2i where b2 > '0')));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL # # # 3 100.00 #
+1 PRIMARY t1 ALL NULL # # # 3 99.22 #
1 PRIMARY <subquery5> eq_ref distinct_key # # # 1 100.00 #
1 PRIMARY <subquery2> eq_ref distinct_key # # # 1 100.00 #
-5 MATERIALIZED t3 ALL NULL # # # 4 100.00 #
-5 MATERIALIZED t2i index it2i1,it2i2,it2i3 # # # 5 80.00 #
-2 MATERIALIZED t2 ALL NULL # # # 5 100.00 #
+5 MATERIALIZED t3 ALL NULL # # # 4 99.22 #
+5 MATERIALIZED t2i ref it2i1,it2i2,it2i3 # # # 1 100.00 #
+2 MATERIALIZED t2 ALL NULL # # # 5 99.22 #
4 MATERIALIZED t3 ALL NULL # # # 4 100.00 #
3 MATERIALIZED t3 ALL NULL # # # 4 100.00 #
7 UNION t2i index it2i1,it2i2,it2i3 # # # 5 50.00 #
7 UNION t1i ref it1i1,it1i2,it1i3 # # # 1 100.00 #
7 UNION t3i ref it3i1,it3i2,it3i3 # # # 1 100.00 #
-7 UNION t2i ref it2i1,it2i2,it2i3 # # # 2 100.00 #
+7 UNION t2i ref it2i1,it2i2,it2i3 # # # 1 100.00 #
NULL UNION RESULT <union1,7> ALL NULL # # # NULL NULL #
Warnings:
Note 1003 (/* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t2`) semi join (`test`.`t2i` join `test`.`t3`) where `test`.`t2i`.`b1` = `test`.`t3`.`c1` and `test`.`t2i`.`b2` = `test`.`t3`.`c2` and (<expr_cache><`test`.`t2`.`b2`>(<in_optimizer>(`test`.`t2`.`b2`,`test`.`t2`.`b2` in ( <materialize> (/* select#3 */ select `test`.`t3`.`c2` from `test`.`t3` where `test`.`t3`.`c2` like '%02' ), <primary_index_lookup>(`test`.`t2`.`b2` in <temporary table> on distinct_key where `test`.`t2`.`b2` = `<subquery3>`.`c2`)))) or <expr_cache><`test`.`t2`.`b2`>(<in_optimizer>(`test`.`t2`.`b2`,`test`.`t2`.`b2` in ( <materialize> (/* select#4 */ select `test`.`t3`.`c2` from `test`.`t3` where `test`.`t3`.`c2` like '%03' ), <primary_index_lookup>(`test`.`t2`.`b2` in <temporary table> on distinct_key where `test`.`t2`.`b2` = `<subquery4>`.`c2`))))) and `test`.`t3`.`c2` > '0') union (/* select#7 */ select `test`.`t1i`.`a1` AS `a1`,`test`.`t1i`.`a2` AS `a2` from `test`.`t1i` semi join (`test`.`t2i`) semi join (`test`.`t2i` join `test`.`t3i`) where `test`.`t1i`.`a1` = `test`.`t2i`.`b1` and `test`.`t3i`.`c1` = `test`.`t2i`.`b1` and `test`.`t2i`.`b1` = `test`.`t2i`.`b1` and `test`.`t1i`.`a2` = `test`.`t2i`.`b2` and `test`.`t3i`.`c2` = `test`.`t2i`.`b2` and `test`.`t2i`.`b2` = `test`.`t2i`.`b2` and `test`.`t2i`.`b1` > '0' and `test`.`t2i`.`b2` > '0')
@@ -435,12 +468,12 @@ where (a1, a2) in (select * from t1 where a1 > '0' UNION select * from t2 where
(a1, a2) in (select c1, c2 from t3
where (c1, c2) in (select b1, b2 from t2i where b2 > '0'));
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+1 PRIMARY t1 ALL NULL NULL NULL NULL 3 99.22 Using where
1 PRIMARY <subquery4> eq_ref distinct_key distinct_key 16 func,func 1 100.00
-4 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
-4 MATERIALIZED t2i index it2i1,it2i2,it2i3 it2i3 18 NULL 5 80.00 Using where; Using index; Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+4 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 99.22 Using where
+4 MATERIALIZED t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t3.c1,test.t3.c2 1 100.00 Using index
+2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 3 99.22 Using where
+3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 5 99.22 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t2i` join `test`.`t3`) where `test`.`t2i`.`b1` = `test`.`t3`.`c1` and `test`.`t2i`.`b2` = `test`.`t3`.`c2` and <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select `test`.`t1`.`a1`,`test`.`t1`.`a2` from `test`.`t1` where `test`.`t1`.`a1` > '0' and <cache>(`test`.`t1`.`a1`) = `test`.`t1`.`a1` and <cache>(`test`.`t1`.`a2`) = `test`.`t1`.`a2` union /* select#3 */ select `test`.`t2`.`b1`,`test`.`t2`.`b2` from `test`.`t2` where `test`.`t2`.`b1` < '9' and <cache>(`test`.`t1`.`a1`) = `test`.`t2`.`b1` and <cache>(`test`.`t1`.`a2`) = `test`.`t2`.`b2`))) and `test`.`t3`.`c2` > '0'
@@ -459,15 +492,15 @@ where (c1, c2) in (select b1, b2 from t2i where b2 > '0')) and
a1 = c1;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY t3 ALL NULL NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY <subquery4> eq_ref distinct_key distinct_key 16 func,func 1 100.00
-4 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 100.00 Using where
-4 MATERIALIZED t2i index it2i1,it2i2,it2i3 it2i3 18 NULL 5 80.00 Using where; Using index; Using join buffer (flat, BNL join)
-2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+1 PRIMARY <subquery4> ALL distinct_key NULL NULL NULL 4 100.00 Using where
+1 PRIMARY t3 ALL NULL NULL NULL NULL 4 99.22 Using where; Using join buffer (flat, BNL join)
+4 MATERIALIZED t3 ALL NULL NULL NULL NULL 4 99.22 Using where
+4 MATERIALIZED t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t3.c1,test.t3.c2 1 100.00 Using index
+2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 3 99.22 Using where
+3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 5 99.22 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2`,`test`.`t3`.`c1` AS `c1`,`test`.`t3`.`c2` AS `c2` from `test`.`t1` semi join (`test`.`t2i` join `test`.`t3`) join `test`.`t3` where `test`.`t3`.`c1` = `test`.`t1`.`a1` and `test`.`t2i`.`b1` = `test`.`t3`.`c1` and `test`.`t2i`.`b2` = `test`.`t3`.`c2` and <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select `test`.`t1`.`a1`,`test`.`t1`.`a2` from `test`.`t1` where `test`.`t1`.`a1` > '0' and <cache>(`test`.`t1`.`a1`) = `test`.`t1`.`a1` and <cache>(`test`.`t1`.`a2`) = `test`.`t1`.`a2` union /* select#3 */ select `test`.`t2`.`b1`,`test`.`t2`.`b2` from `test`.`t2` where `test`.`t2`.`b1` < '9' and <cache>(`test`.`t1`.`a1`) = `test`.`t2`.`b1` and <cache>(`test`.`t1`.`a2`) = `test`.`t2`.`b2`))) and `test`.`t3`.`c2` > '0'
+Note 1003 /* select#1 */ select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2`,`test`.`t3`.`c1` AS `c1`,`test`.`t3`.`c2` AS `c2` from `test`.`t1` semi join (`test`.`t2i` join `test`.`t3`) join `test`.`t3` where `test`.`t3`.`c1` = `test`.`t1`.`a1` and `test`.`t2i`.`b1` = `test`.`t3`.`c1` and `test`.`t3`.`c1` = `test`.`t1`.`a1` and `test`.`t2i`.`b2` = `test`.`t3`.`c2` and `test`.`t3`.`c2` = `test`.`t3`.`c2` and <expr_cache><`test`.`t1`.`a1`,`test`.`t1`.`a2`>(<in_optimizer>((`test`.`t1`.`a1`,`test`.`t1`.`a2`),<exists>(/* select#2 */ select `test`.`t1`.`a1`,`test`.`t1`.`a2` from `test`.`t1` where `test`.`t1`.`a1` > '0' and <cache>(`test`.`t1`.`a1`) = `test`.`t1`.`a1` and <cache>(`test`.`t1`.`a2`) = `test`.`t1`.`a2` union /* select#3 */ select `test`.`t2`.`b1`,`test`.`t2`.`b2` from `test`.`t2` where `test`.`t2`.`b1` < '9' and <cache>(`test`.`t1`.`a1`) = `test`.`t2`.`b1` and <cache>(`test`.`t1`.`a2`) = `test`.`t2`.`b2`))) and `test`.`t3`.`c2` > '0'
select * from t1, t3
where (a1, a2) in (select * from t1 where a1 > '0' UNION select * from t2 where b1 < '9') and
(c1, c2) in (select c1, c2 from t3
@@ -485,8 +518,8 @@ select * from t3
where c1 in (select a1 from t1 where a1 > '0' UNION select b1 from t2 where b1 < '9');
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t3 ALL NULL NULL NULL NULL 4 100.00 Using where
-2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 5 100.00 Using where
+2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 3 99.22 Using where
+3 DEPENDENT UNION t2 ALL NULL NULL NULL NULL 5 99.22 Using where
NULL UNION RESULT <union2,3> ALL NULL NULL NULL NULL NULL NULL
Warnings:
Note 1003 /* select#1 */ select `test`.`t3`.`c1` AS `c1`,`test`.`t3`.`c2` AS `c2` from `test`.`t3` where <expr_cache><`test`.`t3`.`c1`>(<in_optimizer>(`test`.`t3`.`c1`,<exists>(/* select#2 */ select `test`.`t1`.`a1` from `test`.`t1` where `test`.`t1`.`a1` > '0' and <cache>(`test`.`t3`.`c1`) = `test`.`t1`.`a1` union /* select#3 */ select `test`.`t2`.`b1` from `test`.`t2` where `test`.`t2`.`b1` < '9' and <cache>(`test`.`t3`.`c1`) = `test`.`t2`.`b1`)))
@@ -505,7 +538,7 @@ b2 in (select c2 from t3 t3b where c2 LIKE '%03')) and
where (c1, c2) in (select b1, b2 from t2i where b2 > '0' or b2 = a2));
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where
-1 PRIMARY t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t1.a1,test.t1.a2 2 100.00 Using index; Start temporary
+1 PRIMARY t2i ref it2i1,it2i2,it2i3 it2i3 18 test.t1.a1,test.t1.a2 1 100.00 Using index; Start temporary
1 PRIMARY t3c ALL NULL NULL NULL NULL 4 100.00 Using where; End temporary; Using join buffer (flat, BNL join)
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 100.00 Using where; Start temporary; End temporary; Using join buffer (flat, BNL join)
4 MATERIALIZED t3b ALL NULL NULL NULL NULL 4 100.00 Using where
@@ -701,7 +734,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00
1 PRIMARY t1_16 ALL NULL NULL NULL NULL 3 100.00 Using where; Start temporary; Using join buffer (flat, BNL join)
1 PRIMARY t2_16 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
-1 PRIMARY t3 ALL NULL NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t3 ALL NULL NULL NULL NULL 4 99.22 Using where; Using join buffer (flat, BNL join)
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 100.00 Using where; End temporary; Using join buffer (flat, BNL join)
Warnings:
Note 1003 select `test`.`t1`.`a1` AS `a1`,`test`.`t1`.`a2` AS `a2` from `test`.`t1` semi join (`test`.`t3` join `test`.`t2_16` join `test`.`t2` join `test`.`t1_16`) where `test`.`t2`.`b1` = `test`.`t3`.`c1` and `test`.`t2_16`.`b1` = `test`.`t1_16`.`a1` and `test`.`t2_16`.`b2` = `test`.`t1_16`.`a2` and `test`.`t2`.`b2` = substr(`test`.`t1_16`.`a2`,1,6) and `test`.`t3`.`c2` > '0' and concat(`test`.`t1`.`a1`,'x') = left(`test`.`t1_16`.`a1`,8)
@@ -1939,7 +1972,7 @@ EXPLAIN SELECT * FROM t1 WHERE a IN (SELECT * FROM t2 GROUP BY a HAVING a > 1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t1.a 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using temporary
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using where; Using temporary
flush status;
CREATE TABLE t3 SELECT * FROM t1 WHERE a IN (SELECT * FROM t2 GROUP BY a HAVING a > 1);
SHOW STATUS LIKE 'Created_tmp_tables';
@@ -1963,7 +1996,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 3 100.00 Using where; Using join buffer (flat, BNL join)
2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 100.00
Warnings:
-Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (/* select#2 */ select max(`test`.`t2`.`c`) from `test`.`t2`) join `test`.`t1` where `test`.`t1`.`b` = 7 and `test`.`t1`.`a` = `<subquery2>`.`MAX(c)` and (<cache>(/*always not null*/ 1 is null) or `<subquery2>`.`MAX(c)` = 7)
+Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (/* select#2 */ select max(`test`.`t2`.`c`) from `test`.`t2` having `MAX(c)` is null or `MAX(c)` = 7) join `test`.`t1` where `test`.`t1`.`b` = 7 and `test`.`t1`.`a` = `<subquery2>`.`MAX(c)` and (<cache>(/*always not null*/ 1 is null) or `<subquery2>`.`MAX(c)` = 7)
SELECT * FROM t1
WHERE a IN (SELECT MAX(c) FROM t2) AND b=7 AND (a IS NULL OR a=b);
a b
@@ -2219,11 +2252,11 @@ drop database mysqltest4;
# (both 1st and further executions)
CREATE TABLE t1 (a INT NOT NULL) ENGINE=MyISAM;
INSERT INTO t1 VALUES (0),(8);
-SELECT a FROM (SELECT DISTINCT * FROM t1) AS sq WHERE a IN (SELECT MIN(t2.a) FROM (t1 AS t2));
+SELECT a FROM (SELECT DISTINCT * FROM t1) AS sq WHERE a IN (SELECT MIN(t2.a) FROM t1 AS t2);
a
0
PREPARE stmt FROM "
-SELECT a FROM (SELECT DISTINCT * FROM t1) AS sq WHERE a IN (SELECT MIN(t2.a) FROM (t1 AS t2))
+SELECT a FROM (SELECT DISTINCT * FROM t1) AS sq WHERE a IN (SELECT MIN(t2.a) FROM t1 AS t2)
";
execute stmt;
a
@@ -2566,7 +2599,7 @@ select * from t1 where (a,b) in (select max(a),b from t2 group by b);
show status where Variable_name like 'Handler_read%' or Variable_name like 'Handler_%write%';
Variable_name Value
Handler_read_first 0
-Handler_read_key 5000
+Handler_read_key 5004
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
diff --git a/mysql-test/main/subselect_sj_mat.test b/mysql-test/main/subselect_sj_mat.test
index 4bd8dfdf058..ac0baee3728 100644
--- a/mysql-test/main/subselect_sj_mat.test
+++ b/mysql-test/main/subselect_sj_mat.test
@@ -55,6 +55,7 @@ create index it3i3 on t3i (c1, c2);
insert into t1i select * from t1;
insert into t2i select * from t2;
insert into t3i select * from t3;
+analyze table t1,t2,t3,t1i,t2i,t3i;
# force the use of materialization
set @@optimizer_switch='materialization=on,in_to_exists=off,firstmatch=off';
@@ -108,16 +109,24 @@ select * from t1i where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0'
select * from t1i where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0' group by b1);
# BUG#31639: Wrong plan for uncorrelated subquery when loose scan is applicable.
+create table t2i_c like t2i;
+insert into t2i_c select * from t2i;
+insert into t2i_c select * from t2i;
+insert into t2i_c select * from t2i;
+insert into t2i_c select * from t2i;
+analyze table t2i_c;
+show create table t2i_c;
explain extended
-select * from t1 where (a1, a2) in (select b1, max(b2) from t2i group by b1);
-select * from t1 where (a1, a2) in (select b1, max(b2) from t2i group by b1);
+select * from t1 where (a1, a2) in (select b1, max(b2) from t2i_c group by b1);
+select * from t1 where (a1, a2) in (select b1, max(b2) from t2i_c group by b1);
-prepare st1 from "explain select * from t1 where (a1, a2) in (select b1, max(b2) from t2i group by b1)";
+prepare st1 from "explain select * from t1 where (a1, a2) in (select b1, max(b2) from t2i_c group by b1)";
execute st1;
execute st1;
-prepare st2 from "select * from t1 where (a1, a2) in (select b1, max(b2) from t2i group by b1)";
+prepare st2 from "select * from t1 where (a1, a2) in (select b1, max(b2) from t2i_c group by b1)";
execute st2;
execute st2;
+drop table t2i_c;
explain extended
select * from t1 where (a1, a2) in (select b1, min(b2) from t2i where b1 > '0' group by b1);
@@ -1850,9 +1859,9 @@ drop database mysqltest4;
CREATE TABLE t1 (a INT NOT NULL) ENGINE=MyISAM;
INSERT INTO t1 VALUES (0),(8);
-SELECT a FROM (SELECT DISTINCT * FROM t1) AS sq WHERE a IN (SELECT MIN(t2.a) FROM (t1 AS t2));
+SELECT a FROM (SELECT DISTINCT * FROM t1) AS sq WHERE a IN (SELECT MIN(t2.a) FROM t1 AS t2);
PREPARE stmt FROM "
-SELECT a FROM (SELECT DISTINCT * FROM t1) AS sq WHERE a IN (SELECT MIN(t2.a) FROM (t1 AS t2))
+SELECT a FROM (SELECT DISTINCT * FROM t1) AS sq WHERE a IN (SELECT MIN(t2.a) FROM t1 AS t2)
";
execute stmt;
execute stmt;
diff --git a/mysql-test/main/system_mysql_db.result b/mysql-test/main/system_mysql_db.result
index 2abcfb92ffa..e7cd9bc628b 100644
--- a/mysql-test/main/system_mysql_db.result
+++ b/mysql-test/main/system_mysql_db.result
@@ -6,12 +6,12 @@ db
event
func
general_log
+global_priv
gtid_slave_pos
help_category
help_keyword
help_relation
help_topic
-host
index_stats
innodb_index_stats
innodb_table_stats
@@ -59,84 +59,10 @@ db CREATE TABLE `db` (
`Delete_history_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
PRIMARY KEY (`Host`,`Db`,`User`),
KEY `User` (`User`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Database privileges'
-show create table host;
-Table Create Table
-host CREATE TABLE `host` (
- `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
- `Db` char(64) COLLATE utf8_bin NOT NULL DEFAULT '',
- `Select_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Insert_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Update_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Delete_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Drop_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Grant_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `References_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Index_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Show_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Execute_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Trigger_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- PRIMARY KEY (`Host`,`Db`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Host privileges; Merged with database privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Database privileges'
show create table user;
-Table Create Table
-user CREATE TABLE `user` (
- `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
- `User` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
- `Password` char(41) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
- `Select_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Insert_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Update_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Delete_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Drop_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Reload_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Shutdown_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Process_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `File_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Grant_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `References_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Index_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Show_db_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Super_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Execute_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Repl_slave_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Repl_client_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Show_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_user_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Event_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Trigger_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_tablespace_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Delete_history_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `ssl_type` enum('','ANY','X509','SPECIFIED') CHARACTER SET utf8 NOT NULL DEFAULT '',
- `ssl_cipher` blob NOT NULL,
- `x509_issuer` blob NOT NULL,
- `x509_subject` blob NOT NULL,
- `max_questions` int(11) unsigned NOT NULL DEFAULT 0,
- `max_updates` int(11) unsigned NOT NULL DEFAULT 0,
- `max_connections` int(11) unsigned NOT NULL DEFAULT 0,
- `max_user_connections` int(11) NOT NULL DEFAULT 0,
- `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '',
- `authentication_string` text COLLATE utf8_bin NOT NULL,
- `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
- `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000,
- PRIMARY KEY (`Host`,`User`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges'
+View Create View character_set_client collation_connection
+user CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,'N' AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci
show create table func;
Table Create Table
func CREATE TABLE `func` (
@@ -145,7 +71,15 @@ func CREATE TABLE `func` (
`dl` char(128) COLLATE utf8_bin NOT NULL DEFAULT '',
`type` enum('function','aggregate') CHARACTER SET utf8 NOT NULL,
PRIMARY KEY (`name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='User defined functions'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='User defined functions'
+show create table global_priv;
+Table Create Table
+global_priv CREATE TABLE `global_priv` (
+ `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `User` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `Priv` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL DEFAULT '{}' CHECK (json_valid(`Priv`)),
+ PRIMARY KEY (`Host`,`User`)
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Users and global privileges'
show create table tables_priv;
Table Create Table
tables_priv CREATE TABLE `tables_priv` (
@@ -159,7 +93,7 @@ tables_priv CREATE TABLE `tables_priv` (
`Column_priv` set('Select','Insert','Update','References') CHARACTER SET utf8 NOT NULL DEFAULT '',
PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`),
KEY `Grantor` (`Grantor`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Table privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Table privileges'
show create table columns_priv;
Table Create Table
columns_priv CREATE TABLE `columns_priv` (
@@ -171,7 +105,7 @@ columns_priv CREATE TABLE `columns_priv` (
`Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
`Column_priv` set('Select','Insert','Update','References') CHARACTER SET utf8 NOT NULL DEFAULT '',
PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`,`Column_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Column privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Column privileges'
show create table procs_priv;
Table Create Table
procs_priv CREATE TABLE `procs_priv` (
@@ -185,7 +119,7 @@ procs_priv CREATE TABLE `procs_priv` (
`Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
PRIMARY KEY (`Host`,`Db`,`User`,`Routine_name`,`Routine_type`),
KEY `Grantor` (`Grantor`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Procedure privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Procedure privileges'
show create table servers;
Table Create Table
servers CREATE TABLE `servers` (
@@ -199,7 +133,7 @@ servers CREATE TABLE `servers` (
`Wrapper` char(64) NOT NULL DEFAULT '',
`Owner` char(64) NOT NULL DEFAULT '',
PRIMARY KEY (`Server_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='MySQL Foreign Servers table'
+) ENGINE=Aria DEFAULT CHARSET=utf8 PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='MySQL Foreign Servers table'
show create table proc;
Table Create Table
proc CREATE TABLE `proc` (
@@ -217,7 +151,7 @@ proc CREATE TABLE `proc` (
`definer` char(141) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '',
`created` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
`modified` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
- `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') NOT NULL DEFAULT '',
+ `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NOT NULL DEFAULT '',
`comment` text CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,
`character_set_client` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`collation_connection` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
@@ -225,7 +159,7 @@ proc CREATE TABLE `proc` (
`body_utf8` longblob DEFAULT NULL,
`aggregate` enum('NONE','GROUP') NOT NULL DEFAULT 'NONE',
PRIMARY KEY (`db`,`name`,`type`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Stored Procedures'
+) ENGINE=Aria DEFAULT CHARSET=utf8 PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Stored Procedures'
show create table event;
Table Create Table
event CREATE TABLE `event` (
@@ -243,7 +177,7 @@ event CREATE TABLE `event` (
`ends` datetime DEFAULT NULL,
`status` enum('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL DEFAULT 'ENABLED',
`on_completion` enum('DROP','PRESERVE') NOT NULL DEFAULT 'DROP',
- `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') NOT NULL DEFAULT '',
+ `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NOT NULL DEFAULT '',
`comment` char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '',
`originator` int(10) unsigned NOT NULL,
`time_zone` char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM',
@@ -252,7 +186,7 @@ event CREATE TABLE `event` (
`db_collation` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`body_utf8` longblob DEFAULT NULL,
PRIMARY KEY (`db`,`name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Events'
+) ENGINE=Aria DEFAULT CHARSET=utf8 PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Events'
show create table general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
@@ -287,7 +221,7 @@ table_stats CREATE TABLE `table_stats` (
`table_name` varchar(64) COLLATE utf8_bin NOT NULL,
`cardinality` bigint(21) unsigned DEFAULT NULL,
PRIMARY KEY (`db_name`,`table_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Tables'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Tables'
show create table column_stats;
Table Create Table
column_stats CREATE TABLE `column_stats` (
@@ -303,7 +237,7 @@ column_stats CREATE TABLE `column_stats` (
`hist_type` enum('SINGLE_PREC_HB','DOUBLE_PREC_HB') COLLATE utf8_bin DEFAULT NULL,
`histogram` varbinary(255) DEFAULT NULL,
PRIMARY KEY (`db_name`,`table_name`,`column_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Columns'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Columns'
show create table index_stats;
Table Create Table
index_stats CREATE TABLE `index_stats` (
@@ -313,6 +247,6 @@ index_stats CREATE TABLE `index_stats` (
`prefix_arity` int(11) unsigned NOT NULL,
`avg_frequency` decimal(12,4) DEFAULT NULL,
PRIMARY KEY (`db_name`,`table_name`,`index_name`,`prefix_arity`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Indexes'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Indexes'
show tables;
Tables_in_test
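
The result file above shows the MyISAM privilege tables being replaced by Aria tables plus a single mysql.global_priv table, with mysql.user kept only as a compatibility view that decodes the packed access bitmap from the JSON Priv column. As a minimal sketch (assuming a server already running this schema, and reusing only bit positions taken from the view definition above: 1 = Select, 1024 = Grant), the same decoding can be done by hand:

select Host, User,
       json_value(Priv, '$.plugin') as plugin,          -- authentication plugin stored in the JSON document
       json_value(Priv, '$.access') as access_bits,     -- packed privilege bitmap
       if(json_value(Priv, '$.access') & 1, 'Y', 'N') as Select_priv,
       if(json_value(Priv, '$.access') & 1024, 'Y', 'N') as Grant_priv
from mysql.global_priv;
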
diff --git a/mysql-test/main/no_password_column-mdev-11170.result b/mysql-test/main/system_mysql_db_507.result
index 6195100436d..767b8fee102 100644
--- a/mysql-test/main/no_password_column-mdev-11170.result
+++ b/mysql-test/main/system_mysql_db_507.result
@@ -3,8 +3,6 @@
# Fatal error: mysql.user table is damaged or in
# unsupported 3.20 format
#
-create table backup_user like mysql.user;
-insert into backup_user select * from mysql.user;
#
# Original mysql.user table
#
@@ -60,7 +58,12 @@ max_statement_time decimal(12,6) NO 0.000000
#
# Drop the password column.
#
-alter table mysql.user drop column password;
+alter table mysql.user drop column password,
+drop column is_role,
+drop column default_role,
+add column password_last_changed timestamp null default null after password_expired,
+add column password_lifetime smallint unsigned after password_last_changed,
+add column account_locked enum('n','y') character set utf8 not null default 'n' after password_lifetime;
flush privileges;
#
# Create users without the password column present.
@@ -87,7 +90,7 @@ select user, host, select_priv, plugin, authentication_string from mysql.user
where user like "%oo"
order by user;
user host select_priv plugin authentication_string
-foo % N
+foo % N mysql_native_password
goo % N mysql_native_password *F3A2A51A9B0F2BE2468926B4132313728C250DBF
ioo % N mysql_old_password 7a8f886d28473e85
#
@@ -162,8 +165,59 @@ foo % Y mysql_native_password *E8D46CE25265E545D225A8A6F1BAF642FEBEE5CB
goo % Y mysql_native_password *F3A2A51A9B0F2BE2468926B4132313728C250DBF
ioo % Y mysql_old_password 7a8f886d28473e85
#
-# Reset to final original state.
+# Test account locking
#
-drop table mysql.user;
-rename table backup_user to mysql.user;
+create user user1@localhost account lock;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Access denied, this account is locked
flush privileges;
+connect(localhost,user1,,test,MYSQL_PORT,MYSQL_SOCK);
+connect con1,localhost,user1;
+ERROR HY000: Access denied, this account is locked
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost' ACCOUNT LOCK
+alter user user1@localhost account unlock;
+connect con1,localhost,user1;
+disconnect con1;
+connection default;
+show create user user1@localhost;
+CREATE USER for user1@localhost
+CREATE USER 'user1'@'localhost'
+#
+# Test password expiration fields are loaded correctly
+#
+create user user@localhost;
+show create user user@localhost;
+CREATE USER for user@localhost
+CREATE USER 'user'@'localhost'
+alter user user@localhost password expire;
+show create user user@localhost;
+CREATE USER for user@localhost
+CREATE USER 'user'@'localhost' PASSWORD EXPIRE
+set password for user@localhost= password('');
+alter user user@localhost password expire default;
+show create user user@localhost;
+CREATE USER for user@localhost
+CREATE USER 'user'@'localhost'
+alter user user@localhost password expire never;
+show create user user@localhost;
+CREATE USER for user@localhost
+CREATE USER 'user'@'localhost' PASSWORD EXPIRE NEVER
+alter user user@localhost password expire interval 123 day;
+show create user user@localhost;
+CREATE USER for user@localhost
+CREATE USER 'user'@'localhost' PASSWORD EXPIRE INTERVAL 123 DAY
+alter user user@localhost password expire;
+show create user user@localhost;
+CREATE USER for user@localhost
+CREATE USER 'user'@'localhost' PASSWORD EXPIRE
+set password for user@localhost= password('');
+show create user user@localhost;
+CREATE USER for user@localhost
+CREATE USER 'user'@'localhost' PASSWORD EXPIRE INTERVAL 123 DAY
+drop user user@localhost;
+#
+# Reset to final original state.
+#
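
The expected results above exercise account locking and password expiration against the new privilege storage. A condensed, illustrative sketch of the statements the test runs (all taken from the diff; the ACCOUNT LOCK error is reported as ER_ACCOUNT_HAS_BEEN_LOCKED):

create user user1@localhost account lock;                      -- new connections for user1 are refused
alter user user1@localhost account unlock;                     -- connections succeed again
alter user user1@localhost password expire interval 123 day;   -- SHOW CREATE USER reports PASSWORD EXPIRE INTERVAL 123 DAY
show create user user1@localhost;
drop user user1@localhost;
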
diff --git a/mysql-test/main/no_password_column-mdev-11170.test b/mysql-test/main/system_mysql_db_507.test
index 2cc4ba82ee8..cfefcdc602e 100644
--- a/mysql-test/main/no_password_column-mdev-11170.test
+++ b/mysql-test/main/system_mysql_db_507.test
@@ -5,9 +5,7 @@
--echo # unsupported 3.20 format
--echo #
-
-create table backup_user like mysql.user;
-insert into backup_user select * from mysql.user;
+--source include/switch_to_mysql_user.inc
--echo #
--echo # Original mysql.user table
@@ -17,7 +15,12 @@ describe mysql.user;
--echo #
--echo # Drop the password column.
--echo #
-alter table mysql.user drop column password;
+alter table mysql.user drop column password,
+ drop column is_role,
+ drop column default_role,
+ add column password_last_changed timestamp null default null after password_expired,
+ add column password_lifetime smallint unsigned after password_last_changed,
+ add column account_locked enum('n','y') character set utf8 not null default 'n' after password_lifetime;
flush privileges;
--echo #
@@ -85,11 +88,45 @@ select user, host, select_priv, plugin, authentication_string from mysql.user
where user like "%oo"
order by user;
+--echo #
+--echo # Test account locking
+--echo #
+create user user1@localhost account lock;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_ACCOUNT_HAS_BEEN_LOCKED
+connect(con1,localhost,user1);
+flush privileges;
+--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK
+--error ER_ACCOUNT_HAS_BEEN_LOCKED
+connect(con1,localhost,user1);
+show create user user1@localhost;
+alter user user1@localhost account unlock;
+connect(con1,localhost,user1);
+disconnect con1;
+connection default;
+show create user user1@localhost;
+
+--echo #
+--echo # Test password expiration fields are loaded correctly
+--echo #
+create user user@localhost;
+show create user user@localhost;
+alter user user@localhost password expire;
+show create user user@localhost;
+set password for user@localhost= password('');
+alter user user@localhost password expire default;
+show create user user@localhost;
+alter user user@localhost password expire never;
+show create user user@localhost;
+alter user user@localhost password expire interval 123 day;
+show create user user@localhost;
+alter user user@localhost password expire;
+show create user user@localhost;
+set password for user@localhost= password('');
+show create user user@localhost;
+drop user user@localhost;
--echo #
--echo # Reset to final original state.
--echo #
-drop table mysql.user;
-rename table backup_user to mysql.user;
-
-flush privileges;
+--source include/switch_to_mysql_global_priv.inc
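
Since the test now manipulates the privilege data through the new storage, the raw JSON document can also be inspected directly; a small sketch (read-only, not part of the test itself) would be:

select Host, User, Priv from mysql.global_priv;

Note that the Priv column carries the CHECK (json_valid(`Priv`)) constraint shown in the result files, so any manual edit has to remain valid JSON.
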
diff --git a/mysql-test/main/system_mysql_db_fix40123.result b/mysql-test/main/system_mysql_db_fix40123.result
index 2abcfb92ffa..6ec1e35b0b7 100644
--- a/mysql-test/main/system_mysql_db_fix40123.result
+++ b/mysql-test/main/system_mysql_db_fix40123.result
@@ -1,3 +1,24 @@
+use test;
+set storage_engine=myisam;
+CREATE TABLE db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges';
+INSERT INTO db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y');
+INSERT INTO db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y');
+CREATE TABLE host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges';
+CREATE TABLE user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
+INSERT INTO user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0);
+INSERT INTO user VALUES ('localhost','','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0);
+CREATE TABLE func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions';
+CREATE TABLE tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
+CREATE TABLE columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
+CREATE TABLE help_topic ( help_topic_id int unsigned not null, name varchar(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url varchar(128) not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics';
+CREATE TABLE help_category ( help_category_id smallint unsigned not null, name varchar(64) not null, parent_category_id smallint unsigned null, url varchar(128) not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories';
+CREATE TABLE help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation';
+CREATE TABLE help_keyword ( help_keyword_id int unsigned not null, name varchar(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords';
+CREATE TABLE time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names';
+CREATE TABLE time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones';
+CREATE TABLE time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions';
+CREATE TABLE time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types';
+CREATE TABLE time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
show tables;
Tables_in_db
column_stats
@@ -6,6 +27,7 @@ db
event
func
general_log
+global_priv
gtid_slave_pos
help_category
help_keyword
@@ -59,84 +81,10 @@ db CREATE TABLE `db` (
`Delete_history_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
PRIMARY KEY (`Host`,`Db`,`User`),
KEY `User` (`User`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Database privileges'
-show create table host;
-Table Create Table
-host CREATE TABLE `host` (
- `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
- `Db` char(64) COLLATE utf8_bin NOT NULL DEFAULT '',
- `Select_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Insert_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Update_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Delete_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Drop_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Grant_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `References_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Index_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Show_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Execute_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Trigger_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- PRIMARY KEY (`Host`,`Db`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Host privileges; Merged with database privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Database privileges'
show create table user;
-Table Create Table
-user CREATE TABLE `user` (
- `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
- `User` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
- `Password` char(41) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
- `Select_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Insert_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Update_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Delete_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Drop_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Reload_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Shutdown_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Process_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `File_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Grant_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `References_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Index_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Show_db_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Super_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Execute_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Repl_slave_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Repl_client_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Show_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_user_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Event_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Trigger_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_tablespace_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Delete_history_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `ssl_type` enum('','ANY','X509','SPECIFIED') CHARACTER SET utf8 NOT NULL DEFAULT '',
- `ssl_cipher` blob NOT NULL,
- `x509_issuer` blob NOT NULL,
- `x509_subject` blob NOT NULL,
- `max_questions` int(11) unsigned NOT NULL DEFAULT 0,
- `max_updates` int(11) unsigned NOT NULL DEFAULT 0,
- `max_connections` int(11) unsigned NOT NULL DEFAULT 0,
- `max_user_connections` int(11) NOT NULL DEFAULT 0,
- `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '',
- `authentication_string` text COLLATE utf8_bin NOT NULL,
- `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
- `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000,
- PRIMARY KEY (`Host`,`User`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges'
+View Create View character_set_client collation_connection
+user CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,'N' AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci
show create table func;
Table Create Table
func CREATE TABLE `func` (
@@ -145,7 +93,15 @@ func CREATE TABLE `func` (
`dl` char(128) COLLATE utf8_bin NOT NULL DEFAULT '',
`type` enum('function','aggregate') CHARACTER SET utf8 NOT NULL,
PRIMARY KEY (`name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='User defined functions'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='User defined functions'
+show create table global_priv;
+Table Create Table
+global_priv CREATE TABLE `global_priv` (
+ `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `User` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `Priv` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL DEFAULT '{}' CHECK (json_valid(`Priv`)),
+ PRIMARY KEY (`Host`,`User`)
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Users and global privileges'
show create table tables_priv;
Table Create Table
tables_priv CREATE TABLE `tables_priv` (
@@ -159,7 +115,7 @@ tables_priv CREATE TABLE `tables_priv` (
`Column_priv` set('Select','Insert','Update','References') CHARACTER SET utf8 NOT NULL DEFAULT '',
PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`),
KEY `Grantor` (`Grantor`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Table privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Table privileges'
show create table columns_priv;
Table Create Table
columns_priv CREATE TABLE `columns_priv` (
@@ -171,7 +127,7 @@ columns_priv CREATE TABLE `columns_priv` (
`Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
`Column_priv` set('Select','Insert','Update','References') CHARACTER SET utf8 NOT NULL DEFAULT '',
PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`,`Column_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Column privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Column privileges'
show create table procs_priv;
Table Create Table
procs_priv CREATE TABLE `procs_priv` (
@@ -185,7 +141,7 @@ procs_priv CREATE TABLE `procs_priv` (
`Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
PRIMARY KEY (`Host`,`Db`,`User`,`Routine_name`,`Routine_type`),
KEY `Grantor` (`Grantor`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Procedure privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Procedure privileges'
show create table servers;
Table Create Table
servers CREATE TABLE `servers` (
@@ -199,7 +155,7 @@ servers CREATE TABLE `servers` (
`Wrapper` char(64) NOT NULL DEFAULT '',
`Owner` char(64) NOT NULL DEFAULT '',
PRIMARY KEY (`Server_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='MySQL Foreign Servers table'
+) ENGINE=Aria DEFAULT CHARSET=utf8 PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='MySQL Foreign Servers table'
show create table proc;
Table Create Table
proc CREATE TABLE `proc` (
@@ -217,7 +173,7 @@ proc CREATE TABLE `proc` (
`definer` char(141) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '',
`created` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
`modified` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
- `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') NOT NULL DEFAULT '',
+ `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NOT NULL DEFAULT '',
`comment` text CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,
`character_set_client` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`collation_connection` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
@@ -225,7 +181,7 @@ proc CREATE TABLE `proc` (
`body_utf8` longblob DEFAULT NULL,
`aggregate` enum('NONE','GROUP') NOT NULL DEFAULT 'NONE',
PRIMARY KEY (`db`,`name`,`type`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Stored Procedures'
+) ENGINE=Aria DEFAULT CHARSET=utf8 PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Stored Procedures'
show create table event;
Table Create Table
event CREATE TABLE `event` (
@@ -243,7 +199,7 @@ event CREATE TABLE `event` (
`ends` datetime DEFAULT NULL,
`status` enum('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL DEFAULT 'ENABLED',
`on_completion` enum('DROP','PRESERVE') NOT NULL DEFAULT 'DROP',
- `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') NOT NULL DEFAULT '',
+ `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NOT NULL DEFAULT '',
`comment` char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '',
`originator` int(10) unsigned NOT NULL,
`time_zone` char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM',
@@ -252,7 +208,7 @@ event CREATE TABLE `event` (
`db_collation` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`body_utf8` longblob DEFAULT NULL,
PRIMARY KEY (`db`,`name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Events'
+) ENGINE=Aria DEFAULT CHARSET=utf8 PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Events'
show create table general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
@@ -287,7 +243,7 @@ table_stats CREATE TABLE `table_stats` (
`table_name` varchar(64) COLLATE utf8_bin NOT NULL,
`cardinality` bigint(21) unsigned DEFAULT NULL,
PRIMARY KEY (`db_name`,`table_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Tables'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Tables'
show create table column_stats;
Table Create Table
column_stats CREATE TABLE `column_stats` (
@@ -303,7 +259,7 @@ column_stats CREATE TABLE `column_stats` (
`hist_type` enum('SINGLE_PREC_HB','DOUBLE_PREC_HB') COLLATE utf8_bin DEFAULT NULL,
`histogram` varbinary(255) DEFAULT NULL,
PRIMARY KEY (`db_name`,`table_name`,`column_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Columns'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Columns'
show create table index_stats;
Table Create Table
index_stats CREATE TABLE `index_stats` (
@@ -313,6 +269,8 @@ index_stats CREATE TABLE `index_stats` (
`prefix_arity` int(11) unsigned NOT NULL,
`avg_frequency` decimal(12,4) DEFAULT NULL,
PRIMARY KEY (`db_name`,`table_name`,`index_name`,`prefix_arity`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Indexes'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Indexes'
+DROP VIEW user;
+DROP TABLE db, host, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, proxies_priv, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos, global_priv;
show tables;
Tables_in_test
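
After the 4.0/4.1-era MyISAM tables are upgraded, the expected output above shows every system table recreated as Aria with PAGE_CHECKSUM=1 and, for the privilege tables, TRANSACTIONAL=1. A small illustrative check (not part of the test; it assumes information_schema reporting these options in CREATE_OPTIONS) would be:

select table_name, engine, create_options
from information_schema.tables
where table_schema = 'mysql'
order by table_name;     -- engine should read Aria, create_options should list the page_checksum/transactional flags
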
diff --git a/mysql-test/main/system_mysql_db_fix40123.test b/mysql-test/main/system_mysql_db_fix40123.test
index 2d17a0964e5..00747cd7ba0 100644
--- a/mysql-test/main/system_mysql_db_fix40123.test
+++ b/mysql-test/main/system_mysql_db_fix40123.test
@@ -19,62 +19,43 @@ if (!$MYSQL_FIX_PRIVILEGE_TABLES)
# mysql_fix_system_tables which should be ignored.
# Instead, concentrate on the errors in r/system_mysql_db.reject
--- disable_result_log
--- disable_query_log
-
use test;
# create system tables as in mysql-4.1.23
# created by executing "./mysql_create_system_tables real ."
set storage_engine=myisam;
-CREATE TABLE db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges';
+CREATE TABLE db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges';
INSERT INTO db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y');
INSERT INTO db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y');
-
-CREATE TABLE host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges';
-
-
-CREATE TABLE user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
+CREATE TABLE host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges';
+CREATE TABLE user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
INSERT INTO user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0);
INSERT INTO user VALUES ('localhost','','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0);
-
-CREATE TABLE func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions';
-
-
-CREATE TABLE tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
-CREATE TABLE columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
-
-CREATE TABLE help_topic ( help_topic_id int unsigned not null, name varchar(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url varchar(128) not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics';
-CREATE TABLE help_category ( help_category_id smallint unsigned not null, name varchar(64) not null, parent_category_id smallint unsigned null, url varchar(128) not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories';
-CREATE TABLE help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation';
-CREATE TABLE help_keyword ( help_keyword_id int unsigned not null, name varchar(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords';
-
-CREATE TABLE time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names';
-
-CREATE TABLE time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones';
-
-CREATE TABLE time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions';
-
-CREATE TABLE time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types';
-
-CREATE TABLE time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
+CREATE TABLE func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions';
+CREATE TABLE tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
+CREATE TABLE columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
+CREATE TABLE help_topic ( help_topic_id int unsigned not null, name varchar(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url varchar(128) not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics';
+CREATE TABLE help_category ( help_category_id smallint unsigned not null, name varchar(64) not null, parent_category_id smallint unsigned null, url varchar(128) not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories';
+CREATE TABLE help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation';
+CREATE TABLE help_keyword ( help_keyword_id int unsigned not null, name varchar(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords';
+CREATE TABLE time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names';
+CREATE TABLE time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones';
+CREATE TABLE time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions';
+CREATE TABLE time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types';
+CREATE TABLE time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
# Run the mysql_fix_privilege_tables.sql using "mysql --force"
+-- disable_result_log
--exec $MYSQL --force test < $MYSQL_FIX_PRIVILEGE_TABLES
-
--- enable_query_log
-- enable_result_log
# Dump the tables that should be compared
-- source include/system_db_struct.inc
--- disable_query_log
-
# Drop all tables created by this test
-DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, proxies_priv, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos;
-
--- enable_query_log
+DROP VIEW user;
+DROP TABLE db, host, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, proxies_priv, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos, global_priv;
# check that we dropped all system tables
show tables;
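# Note (illustrative sketch, not part of this commit): from 10.4 on, mysql.user is a
# view over mysql.global_priv, and each global privilege is one bit of the JSON
# attribute $.access (Select=1, Insert=2, Update=4, ..., exactly as decoded by the
# view definition shown in the .result diff below). Assuming a server with these
# tables, the mapping can be cross-checked with a query such as:
#   SELECT u.User, u.Host,
#          json_value(g.Priv, '$.access') & 1 AS select_bit,  -- raw bit from global_priv
#          u.Select_priv                                      -- 'Y'/'N' as exposed by the view
#   FROM mysql.global_priv g
#   JOIN mysql.user u USING (Host, User);
# Any row with the 1-bit set in $.access reports Select_priv='Y' through the view.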
diff --git a/mysql-test/main/system_mysql_db_fix50030.result b/mysql-test/main/system_mysql_db_fix50030.result
index 81b6da4c16d..9f2729b86d1 100644
--- a/mysql-test/main/system_mysql_db_fix50030.result
+++ b/mysql-test/main/system_mysql_db_fix50030.result
@@ -1,3 +1,28 @@
+use test;
+set storage_engine=myisam;
+CREATE TABLE db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges';
+INSERT INTO db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N');
+INSERT INTO db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N');
+CREATE TABLE host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges';
+CREATE TABLE user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) unsigned DEFAULT 0 NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
+INSERT INTO user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0);
+INSERT INTO user VALUES ('localhost','','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0, 0);
+CREATE TABLE func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions';
+CREATE TABLE tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
+CREATE TABLE columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
+CREATE TABLE help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url char(128) not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics';
+CREATE TABLE help_category ( help_category_id smallint unsigned not null, name char(64) not null, parent_category_id smallint unsigned null, url char(128) not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories';
+CREATE TABLE help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation';
+CREATE TABLE help_keyword ( help_keyword_id int unsigned not null, name char(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords';
+CREATE TABLE time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names';
+CREATE TABLE time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones';
+CREATE TABLE time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions';
+CREATE TABLE time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types';
+CREATE TABLE time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
+CREATE TABLE proc ( db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum('CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA' ) DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob DEFAULT '' NOT NULL, returns char(64) DEFAULT '' NOT NULL, body longblob DEFAULT '' NOT NULL, definer char(77) collate utf8_bin DEFAULT '' NOT NULL, created timestamp, modified timestamp, sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'NOT_USED', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE' ) DEFAULT '' NOT NULL, comment char(64) collate utf8_bin DEFAULT '' NOT NULL, PRIMARY KEY (db,name,type) ) engine=MyISAM character set utf8 comment='Stored Procedures';
+CREATE TABLE procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Routine_name char(64) binary DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges';
+CREATE TABLE servers ( Server_name char(64) NOT NULL DEFAULT '', Host char(64) NOT NULL DEFAULT '', Db char(64) NOT NULL DEFAULT '', Username char(64) NOT NULL DEFAULT '', Password char(64) NOT NULL DEFAULT '', Port INT(4) NOT NULL DEFAULT '0', Socket char(64) NOT NULL DEFAULT '', Wrapper char(64) NOT NULL DEFAULT '', Owner char(64) NOT NULL DEFAULT '', PRIMARY KEY (Server_name)) CHARACTER SET utf8 comment='MySQL Foreign Servers table';
+INSERT INTO servers VALUES ('test','localhost','test','root','', 0,'','mysql','root');
show tables;
Tables_in_db
column_stats
@@ -6,6 +31,7 @@ db
event
func
general_log
+global_priv
gtid_slave_pos
help_category
help_keyword
@@ -59,84 +85,10 @@ db CREATE TABLE `db` (
`Delete_history_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
PRIMARY KEY (`Host`,`Db`,`User`),
KEY `User` (`User`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Database privileges'
-show create table host;
-Table Create Table
-host CREATE TABLE `host` (
- `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
- `Db` char(64) COLLATE utf8_bin NOT NULL DEFAULT '',
- `Select_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Insert_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Update_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Delete_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Drop_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Grant_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `References_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Index_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Show_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Execute_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Trigger_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- PRIMARY KEY (`Host`,`Db`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Host privileges; Merged with database privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Database privileges'
show create table user;
-Table Create Table
-user CREATE TABLE `user` (
- `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
- `User` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
- `Password` char(41) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
- `Select_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Insert_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Update_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Delete_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Drop_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Reload_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Shutdown_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Process_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `File_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Grant_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `References_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Index_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Show_db_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Super_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Execute_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Repl_slave_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Repl_client_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Show_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_user_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Event_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Trigger_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_tablespace_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Delete_history_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `ssl_type` enum('','ANY','X509','SPECIFIED') CHARACTER SET utf8 NOT NULL DEFAULT '',
- `ssl_cipher` blob NOT NULL,
- `x509_issuer` blob NOT NULL,
- `x509_subject` blob NOT NULL,
- `max_questions` int(11) unsigned NOT NULL DEFAULT 0,
- `max_updates` int(11) unsigned NOT NULL DEFAULT 0,
- `max_connections` int(11) unsigned NOT NULL DEFAULT 0,
- `max_user_connections` int(11) NOT NULL DEFAULT 0,
- `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '',
- `authentication_string` text COLLATE utf8_bin NOT NULL,
- `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
- `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000,
- PRIMARY KEY (`Host`,`User`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges'
+View Create View character_set_client collation_connection
+user CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,'N' AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci
show create table func;
Table Create Table
func CREATE TABLE `func` (
@@ -145,7 +97,15 @@ func CREATE TABLE `func` (
`dl` char(128) COLLATE utf8_bin NOT NULL DEFAULT '',
`type` enum('function','aggregate') CHARACTER SET utf8 NOT NULL,
PRIMARY KEY (`name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='User defined functions'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='User defined functions'
+show create table global_priv;
+Table Create Table
+global_priv CREATE TABLE `global_priv` (
+ `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `User` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `Priv` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL DEFAULT '{}' CHECK (json_valid(`Priv`)),
+ PRIMARY KEY (`Host`,`User`)
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Users and global privileges'
show create table tables_priv;
Table Create Table
tables_priv CREATE TABLE `tables_priv` (
@@ -159,7 +119,7 @@ tables_priv CREATE TABLE `tables_priv` (
`Column_priv` set('Select','Insert','Update','References') CHARACTER SET utf8 NOT NULL DEFAULT '',
PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`),
KEY `Grantor` (`Grantor`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Table privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Table privileges'
show create table columns_priv;
Table Create Table
columns_priv CREATE TABLE `columns_priv` (
@@ -171,7 +131,7 @@ columns_priv CREATE TABLE `columns_priv` (
`Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
`Column_priv` set('Select','Insert','Update','References') CHARACTER SET utf8 NOT NULL DEFAULT '',
PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`,`Column_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Column privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Column privileges'
show create table procs_priv;
Table Create Table
procs_priv CREATE TABLE `procs_priv` (
@@ -185,7 +145,7 @@ procs_priv CREATE TABLE `procs_priv` (
`Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
PRIMARY KEY (`Host`,`Db`,`User`,`Routine_name`,`Routine_type`),
KEY `Grantor` (`Grantor`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Procedure privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Procedure privileges'
show create table servers;
Table Create Table
servers CREATE TABLE `servers` (
@@ -199,7 +159,7 @@ servers CREATE TABLE `servers` (
`Wrapper` char(64) NOT NULL DEFAULT '',
`Owner` char(64) NOT NULL DEFAULT '',
PRIMARY KEY (`Server_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='MySQL Foreign Servers table'
+) ENGINE=Aria DEFAULT CHARSET=utf8 PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='MySQL Foreign Servers table'
show create table proc;
Table Create Table
proc CREATE TABLE `proc` (
@@ -217,7 +177,7 @@ proc CREATE TABLE `proc` (
`definer` char(141) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '',
`created` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
`modified` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
- `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') NOT NULL DEFAULT '',
+ `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NOT NULL DEFAULT '',
`comment` text CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,
`character_set_client` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`collation_connection` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
@@ -225,7 +185,7 @@ proc CREATE TABLE `proc` (
`body_utf8` longblob DEFAULT NULL,
`aggregate` enum('NONE','GROUP') NOT NULL DEFAULT 'NONE',
PRIMARY KEY (`db`,`name`,`type`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Stored Procedures'
+) ENGINE=Aria DEFAULT CHARSET=utf8 PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Stored Procedures'
show create table event;
Table Create Table
event CREATE TABLE `event` (
@@ -243,7 +203,7 @@ event CREATE TABLE `event` (
`ends` datetime DEFAULT NULL,
`status` enum('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL DEFAULT 'ENABLED',
`on_completion` enum('DROP','PRESERVE') NOT NULL DEFAULT 'DROP',
- `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') NOT NULL DEFAULT '',
+ `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NOT NULL DEFAULT '',
`comment` char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '',
`originator` int(10) unsigned NOT NULL,
`time_zone` char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM',
@@ -252,7 +212,7 @@ event CREATE TABLE `event` (
`db_collation` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`body_utf8` longblob DEFAULT NULL,
PRIMARY KEY (`db`,`name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Events'
+) ENGINE=Aria DEFAULT CHARSET=utf8 PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Events'
show create table general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
@@ -287,7 +247,7 @@ table_stats CREATE TABLE `table_stats` (
`table_name` varchar(64) COLLATE utf8_bin NOT NULL,
`cardinality` bigint(21) unsigned DEFAULT NULL,
PRIMARY KEY (`db_name`,`table_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Tables'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Tables'
show create table column_stats;
Table Create Table
column_stats CREATE TABLE `column_stats` (
@@ -303,7 +263,7 @@ column_stats CREATE TABLE `column_stats` (
`hist_type` enum('SINGLE_PREC_HB','DOUBLE_PREC_HB') COLLATE utf8_bin DEFAULT NULL,
`histogram` varbinary(255) DEFAULT NULL,
PRIMARY KEY (`db_name`,`table_name`,`column_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Columns'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Columns'
show create table index_stats;
Table Create Table
index_stats CREATE TABLE `index_stats` (
@@ -313,6 +273,8 @@ index_stats CREATE TABLE `index_stats` (
`prefix_arity` int(11) unsigned NOT NULL,
`avg_frequency` decimal(12,4) DEFAULT NULL,
PRIMARY KEY (`db_name`,`table_name`,`index_name`,`prefix_arity`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Indexes'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Indexes'
+DROP VIEW user;
+DROP TABLE db, host, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, proxies_priv, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos, global_priv;
show tables;
Tables_in_test
diff --git a/mysql-test/main/system_mysql_db_fix50030.test b/mysql-test/main/system_mysql_db_fix50030.test
index 9506c3465e7..085286637b0 100644
--- a/mysql-test/main/system_mysql_db_fix50030.test
+++ b/mysql-test/main/system_mysql_db_fix50030.test
@@ -19,69 +19,47 @@ if (!$MYSQL_FIX_PRIVILEGE_TABLES)
# mysql_fix_system_tables which should be ignored.
# Instead, concentrate on the errors in r/system_mysql_db.reject
--- disable_result_log
--- disable_query_log
-
use test;
# create system tables as in mysql-5.0.30
# created by executing "./mysql_create_system_tables real ."
set storage_engine=myisam;
-CREATE TABLE db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges';
+CREATE TABLE db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges';
INSERT INTO db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N');
INSERT INTO db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N');
-
-CREATE TABLE host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges';
-
-CREATE TABLE user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) unsigned DEFAULT 0 NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
+CREATE TABLE host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges';
+CREATE TABLE user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) unsigned DEFAULT 0 NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
INSERT INTO user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0);
INSERT INTO user VALUES ('localhost','','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0, 0);
-
-CREATE TABLE func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions';
-
-CREATE TABLE tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
-
-CREATE TABLE columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
-
-CREATE TABLE help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url char(128) not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics';
-CREATE TABLE help_category ( help_category_id smallint unsigned not null, name char(64) not null, parent_category_id smallint unsigned null, url char(128) not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories';
-CREATE TABLE help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation';
-CREATE TABLE help_keyword ( help_keyword_id int unsigned not null, name char(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords';
-
-CREATE TABLE time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names';
-
-CREATE TABLE time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones';
-
-CREATE TABLE time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions';
-
-CREATE TABLE time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types';
-
-CREATE TABLE time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
-
-CREATE TABLE proc ( db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum('CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA' ) DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob DEFAULT '' NOT NULL, returns char(64) DEFAULT '' NOT NULL, body longblob DEFAULT '' NOT NULL, definer char(77) collate utf8_bin DEFAULT '' NOT NULL, created timestamp, modified timestamp, sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'NOT_USED', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE' ) DEFAULT '' NOT NULL, comment char(64) collate utf8_bin DEFAULT '' NOT NULL, PRIMARY KEY (db,name,type) ) engine=MyISAM character set utf8 comment='Stored Procedures';
-
-CREATE TABLE procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Routine_name char(64) binary DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges';
-
+CREATE TABLE func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions';
+CREATE TABLE tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
+CREATE TABLE columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
+CREATE TABLE help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url char(128) not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics';
+CREATE TABLE help_category ( help_category_id smallint unsigned not null, name char(64) not null, parent_category_id smallint unsigned null, url char(128) not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories';
+CREATE TABLE help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation';
+CREATE TABLE help_keyword ( help_keyword_id int unsigned not null, name char(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords';
+CREATE TABLE time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names';
+CREATE TABLE time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones';
+CREATE TABLE time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions';
+CREATE TABLE time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types';
+CREATE TABLE time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
+CREATE TABLE proc ( db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum('CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA' ) DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob DEFAULT '' NOT NULL, returns char(64) DEFAULT '' NOT NULL, body longblob DEFAULT '' NOT NULL, definer char(77) collate utf8_bin DEFAULT '' NOT NULL, created timestamp, modified timestamp, sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'NOT_USED', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE' ) DEFAULT '' NOT NULL, comment char(64) collate utf8_bin DEFAULT '' NOT NULL, PRIMARY KEY (db,name,type) ) engine=MyISAM character set utf8 comment='Stored Procedures';
+CREATE TABLE procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Routine_name char(64) binary DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges';
CREATE TABLE servers ( Server_name char(64) NOT NULL DEFAULT '', Host char(64) NOT NULL DEFAULT '', Db char(64) NOT NULL DEFAULT '', Username char(64) NOT NULL DEFAULT '', Password char(64) NOT NULL DEFAULT '', Port INT(4) NOT NULL DEFAULT '0', Socket char(64) NOT NULL DEFAULT '', Wrapper char(64) NOT NULL DEFAULT '', Owner char(64) NOT NULL DEFAULT '', PRIMARY KEY (Server_name)) CHARACTER SET utf8 comment='MySQL Foreign Servers table';
-
INSERT INTO servers VALUES ('test','localhost','test','root','', 0,'','mysql','root');
+-- disable_result_log
# Run the mysql_fix_privilege_tables.sql using "mysql --force"
--exec $MYSQL --force test < $MYSQL_FIX_PRIVILEGE_TABLES
-
--- enable_query_log
-- enable_result_log
# Dump the tables that should be compared
-- source include/system_db_struct.inc
--- disable_query_log
-
# Drop all tables created by this test
-DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, proxies_priv, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos;
-
--- enable_query_log
+DROP VIEW user;
+DROP TABLE db, host, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, proxies_priv, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos, global_priv;
# check that we dropped all system tables
show tables;
diff --git a/mysql-test/main/system_mysql_db_fix50117.result b/mysql-test/main/system_mysql_db_fix50117.result
index 2abcfb92ffa..08f334e9bcc 100644
--- a/mysql-test/main/system_mysql_db_fix50117.result
+++ b/mysql-test/main/system_mysql_db_fix50117.result
@@ -1,3 +1,24 @@
+use test;
+CREATE TABLE IF NOT EXISTS db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges';
+CREATE TABLE IF NOT EXISTS host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges';
+CREATE TABLE IF NOT EXISTS user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) unsigned DEFAULT 0 NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
+CREATE TABLE IF NOT EXISTS func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions';
+CREATE TABLE IF NOT EXISTS plugin ( name char(64) binary DEFAULT '' NOT NULL, dl char(128) DEFAULT '' NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='MySQL plugins';
+CREATE TABLE IF NOT EXISTS servers ( Server_name char(64) NOT NULL DEFAULT '', Host char(64) NOT NULL DEFAULT '', Db char(64) NOT NULL DEFAULT '', Username char(64) NOT NULL DEFAULT '', Password char(64) NOT NULL DEFAULT '', Port INT(4) NOT NULL DEFAULT '0', Socket char(64) NOT NULL DEFAULT '', Wrapper char(64) NOT NULL DEFAULT '', Owner char(64) NOT NULL DEFAULT '', PRIMARY KEY (Server_name)) CHARACTER SET utf8 comment='MySQL Foreign Servers table';
+CREATE TABLE IF NOT EXISTS tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
+CREATE TABLE IF NOT EXISTS columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
+CREATE TABLE IF NOT EXISTS help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url char(128) not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics';
+CREATE TABLE IF NOT EXISTS help_category ( help_category_id smallint unsigned not null, name char(64) not null, parent_category_id smallint unsigned null, url char(128) not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories';
+CREATE TABLE IF NOT EXISTS help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation';
+CREATE TABLE IF NOT EXISTS help_keyword ( help_keyword_id int unsigned not null, name char(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords';
+CREATE TABLE IF NOT EXISTS time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names';
+CREATE TABLE IF NOT EXISTS time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones';
+CREATE TABLE IF NOT EXISTS time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions';
+CREATE TABLE IF NOT EXISTS time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types';
+CREATE TABLE IF NOT EXISTS time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
+CREATE TABLE IF NOT EXISTS proc ( db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum('CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA' ) DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns char(64) DEFAULT '' NOT NULL, body longblob NOT NULL, definer char(77) collate utf8_bin DEFAULT '' NOT NULL, created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'NOT_USED', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE' ) DEFAULT '' NOT NULL, comment char(64) collate utf8_bin DEFAULT '' NOT NULL, PRIMARY KEY (db,name,type) ) engine=MyISAM character set utf8 comment='Stored Procedures';
+CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Routine_name char(64) binary DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges';
+CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00', last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events';
show tables;
Tables_in_db
column_stats
@@ -6,6 +27,7 @@ db
event
func
general_log
+global_priv
gtid_slave_pos
help_category
help_keyword
@@ -59,84 +81,10 @@ db CREATE TABLE `db` (
`Delete_history_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
PRIMARY KEY (`Host`,`Db`,`User`),
KEY `User` (`User`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Database privileges'
-show create table host;
-Table Create Table
-host CREATE TABLE `host` (
- `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
- `Db` char(64) COLLATE utf8_bin NOT NULL DEFAULT '',
- `Select_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Insert_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Update_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Delete_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Drop_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Grant_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `References_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Index_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Show_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Execute_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Trigger_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- PRIMARY KEY (`Host`,`Db`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Host privileges; Merged with database privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Database privileges'
show create table user;
-Table Create Table
-user CREATE TABLE `user` (
- `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
- `User` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
- `Password` char(41) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '',
- `Select_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Insert_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Update_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Delete_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Drop_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Reload_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Shutdown_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Process_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `File_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Grant_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `References_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Index_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Show_db_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Super_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Execute_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Repl_slave_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Repl_client_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Show_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_user_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Event_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Trigger_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Create_tablespace_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `Delete_history_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `ssl_type` enum('','ANY','X509','SPECIFIED') CHARACTER SET utf8 NOT NULL DEFAULT '',
- `ssl_cipher` blob NOT NULL,
- `x509_issuer` blob NOT NULL,
- `x509_subject` blob NOT NULL,
- `max_questions` int(11) unsigned NOT NULL DEFAULT 0,
- `max_updates` int(11) unsigned NOT NULL DEFAULT 0,
- `max_connections` int(11) unsigned NOT NULL DEFAULT 0,
- `max_user_connections` int(11) NOT NULL DEFAULT 0,
- `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '',
- `authentication_string` text COLLATE utf8_bin NOT NULL,
- `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
- `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
- `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000,
- PRIMARY KEY (`Host`,`User`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges'
+View Create View character_set_client collation_connection
+user CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `user` AS select `global_priv`.`Host` AS `Host`,`global_priv`.`User` AS `User`,if(json_value(`global_priv`.`Priv`,'$.plugin') in ('mysql_native_password','mysql_old_password'),ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),''),'') AS `Password`,if(json_value(`global_priv`.`Priv`,'$.access') & 1,'Y','N') AS `Select_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2,'Y','N') AS `Insert_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4,'Y','N') AS `Update_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8,'Y','N') AS `Delete_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16,'Y','N') AS `Create_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32,'Y','N') AS `Drop_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 64,'Y','N') AS `Reload_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 128,'Y','N') AS `Shutdown_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 256,'Y','N') AS `Process_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 512,'Y','N') AS `File_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1024,'Y','N') AS `Grant_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2048,'Y','N') AS `References_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4096,'Y','N') AS `Index_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8192,'Y','N') AS `Alter_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16384,'Y','N') AS `Show_db_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 32768,'Y','N') AS `Super_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 65536,'Y','N') AS `Create_tmp_table_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 131072,'Y','N') AS `Lock_tables_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 262144,'Y','N') AS `Execute_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 524288,'Y','N') AS `Repl_slave_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 1048576,'Y','N') AS `Repl_client_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 2097152,'Y','N') AS `Create_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 4194304,'Y','N') AS `Show_view_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 8388608,'Y','N') AS `Create_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 16777216,'Y','N') AS `Alter_routine_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 33554432,'Y','N') AS `Create_user_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 67108864,'Y','N') AS `Event_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 134217728,'Y','N') AS `Trigger_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 268435456,'Y','N') AS `Create_tablespace_priv`,if(json_value(`global_priv`.`Priv`,'$.access') & 536870912,'Y','N') AS `Delete_history_priv`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.ssl_type'),0) + 1,'','ANY','X509','SPECIFIED') AS `ssl_type`,ifnull(json_value(`global_priv`.`Priv`,'$.ssl_cipher'),'') AS `ssl_cipher`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_issuer'),'') AS `x509_issuer`,ifnull(json_value(`global_priv`.`Priv`,'$.x509_subject'),'') AS `x509_subject`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_questions'),0) as unsigned) AS `max_questions`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_updates'),0) as unsigned) AS `max_updates`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_connections'),0) as unsigned) AS `max_connections`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_user_connections'),0) as signed) AS `max_user_connections`,ifnull(json_value(`global_priv`.`Priv`,'$.plugin'),'') AS `plugin`,ifnull(json_value(`global_priv`.`Priv`,'$.authentication_string'),'') AS `authentication_string`,'N' AS `password_expired`,elt(ifnull(json_value(`global_priv`.`Priv`,'$.is_role'),0) + 1,'N','Y') AS `is_role`,ifnull(json_value(`global_priv`.`Priv`,'$.default_role'),'') AS `default_role`,cast(ifnull(json_value(`global_priv`.`Priv`,'$.max_statement_time'),0.0) as decimal(12,6)) AS `max_statement_time` from `global_priv` latin1 latin1_swedish_ci
show create table func;
Table Create Table
func CREATE TABLE `func` (
@@ -145,7 +93,15 @@ func CREATE TABLE `func` (
`dl` char(128) COLLATE utf8_bin NOT NULL DEFAULT '',
`type` enum('function','aggregate') CHARACTER SET utf8 NOT NULL,
PRIMARY KEY (`name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='User defined functions'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='User defined functions'
+show create table global_priv;
+Table Create Table
+global_priv CREATE TABLE `global_priv` (
+ `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `User` char(80) COLLATE utf8_bin NOT NULL DEFAULT '',
+ `Priv` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL DEFAULT '{}' CHECK (json_valid(`Priv`)),
+ PRIMARY KEY (`Host`,`User`)
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Users and global privileges'
show create table tables_priv;
Table Create Table
tables_priv CREATE TABLE `tables_priv` (
@@ -159,7 +115,7 @@ tables_priv CREATE TABLE `tables_priv` (
`Column_priv` set('Select','Insert','Update','References') CHARACTER SET utf8 NOT NULL DEFAULT '',
PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`),
KEY `Grantor` (`Grantor`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Table privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Table privileges'
show create table columns_priv;
Table Create Table
columns_priv CREATE TABLE `columns_priv` (
@@ -171,7 +127,7 @@ columns_priv CREATE TABLE `columns_priv` (
`Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
`Column_priv` set('Select','Insert','Update','References') CHARACTER SET utf8 NOT NULL DEFAULT '',
PRIMARY KEY (`Host`,`Db`,`User`,`Table_name`,`Column_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Column privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Column privileges'
show create table procs_priv;
Table Create Table
procs_priv CREATE TABLE `procs_priv` (
@@ -185,7 +141,7 @@ procs_priv CREATE TABLE `procs_priv` (
`Timestamp` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
PRIMARY KEY (`Host`,`Db`,`User`,`Routine_name`,`Routine_type`),
KEY `Grantor` (`Grantor`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Procedure privileges'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Procedure privileges'
show create table servers;
Table Create Table
servers CREATE TABLE `servers` (
@@ -199,7 +155,7 @@ servers CREATE TABLE `servers` (
`Wrapper` char(64) NOT NULL DEFAULT '',
`Owner` char(64) NOT NULL DEFAULT '',
PRIMARY KEY (`Server_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='MySQL Foreign Servers table'
+) ENGINE=Aria DEFAULT CHARSET=utf8 PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='MySQL Foreign Servers table'
show create table proc;
Table Create Table
proc CREATE TABLE `proc` (
@@ -217,7 +173,7 @@ proc CREATE TABLE `proc` (
`definer` char(141) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '',
`created` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
`modified` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
- `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') NOT NULL DEFAULT '',
+ `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NOT NULL DEFAULT '',
`comment` text CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,
`character_set_client` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`collation_connection` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
@@ -225,7 +181,7 @@ proc CREATE TABLE `proc` (
`body_utf8` longblob DEFAULT NULL,
`aggregate` enum('NONE','GROUP') NOT NULL DEFAULT 'NONE',
PRIMARY KEY (`db`,`name`,`type`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Stored Procedures'
+) ENGINE=Aria DEFAULT CHARSET=utf8 PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Stored Procedures'
show create table event;
Table Create Table
event CREATE TABLE `event` (
@@ -243,7 +199,7 @@ event CREATE TABLE `event` (
`ends` datetime DEFAULT NULL,
`status` enum('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL DEFAULT 'ENABLED',
`on_completion` enum('DROP','PRESERVE') NOT NULL DEFAULT 'DROP',
- `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') NOT NULL DEFAULT '',
+ `sql_mode` set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NOT NULL DEFAULT '',
`comment` char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '',
`originator` int(10) unsigned NOT NULL,
`time_zone` char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM',
@@ -252,7 +208,7 @@ event CREATE TABLE `event` (
`db_collation` char(32) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
`body_utf8` longblob DEFAULT NULL,
PRIMARY KEY (`db`,`name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Events'
+) ENGINE=Aria DEFAULT CHARSET=utf8 PAGE_CHECKSUM=1 TRANSACTIONAL=1 COMMENT='Events'
show create table general_log;
Table Create Table
general_log CREATE TABLE `general_log` (
@@ -287,7 +243,7 @@ table_stats CREATE TABLE `table_stats` (
`table_name` varchar(64) COLLATE utf8_bin NOT NULL,
`cardinality` bigint(21) unsigned DEFAULT NULL,
PRIMARY KEY (`db_name`,`table_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Tables'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Tables'
show create table column_stats;
Table Create Table
column_stats CREATE TABLE `column_stats` (
@@ -303,7 +259,7 @@ column_stats CREATE TABLE `column_stats` (
`hist_type` enum('SINGLE_PREC_HB','DOUBLE_PREC_HB') COLLATE utf8_bin DEFAULT NULL,
`histogram` varbinary(255) DEFAULT NULL,
PRIMARY KEY (`db_name`,`table_name`,`column_name`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Columns'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Columns'
show create table index_stats;
Table Create Table
index_stats CREATE TABLE `index_stats` (
@@ -313,6 +269,8 @@ index_stats CREATE TABLE `index_stats` (
`prefix_arity` int(11) unsigned NOT NULL,
`avg_frequency` decimal(12,4) DEFAULT NULL,
PRIMARY KEY (`db_name`,`table_name`,`index_name`,`prefix_arity`)
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Statistics on Indexes'
+) ENGINE=Aria DEFAULT CHARSET=utf8 COLLATE=utf8_bin PAGE_CHECKSUM=1 TRANSACTIONAL=0 COMMENT='Statistics on Indexes'
+DROP VIEW user;
+DROP TABLE db, host, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, proxies_priv, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos, global_priv;
show tables;
Tables_in_test
diff --git a/mysql-test/main/system_mysql_db_fix50117.test b/mysql-test/main/system_mysql_db_fix50117.test
index f8bef3da162..9755415c6e4 100644
--- a/mysql-test/main/system_mysql_db_fix50117.test
+++ b/mysql-test/main/system_mysql_db_fix50117.test
@@ -19,86 +19,41 @@ if (!$MYSQL_FIX_PRIVILEGE_TABLES)
# mysql_fix_system_tables which should be ignored.
# Instead, concentrate on the errors in r/system_mysql_db.reject
--- disable_result_log
--- disable_query_log
-
use test;
# create system tables as in mysql-5.1.17
-
-
-CREATE TABLE IF NOT EXISTS db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges';
-
-
-CREATE TABLE IF NOT EXISTS host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges';
-
-
-CREATE TABLE IF NOT EXISTS user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) unsigned DEFAULT 0 NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
-
-
-CREATE TABLE IF NOT EXISTS func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions';
-
-
+CREATE TABLE IF NOT EXISTS db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges';
+CREATE TABLE IF NOT EXISTS host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges';
+CREATE TABLE IF NOT EXISTS user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) unsigned DEFAULT 0 NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
+CREATE TABLE IF NOT EXISTS func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions';
CREATE TABLE IF NOT EXISTS plugin ( name char(64) binary DEFAULT '' NOT NULL, dl char(128) DEFAULT '' NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='MySQL plugins';
-
-
CREATE TABLE IF NOT EXISTS servers ( Server_name char(64) NOT NULL DEFAULT '', Host char(64) NOT NULL DEFAULT '', Db char(64) NOT NULL DEFAULT '', Username char(64) NOT NULL DEFAULT '', Password char(64) NOT NULL DEFAULT '', Port INT(4) NOT NULL DEFAULT '0', Socket char(64) NOT NULL DEFAULT '', Wrapper char(64) NOT NULL DEFAULT '', Owner char(64) NOT NULL DEFAULT '', PRIMARY KEY (Server_name)) CHARACTER SET utf8 comment='MySQL Foreign Servers table';
+CREATE TABLE IF NOT EXISTS tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
+CREATE TABLE IF NOT EXISTS columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
+CREATE TABLE IF NOT EXISTS help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url char(128) not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics';
+CREATE TABLE IF NOT EXISTS help_category ( help_category_id smallint unsigned not null, name char(64) not null, parent_category_id smallint unsigned null, url char(128) not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories';
+CREATE TABLE IF NOT EXISTS help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation';
+CREATE TABLE IF NOT EXISTS help_keyword ( help_keyword_id int unsigned not null, name char(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords';
+CREATE TABLE IF NOT EXISTS time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names';
+CREATE TABLE IF NOT EXISTS time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones';
+CREATE TABLE IF NOT EXISTS time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions';
+CREATE TABLE IF NOT EXISTS time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types';
+CREATE TABLE IF NOT EXISTS time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
+CREATE TABLE IF NOT EXISTS proc ( db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum('CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA' ) DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns char(64) DEFAULT '' NOT NULL, body longblob NOT NULL, definer char(77) collate utf8_bin DEFAULT '' NOT NULL, created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'NOT_USED', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE' ) DEFAULT '' NOT NULL, comment char(64) collate utf8_bin DEFAULT '' NOT NULL, PRIMARY KEY (db,name,type) ) engine=MyISAM character set utf8 comment='Stored Procedures';
+CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Routine_name char(64) binary DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges';
+CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00', last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events';
-
-CREATE TABLE IF NOT EXISTS tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
-
-CREATE TABLE IF NOT EXISTS columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
-
-
-CREATE TABLE IF NOT EXISTS help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url char(128) not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics';
-
-
-CREATE TABLE IF NOT EXISTS help_category ( help_category_id smallint unsigned not null, name char(64) not null, parent_category_id smallint unsigned null, url char(128) not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories';
-
-
-CREATE TABLE IF NOT EXISTS help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation';
-
-
-CREATE TABLE IF NOT EXISTS help_keyword ( help_keyword_id int unsigned not null, name char(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords';
-
-
-CREATE TABLE IF NOT EXISTS time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names';
-
-
-CREATE TABLE IF NOT EXISTS time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones';
-
-
-CREATE TABLE IF NOT EXISTS time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions';
-
-
-CREATE TABLE IF NOT EXISTS time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types';
-
-
-CREATE TABLE IF NOT EXISTS time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
-
-
-CREATE TABLE IF NOT EXISTS proc ( db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum('CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA' ) DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns char(64) DEFAULT '' NOT NULL, body longblob NOT NULL, definer char(77) collate utf8_bin DEFAULT '' NOT NULL, created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'NOT_USED', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE' ) DEFAULT '' NOT NULL, comment char(64) collate utf8_bin DEFAULT '' NOT NULL, PRIMARY KEY (db,name,type) ) engine=MyISAM character set utf8 comment='Stored Procedures';
-
-
-CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Routine_name char(64) binary DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges';
-
-CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00', last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events';
-
+-- disable_result_log
# Run the mysql_fix_privilege_tables.sql using "mysql --force"
--exec $MYSQL --force test < $MYSQL_FIX_PRIVILEGE_TABLES
-
--- enable_query_log
-- enable_result_log
# Dump the tables that should be compared
-- source include/system_db_struct.inc
--- disable_query_log
-
# Drop all tables created by this test
-DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, proxies_priv, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos;
-
--- enable_query_log
+DROP VIEW user;
+DROP TABLE db, host, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, proxies_priv, innodb_index_stats, innodb_table_stats, transaction_registry, table_stats, column_stats, index_stats, roles_mapping, gtid_slave_pos, global_priv;
# check that we dropped all system tables
show tables;
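For context on the cleanup change above: in MariaDB 10.4 the mysql.user table is replaced by a view backed by the new mysql.global_priv base table, which is why the test now issues DROP VIEW user and also drops global_priv. A quick way to confirm this on a 10.4 server (illustration only, not part of the patch):

SHOW CREATE VIEW mysql.user;          -- user is a view in 10.4
SHOW CREATE TABLE mysql.global_priv;  -- the backing base table for privileges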
diff --git a/mysql-test/main/system_mysql_db_refs.result b/mysql-test/main/system_mysql_db_refs.result
index 96ecd2f27c0..870a550bb95 100644
--- a/mysql-test/main/system_mysql_db_refs.result
+++ b/mysql-test/main/system_mysql_db_refs.result
@@ -2,9 +2,6 @@ set @name="This is a very long string, that mustn't find room in a system field
create table test_db select * from mysql.db;
delete from test_db;
insert ignore into test_db (Host,Db,User) values (@name,@name,@name);
-create table test_host select * from mysql.host;
-delete from test_host;
-insert ignore into test_host (Host,Db) values (@name,@name);
create table test_user select * from mysql.user;
delete from test_user;
insert ignore into test_user (Host,User) values (@name,@name);
@@ -19,28 +16,24 @@ delete from test_columns_priv;
insert ignore into test_columns_priv (Host,Db,User,Table_name,Column_name) values (@name,@name,@name,@name,@name);
select
if(isnull(test_db.Host),'WRONG!!!','ok') as test_db_Host,
-if(isnull(test_host.Host),'WRONG!!!','ok') as test_host_Host,
if(isnull(test_user.Host),'WRONG!!!','ok') as test_user_Host,
if(isnull(test_tables_priv.Host),'WRONG!!!','ok') as test_tables_priv_Host,
if(isnull(test_columns_priv.Host),'WRONG!!!','ok') as test_columns_priv_Host
from test_db
-left join test_host on test_db.Host=test_host.Host
left join test_user on test_db.Host=test_user.Host
left join test_tables_priv on test_db.Host=test_tables_priv.Host
left join test_columns_priv on test_db.Host=test_columns_priv.Host;
-test_db_Host test_host_Host test_user_Host test_tables_priv_Host test_columns_priv_Host
-ok ok ok ok ok
+test_db_Host test_user_Host test_tables_priv_Host test_columns_priv_Host
+ok ok ok ok
select
if(isnull(test_db.Db),'WRONG!!!','ok') as test_db_Db,
-if(isnull(test_host.Db),'WRONG!!!','ok') as test_host_Db,
if(isnull(test_tables_priv.Db),'WRONG!!!','ok') as test_tables_priv_Db,
if(isnull(test_columns_priv.Db),'WRONG!!!','ok') as est_columns_priv_Db
from test_db
-left join test_host on test_db.Db=test_host.Db
left join test_tables_priv on test_db.Db=test_tables_priv.Db
left join test_columns_priv on test_db.Db=test_columns_priv.Db;
-test_db_Db test_host_Db test_tables_priv_Db est_columns_priv_Db
-ok ok ok ok
+test_db_Db test_tables_priv_Db est_columns_priv_Db
+ok ok ok
select
if(isnull(test_db.User),'WRONG!!!','ok') as test_db_User,
if(isnull(test_user.User),'WRONG!!!','ok') as test_user_User,
@@ -62,6 +55,5 @@ ok ok
drop table test_columns_priv;
drop table test_tables_priv;
drop table test_func;
-drop table test_host;
drop table test_user;
drop table test_db;
diff --git a/mysql-test/main/system_mysql_db_refs.test b/mysql-test/main/system_mysql_db_refs.test
index 63f30e7db63..084d5bbf868 100644
--- a/mysql-test/main/system_mysql_db_refs.test
+++ b/mysql-test/main/system_mysql_db_refs.test
@@ -14,12 +14,6 @@ delete from test_db;
insert ignore into test_db (Host,Db,User) values (@name,@name,@name);
--enable_warnings
-create table test_host select * from mysql.host;
-delete from test_host;
---disable_warnings
-insert ignore into test_host (Host,Db) values (@name,@name);
---enable_warnings
-
create table test_user select * from mysql.user;
delete from test_user;
--disable_warnings
@@ -48,13 +42,11 @@ insert ignore into test_columns_priv (Host,Db,User,Table_name,Column_name) value
select
if(isnull(test_db.Host),'WRONG!!!','ok') as test_db_Host,
- if(isnull(test_host.Host),'WRONG!!!','ok') as test_host_Host,
if(isnull(test_user.Host),'WRONG!!!','ok') as test_user_Host,
if(isnull(test_tables_priv.Host),'WRONG!!!','ok') as test_tables_priv_Host,
if(isnull(test_columns_priv.Host),'WRONG!!!','ok') as test_columns_priv_Host
from test_db
-left join test_host on test_db.Host=test_host.Host
left join test_user on test_db.Host=test_user.Host
left join test_tables_priv on test_db.Host=test_tables_priv.Host
left join test_columns_priv on test_db.Host=test_columns_priv.Host;
@@ -63,12 +55,10 @@ left join test_columns_priv on test_db.Host=test_columns_priv.Host;
select
if(isnull(test_db.Db),'WRONG!!!','ok') as test_db_Db,
- if(isnull(test_host.Db),'WRONG!!!','ok') as test_host_Db,
if(isnull(test_tables_priv.Db),'WRONG!!!','ok') as test_tables_priv_Db,
if(isnull(test_columns_priv.Db),'WRONG!!!','ok') as est_columns_priv_Db
from test_db
-left join test_host on test_db.Db=test_host.Db
left join test_tables_priv on test_db.Db=test_tables_priv.Db
left join test_columns_priv on test_db.Db=test_columns_priv.Db;
@@ -96,7 +86,6 @@ left join test_columns_priv on test_tables_priv.Table_name=test_columns_priv.Tab
drop table test_columns_priv;
drop table test_tables_priv;
drop table test_func;
-drop table test_host;
drop table test_user;
drop table test_db;
diff --git a/mysql-test/main/table_elim.result b/mysql-test/main/table_elim.result
index cf9a4a38779..2bfbbfb433f 100644
--- a/mysql-test/main/table_elim.result
+++ b/mysql-test/main/table_elim.result
@@ -138,7 +138,7 @@ Note 1003 /* select#1 */ select `f`.`id` AS `id` from `test`.`t0` `f` join `test
This should use facts, a2 and its subquery:
explain extended select id from v1 where attr2 between 12 and 14;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY a2 range PRIMARY,attr2 attr2 5 NULL 5 100.00 Using index condition; Using where
+1 PRIMARY a2 range PRIMARY,attr2 attr2 5 NULL 4 100.00 Using index condition; Using where
1 PRIMARY f eq_ref PRIMARY PRIMARY 4 test.a2.id 1 100.00 Using index
3 DEPENDENT SUBQUERY t2 ref PRIMARY PRIMARY 4 test.a2.id 2 100.00 Using index
Warnings:
@@ -166,7 +166,7 @@ Note 1003 /* select#1 */ select `f`.`id` AS `id` from `test`.`t0` `f` join `test
This should use facts, a2 and its subquery:
explain extended select id from v2 where attr2 between 12 and 14;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY a2 range PRIMARY,attr2 attr2 5 NULL 5 100.00 Using index condition
+1 PRIMARY a2 range PRIMARY,attr2 attr2 5 NULL 4 100.00 Using index condition
1 PRIMARY f eq_ref PRIMARY PRIMARY 4 test.a2.id 1 100.00 Using where; Using index
3 DEPENDENT SUBQUERY t2 ref PRIMARY PRIMARY 4 test.f.id 2 100.00 Using index
Warnings:
diff --git a/mysql-test/main/tc_heuristic_recover.result b/mysql-test/main/tc_heuristic_recover.result
index 193a73c77f8..32c3a9779ad 100644
--- a/mysql-test/main/tc_heuristic_recover.result
+++ b/mysql-test/main/tc_heuristic_recover.result
@@ -27,6 +27,7 @@ FOUND 3 /was in the XA prepared state/ in mysqld.1.err
FOUND 1 /Found 1 prepared transactions!/ in mysqld.1.err
FOUND 2 /\[ERROR\] Can\'t init tc log/ in mysqld.1.err
FOUND 2 /Please restart mysqld without --tc-heuristic-recover/ in mysqld.1.err
+# restart
FOUND 3 /was in the XA prepared state/ in mysqld.1.err
FOUND 1 /Found 1 prepared transactions!/ in mysqld.1.err
SET TRANSACTION ISOLATION LEVEL READ COMMITTED;
diff --git a/mysql-test/main/temp_table_frm.result b/mysql-test/main/temp_table_frm.result
index 19c66380af2..a9c59ff5969 100644
--- a/mysql-test/main/temp_table_frm.result
+++ b/mysql-test/main/temp_table_frm.result
@@ -1,3 +1,7 @@
+set @save_use_stat_tables= @@use_stat_tables;
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+set @@optimizer_use_condition_selectivity=1;
+set @@use_stat_tables= NEVER;
set @@session.max_heap_table_size=16*1024*1024;
create table t1 select * from information_schema.session_status where variable_name like 'Opened%';
create temporary table t2 (a int) engine=memory;
@@ -18,4 +22,6 @@ OPENED_PLUGIN_LIBRARIES 0
OPENED_TABLE_DEFINITIONS 2
OPENED_TABLES 1
OPENED_VIEWS 0
+set @@use_stat_tables= @save_use_stat_tables;
+set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
drop table t1;
diff --git a/mysql-test/main/temp_table_frm.test b/mysql-test/main/temp_table_frm.test
index 178bd15004b..ddc4ffa575c 100644
--- a/mysql-test/main/temp_table_frm.test
+++ b/mysql-test/main/temp_table_frm.test
@@ -3,6 +3,11 @@
#
# Ensure we don't overflow the internal heap table size in the join
+set @save_use_stat_tables= @@use_stat_tables;
+set @save_optimizer_use_condition_selectivity=@@optimizer_use_condition_selectivity;
+
+set @@optimizer_use_condition_selectivity=1;
+set @@use_stat_tables= NEVER;
set @@session.max_heap_table_size=16*1024*1024;
create table t1 select * from information_schema.session_status where variable_name like 'Opened%';
create temporary table t2 (a int) engine=memory;
@@ -13,4 +18,6 @@ let $tmpdir= `select @@tmpdir`;
truncate table t2;
select variable_name, session_status.variable_value - t1.variable_value
from information_schema.session_status join t1 using (variable_name);
-drop table t1;
+set @@use_stat_tables= @save_use_stat_tables;
+set @@optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
+drop table t1;
\ No newline at end of file
diff --git a/mysql-test/main/temporal_literal.result b/mysql-test/main/temporal_literal.result
index d2417d7f9eb..2197b990f3b 100644
--- a/mysql-test/main/temporal_literal.result
+++ b/mysql-test/main/temporal_literal.result
@@ -294,17 +294,17 @@ SELECT TIMESTAMP'2001-00-00 00:00:00.9999999';
TIMESTAMP'2001-00-00 00:00:00.9999999'
2001-00-00 00:00:00.999999
Warnings:
-Note 1292 Truncated incorrect datetime value: '2001-00-00 00:00:00.9999999'
+Note 1292 Truncated incorrect DATETIME value: '2001-00-00 00:00:00.9999999'
SELECT TIMESTAMP'2001-00-01 00:00:00.9999999';
TIMESTAMP'2001-00-01 00:00:00.9999999'
2001-00-01 00:00:00.999999
Warnings:
-Note 1292 Truncated incorrect datetime value: '2001-00-01 00:00:00.9999999'
+Note 1292 Truncated incorrect DATETIME value: '2001-00-01 00:00:00.9999999'
SELECT TIMESTAMP'2001-01-00 00:00:00.9999999';
TIMESTAMP'2001-01-00 00:00:00.9999999'
2001-01-00 00:00:00.999999
Warnings:
-Note 1292 Truncated incorrect datetime value: '2001-01-00 00:00:00.9999999'
+Note 1292 Truncated incorrect DATETIME value: '2001-01-00 00:00:00.9999999'
#
# String literal with bad dates and nanoseconds to DATETIME(N)
#
@@ -416,7 +416,7 @@ SELECT TIME'10:10:10.1234567';
TIME'10:10:10.1234567'
10:10:10.123456
Warnings:
-Note 1292 Truncated incorrect time value: '10:10:10.1234567'
+Note 1292 Truncated incorrect TIME value: '10:10:10.1234567'
SELECT TIME('10:10:10.1234567');
TIME('10:10:10.1234567')
10:10:10.123456
@@ -440,7 +440,7 @@ SELECT TIMESTAMP'2001-01-01 10:10:10.1234567';
TIMESTAMP'2001-01-01 10:10:10.1234567'
2001-01-01 10:10:10.123456
Warnings:
-Note 1292 Truncated incorrect datetime value: '2001-01-01 10:10:10.1234567'
+Note 1292 Truncated incorrect DATETIME value: '2001-01-01 10:10:10.1234567'
SELECT TIMESTAMP('2001-01-01 10:10:10.1234567');
TIMESTAMP('2001-01-01 10:10:10.1234567')
2001-01-01 10:10:10.123456
@@ -463,7 +463,7 @@ Warning 1292 Truncated incorrect datetime value: '2001-01-01 10:10:10.1234567xyz
CREATE TABLE t1 (a TIME(6));
INSERT INTO t1 VALUES (TIME'10:20:30.1234567');
Warnings:
-Note 1292 Truncated incorrect time value: '10:20:30.1234567'
+Note 1292 Truncated incorrect TIME value: '10:20:30.1234567'
INSERT INTO t1 VALUES (TIME('10:20:30.1234567'));
Warnings:
Note 1292 Truncated incorrect time value: '10:20:30.1234567'
diff --git a/mysql-test/main/timezone2.result b/mysql-test/main/timezone2.result
index 096e996bffb..dd137045d1a 100644
--- a/mysql-test/main/timezone2.result
+++ b/mysql-test/main/timezone2.result
@@ -332,3 +332,228 @@ NULL
#
# End of 5.3 tests
#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-17203 Move fractional second truncation from Item_xxx_typecast::get_date() to Time and Datetime constructors
+# (an addition for the test for MDEV-4653)
+SET timestamp=unix_timestamp('2001-02-03 10:20:30');
+SET old_mode=ZERO_DATE_TIME_CAST;
+SELECT CONVERT_TZ(TIME('00:00:00'),'+00:00','+7:5');
+CONVERT_TZ(TIME('00:00:00'),'+00:00','+7:5')
+NULL
+Warnings:
+Warning 1292 Truncated incorrect datetime value: '00:00:00'
+SELECT CONVERT_TZ(TIME('2010-01-01 00:00:00'),'+00:00','+7:5');
+CONVERT_TZ(TIME('2010-01-01 00:00:00'),'+00:00','+7:5')
+NULL
+Warnings:
+Warning 1292 Truncated incorrect datetime value: '00:00:00'
+SET old_mode=DEFAULT;
+SET timestamp=DEFAULT;
+#
+# MDEV-13995 MAX(timestamp) returns a wrong result near DST change
+#
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526) /*summer time in Moscow*/);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526+3599) /*winter time in Moscow*/);
+SET time_zone='Europe/Moscow';
+SELECT a, UNIX_TIMESTAMP(a) FROM t1;
+a UNIX_TIMESTAMP(a)
+2010-10-31 02:25:26 1288477526
+2010-10-31 02:25:25 1288481125
+SELECT UNIX_TIMESTAMP(MAX(a)) AS a FROM t1;
+a
+1288481125
+CREATE TABLE t2 (a TIMESTAMP);
+INSERT INTO t2 SELECT MAX(a) AS a FROM t1;
+SELECT a, UNIX_TIMESTAMP(a) FROM t2;
+a UNIX_TIMESTAMP(a)
+2010-10-31 02:25:25 1288481125
+DROP TABLE t2;
+DROP TABLE t1;
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP);
+CREATE TABLE t2 (a TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526) /*summer time in Moscow*/);
+INSERT INTO t2 VALUES (FROM_UNIXTIME(1288477526+3599) /*winter time in Moscow*/);
+SET time_zone='Europe/Moscow';
+SELECT UNIX_TIMESTAMP(t1.a), UNIX_TIMESTAMP(t2.a) FROM t1,t2;
+UNIX_TIMESTAMP(t1.a) UNIX_TIMESTAMP(t2.a)
+1288477526 1288481125
+SELECT * FROM t1,t2 WHERE t1.a < t2.a;
+a a
+2010-10-31 02:25:26 2010-10-31 02:25:25
+DROP TABLE t1,t2;
+BEGIN NOT ATOMIC
+DECLARE a,b TIMESTAMP;
+SET time_zone='+00:00';
+SET a=FROM_UNIXTIME(1288477526);
+SET b=FROM_UNIXTIME(1288481125);
+SELECT a < b;
+SET time_zone='Europe/Moscow';
+SELECT a < b;
+END;
+$$
+a < b
+1
+a < b
+1
+CREATE OR REPLACE FUNCTION f1(uts INT) RETURNS TIMESTAMP
+BEGIN
+DECLARE ts TIMESTAMP;
+DECLARE tz VARCHAR(64) DEFAULT @@time_zone;
+SET time_zone='+00:00';
+SET ts=FROM_UNIXTIME(uts);
+SET time_zone=tz;
+RETURN ts;
+END;
+$$
+SET time_zone='+00:00';
+SELECT f1(1288477526) < f1(1288481125);
+f1(1288477526) < f1(1288481125)
+1
+SET time_zone='Europe/Moscow';
+SELECT f1(1288477526) < f1(1288481125);
+f1(1288477526) < f1(1288481125)
+1
+DROP FUNCTION f1;
+CREATE TABLE t1 (a TIMESTAMP,b TIMESTAMP);
+SET time_zone='+00:00';
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526) /*summer time in Moscow*/,
+FROM_UNIXTIME(1288481125) /*winter time in Moscow*/);
+SELECT *, LEAST(a,b) FROM t1;
+a b LEAST(a,b)
+2010-10-30 22:25:26 2010-10-30 23:25:25 2010-10-30 22:25:26
+SET time_zone='Europe/Moscow';
+SELECT *, LEAST(a,b) FROM t1;
+a b LEAST(a,b)
+2010-10-31 02:25:26 2010-10-31 02:25:25 2010-10-31 02:25:26
+SELECT UNIX_TIMESTAMP(a), UNIX_TIMESTAMP(b), UNIX_TIMESTAMP(LEAST(a,b)) FROM t1;
+UNIX_TIMESTAMP(a) UNIX_TIMESTAMP(b) UNIX_TIMESTAMP(LEAST(a,b))
+1288477526 1288481125 1288477526
+DROP TABLE t1;
+CREATE TABLE t1 (a TIMESTAMP,b TIMESTAMP,c TIMESTAMP);
+SET time_zone='+00:00';
+INSERT INTO t1 VALUES (
+FROM_UNIXTIME(1288477526) /*summer time in Moscow*/,
+FROM_UNIXTIME(1288481125) /*winter time in Moscow*/,
+FROM_UNIXTIME(1288481126) /*winter time in Moscow*/);
+SELECT b BETWEEN a AND c FROM t1;
+b BETWEEN a AND c
+1
+SET time_zone='Europe/Moscow';
+SELECT b BETWEEN a AND c FROM t1;
+b BETWEEN a AND c
+1
+DROP TABLE t1;
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526) /*summer time in Moscow*/);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288481125) /*winter time in Moscow*/);
+SELECT a, UNIX_TIMESTAMP(a) FROM t1 ORDER BY a;
+a UNIX_TIMESTAMP(a)
+2010-10-30 22:25:26 1288477526
+2010-10-30 23:25:25 1288481125
+SELECT COALESCE(a) AS a, UNIX_TIMESTAMP(a) FROM t1 ORDER BY a;
+a UNIX_TIMESTAMP(a)
+2010-10-30 22:25:26 1288477526
+2010-10-30 23:25:25 1288481125
+SET time_zone='Europe/Moscow';
+SELECT a, UNIX_TIMESTAMP(a) FROM t1 ORDER BY a;
+a UNIX_TIMESTAMP(a)
+2010-10-31 02:25:26 1288477526
+2010-10-31 02:25:25 1288481125
+SELECT COALESCE(a) AS a, UNIX_TIMESTAMP(a) FROM t1 ORDER BY a;
+a UNIX_TIMESTAMP(a)
+2010-10-31 02:25:26 1288477526
+2010-10-31 02:25:25 1288481125
+DROP TABLE t1;
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526) /*summer time in Moscow*/);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288481126) /*winter time in Moscow*/);
+SET time_zone='Europe/Moscow';
+SELECT a, UNIX_TIMESTAMP(a) FROM t1 GROUP BY a;
+a UNIX_TIMESTAMP(a)
+2010-10-31 02:25:26 1288477526
+2010-10-31 02:25:26 1288481126
+DROP TABLE t1;
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP, b TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526),FROM_UNIXTIME(1288481126));
+SELECT UNIX_TIMESTAMP(a),UNIX_TIMESTAMP(b),CASE a WHEN b THEN 'eq' ELSE 'ne' END AS x FROM t1;
+UNIX_TIMESTAMP(a) UNIX_TIMESTAMP(b) x
+1288477526 1288481126 ne
+SET time_zone='Europe/Moscow';
+SELECT UNIX_TIMESTAMP(a),UNIX_TIMESTAMP(b),CASE a WHEN b THEN 'eq' ELSE 'ne' END AS x FROM t1;
+UNIX_TIMESTAMP(a) UNIX_TIMESTAMP(b) x
+1288477526 1288481126 ne
+DROP TABLE t1;
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP, b TIMESTAMP,c TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526),FROM_UNIXTIME(1288481126),FROM_UNIXTIME(1288481127));
+SELECT UNIX_TIMESTAMP(a),UNIX_TIMESTAMP(b),a IN (b,c) AS x FROM t1;
+UNIX_TIMESTAMP(a) UNIX_TIMESTAMP(b) x
+1288477526 1288481126 0
+SET time_zone='Europe/Moscow';
+SELECT UNIX_TIMESTAMP(a),UNIX_TIMESTAMP(b),a IN (b,c) AS x FROM t1;
+UNIX_TIMESTAMP(a) UNIX_TIMESTAMP(b) x
+1288477526 1288481126 0
+DROP TABLE t1;
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP, b TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526),FROM_UNIXTIME(1288481126));
+SELECT * FROM t1 WHERE a = (SELECT MAX(b) FROM t1);
+a b
+SELECT * FROM t1 WHERE a = (SELECT MIN(b) FROM t1);
+a b
+SELECT * FROM t1 WHERE a IN ((SELECT MAX(b) FROM t1), (SELECT MIN(b) FROM t1));
+a b
+SET time_zone='Europe/Moscow';
+SELECT * FROM t1 WHERE a = (SELECT MAX(b) FROM t1);
+a b
+SELECT * FROM t1 WHERE a = (SELECT MIN(b) FROM t1);
+a b
+SELECT * FROM t1 WHERE a IN ((SELECT MAX(b) FROM t1), (SELECT MIN(b) FROM t1));
+a b
+DROP TABLE t1;
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP, b TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1100000000),FROM_UNIXTIME(1200000000));
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1100000001),FROM_UNIXTIME(1200000001));
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526),FROM_UNIXTIME(1288481126));
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1300000000),FROM_UNIXTIME(1400000000));
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1300000001),FROM_UNIXTIME(1400000001));
+SELECT * FROM t1 WHERE a = (SELECT MAX(b) FROM t1);
+a b
+SELECT * FROM t1 WHERE a = (SELECT MIN(b) FROM t1);
+a b
+SELECT * FROM t1 WHERE a IN ((SELECT MAX(b) FROM t1), (SELECT MIN(b) FROM t1));
+a b
+SET time_zone='Europe/Moscow';
+SELECT * FROM t1 WHERE a = (SELECT MAX(b) FROM t1);
+a b
+SELECT * FROM t1 WHERE a = (SELECT MIN(b) FROM t1);
+a b
+SELECT * FROM t1 WHERE a IN ((SELECT MAX(b) FROM t1), (SELECT MIN(b) FROM t1));
+a b
+DROP TABLE t1;
+#
+# MDEV-17979 Assertion `0' failed in Item::val_native upon SELECT with timestamp, NULLIF, GROUP BY
+#
+SET time_zone='+00:00';
+CREATE TABLE t1 (a INT, ts TIMESTAMP) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1, FROM_UNIXTIME(1288481126) /*winter time in Moscow*/);
+SET time_zone='Europe/Moscow';
+CREATE TABLE t2 AS SELECT ts, COALESCE(ts) AS cts FROM t1 GROUP BY cts;
+SELECT ts, cts, UNIX_TIMESTAMP(ts) AS uts, UNIX_TIMESTAMP(cts) AS ucts FROM t2;
+ts cts uts ucts
+2010-10-31 02:25:26 2010-10-31 02:25:26 1288481126 1288481126
+DROP TABLE t1,t2;
+SET time_zone=DEFAULT;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/timezone2.test b/mysql-test/main/timezone2.test
index 7a38610ad95..db515653651 100644
--- a/mysql-test/main/timezone2.test
+++ b/mysql-test/main/timezone2.test
@@ -308,3 +308,201 @@ SELECT CONVERT_TZ('2001-10-08 00:00:00', MAKE_SET(0,'+01:00'), '+00:00' );
--echo #
--echo # End of 5.3 tests
--echo #
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-17203 Move fractional second truncation from Item_xxx_typecast::get_date() to Time and Datetime constructors
+--echo # (an addition for the test for MDEV-4653)
+
+SET timestamp=unix_timestamp('2001-02-03 10:20:30');
+SET old_mode=ZERO_DATE_TIME_CAST;
+SELECT CONVERT_TZ(TIME('00:00:00'),'+00:00','+7:5');
+SELECT CONVERT_TZ(TIME('2010-01-01 00:00:00'),'+00:00','+7:5');
+SET old_mode=DEFAULT;
+SET timestamp=DEFAULT;
+
+--echo #
+--echo # MDEV-13995 MAX(timestamp) returns a wrong result near DST change
+--echo #
+
+# MAX()
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526) /*summer time in Moscow*/);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526+3599) /*winter time in Moscow*/);
+SET time_zone='Europe/Moscow';
+SELECT a, UNIX_TIMESTAMP(a) FROM t1;
+SELECT UNIX_TIMESTAMP(MAX(a)) AS a FROM t1;
+CREATE TABLE t2 (a TIMESTAMP);
+INSERT INTO t2 SELECT MAX(a) AS a FROM t1;
+SELECT a, UNIX_TIMESTAMP(a) FROM t2;
+DROP TABLE t2;
+DROP TABLE t1;
+
+
+# Comparison
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP);
+CREATE TABLE t2 (a TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526) /*summer time in Moscow*/);
+INSERT INTO t2 VALUES (FROM_UNIXTIME(1288477526+3599) /*winter time in Moscow*/);
+SET time_zone='Europe/Moscow';
+SELECT UNIX_TIMESTAMP(t1.a), UNIX_TIMESTAMP(t2.a) FROM t1,t2;
+SELECT * FROM t1,t2 WHERE t1.a < t2.a;
+DROP TABLE t1,t2;
+
+
+# SP variable comparison
+DELIMITER $$;
+BEGIN NOT ATOMIC
+ DECLARE a,b TIMESTAMP;
+ SET time_zone='+00:00';
+ SET a=FROM_UNIXTIME(1288477526);
+ SET b=FROM_UNIXTIME(1288481125);
+ SELECT a < b;
+ SET time_zone='Europe/Moscow';
+ SELECT a < b;
+END;
+$$
+DELIMITER ;$$
+
+
+# SP function comparison
+DELIMITER $$;
+CREATE OR REPLACE FUNCTION f1(uts INT) RETURNS TIMESTAMP
+BEGIN
+ DECLARE ts TIMESTAMP;
+ DECLARE tz VARCHAR(64) DEFAULT @@time_zone;
+ SET time_zone='+00:00';
+ SET ts=FROM_UNIXTIME(uts);
+ SET time_zone=tz;
+ RETURN ts;
+END;
+$$
+DELIMITER ;$$
+SET time_zone='+00:00';
+SELECT f1(1288477526) < f1(1288481125);
+SET time_zone='Europe/Moscow';
+SELECT f1(1288477526) < f1(1288481125);
+DROP FUNCTION f1;
+
+
+# LEAST()
+CREATE TABLE t1 (a TIMESTAMP,b TIMESTAMP);
+SET time_zone='+00:00';
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526) /*summer time in Moscow*/,
+ FROM_UNIXTIME(1288481125) /*winter time in Moscow*/);
+SELECT *, LEAST(a,b) FROM t1;
+SET time_zone='Europe/Moscow';
+SELECT *, LEAST(a,b) FROM t1;
+SELECT UNIX_TIMESTAMP(a), UNIX_TIMESTAMP(b), UNIX_TIMESTAMP(LEAST(a,b)) FROM t1;
+DROP TABLE t1;
+
+
+# BETWEEN
+CREATE TABLE t1 (a TIMESTAMP,b TIMESTAMP,c TIMESTAMP);
+SET time_zone='+00:00';
+INSERT INTO t1 VALUES (
+ FROM_UNIXTIME(1288477526) /*summer time in Moscow*/,
+ FROM_UNIXTIME(1288481125) /*winter time in Moscow*/,
+ FROM_UNIXTIME(1288481126) /*winter time in Moscow*/);
+SELECT b BETWEEN a AND c FROM t1;
+SET time_zone='Europe/Moscow';
+SELECT b BETWEEN a AND c FROM t1;
+DROP TABLE t1;
+
+
+# ORDER BY
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526) /*summer time in Moscow*/);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288481125) /*winter time in Moscow*/);
+SELECT a, UNIX_TIMESTAMP(a) FROM t1 ORDER BY a;
+SELECT COALESCE(a) AS a, UNIX_TIMESTAMP(a) FROM t1 ORDER BY a;
+SET time_zone='Europe/Moscow';
+SELECT a, UNIX_TIMESTAMP(a) FROM t1 ORDER BY a;
+SELECT COALESCE(a) AS a, UNIX_TIMESTAMP(a) FROM t1 ORDER BY a;
+DROP TABLE t1;
+
+
+# GROUP BY
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526) /*summer time in Moscow*/);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288481126) /*winter time in Moscow*/);
+SET time_zone='Europe/Moscow';
+SELECT a, UNIX_TIMESTAMP(a) FROM t1 GROUP BY a;
+DROP TABLE t1;
+
+
+# CASE
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP, b TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526),FROM_UNIXTIME(1288481126));
+SELECT UNIX_TIMESTAMP(a),UNIX_TIMESTAMP(b),CASE a WHEN b THEN 'eq' ELSE 'ne' END AS x FROM t1;
+SET time_zone='Europe/Moscow';
+SELECT UNIX_TIMESTAMP(a),UNIX_TIMESTAMP(b),CASE a WHEN b THEN 'eq' ELSE 'ne' END AS x FROM t1;
+DROP TABLE t1;
+
+
+# IN
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP, b TIMESTAMP,c TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526),FROM_UNIXTIME(1288481126),FROM_UNIXTIME(1288481127));
+SELECT UNIX_TIMESTAMP(a),UNIX_TIMESTAMP(b),a IN (b,c) AS x FROM t1;
+SET time_zone='Europe/Moscow';
+SELECT UNIX_TIMESTAMP(a),UNIX_TIMESTAMP(b),a IN (b,c) AS x FROM t1;
+DROP TABLE t1;
+
+# Comparison and IN in combination with a subquery (with one row)
+
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP, b TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526),FROM_UNIXTIME(1288481126));
+SELECT * FROM t1 WHERE a = (SELECT MAX(b) FROM t1);
+SELECT * FROM t1 WHERE a = (SELECT MIN(b) FROM t1);
+SELECT * FROM t1 WHERE a IN ((SELECT MAX(b) FROM t1), (SELECT MIN(b) FROM t1));
+
+SET time_zone='Europe/Moscow';
+SELECT * FROM t1 WHERE a = (SELECT MAX(b) FROM t1);
+SELECT * FROM t1 WHERE a = (SELECT MIN(b) FROM t1);
+SELECT * FROM t1 WHERE a IN ((SELECT MAX(b) FROM t1), (SELECT MIN(b) FROM t1));
+DROP TABLE t1;
+
+# Comparison and IN in combination with a subquery (with multiple rows)
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP, b TIMESTAMP);
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1100000000),FROM_UNIXTIME(1200000000));
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1100000001),FROM_UNIXTIME(1200000001));
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1288477526),FROM_UNIXTIME(1288481126));
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1300000000),FROM_UNIXTIME(1400000000));
+INSERT INTO t1 VALUES (FROM_UNIXTIME(1300000001),FROM_UNIXTIME(1400000001));
+SELECT * FROM t1 WHERE a = (SELECT MAX(b) FROM t1);
+SELECT * FROM t1 WHERE a = (SELECT MIN(b) FROM t1);
+SELECT * FROM t1 WHERE a IN ((SELECT MAX(b) FROM t1), (SELECT MIN(b) FROM t1));
+
+SET time_zone='Europe/Moscow';
+SELECT * FROM t1 WHERE a = (SELECT MAX(b) FROM t1);
+SELECT * FROM t1 WHERE a = (SELECT MIN(b) FROM t1);
+SELECT * FROM t1 WHERE a IN ((SELECT MAX(b) FROM t1), (SELECT MIN(b) FROM t1));
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-17979 Assertion `0' failed in Item::val_native upon SELECT with timestamp, NULLIF, GROUP BY
+--echo #
+
+SET time_zone='+00:00';
+CREATE TABLE t1 (a INT, ts TIMESTAMP) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1, FROM_UNIXTIME(1288481126) /*winter time in Moscow*/);
+SET time_zone='Europe/Moscow';
+CREATE TABLE t2 AS SELECT ts, COALESCE(ts) AS cts FROM t1 GROUP BY cts;
+SELECT ts, cts, UNIX_TIMESTAMP(ts) AS uts, UNIX_TIMESTAMP(cts) AS ucts FROM t2;
+DROP TABLE t1,t2;
+SET time_zone=DEFAULT;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
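The MDEV-13995 coverage above revolves around the Moscow DST fall-back of 2010-10-31: unix timestamps 1288477526 and 1288481125 are almost an hour apart in UTC, yet both land in the repeated local hour, so comparing the displayed local values can give the wrong order. A minimal sketch of the effect, assuming the server's time zone tables have been loaded (e.g. with mysql_tzinfo_to_sql); not part of the patch:

SET time_zone='+00:00';
SELECT FROM_UNIXTIME(1288477526) AS summer_utc, FROM_UNIXTIME(1288481125) AS winter_utc;
SET time_zone='Europe/Moscow';
SELECT FROM_UNIXTIME(1288477526) AS summer_local, FROM_UNIXTIME(1288481125) AS winter_local;
-- The local-time strings sort in the opposite order of the underlying UTC values,
-- which is why MAX(), LEAST(), BETWEEN, ORDER BY and GROUP BY in the tests above
-- must compare timestamps by their UTC value rather than by the formatted string.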
diff --git a/mysql-test/main/tmp_table_count-7586.result b/mysql-test/main/tmp_table_count-7586.result
index 0c526e0d4a3..637e7385685 100644
--- a/mysql-test/main/tmp_table_count-7586.result
+++ b/mysql-test/main/tmp_table_count-7586.result
@@ -38,7 +38,7 @@ EXPLAIN SELECT * FROM t1 WHERE a IN (SELECT * FROM t2 GROUP BY a HAVING a > 1);
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where
1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t1.a 1
-2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using temporary
+2 MATERIALIZED t2 ALL NULL NULL NULL NULL 2 Using where; Using temporary
truncate table performance_schema.events_statements_history_long;
flush status;
CREATE TABLE t3 SELECT * FROM t1 WHERE a IN (SELECT * FROM t2 GROUP BY a HAVING a > 1);
diff --git a/mysql-test/main/trigger.result b/mysql-test/main/trigger.result
index b6d8be5d364..ab9dbc63888 100644
--- a/mysql-test/main/trigger.result
+++ b/mysql-test/main/trigger.result
@@ -736,6 +736,8 @@ select user() into user;
set NEW.username = user;
select count(*) from ((select 1) union (select 2)) as d1 into i;
end|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
update t1 set data = 1;
connection addconroot1;
update t1 set data = 2;
@@ -2084,6 +2086,8 @@ FOR EACH ROW BEGIN
SELECT 1 FROM t1 c WHERE
(@bug51650 IS NULL OR @bug51650 != c.b) AND c.b = NEW.a LIMIT 1 INTO @foo;
END//
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SET @bug51650 = 1;
INSERT IGNORE INTO t2 VALUES();
INSERT IGNORE INTO t1 SET b = '777';
@@ -2142,7 +2146,7 @@ insert into t2 values (1),(2);
delete from t1 where i=1;
show status like 'Opened_tables';
Variable_name Value
-Opened_tables 3
+Opened_tables 6
select * from t1;
i
2
diff --git a/mysql-test/main/trigger_notembedded.test b/mysql-test/main/trigger_notembedded.test
index a31594826e7..051cd43f16f 100644
--- a/mysql-test/main/trigger_notembedded.test
+++ b/mysql-test/main/trigger_notembedded.test
@@ -907,7 +907,7 @@ connection flush;
connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for global read lock";
+ where state = "Waiting for backup lock";
--source include/wait_condition.inc
create trigger t1_bi before insert on t1 for each row begin end;
unlock tables;
diff --git a/mysql-test/main/truncate_coverage.result b/mysql-test/main/truncate_coverage.result
index 078de1ef3ab..9a343832b69 100644
--- a/mysql-test/main/truncate_coverage.result
+++ b/mysql-test/main/truncate_coverage.result
@@ -40,7 +40,7 @@ TRUNCATE TABLE m1;
connection con2;
SET DEBUG_SYNC= 'now WAIT_FOR opened';
# Sending:
-FLUSH TABLES;
+FLUSH TABLES m1;
connection default;
# Waiting for FLUSH TABLES to be blocked.
SET DEBUG_SYNC= 'now SIGNAL dropped';
diff --git a/mysql-test/main/truncate_coverage.test b/mysql-test/main/truncate_coverage.test
index 3351ce84232..1b793c6638c 100644
--- a/mysql-test/main/truncate_coverage.test
+++ b/mysql-test/main/truncate_coverage.test
@@ -81,12 +81,12 @@ SET DEBUG_SYNC= 'open_tables_after_open_and_process_table SIGNAL opened WAIT_FOR
connection con2;
SET DEBUG_SYNC= 'now WAIT_FOR opened';
--echo # Sending:
---send FLUSH TABLES
+--send FLUSH TABLES m1
connection default;
--echo # Waiting for FLUSH TABLES to be blocked.
let $wait_condition= SELECT COUNT(*)=1 FROM information_schema.processlist
- WHERE state= 'Waiting for table flush' AND info= 'FLUSH TABLES';
+ WHERE state= 'Waiting for table metadata lock' AND info= 'FLUSH TABLES m1';
--source include/wait_condition.inc
SET DEBUG_SYNC= 'now SIGNAL dropped';
diff --git a/mysql-test/main/type_bit.result b/mysql-test/main/type_bit.result
index eeedc501dc4..c2db7ee7178 100644
--- a/mysql-test/main/type_bit.result
+++ b/mysql-test/main/type_bit.result
@@ -256,7 +256,7 @@ a+0 b+0
127 403
explain select a+0, b+0 from t1 where a > 40 and a < 70 order by 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 2 NULL 8 Using where; Using index; Using filesort
+1 SIMPLE t1 range a a 2 NULL 9 Using where; Using index; Using filesort
select a+0, b+0 from t1 where a > 40 and a < 70 order by 2;
a+0 b+0
57 135
@@ -677,9 +677,13 @@ DROP TABLE t2;
CREATE TABLE t1(a BIT(13), KEY(a));
INSERT IGNORE INTO t1(a) VALUES
(65535),(65525),(65535),(65535),(65535),(65535),(65535),(65535),(65535),(65535);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
EXPLAIN SELECT 1 FROM t1 GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range NULL a 3 NULL 6 Using index for group-by
+1 SIMPLE t1 range NULL a 3 NULL 2 Using index for group-by
SELECT 1 FROM t1 GROUP BY a;
1
1
@@ -762,7 +766,7 @@ CREATE TABLE t1 (a BIT(7), b BIT(9), KEY(a, b));
INSERT INTO t1 VALUES(0, 0), (5, 3), (5, 6), (6, 4), (7, 0);
EXPLAIN SELECT a+0, b+0 FROM t1 WHERE a > 4 and b < 7 ORDER BY 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 2 NULL 4 Using where; Using index; Using filesort
+1 SIMPLE t1 index a a 5 NULL 5 Using where; Using index; Using filesort
DROP TABLE t1;
End of 5.0 tests
create table t1(a bit(7));
@@ -849,3 +853,21 @@ DROP TABLE IF EXISTS t1;
#
# End of 10.2 tests
#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-15759 Expect "Impossible WHERE" for indexed_int_column=out_of_range_int_constant
+#
+CREATE TABLE t1 (a BIT(7), KEY(a));
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
+EXPLAIN SELECT * FROM t1 WHERE a=200;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+EXPLAIN SELECT * FROM t1 WHERE a<=>200;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
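The new MDEV-15759 case above relies on BIT(7) having a maximum value of 127, so an equality (or NULL-safe equality) against the constant 200 can never match and the optimizer can report an impossible condition. A stand-alone sketch of the same idea, with a hypothetical table name, not part of the patch:

CREATE TABLE bit_range_demo (a BIT(7), KEY(a));
INSERT INTO bit_range_demo VALUES (1),(2),(3);
EXPLAIN SELECT * FROM bit_range_demo WHERE a = 200;   -- 200 is out of range for BIT(7)
EXPLAIN SELECT * FROM bit_range_demo WHERE a <=> 200; -- same for the NULL-safe operator
DROP TABLE bit_range_demo;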
diff --git a/mysql-test/main/type_bit.test b/mysql-test/main/type_bit.test
index 04db1511833..ee14b38ada0 100644
--- a/mysql-test/main/type_bit.test
+++ b/mysql-test/main/type_bit.test
@@ -326,6 +326,7 @@ CREATE TABLE t1(a BIT(13), KEY(a));
--disable_warnings
INSERT IGNORE INTO t1(a) VALUES
(65535),(65525),(65535),(65535),(65535),(65535),(65535),(65535),(65535),(65535);
+ANALYZE TABLE t1;
--enable_warnings
EXPLAIN SELECT 1 FROM t1 GROUP BY a;
@@ -483,3 +484,21 @@ DROP TABLE IF EXISTS t1;
--echo # End of 10.2 tests
--echo #
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-15759 Expect "Impossible WHERE" for indexed_int_column=out_of_range_int_constant
+--echo #
+
+CREATE TABLE t1 (a BIT(7), KEY(a));
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
+EXPLAIN SELECT * FROM t1 WHERE a=200;
+EXPLAIN SELECT * FROM t1 WHERE a<=>200;
+DROP TABLE t1;
+
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/type_blob.result b/mysql-test/main/type_blob.result
index 38021807b55..cfb47f7b850 100644
--- a/mysql-test/main/type_blob.result
+++ b/mysql-test/main/type_blob.result
@@ -369,8 +369,6 @@ HELLO MY 1
a 1
hello 1
drop table t1;
-create table t1 (a text, unique (a(2100)));
-ERROR 42000: Specified key was too long; max key length is 1000 bytes
create table t1 (a text, key (a(2100)));
Warnings:
Note 1071 Specified key was too long; max key length is 1000 bytes
@@ -618,7 +616,8 @@ id txt
3 Ford
drop table t1;
create table t1 (id integer primary key auto_increment, txt text, index txt_index (txt (20)));
-insert into t1 (txt) values ('Chevy'), ('Chevy '), (NULL);
+insert into t1 (txt) values
+('Chevy'), ('Chevy '), (NULL), ('Honda'), ('Subaru'), ('Honda');
select * from t1 where txt='Chevy' or txt is NULL;
id txt
1 Chevy
@@ -626,7 +625,7 @@ id txt
3 NULL
explain select * from t1 where txt='Chevy' or txt is NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref_or_null txt_index txt_index 23 const 2 Using where
+1 SIMPLE t1 ref_or_null txt_index txt_index 23 const 3 Using where
select * from t1 where txt='Chevy ';
id txt
1 Chevy
@@ -648,7 +647,7 @@ select * from t1 where txt='Chevy' or txt='Chevy ' or txt='Ford';
id txt
1 Chevy
2 Chevy
-4 Ford
+7 Ford
select * from t1 where txt='Chevy' or txt='Chevy ';
id txt
1 Chevy
@@ -688,12 +687,18 @@ id txt
2 Chevy
select * from t1 where txt > 'Chevy';
id txt
-4 Ford
+4 Honda
+5 Subaru
+6 Honda
+7 Ford
select * from t1 where txt >= 'Chevy';
id txt
1 Chevy
2 Chevy
-4 Ford
+4 Honda
+5 Subaru
+6 Honda
+7 Ford
alter table t1 modify column txt blob;
explain select * from t1 where txt='Chevy' or txt is NULL;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1088,3 +1093,34 @@ drop table t1;
#
# End of 10.2 test
#
+#
+# Start of 10.4 test
+#
+#
+# MDEV-19317 TEXT column accepts too long literals as a default value
+#
+EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a TINYTEXT DEFAULT ?)' USING REPEAT('a', 255);
+INSERT INTO t1 VALUES ();
+SELECT LENGTH(a), LENGTH(DEFAULT(a)) FROM t1;
+LENGTH(a) LENGTH(DEFAULT(a))
+255 255
+DROP TABLE t1;
+EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a TINYTEXT DEFAULT ?)' USING REPEAT('a', 256);
+ERROR 42000: Invalid default value for 'a'
+CREATE OR REPLACE TABLE t1 (a TINYTEXT DEFAULT 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa');
+ERROR 42000: Invalid default value for 'a'
+EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a TEXT DEFAULT ?)' USING REPEAT('a', 256);
+INSERT INTO t1 VALUES ();
+SELECT LENGTH(a), LENGTH(DEFAULT(a)) FROM t1;
+LENGTH(a) LENGTH(DEFAULT(a))
+256 256
+DROP TABLE t1;
+CREATE OR REPLACE TABLE t1 (a TEXT DEFAULT 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa');
+INSERT INTO t1 VALUES ();
+SELECT LENGTH(a), LENGTH(DEFAULT(a)) FROM t1;
+LENGTH(a) LENGTH(DEFAULT(a))
+256 256
+DROP TABLE t1;
+#
+# End of 10.4 test
+#
diff --git a/mysql-test/main/type_blob.test b/mysql-test/main/type_blob.test
index f97ddc755ce..df565b187b4 100644
--- a/mysql-test/main/type_blob.test
+++ b/mysql-test/main/type_blob.test
@@ -131,8 +131,6 @@ select c,count(*) from t1 group by c;
select d,count(*) from t1 group by d;
drop table t1;
--- error 1071
-create table t1 (a text, unique (a(2100))); # should give an error
create table t1 (a text, key (a(2100))); # key is auto-truncated
show create table t1;
drop table t1;
@@ -360,7 +358,8 @@ select * from t1 where txt >= 'Chevy';
drop table t1;
create table t1 (id integer primary key auto_increment, txt text, index txt_index (txt (20)));
-insert into t1 (txt) values ('Chevy'), ('Chevy '), (NULL);
+insert into t1 (txt) values
+ ('Chevy'), ('Chevy '), (NULL), ('Honda'), ('Subaru'), ('Honda');
select * from t1 where txt='Chevy' or txt is NULL;
explain select * from t1 where txt='Chevy' or txt is NULL;
select * from t1 where txt='Chevy ';
@@ -703,3 +702,39 @@ drop table t1;
--echo #
--echo # End of 10.2 test
--echo #
+
+
+--echo #
+--echo # Start of 10.4 test
+--echo #
+
+--echo #
+--echo # MDEV-19317 TEXT column accepts too long literals as a default value
+--echo #
+
+EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a TINYTEXT DEFAULT ?)' USING REPEAT('a', 255);
+INSERT INTO t1 VALUES ();
+SELECT LENGTH(a), LENGTH(DEFAULT(a)) FROM t1;
+DROP TABLE t1;
+
+--error ER_INVALID_DEFAULT
+EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a TINYTEXT DEFAULT ?)' USING REPEAT('a', 256);
+
+--error ER_INVALID_DEFAULT
+CREATE OR REPLACE TABLE t1 (a TINYTEXT DEFAULT 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa');
+
+
+EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a TEXT DEFAULT ?)' USING REPEAT('a', 256);
+INSERT INTO t1 VALUES ();
+SELECT LENGTH(a), LENGTH(DEFAULT(a)) FROM t1;
+DROP TABLE t1;
+
+CREATE OR REPLACE TABLE t1 (a TEXT DEFAULT 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa');
+INSERT INTO t1 VALUES ();
+SELECT LENGTH(a), LENGTH(DEFAULT(a)) FROM t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # End of 10.4 test
+--echo #
diff --git a/mysql-test/main/type_date.result b/mysql-test/main/type_date.result
index 4b5a0ad63a0..c5c892b5e4d 100644
--- a/mysql-test/main/type_date.result
+++ b/mysql-test/main/type_date.result
@@ -211,7 +211,7 @@ a
SET SQL_MODE=TRADITIONAL;
EXPLAIN SELECT * FROM t1 WHERE a = '0000-00-00';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref i i 4 const 1 Using index
+1 SIMPLE t1 ref i i 4 const 2 Using index
SELECT * FROM t1 WHERE a = '0000-00-00';
a
0000-00-00
@@ -240,7 +240,7 @@ a
SET SQL_MODE=TRADITIONAL;
EXPLAIN SELECT * FROM t1 WHERE a = '1000-00-00';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref i i 4 const 1 Using index
+1 SIMPLE t1 ref i i 4 const 2 Using index
SELECT * FROM t1 WHERE a = '1000-00-00';
a
1000-00-00
@@ -621,7 +621,7 @@ SELECT * FROM t1 WHERE LENGTH(a)=11+RAND() AND a=' garbage ';
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
Warnings:
-Warning 1292 Incorrect datetime value: ' garbage '
+Warning 1292 Truncated incorrect datetime value: ' garbage '
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = DATE'0000-00-00' and <cache>(octet_length(DATE'0000-00-00')) = 11 + rand()
DROP TABLE t1;
CREATE TABLE t1 (a DATE);
@@ -872,12 +872,14 @@ SELECT group_concat(d1/(CASE 'b' WHEN 'j' THEN 'c' END))
FROM v1 GROUP BY greatest(pk, 0, d2);
group_concat(d1/(CASE 'b' WHEN 'j' THEN 'c' END))
NULL
+NULL
Warnings:
Warning 1292 Incorrect datetime value: '1' for column `test`.`t1`.`pk` at row 1
Warning 1292 Incorrect datetime value: '2' for column `test`.`t1`.`pk` at row 1
Warning 1292 Incorrect datetime value: '1' for column `test`.`t1`.`pk` at row 1
Warning 1292 Incorrect datetime value: '1' for column `test`.`t1`.`pk` at row 1
Warning 1292 Incorrect datetime value: '2' for column `test`.`t1`.`pk` at row 2
+Warning 1292 Incorrect datetime value: '2' for column `test`.`t1`.`pk` at row 2
CREATE TABLE t2 AS SELECT greatest(pk, 0, d2) AS c1 FROM t1 LIMIT 0;
SHOW CREATE TABLE t2;
Table Create Table
@@ -914,3 +916,114 @@ DROP TABLE t1;
#
# End of 10.3 tests
#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-11362 True condition elimination does not work for DECIMAL and temporal dynamic SQL parameters
+#
+CREATE TABLE t1 (a DATE);
+INSERT INTO t1 VALUES ('2001-01-01'),('2001-01-02'),('2001-01-03');
+# Equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(DATE'2001:01:01',a)<=>COALESCE(DATE'2001-01-01',a);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING DATE'2001-01-01',DATE'2001-01-01';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(DATE''2001-01-01'',a)<=>COALESCE(?,a)' USING DATE'2001-01-01';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(DATE''2001-01-01'',a)' USING DATE'2001-01-01';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+# Equal values but of different data types (should not propagate)
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(DATE'2001:01:01',a)<=>COALESCE(TIMESTAMP'2001-01-01 00:00:00',a);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(DATE'2001-01-01',`test`.`t1`.`a`) <=> coalesce(TIMESTAMP'2001-01-01 00:00:00',`test`.`t1`.`a`)
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING DATE'2001-01-01',TIMESTAMP'2001-01-01 00:00:00';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(<cache>(DATE'2001-01-01'),`test`.`t1`.`a`) <=> coalesce(<cache>(TIMESTAMP'2001-01-01 00:00:00'),`test`.`t1`.`a`)
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(DATE''2001-01-01'',a)<=>COALESCE(?,a)' USING TIMESTAMP'2001-01-01 00:00:00';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(DATE'2001-01-01',`test`.`t1`.`a`) <=> coalesce(<cache>(TIMESTAMP'2001-01-01 00:00:00'),`test`.`t1`.`a`)
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(DATE''2001-01-01'',a)' USING TIMESTAMP'2001-01-01 00:00:00';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(<cache>(TIMESTAMP'2001-01-01 00:00:00'),`test`.`t1`.`a`) <=> coalesce(DATE'2001-01-01',`test`.`t1`.`a`)
+# Not equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(DATE'2001-01-01',a)<=>COALESCE(DATE'2001-01-02',a);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(DATE'2001-01-01',`test`.`t1`.`a`) <=> coalesce(DATE'2001-01-02',`test`.`t1`.`a`)
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING DATE'2001-01-01',DATE'2001-01-02';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(<cache>(DATE'2001-01-01'),`test`.`t1`.`a`) <=> coalesce(<cache>(DATE'2001-01-02'),`test`.`t1`.`a`)
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(DATE''2001-01-01'',a)<=>COALESCE(?,a)' USING DATE'2001-01-02';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(DATE'2001-01-01',`test`.`t1`.`a`) <=> coalesce(<cache>(DATE'2001-01-02'),`test`.`t1`.`a`)
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(DATE''2001-01-01'',a)' USING DATE'2001-01-02';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(<cache>(DATE'2001-01-02'),`test`.`t1`.`a`) <=> coalesce(DATE'2001-01-01',`test`.`t1`.`a`)
+DROP TABLE t1;
+#
+# MDEV-15406 NO_ZERO_IN_DATE erroneously affects how CAST(AS DATE) warns about fractional digit truncation
+#
+SET sql_mode='';
+CREATE TABLE t1 (a DATE);
+SELECT CAST(20061108.01 AS DATE);
+CAST(20061108.01 AS DATE)
+2006-11-08
+Warnings:
+Note 1292 Truncated incorrect date value: '20061108.01'
+INSERT INTO t1 VALUES (20061108.01);
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+DROP TABLE t1;
+SET sql_mode=NO_ZERO_IN_DATE;
+SELECT CAST(20061108.01 AS DATE);
+CAST(20061108.01 AS DATE)
+2006-11-08
+Warnings:
+Note 1292 Truncated incorrect date value: '20061108.01'
+CREATE TABLE t1 (a DATE);
+INSERT INTO t1 VALUES (20061108.01);
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+#
+# MDEV-17216 Assertion `!dt->fraction_remainder(decimals())' failed in Field_temporal_with_date::store_TIME_with_warning
+#
+SET sql_mode='';
+CREATE TABLE t1 (i1 date );
+CREATE TABLE t2 (i2 int unsigned );
+INSERT INTO t2 VALUES (0);
+INSERT INTO t1 SELECT * FROM t2;
+DROP TABLE t1,t2;
+SET sql_mode=DEFAULT;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/type_date.test b/mysql-test/main/type_date.test
index befee57183d..4639c004740 100644
--- a/mysql-test/main/type_date.test
+++ b/mysql-test/main/type_date.test
@@ -628,3 +628,69 @@ DROP TABLE t1;
--echo #
--echo # End of 10.3 tests
--echo #
+
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-11362 True condition elimination does not work for DECIMAL and temporal dynamic SQL parameters
+--echo #
+
+CREATE TABLE t1 (a DATE);
+INSERT INTO t1 VALUES ('2001-01-01'),('2001-01-02'),('2001-01-03');
+--echo # Equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(DATE'2001:01:01',a)<=>COALESCE(DATE'2001-01-01',a);
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING DATE'2001-01-01',DATE'2001-01-01';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(DATE''2001-01-01'',a)<=>COALESCE(?,a)' USING DATE'2001-01-01';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(DATE''2001-01-01'',a)' USING DATE'2001-01-01';
+
+--echo # Equal values but of different data types (should not propagate)
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(DATE'2001:01:01',a)<=>COALESCE(TIMESTAMP'2001-01-01 00:00:00',a);
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING DATE'2001-01-01',TIMESTAMP'2001-01-01 00:00:00';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(DATE''2001-01-01'',a)<=>COALESCE(?,a)' USING TIMESTAMP'2001-01-01 00:00:00';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(DATE''2001-01-01'',a)' USING TIMESTAMP'2001-01-01 00:00:00';
+
+--echo # Not equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(DATE'2001-01-01',a)<=>COALESCE(DATE'2001-01-02',a);
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING DATE'2001-01-01',DATE'2001-01-02';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(DATE''2001-01-01'',a)<=>COALESCE(?,a)' USING DATE'2001-01-02';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(DATE''2001-01-01'',a)' USING DATE'2001-01-02';
+
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-15406 NO_ZERO_IN_DATE erroneously affects how CAST(AS DATE) warns about fractional digit truncation
+--echo #
+
+SET sql_mode='';
+CREATE TABLE t1 (a DATE);
+SELECT CAST(20061108.01 AS DATE);
+INSERT INTO t1 VALUES (20061108.01);
+DROP TABLE t1;
+
+SET sql_mode=NO_ZERO_IN_DATE;
+SELECT CAST(20061108.01 AS DATE);
+CREATE TABLE t1 (a DATE);
+INSERT INTO t1 VALUES (20061108.01);
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+
+
+--echo #
+--echo # MDEV-17216 Assertion `!dt->fraction_remainder(decimals())' failed in Field_temporal_with_date::store_TIME_with_warning
+--echo #
+SET sql_mode='';
+CREATE TABLE t1 (i1 date );
+CREATE TABLE t2 (i2 int unsigned );
+INSERT INTO t2 VALUES (0);
+INSERT INTO t1 SELECT * FROM t2;
+DROP TABLE t1,t2;
+SET sql_mode=DEFAULT;
+
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/type_date_round.result b/mysql-test/main/type_date_round.result
new file mode 100644
index 00000000000..0da78c6afe5
--- /dev/null
+++ b/mysql-test/main/type_date_round.result
@@ -0,0 +1,174 @@
+SET sql_mode=IF(@@version LIKE '%MariaDB%', 'TIME_ROUND_FRACTIONAL', '');
+SET @default_sql_mode=@@sql_mode;
+#
+# DATE: SET
+#
+CREATE TABLE t1 (a DATE, b DATETIME(4));
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999');
+UPDATE t1 SET a=b;
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+SELECT a FROM t1;
+a
+2000-12-31
+DROP TABLE t1;
+CREATE TABLE t1 (a DATE, b VARCHAR(64));
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999999');
+UPDATE t1 SET a=b;
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+Note 1265 Data truncated for column 'a' at row 2
+SELECT a FROM t1;
+a
+2000-12-31
+2000-12-31
+DROP TABLE t1;
+CREATE TABLE t1 (a DATE, b DECIMAL(38,10));
+INSERT INTO t1 VALUES(NULL,20001231235959.9999);
+INSERT INTO t1 VALUES(NULL,20001231235959.9999999);
+UPDATE t1 SET a=b;
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+Note 1265 Data truncated for column 'a' at row 2
+SELECT a FROM t1;
+a
+2000-12-31
+2000-12-31
+DROP TABLE t1;
+#
+# DATE: ALTER
+#
+CREATE TABLE t1 (a DATETIME(4));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+ALTER TABLE t1 MODIFY a DATE;
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+SELECT a FROM t1;
+a
+2000-12-31
+DROP TABLE t1;
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999999');
+ALTER TABLE t1 MODIFY a DATE;
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+Note 1265 Data truncated for column 'a' at row 2
+SELECT a FROM t1;
+a
+2000-12-31
+2000-12-31
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(20001231235959.9999);
+INSERT INTO t1 VALUES(20001231235959.9999999);
+ALTER TABLE t1 MODIFY a DATE;
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+Note 1265 Data truncated for column 'a' at row 2
+SELECT a FROM t1;
+a
+2000-12-31
+2000-12-31
+DROP TABLE t1;
+#
+# DATE: CAST
+#
+CREATE TABLE t1 (a DATETIME(4));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+SELECT a, CAST(a AS DATE) FROM t1;
+a CAST(a AS DATE)
+2000-12-31 23:59:59.9999 2000-12-31
+DROP TABLE t1;
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999999');
+SELECT a, CAST(a AS DATE) FROM t1;
+a CAST(a AS DATE)
+2000-12-31 23:59:59.9999 2000-12-31
+2000-12-31 23:59:59.9999999 2000-12-31
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(20001231235959.9999);
+INSERT INTO t1 VALUES(20001231235959.9999999);
+SELECT a, CAST(a AS DATE) FROM t1;
+a CAST(a AS DATE)
+20001231235959.9999000000 2000-12-31
+20001231235959.9999999000 2000-12-31
+DROP TABLE t1;
+#
+# Equal field propagation
+#
+CREATE TABLE t1 (a DATE);
+INSERT INTO t1 VALUES (20010101);
+INSERT INTO t1 VALUES (20010102);
+SELECT * FROM t1 WHERE a= 20010101235959.9999999;
+a
+2001-01-02
+SELECT * FROM t1 WHERE a='2001-01-01 23:59:59.9999999';
+a
+2001-01-02
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+SELECT * FROM t1 WHERE a='2001-01-01 23:59:59.9999999' AND a>='2001-01-01 23:59:59.9999999';
+a
+2001-01-02
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+SELECT * FROM t1 WHERE a='2001-01-01 23:59:59.9999999' AND CONCAT(a)='2001-01-02';
+a
+2001-01-02
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='2001-01-01 23:59:59.9999999' AND a>='2001-01-01 23:59:59.9999999';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = DATE'2001-01-02'
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='2001-01-01 23:59:59.9999999' AND CONCAT(a)='2001-01-02';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = DATE'2001-01-02'
+DROP TABLE t1;
+#
+# Comparing non-temporal to DATE
+#
+# Although conversion from a non-temporal type to DATE (e.g. on SET) does not round,
+# comparison between a non-temporal type and DATE is performed as DATETIME.
+# So rounding does happen here.
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT t1 VALUES ('2001-01-01 23:59:59.9999999');
+SELECT * FROM t1 WHERE a=DATE'2001-01-02';
+a
+2001-01-01 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+SELECT * FROM t1 WHERE CONCAT(a)=DATE'2001-01-02';
+a
+2001-01-01 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+SELECT * FROM t1 WHERE COALESCE(a)=DATE'2001-01-02';
+a
+2001-01-01 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(32,7));
+INSERT t1 VALUES (20010101235959.9999999);
+SELECT * FROM t1 WHERE a=DATE'2001-01-02';
+a
+20010101235959.9999999
+SELECT * FROM t1 WHERE COALESCE(a)=DATE'2001-01-02';
+a
+20010101235959.9999999
+DROP TABLE t1;
diff --git a/mysql-test/main/type_date_round.test b/mysql-test/main/type_date_round.test
new file mode 100644
index 00000000000..61e1d0a401c
--- /dev/null
+++ b/mysql-test/main/type_date_round.test
@@ -0,0 +1,113 @@
+SET sql_mode=IF(@@version LIKE '%MariaDB%', 'TIME_ROUND_FRACTIONAL', '');
+SET @default_sql_mode=@@sql_mode;
+
+--echo #
+--echo # DATE: SET
+--echo #
+
+CREATE TABLE t1 (a DATE, b DATETIME(4));
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999');
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DATE, b VARCHAR(64));
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999999');
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DATE, b DECIMAL(38,10));
+INSERT INTO t1 VALUES(NULL,20001231235959.9999);
+INSERT INTO t1 VALUES(NULL,20001231235959.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # DATE: ALTER
+--echo #
+
+CREATE TABLE t1 (a DATETIME(4));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+ALTER TABLE t1 MODIFY a DATE;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999999');
+ALTER TABLE t1 MODIFY a DATE;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(20001231235959.9999);
+INSERT INTO t1 VALUES(20001231235959.9999999);
+ALTER TABLE t1 MODIFY a DATE;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # DATE: CAST
+--echo #
+
+CREATE TABLE t1 (a DATETIME(4));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+SELECT a, CAST(a AS DATE) FROM t1;
+DROP TABLE t1;
+
+# This truncates microseconds but rounds nanoseconds (MySQL Bug #92475)
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999999');
+SELECT a, CAST(a AS DATE) FROM t1;
+DROP TABLE t1;
+
+# This truncates microseconds but rounds nanoseconds (MySQL Bug #92475)
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(20001231235959.9999);
+INSERT INTO t1 VALUES(20001231235959.9999999);
+SELECT a, CAST(a AS DATE) FROM t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Equal field propagation
+--echo #
+
+CREATE TABLE t1 (a DATE);
+INSERT INTO t1 VALUES (20010101);
+INSERT INTO t1 VALUES (20010102);
+# DATE is compared to a non-temporal value as DATETIME
+# In the queries below, nanoseconds should round to microseconds
+SELECT * FROM t1 WHERE a= 20010101235959.9999999;
+SELECT * FROM t1 WHERE a='2001-01-01 23:59:59.9999999';
+SELECT * FROM t1 WHERE a='2001-01-01 23:59:59.9999999' AND a>='2001-01-01 23:59:59.9999999';
+SELECT * FROM t1 WHERE a='2001-01-01 23:59:59.9999999' AND CONCAT(a)='2001-01-02';
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='2001-01-01 23:59:59.9999999' AND a>='2001-01-01 23:59:59.9999999';
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='2001-01-01 23:59:59.9999999' AND CONCAT(a)='2001-01-02';
+DROP TABLE t1;
+
+
+--echo #
+--echo # Comparing non-temporal to DATE
+--echo #
+
+--echo # Although conversion from a non-temporal type to DATE (e.g. on SET) does not round,
+--echo # comparison between a non-temporal type and DATE is performed as DATETIME.
+--echo # So rounding does happen here.
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT t1 VALUES ('2001-01-01 23:59:59.9999999');
+SELECT * FROM t1 WHERE a=DATE'2001-01-02';
+SELECT * FROM t1 WHERE CONCAT(a)=DATE'2001-01-02';
+SELECT * FROM t1 WHERE COALESCE(a)=DATE'2001-01-02';
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DECIMAL(32,7));
+INSERT t1 VALUES (20010101235959.9999999);
+SELECT * FROM t1 WHERE a=DATE'2001-01-02';
+SELECT * FROM t1 WHERE COALESCE(a)=DATE'2001-01-02';
+DROP TABLE t1;
diff --git a/mysql-test/main/type_datetime.result b/mysql-test/main/type_datetime.result
index b3910553f97..64c523f26ce 100644
--- a/mysql-test/main/type_datetime.result
+++ b/mysql-test/main/type_datetime.result
@@ -103,7 +103,7 @@ date numfacture expedition
0000-00-00 00:00:00 1212 0001-00-00 00:00:00
EXPLAIN SELECT * FROM t1 WHERE expedition='0001-00-00 00:00:00';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref expedition expedition 5 const 1
+1 SIMPLE t1 ref expedition expedition 5 const 2
drop table t1;
create table t1 (a datetime not null, b datetime not null);
insert into t1 values (now(), now());
@@ -361,7 +361,7 @@ greatest(cast('01-01-01' as date), '01-01-02') + 0
20010102
select least(cast('01-01-01' as datetime), '01-01-02') + 0;
least(cast('01-01-01' as datetime), '01-01-02') + 0
-20010101000000.000000
+20010101000000
select cast(least(cast('01-01-01' as datetime), '01-01-02') as signed);
cast(least(cast('01-01-01' as datetime), '01-01-02') as signed)
20010101000000
@@ -399,7 +399,7 @@ if(@bug28261 = f1, '', @bug28261:= f1)
2001-01-01
2002-02-02
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
select if(@bug28261 = f1, '', @bug28261:= f1) from t1;
if(@bug28261 = f1, '', @bug28261:= f1)
2001-01-01
@@ -423,11 +423,11 @@ f1
2001-01-01 00:00:00
2002-02-02 00:00:00
Warnings:
-Warning 1292 Incorrect datetime value: '2002010'
+Warning 1292 Truncated incorrect datetime value: '2002010'
select * from t1 where f1 between 20020101 and 2007010100000;
f1
Warnings:
-Warning 1292 Incorrect datetime value: '2007010100000'
+Warning 1292 Truncated incorrect datetime value: '2007010100000'
drop table t1;
#
# Bug#27216: functions with parameters of different date types may
@@ -498,7 +498,7 @@ f1
45:44:44
15:44:44
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
drop table t1;
create table t1 (a tinyint);
insert into t1 values (), (), ();
@@ -575,18 +575,18 @@ CAST('NULL' AS DATE) < CAST('NULL' AS DATE) n9;
n1 n2 n3 n4 n5 n6 n7 n8 n9
0 0 1 1 1 0 1 0 0
Warnings:
-Warning 1292 Incorrect datetime value: 'NULL'
-Warning 1292 Incorrect datetime value: 'NULL'
-Warning 1292 Incorrect datetime value: 'NULL'
-Warning 1292 Incorrect datetime value: 'NULL'
-Warning 1292 Incorrect datetime value: 'NULL'
-Warning 1292 Incorrect datetime value: 'NULL'
-Warning 1292 Incorrect datetime value: 'NULL'
-Warning 1292 Incorrect datetime value: 'NULL'
-Warning 1292 Incorrect datetime value: 'NULL'
-Warning 1292 Incorrect datetime value: 'NULL'
-Warning 1292 Incorrect datetime value: 'NULL'
-Warning 1292 Incorrect datetime value: 'NULL'
+Warning 1292 Truncated incorrect datetime value: 'NULL'
+Warning 1292 Truncated incorrect datetime value: 'NULL'
+Warning 1292 Truncated incorrect datetime value: 'NULL'
+Warning 1292 Truncated incorrect datetime value: 'NULL'
+Warning 1292 Truncated incorrect datetime value: 'NULL'
+Warning 1292 Truncated incorrect datetime value: 'NULL'
+Warning 1292 Truncated incorrect datetime value: 'NULL'
+Warning 1292 Truncated incorrect datetime value: 'NULL'
+Warning 1292 Truncated incorrect datetime value: 'NULL'
+Warning 1292 Truncated incorrect datetime value: 'NULL'
+Warning 1292 Truncated incorrect datetime value: 'NULL'
+Warning 1292 Truncated incorrect datetime value: 'NULL'
End of 5.0 tests
set @org_mode=@@sql_mode;
create table t1 (da date default '1962-03-03 23:33:34', dt datetime default '1962-03-03');
@@ -917,7 +917,7 @@ CREATE TABLE t1 (a DATETIME);
INSERT INTO t1 VALUES ('0000-00-00 10:20:30');
SELECT a, LEAST(a,'2001-01-01 10:20:30') FROM t1;
a LEAST(a,'2001-01-01 10:20:30')
-0000-00-00 10:20:30 0000-00-00 10:20:30.000000
+0000-00-00 10:20:30 0000-00-00 10:20:30
DROP TABLE t1;
CREATE TABLE t1 (a DATETIME(6));
INSERT INTO t1 VALUES ('0000-00-00 00:00:00.000001');
@@ -1012,7 +1012,7 @@ SELECT * FROM t1 WHERE LENGTH(a)=30+RAND() AND a=' garbage ';
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
Warnings:
-Warning 1292 Incorrect datetime value: ' garbage '
+Warning 1292 Truncated incorrect datetime value: ' garbage '
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = TIMESTAMP'0000-00-00 00:00:00' and <cache>(octet_length(TIMESTAMP'0000-00-00 00:00:00')) = 30 + rand()
DROP TABLE t1;
CREATE TABLE t1 (a DATETIME);;
@@ -1131,11 +1131,11 @@ INSERT INTO t1 VALUES ('00:00:00'),('00:01:00');
SELECT 1 FROM t1 WHERE 2016 > SOME (SELECT CAST(a AS DATETIME) FROM t1);
1
Warnings:
-Warning 1292 Incorrect datetime value: '2016'
+Warning 1292 Truncated incorrect datetime value: '2016'
SELECT * FROM t1 WHERE 2016 > CAST(a AS DATETIME);
a
Warnings:
-Warning 1292 Incorrect datetime value: '2016'
+Warning 1292 Truncated incorrect datetime value: '2016'
SELECT 1 FROM t1 WHERE 20160101 > SOME (SELECT CAST(a AS DATETIME) FROM t1);
1
1
@@ -1253,7 +1253,7 @@ INSERT INTO t1 VALUES ('2001-01-01 23:00:03', 'yes');
INSERT INTO t1 VALUES ('2001-01-01 23:00:04', 'yes');
EXPLAIN SELECT * FROM t1 WHERE a NOT IN ('2001-01-01 23:00:01','2001-01-01 23:00:02');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 6 NULL 5 Using index condition
+1 SIMPLE t1 range a a 6 NULL 4 Using index condition
SELECT * FROM t1 WHERE a NOT IN ('2001-01-01 23:00:01','2001-01-01 23:00:02');
a filler
2001-01-01 23:00:03 yes
@@ -1292,7 +1292,7 @@ INSERT INTO t1 VALUES ('2001-01-01 23:00:03.1', 'yes');
INSERT INTO t1 VALUES ('2001-01-01 23:00:04.1', 'yes');
EXPLAIN SELECT * FROM t1 WHERE a NOT IN ('2001-01-01 23:00:01.1','2001-01-01 23:00:02.1');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 7 NULL 5 Using index condition
+1 SIMPLE t1 range a a 7 NULL 4 Using index condition
SELECT * FROM t1 WHERE a NOT IN ('2001-01-01 23:00:01.1','2001-01-01 23:00:02.1');
a filler
2001-01-01 23:00:03.1 yes
@@ -1301,3 +1301,138 @@ DROP TABLE t1;
#
# End of 10.3 tests
#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-11362 True condition elimination does not work for DECIMAL and temporal dynamic SQL parameters
+#
+CREATE TABLE t1 (a DATETIME);
+INSERT INTO t1 VALUES ('2001-01-01 00:00:00'),('2001-01-02 00:00:00'),('2001-01-03 00:00:00');
+# Equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIMESTAMP'2001:01:01 00:00:00',a)<=>COALESCE(TIMESTAMP'2001-01-01 00:00:00',a);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING TIMESTAMP'2001-01-01 00:00:00',TIMESTAMP'2001-01-01 00:00:00';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIMESTAMP''2001-01-01 00:00:00'',a)<=>COALESCE(?,a)' USING TIMESTAMP'2001-01-01 00:00:00';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(TIMESTAMP''2001-01-01 00:00:00'',a)' USING TIMESTAMP'2001-01-01 00:00:00';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+# Not equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIMESTAMP'2001:01:01 00:00:00',a)<=>COALESCE(TIMESTAMP'2001-01-01 00:00:01',a);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(TIMESTAMP'2001-01-01 00:00:00',`test`.`t1`.`a`) <=> coalesce(TIMESTAMP'2001-01-01 00:00:01',`test`.`t1`.`a`)
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING TIMESTAMP'2001-01-01 00:00:00',TIMESTAMP'2001-01-01 00:00:01';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(<cache>(TIMESTAMP'2001-01-01 00:00:00'),`test`.`t1`.`a`) <=> coalesce(<cache>(TIMESTAMP'2001-01-01 00:00:01'),`test`.`t1`.`a`)
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIMESTAMP''2001-01-01 00:00:00'',a)<=>COALESCE(?,a)' USING TIMESTAMP'2001-01-01 00:00:01';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(TIMESTAMP'2001-01-01 00:00:00',`test`.`t1`.`a`) <=> coalesce(<cache>(TIMESTAMP'2001-01-01 00:00:01'),`test`.`t1`.`a`)
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(TIMESTAMP''2001-01-01 00:00:00'',a)' USING TIMESTAMP'2001-01-01 00:00:01';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(<cache>(TIMESTAMP'2001-01-01 00:00:01'),`test`.`t1`.`a`) <=> coalesce(TIMESTAMP'2001-01-01 00:00:00',`test`.`t1`.`a`)
+DROP TABLE t1;
+#
+# MDEV-16426 Optimizer erroneously treats equal constants of different formats as same
+#
+CREATE TABLE t1 (a DATETIME);
+INSERT INTO t1 VALUES ('2001-01-01 00:00:00'),('2001-01-01 00:00:01'),('2001-01-01 00:00:02');
+Equal values
+SELECT * FROM t1 WHERE LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a))<=>LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a));
+a
+2001-01-01 00:00:00
+2001-01-01 00:00:01
+2001-01-01 00:00:02
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a))<=>LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a));
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIMESTAMP'2001-01-01 00:00:00.0';
+a
+2001-01-01 00:00:00
+2001-01-01 00:00:01
+2001-01-01 00:00:02
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIMESTAMP'2001-01-01 00:00:00.0';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where octet_length(coalesce(TIME'00:00:00.0',`test`.`t1`.`a`)) <=> octet_length(coalesce(<cache>(TIMESTAMP'2001-01-01 00:00:00.0'),`test`.`t1`.`a`))
+Values with different formats
+SELECT LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a)),LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.00',a)) FROM t1;
+LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a)) LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.00',a))
+21 22
+21 22
+21 22
+SELECT * FROM t1 WHERE LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a))<=>LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.00',a));
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a))<=>LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.00',a));
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where octet_length(coalesce(TIMESTAMP'2001-01-01 00:00:00.0',`test`.`t1`.`a`)) <=> octet_length(coalesce(TIMESTAMP'2001-01-01 00:00:00.00',`test`.`t1`.`a`))
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIMESTAMP'2001-01-01 00:00:00.00';
+a
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIMESTAMP'2001-01-01 00:00:00.00';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where octet_length(coalesce(TIME'00:00:00.0',`test`.`t1`.`a`)) <=> octet_length(coalesce(<cache>(TIMESTAMP'2001-01-01 00:00:00.00'),`test`.`t1`.`a`))
+DROP TABLE t1;
+#
+# MDEV-17216 Assertion `!dt->fraction_remainder(decimals())' failed in Field_temporal_with_date::store_TIME_with_warning
+#
+CREATE TABLE t1 (b BIT(20));
+CREATE TABLE t2 (t DATETIME);
+INSERT IGNORE INTO t1 VALUES (b'000001001100000');
+INSERT INTO t2 SELECT * FROM t1;
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a DATETIME);
+INSERT INTO t1 SELECT CAST(20010101 AS UNSIGNED);
+DROP TABLE t1;
+#
+# MDEV-17563 Different results using table or view when comparing values of time type
+#
+CREATE TABLE t1 (pk int, x1 datetime, x2 varchar(1));
+INSERT INTO t1 VALUES (17,'2001-01-01 09:16:37','');
+INSERT INTO t1 VALUES (18,'2001-01-01 09:16:37','k');
+CREATE VIEW v1 AS SELECT * FROM t1;
+SELECT pk FROM t1 WHERE x1 >x2;
+pk
+17
+18
+Warnings:
+Warning 1292 Truncated incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: 'k'
+SELECT pk FROM v1 WHERE x1 >x2;
+pk
+17
+18
+Warnings:
+Warning 1292 Truncated incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: 'k'
+DROP VIEW v1;
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/type_datetime.test b/mysql-test/main/type_datetime.test
index 7ed723fb4aa..7bd7883f469 100644
--- a/mysql-test/main/type_datetime.test
+++ b/mysql-test/main/type_datetime.test
@@ -849,3 +849,84 @@ DROP TABLE t1;
--echo #
--echo # End of 10.3 tests
--echo #
+
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-11362 True condition elimination does not work for DECIMAL and temporal dynamic SQL parameters
+--echo #
+
+CREATE TABLE t1 (a DATETIME);
+INSERT INTO t1 VALUES ('2001-01-01 00:00:00'),('2001-01-02 00:00:00'),('2001-01-03 00:00:00');
+--echo # Equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIMESTAMP'2001:01:01 00:00:00',a)<=>COALESCE(TIMESTAMP'2001-01-01 00:00:00',a);
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING TIMESTAMP'2001-01-01 00:00:00',TIMESTAMP'2001-01-01 00:00:00';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIMESTAMP''2001-01-01 00:00:00'',a)<=>COALESCE(?,a)' USING TIMESTAMP'2001-01-01 00:00:00';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(TIMESTAMP''2001-01-01 00:00:00'',a)' USING TIMESTAMP'2001-01-01 00:00:00';
+
+--echo # Not equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIMESTAMP'2001:01:01 00:00:00',a)<=>COALESCE(TIMESTAMP'2001-01-01 00:00:01',a);
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING TIMESTAMP'2001-01-01 00:00:00',TIMESTAMP'2001-01-01 00:00:01';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIMESTAMP''2001-01-01 00:00:00'',a)<=>COALESCE(?,a)' USING TIMESTAMP'2001-01-01 00:00:01';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(TIMESTAMP''2001-01-01 00:00:00'',a)' USING TIMESTAMP'2001-01-01 00:00:01';
+
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-16426 Optimizer erroneously treats equal constants of different formats as same
+--echo #
+CREATE TABLE t1 (a DATETIME);
+INSERT INTO t1 VALUES ('2001-01-01 00:00:00'),('2001-01-01 00:00:01'),('2001-01-01 00:00:02');
+
+--echo Equal values
+SELECT * FROM t1 WHERE LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a))<=>LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a));
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a))<=>LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a));
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIMESTAMP'2001-01-01 00:00:00.0';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIMESTAMP'2001-01-01 00:00:00.0';
+
+--echo Values with different formats
+SELECT LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a)),LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.00',a)) FROM t1;
+SELECT * FROM t1 WHERE LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a))<=>LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.00',a));
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.0',a))<=>LENGTH(COALESCE(TIMESTAMP'2001-01-01 00:00:00.00',a));
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIMESTAMP'2001-01-01 00:00:00.00';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIMESTAMP'2001-01-01 00:00:00.00';
+
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-17216 Assertion `!dt->fraction_remainder(decimals())' failed in Field_temporal_with_date::store_TIME_with_warning
+--echo #
+
+CREATE TABLE t1 (b BIT(20));
+CREATE TABLE t2 (t DATETIME);
+INSERT IGNORE INTO t1 VALUES (b'000001001100000');
+INSERT INTO t2 SELECT * FROM t1;
+DROP TABLE t1, t2;
+
+CREATE TABLE t1 (a DATETIME);
+INSERT INTO t1 SELECT CAST(20010101 AS UNSIGNED);
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-17563 Different results using table or view when comparing values of time type
+--echo #
+
+CREATE TABLE t1 (pk int, x1 datetime, x2 varchar(1));
+INSERT INTO t1 VALUES (17,'2001-01-01 09:16:37','');
+INSERT INTO t1 VALUES (18,'2001-01-01 09:16:37','k');
+CREATE VIEW v1 AS SELECT * FROM t1;
+SELECT pk FROM t1 WHERE x1 >x2;
+SELECT pk FROM v1 WHERE x1 >x2;
+DROP VIEW v1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/type_datetime_round.result b/mysql-test/main/type_datetime_round.result
new file mode 100644
index 00000000000..c6584223268
--- /dev/null
+++ b/mysql-test/main/type_datetime_round.result
@@ -0,0 +1,205 @@
+SET sql_mode=IF(@@version LIKE '%MariaDB%', 'TIME_ROUND_FRACTIONAL', '');
+SET @default_sql_mode=@@sql_mode;
+#
+# DATETIME: SET
+#
+CREATE TABLE t1 (a DATETIME(3), b DATETIME(4));
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999');
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+a
+2001-01-01 00:00:00.000
+DROP TABLE t1;
+CREATE TABLE t1 (a DATETIME(3), b VARCHAR(64));
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999999');
+UPDATE t1 SET a=b;
+Warnings:
+Note 1265 Data truncated for column 'a' at row 2
+SELECT a FROM t1;
+a
+2001-01-01 00:00:00.000
+2001-01-01 00:00:00.000
+DROP TABLE t1;
+CREATE TABLE t1 (a DATETIME(3), b DECIMAL(38,10));
+INSERT INTO t1 VALUES(NULL,20001231235959.9999);
+INSERT INTO t1 VALUES(NULL,20001231235959.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+a
+2001-01-01 00:00:00.000
+2001-01-01 00:00:00.000
+DROP TABLE t1;
+#
+# DATETIME: ALTER
+#
+CREATE TABLE t1 (a DATETIME(4));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+ALTER TABLE t1 MODIFY a DATETIME(3);
+SELECT a FROM t1;
+a
+2001-01-01 00:00:00.000
+DROP TABLE t1;
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999999');
+ALTER TABLE t1 MODIFY a DATETIME(3);
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT a FROM t1;
+a
+2001-01-01 00:00:00.000
+2001-01-01 00:00:00.000
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(20001231235959.9999);
+INSERT INTO t1 VALUES(20001231235959.9999999);
+ALTER TABLE t1 MODIFY a DATETIME(3);
+SELECT a FROM t1;
+a
+2001-01-01 00:00:00.000
+2001-01-01 00:00:00.000
+DROP TABLE t1;
+#
+# Corner case:
+# ALTER DATETIME to a shorter DATETIME
+# All values round, maximum possible value truncates.
+#
+SET time_zone='+00:00';
+CREATE TABLE t1 (ID INT, a DATETIME(6), comment VARCHAR(64));
+INSERT INTO t1 VALUES (0, '9999-12-30 23:59:58.999999', 'Should round');
+INSERT INTO t1 VALUES (1, '9999-12-31 22:59:59.999999', 'Should round');
+INSERT INTO t1 VALUES (2, '9999-12-31 23:59:58.999999', 'Should round');
+INSERT INTO t1 VALUES (3, '9999-12-31 23:59:59.999999', 'Should truncate');
+ALTER TABLE t1 MODIFY a DATETIME(5);
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 4
+SELECT * FROM t1;
+ID a comment
+0 9999-12-30 23:59:59.00000 Should round
+1 9999-12-31 23:00:00.00000 Should round
+2 9999-12-31 23:59:59.00000 Should round
+3 9999-12-31 23:59:59.99999 Should truncate
+DROP TABLE t1;
+SET time_zone=DEFAULT;
+#
+# NOW
+#
+SET time_zone='+00:00';
+SET timestamp=UNIX_TIMESTAMP('2010-12-31 23:59:59.999999');
+CREATE OR REPLACE TABLE t1 (id SERIAL, a DATETIME(4));
+INSERT INTO t1 (a) VALUES (now(6));
+INSERT INTO t1 (a) VALUES (CURRENT_TIMESTAMP(6));
+INSERT INTO t1 (a) VALUES (CURRENT_TIME(6));
+SELECT * FROM t1;
+id a
+1 2011-01-01 00:00:00.0000
+2 2011-01-01 00:00:00.0000
+3 2011-01-01 00:00:00.0000
+DROP TABLE t1;
+SET timestamp=DEFAULT;
+SET time_zone=DEFAULT;
+#
+# DATETIME: CAST
+#
+CREATE TABLE t1 (a DATETIME(4));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+SELECT a, CAST(a AS DATETIME(3)) FROM t1;
+a CAST(a AS DATETIME(3))
+2000-12-31 23:59:59.9999 2001-01-01 00:00:00.000
+DROP TABLE t1;
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999999');
+SELECT a, CAST(a AS DATETIME(3)) FROM t1;
+a CAST(a AS DATETIME(3))
+2000-12-31 23:59:59.9999 2001-01-01 00:00:00.000
+2000-12-31 23:59:59.9999999 2001-01-01 00:00:00.000
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(20001231235959.9999);
+INSERT INTO t1 VALUES(20001231235959.9999999);
+SELECT a, CAST(a AS DATETIME(3)) FROM t1;
+a CAST(a AS DATETIME(3))
+20001231235959.9999000000 2001-01-01 00:00:00.000
+20001231235959.9999999000 2001-01-01 00:00:00.000
+DROP TABLE t1;
+#
+# Equal field propagation
+#
+CREATE TABLE t1 (a DATETIME(6));
+INSERT INTO t1 VALUES (20010101235959.999999);
+INSERT INTO t1 VALUES (20010101235959.9999999);
+SELECT * FROM t1 WHERE a=20010101235959.9999999;
+a
+2001-01-02 00:00:00.000000
+SELECT * FROM t1 WHERE a='20010101235959.9999999';
+a
+2001-01-02 00:00:00.000000
+Warnings:
+Note 1292 Truncated incorrect datetime value: '20010101235959.9999999'
+SELECT * FROM t1 WHERE a='20010101235959.9999999' AND a>='20010101235959.9999999';
+a
+2001-01-02 00:00:00.000000
+Warnings:
+Note 1292 Truncated incorrect datetime value: '20010101235959.9999999'
+Note 1292 Truncated incorrect datetime value: '20010101235959.9999999'
+SELECT * FROM t1 WHERE a='20010101235959.9999999' AND CONCAT(a)='2001-01-02 00:00:00.000000';
+a
+2001-01-02 00:00:00.000000
+Warnings:
+Note 1292 Truncated incorrect datetime value: '20010101235959.9999999'
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='20010101235959.9999999' AND a>='20010101235959.9999999';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1292 Truncated incorrect datetime value: '20010101235959.9999999'
+Note 1292 Truncated incorrect datetime value: '20010101235959.9999999'
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = TIMESTAMP'2001-01-02 00:00:00'
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='20010101235959.9999999' AND CONCAT(a)='2001-01-02 00:00:00.000000';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1292 Truncated incorrect datetime value: '20010101235959.9999999'
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = TIMESTAMP'2001-01-02 00:00:00'
+DROP TABLE t1;
+#
+# Comparing non-temporal to DATETIME
+#
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT t1 VALUES ('2001-01-01 23:59:59.9999999');
+SELECT * FROM t1 WHERE a=TIMESTAMP'2001-01-02 00:00:00';
+a
+2001-01-01 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+SELECT * FROM t1 WHERE CONCAT(a)=TIMESTAMP'2001-01-02 00:00:00';
+a
+2001-01-01 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+SELECT * FROM t1 WHERE COALESCE(a)=TIMESTAMP'2001-01-02 00:00:00';
+a
+2001-01-01 23:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(32,7));
+INSERT t1 VALUES (20010101235959.9999999);
+SELECT * FROM t1 WHERE a=TIMESTAMP'2001-01-02 00:00:00';
+a
+20010101235959.9999999
+SELECT * FROM t1 WHERE COALESCE(a)=TIMESTAMP'2001-01-02 00:00:00';
+a
+20010101235959.9999999
+DROP TABLE t1;
+#
+# Literal corner case
+#
+SELECT TIMESTAMP'9999-12-31 23:59:59.999999';
+TIMESTAMP'9999-12-31 23:59:59.999999'
+9999-12-31 23:59:59.999999
+SELECT TIME'9999-12-31 23:59:59.9999999';
+ERROR HY000: Incorrect TIME value: '9999-12-31 23:59:59.9999999'
diff --git a/mysql-test/main/type_datetime_round.test b/mysql-test/main/type_datetime_round.test
new file mode 100644
index 00000000000..15aec2cf4e0
--- /dev/null
+++ b/mysql-test/main/type_datetime_round.test
@@ -0,0 +1,147 @@
+SET sql_mode=IF(@@version LIKE '%MariaDB%', 'TIME_ROUND_FRACTIONAL', '');
+SET @default_sql_mode=@@sql_mode;
+
+--echo #
+--echo # DATETIME: SET
+--echo #
+
+CREATE TABLE t1 (a DATETIME(3), b DATETIME(4));
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999');
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DATETIME(3), b VARCHAR(64));
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999999');
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DATETIME(3), b DECIMAL(38,10));
+INSERT INTO t1 VALUES(NULL,20001231235959.9999);
+INSERT INTO t1 VALUES(NULL,20001231235959.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # DATETIME: ALTER
+--echo #
+
+CREATE TABLE t1 (a DATETIME(4));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+ALTER TABLE t1 MODIFY a DATETIME(3);
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999999');
+ALTER TABLE t1 MODIFY a DATETIME(3);
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(20001231235959.9999);
+INSERT INTO t1 VALUES(20001231235959.9999999);
+ALTER TABLE t1 MODIFY a DATETIME(3);
+SELECT a FROM t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Corner case:
+--echo # ALTER DATETIME to a shorter DATETIME
+--echo # All values round, maximum possible value truncates.
+--echo #
+
+SET time_zone='+00:00';
+CREATE TABLE t1 (ID INT, a DATETIME(6), comment VARCHAR(64));
+INSERT INTO t1 VALUES (0, '9999-12-30 23:59:58.999999', 'Should round');
+INSERT INTO t1 VALUES (1, '9999-12-31 22:59:59.999999', 'Should round');
+INSERT INTO t1 VALUES (2, '9999-12-31 23:59:58.999999', 'Should round');
+INSERT INTO t1 VALUES (3, '9999-12-31 23:59:59.999999', 'Should truncate');
+ALTER TABLE t1 MODIFY a DATETIME(5);
+SELECT * FROM t1;
+DROP TABLE t1;
+SET time_zone=DEFAULT;
+
+
+--echo #
+--echo # NOW
+--echo #
+
+SET time_zone='+00:00';
+SET timestamp=UNIX_TIMESTAMP('2010-12-31 23:59:59.999999');
+CREATE OR REPLACE TABLE t1 (id SERIAL, a DATETIME(4));
+INSERT INTO t1 (a) VALUES (now(6));
+INSERT INTO t1 (a) VALUES (CURRENT_TIMESTAMP(6));
+INSERT INTO t1 (a) VALUES (CURRENT_TIME(6));
+SELECT * FROM t1;
+DROP TABLE t1;
+SET timestamp=DEFAULT;
+SET time_zone=DEFAULT;
+
+
+--echo #
+--echo # DATETIME: CAST
+--echo #
+
+CREATE TABLE t1 (a DATETIME(4));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+SELECT a, CAST(a AS DATETIME(3)) FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999999');
+SELECT a, CAST(a AS DATETIME(3)) FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(20001231235959.9999);
+INSERT INTO t1 VALUES(20001231235959.9999999);
+SELECT a, CAST(a AS DATETIME(3)) FROM t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Equal field propagation
+--echo #
+
+CREATE TABLE t1 (a DATETIME(6));
+INSERT INTO t1 VALUES (20010101235959.999999);
+INSERT INTO t1 VALUES (20010101235959.9999999);
+SELECT * FROM t1 WHERE a=20010101235959.9999999;
+SELECT * FROM t1 WHERE a='20010101235959.9999999';
+SELECT * FROM t1 WHERE a='20010101235959.9999999' AND a>='20010101235959.9999999';
+SELECT * FROM t1 WHERE a='20010101235959.9999999' AND CONCAT(a)='2001-01-02 00:00:00.000000';
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='20010101235959.9999999' AND a>='20010101235959.9999999';
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='20010101235959.9999999' AND CONCAT(a)='2001-01-02 00:00:00.000000';
+DROP TABLE t1;
+
+--echo #
+--echo # Comparing non-temporal to DATETIME
+--echo #
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT t1 VALUES ('2001-01-01 23:59:59.9999999');
+SELECT * FROM t1 WHERE a=TIMESTAMP'2001-01-02 00:00:00';
+SELECT * FROM t1 WHERE CONCAT(a)=TIMESTAMP'2001-01-02 00:00:00';
+SELECT * FROM t1 WHERE COALESCE(a)=TIMESTAMP'2001-01-02 00:00:00';
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DECIMAL(32,7));
+INSERT t1 VALUES (20010101235959.9999999);
+SELECT * FROM t1 WHERE a=TIMESTAMP'2001-01-02 00:00:00';
+SELECT * FROM t1 WHERE COALESCE(a)=TIMESTAMP'2001-01-02 00:00:00';
+DROP TABLE t1;
+
+--echo #
+--echo # Literal corner case
+--echo #
+
+SELECT TIMESTAMP'9999-12-31 23:59:59.999999';
+--error ER_WRONG_VALUE
+SELECT TIME'9999-12-31 23:59:59.9999999';
diff --git a/mysql-test/main/type_decimal.result b/mysql-test/main/type_decimal.result
index d87ab0b38a3..6e7d7c8b6fe 100644
--- a/mysql-test/main/type_decimal.result
+++ b/mysql-test/main/type_decimal.result
@@ -1129,3 +1129,114 @@ t2 CREATE TABLE `t2` (
DROP TABLE t2;
DROP TABLE t1;
DROP TABLE t1dec102;
+#
+# End of 10.3 tests
+#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-11362 True condition elimination does not work for DECIMAL dynamic SQL parameters
+#
+CREATE TABLE t1 (a DECIMAL(10,1));
+INSERT INTO t1 VALUES (1),(2),(3);
+# Equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1.0+a<=>1.0+a;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>?+a' USING 1.0,1.0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1.0+a<=>?+a' USING 1.0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>1.0+a' USING 1.0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+# Not equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1.0+a<=>1.1+a;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1.0 + `test`.`t1`.`a` <=> 1.1 + `test`.`t1`.`a`
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>?+a' USING 1.0,1.1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1.0 + `test`.`t1`.`a` <=> 1.1 + `test`.`t1`.`a`
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1.0+a<=>?+a' USING 1.1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1.0 + `test`.`t1`.`a` <=> 1.1 + `test`.`t1`.`a`
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>1.0+a' USING 1.1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1.1 + `test`.`t1`.`a` <=> 1.0 + `test`.`t1`.`a`
+DROP TABLE t1;
+#
+# MDEV-16426 Optimizer erroneously treats equal constants of different formats as same
+#
+CREATE TABLE t1 (a DECIMAL(10,3));
+INSERT INTO t1 VALUES (10.0),(10.1);
+Equal values
+SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(10.0)+a;
+a
+10.000
+10.100
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(10.0)+a;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(?)+a' USING 10.0;
+a
+10.000
+10.100
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(?)+a' USING 10.0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+Values with different formats
+SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(10.00)+a;
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(10.00)+a;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where <cache>(octet_length(10.0)) + `test`.`t1`.`a` <=> <cache>(octet_length(10.00)) + `test`.`t1`.`a`
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(?)+a' USING 10.00;
+a
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(?)+a' USING 10.00;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where <cache>(octet_length(10.0)) + `test`.`t1`.`a` <=> <cache>(octet_length(10.00)) + `test`.`t1`.`a`
+DROP TABLE t1;
+#
+# MDEV-16984 Assertion `dec' failed in Dec_ptr::cmp
+#
+SET sql_mode='';
+CREATE TABLE t1 (dc decimal(10));
+INSERT INTO t1 VALUES (0000000),(NULL);
+SELECT 1 FROM t1 GROUP BY 'm' <=> dc;
+1
+1
+1
+Warnings:
+Warning 1292 Truncated incorrect DECIMAL value: 'm'
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/type_decimal.test b/mysql-test/main/type_decimal.test
index 71c7c7bcd5b..4ffbcbd3288 100644
--- a/mysql-test/main/type_decimal.test
+++ b/mysql-test/main/type_decimal.test
@@ -715,3 +715,67 @@ DROP TABLE t2;
DROP TABLE t1;
DROP TABLE t1dec102;
+
+--echo #
+--echo # End of 10.3 tests
+--echo #
+
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-11362 True condition elimination does not work for DECIMAL dynamic SQL parameters
+--echo #
+
+CREATE TABLE t1 (a DECIMAL(10,1));
+INSERT INTO t1 VALUES (1),(2),(3);
+--echo # Equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1.0+a<=>1.0+a;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>?+a' USING 1.0,1.0;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1.0+a<=>?+a' USING 1.0;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>1.0+a' USING 1.0;
+--echo # Not equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1.0+a<=>1.1+a;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>?+a' USING 1.0,1.1;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1.0+a<=>?+a' USING 1.1;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>1.0+a' USING 1.1;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-16426 Optimizer erroneously treats equal constants of different formats as same
+--echo #
+
+CREATE TABLE t1 (a DECIMAL(10,3));
+INSERT INTO t1 VALUES (10.0),(10.1);
+
+--echo Equal values
+SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(10.0)+a;
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(10.0)+a;
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(?)+a' USING 10.0;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(?)+a' USING 10.0;
+
+--echo Values with different formats
+SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(10.00)+a;
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(10.00)+a;
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(?)+a' USING 10.00;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(10.0)+a<=>LENGTH(?)+a' USING 10.00;
+
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-16984 Assertion `dec' failed in Dec_ptr::cmp
+--echo #
+SET sql_mode='';
+CREATE TABLE t1 (dc decimal(10));
+INSERT INTO t1 VALUES (0000000),(NULL);
+SELECT 1 FROM t1 GROUP BY 'm' <=> dc;
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/type_float.result b/mysql-test/main/type_float.result
index be7c639ddd3..0ce54c0126c 100644
--- a/mysql-test/main/type_float.result
+++ b/mysql-test/main/type_float.result
@@ -840,3 +840,35 @@ DROP TABLE t1;
#
# End of 10.2 tests
#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-11362 True condition elimination does not work for DECIMAL and temporal dynamic SQL parameters
+#
+CREATE TABLE t1 (a DOUBLE);
+INSERT INTO t1 VALUES (1),(2),(3);
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1e0+a<=>1e0+a;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>?+a' USING 1e0,1e0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>1e0+a' USING 1e0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1e0+a<=>?+a' USING 1e0;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/type_float.test b/mysql-test/main/type_float.test
index 9dba1c709d5..f42d3445e2a 100644
--- a/mysql-test/main/type_float.test
+++ b/mysql-test/main/type_float.test
@@ -581,3 +581,24 @@ DROP TABLE t1;
--echo #
--echo # End of 10.2 tests
--echo #
+
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-11362 True condition elimination does not work for DECIMAL and temporal dynamic SQL parameters
+--echo #
+
+CREATE TABLE t1 (a DOUBLE);
+INSERT INTO t1 VALUES (1),(2),(3);
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1e0+a<=>1e0+a;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>?+a' USING 1e0,1e0;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>1e0+a' USING 1e0;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1e0+a<=>?+a' USING 1e0;
+DROP TABLE t1;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/type_hex_hybrid.result b/mysql-test/main/type_hex_hybrid.result
new file mode 100644
index 00000000000..eec24f6e89e
--- /dev/null
+++ b/mysql-test/main/type_hex_hybrid.result
@@ -0,0 +1,24 @@
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16426 Optimizer erroneously treats equal constants of different formats as same
+#
+SET NAMES utf8;
+CREATE TABLE t1 (a DECIMAL(10,3));
+INSERT INTO t1 VALUES (10.0),(10.1);
+SELECT CHARSET('a'),CHARSET(0x61),LENGTH(CHARSET('a'))+a,LENGTH(CHARSET(0x61))+a FROM t1;
+CHARSET('a') CHARSET(0x61) LENGTH(CHARSET('a'))+a LENGTH(CHARSET(0x61))+a
+utf8 binary 14.000 16.000
+utf8 binary 14.100 16.100
+SELECT * FROM t1 WHERE LENGTH(CHARSET('a'))+a<=>LENGTH(CHARSET(0x61))+a;
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(CHARSET('a'))+a<=>LENGTH(CHARSET(0x61))+a;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where <cache>(octet_length(charset('a'))) + `test`.`t1`.`a` <=> <cache>(octet_length(charset(0x61))) + `test`.`t1`.`a`
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/type_hex_hybrid.test b/mysql-test/main/type_hex_hybrid.test
new file mode 100644
index 00000000000..a39750e2635
--- /dev/null
+++ b/mysql-test/main/type_hex_hybrid.test
@@ -0,0 +1,21 @@
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-16426 Optimizer erroneously treats equal constants of different formats as same
+--echo #
+
+# It's important for CHARSET('a') and CHARSET(0x61) to have different lengths in this test.
+# 'latin1' and 'binary' have the same length, so 'utf8' is used (a short illustration follows this file's diff).
+SET NAMES utf8;
+CREATE TABLE t1 (a DECIMAL(10,3));
+INSERT INTO t1 VALUES (10.0),(10.1);
+SELECT CHARSET('a'),CHARSET(0x61),LENGTH(CHARSET('a'))+a,LENGTH(CHARSET(0x61))+a FROM t1;
+SELECT * FROM t1 WHERE LENGTH(CHARSET('a'))+a<=>LENGTH(CHARSET(0x61))+a;
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(CHARSET('a'))+a<=>LENGTH(CHARSET(0x61))+a;
+DROP TABLE t1;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
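
A minimal illustration of the comment in type_hex_hybrid.test above, assuming a fresh session (the annotated values are simply the lengths of the character set names, as also visible in the expected output of the .result file):

    SET NAMES utf8;
    SELECT CHARSET('a'),           -- 'utf8'   (the connection character set)
           CHARSET(0x61),          -- 'binary' (hex literals are binary strings)
           LENGTH(CHARSET('a')),   -- 4
           LENGTH(CHARSET(0x61));  -- 6
    -- Under SET NAMES latin1 both names are 6 characters long
    -- (LENGTH('latin1') = LENGTH('binary')), so LENGTH() could no longer
    -- distinguish the two sides of the <=> comparison that the test checks.
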
diff --git a/mysql-test/main/type_int.result b/mysql-test/main/type_int.result
index 607b333aae1..a3a702609c9 100644
--- a/mysql-test/main/type_int.result
+++ b/mysql-test/main/type_int.result
@@ -238,3 +238,47 @@ DROP FUNCTION sint64;
#
# End of 10.3 tests
#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-11362 True condition elimination does not work for DECIMAL and temporal dynamic SQL parameters
+#
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1+a<=>1+a;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>?+a' USING 1,1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>1+a' USING 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1+a<=>?+a' USING 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+DROP TABLE t1;
+#
+# MDEV-15759 Expect "Impossible WHERE" for indexed_int_column=out_of_range_int_constant
+#
+CREATE TABLE t1 (a TINYINT, KEY(a));
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
+EXPLAIN SELECT * FROM t1 WHERE a=200;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+EXPLAIN SELECT * FROM t1 WHERE a<=>200;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/type_int.test b/mysql-test/main/type_int.test
index 87f73fabbc8..8d56f94388c 100644
--- a/mysql-test/main/type_int.test
+++ b/mysql-test/main/type_int.test
@@ -179,3 +179,34 @@ DROP FUNCTION sint64;
--echo #
--echo # End of 10.3 tests
--echo #
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-11362 True condition elimination does not work for DECIMAL and temporal dynamic SQL parameters
+--echo #
+
+CREATE TABLE t1 (a INT);
+INSERT INTO t1 VALUES (1),(2),(3);
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1+a<=>1+a;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>?+a' USING 1,1;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE ?+a<=>1+a' USING 1;
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE 1+a<=>?+a' USING 1;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-15759 Expect "Impossible WHERE" for indexed_int_column=out_of_range_int_constant
+--echo #
+
+CREATE TABLE t1 (a TINYINT, KEY(a));
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
+EXPLAIN SELECT * FROM t1 WHERE a=200;
+EXPLAIN SELECT * FROM t1 WHERE a<=>200;
+DROP TABLE t1;
+
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/type_interval.result b/mysql-test/main/type_interval.result
new file mode 100644
index 00000000000..65c84022210
--- /dev/null
+++ b/mysql-test/main/type_interval.result
@@ -0,0 +1,83 @@
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-17776 CAST(x AS INTERVAL DAY_SECOND(N))
+#
+CREATE TABLE t1 (a VARCHAR(128));
+INSERT INTO t1 VALUES
+('00:00:00'),
+('+00:00:01'),
+('-00:00:01'),
+('838:59:59'),
+('839:00:00'),
+('2018:01:02'),
+('87649415:59:59'),
+('3652058 23:59:59'),
+('87649416:00:00'),
+('3652059 00:00:00');
+SELECT
+EXTRACT(DAY FROM a) AS d,
+EXTRACT(HOUR FROM a) AS h,
+a,
+CAST(a AS INTERVAL DAY_SECOND(6)) AS cast_itds
+FROM t1;
+d h a cast_itds
+0 0 00:00:00 00:00:00.000000
+0 0 +00:00:01 00:00:01.000000
+0 0 -00:00:01 -00:00:01.000000
+34 22 838:59:59 34 22:59:59.000000
+34 23 839:00:00 34 23:00:00.000000
+84 2 2018:01:02 84 02:01:02.000000
+3652058 23 87649415:59:59 3652058 23:59:59.000000
+3652058 23 3652058 23:59:59 3652058 23:59:59.000000
+NULL NULL 87649416:00:00 NULL
+NULL NULL 3652059 00:00:00 NULL
+Warnings:
+Warning 1292 Incorrect interval value: '87649416:00:00'
+Warning 1292 Incorrect interval value: '87649416:00:00'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '87649416:00:00'
+Warning 1292 Incorrect interval value: '3652059 00:00:00'
+Warning 1292 Incorrect interval value: '3652059 00:00:00'
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '3652059 00:00:00'
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(32,9));
+INSERT INTO t1 VALUES
+(0),
+(1),
+(-1),
+(8385959),
+(8390000),
+(20180102),
+(876494155959),
+(876494160000);
+SELECT
+EXTRACT(DAY FROM a) AS d,
+EXTRACT(HOUR FROM a) AS h,
+a,
+CAST(a AS INTERVAL DAY_SECOND(6)) AS cast_itds
+FROM t1;
+d h a cast_itds
+0 0 0.000000000 00:00:00.000000
+0 0 1.000000000 00:00:01.000000
+0 0 -1.000000000 -00:00:01.000000
+34 22 8385959.000000000 34 22:59:59.000000
+34 23 8390000.000000000 34 23:00:00.000000
+84 2 20180102.000000000 84 02:01:02.000000
+3652058 23 876494155959.000000000 3652058 23:59:59.000000
+NULL NULL 876494160000.000000000 NULL
+Warnings:
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '0.000000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '1.000000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '-1.000000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '8385959.000000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '8390000.000000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '20180102.000000000'
+Note 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '876494155959.000000000'
+Warning 1292 Incorrect interval value: '876494160000.000000000' for column `test`.`t1`.`a` at row 8
+Warning 1292 Incorrect interval value: '876494160000.000000000' for column `test`.`t1`.`a` at row 8
+Warning 1292 Incorrect INTERVAL DAY TO SECOND value: '876494160000.000000000'
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/type_interval.test b/mysql-test/main/type_interval.test
new file mode 100644
index 00000000000..15999dc609d
--- /dev/null
+++ b/mysql-test/main/type_interval.test
@@ -0,0 +1,54 @@
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-17776 CAST(x AS INTERVAL DAY_SECOND(N))
+--echo #
+
+CREATE TABLE t1 (a VARCHAR(128));
+INSERT INTO t1 VALUES
+('00:00:00'),
+('+00:00:01'),
+('-00:00:01'),
+('838:59:59'),
+('839:00:00'),
+('2018:01:02'),
+('87649415:59:59'),
+('3652058 23:59:59'),
+('87649416:00:00'),
+('3652059 00:00:00');
+
+SELECT
+ EXTRACT(DAY FROM a) AS d,
+ EXTRACT(HOUR FROM a) AS h,
+ a,
+ CAST(a AS INTERVAL DAY_SECOND(6)) AS cast_itds
+FROM t1;
+
+DROP TABLE t1;
+
+
+CREATE TABLE t1 (a DECIMAL(32,9));
+INSERT INTO t1 VALUES
+(0),
+(1),
+(-1),
+(8385959),
+(8390000),
+(20180102),
+(876494155959),
+(876494160000);
+
+SELECT
+ EXTRACT(DAY FROM a) AS d,
+ EXTRACT(HOUR FROM a) AS h,
+ a,
+ CAST(a AS INTERVAL DAY_SECOND(6)) AS cast_itds
+FROM t1;
+
+DROP TABLE t1;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/type_json.result b/mysql-test/main/type_json.result
index 0045847097b..96e96cca404 100644
--- a/mysql-test/main/type_json.result
+++ b/mysql-test/main/type_json.result
@@ -2,7 +2,7 @@ create or replace table t1(a json);
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL
+ `a` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL CHECK (json_valid(`a`))
) ENGINE=MyISAM DEFAULT CHARSET=latin1
create or replace table t1(a json character set utf8);
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'character set utf8)' at line 1
@@ -10,7 +10,7 @@ create or replace table t1(a json default '{a:1}');
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT '{a:1}'
+ `a` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT '{a:1}' CHECK (json_valid(`a`))
) ENGINE=MyISAM DEFAULT CHARSET=latin1
create or replace table t1(a json not null check (json_valid(a)));
show create table t1;
@@ -21,18 +21,79 @@ t1 CREATE TABLE `t1` (
insert t1 values ('[]');
insert t1 values ('a');
ERROR 23000: CONSTRAINT `t1.a` failed for `test`.`t1`
+create or replace table t1(a json not null);
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL CHECK (json_valid(`a`))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert t1 values ('[]');
+insert t1 values ('a');
+ERROR 23000: CONSTRAINT `t1.a` failed for `test`.`t1`
set timestamp=unix_timestamp('2010:11:12 13:14:15');
create or replace table t1(a json default(json_object('now', now())));
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT json_object('now',current_timestamp())
+ `a` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT json_object('now',current_timestamp()) CHECK (json_valid(`a`))
) ENGINE=MyISAM DEFAULT CHARSET=latin1
insert t1 values ();
select * from t1;
a
{"now": "2010-11-12 13:14:15"}
drop table t1;
+create table t1 (t json) as select json_quote('foo') as t;
+create table t2 (a json) as select json_quote('foo') as t;
+create table t3 like t1;
+select * from t1;
+t
+"foo"
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `t` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL CHECK (json_valid(`t`))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL CHECK (json_valid(`a`)),
+ `t` varchar(38) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+show create table t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+ `t` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL CHECK (json_valid(`t`))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1,t2,t3;
+create table t1 (t json check (length(t) > 0));
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `t` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL CHECK (octet_length(`t`) > 0)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
+create table t1 (t text) engine=myisam;
+insert into t1 values ("{}"),("");
+create table t2 (t json) select t from t1;
+ERROR 23000: CONSTRAINT `t2.t` failed for `test`.`t2`
+select * from t2;
+ERROR 42S02: Table 'test.t2' doesn't exist
+drop table t1;
+create or replace table t1(a json default(json_object('now', 1)) check (json_valid(a)));
+insert into t1 values ();
+insert into t1 values ("{}");
+insert into t1 values ("xxx");
+ERROR 23000: CONSTRAINT `t1.a` failed for `test`.`t1`
+select * from t1;
+a
+{"now": 1}
+{}
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT json_object('now',1) CHECK (json_valid(`a`))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
select cast('{a:1}' as text);
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'text)' at line 1
select cast('{a:1}' as json);
diff --git a/mysql-test/main/type_json.test b/mysql-test/main/type_json.test
index 0cff9366145..bd13dc1fcf4 100644
--- a/mysql-test/main/type_json.test
+++ b/mysql-test/main/type_json.test
@@ -17,12 +17,47 @@ insert t1 values ('[]');
--error ER_CONSTRAINT_FAILED
insert t1 values ('a');
+create or replace table t1(a json not null);
+show create table t1;
+insert t1 values ('[]');
+--error ER_CONSTRAINT_FAILED
+insert t1 values ('a');
+
set timestamp=unix_timestamp('2010:11:12 13:14:15');
create or replace table t1(a json default(json_object('now', now())));
show create table t1;
insert t1 values ();
select * from t1;
+drop table t1;
+
+create table t1 (t json) as select json_quote('foo') as t;
+create table t2 (a json) as select json_quote('foo') as t;
+create table t3 like t1;
+select * from t1;
+show create table t1;
+show create table t2;
+show create table t3;
+drop table t1,t2,t3;
+create table t1 (t json check (length(t) > 0));
+show create table t1;
+drop table t1;
+
+create table t1 (t text) engine=myisam;
+insert into t1 values ("{}"),("");
+--error ER_CONSTRAINT_FAILED
+create table t2 (t json) select t from t1;
+--error ER_NO_SUCH_TABLE
+select * from t2;
+drop table t1;
+
+create or replace table t1(a json default(json_object('now', 1)) check (json_valid(a)));
+insert into t1 values ();
+insert into t1 values ("{}");
+--error ER_CONSTRAINT_FAILED
+insert into t1 values ("xxx");
+select * from t1;
+show create table t1;
drop table t1;
--error ER_PARSE_ERROR
diff --git a/mysql-test/main/type_temporal_innodb.result b/mysql-test/main/type_temporal_innodb.result
index b869822722d..55b398b3b02 100644
--- a/mysql-test/main/type_temporal_innodb.result
+++ b/mysql-test/main/type_temporal_innodb.result
@@ -56,7 +56,7 @@ SELECT DATE'0000-00-00'='';
DATE'0000-00-00'=''
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
CREATE TABLE t1 (a ENUM('a'), b DATE, c INT, KEY(b)) ENGINE=InnoDB;
INSERT IGNORE INTO t1 VALUES ('','0000-00-00',0);
Warnings:
@@ -65,49 +65,49 @@ SELECT * FROM t1 WHERE b='';
a b c
0000-00-00 0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT * FROM t1 IGNORE KEY (b) WHERE b='';
a b c
0000-00-00 0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT * FROM t1 WHERE a=b;
a b c
0000-00-00 0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT 1 FROM t1 WHERE (SELECT a FROM t1 group by c) = b;
1
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
ALTER TABLE t1 ENGINE=MyISAM;
SELECT * FROM t1 WHERE b='';
a b c
0000-00-00 0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT * FROM t1 IGNORE KEY (b) WHERE b='';
a b c
0000-00-00 0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT * FROM t1 WHERE a=b;
a b c
0000-00-00 0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT 1 FROM t1 WHERE (SELECT a FROM t1 group by c) = b;
1
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
DROP TABLE t1;
SELECT TIMESTAMP'0000-00-00 00:00:00'='';
TIMESTAMP'0000-00-00 00:00:00'=''
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
CREATE TABLE t1 (a ENUM('a'), b DATETIME, c INT, KEY(b)) ENGINE=InnoDB;
INSERT IGNORE INTO t1 VALUES ('','0000-00-00 00:00:00',0);
Warnings:
@@ -116,43 +116,43 @@ SELECT * FROM t1 WHERE b='';
a b c
0000-00-00 00:00:00 0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT * FROM t1 IGNORE KEY (b) WHERE b='';
a b c
0000-00-00 00:00:00 0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT * FROM t1 WHERE a=b;
a b c
0000-00-00 00:00:00 0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT 1 FROM t1 WHERE (SELECT a FROM t1 group by c) = b;
1
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
ALTER TABLE t1 ENGINE=MyISAM;
SELECT * FROM t1 WHERE b='';
a b c
0000-00-00 00:00:00 0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT * FROM t1 IGNORE KEY (b) WHERE b='';
a b c
0000-00-00 00:00:00 0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT * FROM t1 WHERE a=b;
a b c
0000-00-00 00:00:00 0
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
SELECT 1 FROM t1 WHERE (SELECT a FROM t1 group by c) = b;
1
1
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
DROP TABLE t1;
CREATE TABLE t1 (d DATE) ENGINE=InnoDB;
INSERT INTO t1 VALUES ('2012-12-21');
@@ -160,3 +160,15 @@ SELECT * FROM t1 WHERE LEAST( UTC_TIME(), d );
d
2012-12-21
DROP TABLE t1;
+#
+# MDEV-17969 Assertion `name' failed in THD::push_warning_truncated_value_for_field
+#
+CREATE TABLE t1 (c1 DATE , c2 TIMESTAMP) ENGINE=InnoDB;
+INSERT INTO t1 VALUES ('2006-07-17','0000-00-00 00:00:00');
+CREATE TABLE t2 (pk INT, a1 TIME) Engine=InnoDB;
+INSERT INTO t2 VALUES (6,'00:00:00');
+SET SESSION sql_mode= 'strict_all_tables,no_zero_date';
+CREATE TABLE tbl SELECT * FROM t1 WHERE t1.c1 = (SELECT c2 FROM t2 WHERE pk = 6);
+ERROR 22007: Truncated incorrect datetime value: '0000-00-00 00:00:00'
+DROP TABLE t1,t2;
+SET sql_mode=DEFAULT;
diff --git a/mysql-test/main/type_temporal_innodb.test b/mysql-test/main/type_temporal_innodb.test
index 81f2f586c51..3debb798018 100644
--- a/mysql-test/main/type_temporal_innodb.test
+++ b/mysql-test/main/type_temporal_innodb.test
@@ -66,3 +66,18 @@ CREATE TABLE t1 (d DATE) ENGINE=InnoDB;
INSERT INTO t1 VALUES ('2012-12-21');
SELECT * FROM t1 WHERE LEAST( UTC_TIME(), d );
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-17969 Assertion `name' failed in THD::push_warning_truncated_value_for_field
+--echo #
+
+CREATE TABLE t1 (c1 DATE , c2 TIMESTAMP) ENGINE=InnoDB;
+INSERT INTO t1 VALUES ('2006-07-17','0000-00-00 00:00:00');
+CREATE TABLE t2 (pk INT, a1 TIME) Engine=InnoDB;
+INSERT INTO t2 VALUES (6,'00:00:00');
+SET SESSION sql_mode= 'strict_all_tables,no_zero_date';
+--error ER_TRUNCATED_WRONG_VALUE
+CREATE TABLE tbl SELECT * FROM t1 WHERE t1.c1 = (SELECT c2 FROM t2 WHERE pk = 6);
+# ^^^ there is no column c2 in table t2, so c2 resolves to the outer t1.c2 (the
+#     zero timestamp), which triggers the error above; see the sketch after this diff
+DROP TABLE t1,t2;
+SET sql_mode=DEFAULT;
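
A stand-alone sketch of the name-resolution rule the MDEV-17969 test relies on, using hypothetical INT tables that are not part of this patch: when a column name is not found among the subquery's own tables, it is resolved against the outer query, which makes the subquery correlated.

    CREATE TABLE t1 (c1 INT, c2 INT);
    CREATE TABLE t2 (pk INT, a1 INT);
    INSERT INTO t1 VALUES (1,1),(2,99);
    INSERT INTO t2 VALUES (6,0);
    -- c2 below is not a column of t2, so it refers to t1.c2 of the current outer
    -- row; the predicate therefore compares each row's c1 with its own c2 and
    -- returns only the row (1,1).
    SELECT * FROM t1 WHERE t1.c1 = (SELECT c2 FROM t2 WHERE pk = 6);
    DROP TABLE t1, t2;
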
diff --git a/mysql-test/main/type_temporal_mysql56_debug.result b/mysql-test/main/type_temporal_mysql56_debug.result
new file mode 100644
index 00000000000..a6a6c071ea3
--- /dev/null
+++ b/mysql-test/main/type_temporal_mysql56_debug.result
@@ -0,0 +1,416 @@
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16542 Fix ALTER TABLE FORCE to upgrade temporal types
+#
+CREATE PROCEDURE mdev16542_create_select()
+BEGIN
+SELECT '# CREATE..SELECT and SHOW' AS ``;
+CREATE TABLE t2 AS SELECT
+a0, a1, a2, a3, a4, a5, a6,
+COALESCE(a0),
+COALESCE(a1),
+COALESCE(a2),
+COALESCE(a3),
+COALESCE(a4),
+COALESCE(a5),
+COALESCE(a6)
+FROM t1;
+SHOW CREATE TABLE t2;
+DROP TABLE t2;
+END;
+$$
+CREATE PROCEDURE mdev16542_alter_force_and_show()
+BEGIN
+SELECT '# ALTER..FORCE and SHOW' AS ``;
+ALTER TABLE t1 FORCE;
+SHOW CREATE TABLE t1;
+END;
+$$
+CREATE PROCEDURE mdev16542()
+BEGIN
+SET SESSION debug_dbug="+d,sql_type";
+SELECT '# Original table' AS ``;
+SHOW CREATE TABLE t1;
+CALL mdev16542_create_select();
+CALL mdev16542_alter_force_and_show();
+SELECT '# Setting @@global.mysql56_temporal_format=false' AS ``;
+SET @@global.mysql56_temporal_format=false;
+CALL mdev16542_create_select();
+CALL mdev16542_alter_force_and_show();
+SELECT '# Setting @@global.mysql56_temporal_format=true' AS ``;
+SET @@global.mysql56_temporal_format=true;
+CALL mdev16542_create_select();
+CALL mdev16542_alter_force_and_show();
+SET SESSION debug_dbug="-d,sql_type";
+END;
+$$
+SET @@global.mysql56_temporal_format=true;
+CREATE TABLE t1 (
+a0 TIME,
+a1 TIME(1),
+a2 TIME(2),
+a3 TIME(3),
+a4 TIME(4),
+a5 TIME(5),
+a6 TIME(6)
+);
+CALL mdev16542;
+
+# Original table
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a0` time /* mysql-5.6 */ DEFAULT NULL,
+ `a1` time(1) /* mysql-5.6 */ DEFAULT NULL,
+ `a2` time(2) /* mysql-5.6 */ DEFAULT NULL,
+ `a3` time(3) /* mysql-5.6 */ DEFAULT NULL,
+ `a4` time(4) /* mysql-5.6 */ DEFAULT NULL,
+ `a5` time(5) /* mysql-5.6 */ DEFAULT NULL,
+ `a6` time(6) /* mysql-5.6 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# CREATE..SELECT and SHOW
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a0` time /* mysql-5.6 */ DEFAULT NULL,
+ `a1` time(1) /* mysql-5.6 */ DEFAULT NULL,
+ `a2` time(2) /* mysql-5.6 */ DEFAULT NULL,
+ `a3` time(3) /* mysql-5.6 */ DEFAULT NULL,
+ `a4` time(4) /* mysql-5.6 */ DEFAULT NULL,
+ `a5` time(5) /* mysql-5.6 */ DEFAULT NULL,
+ `a6` time(6) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a0)` time /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a1)` time(1) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a2)` time(2) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a3)` time(3) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a4)` time(4) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a5)` time(5) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a6)` time(6) /* mysql-5.6 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# ALTER..FORCE and SHOW
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a0` time /* mysql-5.6 */ DEFAULT NULL,
+ `a1` time(1) /* mysql-5.6 */ DEFAULT NULL,
+ `a2` time(2) /* mysql-5.6 */ DEFAULT NULL,
+ `a3` time(3) /* mysql-5.6 */ DEFAULT NULL,
+ `a4` time(4) /* mysql-5.6 */ DEFAULT NULL,
+ `a5` time(5) /* mysql-5.6 */ DEFAULT NULL,
+ `a6` time(6) /* mysql-5.6 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# Setting @@global.mysql56_temporal_format=false
+
+# CREATE..SELECT and SHOW
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a0` time /* mariadb-5.3 */ DEFAULT NULL,
+ `a1` time(1) /* mariadb-5.3 */ DEFAULT NULL,
+ `a2` time(2) /* mariadb-5.3 */ DEFAULT NULL,
+ `a3` time(3) /* mariadb-5.3 */ DEFAULT NULL,
+ `a4` time(4) /* mariadb-5.3 */ DEFAULT NULL,
+ `a5` time(5) /* mariadb-5.3 */ DEFAULT NULL,
+ `a6` time(6) /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a0)` time /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a1)` time(1) /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a2)` time(2) /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a3)` time(3) /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a4)` time(4) /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a5)` time(5) /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a6)` time(6) /* mariadb-5.3 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# ALTER..FORCE and SHOW
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a0` time /* mariadb-5.3 */ DEFAULT NULL,
+ `a1` time(1) /* mariadb-5.3 */ DEFAULT NULL,
+ `a2` time(2) /* mariadb-5.3 */ DEFAULT NULL,
+ `a3` time(3) /* mariadb-5.3 */ DEFAULT NULL,
+ `a4` time(4) /* mariadb-5.3 */ DEFAULT NULL,
+ `a5` time(5) /* mariadb-5.3 */ DEFAULT NULL,
+ `a6` time(6) /* mariadb-5.3 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# Setting @@global.mysql56_temporal_format=true
+
+# CREATE..SELECT and SHOW
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a0` time /* mysql-5.6 */ DEFAULT NULL,
+ `a1` time(1) /* mysql-5.6 */ DEFAULT NULL,
+ `a2` time(2) /* mysql-5.6 */ DEFAULT NULL,
+ `a3` time(3) /* mysql-5.6 */ DEFAULT NULL,
+ `a4` time(4) /* mysql-5.6 */ DEFAULT NULL,
+ `a5` time(5) /* mysql-5.6 */ DEFAULT NULL,
+ `a6` time(6) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a0)` time /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a1)` time(1) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a2)` time(2) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a3)` time(3) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a4)` time(4) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a5)` time(5) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a6)` time(6) /* mysql-5.6 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# ALTER..FORCE and SHOW
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a0` time /* mysql-5.6 */ DEFAULT NULL,
+ `a1` time(1) /* mysql-5.6 */ DEFAULT NULL,
+ `a2` time(2) /* mysql-5.6 */ DEFAULT NULL,
+ `a3` time(3) /* mysql-5.6 */ DEFAULT NULL,
+ `a4` time(4) /* mysql-5.6 */ DEFAULT NULL,
+ `a5` time(5) /* mysql-5.6 */ DEFAULT NULL,
+ `a6` time(6) /* mysql-5.6 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 (
+a0 DATETIME,
+a1 DATETIME(1),
+a2 DATETIME(2),
+a3 DATETIME(3),
+a4 DATETIME(4),
+a5 DATETIME(5),
+a6 DATETIME(6)
+);
+CALL mdev16542;
+
+# Original table
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a0` datetime /* mysql-5.6 */ DEFAULT NULL,
+ `a1` datetime(1) /* mysql-5.6 */ DEFAULT NULL,
+ `a2` datetime(2) /* mysql-5.6 */ DEFAULT NULL,
+ `a3` datetime(3) /* mysql-5.6 */ DEFAULT NULL,
+ `a4` datetime(4) /* mysql-5.6 */ DEFAULT NULL,
+ `a5` datetime(5) /* mysql-5.6 */ DEFAULT NULL,
+ `a6` datetime(6) /* mysql-5.6 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# CREATE..SELECT and SHOW
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a0` datetime /* mysql-5.6 */ DEFAULT NULL,
+ `a1` datetime(1) /* mysql-5.6 */ DEFAULT NULL,
+ `a2` datetime(2) /* mysql-5.6 */ DEFAULT NULL,
+ `a3` datetime(3) /* mysql-5.6 */ DEFAULT NULL,
+ `a4` datetime(4) /* mysql-5.6 */ DEFAULT NULL,
+ `a5` datetime(5) /* mysql-5.6 */ DEFAULT NULL,
+ `a6` datetime(6) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a0)` datetime /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a1)` datetime(1) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a2)` datetime(2) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a3)` datetime(3) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a4)` datetime(4) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a5)` datetime(5) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a6)` datetime(6) /* mysql-5.6 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# ALTER..FORCE and SHOW
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a0` datetime /* mysql-5.6 */ DEFAULT NULL,
+ `a1` datetime(1) /* mysql-5.6 */ DEFAULT NULL,
+ `a2` datetime(2) /* mysql-5.6 */ DEFAULT NULL,
+ `a3` datetime(3) /* mysql-5.6 */ DEFAULT NULL,
+ `a4` datetime(4) /* mysql-5.6 */ DEFAULT NULL,
+ `a5` datetime(5) /* mysql-5.6 */ DEFAULT NULL,
+ `a6` datetime(6) /* mysql-5.6 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# Setting @@global.mysql56_temporal_format=false
+
+# CREATE..SELECT and SHOW
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a0` datetime /* mariadb-5.3 */ DEFAULT NULL,
+ `a1` datetime(1) /* mariadb-5.3 */ DEFAULT NULL,
+ `a2` datetime(2) /* mariadb-5.3 */ DEFAULT NULL,
+ `a3` datetime(3) /* mariadb-5.3 */ DEFAULT NULL,
+ `a4` datetime(4) /* mariadb-5.3 */ DEFAULT NULL,
+ `a5` datetime(5) /* mariadb-5.3 */ DEFAULT NULL,
+ `a6` datetime(6) /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a0)` datetime /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a1)` datetime(1) /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a2)` datetime(2) /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a3)` datetime(3) /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a4)` datetime(4) /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a5)` datetime(5) /* mariadb-5.3 */ DEFAULT NULL,
+ `COALESCE(a6)` datetime(6) /* mariadb-5.3 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# ALTER..FORCE and SHOW
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a0` datetime /* mariadb-5.3 */ DEFAULT NULL,
+ `a1` datetime(1) /* mariadb-5.3 */ DEFAULT NULL,
+ `a2` datetime(2) /* mariadb-5.3 */ DEFAULT NULL,
+ `a3` datetime(3) /* mariadb-5.3 */ DEFAULT NULL,
+ `a4` datetime(4) /* mariadb-5.3 */ DEFAULT NULL,
+ `a5` datetime(5) /* mariadb-5.3 */ DEFAULT NULL,
+ `a6` datetime(6) /* mariadb-5.3 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# Setting @@global.mysql56_temporal_format=true
+
+# CREATE..SELECT and SHOW
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a0` datetime /* mysql-5.6 */ DEFAULT NULL,
+ `a1` datetime(1) /* mysql-5.6 */ DEFAULT NULL,
+ `a2` datetime(2) /* mysql-5.6 */ DEFAULT NULL,
+ `a3` datetime(3) /* mysql-5.6 */ DEFAULT NULL,
+ `a4` datetime(4) /* mysql-5.6 */ DEFAULT NULL,
+ `a5` datetime(5) /* mysql-5.6 */ DEFAULT NULL,
+ `a6` datetime(6) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a0)` datetime /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a1)` datetime(1) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a2)` datetime(2) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a3)` datetime(3) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a4)` datetime(4) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a5)` datetime(5) /* mysql-5.6 */ DEFAULT NULL,
+ `COALESCE(a6)` datetime(6) /* mysql-5.6 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# ALTER..FORCE and SHOW
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a0` datetime /* mysql-5.6 */ DEFAULT NULL,
+ `a1` datetime(1) /* mysql-5.6 */ DEFAULT NULL,
+ `a2` datetime(2) /* mysql-5.6 */ DEFAULT NULL,
+ `a3` datetime(3) /* mysql-5.6 */ DEFAULT NULL,
+ `a4` datetime(4) /* mysql-5.6 */ DEFAULT NULL,
+ `a5` datetime(5) /* mysql-5.6 */ DEFAULT NULL,
+ `a6` datetime(6) /* mysql-5.6 */ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1 (
+a0 TIMESTAMP,
+a1 TIMESTAMP(1),
+a2 TIMESTAMP(2),
+a3 TIMESTAMP(3),
+a4 TIMESTAMP(4),
+a5 TIMESTAMP(5),
+a6 TIMESTAMP(6)
+);
+CALL mdev16542;
+
+# Original table
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a0` timestamp /* mysql-5.6 */ NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `a1` timestamp(1) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0',
+ `a2` timestamp(2) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00',
+ `a3` timestamp(3) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000',
+ `a4` timestamp(4) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0000',
+ `a5` timestamp(5) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00000',
+ `a6` timestamp(6) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000000'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# CREATE..SELECT and SHOW
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a0` timestamp /* mysql-5.6 */ NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `a1` timestamp(1) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0',
+ `a2` timestamp(2) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00',
+ `a3` timestamp(3) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000',
+ `a4` timestamp(4) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0000',
+ `a5` timestamp(5) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00000',
+ `a6` timestamp(6) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `COALESCE(a0)` timestamp /* mysql-5.6 */ NULL DEFAULT NULL,
+ `COALESCE(a1)` timestamp(1) /* mysql-5.6 */ NULL DEFAULT NULL,
+ `COALESCE(a2)` timestamp(2) /* mysql-5.6 */ NULL DEFAULT NULL,
+ `COALESCE(a3)` timestamp(3) /* mysql-5.6 */ NULL DEFAULT NULL,
+ `COALESCE(a4)` timestamp(4) /* mysql-5.6 */ NULL DEFAULT NULL,
+ `COALESCE(a5)` timestamp(5) /* mysql-5.6 */ NULL DEFAULT NULL,
+ `COALESCE(a6)` timestamp(6) /* mysql-5.6 */ NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# ALTER..FORCE and SHOW
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a0` timestamp /* mysql-5.6 */ NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `a1` timestamp(1) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0',
+ `a2` timestamp(2) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00',
+ `a3` timestamp(3) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000',
+ `a4` timestamp(4) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0000',
+ `a5` timestamp(5) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00000',
+ `a6` timestamp(6) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000000'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# Setting @@global.mysql56_temporal_format=false
+
+# CREATE..SELECT and SHOW
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a0` timestamp /* mariadb-5.3 */ NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `a1` timestamp(1) /* mariadb-5.3 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0',
+ `a2` timestamp(2) /* mariadb-5.3 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00',
+ `a3` timestamp(3) /* mariadb-5.3 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000',
+ `a4` timestamp(4) /* mariadb-5.3 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0000',
+ `a5` timestamp(5) /* mariadb-5.3 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00000',
+ `a6` timestamp(6) /* mariadb-5.3 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `COALESCE(a0)` timestamp /* mariadb-5.3 */ NULL DEFAULT NULL,
+ `COALESCE(a1)` timestamp(1) /* mariadb-5.3 */ NULL DEFAULT NULL,
+ `COALESCE(a2)` timestamp(2) /* mariadb-5.3 */ NULL DEFAULT NULL,
+ `COALESCE(a3)` timestamp(3) /* mariadb-5.3 */ NULL DEFAULT NULL,
+ `COALESCE(a4)` timestamp(4) /* mariadb-5.3 */ NULL DEFAULT NULL,
+ `COALESCE(a5)` timestamp(5) /* mariadb-5.3 */ NULL DEFAULT NULL,
+ `COALESCE(a6)` timestamp(6) /* mariadb-5.3 */ NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# ALTER..FORCE and SHOW
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a0` timestamp /* mariadb-5.3 */ NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `a1` timestamp(1) /* mariadb-5.3 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0',
+ `a2` timestamp(2) /* mariadb-5.3 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00',
+ `a3` timestamp(3) /* mariadb-5.3 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000',
+ `a4` timestamp(4) /* mariadb-5.3 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0000',
+ `a5` timestamp(5) /* mariadb-5.3 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00000',
+ `a6` timestamp(6) /* mariadb-5.3 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000000'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# Setting @@global.mysql56_temporal_format=true
+
+# CREATE..SELECT and SHOW
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a0` timestamp /* mysql-5.6 */ NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `a1` timestamp(1) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0',
+ `a2` timestamp(2) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00',
+ `a3` timestamp(3) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000',
+ `a4` timestamp(4) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0000',
+ `a5` timestamp(5) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00000',
+ `a6` timestamp(6) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `COALESCE(a0)` timestamp /* mysql-5.6 */ NULL DEFAULT NULL,
+ `COALESCE(a1)` timestamp(1) /* mysql-5.6 */ NULL DEFAULT NULL,
+ `COALESCE(a2)` timestamp(2) /* mysql-5.6 */ NULL DEFAULT NULL,
+ `COALESCE(a3)` timestamp(3) /* mysql-5.6 */ NULL DEFAULT NULL,
+ `COALESCE(a4)` timestamp(4) /* mysql-5.6 */ NULL DEFAULT NULL,
+ `COALESCE(a5)` timestamp(5) /* mysql-5.6 */ NULL DEFAULT NULL,
+ `COALESCE(a6)` timestamp(6) /* mysql-5.6 */ NULL DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+
+# ALTER..FORCE and SHOW
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a0` timestamp /* mysql-5.6 */ NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(),
+ `a1` timestamp(1) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0',
+ `a2` timestamp(2) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00',
+ `a3` timestamp(3) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000',
+ `a4` timestamp(4) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.0000',
+ `a5` timestamp(5) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.00000',
+ `a6` timestamp(6) /* mysql-5.6 */ NOT NULL DEFAULT '0000-00-00 00:00:00.000000'
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+SET @@global.mysql56_temporal_format=DEFAULT;
+DROP PROCEDURE mdev16542;
+DROP PROCEDURE mdev16542_create_select;
+DROP PROCEDURE mdev16542_alter_force_and_show;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/type_temporal_mysql56_debug.test b/mysql-test/main/type_temporal_mysql56_debug.test
new file mode 100644
index 00000000000..12edde94598
--- /dev/null
+++ b/mysql-test/main/type_temporal_mysql56_debug.test
@@ -0,0 +1,107 @@
+--source include/have_debug.inc
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-16542 Fix ALTER TABLE FORCE to upgrade temporal types
+--echo #
+
+DELIMITER $$;
+CREATE PROCEDURE mdev16542_create_select()
+BEGIN
+ SELECT '# CREATE..SELECT and SHOW' AS ``;
+ CREATE TABLE t2 AS SELECT
+ a0, a1, a2, a3, a4, a5, a6,
+ COALESCE(a0),
+ COALESCE(a1),
+ COALESCE(a2),
+ COALESCE(a3),
+ COALESCE(a4),
+ COALESCE(a5),
+ COALESCE(a6)
+ FROM t1;
+ SHOW CREATE TABLE t2;
+ DROP TABLE t2;
+END;
+$$
+
+CREATE PROCEDURE mdev16542_alter_force_and_show()
+BEGIN
+ SELECT '# ALTER..FORCE and SHOW' AS ``;
+ ALTER TABLE t1 FORCE;
+ SHOW CREATE TABLE t1;
+END;
+$$
+
+CREATE PROCEDURE mdev16542()
+BEGIN
+ SET SESSION debug_dbug="+d,sql_type";
+ SELECT '# Original table' AS ``;
+ SHOW CREATE TABLE t1;
+ CALL mdev16542_create_select();
+ CALL mdev16542_alter_force_and_show();
+
+ SELECT '# Setting @@global.mysql56_temporal_format=false' AS ``;
+ SET @@global.mysql56_temporal_format=false;
+ CALL mdev16542_create_select();
+ CALL mdev16542_alter_force_and_show();
+
+ SELECT '# Setting @@global.mysql56_temporal_format=true' AS ``;
+ SET @@global.mysql56_temporal_format=true;
+ CALL mdev16542_create_select();
+ CALL mdev16542_alter_force_and_show();
+ SET SESSION debug_dbug="-d,sql_type";
+END;
+$$
+DELIMITER ;$$
+
+SET @@global.mysql56_temporal_format=true;
+
+CREATE TABLE t1 (
+ a0 TIME,
+ a1 TIME(1),
+ a2 TIME(2),
+ a3 TIME(3),
+ a4 TIME(4),
+ a5 TIME(5),
+ a6 TIME(6)
+);
+CALL mdev16542;
+DROP TABLE t1;
+
+
+CREATE TABLE t1 (
+ a0 DATETIME,
+ a1 DATETIME(1),
+ a2 DATETIME(2),
+ a3 DATETIME(3),
+ a4 DATETIME(4),
+ a5 DATETIME(5),
+ a6 DATETIME(6)
+);
+CALL mdev16542;
+DROP TABLE t1;
+
+
+CREATE TABLE t1 (
+ a0 TIMESTAMP,
+ a1 TIMESTAMP(1),
+ a2 TIMESTAMP(2),
+ a3 TIMESTAMP(3),
+ a4 TIMESTAMP(4),
+ a5 TIMESTAMP(5),
+ a6 TIMESTAMP(6)
+);
+CALL mdev16542;
+DROP TABLE t1;
+
+SET @@global.mysql56_temporal_format=DEFAULT;
+DROP PROCEDURE mdev16542;
+DROP PROCEDURE mdev16542_create_select;
+DROP PROCEDURE mdev16542_alter_force_and_show;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/type_time.result b/mysql-test/main/type_time.result
index 44f0d50d94e..673bed990a1 100644
--- a/mysql-test/main/type_time.result
+++ b/mysql-test/main/type_time.result
@@ -188,7 +188,7 @@ create table t1(f1 time);
insert into t1 values ('23:38:57');
select f1, f1 = '2010-10-11 23:38:57' from t1;
f1 f1 = '2010-10-11 23:38:57'
-23:38:57 0
+23:38:57 1
drop table t1;
#
# MDEV-4634 Crash in CONVERT_TZ
@@ -841,6 +841,18 @@ id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 8 100.00 Using where
Warnings:
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = TIMESTAMP'0000-00-01 10:20:30' and octet_length(`test`.`t1`.`a`) = 30 + rand()
+EXPLAIN EXTENDED
+SELECT * FROM t1 WHERE a=TIMESTAMP'0000-01-00 10:20:30' AND LENGTH(a)=8;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 8 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = TIMESTAMP'0000-01-00 10:20:30' and octet_length(`test`.`t1`.`a`) = 8
+EXPLAIN EXTENDED
+SELECT * FROM t1 WHERE a=TIMESTAMP'0001-00-00 10:20:30' AND LENGTH(a)=8;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 8 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = TIMESTAMP'0001-00-00 10:20:30' and octet_length(`test`.`t1`.`a`) = 8
# Old mode, TIMESTAMP-alike string literal, zero YYYYMMDD, Ok to propagate
SELECT * FROM t1 WHERE a='0000-00-00 10:20:30';
a
@@ -1451,9 +1463,9 @@ GREATEST('2010-01-01 10:10:10',TIME('-20:20:20')) AS gt_minus20_implicit,
GREATEST(CAST('2010-01-01 10:10:10' AS TIME(6)),TIME('-20:20:20')) AS gt_minis20_explicit,
GREATEST('2010-01-01 10:10:10',TIME('20:20:20')) AS gt_plus20_implicit,
GREATEST(CAST('2010-01-01 10:10:10' AS TIME(6)),TIME('20:20:20')) AS gt_plus20_explicit;
-gt_minus20_implicit 10:10:10.000000
+gt_minus20_implicit 10:10:10
gt_minis20_explicit 10:10:10.000000
-gt_plus20_implicit 20:20:20.000000
+gt_plus20_implicit 20:20:20
gt_plus20_explicit 20:20:20.000000
SELECT
HOUR(GREATEST('2010-01-01 10:10:10',TIME('-20:20:20'))) AS gt_minus20_implicit,
@@ -1469,9 +1481,9 @@ LEAST('2010-01-01 10:10:10',TIME('-20:20:20')) AS lt_minus20_implicit,
LEAST(CAST('2010-01-01 10:10:10' AS TIME(6)),TIME('-20:20:20')) AS lt_minus20_explicit,
LEAST('2010-01-01 10:10:10',TIME('20:20:20')) AS lt_plus20_implicit,
LEAST(CAST('2010-01-01 10:10:10' AS TIME(6)),TIME('20:20:20')) AS lt_plus20_explicit;
-lt_minus20_implicit -20:20:20.000000
+lt_minus20_implicit -20:20:20
lt_minus20_explicit -20:20:20.000000
-lt_plus20_implicit 10:10:10.000000
+lt_plus20_implicit 10:10:10
lt_plus20_explicit 10:10:10.000000
SELECT
HOUR(LEAST('2010-01-01 10:10:10',TIME('-20:20:20'))) AS lt_minus20_implicit,
@@ -1487,9 +1499,9 @@ GREATEST('2010-01-01 10:10:10',TIME('-200:20:20')) AS gt_minus200_implicit,
GREATEST(CAST('2010-01-01 10:10:10' AS TIME(6)),TIME('-200:20:20')) AS gt_minus200_explictit,
GREATEST('2010-01-01 10:10:10',TIME('200:20:20')) AS gt_plus200_implicit,
GREATEST(CAST('2010-01-01 10:10:10' AS TIME(6)),TIME('200:20:20')) AS gt_plus200_explicit;
-gt_minus200_implicit 10:10:10.000000
+gt_minus200_implicit 10:10:10
gt_minus200_explictit 10:10:10.000000
-gt_plus200_implicit 200:20:20.000000
+gt_plus200_implicit 200:20:20
gt_plus200_explicit 200:20:20.000000
SELECT
HOUR(GREATEST('2010-01-01 10:10:10',TIME('-200:20:20'))) AS gt_minus200_implicit,
@@ -1505,9 +1517,9 @@ LEAST('2010-01-01 10:10:10',TIME('-200:20:20')) AS lt_minus200_implicit,
LEAST(CAST('2010-01-01 10:10:10' AS TIME(6)),TIME('-200:20:20')) AS lt_minus200_explictit,
LEAST('2010-01-01 10:10:10',TIME('200:20:20')) AS lt_plus200_implicit,
LEAST(CAST('2010-01-01 10:10:10' AS TIME(6)),TIME('200:20:20')) AS lt_plus200_explicit;
-lt_minus200_implicit -200:20:20.000000
+lt_minus200_implicit -200:20:20
lt_minus200_explictit -200:20:20.000000
-lt_plus200_implicit 10:10:10.000000
+lt_plus200_implicit 10:10:10
lt_plus200_explicit 10:10:10.000000
SELECT
HOUR(LEAST('2010-01-01 10:10:10',TIME('-200:20:20'))) AS lt_minus200_implicit,
@@ -1596,7 +1608,7 @@ INSERT INTO t1 VALUES ('23:00:03', 'yes');
INSERT INTO t1 VALUES ('23:00:04', 'yes');
EXPLAIN SELECT * FROM t1 WHERE a NOT IN ('23:00:01','23:00:02');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 4 NULL 5 Using index condition
+1 SIMPLE t1 range a a 4 NULL 4 Using index condition
SELECT * FROM t1 WHERE a NOT IN ('23:00:01','23:00:02');
a filler
23:00:03 yes
@@ -1674,7 +1686,7 @@ INSERT INTO t1 VALUES ('24:00:03', 'yes');
INSERT INTO t1 VALUES ('24:00:04', 'yes');
EXPLAIN SELECT * FROM t1 WHERE a NOT IN ('24:00:01','24:00:02');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 4 NULL 5 Using index condition
+1 SIMPLE t1 range a a 4 NULL 4 Using index condition
SELECT * FROM t1 WHERE a NOT IN ('24:00:01','24:00:02');
a filler
24:00:03 yes
@@ -1752,7 +1764,7 @@ INSERT INTO t1 VALUES ('838:00:03', 'yes');
INSERT INTO t1 VALUES ('838:00:04', 'yes');
EXPLAIN SELECT * FROM t1 WHERE a NOT IN ('838:00:01','838:00:02');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 4 NULL 5 Using index condition
+1 SIMPLE t1 range a a 4 NULL 4 Using index condition
SELECT * FROM t1 WHERE a NOT IN ('838:00:01','838:00:02');
a filler
838:00:03 yes
@@ -1830,7 +1842,7 @@ INSERT INTO t1 VALUES ('23:00:03.1', 'yes');
INSERT INTO t1 VALUES ('23:00:04.1', 'yes');
EXPLAIN SELECT * FROM t1 WHERE a NOT IN ('23:00:01.1','23:00:02.1');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 5 Using index condition
+1 SIMPLE t1 range a a 5 NULL 4 Using index condition
SELECT * FROM t1 WHERE a NOT IN ('23:00:01.1','23:00:02.1');
a filler
23:00:03.1 yes
@@ -1908,7 +1920,7 @@ INSERT INTO t1 VALUES ('838:00:03.1', 'yes');
INSERT INTO t1 VALUES ('838:00:04.1', 'yes');
EXPLAIN SELECT * FROM t1 WHERE a NOT IN ('838:00:01.1','838:00:02.1');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 5 Using index condition
+1 SIMPLE t1 range a a 5 NULL 4 Using index condition
SELECT * FROM t1 WHERE a NOT IN ('838:00:01.1','838:00:02.1');
a filler
838:00:03.1 yes
@@ -1953,3 +1965,248 @@ a filler
-838:00:04.1 yes
-838:00:03.1 yes
DROP TABLE t1;
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-11362 True condition elimination does not work for DECIMAL and temporal dynamic SQL parameters
+#
+CREATE TABLE t1 (a TIME);
+INSERT INTO t1 VALUES (1),(2),(3);
+# Equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIME'10:20:30',a)<=>COALESCE(TIME'10:20:30',a);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING TIME'10:20:30',TIME'10:20:30';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIME''10:20:30'',a)<=>COALESCE(?,a)' USING TIME'10:20:30';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(TIME''10:20:30'',a)' USING TIME'10:20:30';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+# Not equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIME'10:20:30',a)<=>COALESCE(TIME'10:20:31',a);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(TIME'10:20:30',`test`.`t1`.`a`) <=> coalesce(TIME'10:20:31',`test`.`t1`.`a`)
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING TIME'10:20:30',TIME'10:20:31';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(<cache>(TIME'10:20:30'),`test`.`t1`.`a`) <=> coalesce(<cache>(TIME'10:20:31'),`test`.`t1`.`a`)
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIME''10:20:30'',a)<=>COALESCE(?,a)' USING TIME'10:20:31';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(TIME'10:20:30',`test`.`t1`.`a`) <=> coalesce(<cache>(TIME'10:20:31'),`test`.`t1`.`a`)
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(TIME''10:20:30'',a)' USING TIME'10:20:31';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where coalesce(<cache>(TIME'10:20:31'),`test`.`t1`.`a`) <=> coalesce(TIME'10:20:30',`test`.`t1`.`a`)
+DROP TABLE t1;
+#
+# MDEV-16426 Optimizer erroneously treats equal constants of different formats as same
+#
+CREATE TABLE t1 (a TIME);
+INSERT INTO t1 VALUES ('00:00:00'),('00:00:01'),('00:00:02');
+Equal values
+SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME'00:00:00.0',a))<=>LENGTH(COALESCE(TIME'00:00:00.0',a));
+a
+00:00:00
+00:00:01
+00:00:02
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME'00:00:00.0',a))<=>LENGTH(COALESCE(TIME'00:00:00.0',a));
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIME'00:00:00.0';
+a
+00:00:00
+00:00:01
+00:00:02
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIME'00:00:00.0';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where 1
+Values with different formats
+SELECT LENGTH(COALESCE(TIME'00:00:00.0',a)),LENGTH(COALESCE(TIME'00:00:00.00',a)) FROM t1;
+LENGTH(COALESCE(TIME'00:00:00.0',a)) LENGTH(COALESCE(TIME'00:00:00.00',a))
+10 11
+10 11
+10 11
+SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME'00:00:00.0',a))<=>LENGTH(COALESCE(TIME'00:00:00.00',a));
+a
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME'00:00:00.0',a))<=>LENGTH(COALESCE(TIME'00:00:00.00',a));
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where octet_length(coalesce(TIME'00:00:00.0',`test`.`t1`.`a`)) <=> octet_length(coalesce(TIME'00:00:00.00',`test`.`t1`.`a`))
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIME'00:00:00.00';
+a
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIME'00:00:00.00';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where octet_length(coalesce(TIME'00:00:00.0',`test`.`t1`.`a`)) <=> octet_length(coalesce(<cache>(TIME'00:00:00.00'),`test`.`t1`.`a`))
+DROP TABLE t1;
+#
+# MDEV-16971 Assertion `is_valid_value_slow()' failed in Time::adjust_time_range_or_invalidate
+#
+SET sql_mode='';
+CREATE TABLE t1 (d1 date, t1 time, KEY t1 (t1));
+INSERT INTO t1 VALUES ('1982-12-19','08:16:31'),('1981-04-19','21:52:59'),('1971-06-09','07:15:44'),('2007-08-15','03:55:02'),('1993-06-05','04:17:51'),('2034-07-01','17:31:12'),('1998-08-24','08:09:27'),('1991-01-15','01:14:07'),('2001-02-25','10:41:28'),('1974-06-24','10:21:58'),('1977-04-21','16:38:05'),('1981-12-03','01:24:42'),('1972-06-15','20:19:16'),('1989-08-10','08:53:47'),('2018-05-19','15:06:49'),('1984-01-12','15:56:11'),('2013-01-23','04:16:16'),('2000-06-10','02:06:44'),('1995-01-03','04:51:38');
+CREATE TABLE t2 (d1 date );
+INSERT INTO t2 VALUES ('2018-06-01'),('1979-10-25'),('1974-08-22'),('1980-06-17');
+SELECT * FROM (t1 JOIN t2 ON (t2.d1 = t1.t1)) WHERE (t1.d1 > 70 );
+d1 t1 d1
+UPDATE (t1 JOIN t2 ON (t2.d1 = t1.t1)) SET t1.d1 = '2018-07-07' WHERE (t1.d1 > 70 );
+DROP TABLE t1,t2;
+#
+# MDEV-17219 Assertion `!t->fraction_remainder(decimals())' failed in Field_time::store_TIME_with_warning
+#
+SET optimizer_use_condition_selectivity=3;
+CREATE TABLE t1 (it TIME NOT NULL);
+INSERT INTO t1 VALUES ('07:25:13'),('05:15:55'),('09:58:01'),('04:23:57'),('19:37:28'),('01:38:05'),('20:50:52');
+SELECT 1 FROM t1 WHERE it < -7487797330456870912;
+1
+Warnings:
+Warning 1292 Truncated incorrect time value: '-7487797330456870912'
+DROP TABLE t1;
+SET optimizer_use_condition_selectivity=DEFAULT;
+#
+# MDEV-17417 TIME(99991231235959) returns 838:59:59 instead of 23:59:58
+#
+SELECT TIME(99991231235957), TIME(99991231235958), TIME(99991231235959);
+TIME(99991231235957) TIME(99991231235958) TIME(99991231235959)
+23:59:57 23:59:58 23:59:59
+#
+# MDEV-17634 Regression: TIME(0)=TIME('z') returns NULL vs 1
+#
+SELECT
+TIMESTAMP(0)=TIMESTAMP('z') AS ts,
+DATE(0)=DATE('z') AS d,
+TIME(0)=TIME('z') AS t;
+ts d t
+1 1 1
+Warnings:
+Warning 1292 Truncated incorrect datetime value: 'z'
+Warning 1292 Truncated incorrect datetime value: 'z'
+Warning 1292 Truncated incorrect time value: 'z'
+SELECT
+TIMESTAMP(0)=TIMESTAMP('') AS ts,
+DATE(0)=DATE('') AS d,
+TIME(0)=TIME('') AS t;
+ts d t
+1 1 1
+Warnings:
+Warning 1292 Truncated incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
+Warning 1292 Truncated incorrect time value: ''
+#
+# MDEV-17563 Different results using table or view when comparing values of time type
+#
+CREATE TABLE t1 (pk int, x1 time, x2 varchar(1));
+INSERT INTO t1 VALUES (17,'09:16:37','k'),(70,'19:44:22','k');
+CREATE VIEW v1 AS SELECT * FROM t1;
+SELECT pk FROM t1 WHERE x1 >x2;
+pk
+17
+70
+Warnings:
+Warning 1292 Truncated incorrect time value: 'k'
+Warning 1292 Truncated incorrect time value: 'k'
+SELECT pk FROM v1 WHERE x1 >x2;
+pk
+17
+70
+Warnings:
+Warning 1292 Truncated incorrect time value: 'k'
+Warning 1292 Truncated incorrect time value: 'k'
+DROP VIEW v1;
+DROP TABLE t1;
+CREATE TABLE t1 (pk int, x1 time, x2 varchar(1));
+INSERT INTO t1 VALUES (17,'09:16:37',''),(70,'19:44:22','k');
+CREATE VIEW v1 AS SELECT * FROM t1;
+SELECT pk FROM t1 WHERE x1 >x2;
+pk
+17
+70
+Warnings:
+Warning 1292 Truncated incorrect time value: ''
+Warning 1292 Truncated incorrect time value: 'k'
+SELECT pk FROM v1 WHERE x1 >x2;
+pk
+17
+70
+Warnings:
+Warning 1292 Truncated incorrect time value: ''
+Warning 1292 Truncated incorrect time value: 'k'
+DROP VIEW IF EXISTS v1;
+DROP TABLE IF EXISTS t1;
+#
+# MDEV-17625 Different warnings when comparing a garbage to DATETIME vs TIME
+#
+SELECT TIMESTAMP(0)='z', DATE(0)='z', TIME(0)='z';
+TIMESTAMP(0)='z' DATE(0)='z' TIME(0)='z'
+1 1 1
+Warnings:
+Warning 1292 Truncated incorrect datetime value: 'z'
+Warning 1292 Truncated incorrect datetime value: 'z'
+Warning 1292 Truncated incorrect time value: 'z'
+#
+# MDEV-17319 Assertion `ts_type != MYSQL_TIMESTAMP_TIME' failed upon inserting into TIME field
+#
+CREATE TABLE t1 (t TIME);
+SET SESSION SQL_MODE='TRADITIONAL';
+INSERT INTO t1 VALUES ('0000-00-00 00:00:00'),('0000-00-00 00:00:00');
+ERROR 22007: Incorrect time value: '0000-00-00 00:00:00' for column `test`.`t1`.`t` at row 1
+SET sql_mode=DEFAULT;
+DROP TABLE t1;
+#
+# MDEV-18070 Assertion `nanoseconds <= 1000000000' failed in Temporal::add_nanoseconds_ssff with TIME_ROUND_FRACTIONAL
+#
+CREATE TABLE t1 (t TIME);
+SET SQL_MODE= 'TIME_ROUND_FRACTIONAL';
+INSERT INTO t1 VALUES (3e19);
+Warnings:
+Warning 1264 Out of range value for column 't' at row 1
+DROP TABLE t1;
+#
+# MDEV-18876 Assertion `is_valid_time_slow()' failed in Time::valid_MYSQL_TIME_to_valid_value
+#
+CREATE TABLE t1 (f INT);
+INSERT INTO t1 VALUES (1),(2);
+SELECT DISTINCT f FROM t1 ORDER BY 1 && ( '1972-11-06 16:58:58' BETWEEN CONVERT( 0, TIME ) AND '20:31:05' );
+f
+1
+2
+DROP TABLE t1;
+CREATE OR REPLACE TABLE t1 (a VARCHAR(32));
+INSERT INTO t1 VALUES ('1972-11-06 16:58:58');
+SELECT * FROM t1 WHERE a < TIME'20:31:05';
+a
+1972-11-06 16:58:58
+SELECT a < TIME'20:31:05' FROM t1;
+a < TIME'20:31:05'
+1
+DROP TABLE t1;
+SELECT '1972-11-06 16:58:58' < TIME'20:31:05';
+'1972-11-06 16:58:58' < TIME'20:31:05'
+1
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/type_time.test b/mysql-test/main/type_time.test
index d8bb66fcd5c..08e43041c76 100644
--- a/mysql-test/main/type_time.test
+++ b/mysql-test/main/type_time.test
@@ -514,6 +514,11 @@ SELECT * FROM t1 WHERE a=TIMESTAMP'0000-00-01 10:20:30' AND LENGTH(a)=8;
EXPLAIN EXTENDED
SELECT * FROM t1 WHERE a=TIMESTAMP'0000-00-01 10:20:30' AND LENGTH(a)=30+RAND();
+EXPLAIN EXTENDED
+SELECT * FROM t1 WHERE a=TIMESTAMP'0000-01-00 10:20:30' AND LENGTH(a)=8;
+EXPLAIN EXTENDED
+SELECT * FROM t1 WHERE a=TIMESTAMP'0001-00-00 10:20:30' AND LENGTH(a)=8;
+
--echo # Old mode, TIMESTAMP-alike string literal, zero YYYYMMDD, Ok to propagate
SELECT * FROM t1 WHERE a='0000-00-00 10:20:30';
SELECT * FROM t1 WHERE a='0000-00-00 10:20:30' AND LENGTH(a)=8;
@@ -1290,3 +1295,158 @@ INSERT INTO t1 VALUES ('-838:00:04.1', 'yes');
EXPLAIN SELECT * FROM t1 WHERE a NOT IN ('-838:00:01.1','-838:00:02.1');
SELECT * FROM t1 WHERE a NOT IN ('-838:00:01.1','-838:00:02.1');
DROP TABLE t1;
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-11362 True condition elimination does not work for DECIMAL and temporal dynamic SQL parameters
+--echo #
+
+CREATE TABLE t1 (a TIME);
+INSERT INTO t1 VALUES (1),(2),(3);
+--echo # Equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIME'10:20:30',a)<=>COALESCE(TIME'10:20:30',a);
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING TIME'10:20:30',TIME'10:20:30';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIME''10:20:30'',a)<=>COALESCE(?,a)' USING TIME'10:20:30';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(TIME''10:20:30'',a)' USING TIME'10:20:30';
+--echo # Not equal values
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIME'10:20:30',a)<=>COALESCE(TIME'10:20:31',a);
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(?,a)' USING TIME'10:20:30',TIME'10:20:31';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(TIME''10:20:30'',a)<=>COALESCE(?,a)' USING TIME'10:20:31';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(?,a)<=>COALESCE(TIME''10:20:30'',a)' USING TIME'10:20:31';
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-16426 Optimizer erroneously treats equal constants of different formats as same
+--echo #
+CREATE TABLE t1 (a TIME);
+INSERT INTO t1 VALUES ('00:00:00'),('00:00:01'),('00:00:02');
+
+--echo Equal values
+SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME'00:00:00.0',a))<=>LENGTH(COALESCE(TIME'00:00:00.0',a));
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME'00:00:00.0',a))<=>LENGTH(COALESCE(TIME'00:00:00.0',a));
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIME'00:00:00.0';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIME'00:00:00.0';
+
+--echo Values with different formats
+SELECT LENGTH(COALESCE(TIME'00:00:00.0',a)),LENGTH(COALESCE(TIME'00:00:00.00',a)) FROM t1;
+SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME'00:00:00.0',a))<=>LENGTH(COALESCE(TIME'00:00:00.00',a));
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME'00:00:00.0',a))<=>LENGTH(COALESCE(TIME'00:00:00.00',a));
+EXECUTE IMMEDIATE 'SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIME'00:00:00.00';
+EXECUTE IMMEDIATE 'EXPLAIN EXTENDED SELECT * FROM t1 WHERE LENGTH(COALESCE(TIME''00:00:00.0'',a))<=>LENGTH(COALESCE(?,a))' USING TIME'00:00:00.00';
+
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-16971 Assertion `is_valid_value_slow()' failed in Time::adjust_time_range_or_invalidate
+--echo #
+SET sql_mode='';
+CREATE TABLE t1 (d1 date, t1 time, KEY t1 (t1));
+INSERT INTO t1 VALUES ('1982-12-19','08:16:31'),('1981-04-19','21:52:59'),('1971-06-09','07:15:44'),('2007-08-15','03:55:02'),('1993-06-05','04:17:51'),('2034-07-01','17:31:12'),('1998-08-24','08:09:27'),('1991-01-15','01:14:07'),('2001-02-25','10:41:28'),('1974-06-24','10:21:58'),('1977-04-21','16:38:05'),('1981-12-03','01:24:42'),('1972-06-15','20:19:16'),('1989-08-10','08:53:47'),('2018-05-19','15:06:49'),('1984-01-12','15:56:11'),('2013-01-23','04:16:16'),('2000-06-10','02:06:44'),('1995-01-03','04:51:38');
+CREATE TABLE t2 (d1 date );
+INSERT INTO t2 VALUES ('2018-06-01'),('1979-10-25'),('1974-08-22'),('1980-06-17');
+SELECT * FROM (t1 JOIN t2 ON (t2.d1 = t1.t1)) WHERE (t1.d1 > 70 );
+UPDATE (t1 JOIN t2 ON (t2.d1 = t1.t1)) SET t1.d1 = '2018-07-07' WHERE (t1.d1 > 70 );
+DROP TABLE t1,t2;
+
+--echo #
+--echo # MDEV-17219 Assertion `!t->fraction_remainder(decimals())' failed in Field_time::store_TIME_with_warning
+--echo #
+SET optimizer_use_condition_selectivity=3;
+CREATE TABLE t1 (it TIME NOT NULL);
+INSERT INTO t1 VALUES ('07:25:13'),('05:15:55'),('09:58:01'),('04:23:57'),('19:37:28'),('01:38:05'),('20:50:52');
+SELECT 1 FROM t1 WHERE it < -7487797330456870912;
+DROP TABLE t1;
+SET optimizer_use_condition_selectivity=DEFAULT;
+
+--echo #
+--echo # MDEV-17417 TIME(99991231235959) returns 838:59:59 instead of 23:59:58
+--echo #
+SELECT TIME(99991231235957), TIME(99991231235958), TIME(99991231235959);
+
+--echo #
+--echo # MDEV-17634 Regression: TIME(0)=TIME('z') returns NULL vs 1
+--echo #
+SELECT
+ TIMESTAMP(0)=TIMESTAMP('z') AS ts,
+ DATE(0)=DATE('z') AS d,
+ TIME(0)=TIME('z') AS t;
+
+SELECT
+ TIMESTAMP(0)=TIMESTAMP('') AS ts,
+ DATE(0)=DATE('') AS d,
+ TIME(0)=TIME('') AS t;
+
+
+--echo #
+--echo # MDEV-17563 Different results using table or view when comparing values of time type
+--echo #
+
+CREATE TABLE t1 (pk int, x1 time, x2 varchar(1));
+INSERT INTO t1 VALUES (17,'09:16:37','k'),(70,'19:44:22','k');
+CREATE VIEW v1 AS SELECT * FROM t1;
+SELECT pk FROM t1 WHERE x1 >x2;
+SELECT pk FROM v1 WHERE x1 >x2;
+DROP VIEW v1;
+DROP TABLE t1;
+
+
+CREATE TABLE t1 (pk int, x1 time, x2 varchar(1));
+INSERT INTO t1 VALUES (17,'09:16:37',''),(70,'19:44:22','k');
+CREATE VIEW v1 AS SELECT * FROM t1;
+SELECT pk FROM t1 WHERE x1 >x2;
+SELECT pk FROM v1 WHERE x1 >x2;
+DROP VIEW IF EXISTS v1;
+DROP TABLE IF EXISTS t1;
+
+
+--echo #
+--echo # MDEV-17625 Different warnings when comparing a garbage to DATETIME vs TIME
+--echo #
+
+SELECT TIMESTAMP(0)='z', DATE(0)='z', TIME(0)='z';
+
+
+--echo #
+--echo # MDEV-17319 Assertion `ts_type != MYSQL_TIMESTAMP_TIME' failed upon inserting into TIME field
+--echo #
+
+CREATE TABLE t1 (t TIME);
+SET SESSION SQL_MODE='TRADITIONAL';
+--error ER_TRUNCATED_WRONG_VALUE
+INSERT INTO t1 VALUES ('0000-00-00 00:00:00'),('0000-00-00 00:00:00');
+SET sql_mode=DEFAULT;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-18070 Assertion `nanoseconds <= 1000000000' failed in Temporal::add_nanoseconds_ssff with TIME_ROUND_FRACTIONAL
+--echo #
+
+CREATE TABLE t1 (t TIME);
+SET SQL_MODE= 'TIME_ROUND_FRACTIONAL';
+INSERT INTO t1 VALUES (3e19);
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-18876 Assertion `is_valid_time_slow()' failed in Time::valid_MYSQL_TIME_to_valid_value
+--echo #
+
+CREATE TABLE t1 (f INT);
+INSERT INTO t1 VALUES (1),(2);
+SELECT DISTINCT f FROM t1 ORDER BY 1 && ( '1972-11-06 16:58:58' BETWEEN CONVERT( 0, TIME ) AND '20:31:05' );
+DROP TABLE t1;
+
+CREATE OR REPLACE TABLE t1 (a VARCHAR(32));
+INSERT INTO t1 VALUES ('1972-11-06 16:58:58');
+SELECT * FROM t1 WHERE a < TIME'20:31:05';
+SELECT a < TIME'20:31:05' FROM t1;
+DROP TABLE t1;
+
+SELECT '1972-11-06 16:58:58' < TIME'20:31:05';
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/type_time_hires.result b/mysql-test/main/type_time_hires.result
index 47185116bea..ca19e9ed5bf 100644
--- a/mysql-test/main/type_time_hires.result
+++ b/mysql-test/main/type_time_hires.result
@@ -67,8 +67,10 @@ a
01:02:03.4567
select extract(microsecond from a + interval 100 microsecond) from t1 where a>'2010-11-12 01:02:03.456';
extract(microsecond from a + interval 100 microsecond)
+456800
select a from t1 where a>'2010-11-12 01:02:03.456' group by a;
a
+01:02:03.4567
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -115,6 +117,11 @@ NULL
delete from t1 where a < 20110101;
select * from t1;
a
+01:02:13.3332
+NULL
+delete from t1 where a is not null;
+select * from t1;
+a
NULL
create table t2 select * from t1;
create table t3 like t1;
diff --git a/mysql-test/main/type_time_round.result b/mysql-test/main/type_time_round.result
new file mode 100644
index 00000000000..31e97c888db
--- /dev/null
+++ b/mysql-test/main/type_time_round.result
@@ -0,0 +1,260 @@
+SET sql_mode=IF(@@version LIKE '%MariaDB%', 'TIME_ROUND_FRACTIONAL', '');
+SET @default_sql_mode=@@sql_mode;
+#
+# TIME: SET
+#
+CREATE TABLE t1 (a TIME(3), b TIME(4));
+INSERT INTO t1 VALUES(NULL,'00:00:00.9999');
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+a
+00:00:01.000
+DROP TABLE t1;
+CREATE TABLE t1 (a TIME(3), b VARCHAR(64));
+INSERT INTO t1 VALUES(NULL,'00:00:00.9999');
+INSERT INTO t1 VALUES(NULL,'00:00:00.9999999');
+UPDATE t1 SET a=b;
+Warnings:
+Note 1265 Data truncated for column 'a' at row 2
+SELECT a FROM t1;
+a
+00:00:01.000
+00:00:01.000
+DROP TABLE t1;
+CREATE TABLE t1 (a TIME(3), b DECIMAL(38,10));
+INSERT INTO t1 VALUES(NULL,0.9999);
+INSERT INTO t1 VALUES(NULL,0.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+a
+00:00:01.000
+00:00:01.000
+DROP TABLE t1;
+CREATE TABLE t1 (a TIME(3), b DOUBLE);
+INSERT INTO t1 VALUES(NULL,0.9999);
+INSERT INTO t1 VALUES(NULL,0.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+a
+00:00:01.000
+00:00:01.000
+DROP TABLE t1;
+CREATE TABLE t1 (a TIME(6), b VARCHAR(64));
+INSERT INTO t1 VALUES(NULL,'00:00:00.9999999');
+UPDATE t1 SET a=b;
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+SELECT a FROM t1;
+a
+00:00:01.000000
+DROP TABLE t1;
+CREATE TABLE t1 (a TIME(6), b DECIMAL(38,10));
+INSERT INTO t1 VALUES(NULL,0.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+a
+00:00:01.000000
+DROP TABLE t1;
+CREATE TABLE t1 (a TIME(6), b DOUBLE);
+INSERT INTO t1 VALUES(NULL,0.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+a
+00:00:01.000000
+DROP TABLE t1;
+#
+# TIME: ALTER
+#
+CREATE TABLE t1 (a TIME(4));
+INSERT INTO t1 VALUES('00:00:00.9999');
+ALTER TABLE t1 MODIFY a TIME(3);
+SELECT a FROM t1;
+a
+00:00:01.000
+DROP TABLE t1;
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('00:00:00.9999');
+INSERT INTO t1 VALUES('00:00:00.9999999');
+ALTER TABLE t1 MODIFY a TIME(3);
+Warnings:
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+SELECT a FROM t1;
+a
+00:00:01.000
+00:00:01.000
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(0.9999);
+INSERT INTO t1 VALUES(0.9999999);
+ALTER TABLE t1 MODIFY a TIME(3);
+SELECT a FROM t1;
+a
+00:00:01.000
+00:00:01.000
+DROP TABLE t1;
+CREATE TABLE t1 (a DOUBLE);
+INSERT INTO t1 VALUES(0.9999);
+INSERT INTO t1 VALUES(0.9999999);
+ALTER TABLE t1 MODIFY a TIME(3);
+Warnings:
+Note 1265 Data truncated for column 'a' at row 2
+SELECT a FROM t1;
+a
+00:00:01.000
+00:00:01.000
+DROP TABLE t1;
+#
+# TIME: CAST
+#
+CREATE TABLE t1 (a TIME(4));
+INSERT INTO t1 VALUES('00:00:00.9999');
+SELECT a, CAST(a AS TIME(3)) FROM t1;
+a CAST(a AS TIME(3))
+00:00:00.9999 00:00:01.000
+DROP TABLE t1;
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('00:00:00.9999');
+INSERT INTO t1 VALUES('00:00:00.9999999');
+SELECT a, CAST(a AS TIME(3)) FROM t1;
+a CAST(a AS TIME(3))
+00:00:00.9999 00:00:01.000
+00:00:00.9999999 00:00:01.000
+Warnings:
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(0.9999);
+INSERT INTO t1 VALUES(0.9999999);
+SELECT a, CAST(a AS TIME(3)) FROM t1;
+a CAST(a AS TIME(3))
+0.9999000000 00:00:01.000
+0.9999999000 00:00:01.000
+DROP TABLE t1;
+CREATE TABLE t1 (a DOUBLE);
+INSERT INTO t1 VALUES(0.9999);
+INSERT INTO t1 VALUES(0.9999999);
+SELECT a, CAST(a AS TIME(3)) FROM t1;
+a CAST(a AS TIME(3))
+0.9999 00:00:01.000
+0.9999999 00:00:01.000
+DROP TABLE t1;
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('00:00:00.9999999');
+SELECT a, CAST(a AS TIME(6)) FROM t1;
+a CAST(a AS TIME(6))
+00:00:00.9999999 00:00:01.000000
+Warnings:
+Note 1292 Truncated incorrect time value: '00:00:00.9999999'
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(0.9999999);
+SELECT a, CAST(a AS TIME(6)) FROM t1;
+a CAST(a AS TIME(6))
+0.9999999000 00:00:01.000000
+DROP TABLE t1;
+CREATE TABLE t1 (a DOUBLE);
+INSERT INTO t1 VALUES(0.9999999);
+SELECT a, CAST(a AS TIME(6)) FROM t1;
+a CAST(a AS TIME(6))
+0.9999999 00:00:01.000000
+DROP TABLE t1;
+#
+# NOW
+#
+SET time_zone='+00:00';
+SET timestamp=UNIX_TIMESTAMP('2010-12-31 23:59:59.999999');
+CREATE OR REPLACE TABLE t1 (id SERIAL, a TIME(4));
+INSERT INTO t1 (a) VALUES (now(6));
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+INSERT INTO t1 (a) VALUES (CURRENT_TIMESTAMP(6));
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+INSERT INTO t1 (a) VALUES (CURRENT_TIME(6));
+SELECT * FROM t1;
+id a
+1 24:00:00.0000
+2 24:00:00.0000
+3 24:00:00.0000
+DROP TABLE t1;
+SET timestamp=DEFAULT;
+SET time_zone=DEFAULT;
+#
+# Equal field propagation
+#
+CREATE TABLE t1 (a TIME(6));
+INSERT INTO t1 VALUES (0.999999);
+INSERT INTO t1 VALUES (0.9999999);
+SELECT * FROM t1 WHERE a=0.9999999;
+a
+00:00:01.000000
+SELECT * FROM t1 WHERE a='0.9999999';
+a
+00:00:01.000000
+Warnings:
+Note 1292 Truncated incorrect time value: '0.9999999'
+SELECT * FROM t1 WHERE a='0.9999999' AND a>='0.9999999';
+a
+00:00:01.000000
+Warnings:
+Note 1292 Truncated incorrect time value: '0.9999999'
+Note 1292 Truncated incorrect time value: '0.9999999'
+SELECT * FROM t1 WHERE a='0.9999999' AND CONCAT(a)='00:00:01.000000';
+a
+00:00:01.000000
+Warnings:
+Note 1292 Truncated incorrect time value: '0.9999999'
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='0.9999999' AND a>='0.9999999';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1292 Truncated incorrect time value: '0.9999999'
+Note 1292 Truncated incorrect time value: '0.9999999'
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = TIME'00:00:01'
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='0.9999999' AND CONCAT(a)='00:00:01.000000';
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1292 Truncated incorrect time value: '0.9999999'
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = TIME'00:00:01'
+DROP TABLE t1;
+#
+# Comparing non-temporal to TIME
+#
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT t1 VALUES ('22:59:59.9999999');
+SELECT * FROM t1 WHERE a=TIME'23:00:00';
+a
+22:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect time value: '22:59:59.9999999'
+SELECT * FROM t1 WHERE CONCAT(a)=TIME'23:00:00';
+a
+22:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect time value: '22:59:59.9999999'
+SELECT * FROM t1 WHERE COALESCE(a)=TIME'23:00:00';
+a
+22:59:59.9999999
+Warnings:
+Note 1292 Truncated incorrect time value: '22:59:59.9999999'
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(32,7));
+INSERT t1 VALUES (225959.9999999);
+SELECT * FROM t1 WHERE a=TIME'23:00:00';
+a
+225959.9999999
+SELECT * FROM t1 WHERE COALESCE(a)=TIME'23:00:00';
+a
+225959.9999999
+DROP TABLE t1;
+#
+# Literal corner case
+#
+SELECT TIME'838:59:59.999999';
+TIME'838:59:59.999999'
+838:59:59.999999
+SELECT TIME'838:59:59.9999999';
+ERROR HY000: Incorrect TIME value: '838:59:59.9999999'
+SELECT TIME'839:00:00';
+ERROR HY000: Incorrect TIME value: '839:00:00'
diff --git a/mysql-test/main/type_time_round.test b/mysql-test/main/type_time_round.test
new file mode 100644
index 00000000000..6d4b2d8947a
--- /dev/null
+++ b/mysql-test/main/type_time_round.test
@@ -0,0 +1,184 @@
+SET sql_mode=IF(@@version LIKE '%MariaDB%', 'TIME_ROUND_FRACTIONAL', '');
+SET @default_sql_mode=@@sql_mode;
+
+--echo #
+--echo # TIME: SET
+--echo #
+
+CREATE TABLE t1 (a TIME(3), b TIME(4));
+INSERT INTO t1 VALUES(NULL,'00:00:00.9999');
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a TIME(3), b VARCHAR(64));
+INSERT INTO t1 VALUES(NULL,'00:00:00.9999');
+INSERT INTO t1 VALUES(NULL,'00:00:00.9999999');
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a TIME(3), b DECIMAL(38,10));
+INSERT INTO t1 VALUES(NULL,0.9999);
+INSERT INTO t1 VALUES(NULL,0.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a TIME(3), b DOUBLE);
+INSERT INTO t1 VALUES(NULL,0.9999);
+INSERT INTO t1 VALUES(NULL,0.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a TIME(6), b VARCHAR(64));
+INSERT INTO t1 VALUES(NULL,'00:00:00.9999999');
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a TIME(6), b DECIMAL(38,10));
+INSERT INTO t1 VALUES(NULL,0.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a TIME(6), b DOUBLE);
+INSERT INTO t1 VALUES(NULL,0.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # TIME: ALTER
+--echo #
+
+CREATE TABLE t1 (a TIME(4));
+INSERT INTO t1 VALUES('00:00:00.9999');
+ALTER TABLE t1 MODIFY a TIME(3);
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('00:00:00.9999');
+INSERT INTO t1 VALUES('00:00:00.9999999');
+ALTER TABLE t1 MODIFY a TIME(3);
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(0.9999);
+INSERT INTO t1 VALUES(0.9999999);
+ALTER TABLE t1 MODIFY a TIME(3);
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DOUBLE);
+INSERT INTO t1 VALUES(0.9999);
+INSERT INTO t1 VALUES(0.9999999);
+ALTER TABLE t1 MODIFY a TIME(3);
+SELECT a FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # TIME: CAST
+--echo #
+
+CREATE TABLE t1 (a TIME(4));
+INSERT INTO t1 VALUES('00:00:00.9999');
+SELECT a, CAST(a AS TIME(3)) FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('00:00:00.9999');
+INSERT INTO t1 VALUES('00:00:00.9999999');
+SELECT a, CAST(a AS TIME(3)) FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(0.9999);
+INSERT INTO t1 VALUES(0.9999999);
+SELECT a, CAST(a AS TIME(3)) FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DOUBLE);
+INSERT INTO t1 VALUES(0.9999);
+INSERT INTO t1 VALUES(0.9999999);
+SELECT a, CAST(a AS TIME(3)) FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('00:00:00.9999999');
+SELECT a, CAST(a AS TIME(6)) FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(0.9999999);
+SELECT a, CAST(a AS TIME(6)) FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DOUBLE);
+INSERT INTO t1 VALUES(0.9999999);
+SELECT a, CAST(a AS TIME(6)) FROM t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # NOW
+--echo #
+
+SET time_zone='+00:00';
+SET timestamp=UNIX_TIMESTAMP('2010-12-31 23:59:59.999999');
+CREATE OR REPLACE TABLE t1 (id SERIAL, a TIME(4));
+INSERT INTO t1 (a) VALUES (now(6));
+INSERT INTO t1 (a) VALUES (CURRENT_TIMESTAMP(6));
+INSERT INTO t1 (a) VALUES (CURRENT_TIME(6));
+SELECT * FROM t1;
+DROP TABLE t1;
+SET timestamp=DEFAULT;
+SET time_zone=DEFAULT;
+
+
+--echo #
+--echo # Equal field propagation
+--echo #
+
+CREATE TABLE t1 (a TIME(6));
+INSERT INTO t1 VALUES (0.999999);
+INSERT INTO t1 VALUES (0.9999999);
+SELECT * FROM t1 WHERE a=0.9999999;
+SELECT * FROM t1 WHERE a='0.9999999';
+SELECT * FROM t1 WHERE a='0.9999999' AND a>='0.9999999';
+SELECT * FROM t1 WHERE a='0.9999999' AND CONCAT(a)='00:00:01.000000';
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='0.9999999' AND a>='0.9999999';
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='0.9999999' AND CONCAT(a)='00:00:01.000000';
+DROP TABLE t1;
+
+
+--echo #
+--echo # Comparing non-temporal to TIME
+--echo #
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT t1 VALUES ('22:59:59.9999999');
+SELECT * FROM t1 WHERE a=TIME'23:00:00';
+SELECT * FROM t1 WHERE CONCAT(a)=TIME'23:00:00';
+SELECT * FROM t1 WHERE COALESCE(a)=TIME'23:00:00';
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DECIMAL(32,7));
+INSERT t1 VALUES (225959.9999999);
+SELECT * FROM t1 WHERE a=TIME'23:00:00';
+SELECT * FROM t1 WHERE COALESCE(a)=TIME'23:00:00';
+DROP TABLE t1;
+
+--echo #
+--echo # Literal corner case
+--echo #
+
+SELECT TIME'838:59:59.999999';
+--error ER_WRONG_VALUE
+SELECT TIME'838:59:59.9999999';
+--error ER_WRONG_VALUE
+SELECT TIME'839:00:00';
diff --git a/mysql-test/main/type_timestamp.result b/mysql-test/main/type_timestamp.result
index 498cc472f17..97a70d042eb 100644
--- a/mysql-test/main/type_timestamp.result
+++ b/mysql-test/main/type_timestamp.result
@@ -492,8 +492,12 @@ INSERT INTO t1 (f2,f3) VALUES (NOW(), "0000-00-00 00:00:00");
INSERT INTO t1 (f2,f3) VALUES (NOW(), NULL);
INSERT INTO t1 (f2,f3) VALUES (NOW(), ASCII(NULL));
INSERT INTO t1 (f2,f3) VALUES (NOW(), FROM_UNIXTIME('9999999999'));
+Warnings:
+Warning 1292 Truncated incorrect unixtime value: '9999999999'
INSERT INTO t1 (f2,f3) VALUES (NOW(), TIME(NULL));
UPDATE t1 SET f2=NOW(), f3=FROM_UNIXTIME('9999999999') WHERE f1=1;
+Warnings:
+Warning 1292 Truncated incorrect unixtime value: '9999999999'
SELECT f1,f2-f3 FROM t1;
f1 f2-f3
1 0
@@ -528,6 +532,10 @@ DROP TABLE t1;
# are appended with .0
#
CREATE TABLE t1 ( a TIMESTAMP, KEY ( a ) );
+INSERT INTO t1 VALUES( '2010-02-01 09:30:01' );
+INSERT INTO t1 VALUES( '2010-02-01 09:30:02' );
+INSERT INTO t1 VALUES( '2010-02-01 09:30:03' );
+INSERT INTO t1 VALUES( '2010-02-01 09:30:04' );
INSERT INTO t1 VALUES( '2010-02-01 09:31:01' );
INSERT INTO t1 VALUES( '2010-02-01 09:31:02' );
INSERT INTO t1 VALUES( '2010-02-01 09:31:03' );
@@ -544,10 +552,18 @@ a
2010-02-01 09:31:04
SELECT * FROM t1 WHERE a <= '2010-02-01 09:31:02.0';
a
+2010-02-01 09:30:01
+2010-02-01 09:30:02
+2010-02-01 09:30:03
+2010-02-01 09:30:04
2010-02-01 09:31:01
2010-02-01 09:31:02
SELECT * FROM t1 WHERE '2010-02-01 09:31:02.0' >= a;
a
+2010-02-01 09:30:01
+2010-02-01 09:30:02
+2010-02-01 09:30:03
+2010-02-01 09:30:04
2010-02-01 09:31:01
2010-02-01 09:31:02
EXPLAIN
@@ -868,7 +884,7 @@ SELECT * FROM t1 WHERE LENGTH(a)=30+RAND() AND a=' garbage ';
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
Warnings:
-Warning 1292 Incorrect datetime value: ' garbage '
+Warning 1292 Truncated incorrect datetime value: ' garbage '
Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = TIMESTAMP'0000-00-00 00:00:00' and <cache>(octet_length(TIMESTAMP'0000-00-00 00:00:00')) = 30 + rand()
DROP TABLE t1;
CREATE TABLE t1 (a TIMESTAMP);;
@@ -1014,3 +1030,242 @@ DROP TABLE t1;
#
# End of 10.3 tests
#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-17216 Assertion `!dt->fraction_remainder(decimals())' failed in Field_temporal_with_date::store_TIME_with_warning
+#
+CREATE TABLE t1 (b BIT(20));
+CREATE TABLE t2 (t TIMESTAMP);
+INSERT IGNORE INTO t1 VALUES (b'000001001100000');
+INSERT INTO t2 SELECT * FROM t1;
+DROP TABLE t1, t2;
+CREATE TABLE t1 (a TIMESTAMP);
+INSERT INTO t1 SELECT CAST(20010101 AS UNSIGNED);
+DROP TABLE t1;
+#
+# MDEV-17928 Conversion from TIMESTAMP to VARCHAR SP variables does not work well on fractional digits
+#
+SET time_zone='+00:00';
+SET timestamp=UNIX_TIMESTAMP('2001-01-01 10:20:30.123456');
+CREATE PROCEDURE p1()
+BEGIN
+DECLARE ts10 TIMESTAMP(1) DEFAULT NOW();
+DECLARE ts16 TIMESTAMP(1) DEFAULT NOW(6);
+DECLARE dt10 DATETIME(1) DEFAULT NOW();
+DECLARE dt16 DATETIME(1) DEFAULT NOW(6);
+DECLARE vts10 VARCHAR(32) DEFAULT ts10;
+DECLARE vts16 VARCHAR(32) DEFAULT ts16;
+DECLARE vdt10 VARCHAR(32) DEFAULT dt10;
+DECLARE vdt16 VARCHAR(32) DEFAULT dt16;
+DECLARE tts10 TEXT(32) DEFAULT ts10;
+DECLARE tts16 TEXT(32) DEFAULT ts16;
+DECLARE tdt10 TEXT(32) DEFAULT dt10;
+DECLARE tdt16 TEXT(32) DEFAULT dt16;
+SELECT vts10, vts16, vdt10, vdt16;
+SELECT tts10, tts16, tdt10, tdt16;
+END;
+$$
+CALL p1;
+vts10 2001-01-01 10:20:30.0
+vts16 2001-01-01 10:20:30.1
+vdt10 2001-01-01 10:20:30.0
+vdt16 2001-01-01 10:20:30.1
+tts10 2001-01-01 10:20:30.0
+tts16 2001-01-01 10:20:30.1
+tdt10 2001-01-01 10:20:30.0
+tdt16 2001-01-01 10:20:30.1
+DROP PROCEDURE p1;
+SET timestamp=DEFAULT;
+SET time_zone=DEFAULT;
+#
+# MDEV-13995 MAX(timestamp) returns a wrong result near DST change
+#
+# Testing Item_func_rollup_const::val_native()
+# There is a bug in the below output (MDEV-16612)
+# Please remove this comment when MDEV-16612 is fixed and results are re-recorded
+CREATE TABLE t1 (id INT);
+INSERT INTO t1 VALUES (1),(2);
+BEGIN NOT ATOMIC
+DECLARE v TIMESTAMP DEFAULT '2001-01-01 10:20:30'; -- "v" will be wrapped into Item_func_rollup_const
+SELECT id, v AS v, COUNT(*) FROM t1 GROUP BY id,v WITH ROLLUP;
+END;
+$$
+id v COUNT(*)
+1 2001-01-01 10:20:30 1
+1 2001-01-01 10:20:30 1
+2 2001-01-01 10:20:30 1
+2 2001-01-01 10:20:30 1
+NULL 2001-01-01 10:20:30 2
+DROP TABLE t1;
+#
+# Testing Type_handler_timestamp_common::Item_save_in_field()
+# "txt" is expected to have three fractional digits
+SET time_zone='+00:00';
+SET timestamp=UNIX_TIMESTAMP('2001-01-01 10:20:30.123456');
+CREATE TABLE t1 (ts1 TIMESTAMP(1) NOT NULL, ts2 TIMESTAMP(3) NOT NULL, txt TEXT);
+INSERT INTO t1 VALUES ('0000-00-00 00:00:00', '0000-00-00 00:00:00',COALESCE(ts1,ts2));
+INSERT INTO t1 VALUES (NOW(),NOW(),COALESCE(ts1,ts2));
+INSERT INTO t1 VALUES (NOW(1),NOW(3),COALESCE(ts1,ts2));
+SELECT * FROM t1;
+ts1 ts2 txt
+0000-00-00 00:00:00.0 0000-00-00 00:00:00.000 0000-00-00 00:00:00.000
+2001-01-01 10:20:30.0 2001-01-01 10:20:30.000 2001-01-01 10:20:30.000
+2001-01-01 10:20:30.1 2001-01-01 10:20:30.123 2001-01-01 10:20:30.100
+DROP TABLE t1;
+SET timestamp=DEFAULT;
+SET time_zone=DEFAULT;
+#
+# Testing Field_timestamp::store_native
+#
+SET sql_mode='';
+CREATE TABLE t1 (a TIMESTAMP, b TIMESTAMP);
+INSERT INTO t1 VALUES ('0000-00-00 00:00:00','0000-00-00 00:00:00');
+SET sql_mode='STRICT_ALL_TABLES,NO_ZERO_DATE';
+UPDATE t1 SET a=b;
+ERROR 22007: Incorrect datetime value: '0000-00-00 00:00:00' for column `test`.`t1`.`a` at row 1
+UPDATE t1 SET a=COALESCE(b);
+ERROR 22007: Incorrect datetime value: '0000-00-00 00:00:00' for column `test`.`t1`.`a` at row 1
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+#
+# MDEV-17979 Assertion `0' failed in Item::val_native upon SELECT with timestamp, NULLIF, GROUP BY
+#
+CREATE TABLE t1 (a INT, b TIMESTAMP) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,'2018-06-19 00:00:00');
+SELECT NULLIF(b, 'N/A') AS f, MAX(a) FROM t1 GROUP BY f;
+f MAX(a)
+2018-06-19 00:00:00 1
+Warnings:
+Warning 1292 Truncated incorrect datetime value: 'N/A'
+DROP TABLE t1;
+#
+# MDEV-17972 Assertion `is_valid_value_slow()' failed in Datetime::Datetime
+#
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP(6)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES ('2001-01-01 10:20:30');
+FLUSH TABLES;
+MYD
+FF77777777FFFFFF
+SELECT a, CAST(a AS DATETIME) AS dt0, CAST(a AS DATETIME(6)) AS dt6 FROM t1;
+a dt0 dt6
+2033-07-07 03:01:11.999999 2033-07-07 03:01:11 2033-07-07 03:01:11.999999
+DROP TABLE t1;
+SET time_zone=DEFAULT;
+#
+# MDEV-18072 Assertion `is_null() == item->null_value || conv' failed in Timestamp_or_zero_datetime_native_null::Timestamp_or_zero_datetime_native_null upon query with GROUP BY
+#
+CREATE TABLE t1 (t TIMESTAMP);
+INSERT INTO t1 () VALUES (),();
+SELECT IF(0,t,NULL) AS f FROM t1 GROUP BY 'foo';
+f
+NULL
+DROP TABLE t1;
+#
+# MDEV-18145 Assertion `0' failed in Item::val_native upon SELECT subquery with timestamp
+#
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+CREATE TABLE t2 (pk INT PRIMARY KEY) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1),(2);
+CREATE TABLE t3 (pk INT PRIMARY KEY, b TIMESTAMP) ENGINE=MyISAM;
+SELECT ( SELECT b FROM t1 LIMIT 1 ) AS sq FROM t2 LEFT JOIN t3 USING (pk);
+sq
+NULL
+NULL
+DROP TABLE t1, t2, t3;
+#
+# MDEV-18447 Assertion `!is_zero_datetime()' failed in Timestamp_or_zero_datetime::tv
+#
+CREATE TABLE t1 (a TIMESTAMP DEFAULT 0, b TIMESTAMP DEFAULT 0, c TIME DEFAULT 0);
+INSERT INTO t1 VALUES (0,0,0);
+SELECT c IN (GREATEST(a,b)) FROM t1;
+c IN (GREATEST(a,b))
+0
+DROP TABLE t1;
+#
+# MDEV-17969 Assertion `name' failed in THD::push_warning_truncated_value_for_field
+#
+CREATE TABLE t1 (d DATE);
+INSERT INTO t1 VALUES ('2018-01-01'),('2019-01-01');
+SET SESSION SQL_MODE= 'STRICT_ALL_TABLES,NO_ZERO_DATE';
+CREATE TABLE t2 SELECT 1 AS f FROM t1 GROUP BY FROM_DAYS(d);
+ERROR 22007: Truncated incorrect date value: '0000-00-00'
+DROP TABLE t1;
+#
+# MDEV-19124 Assertion `0' failed in Item::val_native
+#
+CREATE TABLE t1 (d1 TIMESTAMP(5));
+INSERT INTO t1 VALUES ('2018-10-14 15:31:01');
+SELECT LEAD(d1,1) OVER(ORDER BY d1) FROM t1;
+LEAD(d1,1) OVER(ORDER BY d1)
+NULL
+SELECT LAG(d1,1) OVER(ORDER BY d1) FROM t1;
+LAG(d1,1) OVER(ORDER BY d1)
+NULL
+INSERT INTO t1 VALUES ('2018-10-14 15:31:02');
+INSERT INTO t1 VALUES ('2018-10-14 15:31:03');
+SELECT LEAD(d1,1) OVER(ORDER BY d1) FROM t1;
+LEAD(d1,1) OVER(ORDER BY d1)
+2018-10-14 15:31:02.00000
+2018-10-14 15:31:03.00000
+NULL
+SELECT LAG(d1,1) OVER(ORDER BY d1) FROM t1;
+LAG(d1,1) OVER(ORDER BY d1)
+NULL
+2018-10-14 15:31:01.00000
+2018-10-14 15:31:02.00000
+DROP TABLE t1;
+#
+# MDEV-18240 Assertion `0' failed in Item_cache_timestamp::val_datetime_packed
+#
+CREATE TABLE t1 (c1 timestamp);
+SELECT MIN(t1.c1) AS k1 FROM t1 HAVING (k1 >= ALL(SELECT 'a' UNION SELECT 'r'));
+k1
+Warnings:
+Warning 1292 Truncated incorrect datetime value: 'r'
+SELECT * FROM t1 HAVING MIN(t1.c1) >= ALL(SELECT 'a' UNION SELECT 'r');
+c1
+Warnings:
+Warning 1292 Truncated incorrect datetime value: 'r'
+DROP TABLE t1;
+CREATE TABLE t1 (c1 timestamp);
+INSERT INTO t1 VALUES ('2010-01-01 00:00:00');
+SELECT * FROM t1 HAVING MIN(t1.c1) >= ALL(SELECT '2010-01-01 10:00:00' UNION SELECT '2001-01-01 10:00:01');
+c1
+SELECT * FROM t1 HAVING MIN(t1.c1) >= ALL(SELECT '2000-01-01 10:00:00' UNION SELECT '2000-01-01 10:00:01');
+c1
+2010-01-01 00:00:00
+DROP TABLE t1;
+#
+# MDEV-18595 Assertion `0' failed in Item_cache_timestamp::val_datetime_packed / Predicant_to_list_comparator::cmp_arg
+#
+CREATE TABLE t1 (t TIMESTAMP DEFAULT '1971-01-01 00:00:00', f INT);
+INSERT INTO t1 VALUES ('1978-05-25 22:25:03',1),('2000-01-01 00:00:00',2);
+SELECT * FROM t1 WHERE f IN (DEFAULT(t),1);
+t f
+1978-05-25 22:25:03 1
+Warnings:
+Warning 1292 Incorrect datetime value: '1' for column `test`.`t1`.`f` at row 1
+Warning 1292 Incorrect datetime value: '2' for column `test`.`t1`.`f` at row 2
+DROP TABLE t1;
+#
+# MDEV-18503 Assertion `native.length() == binlen' failed in Type_handler_timestamp_common::make_sort_key
+#
+SET sql_mode='';
+CREATE TABLE t1 (a TIMESTAMP(3) DEFAULT 0, b TIMESTAMP);
+INSERT INTO t1 (b) VALUES ('2012-12-12 12:12:12'),('1988-08-26 12:12:12');
+SELECT GREATEST(a,b) AS f FROM t1 ORDER BY 1;
+f
+1988-08-26 12:12:12.000
+2012-12-12 12:12:12.000
+SELECT GREATEST(a,b) AS f FROM t1 ORDER BY 1 DESC;
+f
+2012-12-12 12:12:12.000
+1988-08-26 12:12:12.000
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/type_timestamp.test b/mysql-test/main/type_timestamp.test
index 6d81a86331a..ce932b7720c 100644
--- a/mysql-test/main/type_timestamp.test
+++ b/mysql-test/main/type_timestamp.test
@@ -367,6 +367,10 @@ DROP TABLE t1;
--echo #
CREATE TABLE t1 ( a TIMESTAMP, KEY ( a ) );
+INSERT INTO t1 VALUES( '2010-02-01 09:30:01' );
+INSERT INTO t1 VALUES( '2010-02-01 09:30:02' );
+INSERT INTO t1 VALUES( '2010-02-01 09:30:03' );
+INSERT INTO t1 VALUES( '2010-02-01 09:30:04' );
INSERT INTO t1 VALUES( '2010-02-01 09:31:01' );
INSERT INTO t1 VALUES( '2010-02-01 09:31:02' );
INSERT INTO t1 VALUES( '2010-02-01 09:31:03' );
@@ -606,3 +610,233 @@ DROP TABLE t1;
--echo #
--echo # End of 10.3 tests
--echo #
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-17216 Assertion `!dt->fraction_remainder(decimals())' failed in Field_temporal_with_date::store_TIME_with_warning
+--echo #
+
+CREATE TABLE t1 (b BIT(20));
+CREATE TABLE t2 (t TIMESTAMP);
+INSERT IGNORE INTO t1 VALUES (b'000001001100000');
+INSERT INTO t2 SELECT * FROM t1;
+DROP TABLE t1, t2;
+
+CREATE TABLE t1 (a TIMESTAMP);
+INSERT INTO t1 SELECT CAST(20010101 AS UNSIGNED);
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-17928 Conversion from TIMESTAMP to VARCHAR SP variables does not work well on fractional digits
+--echo #
+
+SET time_zone='+00:00';
+SET timestamp=UNIX_TIMESTAMP('2001-01-01 10:20:30.123456');
+DELIMITER $$;
+CREATE PROCEDURE p1()
+BEGIN
+ DECLARE ts10 TIMESTAMP(1) DEFAULT NOW();
+ DECLARE ts16 TIMESTAMP(1) DEFAULT NOW(6);
+ DECLARE dt10 DATETIME(1) DEFAULT NOW();
+ DECLARE dt16 DATETIME(1) DEFAULT NOW(6);
+ DECLARE vts10 VARCHAR(32) DEFAULT ts10;
+ DECLARE vts16 VARCHAR(32) DEFAULT ts16;
+ DECLARE vdt10 VARCHAR(32) DEFAULT dt10;
+ DECLARE vdt16 VARCHAR(32) DEFAULT dt16;
+ DECLARE tts10 TEXT(32) DEFAULT ts10;
+ DECLARE tts16 TEXT(32) DEFAULT ts16;
+ DECLARE tdt10 TEXT(32) DEFAULT dt10;
+ DECLARE tdt16 TEXT(32) DEFAULT dt16;
+
+ SELECT vts10, vts16, vdt10, vdt16;
+ SELECT tts10, tts16, tdt10, tdt16;
+END;
+$$
+DELIMITER ;$$
+--vertical_results
+CALL p1;
+--horizontal_results
+DROP PROCEDURE p1;
+SET timestamp=DEFAULT;
+SET time_zone=DEFAULT;
+
+--echo #
+--echo # MDEV-13995 MAX(timestamp) returns a wrong result near DST change
+--echo #
+
+--echo # Testing Item_func_rollup_const::val_native()
+
+--echo # There is a bug in the below output (MDEV-16612)
+--echo # Please remove this comment when MDEV-16612 is fixed and results are re-recorded
+
+CREATE TABLE t1 (id INT);
+INSERT INTO t1 VALUES (1),(2);
+DELIMITER $$;
+BEGIN NOT ATOMIC
+ DECLARE v TIMESTAMP DEFAULT '2001-01-01 10:20:30'; -- "v" will be wrapped into Item_func_rollup_const
+ SELECT id, v AS v, COUNT(*) FROM t1 GROUP BY id,v WITH ROLLUP;
+END;
+$$
+DELIMITER ;$$
+DROP TABLE t1;
+
+--echo #
+--echo # Testing Type_handler_timestamp_common::Item_save_in_field()
+--echo # "txt" is expected to have three fractional digits
+SET time_zone='+00:00';
+SET timestamp=UNIX_TIMESTAMP('2001-01-01 10:20:30.123456');
+CREATE TABLE t1 (ts1 TIMESTAMP(1) NOT NULL, ts2 TIMESTAMP(3) NOT NULL, txt TEXT);
+INSERT INTO t1 VALUES ('0000-00-00 00:00:00', '0000-00-00 00:00:00',COALESCE(ts1,ts2));
+INSERT INTO t1 VALUES (NOW(),NOW(),COALESCE(ts1,ts2));
+INSERT INTO t1 VALUES (NOW(1),NOW(3),COALESCE(ts1,ts2));
+SELECT * FROM t1;
+DROP TABLE t1;
+SET timestamp=DEFAULT;
+SET time_zone=DEFAULT;
+
+--echo #
+--echo # Testing Field_timestamp::store_native
+--echo #
+
+SET sql_mode='';
+CREATE TABLE t1 (a TIMESTAMP, b TIMESTAMP);
+INSERT INTO t1 VALUES ('0000-00-00 00:00:00','0000-00-00 00:00:00');
+SET sql_mode='STRICT_ALL_TABLES,NO_ZERO_DATE';
+--error ER_TRUNCATED_WRONG_VALUE
+UPDATE t1 SET a=b;
+--error ER_TRUNCATED_WRONG_VALUE
+UPDATE t1 SET a=COALESCE(b);
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+
+--echo #
+--echo # MDEV-17979 Assertion `0' failed in Item::val_native upon SELECT with timestamp, NULLIF, GROUP BY
+--echo #
+
+CREATE TABLE t1 (a INT, b TIMESTAMP) ENGINE=MyISAM;
+INSERT INTO t1 VALUES (1,'2018-06-19 00:00:00');
+SELECT NULLIF(b, 'N/A') AS f, MAX(a) FROM t1 GROUP BY f;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-17972 Assertion `is_valid_value_slow()' failed in Datetime::Datetime
+--echo #
+
+let $MYSQLD_DATADIR= `select @@datadir`;
+SET time_zone='+00:00';
+CREATE TABLE t1 (a TIMESTAMP(6)) ENGINE=MyISAM;
+INSERT INTO t1 VALUES ('2001-01-01 10:20:30');
+FLUSH TABLES;
+--remove_file $MYSQLD_DATADIR/test/t1.MYD
+--disable_query_log
+# Write a data file with one record:
+# 0xFF - record flags
+# 0x77777777 - TIMESTAMP integer part
+# 0xFFFFFF - TIMESTAMP bad fractional part
+--eval SELECT CONCAT(0xFF,0x77777777,0xFFFFFF) INTO OUTFILE '$MYSQLD_DATADIR/test/t1.MYD' FIELDS TERMINATED BY '' ESCAPED BY '' LINES TERMINATED BY ''
+--eval SELECT HEX(LOAD_FILE('$MYSQLD_DATADIR/test/t1.MYD')) AS MYD
+--enable_query_log
+SELECT a, CAST(a AS DATETIME) AS dt0, CAST(a AS DATETIME(6)) AS dt6 FROM t1;
+DROP TABLE t1;
+SET time_zone=DEFAULT;
+
+--echo #
+--echo # MDEV-18072 Assertion `is_null() == item->null_value || conv' failed in Timestamp_or_zero_datetime_native_null::Timestamp_or_zero_datetime_native_null upon query with GROUP BY
+--echo #
+
+CREATE TABLE t1 (t TIMESTAMP);
+INSERT INTO t1 () VALUES (),();
+SELECT IF(0,t,NULL) AS f FROM t1 GROUP BY 'foo';
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-18145 Assertion `0' failed in Item::val_native upon SELECT subquery with timestamp
+--echo #
+
+CREATE TABLE t1 (a INT) ENGINE=MyISAM;
+CREATE TABLE t2 (pk INT PRIMARY KEY) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1),(2);
+CREATE TABLE t3 (pk INT PRIMARY KEY, b TIMESTAMP) ENGINE=MyISAM;
+SELECT ( SELECT b FROM t1 LIMIT 1 ) AS sq FROM t2 LEFT JOIN t3 USING (pk);
+DROP TABLE t1, t2, t3;
+
+--echo #
+--echo # MDEV-18447 Assertion `!is_zero_datetime()' failed in Timestamp_or_zero_datetime::tv
+--echo #
+
+CREATE TABLE t1 (a TIMESTAMP DEFAULT 0, b TIMESTAMP DEFAULT 0, c TIME DEFAULT 0);
+INSERT INTO t1 VALUES (0,0,0);
+SELECT c IN (GREATEST(a,b)) FROM t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-17969 Assertion `name' failed in THD::push_warning_truncated_value_for_field
+--echo #
+
+CREATE TABLE t1 (d DATE);
+INSERT INTO t1 VALUES ('2018-01-01'),('2019-01-01');
+SET SESSION SQL_MODE= 'STRICT_ALL_TABLES,NO_ZERO_DATE';
+--error ER_TRUNCATED_WRONG_VALUE
+CREATE TABLE t2 SELECT 1 AS f FROM t1 GROUP BY FROM_DAYS(d);
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-19124 Assertion `0' failed in Item::val_native
+--echo #
+
+CREATE TABLE t1 (d1 TIMESTAMP(5));
+INSERT INTO t1 VALUES ('2018-10-14 15:31:01');
+SELECT LEAD(d1,1) OVER(ORDER BY d1) FROM t1;
+SELECT LAG(d1,1) OVER(ORDER BY d1) FROM t1;
+INSERT INTO t1 VALUES ('2018-10-14 15:31:02');
+INSERT INTO t1 VALUES ('2018-10-14 15:31:03');
+SELECT LEAD(d1,1) OVER(ORDER BY d1) FROM t1;
+SELECT LAG(d1,1) OVER(ORDER BY d1) FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-18240 Assertion `0' failed in Item_cache_timestamp::val_datetime_packed
+--echo #
+
+CREATE TABLE t1 (c1 timestamp);
+SELECT MIN(t1.c1) AS k1 FROM t1 HAVING (k1 >= ALL(SELECT 'a' UNION SELECT 'r'));
+SELECT * FROM t1 HAVING MIN(t1.c1) >= ALL(SELECT 'a' UNION SELECT 'r');
+DROP TABLE t1;
+
+CREATE TABLE t1 (c1 timestamp);
+INSERT INTO t1 VALUES ('2010-01-01 00:00:00');
+SELECT * FROM t1 HAVING MIN(t1.c1) >= ALL(SELECT '2010-01-01 10:00:00' UNION SELECT '2001-01-01 10:00:01');
+SELECT * FROM t1 HAVING MIN(t1.c1) >= ALL(SELECT '2000-01-01 10:00:00' UNION SELECT '2000-01-01 10:00:01');
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-18595 Assertion `0' failed in Item_cache_timestamp::val_datetime_packed / Predicant_to_list_comparator::cmp_arg
+--echo #
+
+CREATE TABLE t1 (t TIMESTAMP DEFAULT '1971-01-01 00:00:00', f INT);
+INSERT INTO t1 VALUES ('1978-05-25 22:25:03',1),('2000-01-01 00:00:00',2);
+SELECT * FROM t1 WHERE f IN (DEFAULT(t),1);
+DROP TABLE t1;
+
+
+--echo #
+--echo # MDEV-18503 Assertion `native.length() == binlen' failed in Type_handler_timestamp_common::make_sort_key
+--echo #
+
+SET sql_mode='';
+CREATE TABLE t1 (a TIMESTAMP(3) DEFAULT 0, b TIMESTAMP);
+INSERT INTO t1 (b) VALUES ('2012-12-12 12:12:12'),('1988-08-26 12:12:12');
+SELECT GREATEST(a,b) AS f FROM t1 ORDER BY 1;
+SELECT GREATEST(a,b) AS f FROM t1 ORDER BY 1 DESC;
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/main/type_timestamp_round.result b/mysql-test/main/type_timestamp_round.result
new file mode 100644
index 00000000000..7931aa0ff5b
--- /dev/null
+++ b/mysql-test/main/type_timestamp_round.result
@@ -0,0 +1,191 @@
+SET sql_mode=IF(@@version LIKE '%MariaDB%', 'TIME_ROUND_FRACTIONAL', '');
+SET @default_sql_mode=@@sql_mode;
+#
+# TIMESTAMP: SET
+#
+CREATE TABLE t1 (a TIMESTAMP(3) NULL DEFAULT NULL, b TIMESTAMP(4) NULL DEFAULT NULL);
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999');
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+a
+2001-01-01 00:00:00.000
+DROP TABLE t1;
+CREATE TABLE t1 (a TIMESTAMP(3) NULL DEFAULT NULL, b VARCHAR(64));
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999999');
+UPDATE t1 SET a=b;
+Warnings:
+Note 1265 Data truncated for column 'a' at row 2
+SELECT a FROM t1;
+a
+2001-01-01 00:00:00.000
+2001-01-01 00:00:00.000
+DROP TABLE t1;
+CREATE TABLE t1 (a TIMESTAMP(3) NULL DEFAULT NULL, b DECIMAL(38,10));
+INSERT INTO t1 VALUES(NULL,20001231235959.9999);
+INSERT INTO t1 VALUES(NULL,20001231235959.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+a
+2001-01-01 00:00:00.000
+2001-01-01 00:00:00.000
+DROP TABLE t1;
+#
+# TIMESTAMP: ALTER
+#
+CREATE TABLE t1 (a TIMESTAMP(4) NULL DEFAULT NULL);
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+ALTER TABLE t1 MODIFY a TIMESTAMP(3) NULL DEFAULT NULL;
+SELECT a FROM t1;
+a
+2001-01-01 00:00:00.000
+DROP TABLE t1;
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999999');
+ALTER TABLE t1 MODIFY a TIMESTAMP(3) NULL DEFAULT NULL;
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2000-12-31 23:59:59.9999999'
+SELECT a FROM t1;
+a
+2001-01-01 00:00:00.000
+2001-01-01 00:00:00.000
+DROP TABLE t1;
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(20001231235959.9999);
+INSERT INTO t1 VALUES(20001231235959.9999999);
+ALTER TABLE t1 MODIFY a TIMESTAMP(3) NULL DEFAULT NULL;
+SELECT a FROM t1;
+a
+2001-01-01 00:00:00.000
+2001-01-01 00:00:00.000
+DROP TABLE t1;
+#
+# Corner case:
+# ALTER TIMESTAMP to a shorter TIMESTAMP
+# All values round, maximum possible value truncates.
+#
+SET time_zone='+00:00';
+CREATE TABLE t1 (ID INT, a TIMESTAMP(6), comment VARCHAR(64));
+INSERT INTO t1 VALUES (0, '2038-01-18 23:59:59.999999', 'Should round');
+INSERT INTO t1 VALUES (1, '2038-01-19 03:14:06.999999', 'Should round');
+INSERT INTO t1 VALUES (2, '2038-01-19 03:14:07.999999', 'Should truncate');
+ALTER TABLE t1 MODIFY a TIMESTAMP(5);
+Warnings:
+Warning 1264 Out of range value for column 'a' at row 3
+SELECT * FROM t1;
+ID a comment
+0 2038-01-19 00:00:00.00000 Should round
+1 2038-01-19 03:14:07.00000 Should round
+2 2038-01-19 03:14:07.99999 Should truncate
+DROP TABLE t1;
+SET time_zone=DEFAULT;
+#
+# NOW
+#
+SET time_zone='+00:00';
+SET timestamp=UNIX_TIMESTAMP('2010-12-31 23:59:59.999999');
+CREATE OR REPLACE TABLE t1 (id SERIAL, a TIMESTAMP(4));
+INSERT INTO t1 (a) VALUES (now(6));
+INSERT INTO t1 (a) VALUES (CURRENT_TIMESTAMP(6));
+INSERT INTO t1 (a) VALUES (CURRENT_TIME(6));
+SELECT * FROM t1;
+id a
+1 2011-01-01 00:00:00.0000
+2 2011-01-01 00:00:00.0000
+3 2011-01-01 00:00:00.0000
+DROP TABLE t1;
+SET timestamp=DEFAULT;
+SET time_zone=DEFAULT;
+#
+# DATETIME to TIMESTAMP conversion with DST change
+#
+SET sql_mode=IF(@@version LIKE '%MariaDB%',
+'STRICT_ALL_TABLES,TIME_ROUND_FRACTIONAL',
+'STRICT_ALL_TABLES');
+SET time_zone='Europe/Moscow';
+CREATE TABLE t1 (a TIMESTAMP);
+INSERT INTO t1 VALUES ('2010-03-28 01:59:59.0' /* Winter time */);
+INSERT INTO t1 VALUES ('2010-03-28 01:59:59.9' /* Rounds to the DST gap */);
+ERROR 22007: Incorrect datetime value: '2010-03-28 01:59:59.9' for column `test`.`t1`.`a` at row 1
+SELECT * FROM t1;
+a
+2010-03-28 01:59:59
+DROP TABLE t1;
+SET time_zone=DEFAULT;
+SET sql_mode=@default_sql_mode;
+SET sql_mode=IF(@@version LIKE '%MariaDB%','TIME_ROUND_FRACTIONAL','');
+SET time_zone='Europe/Moscow';
+CREATE TABLE t1 (a TIMESTAMP);
+INSERT INTO t1 VALUES ('2010-03-28 01:59:59.0' /* Winter time */);
+INSERT INTO t1 VALUES ('2010-03-28 01:59:59.9' /* Rounds to the DST gap, then gets fixed to the first second of the summer time */);
+Warnings:
+Warning 1299 Invalid TIMESTAMP value in column 'a' at row 1
+SELECT a, UNIX_TIMESTAMP(a) FROM t1;
+a UNIX_TIMESTAMP(a)
+2010-03-28 01:59:59 1269730799
+2010-03-28 03:00:00 1269730800
+DROP TABLE t1;
+SET time_zone=DEFAULT;
+SET sql_mode=@default_sql_mode;
+#
+# Comparing non-temporal to TIMESTAMP
+#
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT t1 VALUES ('2001-01-01 23:59:59.9999999');
+CREATE TABLE t2 (a TIMESTAMP);
+INSERT INTO t2 VALUES ('2001-01-02 00:00:00');
+SELECT * FROM t1,t2 WHERE t1.a=t2.a;
+a a
+2001-01-01 23:59:59.9999999 2001-01-02 00:00:00
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+SELECT * FROM t1,t2 WHERE CONCAT(t1.a)=t2.a;
+a a
+2001-01-01 23:59:59.9999999 2001-01-02 00:00:00
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+SELECT * FROM t1,t2 WHERE COALESCE(t1.a)=t2.a;
+a a
+2001-01-01 23:59:59.9999999 2001-01-02 00:00:00
+Warnings:
+Note 1292 Truncated incorrect datetime value: '2001-01-01 23:59:59.9999999'
+DROP TABLE t1,t2;
+CREATE TABLE t1 (a DECIMAL(32,7));
+INSERT t1 VALUES (20010101235959.9999999);
+CREATE TABLE t2 (a TIMESTAMP);
+INSERT INTO t2 VALUES ('2001-01-02 00:00:00');
+SELECT * FROM t1,t2 WHERE t1.a=t2.a;
+a a
+20010101235959.9999999 2001-01-02 00:00:00
+SELECT * FROM t1,t2 WHERE COALESCE(t1.a)=t2.a;
+a a
+20010101235959.9999999 2001-01-02 00:00:00
+DROP TABLE t1,t2;
+#
+# MDEV-13995 MAX(timestamp) returns a wrong result near DST change
+#
+# Test Field_timestamp::store_native()
+#
+SET sql_mode=@default_sql_mode;
+SET time_zone='+00:00';
+CREATE TABLE t1 (ts0 TIMESTAMP, ts1 TIMESTAMP(1));
+INSERT INTO t1 VALUES ('2001-01-01 10:20:30', '2001-01-01 10:20:30.9');
+SELECT * FROM t1;
+ts0 ts1
+2001-01-01 10:20:30 2001-01-01 10:20:30.9
+# This should round
+UPDATE t1 SET ts0=COALESCE(ts1);
+SELECT * FROM t1;
+ts0 ts1
+2001-01-01 10:20:31 2001-01-01 10:20:30.9
+# Corner case
+UPDATE t1 SET ts1=FROM_UNIXTIME(2147483647.9);
+UPDATE t1 SET ts0=COALESCE(ts1);
+Warnings:
+Warning 1264 Out of range value for column 'ts0' at row 1
+SELECT * FROM t1;
+ts0 ts1
+2038-01-19 03:14:07 2038-01-19 03:14:07.9
+DROP TABLE t1;
+SET time_zone=DEFAULT;
diff --git a/mysql-test/main/type_timestamp_round.test b/mysql-test/main/type_timestamp_round.test
new file mode 100644
index 00000000000..19e0ea86da5
--- /dev/null
+++ b/mysql-test/main/type_timestamp_round.test
@@ -0,0 +1,160 @@
+SET sql_mode=IF(@@version LIKE '%MariaDB%', 'TIME_ROUND_FRACTIONAL', '');
+SET @default_sql_mode=@@sql_mode;
+
+--echo #
+--echo # TIMESTAMP: SET
+--echo #
+
+CREATE TABLE t1 (a TIMESTAMP(3) NULL DEFAULT NULL, b TIMESTAMP(4) NULL DEFAULT NULL);
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999');
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a TIMESTAMP(3) NULL DEFAULT NULL, b VARCHAR(64));
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES(NULL,'2000-12-31 23:59:59.9999999');
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a TIMESTAMP(3) NULL DEFAULT NULL, b DECIMAL(38,10));
+INSERT INTO t1 VALUES(NULL,20001231235959.9999);
+INSERT INTO t1 VALUES(NULL,20001231235959.9999999);
+UPDATE t1 SET a=b;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # TIMESTAMP: ALTER
+--echo #
+
+CREATE TABLE t1 (a TIMESTAMP(4) NULL DEFAULT NULL);
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+ALTER TABLE t1 MODIFY a TIMESTAMP(3) NULL DEFAULT NULL;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999');
+INSERT INTO t1 VALUES('2000-12-31 23:59:59.9999999');
+ALTER TABLE t1 MODIFY a TIMESTAMP(3) NULL DEFAULT NULL;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a DECIMAL(38,10));
+INSERT INTO t1 VALUES(20001231235959.9999);
+INSERT INTO t1 VALUES(20001231235959.9999999);
+ALTER TABLE t1 MODIFY a TIMESTAMP(3) NULL DEFAULT NULL;
+SELECT a FROM t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Corner case:
+--echo # ALTER TIMESTAMP to a shorter TIMESTAMP
+--echo # All values round, maximum possible value truncates.
+--echo #
+
+SET time_zone='+00:00';
+CREATE TABLE t1 (ID INT, a TIMESTAMP(6), comment VARCHAR(64));
+INSERT INTO t1 VALUES (0, '2038-01-18 23:59:59.999999', 'Should round');
+INSERT INTO t1 VALUES (1, '2038-01-19 03:14:06.999999', 'Should round');
+INSERT INTO t1 VALUES (2, '2038-01-19 03:14:07.999999', 'Should truncate');
+ALTER TABLE t1 MODIFY a TIMESTAMP(5);
+SELECT * FROM t1;
+DROP TABLE t1;
+SET time_zone=DEFAULT;
+
+--echo #
+--echo # NOW
+--echo #
+
+SET time_zone='+00:00';
+SET timestamp=UNIX_TIMESTAMP('2010-12-31 23:59:59.999999');
+CREATE OR REPLACE TABLE t1 (id SERIAL, a TIMESTAMP(4));
+INSERT INTO t1 (a) VALUES (now(6));
+INSERT INTO t1 (a) VALUES (CURRENT_TIMESTAMP(6));
+INSERT INTO t1 (a) VALUES (CURRENT_TIME(6));
+SELECT * FROM t1;
+DROP TABLE t1;
+SET timestamp=DEFAULT;
+SET time_zone=DEFAULT;
+
+
+--echo #
+--echo # DATETIME to TIMESTAMP conversion with DST change
+--echo #
+
+--disable_warnings
+SET sql_mode=IF(@@version LIKE '%MariaDB%',
+ 'STRICT_ALL_TABLES,TIME_ROUND_FRACTIONAL',
+ 'STRICT_ALL_TABLES');
+--enable_warnings
+SET time_zone='Europe/Moscow';
+CREATE TABLE t1 (a TIMESTAMP);
+INSERT INTO t1 VALUES ('2010-03-28 01:59:59.0' /* Winter time */);
+--error ER_TRUNCATED_WRONG_VALUE
+INSERT INTO t1 VALUES ('2010-03-28 01:59:59.9' /* Rounds to the DST gap */);
+SELECT * FROM t1;
+DROP TABLE t1;
+SET time_zone=DEFAULT;
+--disable_warnings
+SET sql_mode=@default_sql_mode;
+--enable_warnings
+
+SET sql_mode=IF(@@version LIKE '%MariaDB%','TIME_ROUND_FRACTIONAL','');
+SET time_zone='Europe/Moscow';
+CREATE TABLE t1 (a TIMESTAMP);
+INSERT INTO t1 VALUES ('2010-03-28 01:59:59.0' /* Winter time */);
+INSERT INTO t1 VALUES ('2010-03-28 01:59:59.9' /* Rounds to the DST gap, then gets fixed to the first second of the summer time */);
+SELECT a, UNIX_TIMESTAMP(a) FROM t1;
+DROP TABLE t1;
+SET time_zone=DEFAULT;
+--disable_warnings
+SET sql_mode=@default_sql_mode;
+--enable_warnings
+
+
+--echo #
+--echo # Comparing non-temporal to TIMESTAMP
+--echo #
+
+CREATE TABLE t1 (a VARCHAR(64));
+INSERT t1 VALUES ('2001-01-01 23:59:59.9999999');
+CREATE TABLE t2 (a TIMESTAMP);
+INSERT INTO t2 VALUES ('2001-01-02 00:00:00');
+SELECT * FROM t1,t2 WHERE t1.a=t2.a;
+SELECT * FROM t1,t2 WHERE CONCAT(t1.a)=t2.a;
+SELECT * FROM t1,t2 WHERE COALESCE(t1.a)=t2.a;
+DROP TABLE t1,t2;
+
+CREATE TABLE t1 (a DECIMAL(32,7));
+INSERT t1 VALUES (20010101235959.9999999);
+CREATE TABLE t2 (a TIMESTAMP);
+INSERT INTO t2 VALUES ('2001-01-02 00:00:00');
+SELECT * FROM t1,t2 WHERE t1.a=t2.a;
+SELECT * FROM t1,t2 WHERE COALESCE(t1.a)=t2.a;
+DROP TABLE t1,t2;
+
+
+--echo #
+--echo # MDEV-13995 MAX(timestamp) returns a wrong result near DST change
+--echo #
+--echo # Test Field_timestamp::store_native()
+--echo #
+
+SET sql_mode=@default_sql_mode;
+SET time_zone='+00:00';
+CREATE TABLE t1 (ts0 TIMESTAMP, ts1 TIMESTAMP(1));
+INSERT INTO t1 VALUES ('2001-01-01 10:20:30', '2001-01-01 10:20:30.9');
+SELECT * FROM t1;
+--echo # This should round
+UPDATE t1 SET ts0=COALESCE(ts1);
+SELECT * FROM t1;
+--echo # Corner case
+UPDATE t1 SET ts1=FROM_UNIXTIME(2147483647.9);
+UPDATE t1 SET ts0=COALESCE(ts1);
+SELECT * FROM t1;
+DROP TABLE t1;
+SET time_zone=DEFAULT;
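A note on the corner case exercised above: with the classic 32-bit TIMESTAMP upper bound of '2038-01-19 03:14:07' UTC, rounding '2038-01-19 03:14:07.999999' up to a shorter fractional precision would overflow the range, so the value is truncated instead of rounded. A minimal standalone sketch, assuming the same fractional-rounding mode the surrounding test enables (table name ts_demo is illustrative, not part of the patch):

SET time_zone='+00:00';
CREATE TABLE ts_demo (a TIMESTAMP(6));
# Rounds up to 2038-01-19 03:14:07.00000 when the precision is reduced:
INSERT INTO ts_demo VALUES ('2038-01-19 03:14:06.999999');
# Rounding up would overflow the range, so this one truncates to ...:07.99999:
INSERT INTO ts_demo VALUES ('2038-01-19 03:14:07.999999');
ALTER TABLE ts_demo MODIFY a TIMESTAMP(5);
SELECT a FROM ts_demo;
DROP TABLE ts_demo;
SET time_zone=DEFAULT;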
diff --git a/mysql-test/main/type_varchar.result b/mysql-test/main/type_varchar.result
index 0b2a5b54d08..8911d36a020 100644
--- a/mysql-test/main/type_varchar.result
+++ b/mysql-test/main/type_varchar.result
@@ -12,7 +12,7 @@ t1 CREATE TABLE `t1` (
show create table vchar;
Table Create Table
vchar CREATE TABLE `vchar` (
- `v` varchar(30) DEFAULT NULL,
+ `v` varchar(30)/*old*/ DEFAULT NULL,
`c` char(3) DEFAULT NULL,
`e` enum('abc','def','ghi') DEFAULT NULL,
`t` text DEFAULT NULL
@@ -108,7 +108,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 257 const 3 Using where; Using index
explain select * from t1 where v like 'S%' order by v;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v v 257 NULL 2 Using where; Using index
+1 SIMPLE t1 range v v 257 NULL 3 Using where; Using index
alter table t1 change v v varchar(255);
select * from t1 where v like 'This is a test' order by v;
v
@@ -132,7 +132,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 258 const 3 Using where; Using index
explain select * from t1 where v like 'S%' order by v;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v v 258 NULL 2 Using where; Using index
+1 SIMPLE t1 range v v 258 NULL 3 Using where; Using index
alter table t1 change v v varchar(256);
select * from t1 where v like 'This is a test' order by v;
v
@@ -156,7 +156,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 259 const 3 Using where; Using index
explain select * from t1 where v like 'S%' order by v;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v v 259 NULL 2 Using where; Using index
+1 SIMPLE t1 range v v 259 NULL 3 Using where; Using index
alter table t1 change v v varchar(257);
select * from t1 where v like 'This is a test' order by v;
v
@@ -180,7 +180,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 260 const 3 Using where; Using index
explain select * from t1 where v like 'S%' order by v;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v v 260 NULL 2 Using where; Using index
+1 SIMPLE t1 range v v 260 NULL 3 Using where; Using index
alter table t1 change v v varchar(258);
select * from t1 where v like 'This is a test' order by v;
v
@@ -204,7 +204,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 261 const 3 Using where; Using index
explain select * from t1 where v like 'S%' order by v;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v v 261 NULL 2 Using where; Using index
+1 SIMPLE t1 range v v 261 NULL 3 Using where; Using index
alter table t1 change v v varchar(259);
select * from t1 where v like 'This is a test' order by v;
v
@@ -228,7 +228,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 262 const 3 Using where; Using index
explain select * from t1 where v like 'S%' order by v;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v v 262 NULL 2 Using where; Using index
+1 SIMPLE t1 range v v 262 NULL 3 Using where; Using index
alter table t1 change v v varchar(258);
select * from t1 where v like 'This is a test' order by v;
v
@@ -252,7 +252,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 261 const 3 Using where; Using index
explain select * from t1 where v like 'S%' order by v;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v v 261 NULL 2 Using where; Using index
+1 SIMPLE t1 range v v 261 NULL 3 Using where; Using index
alter table t1 change v v varchar(257);
select * from t1 where v like 'This is a test' order by v;
v
@@ -276,7 +276,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 260 const 3 Using where; Using index
explain select * from t1 where v like 'S%' order by v;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v v 260 NULL 2 Using where; Using index
+1 SIMPLE t1 range v v 260 NULL 3 Using where; Using index
alter table t1 change v v varchar(256);
select * from t1 where v like 'This is a test' order by v;
v
@@ -300,7 +300,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 259 const 3 Using where; Using index
explain select * from t1 where v like 'S%' order by v;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v v 259 NULL 2 Using where; Using index
+1 SIMPLE t1 range v v 259 NULL 3 Using where; Using index
alter table t1 change v v varchar(255);
select * from t1 where v like 'This is a test' order by v;
v
@@ -324,7 +324,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 258 const 3 Using where; Using index
explain select * from t1 where v like 'S%' order by v;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v v 258 NULL 2 Using where; Using index
+1 SIMPLE t1 range v v 258 NULL 3 Using where; Using index
alter table t1 change v v varchar(254);
select * from t1 where v like 'This is a test' order by v;
v
@@ -348,7 +348,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 257 const 3 Using where; Using index
explain select * from t1 where v like 'S%' order by v;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v v 257 NULL 2 Using where; Using index
+1 SIMPLE t1 range v v 257 NULL 3 Using where; Using index
alter table t1 change v v varchar(253);
alter table t1 change v v varchar(254), drop key v;
alter table t1 change v v varchar(300), add key (v(10));
@@ -374,7 +374,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref v v 13 const 4 Using where
explain select * from t1 where v like 'S%' order by v;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range v v 13 NULL 2 Using where; Using filesort
+1 SIMPLE t1 range v v 13 NULL 3 Using where; Using filesort
drop table t1;
create table t1 (pkcol varchar(16), othercol varchar(16), primary key (pkcol));
insert into t1 values ('test', 'something');
@@ -686,3 +686,40 @@ DROP TABLE t1,t2;
#
# End of 10.0 tests
#
+#
+# Start of 10.4 tests
+#
+SET sql_mode='';
+CREATE TABLE t1 (c VARCHAR(1) DEFAULT 'foo');
+ERROR 42000: Invalid default value for 'c'
+SHOW WARNINGS;
+Level Code Message
+Warning 1265 Data truncated for column 'c' at row 1
+Error 1067 Invalid default value for 'c'
+SET sql_mode='STRICT_ALL_TABLES';
+CREATE TABLE t1 (c VARCHAR(1) DEFAULT 'foo');
+ERROR 42000: Invalid default value for 'c'
+SHOW WARNINGS;
+Level Code Message
+Warning 1265 Data truncated for column 'c' at row 1
+Error 1067 Invalid default value for 'c'
+CREATE TABLE t1 (c VARCHAR(1));
+SET sql_mode='';
+ALTER TABLE t1 ALTER column c SET DEFAULT 'foo';
+ERROR 42000: Invalid default value for 'c'
+SHOW WARNINGS;
+Level Code Message
+Warning 1265 Data truncated for column 'c' at row 1
+Error 1067 Invalid default value for 'c'
+SET sql_mode='STRICT_ALL_TABLES';
+ALTER TABLE t1 ALTER column c SET DEFAULT 'foo';
+ERROR 42000: Invalid default value for 'c'
+SHOW WARNINGS;
+Level Code Message
+Warning 1265 Data truncated for column 'c' at row 1
+Error 1067 Invalid default value for 'c'
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/type_varchar.test b/mysql-test/main/type_varchar.test
index d70cb86fa7b..86f16afc56f 100644
--- a/mysql-test/main/type_varchar.test
+++ b/mysql-test/main/type_varchar.test
@@ -328,3 +328,34 @@ DROP TABLE t1,t2;
--echo #
--echo # End of 10.0 tests
--echo #
+
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+SET sql_mode='';
+--error ER_INVALID_DEFAULT
+CREATE TABLE t1 (c VARCHAR(1) DEFAULT 'foo');
+SHOW WARNINGS;
+
+SET sql_mode='STRICT_ALL_TABLES';
+--error ER_INVALID_DEFAULT
+CREATE TABLE t1 (c VARCHAR(1) DEFAULT 'foo');
+SHOW WARNINGS;
+
+CREATE TABLE t1 (c VARCHAR(1));
+SET sql_mode='';
+--error ER_INVALID_DEFAULT
+ALTER TABLE t1 ALTER column c SET DEFAULT 'foo';
+SHOW WARNINGS;
+SET sql_mode='STRICT_ALL_TABLES';
+--error ER_INVALID_DEFAULT
+ALTER TABLE t1 ALTER column c SET DEFAULT 'foo';
+SHOW WARNINGS;
+DROP TABLE t1;
+SET sql_mode=DEFAULT;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
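For contrast with the error cases above: a DEFAULT that actually fits the column is still accepted in both modes, so only defaults that would be truncated now raise ER_INVALID_DEFAULT. A minimal sketch (table name t_ok is illustrative, not part of the patch):

SET sql_mode='';
CREATE TABLE t_ok (c VARCHAR(1) DEFAULT 'f');
SET sql_mode='STRICT_ALL_TABLES';
ALTER TABLE t_ok ALTER COLUMN c SET DEFAULT 'g';
DROP TABLE t_ok;
SET sql_mode=DEFAULT;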
diff --git a/mysql-test/main/type_varchar_mysql41.result b/mysql-test/main/type_varchar_mysql41.result
new file mode 100644
index 00000000000..116e29a27c5
--- /dev/null
+++ b/mysql-test/main/type_varchar_mysql41.result
@@ -0,0 +1,113 @@
+#
+# MDEV-16325 CREATE..SELECT..UNION creates a wrong field type for old varchar
+#
+CREATE PROCEDURE p1(col VARCHAR(32))
+BEGIN
+EXECUTE IMMEDIATE REPLACE('CREATE TABLE t2_simple AS SELECT col FROM t1old','col',col);
+SHOW CREATE TABLE t2_simple;
+DROP TABLE t2_simple;
+EXECUTE IMMEDIATE REPLACE('CREATE TABLE t2_union_vv AS SELECT col FROM t1old UNION SELECT col FROM t1old','col',col);
+SHOW CREATE TABLE t2_union_vv;
+DROP TABLE t2_union_vv;
+EXECUTE IMMEDIATE REPLACE('CREATE TABLE t2_union_vn AS SELECT col FROM t1old UNION SELECT NULL','col',col);
+SHOW CREATE TABLE t2_union_vn;
+DROP TABLE t2_union_vn;
+EXECUTE IMMEDIATE REPLACE('CREATE TABLE t2_union_nv AS SELECT NULL AS col UNION SELECT col FROM t1old','col',col);
+SHOW CREATE TABLE t2_union_nv;
+DROP TABLE t2_union_nv;
+EXECUTE IMMEDIATE REPLACE('CREATE TABLE t2 AS SELECT
+ COALESCE(col),
+ COALESCE(col,col),
+ COALESCE(col,NULL),
+ COALESCE(NULL,col)
+ FROM t1old', 'col', col);
+SHOW CREATE TABLE t2;
+DROP TABLE t2;
+EXECUTE IMMEDIATE REPLACE('CREATE TABLE t2 AS SELECT
+ LEAST(col,col),
+ LEAST(col,NULL),
+ LEAST(NULL,col)
+ FROM t1old','col',col);
+SHOW CREATE TABLE t2;
+DROP TABLE t2;
+END;
+$$
+TRUNCATE TABLE t1old;
+SHOW CREATE TABLE t1old;
+Table Create Table
+t1old CREATE TABLE `t1old` (
+ `v` varchar(30)/*old*/ DEFAULT NULL,
+ `c` char(3) DEFAULT NULL,
+ `e` enum('abc','def','ghi') DEFAULT NULL,
+ `t` text DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+CALL p1('v');
+Table Create Table
+t2_simple CREATE TABLE `t2_simple` (
+ `v` varchar(30) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+Table Create Table
+t2_union_vv CREATE TABLE `t2_union_vv` (
+ `v` varchar(30) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+Table Create Table
+t2_union_vn CREATE TABLE `t2_union_vn` (
+ `v` varchar(30) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+Table Create Table
+t2_union_nv CREATE TABLE `t2_union_nv` (
+ `v` varchar(30) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `COALESCE(v)` varchar(30) DEFAULT NULL,
+ `COALESCE(v,v)` varchar(30) DEFAULT NULL,
+ `COALESCE(v,NULL)` varchar(30) DEFAULT NULL,
+ `COALESCE(NULL,v)` varchar(30) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `LEAST(v,v)` varchar(30) DEFAULT NULL,
+ `LEAST(v,NULL)` varchar(30) DEFAULT NULL,
+ `LEAST(NULL,v)` varchar(30) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1old;
+TRUNCATE TABLE t1old;
+SHOW CREATE TABLE t1old;
+Table Create Table
+t1old CREATE TABLE `t1old` (
+ `a` varbinary(255)/*old*/ DEFAULT NULL,
+ `b` varchar(255)/*old*/ DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+CALL p1('a');
+Table Create Table
+t2_simple CREATE TABLE `t2_simple` (
+ `a` varbinary(255) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+Table Create Table
+t2_union_vv CREATE TABLE `t2_union_vv` (
+ `a` varbinary(255) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+Table Create Table
+t2_union_vn CREATE TABLE `t2_union_vn` (
+ `a` varbinary(255) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+Table Create Table
+t2_union_nv CREATE TABLE `t2_union_nv` (
+ `a` varbinary(255) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `COALESCE(a)` varbinary(255) DEFAULT NULL,
+ `COALESCE(a,a)` varbinary(255) DEFAULT NULL,
+ `COALESCE(a,NULL)` varbinary(255) DEFAULT NULL,
+ `COALESCE(NULL,a)` varbinary(255) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `LEAST(a,a)` varbinary(255) DEFAULT NULL,
+ `LEAST(a,NULL)` varbinary(255) DEFAULT NULL,
+ `LEAST(NULL,a)` varbinary(255) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1old;
+DROP PROCEDURE p1;
diff --git a/mysql-test/main/type_varchar_mysql41.test b/mysql-test/main/type_varchar_mysql41.test
new file mode 100644
index 00000000000..5624e9edaaa
--- /dev/null
+++ b/mysql-test/main/type_varchar_mysql41.test
@@ -0,0 +1,59 @@
+--echo #
+--echo # MDEV-16325 CREATE..SELECT..UNION creates a wrong field type for old varchar
+--echo #
+
+
+DELIMITER $$;
+CREATE PROCEDURE p1(col VARCHAR(32))
+BEGIN
+ EXECUTE IMMEDIATE REPLACE('CREATE TABLE t2_simple AS SELECT col FROM t1old','col',col);
+ SHOW CREATE TABLE t2_simple;
+ DROP TABLE t2_simple;
+
+ EXECUTE IMMEDIATE REPLACE('CREATE TABLE t2_union_vv AS SELECT col FROM t1old UNION SELECT col FROM t1old','col',col);
+ SHOW CREATE TABLE t2_union_vv;
+ DROP TABLE t2_union_vv;
+
+ EXECUTE IMMEDIATE REPLACE('CREATE TABLE t2_union_vn AS SELECT col FROM t1old UNION SELECT NULL','col',col);
+ SHOW CREATE TABLE t2_union_vn;
+ DROP TABLE t2_union_vn;
+
+ EXECUTE IMMEDIATE REPLACE('CREATE TABLE t2_union_nv AS SELECT NULL AS col UNION SELECT col FROM t1old','col',col);
+ SHOW CREATE TABLE t2_union_nv;
+ DROP TABLE t2_union_nv;
+
+ EXECUTE IMMEDIATE REPLACE('CREATE TABLE t2 AS SELECT
+ COALESCE(col),
+ COALESCE(col,col),
+ COALESCE(col,NULL),
+ COALESCE(NULL,col)
+ FROM t1old', 'col', col);
+ SHOW CREATE TABLE t2;
+ DROP TABLE t2;
+
+ EXECUTE IMMEDIATE REPLACE('CREATE TABLE t2 AS SELECT
+ LEAST(col,col),
+ LEAST(col,NULL),
+ LEAST(NULL,col)
+ FROM t1old','col',col);
+ SHOW CREATE TABLE t2;
+ DROP TABLE t2;
+END;
+$$
+DELIMITER ;$$
+
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
+copy_file $MYSQL_TEST_DIR/std_data/vchar.frm $MYSQLD_DATADIR/test/t1old.frm;
+TRUNCATE TABLE t1old;
+SHOW CREATE TABLE t1old;
+CALL p1('v');
+DROP TABLE t1old;
+
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
+copy_file $MYSQL_TEST_DIR/std_data/bug19371.frm $MYSQLD_DATADIR/test/t1old.frm;
+TRUNCATE TABLE t1old;
+SHOW CREATE TABLE t1old;
+CALL p1('a');
+DROP TABLE t1old;
+
+DROP PROCEDURE p1;
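The old-format column here is obtained by copying a pre-5.0 .frm from std_data into the datadir, which is why SHOW CREATE TABLE reports `varchar(30)/*old*/`. After the MDEV-16325 fix, any CREATE..SELECT over such a column (plain, UNION, COALESCE, LEAST) produces the modern type. A minimal check, assuming t1old exists as set up above (table name t2_check is illustrative):

CREATE TABLE t2_check AS SELECT v FROM t1old UNION SELECT v FROM t1old;
# Expected: `v` varchar(30) DEFAULT NULL, without the /*old*/ marker
SHOW CREATE TABLE t2_check;
DROP TABLE t2_check;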
diff --git a/mysql-test/main/type_year.result b/mysql-test/main/type_year.result
index 99c3c50ea8c..71285418588 100644
--- a/mysql-test/main/type_year.result
+++ b/mysql-test/main/type_year.result
@@ -502,3 +502,88 @@ DROP TABLE t1;
#
# End of 10.2 tests
#
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-16451 Split Item_equal::add_const() into a virtual method in type_handler()
+#
+CREATE TABLE t1 (a YEAR(4));
+INSERT INTO t1 VALUES (93),(94);
+SELECT * FROM t1;
+a
+1993
+1994
+SELECT * FROM t1 WHERE a=1993 and a=93;
+a
+1993
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=1993 and a=93;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 1993
+DROP TABLE t1;
+CREATE TABLE t1 (a YEAR(2));
+Warnings:
+Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead
+INSERT INTO t1 VALUES (93),(94);
+SELECT * FROM t1;
+a
+93
+94
+SELECT * FROM t1 WHERE a=1993 and a=93;
+a
+93
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=1993 and a=93;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 2 100.00 Using where
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = 93
+DROP TABLE t1;
+#
+# MDEV-16926 CAST(COALESCE(year_field)) returns wrong value
+#
+CREATE OR REPLACE TABLE t1 (a YEAR);
+INSERT INTO t1 VALUES (1970),(1978),(2000),(2069);
+SELECT a, CAST(a AS DATE), CAST(COALESCE(a) AS DATE) FROM t1;
+a CAST(a AS DATE) CAST(COALESCE(a) AS DATE)
+1970 1970-00-00 1970-00-00
+1978 1978-00-00 1978-00-00
+2000 2000-00-00 2000-00-00
+2069 2069-00-00 2069-00-00
+SELECT MIN(a), MAX(a) FROM t1;
+MIN(a) MAX(a)
+1970 2069
+DROP TABLE t1;
+CREATE OR REPLACE TABLE t1 (a YEAR(2));
+Warnings:
+Note 1287 'YEAR(2)' is deprecated and will be removed in a future release. Please use YEAR(4) instead
+INSERT INTO t1 VALUES (1970),(1978),(2000),(2069);
+SELECT a, CAST(a AS DATE), CAST(COALESCE(a) AS DATE) FROM t1;
+a CAST(a AS DATE) CAST(COALESCE(a) AS DATE)
+70 1970-00-00 1970-00-00
+78 1978-00-00 1978-00-00
+00 2000-00-00 2000-00-00
+69 2069-00-00 2069-00-00
+SELECT MIN(a), MAX(a) FROM t1;
+MIN(a) MAX(a)
+70 69
+DROP TABLE t1;
+#
+# MDEV-17015 Assertion `m_year <= 9999' failed in Year::Year upon bad argument to MAKEDATE
+#
+SELECT MAKEDATE(18446744073709551615, 1);
+MAKEDATE(18446744073709551615, 1)
+NULL
+#
+# MDEV-17607 DATE(COALESCE(year_column)) returns a wrong result
+#
+CREATE TABLE t1 (a YEAR);
+INSERT INTO t1 VALUES (NULL);
+SELECT COALESCE(a), DATE(COALESCE(a)) FROM t1;
+COALESCE(a) DATE(COALESCE(a))
+NULL NULL
+DROP TABLE t1;
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/main/type_year.test b/mysql-test/main/type_year.test
index af26a69c581..3c578b3ab59 100644
--- a/mysql-test/main/type_year.test
+++ b/mysql-test/main/type_year.test
@@ -272,3 +272,60 @@ DROP TABLE t1;
--echo #
--echo # End of 10.2 tests
--echo #
+
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-16451 Split Item_equal::add_const() into a virtual method in type_handler()
+--echo #
+
+CREATE TABLE t1 (a YEAR(4));
+INSERT INTO t1 VALUES (93),(94);
+SELECT * FROM t1;
+SELECT * FROM t1 WHERE a=1993 and a=93;
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=1993 and a=93;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a YEAR(2));
+INSERT INTO t1 VALUES (93),(94);
+SELECT * FROM t1;
+SELECT * FROM t1 WHERE a=1993 and a=93;
+EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=1993 and a=93;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-16926 CAST(COALESCE(year_field)) returns wrong value
+--echo #
+
+CREATE OR REPLACE TABLE t1 (a YEAR);
+INSERT INTO t1 VALUES (1970),(1978),(2000),(2069);
+SELECT a, CAST(a AS DATE), CAST(COALESCE(a) AS DATE) FROM t1;
+SELECT MIN(a), MAX(a) FROM t1;
+DROP TABLE t1;
+
+CREATE OR REPLACE TABLE t1 (a YEAR(2));
+INSERT INTO t1 VALUES (1970),(1978),(2000),(2069);
+SELECT a, CAST(a AS DATE), CAST(COALESCE(a) AS DATE) FROM t1;
+SELECT MIN(a), MAX(a) FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-17015 Assertion `m_year <= 9999' failed in Year::Year upon bad argument to MAKEDATE
+--echo #
+SELECT MAKEDATE(18446744073709551615, 1);
+
+--echo #
+--echo # MDEV-17607 DATE(COALESCE(year_column)) returns a wrong result
+--echo #
+
+CREATE TABLE t1 (a YEAR);
+INSERT INTO t1 VALUES (NULL);
+SELECT COALESCE(a), DATE(COALESCE(a)) FROM t1;
+DROP TABLE t1;
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
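The seemingly reversed MIN(a)=70, MAX(a)=69 in the expected results follows from YEAR(2) display semantics: underlying years 1970-1999 display as 70-99 and 2000-2069 as 00-69, so the two-digit output hides the ordering while comparisons and aggregates still use the full year. A minimal sketch (table name y2_demo is illustrative):

CREATE TABLE y2_demo (a YEAR(2));
INSERT INTO y2_demo VALUES (1970),(2069);
# Displays 70 and 69, but the CAST shows both still carry the full year:
SELECT a, CAST(a AS DATE) FROM y2_demo;
SELECT MIN(a), MAX(a) FROM y2_demo;
DROP TABLE y2_demo;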
diff --git a/mysql-test/main/udf.result b/mysql-test/main/udf.result
index edbae7e046f..6655982a38f 100644
--- a/mysql-test/main/udf.result
+++ b/mysql-test/main/udf.result
@@ -479,3 +479,108 @@ myfunc_int(max(a) over (order by b) , b)
drop view v1;
drop function myfunc_int;
drop table t1;
+
+MDEV-15073: Generic UDAF parser code in server for window functions
+
+CREATE AGGREGATE FUNCTION avgcost
+RETURNS REAL SONAME "UDF_EXAMPLE_LIB";
+CREATE AGGREGATE FUNCTION avg2
+RETURNS REAL SONAME "UDF_EXAMPLE_LIB";
+CREATE FUNCTION myfunc_double RETURNS REAL SONAME "UDF_EXAMPLE_LIB";
+create table t1(pk int primary key,
+a int,
+sum int,
+price float(24));
+insert into t1 values
+(1, 1, 100, 50.00),
+(2, 1, 100, 100.00),
+(3, 1, 100, 50.00),
+(4, 1, 100, 50.00),
+(5, 1, 100, 50.00),
+(6, 1, 100, NULL),
+(7, 1, NULL, NULL),
+(8, 2, 2, 2),
+(9, 2, 4, 4);
+select pk, a, sum, price, avgcost(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
+from t1;
+pk a sum price avgcost(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
+1 1 100 50 75.0000
+2 1 100 100 66.6667
+3 1 100 50 66.6667
+4 1 100 50 50.0000
+5 1 100 50 50.0000
+6 1 100 NULL 50.0000
+7 1 NULL NULL 0.0000
+8 2 2 2 3.3333
+9 2 4 4 3.3333
+select pk, a, sum, price, avgcost(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+pk a sum price avgcost(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+1 1 100 50 50.0000
+2 1 100 100 75.0000
+3 1 100 50 75.0000
+4 1 100 50 50.0000
+5 1 100 50 50.0000
+6 1 100 NULL 50.0000
+7 1 NULL NULL 0.0000
+8 2 2 2 2.0000
+9 2 4 4 3.3333
+select pk, a, sum, price, avg2(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
+from t1;
+pk a sum price avg2(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
+1 1 100 50 0.7500
+2 1 100 100 0.6667
+3 1 100 50 0.6667
+4 1 100 50 0.5000
+5 1 100 50 0.5000
+6 1 100 NULL 0.5000
+7 1 NULL NULL 0.0000
+8 2 2 2 1.0000
+9 2 4 4 1.0000
+select pk, a, sum, price, avg2(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+pk a sum price avg2(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+1 1 100 50 0.5000
+2 1 100 100 0.7500
+3 1 100 50 0.7500
+4 1 100 50 0.5000
+5 1 100 50 0.5000
+6 1 100 NULL 0.5000
+7 1 NULL NULL 0.0000
+8 2 2 2 1.0000
+9 2 4 4 1.0000
+select pk, a, sum, price, tttttttt(sprice,sum) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from ' at line 1
+select pk, a, sum, price, myfunc_double(sum) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from ' at line 1
+select pk, a, sum, price, round(sprice,sum) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from ' at line 1
+select pk, a, sum, price, myfunc_double(sum) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from ' at line 1
+set @save_sql_mode = @@sql_mode;
+set sql_mode="oracle";
+select pk, a, sum, price, avg2(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+pk a sum price avg2(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+1 1 100 50 0.5000
+2 1 100 100 0.7500
+3 1 100 50 0.7500
+4 1 100 50 0.5000
+5 1 100 50 0.5000
+6 1 100 NULL 0.5000
+7 1 NULL NULL 0.0000
+8 2 2 2 1.0000
+9 2 4 4 1.0000
+set sql_mode= @save_sql_mode;
+drop table t1;
+DROP FUNCTION avgcost;
+DROP FUNCTION avg2;
+DROP FUNCTION myfunc_double;
diff --git a/mysql-test/main/udf.test b/mysql-test/main/udf.test
index d2c0dad8398..bb8493135aa 100644
--- a/mysql-test/main/udf.test
+++ b/mysql-test/main/udf.test
@@ -541,3 +541,69 @@ select * from v1;
drop view v1;
drop function myfunc_int;
drop table t1;
+
+--echo
+--echo MDEV-15073: Generic UDAF parser code in server for window functions
+--echo
+
+--replace_result $UDF_EXAMPLE_SO UDF_EXAMPLE_LIB
+eval CREATE AGGREGATE FUNCTION avgcost
+ RETURNS REAL SONAME "$UDF_EXAMPLE_SO";
+--replace_result $UDF_EXAMPLE_SO UDF_EXAMPLE_LIB
+eval CREATE AGGREGATE FUNCTION avg2
+ RETURNS REAL SONAME "$UDF_EXAMPLE_SO";
+--replace_result $UDF_EXAMPLE_SO UDF_EXAMPLE_LIB
+eval CREATE FUNCTION myfunc_double RETURNS REAL SONAME "$UDF_EXAMPLE_SO";
+
+create table t1(pk int primary key,
+ a int,
+ sum int,
+ price float(24));
+insert into t1 values
+ (1, 1, 100, 50.00),
+ (2, 1, 100, 100.00),
+ (3, 1, 100, 50.00),
+ (4, 1, 100, 50.00),
+ (5, 1, 100, 50.00),
+ (6, 1, 100, NULL),
+ (7, 1, NULL, NULL),
+ (8, 2, 2, 2),
+ (9, 2, 4, 4);
+
+--sorted_result
+select pk, a, sum, price, avgcost(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
+from t1;
+--sorted_result
+select pk, a, sum, price, avgcost(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+
+--sorted_result
+select pk, a, sum, price, avg2(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING)
+from t1;
+--sorted_result
+select pk, a, sum, price, avg2(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+--error ER_PARSE_ERROR
+select pk, a, sum, price, tttttttt(sprice,sum) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+--error ER_PARSE_ERROR
+select pk, a, sum, price, myfunc_double(sum) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+--error ER_PARSE_ERROR
+select pk, a, sum, price, round(sprice,sum) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+--error ER_PARSE_ERROR
+select pk, a, sum, price, myfunc_double(sum) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+
+set @save_sql_mode = @@sql_mode;
+set sql_mode="oracle";
+--sorted_result
+select pk, a, sum, price, avg2(sum, price) over (partition by a order by pk ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING)
+from t1;
+set sql_mode= @save_sql_mode;
+
+drop table t1;
+DROP FUNCTION avgcost;
+DROP FUNCTION avg2;
+DROP FUNCTION myfunc_double;
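The error cases above show the rule the parser now enforces: only functions created with CREATE AGGREGATE FUNCTION may carry an OVER clause; ordinary UDFs (myfunc_double), built-in scalar functions (round) and unknown names (tttttttt) are all rejected with a parse error. As a cross-check of the expected numbers, avgcost in the bundled udf_example appears to compute total cost over total quantity (sum(qty*price)/sum(qty)), which is consistent with the ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING frames in the result file; a runnable verification of the first two frames of the a=1 partition:

SELECT (100*50 + 100*100) / (100 + 100)           AS frame_pk1,  # 75.0000
       (100*50 + 100*100 + 100*50) / (100*3)      AS frame_pk2;  # 66.6667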
diff --git a/mysql-test/main/udf_notembedded.result b/mysql-test/main/udf_notembedded.result
index 377af563d3e..80df03862b2 100644
--- a/mysql-test/main/udf_notembedded.result
+++ b/mysql-test/main/udf_notembedded.result
@@ -2,5 +2,6 @@ create function udf_sequence returns integer soname "UDF_EXAMPLE_LIB";
create table t1 (n int key not null auto_increment, msg int as (udf_sequence()) virtual);
select * from t1;
n msg
+# restart
drop table t1;
drop function udf_sequence;
diff --git a/mysql-test/main/union.result b/mysql-test/main/union.result
index ef767b1d5af..a0421bae922 100644
--- a/mysql-test/main/union.result
+++ b/mysql-test/main/union.result
@@ -81,7 +81,7 @@ a b
2 b
1 a
(select a,b from t1 limit 2) union all (select a,b from t2 order by a limit 1) order by t1.b;
-ERROR 42000: Table 't1' from one of the SELECTs cannot be used in global ORDER clause
+ERROR 42000: Table 't1' from one of the SELECTs cannot be used in ORDER clause
explain extended (select a,b from t1 limit 2) union all (select a,b from t2 order by a limit 1) order by b desc;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 100.00
@@ -494,7 +494,7 @@ drop temporary table t1;
create table t1 select a from t1 union select a from t2;
ERROR 42S01: Table 't1' already exists
select a from t1 union select a from t2 order by t2.a;
-ERROR 42000: Table 't2' from one of the SELECTs cannot be used in field list
+ERROR 42000: Table 't2' from one of the SELECTs cannot be used in ORDER clause
drop table t1,t2;
select length(version()) > 1 as `*` UNION select 2;
*
@@ -564,7 +564,7 @@ explain (select * from t1 where a=1 and b=10) union (select straight_join t1.a,t
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
2 UNION t1 index PRIMARY PRIMARY 4 NULL 4 Using index
-2 UNION t2 index PRIMARY PRIMARY 4 NULL 4 Using where; Using index; Using join buffer (flat, BNL join)
+2 UNION t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 Using index
NULL UNION RESULT <union1,2> ALL NULL NULL NULL NULL NULL
explain (select * from t1 where a=1) union (select * from t1 where b=1);
id select_type table type possible_keys key key_len ref rows Extra
@@ -1532,12 +1532,15 @@ SELECT a FROM (SELECT a FROM t1 UNION SELECT a FROM t1 ORDER BY c) AS test;
ERROR 42S22: Unknown column 'c' in 'order clause'
DROP TABLE t1;
(select 1 into @var) union (select 1);
-ERROR HY000: Incorrect usage of UNION and INTO
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'into @var) union (select 1)' at line 1
(select 1) union (select 1 into @var);
-select @var;
-@var
-1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'into @var)' at line 1
(select 2) union (select 1 into @var);
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'into @var)' at line 1
+(select 1) union (select 1) into @var;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+(select 2) union (select 1) into @var;
ERROR 42000: Result consisted of more than one row
CREATE TABLE t1 (a int);
INSERT INTO t1 VALUES (10), (20);
@@ -1663,8 +1666,20 @@ UNION
SELECT a FROM t1 WHERE 0
) alias;
SELECT a FROM t1 UNION SELECT a INTO @v FROM t1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'FROM t1' at line 1
SELECT a FROM t1 UNION SELECT a INTO OUTFILE 'union.out.file5' FROM t1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'FROM t1' at line 1
SELECT a FROM t1 UNION SELECT a INTO OUTFILE 'union.out.file6' FROM t1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'FROM t1' at line 1
+SELECT a FROM t1 UNION SELECT a FROM t1 INTO @v ;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+SELECT a FROM t1 UNION SELECT a FROM t1 INTO OUTFILE 'union.out.file5';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+SELECT a FROM t1 UNION SELECT a FROM t1 INTO OUTFILE 'union.out.file6';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT a INTO @v FROM t1 UNION SELECT a FROM t1;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'UNION SELECT a FROM t1' at line 1
SELECT a INTO OUTFILE 'union.out.file7' FROM t1 UNION SELECT a FROM t1;
@@ -2019,14 +2034,14 @@ SET @@global.slow_query_log= @old_slow_query_log;
CREATE TABLE t1 (a int);
CREATE TABLE t2 (b int);
CREATE TABLE t3 (c int);
-SELECT a FROM t1 UNION SELECT b FROM t2 JOIN (t3) ON ( t2.b = t3.c );
+SELECT a FROM t1 UNION SELECT b FROM t2 JOIN t3 ON ( t2.b = t3.c );
a
DROP TABLE t1, t2, t3;
CREATE TABLE t1 (pk int NOT NULL);
CREATE TABLE t2 (pk int NOT NULL, fk int NOT NULL);
-SELECT t1.pk FROM t1 LEFT JOIN (t2) ON (t1.pk = t2.fk)
+SELECT t1.pk FROM t1 LEFT JOIN t2 ON (t1.pk = t2.fk)
UNION
-SELECT t1.pk FROM t1 LEFT JOIN (t2) ON (t1.pk = t2.fk);
+SELECT t1.pk FROM t1 LEFT JOIN t2 ON (t1.pk = t2.fk);
pk
DROP TABLE t1,t2;
create table t1 (a int);
@@ -2172,7 +2187,7 @@ select id from t5 where name = (select name from t3 where id = t1.product_id)) l
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 system PRIMARY NULL NULL NULL 1
2 SUBQUERY t4 eq_ref PRIMARY PRIMARY 4 func 1 Using where
-3 SUBQUERY t2 ref PRIMARY PRIMARY 4 const 3 Using index
+3 SUBQUERY t2 ref PRIMARY PRIMARY 4 const 4 Using index
4 UNION t2 ref PRIMARY PRIMARY 4 func 1 Using where; Using index
5 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
6 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
@@ -2315,9 +2330,9 @@ GROUP BY i
HAVING i = 10;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY NULL NULL NULL NULL NULL NULL NULL NULL No tables used
-2 UNION NULL NULL NULL NULL NULL NULL NULL NULL Impossible HAVING noticed after reading const tables
+2 UNION NULL NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables
Warnings:
-Note 1003 /* select#1 */ select 1 AS `1`,2 AS `2` union all /* select#2 */ select 1 AS `i`,count(0) AS `COUNT(*)` from `test`.`t2` where 1 group by 1 having 0
+Note 1003 /* select#1 */ select 1 AS `1`,2 AS `2` union all /* select#2 */ select 1 AS `i`,count(0) AS `COUNT(*)` from `test`.`t2` where 0 group by 1 having 1
DROP TABLE t1,t2;
#
# Start of 10.3 tests
diff --git a/mysql-test/main/union.test b/mysql-test/main/union.test
index 9d9194ebab6..9d25708611e 100644
--- a/mysql-test/main/union.test
+++ b/mysql-test/main/union.test
@@ -973,13 +973,17 @@ DROP TABLE t1;
#
# Bug#23345: Wrongly allowed INTO in a non-last select of a UNION.
+# (fixed)
#
---error 1221
+--error ER_PARSE_ERROR
(select 1 into @var) union (select 1);
+--error ER_PARSE_ERROR
(select 1) union (select 1 into @var);
-select @var;
---error 1172
+--error ER_PARSE_ERROR
(select 2) union (select 1 into @var);
+(select 1) union (select 1) into @var;
+--error ER_TOO_MANY_ROWS
+(select 2) union (select 1) into @var;
#
# Bug#27848: order-by of union clashes with rollup of select part
@@ -1099,9 +1103,15 @@ SELECT a INTO DUMPFILE 'union.out.file2' FROM (
SELECT a FROM t1 WHERE 0
) alias;
+--error ER_PARSE_ERROR
SELECT a FROM t1 UNION SELECT a INTO @v FROM t1;
+--error ER_PARSE_ERROR
SELECT a FROM t1 UNION SELECT a INTO OUTFILE 'union.out.file5' FROM t1;
+--error ER_PARSE_ERROR
SELECT a FROM t1 UNION SELECT a INTO OUTFILE 'union.out.file6' FROM t1;
+SELECT a FROM t1 UNION SELECT a FROM t1 INTO @v ;
+SELECT a FROM t1 UNION SELECT a FROM t1 INTO OUTFILE 'union.out.file5';
+SELECT a FROM t1 UNION SELECT a FROM t1 INTO OUTFILE 'union.out.file6';
--error ER_PARSE_ERROR
SELECT a INTO @v FROM t1 UNION SELECT a FROM t1;
--error ER_PARSE_ERROR
@@ -1361,15 +1371,15 @@ SET @@global.slow_query_log= @old_slow_query_log;
CREATE TABLE t1 (a int);
CREATE TABLE t2 (b int);
CREATE TABLE t3 (c int);
-SELECT a FROM t1 UNION SELECT b FROM t2 JOIN (t3) ON ( t2.b = t3.c );
+SELECT a FROM t1 UNION SELECT b FROM t2 JOIN t3 ON ( t2.b = t3.c );
DROP TABLE t1, t2, t3;
CREATE TABLE t1 (pk int NOT NULL);
CREATE TABLE t2 (pk int NOT NULL, fk int NOT NULL);
-SELECT t1.pk FROM t1 LEFT JOIN (t2) ON (t1.pk = t2.fk)
+SELECT t1.pk FROM t1 LEFT JOIN t2 ON (t1.pk = t2.fk)
UNION
-SELECT t1.pk FROM t1 LEFT JOIN (t2) ON (t1.pk = t2.fk);
+SELECT t1.pk FROM t1 LEFT JOIN t2 ON (t1.pk = t2.fk);
DROP TABLE t1,t2;
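The net effect of the Bug#23345 follow-up above: INTO is no longer accepted inside an individual SELECT of a UNION; it must follow the complete UNION, where it applies to the union result and currently draws a deprecation warning pointing at the SELECT ... INTO ... FROM form. A condensed sketch of the accepted and rejected shapes, taken from the test above:

(SELECT 1) UNION (SELECT 1) INTO @var;            # accepted, deprecation warning
SELECT a FROM t1 UNION SELECT a FROM t1 INTO @v;  # accepted, deprecation warning
(SELECT 1 INTO @var) UNION (SELECT 1);            # ER_PARSE_ERROR
SELECT a INTO @v FROM t1 UNION SELECT a FROM t1;  # ER_PARSE_ERROR
(SELECT 2) UNION (SELECT 1) INTO @var;            # ER_TOO_MANY_ROWS (two rows, one variable)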
diff --git a/mysql-test/main/unique.result b/mysql-test/main/unique.result
new file mode 100644
index 00000000000..e982e1c4163
--- /dev/null
+++ b/mysql-test/main/unique.result
@@ -0,0 +1,9 @@
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, a varchar(30000), UNIQUE (a)) ENGINE=innodb;
+INSERT INTO t1 (a) VALUES (20),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL);
+SELECT * FROM t1 WHERE a BETWEEN '1' AND '100';
+pk a
+DROP TABLE t1;
+CREATE TABLE t2 (n BLOB, UNIQUE(n));
+INSERT INTO t2 VALUES (1);
+DELETE FROM t2 WHERE n = 1;
+DROP TABLE t2;
diff --git a/mysql-test/main/unique.test b/mysql-test/main/unique.test
new file mode 100644
index 00000000000..677ceb15a79
--- /dev/null
+++ b/mysql-test/main/unique.test
@@ -0,0 +1,22 @@
+--source include/have_innodb.inc
+
+#
+# MDEV-19224 Assertion `marked_for_read()' failed in various places with long
+# unique key
+#
+
+CREATE TABLE t1 (pk INT AUTO_INCREMENT PRIMARY KEY, a varchar(30000), UNIQUE (a)) ENGINE=innodb;
+INSERT INTO t1 (a) VALUES (20),(NULL),(NULL),(NULL),(NULL),(NULL),(NULL);
+SELECT * FROM t1 WHERE a BETWEEN '1' AND '100';
+DROP TABLE t1;
+
+#
+# MDEV-19252 Warning about assertion failure marked_for_write_or_computed()
+# printed by release build with DBUG_ASSERT_AS_PRINTF, but no failure on debug
+# build
+#
+
+CREATE TABLE t2 (n BLOB, UNIQUE(n));
+INSERT INTO t2 VALUES (1);
+DELETE FROM t2 WHERE n = 1;
+DROP TABLE t2;
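Both regression cases above rely on the 10.4 long-unique feature: a UNIQUE constraint over a value too long for an ordinary index key (VARCHAR(30000), BLOB) is implemented through a hash of the value and a HASH index, which is the code path the MDEV-19224/MDEV-19252 assertions were tripping. A minimal sketch (table name is illustrative; the USING HASH note in SHOW CREATE TABLE is the expected indication on 10.4):

CREATE TABLE long_unique_demo (b BLOB, UNIQUE KEY(b)) ENGINE=InnoDB;
# Expected to show something like: UNIQUE KEY `b` (`b`) USING HASH
SHOW CREATE TABLE long_unique_demo;
DROP TABLE long_unique_demo;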
diff --git a/mysql-test/main/update.result b/mysql-test/main/update.result
index 9e19abc4e9c..f5edf1c6be3 100644
--- a/mysql-test/main/update.result
+++ b/mysql-test/main/update.result
@@ -234,7 +234,9 @@ insert into t2 values ( 1, 'abcd1e');
insert into t2 values ( 2, 'abcd2e');
analyze table t1,t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
update t1, t2 set t1.a = t2.a where t2.b = t1.b;
show warnings;
@@ -326,7 +328,7 @@ delete from t1 order by a limit 1;
show status like 'handler_read%';
Variable_name Value
Handler_read_first 0
-Handler_read_key 0
+Handler_read_key 4
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
@@ -447,7 +449,7 @@ UPDATE t1 SET user_id=null WHERE request_id=9999999999999;
show status like '%Handler_read%';
Variable_name Value
Handler_read_first 0
-Handler_read_key 3
+Handler_read_key 2
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
@@ -459,7 +461,7 @@ UPDATE t1 SET user_id=null WHERE request_id=999999999999999999999999999999;
show status like '%Handler_read%';
Variable_name Value
Handler_read_first 0
-Handler_read_key 3
+Handler_read_key 2
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
diff --git a/mysql-test/main/update_innodb.result b/mysql-test/main/update_innodb.result
index 695561122f0..beab54833d1 100644
--- a/mysql-test/main/update_innodb.result
+++ b/mysql-test/main/update_innodb.result
@@ -89,3 +89,57 @@ update t1, t2 set a=NULL, b=2, c=NULL where b=d and e=200;
drop table t1,t2;
set @@optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
set @@use_stat_tables= @save_use_stat_tables;
+CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY) engine=innodb;
+CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY) engine=innodb;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t2 VALUES (2);
+BEGIN;
+SELECT * FROM t1 UNION
+SELECT * FROM t2 FOR UPDATE;
+a
+1
+2
+connect con2,localhost,root,,;
+BEGIN;
+SELECT * FROM t2 FOR UPDATE;;
+connection default;
+select * from t2;
+a
+2
+update t2 set a=a+100;
+commit;
+connection con2;
+a
+102
+commit;
+connection default;
+drop table t1,t2;
+CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY) engine=innodb;
+CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY) engine=innodb;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t2 VALUES (2);
+BEGIN;
+SELECT * FROM (
+SELECT * FROM t1 UNION
+SELECT * FROM t2 FOR UPDATE
+) t;
+a
+1
+2
+connection con2;
+BEGIN;
+SELECT * FROM t2 FOR UPDATE;;
+connection default;
+select * from t2;
+a
+2
+update t2 set a=a+100;
+commit;
+connection con2;
+a
+102
+commit;
+connection default;
+disconnect con2;
+drop table t1,t2;
+# End of 10.4 tests
diff --git a/mysql-test/main/update_innodb.test b/mysql-test/main/update_innodb.test
index a29dd071cf8..a5c6acf8620 100644
--- a/mysql-test/main/update_innodb.test
+++ b/mysql-test/main/update_innodb.test
@@ -76,6 +76,7 @@ UPDATE t1 SET b_id = (SELECT t2.b_id FROM t2 t2 WHERE t2.c_id = t1.c_id);
SELECT * FROM t1;
drop table t1,t2;
+
--echo #
--echo # MDEV-18300: ASAN error in Field_blob::get_key_image upon UPDATE with subquery
--echo #
@@ -104,3 +105,59 @@ drop table t1,t2;
set @@optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
set @@use_stat_tables= @save_use_stat_tables;
+
+CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY) engine=innodb;
+CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY) engine=innodb;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t2 VALUES (2);
+BEGIN;
+SELECT * FROM t1 UNION
+SELECT * FROM t2 FOR UPDATE;
+
+--connect(con2,localhost,root,,)
+BEGIN;
+--send SELECT * FROM t2 FOR UPDATE;
+--connection default
+
+select * from t2;
+update t2 set a=a+100;
+commit;
+
+--connection con2
+--reap
+
+commit;
+
+--connection default
+drop table t1,t2;
+
+
+CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY) engine=innodb;
+CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY) engine=innodb;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t2 VALUES (2);
+BEGIN;
+SELECT * FROM (
+ SELECT * FROM t1 UNION
+ SELECT * FROM t2 FOR UPDATE
+ ) t;
+
+--connection con2
+BEGIN;
+--send SELECT * FROM t2 FOR UPDATE;
+--connection default
+
+select * from t2;
+update t2 set a=a+100;
+commit;
+
+--connection con2
+--reap
+
+commit;
+
+--connection default
+disconnect con2;
+drop table t1,t2;
+
+--echo # End of 10.4 tests
diff --git a/mysql-test/main/update_use_source.result b/mysql-test/main/update_use_source.result
index e5585fcee5d..9e43b54d81c 100644
--- a/mysql-test/main/update_use_source.result
+++ b/mysql-test/main/update_use_source.result
@@ -1177,6 +1177,7 @@ insert t1 (c1,c2,c3) select 3,seq,seq%10 from seq_1_to_200;
create index t1_idx1 on t1(c3);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
update t1 set c1=2 where exists (select 'x' from t1);
select count(*) from t1 where c1=2;
diff --git a/mysql-test/main/user_var.result b/mysql-test/main/user_var.result
index bf3d4f6dada..b475a8ca60a 100644
--- a/mysql-test/main/user_var.result
+++ b/mysql-test/main/user_var.result
@@ -22,7 +22,7 @@ i @vv1:=if(sv1.i,1,0) @vv2:=if(sv2.i,1,0) @vv3:=if(sv3.i,1,0) @vv1+@vv2+@vv3
2 1 0 0 1
explain select * from t1 where i=@vv1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref i i 4 const 1
+1 SIMPLE t1 ref i i 4 const 2
select @vv1,i,v from t1 where i=@vv1;
@vv1 i v
1 1 1
@@ -35,7 +35,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL i 4 NULL 3 Using where; Using index
explain select * from t1 where i=@vv1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref i i 4 const 1
+1 SIMPLE t1 ref i i 4 const 2
drop table t1,t2;
set @a=0,@b=0;
select @a:=10, @b:=1, @a > @b, @a < @b;
diff --git a/mysql-test/main/userstat.result b/mysql-test/main/userstat.result
index c838036f118..6ba3d0b7811 100644
--- a/mysql-test/main/userstat.result
+++ b/mysql-test/main/userstat.result
@@ -1,5 +1,7 @@
DROP TABLE IF EXISTS t1;
select variable_value from information_schema.global_status where variable_name="handler_read_key" into @global_read_key;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
show columns from information_schema.client_statistics;
Field Type Null Key Default Extra
CLIENT varchar(64) NO
@@ -117,7 +119,7 @@ Handler_mrr_key_refills 0
Handler_mrr_rowid_refills 0
Handler_prepare 18
Handler_read_first 0
-Handler_read_key 9
+Handler_read_key 17
Handler_read_last 0
Handler_read_next 0
Handler_read_prev 0
@@ -135,7 +137,7 @@ Handler_update 5
Handler_write 7
select variable_value - @global_read_key as "handler_read_key" from information_schema.global_status where variable_name="handler_read_key";
handler_read_key
-9
+17
disconnect ssl_con;
set @@global.userstat=0;
select * from information_schema.index_statistics;
diff --git a/mysql-test/main/varbinary.result b/mysql-test/main/varbinary.result
index 58cab5ad1ca..3a182e74692 100644
--- a/mysql-test/main/varbinary.result
+++ b/mysql-test/main/varbinary.result
@@ -82,8 +82,8 @@ drop table t1;
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` varbinary(255) DEFAULT NULL,
- `b` varchar(255) DEFAULT NULL
+ `a` varbinary(255)/*old*/ DEFAULT NULL,
+ `b` varchar(255)/*old*/ DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
select length(a), length(b) from t1;
length(a) length(b)
diff --git a/mysql-test/main/view.result b/mysql-test/main/view.result
index 08d570b86b1..83ea0e38992 100644
--- a/mysql-test/main/view.result
+++ b/mysql-test/main/view.result
@@ -615,6 +615,7 @@ select is_updatable from information_schema.views;
is_updatable
YES
YES
+YES
select * from t1;
col1 col2
5 Hello, view world
@@ -2443,6 +2444,8 @@ SELECT Meaning FROM v1 INTO retn;
RETURN retn;
END
//
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE VIEW v2 AS SELECT f1();
select * from v2;
f1()
@@ -2614,6 +2617,8 @@ declare mx int;
select max(a) from t1 into mx;
return mx;
end//
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create view v1 as select f1() as a;
create view v2 as select * from v1;
drop table t1;
@@ -3152,10 +3157,14 @@ DROP VIEW v1;
DROP TABLE t1;
DROP VIEW IF EXISTS v1;
SELECT * FROM (SELECT 1) AS t into @w;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE VIEW v1 AS SELECT * FROM (SELECT 1) AS t into @w;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'into @w' at line 1
# Previously the following would fail.
SELECT * FROM (SELECT 1) AS t into @w;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
drop view if exists view_24532_a;
drop view if exists view_24532_b;
drop table if exists table_24532;
@@ -3952,6 +3961,8 @@ BEGIN
SELECT a FROM v2 INTO @a;
RETURN @a;
END//
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Trigger pre-locking when opening v2.
CREATE VIEW v1 AS SELECT f1() FROM t1;
SHOW CREATE VIEW v1;
@@ -4093,7 +4104,7 @@ LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
;
SELECT 1
-FROM (( SELECT 1
+FROM ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4101,8 +4112,8 @@ LEFT JOIN t4 d_alias_1 ON d_alias_1.d1 = a_alias_1.a1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t1)
-LEFT OUTER JOIN (( SELECT 1
+) t1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4110,8 +4121,8 @@ LEFT JOIN t4 d_alias_1 ON d_alias_1.d1 = a_alias_1.a1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t2) ON 1=1
-LEFT OUTER JOIN (( SELECT 1
+) t2 ON 1=1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4119,8 +4130,8 @@ LEFT JOIN t4 d_alias_1 ON d_alias_1.d1 = a_alias_1.a1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t3) ON 1=1
-LEFT OUTER JOIN (( SELECT 1
+) t3 ON 1=1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4128,8 +4139,8 @@ LEFT JOIN t4 d_alias_1 ON d_alias_1.d1 = a_alias_1.a1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t4) ON 1=1
-LEFT OUTER JOIN (( SELECT 1
+) t4 ON 1=1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4137,8 +4148,8 @@ LEFT JOIN t4 d_alias_1 ON d_alias_1.d1 = a_alias_1.a1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t5) ON 1=1
-LEFT OUTER JOIN (( SELECT 1
+) t5 ON 1=1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4146,8 +4157,8 @@ LEFT JOIN t4 d_alias_1 ON d_alias_1.d1 = a_alias_1.a1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t6) ON 1=1
-LEFT OUTER JOIN (( SELECT 1
+) t6 ON 1=1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4155,8 +4166,8 @@ LEFT JOIN t4 d_alias_1 ON d_alias_1.d1 = a_alias_1.a1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t7) ON 1=1
-LEFT OUTER JOIN (( SELECT 1
+) t7 ON 1=1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4164,18 +4175,18 @@ LEFT JOIN t4 d_alias_1 ON d_alias_1.d1 = a_alias_1.a1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t8) ON 1=1
+) t8 ON 1=1
;
1
SELECT 1
-FROM (v1 t1)
-LEFT OUTER JOIN (v1 t2) ON 1=1
-LEFT OUTER JOIN (v1 t3) ON 1=1
-LEFT OUTER JOIN (v1 t4) ON 1=1
-LEFT OUTER JOIN (v1 t5) ON 1=1
-LEFT OUTER JOIN (v1 t6) ON 1=1
-LEFT OUTER JOIN (v1 t7) ON 1=1
-LEFT OUTER JOIN (v1 t8) ON 1=1
+FROM v1 t1
+LEFT OUTER JOIN v1 t2 ON 1=1
+LEFT OUTER JOIN v1 t3 ON 1=1
+LEFT OUTER JOIN v1 t4 ON 1=1
+LEFT OUTER JOIN v1 t5 ON 1=1
+LEFT OUTER JOIN v1 t6 ON 1=1
+LEFT OUTER JOIN v1 t7 ON 1=1
+LEFT OUTER JOIN v1 t8 ON 1=1
;
1
drop view v1;
@@ -4384,7 +4395,8 @@ DROP VIEW v1,v2;
DROP TABLE t1;
CREATE TABLE t1 (a varchar(10), KEY (a)) ;
INSERT INTO t1 VALUES
-('DD'), ('ZZ'), ('ZZ'), ('KK'), ('FF'), ('HH'),('MM');
+('DD'), ('ZZ'), ('ZZ'), ('KK'), ('FF'), ('HH'), ('MM'),
+('AA'), ('DD'), ('CC'), ('GG');
CREATE VIEW v1 AS SELECT * FROM t1;
# t1 and v1 should return the same result set
SELECT * FROM v1 WHERE a > 'JJ' OR a <> 0 AND a = 'VV';
diff --git a/mysql-test/main/view.test b/mysql-test/main/view.test
index 4a9504e7873..2b7b9d1f161 100644
--- a/mysql-test/main/view.test
+++ b/mysql-test/main/view.test
@@ -4050,7 +4050,7 @@ CREATE OR REPLACE view v1 AS
;
SELECT 1
-FROM (( SELECT 1
+FROM ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4058,8 +4058,8 @@ FROM (( SELECT 1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t1)
-LEFT OUTER JOIN (( SELECT 1
+) t1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4067,8 +4067,8 @@ LEFT OUTER JOIN (( SELECT 1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t2) ON 1=1
-LEFT OUTER JOIN (( SELECT 1
+) t2 ON 1=1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4076,8 +4076,8 @@ LEFT OUTER JOIN (( SELECT 1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t3) ON 1=1
-LEFT OUTER JOIN (( SELECT 1
+) t3 ON 1=1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4085,8 +4085,8 @@ LEFT OUTER JOIN (( SELECT 1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t4) ON 1=1
-LEFT OUTER JOIN (( SELECT 1
+) t4 ON 1=1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4094,8 +4094,8 @@ LEFT OUTER JOIN (( SELECT 1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t5) ON 1=1
-LEFT OUTER JOIN (( SELECT 1
+) t5 ON 1=1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4103,8 +4103,8 @@ LEFT OUTER JOIN (( SELECT 1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t6) ON 1=1
-LEFT OUTER JOIN (( SELECT 1
+) t6 ON 1=1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4112,8 +4112,8 @@ LEFT OUTER JOIN (( SELECT 1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t7) ON 1=1
-LEFT OUTER JOIN (( SELECT 1
+) t7 ON 1=1
+LEFT OUTER JOIN ( SELECT 1
FROM t1 a_alias_1
LEFT JOIN (t2 b_alias_1 JOIN t1 a_alias_2) ON b_alias_1.b1 = a_alias_1.a1 AND a_alias_2.a1 = a_alias_1.a1
LEFT JOIN t3 c_alias_1 ON c_alias_1.c1 = a_alias_1.a1
@@ -4121,18 +4121,18 @@ LEFT OUTER JOIN (( SELECT 1
LEFT JOIN t3 c_alias_2 ON c_alias_2.c1 = a_alias_1.a1
LEFT JOIN t5 e_alias_1 ON e_alias_1.e1 = a_alias_1.a1
LEFT JOIN t6 f_alias_1 ON f_alias_1.f1 = a_alias_1.a1
-) t8) ON 1=1
+) t8 ON 1=1
;
SELECT 1
-FROM (v1 t1)
-LEFT OUTER JOIN (v1 t2) ON 1=1
-LEFT OUTER JOIN (v1 t3) ON 1=1
-LEFT OUTER JOIN (v1 t4) ON 1=1
-LEFT OUTER JOIN (v1 t5) ON 1=1
-LEFT OUTER JOIN (v1 t6) ON 1=1
-LEFT OUTER JOIN (v1 t7) ON 1=1
-LEFT OUTER JOIN (v1 t8) ON 1=1
+FROM v1 t1
+LEFT OUTER JOIN v1 t2 ON 1=1
+LEFT OUTER JOIN v1 t3 ON 1=1
+LEFT OUTER JOIN v1 t4 ON 1=1
+LEFT OUTER JOIN v1 t5 ON 1=1
+LEFT OUTER JOIN v1 t6 ON 1=1
+LEFT OUTER JOIN v1 t7 ON 1=1
+LEFT OUTER JOIN v1 t8 ON 1=1
;
drop view v1;
@@ -4256,7 +4256,8 @@ DROP TABLE t1;
CREATE TABLE t1 (a varchar(10), KEY (a)) ;
INSERT INTO t1 VALUES
- ('DD'), ('ZZ'), ('ZZ'), ('KK'), ('FF'), ('HH'),('MM');
+ ('DD'), ('ZZ'), ('ZZ'), ('KK'), ('FF'), ('HH'), ('MM'),
+ ('AA'), ('DD'), ('CC'), ('GG');
CREATE VIEW v1 AS SELECT * FROM t1;
diff --git a/mysql-test/main/view_grant.result b/mysql-test/main/view_grant.result
index 82594128d85..df1429eaa4d 100644
--- a/mysql-test/main/view_grant.result
+++ b/mysql-test/main/view_grant.result
@@ -406,6 +406,8 @@ create table t2 (s1 int);
drop function if exists f2;
create function f2 () returns int begin declare v int; select s1 from t2
into v; return v; end//
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create algorithm=TEMPTABLE view v1 as select f2() from t1;
create algorithm=MERGE view v2 as select f2() from t1;
create algorithm=TEMPTABLE SQL SECURITY INVOKER view v3 as select f2() from t1;
@@ -449,6 +451,8 @@ create table t2 (s1 int);
drop function if exists f2;
create function f2 () returns int begin declare v int; select s1 from t2
into v; return v; end//
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create user mysqltest_1@localhost;
grant select on t1 to mysqltest_1@localhost;
grant execute on function f2 to mysqltest_1@localhost;
@@ -558,11 +562,6 @@ use test;
REVOKE ALL PRIVILEGES, GRANT OPTION FROM mysqltest_1@localhost;
drop user mysqltest_1@localhost;
drop database mysqltest;
-drop view if exists v1;
-drop table if exists t1;
-create table t1 as select * from mysql.user where user='';
-delete from mysql.user where user='';
-flush privileges;
create user 'test14256'@'%';
grant all on test.* to 'test14256'@'%';
connect test14256,localhost,test14256,,test;
@@ -588,9 +587,6 @@ test14256@% test14256@% 1
connection root;
disconnect test14256;
drop user test14256;
-insert into mysql.user select * from t1;
-flush privileges;
-drop table t1;
connection root;
create database mysqltest;
use mysqltest;
diff --git a/mysql-test/main/view_grant.test b/mysql-test/main/view_grant.test
index a70241138aa..65757e95fb3 100644
--- a/mysql-test/main/view_grant.test
+++ b/mysql-test/main/view_grant.test
@@ -669,16 +669,6 @@ drop database mysqltest;
#
# Bug#14256 definer in view definition is not fully qualified
#
---disable_warnings
-drop view if exists v1;
-drop table if exists t1;
---enable_warnings
-
-# Backup anonymous users and remove them. (They get in the way of
-# the one we test with here otherwise.)
-create table t1 as select * from mysql.user where user='';
-delete from mysql.user where user='';
-flush privileges;
# Create the test user
create user 'test14256'@'%';
@@ -708,12 +698,6 @@ connection root;
disconnect test14256;
drop user test14256;
-# Restore the anonymous users.
-insert into mysql.user select * from t1;
-flush privileges;
-
-drop table t1;
-
#
# Bug#14726 freeing stack variable in case of an error of opening a view when
# we have locked tables with LOCK TABLES statement.
diff --git a/mysql-test/main/win.result b/mysql-test/main/win.result
index 917fccaef21..e9b2a0842f0 100644
--- a/mysql-test/main/win.result
+++ b/mysql-test/main/win.result
@@ -3540,3 +3540,14 @@ DROP TABLE t1;
#
# End of 10.3 tests
#
+#
+# MDEV-16722: Assertion `type() != NULL_ITEM' failed
+#
+create table t1 (a int);
+insert into t1 values (1),(2),(3);
+SELECT row_number() OVER (order by a) FROM t1 order by NAME_CONST('myname',NULL);
+row_number() OVER (order by a)
+1
+2
+3
+drop table t1;
diff --git a/mysql-test/main/win.test b/mysql-test/main/win.test
index 81d3be19c10..c68e80614b6 100644
--- a/mysql-test/main/win.test
+++ b/mysql-test/main/win.test
@@ -2286,3 +2286,12 @@ DROP TABLE t1;
--echo #
--echo # End of 10.3 tests
--echo #
+
+--echo #
+--echo # MDEV-16722: Assertion `type() != NULL_ITEM' failed
+--echo #
+
+create table t1 (a int);
+insert into t1 values (1),(2),(3);
+SELECT row_number() OVER (order by a) FROM t1 order by NAME_CONST('myname',NULL);
+drop table t1;
diff --git a/mysql-test/main/win_big-mdev-11697.result b/mysql-test/main/win_big-mdev-11697.result
index e5dc271839c..ec9d2243c88 100644
--- a/mysql-test/main/win_big-mdev-11697.result
+++ b/mysql-test/main/win_big-mdev-11697.result
@@ -12,6 +12,7 @@ select * from data_generator
commit;
analyze table test_table;
Table Op Msg_type Msg_text
+test.test_table analyze status Engine-independent statistics collected
test.test_table analyze status OK
explain select * from (select id, lead(id) over(order by id) next_id from test_table order by id) a limit 10;
id select_type table type possible_keys key key_len ref rows Extra
@@ -44,6 +45,7 @@ select * from data_generator
commit;
analyze table test_table;
Table Op Msg_type Msg_text
+test.test_table analyze status Engine-independent statistics collected
test.test_table analyze status OK
explain select * from (select id, lead(id) over(order by id) next_id from test_table order by id) a limit 10;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index a166330d813..24bf9b40109 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -102,6 +102,7 @@ use mtr_unique;
use mtr_results;
use IO::Socket::INET;
use IO::Select;
+use Time::HiRes qw(gettimeofday);
require "mtr_process.pl";
require "mtr_io.pl";
@@ -173,6 +174,7 @@ my @DEFAULT_SUITES= qw(
binlog_encryption-
csv-
compat/oracle-
+ compat/mssql-
encryption-
federated-
funcs_1-
@@ -199,6 +201,7 @@ my @DEFAULT_SUITES= qw(
unit-
vcol-
versioning-
+ period-
);
my $opt_suites;
@@ -2290,6 +2293,10 @@ sub environment_setup {
$ENV{'EXE_MYSQL'}= $exe_mysql;
$ENV{'MYSQL_PLUGIN'}= $exe_mysql_plugin;
$ENV{'MYSQL_EMBEDDED'}= $exe_mysql_embedded;
+ if(IS_WINDOWS)
+ {
+ $ENV{'MYSQL_INSTALL_DB_EXE'}= mtr_exe_exists("$bindir/sql$opt_vs_config/mysql_install_db");
+ }
my $client_config_exe=
mtr_exe_maybe_exists(
@@ -2601,8 +2608,22 @@ sub setup_vardir() {
unlink "$plugindir/symlink_test";
}
+ for (<$bindir/plugin/auth_pam/auth_pam_tool>)
+ {
+ mkpath("$plugindir/auth_pam_tool_dir");
+ if ($opt_use_copy)
+ {
+ copy rel2abs($_), "$plugindir/auth_pam_tool_dir/auth_pam_tool"
+ }
+ else
+ {
+ symlink rel2abs($_), "$plugindir/auth_pam_tool_dir/auth_pam_tool";
+ }
+ }
+
for (<$bindir/storage/*/*.so>,
<$bindir/plugin/*/*.so>,
+ <$bindir/plugin/*/auth_pam_tool_dir>,
<$bindir/libmariadb/plugins/*/*.so>,
<$bindir/libmariadb/*.so>,
<$bindir/sql/*.so>)
@@ -2875,15 +2896,44 @@ sub mysql_server_start($) {
# Save this test case information, so next can examine it
$mysqld->{'started_tinfo'}= $tinfo;
}
+
+ # If wsrep is on, we need to wait until the first
+ # server starts and bootstraps the cluster before
+  # starting other servers. The bootstrap server in the
+ # configuration should always be the first which has
+ # wsrep_on=ON
+ if (wsrep_on($mysqld) && wsrep_is_bootstrap_server($mysqld))
+ {
+ mtr_verbose("Waiting for wsrep bootstrap server to start");
+ if ($mysqld->{WAIT}->($mysqld))
+ {
+ return 1;
+ }
+ }
}
sub mysql_server_wait {
- my ($mysqld) = @_;
+ my ($mysqld, $tinfo) = @_;
- return not sleep_until_file_created($mysqld->value('pid-file'),
- $opt_start_timeout,
- $mysqld->{'proc'},
- $warn_seconds);
+ if (!sleep_until_file_created($mysqld->value('pid-file'),
+ $opt_start_timeout,
+ $mysqld->{'proc'},
+ $warn_seconds))
+ {
+ $tinfo->{comment}= "Failed to start ".$mysqld->name() . "\n";
+ return 1;
+ }
+
+ if (wsrep_on($mysqld))
+ {
+ mtr_verbose("Waiting for wsrep server " . $mysqld->name() . " to be ready");
+ if (!wait_wsrep_ready($tinfo, $mysqld))
+ {
+ return 1;
+ }
+ }
+
+ return 0;
}
sub create_config_file_for_extern {
@@ -3224,8 +3274,8 @@ sub mysql_install_db {
$bootstrap_sql_file);
# mysql.gtid_slave_pos was created in InnoDB, but many tests
- # run without InnoDB. Alter it to MyISAM now
- mtr_tofile($bootstrap_sql_file, "ALTER TABLE gtid_slave_pos ENGINE=MyISAM;\n");
+ # run without InnoDB. Alter it to Aria now
+ mtr_tofile($bootstrap_sql_file, "ALTER TABLE gtid_slave_pos ENGINE=Aria transactional=0;\n");
}
else
{
@@ -3242,7 +3292,7 @@ sub mysql_install_db {
# Remove anonymous users
mtr_tofile($bootstrap_sql_file,
- "DELETE FROM mysql.user where user= '';\n");
+ "DELETE FROM mysql.global_priv where user= '';\n");
# Create mtr database
mtr_tofile($bootstrap_sql_file,
@@ -3265,6 +3315,7 @@ sub mysql_install_db {
# Create directories mysql and test
mkpath("$install_datadir/mysql");
+ my $realtime= gettimeofday();
if ( My::SafeProcess->run
(
name => "bootstrap",
@@ -3282,6 +3333,10 @@ sub mysql_install_db {
"Could not install system database from $bootstrap_sql_file\n" .
"The $path_bootstrap_log file contains:\n$data\n");
}
+ else
+ {
+ mtr_verbose("Spent " . sprintf("%.3f", (gettimeofday() - $realtime)) . " seconds in bootstrap");
+ }
}
@@ -4465,6 +4520,7 @@ sub extract_warning_lines ($$) {
qr|Access denied for user|,
qr|Aborted connection|,
qr|table.*is full|,
+ qr|\[ERROR\] mysqld: \Z|, # Warning from Aria recovery
qr|Linux Native AIO|, # warning that aio does not work on /dev/shm
qr|InnoDB: io_setup\(\) attempt|,
qr|InnoDB: io_setup\(\) failed with EAGAIN|,
@@ -4494,7 +4550,8 @@ sub extract_warning_lines ($$) {
qr/InnoDB: See also */,
qr/InnoDB: Cannot open .*ib_buffer_pool.* for reading: No such file or directory*/,
qr/InnoDB: Table .*mysql.*innodb_table_stats.* not found./,
- qr/InnoDB: User stopword table .* does not exist./
+ qr/InnoDB: User stopword table .* does not exist./,
+ qr/Dump thread [0-9]+ last sent to server [0-9]+ binlog file:pos .+/
);
@@ -5382,6 +5439,118 @@ sub stop_servers($$) {
}
}
+#
+# run_query_output
+#
+# Run a query against a server using mysql client. The output of
+# the query will be written into outfile.
+#
+sub run_query_output {
+ my ($mysqld, $query, $outfile)= @_;
+ my $args;
+
+ mtr_init_args(\$args);
+ mtr_add_arg($args, "--defaults-file=%s", $path_config_file);
+ mtr_add_arg($args, "--defaults-group-suffix=%s", $mysqld->after('mysqld'));
+ mtr_add_arg($args, "--silent");
+ mtr_add_arg($args, "--execute=%s", $query);
+
+ my $res= My::SafeProcess->run
+ (
+ name => "run_query_output -> ".$mysqld->name(),
+ path => $exe_mysql,
+ args => \$args,
+ output => $outfile,
+ error => $outfile
+ );
+
+ return $res
+}
+
+
+#
+# wait_wsrep_ready
+#
+# Wait until the server has been joined to the cluster and is
+# ready for operation.
+#
+# RETURN
+# 1 Server is ready
+# 0 Server didn't transition to ready state within start timeout
+#
+sub wait_wsrep_ready($$) {
+ my ($tinfo, $mysqld)= @_;
+
+ my $sleeptime= 100; # Milliseconds
+ my $loops= ($opt_start_timeout * 1000) / $sleeptime;
+
+ my $name= $mysqld->name();
+ my $outfile= "$opt_vardir/tmp/$name.wsrep_ready";
+ my $query= "SET SESSION wsrep_sync_wait = 0;
+ SELECT VARIABLE_NAME, VARIABLE_VALUE
+ FROM INFORMATION_SCHEMA.GLOBAL_STATUS
+ WHERE VARIABLE_NAME = 'wsrep_ready'";
+
+ for (my $loop= 1; $loop <= $loops; $loop++)
+ {
+ # Careful... if MTR runs with option 'verbose' then the
+    # file also contains SafeProcess verbose output
+ if (run_query_output($mysqld, $query, $outfile) == 0 &&
+ mtr_grab_file($outfile) =~ /WSREP_READY\s+ON/)
+ {
+ unlink($outfile);
+ return 1;
+ }
+ mtr_milli_sleep($sleeptime);
+ }
+
+ $tinfo->{logfile}= "WSREP did not transition to state READY";
+ return 0;
+}
+
+#
+# wsrep_is_bootstrap_server
+#
+# Check if the server is the first one to be started in the
+# cluster.
+#
+# RETURN
+# 1 The server is a bootstrap server
+# 0 The server is not a bootstrap server
+#
+sub wsrep_is_bootstrap_server($) {
+ my $mysqld= shift;
+
+ my $cluster_address= $mysqld->if_exist('wsrep-cluster-address') ||
+ $mysqld->if_exist('wsrep_cluster_address');
+ if (defined $cluster_address)
+ {
+ return $cluster_address eq "gcomm://" || $cluster_address eq "'gcomm://'";
+ }
+ return 0;
+}
+
+#
+# wsrep_on
+#
+# Check if wsrep has been enabled for a server.
+#
+# RETURN
+# 1 Wsrep has been enabled
+# 0 Wsrep is not enabled
+#
+sub wsrep_on($) {
+ my $mysqld= shift;
+  # Check if wsrep-on is set in the configuration
+ if ($mysqld->if_exist('wsrep-on')) {
+ my $on= "".$mysqld->value('wsrep-on');
+ if ($on eq "1" || $on eq "ON") {
+ return 1;
+ }
+ }
+ return 0;
+}
+
#
# start_servers
@@ -5401,8 +5570,7 @@ sub start_servers($) {
for (all_servers()) {
next unless $_->{WAIT} and started($_);
- if ($_->{WAIT}->($_)) {
- $tinfo->{comment}= "Failed to start ".$_->name() . "\n";
+ if ($_->{WAIT}->($_, $tinfo)) {
return 1;
}
}
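
The wsrep helpers added to mysql-test-run.pl above decide, from a server's option file, whether Galera is enabled (wsrep_on) and whether that server is the one that bootstraps the cluster (wsrep_is_bootstrap_server), before wait_wsrep_ready polls the wsrep_ready status. Below is a minimal standalone Perl sketch of that decision logic only; the MockMysqld package and its value()/if_exist() accessors are stand-ins invented here for MTR's real $mysqld configuration object, so this is illustrative and not part of the patch.

use strict;
use warnings;

# Stand-in for MTR's $mysqld config object (hypothetical): both accessors
# simply look the option up in a plain hash.
package MockMysqld;
sub new      { my ($class, %cfg) = @_; return bless { %cfg }, $class; }
sub if_exist { my ($self, $name) = @_; return $self->{$name}; }
sub value    { my ($self, $name) = @_; return $self->{$name}; }

package main;

# Mirrors wsrep_on(): the option must be present and set to 1 or ON.
sub sketch_wsrep_on {
  my ($mysqld) = @_;
  return 0 unless defined $mysqld->if_exist('wsrep-on');
  my $on = "" . $mysqld->value('wsrep-on');
  return ($on eq "1" || $on eq "ON") ? 1 : 0;
}

# Mirrors wsrep_is_bootstrap_server(): a bare (possibly quoted) gcomm://
# cluster address marks the node that bootstraps the cluster.
sub sketch_is_bootstrap {
  my ($mysqld) = @_;
  my $addr = $mysqld->if_exist('wsrep-cluster-address')
          || $mysqld->if_exist('wsrep_cluster_address');
  return 0 unless defined $addr;
  return ($addr eq "gcomm://" || $addr eq "'gcomm://'") ? 1 : 0;
}

my $node1 = MockMysqld->new('wsrep-on' => 'ON',
                            'wsrep-cluster-address' => 'gcomm://');
my $node2 = MockMysqld->new('wsrep-on' => 'ON',
                            'wsrep-cluster-address' => 'gcomm://127.0.0.1:4567');
printf "node1: wsrep_on=%d bootstrap=%d\n",
       sketch_wsrep_on($node1), sketch_is_bootstrap($node1);
printf "node2: wsrep_on=%d bootstrap=%d\n",
       sketch_wsrep_on($node2), sketch_is_bootstrap($node2);

Run under perl, this prints wsrep_on=1 for both nodes but bootstrap=1 only for node1, which matches how mysql_server_start now delays the remaining nodes until the bootstrap node is up.
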
diff --git a/mysql-test/std_data/binlog-header.binlog b/mysql-test/std_data/binlog-header.binlog
new file mode 100644
index 00000000000..d37fac92f19
--- /dev/null
+++ b/mysql-test/std_data/binlog-header.binlog
Binary files differ
diff --git a/mysql-test/std_data/rpl/mysql-5.7.11-stm-temporal-round-binlog.000001 b/mysql-test/std_data/rpl/mysql-5.7.11-stm-temporal-round-binlog.000001
new file mode 100644
index 00000000000..5010e164e43
--- /dev/null
+++ b/mysql-test/std_data/rpl/mysql-5.7.11-stm-temporal-round-binlog.000001
Binary files differ
diff --git a/mysql-test/std_data/rpl/mysql-8.0.13-stm-temporal-round-binlog.000001 b/mysql-test/std_data/rpl/mysql-8.0.13-stm-temporal-round-binlog.000001
new file mode 100644
index 00000000000..4d582fdf5bb
--- /dev/null
+++ b/mysql-test/std_data/rpl/mysql-8.0.13-stm-temporal-round-binlog.000001
Binary files differ
diff --git a/mysql-test/suite/archive/disabled.def b/mysql-test/suite/archive/disabled.def
new file mode 100644
index 00000000000..ae841eaaf33
--- /dev/null
+++ b/mysql-test/suite/archive/disabled.def
@@ -0,0 +1,13 @@
+##############################################################################
+#
+# List the test cases that are to be disabled temporarily.
+#
+# Separate the test case name and the comment with ':'.
+#
+# <testcasename> : BUG#<xxxx> <date disabled> <disabler> <comment>
+#
+# Do not use any TAB characters for whitespace.
+#
+##############################################################################
+
+archive_gis : MDEV-17297 wait for the fix and then enable it
diff --git a/mysql-test/suite/archive/flush.result b/mysql-test/suite/archive/flush.result
new file mode 100644
index 00000000000..428f32d09f8
--- /dev/null
+++ b/mysql-test/suite/archive/flush.result
@@ -0,0 +1,18 @@
+CREATE TABLE t1(a INT) ENGINE=archive;
+INSERT INTO t1 VALUES(1);
+connect con1, localhost, root;
+LOCK TABLE t1 READ;
+connection default;
+FLUSH TABLES WITH READ LOCK;
+UNLOCK TABLES;
+# Must return 1 row
+SELECT * FROM t2;
+a
+1
+SELECT * FROM t1;
+a
+1
+connection con1;
+UNLOCK TABLES;
+connection default;
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/archive/flush.test b/mysql-test/suite/archive/flush.test
new file mode 100644
index 00000000000..feadef08d7a
--- /dev/null
+++ b/mysql-test/suite/archive/flush.test
@@ -0,0 +1,25 @@
+--source include/have_archive.inc
+
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
+CREATE TABLE t1(a INT) ENGINE=archive;
+INSERT INTO t1 VALUES(1);
+# Works correctly if we uncomment the next row
+#FLUSH TABLE t1;
+
+connect(con1, localhost, root);
+LOCK TABLE t1 READ;
+
+connection default;
+FLUSH TABLES WITH READ LOCK;
+copy_file $MYSQLD_DATADIR/test/t1.frm $MYSQLD_DATADIR/test/t2.frm;
+copy_file $MYSQLD_DATADIR/test/t1.ARZ $MYSQLD_DATADIR/test/t2.ARZ;
+UNLOCK TABLES;
+--echo # Must return 1 row
+SELECT * FROM t2;
+SELECT * FROM t1;
+
+connection con1;
+UNLOCK TABLES;
+
+connection default;
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/archive/rnd_pos.result b/mysql-test/suite/archive/rnd_pos.result
new file mode 100644
index 00000000000..b6b6748d53f
--- /dev/null
+++ b/mysql-test/suite/archive/rnd_pos.result
@@ -0,0 +1,56 @@
+create table t1(c1 int not null, c2 double not null, c3 char(255) not null) engine=archive;
+insert t1 select seq, seq+0.7, concat('row with c1 = ', seq) from seq_1_to_10;
+explain partitions select c1,c3 from t1 order by c2;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 10 Using filesort
+set max_length_for_sort_data = 4;
+explain partitions select c1,c3 from t1 order by c2;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 10 Using temporary; Using filesort
+flush status;
+select c1,c3 from t1 order by c2;
+c1 c3
+1 row with c1 = 1
+2 row with c1 = 2
+3 row with c1 = 3
+4 row with c1 = 4
+5 row with c1 = 5
+6 row with c1 = 6
+7 row with c1 = 7
+8 row with c1 = 8
+9 row with c1 = 9
+10 row with c1 = 10
+set max_length_for_sort_data = default;
+show status where variable_name like '%tmp%' and value != 0;
+Variable_name Value
+Created_tmp_tables 1
+Handler_tmp_write 10
+Rows_tmp_read 20
+alter table t1 partition by hash (c1) partitions 3;
+explain partitions select c1,c3 from t1 order by c2;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0,p1,p2 ALL NULL NULL NULL NULL 10 Using filesort
+set max_length_for_sort_data = 4;
+explain partitions select c1,c3 from t1 order by c2;
+id select_type table partitions type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 p0,p1,p2 ALL NULL NULL NULL NULL 10 Using temporary; Using filesort
+flush status;
+select c1,c3 from t1 order by c2;
+c1 c3
+1 row with c1 = 1
+2 row with c1 = 2
+3 row with c1 = 3
+4 row with c1 = 4
+5 row with c1 = 5
+6 row with c1 = 6
+7 row with c1 = 7
+8 row with c1 = 8
+9 row with c1 = 9
+10 row with c1 = 10
+set max_length_for_sort_data = default;
+show status where variable_name like '%tmp%' and value != 0;
+Variable_name Value
+Created_tmp_tables 1
+Handler_tmp_write 10
+Rows_tmp_read 20
+drop table t1;
diff --git a/mysql-test/suite/archive/rnd_pos.test b/mysql-test/suite/archive/rnd_pos.test
new file mode 100644
index 00000000000..8c7a0e16a79
--- /dev/null
+++ b/mysql-test/suite/archive/rnd_pos.test
@@ -0,0 +1,31 @@
+#
+# MDEV-14500 Support engines without rnd_pos
+#
+source include/have_archive.inc;
+source include/have_sequence.inc;
+source include/have_partition.inc;
+
+create table t1(c1 int not null, c2 double not null, c3 char(255) not null) engine=archive;
+insert t1 select seq, seq+0.7, concat('row with c1 = ', seq) from seq_1_to_10;
+explain partitions select c1,c3 from t1 order by c2;
+set max_length_for_sort_data = 4;
+explain partitions select c1,c3 from t1 order by c2;
+flush status;
+select c1,c3 from t1 order by c2;
+set max_length_for_sort_data = default;
+--disable_ps_protocol
+show status where variable_name like '%tmp%' and value != 0;
+--enable_ps_protocol
+
+alter table t1 partition by hash (c1) partitions 3;
+explain partitions select c1,c3 from t1 order by c2;
+set max_length_for_sort_data = 4;
+explain partitions select c1,c3 from t1 order by c2;
+flush status;
+select c1,c3 from t1 order by c2;
+set max_length_for_sort_data = default;
+--disable_ps_protocol
+show status where variable_name like '%tmp%' and value != 0;
+--enable_ps_protocol
+
+drop table t1;
diff --git a/mysql-test/suite/binlog/include/binlog.test b/mysql-test/suite/binlog/include/binlog.test
index 40befc9d3d1..e5d4efb183b 100644
--- a/mysql-test/suite/binlog/include/binlog.test
+++ b/mysql-test/suite/binlog/include/binlog.test
@@ -269,12 +269,10 @@ create table if not exists t3 like tt1;
# the mysql database is replicated even when the current database is
# 'mysql'.
---disable_warnings
USE mysql;
-INSERT IGNORE INTO user SET host='localhost', user='@#@', password=password('Just a test');
-UPDATE user SET password=password('Another password') WHERE host='localhost' AND user='@#@';
-DELETE FROM user WHERE host='localhost' AND user='@#@';
---enable_warnings
+INSERT db SET host='localhost', user='@#@', db='Just a test';
+UPDATE db SET db='Another db' WHERE host='localhost' AND user='@#@';
+DELETE FROM db WHERE host='localhost' AND user='@#@';
use test;
source include/show_binlog_events.inc;
diff --git a/mysql-test/suite/binlog/r/binlog_index.result b/mysql-test/suite/binlog/r/binlog_index.result
index 02af5e40aab..ba91cac20e3 100644
--- a/mysql-test/suite/binlog/r/binlog_index.result
+++ b/mysql-test/suite/binlog/r/binlog_index.result
@@ -145,6 +145,7 @@ master-bin.000009
master-bin.000010
master-bin.000011
+# restart
SET @index=LOAD_FILE('MYSQLTEST_VARDIR/mysqld.1/data//master-bin.index');
SELECT @index;
@index
@@ -171,6 +172,7 @@ master-bin.000010
master-bin.000011
master-bin.000012
+# restart
SET @index=LOAD_FILE('MYSQLTEST_VARDIR/mysqld.1/data//master-bin.index');
SELECT @index;
@index
diff --git a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_frag.result b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_frag.result
index 04846efc274..5f685dfa785 100644
--- a/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_frag.result
+++ b/mysql-test/suite/binlog/r/binlog_mysqlbinlog_row_frag.result
@@ -1,7 +1,7 @@
CREATE TABLE t (a TEXT);
RESET MASTER;
INSERT INTO t SET a=repeat('a', 1024);
-SELECT a from t into @a;
+SELECT a into @a from t;
FLUSH LOGS;
DELETE FROM t;
FOUND 1 /BINLOG @binlog_fragment_0, @binlog_fragment_1/ in mysqlbinlog.sql
diff --git a/mysql-test/suite/binlog/r/binlog_rotate_perf.result b/mysql-test/suite/binlog/r/binlog_rotate_perf.result
new file mode 100644
index 00000000000..d2ebc56e228
--- /dev/null
+++ b/mysql-test/suite/binlog/r/binlog_rotate_perf.result
@@ -0,0 +1,945 @@
+connect conn1,localhost,root,,test;
+reset master;
+create database test_rotate_db;
+use test_rotate_db;
+#current engine=myisam
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+CREATE TABLE t1_myisam (c1 INT) ENGINE=myisam;
+insert into t1_myisam values(0),(1);
+show master status;
+File Position Binlog_Do_DB Binlog_Ignore_DB
+master-bin.000001 # <Binlog_Do_DB> <Binlog_Ignore_DB>
+flush logs;
+flush logs;
+flush logs;
+# left times= 99
+flush logs;
+flush logs;
+flush logs;
+# left times= 98
+flush logs;
+flush logs;
+flush logs;
+# left times= 97
+flush logs;
+flush logs;
+flush logs;
+# left times= 96
+flush logs;
+flush logs;
+flush logs;
+# left times= 95
+flush logs;
+flush logs;
+flush logs;
+# left times= 94
+flush logs;
+flush logs;
+flush logs;
+# left times= 93
+flush logs;
+flush logs;
+flush logs;
+# left times= 92
+flush logs;
+flush logs;
+flush logs;
+# left times= 91
+flush logs;
+flush logs;
+flush logs;
+# left times= 90
+flush logs;
+flush logs;
+flush logs;
+# left times= 89
+flush logs;
+flush logs;
+flush logs;
+# left times= 88
+flush logs;
+flush logs;
+flush logs;
+# left times= 87
+flush logs;
+flush logs;
+flush logs;
+# left times= 86
+flush logs;
+flush logs;
+flush logs;
+# left times= 85
+flush logs;
+flush logs;
+flush logs;
+# left times= 84
+flush logs;
+flush logs;
+flush logs;
+# left times= 83
+flush logs;
+flush logs;
+flush logs;
+# left times= 82
+flush logs;
+flush logs;
+flush logs;
+# left times= 81
+flush logs;
+flush logs;
+flush logs;
+# left times= 80
+flush logs;
+flush logs;
+flush logs;
+# left times= 79
+flush logs;
+flush logs;
+flush logs;
+# left times= 78
+flush logs;
+flush logs;
+flush logs;
+# left times= 77
+flush logs;
+flush logs;
+flush logs;
+# left times= 76
+flush logs;
+flush logs;
+flush logs;
+# left times= 75
+flush logs;
+flush logs;
+flush logs;
+# left times= 74
+flush logs;
+flush logs;
+flush logs;
+# left times= 73
+flush logs;
+flush logs;
+flush logs;
+# left times= 72
+flush logs;
+flush logs;
+flush logs;
+# left times= 71
+flush logs;
+flush logs;
+flush logs;
+# left times= 70
+flush logs;
+flush logs;
+flush logs;
+# left times= 69
+flush logs;
+flush logs;
+flush logs;
+# left times= 68
+flush logs;
+flush logs;
+flush logs;
+# left times= 67
+flush logs;
+flush logs;
+flush logs;
+# left times= 66
+flush logs;
+flush logs;
+flush logs;
+# left times= 65
+flush logs;
+flush logs;
+flush logs;
+# left times= 64
+flush logs;
+flush logs;
+flush logs;
+# left times= 63
+flush logs;
+flush logs;
+flush logs;
+# left times= 62
+flush logs;
+flush logs;
+flush logs;
+# left times= 61
+flush logs;
+flush logs;
+flush logs;
+# left times= 60
+flush logs;
+flush logs;
+flush logs;
+# left times= 59
+flush logs;
+flush logs;
+flush logs;
+# left times= 58
+flush logs;
+flush logs;
+flush logs;
+# left times= 57
+flush logs;
+flush logs;
+flush logs;
+# left times= 56
+flush logs;
+flush logs;
+flush logs;
+# left times= 55
+flush logs;
+flush logs;
+flush logs;
+# left times= 54
+flush logs;
+flush logs;
+flush logs;
+# left times= 53
+flush logs;
+flush logs;
+flush logs;
+# left times= 52
+flush logs;
+flush logs;
+flush logs;
+# left times= 51
+flush logs;
+flush logs;
+flush logs;
+# left times= 50
+flush logs;
+flush logs;
+flush logs;
+# left times= 49
+flush logs;
+flush logs;
+flush logs;
+# left times= 48
+flush logs;
+flush logs;
+flush logs;
+# left times= 47
+flush logs;
+flush logs;
+flush logs;
+# left times= 46
+flush logs;
+flush logs;
+flush logs;
+# left times= 45
+flush logs;
+flush logs;
+flush logs;
+# left times= 44
+flush logs;
+flush logs;
+flush logs;
+# left times= 43
+flush logs;
+flush logs;
+flush logs;
+# left times= 42
+flush logs;
+flush logs;
+flush logs;
+# left times= 41
+flush logs;
+flush logs;
+flush logs;
+# left times= 40
+flush logs;
+flush logs;
+flush logs;
+# left times= 39
+flush logs;
+flush logs;
+flush logs;
+# left times= 38
+flush logs;
+flush logs;
+flush logs;
+# left times= 37
+flush logs;
+flush logs;
+flush logs;
+# left times= 36
+flush logs;
+flush logs;
+flush logs;
+# left times= 35
+flush logs;
+flush logs;
+flush logs;
+# left times= 34
+flush logs;
+flush logs;
+flush logs;
+# left times= 33
+flush logs;
+flush logs;
+flush logs;
+# left times= 32
+flush logs;
+flush logs;
+flush logs;
+# left times= 31
+flush logs;
+flush logs;
+flush logs;
+# left times= 30
+flush logs;
+flush logs;
+flush logs;
+# left times= 29
+flush logs;
+flush logs;
+flush logs;
+# left times= 28
+flush logs;
+flush logs;
+flush logs;
+# left times= 27
+flush logs;
+flush logs;
+flush logs;
+# left times= 26
+flush logs;
+flush logs;
+flush logs;
+# left times= 25
+flush logs;
+flush logs;
+flush logs;
+# left times= 24
+flush logs;
+flush logs;
+flush logs;
+# left times= 23
+flush logs;
+flush logs;
+flush logs;
+# left times= 22
+flush logs;
+flush logs;
+flush logs;
+# left times= 21
+flush logs;
+flush logs;
+flush logs;
+# left times= 20
+flush logs;
+flush logs;
+flush logs;
+# left times= 19
+flush logs;
+flush logs;
+flush logs;
+# left times= 18
+flush logs;
+flush logs;
+flush logs;
+# left times= 17
+flush logs;
+flush logs;
+flush logs;
+# left times= 16
+flush logs;
+flush logs;
+flush logs;
+# left times= 15
+flush logs;
+flush logs;
+flush logs;
+# left times= 14
+flush logs;
+flush logs;
+flush logs;
+# left times= 13
+flush logs;
+flush logs;
+flush logs;
+# left times= 12
+flush logs;
+flush logs;
+flush logs;
+# left times= 11
+flush logs;
+flush logs;
+flush logs;
+# left times= 10
+flush logs;
+flush logs;
+flush logs;
+# left times= 9
+flush logs;
+flush logs;
+flush logs;
+# left times= 8
+flush logs;
+flush logs;
+flush logs;
+# left times= 7
+flush logs;
+flush logs;
+flush logs;
+# left times= 6
+flush logs;
+flush logs;
+flush logs;
+# left times= 5
+flush logs;
+flush logs;
+flush logs;
+# left times= 4
+flush logs;
+flush logs;
+show master status;
+File Position Binlog_Do_DB Binlog_Ignore_DB
+master-bin.000291 # <Binlog_Do_DB> <Binlog_Ignore_DB>
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+master-bin.000002 #
+master-bin.000003 #
+master-bin.000004 #
+master-bin.000005 #
+master-bin.000006 #
+master-bin.000007 #
+master-bin.000008 #
+master-bin.000009 #
+master-bin.000010 #
+master-bin.000011 #
+master-bin.000012 #
+master-bin.000013 #
+master-bin.000014 #
+master-bin.000015 #
+master-bin.000016 #
+master-bin.000017 #
+master-bin.000018 #
+master-bin.000019 #
+master-bin.000020 #
+master-bin.000021 #
+master-bin.000022 #
+master-bin.000023 #
+master-bin.000024 #
+master-bin.000025 #
+master-bin.000026 #
+master-bin.000027 #
+master-bin.000028 #
+master-bin.000029 #
+master-bin.000030 #
+master-bin.000031 #
+master-bin.000032 #
+master-bin.000033 #
+master-bin.000034 #
+master-bin.000035 #
+master-bin.000036 #
+master-bin.000037 #
+master-bin.000038 #
+master-bin.000039 #
+master-bin.000040 #
+master-bin.000041 #
+master-bin.000042 #
+master-bin.000043 #
+master-bin.000044 #
+master-bin.000045 #
+master-bin.000046 #
+master-bin.000047 #
+master-bin.000048 #
+master-bin.000049 #
+master-bin.000050 #
+master-bin.000051 #
+master-bin.000052 #
+master-bin.000053 #
+master-bin.000054 #
+master-bin.000055 #
+master-bin.000056 #
+master-bin.000057 #
+master-bin.000058 #
+master-bin.000059 #
+master-bin.000060 #
+master-bin.000061 #
+master-bin.000062 #
+master-bin.000063 #
+master-bin.000064 #
+master-bin.000065 #
+master-bin.000066 #
+master-bin.000067 #
+master-bin.000068 #
+master-bin.000069 #
+master-bin.000070 #
+master-bin.000071 #
+master-bin.000072 #
+master-bin.000073 #
+master-bin.000074 #
+master-bin.000075 #
+master-bin.000076 #
+master-bin.000077 #
+master-bin.000078 #
+master-bin.000079 #
+master-bin.000080 #
+master-bin.000081 #
+master-bin.000082 #
+master-bin.000083 #
+master-bin.000084 #
+master-bin.000085 #
+master-bin.000086 #
+master-bin.000087 #
+master-bin.000088 #
+master-bin.000089 #
+master-bin.000090 #
+master-bin.000091 #
+master-bin.000092 #
+master-bin.000093 #
+master-bin.000094 #
+master-bin.000095 #
+master-bin.000096 #
+master-bin.000097 #
+master-bin.000098 #
+master-bin.000099 #
+master-bin.000100 #
+master-bin.000101 #
+master-bin.000102 #
+master-bin.000103 #
+master-bin.000104 #
+master-bin.000105 #
+master-bin.000106 #
+master-bin.000107 #
+master-bin.000108 #
+master-bin.000109 #
+master-bin.000110 #
+master-bin.000111 #
+master-bin.000112 #
+master-bin.000113 #
+master-bin.000114 #
+master-bin.000115 #
+master-bin.000116 #
+master-bin.000117 #
+master-bin.000118 #
+master-bin.000119 #
+master-bin.000120 #
+master-bin.000121 #
+master-bin.000122 #
+master-bin.000123 #
+master-bin.000124 #
+master-bin.000125 #
+master-bin.000126 #
+master-bin.000127 #
+master-bin.000128 #
+master-bin.000129 #
+master-bin.000130 #
+master-bin.000131 #
+master-bin.000132 #
+master-bin.000133 #
+master-bin.000134 #
+master-bin.000135 #
+master-bin.000136 #
+master-bin.000137 #
+master-bin.000138 #
+master-bin.000139 #
+master-bin.000140 #
+master-bin.000141 #
+master-bin.000142 #
+master-bin.000143 #
+master-bin.000144 #
+master-bin.000145 #
+master-bin.000146 #
+master-bin.000147 #
+master-bin.000148 #
+master-bin.000149 #
+master-bin.000150 #
+master-bin.000151 #
+master-bin.000152 #
+master-bin.000153 #
+master-bin.000154 #
+master-bin.000155 #
+master-bin.000156 #
+master-bin.000157 #
+master-bin.000158 #
+master-bin.000159 #
+master-bin.000160 #
+master-bin.000161 #
+master-bin.000162 #
+master-bin.000163 #
+master-bin.000164 #
+master-bin.000165 #
+master-bin.000166 #
+master-bin.000167 #
+master-bin.000168 #
+master-bin.000169 #
+master-bin.000170 #
+master-bin.000171 #
+master-bin.000172 #
+master-bin.000173 #
+master-bin.000174 #
+master-bin.000175 #
+master-bin.000176 #
+master-bin.000177 #
+master-bin.000178 #
+master-bin.000179 #
+master-bin.000180 #
+master-bin.000181 #
+master-bin.000182 #
+master-bin.000183 #
+master-bin.000184 #
+master-bin.000185 #
+master-bin.000186 #
+master-bin.000187 #
+master-bin.000188 #
+master-bin.000189 #
+master-bin.000190 #
+master-bin.000191 #
+master-bin.000192 #
+master-bin.000193 #
+master-bin.000194 #
+master-bin.000195 #
+master-bin.000196 #
+master-bin.000197 #
+master-bin.000198 #
+master-bin.000199 #
+master-bin.000200 #
+master-bin.000201 #
+master-bin.000202 #
+master-bin.000203 #
+master-bin.000204 #
+master-bin.000205 #
+master-bin.000206 #
+master-bin.000207 #
+master-bin.000208 #
+master-bin.000209 #
+master-bin.000210 #
+master-bin.000211 #
+master-bin.000212 #
+master-bin.000213 #
+master-bin.000214 #
+master-bin.000215 #
+master-bin.000216 #
+master-bin.000217 #
+master-bin.000218 #
+master-bin.000219 #
+master-bin.000220 #
+master-bin.000221 #
+master-bin.000222 #
+master-bin.000223 #
+master-bin.000224 #
+master-bin.000225 #
+master-bin.000226 #
+master-bin.000227 #
+master-bin.000228 #
+master-bin.000229 #
+master-bin.000230 #
+master-bin.000231 #
+master-bin.000232 #
+master-bin.000233 #
+master-bin.000234 #
+master-bin.000235 #
+master-bin.000236 #
+master-bin.000237 #
+master-bin.000238 #
+master-bin.000239 #
+master-bin.000240 #
+master-bin.000241 #
+master-bin.000242 #
+master-bin.000243 #
+master-bin.000244 #
+master-bin.000245 #
+master-bin.000246 #
+master-bin.000247 #
+master-bin.000248 #
+master-bin.000249 #
+master-bin.000250 #
+master-bin.000251 #
+master-bin.000252 #
+master-bin.000253 #
+master-bin.000254 #
+master-bin.000255 #
+master-bin.000256 #
+master-bin.000257 #
+master-bin.000258 #
+master-bin.000259 #
+master-bin.000260 #
+master-bin.000261 #
+master-bin.000262 #
+master-bin.000263 #
+master-bin.000264 #
+master-bin.000265 #
+master-bin.000266 #
+master-bin.000267 #
+master-bin.000268 #
+master-bin.000269 #
+master-bin.000270 #
+master-bin.000271 #
+master-bin.000272 #
+master-bin.000273 #
+master-bin.000274 #
+master-bin.000275 #
+master-bin.000276 #
+master-bin.000277 #
+master-bin.000278 #
+master-bin.000279 #
+master-bin.000280 #
+master-bin.000281 #
+master-bin.000282 #
+master-bin.000283 #
+master-bin.000284 #
+master-bin.000285 #
+master-bin.000286 #
+master-bin.000287 #
+master-bin.000288 #
+master-bin.000289 #
+master-bin.000290 #
+master-bin.000291 #
+reset master;
+# left times= 3
+flush logs;
+flush logs;
+show master status;
+File Position Binlog_Do_DB Binlog_Ignore_DB
+master-bin.000003 # <Binlog_Do_DB> <Binlog_Ignore_DB>
+reset master;
+# left times= 2
+flush logs;
+flush logs;
+show master status;
+File Position Binlog_Do_DB Binlog_Ignore_DB
+master-bin.000003 # <Binlog_Do_DB> <Binlog_Ignore_DB>
+reset master;
+# left times= 1
+flush logs;
+flush logs;
+show master status;
+File Position Binlog_Do_DB Binlog_Ignore_DB
+master-bin.000003 # <Binlog_Do_DB> <Binlog_Ignore_DB>
+reset master;
+# left times= 0
+# [engine=myisam] after first loop_times=0, show master logs results
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+show master status;
+File Position Binlog_Do_DB Binlog_Ignore_DB
+master-bin.000001 # <Binlog_Do_DB> <Binlog_Ignore_DB>
+flush logs;
+flush logs;
+#begin to restart mysqld current loop_times=10
+# restart
+# left restart times= 9
+flush logs;
+flush logs;
+#begin to restart mysqld current loop_times=9
+# restart
+# left restart times= 8
+flush logs;
+flush logs;
+#begin to restart mysqld current loop_times=8
+# restart
+# left restart times= 7
+flush logs;
+flush logs;
+#begin to restart mysqld current loop_times=7
+# restart
+# left restart times= 6
+flush logs;
+flush logs;
+#begin to restart mysqld current loop_times=6
+# restart
+# left restart times= 5
+flush logs;
+flush logs;
+#begin to restart mysqld current loop_times=5
+# restart
+# left restart times= 4
+flush logs;
+flush logs;
+#begin to restart mysqld current loop_times=4
+# restart
+# left restart times= 3
+flush logs;
+flush logs;
+#begin to restart mysqld current loop_times=3
+# restart
+# left restart times= 2
+flush logs;
+flush logs;
+#begin to restart mysqld current loop_times=2
+# restart
+# left restart times= 1
+flush logs;
+flush logs;
+#begin to restart mysqld current loop_times=1
+# restart
+# left restart times= 0
+# [engine=myisam] after second loop_times=0, show master logs results
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+master-bin.000002 #
+master-bin.000003 #
+master-bin.000004 #
+master-bin.000005 #
+master-bin.000006 #
+master-bin.000007 #
+master-bin.000008 #
+master-bin.000009 #
+master-bin.000010 #
+master-bin.000011 #
+master-bin.000012 #
+master-bin.000013 #
+master-bin.000014 #
+master-bin.000015 #
+master-bin.000016 #
+master-bin.000017 #
+master-bin.000018 #
+master-bin.000019 #
+master-bin.000020 #
+master-bin.000021 #
+master-bin.000022 #
+master-bin.000023 #
+master-bin.000024 #
+master-bin.000025 #
+master-bin.000026 #
+master-bin.000027 #
+master-bin.000028 #
+master-bin.000029 #
+master-bin.000030 #
+master-bin.000031 #
+# ======= now try to change the log-bin config for mysqld =======
+#begin to restart mysqld
+# restart: --log-bin=new_log_bin
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+master-bin.000002 #
+master-bin.000003 #
+master-bin.000004 #
+master-bin.000005 #
+master-bin.000006 #
+master-bin.000007 #
+master-bin.000008 #
+master-bin.000009 #
+master-bin.000010 #
+master-bin.000011 #
+master-bin.000012 #
+master-bin.000013 #
+master-bin.000014 #
+master-bin.000015 #
+master-bin.000016 #
+master-bin.000017 #
+master-bin.000018 #
+master-bin.000019 #
+master-bin.000020 #
+master-bin.000021 #
+master-bin.000022 #
+master-bin.000023 #
+master-bin.000024 #
+master-bin.000025 #
+master-bin.000026 #
+master-bin.000027 #
+master-bin.000028 #
+master-bin.000029 #
+master-bin.000030 #
+master-bin.000031 #
+new_log_bin.000001 #
+flush logs;
+flush logs;
+flush logs;
+# left times= 9
+flush logs;
+flush logs;
+flush logs;
+# left times= 8
+flush logs;
+flush logs;
+flush logs;
+# left times= 7
+flush logs;
+flush logs;
+flush logs;
+# left times= 6
+flush logs;
+flush logs;
+flush logs;
+# left times= 5
+flush logs;
+flush logs;
+flush logs;
+# left times= 4
+flush logs;
+flush logs;
+show binary logs;
+Log_name File_size
+master-bin.000001 #
+master-bin.000002 #
+master-bin.000003 #
+master-bin.000004 #
+master-bin.000005 #
+master-bin.000006 #
+master-bin.000007 #
+master-bin.000008 #
+master-bin.000009 #
+master-bin.000010 #
+master-bin.000011 #
+master-bin.000012 #
+master-bin.000013 #
+master-bin.000014 #
+master-bin.000015 #
+master-bin.000016 #
+master-bin.000017 #
+master-bin.000018 #
+master-bin.000019 #
+master-bin.000020 #
+master-bin.000021 #
+master-bin.000022 #
+master-bin.000023 #
+master-bin.000024 #
+master-bin.000025 #
+master-bin.000026 #
+master-bin.000027 #
+master-bin.000028 #
+master-bin.000029 #
+master-bin.000030 #
+master-bin.000031 #
+new_log_bin.000001 #
+new_log_bin.000002 #
+new_log_bin.000003 #
+new_log_bin.000004 #
+new_log_bin.000005 #
+new_log_bin.000006 #
+new_log_bin.000007 #
+new_log_bin.000008 #
+new_log_bin.000009 #
+new_log_bin.000010 #
+new_log_bin.000011 #
+new_log_bin.000012 #
+new_log_bin.000013 #
+new_log_bin.000014 #
+new_log_bin.000015 #
+new_log_bin.000016 #
+new_log_bin.000017 #
+new_log_bin.000018 #
+new_log_bin.000019 #
+new_log_bin.000020 #
+new_log_bin.000021 #
+reset master;
+# left times= 3
+flush logs;
+flush logs;
+reset master;
+# left times= 2
+flush logs;
+flush logs;
+reset master;
+# left times= 1
+flush logs;
+flush logs;
+reset master;
+# left times= 0
+# [engine=myisam] after third loop_times=0, show master logs results
+show binary logs;
+Log_name File_size
+new_log_bin.000001 #
+drop database test_rotate_db;
diff --git a/mysql-test/suite/binlog/r/binlog_row_binlog.result b/mysql-test/suite/binlog/r/binlog_row_binlog.result
index 4068a80771c..2d79de6960a 100644
--- a/mysql-test/suite/binlog/r/binlog_row_binlog.result
+++ b/mysql-test/suite/binlog/r/binlog_row_binlog.result
@@ -843,9 +843,9 @@ create table if not exists t2 select * from t1;
create temporary table tt1 (a int);
create table if not exists t3 like tt1;
USE mysql;
-INSERT IGNORE INTO user SET host='localhost', user='@#@', password=password('Just a test');
-UPDATE user SET password=password('Another password') WHERE host='localhost' AND user='@#@';
-DELETE FROM user WHERE host='localhost' AND user='@#@';
+INSERT db SET host='localhost', user='@#@', db='Just a test';
+UPDATE db SET db='Another db' WHERE host='localhost' AND user='@#@';
+DELETE FROM db WHERE host='localhost' AND user='@#@';
use test;
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -894,18 +894,18 @@ master-bin.000001 # Query # # use `test`; CREATE TABLE IF NOT EXISTS `t3` (
`a` int(11) DEFAULT NULL
) ENGINE=MyISAM
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Annotate_rows # # INSERT IGNORE INTO user SET host='localhost', user='@#@', password=password('Just a test')
-master-bin.000001 # Table_map # # table_id: # (mysql.user)
+master-bin.000001 # Annotate_rows # # INSERT db SET host='localhost', user='@#@', db='Just a test'
+master-bin.000001 # Table_map # # table_id: # (mysql.db)
master-bin.000001 # Write_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Annotate_rows # # UPDATE user SET password=password('Another password') WHERE host='localhost' AND user='@#@'
-master-bin.000001 # Table_map # # table_id: # (mysql.user)
+master-bin.000001 # Annotate_rows # # UPDATE db SET db='Another db' WHERE host='localhost' AND user='@#@'
+master-bin.000001 # Table_map # # table_id: # (mysql.db)
master-bin.000001 # Update_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Annotate_rows # # DELETE FROM user WHERE host='localhost' AND user='@#@'
-master-bin.000001 # Table_map # # table_id: # (mysql.user)
+master-bin.000001 # Annotate_rows # # DELETE FROM db WHERE host='localhost' AND user='@#@'
+master-bin.000001 # Table_map # # table_id: # (mysql.db)
master-bin.000001 # Delete_rows_v1 # # table_id: # flags: STMT_END_F
master-bin.000001 # Query # # COMMIT
drop table t1,t2,t3,tt1;
diff --git a/mysql-test/suite/binlog/r/binlog_stm_binlog.result b/mysql-test/suite/binlog/r/binlog_stm_binlog.result
index 872ba40e05f..ccc3db2ba7e 100644
--- a/mysql-test/suite/binlog/r/binlog_stm_binlog.result
+++ b/mysql-test/suite/binlog/r/binlog_stm_binlog.result
@@ -1,4 +1,3 @@
-drop table if exists t1;
create table t1 (a int, b int) engine=innodb;
begin;
insert into t1 values (1,2);
@@ -6,8 +5,6 @@ commit;
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `t1` /* generated by server */
-master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table t1 (a int, b int) engine=innodb
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Query # # use `test`; insert into t1 values (1,2)
@@ -451,9 +448,9 @@ create table if not exists t2 select * from t1;
create temporary table tt1 (a int);
create table if not exists t3 like tt1;
USE mysql;
-INSERT IGNORE INTO user SET host='localhost', user='@#@', password=password('Just a test');
-UPDATE user SET password=password('Another password') WHERE host='localhost' AND user='@#@';
-DELETE FROM user WHERE host='localhost' AND user='@#@';
+INSERT db SET host='localhost', user='@#@', db='Just a test';
+UPDATE db SET db='Another db' WHERE host='localhost' AND user='@#@';
+DELETE FROM db WHERE host='localhost' AND user='@#@';
use test;
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -501,15 +498,14 @@ master-bin.000001 # Query # # use `test`; create temporary table tt1 (a int)
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; create table if not exists t3 like tt1
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Query # # use `mysql`; INSERT IGNORE INTO user SET host='localhost', user='@#@', password=password('Just a test')
+master-bin.000001 # Query # # use `mysql`; INSERT db SET host='localhost', user='@#@', db='Just a test'
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Query # # use `mysql`; UPDATE user SET password=password('Another password') WHERE host='localhost' AND user='@#@'
+master-bin.000001 # Query # # use `mysql`; UPDATE db SET db='Another db' WHERE host='localhost' AND user='@#@'
master-bin.000001 # Query # # COMMIT
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Query # # use `mysql`; DELETE FROM user WHERE host='localhost' AND user='@#@'
+master-bin.000001 # Query # # use `mysql`; DELETE FROM db WHERE host='localhost' AND user='@#@'
master-bin.000001 # Query # # COMMIT
-master-bin.000001 # Rotate # # master-bin.000002;pos=POS
drop table t1,t2,t3,tt1;
reset master;
create table t1 (a int not null auto_increment, primary key (a)) engine=myisam;
diff --git a/mysql-test/suite/binlog/r/show_concurrent_rotate.result b/mysql-test/suite/binlog/r/show_concurrent_rotate.result
new file mode 100644
index 00000000000..cee5de33973
--- /dev/null
+++ b/mysql-test/suite/binlog/r/show_concurrent_rotate.result
@@ -0,0 +1,16 @@
+connect con1,localhost,root,,;
+FLUSH LOGS;
+FLUSH LOGS;
+FLUSH LOGS;
+SET DEBUG_SYNC= "at_after_lock_index WAIT_FOR con1_go";
+SHOW BINARY LOGS;
+connect con2,localhost,root,,;
+RESET MASTER;
+FLUSH LOGS;
+SET DEBUG_SYNC= "now SIGNAL con1_go";
+connection con1;
+# The correct result must consists of two records
+Log_name File_size
+master-bin.000001 #
+master-bin.000002 #
+SET debug_sync = 'reset';
diff --git a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_row_frag.test b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_row_frag.test
index 2d55aa79d48..6765b26f3da 100644
--- a/mysql-test/suite/binlog/t/binlog_mysqlbinlog_row_frag.test
+++ b/mysql-test/suite/binlog/t/binlog_mysqlbinlog_row_frag.test
@@ -7,7 +7,7 @@ CREATE TABLE t (a TEXT);
# events of interest are guaranteed to stay in 000001 log
RESET MASTER;
--eval INSERT INTO t SET a=repeat('a', 1024)
-SELECT a from t into @a;
+SELECT a into @a from t;
FLUSH LOGS;
DELETE FROM t;
diff --git a/mysql-test/suite/binlog/t/binlog_rotate_perf.test b/mysql-test/suite/binlog/t/binlog_rotate_perf.test
new file mode 100644
index 00000000000..74c91feca97
--- /dev/null
+++ b/mysql-test/suite/binlog/t/binlog_rotate_perf.test
@@ -0,0 +1,102 @@
+# ==== Purpose ====
+#
+# Functional test for the open_binlog call that is used when
+#
+# FLUSH LOGS or RESET MASTER/SLAVE is executed
+#
+# ==== Related ====
+#
+# TXSQL feature 67 binlog rotate perf optimization
+
+let $engine= myisam;
+
+--source include/have_binlog_format_row.inc
+--connect(conn1,localhost,root,,test)
+
+reset master;
+create database test_rotate_db;
+use test_rotate_db;
+--echo #current engine=$engine
+# Create a new table
+--replace_column 2 #
+show binary logs;
+--eval CREATE TABLE t1_$engine (c1 INT) ENGINE=$engine
+--eval insert into t1_$engine values(0),(1)
+
+# do batch flush and show
+let $loop_times= 100;
+--source include/show_master_status.inc
+while ($loop_times) {
+ flush logs;
+ flush logs;
+ if ($loop_times < 5)
+ {
+ --source include/show_master_status.inc
+ if ($loop_times == 4)
+ {
+ --source include/show_binary_logs.inc
+ }
+ reset master;
+ }
+ if ($loop_times >= 5)
+ {
+ flush logs;
+ }
+
+ #
+ dec $loop_times;
+ --echo # left times= $loop_times
+}
+--echo # [engine=$engine] after first loop_times=$loop_times, show master logs results
+--source include/show_binary_logs.inc
+
+# do batch flush and show with restart mysql
+--source include/show_master_status.inc
+let $loop_times= 10;
+while ($loop_times) {
+ flush logs;
+ flush logs;
+ --echo #begin to restart mysqld current loop_times=$loop_times
+ --source include/restart_mysqld.inc
+ #
+ dec $loop_times;
+ --echo # left restart times= $loop_times
+}
+--echo # [engine=$engine] after second loop_times=$loop_times, show master logs results
+--source include/show_binary_logs.inc
+
+
+# try to change the log-bin configs and restart
+--echo # ======= now try to change the log-bin config for mysqld =======
+--let $restart_parameters="--log-bin=new_log_bin"
+--echo #begin to restart mysqld
+--source include/restart_mysqld.inc
+--let $restart_parameters= ""
+
+--source include/show_binary_logs.inc
+let $loop_times= 10;
+while ($loop_times) {
+ flush logs;
+ flush logs;
+ if ($loop_times < 5)
+ {
+ if ($loop_times == 4)
+ {
+ --source include/show_binary_logs.inc
+ }
+ reset master;
+ }
+ if ($loop_times >= 5)
+ {
+ flush logs;
+ }
+
+ #
+ dec $loop_times;
+ --echo # left times= $loop_times
+}
+--echo # [engine=$engine] after third loop_times=$loop_times, show master logs results
+--source include/show_binary_logs.inc
+
+##cleanup
+drop database test_rotate_db;
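
The binlog_rotate_perf.test added above drives the rotation path purely through mtr's flush logs / reset master loop. For a quick measurement outside the test framework, the same workload can be timed from a few lines of Perl; this is only a sketch under stated assumptions — DBD::mysql installed, a server reachable on 127.0.0.1:3306, and a passwordless root account — none of which the patch itself defines.

use strict;
use warnings;
use DBI;
use Time::HiRes qw(gettimeofday tv_interval);

# Assumed connection settings; adjust host/port/credentials to your setup.
my $dbh = DBI->connect('DBI:mysql:host=127.0.0.1;port=3306',
                       'root', '', { RaiseError => 1 });

my $rotations = 100;
my $t0 = [gettimeofday];
$dbh->do('FLUSH LOGS') for 1 .. $rotations;   # each FLUSH LOGS rotates the binlog
printf "%d binlog rotations took %.3f seconds\n", $rotations, tv_interval($t0);

$dbh->disconnect;

Comparing the elapsed time before and after the patch gives a rough external check of the rotation overhead that the test above exercises inside mtr.
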
diff --git a/mysql-test/suite/binlog/t/binlog_stm_binlog.test b/mysql-test/suite/binlog/t/binlog_stm_binlog.test
index c3d8066f807..e9c8e0ed874 100644
--- a/mysql-test/suite/binlog/t/binlog_stm_binlog.test
+++ b/mysql-test/suite/binlog/t/binlog_stm_binlog.test
@@ -3,10 +3,6 @@
let collation=utf8_unicode_ci;
--source include/have_collation.inc
---disable_warnings
-drop table if exists t1;
---enable_warnings
-
# REQUIREMENT
# replace_regex should replace output of SHOW BINLOG EVENTS
diff --git a/mysql-test/suite/binlog/t/show_concurrent_rotate.test b/mysql-test/suite/binlog/t/show_concurrent_rotate.test
new file mode 100644
index 00000000000..79d36c30a86
--- /dev/null
+++ b/mysql-test/suite/binlog/t/show_concurrent_rotate.test
@@ -0,0 +1,24 @@
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
+# merely to limit its run rate
+--source include/have_binlog_format_row.inc
+
+connect(con1,localhost,root,,);
+FLUSH LOGS;
+FLUSH LOGS;
+FLUSH LOGS;
+
+SET DEBUG_SYNC= "at_after_lock_index WAIT_FOR con1_go";
+--send SHOW BINARY LOGS
+
+connect(con2,localhost,root,,);
+RESET MASTER;
+FLUSH LOGS;
+SET DEBUG_SYNC= "now SIGNAL con1_go";
+
+--connection con1
+--echo # The correct result must consist of two records
+--replace_column 2 #
+--reap
+
+SET debug_sync = 'reset';
diff --git a/mysql-test/suite/binlog_encryption/binlog_index.result b/mysql-test/suite/binlog_encryption/binlog_index.result
index 02af5e40aab..ba91cac20e3 100644
--- a/mysql-test/suite/binlog_encryption/binlog_index.result
+++ b/mysql-test/suite/binlog_encryption/binlog_index.result
@@ -145,6 +145,7 @@ master-bin.000009
master-bin.000010
master-bin.000011
+# restart
SET @index=LOAD_FILE('MYSQLTEST_VARDIR/mysqld.1/data//master-bin.index');
SELECT @index;
@index
@@ -171,6 +172,7 @@ master-bin.000010
master-bin.000011
master-bin.000012
+# restart
SET @index=LOAD_FILE('MYSQLTEST_VARDIR/mysqld.1/data//master-bin.index');
SELECT @index;
@index
diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel.result b/mysql-test/suite/binlog_encryption/rpl_parallel.result
index 20f3facea27..5f78a378829 100644
--- a/mysql-test/suite/binlog_encryption/rpl_parallel.result
+++ b/mysql-test/suite/binlog_encryption/rpl_parallel.result
@@ -1518,6 +1518,7 @@ SET SESSION debug_dbug="+d,binlog_force_commit_id";
SET @commit_id= 10000;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
INSERT INTO t3 VALUES (120, 0);
SET @commit_id= 10001;
diff --git a/mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.result b/mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.result
index 2f7f1b07cb4..936f604be2e 100644
--- a/mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.result
+++ b/mysql-test/suite/binlog_encryption/rpl_switch_stm_row_mixed.result
@@ -140,7 +140,7 @@ create table t4 select * from t1 where 3 in (select 1 union select 2 union selec
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
Warnings:
-Warning 1292 Incorrect datetime value: '3'
+Warning 1292 Truncated incorrect datetime value: '3'
insert ignore into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4);
create procedure foo()
begin
diff --git a/mysql-test/suite/compat/mssql/parser.result b/mysql-test/suite/compat/mssql/parser.result
new file mode 100644
index 00000000000..817439a826c
--- /dev/null
+++ b/mysql-test/suite/compat/mssql/parser.result
@@ -0,0 +1,87 @@
+SET sql_mode=MSSQL;
+#
+# Start of 10.4 tests
+#
+#
+# MDEV-19142 sql_mode=MSSQL: Bracket identifiers
+#
+SELECT 'test' AS [[];
+[
+test
+SELECT 'test' AS []]];
+]
+test
+SELECT 'test' AS [[a]]];
+[a]
+test
+SELECT 'test' AS [\n];
+\n
+test
+CREATE TABLE [t 1] ([a b] INT);
+SHOW CREATE TABLE [t 1];
+Table Create Table
+t 1 CREATE TABLE "t 1" (
+ "a b" int(11) DEFAULT NULL
+)
+INSERT INTO [t 1] VALUES (10);
+SELECT [a b] FROM [t 1];
+a b
+10
+SELECT [a b] [a b alias] FROM [t 1] [t 1 alias];
+a b alias
+10
+SELECT [a b] FROM [test].[t 1];
+a b
+10
+SELECT [a b], COUNT(*) FROM [t 1] GROUP BY [a b];
+a b COUNT(*)
+10 1
+SELECT [a b], COUNT(*) FROM [t 1] GROUP BY [a b] HAVING [a b]>0;
+a b COUNT(*)
+10 1
+DROP TABLE [t 1];
+CREATE TABLE [t[1]]] (a INT);
+SHOW CREATE TABLE [t[1]]];
+Table Create Table
+t[1] CREATE TABLE "t[1]" (
+ "a" int(11) DEFAULT NULL
+)
+DROP TABLE [t[1]]];
+CREATE TABLE [t 1] ([a b] INT);
+CREATE VIEW [v 1] AS SELECT [a b] FROM [t 1];
+SHOW CREATE VIEW [v 1];
+View Create View character_set_client collation_connection
+v 1 CREATE VIEW "v 1" AS select "t 1"."a b" AS "a b" from "t 1" latin1 latin1_swedish_ci
+SELECT * FROM [v 1];
+a b
+DROP VIEW [v 1];
+DROP TABLE [t 1];
+CREATE PROCEDURE [p 1]()
+BEGIN
+SELECT 'test' [a b];
+END;
+$$
+SHOW CREATE PROCEDURE [p 1];
+Procedure sql_mode Create Procedure character_set_client collation_connection Database Collation
+p 1 PIPES_AS_CONCAT,ANSI_QUOTES,IGNORE_SPACE,MSSQL,NO_KEY_OPTIONS,NO_TABLE_OPTIONS,NO_FIELD_OPTIONS CREATE DEFINER="root"@"localhost" PROCEDURE "p 1"()
+BEGIN
+SELECT 'test' [a b];
+END latin1 latin1_swedish_ci latin1_swedish_ci
+CALL [p 1];
+a b
+test
+DROP PROCEDURE [p 1];
+CREATE TABLE [t1] ([a] INT);
+INSERT INTO t1 VALUES (10);
+PREPARE [stmt] FROM 'SELECT [a] FROM [test].[t1]';
+EXECUTE [stmt];
+a
+10
+DEALLOCATE PREPARE [stmt];
+EXECUTE IMMEDIATE 'SELECT [a] FROM [test].[t1]';
+a
+10
+DROP TABLE [t1];
+#
+# End of 10.4 tests
+#
diff --git a/mysql-test/suite/compat/mssql/parser.test b/mysql-test/suite/compat/mssql/parser.test
new file mode 100644
index 00000000000..59c6735c6c2
--- /dev/null
+++ b/mysql-test/suite/compat/mssql/parser.test
@@ -0,0 +1,68 @@
+SET sql_mode=MSSQL;
+
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+
+--echo #
+--echo # MDEV-19142 sql_mode=MSSQL: Bracket identifiers
+--echo #
+
+# Brackets inside bracket identifiers:
+# - When we want a left bracket inside a bracket identifier,
+# we just add a single left bracket: [
+# - When we want a right bracket inside a bracket identifier,
+# we add two right brackets: ]]
+
+
+SELECT 'test' AS [[];
+SELECT 'test' AS []]];
+SELECT 'test' AS [[a]]];
+
+# Backslash has no special meaning
+SELECT 'test' AS [\n];
+
+
+CREATE TABLE [t 1] ([a b] INT);
+SHOW CREATE TABLE [t 1];
+INSERT INTO [t 1] VALUES (10);
+SELECT [a b] FROM [t 1];
+SELECT [a b] [a b alias] FROM [t 1] [t 1 alias];
+SELECT [a b] FROM [test].[t 1];
+SELECT [a b], COUNT(*) FROM [t 1] GROUP BY [a b];
+SELECT [a b], COUNT(*) FROM [t 1] GROUP BY [a b] HAVING [a b]>0;
+DROP TABLE [t 1];
+
+CREATE TABLE [t[1]]] (a INT);
+SHOW CREATE TABLE [t[1]]];
+DROP TABLE [t[1]]];
+
+CREATE TABLE [t 1] ([a b] INT);
+CREATE VIEW [v 1] AS SELECT [a b] FROM [t 1];
+SHOW CREATE VIEW [v 1];
+SELECT * FROM [v 1];
+DROP VIEW [v 1];
+DROP TABLE [t 1];
+
+DELIMITER $$;
+CREATE PROCEDURE [p 1]()
+BEGIN
+ SELECT 'test' [a b];
+END;
+$$
+DELIMITER ;$$
+SHOW CREATE PROCEDURE [p 1];
+CALL [p 1];
+DROP PROCEDURE [p 1];
+
+CREATE TABLE [t1] ([a] INT);
+INSERT INTO t1 VALUES (10);
+PREPARE [stmt] FROM 'SELECT [a] FROM [test].[t1]';
+EXECUTE [stmt];
+DEALLOCATE PREPARE [stmt];
+EXECUTE IMMEDIATE 'SELECT [a] FROM [test].[t1]';
+DROP TABLE [t1];
+
+--echo #
+--echo # End of 10.4 tests
+--echo #
diff --git a/mysql-test/suite/compat/oracle/r/custom_aggregate_functions.result b/mysql-test/suite/compat/oracle/r/custom_aggregate_functions.result
new file mode 100644
index 00000000000..21fac1939bc
--- /dev/null
+++ b/mysql-test/suite/compat/oracle/r/custom_aggregate_functions.result
@@ -0,0 +1,136 @@
+SET sql_mode=ORACLE;
+create aggregate function f1(x INT) return INT AS
+begin
+insert into t1(sal) values (x);
+return x;
+end|
+ERROR HY000: Aggregate specific instruction(FETCH GROUP NEXT ROW) missing from the aggregate function
+create function f1(x INT) return INT AS
+begin
+set x=5;
+fetch group next row;
+return x+1;
+end |
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+CREATE TABLE marks(stud_id INT, grade_count INT);
+INSERT INTO marks VALUES (1,6), (2,4), (3,7), (4,5), (5,8);
+SELECT * FROM marks;
+stud_id grade_count
+1 6
+2 4
+3 7
+4 5
+5 8
+# Using PL/SQL syntax: EXCEPTION WHEN NO_DATA_FOUND
+CREATE AGGREGATE FUNCTION IF NOT EXISTS aggregate_count(x INT) RETURN INT AS
+count_students INT DEFAULT 0;
+BEGIN
+LOOP
+FETCH GROUP NEXT ROW;
+IF x THEN
+count_students:= count_students + 1;
+END IF;
+END LOOP;
+EXCEPTION
+WHEN NO_DATA_FOUND THEN
+RETURN count_students;
+END aggregate_count //
+SELECT aggregate_count(stud_id) FROM marks;
+aggregate_count(stud_id)
+5
+DROP FUNCTION IF EXISTS aggregate_count;
+# Using SQL/PSM syntax: CONTINUE HANDLER
+CREATE AGGREGATE FUNCTION IF NOT EXISTS aggregate_count(x INT) RETURN INT AS
+count_students INT DEFAULT 0;
+CONTINUE HANDLER FOR NOT FOUND RETURN count_students;
+BEGIN
+LOOP
+FETCH GROUP NEXT ROW;
+IF x THEN
+SET count_students= count_students + 1;
+END IF;
+END LOOP;
+END //
+SELECT aggregate_count(stud_id) FROM marks;
+aggregate_count(stud_id)
+5
+DROP FUNCTION IF EXISTS aggregate_count;
+DROP TABLE marks;
+#
+# MDEV-18813 PROCEDURE and anonymous blocks silently ignore FETCH GROUP NEXT ROW
+#
+CREATE PROCEDURE p1 AS
+BEGIN
+FETCH GROUP NEXT ROW;
+END;
+$$
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+BEGIN NOT ATOMIC
+FETCH GROUP NEXT ROW;
+END;
+$$
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+CREATE DEFINER=root@localhost FUNCTION f1 RETURN INT AS
+BEGIN
+FETCH GROUP NEXT ROW;
+RETURN 0;
+END;
+$$
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+CREATE TABLE t1 (a INT);
+CREATE TRIGGER tr1
+AFTER INSERT ON t1 FOR EACH ROW
+FETCH GROUP NEXT ROW;
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+DROP TABLE t1;
+CREATE EVENT ev1
+ON SCHEDULE EVERY 1 HOUR
+STARTS CURRENT_TIMESTAMP + INTERVAL 1 MONTH
+ENDS CURRENT_TIMESTAMP + INTERVAL 1 MONTH + INTERVAL 1 WEEK
+DO FETCH GROUP NEXT ROW;
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+CREATE PACKAGE pkg1 AS
+PROCEDURE p1;
+FUNCTION f1 RETURN INT;
+END;
+$$
+CREATE PACKAGE BODY pkg1 AS
+PROCEDURE p1 AS
+BEGIN
+FETCH GROUP NEXT ROW; -- In a package procedure
+END;
+FUNCTION f1 RETURN INT AS
+BEGIN
+RETURN 0;
+END;
+END;
+$$
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+CREATE PACKAGE BODY pkg1 AS
+PROCEDURE p1 AS
+BEGIN
+NULL;
+END;
+FUNCTION f1 RETURN INT AS
+BEGIN
+FETCH GROUP NEXT ROW; -- In a package function
+RETURN 0;
+END;
+END;
+$$
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+CREATE PACKAGE BODY pkg1 AS
+PROCEDURE p1 AS
+BEGIN
+NULL;
+END;
+FUNCTION f1 RETURN INT AS
+BEGIN
+RETURN 0;
+END;
+BEGIN
+FETCH GROUP NEXT ROW; -- In a package executable section
+END;
+$$
+ERROR HY000: Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context
+DROP PACKAGE pkg1;
diff --git a/mysql-test/suite/compat/oracle/r/plugin.result b/mysql-test/suite/compat/oracle/r/plugin.result
new file mode 100644
index 00000000000..c885c03e656
--- /dev/null
+++ b/mysql-test/suite/compat/oracle/r/plugin.result
@@ -0,0 +1,48 @@
+SET sql_mode=ORACLE;
+#
+# MDEV-16294: INSTALL PLUGIN IF NOT EXISTS / UNINSTALL PLUGIN IF EXISTS
+#
+# INSTALL PLUGIN IF NOT EXISTS name SONAME library
+# UNINSTALL PLUGIN|SONAME IF EXISTS name
+#
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+PLUGIN_NAME PLUGIN_STATUS PLUGIN_TYPE
+INSTALL PLUGIN IF NOT EXISTS example SONAME 'ha_example';
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+PLUGIN_NAME PLUGIN_STATUS PLUGIN_TYPE
+EXAMPLE ACTIVE STORAGE ENGINE
+INSTALL PLUGIN example SONAME 'ha_example';
+ERROR HY000: Plugin 'example' already installed
+INSTALL PLUGIN IF NOT EXISTS example SONAME 'ha_example';
+Warnings:
+Note 1968 Plugin 'example' already installed
+SHOW WARNINGS;
+Level Code Message
+Note 1968 Plugin 'example' already installed
+UNINSTALL PLUGIN IF EXISTS example;
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+PLUGIN_NAME PLUGIN_STATUS PLUGIN_TYPE
+UNINSTALL PLUGIN IF EXISTS example;
+Warnings:
+Note 1305 PLUGIN example does not exist
+SHOW WARNINGS;
+Level Code Message
+Note 1305 PLUGIN example does not exist
+UNINSTALL PLUGIN example;
+ERROR 42000: PLUGIN example does not exist
+INSTALL SONAME 'ha_example';
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+PLUGIN_NAME PLUGIN_STATUS PLUGIN_TYPE
+EXAMPLE ACTIVE STORAGE ENGINE
+UNUSABLE ACTIVE DAEMON
+UNINSTALL SONAME IF EXISTS 'ha_example';
+UNINSTALL SONAME IF EXISTS 'ha_example';
+Warnings:
+Note 1305 SONAME ha_example.so does not exist
+SHOW WARNINGS;
+Level Code Message
+Note 1305 SONAME ha_example.so does not exist
+select PLUGIN_NAME,PLUGIN_STATUS,PLUGIN_TYPE from information_schema.plugins where plugin_library like 'ha_example%';
+PLUGIN_NAME PLUGIN_STATUS PLUGIN_TYPE
+UNINSTALL SONAME 'ha_example';
+ERROR 42000: SONAME ha_example.so does not exist
diff --git a/mysql-test/suite/compat/oracle/r/ps.result b/mysql-test/suite/compat/oracle/r/ps.result
index 158d15e9f90..73aa04b972c 100644
--- a/mysql-test/suite/compat/oracle/r/ps.result
+++ b/mysql-test/suite/compat/oracle/r/ps.result
@@ -163,7 +163,7 @@ RETURN 'test';
END;
$$
EXECUTE IMMEDIATE 'SELECT ? FROM DUAL' USING f1();
-ERROR 42000: EXECUTE..USING does not support subqueries or stored functions
+ERROR 42000: EXECUTE IMMEDIATE does not support subqueries or stored functions
DROP FUNCTION f1;
#
# Testing simple expressions
diff --git a/mysql-test/suite/compat/oracle/r/sp-package-innodb.result b/mysql-test/suite/compat/oracle/r/sp-package-innodb.result
index 50eb2dc6cd0..0ac357df5da 100644
--- a/mysql-test/suite/compat/oracle/r/sp-package-innodb.result
+++ b/mysql-test/suite/compat/oracle/r/sp-package-innodb.result
@@ -23,6 +23,8 @@ a:=a+1;
INSERT INTO t1 VALUES (a,'pkg1 initialization');
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL pkg1.p1;
SELECT * FROM t1 ORDER BY a;
a routine
diff --git a/mysql-test/suite/compat/oracle/r/sp-package-mdl.result b/mysql-test/suite/compat/oracle/r/sp-package-mdl.result
index 18cc834461c..bb46341f202 100644
--- a/mysql-test/suite/compat/oracle/r/sp-package-mdl.result
+++ b/mysql-test/suite/compat/oracle/r/sp-package-mdl.result
@@ -61,8 +61,8 @@ TABLE_NAME pkg1.p1
CONN 2
INFO DROP PACKAGE pkg1
STATE Waiting for stored package body metadata lock
-LOCK_MODE MDL_INTENTION_EXCLUSIVE
-LOCK_TYPE Global read lock
+LOCK_MODE MDL_BACKUP_DDL
+LOCK_TYPE Backup lock
TABLE_NAME
CONN 2
INFO DROP PACKAGE pkg1
diff --git a/mysql-test/suite/compat/oracle/r/sp-package.result b/mysql-test/suite/compat/oracle/r/sp-package.result
index 9a53b04d4ad..4f0f05b1939 100644
--- a/mysql-test/suite/compat/oracle/r/sp-package.result
+++ b/mysql-test/suite/compat/oracle/r/sp-package.result
@@ -2028,6 +2028,8 @@ $$
CALL p1.p1();
@a
11
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1.p1();
@a
12
@@ -2059,6 +2061,8 @@ BEGIN
SELECT MAX(a) FROM t1 INTO @a;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1.p1();
@a
11
@@ -2092,6 +2096,8 @@ BEGIN
SELECT 1 FROM t1 INTO @a;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1.p1();
ERROR 42S02: Table 'test.t1' doesn't exist
SELECT p1.f1();
@@ -2650,6 +2656,9 @@ SELECT * FROM t1 INTO b;
SELECT b.a, b.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1.p1;
b.a b.b
10 b
diff --git a/mysql-test/suite/compat/oracle/r/sp-row.result b/mysql-test/suite/compat/oracle/r/sp-row.result
index 72b33768864..218fb5d463a 100644
--- a/mysql-test/suite/compat/oracle/r/sp-row.result
+++ b/mysql-test/suite/compat/oracle/r/sp-row.result
@@ -2833,6 +2833,8 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: The used SELECT statements have a different number of columns
DROP TABLE t1;
@@ -2847,6 +2849,8 @@ SELECT * FROM t1 INTO rec1, rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: Operand should contain 2 column(s)
DROP TABLE t1;
@@ -2861,6 +2865,8 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
rec1.a rec1.b
10 b10
@@ -2876,6 +2882,8 @@ SELECT 10,'a','b' FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: The used SELECT statements have a different number of columns
DROP TABLE t1;
@@ -2890,6 +2898,8 @@ SELECT 10,'a' FROM t1 INTO rec1, rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: Operand should contain 2 column(s)
DROP TABLE t1;
@@ -2904,6 +2914,8 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
rec1.a rec1.b
10 b10
@@ -2920,6 +2932,8 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: The used SELECT statements have a different number of columns
DROP TABLE t1;
@@ -2935,6 +2949,8 @@ SELECT * FROM t1 INTO rec1, rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
ERROR 21000: Operand should contain 2 column(s)
DROP TABLE t1;
@@ -2950,6 +2966,8 @@ SELECT * FROM t1 INTO rec1;
SELECT rec1.a, rec1.b;
END;
$$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL p1();
rec1.a rec1.b
10 b10
diff --git a/mysql-test/suite/compat/oracle/r/sp.result b/mysql-test/suite/compat/oracle/r/sp.result
index 8fa49c4092a..6db999b238f 100644
--- a/mysql-test/suite/compat/oracle/r/sp.result
+++ b/mysql-test/suite/compat/oracle/r/sp.result
@@ -1019,9 +1019,7 @@ LOOP
EXIT WHEN unknown_ident IS NULL;
END LOOP;
END$$
-CALL p1;
ERROR 42S22: Unknown column 'unknown_ident' in 'field list'
-DROP PROCEDURE p1;
CREATE PROCEDURE p1
AS
BEGIN
@@ -1030,9 +1028,7 @@ LOOP
EXIT label WHEN unknown_ident IS NULL;
END LOOP;
END$$
-CALL p1;
ERROR 42S22: Unknown column 'unknown_ident' in 'field list'
-DROP PROCEDURE p1;
CREATE PROCEDURE p1
AS
BEGIN
@@ -1040,9 +1036,7 @@ LOOP
CONTINUE WHEN unknown_ident IS NULL;
END LOOP;
END$$
-CALL p1;
ERROR 42S22: Unknown column 'unknown_ident' in 'field list'
-DROP PROCEDURE p1;
CREATE PROCEDURE p1
AS
BEGIN
@@ -1051,9 +1045,7 @@ LOOP
CONTINUE label WHEN unknown_ident IS NULL;
END LOOP;
END$$
-CALL p1;
ERROR 42S22: Unknown column 'unknown_ident' in 'field list'
-DROP PROCEDURE p1;
#
# MDEV-10583 sql_mode=ORACLE: SQL%ROWCOUNT
#
diff --git a/mysql-test/suite/compat/oracle/r/versioning.result b/mysql-test/suite/compat/oracle/r/versioning.result
index ebedcf0f462..bbecfa1f6b1 100644
--- a/mysql-test/suite/compat/oracle/r/versioning.result
+++ b/mysql-test/suite/compat/oracle/r/versioning.result
@@ -14,3 +14,11 @@ SELECT * FROM t1 FOR SYSTEM_TIME AS OF (NOW()+INTERVAL 10 YEAR);
a
20
DROP TABLE t1;
+#
+# MDEV-17959 Assertion `opt_bootstrap || mysql_parse_status || thd->lex->select_stack_top == 0' failed in parse_sql upon DELETE HISTORY under ORACLE mode
+#
+SET SQL_MODE= ORACLE;
+CREATE TABLE t1 (a INT);
+DELETE HISTORY FROM t1;
+ERROR HY000: Table `t1` is not system-versioned
+DROP TABLE t1;
diff --git a/mysql-test/suite/compat/oracle/t/custom_aggregate_functions.test b/mysql-test/suite/compat/oracle/t/custom_aggregate_functions.test
new file mode 100644
index 00000000000..0affc4efa29
--- /dev/null
+++ b/mysql-test/suite/compat/oracle/t/custom_aggregate_functions.test
@@ -0,0 +1,170 @@
+SET sql_mode=ORACLE;
+
+delimiter |;
+--error ER_INVALID_AGGREGATE_FUNCTION
+create aggregate function f1(x INT) return INT AS
+begin
+ insert into t1(sal) values (x);
+ return x;
+end|
+
+--error ER_NOT_AGGREGATE_FUNCTION
+create function f1(x INT) return INT AS
+begin
+ set x=5;
+ fetch group next row;
+return x+1;
+end |
+
+DELIMITER ;|
+
+
+CREATE TABLE marks(stud_id INT, grade_count INT);
+INSERT INTO marks VALUES (1,6), (2,4), (3,7), (4,5), (5,8);
+SELECT * FROM marks;
+
+--echo # Using PL/SQL syntax: EXCEPTION WHEN NO_DATA_FOUND
+
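+# FETCH GROUP NEXT ROW suspends the function until the next row of the
+# current group is available; once the group is exhausted, a NO_DATA_FOUND
+# condition is raised, which the EXCEPTION block below uses to return the
+# accumulated count.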
+DELIMITER //;
+CREATE AGGREGATE FUNCTION IF NOT EXISTS aggregate_count(x INT) RETURN INT AS
+ count_students INT DEFAULT 0;
+BEGIN
+ LOOP
+ FETCH GROUP NEXT ROW;
+ IF x THEN
+ count_students:= count_students + 1;
+ END IF;
+ END LOOP;
+EXCEPTION
+ WHEN NO_DATA_FOUND THEN
+ RETURN count_students;
+END aggregate_count //
+DELIMITER ;//
+SELECT aggregate_count(stud_id) FROM marks;
+DROP FUNCTION IF EXISTS aggregate_count;
+
+
+--echo # Using SQL/PSM syntax: CONTINUE HANDLER
+
+DELIMITER //;
+CREATE AGGREGATE FUNCTION IF NOT EXISTS aggregate_count(x INT) RETURN INT AS
+ count_students INT DEFAULT 0;
+ CONTINUE HANDLER FOR NOT FOUND RETURN count_students;
+BEGIN
+ LOOP
+ FETCH GROUP NEXT ROW;
+ IF x THEN
+ SET count_students= count_students + 1;
+ END IF;
+ END LOOP;
+END //
+DELIMITER ;//
+SELECT aggregate_count(stud_id) FROM marks;
+DROP FUNCTION IF EXISTS aggregate_count;
+
+
+DROP TABLE marks;
+
+
+--echo #
+--echo # MDEV-18813 PROCEDURE and anonymous blocks silently ignore FETCH GROUP NEXT ROW
+--echo #
+
+
+DELIMITER $$;
+--error ER_NOT_AGGREGATE_FUNCTION
+CREATE PROCEDURE p1 AS
+BEGIN
+ FETCH GROUP NEXT ROW;
+END;
+$$
+DELIMITER ;$$
+
+
+DELIMITER $$;
+--error ER_NOT_AGGREGATE_FUNCTION
+BEGIN NOT ATOMIC
+ FETCH GROUP NEXT ROW;
+END;
+$$
+DELIMITER ;$$
+
+
+DELIMITER $$;
+--error ER_NOT_AGGREGATE_FUNCTION
+CREATE DEFINER=root@localhost FUNCTION f1 RETURN INT AS
+BEGIN
+ FETCH GROUP NEXT ROW;
+ RETURN 0;
+END;
+$$
+DELIMITER ;$$
+
+
+CREATE TABLE t1 (a INT);
+--error ER_NOT_AGGREGATE_FUNCTION
+CREATE TRIGGER tr1
+ AFTER INSERT ON t1 FOR EACH ROW
+ FETCH GROUP NEXT ROW;
+DROP TABLE t1;
+
+
+--error ER_NOT_AGGREGATE_FUNCTION
+CREATE EVENT ev1
+ ON SCHEDULE EVERY 1 HOUR
+ STARTS CURRENT_TIMESTAMP + INTERVAL 1 MONTH
+ ENDS CURRENT_TIMESTAMP + INTERVAL 1 MONTH + INTERVAL 1 WEEK
+DO FETCH GROUP NEXT ROW;
+
+
+DELIMITER $$;
+CREATE PACKAGE pkg1 AS
+ PROCEDURE p1;
+ FUNCTION f1 RETURN INT;
+END;
+$$
+
+--error ER_NOT_AGGREGATE_FUNCTION
+CREATE PACKAGE BODY pkg1 AS
+ PROCEDURE p1 AS
+ BEGIN
+ FETCH GROUP NEXT ROW; -- In a package procedure
+ END;
+ FUNCTION f1 RETURN INT AS
+ BEGIN
+ RETURN 0;
+ END;
+END;
+$$
+
+--error ER_NOT_AGGREGATE_FUNCTION
+CREATE PACKAGE BODY pkg1 AS
+ PROCEDURE p1 AS
+ BEGIN
+ NULL;
+ END;
+ FUNCTION f1 RETURN INT AS
+ BEGIN
+ FETCH GROUP NEXT ROW; -- In a package function
+ RETURN 0;
+ END;
+END;
+$$
+
+--error ER_NOT_AGGREGATE_FUNCTION
+CREATE PACKAGE BODY pkg1 AS
+ PROCEDURE p1 AS
+ BEGIN
+ NULL;
+ END;
+ FUNCTION f1 RETURN INT AS
+ BEGIN
+ RETURN 0;
+ END;
+BEGIN
+ FETCH GROUP NEXT ROW; -- In a package executable section
+END;
+$$
+
+DELIMITER ;$$
+DROP PACKAGE pkg1;
diff --git a/mysql-test/suite/compat/oracle/t/plugin.test b/mysql-test/suite/compat/oracle/t/plugin.test
new file mode 100644
index 00000000000..a84c4ae7828
--- /dev/null
+++ b/mysql-test/suite/compat/oracle/t/plugin.test
@@ -0,0 +1,3 @@
+SET sql_mode=ORACLE;
+
+--source include/install_plugin_if_exists.inc
diff --git a/mysql-test/suite/compat/oracle/t/sp.test b/mysql-test/suite/compat/oracle/t/sp.test
index 6020bd95993..96b4cd59fbd 100644
--- a/mysql-test/suite/compat/oracle/t/sp.test
+++ b/mysql-test/suite/compat/oracle/t/sp.test
@@ -1092,6 +1092,7 @@ DROP FUNCTION f1;
--echo #
DELIMITER $$;
+--error ER_BAD_FIELD_ERROR
CREATE PROCEDURE p1
AS
BEGIN
@@ -1100,12 +1101,10 @@ BEGIN
END LOOP;
END$$
DELIMITER ;$$
---error ER_BAD_FIELD_ERROR
-CALL p1;
-DROP PROCEDURE p1;
DELIMITER $$;
+--error ER_BAD_FIELD_ERROR
CREATE PROCEDURE p1
AS
BEGIN
@@ -1115,12 +1114,10 @@ BEGIN
END LOOP;
END$$
DELIMITER ;$$
---error ER_BAD_FIELD_ERROR
-CALL p1;
-DROP PROCEDURE p1;
DELIMITER $$;
+--error ER_BAD_FIELD_ERROR
CREATE PROCEDURE p1
AS
BEGIN
@@ -1129,12 +1126,10 @@ BEGIN
END LOOP;
END$$
DELIMITER ;$$
---error ER_BAD_FIELD_ERROR
-CALL p1;
-DROP PROCEDURE p1;
DELIMITER $$;
+--error ER_BAD_FIELD_ERROR
CREATE PROCEDURE p1
AS
BEGIN
@@ -1144,9 +1139,6 @@ BEGIN
END LOOP;
END$$
DELIMITER ;$$
---error ER_BAD_FIELD_ERROR
-CALL p1;
-DROP PROCEDURE p1;
--echo #
--echo # MDEV-10583 sql_mode=ORACLE: SQL%ROWCOUNT
diff --git a/mysql-test/suite/compat/oracle/t/versioning.test b/mysql-test/suite/compat/oracle/t/versioning.test
index d70058c56e4..abcca8c588a 100644
--- a/mysql-test/suite/compat/oracle/t/versioning.test
+++ b/mysql-test/suite/compat/oracle/t/versioning.test
@@ -11,3 +11,13 @@ INSERT INTO t1 VALUES (20);
SELECT * FROM t1 FOR SYSTEM_TIME ALL;
SELECT * FROM t1 FOR SYSTEM_TIME AS OF (NOW()+INTERVAL 10 YEAR);
DROP TABLE t1;
+
+--echo #
+--echo # MDEV-17959 Assertion `opt_bootstrap || mysql_parse_status || thd->lex->select_stack_top == 0' failed in parse_sql upon DELETE HISTORY under ORACLE mode
+--echo #
+
+SET SQL_MODE= ORACLE;
+CREATE TABLE t1 (a INT);
+--error ER_VERS_NOT_VERSIONED
+DELETE HISTORY FROM t1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/csv/flush.result b/mysql-test/suite/csv/flush.result
new file mode 100644
index 00000000000..b0b9b21bd0a
--- /dev/null
+++ b/mysql-test/suite/csv/flush.result
@@ -0,0 +1,25 @@
+CREATE TABLE t1(a INT NOT NULL) ENGINE=csv;
+INSERT INTO t1 VALUES(1);
+connect con1, localhost, root;
+LOCK TABLE t1 READ;
+connection default;
+FLUSH TABLES WITH READ LOCK;
+UNLOCK TABLES;
+# Must return 1 row
+SELECT * FROM t2;
+a
+1
+SELECT * FROM t1;
+a
+1
+connection con1;
+UNLOCK TABLES;
+connection default;
+INSERT INTO t2 VALUES(2);
+INSERT INTO t2 VALUES(2);
+SELECT * from t1,t2;
+a a
+1 1
+1 2
+1 2
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/csv/flush.test b/mysql-test/suite/csv/flush.test
new file mode 100644
index 00000000000..934ac26f291
--- /dev/null
+++ b/mysql-test/suite/csv/flush.test
@@ -0,0 +1,30 @@
+--source include/have_csv.inc
+
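+# This test copies the .frm/.CSV/.CSM files of table t1 (read-locked by
+# another connection) while FLUSH TABLES WITH READ LOCK is in effect,
+# creating a new table t2, and then checks that t2 contains the row
+# inserted into t1 before the copy.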
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
+CREATE TABLE t1(a INT NOT NULL) ENGINE=csv;
+INSERT INTO t1 VALUES(1);
+# works correctly if uncommented
+#FLUSH TABLE t1;
+
+connect(con1, localhost, root);
+LOCK TABLE t1 READ;
+
+connection default;
+FLUSH TABLES WITH READ LOCK;
+copy_file $MYSQLD_DATADIR/test/t1.frm $MYSQLD_DATADIR/test/t2.frm;
+copy_file $MYSQLD_DATADIR/test/t1.CSV $MYSQLD_DATADIR/test/t2.CSV;
+copy_file $MYSQLD_DATADIR/test/t1.CSM $MYSQLD_DATADIR/test/t2.CSM;
+UNLOCK TABLES;
+--echo # Must return 1 row
+SELECT * FROM t2;
+SELECT * FROM t1;
+connection con1;
+UNLOCK TABLES;
+
+connection default;
+
+INSERT INTO t2 VALUES(2);
+INSERT INTO t2 VALUES(2);
+SELECT * from t1,t2;
+
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/encryption/r/create_or_replace.result b/mysql-test/suite/encryption/r/create_or_replace.result
index 1671043b50d..f876de7346f 100644
--- a/mysql-test/suite/encryption/r/create_or_replace.result
+++ b/mysql-test/suite/encryption/r/create_or_replace.result
@@ -24,3 +24,4 @@ SET GLOBAL innodb_encryption_threads = 4;
# Success!
SET GLOBAL innodb_encryption_threads = 0;
SET GLOBAL innodb_encrypt_tables = OFF;
+# restart
diff --git a/mysql-test/suite/encryption/r/debug_key_management.result b/mysql-test/suite/encryption/r/debug_key_management.result
index 02e05b4d221..c06d2bb3965 100644
--- a/mysql-test/suite/encryption/r/debug_key_management.result
+++ b/mysql-test/suite/encryption/r/debug_key_management.result
@@ -1,3 +1,4 @@
+create table t1(a serial) engine=innoDB;
set global innodb_encrypt_tables=ON;
show variables like 'innodb_encrypt%';
Variable_name Value
@@ -13,5 +14,14 @@ set global debug_key_management_version=10;
select count(*) from information_schema.innodb_tablespaces_encryption where current_key_version <> 10;
count(*)
0
+SET GLOBAL debug_dbug = '+d,ib_log';
+SET GLOBAL innodb_log_checkpoint_now = 1;
+SET GLOBAL innodb_flush_log_at_trx_commit = 1;
+INSERT INTO t1 VALUES(NULL);
+# restart
set global innodb_encrypt_tables=OFF;
set global debug_key_management_version=1;
+select * from t1;
+a
+1
+drop table t1;
diff --git a/mysql-test/suite/encryption/r/encrypt_and_grep.result b/mysql-test/suite/encryption/r/encrypt_and_grep.result
index e5ba46d10d2..635dca14868 100644
--- a/mysql-test/suite/encryption/r/encrypt_and_grep.result
+++ b/mysql-test/suite/encryption/r/encrypt_and_grep.result
@@ -27,6 +27,7 @@ NOT FOUND /tempsecret/ in t2.ibd
FOUND 12 /dummysecret/ in t3.ibd
# ibdata1 expecting NOT FOUND
NOT FOUND /foobarsecret/ in ibdata1
+# restart
# Now turn off encryption and wait for threads to decrypt everything
SET GLOBAL innodb_encrypt_tables = off;
# Wait max 10 min for key encryption threads to decrypt all spaces
@@ -49,6 +50,7 @@ FOUND 12 /tempsecret/ in t2.ibd
FOUND 12 /dummysecret/ in t3.ibd
# ibdata1 expecting NOT FOUND
NOT FOUND /foobarsecret/ in ibdata1
+# restart
# Now turn on encryption and wait for threads to encrypt all spaces
SET GLOBAL innodb_encrypt_tables = on;
# Wait max 10 min for key encryption threads to encrypt all spaces
@@ -71,4 +73,5 @@ NOT FOUND /tempsecret/ in t2.ibd
FOUND 12 /dummysecret/ in t3.ibd
# ibdata1 expecting NOT FOUND
NOT FOUND /foobarsecret/ in ibdata1
+# restart
drop table t1, t2, t3;
diff --git a/mysql-test/suite/encryption/r/innochecksum.result b/mysql-test/suite/encryption/r/innochecksum.result
index 59804f548ae..98bc92ec015 100644
--- a/mysql-test/suite/encryption/r/innochecksum.result
+++ b/mysql-test/suite/encryption/r/innochecksum.result
@@ -20,17 +20,20 @@ CREATE TABLE t6 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB;
# Run innochecksum on t3
# no encryption corrupting the field should not have effect
# Run innochecksum on t6
-# no encryption corrupting the field should not have effect
+# In the new checksum format, the checksum is calculated over the whole page,
+# so corrupting this field should have an effect.
# Restore the original tables
# Corrupt FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION+4 (post encryption checksum)
# Run innochecksum on t2
# Run innochecksum on t3
# Run innochecksum on t6
-# no encryption corrupting the field should not have effect
+# In the new checksum format, the checksum is calculated over the whole page,
+# so corrupting this field should have an effect.
# Restore the original tables
# Corrupt FIL_DATA+10 (data)
# Run innochecksum on t2
# Run innochecksum on t3
# Run innochecksum on t6
# Restore the original tables
+# restart
DROP TABLE t1, t2, t3, t4, t5, t6;
diff --git a/mysql-test/suite/encryption/r/innodb-bad-key-change.result b/mysql-test/suite/encryption/r/innodb-bad-key-change.result
index 68267f2498d..45c32317557 100644
--- a/mysql-test/suite/encryption/r/innodb-bad-key-change.result
+++ b/mysql-test/suite/encryption/r/innodb-bad-key-change.result
@@ -7,6 +7,7 @@ call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=[1-9][0-9]*,
call mtr.add_suppression("InnoDB: Table `test`\\.`t[12]` is corrupted");
call mtr.add_suppression("File '.*mysql-test.std_data.keysbad3\\.txt' not found");
# Start server with keys2.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
SET GLOBAL innodb_file_per_table = ON;
CREATE TABLE t1 (c VARCHAR(8)) ENGINE=InnoDB ENCRYPTED=YES ENCRYPTION_KEY_ID=2;
INSERT INTO t1 VALUES ('foobar');
@@ -28,15 +29,19 @@ foobar 1
foobar 2
# Restart server with keysbad3.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keysbad3.txt
SELECT * FROM t1;
ERROR 42S02: Table 'test.t1' doesn't exist in engine
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keysbad3.txt
DROP TABLE t1;
# Start server with keys3.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
SET GLOBAL innodb_default_encryption_key_id=5;
CREATE TABLE t2 (c VARCHAR(8), id int not null primary key, b int, key(b)) ENGINE=InnoDB ENCRYPTED=YES;
INSERT INTO t2 VALUES ('foobar',1,2);
# Restart server with keys2.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
SELECT * FROM t2;
ERROR 42S02: Table 'test.t2' doesn't exist in engine
SELECT * FROM t2 where id = 1;
@@ -66,3 +71,4 @@ ERROR 42S02: Table 'test.t2' doesn't exist in engine
DROP TABLE t2;
# Start server with keys2.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
diff --git a/mysql-test/suite/encryption/r/innodb-bad-key-change2.result b/mysql-test/suite/encryption/r/innodb-bad-key-change2.result
index 9cf98b6b1fd..78b9b7854de 100644
--- a/mysql-test/suite/encryption/r/innodb-bad-key-change2.result
+++ b/mysql-test/suite/encryption/r/innodb-bad-key-change2.result
@@ -6,10 +6,12 @@ call mtr.add_suppression("InnoDB: Tablespace for table \`test\`.\`t1\` is set as
call mtr.add_suppression("InnoDB: Table `test`\\.`t1` is corrupted");
call mtr.add_suppression("InnoDB: Cannot delete tablespace .* because it is not found in the tablespace memory cache");
call mtr.add_suppression("InnoDB: ALTER TABLE `test`\\.`t1` DISCARD TABLESPACE failed to find tablespace");
+# restart: --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
SET GLOBAL innodb_file_per_table = ON;
CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(8)) ENGINE=InnoDB
ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+# restart: --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
SELECT * FROM t1;
ERROR 42S02: Table 'test.t1' doesn't exist in engine
SHOW WARNINGS;
@@ -32,9 +34,11 @@ test.t1 check Error Table 'test.t1' doesn't exist in engine
test.t1 check status Operation failed
SHOW WARNINGS;
Level Code Message
+# restart: --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
FLUSH TABLES t1 FOR EXPORT;
backup: t1
UNLOCK TABLES;
+# restart: --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
ALTER TABLE t1 DISCARD TABLESPACE;
ERROR 42S02: Table 'test.t1' doesn't exist in engine
DROP TABLE t1;
@@ -42,6 +46,7 @@ CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(8)) ENGINE=InnoDB
ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
ALTER TABLE t1 DISCARD TABLESPACE;
restore: t1 .ibd and .cfg files
+# restart: --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
ALTER TABLE t1 DISCARD TABLESPACE;
Warnings:
Warning 1814 Tablespace has been discarded for table `t1`
@@ -55,6 +60,7 @@ t1 CREATE TABLE `t1` (
`f` varchar(8) DEFAULT NULL,
PRIMARY KEY (`pk`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 `ENCRYPTED`=YES `ENCRYPTION_KEY_ID`=4
+# restart: --innodb-encrypt-tables --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
RENAME TABLE t1 TO t1new;
ERROR HY000: Error on rename of './test/t1' to './test/t1new' (errno: 155 "The table does not exist in the storage engine")
ALTER TABLE t1 RENAME TO t1new;
diff --git a/mysql-test/suite/encryption/r/innodb-bad-key-change3.result b/mysql-test/suite/encryption/r/innodb-bad-key-change3.result
index 2d414d75c21..160335388b1 100644
--- a/mysql-test/suite/encryption/r/innodb-bad-key-change3.result
+++ b/mysql-test/suite/encryption/r/innodb-bad-key-change3.result
@@ -22,6 +22,7 @@ backup: t1
UNLOCK TABLES;
ALTER TABLE t1 DISCARD TABLESPACE;
restore: t1 .ibd and .cfg files
+# restart
ALTER TABLE t1 IMPORT TABLESPACE;
ERROR HY000: Got error 192 'Table encrypted but decryption failed. This could be because correct encryption management plugin is not loaded, used encryption key is not available or encryption method does not match.' from InnoDB
SHOW CREATE TABLE t1;
diff --git a/mysql-test/suite/encryption/r/innodb-bad-key-change4.result b/mysql-test/suite/encryption/r/innodb-bad-key-change4.result
index d3bbe2eccf3..6c23f94eb47 100644
--- a/mysql-test/suite/encryption/r/innodb-bad-key-change4.result
+++ b/mysql-test/suite/encryption/r/innodb-bad-key-change4.result
@@ -3,10 +3,12 @@ call mtr.add_suppression("InnoDB: The page \\[page id: space=[1-9][0-9]*, page n
call mtr.add_suppression("failed to read or decrypt \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\]");
call mtr.add_suppression("Couldn't load plugins from 'file_key_management");
call mtr.add_suppression("InnoDB: Table `test`\\.`t1` is corrupted");
+# restart: --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
SET GLOBAL innodb_file_per_table = ON;
CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(8)) ENGINE=InnoDB
ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+# restart: --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize Error Table 'test.t1' doesn't exist in engine
@@ -19,4 +21,5 @@ test.t1 check Error Table 'test.t1' doesn't exist in engine
test.t1 check status Operation failed
SHOW WARNINGS;
Level Code Message
+# restart: --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
DROP TABLE t1;
diff --git a/mysql-test/suite/encryption/r/innodb-compressed-blob.result b/mysql-test/suite/encryption/r/innodb-compressed-blob.result
index ef49c9a6541..de20b554a67 100644
--- a/mysql-test/suite/encryption/r/innodb-compressed-blob.result
+++ b/mysql-test/suite/encryption/r/innodb-compressed-blob.result
@@ -3,6 +3,7 @@ call mtr.add_suppression("failed to read or decrypt \\[page id: space=[1-9][0-9]
call mtr.add_suppression("InnoDB: Unable to decompress ..test.t[1-3]\\.ibd\\[page id: space=[1-9][0-9]*, page number=[0-9]+\\]");
call mtr.add_suppression("InnoDB: Table `test`\\.`t[12]` is corrupted");
# Restart mysqld --file-key-management-filename=keys2.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
SET GLOBAL innodb_file_per_table = ON;
set GLOBAL innodb_default_encryption_key_id=4;
create table t1(a int not null primary key, b blob, index(b(10))) engine=innodb row_format=compressed;
@@ -14,6 +15,7 @@ insert into t1 values (1, repeat('secret',6000));
insert into t2 values (1, repeat('secret',6000));
insert into t3 values (1, repeat('secret',6000));
# Restart mysqld --file-key-management-filename=keys3.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
select count(*) from t1 FORCE INDEX (b) where b like 'secret%';
ERROR 42S02: Table 'test.t1' doesn't exist in engine
select count(*) from t2 FORCE INDEX (b) where b like 'secret%';
@@ -22,4 +24,5 @@ select count(*) from t3 FORCE INDEX (b) where b like 'secret%';
count(*)
1
# Restart mysqld --file-key-management-filename=keys2.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
drop table t1,t2,t3;
diff --git a/mysql-test/suite/encryption/r/innodb-encryption-alter.result b/mysql-test/suite/encryption/r/innodb-encryption-alter.result
index 934f0607958..8a4ab8088d5 100644
--- a/mysql-test/suite/encryption/r/innodb-encryption-alter.result
+++ b/mysql-test/suite/encryption/r/innodb-encryption-alter.result
@@ -96,6 +96,7 @@ connection default;
SET DEBUG_SYNC = 'now WAIT_FOR done';
SET GLOBAL innodb_flush_log_at_trx_commit=1;
COMMIT;
+# restart
disconnect con1;
select * from t1;
f1 f2
diff --git a/mysql-test/suite/encryption/r/innodb-encryption-disable.result b/mysql-test/suite/encryption/r/innodb-encryption-disable.result
index 9d6da0a0291..e49a6b759e9 100644
--- a/mysql-test/suite/encryption/r/innodb-encryption-disable.result
+++ b/mysql-test/suite/encryption/r/innodb-encryption-disable.result
@@ -4,6 +4,7 @@ call mtr.add_suppression("failed to read or decrypt \\[page id: space=[1-9][0-9]
call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=[1-9][0-9]*, page number=3\\] in file .*test.t[15].ibd looks corrupted; key_version=1");
call mtr.add_suppression("InnoDB: Table `test`\\.`t[15]` is corrupted");
call mtr.add_suppression("Couldn't load plugins from 'file_key_management");
+# restart: --innodb-encrypt-tables=ON --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
create table t5 (
`intcol1` int(32) DEFAULT NULL,
`intcol2` int(32) DEFAULT NULL,
@@ -21,9 +22,11 @@ CREATE TABLE `t1` (
) ENGINE=InnoDB;
insert into t1 values (1,2,'maria','db','encryption');
alter table t1 encrypted='yes' `encryption_key_id`=1;
+# restart: --innodb-encrypt-tables=OFF
select * from t1;
ERROR 42S02: Table 'test.t1' doesn't exist in engine
select * from t5;
ERROR 42S02: Table 'test.t5' doesn't exist in engine
+# restart: --innodb-encrypt-tables=ON --plugin-load-add=file_key_management.so --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
drop table t1;
drop table t5;
diff --git a/mysql-test/suite/encryption/r/innodb-first-page-read.result b/mysql-test/suite/encryption/r/innodb-first-page-read.result
deleted file mode 100644
index 29253885e83..00000000000
--- a/mysql-test/suite/encryption/r/innodb-first-page-read.result
+++ /dev/null
@@ -1,96 +0,0 @@
-FLUSH STATUS;
-create database innodb_test;
-use innodb_test;
-create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb;
-create table innodb_compact(c1 bigint not null, b char(200)) engine=innodb row_format=compact;
-create table innodb_dynamic(c1 bigint not null, b char(200)) engine=innodb row_format=dynamic;
-create table innodb_compressed(c1 bigint not null, b char(200)) engine=innodb row_format=compressed;
-create table innodb_compressed1(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=1;
-create table innodb_compressed2(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=2;
-create table innodb_compressed4(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=4;
-create table innodb_compressed8(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=8;
-create table innodb_compressed16(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=16;
-create table innodb_redundant(c1 bigint not null, b char(200)) engine=innodb row_format=redundant;
-create table innodb_pagecomp(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes;
-create table innodb_pagecomp1(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=1;
-create table innodb_pagecomp2(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=2;
-create table innodb_pagecomp3(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=3;
-create table innodb_pagecomp4(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=4;
-create table innodb_pagecomp5(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=5;
-create table innodb_pagecomp6(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=6;
-create table innodb_pagecomp7(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=7;
-create table innodb_pagecomp8(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=8;
-create table innodb_pagecomp9(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=9;
-create table innodb_datadir1(c1 bigint not null, b char(200)) engine=innodb DATA DIRECTORY='MYSQL_TMP_DIR';
-create table innodb_datadir2(c1 bigint not null, b char(200)) engine=innodb row_format=compressed DATA DIRECTORY='MYSQL_TMP_DIR';
-create table innodb_datadir3(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes DATA DIRECTORY='MYSQL_TMP_DIR';
-begin;
-insert into innodb_normal values (1,'secret');
-insert into innodb_compact select * from innodb_normal;
-insert into innodb_dynamic select * from innodb_normal;
-insert into innodb_compressed select * from innodb_normal;
-insert into innodb_compressed1 select * from innodb_normal;
-insert into innodb_compressed2 select * from innodb_normal;
-insert into innodb_compressed4 select * from innodb_normal;
-insert into innodb_compressed8 select * from innodb_normal;
-insert into innodb_compressed16 select * from innodb_normal;
-insert into innodb_redundant select * from innodb_normal;
-insert into innodb_pagecomp select * from innodb_normal;
-insert into innodb_pagecomp1 select * from innodb_normal;
-insert into innodb_pagecomp2 select * from innodb_normal;
-insert into innodb_pagecomp3 select * from innodb_normal;
-insert into innodb_pagecomp4 select * from innodb_normal;
-insert into innodb_pagecomp5 select * from innodb_normal;
-insert into innodb_pagecomp6 select * from innodb_normal;
-insert into innodb_pagecomp7 select * from innodb_normal;
-insert into innodb_pagecomp8 select * from innodb_normal;
-insert into innodb_pagecomp9 select * from innodb_normal;
-insert into innodb_datadir1 select * from innodb_normal;
-insert into innodb_datadir2 select * from innodb_normal;
-insert into innodb_datadir3 select * from innodb_normal;
-commit;
-FLUSH STATUS;
-# Restart server and see how many page 0's are read
-# result should actual number of tables except remote tables could be read twice
-# i.e. < 23 + 3*2 = 29
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-VARIABLE_VALUE <= 29
-1
-use innodb_test;
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-VARIABLE_VALUE <= 29
-1
-use test;
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-VARIABLE_VALUE <= 29
-1
-set global innodb_encrypt_tables=OFF;
-# wait until tables are decrypted
-# result should be actual number of tables except remote tables could be read twice
-# i.e. < 23 + 3*2 = 29
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-VARIABLE_VALUE <= 29
-1
-use innodb_test;
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-VARIABLE_VALUE <= 29
-1
-use test;
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-VARIABLE_VALUE <= 29
-1
-FLUSH STATUS;
-# restart and see number read page 0
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-VARIABLE_VALUE <= 29
-1
-use innodb_test;
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-VARIABLE_VALUE <= 29
-1
-use test;
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-VARIABLE_VALUE <= 29
-1
-drop database innodb_test;
-FLUSH STATUS;
diff --git a/mysql-test/suite/encryption/r/innodb-force-corrupt.result b/mysql-test/suite/encryption/r/innodb-force-corrupt.result
index c1145e574b7..8c9f480554d 100644
--- a/mysql-test/suite/encryption/r/innodb-force-corrupt.result
+++ b/mysql-test/suite/encryption/r/innodb-force-corrupt.result
@@ -1,5 +1,5 @@
call mtr.add_suppression("InnoDB: Table `test`\\.`t[13]` (has an unreadable root page|is corrupted)");
-call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=\\d+, page number=[36]\\] in file .*test.t[123]\\.ibd looks corrupted; key_version=3221342974");
+call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=\\d+, page number=[36]\\] in file .*test.t[123]\\.ibd looks corrupted; key_version=");
call mtr.add_suppression("InnoDB: Table `test`\\.`t[13]` is corrupted");
SET GLOBAL innodb_file_per_table = ON;
set global innodb_compression_algorithm = 1;
@@ -15,6 +15,7 @@ INSERT INTO t3 select * from t1;
COMMIT;
# Backup tables before corrupting
# Corrupt tables
+# restart
SELECT * FROM t1;
ERROR 42S02: Table 'test.t1' doesn't exist in engine
SELECT * FROM t2;
@@ -22,4 +23,5 @@ ERROR HY000: Got error 192 'Table encrypted but decryption failed. This could be
SELECT * FROM t3;
ERROR 42S02: Table 'test.t3' doesn't exist in engine
# Restore the original tables
+# restart
DROP TABLE t1,t2,t3;
diff --git a/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result b/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result
index f5a91fb352e..02304fbda17 100644
--- a/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result
+++ b/mysql-test/suite/encryption/r/innodb-key-rotation-disable.result
@@ -57,4 +57,5 @@ FOUND 1 /public/ in t7.ibd
FOUND 1 /public/ in t8.ibd
# t9 page compressed expecting NOT FOUND
NOT FOUND /public/ in t9.ibd
+# restart
drop database enctests;
diff --git a/mysql-test/suite/encryption/r/innodb-missing-key.result b/mysql-test/suite/encryption/r/innodb-missing-key.result
index 3a501335e2d..ecb4ad2c40e 100644
--- a/mysql-test/suite/encryption/r/innodb-missing-key.result
+++ b/mysql-test/suite/encryption/r/innodb-missing-key.result
@@ -4,6 +4,7 @@ call mtr.add_suppression("failed to read or decrypt \\[page id: space=[1-9][0-9]
call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\] in file .*test.t[12].ibd looks corrupted; key_version=1");
call mtr.add_suppression("InnoDB: Table `test`\\.`t1` is corrupted");
# Start server with keys2.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
CREATE TABLE t1(a int not null primary key auto_increment, b varchar(128)) engine=innodb ENCRYPTED=YES ENCRYPTION_KEY_ID=19;
CREATE TABLE t2(a int not null primary key auto_increment, b varchar(128)) engine=innodb ENCRYPTED=YES ENCRYPTION_KEY_ID=1;
CREATE TABLE t3(a int not null primary key auto_increment, b varchar(128)) engine=innodb ENCRYPTED=NO;
@@ -23,6 +24,7 @@ INSERT INTO t2 SELECT * FROM t1;
INSERT INTO t3 SELECT * FROM t1;
# Restart server with keys3.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
set global innodb_encryption_rotate_key_age = 1;
use test;
CREATE TABLE t4(a int not null primary key auto_increment, b varchar(128)) engine=innodb ENCRYPTED=YES ENCRYPTION_KEY_ID=1;
@@ -43,6 +45,7 @@ SELECT COUNT(1) FROM t1;
ERROR 42S02: Table 'test.t1' doesn't exist in engine
# Start server with keys2.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
SELECT COUNT(1) FROM t1;
COUNT(1)
2048
diff --git a/mysql-test/suite/encryption/r/innodb-page_encryption.result b/mysql-test/suite/encryption/r/innodb-page_encryption.result
index 1069b8652da..2997f90ab97 100644
--- a/mysql-test/suite/encryption/r/innodb-page_encryption.result
+++ b/mysql-test/suite/encryption/r/innodb-page_encryption.result
@@ -118,6 +118,7 @@ variable_value >= 0
SELECT variable_value >= 0 FROM information_schema.global_status WHERE variable_name = 'innodb_num_pages_decrypted';
variable_value >= 0
1
+# restart
update innodb_normal set c1 = c1 +1;
update innodb_compact set c1 = c1 + 1;
update innodb_compressed set c1 = c1 + 1;
@@ -193,6 +194,7 @@ innodb_redundant CREATE TABLE `innodb_redundant` (
`c1` bigint(20) NOT NULL,
`b` char(200) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT
+# restart
show create table innodb_compact;
Table Create Table
innodb_compact CREATE TABLE `innodb_compact` (
diff --git a/mysql-test/suite/encryption/r/innodb-page_encryption_compression.result b/mysql-test/suite/encryption/r/innodb-page_encryption_compression.result
index 808345ab4ca..86d0fecd2e8 100644
--- a/mysql-test/suite/encryption/r/innodb-page_encryption_compression.result
+++ b/mysql-test/suite/encryption/r/innodb-page_encryption_compression.result
@@ -41,6 +41,7 @@ call innodb_insert_proc(2000);
insert into innodb_compact select * from innodb_normal;
insert into innodb_dynamic select * from innodb_normal;
commit;
+# restart: --innodb-encrypt-tables=OFF
set global innodb_compression_algorithm = 1;
alter table innodb_normal engine=innodb page_compressed=DEFAULT;
show create table innodb_normal;
diff --git a/mysql-test/suite/encryption/r/innodb-page_encryption_log_encryption.result b/mysql-test/suite/encryption/r/innodb-page_encryption_log_encryption.result
index 189be75b83f..4be73459cfd 100644
--- a/mysql-test/suite/encryption/r/innodb-page_encryption_log_encryption.result
+++ b/mysql-test/suite/encryption/r/innodb-page_encryption_log_encryption.result
@@ -60,6 +60,7 @@ variable_value > 0
SELECT variable_value >= 0 FROM information_schema.global_status WHERE variable_name = 'innodb_num_pages_decrypted';
variable_value >= 0
1
+# restart
update innodb_normal set c1 = c1 +1;
update innodb_compact set c1 = c1 + 1;
update innodb_compressed set c1 = c1 + 1;
@@ -115,6 +116,7 @@ drop table innodb_redundant;
CREATE TABLE t1 (pk INT PRIMARY KEY) ENGINE=InnoDB ENCRYPTION_KEY_ID=2 ENCRYPTED=YES;
INSERT INTO t1 VALUES (1),(2);
# Restarting server...
+# restart
SELECT * FROM t1;
pk
1
diff --git a/mysql-test/suite/encryption/r/innodb-read-only.result b/mysql-test/suite/encryption/r/innodb-read-only.result
index 5d063e129e2..35ba28dbe1e 100644
--- a/mysql-test/suite/encryption/r/innodb-read-only.result
+++ b/mysql-test/suite/encryption/r/innodb-read-only.result
@@ -1,3 +1,4 @@
# Wait max 10 min for key encryption threads to encrypt all spaces
# Success!
+# restart: --innodb-read-only=1 --innodb-encrypt-tables=1
# All done
diff --git a/mysql-test/suite/encryption/r/innodb-redo-nokeys.result b/mysql-test/suite/encryption/r/innodb-redo-nokeys.result
index cc9d385bbbd..0a6beb8da38 100644
--- a/mysql-test/suite/encryption/r/innodb-redo-nokeys.result
+++ b/mysql-test/suite/encryption/r/innodb-redo-nokeys.result
@@ -5,7 +5,7 @@ call mtr.add_suppression("Plugin 'InnoDB' init function returned error\\.");
call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed.");
call mtr.add_suppression("InnoDB: The page \\[page id: space=[0-9]+, page number=[0-9]+\\] in file '.*test.t[1-4]\\.ibd' cannot be decrypted\\.");
call mtr.add_suppression("failed to read or decrypt \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\]");
-# Restart mysqld --file-key-management-filename=keys2.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
SET GLOBAL innodb_file_per_table = ON;
create table t1(a int not null primary key auto_increment, c char(200), b blob, index(b(10))) engine=innodb row_format=compressed encrypted=yes encryption_key_id=20;
create table t2(a int not null primary key auto_increment, c char(200), b blob, index(b(10))) engine=innodb row_format=compressed;
@@ -28,6 +28,6 @@ insert into t3 (c,b) values (repeat('secret9',20), repeat('secre10',6000));
insert into t4 (c,b) values (repeat('secre11',20), repeat('secre12',6000));
COMMIT;
# Kill the server
-# restart
-# Restart mysqld --file-key-management-filename=keys2.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
+# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
drop table t1, t2,t3,t4;
diff --git a/mysql-test/suite/encryption/r/innodb-remove-encryption.result b/mysql-test/suite/encryption/r/innodb-remove-encryption.result
index 3b0ce29218b..08b31cb568d 100644
--- a/mysql-test/suite/encryption/r/innodb-remove-encryption.result
+++ b/mysql-test/suite/encryption/r/innodb-remove-encryption.result
@@ -6,6 +6,7 @@ flush tables;
create table t1(a int not null primary key, b char(200)) engine=innodb;
# Restart server with encryption
+# restart: --plugin-load-add=file_key_management.so --loose-file-key-management --loose-file-key-management-filename=MYSQL_TEST_DIR/std_data/keys.txt --file-key-management-encryption-algorithm=aes_cbc --innodb-encrypt-tables=ON --innodb-encryption-threads=4 --innodb-tablespaces-encryption --innodb-encryption-rotate-key-age=15
# Wait until encryption threads have encrypted all tablespaces
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
NAME
@@ -31,6 +32,7 @@ NAME
# Success!
# Restart server with no encryption setup, there should be no crashes
+# restart: --skip-file-key-management --innodb-encrypt-tables=OFF --innodb-encryption-threads=0 --innodb-tablespaces-encryption
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0;
NAME
innodb_system
diff --git a/mysql-test/suite/encryption/r/innodb-spatial-index,full_crc32.rdiff b/mysql-test/suite/encryption/r/innodb-spatial-index,full_crc32.rdiff
new file mode 100644
index 00000000000..2f58318b884
--- /dev/null
+++ b/mysql-test/suite/encryption/r/innodb-spatial-index,full_crc32.rdiff
@@ -0,0 +1,36 @@
+--- innodb-spatial-index.result
++++ innodb-spatial-index.result
+@@ -1,23 +1,27 @@
+ CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
+ c VARCHAR(256), coordinate POINT NOT NULL, SPATIAL index(coordinate)) ENGINE=INNODB
+ ENCRYPTED=YES;
+-Got one of the listed errors
++INSERT INTO t1(c, coordinate) values('mysql', ST_GeomFromText('POINT(903994614 180726515)'));
++# restart
++INSERT INTO t1(c, coordinate) values('mariadb', ST_GeomFromText('POINT(903994614 180726515)'));
++DROP TABLE t1;
+ CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
+ c VARCHAR(256), coordinate POINT NOT NULL, SPATIAL index(coordinate)) ENGINE=INNODB;
+ ALTER TABLE t1 ENCRYPTED=YES;
+-Got one of the listed errors
+ DROP TABLE t1;
+ CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
+ c VARCHAR(256), coordinate POINT NOT NULL)
+ PAGE_COMPRESSED=YES, ENCRYPTED=YES ENGINE=INNODB;
+ ALTER TABLE t1 ADD SPATIAL INDEX b1(coordinate), ALGORITHM=COPY;
+-Got one of the listed errors
+ ALTER TABLE t1 ADD SPATIAL INDEX b2(coordinate), FORCE, ALGORITHM=INPLACE;
+-Got one of the listed errors
++Warnings:
++Note 1831 Duplicate index `b2`. This is deprecated and will be disallowed in a future release
+ ALTER TABLE t1 ADD SPATIAL INDEX(coordinate);
+-Got one of the listed errors
++Warnings:
++Note 1831 Duplicate index `coordinate`. This is deprecated and will be disallowed in a future release
+ CREATE SPATIAL INDEX b3 on t1(coordinate);
+-Got one of the listed errors
++Warnings:
++Note 1831 Duplicate index `b3`. This is deprecated and will be disallowed in a future release
+ DROP TABLE t1;
+ CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
+ c VARCHAR(256), coordinate POINT NOT NULL) ENCRYPTED=DEFAULT ENGINE=INNODB;
diff --git a/mysql-test/suite/encryption/r/innodb-spatial-index,strict_full_crc32.rdiff b/mysql-test/suite/encryption/r/innodb-spatial-index,strict_full_crc32.rdiff
new file mode 100644
index 00000000000..2f58318b884
--- /dev/null
+++ b/mysql-test/suite/encryption/r/innodb-spatial-index,strict_full_crc32.rdiff
@@ -0,0 +1,36 @@
+--- innodb-spatial-index.result
++++ innodb-spatial-index.result
+@@ -1,23 +1,27 @@
+ CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
+ c VARCHAR(256), coordinate POINT NOT NULL, SPATIAL index(coordinate)) ENGINE=INNODB
+ ENCRYPTED=YES;
+-Got one of the listed errors
++INSERT INTO t1(c, coordinate) values('mysql', ST_GeomFromText('POINT(903994614 180726515)'));
++# restart
++INSERT INTO t1(c, coordinate) values('mariadb', ST_GeomFromText('POINT(903994614 180726515)'));
++DROP TABLE t1;
+ CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
+ c VARCHAR(256), coordinate POINT NOT NULL, SPATIAL index(coordinate)) ENGINE=INNODB;
+ ALTER TABLE t1 ENCRYPTED=YES;
+-Got one of the listed errors
+ DROP TABLE t1;
+ CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
+ c VARCHAR(256), coordinate POINT NOT NULL)
+ PAGE_COMPRESSED=YES, ENCRYPTED=YES ENGINE=INNODB;
+ ALTER TABLE t1 ADD SPATIAL INDEX b1(coordinate), ALGORITHM=COPY;
+-Got one of the listed errors
+ ALTER TABLE t1 ADD SPATIAL INDEX b2(coordinate), FORCE, ALGORITHM=INPLACE;
+-Got one of the listed errors
++Warnings:
++Note 1831 Duplicate index `b2`. This is deprecated and will be disallowed in a future release
+ ALTER TABLE t1 ADD SPATIAL INDEX(coordinate);
+-Got one of the listed errors
++Warnings:
++Note 1831 Duplicate index `coordinate`. This is deprecated and will be disallowed in a future release
+ CREATE SPATIAL INDEX b3 on t1(coordinate);
+-Got one of the listed errors
++Warnings:
++Note 1831 Duplicate index `b3`. This is deprecated and will be disallowed in a future release
+ DROP TABLE t1;
+ CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
+ c VARCHAR(256), coordinate POINT NOT NULL) ENCRYPTED=DEFAULT ENGINE=INNODB;
diff --git a/mysql-test/suite/encryption/r/innodb-spatial-index.result b/mysql-test/suite/encryption/r/innodb-spatial-index.result
index 7637d61b400..66c3edcd109 100644
--- a/mysql-test/suite/encryption/r/innodb-spatial-index.result
+++ b/mysql-test/suite/encryption/r/innodb-spatial-index.result
@@ -1,22 +1,23 @@
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
c VARCHAR(256), coordinate POINT NOT NULL, SPATIAL index(coordinate)) ENGINE=INNODB
ENCRYPTED=YES;
-ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
+Got one of the listed errors
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
c VARCHAR(256), coordinate POINT NOT NULL, SPATIAL index(coordinate)) ENGINE=INNODB;
ALTER TABLE t1 ENCRYPTED=YES;
-ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ENCRYPTED'
+Got one of the listed errors
DROP TABLE t1;
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
-c VARCHAR(256), coordinate POINT NOT NULL) ENCRYPTED=YES ENGINE=INNODB;
-ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate), ALGORITHM=COPY;
-ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
-ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate), FORCE, ALGORITHM=INPLACE;
-ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ENCRYPTED'
+c VARCHAR(256), coordinate POINT NOT NULL)
+PAGE_COMPRESSED=YES, ENCRYPTED=YES ENGINE=INNODB;
+ALTER TABLE t1 ADD SPATIAL INDEX b1(coordinate), ALGORITHM=COPY;
+Got one of the listed errors
+ALTER TABLE t1 ADD SPATIAL INDEX b2(coordinate), FORCE, ALGORITHM=INPLACE;
+Got one of the listed errors
ALTER TABLE t1 ADD SPATIAL INDEX(coordinate);
-ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ENCRYPTED'
-CREATE SPATIAL INDEX b on t1(coordinate);
-ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ENCRYPTED'
+Got one of the listed errors
+CREATE SPATIAL INDEX b3 on t1(coordinate);
+Got one of the listed errors
DROP TABLE t1;
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
c VARCHAR(256), coordinate POINT NOT NULL) ENCRYPTED=DEFAULT ENGINE=INNODB;
diff --git a/mysql-test/suite/encryption/r/innodb_encrypt_log.result b/mysql-test/suite/encryption/r/innodb_encrypt_log.result
index 0663890c685..d0c17ed09ae 100644
--- a/mysql-test/suite/encryption/r/innodb_encrypt_log.result
+++ b/mysql-test/suite/encryption/r/innodb_encrypt_log.result
@@ -38,6 +38,7 @@ NOT FOUND /private|secret|sacr(ed|ament)|success|story|secur(e|ity)/ in t0.ibd
# ib_logfile0 expecting NOT FOUND
NOT FOUND /private|secret|sacr(ed|ament)|success|story|secur(e|ity)/ in ib_logfile0
# Restart without redo log encryption
+# restart: --skip-innodb-encrypt-log --innodb-log-files-in-group=1
SELECT COUNT(*) FROM t0;
COUNT(*)
1024
@@ -55,6 +56,7 @@ FOUND 1 /(public|gossip).*/ in ib_logfile0
NOT FOUND /private|secret|sacr(ed|ament)|success|story|secur(e|ity)|public|gossip/ in ibdata1
# t0.ibd expecting NOT FOUND
NOT FOUND /private|secret|sacr(ed|ament)|success|story|secur(e|ity)|public|gossip/ in t0.ibd
+# restart
SELECT COUNT(*) FROM t0;
COUNT(*)
1025
diff --git a/mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result b/mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result
index 59b28969559..c64a11dcdd9 100644
--- a/mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result
+++ b/mysql-test/suite/encryption/r/innodb_encrypt_log_corruption.result
@@ -1,10 +1,12 @@
# redo log from before MariaDB 10.2.2/MySQL 5.7.9
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2\./ in mysqld.1.err
# redo log from before MariaDB 10.2.2, with corrupted log checkpoint
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -12,12 +14,14 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and we did not find a valid checkpoint/ in mysqld.1.err
FOUND 2 /Plugin 'InnoDB' registration as a STORAGE ENGINE failed/ in mysqld.1.err
# redo log from before MariaDB 10.2.2, with corrupted log block
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and it appears corrupted/ in mysqld.1.err
# empty redo log from before MariaDB 10.2.2
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5 --innodb-log-file-size=1m
SELECT COUNT(*) FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -25,24 +29,28 @@ COUNT(*)
1
FOUND 1 /InnoDB: Upgrading redo log:/ in mysqld.1.err
# redo log from "after" MariaDB 10.2.2, but with invalid header checksum
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Invalid redo log header checksum/ in mysqld.1.err
# distant future redo log format, with valid header checksum
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Unsupported redo log format. The redo log was created with malicious intentions, or perhaps\./ in mysqld.1.err
# valid header, but old-format checkpoint blocks
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: No valid checkpoint found .corrupted redo log/ in mysqld.1.err
# valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2, invalid block checksum
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -50,6 +58,7 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 3362026715 found: 144444122/ in mysqld.1.err
FOUND 1 /InnoDB: Missing MLOG_CHECKPOINT between the checkpoint 1213964 and the end 1213952\./ in mysqld.1.err
# same, but with current-version header
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -57,6 +66,7 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 2 /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 3362026715 found: 144444122/ in mysqld.1.err
FOUND 2 /InnoDB: Missing MLOG_CHECKPOINT between the checkpoint 1213964 and the end 1213952\./ in mysqld.1.err
# --innodb-force-recovery=6 (skip the entire redo log)
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=6
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -64,11 +74,13 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
InnoDB YES Supports transactions, row-level locking, foreign keys and encryption for tables YES YES YES
FOUND 1 /\[Note\] InnoDB: .* started; log sequence number 0/ in mysqld.1.err
# valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2, invalid block number
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
# --innodb-force-recovery=6 (skip the entire redo log)
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=6
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -76,6 +88,7 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
InnoDB YES Supports transactions, row-level locking, foreign keys and encryption for tables YES YES YES
# Test a corrupted MLOG_FILE_NAME record.
# valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -88,12 +101,14 @@ FOUND 1 /len 22. hex 38000000000012860cb7809781e80006626f67757300. asc 8
FOUND 1 /InnoDB: Set innodb_force_recovery to ignore this error/ in mysqld.1.err
# Test a corrupted MLOG_FILE_NAME record.
# valid header, invalid checkpoint 1, valid checkpoint 2, invalid block
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 2454333373 found: 150151/ in mysqld.1.err
# valid header, invalid checkpoint 1, valid checkpoint 2, invalid log record
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -101,18 +116,21 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: MLOG_FILE_NAME incorrect:bigot/ in mysqld.1.err
FOUND 1 /len 22; hex 38000000000012860cb7809781e800066269676f7400; asc 8 bigot ;/ in mysqld.1.err
# 10.2 missing MLOG_FILE_NAME or MLOG_FILE_DELETE before MLOG_CHECKPOINT
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Missing MLOG_FILE_NAME or MLOG_FILE_DELETE before MLOG_CHECKPOINT for tablespace 42/ in mysqld.1.err
# 10.3 missing MLOG_FILE_NAME or MLOG_FILE_DELETE before MLOG_CHECKPOINT
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 2 /InnoDB: Missing MLOG_FILE_NAME or MLOG_FILE_DELETE before MLOG_CHECKPOINT for tablespace 42/ in mysqld.1.err
# Empty 10.3 redo log
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5 --innodb-log-file-size=1m
SELECT COUNT(*) FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -120,6 +138,7 @@ COUNT(*)
1
FOUND 1 /InnoDB: .* started; log sequence number 121397[09]/ in mysqld.1.err
# Empty 10.2 redo log
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5 --innodb-log-file-size=1m
SELECT COUNT(*) FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -127,11 +146,13 @@ COUNT(*)
1
FOUND 3 /InnoDB: Upgrading redo log:/ in mysqld.1.err
# Minimal MariaDB 10.1.21 encrypted redo log
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5
SELECT COUNT(*) `1` FROM INFORMATION_SCHEMA.ENGINES WHERE engine='innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
1
1
FOUND 1 /InnoDB: Encrypting redo log/ in mysqld.1.err
+# restart
ib_buffer_pool
ib_logfile0
ib_logfile1
diff --git a/mysql-test/suite/encryption/r/innodb_encryption-page-compression.result b/mysql-test/suite/encryption/r/innodb_encryption-page-compression.result
index 34eb9b0c6dd..8324bdb34f6 100644
--- a/mysql-test/suite/encryption/r/innodb_encryption-page-compression.result
+++ b/mysql-test/suite/encryption/r/innodb_encryption-page-compression.result
@@ -146,6 +146,7 @@ variable_value > 0
SELECT variable_value > 0 FROM information_schema.global_status WHERE variable_name = 'innodb_num_pages_page_compressed';
variable_value > 0
1
+# restart
SET GLOBAL innodb_encryption_threads = 4;
SET GLOBAL innodb_encrypt_tables = off;
update innodb_page_compressed1 set c1 = c1 + 1;
diff --git a/mysql-test/suite/encryption/r/innodb_encryption.result b/mysql-test/suite/encryption/r/innodb_encryption.result
index 559430e0210..ab31eed5cf2 100644
--- a/mysql-test/suite/encryption/r/innodb_encryption.result
+++ b/mysql-test/suite/encryption/r/innodb_encryption.result
@@ -53,6 +53,7 @@ NAME
innodb_system
# Success!
# Restart mysqld --innodb_encrypt_tables=0 --innodb_encryption_threads=0
+# restart: --innodb_encrypt_tables=0 --innodb_encryption_threads=0
SHOW VARIABLES LIKE 'innodb_encrypt%';
Variable_name Value
innodb_encrypt_log ON
diff --git a/mysql-test/suite/encryption/r/innodb_encryption_discard_import.result b/mysql-test/suite/encryption/r/innodb_encryption_discard_import.result
index 6b38be38b6d..752994d635c 100644
--- a/mysql-test/suite/encryption/r/innodb_encryption_discard_import.result
+++ b/mysql-test/suite/encryption/r/innodb_encryption_discard_import.result
@@ -25,6 +25,7 @@ NOT FOUND /foobar/ in t1.ibd
NOT FOUND /temp/ in t2.ibd
# t3 ... on expecting NOT FOUND
NOT FOUND /barfoo/ in t3.ibd
+# restart
db.opt
t1.frm
t1.ibd
@@ -72,6 +73,7 @@ NOT FOUND /foobar/ in t1.ibd
NOT FOUND /temp/ in t2.ibd
# t3 ... on expecting NOT FOUND
NOT FOUND /barfoo/ in t3.ibd
+# restart
ALTER TABLE t1 ENGINE InnoDB;
SHOW CREATE TABLE t1;
Table Create Table
@@ -97,6 +99,7 @@ t3 CREATE TABLE `t3` (
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED `encrypted`=yes
# Restarting server
+# restart
# Done restarting server
# Verify that tables are still usable
SELECT COUNT(1) FROM t1;
@@ -115,8 +118,10 @@ NOT FOUND /foobar/ in t1.ibd
NOT FOUND /temp/ in t2.ibd
# t3 ... on expecting NOT FOUND
NOT FOUND /barfoo/ in t3.ibd
+# restart
# Wait max 10 min for key encryption threads to encrypt all spaces
# Success!
# Restart mysqld --innodb_encrypt_tables=0 --innodb_encryption_threads=0
+# restart: --innodb_encrypt_tables=0 --innodb_encryption_threads=0
DROP PROCEDURE innodb_insert_proc;
DROP TABLE t1, t2, t3;
diff --git a/mysql-test/suite/encryption/r/innodb_encryption_row_compressed.result b/mysql-test/suite/encryption/r/innodb_encryption_row_compressed.result
index 3d616ee71e3..46c170dd95e 100644
--- a/mysql-test/suite/encryption/r/innodb_encryption_row_compressed.result
+++ b/mysql-test/suite/encryption/r/innodb_encryption_row_compressed.result
@@ -15,6 +15,7 @@ insert into innodb_compressed1 values (10, 20, 'private', 'evenmoreprivate');
insert into innodb_compressed2 select * from innodb_compressed1;
insert into innodb_compressed3 select * from innodb_compressed1;
insert into innodb_compressed4 select * from innodb_compressed1;
+# restart
# t1 yes on expecting NOT FOUND
NOT FOUND /private/ in innodb_compressed1.ibd
# t2 yes on expecting NOT FOUND
@@ -95,6 +96,7 @@ NOT FOUND /private/ in innodb_compressed2.ibd
NOT FOUND /private/ in innodb_compressed3.ibd
# t4 yes on expecting NOT FOUND
NOT FOUND /private/ in innodb_compressed4.ibd
+# restart
select * from innodb_compressed1 where d = 40;
c1 d a b
3 40 private evenmoreprivate
diff --git a/mysql-test/suite/encryption/r/innodb_encryption_tables.result b/mysql-test/suite/encryption/r/innodb_encryption_tables.result
index e7bcc207612..1445e78c72f 100644
--- a/mysql-test/suite/encryption/r/innodb_encryption_tables.result
+++ b/mysql-test/suite/encryption/r/innodb_encryption_tables.result
@@ -101,6 +101,7 @@ variable_value >= 0
SELECT variable_value >= 0 FROM information_schema.global_status WHERE variable_name = 'innodb_num_pages_page_decompressed';
variable_value >= 0
1
+# restart
update innodb_normal set c1 = c1 + 1;
update innodb_compact set c1 = c1 + 1;
update innodb_dynamic set c1 = c1 + 1;
diff --git a/mysql-test/suite/encryption/r/innodb_first_page.result b/mysql-test/suite/encryption/r/innodb_first_page.result
index aaef462e639..cf576947d9d 100644
--- a/mysql-test/suite/encryption/r/innodb_first_page.result
+++ b/mysql-test/suite/encryption/r/innodb_first_page.result
@@ -1,2 +1,4 @@
+# restart
create table t1 (a int);
+# restart
drop table t1;
diff --git a/mysql-test/suite/encryption/r/innodb_lotoftables.result b/mysql-test/suite/encryption/r/innodb_lotoftables.result
index 45800c8cd0b..e651ee01487 100644
--- a/mysql-test/suite/encryption/r/innodb_lotoftables.result
+++ b/mysql-test/suite/encryption/r/innodb_lotoftables.result
@@ -1,4 +1,5 @@
SET GLOBAL innodb_fast_shutdown=0;
+# restart
SHOW VARIABLES LIKE 'innodb_encrypt%';
Variable_name Value
innodb_encrypt_log OFF
@@ -8,29 +9,17 @@ innodb_encryption_rotation_iops 100
innodb_encryption_threads 0
create database innodb_encrypted_1;
use innodb_encrypted_1;
-show status like 'innodb_pages0_read%';
-Variable_name Value
-Innodb_pages0_read 4
set autocommit=0;
set autocommit=1;
commit work;
-show status like 'innodb_pages0_read%';
-Variable_name Value
-Innodb_pages0_read 4
# should be empty
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE NAME LIKE 'innodb_encrypted%';
NAME
create database innodb_encrypted_2;
use innodb_encrypted_2;
-show status like 'innodb_pages0_read%';
-Variable_name Value
-Innodb_pages0_read 4
set autocommit=0;
commit work;
set autocommit=1;
-show status like 'innodb_pages0_read%';
-Variable_name Value
-Innodb_pages0_read 4
# should contain 100 tables
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 AND NAME LIKE 'innodb_encrypted%' ORDER BY NAME;
NAME
@@ -139,15 +128,9 @@ SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_
NAME
create database innodb_encrypted_3;
use innodb_encrypted_3;
-show status like 'innodb_pages0_read%';
-Variable_name Value
-Innodb_pages0_read 4
set autocommit=0;
commit work;
set autocommit=1;
-show status like 'innodb_pages0_read%';
-Variable_name Value
-Innodb_pages0_read 4
# should contain 100 tables
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 AND NAME LIKE 'innodb_encrypted%' ORDER BY NAME;
NAME
@@ -355,9 +338,6 @@ innodb_encrypted_3/t_97
innodb_encrypted_3/t_98
innodb_encrypted_3/t_99
use test;
-show status like 'innodb_pages0_read%';
-Variable_name Value
-Innodb_pages0_read 4
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 AND NAME LIKE 'innodb_encrypted%' ORDER BY NAME;
NAME
innodb_encrypted_2/t_1
@@ -768,43 +748,13 @@ innodb_encrypted_3/t_96
innodb_encrypted_3/t_97
innodb_encrypted_3/t_98
innodb_encrypted_3/t_99
-show status like 'innodb_pages0_read%';
-Variable_name Value
-Innodb_pages0_read 4
# Success!
# Restart mysqld --innodb_encrypt_tables=0 --innodb_encryption_threads=0
+# restart: --innodb_encrypt_tables=0 --innodb_encryption_threads=0
# Restart Success!
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-variable_value <= 303
-1
-use test;
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-variable_value <= 303
-1
-use innodb_encrypted_1;
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-variable_value <= 303
-1
-use innodb_encrypted_2;
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-variable_value <= 303
-1
-use innodb_encrypted_3;
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-variable_value <= 303
-1
use innodb_encrypted_1;
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-variable_value <= 303
-1
use innodb_encrypted_2;
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-variable_value <= 303
-1
use innodb_encrypted_3;
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-variable_value <= 303
-1
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 AND NAME LIKE 'innodb_encrypted%' ORDER BY NAME;
NAME
innodb_encrypted_3/t_1
diff --git a/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result b/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result
index 779eb5917cd..de0ddf5efe3 100644
--- a/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result
+++ b/mysql-test/suite/encryption/r/innodb_onlinealter_encryption.result
@@ -40,6 +40,7 @@ NOT FOUND /author/ in t5.ibd
NOT FOUND /mangled/ in t6.ibd
# t7 ... on expecting NOT FOUND
NOT FOUND /mysql/ in t7.ibd
+# restart
SET GLOBAL innodb_file_per_table = ON;
ALTER TABLE t1 ADD COLUMN b int default 2;
ALTER TABLE t2 ADD COLUMN b int default 2;
@@ -134,5 +135,6 @@ NOT FOUND /author/ in t5.ibd
NOT FOUND /mangled/ in t6.ibd
# t7 ... on expecting NOT FOUND
NOT FOUND /mysql/ in t7.ibd
+# restart
DROP PROCEDURE innodb_insert_proc;
DROP TABLE t1, t2, t3, t4, t5, t6, t7;
diff --git a/mysql-test/suite/encryption/r/innodb_page_encryption_key_change.result b/mysql-test/suite/encryption/r/innodb_page_encryption_key_change.result
index d3406e67f18..2b661d531f4 100644
--- a/mysql-test/suite/encryption/r/innodb_page_encryption_key_change.result
+++ b/mysql-test/suite/encryption/r/innodb_page_encryption_key_change.result
@@ -1,4 +1,4 @@
-# Restart mysqld --loose-file-key-management-filename=keys2.txt
+# restart: --loose-file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb;
show warnings;
Level Code Message
@@ -65,7 +65,7 @@ variable_value >= 0
SELECT variable_value >= 0 FROM information_schema.global_status WHERE variable_name = 'innodb_num_pages_decrypted';
variable_value >= 0
1
-# Restart mysqld --loose-file-key-management-filename=keys3.txt
+# restart: --loose-file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
select * from innodb_normal;
c1 b
1 test1
diff --git a/mysql-test/suite/encryption/t/corrupted_during_recovery.test b/mysql-test/suite/encryption/t/corrupted_during_recovery.test
index 5784d5775c6..44cd03e9f8a 100644
--- a/mysql-test/suite/encryption/t/corrupted_during_recovery.test
+++ b/mysql-test/suite/encryption/t/corrupted_during_recovery.test
@@ -15,6 +15,7 @@ let INNODB_PAGE_SIZE=`select @@innodb_page_size`;
CREATE TABLE t1(a BIGINT PRIMARY KEY) ENGINE=InnoDB, ENCRYPTED=YES;
INSERT INTO t1 VALUES(1);
# Force a redo log checkpoint.
+let $restart_noprint=2;
--source include/restart_mysqld.inc
--source ../../suite/innodb/include/no_checkpoint_start.inc
CREATE TABLE t2(a BIGINT PRIMARY KEY) ENGINE=InnoDB, ENCRYPTED=YES;
diff --git a/mysql-test/suite/encryption/t/debug_key_management.test b/mysql-test/suite/encryption/t/debug_key_management.test
index 22b213c6135..c370ecf5bd8 100644
--- a/mysql-test/suite/encryption/t/debug_key_management.test
+++ b/mysql-test/suite/encryption/t/debug_key_management.test
@@ -1,10 +1,15 @@
-- source include/have_innodb.inc
+-- source include/have_debug.inc
+-- source include/not_embedded.inc
+
if (`select count(*) = 0 from information_schema.plugins
where plugin_name = 'debug_key_management' and plugin_status='active'`)
{
--skip Needs debug_key_management
}
+create table t1(a serial) engine=innoDB;
+
set global innodb_encrypt_tables=ON;
show variables like 'innodb_encrypt%';
@@ -17,10 +22,21 @@ set global debug_key_management_version=10;
let $wait_condition= select count(*) = $tables_count from information_schema.innodb_tablespaces_encryption where current_key_version=10;
--source include/wait_condition.inc
-
select count(*) from information_schema.innodb_tablespaces_encryption where current_key_version <> 10;
+
+# Test redo log key rotation and crash recovery.
+SET GLOBAL debug_dbug = '+d,ib_log';
+SET GLOBAL innodb_log_checkpoint_now = 1;
+SET GLOBAL innodb_flush_log_at_trx_commit = 1;
+INSERT INTO t1 VALUES(NULL);
+let $shutdown_timeout = 0;
+-- source include/restart_mysqld.inc
+
# Note that we expect that key_version is increasing so disable encryption before reset
set global innodb_encrypt_tables=OFF;
set global debug_key_management_version=1;
+select * from t1;
+
+drop table t1;
diff --git a/mysql-test/suite/encryption/t/innochecksum.test b/mysql-test/suite/encryption/t/innochecksum.test
index f57b8d265bb..0cc48e53fc9 100644
--- a/mysql-test/suite/encryption/t/innochecksum.test
+++ b/mysql-test/suite/encryption/t/innochecksum.test
@@ -8,16 +8,19 @@
-- source include/have_innodb.inc
-- source include/have_file_key_management_plugin.inc
-- source include/innodb_page_size_small.inc
+-- source include/innodb_checksum_algorithm.inc
if (!$INNOCHECKSUM) {
--echo Need innochecksum binary
--die Need innochecksum binary
}
+let $checksum_algorithm = `SELECT @@innodb_checksum_algorithm`;
SET GLOBAL innodb_file_per_table = ON;
# zlib
set global innodb_compression_algorithm = 1;
+
--echo # Create and populate a tables
CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
CREATE TABLE t2 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT) ENGINE=InnoDB ROW_FORMAT=COMPRESSED ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
@@ -136,7 +139,20 @@ EOF
--exec $INNOCHECKSUM $t3_IBD
--echo # Run innochecksum on t6
---echo # no encryption corrupting the field should not have effect
+--echo # In the new checksum format, the checksum is calculated over the whole page,
+--echo # so the corruption should be detected.
+let $error_code = 0;
+if ($checksum_algorithm == "full_crc32")
+{
+ let $error_code = 1;
+}
+
+if ($checksum_algorithm == "strict_full_crc32")
+{
+ let $error_code = 1;
+}
+
+--error $error_code
--exec $INNOCHECKSUM $t6_IBD
--enable_result_log
@@ -193,7 +209,9 @@ EOF
--exec $INNOCHECKSUM $t3_IBD
--echo # Run innochecksum on t6
---echo # no encryption corrupting the field should not have effect
+--echo # In the new checksum format, the checksum is calculated over the whole page,
+--echo # so the corruption should be detected.
+--error $error_code
--exec $INNOCHECKSUM $t6_IBD
--enable_result_log
diff --git a/mysql-test/suite/encryption/t/innodb-first-page-read.test b/mysql-test/suite/encryption/t/innodb-first-page-read.test
deleted file mode 100644
index c86e16c52b8..00000000000
--- a/mysql-test/suite/encryption/t/innodb-first-page-read.test
+++ /dev/null
@@ -1,104 +0,0 @@
--- source include/have_innodb.inc
--- source include/have_file_key_management_plugin.inc
--- source include/not_embedded.inc
-
-FLUSH STATUS;
-
-create database innodb_test;
-use innodb_test;
-create table innodb_normal(c1 bigint not null, b char(200)) engine=innodb;
-create table innodb_compact(c1 bigint not null, b char(200)) engine=innodb row_format=compact;
-create table innodb_dynamic(c1 bigint not null, b char(200)) engine=innodb row_format=dynamic;
-create table innodb_compressed(c1 bigint not null, b char(200)) engine=innodb row_format=compressed;
-create table innodb_compressed1(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=1;
-create table innodb_compressed2(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=2;
-create table innodb_compressed4(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=4;
-create table innodb_compressed8(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=8;
-create table innodb_compressed16(c1 bigint not null, b char(200)) engine=innodb row_format=compressed key_block_size=16;
-create table innodb_redundant(c1 bigint not null, b char(200)) engine=innodb row_format=redundant;
-create table innodb_pagecomp(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes;
-create table innodb_pagecomp1(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=1;
-create table innodb_pagecomp2(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=2;
-create table innodb_pagecomp3(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=3;
-create table innodb_pagecomp4(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=4;
-create table innodb_pagecomp5(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=5;
-create table innodb_pagecomp6(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=6;
-create table innodb_pagecomp7(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=7;
-create table innodb_pagecomp8(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=8;
-create table innodb_pagecomp9(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes page_compression_level=9;
-
---replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
-eval create table innodb_datadir1(c1 bigint not null, b char(200)) engine=innodb DATA DIRECTORY='$MYSQL_TMP_DIR';
---replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
-eval create table innodb_datadir2(c1 bigint not null, b char(200)) engine=innodb row_format=compressed DATA DIRECTORY='$MYSQL_TMP_DIR';
---replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
-eval create table innodb_datadir3(c1 bigint not null, b char(200)) engine=innodb page_compressed=yes DATA DIRECTORY='$MYSQL_TMP_DIR';
-
-begin;
-insert into innodb_normal values (1,'secret');
-insert into innodb_compact select * from innodb_normal;
-insert into innodb_dynamic select * from innodb_normal;
-insert into innodb_compressed select * from innodb_normal;
-insert into innodb_compressed1 select * from innodb_normal;
-insert into innodb_compressed2 select * from innodb_normal;
-insert into innodb_compressed4 select * from innodb_normal;
-insert into innodb_compressed8 select * from innodb_normal;
-insert into innodb_compressed16 select * from innodb_normal;
-insert into innodb_redundant select * from innodb_normal;
-insert into innodb_pagecomp select * from innodb_normal;
-insert into innodb_pagecomp1 select * from innodb_normal;
-insert into innodb_pagecomp2 select * from innodb_normal;
-insert into innodb_pagecomp3 select * from innodb_normal;
-insert into innodb_pagecomp4 select * from innodb_normal;
-insert into innodb_pagecomp5 select * from innodb_normal;
-insert into innodb_pagecomp6 select * from innodb_normal;
-insert into innodb_pagecomp7 select * from innodb_normal;
-insert into innodb_pagecomp8 select * from innodb_normal;
-insert into innodb_pagecomp9 select * from innodb_normal;
-insert into innodb_datadir1 select * from innodb_normal;
-insert into innodb_datadir2 select * from innodb_normal;
-insert into innodb_datadir3 select * from innodb_normal;
-commit;
-
-FLUSH STATUS;
-
---echo # Restart server and see how many page 0's are read
---source include/restart_mysqld.inc
-
---echo # result should actual number of tables except remote tables could be read twice
---echo # i.e. < 23 + 3*2 = 29
-
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-use innodb_test;
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-use test;
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-
-set global innodb_encrypt_tables=OFF;
-
---echo # wait until tables are decrypted
---let $wait_condition=SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0
---source include/wait_condition.inc
-
---echo # result should be actual number of tables except remote tables could be read twice
---echo # i.e. < 23 + 3*2 = 29
-
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-use innodb_test;
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-use test;
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-
-FLUSH STATUS;
-
---echo # restart and see number read page 0
--- source include/restart_mysqld.inc
-
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-use innodb_test;
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-use test;
-SELECT VARIABLE_VALUE <= 29 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'innodb_pages0_read';
-
-drop database innodb_test;
-FLUSH STATUS;
diff --git a/mysql-test/suite/encryption/t/innodb-force-corrupt.test b/mysql-test/suite/encryption/t/innodb-force-corrupt.test
index ae7e5c81aa1..e8048150be2 100644
--- a/mysql-test/suite/encryption/t/innodb-force-corrupt.test
+++ b/mysql-test/suite/encryption/t/innodb-force-corrupt.test
@@ -8,7 +8,7 @@
-- source include/not_embedded.inc
call mtr.add_suppression("InnoDB: Table `test`\\.`t[13]` (has an unreadable root page|is corrupted)");
-call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=\\d+, page number=[36]\\] in file .*test.t[123]\\.ibd looks corrupted; key_version=3221342974");
+call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=\\d+, page number=[36]\\] in file .*test.t[123]\\.ibd looks corrupted; key_version=");
call mtr.add_suppression("InnoDB: Table `test`\\.`t[13]` is corrupted");
SET GLOBAL innodb_file_per_table = ON;
diff --git a/mysql-test/suite/encryption/t/innodb-redo-nokeys.test b/mysql-test/suite/encryption/t/innodb-redo-nokeys.test
index 66720eb8585..6fa28cd8ca8 100644
--- a/mysql-test/suite/encryption/t/innodb-redo-nokeys.test
+++ b/mysql-test/suite/encryption/t/innodb-redo-nokeys.test
@@ -11,7 +11,6 @@ call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE faile
call mtr.add_suppression("InnoDB: The page \\[page id: space=[0-9]+, page number=[0-9]+\\] in file '.*test.t[1-4]\\.ibd' cannot be decrypted\\.");
call mtr.add_suppression("failed to read or decrypt \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\]");
---echo # Restart mysqld --file-key-management-filename=keys2.txt
-- let $restart_parameters=--file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
-- source include/restart_mysqld.inc
@@ -61,13 +60,11 @@ let $cleanup= drop table t1,t2,t3,t4;
--let CLEANUP_IF_CHECKPOINT= $cleanup;
--source ../../suite/innodb/include/no_checkpoint_end.inc
---echo # restart
-- source include/start_mysqld.inc
#
# In above server does start but InnoDB refuses to start
# thus we need to restart server with correct key file
#
---echo # Restart mysqld --file-key-management-filename=keys2.txt
-- let $restart_parameters=--file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
-- source include/restart_mysqld.inc
diff --git a/mysql-test/suite/encryption/t/innodb-spatial-index.test b/mysql-test/suite/encryption/t/innodb-spatial-index.test
index 2bf56817740..2caffb141e1 100644
--- a/mysql-test/suite/encryption/t/innodb-spatial-index.test
+++ b/mysql-test/suite/encryption/t/innodb-spatial-index.test
@@ -1,25 +1,43 @@
--source include/have_innodb.inc
--source include/have_file_key_management_plugin.inc
+--source include/innodb_checksum_algorithm.inc
#
# MDEV-11974: MariaDB 10.2 encryption does not support spatial indexes
#
#
-# (1) Do not allow creating table with ENCRYPTED=YES
#
#
---error ER_CANT_CREATE_TABLE
+let $checksum_algorithm = `SELECT @@innodb_checksum_algorithm`;
+let $error_code = ER_CANT_CREATE_TABLE, ER_ILLEGAL_HA_CREATE_OPTION;
+if ($checksum_algorithm == "full_crc32")
+{
+ let $error_code = 0;
+}
+if ($checksum_algorithm == "strict_full_crc32")
+{
+ let $error_code = 0;
+}
+
+--error $error_code
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
c VARCHAR(256), coordinate POINT NOT NULL, SPATIAL index(coordinate)) ENGINE=INNODB
ENCRYPTED=YES;
+if (!$error_code) {
+INSERT INTO t1(c, coordinate) values('mysql', ST_GeomFromText('POINT(903994614 180726515)'));
+--source include/restart_mysqld.inc
+INSERT INTO t1(c, coordinate) values('mariadb', ST_GeomFromText('POINT(903994614 180726515)'));
+DROP TABLE t1;
+}
+
#
# (2) Alter table
#
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
c VARCHAR(256), coordinate POINT NOT NULL, SPATIAL index(coordinate)) ENGINE=INNODB;
---error ER_ILLEGAL_HA_CREATE_OPTION
+--error $error_code
ALTER TABLE t1 ENCRYPTED=YES;
DROP TABLE t1;
@@ -27,17 +45,18 @@ DROP TABLE t1;
# Index creation
#
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
-c VARCHAR(256), coordinate POINT NOT NULL) ENCRYPTED=YES ENGINE=INNODB;
+c VARCHAR(256), coordinate POINT NOT NULL)
+PAGE_COMPRESSED=YES, ENCRYPTED=YES ENGINE=INNODB;
# FIXME: MDEV-13851 Encrypted table refuses some form of ALGORITHM=COPY,
# but allows rebuild by FORCE
---error ER_CANT_CREATE_TABLE
-ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate), ALGORITHM=COPY;
---error ER_ILLEGAL_HA_CREATE_OPTION
-ALTER TABLE t1 ADD SPATIAL INDEX b(coordinate), FORCE, ALGORITHM=INPLACE;
---error ER_ILLEGAL_HA_CREATE_OPTION
+--error $error_code
+ALTER TABLE t1 ADD SPATIAL INDEX b1(coordinate), ALGORITHM=COPY;
+--error $error_code
+ALTER TABLE t1 ADD SPATIAL INDEX b2(coordinate), FORCE, ALGORITHM=INPLACE;
+--error $error_code
ALTER TABLE t1 ADD SPATIAL INDEX(coordinate);
---error ER_ILLEGAL_HA_CREATE_OPTION
-CREATE SPATIAL INDEX b on t1(coordinate);
+--error $error_code
+CREATE SPATIAL INDEX b3 on t1(coordinate);
DROP TABLE t1;
CREATE TABLE t1 (pk INT PRIMARY KEY AUTO_INCREMENT,
diff --git a/mysql-test/suite/encryption/t/innodb_encrypt_key_rotation_age.test b/mysql-test/suite/encryption/t/innodb_encrypt_key_rotation_age.test
index c3fafb0751b..bc4c43e1ce8 100644
--- a/mysql-test/suite/encryption/t/innodb_encrypt_key_rotation_age.test
+++ b/mysql-test/suite/encryption/t/innodb_encrypt_key_rotation_age.test
@@ -14,6 +14,7 @@ INSERT INTO t3 SELECT * FROM t1;
--echo # Restart the server with encryption
+let $restart_noprint=2;
let $restart_parameters= --innodb_encryption_threads=5 --innodb_encryption_rotate_key_age=16384;
--source include/restart_mysqld.inc
diff --git a/mysql-test/suite/encryption/t/innodb_lotoftables.test b/mysql-test/suite/encryption/t/innodb_lotoftables.test
index 4ccdc7d5c49..413fc8685f2 100644
--- a/mysql-test/suite/encryption/t/innodb_lotoftables.test
+++ b/mysql-test/suite/encryption/t/innodb_lotoftables.test
@@ -21,7 +21,6 @@ SHOW VARIABLES LIKE 'innodb_encrypt%';
#
create database innodb_encrypted_1;
use innodb_encrypted_1;
-show status like 'innodb_pages0_read%';
set autocommit=0;
let $tables = 100;
@@ -44,7 +43,6 @@ while ($tables)
set autocommit=1;
commit work;
-show status like 'innodb_pages0_read%';
#
# Verify
#
@@ -56,7 +54,6 @@ SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE NAME LIK
#
create database innodb_encrypted_2;
use innodb_encrypted_2;
-show status like 'innodb_pages0_read%';
set autocommit=0;
--disable_query_log
@@ -79,7 +76,6 @@ while ($tables)
commit work;
set autocommit=1;
-show status like 'innodb_pages0_read%';
#
# Verify
#
@@ -93,7 +89,6 @@ SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_
#
create database innodb_encrypted_3;
use innodb_encrypted_3;
-show status like 'innodb_pages0_read%';
set autocommit=0;
--disable_query_log
@@ -116,7 +111,6 @@ while ($tables)
commit work;
set autocommit=1;
-show status like 'innodb_pages0_read%';
#
# Verify
#
@@ -126,7 +120,6 @@ SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 AND NAME LIKE 'innodb_encrypted%' ORDER BY NAME;
use test;
-show status like 'innodb_pages0_read%';
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 AND NAME LIKE 'innodb_encrypted%' ORDER BY NAME;
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 AND NAME LIKE 'innodb_encrypted%' ORDER BY NAME;
@@ -140,7 +133,6 @@ SET GLOBAL innodb_encryption_threads=4;
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 AND NAME LIKE 'innodb_encrypted%' ORDER BY NAME;
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 AND NAME LIKE 'innodb_encrypted%' ORDER BY NAME;
-show status like 'innodb_pages0_read%';
--echo # Success!
--echo # Restart mysqld --innodb_encrypt_tables=0 --innodb_encryption_threads=0
@@ -149,16 +141,6 @@ show status like 'innodb_pages0_read%';
--echo # Restart Success!
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-use test;
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-use innodb_encrypted_1;
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-use innodb_encrypted_2;
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-use innodb_encrypted_3;
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-
use innodb_encrypted_1;
--disable_result_log
@@ -172,8 +154,6 @@ while ($tables)
--enable_query_log
--enable_result_log
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-
use innodb_encrypted_2;
--disable_result_log
@@ -187,8 +167,6 @@ while ($tables)
--enable_query_log
--enable_result_log
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-
use innodb_encrypted_3;
--disable_result_log
@@ -202,8 +180,6 @@ while ($tables)
--enable_query_log
--enable_result_log
-SELECT variable_value <= 303 FROM information_schema.global_status WHERE variable_name = 'innodb_pages0_read';
-
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 AND NAME LIKE 'innodb_encrypted%' ORDER BY NAME;
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 AND NAME LIKE 'innodb_encrypted%' ORDER BY NAME;
diff --git a/mysql-test/suite/encryption/t/innodb_page_encryption_key_change.test b/mysql-test/suite/encryption/t/innodb_page_encryption_key_change.test
index 1babf577473..acba1f600e7 100644
--- a/mysql-test/suite/encryption/t/innodb_page_encryption_key_change.test
+++ b/mysql-test/suite/encryption/t/innodb_page_encryption_key_change.test
@@ -3,7 +3,6 @@
# embedded does not support restart
-- source include/not_embedded.inc
---echo # Restart mysqld --loose-file-key-management-filename=keys2.txt
-- let $restart_parameters=--loose-file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
-- source include/restart_mysqld.inc
@@ -43,7 +42,6 @@ select * from innodb_redundant;
SELECT variable_value >= 0 FROM information_schema.global_status WHERE variable_name = 'innodb_num_pages_encrypted';
SELECT variable_value >= 0 FROM information_schema.global_status WHERE variable_name = 'innodb_num_pages_decrypted';
---echo # Restart mysqld --loose-file-key-management-filename=keys3.txt
-- let $restart_parameters=--loose-file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys3.txt
-- source include/restart_mysqld.inc
diff --git a/mysql-test/suite/engines/funcs/r/an_calendar.result b/mysql-test/suite/engines/funcs/r/an_calendar.result
index f057c3b618f..669b097b325 100644
--- a/mysql-test/suite/engines/funcs/r/an_calendar.result
+++ b/mysql-test/suite/engines/funcs/r/an_calendar.result
@@ -11,6 +11,7 @@ Note 1265 Data truncated for column 'c1' at row 1
Note 1265 Data truncated for column 'c2' at row 1
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 TIME NULL, c2 TIME NULL);
@@ -25,6 +26,7 @@ Note 1265 Data truncated for column 'c1' at row 1
Note 1265 Data truncated for column 'c2' at row 1
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 DATETIME NULL, c2 DATETIME NULL);
@@ -33,6 +35,7 @@ INSERT INTO t1 VALUES(NOW(),NOW());
INSERT INTO t1 VALUES(NOW(),NOW());
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 TIMESTAMP NULL, c2 TIMESTAMP NULL);
@@ -41,5 +44,6 @@ INSERT INTO t1 VALUES(NOW(),NOW());
INSERT INTO t1 VALUES(NOW(),NOW());
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
diff --git a/mysql-test/suite/engines/funcs/r/an_number.result b/mysql-test/suite/engines/funcs/r/an_number.result
index a77fc0e7d69..257213d0bf8 100644
--- a/mysql-test/suite/engines/funcs/r/an_number.result
+++ b/mysql-test/suite/engines/funcs/r/an_number.result
@@ -4,6 +4,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 SMALLINT NULL, c2 SMALLINT NULL);
@@ -11,6 +12,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 MEDIUMINT NULL, c2 MEDIUMINT NULL);
@@ -18,6 +20,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 INT NULL, c2 INT NULL);
@@ -25,6 +28,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 INTEGER NULL, c2 INTEGER NULL);
@@ -32,6 +36,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 BIGINT NULL, c2 BIGINT NULL);
@@ -39,6 +44,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 DECIMAL NULL, c2 DECIMAL NULL);
@@ -46,6 +52,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 DEC NULL, c2 DEC NULL);
@@ -53,6 +60,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 FIXED NULL, c2 FIXED NULL);
@@ -60,6 +68,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 NUMERIC NULL, c2 NUMERIC NULL);
@@ -67,6 +76,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 DOUBLE NULL, c2 DOUBLE NULL);
@@ -74,6 +84,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 REAL NULL, c2 REAL NULL);
@@ -81,6 +92,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 DOUBLE PRECISION NULL, c2 DOUBLE PRECISION NULL);
@@ -88,6 +100,7 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 FLOAT NULL, c2 FLOAT NULL);
@@ -95,5 +108,6 @@ INSERT INTO t1 VALUES(1,2);
INSERT INTO t1 VALUES(3,4);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
diff --git a/mysql-test/suite/engines/funcs/r/an_string.result b/mysql-test/suite/engines/funcs/r/an_string.result
index c7762155a7d..f3e7eaedb15 100644
--- a/mysql-test/suite/engines/funcs/r/an_string.result
+++ b/mysql-test/suite/engines/funcs/r/an_string.result
@@ -4,6 +4,7 @@ INSERT INTO t1 VALUES('abc','ABCDEFG');
INSERT INTO t1 VALUES('123','1234567890');
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 VARCHAR(100) NULL, c2 VARCHAR(100) NULL);
@@ -11,6 +12,7 @@ INSERT INTO t1 VALUES('abc','ABCDEFG');
INSERT INTO t1 VALUES('123','1234567890');
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 BINARY(100) NULL, c2 BINARY(100) NULL);
@@ -18,6 +20,7 @@ INSERT INTO t1 VALUES('abc','ABCDEFG');
INSERT INTO t1 VALUES('123','1234567890');
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 VARBINARY(100) NULL, c2 VARBINARY(100) NULL);
@@ -25,6 +28,7 @@ INSERT INTO t1 VALUES('abc','ABCDEFG');
INSERT INTO t1 VALUES('123','1234567890');
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 BLOB(100) NULL, c2 BLOB(100) NULL);
@@ -32,6 +36,9 @@ INSERT INTO t1 VALUES('abc','ABCDEFG');
INSERT INTO t1 VALUES('123','1234567890');
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'c1'
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'c2'
test.t1 analyze status OK
DROP TABLE t1;
CREATE TABLE t1(c1 TEXT(100) NULL, c2 TEXT(100) NULL);
@@ -39,5 +46,8 @@ INSERT INTO t1 VALUES('abc','ABCDEFG');
INSERT INTO t1 VALUES('123','1234567890');
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'c1'
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'c2'
test.t1 analyze status OK
DROP TABLE t1;
diff --git a/mysql-test/suite/engines/funcs/r/tc_partition_analyze.result b/mysql-test/suite/engines/funcs/r/tc_partition_analyze.result
index 884408d29b3..52a8b2de642 100644
--- a/mysql-test/suite/engines/funcs/r/tc_partition_analyze.result
+++ b/mysql-test/suite/engines/funcs/r/tc_partition_analyze.result
@@ -33,6 +33,7 @@ t1 CREATE TABLE `t1` (
PARTITION `p5` VALUES LESS THAN MAXVALUE ENGINE = ENGINE)
ALTER TABLE t1 ANALYZE PARTITION p1,p2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SELECT * FROM t1 ORDER BY c1;
c1 c2
diff --git a/mysql-test/suite/federated/error_on_close-8313.result b/mysql-test/suite/federated/error_on_close-8313.result
index d8b72c1c52a..f1220015cc3 100644
--- a/mysql-test/suite/federated/error_on_close-8313.result
+++ b/mysql-test/suite/federated/error_on_close-8313.result
@@ -18,6 +18,7 @@ connection='s1';
select * from t1;
foo bar
connection slave;
+# restart
connection master;
drop table t1;
drop server s1;
diff --git a/mysql-test/suite/federated/federatedx_create_handlers.result b/mysql-test/suite/federated/federatedx_create_handlers.result
new file mode 100644
index 00000000000..473972c2cd4
--- /dev/null
+++ b/mysql-test/suite/federated/federatedx_create_handlers.result
@@ -0,0 +1,312 @@
+connect master,127.0.0.1,root,,test,$MASTER_MYPORT,;
+connect slave,127.0.0.1,root,,test,$SLAVE_MYPORT,;
+connection master;
+CREATE DATABASE federated;
+connection slave;
+CREATE DATABASE federated;
+connection default;
+set global federated_pushdown=1;
+connection slave;
+DROP TABLE IF EXISTS federated.t1;
+Warnings:
+Note 1051 Unknown table 'federated.t1'
+CREATE TABLE federated.t1 (
+id int(20) NOT NULL,
+name varchar(16) NOT NULL default ''
+)
+DEFAULT CHARSET=latin1;
+INSERT INTO federated.t1 VALUES
+(3,'xxx'), (7,'yyy'), (4,'xxx'), (1,'zzz'), (5,'yyy');
+DROP TABLE IF EXISTS federated.t2;
+Warnings:
+Note 1051 Unknown table 'federated.t2'
+CREATE TABLE federated.t2 (
+name varchar(16) NOT NULL default ''
+)
+DEFAULT CHARSET=latin1;
+INSERT INTO federated.t2 VALUES
+('yyy'), ('www'), ('yyy'), ('xxx'), ('www'), ('yyy'), ('www');
+connection master;
+DROP TABLE IF EXISTS federated.t1;
+Warnings:
+Note 1051 Unknown table 'federated.t1'
+CREATE TABLE federated.t1 (
+id int(20) NOT NULL,
+name varchar(16) NOT NULL default ''
+)
+ENGINE="FEDERATED" DEFAULT CHARSET=latin1
+CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
+DROP TABLE IF EXISTS federated.t2;
+Warnings:
+Note 1051 Unknown table 'federated.t2'
+CREATE TABLE federated.t2 (
+name varchar(16) NOT NULL default ''
+)
+ENGINE="FEDERATED" DEFAULT CHARSET=latin1
+CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2';
+SELECT * FROM federated.t1;
+id name
+3 xxx
+7 yyy
+4 xxx
+1 zzz
+5 yyy
+SELECT id FROM federated.t1 WHERE id < 5;
+id
+3
+4
+1
+SELECT count(*), name FROM federated.t1 WHERE id < 5 GROUP BY name;
+count(*) name
+2 xxx
+1 zzz
+SELECT * FROM federated.t1, federated.t2
+WHERE federated.t1.name = federated.t2.name;
+id name name
+7 yyy yyy
+5 yyy yyy
+7 yyy yyy
+5 yyy yyy
+3 xxx xxx
+4 xxx xxx
+7 yyy yyy
+5 yyy yyy
+SELECT * FROM federated.t1 LEFT JOIN federated.t2
+ON federated.t1.name = federated.t2.name
+WHERE federated.t1.id > 1;
+id name name
+7 yyy yyy
+5 yyy yyy
+7 yyy yyy
+5 yyy yyy
+3 xxx xxx
+4 xxx xxx
+7 yyy yyy
+5 yyy yyy
+SELECT * FROM federated.t1
+WHERE id IN (SELECT count(*) FROM federated.t2 GROUP BY name);
+id name
+3 xxx
+1 zzz
+EXPLAIN
+SELECT id FROM federated.t1 WHERE id < 5;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PUSHED SELECT NULL NULL NULL NULL NULL NULL NULL NULL
+EXPLAIN EXTENDED
+SELECT id FROM federated.t1 WHERE id < 5;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PUSHED SELECT NULL NULL NULL NULL NULL NULL NULL NULL NULL
+Warnings:
+Note 1003 select `federated`.`t1`.`id` AS `id` from `federated`.`t1` where `federated`.`t1`.`id` < 5
+EXPLAIN FORMAT=JSON
+SELECT id FROM federated.t1 WHERE id < 5;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Pushed select"
+ }
+ }
+}
+ANALYZE
+SELECT id FROM federated.t1 WHERE id < 5;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 PUSHED SELECT NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+ANALYZE FORMAT=JSON
+SELECT id FROM federated.t1 WHERE id < 5;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "message": "Pushed select"
+ }
+ }
+}
+CREATE TABLE federated.t3 (
+name varchar(16) NOT NULL default ''
+)
+DEFAULT CHARSET=latin1;
+INSERT INTO federated.t3 VALUES
+('yyy'), ('www'), ('yyy'), ('xxx'), ('www'), ('yyy'), ('www');
+SELECT *
+FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
+WHERE federated.t3.name=t.name;
+name id name
+yyy 5 yyy
+yyy 7 yyy
+yyy 5 yyy
+yyy 7 yyy
+xxx 4 xxx
+yyy 5 yyy
+yyy 7 yyy
+EXPLAIN
+SELECT *
+FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
+WHERE federated.t3.name=t.name;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t3 ALL NULL NULL NULL NULL 7
+1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 2
+2 PUSHED DERIVED NULL NULL NULL NULL NULL NULL NULL NULL
+EXPLAIN FORMAT=JSON
+SELECT *
+FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
+WHERE federated.t3.name=t.name;
+EXPLAIN
+{
+ "query_block": {
+ "select_id": 1,
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "rows": 7,
+ "filtered": 100
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "18",
+ "used_key_parts": ["name"],
+ "ref": ["federated.t3.name"],
+ "rows": 2,
+ "filtered": 100,
+ "materialized": {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "message": "Pushed derived"
+ }
+ }
+ }
+ }
+ }
+}
+ANALYZE
+SELECT *
+FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
+WHERE federated.t3.name=t.name;
+id select_type table type possible_keys key key_len ref rows r_rows filtered r_filtered Extra
+1 PRIMARY t3 ALL NULL NULL NULL NULL 7 7.00 100.00 100.00
+1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 2 0.00 100.00 100.00
+2 PUSHED DERIVED NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL
+SELECT *
+FROM federated.t3, (SELECT t1.name FROM federated.t1
+WHERE id IN (SELECT count(*)
+FROM federated.t2 GROUP BY name)) t
+WHERE federated.t3.name=t.name;
+name name
+xxx xxx
+EXPLAIN
+SELECT *
+FROM federated.t3, (SELECT t1.name FROM federated.t1
+WHERE id IN (SELECT count(*)
+FROM federated.t2 GROUP BY name)) t
+WHERE federated.t3.name=t.name;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t3 ALL NULL NULL NULL NULL 7
+1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 2
+2 PUSHED DERIVED NULL NULL NULL NULL NULL NULL NULL NULL
+3 MATERIALIZED t2 ALL NULL NULL NULL NULL 7 Using temporary
+ANALYZE FORMAT=JSON
+SELECT *
+FROM federated.t3, (SELECT t1.name FROM federated.t1
+WHERE id IN (SELECT count(*)
+FROM federated.t2 GROUP BY name)) t
+WHERE federated.t3.name=t.name;
+ANALYZE
+{
+ "query_block": {
+ "select_id": 1,
+ "r_loops": 1,
+ "r_total_time_ms": "REPLACED",
+ "table": {
+ "table_name": "t3",
+ "access_type": "ALL",
+ "r_loops": 1,
+ "rows": 7,
+ "r_rows": 7,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100
+ },
+ "table": {
+ "table_name": "<derived2>",
+ "access_type": "ref",
+ "possible_keys": ["key0"],
+ "key": "key0",
+ "key_length": "18",
+ "used_key_parts": ["name"],
+ "ref": ["federated.t3.name"],
+ "r_loops": 7,
+ "rows": 2,
+ "r_rows": 0,
+ "r_total_time_ms": "REPLACED",
+ "filtered": 100,
+ "r_filtered": 100,
+ "materialized": {
+ "query_block": {
+ "select_id": 2,
+ "table": {
+ "message": "Pushed derived"
+ },
+ "subqueries": [
+ {
+ "query_block": {
+ "select_id": 3,
+ "temporary_table": {
+ "table": {
+ "table_name": "t2",
+ "access_type": "ALL",
+ "r_loops": 0,
+ "rows": 7,
+ "r_rows": null,
+ "filtered": 100,
+ "r_filtered": null
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+}
+SELECT t.id, federated.t3.name
+FROM federated.t3,
+( SELECT * FROM federated.t1 WHERE id < 3
+UNION
+SELECT * FROM federated.t1 WHERE id >= 5) t
+WHERE federated.t3.name=t.name;
+id name
+5 yyy
+7 yyy
+5 yyy
+7 yyy
+5 yyy
+7 yyy
+EXPLAIN
+SELECT t.id, federated.t3.name
+FROM federated.t3,
+( SELECT * FROM federated.t1 WHERE id < 3
+UNION
+SELECT * FROM federated.t1 WHERE id >= 5) t
+WHERE federated.t3.name=t.name;
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t3 ALL NULL NULL NULL NULL 7
+1 PRIMARY <derived2> ref key0 key0 18 federated.t3.name 2
+2 PUSHED DERIVED NULL NULL NULL NULL NULL NULL NULL NULL
+DROP TABLE federated.t1, federated.t2, federated.t3;
+connection slave;
+DROP TABLE federated.t1, federated.t2;
+connection default;
+set global federated_pushdown=0;
+connection master;
+DROP TABLE IF EXISTS federated.t1;
+DROP DATABASE IF EXISTS federated;
+connection slave;
+DROP TABLE IF EXISTS federated.t1;
+DROP DATABASE IF EXISTS federated;
diff --git a/mysql-test/suite/federated/federatedx_create_handlers.test b/mysql-test/suite/federated/federatedx_create_handlers.test
new file mode 100644
index 00000000000..373b2aaaa33
--- /dev/null
+++ b/mysql-test/suite/federated/federatedx_create_handlers.test
@@ -0,0 +1,161 @@
+--source have_federatedx.inc
+--source include/federated.inc
+
+connection default;
+
+set global federated_pushdown=1;
+
+connection slave;
+
+DROP TABLE IF EXISTS federated.t1;
+
+CREATE TABLE federated.t1 (
+ id int(20) NOT NULL,
+ name varchar(16) NOT NULL default ''
+)
+DEFAULT CHARSET=latin1;
+
+INSERT INTO federated.t1 VALUES
+ (3,'xxx'), (7,'yyy'), (4,'xxx'), (1,'zzz'), (5,'yyy');
+
+DROP TABLE IF EXISTS federated.t2;
+
+CREATE TABLE federated.t2 (
+ name varchar(16) NOT NULL default ''
+)
+DEFAULT CHARSET=latin1;
+
+INSERT INTO federated.t2 VALUES
+ ('yyy'), ('www'), ('yyy'), ('xxx'), ('www'), ('yyy'), ('www');
+
+
+connection master;
+
+DROP TABLE IF EXISTS federated.t1;
+
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval
+CREATE TABLE federated.t1 (
+ id int(20) NOT NULL,
+ name varchar(16) NOT NULL default ''
+)
+ENGINE="FEDERATED" DEFAULT CHARSET=latin1
+CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
+
+DROP TABLE IF EXISTS federated.t2;
+
+--replace_result $SLAVE_MYPORT SLAVE_PORT
+eval
+CREATE TABLE federated.t2 (
+ name varchar(16) NOT NULL default ''
+)
+ENGINE="FEDERATED" DEFAULT CHARSET=latin1
+CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2';
+
+SELECT * FROM federated.t1;
+
+SELECT id FROM federated.t1 WHERE id < 5;
+
+SELECT count(*), name FROM federated.t1 WHERE id < 5 GROUP BY name;
+
+SELECT * FROM federated.t1, federated.t2
+ WHERE federated.t1.name = federated.t2.name;
+
+SELECT * FROM federated.t1 LEFT JOIN federated.t2
+ ON federated.t1.name = federated.t2.name
+ WHERE federated.t1.id > 1;
+
+SELECT * FROM federated.t1
+ WHERE id IN (SELECT count(*) FROM federated.t2 GROUP BY name);
+
+EXPLAIN
+SELECT id FROM federated.t1 WHERE id < 5;
+
+EXPLAIN EXTENDED
+SELECT id FROM federated.t1 WHERE id < 5;
+
+EXPLAIN FORMAT=JSON
+SELECT id FROM federated.t1 WHERE id < 5;
+
+ANALYZE
+SELECT id FROM federated.t1 WHERE id < 5;
+
+--source include/analyze-format.inc
+ANALYZE FORMAT=JSON
+SELECT id FROM federated.t1 WHERE id < 5;
+
+CREATE TABLE federated.t3 (
+ name varchar(16) NOT NULL default ''
+)
+DEFAULT CHARSET=latin1;
+
+INSERT INTO federated.t3 VALUES
+ ('yyy'), ('www'), ('yyy'), ('xxx'), ('www'), ('yyy'), ('www');
+
+SELECT *
+FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
+WHERE federated.t3.name=t.name;
+
+EXPLAIN
+SELECT *
+FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
+WHERE federated.t3.name=t.name;
+
+EXPLAIN FORMAT=JSON
+SELECT *
+FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
+WHERE federated.t3.name=t.name;
+
+ANALYZE
+SELECT *
+FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
+WHERE federated.t3.name=t.name;
+
+SELECT *
+FROM federated.t3, (SELECT t1.name FROM federated.t1
+ WHERE id IN (SELECT count(*)
+ FROM federated.t2 GROUP BY name)) t
+WHERE federated.t3.name=t.name;
+
+EXPLAIN
+SELECT *
+FROM federated.t3, (SELECT t1.name FROM federated.t1
+ WHERE id IN (SELECT count(*)
+ FROM federated.t2 GROUP BY name)) t
+WHERE federated.t3.name=t.name;
+
+--source include/analyze-format.inc
+ANALYZE FORMAT=JSON
+SELECT *
+FROM federated.t3, (SELECT t1.name FROM federated.t1
+ WHERE id IN (SELECT count(*)
+ FROM federated.t2 GROUP BY name)) t
+WHERE federated.t3.name=t.name;
+
+SELECT t.id, federated.t3.name
+FROM federated.t3,
+ ( SELECT * FROM federated.t1 WHERE id < 3
+ UNION
+ SELECT * FROM federated.t1 WHERE id >= 5) t
+WHERE federated.t3.name=t.name;
+
+EXPLAIN
+SELECT t.id, federated.t3.name
+FROM federated.t3,
+ ( SELECT * FROM federated.t1 WHERE id < 3
+ UNION
+ SELECT * FROM federated.t1 WHERE id >= 5) t
+WHERE federated.t3.name=t.name;
+
+
+DROP TABLE federated.t1, federated.t2, federated.t3;
+
+connection slave;
+DROP TABLE federated.t1, federated.t2;
+
+connection default;
+
+set global federated_pushdown=0;
+
+source include/federated_cleanup.inc;
+
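For orientation: the new test above exercises FederatedX select/derived-table pushdown, toggled by the global federated_pushdown variable. A minimal sketch of how the behaviour can be observed, assuming a FEDERATED table that already points at a remote server as set up in the test:

    set global federated_pushdown=1;           # allow whole query blocks to be sent to the remote server
    EXPLAIN FORMAT=JSON
    SELECT id FROM federated.t1 WHERE id < 5;  # "message": "Pushed select" marks a pushed-down query block
    set global federated_pushdown=0;           # restore the default
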
diff --git a/mysql-test/suite/federated/net_thd_crash-12725.result b/mysql-test/suite/federated/net_thd_crash-12725.result
index 8c85b7a7594..cd9f5b0a715 100644
--- a/mysql-test/suite/federated/net_thd_crash-12725.result
+++ b/mysql-test/suite/federated/net_thd_crash-12725.result
@@ -5,6 +5,7 @@ CREATE TABLE t2 (i INT) ENGINE=FEDERATED CONNECTION="mysql://root@localhost:MAST
ALTER TABLE t2 DISABLE KEYS;
ERROR HY000: Storage engine FEDERATED of the table `test`.`t2` doesn't have this option
CREATE TABLE t3 (i INT) ENGINE=FEDERATED CONNECTION="mysql://root@localhost:MASTER_MYPORT/test/t1";
+# restart
SET GLOBAL query_cache_size= default;
SET GLOBAL query_cache_type= default;
drop table t1, t2, t3;
diff --git a/mysql-test/suite/federated/net_thd_crash-12951.result b/mysql-test/suite/federated/net_thd_crash-12951.result
index 573ac96efff..a46bad2dbdc 100644
--- a/mysql-test/suite/federated/net_thd_crash-12951.result
+++ b/mysql-test/suite/federated/net_thd_crash-12951.result
@@ -5,6 +5,7 @@ create table t2 (i int) engine=federated
CONNECTION="mysql://root@localhost:MASTER_MYPORT/test/t1";
select * from t2;
i
+# restart
drop table t2;
drop table t1;
set global query_cache_type= default;
diff --git a/mysql-test/suite/funcs_1/r/innodb_func_view.result b/mysql-test/suite/funcs_1/r/innodb_func_view.result
index 7be6bb2f049..7850e017fb2 100644
--- a/mysql-test/suite/funcs_1/r/innodb_func_view.result
+++ b/mysql-test/suite/funcs_1/r/innodb_func_view.result
@@ -3778,14 +3778,14 @@ WHERE select_id = 51 OR select_id IS NULL order by id;
CAST(my_varbinary_1000 AS TIME) my_varbinary_1000 id
NULL NULL 1
NULL 2
-00:00:00 <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 23
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
SHOW CREATE VIEW v1;
View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select cast(`t1_values`.`my_varbinary_1000` as time) AS `CAST(my_varbinary_1000 AS TIME)`,`t1_values`.`my_varbinary_1000` AS `my_varbinary_1000`,`t1_values`.`id` AS `id` from `t1_values` latin1 latin1_swedish_ci
@@ -3795,14 +3795,14 @@ WHERE select_id = 51 OR select_id IS NULL) order by id;
CAST(my_varbinary_1000 AS TIME) my_varbinary_1000 id
NULL NULL 1
NULL 2
-00:00:00 <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 23
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
DROP VIEW v1;
@@ -3813,15 +3813,15 @@ my_binary_30, id FROM t1_values
WHERE select_id = 50 OR select_id IS NULL order by id;
CAST(my_binary_30 AS TIME) my_binary_30 id
NULL NULL 1
-00:00:00 2
-00:00:00 <--------30 characters-------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL 2
+NULL <--------30 characters-------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 22
Warnings:
-Warning 1292 Truncated incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-Warning 1292 Truncated incorrect time value: '<--------30 characters------->'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+Warning 1292 Incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+Warning 1292 Incorrect time value: '<--------30 characters------->'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Warning 1292 Truncated incorrect time value: '-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Warning 1292 Truncated incorrect time value: '1 17:58\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SHOW CREATE VIEW v1;
@@ -3832,15 +3832,15 @@ WHERE v1.id IN (SELECT id FROM t1_values
WHERE select_id = 50 OR select_id IS NULL) order by id;
CAST(my_binary_30 AS TIME) my_binary_30 id
NULL NULL 1
-00:00:00
-00:00:00 <--------30 characters-------> 3
--00:00:00 ---äÖüß@µ*$--
+NULL
+NULL <--------30 characters-------> 3
+NULL ---äÖüß@µ*$--
-00:00:01 -1
41:58:00 1 17:58
Warnings:
-Warning 1292 Truncated incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-Warning 1292 Truncated incorrect time value: '<--------30 characters------->'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+Warning 1292 Incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+Warning 1292 Incorrect time value: '<--------30 characters------->'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Warning 1292 Truncated incorrect time value: '-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Warning 1292 Truncated incorrect time value: '1 17:58\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
DROP VIEW v1;
@@ -3854,14 +3854,14 @@ WHERE select_id = 49 OR select_id IS NULL order by id;
CAST(my_varchar_1000 AS TIME) my_varchar_1000 id
NULL NULL 1
NULL 2
-00:00:00 <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 21
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
SHOW CREATE VIEW v1;
View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select cast(`t1_values`.`my_varchar_1000` as time) AS `CAST(my_varchar_1000 AS TIME)`,`t1_values`.`my_varchar_1000` AS `my_varchar_1000`,`t1_values`.`id` AS `id` from `t1_values` latin1 latin1_swedish_ci
@@ -3871,14 +3871,14 @@ WHERE select_id = 49 OR select_id IS NULL) order by id;
CAST(my_varchar_1000 AS TIME) my_varchar_1000 id
NULL NULL 1
NULL 2
-00:00:00 <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 21
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
DROP VIEW v1;
@@ -3890,14 +3890,14 @@ WHERE select_id = 48 OR select_id IS NULL order by id;
CAST(my_char_30 AS TIME) my_char_30 id
NULL NULL 1
NULL 2
-00:00:00 <--------30 characters-------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <--------30 characters-------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 20
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<--------30 characters------->'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$--'
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<--------30 characters------->'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$--'
SHOW CREATE VIEW v1;
View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select cast(`t1_values`.`my_char_30` as time) AS `CAST(my_char_30 AS TIME)`,`t1_values`.`my_char_30` AS `my_char_30`,`t1_values`.`id` AS `id` from `t1_values` latin1 latin1_swedish_ci
@@ -3907,14 +3907,14 @@ WHERE select_id = 48 OR select_id IS NULL) order by id;
CAST(my_char_30 AS TIME) my_char_30 id
NULL NULL 1
NULL 2
-00:00:00 <--------30 characters-------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <--------30 characters-------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 20
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<--------30 characters------->'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$--'
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<--------30 characters------->'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$--'
DROP VIEW v1;
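For orientation: the updated innodb_func_view results reflect stricter TIME casting; strings with no recognizable time part now yield NULL plus an "Incorrect time value" warning instead of being truncated to 00:00:00. A small sketch of the new behaviour, using values taken from the expected results above:

    SELECT CAST('' AS TIME);          # NULL
    SHOW WARNINGS;                    # Warning 1292: Incorrect time value: ''
    SELECT CAST('1 17:58' AS TIME);   # 41:58:00, a valid day+time literal still converts
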
diff --git a/mysql-test/suite/funcs_1/r/is_check_constraints.result b/mysql-test/suite/funcs_1/r/is_check_constraints.result
index 678cfb8db2f..eaf90f44544 100644
--- a/mysql-test/suite/funcs_1/r/is_check_constraints.result
+++ b/mysql-test/suite/funcs_1/r/is_check_constraints.result
@@ -9,6 +9,11 @@ t int, check (t>32) # table constraint
) ENGINE=myisam;
SELECT * from information_schema.check_constraints order by check_clause;
CONSTRAINT_CATALOG def
+CONSTRAINT_SCHEMA mysql
+CONSTRAINT_NAME Priv
+TABLE_NAME global_priv
+CHECK_CLAUSE json_valid(`Priv`)
+CONSTRAINT_CATALOG def
CONSTRAINT_SCHEMA test
CONSTRAINT_NAME CONSTRAINT_1
TABLE_NAME t0
@@ -17,6 +22,11 @@ ALTER TABLE t0
ADD CONSTRAINT CHK_t0_t CHECK(t<100);
SELECT * from information_schema.check_constraints order by check_clause;
CONSTRAINT_CATALOG def
+CONSTRAINT_SCHEMA mysql
+CONSTRAINT_NAME Priv
+TABLE_NAME global_priv
+CHECK_CLAUSE json_valid(`Priv`)
+CONSTRAINT_CATALOG def
CONSTRAINT_SCHEMA test
CONSTRAINT_NAME CHK_t0_t
TABLE_NAME t0
@@ -30,6 +40,11 @@ ALTER TABLE t0
DROP CONSTRAINT CHK_t0_t;
SELECT * from information_schema.check_constraints order by check_clause;
CONSTRAINT_CATALOG def
+CONSTRAINT_SCHEMA mysql
+CONSTRAINT_NAME Priv
+TABLE_NAME global_priv
+CHECK_CLAUSE json_valid(`Priv`)
+CONSTRAINT_CATALOG def
CONSTRAINT_SCHEMA test
CONSTRAINT_NAME CONSTRAINT_1
TABLE_NAME t0
@@ -40,6 +55,11 @@ tt int, CONSTRAINT CHK_tt CHECK(tt<100) # table constraint
) ENGINE=InnoDB;
SELECT * from information_schema.check_constraints order by check_clause;
CONSTRAINT_CATALOG def
+CONSTRAINT_SCHEMA mysql
+CONSTRAINT_NAME Priv
+TABLE_NAME global_priv
+CHECK_CLAUSE json_valid(`Priv`)
+CONSTRAINT_CATALOG def
CONSTRAINT_SCHEMA test
CONSTRAINT_NAME CHK_tt
TABLE_NAME t1
@@ -58,6 +78,11 @@ ALTER TABLE t1
DROP CONSTRAINT CHK_tt;
SELECT * from information_schema.check_constraints order by check_clause;
CONSTRAINT_CATALOG def
+CONSTRAINT_SCHEMA mysql
+CONSTRAINT_NAME Priv
+TABLE_NAME global_priv
+CHECK_CLAUSE json_valid(`Priv`)
+CONSTRAINT_CATALOG def
CONSTRAINT_SCHEMA test
CONSTRAINT_NAME t
TABLE_NAME t1
@@ -81,6 +106,11 @@ CONSTRAINT_NAME name
TABLE_NAME t2
CHECK_CLAUSE char_length(`name`) > 2
CONSTRAINT_CATALOG def
+CONSTRAINT_SCHEMA mysql
+CONSTRAINT_NAME Priv
+TABLE_NAME global_priv
+CHECK_CLAUSE json_valid(`Priv`)
+CONSTRAINT_CATALOG def
CONSTRAINT_SCHEMA test
CONSTRAINT_NAME CHK_dates
TABLE_NAME t2
@@ -104,6 +134,11 @@ CONSTRAINT_NAME name
TABLE_NAME t2
CHECK_CLAUSE char_length(`name`) > 2
CONSTRAINT_CATALOG def
+CONSTRAINT_SCHEMA mysql
+CONSTRAINT_NAME Priv
+TABLE_NAME global_priv
+CHECK_CLAUSE json_valid(`Priv`)
+CONSTRAINT_CATALOG def
CONSTRAINT_SCHEMA test
CONSTRAINT_NAME CHK_dates
TABLE_NAME t2
@@ -132,6 +167,7 @@ CONSTRAINT b check (b>10) # table constraint
select * from information_schema.check_constraints order by check_clause;
CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_NAME CHECK_CLAUSE
def test name t2 char_length(`name`) > 2
+def mysql Priv global_priv json_valid(`Priv`)
def test b t3 `b` > 0
def test b t3 `b` > 10
def test CHK_dates t2 `start_date` is null
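For orientation: the expected is_check_constraints output now also lists the system table mysql.global_priv, whose Priv column is guarded by a JSON-validity check. A sketch that isolates just that row, using only the columns shown above:

    SELECT CONSTRAINT_SCHEMA, TABLE_NAME, CHECK_CLAUSE
    FROM information_schema.CHECK_CONSTRAINTS
    WHERE CONSTRAINT_SCHEMA = 'mysql' AND TABLE_NAME = 'global_priv';
    # expected per the results above: mysql  global_priv  json_valid(`Priv`)
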
diff --git a/mysql-test/suite/funcs_1/r/is_columns_is.result b/mysql-test/suite/funcs_1/r/is_columns_is.result
index dfaa5d75137..386b0f07f98 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_is.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_is.result
@@ -205,6 +205,10 @@ def information_schema KEY_COLUMN_USAGE REFERENCED_TABLE_SCHEMA 10 NULL YES varc
def information_schema KEY_COLUMN_USAGE TABLE_CATALOG 4 '' NO varchar 512 1536 NULL NULL NULL utf8 utf8_general_ci varchar(512) select NEVER NULL
def information_schema KEY_COLUMN_USAGE TABLE_NAME 6 '' NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select NEVER NULL
def information_schema KEY_COLUMN_USAGE TABLE_SCHEMA 5 '' NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select NEVER NULL
+def information_schema OPTIMIZER_TRACE INSUFFICIENT_PRIVILEGES 4 0 NO tinyint NULL NULL 3 0 NULL NULL NULL tinyint(1) select NEVER NULL
+def information_schema OPTIMIZER_TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE 3 0 NO int NULL NULL 10 0 NULL NULL NULL int(20) select NEVER NULL
+def information_schema OPTIMIZER_TRACE QUERY 1 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8 utf8_general_ci longtext select NEVER NULL
+def information_schema OPTIMIZER_TRACE TRACE 2 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8 utf8_general_ci longtext select NEVER NULL
def information_schema PARAMETERS CHARACTER_MAXIMUM_LENGTH 8 NULL YES int NULL NULL 10 0 NULL NULL NULL int(21) select NEVER NULL
def information_schema PARAMETERS CHARACTER_OCTET_LENGTH 9 NULL YES int NULL NULL 10 0 NULL NULL NULL int(21) select NEVER NULL
def information_schema PARAMETERS CHARACTER_SET_NAME 13 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select NEVER NULL
@@ -743,6 +747,10 @@ NULL information_schema KEY_COLUMN_USAGE POSITION_IN_UNIQUE_CONSTRAINT bigint NU
3.0000 information_schema KEY_COLUMN_USAGE REFERENCED_TABLE_SCHEMA varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 information_schema KEY_COLUMN_USAGE REFERENCED_TABLE_NAME varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 information_schema KEY_COLUMN_USAGE REFERENCED_COLUMN_NAME varchar 64 192 utf8 utf8_general_ci varchar(64)
+1.0000 information_schema OPTIMIZER_TRACE QUERY longtext 4294967295 4294967295 utf8 utf8_general_ci longtext
+1.0000 information_schema OPTIMIZER_TRACE TRACE longtext 4294967295 4294967295 utf8 utf8_general_ci longtext
+NULL information_schema OPTIMIZER_TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE int NULL NULL NULL NULL int(20)
+NULL information_schema OPTIMIZER_TRACE INSUFFICIENT_PRIVILEGES tinyint NULL NULL NULL NULL tinyint(1)
3.0000 information_schema PARAMETERS SPECIFIC_CATALOG varchar 512 1536 utf8 utf8_general_ci varchar(512)
3.0000 information_schema PARAMETERS SPECIFIC_SCHEMA varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 information_schema PARAMETERS SPECIFIC_NAME varchar 64 192 utf8 utf8_general_ci varchar(64)
diff --git a/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result b/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result
index 96fb1e286c0..97346c4648d 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_is_embedded.result
@@ -205,6 +205,10 @@ def information_schema KEY_COLUMN_USAGE REFERENCED_TABLE_SCHEMA 10 NULL YES varc
def information_schema KEY_COLUMN_USAGE TABLE_CATALOG 4 '' NO varchar 512 1536 NULL NULL NULL utf8 utf8_general_ci varchar(512) NEVER NULL
def information_schema KEY_COLUMN_USAGE TABLE_NAME 6 '' NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) NEVER NULL
def information_schema KEY_COLUMN_USAGE TABLE_SCHEMA 5 '' NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) NEVER NULL
+def information_schema OPTIMIZER_TRACE INSUFFICIENT_PRIVILEGES 4 0 NO tinyint NULL NULL 3 0 NULL NULL NULL tinyint(1) NEVER NULL
+def information_schema OPTIMIZER_TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE 3 0 NO int NULL NULL 10 0 NULL NULL NULL int(20) NEVER NULL
+def information_schema OPTIMIZER_TRACE QUERY 1 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8 utf8_general_ci longtext NEVER NULL
+def information_schema OPTIMIZER_TRACE TRACE 2 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8 utf8_general_ci longtext NEVER NULL
def information_schema PARAMETERS CHARACTER_MAXIMUM_LENGTH 8 NULL YES int NULL NULL 10 0 NULL NULL NULL int(21) NEVER NULL
def information_schema PARAMETERS CHARACTER_OCTET_LENGTH 9 NULL YES int NULL NULL 10 0 NULL NULL NULL int(21) NEVER NULL
def information_schema PARAMETERS CHARACTER_SET_NAME 13 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) NEVER NULL
@@ -743,6 +747,10 @@ NULL information_schema KEY_COLUMN_USAGE POSITION_IN_UNIQUE_CONSTRAINT bigint NU
3.0000 information_schema KEY_COLUMN_USAGE REFERENCED_TABLE_SCHEMA varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 information_schema KEY_COLUMN_USAGE REFERENCED_TABLE_NAME varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 information_schema KEY_COLUMN_USAGE REFERENCED_COLUMN_NAME varchar 64 192 utf8 utf8_general_ci varchar(64)
+1.0000 information_schema OPTIMIZER_TRACE QUERY longtext 4294967295 4294967295 utf8 utf8_general_ci longtext
+1.0000 information_schema OPTIMIZER_TRACE TRACE longtext 4294967295 4294967295 utf8 utf8_general_ci longtext
+NULL information_schema OPTIMIZER_TRACE MISSING_BYTES_BEYOND_MAX_MEM_SIZE int NULL NULL NULL NULL int(20)
+NULL information_schema OPTIMIZER_TRACE INSUFFICIENT_PRIVILEGES tinyint NULL NULL NULL NULL tinyint(1)
3.0000 information_schema PARAMETERS SPECIFIC_CATALOG varchar 512 1536 utf8 utf8_general_ci varchar(512)
3.0000 information_schema PARAMETERS SPECIFIC_SCHEMA varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 information_schema PARAMETERS SPECIFIC_NAME varchar 64 192 utf8 utf8_general_ci varchar(64)
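For orientation: both is_columns_is result files gain the INFORMATION_SCHEMA.OPTIMIZER_TRACE table (QUERY, TRACE, MISSING_BYTES_BEYOND_MAX_MEM_SIZE, INSUFFICIENT_PRIVILEGES). A minimal usage sketch, assuming the optimizer_trace session variable accepts the usual 'enabled=on' switch:

    SET optimizer_trace='enabled=on';    # start collecting a trace for subsequent statements
    SELECT 1;
    SELECT QUERY, MISSING_BYTES_BEYOND_MAX_MEM_SIZE, INSUFFICIENT_PRIVILEGES
    FROM information_schema.OPTIMIZER_TRACE;
    SET optimizer_trace='enabled=off';   # stop tracing
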
diff --git a/mysql-test/suite/funcs_1/r/is_columns_mysql.result b/mysql-test/suite/funcs_1/r/is_columns_mysql.result
index f5452e0c8e8..31160f8a3ec 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_mysql.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_mysql.result
@@ -61,7 +61,7 @@ def mysql event modified 9 '0000-00-00 00:00:00' NO timestamp NULL NULL NULL NUL
def mysql event name 2 '' NO char 64 192 NULL NULL NULL utf8 utf8_general_ci char(64) PRI select,insert,update,references NEVER NULL
def mysql event on_completion 14 'DROP' NO enum 8 24 NULL NULL NULL utf8 utf8_general_ci enum('DROP','PRESERVE') select,insert,update,references NEVER NULL
def mysql event originator 17 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references NEVER NULL
-def mysql event sql_mode 15 '' NO set 539 1617 NULL NULL NULL utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') select,insert,update,references NEVER NULL
+def mysql event sql_mode 15 '' NO set 561 1683 NULL NULL NULL utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') select,insert,update,references NEVER NULL
def mysql event starts 11 NULL YES datetime NULL NULL NULL NULL 0 NULL NULL datetime select,insert,update,references NEVER NULL
def mysql event status 13 'ENABLED' NO enum 18 54 NULL NULL NULL utf8 utf8_general_ci enum('ENABLED','DISABLED','SLAVESIDE_DISABLED') select,insert,update,references NEVER NULL
def mysql event time_zone 18 'SYSTEM' NO char 64 64 NULL NULL NULL latin1 latin1_swedish_ci char(64) select,insert,update,references NEVER NULL
@@ -75,6 +75,9 @@ def mysql general_log event_time 1 current_timestamp(6) NO timestamp NULL NULL N
def mysql general_log server_id 4 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references NEVER NULL
def mysql general_log thread_id 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(21) unsigned select,insert,update,references NEVER NULL
def mysql general_log user_host 2 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext select,insert,update,references NEVER NULL
+def mysql global_priv Host 1 '' NO char 60 180 NULL NULL NULL utf8 utf8_bin char(60) PRI select,insert,update,references NEVER NULL
+def mysql global_priv Priv 3 '{}' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext select,insert,update,references NEVER NULL
+def mysql global_priv User 2 '' NO char 80 240 NULL NULL NULL utf8 utf8_bin char(80) PRI select,insert,update,references NEVER NULL
def mysql gtid_slave_pos domain_id 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI select,insert,update,references NEVER NULL
def mysql gtid_slave_pos seq_no 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def mysql gtid_slave_pos server_id 3 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references NEVER NULL
@@ -93,26 +96,6 @@ def mysql help_topic help_category_id 3 NULL NO smallint NULL NULL 5 0 NULL NULL
def mysql help_topic help_topic_id 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI select,insert,update,references NEVER NULL
def mysql help_topic name 2 NULL NO char 64 192 NULL NULL NULL utf8 utf8_general_ci char(64) UNI select,insert,update,references NEVER NULL
def mysql help_topic url 6 NULL NO text 65535 65535 NULL NULL NULL utf8 utf8_general_ci text select,insert,update,references NEVER NULL
-def mysql host Alter_priv 12 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Alter_routine_priv 18 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Create_priv 7 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Create_routine_priv 17 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Create_tmp_table_priv 13 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Create_view_priv 15 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Db 2 '' NO char 64 192 NULL NULL NULL utf8 utf8_bin char(64) PRI select,insert,update,references NEVER NULL
-def mysql host Delete_priv 6 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Drop_priv 8 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Execute_priv 19 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Grant_priv 9 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Host 1 '' NO char 60 180 NULL NULL NULL utf8 utf8_bin char(60) PRI select,insert,update,references NEVER NULL
-def mysql host Index_priv 11 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Insert_priv 4 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Lock_tables_priv 14 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host References_priv 10 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Select_priv 3 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Show_view_priv 16 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Trigger_priv 20 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql host Update_priv 5 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
def mysql index_stats avg_frequency 5 NULL YES decimal NULL NULL 12 4 NULL NULL NULL decimal(12,4) select,insert,update,references NEVER NULL
def mysql index_stats db_name 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI select,insert,update,references NEVER NULL
def mysql index_stats index_name 3 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI select,insert,update,references NEVER NULL
@@ -153,7 +136,7 @@ def mysql proc returns 10 NULL NO longblob 4294967295 4294967295 NULL NULL NULL
def mysql proc security_type 8 'DEFINER' NO enum 7 21 NULL NULL NULL utf8 utf8_general_ci enum('INVOKER','DEFINER') select,insert,update,references NEVER NULL
def mysql proc specific_name 4 '' NO char 64 192 NULL NULL NULL utf8 utf8_general_ci char(64) select,insert,update,references NEVER NULL
def mysql proc sql_data_access 6 'CONTAINS_SQL' NO enum 17 51 NULL NULL NULL utf8 utf8_general_ci enum('CONTAINS_SQL','NO_SQL','READS_SQL_DATA','MODIFIES_SQL_DATA') select,insert,update,references NEVER NULL
-def mysql proc sql_mode 15 '' NO set 539 1617 NULL NULL NULL utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') select,insert,update,references NEVER NULL
+def mysql proc sql_mode 15 '' NO set 561 1683 NULL NULL NULL utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') select,insert,update,references NEVER NULL
def mysql proc type 3 NULL NO enum 12 36 NULL NULL NULL utf8 utf8_general_ci enum('FUNCTION','PROCEDURE','PACKAGE','PACKAGE BODY') PRI select,insert,update,references NEVER NULL
def mysql procs_priv Db 2 '' NO char 64 192 NULL NULL NULL utf8 utf8_bin char(64) PRI select,insert,update,references NEVER NULL
def mysql procs_priv Grantor 6 '' NO char 141 423 NULL NULL NULL utf8 utf8_bin char(141) MUL select,insert,update,references NEVER NULL
@@ -226,53 +209,53 @@ def mysql transaction_registry commit_id 2 NULL NO bigint NULL NULL 20 0 NULL NU
def mysql transaction_registry commit_timestamp 4 '0000-00-00 00:00:00.000000' NO timestamp NULL NULL NULL NULL 6 NULL NULL timestamp(6) MUL select,insert,update,references NEVER NULL
def mysql transaction_registry isolation_level 5 NULL NO enum 16 48 NULL NULL NULL utf8 utf8_bin enum('READ-UNCOMMITTED','READ-COMMITTED','REPEATABLE-READ','SERIALIZABLE') select,insert,update,references NEVER NULL
def mysql transaction_registry transaction_id 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned PRI select,insert,update,references NEVER NULL
-def mysql user Alter_priv 17 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Alter_routine_priv 28 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user authentication_string 43 NULL NO text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references NEVER NULL
-def mysql user Create_priv 8 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Create_routine_priv 27 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Create_tablespace_priv 32 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Create_tmp_table_priv 20 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Create_user_priv 29 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Create_view_priv 25 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user default_role 46 '' NO char 80 240 NULL NULL NULL utf8 utf8_bin char(80) select,insert,update,references NEVER NULL
-def mysql user Delete_history_priv 33 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Delete_priv 7 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Drop_priv 9 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Event_priv 30 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Execute_priv 22 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user File_priv 13 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Grant_priv 14 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Host 1 '' NO char 60 180 NULL NULL NULL utf8 utf8_bin char(60) PRI select,insert,update,references NEVER NULL
-def mysql user Index_priv 16 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Insert_priv 5 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user is_role 45 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Lock_tables_priv 21 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user max_connections 40 0 NO int NULL NULL 10 0 NULL NULL NULL int(11) unsigned select,insert,update,references NEVER NULL
-def mysql user max_questions 38 0 NO int NULL NULL 10 0 NULL NULL NULL int(11) unsigned select,insert,update,references NEVER NULL
+def mysql user Alter_priv 17 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Alter_routine_priv 28 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user authentication_string 43 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext select,insert,update,references NEVER NULL
+def mysql user Create_priv 8 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Create_routine_priv 27 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Create_tablespace_priv 32 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Create_tmp_table_priv 20 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Create_user_priv 29 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Create_view_priv 25 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user default_role 46 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext select,insert,update,references NEVER NULL
+def mysql user Delete_history_priv 33 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Delete_priv 7 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Drop_priv 9 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Event_priv 30 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Execute_priv 22 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user File_priv 13 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Grant_priv 14 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Host 1 '' NO char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL
+def mysql user Index_priv 16 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Insert_priv 5 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user is_role 45 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Lock_tables_priv 21 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user max_connections 40 0 NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
+def mysql user max_questions 38 0 NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
def mysql user max_statement_time 47 0.000000 NO decimal NULL NULL 12 6 NULL NULL NULL decimal(12,6) select,insert,update,references NEVER NULL
-def mysql user max_updates 39 0 NO int NULL NULL 10 0 NULL NULL NULL int(11) unsigned select,insert,update,references NEVER NULL
-def mysql user max_user_connections 41 0 NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL
-def mysql user Password 3 '' NO char 41 41 NULL NULL NULL latin1 latin1_bin char(41) select,insert,update,references NEVER NULL
-def mysql user password_expired 44 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user plugin 42 '' NO char 64 64 NULL NULL NULL latin1 latin1_swedish_ci char(64) select,insert,update,references NEVER NULL
-def mysql user Process_priv 12 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user References_priv 15 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Reload_priv 10 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Repl_client_priv 24 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Repl_slave_priv 23 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Select_priv 4 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Show_db_priv 18 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Show_view_priv 26 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Shutdown_priv 11 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user ssl_cipher 35 NULL NO blob 65535 65535 NULL NULL NULL NULL NULL blob select,insert,update,references NEVER NULL
-def mysql user ssl_type 34 '' NO enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('','ANY','X509','SPECIFIED') select,insert,update,references NEVER NULL
-def mysql user Super_priv 19 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Trigger_priv 31 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user Update_priv 6 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references NEVER NULL
-def mysql user User 2 '' NO char 80 240 NULL NULL NULL utf8 utf8_bin char(80) PRI select,insert,update,references NEVER NULL
-def mysql user x509_issuer 36 NULL NO blob 65535 65535 NULL NULL NULL NULL NULL blob select,insert,update,references NEVER NULL
-def mysql user x509_subject 37 NULL NO blob 65535 65535 NULL NULL NULL NULL NULL blob select,insert,update,references NEVER NULL
+def mysql user max_updates 39 0 NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL
+def mysql user max_user_connections 41 0 NO bigint NULL NULL 19 0 NULL NULL NULL bigint(21) select,insert,update,references NEVER NULL
+def mysql user Password 3 NULL YES longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext select,insert,update,references NEVER NULL
+def mysql user password_expired 44 '' NO varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user plugin 42 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext select,insert,update,references NEVER NULL
+def mysql user Process_priv 12 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user References_priv 15 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Reload_priv 10 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Repl_client_priv 24 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Repl_slave_priv 23 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Select_priv 4 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Show_db_priv 18 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Show_view_priv 26 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Shutdown_priv 11 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user ssl_cipher 35 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext select,insert,update,references NEVER NULL
+def mysql user ssl_type 34 NULL YES varchar 9 9 NULL NULL NULL latin1 latin1_swedish_ci varchar(9) select,insert,update,references NEVER NULL
+def mysql user Super_priv 19 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Trigger_priv 31 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user Update_priv 6 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) select,insert,update,references NEVER NULL
+def mysql user User 2 '' NO char 80 240 NULL NULL NULL utf8 utf8_bin char(80) select,insert,update,references NEVER NULL
+def mysql user x509_issuer 36 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext select,insert,update,references NEVER NULL
+def mysql user x509_subject 37 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext select,insert,update,references NEVER NULL
##########################################################################
# Show the quotient of CHARACTER_OCTET_LENGTH and CHARACTER_MAXIMUM_LENGTH
##########################################################################
@@ -289,11 +272,12 @@ COL_CML DATA_TYPE CHARACTER_SET_NAME COLLATION_NAME
1.0000 blob NULL NULL
1.0000 longblob NULL NULL
1.0000 varbinary NULL NULL
-1.0000 char latin1 latin1_bin
1.0000 char latin1 latin1_swedish_ci
+1.0000 varchar latin1 latin1_swedish_ci
1.0000 text utf8 utf8_bin
1.0000 mediumtext utf8 utf8_general_ci
1.0000 text utf8 utf8_general_ci
+1.0000 longtext utf8mb4 utf8mb4_bin
SELECT DISTINCT
CHARACTER_OCTET_LENGTH / CHARACTER_MAXIMUM_LENGTH AS COL_CML,
DATA_TYPE,
@@ -400,7 +384,7 @@ NULL mysql event starts datetime NULL NULL NULL NULL datetime
NULL mysql event ends datetime NULL NULL NULL NULL datetime
3.0000 mysql event status enum 18 54 utf8 utf8_general_ci enum('ENABLED','DISABLED','SLAVESIDE_DISABLED')
3.0000 mysql event on_completion enum 8 24 utf8 utf8_general_ci enum('DROP','PRESERVE')
-3.0000 mysql event sql_mode set 539 1617 utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT')
+3.0000 mysql event sql_mode set 561 1683 utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL')
3.0000 mysql event comment char 64 192 utf8 utf8_bin char(64)
NULL mysql event originator int NULL NULL NULL NULL int(10) unsigned
1.0000 mysql event time_zone char 64 64 latin1 latin1_swedish_ci char(64)
@@ -418,6 +402,9 @@ NULL mysql general_log thread_id bigint NULL NULL NULL NULL bigint(21) unsigned
NULL mysql general_log server_id int NULL NULL NULL NULL int(10) unsigned
3.0000 mysql general_log command_type varchar 64 192 utf8 utf8_general_ci varchar(64)
1.0000 mysql general_log argument mediumtext 16777215 16777215 utf8 utf8_general_ci mediumtext
+3.0000 mysql global_priv Host char 60 180 utf8 utf8_bin char(60)
+3.0000 mysql global_priv User char 80 240 utf8 utf8_bin char(80)
+1.0000 mysql global_priv Priv longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
NULL mysql gtid_slave_pos domain_id int NULL NULL NULL NULL int(10) unsigned
NULL mysql gtid_slave_pos sub_id bigint NULL NULL NULL NULL bigint(20) unsigned
NULL mysql gtid_slave_pos server_id int NULL NULL NULL NULL int(10) unsigned
@@ -436,26 +423,6 @@ NULL mysql help_topic help_category_id smallint NULL NULL NULL NULL smallint(5)
1.0000 mysql help_topic description text 65535 65535 utf8 utf8_general_ci text
1.0000 mysql help_topic example text 65535 65535 utf8 utf8_general_ci text
1.0000 mysql help_topic url text 65535 65535 utf8 utf8_general_ci text
-3.0000 mysql host Host char 60 180 utf8 utf8_bin char(60)
-3.0000 mysql host Db char 64 192 utf8 utf8_bin char(64)
-3.0000 mysql host Select_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Insert_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Update_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Delete_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Create_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Drop_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Grant_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host References_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Index_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Alter_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Create_tmp_table_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Lock_tables_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Create_view_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Show_view_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Create_routine_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Alter_routine_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Execute_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Trigger_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
3.0000 mysql index_stats db_name varchar 64 192 utf8 utf8_bin varchar(64)
3.0000 mysql index_stats table_name varchar 64 192 utf8 utf8_bin varchar(64)
3.0000 mysql index_stats index_name varchar 64 192 utf8 utf8_bin varchar(64)
@@ -491,7 +458,7 @@ NULL mysql innodb_table_stats sum_of_other_index_sizes bigint NULL NULL NULL NUL
3.0000 mysql proc definer char 141 423 utf8 utf8_bin char(141)
NULL mysql proc created timestamp NULL NULL NULL NULL timestamp
NULL mysql proc modified timestamp NULL NULL NULL NULL timestamp
-3.0000 mysql proc sql_mode set 539 1617 utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT')
+3.0000 mysql proc sql_mode set 561 1683 utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL')
1.0000 mysql proc comment text 65535 65535 utf8 utf8_bin text
3.0000 mysql proc character_set_client char 32 96 utf8 utf8_bin char(32)
3.0000 mysql proc collation_connection char 32 96 utf8 utf8_bin char(32)
@@ -571,48 +538,48 @@ NULL mysql transaction_registry commit_timestamp timestamp NULL NULL NULL NULL t
3.0000 mysql transaction_registry isolation_level enum 16 48 utf8 utf8_bin enum('READ-UNCOMMITTED','READ-COMMITTED','REPEATABLE-READ','SERIALIZABLE')
3.0000 mysql user Host char 60 180 utf8 utf8_bin char(60)
3.0000 mysql user User char 80 240 utf8 utf8_bin char(80)
-1.0000 mysql user Password char 41 41 latin1 latin1_bin char(41)
-3.0000 mysql user Select_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Insert_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Update_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Delete_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Create_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Drop_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Reload_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Shutdown_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Process_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user File_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Grant_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user References_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Index_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Alter_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Show_db_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Super_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Create_tmp_table_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Lock_tables_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Execute_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Repl_slave_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Repl_client_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Create_view_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Show_view_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Create_routine_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Alter_routine_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Create_user_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Event_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Trigger_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Create_tablespace_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Delete_history_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user ssl_type enum 9 27 utf8 utf8_general_ci enum('','ANY','X509','SPECIFIED')
-1.0000 mysql user ssl_cipher blob 65535 65535 NULL NULL blob
-1.0000 mysql user x509_issuer blob 65535 65535 NULL NULL blob
-1.0000 mysql user x509_subject blob 65535 65535 NULL NULL blob
-NULL mysql user max_questions int NULL NULL NULL NULL int(11) unsigned
-NULL mysql user max_updates int NULL NULL NULL NULL int(11) unsigned
-NULL mysql user max_connections int NULL NULL NULL NULL int(11) unsigned
-NULL mysql user max_user_connections int NULL NULL NULL NULL int(11)
-1.0000 mysql user plugin char 64 64 latin1 latin1_swedish_ci char(64)
-1.0000 mysql user authentication_string text 65535 65535 utf8 utf8_bin text
-3.0000 mysql user password_expired enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user is_role enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user default_role char 80 240 utf8 utf8_bin char(80)
+1.0000 mysql user Password longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
+1.0000 mysql user Select_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Insert_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Update_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Delete_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Create_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Drop_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Reload_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Shutdown_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Process_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user File_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Grant_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user References_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Index_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Alter_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Show_db_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Super_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Create_tmp_table_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Lock_tables_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Execute_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Repl_slave_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Repl_client_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Create_view_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Show_view_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Create_routine_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Alter_routine_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Create_user_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Event_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Trigger_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Create_tablespace_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Delete_history_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user ssl_type varchar 9 9 latin1 latin1_swedish_ci varchar(9)
+1.0000 mysql user ssl_cipher longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
+1.0000 mysql user x509_issuer longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
+1.0000 mysql user x509_subject longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
+NULL mysql user max_questions bigint NULL NULL NULL NULL bigint(20) unsigned
+NULL mysql user max_updates bigint NULL NULL NULL NULL bigint(20) unsigned
+NULL mysql user max_connections bigint NULL NULL NULL NULL bigint(20) unsigned
+NULL mysql user max_user_connections bigint NULL NULL NULL NULL bigint(21)
+1.0000 mysql user plugin longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
+1.0000 mysql user authentication_string longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
+1.0000 mysql user password_expired varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user is_role varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user default_role longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
NULL mysql user max_statement_time decimal NULL NULL NULL NULL decimal(12,6)
diff --git a/mysql-test/suite/funcs_1/r/is_columns_mysql_embedded.result b/mysql-test/suite/funcs_1/r/is_columns_mysql_embedded.result
index 9f17724b356..6ad671a54c8 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_mysql_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_mysql_embedded.result
@@ -61,7 +61,7 @@ def mysql event modified 9 '0000-00-00 00:00:00' NO timestamp NULL NULL NULL NUL
def mysql event name 2 '' NO char 64 192 NULL NULL NULL utf8 utf8_general_ci char(64) PRI NEVER NULL
def mysql event on_completion 14 'DROP' NO enum 8 24 NULL NULL NULL utf8 utf8_general_ci enum('DROP','PRESERVE') NEVER NULL
def mysql event originator 17 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned NEVER NULL
-def mysql event sql_mode 15 '' NO set 539 1617 NULL NULL NULL utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') NEVER NULL
+def mysql event sql_mode 15 '' NO set 561 1683 NULL NULL NULL utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NEVER NULL
def mysql event starts 11 NULL YES datetime NULL NULL NULL NULL 0 NULL NULL datetime NEVER NULL
def mysql event status 13 'ENABLED' NO enum 18 54 NULL NULL NULL utf8 utf8_general_ci enum('ENABLED','DISABLED','SLAVESIDE_DISABLED') NEVER NULL
def mysql event time_zone 18 'SYSTEM' NO char 64 64 NULL NULL NULL latin1 latin1_swedish_ci char(64) NEVER NULL
@@ -75,6 +75,9 @@ def mysql general_log event_time 1 current_timestamp(6) NO timestamp NULL NULL N
def mysql general_log server_id 4 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned NEVER NULL
def mysql general_log thread_id 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(21) unsigned NEVER NULL
def mysql general_log user_host 2 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext NEVER NULL
+def mysql global_priv Host 1 '' NO char 60 180 NULL NULL NULL utf8 utf8_bin char(60) PRI NEVER NULL
+def mysql global_priv Priv 3 '{}' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext NEVER NULL
+def mysql global_priv User 2 '' NO char 80 240 NULL NULL NULL utf8 utf8_bin char(80) PRI NEVER NULL
def mysql gtid_slave_pos domain_id 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI NEVER NULL
def mysql gtid_slave_pos seq_no 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned NEVER NULL
def mysql gtid_slave_pos server_id 3 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned NEVER NULL
@@ -93,26 +96,6 @@ def mysql help_topic help_category_id 3 NULL NO smallint NULL NULL 5 0 NULL NULL
def mysql help_topic help_topic_id 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI NEVER NULL
def mysql help_topic name 2 NULL NO char 64 192 NULL NULL NULL utf8 utf8_general_ci char(64) UNI NEVER NULL
def mysql help_topic url 6 NULL NO text 65535 65535 NULL NULL NULL utf8 utf8_general_ci text NEVER NULL
-def mysql host Alter_priv 12 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Alter_routine_priv 18 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Create_priv 7 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Create_routine_priv 17 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Create_tmp_table_priv 13 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Create_view_priv 15 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Db 2 '' NO char 64 192 NULL NULL NULL utf8 utf8_bin char(64) PRI NEVER NULL
-def mysql host Delete_priv 6 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Drop_priv 8 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Execute_priv 19 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Grant_priv 9 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Host 1 '' NO char 60 180 NULL NULL NULL utf8 utf8_bin char(60) PRI NEVER NULL
-def mysql host Index_priv 11 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Insert_priv 4 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Lock_tables_priv 14 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host References_priv 10 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Select_priv 3 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Show_view_priv 16 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Trigger_priv 20 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql host Update_priv 5 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
def mysql index_stats avg_frequency 5 NULL YES decimal NULL NULL 12 4 NULL NULL NULL decimal(12,4) NEVER NULL
def mysql index_stats db_name 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI NEVER NULL
def mysql index_stats index_name 3 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI NEVER NULL
@@ -139,7 +122,7 @@ def mysql proc returns 10 NULL NO longblob 4294967295 4294967295 NULL NULL NULL
def mysql proc security_type 8 'DEFINER' NO enum 7 21 NULL NULL NULL utf8 utf8_general_ci enum('INVOKER','DEFINER') NEVER NULL
def mysql proc specific_name 4 '' NO char 64 192 NULL NULL NULL utf8 utf8_general_ci char(64) NEVER NULL
def mysql proc sql_data_access 6 'CONTAINS_SQL' NO enum 17 51 NULL NULL NULL utf8 utf8_general_ci enum('CONTAINS_SQL','NO_SQL','READS_SQL_DATA','MODIFIES_SQL_DATA') NEVER NULL
-def mysql proc sql_mode 15 '' NO set 539 1617 NULL NULL NULL utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') NEVER NULL
+def mysql proc sql_mode 15 '' NO set 561 1683 NULL NULL NULL utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') NEVER NULL
def mysql proc type 3 NULL NO enum 12 36 NULL NULL NULL utf8 utf8_general_ci enum('FUNCTION','PROCEDURE','PACKAGE','PACKAGE BODY') PRI NEVER NULL
def mysql procs_priv Db 2 '' NO char 64 192 NULL NULL NULL utf8 utf8_bin char(64) PRI NEVER NULL
def mysql procs_priv Grantor 6 '' NO char 141 423 NULL NULL NULL utf8 utf8_bin char(141) MUL NEVER NULL
@@ -207,53 +190,53 @@ def mysql time_zone_transition_type Is_DST 4 0 NO tinyint NULL NULL 3 0 NULL NUL
def mysql time_zone_transition_type Offset 3 0 NO int NULL NULL 10 0 NULL NULL NULL int(11) NEVER NULL
def mysql time_zone_transition_type Time_zone_id 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI NEVER NULL
def mysql time_zone_transition_type Transition_type_id 2 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI NEVER NULL
-def mysql user Alter_priv 17 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Alter_routine_priv 28 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user authentication_string 43 NULL NO text 65535 65535 NULL NULL NULL utf8 utf8_bin text NEVER NULL
-def mysql user Create_priv 8 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Create_routine_priv 27 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Create_tablespace_priv 32 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Create_tmp_table_priv 20 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Create_user_priv 29 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Create_view_priv 25 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user default_role 46 '' NO char 80 240 NULL NULL NULL utf8 utf8_bin char(80) NEVER NULL
-def mysql user Delete_history_priv 33 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Delete_priv 7 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Drop_priv 9 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Event_priv 30 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Execute_priv 22 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user File_priv 13 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Grant_priv 14 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Host 1 '' NO char 60 180 NULL NULL NULL utf8 utf8_bin char(60) PRI NEVER NULL
-def mysql user Index_priv 16 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Insert_priv 5 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user is_role 45 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Lock_tables_priv 21 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user max_connections 40 0 NO int NULL NULL 10 0 NULL NULL NULL int(11) unsigned NEVER NULL
-def mysql user max_questions 38 0 NO int NULL NULL 10 0 NULL NULL NULL int(11) unsigned NEVER NULL
+def mysql user Alter_priv 17 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Alter_routine_priv 28 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user authentication_string 43 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext NEVER NULL
+def mysql user Create_priv 8 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Create_routine_priv 27 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Create_tablespace_priv 32 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Create_tmp_table_priv 20 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Create_user_priv 29 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Create_view_priv 25 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user default_role 46 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext NEVER NULL
+def mysql user Delete_history_priv 33 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Delete_priv 7 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Drop_priv 9 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Event_priv 30 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Execute_priv 22 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user File_priv 13 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Grant_priv 14 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Host 1 '' NO char 60 180 NULL NULL NULL utf8 utf8_bin char(60) NEVER NULL
+def mysql user Index_priv 16 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Insert_priv 5 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user is_role 45 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Lock_tables_priv 21 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user max_connections 40 0 NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned NEVER NULL
+def mysql user max_questions 38 0 NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned NEVER NULL
def mysql user max_statement_time 47 0.000000 NO decimal NULL NULL 12 6 NULL NULL NULL decimal(12,6) NEVER NULL
-def mysql user max_updates 39 0 NO int NULL NULL 10 0 NULL NULL NULL int(11) unsigned NEVER NULL
-def mysql user max_user_connections 41 0 NO int NULL NULL 10 0 NULL NULL NULL int(11) NEVER NULL
-def mysql user Password 3 '' NO char 41 41 NULL NULL NULL latin1 latin1_bin char(41) NEVER NULL
-def mysql user password_expired 44 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user plugin 42 '' NO char 64 64 NULL NULL NULL latin1 latin1_swedish_ci char(64) NEVER NULL
-def mysql user Process_priv 12 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user References_priv 15 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Reload_priv 10 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Repl_client_priv 24 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Repl_slave_priv 23 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Select_priv 4 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Show_db_priv 18 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Show_view_priv 26 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Shutdown_priv 11 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user ssl_cipher 35 NULL NO blob 65535 65535 NULL NULL NULL NULL NULL blob NEVER NULL
-def mysql user ssl_type 34 '' NO enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('','ANY','X509','SPECIFIED') NEVER NULL
-def mysql user Super_priv 19 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Trigger_priv 31 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user Update_priv 6 'N' NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') NEVER NULL
-def mysql user User 2 '' NO char 80 240 NULL NULL NULL utf8 utf8_bin char(80) PRI NEVER NULL
-def mysql user x509_issuer 36 NULL NO blob 65535 65535 NULL NULL NULL NULL NULL blob NEVER NULL
-def mysql user x509_subject 37 NULL NO blob 65535 65535 NULL NULL NULL NULL NULL blob NEVER NULL
+def mysql user max_updates 39 0 NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned NEVER NULL
+def mysql user max_user_connections 41 0 NO bigint NULL NULL 19 0 NULL NULL NULL bigint(21) NEVER NULL
+def mysql user Password 3 NULL YES longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext NEVER NULL
+def mysql user password_expired 44 '' NO varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user plugin 42 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext NEVER NULL
+def mysql user Process_priv 12 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user References_priv 15 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Reload_priv 10 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Repl_client_priv 24 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Repl_slave_priv 23 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Select_priv 4 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Show_db_priv 18 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Show_view_priv 26 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Shutdown_priv 11 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user ssl_cipher 35 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext NEVER NULL
+def mysql user ssl_type 34 NULL YES varchar 9 9 NULL NULL NULL latin1 latin1_swedish_ci varchar(9) NEVER NULL
+def mysql user Super_priv 19 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Trigger_priv 31 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user Update_priv 6 NULL YES varchar 1 1 NULL NULL NULL latin1 latin1_swedish_ci varchar(1) NEVER NULL
+def mysql user User 2 '' NO char 80 240 NULL NULL NULL utf8 utf8_bin char(80) NEVER NULL
+def mysql user x509_issuer 36 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext NEVER NULL
+def mysql user x509_subject 37 '' NO longtext 4294967295 4294967295 NULL NULL NULL utf8mb4 utf8mb4_bin longtext NEVER NULL
Warnings:
Warning 1286 Unknown storage engine 'InnoDB'
Warning 1286 Unknown storage engine 'InnoDB'
@@ -274,11 +257,12 @@ COL_CML DATA_TYPE CHARACTER_SET_NAME COLLATION_NAME
1.0000 blob NULL NULL
1.0000 longblob NULL NULL
1.0000 varbinary NULL NULL
-1.0000 char latin1 latin1_bin
1.0000 char latin1 latin1_swedish_ci
+1.0000 varchar latin1 latin1_swedish_ci
1.0000 text utf8 utf8_bin
1.0000 mediumtext utf8 utf8_general_ci
1.0000 text utf8 utf8_general_ci
+1.0000 longtext utf8mb4 utf8mb4_bin
Warnings:
Warning 1286 Unknown storage engine 'InnoDB'
Warning 1286 Unknown storage engine 'InnoDB'
@@ -397,7 +381,7 @@ NULL mysql event starts datetime NULL NULL NULL NULL datetime
NULL mysql event ends datetime NULL NULL NULL NULL datetime
3.0000 mysql event status enum 18 54 utf8 utf8_general_ci enum('ENABLED','DISABLED','SLAVESIDE_DISABLED')
3.0000 mysql event on_completion enum 8 24 utf8 utf8_general_ci enum('DROP','PRESERVE')
-3.0000 mysql event sql_mode set 539 1617 utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT')
+3.0000 mysql event sql_mode set 561 1683 utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL')
3.0000 mysql event comment char 64 192 utf8 utf8_bin char(64)
NULL mysql event originator int NULL NULL NULL NULL int(10) unsigned
1.0000 mysql event time_zone char 64 64 latin1 latin1_swedish_ci char(64)
@@ -415,6 +399,9 @@ NULL mysql general_log thread_id bigint NULL NULL NULL NULL bigint(21) unsigned
NULL mysql general_log server_id int NULL NULL NULL NULL int(10) unsigned
3.0000 mysql general_log command_type varchar 64 192 utf8 utf8_general_ci varchar(64)
1.0000 mysql general_log argument mediumtext 16777215 16777215 utf8 utf8_general_ci mediumtext
+3.0000 mysql global_priv Host char 60 180 utf8 utf8_bin char(60)
+3.0000 mysql global_priv User char 80 240 utf8 utf8_bin char(80)
+1.0000 mysql global_priv Priv longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
NULL mysql gtid_slave_pos domain_id int NULL NULL NULL NULL int(10) unsigned
NULL mysql gtid_slave_pos sub_id bigint NULL NULL NULL NULL bigint(20) unsigned
NULL mysql gtid_slave_pos server_id int NULL NULL NULL NULL int(10) unsigned
@@ -433,26 +420,6 @@ NULL mysql help_topic help_category_id smallint NULL NULL NULL NULL smallint(5)
1.0000 mysql help_topic description text 65535 65535 utf8 utf8_general_ci text
1.0000 mysql help_topic example text 65535 65535 utf8 utf8_general_ci text
1.0000 mysql help_topic url text 65535 65535 utf8 utf8_general_ci text
-3.0000 mysql host Host char 60 180 utf8 utf8_bin char(60)
-3.0000 mysql host Db char 64 192 utf8 utf8_bin char(64)
-3.0000 mysql host Select_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Insert_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Update_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Delete_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Create_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Drop_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Grant_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host References_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Index_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Alter_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Create_tmp_table_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Lock_tables_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Create_view_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Show_view_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Create_routine_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Alter_routine_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Execute_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql host Trigger_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
3.0000 mysql index_stats db_name varchar 64 192 utf8 utf8_bin varchar(64)
3.0000 mysql index_stats table_name varchar 64 192 utf8 utf8_bin varchar(64)
3.0000 mysql index_stats index_name varchar 64 192 utf8 utf8_bin varchar(64)
@@ -474,7 +441,7 @@ NULL mysql index_stats avg_frequency decimal NULL NULL NULL NULL decimal(12,4)
3.0000 mysql proc definer char 141 423 utf8 utf8_bin char(141)
NULL mysql proc created timestamp NULL NULL NULL NULL timestamp
NULL mysql proc modified timestamp NULL NULL NULL NULL timestamp
-3.0000 mysql proc sql_mode set 539 1617 utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT')
+3.0000 mysql proc sql_mode set 561 1683 utf8 utf8_general_ci set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL')
1.0000 mysql proc comment text 65535 65535 utf8 utf8_bin text
3.0000 mysql proc character_set_client char 32 96 utf8 utf8_bin char(32)
3.0000 mysql proc collation_connection char 32 96 utf8 utf8_bin char(32)
@@ -549,50 +516,50 @@ NULL mysql time_zone_transition_type Is_DST tinyint NULL NULL NULL NULL tinyint(
3.0000 mysql time_zone_transition_type Abbreviation char 8 24 utf8 utf8_general_ci char(8)
3.0000 mysql user Host char 60 180 utf8 utf8_bin char(60)
3.0000 mysql user User char 80 240 utf8 utf8_bin char(80)
-1.0000 mysql user Password char 41 41 latin1 latin1_bin char(41)
-3.0000 mysql user Select_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Insert_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Update_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Delete_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Create_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Drop_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Reload_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Shutdown_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Process_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user File_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Grant_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user References_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Index_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Alter_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Show_db_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Super_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Create_tmp_table_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Lock_tables_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Execute_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Repl_slave_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Repl_client_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Create_view_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Show_view_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Create_routine_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Alter_routine_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Create_user_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Event_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Trigger_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Create_tablespace_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user Delete_history_priv enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user ssl_type enum 9 27 utf8 utf8_general_ci enum('','ANY','X509','SPECIFIED')
-1.0000 mysql user ssl_cipher blob 65535 65535 NULL NULL blob
-1.0000 mysql user x509_issuer blob 65535 65535 NULL NULL blob
-1.0000 mysql user x509_subject blob 65535 65535 NULL NULL blob
-NULL mysql user max_questions int NULL NULL NULL NULL int(11) unsigned
-NULL mysql user max_updates int NULL NULL NULL NULL int(11) unsigned
-NULL mysql user max_connections int NULL NULL NULL NULL int(11) unsigned
-NULL mysql user max_user_connections int NULL NULL NULL NULL int(11)
-1.0000 mysql user plugin char 64 64 latin1 latin1_swedish_ci char(64)
-1.0000 mysql user authentication_string text 65535 65535 utf8 utf8_bin text
-3.0000 mysql user password_expired enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user is_role enum 1 3 utf8 utf8_general_ci enum('N','Y')
-3.0000 mysql user default_role char 80 240 utf8 utf8_bin char(80)
+1.0000 mysql user Password longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
+1.0000 mysql user Select_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Insert_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Update_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Delete_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Create_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Drop_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Reload_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Shutdown_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Process_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user File_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Grant_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user References_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Index_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Alter_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Show_db_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Super_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Create_tmp_table_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Lock_tables_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Execute_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Repl_slave_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Repl_client_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Create_view_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Show_view_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Create_routine_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Alter_routine_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Create_user_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Event_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Trigger_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Create_tablespace_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user Delete_history_priv varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user ssl_type varchar 9 9 latin1 latin1_swedish_ci varchar(9)
+1.0000 mysql user ssl_cipher longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
+1.0000 mysql user x509_issuer longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
+1.0000 mysql user x509_subject longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
+NULL mysql user max_questions bigint NULL NULL NULL NULL bigint(20) unsigned
+NULL mysql user max_updates bigint NULL NULL NULL NULL bigint(20) unsigned
+NULL mysql user max_connections bigint NULL NULL NULL NULL bigint(20) unsigned
+NULL mysql user max_user_connections bigint NULL NULL NULL NULL bigint(21)
+1.0000 mysql user plugin longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
+1.0000 mysql user authentication_string longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
+1.0000 mysql user password_expired varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user is_role varchar 1 1 latin1 latin1_swedish_ci varchar(1)
+1.0000 mysql user default_role longtext 4294967295 4294967295 utf8mb4 utf8mb4_bin longtext
NULL mysql user max_statement_time decimal NULL NULL NULL NULL decimal(12,6)
Warnings:
Warning 1286 Unknown storage engine 'InnoDB'
diff --git a/mysql-test/suite/funcs_1/r/is_key_column_usage.result b/mysql-test/suite/funcs_1/r/is_key_column_usage.result
index 5c126a48419..efb67e32cee 100644
--- a/mysql-test/suite/funcs_1/r/is_key_column_usage.result
+++ b/mysql-test/suite/funcs_1/r/is_key_column_usage.result
@@ -90,6 +90,8 @@ def mysql PRIMARY def mysql db User
def mysql PRIMARY def mysql event db
def mysql PRIMARY def mysql event name
def mysql PRIMARY def mysql func name
+def mysql PRIMARY def mysql global_priv Host
+def mysql PRIMARY def mysql global_priv User
def mysql PRIMARY def mysql gtid_slave_pos domain_id
def mysql PRIMARY def mysql gtid_slave_pos sub_id
def mysql PRIMARY def mysql help_category help_category_id
@@ -100,8 +102,6 @@ def mysql PRIMARY def mysql help_relation help_keyword_id
def mysql PRIMARY def mysql help_relation help_topic_id
def mysql PRIMARY def mysql help_topic help_topic_id
def mysql name def mysql help_topic name
-def mysql PRIMARY def mysql host Db
-def mysql PRIMARY def mysql host Host
def mysql PRIMARY def mysql index_stats db_name
def mysql PRIMARY def mysql index_stats index_name
def mysql PRIMARY def mysql index_stats prefix_arity
@@ -144,8 +144,6 @@ def mysql PRIMARY def mysql time_zone_transition_type Time_zone_id
def mysql PRIMARY def mysql time_zone_transition_type Transition_type_id
def mysql commit_id def mysql transaction_registry commit_id
def mysql PRIMARY def mysql transaction_registry transaction_id
-def mysql PRIMARY def mysql user Host
-def mysql PRIMARY def mysql user User
########################################################################################
# Testcase 3.2.7.2 + 3.2.7.3: INFORMATION_SCHEMA.KEY_COLUMN_USAGE accessible information
########################################################################################
diff --git a/mysql-test/suite/funcs_1/r/is_key_column_usage_embedded.result b/mysql-test/suite/funcs_1/r/is_key_column_usage_embedded.result
index d41f7395483..cf67be8f7a0 100644
--- a/mysql-test/suite/funcs_1/r/is_key_column_usage_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_key_column_usage_embedded.result
@@ -90,6 +90,8 @@ def mysql PRIMARY def mysql db User
def mysql PRIMARY def mysql event db
def mysql PRIMARY def mysql event name
def mysql PRIMARY def mysql func name
+def mysql PRIMARY def mysql global_priv Host
+def mysql PRIMARY def mysql global_priv User
def mysql PRIMARY def mysql gtid_slave_pos domain_id
def mysql PRIMARY def mysql gtid_slave_pos sub_id
def mysql PRIMARY def mysql help_category help_category_id
@@ -100,8 +102,6 @@ def mysql PRIMARY def mysql help_relation help_keyword_id
def mysql PRIMARY def mysql help_relation help_topic_id
def mysql PRIMARY def mysql help_topic help_topic_id
def mysql name def mysql help_topic name
-def mysql PRIMARY def mysql host Db
-def mysql PRIMARY def mysql host Host
def mysql PRIMARY def mysql index_stats db_name
def mysql PRIMARY def mysql index_stats index_name
def mysql PRIMARY def mysql index_stats prefix_arity
@@ -144,8 +144,6 @@ def mysql PRIMARY def mysql time_zone_transition_type Time_zone_id
def mysql PRIMARY def mysql time_zone_transition_type Transition_type_id
def mysql commit_id def mysql transaction_registry commit_id
def mysql PRIMARY def mysql transaction_registry transaction_id
-def mysql PRIMARY def mysql user Host
-def mysql PRIMARY def mysql user User
########################################################################################
# Testcase 3.2.7.2 + 3.2.7.3: INFORMATION_SCHEMA.KEY_COLUMN_USAGE accessible information
########################################################################################
diff --git a/mysql-test/suite/funcs_1/r/is_routines_embedded.result b/mysql-test/suite/funcs_1/r/is_routines_embedded.result
index 1739a0c15c8..ec375e9c5f6 100644
--- a/mysql-test/suite/funcs_1/r/is_routines_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_routines_embedded.result
@@ -197,7 +197,7 @@ sp_6_408002_2 def db_datadict_2 sp_6_408002_2 PROCEDURE NULL NULL NULL NULL NUL
SELECT * FROM db_datadict_2.res_6_408002_2;
END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost latin1 latin1_swedish_ci latin1_swedish_ci
add_suppression def mtr add_suppression PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN INSERT INTO test_suppressions (pattern) VALUES (pattern); FLUSH NO_WRITE_TO_BINLOG TABLE test_suppressions; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8 utf8_general_ci latin1_swedish_ci
-check_testcase def mtr check_testcase PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE variable_name NOT IN ('timestamp') AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_USE_NATIVE_AIO' AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND variable_name != 'GTID_BINLOG_STATE' ORDER BY variable_name; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA ORDER BY BINARY SCHEMA_NAME; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME NOT IN ('mtr_wsrep_notify', 'wsrep_schema') ORDER BY BINARY SCHEMA_NAME; SELECT table_name AS tables_in_test FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='test'; SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql; SELECT * FROM INFORMATION_SCHEMA.EVENTS; SELECT * FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME NOT IN ('gs_insert', 'ts_insert'); SELECT * FROM INFORMATION_SCHEMA.ROUTINES; SHOW STATUS LIKE 'slave_open_temp_tables'; checksum table mysql.columns_priv, mysql.db, mysql.func, mysql.help_category, mysql.help_keyword, mysql.help_relation, mysql.host, mysql.plugin, mysql.proc, mysql.procs_priv, mysql.roles_mapping, mysql.tables_priv, mysql.time_zone, mysql.time_zone_leap_second, mysql.time_zone_name, mysql.time_zone_transition, mysql.time_zone_transition_type, mysql.user; SELECT * FROM INFORMATION_SCHEMA.PLUGINS; select * from information_schema.session_variables where variable_name = 'debug_sync'; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8 utf8_general_ci latin1_swedish_ci
+check_testcase def mtr check_testcase PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE variable_name NOT IN ('timestamp') AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_USE_NATIVE_AIO' AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND variable_name != 'GTID_BINLOG_STATE' ORDER BY variable_name; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA ORDER BY BINARY SCHEMA_NAME; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME NOT IN ('mtr_wsrep_notify', 'wsrep_schema') ORDER BY BINARY SCHEMA_NAME; SELECT table_name AS tables_in_test FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='test'; SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql; SELECT * FROM INFORMATION_SCHEMA.EVENTS; SELECT * FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME NOT IN ('gs_insert', 'ts_insert'); SELECT * FROM INFORMATION_SCHEMA.ROUTINES; SHOW STATUS LIKE 'slave_open_temp_tables'; checksum table mysql.columns_priv, mysql.db, mysql.func, mysql.help_category, mysql.help_keyword, mysql.help_relation, mysql.plugin, mysql.proc, mysql.procs_priv, mysql.roles_mapping, mysql.tables_priv, mysql.time_zone, mysql.time_zone_leap_second, mysql.time_zone_name, mysql.time_zone_transition, mysql.time_zone_transition_type, mysql.global_priv; SELECT * FROM INFORMATION_SCHEMA.PLUGINS; select * from information_schema.session_variables where variable_name = 'debug_sync'; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8 utf8_general_ci latin1_swedish_ci
check_warnings def mtr check_warnings PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN DECLARE `pos` bigint unsigned; SET SQL_LOG_BIN=0, SQL_SAFE_UPDATES=0; UPDATE error_log el, global_suppressions gs SET suspicious=0 WHERE el.suspicious=1 AND el.line REGEXP gs.pattern; UPDATE error_log el, test_suppressions ts SET suspicious=0 WHERE el.suspicious=1 AND el.line REGEXP ts.pattern; SELECT COUNT(*) INTO @num_warnings FROM error_log WHERE suspicious=1; IF @num_warnings > 0 THEN SELECT line FROM error_log WHERE suspicious=1; SELECT 2 INTO result; ELSE SELECT 0 INTO RESULT; END IF; TRUNCATE test_suppressions; DROP TABLE error_log; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8 utf8_general_ci latin1_swedish_ci
connect testuser2, localhost, testuser2, , db_datadict;
SELECT * FROM information_schema.routines;
@@ -209,7 +209,7 @@ sp_6_408002_2 def db_datadict_2 sp_6_408002_2 PROCEDURE NULL NULL NULL NULL NUL
SELECT * FROM db_datadict_2.res_6_408002_2;
END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost latin1 latin1_swedish_ci latin1_swedish_ci
add_suppression def mtr add_suppression PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN INSERT INTO test_suppressions (pattern) VALUES (pattern); FLUSH NO_WRITE_TO_BINLOG TABLE test_suppressions; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8 utf8_general_ci latin1_swedish_ci
-check_testcase def mtr check_testcase PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE variable_name NOT IN ('timestamp') AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_USE_NATIVE_AIO' AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND variable_name != 'GTID_BINLOG_STATE' ORDER BY variable_name; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA ORDER BY BINARY SCHEMA_NAME; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME NOT IN ('mtr_wsrep_notify', 'wsrep_schema') ORDER BY BINARY SCHEMA_NAME; SELECT table_name AS tables_in_test FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='test'; SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql; SELECT * FROM INFORMATION_SCHEMA.EVENTS; SELECT * FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME NOT IN ('gs_insert', 'ts_insert'); SELECT * FROM INFORMATION_SCHEMA.ROUTINES; SHOW STATUS LIKE 'slave_open_temp_tables'; checksum table mysql.columns_priv, mysql.db, mysql.func, mysql.help_category, mysql.help_keyword, mysql.help_relation, mysql.host, mysql.plugin, mysql.proc, mysql.procs_priv, mysql.roles_mapping, mysql.tables_priv, mysql.time_zone, mysql.time_zone_leap_second, mysql.time_zone_name, mysql.time_zone_transition, mysql.time_zone_transition_type, mysql.user; SELECT * FROM INFORMATION_SCHEMA.PLUGINS; select * from information_schema.session_variables where variable_name = 'debug_sync'; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8 utf8_general_ci latin1_swedish_ci
+check_testcase def mtr check_testcase PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE variable_name NOT IN ('timestamp') AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_USE_NATIVE_AIO' AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND variable_name != 'GTID_BINLOG_STATE' ORDER BY variable_name; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA ORDER BY BINARY SCHEMA_NAME; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME NOT IN ('mtr_wsrep_notify', 'wsrep_schema') ORDER BY BINARY SCHEMA_NAME; SELECT table_name AS tables_in_test FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='test'; SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql; SELECT * FROM INFORMATION_SCHEMA.EVENTS; SELECT * FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME NOT IN ('gs_insert', 'ts_insert'); SELECT * FROM INFORMATION_SCHEMA.ROUTINES; SHOW STATUS LIKE 'slave_open_temp_tables'; checksum table mysql.columns_priv, mysql.db, mysql.func, mysql.help_category, mysql.help_keyword, mysql.help_relation, mysql.plugin, mysql.proc, mysql.procs_priv, mysql.roles_mapping, mysql.tables_priv, mysql.time_zone, mysql.time_zone_leap_second, mysql.time_zone_name, mysql.time_zone_transition, mysql.time_zone_transition_type, mysql.global_priv; SELECT * FROM INFORMATION_SCHEMA.PLUGINS; select * from information_schema.session_variables where variable_name = 'debug_sync'; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8 utf8_general_ci latin1_swedish_ci
check_warnings def mtr check_warnings PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN DECLARE `pos` bigint unsigned; SET SQL_LOG_BIN=0, SQL_SAFE_UPDATES=0; UPDATE error_log el, global_suppressions gs SET suspicious=0 WHERE el.suspicious=1 AND el.line REGEXP gs.pattern; UPDATE error_log el, test_suppressions ts SET suspicious=0 WHERE el.suspicious=1 AND el.line REGEXP ts.pattern; SELECT COUNT(*) INTO @num_warnings FROM error_log WHERE suspicious=1; IF @num_warnings > 0 THEN SELECT line FROM error_log WHERE suspicious=1; SELECT 2 INTO result; ELSE SELECT 0 INTO RESULT; END IF; TRUNCATE test_suppressions; DROP TABLE error_log; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8 utf8_general_ci latin1_swedish_ci
connect testuser3, localhost, testuser3, , test;
SELECT * FROM information_schema.routines;
@@ -221,7 +221,7 @@ sp_6_408002_2 def db_datadict_2 sp_6_408002_2 PROCEDURE NULL NULL NULL NULL NUL
SELECT * FROM db_datadict_2.res_6_408002_2;
END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost latin1 latin1_swedish_ci latin1_swedish_ci
add_suppression def mtr add_suppression PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN INSERT INTO test_suppressions (pattern) VALUES (pattern); FLUSH NO_WRITE_TO_BINLOG TABLE test_suppressions; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8 utf8_general_ci latin1_swedish_ci
-check_testcase def mtr check_testcase PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE variable_name NOT IN ('timestamp') AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_USE_NATIVE_AIO' AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND variable_name != 'GTID_BINLOG_STATE' ORDER BY variable_name; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA ORDER BY BINARY SCHEMA_NAME; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME NOT IN ('mtr_wsrep_notify', 'wsrep_schema') ORDER BY BINARY SCHEMA_NAME; SELECT table_name AS tables_in_test FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='test'; SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql; SELECT * FROM INFORMATION_SCHEMA.EVENTS; SELECT * FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME NOT IN ('gs_insert', 'ts_insert'); SELECT * FROM INFORMATION_SCHEMA.ROUTINES; SHOW STATUS LIKE 'slave_open_temp_tables'; checksum table mysql.columns_priv, mysql.db, mysql.func, mysql.help_category, mysql.help_keyword, mysql.help_relation, mysql.host, mysql.plugin, mysql.proc, mysql.procs_priv, mysql.roles_mapping, mysql.tables_priv, mysql.time_zone, mysql.time_zone_leap_second, mysql.time_zone_name, mysql.time_zone_transition, mysql.time_zone_transition_type, mysql.user; SELECT * FROM INFORMATION_SCHEMA.PLUGINS; select * from information_schema.session_variables where variable_name = 'debug_sync'; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8 utf8_general_ci latin1_swedish_ci
+check_testcase def mtr check_testcase PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN SELECT * FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE variable_name NOT IN ('timestamp') AND variable_name not like "Last_IO_Err*" AND variable_name != 'INNODB_IBUF_MAX_SIZE' AND variable_name != 'INNODB_USE_NATIVE_AIO' AND variable_name != 'INNODB_BUFFER_POOL_LOAD_AT_STARTUP' AND variable_name not like 'GTID%POS' AND variable_name != 'GTID_BINLOG_STATE' ORDER BY variable_name; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA ORDER BY BINARY SCHEMA_NAME; SELECT * FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME NOT IN ('mtr_wsrep_notify', 'wsrep_schema') ORDER BY BINARY SCHEMA_NAME; SELECT table_name AS tables_in_test FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='test'; SELECT CONCAT(table_schema, '.', table_name) AS tables_in_mysql FROM INFORMATION_SCHEMA.TABLES WHERE table_schema='mysql' ORDER BY tables_in_mysql; SELECT CONCAT(table_schema, '.', table_name) AS columns_in_mysql, column_name, ordinal_position, column_default, is_nullable, data_type, character_maximum_length, character_octet_length, numeric_precision, numeric_scale, character_set_name, collation_name, column_type, column_key, extra, column_comment FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='mysql' ORDER BY columns_in_mysql; SELECT * FROM INFORMATION_SCHEMA.EVENTS; SELECT * FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME NOT IN ('gs_insert', 'ts_insert'); SELECT * FROM INFORMATION_SCHEMA.ROUTINES; SHOW STATUS LIKE 'slave_open_temp_tables'; checksum table mysql.columns_priv, mysql.db, mysql.func, mysql.help_category, mysql.help_keyword, mysql.help_relation, mysql.plugin, mysql.proc, mysql.procs_priv, mysql.roles_mapping, mysql.tables_priv, mysql.time_zone, mysql.time_zone_leap_second, mysql.time_zone_name, mysql.time_zone_transition, mysql.time_zone_transition_type, mysql.global_priv; SELECT * FROM INFORMATION_SCHEMA.PLUGINS; select * from information_schema.session_variables where variable_name = 'debug_sync'; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8 utf8_general_ci latin1_swedish_ci
check_warnings def mtr check_warnings PROCEDURE NULL NULL NULL NULL NULL NULL NULL NULL SQL BEGIN DECLARE `pos` bigint unsigned; SET SQL_LOG_BIN=0, SQL_SAFE_UPDATES=0; UPDATE error_log el, global_suppressions gs SET suspicious=0 WHERE el.suspicious=1 AND el.line REGEXP gs.pattern; UPDATE error_log el, test_suppressions ts SET suspicious=0 WHERE el.suspicious=1 AND el.line REGEXP ts.pattern; SELECT COUNT(*) INTO @num_warnings FROM error_log WHERE suspicious=1; IF @num_warnings > 0 THEN SELECT line FROM error_log WHERE suspicious=1; SELECT 2 INTO result; ELSE SELECT 0 INTO RESULT; END IF; TRUNCATE test_suppressions; DROP TABLE error_log; END NULL NULL SQL NO CONTAINS SQL NULL DEFINER YYYY-MM-DD hh:mm:ss YYYY-MM-DD hh:mm:ss root@localhost utf8 utf8_general_ci latin1_swedish_ci
connection default;
disconnect testuser1;
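A hedged note on the routines result above: the body of mtr.check_testcase now checksums mysql.global_priv in place of mysql.host and mysql.user in its system-table list. The statement below is only an illustration of that list change, not part of the test suite; it assumes a 10.4-style server where global_priv exists.

-- Illustration only: checksum the new privilege table that replaced
-- mysql.host / mysql.user in the check_testcase table list.
checksum table mysql.global_priv, mysql.tables_priv, mysql.procs_priv;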
diff --git a/mysql-test/suite/funcs_1/r/is_statistics.result b/mysql-test/suite/funcs_1/r/is_statistics.result
index 419eb0b4b4c..bacc106a1be 100644
--- a/mysql-test/suite/funcs_1/r/is_statistics.result
+++ b/mysql-test/suite/funcs_1/r/is_statistics.result
@@ -101,6 +101,8 @@ def mysql db mysql User
def mysql event mysql PRIMARY
def mysql event mysql PRIMARY
def mysql func mysql PRIMARY
+def mysql global_priv mysql PRIMARY
+def mysql global_priv mysql PRIMARY
def mysql gtid_slave_pos mysql PRIMARY
def mysql gtid_slave_pos mysql PRIMARY
def mysql help_category mysql name
@@ -111,8 +113,6 @@ def mysql help_relation mysql PRIMARY
def mysql help_relation mysql PRIMARY
def mysql help_topic mysql name
def mysql help_topic mysql PRIMARY
-def mysql host mysql PRIMARY
-def mysql host mysql PRIMARY
def mysql index_stats mysql PRIMARY
def mysql index_stats mysql PRIMARY
def mysql index_stats mysql PRIMARY
@@ -150,8 +150,6 @@ def mysql time_zone_transition mysql PRIMARY
def mysql time_zone_transition mysql PRIMARY
def mysql time_zone_transition_type mysql PRIMARY
def mysql time_zone_transition_type mysql PRIMARY
-def mysql user mysql PRIMARY
-def mysql user mysql PRIMARY
Warnings:
Warning 1286 Unknown storage engine 'InnoDB'
Warning 1286 Unknown storage engine 'InnoDB'
diff --git a/mysql-test/suite/funcs_1/r/is_statistics_mysql.result b/mysql-test/suite/funcs_1/r/is_statistics_mysql.result
index 31fb460f9aa..c01c456edf8 100644
--- a/mysql-test/suite/funcs_1/r/is_statistics_mysql.result
+++ b/mysql-test/suite/funcs_1/r/is_statistics_mysql.result
@@ -22,6 +22,8 @@ def mysql db 1 mysql User 1 User A #CARD# NULL NULL BTREE
def mysql event 0 mysql PRIMARY 1 db A #CARD# NULL NULL BTREE
def mysql event 0 mysql PRIMARY 2 name A #CARD# NULL NULL BTREE
def mysql func 0 mysql PRIMARY 1 name A #CARD# NULL NULL BTREE
+def mysql global_priv 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
+def mysql global_priv 0 mysql PRIMARY 2 User A #CARD# NULL NULL BTREE
def mysql gtid_slave_pos 0 mysql PRIMARY 1 domain_id A #CARD# NULL NULL BTREE
def mysql gtid_slave_pos 0 mysql PRIMARY 2 sub_id A #CARD# NULL NULL BTREE
def mysql help_category 0 mysql name 1 name A #CARD# NULL NULL BTREE
@@ -32,8 +34,6 @@ def mysql help_relation 0 mysql PRIMARY 1 help_keyword_id A #CARD# NULL NULL BT
def mysql help_relation 0 mysql PRIMARY 2 help_topic_id A #CARD# NULL NULL BTREE
def mysql help_topic 0 mysql name 1 name A #CARD# NULL NULL BTREE
def mysql help_topic 0 mysql PRIMARY 1 help_topic_id A #CARD# NULL NULL BTREE
-def mysql host 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
-def mysql host 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
def mysql index_stats 0 mysql PRIMARY 1 db_name A #CARD# NULL NULL BTREE
def mysql index_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
def mysql index_stats 0 mysql PRIMARY 3 index_name A #CARD# NULL NULL BTREE
@@ -82,8 +82,6 @@ def mysql transaction_registry 0 mysql commit_id 1 commit_id A #CARD# NULL NULL
def mysql transaction_registry 1 mysql commit_timestamp 1 commit_timestamp A #CARD# NULL NULL BTREE
def mysql transaction_registry 1 mysql commit_timestamp 2 transaction_id A #CARD# NULL NULL BTREE
def mysql transaction_registry 0 mysql PRIMARY 1 transaction_id A #CARD# NULL NULL BTREE
-def mysql user 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
-def mysql user 0 mysql PRIMARY 2 User A #CARD# NULL NULL BTREE
connect testuser1,localhost,testuser1,,db_datadict;
SELECT * FROM information_schema.statistics
WHERE table_schema = 'mysql'
diff --git a/mysql-test/suite/funcs_1/r/is_statistics_mysql_embedded.result b/mysql-test/suite/funcs_1/r/is_statistics_mysql_embedded.result
index c81052321f8..b7bd3ec2e23 100644
--- a/mysql-test/suite/funcs_1/r/is_statistics_mysql_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_statistics_mysql_embedded.result
@@ -22,6 +22,8 @@ def mysql db 1 mysql User 1 User A #CARD# NULL NULL BTREE
def mysql event 0 mysql PRIMARY 1 db A #CARD# NULL NULL BTREE
def mysql event 0 mysql PRIMARY 2 name A #CARD# NULL NULL BTREE
def mysql func 0 mysql PRIMARY 1 name A #CARD# NULL NULL BTREE
+def mysql global_priv 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
+def mysql global_priv 0 mysql PRIMARY 2 User A #CARD# NULL NULL BTREE
def mysql gtid_slave_pos 0 mysql PRIMARY 1 domain_id A #CARD# NULL NULL BTREE
def mysql gtid_slave_pos 0 mysql PRIMARY 2 sub_id A #CARD# NULL NULL BTREE
def mysql help_category 0 mysql name 1 name A #CARD# NULL NULL BTREE
@@ -32,8 +34,6 @@ def mysql help_relation 0 mysql PRIMARY 1 help_keyword_id A #CARD# NULL NULL BT
def mysql help_relation 0 mysql PRIMARY 2 help_topic_id A #CARD# NULL NULL BTREE
def mysql help_topic 0 mysql name 1 name A #CARD# NULL NULL BTREE
def mysql help_topic 0 mysql PRIMARY 1 help_topic_id A #CARD# NULL NULL BTREE
-def mysql host 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
-def mysql host 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
def mysql index_stats 0 mysql PRIMARY 1 db_name A #CARD# NULL NULL BTREE
def mysql index_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
def mysql index_stats 0 mysql PRIMARY 3 index_name A #CARD# NULL NULL BTREE
@@ -82,8 +82,6 @@ def mysql transaction_registry 0 mysql commit_id 1 commit_id A #CARD# NULL NULL
def mysql transaction_registry 1 mysql commit_timestamp 1 commit_timestamp A #CARD# NULL NULL BTREE
def mysql transaction_registry 1 mysql commit_timestamp 2 transaction_id A #CARD# NULL NULL BTREE
def mysql transaction_registry 0 mysql PRIMARY 1 transaction_id A #CARD# NULL NULL BTREE
-def mysql user 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
-def mysql user 0 mysql PRIMARY 2 User A #CARD# NULL NULL BTREE
connect testuser1,localhost,testuser1,,db_datadict;
SELECT * FROM information_schema.statistics
WHERE table_schema = 'mysql'
@@ -104,6 +102,8 @@ def mysql db 1 mysql User 1 User A #CARD# NULL NULL BTREE
def mysql event 0 mysql PRIMARY 1 db A #CARD# NULL NULL BTREE
def mysql event 0 mysql PRIMARY 2 name A #CARD# NULL NULL BTREE
def mysql func 0 mysql PRIMARY 1 name A #CARD# NULL NULL BTREE
+def mysql global_priv 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
+def mysql global_priv 0 mysql PRIMARY 2 User A #CARD# NULL NULL BTREE
def mysql gtid_slave_pos 0 mysql PRIMARY 1 domain_id A #CARD# NULL NULL BTREE
def mysql gtid_slave_pos 0 mysql PRIMARY 2 sub_id A #CARD# NULL NULL BTREE
def mysql help_category 0 mysql name 1 name A #CARD# NULL NULL BTREE
@@ -114,8 +114,6 @@ def mysql help_relation 0 mysql PRIMARY 1 help_keyword_id A #CARD# NULL NULL BT
def mysql help_relation 0 mysql PRIMARY 2 help_topic_id A #CARD# NULL NULL BTREE
def mysql help_topic 0 mysql name 1 name A #CARD# NULL NULL BTREE
def mysql help_topic 0 mysql PRIMARY 1 help_topic_id A #CARD# NULL NULL BTREE
-def mysql host 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
-def mysql host 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
def mysql index_stats 0 mysql PRIMARY 1 db_name A #CARD# NULL NULL BTREE
def mysql index_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
def mysql index_stats 0 mysql PRIMARY 3 index_name A #CARD# NULL NULL BTREE
@@ -164,8 +162,6 @@ def mysql transaction_registry 0 mysql commit_id 1 commit_id A #CARD# NULL NULL
def mysql transaction_registry 1 mysql commit_timestamp 1 commit_timestamp A #CARD# NULL NULL BTREE
def mysql transaction_registry 1 mysql commit_timestamp 2 transaction_id A #CARD# NULL NULL BTREE
def mysql transaction_registry 0 mysql PRIMARY 1 transaction_id A #CARD# NULL NULL BTREE
-def mysql user 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
-def mysql user 0 mysql PRIMARY 2 User A #CARD# NULL NULL BTREE
connection default;
disconnect testuser1;
DROP USER testuser1@localhost;
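The statistics results above drop the index rows of mysql.host and mysql.user and add a two-column PRIMARY KEY (Host, User) on mysql.global_priv. A minimal query sketch along the lines of what these tests record (column names taken from the result rows above, not an authoritative test statement):

-- Sketch: list the indexed columns of the new privilege table.
SELECT index_name, seq_in_index, column_name
FROM information_schema.statistics
WHERE table_schema = 'mysql' AND table_name = 'global_priv'
ORDER BY index_name, seq_in_index;
-- Per the rows above: PRIMARY 1 Host, PRIMARY 2 User.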
diff --git a/mysql-test/suite/funcs_1/r/is_table_constraints.result b/mysql-test/suite/funcs_1/r/is_table_constraints.result
index d968d3b65de..bc0f9a724a1 100644
--- a/mysql-test/suite/funcs_1/r/is_table_constraints.result
+++ b/mysql-test/suite/funcs_1/r/is_table_constraints.result
@@ -63,6 +63,8 @@ def mysql PRIMARY mysql column_stats
def mysql PRIMARY mysql db
def mysql PRIMARY mysql event
def mysql PRIMARY mysql func
+def mysql PRIMARY mysql global_priv
+def mysql Priv mysql global_priv
def mysql PRIMARY mysql gtid_slave_pos
def mysql name mysql help_category
def mysql PRIMARY mysql help_category
@@ -71,7 +73,6 @@ def mysql PRIMARY mysql help_keyword
def mysql PRIMARY mysql help_relation
def mysql name mysql help_topic
def mysql PRIMARY mysql help_topic
-def mysql PRIMARY mysql host
def mysql PRIMARY mysql index_stats
def mysql PRIMARY mysql innodb_index_stats
def mysql PRIMARY mysql innodb_table_stats
@@ -90,7 +91,6 @@ def mysql PRIMARY mysql time_zone_transition
def mysql PRIMARY mysql time_zone_transition_type
def mysql commit_id mysql transaction_registry
def mysql PRIMARY mysql transaction_registry
-def mysql PRIMARY mysql user
#########################################################################################
# Testcase 3.2.7.2 + 3.2.7.3: INFORMATION_SCHEMA.TABLE_CONSTRAINTS accessible information
#########################################################################################
diff --git a/mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result b/mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result
index b56c5115f16..d5da807388b 100644
--- a/mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result
+++ b/mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result
@@ -12,6 +12,8 @@ def mysql PRIMARY mysql column_stats PRIMARY KEY
def mysql PRIMARY mysql db PRIMARY KEY
def mysql PRIMARY mysql event PRIMARY KEY
def mysql PRIMARY mysql func PRIMARY KEY
+def mysql PRIMARY mysql global_priv PRIMARY KEY
+def mysql Priv mysql global_priv CHECK
def mysql PRIMARY mysql gtid_slave_pos PRIMARY KEY
def mysql name mysql help_category UNIQUE
def mysql PRIMARY mysql help_category PRIMARY KEY
@@ -20,7 +22,6 @@ def mysql PRIMARY mysql help_keyword PRIMARY KEY
def mysql PRIMARY mysql help_relation PRIMARY KEY
def mysql name mysql help_topic UNIQUE
def mysql PRIMARY mysql help_topic PRIMARY KEY
-def mysql PRIMARY mysql host PRIMARY KEY
def mysql PRIMARY mysql index_stats PRIMARY KEY
def mysql PRIMARY mysql innodb_index_stats PRIMARY KEY
def mysql PRIMARY mysql innodb_table_stats PRIMARY KEY
@@ -39,7 +40,6 @@ def mysql PRIMARY mysql time_zone_transition PRIMARY KEY
def mysql PRIMARY mysql time_zone_transition_type PRIMARY KEY
def mysql commit_id mysql transaction_registry UNIQUE
def mysql PRIMARY mysql transaction_registry PRIMARY KEY
-def mysql PRIMARY mysql user PRIMARY KEY
connect testuser1,localhost,testuser1,,db_datadict;
SELECT * FROM information_schema.table_constraints
WHERE table_schema = 'mysql'
diff --git a/mysql-test/suite/funcs_1/r/is_table_constraints_mysql_embedded.result b/mysql-test/suite/funcs_1/r/is_table_constraints_mysql_embedded.result
index b40bc0ea0c7..0426877bc1c 100644
--- a/mysql-test/suite/funcs_1/r/is_table_constraints_mysql_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_table_constraints_mysql_embedded.result
@@ -12,6 +12,8 @@ def mysql PRIMARY mysql column_stats PRIMARY KEY
def mysql PRIMARY mysql db PRIMARY KEY
def mysql PRIMARY mysql event PRIMARY KEY
def mysql PRIMARY mysql func PRIMARY KEY
+def mysql PRIMARY mysql global_priv PRIMARY KEY
+def mysql Priv mysql global_priv CHECK
def mysql PRIMARY mysql gtid_slave_pos PRIMARY KEY
def mysql name mysql help_category UNIQUE
def mysql PRIMARY mysql help_category PRIMARY KEY
@@ -20,7 +22,6 @@ def mysql PRIMARY mysql help_keyword PRIMARY KEY
def mysql PRIMARY mysql help_relation PRIMARY KEY
def mysql name mysql help_topic UNIQUE
def mysql PRIMARY mysql help_topic PRIMARY KEY
-def mysql PRIMARY mysql host PRIMARY KEY
def mysql PRIMARY mysql index_stats PRIMARY KEY
def mysql PRIMARY mysql innodb_index_stats PRIMARY KEY
def mysql PRIMARY mysql innodb_table_stats PRIMARY KEY
@@ -39,7 +40,6 @@ def mysql PRIMARY mysql time_zone_transition PRIMARY KEY
def mysql PRIMARY mysql time_zone_transition_type PRIMARY KEY
def mysql commit_id mysql transaction_registry UNIQUE
def mysql PRIMARY mysql transaction_registry PRIMARY KEY
-def mysql PRIMARY mysql user PRIMARY KEY
connect testuser1,localhost,testuser1,,db_datadict;
SELECT * FROM information_schema.table_constraints
WHERE table_schema = 'mysql'
@@ -50,6 +50,8 @@ def mysql PRIMARY mysql column_stats PRIMARY KEY
def mysql PRIMARY mysql db PRIMARY KEY
def mysql PRIMARY mysql event PRIMARY KEY
def mysql PRIMARY mysql func PRIMARY KEY
+def mysql PRIMARY mysql global_priv PRIMARY KEY
+def mysql Priv mysql global_priv CHECK
def mysql PRIMARY mysql gtid_slave_pos PRIMARY KEY
def mysql name mysql help_category UNIQUE
def mysql PRIMARY mysql help_category PRIMARY KEY
@@ -58,7 +60,6 @@ def mysql PRIMARY mysql help_keyword PRIMARY KEY
def mysql PRIMARY mysql help_relation PRIMARY KEY
def mysql name mysql help_topic UNIQUE
def mysql PRIMARY mysql help_topic PRIMARY KEY
-def mysql PRIMARY mysql host PRIMARY KEY
def mysql PRIMARY mysql index_stats PRIMARY KEY
def mysql PRIMARY mysql innodb_index_stats PRIMARY KEY
def mysql PRIMARY mysql innodb_table_stats PRIMARY KEY
@@ -77,7 +78,6 @@ def mysql PRIMARY mysql time_zone_transition PRIMARY KEY
def mysql PRIMARY mysql time_zone_transition_type PRIMARY KEY
def mysql commit_id mysql transaction_registry UNIQUE
def mysql PRIMARY mysql transaction_registry PRIMARY KEY
-def mysql PRIMARY mysql user PRIMARY KEY
connection default;
disconnect testuser1;
DROP USER testuser1@localhost;
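As the table_constraints results above show, mysql.global_priv carries both a PRIMARY KEY and a CHECK constraint named Priv, while the former mysql.host and mysql.user PRIMARY KEY rows are gone. A hedged sketch of how those rows can be listed, mirroring the query the test itself runs:

-- Sketch: show the constraints recorded for mysql.global_priv.
SELECT constraint_name, constraint_type
FROM information_schema.table_constraints
WHERE table_schema = 'mysql' AND table_name = 'global_priv';
-- Per the result above: PRIMARY (PRIMARY KEY) and Priv (CHECK).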
diff --git a/mysql-test/suite/funcs_1/r/is_tables_is.result b/mysql-test/suite/funcs_1/r/is_tables_is.result
index 5fee1e0050a..9af3aa860a0 100644
--- a/mysql-test/suite/funcs_1/r/is_tables_is.result
+++ b/mysql-test/suite/funcs_1/r/is_tables_is.result
@@ -489,6 +489,31 @@ user_comment
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA information_schema
+TABLE_NAME OPTIMIZER_TRACE
+TABLE_TYPE SYSTEM VIEW
+ENGINE MYISAM_OR_MARIA
+VERSION 11
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_general_ci
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+MAX_INDEX_LENGTH #MIL#
+TEMPORARY Y
+user_comment
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA information_schema
TABLE_NAME PARAMETERS
TABLE_TYPE SYSTEM VIEW
ENGINE MYISAM_OR_MARIA
@@ -1530,6 +1555,31 @@ user_comment
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA information_schema
+TABLE_NAME OPTIMIZER_TRACE
+TABLE_TYPE SYSTEM VIEW
+ENGINE MYISAM_OR_MARIA
+VERSION 11
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_general_ci
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+MAX_INDEX_LENGTH #MIL#
+TEMPORARY Y
+user_comment
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA information_schema
TABLE_NAME PARAMETERS
TABLE_TYPE SYSTEM VIEW
ENGINE MYISAM_OR_MARIA
diff --git a/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result b/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result
index 5fee1e0050a..9af3aa860a0 100644
--- a/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_tables_is_embedded.result
@@ -489,6 +489,31 @@ user_comment
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA information_schema
+TABLE_NAME OPTIMIZER_TRACE
+TABLE_TYPE SYSTEM VIEW
+ENGINE MYISAM_OR_MARIA
+VERSION 11
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_general_ci
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+MAX_INDEX_LENGTH #MIL#
+TEMPORARY Y
+user_comment
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA information_schema
TABLE_NAME PARAMETERS
TABLE_TYPE SYSTEM VIEW
ENGINE MYISAM_OR_MARIA
@@ -1530,6 +1555,31 @@ user_comment
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA information_schema
+TABLE_NAME OPTIMIZER_TRACE
+TABLE_TYPE SYSTEM VIEW
+ENGINE MYISAM_OR_MARIA
+VERSION 11
+ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_ROWS #TBLR#
+AVG_ROW_LENGTH #ARL#
+DATA_LENGTH #DL#
+MAX_DATA_LENGTH #MDL#
+INDEX_LENGTH #IL#
+DATA_FREE #DF#
+AUTO_INCREMENT NULL
+CREATE_TIME #CRT#
+UPDATE_TIME #UT#
+CHECK_TIME #CT#
+TABLE_COLLATION utf8_general_ci
+CHECKSUM NULL
+CREATE_OPTIONS #CO#
+TABLE_COMMENT #TC#
+MAX_INDEX_LENGTH #MIL#
+TEMPORARY Y
+user_comment
+Separator -----------------------------------------------------
+TABLE_CATALOG def
+TABLE_SCHEMA information_schema
TABLE_NAME PARAMETERS
TABLE_TYPE SYSTEM VIEW
ENGINE MYISAM_OR_MARIA
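The two is_tables_is results above gain a block for the new INFORMATION_SCHEMA.OPTIMIZER_TRACE system view (the #TBLR#, #ARL#, ... values are test-suite placeholders, not literal output). A small sketch of the kind of lookup those result blocks correspond to:

-- Sketch: confirm the new system view is listed in INFORMATION_SCHEMA.TABLES.
SELECT table_name, table_type, engine
FROM information_schema.tables
WHERE table_schema = 'information_schema'
  AND table_name = 'OPTIMIZER_TRACE';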
diff --git a/mysql-test/suite/funcs_1/r/is_tables_mysql.result b/mysql-test/suite/funcs_1/r/is_tables_mysql.result
index 2ceed585699..be432e2422a 100644
--- a/mysql-test/suite/funcs_1/r/is_tables_mysql.result
+++ b/mysql-test/suite/funcs_1/r/is_tables_mysql.result
@@ -16,7 +16,7 @@ TABLE_NAME columns_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -66,7 +66,7 @@ TABLE_NAME db
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -116,7 +116,7 @@ TABLE_NAME func
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -162,11 +162,11 @@ user_comment General log
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME gtid_slave_pos
+TABLE_NAME global_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
-VERSION 10
-ROW_FORMAT Fixed
+VERSION 11
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -177,17 +177,17 @@ AUTO_INCREMENT NULL
CREATE_TIME #CRT#
UPDATE_TIME #UT#
CHECK_TIME #CT#
-TABLE_COLLATION latin1_swedish_ci
+TABLE_COLLATION utf8_bin
CHECKSUM NULL
CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment Replication slave GTID position
+user_comment Users and global privileges
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME help_category
+TABLE_NAME gtid_slave_pos
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
@@ -202,21 +202,21 @@ AUTO_INCREMENT NULL
CREATE_TIME #CRT#
UPDATE_TIME #UT#
CHECK_TIME #CT#
-TABLE_COLLATION utf8_general_ci
+TABLE_COLLATION latin1_swedish_ci
CHECKSUM NULL
CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment help categories
+user_comment Replication slave GTID position
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME help_keyword
+TABLE_NAME help_category
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -233,15 +233,15 @@ CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment help keywords
+user_comment help categories
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME help_relation
+TABLE_NAME help_keyword
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -258,11 +258,11 @@ CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment keyword-topic relation
+user_comment help keywords
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME help_topic
+TABLE_NAME help_relation
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
@@ -283,15 +283,15 @@ CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment help topics
+user_comment keyword-topic relation
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME host
+TABLE_NAME help_topic
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -302,13 +302,13 @@ AUTO_INCREMENT NULL
CREATE_TIME #CRT#
UPDATE_TIME #UT#
CHECK_TIME #CT#
-TABLE_COLLATION utf8_bin
+TABLE_COLLATION utf8_general_ci
CHECKSUM NULL
CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment Host privileges; Merged with database privileges
+user_comment help topics
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
@@ -441,7 +441,7 @@ TABLE_NAME procs_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -466,7 +466,7 @@ TABLE_NAME proxies_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -491,7 +491,7 @@ TABLE_NAME roles_mapping
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -516,7 +516,7 @@ TABLE_NAME servers
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -566,7 +566,7 @@ TABLE_NAME tables_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -616,7 +616,7 @@ TABLE_NAME time_zone
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -641,7 +641,7 @@ TABLE_NAME time_zone_leap_second
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -666,7 +666,7 @@ TABLE_NAME time_zone_name
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -691,7 +691,7 @@ TABLE_NAME time_zone_transition
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -716,7 +716,7 @@ TABLE_NAME time_zone_transition_type
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -763,10 +763,10 @@ Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
TABLE_NAME user
-TABLE_TYPE BASE TABLE
-ENGINE MYISAM_OR_MARIA
-VERSION 10
-ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_TYPE VIEW
+ENGINE NULL
+VERSION NULL
+ROW_FORMAT NULL
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -777,13 +777,13 @@ AUTO_INCREMENT NULL
CREATE_TIME #CRT#
UPDATE_TIME #UT#
CHECK_TIME #CT#
-TABLE_COLLATION utf8_bin
+TABLE_COLLATION NULL
CHECKSUM NULL
CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
-TEMPORARY N
-user_comment Users and global privileges
+TEMPORARY NULL
+user_comment VIEW
Separator -----------------------------------------------------
DROP USER testuser1@localhost;
CREATE USER testuser1@localhost;
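In the is_tables_mysql result above, mysql.global_priv appears as a new base table (VERSION 11, utf8_bin, comment "Users and global privileges") while mysql.user is now reported as a VIEW with NULL engine, version, row format and collation. A hedged sketch of an equivalent ad-hoc check, not taken from the test file itself:

-- Sketch: compare how the old and new privilege objects are reported.
SELECT table_name, table_type, engine, table_comment
FROM information_schema.tables
WHERE table_schema = 'mysql'
  AND table_name IN ('user', 'global_priv');
-- Per the result above: global_priv is a BASE TABLE, user is a VIEW.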
diff --git a/mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result b/mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result
index 77fa6ddae1e..01381a5e746 100644
--- a/mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_tables_mysql_embedded.result
@@ -16,7 +16,7 @@ TABLE_NAME columns_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -66,7 +66,7 @@ TABLE_NAME db
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -116,7 +116,7 @@ TABLE_NAME func
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -162,11 +162,11 @@ user_comment General log
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME gtid_slave_pos
+TABLE_NAME global_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
-VERSION 10
-ROW_FORMAT Fixed
+VERSION 11
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -177,17 +177,17 @@ AUTO_INCREMENT NULL
CREATE_TIME #CRT#
UPDATE_TIME #UT#
CHECK_TIME #CT#
-TABLE_COLLATION latin1_swedish_ci
+TABLE_COLLATION utf8_bin
CHECKSUM NULL
CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment Replication slave GTID position
+user_comment Users and global privileges
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME help_category
+TABLE_NAME gtid_slave_pos
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
@@ -202,21 +202,21 @@ AUTO_INCREMENT NULL
CREATE_TIME #CRT#
UPDATE_TIME #UT#
CHECK_TIME #CT#
-TABLE_COLLATION utf8_general_ci
+TABLE_COLLATION latin1_swedish_ci
CHECKSUM NULL
CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment help categories
+user_comment Replication slave GTID position
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME help_keyword
+TABLE_NAME help_category
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -233,15 +233,15 @@ CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment help keywords
+user_comment help categories
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME help_relation
+TABLE_NAME help_keyword
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -258,11 +258,11 @@ CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment keyword-topic relation
+user_comment help keywords
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME help_topic
+TABLE_NAME help_relation
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
@@ -283,15 +283,15 @@ CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment help topics
+user_comment keyword-topic relation
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME host
+TABLE_NAME help_topic
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -302,13 +302,13 @@ AUTO_INCREMENT NULL
CREATE_TIME #CRT#
UPDATE_TIME #UT#
CHECK_TIME #CT#
-TABLE_COLLATION utf8_bin
+TABLE_COLLATION utf8_general_ci
CHECKSUM NULL
CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment Host privileges; Merged with database privileges
+user_comment help topics
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
@@ -441,7 +441,7 @@ TABLE_NAME procs_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -466,7 +466,7 @@ TABLE_NAME proxies_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -491,7 +491,7 @@ TABLE_NAME roles_mapping
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -516,7 +516,7 @@ TABLE_NAME servers
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -566,7 +566,7 @@ TABLE_NAME tables_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -616,7 +616,7 @@ TABLE_NAME time_zone
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -641,7 +641,7 @@ TABLE_NAME time_zone_leap_second
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -666,7 +666,7 @@ TABLE_NAME time_zone_name
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -691,7 +691,7 @@ TABLE_NAME time_zone_transition
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -716,7 +716,7 @@ TABLE_NAME time_zone_transition_type
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -763,10 +763,10 @@ Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
TABLE_NAME user
-TABLE_TYPE BASE TABLE
-ENGINE MYISAM_OR_MARIA
-VERSION 10
-ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_TYPE VIEW
+ENGINE NULL
+VERSION NULL
+ROW_FORMAT NULL
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -777,13 +777,13 @@ AUTO_INCREMENT NULL
CREATE_TIME #CRT#
UPDATE_TIME #UT#
CHECK_TIME #CT#
-TABLE_COLLATION utf8_bin
+TABLE_COLLATION NULL
CHECKSUM NULL
CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
-TEMPORARY N
-user_comment Users and global privileges
+TEMPORARY NULL
+user_comment VIEW
Separator -----------------------------------------------------
DROP USER testuser1@localhost;
CREATE USER testuser1@localhost;
@@ -805,7 +805,7 @@ TABLE_NAME columns_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -855,7 +855,7 @@ TABLE_NAME db
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -905,7 +905,7 @@ TABLE_NAME func
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -951,11 +951,11 @@ user_comment General log
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME gtid_slave_pos
+TABLE_NAME global_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
-VERSION 10
-ROW_FORMAT Fixed
+VERSION 11
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -966,17 +966,17 @@ AUTO_INCREMENT NULL
CREATE_TIME #CRT#
UPDATE_TIME #UT#
CHECK_TIME #CT#
-TABLE_COLLATION latin1_swedish_ci
+TABLE_COLLATION utf8_bin
CHECKSUM NULL
CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment Replication slave GTID position
+user_comment Users and global privileges
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME help_category
+TABLE_NAME gtid_slave_pos
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
@@ -991,21 +991,21 @@ AUTO_INCREMENT NULL
CREATE_TIME #CRT#
UPDATE_TIME #UT#
CHECK_TIME #CT#
-TABLE_COLLATION utf8_general_ci
+TABLE_COLLATION latin1_swedish_ci
CHECKSUM NULL
CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment help categories
+user_comment Replication slave GTID position
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME help_keyword
+TABLE_NAME help_category
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1022,15 +1022,15 @@ CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment help keywords
+user_comment help categories
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME help_relation
+TABLE_NAME help_keyword
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1047,11 +1047,11 @@ CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment keyword-topic relation
+user_comment help keywords
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME help_topic
+TABLE_NAME help_relation
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
@@ -1072,15 +1072,15 @@ CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment help topics
+user_comment keyword-topic relation
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME host
+TABLE_NAME help_topic
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1091,13 +1091,13 @@ AUTO_INCREMENT NULL
CREATE_TIME #CRT#
UPDATE_TIME #UT#
CHECK_TIME #CT#
-TABLE_COLLATION utf8_bin
+TABLE_COLLATION utf8_general_ci
CHECKSUM NULL
CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
TEMPORARY N
-user_comment Host privileges; Merged with database privileges
+user_comment help topics
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
@@ -1230,7 +1230,7 @@ TABLE_NAME procs_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1255,7 +1255,7 @@ TABLE_NAME proxies_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1280,7 +1280,7 @@ TABLE_NAME roles_mapping
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1305,7 +1305,7 @@ TABLE_NAME servers
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1355,7 +1355,7 @@ TABLE_NAME tables_priv
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1405,7 +1405,7 @@ TABLE_NAME time_zone
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1430,7 +1430,7 @@ TABLE_NAME time_zone_leap_second
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1455,7 +1455,7 @@ TABLE_NAME time_zone_name
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1480,7 +1480,7 @@ TABLE_NAME time_zone_transition
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1505,7 +1505,7 @@ TABLE_NAME time_zone_transition_type
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
VERSION 10
-ROW_FORMAT Fixed
+ROW_FORMAT DYNAMIC_OR_PAGE
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1552,10 +1552,10 @@ Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
TABLE_NAME user
-TABLE_TYPE BASE TABLE
-ENGINE MYISAM_OR_MARIA
-VERSION 10
-ROW_FORMAT DYNAMIC_OR_PAGE
+TABLE_TYPE VIEW
+ENGINE NULL
+VERSION NULL
+ROW_FORMAT NULL
TABLE_ROWS #TBLR#
AVG_ROW_LENGTH #ARL#
DATA_LENGTH #DL#
@@ -1566,13 +1566,13 @@ AUTO_INCREMENT NULL
CREATE_TIME #CRT#
UPDATE_TIME #UT#
CHECK_TIME #CT#
-TABLE_COLLATION utf8_bin
+TABLE_COLLATION NULL
CHECKSUM NULL
CREATE_OPTIONS #CO#
TABLE_COMMENT #TC#
MAX_INDEX_LENGTH #MIL#
-TEMPORARY N
-user_comment Users and global privileges
+TEMPORARY NULL
+user_comment VIEW
Separator -----------------------------------------------------
connection default;
disconnect testuser1;
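The is_user_privileges results that follow switch from dumping every column of mysql.user to reading the packed privilege document from mysql.global_priv with json_detailed(priv); the recorded fields are access, plugin, authentication_string and password_last_changed. A minimal sketch of that new inspection query, assuming the test users exist as created in the test:

-- Sketch: inspect per-account privileges in the JSON-based table.
SELECT host, user, json_detailed(priv) AS priv
FROM mysql.global_priv
WHERE user LIKE 'testuser%'
ORDER BY host, user;
-- In the recorded output, testuser2 (granted INSERT and UPDATE on *.*)
-- shows "access": 6, while the unprivileged users show "access": 0.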
diff --git a/mysql-test/suite/funcs_1/r/is_user_privileges.result b/mysql-test/suite/funcs_1/r/is_user_privileges.result
index cb619831baa..c405ec19af4 100644
--- a/mysql-test/suite/funcs_1/r/is_user_privileges.result
+++ b/mysql-test/suite/funcs_1/r/is_user_privileges.result
@@ -54,7 +54,6 @@ grantee table_catalog privilege_type
# Testcases 3.2.16.2+3.2.16.3+3.2.16.4: INFORMATION_SCHEMA.USER_PRIVILEGES
# accessible information
##########################################################################
-DROP DATABASE IF EXISTS db_datadict;
CREATE DATABASE db_datadict;
DROP USER 'testuser1'@'localhost';
CREATE USER 'testuser1'@'localhost';
@@ -63,7 +62,7 @@ CREATE USER 'testuser2'@'localhost';
DROP USER 'testuser3'@'localhost';
CREATE USER 'testuser3'@'localhost';
GRANT SELECT ON db_datadict.* TO 'testuser1'@'localhost';
-GRANT SELECT ON mysql.user TO 'testuser1'@'localhost';
+GRANT SELECT ON mysql.global_priv TO 'testuser1'@'localhost';
GRANT INSERT ON *.* TO 'testuser2'@'localhost';
GRANT UPDATE ON *.* TO 'testuser2'@'localhost';
SELECT * FROM information_schema.user_privileges
@@ -85,149 +84,32 @@ GRANTEE 'testuser3'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-Host localhost
-User testuser1
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser2
-Password
-Select_priv N
-Insert_priv Y
-Update_priv Y
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser3
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
+host localhost
+user testuser1
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser2
+json_detailed(priv) {
+ "access": 6,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser3
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
#
# Add GRANT OPTION db_datadict.* to testuser1;
GRANT UPDATE ON db_datadict.* TO 'testuser1'@'localhost' WITH GRANT OPTION;
@@ -250,149 +132,32 @@ GRANTEE 'testuser3'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-Host localhost
-User testuser1
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser2
-Password
-Select_priv N
-Insert_priv Y
-Update_priv Y
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser3
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
+host localhost
+user testuser1
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser2
+json_detailed(priv) {
+ "access": 6,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser3
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
connect testuser1, localhost, testuser1, , db_datadict;
SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
@@ -401,154 +166,37 @@ GRANTEE 'testuser1'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-Host localhost
-User testuser1
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser2
-Password
-Select_priv N
-Insert_priv Y
-Update_priv Y
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser3
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
+host localhost
+user testuser1
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser2
+json_detailed(priv) {
+ "access": 6,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser3
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
SHOW GRANTS;
Grants for testuser1@localhost
GRANT USAGE ON *.* TO 'testuser1'@'localhost'
GRANT SELECT, UPDATE ON `db_datadict`.* TO 'testuser1'@'localhost' WITH GRANT OPTION
-GRANT SELECT ON `mysql`.`user` TO 'testuser1'@'localhost'
+GRANT SELECT ON `mysql`.`global_priv` TO 'testuser1'@'localhost'
# Now add SELECT on *.* to testuser1;
connection default;
@@ -574,149 +222,32 @@ GRANTEE 'testuser3'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-Host localhost
-User testuser1
-Password
-Select_priv Y
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser2
-Password
-Select_priv N
-Insert_priv Y
-Update_priv Y
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser3
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
+host localhost
+user testuser1
+json_detailed(priv) {
+ "access": 1,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser2
+json_detailed(priv) {
+ "access": 6,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser3
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
GRANT SELECT ON *.* TO 'testuser1'@'localhost' WITH GRANT OPTION;
#
# Here <SELECT YES> is shown correctly for testuser1;
@@ -739,149 +270,32 @@ GRANTEE 'testuser3'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-Host localhost
-User testuser1
-Password
-Select_priv Y
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv Y
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser2
-Password
-Select_priv N
-Insert_priv Y
-Update_priv Y
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser3
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
+host localhost
+user testuser1
+json_detailed(priv) {
+ "access": 1025,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser2
+json_detailed(priv) {
+ "access": 6,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser3
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
connection testuser1;
SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
@@ -890,154 +304,37 @@ GRANTEE 'testuser1'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE SELECT
IS_GRANTABLE YES
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-Host localhost
-User testuser1
-Password
-Select_priv Y
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv Y
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser2
-Password
-Select_priv N
-Insert_priv Y
-Update_priv Y
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser3
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
+host localhost
+user testuser1
+json_detailed(priv) {
+ "access": 1025,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser2
+json_detailed(priv) {
+ "access": 6,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser3
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
SHOW GRANTS;
Grants for testuser1@localhost
GRANT SELECT ON *.* TO 'testuser1'@'localhost' WITH GRANT OPTION
GRANT SELECT, UPDATE ON `db_datadict`.* TO 'testuser1'@'localhost' WITH GRANT OPTION
-GRANT SELECT ON `mysql`.`user` TO 'testuser1'@'localhost'
+GRANT SELECT ON `mysql`.`global_priv` TO 'testuser1'@'localhost'
connect testuser2, localhost, testuser2, , db_datadict;
SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
@@ -1050,9 +347,9 @@ GRANTEE 'testuser2'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE UPDATE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-ERROR 42000: SELECT command denied to user 'testuser2'@'localhost' for table 'user'
+ERROR 42000: SELECT command denied to user 'testuser2'@'localhost' for table 'global_priv'
SHOW GRANTS;
Grants for testuser2@localhost
GRANT INSERT, UPDATE ON *.* TO 'testuser2'@'localhost'
@@ -1064,9 +361,9 @@ GRANTEE 'testuser3'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-ERROR 42000: SELECT command denied to user 'testuser3'@'localhost' for table 'user'
+ERROR 42000: SELECT command denied to user 'testuser3'@'localhost' for table 'global_priv'
SHOW GRANTS;
Grants for testuser3@localhost
GRANT USAGE ON *.* TO 'testuser3'@'localhost'
@@ -1093,149 +390,32 @@ GRANTEE 'testuser3'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-Host localhost
-User testuser1
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser2
-Password
-Select_priv N
-Insert_priv Y
-Update_priv Y
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser3
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
+host localhost
+user testuser1
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser2
+json_detailed(priv) {
+ "access": 6,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser3
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
connection testuser1;
SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
@@ -1244,9 +424,9 @@ GRANTEE 'testuser1'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-ERROR 42000: SELECT command denied to user 'testuser1'@'localhost' for table 'user'
+ERROR 42000: SELECT command denied to user 'testuser1'@'localhost' for table 'global_priv'
SHOW GRANTS;
Grants for testuser1@localhost
GRANT USAGE ON *.* TO 'testuser1'@'localhost'
@@ -1259,19 +439,19 @@ GRANTEE 'testuser1'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-ERROR 42000: SELECT command denied to user 'testuser1'@'localhost' for table 'user'
+ERROR 42000: SELECT command denied to user 'testuser1'@'localhost' for table 'global_priv'
SHOW GRANTS;
Grants for testuser1@localhost
GRANT USAGE ON *.* TO 'testuser1'@'localhost'
CREATE TABLE db_datadict.tb_66 ( c1 TEXT );
ERROR 42000: CREATE command denied to user 'testuser1'@'localhost' for table 'tb_66'
-# Add ALL on db_datadict.* (and select on mysql.user) to testuser1;
+# Add ALL on db_datadict.* (and select on mysql.global_priv) to testuser1;
connection default;
GRANT ALL ON db_datadict.* TO 'testuser1'@'localhost' WITH GRANT OPTION;
-GRANT SELECT ON mysql.user TO 'testuser1'@'localhost';
+GRANT SELECT ON mysql.global_priv TO 'testuser1'@'localhost';
SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
ORDER BY grantee, table_catalog, privilege_type;
@@ -1291,149 +471,32 @@ GRANTEE 'testuser3'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-Host localhost
-User testuser1
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser2
-Password
-Select_priv N
-Insert_priv Y
-Update_priv Y
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser3
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
+host localhost
+user testuser1
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser2
+json_detailed(priv) {
+ "access": 6,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser3
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
connection testuser1;
SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
@@ -1442,154 +505,37 @@ GRANTEE 'testuser1'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-Host localhost
-User testuser1
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser2
-Password
-Select_priv N
-Insert_priv Y
-Update_priv Y
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser3
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
+host localhost
+user testuser1
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser2
+json_detailed(priv) {
+ "access": 6,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser3
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
SHOW GRANTS;
Grants for testuser1@localhost
GRANT USAGE ON *.* TO 'testuser1'@'localhost'
GRANT ALL PRIVILEGES ON `db_datadict`.* TO 'testuser1'@'localhost' WITH GRANT OPTION
-GRANT SELECT ON `mysql`.`user` TO 'testuser1'@'localhost'
+GRANT SELECT ON `mysql`.`global_priv` TO 'testuser1'@'localhost'
CREATE TABLE db_datadict.tb_56 ( c1 TEXT );
ERROR 42000: CREATE command denied to user 'testuser1'@'localhost' for table 'tb_56'
USE db_datadict;
@@ -1600,154 +546,37 @@ GRANTEE 'testuser1'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-Host localhost
-User testuser1
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser2
-Password
-Select_priv N
-Insert_priv Y
-Update_priv Y
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser3
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
+host localhost
+user testuser1
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser2
+json_detailed(priv) {
+ "access": 6,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser3
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
SHOW GRANTS;
Grants for testuser1@localhost
GRANT USAGE ON *.* TO 'testuser1'@'localhost'
GRANT ALL PRIVILEGES ON `db_datadict`.* TO 'testuser1'@'localhost' WITH GRANT OPTION
-GRANT SELECT ON `mysql`.`user` TO 'testuser1'@'localhost'
+GRANT SELECT ON `mysql`.`global_priv` TO 'testuser1'@'localhost'
CREATE TABLE tb_57 ( c1 TEXT )
ENGINE = <other_engine_type>;
@@ -1773,149 +602,32 @@ GRANTEE 'testuser3'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-Host localhost
-User testuser1
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser2
-Password
-Select_priv N
-Insert_priv Y
-Update_priv Y
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
-Host localhost
-User testuser3
-Password
-Select_priv N
-Insert_priv N
-Update_priv N
-Delete_priv N
-Create_priv N
-Drop_priv N
-Reload_priv N
-Shutdown_priv N
-Process_priv N
-File_priv N
-Grant_priv N
-References_priv N
-Index_priv N
-Alter_priv N
-Show_db_priv N
-Super_priv N
-Create_tmp_table_priv N
-Lock_tables_priv N
-Execute_priv N
-Repl_slave_priv N
-Repl_client_priv N
-Create_view_priv N
-Show_view_priv N
-Create_routine_priv N
-Alter_routine_priv N
-Create_user_priv N
-Event_priv N
-Trigger_priv N
-Create_tablespace_priv N
-Delete_history_priv N
-ssl_type
-ssl_cipher
-x509_issuer
-x509_subject
-max_questions 0
-max_updates 0
-max_connections 0
-max_user_connections 0
-plugin
-authentication_string
-password_expired N
-is_role N
-default_role
-max_statement_time 0.000000
+host localhost
+user testuser1
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser2
+json_detailed(priv) {
+ "access": 6,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
+host localhost
+user testuser3
+json_detailed(priv) {
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "",
+ "password_last_changed": #
+}
connection testuser1;
SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
@@ -1924,9 +636,9 @@ GRANTEE 'testuser1'@'localhost'
TABLE_CATALOG def
PRIVILEGE_TYPE USAGE
IS_GRANTABLE NO
-SELECT * FROM mysql.user
+SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
-ERROR 42000: SELECT command denied to user 'testuser1'@'localhost' for table 'user'
+ERROR 42000: SELECT command denied to user 'testuser1'@'localhost' for table 'global_priv'
SHOW GRANTS;
Grants for testuser1@localhost
GRANT USAGE ON *.* TO 'testuser1'@'localhost'
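The hunks above replace the old column-per-privilege dumps of mysql.user with queries against mysql.global_priv, where account data is stored as a single JSON document in the priv column (mysql.user itself survives only as a compatibility view, as the is_views.result hunk below shows). Judging from the expected output, the "access" member is the global privilege bitmask: 6 matches INSERT|UPDATE for testuser2, 1 matches SELECT and 1025 matches SELECT plus GRANT OPTION for testuser1. A minimal sketch of the same kind of query outside the test suite, assuming a 10.4 server with this patch; JSON_DETAILED and JSON_VALUE are existing MariaDB JSON functions, but the JSON paths are inferred from the results above rather than taken from the patch:

SELECT host,
       user,
       JSON_DETAILED(priv)           AS priv_json,   -- pretty-printed account document
       JSON_VALUE(priv, '$.access')  AS access_bits, -- global privilege bitmask
       JSON_VALUE(priv, '$.plugin')  AS auth_plugin  -- authentication plugin name
FROM mysql.global_priv
WHERE user LIKE 'testuser%'
ORDER BY host, user;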
diff --git a/mysql-test/suite/funcs_1/r/is_views.result b/mysql-test/suite/funcs_1/r/is_views.result
index 62ec33c8340..e6a2715bbb3 100644
--- a/mysql-test/suite/funcs_1/r/is_views.result
+++ b/mysql-test/suite/funcs_1/r/is_views.result
@@ -70,6 +70,7 @@ ALGORITHM varchar(10) NO
SELECT table_catalog, table_schema, table_name
FROM information_schema.views WHERE table_catalog IS NOT NULL;
table_catalog table_schema table_name
+def mysql user
################################################################################
# Testcase 3.2.13.2 + 3.2.13.3: INFORMATION_SCHEMA.VIEWS accessible information
################################################################################
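The single row added here (and the matching one in is_views_embedded.result just below) records that mysql.user now appears in INFORMATION_SCHEMA.VIEWS, i.e. it is exposed as a view over mysql.global_priv rather than a base table. A hedged one-line check, assuming a server built from this tree:

SELECT table_catalog, table_schema, table_name
FROM information_schema.views
WHERE table_schema = 'mysql' AND table_name = 'user';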
diff --git a/mysql-test/suite/funcs_1/r/is_views_embedded.result b/mysql-test/suite/funcs_1/r/is_views_embedded.result
index c382370e892..cc6ade7daaf 100644
--- a/mysql-test/suite/funcs_1/r/is_views_embedded.result
+++ b/mysql-test/suite/funcs_1/r/is_views_embedded.result
@@ -70,6 +70,7 @@ ALGORITHM varchar(10) NO
SELECT table_catalog, table_schema, table_name
FROM information_schema.views WHERE table_catalog IS NOT NULL;
table_catalog table_schema table_name
+def mysql user
################################################################################
# Testcase 3.2.13.2 + 3.2.13.3: INFORMATION_SCHEMA.VIEWS accessible information
################################################################################
diff --git a/mysql-test/suite/funcs_1/r/memory_func_view.result b/mysql-test/suite/funcs_1/r/memory_func_view.result
index 85539fbf06f..88bfa702be8 100644
--- a/mysql-test/suite/funcs_1/r/memory_func_view.result
+++ b/mysql-test/suite/funcs_1/r/memory_func_view.result
@@ -3779,14 +3779,14 @@ WHERE select_id = 51 OR select_id IS NULL order by id;
CAST(my_varbinary_1000 AS TIME) my_varbinary_1000 id
NULL NULL 1
NULL 2
-00:00:00 <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 23
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
SHOW CREATE VIEW v1;
View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select cast(`t1_values`.`my_varbinary_1000` as time) AS `CAST(my_varbinary_1000 AS TIME)`,`t1_values`.`my_varbinary_1000` AS `my_varbinary_1000`,`t1_values`.`id` AS `id` from `t1_values` latin1 latin1_swedish_ci
@@ -3796,14 +3796,14 @@ WHERE select_id = 51 OR select_id IS NULL) order by id;
CAST(my_varbinary_1000 AS TIME) my_varbinary_1000 id
NULL NULL 1
NULL 2
-00:00:00 <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 23
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
DROP VIEW v1;
@@ -3814,15 +3814,15 @@ my_binary_30, id FROM t1_values
WHERE select_id = 50 OR select_id IS NULL order by id;
CAST(my_binary_30 AS TIME) my_binary_30 id
NULL NULL 1
-00:00:00 2
-00:00:00 <--------30 characters-------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL 2
+NULL <--------30 characters-------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 22
Warnings:
-Warning 1292 Truncated incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-Warning 1292 Truncated incorrect time value: '<--------30 characters------->'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+Warning 1292 Incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+Warning 1292 Incorrect time value: '<--------30 characters------->'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Warning 1292 Truncated incorrect time value: '-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Warning 1292 Truncated incorrect time value: '1 17:58\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SHOW CREATE VIEW v1;
@@ -3833,15 +3833,15 @@ WHERE v1.id IN (SELECT id FROM t1_values
WHERE select_id = 50 OR select_id IS NULL) order by id;
CAST(my_binary_30 AS TIME) my_binary_30 id
NULL NULL 1
-00:00:00
-00:00:00 <--------30 characters-------> 3
--00:00:00 ---äÖüß@µ*$--
+NULL
+NULL <--------30 characters-------> 3
+NULL ---äÖüß@µ*$--
-00:00:01 -1
41:58:00 1 17:58
Warnings:
-Warning 1292 Truncated incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-Warning 1292 Truncated incorrect time value: '<--------30 characters------->'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+Warning 1292 Incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+Warning 1292 Incorrect time value: '<--------30 characters------->'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Warning 1292 Truncated incorrect time value: '-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Warning 1292 Truncated incorrect time value: '1 17:58\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
DROP VIEW v1;
@@ -3855,14 +3855,14 @@ WHERE select_id = 49 OR select_id IS NULL order by id;
CAST(my_varchar_1000 AS TIME) my_varchar_1000 id
NULL NULL 1
NULL 2
-00:00:00 <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 21
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
SHOW CREATE VIEW v1;
View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select cast(`t1_values`.`my_varchar_1000` as time) AS `CAST(my_varchar_1000 AS TIME)`,`t1_values`.`my_varchar_1000` AS `my_varchar_1000`,`t1_values`.`id` AS `id` from `t1_values` latin1 latin1_swedish_ci
@@ -3872,14 +3872,14 @@ WHERE select_id = 49 OR select_id IS NULL) order by id;
CAST(my_varchar_1000 AS TIME) my_varchar_1000 id
NULL NULL 1
NULL 2
-00:00:00 <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 21
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
DROP VIEW v1;
@@ -3891,14 +3891,14 @@ WHERE select_id = 48 OR select_id IS NULL order by id;
CAST(my_char_30 AS TIME) my_char_30 id
NULL NULL 1
NULL 2
-00:00:00 <--------30 characters-------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <--------30 characters-------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 20
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<--------30 characters------->'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$--'
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<--------30 characters------->'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$--'
SHOW CREATE VIEW v1;
View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select cast(`t1_values`.`my_char_30` as time) AS `CAST(my_char_30 AS TIME)`,`t1_values`.`my_char_30` AS `my_char_30`,`t1_values`.`id` AS `id` from `t1_values` latin1 latin1_swedish_ci
@@ -3908,14 +3908,14 @@ WHERE select_id = 48 OR select_id IS NULL) order by id;
CAST(my_char_30 AS TIME) my_char_30 id
NULL NULL 1
NULL 2
-00:00:00 <--------30 characters-------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <--------30 characters-------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 20
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<--------30 characters------->'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$--'
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<--------30 characters------->'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$--'
DROP VIEW v1;
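The memory_func_view.result changes above (repeated verbatim for myisam_func_view.result below) capture a CAST(... AS TIME) behaviour change: character strings with no usable time part now yield NULL with an "Incorrect time value" warning instead of being truncated to 00:00:00, while values that only carry trailing garbage (such as '-1' padded with \x00 bytes) still truncate and keep the old "Truncated incorrect time value" warning. A small ad-hoc check along those lines, not part of the patch; the table name t_cast_check is made up for illustration:

CREATE TABLE t_cast_check (s VARCHAR(30));
INSERT INTO t_cast_check VALUES ('---äÖüß@µ*$--'), ('-1'), ('1 17:58');
-- Expected on a server with this change: NULL, -00:00:01 and 41:58:00
SELECT s, CAST(s AS TIME) AS t FROM t_cast_check;
SHOW WARNINGS;
DROP TABLE t_cast_check;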
diff --git a/mysql-test/suite/funcs_1/r/myisam_func_view.result b/mysql-test/suite/funcs_1/r/myisam_func_view.result
index 85539fbf06f..88bfa702be8 100644
--- a/mysql-test/suite/funcs_1/r/myisam_func_view.result
+++ b/mysql-test/suite/funcs_1/r/myisam_func_view.result
@@ -3779,14 +3779,14 @@ WHERE select_id = 51 OR select_id IS NULL order by id;
CAST(my_varbinary_1000 AS TIME) my_varbinary_1000 id
NULL NULL 1
NULL 2
-00:00:00 <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 23
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
SHOW CREATE VIEW v1;
View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select cast(`t1_values`.`my_varbinary_1000` as time) AS `CAST(my_varbinary_1000 AS TIME)`,`t1_values`.`my_varbinary_1000` AS `my_varbinary_1000`,`t1_values`.`id` AS `id` from `t1_values` latin1 latin1_swedish_ci
@@ -3796,14 +3796,14 @@ WHERE select_id = 51 OR select_id IS NULL) order by id;
CAST(my_varbinary_1000 AS TIME) my_varbinary_1000 id
NULL NULL 1
NULL 2
-00:00:00 <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 23
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
DROP VIEW v1;
@@ -3814,15 +3814,15 @@ my_binary_30, id FROM t1_values
WHERE select_id = 50 OR select_id IS NULL order by id;
CAST(my_binary_30 AS TIME) my_binary_30 id
NULL NULL 1
-00:00:00 2
-00:00:00 <--------30 characters-------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL 2
+NULL <--------30 characters-------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 22
Warnings:
-Warning 1292 Truncated incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-Warning 1292 Truncated incorrect time value: '<--------30 characters------->'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+Warning 1292 Incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+Warning 1292 Incorrect time value: '<--------30 characters------->'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Warning 1292 Truncated incorrect time value: '-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Warning 1292 Truncated incorrect time value: '1 17:58\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
SHOW CREATE VIEW v1;
@@ -3833,15 +3833,15 @@ WHERE v1.id IN (SELECT id FROM t1_values
WHERE select_id = 50 OR select_id IS NULL) order by id;
CAST(my_binary_30 AS TIME) my_binary_30 id
NULL NULL 1
-00:00:00
-00:00:00 <--------30 characters-------> 3
--00:00:00 ---äÖüß@µ*$--
+NULL
+NULL <--------30 characters-------> 3
+NULL ---äÖüß@µ*$--
-00:00:01 -1
41:58:00 1 17:58
Warnings:
-Warning 1292 Truncated incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-Warning 1292 Truncated incorrect time value: '<--------30 characters------->'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+Warning 1292 Incorrect time value: '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
+Warning 1292 Incorrect time value: '<--------30 characters------->'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Warning 1292 Truncated incorrect time value: '-1\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
Warning 1292 Truncated incorrect time value: '1 17:58\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
DROP VIEW v1;
@@ -3855,14 +3855,14 @@ WHERE select_id = 49 OR select_id IS NULL order by id;
CAST(my_varchar_1000 AS TIME) my_varchar_1000 id
NULL NULL 1
NULL 2
-00:00:00 <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 21
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
SHOW CREATE VIEW v1;
View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select cast(`t1_values`.`my_varchar_1000` as time) AS `CAST(my_varchar_1000 AS TIME)`,`t1_values`.`my_varchar_1000` AS `my_varchar_1000`,`t1_values`.`id` AS `id` from `t1_values` latin1 latin1_swedish_ci
@@ -3872,14 +3872,14 @@ WHERE select_id = 49 OR select_id IS NULL) order by id;
CAST(my_varchar_1000 AS TIME) my_varchar_1000 id
NULL NULL 1
NULL 2
-00:00:00 <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <---------1000 characters--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 21
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<---------1000 characters-------------------------------------------------------------------------------------------------------'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$-- '
DROP VIEW v1;
@@ -3891,14 +3891,14 @@ WHERE select_id = 48 OR select_id IS NULL order by id;
CAST(my_char_30 AS TIME) my_char_30 id
NULL NULL 1
NULL 2
-00:00:00 <--------30 characters-------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <--------30 characters-------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 20
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<--------30 characters------->'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$--'
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<--------30 characters------->'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$--'
SHOW CREATE VIEW v1;
View Create View character_set_client collation_connection
v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select cast(`t1_values`.`my_char_30` as time) AS `CAST(my_char_30 AS TIME)`,`t1_values`.`my_char_30` AS `my_char_30`,`t1_values`.`id` AS `id` from `t1_values` latin1 latin1_swedish_ci
@@ -3908,14 +3908,14 @@ WHERE select_id = 48 OR select_id IS NULL) order by id;
CAST(my_char_30 AS TIME) my_char_30 id
NULL NULL 1
NULL 2
-00:00:00 <--------30 characters-------> 3
--00:00:00 ---äÖüß@µ*$-- 4
+NULL <--------30 characters-------> 3
+NULL ---äÖüß@µ*$-- 4
-00:00:01 -1 5
41:58:00 1 17:58 20
Warnings:
-Warning 1292 Truncated incorrect time value: ''
-Warning 1292 Truncated incorrect time value: '<--------30 characters------->'
-Warning 1292 Truncated incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$--'
+Warning 1292 Incorrect time value: ''
+Warning 1292 Incorrect time value: '<--------30 characters------->'
+Warning 1292 Incorrect time value: ' ---\xC3\xA4\xC3\x96\xC3\xBC\xC3\x9F@\xC2\xB5*$--'
DROP VIEW v1;
diff --git a/mysql-test/suite/funcs_1/r/storedproc.result b/mysql-test/suite/funcs_1/r/storedproc.result
index 9899456f7a8..81ed8405b7d 100644
--- a/mysql-test/suite/funcs_1/r/storedproc.result
+++ b/mysql-test/suite/funcs_1/r/storedproc.result
@@ -15723,6 +15723,7 @@ Testcase 4.3.7:
DROP PROCEDURE IF EXISTS sp7;
CREATE PROCEDURE sp7()
BEGIN
+DECLARE count INT DEFAULT 100;
label1: loop
set @dummystring = 'temp value';
if count > 10 then leave label1;
@@ -15732,7 +15733,7 @@ END label1 loop;
END//
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'iterate;
END label1 loop;
-END' at line 7
+END' at line 8
DROP PROCEDURE IF EXISTS sp7;
CREATE PROCEDURE sp7()
BEGIN
diff --git a/mysql-test/suite/funcs_1/t/is_user_privileges.test b/mysql-test/suite/funcs_1/t/is_user_privileges.test
index a61e9187d15..4293c4e16fb 100644
--- a/mysql-test/suite/funcs_1/t/is_user_privileges.test
+++ b/mysql-test/suite/funcs_1/t/is_user_privileges.test
@@ -77,9 +77,6 @@ WHERE table_catalog IS NULL OR table_catalog <> 'def';
# 3.2.16.4: Ensure that the table does not show any information on any
# privileges that are not user privileges for the current user.
#
---disable_warnings
-DROP DATABASE IF EXISTS db_datadict;
---enable_warnings
CREATE DATABASE db_datadict;
--error 0,ER_CANNOT_USER
@@ -93,7 +90,7 @@ DROP USER 'testuser3'@'localhost';
CREATE USER 'testuser3'@'localhost';
GRANT SELECT ON db_datadict.* TO 'testuser1'@'localhost';
-GRANT SELECT ON mysql.user TO 'testuser1'@'localhost';
+GRANT SELECT ON mysql.global_priv TO 'testuser1'@'localhost';
GRANT INSERT ON *.* TO 'testuser2'@'localhost';
GRANT UPDATE ON *.* TO 'testuser2'@'localhost';
@@ -101,11 +98,12 @@ GRANT UPDATE ON *.* TO 'testuser2'@'localhost';
let $my_select1= SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
ORDER BY grantee, table_catalog, privilege_type;
-let $my_select2= SELECT * FROM mysql.user
+let $my_select2= SELECT host,user,json_detailed(priv) FROM mysql.global_priv
WHERE user LIKE 'testuser%' ORDER BY host, user;
let $my_show= SHOW GRANTS;
--vertical_results
eval $my_select1;
+--replace_regex /password_last_changed": [0-9]*/password_last_changed": #/
eval $my_select2;
--horizontal_results
@@ -114,6 +112,7 @@ eval $my_select2;
GRANT UPDATE ON db_datadict.* TO 'testuser1'@'localhost' WITH GRANT OPTION;
--vertical_results
eval $my_select1;
+--replace_regex /password_last_changed": [0-9]*/password_last_changed": #/
eval $my_select2;
--horizontal_results
@@ -121,6 +120,7 @@ eval $my_select2;
connect (testuser1, localhost, testuser1, , db_datadict);
--vertical_results
eval $my_select1;
+--replace_regex /password_last_changed": [0-9]*/password_last_changed": #/
eval $my_select2;
--horizontal_results
eval $my_show;
@@ -134,6 +134,7 @@ GRANT SELECT ON *.* TO 'testuser1'@'localhost';
--echo # Here <SELECT NO> is shown correctly for testuser1;
--vertical_results
eval $my_select1;
+--replace_regex /password_last_changed": [0-9]*/password_last_changed": #/
eval $my_select2;
--horizontal_results
@@ -142,6 +143,7 @@ GRANT SELECT ON *.* TO 'testuser1'@'localhost' WITH GRANT OPTION;
--echo # Here <SELECT YES> is shown correctly for testuser1;
--vertical_results
eval $my_select1;
+--replace_regex /password_last_changed": [0-9]*/password_last_changed": #/
eval $my_select2;
--horizontal_results
@@ -149,6 +151,7 @@ eval $my_select2;
connection testuser1;
--vertical_results
eval $my_select1;
+--replace_regex /password_last_changed": [0-9]*/password_last_changed": #/
eval $my_select2;
--horizontal_results
eval $my_show;
@@ -177,6 +180,7 @@ connection default;
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'testuser1'@'localhost';
--vertical_results
eval $my_select1;
+--replace_regex /password_last_changed": [0-9]*/password_last_changed": #/
eval $my_select2;
--horizontal_results
@@ -203,18 +207,20 @@ eval $my_show;
CREATE TABLE db_datadict.tb_66 ( c1 TEXT );
--echo
---echo # Add ALL on db_datadict.* (and select on mysql.user) to testuser1;
+--echo # Add ALL on db_datadict.* (and select on mysql.global_priv) to testuser1;
connection default;
GRANT ALL ON db_datadict.* TO 'testuser1'@'localhost' WITH GRANT OPTION;
-GRANT SELECT ON mysql.user TO 'testuser1'@'localhost';
+GRANT SELECT ON mysql.global_priv TO 'testuser1'@'localhost';
--vertical_results
eval $my_select1;
+--replace_regex /password_last_changed": [0-9]*/password_last_changed": #/
eval $my_select2;
--horizontal_results
connection testuser1;
--vertical_results
eval $my_select1;
+--replace_regex /password_last_changed": [0-9]*/password_last_changed": #/
eval $my_select2;
--horizontal_results
eval $my_show;
@@ -227,6 +233,7 @@ CREATE TABLE db_datadict.tb_56 ( c1 TEXT );
USE db_datadict;
--vertical_results
eval $my_select1;
+--replace_regex /password_last_changed": [0-9]*/password_last_changed": #/
eval $my_select2;
--horizontal_results
eval $my_show;
@@ -241,6 +248,7 @@ connection default;
REVOKE ALL PRIVILEGES, GRANT OPTION FROM 'testuser1'@'localhost';
--vertical_results
eval $my_select1;
+--replace_regex /password_last_changed": [0-9]*/password_last_changed": #/
eval $my_select2;
--horizontal_results
diff --git a/mysql-test/suite/funcs_1/t/row_count_func.test b/mysql-test/suite/funcs_1/t/row_count_func.test
index 1694928b26c..3a76a6cac7c 100644
--- a/mysql-test/suite/funcs_1/t/row_count_func.test
+++ b/mysql-test/suite/funcs_1/t/row_count_func.test
@@ -14,6 +14,7 @@ INSERT INTO t1 VALUES (1), (2), (3);
--echo
--echo # -- Check 1.
+--disable_warnings
--enable_info
--echo SELECT * FROM t1 INTO OUTFILE "MYSQL_TMP_DIR/bug21818.txt";
--disable_query_log # to avoid $MYSQL_TMP_DIR in query log
@@ -34,6 +35,7 @@ SELECT a FROM t1 LIMIT 1 INTO @a;
--echo
SELECT ROW_COUNT();
+--enable_warnings
--echo
--echo # -- Check 3.
diff --git a/mysql-test/suite/funcs_1/t/storedproc.test b/mysql-test/suite/funcs_1/t/storedproc.test
index 98385d42b22..31786410a7b 100644
--- a/mysql-test/suite/funcs_1/t/storedproc.test
+++ b/mysql-test/suite/funcs_1/t/storedproc.test
@@ -18861,6 +18861,7 @@ delimiter //;
--error ER_PARSE_ERROR
CREATE PROCEDURE sp7()
BEGIN
+ DECLARE count INT DEFAULT 100;
label1: loop
set @dummystring = 'temp value';
if count > 10 then leave label1;
diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def
index d744e3cb811..d40b409cb33 100644
--- a/mysql-test/suite/galera/disabled.def
+++ b/mysql-test/suite/galera/disabled.def
@@ -11,19 +11,26 @@
##############################################################################
GAL-419 : MDEV-13549 Galera test failures
+GCF-1081 : MDEV-18283 Galera test failure on galera.GCF-1081
MW-328A : MDEV-17847 Galera test failure on MW-328[A|B|C]
MW-328B : MDEV-17847 Galera test failure on MW-328[A|B|C]
MW-328C : MDEV-17847 Galera test failure on MW-328[A|B|C]
MW-329 : wsrep_local_replays not stable
-MW-336 : MDEV-13549 Timeout in wait_condition.inc for PROCESSLIST
+MW-336 : MDEV-13549 incorrect wait_condition for wsrep_slave_threads changes
+MW-360 : needs rewrite to be MariaDB gtid compatible
MW-416 : MDEV-13549 Galera test failures
MW-44 : MDEV-15809 Test failure on galera.MW-44
galera_account_management : MariaDB 10.0 does not support ALTER USER
galera_as_master_gtid : Requires MySQL GTID
galera_as_master_gtid_change_master : Requires MySQL GTID
+galera_as_slave_gtid_replicate_do_db_cc : Requires MySQL GTID
galera_as_slave_preordered : wsrep-preordered feature not merged to MariaDB
galera_as_slave_replication_bundle : MDEV-15785 OPTION_GTID_BEGIN is set in Gtid_log_event::do_apply_event()
+galera_autoinc_sst_mariabackup : MDEV-18177 Galera test failure on galera_autoinc_sst_mariabackup
+galera_bf_abort_group_commit : MDEV-18282 Galera test failure on galera.galera_bf_abort_group_commit
galera_binlog_rows_query_log_events: MariaDB does not support binlog_rows_query_log_events
+galera_concurrent_ctas : MDEV-18180 Galera test failure on galera.galera_concurrent_ctas
+galera_encrypt_tmp_files : Get error failed to enable encryption of temporary files
galera_flush : MariaDB does not have global.thread_statistics
galera_gcache_recover_manytrx : MDEV-18834 Galera test failure
galera_gcs_fc_limit : MDEV-17061 Timeout in wait_condition.inc for PROCESSLIST
@@ -31,13 +38,20 @@ galera_ist_mariabackup : MDEV-18829 test leaves port open
galera_ist_progress: MDEV-15236 fails when trying to read transfer status
galera_kill_applier : race condition at the start of the test
galera_kill_ddl : MDEV-17108 Test failure on galera.galera_kill_ddl
+galera_kill_largechanges : MDEV-18179 Galera test failure on galera.galera_kill_largechanges
+galera_kill_nochanges : MDEV-18280 Galera test failure on galera_split_brain and galera_kill_nochanges
+galera_many_tables_nopk : MDEV-18182 Galera test failure on galera.galera_many_tables_nopk
galera_migrate : MariaDB does not support START SLAVE USER
galera_pc_ignore_sb : MDEV-15811/MDEV-17357 Test failure
+galera_split_brain : MDEV-18280 Galera test failure on galera_split_brain and galera_kill_nochanges
galera_ssl_upgrade : MDEV-13549 Galera test failures
galera_sst_mysqldump_with_key : MDEV-16890 Galera test failure
galera_var_node_address : MDEV-17151 Galera test failure
galera_var_notify_cmd : MDEV-13549 Galera test failures
+galera_var_reject_queries : assertion in inline_mysql_socket_send
+galera_var_retry_autocommit: MDEV-18181 Galera test failure on galera.galera_var_retry_autocommit
galera_wan : MDEV-17259: Test failure on galera.galera_wan
+mysql-wsrep#198 : MDEV-18935 Galera test mysql-wsrep#198 sporadic assertion transaction.cpp:362: int wsrep::transaction::before_commit(): Assertion `state() == s_executing || state() == s_committing || state() == s_must_abort || state() == s_replaying' failed.
partition : MDEV-13549 regularly showing auto_increment mismatch
pxc-421: Lock timeout exceeded
query_cache : MDEV-18137: Galera test failure on query_cache
diff --git a/mysql-test/suite/galera/galera_2nodes.cnf b/mysql-test/suite/galera/galera_2nodes.cnf
index b24f3603894..ef8a17a77be 100644
--- a/mysql-test/suite/galera/galera_2nodes.cnf
+++ b/mysql-test/suite/galera/galera_2nodes.cnf
@@ -2,7 +2,7 @@
!include include/default_mysqld.cnf
[mysqld]
-wsrep-on=1
+loose-innodb
binlog-format=row
innodb-autoinc-lock-mode=2
default-storage-engine=innodb
@@ -10,20 +10,35 @@ wsrep-provider=@ENV.WSREP_PROVIDER
wsrep_node_address=127.0.0.1
# enforce read-committed characteristics across the cluster
wsrep-sync-wait=15
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[mysqld.1]
+loose-innodb
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep-cluster-address=gcomm://
wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;gcache.size=10M'
wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port
wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port'
+# enforce read-committed characteristics across the cluster
+wsrep_causal_reads=ON
+wsrep_sync_wait = 15
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
+
[mysqld.2]
+loose-innodb
+# debug=d:t:i:o,/tmp/mysqld.2.trace
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port'
wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.2.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S'
@@ -34,8 +49,14 @@ wsrep_sync_wait = 15
wsrep_node_address=127.0.0.1
wsrep_sst_receive_address=127.0.0.2:@mysqld.2.#sst_port
wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port
-wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port'
+wsrep_sst_receive_address='127.0.0.2:@mysqld.2.#sst_port'
+# enforce read-committed characteristics across the cluster
+wsrep_causal_reads=ON
+wsrep_sync_wait = 15
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[ENV]
NODE_MYPORT_1= @mysqld.1.port
@@ -43,4 +64,3 @@ NODE_MYSOCK_1= @mysqld.1.socket
NODE_MYPORT_2= @mysqld.2.port
NODE_MYSOCK_2= @mysqld.2.socket
-
diff --git a/mysql-test/suite/galera/galera_2nodes_as_master.cnf b/mysql-test/suite/galera/galera_2nodes_as_master.cnf
index 33bfc475721..4403416b033 100644
--- a/mysql-test/suite/galera/galera_2nodes_as_master.cnf
+++ b/mysql-test/suite/galera/galera_2nodes_as_master.cnf
@@ -11,6 +11,9 @@ log-bin=mysqld-bin
binlog-format=row
innodb-autoinc-lock-mode=2
default-storage-engine=innodb
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[mysqld.1]
#galera_port=@OPT.port
@@ -30,6 +33,9 @@ wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port
# enforce read-committed characteristics across the cluster
wsrep-causal-reads=ON
wsrep-sync-wait=15
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[mysqld.2]
#galera_port=@OPT.port
@@ -49,9 +55,15 @@ wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port
# enforce read-committed characteristics across the cluster
wsrep-causal-reads=ON
wsrep-sync-wait=15
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[mysqld.3]
server-id=3
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[ENV]
NODE_MYPORT_1= @mysqld.1.port
@@ -62,9 +74,3 @@ NODE_MYSOCK_2= @mysqld.2.socket
NODE_MYPORT_3= @mysqld.3.port
NODE_MYSOCK_3= @mysqld.3.socket
-
-NODE_GALERAPORT_1= @mysqld.1.#galera_port
-NODE_GALERAPORT_2= @mysqld.2.#galera_port
-
-NODE_SSTPORT_1= @mysqld.1.#sst_port
-NODE_SSTPORT_2= @mysqld.2.#sst_port
diff --git a/mysql-test/suite/galera/galera_2nodes_as_slave.cnf b/mysql-test/suite/galera/galera_2nodes_as_slave.cnf
index 4d9e39d2aae..d1fa7bfbfca 100644
--- a/mysql-test/suite/galera/galera_2nodes_as_slave.cnf
+++ b/mysql-test/suite/galera/galera_2nodes_as_slave.cnf
@@ -9,17 +9,13 @@
binlog-format=row
[mysqld.1]
-log-bin
-server-id=1
-
-[mysqld.2]
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
-
wsrep-on=1
-log-bin
+log-bin=master-bin
+log-bin-index=master-bin
log-slave-updates
innodb-autoinc-lock-mode=2
@@ -27,38 +23,53 @@ default-storage-engine=innodb
wsrep-provider=@ENV.WSREP_PROVIDER
wsrep_node_address=127.0.0.1
wsrep-cluster-address=gcomm://
-wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=10M'
-wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port
-wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port'
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=10M'
+wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port
+wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port'
# enforce read-committed characteristics across the cluster
wsrep-causal-reads=ON
wsrep-sync-wait=15
-server-id=2
+server-id=1
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
-[mysqld.3]
+[mysqld.2]
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
-
wsrep-on=1
-log-bin
+log-bin=master-bin
+log-bin-index=master-bin
log-slave-updates
innodb-autoinc-lock-mode=2
default-storage-engine=innodb
wsrep-provider=@ENV.WSREP_PROVIDER
wsrep_node_address=127.0.0.1
-wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.2.#galera_port'
-wsrep_provider_options='base_port=@mysqld.3.#galera_port;gcache.size=10M'
-wsrep_node_incoming_address=127.0.0.1:@mysqld.3.port
-wsrep_sst_receive_address='127.0.0.1:@mysqld.3.#sst_port'
+wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port'
+wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=10M'
+wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port
+wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port'
# enforce read-committed characteristics across the cluster
wsrep-causal-reads=ON
wsrep-sync-wait=15
+server-id=2
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
+
+[mysqld.3]
+log-bin=master-bin
+log-bin-index=master-bin
server-id=3
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
+
[ENV]
NODE_MYPORT_1= @mysqld.1.port
@@ -69,9 +80,3 @@ NODE_MYSOCK_2= @mysqld.2.socket
NODE_MYPORT_3= @mysqld.3.port
NODE_MYSOCK_3= @mysqld.3.socket
-
-NODE_GALERAPORT_2= @mysqld.2.#galera_port
-NODE_GALERAPORT_3= @mysqld.3.#galera_port
-
-NODE_SSTPORT_2= @mysqld.2.#sst_port
-NODE_SSTPORT_3= @mysqld.3.#sst_port
diff --git a/mysql-test/suite/galera/galera_3nodes_as_slave.cnf b/mysql-test/suite/galera/galera_3nodes_as_slave.cnf
index ac1ca34e242..c84c4b25d2a 100644
--- a/mysql-test/suite/galera/galera_3nodes_as_slave.cnf
+++ b/mysql-test/suite/galera/galera_3nodes_as_slave.cnf
@@ -8,14 +8,15 @@
[mysqld]
log-bin
binlog-format=row
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[mysqld.1]
-server-id=1
-
-[mysqld.2]
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
log-slave-updates
@@ -24,19 +25,23 @@ default-storage-engine=innodb
wsrep-provider=@ENV.WSREP_PROVIDER
wsrep_node_address=127.0.0.1
wsrep-cluster-address=gcomm://
-wsrep_provider_options='base_port=@mysqld.2.#galera_port;evs.install_timeout = PT15S;evs.max_install_timeouts=1;gcache.size=10M'
-wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port
-wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port'
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;evs.install_timeout = PT15S;evs.max_install_timeouts=1;gcache.size=10M'
+wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port
+wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port'
# enforce read-committed characteristics across the cluster
wsrep-causal-reads=ON
wsrep-sync-wait=15
-server-id=2
+server-id=1
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
-[mysqld.3]
+[mysqld.2]
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
log-slave-updates
@@ -44,20 +49,24 @@ innodb-autoinc-lock-mode=2
default-storage-engine=innodb
wsrep-provider=@ENV.WSREP_PROVIDER
wsrep_node_address=127.0.0.1
-wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.2.#galera_port'
-wsrep_provider_options='base_port=@mysqld.3.#galera_port;evs.install_timeout=PT15S;evs.max_install_timeouts=1;gcache.size=10M'
-wsrep_node_incoming_address=127.0.0.1:@mysqld.3.port
-wsrep_sst_receive_address='127.0.0.1:@mysqld.3.#sst_port'
+wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port'
+wsrep_provider_options='base_port=@mysqld.2.#galera_port;evs.install_timeout=PT15S;evs.max_install_timeouts=1;gcache.size=10M'
+wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port
+wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port'
# enforce read-committed characteristics across the cluster
wsrep-causal-reads=ON
wsrep-sync-wait=15
-server-id=3
+server-id=2
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
-[mysqld.4]
+[mysqld.3]
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
log-slave-updates
@@ -65,15 +74,24 @@ innodb-autoinc-lock-mode=2
default-storage-engine=innodb
wsrep-provider=@ENV.WSREP_PROVIDER
wsrep_node_address=127.0.0.1
-wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.2.#galera_port'
-wsrep_provider_options='base_port=@mysqld.4.#galera_port;evs.install_timeout=PT15S;evs.max_install_timeouts=1;gcache.size=10M'
-wsrep_node_incoming_address=127.0.0.1:@mysqld.4.port
-wsrep_sst_receive_address='127.0.0.1:@mysqld.4.#sst_port'
+wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port'
+wsrep_provider_options='base_port=@mysqld.3.#galera_port;evs.install_timeout=PT15S;evs.max_install_timeouts=1;gcache.size=10M'
+wsrep_node_incoming_address=127.0.0.1:@mysqld.3.port
+wsrep_sst_receive_address='127.0.0.1:@mysqld.3.#sst_port'
# enforce read-committed characteristics across the cluster
wsrep-causal-reads=ON
wsrep-sync-wait=15
+server-id=3
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
+
+[mysqld.4]
server-id=4
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[ENV]
NODE_MYPORT_1= @mysqld.1.port
@@ -87,11 +105,3 @@ NODE_MYSOCK_3= @mysqld.3.socket
NODE_MYPORT_4= @mysqld.4.port
NODE_MYSOCK_4= @mysqld.4.socket
-
-NODE_GALERAPORT_2= @mysqld.2.#galera_port
-NODE_GALERAPORT_3= @mysqld.3.#galera_port
-NODE_GALERAPORT_4= @mysqld.4.#galera_port
-
-NODE_SSTPORT_2= @mysqld.2.#sst_port
-NODE_SSTPORT_3= @mysqld.3.#sst_port
-NODE_SSTPORT_4= @mysqld.4.#sst_port
diff --git a/mysql-test/suite/galera/galera_4nodes.cnf b/mysql-test/suite/galera/galera_4nodes.cnf
index 1c195afd54b..7f59f75b2dc 100644
--- a/mysql-test/suite/galera/galera_4nodes.cnf
+++ b/mysql-test/suite/galera/galera_4nodes.cnf
@@ -5,50 +5,68 @@
binlog-format=row
innodb-autoinc-lock-mode=2
default-storage-engine=innodb
-wsrep-on=1
wsrep-provider=@ENV.WSREP_PROVIDER
wsrep_node_address=127.0.0.1
# enforce read-committed characteristics across the cluster
wsrep-causal-reads=ON
wsrep-sync-wait=15
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[mysqld.1]
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep-cluster-address=gcomm://
wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=10M'
wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port
wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port'
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[mysqld.2]
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port'
wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=10M'
wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port
wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port'
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[mysqld.3]
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port'
wsrep_provider_options='base_port=@mysqld.3.#galera_port;gcache.size=10M'
wsrep_node_incoming_address=127.0.0.1:@mysqld.3.port
wsrep_sst_receive_address='127.0.0.1:@mysqld.3.#sst_port'
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[mysqld.4]
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port'
wsrep_provider_options='base_port=@mysqld.4.#galera_port;gcache.size=10M'
wsrep_node_incoming_address=127.0.0.1:@mysqld.4.port
wsrep_sst_receive_address='127.0.0.1:@mysqld.4.#sst_port'
+# lock schedule alg appears to be VATS by default, and it is not
+# yet compatible with galera
+innodb_lock_schedule_algorithm=FCFS
[ENV]
NODE_MYPORT_1= @mysqld.1.port
@@ -62,13 +80,3 @@ NODE_MYSOCK_3= @mysqld.3.socket
NODE_MYPORT_4= @mysqld.4.port
NODE_MYSOCK_4= @mysqld.4.socket
-
-NODE_GALERAPORT_1= @mysqld.1.#galera_port
-NODE_GALERAPORT_2= @mysqld.2.#galera_port
-NODE_GALERAPORT_3= @mysqld.3.#galera_port
-NODE_GALERAPORT_4= @mysqld.4.#galera_port
-
-NODE_SSTPORT_1= @mysqld.1.#sst_port
-NODE_SSTPORT_2= @mysqld.2.#sst_port
-NODE_SSTPORT_3= @mysqld.3.#sst_port
-NODE_SSTPORT_4= @mysqld.4.#sst_port
diff --git a/mysql-test/suite/galera/include/galera_base_port.inc b/mysql-test/suite/galera/include/galera_base_port.inc
new file mode 100644
index 00000000000..caf986ee950
--- /dev/null
+++ b/mysql-test/suite/galera/include/galera_base_port.inc
@@ -0,0 +1,8 @@
+#
+# Extract base_port from galera node.
+#
+
+# Convert "... base_port = N; ..." to "N; ..."
+--let $s1 = `SELECT SUBSTR(@@wsrep_provider_options, LOCATE('base_port =', @@wsrep_provider_options) + LENGTH('base_port = '))`
+# Convert "N; ..." to "N"
+--let $_NODE_GALERAPORT = `SELECT SUBSTR('$s1', 1, LOCATE(';', '$s1') - 1)`
diff --git a/mysql-test/suite/galera/include/galera_concurrent_test.inc b/mysql-test/suite/galera/include/galera_concurrent_test.inc
new file mode 100644
index 00000000000..3d1bc7674a1
--- /dev/null
+++ b/mysql-test/suite/galera/include/galera_concurrent_test.inc
@@ -0,0 +1,90 @@
+#
+# Perform a quick concurrent test on two nodes using a set of predefined statements.
+#
+# Such tests are not deterministic, so we are hoping to catch assertions, slave apply errors
+# and cases where the two nodes diverge
+#
+# Parameters:
+# - $wsrep_trx_fragment_size
+# - $count
+# - $query_node_1
+# - $query_node_1a (optional)
+# - $query_node_2
+#
+
+if (!$count) {
+ --let $count = 50;
+}
+
+if (!$node_1a_connected) {
+ --connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+ --let $node_1a_connected = 1
+}
+
+--echo Running a concurrent test with the following queries:
+--echo $query_node_1
+--echo $query_node_1a
+--echo $query_node_2
+
+--connection node_1
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(255)) ENGINE=InnoDB;
+
+SET SESSION wsrep_sync_wait = 0;
+--eval SET SESSION wsrep_trx_fragment_size = $wsrep_trx_fragment_size;
+
+--connection node_1a
+SET SESSION wsrep_sync_wait = 0;
+--eval SET SESSION wsrep_trx_fragment_size = $wsrep_trx_fragment_size;
+
+--connection node_2
+SET SESSION wsrep_sync_wait = 0;
+--eval SET SESSION wsrep_trx_fragment_size = $wsrep_trx_fragment_size;
+
+--disable_query_log
+--let $i = `SELECT $count`
+while ($i)
+{
+ --connection node_1
+ --send_eval $query_node_1
+
+ --connection node_1a
+ if ($query_node_1a) {
+ --send_eval $query_node_1a
+ }
+
+ --connection node_2
+ --send_eval $query_node_2
+
+ --connection node_1
+ --error 0,ER_QUERY_INTERRUPTED,ER_LOCK_DEADLOCK,ER_DUP_ENTRY
+ --reap
+
+ --connection node_1a
+ if ($query_node_1a) {
+ --error 0,ER_QUERY_INTERRUPTED,ER_LOCK_DEADLOCK,ER_DUP_ENTRY
+ --reap
+ }
+
+ --connection node_2
+ --error 0,ER_QUERY_INTERRUPTED,ER_LOCK_DEADLOCK,ER_DUP_ENTRY
+ --reap
+ --dec $i
+}
+
+SET SESSION wsrep_sync_wait = 15;
+--enable_query_log
+
+--let $diff_servers = 1 2
+--source include/diff_servers.inc
+
+DROP TABLE t1;
+
+--let $query_node_1 = ""
+--let $query_node_1a = ""
+--let $query_node_2 = ""
+
+--echo Concurrent test end
diff --git a/mysql-test/suite/galera/include/galera_dump_sr_table.inc b/mysql-test/suite/galera/include/galera_dump_sr_table.inc
new file mode 100644
index 00000000000..1e6ff5548d2
--- /dev/null
+++ b/mysql-test/suite/galera/include/galera_dump_sr_table.inc
@@ -0,0 +1,28 @@
+#
+# Dump the contents of the SR table using mysqldump
+#
+
+--let $sr_min = `SELECT MIN(seqno) FROM mysql.wsrep_streaming_log`
+--let $sr_max = `SELECT MAX(seqno) FROM mysql.wsrep_streaming_log`
+
+--let $seqno = $sr_min
+while ($seqno <= $sr_max)
+{
+ --let $sr_fragment_file = $MYSQLTEST_VARDIR/tmp/sr_fragment.log
+ --exec rm -rf $sr_fragment_file
+ --disable_query_log
+ --eval SELECT frag INTO DUMPFILE '$sr_fragment_file' FROM mysql.wsrep_streaming_log WHERE seqno = $seqno
+ --enable_query_log
+
+ --let $sr_binlog_file = $MYSQLTEST_VARDIR/tmp/sr_binlog.log
+ --exec rm -rf $sr_binlog_file
+
+ --exec cp std_data/binlog-header.binlog $sr_binlog_file
+ --exec cat $sr_fragment_file >> $sr_binlog_file
+
+ --replace_regex /SET TIMESTAMP=[0-9]+/SET TIMESTAMP=<TIMESTAMP>/ /#[0-9]+ +[0-9]+:[0-9]+:[0-9]+/<ISO TIMESTAMP>/ /pseudo_thread_id=[0-9]+/pseudo_thread_id=<PSEUDO_THREAD_ID>/ /thread_id=[0-9]+/thread_id=<QUERY_THREAD_ID>/ /table id [0-9]+/table id <TABLE_ID>/ /mapped to number [0-9]+/mapped to number <TABLE_ID>/ /auto_increment_increment=[0-9]+/auto_increment_increment=<AUTO_INCREMENT_INCREMENT>/ /auto_increment_offset=[0-9]+/auto_increment_offset=<AUTO_INCREMENT_OFFSET>/ /exec_time=[0-9]+/exec_time=<EXEC_TIME>/
+ --exec $MYSQL_BINLOG $sr_binlog_file --base64-output=decode-rows --start-position=256 --skip-annotate-row-events | grep -v 'SET @' 2>&1
+
+ --inc $seqno
+}
+
diff --git a/mysql-test/suite/galera/include/galera_load_provider.inc b/mysql-test/suite/galera/include/galera_load_provider.inc
index aeab7e6ea19..0f843597d9c 100644
--- a/mysql-test/suite/galera/include/galera_load_provider.inc
+++ b/mysql-test/suite/galera/include/galera_load_provider.inc
@@ -2,7 +2,75 @@
--disable_query_log
--eval SET GLOBAL wsrep_provider = '$wsrep_provider_orig';
+
+#
+# count occurrences of successful node starts in the error log
+#
+perl;
+ use strict;
+ my $test_log=$ENV{'LOG_FILE'} or die "LOG_FILE not set";
+ my $test_log_copy=$test_log . '.copy';
+ if (-e $test_log_copy) {
+ unlink $test_log_copy;
+ }
+
+EOF
+--copy_file $LOG_FILE $LOG_FILE.copy
+
+#
+# now join to the cluster
+#
--eval SET GLOBAL wsrep_cluster_address = '$wsrep_cluster_address_orig';
+
+--enable_query_log
+
+#
+# The cluster address change above will launch SST/IST.
+# If mysqldump SST has been configured, mysqld will close all
+# client connections, and it will be hard for us to poll for
+# cluster status during the SST process, so wait_until_connected_again.inc
+# and wait_until_ready.inc may fail in this phase.
+# To work around this, we first poll lazily here just to see when
+# client connections become possible, and after that check for node readiness.
+#
+--disable_result_log
+--disable_query_log
+
+--error 0,1
+perl;
+ use strict;
+ my $logfile = $ENV{'LOG_FILE'} or die ("no error log file set");
+
+ my $counter = 1000;
+ #my $found = false
+
+ while ($counter > 0) {
+
+ open(FILE, "$logfile") or die("Unable to open $logfile : $!\n");
+ my $new_sync_count = () = grep(/Synchronized with group/g,<FILE>);
+ close(FILE);
+
+ open(FILEN, "$logfile.copy") or die("Unable to open $logfile.copy : $!\n");
+ my $old_sync_count = () = grep(/Synchronized with group/g,<FILEN>);
+ close(FILEN);
+
+ if ($new_sync_count > $old_sync_count ) {
+ exit(0);
+ }
+ $counter--;
+ sleep(5);
+ }
+ exit(1);
+EOF
+if ($errno)
+{
+--echo "SST failed $errno"
+}
+
+--remove_file $LOG_FILE.copy
+
--enable_query_log
+--enable_result_log
+#--eval SET GLOBAL log_error = $log_error_;
--source include/galera_wait_ready.inc
diff --git a/mysql-test/suite/galera/include/galera_sst_restore.inc b/mysql-test/suite/galera/include/galera_sst_restore.inc
index 7c9a08090ad..83d07f086d1 100644
--- a/mysql-test/suite/galera/include/galera_sst_restore.inc
+++ b/mysql-test/suite/galera/include/galera_sst_restore.inc
@@ -20,7 +20,7 @@ CALL mtr.add_suppression("Can't open and lock time zone table");
CALL mtr.add_suppression("Can't open and lock privilege tables");
CALL mtr.add_suppression("Info table is not ready to be used");
CALL mtr.add_suppression("Native table .* has the wrong structure");
-
+CALL mtr.add_suppression("Table \'mysql.gtid_slave_pos\' doesn\'t exist");
--disable_query_log
--eval SET GLOBAL wsrep_sst_method = '$wsrep_sst_method_orig';
--eval SET GLOBAL wsrep_sst_receive_address = '$wsrep_sst_receive_address_orig';
diff --git a/mysql-test/suite/galera/include/galera_st_disconnect_slave.inc b/mysql-test/suite/galera/include/galera_st_disconnect_slave.inc
index c8869746bd1..d6d7552f7b6 100644
--- a/mysql-test/suite/galera/include/galera_st_disconnect_slave.inc
+++ b/mysql-test/suite/galera/include/galera_st_disconnect_slave.inc
@@ -55,6 +55,14 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
--connection node_2
--source suite/galera/include/galera_load_provider.inc
+#
+# client connections were killed by provider load, so have to re-open here
+#
+--disconnect node_2
+--connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connection node_2
+--enable_reconnect
+
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
diff --git a/mysql-test/suite/galera/include/galera_unload_provider.inc b/mysql-test/suite/galera/include/galera_unload_provider.inc
index edc7eb31e0e..cd841f51fbc 100644
--- a/mysql-test/suite/galera/include/galera_unload_provider.inc
+++ b/mysql-test/suite/galera/include/galera_unload_provider.inc
@@ -3,5 +3,13 @@
--let $wsrep_cluster_address_orig = `SELECT @@wsrep_cluster_address`
--let $wsrep_provider_orig = `SELECT @@wsrep_provider`
--let $wsrep_provider_options_orig = `SELECT @@wsrep_provider_options`
+--let $wsrep_log_error_orig = `SELECT @@log_error`
+if(!$wsrep_log_error_orig)
+{
+ # MySQL Server on windows is started with --console and thus
+ # does not know the location of its .err log, use default location
+ let $wsrep_log_error_orig = $MYSQLTEST_VARDIR/log/mysqld.2.err;
+}
+--let LOG_FILE= $wsrep_log_error_orig
SET GLOBAL wsrep_provider = 'none';
diff --git a/mysql-test/suite/galera/r/GAL-382.result b/mysql-test/suite/galera/r/GAL-382.result
index fb7c229bd56..137efe4efba 100644
--- a/mysql-test/suite/galera/r/GAL-382.result
+++ b/mysql-test/suite/galera/r/GAL-382.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
create table t1 (i int, j int, k int, primary key pk(i)) engine=innodb;
insert into t1 values (1, 1, 1), (2, 2, 2), (3, 3, 3);
diff --git a/mysql-test/suite/galera/r/GAL-401.result b/mysql-test/suite/galera/r/GAL-401.result
index 3b55b7589b7..3dfc32ffb8c 100644
--- a/mysql-test/suite/galera/r/GAL-401.result
+++ b/mysql-test/suite/galera/r/GAL-401.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SET GLOBAL wsrep_provider_options = 'pc.ignore_sb=true';
connection node_2;
SET @@global.wsrep_desync = 1;
diff --git a/mysql-test/suite/galera/r/GAL-480.result b/mysql-test/suite/galera/r/GAL-480.result
index 143f48a69e3..8a4f8edcdd6 100644
--- a/mysql-test/suite/galera/r/GAL-480.result
+++ b/mysql-test/suite/galera/r/GAL-480.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 CHAR(10), f0 integer) ENGINE=InnoDB;
FLUSH TABLE t1 FOR EXPORT;
diff --git a/mysql-test/suite/galera/r/GCF-1081.result b/mysql-test/suite/galera/r/GCF-1081.result
new file mode 100644
index 00000000000..ede512ec6b1
--- /dev/null
+++ b/mysql-test/suite/galera/r/GCF-1081.result
@@ -0,0 +1,47 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 0), (3, 0);
+CREATE PROCEDURE proc_update ()
+BEGIN
+UPDATE t1 SET f2 = 1 where f1 > 0;
+END|
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
+connection node_1;
+CALL proc_update ();;
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+connection node_1a;
+SET GLOBAL DEBUG = 'd,sync.wsrep_before_BF_victim_unlock';
+Warnings:
+Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead
+connection node_2;
+INSERT INTO t1 VALUES (2, 2);;
+connection node_1a;
+SET SESSION DEBUG_SYNC = 'now WAIT_FOR sync.wsrep_before_BF_victim_unlock_reached';
+SET GLOBAL DEBUG = '';
+Warnings:
+Warning 1287 '@@debug' is deprecated and will be removed in a future release. Please use '@@debug_dbug' instead
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 1
+2 2
+3 1
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 1
+2 2
+3 1
+wsrep_local_replays
+1
+DROP PROCEDURE proc_update;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/GCF-939.result b/mysql-test/suite/galera/r/GCF-939.result
new file mode 100644
index 00000000000..24d4eab67e5
--- /dev/null
+++ b/mysql-test/suite/galera/r/GCF-939.result
@@ -0,0 +1,13 @@
+connection node_2;
+connection node_1;
+connection node_1;
+DROP TABLE t1;
+ERROR 42S02: Unknown table 'test.t1'
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+GRA_.log
+GRA_.log
+DROP TABLE t1;
+CALL mtr.add_suppression("Ignoring error 'Unknown table 'test.t1'' on query");
+connection node_2;
+CALL mtr.add_suppression("Error 'Unknown table 'test.t1'' on query");
diff --git a/mysql-test/suite/galera/r/MDEV-15443.result b/mysql-test/suite/galera/r/MDEV-15443.result
index 618e5459878..21332b372e8 100644
--- a/mysql-test/suite/galera/r/MDEV-15443.result
+++ b/mysql-test/suite/galera/r/MDEV-15443.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/MDEV-16509.result b/mysql-test/suite/galera/r/MDEV-16509.result
new file mode 100644
index 00000000000..571952ddf96
--- /dev/null
+++ b/mysql-test/suite/galera/r/MDEV-16509.result
@@ -0,0 +1,75 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_sync_wait = 0;
+connection node_1;
+SET SESSION wsrep_sync_wait = 0;
+SET DEBUG_SYNC = "wsrep_before_commit_order_leave SIGNAL bcol_reached WAIT_FOR bcol_continue";
+SET DEBUG_SYNC = "wsrep_after_commit_order_leave SIGNAL acol_reached WAIT_FOR acol_continue";
+SET DEBUG_SYNC = "after_group_after_commit SIGNAL after_group_reached WAIT_FOR after_group_continue";
+INSERT INTO t1 VALUES (1);
+connection ctrl;
+SET DEBUG_SYNC = "now WAIT_FOR bcol_reached";
+wsrep_last_seen_gtid_match
+1
+SELECT * FROM t1;
+f1
+1
+SET DEBUG_SYNC = "now SIGNAL bcol_continue";
+SET DEBUG_SYNC = "now WAIT_FOR acol_reached";
+wsrep_last_seen_gtid_match
+1
+SELECT * FROM t1;
+f1
+1
+SET DEBUG_SYNC = "now SIGNAL acol_continue";
+SET DEBUG_SYNC = "now WAIT_FOR after_group_reached";
+wsrep_last_seen_gtid_do_not_match
+1
+SET DEBUG_SYNC = "now SIGNAL after_group_continue";
+connection node_1;
+SET SESSION wsrep_sync_wait = 0;
+connection ctrl;
+connection node_1;
+SET DEBUG_SYNC = "wsrep_before_commit_order_leave SIGNAL bcol_reached_1 WAIT_FOR bcol_continue_1";
+SET DEBUG_SYNC = "wsrep_after_commit_order_leave SIGNAL acol_reached_1 WAIT_FOR acol_continue_1";
+SET DEBUG_SYNC = "after_group_after_commit SIGNAL agac_reached_1 WAIT_FOR agac_continue_1";
+INSERT INTO t1 VALUES (2);;
+connection ctrl;
+SET DEBUG_SYNC = "now WAIT_FOR bcol_reached_1";
+wsrep_last_seen_gtid_match
+1
+connection node_1a;
+SET DEBUG_SYNC = "wsrep_before_commit_order_leave SIGNAL bcol_reached_2 WAIT_FOR bcol_continue_2";
+SET DEBUG_SYNC = "wsrep_after_commit_order_leave SIGNAL acol_reached_2 WAIT_FOR acol_continue_2";
+SET DEBUG_SYNC = "after_group_after_commit SIGNAL agac_reached_2 WAIT_FOR agac_continue_2";
+INSERT INTO t1 VALUES (3);;
+connection ctrl;
+SET DEBUG_SYNC = "now SIGNAL bcol_continue_1";
+SET DEBUG_SYNC = "now WAIT_FOR acol_reached_1";
+SET DEBUG_SYNC = "now WAIT_FOR bcol_reached_2";
+wsrep_last_seen_gtid_match
+1
+SET DEBUG_SYNC = "now SIGNAL bcol_continue_2";
+SET DEBUG_SYNC = "now WAIT_FOR acol_reached_2";
+wsrep_last_seen_gtid_match
+1
+SET DEBUG_SYNC = "now SIGNAL acol_continue_1";
+SET DEBUG_SYNC = "now WAIT_FOR agac_reached_1";
+wsrep_last_seen_gtid_no_match
+1
+SET DEBUG_SYNC = "now SIGNAL acol_continue_2";
+SET DEBUG_SYNC = "now WAIT_FOR agac_reached_2";
+wsrep_last_seen_gtid_no_match
+1
+SET DEBUG_SYNC = "now SIGNAL agac_continue_1";
+SET DEBUG_SYNC = "now SIGNAL agac_continue_2";
+connection node_1;
+connection node_1a;
+connection ctrl;
+SET DEBUG_SYNC = "RESET";
+DROP TABLE t1;
+disconnect ctrl;
+disconnect node_1a;
+disconnect node_2;
+disconnect node_1;
diff --git a/mysql-test/suite/galera/r/MW-252.result b/mysql-test/suite/galera/r/MW-252.result
index 795d3fff670..4d458802614 100644
--- a/mysql-test/suite/galera/r/MW-252.result
+++ b/mysql-test/suite/galera/r/MW-252.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
FLUSH TABLES WITH READ LOCK;
diff --git a/mysql-test/suite/galera/r/MW-258.result b/mysql-test/suite/galera/r/MW-258.result
index 1c2a1744c98..22963557daf 100644
--- a/mysql-test/suite/galera/r/MW-258.result
+++ b/mysql-test/suite/galera/r/MW-258.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER);
LOCK TABLE t1 WRITE;
diff --git a/mysql-test/suite/galera/r/MW-259.result b/mysql-test/suite/galera/r/MW-259.result
index 5256a95c52c..9a0f2ccfa23 100644
--- a/mysql-test/suite/galera/r/MW-259.result
+++ b/mysql-test/suite/galera/r/MW-259.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1;
diff --git a/mysql-test/suite/galera/r/MW-284.result b/mysql-test/suite/galera/r/MW-284.result
index 0f6c0be25fe..11a0a7df387 100644
--- a/mysql-test/suite/galera/r/MW-284.result
+++ b/mysql-test/suite/galera/r/MW-284.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
@@ -24,7 +26,9 @@ RESET SLAVE ALL;
CALL mtr.add_suppression('failed registering on master');
CALL mtr.add_suppression('You need to use --log-bin to make --binlog-format work');
connection node_1;
+set global wsrep_on=OFF;
RESET MASTER;
+set global wsrep_on=ON;
CALL mtr.add_suppression('WSREP: Last Applied Action message in non-primary configuration from member');
connection node_2;
CALL mtr.add_suppression('WSREP: Last Applied Action message in non-primary configuration from member');
diff --git a/mysql-test/suite/galera/r/MW-285.result b/mysql-test/suite/galera/r/MW-285.result
index 8c5a21fcbee..762f22d5d25 100644
--- a/mysql-test/suite/galera/r/MW-285.result
+++ b/mysql-test/suite/galera/r/MW-285.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE parent1 ( id INT PRIMARY KEY, KEY (id) ) ENGINE=InnoDB;
CREATE TABLE parent2 ( id INT PRIMARY KEY, KEY (id) ) ENGINE=InnoDB;
CREATE TABLE child (
diff --git a/mysql-test/suite/galera/r/MW-286.result b/mysql-test/suite/galera/r/MW-286.result
index f3bef6f7516..b3accb1cd9b 100644
--- a/mysql-test/suite/galera/r/MW-286.result
+++ b/mysql-test/suite/galera/r/MW-286.result
@@ -1,15 +1,24 @@
+connection node_2;
+connection node_1;
connection node_1;
-CREATE TABLE ten (f1 INTEGER) Engine=InnoDB;
-INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
-INSERT INTO t1 (f1) SELECT 000000 + (10000 * a1.f1) + (1000 * a2.f1) + (100 * a3.f1) + (10 * a4.f1) + a5.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5;
-INSERT INTO t1 (f1) SELECT 100000 + (10000 * a1.f1) + (1000 * a2.f1) + (100 * a3.f1) + (10 * a4.f1) + a5.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5;;
+INSERT INTO t1 (f1) VALUES (1), (2), (3);
connection node_2;
SET GLOBAL wsrep_desync = TRUE;
SET wsrep_on = FALSE;
-ALTER TABLE t1 ADD PRIMARY KEY (f1);
+SET SESSION debug_sync = "alter_table_inplace_after_lock_upgrade SIGNAL mdl_locked WAIT_FOR mdl_continue";
+ALTER TABLE t1 ADD PRIMARY KEY (f1);;
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2a;
+SET SESSION debug_sync = "now WAIT_FOR mdl_locked";
+connection node_1;
+INSERT INTO t1(f1) VALUES (11);
+connection node_2a;
+SET debug_sync = "now SIGNAL mdl_continue";
+SET debug_sync='RESET';
+connection node_2;
+ERROR 70100: Query execution was interrupted
SET wsrep_on = TRUE;
SET GLOBAL wsrep_desync = FALSE;
connection node_1;
DROP TABLE t1;
-DROP TABLE ten;
diff --git a/mysql-test/suite/galera/r/MW-292.result b/mysql-test/suite/galera/r/MW-292.result
index 5b9214ace2a..81e5a316b63 100644
--- a/mysql-test/suite/galera/r/MW-292.result
+++ b/mysql-test/suite/galera/r/MW-292.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE rand_table (f1 FLOAT);
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1));
INSERT INTO t1 VALUES (1, 'a');
@@ -10,19 +12,28 @@ SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE;
f1 f2
2 a
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
-SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync';
-connection node_1;
-COMMIT;;
-connection node_1a;
-SET SESSION wsrep_sync_wait = 0;
-SET SESSION wsrep_on = 0;
-SET SESSION wsrep_on = 1;
+SET SESSION wsrep_sync_wait=0;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
connection node_2;
UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
+connection node_1;
+COMMIT;
connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
connection node_1;
SELECT TIMEDIFF(SYSDATE(), NOW()) < 2;
TIMEDIFF(SYSDATE(), NOW()) < 2
diff --git a/mysql-test/suite/galera/r/MW-309.result b/mysql-test/suite/galera/r/MW-309.result
index 3dd49a041ee..0169b56e3e1 100644
--- a/mysql-test/suite/galera/r/MW-309.result
+++ b/mysql-test/suite/galera/r/MW-309.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
INSERT INTO t1 SELECT * FROM t1;
diff --git a/mysql-test/suite/galera/r/MW-313.result b/mysql-test/suite/galera/r/MW-313.result
index dc605ffc370..909caf77f1d 100644
--- a/mysql-test/suite/galera/r/MW-313.result
+++ b/mysql-test/suite/galera/r/MW-313.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
INSERT INTO t1 SELECT * FROM t1;
diff --git a/mysql-test/suite/galera/r/MW-328A.result b/mysql-test/suite/galera/r/MW-328A.result
index db0301b6bf2..f4bb018b442 100644
--- a/mysql-test/suite/galera/r/MW-328A.result
+++ b/mysql-test/suite/galera/r/MW-328A.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 CHAR(20) DEFAULT 'abc') ENGINE=InnoDB;
INSERT INTO t1 (f1) VALUES (1);
CREATE TABLE t2 (f1 CHAR(20)) ENGINE=InnoDB;
@@ -14,10 +16,6 @@ connection node_1X;
CALL proc_update();;
connection node_2;
SET SESSION wsrep_retry_autocommit = 0;
-have_successes
-1
-have_deadlocks
-1
connection node_1;
connection node_1X;
Got one of the listed errors
@@ -25,3 +23,22 @@ connection node_1;
DROP PROCEDURE proc_update;
DROP TABLE t1, t2;
CALL mtr.add_suppression("conflict state 3 after post commit");
+connection node_1;
+CREATE TABLE t1 (i int primary key, j int) engine=innodb;
+INSERT INTO t1 values (1,0);
+BEGIN;
+UPDATE t1 SET j=1 WHERE i=1;
+connection node_2;
+UPDATE t1 SET j=2 WHERE i=1;
+connection node_1;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+SELECT * FROM t1;
+i j
+1 2
+connection node_2;
+SELECT * FROM t1;
+i j
+1 2
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/MW-328B.result b/mysql-test/suite/galera/r/MW-328B.result
index e898e315ca8..d29c3a50f3d 100644
--- a/mysql-test/suite/galera/r/MW-328B.result
+++ b/mysql-test/suite/galera/r/MW-328B.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 CHAR(20) DEFAULT 'abc') ENGINE=InnoDB;
INSERT INTO t1 (f1) VALUES (1);
CREATE TABLE t2 (f1 CHAR(20)) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/MW-328C.result b/mysql-test/suite/galera/r/MW-328C.result
index d8e164e7b4a..748f9420764 100644
--- a/mysql-test/suite/galera/r/MW-328C.result
+++ b/mysql-test/suite/galera/r/MW-328C.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 CHAR(20) DEFAULT 'abc') ENGINE=InnoDB;
INSERT INTO t1 (f1) VALUES (1);
CREATE TABLE t2 (f1 CHAR(20)) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/MW-328D.result b/mysql-test/suite/galera/r/MW-328D.result
index 6562136ec27..43e1cefe08f 100644
--- a/mysql-test/suite/galera/r/MW-328D.result
+++ b/mysql-test/suite/galera/r/MW-328D.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (i INT) ENGINE = InnoDB;
INSERT INTO t1 (i) VALUES(1);
CREATE TABLE t2 (i INT) ENGINE = InnoDB;
diff --git a/mysql-test/suite/galera/r/MW-328E.result b/mysql-test/suite/galera/r/MW-328E.result
index 89654ec066a..729fdea1a63 100644
--- a/mysql-test/suite/galera/r/MW-328E.result
+++ b/mysql-test/suite/galera/r/MW-328E.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
create table t1 (i int primary key, j int) engine=innodb;
create table t2 (i int primary key, j int) engine=innodb;
insert into t1 values (1,0);
diff --git a/mysql-test/suite/galera/r/MW-329.result b/mysql-test/suite/galera/r/MW-329.result
index a3cb7277a9c..334ff9f80fb 100644
--- a/mysql-test/suite/galera/r/MW-329.result
+++ b/mysql-test/suite/galera/r/MW-329.result
@@ -1,10 +1,6 @@
CALL mtr.add_suppression("WSREP: .*conflict state . after post commit .*");
CREATE TABLE t1 (f1 INTEGER, f2 CHAR(20) DEFAULT 'abc') ENGINE=InnoDB;
INSERT INTO t1 (f1) VALUES (1),(65535);
-FLUSH STATUS;
-SELECT VARIABLE_VALUE = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays';
-VARIABLE_VALUE = 0
-1
CREATE PROCEDURE proc_insert ()
BEGIN
DECLARE CONTINUE HANDLER FOR SQLEXCEPTION BEGIN END;
@@ -16,10 +12,7 @@ END|
connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1b;
CALL proc_insert();;
-connection node_2;
-CALL mtr.add_suppression("WSREP: Failed to report last committed .*");
-SELECT VARIABLE_VALUE > 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays';
-VARIABLE_VALUE > 0
+wsrep_local_replays
1
connection node_1;
connection node_1b;
diff --git a/mysql-test/suite/galera/r/MW-336.result b/mysql-test/suite/galera/r/MW-336.result
index 4d7d6440066..d344775648f 100644
--- a/mysql-test/suite/galera/r/MW-336.result
+++ b/mysql-test/suite/galera/r/MW-336.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
INSERT INTO t1 values(0);
connection node_1;
diff --git a/mysql-test/suite/galera/r/MW-357.result b/mysql-test/suite/galera/r/MW-357.result
index 35855e21233..dc391be4dd3 100644
--- a/mysql-test/suite/galera/r/MW-357.result
+++ b/mysql-test/suite/galera/r/MW-357.result
@@ -1,4 +1,6 @@
connection node_2;
+connection node_1;
+connection node_2;
SET GLOBAL wsrep_slave_threads = 0;
Warnings:
Warning 1292 Truncated incorrect wsrep_slave_threads value: '0'
diff --git a/mysql-test/suite/galera/r/MW-360.result b/mysql-test/suite/galera/r/MW-360.result
new file mode 100644
index 00000000000..f20d5be2135
--- /dev/null
+++ b/mysql-test/suite/galera/r/MW-360.result
@@ -0,0 +1,41 @@
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+CREATE TEMPORARY TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+DROP TABLE t1;
+CREATE TEMPORARY TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (2);
+DROP TABLE t1, t2;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+CREATE TEMPORARY TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (2);
+DROP TABLE t1, t2;
+CREATE TEMPORARY TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (2);
+CREATE TEMPORARY TABLE t3 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t3 VALUES (3);
+DROP TABLE t1, t2, t3;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+CREATE TEMPORARY TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (2);
+CREATE TABLE t3 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t3 VALUES (3);
+DROP TABLE t1, t2, t3;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+CREATE TEMPORARY TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (2);
+DROP TABLE t1;
+DROP TABLE t1;
+gtid_executed_equal
+1
diff --git a/mysql-test/suite/galera/r/MW-369.result b/mysql-test/suite/galera/r/MW-369.result
index 516904d1b2a..9f0a77edbbc 100644
--- a/mysql-test/suite/galera/r/MW-369.result
+++ b/mysql-test/suite/galera/r/MW-369.result
@@ -1,25 +1,36 @@
+connection node_2;
+connection node_1;
CREATE TABLE p (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
CREATE TABLE c (f1 INTEGER PRIMARY KEY, p_id INTEGER,
CONSTRAINT fk_1 FOREIGN KEY (p_id) REFERENCES p (f1)) ;
INSERT INTO p VALUES (1, 0);
INSERT INTO p VALUES (2, 0);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1;
SET AUTOCOMMIT=ON;
START TRANSACTION;
DELETE FROM p WHERE f1 = 1;
+connection node_1a;
SET SESSION wsrep_sync_wait = 0;
SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
INSERT INTO c VALUES (1, 1);
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
COMMIT;
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_2;
SELECT * FROM p;
f1 f2
1 0
@@ -29,6 +40,7 @@ f1 p_id
1 1
DROP TABLE c;
DROP TABLE p;
+connection node_1;
CREATE TABLE p (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
CREATE TABLE c (f1 INTEGER PRIMARY KEY, p_id INTEGER,
f2 INTEGER,
@@ -36,22 +48,30 @@ CONSTRAINT fk_1 FOREIGN KEY (p_id) REFERENCES p (f1)) ;
INSERT INTO p VALUES (1, 0);
INSERT INTO p VALUES (2, 0);
INSERT INTO c VALUES (1, 1, 0);
+connection node_1;
SET AUTOCOMMIT=ON;
START TRANSACTION;
UPDATE p SET f2 = 1 WHERE f1 = 1;
+connection node_1a;
SET SESSION wsrep_sync_wait = 0;
SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
UPDATE c SET f2 = 1 WHERE f1 = 1;
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
COMMIT;
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
+connection node_2;
SELECT * FROM p;
f1 f2
1 1
@@ -61,28 +81,37 @@ f1 p_id f2
1 1 1
DROP TABLE c;
DROP TABLE p;
+connection node_1;
CREATE TABLE p (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
CREATE TABLE c (f1 INTEGER PRIMARY KEY, p_id INTEGER,
CONSTRAINT fk_1 FOREIGN KEY (p_id) REFERENCES p (f1)) ;
INSERT INTO p VALUES (1, 0);
INSERT INTO p VALUES (2, 0);
INSERT INTO c VALUES (1, 1);
+connection node_1;
SET AUTOCOMMIT=ON;
START TRANSACTION;
UPDATE p SET f2 = 1 WHERE f1 = 1;
+connection node_1a;
SET SESSION wsrep_sync_wait = 0;
SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
DELETE FROM c WHERE f1 = 1;
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
COMMIT;
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
+connection node_2;
SELECT * FROM p;
f1 f2
1 1
@@ -95,23 +124,31 @@ CREATE TABLE p (f1 INTEGER PRIMARY KEY, f2 INTEGER UNIQUE KEY) ENGINE=INNODB;
CREATE TABLE c (f1 INTEGER PRIMARY KEY, p_id INTEGER,
CONSTRAINT fk_1 FOREIGN KEY (p_id) REFERENCES p (f2)) ;
INSERT INTO p VALUES (1, 0);
+connection node_1;
SET AUTOCOMMIT=ON;
START TRANSACTION;
UPDATE p SET f2 = 1 WHERE f1 = 1;
+connection node_1a;
SET SESSION wsrep_sync_wait = 0;
SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
INSERT INTO c VALUES (1, 0);;
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
COMMIT;
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_2;
SELECT * FROM p;
f1 f2
1 0
@@ -127,23 +164,31 @@ ON DELETE CASCADE) ;
INSERT INTO p VALUES (1, 0);
INSERT INTO p VALUES (2, 0);
INSERT INTO c VALUES (1, 1, 0);
+connection node_1;
SET AUTOCOMMIT=ON;
START TRANSACTION;
DELETE FROM p WHERE f1 = 1;
+connection node_1a;
SET SESSION wsrep_sync_wait = 0;
SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
UPDATE c SET f2 = 1 WHERE f1 = 1;
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
COMMIT;
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_2;
SELECT * FROM p;
f1 f2
1 0
@@ -153,3 +198,87 @@ f1 p_id f2
1 1 1
DROP TABLE c;
DROP TABLE p;
+#
+# Start of 10.4 tests
+#
+connection node_1;
+CREATE TABLE pf (f1 INTEGER PRIMARY KEY) ENGINE=INNODB;
+CREATE TABLE cf (
+f1 INTEGER PRIMARY KEY,
+p_id INTEGER,
+CONSTRAINT fk_1 FOREIGN KEY (p_id) REFERENCES pf (f1)
+);
+INSERT INTO pf VALUES (1);
+connection node_1;
+SET AUTOCOMMIT=ON;
+START TRANSACTION;
+INSERT INTO cf (f1, p_id) VALUES (10, 1);
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO cf (f1, p_id) VALUES (20, 1);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
+COMMIT;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
+connection node_2;
+SELECT * FROM pf;
+f1
+1
+SELECT * FROM cf;
+f1 p_id
+10 1
+20 1
+DROP TABLE cf;
+DROP TABLE pf;
+connection node_1;
+CREATE TABLE pg (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
+CREATE TABLE cg (f1 INTEGER PRIMARY KEY, p_id INTEGER,
+f2 INTEGER,
+CONSTRAINT fk_1 FOREIGN KEY (p_id) REFERENCES pg (f1)) ;
+INSERT INTO pg VALUES (1, 0);
+INSERT INTO pg VALUES (2, 0);
+connection node_1;
+SET AUTOCOMMIT=ON;
+START TRANSACTION;
+UPDATE pg SET f2 = 1 WHERE f1 = 1;
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO cg VALUES (1, 1, 0);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
+COMMIT;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
+connection node_2;
+SELECT * FROM pg;
+f1 f2
+1 1
+2 0
+SELECT * FROM cg;
+f1 p_id f2
+1 1 0
+DROP TABLE cg;
+DROP TABLE pg;
diff --git a/mysql-test/suite/galera/r/MW-388.result b/mysql-test/suite/galera/r/MW-388.result
index a2cf02712bb..ab78b989efe 100644
--- a/mysql-test/suite/galera/r/MW-388.result
+++ b/mysql-test/suite/galera/r/MW-388.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(255)) Engine=InnoDB;
CREATE PROCEDURE insert_proc ()
@@ -18,18 +20,18 @@ connection node_1a;
SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
connection node_1;
SET SESSION wsrep_sync_wait = 0;
-SET SESSION DEBUG_SYNC = 'wsrep_after_replication SIGNAL wsrep_after_replication_reached WAIT_FOR wsrep_after_replication_continue';
+SET SESSION DEBUG_SYNC = 'wsrep_after_certification SIGNAL wsrep_after_certification_reached WAIT_FOR wsrep_after_certification_continue';
CALL insert_proc ();;
connection node_1a;
-SET SESSION DEBUG_SYNC = "now WAIT_FOR wsrep_after_replication_reached";
+SET SESSION DEBUG_SYNC = "now WAIT_FOR wsrep_after_certification_reached";
SET GLOBAL DEBUG_DBUG = "";
-SET DEBUG_SYNC = "now SIGNAL wsrep_after_replication_continue";
+SET DEBUG_SYNC = "now SIGNAL wsrep_after_certification_continue";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
connection node_2;
connection node_1;
-SELECT @errno = 1213;
-@errno = 1213
-0
+SELECT @errno `expect 1213`;
+expect 1213
+1213
SELECT * FROM t1;
f1 f2
1 node 2
diff --git a/mysql-test/suite/galera/r/MW-402.result b/mysql-test/suite/galera/r/MW-402.result
index 9be98d629fb..f692c90d611 100644
--- a/mysql-test/suite/galera/r/MW-402.result
+++ b/mysql-test/suite/galera/r/MW-402.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1;
CREATE TABLE p (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
@@ -20,14 +22,14 @@ connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
connection node_1;
COMMIT;
connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
connection node_1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
@@ -60,14 +62,14 @@ connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
connection node_1;
COMMIT;
connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
connection node_1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
@@ -102,14 +104,14 @@ connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
connection node_1;
COMMIT;
connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
connection node_1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
@@ -135,14 +137,14 @@ connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
connection node_1;
COMMIT;
connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
connection node_1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
@@ -159,8 +161,10 @@ DROP TABLE p;
connection node_1;
CREATE TABLE p1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
CREATE TABLE p2 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
-CREATE TABLE c (f1 INTEGER PRIMARY KEY, p1_id INTEGER, p2_id INTEGER, f2 INTEGER,
-CONSTRAINT fk_1 FOREIGN KEY (p1_id) REFERENCES p1 (f1) ON DELETE CASCADE,
+CREATE TABLE c (f1 INTEGER PRIMARY KEY, p1_id INTEGER, p2_id INTEGER,
+f2 INTEGER,
+CONSTRAINT fk_1 FOREIGN KEY (p1_id) REFERENCES p1 (f1)
+ON DELETE CASCADE,
CONSTRAINT fk_2 FOREIGN KEY (p2_id) REFERENCES p2 (f1));
INSERT INTO p1 VALUES (1, 0);
INSERT INTO p2 VALUES (1, 0);
@@ -179,14 +183,14 @@ connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
connection node_1;
COMMIT;
connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
connection node_1;
connection node_2;
@@ -197,4 +201,52 @@ f1 f2
1 2
SELECT * FROM c;
f1 p1_id p2_id f2
+DROP TABLE c;
+DROP TABLE p1;
+DROP TABLE p2;
+connection node_1;
+CREATE TABLE p1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
+CREATE TABLE p2 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
+CREATE TABLE c (f1 INTEGER PRIMARY KEY, p1_id INTEGER, p2_id INTEGER,
+f2 INTEGER,
+CONSTRAINT fk_1 FOREIGN KEY (p1_id) REFERENCES p1 (f1)
+ON DELETE CASCADE,
+CONSTRAINT fk_2 FOREIGN KEY (p2_id) REFERENCES p2 (f1)
+ON DELETE CASCADE);
+INSERT INTO p1 VALUES (1, 0);
+INSERT INTO p2 VALUES (1, 0);
+INSERT INTO c VALUES (1, 1, 1, 0);
+connection node_1a;
+connection node_1;
+SET AUTOCOMMIT=ON;
+START TRANSACTION;
+DELETE FROM p2 WHERE f1=1;
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+DELETE FROM p1 WHERE f1=1;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
+COMMIT;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_2;
+SELECT * FROM p1;
+f1 f2
+SELECT * FROM p2;
+f1 f2
+1 0
+SELECT * FROM c;
+f1 p1_id p2_id f2
DROP TABLE c,p1,p2;
diff --git a/mysql-test/suite/galera/r/MW-416.result b/mysql-test/suite/galera/r/MW-416.result
index 05399b213a8..537e648df6b 100644
--- a/mysql-test/suite/galera/r/MW-416.result
+++ b/mysql-test/suite/galera/r/MW-416.result
@@ -109,6 +109,5 @@ mtr
mysql
performance_schema
test
-SHOW GLOBAL STATUS LIKE 'wsrep_replicated';
-Variable_name Value
-wsrep_replicated 3
+wsrep_replicated_after_diff
+1
diff --git a/mysql-test/suite/galera/r/MW-44.result b/mysql-test/suite/galera/r/MW-44.result
index a07719daca1..6973e4c78c9 100644
--- a/mysql-test/suite/galera/r/MW-44.result
+++ b/mysql-test/suite/galera/r/MW-44.result
@@ -1,5 +1,10 @@
+connection node_2;
+connection node_1;
+connection node_1;
TRUNCATE TABLE mysql.general_log;
+connection node_2;
TRUNCATE TABLE mysql.general_log;
+connection node_1;
SELECT Argument FROM mysql.general_log;
Argument
SET GLOBAL general_log='ON';
@@ -12,8 +17,10 @@ SELECT argument FROM mysql.general_log WHERE argument LIKE 'CREATE%' OR argument
argument
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB
ALTER TABLE t1 ADD COLUMN f2 INTEGER
+connection node_2;
SELECT Argument FROM mysql.general_log;
Argument
DROP TABLE t1;
SET GLOBAL general_log='OFF';
+connection node_1;
SET GLOBAL general_log='OFF';
diff --git a/mysql-test/suite/galera/r/MW-86-wait1.result b/mysql-test/suite/galera/r/MW-86-wait1.result
index 85729c6462e..8ad9c8bf181 100644
--- a/mysql-test/suite/galera/r/MW-86-wait1.result
+++ b/mysql-test/suite/galera/r/MW-86-wait1.result
@@ -1,9 +1,13 @@
connection node_2;
+connection node_1;
+SET @orig_debug=@@debug_dbug;
+connection node_2;
SELECT @@debug_sync;
@@debug_sync
ON - current signal: ''
+set debug_sync='RESET';
SET SESSION wsrep_sync_wait = 1;
-SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
+SET GLOBAL debug_dbug = "+d,sync.wsrep_apply_cb";
connection node_1;
CREATE TABLE t_wait1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t_wait1 VALUES (1);
@@ -34,11 +38,11 @@ SHOW TABLES;
SHOW TRIGGERS;
SHOW GLOBAL VARIABLES LIKE 'foo_bar';
SHOW WARNINGS;
-SET GLOBAL debug_dbug = "";
+SET GLOBAL debug_dbug = @orig_debug;
SET SESSION debug_sync = "now SIGNAL signal.wsrep_apply_cb";
+SET debug_sync='RESET';
SET SESSION wsrep_sync_wait = default;
DROP TABLE t_wait1;
-SET debug_sync='RESET';
SELECT @@debug_sync;
@@debug_sync
ON - current signal: ''
diff --git a/mysql-test/suite/galera/r/MW-86-wait8.result b/mysql-test/suite/galera/r/MW-86-wait8.result
index d1341542ef7..07e59ebbe0a 100644
--- a/mysql-test/suite/galera/r/MW-86-wait8.result
+++ b/mysql-test/suite/galera/r/MW-86-wait8.result
@@ -1,9 +1,12 @@
connection node_2;
+connection node_1;
+SET @orig_debug=@@debug_dbug;
+connection node_2;
SELECT @@debug_sync;
@@debug_sync
ON - current signal: ''
SET SESSION wsrep_sync_wait = 8;
-SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
+SET GLOBAL debug_dbug = "+d,sync.wsrep_apply_cb";
connection node_1;
CREATE TABLE t_wait8 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t_wait8 VALUES (1);
@@ -36,11 +39,11 @@ SHOW TABLES;
SHOW TRIGGERS;
SHOW GLOBAL VARIABLES LIKE 'foo_bar';
SHOW WARNINGS;
-SET GLOBAL debug_dbug = "";
+SET GLOBAL debug_dbug = @orig_debug;
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+SET debug_sync='RESET';
SET SESSION wsrep_sync_wait = default;
DROP TABLE t_wait8;
-SET debug_sync='RESET';
SELECT @@debug_sync;
@@debug_sync
ON - current signal: ''
diff --git a/mysql-test/suite/galera/r/basic.result b/mysql-test/suite/galera/r/basic.result
index e85c805253f..10f180e7a94 100644
--- a/mysql-test/suite/galera/r/basic.result
+++ b/mysql-test/suite/galera/r/basic.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
USE test;
CREATE TABLE t1(c1 INT PRIMARY KEY) ENGINE=INNODB;
INSERT INTO t1 VALUES (1), (2), (3), (4), (5);
diff --git a/mysql-test/suite/galera/r/binlog_checksum.result b/mysql-test/suite/galera/r/binlog_checksum.result
index e86f3892ac7..4106354eb7d 100644
--- a/mysql-test/suite/galera/r/binlog_checksum.result
+++ b/mysql-test/suite/galera/r/binlog_checksum.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
# On node_1
connection node_1;
SET @binlog_checksum_saved= @@GLOBAL.BINLOG_CHECKSUM;
diff --git a/mysql-test/suite/galera/r/create.result b/mysql-test/suite/galera/r/create.result
index a445b32e8bf..6ad94dd3d43 100644
--- a/mysql-test/suite/galera/r/create.result
+++ b/mysql-test/suite/galera/r/create.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# MDEV-6924 : Server crashed on CREATE TABLE ... SELECT
#
diff --git a/mysql-test/suite/galera/r/enforce_storage_engine.result b/mysql-test/suite/galera/r/enforce_storage_engine.result
index 746aa22bf20..1a453241427 100644
--- a/mysql-test/suite/galera/r/enforce_storage_engine.result
+++ b/mysql-test/suite/galera/r/enforce_storage_engine.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# MDEV-8831 : enforce_storage_engine doesn't block table creation on
# other nodes (galera cluster)
diff --git a/mysql-test/suite/galera/r/enforce_storage_engine2.result b/mysql-test/suite/galera/r/enforce_storage_engine2.result
index 128994ed221..8b174139eae 100644
--- a/mysql-test/suite/galera/r/enforce_storage_engine2.result
+++ b/mysql-test/suite/galera/r/enforce_storage_engine2.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# MDEV-9312: storage engine not enforced during galera cluster
# replication
diff --git a/mysql-test/suite/galera/r/ev51914.result b/mysql-test/suite/galera/r/ev51914.result
index 3f3d67d01ef..b9d8a82bfa0 100644
--- a/mysql-test/suite/galera/r/ev51914.result
+++ b/mysql-test/suite/galera/r/ev51914.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SAVEPOINT in a stored function should be forbidden
CREATE FUNCTION f1 () RETURNS INT BEGIN
diff --git a/mysql-test/suite/galera/r/fk.result b/mysql-test/suite/galera/r/fk.result
index ab8e1c8f680..17fc99a904e 100644
--- a/mysql-test/suite/galera/r/fk.result
+++ b/mysql-test/suite/galera/r/fk.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
USE test;
# On node_1
diff --git a/mysql-test/suite/galera/r/galera#414.result b/mysql-test/suite/galera/r/galera#414.result
index 34dcb6242d3..2c1dccfd131 100644
--- a/mysql-test/suite/galera/r/galera#414.result
+++ b/mysql-test/suite/galera/r/galera#414.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera#500.result b/mysql-test/suite/galera/r/galera#500.result
index 6a07d0359a4..a5ab0b19718 100644
--- a/mysql-test/suite/galera/r/galera#500.result
+++ b/mysql-test/suite/galera/r/galera#500.result
@@ -1,10 +1,18 @@
+connection node_2;
+connection node_1;
+connection node_1;
+connection node_2;
+connection node_2;
SET SESSION wsrep_sync_wait = 0;
SET GLOBAL wsrep_provider_options="gmcast.isolate=2";
+connection node_1;
SET SESSION wsrep_sync_wait = 0;
SHOW STATUS LIKE 'wsrep_cluster_status';
Variable_name Value
wsrep_cluster_status non-Primary
SET SESSION wsrep_sync_wait = default;
SET GLOBAL wsrep_provider_options="pc.bootstrap=1";
+connection node_2;
SET SESSION wsrep_on=0;
+connection node_2;
CALL mtr.add_suppression("WSREP: exception from gcomm, backend must be restarted: Gcomm backend termination was requested by setting gmcast.isolate=2.");
diff --git a/mysql-test/suite/galera/r/galera#505.result b/mysql-test/suite/galera/r/galera#505.result
index 8d3e3ec072a..bc7eb3b9ed4 100644
--- a/mysql-test/suite/galera/r/galera#505.result
+++ b/mysql-test/suite/galera/r/galera#505.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SET SESSION wsrep_sync_wait=0;
SET SESSION wsrep_sync_wait=DEFAULT;
diff --git a/mysql-test/suite/galera/r/galera_FK_duplicate_client_insert.result b/mysql-test/suite/galera/r/galera_FK_duplicate_client_insert.result
index 3eb638ca49a..5ae577a6323 100644
--- a/mysql-test/suite/galera/r/galera_FK_duplicate_client_insert.result
+++ b/mysql-test/suite/galera/r/galera_FK_duplicate_client_insert.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE user(id int primary key, j int) ENGINE=InnoDB;
CREATE TABLE user_session(id int primary key, fk1 int, fk2 int) ENGINE=InnoDB;
alter table user_session add foreign key (fk1) references user(id);
diff --git a/mysql-test/suite/galera/r/galera_admin.result b/mysql-test/suite/galera/r/galera_admin.result
index ed753247236..01e2aac16b2 100644
--- a/mysql-test/suite/galera/r/galera_admin.result
+++ b/mysql-test/suite/galera/r/galera_admin.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
DROP TABLE IF EXISTS t1, t2;
DROP TABLE IF EXISTS x1, x2;
connection node_1;
@@ -16,7 +18,9 @@ connection node_2;
connection node_1;
ANALYZE TABLE t1, t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
connection node_2;
# OPTIMIZE test
diff --git a/mysql-test/suite/galera/r/galera_alter_engine_innodb.result b/mysql-test/suite/galera/r/galera_alter_engine_innodb.result
index ff6ab792c0e..dfa30441f85 100644
--- a/mysql-test/suite/galera/r/galera_alter_engine_innodb.result
+++ b/mysql-test/suite/galera/r/galera_alter_engine_innodb.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
ALTER TABLE t1 ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_alter_engine_myisam.result b/mysql-test/suite/galera/r/galera_alter_engine_myisam.result
index 389383858ac..b3a9bdd30df 100644
--- a/mysql-test/suite/galera/r/galera_alter_engine_myisam.result
+++ b/mysql-test/suite/galera/r/galera_alter_engine_myisam.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SET GLOBAL wsrep_replicate_myisam = TRUE;
CREATE TABLE t1 (f1 INTEGER) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1);
diff --git a/mysql-test/suite/galera/r/galera_alter_table_force.result b/mysql-test/suite/galera/r/galera_alter_table_force.result
index d0a2f81b631..271796422cd 100644
--- a/mysql-test/suite/galera/r/galera_alter_table_force.result
+++ b/mysql-test/suite/galera/r/galera_alter_table_force.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
ALTER TABLE t1 FORCE;
diff --git a/mysql-test/suite/galera/r/galera_applier_ftwrl_table.result b/mysql-test/suite/galera/r/galera_applier_ftwrl_table.result
index a6607906661..11fda5d8aab 100644
--- a/mysql-test/suite/galera/r/galera_applier_ftwrl_table.result
+++ b/mysql-test/suite/galera/r/galera_applier_ftwrl_table.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SET SESSION wsrep_sync_wait = 0;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_applier_ftwrl_table_alter.result b/mysql-test/suite/galera/r/galera_applier_ftwrl_table_alter.result
index 9711100d155..a5aeb6eb366 100644
--- a/mysql-test/suite/galera/r/galera_applier_ftwrl_table_alter.result
+++ b/mysql-test/suite/galera/r/galera_applier_ftwrl_table_alter.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SET SESSION wsrep_sync_wait = 0;
SET SESSION lock_wait_timeout = 60;
diff --git a/mysql-test/suite/galera/r/galera_as_master.result b/mysql-test/suite/galera/r/galera_as_master.result
index 2a7262359fa..4aca328be56 100644
--- a/mysql-test/suite/galera/r/galera_as_master.result
+++ b/mysql-test/suite/galera/r/galera_as_master.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
START SLAVE;
connection node_1;
@@ -54,4 +56,6 @@ STOP SLAVE;
RESET SLAVE ALL;
CALL mtr.add_suppression('You need to use --log-bin to make --binlog-format work');
connection node_1;
+set global wsrep_on=OFF;
RESET MASTER;
+set global wsrep_on=ON;
diff --git a/mysql-test/suite/galera/r/galera_as_master_gtid.result b/mysql-test/suite/galera/r/galera_as_master_gtid.result
index 8dfe462d495..4f5c38b607a 100644
--- a/mysql-test/suite/galera/r/galera_as_master_gtid.result
+++ b/mysql-test/suite/galera/r/galera_as_master_gtid.result
@@ -5,55 +5,19 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES(1);
uuids_do_not_match
1
-SHOW BINLOG EVENTS IN 'mysqld-bin.000002' FROM 120;
-Log_name Pos Event_type Server_id End_log_pos Info
-mysqld-bin.000002 120 Previous_gtids 1 151
-mysqld-bin.000002 151 Gtid 1 199 SET @@SESSION.GTID_NEXT= '<effective_uuid>:1'
-mysqld-bin.000002 199 Query 1 327 use `test`; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB
-mysqld-bin.000002 327 Gtid 1 375 SET @@SESSION.GTID_NEXT= '<effective_uuid>:2'
-mysqld-bin.000002 375 Query 1 452 BEGIN
-mysqld-bin.000002 452 Table_map 1 497 table_id: # (test.t1)
-mysqld-bin.000002 497 Write_rows 1 537 table_id: # flags: STMT_END_F
-mysqld-bin.000002 537 Xid 1 568 COMMIT /* xid=# */
INSERT INTO t1 VALUES(2);
uuids_do_not_match
1
uuids_match
1
-SHOW BINLOG EVENTS IN 'mysqld-bin.000003' FROM 120;
-Log_name Pos Event_type Server_id End_log_pos Info
-mysqld-bin.000003 120 Previous_gtids 2 151
-mysqld-bin.000003 151 Gtid 1 199 SET @@SESSION.GTID_NEXT= '<effective_uuid>:1'
-mysqld-bin.000003 199 Query 1 327 use `test`; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB
-mysqld-bin.000003 327 Gtid 1 375 SET @@SESSION.GTID_NEXT= '<effective_uuid>:2'
-mysqld-bin.000003 375 Query 1 443 BEGIN
-mysqld-bin.000003 443 Table_map 1 488 table_id: # (test.t1)
-mysqld-bin.000003 488 Write_rows 1 528 table_id: # flags: STMT_END_F
-mysqld-bin.000003 528 Xid 1 559 COMMIT /* xid=# */
-mysqld-bin.000003 559 Gtid 2 607 SET @@SESSION.GTID_NEXT= '<effective_uuid>:3'
-mysqld-bin.000003 607 Query 2 684 BEGIN
-mysqld-bin.000003 684 Table_map 2 729 table_id: # (test.t1)
-mysqld-bin.000003 729 Write_rows 2 769 table_id: # flags: STMT_END_F
-mysqld-bin.000003 769 Xid 2 800 COMMIT /* xid=# */
uuids_do_not_match
1
uuids_match
1
-SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 120;
-Log_name Pos Event_type Server_id End_log_pos Info
-mysqld-bin.000001 120 Previous_gtids 3 151
-mysqld-bin.000001 151 Gtid 1 199 SET @@SESSION.GTID_NEXT= '<effective_uuid>:1'
-mysqld-bin.000001 199 Query 1 327 use `test`; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB
-mysqld-bin.000001 327 Gtid 1 375 SET @@SESSION.GTID_NEXT= '<effective_uuid>:2'
-mysqld-bin.000001 375 Query 1 443 BEGIN
-mysqld-bin.000001 443 Table_map 1 488 table_id: # (test.t1)
-mysqld-bin.000001 488 Write_rows 1 528 table_id: # flags: STMT_END_F
-mysqld-bin.000001 528 Xid 1 559 COMMIT /* xid=# */
-mysqld-bin.000001 559 Gtid 2 607 SET @@SESSION.GTID_NEXT= '<effective_uuid>:3'
-mysqld-bin.000001 607 Query 2 675 BEGIN
-mysqld-bin.000001 675 Table_map 2 720 table_id: # (test.t1)
-mysqld-bin.000001 720 Write_rows 2 760 table_id: # flags: STMT_END_F
-mysqld-bin.000001 760 Xid 2 791 COMMIT /* xid=# */
DROP TABLE t1;
+gtid_executed_equal
+1
+gtid_executed_equal
+1
STOP SLAVE;
RESET SLAVE ALL;
diff --git a/mysql-test/suite/galera/r/galera_as_master_large.result b/mysql-test/suite/galera/r/galera_as_master_large.result
index dad74211af9..826d2f32057 100644
--- a/mysql-test/suite/galera/r/galera_as_master_large.result
+++ b/mysql-test/suite/galera/r/galera_as_master_large.result
@@ -2,6 +2,8 @@
# MDEV-9044 : Getting binlog corruption on my Galera cluster (10.1.8)
# making it impossible to async slave.
#
+connection node_2;
+connection node_1;
connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
START SLAVE;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_as_slave.result b/mysql-test/suite/galera/r/galera_as_slave.result
index 9ccb5106234..391ceecd509 100644
--- a/mysql-test/suite/galera/r/galera_as_slave.result
+++ b/mysql-test/suite/galera/r/galera_as_slave.result
@@ -1,12 +1,14 @@
-connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2;
-START SLAVE;
connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_2;
+START SLAVE;
+connection node_3;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES(1);
connection node_2;
INSERT INTO t1 VALUES (2);
-connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
SELECT COUNT(*) = 2 FROM t1;
COUNT(*) = 2
1
@@ -15,10 +17,10 @@ connection node_2;
SELECT COUNT(*) = 3 FROM t1;
COUNT(*) = 3
1
-connection node_1;
+connection node_3;
DROP TABLE t1;
connection node_2;
STOP SLAVE;
RESET SLAVE ALL;
-connection node_1;
+connection node_3;
RESET MASTER;
diff --git a/mysql-test/suite/galera/r/galera_as_slave_autoinc.result b/mysql-test/suite/galera/r/galera_as_slave_autoinc.result
index 60f3216aa9c..6ac51f44950 100644
--- a/mysql-test/suite/galera/r/galera_as_slave_autoinc.result
+++ b/mysql-test/suite/galera/r/galera_as_slave_autoinc.result
@@ -1,7 +1,9 @@
-connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2;
-START SLAVE;
connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_2;
+START SLAVE;
+connection node_3;
SET SESSION binlog_format='STATEMENT';
CREATE TABLE t1 (
i int(11) NOT NULL AUTO_INCREMENT,
@@ -60,7 +62,7 @@ binlog_format ROW
show variables like 'auto_increment_increment';
Variable_name Value
auto_increment_increment 2
-connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
select * from t1;
i c
1 dummy_text
@@ -79,10 +81,10 @@ binlog_format ROW
show variables like 'auto_increment_increment';
Variable_name Value
auto_increment_increment 2
-connection node_1;
+connection node_3;
DROP TABLE t1;
connection node_2;
STOP SLAVE;
RESET SLAVE ALL;
-connection node_1;
+connection node_3;
RESET MASTER;
diff --git a/mysql-test/suite/galera/r/galera_as_slave_gtid.result b/mysql-test/suite/galera/r/galera_as_slave_gtid.result
index 0ef9d208bf4..180b72bf729 100644
--- a/mysql-test/suite/galera/r/galera_as_slave_gtid.result
+++ b/mysql-test/suite/galera/r/galera_as_slave_gtid.result
@@ -1,7 +1,9 @@
-connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2;
-START SLAVE;
connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_2;
+START SLAVE;
+connection node_3;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES(1);
SELECT LENGTH(@@global.gtid_binlog_state) > 1;
@@ -10,15 +12,15 @@ LENGTH(@@global.gtid_binlog_state) > 1
connection node_2;
gtid_binlog_state_equal
1
-connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
SELECT COUNT(*) = 1 FROM t1;
COUNT(*) = 1
1
gtid_binlog_state_equal
1
-connection node_1;
-DROP TABLE t1;
connection node_3;
+DROP TABLE t1;
+connection node_1;
connection node_2;
STOP SLAVE;
RESET SLAVE ALL;
diff --git a/mysql-test/suite/galera/r/galera_as_slave_gtid_replicate_do_db.result b/mysql-test/suite/galera/r/galera_as_slave_gtid_replicate_do_db.result
new file mode 100644
index 00000000000..9589d319991
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_as_slave_gtid_replicate_do_db.result
@@ -0,0 +1,159 @@
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+SET global wsrep_on=OFF;
+RESET MASTER;
+SET global wsrep_on=ON;
+connection node_3;
+RESET MASTER;
+connection node_2;
+SET global wsrep_on=OFF;
+RESET MASTER;
+SET global wsrep_on=ON;
+START SLAVE;
+connection node_3;
+CREATE SCHEMA test1;
+CREATE SCHEMA test2;
+USE test1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY,f2 CHAR(5) DEFAULT 'abc') ENGINE=InnoDB;
+USE test2;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY,f2 CHAR(5) DEFAULT 'abc') ENGINE=InnoDB;
+INSERT INTO test1.t1 (f1) VALUES (1);
+INSERT INTO test2.t1 (f1) VALUES (1);
+INSERT INTO test1.t1 (f1) VALUES (2);
+INSERT INTO test2.t1 (f1) VALUES (2);
+INSERT INTO test1.t1 (f1) VALUES (3);
+INSERT INTO test2.t1 (f1) VALUES (3);
+UPDATE test2.t1 SET test2.t1.f2 = 'cde';
+UPDATE test1.t1, test2.t1 SET test1.t1.f2 = 'klm', test2.t1.f2 = 'xyz';
+DELETE test1.t1, test2.t1 FROM test1.t1 INNER JOIN test2.t1 WHERE test1.t1.f1 = test2.t1.f1 AND test1.t1.f1 = 3;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO test2.t1 (f1) VALUES (999);
+INSERT INTO test2.t1 (f1) VALUES (9999);
+COMMIT;
+START TRANSACTION;
+INSERT INTO test1.t1 (f1) VALUES (111);
+INSERT INTO test1.t1 (f1) VALUES (222);
+COMMIT;
+START TRANSACTION;
+INSERT INTO test1.t1 (f1) VALUES (333);
+INSERT INTO test2.t1 (f1) VALUES (99999);
+COMMIT;
+connection node_2;
+SHOW BINLOG EVENTS IN 'master-bin.000001' FROM 256;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 256 Gtid_list 2 285 []
+master-bin.000001 285 Binlog_checkpoint 2 329 master-bin.000001
+master-bin.000001 329 Gtid 3 371 GTID 0-3-1
+master-bin.000001 371 Query 3 458 CREATE SCHEMA test1
+master-bin.000001 458 Gtid 3 500 GTID 0-3-3
+master-bin.000001 500 Query 3 647 use `test1`; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY,f2 CHAR(5) DEFAULT 'abc') ENGINE=InnoDB
+master-bin.000001 647 Gtid 3 689 BEGIN GTID 0-3-5
+master-bin.000001 689 Annotate_rows 3 748 INSERT INTO test1.t1 (f1) VALUES (1)
+master-bin.000001 748 Table_map 3 797 table_id: ### (test1.t1)
+master-bin.000001 797 Write_rows_v1 3 839 table_id: ### flags: STMT_END_F
+master-bin.000001 839 Xid 3 870 COMMIT /* xid=### */
+master-bin.000001 870 Gtid 3 912 BEGIN GTID 0-3-7
+master-bin.000001 912 Annotate_rows 3 971 INSERT INTO test1.t1 (f1) VALUES (2)
+master-bin.000001 971 Table_map 3 1020 table_id: ### (test1.t1)
+master-bin.000001 1020 Write_rows_v1 3 1062 table_id: ### flags: STMT_END_F
+master-bin.000001 1062 Xid 3 1093 COMMIT /* xid=### */
+master-bin.000001 1093 Gtid 3 1135 BEGIN GTID 0-3-9
+master-bin.000001 1135 Annotate_rows 3 1194 INSERT INTO test1.t1 (f1) VALUES (3)
+master-bin.000001 1194 Table_map 3 1243 table_id: ### (test1.t1)
+master-bin.000001 1243 Write_rows_v1 3 1285 table_id: ### flags: STMT_END_F
+master-bin.000001 1285 Xid 3 1316 COMMIT /* xid=### */
+master-bin.000001 1316 Gtid 3 1358 BEGIN GTID 0-3-12
+master-bin.000001 1358 Annotate_rows 3 1451 UPDATE test1.t1, test2.t1 SET test1.t1.f2 = 'klm', test2.t1.f2 = 'xyz'
+master-bin.000001 1451 Table_map 3 1500 table_id: ### (test1.t1)
+master-bin.000001 1500 Update_rows_v1 3 1588 table_id: ### flags: STMT_END_F
+master-bin.000001 1588 Xid 3 1619 COMMIT /* xid=### */
+master-bin.000001 1619 Gtid 3 1661 BEGIN GTID 0-3-13
+master-bin.000001 1661 Annotate_rows 3 1795 DELETE test1.t1, test2.t1 FROM test1.t1 INNER JOIN test2.t1 WHERE test1.t1.f1 = test2.t1.f1 AND test1.t1.f1 = 3
+master-bin.000001 1795 Table_map 3 1844 table_id: ### (test1.t1)
+master-bin.000001 1844 Delete_rows_v1 3 1886 table_id: ### flags: STMT_END_F
+master-bin.000001 1886 Xid 3 1917 COMMIT /* xid=### */
+master-bin.000001 1917 Gtid 3 1959 BEGIN GTID 0-3-15
+master-bin.000001 1959 Annotate_rows 3 2020 INSERT INTO test1.t1 (f1) VALUES (111)
+master-bin.000001 2020 Table_map 3 2069 table_id: ### (test1.t1)
+master-bin.000001 2069 Write_rows_v1 3 2111 table_id: ### flags: STMT_END_F
+master-bin.000001 2111 Annotate_rows 3 2172 INSERT INTO test1.t1 (f1) VALUES (222)
+master-bin.000001 2172 Table_map 3 2221 table_id: ### (test1.t1)
+master-bin.000001 2221 Write_rows_v1 3 2263 table_id: ### flags: STMT_END_F
+master-bin.000001 2263 Xid 3 2294 COMMIT /* xid=### */
+master-bin.000001 2294 Gtid 3 2336 BEGIN GTID <effective_uuid>
+master-bin.000001 2336 Annotate_rows 3 2397 INSERT INTO test1.t1 (f1) VALUES (333)
+master-bin.000001 2397 Table_map 3 2446 table_id: ### (test1.t1)
+master-bin.000001 2446 Write_rows_v1 3 2488 table_id: ### flags: STMT_END_F
+master-bin.000001 2488 Xid 3 2519 COMMIT /* xid=### */
+connection node_1;
+gtid_executed_equal
+0
+SHOW BINLOG EVENTS IN 'master-bin.000001' FROM 256;
+Log_name Pos Event_type Server_id End_log_pos Info
+master-bin.000001 256 Gtid_list 1 285 []
+master-bin.000001 285 Binlog_checkpoint 1 329 master-bin.000001
+master-bin.000001 329 Gtid 3 371 GTID 0-3-1
+master-bin.000001 371 Query 3 458 CREATE SCHEMA test1
+master-bin.000001 458 Gtid 3 500 GTID 0-3-2
+master-bin.000001 500 Query 3 647 use `test1`; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY,f2 CHAR(5) DEFAULT 'abc') ENGINE=InnoDB
+master-bin.000001 647 Gtid 3 689 BEGIN GTID 0-3-3
+master-bin.000001 689 Annotate_rows 3 748 INSERT INTO test1.t1 (f1) VALUES (1)
+master-bin.000001 748 Table_map 3 797 table_id: ### (test1.t1)
+master-bin.000001 797 Write_rows_v1 3 839 table_id: ### flags: STMT_END_F
+master-bin.000001 839 Xid 3 870 COMMIT /* xid=### */
+master-bin.000001 870 Gtid 3 912 BEGIN GTID 0-3-4
+master-bin.000001 912 Annotate_rows 3 971 INSERT INTO test1.t1 (f1) VALUES (2)
+master-bin.000001 971 Table_map 3 1020 table_id: ### (test1.t1)
+master-bin.000001 1020 Write_rows_v1 3 1062 table_id: ### flags: STMT_END_F
+master-bin.000001 1062 Xid 3 1093 COMMIT /* xid=### */
+master-bin.000001 1093 Gtid 3 1135 BEGIN GTID 0-3-5
+master-bin.000001 1135 Annotate_rows 3 1194 INSERT INTO test1.t1 (f1) VALUES (3)
+master-bin.000001 1194 Table_map 3 1243 table_id: ### (test1.t1)
+master-bin.000001 1243 Write_rows_v1 3 1285 table_id: ### flags: STMT_END_F
+master-bin.000001 1285 Xid 3 1316 COMMIT /* xid=### */
+master-bin.000001 1316 Gtid 3 1358 BEGIN GTID 0-3-6
+master-bin.000001 1358 Annotate_rows 3 1451 UPDATE test1.t1, test2.t1 SET test1.t1.f2 = 'klm', test2.t1.f2 = 'xyz'
+master-bin.000001 1451 Table_map 3 1500 table_id: ### (test1.t1)
+master-bin.000001 1500 Update_rows_v1 3 1588 table_id: ### flags: STMT_END_F
+master-bin.000001 1588 Xid 3 1619 COMMIT /* xid=### */
+master-bin.000001 1619 Gtid 3 1661 BEGIN GTID 0-3-7
+master-bin.000001 1661 Annotate_rows 3 1795 DELETE test1.t1, test2.t1 FROM test1.t1 INNER JOIN test2.t1 WHERE test1.t1.f1 = test2.t1.f1 AND test1.t1.f1 = 3
+master-bin.000001 1795 Table_map 3 1844 table_id: ### (test1.t1)
+master-bin.000001 1844 Delete_rows_v1 3 1886 table_id: ### flags: STMT_END_F
+master-bin.000001 1886 Xid 3 1917 COMMIT /* xid=### */
+master-bin.000001 1917 Gtid 3 1959 BEGIN GTID 0-3-8
+master-bin.000001 1959 Annotate_rows 3 2020 INSERT INTO test1.t1 (f1) VALUES (111)
+master-bin.000001 2020 Table_map 3 2069 table_id: ### (test1.t1)
+master-bin.000001 2069 Write_rows_v1 3 2111 table_id: ### flags: STMT_END_F
+master-bin.000001 2111 Annotate_rows 3 2172 INSERT INTO test1.t1 (f1) VALUES (222)
+master-bin.000001 2172 Table_map 3 2221 table_id: ### (test1.t1)
+master-bin.000001 2221 Write_rows_v1 3 2263 table_id: ### flags: STMT_END_F
+master-bin.000001 2263 Xid 3 2294 COMMIT /* xid=### */
+master-bin.000001 2294 Gtid 3 2336 BEGIN GTID 0-3-9
+master-bin.000001 2336 Annotate_rows 3 2397 INSERT INTO test1.t1 (f1) VALUES (333)
+master-bin.000001 2397 Table_map 3 2446 table_id: ### (test1.t1)
+master-bin.000001 2446 Write_rows_v1 3 2488 table_id: ### flags: STMT_END_F
+master-bin.000001 2488 Xid 3 2519 COMMIT /* xid=### */
+include/diff_servers.inc [servers=1 2]
+connection node_1;
+SELECT COUNT(*) = 2 FROM test1.t1 WHERE f1 IN (1,2);
+COUNT(*) = 2
+1
+SELECT COUNT(*) = 3 FROM test1.t1 WHERE f1 IN (111,222,333);
+COUNT(*) = 3
+1
+SELECT COUNT(*) = 2 FROM test1.t1 WHERE f2 = 'klm';
+COUNT(*) = 2
+1
+USE test2;
+ERROR 42000: Unknown database 'test2'
+connection node_3;
+DROP SCHEMA test1;
+DROP SCHEMA test2;
+connection node_1;
+connection node_2;
+STOP SLAVE;
+RESET SLAVE ALL;
diff --git a/mysql-test/suite/galera/r/galera_as_slave_gtid_replicate_do_db_cc.result b/mysql-test/suite/galera/r/galera_as_slave_gtid_replicate_do_db_cc.result
new file mode 100644
index 00000000000..fc10cc01a45
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_as_slave_gtid_replicate_do_db_cc.result
@@ -0,0 +1,315 @@
+RESET MASTER;
+SET global wsrep_on=OFF;
+RESET MASTER;
+SET global wsrep_on=ON;
+SET global wsrep_on=OFF;
+RESET MASTER;
+SET global wsrep_on=ON;
+START SLAVE USER='root';
+Warnings:
+Note 1759 Sending passwords in plain text without SSL/TLS is extremely insecure.
+CREATE SCHEMA test1;
+CREATE SCHEMA test2;
+USE test1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+USE test2;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO test1.t1 (f1) VALUES (1);
+INSERT INTO test2.t1 (f1) VALUES (1);
+INSERT INTO test1.t1 (f1) VALUES (2);
+INSERT INTO test2.t1 (f1) VALUES (2);
+SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+INSERT INTO test1.t1 (f1) VALUES (3);
+INSERT INTO test2.t1 (f1) VALUES (3);
+INSERT INTO test1.t1 (f1) VALUES (3);
+INSERT INTO test2.t1 (f1) VALUES (3);
+INSERT INTO test1.t1 (f1) VALUES (3);
+INSERT INTO test2.t1 (f1) VALUES (3);
+INSERT INTO test1.t1 (f1) VALUES (3);
+INSERT INTO test2.t1 (f1) VALUES (3);
+SET SESSION wsrep_on=OFF;
+include/wait_for_slave_sql_error.inc [errno=1047]
+INSERT INTO test1.t1 (f1) VALUES (4);
+INSERT INTO test2.t1 (f1) VALUES (4);
+INSERT INTO test1.t1 (f1) VALUES (4);
+INSERT INTO test2.t1 (f1) VALUES (4);
+INSERT INTO test1.t1 (f1) VALUES (4);
+INSERT INTO test2.t1 (f1) VALUES (4);
+INSERT INTO test1.t1 (f1) VALUES (4);
+INSERT INTO test2.t1 (f1) VALUES (4);
+INSERT INTO test1.t1 (f1) VALUES (4);
+INSERT INTO test2.t1 (f1) VALUES (4);
+SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
+INSERT INTO test1.t1 (f1) VALUES (5);
+INSERT INTO test2.t1 (f1) VALUES (5);
+SET SESSION wsrep_on=ON;
+INSERT INTO test1.t1 (f1) VALUES (6);
+INSERT INTO test2.t1 (f1) VALUES (6);
+START SLAVE;
+SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 120;
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-bin.000001 120 Previous_gtids 2 151
+mysqld-bin.000001 151 Gtid 1 199 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:1'
+mysqld-bin.000001 199 Query 1 294 CREATE SCHEMA test1
+mysqld-bin.000001 294 Gtid 1 342 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:2'
+mysqld-bin.000001 342 Query 1 415 BEGIN
+mysqld-bin.000001 415 Query 1 489 COMMIT
+mysqld-bin.000001 489 Gtid 1 537 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:3'
+mysqld-bin.000001 537 Query 1 655 use `test1`; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB
+mysqld-bin.000001 655 Gtid 1 703 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:4'
+mysqld-bin.000001 703 Query 1 776 BEGIN
+mysqld-bin.000001 776 Query 1 850 COMMIT
+mysqld-bin.000001 850 Gtid 1 898 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:5'
+mysqld-bin.000001 898 Query 1 961 BEGIN
+mysqld-bin.000001 961 Table_map 1 1007 table_id: ### (test1.t1)
+mysqld-bin.000001 1007 Write_rows 1 1047 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 1047 Xid 1 1078 COMMIT /* xid=### */
+mysqld-bin.000001 1078 Gtid 1 1126 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:6'
+mysqld-bin.000001 1126 Query 1 1189 BEGIN
+mysqld-bin.000001 1189 Query 1 1253 COMMIT
+mysqld-bin.000001 1253 Gtid 1 1301 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:7'
+mysqld-bin.000001 1301 Query 1 1364 BEGIN
+mysqld-bin.000001 1364 Table_map 1 1410 table_id: ### (test1.t1)
+mysqld-bin.000001 1410 Write_rows 1 1450 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 1450 Xid 1 1481 COMMIT /* xid=### */
+mysqld-bin.000001 1481 Gtid 1 1529 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:8'
+mysqld-bin.000001 1529 Query 1 1592 BEGIN
+mysqld-bin.000001 1592 Query 1 1656 COMMIT
+mysqld-bin.000001 1656 Gtid 2 1704 SET @@SESSION.GTID_NEXT= '<effective_uuid_2>:12'
+mysqld-bin.000001 1704 Query 2 1772 BEGIN
+mysqld-bin.000001 1772 Ignorable 2 1795 # Unrecognized ignorable event
+mysqld-bin.000001 1795 Query 2 1864 COMMIT
+mysqld-bin.000001 1864 Gtid 2 1912 SET @@SESSION.GTID_NEXT= '<effective_uuid_2>:13'
+mysqld-bin.000001 1912 Query 2 1980 BEGIN
+mysqld-bin.000001 1980 Ignorable 2 2003 # Unrecognized ignorable event
+mysqld-bin.000001 2003 Query 2 2072 COMMIT
+mysqld-bin.000001 2072 Gtid 1 2120 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:9'
+mysqld-bin.000001 2120 Query 1 2183 BEGIN
+mysqld-bin.000001 2183 Table_map 1 2229 table_id: ### (test1.t1)
+mysqld-bin.000001 2229 Write_rows 1 2269 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 2269 Xid 1 2300 COMMIT /* xid=### */
+mysqld-bin.000001 2300 Gtid 1 2348 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:10'
+mysqld-bin.000001 2348 Query 1 2411 BEGIN
+mysqld-bin.000001 2411 Query 1 2475 COMMIT
+mysqld-bin.000001 2475 Gtid 1 2523 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:11'
+mysqld-bin.000001 2523 Query 1 2586 BEGIN
+mysqld-bin.000001 2586 Table_map 1 2632 table_id: ### (test1.t1)
+mysqld-bin.000001 2632 Write_rows 1 2672 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 2672 Xid 1 2703 COMMIT /* xid=### */
+mysqld-bin.000001 2703 Gtid 1 2751 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:12'
+mysqld-bin.000001 2751 Query 1 2814 BEGIN
+mysqld-bin.000001 2814 Query 1 2878 COMMIT
+mysqld-bin.000001 2878 Gtid 1 2926 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:13'
+mysqld-bin.000001 2926 Query 1 2989 BEGIN
+mysqld-bin.000001 2989 Table_map 1 3035 table_id: ### (test1.t1)
+mysqld-bin.000001 3035 Write_rows 1 3075 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 3075 Xid 1 3106 COMMIT /* xid=### */
+mysqld-bin.000001 3106 Gtid 1 3154 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:14'
+mysqld-bin.000001 3154 Query 1 3217 BEGIN
+mysqld-bin.000001 3217 Query 1 3281 COMMIT
+mysqld-bin.000001 3281 Gtid 1 3329 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:15'
+mysqld-bin.000001 3329 Query 1 3392 BEGIN
+mysqld-bin.000001 3392 Table_map 1 3438 table_id: ### (test1.t1)
+mysqld-bin.000001 3438 Write_rows 1 3478 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 3478 Xid 1 3509 COMMIT /* xid=### */
+mysqld-bin.000001 3509 Gtid 1 3557 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:16'
+mysqld-bin.000001 3557 Query 1 3620 BEGIN
+mysqld-bin.000001 3620 Query 1 3684 COMMIT
+mysqld-bin.000001 3684 Gtid 1 3732 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:17'
+mysqld-bin.000001 3732 Query 1 3795 BEGIN
+mysqld-bin.000001 3795 Table_map 1 3841 table_id: ### (test1.t1)
+mysqld-bin.000001 3841 Write_rows 1 3881 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 3881 Xid 1 3912 COMMIT /* xid=### */
+mysqld-bin.000001 3912 Gtid 1 3960 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:18'
+mysqld-bin.000001 3960 Query 1 4023 BEGIN
+mysqld-bin.000001 4023 Query 1 4087 COMMIT
+mysqld-bin.000001 4087 Gtid 1 4135 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:19'
+mysqld-bin.000001 4135 Query 1 4198 BEGIN
+mysqld-bin.000001 4198 Table_map 1 4244 table_id: ### (test1.t1)
+mysqld-bin.000001 4244 Write_rows 1 4284 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 4284 Xid 1 4315 COMMIT /* xid=### */
+mysqld-bin.000001 4315 Gtid 1 4363 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:20'
+mysqld-bin.000001 4363 Query 1 4426 BEGIN
+mysqld-bin.000001 4426 Query 1 4490 COMMIT
+mysqld-bin.000001 4490 Gtid 1 4538 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:21'
+mysqld-bin.000001 4538 Query 1 4601 BEGIN
+mysqld-bin.000001 4601 Table_map 1 4647 table_id: ### (test1.t1)
+mysqld-bin.000001 4647 Write_rows 1 4687 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 4687 Xid 1 4718 COMMIT /* xid=### */
+mysqld-bin.000001 4718 Gtid 1 4766 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:22'
+mysqld-bin.000001 4766 Query 1 4829 BEGIN
+mysqld-bin.000001 4829 Query 1 4893 COMMIT
+mysqld-bin.000001 4893 Gtid 1 4941 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:23'
+mysqld-bin.000001 4941 Query 1 5004 BEGIN
+mysqld-bin.000001 5004 Table_map 1 5050 table_id: ### (test1.t1)
+mysqld-bin.000001 5050 Write_rows 1 5090 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 5090 Xid 1 5121 COMMIT /* xid=### */
+mysqld-bin.000001 5121 Gtid 1 5169 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:24'
+mysqld-bin.000001 5169 Query 1 5232 BEGIN
+mysqld-bin.000001 5232 Query 1 5296 COMMIT
+mysqld-bin.000001 5296 Gtid 1 5344 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:25'
+mysqld-bin.000001 5344 Query 1 5407 BEGIN
+mysqld-bin.000001 5407 Table_map 1 5453 table_id: ### (test1.t1)
+mysqld-bin.000001 5453 Write_rows 1 5493 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 5493 Xid 1 5524 COMMIT /* xid=### */
+mysqld-bin.000001 5524 Gtid 1 5572 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:26'
+mysqld-bin.000001 5572 Query 1 5635 BEGIN
+mysqld-bin.000001 5635 Query 1 5699 COMMIT
+mysqld-bin.000001 5699 Gtid 1 5747 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:27'
+mysqld-bin.000001 5747 Query 1 5810 BEGIN
+mysqld-bin.000001 5810 Table_map 1 5856 table_id: ### (test1.t1)
+mysqld-bin.000001 5856 Write_rows 1 5896 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 5896 Xid 1 5927 COMMIT /* xid=### */
+mysqld-bin.000001 5927 Gtid 1 5975 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:28'
+mysqld-bin.000001 5975 Query 1 6038 BEGIN
+mysqld-bin.000001 6038 Query 1 6102 COMMIT
+mysqld-bin.000001 6102 Gtid 1 6150 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:29'
+mysqld-bin.000001 6150 Query 1 6213 BEGIN
+mysqld-bin.000001 6213 Table_map 1 6259 table_id: ### (test1.t1)
+mysqld-bin.000001 6259 Write_rows 1 6299 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 6299 Xid 1 6330 COMMIT /* xid=### */
+mysqld-bin.000001 6330 Gtid 1 6378 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:30'
+mysqld-bin.000001 6378 Query 1 6441 BEGIN
+mysqld-bin.000001 6441 Query 1 6505 COMMIT
+USE test2;
+ERROR 42000: Unknown database 'test2'
+gtid_executed_equal
+1
+USE test2;
+ERROR 42000: Unknown database 'test2'
+SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 120;
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-bin.000001 120 Previous_gtids 4 151
+mysqld-bin.000001 151 Gtid 1 199 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:1'
+mysqld-bin.000001 199 Query 1 294 CREATE SCHEMA test1
+mysqld-bin.000001 294 Gtid 1 342 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:2'
+mysqld-bin.000001 342 Query 1 415 BEGIN
+mysqld-bin.000001 415 Query 1 489 COMMIT
+mysqld-bin.000001 489 Gtid 1 537 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:3'
+mysqld-bin.000001 537 Query 1 655 use `test1`; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB
+mysqld-bin.000001 655 Gtid 1 703 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:4'
+mysqld-bin.000001 703 Query 1 776 BEGIN
+mysqld-bin.000001 776 Query 1 850 COMMIT
+mysqld-bin.000001 850 Gtid 1 898 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:5'
+mysqld-bin.000001 898 Query 1 961 BEGIN
+mysqld-bin.000001 961 Table_map 1 1007 table_id: ### (test1.t1)
+mysqld-bin.000001 1007 Write_rows 1 1047 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 1047 Xid 1 1078 COMMIT /* xid=### */
+mysqld-bin.000001 1078 Gtid 1 1126 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:6'
+mysqld-bin.000001 1126 Query 1 1189 BEGIN
+mysqld-bin.000001 1189 Query 1 1253 COMMIT
+mysqld-bin.000001 1253 Gtid 1 1301 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:7'
+mysqld-bin.000001 1301 Query 1 1364 BEGIN
+mysqld-bin.000001 1364 Table_map 1 1410 table_id: ### (test1.t1)
+mysqld-bin.000001 1410 Write_rows 1 1450 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 1450 Xid 1 1481 COMMIT /* xid=### */
+mysqld-bin.000001 1481 Gtid 1 1529 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:8'
+mysqld-bin.000001 1529 Query 1 1592 BEGIN
+mysqld-bin.000001 1592 Query 1 1656 COMMIT
+mysqld-bin.000001 1656 Gtid 4 1704 SET @@SESSION.GTID_NEXT= '<effective_uuid_2>:12'
+mysqld-bin.000001 1704 Query 4 1772 BEGIN
+mysqld-bin.000001 1772 Ignorable 4 1795 # Unrecognized ignorable event
+mysqld-bin.000001 1795 Query 4 1864 COMMIT
+mysqld-bin.000001 1864 Gtid 4 1912 SET @@SESSION.GTID_NEXT= '<effective_uuid_2>:13'
+mysqld-bin.000001 1912 Query 4 1980 BEGIN
+mysqld-bin.000001 1980 Ignorable 4 2003 # Unrecognized ignorable event
+mysqld-bin.000001 2003 Query 4 2072 COMMIT
+mysqld-bin.000001 2072 Gtid 1 2120 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:9'
+mysqld-bin.000001 2120 Query 1 2183 BEGIN
+mysqld-bin.000001 2183 Table_map 1 2229 table_id: ### (test1.t1)
+mysqld-bin.000001 2229 Write_rows 1 2269 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 2269 Xid 1 2300 COMMIT /* xid=### */
+mysqld-bin.000001 2300 Gtid 1 2348 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:10'
+mysqld-bin.000001 2348 Query 1 2411 BEGIN
+mysqld-bin.000001 2411 Query 1 2475 COMMIT
+mysqld-bin.000001 2475 Gtid 1 2523 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:11'
+mysqld-bin.000001 2523 Query 1 2586 BEGIN
+mysqld-bin.000001 2586 Table_map 1 2632 table_id: ### (test1.t1)
+mysqld-bin.000001 2632 Write_rows 1 2672 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 2672 Xid 1 2703 COMMIT /* xid=### */
+mysqld-bin.000001 2703 Gtid 1 2751 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:12'
+mysqld-bin.000001 2751 Query 1 2814 BEGIN
+mysqld-bin.000001 2814 Query 1 2878 COMMIT
+mysqld-bin.000001 2878 Gtid 1 2926 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:13'
+mysqld-bin.000001 2926 Query 1 2989 BEGIN
+mysqld-bin.000001 2989 Table_map 1 3035 table_id: ### (test1.t1)
+mysqld-bin.000001 3035 Write_rows 1 3075 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 3075 Xid 1 3106 COMMIT /* xid=### */
+mysqld-bin.000001 3106 Gtid 1 3154 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:14'
+mysqld-bin.000001 3154 Query 1 3217 BEGIN
+mysqld-bin.000001 3217 Query 1 3281 COMMIT
+mysqld-bin.000001 3281 Gtid 1 3329 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:15'
+mysqld-bin.000001 3329 Query 1 3392 BEGIN
+mysqld-bin.000001 3392 Table_map 1 3438 table_id: ### (test1.t1)
+mysqld-bin.000001 3438 Write_rows 1 3478 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 3478 Xid 1 3509 COMMIT /* xid=### */
+mysqld-bin.000001 3509 Gtid 1 3557 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:16'
+mysqld-bin.000001 3557 Query 1 3620 BEGIN
+mysqld-bin.000001 3620 Query 1 3684 COMMIT
+mysqld-bin.000001 3684 Gtid 1 3732 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:17'
+mysqld-bin.000001 3732 Query 1 3795 BEGIN
+mysqld-bin.000001 3795 Table_map 1 3841 table_id: ### (test1.t1)
+mysqld-bin.000001 3841 Write_rows 1 3881 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 3881 Xid 1 3912 COMMIT /* xid=### */
+mysqld-bin.000001 3912 Gtid 1 3960 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:18'
+mysqld-bin.000001 3960 Query 1 4023 BEGIN
+mysqld-bin.000001 4023 Query 1 4087 COMMIT
+mysqld-bin.000001 4087 Gtid 1 4135 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:19'
+mysqld-bin.000001 4135 Query 1 4198 BEGIN
+mysqld-bin.000001 4198 Table_map 1 4244 table_id: ### (test1.t1)
+mysqld-bin.000001 4244 Write_rows 1 4284 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 4284 Xid 1 4315 COMMIT /* xid=### */
+mysqld-bin.000001 4315 Gtid 1 4363 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:20'
+mysqld-bin.000001 4363 Query 1 4426 BEGIN
+mysqld-bin.000001 4426 Query 1 4490 COMMIT
+mysqld-bin.000001 4490 Gtid 1 4538 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:21'
+mysqld-bin.000001 4538 Query 1 4601 BEGIN
+mysqld-bin.000001 4601 Table_map 1 4647 table_id: ### (test1.t1)
+mysqld-bin.000001 4647 Write_rows 1 4687 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 4687 Xid 1 4718 COMMIT /* xid=### */
+mysqld-bin.000001 4718 Gtid 1 4766 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:22'
+mysqld-bin.000001 4766 Query 1 4829 BEGIN
+mysqld-bin.000001 4829 Query 1 4893 COMMIT
+mysqld-bin.000001 4893 Gtid 1 4941 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:23'
+mysqld-bin.000001 4941 Query 1 5004 BEGIN
+mysqld-bin.000001 5004 Table_map 1 5050 table_id: ### (test1.t1)
+mysqld-bin.000001 5050 Write_rows 1 5090 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 5090 Xid 1 5121 COMMIT /* xid=### */
+mysqld-bin.000001 5121 Gtid 1 5169 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:24'
+mysqld-bin.000001 5169 Query 1 5232 BEGIN
+mysqld-bin.000001 5232 Query 1 5296 COMMIT
+mysqld-bin.000001 5296 Gtid 1 5344 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:25'
+mysqld-bin.000001 5344 Query 1 5407 BEGIN
+mysqld-bin.000001 5407 Table_map 1 5453 table_id: ### (test1.t1)
+mysqld-bin.000001 5453 Write_rows 1 5493 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 5493 Xid 1 5524 COMMIT /* xid=### */
+mysqld-bin.000001 5524 Gtid 1 5572 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:26'
+mysqld-bin.000001 5572 Query 1 5635 BEGIN
+mysqld-bin.000001 5635 Query 1 5699 COMMIT
+mysqld-bin.000001 5699 Gtid 1 5747 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:27'
+mysqld-bin.000001 5747 Query 1 5810 BEGIN
+mysqld-bin.000001 5810 Table_map 1 5856 table_id: ### (test1.t1)
+mysqld-bin.000001 5856 Write_rows 1 5896 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 5896 Xid 1 5927 COMMIT /* xid=### */
+mysqld-bin.000001 5927 Gtid 1 5975 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:28'
+mysqld-bin.000001 5975 Query 1 6038 BEGIN
+mysqld-bin.000001 6038 Query 1 6102 COMMIT
+mysqld-bin.000001 6102 Gtid 1 6150 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:29'
+mysqld-bin.000001 6150 Query 1 6213 BEGIN
+mysqld-bin.000001 6213 Table_map 1 6259 table_id: ### (test1.t1)
+mysqld-bin.000001 6259 Write_rows 1 6299 table_id: ### flags: STMT_END_F
+mysqld-bin.000001 6299 Xid 1 6330 COMMIT /* xid=### */
+mysqld-bin.000001 6330 Gtid 1 6378 SET @@SESSION.GTID_NEXT= '<effective_uuid_1>:30'
+mysqld-bin.000001 6378 Query 1 6441 BEGIN
+mysqld-bin.000001 6441 Query 1 6505 COMMIT
+DROP SCHEMA test1;
+DROP SCHEMA test2;
+STOP SLAVE;
+RESET SLAVE ALL;
+CALL mtr.add_suppression("GTID replication failed");
+CALL mtr.add_suppression("Slave SQL: Error in Xid_log_event: Commit could not be completed");
+CALL mtr.add_suppression("Slave SQL: Node has dropped from cluster, Error_code: 1047");
+CALL mtr.add_suppression("TO isolation failed for");
+CALL mtr.add_suppression("Slave SQL: Error 'Deadlock found when trying to get lock; try restarting transaction' on query");
+CALL mtr.add_suppression("Slave SQL: Error 'WSREP has not yet prepared node for application use' on query");
+CALL mtr.add_suppression("Slave: WSREP has not yet prepared node for application use Error_code: 1047");
diff --git a/mysql-test/suite/galera/r/galera_as_slave_nonprim.result b/mysql-test/suite/galera/r/galera_as_slave_nonprim.result
index 365ea31f292..fefc988d9d4 100644
--- a/mysql-test/suite/galera/r/galera_as_slave_nonprim.result
+++ b/mysql-test/suite/galera/r/galera_as_slave_nonprim.result
@@ -1,13 +1,28 @@
+connection node_2;
+connection node_1;
+connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4;
+connection node_2;
START SLAVE;
SET SESSION wsrep_sync_wait = 0;
+connection node_4;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+connection node_2;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1';
+connection node_1;
+connection node_4;
INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
+connection node_2;
+connection node_1;
expected_error
1
+connection node_2;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate=0';
+connection node_1;
+connection node_2;
START SLAVE;
+connection node_4;
DROP TABLE t1;
+connection node_2;
STOP SLAVE;
RESET SLAVE ALL;
CALL mtr.add_suppression("Slave SQL: Error 'Unknown command' on query");
@@ -15,4 +30,5 @@ CALL mtr.add_suppression("Slave: Unknown command Error_code: 1047");
CALL mtr.add_suppression("Transport endpoint is not connected");
CALL mtr.add_suppression("Slave SQL: Error in Xid_log_event: Commit could not be completed, 'Deadlock found when trying to get lock; try restarting transaction', Error_code: 1213");
CALL mtr.add_suppression("Slave SQL: Node has dropped from cluster, Error_code: 1047");
+connection node_4;
RESET MASTER;
diff --git a/mysql-test/suite/galera/r/galera_autoinc_sst_mariabackup.result b/mysql-test/suite/galera/r/galera_autoinc_sst_mariabackup.result
index 91f45c93257..7c98b3e85ed 100644
--- a/mysql-test/suite/galera/r/galera_autoinc_sst_mariabackup.result
+++ b/mysql-test/suite/galera/r/galera_autoinc_sst_mariabackup.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_bf_abort.result b/mysql-test/suite/galera/r/galera_bf_abort.result
index c2e89965fce..cb4a27c82fd 100644
--- a/mysql-test/suite/galera/r/galera_bf_abort.result
+++ b/mysql-test/suite/galera/r/galera_bf_abort.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(6)) ENGINE=InnoDB;
connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2;
@@ -9,7 +11,7 @@ INSERT INTO t1 VALUES (1,'node_1');
connection node_2a;
connection node_2;
INSERT INTO t1 VALUES (2, 'node_2');
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
wsrep_local_aborts_increment
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result b/mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result
index 7b98c807efb..1150a9fff63 100644
--- a/mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result
+++ b/mysql-test/suite/galera/r/galera_bf_abort_flush_for_export.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
SET AUTOCOMMIT=OFF;
diff --git a/mysql-test/suite/galera/r/galera_bf_abort_for_update.result b/mysql-test/suite/galera/r/galera_bf_abort_for_update.result
index ec8bddb087a..7dd3053b7bb 100644
--- a/mysql-test/suite/galera/r/galera_bf_abort_for_update.result
+++ b/mysql-test/suite/galera/r/galera_bf_abort_for_update.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1, 10);
connection node_1;
@@ -10,7 +12,7 @@ UPDATE t1 SET f1 = 2;
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
wsrep_local_bf_aborts_diff
1
connection node_1;
@@ -23,7 +25,7 @@ UPDATE t1 SET f2 = 20;
connection node_1a;
connection node_1;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
wsrep_local_bf_aborts_diff
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_bf_abort_ftwrl.result b/mysql-test/suite/galera/r/galera_bf_abort_ftwrl.result
index ae1ca6d2157..43ca6089d78 100644
--- a/mysql-test/suite/galera/r/galera_bf_abort_ftwrl.result
+++ b/mysql-test/suite/galera/r/galera_bf_abort_ftwrl.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
SET AUTOCOMMIT=OFF;
diff --git a/mysql-test/suite/galera/r/galera_bf_abort_get_lock.result b/mysql-test/suite/galera/r/galera_bf_abort_get_lock.result
index 8c6c7b7d7a4..0ef2a1a72c6 100644
--- a/mysql-test/suite/galera/r/galera_bf_abort_get_lock.result
+++ b/mysql-test/suite/galera/r/galera_bf_abort_get_lock.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
connection node_2a;
SELECT GET_LOCK("foo", 1000);
@@ -10,7 +12,7 @@ SELECT GET_LOCK("foo", 1000);;
connection node_1;
INSERT INTO t1 VALUES (1);
connection node_2;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
wsrep_local_aborts_increment
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_bf_abort_group_commit.result b/mysql-test/suite/galera/r/galera_bf_abort_group_commit.result
new file mode 100644
index 00000000000..2a2ddd519f4
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_bf_abort_group_commit.result
@@ -0,0 +1,685 @@
+SET SESSION wsrep_sync_wait = 0;
+galera_sr_bf_abort_at_commit = 0
+after_replicate_sync
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+INSERT INTO t1 VALUES (3);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+ROLLBACK;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+local_monitor_master_enter_sync
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+INSERT INTO t1 VALUES (3);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+ROLLBACK;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+apply_monitor_master_enter_sync
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_master_enter_sync';
+INSERT INTO t1 VALUES (3);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_master_enter_sync';
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+ROLLBACK;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+commit_monitor_master_enter_sync
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
+INSERT INTO t1 VALUES (3);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+ROLLBACK;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+galera_sr_bf_abort_at_commit = 1
+after_replicate_sync
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+COMMIT;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+ROLLBACK;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+local_monitor_master_enter_sync
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+COMMIT;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
+ROLLBACK;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+apply_monitor_master_enter_sync
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_master_enter_sync';
+COMMIT;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_master_enter_sync';
+ROLLBACK;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+commit_monitor_master_enter_sync
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
+COMMIT;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
+ROLLBACK;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+galera_sr_bf_abort_at_commit = 1
+after_replicate_sync
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 0;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+COMMIT;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+ROLLBACK;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+local_monitor_master_enter_sync
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 0;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+COMMIT;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
+ROLLBACK;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+apply_monitor_master_enter_sync
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 0;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_master_enter_sync';
+COMMIT;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_master_enter_sync';
+ROLLBACK;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+commit_monitor_master_enter_sync
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 0;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
+COMMIT;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
+ROLLBACK;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+CALL mtr.add_suppression("WSREP: fragment replication failed: 1");
diff --git a/mysql-test/suite/galera/r/galera_bf_abort_lock_table.result b/mysql-test/suite/galera/r/galera_bf_abort_lock_table.result
index 81b5816ddbe..e7882e43b6a 100644
--- a/mysql-test/suite/galera/r/galera_bf_abort_lock_table.result
+++ b/mysql-test/suite/galera/r/galera_bf_abort_lock_table.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
SET AUTOCOMMIT=OFF;
diff --git a/mysql-test/suite/galera/r/galera_bf_abort_shutdown.result b/mysql-test/suite/galera/r/galera_bf_abort_shutdown.result
new file mode 100644
index 00000000000..fa2a5c373f2
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_bf_abort_shutdown.result
@@ -0,0 +1,12 @@
+connection node_2;
+connection node_1;
+connection node_1;
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INT PRIMARY KEY);
+connection node_2;
+SET DEBUG_SYNC = 'wsrep_before_certification WAIT_FOR continue';
+INSERT INTO t1 VALUES (1);
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_bf_abort_sleep.result b/mysql-test/suite/galera/r/galera_bf_abort_sleep.result
index 9cd6abad5a1..00d6600d264 100644
--- a/mysql-test/suite/galera/r/galera_bf_abort_sleep.result
+++ b/mysql-test/suite/galera/r/galera_bf_abort_sleep.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
SET AUTOCOMMIT=OFF;
@@ -6,7 +8,7 @@ SELECT SLEEP(1000);;
connection node_1;
INSERT INTO t1 VALUES (1);
connection node_2;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
wsrep_local_aborts_increment
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_bf_background_statistics.result b/mysql-test/suite/galera/r/galera_bf_background_statistics.result
index c2c3fce2b14..a8c8842b8e1 100644
--- a/mysql-test/suite/galera/r/galera_bf_background_statistics.result
+++ b/mysql-test/suite/galera/r/galera_bf_background_statistics.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SELECT @@innodb_stats_persistent;
@@innodb_stats_persistent
1
@@ -27,7 +29,7 @@ SELECT SLEEP(1000);;
connection node_1;
ALTER TABLE t1 CHANGE f2 f2 INTEGER NOT NULL DEFAULT 1;
connection node_2;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
wsrep_local_aborts_increment
1
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_bf_lock_wait.result b/mysql-test/suite/galera/r/galera_bf_lock_wait.result
index 7ec524da888..f893848a72d 100644
--- a/mysql-test/suite/galera/r/galera_bf_lock_wait.result
+++ b/mysql-test/suite/galera/r/galera_bf_lock_wait.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 ENGINE=InnoDB select 1 as a, 1 as b union select 2, 2;
ALTER TABLE t1 add primary key(a);
CREATE PROCEDURE p1()
diff --git a/mysql-test/suite/galera/r/galera_binlog_cache_size.result b/mysql-test/suite/galera/r/galera_binlog_cache_size.result
index 6aac74ab5f0..c5a09074537 100644
--- a/mysql-test/suite/galera/r/galera_binlog_cache_size.result
+++ b/mysql-test/suite/galera/r/galera_binlog_cache_size.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 VARCHAR(767)) ENGINE=InnoDB;
CREATE TABLE ten (f1 INTEGER);
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
diff --git a/mysql-test/suite/galera/r/galera_binlog_checksum.result b/mysql-test/suite/galera/r/galera_binlog_checksum.result
index 7303aa61122..3ef7cf5c41e 100644
--- a/mysql-test/suite/galera/r/galera_binlog_checksum.result
+++ b/mysql-test/suite/galera/r/galera_binlog_checksum.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_binlog_event_max_size_max.result b/mysql-test/suite/galera/r/galera_binlog_event_max_size_max.result
index 46582ff5c4b..b513c491bb0 100644
--- a/mysql-test/suite/galera/r/galera_binlog_event_max_size_max.result
+++ b/mysql-test/suite/galera/r/galera_binlog_event_max_size_max.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE ten (f1 INTEGER);
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
CREATE TABLE t1 (f1 VARCHAR(1000));
diff --git a/mysql-test/suite/galera/r/galera_binlog_event_max_size_min.result b/mysql-test/suite/galera/r/galera_binlog_event_max_size_min.result
index 7b88af5d5af..6c2279bece0 100644
--- a/mysql-test/suite/galera/r/galera_binlog_event_max_size_min.result
+++ b/mysql-test/suite/galera/r/galera_binlog_event_max_size_min.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 VARCHAR(1000));
INSERT INTO t1 VALUES (REPEAT('x', 1000));
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_binlog_row_image.result b/mysql-test/suite/galera/r/galera_binlog_row_image.result
index d54db61105e..20e6bd9fab8 100644
--- a/mysql-test/suite/galera/r/galera_binlog_row_image.result
+++ b/mysql-test/suite/galera/r/galera_binlog_row_image.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SET SESSION binlog_row_image=minimal;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result b/mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result
index 78b40228eb0..a09ad6b97b9 100644
--- a/mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result
+++ b/mysql-test/suite/galera/r/galera_binlog_stmt_autoinc.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_commit_empty.result b/mysql-test/suite/galera/r/galera_commit_empty.result
new file mode 100644
index 00000000000..75b94c54b1e
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_commit_empty.result
@@ -0,0 +1,15 @@
+connection node_2;
+connection node_1;
+START TRANSACTION;
+COMMIT;
+START TRANSACTION;
+COMMIT;
+START TRANSACTION READ ONLY;
+COMMIT;
+START TRANSACTION;
+COMMIT;
+START TRANSACTION;
+START TRANSACTION;
+COMMIT;
+wsrep_last_committed_diff
+1
diff --git a/mysql-test/suite/galera/r/galera_concurrent_ctas.result b/mysql-test/suite/galera/r/galera_concurrent_ctas.result
index 8a3ac1ae0d3..39c55277c52 100644
--- a/mysql-test/suite/galera/r/galera_concurrent_ctas.result
+++ b/mysql-test/suite/galera/r/galera_concurrent_ctas.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
disconnect node_2;
disconnect node_1;
# End of test
diff --git a/mysql-test/suite/galera/r/galera_create_function.result b/mysql-test/suite/galera/r/galera_create_function.result
index 9118c2864f3..85fa85d81f1 100644
--- a/mysql-test/suite/galera/r/galera_create_function.result
+++ b/mysql-test/suite/galera/r/galera_create_function.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE USER 'user1';
CREATE
diff --git a/mysql-test/suite/galera/r/galera_create_procedure.result b/mysql-test/suite/galera/r/galera_create_procedure.result
index 98dc4a856dc..24a0bd66b39 100644
--- a/mysql-test/suite/galera/r/galera_create_procedure.result
+++ b/mysql-test/suite/galera/r/galera_create_procedure.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE USER 'user1';
CREATE TABLE t1 (f1 INTEGER);
diff --git a/mysql-test/suite/galera/r/galera_create_table_as_select.result b/mysql-test/suite/galera/r/galera_create_table_as_select.result
new file mode 100644
index 00000000000..6f65ee99f0a
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_create_table_as_select.result
@@ -0,0 +1,103 @@
+connection node_2;
+connection node_1;
+connection node_1;
+SET SESSION default_storage_engine=InnoDB;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t1 AS SELECT * FROM t2;
+ERROR 42S01: Table 't1' already exists
+DROP TABLE t1,t2;
+CREATE TABLE t1 AS SELECT * FROM t2;
+ERROR 42S02: Table 'test.t2' doesn't exist
+CREATE TABLE t1 AS SELECT 1 FROM DUAL;
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+connection node_1;
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t1 AS SELECT * FROM t2;
+connection node_2;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1;
+DROP TABLE t1,t2;
+CREATE TABLE t2 (f1 INTEGER) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5);
+CREATE TABLE t1 AS SELECT * FROM t2;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+connection node_2;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+connection node_1;
+DROP TABLE t1,t2;
+connection node_1;
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5);
+CREATE TABLE t1 AS SELECT MAX(f1) AS f1 FROM t2;
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+SELECT f1 = 5 FROM t1;
+f1 = 5
+1
+connection node_1;
+DROP TABLE t1,t2;
+connection node_1;
+CREATE PROCEDURE sp1 ()
+BEGIN
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5);
+CREATE TABLE t1 AS SELECT * FROM t2;
+END|
+CALL sp1();
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+connection node_2;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+connection node_1;
+DROP TABLE t1, t2;
+DROP PROCEDURE sp1;
+connection node_1;
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5);
+PREPARE stmt FROM 'CREATE TABLE t1 AS SELECT * FROM t2';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP TABLE t1, t2;
+connection node_1;
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+LOCK TABLE t2 WRITE;
+connection node_1;
+CREATE TABLE t1 AS SELECT * FROM t2;;
+connection node_2;
+SELECT COUNT(*) = 5 FROM t2;
+COUNT(*) = 5
+1
+CREATE TABLE t1 AS SELECT * FROM t2;
+connection node_1a;
+UNLOCK TABLES;
+connection node_1;
+Got one of the listed errors
+DROP TABLE t1, t2;
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5);
+CREATE TEMPORARY TABLE t1 AS SELECT * FROM t2;
+connection node_2;
+SELECT * FROM t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+CALL mtr.add_suppression("Slave SQL: Error 'Unknown table 'test.t1'' on query");
+connection node_1;
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/galera/r/galera_create_table_like.result b/mysql-test/suite/galera/r/galera_create_table_like.result
index 131ac311bca..82543331ad5 100644
--- a/mysql-test/suite/galera/r/galera_create_table_like.result
+++ b/mysql-test/suite/galera/r/galera_create_table_like.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE SCHEMA schema1;
CREATE SCHEMA schema2;
USE schema1;
diff --git a/mysql-test/suite/galera/r/galera_create_trigger.result b/mysql-test/suite/galera/r/galera_create_trigger.result
index d07a007543e..56b35150d87 100644
--- a/mysql-test/suite/galera/r/galera_create_trigger.result
+++ b/mysql-test/suite/galera/r/galera_create_trigger.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE definer_root (f1 INTEGER, trigger_user VARCHAR(100)) ENGINE=InnoDB;
CREATE TABLE definer_user (f1 INTEGER, trigger_user VARCHAR(100)) ENGINE=InnoDB;
CREATE TABLE definer_current_user (f1 INTEGER, trigger_user VARCHAR(100)) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_ddl_multiline.result b/mysql-test/suite/galera/r/galera_ddl_multiline.result
index 339a91125eb..9e70731a62b 100644
--- a/mysql-test/suite/galera/r/galera_ddl_multiline.result
+++ b/mysql-test/suite/galera/r/galera_ddl_multiline.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_defaults.result b/mysql-test/suite/galera/r/galera_defaults.result
index e7a2508c0f3..6dd5258ff6d 100644
--- a/mysql-test/suite/galera/r/galera_defaults.result
+++ b/mysql-test/suite/galera/r/galera_defaults.result
@@ -1,6 +1,8 @@
-SELECT COUNT(*) = 43 FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep_%';
-COUNT(*) = 43
-0
+connection node_2;
+connection node_1;
+SELECT COUNT(*) `expect 48` FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep_%';
+expect 48
+49
SELECT VARIABLE_NAME, VARIABLE_VALUE
FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
WHERE VARIABLE_NAME LIKE 'wsrep_%'
@@ -25,14 +27,15 @@ WSREP_CLUSTER_ADDRESS gcomm://
WSREP_CLUSTER_NAME my_wsrep_cluster
WSREP_CONVERT_LOCK_TO_TRX OFF
WSREP_DBUG_OPTION
-WSREP_DEBUG OFF
+WSREP_DEBUG NONE
WSREP_DESYNC OFF
WSREP_DIRTY_READS OFF
WSREP_DRUPAL_282555_WORKAROUND OFF
WSREP_FORCED_BINLOG_FORMAT NONE
WSREP_GTID_DOMAIN_ID 0
WSREP_GTID_MODE OFF
-WSREP_LOAD_DATA_SPLITTING ON
+WSREP_IGNORE_APPLY_ERRORS 7
+WSREP_LOAD_DATA_SPLITTING OFF
WSREP_LOG_CONFLICTS OFF
WSREP_MAX_WS_ROWS 0
WSREP_MAX_WS_SIZE 2147483647
@@ -48,8 +51,11 @@ WSREP_RETRY_AUTOCOMMIT 1
WSREP_SLAVE_FK_CHECKS ON
WSREP_SLAVE_THREADS 1
WSREP_SLAVE_UK_CHECKS OFF
+WSREP_SR_STORE table
WSREP_SST_AUTH
WSREP_SST_DONOR
WSREP_SST_DONOR_REJECTS_QUERIES OFF
WSREP_SST_METHOD rsync
WSREP_SYNC_WAIT 15
+WSREP_TRX_FRAGMENT_SIZE 0
+WSREP_TRX_FRAGMENT_UNIT bytes
diff --git a/mysql-test/suite/galera/r/galera_delete_limit.result b/mysql-test/suite/galera/r/galera_delete_limit.result
index f6fb2e56346..9898bfcf169 100644
--- a/mysql-test/suite/galera/r/galera_delete_limit.result
+++ b/mysql-test/suite/galera/r/galera_delete_limit.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE ten (f1 INTEGER) Engine=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
diff --git a/mysql-test/suite/galera/r/galera_desync_overlapped.result b/mysql-test/suite/galera/r/galera_desync_overlapped.result
index f9920e7f7ad..e3f40d444ee 100644
--- a/mysql-test/suite/galera/r/galera_desync_overlapped.result
+++ b/mysql-test/suite/galera/r/galera_desync_overlapped.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE ten (f1 INTEGER);
INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
diff --git a/mysql-test/suite/galera/r/galera_drop_database.result b/mysql-test/suite/galera/r/galera_drop_database.result
index 86135625a71..ac76683cb25 100644
--- a/mysql-test/suite/galera/r/galera_drop_database.result
+++ b/mysql-test/suite/galera/r/galera_drop_database.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
CREATE DATABASE fts;
diff --git a/mysql-test/suite/galera/r/galera_drop_multi.result b/mysql-test/suite/galera/r/galera_drop_multi.result
index 7793ef93b90..1ff8afe3219 100644
--- a/mysql-test/suite/galera/r/galera_drop_multi.result
+++ b/mysql-test/suite/galera/r/galera_drop_multi.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=MyISAM;
CREATE TEMPORARY TABLE t2 (f1 INTEGER) ENGINE=MyISAM;
CREATE TABLE t3 (f1 INTEGER) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_encrypt_tmp_files.result b/mysql-test/suite/galera/r/galera_encrypt_tmp_files.result
index 38480d186ba..fdfca3316b1 100644
--- a/mysql-test/suite/galera/r/galera_encrypt_tmp_files.result
+++ b/mysql-test/suite/galera/r/galera_encrypt_tmp_files.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment';
VARIABLE_VALUE = 'Synced'
1
diff --git a/mysql-test/suite/galera/r/galera_enum.result b/mysql-test/suite/galera/r/galera_enum.result
index a2a6317e2a0..40fc6931f6a 100644
--- a/mysql-test/suite/galera/r/galera_enum.result
+++ b/mysql-test/suite/galera/r/galera_enum.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 ENUM('', 'one', 'two'), KEY (f1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES ('');
@@ -38,7 +40,7 @@ connection node_1;
COMMIT;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
connection node_1;
SELECT COUNT(*) FROM t1 WHERE f1 = 'three';
COUNT(*)
diff --git a/mysql-test/suite/galera/r/galera_events.result b/mysql-test/suite/galera/r/galera_events.result
index f01627aba70..791b0be729d 100644
--- a/mysql-test/suite/galera/r/galera_events.result
+++ b/mysql-test/suite/galera/r/galera_events.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE EVENT event1 ON SCHEDULE AT CURRENT_TIMESTAMP + INTERVAL 1 HOUR DO SELECT 1;
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_fk_cascade_delete.result b/mysql-test/suite/galera/r/galera_fk_cascade_delete.result
index 73375ae55c5..291d641db88 100644
--- a/mysql-test/suite/galera/r/galera_fk_cascade_delete.result
+++ b/mysql-test/suite/galera/r/galera_fk_cascade_delete.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE grandparent (
id INT NOT NULL PRIMARY KEY
) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_fk_cascade_update.result b/mysql-test/suite/galera/r/galera_fk_cascade_update.result
index 5fe8b532473..5294826a8f9 100644
--- a/mysql-test/suite/galera/r/galera_fk_cascade_update.result
+++ b/mysql-test/suite/galera/r/galera_fk_cascade_update.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE grandparent (
id INT NOT NULL PRIMARY KEY
) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_fk_conflict.result b/mysql-test/suite/galera/r/galera_fk_conflict.result
index a08aa30a82e..f9f151abce4 100644
--- a/mysql-test/suite/galera/r/galera_fk_conflict.result
+++ b/mysql-test/suite/galera/r/galera_fk_conflict.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE parent (
id INT PRIMARY KEY,
KEY (id)
@@ -22,6 +24,6 @@ connection node_1;
COMMIT;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
DROP TABLE child;
DROP TABLE parent;
diff --git a/mysql-test/suite/galera/r/galera_fk_mismatch.result b/mysql-test/suite/galera/r/galera_fk_mismatch.result
index bdc60c9e099..a030b1214c5 100644
--- a/mysql-test/suite/galera/r/galera_fk_mismatch.result
+++ b/mysql-test/suite/galera/r/galera_fk_mismatch.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE parent (
id1 INT,
id2 INT,
diff --git a/mysql-test/suite/galera/r/galera_fk_multicolumn.result b/mysql-test/suite/galera/r/galera_fk_multicolumn.result
index f5b6aa23692..b626d963af8 100644
--- a/mysql-test/suite/galera/r/galera_fk_multicolumn.result
+++ b/mysql-test/suite/galera/r/galera_fk_multicolumn.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t0 (
f1 INT PRIMARY KEY,
f2 INT UNIQUE
diff --git a/mysql-test/suite/galera/r/galera_fk_multitable.result b/mysql-test/suite/galera/r/galera_fk_multitable.result
index 04ff7adc3e9..83e1491ab34 100644
--- a/mysql-test/suite/galera/r/galera_fk_multitable.result
+++ b/mysql-test/suite/galera/r/galera_fk_multitable.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t0 (
f0 INT PRIMARY KEY
);
diff --git a/mysql-test/suite/galera/r/galera_fk_no_pk.result b/mysql-test/suite/galera/r/galera_fk_no_pk.result
index e7b5f0b2b64..622e63dbb8f 100644
--- a/mysql-test/suite/galera/r/galera_fk_no_pk.result
+++ b/mysql-test/suite/galera/r/galera_fk_no_pk.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE parent (
id INT,
KEY (id)
diff --git a/mysql-test/suite/galera/r/galera_fk_selfreferential.result b/mysql-test/suite/galera/r/galera_fk_selfreferential.result
index 3b4dbf2a8e9..9a64521e7b8 100644
--- a/mysql-test/suite/galera/r/galera_fk_selfreferential.result
+++ b/mysql-test/suite/galera/r/galera_fk_selfreferential.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (
f1 INT NOT NULL PRIMARY KEY,
f2 INT,
diff --git a/mysql-test/suite/galera/r/galera_fk_setnull.result b/mysql-test/suite/galera/r/galera_fk_setnull.result
index d4f20fe60a3..afb3fc3b9b8 100644
--- a/mysql-test/suite/galera/r/galera_fk_setnull.result
+++ b/mysql-test/suite/galera/r/galera_fk_setnull.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE parent (
id INT NOT NULL,
PRIMARY KEY (id)
diff --git a/mysql-test/suite/galera/r/galera_flush_local.result b/mysql-test/suite/galera/r/galera_flush_local.result
index a8e798a693d..146833fc3c8 100644
--- a/mysql-test/suite/galera/r/galera_flush_local.result
+++ b/mysql-test/suite/galera/r/galera_flush_local.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
DROP TABLE IF EXISTS t1, t2, x1, x2;
connection node_1;
CREATE TABLE t1 (f1 INTEGER);
@@ -45,7 +47,9 @@ UNLOCK TABLES;
FLUSH LOCAL TABLES t1;
ANALYZE LOCAL TABLE t1, t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
OPTIMIZE LOCAL TABLE t1, t2;
Table Op Msg_type Msg_text
@@ -120,7 +124,9 @@ UNLOCK TABLES;
FLUSH TABLES t1;
ANALYZE TABLE t1, t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
OPTIMIZE TABLE t1, t2;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/galera/r/galera_forced_binlog_format.result b/mysql-test/suite/galera/r/galera_forced_binlog_format.result
index b94e6530886..a94ac0c112d 100644
--- a/mysql-test/suite/galera/r/galera_forced_binlog_format.result
+++ b/mysql-test/suite/galera/r/galera_forced_binlog_format.result
@@ -1,5 +1,10 @@
+connection node_2;
connection node_1;
+connection node_1;
+SET GLOBAL wsrep_on=OFF;
RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+FLUSH BINARY LOGS;
SET SESSION binlog_format = 'STATEMENT';
Warnings:
Warning 1105 MariaDB Galera and flashback do not support binlog format: STATEMENT
@@ -13,18 +18,7 @@ SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 256;
Log_name Pos Event_type Server_id End_log_pos Info
mysqld-bin.000001 <Pos> Gtid_list 1 <End_log_pos> []
mysqld-bin.000001 <Pos> Binlog_checkpoint 1 <End_log_pos> mysqld-bin.000001
-mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> GTID 0-1-1
-mysqld-bin.000001 <Pos> Query 1 <End_log_pos> use `test`; CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB
-mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> BEGIN GTID 0-1-2
-mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t1 VALUES (1)
-mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
-mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
-mysqld-bin.000001 <Pos> Xid 1 <End_log_pos> COMMIT /* xid=### */
-mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> BEGIN GTID 0-1-3
-mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t1 VALUES (2)
-mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
-mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
-mysqld-bin.000001 <Pos> Xid 1 <End_log_pos> COMMIT /* xid=### */
+mysqld-bin.000001 <Pos> Rotate 1 <End_log_pos> mysqld-bin.000002;pos=4
DROP TABLE t1;
#
# MDEV-9401: wsrep_forced_binlog_format with binlog causes crash
@@ -43,6 +37,4 @@ GRANT ALL PRIVILEGES ON `testdb_9401`.`t1` TO 'dummy'@'localhost'
REVOKE ALL PRIVILEGES, GRANT OPTION FROM dummy@localhost;
DROP USER dummy@localhost;
DROP DATABASE testdb_9401;
-disconnect node_2;
-disconnect node_1;
# End of tests
diff --git a/mysql-test/suite/galera/r/galera_ftwrl.result b/mysql-test/suite/galera/r/galera_ftwrl.result
index 0565781c051..eae8028a7f4 100644
--- a/mysql-test/suite/galera/r/galera_ftwrl.result
+++ b/mysql-test/suite/galera/r/galera_ftwrl.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
SET GLOBAL wsrep_provider_options = "repl.causal_read_timeout=PT1S";
diff --git a/mysql-test/suite/galera/r/galera_ftwrl_drain.result b/mysql-test/suite/galera/r/galera_ftwrl_drain.result
index 751811b88fd..2342643e745 100644
--- a/mysql-test/suite/galera/r/galera_ftwrl_drain.result
+++ b/mysql-test/suite/galera/r/galera_ftwrl_drain.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
@@ -22,7 +24,7 @@ connection node_2;
SET SESSION lock_wait_timeout = 1;
SET SESSION innodb_lock_wait_timeout=1;
SET SESSION wait_timeout=1;
-INSERT INTO t2 VALUES (2);
+INSERT INTO t1 VALUES (2);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
connection node_2a;
UNLOCK TABLES;
diff --git a/mysql-test/suite/galera/r/galera_fulltext.result b/mysql-test/suite/galera/r/galera_fulltext.result
index 18e3bff40fc..a22296278fa 100644
--- a/mysql-test/suite/galera/r/galera_fulltext.result
+++ b/mysql-test/suite/galera/r/galera_fulltext.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_gcache_recover.result b/mysql-test/suite/galera/r/galera_gcache_recover.result
index d3ba06c1333..819c595ece3 100644
--- a/mysql-test/suite/galera/r/galera_gcache_recover.result
+++ b/mysql-test/suite/galera/r/galera_gcache_recover.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
SET SESSION wsrep_sync_wait = 0;
diff --git a/mysql-test/suite/galera/r/galera_gcache_recover_full_gcache.result b/mysql-test/suite/galera/r/galera_gcache_recover_full_gcache.result
index 588af5668bb..a0d128f5fa3 100644
--- a/mysql-test/suite/galera/r/galera_gcache_recover_full_gcache.result
+++ b/mysql-test/suite/galera/r/galera_gcache_recover_full_gcache.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SET SESSION wsrep_sync_wait = 0;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 LONGBLOB) ENGINE=InnoDB;
connection node_2;
@@ -21,6 +23,6 @@ include/diff_servers.inc [servers=1 2]
connection node_1;
DROP TABLE t1;
CALL mtr.add_suppression("Skipped GCache ring buffer recovery");
-include/assert_grep.inc [IST first seqno 2 not found from cache, falling back to SST]
+include/assert_grep.inc [IST first seqno [24] not found from cache, falling back to SST]
connection node_2;
CALL mtr.add_suppression("Skipped GCache ring buffer recovery");
diff --git a/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result b/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result
index 604b24a8fab..a0a45446eff 100644
--- a/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result
+++ b/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SET SESSION wsrep_sync_wait = 0;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 LONGBLOB) ENGINE=InnoDB;
CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_gcs_fc_limit.result b/mysql-test/suite/galera/r/galera_gcs_fc_limit.result
index 464a8b7ea97..83eaa0fb7aa 100644
--- a/mysql-test/suite/galera/r/galera_gcs_fc_limit.result
+++ b/mysql-test/suite/galera/r/galera_gcs_fc_limit.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_gcs_fragment.result b/mysql-test/suite/galera/r/galera_gcs_fragment.result
index 0c9c1819f60..bdd749047ee 100644
--- a/mysql-test/suite/galera/r/galera_gcs_fragment.result
+++ b/mysql-test/suite/galera/r/galera_gcs_fragment.result
@@ -1,24 +1,37 @@
+connection node_2;
+connection node_1;
+connection node_1;
+connection node_2;
CREATE TABLE t1 (f1 INT PRIMARY KEY, f2 TEXT);
+connection node_2;
SET GLOBAL wsrep_cluster_address='';
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
SET SESSION wsrep_sync_wait=0;
SET GLOBAL wsrep_provider_options = 'dbug=d,gcs_core_after_frag_send';
+connection node_1;
SET SESSION wsrep_retry_autocommit=0;
INSERT INTO t1 VALUES (1, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_2;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
INSERT INTO t1 VALUES (2, "bbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
+connection node_1a;
SET GLOBAL wsrep_provider_options = 'signal=gcs_core_after_frag_send';
-ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+ERROR HY000: Got error 6 "No such device or address" during COMMIT
INSERT INTO t1 VALUES (3, "cccccaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
SELECT * FROM t1;
f1 f2
2 bbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
3 cccccaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+connection node_2;
SELECT * FROM t1;
f1 f2
2 bbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
3 cccccaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+connection node_1;
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_gcs_max_packet_size.result b/mysql-test/suite/galera/r/galera_gcs_max_packet_size.result
index ce74f3db433..b97be5733ff 100644
--- a/mysql-test/suite/galera/r/galera_gcs_max_packet_size.result
+++ b/mysql-test/suite/galera/r/galera_gcs_max_packet_size.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE ten (f1 INTEGER);
INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
CREATE TABLE t1 (f1 INT PRIMARY KEY AUTO_INCREMENT, f2 INTEGER) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_gra_log.result b/mysql-test/suite/galera/r/galera_gra_log.result
index 777eda42046..33853188965 100644
--- a/mysql-test/suite/galera/r/galera_gra_log.result
+++ b/mysql-test/suite/galera/r/galera_gra_log.result
@@ -1,4 +1,6 @@
connection node_2;
+connection node_1;
+connection node_2;
SET SESSION wsrep_on=OFF;
CREATE TABLE t1 (f1 INTEGER);
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_gtid.result b/mysql-test/suite/galera/r/galera_gtid.result
index acc5eae9876..f27e2590898 100644
--- a/mysql-test/suite/galera/r/galera_gtid.result
+++ b/mysql-test/suite/galera/r/galera_gtid.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INT PRIMARY KEY);
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_gtid_slave.result b/mysql-test/suite/galera/r/galera_gtid_slave.result
index f8ca6322d22..7a3048231af 100644
--- a/mysql-test/suite/galera/r/galera_gtid_slave.result
+++ b/mysql-test/suite/galera/r/galera_gtid_slave.result
@@ -1,7 +1,9 @@
-connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2;
-START SLAVE;
connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_2;
+START SLAVE;
+connection node_3;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES(1);
@@ -11,27 +13,31 @@ insert into t2 values(22);
commit;
SELECT @@global.gtid_binlog_state;
@@global.gtid_binlog_state
-1-1-4
+2-3-4
connection node_2;
INSERT INTO t1 VALUES(2);
INSERT INTO t1 VALUES(3);
SELECT @@global.gtid_binlog_state;
@@global.gtid_binlog_state
-1-1-4,2-2-2
-connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+0-2-2,2-3-4
+connection node_1;
INSERT INTO t1 VALUES(4);
SELECT @@global.gtid_binlog_state;
@@global.gtid_binlog_state
-1-1-4,2-2-2,2-3-3
-connection node_1;
+1-1-1,2-3-4,2-2-6
+connection node_3;
DROP TABLE t1,t2;
connection node_2;
-connection node_3;
+connection node_1;
connection node_2;
STOP SLAVE;
RESET SLAVE ALL;
+SET GLOBAL wsrep_on=OFF;
reset master;
-connection node_3;
-reset master;
+SET GLOBAL wsrep_on=ON;
connection node_1;
+SET GLOBAL wsrep_on=OFF;
+reset master;
+SET GLOBAL wsrep_on=ON;
+connection node_3;
reset master;
diff --git a/mysql-test/suite/galera/r/galera_gtid_slave_sst_rsync.result b/mysql-test/suite/galera/r/galera_gtid_slave_sst_rsync.result
index 380a0235ac2..7c5519af495 100644
--- a/mysql-test/suite/galera/r/galera_gtid_slave_sst_rsync.result
+++ b/mysql-test/suite/galera/r/galera_gtid_slave_sst_rsync.result
@@ -1,38 +1,40 @@
-connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
#Connection 2
connection node_2;
START SLAVE;
-#Connection 1
-connection node_1;
+#Connection 3
+connection node_3;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 int unique) ENGINE=InnoDB;
INSERT INTO t2 VALUES(1,11);
INSERT INTO t2 VALUES(2,22);
INSERT INTO t2 VALUES(3,33);
SELECT @@global.gtid_binlog_state;
@@global.gtid_binlog_state
-1-1-4
+2-3-4
include/save_master_gtid.inc
#Connection 2
connection node_2;
include/sync_with_master_gtid.inc
SELECT @@global.gtid_binlog_state;
@@global.gtid_binlog_state
-1-1-4
+2-3-4
INSERT INTO t2 VALUES(4,44);
INSERT INTO t2 VALUES(5,55);
INSERT INTO t2 VALUES(6,66);
SELECT @@global.gtid_binlog_state;
@@global.gtid_binlog_state
-1-1-4,2-2-3
-#Connection 3
-connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+0-2-3,2-3-4
+#Connection 1
+connection node_1;
INSERT INTO t2 VALUES(7,77);
INSERT INTO t2 VALUES(8,88);
SELECT @@global.gtid_binlog_state;
@@global.gtid_binlog_state
-1-1-4,2-2-3,2-3-5
-#Connection 1
-connection node_1;
+1-1-2,2-3-4,2-2-7
+#Connection 3
+connection node_3;
CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
@@ -48,76 +50,78 @@ START TRANSACTION;
INSERT INTO t1 VALUES ('node2_committed_before');
INSERT INTO t1 VALUES ('node2_committed_before');
COMMIT;
-#Connection 3
-connection node_3;
+#Connection 1
+connection node_1;
+connection node_1;
connection node_2;
-connection node_3;
-Shutting down server ...
#Connection 2
connection node_2;
+Shutting down server ...
+#Connection 1
+connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
INSERT INTO t1 VALUES ('node1_committed_during');
INSERT INTO t1 VALUES ('node1_committed_during');
COMMIT;
-#Connection 3
-connection node_3;
+#Connection 2
+connection node_2;
Starting server ...
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node3_committed_after');
-INSERT INTO t1 VALUES ('node3_committed_after');
+INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES ('node2_committed_after');
COMMIT;
-#Connection 2
-connection node_2;
+#Connection 1
+connection node_1;
Select * from t1 order by f1;
f1
node1_committed_before
node1_committed_before
node1_committed_during
node1_committed_during
+node2_committed_after
+node2_committed_after
node2_committed_before
node2_committed_before
-node3_committed_after
-node3_committed_after
-#Connection 3
-connection node_3;
+#Connection 2
+connection node_2;
Select * from t1 order by f1;
f1
node1_committed_before
node1_committed_before
node1_committed_during
node1_committed_during
+node2_committed_after
+node2_committed_after
node2_committed_before
node2_committed_before
-node3_committed_after
-node3_committed_after
+#Connection 1
+connection node_1;
+SELECT @@global.gtid_binlog_state;
+@@global.gtid_binlog_state
+1-1-3,2-3-6,2-2-9
#Connection 2
connection node_2;
SELECT @@global.gtid_binlog_state;
@@global.gtid_binlog_state
-1-1-6,2-2-7,2-3-8
+0-1-7,0-2-8,2-3-6
#Connection 3
connection node_3;
-SELECT @@global.gtid_binlog_state;
-@@global.gtid_binlog_state
-1-1-6,2-2-7,2-3-8
-#Connection 1
-connection node_1;
SET AUTOCOMMIT=ON;
#Connection 2
connection node_2;
SET AUTOCOMMIT=ON;
-#Connection 3
-connection node_3;
+#Connection 1
+connection node_1;
SET AUTOCOMMIT=ON;
#Connection 2
connection node_2;
STOP slave;
INSERT INTO t1 VALUES ('node2_slave_stoped');
-#Connection 1
-connection node_1;
-INSERT INTO t1 VALUES ('node1_normal_entry');
+#Connection 3
+connection node_3;
+INSERT INTO t1 VALUES ('node3_normal_entry');
include/save_master_gtid.inc
#Connection 2
connection node_2;
@@ -130,31 +134,35 @@ count(*)
12
SELECT @@global.gtid_binlog_state;
@@global.gtid_binlog_state
-1-1-7,2-3-8,2-2-11
-#Connection 3
-connection node_3;
+0-1-7,0-2-11,2-3-7
+#Connection 1
+connection node_1;
SELECT count(*) from t1;
count(*)
12
SELECT @@global.gtid_binlog_state;
@@global.gtid_binlog_state
-1-1-7,2-3-8,2-2-11
-#Connection 1
-connection node_1;
+1-1-3,2-3-7,2-2-12
+#Connection 3
+connection node_3;
DROP TABLE t2,t1;
#Connection 2
connection node_2;
-#Connection 3
-connection node_3;
+#Connection 1
+connection node_1;
#Connection 2
connection node_2;
STOP SLAVE;
RESET SLAVE ALL;
+set global wsrep_on=OFF;
+reset master;
+set global wsrep_on=ON;
set global gtid_slave_pos="";
+#Connection 1
+connection node_1;
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
#Connection 3
connection node_3;
reset master;
-#Connection 1
-connection node_1;
-reset master;
diff --git a/mysql-test/suite/galera/r/galera_insert_ignore.result b/mysql-test/suite/galera/r/galera_insert_ignore.result
index 7057affaa0e..417524240e7 100644
--- a/mysql-test/suite/galera/r/galera_insert_ignore.result
+++ b/mysql-test/suite/galera/r/galera_insert_ignore.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SET GLOBAL wsrep_sync_wait = 15;
connection node_2;
SET GLOBAL wsrep_sync_wait = 15;
diff --git a/mysql-test/suite/galera/r/galera_insert_multi.result b/mysql-test/suite/galera/r/galera_insert_multi.result
index 913dd42403a..d7a4f01873e 100644
--- a/mysql-test/suite/galera/r/galera_insert_multi.result
+++ b/mysql-test/suite/galera/r/galera_insert_multi.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1),(2);
@@ -51,7 +53,7 @@ connection node_1;
COMMIT;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
ROLLBACK;
INSERT INTO t1 VALUES (1), (2);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
diff --git a/mysql-test/suite/galera/r/galera_ist_innodb_flush_logs.result b/mysql-test/suite/galera/r/galera_ist_innodb_flush_logs.result
index 481a85711be..cad00aaee48 100644
--- a/mysql-test/suite/galera/r/galera_ist_innodb_flush_logs.result
+++ b/mysql-test/suite/galera/r/galera_ist_innodb_flush_logs.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff b/mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff
index 9684e290778..fe54c515395 100644
--- a/mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_ist_mariabackup,debug.rdiff
@@ -1,6 +1,6 @@
---- r/galera_ist_mariabackup.result 2018-11-21 22:30:21.968817468 +0200
-+++ r/galera_ist_mariabackup.reject 2018-11-22 09:16:27.832601754 +0200
-@@ -285,3 +285,111 @@
+--- galera_ist_mariabackup.result 2018-12-11 13:33:56.728535840 +0100
++++ galera_ist_mariabackup.reject 2018-12-11 13:37:40.572535840 +0100
+@@ -290,3 +290,111 @@
DROP TABLE t1;
COMMIT;
SET AUTOCOMMIT=ON;
diff --git a/mysql-test/suite/galera/r/galera_ist_mariabackup.result b/mysql-test/suite/galera/r/galera_ist_mariabackup.result
index 8a7c02ab1b6..13f7d898a59 100644
--- a/mysql-test/suite/galera/r/galera_ist_mariabackup.result
+++ b/mysql-test/suite/galera/r/galera_ist_mariabackup.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
Performing State Transfer on a server that has been temporarily disconnected
@@ -47,6 +49,9 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
connection node_2;
Loading wsrep provider ...
+disconnect node_2;
+connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
INSERT INTO t1 VALUES ('node2_committed_after');
diff --git a/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs.result b/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs.result
index 7813b5a58fc..99b9c8d6c1b 100644
--- a/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs.result
+++ b/mysql-test/suite/galera/r/galera_ist_mariabackup_innodb_flush_logs.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
Performing State Transfer on a server that has been killed and restarted
connection node_1;
CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_ist_mysqldump.result b/mysql-test/suite/galera/r/galera_ist_mysqldump.result
index 296ecc2adc7..222eb7704e8 100644
--- a/mysql-test/suite/galera/r/galera_ist_mysqldump.result
+++ b/mysql-test/suite/galera/r/galera_ist_mysqldump.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
Setting SST method to mysqldump ...
call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to '127.0.0.1'");
call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos");
@@ -210,3 +212,4 @@ CALL mtr.add_suppression("Can't open and lock time zone table");
CALL mtr.add_suppression("Can't open and lock privilege tables");
CALL mtr.add_suppression("Info table is not ready to be used");
CALL mtr.add_suppression("Native table .* has the wrong structure");
+CALL mtr.add_suppression("Table \'mysql.gtid_slave_pos\' doesn\'t exist");
diff --git a/mysql-test/suite/galera/r/galera_ist_progress.result b/mysql-test/suite/galera/r/galera_ist_progress.result
index ed36a217624..9fc7febbea5 100644
--- a/mysql-test/suite/galera/r/galera_ist_progress.result
+++ b/mysql-test/suite/galera/r/galera_ist_progress.result
@@ -1,10 +1,6 @@
-connection node_2;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
-connection node_1;
-connection node_2;
SET SESSION wsrep_on = OFF;
SET SESSION wsrep_on = ON;
-connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (2);
@@ -16,13 +12,8 @@ INSERT INTO t1 VALUES (7);
INSERT INTO t1 VALUES (8);
INSERT INTO t1 VALUES (9);
INSERT INTO t1 VALUES (10);
-connection node_2;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0';
-connection node_1;
-connection node_2;
-connection node_1;
include/assert_grep.inc [Receiving IST: 11 writesets, seqnos]
include/assert_grep.inc [Receiving IST\.\.\. 0\.0% \( 0/11 events\) complete]
include/assert_grep.inc [Receiving IST\.\.\.100\.0% \(11/11 events\) complete]
-connection node_1;
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_ist_recv_bind.result b/mysql-test/suite/galera/r/galera_ist_recv_bind.result
index ffc751d8672..be72aa60ab0 100644
--- a/mysql-test/suite/galera/r/galera_ist_recv_bind.result
+++ b/mysql-test/suite/galera/r/galera_ist_recv_bind.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SELECT @@wsrep_provider_options LIKE '%ist.recv_bind = 127.0.0.1%';
@@wsrep_provider_options LIKE '%ist.recv_bind = 127.0.0.1%'
diff --git a/mysql-test/suite/galera/r/galera_ist_restart_joiner.result b/mysql-test/suite/galera/r/galera_ist_restart_joiner.result
index c81cecfae1c..80d2c90642b 100644
--- a/mysql-test/suite/galera/r/galera_ist_restart_joiner.result
+++ b/mysql-test/suite/galera/r/galera_ist_restart_joiner.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1));
@@ -13,7 +15,6 @@ Loading wsrep_provider ...
SET SESSION wsrep_on=OFF;
SET SESSION wsrep_on=ON;
connection node_1;
-connection node_1;
UPDATE t1 SET f2 = 'd' WHERE f1 > 3;
connection node_2;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_ist_rsync.result b/mysql-test/suite/galera/r/galera_ist_rsync.result
index 8a7c02ab1b6..13f7d898a59 100644
--- a/mysql-test/suite/galera/r/galera_ist_rsync.result
+++ b/mysql-test/suite/galera/r/galera_ist_rsync.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
Performing State Transfer on a server that has been temporarily disconnected
@@ -47,6 +49,9 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
connection node_2;
Loading wsrep provider ...
+disconnect node_2;
+connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
INSERT INTO t1 VALUES ('node2_committed_after');
diff --git a/mysql-test/suite/galera/r/galera_kill_ddl.result b/mysql-test/suite/galera/r/galera_kill_ddl.result
index b11353fcbcc..9d66140465c 100644
--- a/mysql-test/suite/galera/r/galera_kill_ddl.result
+++ b/mysql-test/suite/galera/r/galera_kill_ddl.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
call mtr.add_suppression("WSREP: Last Applied Action message in non-primary configuration from member .*");
connection node_1;
SET GLOBAL wsrep_provider_options = 'pc.ignore_sb=true';
diff --git a/mysql-test/suite/galera/r/galera_kill_largechanges.result b/mysql-test/suite/galera/r/galera_kill_largechanges.result
index d85d421c164..99a8005e03e 100644
--- a/mysql-test/suite/galera/r/galera_kill_largechanges.result
+++ b/mysql-test/suite/galera/r/galera_kill_largechanges.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_kill_smallchanges.result b/mysql-test/suite/galera/r/galera_kill_smallchanges.result
index f500db8871b..9d52de585c2 100644
--- a/mysql-test/suite/galera/r/galera_kill_smallchanges.result
+++ b/mysql-test/suite/galera/r/galera_kill_smallchanges.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_last_committed_id.result b/mysql-test/suite/galera/r/galera_last_committed_id.result
new file mode 100644
index 00000000000..fabc5337576
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_last_committed_id.result
@@ -0,0 +1,38 @@
+connection node_2;
+connection node_1;
+SELECT WSREP_LAST_WRITTEN_GTID() = '00000000-0000-0000-0000-000000000000:-1';
+WSREP_LAST_WRITTEN_GTID() = '00000000-0000-0000-0000-000000000000:-1'
+1
+wsrep_last_committed_id_match
+1
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+connection node_1;
+SELECT WSREP_LAST_WRITTEN_GTID() = '00000000-0000-0000-0000-000000000000:-1';
+WSREP_LAST_WRITTEN_GTID() = '00000000-0000-0000-0000-000000000000:-1'
+1
+connection node_1a;
+INSERT INTO t1 VALUES (1);
+connection node_1;
+wsrep_last_committed_id_match
+1
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+SELECT WSREP_LAST_WRITTEN_GTID() = '00000000-0000-0000-0000-000000000000:-1';
+WSREP_LAST_WRITTEN_GTID() = '00000000-0000-0000-0000-000000000000:-1'
+1
+INSERT INTO t1 VALUES (1);
+SELECT WSREP_LAST_WRITTEN_GTID() = '00000000-0000-0000-0000-000000000000:-1';
+WSREP_LAST_WRITTEN_GTID() = '00000000-0000-0000-0000-000000000000:-1'
+1
+wsrep_last_committed_id_match
+1
+COMMIT;
+wsrep_last_committed_id_advanced
+1
+wsrep_last_committed_id_advanced
+1
+SET AUTOCOMMIT=ON;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_load_data.result b/mysql-test/suite/galera/r/galera_load_data.result
index f0737b944e1..97d82f0f534 100644
--- a/mysql-test/suite/galera/r/galera_load_data.result
+++ b/mysql-test/suite/galera/r/galera_load_data.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
create database cardtest02;
use cardtest02;
diff --git a/mysql-test/suite/galera/r/galera_lock_table.result b/mysql-test/suite/galera/r/galera_lock_table.result
index ce529deb22c..c3df1749ada 100644
--- a/mysql-test/suite/galera/r/galera_lock_table.result
+++ b/mysql-test/suite/galera/r/galera_lock_table.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE t2 (id INT PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_log_bin.result b/mysql-test/suite/galera/r/galera_log_bin.result
index a6f0ef12be1..12e5e59a426 100644
--- a/mysql-test/suite/galera/r/galera_log_bin.result
+++ b/mysql-test/suite/galera/r/galera_log_bin.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
CREATE TABLE t2 (id INT) ENGINE=InnoDB;
@@ -66,4 +68,6 @@ mysqld-bin.000003 # Query # # use `test`; ALTER TABLE t1 ADD COLUMN f2 INTEGER
DROP TABLE t1;
DROP TABLE t2;
connection node_1;
+SET GLOBAL wsrep_on=OFF;
RESET MASTER;
+SET GLOBAL wsrep_on=ON;
diff --git a/mysql-test/suite/galera/r/galera_log_output_csv.result b/mysql-test/suite/galera/r/galera_log_output_csv.result
index 5cb61b36332..efe8c73c986 100644
--- a/mysql-test/suite/galera/r/galera_log_output_csv.result
+++ b/mysql-test/suite/galera/r/galera_log_output_csv.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
SELECT COUNT(*) > 0 FROM mysql.general_log;
diff --git a/mysql-test/suite/galera/r/galera_many_columns.result b/mysql-test/suite/galera/r/galera_many_columns.result
index db8a8f5ec9d..64e97f11fa7 100644
--- a/mysql-test/suite/galera/r/galera_many_columns.result
+++ b/mysql-test/suite/galera/r/galera_many_columns.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
INSERT INTO t1 (f1) VALUES (DEFAULT);
connection node_2;
SELECT f1 = 'ABC', f1017 = 'ABC' FROM t1;
@@ -19,7 +21,7 @@ UPDATE t1 SET f2 = 'CDE' WHERE f1 = 'XYZ' AND f1017 = 'XYZ';
COMMIT;
connection node_1;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
ROLLBACK;
connection node_2;
ROLLBACK;
diff --git a/mysql-test/suite/galera/r/galera_many_indexes.result b/mysql-test/suite/galera/r/galera_many_indexes.result
index 5691eef4c00..963d3552252 100644
--- a/mysql-test/suite/galera/r/galera_many_indexes.result
+++ b/mysql-test/suite/galera/r/galera_many_indexes.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 VARCHAR(767) PRIMARY KEY) ENGINE=InnoDB;
CREATE UNIQUE INDEX i63 ON t1(f1);
CREATE UNIQUE INDEX i62 ON t1(f1);
@@ -91,6 +93,7 @@ COUNT(*) = 1
INSERT INTO t1 VALUES (REPEAT('b', 767));
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
connection node_1;
SELECT COUNT(*) = 2 FROM t1;
@@ -98,6 +101,7 @@ COUNT(*) = 2
1
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DELETE FROM t1 WHERE f1 = REPEAT('b', 767);
connection node_1;
@@ -129,5 +133,5 @@ connection node_1;
COMMIT;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_many_rows.result b/mysql-test/suite/galera/r/galera_many_rows.result
index d6669fe6bdf..566bc59f8ab 100644
--- a/mysql-test/suite/galera/r/galera_many_rows.result
+++ b/mysql-test/suite/galera/r/galera_many_rows.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_1;
@@ -37,6 +39,6 @@ connection node_1;
COMMIT;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
DROP TABLE t1;
DROP TABLE ten;
diff --git a/mysql-test/suite/galera/r/galera_many_tables_nopk.result b/mysql-test/suite/galera/r/galera_many_tables_nopk.result
index 573ce758a13..2a226defcc7 100644
--- a/mysql-test/suite/galera/r/galera_many_tables_nopk.result
+++ b/mysql-test/suite/galera/r/galera_many_tables_nopk.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
@@ -18,6 +20,6 @@ connection node_1;
COMMIT;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
DROP SCHEMA test;
CREATE SCHEMA test;
diff --git a/mysql-test/suite/galera/r/galera_many_tables_pk.result b/mysql-test/suite/galera/r/galera_many_tables_pk.result
index 67624d5edb0..2700df8ebe4 100644
--- a/mysql-test/suite/galera/r/galera_many_tables_pk.result
+++ b/mysql-test/suite/galera/r/galera_many_tables_pk.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
SELECT COUNT(*) = 100 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME LIKE 't%';
@@ -23,7 +25,7 @@ connection node_1;
COMMIT;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
include/diff_servers.inc [servers=1 2]
DROP SCHEMA test;
CREATE SCHEMA test;
diff --git a/mysql-test/suite/galera/r/galera_mdev_10812.result b/mysql-test/suite/galera/r/galera_mdev_10812.result
index de0a08a3794..16eacc6beab 100644
--- a/mysql-test/suite/galera/r/galera_mdev_10812.result
+++ b/mysql-test/suite/galera/r/galera_mdev_10812.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# MDEV-10812: On COM_STMT_CLOSE/COM_QUIT, when wsrep_conflict_state
# is ABORTED, it causes wrong response to be sent to the client
diff --git a/mysql-test/suite/galera/r/galera_mdev_13787.result b/mysql-test/suite/galera/r/galera_mdev_13787.result
index b1caec0283c..4d0770918c0 100644
--- a/mysql-test/suite/galera/r/galera_mdev_13787.result
+++ b/mysql-test/suite/galera/r/galera_mdev_13787.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
create table t(a int);
insert into t select 1;
diff --git a/mysql-test/suite/galera/r/galera_mdev_15611.result b/mysql-test/suite/galera/r/galera_mdev_15611.result
index 9ea1684494a..5461f8f03fb 100644
--- a/mysql-test/suite/galera/r/galera_mdev_15611.result
+++ b/mysql-test/suite/galera/r/galera_mdev_15611.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (
id int primary key
diff --git a/mysql-test/suite/galera/r/galera_mdl_race.result b/mysql-test/suite/galera/r/galera_mdl_race.result
index 968258e8ec3..cf747ed8efb 100644
--- a/mysql-test/suite/galera/r/galera_mdl_race.result
+++ b/mysql-test/suite/galera/r/galera_mdl_race.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb;
INSERT INTO t1 VALUES (1, 'a');
@@ -22,7 +24,7 @@ SET DEBUG_SYNC = "now SIGNAL signal.wsrep_before_mdl_wait";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_after_BF_victim_lock";
UNLOCK TABLES;
connection node_1;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a';
COUNT(*) = 1
1
diff --git a/mysql-test/suite/galera/r/galera_multi_database.result b/mysql-test/suite/galera/r/galera_multi_database.result
index f6242de663b..a9d58d5d0e2 100644
--- a/mysql-test/suite/galera/r/galera_multi_database.result
+++ b/mysql-test/suite/galera/r/galera_multi_database.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE DATABASE d1;
CREATE TABLE d1.t1(f1 INTEGER) ENGINE=InnoDB;
CREATE DATABASE d2;
diff --git a/mysql-test/suite/galera/r/galera_myisam_autocommit.result b/mysql-test/suite/galera/r/galera_myisam_autocommit.result
index e9578a261e6..6213e8f6380 100644
--- a/mysql-test/suite/galera/r/galera_myisam_autocommit.result
+++ b/mysql-test/suite/galera/r/galera_myisam_autocommit.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (2), (3);
diff --git a/mysql-test/suite/galera/r/galera_myisam_transactions.result b/mysql-test/suite/galera/r/galera_myisam_transactions.result
index 25796c309d1..091c5ffb6f6 100644
--- a/mysql-test/suite/galera/r/galera_myisam_transactions.result
+++ b/mysql-test/suite/galera/r/galera_myisam_transactions.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
CREATE TABLE t2 (f1 INTEGER) ENGINE=MyISAM;
CREATE TABLE t3 (f1 INTEGER) ENGINE=MyISAM;
diff --git a/mysql-test/suite/galera/r/galera_nopk_bit.result b/mysql-test/suite/galera/r/galera_nopk_bit.result
index 21da039df09..97ded793c08 100644
--- a/mysql-test/suite/galera/r/galera_nopk_bit.result
+++ b/mysql-test/suite/galera/r/galera_nopk_bit.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 BIT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (NULL),(0),(b'1');
connection node_2;
@@ -28,6 +30,6 @@ connection node_1;
COMMIT;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
DROP TABLE t1;
DROP TABLE t2;
diff --git a/mysql-test/suite/galera/r/galera_nopk_blob.result b/mysql-test/suite/galera/r/galera_nopk_blob.result
index 53e04f72d1e..6a3cee516c9 100644
--- a/mysql-test/suite/galera/r/galera_nopk_blob.result
+++ b/mysql-test/suite/galera/r/galera_nopk_blob.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 BLOB) ENGINE=InnoDB;
INSERT INTO t1 VALUES (NULL),('abc');
connection node_2;
@@ -28,6 +30,6 @@ connection node_1;
COMMIT;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
DROP TABLE t1;
DROP TABLE t2;
diff --git a/mysql-test/suite/galera/r/galera_nopk_large_varchar.result b/mysql-test/suite/galera/r/galera_nopk_large_varchar.result
index a83cf7f2d91..6d29306996b 100644
--- a/mysql-test/suite/galera/r/galera_nopk_large_varchar.result
+++ b/mysql-test/suite/galera/r/galera_nopk_large_varchar.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 VARCHAR(8000)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (NULL),(CONCAT(REPEAT('x', 7999), 'a'));
connection node_2;
@@ -31,6 +33,6 @@ connection node_1;
COMMIT;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
DROP TABLE t1;
DROP TABLE t2;
diff --git a/mysql-test/suite/galera/r/galera_nopk_unicode.result b/mysql-test/suite/galera/r/galera_nopk_unicode.result
index b2a8bb63df9..587ba9285da 100644
--- a/mysql-test/suite/galera/r/galera_nopk_unicode.result
+++ b/mysql-test/suite/galera/r/galera_nopk_unicode.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (
f1 VARCHAR(255),
KEY (f1)
@@ -19,7 +21,7 @@ connection node_1;
COMMIT;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
SELECT f1 = 'текст2' FROM t1;
f1 = 'текст2'
1
diff --git a/mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result b/mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result
index ab56a8a2aa6..48625b3ba4a 100644
--- a/mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result
+++ b/mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE t2 (id INT PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
@@ -8,10 +10,10 @@ INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1);
connection node_2a;
SET SESSION wsrep_sync_wait=0;
-SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%applied write set%';
+SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%committing%';
COUNT(*) = 1
1
-SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock';
+SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%Waiting for table metadata lock%';
COUNT(*) = 1
1
SELECT COUNT(*) = 0 FROM t1;
@@ -30,7 +32,7 @@ COUNT(*) = 1
SELECT COUNT(*) = 1 FROM t2;
COUNT(*) = 1
1
-SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'committed%';
+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%committed%';
COUNT(*) = 2
1
SET GLOBAL wsrep_slave_threads = 1;;
diff --git a/mysql-test/suite/galera/r/galera_parallel_autoinc_largetrx.result b/mysql-test/suite/galera/r/galera_parallel_autoinc_largetrx.result
index 827b2aa9dac..d2e09d7084f 100644
--- a/mysql-test/suite/galera/r/galera_parallel_autoinc_largetrx.result
+++ b/mysql-test/suite/galera/r/galera_parallel_autoinc_largetrx.result
@@ -1,5 +1,7 @@
+connection node_2;
+connection node_1;
connection node_1;
-CREATE TABLE ten (f1 INTEGER);
+CREATE TABLE ten (f1 INTEGER) engine=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB;
connection node_2;
@@ -11,18 +13,26 @@ INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;;
connection node_2;
INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;;
connection node_1;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+30000
+SELECT COUNT(DISTINCT f1) FROM t1;
+COUNT(DISTINCT f1)
+30000
connection node_1a;
-connection node_2;
SELECT COUNT(*) FROM t1;
COUNT(*)
30000
SELECT COUNT(DISTINCT f1) FROM t1;
COUNT(DISTINCT f1)
30000
-SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE
-USER = 'system user' AND STATE NOT LIKE 'InnoDB%';
+connection node_2;
+SELECT COUNT(*) FROM t1;
COUNT(*)
-3
+30000
+SELECT COUNT(DISTINCT f1) FROM t1;
+COUNT(DISTINCT f1)
+30000
connection default;
DROP TABLE t1;
DROP TABLE ten;
diff --git a/mysql-test/suite/galera/r/galera_parallel_autoinc_manytrx.result b/mysql-test/suite/galera/r/galera_parallel_autoinc_manytrx.result
index c8c07221cb1..09a415d47eb 100644
--- a/mysql-test/suite/galera/r/galera_parallel_autoinc_manytrx.result
+++ b/mysql-test/suite/galera/r/galera_parallel_autoinc_manytrx.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE ten (f1 INTEGER);
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
@@ -12,7 +14,7 @@ COUNT(*) = 20000
SELECT COUNT(DISTINCT f1) = 20000 FROM t1;
COUNT(DISTINCT f1) = 20000
1
-SELECT COUNT(*) = 4 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE 'committed%';
+SELECT COUNT(*) = 4 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE 'wsrep applier committed%';
COUNT(*) = 4
1
connection default;
diff --git a/mysql-test/suite/galera/r/galera_parallel_simple.result b/mysql-test/suite/galera/r/galera_parallel_simple.result
index 3f657a0479e..d95abefdc24 100644
--- a/mysql-test/suite/galera/r/galera_parallel_simple.result
+++ b/mysql-test/suite/galera/r/galera_parallel_simple.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT) ENGINE=InnoDB;
CREATE TABLE t2 (id INT) ENGINE=InnoDB;
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_pc_recovery.result b/mysql-test/suite/galera/r/galera_pc_recovery.result
new file mode 100644
index 00000000000..17a43d17211
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_pc_recovery.result
@@ -0,0 +1,37 @@
+connection node_2;
+connection node_1;
+connection node_1;
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+connection node_1;
+SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+VARIABLE_VALUE = 'Primary'
+1
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+connection node_2;
+SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+VARIABLE_VALUE = 2
+1
+SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+VARIABLE_VALUE = 'Primary'
+1
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+DROP TABLE t1;
+connection node_1;
+CALL mtr.add_suppression("points to own listening address, blacklisting");
+CALL mtr.add_suppression("non weight changing install in S_PRIM");
+CALL mtr.add_suppression("No re-merged primary component found");
+connection node_2;
+CALL mtr.add_suppression("points to own listening address, blacklisting");
+CALL mtr.add_suppression("non weight changing install in S_PRIM");
+CALL mtr.add_suppression("No re-merged primary component found");
diff --git a/mysql-test/suite/galera/r/galera_pk_bigint_signed.result b/mysql-test/suite/galera/r/galera_pk_bigint_signed.result
index 807ab62c548..0b538778204 100644
--- a/mysql-test/suite/galera/r/galera_pk_bigint_signed.result
+++ b/mysql-test/suite/galera/r/galera_pk_bigint_signed.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 BIGINT SIGNED PRIMARY KEY, f2 VARCHAR(5)) ENGINE=InnoDB;
INSERT INTO t1 VALUES
(-9223372036854775808, 'min'),
@@ -27,6 +29,6 @@ COMMIT;
SET AUTOCOMMIT=ON;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
SET AUTOCOMMIT=ON;
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_pk_bigint_unsigned.result b/mysql-test/suite/galera/r/galera_pk_bigint_unsigned.result
index c94b7e2314a..9442f79cd14 100644
--- a/mysql-test/suite/galera/r/galera_pk_bigint_unsigned.result
+++ b/mysql-test/suite/galera/r/galera_pk_bigint_unsigned.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 BIGINT UNSIGNED PRIMARY KEY, f2 VARCHAR(5)) ENGINE=InnoDB;
INSERT INTO t1 VALUES
(18446744073709551615, 'max')
@@ -24,6 +26,6 @@ COMMIT;
SET AUTOCOMMIT=ON;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
SET AUTOCOMMIT=ON;
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_prepared_statement.result b/mysql-test/suite/galera/r/galera_prepared_statement.result
index 6f546b32819..b84f25e9929 100644
--- a/mysql-test/suite/galera/r/galera_prepared_statement.result
+++ b/mysql-test/suite/galera/r/galera_prepared_statement.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 CHAR(5)) ENGINE=InnoDB;
CREATE TABLE t2 (f1 CHAR(5)) ENGINE=InnoDB;
CREATE TABLE t3 (f1 CHAR(5)) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_query_cache.result b/mysql-test/suite/galera/r/galera_query_cache.result
index e64c9438646..8f5bc4b6f37 100644
--- a/mysql-test/suite/galera/r/galera_query_cache.result
+++ b/mysql-test/suite/galera/r/galera_query_cache.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_query_cache_sync_wait.result b/mysql-test/suite/galera/r/galera_query_cache_sync_wait.result
index a994852a78a..0f2f61ea97f 100644
--- a/mysql-test/suite/galera/r/galera_query_cache_sync_wait.result
+++ b/mysql-test/suite/galera/r/galera_query_cache_sync_wait.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_read_only.result b/mysql-test/suite/galera/r/galera_read_only.result
index 4c2523f8691..fe8b45fa596 100644
--- a/mysql-test/suite/galera/r/galera_read_only.result
+++ b/mysql-test/suite/galera/r/galera_read_only.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
SET GLOBAL read_only=TRUE;
diff --git a/mysql-test/suite/galera/r/galera_repl_key_format_flat16.result b/mysql-test/suite/galera/r/galera_repl_key_format_flat16.result
index 2470f59c497..3d421216f93 100644
--- a/mysql-test/suite/galera/r/galera_repl_key_format_flat16.result
+++ b/mysql-test/suite/galera/r/galera_repl_key_format_flat16.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SET GLOBAL wsrep_provider_options = 'repl.key_format=FLAT16';
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_repl_max_ws_size.result b/mysql-test/suite/galera/r/galera_repl_max_ws_size.result
index da24a741351..0528df7b9f6 100644
--- a/mysql-test/suite/galera/r/galera_repl_max_ws_size.result
+++ b/mysql-test/suite/galera/r/galera_repl_max_ws_size.result
@@ -1,8 +1,10 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 VARCHAR(512)) ENGINE=InnoDB;
SET GLOBAL wsrep_provider_options = 'repl.max_ws_size=512';
INSERT INTO t1 VALUES (REPEAT('a', 512));
-ERROR HY000: Got error 90 "Message too long" during COMMIT
+ERROR HY000: Got error 5 "Input/output error" during COMMIT
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
diff --git a/mysql-test/suite/galera/r/galera_restart_nochanges.result b/mysql-test/suite/galera/r/galera_restart_nochanges.result
index b35ae50e2fb..09f8d9a586b 100644
--- a/mysql-test/suite/galera/r/galera_restart_nochanges.result
+++ b/mysql-test/suite/galera/r/galera_restart_nochanges.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_restart_on_unknown_option.result b/mysql-test/suite/galera/r/galera_restart_on_unknown_option.result
index ba6f30fcf30..6e672c2d444 100644
--- a/mysql-test/suite/galera/r/galera_restart_on_unknown_option.result
+++ b/mysql-test/suite/galera/r/galera_restart_on_unknown_option.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CALL mtr.add_suppression("Aborting");
CALL mtr.add_suppression("unknown option '--galera-unknown-option'");
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_roles.result b/mysql-test/suite/galera/r/galera_roles.result
index bef89acfc92..b1124b597c5 100644
--- a/mysql-test/suite/galera/r/galera_roles.result
+++ b/mysql-test/suite/galera/r/galera_roles.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# Testing CREATE/GRANT role
#
diff --git a/mysql-test/suite/galera/r/galera_rsu_add_pk.result b/mysql-test/suite/galera/r/galera_rsu_add_pk.result
index 4c79da154e2..9b068ba30d1 100644
--- a/mysql-test/suite/galera/r/galera_rsu_add_pk.result
+++ b/mysql-test/suite/galera/r/galera_rsu_add_pk.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE ten (f1 INTEGER);
INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
diff --git a/mysql-test/suite/galera/r/galera_rsu_drop_pk.result b/mysql-test/suite/galera/r/galera_rsu_drop_pk.result
index f64649ef4e2..7d731955691 100644
--- a/mysql-test/suite/galera/r/galera_rsu_drop_pk.result
+++ b/mysql-test/suite/galera/r/galera_rsu_drop_pk.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE ten (f1 INTEGER);
INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
diff --git a/mysql-test/suite/galera/r/galera_rsu_error.result b/mysql-test/suite/galera/r/galera_rsu_error.result
index 5c16e34b492..f78e8fa544c 100644
--- a/mysql-test/suite/galera/r/galera_rsu_error.result
+++ b/mysql-test/suite/galera/r/galera_rsu_error.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
INSERT INTO t1 VALUES (1), (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_rsu_simple.result b/mysql-test/suite/galera/r/galera_rsu_simple.result
index d0ddcfb4d64..d75ef094065 100644
--- a/mysql-test/suite/galera/r/galera_rsu_simple.result
+++ b/mysql-test/suite/galera/r/galera_rsu_simple.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
connection node_2;
SET SESSION wsrep_OSU_method = "RSU";
diff --git a/mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result b/mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result
index a103e810588..310611a0e49 100644
--- a/mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result
+++ b/mysql-test/suite/galera/r/galera_rsu_wsrep_desync.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
SET GLOBAL wsrep_desync=1;
diff --git a/mysql-test/suite/galera/r/galera_sbr.result b/mysql-test/suite/galera/r/galera_sbr.result
index 0bdaeef5b8a..c5fdecece0e 100644
--- a/mysql-test/suite/galera/r/galera_sbr.result
+++ b/mysql-test/suite/galera/r/galera_sbr.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SET SESSION binlog_format = 'STATEMENT';
Warnings:
diff --git a/mysql-test/suite/galera/r/galera_sbr_binlog.result b/mysql-test/suite/galera/r/galera_sbr_binlog.result
index 0bdaeef5b8a..c5fdecece0e 100644
--- a/mysql-test/suite/galera/r/galera_sbr_binlog.result
+++ b/mysql-test/suite/galera/r/galera_sbr_binlog.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SET SESSION binlog_format = 'STATEMENT';
Warnings:
diff --git a/mysql-test/suite/galera/r/galera_schema_dirty_reads.result b/mysql-test/suite/galera/r/galera_schema_dirty_reads.result
index edf20da92c6..fbac9ff4eb7 100644
--- a/mysql-test/suite/galera/r/galera_schema_dirty_reads.result
+++ b/mysql-test/suite/galera/r/galera_schema_dirty_reads.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
USE information_schema;
SELECT * FROM SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep_dirty_reads";
VARIABLE_NAME VARIABLE_VALUE
diff --git a/mysql-test/suite/galera/r/galera_serializable.result b/mysql-test/suite/galera/r/galera_serializable.result
index be3f93a081f..e3785663271 100644
--- a/mysql-test/suite/galera/r/galera_serializable.result
+++ b/mysql-test/suite/galera/r/galera_serializable.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
@@ -9,7 +11,7 @@ connection node_2;
INSERT INTO t1 VALUES (1,1);
connection node_1;
SELECT * FROM t1;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
ROLLBACK;
DELETE FROM t1;
connection node_1;
@@ -22,7 +24,7 @@ connection node_2;
UPDATE t1 SET f2 = 2;
connection node_1;
UPDATE t1 SET f2 = 3;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
ROLLBACK;
DELETE FROM t1;
connection node_1;
@@ -33,5 +35,5 @@ connection node_2;
INSERT INTO t1 VALUES (1,2);
connection node_1;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_server.result b/mysql-test/suite/galera/r/galera_server.result
index cc08b826e82..5130dee3459 100644
--- a/mysql-test/suite/galera/r/galera_server.result
+++ b/mysql-test/suite/galera/r/galera_server.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
# On node_1
CREATE SERVER s1
diff --git a/mysql-test/suite/galera/r/galera_sp_bf_abort.result b/mysql-test/suite/galera/r/galera_sp_bf_abort.result
new file mode 100644
index 00000000000..9216cc4fa5a
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_sp_bf_abort.result
@@ -0,0 +1,356 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1));
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+connection node_1;
+CREATE PROCEDURE proc_update_insert()
+BEGIN
+UPDATE t1 SET f2 = 'b';
+INSERT INTO t1 VALUES (4, 'd');
+END|
+INSERT INTO t1 VALUES (1, 'a'), (3, 'a');
+SET SESSION wsrep_sync_wait = 0;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO t1 VALUES (2, 'c');
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+connection node_1;
+CALL proc_update_insert;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+connection node_1;
+SET SESSION wsrep_sync_wait = default;
+SELECT * FROM t1;
+f1 f2
+1 b
+2 c
+3 b
+4 d
+wsrep_local_replays
+1
+DELETE FROM t1;
+connection node_1;
+CREATE PROCEDURE proc_update_insert_with_exit_handler()
+BEGIN
+DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN END;
+UPDATE t1 SET f2 = 'b';
+INSERT INTO t1 VALUES (4, 'd');
+END|
+INSERT INTO t1 VALUES (1, 'a'), (3, 'a');
+SET SESSION wsrep_sync_wait = 0;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO t1 VALUES (2, 'c');
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+connection node_1;
+CALL proc_update_insert_with_exit_handler;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+connection node_1;
+SET SESSION wsrep_sync_wait = default;
+SELECT * FROM t1;
+f1 f2
+1 b
+2 c
+3 b
+4 d
+wsrep_local_replays
+1
+DELETE FROM t1;
+connection node_1;
+CREATE PROCEDURE proc_update_insert_with_continue_handler()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION BEGIN END;
+UPDATE t1 SET f2 = 'b';
+INSERT INTO t1 VALUES (4, 'd');
+END|
+INSERT INTO t1 VALUES (1, 'a'), (3, 'a');
+SET SESSION wsrep_sync_wait = 0;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO t1 VALUES (2, 'c');
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+connection node_1;
+CALL proc_update_insert_with_continue_handler;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+connection node_1;
+SET SESSION wsrep_sync_wait = default;
+SELECT * FROM t1;
+f1 f2
+1 b
+2 c
+3 b
+4 d
+wsrep_local_replays
+1
+DELETE FROM t1;
+connection node_1;
+CREATE PROCEDURE proc_update_insert_transaction()
+BEGIN
+START TRANSACTION;
+UPDATE t1 SET f2 = 'b';
+INSERT INTO t1 VALUES (4, 'd');
+COMMIT;
+END|
+INSERT INTO t1 VALUES (1, 'a'), (3, 'a');
+SET SESSION wsrep_sync_wait = 0;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO t1 VALUES (2, 'c');
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+connection node_1;
+CALL proc_update_insert_transaction;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+connection node_1;
+SET SESSION wsrep_sync_wait = default;
+SELECT * FROM t1;
+f1 f2
+1 b
+2 c
+3 b
+4 d
+wsrep_local_replays
+1
+DELETE FROM t1;
+connection node_1;
+CREATE PROCEDURE proc_update_insert_transaction_with_continue_handler()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION BEGIN END;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'b';
+INSERT INTO t1 VALUES (4, 'd');
+COMMIT;
+END|
+INSERT INTO t1 VALUES (1, 'a'), (3, 'a');
+SET SESSION wsrep_sync_wait = 0;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO t1 VALUES (2, 'c');
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+connection node_1;
+CALL proc_update_insert_transaction_with_continue_handler;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+connection node_1;
+SET SESSION wsrep_sync_wait = default;
+SELECT * FROM t1;
+f1 f2
+1 b
+2 c
+3 b
+4 d
+wsrep_local_replays
+1
+DELETE FROM t1;
+connection node_1;
+CREATE PROCEDURE proc_update_insert_transaction_with_exit_handler()
+BEGIN
+DECLARE EXIT HANDLER FOR SQLEXCEPTION BEGIN END;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'b';
+INSERT INTO t1 VALUES (4, 'd');
+COMMIT;
+END|
+INSERT INTO t1 VALUES (1, 'a'), (3, 'a');
+SET SESSION wsrep_sync_wait = 0;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO t1 VALUES (2, 'c');
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+connection node_1;
+CALL proc_update_insert_transaction_with_exit_handler;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+connection node_1;
+SET SESSION wsrep_sync_wait = default;
+SELECT * FROM t1;
+f1 f2
+1 b
+2 c
+3 b
+4 d
+wsrep_local_replays
+1
+DELETE FROM t1;
+connection node_1;
+CREATE PROCEDURE proc_insert_insert_conflict()
+BEGIN
+INSERT INTO t1 VALUES (2, 'd');
+INSERT INTO t1 VALUES (4, 'd');
+END|
+INSERT INTO t1 VALUES (1, 'a'), (3, 'a');
+SET SESSION wsrep_sync_wait = 0;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO t1 VALUES (2, 'c');
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+connection node_1;
+CALL proc_insert_insert_conflict;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+connection node_1;
+Got one of the listed errors
+SET SESSION wsrep_sync_wait = default;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 c
+3 a
+wsrep_local_replays
+1
+DELETE FROM t1;
+connection node_1;
+CREATE PROCEDURE proc_insert_insert_conflict_with_exit_handler()
+BEGIN
+DECLARE EXIT HANDLER FOR SQLEXCEPTION SELECT "Conflict exit handler";
+INSERT INTO t1 VALUES (2, 'd');
+INSERT INTO t1 VALUES (4, 'd');
+END|
+INSERT INTO t1 VALUES (1, 'a'), (3, 'a');
+SET SESSION wsrep_sync_wait = 0;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO t1 VALUES (2, 'c');
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+connection node_1;
+CALL proc_insert_insert_conflict_with_exit_handler;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+connection node_1;
+Conflict exit handler
+Conflict exit handler
+SET SESSION wsrep_sync_wait = default;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 c
+3 a
+wsrep_local_replays
+1
+DELETE FROM t1;
+connection node_1;
+CREATE PROCEDURE proc_insert_insert_conflict_with_continue_handler()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION SELECT "Conflict continue handler";
+INSERT INTO t1 VALUES (2, 'd');
+INSERT INTO t1 VALUES (4, 'd');
+END|
+INSERT INTO t1 VALUES (1, 'a'), (3, 'a');
+SET SESSION wsrep_sync_wait = 0;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+INSERT INTO t1 VALUES (2, 'c');
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+connection node_1;
+CALL proc_insert_insert_conflict_with_continue_handler;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+connection node_1;
+Conflict continue handler
+Conflict continue handler
+SET SESSION wsrep_sync_wait = default;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 c
+3 a
+4 d
+wsrep_local_replays
+1
+DELETE FROM t1;
+DROP PROCEDURE proc_update_insert;
+DROP PROCEDURE proc_update_insert_with_continue_handler;
+DROP PROCEDURE proc_update_insert_with_exit_handler;
+DROP PROCEDURE proc_update_insert_transaction;
+DROP PROCEDURE proc_update_insert_transaction_with_continue_handler;
+DROP PROCEDURE proc_update_insert_transaction_with_exit_handler;
+DROP PROCEDURE proc_insert_insert_conflict;
+DROP PROCEDURE proc_insert_insert_conflict_with_exit_handler;
+DROP PROCEDURE proc_insert_insert_conflict_with_continue_handler;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_sp_insert_parallel.result b/mysql-test/suite/galera/r/galera_sp_insert_parallel.result
new file mode 100644
index 00000000000..3f072be7004
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_sp_insert_parallel.result
@@ -0,0 +1,41 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+CREATE PROCEDURE proc_insert()
+BEGIN
+DECLARE i INT;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION BEGIN END;
+SET i = 0;
+WHILE i < 1000 DO
+INSERT IGNORE INTO t1 (f1, f2)
+VALUES (FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+(FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+(FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+(FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+(FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+(FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+(FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+(FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+(FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15));
+SET i = i + 1;
+END WHILE;
+END|
+connection node_1;
+SELECT 0;
+0
+0
+SET SESSION wsrep_sync_wait = 0;
+CALL proc_insert;
+connection node_2;
+SELECT 0;
+0
+0
+SET SESSION wsrep_sync_wait = 0;
+CALL proc_insert;
+connection node_1;
+SET SESSION wsrep_sync_wait = default;
+connection node_2;
+SET SESSION wsrep_sync_wait = default;
+connection node_1;
+DROP PROCEDURE proc_insert;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_sql_log_bin_zero.result b/mysql-test/suite/galera/r/galera_sql_log_bin_zero.result
index 14407c917a1..e9f81192386 100644
--- a/mysql-test/suite/galera/r/galera_sql_log_bin_zero.result
+++ b/mysql-test/suite/galera/r/galera_sql_log_bin_zero.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
SET SESSION sql_log_bin = 0;
INSERT INTO t1 VALUES (1);
diff --git a/mysql-test/suite/galera/r/galera_ssl.result b/mysql-test/suite/galera/r/galera_ssl.result
index 022d06319b8..ec3b717e3f3 100644
--- a/mysql-test/suite/galera/r/galera_ssl.result
+++ b/mysql-test/suite/galera/r/galera_ssl.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment';
VARIABLE_VALUE = 'Synced'
1
diff --git a/mysql-test/suite/galera/r/galera_ssl_compression.result b/mysql-test/suite/galera/r/galera_ssl_compression.result
index 333d646376c..0acc4b97eea 100644
--- a/mysql-test/suite/galera/r/galera_ssl_compression.result
+++ b/mysql-test/suite/galera/r/galera_ssl_compression.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SELECT VARIABLE_VALUE = 'Synced' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_comment';
VARIABLE_VALUE = 'Synced'
1
diff --git a/mysql-test/suite/galera/r/galera_sst_mariabackup.result b/mysql-test/suite/galera/r/galera_sst_mariabackup.result
index fdb5883b590..4fdc283b286 100644
--- a/mysql-test/suite/galera/r/galera_sst_mariabackup.result
+++ b/mysql-test/suite/galera/r/galera_sst_mariabackup.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
Performing State Transfer on a server that has been shut down cleanly and restarted
diff --git a/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir.result b/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir.result
index fdb5883b590..4fdc283b286 100644
--- a/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir.result
+++ b/mysql-test/suite/galera/r/galera_sst_mariabackup_data_dir.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
Performing State Transfer on a server that has been shut down cleanly and restarted
diff --git a/mysql-test/suite/galera/r/galera_sst_mariabackup_encrypt_with_key.result b/mysql-test/suite/galera/r/galera_sst_mariabackup_encrypt_with_key.result
index 990e0a29506..409da775d9a 100644
--- a/mysql-test/suite/galera/r/galera_sst_mariabackup_encrypt_with_key.result
+++ b/mysql-test/suite/galera/r/galera_sst_mariabackup_encrypt_with_key.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SELECT 1;
1
1
diff --git a/mysql-test/suite/galera/r/galera_sst_mariabackup_table_options.result b/mysql-test/suite/galera/r/galera_sst_mariabackup_table_options.result
index 9180ed5e421..568c06de94c 100644
--- a/mysql-test/suite/galera/r/galera_sst_mariabackup_table_options.result
+++ b/mysql-test/suite/galera/r/galera_sst_mariabackup_table_options.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
Performing State Transfer on a server that starts from a clean var directory
diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff
index 3eadee615ed..2978411c8f7 100644
--- a/mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff
@@ -1,6 +1,22 @@
---- galera_sst_mysqldump.result
-+++ galera_sst_mysqldump,debug.reject
-@@ -388,6 +388,114 @@
+--- galera_sst_mysqldump.result 2018-11-29 23:54:03.663607613 +0100
++++ galera_sst_mysqldump,debug.reject 2018-11-29 23:55:42.377562815 +0100
+@@ -1,3 +1,5 @@
++connection node_2;
++connection node_1;
+ Setting SST method to mysqldump ...
+ call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to '127.0.0.1'");
+ call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos");
+@@ -56,6 +58,9 @@
+ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+ connection node_2;
+ Loading wsrep provider ...
++disconnect node_2;
++connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
++connection node_2;
+ SET AUTOCOMMIT=OFF;
+ START TRANSACTION;
+ INSERT INTO t1 VALUES ('node2_committed_after');
+@@ -390,6 +395,114 @@
DROP TABLE t1;
COMMIT;
SET AUTOCOMMIT=ON;
diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump,release.rdiff b/mysql-test/suite/galera/r/galera_sst_mysqldump,release.rdiff
new file mode 100644
index 00000000000..3e8fee1b098
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_sst_mysqldump,release.rdiff
@@ -0,0 +1,18 @@
+--- suite/galera/r/galera_sst_mysqldump.result 2018-12-20 14:22:41.730134062 +0100
++++ suite/galera/r/galera_sst_mysqldump.reject 2019-01-16 22:18:44.139781857 +0100
+@@ -1,3 +1,5 @@
++connection node_2;
++connection node_1;
+ Setting SST method to mysqldump ...
+ call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to '127.0.0.1'");
+ call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos");
+@@ -56,6 +58,9 @@
+ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
+ connection node_2;
+ Loading wsrep provider ...
++disconnect node_2;
++connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
++connection node_2;
+ SET AUTOCOMMIT=OFF;
+ START TRANSACTION;
+ INSERT INTO t1 VALUES ('node2_committed_after');
diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump.result b/mysql-test/suite/galera/r/galera_sst_mysqldump.result
index 5c530c32ce6..4ed679ba477 100644
--- a/mysql-test/suite/galera/r/galera_sst_mysqldump.result
+++ b/mysql-test/suite/galera/r/galera_sst_mysqldump.result
@@ -400,3 +400,4 @@ CALL mtr.add_suppression("Can't open and lock time zone table");
CALL mtr.add_suppression("Can't open and lock privilege tables");
CALL mtr.add_suppression("Info table is not ready to be used");
CALL mtr.add_suppression("Native table .* has the wrong structure");
+CALL mtr.add_suppression("Table \'mysql.gtid_slave_pos\' doesn\'t exist");
diff --git a/mysql-test/suite/galera/r/galera_sst_rsync.result b/mysql-test/suite/galera/r/galera_sst_rsync.result
index ff85a7d6c0f..d41d0d34e75 100644
--- a/mysql-test/suite/galera/r/galera_sst_rsync.result
+++ b/mysql-test/suite/galera/r/galera_sst_rsync.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
Performing State Transfer on a server that has been shut down cleanly and restarted
diff --git a/mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff
index 525156d88da..8ffe51c0cc3 100644
--- a/mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff
+++ b/mysql-test/suite/galera/r/galera_sst_rsync2,debug.rdiff
@@ -1,6 +1,12 @@
---- suite/galera/r/galera_sst_rsync2.result 2018-09-12 13:09:35.352229478 +0200
-+++ suite/galera/r/galera_sst_rsync2,debug.reject 2018-09-12 17:00:51.601974979 +0200
-@@ -286,3 +286,111 @@
+--- galera_sst_rsync2.result 2018-11-29 17:57:53.288606346 +0100
++++ galera_sst_rsync2,debug.reject 2018-11-29 18:00:01.172512000 +0100
+@@ -1,3 +1,5 @@
++connection node_2;
++connection node_1;
+ connection node_1;
+ connection node_2;
+ Performing State Transfer on a server that has been shut down cleanly and restarted
+@@ -286,3 +288,111 @@
DROP TABLE t1;
COMMIT;
SET AUTOCOMMIT=ON;
diff --git a/mysql-test/suite/galera/r/galera_sst_rsync2.result b/mysql-test/suite/galera/r/galera_sst_rsync2.result
index ff85a7d6c0f..d41d0d34e75 100644
--- a/mysql-test/suite/galera/r/galera_sst_rsync2.result
+++ b/mysql-test/suite/galera/r/galera_sst_rsync2.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
Performing State Transfer on a server that has been shut down cleanly and restarted
diff --git a/mysql-test/suite/galera/r/galera_sst_rsync_data_dir.result b/mysql-test/suite/galera/r/galera_sst_rsync_data_dir.result
index ff85a7d6c0f..d41d0d34e75 100644
--- a/mysql-test/suite/galera/r/galera_sst_rsync_data_dir.result
+++ b/mysql-test/suite/galera/r/galera_sst_rsync_data_dir.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
Performing State Transfer on a server that has been shut down cleanly and restarted
diff --git a/mysql-test/suite/galera/r/galera_status_cluster.result b/mysql-test/suite/galera/r/galera_status_cluster.result
index ad92a51b775..9db0b88adc9 100644
--- a/mysql-test/suite/galera/r/galera_status_cluster.result
+++ b/mysql-test/suite/galera/r/galera_status_cluster.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 2
diff --git a/mysql-test/suite/galera/r/galera_status_local_index.result b/mysql-test/suite/galera/r/galera_status_local_index.result
index 8c36b60cc5f..c67498fff9f 100644
--- a/mysql-test/suite/galera/r/galera_status_local_index.result
+++ b/mysql-test/suite/galera/r/galera_status_local_index.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE wsrep_local_indexes (wsrep_local_index INTEGER);
INSERT INTO wsrep_local_indexes VALUES ((SELECT variable_value FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE variable_name = 'wsrep_local_index'));
diff --git a/mysql-test/suite/galera/r/galera_status_local_state.result b/mysql-test/suite/galera/r/galera_status_local_state.result
index 65713f1975c..3fe988ee6ac 100644
--- a/mysql-test/suite/galera/r/galera_status_local_state.result
+++ b/mysql-test/suite/galera/r/galera_status_local_state.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state';
VARIABLE_VALUE = 4
1
diff --git a/mysql-test/suite/galera/r/galera_suspend_slave.result b/mysql-test/suite/galera/r/galera_suspend_slave.result
index 07433399081..ce49b491778 100644
--- a/mysql-test/suite/galera/r/galera_suspend_slave.result
+++ b/mysql-test/suite/galera/r/galera_suspend_slave.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_sync_wait_show.result b/mysql-test/suite/galera/r/galera_sync_wait_show.result
index def771ff88d..793da3246ff 100644
--- a/mysql-test/suite/galera/r/galera_sync_wait_show.result
+++ b/mysql-test/suite/galera/r/galera_sync_wait_show.result
@@ -1,4 +1,6 @@
connection node_2;
+connection node_1;
+connection node_2;
SET SESSION wsrep_sync_wait = 8;
connection node_1;
CREATE DATABASE db1;
diff --git a/mysql-test/suite/galera/r/galera_sync_wait_upto.result b/mysql-test/suite/galera/r/galera_sync_wait_upto.result
new file mode 100644
index 00000000000..7d691e105da
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_sync_wait_upto.result
@@ -0,0 +1,43 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+SELECT WSREP_SYNC_WAIT_UPTO_GTID(NULL);
+ERROR HY000: Incorrect arguments to wsrep_sync_wait_upto_gtid
+SELECT WSREP_SYNC_WAIT_UPTO_GTID('a');
+ERROR HY000: Incorrect arguments to wsrep_sync_wait_upto_gtid
+SELECT WSREP_SYNC_WAIT_UPTO_GTID(2);
+ERROR HY000: Incorrect arguments to wsrep_sync_wait_upto_gtid
+WSREP_SYNC_WAIT_UPTO
+1
+WSREP_SYNC_WAIT_UPTO
+1
+WSREP_SYNC_WAIT_UPTO
+1
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection node_2;
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb";
+connection node_1;
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2a;
+SET SESSION wsrep_sync_wait = 0;
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+connection node_2;
+WSREP_SYNC_WAIT_UPTO
+1
+gtid_current = gtid_first
+1
+SET GLOBAL DEBUG_DBUG = "";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+WSREP_SYNC_WAIT_UPTO
+1
+seqno_current = seqno_second
+1
+SET DEBUG_SYNC = "RESET";
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result b/mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result
index 8a86dfd11e2..a23b0523140 100644
--- a/mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result
+++ b/mysql-test/suite/galera/r/galera_toi_alter_auto_increment.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
diff --git a/mysql-test/suite/galera/r/galera_toi_ddl_error.result b/mysql-test/suite/galera/r/galera_toi_ddl_error.result
index dafad153867..386dc28bed3 100644
--- a/mysql-test/suite/galera/r/galera_toi_ddl_error.result
+++ b/mysql-test/suite/galera/r/galera_toi_ddl_error.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
@@ -19,3 +21,6 @@ t1 CREATE TABLE `t1` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
DROP TABLE ten;
+CALL mtr.add_suppression("Ignoring error 'Duplicate entry '111110' for key 'PRIMARY'' on query.");
+connection node_2;
+CALL mtr.add_suppression("Ignoring error 'Duplicate entry '111110' for key 'PRIMARY'' on query.");
diff --git a/mysql-test/suite/galera/r/galera_toi_ddl_fk_insert.result b/mysql-test/suite/galera/r/galera_toi_ddl_fk_insert.result
index 0dbc89978d4..0ecc4a4619f 100644
--- a/mysql-test/suite/galera/r/galera_toi_ddl_fk_insert.result
+++ b/mysql-test/suite/galera/r/galera_toi_ddl_fk_insert.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
CREATE TABLE parent (
diff --git a/mysql-test/suite/galera/r/galera_toi_ddl_fk_update.result b/mysql-test/suite/galera/r/galera_toi_ddl_fk_update.result
index a5db90aa965..a7966e36133 100644
--- a/mysql-test/suite/galera/r/galera_toi_ddl_fk_update.result
+++ b/mysql-test/suite/galera/r/galera_toi_ddl_fk_update.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
CREATE TABLE parent (
diff --git a/mysql-test/suite/galera/r/galera_toi_ddl_locking.result b/mysql-test/suite/galera/r/galera_toi_ddl_locking.result
index 68743c024a0..bbe181ce6f5 100644
--- a/mysql-test/suite/galera/r/galera_toi_ddl_locking.result
+++ b/mysql-test/suite/galera/r/galera_toi_ddl_locking.result
@@ -1,32 +1,46 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_sync_wait = 0;
connection node_1;
-SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue';
+SET DEBUG_SYNC= 'RESET';
+SET DEBUG_SYNC = 'alter_table_before_open_tables SIGNAL before_open_tables WAIT_FOR continue';
ALTER TABLE t1 ADD COLUMN f2 INTEGER;;
connection node_1a;
-SET SESSION wsrep_sync_wait = 0;
+SET DEBUG_SYNC= 'now WAIT_FOR before_open_tables';
+SET wsrep_retry_autocommit=0;
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
SELECT COUNT(*) = 0 FROM t2;
COUNT(*) = 0
1
-INSERT INTO t1 VALUES (1);
-Got one of the listed errors
+INSERT INTO t1 VALUES (1);;
+connection node_1c;
+SET SESSION wsrep_sync_wait = 0;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM t2;
+COUNT(*) = 0
+1
INSERT INTO t2 VALUES (1);
COMMIT;;
connection node_1b;
SET SESSION wsrep_sync_wait = 0;
-SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO = 'Commit';
-COUNT(*) = 1
-1
SELECT COUNT(*) = 0 FROM t2;
COUNT(*) = 0
1
+SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO = 'Commit';
+COUNT(*) = 1
+1
SET DEBUG_SYNC= 'now SIGNAL continue';
connection node_1a;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1c;
connection node_1;
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
@@ -34,6 +48,7 @@ COUNT(*) = 0
SELECT COUNT(*) = 1 FROM t2;
COUNT(*) = 1
1
+SET debug_sync='RESET';
connection node_2;
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
@@ -41,9 +56,5 @@ COUNT(*) = 0
SELECT COUNT(*) = 1 FROM t2;
COUNT(*) = 1
1
-connection node_1;
-SET DEBUG_SYNC= 'RESET';
-connection node_1b;
-SET DEBUG_SYNC= 'RESET';
DROP TABLE t1;
DROP TABLE t2;
diff --git a/mysql-test/suite/galera/r/galera_toi_ddl_nonconflicting.result b/mysql-test/suite/galera/r/galera_toi_ddl_nonconflicting.result
index 3844fa97d82..5412cd3faee 100644
--- a/mysql-test/suite/galera/r/galera_toi_ddl_nonconflicting.result
+++ b/mysql-test/suite/galera/r/galera_toi_ddl_nonconflicting.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 INTEGER);
connection node_2;
ALTER TABLE t1 ADD COLUMN f3 INTEGER; INSERT INTO t1 (f1, f2) VALUES (DEFAULT, 123);;
diff --git a/mysql-test/suite/galera/r/galera_toi_ddl_sequential.result b/mysql-test/suite/galera/r/galera_toi_ddl_sequential.result
index 722bb9d9e12..db702b1a59e 100644
--- a/mysql-test/suite/galera/r/galera_toi_ddl_sequential.result
+++ b/mysql-test/suite/galera/r/galera_toi_ddl_sequential.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_toi_drop_database.result b/mysql-test/suite/galera/r/galera_toi_drop_database.result
index 6d88c8ea230..48056c70126 100644
--- a/mysql-test/suite/galera/r/galera_toi_drop_database.result
+++ b/mysql-test/suite/galera/r/galera_toi_drop_database.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE DATABASE database1;
USE database1;
@@ -14,9 +16,9 @@ INSERT INTO t2 (f1) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, te
connection node_2;
DROP DATABASE database1;;
connection node_1;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
connection node_1a;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
connection node_2;
connection node_1;
SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME = 'database1';
diff --git a/mysql-test/suite/galera/r/galera_toi_ftwrl.result b/mysql-test/suite/galera/r/galera_toi_ftwrl.result
index 0f13e95b689..fdc8b294c1c 100644
--- a/mysql-test/suite/galera/r/galera_toi_ftwrl.result
+++ b/mysql-test/suite/galera/r/galera_toi_ftwrl.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
FLUSH TABLES WITH READ LOCK;
diff --git a/mysql-test/suite/galera/r/galera_toi_lock_exclusive.result b/mysql-test/suite/galera/r/galera_toi_lock_exclusive.result
index f5cc14ed0f1..ee8c826fd18 100644
--- a/mysql-test/suite/galera/r/galera_toi_lock_exclusive.result
+++ b/mysql-test/suite/galera/r/galera_toi_lock_exclusive.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
@@ -8,7 +10,7 @@ connection node_2a;
ALTER TABLE t1 ADD COLUMN f2 INTEGER, LOCK=EXCLUSIVE;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
connection node_1;
INSERT INTO t1 VALUES (2, 2);
SELECT COUNT(*) = 2 FROM t1;
diff --git a/mysql-test/suite/galera/r/galera_toi_lock_shared.result b/mysql-test/suite/galera/r/galera_toi_lock_shared.result
index 950c4d83c70..fe1c88075d5 100644
--- a/mysql-test/suite/galera/r/galera_toi_lock_shared.result
+++ b/mysql-test/suite/galera/r/galera_toi_lock_shared.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_toi_truncate.result b/mysql-test/suite/galera/r/galera_toi_truncate.result
index 73285d723c1..081a82e6e7d 100644
--- a/mysql-test/suite/galera/r/galera_toi_truncate.result
+++ b/mysql-test/suite/galera/r/galera_toi_truncate.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
@@ -11,7 +13,7 @@ connection node_1;
TRUNCATE TABLE t1;;
connection node_1;
connection node_2;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
connection node_2;
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
diff --git a/mysql-test/suite/galera/r/galera_transaction_read_only.result b/mysql-test/suite/galera/r/galera_transaction_read_only.result
index b388f195fb1..55923f58b65 100644
--- a/mysql-test/suite/galera/r/galera_transaction_read_only.result
+++ b/mysql-test/suite/galera/r/galera_transaction_read_only.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_transaction_replay.result b/mysql-test/suite/galera/r/galera_transaction_replay.result
index 7fd837433d2..272086c4eba 100644
--- a/mysql-test/suite/galera/r/galera_transaction_replay.result
+++ b/mysql-test/suite/galera/r/galera_transaction_replay.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1));
INSERT INTO t1 VALUES (1, 'a');
INSERT INTO t1 VALUES (2, 'a');
@@ -9,19 +11,73 @@ SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE;
f1 f2
2 a
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
-SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync';
+SET SESSION wsrep_sync_wait=0;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
connection node_1;
-COMMIT;;
+COMMIT;
connection node_1a;
-SET SESSION wsrep_sync_wait = 0;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
+connection node_1;
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
+COUNT(*) = 1
+1
+wsrep_local_replays
+1
connection node_2;
-UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
+COUNT(*) = 1
+1
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1));
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+connection node_1;
+SET AUTOCOMMIT=ON;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE;
+f1 f2
+2 a
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
+connection node_1;
+COMMIT;
connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+connection node_2;
+UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
connection node_1;
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
COUNT(*) = 1
@@ -46,22 +102,34 @@ SELECT * FROM t1;
i j
1 0
3 0
+SET AUTOCOMMIT=ON;
PREPARE stmt1 FROM "UPDATE t1 SET j = 1 where i > 0";
connection node_1a;
-SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync';
-connection node_1;
-EXECUTE stmt1;;
-connection node_1a;
-SET SESSION wsrep_sync_wait = 0;
-SET SESSION wsrep_on = 0;
-SET SESSION wsrep_on = 1;
+SET SESSION wsrep_sync_wait=0;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
connection node_2;
INSERT INTO t1 VALUES(2,2);
connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
+connection node_1;
+SET SESSION wsrep_sync_wait=0;
+EXECUTE stmt1;
connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
connection node_1;
+SET SESSION wsrep_sync_wait=7;
SELECT * FROM t1;
i j
1 1
@@ -74,5 +142,7 @@ i j
2 2
3 1
connection node_1;
+wsrep_local_replays
+1
DEALLOCATE PREPARE stmt1;
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_truncate.result b/mysql-test/suite/galera/r/galera_truncate.result
index 4f3d72dbca7..c649d9bbaf9 100644
--- a/mysql-test/suite/galera/r/galera_truncate.result
+++ b/mysql-test/suite/galera/r/galera_truncate.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_truncate_temporary.result b/mysql-test/suite/galera/r/galera_truncate_temporary.result
index 183ebd9d24a..81373bda739 100644
--- a/mysql-test/suite/galera/r/galera_truncate_temporary.result
+++ b/mysql-test/suite/galera/r/galera_truncate_temporary.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TEMPORARY TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
INSERT INTO t1 VALUES (1);
TRUNCATE TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_unicode_identifiers.result b/mysql-test/suite/galera/r/galera_unicode_identifiers.result
index 77848bc751f..5db1be8c910 100644
--- a/mysql-test/suite/galera/r/galera_unicode_identifiers.result
+++ b/mysql-test/suite/galera/r/galera_unicode_identifiers.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SET GLOBAL wsrep_sync_wait = 15;
connection node_2;
SET GLOBAL wsrep_sync_wait = 15;
diff --git a/mysql-test/suite/galera/r/galera_unicode_pk.result b/mysql-test/suite/galera/r/galera_unicode_pk.result
index 0e8965a76e3..bb36fd4f369 100644
--- a/mysql-test/suite/galera/r/galera_unicode_pk.result
+++ b/mysql-test/suite/galera/r/galera_unicode_pk.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (
f1 VARCHAR(255) PRIMARY KEY
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
@@ -18,7 +20,7 @@ connection node_1;
COMMIT;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
SELECT f1 = 'текст2' FROM t1;
f1 = 'текст2'
1
@@ -35,6 +37,6 @@ connection node_2;
COMMIT;
connection node_1;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
COMMIT;
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_update_limit.result b/mysql-test/suite/galera/r/galera_update_limit.result
index 20a94e6f504..30c89a38dff 100644
--- a/mysql-test/suite/galera/r/galera_update_limit.result
+++ b/mysql-test/suite/galera/r/galera_update_limit.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE ten (f1 INTEGER) Engine=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
diff --git a/mysql-test/suite/galera/r/galera_v1_row_events.result b/mysql-test/suite/galera/r/galera_v1_row_events.result
index b0ea2293119..80fe2fb6d8d 100644
--- a/mysql-test/suite/galera/r/galera_v1_row_events.result
+++ b/mysql-test/suite/galera/r/galera_v1_row_events.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_var_OSU_method.result b/mysql-test/suite/galera/r/galera_var_OSU_method.result
index 18e8bd2271a..2b0a713e86b 100644
--- a/mysql-test/suite/galera/r/galera_var_OSU_method.result
+++ b/mysql-test/suite/galera/r/galera_var_OSU_method.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
connection node_1;
SET SESSION wsrep_OSU_method = "RSU";
diff --git a/mysql-test/suite/galera/r/galera_var_OSU_method2.result b/mysql-test/suite/galera/r/galera_var_OSU_method2.result
index 0e3751645a8..ca4f617d903 100644
--- a/mysql-test/suite/galera/r/galera_var_OSU_method2.result
+++ b/mysql-test/suite/galera/r/galera_var_OSU_method2.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
connection node_1;
SET SESSION wsrep_OSU_method = "TOI";
diff --git a/mysql-test/suite/galera/r/galera_var_auto_inc_control_off.result b/mysql-test/suite/galera/r/galera_var_auto_inc_control_off.result
index ba117b4c2d5..86c30fa4b23 100644
--- a/mysql-test/suite/galera/r/galera_var_auto_inc_control_off.result
+++ b/mysql-test/suite/galera/r/galera_var_auto_inc_control_off.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SET GLOBAL wsrep_auto_increment_control = OFF;
SET GLOBAL auto_increment_increment = 1;
@@ -59,7 +61,7 @@ connection node_1a;
COMMIT;
connection node_2a;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
connection node_1a;
SELECT * FROM t1;
f1 node
diff --git a/mysql-test/suite/galera/r/galera_var_auto_inc_control_on.result b/mysql-test/suite/galera/r/galera_var_auto_inc_control_on.result
index b71cf4c831d..48c649b66a8 100644
--- a/mysql-test/suite/galera/r/galera_var_auto_inc_control_on.result
+++ b/mysql-test/suite/galera/r/galera_var_auto_inc_control_on.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, node VARCHAR(10)) ENGINE=InnoDB;
SELECT @@auto_increment_increment = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size');
diff --git a/mysql-test/suite/galera/r/galera_var_certify_nonPK_off.result b/mysql-test/suite/galera/r/galera_var_certify_nonPK_off.result
index ca3844bf6bf..f5472aa931f 100644
--- a/mysql-test/suite/galera/r/galera_var_certify_nonPK_off.result
+++ b/mysql-test/suite/galera/r/galera_var_certify_nonPK_off.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SET GLOBAL wsrep_certify_nonPK = OFF;
connection node_2;
SET GLOBAL wsrep_certify_nonPK = OFF;
diff --git a/mysql-test/suite/galera/r/galera_var_cluster_address.result b/mysql-test/suite/galera/r/galera_var_cluster_address.result
index 378d8ca84f5..8c1070e43ce 100644
--- a/mysql-test/suite/galera/r/galera_var_cluster_address.result
+++ b/mysql-test/suite/galera/r/galera_var_cluster_address.result
@@ -1,7 +1,11 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_2;
SET GLOBAL wsrep_cluster_address = 'foo://';
+SHOW STATUS;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
SET SESSION wsrep_sync_wait=0;
SELECT COUNT(*) > 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS;
COUNT(*) > 0
@@ -11,7 +15,7 @@ Variable_name Value
wsrep_ready OFF
SHOW STATUS LIKE 'wsrep_cluster_status';
Variable_name Value
-wsrep_cluster_status non-Primary
+wsrep_cluster_status Disconnected
SHOW STATUS LIKE 'wsrep_local_state';
Variable_name Value
wsrep_local_state 0
@@ -39,7 +43,7 @@ CALL mtr.add_suppression("Failed to initialize backend using 'foo");
CALL mtr.add_suppression("Failed to open channel 'my_wsrep_cluster' at 'foo");
CALL mtr.add_suppression("gcs connect failed: Socket type not supported");
CALL mtr.add_suppression("wsrep::connect\\(\\) failed: 7");
-CALL mtr.add_suppression("gcs_caused\\(\\) returned -103 \\(Software caused connection abort\\)");
+CALL mtr.add_suppression("gcs_caused\\(\\) returned -[0-9]+ \\(Software caused connection abort\\)");
CALL mtr.add_suppression("failed to open gcomm backend connection: 110: failed to reach primary view: 110");
CALL mtr.add_suppression("Failed to open backend connection: -110 \\(Connection timed out\\)");
CALL mtr.add_suppression("gcs connect failed: Connection timed out");
diff --git a/mysql-test/suite/galera/r/galera_var_desync_on.result b/mysql-test/suite/galera/r/galera_var_desync_on.result
index 26798e51926..6a2e501eee2 100644
--- a/mysql-test/suite/galera/r/galera_var_desync_on.result
+++ b/mysql-test/suite/galera/r/galera_var_desync_on.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_var_dirty_reads.result b/mysql-test/suite/galera/r/galera_var_dirty_reads.result
index 020efb7b8f1..33536d95186 100644
--- a/mysql-test/suite/galera/r/galera_var_dirty_reads.result
+++ b/mysql-test/suite/galera/r/galera_var_dirty_reads.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_2;
@@ -14,7 +16,7 @@ Variable_name Value
wsrep_ready OFF
SHOW STATUS LIKE 'wsrep_cluster_status';
Variable_name Value
-wsrep_cluster_status non-Primary
+wsrep_cluster_status Disconnected
SELECT * FROM t1;
ERROR 08S01: WSREP has not yet prepared node for application use
SELECT 1 FROM t1;
@@ -50,4 +52,3 @@ i
DROP TABLE t1;
disconnect node_2;
disconnect node_1;
-# End of test
diff --git a/mysql-test/suite/galera/r/galera_var_fkchecks.result b/mysql-test/suite/galera/r/galera_var_fkchecks.result
index 8b1b913a584..42f8085ee1a 100644
--- a/mysql-test/suite/galera/r/galera_var_fkchecks.result
+++ b/mysql-test/suite/galera/r/galera_var_fkchecks.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE parent (
id INT PRIMARY KEY,
KEY (id)
diff --git a/mysql-test/suite/galera/r/galera_var_gtid_domain_id.result b/mysql-test/suite/galera/r/galera_var_gtid_domain_id.result
index 1a8733e2e1a..d2faf5ff463 100644
--- a/mysql-test/suite/galera/r/galera_var_gtid_domain_id.result
+++ b/mysql-test/suite/galera/r/galera_var_gtid_domain_id.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
# On node_1
connection node_1;
list of GTID variables :
diff --git a/mysql-test/suite/galera/r/galera_var_ignore_apply_errors.result b/mysql-test/suite/galera/r/galera_var_ignore_apply_errors.result
new file mode 100644
index 00000000000..48c845a4c2b
--- /dev/null
+++ b/mysql-test/suite/galera/r/galera_var_ignore_apply_errors.result
@@ -0,0 +1,186 @@
+connection node_2;
+connection node_1;
+connection node_2;
+SET GLOBAL wsrep_ignore_apply_errors = 1;
+connection node_1;
+SET GLOBAL wsrep_on = OFF;
+CREATE TABLE t1 (f1 INTEGER);
+SET GLOBAL wsrep_on = ON;
+DROP TABLE t1;
+SET GLOBAL wsrep_on = OFF;
+CREATE SCHEMA s1;
+SET GLOBAL wsrep_on = ON;
+DROP SCHEMA s1;
+CREATE TABLE t1 (f1 INTEGER);
+SET GLOBAL wsrep_on = OFF;
+CREATE INDEX idx1 ON t1 (f1);
+SET GLOBAL wsrep_on = ON;
+DROP INDEX idx1 ON t1;
+DROP TABLE t1;
+CREATE TABLE t1 (f1 INTEGER);
+SET GLOBAL wsrep_on = OFF;
+CREATE INDEX idx1 ON t1 (f1);
+SET GLOBAL wsrep_on = ON;
+ALTER TABLE t1 DROP INDEX idx1;
+DROP TABLE t1;
+CREATE TABLE t1 (f1 INTEGER);
+SET GLOBAL wsrep_on = OFF;
+ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+SET GLOBAL wsrep_on = ON;
+ALTER TABLE t1 DROP COLUMN f2;
+DROP TABLE t1;
+connection node_2;
+SET GLOBAL wsrep_ignore_apply_errors = 2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER);
+SET GLOBAL wsrep_on = OFF;
+INSERT INTO t1 VALUES (1);
+SET GLOBAL wsrep_on = ON;
+DELETE FROM t1 WHERE f1 = 1;
+connection node_1;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER);
+INSERT INTO t1 VALUES (2);
+SET GLOBAL wsrep_on = OFF;
+INSERT INTO t1 VALUES (1);
+SET GLOBAL wsrep_on = ON;
+START TRANSACTION;
+INSERT INTO t1 VALUES (3);
+DELETE FROM t1 WHERE f1 = 1;
+DELETE FROM t1 WHERE f1 = 2;
+COMMIT;
+connection node_1;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
+connection node_2;
+SET SESSION wsrep_on = OFF;
+DELETE FROM t1 WHERE f1 = 3;
+SET SESSION wsrep_on = ON;
+connection node_1;
+DELETE FROM t1;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+VARIABLE_VALUE = 'Primary'
+1
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
+connection node_2;
+SET SESSION wsrep_on = OFF;
+DELETE FROM t1 WHERE f1 = 3;
+SET SESSION wsrep_on = ON;
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+DELETE FROM t1 WHERE f1 = 1;
+DELETE FROM t1 WHERE f1 = 2;
+DELETE FROM t1 WHERE f1 = 3;
+DELETE FROM t1 WHERE f1 = 4;
+DELETE FROM t1 WHERE f1 = 5;
+COMMIT;
+SET AUTOCOMMIT=ON;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+VARIABLE_VALUE = 'Primary'
+1
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3);
+CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3);
+connection node_2;
+SET SESSION wsrep_on = OFF;
+DELETE FROM t2 WHERE f1 = 2;
+DELETE FROM t1 WHERE f1 = 3;
+SET SESSION wsrep_on = ON;
+connection node_1;
+DELETE t1, t2 FROM t1 JOIN t2 WHERE t1.f1 = t2.f1;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+VARIABLE_VALUE = 'Primary'
+1
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+DROP TABLE t1,t2;
+connection node_1;
+CREATE TABLE parent (id INT NOT NULL, PRIMARY KEY (id)) ENGINE=INNODB;
+INSERT INTO parent VALUES (1),(2),(3);
+CREATE TABLE child (id INT, parent_id INT, INDEX par_ind (parent_id), FOREIGN KEY (parent_id) REFERENCES parent(id) ON DELETE CASCADE) ENGINE=INNODB;
+INSERT INTO child VALUES (1,1),(2,2),(3,3);
+connection node_2;
+SET SESSION wsrep_on = OFF;
+DELETE FROM child WHERE parent_id = 2;
+SET SESSION wsrep_on = ON;
+connection node_1;
+DELETE FROM parent;
+SELECT COUNT(*) = 0 FROM parent;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM child;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+VARIABLE_VALUE = 'Primary'
+1
+SELECT COUNT(*) = 0 FROM parent;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM child;
+COUNT(*) = 0
+1
+DROP TABLE child, parent;
+connection node_2;
+SET GLOBAL wsrep_ignore_apply_errors = 4;
+connection node_2;
+SET GLOBAL wsrep_on = OFF;
+CREATE TABLE t1 (f1 INTEGER);
+SET GLOBAL wsrep_on = ON;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER, f2 INTEGER);
+DROP TABLE t1;
+connection node_2;
+SET GLOBAL wsrep_ignore_apply_errors = 7;
+CALL mtr.add_suppression("Can't find record in 't.*'");
+CALL mtr.add_suppression("Slave SQL: Could not execute Delete_rows event");
+CALL mtr.add_suppression("Slave SQL: Error 'Unknown table 'test.t1'' on query. Default database: 'test'. Query: 'DROP TABLE t1', Error_code: 1051");
+CALL mtr.add_suppression("Slave SQL: Error 'Can't drop database 's1'; database doesn't exist' on query. Default database: 'test'. Query: 'DROP SCHEMA s1', Error_code: 1008");
+CALL mtr.add_suppression("Slave SQL: Error 'Can't DROP 'idx1'; check that column/key exists' on query. Default database: 'test'. Query: 'DROP INDEX idx1 ON t1', Error_code: 1091");
+CALL mtr.add_suppression("Slave SQL: Error 'Can't DROP 'idx1'; check that column/key exists' on query. Default database: 'test'. Query: 'ALTER TABLE t1 DROP INDEX idx1', Error_code: 1091");
+CALL mtr.add_suppression("Slave SQL: Error 'Can't DROP 'f2'; check that column/key exists' on query. Default database: 'test'. Query: 'ALTER TABLE t1 DROP COLUMN f2', Error_code: 1091");
+CALL mtr.add_suppression("Slave SQL: Error 'Table 't1' already exists' on query.");
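The result above exercises wsrep_ignore_apply_errors with the values 1, 2, 4 and finally 7, which matches a bitmask reading of the option: bit 1 tolerates DDL that drops a missing object, bit 2 tolerates DML that deletes a missing row, and bit 4 tolerates conflicts such as creating an already existing table on the applier. A minimal sketch, assuming that bitmask interpretation (verify against the server documentation for your version):

# Hedged illustration; the bit values follow the scenarios in the test above.
SET GLOBAL wsrep_ignore_apply_errors = 1;          # tolerate DROP TABLE/SCHEMA/INDEX/COLUMN of missing objects
SET GLOBAL wsrep_ignore_apply_errors = 2;          # tolerate DELETE of rows that no longer exist locally
SET GLOBAL wsrep_ignore_apply_errors = 1 | 2 | 4;  # combine bits; equivalent to 7, tolerate all of the above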
diff --git a/mysql-test/suite/galera/r/galera_var_innodb_disallow_writes.result b/mysql-test/suite/galera/r/galera_var_innodb_disallow_writes.result
index 4db4e539c50..f5ec473fea4 100644
--- a/mysql-test/suite/galera/r/galera_var_innodb_disallow_writes.result
+++ b/mysql-test/suite/galera/r/galera_var_innodb_disallow_writes.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1a;
SET SESSION wsrep_sync_wait = 0;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_var_load_data_splitting.result b/mysql-test/suite/galera/r/galera_var_load_data_splitting.result
index 3e451abbed1..66bc6bc4a9a 100644
--- a/mysql-test/suite/galera/r/galera_var_load_data_splitting.result
+++ b/mysql-test/suite/galera/r/galera_var_load_data_splitting.result
@@ -1,6 +1,10 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
SET GLOBAL wsrep_load_data_splitting = TRUE;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
connection node_2;
SELECT COUNT(*) = 95000 FROM t1;
COUNT(*) = 95000
@@ -8,5 +12,11 @@ COUNT(*) = 95000
wsrep_last_committed_diff
1
connection node_1;
-SET GLOBAL wsrep_load_data_splitting = 1;;
+SET GLOBAL wsrep_load_data_splitting = 0;;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
+connection node_2;
+SET GLOBAL wsrep_load_data_splitting = 0;;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_var_log_bin.result b/mysql-test/suite/galera/r/galera_var_log_bin.result
index b0ea2293119..80fe2fb6d8d 100644
--- a/mysql-test/suite/galera/r/galera_var_log_bin.result
+++ b/mysql-test/suite/galera/r/galera_var_log_bin.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/galera_var_max_ws_rows.result b/mysql-test/suite/galera/r/galera_var_max_ws_rows.result
index 6bf67a3fb60..16e868f0485 100644
--- a/mysql-test/suite/galera/r/galera_var_max_ws_rows.result
+++ b/mysql-test/suite/galera/r/galera_var_max_ws_rows.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/galera_var_max_ws_size.result b/mysql-test/suite/galera/r/galera_var_max_ws_size.result
index 53bac04fa86..89c9698eed4 100644
--- a/mysql-test/suite/galera/r/galera_var_max_ws_size.result
+++ b/mysql-test/suite/galera/r/galera_var_max_ws_size.result
@@ -1,8 +1,10 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 VARCHAR(1024)) Engine=InnoDB;
SET GLOBAL wsrep_max_ws_size = 1024;
INSERT INTO t1 VALUES (DEFAULT, REPEAT('X', 1024));
-ERROR HY000: Got error 90 "Message too long" during COMMIT
+ERROR HY000: Got error 5 "Input/output error" during COMMIT
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
diff --git a/mysql-test/suite/galera/r/galera_var_mysql_replication_bundle.result b/mysql-test/suite/galera/r/galera_var_mysql_replication_bundle.result
index 202633a020e..d7a38d24722 100644
--- a/mysql-test/suite/galera/r/galera_var_mysql_replication_bundle.result
+++ b/mysql-test/suite/galera/r/galera_var_mysql_replication_bundle.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INT PRIMARY KEY) Engine=InnoDB;
SET GLOBAL wsrep_mysql_replication_bundle = 2;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_var_node_address.result b/mysql-test/suite/galera/r/galera_var_node_address.result
index 7696d1e3f4f..cf45861c8ad 100644
--- a/mysql-test/suite/galera/r/galera_var_node_address.result
+++ b/mysql-test/suite/galera/r/galera_var_node_address.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
call mtr.add_suppression("WSREP: Stray state UUID msg: .* current group state WAIT_STATE_UUID .*");
call mtr.add_suppression("WSREP: Protocol violation. JOIN message sender .* is not in state transfer (.*). Message ignored.");
call mtr.add_suppression("WSREP: Sending JOIN failed: -[0-9]+ (Transport endpoint is not connected). Will retry in new primary component.");
diff --git a/mysql-test/suite/galera/r/galera_var_reject_queries.result b/mysql-test/suite/galera/r/galera_var_reject_queries.result
index caf98566595..b95e5773830 100644
--- a/mysql-test/suite/galera/r/galera_var_reject_queries.result
+++ b/mysql-test/suite/galera/r/galera_var_reject_queries.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER);
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_var_replicate_myisam_off.result b/mysql-test/suite/galera/r/galera_var_replicate_myisam_off.result
index 382466b1387..8968f89d11b 100644
--- a/mysql-test/suite/galera/r/galera_var_replicate_myisam_off.result
+++ b/mysql-test/suite/galera/r/galera_var_replicate_myisam_off.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SET GLOBAL wsrep_replicate_myisam = FALSE;
CREATE TABLE t1 (f1 INT PRIMARY KEY) Engine=MyISAM;
INSERT INTO t1 VALUES (1);
diff --git a/mysql-test/suite/galera/r/galera_var_replicate_myisam_on.result b/mysql-test/suite/galera/r/galera_var_replicate_myisam_on.result
index 87f8862df7e..716af033e7a 100644
--- a/mysql-test/suite/galera/r/galera_var_replicate_myisam_on.result
+++ b/mysql-test/suite/galera/r/galera_var_replicate_myisam_on.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_1;
SET GLOBAL wsrep_replicate_myisam = TRUE;
diff --git a/mysql-test/suite/galera/r/galera_var_retry_autocommit.result b/mysql-test/suite/galera/r/galera_var_retry_autocommit.result
index c0bf6035184..b8943464cb7 100644
--- a/mysql-test/suite/galera/r/galera_var_retry_autocommit.result
+++ b/mysql-test/suite/galera/r/galera_var_retry_autocommit.result
@@ -1,15 +1,17 @@
+connection node_2;
+connection node_1;
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET SESSION wsrep_retry_autocommit = 0;
-SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue';
+SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continue';
INSERT INTO t1 (f1) VALUES (2);
connection node_1a;
-SET DEBUG_SYNC = 'now WAIT_FOR before_rep';
+SET DEBUG_SYNC = 'now WAIT_FOR before_cert';
connection node_2;
TRUNCATE TABLE t1;
connection node_1;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
@@ -18,10 +20,10 @@ DROP TABLE t1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET SESSION wsrep_retry_autocommit = 1;
-SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue';
-INSERT INTO t1 (f1) VALUES (2);
+SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continue';
+INSERT INTO t1 (f1) VALUES (3);
connection node_1a;
-SET DEBUG_SYNC = 'now WAIT_FOR before_rep';
+SET DEBUG_SYNC = 'now WAIT_FOR before_cert';
connection node_2;
TRUNCATE TABLE t1;
connection node_1;
@@ -34,10 +36,10 @@ connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET SESSION wsrep_retry_autocommit = 1;
SET GLOBAL debug_dbug = '+d,sync.wsrep_retry_autocommit';
-SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue EXECUTE 2';
-INSERT INTO t1 VALUES (2);;
+SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continue EXECUTE 2';
+INSERT INTO t1 VALUES (4);;
connection node_1a;
-SET DEBUG_SYNC = 'now WAIT_FOR before_rep';
+SET DEBUG_SYNC = 'now WAIT_FOR before_cert';
connection node_2;
TRUNCATE TABLE t1;
connection node_1a;
@@ -45,7 +47,7 @@ SET DEBUG_SYNC = 'now WAIT_FOR wsrep_retry_autocommit_reached';
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
-SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue WAIT_FOR before_rep';
+SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue WAIT_FOR before_cert';
connection node_2;
TRUNCATE TABLE t1;
connection node_1a;
@@ -53,7 +55,7 @@ SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
connection node_1;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
SET DEBUG_SYNC = 'RESET';
SET GLOBAL debug_dbug = NULL;
DROP TABLE t1;
@@ -61,8 +63,8 @@ connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET SESSION wsrep_retry_autocommit = 64;
SET GLOBAL debug_dbug = '+d,sync.wsrep_retry_autocommit';
-SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue EXECUTE 64';
-INSERT INTO t1 VALUES (2);
+SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continue EXECUTE 64';
+INSERT INTO t1 VALUES (5);
connection node_1;
SELECT COUNT(*) = 1 FROM t1;
COUNT(*) = 1
diff --git a/mysql-test/suite/galera/r/galera_var_slave_threads.result b/mysql-test/suite/galera/r/galera_var_slave_threads.result
index c28cc091ae9..168b45154b8 100644
--- a/mysql-test/suite/galera/r/galera_var_slave_threads.result
+++ b/mysql-test/suite/galera/r/galera_var_slave_threads.result
@@ -1,3 +1,7 @@
+connection node_2;
+connection node_1;
+connection node_1;
+connection node_2;
connection node_1;
CREATE TABLE t1 (f1 INT PRIMARY KEY) Engine=InnoDB;
CREATE TABLE t2 (f1 INT AUTO_INCREMENT PRIMARY KEY) Engine=InnoDB;
@@ -13,6 +17,9 @@ SELECT @@wsrep_slave_threads = 1;
@@wsrep_slave_threads = 1
1
SET GLOBAL wsrep_slave_threads = 1;
+SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND COMMAND != 'Daemon';
+COUNT(*)
+3
SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%';
COUNT(*)
1
@@ -20,8 +27,11 @@ SET GLOBAL wsrep_slave_threads = 64;
connection node_1;
INSERT INTO t1 VALUES (1);
connection node_2;
-SELECT COUNT(*) = 1 FROM t1;
-COUNT(*) = 1
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+1
+SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%';
+COUNT(*)
1
SET GLOBAL wsrep_slave_threads = 1;
connection node_1;
@@ -32,26 +42,42 @@ COUNT(*)
SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%';
COUNT(*)
1
+SET GLOBAL wsrep_slave_threads = 5;
SET GLOBAL wsrep_slave_threads = 1;
-DROP TABLE t1;
-DROP TABLE t2;
-#
-# lp:1372840 - Changing wsrep_slave_threads causes future connections to hang
-#
+connection node_2;
+Shutting down server ...
+connection node_1;
+show status like 'wsrep_cluster_size';
+Variable_name Value
+wsrep_cluster_size 1
+SET GLOBAL wsrep_slave_threads = 6;
+SET GLOBAL wsrep_slave_threads = 1;
+SET GLOBAL wsrep_cluster_address='';
+SET GLOBAL wsrep_cluster_address='gcomm://';
+SET GLOBAL wsrep_slave_threads = 10;
+connection node_2;
+SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND COMMAND != 'Daemon';
+COUNT(*)
+3
connection node_1;
-CREATE TABLE t1 (i INT AUTO_INCREMENT PRIMARY KEY) ENGINE=INNODB;
+SET GLOBAL wsrep_slave_threads = 1;
connection node_2;
-SET GLOBAL wsrep_slave_threads = 4;
SET GLOBAL wsrep_slave_threads = 1;
connection node_1;
-INSERT INTO t1 VALUES (DEFAULT);
-INSERT INTO t1 VALUES (DEFAULT);
-INSERT INTO t1 VALUES (DEFAULT);
-DROP TABLE t1;
connection node_2;
-SELECT NAME FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t%';
-NAME
-SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%';
-COUNT(*) = 1
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+1
+SELECT COUNT(*) FROM t2;
+COUNT(*)
+192
+connection node_1;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
1
+SELECT COUNT(*) FROM t2;
+COUNT(*)
+192
+DROP TABLE t1;
+DROP TABLE t2;
# End of tests
diff --git a/mysql-test/suite/galera/r/galera_var_sst_auth.result b/mysql-test/suite/galera/r/galera_var_sst_auth.result
index 1db83197870..6a5683e2633 100644
--- a/mysql-test/suite/galera/r/galera_var_sst_auth.result
+++ b/mysql-test/suite/galera/r/galera_var_sst_auth.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# MDEV-10492: Assertion failure on shutdown when wsrep_sst_auth set in config
#
diff --git a/mysql-test/suite/galera/r/galera_var_sync_wait.result b/mysql-test/suite/galera/r/galera_var_sync_wait.result
index 3dfe902767c..80fbd3f3e05 100644
--- a/mysql-test/suite/galera/r/galera_var_sync_wait.result
+++ b/mysql-test/suite/galera/r/galera_var_sync_wait.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# MDEV-10161: wsrep_sync_wait not enabled when set to 1 in config file
#
diff --git a/mysql-test/suite/galera/r/galera_var_wsrep_on_off.result b/mysql-test/suite/galera/r/galera_var_wsrep_on_off.result
index 36340f505ff..5323bc9bf60 100644
--- a/mysql-test/suite/galera/r/galera_var_wsrep_on_off.result
+++ b/mysql-test/suite/galera/r/galera_var_wsrep_on_off.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
SET SESSION wsrep_on = FALSE;
diff --git a/mysql-test/suite/galera/r/galera_wan.result b/mysql-test/suite/galera/r/galera_wan.result
index 41b915fa5bf..bc4113ffb1c 100644
--- a/mysql-test/suite/galera/r/galera_wan.result
+++ b/mysql-test/suite/galera/r/galera_wan.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CALL mtr.add_suppression("WSREP: Stray state UUID msg:.*");
CALL mtr.add_suppression("WSREP: Sending JOIN failed:.*");
CALL mtr.add_suppression("There are no nodes in the same segment that will ever be able to become donors, yet there is a suitable donor outside");
diff --git a/mysql-test/suite/galera/r/galera_wan_restart_ist.result b/mysql-test/suite/galera/r/galera_wan_restart_ist.result
index 8a2a7d0818e..7b87d534d92 100644
--- a/mysql-test/suite/galera/r/galera_wan_restart_ist.result
+++ b/mysql-test/suite/galera/r/galera_wan_restart_ist.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_wan_restart_sst.result b/mysql-test/suite/galera/r/galera_wan_restart_sst.result
index 71786cdd023..1296744a9c1 100644
--- a/mysql-test/suite/galera/r/galera_wan_restart_sst.result
+++ b/mysql-test/suite/galera/r/galera_wan_restart_sst.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 4
1
diff --git a/mysql-test/suite/galera/r/galera_wsrep_desync_wsrep_on.result b/mysql-test/suite/galera/r/galera_wsrep_desync_wsrep_on.result
index 99c680c7b12..e01825fd944 100644
--- a/mysql-test/suite/galera/r/galera_wsrep_desync_wsrep_on.result
+++ b/mysql-test/suite/galera/r/galera_wsrep_desync_wsrep_on.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE ten (f1 INTEGER);
INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
diff --git a/mysql-test/suite/galera/r/galera_wsrep_log_conficts.result b/mysql-test/suite/galera/r/galera_wsrep_log_conficts.result
index fa49d8c57c2..59883f1ca7e 100644
--- a/mysql-test/suite/galera/r/galera_wsrep_log_conficts.result
+++ b/mysql-test/suite/galera/r/galera_wsrep_log_conficts.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (
f1 VARCHAR(255) PRIMARY KEY
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
@@ -20,6 +22,6 @@ connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
connection node_2a;
connection node_2;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
include/assert_grep.inc [cluster conflict due to high priority abort for threads]
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/r/galera_wsrep_new_cluster.result b/mysql-test/suite/galera/r/galera_wsrep_new_cluster.result
index ca388496794..8edf1a02e9d 100644
--- a/mysql-test/suite/galera/r/galera_wsrep_new_cluster.result
+++ b/mysql-test/suite/galera/r/galera_wsrep_new_cluster.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
VARIABLE_VALUE = 'Primary'
1
diff --git a/mysql-test/suite/galera/r/galera_wsrep_provider_options_syntax.result b/mysql-test/suite/galera/r/galera_wsrep_provider_options_syntax.result
index f19dc40205b..0f7cd134156 100644
--- a/mysql-test/suite/galera/r/galera_wsrep_provider_options_syntax.result
+++ b/mysql-test/suite/galera/r/galera_wsrep_provider_options_syntax.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
call mtr.add_suppression("WSREP\: Unknown parameter 'gmcasts\.segment'");
call mtr.add_suppression("WSREP\: Set options returned 7");
SET GLOBAL wsrep_provider_options="gmcasts.segment=1";
diff --git a/mysql-test/suite/galera/r/galera_wsrep_provider_unset_set.result b/mysql-test/suite/galera/r/galera_wsrep_provider_unset_set.result
index d56d9340474..7a645407004 100644
--- a/mysql-test/suite/galera/r/galera_wsrep_provider_unset_set.result
+++ b/mysql-test/suite/galera/r/galera_wsrep_provider_unset_set.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_1;
diff --git a/mysql-test/suite/galera/r/galera_zero_length_column.result b/mysql-test/suite/galera/r/galera_zero_length_column.result
index 572d94d6756..fa29264704f 100644
--- a/mysql-test/suite/galera/r/galera_zero_length_column.result
+++ b/mysql-test/suite/galera/r/galera_zero_length_column.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY , f2 VARCHAR(0)) ENGINE=InnoDB;
CREATE TABLE t2 (f1 VARCHAR(0)) ENGINE=InnoDB;
diff --git a/mysql-test/suite/galera/r/grant.result b/mysql-test/suite/galera/r/grant.result
index a2ca72ee8ec..56f56d3b023 100644
--- a/mysql-test/suite/galera/r/grant.result
+++ b/mysql-test/suite/galera/r/grant.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# MDEV#6266: Changing password fails on galera cluster
#
@@ -13,7 +15,7 @@ SET PASSWORD FOR 'user_6266'@'localhost' = PASSWORD('newpass');
# On node_2
connection node_2;
SELECT user FROM mysql.user WHERE user='user_6266';
-user
+User
user_6266
DROP USER 'user_6266'@'localhost';
disconnect node_2;
diff --git a/mysql-test/suite/galera/r/lp1276424.result b/mysql-test/suite/galera/r/lp1276424.result
index 363758e0d66..be27ee5374f 100644
--- a/mysql-test/suite/galera/r/lp1276424.result
+++ b/mysql-test/suite/galera/r/lp1276424.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INT DEFAULT NULL, UNIQUE KEY i1 (f1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (NULL);
INSERT INTO t1 VALUES (NULL);
diff --git a/mysql-test/suite/galera/r/lp1347768.result b/mysql-test/suite/galera/r/lp1347768.result
index 7beb167d538..49c8894c081 100644
--- a/mysql-test/suite/galera/r/lp1347768.result
+++ b/mysql-test/suite/galera/r/lp1347768.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE `r8kmb_redirect_links` (
`id` int(10) unsigned NOT NULL AUTO_INCREMENT,
`old_url` varchar(255) DEFAULT NULL,
diff --git a/mysql-test/suite/galera/r/lp1376747-2.result b/mysql-test/suite/galera/r/lp1376747-2.result
index b85e130f4f4..5e5b5be6c86 100644
--- a/mysql-test/suite/galera/r/lp1376747-2.result
+++ b/mysql-test/suite/galera/r/lp1376747-2.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/lp1376747-3.result b/mysql-test/suite/galera/r/lp1376747-3.result
index a2c55b5f1f6..62893b85bcf 100644
--- a/mysql-test/suite/galera/r/lp1376747-3.result
+++ b/mysql-test/suite/galera/r/lp1376747-3.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/lp1376747-4.result b/mysql-test/suite/galera/r/lp1376747-4.result
index f1d32aa8f69..d6884cc3746 100644
--- a/mysql-test/suite/galera/r/lp1376747-4.result
+++ b/mysql-test/suite/galera/r/lp1376747-4.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
diff --git a/mysql-test/suite/galera/r/lp1376747.result b/mysql-test/suite/galera/r/lp1376747.result
index 16d4fa3fc52..1b9dd545409 100644
--- a/mysql-test/suite/galera/r/lp1376747.result
+++ b/mysql-test/suite/galera/r/lp1376747.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_2;
diff --git a/mysql-test/suite/galera/r/lp1438990.result b/mysql-test/suite/galera/r/lp1438990.result
index d48d2435faa..a324121d7da 100644
--- a/mysql-test/suite/galera/r/lp1438990.result
+++ b/mysql-test/suite/galera/r/lp1438990.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY);
diff --git a/mysql-test/suite/galera/r/lp959512.result b/mysql-test/suite/galera/r/lp959512.result
index 55adfa360b0..589030002db 100644
--- a/mysql-test/suite/galera/r/lp959512.result
+++ b/mysql-test/suite/galera/r/lp959512.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
DROP TABLE IF EXISTS variable;
Warnings:
Note 1051 Unknown table 'test.variable'
diff --git a/mysql-test/suite/galera/r/mdev_10518.result b/mysql-test/suite/galera/r/mdev_10518.result
index 4ccd5fd1d23..252aa244f77 100644
--- a/mysql-test/suite/galera/r/mdev_10518.result
+++ b/mysql-test/suite/galera/r/mdev_10518.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
# On node_1
connection node_1;
list of GTID variables :
diff --git a/mysql-test/suite/galera/r/mdev_18730.result b/mysql-test/suite/galera/r/mdev_18730.result
new file mode 100644
index 00000000000..ff0a934ba6d
--- /dev/null
+++ b/mysql-test/suite/galera/r/mdev_18730.result
@@ -0,0 +1,27 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INT PRIMARY KEY);
+connection node_1;
+SET DEBUG_SYNC = "wsrep_after_commit_order_leave SIGNAL acol_reached WAIT_FOR acol_continue";
+INSERT INTO t1 VALUES (1);
+connection ctrl;
+SET DEBUG_SYNC = "now WAIT_FOR acol_reached";
+connection node_1_sr;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_unit = 'rows';
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (2);
+connection ctrl;
+SET DEBUG_SYNC = "now SIGNAL acol_continue";
+connection node_1;
+connection node_1_sr;
+ROLLBACK;
+connection ctrl;
+SET DEBUG_SYNC = "RESET";
+disconnect ctrl;
+disconnect node_1_sr;
+connection node_1;
+DROP TABLE t1;
+disconnect node_2;
+disconnect node_1;
diff --git a/mysql-test/suite/galera/r/mdev_9290.result b/mysql-test/suite/galera/r/mdev_9290.result
index 276ab9e8ecb..d8fc35b02f4 100644
--- a/mysql-test/suite/galera/r/mdev_9290.result
+++ b/mysql-test/suite/galera/r/mdev_9290.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# MDEV-9290 : InnoDB: Assertion failure in file trx0sys.cc line 353
# InnoDB: Failing assertion: xid_seqno > trx_sys_cur_xid_seqno
diff --git a/mysql-test/suite/galera/r/mysql-wsrep#110.result b/mysql-test/suite/galera/r/mysql-wsrep#110.result
index 6d4031d71cd..344185f4f4e 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#110.result
+++ b/mysql-test/suite/galera/r/mysql-wsrep#110.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY);
diff --git a/mysql-test/suite/galera/r/mysql-wsrep#198.result b/mysql-test/suite/galera/r/mysql-wsrep#198.result
index 33f36d407db..5b569ffae27 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#198.result
+++ b/mysql-test/suite/galera/r/mysql-wsrep#198.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE t2 (id INT PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
diff --git a/mysql-test/suite/galera/r/mysql-wsrep#201.result b/mysql-test/suite/galera/r/mysql-wsrep#201.result
index fe5725cab27..e55c38ea7cc 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#201.result
+++ b/mysql-test/suite/galera/r/mysql-wsrep#201.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (id INT PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (DEFAULT);
connection node_2;
diff --git a/mysql-test/suite/galera/r/mysql-wsrep#237.result b/mysql-test/suite/galera/r/mysql-wsrep#237.result
index 19503dd5781..bc348613d25 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#237.result
+++ b/mysql-test/suite/galera/r/mysql-wsrep#237.result
@@ -1,6 +1,8 @@
+connection node_2;
+connection node_1;
CREATE TABLE t (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
connection node_1;
-SET DEBUG_SYNC = 'wsrep_before_replication WAIT_FOR continue';
+SET DEBUG_SYNC = 'wsrep_before_certification WAIT_FOR continue';
INSERT INTO t values (1);;
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1a;
diff --git a/mysql-test/suite/galera/r/mysql-wsrep#247.result b/mysql-test/suite/galera/r/mysql-wsrep#247.result
index e59c6d1a299..704f0ba923c 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#247.result
+++ b/mysql-test/suite/galera/r/mysql-wsrep#247.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SET GLOBAL wsrep_desync=1;
SET wsrep_OSU_method=RSU;
diff --git a/mysql-test/suite/galera/r/mysql-wsrep#31.result b/mysql-test/suite/galera/r/mysql-wsrep#31.result
index 1092f4ddb0c..d1d2861ec3a 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#31.result
+++ b/mysql-test/suite/galera/r/mysql-wsrep#31.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_1;
diff --git a/mysql-test/suite/galera/r/mysql-wsrep#33.result b/mysql-test/suite/galera/r/mysql-wsrep#33.result
index 6a5251204b9..5589faad8b1 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#33.result
+++ b/mysql-test/suite/galera/r/mysql-wsrep#33.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
Setting SST method to mysqldump ...
@@ -56,6 +58,9 @@ INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after');
connection node_2;
Loading wsrep provider ...
+disconnect node_2;
+connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
INSERT INTO t1 VALUES ('node2_committed_after');
@@ -114,5 +119,6 @@ CALL mtr.add_suppression("Can't open and lock time zone table");
CALL mtr.add_suppression("Can't open and lock privilege tables");
CALL mtr.add_suppression("Info table is not ready to be used");
CALL mtr.add_suppression("Native table .* has the wrong structure");
+CALL mtr.add_suppression("Table \'mysql.gtid_slave_pos\' doesn\'t exist");
connection node_2;
Restarting server ...
diff --git a/mysql-test/suite/galera/r/mysql-wsrep#332.result b/mysql-test/suite/galera/r/mysql-wsrep#332.result
index 8667f5e9c41..565979a93ae 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#332.result
+++ b/mysql-test/suite/galera/r/mysql-wsrep#332.result
@@ -1,26 +1,38 @@
+connection node_2;
+connection node_1;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1;
CREATE TABLE p (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
CREATE TABLE c (f1 INTEGER PRIMARY KEY, p_id INTEGER) ENGINE=INNODB;
INSERT INTO p VALUES (1, 0);
INSERT INTO p VALUES (2, 0);
INSERT INTO c VALUES (1, 1);
INSERT INTO c VALUES (2, 2);
+connection node_1;
SET AUTOCOMMIT=ON;
START TRANSACTION;
UPDATE p SET f1 = f1 + 100;
+connection node_1a;
SET SESSION wsrep_sync_wait = 0;
SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
ALTER TABLE c ADD FOREIGN KEY (p_id) REFERENCES p(f1);
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
COMMIT;
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_2;
SELECT * FROM p;
f1 f2
1 0
@@ -31,6 +43,7 @@ f1 p_id
2 2
DROP TABLE c;
DROP TABLE p;
+connection node_1;
CREATE TABLE p1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
CREATE TABLE p2 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
CREATE TABLE c (f1 INTEGER PRIMARY KEY, p_id1 INTEGER, p_id2 INTEGER) ENGINE=INNODB;
@@ -38,23 +51,31 @@ INSERT INTO p1 VALUES (1, 0), (2, 0);
INSERT INTO p2 VALUES (1, 0), (2, 0);
INSERT INTO c VALUES (1, 1, 1);
INSERT INTO c VALUES (2, 2, 2);
+connection node_1;
SET AUTOCOMMIT=ON;
START TRANSACTION;
UPDATE p1 SET f1 = f1 + 100;
+connection node_1a;
SET SESSION wsrep_sync_wait = 0;
SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
ALTER TABLE c ADD FOREIGN KEY (p_id1) REFERENCES p1(f1), ADD FOREIGN KEY (p_id2) REFERENCES p2(f1);
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
COMMIT;
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_2;
SELECT * FROM p1;
f1 f2
1 0
@@ -70,6 +91,7 @@ f1 p_id1 p_id2
DROP TABLE c;
DROP TABLE p1;
DROP TABLE p2;
+connection node_1;
CREATE TABLE p1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
CREATE TABLE p2 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
CREATE TABLE c (f1 INTEGER PRIMARY KEY, p_id1 INTEGER, p_id2 INTEGER) ENGINE=INNODB;
@@ -77,23 +99,31 @@ INSERT INTO p1 VALUES (1, 0), (2, 0);
INSERT INTO p2 VALUES (1, 0), (2, 0);
INSERT INTO c VALUES (1, 1, 1);
INSERT INTO c VALUES (2, 2, 2);
+connection node_1;
SET AUTOCOMMIT=ON;
START TRANSACTION;
UPDATE p2 SET f1 = f1 + 100;
+connection node_1a;
SET SESSION wsrep_sync_wait = 0;
SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
ALTER TABLE c ADD FOREIGN KEY (p_id1) REFERENCES p1(f1), ADD FOREIGN KEY (p_id2) REFERENCES p2(f1);
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'dbug=';
-SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
COMMIT;
+connection node_1a;
SET SESSION wsrep_on = 0;
SET SESSION wsrep_on = 1;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
-SET GLOBAL wsrep_provider_options = 'signal=local_monitor_enter_sync';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_2;
SELECT * FROM p1;
f1 f2
1 0
diff --git a/mysql-test/suite/galera/r/mysql-wsrep#90.result b/mysql-test/suite/galera/r/mysql-wsrep#90.result
index 0b8f55e6219..b0fa06ffaf8 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#90.result
+++ b/mysql-test/suite/galera/r/mysql-wsrep#90.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
connection node_1;
SET GLOBAL wsrep_OSU_method = "RSU";
diff --git a/mysql-test/suite/galera/r/partition.result b/mysql-test/suite/galera/r/partition.result
index bdf6df25589..2e16d06519c 100644
--- a/mysql-test/suite/galera/r/partition.result
+++ b/mysql-test/suite/galera/r/partition.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# MDEV#4953 Galera: DELETE from a partitioned table is not replicated
#
@@ -116,6 +118,8 @@ connection node_1;
# Case 1: wsrep_load_data_splitting = ON & LOAD DATA with 20002
# entries.
SET GLOBAL wsrep_load_data_splitting = ON;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
CREATE TABLE t1 (pk INT PRIMARY KEY)
ENGINE=INNODB PARTITION BY HASH(pk) PARTITIONS 2;
connection node_2;
@@ -123,11 +127,13 @@ SELECT COUNT(*) FROM t1;
COUNT(*)
20002
wsrep_last_committed_diff
-1
+AS_EXPECTED_3_or_5
DROP TABLE t1;
# Case 2: wsrep_load_data_splitting = ON & LOAD DATA with 101 entries.
connection node_1;
SET GLOBAL wsrep_load_data_splitting = ON;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
CREATE TABLE t1 (pk INT PRIMARY KEY)
ENGINE=INNODB PARTITION BY HASH(pk) PARTITIONS 2;
connection node_2;
@@ -141,6 +147,8 @@ DROP TABLE t1;
# entries.
connection node_1;
SET GLOBAL wsrep_load_data_splitting = OFF;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
CREATE TABLE t1 (pk INT PRIMARY KEY)
ENGINE=INNODB PARTITION BY HASH(pk) PARTITIONS 2;
connection node_2;
@@ -148,10 +156,12 @@ SELECT COUNT(*) FROM t1;
COUNT(*)
20002
wsrep_last_committed_diff
-1
+AS_EXPECTED_1_or_2
DROP TABLE t1;
connection node_1;
-SET GLOBAL wsrep_load_data_splitting = 1;;
+SET GLOBAL wsrep_load_data_splitting = 0;;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
disconnect node_2;
disconnect node_1;
# End of test
diff --git a/mysql-test/suite/galera/r/pxc-421.result b/mysql-test/suite/galera/r/pxc-421.result
index a317b3e40e1..058af15c098 100644
--- a/mysql-test/suite/galera/r/pxc-421.result
+++ b/mysql-test/suite/galera/r/pxc-421.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_1;
diff --git a/mysql-test/suite/galera/r/query_cache.result b/mysql-test/suite/galera/r/query_cache.result
index a1d5d96d627..5dabd38a982 100644
--- a/mysql-test/suite/galera/r/query_cache.result
+++ b/mysql-test/suite/galera/r/query_cache.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
# Execute FLUSH/RESET commands.
# On node-1
@@ -1336,9 +1338,13 @@ show status like "Qcache_hits";
Variable_name Value
Qcache_hits 12
select * from t1 into outfile "query_cache.out.file";
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select * from t1 into outfile "query_cache.out.file";
ERROR HY000: File 'query_cache.out.file' already exists
select * from t1 limit 1 into dumpfile "query_cache.dump.file";
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
diff --git a/mysql-test/suite/galera/r/rename.result b/mysql-test/suite/galera/r/rename.result
index a7ec431657b..3ad715fa38c 100644
--- a/mysql-test/suite/galera/r/rename.result
+++ b/mysql-test/suite/galera/r/rename.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# MDEV-8598 : Failed MySQL DDL commands and Galera replication
#
diff --git a/mysql-test/suite/galera/r/rpl_row_annotate.result b/mysql-test/suite/galera/r/rpl_row_annotate.result
index 23de06f015b..61fa2bc286c 100644
--- a/mysql-test/suite/galera/r/rpl_row_annotate.result
+++ b/mysql-test/suite/galera/r/rpl_row_annotate.result
@@ -1,9 +1,15 @@
+connection node_2;
+connection node_1;
# On node_2
connection node_2;
+SET GLOBAL wsrep_on=OFF;
RESET MASTER;
+SET GLOBAL wsrep_on=ON;
# On node_1
connection node_1;
+SET GLOBAL wsrep_on=OFF;
RESET MASTER;
+SET GLOBAL wsrep_on=ON;
CREATE TABLE t1(i INT)ENGINE=INNODB;
INSERT INTO t1 VALUES(1);
DELETE FROM t1 WHERE i = 1;
@@ -68,6 +74,4 @@ mysqld-bin.000001 # Table_map 2 # table_id: # (test.t1)
mysqld-bin.000001 # Delete_rows_v1 2 # table_id: # flags: STMT_END_F
mysqld-bin.000001 # Xid 2 # COMMIT /* xid= */
DROP TABLE t1;
-disconnect node_2;
-disconnect node_1;
# End of test
diff --git a/mysql-test/suite/galera/r/sql_log_bin.result b/mysql-test/suite/galera/r/sql_log_bin.result
index c175a0a0e7a..6efd70ca8b8 100644
--- a/mysql-test/suite/galera/r/sql_log_bin.result
+++ b/mysql-test/suite/galera/r/sql_log_bin.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
# On node_1
connection node_1;
diff --git a/mysql-test/suite/galera/r/unique_key.result b/mysql-test/suite/galera/r/unique_key.result
index 9f1fc858389..bb7e22014d9 100644
--- a/mysql-test/suite/galera/r/unique_key.result
+++ b/mysql-test/suite/galera/r/unique_key.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# MDEV#5552 Deadlock when inserting NULL column value in column with
# UNIQUE index
diff --git a/mysql-test/suite/galera/r/versioning_trx_id.result b/mysql-test/suite/galera/r/versioning_trx_id.result
index f15916e51a5..ec3dc09e97b 100644
--- a/mysql-test/suite/galera/r/versioning_trx_id.result
+++ b/mysql-test/suite/galera/r/versioning_trx_id.result
@@ -1,8 +1,11 @@
+connection node_2;
+connection node_1;
connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
connection node_1;
create table t1 (a int, s bigint unsigned as row start, e bigint unsigned as row end, period for system_time(s,e)) engine=InnoDB with system versioning;
insert into t1 (a) values (1),(2);
connection node_2;
+set session wsrep_sync_wait=15;
insert into t1 (a) values (3),(4);
select a from t1;
a
@@ -17,6 +20,7 @@ select count(*) from mysql.transaction_registry where begin_timestamp>=commit_ti
count(*)
0
connection node_3;
+set session wsrep_sync_wait=15;
insert into t1 (a) values (5),(6);
select a from t1;
a
@@ -33,6 +37,7 @@ select count(*) from mysql.transaction_registry where begin_timestamp>=commit_ti
count(*)
0
connection node_1;
+set session wsrep_sync_wait=15;
select a from t1;
a
1
@@ -48,5 +53,4 @@ select count(*) from mysql.transaction_registry where begin_timestamp>=commit_ti
count(*)
0
drop table t1;
-disconnect node_2;
-disconnect node_1;
+disconnect node_3;
diff --git a/mysql-test/suite/galera/r/view.result b/mysql-test/suite/galera/r/view.result
index f8da811f9cc..45d5b422f3f 100644
--- a/mysql-test/suite/galera/r/view.result
+++ b/mysql-test/suite/galera/r/view.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
#
# MDEV-7222: Cluster Node Crash at CREATE DEFINER statement
#
diff --git a/mysql-test/suite/galera/r/wsrep_trx_fragment_size_sr.result b/mysql-test/suite/galera/r/wsrep_trx_fragment_size_sr.result
new file mode 100644
index 00000000000..4139ecd6472
--- /dev/null
+++ b/mysql-test/suite/galera/r/wsrep_trx_fragment_size_sr.result
@@ -0,0 +1,15 @@
+connection node_2;
+connection node_1;
+SELECT variable_value FROM information_schema.session_variables
+WHERE variable_name = 'wsrep_trx_fragment_size';
+variable_value
+0
+SET SESSION wsrep_trx_fragment_size = 0;
+SET SESSION wsrep_trx_fragment_size = 123;
+SELECT variable_value FROM information_schema.global_variables
+WHERE variable_name = 'wsrep_trx_fragment_size';
+variable_value
+0
+SET GLOBAL wsrep_trx_fragment_size = 0;
+SET GLOBAL wsrep_trx_fragment_size = 123;
+SET GLOBAL wsrep_trx_fragment_size = default;
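The new result above (and mdev_18730.result earlier) toggles wsrep_trx_fragment_size per session and globally. A minimal sketch of how these streaming-replication settings are typically used together, assuming a table t1 like the ones in these tests:

# Streaming replication sketch: replicate a fragment after every row.
SET SESSION wsrep_trx_fragment_unit = 'rows';  # fragment unit: 'bytes', 'rows' or 'statements'
SET SESSION wsrep_trx_fragment_size = 1;       # replicate a fragment after each row
START TRANSACTION;
INSERT INTO t1 VALUES (1);                     # this row is replicated immediately as a fragment
COMMIT;
SET SESSION wsrep_trx_fragment_size = 0;       # 0 switches streaming replication off again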
diff --git a/mysql-test/suite/galera/suite.pm b/mysql-test/suite/galera/suite.pm
index faea228b6b0..b8456189848 100644
--- a/mysql-test/suite/galera/suite.pm
+++ b/mysql-test/suite/galera/suite.pm
@@ -9,9 +9,9 @@ return "Not run for embedded server" if $::opt_embedded_server;
return "WSREP is not compiled in" unless defined $::mysqld_variables{'wsrep-on'};
my ($provider) = grep { -f $_ } $ENV{WSREP_PROVIDER},
- "/usr/lib64/galera-3/libgalera_smm.so",
+ "/usr/lib64/galera-4/libgalera_smm.so",
"/usr/lib64/galera/libgalera_smm.so",
- "/usr/lib/galera-3/libgalera_smm.so",
+ "/usr/lib/galera-4/libgalera_smm.so",
"/usr/lib/galera/libgalera_smm.so";
return "No wsrep provider library" unless -f $provider;
@@ -82,6 +82,8 @@ push @::global_suppressions,
qr|WSREP: .*core_handle_uuid_msg.*|,
qr(WSREP: --wsrep-causal-reads=ON takes precedence over --wsrep-sync-wait=0. WSREP_SYNC_WAIT_BEFORE_READ is on),
qr|WSREP: JOIN message from member .* in non-primary configuration. Ignored.|,
+ qr|Query apply failed:*|,
+ qr(WSREP: Ignoring error*),
qr(WSREP: Failed to remove page file .*),
qr(WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to .*),
);
diff --git a/mysql-test/suite/galera/t/GAL-419.test b/mysql-test/suite/galera/t/GAL-419.test
index e50b948bf35..07abcbcc47b 100644
--- a/mysql-test/suite/galera/t/GAL-419.test
+++ b/mysql-test/suite/galera/t/GAL-419.test
@@ -5,11 +5,11 @@
--source include/galera_cluster.inc
--source include/big_test.inc
---connection node_2
+--connection node_1
SET SESSION wsrep_sync_wait = 0;
--source include/kill_galera.inc
---connection node_1
+--connection node_2
SET SESSION wsrep_sync_wait = 0;
--source include/kill_galera.inc
diff --git a/mysql-test/suite/galera/t/GCF-1081.test b/mysql-test/suite/galera/t/GCF-1081.test
new file mode 100644
index 00000000000..38553feda39
--- /dev/null
+++ b/mysql-test/suite/galera/t/GCF-1081.test
@@ -0,0 +1,72 @@
+#
+# GCF-1081 - Assertion `!thd->sp_runtime_ctx`
+#
+# Test replaying of stored procedures
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
+
+--connection node_1
+
+--let $wsrep_local_replays_old = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 0), (3, 0);
+
+DELIMITER |;
+CREATE PROCEDURE proc_update ()
+BEGIN
+ UPDATE t1 SET f2 = 1 where f1 > 0;
+END|
+DELIMITER ;|
+
+# Block the SP
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--let $galera_sync_point = commit_monitor_master_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_1
+--send CALL proc_update ();
+
+# Wait until SP is blocked
+--connection node_1a
+SET SESSION wsrep_sync_wait = 0;
+--source include/galera_wait_sync_point.inc
+
+# Issue a conflicting insert on node #2
+--connection node_1a
+SET GLOBAL debug_dbug = 'd,sync.wsrep_before_BF_victim_unlock';
+
+--connection node_2
+--send INSERT INTO t1 VALUES (2, 2);
+
+# Wait until it BF aborts the SP
+--connection node_1a
+SET SESSION DEBUG_SYNC = 'now WAIT_FOR sync.wsrep_before_BF_victim_unlock_reached';
+SET GLOBAL debug_dbug = '';
+
+# Unblock the SP
+--connection node_1a
+--source include/galera_clear_sync_point.inc
+--source include/galera_signal_sync_point.inc
+
+--connection node_2
+--reap
+SELECT * FROM t1;
+
+# SP succeeds
+--connection node_1
+--reap
+SELECT * FROM t1;
+
+# wsrep_local_replays has increased by 1
+--let $wsrep_local_replays_new = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+--disable_query_log
+--eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old AS wsrep_local_replays;
+--enable_query_log
+
+DROP PROCEDURE proc_update;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/GCF-939.test b/mysql-test/suite/galera/t/GCF-939.test
new file mode 100644
index 00000000000..637d656996e
--- /dev/null
+++ b/mysql-test/suite/galera/t/GCF-939.test
@@ -0,0 +1,31 @@
+#
+# GCF-939 Avoid creation of GRA log files when the applier is successful
+#
+
+--source include/galera_cluster.inc
+
+--exec rm -rf $MYSQLTEST_VARDIR/mysqld.2/data/GRA_*.log
+
+--connection node_1
+--error ER_BAD_TABLE_ERROR
+DROP TABLE t1;
+
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+
+# Expect only one GRA_*.log file
+# TODO: replace_regex is somehow broken; it filters out the result
+# entirely if the replacement string is already present in the result.
+# Worked around temporarily by calling list_files twice so that
+# GRA_.log is printed two times; this works for some reason.
+#
+--replace_regex /GRA_.+\.log/GRA_.log/
+--list_files $MYSQLTEST_VARDIR/mysqld.2/data GRA_*.log
+--replace_regex /GRA_.+\.log/GRA_.log/
+--list_files $MYSQLTEST_VARDIR/mysqld.2/data GRA_*.log
+
+DROP TABLE t1;
+CALL mtr.add_suppression("Ignoring error 'Unknown table 'test.t1'' on query");
+--connection node_2
+CALL mtr.add_suppression("Error 'Unknown table 'test.t1'' on query");
+
diff --git a/mysql-test/suite/galera/t/MDEV-16509.test b/mysql-test/suite/galera/t/MDEV-16509.test
new file mode 100644
index 00000000000..2242ef6aff3
--- /dev/null
+++ b/mysql-test/suite/galera/t/MDEV-16509.test
@@ -0,0 +1,141 @@
+#
+# Test various executions which go through binlog group commit
+#
+
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
+
+
+CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
+
+--let $galera_connection_name = ctrl
+--let $galera_server_number = 1
+--source include/galera_connect.inc
+
+# Scenario 1: Block INSERT after commit order release, once it has been
+# queued for group commit. Verify that
+#
+# - WSREP_LAST_SEEN_GTID is not advanced before commit finishes
+# - The INSERT does not become visible before commit finishes
+
+# Turn off sync wait to avoid blocking and use WSREP_LAST_SEEN_GTID()
+# to observe gtid position.
+SET SESSION wsrep_sync_wait = 0;
+--let $last_seen_gtid_prev = `SELECT WSREP_LAST_SEEN_GTID()`
+
+--connection node_1
+SET SESSION wsrep_sync_wait = 0;
+# Set up sync points
+SET DEBUG_SYNC = "wsrep_before_commit_order_leave SIGNAL bcol_reached WAIT_FOR bcol_continue";
+SET DEBUG_SYNC = "wsrep_after_commit_order_leave SIGNAL acol_reached WAIT_FOR acol_continue";
+SET DEBUG_SYNC = "after_group_after_commit SIGNAL after_group_reached WAIT_FOR after_group_continue";
+# Send insert which will block in the sync points above
+--send INSERT INTO t1 VALUES (1)
+
+--connection ctrl
+# INSERT has gone through wsrep_ordered_commit() and the transaction is
+# committed in memory.
+SET DEBUG_SYNC = "now WAIT_FOR bcol_reached";
+--disable_query_log
+--eval SELECT WSREP_LAST_SEEN_GTID() = '$last_seen_gtid_prev' AS wsrep_last_seen_gtid_match
+--enable_query_log
+SELECT * FROM t1;
+SET DEBUG_SYNC = "now SIGNAL bcol_continue";
+
+# SE commit has finished but wsrep_after_commit() has not been called yet.
+SET DEBUG_SYNC = "now WAIT_FOR acol_reached";
+--disable_query_log
+--eval SELECT WSREP_LAST_SEEN_GTID() = '$last_seen_gtid_prev' AS wsrep_last_seen_gtid_match
+--enable_query_log
+SELECT * FROM t1;
+SET DEBUG_SYNC = "now SIGNAL acol_continue";
+
+SET DEBUG_SYNC = "now WAIT_FOR after_group_reached";
+--disable_query_log
+--eval SELECT WSREP_LAST_SEEN_GTID() != '$last_seen_gtid_prev' AS wsrep_last_seen_gtid_do_not_match
+--enable_query_log
+SET DEBUG_SYNC = "now SIGNAL after_group_continue";
+
+--connection node_1
+--reap
+
+#
+# Scenario 2: Verify that two INSERTs from two different connections
+# queue for commit.
+#
+--let $galera_connection_name = node_1a
+--let $galera_server_number = 1
+--source include/galera_connect.inc
+SET SESSION wsrep_sync_wait = 0;
+
+--connection ctrl
+--let $last_seen_gtid_prev = `SELECT WSREP_LAST_SEEN_GTID()`
+
+--connection node_1
+SET DEBUG_SYNC = "wsrep_before_commit_order_leave SIGNAL bcol_reached_1 WAIT_FOR bcol_continue_1";
+SET DEBUG_SYNC = "wsrep_after_commit_order_leave SIGNAL acol_reached_1 WAIT_FOR acol_continue_1";
+SET DEBUG_SYNC = "after_group_after_commit SIGNAL agac_reached_1 WAIT_FOR agac_continue_1";
+--send INSERT INTO t1 VALUES (2);
+--connection ctrl
+SET DEBUG_SYNC = "now WAIT_FOR bcol_reached_1";
+
+--disable_query_log
+--eval SELECT WSREP_LAST_SEEN_GTID() = '$last_seen_gtid_prev' AS wsrep_last_seen_gtid_match
+--enable_query_log
+
+--connection node_1a
+SET DEBUG_SYNC = "wsrep_before_commit_order_leave SIGNAL bcol_reached_2 WAIT_FOR bcol_continue_2";
+SET DEBUG_SYNC = "wsrep_after_commit_order_leave SIGNAL acol_reached_2 WAIT_FOR acol_continue_2";
+SET DEBUG_SYNC = "after_group_after_commit SIGNAL agac_reached_2 WAIT_FOR agac_continue_2";
+--send INSERT INTO t1 VALUES (3);
+
+# Now INSERTs are queued, node_1 waiting after releasing commit order,
+# node_1a waiting before releasing commit order.
+--connection ctrl
+SET DEBUG_SYNC = "now SIGNAL bcol_continue_1";
+SET DEBUG_SYNC = "now WAIT_FOR acol_reached_1";
+SET DEBUG_SYNC = "now WAIT_FOR bcol_reached_2";
+
+--disable_query_log
+--eval SELECT WSREP_LAST_SEEN_GTID() = '$last_seen_gtid_prev' AS wsrep_last_seen_gtid_match
+--enable_query_log
+
+SET DEBUG_SYNC = "now SIGNAL bcol_continue_2";
+SET DEBUG_SYNC = "now WAIT_FOR acol_reached_2";
+
+--disable_query_log
+--eval SELECT WSREP_LAST_SEEN_GTID() = '$last_seen_gtid_prev' AS wsrep_last_seen_gtid_match
+--enable_query_log
+
+# Last seen GTIDs are incremented one by one once after_group_after_commit
+# is reached.
+SET DEBUG_SYNC = "now SIGNAL acol_continue_1";
+SET DEBUG_SYNC = "now WAIT_FOR agac_reached_1";
+
+--disable_query_log
+--eval SELECT WSREP_LAST_SEEN_GTID() != '$last_seen_gtid_prev' AS wsrep_last_seen_gtid_no_match
+--enable_query_log
+
+--let $last_seen_gtid_prev = `SELECT WSREP_LAST_SEEN_GTID()`
+SET DEBUG_SYNC = "now SIGNAL acol_continue_2";
+SET DEBUG_SYNC = "now WAIT_FOR agac_reached_2";
+--disable_query_log
+--eval SELECT WSREP_LAST_SEEN_GTID() != '$last_seen_gtid_prev' AS wsrep_last_seen_gtid_no_match
+--enable_query_log
+
+SET DEBUG_SYNC = "now SIGNAL agac_continue_1";
+SET DEBUG_SYNC = "now SIGNAL agac_continue_2";
+
+--connection node_1
+--reap
+--connection node_1a
+--reap
+
+--connection ctrl
+SET DEBUG_SYNC = "RESET";
+
+DROP TABLE t1;
+
+--disconnect ctrl
+--disconnect node_1a
+--source include/galera_end.inc
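Several of the tests above (MW-286, MW-292, GCF-1081, MDEV-16509) coordinate two connections through the server's DEBUG_SYNC facility, which is only available in debug builds. A minimal sketch of the handshake pattern they rely on, using a hypothetical sync point name my_sync_point:

# connection A: arm the sync point, then run the statement that hits it
SET DEBUG_SYNC = 'my_sync_point SIGNAL reached WAIT_FOR go';
INSERT INTO t1 VALUES (1);           # parks at my_sync_point (sent with --send in mysqltest)

# connection B: wait for A to park, do the concurrent work, then release A
SET DEBUG_SYNC = 'now WAIT_FOR reached';
SET DEBUG_SYNC = 'now SIGNAL go';
SET DEBUG_SYNC = 'RESET';            # clear all sync point actions afterwards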
diff --git a/mysql-test/suite/galera/t/MW-284.test b/mysql-test/suite/galera/t/MW-284.test
index 5e17baa1bdb..b52db4c68b0 100644
--- a/mysql-test/suite/galera/t/MW-284.test
+++ b/mysql-test/suite/galera/t/MW-284.test
@@ -63,7 +63,9 @@ CALL mtr.add_suppression('failed registering on master');
CALL mtr.add_suppression('You need to use --log-bin to make --binlog-format work');
--connection node_1
+set global wsrep_on=OFF;
RESET MASTER;
+set global wsrep_on=ON;
CALL mtr.add_suppression('WSREP: Last Applied Action message in non-primary configuration from member');
--connection node_2
diff --git a/mysql-test/suite/galera/t/MW-286.test b/mysql-test/suite/galera/t/MW-286.test
index 426b4493bb7..9c849861001 100644
--- a/mysql-test/suite/galera/t/MW-286.test
+++ b/mysql-test/suite/galera/t/MW-286.test
@@ -4,31 +4,55 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
---source include/big_test.inc
-
+--source include/have_debug_sync.inc
+
--connection node_1
-CREATE TABLE ten (f1 INTEGER) Engine=InnoDB;
-INSERT INTO ten VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
-
# Insert some values before the ALTER
-INSERT INTO t1 (f1) SELECT 000000 + (10000 * a1.f1) + (1000 * a2.f1) + (100 * a3.f1) + (10 * a4.f1) + a5.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5;
-
-# Insert more values while the ALTER is running
---send INSERT INTO t1 (f1) SELECT 100000 + (10000 * a1.f1) + (1000 * a2.f1) + (100 * a3.f1) + (10 * a4.f1) + a5.f1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5;
+INSERT INTO t1 (f1) VALUES (1), (2), (3);
+#
+# run ALTER with no wsrep replication
+#
--connection node_2
SET GLOBAL wsrep_desync = TRUE;
SET wsrep_on = FALSE;
---error 0,ER_QUERY_INTERRUPTED
-ALTER TABLE t1 ADD PRIMARY KEY (f1);
+#
+# stop ALTER processing after it has acquired exclusive MDL lock
+#
+SET SESSION debug_sync = "alter_table_inplace_after_lock_upgrade SIGNAL mdl_locked WAIT_FOR mdl_continue";
+
+--send ALTER TABLE t1 ADD PRIMARY KEY (f1);
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connection node_2a
+SET SESSION debug_sync = "now WAIT_FOR mdl_locked";
+
+#
+# replicate conflicting insert from node_1
+#
+--connection node_1
+INSERT INTO t1(f1) VALUES (11);
+
+#
+# let the parked ALTER processing continue after the conflict
+#
+--connection node_2a
+SET debug_sync = "now SIGNAL mdl_continue";
+SET debug_sync='RESET';
+
+#
+# ALTER should have been aborted with the query interrupted error code
+#
+--connection node_2
+--error ER_QUERY_INTERRUPTED
+--reap
SET wsrep_on = TRUE;
SET GLOBAL wsrep_desync = FALSE;
--connection node_1
-reap;
+
DROP TABLE t1;
-DROP TABLE ten;
diff --git a/mysql-test/suite/galera/t/MW-292.test b/mysql-test/suite/galera/t/MW-292.test
index ecb1273759e..9580d53d85c 100644
--- a/mysql-test/suite/galera/t/MW-292.test
+++ b/mysql-test/suite/galera/t/MW-292.test
@@ -9,7 +9,7 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_debug_sync.inc
---source suite/galera/include/galera_have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
--let $wsrep_local_replays_old = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
@@ -25,37 +25,51 @@ START TRANSACTION;
UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE;
-# Block the commit
+# Block the applier on node #1 and issue a conflicting update on node #2
--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
---let $galera_sync_point = commit_monitor_enter_sync
+SET SESSION wsrep_sync_wait=0;
+--let $galera_sync_point = apply_monitor_slave_enter_sync
--source include/galera_set_sync_point.inc
---connection node_1
---send COMMIT;
+--connection node_2
+UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
-# Wait until commit is blocked
--connection node_1a
-SET SESSION wsrep_sync_wait = 0;
--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
-# Issue a conflicting update on node #2
---connection node_2
-UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
+# Block the commit, send the COMMIT and wait until it gets blocked
-# Wait for both transactions to be blocked
---connection node_1a
---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Update_rows_log_event::find_row%';
---source include/wait_condition.inc
+--let $galera_sync_point = commit_monitor_master_enter_sync
+--source include/galera_set_sync_point.inc
---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'init' AND INFO = 'COMMIT';
---source include/wait_condition.inc
+--connection node_1
+--send COMMIT
-# Unblock the commit
--connection node_1a
+
+--let $galera_sync_point = apply_monitor_slave_enter_sync commit_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
--source include/galera_clear_sync_point.inc
+
+# Let the conflicting UPDATE proceed and wait until it hits abort_trx_end.
+# The victim transaction still sits in the commit_monitor_master_enter_sync sync point.
+
+--let $galera_sync_point = abort_trx_end
+--source include/galera_set_sync_point.inc
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+--let $galera_sync_point = abort_trx_end commit_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
+
+# Let the transactions proceed
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = abort_trx_end
+--source include/galera_signal_sync_point.inc
+--let $galera_sync_point = commit_monitor_master_enter_sync
--source include/galera_signal_sync_point.inc
-# Commit succeeds via replay
+# Commit succeeds
--connection node_1
--reap
diff --git a/mysql-test/suite/galera/t/MW-328A.test b/mysql-test/suite/galera/t/MW-328A.test
index 09aad1bcf60..097d4ac4ff6 100644
--- a/mysql-test/suite/galera/t/MW-328A.test
+++ b/mysql-test/suite/galera/t/MW-328A.test
@@ -3,8 +3,13 @@
#
#
-# Attempt to insert into t2 and check if insert actually inserted rows if
-# a success was reported.
+# Test phase 1 is not deterministic.
+#
+# Here we attempt to insert into t2 and check whether the insert
+# actually inserted rows if a success was reported.
+#
+# However, deadlocks may or may not happen during this test execution;
+# it all depends on timing.
#
--source include/big_test.inc
@@ -25,7 +30,7 @@ while ($count)
{
TRUNCATE TABLE t2;
- --error 0,1213
+ --error 0,ER_LOCK_DEADLOCK
INSERT IGNORE INTO t2 SELECT f2 FROM t1;
if ($mysql_errno != 1213) {
--inc $successes
@@ -44,14 +49,31 @@ while ($count)
--enable_query_log
+
+--source suite/galera/t/MW-328-footer.inc
+
#
-# Check that the test produced both deadlocks and successes
+# Test phase 2 is deterministic.
+# Here we generate a guaranteed conflict on node 1 and verify that
+# the insert failed on both nodes.
#
+--connection node_1
+CREATE TABLE t1 (i int primary key, j int) engine=innodb;
+INSERT INTO t1 values (1,0);
---disable_query_log
---eval SELECT $successes > 0 AS have_successes
---eval SELECT $deadlocks > 0 AS have_deadlocks
---enable_query_log
+BEGIN;
+UPDATE t1 SET j=1 WHERE i=1;
+--connection node_2
+UPDATE t1 SET j=2 WHERE i=1;
---source suite/galera/t/MW-328-footer.inc
+--connection node_1
+--error ER_LOCK_DEADLOCK
+COMMIT;
+
+SELECT * FROM t1;
+--connection node_2
+SELECT * FROM t1;
+--connection node_1
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/MW-329.test b/mysql-test/suite/galera/t/MW-329.test
index 5baa4d14966..78755fc3b42 100644
--- a/mysql-test/suite/galera/t/MW-329.test
+++ b/mysql-test/suite/galera/t/MW-329.test
@@ -11,11 +11,6 @@ CREATE TABLE t1 (f1 INTEGER, f2 CHAR(20) DEFAULT 'abc') ENGINE=InnoDB;
# We start with a populated table
INSERT INTO t1 (f1) VALUES (1),(65535);
-# Clear the wsrep_local_replays counter
-
-FLUSH STATUS;
-SELECT VARIABLE_VALUE = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays';
-
#
# Run concurrent INSERTs
#
@@ -42,8 +37,9 @@ DELIMITER ;|
#
--connection node_2
-CALL mtr.add_suppression("WSREP: Failed to report last committed .*");
---let $count = 200
+--let $count = 10
+--let $wsrep_local_replays_old = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+
while ($count)
{
--let $signature = `SELECT LEFT(MD5(RAND()), 10)`
@@ -57,14 +53,28 @@ while ($count)
--die ROW_COUNT() = 0
}
}
- --dec $count
+
+ #
+ # Ensure at least one replay happens
+ #
+
+ --let $wsrep_replays = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+ --disable_query_log
+ if (`SELECT $wsrep_replays - $wsrep_local_replays_old > 0`) {
+ --dec $count
+ }
+ --enable_query_log
}
#
# Confirm that some transaction replays occurred
#
-SELECT VARIABLE_VALUE > 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays';
+--let $wsrep_local_replays_new = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+--disable_query_log
+--eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old > 0 AS wsrep_local_replays;
+--enable_query_log
+
#
# Terminate the stored procedure
diff --git a/mysql-test/suite/galera/t/MW-336.test b/mysql-test/suite/galera/t/MW-336.test
index 40d093a1a86..6549ecfe6ea 100644
--- a/mysql-test/suite/galera/t/MW-336.test
+++ b/mysql-test/suite/galera/t/MW-336.test
@@ -15,7 +15,7 @@ SET GLOBAL wsrep_slave_threads = 1;
--echo # Wait 10 slave threads to start 1
--let $wait_timeout=600
---let $wait_condition = SELECT COUNT(*) = 11 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
+--let $wait_condition = SELECT COUNT(*) = 12 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
--source include/wait_condition.inc
--connection node_2
@@ -49,13 +49,13 @@ SELECT COUNT(*) FROM t1;
SET GLOBAL wsrep_slave_threads = 10;
--echo # Wait 10 slave threads to start 2
---let $wait_condition = SELECT COUNT(*) = 11 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
+--let $wait_condition = SELECT COUNT(*) = 12 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
--source include/wait_condition.inc
SET GLOBAL wsrep_slave_threads = 20;
--echo # Wait 20 slave threads to start 3
---let $wait_condition = SELECT COUNT(*) = 21 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
+--let $wait_condition = SELECT COUNT(*) = 22 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
--source include/wait_condition.inc
SET GLOBAL wsrep_slave_threads = 1;
diff --git a/mysql-test/suite/galera/t/MW-360-master.opt b/mysql-test/suite/galera/t/MW-360-master.opt
new file mode 100644
index 00000000000..e51c49c3808
--- /dev/null
+++ b/mysql-test/suite/galera/t/MW-360-master.opt
@@ -0,0 +1,2 @@
+--gtid-domain-id=1 --log-bin --log-slave-updates
+
diff --git a/mysql-test/suite/galera/t/MW-360.test b/mysql-test/suite/galera/t/MW-360.test
new file mode 100644
index 00000000000..b776631cfff
--- /dev/null
+++ b/mysql-test/suite/galera/t/MW-360.test
@@ -0,0 +1,100 @@
+#
+# MW-360 DROP TABLE containing temporary tables results in binlog divergence
+#
+
+--source include/galera_cluster.inc
+--source include/have_binlog_format_row.inc
+
+--connection node_1
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+
+--connection node_2
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+
+--connection node_1
+
+#
+# Straightforward temporary table
+#
+
+CREATE TEMPORARY TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+DROP TABLE t1;
+
+--let $local_uuid = `SELECT LEFT(@@global.gtid_executed, 36)`
+
+#
+# A mix of normal and temporary tables
+#
+
+# Temp table first, normal table second
+
+CREATE TEMPORARY TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (2);
+
+DROP TABLE t1, t2;
+
+# Normal table first, temporary table second
+
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+
+CREATE TEMPORARY TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (2);
+
+DROP TABLE t1, t2;
+
+# Temporary table first, normal table second, temp table third
+
+CREATE TEMPORARY TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (2);
+
+CREATE TEMPORARY TABLE t3 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t3 VALUES (3);
+
+DROP TABLE t1, t2, t3;
+
+# Normal table first, temporary table second, normal table third
+
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+
+CREATE TEMPORARY TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (2);
+
+CREATE TABLE t3 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t3 VALUES (3);
+
+DROP TABLE t1, t2, t3;
+
+#
+# A temporary table masking a normal one
+#
+
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+
+CREATE TEMPORARY TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (2);
+
+DROP TABLE t1;
+DROP TABLE t1;
+
+--connection node_2
+--let $gtid_executed_node2 = `SELECT @@global.gtid_executed;`
+
+--connection node_1
+--disable_query_log
+# Node 1 has extra GTIDs generated by the temporary table drops
+--eval SELECT GTID_SUBSET('$gtid_executed_node2', @@global.gtid_executed) AS gtid_executed_equal;
+--enable_query_log
diff --git a/mysql-test/suite/galera/t/MW-369.inc b/mysql-test/suite/galera/t/MW-369.inc
index 5fd9ef150ae..71df979d6ba 100644
--- a/mysql-test/suite/galera/t/MW-369.inc
+++ b/mysql-test/suite/galera/t/MW-369.inc
@@ -24,7 +24,6 @@
--connection node_1
SET AUTOCOMMIT=ON;
START TRANSACTION;
-
--eval $mw_369_parent_query
#
@@ -51,7 +50,7 @@ SET SESSION wsrep_sync_wait = 0;
--source include/galera_wait_sync_point.inc
--source include/galera_clear_sync_point.inc
---let $galera_sync_point = local_monitor_enter_sync
+--let $galera_sync_point = local_monitor_master_enter_sync
--source include/galera_set_sync_point.inc
--connection node_1
@@ -61,7 +60,7 @@ SET SESSION wsrep_sync_wait = 0;
# Wait until both sync points have been reached
#
--connection node_1a
---let $galera_sync_point = apply_monitor_slave_enter_sync local_monitor_enter_sync
+--let $galera_sync_point = apply_monitor_slave_enter_sync local_monitor_master_enter_sync
--source include/galera_wait_sync_point.inc
#
@@ -70,6 +69,6 @@ SET SESSION wsrep_sync_wait = 0;
--let $galera_sync_point = apply_monitor_slave_enter_sync
--source include/galera_signal_sync_point.inc
---let $galera_sync_point = local_monitor_enter_sync
+--let $galera_sync_point = local_monitor_master_enter_sync
--source include/galera_signal_sync_point.inc
--source include/galera_clear_sync_point.inc
diff --git a/mysql-test/suite/galera/t/MW-369.test b/mysql-test/suite/galera/t/MW-369.test
index 720d6daf518..c8f8c974019 100644
--- a/mysql-test/suite/galera/t/MW-369.test
+++ b/mysql-test/suite/galera/t/MW-369.test
@@ -24,7 +24,7 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_debug_sync.inc
---source suite/galera/include/galera_have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
CREATE TABLE p (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
CREATE TABLE c (f1 INTEGER PRIMARY KEY, p_id INTEGER,
@@ -244,3 +244,101 @@ SELECT * FROM c;
DROP TABLE c;
DROP TABLE p;
+--echo #
+--echo # Start of 10.4 tests
+--echo #
+#
+# Test F Outline:
+# ===============
+#
+# Test two concurrent INSERTs on the child table.
+#
+# The pf table will originally have row (1)
+# The cf table will originally be empty
+#
+# A new row (10, 1) pointing to parent row (1) is inserted from
+# connection node_1. A transaction which tries to INSERT another child
+# row (20, 1), pointing to the same parent, is run from connection node_2.
+#
+# Expected Outcome:
+# =================
+# Both INSERTs should succeed since they don't modify the common parent
+# key.
+#
+# At the end of the test:
+# parent table should have row (1)
+# child table should have rows (10, 1), (20, 1)
+
+--connection node_1
+
+CREATE TABLE pf (f1 INTEGER PRIMARY KEY) ENGINE=INNODB;
+CREATE TABLE cf (
+ f1 INTEGER PRIMARY KEY,
+ p_id INTEGER,
+ CONSTRAINT fk_1 FOREIGN KEY (p_id) REFERENCES pf (f1)
+);
+
+INSERT INTO pf VALUES (1);
+
+# This is run on node1:
+--let $mw_369_parent_query = INSERT INTO cf (f1, p_id) VALUES (10, 1)
+# This is run on node2:
+--let $mw_369_child_query = INSERT INTO cf (f1, p_id) VALUES (20, 1)
+--source MW-369.inc
+
+--connection node_1
+--reap
+
+--connection node_2
+SELECT * FROM pf;
+SELECT * FROM cf;
+
+DROP TABLE cf;
+DROP TABLE pf;
+
+#
+# Test G Outline:
+# ===============
+#
+# This test is similar to test B, where an existing
+# child table row is updated concurrently from another node
+# by a transaction which updates the parent table, except
+# that here the child table row is inserted, not updated.
+#
+# The pg table will originally have rows (1, 0), (2, 0).
+# The cg table will originally be empty
+#
+# Expected outcome:
+# ================
+#
+# Both the UPDATE and the INSERT should succeed since they operate on separate
+# tables and the UPDATE of the parent row does not touch the foreign key
+# referenced by the child row INSERT. The parent table shall contain rows
+# (1, 1), (2, 0). The child table shall contain row (1, 1, 0), which references
+# the parent row with f1 = 1.
+#
+
+--connection node_1
+CREATE TABLE pg (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
+CREATE TABLE cg (f1 INTEGER PRIMARY KEY, p_id INTEGER,
+ f2 INTEGER,
+ CONSTRAINT fk_1 FOREIGN KEY (p_id) REFERENCES pg (f1)) ;
+
+INSERT INTO pg VALUES (1, 0);
+INSERT INTO pg VALUES (2, 0);
+
+--let $mw_369_parent_query = UPDATE pg SET f2 = 1 WHERE f1 = 1
+--let $mw_369_child_query = INSERT INTO cg VALUES (1, 1, 0)
+--source MW-369.inc
+
+# Commit succeeds
+--connection node_1
+--reap
+
+--connection node_2
+SELECT * FROM pg;
+SELECT * FROM cg;
+
+DROP TABLE cg;
+DROP TABLE pg;
+
diff --git a/mysql-test/suite/galera/t/MW-388.test b/mysql-test/suite/galera/t/MW-388.test
index 042b7e2fee5..e99559400c1 100644
--- a/mysql-test/suite/galera/t/MW-388.test
+++ b/mysql-test/suite/galera/t/MW-388.test
@@ -1,6 +1,5 @@
--source include/galera_cluster.inc
--source include/have_debug_sync.inc
-
--connection node_1
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(255)) Engine=InnoDB;
@@ -40,24 +39,25 @@ SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached";
--connection node_1
SET SESSION wsrep_sync_wait = 0;
-SET SESSION DEBUG_SYNC = 'wsrep_after_replication SIGNAL wsrep_after_replication_reached WAIT_FOR wsrep_after_replication_continue';
+SET SESSION DEBUG_SYNC = 'wsrep_after_certification SIGNAL wsrep_after_certification_reached WAIT_FOR wsrep_after_certification_continue';
--send CALL insert_proc ();
--connection node_1a
-SET SESSION DEBUG_SYNC = "now WAIT_FOR wsrep_after_replication_reached";
+SET SESSION DEBUG_SYNC = "now WAIT_FOR wsrep_after_certification_reached";
SET GLOBAL DEBUG_DBUG = "";
-SET DEBUG_SYNC = "now SIGNAL wsrep_after_replication_continue";
+SET DEBUG_SYNC = "now SIGNAL wsrep_after_certification_continue";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
--connection node_2
--reap
--connection node_1
-# We expect no errors here, because the handler in insert_proc() caught the deadlock error
+# We expect no errors here, because the handler in insert_proc()
+# caught the deadlock error
--reap
-SELECT @errno = 1213;
+SELECT @errno `expect 1213`;
SELECT * FROM t1;
--connection node_2
diff --git a/mysql-test/suite/galera/t/MW-402.test b/mysql-test/suite/galera/t/MW-402.test
index 36b691c6295..4b83e25dc50 100644
--- a/mysql-test/suite/galera/t/MW-402.test
+++ b/mysql-test/suite/galera/t/MW-402.test
@@ -1,6 +1,6 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
---source suite/galera/include/galera_have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
#
# we must open connection node_1a here, MW-369.inc will use it later
@@ -135,7 +135,6 @@ SELECT * FROM c;
DROP TABLE c;
DROP TABLE p;
-
#
# CASCADE DELETE tests with two parent tables
# Here we cause cascaded operation on child table through
@@ -151,10 +150,11 @@ DROP TABLE p;
CREATE TABLE p1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
CREATE TABLE p2 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
-CREATE TABLE c (f1 INTEGER PRIMARY KEY, p1_id INTEGER, p2_id INTEGER, f2 INTEGER,
- CONSTRAINT fk_1 FOREIGN KEY (p1_id) REFERENCES p1 (f1) ON DELETE CASCADE,
+CREATE TABLE c (f1 INTEGER PRIMARY KEY, p1_id INTEGER, p2_id INTEGER,
+ f2 INTEGER,
+ CONSTRAINT fk_1 FOREIGN KEY (p1_id) REFERENCES p1 (f1)
+ ON DELETE CASCADE,
CONSTRAINT fk_2 FOREIGN KEY (p2_id) REFERENCES p2 (f1));
-
INSERT INTO p1 VALUES (1, 0);
INSERT INTO p2 VALUES (1, 0);
@@ -171,7 +171,51 @@ INSERT INTO c VALUES (1, 1, 1, 0);
--connection node_1
--reap
-# same as previous, but statements in different order
+--connection node_2
+SELECT * FROM p1;
+SELECT * FROM p2;
+SELECT * FROM c;
+
+DROP TABLE c;
+DROP TABLE p1;
+DROP TABLE p2;
+
+#
+# CASCADE DELETE tests with two parent tables
+# Here we cause a cascaded operation on the child table through
+# one parent table and issue another delete operation through the
+# other parent table. The cascade progresses to the same child table row,
+# where we should see the conflict happen.
+#
+# As a result, the DELETE on p2 should fail
+#
+--connection node_1
+
+CREATE TABLE p1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
+CREATE TABLE p2 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=INNODB;
+CREATE TABLE c (f1 INTEGER PRIMARY KEY, p1_id INTEGER, p2_id INTEGER,
+ f2 INTEGER,
+ CONSTRAINT fk_1 FOREIGN KEY (p1_id) REFERENCES p1 (f1)
+ ON DELETE CASCADE,
+ CONSTRAINT fk_2 FOREIGN KEY (p2_id) REFERENCES p2 (f1)
+ ON DELETE CASCADE);
+
+INSERT INTO p1 VALUES (1, 0);
+INSERT INTO p2 VALUES (1, 0);
+
+INSERT INTO c VALUES (1, 1, 1, 0);
+
+--let $mw_369_parent_query = DELETE FROM p2 WHERE f1=1
+--let $mw_369_child_query = DELETE FROM p1 WHERE f1=1
+
+--connection node_1a
+--source MW-369.inc
+
+# Commit fails with a deadlock error
+--connection node_1
+--error ER_LOCK_DEADLOCK
+--reap
+
--connection node_2
SELECT * FROM p1;
SELECT * FROM p2;
diff --git a/mysql-test/suite/galera/t/MW-416.test b/mysql-test/suite/galera/t/MW-416.test
index df4fa35abc7..48eada95cb8 100644
--- a/mysql-test/suite/galera/t/MW-416.test
+++ b/mysql-test/suite/galera/t/MW-416.test
@@ -2,11 +2,10 @@
--source include/have_innodb.inc
--source include/wait_until_ready.inc
-
CREATE USER 'userMW416'@'localhost';
GRANT SELECT, INSERT, UPDATE ON test.* TO 'userMW416'@'localhost';
-SHOW GLOBAL STATUS LIKE 'wsrep_replicated';
+--let $wsrep_replicated_before = `SELECT variable_value FROM information_schema.global_status WHERE variable_name = 'wsrep_replicated'`
--connect userMW416, localhost, userMW416,, test, $NODE_MYPORT_1
--connection userMW416
@@ -131,4 +130,7 @@ UNINSTALL PLUGIN plg;
--connection node_1
DROP USER 'userMW416'@'localhost';
SHOW DATABASES;
-SHOW GLOBAL STATUS LIKE 'wsrep_replicated';
+--let $wsrep_replicated_after = `SELECT variable_value FROM information_schema.global_status WHERE variable_name = 'wsrep_replicated'`
+--disable_query_log
+--eval SELECT $wsrep_replicated_after - $wsrep_replicated_before AS wsrep_replicated_after_diff
+--enable_query_log
diff --git a/mysql-test/suite/galera/t/MW-86-wait1.test b/mysql-test/suite/galera/t/MW-86-wait1.test
index 40a7882829b..9fe863bf340 100644
--- a/mysql-test/suite/galera/t/MW-86-wait1.test
+++ b/mysql-test/suite/galera/t/MW-86-wait1.test
@@ -6,13 +6,15 @@
--source include/galera_cluster.inc
--source include/have_binlog_format_row.inc
--source include/have_debug_sync.inc
+SET @orig_debug=@@debug_dbug;
--connection node_2
# Make sure no signals have been leftover from previous tests to surprise us.
SELECT @@debug_sync;
+set debug_sync='RESET';
SET SESSION wsrep_sync_wait = 1;
-SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
+SET GLOBAL debug_dbug = "+d,sync.wsrep_apply_cb";
--connection node_1
CREATE TABLE t_wait1 (f1 INTEGER) ENGINE=InnoDB;
@@ -90,16 +92,15 @@ SHOW WARNINGS;
--enable_result_log
# Unblock the background INSERT and remove the sync point.
-SET GLOBAL debug_dbug = "";
-SET SESSION debug_sync = "now SIGNAL signal.wsrep_apply_cb";
+SET GLOBAL debug_dbug = @orig_debug;
+SET SESSION debug_sync = "now SIGNAL signal.wsrep_apply_cb";
+SET debug_sync='RESET';
SET SESSION wsrep_sync_wait = default;
# This will wait for the background INSERT to complete before we quit
# from the test.
DROP TABLE t_wait1;
-SET debug_sync='RESET';
-
# Make sure no pending signals are leftover to surprise subsequent tests.
SELECT @@debug_sync;
diff --git a/mysql-test/suite/galera/t/MW-86-wait8.test b/mysql-test/suite/galera/t/MW-86-wait8.test
index 551b0f67b7c..7d9e54a16ba 100644
--- a/mysql-test/suite/galera/t/MW-86-wait8.test
+++ b/mysql-test/suite/galera/t/MW-86-wait8.test
@@ -4,13 +4,14 @@
--source include/galera_cluster.inc
--source include/have_binlog_format_row.inc
--source include/have_debug_sync.inc
+SET @orig_debug=@@debug_dbug;
--connection node_2
# Make sure no signals have been leftover from previous tests to surprise us.
SELECT @@debug_sync;
SET SESSION wsrep_sync_wait = 8;
-SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb";
+SET GLOBAL debug_dbug = "+d,sync.wsrep_apply_cb";
--connection node_1
CREATE TABLE t_wait8 (f1 INTEGER) ENGINE=InnoDB;
@@ -112,8 +113,10 @@ SHOW WARNINGS;
--enable_query_log
# Unblock the background INSERT and remove the sync point.
-SET GLOBAL debug_dbug = "";
+SET GLOBAL debug_dbug = @orig_debug;
+
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+SET debug_sync='RESET';
SET SESSION wsrep_sync_wait = default;
@@ -121,7 +124,5 @@ SET SESSION wsrep_sync_wait = default;
# from the test.
DROP TABLE t_wait8;
-SET debug_sync='RESET';
-
# Make sure no pending signals are leftover to surprise subsequent tests.
SELECT @@debug_sync;
diff --git a/mysql-test/suite/galera/t/galera#500.test b/mysql-test/suite/galera/t/galera#500.test
index 3c8490b6907..60f303b7103 100644
--- a/mysql-test/suite/galera/t/galera#500.test
+++ b/mysql-test/suite/galera/t/galera#500.test
@@ -8,6 +8,10 @@
--source include/galera_cluster.inc
--source include/galera_have_debug_sync.inc
+--let $node_1=node_1
+--let $node_2=node_2
+--source suite/galera/include/auto_increment_offset_save.inc
+
# Force node_2 gcomm background thread to terminate via exception.
--connection node_2
--let $wsrep_cluster_address = `SELECT @@wsrep_cluster_address`
@@ -36,3 +40,5 @@ SET SESSION wsrep_on=0;
--connection node_2
CALL mtr.add_suppression("WSREP: exception from gcomm, backend must be restarted: Gcomm backend termination was requested by setting gmcast.isolate=2.");
+
+--source suite/galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera/t/galera_applier_ftwrl_table_alter-master.opt b/mysql-test/suite/galera/t/galera_applier_ftwrl_table_alter-master.opt
index d8ecaacaa4c..6f688b39fd5 100644
--- a/mysql-test/suite/galera/t/galera_applier_ftwrl_table_alter-master.opt
+++ b/mysql-test/suite/galera/t/galera_applier_ftwrl_table_alter-master.opt
@@ -1 +1 @@
---lock_wait_timeout=5 --innodb_lock_wait_timeout=5 --wait_timeout=5
+--lock_wait_timeout=5 --innodb_lock_wait_timeout=5 --wait_timeout=60
diff --git a/mysql-test/suite/galera/t/galera_as_master.test b/mysql-test/suite/galera/t/galera_as_master.test
index 49f3c993256..1c439ffff63 100644
--- a/mysql-test/suite/galera/t/galera_as_master.test
+++ b/mysql-test/suite/galera/t/galera_as_master.test
@@ -64,4 +64,6 @@ RESET SLAVE ALL;
CALL mtr.add_suppression('You need to use --log-bin to make --binlog-format work');
--connection node_1
+set global wsrep_on=OFF;
RESET MASTER;
+set global wsrep_on=ON;
diff --git a/mysql-test/suite/galera/t/galera_as_master_gtid.cnf b/mysql-test/suite/galera/t/galera_as_master_gtid.cnf
index 19517556331..75caba5420a 100644
--- a/mysql-test/suite/galera/t/galera_as_master_gtid.cnf
+++ b/mysql-test/suite/galera/t/galera_as_master_gtid.cnf
@@ -1,8 +1,6 @@
!include ../galera_2nodes_as_master.cnf
[mysqld]
-gtid-mode=ON
log-bin=mysqld-bin
log-slave-updates
-enforce-gtid-consistency
binlog-format=ROW
diff --git a/mysql-test/suite/galera/t/galera_as_master_gtid.test b/mysql-test/suite/galera/t/galera_as_master_gtid.test
index 9db104b7cab..9be065e448b 100644
--- a/mysql-test/suite/galera/t/galera_as_master_gtid.test
+++ b/mysql-test/suite/galera/t/galera_as_master_gtid.test
@@ -27,10 +27,6 @@ INSERT INTO t1 VALUES(1);
--eval SELECT '$effective_uuid' != @@global.server_uuid AS uuids_do_not_match;
--enable_query_log
---replace_result $effective_uuid <effective_uuid>
---replace_regex /table_id: [0-9]+/table_id: #/ /xid=[0-9]+/xid=#/
-SHOW BINLOG EVENTS IN 'mysqld-bin.000002' FROM 120;
-
--connection node_2
INSERT INTO t1 VALUES(2);
@@ -39,10 +35,6 @@ INSERT INTO t1 VALUES(2);
--eval SELECT '$effective_uuid' = LEFT(@@global.gtid_executed, 36) AS uuids_match;
--enable_query_log
---replace_result $effective_uuid <effective_uuid>
---replace_regex /table_id: [0-9]+/table_id: #/ /xid=[0-9]+/xid=#/
-SHOW BINLOG EVENTS IN 'mysqld-bin.000003' FROM 120;
-
--connection node_3
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc
@@ -55,10 +47,6 @@ SHOW BINLOG EVENTS IN 'mysqld-bin.000003' FROM 120;
--eval SELECT '$effective_uuid' = LEFT(@@global.gtid_executed, 36) AS uuids_match;
--enable_query_log
---replace_result $effective_uuid <effective_uuid>
---replace_regex /table_id: [0-9]+/table_id: #/ /xid=[0-9]+/xid=#/
-SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 120;
-
--connection node_1
DROP TABLE t1;
@@ -66,5 +54,17 @@ DROP TABLE t1;
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc
+--connection node_1
+--let $gtid_executed_node1 = `SELECT @@global.gtid_executed;`
+
+--connection node_2
+--disable_query_log
+--eval SELECT '$gtid_executed_node1' = @@global.gtid_executed AS gtid_executed_equal
+--enable_query_log
+--connection node_3
+--disable_query_log
+--eval SELECT '$gtid_executed_node1' = @@global.gtid_executed AS gtid_executed_equal
+--enable_query_log
+
STOP SLAVE;
RESET SLAVE ALL;
diff --git a/mysql-test/suite/galera/t/galera_as_master_gtid_change_master.cnf b/mysql-test/suite/galera/t/galera_as_master_gtid_change_master.cnf
index 19517556331..75caba5420a 100644
--- a/mysql-test/suite/galera/t/galera_as_master_gtid_change_master.cnf
+++ b/mysql-test/suite/galera/t/galera_as_master_gtid_change_master.cnf
@@ -1,8 +1,6 @@
!include ../galera_2nodes_as_master.cnf
[mysqld]
-gtid-mode=ON
log-bin=mysqld-bin
log-slave-updates
-enforce-gtid-consistency
binlog-format=ROW
diff --git a/mysql-test/suite/galera/t/galera_as_master_gtid_change_master.test b/mysql-test/suite/galera/t/galera_as_master_gtid_change_master.test
index 23606d7ac4c..61c7eed6543 100644
--- a/mysql-test/suite/galera/t/galera_as_master_gtid_change_master.test
+++ b/mysql-test/suite/galera/t/galera_as_master_gtid_change_master.test
@@ -24,6 +24,8 @@ INSERT INTO t1 VALUES(1);
INSERT INTO t1 VALUES(2);
--connection node_3
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
+--source include/wait_condition.inc
STOP SLAVE;
--disable_query_log
--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=$NODE_MYPORT_2, MASTER_AUTO_POSITION=1;
diff --git a/mysql-test/suite/galera/t/galera_as_slave.test b/mysql-test/suite/galera/t/galera_as_slave.test
index 849b75eadd1..da92437b118 100644
--- a/mysql-test/suite/galera/t/galera_as_slave.test
+++ b/mysql-test/suite/galera/t/galera_as_slave.test
@@ -5,18 +5,19 @@
#
--source include/have_innodb.inc
-
-# As node #1 is not a Galera node, we connect to node #2 in order to run include/galera_cluster.inc
---connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--source include/galera_cluster.inc
+# As node #3 is not a Galera node and galera_cluster.inc does not open a connection to it,
+# we open the node_3 connection here
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
--connection node_2
--disable_query_log
---eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_1;
+--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_3;
--enable_query_log
START SLAVE;
---connection node_1
+--connection node_3
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES(1);
@@ -29,14 +30,14 @@ INSERT INTO t1 VALUES(1);
INSERT INTO t1 VALUES (2);
---connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connection node_1
SELECT COUNT(*) = 2 FROM t1;
INSERT INTO t1 VALUES (3);
--connection node_2
SELECT COUNT(*) = 3 FROM t1;
---connection node_1
+--connection node_3
DROP TABLE t1;
--connection node_2
@@ -46,5 +47,5 @@ DROP TABLE t1;
STOP SLAVE;
RESET SLAVE ALL;
---connection node_1
+--connection node_3
RESET MASTER;
diff --git a/mysql-test/suite/galera/t/galera_as_slave_autoinc.test b/mysql-test/suite/galera/t/galera_as_slave_autoinc.test
index 59483d0591c..e0c8bf29682 100644
--- a/mysql-test/suite/galera/t/galera_as_slave_autoinc.test
+++ b/mysql-test/suite/galera/t/galera_as_slave_autoinc.test
@@ -5,18 +5,19 @@
#
--source include/have_innodb.inc
-
-# As node #1 is not a Galera node, we connect to node #2 in order to run include/galera_cluster.inc
---connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--source include/galera_cluster.inc
+# As node #3 is not a Galera node and galera_cluster.inc does not open a connection to it,
+# we open the node_3 connection here
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
--connection node_2
--disable_query_log
---eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_1;
+--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_3;
--enable_query_log
START SLAVE;
---connection node_1
+--connection node_3
##
## Verify the correct operation of the auto-increment when
@@ -62,14 +63,13 @@ select * from t1;
show variables like 'binlog_format';
show variables like 'auto_increment_increment';
---connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
-
+--connection node_1
select * from t1;
show variables like 'binlog_format';
show variables like 'auto_increment_increment';
---connection node_1
+--connection node_3
DROP TABLE t1;
--connection node_2
@@ -79,5 +79,5 @@ DROP TABLE t1;
STOP SLAVE;
RESET SLAVE ALL;
---connection node_1
+--connection node_3
RESET MASTER;
diff --git a/mysql-test/suite/galera/t/galera_as_slave_gtid.test b/mysql-test/suite/galera/t/galera_as_slave_gtid.test
index c2331a2ae05..3b0f191ad83 100644
--- a/mysql-test/suite/galera/t/galera_as_slave_gtid.test
+++ b/mysql-test/suite/galera/t/galera_as_slave_gtid.test
@@ -8,18 +8,19 @@
#
--source include/have_innodb.inc
-
-# As node #1 is not a Galera node, we connect to node #2 in order to run include/galera_cluster.inc
---connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--source include/galera_cluster.inc
+# As node #3 is not a Galera node and galera_cluster.inc does not open a connection to it,
+# we open the node_3 connection here
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
--connection node_2
--disable_query_log
---eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_1;
+--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_3;
--enable_query_log
START SLAVE;
---connection node_1
+--connection node_3
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES(1);
@@ -34,17 +35,21 @@ SELECT LENGTH(@@global.gtid_binlog_state) > 1;
--source include/wait_condition.inc
--disable_query_log
+
--eval SELECT '$gtid_binlog_state_node1' = @@global.gtid_binlog_state AS gtid_binlog_state_equal;
+#--eval SELECT GTID_SUBSET('$gtid_executed_node1', @@global.gtid_executed) AS gtid_executed_equal;
+
--enable_query_log
---connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connection node_1
SELECT COUNT(*) = 1 FROM t1;
--disable_query_log
--eval SELECT '$gtid_binlog_state_node1' = @@global.gtid_binlog_state AS gtid_binlog_state_equal;
+#--eval SELECT GTID_SUBSET('$gtid_executed_node1', @@global.gtid_executed) AS gtid_executed_equal;
--enable_query_log
---connection node_1
+--connection node_3
DROP TABLE t1;
#
@@ -55,7 +60,7 @@ DROP TABLE t1;
--sleep 1
---connection node_3
+--connection node_1
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc
diff --git a/mysql-test/suite/galera/t/galera_as_slave_gtid_replicate_do_db.cnf b/mysql-test/suite/galera/t/galera_as_slave_gtid_replicate_do_db.cnf
new file mode 100644
index 00000000000..e0852c50c44
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_as_slave_gtid_replicate_do_db.cnf
@@ -0,0 +1,17 @@
+!include ../galera_2nodes_as_slave.cnf
+
+[mysqld]
+#gtid-mode=ON
+log-bin=master-bin
+log-bin-index=master-bin
+log-slave-updates
+#enforce-gtid-consistency
+binlog-format=ROW
+
+[mysqld.2]
+replicate-do-db=test1
+replicate-wild-do-table=test1.%
+
+[mysqld.3]
+replicate-do-db=test1
+replicate-wild-do-table=test1.%
diff --git a/mysql-test/suite/galera/t/galera_as_slave_gtid_replicate_do_db.test b/mysql-test/suite/galera/t/galera_as_slave_gtid_replicate_do_db.test
new file mode 100644
index 00000000000..81b6d446ba6
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_as_slave_gtid_replicate_do_db.test
@@ -0,0 +1,150 @@
+#
+# Test Galera as a slave to a MySQL master using GTIDs
+#
+
+--source include/have_innodb.inc
+--source include/galera_cluster.inc
+--source include/have_log_bin.inc
+
+# As node #3 is not a Galera node and galera_cluster.inc does not open a connection to it,
+# we open the node_3 connection here
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
+--connection node_1
+SET global wsrep_on=OFF;
+RESET MASTER;
+SET global wsrep_on=ON;
+
+--connection node_3
+RESET MASTER;
+
+--connection node_2
+SET global wsrep_on=OFF;
+RESET MASTER;
+SET global wsrep_on=ON;
+--disable_query_log
+--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=$NODE_MYPORT_3, MASTER_USER='root';
+--enable_query_log
+START SLAVE;
+
+--connection node_3
+CREATE SCHEMA test1;
+CREATE SCHEMA test2;
+USE test1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY,f2 CHAR(5) DEFAULT 'abc') ENGINE=InnoDB;
+USE test2;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY,f2 CHAR(5) DEFAULT 'abc') ENGINE=InnoDB;
+
+#
+# First , some autocommit stuff
+#
+
+# Simple inserts
+
+INSERT INTO test1.t1 (f1) VALUES (1);
+INSERT INTO test2.t1 (f1) VALUES (1);
+
+INSERT INTO test1.t1 (f1) VALUES (2);
+INSERT INTO test2.t1 (f1) VALUES (2);
+
+INSERT INTO test1.t1 (f1) VALUES (3);
+INSERT INTO test2.t1 (f1) VALUES (3);
+
+# Update that only covers test2.t1
+
+UPDATE test2.t1 SET test2.t1.f2 = 'cde';
+
+# Multi-table UPDATE
+
+UPDATE test1.t1, test2.t1 SET test1.t1.f2 = 'klm', test2.t1.f2 = 'xyz';
+
+# Multi-table DELETE
+
+DELETE test1.t1, test2.t1 FROM test1.t1 INNER JOIN test2.t1 WHERE test1.t1.f1 = test2.t1.f1 AND test1.t1.f1 = 3;
+
+#
+# Multi-statement transactions
+#
+
+# Transaction which is not replicated at all
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO test2.t1 (f1) VALUES (999);
+INSERT INTO test2.t1 (f1) VALUES (9999);
+COMMIT;
+
+# Transaction that is completely replicated
+START TRANSACTION;
+INSERT INTO test1.t1 (f1) VALUES (111);
+INSERT INTO test1.t1 (f1) VALUES (222);
+COMMIT;
+
+# Transaction that is partially replicated
+
+START TRANSACTION;
+INSERT INTO test1.t1 (f1) VALUES (333);
+INSERT INTO test2.t1 (f1) VALUES (99999);
+COMMIT;
+
+#
+# Make sure binary logs and gtid_executed strings are equal
+#
+
+--sleep 2
+--connection node_2
+--let $effective_uuid = `SELECT LEFT(@@global.gtid_current_pos, 36)`
+--let $gtid_executed_node2 = `SELECT @@global.gtid_current_pos;`
+
+--replace_result $effective_uuid <effective_uuid>
+--replace_regex /xid=[0-9]+/xid=###/ /table_id: [0-9]+/table_id: ###/
+SHOW BINLOG EVENTS IN 'master-bin.000001' FROM 256;
+
+--connection node_1
+
+--disable_query_log
+--eval SELECT '$gtid_executed_node2' = @@global.gtid_current_pos AS gtid_executed_equal;
+--enable_query_log
+
+--replace_result $effective_uuid <effective_uuid>
+--replace_regex /xid=[0-9]+/xid=###/ /table_id: [0-9]+/table_id: ###/
+SHOW BINLOG EVENTS IN 'master-bin.000001' FROM 256;
+
+#
+# Final consistency checks
+#
+
+--let $diff_servers = 1 2
+--source include/diff_servers.inc
+
+--connection node_1
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
+--source include/wait_condition.inc
+
+SELECT COUNT(*) = 2 FROM test1.t1 WHERE f1 IN (1,2);
+SELECT COUNT(*) = 3 FROM test1.t1 WHERE f1 IN (111,222,333);
+SELECT COUNT(*) = 2 FROM test1.t1 WHERE f2 = 'klm';
+
+--error 1049
+USE test2;
+
+#
+# Cleanup
+#
+
+--connection node_3
+DROP SCHEMA test1;
+DROP SCHEMA test2;
+
+--sleep 1
+
+--connection node_1
+--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
+--source include/wait_condition.inc
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
+--source include/wait_condition.inc
+
+STOP SLAVE;
+RESET SLAVE ALL;
diff --git a/mysql-test/suite/galera/t/galera_as_slave_gtid_replicate_do_db_cc.test b/mysql-test/suite/galera/t/galera_as_slave_gtid_replicate_do_db_cc.test
new file mode 100644
index 00000000000..1604016f3c3
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_as_slave_gtid_replicate_do_db_cc.test
@@ -0,0 +1,176 @@
+#
+# Test the case where a Galera slave to async replication goes non-prim while
+# a stream of replication events including filtered events is arriving
+#
+
+--source include/have_innodb.inc
+--source include/have_log_bin.inc
+--source include/big_test.inc
+
+# As node #1 is not a Galera node, we connect to node #2 in order to run include/galera_cluster.inc
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--source include/galera_cluster.inc
+
+--connection node_1
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connection node_3
+SET global wsrep_on=OFF;
+RESET MASTER;
+SET global wsrep_on=ON;
+
+--connection node_2
+SET global wsrep_on=OFF;
+RESET MASTER;
+SET global wsrep_on=ON;
+
+--connection node_2
+--disable_query_log
+--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=$NODE_MYPORT_1;
+--enable_query_log
+START SLAVE USER='root';
+
+--connection node_1
+CREATE SCHEMA test1;
+CREATE SCHEMA test2;
+USE test1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+USE test2;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+INSERT INTO test1.t1 (f1) VALUES (1);
+INSERT INTO test2.t1 (f1) VALUES (1);
+
+INSERT INTO test1.t1 (f1) VALUES (2);
+INSERT INTO test2.t1 (f1) VALUES (2);
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
+--source include/wait_condition.inc
+
+--let $wait_condition = SELECT COUNT(*) = 2 FROM test1.t1;
+--source include/wait_condition.inc
+
+SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+
+--connection node_1
+INSERT INTO test1.t1 (f1) VALUES (3);
+INSERT INTO test2.t1 (f1) VALUES (3);
+INSERT INTO test1.t1 (f1) VALUES (3);
+INSERT INTO test2.t1 (f1) VALUES (3);
+INSERT INTO test1.t1 (f1) VALUES (3);
+INSERT INTO test2.t1 (f1) VALUES (3);
+INSERT INTO test1.t1 (f1) VALUES (3);
+INSERT INTO test2.t1 (f1) VALUES (3);
+
+--connection node_2
+SET SESSION wsrep_on=OFF;
+--let $wait_condition = SELECT VARIABLE_VALUE = 'non-Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE variable_name = 'wsrep_cluster_status';
+--source include/wait_condition.inc
+
+--let $slave_sql_errno = 1047
+--source include/wait_for_slave_sql_error.inc
+
+--connection node_1
+INSERT INTO test1.t1 (f1) VALUES (4);
+INSERT INTO test2.t1 (f1) VALUES (4);
+INSERT INTO test1.t1 (f1) VALUES (4);
+INSERT INTO test2.t1 (f1) VALUES (4);
+INSERT INTO test1.t1 (f1) VALUES (4);
+INSERT INTO test2.t1 (f1) VALUES (4);
+INSERT INTO test1.t1 (f1) VALUES (4);
+INSERT INTO test2.t1 (f1) VALUES (4);
+INSERT INTO test1.t1 (f1) VALUES (4);
+INSERT INTO test2.t1 (f1) VALUES (4);
+
+--connection node_2
+SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
+
+--connection node_1
+INSERT INTO test1.t1 (f1) VALUES (5);
+INSERT INTO test2.t1 (f1) VALUES (5);
+
+--connection node_2
+--let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE variable_name = 'wsrep_cluster_status';
+--source include/wait_condition.inc
+SET SESSION wsrep_on=ON;
+--let $wait_condition = SELECT VARIABLE_VALUE = 'ON' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE variable_name = 'wsrep_ready';
+--source include/wait_condition.inc
+
+--connection node_1
+INSERT INTO test1.t1 (f1) VALUES (6);
+INSERT INTO test2.t1 (f1) VALUES (6);
+
+--connection node_2
+START SLAVE;
+
+#
+# Consistency checks
+#
+
+--sleep 2
+--connection node_2
+--let $wait_condition = SELECT COUNT(DISTINCT f1) = 6 FROM test1.t1;
+--source include/wait_condition.inc
+
+--connection node_3
+--let $wait_condition = SELECT COUNT(DISTINCT f1) = 6 FROM test1.t1;
+--source include/wait_condition.inc
+
+--connection node_2
+--let $gtid_executed_node2 = `SELECT @@global.gtid_executed;`
+
+--let $effective_uuid_1 = `SELECT SUBSTRING_INDEX(@@global.gtid_executed, ':', 1)`
+--let $effective_uuid_2 = `SELECT SUBSTRING_INDEX(SUBSTRING_INDEX(@@global.gtid_executed, '\n', -1), ':', 1)`
+
+--replace_result $effective_uuid_1 <effective_uuid_1> $effective_uuid_2 <effective_uuid_2>
+--replace_regex /xid=[0-9]+/xid=###/ /table_id: [0-9]+/table_id: ###/
+SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 120;
+
+--error 1049
+USE test2;
+
+--connection node_3
+
+--disable_query_log
+--eval SELECT '$gtid_executed_node2' = @@global.gtid_executed AS gtid_executed_equal;
+--enable_query_log
+
+--error 1049
+USE test2;
+
+--replace_result $effective_uuid_1 <effective_uuid_1> $effective_uuid_2 <effective_uuid_2>
+--replace_regex /xid=[0-9]+/xid=###/ /table_id: [0-9]+/table_id: ###/
+SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 120;
+
+#
+# Cleanup
+#
+
+--connection node_1
+DROP SCHEMA test1;
+DROP SCHEMA test2;
+
+--sleep 1
+
+--connection node_3
+--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
+--source include/wait_condition.inc
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
+--source include/wait_condition.inc
+
+--connection node_2
+STOP SLAVE;
+RESET SLAVE ALL;
+CALL mtr.add_suppression("GTID replication failed");
+CALL mtr.add_suppression("Slave SQL: Error in Xid_log_event: Commit could not be completed");
+CALL mtr.add_suppression("Slave SQL: Node has dropped from cluster, Error_code: 1047");
+CALL mtr.add_suppression("TO isolation failed for");
+CALL mtr.add_suppression("Slave SQL: Error 'Deadlock found when trying to get lock; try restarting transaction' on query");
+CALL mtr.add_suppression("Slave SQL: Error 'WSREP has not yet prepared node for application use' on query");
+CALL mtr.add_suppression("Slave: WSREP has not yet prepared node for application use Error_code: 1047");
diff --git a/mysql-test/suite/galera/t/galera_as_slave_nonprim.test b/mysql-test/suite/galera/t/galera_as_slave_nonprim.test
index 46a93458271..31c0b9ca162 100644
--- a/mysql-test/suite/galera/t/galera_as_slave_nonprim.test
+++ b/mysql-test/suite/galera/t/galera_as_slave_nonprim.test
@@ -7,22 +7,23 @@
--source include/have_innodb.inc
--source include/big_test.inc
+--source include/galera_cluster.inc
# Step #1. Establish replication
#
-# As node #1 is not a Galera node, we connect to node #2 in order to run include/galera_cluster.inc
-#
---connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
---source include/galera_cluster.inc
+# As node 4 is not a Galera node and galera_cluster.inc does not open a connection to it,
+# we open the node_4 connection here
+--connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4
+
--connection node_2
--disable_query_log
---eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=$NODE_MYPORT_1, MASTER_USER='root';
+--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=$NODE_MYPORT_4, MASTER_USER='root';
--enable_query_log
START SLAVE;
SET SESSION wsrep_sync_wait = 0;
---connection node_1
+--connection node_4
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
--connection node_2
@@ -34,22 +35,21 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1';
---connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
---connection node_3
+--connection node_1
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
# Step #3. Force async replication to fail by creating a replication event while the slave is non-prim
---connection node_1
+--connection node_4
INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
--connection node_2
--sleep 5
--let $value = query_get_value(SHOW SLAVE STATUS, Last_SQL_Error, 1)
---connection node_3
+--connection node_1
--disable_query_log
--eval SELECT "$value" IN ("Error 'Unknown command' on query. Default database: 'test'. Query: 'BEGIN'", "Node has dropped from cluster") AS expected_error
--enable_query_log
@@ -58,7 +58,7 @@ INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
--connection node_2
SET GLOBAL wsrep_provider_options = 'gmcast.isolate=0';
---connection node_3
+--connection node_1
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
@@ -74,7 +74,7 @@ START SLAVE;
--let $wait_condition = SELECT COUNT(*) = 5 FROM t1;
--source include/wait_condition.inc
---connection node_1
+--connection node_4
DROP TABLE t1;
--sleep 2
@@ -92,5 +92,5 @@ CALL mtr.add_suppression("Slave SQL: Error in Xid_log_event: Commit could not be
CALL mtr.add_suppression("Slave SQL: Node has dropped from cluster, Error_code: 1047");
---connection node_1
+--connection node_4
RESET MASTER;
diff --git a/mysql-test/suite/galera/t/galera_as_slave_preordered.test b/mysql-test/suite/galera/t/galera_as_slave_preordered.test
index 6f221f83b3a..5b3c78b2cb1 100644
--- a/mysql-test/suite/galera/t/galera_as_slave_preordered.test
+++ b/mysql-test/suite/galera/t/galera_as_slave_preordered.test
@@ -6,14 +6,15 @@
--source include/have_innodb.inc
--source include/have_log_bin.inc
-
-# As node #1 is not a Galera node, we connect to node #2 in order to run include/galera_cluster.inc
---connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--source include/galera_cluster.inc
+# As node #3 is not a Galera node and galera_cluster.inc does not open a connection to it,
+# we open the node_3 connection here
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
--connection node_2
--disable_query_log
---eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=$NODE_MYPORT_1;
+--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=$NODE_MYPORT_3;
--enable_query_log
START SLAVE USER='root';
@@ -21,7 +22,7 @@ START SLAVE USER='root';
# Issue many large-ish transaction on the async master
#
---connection node_1
+--connection node_3
CREATE TABLE ten (f1 INTEGER);
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
@@ -63,8 +64,7 @@ while ($count)
SELECT COUNT(DISTINCT f1) = 2 * 100 * 10 * 10 FROM t1;
---connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
---connection node_3
+--connection node_1
SELECT COUNT(*) = 2 * 100 * 10 * 10 FROM t1;
SELECT COUNT(DISTINCT f1) = 2 * 100 * 10 * 10 FROM t1;
@@ -72,7 +72,7 @@ SELECT COUNT(DISTINCT f1) = 2 * 100 * 10 * 10 FROM t1;
# Cleanup
#
---connection node_1
+--connection node_3
DROP TABLE t1;
DROP TABLE ten;
@@ -80,5 +80,8 @@ DROP TABLE ten;
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc
+--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'ten';
+--source include/wait_condition.inc
+
STOP SLAVE;
RESET SLAVE ALL;
diff --git a/mysql-test/suite/galera/t/galera_as_slave_replication_bundle.test b/mysql-test/suite/galera/t/galera_as_slave_replication_bundle.test
index 460e040c010..fa5f3f9c7c6 100644
--- a/mysql-test/suite/galera/t/galera_as_slave_replication_bundle.test
+++ b/mysql-test/suite/galera/t/galera_as_slave_replication_bundle.test
@@ -5,18 +5,19 @@
#
--source include/have_innodb.inc
-
-# As node #1 is not a Galera node, we connect to node #2 in order to run include/galera_cluster.inc
---connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--source include/galera_cluster.inc
+# As node #3 is not a Galera node and galera_cluster.inc does not open a connection to it,
+# we open the node_3 connection here
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
--connection node_2
--disable_query_log
---eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_1;
+--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_3;
--enable_query_log
START SLAVE;
---connection node_1
+--connection node_3
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES(1);
INSERT INTO t1 VALUES(2);
@@ -36,7 +37,7 @@ INSERT INTO t1 VALUES(5);
SELECT COUNT(*) = 4 FROM t1;
# Bundle is now complete, the last INSERT and the DROP are delivered
---connection node_1
+--connection node_3
DROP TABLE t1;
--connection node_2
diff --git a/mysql-test/suite/galera/t/galera_autoinc_sst_mariabackup.cnf b/mysql-test/suite/galera/t/galera_autoinc_sst_mariabackup.cnf
index bd6bf9d4f98..cddb8e0e174 100644
--- a/mysql-test/suite/galera/t/galera_autoinc_sst_mariabackup.cnf
+++ b/mysql-test/suite/galera/t/galera_autoinc_sst_mariabackup.cnf
@@ -5,7 +5,7 @@ wsrep_sst_method=mariabackup
wsrep_sst_auth="root:"
[mysqld.1]
-wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true'
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=10M;pc.ignore_sb=true'
[mysqld.2]
-wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore_sb=true'
+wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=10M;pc.ignore_sb=true'
diff --git a/mysql-test/suite/galera/t/galera_autoinc_sst_mariabackup.test b/mysql-test/suite/galera/t/galera_autoinc_sst_mariabackup.test
index 6aa8ad2923c..4a79ed74038 100644
--- a/mysql-test/suite/galera/t/galera_autoinc_sst_mariabackup.test
+++ b/mysql-test/suite/galera/t/galera_autoinc_sst_mariabackup.test
@@ -44,6 +44,7 @@ DELIMITER ;|
--connection node_2a
--source include/kill_galera.inc
+--remove_file $MYSQLTEST_VARDIR/mysqld.2/data/grastate.dat
--source include/start_mysqld.inc
INSERT INTO t1 VALUES (DEFAULT);
@@ -55,7 +56,6 @@ INSERT INTO t1 VALUES (DEFAULT);
--disable_query_log
--eval KILL CONNECTION $connection_id
--enable_query_log
-
INSERT INTO t1 VALUES (DEFAULT);
--connection node_1
diff --git a/mysql-test/suite/galera/t/galera_bf_abort_group_commit.cnf b/mysql-test/suite/galera/t/galera_bf_abort_group_commit.cnf
new file mode 100644
index 00000000000..612418c17c0
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_bf_abort_group_commit.cnf
@@ -0,0 +1,15 @@
+!include ../galera_2nodes.cnf
+
+# We set repl.commit_order=1 in order to disable provider commit
+# ordering.
+
+[mysqld.1]
+log-bin
+log-slave-updates
+wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;repl.commit_order=1'
+
+[mysqld.2]
+
+log-bin
+log-slave-updates
+wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.2.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;repl.commit_order=1' \ No newline at end of file
diff --git a/mysql-test/suite/galera/t/galera_bf_abort_group_commit.test b/mysql-test/suite/galera/t/galera_bf_abort_group_commit.test
new file mode 100644
index 00000000000..a828701cd0e
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_bf_abort_group_commit.test
@@ -0,0 +1,77 @@
+#
+# This test uses galera_sr_bf_abort.inc to probe various BF abort points
+# for SR transactions with wsrep provider commit ordering disabled.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/galera_have_debug_sync.inc
+
+# Control connection for manipulating sync points on node 1
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+SET SESSION wsrep_sync_wait = 0;
+
+# SR bf abort on fragment
+--let $wsrep_trx_fragment_size = 1
+--echo galera_sr_bf_abort_at_commit = 0
+--let $galera_sr_bf_abort_at_commit = 0
+
+--echo after_replicate_sync
+--let $galera_sr_bf_abort_sync_point = after_replicate_sync
+--source ../../suite/galera_sr/t/galera_sr_bf_abort.inc
+
+--echo local_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = local_monitor_master_enter_sync
+--source ../../suite/galera_sr/t/galera_sr_bf_abort.inc
+
+--echo apply_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = apply_monitor_master_enter_sync
+--source ../../suite/galera_sr/t/galera_sr_bf_abort.inc
+
+--echo commit_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = commit_monitor_master_enter_sync
+--source ../../suite/galera_sr/t/galera_sr_bf_abort.inc
+
+# SR bf abort on commit fragment
+--let $wsrep_trx_fragment_size = 1
+--echo galera_sr_bf_abort_at_commit = 1
+--let $galera_sr_bf_abort_at_commit = 1
+
+--echo after_replicate_sync
+--let $galera_sr_bf_abort_sync_point = after_replicate_sync
+--source ../../suite/galera_sr/t/galera_sr_bf_abort.inc
+
+--echo local_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = local_monitor_master_enter_sync
+--source ../../suite/galera_sr/t/galera_sr_bf_abort.inc
+
+--echo apply_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = apply_monitor_master_enter_sync
+--source ../../suite/galera_sr/t/galera_sr_bf_abort.inc
+
+--echo commit_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = commit_monitor_master_enter_sync
+--source ../../suite/galera_sr/t/galera_sr_bf_abort.inc
+
+# Normal bf abort on commit
+--let $wsrep_trx_fragment_size = 0
+--echo galera_sr_bf_abort_at_commit = 1
+--let $galera_sr_bf_abort_at_commit = 1
+
+--echo after_replicate_sync
+--let $galera_sr_bf_abort_sync_point = after_replicate_sync
+--source ../../suite/galera_sr/t/galera_sr_bf_abort.inc
+
+--echo local_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = local_monitor_master_enter_sync
+--source ../../suite/galera_sr/t/galera_sr_bf_abort.inc
+
+--echo apply_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = apply_monitor_master_enter_sync
+--source ../../suite/galera_sr/t/galera_sr_bf_abort.inc
+
+--echo commit_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = commit_monitor_master_enter_sync
+--source ../../suite/galera_sr/t/galera_sr_bf_abort.inc
+
+CALL mtr.add_suppression("WSREP: fragment replication failed: 1");
diff --git a/mysql-test/suite/galera/t/galera_bf_abort_shutdown.test b/mysql-test/suite/galera/t/galera_bf_abort_shutdown.test
new file mode 100644
index 00000000000..c7af8375b3f
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_bf_abort_shutdown.test
@@ -0,0 +1,33 @@
+#
+# This test verifies that the server can be shut down even if
+# some of the wsrep transactions are in QUERY_COMMITTING state.
+# In this case the shutdown sequence may do a BF abort for the
+# connection.
+#
+
+--source include/have_innodb.inc
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
+
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--source include/auto_increment_offset_save.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INT PRIMARY KEY);
+
+--connection node_2
+SET DEBUG_SYNC = 'wsrep_before_certification WAIT_FOR continue';
+--send INSERT INTO t1 VALUES (1)
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--source include/restart_mysqld.inc
+
+# Restore original auto_increment_offset values.
+--let $node_2=node_2a
+--source include/auto_increment_offset_restore.inc
+
+--connection node_1
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_commit_empty.test b/mysql-test/suite/galera/t/galera_commit_empty.test
new file mode 100644
index 00000000000..4e1a1e4eb2c
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_commit_empty.test
@@ -0,0 +1,35 @@
+# Test empty transactions.
+#
+# Check that the empty transaction gets terminated by starting a new
+# transaction after it. If the empty transaction is not terminated
+# appropriately, the following START TRANSACTION will fail.
+#
+# Also check that empty transactions don't generate any write sets.
+#
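+# The write-set check below is indirect: wsrep_last_committed is sampled
+# before and after the transactions, and an unchanged value is taken to mean
+# that no write sets were replicated. For example, if the counter reads 15
+# both times, wsrep_last_committed_diff evaluates to 1 (true).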
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--let $wsrep_last_committed_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+
+START TRANSACTION;
+COMMIT;
+
+START TRANSACTION;
+COMMIT;
+
+START TRANSACTION READ ONLY;
+COMMIT;
+
+START TRANSACTION;
+COMMIT;
+
+START TRANSACTION;
+START TRANSACTION;
+COMMIT;
+
+--let $wsrep_last_committed_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+
+--disable_query_log
+--eval SELECT $wsrep_last_committed_after = $wsrep_last_committed_before AS wsrep_last_committed_diff
+--enable_query_log
diff --git a/mysql-test/suite/galera/t/galera_create_table_as_select.test b/mysql-test/suite/galera/t/galera_create_table_as_select.test
new file mode 100644
index 00000000000..a6c1f657280
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_create_table_as_select.test
@@ -0,0 +1,145 @@
+#
+# CREATE TABLE AS SELECT tests
+#
+
+--source include/galera_cluster.inc
+
+--connection node_1
+SET SESSION default_storage_engine=InnoDB;
+
+# Left table already exists
+
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+--error ER_TABLE_EXISTS_ERROR
+CREATE TABLE t1 AS SELECT * FROM t2;
+DROP TABLE t1,t2;
+
+# Right table does not exist
+--error ER_NO_SUCH_TABLE
+CREATE TABLE t1 AS SELECT * FROM t2;
+
+# No right table at all
+CREATE TABLE t1 AS SELECT 1 FROM DUAL;
+
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1;
+
+--connection node_1
+DROP TABLE t1;
+
+# Empty right table
+--connection node_1
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t1 AS SELECT * FROM t2;
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_1
+DROP TABLE t1,t2;
+
+# Right table is MyISAM
+
+CREATE TABLE t2 (f1 INTEGER) ENGINE=MyISAM;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5);
+CREATE TABLE t1 AS SELECT * FROM t2;
+SELECT COUNT(*) = 5 FROM t1;
+
+--connection node_2
+SELECT COUNT(*) = 5 FROM t1;
+
+--connection node_1
+DROP TABLE t1,t2;
+
+# Right side is an aggregate query
+
+--connection node_1
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5);
+CREATE TABLE t1 AS SELECT MAX(f1) AS f1 FROM t2;
+
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1;
+SELECT f1 = 5 FROM t1;
+
+--connection node_1
+DROP TABLE t1,t2;
+
+# Inside a stored procedure
+
+--connection node_1
+DELIMITER |;
+CREATE PROCEDURE sp1 ()
+BEGIN
+ CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+ INSERT INTO t2 VALUES (1),(2),(3),(4),(5);
+ CREATE TABLE t1 AS SELECT * FROM t2;
+END|
+DELIMITER ;|
+CALL sp1();
+SELECT COUNT(*) = 5 FROM t1;
+
+--connection node_2
+SELECT COUNT(*) = 5 FROM t1;
+
+--connection node_1
+DROP TABLE t1, t2;
+DROP PROCEDURE sp1;
+
+# Inside a prepared statement
+
+--connection node_1
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5);
+
+PREPARE stmt FROM 'CREATE TABLE t1 AS SELECT * FROM t2';
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+DROP TABLE t1, t2;
+
+#
+# Multi-master conflict
+#
+
+--connection node_1
+
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5);
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+LOCK TABLE t2 WRITE;
+
+--connection node_1
+--send CREATE TABLE t1 AS SELECT * FROM t2;
+
+--connection node_2
+SELECT COUNT(*) = 5 FROM t2;
+CREATE TABLE t1 AS SELECT * FROM t2;
+
+--connection node_1a
+UNLOCK TABLES;
+
+--connection node_1
+--error ER_TABLE_EXISTS_ERROR,ER_LOCK_DEADLOCK
+--reap
+
+DROP TABLE t1, t2;
+
+#
+# Temporary table
+#
+
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5);
+
+CREATE TEMPORARY TABLE t1 AS SELECT * FROM t2;
+
+--connection node_2
+--error ER_NO_SUCH_TABLE
+SELECT * FROM t1;
+
+CALL mtr.add_suppression("Slave SQL: Error 'Unknown table 'test.t1'' on query");
+
+--connection node_1
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/galera/t/galera_defaults.test b/mysql-test/suite/galera/t/galera_defaults.test
index 3d4a7da7b54..3f8be268135 100644
--- a/mysql-test/suite/galera/t/galera_defaults.test
+++ b/mysql-test/suite/galera/t/galera_defaults.test
@@ -19,7 +19,7 @@ source ../wsrep/include/check_galera_version.inc;
# Global Variables
-SELECT COUNT(*) = 43 FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep_%';
+SELECT COUNT(*) `expect 48` FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep_%';
SELECT VARIABLE_NAME, VARIABLE_VALUE
FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
diff --git a/mysql-test/suite/galera/t/galera_forced_binlog_format.test b/mysql-test/suite/galera/t/galera_forced_binlog_format.test
index 364f41529a4..e9d7fa1c3a3 100644
--- a/mysql-test/suite/galera/t/galera_forced_binlog_format.test
+++ b/mysql-test/suite/galera/t/galera_forced_binlog_format.test
@@ -7,7 +7,10 @@
--source include/galera_cluster.inc
--connection node_1
+SET GLOBAL wsrep_on=OFF;
RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+FLUSH BINARY LOGS;
SET SESSION binlog_format = 'STATEMENT';
@@ -40,6 +43,6 @@ REVOKE ALL PRIVILEGES, GRANT OPTION FROM dummy@localhost;
DROP USER dummy@localhost;
DROP DATABASE testdb_9401;
---source include/galera_end.inc
+#--source include/galera_end.inc
--echo # End of tests
diff --git a/mysql-test/suite/galera/t/galera_ftwrl_drain.test b/mysql-test/suite/galera/t/galera_ftwrl_drain.test
index 690e890cdea..ee64e147f65 100644
--- a/mysql-test/suite/galera/t/galera_ftwrl_drain.test
+++ b/mysql-test/suite/galera/t/galera_ftwrl_drain.test
@@ -12,7 +12,7 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_debug_sync.inc
---source suite/galera/include/galera_have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
--connection node_1
CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
@@ -55,7 +55,7 @@ SET SESSION innodb_lock_wait_timeout=1;
SET SESSION wait_timeout=1;
--error ER_LOCK_WAIT_TIMEOUT
-INSERT INTO t2 VALUES (2);
+INSERT INTO t1 VALUES (2);
--connection node_2a
UNLOCK TABLES;
diff --git a/mysql-test/suite/galera/t/galera_gcache_recover_full_gcache.test b/mysql-test/suite/galera/t/galera_gcache_recover_full_gcache.test
index b7fd9cf3aed..d728a094f10 100644
--- a/mysql-test/suite/galera/t/galera_gcache_recover_full_gcache.test
+++ b/mysql-test/suite/galera/t/galera_gcache_recover_full_gcache.test
@@ -13,6 +13,14 @@ SET SESSION wsrep_sync_wait = 0;
--source include/kill_galera.inc
--connection node_1
+
+#
+# Wait until the configuration change is over in order to avoid a
+# replication error caused by the configuration change.
+#
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
INSERT INTO t1 (f2) VALUES (REPEAT('x', 1024 * 1024 * 10));
INSERT INTO t1 (f2) VALUES (REPEAT('x', 1024 * 1024 * 10));
INSERT INTO t1 (f2) VALUES (REPEAT('x', 1024 * 1024 * 10));
@@ -48,8 +56,8 @@ DROP TABLE t1;
CALL mtr.add_suppression("Skipped GCache ring buffer recovery");
# Confirm that IST did not take place
---let $assert_text = IST first seqno 2 not found from cache, falling back to SST
---let $assert_select = IST first seqno 2 not found from cache, falling back to SST
+--let $assert_text = IST first seqno [24] not found from cache, falling back to SST
+--let $assert_select = IST first seqno [24] not found from cache, falling back to SST
--let $assert_count = 1
--let $assert_file = $MYSQLTEST_VARDIR/log/mysqld.1.err
--let $assert_only_after = starting as process
diff --git a/mysql-test/suite/galera/t/galera_gcs_fragment.test b/mysql-test/suite/galera/t/galera_gcs_fragment.test
index 80d3a5cb659..d2593fec8c8 100644
--- a/mysql-test/suite/galera/t/galera_gcs_fragment.test
+++ b/mysql-test/suite/galera/t/galera_gcs_fragment.test
@@ -1,7 +1,7 @@
# Test fragmentation over configuration changes
--source include/galera_cluster.inc
--source include/have_innodb.inc
---source suite/galera/include/galera_have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
# Save original auto_increment_offset values.
--let $node_1=node_1
@@ -54,7 +54,7 @@ INSERT INTO t1 VALUES (2, "bbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
# Deadlock error should be returned since write set send was
# interrupted by gcs
--connection node_1
---error ER_LOCK_DEADLOCK
+--error ER_ERROR_DURING_COMMIT
--reap
# Do additional insert to verify that node_1 remain operational
diff --git a/mysql-test/suite/galera/t/galera_gtid-master.opt b/mysql-test/suite/galera/t/galera_gtid-master.opt
index 8a755e98b00..30317469ae7 100644
--- a/mysql-test/suite/galera/t/galera_gtid-master.opt
+++ b/mysql-test/suite/galera/t/galera_gtid-master.opt
@@ -1 +1 @@
---log-bin --log-slave-updates
+--log-bin --log-slave-updates --loose-new-servers-for-galera_gtid-test
diff --git a/mysql-test/suite/galera/t/galera_gtid_slave.test b/mysql-test/suite/galera/t/galera_gtid_slave.test
index 19bfd8e17db..df55ea03cb0 100644
--- a/mysql-test/suite/galera/t/galera_gtid_slave.test
+++ b/mysql-test/suite/galera/t/galera_gtid_slave.test
@@ -8,18 +8,19 @@
#
--source include/have_innodb.inc
-
-# As node #1 is not a Galera node, we connect to node #2 in order to run include/galera_cluster.inc
---connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--source include/galera_cluster.inc
+# As node #3 is not a Galera node and galera_cluster.inc does not open a connection to it,
+# we open the node_3 connection here
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
--connection node_2
--disable_query_log
---eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_1;
+--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_3;
--enable_query_log
START SLAVE;
---connection node_1
+--connection node_3
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES(1);
@@ -49,31 +50,43 @@ INSERT INTO t1 VALUES(2);
INSERT INTO t1 VALUES(3);
SELECT @@global.gtid_binlog_state;
---connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connection node_1
--let $wait_condition = SELECT COUNT(*) = 3 FROM t1;
--source include/wait_condition.inc
INSERT INTO t1 VALUES(4);
SELECT @@global.gtid_binlog_state;
---connection node_1
+--connection node_3
DROP TABLE t1,t2;
+#
+# Unfortunately, without the sleep below the following statement fails with "query returned no rows",
+# which is difficult to understand given that it is an aggregate query. MTR also reports a
+# "query execution was interrupted" warning, which is equally puzzling.
+#
+
+--sleep 1
+
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc
---connection node_3
+--connection node_1
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc
--connection node_2
STOP SLAVE;
RESET SLAVE ALL;
+SET GLOBAL wsrep_on=OFF;
reset master;
+SET GLOBAL wsrep_on=ON;
---connection node_3
+--connection node_1
+SET GLOBAL wsrep_on=OFF;
reset master;
+SET GLOBAL wsrep_on=ON;
---connection node_1
+--connection node_3
reset master;
diff --git a/mysql-test/suite/galera/t/galera_gtid_slave_sst_rsync.test b/mysql-test/suite/galera/t/galera_gtid_slave_sst_rsync.test
index 3ed7ec1d09e..d03445d537a 100644
--- a/mysql-test/suite/galera/t/galera_gtid_slave_sst_rsync.test
+++ b/mysql-test/suite/galera/t/galera_gtid_slave_sst_rsync.test
@@ -8,21 +8,21 @@
--source include/big_test.inc
--source include/have_innodb.inc
-# As node #1 is not a Galera node, we connect to node #2 in order to run include/galera_cluster.inc
---connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--source include/galera_cluster.inc
+# As node #3 is not a Galera node and galera_cluster.inc does not open a connection to it,
+# we open the node_3 connection here
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
--echo #Connection 2
--connection node_2
--disable_query_log
---eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_1,master_use_gtid=slave_pos;
+--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$NODE_MYPORT_3,master_use_gtid=slave_pos;
--enable_query_log
START SLAVE;
--sleep 1
-
---echo #Connection 1
---connection node_1
+--echo #Connection 3
+--connection node_3
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 int unique) ENGINE=InnoDB;
INSERT INTO t2 VALUES(1,11);
INSERT INTO t2 VALUES(2,22);
@@ -30,7 +30,6 @@ INSERT INTO t2 VALUES(3,33);
SELECT @@global.gtid_binlog_state;
--source include/save_master_gtid.inc
-
--echo #Connection 2
--connection node_2
--source include/sync_with_master_gtid.inc
@@ -40,9 +39,8 @@ INSERT INTO t2 VALUES(4,44);
INSERT INTO t2 VALUES(5,55);
INSERT INTO t2 VALUES(6,66);
SELECT @@global.gtid_binlog_state;
-
---echo #Connection 3
---connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--echo #Connection 1
+--connection node_1
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME= 't2';
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 6 FROM t2;
@@ -53,8 +51,8 @@ INSERT INTO t2 VALUES(8,88);
SELECT @@global.gtid_binlog_state;
#Perform SST
---echo #Connection 1
---connection node_1
+--echo #Connection 3
+--connection node_3
CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
@@ -62,7 +60,6 @@ INSERT INTO t1 VALUES ('node1_committed_before');
INSERT INTO t1 VALUES ('node1_committed_before');
COMMIT;
--source include/save_master_gtid.inc
-
--echo #Connection 2
--connection node_2
--source include/sync_with_master_gtid.inc
@@ -71,22 +68,21 @@ START TRANSACTION;
INSERT INTO t1 VALUES ('node2_committed_before');
INSERT INTO t1 VALUES ('node2_committed_before');
COMMIT;
-
---echo #Connection 3
---connection node_3
+--echo #Connection 1
+--connection node_1
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME= 't1';
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 4 FROM t1;
--source include/wait_condition.inc
---let $node_1= node_2
---let $node_2= node_3
+--let $node_1= node_1
+--let $node_2= node_2
--source include/auto_increment_offset_save.inc
---echo Shutting down server ...
---source include/shutdown_mysqld.inc
-
-
--echo #Connection 2
--connection node_2
+--echo Shutting down server ...
+--source include/shutdown_mysqld.inc
+--echo #Connection 1
+--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
SET AUTOCOMMIT=OFF;
@@ -94,51 +90,46 @@ START TRANSACTION;
INSERT INTO t1 VALUES ('node1_committed_during');
INSERT INTO t1 VALUES ('node1_committed_during');
COMMIT;
-
---echo #Connection 3
---connection node_3
+--echo #Connection 2
+--connection node_2
--echo Starting server ...
--source include/start_mysqld.inc
+
--source include/wait_until_ready.inc
--source include/auto_increment_offset_restore.inc
SET AUTOCOMMIT=OFF;
START TRANSACTION;
-INSERT INTO t1 VALUES ('node3_committed_after');
-INSERT INTO t1 VALUES ('node3_committed_after');
+INSERT INTO t1 VALUES ('node2_committed_after');
+INSERT INTO t1 VALUES ('node2_committed_after');
COMMIT;
-
---echo #Connection 2
---connection node_2
+--echo #Connection 1
+--connection node_1
--let $wait_condition = SELECT COUNT(*) = 8 FROM t1;
--source include/wait_condition.inc
Select * from t1 order by f1;
-
---echo #Connection 3
---connection node_3
+--echo #Connection 2
+--connection node_2
Select * from t1 order by f1;
#SST Done
--sleep 1
+--echo #Connection 1
+--connection node_1
+SELECT @@global.gtid_binlog_state;
--echo #Connection 2
--connection node_2
SELECT @@global.gtid_binlog_state;
-
--echo #Connection 3
--connection node_3
-SELECT @@global.gtid_binlog_state;
-
---echo #Connection 1
---connection node_1
SET AUTOCOMMIT=ON;
#drop table t1;
#CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB;
-
--echo #Connection 2
--connection node_2
SET AUTOCOMMIT=ON;
---echo #Connection 3
---connection node_3
+--echo #Connection 1
+--connection node_1
SET AUTOCOMMIT=ON;
#
@@ -148,10 +139,9 @@ SET AUTOCOMMIT=ON;
STOP slave;
--sleep 1
INSERT INTO t1 VALUES ('node2_slave_stoped');
-
---echo #Connection 1
---connection node_1
-INSERT INTO t1 VALUES ('node1_normal_entry');
+--echo #Connection 3
+--connection node_3
+INSERT INTO t1 VALUES ('node3_normal_entry');
--source include/save_master_gtid.inc
#start slave
@@ -163,16 +153,14 @@ start slave;
INSERT INTO t1 VALUES ('node2_slave_started');
SELECT count(*) from t1;
SELECT @@global.gtid_binlog_state;
-
---echo #Connection 3
---connection node_3
+--echo #Connection 1
+--connection node_1
--let $wait_condition = SELECT COUNT(*) = 12 FROM t1;
--source include/wait_condition.inc
SELECT count(*) from t1;
SELECT @@global.gtid_binlog_state;
-
---echo #Connection 1
---connection node_1
+--echo #Connection 3
+--connection node_3
DROP TABLE t2,t1;
# Unfortunately without the sleep below the following statement fails with "query returned no rows", which
@@ -181,30 +169,31 @@ DROP TABLE t2,t1;
#
--sleep 3
-
--echo #Connection 2
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't2';
--source include/wait_condition.inc
-
---echo #Connection 3
---connection node_3
+--echo #Connection 1
+--connection node_1
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
--source include/wait_condition.inc
-
--echo #Connection 2
--connection node_2
STOP SLAVE;
RESET SLAVE ALL;
+set global wsrep_on=OFF;
+reset master;
+set global wsrep_on=ON;
+
--disable_warnings
set global gtid_slave_pos="";
--enable_warnings
-reset master;
-
---echo #Connection 3
---connection node_3
-reset master;
--echo #Connection 1
--connection node_1
+set global wsrep_on=OFF;
+reset master;
+set global wsrep_on=ON;
+--echo #Connection 3
+--connection node_3
reset master;
diff --git a/mysql-test/suite/galera/t/galera_ist_progress.test b/mysql-test/suite/galera/t/galera_ist_progress.test
index 3ba63415c28..dd93161eab8 100644
--- a/mysql-test/suite/galera/t/galera_ist_progress.test
+++ b/mysql-test/suite/galera/t/galera_ist_progress.test
@@ -58,8 +58,8 @@ SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0';
--let $assert_file = $MYSQLTEST_VARDIR/log/mysqld.2.err
--let $assert_only_after = Need state transfer
---let $assert_text = Receiving IST: 11 writesets, seqnos
---let $assert_select = Receiving IST: 11 writesets, seqnos
+--let $assert_text = Receiving IST: 1[13] writesets
+--let $assert_select = Receiving IST: 1[13] writesets
--source include/assert_grep.inc
--let $assert_text = Receiving IST\.\.\. 0\.0% \( 0/11 events\) complete
diff --git a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test
index 633318629a6..42f210170bc 100644
--- a/mysql-test/suite/galera/t/galera_ist_restart_joiner.test
+++ b/mysql-test/suite/galera/t/galera_ist_restart_joiner.test
@@ -7,7 +7,7 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_debug_sync.inc
---source suite/galera/include/galera_have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
# This could cause out of storage if run in /dev/shm
--source include/big_test.inc
@@ -43,8 +43,6 @@ UPDATE t1 SET f2 = 'c' WHERE f1 > 2;
# ... and restart provider to force IST
--echo Loading wsrep_provider ...
--disable_query_log
-# base_port setting is lost for some reason when unloading provider, so we need to restore it
---eval SET GLOBAL wsrep_provider_options= 'base_port=$NODE_GALERAPORT_2';
--eval SET GLOBAL wsrep_provider = '$wsrep_provider_orig';
# Make sure IST will block ...
--let $galera_sync_point = recv_IST_after_apply_trx
diff --git a/mysql-test/suite/galera/t/galera_kill_applier.test b/mysql-test/suite/galera/t/galera_kill_applier.test
index d04b72bce0a..b66e0bcbbd0 100644
--- a/mysql-test/suite/galera/t/galera_kill_applier.test
+++ b/mysql-test/suite/galera/t/galera_kill_applier.test
@@ -6,7 +6,6 @@
--source include/have_innodb.inc
--connection node_1
---sleep 2
--let $applier_thread = `SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE IS NULL LIMIT 1`
--disable_query_log
diff --git a/mysql-test/suite/galera/t/galera_last_committed_id.test b/mysql-test/suite/galera/t/galera_last_committed_id.test
new file mode 100644
index 00000000000..550838cdcd9
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_last_committed_id.test
@@ -0,0 +1,68 @@
+#
+# Tests functions WSREP_LAST_WRITTEN_GTID and WSREP_LAST_SEEN_GTID
+#
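+# As far as I understand: WSREP_LAST_WRITTEN_GTID() returns the GTID of the
+# last write transaction committed by this client session, while
+# WSREP_LAST_SEEN_GTID() returns the GTID of the last write transaction
+# observed (committed) on the node, regardless of which connection ran it.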
+
+--source include/galera_cluster.inc
+
+# Returns -1 if no transactions have been run
+
+SELECT WSREP_LAST_WRITTEN_GTID() = '00000000-0000-0000-0000-000000000000:-1';
+
+--disable_query_log
+--let $seqno = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+--let $state = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_uuid'`
+--eval SELECT WSREP_LAST_SEEN_GTID() = '$state:$seqno' AS wsrep_last_committed_id_match;
+--enable_query_log
+
+# WSREP_LAST_WRITTEN_GTID() should not be influenced by transactions committed
+# on other connections
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+
+--connection node_1
+SELECT WSREP_LAST_WRITTEN_GTID() = '00000000-0000-0000-0000-000000000000:-1';
+
+# WSREP_LAST_SEEN_GTID() should be influenced by transactions committed
+# on other connections
+
+--connection node_1a
+INSERT INTO t1 VALUES (1);
+--disable_query_log
+--let $wsrep_last_committed_id_conn_1a = `SELECT WSREP_LAST_SEEN_GTID()`
+--enable_query_log
+
+--connection node_1
+--disable_query_log
+--eval SELECT WSREP_LAST_SEEN_GTID() = '$wsrep_last_committed_id_conn_1a' AS wsrep_last_committed_id_match;
+--enable_query_log
+
+# Should not advance while a transaction is in progress
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+SELECT WSREP_LAST_WRITTEN_GTID() = '00000000-0000-0000-0000-000000000000:-1';
+
+--disable_query_log
+--let $wsrep_last_committed_id_before = `SELECT WSREP_LAST_SEEN_GTID()`
+--enable_query_log
+INSERT INTO t1 VALUES (1);
+SELECT WSREP_LAST_WRITTEN_GTID() = '00000000-0000-0000-0000-000000000000:-1';
+--disable_query_log
+--eval SELECT WSREP_LAST_SEEN_GTID() = '$wsrep_last_committed_id_before' AS wsrep_last_committed_id_match;
+--enable_query_log
+
+# Should only advance after the transaction has been committed
+
+COMMIT;
+--disable_query_log
+--let $seqno = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+--let $state = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_uuid'`
+--eval SELECT WSREP_LAST_WRITTEN_GTID() = '$state:$seqno' AS wsrep_last_committed_id_advanced;
+--eval SELECT WSREP_LAST_SEEN_GTID() = '$state:$seqno' AS wsrep_last_committed_id_advanced;
+--enable_query_log
+SET AUTOCOMMIT=ON;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_log_bin.test b/mysql-test/suite/galera/t/galera_log_bin.test
index 2f0faa761c5..57df53e29b1 100644
--- a/mysql-test/suite/galera/t/galera_log_bin.test
+++ b/mysql-test/suite/galera/t/galera_log_bin.test
@@ -36,4 +36,6 @@ DROP TABLE t1;
DROP TABLE t2;
--connection node_1
+SET GLOBAL wsrep_on=OFF;
RESET MASTER;
+SET GLOBAL wsrep_on=ON;
diff --git a/mysql-test/suite/galera/t/galera_migrate.cnf b/mysql-test/suite/galera/t/galera_migrate.cnf
index ed48f208e52..2e1e9f161a9 100644
--- a/mysql-test/suite/galera/t/galera_migrate.cnf
+++ b/mysql-test/suite/galera/t/galera_migrate.cnf
@@ -29,11 +29,13 @@ wsrep_sync_wait = 15
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+log-slave-updates
[mysqld.4]
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+log-slave-updates
[ENV]
NODE_MYPORT_1= @mysqld.1.port
diff --git a/mysql-test/suite/galera/t/galera_parallel_apply_lock_table.test b/mysql-test/suite/galera/t/galera_parallel_apply_lock_table.test
index 08ed3fac67e..5a33c16c86e 100644
--- a/mysql-test/suite/galera/t/galera_parallel_apply_lock_table.test
+++ b/mysql-test/suite/galera/t/galera_parallel_apply_lock_table.test
@@ -32,8 +32,8 @@ INSERT INTO t2 VALUES (1);
--connection node_2a
--sleep 1
SET SESSION wsrep_sync_wait=0;
-SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%applied write set%';
-SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock';
+SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%committing%';
+SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%Waiting for table metadata lock%';
SELECT COUNT(*) = 0 FROM t1;
SELECT COUNT(*) = 0 FROM t2;
@@ -44,7 +44,7 @@ UNLOCK TABLES;
--eval SET SESSION wsrep_sync_wait = $wsrep_sync_wait_orig;
SELECT COUNT(*) = 1 FROM t1;
SELECT COUNT(*) = 1 FROM t2;
-SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'committed%';
+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%committed%';
--eval SET GLOBAL wsrep_slave_threads = $wsrep_slave_threads_orig;
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_parallel_autoinc_largetrx.test b/mysql-test/suite/galera/t/galera_parallel_autoinc_largetrx.test
index 644b4687cb3..203d18b85a6 100644
--- a/mysql-test/suite/galera/t/galera_parallel_autoinc_largetrx.test
+++ b/mysql-test/suite/galera/t/galera_parallel_autoinc_largetrx.test
@@ -12,13 +12,16 @@
--source include/galera_connect.inc
--connection node_1
-CREATE TABLE ten (f1 INTEGER);
+CREATE TABLE ten (f1 INTEGER) engine=InnoDB;
INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB;
+
--connection node_2
--let $wsrep_slave_threads_orig = `SELECT @@wsrep_slave_threads`
SET GLOBAL wsrep_slave_threads = 4;
+--let $wait_condition = SELECT VARIABLE_VALUE = @@wsrep_slave_threads + 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
+--source include/wait_condition.inc
--connection node_1
--send INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
@@ -31,16 +34,18 @@ SET GLOBAL wsrep_slave_threads = 4;
--connection node_1
--reap
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(DISTINCT f1) FROM t1;
--connection node_1a
--reap
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(DISTINCT f1) FROM t1;
--connection node_2
--reap
SELECT COUNT(*) FROM t1;
SELECT COUNT(DISTINCT f1) FROM t1;
-SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE
- USER = 'system user' AND STATE NOT LIKE 'InnoDB%';
--disable_query_log
--eval SET GLOBAL wsrep_slave_threads = $wsrep_slave_threads_orig;
diff --git a/mysql-test/suite/galera/t/galera_parallel_autoinc_manytrx.test b/mysql-test/suite/galera/t/galera_parallel_autoinc_manytrx.test
index 8680d62a36d..d2156cb3577 100644
--- a/mysql-test/suite/galera/t/galera_parallel_autoinc_manytrx.test
+++ b/mysql-test/suite/galera/t/galera_parallel_autoinc_manytrx.test
@@ -42,7 +42,7 @@ while ($count)
SELECT COUNT(*) = 20000 FROM t1;
SELECT COUNT(DISTINCT f1) = 20000 FROM t1;
-SELECT COUNT(*) = 4 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE 'committed%';
+SELECT COUNT(*) = 4 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE 'wsrep applier committed%';
--disable_query_log
--eval SET GLOBAL wsrep_slave_threads = $wsrep_slave_threads_orig;
diff --git a/mysql-test/suite/galera/t/galera_parallel_simple.test b/mysql-test/suite/galera/t/galera_parallel_simple.test
index 2cd840123cf..51bb1355ba4 100644
--- a/mysql-test/suite/galera/t/galera_parallel_simple.test
+++ b/mysql-test/suite/galera/t/galera_parallel_simple.test
@@ -47,7 +47,7 @@ SET SESSION wsrep_sync_wait = 0;
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%';
--source include/wait_condition.inc
---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'applied write set%';
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'committing%';
--source include/wait_condition.inc
UNLOCK TABLES;
diff --git a/mysql-test/suite/galera/t/galera_pc_recovery.test b/mysql-test/suite/galera/t/galera_pc_recovery.test
new file mode 100644
index 00000000000..1621414aff5
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_pc_recovery.test
@@ -0,0 +1,102 @@
+#
+# Test the pc.recovery=1 option. Killing all nodes simultaneously and
+# restarting them should succeed and the cluster should re-form.
+#
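+# Assumed behaviour of pc.recovery: the provider persists the primary
+# component state (gvwstate.dat in the data directory) so that, after all
+# nodes fail at once, the same primary component can be re-established
+# automatically when the nodes come back.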
+
+--source include/have_innodb.inc
+--source include/galera_cluster.inc
+--source include/big_test.inc
+
+# Save galera ports
+--connection node_1
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_1 = $_NODE_GALERAPORT
+
+--connection node_2
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_2 = $_NODE_GALERAPORT
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+
+# Instruct MTR to not restart the nodes automatically when they are killed
+
+--let $NODE_1_PIDFILE = `SELECT @@pid_file`
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1;
+--let $NODE_2_PIDFILE = `SELECT @@pid_file`
+
+--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+--exec kill -9 `cat $NODE_1_PIDFILE` `cat $NODE_2_PIDFILE`
+
+# Perform --wsrep-recover and save the recovered positions into variables by writing them to $MYSQL_TMP_DIR/galera_wsrep_start_position.inc and then --source'ing it
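+# The error log lines grepped below are assumed to look like
+#   WSREP: Recovered position: <cluster-uuid>:<seqno>
+# and only the "<cluster-uuid>:<seqno>" part is kept as the start position.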
+
+--exec $MYSQLD --defaults-group-suffix=.1 --defaults-file=$MYSQLTEST_VARDIR/my.cnf --wsrep-recover --log-error=$MYSQL_TMP_DIR/galera_wsrep_recover.1.log > $MYSQL_TMP_DIR/galera_wsrep_recover.1.log 2>&1
+--exec $MYSQLD --defaults-group-suffix=.2 --defaults-file=$MYSQLTEST_VARDIR/my.cnf --wsrep-recover --log-error=$MYSQL_TMP_DIR/galera_wsrep_recover.2.log > $MYSQL_TMP_DIR/galera_wsrep_recover.2.log 2>&1
+
+--perl
+ use strict;
+ my $wsrep_start_position1 = `grep 'WSREP: Recovered position:' $ENV{MYSQL_TMP_DIR}/galera_wsrep_recover.1.log | sed 's/.*WSREP\:\ Recovered\ position://' | sed 's/^[ \t]*//'`;
+ chomp($wsrep_start_position1);
+
+ my $wsrep_start_position2 = `grep 'WSREP: Recovered position:' $ENV{MYSQL_TMP_DIR}/galera_wsrep_recover.2.log | sed 's/.*WSREP\:\ Recovered\ position://' | sed 's/^[ \t]*//'`;
+ chomp($wsrep_start_position2);
+
+ die if $wsrep_start_position1 eq '' || $wsrep_start_position2 eq '';
+
+ open(FILE, ">", "$ENV{MYSQL_TMP_DIR}/galera_wsrep_start_position.inc") or die;
+ print FILE "--let \$galera_wsrep_start_position1 = $wsrep_start_position1\n";
+ print FILE "--let \$galera_wsrep_start_position2 = $wsrep_start_position2\n";
+ close FILE;
+EOF
+
+--source $MYSQL_TMP_DIR/galera_wsrep_start_position.inc
+
+if ($galera_wsrep_start_position1 == '') {
+ --die "Could not obtain wsrep_start_position."
+}
+
+if ($galera_wsrep_start_position2 == '') {
+ --die "Could not obtain wsrep_start_position."
+}
+
+--remove_file $MYSQL_TMP_DIR/galera_wsrep_start_position.inc
+
+# Instruct MTR to perform the actual restart using --wsrep-start-position. A proper --wsrep_cluster_address is used, as my.cnf only contains 'gcomm://' for node #1
+
+--exec echo "restart: --wsrep-start-position=$galera_wsrep_start_position1 --wsrep_cluster_address=gcomm://127.0.0.1:$NODE_GALERAPORT_1,127.0.0.1:$NODE_GALERAPORT_2" > $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--exec echo "restart: --wsrep-start-position=$galera_wsrep_start_position2 --wsrep_cluster_address=gcomm://127.0.0.1:$NODE_GALERAPORT_1,127.0.0.1:$NODE_GALERAPORT_2" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+
+--sleep 5
+--connection node_1
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+# Confirm that the cluster has re-formed and data is present
+
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+SELECT COUNT(*) = 1 FROM t1;
+
+--connection node_2
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+SELECT COUNT(*) = 1 FROM t1;
+
+DROP TABLE t1;
+
+--connection node_1
+CALL mtr.add_suppression("points to own listening address, blacklisting");
+CALL mtr.add_suppression("non weight changing install in S_PRIM");
+CALL mtr.add_suppression("No re-merged primary component found");
+
+--connection node_2
+CALL mtr.add_suppression("points to own listening address, blacklisting");
+CALL mtr.add_suppression("non weight changing install in S_PRIM");
+CALL mtr.add_suppression("No re-merged primary component found");
diff --git a/mysql-test/suite/galera/t/galera_sp_insert_parallel.test b/mysql-test/suite/galera/t/galera_sp_insert_parallel.test
new file mode 100644
index 00000000000..b6878a9c32a
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_sp_insert_parallel.test
@@ -0,0 +1,55 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+
+
+DELIMITER |;
+CREATE PROCEDURE proc_insert()
+BEGIN
+ DECLARE i INT;
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION BEGIN END;
+
+ SET i = 0;
+ WHILE i < 1000 DO
+ INSERT IGNORE INTO t1 (f1, f2)
+ VALUES (FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+ (FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+ (FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+ (FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+ (FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+ (FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+ (FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+ (FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15)),
+ (FLOOR(1 + RAND() * 65535), FLOOR(1 + RAND() * 15));
+ SET i = i + 1;
+
+ END WHILE;
+END|
+DELIMITER ;|
+
+--connection node_1
+SELECT 0;
+SET SESSION wsrep_sync_wait = 0;
+--send CALL proc_insert
+--connection node_2
+SELECT 0;
+SET SESSION wsrep_sync_wait = 0;
+--send CALL proc_insert
+
+--connection node_1
+--error 0,ER_LOCK_DEADLOCK,ER_QUERY_INTERRUPTED
+--disable_warnings
+--reap
+--enable_warnings
+SET SESSION wsrep_sync_wait = default;
+--connection node_2
+--error 0,ER_LOCK_DEADLOCK,ER_QUERY_INTERRUPTED
+--disable_warnings
+--reap
+--enable_warnings
+SET SESSION wsrep_sync_wait = default;
+
+--connection node_1
+DROP PROCEDURE proc_insert;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_split_brain.test b/mysql-test/suite/galera/t/galera_split_brain.test
index a85a2ad9b8d..91a2cc326a2 100644
--- a/mysql-test/suite/galera/t/galera_split_brain.test
+++ b/mysql-test/suite/galera/t/galera_split_brain.test
@@ -1,6 +1,8 @@
#
-# Confirm that with two nodes, killing one causes the other to stop accepting connections
-# The pc.ignore_sb=true wsrep_provider option is tested in the galera_kill_* tests.
+# Confirm that with two nodes, killing one causes the other to stop accepting
+# connections.
+# The pc.ignore_sb=true wsrep_provider option is tested in the galera_kill_*
+# tests.
#
--source include/galera_cluster.inc
diff --git a/mysql-test/suite/galera/t/galera_ssl_upgrade.test b/mysql-test/suite/galera/t/galera_ssl_upgrade.test
index eb5f79c12b4..2ce932b5398 100644
--- a/mysql-test/suite/galera/t/galera_ssl_upgrade.test
+++ b/mysql-test/suite/galera/t/galera_ssl_upgrade.test
@@ -46,4 +46,8 @@ SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_N
--source include/wait_condition.inc
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+# 5. Make sure node_2 is ready as well
+--connection node_2
+--source include/galera_wait_ready.inc
+
# Upgrade complete. Both nodes now use the new key and certificate
diff --git a/mysql-test/suite/galera/t/galera_sst_mariabackup.cnf b/mysql-test/suite/galera/t/galera_sst_mariabackup.cnf
index 336296e9bfe..be2ca0d1f09 100644
--- a/mysql-test/suite/galera/t/galera_sst_mariabackup.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_mariabackup.cnf
@@ -3,7 +3,7 @@
[mysqld]
wsrep_sst_method=mariabackup
wsrep_sst_auth="root:"
-wsrep_debug=ON
+wsrep_debug=1
[mysqld.1]
wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true'
diff --git a/mysql-test/suite/galera/t/galera_sst_mariabackup_data_dir.cnf b/mysql-test/suite/galera/t/galera_sst_mariabackup_data_dir.cnf
index aeb2301925e..b1caf700a12 100644
--- a/mysql-test/suite/galera/t/galera_sst_mariabackup_data_dir.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_mariabackup_data_dir.cnf
@@ -3,7 +3,7 @@
[mysqld]
wsrep_sst_method=mariabackup
wsrep_sst_auth="root:"
-wsrep_debug=ON
+wsrep_debug=1
[mysqld.1]
wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true'
diff --git a/mysql-test/suite/galera/t/galera_sst_mariabackup_encrypt_with_key.cnf b/mysql-test/suite/galera/t/galera_sst_mariabackup_encrypt_with_key.cnf
index 646d7322b41..60db7ddba77 100644
--- a/mysql-test/suite/galera/t/galera_sst_mariabackup_encrypt_with_key.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_mariabackup_encrypt_with_key.cnf
@@ -3,7 +3,7 @@
[mysqld]
wsrep_sst_method=mariabackup
wsrep_sst_auth="root:"
-wsrep_debug=ON
+wsrep_debug=1
[SST]
tkey=@ENV.MYSQL_TEST_DIR/std_data/cakey.pem
diff --git a/mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf b/mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf
index 336296e9bfe..be2ca0d1f09 100644
--- a/mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_mariabackup_table_options.cnf
@@ -3,7 +3,7 @@
[mysqld]
wsrep_sst_method=mariabackup
wsrep_sst_auth="root:"
-wsrep_debug=ON
+wsrep_debug=1
[mysqld.1]
wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true'
diff --git a/mysql-test/suite/galera/t/galera_sst_mysqldump.cnf b/mysql-test/suite/galera/t/galera_sst_mysqldump.cnf
index 574ae28b54a..af4fedf12ce 100644
--- a/mysql-test/suite/galera/t/galera_sst_mysqldump.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_mysqldump.cnf
@@ -5,7 +5,5 @@
[mysqld.1]
wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true'
-
[mysqld.2]
wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore_sb=true'
-
diff --git a/mysql-test/suite/galera/t/galera_sst_mysqldump.test b/mysql-test/suite/galera/t/galera_sst_mysqldump.test
index 835fac94a68..cce4d374a6d 100644
--- a/mysql-test/suite/galera/t/galera_sst_mysqldump.test
+++ b/mysql-test/suite/galera/t/galera_sst_mysqldump.test
@@ -1,6 +1,5 @@
--source include/big_test.inc
--source include/galera_cluster.inc
-
--source suite/galera/include/galera_sst_set_mysqldump.inc
--let $node_1=node_1
@@ -14,7 +13,6 @@
--source suite/galera/include/galera_st_shutdown_slave.inc
--source suite/galera/include/galera_st_clean_slave.inc
-
--source suite/galera/include/galera_st_kill_slave.inc
--source suite/galera/include/galera_st_kill_slave_ddl.inc
diff --git a/mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.cnf b/mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.cnf
index 44e5573b3e6..ebd116a359f 100644
--- a/mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.cnf
+++ b/mysql-test/suite/galera/t/galera_sst_mysqldump_with_key.cnf
@@ -5,12 +5,16 @@
[mysqld.1]
wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true'
+wsrep_causal_reads=0
+wsrep_sync_wait=0
[mysqld.2]
wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore_sb=true'
+wsrep_causal_reads=0
+wsrep_sync_wait=0
[mysqld]
-wsrep_debug=ON
+wsrep_debug=1
[client]
ssl-ca=@ENV.MYSQL_TEST_DIR/std_data/cacert.pem
diff --git a/mysql-test/suite/galera/t/galera_sync_wait_upto-master.opt b/mysql-test/suite/galera/t/galera_sync_wait_upto-master.opt
new file mode 100644
index 00000000000..0b35236062b
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_sync_wait_upto-master.opt
@@ -0,0 +1 @@
+--wsrep-sync-wait=0 --wsrep-causal-reads=OFF \ No newline at end of file
diff --git a/mysql-test/suite/galera/t/galera_sync_wait_upto.test b/mysql-test/suite/galera/t/galera_sync_wait_upto.test
new file mode 100644
index 00000000000..32c6b590c84
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_sync_wait_upto.test
@@ -0,0 +1,115 @@
+#
+# Tests the WSREP_SYNC_WAIT_UPTO_GTID function.
+#
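+# Assumed semantics of the function under test:
+# WSREP_SYNC_WAIT_UPTO_GTID('uuid:seqno'[, timeout]) blocks until the node has
+# applied and committed up to the given GTID, and with a timeout argument it
+# gives up with ER_LOCK_WAIT_TIMEOUT if the position is not reached in time
+# (as exercised below).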
+
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
+
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+
+
+# Test with invalid values
+
+--error ER_WRONG_ARGUMENTS
+SELECT WSREP_SYNC_WAIT_UPTO_GTID(NULL);
+
+--error ER_WRONG_ARGUMENTS
+SELECT WSREP_SYNC_WAIT_UPTO_GTID('a');
+
+--error ER_WRONG_ARGUMENTS
+SELECT WSREP_SYNC_WAIT_UPTO_GTID(2);
+
+
+# If set to low value, expect no waiting
+
+--disable_query_log
+--let $seqno = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+--let $state = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_state_uuid'`
+--enable_query_log
+
+--disable_query_log
+--eval SELECT WSREP_SYNC_WAIT_UPTO_GTID('00000000-0000-0000-0000-000000000000:-1') AS WSREP_SYNC_WAIT_UPTO;
+--enable_query_log
+
+--disable_query_log
+--eval SELECT WSREP_SYNC_WAIT_UPTO_GTID('$state:0') AS WSREP_SYNC_WAIT_UPTO;
+--enable_query_log
+
+
+# If set to current last_committed value
+
+--disable_query_log
+--eval SELECT WSREP_SYNC_WAIT_UPTO_GTID('$state:$seqno') AS WSREP_SYNC_WAIT_UPTO;
+--enable_query_log
+
+
+# If set to very high value, will wait
+
+--disable_query_log
+--error ER_LOCK_WAIT_TIMEOUT
+--eval SELECT WSREP_SYNC_WAIT_UPTO_GTID('$state:9223372036854775807', 1) AS WSREP_SYNC_WAIT_UPTO;
+--enable_query_log
+
+
+# If applier is blocked, will wait
+
+--connection node_2
+SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb";
+
+
+--connection node_1
+# Perform two inserts and record the IDs of each
+INSERT INTO t1 VALUES (2);
+--let $gtid_first = `SELECT WSREP_LAST_WRITTEN_GTID()`
+
+INSERT INTO t1 VALUES (3);
+--let $gtid_second = `SELECT WSREP_LAST_WRITTEN_GTID()`
+
+--connection node_2
+SET SESSION wsrep_sync_wait = 0;
+
+--disable_query_log
+--error ER_LOCK_WAIT_TIMEOUT
+--eval SELECT WSREP_SYNC_WAIT_UPTO_GTID('$gtid_first', 1) AS WSREP_SYNC_WAIT_UPTO;
+--enable_query_log
+
+--disable_query_log
+--send_eval SELECT WSREP_SYNC_WAIT_UPTO_GTID('$gtid_first') AS WSREP_SYNC_WAIT_UPTO;
+--enable_query_log
+
+# Unblock applier
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connection node_2a
+SET SESSION wsrep_sync_wait = 0;
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO LIKE 'SELECT WSREP_SYNC_WAIT%';
+--source include/wait_condition.inc
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+
+--connection node_2
+--reap
+
+# Confirm that we were allowed to proceed when the applier reached $gtid_first
+--let $gtid_current = `SELECT WSREP_LAST_SEEN_GTID()`
+--disable_query_log
+--eval SELECT '$gtid_current' = '$gtid_first' AS `gtid_current = gtid_first`
+--enable_query_log
+
+SET GLOBAL DEBUG_DBUG = "";
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+
+# Move forward some more, to $gtid_second
+
+--disable_query_log
+--eval SELECT WSREP_SYNC_WAIT_UPTO_GTID('$gtid_second') AS WSREP_SYNC_WAIT_UPTO;
+--enable_query_log
+
+--let $gtid_current = `SELECT WSREP_LAST_SEEN_GTID()`
+--disable_query_log
+--eval SELECT '$gtid_current' = '$gtid_second' AS `seqno_current = seqno_second`
+--enable_query_log
+
+SET DEBUG_SYNC = "RESET";
+
+--connection node_1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_toi_ddl_error.test b/mysql-test/suite/galera/t/galera_toi_ddl_error.test
index c586d97bdb5..6ee2a6e9b16 100644
--- a/mysql-test/suite/galera/t/galera_toi_ddl_error.test
+++ b/mysql-test/suite/galera/t/galera_toi_ddl_error.test
@@ -27,3 +27,8 @@ SHOW CREATE TABLE t1;
DROP TABLE t1;
DROP TABLE ten;
+
+CALL mtr.add_suppression("Ignoring error 'Duplicate entry '111110' for key 'PRIMARY'' on query.");
+
+--connection node_2
+CALL mtr.add_suppression("Ignoring error 'Duplicate entry '111110' for key 'PRIMARY'' on query."); \ No newline at end of file
diff --git a/mysql-test/suite/galera/t/galera_toi_ddl_locking.test b/mysql-test/suite/galera/t/galera_toi_ddl_locking.test
index 12c83a1f87a..22a45316306 100644
--- a/mysql-test/suite/galera/t/galera_toi_ddl_locking.test
+++ b/mysql-test/suite/galera/t/galera_toi_ddl_locking.test
@@ -11,20 +11,31 @@
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
---connection node_1
-SET DEBUG_SYNC = 'alter_table_before_open_tables WAIT_FOR continue';
---send ALTER TABLE t1 ADD COLUMN f2 INTEGER;
-
--let $galera_connection_name = node_1a
--let $galera_server_number = 1
--source include/galera_connect.inc
+SET SESSION wsrep_sync_wait = 0;
--let $galera_connection_name = node_1b
--let $galera_server_number = 1
--source include/galera_connect.inc
+# node_1c tests a write to an unrelated table through a transaction
+--let $galera_connection_name = node_1c
+--let $galera_server_number = 1
+--source include/galera_connect.inc
+
+--connection node_1
+SET DEBUG_SYNC= 'RESET';
+SET DEBUG_SYNC = 'alter_table_before_open_tables SIGNAL before_open_tables WAIT_FOR continue';
+--send ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+
--connection node_1a
-SET SESSION wsrep_sync_wait = 0;
+SET DEBUG_SYNC= 'now WAIT_FOR before_open_tables';
+
+# If the insert were retried, it would fail with a wrong column count
+# on the second try
+SET wsrep_retry_autocommit=0;
# Allowed
SELECT COUNT(*) = 0 FROM t1;
@@ -32,45 +43,56 @@ SELECT COUNT(*) = 0 FROM t1;
# Allowed
SELECT COUNT(*) = 0 FROM t2;
-# Not allowed
---error ER_LOCK_DEADLOCK,ER_ERROR_DURING_COMMIT
-INSERT INTO t1 VALUES (1);
+# Not allowed; this will hang because of the ALTER
+--send INSERT INTO t1 VALUES (1);
+
+--connection node_1c
+SET SESSION wsrep_sync_wait = 0;
SET AUTOCOMMIT=OFF;
START TRANSACTION;
# Allowed
+SELECT COUNT(*) = 0 FROM t1;
+
+# Allowed
+SELECT COUNT(*) = 0 FROM t2;
+
+# Allowed (until commit)
INSERT INTO t2 VALUES (1);
# Hangs
--send COMMIT;
---sleep 1
--connection node_1b
SET SESSION wsrep_sync_wait = 0;
# The Commit issued above is still not done
-SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO = 'Commit';
+--sleep 1
SELECT COUNT(*) = 0 FROM t2;
+
+SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE INFO = 'Commit';
+
SET DEBUG_SYNC= 'now SIGNAL continue';
--connection node_1a
+--error ER_LOCK_DEADLOCK
+--reap
+
+--connection node_1c
+# this will succeed, because the query will be replayed
--reap
--connection node_1
--reap
+
SELECT COUNT(*) = 0 FROM t1;
SELECT COUNT(*) = 1 FROM t2;
+SET debug_sync='RESET';
--connection node_2
SELECT COUNT(*) = 0 FROM t1;
SELECT COUNT(*) = 1 FROM t2;
---connection node_1
-SET DEBUG_SYNC= 'RESET';
-
---connection node_1b
-SET DEBUG_SYNC= 'RESET';
-
DROP TABLE t1;
DROP TABLE t2;
diff --git a/mysql-test/suite/galera/t/galera_transaction_replay.test b/mysql-test/suite/galera/t/galera_transaction_replay.test
index 29870829ba3..655714f26c9 100644
--- a/mysql-test/suite/galera/t/galera_transaction_replay.test
+++ b/mysql-test/suite/galera/t/galera_transaction_replay.test
@@ -1,12 +1,25 @@
#
-# This test tests the operation of transaction replay. If a potentially conflicting remote transaction arrives at
-# just the right time during the commit of a local transaction, the local transaction will be aborted and replayed.
+# This test tests the operation of transaction replay. If a potentially
+# conflicting remote transaction arrives at just the right time during
+# the commit of a local transaction, the local transaction will be aborted
+# and replayed.
+#
+# Because the write set with higher sequence number cannot BF abort
+# the victim with lower sequence number, the conflicting remote transaction
+# must be executed first and must be allowed to proceed up to the apply
+# monitor before sending the COMMIT for the transaction to be replayed.
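+# Rough ordering assumed in scenario #1: the remote UPDATE gets seqno N and
+# the local COMMIT gets seqno N+1; while the local transaction waits in the
+# commit monitor, the remote write set BF aborts it, and after the remote
+# transaction commits the local one is replayed and succeeds.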
#
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_debug_sync.inc
---source suite/galera/include/galera_have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
+
+######################################################################
+#
+# Scenario #1, the victim will have higher seqno and will be replayed
+#
+######################################################################
--let $wsrep_local_replays_old = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
@@ -21,34 +34,123 @@ START TRANSACTION;
UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE;
-# Block the commit
+# Block the applier on node #1 and issue a conflicting update on node #2
--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
---let $galera_sync_point = commit_monitor_enter_sync
+SET SESSION wsrep_sync_wait=0;
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_2
+UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
+
+--connection node_1a
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# Block the commit, send the COMMIT and wait until it gets blocked
+
+--let $galera_sync_point = commit_monitor_master_enter_sync
--source include/galera_set_sync_point.inc
--connection node_1
---send COMMIT;
+--send COMMIT
-# Wait until commit is blocked
--connection node_1a
-SET SESSION wsrep_sync_wait = 0;
+
+--let $galera_sync_point = apply_monitor_slave_enter_sync commit_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# Let the conflicting UPDATE proceed and wait until it hits abort_trx_end.
+# The victim transaction still sits in the commit_monitor_master_enter_sync sync point.
+
+--let $galera_sync_point = abort_trx_end
+--source include/galera_set_sync_point.inc
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+--let $galera_sync_point = abort_trx_end commit_monitor_master_enter_sync
--source include/galera_wait_sync_point.inc
-# Issue a conflicting update on node #2
+# Let the transactions proceed
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = abort_trx_end
+--source include/galera_signal_sync_point.inc
+--let $galera_sync_point = commit_monitor_master_enter_sync
+--source include/galera_signal_sync_point.inc
+
+# Commit succeeds
+--connection node_1
+--reap
+
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
+
+# wsrep_local_replays has increased by 1
+--let $wsrep_local_replays_new = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+--disable_query_log
+--eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old = 1 AS wsrep_local_replays;
+--enable_query_log
+
--connection node_2
-UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
+
+DROP TABLE t1;
+
+#########################################################################
+#
+# Scenario #2, the victim will have lower seqno and will not be replayed
+#
+#########################################################################
+
+--connection node_1
+
+--let $wsrep_local_replays_old = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1));
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+
+--connection node_1
+SET AUTOCOMMIT=ON;
+START TRANSACTION;
+
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE;
+
+# Block the commit, send the COMMIT and wait until it gets blocked
+
+--let $galera_sync_point = commit_monitor_master_enter_sync
+--source include/galera_set_sync_point.inc
+
+--connection node_1
+--send COMMIT
-# Wait for both transactions to be blocked
--connection node_1a
---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Update_rows_log_event::find_row%';
---source include/wait_condition.inc
---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'init' AND INFO = 'COMMIT';
---source include/wait_condition.inc
+--let $galera_sync_point = commit_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+
+# Set sync point at the end of BF abort, issue a conflicting update
+# and wait for the conflicting update to hit the sync point.
+--let $galera_sync_point = abort_trx_end
+--source include/galera_set_sync_point.inc
+
+--connection node_2
+UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
-# Unblock the commit
--connection node_1a
+--let $galera_sync_point = abort_trx_end commit_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+# Let the transactions proceed
--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = abort_trx_end
+--source include/galera_signal_sync_point.inc
+--let $galera_sync_point = commit_monitor_master_enter_sync
--source include/galera_signal_sync_point.inc
# Commit succeeds
@@ -58,10 +160,10 @@ UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
-# wsrep_local_replays has increased by 1
+# wsrep_local_replays should have not increased
--let $wsrep_local_replays_new = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
--disable_query_log
---eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old = 1 AS wsrep_local_replays;
+--eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old = 0 AS wsrep_local_replays;
--enable_query_log
--connection node_2
@@ -70,55 +172,84 @@ SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
DROP TABLE t1;
-#echo "# test for PS replaying"
+########################################
#
# test replaying of prepared statements
#
+########################################
+
--connection node_1
+
+--let $wsrep_local_replays_old = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+
CREATE TABLE t1 (i int primary key, j int) ENGINE=INNODB;
INSERT INTO t1 VALUES (1, 0), (3, 0);
SELECT * FROM t1;
-
+SET AUTOCOMMIT=ON;
PREPARE stmt1 FROM "UPDATE t1 SET j = 1 where i > 0";
-# block the commit of PS
+
+# Block the applier on node #1 and issue a conflicting update on node #2
--connection node_1a
---let $galera_sync_point = commit_monitor_enter_sync
+SET SESSION wsrep_sync_wait=0;
+--let $galera_sync_point = apply_monitor_slave_enter_sync
--source include/galera_set_sync_point.inc
---connection node_1
---send EXECUTE stmt1;
+--connection node_2
+INSERT INTO t1 VALUES(2,2);
-# Wait until commit is blocked
--connection node_1a
-SET SESSION wsrep_sync_wait = 0;
--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
-# Issue a conflicting update on node_2
---connection node_2
-#UPDATE t1 SET j=2;
-INSERT INTO t1 VALUES(2,2);
+# Block the commit, send the EXECUTE stmt1 and wait until it gets blocked
+--let $galera_sync_point = commit_monitor_master_enter_sync
+--source include/galera_set_sync_point.inc
-# Wait until applying begins in node_1
---connection node_1a
---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Write_rows_log_event::write_row%';
---source include/wait_condition.inc
+--connection node_1
+SET SESSION wsrep_sync_wait=0;
+--send EXECUTE stmt1
-# Unblock the PS commit
--connection node_1a
+
+--let $galera_sync_point = apply_monitor_slave_enter_sync commit_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
--source include/galera_clear_sync_point.inc
+
+# Let the conflicting INSERT proceed and wait until it hits abort_trx_end.
+# The victim transaction still sits in the commit_monitor_master_enter_sync sync point.
+
+--let $galera_sync_point = abort_trx_end
+--source include/galera_set_sync_point.inc
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+--let $galera_sync_point = abort_trx_end commit_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
+
+# Let the transactions proceed
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = abort_trx_end
+--source include/galera_signal_sync_point.inc
+--let $galera_sync_point = commit_monitor_master_enter_sync
--source include/galera_signal_sync_point.inc
# Commit succeeds
--connection node_1
--reap
+SET SESSION wsrep_sync_wait=7;
SELECT * FROM t1;
--connection node_2
SELECT * FROM t1;
--connection node_1
+# wsrep_local_replays has increased by 1
+--let $wsrep_local_replays_new = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+--disable_query_log
+--eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old = 1 AS wsrep_local_replays;
+--enable_query_log
+
DEALLOCATE PREPARE stmt1;
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_var_cluster_address.test b/mysql-test/suite/galera/t/galera_var_cluster_address.test
index 6d99d35cdac..85ae73ffda4 100644
--- a/mysql-test/suite/galera/t/galera_var_cluster_address.test
+++ b/mysql-test/suite/galera/t/galera_var_cluster_address.test
@@ -19,8 +19,8 @@
SET GLOBAL wsrep_cluster_address = 'foo://';
# With wsrep_sync_wait, this returns an error
-#--error ER_LOCK_WAIT_TIMEOUT
-#SHOW STATUS;
+--error ER_LOCK_WAIT_TIMEOUT
+SHOW STATUS;
SET SESSION wsrep_sync_wait=0;
@@ -29,7 +29,7 @@ SELECT COUNT(*) > 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS;
# Must return 'OFF'
SHOW STATUS LIKE 'wsrep_ready';
-# Must return 'Non-primary'
+# Must return 'Disconnected'
SHOW STATUS LIKE 'wsrep_cluster_status';
# Must return 0 = 'Initialized'
@@ -49,10 +49,9 @@ SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VA
--connection node_2
--disable_query_log
--eval SET GLOBAL wsrep_cluster_address = '$wsrep_cluster_address_node2';
+--source include/galera_wait_ready.inc
--enable_query_log
---source include/wait_until_connected_again.inc
-
--connection node_1
SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
@@ -63,7 +62,7 @@ CALL mtr.add_suppression("Failed to initialize backend using 'foo");
CALL mtr.add_suppression("Failed to open channel 'my_wsrep_cluster' at 'foo");
CALL mtr.add_suppression("gcs connect failed: Socket type not supported");
CALL mtr.add_suppression("wsrep::connect\\(\\) failed: 7");
-CALL mtr.add_suppression("gcs_caused\\(\\) returned -103 \\(Software caused connection abort\\)");
+CALL mtr.add_suppression("gcs_caused\\(\\) returned -[0-9]+ \\(Software caused connection abort\\)");
CALL mtr.add_suppression("failed to open gcomm backend connection: 110: failed to reach primary view: 110");
CALL mtr.add_suppression("Failed to open backend connection: -110 \\(Connection timed out\\)");
CALL mtr.add_suppression("gcs connect failed: Connection timed out");
diff --git a/mysql-test/suite/galera/t/galera_var_dirty_reads.test b/mysql-test/suite/galera/t/galera_var_dirty_reads.test
index 3e2108868af..859642a6fdf 100644
--- a/mysql-test/suite/galera/t/galera_var_dirty_reads.test
+++ b/mysql-test/suite/galera/t/galera_var_dirty_reads.test
@@ -27,7 +27,7 @@ SET SESSION wsrep_sync_wait=0;
# Must return 'OFF'
SHOW STATUS LIKE 'wsrep_ready';
-# Must return 'Non-primary'
+# Must return 'Disconnected'
SHOW STATUS LIKE 'wsrep_cluster_status';
--error ER_UNKNOWN_COM_ERROR
@@ -48,6 +48,7 @@ SET @@session.wsrep_dirty_reads=OFF;
--error ER_UNKNOWN_COM_ERROR
SELECT i, variable_name, variable_value FROM t1, information_schema.session_variables WHERE variable_name LIKE "wsrep_dirty_reads" AND i = 1;
+
SELECT 1;
USE information_schema;
@@ -60,6 +61,7 @@ SELECT COUNT(*) >= 10 FROM performance_schema.events_statements_history;
--eval SET @@global.wsrep_cluster_address = '$wsrep_cluster_address_saved'
--enable_query_log
--source include/wait_until_connected_again.inc
+--source include/galera_wait_ready.inc
--connection node_1
USE test;
@@ -67,8 +69,7 @@ SELECT * FROM t1;
# Cleanup
DROP TABLE t1;
+# Restore original auto_increment_offset values.
--source include/auto_increment_offset_restore.inc
--source include/galera_end.inc
---echo # End of test
-
diff --git a/mysql-test/suite/galera/t/galera_var_ignore_apply_errors.test b/mysql-test/suite/galera/t/galera_var_ignore_apply_errors.test
new file mode 100644
index 00000000000..ddf561c8784
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_var_ignore_apply_errors.test
@@ -0,0 +1,235 @@
+#
+# Test option wsrep_ignore_apply_errors
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+
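+# Hedged summary of the wsrep_ignore_apply_errors values exercised below
+# (taken from this test's own sections, not an authoritative reference):
+#   1 - ignore reconciling DDL errors (e.g. DROP of an object that is missing)
+#   2 - ignore reconciling DML errors (e.g. DELETE of a row that is missing)
+#   4 - ignore all DDL errors
+#   7 - all of the above (the value the test sets back at the end)
+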
+#
+# Ignore reconciling DDL errors on node_2
+#
+
+--connection node_2
+SET GLOBAL wsrep_ignore_apply_errors = 1;
+
+# Drop table that does not exist
+--connection node_1
+SET GLOBAL wsrep_on = OFF;
+CREATE TABLE t1 (f1 INTEGER);
+SET GLOBAL wsrep_on = ON;
+DROP TABLE t1;
+
+# Drop schema that does not exist
+SET GLOBAL wsrep_on = OFF;
+CREATE SCHEMA s1;
+SET GLOBAL wsrep_on = ON;
+DROP SCHEMA s1;
+
+# Drop index that does not exist using DROP INDEX
+CREATE TABLE t1 (f1 INTEGER);
+SET GLOBAL wsrep_on = OFF;
+CREATE INDEX idx1 ON t1 (f1);
+SET GLOBAL wsrep_on = ON;
+DROP INDEX idx1 ON t1;
+DROP TABLE t1;
+
+# Drop index that does not exist using ALTER TABLE
+CREATE TABLE t1 (f1 INTEGER);
+SET GLOBAL wsrep_on = OFF;
+CREATE INDEX idx1 ON t1 (f1);
+SET GLOBAL wsrep_on = ON;
+ALTER TABLE t1 DROP INDEX idx1;
+DROP TABLE t1;
+
+# Drop column that does not exist
+CREATE TABLE t1 (f1 INTEGER);
+SET GLOBAL wsrep_on = OFF;
+ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+SET GLOBAL wsrep_on = ON;
+ALTER TABLE t1 DROP COLUMN f2;
+DROP TABLE t1;
+
+
+#
+# Ignore reconciling DML errors on node_2
+#
+
+--connection node_2
+SET GLOBAL wsrep_ignore_apply_errors = 2;
+
+# Delete row that does not exist
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER);
+SET GLOBAL wsrep_on = OFF;
+INSERT INTO t1 VALUES (1);
+SET GLOBAL wsrep_on = ON;
+DELETE FROM t1 WHERE f1 = 1;
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM t1;
+--connection node_2
+SELECT COUNT(*) = 0 FROM t1;
+
+DROP TABLE t1;
+
+# Delete row that does not exist in a multi-statement transaction
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER);
+INSERT INTO t1 VALUES (2);
+SET GLOBAL wsrep_on = OFF;
+INSERT INTO t1 VALUES (1);
+SET GLOBAL wsrep_on = ON;
+START TRANSACTION;
+INSERT INTO t1 VALUES (3);
+DELETE FROM t1 WHERE f1 = 1;
+DELETE FROM t1 WHERE f1 = 2;
+COMMIT;
+
+--connection node_1
+SELECT COUNT(*) = 1 FROM t1;
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1;
+
+DROP TABLE t1;
+
+#
+# Multi-row delete where only one row does not exist
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 5 FROM t1;
+--source include/wait_condition.inc
+
+SET SESSION wsrep_on = OFF;
+DELETE FROM t1 WHERE f1 = 3;
+SET SESSION wsrep_on = ON;
+--connection node_1
+DELETE FROM t1;
+
+SELECT COUNT(*) = 0 FROM t1;
+--connection node_2
+SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+SELECT COUNT(*) = 0 FROM t1;
+DROP TABLE t1;
+
+#
+# Multi-statement delete where only one row does not exist
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5);
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 5 FROM t1;
+--source include/wait_condition.inc
+
+SET SESSION wsrep_on = OFF;
+DELETE FROM t1 WHERE f1 = 3;
+SET SESSION wsrep_on = ON;
+--connection node_1
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+DELETE FROM t1 WHERE f1 = 1;
+DELETE FROM t1 WHERE f1 = 2;
+DELETE FROM t1 WHERE f1 = 3;
+DELETE FROM t1 WHERE f1 = 4;
+DELETE FROM t1 WHERE f1 = 5;
+COMMIT;
+SET AUTOCOMMIT=ON;
+
+SELECT COUNT(*) = 0 FROM t1;
+--connection node_2
+SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+SELECT COUNT(*) = 0 FROM t1;
+DROP TABLE t1;
+
+#
+# Multi-table delete
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3);
+CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3);
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 3 FROM t2;
+--source include/wait_condition.inc
+
+SET SESSION wsrep_on = OFF;
+DELETE FROM t2 WHERE f1 = 2;
+DELETE FROM t1 WHERE f1 = 3;
+SET SESSION wsrep_on = ON;
+
+--connection node_1
+DELETE t1, t2 FROM t1 JOIN t2 WHERE t1.f1 = t2.f1;
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_2
+SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+SELECT COUNT(*) = 0 FROM t1;
+DROP TABLE t1,t2;
+
+#
+# Foreign keys
+#
+
+--connection node_1
+CREATE TABLE parent (id INT NOT NULL, PRIMARY KEY (id)) ENGINE=INNODB;
+INSERT INTO parent VALUES (1),(2),(3);
+CREATE TABLE child (id INT, parent_id INT, INDEX par_ind (parent_id), FOREIGN KEY (parent_id) REFERENCES parent(id) ON DELETE CASCADE) ENGINE=INNODB;
+INSERT INTO child VALUES (1,1),(2,2),(3,3);
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 3 FROM child;
+--source include/wait_condition.inc
+
+SET SESSION wsrep_on = OFF;
+DELETE FROM child WHERE parent_id = 2;
+SET SESSION wsrep_on = ON;
+
+--connection node_1
+DELETE FROM parent;
+SELECT COUNT(*) = 0 FROM parent;
+SELECT COUNT(*) = 0 FROM child;
+
+--connection node_2
+SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+SELECT COUNT(*) = 0 FROM parent;
+SELECT COUNT(*) = 0 FROM child;
+DROP TABLE child, parent;
+
+#
+# Ignore all DDL errors on node_2
+#
+
+--connection node_2
+SET GLOBAL wsrep_ignore_apply_errors = 4;
+
+# Create a table that already exists
+--connection node_2
+SET GLOBAL wsrep_on = OFF;
+CREATE TABLE t1 (f1 INTEGER);
+SET GLOBAL wsrep_on = ON;
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER, f2 INTEGER);
+DROP TABLE t1;
+
+
+--connection node_2
+SET GLOBAL wsrep_ignore_apply_errors = 7;
+
+CALL mtr.add_suppression("Can't find record in 't.*'");
+CALL mtr.add_suppression("Slave SQL: Could not execute Delete_rows event");
+CALL mtr.add_suppression("Slave SQL: Error 'Unknown table 'test.t1'' on query. Default database: 'test'. Query: 'DROP TABLE t1', Error_code: 1051");
+CALL mtr.add_suppression("Slave SQL: Error 'Can't drop database 's1'; database doesn't exist' on query. Default database: 'test'. Query: 'DROP SCHEMA s1', Error_code: 1008");
+CALL mtr.add_suppression("Slave SQL: Error 'Can't DROP 'idx1'; check that column/key exists' on query. Default database: 'test'. Query: 'DROP INDEX idx1 ON t1', Error_code: 1091");
+CALL mtr.add_suppression("Slave SQL: Error 'Can't DROP 'idx1'; check that column/key exists' on query. Default database: 'test'. Query: 'ALTER TABLE t1 DROP INDEX idx1', Error_code: 1091");
+CALL mtr.add_suppression("Slave SQL: Error 'Can't DROP 'f2'; check that column/key exists' on query. Default database: 'test'. Query: 'ALTER TABLE t1 DROP COLUMN f2', Error_code: 1091");
+CALL mtr.add_suppression("Slave SQL: Error 'Table 't1' already exists' on query.");
diff --git a/mysql-test/suite/galera/t/galera_var_innodb_disallow_writes.test b/mysql-test/suite/galera/t/galera_var_innodb_disallow_writes.test
index 65b55435a9e..9ccecc5a61d 100644
--- a/mysql-test/suite/galera/t/galera_var_innodb_disallow_writes.test
+++ b/mysql-test/suite/galera/t/galera_var_innodb_disallow_writes.test
@@ -1,9 +1,18 @@
#
# This test checks that innodb_disallow_writes works as expected
#
+# Note that we need to enable binlog for this test: If the commit
+# to InnoDB is done in one phase, the transaction is committed in
+# memory before it is persisted to disk. This means that
+# innodb_disallow_writes=ON may not prevent the transaction from
+# becoming visible to other readers. On the other hand, if the
+# commit is two-phase (as it is with binlog), the transaction
+# will be blocked in the prepare phase.
+#
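+# A minimal, hedged sketch of the behaviour this test relies on (assuming
+# two-phase commit, i.e. binlog enabled); the test below uses a separate
+# connection to observe the blocked state via SHOW PROCESSLIST:
+#
+#   SET GLOBAL innodb_disallow_writes = ON;
+#   --send INSERT INTO t1 VALUES (1)       # expected to block in the prepare phase
+#   SET GLOBAL innodb_disallow_writes = OFF;
+#   --reap                                 # the INSERT completes once writes are allowed again
+#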
--source include/galera_cluster.inc
--source include/have_innodb.inc
+--source include/have_log_bin.inc
# Open a separate connection to be used to run SHOW PROCESSLIST
--let $galera_connection_name = node_1a
diff --git a/mysql-test/suite/galera/t/galera_var_load_data_splitting.test b/mysql-test/suite/galera/t/galera_var_load_data_splitting.test
index 38dab0a981b..621cb69fc16 100644
--- a/mysql-test/suite/galera/t/galera_var_load_data_splitting.test
+++ b/mysql-test/suite/galera/t/galera_var_load_data_splitting.test
@@ -36,4 +36,7 @@ SELECT COUNT(*) = 95000 FROM t1;
--connection node_1
--eval SET GLOBAL wsrep_load_data_splitting = $wsrep_load_data_splitting_orig;
+--connection node_2
+--eval SET GLOBAL wsrep_load_data_splitting = $wsrep_load_data_splitting_orig;
+
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_var_log_bin.cnf b/mysql-test/suite/galera/t/galera_var_log_bin.cnf
index f7f17e3720a..30ccee2024e 100644
--- a/mysql-test/suite/galera/t/galera_var_log_bin.cnf
+++ b/mysql-test/suite/galera/t/galera_var_log_bin.cnf
@@ -3,3 +3,8 @@
[mysqld]
log-bin
+[mysqld.1]
+log-slave-updates
+
+[mysqld.2]
+log-slave-updates
diff --git a/mysql-test/suite/galera/t/galera_var_retry_autocommit.test b/mysql-test/suite/galera/t/galera_var_retry_autocommit.test
index 142f02546b4..df541b774a4 100644
--- a/mysql-test/suite/galera/t/galera_var_retry_autocommit.test
+++ b/mysql-test/suite/galera/t/galera_var_retry_autocommit.test
@@ -16,11 +16,11 @@
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET SESSION wsrep_retry_autocommit = 0;
-SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue';
+SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continue';
--send INSERT INTO t1 (f1) VALUES (2)
--connection node_1a
-SET DEBUG_SYNC = 'now WAIT_FOR before_rep';
+SET DEBUG_SYNC = 'now WAIT_FOR before_cert';
--connection node_2
TRUNCATE TABLE t1;
@@ -42,11 +42,11 @@ DROP TABLE t1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET SESSION wsrep_retry_autocommit = 1;
-SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue';
---send INSERT INTO t1 (f1) VALUES (2)
+SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continue';
+--send INSERT INTO t1 (f1) VALUES (3)
--connection node_1a
-SET DEBUG_SYNC = 'now WAIT_FOR before_rep';
+SET DEBUG_SYNC = 'now WAIT_FOR before_cert';
--connection node_2
TRUNCATE TABLE t1;
@@ -68,12 +68,12 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET SESSION wsrep_retry_autocommit = 1;
SET GLOBAL debug_dbug = '+d,sync.wsrep_retry_autocommit';
-SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue EXECUTE 2';
+SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continue EXECUTE 2';
---send INSERT INTO t1 VALUES (2);
+--send INSERT INTO t1 VALUES (4);
--connection node_1a
-SET DEBUG_SYNC = 'now WAIT_FOR before_rep';
+SET DEBUG_SYNC = 'now WAIT_FOR before_cert';
--connection node_2
TRUNCATE TABLE t1;
@@ -81,7 +81,7 @@ TRUNCATE TABLE t1;
--connection node_1a
SET DEBUG_SYNC = 'now WAIT_FOR wsrep_retry_autocommit_reached';
SELECT COUNT(*) = 0 FROM t1;
-SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue WAIT_FOR before_rep';
+SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue WAIT_FOR before_cert';
--connection node_2
TRUNCATE TABLE t1;
@@ -107,9 +107,9 @@ CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET SESSION wsrep_retry_autocommit = 64;
SET GLOBAL debug_dbug = '+d,sync.wsrep_retry_autocommit';
-SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue EXECUTE 64';
+SET DEBUG_SYNC = 'wsrep_before_certification SIGNAL before_cert WAIT_FOR continue EXECUTE 64';
---send INSERT INTO t1 VALUES (2)
+--send INSERT INTO t1 VALUES (5)
--disable_query_log
--disable_result_log
@@ -117,7 +117,7 @@ SET DEBUG_SYNC = 'wsrep_before_replication SIGNAL before_rep WAIT_FOR continue E
while ($count)
{
--connection node_1a
- SET DEBUG_SYNC = 'now WAIT_FOR before_rep';
+ SET DEBUG_SYNC = 'now WAIT_FOR before_cert';
--connection node_2
TRUNCATE TABLE t1;
diff --git a/mysql-test/suite/galera/t/galera_var_slave_threads.cnf b/mysql-test/suite/galera/t/galera_var_slave_threads.cnf
new file mode 100644
index 00000000000..889c81b4a0a
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_var_slave_threads.cnf
@@ -0,0 +1,7 @@
+!include ../galera_2nodes.cnf
+
+[mysqld.1]
+auto_increment_offset=1
+
+[mysqld.2]
+auto_increment_offset=2
diff --git a/mysql-test/suite/galera/t/galera_var_slave_threads.test b/mysql-test/suite/galera/t/galera_var_slave_threads.test
index 80edcb2aff9..12d8006db4b 100644
--- a/mysql-test/suite/galera/t/galera_var_slave_threads.test
+++ b/mysql-test/suite/galera/t/galera_var_slave_threads.test
@@ -8,6 +8,11 @@
--source include/have_innodb.inc
--let $wsrep_slave_threads_orig = `SELECT @@wsrep_slave_threads`
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--source include/auto_increment_offset_save.inc
+
--connection node_1
CREATE TABLE t1 (f1 INT PRIMARY KEY) Engine=InnoDB;
CREATE TABLE t2 (f1 INT AUTO_INCREMENT PRIMARY KEY) Engine=InnoDB;
@@ -21,6 +26,7 @@ SELECT @@wsrep_slave_threads = 1;
SET GLOBAL wsrep_slave_threads = 1;
# There is a separate wsrep_aborter thread at all times
+SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND COMMAND != 'Daemon';
SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%';
#
@@ -33,13 +39,15 @@ SET GLOBAL wsrep_slave_threads = 64;
INSERT INTO t1 VALUES (1);
--connection node_2
---let $wait_timeout=600
---let $wait_condition = SELECT COUNT(*) = 1 FROM t1;
---source include/wait_condition.inc
+SELECT COUNT(*) FROM t1;
-SELECT COUNT(*) = 1 FROM t1;
+#
+# note: in wsrep API #26 we have 2 rollbacker threads, counted as 'system user' threads
+#
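+# A hedged reading of the wait conditions used throughout this test: with
+# the 2 rollbacker threads counted in, the expected arithmetic is
+#   wsrep_thread_count = @@wsrep_slave_threads + 2
+# which is exactly what the wait_condition checks below verify.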
---let $wait_condition = SELECT COUNT(*) = @@wsrep_slave_threads + 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%');
+SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%';
+
+--let $wait_condition = SELECT VARIABLE_VALUE = @@wsrep_slave_threads + 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
--source include/wait_condition.inc
#
@@ -65,49 +73,122 @@ while ($count)
--connection node_2
SELECT COUNT(*) FROM t2;
---let $wait_condition = SELECT COUNT(*) = @@wsrep_slave_threads + 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%')
---source include/wait_condition.inc
SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%';
+--let $wait_condition = SELECT VARIABLE_VALUE = @@wsrep_slave_threads + 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
+--source include/wait_condition.inc
---eval SET GLOBAL wsrep_slave_threads = $wsrep_slave_threads_orig
+--let $wsrep_cluster_address_node2 = `SELECT @@wsrep_cluster_address`
+--let $wsrep_provider_node2 = `SELECT @@wsrep_provider`
-DROP TABLE t1;
-DROP TABLE t2;
+SET GLOBAL wsrep_slave_threads = 5;
---echo #
---echo # lp:1372840 - Changing wsrep_slave_threads causes future connections to hang
---echo #
+--let $wait_condition = SELECT VARIABLE_VALUE = @@wsrep_slave_threads + 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
+--source include/wait_condition.inc
---connection node_1
-CREATE TABLE t1 (i INT AUTO_INCREMENT PRIMARY KEY) ENGINE=INNODB;
+SET GLOBAL wsrep_slave_threads = 1;
+#
+# test phase for bug https://github.com/codership/mysql-wsrep/issues/319
+#
+
+# shutdown node 2
--connection node_2
-SET GLOBAL wsrep_slave_threads = 4;
---let $wait_condition = SELECT COUNT(*) = @@wsrep_slave_threads + 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%')
+--echo Shutting down server ...
+--source include/shutdown_mysqld.inc
+
+# wait until node_1 is ready as a one-node cluster
+--connection node_1
+
+--let $wait_condition = SELECT VARIABLE_VALUE = @@wsrep_slave_threads + 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
--source include/wait_condition.inc
+show status like 'wsrep_cluster_size';
+# step up slave threads to 6 and make sure all applier and rollbacker threads are running
+SET GLOBAL wsrep_slave_threads = 6;
+--let $wait_condition = SELECT VARIABLE_VALUE = @@wsrep_slave_threads + 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
+--source include/wait_condition.inc
+
+# change to invalid cluster address
SET GLOBAL wsrep_slave_threads = 1;
+SET GLOBAL wsrep_cluster_address='';
---connection node_1
-INSERT INTO t1 VALUES (DEFAULT);
-INSERT INTO t1 VALUES (DEFAULT);
-INSERT INTO t1 VALUES (DEFAULT);
-DROP TABLE t1;
+# join back to single node cluster
+SET GLOBAL wsrep_cluster_address='gcomm://';
+--source include/wait_until_connected_again.inc
---connection node_2
+# we should have 1 applier thread now
+--let $wait_condition = SELECT COUNT(*) = @@wsrep_slave_threads + 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND COMMAND != 'Daemon'
+--source include/wait_condition.inc
-# Wait until above DDL is replicated
---let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t%';
+# test if we can increase the applier count now (fails if bug #319 is present)
+SET GLOBAL wsrep_slave_threads = 10;
+--let $wait_condition = SELECT VARIABLE_VALUE = @@wsrep_slave_threads + 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
--source include/wait_condition.inc
-SELECT NAME FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t%';
+# restart node 2
+--connection node_2
+--source include/start_mysqld.inc
+--source include/wait_until_connected_again.inc
+
+SELECT COUNT(*) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND COMMAND != 'Daemon';
+#
#
-# make sure that we are left with exactly one applier thread before we leaving the test
+# cleanup to original state
#
---let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND (STATE IS NULL OR STATE NOT LIKE 'InnoDB%')
+--connection node_1
+--eval SET GLOBAL wsrep_slave_threads = $wsrep_slave_threads_orig
+
+--connection node_2
+--eval SET GLOBAL wsrep_slave_threads = $wsrep_slave_threads_orig
+
+
+--disable_result_log
+--disable_query_log
+# Generate 64 replication events to help node 1 purge its excess applier threads
+--let $count = 64
+while ($count)
+{
+ INSERT INTO t2 VALUES (DEFAULT);
+ --dec $count
+}
+--enable_query_log
+--enable_result_log
+
+--connection node_1
+--disable_result_log
+--disable_query_log
+# Generate 64 replication events to help node 2 purge its excess applier threads
+--let $count = 64
+while ($count)
+{
+ INSERT INTO t2 VALUES (DEFAULT);
+ --dec $count
+}
+--enable_query_log
+--enable_result_log
+
+--let $wait_condition = SELECT VARIABLE_VALUE = @@wsrep_slave_threads + 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
--source include/wait_condition.inc
-SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%';
+
+
+--connection node_2
+--let $wait_condition = SELECT VARIABLE_VALUE = @@wsrep_slave_threads + 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_thread_count';
+--source include/wait_condition.inc
+
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(*) FROM t2;
+
+--connection node_1
+
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(*) FROM t2;
+
+DROP TABLE t1;
+DROP TABLE t2;
+
+# Restore original auto_increment_offset values.
+--source include/auto_increment_offset_restore.inc
--echo # End of tests
diff --git a/mysql-test/suite/galera/t/galera_vote_drop_temporary-master.opt b/mysql-test/suite/galera/t/galera_vote_drop_temporary-master.opt
new file mode 100644
index 00000000000..beae84b3862
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_vote_drop_temporary-master.opt
@@ -0,0 +1 @@
+--log-bin
diff --git a/mysql-test/suite/galera/t/galera_wsrep_new_cluster.test b/mysql-test/suite/galera/t/galera_wsrep_new_cluster.test
index 6ba8ce786c8..28025363019 100644
--- a/mysql-test/suite/galera/t/galera_wsrep_new_cluster.test
+++ b/mysql-test/suite/galera/t/galera_wsrep_new_cluster.test
@@ -5,7 +5,6 @@
#
--source include/galera_cluster.inc
---source include/have_innodb.inc
SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
SELECT VARIABLE_VALUE = 'ON' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_connected';
diff --git a/mysql-test/suite/galera/t/mdev_18730.test b/mysql-test/suite/galera/t/mdev_18730.test
new file mode 100644
index 00000000000..e32bae68ce9
--- /dev/null
+++ b/mysql-test/suite/galera/t/mdev_18730.test
@@ -0,0 +1,71 @@
+#
+# Test scenario:
+#
+# Run an autocommit INSERT and stop the execution after the INSERT
+# has released the commit order critical section. On another connection,
+# run an SR transaction which will store one fragment into the streaming log.
+# If the bug is present, the fragment's streaming log commit may happen
+# out of order, and the completing INSERT may cause an assertion in a debug build.
+#
+# Note that due to the nature of this bug, it may not be possible
+# to construct a fully deterministic test case which crashes the
+# server every time the bug is present but passes with the fix.
+#
+
+--source include/galera_cluster.inc
+--source include/have_log_bin.inc
+--source include/have_debug_sync.inc
+
+CREATE TABLE t1 (f1 INT PRIMARY KEY);
+
+# Control connection for controlling node_1 debug sync points
+--let $galera_connection_name = ctrl
+--let $galera_server_number = 1
+--source include/galera_connect.inc
+
+# Another connection for SR transaction
+--let $galera_connection_name = node_1_sr
+--let $galera_server_number = 1
+--source include/galera_connect.inc
+
+# Set up sync point and send INSERT
+--connection node_1
+SET DEBUG_SYNC = "wsrep_after_commit_order_leave SIGNAL acol_reached WAIT_FOR acol_continue";
+--send INSERT INTO t1 VALUES (1)
+
+# Wait until INSERT releases commit order
+--connection ctrl
+SET DEBUG_SYNC = "now WAIT_FOR acol_reached";
+
+# Streaming transaction; it will replicate a fragment for each row separately.
+--connection node_1_sr
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_unit = 'rows';
+SET SESSION wsrep_trx_fragment_size = 1;
+
+START TRANSACTION;
+--send INSERT INTO t1 VALUES (2)
+
+--connection ctrl
+# Now let the node_1 thread continue after a one second sleep.
+# The sleep, while not completely deterministic, will allow the SR
+# insert to complete its commit out of order in most cases if
+# the bug is present, leading to an assertion in a debug build.
+--sleep 1
+SET DEBUG_SYNC = "now SIGNAL acol_continue";
+
+--connection node_1
+--reap
+--connection node_1_sr
+--reap
+ROLLBACK;
+
+--connection ctrl
+SET DEBUG_SYNC = "RESET";
+--disconnect ctrl
+--disconnect node_1_sr
+
+--connection node_1
+DROP TABLE t1;
+
+--source include/galera_end.inc
diff --git a/mysql-test/suite/galera/t/mysql-wsrep#198-master.opt b/mysql-test/suite/galera/t/mysql-wsrep#198-master.opt
new file mode 100644
index 00000000000..beae84b3862
--- /dev/null
+++ b/mysql-test/suite/galera/t/mysql-wsrep#198-master.opt
@@ -0,0 +1 @@
+--log-bin
diff --git a/mysql-test/suite/galera/t/mysql-wsrep#237.test b/mysql-test/suite/galera/t/mysql-wsrep#237.test
index cba8dfeb275..174266bdbc5 100644
--- a/mysql-test/suite/galera/t/mysql-wsrep#237.test
+++ b/mysql-test/suite/galera/t/mysql-wsrep#237.test
@@ -7,13 +7,13 @@
CREATE TABLE t (f1 INTEGER PRIMARY KEY) Engine=InnoDB;
--connection node_1
-SET DEBUG_SYNC = 'wsrep_before_replication WAIT_FOR continue';
+SET DEBUG_SYNC = 'wsrep_before_certification WAIT_FOR continue';
--send INSERT INTO t values (1);
--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connection node_1a
SET SESSION wsrep_sync_wait = 0;
---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'debug sync point: wsrep_before_replication'
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'debug sync point: wsrep_before_certification'
--source include/wait_condition.inc
--connection node_2
diff --git a/mysql-test/suite/galera/t/mysql-wsrep#332.test b/mysql-test/suite/galera/t/mysql-wsrep#332.test
index 2da01ba900e..674a5c3de52 100644
--- a/mysql-test/suite/galera/t/mysql-wsrep#332.test
+++ b/mysql-test/suite/galera/t/mysql-wsrep#332.test
@@ -1,7 +1,7 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_debug_sync.inc
---source suite/galera/include/galera_have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
# Open connection node_1a here, MW-369.inc will use it later
--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
diff --git a/mysql-test/suite/galera/t/partition.test b/mysql-test/suite/galera/t/partition.test
index 0159ef8f607..13e09a4e3e6 100644
--- a/mysql-test/suite/galera/t/partition.test
+++ b/mysql-test/suite/galera/t/partition.test
@@ -134,9 +134,12 @@ CREATE TABLE t1 (pk INT PRIMARY KEY)
SELECT COUNT(*) FROM t1;
-# LOAD-ing 20002 rows causes 3 commits to be registered
+# LOAD-ing 20002 rows causes
+# 3 commits to be registered when the Galera library does not support streaming replication and
+# 5 commits to be registered when the Galera library supports streaming replication
--disable_query_log
---eval SELECT $wsrep_last_committed_after = $wsrep_last_committed_before + 3 AS wsrep_last_committed_diff;
+--replace_result 3 AS_EXPECTED_3_or_5 5 AS_EXPECTED_3_or_5
+--eval SELECT $wsrep_last_committed_after - $wsrep_last_committed_before AS wsrep_last_committed_diff;
--enable_query_log
DROP TABLE t1;
@@ -167,7 +170,7 @@ SELECT COUNT(*) FROM t1;
# LOAD-ing 101 rows causes 1 commit to be registered
--disable_query_log
---eval SELECT $wsrep_last_committed_after = $wsrep_last_committed_before + 1 AS wsrep_last_committed_diff;
+--eval SELECT $wsrep_last_committed_after - $wsrep_last_committed_before AS wsrep_last_committed_diff;
--enable_query_log
DROP TABLE t1;
@@ -196,9 +199,12 @@ CREATE TABLE t1 (pk INT PRIMARY KEY)
--source include/wait_condition.inc
SELECT COUNT(*) FROM t1;
-# LOAD-ing 20002 rows causes 1 commit to be registered
+# LOAD-ing 20002 rows causes
+# 1 commit to be registered when the Galera library does not support streaming replication and
+# 2 commits to be registered when the Galera library supports streaming replication
--disable_query_log
---eval SELECT $wsrep_last_committed_after = $wsrep_last_committed_before + 1 AS wsrep_last_committed_diff;
+--replace_result 1 AS_EXPECTED_1_or_2 2 AS_EXPECTED_1_or_2
+--eval SELECT $wsrep_last_committed_after - $wsrep_last_committed_before AS wsrep_last_committed_diff;
--enable_query_log
DROP TABLE t1;
diff --git a/mysql-test/suite/galera/t/rpl_row_annotate.test b/mysql-test/suite/galera/t/rpl_row_annotate.test
index b1cfdb36639..0ec30829982 100644
--- a/mysql-test/suite/galera/t/rpl_row_annotate.test
+++ b/mysql-test/suite/galera/t/rpl_row_annotate.test
@@ -3,11 +3,15 @@
--echo # On node_2
--connection node_2
+SET GLOBAL wsrep_on=OFF;
RESET MASTER;
+SET GLOBAL wsrep_on=ON;
--echo # On node_1
--connection node_1
+SET GLOBAL wsrep_on=OFF;
RESET MASTER;
+SET GLOBAL wsrep_on=ON;
CREATE TABLE t1(i INT)ENGINE=INNODB;
INSERT INTO t1 VALUES(1);
DELETE FROM t1 WHERE i = 1;
@@ -38,5 +42,4 @@ let $start_pos= `select @binlog_start_pos`;
# Cleanup
DROP TABLE t1;
---source include/galera_end.inc
--echo # End of test
diff --git a/mysql-test/suite/galera/t/versioning_trx_id.test b/mysql-test/suite/galera/t/versioning_trx_id.test
index 175ead265a5..a99d7891ea0 100644
--- a/mysql-test/suite/galera/t/versioning_trx_id.test
+++ b/mysql-test/suite/galera/t/versioning_trx_id.test
@@ -7,22 +7,34 @@ create table t1 (a int, s bigint unsigned as row start, e bigint unsigned as row
insert into t1 (a) values (1),(2);
--connection node_2
+set session wsrep_sync_wait=15;
insert into t1 (a) values (3),(4);
select a from t1;
select count(*) from mysql.transaction_registry where begin_timestamp='0-0-0';
+if (`SELECT count(*) from mysql.transaction_registry where begin_timestamp>=commit_timestamp`) {
+ select * from mysql.transaction_registry;
+}
select count(*) from mysql.transaction_registry where begin_timestamp>=commit_timestamp;
--connection node_3
+set session wsrep_sync_wait=15;
insert into t1 (a) values (5),(6);
select a from t1;
select count(*) from mysql.transaction_registry where begin_timestamp='0-0-0';
+if (`SELECT count(*) from mysql.transaction_registry where begin_timestamp>=commit_timestamp`) {
+ select * from mysql.transaction_registry;
+}
select count(*) from mysql.transaction_registry where begin_timestamp>=commit_timestamp;
--connection node_1
+set session wsrep_sync_wait=15;
select a from t1;
select count(*) from mysql.transaction_registry where begin_timestamp='0-0-0';
+if (`SELECT count(*) from mysql.transaction_registry where begin_timestamp>=commit_timestamp`) {
+ select * from mysql.transaction_registry;
+}
select count(*) from mysql.transaction_registry where begin_timestamp>=commit_timestamp;
drop table t1;
---source include/galera_end.inc
+--disconnect node_3
diff --git a/mysql-test/suite/galera/t/wsrep_trx_fragment_size_sr.test b/mysql-test/suite/galera/t/wsrep_trx_fragment_size_sr.test
new file mode 100644
index 00000000000..a970cc09afc
--- /dev/null
+++ b/mysql-test/suite/galera/t/wsrep_trx_fragment_size_sr.test
@@ -0,0 +1,22 @@
+-- source include/galera_cluster.inc
+
+-- let $sr = `SELECT variable_value LIKE '%:STREAMING:%' FROM information_schema.session_status WHERE variable_name = 'wsrep_provider_capabilities'`
+
+if (!$sr)
+{
+ -- skip The test requires a wsrep provider that supports streaming replication.
+}
+
+SELECT variable_value FROM information_schema.session_variables
+WHERE variable_name = 'wsrep_trx_fragment_size';
+
+SET SESSION wsrep_trx_fragment_size = 0;
+SET SESSION wsrep_trx_fragment_size = 123;
+
+SELECT variable_value FROM information_schema.global_variables
+WHERE variable_name = 'wsrep_trx_fragment_size';
+
+SET GLOBAL wsrep_trx_fragment_size = 0;
+SET GLOBAL wsrep_trx_fragment_size = 123;
+
+SET GLOBAL wsrep_trx_fragment_size = default;
diff --git a/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf b/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf
index 477789175fb..3a3c43781d5 100644
--- a/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf
+++ b/mysql-test/suite/galera_3nodes/galera_2x3nodes.cnf
@@ -11,19 +11,18 @@ wsrep_gtid_mode=1
gtid_ignore_duplicates
auto_increment_increment=3
-wsrep-on=1
wsrep-provider=@ENV.WSREP_PROVIDER
wsrep_node_address=127.0.0.1
# enforce read-committed characteristics across the cluster
-# wsrep-causal-reads=ON
-# wsrep-sync-wait=15
+wsrep-sync-wait=15
[mysqld.1]
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep-cluster-address='gcomm://'
-wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;evs.suspect_timeout=PT300S;evs.inactive_timeout=PT1000M;evs.install_timeout=PT155S;evs.keepalive_period = PT100S'
+wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;evs.suspect_timeout=PT30S;evs.inactive_timeout=PT90S;evs.install_timeout=PT60S;pc.wait_prim_timeout = PT60S'
wsrep_sst_receive_address=127.0.0.2:@mysqld.1.#sst_port
wsrep_node_incoming_address=127.0.0.1:@mysqld.1.port
@@ -33,8 +32,9 @@ wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port'
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port'
-wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.2.#galera_port;evs.suspect_timeout=PT300S;evs.inactive_timeout=PT1000M;evs.install_timeout=PT155S;evs.keepalive_period = PT100S'
+wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.2.#galera_port;evs.suspect_timeout=PT30S;evs.inactive_timeout=PT90S;evs.install_timeout=PT60S;pc.wait_prim_timeout = PT60S'
wsrep_sst_receive_address=127.0.0.2:@mysqld.2.#sst_port
wsrep_node_incoming_address=127.0.0.1:@mysqld.2.port
@@ -44,8 +44,9 @@ wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port'
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port'
-wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.3.#galera_port;evs.suspect_timeout=PT300S;evs.inactive_timeout=PT1000M;evs.install_timeout=PT155S;evs.keepalive_period = PT100S'
+wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.3.#galera_port;evs.suspect_timeout=PT30S;evs.inactive_timeout=PT90S;evs.install_timeout=PT60S;pc.wait_prim_timeout = PT60S'
wsrep_sst_receive_address=127.0.0.2:@mysqld.3.#sst_port
wsrep_node_incoming_address=127.0.0.1:@mysqld.3.port
@@ -57,9 +58,9 @@ wsrep_cluster_name=cluster2
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
-
+wsrep-on=1
wsrep-cluster-address='gcomm://'
-wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.4.#galera_port;evs.suspect_timeout=PT300S;evs.inactive_timeout=PT1000M;evs.install_timeout=PT155S;evs.keepalive_period = PT100S'
+wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.4.#galera_port;evs.suspect_timeout=PT30S;evs.inactive_timeout=PT90S;evs.install_timeout=PT60S;pc.wait_prim_timeout = PT60S'
wsrep_sst_receive_address=127.0.0.2:@mysqld.4.#sst_port
wsrep_node_incoming_address=127.0.0.1:@mysqld.4.port
@@ -70,8 +71,9 @@ wsrep_cluster_name=cluster2
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.4.#galera_port'
-wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.5.#galera_port;evs.suspect_timeout=PT300S;evs.inactive_timeout=PT1000M;evs.install_timeout=PT155S;evs.keepalive_period = PT100S'
+wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.5.#galera_port;evs.suspect_timeout=PT30S;evs.inactive_timeout=PT90S;evs.install_timeout=PT60S;pc.wait_prim_timeout = PT60S'
wsrep_sst_receive_address=127.0.0.2:@mysqld.5.#sst_port
wsrep_node_incoming_address=127.0.0.1:@mysqld.5.port
@@ -82,8 +84,9 @@ wsrep_cluster_name=cluster2
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.4.#galera_port'
-wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.6.#galera_port;evs.suspect_timeout=PT300S;evs.inactive_timeout=PT1000M;evs.install_timeout=PT155S;evs.keepalive_period = PT100S'
+wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.6.#galera_port;evs.suspect_timeout=PT30S;evs.inactive_timeout=PT90S;evs.install_timeout=PT60S;pc.wait_prim_timeout = PT60S'
wsrep_sst_receive_address=127.0.0.2:@mysqld.6.#sst_port
wsrep_node_incoming_address=127.0.0.1:@mysqld.6.port
@@ -107,17 +110,3 @@ NODE_MYSOCK_5= @mysqld.5.socket
NODE_MYPORT_6= @mysqld.6.port
NODE_MYSOCK_6= @mysqld.6.socket
-
-NODE_GALERAPORT_1= @mysqld.1.#galera_port
-NODE_GALERAPORT_2= @mysqld.2.#galera_port
-NODE_GALERAPORT_3= @mysqld.3.#galera_port
-NODE_GALERAPORT_4= @mysqld.4.#galera_port
-NODE_GALERAPORT_5= @mysqld.5.#galera_port
-NODE_GALERAPORT_6= @mysqld.6.#galera_port
-
-NODE_SSTPORT_1= @mysqld.1.#sst_port
-NODE_SSTPORT_2= @mysqld.2.#sst_port
-NODE_SSTPORT_3= @mysqld.3.#sst_port
-NODE_SSTPORT_4= @mysqld.4.#sst_port
-NODE_SSTPORT_5= @mysqld.5.#sst_port
-NODE_SSTPORT_6= @mysqld.6.#sst_port
diff --git a/mysql-test/suite/galera_3nodes/galera_3nodes.cnf b/mysql-test/suite/galera_3nodes/galera_3nodes.cnf
index e5aa81b8742..d33ed0caddf 100644
--- a/mysql-test/suite/galera_3nodes/galera_3nodes.cnf
+++ b/mysql-test/suite/galera_3nodes/galera_3nodes.cnf
@@ -7,7 +7,6 @@ innodb-autoinc-lock-mode=2
default-storage-engine=innodb
auto_increment_increment=3
-wsrep-on=1
wsrep-provider=@ENV.WSREP_PROVIDER
wsrep_node_address=127.0.0.1
# enforce read-committed characteristics across the cluster
@@ -18,6 +17,8 @@ wsrep-sync-wait=15
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+#wsrep-new-cluster
+wsrep-on=1
wsrep-cluster-address='gcomm://'
wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S'
@@ -29,6 +30,7 @@ wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port'
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port'
wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.2.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S'
@@ -40,6 +42,7 @@ wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port'
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port'
wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.3.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S'
@@ -56,12 +59,3 @@ NODE_MYSOCK_2= @mysqld.2.socket
NODE_MYPORT_3= @mysqld.3.port
NODE_MYSOCK_3= @mysqld.3.socket
-
-NODE_GALERAPORT_1= @mysqld.1.#galera_port
-NODE_GALERAPORT_2= @mysqld.2.#galera_port
-NODE_GALERAPORT_3= @mysqld.3.#galera_port
-
-NODE_SSTPORT_1= @mysqld.1.#sst_port
-NODE_SSTPORT_2= @mysqld.2.#sst_port
-NODE_SSTPORT_3= @mysqld.3.#sst_port
-
diff --git a/mysql-test/include/galera_resume.inc b/mysql-test/suite/galera_3nodes/include/galera_resume.inc
index af8f2b956fd..af8f2b956fd 100644
--- a/mysql-test/include/galera_resume.inc
+++ b/mysql-test/suite/galera_3nodes/include/galera_resume.inc
diff --git a/mysql-test/suite/galera_3nodes/r/GAL-501.result b/mysql-test/suite/galera_3nodes/r/GAL-501.result
index bcf74142144..063e88ec21a 100644
--- a/mysql-test/suite/galera_3nodes/r/GAL-501.result
+++ b/mysql-test/suite/galera_3nodes/r/GAL-501.result
@@ -1,3 +1,6 @@
+connection node_2;
+connection node_1;
+connection node_3;
SELECT VARIABLE_VALUE LIKE '%[::1]%' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_incoming_addresses';
VARIABLE_VALUE LIKE '%[::1]%'
1
diff --git a/mysql-test/suite/galera_3nodes/r/galera_certification_ccc.result b/mysql-test/suite/galera_3nodes/r/galera_certification_ccc.result
index b1bbb1406a1..6393a30da6f 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_certification_ccc.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_certification_ccc.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_3;
diff --git a/mysql-test/suite/galera_3nodes/r/galera_certification_double_failure.result b/mysql-test/suite/galera_3nodes/r/galera_certification_double_failure.result
index e1528c6f74f..d43b42bec45 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_certification_double_failure.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_certification_double_failure.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
@@ -11,6 +13,7 @@ connection node_3;
INSERT INTO t2 VALUES (1);
connection node_1;
COMMIT;
-ERROR 40001: Deadlock: wsrep aborted transaction
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_3;
DROP TABLE t1;
DROP TABLE t2;
diff --git a/mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result b/mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result
index 6a7eea94077..3543feff78c 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SET GLOBAL wsrep_provider_options = 'evs.inactive_timeout=PT100M; evs.suspect_timeout=PT1S';
connection node_2;
@@ -21,6 +23,6 @@ SELECT COUNT(*) = 1 FROM t1;
COUNT(*) = 1
1
DROP TABLE t1;
-Resuming node ...
connection node_3;
+Resuming node ...
CALL mtr.add_suppression("WSREP: gcs_caused() returned -1 \\(Operation not permitted\\)");
diff --git a/mysql-test/suite/galera_3nodes/r/galera_garbd.result b/mysql-test/suite/galera_3nodes/r/galera_garbd.result
index fb7e729dc77..ebc5fdf33f4 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_garbd.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_garbd.result
@@ -1,3 +1,7 @@
+connection node_2;
+connection node_1;
+connection node_1;
+connection node_2;
connection node_1;
connection node_2;
connection node_3;
diff --git a/mysql-test/suite/galera_3nodes/r/galera_gtid_2_cluster.result b/mysql-test/suite/galera_3nodes/r/galera_gtid_2_cluster.result
index 35ca84119e7..afb94d493c4 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_gtid_2_cluster.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_gtid_2_cluster.result
@@ -1,27 +1,40 @@
+connection node_2;
+connection node_1;
+connection node_1;
cluster 1 node 1
SHOW STATUS LIKE 'wsrep_cluster_size';
Variable_name Value
wsrep_cluster_size 3
+connection node_2;
cluster 1 node 2
SHOW STATUS LIKE 'wsrep_cluster_size';
Variable_name Value
wsrep_cluster_size 3
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_3;
cluster 1 node 3
SHOW STATUS LIKE 'wsrep_cluster_size';
Variable_name Value
wsrep_cluster_size 3
+connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4;
+connection node_4;
cluster 2 node 1
SHOW STATUS LIKE 'wsrep_cluster_size';
Variable_name Value
wsrep_cluster_size 3
+connect node_5, 127.0.0.1, root, , test, $NODE_MYPORT_5;
+connection node_5;
cluster 2 node 2
SHOW STATUS LIKE 'wsrep_cluster_size';
Variable_name Value
wsrep_cluster_size 3
+connect node_6, 127.0.0.1, root, , test, $NODE_MYPORT_6;
+connection node_6;
cluster 2 node 3
SHOW STATUS LIKE 'wsrep_cluster_size';
Variable_name Value
wsrep_cluster_size 3
+connection node_1;
change master to master_host='127.0.0.1', master_user='root', master_port=NODE_MYPORT_4, master_use_gtid=current_pos, ignore_server_ids=(12,13);;
start slave;
include/wait_for_slave_to_start.inc
@@ -31,6 +44,7 @@ select @@gtid_binlog_state;
select @@gtid_slave_pos;
@@gtid_slave_pos
+connection node_4;
change master to master_host='127.0.0.1', master_user='root', master_port=NODE_MYPORT_1, master_use_gtid=current_pos, ignore_server_ids=(22,23);;
start slave;
include/wait_for_slave_to_start.inc
@@ -41,15 +55,19 @@ select @@gtid_slave_pos;
@@gtid_slave_pos
cluster 1 node 1
+connection node_1;
create table t1 (cluster_domain_id int ,node_server_id int, seq_no int);
insert into t1 values (1, 11, 2);
select @@gtid_binlog_state;
@@gtid_binlog_state
1-11-2
#wait for sync cluster 1 and 2
+connection node_1;
include/save_master_gtid.inc
+connection node_4;
include/sync_with_master_gtid.inc
cluster 2 node 1
+connection node_4;
select @@gtid_binlog_state;
@@gtid_binlog_state
1-11-2
@@ -62,9 +80,12 @@ cluster_domain_id node_server_id seq_no
1 11 2
2 21 1
#wait for sync cluster 1 and 2
+connection node_1;
include/save_master_gtid.inc
+connection node_4;
include/sync_with_master_gtid.inc
cluster 1 node 2
+connection node_2;
select @@gtid_binlog_state;
@@gtid_binlog_state
1-11-2,2-21-1
@@ -73,9 +94,12 @@ select @@gtid_binlog_state;
@@gtid_binlog_state
1-11-2,1-12-3,2-21-1
#wait for sync cluster 1 and 2
+connection node_1;
include/save_master_gtid.inc
+connection node_4;
include/sync_with_master_gtid.inc
cluster 1 node 3
+connection node_3;
select @@gtid_binlog_state;
@@gtid_binlog_state
1-11-2,1-12-3,2-21-1
@@ -84,9 +108,12 @@ select @@gtid_binlog_state;
@@gtid_binlog_state
1-12-3,1-11-2,1-13-4,2-21-1
#wait for sync cluster 1 and 2
+connection node_1;
include/save_master_gtid.inc
+connection node_4;
include/sync_with_master_gtid.inc
cluster 2 node 2
+connection node_5;
select @@gtid_binlog_state;
@@gtid_binlog_state
1-12-3,1-11-2,1-13-4,2-21-1
@@ -95,9 +122,12 @@ select @@gtid_binlog_state;
@@gtid_binlog_state
1-12-3,1-11-2,1-13-4,2-21-1,2-22-2
#wait for sync cluster 2 and 1
+connection node_4;
include/save_master_gtid.inc
+connection node_1;
include/sync_with_master_gtid.inc
cluster 2 node 3
+connection node_6;
select @@gtid_binlog_state;
@@gtid_binlog_state
1-12-3,1-11-2,1-13-4,2-21-1,2-22-2
@@ -106,28 +136,53 @@ select @@gtid_binlog_state;
@@gtid_binlog_state
1-12-3,1-11-2,1-13-4,2-21-1,2-22-2,2-23-3
#wait for sync cluster 2 and 1
+connection node_4;
include/save_master_gtid.inc
+connection node_1;
include/sync_with_master_gtid.inc
cluster 1 node 1
+connection node_1;
select @@gtid_binlog_state;
@@gtid_binlog_state
1-12-3,1-11-2,1-13-4,2-21-1,2-22-2,2-23-3
drop table t1;
stop slave;
reset slave;
+cluster 2 node 1
+connection node_4;
+stop slave;
+reset slave;
+cluster 1 node 1
+connection node_1;
change master to master_use_gtid=no, ignore_server_ids=();
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
set global GTID_SLAVE_POS="";
cluster 2 node 1
-stop slave;
-reset slave;
+connection node_4;
change master to master_use_gtid=no, ignore_server_ids=();
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
set global GTID_SLAVE_POS="";
+connection node_2;
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
+connection node_3;
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
+connection node_5;
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
+connection node_6;
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
+connection node_1;
change master to master_host='127.0.0.1', master_user='root', master_port=NODE_MYPORT_6, master_use_gtid=current_pos, ignore_server_ids=(12,13);;
start slave;
include/wait_for_slave_to_start.inc
@@ -137,6 +192,7 @@ select @@gtid_binlog_state;
select @@gtid_slave_pos;
@@gtid_slave_pos
+connection node_4;
change master to master_host='127.0.0.1', master_user='root', master_port=NODE_MYPORT_3, master_use_gtid=current_pos, ignore_server_ids=(22,23);;
start slave;
include/wait_for_slave_to_start.inc
@@ -147,15 +203,19 @@ select @@gtid_slave_pos;
@@gtid_slave_pos
cluster 1 node 1
+connection node_1;
create table t1 (cluster_domain_id int ,node_server_id int, seq_no int);
insert into t1 values (1, 11, 2);
select @@gtid_binlog_state;
@@gtid_binlog_state
1-11-2
#wait for sync cluster 1 and 2
+connection node_1;
include/save_master_gtid.inc
+connection node_4;
include/sync_with_master_gtid.inc
cluster 2 node 1
+connection node_4;
insert into t1 values (2, 21, 1);
select @@gtid_binlog_state;
@@gtid_binlog_state
@@ -165,9 +225,12 @@ cluster_domain_id node_server_id seq_no
1 11 2
2 21 1
#wait for sync cluster 1 and 2
+connection node_1;
include/save_master_gtid.inc
+connection node_4;
include/sync_with_master_gtid.inc
cluster 1 node 2
+connection node_2;
select @@gtid_binlog_state;
@@gtid_binlog_state
1-11-2,2-21-1
@@ -176,9 +239,12 @@ select @@gtid_binlog_state;
@@gtid_binlog_state
1-11-2,1-12-3,2-21-1
#wait for sync cluster 1 and 2
+connection node_1;
include/save_master_gtid.inc
+connection node_4;
include/sync_with_master_gtid.inc
cluster 1 node 3
+connection node_3;
select @@gtid_binlog_state;
@@gtid_binlog_state
1-11-2,1-12-3,2-21-1
@@ -187,9 +253,12 @@ select @@gtid_binlog_state;
@@gtid_binlog_state
1-12-3,1-11-2,1-13-4,2-21-1
#wait for sync cluster 1 and 2
+connection node_1;
include/save_master_gtid.inc
+connection node_4;
include/sync_with_master_gtid.inc
cluster 2 node 2
+connection node_5;
select @@gtid_binlog_state;
@@gtid_binlog_state
1-12-3,1-11-2,1-13-4,2-21-1
@@ -198,9 +267,12 @@ select @@gtid_binlog_state;
@@gtid_binlog_state
1-12-3,1-11-2,1-13-4,2-21-1,2-22-2
#wait for sync cluster 2 and 1
+connection node_4;
include/save_master_gtid.inc
+connection node_1;
include/sync_with_master_gtid.inc
cluster 2 node 3
+connection node_6;
select @@gtid_binlog_state;
@@gtid_binlog_state
1-12-3,1-11-2,1-13-4,2-21-1,2-22-2
@@ -209,23 +281,43 @@ select @@gtid_binlog_state;
@@gtid_binlog_state
1-12-3,1-11-2,1-13-4,2-21-1,2-22-2,2-23-3
#wait for sync cluster 2 and 1
+connection node_4;
include/save_master_gtid.inc
+connection node_1;
include/sync_with_master_gtid.inc
cluster 1 node 1
+connection node_1;
select @@gtid_binlog_state;
@@gtid_binlog_state
1-12-3,1-11-2,1-13-4,2-21-1,2-22-2,2-23-3
drop table t1;
stop slave;
change master to master_use_gtid=no, ignore_server_ids=();
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
set global GTID_SLAVE_POS="";
cluster 2 node 1
+connection node_4;
stop slave;
change master to master_use_gtid=no, ignore_server_ids=();
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
set global GTID_SLAVE_POS="";
+connection node_2;
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
+connection node_3;
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
+connection node_5;
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
+connection node_6;
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
diff --git a/mysql-test/suite/galera_3nodes/r/galera_innobackupex_backup.result b/mysql-test/suite/galera_3nodes/r/galera_innobackupex_backup.result
deleted file mode 100644
index 6ed7587303d..00000000000
--- a/mysql-test/suite/galera_3nodes/r/galera_innobackupex_backup.result
+++ /dev/null
@@ -1,17 +0,0 @@
-connection node_1;
-connection node_2;
-connection node_3;
-connection node_1;
-CREATE TABLE t1 (f1 INTEGER);
-INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
-connection node_2;
-SELECT COUNT(*) = 10 FROM t1;
-COUNT(*) = 10
-1
-Killing server ...
-connection node_1;
-INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
-SELECT COUNT(*) = 20 FROM t1;
-COUNT(*) = 20
-1
-DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes/r/galera_ipv6_mariabackup.result b/mysql-test/suite/galera_3nodes/r/galera_ipv6_mariabackup.result
index 5665ed5f46a..8ec1ff090ff 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_ipv6_mariabackup.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_ipv6_mariabackup.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SELECT VARIABLE_VALUE LIKE '%[::1]%' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_incoming_addresses';
VARIABLE_VALUE LIKE '%[::1]%'
1
@@ -19,4 +21,4 @@ connection node_1;
include/assert_grep.inc [Streaming the backup to joiner at \[::1\]]
include/assert_grep.inc [async IST sender starting to serve tcp://\[::1\]:]
include/assert_grep.inc [IST receiver addr using tcp://\[::1\]]
-include/assert_grep.inc [Prepared IST receiver, listening at: tcp://\[::1\]]
+include/assert_grep.inc [Prepared IST receiver for 3-6, listening at: tcp://\[::1\]]
diff --git a/mysql-test/suite/galera_3nodes/r/galera_ipv6_mariabackup_section.result b/mysql-test/suite/galera_3nodes/r/galera_ipv6_mariabackup_section.result
index 53e35939a79..8ec1ff090ff 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_ipv6_mariabackup_section.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_ipv6_mariabackup_section.result
@@ -1,18 +1,24 @@
+connection node_2;
+connection node_1;
SELECT VARIABLE_VALUE LIKE '%[::1]%' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_incoming_addresses';
VARIABLE_VALUE LIKE '%[::1]%'
1
SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 3
1
+connection node_2;
SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
+connection node_2;
SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
SELECT COUNT(*) = 1 FROM t1;
COUNT(*) = 1
1
DROP TABLE t1;
+connection node_1;
include/assert_grep.inc [Streaming the backup to joiner at \[::1\]]
include/assert_grep.inc [async IST sender starting to serve tcp://\[::1\]:]
include/assert_grep.inc [IST receiver addr using tcp://\[::1\]]
-include/assert_grep.inc [Prepared IST receiver, listening at: tcp://\[::1\]]
+include/assert_grep.inc [Prepared IST receiver for 3-6, listening at: tcp://\[::1\]]
diff --git a/mysql-test/suite/galera_3nodes/r/galera_ipv6_mysqldump.result b/mysql-test/suite/galera_3nodes/r/galera_ipv6_mysqldump.result
index 3564dc8c5a1..bfc210db2ba 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_ipv6_mysqldump.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_ipv6_mysqldump.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to'");
call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos");
connection node_1;
@@ -33,5 +35,6 @@ CALL mtr.add_suppression("Can't open and lock time zone table");
CALL mtr.add_suppression("Can't open and lock privilege tables");
CALL mtr.add_suppression("Info table is not ready to be used");
CALL mtr.add_suppression("Native table .* has the wrong structure");
+CALL mtr.add_suppression("Table \'mysql.gtid_slave_pos\' doesn\'t exist");
connection node_2;
CALL mtr.add_suppression("Unsupported protocol downgrade: incremental data collection disabled. Expect abort");
diff --git a/mysql-test/suite/galera_3nodes/r/galera_ipv6_rsync.result b/mysql-test/suite/galera_3nodes/r/galera_ipv6_rsync.result
index bcf74142144..3f810d3eb97 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_ipv6_rsync.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_ipv6_rsync.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
SELECT VARIABLE_VALUE LIKE '%[::1]%' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_incoming_addresses';
VARIABLE_VALUE LIKE '%[::1]%'
1
diff --git a/mysql-test/suite/galera_3nodes/r/galera_ipv6_rsync_section.result b/mysql-test/suite/galera_3nodes/r/galera_ipv6_rsync_section.result
index a2bf5f4d98c..3f810d3eb97 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_ipv6_rsync_section.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_ipv6_rsync_section.result
@@ -1,12 +1,17 @@
+connection node_2;
+connection node_1;
SELECT VARIABLE_VALUE LIKE '%[::1]%' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_incoming_addresses';
VARIABLE_VALUE LIKE '%[::1]%'
1
SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 3
1
+connection node_2;
SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+connection node_1;
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
+connection node_2;
SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
SELECT COUNT(*) = 1 FROM t1;
COUNT(*) = 1
diff --git a/mysql-test/suite/galera_3nodes/r/galera_ist_gcache_rollover.result b/mysql-test/suite/galera_3nodes/r/galera_ist_gcache_rollover.result
index aa3e349eda7..3b0c32547fe 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_ist_gcache_rollover.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_ist_gcache_rollover.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_3;
diff --git a/mysql-test/suite/galera_3nodes/r/galera_load_data_ist.result b/mysql-test/suite/galera_3nodes/r/galera_load_data_ist.result
deleted file mode 100644
index cfb897e1076..00000000000
--- a/mysql-test/suite/galera_3nodes/r/galera_load_data_ist.result
+++ /dev/null
@@ -1,36 +0,0 @@
-connection node_1;
-connection node_2;
-connection node_3;
-connection node_1;
-CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
-connection node_2;
-connection node_3;
-SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
-SET SESSION wsrep_on = OFF;
-SET SESSION wsrep_on = ON;
-SET SESSION wsrep_sync_wait = 0;
-connection node_2a;
-SET SESSION wsrep_sync_wait = 0;
-connection node_2;
-SET GLOBAL wsrep_load_data_splitting = TRUE;
-SET DEBUG_SYNC='intermediate_transaction_commit SIGNAL commited WAIT_FOR ist';
-connection node_2a;
-SET DEBUG_SYNC='now WAIT_FOR commited';
-connection node_3;
-SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0';
-connection node_2a;
-SET DEBUG_SYNC='now SIGNAL ist';
-connection node_1;
-connection node_2;
-SET DEBUG_SYNC='RESET';
-SELECT COUNT(*) = 95000 FROM t1;
-COUNT(*) = 95000
-1
-wsrep_last_committed_diff
-1
-connection node_1;
-SET GLOBAL wsrep_load_data_splitting = 1;;
-DROP TABLE t1;
-disconnect node_3;
-disconnect node_2;
-disconnect node_1;
diff --git a/mysql-test/suite/galera_3nodes/r/galera_parallel_apply_3nodes.result b/mysql-test/suite/galera_3nodes/r/galera_parallel_apply_3nodes.result
index 4f9951c382f..8211fb8501e 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_parallel_apply_3nodes.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_parallel_apply_3nodes.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
connection node_3;
@@ -12,7 +14,7 @@ connection node_3;
SELECT f1 = 111 FROM t1;
f1 = 111
1
-SELECT COUNT(*) IN (1, 2) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE 'committed%';
+SELECT COUNT(*) IN (1, 2) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%committed%';
COUNT(*) IN (1, 2)
1
SET GLOBAL wsrep_slave_threads = 1;;
diff --git a/mysql-test/suite/galera_3nodes/r/galera_pc_bootstrap.result b/mysql-test/suite/galera_3nodes/r/galera_pc_bootstrap.result
index 3405beef12f..ee49330e892 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_pc_bootstrap.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_pc_bootstrap.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
CREATE TABLE t1 (f1 INTEGER);
connection node_1;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1';
diff --git a/mysql-test/suite/galera_3nodes/r/galera_pc_weight.result b/mysql-test/suite/galera_3nodes/r/galera_pc_weight.result
index 5fb9c1b9d66..3ae983f9550 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_pc_weight.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_pc_weight.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_weight';
VARIABLE_VALUE = 3
diff --git a/mysql-test/suite/galera_3nodes/r/galera_safe_to_bootstrap.result b/mysql-test/suite/galera_3nodes/r/galera_safe_to_bootstrap.result
index 45b4d63fb4f..4da24e8dbfb 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_safe_to_bootstrap.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_safe_to_bootstrap.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_3;
@@ -38,6 +40,8 @@ CALL mtr.add_suppression("WSREP: wsrep::connect(.*) failed: 7");
CALL mtr.add_suppression("Aborting");
CALL mtr.add_suppression("WSREP: moving position backwards: [0-9]+ -> 0");
CALL mtr.add_suppression("Failed to prepare for incremental state transfer");
+CALL mtr.add_suppression("It may not be safe to bootstrap the cluster from this node");
+CALL mtr.add_suppression("Aborting");
connection node_3;
CALL mtr.add_suppression("WSREP: no nodes coming from prim view, prim not possible");
CALL mtr.add_suppression("WSREP: It may not be safe to bootstrap the cluster from this node");
@@ -45,6 +49,8 @@ CALL mtr.add_suppression("WSREP: wsrep::connect(.*) failed: 7");
CALL mtr.add_suppression("Aborting");
CALL mtr.add_suppression("WSREP: moving position backwards: [0-9]+ -> 0");
CALL mtr.add_suppression("Failed to prepare for incremental state transfer");
+CALL mtr.add_suppression("It may not be safe to bootstrap the cluster from this node");
+CALL mtr.add_suppression("Aborting");
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/suite/galera_3nodes/r/galera_var_dirty_reads2.result b/mysql-test/suite/galera_3nodes/r/galera_var_dirty_reads2.result
index 77991a6d468..cb327107e2a 100644
--- a/mysql-test/suite/galera_3nodes/r/galera_var_dirty_reads2.result
+++ b/mysql-test/suite/galera_3nodes/r/galera_var_dirty_reads2.result
@@ -1,3 +1,5 @@
+connection node_2;
+connection node_1;
connection node_1;
connection node_2;
connection node_3;
diff --git a/mysql-test/suite/galera_3nodes/r/galera_wsrep_schema.result b/mysql-test/suite/galera_3nodes/r/galera_wsrep_schema.result
new file mode 100644
index 00000000000..23ced3ba734
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/r/galera_wsrep_schema.result
@@ -0,0 +1,82 @@
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+connection node_1;
+connection node_2;
+connection node_3;
+SHOW CREATE TABLE mysql.wsrep_cluster;
+Table Create Table
+wsrep_cluster CREATE TABLE `wsrep_cluster` (
+ `cluster_uuid` char(36) NOT NULL,
+ `view_id` bigint(20) NOT NULL,
+ `view_seqno` bigint(20) NOT NULL,
+ `protocol_version` int(11) NOT NULL,
+ `capabilities` int(11) NOT NULL,
+ PRIMARY KEY (`cluster_uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SHOW CREATE TABLE mysql.wsrep_cluster_members;
+Table Create Table
+wsrep_cluster_members CREATE TABLE `wsrep_cluster_members` (
+ `node_uuid` char(36) NOT NULL,
+ `cluster_uuid` char(36) NOT NULL,
+ `node_name` char(32) NOT NULL,
+ `node_incoming_address` varchar(256) NOT NULL,
+ PRIMARY KEY (`node_uuid`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster;
+COUNT(*) = 1
+1
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster;
+cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid')
+1
+SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_members;
+COUNT(*) = 3
+1
+SELECT COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size') FROM mysql.wsrep_cluster_members;
+COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size')
+1
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster_members WHERE node_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_gcomm_uuid');
+COUNT(*) = 1
+1
+SELECT node_incoming_address LIKE '127.0.0.1:%' from mysql.wsrep_cluster_members;
+node_incoming_address LIKE '127.0.0.1:%'
+1
+1
+1
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster_members;
+cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid')
+1
+1
+1
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster_members WHERE node_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_gcomm_uuid');
+COUNT(*) = 1
+1
+connection node_2;
+connection node_1;
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster;
+cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid')
+1
+SELECT COUNT(*) = 2 FROM mysql.wsrep_cluster_members;
+COUNT(*) = 2
+1
+connection node_2;
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster;
+cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid')
+1
+SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_members;
+COUNT(*) = 3
+1
+connection node_1;
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster;
+cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid')
+1
+SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_members;
+COUNT(*) = 3
+1
+connection node_1;
+CALL mtr.add_suppression("SYNC message from member");
+connection node_2;
+CALL mtr.add_suppression("SYNC message from member");
+connection node_3;
+CALL mtr.add_suppression("SYNC message from member");
diff --git a/mysql-test/suite/galera_3nodes/suite.pm b/mysql-test/suite/galera_3nodes/suite.pm
index a7c1bf79c06..5ed5cbd95db 100644
--- a/mysql-test/suite/galera_3nodes/suite.pm
+++ b/mysql-test/suite/galera_3nodes/suite.pm
@@ -9,9 +9,9 @@ return "Not run for embedded server" if $::opt_embedded_server;
return "WSREP is not compiled in" unless defined $::mysqld_variables{'wsrep-on'};
my ($provider) = grep { -f $_ } $ENV{WSREP_PROVIDER},
- "/usr/lib64/galera-3/libgalera_smm.so",
+ "/usr/lib64/galera-4/libgalera_smm.so",
"/usr/lib64/galera/libgalera_smm.so",
- "/usr/lib/galera-3/libgalera_smm.so",
+ "/usr/lib/galera-4/libgalera_smm.so",
"/usr/lib/galera/libgalera_smm.so";
return "No wsrep provider library" unless -f $provider;
diff --git a/mysql-test/suite/galera_3nodes/t/GAL-501.test b/mysql-test/suite/galera_3nodes/t/GAL-501.test
index a36f21630ac..c4b17cdb21e 100644
--- a/mysql-test/suite/galera_3nodes/t/GAL-501.test
+++ b/mysql-test/suite/galera_3nodes/t/GAL-501.test
@@ -7,6 +7,12 @@
--source include/galera_cluster.inc
--source include/check_ipv6.inc
+--let $galera_connection_name = node_3
+--let $galera_server_number = 3
+--source include/galera_connect.inc
+--connection node_3
+--source include/galera_wait_ready.inc
+
# Confirm that initial handshake happened over ipv6
SELECT VARIABLE_VALUE LIKE '%[::1]%' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_incoming_addresses';
diff --git a/mysql-test/suite/galera_3nodes/t/galera_certification_double_failure.test b/mysql-test/suite/galera_3nodes/t/galera_certification_double_failure.test
index a2ad0765028..5366d2a4a6e 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_certification_double_failure.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_certification_double_failure.test
@@ -29,5 +29,7 @@ INSERT INTO t2 VALUES (1);
--error ER_LOCK_DEADLOCK
COMMIT;
+--connection node_3
+--source include/galera_wait_ready.inc
DROP TABLE t1;
DROP TABLE t2;
diff --git a/mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test b/mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test
index 4dab936c343..a4767928681 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test
@@ -56,13 +56,13 @@ DROP TABLE t1;
# Reconnect node #3 so that MTR's end-of-test checks can run
---source include/galera_resume.inc
--connection node_3
+--source include/galera_resume.inc
--source include/wait_until_connected_again.inc
-CALL mtr.add_suppression("WSREP: gcs_caused() returned -1 \\(Operation not permitted\\)");
-
--disable_query_log
--eval SET GLOBAL wsrep_cluster_address = '$wsrep_cluster_address_node3';
--enable_query_log
---source include/wait_until_connected_again.inc
+--source include/galera_wait_ready.inc
+
+CALL mtr.add_suppression("WSREP: gcs_caused() returned -1 \\(Operation not permitted\\)");
diff --git a/mysql-test/suite/galera_3nodes/t/galera_garbd.test b/mysql-test/suite/galera_3nodes/t/galera_garbd.test
index 2d03e8897b9..fa3cba0ab16 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_garbd.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_garbd.test
@@ -7,9 +7,20 @@
--source include/have_innodb.inc
--source include/big_test.inc
+# Save galera ports
+--connection node_1
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_1 = $_NODE_GALERAPORT
+
+--connection node_2
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_2 = $_NODE_GALERAPORT
+
--let $galera_connection_name = node_3
--let $galera_server_number = 3
--source include/galera_connect.inc
+--source suite/galera/include/galera_base_port.inc
+--let $NODE_GALERAPORT_3 = $_NODE_GALERAPORT
# Save original auto_increment_offset values.
--let $node_1=node_1
@@ -28,9 +39,33 @@
--source include/wait_condition.inc
--echo Starting garbd ...
---let $gp1 = `SELECT SUBSTR(@@wsrep_provider_options, LOCATE('base_port =', @@wsrep_provider_options) + LENGTH('base_port = '))`
---let $galera_port_1 = `SELECT SUBSTR('$gp1', 1, LOCATE(';', '$gp1') - 1)`
---exec `dirname $WSREP_PROVIDER`/../../bin/garb/garbd --address "gcomm://127.0.0.1:$galera_port_1" --group my_wsrep_cluster --options 'base_port=$galera_port_3' > $MYSQL_TMP_DIR/garbd.log 2>&1 &
+--perl
+use strict;
+use File::Basename;
+my $provider_dirname = dirname($ENV{WSREP_PROVIDER});
+# Array of possible garbd executable paths to be checked
+# base is $provider_dirname
+my @garbd_executable_search = (
+ # WSREP_PROVIDER is set in galera repository
+ ${provider_dirname}."/garb/garbd",
+ # MariaDB (PR #1147)
+ ${provider_dirname}."/../../bin/garb/garbd"
+);
+my $garbd_executable = '';
+foreach my $garbd (@garbd_executable_search) {
+ if (-f $garbd) {
+ $garbd_executable= $garbd;
+ }
+}
+if ($garbd_executable eq '') {
+ die("Didn't locate garbd\n");
+}
+die unless open(FILE, ">$ENV{MYSQLTEST_VARDIR}/tmp/garbd.inc");
+print FILE "--exec $garbd_executable --address \"gcomm://127.0.0.1:\$NODE_GALERAPORT_1\" --group my_wsrep_cluster --options 'base_port=\$NODE_GALERAPORT_3' > \$MYSQL_TMP_DIR/garbd.log 2>&1 &\n";
+close(FILE);
+EOF
+--source $MYSQLTEST_VARDIR/tmp/garbd.inc
+--remove_file $MYSQLTEST_VARDIR/tmp/garbd.inc
--sleep 5
@@ -44,7 +79,10 @@ INSERT INTO t1 VALUES (1);
SELECT COUNT(*) = 1 FROM t1;
--echo Killing garbd ...
---exec pkill --oldest --full garbd.*$galera_port_3
+# FreeBSD's /bin/pkill only supports short versions of the options:
+# -o Select only the oldest (least recently started)
+# -f Match against full argument lists
+--exec pkill -o -f garbd.*$NODE_GALERAPORT_3
--sleep 5
@@ -61,6 +99,7 @@ DROP TABLE t1;
--echo Restarting node #3 to satisfy MTR's end-of-test checks
--connection node_3
+let $restart_noprint=2;
--source include/start_mysqld.inc
# Restore original auto_increment_offset values.
diff --git a/mysql-test/suite/galera_3nodes/t/galera_gtid_2_cluster.test b/mysql-test/suite/galera_3nodes/t/galera_gtid_2_cluster.test
index c679db1305d..925600ffaa8 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_gtid_2_cluster.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_gtid_2_cluster.test
@@ -138,27 +138,45 @@ select @@gtid_binlog_state;
drop table t1;
stop slave;
reset slave;
+
+--echo cluster 2 node 1
+--connection node_4
+stop slave;
+reset slave;
+
+--echo cluster 1 node 1
+--connection node_1
change master to master_use_gtid=no, ignore_server_ids=();
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
set global GTID_SLAVE_POS="";
--sleep 2
--echo cluster 2 node 1
--connection node_4
-stop slave;
-reset slave;
change master to master_use_gtid=no, ignore_server_ids=();
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
set global GTID_SLAVE_POS="";
--connection node_2
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
--connection node_3
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
--connection node_5
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
--connection node_6
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
#--enable_parsing
#
# This test creates 2x 3 nodes galera cluster.
@@ -272,21 +290,33 @@ select @@gtid_binlog_state;
drop table t1;
stop slave;
change master to master_use_gtid=no, ignore_server_ids=();
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
set global GTID_SLAVE_POS="";
--echo cluster 2 node 1
--connection node_4
stop slave;
change master to master_use_gtid=no, ignore_server_ids=();
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
set global GTID_SLAVE_POS="";
--connection node_2
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
--connection node_3
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
--connection node_5
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
--connection node_6
+set global wsrep_on=OFF;
reset master;
+set global wsrep_on=ON;
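The cleanup hunks above consistently bracket RESET MASTER with wsrep_on toggling. Presumably this keeps the binlog reset a purely local administrative step on each node, so it does not interact with Galera replication while the per-node GTID state is cleared; a minimal sketch of the per-node idiom (node name illustrative):

# node_2 is illustrative; the test applies the same block on every node
--connection node_2
set global wsrep_on=OFF;
reset master;
set global wsrep_on=ON;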
diff --git a/mysql-test/suite/galera_3nodes/t/galera_innobackupex_backup.test b/mysql-test/suite/galera_3nodes/t/galera_innobackupex_backup.test
deleted file mode 100644
index cd5c020ae38..00000000000
--- a/mysql-test/suite/galera_3nodes/t/galera_innobackupex_backup.test
+++ /dev/null
@@ -1,80 +0,0 @@
-#
-# This test uses innobackupex to take a backup on node #2 and then restores that node from backup
-#
-
---source include/galera_cluster.inc
---source include/have_innodb.inc
---source suite/galera/include/have_mariabackup.inc
-
---let $galera_connection_name = node_3
---let $galera_server_number = 3
---source include/galera_connect.inc
-
-# Save original auto_increment_offset values.
---let $node_1=node_1
---let $node_2=node_2
---let $node_3=node_3
---source ../galera/include/auto_increment_offset_save.inc
-
---connection node_1
-CREATE TABLE t1 (f1 INTEGER);
-INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
-
---connection node_2
-SELECT COUNT(*) = 10 FROM t1;
-
---exec rm -rf $MYSQL_TMP_DIR/innobackupex_backup
---exec mariabackup --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group=mysqld.2 --galera-info --port=$NODE_MYPORT_2 --host=127.0.0.1 --no-timestamp $MYSQL_TMP_DIR/innobackupex_backup &> $MYSQL_TMP_DIR/innobackupex-backup.log
---exec mariabackup --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group=mysqld.2 --apply-log --galera-info --port=$NODE_MYPORT_2 --host=127.0.0.1 --no-timestamp $MYSQL_TMP_DIR/innobackupex_backup &> $MYSQL_TMP_DIR/innobackupex-apply.log
-
---source ../galera/include/kill_galera.inc
---sleep 1
-
---connection node_1
-INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
-
---exec rm -rf $MYSQLTEST_VARDIR/mysqld.2/data/*
---exec mariabackup --innobackupex --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group=mysqld.2 --copy-back --port=$NODE_MYPORT_2 --host=127.0.0.1 $MYSQL_TMP_DIR/innobackupex_backup &> $MYSQL_TMP_DIR/innobackupex-restore.log
-
-#
-# Convert the xtrabackup_galera_info into a grastate.dat file
-#
-
---perl
- use strict;
- my $xtrabackup_galera_info_file = $ENV{'MYSQL_TMP_DIR'}.'/innobackupex_backup/xtrabackup_galera_info';
- open(XTRABACKUP_GALERA_INFO, $xtrabackup_galera_info_file) or die "Can not open $xtrabackup_galera_info_file: $!";
- my $xtrabackup_galera_info = <XTRABACKUP_GALERA_INFO>;
- my ($uuid, $seqno) = split(':', $xtrabackup_galera_info);
-
- my $grastate_dat_file = $ENV{'MYSQLTEST_VARDIR'}.'/mysqld.2/data/grastate.dat';
- die "grastate.dat already exists" if -e $grastate_dat_file;
-
- open(GRASTATE_DAT, ">$grastate_dat_file") or die "Can not write to $grastate_dat_file: $!";
- print GRASTATE_DAT "version: 2.1\n";
- print GRASTATE_DAT "uuid: $uuid\n";
- print GRASTATE_DAT "seqno: $seqno\n";
- print GRASTATE_DAT "cert_index:\n";
- exit(0);
-EOF
-
---source include/start_mysqld.inc
---sleep 5
-
---source include/wait_until_connected_again.inc
---let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
---source include/wait_condition.inc
-
-SELECT COUNT(*) = 20 FROM t1;
-
-DROP TABLE t1;
-
---sleep 10
-
---let $galera_connection_name = node_2a
---let $galera_server_number = 2
---source include/galera_connect.inc
---let $node_2=node_2a
-
-# Restore original auto_increment_offset values.
---source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.cnf b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.cnf
index 5ac00fa056b..969f364a1ec 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.cnf
+++ b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.cnf
@@ -10,18 +10,23 @@ wsrep-cluster-address=gcomm://
wsrep_provider_options='base_host=[::1];base_port=@mysqld.1.#galera_port;gmcast.listen_addr=tcp://[::]:@mysqld.1.#galera_port;ist.recv_addr=[::1]:@mysqld.1.#ist_port'
wsrep_sst_receive_address='[::1]:@mysqld.1.#sst_port'
wsrep_node_incoming_address='[::1]:@mysqld.1.port'
+wsrep_node_name=node_1
[mysqld.2]
wsrep_cluster_address='gcomm://[::1]:@mysqld.1.#galera_port'
wsrep_provider_options='base_host=[::1];base_port=@mysqld.2.#galera_port;gmcast.listen_addr=tcp://[::]:@mysqld.2.#galera_port;ist.recv_addr=[::1]:@mysqld.2.#ist_port'
wsrep_sst_receive_address='[::1]:@mysqld.2.#sst_port'
wsrep_node_incoming_address='[::1]:@mysqld.2.port'
+wsrep_node_name=node_2
+wsrep_sst_donor=node_1
[mysqld.3]
wsrep_cluster_address='gcomm://[::1]:@mysqld.1.#galera_port'
wsrep_provider_options='base_host=[::1];base_port=@mysqld.3.#galera_port;gmcast.listen_addr=tcp://[::]:@mysqld.3.#galera_port;ist.recv_addr=[::1]:@mysqld.3.#ist_port'
wsrep_sst_receive_address='[::1]:@mysqld.3.#sst_port'
wsrep_node_incoming_address='[::1]:@mysqld.3.port'
+wsrep_node_name=node_3
+wsrep_sst_donor=node_1
[SST]
transferfmt=@ENV.MTR_GALERA_TFMT
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.test b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.test
index 84c33251c98..71d17c133a5 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup.test
@@ -38,22 +38,31 @@ DROP TABLE t1;
--let $assert_file = $MYSQLTEST_VARDIR/log/mysqld.1.err
--let $assert_only_after = CURRENT_TEST
+# The SSTs happen when the nodes are started for the first time
--let $assert_count = 2
--let $assert_text = Streaming the backup to joiner at \[::1\]
--let $assert_select = Streaming the backup to joiner at \[::1\]
--source include/assert_grep.inc
---let $assert_count = 1
+# There will be 3 ISTs donated from node_1 in Galera 4.
+# The first two happen at the initial startup to populate the certification
+# index. The third one is from the IST which happens during the actual test.
+--let $assert_count = 3
--let $assert_text = async IST sender starting to serve tcp://\[::1\]:
--let $assert_select = async IST sender starting to serve tcp://\[::1\]:
--source include/assert_grep.inc
--let $assert_file = $MYSQLTEST_VARDIR/log/mysqld.2.err
+# There are two ISTs on the joiner, the first at the initial startup, the second
+# during the actual test.
+--let $assert_count = 2
--let $assert_text = IST receiver addr using tcp://\[::1\]
--let $assert_select = IST receiver addr using tcp://\[::1\]
--source include/assert_grep.inc
---let $assert_text = Prepared IST receiver, listening at: tcp://\[::1\]
---let $assert_select = Prepared IST receiver, listening at: tcp://\[::1\]
+# The receiver expects seqnos 3-6 only once.
+--let $assert_count = 1
+--let $assert_text = Prepared IST receiver for 3-6, listening at: tcp://\[::1\]
+--let $assert_select = Prepared IST receiver for 3-6, listening at: tcp://\[::1\]
--source include/assert_grep.inc
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup_section.test b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup_section.test
index 95cd1a5bea5..71d17c133a5 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup_section.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mariabackup_section.test
@@ -39,31 +39,30 @@ DROP TABLE t1;
--let $assert_only_after = CURRENT_TEST
# The SSTs happen when nodes are started first time
---let $assert_count= 2
+--let $assert_count = 2
--let $assert_text = Streaming the backup to joiner at \[::1\]
--let $assert_select = Streaming the backup to joiner at \[::1\]
--source include/assert_grep.inc
-# There will be 1 ISTs donated from node_1 in Galera 3.
+# There will be 3 ISTs donated from node_1 in Galera 4.
# Two first happen at the initial startup to populate the certification
# index. The third one is from the IST which happens during the actual test.
---let $assert_count= 1
+--let $assert_count = 3
--let $assert_text = async IST sender starting to serve tcp://\[::1\]:
--let $assert_select = async IST sender starting to serve tcp://\[::1\]:
--source include/assert_grep.inc
---connection node_2
--let $assert_file = $MYSQLTEST_VARDIR/log/mysqld.2.err
---let $assert_only_after = CURRENT_TEST
-# There is one ISTs on joiner at Galera 3.
---let $assert_count= 1
+# There are two ISTs on the joiner, the first at the initial startup, the second
+# during the actual test.
+--let $assert_count = 2
--let $assert_text = IST receiver addr using tcp://\[::1\]
--let $assert_select = IST receiver addr using tcp://\[::1\]
--source include/assert_grep.inc
-# There will be only one Prepared IST and in Galera 3 segnos are not printed
---let $assert_count= 1
---let $assert_text = Prepared IST receiver, listening at: tcp://\[::1\]
---let $assert_select = Prepared IST receiver, listening at: tcp://\[::1\]
+# The receiver expects seqnos 3-6 only once.
+--let $assert_count = 1
+--let $assert_text = Prepared IST receiver for 3-6, listening at: tcp://\[::1\]
+--let $assert_select = Prepared IST receiver for 3-6, listening at: tcp://\[::1\]
--source include/assert_grep.inc
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.test b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.test
index 4a6de8abb9c..c9c32f23230 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_ipv6_mysqldump.test
@@ -59,6 +59,7 @@ INSERT INTO t1 VALUES (1);
--connection node_2
--echo Starting server ...
--let $start_mysqld_params = --wsrep_sst_auth=sst: --wsrep_sst_method=mysqldump --wsrep-sst-receive-address=[::1].1:$NODE_MYPORT_2
+let $restart_noprint=2;
--source include/start_mysqld.inc
#--source suite/galera/include/galera_load_provider.inc
@@ -77,6 +78,11 @@ DROP TABLE t1;
SELECT VARIABLE_VALUE LIKE '%[::1]%' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_incoming_addresses';
+# restart the node so we don't fail on the WSREP_START_POSITION internal check
+--source include/restart_mysqld.inc
+--source include/wait_until_connected_again.inc
+
+--source ../galera/include/auto_increment_offset_restore.inc
--source suite/galera/include/galera_sst_restore.inc
--connection node_2
CALL mtr.add_suppression("Unsupported protocol downgrade: incremental data collection disabled. Expect abort");
diff --git a/mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test b/mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test
index ebc756d60b1..69e80ee6c3d 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_ist_gcache_rollover.test
@@ -11,7 +11,7 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_debug_sync.inc
---source suite/galera/include/galera_have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
--let $galera_connection_name = node_3
--let $galera_server_number = 3
diff --git a/mysql-test/suite/galera_3nodes/t/galera_load_data_ist.cnf b/mysql-test/suite/galera_3nodes/t/galera_load_data_ist.cnf
deleted file mode 100644
index 35ecb8b5937..00000000000
--- a/mysql-test/suite/galera_3nodes/t/galera_load_data_ist.cnf
+++ /dev/null
@@ -1,4 +0,0 @@
-!include ../galera_3nodes.cnf
-
-[mysqld]
-wsrep-causal-reads=OFF
diff --git a/mysql-test/suite/galera_3nodes/t/galera_load_data_ist.test b/mysql-test/suite/galera_3nodes/t/galera_load_data_ist.test
deleted file mode 100644
index e1140da229b..00000000000
--- a/mysql-test/suite/galera_3nodes/t/galera_load_data_ist.test
+++ /dev/null
@@ -1,124 +0,0 @@
---source include/have_debug_sync.inc
---source include/galera_cluster.inc
---source include/have_innodb.inc
---source include/big_test.inc
-
-# Establish connection to the third node:
---let $galera_connection_name = node_3
---let $galera_server_number = 3
---source include/galera_connect.inc
-
-# Establish additional connection to the second node
-# (which is used in the test for synchronization control):
---let $galera_connection_name = node_2a
---let $galera_server_number = 2
---source include/galera_connect.inc
-
-# Save original auto_increment_offset values:
---let $node_1=node_1
---let $node_2=node_2
---let $node_3=node_3
---source ../galera/include/auto_increment_offset_save.inc
-
-# Create a file for LOAD DATA with 95K entries
---connection node_1
---perl
-open(FILE, ">", "$ENV{'MYSQLTEST_VARDIR'}/tmp/galera_var_load_data_splitting.csv") or die;
-foreach my $i (1..95000) {
- print FILE "$i\n";
-}
-EOF
-
-CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
-
-# Let's wait for the completion of the formation of a cluster
-# of three nodes:
---let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
---source include/wait_condition.inc
---connection node_2
---source include/wait_until_ready.inc
---connection node_3
---source include/wait_until_ready.inc
-
-# Disconnect the third node from the cluster:
-SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
-SET SESSION wsrep_on = OFF;
---let $wait_condition = SELECT VARIABLE_VALUE = 'non-Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
---source include/wait_condition.inc
-SET SESSION wsrep_on = ON;
-SET SESSION wsrep_sync_wait = 0;
-
-# Disable sync wait for control connection:
---connection node_2a
-SET SESSION wsrep_sync_wait = 0;
-
-# Let's wait until the other nodes stop seeing the third
-# node in the cluster:
---let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
---source include/wait_condition.inc
-
-# Record wsrep_last_committed as it was before LOAD DATA:
---connection node_2
---let $wsrep_last_committed_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
-
-# Enable splitting for LOAD DATA:
---let $wsrep_load_data_splitting_orig = `SELECT @@wsrep_load_data_splitting`
-SET GLOBAL wsrep_load_data_splitting = TRUE;
-
-# Stop after the first commit and wait for the IST signal:
-SET DEBUG_SYNC='intermediate_transaction_commit SIGNAL commited WAIT_FOR ist';
-
-# Perform the LOAD DATA statement:
---disable_query_log
-let v1='$MYSQLTEST_VARDIR/tmp/galera_var_load_data_splitting.csv';
---send_eval LOAD DATA INFILE $v1 INTO TABLE t1;
---enable_query_log
-
-# Wait for the first commit:
---connection node_2a
-SET DEBUG_SYNC='now WAIT_FOR commited';
-
-# Initiate the IST:
---connection node_3
-SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0';
-
-# Continue the execution of LOAD DATA:
---connection node_2a
-SET DEBUG_SYNC='now SIGNAL ist';
-
-# Let's wait for the recovery of the cluster
-# of three nodes:
---connection node_1
---let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
---source include/wait_condition.inc
-
-# Save the LOAD DATA results:
---connection node_2
---reap
-
-# Reset all synchronization points and signals:
-SET DEBUG_SYNC='RESET';
-
-# Read the wsrep_last_commited after LOAD DATA:
---let $wsrep_last_committed_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
-
-# Check the records:
-SELECT COUNT(*) = 95000 FROM t1;
-
-# LOAD-ing 95K rows should causes 10 commits to be registered:
---disable_query_log
---eval SELECT $wsrep_last_committed_after = $wsrep_last_committed_before + 10 AS wsrep_last_committed_diff;
---enable_query_log
-
-# Restore the original splitting:
---connection node_1
---eval SET GLOBAL wsrep_load_data_splitting = $wsrep_load_data_splitting_orig;
-
-# Drop test table:
-DROP TABLE t1;
-
-# Restore original auto_increment_offset values:
---source ../galera/include/auto_increment_offset_restore.inc
-
---let $galera_cluster_size=3
---source include/galera_end.inc
diff --git a/mysql-test/suite/galera_3nodes/t/galera_parallel_apply_3nodes.test b/mysql-test/suite/galera_3nodes/t/galera_parallel_apply_3nodes.test
index 7d80d8036a1..659df2b3c93 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_parallel_apply_3nodes.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_parallel_apply_3nodes.test
@@ -31,7 +31,7 @@ SET GLOBAL wsrep_slave_threads = 2;
--connection node_3
SELECT f1 = 111 FROM t1;
-SELECT COUNT(*) IN (1, 2) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE 'committed%';
+SELECT COUNT(*) IN (1, 2) FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%committed%';
--eval SET GLOBAL wsrep_slave_threads = $wsrep_slave_threads_orig;
diff --git a/mysql-test/suite/galera_3nodes/t/galera_pc_weight.cnf b/mysql-test/suite/galera_3nodes/t/galera_pc_weight.cnf
index 57026ce6928..1a61471d581 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_pc_weight.cnf
+++ b/mysql-test/suite/galera_3nodes/t/galera_pc_weight.cnf
@@ -1,5 +1,7 @@
-# We need a dedicated .cnf file, even if empty, in order to force this test to run
-# alone on a freshly started cluster. Otherwise there are adverse interactions with
-# following tests such as galera_3nodes.galera_var_dirty_reads2
+# We need a dedicated .cnf file, even if empty, in order to force this test
+# to run alone on a freshly started cluster. Otherwise there are adverse
+# interactions with following tests such as
+# galera_3nodes.galera_var_dirty_reads2
+!include ../galera_3nodes.cnf
!include ../galera_3nodes.cnf
diff --git a/mysql-test/suite/galera_3nodes/t/galera_pc_weight.test b/mysql-test/suite/galera_3nodes/t/galera_pc_weight.test
index 0a94e7cd85d..729f14a731f 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_pc_weight.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_pc_weight.test
@@ -1,11 +1,11 @@
#
-# Test the pc.weight wsrep provider option. We set Node #1 to have a high weight and then
-# suspend it. This will cause Nodes #2 and #3 to transition to non-primary component.
+# Test the pc.weight wsrep provider option. We set Node #1 to have a high
+# weight and then suspend it. This will cause Nodes #2 and #3 to transition
+# to non-primary component.
#
--source include/big_test.inc
--source include/galera_cluster.inc
---source include/have_innodb.inc
--connection node_1
SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_weight';
@@ -55,6 +55,9 @@ SHOW STATUS LIKE 'wsrep_local_state_comment';
--connection node_1
# For Node #1, we expect a primary component of size 1
+# (NOTE: this is a bit racy as nodes 2 and 3 will try to reconnect ASAP;
+# to avoid the race they should be suspended first as well, but that's
+# not currently possible)
--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
diff --git a/mysql-test/suite/galera_3nodes/t/galera_safe_to_bootstrap.test b/mysql-test/suite/galera_3nodes/t/galera_safe_to_bootstrap.test
index b7b6c66e5ad..082cb546e87 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_safe_to_bootstrap.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_safe_to_bootstrap.test
@@ -3,17 +3,21 @@
#
--source include/galera_cluster.inc
+#
+# Create connection node_3 and save auto increment variables.
+#
--let $galera_connection_name = node_3
--let $galera_server_number = 3
--source include/galera_connect.inc
-# Save original auto_increment_offset values.
---let $node_1=node_1
---let $node_2=node_2
---let $node_3=node_3
+--let $node_1 = node_1
+--let $node_2 = node_2
+--let $node_3 = node_3
+
--source ../galera/include/auto_increment_offset_save.inc
--connection node_1
+
CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
#
@@ -58,6 +62,7 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
#
# Shut down one more node
#
+
--connection node_3
--source include/shutdown_mysqld.inc
@@ -82,8 +87,8 @@ CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
--source include/assert_grep.inc
# Restart one node
-
--connection node_2
+let $restart_noprint=2;
--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
--source include/start_mysqld.inc
@@ -148,15 +153,18 @@ SET SESSION wsrep_on = OFF;
--connection node_1
--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+let $restart_noprint=2;
--source include/start_mysqld.inc
--source include/wait_until_connected_again.inc
--connection node_2
--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+let $restart_noprint=2;
--source include/start_mysqld.inc
--connection node_3
--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.3.expect
+let $restart_noprint=2;
--source include/start_mysqld.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
@@ -169,6 +177,8 @@ CALL mtr.add_suppression("WSREP: wsrep::connect(.*) failed: 7");
CALL mtr.add_suppression("Aborting");
CALL mtr.add_suppression("WSREP: moving position backwards: [0-9]+ -> 0");
CALL mtr.add_suppression("Failed to prepare for incremental state transfer");
+CALL mtr.add_suppression("It may not be safe to bootstrap the cluster from this node");
+CALL mtr.add_suppression("Aborting");
--connection node_3
CALL mtr.add_suppression("WSREP: no nodes coming from prim view, prim not possible");
@@ -177,9 +187,13 @@ CALL mtr.add_suppression("WSREP: wsrep::connect(.*) failed: 7");
CALL mtr.add_suppression("Aborting");
CALL mtr.add_suppression("WSREP: moving position backwards: [0-9]+ -> 0");
CALL mtr.add_suppression("Failed to prepare for incremental state transfer");
+CALL mtr.add_suppression("It may not be safe to bootstrap the cluster from this node");
+CALL mtr.add_suppression("Aborting");
SHOW CREATE TABLE t1;
DROP TABLE t1;
-# Restore original auto_increment_offset values.
+#
+# Restore auto increment variables.
+#
--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_3nodes/t/galera_slave_options_ignore.test b/mysql-test/suite/galera_3nodes/t/galera_slave_options_ignore.test
index 3e8b1557e7b..8e73dee70ae 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_slave_options_ignore.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_slave_options_ignore.test
@@ -25,6 +25,7 @@ SELECT COUNT(*) = 1 FROM db2.t2B;
--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
--connection node_3
+--source include/galera_wait_ready.inc
SELECT COUNT(*) = 0 FROM db1.t1;
SELECT COUNT(*) = 1 FROM db2.t2A;
SELECT COUNT(*) = 1 FROM db2.t2B;
diff --git a/mysql-test/suite/galera_3nodes/t/galera_var_dirty_reads2.test b/mysql-test/suite/galera_3nodes/t/galera_var_dirty_reads2.test
index e3f94a012b8..9f9d6da17b9 100644
--- a/mysql-test/suite/galera_3nodes/t/galera_var_dirty_reads2.test
+++ b/mysql-test/suite/galera_3nodes/t/galera_var_dirty_reads2.test
@@ -117,8 +117,9 @@ SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
--source include/wait_condition.inc
--connection node_2
---let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
+--source include/galera_wait_ready.inc
DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes/t/galera_wsrep_schema.test b/mysql-test/suite/galera_3nodes/t/galera_wsrep_schema.test
new file mode 100644
index 00000000000..fb3af2cd25d
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes/t/galera_wsrep_schema.test
@@ -0,0 +1,84 @@
+#
+# This test performs basic checks on the contents of the wsrep_schema
+#
+# wsrep_members_history checks are temporarily disabled until it
+# can be made configurable.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connection node_1
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--source ../galera/include/auto_increment_offset_save.inc
+
+# Make the test fail if table structure has changed
+
+SHOW CREATE TABLE mysql.wsrep_cluster;
+SHOW CREATE TABLE mysql.wsrep_cluster_members;
+#disabled SHOW CREATE TABLE mysql.wsrep_member_history;
+
+# Checks for the wsrep_cluster table
+
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster;
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster;
+
+# Checks for the wsrep_cluster_members table
+
+SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_members;
+SELECT COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size') FROM mysql.wsrep_cluster_members;
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster_members WHERE node_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_gcomm_uuid');
+
+SELECT node_incoming_address LIKE '127.0.0.1:%' from mysql.wsrep_cluster_members;
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster_members;
+
+# Checks for the wsrep_cluster_member_history table
+
+#disabled SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_member_history;
+#disabled SELECT COUNT(*) = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size') FROM mysql.wsrep_cluster_member_history;
+SELECT COUNT(*) = 1 FROM mysql.wsrep_cluster_members WHERE node_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_gcomm_uuid');
+
+#disabled SELECT last_view_id = (SELECT view_id FROM mysql.wsrep_cluster) FROM mysql.wsrep_cluster_member_history;
+#disabled SELECT last_view_seqno = (SELECT view_seqno FROM mysql.wsrep_cluster) FROM mysql.wsrep_cluster_member_history;
+#disabled SELECT node_incoming_address LIKE '127.0.0.1:%' from mysql.wsrep_cluster_member_history;
+#disabled SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster_member_history;
+
+--connection node_2
+--source include/shutdown_mysqld.inc
+
+--connection node_1
+--source include/wait_until_connected_again.inc
+
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster;
+SELECT COUNT(*) = 2 FROM mysql.wsrep_cluster_members;
+#disabled SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_member_history;
+#disabled SELECT COUNT(*) = 2 FROM mysql.wsrep_cluster_member_history WHERE last_view_id = (SELECT MAX(last_view_id) FROM mysql.wsrep_cluster_member_history);
+
+--connection node_2
+let $restart_noprint=2;
+--source include/start_mysqld.inc
+--source include/wait_until_connected_again.inc
+
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster;
+SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_members;
+#disabled SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_member_history WHERE last_view_id = (SELECT MAX(last_view_id) FROM mysql.wsrep_cluster_member_history);
+
+--connection node_1
+SELECT cluster_uuid = (SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_state_uuid') FROM mysql.wsrep_cluster;
+SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_members;
+#disabled SELECT COUNT(*) = 3 FROM mysql.wsrep_cluster_member_history WHERE last_view_id = (SELECT MAX(last_view_id) FROM mysql.wsrep_cluster_member_history);
+
+--source ../galera/include/auto_increment_offset_restore.inc
+
+--connection node_1
+CALL mtr.add_suppression("SYNC message from member");
+
+--connection node_2
+CALL mtr.add_suppression("SYNC message from member");
+
+--connection node_3
+CALL mtr.add_suppression("SYNC message from member");
diff --git a/mysql-test/suite/galera_3nodes_sr/disabled.def b/mysql-test/suite/galera_3nodes_sr/disabled.def
new file mode 100644
index 00000000000..0944abd0ad5
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/disabled.def
@@ -0,0 +1,7 @@
+GCF-336 :
+GCF-582 :
+GCF-609 :
+GCF-810A :
+GCF-810B :
+GCF-810C :
+galera_sr_kill_slave_after_apply_rollback2 : \ No newline at end of file
diff --git a/mysql-test/suite/galera_3nodes_sr/galera_3nodes.cnf b/mysql-test/suite/galera_3nodes_sr/galera_3nodes.cnf
new file mode 100644
index 00000000000..62c8214b8f2
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/galera_3nodes.cnf
@@ -0,0 +1 @@
+!include ../galera_3nodes/galera_3nodes.cnf
diff --git a/mysql-test/suite/galera_3nodes_sr/my.cnf b/mysql-test/suite/galera_3nodes_sr/my.cnf
new file mode 100644
index 00000000000..bb25b95ceea
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/my.cnf
@@ -0,0 +1 @@
+!include galera_3nodes.cnf
diff --git a/mysql-test/suite/galera_3nodes_sr/r/GCF-336.result b/mysql-test/suite/galera_3nodes_sr/r/GCF-336.result
new file mode 100644
index 00000000000..bb6c11edf36
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/GCF-336.result
@@ -0,0 +1,26 @@
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) > 0 FROM t1;
+COUNT(*) > 0
+1
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1';
+SET SESSION wsrep_sync_wait=0;
+INSERT INTO t1 VALUES (2);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+COMMIT;
+ERROR 08S01: WSREP has not yet prepared node for application use
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate=0';
+DROP TABLE t1;
+CALL mtr.add_suppression("replication aborted");
+CALL mtr.add_suppression("WSREP: fragment replication failed: 3");
+CALL mtr.add_suppression("WSREP: failed to send SR rollback for ");
diff --git a/mysql-test/suite/galera_3nodes_sr/r/GCF-582.result b/mysql-test/suite/galera_3nodes_sr/r/GCF-582.result
new file mode 100644
index 00000000000..9e2a4823973
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/GCF-582.result
@@ -0,0 +1,23 @@
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+5
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+5
+COMMIT;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+5
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+5
+DROP TABLE t1;
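GCF-582 exercises streaming replication: with wsrep_trx_fragment_size = 1, each single-row INSERT of the still-open transaction ends up as its own replicated fragment recorded in mysql.wsrep_streaming_log, which is why the COUNT(*) checks above return 5 before COMMIT. A minimal interactive sketch of the same mechanism (table name and values illustrative):

# t_sr is an illustrative table name (the test above uses t1)
CREATE TABLE t_sr (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
SET SESSION wsrep_trx_fragment_size = 1;
SET AUTOCOMMIT = OFF;
START TRANSACTION;
INSERT INTO t_sr VALUES (1);
INSERT INTO t_sr VALUES (2);
# fragments of the open transaction are already in the streaming log here
SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
COMMIT;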
diff --git a/mysql-test/suite/galera_3nodes_sr/r/GCF-606.result b/mysql-test/suite/galera_3nodes_sr/r/GCF-606.result
new file mode 100644
index 00000000000..775f7ee0412
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/GCF-606.result
@@ -0,0 +1,38 @@
+connection node_2;
+connection node_1;
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+connection node_2;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (20);
+INSERT INTO t1 VALUES (21);
+INSERT INTO t1 VALUES (22);
+INSERT INTO t1 VALUES (23);
+INSERT INTO t1 VALUES (24);
+connection node_1;
+connection node_2a;
+SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+SET WSREP_ON=ON;
+connection node_1;
+connection node_2a;
+SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
+connection node_1;
+connection node_1;
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+0
+connection node_2;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+SELECT * FROM t1;
+f1
+COMMIT;
+connection node_1;
+SELECT * FROM t1;
+f1
+DROP TABLE t1;
+connection node_2;
+CALL mtr.add_suppression("WSREP: failed to send SR rollback for ");
diff --git a/mysql-test/suite/galera_3nodes_sr/r/GCF-609.result b/mysql-test/suite/galera_3nodes_sr/r/GCF-609.result
new file mode 100644
index 00000000000..8fe13c7e2bf
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/GCF-609.result
@@ -0,0 +1,20 @@
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size=1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+SET SESSION wsrep_trx_fragment_size=1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
+INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+INSERT INTO t1 VALUES (31),(32),(33);
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+0
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+0
+COMMIT;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/r/GCF-810A.result b/mysql-test/suite/galera_3nodes_sr/r/GCF-810A.result
new file mode 100644
index 00000000000..9a83ff3c041
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/GCF-810A.result
@@ -0,0 +1,256 @@
+SET GLOBAL debug="d,crash_last_fragment_commit_before_fragment_removal";
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE = InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+crash_last_fragment_commit_before_fragment_removal
+COMMIT;
+Got one of the listed errors
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+COMMIT;
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET GLOBAL debug = '';
+SET GLOBAL debug="d,crash_last_fragment_commit_after_fragment_removal";
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE = InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+crash_last_fragment_commit_after_fragment_removal
+COMMIT;
+Got one of the listed errors
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+COMMIT;
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET GLOBAL debug = '';
+SET GLOBAL debug="d,crash_replicate_fragment_success";
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE = InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+crash_replicate_fragment_success
+COMMIT;
+Got one of the listed errors
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+COMMIT;
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET GLOBAL debug = '';
+SET GLOBAL debug="d,crash_replicate_fragment_after_certify";
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE = InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+crash_replicate_fragment_after_certify
+COMMIT;
+Got one of the listed errors
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+COMMIT;
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET GLOBAL debug = '';
+SET GLOBAL debug="d,crash_replicate_fragment_before_certify";
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE = InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+crash_replicate_fragment_before_certify
+COMMIT;
+Got one of the listed errors
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+COMMIT;
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET GLOBAL debug = '';
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/r/GCF-810B.result b/mysql-test/suite/galera_3nodes_sr/r/GCF-810B.result
new file mode 100644
index 00000000000..bbec3531a49
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/GCF-810B.result
@@ -0,0 +1,100 @@
+SET GLOBAL debug="d,crash_apply_cb_before_append_frag";
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE = InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+COMMIT;
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+COMMIT;
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET GLOBAL debug = '';
+SET GLOBAL debug="d,crash_apply_cb_after_append_frag";
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE = InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+COMMIT;
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+COMMIT;
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET GLOBAL debug = '';
+CALL mtr.add_suppression("WSREP: Action message in non-primary configuration from member");
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/r/GCF-810C.result b/mysql-test/suite/galera_3nodes_sr/r/GCF-810C.result
new file mode 100644
index 00000000000..1a6dcbfd392
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/GCF-810C.result
@@ -0,0 +1,177 @@
+SET GLOBAL debug="d,crash_commit_cb_last_fragment_commit_success";
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE = InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+SET SESSION wsrep_trx_fragment_size=1;
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary');
+SELECT 1 FROM t1;
+Got one of the listed errors
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET GLOBAL debug="d,crash_commit_cb_before_last_fragment_commit";
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE = InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+SET SESSION wsrep_trx_fragment_size=1;
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary');
+SELECT 1 FROM t1;
+Got one of the listed errors
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET GLOBAL debug="d,crash_apply_cb_after_fragment_removal";
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE = InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+SET SESSION wsrep_trx_fragment_size=1;
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary');
+SELECT 1 FROM t1;
+Got one of the listed errors
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET GLOBAL debug="d,crash_apply_cb_before_fragment_removal";
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE = InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('secondary'),('secondary'),('secondary'),('secondary'),('secondary');
+SET SESSION wsrep_trx_fragment_size=1;
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary'),('primary');
+SELECT 1 FROM t1;
+Got one of the listed errors
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) > 0 FROM t1 WHERE f1 = 'primary';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f1 = 'secondary';
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/r/GCF-817.result b/mysql-test/suite/galera_3nodes_sr/r/GCF-817.result
new file mode 100644
index 00000000000..4eb0ebca4f4
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/GCF-817.result
@@ -0,0 +1,54 @@
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+connection node_2;
+connection node_3;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (7);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET SESSION wsrep_sync_wait = 0;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+connection node_2;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1';
+SET SESSION wsrep_on = OFF;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+connection node_2;
+connection node_3;
+connection node_1a;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate=0';
+connection node_3;
+connection node_1a;
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1a;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/r/GCF-832.result b/mysql-test/suite/galera_3nodes_sr/r/GCF-832.result
new file mode 100644
index 00000000000..8333fff98db
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/GCF-832.result
@@ -0,0 +1,25 @@
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+connection node_2;
+connection node_3;
+connection node_2;
+SET GLOBAL debug_dbug="d,crash_last_fragment_commit_after_fragment_removal";
+CREATE TABLE t1 (f1 VARCHAR(30)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+COMMIT;
+ERROR HY000: Lost connection to MySQL server during query
+# restart
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/r/galera_sr_isolate_master.result b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_isolate_master.result
new file mode 100644
index 00000000000..bb4eb829abc
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_isolate_master.result
@@ -0,0 +1,80 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+connection node_2;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) > 0 FROM t1;
+COUNT(*) > 0
+1
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+connection node_2;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_2;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_3;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1a;
+SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
+connection node_2;
+connection node_3;
+connection node_1a;
+connection node_1;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+COMMIT;
+connection node_2;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_3;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+CALL mtr.add_suppression("failed to send SR rollback for");
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/r/galera_sr_join_slave.result b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_join_slave.result
new file mode 100644
index 00000000000..dbdd95f837a
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_join_slave.result
@@ -0,0 +1,40 @@
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+connection node_1;
+connection node_2;
+connection node_3;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+connection node_2;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+# restart
+connection node_1;
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (7);
+INSERT INTO t1 VALUES (8);
+INSERT INTO t1 VALUES (9);
+INSERT INTO t1 VALUES (10);
+COMMIT;
+SELECT COUNT(*) = 10 FROM t1;
+COUNT(*) = 10
+1
+connection node_2;
+SELECT COUNT(*) = 10 FROM t1;
+COUNT(*) = 10
+1
+connection node_3;
+SELECT COUNT(*) = 10 FROM t1;
+COUNT(*) = 10
+1
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_master.result b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_master.result
new file mode 100644
index 00000000000..4547d06dcf9
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_master.result
@@ -0,0 +1,34 @@
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+connection node_2;
+connection node_3;
+connection node_2;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_3;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_2;
+Killing server ...
+connection node_3;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+INSERT INTO t1 VALUES (1);
+connection node_2;
+# restart
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_after_apply.result b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_after_apply.result
new file mode 100644
index 00000000000..db04c24b5b1
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_after_apply.result
@@ -0,0 +1,54 @@
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+connection node_2;
+connection node_3;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+Killing server ...
+connection node_1;
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (7);
+INSERT INTO t1 VALUES (8);
+INSERT INTO t1 VALUES (9);
+INSERT INTO t1 VALUES (10);
+connection node_2;
+# restart
+connection node_1;
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+INSERT INTO t1 VALUES (13);
+INSERT INTO t1 VALUES (14);
+INSERT INTO t1 VALUES (15);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 15 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 15
+1
+connection node_1;
+COMMIT;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT COUNT(*) = 15 FROM t1;
+COUNT(*) = 15
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+connection node_3;
+CALL mtr.add_suppression("WSREP: Action message in non-primary configuration from member");
diff --git a/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_after_apply_rollback.result b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_after_apply_rollback.result
new file mode 100644
index 00000000000..ff9215cf31f
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_after_apply_rollback.result
@@ -0,0 +1,59 @@
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+connection node_2;
+connection node_3;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+Killing server ...
+connection node_1;
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (7);
+INSERT INTO t1 VALUES (8);
+INSERT INTO t1 VALUES (9);
+INSERT INTO t1 VALUES (10);
+connection node_2;
+# restart
+connection node_1;
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+INSERT INTO t1 VALUES (13);
+INSERT INTO t1 VALUES (14);
+INSERT INTO t1 VALUES (15);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 15 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 15
+1
+connection node_1;
+ROLLBACK;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_after_apply_rollback2.result b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_after_apply_rollback2.result
new file mode 100644
index 00000000000..21e301ed353
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_after_apply_rollback2.result
@@ -0,0 +1,31 @@
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+Killing server ...
+INSERT INTO t1 VALUES (6);
+ROLLBACK;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_before_apply.result b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_before_apply.result
new file mode 100644
index 00000000000..e9dc5518e96
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_kill_slave_before_apply.result
@@ -0,0 +1,45 @@
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+connection node_2;
+connection node_3;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+connection node_2;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1;
+CREATE TABLE t2 (f1 INTEGER);
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+LOCK TABLE t2 WRITE;
+connection node_1;
+INSERT INTO t2 VALUES (1);
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+Killing server ...
+# restart
+connection node_1;
+COMMIT;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/mysql-test/suite/galera_3nodes_sr/r/galera_sr_threeway_split.result b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_threeway_split.result
new file mode 100644
index 00000000000..1a50bace279
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_threeway_split.result
@@ -0,0 +1,117 @@
+connection node_2;
+connection node_1;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+connection node_2;
+connection node_3;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (10);
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+INSERT INTO t1 VALUES (13);
+INSERT INTO t1 VALUES (14);
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+connection node_2;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (200);
+INSERT INTO t1 VALUES (201);
+INSERT INTO t1 VALUES (202);
+INSERT INTO t1 VALUES (203);
+INSERT INTO t1 VALUES (204);
+connection node_3;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (300);
+INSERT INTO t1 VALUES (301);
+INSERT INTO t1 VALUES (302);
+INSERT INTO t1 VALUES (303);
+INSERT INTO t1 VALUES (304);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connect node_3a, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1a;
+connection node_2a;
+SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+connection node_3a;
+SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+connection node_1a;
+INSERT INTO t1 VALUES (20);
+INSERT INTO t1 VALUES (21);
+INSERT INTO t1 VALUES (22);
+INSERT INTO t1 VALUES (23);
+INSERT INTO t1 VALUES (24);
+connection node_2a;
+SET SESSION wsrep_on = ON;
+SET SESSION wsrep_sync_wait = 15;
+SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
+connection node_3a;
+SET SESSION wsrep_on = ON;
+SET SESSION wsrep_sync_wait = 15;
+SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
+connection node_1a;
+connection node_2a;
+connection node_3a;
+connection node_2;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+wsrep_gcomm_uuid_match
+1
+connection node_3;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+COUNT(DISTINCT node_uuid) = 1
+1
+wsrep_gcomm_uuid_match
+1
+connection node_1;
+INSERT INTO t1 VALUES (30);
+INSERT INTO t1 VALUES (31);
+INSERT INTO t1 VALUES (32);
+INSERT INTO t1 VALUES (33);
+INSERT INTO t1 VALUES (34);
+COMMIT;
+SELECT COUNT(*) = 15, MIN(f1) = 10, MAX(f1) = 34 FROM t1;
+COUNT(*) = 15 MIN(f1) = 10 MAX(f1) = 34
+1 1 1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+connection node_2;
+COMMIT;
+SELECT COUNT(*) = 15, MIN(f1) = 10, MAX(f1) = 34 FROM t1;
+COUNT(*) = 15 MIN(f1) = 10 MAX(f1) = 34
+1 1 1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+connection node_3;
+COMMIT;
+SELECT COUNT(*) = 15, MIN(f1) = 10, MAX(f1) = 34 FROM t1;
+COUNT(*) = 15 MIN(f1) = 10 MAX(f1) = 34
+1 1 1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=ON;
+DROP TABLE t1;
+connection node_1;
+CALL mtr.add_suppression("WSREP: failed to send SR rollback for ");
+connection node_2;
+CALL mtr.add_suppression("WSREP: failed to send SR rollback for ");
+connection node_3;
+CALL mtr.add_suppression("WSREP: failed to send SR rollback for ");
diff --git a/mysql-test/suite/galera_3nodes_sr/r/galera_sr_threeway_split_no_primary.result b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_threeway_split_no_primary.result
new file mode 100644
index 00000000000..c634bac9239
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/r/galera_sr_threeway_split_no_primary.result
@@ -0,0 +1,85 @@
+connection node_2;
+connection node_1;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connect node_3a, 127.0.0.1, root, , test, $NODE_MYPORT_3;
+connection node_1;
+connection node_2;
+connection node_3;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+SET SESSION wsrep_trx_fragment_size=1;
+BEGIN;
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+connection node_2;
+SET SESSION wsrep_trx_fragment_size=1;
+BEGIN;
+INSERT INTO t1 VALUES (21);
+INSERT INTO t1 VALUES (22);
+connection node_3;
+SET SESSION wsrep_trx_fragment_size=1;
+BEGIN;
+INSERT INTO t1 VALUES (31);
+INSERT INTO t1 VALUES (32);
+connection node_2a;
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
+connection node_3a;
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+SHOW STATUS LIKE 'wsrep_cluster_status';
+Variable_name Value
+wsrep_cluster_status non-Primary
+SET SESSION wsrep_sync_wait = DEFAULT;
+connection node_2a;
+SET SESSION wsrep_sync_wait = 0;
+SHOW STATUS LIKE 'wsrep_cluster_status';
+Variable_name Value
+wsrep_cluster_status non-Primary
+SET SESSION wsrep_sync_wait = DEFAULT;
+connection node_3a;
+SET SESSION wsrep_sync_wait = 0;
+SHOW STATUS LIKE 'wsrep_cluster_status';
+Variable_name Value
+wsrep_cluster_status non-Primary
+SET SESSION wsrep_sync_wait = DEFAULT;
+connection node_2a;
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0';
+connection node_3a;
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0';
+connection node_1a;
+connection node_2a;
+connection node_3a;
+connection node_1a;
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+expect 0
+0
+INSERT INTO t1 VALUES(11);
+INSERT INTO t1 VALUES(21);
+INSERT INTO t1 VALUES(31);
+SELECT * FROM t1;
+f1
+11
+21
+31
+connection node_2a;
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+expect 0
+0
+connection node_3a;
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+expect 0
+0
+connection node_1;
+INSERT INTO t1 VALUES(103);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_2;
+INSERT INTO t1 VALUES(203);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_3;
+INSERT INTO t1 VALUES(303);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/t/GCF-336.test b/mysql-test/suite/galera_3nodes_sr/t/GCF-336.test
new file mode 100644
index 00000000000..b8d46db74f1
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/GCF-336.test
@@ -0,0 +1,47 @@
+--source include/galera_cluster.inc
+
+--connection node_2
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connection node_2a
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) > 0 FROM t1;
+
+--connection node_2a
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1';
+SET SESSION wsrep_sync_wait=0;
+
+--connection node_2
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (2);
+--error ER_UNKNOWN_COM_ERROR
+COMMIT;
+
+--connection node_2a
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate=0';
+--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
+--let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+--source include/wait_condition.inc
+--let $wait_condition = SELECT VARIABLE_VALUE = 'ON' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready';
+--source include/wait_condition.inc
+DROP TABLE t1;
+
+CALL mtr.add_suppression("replication aborted");
+CALL mtr.add_suppression("WSREP: fragment replication failed: 3");
+CALL mtr.add_suppression("WSREP: failed to send SR rollback for ");
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connection node_3
+--source include/galera_wait_ready.inc
diff --git a/mysql-test/suite/galera_3nodes_sr/t/GCF-582.test b/mysql-test/suite/galera_3nodes_sr/t/GCF-582.test
new file mode 100644
index 00000000000..bf19ea84c87
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/GCF-582.test
@@ -0,0 +1,39 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+--connection node_2
+--source include/shutdown_mysqld.inc
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+--source include/start_mysqld.inc
+--source include/wait_until_connected_again.inc
+--source include/galera_wait_ready.inc
+
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COMMIT;
+SELECT COUNT(*) FROM t1;
+
+--connection node_2
+SELECT COUNT(*) FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/t/GCF-606.test b/mysql-test/suite/galera_3nodes_sr/t/GCF-606.test
new file mode 100644
index 00000000000..6d49247ab5e
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/GCF-606.test
@@ -0,0 +1,80 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test the case where the cluster splits 3 ways.
+# The master transitions to a non-prim view and back to prim. Its ongoing
+# transaction should fail to commit.
+#
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+--connection node_2
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (20);
+INSERT INTO t1 VALUES (21);
+INSERT INTO t1 VALUES (22);
+INSERT INTO t1 VALUES (23);
+INSERT INTO t1 VALUES (24);
+
+--connection node_1
+--let $wait_condition = SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log
+--source include/wait_condition.inc
+
+#
+# Isolate node_2 into a separate non-primary component
+#
+
+--connection node_2a
+SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+--disable_query_log
+SET WSREP_ON=OFF;
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+SET WSREP_ON=ON;
+--enable_query_log
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+#
+# Confirm that node_1 has no transactions in SR table
+#
+
+--let $wait_condition = SELECT COUNT(DISTINCT node_uuid) = 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+
+#
+# Restore cluster
+#
+
+--connection node_2a
+SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
+--source include/galera_wait_ready.inc
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+--connection node_1
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+--error ER_LOCK_DEADLOCK
+COMMIT;
+SELECT * FROM t1;
+COMMIT;
+
+--connection node_1
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+--connection node_2
+CALL mtr.add_suppression("WSREP: failed to send SR rollback for ");
diff --git a/mysql-test/suite/galera_3nodes_sr/t/GCF-609.test b/mysql-test/suite/galera_3nodes_sr/t/GCF-609.test
new file mode 100644
index 00000000000..fd346cf365b
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/GCF-609.test
@@ -0,0 +1,30 @@
+#
+# GCF-609 SR: Assertion wsrep_apply_cb on slave after master causes a duplicate key error
+#
+
+--source include/galera_cluster.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+--connection node_1
+SET SESSION wsrep_trx_fragment_size=1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+
+--connection node_2
+SET SESSION wsrep_trx_fragment_size=1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
+INSERT INTO t1 VALUES (31),(32),(33);
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COMMIT;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/t/GCF-810A.test b/mysql-test/suite/galera_3nodes_sr/t/GCF-810A.test
new file mode 100644
index 00000000000..fda16b7265c
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/GCF-810A.test
@@ -0,0 +1,137 @@
+#
+# Exercise the crash points which crash the server at various points important to SR
+#
+
+--source include/big_test.inc
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
+
+--connect node_2_check, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connect node_3_check, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
+#
+# crash_last_fragment_commit_before_fragment_removal
+#
+
+--connection node_2
+--enable_reconnect
+SET GLOBAL debug_dbug="d,crash_last_fragment_commit_before_fragment_removal";
+--source suite/galera_3nodes/include/galera_expect_node_crash.inc
+--source suite/galera_3nodes/include/galera_sr_crash_prepare_nodes.inc
+
+--echo crash_last_fragment_commit_before_fragment_removal
+
+--connection node_2
+--error 2006,2013
+COMMIT;
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE variable_name = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+--connection node_2
+--source include/start_mysqld.inc
+--source suite/galera_3nodes/include/galera_sr_crash_post_check.inc
+
+#
+# crash_last_fragment_commit_after_fragment_removal
+#
+
+--connection node_2
+SET GLOBAL debug_dbug="d,crash_last_fragment_commit_after_fragment_removal";
+--source suite/galera_3nodes/include/galera_expect_node_crash.inc
+--source suite/galera_3nodes/include/galera_sr_crash_prepare_nodes.inc
+
+--echo crash_last_fragment_commit_after_fragment_removal
+
+--connection node_2
+--error 2006,2013
+COMMIT;
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE variable_name = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+--connection node_2
+--source include/start_mysqld.inc
+--source suite/galera_3nodes/include/galera_sr_crash_post_check.inc
+
+#
+# crash_last_fragment_commit_success
+#
+# The crash_last_fragment_commit_success case is commented out:
+# the changes will be visible on the slave due to the successful commit,
+# so galera_sr_crash_post_check would fail.
+#
+
+# --connection node_2
+# SET GLOBAL debug_dbug="d,crash_last_fragment_commit_success";
+# --source suite/galera_3nodes/include/galera_expect_node_crash.inc
+# --source suite/galera_3nodes/include/galera_sr_crash_prepare_nodes.inc
+
+# --echo crash_last_fragment_commit_success
+
+# --connection node_2
+# --error 2006,2013
+# COMMIT;
+
+# --source include/start_mysqld.inc
+# --source suite/galera_3nodes/include/galera_sr_crash_post_check.inc
+
+#
+# crash_replicate_fragment_success
+#
+
+--connection node_2
+SET GLOBAL debug_dbug="d,crash_replicate_fragment_success";
+--source suite/galera_3nodes/include/galera_expect_node_crash.inc
+--source suite/galera_3nodes/include/galera_sr_crash_prepare_nodes.inc
+
+--echo crash_replicate_fragment_success
+
+--connection node_2
+--error 2006,2013
+COMMIT;
+
+--source include/start_mysqld.inc
+--source suite/galera_3nodes/include/galera_sr_crash_post_check.inc
+
+#
+# crash_replicate_fragment_after_certify
+#
+
+--connection node_2
+SET GLOBAL debug_dbug="d,crash_replicate_fragment_after_certify";
+--source suite/galera_3nodes/include/galera_expect_node_crash.inc
+--source suite/galera_3nodes/include/galera_sr_crash_prepare_nodes.inc
+
+--echo crash_replicate_fragment_after_certify
+
+--connection node_2
+--error 2006,2013
+COMMIT;
+
+--source include/start_mysqld.inc
+--source suite/galera_3nodes/include/galera_sr_crash_post_check.inc
+
+#
+# crash_replicate_fragment_before_certify
+#
+
+--connection node_2
+SET GLOBAL debug_dbug="d,crash_replicate_fragment_before_certify";
+--source suite/galera_3nodes/include/galera_expect_node_crash.inc
+--source suite/galera_3nodes/include/galera_sr_crash_prepare_nodes.inc
+
+--echo crash_replicate_fragment_before_certify
+
+--connection node_2
+--error 2006,2013
+COMMIT;
+
+--source include/start_mysqld.inc
+--source suite/galera_3nodes/include/galera_sr_crash_post_check.inc
+
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/t/GCF-810B.test b/mysql-test/suite/galera_3nodes_sr/t/GCF-810B.test
new file mode 100644
index 00000000000..cb41b2348de
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/GCF-810B.test
@@ -0,0 +1,49 @@
+#
+# Exercise the crash points which crash the server at various points important to SR
+#
+
+--source include/big_test.inc
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
+
+--connect node_2_check, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connect node_3_check, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
+#
+# crash_apply_cb_before_append_frag
+#
+
+--connection node_3
+SET GLOBAL debug_dbug="d,crash_apply_cb_before_append_frag";
+--source suite/galera_3nodes/include/galera_expect_node_crash.inc
+--source suite/galera_3nodes/include/galera_sr_crash_prepare_nodes2.inc
+
+--connection node_3
+--error 0,2006,2013
+COMMIT;
+
+--source include/start_mysqld.inc
+--sleep 5
+--source suite/galera_3nodes/include/galera_sr_crash_post_check2.inc
+
+#
+# crash_apply_cb_after_append_frag
+#
+
+--connection node_3
+SET GLOBAL debug_dbug="d,crash_apply_cb_after_append_frag";
+--source suite/galera_3nodes/include/galera_expect_node_crash.inc
+--source suite/galera_3nodes/include/galera_sr_crash_prepare_nodes2.inc
+
+--connection node_3
+--error 0,2006,2013
+COMMIT;
+
+--source include/start_mysqld.inc
+--sleep 5
+--source suite/galera_3nodes/include/galera_sr_crash_post_check2.inc
+
+--connection node_1
+CALL mtr.add_suppression("WSREP: Action message in non-primary configuration from member");
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/t/GCF-810C.test b/mysql-test/suite/galera_3nodes_sr/t/GCF-810C.test
new file mode 100644
index 00000000000..6c4da2cb5f0
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/GCF-810C.test
@@ -0,0 +1,70 @@
+#
+# Exercise the crash points which crash the server at various points important to SR
+#
+
+--source include/big_test.inc
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
+
+--connect node_2_check, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--enable_reconnect
+--connect node_3_check, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
+#
+# crash_commit_cb_last_fragment_commit_success
+#
+
+--connection node_3
+SET GLOBAL debug_dbug="d,crash_commit_cb_last_fragment_commit_success";
+--source suite/galera_3nodes/include/galera_expect_node_crash.inc
+--source suite/galera_3nodes/include/galera_sr_crash_prepare_nodes3.inc
+
+--source include/start_mysqld.inc
+--sleep 5
+--source include/galera_wait_ready.inc
+--source suite/galera_3nodes/include/galera_sr_crash_post_check3.inc
+
+#
+# crash_commit_cb_before_last_fragment_commit
+#
+
+--connection node_3
+SET GLOBAL debug_dbug="d,crash_commit_cb_before_last_fragment_commit";
+--source suite/galera_3nodes/include/galera_expect_node_crash.inc
+--source suite/galera_3nodes/include/galera_sr_crash_prepare_nodes3.inc
+
+--source include/start_mysqld.inc
+--sleep 5
+--source include/galera_wait_ready.inc
+--source suite/galera_3nodes/include/galera_sr_crash_post_check3.inc
+
+#
+# crash_apply_cb_after_fragment_removal
+#
+
+--connection node_3
+SET GLOBAL debug_dbug="d,crash_apply_cb_after_fragment_removal";
+--source suite/galera_3nodes/include/galera_expect_node_crash.inc
+--source suite/galera_3nodes/include/galera_sr_crash_prepare_nodes3.inc
+
+--source include/start_mysqld.inc
+--sleep 5
+--source include/galera_wait_ready.inc
+--source suite/galera_3nodes/include/galera_sr_crash_post_check3.inc
+
+#
+# crash_apply_cb_before_fragment_removal
+#
+
+--connection node_3
+SET GLOBAL debug_dbug="d,crash_apply_cb_before_fragment_removal";
+--source suite/galera_3nodes/include/galera_expect_node_crash.inc
+--source suite/galera_3nodes/include/galera_sr_crash_prepare_nodes3.inc
+
+--source include/start_mysqld.inc
+--sleep 5
+--source include/galera_wait_ready.inc
+--source suite/galera_3nodes/include/galera_sr_crash_post_check3.inc
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/t/GCF-817.test b/mysql-test/suite/galera_3nodes_sr/t/GCF-817.test
new file mode 100644
index 00000000000..a32da959429
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/GCF-817.test
@@ -0,0 +1,109 @@
+#
+# GCF-817 SR: master removes SR trx in non-primary view
+#
+
+--source include/galera_cluster.inc
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--source ../galera/include/auto_increment_offset_save.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (7);
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+SET SESSION wsrep_sync_wait = 0;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+
+--connection node_1a
+# Force node #1 to go non-primary
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate=1';
+
+SET SESSION wsrep_on = OFF;
+--let $wait_condition = SELECT VARIABLE_VALUE = 'non-Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+--source include/wait_condition.inc
+
+# SR table on master should still contain entries after going non-Prim
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+# SR table on slave should eventually clean up entries when master goes non-Prim
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log
+--source include/wait_condition.inc
+
+--connection node_3
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+# Restore node #1 to primary
+
+--connection node_1a
+#
+# The following sleep is a workaround for issue GCF-861.
+# Normally it's sufficient to make sure that the CC happened
+# by checking that wsrep_cluster_size has shrunk, as above.
+# However, that is not always enough, so we sleep a few seconds.
+# See GCF-861 for how to reproduce.
+#
+
+--connection node_1a
+--sleep 6
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate=0';
+
+--connection node_3
+--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+--connection node_1a
+--let $wait_condition = SELECT VARIABLE_VALUE = 'Primary' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status';
+--source include/wait_condition.inc
+
+--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
+
+--source include/galera_wait_ready.inc
+
+# SR table on master should contain no entries after going back to Prim state
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log
+--source include/wait_condition.inc
+
+--connection node_2
+# And none on slave
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+--error ER_LOCK_DEADLOCK
+COMMIT;
+
+# SR table is now empty everywhere
+--connection node_1a
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+DROP TABLE t1;
+
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_3nodes_sr/t/GCF-832.test b/mysql-test/suite/galera_3nodes_sr/t/GCF-832.test
new file mode 100644
index 00000000000..eb7f5603452
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/GCF-832.test
@@ -0,0 +1,43 @@
+#
+# GCF-832 SR: mysql.wsrep_streaming_log table remains populated on all nodes after crash
+# followed by immediate recovery
+#
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--source ../galera/include/auto_increment_offset_save.inc
+
+--connection node_2
+SET GLOBAL debug_dbug="d,crash_last_fragment_commit_after_fragment_removal";
+
+--let $_server_id= `SELECT @@server_id`
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.$_server_id.expect
+--exec echo "wait" > $_expect_file_name
+
+CREATE TABLE t1 (f1 VARCHAR(30)) ENGINE=InnoDB;
+
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES ('primary'),('primary'),('primary'),('primary'),('primary');
+--error 2013
+COMMIT;
+
+--source include/start_mysqld.inc
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+--enable_reconnect
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
+
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_3nodes_sr/t/galera_sr_isolate_master.test b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_isolate_master.test
new file mode 100644
index 00000000000..30fd0192f26
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_isolate_master.test
@@ -0,0 +1,127 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test the effect of gmcast.isolate on master during an SR transaction
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log
+--source include/wait_condition.inc
+
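+# READ UNCOMMITTED is needed to see the rows applied by the still-open SR transaction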
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) > 0 FROM t1;
+
+#
+# Trigger gmcast.isolate=1.
+# The transaction is aborted and we expect the SR tables to be cleaned up.
+#
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+
+--connection node_2
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+--source include/galera_wait_ready.inc
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+--source include/galera_wait_ready.inc
+
+#
+# Expect that the transaction is cleaned up entirely across the cluster and in all mysql.wsrep_streaming_log tables
+#
+
+--connection node_2
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_3
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) = 0 FROM t1;
+
+#
+# Restore cluster
+#
+
+--connection node_1a
+SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
+
+--connection node_2
+--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+--connection node_3
+--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+--connection node_1a
+--source include/galera_wait_ready.inc
+
+#
+# Confirm that the previous transaction is gone on Node #1 as well
+#
+
+--connection node_1
+--error ER_LOCK_DEADLOCK
+COMMIT;
+
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+#
+# Confirm that the transaction can be retried
+#
+
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+COMMIT;
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+SELECT COUNT(*) = 5 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_3
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+SELECT COUNT(*) = 5 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
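+# The isolated node may fail to deliver the SR rollback to the group, hence the
+# suppression below.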
+CALL mtr.add_suppression("failed to send SR rollback for");
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/t/galera_sr_join_slave.test b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_join_slave.test
new file mode 100644
index 00000000000..95aa1a37a78
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_join_slave.test
@@ -0,0 +1,59 @@
+#
+# This test shuts down the slave before a Streaming Replication transaction has
+# started and restarts it while the transaction is already in progress. IST should
+# bring the slave up to date so that it can receive the complete transaction.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connection node_1
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--source ../galera/include/auto_increment_offset_save.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+--connection node_2
+--source include/shutdown_mysqld.inc
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+--source include/start_mysqld.inc
+
+--connection node_1
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (7);
+INSERT INTO t1 VALUES (8);
+INSERT INTO t1 VALUES (9);
+INSERT INTO t1 VALUES (10);
+COMMIT;
+SELECT COUNT(*) = 10 FROM t1;
+
+--connection node_2
+SELECT COUNT(*) = 10 FROM t1;
+
+--connection node_3
+SELECT COUNT(*) = 10 FROM t1;
+
+--connection node_1
+DROP TABLE t1;
+
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_master.test b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_master.test
new file mode 100644
index 00000000000..c7e7528679b
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_master.test
@@ -0,0 +1,58 @@
+#
+# This test kills the master while a Streaming Replication transaction is in progress
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--source ../galera/include/auto_increment_offset_save.inc
+
+--connection node_2
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_3
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_2
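+# Kill the master's mysqld abruptly (no clean shutdown) while the SR transaction is open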
+--source include/kill_galera.inc
+
+--connection node_3
+# We expect that uncommitted values are no longer present
+
+--let $wait_condition = SELECT COUNT(*) = 0 FROM t1;
+--source include/wait_condition.inc
+
+--let $wait_condition = SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+# and we can insert them again
+INSERT INTO t1 VALUES (1);
+
+--connection node_2
+--source include/start_mysqld.inc
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) = 1 FROM t1;
+
+DROP TABLE t1;
+
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_after_apply.test b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_after_apply.test
new file mode 100644
index 00000000000..270af538085
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_after_apply.test
@@ -0,0 +1,81 @@
+#
+# This test kills the slave while a Streaming Replication transaction is in progress
+# and after a fragment has already been applied on the slave. It is expected that
+# after the slave restarts, the cluster will continue to be consistent
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--source ../galera/include/auto_increment_offset_save.inc
+
+--connection node_1
+
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+--source include/kill_galera.inc
+
+--connection node_1
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (7);
+INSERT INTO t1 VALUES (8);
+INSERT INTO t1 VALUES (9);
+INSERT INTO t1 VALUES (10);
+
+--connection node_2
+--source include/start_mysqld.inc
+
+--source include/wait_until_connected_again.inc
+--source include/galera_wait_ready.inc
+
+--connection node_1
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+INSERT INTO t1 VALUES (13);
+INSERT INTO t1 VALUES (14);
+INSERT INTO t1 VALUES (15);
+
+--connection node_2
+
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) = 15 FROM t1;
+--source include/wait_condition.inc
+
+SELECT COUNT(*) = 15 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+COMMIT;
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT COUNT(*) = 15 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
+
+--connection node_3
+CALL mtr.add_suppression("WSREP: Action message in non-primary configuration from member");
+
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_after_apply_rollback.test b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_after_apply_rollback.test
new file mode 100644
index 00000000000..c0df6b2777a
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_after_apply_rollback.test
@@ -0,0 +1,80 @@
+#
+# This test kills the slave while a Streaming Replication transaction is in progress
+# and after a fragment has already been applied on the slave. It is expected that
+# after the slave restarts, the cluster will continue to be consistent even if ROLLBACK
+# is issued on the SR transaction after restart.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--source ../galera/include/auto_increment_offset_save.inc
+
+--connection node_1
+
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+--source include/kill_galera.inc
+
+--connection node_1
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (7);
+INSERT INTO t1 VALUES (8);
+INSERT INTO t1 VALUES (9);
+INSERT INTO t1 VALUES (10);
+
+--connection node_2
+--source include/start_mysqld.inc
+
+--connection node_1
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+INSERT INTO t1 VALUES (13);
+INSERT INTO t1 VALUES (14);
+INSERT INTO t1 VALUES (15);
+
+--connection node_2
+
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) = 15 FROM t1;
+--source include/wait_condition.inc
+
+SELECT COUNT(*) = 15 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+ROLLBACK;
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
+
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_after_apply_rollback2.test b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_after_apply_rollback2.test
new file mode 100644
index 00000000000..83964769ef5
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_after_apply_rollback2.test
@@ -0,0 +1,56 @@
+#
+# This test kills the slave while a Streaming Replication transaction is in progress
+# and after a fragment has already been applied on the slave. It is expected that
+# after the slave restarts, the cluster will continue to be consistent, even if
+# the transaction is rolled back while the slave is down.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+--source include/kill_galera.inc
+--sleep 1
+
+--connection node_1
+INSERT INTO t1 VALUES (6);
+ROLLBACK;
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+--source include/start_mysqld.inc
+--sleep 1
+
+--source include/wait_until_connected_again.inc
+--source include/galera_wait_ready.inc
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_before_apply.test b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_before_apply.test
new file mode 100644
index 00000000000..92566fa6323
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_kill_slave_before_apply.test
@@ -0,0 +1,73 @@
+#
+# This test kills the slave while a Streaming Replication transaction is in progress
+# but before any fragment has been applied on the slave. It is expected that
+# after the slave restarts, the cluster will continue to be consistent.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--source ../galera/include/auto_increment_offset_save.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+# Block node #2's applier before table t1's inserts have come into play
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_1
+CREATE TABLE t2 (f1 INTEGER);
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+LOCK TABLE t2 WRITE;
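+# The node_2 applier will block on this lock when applying the INSERT into t2 below,
+# so later replication events (including the SR fragments) queue up behind it.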
+
+--connection node_1
+INSERT INTO t2 VALUES (1);
+
+--connection node_2
+SET SESSION wsrep_sync_wait = 0;
+
+--connection node_1
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+--source include/kill_galera.inc
+--source include/start_mysqld.inc
+
+# Expect that the SR table will get some entries after the restart
+--let $wait_condition = SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+
+--connection node_1
+COMMIT;
+SELECT COUNT(*) = 5 FROM t1;
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+--let $wait_condition = SELECT COUNT(*) = 5 FROM t1;
+--source include/wait_condition.inc
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+DROP TABLE t1;
+DROP TABLE t2;
+
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_3nodes_sr/t/galera_sr_threeway_split.cnf b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_threeway_split.cnf
new file mode 100644
index 00000000000..910d945949a
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_threeway_split.cnf
@@ -0,0 +1,5 @@
+!include ../galera_3nodes.cnf
+
+[mysqld.1]
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;pc.weight=3'
+
diff --git a/mysql-test/suite/galera_3nodes_sr/t/galera_sr_threeway_split.test b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_threeway_split.test
new file mode 100644
index 00000000000..62122fe4292
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_threeway_split.test
@@ -0,0 +1,177 @@
+#
+# Test the case where the cluster splits 3 ways. The master remains in the
+# primary component and is able to commit its transaction.
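+# Node 1 is given pc.weight=3 in the accompanying .cnf file, so it alone retains
+# quorum when nodes 2 and 3 are isolated.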
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--source ../galera/include/auto_increment_offset_save.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+#
+# Begin a separate SR transaction on every node and confirm that each node
+# has SR table entries for every transaction
+#
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (10);
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+INSERT INTO t1 VALUES (13);
+INSERT INTO t1 VALUES (14);
+
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log
+--source include/wait_condition.inc
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (200);
+INSERT INTO t1 VALUES (201);
+INSERT INTO t1 VALUES (202);
+INSERT INTO t1 VALUES (203);
+INSERT INTO t1 VALUES (204);
+
+--connection node_3
+--let $wait_condition = SELECT COUNT(DISTINCT node_uuid) = 2 FROM mysql.wsrep_streaming_log
+--source include/wait_condition.inc
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (300);
+INSERT INTO t1 VALUES (301);
+INSERT INTO t1 VALUES (302);
+INSERT INTO t1 VALUES (303);
+INSERT INTO t1 VALUES (304);
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connect node_3a, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
+--connection node_1a
+--let $wait_condition = SELECT COUNT(DISTINCT node_uuid) = 3 FROM mysql.wsrep_streaming_log
+--source include/wait_condition.inc
+
+#
+# Isolate nodes #2 and #3 into separate non-primary components
+#
+
+--connection node_2a
+SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+
+--connection node_3a
+SET GLOBAL wsrep_provider_options='gmcast.isolate=1';
+
+--connection node_1a
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+INSERT INTO t1 VALUES (20);
+INSERT INTO t1 VALUES (21);
+INSERT INTO t1 VALUES (22);
+INSERT INTO t1 VALUES (23);
+INSERT INTO t1 VALUES (24);
+
+#
+# Restore cluster
+#
+
+--connection node_2a
+--source include/wsrep_wait_disconnect.inc
+SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
+--source include/galera_wait_ready.inc
+
+--connection node_3a
+--source include/wsrep_wait_disconnect.inc
+SET GLOBAL wsrep_provider_options='gmcast.isolate=0';
+--source include/galera_wait_ready.inc
+
+--connection node_1a
+--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+--let $node_1_gcomm_uuid = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_gcomm_uuid'`
+
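+# The same cluster-size wait condition is reused on the rejoined nodes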
+--connection node_2a
+--source include/wait_condition.inc
+
+--connection node_3a
+--source include/wait_condition.inc
+
+
+#
+# Confirm that the rejoined nodes only have node #1's transaction in their SR tables
+#
+
+--connection node_2
+--error ER_LOCK_DEADLOCK
+COMMIT;
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
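+# The query below is hidden from the result file because it embeds node_1's gcomm UUID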
+--disable_query_log
+--eval SELECT DISTINCT node_uuid = '$node_1_gcomm_uuid' AS wsrep_gcomm_uuid_match FROM mysql.wsrep_streaming_log;
+--enable_query_log
+
+--connection node_3
+--error ER_LOCK_DEADLOCK
+COMMIT;
+SELECT COUNT(DISTINCT node_uuid) = 1 FROM mysql.wsrep_streaming_log;
+--disable_query_log
+--eval SELECT DISTINCT node_uuid = '$node_1_gcomm_uuid' AS wsrep_gcomm_uuid_match FROM mysql.wsrep_streaming_log;
+--enable_query_log
+
+#
+# Finalize transaction on node #1
+#
+
+--connection node_1
+INSERT INTO t1 VALUES (30);
+INSERT INTO t1 VALUES (31);
+INSERT INTO t1 VALUES (32);
+INSERT INTO t1 VALUES (33);
+INSERT INTO t1 VALUES (34);
+COMMIT;
+
+#
+# Confirm that transaction is replicated correctly and SR tables are empty at the end of the test
+#
+
+SELECT COUNT(*) = 15, MIN(f1) = 10, MAX(f1) = 34 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SET AUTOCOMMIT=ON;
+
+--connection node_2
+COMMIT;
+SELECT COUNT(*) = 15, MIN(f1) = 10, MAX(f1) = 34 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SET AUTOCOMMIT=ON;
+
+--connection node_3
+COMMIT;
+SELECT COUNT(*) = 15, MIN(f1) = 10, MAX(f1) = 34 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SET AUTOCOMMIT=ON;
+
+DROP TABLE t1;
+
+--connection node_1
+CALL mtr.add_suppression("WSREP: failed to send SR rollback for ");
+--connection node_2
+CALL mtr.add_suppression("WSREP: failed to send SR rollback for ");
+--connection node_3
+CALL mtr.add_suppression("WSREP: failed to send SR rollback for ");
+
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_3nodes_sr/t/galera_sr_threeway_split_no_primary.test b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_threeway_split_no_primary.test
new file mode 100644
index 00000000000..f9aab8316f1
--- /dev/null
+++ b/mysql-test/suite/galera_3nodes_sr/t/galera_sr_threeway_split_no_primary.test
@@ -0,0 +1,126 @@
+#
+# This test verifies that an orphaned SR transaction gets cleaned up upon cluster
+# reconnection. Specifically, the case where the cluster goes through
+# a state of no primary components, and the nodes rejoin with the
+# same IDs.
+#
+
+--source include/galera_cluster.inc
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3
+--connect node_3a, 127.0.0.1, root, , test, $NODE_MYPORT_3
+
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--let $node_3=node_3
+--source ../galera/include/auto_increment_offset_save.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+SET SESSION wsrep_trx_fragment_size=1;
+BEGIN;
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+
+--connection node_2
+SET SESSION wsrep_trx_fragment_size=1;
+BEGIN;
+INSERT INTO t1 VALUES (21);
+INSERT INTO t1 VALUES (22);
+
+--connection node_3
+SET SESSION wsrep_trx_fragment_size=1;
+BEGIN;
+INSERT INTO t1 VALUES (31);
+INSERT INTO t1 VALUES (32);
+
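+# Isolate nodes 2 and 3 so that no primary component remains anywhere in the cluster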
+--connection node_2a
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
+
+--connection node_3a
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
+
+--connection node_1a
+# wait until cluster is partitioned
+SET SESSION wsrep_sync_wait = 0;
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+SHOW STATUS LIKE 'wsrep_cluster_status';
+SET SESSION wsrep_sync_wait = DEFAULT;
+
+--connection node_2a
+# wait until cluster is partitioned
+SET SESSION wsrep_sync_wait = 0;
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+SHOW STATUS LIKE 'wsrep_cluster_status';
+SET SESSION wsrep_sync_wait = DEFAULT;
+
+--connection node_3a
+# wait until cluster is partitioned
+SET SESSION wsrep_sync_wait = 0;
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+SHOW STATUS LIKE 'wsrep_cluster_status';
+SET SESSION wsrep_sync_wait = DEFAULT;
+
+--connection node_2a
+# reconnect node 2
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0';
+
+--connection node_3a
+# reconnect node 3
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0';
+
+# wait for the cluster to become whole again
+--connection node_1a
+--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+--source include/galera_wait_ready.inc
+
+--connection node_2a
+--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+--source include/galera_wait_ready.inc
+
+--connection node_3a
+--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+--source include/galera_wait_ready.inc
+
+--connection node_1a
+# check that the streaming log has been cleared and there are no locks
+# from the SRs by issuing conflicting inserts
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+INSERT INTO t1 VALUES(11);
+INSERT INTO t1 VALUES(21);
+INSERT INTO t1 VALUES(31);
+SELECT * FROM t1;
+
+--connection node_2a
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+--connection node_3a
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+
+
+# check that all 3 SRs have been rolled back
+--connection node_1
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES(103);
+
+--connection node_2
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES(203);
+
+--connection node_3
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES(303);
+
+--connection node_1
+DROP TABLE t1;
+
+# Restore original auto_increment_offset values.
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_sr/disabled.def b/mysql-test/suite/galera_sr/disabled.def
new file mode 100644
index 00000000000..b2cc0189773
--- /dev/null
+++ b/mysql-test/suite/galera_sr/disabled.def
@@ -0,0 +1,2 @@
+galera_sr_table_contents : missing file
+GCF-437 : test relies on InnoDB redo log size limitation
diff --git a/mysql-test/suite/galera_sr/galera_2nodes.cnf b/mysql-test/suite/galera_sr/galera_2nodes.cnf
new file mode 100644
index 00000000000..0412b5654dd
--- /dev/null
+++ b/mysql-test/suite/galera_sr/galera_2nodes.cnf
@@ -0,0 +1 @@
+!include ../galera/galera_2nodes.cnf
diff --git a/mysql-test/suite/galera_sr/my.cnf b/mysql-test/suite/galera_sr/my.cnf
new file mode 100644
index 00000000000..ca163a540d9
--- /dev/null
+++ b/mysql-test/suite/galera_sr/my.cnf
@@ -0,0 +1 @@
+!include galera_2nodes.cnf
diff --git a/mysql-test/suite/galera_sr/r/GCF-1008.result b/mysql-test/suite/galera_sr/r/GCF-1008.result
new file mode 100644
index 00000000000..541ac3cddfb
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-1008.result
@@ -0,0 +1,70 @@
+connection node_2;
+connection node_1;
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(255)) ENGINE=InnoDB;
+connection node_2;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'x');
+connection node_2a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,before_local_commit_monitor_enter';
+connection node_2;
+COMMIT;
+connection node_2b;
+SET SESSION wsrep_sync_wait = 0;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 1;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+REPLACE INTO t1 VALUES (1,'y');
+connection node_2b;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_2a;
+SET GLOBAL wsrep_provider_options = 'signal=before_local_commit_monitor_enter';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+DROP TABLE t1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(255)) ENGINE=InnoDB;
+connection node_2;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'x');
+connection node_2a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,before_certify_apply_monitor_enter';
+connection node_2;
+COMMIT;
+connection node_2b;
+SET SESSION wsrep_sync_wait = 0;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 1;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+REPLACE INTO t1 VALUES (1,'y');
+connection node_2b;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_2a;
+SET GLOBAL wsrep_provider_options = 'signal=before_certify_apply_monitor_enter';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-1018.result b/mysql-test/suite/galera_sr/r/GCF-1018.result
new file mode 100644
index 00000000000..12a4d3c70af
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-1018.result
@@ -0,0 +1,25 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(255)) ENGINE=InnoDB;
+INSERT INTO t1 (f2) VALUES ('a');
+INSERT INTO t1 (f2) VALUES ('b');
+INSERT INTO t1 (f2) VALUES ('c');
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2a;
+SET SESSION wsrep_sync_wait = 0;
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_certify_apply_monitor_enter';
+connection node_2;
+SET SESSION wsrep_retry_autocommit = 0;
+SET SESSION wsrep_trx_fragment_size = 64;
+DELETE FROM t1 ORDER BY f1 DESC LIMIT 2;;
+connection node_2a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+connection node_1;
+INSERT INTO t1 (f2) VALUES ('d'),('e');
+connection node_2a;
+SET GLOBAL wsrep_provider_options = 'signal=after_certify_apply_monitor_enter';
+SET GLOBAL wsrep_provider_options = 'dbug=';
+connection node_2;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-1018B.result b/mysql-test/suite/galera_sr/r/GCF-1018B.result
new file mode 100644
index 00000000000..4752c072cc1
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-1018B.result
@@ -0,0 +1,12 @@
+connection node_2;
+connection node_1;
+connection node_1;
+SET SESSION wsrep_sync_wait = 0;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(255)) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 64;
+SET SESSION innodb_lock_wait_timeout = 1000;
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 64;
+SET SESSION innodb_lock_wait_timeout = 1000;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-1043A.result b/mysql-test/suite/galera_sr/r/GCF-1043A.result
new file mode 100644
index 00000000000..cc90461291d
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-1043A.result
@@ -0,0 +1,21 @@
+connection node_2;
+connection node_1;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+Running a concurrent test with the following queries:
+DELETE FROM t1
+REPLACE INTO t1 VALUES (1,'y'),(2,'x')
+REPLACE INTO t1 VALUES (1,'y'),(2,'y'),(3,'y')
+connection node_1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(255)) ENGINE=InnoDB;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 1;;;
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 1;;;
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 1;;;
+include/diff_servers.inc [servers=1 2]
+DROP TABLE t1;
+Concurrent test end
diff --git a/mysql-test/suite/galera_sr/r/GCF-1043B.result b/mysql-test/suite/galera_sr/r/GCF-1043B.result
new file mode 100644
index 00000000000..a10295c00b9
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-1043B.result
@@ -0,0 +1,21 @@
+connection node_2;
+connection node_1;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+Running a concurrent test with the following queries:
+DELETE FROM t1
+INSERT INTO t1 VALUES (1,'y'),(2,'x')
+UPDATE t1 SET f2 = 'y' WHERE f1 = 1 OR f1 = 2;
+connection node_1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(255)) ENGINE=InnoDB;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 1;;;
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 1;;;
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 1;;;
+include/diff_servers.inc [servers=1 2]
+DROP TABLE t1;
+Concurrent test end
diff --git a/mysql-test/suite/galera_sr/r/GCF-1051.result b/mysql-test/suite/galera_sr/r/GCF-1051.result
new file mode 100644
index 00000000000..82fa389bb1d
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-1051.result
@@ -0,0 +1,46 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size=1;
+connection node_1;
+START TRANSACTION;
+SAVEPOINT A;
+INSERT INTO t1 VALUES (1);
+ROLLBACK TO SAVEPOINT A;
+COMMIT;
+connection node_1;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+SET AUTOCOMMIT=OFF;
+SAVEPOINT A;
+INSERT INTO t1 VALUES (2);
+ROLLBACK TO SAVEPOINT A;
+COMMIT;
+connection node_1;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-1060.result b/mysql-test/suite/galera_sr/r/GCF-1060.result
new file mode 100644
index 00000000000..58af97d064b
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-1060.result
@@ -0,0 +1,21 @@
+connection node_2;
+connection node_1;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+Running a concurrent test with the following queries:
+TRUNCATE TABLE t1
+INSERT INTO t1 VALUE (1,'x'),(2,'x'),(3,'x')
+INSERT INTO t1 VALUE (4, 'z');
+connection node_1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(255)) ENGINE=InnoDB;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 1;;;
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 1;;;
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 1;;;
+include/diff_servers.inc [servers=1 2]
+DROP TABLE t1;
+Concurrent test end
diff --git a/mysql-test/suite/galera_sr/r/GCF-437.result b/mysql-test/suite/galera_sr/r/GCF-437.result
new file mode 100644
index 00000000000..1aa0c9c0768
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-437.result
@@ -0,0 +1,12 @@
+CREATE TABLE ten (f1 INTEGER) ENGINE=MyISAM;
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 VARCHAR(512)) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 2 * 1024 * 1024;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 512) FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5;
+ERROR HY000: Writing one row to the row-based binary log failed
+CALL mtr.add_suppression("InnoDB: The total blob data length*");
+CALL mtr.add_suppression("WSREP: Error writing into mysql.wsrep_streaming_log: 139");
+CALL mtr.add_suppression("WSREP: Failed to write to frag table: 1");
+CALL mtr.add_suppression("WSREP: Failed to append frag to persistent storage");
+DROP TABLE t1;
+DROP table ten;
diff --git a/mysql-test/suite/galera_sr/r/GCF-561.result b/mysql-test/suite/galera_sr/r/GCF-561.result
new file mode 100644
index 00000000000..58663caf134
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-561.result
@@ -0,0 +1,50 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 1);
+INSERT INTO t1 VALUES (2, 2);
+INSERT INTO t1 VALUES (3, 3);
+INSERT INTO t1 VALUES (4, 4);
+INSERT INTO t1 VALUES (5, 5);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+ALTER TABLE t1 DROP COLUMN f2;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+INSERT INTO t1 VALUES (6, 6);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+INSERT INTO t1 VALUES (6, 6);
+ERROR 21S01: Column count doesn't match value count at row 1
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_1;
+COMMIT;
+connection node_2;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-571.result b/mysql-test/suite/galera_sr/r/GCF-571.result
new file mode 100644
index 00000000000..4b4f749d910
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-571.result
@@ -0,0 +1,67 @@
+connection node_2;
+connection node_1;
+SET AUTOCOMMIT=OFF;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('%abcdef%');
+INSERT INTO t1 VALUES ('%abcdef%');
+INSERT INTO t1 VALUES ('%abcdef%');
+INSERT INTO t1 VALUES ('%abcdef%');
+SAVEPOINT A;
+INSERT INTO t1 VALUES ('xyzxyz');
+INSERT INTO t1 VALUES ('xyzxyz');
+INSERT INTO t1 VALUES ('xyzxyz');
+INSERT INTO t1 VALUES ('xyzxyz');
+INSERT INTO t1 VALUES ('xyzxyz');
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%abcdef%';
+COUNT(*) > 0
+1
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%xyz%';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 1 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%SAVEPOINT `A`%';
+COUNT(*) = 1
+0
+connection node_1;
+ROLLBACK TO SAVEPOINT A;
+connection node_1a;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%abcdef%';
+COUNT(*) > 0
+1
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%xyz%';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 1 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%SAVEPOINT `A`%';
+COUNT(*) = 1
+0
+SELECT COUNT(*) = 1 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%ROLLBACK TO `A`%';
+COUNT(*) = 1
+0
+connection node_2;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%abcdef%';
+COUNT(*) > 0
+1
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%xyz%';
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 1 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%SAVEPOINT `A`%';
+COUNT(*) = 1
+0
+SELECT COUNT(*) = 1 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%ROLLBACK TO `A`%';
+COUNT(*) = 1
+0
+connection node_1;
+ROLLBACK;
+connection node_1a;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-572.result b/mysql-test/suite/galera_sr/r/GCF-572.result
new file mode 100644
index 00000000000..cb4d48b3600
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-572.result
@@ -0,0 +1,37 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(10)) ENGINE=InnoDB;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'node1');
+connection node_1a;
+INSERT INTO t1 VALUES (5, 'node2');
+connection node_1;
+INSERT INTO t1 VALUES (5, 'node1');
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+SELECT * FROM t1;
+f1 f2
+5 node2
+SET SESSION wsrep_trx_fragment_size = 10000;
+START TRANSACTION;
+INSERT INTO t1 VALUE (10, 'node1');
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+connection node_1a;
+INSERT INTO t1 VALUES(15, 'node2');
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+5 node2
+10 node1
+INSERT INTO t1 VALUES(15, 'node1');
+ERROR 23000: Duplicate entry '15' for key 'PRIMARY'
+COMMIT;
+SELECT * FROM t1;
+f1 f2
+5 node2
+10 node1
+15 node2
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-580.result b/mysql-test/suite/galera_sr/r/GCF-580.result
new file mode 100644
index 00000000000..3ee69c6c4b3
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-580.result
@@ -0,0 +1,13 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+last_committed_matches_fragment_count
+1
+COMMIT;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-585.result b/mysql-test/suite/galera_sr/r/GCF-585.result
new file mode 100644
index 00000000000..ab5fed59081
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-585.result
@@ -0,0 +1,28 @@
+connection node_2;
+connection node_1;
+create table t1 (f1 integer primary key) engine=innodb;
+set autocommit=off;
+set session wsrep_trx_fragment_size=1;
+start transaction;
+insert into t1 values (1);
+insert into t1 values (2),(1);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+alter table t1 drop primary key;
+drop table t1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+SET SESSION wsrep_trx_fragment_size=1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+update t1 set f1 = 100 where f1 = 10;
+connection node_2;
+INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
+SET SESSION wsrep_trx_fragment_size=1;
+SET SESSION innodb_lock_wait_timeout=1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+delete from t1 where f1 > 10;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+delete from t1 where f1 > 10 and f1 < 100;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-597.result b/mysql-test/suite/galera_sr/r/GCF-597.result
new file mode 100644
index 00000000000..7afca229251
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-597.result
@@ -0,0 +1,21 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
+SET wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+connection node_2;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+connection node_1;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+ROLLBACK;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-620.result b/mysql-test/suite/galera_sr/r/GCF-620.result
new file mode 100644
index 00000000000..33789f82add
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-620.result
@@ -0,0 +1,18 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size = 200;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (1);
+SAVEPOINT A;
+INSERT INTO t1 VALUES (1);
+ROLLBACK TO SAVEPOINT A;
+COMMIT;
+connection node_2;
+SELECT COUNT(*) = 2 FROM t1;
+COUNT(*) = 2
+0
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-623.result b/mysql-test/suite/galera_sr/r/GCF-623.result
new file mode 100644
index 00000000000..f3500b7ac2b
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-623.result
@@ -0,0 +1,29 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+connection node_2;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-627.result b/mysql-test/suite/galera_sr/r/GCF-627.result
new file mode 100644
index 00000000000..891cf4af5a9
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-627.result
@@ -0,0 +1,26 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER, f2 VARCHAR(10)) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER);
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'a');
+connection node_2;
+DROP TABLE t1;
+connection node_1;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+INSERT INTO t1 VALUES (2);
+ERROR 42S02: Table 'test.t1' doesn't exist
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/r/GCF-845.result b/mysql-test/suite/galera_sr/r/GCF-845.result
new file mode 100644
index 00000000000..df842049332
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-845.result
@@ -0,0 +1,21 @@
+connection node_2;
+connection node_1;
+CREATE TABLE IF NOT EXISTS t1 (f1 INTEGER) ENGINE = InnoDB;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+SET SESSION AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (161);
+COMMIT;
+DELETE FROM t1 WHERE f1 > 13;
+disconnect node_1a;
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+include/assert_grep.inc [No BF-BF log line found]
diff --git a/mysql-test/suite/galera_sr/r/GCF-851.result b/mysql-test/suite/galera_sr/r/GCF-851.result
new file mode 100644
index 00000000000..52aa4c78745
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-851.result
@@ -0,0 +1,30 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION WSREP_TRX_FRAGMENT_SIZE=1;
+SET SESSION AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (10);
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+connection node_2;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+connection node_1;
+START TRANSACTION;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) > 0 FROM t1;
+COUNT(*) > 0
+1
+connection node_1;
+SELECT COUNT(*) > 0 FROM t1;
+COUNT(*) > 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-867.result b/mysql-test/suite/galera_sr/r/GCF-867.result
new file mode 100644
index 00000000000..9521a86d621
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-867.result
@@ -0,0 +1,4 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-889.result b/mysql-test/suite/galera_sr/r/GCF-889.result
new file mode 100644
index 00000000000..617377de15a
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-889.result
@@ -0,0 +1,25 @@
+connection node_2;
+connection node_1;
+connection node_2;
+SET GLOBAL wsrep_ignore_apply_errors = 2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_on = OFF;
+INSERT INTO t1 VALUES (1);
+SET SESSION wsrep_on = ON;
+SET SESSION wsrep_trx_fragment_size = 1;
+DELETE FROM t1 WHERE f1 = 1;
+SET SESSION wsrep_trx_fragment_size = 0;
+INSERT INTO t1 VALUES (1);
+SELECT COUNT(*) = 1;
+COUNT(*) = 1
+1
+connection node_2;
+SELECT COUNT(*) = 1;
+COUNT(*) = 1
+1
+CALL mtr.add_suppression("Could not execute Delete_rows event on table");
+CALL mtr.add_suppression("Can't find record in 't1'");
+SET GLOBAL wsrep_ignore_apply_errors = 7;
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/GCF-900.result b/mysql-test/suite/galera_sr/r/GCF-900.result
new file mode 100644
index 00000000000..caa2d2c4138
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/GCF-900.result
@@ -0,0 +1,21 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INT PRIMARY KEY, f2 INT) ENGINE=InnoDB;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 128;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 0);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (2, 0);
+connection node_2;
+ALTER TABLE t1 DROP COLUMN f2;
+connection node_1;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1a;
+INSERT INTO t1 VALUES (3, 0);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/MDEV-18585.result b/mysql-test/suite/galera_sr/r/MDEV-18585.result
new file mode 100644
index 00000000000..d30def1ea63
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/MDEV-18585.result
@@ -0,0 +1,36 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INT PRIMARY KEY);
+SET SESSION wsrep_trx_fragment_unit='ROWS';
+SET SESSION wsrep_trx_fragment_size=1;
+INSERT INTO t1 VALUES (1), (2);
+SET SESSION wsrep_trx_fragment_unit='BYTES';
+SET SESSION wsrep_trx_fragment_size=1;
+INSERT INTO t1 VALUES (3), (4);
+SET SESSION wsrep_trx_fragment_unit='STATEMENTS';
+SET SESSION wsrep_trx_fragment_size=1;
+INSERT INTO t1 VALUES (5), (6);
+SET SESSION wsrep_trx_fragment_unit=default;
+SET SESSION wsrep_trx_fragment_size=default;
+SHOW BINLOG EVENTS IN 'mysqld-bin.000002' FROM 518;
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-bin.000002 518 Gtid 1 560 BEGIN GTID 0-1-2
+mysqld-bin.000002 560 Annotate_rows 1 613 INSERT INTO t1 VALUES (1), (2)
+mysqld-bin.000002 613 Table_map 1 658 table_id: # (test.t1)
+mysqld-bin.000002 658 Write_rows_v1 1 696 table_id: # flags: STMT_END_F
+mysqld-bin.000002 696 Table_map 1 741 table_id: # (test.t1)
+mysqld-bin.000002 741 Write_rows_v1 1 779 table_id: # flags: STMT_END_F
+mysqld-bin.000002 779 Xid 1 810 COMMIT /* xid=# */
+mysqld-bin.000002 810 Gtid 1 852 BEGIN GTID 0-1-3
+mysqld-bin.000002 852 Annotate_rows 1 905 INSERT INTO t1 VALUES (3), (4)
+mysqld-bin.000002 905 Table_map 1 950 table_id: # (test.t1)
+mysqld-bin.000002 950 Write_rows_v1 1 988 table_id: # flags: STMT_END_F
+mysqld-bin.000002 988 Table_map 1 1033 table_id: # (test.t1)
+mysqld-bin.000002 1033 Write_rows_v1 1 1071 table_id: # flags: STMT_END_F
+mysqld-bin.000002 1071 Xid 1 1102 COMMIT /* xid=# */
+mysqld-bin.000002 1102 Gtid 1 1144 BEGIN GTID 0-1-4
+mysqld-bin.000002 1144 Annotate_rows 1 1197 INSERT INTO t1 VALUES (5), (6)
+mysqld-bin.000002 1197 Table_map 1 1242 table_id: # (test.t1)
+mysqld-bin.000002 1242 Write_rows_v1 1 1285 table_id: # flags: STMT_END_F
+mysqld-bin.000002 1285 Xid 1 1316 COMMIT /* xid=# */
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera-features#56.result b/mysql-test/suite/galera_sr/r/galera-features#56.result
new file mode 100644
index 00000000000..1d04b6f9cee
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera-features#56.result
@@ -0,0 +1,32 @@
+connection node_2;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+connection node_1;
+CREATE TABLE ten (f1 INTEGER);
+INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+connection node_2;
+SET GLOBAL wsrep_slave_threads = 4;
+SET SESSION wsrep_trx_fragment_size = 1;
+connection node_1;
+INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;;
+connection node_1a;
+INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;;
+connection node_2;
+INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;;
+connection node_1;
+connection node_1a;
+connection node_2;
+SELECT COUNT(*) = 30000 FROM t1;
+COUNT(*) = 30000
+1
+SELECT COUNT(DISTINCT f1) = 30000 FROM t1;
+COUNT(DISTINCT f1) = 30000
+1
+SELECT COUNT(*) = 6 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user';
+COUNT(*) = 6
+0
+connection default;
+DROP TABLE t1;
+DROP TABLE ten;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_bf_abort.result b/mysql-test/suite/galera_sr/r/galera_sr_bf_abort.result
new file mode 100644
index 00000000000..bf92a48b242
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_bf_abort.result
@@ -0,0 +1,555 @@
+connection node_2;
+connection node_1;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET SESSION wsrep_sync_wait = 0;
+galera_sr_bf_abort_at_commit = 0
+after_replicate_sync
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+connection node_1;
+INSERT INTO t1 VALUES (3);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+connection node_1;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+ROLLBACK;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+connection node_1;
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_2;
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+local_monitor_master_enter_sync
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
+INSERT INTO t1 VALUES (3);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
+connection node_1;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+ROLLBACK;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+connection node_1;
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_2;
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+apply_monitor_master_enter_sync
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_master_enter_sync';
+connection node_1;
+INSERT INTO t1 VALUES (3);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_master_enter_sync';
+connection node_1;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+ROLLBACK;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+connection node_1;
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_2;
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+commit_monitor_master_enter_sync
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
+connection node_1;
+INSERT INTO t1 VALUES (3);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
+connection node_1;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+ROLLBACK;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+connection node_1;
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_2;
+SELECT * FROM t1;
+f1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+galera_sr_bf_abort_at_commit = 1
+after_replicate_sync
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync';
+connection node_1;
+COMMIT;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync';
+connection node_1;
+ROLLBACK;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+connection node_1;
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_2;
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+local_monitor_master_enter_sync
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
+COMMIT;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
+connection node_1;
+ROLLBACK;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+connection node_1;
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_2;
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+apply_monitor_master_enter_sync
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_master_enter_sync';
+connection node_1;
+COMMIT;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_master_enter_sync';
+connection node_1;
+ROLLBACK;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+connection node_1;
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_2;
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+commit_monitor_master_enter_sync
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+f1
+1
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_master_enter_sync';
+connection node_1;
+COMMIT;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_master_enter_sync';
+connection node_1;
+ROLLBACK;
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+connection node_1;
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_2;
+SELECT * FROM t1;
+f1
+1
+2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+DROP TABLE t1;
+CALL mtr.add_suppression("WSREP: fragment replication failed: 1");
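
Every scenario in galera_sr_bf_abort.result follows the same choreography through the provider's debug sync points: node_1a arms a point with dbug=d,<name>, the streaming transaction or the conflicting applier parks there, the write from node_2 brute-force-aborts the streaming transaction (hence the recorded ERROR 40001), and the points are released with signal=<name>. The bare SET SESSION wsrep_on = 0 / 1 pairs bracket wait conditions that mysqltest does not record in the .result file. A generic sketch of the pattern, assuming a Galera provider built with dbug support (the sync-point names are the ones recorded above):

-- Arm a provider sync point without replicating the control statements.
SET SESSION wsrep_on = 0;
SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
SET SESSION wsrep_on = 1;
-- ... from another connection, run the statement that should block on the point ...
-- Release and disarm once the conflict has been provoked.
SET SESSION wsrep_on = 0;
SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
SET GLOBAL wsrep_provider_options = 'dbug=';
SET SESSION wsrep_on = 1;
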
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_blob.result b/mysql-test/suite/galera_sr/r/galera_sr_blob.result
new file mode 100644
index 00000000000..db25f54a611
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_blob.result
@@ -0,0 +1,23 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 TEXT) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+connection node_2;
+connection node_1;
+INSERT INTO t1 VALUES (REPEAT('x', 65535));
+connection node_2;
+wsrep_last_committed_delta
+1
+connection node_1;
+COMMIT;
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+SELECT LENGTH(f1) = 65535 FROM t1;
+LENGTH(f1) = 65535
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_cc_master.result b/mysql-test/suite/galera_sr/r/galera_sr_cc_master.result
new file mode 100644
index 00000000000..9e223414fe4
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_cc_master.result
@@ -0,0 +1,66 @@
+connection node_2;
+connection node_1;
+CALL mtr.add_suppression("WSREP: discarding established.*");
+connection node_1;
+connection node_2;
+connection node_2;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+5
+connection node_1;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+5
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2a;
+SET SESSION wsrep_sync_wait=0;
+SET GLOBAL wsrep_cluster_address = '';
+SET SESSION wsrep_sync_wait = DEFAULT;
+connection node_2;
+INSERT INTO t1 VALUES (6);
+ERROR HY000: Lost connection to MySQL server during query
+connection node_1;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+0
+connection node_2a;
+connection node_1;
+connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2b;
+SELECT * FROM mysql.wsrep_streaming_log;
+node_uuid trx_id seqno flags frag
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+0
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+COMMIT;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+0
+connection node_1;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+5
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+0
+DROP TABLE t1;
+connection node_2b;
+CALL mtr.add_suppression("WSREP: Failed to replicate rollback fragment for");
+disconnect node_2;
+connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2;
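
The cluster-configuration-change test relies on fragments of an open streaming transaction being persisted on every node in mysql.wsrep_streaming_log; once node_2 is cut off (empty wsrep_cluster_address), the surviving node rolls the orphaned transaction back and the table drains to zero rows, which is what the COUNT(*) checks above assert. A short sketch for inspecting in-flight fragments, using the column names recorded in the SELECT * header above:

-- List fragments of streaming transactions that are still open cluster-wide.
SELECT node_uuid, trx_id, seqno, flags
FROM mysql.wsrep_streaming_log
ORDER BY trx_id, seqno;
-- 0 here means no streaming transaction is currently in flight.
SELECT COUNT(*) AS open_fragments FROM mysql.wsrep_streaming_log;
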
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_cc_no_primary.result b/mysql-test/suite/galera_sr/r/galera_sr_cc_no_primary.result
new file mode 100644
index 00000000000..982d8b3aefe
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_cc_no_primary.result
@@ -0,0 +1,70 @@
+connection node_2;
+connection node_1;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_1;
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+SET SESSION wsrep_trx_fragment_size=1;
+BEGIN;
+INSERT INTO t1 VALUES (10);
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+connection node_2;
+SET SESSION wsrep_trx_fragment_size=1;
+BEGIN;
+INSERT INTO t1 VALUES (20);
+INSERT INTO t1 VALUES (21);
+INSERT INTO t1 VALUES (22);
+SELECT COUNT(*) `expect 6` FROM mysql.wsrep_streaming_log;
+expect 6
+6
+connection node_2a;
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
+SET SESSION wsrep_sync_wait = 0;
+SHOW STATUS LIKE 'wsrep_cluster_size';
+Variable_name Value
+wsrep_cluster_size 1
+SHOW STATUS LIKE 'wsrep_cluster_status';
+Variable_name Value
+wsrep_cluster_status non-Primary
+SET SESSION wsrep_sync_wait = DEFAULT;
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+SHOW STATUS LIKE 'wsrep_cluster_size';
+Variable_name Value
+wsrep_cluster_size 1
+SHOW STATUS LIKE 'wsrep_cluster_status';
+Variable_name Value
+wsrep_cluster_status non-Primary
+SET SESSION wsrep_sync_wait = DEFAULT;
+connection node_2a;
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0';
+SHOW STATUS LIKE 'wsrep_cluster_size';
+Variable_name Value
+wsrep_cluster_size 2
+connection node_1a;
+SHOW STATUS LIKE 'wsrep_cluster_size';
+Variable_name Value
+wsrep_cluster_size 2
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+expect 0
+0
+connection node_2a;
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+expect 0
+0
+INSERT INTO t1 VALUES(10);
+INSERT INTO t1 VALUES(20);
+SELECT * FROM t1;
+f1
+10
+20
+connection node_1;
+INSERT INTO t1 VALUES(13);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_2;
+INSERT INTO t1 VALUES(23);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_cc_slave.result b/mysql-test/suite/galera_sr/r/galera_sr_cc_slave.result
new file mode 100644
index 00000000000..d439380e2ce
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_cc_slave.result
@@ -0,0 +1,61 @@
+connection node_2;
+connection node_1;
+connection node_1;
+connection node_2;
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+connection node_2;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+connection node_2;
+SET GLOBAL wsrep_cluster_address = '';
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_sync_wait = default;
+connection node_1;
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (7);
+INSERT INTO t1 VALUES (8);
+INSERT INTO t1 VALUES (9);
+INSERT INTO t1 VALUES (10);
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+connection node_2;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+connection node_1;
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+INSERT INTO t1 VALUES (13);
+INSERT INTO t1 VALUES (14);
+INSERT INTO t1 VALUES (16);
+COMMIT;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+CALL mtr.add_suppression("points to own listening address, blacklisting");
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_concurrent.result b/mysql-test/suite/galera_sr/r/galera_sr_concurrent.result
new file mode 100644
index 00000000000..75acba366c2
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_concurrent.result
@@ -0,0 +1,36 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t2 VALUES (1);
+INSERT INTO t2 VALUES (2);
+INSERT INTO t2 VALUES (3);
+INSERT INTO t2 VALUES (4);
+INSERT INTO t2 VALUES (5);
+connection node_1;
+COMMIT;
+connection node_1a;
+COMMIT;
+connection node_2;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+SELECT COUNT(*) = 5 FROM t2;
+COUNT(*) = 5
+1
+connection node_1;
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_conflict.result b/mysql-test/suite/galera_sr/r/galera_sr_conflict.result
new file mode 100644
index 00000000000..a45bffeaa81
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_conflict.result
@@ -0,0 +1,21 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+INSERT INTO t1 VALUES(1);;
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_1;
+COMMIT;
+connection node_2;
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_conflict_on_commit.result b/mysql-test/suite/galera_sr/r/galera_sr_conflict_on_commit.result
new file mode 100644
index 00000000000..5de1ac2422c
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_conflict_on_commit.result
@@ -0,0 +1,31 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+connection node_2;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+connection node_1;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+COMMIT;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+connection node_2;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_conflict_on_commit2.result b/mysql-test/suite/galera_sr/r/galera_sr_conflict_on_commit2.result
new file mode 100644
index 00000000000..2ee3d4c714c
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_conflict_on_commit2.result
@@ -0,0 +1,28 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+connection node_1;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+INSERT INTO t1 VALUES (5);;
+connection node_1;
+COMMIT;
+connection node_2;
+ERROR 23000: Duplicate entry '5' for key 'PRIMARY'
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+connection node_2;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_conflict_with_rollback_master.result b/mysql-test/suite/galera_sr/r/galera_sr_conflict_with_rollback_master.result
new file mode 100644
index 00000000000..92bf007e21b
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_conflict_with_rollback_master.result
@@ -0,0 +1,29 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+connection node_2;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+connection node_1;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+ROLLBACK;
+connection node_2;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_create_drop.result b/mysql-test/suite/galera_sr/r/galera_sr_create_drop.result
new file mode 100644
index 00000000000..c8658f09ff0
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_create_drop.result
@@ -0,0 +1,28 @@
+connection node_2;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size=1;
+CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
+connection node_2;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) NOT NULL,
+ PRIMARY KEY (`f1`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+connection node_1;
+DROP TABLE t1;
+connection node_2;
+SHOW CREATE TABLE t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+CREATE DATABASE mdev_18587;
+connection node_2;
+SHOW DATABASES LIKE 'mdev_18587';
+Database (mdev_18587)
+mdev_18587
+connection node_1;
+DROP DATABASE mdev_18587;
+connection node_2;
+SHOW DATABASES LIKE 'mdev_18587';
+Database (mdev_18587)
+connection node_1;
+SET SESSION wsrep_trx_fragment_size=DEFAULT;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_ddl_master.result b/mysql-test/suite/galera_sr/r/galera_sr_ddl_master.result
new file mode 100644
index 00000000000..cf9c7771bed
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_ddl_master.result
@@ -0,0 +1,48 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+connection node_1;
+INSERT INTO t1 VALUES (6);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+ROLLBACK;
+START TRANSACTION;
+INSERT INTO t1 (f1) VALUES (1);
+INSERT INTO t1 (f1) VALUES (2);
+INSERT INTO t1 (f1) VALUES (3);
+INSERT INTO t1 (f1) VALUES (4);
+INSERT INTO t1 (f1) VALUES (5);
+INSERT INTO t1 (f1) VALUES (6);
+COMMIT;
+SELECT COUNT(*) = 6 FROM t1;
+COUNT(*) = 6
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 6 FROM t1;
+COUNT(*) = 6
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
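
galera_sr_ddl_master.result records the interaction with total-order DDL: the ALTER TABLE issued from node_1a is applied in total order and brute-force-aborts the open streaming transaction, so the next statement on node_1 fails with ERROR 40001 and the fragments already stored in mysql.wsrep_streaming_log are discarded. The recovery path is the one the test itself takes, sketched here against the same table:

-- After a TOI ALTER aborts the streaming transaction, nothing of it survives:
INSERT INTO t1 VALUES (6);       -- fails with ERROR 40001 (deadlock)
ROLLBACK;                        -- clears the local transaction state
START TRANSACTION;               -- retry against the altered schema
INSERT INTO t1 (f1) VALUES (1);
COMMIT;
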
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_ddl_schema.result b/mysql-test/suite/galera_sr/r/galera_sr_ddl_schema.result
new file mode 100644
index 00000000000..fcd6cfa4a6e
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_ddl_schema.result
@@ -0,0 +1,23 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 1);
+INSERT INTO t1 VALUES (2, 2);
+INSERT INTO t1 VALUES (3, 3);
+INSERT INTO t1 VALUES (4, 4);
+INSERT INTO t1 VALUES (5, 5);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+DROP SCHEMA test;
+SELECT COUNT(*) = 0 FROM test.t1;
+ERROR 42S02: Table 'test.t1' doesn't exist
+connection node_1;
+INSERT INTO test.t1 VALUES (6, 6);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+INSERT INTO test.t1 VALUES (6, 6);
+ERROR 42S02: Table 'test.t1' doesn't exist
+CREATE SCHEMA test;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_ddl_slave.result b/mysql-test/suite/galera_sr/r/galera_sr_ddl_slave.result
new file mode 100644
index 00000000000..58663caf134
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_ddl_slave.result
@@ -0,0 +1,50 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 1);
+INSERT INTO t1 VALUES (2, 2);
+INSERT INTO t1 VALUES (3, 3);
+INSERT INTO t1 VALUES (4, 4);
+INSERT INTO t1 VALUES (5, 5);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+ALTER TABLE t1 DROP COLUMN f2;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+INSERT INTO t1 VALUES (6, 6);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+INSERT INTO t1 VALUES (6, 6);
+ERROR 21S01: Column count doesn't match value count at row 1
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_1;
+COMMIT;
+connection node_2;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_ddl_unrelated.result b/mysql-test/suite/galera_sr/r/galera_sr_ddl_unrelated.result
new file mode 100644
index 00000000000..0f23ade58c1
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_ddl_unrelated.result
@@ -0,0 +1,42 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 1);
+INSERT INTO t1 VALUES (2, 2);
+INSERT INTO t1 VALUES (3, 3);
+INSERT INTO t1 VALUES (4, 4);
+INSERT INTO t1 VALUES (5, 5);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+ALTER TABLE t2 DROP COLUMN f2;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+connection node_1;
+INSERT INTO t1 VALUES (6, 6);
+connection node_2;
+connection node_1;
+COMMIT;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 6 FROM t1;
+COUNT(*) = 6
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_dupkey_error.result b/mysql-test/suite/galera_sr/r/galera_sr_dupkey_error.result
new file mode 100644
index 00000000000..b23b934da33
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_dupkey_error.result
@@ -0,0 +1,46 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 BLOB) ENGINE=InnoDB;
+CREATE UNIQUE INDEX i1 ON t1 (f1(512));
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1024;
+INSERT INTO t1 VALUES (REPEAT('a', 512));
+INSERT INTO t1 VALUES (REPEAT('b', 512));
+INSERT INTO t1 VALUES (REPEAT('c', 512));
+INSERT INTO t1 VALUES (REPEAT('d', 512));
+INSERT INTO t1 VALUES (REPEAT('e', 512));
+INSERT INTO t1 VALUES (REPEAT('f', 512));
+connection node_2;
+connection node_1;
+INSERT INTO t1 VALUES (REPEAT('c', 512));
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+INSERT INTO t1 VALUES (REPEAT('d', 512));
+INSERT INTO t1 VALUES (REPEAT('e', 512));
+INSERT INTO t1 VALUES (REPEAT('f', 512));
+COMMIT;
+connection node_2;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (REPEAT('a', 512));
+INSERT INTO t1 VALUES (REPEAT('b', 512));
+INSERT INTO t1 VALUES (REPEAT('c', 512));
+COMMIT;
+connection node_1;
+SELECT COUNT(*) = 6 FROM t1;
+COUNT(*) = 6
+1
+connection node_2;
+SELECT COUNT(*) = 6 FROM t1;
+COUNT(*) = 6
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_fk_conflict.result b/mysql-test/suite/galera_sr/r/galera_sr_fk_conflict.result
new file mode 100644
index 00000000000..1d12533cc1d
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_fk_conflict.result
@@ -0,0 +1,39 @@
+connection node_2;
+connection node_1;
+CREATE TABLE grandparent (
+id INT NOT NULL PRIMARY KEY
+) ENGINE=InnoDB;
+CREATE TABLE parent (
+id INT NOT NULL PRIMARY KEY,
+grandparent_id INT,
+FOREIGN KEY (grandparent_id)
+REFERENCES grandparent(id)
+ON UPDATE CASCADE
+) ENGINE=InnoDB;
+CREATE TABLE child (
+id INT NOT NULL PRIMARY KEY,
+grandparent_id INT,
+FOREIGN KEY (grandparent_id)
+REFERENCES parent(grandparent_id)
+ON UPDATE CASCADE
+) ENGINE=InnoDB;
+INSERT INTO grandparent VALUES (1),(2),(3),(4);
+INSERT INTO parent VALUES (1,1), (2,2);
+INSERT INTO child VALUES (1,1), (2,2);
+connection node_1;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+UPDATE grandparent SET id = 5 WHERE id = 1;
+connection node_2;
+SET SESSION innodb_lock_wait_timeout = 1;
+UPDATE grandparent SET id = 10 WHERE id = 5;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+DELETE FROM child;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+connection node_1;
+COMMIT;
+include/diff_servers.inc [servers=1 2]
+DROP TABLE child;
+DROP TABLE parent;
+DROP TABLE grandparent;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_gtid.result b/mysql-test/suite/galera_sr/r/galera_sr_gtid.result
new file mode 100644
index 00000000000..be631d1d916
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_gtid.result
@@ -0,0 +1,57 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INT PRIMARY KEY);
+SET SESSION wsrep_trx_fragment_size=1;
+INSERT INTO t1 VALUES (1);
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+SET SESSION wsrep_trx_fragment_size=1;
+UPDATE t1 SET f1 = 2;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size=0;
+connection node_2;
+SET SESSION wsrep_trx_fragment_size=0;
+connection node_1;
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+SHOW BINLOG EVENTS IN 'mysqld-bin.000002' FROM 256;
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-bin.000002 <Pos> Gtid_list 1 <End_log_pos> []
+mysqld-bin.000002 <Pos> Binlog_checkpoint 1 <End_log_pos> mysqld-bin.000001
+mysqld-bin.000002 <Pos> Binlog_checkpoint 1 <End_log_pos> mysqld-bin.000002
+mysqld-bin.000002 <Pos> Gtid 1 <End_log_pos> GTID 0-1-1
+mysqld-bin.000002 <Pos> Query 1 <End_log_pos> use `test`; CREATE TABLE t1 (f1 INT PRIMARY KEY)
+mysqld-bin.000002 <Pos> Gtid 1 <End_log_pos> BEGIN GTID 0-1-2
+mysqld-bin.000002 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t1 VALUES (1)
+mysqld-bin.000002 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000002 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000002 <Pos> Xid 1 <End_log_pos> COMMIT /* xid=### */
+mysqld-bin.000002 <Pos> Gtid 2 <End_log_pos> BEGIN GTID 0-2-3
+mysqld-bin.000002 <Pos> Annotate_rows 2 <End_log_pos> UPDATE t1 SET f1 = 2
+mysqld-bin.000002 <Pos> Table_map 2 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000002 <Pos> Update_rows_v1 2 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000002 <Pos> Xid 2 <End_log_pos> COMMIT /* xid=### */
+connection node_2;
+SELECT 1 FROM DUAL;
+1
+1
+SHOW BINLOG EVENTS IN 'mysqld-bin.000003' FROM 256;
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-bin.000003 <Pos> Gtid_list 2 <End_log_pos> []
+mysqld-bin.000003 <Pos> Binlog_checkpoint 2 <End_log_pos> mysqld-bin.000003
+mysqld-bin.000003 <Pos> Gtid 1 <End_log_pos> GTID 0-1-1
+mysqld-bin.000003 <Pos> Query 1 <End_log_pos> use `test`; CREATE TABLE t1 (f1 INT PRIMARY KEY)
+mysqld-bin.000003 <Pos> Gtid 1 <End_log_pos> BEGIN GTID 0-1-2
+mysqld-bin.000003 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t1 VALUES (1)
+mysqld-bin.000003 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000003 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000003 <Pos> Xid 1 <End_log_pos> COMMIT /* xid=### */
+mysqld-bin.000003 <Pos> Gtid 2 <End_log_pos> BEGIN GTID 0-2-3
+mysqld-bin.000003 <Pos> Annotate_rows 2 <End_log_pos> UPDATE t1 SET f1 = 2
+mysqld-bin.000003 <Pos> Table_map 2 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000003 <Pos> Update_rows_v1 2 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000003 <Pos> Xid 2 <End_log_pos> COMMIT /* xid=### */
+DROP TABLE t1;
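
Even though every row change in galera_sr_gtid.result was shipped as its own fragment, the recorded binlogs show each transaction as a single BEGIN GTID ... COMMIT group, and the GTID sequence (0-1-1, 0-1-2, 0-2-3) is identical on both nodes: fragmentation is not visible at the binlog level. A sketch of the same check; the binlog file name is the one this test configuration produces and depends on --log-bin:

-- Compare replicated GTID groups on a node.
SHOW BINLOG EVENTS IN 'mysqld-bin.000002' FROM 256;
-- The per-domain GTID state should match on every node of the cluster.
SELECT @@GLOBAL.gtid_binlog_state;
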
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_insert_select.result b/mysql-test/suite/galera_sr/r/galera_sr_insert_select.result
new file mode 100644
index 00000000000..0302290123d
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_insert_select.result
@@ -0,0 +1,18 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE ten (f1 INTEGER);
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(255)) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 (f2) SELECT REPEAT('a', 255) FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_1;
+COMMIT;
+connection node_2;
+connection node_1;
+DROP TABLE t1;
+DROP TABLE ten;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_kill_all_nobootstrap.result b/mysql-test/suite/galera_sr/r/galera_sr_kill_all_nobootstrap.result
new file mode 100644
index 00000000000..45ca6909f29
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_kill_all_nobootstrap.result
@@ -0,0 +1,31 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+Killing server ...
+connection node_1;
+Killing server ...
+connection node_1;
+# restart
+connection node_2;
+# restart
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_kill_all_norecovery.result b/mysql-test/suite/galera_sr/r/galera_sr_kill_all_norecovery.result
new file mode 100644
index 00000000000..7ef86c65915
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_kill_all_norecovery.result
@@ -0,0 +1,35 @@
+connection node_2;
+connection node_1;
+connection node_1;
+connection node_2;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+connection node_2;
+Killing server ...
+connection node_1;
+SET SESSION wsrep_sync_wait = 0;
+Killing server ...
+# restart
+connection node_2;
+# restart
+connection node_1;
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+expect 0
+0
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) `expect 0` FROM t1;
+expect 0
+0
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+expect 0
+0
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_kill_all_pcrecovery.result b/mysql-test/suite/galera_sr/r/galera_sr_kill_all_pcrecovery.result
new file mode 100644
index 00000000000..7731c19e4da
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_kill_all_pcrecovery.result
@@ -0,0 +1,32 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+Killing server ...
+connection node_1;
+Killing server ...
+connection node_1;
+# restart
+connection node_2;
+# restart
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_kill_connection.result b/mysql-test/suite/galera_sr/r/galera_sr_kill_connection.result
new file mode 100644
index 00000000000..96a85bc038b
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_kill_connection.result
@@ -0,0 +1,32 @@
+connection node_2;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_1;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_2;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+COMMIT;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+connection node_1a;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_kill_query.result b/mysql-test/suite/galera_sr/r/galera_sr_kill_query.result
new file mode 100644
index 00000000000..59942e717e7
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_kill_query.result
@@ -0,0 +1,31 @@
+connection node_2;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5, ten AS a6;;
+connection node_2;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_1;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+Killing query ...
+connection node_1;
+ERROR 70100: Query execution was interrupted
+connection node_2;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+INSERT INTO t1 SELECT 1 FROM ten AS t1, ten AS t2, ten AS t3;
+SELECT COUNT(*) = 1000 FROM t1;
+COUNT(*) = 1000
+1
+connection node_1a;
+SELECT COUNT(*) = 1000 FROM t1;
+COUNT(*) = 1000
+1
+DROP TABLE t1;
+DROP TABLE ten;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_kill_slave.result b/mysql-test/suite/galera_sr/r/galera_sr_kill_slave.result
new file mode 100644
index 00000000000..d14de27d2b7
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_kill_slave.result
@@ -0,0 +1,54 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+connection node_2;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1;
+CREATE TABLE t2 (f1 INTEGER);
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+LOCK TABLE t2 WRITE;
+connection node_1;
+INSERT INTO t2 VALUES (1);
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+Killing server ...
+connection node_1;
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (7);
+INSERT INTO t1 VALUES (8);
+INSERT INTO t1 VALUES (9);
+INSERT INTO t1 VALUES (10);
+connection node_2;
+# restart
+connection node_1;
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+INSERT INTO t1 VALUES (13);
+INSERT INTO t1 VALUES (14);
+INSERT INTO t1 VALUES (15);
+COMMIT;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 15 FROM t1;
+COUNT(*) = 15
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1;
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_large_fragment.result b/mysql-test/suite/galera_sr/r/galera_sr_large_fragment.result
new file mode 100644
index 00000000000..bf111f5cee4
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_large_fragment.result
@@ -0,0 +1,33 @@
+connection node_2;
+connection node_1;
+CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 VARCHAR(512)) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1024 * 1024 * 10;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 512) FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5;
+connection node_2;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) > 50000 FROM t1;
+COUNT(*) > 50000
+1
+connection node_1;
+ROLLBACK;
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT10M';
+SET SESSION wsrep_sync_wait = 7;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+COUNT(*) = 0
+1
+COUNT(*) = 0
+1
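
Here the fragment size is 1024 * 1024 * 10, i.e. 10 MiB, against roughly 100 000 rows of 512-byte values, so the transaction still splits into a handful of large fragments that are visible on node_2 before the ROLLBACK. How the size is counted is governed by wsrep_trx_fragment_unit (an assumption here: per the 10.4 streaming-replication documentation it accepts 'bytes', 'rows' and 'statements', with 'bytes' as the default):

-- Sketch: switch fragment accounting from bytes to rows.
SET SESSION wsrep_trx_fragment_unit = 'rows';
SET SESSION wsrep_trx_fragment_size = 1000;   -- replicate a fragment every 1000 rows
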
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_load_data.result b/mysql-test/suite/galera_sr/r/galera_sr_load_data.result
new file mode 100644
index 00000000000..99885b42fa0
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_load_data.result
@@ -0,0 +1,14 @@
+connection node_2;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 512;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+connection node_2;
+connection node_1;
+connection node_2;
+SELECT COUNT(*) = 20000 FROM t1;
+COUNT(*) = 20000
+1
+wsrep_last_committed_diff
+1
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_load_data_splitting.result b/mysql-test/suite/galera_sr/r/galera_sr_load_data_splitting.result
new file mode 100644
index 00000000000..61e4b56aa43
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_load_data_splitting.result
@@ -0,0 +1,19 @@
+connection node_2;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 512;
+SET GLOBAL wsrep_load_data_splitting = TRUE;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+connection node_2;
+connection node_1;
+connection node_2;
+SELECT COUNT(*) = 95000 FROM t1;
+COUNT(*) = 95000
+1
+wsrep_last_committed_diff
+1
+connection node_1;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
+DROP TABLE t1;
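
The deprecation warning is the point of galera_sr_load_data_splitting.result: wsrep_load_data_splitting historically committed LOAD DATA in 10 000-row chunks, and the test exercises it together with a small streaming-replication fragment size, the recorded count of 95 000 showing all rows arriving as a single table load. A sketch of the combination; the input path is hypothetical:

-- Deprecated splitting switch combined with a small fragment size.
SET SESSION wsrep_trx_fragment_size = 512;
SET GLOBAL wsrep_load_data_splitting = TRUE;        -- emits the 1287 deprecation warning
LOAD DATA INFILE '/tmp/t1_rows.csv' INTO TABLE t1;  -- hypothetical file
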
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_log_bin.result b/mysql-test/suite/galera_sr/r/galera_sr_log_bin.result
new file mode 100644
index 00000000000..cb8e84383bc
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_log_bin.result
@@ -0,0 +1,124 @@
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t3 (f1 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t4 (f1 INTEGER) ENGINE=InnoDB;
+connection node_2;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t2 VALUES (1);
+connection node_2;
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t3 VALUES (1);
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2a;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t4 VALUES (1);
+connection node_1;
+INSERT INTO t1 VALUES (2);
+COMMIT;
+connection node_1a;
+INSERT INTO t2 VALUES (2);
+COMMIT;
+connection node_2;
+INSERT INTO t3 VALUES (2);
+COMMIT;
+connection node_2a;
+INSERT INTO t4 VALUES (2);
+COMMIT;
+connection node_1;
+SELECT COUNT(*) = 2 FROM t4;
+COUNT(*) = 2
+1
+SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 256;
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-bin.000001 <Pos> Gtid_list 1 <End_log_pos> []
+mysqld-bin.000001 <Pos> Binlog_checkpoint 1 <End_log_pos> mysqld-bin.000001
+mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> BEGIN GTID 0-1-1
+mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t1 VALUES (1)
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t1 VALUES (2)
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Xid 1 <End_log_pos> COMMIT /* xid=### */
+mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> BEGIN GTID 0-1-2
+mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t2 VALUES (1)
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t2)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t2 VALUES (2)
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t2)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Xid 1 <End_log_pos> COMMIT /* xid=### */
+mysqld-bin.000001 <Pos> Gtid 2 <End_log_pos> BEGIN GTID 0-2-3
+mysqld-bin.000001 <Pos> Annotate_rows 2 <End_log_pos> INSERT INTO t3 VALUES (1)
+mysqld-bin.000001 <Pos> Table_map 2 <End_log_pos> table_id: ### (test.t3)
+mysqld-bin.000001 <Pos> Write_rows_v1 2 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Annotate_rows 2 <End_log_pos> INSERT INTO t3 VALUES (2)
+mysqld-bin.000001 <Pos> Table_map 2 <End_log_pos> table_id: ### (test.t3)
+mysqld-bin.000001 <Pos> Write_rows_v1 2 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Xid 2 <End_log_pos> COMMIT /* xid=### */
+mysqld-bin.000001 <Pos> Gtid 2 <End_log_pos> BEGIN GTID 0-2-4
+mysqld-bin.000001 <Pos> Annotate_rows 2 <End_log_pos> INSERT INTO t4 VALUES (1)
+mysqld-bin.000001 <Pos> Table_map 2 <End_log_pos> table_id: ### (test.t4)
+mysqld-bin.000001 <Pos> Write_rows_v1 2 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Annotate_rows 2 <End_log_pos> INSERT INTO t4 VALUES (2)
+mysqld-bin.000001 <Pos> Table_map 2 <End_log_pos> table_id: ### (test.t4)
+mysqld-bin.000001 <Pos> Write_rows_v1 2 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Xid 2 <End_log_pos> COMMIT /* xid=### */
+connection node_2;
+SELECT COUNT(*) = 2 FROM t4;
+COUNT(*) = 2
+1
+SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 256;
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-bin.000001 <Pos> Gtid_list 2 <End_log_pos> []
+mysqld-bin.000001 <Pos> Binlog_checkpoint 2 <End_log_pos> mysqld-bin.000001
+mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> BEGIN GTID 0-1-1
+mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t1 VALUES (1)
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t1 VALUES (2)
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Xid 1 <End_log_pos> COMMIT /* xid=### */
+mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> BEGIN GTID 0-1-2
+mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t2 VALUES (1)
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t2)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t2 VALUES (2)
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t2)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Xid 1 <End_log_pos> COMMIT /* xid=### */
+mysqld-bin.000001 <Pos> Gtid 2 <End_log_pos> BEGIN GTID 0-2-3
+mysqld-bin.000001 <Pos> Annotate_rows 2 <End_log_pos> INSERT INTO t3 VALUES (1)
+mysqld-bin.000001 <Pos> Table_map 2 <End_log_pos> table_id: ### (test.t3)
+mysqld-bin.000001 <Pos> Write_rows_v1 2 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Annotate_rows 2 <End_log_pos> INSERT INTO t3 VALUES (2)
+mysqld-bin.000001 <Pos> Table_map 2 <End_log_pos> table_id: ### (test.t3)
+mysqld-bin.000001 <Pos> Write_rows_v1 2 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Xid 2 <End_log_pos> COMMIT /* xid=### */
+mysqld-bin.000001 <Pos> Gtid 2 <End_log_pos> BEGIN GTID 0-2-4
+mysqld-bin.000001 <Pos> Annotate_rows 2 <End_log_pos> INSERT INTO t4 VALUES (1)
+mysqld-bin.000001 <Pos> Table_map 2 <End_log_pos> table_id: ### (test.t4)
+mysqld-bin.000001 <Pos> Write_rows_v1 2 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Annotate_rows 2 <End_log_pos> INSERT INTO t4 VALUES (2)
+mysqld-bin.000001 <Pos> Table_map 2 <End_log_pos> table_id: ### (test.t4)
+mysqld-bin.000001 <Pos> Write_rows_v1 2 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Xid 2 <End_log_pos> COMMIT /* xid=### */
+DROP TABLE t1,t2,t3,t4;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_many_fragments.result b/mysql-test/suite/galera_sr/r/galera_sr_many_fragments.result
new file mode 100644
index 00000000000..8c89d100260
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_many_fragments.result
@@ -0,0 +1,33 @@
+connection node_2;
+connection node_1;
+CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 VARCHAR(512)) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 512) FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+connection node_2;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 10000 FROM t1;
+COUNT(*) = 10000
+1
+connection node_1;
+ROLLBACK;
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT10M';
+SET SESSION wsrep_sync_wait = 7;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+COUNT(*) = 0
+1
+COUNT(*) = 0
+1
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_myisam.result b/mysql-test/suite/galera_sr/r/galera_sr_myisam.result
new file mode 100644
index 00000000000..97818f072e1
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_myisam.result
@@ -0,0 +1,16 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 TEXT) ENGINE=MyISAM;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET GLOBAL wsrep_replicate_myisam = TRUE;
+INSERT INTO t1 VALUES (REPEAT('x', 65535));
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+SELECT LENGTH(f1) = 65535 FROM t1;
+LENGTH(f1) = 65535
+1
+DROP TABLE t1;
+connection node_1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_mysqldump_sst.result b/mysql-test/suite/galera_sr/r/galera_sr_mysqldump_sst.result
new file mode 100644
index 00000000000..2e94461f377
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_mysqldump_sst.result
@@ -0,0 +1,59 @@
+connection node_2;
+connection node_1;
+Setting SST method to mysqldump ...
+call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to '127.0.0.1'");
+call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos");
+connection node_1;
+CREATE USER 'sst';
+GRANT ALL PRIVILEGES ON *.* TO 'sst';
+SET GLOBAL wsrep_sst_auth = 'sst:';
+connection node_2;
+SET GLOBAL wsrep_sst_method = 'mysqldump';
+connection node_1;
+CREATE TABLE ten (f1 INTEGER);
+INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(255)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size = 1000;
+START TRANSACTION;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 255) FROM ten AS a1, ten AS a2, ten AS a3;
+UPDATE t1 SET f2 = REPEAT('y', 255);
+connection node_2;
+connection node_2;
+Shutting down server ...
+connection node_1;
+connection node_2;
+Starting server ...
+# restart: --wsrep_sst_auth=sst:sst --wsrep_sst_method=mysqldump --wsrep-sst-receive-address=127.0.0.1:16001
+connection node_1;
+connection node_2;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_1;
+UPDATE t1 SET f2 = REPEAT('z', 255);
+COMMIT;
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT COUNT(*) = 1000 FROM t1;
+COUNT(*) = 1000
+1
+SELECT COUNT(*) = 1000 FROM t1 WHERE f2 = REPEAT('z', 255);
+COUNT(*) = 1000
+1
+DROP TABLE t1;
+DROP TABLE ten;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size=0;
+connection node_1;
+CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query");
+DROP USER sst;
+connection node_2;
+CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query");
+CALL mtr.add_suppression("InnoDB: Error: Table \"mysql\"\\.\"innodb_index_stats\" not found");
+CALL mtr.add_suppression("Can't open and lock time zone table");
+CALL mtr.add_suppression("Can't open and lock privilege tables");
+CALL mtr.add_suppression("Info table is not ready to be used");
+CALL mtr.add_suppression("Native table .* has the wrong structure");
+CALL mtr.add_suppression("Table \'mysql.gtid_slave_pos\' doesn\'t exist");
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_parallel_apply.result b/mysql-test/suite/galera_sr/r/galera_sr_parallel_apply.result
new file mode 100644
index 00000000000..e2194e08cb8
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_parallel_apply.result
@@ -0,0 +1,37 @@
+connection node_2;
+connection node_1;
+connection node_2;
+SET GLOBAL wsrep_slave_threads = 5;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 (f2) VALUES (1);
+INSERT INTO t1 (f2) VALUES (1);
+INSERT INTO t1 (f2) VALUES (1);
+INSERT INTO t1 (f2) VALUES (1);
+INSERT INTO t1 (f2) VALUES (1);;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 (f2) VALUES (2);
+INSERT INTO t1 (f2) VALUES (2);
+INSERT INTO t1 (f2) VALUES (2);
+INSERT INTO t1 (f2) VALUES (2);
+INSERT INTO t1 (f2) VALUES (2);;
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_1;
+COMMIT;
+connection node_1a;
+ROLLBACK;
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+connection node_1;
+DROP TABLE t1;
+connection node_2;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_rollback.result b/mysql-test/suite/galera_sr/r/galera_sr_rollback.result
new file mode 100644
index 00000000000..4b275c6e0b6
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_rollback.result
@@ -0,0 +1,42 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER, f2 VARCHAR(10)) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+INSERT INTO t1 VALUES (3, 'a');
+INSERT INTO t1 VALUES (4, 'a');
+INSERT INTO t1 VALUES (5, 'a');
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_1;
+INSERT INTO t1 VALUES (11, 'b');
+INSERT INTO t1 VALUES (12, 'b');
+INSERT INTO t1 VALUES (13, 'b');
+INSERT INTO t1 VALUES (14, 'b');
+INSERT INTO t1 VALUES (15, 'b');
+connection node_2;
+connection node_1;
+ROLLBACK;
+connection node_2;
+connection node_1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+INSERT INTO t1 VALUES (3, 'a');
+INSERT INTO t1 VALUES (4, 'a');
+INSERT INTO t1 VALUES (5, 'a');
+INSERT INTO t1 VALUES (11, 'b');
+INSERT INTO t1 VALUES (12, 'b');
+INSERT INTO t1 VALUES (13, 'b');
+INSERT INTO t1 VALUES (14, 'b');
+INSERT INTO t1 VALUES (15, 'b');
+connection node_2;
+connection node_1;
+COMMIT;
+connection node_2;
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_rollback_retry.result b/mysql-test/suite/galera_sr/r/galera_sr_rollback_retry.result
new file mode 100644
index 00000000000..054f7cf2eae
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_rollback_retry.result
@@ -0,0 +1,33 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_1;
+ROLLBACK;
+connection node_2;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+COMMIT;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+connection node_1;
+SELECT COUNT(*) = 5 FROM t1;
+COUNT(*) = 5
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_rollback_savepoint.result b/mysql-test/suite/galera_sr/r/galera_sr_rollback_savepoint.result
new file mode 100644
index 00000000000..f2efa20f0d3
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_rollback_savepoint.result
@@ -0,0 +1,42 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER, f2 VARCHAR(10)) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+INSERT INTO t1 VALUES (3, 'a');
+INSERT INTO t1 VALUES (4, 'a');
+INSERT INTO t1 VALUES (5, 'a');
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_1;
+SAVEPOINT s1;
+INSERT INTO t1 VALUES (11, 'b');
+INSERT INTO t1 VALUES (12, 'b');
+INSERT INTO t1 VALUES (13, 'b');
+INSERT INTO t1 VALUES (14, 'b');
+INSERT INTO t1 VALUES (15, 'b');
+connection node_2;
+connection node_1;
+ROLLBACK TO SAVEPOINT s1;
+INSERT INTO t1 VALUES (21, 'c');
+INSERT INTO t1 VALUES (22, 'c');
+INSERT INTO t1 VALUES (23, 'c');
+INSERT INTO t1 VALUES (24, 'c');
+INSERT INTO t1 VALUES (25, 'c');
+connection node_2;
+SELECT COUNT(*) = 5 FROM t1 WHERE f2 = 'a';
+COUNT(*) = 5
+1
+SELECT COUNT(*) = 0 FROM t1 WHERE f2 = 'b';
+COUNT(*) = 0
+0
+SELECT COUNT(*) = 5 FROM t1 WHERE f2 = 'c';
+COUNT(*) = 5
+1
+connection node_1;
+COMMIT;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_rollback_statement.result b/mysql-test/suite/galera_sr/r/galera_sr_rollback_statement.result
new file mode 100644
index 00000000000..ce3214afe9e
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_rollback_statement.result
@@ -0,0 +1,33 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 CHAR) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'a'), (2, 'a'), (3, 'a');
+INSERT INTO t2 VALUES (3, 'b');
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t2 SELECT * FROM t1;
+ERROR 23000: Duplicate entry '3' for key 'PRIMARY'
+last_committed_diff
+3
+connection node_2;
+connection node_1;
+INSERT INTO t2 VALUES (1, 'c');
+connection node_2;
+INSERT INTO t2 VALUES (2, 'c');
+connection node_1;
+SELECT * FROM t2;
+f1 f2
+1 c
+2 c
+3 b
+connection node_2;
+SELECT * FROM t2;
+f1 f2
+1 c
+2 c
+3 b
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = DEFAULT;
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_shutdown_master.result b/mysql-test/suite/galera_sr/r/galera_sr_shutdown_master.result
new file mode 100644
index 00000000000..051b9d6b7ec
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_shutdown_master.result
@@ -0,0 +1,32 @@
+connection node_2;
+connection node_1;
+connection node_1;
+connection node_2;
+connection node_2;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE = InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1),(2),(3);
+connection node_1;
+connection node_2;
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_2;
+# restart
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+INSERT INTO t1 VALUES (1),(2),(3);
+connection node_1;
+SELECT COUNT(*) = 3 FROM t1;
+COUNT(*) = 3
+1
+DROP TABLE t1;
+connection node_2;
+CALL mtr.add_suppression("WSREP: Failed to replicate rollback fragment for ");
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_shutdown_slave.result b/mysql-test/suite/galera_sr/r/galera_sr_shutdown_slave.result
new file mode 100644
index 00000000000..6218cffcb78
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_shutdown_slave.result
@@ -0,0 +1,44 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE = InnoDB;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (11),(12),(13);
+connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1b;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (21),(22),(23);
+connection node_2;
+connection node_1;
+connection node_1a;
+INSERT INTO t1 VALUES (14),(15),(16);
+COMMIT;
+connection node_2;
+# restart
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) > 0
+1
+SELECT COUNT(*) = 6 FROM t1 WHERE f1 IN (11,12,13,14,15,16);
+COUNT(*) = 6
+1
+connection node_1b;
+INSERT INTO t1 VALUES (24),(25),(26);
+COMMIT;
+connection node_2;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 12 FROM t1;
+COUNT(*) = 12
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_small_gcache.result b/mysql-test/suite/galera_sr/r/galera_sr_small_gcache.result
new file mode 100644
index 00000000000..875f2df5214
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_small_gcache.result
@@ -0,0 +1,15 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+SET SESSION wsrep_trx_fragment_size = 1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+connection node_2;
+SELECT COUNT(*) = 10000 FROM t1;
+COUNT(*) = 10000
+1
+connection node_1;
+DROP TABLE t1;
+DROP TABLE ten;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_table_contents.result b/mysql-test/suite/galera_sr/r/galera_sr_table_contents.result
new file mode 100644
index 00000000000..006a1a9da96
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_table_contents.result
@@ -0,0 +1,178 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3);
+CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3);
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+Start of Simple Insert
+INSERT INTO t1 VALUES (4);
+DELIMITER /*!*/;
+# at 256
+# at 300
+<ISO TIMESTAMP> server id 1 end_log_pos 85 CRC32 0x00000301 Table_map: `test`.`t1` mapped to number <TABLE_ID>
+# at 341
+<ISO TIMESTAMP> server id 1 end_log_pos 119 CRC32 0x00000004 Write_rows: table id <TABLE_ID> flags: STMT_END_F
+# Number of rows: 1
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+End of Simple Insert
+
+ROLLBACK;
+Start of Multi-row Update
+UPDATE t1 SET f1 = f1 + 10;
+DELIMITER /*!*/;
+# at 256
+# at 301
+<ISO TIMESTAMP> server id 1 end_log_pos 86 CRC32 0x00000301 Table_map: `test`.`t1` mapped to number <TABLE_ID>
+# at 342
+<ISO TIMESTAMP> server id 1 end_log_pos 126 CRC32 0x0000000b Update_rows: table id <TABLE_ID> flags: STMT_END_F
+# Number of rows: 1
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+DELIMITER /*!*/;
+# at 256
+# at 301
+<ISO TIMESTAMP> server id 1 end_log_pos 212 CRC32 0x00000301 Table_map: `test`.`t1` mapped to number <TABLE_ID>
+# at 342
+<ISO TIMESTAMP> server id 1 end_log_pos 252 CRC32 0x0000000c Update_rows: table id <TABLE_ID> flags: STMT_END_F
+# Number of rows: 1
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+DELIMITER /*!*/;
+# at 256
+# at 301
+<ISO TIMESTAMP> server id 1 end_log_pos 338 CRC32 0x00000301 Table_map: `test`.`t1` mapped to number <TABLE_ID>
+# at 342
+<ISO TIMESTAMP> server id 1 end_log_pos 378 CRC32 0x0000000d Update_rows: table id <TABLE_ID> flags: STMT_END_F
+# Number of rows: 1
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+End of Multi-row Update
+
+ROLLBACK;
+Start of Multi-table Update
+UPDATE t1, t2 SET t1.f1 = t1.f1 + 100, t2.f1 = t2.f1 + 100;
+DELIMITER /*!*/;
+# at 256
+# at 333
+<ISO TIMESTAMP> server id 1 end_log_pos 118 CRC32 0x00000301 Table_map: `test`.`t1` mapped to number <TABLE_ID>
+# at 374
+<ISO TIMESTAMP> server id 1 end_log_pos 159 CRC32 0x00000301 Table_map: `test`.`t2` mapped to number <TABLE_ID>
+# at 415
+<ISO TIMESTAMP> server id 1 end_log_pos 199 CRC32 0x00000065 Update_rows: table id <TABLE_ID> flags: STMT_END_F
+# Number of rows: 1
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+DELIMITER /*!*/;
+# at 256
+# at 333
+<ISO TIMESTAMP> server id 1 end_log_pos 317 CRC32 0x00000301 Table_map: `test`.`t1` mapped to number <TABLE_ID>
+# at 374
+<ISO TIMESTAMP> server id 1 end_log_pos 358 CRC32 0x00000301 Table_map: `test`.`t2` mapped to number <TABLE_ID>
+# at 415
+<ISO TIMESTAMP> server id 1 end_log_pos 398 CRC32 0x00000066 Update_rows: table id <TABLE_ID> flags: STMT_END_F
+# Number of rows: 1
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+DELIMITER /*!*/;
+# at 256
+# at 333
+<ISO TIMESTAMP> server id 1 end_log_pos 516 CRC32 0x00000301 Table_map: `test`.`t1` mapped to number <TABLE_ID>
+# at 374
+<ISO TIMESTAMP> server id 1 end_log_pos 557 CRC32 0x00000301 Table_map: `test`.`t2` mapped to number <TABLE_ID>
+# at 415
+<ISO TIMESTAMP> server id 1 end_log_pos 597 CRC32 0x00000067 Update_rows: table id <TABLE_ID> flags: STMT_END_F
+# Number of rows: 1
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+DELIMITER /*!*/;
+# at 256
+# at 333
+<ISO TIMESTAMP> server id 1 end_log_pos 715 CRC32 0x00000301 Table_map: `test`.`t1` mapped to number <TABLE_ID>
+# at 374
+<ISO TIMESTAMP> server id 1 end_log_pos 756 CRC32 0x00000301 Table_map: `test`.`t2` mapped to number <TABLE_ID>
+# at 415
+<ISO TIMESTAMP> server id 1 end_log_pos 796 CRC32 0x00000065 Update_rows: table id <TABLE_ID> flags: STMT_END_F
+# Number of rows: 1
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+DELIMITER /*!*/;
+# at 256
+# at 333
+<ISO TIMESTAMP> server id 1 end_log_pos 914 CRC32 0x00000301 Table_map: `test`.`t1` mapped to number <TABLE_ID>
+# at 374
+<ISO TIMESTAMP> server id 1 end_log_pos 955 CRC32 0x00000301 Table_map: `test`.`t2` mapped to number <TABLE_ID>
+# at 415
+<ISO TIMESTAMP> server id 1 end_log_pos 995 CRC32 0x00000066 Update_rows: table id <TABLE_ID> flags: STMT_END_F
+# Number of rows: 1
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+DELIMITER /*!*/;
+# at 256
+# at 333
+<ISO TIMESTAMP> server id 1 end_log_pos 1113 CRC32 0x00000301 Table_map: `test`.`t1` mapped to number <TABLE_ID>
+# at 374
+<ISO TIMESTAMP> server id 1 end_log_pos 1154 CRC32 0x00000301 Table_map: `test`.`t2` mapped to number <TABLE_ID>
+# at 415
+<ISO TIMESTAMP> server id 1 end_log_pos 1194 CRC32 0x00000067 Update_rows: table id <TABLE_ID> flags: STMT_END_F
+# Number of rows: 1
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+End of Multi-table Update
+
+ROLLBACK;
+Start of Savepoint
+INSERT INTO t1 VALUES (1000);
+SAVEPOINT X;
+INSERT INTO t1 VALUES (2000);
+ROLLBACK TO SAVEPOINT X;
+DELIMITER /*!*/;
+# at 256
+# at 303
+<ISO TIMESTAMP> server id 1 end_log_pos 88 CRC32 0x00000301 Table_map: `test`.`t1` mapped to number <TABLE_ID>
+# at 344
+<ISO TIMESTAMP> server id 1 end_log_pos 122 CRC32 0x000003e8 Write_rows: table id <TABLE_ID> flags: STMT_END_F
+# Number of rows: 1
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+DELIMITER /*!*/;
+# at 256
+# at 303
+<ISO TIMESTAMP> server id 1 end_log_pos 210 CRC32 0x00000301 Table_map: `test`.`t1` mapped to number <TABLE_ID>
+# at 344
+<ISO TIMESTAMP> server id 1 end_log_pos 244 CRC32 0x000007d0 Write_rows: table id <TABLE_ID> flags: STMT_END_F
+# Number of rows: 1
+DELIMITER ;
+# End of log file
+ROLLBACK /* added by mysqlbinlog */;
+/*!50003 SET COMPLETION_TYPE=@OLD_COMPLETION_TYPE*/;
+End of Savepoint
+
+ROLLBACK;
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_transaction_replay.result b/mysql-test/suite/galera_sr/r/galera_sr_transaction_replay.result
new file mode 100644
index 00000000000..5806ab5558d
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_transaction_replay.result
@@ -0,0 +1,121 @@
+connection node_2;
+connection node_1;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+SET SESSION wsrep_sync_wait = 0;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1));
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+connection node_1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE;
+f1 f2
+2 a
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
+COMMIT;
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
+connection node_1;
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+wsrep_local_replays
+1
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DELETE FROM t1;
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+connection node_1;
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 1;
+SET SESSION wsrep_trx_fragment_size = 0;
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE;
+f1 f2
+2 a
+connection node_1a;
+SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync';
+connection node_2;
+UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
+connection node_1a;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,local_monitor_master_enter_sync';
+connection node_1;
+COMMIT;
+connection node_1a;
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'dbug=d,abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync';
+SET SESSION wsrep_on = 0;
+SET SESSION wsrep_on = 1;
+SET GLOBAL wsrep_provider_options = 'dbug=';
+SET GLOBAL wsrep_provider_options = 'signal=abort_trx_end';
+SET GLOBAL wsrep_provider_options = 'signal=local_monitor_master_enter_sync';
+connection node_1;
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+wsrep_local_replays
+1
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DELETE FROM t1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_unit_statements.result b/mysql-test/suite/galera_sr/r/galera_sr_unit_statements.result
new file mode 100644
index 00000000000..4e3bd52483e
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_unit_statements.result
@@ -0,0 +1,54 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 3;
+SET SESSION wsrep_trx_fragment_unit = 'statements';
+connection node_1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+0
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+0
+connection node_1;
+INSERT INTO t1 VALUES (2);
+connection node_2;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+2
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+1
+connection node_1;
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+5
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+2
+connection node_1;
+COMMIT;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+5
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+0
+connection node_2;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+5
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+COUNT(*)
+0
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_v1_row_events.result b/mysql-test/suite/galera_sr/r/galera_sr_v1_row_events.result
new file mode 100644
index 00000000000..ab090e5c2a7
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_v1_row_events.result
@@ -0,0 +1,20 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+connection node_1;
+COMMIT;
+SET AUTOCOMMIT=ON;
+UPDATE t1 SET f1 = 2 WHERE f1 = 1;
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+COUNT(*) = 1
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_ws_size.result b/mysql-test/suite/galera_sr/r/galera_sr_ws_size.result
new file mode 100644
index 00000000000..b7bdd94dd68
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_ws_size.result
@@ -0,0 +1,36 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 VARCHAR(254)) ENGINE=InnoDB;
+CREATE TABLE ten (f1 INTEGER);
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+SET SESSION wsrep_trx_fragment_size = 512;
+SET GLOBAL wsrep_provider_options='repl.max_ws_size=4096';
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_1;
+COMMIT;
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+SELECT COUNT(*) = 100 FROM t1;
+COUNT(*) = 100
+1
+DROP TABLE t1;
+DROP TABLE ten;
+connection node_1;
+call mtr.add_suppression('WSREP: transaction size limit.*');
+call mtr.add_suppression('WSREP: rbr write fail.*');
+call mtr.add_suppression('WSREP: Maximum writeset size exceeded by.*');
+call mtr.add_suppression('WSREP: transaction size exceeded.*');
diff --git a/mysql-test/suite/galera_sr/r/galera_sr_ws_size2.result b/mysql-test/suite/galera_sr/r/galera_sr_ws_size2.result
new file mode 100644
index 00000000000..6bd8b6b8212
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_sr_ws_size2.result
@@ -0,0 +1,34 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 VARCHAR(254)) ENGINE=InnoDB;
+CREATE TABLE ten (f1 INTEGER);
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+SET SESSION wsrep_trx_fragment_size = 256;
+SET GLOBAL wsrep_provider_options='repl.max_ws_size=128';
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1, ten AS a2;
+Got one of the listed errors
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1;
+DROP TABLE t1;
+DROP TABLE ten;
+call mtr.add_suppression('WSREP: SR rollback replication failure.*');
+call mtr.add_suppression('WSREP: transaction size limit.*');
+call mtr.add_suppression('WSREP: SR rbr write fail.*');
+call mtr.add_suppression('WSREP: Maximum writeset size exceeded by.*');
+call mtr.add_suppression('WSREP: transaction size exceeded.*');
+call mtr.add_suppression('WSREP: fragment replication failed:');
+call mtr.add_suppression('WSREP: post commit failed for SR rollback');
+call mtr.add_suppression('WSREP: pre_commit for SR rollback returned 2, thd:*');
+call mtr.add_suppression('WSREP: wsrep_rollback failed to send SR ROLLBACK for *');
diff --git a/mysql-test/suite/galera_sr/r/galera_var_ignore_apply_errors_sr.result b/mysql-test/suite/galera_sr/r/galera_var_ignore_apply_errors_sr.result
new file mode 100644
index 00000000000..852208437e5
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/galera_var_ignore_apply_errors_sr.result
@@ -0,0 +1,29 @@
+connection node_2;
+connection node_1;
+connection node_2;
+SET GLOBAL wsrep_ignore_apply_errors = 2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER);
+INSERT INTO t1 VALUES (2);
+SET GLOBAL wsrep_on = OFF;
+INSERT INTO t1 VALUES (1);
+SET GLOBAL wsrep_on = ON;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (3);
+DELETE FROM t1 WHERE f1 = 1;
+DELETE FROM t1 WHERE f1 = 2;
+COMMIT;
+connection node_1;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+SET SESSION wsrep_trx_fragment_size = 0;
+DROP TABLE t1;
+SET GLOBAL wsrep_ignore_apply_errors = 7;
+CALL mtr.add_suppression("Slave SQL: Could not execute Delete_rows event");
+CALL mtr.add_suppression("Can't find record in 't1'");
diff --git a/mysql-test/suite/galera_sr/r/mdev_18631.result b/mysql-test/suite/galera_sr/r/mdev_18631.result
new file mode 100644
index 00000000000..bc6c0e48538
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mdev_18631.result
@@ -0,0 +1,21 @@
+connection node_2;
+connection node_1;
+# On node_1
+connection node_1;
+CREATE TABLE t1(f1 INT PRIMARY KEY) ENGINE=INNODB;
+INSERT INTO t1 VALUES (1), (2), (3);
+connection node_2;
+SELECT * FROM t1;
+f1
+1
+2
+3
+connection node_1;
+SELECT * FROM t1;
+f1
+1
+2
+3
+DROP TABLE t1;
+disconnect node_2;
+disconnect node_1;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep#215.result b/mysql-test/suite/galera_sr/r/mysql-wsrep#215.result
new file mode 100644
index 00000000000..623bef4c3c4
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep#215.result
@@ -0,0 +1,137 @@
+connection node_2;
+connection node_1;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 2;
+SET SESSION wsrep_trx_fragment_unit = 'statements';
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+connection node_1a;
+SET GLOBAL DEBUG_DBUG = 'd,sync.wsrep_apply_cb';
+SET SESSION wsrep_sync_wait = 0;
+connection node_2;
+INSERT INTO t1 VALUES (1);
+connection node_1a;
+SET SESSION debug_sync = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+INSERT INTO t1 VALUES (1);;
+connection node_1a;
+connection node_1a;
+SET GLOBAL DEBUG_DBUG = '';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+connection node_1;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+COMMIT;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1a;
+SET DEBUG_SYNC = 'RESET';
+connection node_1;
+TRUNCATE TABLE t1;
+SET SESSION wsrep_trx_fragment_size = 10;
+SET SESSION wsrep_trx_fragment_unit = 'bytes';
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+connection node_1a;
+SET GLOBAL DEBUG_DBUG = 'd,sync.wsrep_apply_cb';
+SET SESSION wsrep_sync_wait = 0;
+connection node_2;
+INSERT INTO t1 VALUES (1);
+connection node_1a;
+SET SESSION debug_sync = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+INSERT INTO t1 VALUES (1);
+connection node_1a;
+SET GLOBAL DEBUG_DBUG = '';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+connection node_1;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+ROLLBACK;
+SELECT * FROM t1;
+f1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT * FROM t1;
+f1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1a;
+SET DEBUG_SYNC = 'RESET';
+connection node_1;
+TRUNCATE TABLE t1;
+SET SESSION wsrep_trx_fragment_size = 200;
+SET SESSION wsrep_trx_fragment_unit = 'bytes';
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+connection node_1a;
+SET GLOBAL DEBUG_DBUG = 'd,sync.wsrep_apply_cb';
+SET SESSION wsrep_sync_wait = 0;
+connection node_2;
+INSERT INTO t1 VALUES (1);
+connection node_1a;
+SET SESSION debug_sync = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+connection node_1a;
+SET GLOBAL DEBUG_DBUG = '';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+connection node_1;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+COMMIT;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 1 FROM t1;
+COUNT(*) = 1
+1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_1a;
+DROP TABLE t1;
+SET DEBUG_SYNC = 'RESET';
+connection node_2;
+CALL mtr.add_suppression("WSREP: Could not find applier context for");
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#136.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#136.result
new file mode 100644
index 00000000000..84246a8eed2
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#136.result
@@ -0,0 +1,63 @@
+connection node_2;
+connection node_1;
+connection node_1;
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+connection node_2;
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2);
+COMMIT;
+SET SESSION wsrep_trx_fragment_size = 0;
+INSERT INTO t1 VALUES (3),(4);
+COMMIT;
+connection node_1;
+SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 256;
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-bin.000001 <Pos> Gtid_list 1 <End_log_pos> []
+mysqld-bin.000001 <Pos> Binlog_checkpoint 1 <End_log_pos> mysqld-bin.000001
+mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> GTID 0-1-1
+mysqld-bin.000001 <Pos> Query 1 <End_log_pos> use `test`; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB
+mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> BEGIN GTID 0-1-2
+mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t1 VALUES (1),(2)
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Xid 1 <End_log_pos> COMMIT /* xid=### */
+mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> BEGIN GTID 0-1-3
+mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t1 VALUES (3),(4)
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Xid 1 <End_log_pos> COMMIT /* xid=### */
+connection node_2;
+SELECT COUNT(*) = 4 FROM t1;
+COUNT(*) = 4
+1
+SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 256;
+Log_name Pos Event_type Server_id End_log_pos Info
+mysqld-bin.000001 <Pos> Gtid_list 2 <End_log_pos> []
+mysqld-bin.000001 <Pos> Binlog_checkpoint 2 <End_log_pos> mysqld-bin.000001
+mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> GTID 0-1-1
+mysqld-bin.000001 <Pos> Query 1 <End_log_pos> use `test`; CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB
+mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> BEGIN GTID 0-1-2
+mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t1 VALUES (1),(2)
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Xid 1 <End_log_pos> COMMIT /* xid=### */
+mysqld-bin.000001 <Pos> Gtid 1 <End_log_pos> BEGIN GTID 0-1-3
+mysqld-bin.000001 <Pos> Annotate_rows 1 <End_log_pos> INSERT INTO t1 VALUES (3),(4)
+mysqld-bin.000001 <Pos> Table_map 1 <End_log_pos> table_id: ### (test.t1)
+mysqld-bin.000001 <Pos> Write_rows_v1 1 <End_log_pos> table_id: ### flags: STMT_END_F
+mysqld-bin.000001 <Pos> Xid 1 <End_log_pos> COMMIT /* xid=### */
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#138.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#138.result
new file mode 100644
index 00000000000..fc9afd6e1e0
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#138.result
@@ -0,0 +1,24 @@
+connection node_2;
+connection node_1;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2);
+connection node_2;
+SELECT flags FROM mysql.wsrep_streaming_log;
+flags
+1
+0
+connection node_1;
+ROLLBACK;
+INSERT INTO t1 VALUES (3),(4);
+connection node_2;
+SELECT flags FROM mysql.wsrep_streaming_log;
+flags
+1
+0
+connection node_1;
+ROLLBACK;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#14.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#14.result
new file mode 100644
index 00000000000..b09c7d4047a
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#14.result
@@ -0,0 +1,12 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+COMMIT;
+DROP TABLE t1;
+connection node_2;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#148.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#148.result
new file mode 100644
index 00000000000..98bf13e9d2b
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#148.result
@@ -0,0 +1,39 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (6),(7),(8),(9),(10),(1);
+connection node_2;
+SET GLOBAL wsrep_slave_threads = 2;
+SET GLOBAL debug_dbug = 'd,sync.wsrep_apply_cb';
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1), (2), (3), (4), (5);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1a;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1), (2), (3), (4), (5);;
+connection node_1;
+INSERT INTO t1 SELECT * FROM t2;;
+connection node_1a;
+INSERT INTO t1 VALUES (6), (7), (8), (9), (10);
+COMMIT;
+connection node_1;
+Got one of the listed errors
+connection node_2;
+SET GLOBAL wsrep_slave_threads = 1;
+SET GLOBAL debug_dbug = '';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+SELECT COUNT(*) = 10 FROM t1;
+COUNT(*) = 10
+1
+DROP TABLE t1;
+DROP TABLE t2;
+SET DEBUG_SYNC = RESET;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#15.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#15.result
new file mode 100644
index 00000000000..610019e2b48
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#15.result
@@ -0,0 +1,11 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (id INT) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+COMMIT;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#165.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#165.result
new file mode 100644
index 00000000000..8ad3ac63438
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#165.result
@@ -0,0 +1,1045 @@
+connection node_2;
+connection node_1;
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connect node_1c, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+connection node_1a;
+START TRANSACTION;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+connection node_1b;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1c;
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (3, 'c');
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 x
+2 x
+4 x
+5 x
+connection node_1a;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 2;
+connection node_1;
+connection node_1b;
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+connection node_1;
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+connection node_1;
+SET DEBUG_SYNC = 'now SIGNAL continue';
+connection node_1c;
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+connection node_1a;
+COMMIT;
+connection node_1b;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+connection node_1;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_2;
+SELECT * FROM t1;
+f1 f2
+1 a
+2 a
+3 x
+4 a
+5 a
+connection node_1;
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#22.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#22.result
new file mode 100644
index 00000000000..0053619187c
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#22.result
@@ -0,0 +1,35 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER, f2 VARCHAR(10)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+INSERT INTO t1 VALUES (3, 'a');
+INSERT INTO t1 VALUES (4, 'a');
+INSERT INTO t1 VALUES (5, 'a');
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+connection node_1;
+SAVEPOINT s1;
+INSERT INTO t1 VALUES (11, 'b');
+INSERT INTO t1 VALUES (12, 'b');
+INSERT INTO t1 VALUES (13, 'b');
+INSERT INTO t1 VALUES (14, 'b');
+INSERT INTO t1 VALUES (15, 'b');
+connection node_2;
+connection node_1;
+ROLLBACK TO SAVEPOINT s1;
+INSERT INTO t1 VALUES (21, 'c');
+COMMIT;
+connection node_1;
+SELECT COUNT(*) = 6 FROM t1;
+COUNT(*) = 6
+1
+connection node_2;
+SELECT COUNT(*) = 6 FROM t1;
+COUNT(*) = 6
+0
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#27.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#27.result
new file mode 100644
index 00000000000..4cbcd49dd24
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#27.result
@@ -0,0 +1,23 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
+ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+connection node_1;
+COMMIT;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#32.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#32.result
new file mode 100644
index 00000000000..ca749a45e9a
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#32.result
@@ -0,0 +1,27 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SET AUTOCOMMIT=OFF;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+START TRANSACTION;
+INSERT INTO t1 VALUES (9);
+INSERT INTO t1 VALUES (8);
+INSERT INTO t1 VALUES (7);
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (5);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+ROLLBACK;
+connection node_1;
+COMMIT;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#35.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#35.result
new file mode 100644
index 00000000000..3f463875eb3
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#35.result
@@ -0,0 +1,37 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SET SESSION wsrep_sync_wait = 0;
+SET GLOBAL debug_dbug = '+d,sync.wsrep_apply_cb';
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+connection node_2a;
+SET SESSION debug_sync = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+connection node_2;
+SET SESSION wsrep_sync_wait = 0;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+INSERT INTO t1 VALUES (1);;
+connection node_1;
+COMMIT;
+connection node_2a;
+SET GLOBAL debug_dbug = '';
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+connection node_2;
+Got one of the listed errors
+ROLLBACK;
+DROP TABLE t1;
+connection node_2a;
+SET DEBUG_SYNC = "RESET";
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#8.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#8.result
new file mode 100644
index 00000000000..56905c03e10
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#8.result
@@ -0,0 +1,39 @@
+connection node_2;
+connection node_1;
+SET SESSION wsrep_trx_fragment_size = 1;
+CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+connection node_1;
+CREATE TABLE t1 (f1 INT PRIMARY KEY AUTO_INCREMENT, f2 VARCHAR(100), FULLTEXT (f2)) ENGINE=InnoDB;
+connection node_2;
+SELECT COUNT(*) = 13 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE name LIKE 'test/%';
+COUNT(*) = 13
+1
+connection node_1;
+INSERT INTO t1 (f2) SELECT 'foobarbaz' FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+connection node_2;
+SELECT COUNT(f2) = 10000 FROM t1 WHERE MATCH(f2) AGAINST ('foobarbaz');
+COUNT(f2) = 10000
+1
+UPDATE t1 SET f2 = 'abcdefjhk';
+connection node_1;
+SELECT COUNT(f2) = 10000 FROM t1 WHERE MATCH(f2) AGAINST ('abcdefjhk');
+COUNT(f2) = 10000
+1
+connection node_2;
+DROP TABLE t1;
+connection node_1;
+CREATE TABLE t1 (f1 VARCHAR(100), FULLTEXT (f1)) ENGINE=InnoDB;
+connection node_2;
+INSERT INTO t1 (f1) SELECT 'foobarbaz' FROM ten AS a1, ten AS a2, ten AS a3;
+connection node_1;
+SELECT COUNT(f1) = 1000 FROM t1 WHERE MATCH(f1) AGAINST ('foobarbaz');
+COUNT(f1) = 1000
+1
+UPDATE t1 SET f1 = 'abcdefjhk';
+connection node_2;
+SELECT COUNT(f1) = 1000 FROM t1 WHERE MATCH(f1) AGAINST ('abcdefjhk');
+COUNT(f1) = 1000
+1
+DROP TABLE t1;
+DROP TABLE ten;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#9.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#9.result
new file mode 100644
index 00000000000..312c8efc6ef
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#9.result
@@ -0,0 +1,21 @@
+connection node_2;
+connection node_1;
+connection node_1;
+SET GLOBAL wsrep_provider_options = 'pc.ignore_sb=true';
+SET SESSION wsrep_trx_fragment_size = 1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+connection node_2;
+Killing server ...
+connection node_1;
+ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+connection node_2;
+# restart
+connection node_2a;
+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME='t1';
+COUNT(*) = 2
+1
+SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+VARIABLE_VALUE = 2
+1
+connection node_1;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#93.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#93.result
new file mode 100644
index 00000000000..17f71213767
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#93.result
@@ -0,0 +1,18 @@
+connection node_2;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER);
+SET SESSION WSREP_TRX_FRAGMENT_SIZE=1;
+START TRANSACTION;
+SAVEPOINT a;
+INSERT INTO t1 VALUES (1);
+ROLLBACK TO SAVEPOINT a;
+INSERT INTO t1 values (2);
+COMMIT;
+SELECT COUNT(*) = 0 from mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+connection node_2;
+SELECT COUNT(*) = 0 from mysql.wsrep_streaming_log;
+COUNT(*) = 0
+1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/r/mysql-wsrep-features#96.result b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#96.result
new file mode 100644
index 00000000000..dbe91aad9fd
--- /dev/null
+++ b/mysql-test/suite/galera_sr/r/mysql-wsrep-features#96.result
@@ -0,0 +1,33 @@
+connection node_2;
+connection node_1;
+connection node_1;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+CREATE TABLE t2 (f2 VARCHAR(32));
+SET SESSION wsrep_trx_fragment_size=1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2),(1);
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+INSERT INTO t2 VALUES ('abc');
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 1 FROM t2;
+COUNT(*) = 1
+1
+connection node_1;
+ROLLBACK;
+connection node_2;
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+COUNT(*) = 0
+1
+SELECT COUNT(*) = 0 FROM t2;
+COUNT(*) = 0
+1
+connection node_1;
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/t/GCF-1008.inc b/mysql-test/suite/galera_sr/t/GCF-1008.inc
new file mode 100644
index 00000000000..69245b3a91f
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-1008.inc
@@ -0,0 +1,36 @@
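+# Scenario reused with different sync points (set by the caller in $galera_sync_point):
+# node_2 starts an SR transaction whose COMMIT is blocked at the sync point, node_1 issues
+# a conflicting REPLACE, and the table contents are checked once the commit is released.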
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(255)) ENGINE=InnoDB;
+
+--connection node_2
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'x');
+
+--connection node_2a
+--source include/galera_set_sync_point.inc
+
+--connection node_2
+--send COMMIT
+
+--connection node_2b
+--sleep 1
+SET SESSION wsrep_sync_wait = 0;
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_1
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 1;
+SELECT COUNT(*) = 1 FROM t1;
+REPLACE INTO t1 VALUES (1,'y');
+
+--connection node_2b
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_2a
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+--connection node_2
+--reap
+SELECT COUNT(*) = 1 FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/GCF-1008.test b/mysql-test/suite/galera_sr/t/GCF-1008.test
new file mode 100644
index 00000000000..c6926840bd1
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-1008.test
@@ -0,0 +1,18 @@
+#
+# GCF-1008 SR trx fails to apply because previous trx is not committed yet on applier
+#
+
+--source include/have_debug_sync.inc
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/galera_have_debug_sync.inc
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2
+
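+# Run the same scenario (GCF-1008.inc) with two different galera sync points.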
+--let $galera_sync_point = before_local_commit_monitor_enter
+--source GCF-1008.inc
+
+--let $galera_sync_point = before_certify_apply_monitor_enter
+--source GCF-1008.inc
+
diff --git a/mysql-test/suite/galera_sr/t/GCF-1018.test b/mysql-test/suite/galera_sr/t/GCF-1018.test
new file mode 100644
index 00000000000..9a184467ebd
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-1018.test
@@ -0,0 +1,39 @@
+#
+# SR: Node hang with one thread waiting in InnoDB
+#
+--source include/have_debug_sync.inc
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/galera_have_debug_sync.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(255)) ENGINE=InnoDB;
+INSERT INTO t1 (f2) VALUES ('a');
+INSERT INTO t1 (f2) VALUES ('b');
+INSERT INTO t1 (f2) VALUES ('c');
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connection node_2a
+SET SESSION wsrep_sync_wait = 0;
+--let $galera_sync_point = after_certify_apply_monitor_enter
+--source include/galera_set_sync_point.inc
+
+--connection node_2
+SET SESSION wsrep_retry_autocommit = 0;
+SET SESSION wsrep_trx_fragment_size = 64;
+--send DELETE FROM t1 ORDER BY f1 DESC LIMIT 2;
+
+--connection node_2a
+--source include/galera_wait_sync_point.inc
+
+--connection node_1
+INSERT INTO t1 (f2) VALUES ('d'),('e');
+
+--connection node_2a
+--source include/galera_signal_sync_point.inc
+--source include/galera_clear_sync_point.inc
+
+--connection node_2
+--error ER_LOCK_DEADLOCK
+--reap
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/GCF-1018B.test b/mysql-test/suite/galera_sr/t/GCF-1018B.test
new file mode 100644
index 00000000000..f11309080c0
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-1018B.test
@@ -0,0 +1,40 @@
+#
+# SR: Node hang with one thread waiting in InnoDB
+#
+--source include/galera_cluster.inc
+--source include/big_test.inc
+
+--connection node_1
+SET SESSION wsrep_sync_wait = 0;
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(255)) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 64;
+SET SESSION innodb_lock_wait_timeout = 1000;
+
+--connection node_2
+SET SESSION wsrep_sync_wait = 0;
+SET SESSION wsrep_trx_fragment_size = 64;
+SET SESSION innodb_lock_wait_timeout = 1000;
+
+--let $count = 500
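+# Run 500 rounds of conflicting INSERTs (node_1) and DELETEs (node_2); each statement may
+# fail with a deadlock, duplicate key, lock wait timeout or interrupted query, all of
+# which are tolerated here.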
+--disable_query_log
+while ($count)
+{
+ --connection node_1
+ --send INSERT INTO t1 (f2) VALUES ('abc'),('abc');
+
+ --connection node_2
+ --send DELETE FROM t1 ORDER BY f1 DESC LIMIT 2;
+
+ --connection node_1
+ --error 0,ER_LOCK_DEADLOCK,ER_DUP_ENTRY,ER_LOCK_WAIT_TIMEOUT,ER_QUERY_INTERRUPTED
+ --reap
+
+ --connection node_2
+ --error 0,ER_LOCK_DEADLOCK,ER_DUP_ENTRY,ER_LOCK_WAIT_TIMEOUT,ER_QUERY_INTERRUPTED
+ --reap
+
+ --dec $count
+}
+--enable_query_log
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/GCF-1043A.test b/mysql-test/suite/galera_sr/t/GCF-1043A.test
new file mode 100644
index 00000000000..c76623742d7
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-1043A.test
@@ -0,0 +1,13 @@
+#
+# Assertion `retval == WSREP_OK || retval == WSREP_TRX_FAIL || retval == WSREP_BF_ABORT || retval == WSREP_CONN_FAIL' failed with SR
+#
+
+--source include/galera_cluster.inc
+
+--let $count = 1000;
+--let $wsrep_trx_fragment_size = 1;
+--let $query_node_1 = DELETE FROM t1
+--let $query_node_1a = REPLACE INTO t1 VALUES (1,'y'),(2,'x')
+--let $query_node_2 = REPLACE INTO t1 VALUES (1,'y'),(2,'y'),(3,'y')
+
+--source suite/galera/include/galera_concurrent_test.inc
diff --git a/mysql-test/suite/galera_sr/t/GCF-1043B.test b/mysql-test/suite/galera_sr/t/GCF-1043B.test
new file mode 100644
index 00000000000..e3b6b7439ca
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-1043B.test
@@ -0,0 +1,13 @@
+#
+# Assertion `retval == WSREP_OK || retval == WSREP_TRX_FAIL || retval == WSREP_BF_ABORT || retval == WSREP_CONN_FAIL' failed with SR
+#
+
+--source include/galera_cluster.inc
+
+--let $count = 1000;
+--let $wsrep_trx_fragment_size = 1;
+--let $query_node_1 = DELETE FROM t1
+--let $query_node_1a = INSERT INTO t1 VALUES (1,'y'),(2,'x')
+--let $query_node_2 = UPDATE t1 SET f2 = 'y' WHERE f1 = 1 OR f1 = 2;
+
+--source suite/galera/include/galera_concurrent_test.inc
diff --git a/mysql-test/suite/galera_sr/t/GCF-1051.test b/mysql-test/suite/galera_sr/t/GCF-1051.test
new file mode 100644
index 00000000000..1db4ed15c41
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-1051.test
@@ -0,0 +1,51 @@
+#
+# Test the case where an SR transaction is rolled back to a savepoint that points to the
+# very beginning of the transaction. This results in a regular rollback
+# rather than a rollback to savepoint.
+#
+
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size=1;
+
+#
+# Test 1: regular transaction
+#
+--connection node_1
+START TRANSACTION;
+SAVEPOINT A;
+INSERT INTO t1 VALUES (1);
+ROLLBACK TO SAVEPOINT A;
+COMMIT;
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+#
+# Test 2: AUTOCOMMIT OFF
+#
+--connection node_1
+SET AUTOCOMMIT=OFF;
+SAVEPOINT A;
+INSERT INTO t1 VALUES (2);
+ROLLBACK TO SAVEPOINT A;
+COMMIT;
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/GCF-1060.test b/mysql-test/suite/galera_sr/t/GCF-1060.test
new file mode 100644
index 00000000000..714a5ef9f90
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-1060.test
@@ -0,0 +1,9 @@
+--source include/galera_cluster.inc
+
+--let $count = 100;
+--let $wsrep_trx_fragment_size = 1;
+--let $query_node_1 = TRUNCATE TABLE t1
+--let $query_node_1a = INSERT INTO t1 VALUE (1,'x'),(2,'x'),(3,'x')
+--let $query_node_2 = INSERT INTO t1 VALUE (4, 'z');
+
+--source suite/galera/include/galera_concurrent_test.inc
diff --git a/mysql-test/suite/galera_sr/t/GCF-437.test b/mysql-test/suite/galera_sr/t/GCF-437.test
new file mode 100644
index 00000000000..f71be65708e
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-437.test
@@ -0,0 +1,21 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/big_test.inc
+
+CREATE TABLE ten (f1 INTEGER) ENGINE=MyISAM;
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 VARCHAR(512)) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 2 * 1024 * 1024;
+
+--error ER_BINLOG_ROW_LOGGING_FAILED
+INSERT INTO t1 (f2) SELECT REPEAT('x', 512) FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5;
+
+CALL mtr.add_suppression("InnoDB: The total blob data length*");
+CALL mtr.add_suppression("WSREP: Error writing into mysql.wsrep_streaming_log: 139");
+CALL mtr.add_suppression("WSREP: Failed to write to frag table: 1");
+CALL mtr.add_suppression("WSREP: Failed to append frag to persistent storage");
+
+DROP TABLE t1;
+DROP table ten;
diff --git a/mysql-test/suite/galera_sr/t/GCF-561.test b/mysql-test/suite/galera_sr/t/GCF-561.test
new file mode 100644
index 00000000000..4a652284e59
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-561.test
@@ -0,0 +1,65 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test the effect of DDL on a concurrent SR transaction
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1, 1);
+INSERT INTO t1 VALUES (2, 2);
+INSERT INTO t1 VALUES (3, 3);
+INSERT INTO t1 VALUES (4, 4);
+INSERT INTO t1 VALUES (5, 5);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+ALTER TABLE t1 DROP COLUMN f2;
+
+# SR fragments applied before the DDL are no longer visible
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+# Transaction cannot continue due to DDL, implicit ROLLBACK
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (6, 6);
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+# DDL is now in effect
+--error ER_WRONG_VALUE_COUNT_ON_ROW
+INSERT INTO t1 VALUES (6, 6);
+
+# But it should be possible to reissue the transaction
+
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+COMMIT;
+
+--connection node_2
+SELECT COUNT(*) = 5 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/GCF-571.test b/mysql-test/suite/galera_sr/t/GCF-571.test
new file mode 100644
index 00000000000..aca0b9f7907
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-571.test
@@ -0,0 +1,54 @@
+#
+# GCF-571 ROLLBACK TO SAVEPOINT causes all SR records to be deleted
+#
+
+--source include/galera_cluster.inc
+
+SET AUTOCOMMIT=OFF;
+CREATE TABLE t1 (f1 VARCHAR(10)) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES ('%abcdef%');
+INSERT INTO t1 VALUES ('%abcdef%');
+INSERT INTO t1 VALUES ('%abcdef%');
+INSERT INTO t1 VALUES ('%abcdef%');
+SAVEPOINT A;
+INSERT INTO t1 VALUES ('xyzxyz');
+INSERT INTO t1 VALUES ('xyzxyz');
+INSERT INTO t1 VALUES ('xyzxyz');
+INSERT INTO t1 VALUES ('xyzxyz');
+INSERT INTO t1 VALUES ('xyzxyz');
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%abcdef%';
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%xyz%';
+SELECT COUNT(*) = 1 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%SAVEPOINT `A`%';
+
+--connection node_1
+ROLLBACK TO SAVEPOINT A;
+
+--connection node_1a
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%abcdef%';
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%xyz%';
+SELECT COUNT(*) = 1 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%SAVEPOINT `A`%';
+SELECT COUNT(*) = 1 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%ROLLBACK TO `A`%';
+
+--connection node_2
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%abcdef%';
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%xyz%';
+SELECT COUNT(*) = 1 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%SAVEPOINT `A`%';
+SELECT COUNT(*) = 1 FROM mysql.wsrep_streaming_log WHERE frag LIKE '%ROLLBACK TO `A`%';
+
+--connection node_1
+ROLLBACK;
+
+--connection node_1a
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/GCF-572.test b/mysql-test/suite/galera_sr/t/GCF-572.test
new file mode 100644
index 00000000000..abefb9b08f6
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-572.test
@@ -0,0 +1,54 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(10)) ENGINE=InnoDB;
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+
+#
+# Test 1: statement rollback is not safe
+# (some fragments were already replicated)
+#
+
+--connection node_1
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'node1');
+
+--connection node_1a
+INSERT INTO t1 VALUES (5, 'node2');
+
+--connection node_1
+# If we try to INSERT a duplicate key, ER_LOCK_DEADLOCK is the only possible
+# outcome at this point. Notice that ER_DUP_ENTRY is NOT an option here
+# because we were forced to rollback the whole transaction (not just the
+# statement)
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (5, 'node1');
+
+SELECT * FROM t1;
+
+#
+# Test 2: statement rollback is safe
+# (no fragments have been replicated)
+#
+
+SET SESSION wsrep_trx_fragment_size = 10000;
+
+START TRANSACTION;
+INSERT INTO t1 VALUE (10, 'node1');
+SELECT * FROM mysql.wsrep_streaming_log;
+
+--connection node_1a
+INSERT INTO t1 VALUES(15, 'node2');
+
+--connection node_1
+SELECT * FROM t1;
+# This time, only the statement is rolled back and we expect ER_DUP_ENTRY.
+--error ER_DUP_ENTRY
+INSERT INTO t1 VALUES(15, 'node1');
+
+COMMIT;
+SELECT * FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/GCF-580.test b/mysql-test/suite/galera_sr/t/GCF-580.test
new file mode 100644
index 00000000000..39a237fda57
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-580.test
@@ -0,0 +1,27 @@
+#
+# GCF-580 wsrep_last_committed_counter increases twice for every SR fragment
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+--let $wsrep_last_committed_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+
+--let $fragments_count = `SELECT COUNT(*) FROM mysql.wsrep_streaming_log`
+--let $wsrep_last_committed_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
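+# With fragment size 1 every INSERT above replicates one fragment, so wsrep_last_committed
+# is expected to advance by exactly the number of fragments in wsrep_streaming_log.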
+--disable_query_log
+--eval SELECT ($wsrep_last_committed_after - $wsrep_last_committed_before) = $fragments_count AS last_committed_matches_fragment_count
+--enable_query_log
+
+COMMIT;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/GCF-585.test b/mysql-test/suite/galera_sr/t/GCF-585.test
new file mode 100644
index 00000000000..ceb7da60df6
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-585.test
@@ -0,0 +1,44 @@
+#
+# GCF-585 SR: Assertion `total_length + wsrep_get_fragment_base(thd) == saved_pos' failed in wsrep_write_cache_once after SQL error
+#
+
+--source include/galera_cluster.inc
+
+# Test case #1
+
+create table t1 (f1 integer primary key) engine=innodb;
+set autocommit=off;
+set session wsrep_trx_fragment_size=1;
+start transaction;
+insert into t1 values (1);
+# If we try to INSERT a duplicate key, ER_LOCK_DEADLOCK is the only possible
+# outcome at this point. Notice that ER_DUP_ENTRY is NOT an option here
+# because we were forced to roll back the whole transaction (not just the
+# statement)
+--error ER_LOCK_DEADLOCK
+insert into t1 values (2),(1);
+alter table t1 drop primary key;
+drop table t1;
+
+# Test case #2
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+
+SET SESSION wsrep_trx_fragment_size=1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+update t1 set f1 = 100 where f1 = 10;
+
+--connection node_2
+INSERT INTO t1 VALUES (11),(12),(13),(14),(15),(16),(17),(18),(19),(20);
+SET SESSION wsrep_trx_fragment_size=1;
+SET SESSION innodb_lock_wait_timeout=1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+--error ER_LOCK_DEADLOCK
+delete from t1 where f1 > 10;
+--error ER_LOCK_DEADLOCK
+delete from t1 where f1 > 10 and f1 < 100;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/GCF-597.test b/mysql-test/suite/galera_sr/t/GCF-597.test
new file mode 100644
index 00000000000..d3d80ffc4f8
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-597.test
@@ -0,0 +1,29 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
+
+SET wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+--connection node_2
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+
+--connection node_1
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+--error ER_LOCK_DEADLOCK
+ROLLBACK;
+
+DROP TABLE t1;
\ No newline at end of file
diff --git a/mysql-test/suite/galera_sr/t/GCF-620.test b/mysql-test/suite/galera_sr/t/GCF-620.test
new file mode 100644
index 00000000000..abfba47ee5a
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-620.test
@@ -0,0 +1,22 @@
+#
+# GCF-620 SR: ROLLBACK TO SAVEPOINT causes slave crash if wsrep_trx_fragment_size does not fall on boundary
+#
+
+--source include/galera_cluster.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size = 200;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (1);
+SAVEPOINT A;
+INSERT INTO t1 VALUES (1);
+ROLLBACK TO SAVEPOINT A;
+COMMIT;
+
+--connection node_2
+SELECT COUNT(*) = 2 FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/GCF-623.test b/mysql-test/suite/galera_sr/t/GCF-623.test
new file mode 100644
index 00000000000..6784989bde9
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-623.test
@@ -0,0 +1,31 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_1
+--error ER_LOCK_DEADLOCK
+COMMIT;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
\ No newline at end of file
diff --git a/mysql-test/suite/galera_sr/t/GCF-627.test b/mysql-test/suite/galera_sr/t/GCF-627.test
new file mode 100644
index 00000000000..86637ad8e7f
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-627.test
@@ -0,0 +1,30 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER, f2 VARCHAR(10)) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER);
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'a');
+
+--connection node_2
+DROP TABLE t1;
+
+--connection node_1
+--error ER_LOCK_DEADLOCK
+COMMIT;
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--error ER_NO_SUCH_TABLE
+INSERT INTO t1 VALUES (2);
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/t/GCF-845.test b/mysql-test/suite/galera_sr/t/GCF-845.test
new file mode 100644
index 00000000000..316317c6a10
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-845.test
@@ -0,0 +1,30 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+CREATE TABLE IF NOT EXISTS t1 (f1 INTEGER) ENGINE = InnoDB;
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+SET SESSION AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (161);
+COMMIT;
+DELETE FROM t1 WHERE f1 > 13;
+--disconnect node_1a
+--sleep 2
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
+
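+# Check that no 'BF-BF X' conflict lines were written to node 2's error log during this test.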
+--let $assert_file = $MYSQLTEST_VARDIR/log/mysqld.2.err
+--let $assert_count = 0
+--let $assert_select = BF-BF X
+--let $assert_text = No BF-BF log line found
+--let $assert_only_after = CURRENT_TEST
+--source include/assert_grep.inc
+
diff --git a/mysql-test/suite/galera_sr/t/GCF-851.test b/mysql-test/suite/galera_sr/t/GCF-851.test
new file mode 100644
index 00000000000..28d5302a422
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-851.test
@@ -0,0 +1,24 @@
+--source include/galera_cluster.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+SET SESSION WSREP_TRX_FRAGMENT_SIZE=1;
+SET SESSION AUTOCOMMIT=OFF;
+
+INSERT INTO t1 VALUES (10);
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+--connection node_2
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+START TRANSACTION;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) > 0 FROM t1;
+
+--connection node_1
+SELECT COUNT(*) > 0 FROM t1;
+DROP TABLE t1;
+
diff --git a/mysql-test/suite/galera_sr/t/GCF-867.test b/mysql-test/suite/galera_sr/t/GCF-867.test
new file mode 100644
index 00000000000..54476a860b7
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-867.test
@@ -0,0 +1,42 @@
+#
+# Test many ongoing SR transactions
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
+--disable_query_log
+
+--let $connections = 62
+
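+# Open 62 connections to node 1, each using streaming replication with fragment size 1.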
+--let $count = $connections
+while ($count)
+{
+--connect $count, 127.0.0.1, root, , test, $NODE_MYPORT_1
+SET SESSION wsrep_trx_fragment_size = 1;
+SET SESSION wsrep_sync_wait = 0;
+--dec $count
+}
+
+
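+# Start one SR transaction per connection, each inserting its own value.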
+--let $count = $connections
+while ($count)
+{
+--connection $count
+START TRANSACTION;
+--send_eval INSERT INTO t1 VALUES ($count)
+--dec $count
+}
+
+--let $count = $connections
+while ($count)
+{
+--connection $count
+--reap
+COMMIT;
+--dec $count
+}
+
+--enable_query_log
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/GCF-889.test b/mysql-test/suite/galera_sr/t/GCF-889.test
new file mode 100644
index 00000000000..e785b282019
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-889.test
@@ -0,0 +1,29 @@
+--source include/galera_cluster.inc
+
+--connection node_2
+SET GLOBAL wsrep_ignore_apply_errors = 2;
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+SET SESSION wsrep_on = OFF;
+INSERT INTO t1 VALUES (1);
+SET SESSION wsrep_on = ON;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+DELETE FROM t1 WHERE f1 = 1;
+SET SESSION wsrep_trx_fragment_size = 0;
+
+INSERT INTO t1 VALUES (1);
+
+SELECT COUNT(*) = 1;
+
+--connection node_2
+SELECT COUNT(*) = 1;
+CALL mtr.add_suppression("Could not execute Delete_rows event on table");
+CALL mtr.add_suppression("Can't find record in 't1'");
+SET GLOBAL wsrep_ignore_apply_errors = 7;
+
+--connection node_1
+DROP TABLE t1;
+
diff --git a/mysql-test/suite/galera_sr/t/GCF-900.test b/mysql-test/suite/galera_sr/t/GCF-900.test
new file mode 100644
index 00000000000..3f1b53630b6
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/GCF-900.test
@@ -0,0 +1,28 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+CREATE TABLE t1 (f1 INT PRIMARY KEY, f2 INT) ENGINE=InnoDB;
+
+--connection node_1
+SET SESSION wsrep_trx_fragment_size = 128;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 0);
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (2, 0);
+
+--connection node_2
+ALTER TABLE t1 DROP COLUMN f2;
+
+--connection node_1
+--error ER_LOCK_DEADLOCK
+COMMIT;
+
+--connection node_1a
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (3, 0);
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/MDEV-18585.cnf b/mysql-test/suite/galera_sr/t/MDEV-18585.cnf
new file mode 100644
index 00000000000..00801511d76
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/MDEV-18585.cnf
@@ -0,0 +1,5 @@
+!include ../galera_2nodes.cnf
+
+[mysqld.1]
+log-bin
+log-slave-updates
diff --git a/mysql-test/suite/galera_sr/t/MDEV-18585.test b/mysql-test/suite/galera_sr/t/MDEV-18585.test
new file mode 100644
index 00000000000..18bbbe8f7e0
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/MDEV-18585.test
@@ -0,0 +1,42 @@
+#
+# MDEV-18686 Verify that the Annotate_rows_log_event is written only
+# once per statement into binlog.
+#
+--source include/galera_cluster.inc
+
+CREATE TABLE t1 (f1 INT PRIMARY KEY);
+
+#
+# Unit ROW
+#
+SET SESSION wsrep_trx_fragment_unit='ROWS';
+SET SESSION wsrep_trx_fragment_size=1;
+
+INSERT INTO t1 VALUES (1), (2);
+
+#
+# Unit BYTE
+#
+SET SESSION wsrep_trx_fragment_unit='BYTES';
+SET SESSION wsrep_trx_fragment_size=1;
+
+INSERT INTO t1 VALUES (3), (4);
+
+#
+# Unit STATEMENT
+#
+SET SESSION wsrep_trx_fragment_unit='STATEMENTS';
+SET SESSION wsrep_trx_fragment_size=1;
+
+INSERT INTO t1 VALUES (5), (6);
+
+#
+# Reset to default settings
+#
+SET SESSION wsrep_trx_fragment_unit=default;
+SET SESSION wsrep_trx_fragment_size=default;
+
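+# The binlog should now contain exactly one Annotate_rows event per INSERT statement,
+# regardless of the fragment unit used above.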
+--replace_regex /table_id: [0-9]+/table_id: #/ /xid=[0-9]+/xid=#/
+SHOW BINLOG EVENTS IN 'mysqld-bin.000002' FROM 518;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera-features#56.test b/mysql-test/suite/galera_sr/t/galera-features#56.test
new file mode 100644
index 00000000000..0497952e355
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera-features#56.test
@@ -0,0 +1,55 @@
+##
+## This test exercises the parallel application of multiple auto-increment insert transactions
+##
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+# Create a second connection to node1 so that we can run transactions concurrently
+--let $galera_connection_name = node_1a
+--let $galera_server_number = 1
+--source include/galera_connect.inc
+SET SESSION wsrep_trx_fragment_size = 1;
+
+--connection node_1
+CREATE TABLE ten (f1 INTEGER);
+INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+
+CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 INTEGER) Engine=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+
+
+--connection node_2
+--let $wsrep_slave_threads_orig = `SELECT @@wsrep_slave_threads`
+SET GLOBAL wsrep_slave_threads = 4;
+SET SESSION wsrep_trx_fragment_size = 1;
+
+--connection node_1
+--send INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+
+--connection node_1a
+--send INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+
+--connection node_2
+--send INSERT INTO t1 (f2) SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+
+--connection node_1
+--reap
+
+--connection node_1a
+--reap
+
+--connection node_2
+--reap
+
+SELECT COUNT(*) = 30000 FROM t1;
+SELECT COUNT(DISTINCT f1) = 30000 FROM t1;
+SELECT COUNT(*) = 6 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user';
+
+--disable_query_log
+--eval SET GLOBAL wsrep_slave_threads = $wsrep_slave_threads_orig;
+--enable_query_log
+
+--connection default
+DROP TABLE t1;
+DROP TABLE ten;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_bf_abort.inc b/mysql-test/suite/galera_sr/t/galera_sr_bf_abort.inc
new file mode 100644
index 00000000000..cd9884cee81
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_bf_abort.inc
@@ -0,0 +1,145 @@
+#
+# Test SR BF abort for all sync points in master side code path
+#
+# The procedure in all test cases is the following:
+# 1) Start SR transaction on node 1, do INSERT + SELECT .. FOR UPDATE
+# 2) Set up sync point on node 1 to block slave thread processing
+# in apply monitor
+# 3) Do write on node 2 which will conflict with SELECT .. FOR UPDATE
+# 4) Set up desired sync point on master side and commit
+# 5) Wait until commit reaches master side sync point, clear sync points
+# and release all sync point waiters
+# 6) COMMIT on node 1 should return deadlock error
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+--eval SET SESSION wsrep_trx_fragment_size = $wsrep_trx_fragment_size
+SET AUTOCOMMIT=OFF;
+
+INSERT INTO t1 VALUES (1);
+SELECT * FROM t1 FOR UPDATE;
+
+# Set up sync point
+--connection node_1a
+--let galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+
+# Conflicting insert
+--connection node_2
+
+SET AUTOCOMMIT=ON;
+INSERT INTO t1 VALUES (2);
+
+--connection node_1a
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = $galera_sr_bf_abort_sync_point
+--source include/galera_set_sync_point.inc
+
+--connection node_1
+if ($galera_sr_bf_abort_at_commit)
+{
+ --send COMMIT
+}
+if (!$galera_sr_bf_abort_at_commit)
+{
+ --send INSERT INTO t1 VALUES (3)
+}
+
+--connection node_1a
+
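+# Wait until both the applier and the local transaction have reached their sync points.
+# The two sync point names are combined into one list in lexicographic order (the STRCMP
+# below decides which name comes first).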
+--let $cmp = `SELECT STRCMP('apply_monitor_slave_enter_sync', '$galera_sr_bf_abort_sync_point') = -1`
+
+if ($cmp)
+{
+ --let $galera_sync_point = apply_monitor_slave_enter_sync $galera_sr_bf_abort_sync_point
+}
+if (!$cmp)
+{
+ --let $galera_sync_point = $galera_sr_bf_abort_sync_point apply_monitor_slave_enter_sync
+}
+--source include/galera_wait_sync_point.inc
+
+# Let conflicting insert proceed, make sure it hits abort_trx_end and
+# let both threads continue.
+
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = abort_trx_end
+--source include/galera_set_sync_point.inc
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+--let $galera_sync_point = abort_trx_end $galera_sr_bf_abort_sync_point
+
+--source include/galera_wait_sync_point.inc
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = abort_trx_end
+--source include/galera_signal_sync_point.inc
+--let $galera_sync_point = $galera_sr_bf_abort_sync_point
+--source include/galera_signal_sync_point.inc
+
+# A deadlock error should now be returned by node_1
+--connection node_1
+if (!$galera_sr_bf_abort_at_commit)
+{
+ --error ER_LOCK_DEADLOCK
+ --reap
+}
+if ($galera_sr_bf_abort_at_commit)
+{
+ --reap
+}
+
+ROLLBACK;
+
+# Release slave insert
+--connection node_1a
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = abort_trx_end
+--source include/galera_signal_sync_point.inc
+
+# Verify that nodes are consistent
+
+# End result:
+# If the statement that was BF aborted was the COMMIT,
+# node_1 must replay the transaction so that the table
+# will have rows 1 and 2. If it was the INSERT instead,
+# node_1 must abort the transaction so that only
+# INSERT ... VALUES (2) survives.
+
+--connection node_1
+SELECT * FROM t1;
+if ($galera_sr_bf_abort_at_commit)
+{
+ SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+ SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+}
+if (!$galera_sr_bf_abort_at_commit)
+{
+ SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+}
+--connection node_2
+SELECT * FROM t1;
+if ($galera_sr_bf_abort_at_commit)
+{
+ SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 1;
+ SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+}
+if (!$galera_sr_bf_abort_at_commit)
+{
+ SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+}
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--connection node_2
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+# Delete the entry to verify that the node is unblocked
+--connection node_1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 0;
+DELETE FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_bf_abort.test b/mysql-test/suite/galera_sr/t/galera_sr_bf_abort.test
new file mode 100644
index 00000000000..a2db6a8bad2
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_bf_abort.test
@@ -0,0 +1,50 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
+
+--let $wsrep_trx_fragment_size = 1
+
+# Control connection for manipulating sync points on node 1
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+SET SESSION wsrep_sync_wait = 0;
+
+--echo galera_sr_bf_abort_at_commit = 0
+--let $galera_sr_bf_abort_at_commit = 0
+
+--echo after_replicate_sync
+--let $galera_sr_bf_abort_sync_point = after_replicate_sync
+--source galera_sr_bf_abort.inc
+
+--echo local_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = local_monitor_master_enter_sync
+--source galera_sr_bf_abort.inc
+
+--echo apply_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = apply_monitor_master_enter_sync
+--source galera_sr_bf_abort.inc
+
+--echo commit_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = commit_monitor_master_enter_sync
+--source galera_sr_bf_abort.inc
+
+--echo galera_sr_bf_abort_at_commit = 1
+--let $galera_sr_bf_abort_at_commit = 1
+
+--echo after_replicate_sync
+--let $galera_sr_bf_abort_sync_point = after_replicate_sync
+--source galera_sr_bf_abort.inc
+
+--echo local_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = local_monitor_master_enter_sync
+--source galera_sr_bf_abort.inc
+
+--echo apply_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = apply_monitor_master_enter_sync
+--source galera_sr_bf_abort.inc
+
+--echo commit_monitor_master_enter_sync
+--let $galera_sr_bf_abort_sync_point = commit_monitor_master_enter_sync
+--source galera_sr_bf_abort.inc
+
+CALL mtr.add_suppression("WSREP: fragment replication failed: 1");
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_blob.test b/mysql-test/suite/galera_sr/t/galera_sr_blob.test
new file mode 100644
index 00000000000..ed314d09f5a
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_blob.test
@@ -0,0 +1,38 @@
+#
+# Test that a single blob will be replicated using SR if it is sufficiently
+# large.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 TEXT) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+--connection node_2
+--let $wsrep_last_committed_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+
+--connection node_1
+INSERT INTO t1 VALUES (REPEAT('x', 65535));
+
+--connection node_2
+--let $wsrep_last_committed_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+
+# Confirm that exactly one fragment was replicated
+
+--disable_query_log
+--eval SELECT ($wsrep_last_committed_after - $wsrep_last_committed_before) = 1 AS wsrep_last_committed_delta;
+--enable_query_log
+
+--connection node_1
+COMMIT;
+
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1;
+SELECT LENGTH(f1) = 65535 FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_cc_master.test b/mysql-test/suite/galera_sr/t/galera_sr_cc_master.test
new file mode 100644
index 00000000000..da547c59626
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_cc_master.test
@@ -0,0 +1,100 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test the effect of Cluster Configuration Change on a concurrently-running SR transaction
+# We use SET GLOBAL wsrep_cluster_address = '' to cause the master (node_2) to temporarily
+# leave the cluster.
+#
+
+CALL mtr.add_suppression("WSREP: discarding established.*");
+
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--source ../galera/include/auto_increment_offset_save.inc
+
+--connection node_2
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+
+#
+# Trigger a CC. The transaction is aborted and we expect the SR tables to be cleaned up.
+#
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connection node_2a
+SET SESSION wsrep_sync_wait=0;
+--let $wsrep_cluster_address_orig = `SELECT @@wsrep_cluster_address`
+SET GLOBAL wsrep_cluster_address = '';
+
+--let $wait_condition = SELECT VARIABLE_VALUE = 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+SET SESSION wsrep_sync_wait = DEFAULT;
+
+--connection node_2
+--error 2013 # CR_SERVER_LOST
+INSERT INTO t1 VALUES (6);
+
+--connection node_1
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+
+# Restore cluster
+
+--connection node_2a
+--disable_query_log
+--eval SET GLOBAL wsrep_cluster_address='$wsrep_cluster_address_orig'
+--enable_query_log
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+
+--connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connection node_2b
+--source include/galera_wait_ready.inc
+SELECT * FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+
+# Repeat the transaction to confirm that no locks are left over from the previous one
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+COMMIT;
+
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
+
+--connection node_2b
+CALL mtr.add_suppression("WSREP: Failed to replicate rollback fragment for");
+
+--disconnect node_2
+--connect node_2, 127.0.0.1, root, , test, $NODE_MYPORT_2
+# Restore original auto_increment_offset values.
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_cc_no_primary.test b/mysql-test/suite/galera_sr/t/galera_sr_cc_no_primary.test
new file mode 100644
index 00000000000..13a1b0f009e
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_cc_no_primary.test
@@ -0,0 +1,85 @@
+#
+# This test verifies that an orphaned SR transaction gets cleaned up upon
+# cluster reconnection. Specifically, it covers the case where the cluster
+# goes through a state with no primary component and the nodes rejoin with
+# the same IDs.
+#
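+# A node is pushed out of / back into the primary component further below via
+# the provider's isolation option; in sketch form (comments only):
+#
+#   SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';  # isolate node
+#   SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0';  # rejoin
+#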
+
+--source include/galera_cluster.inc
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--source ../galera/include/auto_increment_offset_save.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+
+SET SESSION wsrep_trx_fragment_size=1;
+BEGIN;
+INSERT INTO t1 VALUES (10);
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+
+--connection node_2
+SET SESSION wsrep_trx_fragment_size=1;
+BEGIN;
+INSERT INTO t1 VALUES (20);
+INSERT INTO t1 VALUES (21);
+INSERT INTO t1 VALUES (22);
+SELECT COUNT(*) `expect 6` FROM mysql.wsrep_streaming_log;
+
+# isolate node 2
+--connection node_2a
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
+SET SESSION wsrep_sync_wait = 0;
+--let $wait_condition = SELECT VARIABLE_VALUE = 'OFF' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready'
+--source include/wait_condition.inc
+SHOW STATUS LIKE 'wsrep_cluster_size';
+SHOW STATUS LIKE 'wsrep_cluster_status';
+SET SESSION wsrep_sync_wait = DEFAULT;
+
+--connection node_1a
+SET SESSION wsrep_sync_wait = 0;
+--let $wait_condition = SELECT VARIABLE_VALUE = 'OFF' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready'
+--source include/wait_condition.inc
+SHOW STATUS LIKE 'wsrep_cluster_size';
+SHOW STATUS LIKE 'wsrep_cluster_status';
+SET SESSION wsrep_sync_wait = DEFAULT;
+
+--connection node_2a
+# reconnect node 2
+SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 0';
+--source include/galera_wait_ready.inc
+SHOW STATUS LIKE 'wsrep_cluster_size';
+
+--connection node_1a
+# wait for reconnection and check that the streaming log has been cleared
+--source include/galera_wait_ready.inc
+SHOW STATUS LIKE 'wsrep_cluster_size';
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+
+--connection node_2a
+# check that the streaming log has been cleared and there are no locks
+# from the SRs by issuing conflicting inserts
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+INSERT INTO t1 VALUES(10);
+INSERT INTO t1 VALUES(20);
+SELECT * FROM t1;
+
+# check that both SRs have been rolled back
+--connection node_1
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES(13);
+
+--connection node_2
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES(23);
+
+DROP TABLE t1;
+
+# Restore original auto_increment_offset values.
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_cc_slave.test b/mysql-test/suite/galera_sr/t/galera_sr_cc_slave.test
new file mode 100644
index 00000000000..7ba0f253b4a
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_cc_slave.test
@@ -0,0 +1,105 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test the effect of Cluster Configuration Change on a concurrently-running SR transaction
+# We use SET GLOBAL wsrep_cluster_address = '' to cause the slave (node_2) to temporarily
+# leave the cluster.
+#
+
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--source ../../galera/include/auto_increment_offset_save.inc
+
+# Start with a clean slate
+--connection node_2
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+#
+# Trigger CC. The transaction should be able to continue.
+#
+
+--connection node_2
+--let $wsrep_cluster_address_orig = `SELECT @@wsrep_cluster_address`
+SET GLOBAL wsrep_cluster_address = '';
+
+# Wait until the node_2 disconnects from the cluster
+SET SESSION wsrep_sync_wait = 0;
+--let $wait_condition = SELECT VARIABLE_VALUE = 'Disconnected' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_status'
+--source include/wait_condition.inc
+SET SESSION wsrep_sync_wait = default;
+
+--connection node_1
+
+# Wait until the node_1 sees the cluster configuration change
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+# Continue generating events in the transaction
+
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (7);
+INSERT INTO t1 VALUES (8);
+INSERT INTO t1 VALUES (9);
+INSERT INTO t1 VALUES (10);
+
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+# Restore cluster
+
+--connection node_2
+--disable_query_log
+--eval SET GLOBAL wsrep_cluster_address='$wsrep_cluster_address_orig';
+--enable_query_log
+--source include/galera_wait_ready.inc
+
+# Confirm that the SR table still contains entries from ongoing transaction
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+# Continue and finalize transaction
+--connection node_1
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+INSERT INTO t1 VALUES (13);
+INSERT INTO t1 VALUES (14);
+INSERT INTO t1 VALUES (16);
+COMMIT;
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+# Confirm that transaction was replicated properly
+# and SR table is cleaned up afterwards.
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 15 FROM t1;
+--source include/wait_condition.inc
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
+
+CALL mtr.add_suppression("points to own listening address, blacklisting");
+
+# Restore original auto_increment_offset values.
+--source ../../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_concurrent.test b/mysql-test/suite/galera_sr/t/galera_sr_concurrent.test
new file mode 100644
index 00000000000..9ec7143d25c
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_concurrent.test
@@ -0,0 +1,45 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test two concurrent SR-replicated transactions
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t2 VALUES (1);
+INSERT INTO t2 VALUES (2);
+INSERT INTO t2 VALUES (3);
+INSERT INTO t2 VALUES (4);
+INSERT INTO t2 VALUES (5);
+
+--connection node_1
+COMMIT;
+
+--connection node_1a
+COMMIT;
+
+--connection node_2
+SELECT COUNT(*) = 5 FROM t1;
+SELECT COUNT(*) = 5 FROM t2;
+
+--connection node_1
+
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_conflict.test b/mysql-test/suite/galera_sr/t/galera_sr_conflict.test
new file mode 100644
index 00000000000..dd133c2d12e
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_conflict.test
@@ -0,0 +1,45 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# A conflict between a streaming replication fragment and a local transaction
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+# Wait until a streaming replication fragment has arrived
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+# Attempt a conflicting INSERT. This will block
+--send INSERT INTO t1 VALUES(1);
+
+# Observe the block from a separate connection
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'wsrep applier committed%';
+--source include/wait_condition.inc
+
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'update';
+--source include/wait_condition.inc
+
+# Commit the remote transaction, causing the local transaction to return an error
+--connection node_1
+COMMIT;
+
+--connection node_2
+--error ER_DUP_ENTRY
+--reap
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_conflict_on_commit.test b/mysql-test/suite/galera_sr/t/galera_sr_conflict_on_commit.test
new file mode 100644
index 00000000000..6675321641f
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_conflict_on_commit.test
@@ -0,0 +1,45 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test the following sequence of events:
+#
+# 1. Node #2 begins a transaction
+# 2. Node #1 begins conflicting transaction that is SR replicated
+# 3. Node #2 attempts to commit, gets a deadlock error, even before #1 has committed
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+--connection node_2
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+
+--connection node_1
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+--error ER_LOCK_DEADLOCK
+COMMIT;
+
+--connection node_1
+COMMIT;
+
+SELECT COUNT(*) = 5 FROM t1;
+
+--connection node_2
+SELECT COUNT(*) = 5 FROM t1;
+
+--connection node_1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_conflict_on_commit2.test b/mysql-test/suite/galera_sr/t/galera_sr_conflict_on_commit2.test
new file mode 100644
index 00000000000..0ea52290bb6
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_conflict_on_commit2.test
@@ -0,0 +1,46 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test the following sequence of events:
+#
+# 1. Node #1 begins a transaction that is SR replicated
+# 2. Node #2 begins a conflicting transaction, hangs
+# 3. Node #1 commits
+# 4. Node #2 gets a dup key error
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+--connection node_1
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+--send INSERT INTO t1 VALUES (5);
+
+--sleep 1
+
+--connection node_1
+COMMIT;
+
+--connection node_2
+--error ER_DUP_ENTRY
+--reap
+
+SELECT COUNT(*) = 5 FROM t1;
+
+--connection node_2
+SELECT COUNT(*) = 5 FROM t1;
+
+--connection node_1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_conflict_with_rollback_master.test b/mysql-test/suite/galera_sr/t/galera_sr_conflict_with_rollback_master.test
new file mode 100644
index 00000000000..cb96fae0122
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_conflict_with_rollback_master.test
@@ -0,0 +1,44 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test the following sequence of events:
+#
+# 1. Node #2 begins a transaction
+# 2. Node #1 begins conflicting transaction that is SR replicated
+# 3. Node #1 rolls back
+# 4. Node #2 can not commit because it was BF-aborted even though the SR transaction was rolled back
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+--connection node_2
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+
+--connection node_1
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+ROLLBACK;
+
+--connection node_2
+--error ER_LOCK_DEADLOCK
+COMMIT;
+
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_create_drop.test b/mysql-test/suite/galera_sr/t/galera_sr_create_drop.test
new file mode 100644
index 00000000000..b7987d26191
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_create_drop.test
@@ -0,0 +1,33 @@
+#
+# Verify that CREATE/DROP DDLs work when streaming replication is on.
+#
+
+--source include/galera_cluster.inc
+
+SET SESSION wsrep_trx_fragment_size=1;
+
+#
+# CREATE/DROP TABLE succeeds and the change is propagated to node_2.
+#
+CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
+--connection node_2
+SHOW CREATE TABLE t1;
+--connection node_1
+DROP TABLE t1;
+--connection node_2
+--error ER_NO_SUCH_TABLE
+SHOW CREATE TABLE t1;
+
+#
+# CREATE/DROP DATABASE succeeds and the change is propagated to node_2.
+#
+CREATE DATABASE mdev_18587;
+--connection node_2
+SHOW DATABASES LIKE 'mdev_18587';
+--connection node_1
+DROP DATABASE mdev_18587;
+--connection node_2
+SHOW DATABASES LIKE 'mdev_18587';
+--connection node_1
+
+SET SESSION wsrep_trx_fragment_size=DEFAULT;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_ddl_master.test b/mysql-test/suite/galera_sr/t/galera_sr_ddl_master.test
new file mode 100644
index 00000000000..7da7f55ba15
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_ddl_master.test
@@ -0,0 +1,63 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test the following sequence of events on the master:
+#
+# 1. Connection #1 begins a SR transaction
+# 2. Connection #2 issues DDL
+# 3. Connection #1 attempts to continue the transaction, gets deadlock
+# 4. Connection #1 retries the transaction and succeeds
+#
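+# (The ALTER below is replicated with the default wsrep_OSU_method, TOI, and
+#  therefore brute-force aborts the open SR transaction; the victim sees
+#  ER_LOCK_DEADLOCK on its next statement. The method in use can be checked
+#  with e.g.:
+#    SHOW VARIABLES LIKE 'wsrep_OSU_method';
+# )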
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+# SR replication is triggered and rows have been delivered to the slave
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+
+--connection node_1
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (6);
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+# Check that the transaction thus aborted could be reissued
+
+ROLLBACK;
+START TRANSACTION;
+INSERT INTO t1 (f1) VALUES (1);
+INSERT INTO t1 (f1) VALUES (2);
+INSERT INTO t1 (f1) VALUES (3);
+INSERT INTO t1 (f1) VALUES (4);
+INSERT INTO t1 (f1) VALUES (5);
+INSERT INTO t1 (f1) VALUES (6);
+COMMIT;
+
+SELECT COUNT(*) = 6 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) = 6 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_ddl_schema.test b/mysql-test/suite/galera_sr/t/galera_sr_ddl_schema.test
new file mode 100644
index 00000000000..a3045773387
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_ddl_schema.test
@@ -0,0 +1,43 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test the effect of DROP SCHEMA DDL on a concurrent SR transaction
+# Most other DDL tests work on a table level, so this test exercises a
+# different granularity.
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1, 1);
+INSERT INTO t1 VALUES (2, 2);
+INSERT INTO t1 VALUES (3, 3);
+INSERT INTO t1 VALUES (4, 4);
+INSERT INTO t1 VALUES (5, 5);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+DROP SCHEMA test;
+
+--error ER_NO_SUCH_TABLE
+SELECT COUNT(*) = 0 FROM test.t1;
+
+--connection node_1
+
+# Transaction can not continue due to DDL
+--error ER_LOCK_DEADLOCK
+INSERT INTO test.t1 VALUES (6, 6);
+
+# DDL is now in effect
+--error ER_NO_SUCH_TABLE
+INSERT INTO test.t1 VALUES (6, 6);
+
+CREATE SCHEMA test;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_ddl_slave.test b/mysql-test/suite/galera_sr/t/galera_sr_ddl_slave.test
new file mode 100644
index 00000000000..4a652284e59
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_ddl_slave.test
@@ -0,0 +1,65 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test the effect of DDL on a concurrent SR transaction
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1, 1);
+INSERT INTO t1 VALUES (2, 2);
+INSERT INTO t1 VALUES (3, 3);
+INSERT INTO t1 VALUES (4, 4);
+INSERT INTO t1 VALUES (5, 5);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+ALTER TABLE t1 DROP COLUMN f2;
+
+# SR changes applied before the DDL are no longer visible
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+# Transaction can not continue due to DDL, implicit ROLLBACK
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (6, 6);
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+# DDL is now in effect
+--error ER_WRONG_VALUE_COUNT_ON_ROW
+INSERT INTO t1 VALUES (6, 6);
+
+# But it should be possible to reissue the transaction
+
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+COMMIT;
+
+--connection node_2
+SELECT COUNT(*) = 5 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_ddl_unrelated.test b/mysql-test/suite/galera_sr/t/galera_sr_ddl_unrelated.test
new file mode 100644
index 00000000000..77b6e64641d
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_ddl_unrelated.test
@@ -0,0 +1,53 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test the effect of unrelated DDL on a concurrent SR transaction
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1, 1);
+INSERT INTO t1 VALUES (2, 2);
+INSERT INTO t1 VALUES (3, 3);
+INSERT INTO t1 VALUES (4, 4);
+INSERT INTO t1 VALUES (5, 5);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+ALTER TABLE t2 DROP COLUMN f2;
+
+# SR changes applied before the DDL are still visible
+SELECT COUNT(*) = 5 FROM t1;
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+
+# Transaction can continue, even after the DDL
+--error 0
+INSERT INTO t1 VALUES (6, 6);
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+COMMIT;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) = 6 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_dupkey_error.test b/mysql-test/suite/galera_sr/t/galera_sr_dupkey_error.test
new file mode 100644
index 00000000000..a7aca042829
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_dupkey_error.test
@@ -0,0 +1,59 @@
+#
+# Test the case where a duplicate key error happens in the middle of an SR transaction
+#
+
+--source include/galera_cluster.inc
+
+CREATE TABLE t1 (f1 BLOB) ENGINE=InnoDB;
+CREATE UNIQUE INDEX i1 ON t1 (f1(512));
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1024;
+
+INSERT INTO t1 VALUES (REPEAT('a', 512));
+INSERT INTO t1 VALUES (REPEAT('b', 512));
+INSERT INTO t1 VALUES (REPEAT('c', 512));
+INSERT INTO t1 VALUES (REPEAT('d', 512));
+INSERT INTO t1 VALUES (REPEAT('e', 512));
+INSERT INTO t1 VALUES (REPEAT('f', 512));
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+
+--connection node_1
+# Deadlock error instead of dupkey since the transaction is SR and
+# statement rollback is not safe.
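+# For contrast (sketch only, not executed): in a plain non-SR transaction,
+# i.e. with wsrep_trx_fragment_size = 0, the same conflicting insert would
+# fail with ER_DUP_ENTRY and only the statement would be rolled back:
+#
+#   SET SESSION wsrep_trx_fragment_size = 0;
+#   --error ER_DUP_ENTRY
+#   INSERT INTO t1 VALUES (REPEAT('c', 512));
+#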
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (REPEAT('c', 512));
+
+# Confirm that the wsrep_streaming_log table is now empty, as it was a full transaction rollback
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--connection node_2
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+# Confirm that the transaction can be restarted on either node
+
+--connection node_1
+INSERT INTO t1 VALUES (REPEAT('d', 512));
+INSERT INTO t1 VALUES (REPEAT('e', 512));
+INSERT INTO t1 VALUES (REPEAT('f', 512));
+COMMIT;
+
+--connection node_2
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (REPEAT('a', 512));
+INSERT INTO t1 VALUES (REPEAT('b', 512));
+INSERT INTO t1 VALUES (REPEAT('c', 512));
+COMMIT;
+
+--connection node_1
+SELECT COUNT(*) = 6 FROM t1;
+
+--connection node_2
+SELECT COUNT(*) = 6 FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_fk_conflict.test b/mysql-test/suite/galera_sr/t/galera_sr_fk_conflict.test
new file mode 100644
index 00000000000..b83deaee244
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_fk_conflict.test
@@ -0,0 +1,62 @@
+#
+# Test Foreign Key with SR
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+CREATE TABLE grandparent (
+ id INT NOT NULL PRIMARY KEY
+) ENGINE=InnoDB;
+
+CREATE TABLE parent (
+ id INT NOT NULL PRIMARY KEY,
+ grandparent_id INT,
+ FOREIGN KEY (grandparent_id)
+ REFERENCES grandparent(id)
+ ON UPDATE CASCADE
+) ENGINE=InnoDB;
+
+CREATE TABLE child (
+ id INT NOT NULL PRIMARY KEY,
+ grandparent_id INT,
+ FOREIGN KEY (grandparent_id)
+ REFERENCES parent(grandparent_id)
+ ON UPDATE CASCADE
+) ENGINE=InnoDB;
+
+INSERT INTO grandparent VALUES (1),(2),(3),(4);
+INSERT INTO parent VALUES (1,1), (2,2);
+INSERT INTO child VALUES (1,1), (2,2);
+
+# Start an SR transaction
+
+--connection node_1
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+
+UPDATE grandparent SET id = 5 WHERE id = 1;
+
+# No conflicting transactions are allowed to proceed on the slave
+
+--connection node_2
+SET SESSION innodb_lock_wait_timeout = 1;
+
+--error ER_LOCK_WAIT_TIMEOUT
+UPDATE grandparent SET id = 10 WHERE id = 5;
+
+--error ER_LOCK_WAIT_TIMEOUT
+DELETE FROM child;
+
+# SR transaction successful
+
+--connection node_1
+COMMIT;
+
+--let $diff_servers = 1 2
+--source include/diff_servers.inc
+
+DROP TABLE child;
+DROP TABLE parent;
+DROP TABLE grandparent;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_gtid-master.opt b/mysql-test/suite/galera_sr/t/galera_sr_gtid-master.opt
new file mode 100644
index 00000000000..6623c33c484
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_gtid-master.opt
@@ -0,0 +1 @@
+ --log-bin --log-slave-updates --loose-galera-sr-gtid-unique
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_gtid.test b/mysql-test/suite/galera_sr/t/galera_sr_gtid.test
new file mode 100644
index 00000000000..56464ba6f63
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_gtid.test
@@ -0,0 +1,46 @@
+#
+# Test basic Galera SR operation with binlog/GTID enabled
+#
+
+--source include/have_log_bin.inc
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+CREATE TABLE t1 (f1 INT PRIMARY KEY);
+
+SET SESSION wsrep_trx_fragment_size=1;
+INSERT INTO t1 VALUES (1);
+
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1;
+
+SET SESSION wsrep_trx_fragment_size=1;
+UPDATE t1 SET f1 = 2;
+
+#--let $gtid_executed_node2 = `SELECT @@global.gtid_executed;`
+
+--connection node_1
+SET SESSION wsrep_trx_fragment_size=0;
+
+--connection node_2
+SET SESSION wsrep_trx_fragment_size=0;
+
+--connection node_1
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+
+--disable_query_log
+#--eval SELECT '$gtid_executed_node2' = @@global.gtid_executed AS gtid_executed_equal;
+--enable_query_log
+
+--replace_regex /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/<GTID>/ /xid=[0-9]+/xid=###/ /table_id: [0-9]+/table_id: ###/
+--replace_column 2 <Pos> 5 <End_log_pos>
+SHOW BINLOG EVENTS IN 'mysqld-bin.000002' FROM 256;
+
+--connection node_2
+# Perform causal wait
+SELECT 1 FROM DUAL;
+--replace_regex /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/<GTID>/ /xid=[0-9]+/xid=###/ /table_id: [0-9]+/table_id: ###/
+--replace_column 2 <Pos> 5 <End_log_pos>
+SHOW BINLOG EVENTS IN 'mysqld-bin.000003' FROM 256;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_insert_select.test b/mysql-test/suite/galera_sr/t/galera_sr_insert_select.test
new file mode 100644
index 00000000000..01481db5a8b
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_insert_select.test
@@ -0,0 +1,33 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test large INSERT ... SELECT with SR
+#
+
+--connection node_1
+CREATE TABLE ten (f1 INTEGER);
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(255)) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+# Insert 10K rows.
+INSERT INTO t1 (f2) SELECT REPEAT('a', 255) FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 99 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+COMMIT;
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 10000 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+
+DROP TABLE t1;
+DROP TABLE ten;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_kill_all_nobootstrap.test b/mysql-test/suite/galera_sr/t/galera_sr_kill_all_nobootstrap.test
new file mode 100644
index 00000000000..8fba27f9a73
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_kill_all_nobootstrap.test
@@ -0,0 +1,52 @@
+#
+# Kill the entire cluster during SR while pc.bootstrap is in effect;
+# after restart, confirm that the mysql.wsrep_streaming_log table is empty.
+#
+
+--source include/galera_cluster.inc
+--source include/big_test.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+
+--source include/kill_galera.inc
+--connection node_1
+--source include/kill_galera.inc
+
+--sleep 1
+
+# Bootstrap the cluster from scratch
+
+--connection node_1
+--remove_file $MYSQLTEST_VARDIR/mysqld.1/data/grastate.dat
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--source include/start_mysqld.inc
+
+--connection node_2
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+--source include/start_mysqld.inc
+
+--connection node_1
+--source include/wait_until_connected_again.inc
+--source include/galera_wait_ready.inc
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_kill_all_norecovery.cnf b/mysql-test/suite/galera_sr/t/galera_sr_kill_all_norecovery.cnf
new file mode 100644
index 00000000000..82c001e0131
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_kill_all_norecovery.cnf
@@ -0,0 +1,8 @@
+!include ../galera_2nodes.cnf
+
+[mysqld.1]
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;pc.recovery=false'
+auto_increment_offset=1
+
+[mysqld.2]
+auto_increment_offset=2
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_kill_all_norecovery.test b/mysql-test/suite/galera_sr/t/galera_sr_kill_all_norecovery.test
new file mode 100644
index 00000000000..5332b1a1579
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_kill_all_norecovery.test
@@ -0,0 +1,71 @@
+#
+# Kill the entire cluster during SR while pc.recovery is NOT in effect;
+# after restart, confirm that the mysql.wsrep_streaming_log table is empty.
+#
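+# pc.recovery is disabled through the matching .cnf for this test, which sets
+#   wsrep_provider_options='base_port=@mysqld.1.#galera_port;pc.recovery=false'
+# on node 1, so the previous primary component is not restored automatically
+# and the cluster has to be re-bootstrapped with --wsrep-new-cluster below.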
+
+--source include/galera_cluster.inc
+--source include/big_test.inc
+
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--source ../../galera/include/auto_increment_offset_save.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+
+
+#
+# Kill the entire cluster and restart
+#
+--connection node_2
+--source include/kill_galera.inc
+
+--connection node_1
+SET SESSION wsrep_sync_wait = 0;
+--source include/kill_galera.inc
+
+--remove_file $MYSQLTEST_VARDIR/mysqld.1/data/grastate.dat
+--let $start_mysqld_params = "--wsrep-new-cluster"
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--source include/start_mysqld.inc
+
+--connection node_2
+--let $start_mysqld_params = ""
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+--source include/start_mysqld.inc
+
+
+#
+# Check that wsrep_streaming_log is empty
+#
+--connection node_1
+--source include/wait_until_connected_again.inc
+--source include/galera_wait_ready.inc
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
+
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) `expect 0` FROM t1;
+SELECT COUNT(*) `expect 0` FROM mysql.wsrep_streaming_log;
+
+
+#
+# Cleanup
+#
+--source ../../galera/include/auto_increment_offset_restore.inc
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_kill_all_pcrecovery.test b/mysql-test/suite/galera_sr/t/galera_sr_kill_all_pcrecovery.test
new file mode 100644
index 00000000000..0ba7cedbd8f
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_kill_all_pcrecovery.test
@@ -0,0 +1,54 @@
+#
+# Kill the entire cluster during SR while pc.recovery is in effect;
+# after restart, confirm that the mysql.wsrep_streaming_log table is empty.
+#
+
+--source include/galera_cluster.inc
+--source include/big_test.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+
+--source include/kill_galera.inc
+--connection node_1
+--source include/kill_galera.inc
+
+--sleep 1
+
+# Bootstrap the cluster from scratch
+
+--connection node_1
+--remove_file $MYSQLTEST_VARDIR/mysqld.1/data/grastate.dat
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+--source include/start_mysqld.inc
+
+--connection node_2
+--remove_file $MYSQLTEST_VARDIR/mysqld.2/data/grastate.dat
+--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
+--source include/start_mysqld.inc
+
+--connection node_1
+--source include/wait_until_connected_again.inc
+--source include/galera_wait_ready.inc
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_kill_connection.test b/mysql-test/suite/galera_sr/t/galera_sr_kill_connection.test
new file mode 100644
index 00000000000..03d09f33fab
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_kill_connection.test
@@ -0,0 +1,59 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test KILL CONNECTION on a transaction that has already replicated some data via SR
+#
+
+SET SESSION wsrep_trx_fragment_size = 1;
+
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+# Confirm that the transaction is SR-replicated
+--connection node_2
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+# Kill the transaction by killing the entire connection
+
+--connection node_1
+--let $connection_id = `SELECT CONNECTION_ID()`
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--disable_query_log
+--eval KILL CONNECTION $connection_id
+--enable_query_log
+
+# Confirm that the disconnection caused the updates made so far to be removed
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 0 FROM t1;
+--source include/wait_condition.inc
+
+# Confirm that the transaction can be reissued in its entirety on the slave without a conflict
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+COMMIT;
+
+SELECT COUNT(*) = 5 FROM t1;
+
+--connection node_1a
+SELECT COUNT(*) = 5 FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_kill_query.test b/mysql-test/suite/galera_sr/t/galera_sr_kill_query.test
new file mode 100644
index 00000000000..4c9f2b4d7bc
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_kill_query.test
@@ -0,0 +1,48 @@
+--source include/galera_cluster.inc
+
+#
+# Test KILL QUERY on a statement that has already replicated some data via SR
+#
+
+SET SESSION wsrep_trx_fragment_size = 1;
+
+CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+--let $connection_id = `SELECT CONNECTION_ID()`
+--send INSERT INTO t1 SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5, ten AS a6;
+
+# Wait for some SR to arrive on the slave.
+--connection node_2
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT TABLE_ROWS > 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1';
+--source include/wait_condition.inc
+
+--connection node_1
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--echo Killing query ...
+--disable_query_log
+--eval KILL QUERY $connection_id
+--enable_query_log
+
+--connection node_1
+--error ER_QUERY_INTERRUPTED
+--reap
+
+# Confirm that the kill caused the updates made so far to be removed
+--connection node_2
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+# Confirm that a new INSERT can be issued on the slave without a conflict
+
+INSERT INTO t1 SELECT 1 FROM ten AS t1, ten AS t2, ten AS t3;
+SELECT COUNT(*) = 1000 FROM t1;
+
+--connection node_1a
+SELECT COUNT(*) = 1000 FROM t1;
+
+DROP TABLE t1;
+DROP TABLE ten;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_kill_slave.cnf b/mysql-test/suite/galera_sr/t/galera_sr_kill_slave.cnf
new file mode 100644
index 00000000000..290d8fe196e
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_kill_slave.cnf
@@ -0,0 +1,4 @@
+!include ../galera_2nodes.cnf
+
+[mysqld.1]
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;pc.weight=2'
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_kill_slave.test b/mysql-test/suite/galera_sr/t/galera_sr_kill_slave.test
new file mode 100644
index 00000000000..a76a03e49b9
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_kill_slave.test
@@ -0,0 +1,80 @@
+#
+# This test kills the slave while a Streaming Replication transaction is in
+# progress, but before any fragment has been applied on the slave. It
+# is expected that after the slave restarts, the cluster will continue to
+# be consistent.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+# Block node #2's applier before table t1's inserts have come into play
+
+--connection node_2
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_1
+CREATE TABLE t2 (f1 INTEGER);
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+LOCK TABLE t2 WRITE;
+
+--connection node_1
+INSERT INTO t2 VALUES (1);
+
+--connection node_2
+SET SESSION wsrep_sync_wait = 0;
+
+--connection node_1
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--sleep 2
+
+--connection node_2
+--source include/kill_galera.inc
+--sleep 1
+
+--connection node_1
+INSERT INTO t1 VALUES (6);
+INSERT INTO t1 VALUES (7);
+INSERT INTO t1 VALUES (8);
+INSERT INTO t1 VALUES (9);
+INSERT INTO t1 VALUES (10);
+
+--connection node_2
+--source include/start_mysqld.inc
+--sleep 1
+
+--source include/wait_until_connected_again.inc
+--source include/galera_wait_ready.inc
+
+--connection node_1
+INSERT INTO t1 VALUES (11);
+INSERT INTO t1 VALUES (12);
+INSERT INTO t1 VALUES (13);
+INSERT INTO t1 VALUES (14);
+INSERT INTO t1 VALUES (15);
+COMMIT;
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+--sleep 5
+SELECT COUNT(*) = 15 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_large_fragment-master.opt b/mysql-test/suite/galera_sr/t/galera_sr_large_fragment-master.opt
new file mode 100644
index 00000000000..132c6aed246
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_large_fragment-master.opt
@@ -0,0 +1 @@
+--innodb_log_file_size=1G --binlog-row-event-max-size=100M
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_large_fragment.test b/mysql-test/suite/galera_sr/t/galera_sr_large_fragment.test
new file mode 100644
index 00000000000..63278555723
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_large_fragment.test
@@ -0,0 +1,58 @@
+#
+# Test the replication and subsequent cleanup of a few, very large fragments
+#
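+# Rough arithmetic for the fragment count: the INSERT ... SELECT below writes
+# 10^5 rows of ~512 bytes each (about 50 MB of row data), while
+# wsrep_trx_fragment_size is 10 MiB, so only a handful of large fragments are
+# replicated before the ROLLBACK.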
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/big_test.inc
+
+CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 VARCHAR(512)) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1024 * 1024 * 10;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 (f2) SELECT REPEAT('x', 512) FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4, ten AS a5;
+
+--connection node_2
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) > 50000 FROM t1;
+
+--connection node_1
+ROLLBACK;
+
+--connection node_2
+SET SESSION wsrep_sync_wait = 0;
+--let $wsrep_provider_options_node_2 = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME = 'wsrep_provider_options'`
+SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT10M';
+SET SESSION wsrep_sync_wait = 7;
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) = 0 FROM t1;
+
+--disable_query_log
+--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_node_2';
+
+--connection node_1
+SET SESSION wsrep_sync_wait = 0;
+--let $wsrep_provider_options_node_1 = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME = 'wsrep_provider_options'`
+SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT10M';
+SET SESSION wsrep_sync_wait = 7;
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) = 0 FROM t1;
+
+--disable_query_log
+--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_node_1';
+
+DROP TABLE ten;
+DROP TABLE t1;
+
+CALL mtr.add_suppression('InnoDB: Resizing redo log from');
+CALL mtr.add_suppression('InnoDB: Starting to delete and rewrite log files');
+CALL mtr.add_suppression('InnoDB: New log files created, LSN=');
+
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_load_data.test b/mysql-test/suite/galera_sr/t/galera_sr_load_data.test
new file mode 100644
index 00000000000..363443a9e64
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_load_data.test
@@ -0,0 +1,49 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test Streaming Replication + LOAD DATA
+#
+
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+SET SESSION wsrep_trx_fragment_size = 512;
+
+# Create a file for LOAD DATA with 20K entries
+--perl
+open(FILE, ">", "$ENV{'MYSQLTEST_VARDIR'}/tmp/galera_sr_load_data.csv") or die;
+foreach my $i (1..20000) {
+ print FILE "$i\n";
+}
+EOF
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+# Record wsrep_last_committed as it was before LOAD DATA
+--connection node_2
+--let $wsrep_last_committed_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+
+--connection node_1
+--disable_query_log
+--disable_warnings
+set global wsrep_load_data_splitting=ON;
+--enable_warnings
+--eval LOAD DATA INFILE '$MYSQLTEST_VARDIR/tmp/galera_sr_load_data.csv' INTO TABLE t1;
+--enable_query_log
+
+--connection node_2
+--let $wsrep_last_committed_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+
+SELECT COUNT(*) = 20000 FROM t1;
+# LOAD-ing 20K rows causes 3 commits to be registered
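+# (Assuming wsrep_load_data_splitting commits after every 10,000 rows, 20,000
+#  rows give two intermediate commits plus the final commit: 2 + 1 = 3.)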
+--disable_query_log
+--eval SELECT $wsrep_last_committed_after - $wsrep_last_committed_before = 3 AS wsrep_last_committed_diff
+--enable_query_log
+
+--connection node_1
+--disable_query_log
+--disable_warnings
+set global wsrep_load_data_splitting=OFF;
+--enable_warnings
+--enable_query_log
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_load_data_splitting.test b/mysql-test/suite/galera_sr/t/galera_sr_load_data_splitting.test
new file mode 100644
index 00000000000..40e63e7c67f
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_load_data_splitting.test
@@ -0,0 +1,50 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/big_test.inc
+
+#
+# Test Streaming Replication and LOAD DATA splitting operating at the same time
+#
+
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+--let $wsrep_load_data_splitting_orig = `SELECT @@wsrep_load_data_splitting`
+
+SET SESSION wsrep_trx_fragment_size = 512;
+SET GLOBAL wsrep_load_data_splitting = TRUE;
+
+
+# Create a file for LOAD DATA with 95K entries
+--perl
+open(FILE, ">", "$ENV{'MYSQLTEST_VARDIR'}/tmp/galera_sr_load_data.csv") or die;
+foreach my $i (1..95000) {
+ print FILE "$i\n";
+}
+EOF
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+# Record wsrep_last_committed as it was before LOAD DATA
+--connection node_2
+--let $wsrep_last_committed_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+
+--connection node_1
+--disable_query_log
+--eval LOAD DATA INFILE '$MYSQLTEST_VARDIR/tmp/galera_sr_load_data.csv' INTO TABLE t1;
+--enable_query_log
+
+--connection node_2
+--let $wsrep_last_committed_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+
+SELECT COUNT(*) = 95000 FROM t1;
+
+# LOAD-ing 95K rows causes 10 'commits' to be registered
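+# (With the same assumed 10,000-row split size: nine intermediate commits plus
+#  the final commit of the remaining 5,000 rows = 10.)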
+--disable_query_log
+--eval SELECT $wsrep_last_committed_after - $wsrep_last_committed_before = 10 AS wsrep_last_committed_diff;
+--enable_query_log
+
+--connection node_1
+--disable_query_log
+--eval SET GLOBAL wsrep_load_data_splitting = $wsrep_load_data_splitting_orig;
+--enable_query_log
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_log_bin-master.opt b/mysql-test/suite/galera_sr/t/galera_sr_log_bin-master.opt
new file mode 100644
index 00000000000..03fcb5d040d
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_log_bin-master.opt
@@ -0,0 +1 @@
+--log-slave-updates --log-bin
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_log_bin.test b/mysql-test/suite/galera_sr/t/galera_sr_log_bin.test
new file mode 100644
index 00000000000..7dfa7850c15
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_log_bin.test
@@ -0,0 +1,70 @@
+#
+# Interleave SR and non-SR transactions and confirm that the binlog is in correct order
+#
+
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t3 (f1 INTEGER) ENGINE=InnoDB;
+CREATE TABLE t4 (f1 INTEGER) ENGINE=InnoDB;
+
+--source include/galera_cluster.inc
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t2 VALUES (1);
+
+--connection node_2
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t3 VALUES (1);
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connection node_2a
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t4 VALUES (1);
+
+--connection node_1
+INSERT INTO t1 VALUES (2);
+COMMIT;
+
+--connection node_1a
+INSERT INTO t2 VALUES (2);
+COMMIT;
+
+--connection node_2
+INSERT INTO t3 VALUES (2);
+COMMIT;
+--connection node_2a
+INSERT INTO t4 VALUES (2);
+COMMIT;
+
+--connection node_1
+SELECT COUNT(*) = 2 FROM t4;
+
+--replace_regex /xid=[0-9]+/xid=###/ /table_id: [0-9]+/table_id: ###/
+--replace_column 2 <Pos> 5 <End_log_pos>
+SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 256;
+
+--connection node_2
+SELECT COUNT(*) = 2 FROM t4;
+
+--replace_regex /xid=[0-9]+/xid=###/ /table_id: [0-9]+/table_id: ###/
+--replace_column 2 <Pos> 5 <End_log_pos>
+SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 256;
+
+DROP TABLE t1,t2,t3,t4;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_many_fragments.test b/mysql-test/suite/galera_sr/t/galera_sr_many_fragments.test
new file mode 100644
index 00000000000..9b8dae9d8e3
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_many_fragments.test
@@ -0,0 +1,53 @@
+#
+# Test the replication and subsequent cleanup of a large number of small transaction fragments
+#
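+# With wsrep_trx_fragment_size = 1, effectively every row change is replicated
+# as its own fragment, so the 10^4-row INSERT ... SELECT below generates on
+# the order of 10,000 fragments before being rolled back.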
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/big_test.inc
+
+CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 VARCHAR(512)) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 (f2) SELECT REPEAT('x', 512) FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+
+--connection node_2
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 10000 FROM t1;
+
+--connection node_1
+ROLLBACK;
+
+--connection node_2
+SET SESSION wsrep_sync_wait = 0;
+--let $wsrep_provider_options_node_2 = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME = 'wsrep_provider_options'`
+SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT10M';
+SET SESSION wsrep_sync_wait = 7;
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) = 0 FROM t1;
+
+--disable_query_log
+--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_node_2';
+
+--connection node_1
+SET SESSION wsrep_sync_wait = 0;
+--let $wsrep_provider_options_node_1 = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME = 'wsrep_provider_options'`
+SET GLOBAL wsrep_provider_options = 'repl.causal_read_timeout=PT10M';
+SET SESSION wsrep_sync_wait = 7;
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) = 0 FROM t1;
+
+--disable_query_log
+--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_node_1';
+
+DROP TABLE ten;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_myisam.test b/mysql-test/suite/galera_sr/t/galera_sr_myisam.test
new file mode 100644
index 00000000000..b037f817610
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_myisam.test
@@ -0,0 +1,29 @@
+#
+# Test that the basic MyISAM replication works even with SR enabled
+# We basically check that the data arrived on the slave and that there
+# were no assertions.
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 TEXT) ENGINE=MyISAM;
+
+--let $wsrep_replicate_myisam_orig = `SELECT @@wsrep_replicate_myisam`
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET GLOBAL wsrep_replicate_myisam = TRUE;
+
+INSERT INTO t1 VALUES (REPEAT('x', 65535));
+
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1;
+SELECT LENGTH(f1) = 65535 FROM t1;
+
+DROP TABLE t1;
+
+--connection node_1
+--disable_query_log
+--eval SET GLOBAL wsrep_replicate_myisam = $wsrep_replicate_myisam_orig;
+--enable_query_log
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_mysqldump_sst.cnf b/mysql-test/suite/galera_sr/t/galera_sr_mysqldump_sst.cnf
new file mode 100644
index 00000000000..574ae28b54a
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_mysqldump_sst.cnf
@@ -0,0 +1,11 @@
+!include ../galera_2nodes.cnf
+
+# We do not set mysqldump-related SST options here because doing so on startup
+# causes the first MTR connection to be forcefully dropped by Galera, which in turn confuses MTR
+
+[mysqld.1]
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true'
+
+[mysqld.2]
+wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore_sb=true'
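+
+# gcache.size is kept very small here, presumably so that a rejoining node
+# cannot be served by IST and a full (mysqldump) SST is required instead.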
+
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_mysqldump_sst.test b/mysql-test/suite/galera_sr/t/galera_sr_mysqldump_sst.test
new file mode 100644
index 00000000000..e5cf5e43b44
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_mysqldump_sst.test
@@ -0,0 +1,79 @@
+#
+# Test mysqldump SST on the slave while an SR transaction is in progress
+#
+
+--source include/big_test.inc
+--source include/galera_cluster.inc
+
+--source suite/galera/include/galera_sst_set_mysqldump.inc
+
+--connection node_1
+CREATE TABLE ten (f1 INTEGER);
+INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 CHAR(255)) ENGINE=InnoDB;
+
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size = 1000;
+START TRANSACTION;
+# Insert 1000 rows
+INSERT INTO t1 (f2) SELECT REPEAT('x', 255) FROM ten AS a1, ten AS a2, ten AS a3;
+
+# Update 1000 rows
+UPDATE t1 SET f2 = REPEAT('y', 255);
+
+# Wait for SR replication to kick in
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+
+# Restart node #2
+
+--connection node_2
+--let $MYSQLD2_DATADIR = `SELECT @@datadir`
+
+--echo Shutting down server ...
+--source include/shutdown_mysqld.inc
+
+# Force SST
+--remove_file $MYSQLD2_DATADIR/grastate.dat
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+--connection node_2
+--echo Starting server ...
+--let $restart_parameters = --wsrep_sst_auth=sst:sst --wsrep_sst_method=mysqldump --wsrep-sst-receive-address=127.0.0.1:$NODE_MYPORT_2
+--source include/start_mysqld.inc
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
+--source include/wait_condition.inc
+
+# Check that node #2 is caught up with the SR transaction that is still in progress
+--connection node_2
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+# Finalize transaction
+--connection node_1
+UPDATE t1 SET f2 = REPEAT('z', 255);
+COMMIT;
+
+# Confirm proper replication of entire transaction to node #2
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT COUNT(*) = 1000 FROM t1;
+SELECT COUNT(*) = 1000 FROM t1 WHERE f2 = REPEAT('z', 255);
+
+DROP TABLE t1;
+DROP TABLE ten;
+
+--connection node_1
+# galera_sst_restore.inc uses DROP USER internally which is incompatible
+# with SR, need to disable SR before that.
+SET SESSION wsrep_trx_fragment_size=0;
+--source suite/galera/include/galera_sst_restore.inc
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_parallel_apply.test b/mysql-test/suite/galera_sr/t/galera_sr_parallel_apply.test
new file mode 100644
index 00000000000..83a7acbe3e0
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_parallel_apply.test
@@ -0,0 +1,59 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test SR with parallel apply
+#
+
+--connection node_2
+--let $wsrep_slave_threads_orig = `SELECT @@wsrep_slave_threads`
+SET GLOBAL wsrep_slave_threads = 5;
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT, f2 INTEGER) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 (f2) VALUES (1);
+INSERT INTO t1 (f2) VALUES (1);
+INSERT INTO t1 (f2) VALUES (1);
+INSERT INTO t1 (f2) VALUES (1);
+--send INSERT INTO t1 (f2) VALUES (1);
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 (f2) VALUES (2);
+INSERT INTO t1 (f2) VALUES (2);
+INSERT INTO t1 (f2) VALUES (2);
+INSERT INTO t1 (f2) VALUES (2);
+--send INSERT INTO t1 (f2) VALUES (2);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1 WHERE f2 = 1;
+--source include/wait_condition.inc
+
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1 WHERE f2 = 2;
+--source include/wait_condition.inc
+
+--connection node_1
+--reap
+COMMIT;
+
+--connection node_1a
+--reap
+ROLLBACK;
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+SELECT COUNT(*) = 5 FROM t1;
+
+--connection node_1
+DROP TABLE t1;
+
+--connection node_2
+--disable_query_log
+--eval SET GLOBAL wsrep_slave_threads = $wsrep_slave_threads_orig;
+--enable_query_log
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_rollback.test b/mysql-test/suite/galera_sr/t/galera_sr_rollback.test
new file mode 100644
index 00000000000..33a318f8ae3
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_rollback.test
@@ -0,0 +1,76 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test that ROLLBACK works correctly with streaming replication
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER, f2 VARCHAR(10)) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+INSERT INTO t1 VALUES (3, 'a');
+INSERT INTO t1 VALUES (4, 'a');
+INSERT INTO t1 VALUES (5, 'a');
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+INSERT INTO t1 VALUES (11, 'b');
+INSERT INTO t1 VALUES (12, 'b');
+INSERT INTO t1 VALUES (13, 'b');
+INSERT INTO t1 VALUES (14, 'b');
+INSERT INTO t1 VALUES (15, 'b');
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) >= 0 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+ROLLBACK;
+
+#
+# After ROLLBACK, the table on node #2 should be empty
+#
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 0 FROM t1;
+--source include/wait_condition.inc
+
+#
+# It should be possible to re-insert the values we just rolled back
+#
+
+--connection node_1
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+INSERT INTO t1 VALUES (3, 'a');
+INSERT INTO t1 VALUES (4, 'a');
+INSERT INTO t1 VALUES (5, 'a');
+
+INSERT INTO t1 VALUES (11, 'b');
+INSERT INTO t1 VALUES (12, 'b');
+INSERT INTO t1 VALUES (13, 'b');
+INSERT INTO t1 VALUES (14, 'b');
+INSERT INTO t1 VALUES (15, 'b');
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) >= 9 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+COMMIT;
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) >= 10 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_rollback_retry.test b/mysql-test/suite/galera_sr/t/galera_sr_rollback_retry.test
new file mode 100644
index 00000000000..c6c443a0828
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_rollback_retry.test
@@ -0,0 +1,55 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test that a SR transaction that was just ROLLBACKed on one node can be
+# run against another node without any conflicts
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+ROLLBACK;
+
+#
+# After ROLLBACK, the table on node #2 should be empty
+#
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 0 FROM t1;
+--source include/wait_condition.inc
+
+#
+# It should be possible to reissue the same transaction against node #2
+#
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+COMMIT;
+
+SELECT COUNT(*) = 5 FROM t1;
+
+--connection node_1
+SELECT COUNT(*) = 5 FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_rollback_savepoint.test b/mysql-test/suite/galera_sr/t/galera_sr_rollback_savepoint.test
new file mode 100644
index 00000000000..93ff7a948c4
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_rollback_savepoint.test
@@ -0,0 +1,51 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test that ROLLBACK TO SAVEPOINT works correctly with streaming replication
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER, f2 VARCHAR(10)) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+INSERT INTO t1 VALUES (3, 'a');
+INSERT INTO t1 VALUES (4, 'a');
+INSERT INTO t1 VALUES (5, 'a');
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+SAVEPOINT s1;
+INSERT INTO t1 VALUES (11, 'b');
+INSERT INTO t1 VALUES (12, 'b');
+INSERT INTO t1 VALUES (13, 'b');
+INSERT INTO t1 VALUES (14, 'b');
+INSERT INTO t1 VALUES (15, 'b');
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) > 5 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+ROLLBACK TO SAVEPOINT s1;
+
+INSERT INTO t1 VALUES (21, 'c');
+INSERT INTO t1 VALUES (22, 'c');
+INSERT INTO t1 VALUES (23, 'c');
+INSERT INTO t1 VALUES (24, 'c');
+INSERT INTO t1 VALUES (25, 'c');
+
+--connection node_2
+SELECT COUNT(*) = 5 FROM t1 WHERE f2 = 'a';
+SELECT COUNT(*) = 0 FROM t1 WHERE f2 = 'b';
+SELECT COUNT(*) = 5 FROM t1 WHERE f2 = 'c';
+
+--connection node_1
+COMMIT;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_rollback_statement.test b/mysql-test/suite/galera_sr/t/galera_sr_rollback_statement.test
new file mode 100644
index 00000000000..ba981c8c9a3
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_rollback_statement.test
@@ -0,0 +1,59 @@
+#
+# Test the case where a statement is rolled back due to an error while
+# Streaming Replication is in effect. We construct an INSERT ... SELECT
+# statement that will fail with a duplicate key error towards the end of
+# the statement, after a portion has already been replicated via SR.
+#
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 CHAR) ENGINE=InnoDB;
+
+INSERT INTO t1 VALUES (1, 'a'), (2, 'a'), (3, 'a');
+
+# This poison value is used to cause the INSERT ... SELECT below to fail
+INSERT INTO t2 VALUES (3, 'b');
+
+SET SESSION wsrep_trx_fragment_size = 1;
+
+--disable_query_log
+--let $last_committed_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+--enable_query_log
+
+--error ER_DUP_ENTRY
+INSERT INTO t2 SELECT * FROM t1;
+
+#
+# We should see three fragments replicated: one for row 1, one for row 2, and the rollback fragment.
+#
+--disable_query_log
+--let $last_committed_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed'`
+--eval SELECT $last_committed_after - $last_committed_before AS last_committed_diff
+--enable_query_log
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 1 FROM t2;
+--source include/wait_condition.inc
+
+# The cluster continues to operate after the implicit ROLLBACK
+--connection node_1
+INSERT INTO t2 VALUES (1, 'c');
+
+--connection node_2
+INSERT INTO t2 VALUES (2, 'c');
+
+--connection node_1
+SELECT * FROM t2;
+
+--connection node_2
+SELECT * FROM t2;
+
+--connection node_1
+
+SET SESSION wsrep_trx_fragment_size = DEFAULT;
+
+DROP TABLE t1;
+DROP TABLE t2;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_shutdown_master.test b/mysql-test/suite/galera_sr/t/galera_sr_shutdown_master.test
new file mode 100644
index 00000000000..3f7407fe536
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_shutdown_master.test
@@ -0,0 +1,53 @@
+#
+# Shut down master (node #2) while an SR transaction is in progress
+#
+
+--source include/galera_cluster.inc
+
+# Save original auto_increment_offset values.
+--let $node_1=node_1
+--let $node_2=node_2
+--source ../galera/include/auto_increment_offset_save.inc
+
+--connection node_2
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE = InnoDB;
+
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1),(2),(3);
+
+--connection node_1
+--let $wait_condition = SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+
+--connection node_2
+--source include/shutdown_mysqld.inc
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
+
+# Confirm that SR table on slave is empty
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_2
+--source include/start_mysqld.inc
+
+# SR table on master should be empty too
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+# Confirm that the INSERT can be re-issued
+INSERT INTO t1 VALUES (1),(2),(3);
+
+--connection node_1
+SELECT COUNT(*) = 3 FROM t1;
+
+DROP TABLE t1;
+
+--connection node_2
+CALL mtr.add_suppression("WSREP: Failed to replicate rollback fragment for ");
+
+# Restore original auto_increment_offset values.
+--source ../galera/include/auto_increment_offset_restore.inc
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_shutdown_slave.test b/mysql-test/suite/galera_sr/t/galera_sr_shutdown_slave.test
new file mode 100644
index 00000000000..fa2df242ccc
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_shutdown_slave.test
@@ -0,0 +1,63 @@
+#
+# Shut down slave (node #2) while an SR transaction is in progress
+#
+
+--source include/galera_cluster.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE = InnoDB;
+
+# We start two transactions on the master so that we can commit one while the slave
+# is down and commit the other after the slave has rejoined
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (11),(12),(13);
+
+--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1b
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (21),(22),(23);
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+--source include/wait_condition.inc
+
+--source include/shutdown_mysqld.inc
+
+--connection node_1
+--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+--source include/wait_condition.inc
+
+# Commit one transaction while the slave is down
+--connection node_1a
+INSERT INTO t1 VALUES (14),(15),(16);
+COMMIT;
+
+# Restart slave
+--connection node_2
+--source include/start_mysqld.inc
+
+# Confirm SR table on slave has entries
+SELECT COUNT(*) > 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) = 6 FROM t1 WHERE f1 IN (11,12,13,14,15,16);
+
+# Commit the second transaction on master after the slave has rejoined
+--connection node_1b
+INSERT INTO t1 VALUES (24),(25),(26);
+COMMIT;
+
+# Confirm that SR table on slave is empty
+--connection node_2
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+SELECT COUNT(*) = 12 FROM t1;
+
+# SR table on master should be empty too
+--connection node_1
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_small_gcache.cnf b/mysql-test/suite/galera_sr/t/galera_sr_small_gcache.cnf
new file mode 100644
index 00000000000..c8e17436e71
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_small_gcache.cnf
@@ -0,0 +1,6 @@
+!include ../galera_2nodes.cnf
+[mysqld.1]
+wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=16K'
+[mysqld.2]
+wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=16K'
+
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_small_gcache.test b/mysql-test/suite/galera_sr/t/galera_sr_small_gcache.test
new file mode 100644
index 00000000000..403b44286d9
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_small_gcache.test
@@ -0,0 +1,21 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# A simple test with a very low value for gcache.size - 16K
+#
+
+--connection node_1
+CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+
+SET SESSION wsrep_trx_fragment_size = 1;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 SELECT 1 FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+
+--connection node_2
+SELECT COUNT(*) = 10000 FROM t1;
+
+--connection node_1
+DROP TABLE t1;
+DROP TABLE ten;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_table_contents.test b/mysql-test/suite/galera_sr/t/galera_sr_table_contents.test
new file mode 100644
index 00000000000..92d29fe4ca2
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_table_contents.test
@@ -0,0 +1,49 @@
+#
+# This test dumps the contents of the SR table under various circumstances
+#
+
+--source include/galera_cluster.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2),(3);
+
+CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3);
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+--echo
+--echo Start of Simple Insert
+INSERT INTO t1 VALUES (4);
+--source suite/galera/include/galera_dump_sr_table.inc
+--echo End of Simple Insert
+--echo
+ROLLBACK;
+
+--echo Start of Multi-row Update
+UPDATE t1 SET f1 = f1 + 10;
+--source suite/galera/include/galera_dump_sr_table.inc
+--echo End of Multi-row Update
+--echo
+ROLLBACK;
+
+--echo Start of Multi-table Update
+UPDATE t1, t2 SET t1.f1 = t1.f1 + 100, t2.f1 = t2.f1 + 100;
+--source suite/galera/include/galera_dump_sr_table.inc
+--echo End of Multi-table Update
+--echo
+ROLLBACK;
+
+--echo Start of Savepoint
+INSERT INTO t1 VALUES (1000);
+SAVEPOINT X;
+INSERT INTO t1 VALUES (2000);
+ROLLBACK TO SAVEPOINT X;
+--source suite/galera/include/galera_dump_sr_table.inc
+--echo End of Savepoint
+--echo
+ROLLBACK;
+
+DROP TABLE t1, t2;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_transaction_replay.test b/mysql-test/suite/galera_sr/t/galera_sr_transaction_replay.test
new file mode 100644
index 00000000000..f44d67e5c8c
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_transaction_replay.test
@@ -0,0 +1,260 @@
+#
+# This test verifies the operation of SR transaction replay. If a
+# potentially conflicting remote transaction arrives at
+# just the right time during the commit of a local transaction,
+# the local transaction will be aborted and replayed.
+#
+# This test is divided into two sections:
+# 1) Test the scenario where the last fragment has no write set
+#    payload and only the commit flag is replicated
+# 2) Test the scenario where the last fragment has both a write set
+#    payload and the commit flag
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/have_debug_sync.inc
+--source include/galera_have_debug_sync.inc
+
+# Control connection for manipulating galera sync points
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+SET SESSION wsrep_sync_wait = 0;
+
+--connection node_1
+
+--let $wsrep_local_replays_old = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1));
+
+#########################################################################
+#
+# 1) Replay without commit fragment write set payload
+#
+#########################################################################
+
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+
+--connection node_1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE;
+
+#
+# Block the commit from node_2
+#
+--connection node_1a
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+
+#
+# Issue conflicting UPDATE from node_2 and wait until it hits the
+# apply monitor (but does not apply yet)
+#
+--connection node_2
+UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
+
+--connection node_1a
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_wait_sync_point.inc
+
+#
+# Set a new sync point to block in local monitor on node_1 commit
+#
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = local_monitor_master_enter_sync
+--source include/galera_set_sync_point.inc
+
+#
+# Send the commit on node_1
+#
+--connection node_1
+--send COMMIT
+
+#
+# Wait until commit reaches sync point
+#
+--connection node_1a
+SET SESSION wsrep_sync_wait = 0;
+--let $galera_sync_point = apply_monitor_slave_enter_sync local_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
+
+#
+# Release conflicting slave transaction and wait until it has BF
+# aborted pending COMMIT
+#
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = abort_trx_end
+--source include/galera_set_sync_point.inc
+
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+
+--let $galera_sync_point = abort_trx_end local_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
+
+#
+# Release both threads, local thread will now replay
+#
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = abort_trx_end
+--source include/galera_signal_sync_point.inc
+
+--let $galera_sync_point = local_monitor_master_enter_sync
+--source include/galera_signal_sync_point.inc
+
+#
+# Commit must succeed
+#
+--connection node_1
+--reap
+
+
+#
+# Check the outcome and that wsrep schema SR table is empty
+#
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+#
+# wsrep_local_replays has increased by 1
+#
+--let $wsrep_local_replays_new = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+--disable_query_log
+--eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old = 1 AS wsrep_local_replays;
+--enable_query_log
+
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DELETE FROM t1;
+
+#########################################################################
+#
+# 2) Replay with commit fragment write set payload
+#
+#########################################################################
+
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+
+--connection node_1
+SET AUTOCOMMIT=ON;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+
+#
+# Do the first update with SR enabled
+#
+UPDATE t1 SET f2 = 'x' WHERE f1 = 1;
+
+#
+# Disable SR for the following statements
+#
+SET SESSION wsrep_trx_fragment_size = 0;
+
+UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
+SELECT * FROM t1 WHERE f1 = 2 FOR UPDATE;
+
+#
+# Block the commit from node_2
+#
+--connection node_1a
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_set_sync_point.inc
+
+#
+# Issue conflicting UPDATE from node_2 and wait until it hits the
+# apply monitor (but does not apply yet)
+#
+--connection node_2
+UPDATE t1 SET f2 = 'c' WHERE f1 = 2;
+
+--connection node_1a
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_wait_sync_point.inc
+
+#
+# Set a new sync point to block in local monitor on node_1 commit
+#
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = local_monitor_master_enter_sync
+--source include/galera_set_sync_point.inc
+
+#
+# Send the commit on node_1
+#
+--connection node_1
+--send COMMIT
+
+#
+# Wait until commit reaches sync point
+#
+--connection node_1a
+SET SESSION wsrep_sync_wait = 0;
+--let $galera_sync_point = apply_monitor_slave_enter_sync local_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
+
+#
+# Release conflicting slave transaction and wait until it has BF
+# aborted pending COMMIT
+#
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = abort_trx_end
+--source include/galera_set_sync_point.inc
+
+--let $galera_sync_point = apply_monitor_slave_enter_sync
+--source include/galera_signal_sync_point.inc
+
+--let $galera_sync_point = abort_trx_end local_monitor_master_enter_sync
+--source include/galera_wait_sync_point.inc
+
+#
+# Release both threads, local thread will now replay
+#
+--source include/galera_clear_sync_point.inc
+--let $galera_sync_point = abort_trx_end
+--source include/galera_signal_sync_point.inc
+
+--let $galera_sync_point = local_monitor_master_enter_sync
+--source include/galera_signal_sync_point.inc
+
+#
+# Commit must succeed
+#
+--connection node_1
+--reap
+
+
+#
+# Check the outcome and that wsrep schema SR table is empty
+#
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+#
+# wsrep_local_replays has increased by 1
+#
+--let $wsrep_local_replays_new = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'`
+--disable_query_log
+--eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old = 2 AS wsrep_local_replays;
+--enable_query_log
+
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'b';
+SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+DELETE FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_unit_statements.test b/mysql-test/suite/galera_sr/t/galera_sr_unit_statements.test
new file mode 100644
index 00000000000..0cf05765838
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_unit_statements.test
@@ -0,0 +1,54 @@
+#
+# Test wsrep_trx_fragment_unit = 'statements'
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 3;
+SET SESSION wsrep_trx_fragment_unit = 'statements';
+
+--connection node_1
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+
+# Expect nothing to be replicated yet; so far we have 2 statements
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+INSERT INTO t1 VALUES (2);
+
+# Expect 2 rows in t1 and 1 fragment in SR table
+--connection node_2
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+# Expect 5 rows in t1 and 2 fragments in SR table
+--connection node_2
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+COMMIT;
+
+# Expect 5 rows in t1 and empty SR table
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) FROM t1;
+SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_v1_row_events-master.opt b/mysql-test/suite/galera_sr/t/galera_sr_v1_row_events-master.opt
new file mode 100644
index 00000000000..0b5f8bf7104
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_v1_row_events-master.opt
@@ -0,0 +1 @@
+--log-bin-use-v1-row-events=1 --wsrep-trx-fragment-size=1
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_v1_row_events.test b/mysql-test/suite/galera_sr/t/galera_sr_v1_row_events.test
new file mode 100644
index 00000000000..d3d4d2d0c14
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_v1_row_events.test
@@ -0,0 +1,27 @@
+#
+# Test that Galera SR continues to run even with --log-bin-use-v1-row-events=1
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+SELECT COUNT(*) = 1 FROM t1;
+
+--connection node_1
+COMMIT;
+
+SET AUTOCOMMIT=ON;
+UPDATE t1 SET f1 = 2 WHERE f1 = 1;
+
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1 WHERE f1 = 2;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_ws_size.test b/mysql-test/suite/galera_sr/t/galera_sr_ws_size.test
new file mode 100644
index 00000000000..98f6e796ef6
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_ws_size.test
@@ -0,0 +1,70 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test that an SR transaction is allowed to grow cumulatively beyond repl.max_ws_size
+# as long as the individual fragments are below that size
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 VARCHAR(254)) ENGINE=InnoDB;
+CREATE TABLE ten (f1 INTEGER);
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+--let $wsrep_provider_options_orig = `SELECT @@wsrep_provider_options`
+
+SET SESSION wsrep_trx_fragment_size = 512;
+SET GLOBAL wsrep_provider_options='repl.max_ws_size=4096';
+
+#
+# Create a transaction larger than repl.max_ws_size
+#
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1;
+
+#
+# We expect that the transaction can proceed successfully
+#
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 10 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+# Commit succeeds
+COMMIT;
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+SELECT COUNT(*) = 100 FROM t1;
+
+#
+# Cleanup
+#
+
+DROP TABLE t1;
+DROP TABLE ten;
+
+--connection node_1
+--disable_query_log
+--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_orig';
+--enable_query_log
+
+call mtr.add_suppression('WSREP: transaction size limit.*');
+call mtr.add_suppression('WSREP: rbr write fail.*');
+call mtr.add_suppression('WSREP: Maximum writeset size exceeded by.*');
+call mtr.add_suppression('WSREP: transaction size exceeded.*');
diff --git a/mysql-test/suite/galera_sr/t/galera_sr_ws_size2.test b/mysql-test/suite/galera_sr/t/galera_sr_ws_size2.test
new file mode 100644
index 00000000000..2b9bc4819b8
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_sr_ws_size2.test
@@ -0,0 +1,62 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Test that if wsrep_trx_fragment_size > repl.max_ws_size, no SR takes place and
+# the transaction is properly aborted.
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER AUTO_INCREMENT PRIMARY KEY, f2 VARCHAR(254)) ENGINE=InnoDB;
+CREATE TABLE ten (f1 INTEGER);
+INSERT INTO ten VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
+
+--let $wsrep_trx_fragment_size_orig = `SELECT @@wsrep_trx_fragment_size`
+--let $wsrep_provider_options_orig = `SELECT @@wsrep_provider_options`
+
+SET SESSION wsrep_trx_fragment_size = 256;
+SET GLOBAL wsrep_provider_options='repl.max_ws_size=128';
+
+#
+# Create a transaction larger than repl.max_ws_size
+#
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+--error ER_ERROR_DURING_COMMIT,ER_ERROR_ON_WRITE,ER_BINLOG_ROW_LOGGING_FAILED
+INSERT INTO t1 (f2) SELECT REPEAT('x', 254) FROM ten AS a1, ten AS a2;
+
+#
+# We expect that the transaction cannot complete successfully
+#
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--sleep 2
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+SELECT COUNT(*) = 0 FROM t1;
+
+#
+# Cleanup
+#
+--connection node_1
+--disable_query_log
+--eval SET GLOBAL wsrep_trx_fragment_size = $wsrep_trx_fragment_size_orig;
+--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_orig';
+--enable_query_log
+
+DROP TABLE t1;
+DROP TABLE ten;
+
+call mtr.add_suppression('WSREP: SR rollback replication failure.*');
+call mtr.add_suppression('WSREP: transaction size limit.*');
+call mtr.add_suppression('WSREP: SR rbr write fail.*');
+call mtr.add_suppression('WSREP: Maximum writeset size exceeded by.*');
+call mtr.add_suppression('WSREP: transaction size exceeded.*');
+call mtr.add_suppression('WSREP: fragment replication failed:');
+call mtr.add_suppression('WSREP: post commit failed for SR rollback');
+call mtr.add_suppression('WSREP: pre_commit for SR rollback returned 2, thd:*');
+call mtr.add_suppression('WSREP: wsrep_rollback failed to send SR ROLLBACK for *');
diff --git a/mysql-test/suite/galera_sr/t/galera_var_ignore_apply_errors_sr.test b/mysql-test/suite/galera_sr/t/galera_var_ignore_apply_errors_sr.test
new file mode 100644
index 00000000000..ea40f58db73
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/galera_var_ignore_apply_errors_sr.test
@@ -0,0 +1,38 @@
+#
+# Test option wsrep_ignore_apply_errors
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# Delete a row that does not exist using an SR transaction
+#
+
+--connection node_2
+SET GLOBAL wsrep_ignore_apply_errors = 2;
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER);
+INSERT INTO t1 VALUES (2);
+SET GLOBAL wsrep_on = OFF;
+INSERT INTO t1 VALUES (1);
+SET GLOBAL wsrep_on = ON;
+SET SESSION wsrep_trx_fragment_size = 1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (3);
+DELETE FROM t1 WHERE f1 = 1;
+DELETE FROM t1 WHERE f1 = 2;
+COMMIT;
+
+--connection node_1
+SELECT COUNT(*) = 1 FROM t1;
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1;
+
+SET SESSION wsrep_trx_fragment_size = 0;
+DROP TABLE t1;
+
+SET GLOBAL wsrep_ignore_apply_errors = 7;
+CALL mtr.add_suppression("Slave SQL: Could not execute Delete_rows event");
+CALL mtr.add_suppression("Can't find record in 't1'"); \ No newline at end of file
diff --git a/mysql-test/suite/galera_sr/t/mdev_18631.cnf b/mysql-test/suite/galera_sr/t/mdev_18631.cnf
new file mode 100644
index 00000000000..6fac754e21b
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mdev_18631.cnf
@@ -0,0 +1,23 @@
+!include ../galera_2nodes.cnf
+
+[mysqld]
+log-bin
+log-slave-updates
+log-bin-compress
+
+[mysqld.1]
+#gtid_domain_id=1
+wsrep_gtid_mode=ON
+# Maximum allowed wsrep_gtid_domain_id.
+wsrep_gtid_domain_id=4294967295
+wsrep_trx_fragment_size=1
+wsrep_trx_fragment_unit='ROWS'
+
+[mysqld.2]
+#gtid_domain_id=2
+wsrep_gtid_mode=ON
+wsrep_gtid_domain_id=4294967295
+wsrep_trx_fragment_size=1
+wsrep_trx_fragment_unit='ROWS'
+#wsrep_gtid_domain_id value will be inherited from the donor node (mysqld.1)
+#wsrep_gtid_domain_id=X
diff --git a/mysql-test/suite/galera_sr/t/mdev_18631.test b/mysql-test/suite/galera_sr/t/mdev_18631.test
new file mode 100644
index 00000000000..b6a0d85bbd4
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mdev_18631.test
@@ -0,0 +1,24 @@
+#
+# Test that streaming replication works with wsrep_gtid_mode=ON.
+# The configuration is provided in mdev_18631.cnf.
+#
+
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--echo # On node_1
+--connection node_1
+CREATE TABLE t1(f1 INT PRIMARY KEY) ENGINE=INNODB;
+
+INSERT INTO t1 VALUES (1), (2), (3);
+
+--connection node_2
+SELECT * FROM t1;
+
+--connection node_1
+SELECT * FROM t1;
+
+DROP TABLE t1;
+
+--source include/galera_end.inc
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep#215.test b/mysql-test/suite/galera_sr/t/mysql-wsrep#215.test
new file mode 100644
index 00000000000..e3e7411c7ea
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep#215.test
@@ -0,0 +1,176 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/have_debug_sync.inc
+
+#
+# Test the following sequence of events:
+#
+# 1. Node #1 begins a transaction
+# 2. Node #2 performs a conflicting insert
+# 3. Node #1 attempts to SR-replicate a conflicting transaction
+#
+
+# to sync node_1 appliers
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) ENGINE=InnoDB;
+SET SESSION wsrep_trx_fragment_size = 2;
+SET SESSION wsrep_trx_fragment_unit = 'statements';
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--let $expected_cert_failures = `SELECT VARIABLE_VALUE + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_cert_failures'`
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+--connection node_1a
+SET GLOBAL DEBUG_DBUG = 'd,sync.wsrep_apply_cb';
+SET SESSION wsrep_sync_wait = 0;
+
+--connection node_2
+INSERT INTO t1 VALUES (1);
+
+--connection node_1a
+SET SESSION debug_sync = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM t1;
+--send INSERT INTO t1 VALUES (1);
+
+--connection node_1a
+# Wait for the above INSERT to fail certification
+--connection node_1a
+--let $wait_condition = SELECT VARIABLE_VALUE = $expected_cert_failures FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_cert_failures'
+--source include/wait_condition.inc
+
+SET GLOBAL DEBUG_DBUG = '';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+
+--connection node_1
+--error ER_LOCK_DEADLOCK
+--reap
+
+COMMIT;
+
+SELECT COUNT(*) = 1 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1a
+SET DEBUG_SYNC = 'RESET';
+
+#
+# Similar test with BYTES unit
+#
+--connection node_1
+TRUNCATE TABLE t1;
+
+SET SESSION wsrep_trx_fragment_size = 10;
+SET SESSION wsrep_trx_fragment_unit = 'bytes';
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--let $expected_cert_failures = `SELECT VARIABLE_VALUE + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_cert_failures'`
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+--connection node_1a
+SET GLOBAL DEBUG_DBUG = 'd,sync.wsrep_apply_cb';
+SET SESSION wsrep_sync_wait = 0;
+
+--connection node_2
+INSERT INTO t1 VALUES (1);
+
+--connection node_1a
+SET SESSION debug_sync = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_1
+SELECT COUNT(*) = 0 FROM t1;
+--send INSERT INTO t1 VALUES (1)
+
+# Wait for the above INSERT to fail certification
+--connection node_1a
+--let $wait_condition = SELECT VARIABLE_VALUE = $expected_cert_failures FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_cert_failures'
+--source include/wait_condition.inc
+
+SET GLOBAL DEBUG_DBUG = '';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+
+--connection node_1
+--error ER_LOCK_DEADLOCK
+--reap
+ROLLBACK;
+
+SELECT * FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT * FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1a
+SET DEBUG_SYNC = 'RESET';
+
+#
+# One more test with the BYTES unit, but now the fragment size is adjusted so
+# that the second insert should trigger fragment replication.
+# Currently 200 bytes is a good choice here, but this may change with
+# future MySQL versions.
+# => If this test fails after a MySQL merge, check whether the fragment size
+#    needs to be tuned so that fragment replication triggers at the second
+#    insert statement.
+#
+--connection node_1
+TRUNCATE TABLE t1;
+
+SET SESSION wsrep_trx_fragment_size = 200;
+SET SESSION wsrep_trx_fragment_unit = 'bytes';
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+--let $expected_cert_failures = `SELECT VARIABLE_VALUE + 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_cert_failures'`
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+--connection node_1a
+SET GLOBAL DEBUG_DBUG = 'd,sync.wsrep_apply_cb';
+SET SESSION wsrep_sync_wait = 0;
+
+--connection node_2
+INSERT INTO t1 VALUES (1);
+
+--connection node_1a
+SET SESSION debug_sync = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_1
+INSERT INTO t1 VALUES (1);
+--send INSERT INTO t1 VALUES (2)
+
+# Wait for the above INSERT to fail certification
+--connection node_1a
+--let $wait_condition = SELECT VARIABLE_VALUE = $expected_cert_failures FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_cert_failures'
+--source include/wait_condition.inc
+
+SET GLOBAL DEBUG_DBUG = '';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+
+--connection node_1
+--error ER_LOCK_DEADLOCK
+--reap
+COMMIT;
+
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_2
+SELECT COUNT(*) = 1 FROM t1;
+SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
+
+--connection node_1a
+DROP TABLE t1;
+SET DEBUG_SYNC = 'RESET';
+
+--connection node_2
+CALL mtr.add_suppression("WSREP: Could not find applier context for");
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#136-master.opt b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#136-master.opt
new file mode 100644
index 00000000000..03fcb5d040d
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#136-master.opt
@@ -0,0 +1 @@
+--log-slave-updates --log-bin
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#136.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#136.test
new file mode 100644
index 00000000000..06e56d3c9cd
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#136.test
@@ -0,0 +1,41 @@
+# SR transactions are not binlogged #136
+
+--source include/galera_cluster.inc
+
+--connection node_1
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+
+--connection node_2
+SET GLOBAL wsrep_on=OFF;
+RESET MASTER;
+SET GLOBAL wsrep_on=ON;
+
+--connection node_1
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2);
+COMMIT;
+
+SET SESSION wsrep_trx_fragment_size = 0;
+INSERT INTO t1 VALUES (3),(4);
+COMMIT;
+
+--connection node_1
+--replace_regex /xid=[0-9]+/xid=###/ /table_id: [0-9]+/table_id: ###/
+--replace_column 2 <Pos> 5 <End_log_pos>
+SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 256;
+
+--connection node_2
+# Wait for all updates to arrive before dumping binlog
+SELECT COUNT(*) = 4 FROM t1;
+
+--replace_regex /xid=[0-9]+/xid=###/ /table_id: [0-9]+/table_id: ###/
+--replace_column 2 <Pos> 5 <End_log_pos>
+SHOW BINLOG EVENTS IN 'mysqld-bin.000001' FROM 256;
+
+--connection node_1
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#138.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#138.test
new file mode 100644
index 00000000000..3694dc9ad43
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#138.test
@@ -0,0 +1,25 @@
+# SR: two identical transactions have different value for the WSREP_FLAG_PA_UNSAFE flag
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1),(2);
+
+--connection node_2
+SELECT flags FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+ROLLBACK;
+INSERT INTO t1 VALUES (3),(4);
+
+--connection node_2
+SELECT flags FROM mysql.wsrep_streaming_log;
+
+--connection node_1
+ROLLBACK;
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#14.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#14.test
new file mode 100644
index 00000000000..deeb890fa0b
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#14.test
@@ -0,0 +1,21 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+
+COMMIT;
+
+DROP TABLE t1;
+
+--connection node_2
+--source include/galera_wait_ready.inc
+
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#148.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#148.test
new file mode 100644
index 00000000000..e0a443061df
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#148.test
@@ -0,0 +1,62 @@
+# statement rollback for SR transaction causes slave crash for inconsistency
+
+# We test the following:
+# 1. Create a transaction that is blocked by an SR transaction
+# 2. Force the SR transaction to have a statement rollback
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/have_debug_sync.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+CREATE TABLE t2 (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (6),(7),(8),(9),(10),(1);
+
+--connection node_2
+SET GLOBAL wsrep_slave_threads = 2;
+SET GLOBAL debug_dbug = 'd,sync.wsrep_apply_cb';
+
+# Begin SR transaction
+--connection node_1
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1), (2), (3), (4), (5);
+
+# Begin non-SR transaction that will block waiting for the SR transaction
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connection node_1a
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+--send INSERT INTO t1 VALUES (1), (2), (3), (4), (5);
+
+# Cause the SR transaction to fail with a duplicate key error
+--connection node_1
+--send INSERT INTO t1 SELECT * FROM t2;
+
+# Continue and commit the non-SR transaction.
+--connection node_1a
+--reap
+INSERT INTO t1 VALUES (6), (7), (8), (9), (10);
+COMMIT;
+
+--connection node_1
+--error ER_LOCK_DEADLOCK,ER_DUP_ENTRY
+--reap
+
+--connection node_2
+SET GLOBAL wsrep_slave_threads = 1;
+SET GLOBAL debug_dbug = '';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+SET DEBUG_SYNC='now SIGNAL signal.wsrep_apply_cb';
+
+SELECT COUNT(*) = 10 FROM t1;
+
+DROP TABLE t1;
+DROP TABLE t2;
+
+SET DEBUG_SYNC = RESET;
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#15.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#15.test
new file mode 100644
index 00000000000..4aaff058b30
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#15.test
@@ -0,0 +1,17 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+CREATE TABLE t1 (id INT) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+
+COMMIT;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#165.inc b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#165.inc
new file mode 100644
index 00000000000..7f13afa3c47
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#165.inc
@@ -0,0 +1,109 @@
+# --source include/galera_cluster.inc
+# --source include/have_debug_sync.inc
+
+#
+# This test attempts to catch a race condition between an autocommit
+# transaction and a transaction which is rolling back due to a
+# deadlock.
+#
+# Test outline:
+# * Trx 1a makes updates
+# * SR trx 1b writes a row 3, then makes updates
+# * AC trx 1c will attempt to write row 3 and will wait for lock
+# held by 1b
+# * Sync point is set for 1b to delay SR rollback
+# * SR trx 1b makes an update which makes it conflict with trx 1a
+# * Slave shows BF - BF conflict and fails in applying write event
+
+--connection node_1
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 VARCHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1, 'x'), (2, 'x'), (4, 'x'), (5, 'x');
+
+# --connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+# --connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1
+# --connect node_1c, 127.0.0.1, root, , test, $NODE_MYPORT_1
+
+--connection node_1a
+START TRANSACTION;
+
+UPDATE t1 SET f2 = 'a' WHERE f1 = 1;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 4;
+UPDATE t1 SET f2 = 'a' WHERE f1 = 5;
+
+
+--connection node_1b
+START TRANSACTION;
+SET SESSION wsrep_trx_fragment_size = 1;
+INSERT INTO t1 VALUES (3, 'b');
+UPDATE t1 SET f2 = 'b' WHERE f1 = 2;
+
+--connection node_2
+SELECT * FROM t1;
+
+# Will block, waiting for 1b
+--connection node_1c
+SET AUTOCOMMIT=ON;
+--send INSERT INTO t1 VALUES (3, 'c')
+
+--connection node_2
+SELECT * FROM t1;
+
+# Will block, waiting for 1b
+--connection node_1a
+--send UPDATE t1 SET f2 = 'a' WHERE f1 = 2
+
+--connection node_1
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER != 'system user' AND STATE = 'Updating';
+--source include/wait_condition.inc
+
+# Will deadlock
+--connection node_1b
+SET DEBUG_SYNC = 'wsrep_before_SR_rollback SIGNAL wait WAIT_FOR continue';
+--send UPDATE t1 SET f2 = 'b' WHERE f1 = 1
+
+# Wait until 1b hits rollback
+--connection node_1
+SET DEBUG_SYNC = 'now WAIT_FOR wait';
+
+# UPDATE 12.06.2016: as of recent wsrep API changes, the rolling-back thread no
+# longer queues ROLLBACKs and blocks on ROLLBACK replication before performing
+# the actual rollback. As a result this test is moot, as both the node_1a and
+# node_1c connections now hang until the sync point is released. Thus the sync
+# point release had to be moved above to release the connections. However, it
+# is not impossible that further changes in the code may reintroduce the race,
+# so the test is left as close to the original as possible.
+#
+# --connection node_1a
+# --reap
+# COMMIT;
+#
+# --connection node_1c
+# --reap
+#
+# UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+
+--connection node_1
+SET DEBUG_SYNC = 'now SIGNAL continue';
+
+--connection node_1c
+--reap
+
+UPDATE t1 SET f2 = 'x' WHERE f1 = 3;
+
+--connection node_1a
+--reap
+COMMIT;
+
+--connection node_1b
+--error ER_LOCK_DEADLOCK
+--reap
+
+--connection node_1
+SELECT * FROM t1;
+--connection node_2
+SELECT * FROM t1;
+
+--connection node_1
+SET DEBUG_SYNC = 'RESET';
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#165.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#165.test
new file mode 100644
index 00000000000..85d501288b0
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#165.test
@@ -0,0 +1,41 @@
+--source include/galera_cluster.inc
+--source include/have_debug_sync.inc
+
+#
+# This test attempts to catch a race condition between an autocommit
+# transaction and a transaction which is rolling back due to a
+# deadlock.
+#
+# Since it is trying to catch a race condition which may not occur
+# reliably, several runs are necessary for confidence. Hence the body of
+# the test was placed into the .inc file and sourced several times below.
+#
+# Test outline:
+# * Trx 1a makes updates
+# * SR trx 1b writes a row 3, then makes updates
+# * AC trx 1c will attempt to write row 3 and will wait for lock
+# held by 1b
+# * Sync point is set for 1b to delay SR rollback
+# * SR trx 1b makes an update which makes it conflict with trx 1a
+# * Slave shows BF - BF conflict and fails in applying write event
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1
+--connect node_1c, 127.0.0.1, root, , test, $NODE_MYPORT_1
+
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
+--source mysql-wsrep-features#165.inc
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#22.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#22.test
new file mode 100644
index 00000000000..544109dadee
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#22.test
@@ -0,0 +1,47 @@
+# Assertion `total_length + thd->wsrep_fragment_base == saved_pos' failed in wsrep_write_cache_inc() with ROLLBACK TO SAVEPOINT and SR
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER, f2 VARCHAR(10)) ENGINE=InnoDB;
+SET AUTOCOMMIT=OFF;
+SET SESSION wsrep_trx_fragment_size=1;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1, 'a');
+INSERT INTO t1 VALUES (2, 'a');
+INSERT INTO t1 VALUES (3, 'a');
+INSERT INTO t1 VALUES (4, 'a');
+INSERT INTO t1 VALUES (5, 'a');
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) = 5 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+SAVEPOINT s1;
+INSERT INTO t1 VALUES (11, 'b');
+INSERT INTO t1 VALUES (12, 'b');
+INSERT INTO t1 VALUES (13, 'b');
+INSERT INTO t1 VALUES (14, 'b');
+INSERT INTO t1 VALUES (15, 'b');
+
+--connection node_2
+--let $wait_condition = SELECT COUNT(*) = 10 FROM t1;
+--source include/wait_condition.inc
+
+--connection node_1
+ROLLBACK TO SAVEPOINT s1;
+
+INSERT INTO t1 VALUES (21, 'c');
+
+COMMIT;
+
+--connection node_1
+SELECT COUNT(*) = 6 FROM t1;
+
+
+--connection node_2
+SELECT COUNT(*) = 6 FROM t1;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#27.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#27.test
new file mode 100644
index 00000000000..f9c09391f8f
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#27.test
@@ -0,0 +1,29 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
+ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+
+--connection node_2
+--sleep 2
+SET SESSION wsrep_sync_wait = 0;
+SELECT COUNT(*) = 0 FROM t1;
+
+--connection node_1
+--error ER_LOCK_DEADLOCK
+COMMIT;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#32-master.opt b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#32-master.opt
new file mode 100644
index 00000000000..a6ef074a120
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#32-master.opt
@@ -0,0 +1 @@
+--innodb-lock-wait-timeout=1
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#32.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#32.test
new file mode 100644
index 00000000000..72c7a7b5e82
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#32.test
@@ -0,0 +1,44 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# mysql-wsrep-features#32 Assertion `meta->gtid.seqno == wsrep_thd_trx_seqno(thd)' failed in wsrep_commit_cb with SR
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+SET SESSION wsrep_trx_fragment_size = 1;
+
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+--let $wait_condition = SELECT COUNT(*) > 0 FROM t1;
+--source include/wait_condition.inc
+
+SET AUTOCOMMIT=OFF;
+
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (9);
+INSERT INTO t1 VALUES (8);
+INSERT INTO t1 VALUES (7);
+INSERT INTO t1 VALUES (6);
+
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (5);
+ROLLBACK;
+
+--connection node_1
+COMMIT;
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#35.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#35.test
new file mode 100644
index 00000000000..f63e1cca70c
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#35.test
@@ -0,0 +1,48 @@
+--source include/have_debug_sync.inc
+--source include/galera_cluster.inc
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
+
+# Block node #2's applier so that node #2 can issue a conflicting INSERT before
+# the INSERTs from node #1 have been applied on it.
+
+--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
+SELECT COUNT(*) = 0 FROM t1;
+SET SESSION wsrep_sync_wait = 0;
+SET GLOBAL debug_dbug = '+d,sync.wsrep_apply_cb';
+
+--connection node_1
+SET SESSION wsrep_trx_fragment_size = 1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+
+INSERT INTO t1 VALUES (1);
+INSERT INTO t1 VALUES (2);
+INSERT INTO t1 VALUES (3);
+INSERT INTO t1 VALUES (4);
+INSERT INTO t1 VALUES (5);
+
+--connection node_2a
+SET SESSION debug_sync = "now WAIT_FOR sync.wsrep_apply_cb_reached";
+
+--connection node_2
+SET SESSION wsrep_sync_wait = 0;
+SELECT COUNT(*) = 0 FROM t1;
+--send INSERT INTO t1 VALUES (1);
+
+--connection node_1
+COMMIT;
+
+--connection node_2a
+SET GLOBAL debug_dbug = '';
+SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb";
+
+--connection node_2
+--error ER_DUP_ENTRY,ER_LOCK_DEADLOCK
+--reap
+ROLLBACK;
+
+DROP TABLE t1;
+
+--connection node_2a
+SET DEBUG_SYNC = "RESET";
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#8.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#8.test
new file mode 100644
index 00000000000..55210386044
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#8.test
@@ -0,0 +1,63 @@
+--source include/big_test.inc
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# InnoDB FULLTEXT indexes
+#
+
+SET SESSION wsrep_trx_fragment_size = 1;
+CREATE TABLE ten (f1 INTEGER) ENGINE=InnoDB;
+INSERT INTO ten VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10);
+
+#
+# Fulltext index creation causes the creation of multiple system tables
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 INT PRIMARY KEY AUTO_INCREMENT, f2 VARCHAR(100), FULLTEXT (f2)) ENGINE=InnoDB;
+
+--connection node_2
+SELECT COUNT(*) = 13 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE name LIKE 'test/%';
+
+#
+# Fulltext insertion causes a flurry of updates on those system tables
+#
+
+--connection node_1
+# Insert 10K rows
+INSERT INTO t1 (f2) SELECT 'foobarbaz' FROM ten AS a1, ten AS a2, ten AS a3, ten AS a4;
+
+--connection node_2
+SELECT COUNT(f2) = 10000 FROM t1 WHERE MATCH(f2) AGAINST ('foobarbaz');
+
+UPDATE t1 SET f2 = 'abcdefjhk';
+
+--connection node_1
+SELECT COUNT(f2) = 10000 FROM t1 WHERE MATCH(f2) AGAINST ('abcdefjhk');
+
+--connection node_2
+
+DROP TABLE t1;
+
+#
+# Same on a table with no PK
+#
+
+--connection node_1
+CREATE TABLE t1 (f1 VARCHAR(100), FULLTEXT (f1)) ENGINE=InnoDB;
+
+--connection node_2
+# We insert only 1K rows here, because updates without a PK are very slow
+INSERT INTO t1 (f1) SELECT 'foobarbaz' FROM ten AS a1, ten AS a2, ten AS a3;
+
+--connection node_1
+SELECT COUNT(f1) = 1000 FROM t1 WHERE MATCH(f1) AGAINST ('foobarbaz');
+
+UPDATE t1 SET f1 = 'abcdefjhk';
+
+--connection node_2
+SELECT COUNT(f1) = 1000 FROM t1 WHERE MATCH(f1) AGAINST ('abcdefjhk');
+
+DROP TABLE t1;
+DROP TABLE ten;
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#9.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#9.test
new file mode 100644
index 00000000000..cbecf40fadf
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#9.test
@@ -0,0 +1,44 @@
+#
+# mysql-wsrep-features#9 Hang in galera::ReplicatorSMM::cert with Streaming Replication
+# when running the galera_kill_ddl.test test
+#
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+--source include/big_test.inc
+
+--connection node_1
+
+# Enable the master to continue running during the split-brain situation that
+# occurs when the slave is killed
+--let $wsrep_provider_options_orig = `SELECT @@wsrep_provider_options`
+SET GLOBAL wsrep_provider_options = 'pc.ignore_sb=true';
+
+SET SESSION wsrep_trx_fragment_size = 1;
+
+CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB;
+
+--connection node_2
+--source include/kill_galera.inc
+
+--connection node_1
+ALTER TABLE t1 ADD COLUMN f2 INTEGER;
+
+--connection node_2
+--source include/start_mysqld.inc
+--source include/galera_wait_ready.inc
+
+--let $galera_connection_name = node_2a
+--let $galera_server_number = 2
+--source include/galera_connect.inc
+--connection node_2a
+
+SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME='t1';
+SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
+
+--connection node_1
+--disable_query_log
+--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_orig';
+--enable_query_log
+
+DROP TABLE t1;
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#93.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#93.test
new file mode 100644
index 00000000000..442a7113537
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#93.test
@@ -0,0 +1,29 @@
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+#
+# This test sets a SAVEPOINT at the very beginning
+# of the transaction. When ROLLBACK TO SAVEPOINT is
+# issued, mysql performs a full rollback on SEs that
+# were not part of the transaction.
+# Test that SR transactions are rolled back, and
+# cleaned up properly in this case.
+#
+
+CREATE TABLE t1 (f1 INTEGER);
+SET SESSION WSREP_TRX_FRAGMENT_SIZE=1;
+
+START TRANSACTION;
+
+SAVEPOINT a;
+INSERT INTO t1 VALUES (1);
+ROLLBACK TO SAVEPOINT a;
+
+INSERT INTO t1 values (2);
+COMMIT;
+
+SELECT COUNT(*) = 0 from mysql.wsrep_streaming_log;
+--connection node_2
+SELECT COUNT(*) = 0 from mysql.wsrep_streaming_log;
+
+DROP TABLE t1;
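The cleanup assertion in this test rests on mysql.wsrep_streaming_log, the table in which SR fragments are persisted: once an SR transaction has committed or been fully rolled back, the table is expected to be empty on every node. The final two SELECTs above spell out that invariant, and the same check can serve as a post-condition in other SR tests, e.g.:

# the streaming log must be empty on both nodes after COMMIT/ROLLBACK
--connection node_1
SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;
--connection node_2
SELECT COUNT(*) = 0 FROM mysql.wsrep_streaming_log;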
diff --git a/mysql-test/suite/galera_sr/t/mysql-wsrep-features#96.test b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#96.test
new file mode 100644
index 00000000000..c773b310183
--- /dev/null
+++ b/mysql-test/suite/galera_sr/t/mysql-wsrep-features#96.test
@@ -0,0 +1,45 @@
+# mysql-wsrep-features#96 - "Sanity check failed" with SR and statement rolled back due to error
+
+--source include/galera_cluster.inc
+--source include/have_innodb.inc
+
+
+--connection node_1
+
+CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
+CREATE TABLE t2 (f2 VARCHAR(32));
+
+SET SESSION wsrep_trx_fragment_size=1;
+SET AUTOCOMMIT=OFF;
+START TRANSACTION;
+INSERT INTO t1 VALUES (1);
+
+# This statement causes full transaction rollback
+# rather than just statement rollback, as it is run under SR
+
+--error ER_LOCK_DEADLOCK
+INSERT INTO t1 VALUES (2),(1);
+INSERT INTO t2 VALUES ('abc');
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
+
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 1 FROM t2;
+
+--connection node_1
+ROLLBACK;
+
+--connection node_2
+SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
+
+SELECT COUNT(*) = 0 FROM t1;
+SELECT COUNT(*) = 0 FROM t2;
+
+--connection node_1
+DROP TABLE t1;
+DROP TABLE t2;
+
+
+
+
diff --git a/mysql-test/suite/gcol/inc/gcol_select.inc b/mysql-test/suite/gcol/inc/gcol_select.inc
index efaffd5168d..7dcbcc79ab6 100644
--- a/mysql-test/suite/gcol/inc/gcol_select.inc
+++ b/mysql-test/suite/gcol/inc/gcol_select.inc
@@ -35,7 +35,7 @@ insert into t2 (a) values (1);
create table t3 (a int primary key,
b int generated always as (-a) virtual,
c int generated always as (-a) stored unique);
-insert into t3 (a) values (2),(1),(3);
+insert into t3 (a) values (2),(1),(3),(5),(4),(7);
analyze table t1,t2,t3;
--echo # select_type=SIMPLE, type=system
diff --git a/mysql-test/suite/gcol/r/gcol_bug20746926.result b/mysql-test/suite/gcol/r/gcol_bug20746926.result
index 74fe76b3f1a..590caf02503 100644
--- a/mysql-test/suite/gcol/r/gcol_bug20746926.result
+++ b/mysql-test/suite/gcol/r/gcol_bug20746926.result
@@ -11,22 +11,22 @@ c timestamp generated always as ((a not in (b,b))) stored
insert t1(a) values(7777777777);
Warnings:
Warning 1265 Data truncated for column 'a' at row 1
-Warning 1292 Incorrect datetime value: '0'
+Warning 1292 Truncated incorrect datetime value: '0'
show warnings;
Level Code Message
Warning 1265 Data truncated for column 'a' at row 1
-Warning 1292 Incorrect datetime value: '0'
+Warning 1292 Truncated incorrect datetime value: '0'
disconnect con1;
connect con2,localhost,root,,;
set sql_mode='';
insert t1(a) values(6666666666);
Warnings:
Warning 1265 Data truncated for column 'a' at row 1
-Warning 1292 Incorrect datetime value: '0'
+Warning 1292 Truncated incorrect datetime value: '0'
show warnings;
Level Code Message
Warning 1265 Data truncated for column 'a' at row 1
-Warning 1292 Incorrect datetime value: '0'
+Warning 1292 Truncated incorrect datetime value: '0'
drop table t1;
disconnect con2;
connection default;
diff --git a/mysql-test/suite/gcol/r/gcol_bugfixes.result b/mysql-test/suite/gcol/r/gcol_bugfixes.result
index 9aff30aabc9..3d19f718287 100644
--- a/mysql-test/suite/gcol/r/gcol_bugfixes.result
+++ b/mysql-test/suite/gcol/r/gcol_bugfixes.result
@@ -231,6 +231,7 @@ Note 1265 Data truncated for column 'col_time_key' at row 5
Note 1265 Data truncated for column 'col_time_key' at row 6
ANALYZE TABLE c;
Table Op Msg_type Msg_text
+test.c analyze status Engine-independent statistics collected
test.c analyze status OK
explain SELECT COUNT(DISTINCT col_varchar_key) AS x
FROM c
@@ -327,6 +328,7 @@ ALTER TABLE t1 ADD COLUMN c INT GENERATED ALWAYS AS (b + 1) VIRTUAL;
ALTER TABLE t1 ADD INDEX( c );
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Make sure the index is correct. That's kinda important.
EXPLAIN
diff --git a/mysql-test/suite/gcol/r/gcol_keys_innodb.result b/mysql-test/suite/gcol/r/gcol_keys_innodb.result
index b46515e1eae..a1cc26a8f06 100644
--- a/mysql-test/suite/gcol/r/gcol_keys_innodb.result
+++ b/mysql-test/suite/gcol/r/gcol_keys_innodb.result
@@ -348,6 +348,7 @@ CREATE TABLE t1 (f1 int, gc int AS (f1 + 1) STORED, UNIQUE(gc));
INSERT INTO t1(f1) VALUES (1),(2),(0),(9),(3),(4),(8),(7),(5),(6);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Should use index
SELECT * FROM t1 WHERE f1 + 1 > 7;
@@ -455,6 +456,7 @@ INSERT INTO t1(f1) VALUES
(070707),(080808);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SELECT * FROM t1 WHERE f1 + 1 > 070707;
f1 gc_int gc_date
@@ -489,6 +491,7 @@ KEY col_int_gc_key(col_int_gc_key)
INSERT INTO t1 ( col_int_key) VALUES (7);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SELECT table1.col_int_key + 1 AS field1, table2.col_int_key AS field2
FROM (t1 AS table1 JOIN t1 AS table2 ON (table2.pk = table1.pk))
@@ -727,6 +730,7 @@ INDEX(gc_case));
INSERT INTO t (a, b) VALUES (0, 0), (0, 1), (1, 0), (1, 1);
ANALYZE TABLE t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
EXPLAIN SELECT a, b FROM t WHERE (a AND b) = 1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -809,7 +813,9 @@ CREATE TABLE t2 (b INTEGER);
INSERT INTO t2 VALUES (1);
ANALYZE TABLE t1, t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
# Used to choose the index on a1 and get wrong results.
EXPLAIN SELECT * FROM t1 WHERE (a2 AND a2) = 0;
diff --git a/mysql-test/suite/gcol/r/gcol_keys_myisam.result b/mysql-test/suite/gcol/r/gcol_keys_myisam.result
index dcbba1f70c1..91bd8fcdb78 100644
--- a/mysql-test/suite/gcol/r/gcol_keys_myisam.result
+++ b/mysql-test/suite/gcol/r/gcol_keys_myisam.result
@@ -205,7 +205,7 @@ outr.col_varchar_nokey in ('c', 'x', 'i')
AND (outr.col_time_key IS NULL OR
outr.col_datetime_key = '2009-09-27');
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE outr ALL col_time_key,col_datetime_key NULL NULL NULL 4 x
+1 SIMPLE outr index_merge col_time_key,col_datetime_key col_time_key,col_datetime_key 4,6 NULL 2 x
SELECT
outr.col_time_key AS x
FROM c AS outr
@@ -348,6 +348,7 @@ CREATE TABLE t1 (f1 int, gc int AS (f1 + 1) STORED, UNIQUE(gc));
INSERT INTO t1(f1) VALUES (1),(2),(0),(9),(3),(4),(8),(7),(5),(6);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Should use index
SELECT * FROM t1 WHERE f1 + 1 > 7;
@@ -455,6 +456,7 @@ INSERT INTO t1(f1) VALUES
(070707),(080808);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SELECT * FROM t1 WHERE f1 + 1 > 070707;
f1 gc_int gc_date
@@ -489,6 +491,7 @@ KEY col_int_gc_key(col_int_gc_key)
INSERT INTO t1 ( col_int_key) VALUES (7);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SELECT table1.col_int_key + 1 AS field1, table2.col_int_key AS field2
FROM (t1 AS table1 JOIN t1 AS table2 ON (table2.pk = table1.pk))
@@ -727,6 +730,7 @@ INDEX(gc_case));
INSERT INTO t (a, b) VALUES (0, 0), (0, 1), (1, 0), (1, 1);
ANALYZE TABLE t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
EXPLAIN SELECT a, b FROM t WHERE (a AND b) = 1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -809,7 +813,9 @@ CREATE TABLE t2 (b INTEGER);
INSERT INTO t2 VALUES (1);
ANALYZE TABLE t1, t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
# Used to choose the index on a1 and get wrong results.
EXPLAIN SELECT * FROM t1 WHERE (a2 AND a2) = 0;
diff --git a/mysql-test/suite/gcol/r/gcol_rollback.result b/mysql-test/suite/gcol/r/gcol_rollback.result
index 3fe40d1eccd..5ee94d3ef44 100644
--- a/mysql-test/suite/gcol/r/gcol_rollback.result
+++ b/mysql-test/suite/gcol/r/gcol_rollback.result
@@ -18,6 +18,7 @@ a b
9 9
BEGIN;
INSERT INTO t (a) VALUES (10);
+# restart
SELECT * FROM t;
a b
9 9
diff --git a/mysql-test/suite/gcol/r/gcol_select_innodb.result b/mysql-test/suite/gcol/r/gcol_select_innodb.result
index bc9bddad690..f933fe22bdc 100644
--- a/mysql-test/suite/gcol/r/gcol_select_innodb.result
+++ b/mysql-test/suite/gcol/r/gcol_select_innodb.result
@@ -17,11 +17,14 @@ insert into t2 (a) values (1);
create table t3 (a int primary key,
b int generated always as (-a) virtual,
c int generated always as (-a) stored unique);
-insert into t3 (a) values (2),(1),(3);
+insert into t3 (a) values (2),(1),(3),(5),(4),(7);
analyze table t1,t2,t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
# select_type=SIMPLE, type=system
select * from t2;
@@ -76,8 +79,8 @@ a b c
3 -3 -3
explain select * from t1 where b in (select c from t3);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 index c c 5 NULL 3 Using index
-1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t3 ref c c 5 test.t1.b 1 Using index
# select_type=PRIMARY, type=range,ref
select * from t1 where c in (select c from t3 where c between -2 and -1);
a b c
@@ -86,7 +89,7 @@ a b c
2 -2 -2
explain select * from t1 where c in (select c from t3 where c between -2 and -1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 index c c 5 NULL 3 Using where; Using index
+1 PRIMARY t3 index c c 5 NULL 6 Using where; Using index
1 PRIMARY t1 ALL c NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
# select_type=UNION, type=system
# select_type=UNION RESULT, type=<union1,2>
@@ -170,7 +173,7 @@ a b c
2 -2 -2
explain select * from t3 where b between -2 and -1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where
# SELECT * FROM tbl_name WHERE <indexed gcol expr>
select * from t3 where c between -2 and -1;
a b c
@@ -233,7 +236,7 @@ a b c
2 -2 -2
explain select * from t3 where b between -2 and -1 order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 index NULL PRIMARY 4 NULL 3 Using where
+1 SIMPLE t3 index NULL PRIMARY 4 NULL 6 Using where
# SELECT * FROM tbl_name WHERE <non-indexed gcol expr> ORDER BY <non-indexed gcol>
select * from t3 where b between -2 and -1 order by b;
a b c
@@ -241,7 +244,7 @@ a b c
1 -1 -1
explain select * from t3 where b between -2 and -1 order by b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using filesort
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where; Using filesort
# SELECT * FROM tbl_name WHERE <indexed gcol expr> ORDER BY <non-indexed gcol>
select * from t3 where c between -2 and -1 order by b;
a b c
@@ -257,7 +260,7 @@ a b c
1 -1 -1
explain select * from t3 where b between -2 and -1 order by c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using filesort
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where; Using filesort
# SELECT * FROM tbl_name WHERE <indexed gcol expr> ORDER BY <indexed gcol>
select * from t3 where c between -2 and -1 order by c;
a b c
@@ -493,6 +496,7 @@ Warning 1292 Truncated incorrect DOUBLE value: 'c'
Warning 1292 Truncated incorrect DOUBLE value: 't'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
+Warning 1292 Truncated incorrect DOUBLE value: 'd'
DROP TABLE cc;
SET sql_mode=@save_old_sql_mode;
#
@@ -676,9 +680,13 @@ INSERT INTO t4
VALUES (1, 'j'), (2, 'c'), (0, 'a');
ANALYZE TABLE t1, t2, t3, t4;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
+test.t4 analyze status Engine-independent statistics collected
test.t4 analyze status OK
EXPLAIN SELECT /*+ NO_SEMIJOIN(@subq1) */ t1.c1, t2.i1
FROM t1 STRAIGHT_JOIN t3 STRAIGHT_JOIN t2
@@ -691,7 +699,7 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 3
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.i1 1 Using where
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
@@ -746,7 +754,7 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 3
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.i1 1 Using where
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
@@ -802,7 +810,7 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 3
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.i1 1 Using where
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
@@ -866,7 +874,7 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 3
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
1 PRIMARY t3 eq_ref PRIMARY PRIMARY 4 test.t4.i1 1 Using where
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
diff --git a/mysql-test/suite/gcol/r/gcol_select_myisam.result b/mysql-test/suite/gcol/r/gcol_select_myisam.result
index d0fe7fbd0d4..853412fe7fe 100644
--- a/mysql-test/suite/gcol/r/gcol_select_myisam.result
+++ b/mysql-test/suite/gcol/r/gcol_select_myisam.result
@@ -17,11 +17,14 @@ insert into t2 (a) values (1);
create table t3 (a int primary key,
b int generated always as (-a) virtual,
c int generated always as (-a) stored unique);
-insert into t3 (a) values (2),(1),(3);
+insert into t3 (a) values (2),(1),(3),(5),(4),(7);
analyze table t1,t2,t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
# select_type=SIMPLE, type=system
select * from t2;
@@ -57,7 +60,7 @@ a b c
1 -1 -1
explain select * from t3 where c>=-1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range c c 5 NULL 2 Using index condition
+1 SIMPLE t3 range c c 5 NULL 1 Using index condition
# select_type=SIMPLE, type=ref
select * from t1,t3 where t1.c=t3.c and t3.c=-1;
a b c a b c
@@ -76,8 +79,8 @@ a b c
3 -3 -3
explain select * from t1 where b in (select c from t3);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 index c c 5 NULL 3 Using index
-1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t3 ref c c 5 test.t1.b 1 Using index
# select_type=PRIMARY, type=range,ref
select * from t1 where c in (select c from t3 where c between -2 and -1);
a b c
@@ -86,7 +89,7 @@ a b c
2 -2 -2
explain select * from t1 where c in (select c from t3 where c between -2 and -1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 index c c 5 NULL 3 Using where; Using index
+1 PRIMARY t3 index c c 5 NULL 6 Using where; Using index
1 PRIMARY t1 ref c c 5 test.t3.c 1
# select_type=UNION, type=system
# select_type=UNION RESULT, type=<union1,2>
@@ -162,7 +165,7 @@ a b c
2 -2 -2
explain select * from t3 where a between 1 and 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range PRIMARY PRIMARY 4 NULL 1 Using index condition
+1 SIMPLE t3 range PRIMARY PRIMARY 4 NULL 2 Using index condition
# SELECT * FROM tbl_name WHERE <non-indexed gcol expr>
select * from t3 where b between -2 and -1;
a b c
@@ -170,7 +173,7 @@ a b c
2 -2 -2
explain select * from t3 where b between -2 and -1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where
# SELECT * FROM tbl_name WHERE <indexed gcol expr>
select * from t3 where c between -2 and -1;
a b c
@@ -178,7 +181,7 @@ a b c
2 -2 -2
explain select * from t3 where c between -2 and -1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range c c 5 NULL 1 Using index condition
+1 SIMPLE t3 range c c 5 NULL 2 Using index condition
# bug#20022189: WL411:DEBUG ASSERT AT FIELD_LONG::VAL_INT IN SQL/FIELD.CC
CREATE TABLE t4 (
`pk` int(11) NOT NULL ,
@@ -208,7 +211,7 @@ a b c
1 -1 -1
explain select * from t3 where a between 1 and 2 order by c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range PRIMARY PRIMARY 4 NULL 1 Using index condition; Using filesort
+1 SIMPLE t3 range PRIMARY PRIMARY 4 NULL 2 Using index condition; Using filesort
# SELECT * FROM tbl_name WHERE <non-indexed gcol expr> ORDER BY <non-gcol>
select * from t3 where b between -2 and -1 order by a;
a b c
@@ -216,7 +219,7 @@ a b c
2 -2 -2
explain select * from t3 where b between -2 and -1 order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using filesort
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where; Using filesort
# SELECT * FROM tbl_name WHERE <indexed gcol expr> ORDER BY <non-gcol>
select * from t3 where c between -2 and -1 order by a;
a b c
@@ -224,7 +227,7 @@ a b c
2 -2 -2
explain select * from t3 where c between -2 and -1 order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range c c 5 NULL 1 Using index condition; Using filesort
+1 SIMPLE t3 range c c 5 NULL 2 Using index condition; Using filesort
# SELECT * FROM tbl_name WHERE <non-indexed gcol expr> ORDER BY <non-indexed gcol>
select * from t3 where b between -2 and -1 order by b;
a b c
@@ -232,7 +235,7 @@ a b c
1 -1 -1
explain select * from t3 where b between -2 and -1 order by b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using filesort
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where; Using filesort
# SELECT * FROM tbl_name WHERE <indexed gcol expr> ORDER BY <non-indexed gcol>
select * from t3 where c between -2 and -1 order by b;
a b c
@@ -240,7 +243,7 @@ a b c
1 -1 -1
explain select * from t3 where c between -2 and -1 order by b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range c c 5 NULL 1 Using index condition; Using filesort
+1 SIMPLE t3 range c c 5 NULL 2 Using index condition; Using filesort
# SELECT * FROM tbl_name WHERE <non-indexed gcol expr> ORDER BY <indexed gcol>
select * from t3 where b between -2 and -1 order by c;
a b c
@@ -248,7 +251,7 @@ a b c
1 -1 -1
explain select * from t3 where b between -2 and -1 order by c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using filesort
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where; Using filesort
# SELECT * FROM tbl_name WHERE <indexed gcol expr> ORDER BY <indexed gcol>
select * from t3 where c between -2 and -1 order by c;
a b c
@@ -256,7 +259,7 @@ a b c
1 -1 -1
explain select * from t3 where c between -2 and -1 order by c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range c c 5 NULL 1 Using index condition
+1 SIMPLE t3 range c c 5 NULL 2 Using index condition
# SELECT sum(<non-indexed gcol>) FROM tbl_name GROUP BY <non-indexed gcol>
select sum(b) from t1 group by b;
sum(b)
@@ -791,15 +794,15 @@ KEY (col_int_key)
INSERT INTO cc (col_int_nokey) VALUES (0),(1),(7),(0),(4),(5);
EXPLAIN SELECT pk FROM cc WHERE col_int_key > 3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE cc ALL col_int_key NULL NULL NULL 6 #
+1 SIMPLE cc range col_int_key col_int_key 5 NULL 3 #
SELECT pk FROM cc WHERE col_int_key > 3;
pk
-3
5
6
+3
EXPLAIN SELECT pk FROM cc WHERE col_int_key > 3 ORDER BY 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE cc ALL col_int_key NULL NULL NULL 6 #
+1 SIMPLE cc range col_int_key col_int_key 5 NULL 3 #
SELECT pk FROM cc WHERE col_int_key > 3 ORDER BY 1;
pk
3
@@ -853,7 +856,9 @@ Warnings:
Note 1265 Data truncated for column 'col_time_key' at row 1
ANALYZE TABLE a, c;
Table Op Msg_type Msg_text
+test.a analyze status Engine-independent statistics collected
test.a analyze status OK
+test.c analyze status Engine-independent statistics collected
test.c analyze status OK
EXPLAIN
SELECT
@@ -931,7 +936,9 @@ col_varchar_nokey
(5, 'b'),(8,'m'),(7, 'j'),(2, 'v');
ANALYZE TABLE c, cc;
Table Op Msg_type Msg_text
+test.c analyze status Engine-independent statistics collected
test.c analyze status OK
+test.cc analyze status Engine-independent statistics collected
test.cc analyze status OK
EXPLAIN SELECT
alias2 . col_varchar_key AS field1
@@ -1073,6 +1080,7 @@ Warning 1292 Truncated incorrect DOUBLE value: 'c'
Warning 1292 Truncated incorrect DOUBLE value: 't'
Warning 1292 Truncated incorrect DOUBLE value: 'm'
Warning 1292 Truncated incorrect DOUBLE value: 'd'
+Warning 1292 Truncated incorrect DOUBLE value: 'd'
DROP TABLE cc;
SET sql_mode=@save_old_sql_mode;
#
@@ -1204,7 +1212,7 @@ FROM t0 AS a0, t0 AS a1, t0 AS a2;
EXPLAIN SELECT * FROM t1
WHERE i1 > 41 AND i1 <= 43;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range idx idx 4 NULL 19 Using index condition
+1 SIMPLE t1 range idx idx 4 NULL 20 Using index condition
SELECT * FROM t1
WHERE i1 > 41 AND i1 <= 43;
pk i1 i2 v1 v2
@@ -1298,9 +1306,13 @@ INSERT INTO t4
VALUES (1, 'j'), (2, 'c'), (0, 'a');
ANALYZE TABLE t1, t2, t3, t4;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
+test.t4 analyze status Engine-independent statistics collected
test.t4 analyze status OK
EXPLAIN SELECT /*+ NO_SEMIJOIN(@subq1) */ t1.c1, t2.i1
FROM t1 STRAIGHT_JOIN t3 STRAIGHT_JOIN t2
@@ -1313,7 +1325,7 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 3
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
1 PRIMARY t3 eq_ref PRIMARY,v_idx PRIMARY 4 test.t4.i1 1 Using where
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
@@ -1369,7 +1381,7 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 3
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
1 PRIMARY t3 eq_ref PRIMARY,v_idx,v_idx2 PRIMARY 4 test.t4.i1 1 Using where
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
@@ -1427,7 +1439,7 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 3
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
1 PRIMARY t3 eq_ref PRIMARY,v_idx2 PRIMARY 4 test.t4.i1 1 Using where
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
@@ -1494,7 +1506,7 @@ WHERE t4.c1 < 'o'
)
AND t1.i1 <= t3.i2_key;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 3
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 2
1 PRIMARY t1 ALL NULL NULL NULL NULL 4 Using join buffer (flat, BNL join)
1 PRIMARY t3 eq_ref PRIMARY,v_idx PRIMARY 4 test.t4.i1 1 Using where
1 PRIMARY t2 ALL NULL NULL NULL NULL 5 Using join buffer (flat, BNL join)
diff --git a/mysql-test/suite/gcol/r/gcol_view_innodb.result b/mysql-test/suite/gcol/r/gcol_view_innodb.result
index ec82c792493..b23dbfc4bff 100644
--- a/mysql-test/suite/gcol/r/gcol_view_innodb.result
+++ b/mysql-test/suite/gcol/r/gcol_view_innodb.result
@@ -5,6 +5,7 @@ c int generated always as (-a) stored);
insert into t1 (a) values (1), (1), (2), (2), (3);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
create view v1 (d,e) as select abs(b), abs(c) from t1;
select d,e from v1;
diff --git a/mysql-test/suite/gcol/r/gcol_view_myisam.result b/mysql-test/suite/gcol/r/gcol_view_myisam.result
index 13cb74ebcb5..264bd904c30 100644
--- a/mysql-test/suite/gcol/r/gcol_view_myisam.result
+++ b/mysql-test/suite/gcol/r/gcol_view_myisam.result
@@ -5,6 +5,7 @@ c int generated always as (-a) stored);
insert into t1 (a) values (1), (1), (2), (2), (3);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
create view v1 (d,e) as select abs(b), abs(c) from t1;
select d,e from v1;
diff --git a/mysql-test/suite/gcol/r/innodb_virtual_fk_restart.result b/mysql-test/suite/gcol/r/innodb_virtual_fk_restart.result
index 1d4766eab19..c45579aec2b 100644
--- a/mysql-test/suite/gcol/r/innodb_virtual_fk_restart.result
+++ b/mysql-test/suite/gcol/r/innodb_virtual_fk_restart.result
@@ -14,6 +14,7 @@ INSERT INTO u1 SET a=1;
INSERT INTO u2 SET b=1;
INSERT INTO t1(fld1) VALUES(1);
INSERT INTO t2(fld1, fld2) VALUES(1, 2);
+# restart
UPDATE t1 SET fld1= 2;
DELETE FROM u1;
SELECT * FROM u2;
@@ -37,6 +38,7 @@ FOREIGN KEY(fld1) REFERENCES t1(fld1)
ON UPDATE CASCADE) engine=innodb;
INSERT INTO t1 VALUES(1), (2);
INSERT INTO t2 VALUES(1, DEFAULT), (2, default);
+# restart
CREATE TEMPORARY TABLE t2 (fld1 INT NOT NULL)ENGINE=INNODB;
UPDATE t1 SET fld1= 3 WHERE fld1= 2;
connect con1,localhost,root,,test;
diff --git a/mysql-test/suite/gcol/r/innodb_virtual_rebuild.result b/mysql-test/suite/gcol/r/innodb_virtual_rebuild.result
index 2e35698e47e..35f37034721 100644
--- a/mysql-test/suite/gcol/r/innodb_virtual_rebuild.result
+++ b/mysql-test/suite/gcol/r/innodb_virtual_rebuild.result
@@ -7,31 +7,31 @@ ROW_FORMAT=REDUNDANT;
INSERT INTO t4 SET i=1;
ALTER TABLE t4 ADD INDEX(v), LOCK=NONE;
ALTER TABLE t4 ADD COLUMN k INT, LOCK=NONE;
-ALTER TABLE t4 DROP k, LOCK=NONE;
ERROR 0A000: LOCK=NONE is not supported. Reason: online rebuild with indexed virtual columns. Try LOCK=SHARED
-ALTER TABLE t4 DROP INDEX v, LOCK=NONE;
ALTER TABLE t4 DROP k, LOCK=NONE;
+ERROR 42000: Can't DROP COLUMN `k`; check that it exists
+ALTER TABLE t4 DROP INDEX v, LOCK=NONE;
INSERT INTO t3 SET i=1;
ALTER TABLE t3 ADD INDEX(v), LOCK=NONE;
ALTER TABLE t3 ADD COLUMN k INT, LOCK=NONE;
-ALTER TABLE t3 DROP k, LOCK=NONE;
ERROR 0A000: LOCK=NONE is not supported. Reason: online rebuild with indexed virtual columns. Try LOCK=SHARED
-ALTER TABLE t3 DROP INDEX v, LOCK=NONE;
ALTER TABLE t3 DROP k, LOCK=NONE;
+ERROR 42000: Can't DROP COLUMN `k`; check that it exists
+ALTER TABLE t3 DROP INDEX v, LOCK=NONE;
INSERT INTO t2 SET i=1;
ALTER TABLE t2 ADD INDEX(v), LOCK=NONE;
ALTER TABLE t2 ADD COLUMN k INT, LOCK=NONE;
-ALTER TABLE t2 DROP k, LOCK=NONE;
ERROR 0A000: LOCK=NONE is not supported. Reason: online rebuild with indexed virtual columns. Try LOCK=SHARED
-ALTER TABLE t2 DROP INDEX v, LOCK=NONE;
ALTER TABLE t2 DROP k, LOCK=NONE;
+ERROR 42000: Can't DROP COLUMN `k`; check that it exists
+ALTER TABLE t2 DROP INDEX v, LOCK=NONE;
INSERT INTO t1 SET i=1;
ALTER TABLE t1 ADD INDEX(v), LOCK=NONE;
ALTER TABLE t1 ADD COLUMN k INT, LOCK=NONE;
-ALTER TABLE t1 DROP k, LOCK=NONE;
ERROR 0A000: LOCK=NONE is not supported. Reason: online rebuild with indexed virtual columns. Try LOCK=SHARED
-ALTER TABLE t1 DROP INDEX v, LOCK=NONE;
ALTER TABLE t1 DROP k, LOCK=NONE;
+ERROR 42000: Can't DROP COLUMN `k`; check that it exists
+ALTER TABLE t1 DROP INDEX v, LOCK=NONE;
connect ddl,localhost,root,,test;
connection default;
connection ddl;
diff --git a/mysql-test/suite/gcol/t/innodb_virtual_rebuild.test b/mysql-test/suite/gcol/t/innodb_virtual_rebuild.test
index 37ab82c46db..fe4f5e307b3 100644
--- a/mysql-test/suite/gcol/t/innodb_virtual_rebuild.test
+++ b/mysql-test/suite/gcol/t/innodb_virtual_rebuild.test
@@ -14,11 +14,12 @@ while ($n)
{
eval INSERT INTO t$n SET i=1;
eval ALTER TABLE t$n ADD INDEX(v), LOCK=NONE;
-eval ALTER TABLE t$n ADD COLUMN k INT, LOCK=NONE;
+# MDEV-17468 FIXME: Fix this, and remove the 2 --error below.
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+eval ALTER TABLE t$n ADD COLUMN k INT, LOCK=NONE;
+--error ER_CANT_DROP_FIELD_OR_KEY
eval ALTER TABLE t$n DROP k, LOCK=NONE;
eval ALTER TABLE t$n DROP INDEX v, LOCK=NONE;
-eval ALTER TABLE t$n DROP k, LOCK=NONE;
dec $n;
}
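After this change the loop body runs the sequence below for each of t1..t4. Read without the diff markers, the intent is clearer: ADD COLUMN with LOCK=NONE is currently expected to fail on a table with an indexed virtual column, the DROP of the never-added column then fails as well, and both --error directives are the temporary workaround that the MDEV-17468 FIXME asks to remove. Post-patch loop body, assembled from the hunk above:

eval INSERT INTO t$n SET i=1;
eval ALTER TABLE t$n ADD INDEX(v), LOCK=NONE;
# MDEV-17468 FIXME: Fix this, and remove the 2 --error below.
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
eval ALTER TABLE t$n ADD COLUMN k INT, LOCK=NONE;
--error ER_CANT_DROP_FIELD_OR_KEY
eval ALTER TABLE t$n DROP k, LOCK=NONE;
eval ALTER TABLE t$n DROP INDEX v, LOCK=NONE;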
diff --git a/mysql-test/suite/handler/aria.result b/mysql-test/suite/handler/aria.result
index 6b02ac9b085..1896e30f7d5 100644
--- a/mysql-test/suite/handler/aria.result
+++ b/mysql-test/suite/handler/aria.result
@@ -808,6 +808,7 @@ ERROR 42S02: Unknown table 't1' in HANDLER
handler t1 open;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
@@ -927,6 +928,7 @@ handler t1 open;
lock tables t1 write;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
unlock tables;
handler t1 read next;
@@ -1483,7 +1485,7 @@ handler t2 open;
flush tables with read lock;
handler t1 read next;
a b
-1 1
+2 1
select a from t3;
a
1
@@ -1697,6 +1699,8 @@ BEGIN
SELECT 1 FROM t2 INTO @a;
RETURN 1;
END|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT f1();
ERROR 42S02: Table 'test.t2' doesn't exist
CREATE TABLE t1(a INT);
diff --git a/mysql-test/suite/handler/heap.result b/mysql-test/suite/handler/heap.result
index 9679b48a367..32d06b79604 100644
--- a/mysql-test/suite/handler/heap.result
+++ b/mysql-test/suite/handler/heap.result
@@ -808,7 +808,7 @@ ERROR 42S02: Unknown table 't1' in HANDLER
handler t1 open;
analyze table t1;
Table Op Msg_type Msg_text
-test.t1 analyze note The storage engine for the table doesn't support analyze
+test.t1 analyze status Operation failed
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
#
@@ -927,6 +927,7 @@ handler t1 open;
lock tables t1 write;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze note The storage engine for the table doesn't support analyze
unlock tables;
handler t1 read next;
@@ -1483,7 +1484,7 @@ handler t2 open;
flush tables with read lock;
handler t1 read next;
a b
-1 1
+2 1
select a from t3;
a
1
@@ -1697,6 +1698,8 @@ BEGIN
SELECT 1 FROM t2 INTO @a;
RETURN 1;
END|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT f1();
ERROR 42S02: Table 'test.t2' doesn't exist
CREATE TABLE t1(a INT);
diff --git a/mysql-test/suite/handler/innodb.result b/mysql-test/suite/handler/innodb.result
index 80e8ed679a9..a1b2b318911 100644
--- a/mysql-test/suite/handler/innodb.result
+++ b/mysql-test/suite/handler/innodb.result
@@ -810,6 +810,7 @@ ERROR 42S02: Unknown table 't1' in HANDLER
handler t1 open;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
@@ -930,6 +931,7 @@ handler t1 open;
lock tables t1 write;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
unlock tables;
handler t1 read next;
@@ -1487,7 +1489,7 @@ handler t2 open;
flush tables with read lock;
handler t1 read next;
a b
-1 1
+2 1
select a from t3;
a
1
@@ -1701,6 +1703,8 @@ BEGIN
SELECT 1 FROM t2 INTO @a;
RETURN 1;
END|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT f1();
ERROR 42S02: Table 'test.t2' doesn't exist
CREATE TABLE t1(a INT);
diff --git a/mysql-test/suite/handler/interface.result b/mysql-test/suite/handler/interface.result
index c4a169be185..8c39b15b6c9 100644
--- a/mysql-test/suite/handler/interface.result
+++ b/mysql-test/suite/handler/interface.result
@@ -175,6 +175,7 @@ c1
connect flush,localhost,root,,;
connection flush;
flush tables;
+flush table t1;
connect waiter,localhost,root,,;
connection waiter;
connection default;
@@ -258,10 +259,11 @@ a b
flush tables;
handler t1 read a next;
a b
-0 a
+2 c
+flush tables t1;
handler t1 read a next;
a b
-1 b
+0 a
flush tables with read lock;
handler t1 read a next;
a b
diff --git a/mysql-test/suite/handler/interface.test b/mysql-test/suite/handler/interface.test
index 15853dfdbf5..0ecdbf9c5cf 100644
--- a/mysql-test/suite/handler/interface.test
+++ b/mysql-test/suite/handler/interface.test
@@ -179,12 +179,13 @@ handler t1 open;
handler t1 read first;
connect (flush,localhost,root,,);
connection flush;
-send flush tables;
+flush tables;
+send flush table t1;
connect (waiter,localhost,root,,);
connection waiter;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
- where state = "Waiting for table flush";
+ where state = "Waiting for table metadata lock";
--source include/wait_condition.inc
connection default;
handler t2 open;
@@ -282,6 +283,7 @@ handler t1 read a first;
handler t1 read a next;
flush tables;
handler t1 read a next;
+flush tables t1;
handler t1 read a next;
flush tables with read lock;
handler t1 read a next;
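In the first hunk of this file the blocking statement changes from a global flush tables to a targeted flush table t1 issued with send, and the waiter now polls for a metadata-lock wait instead of a table-flush wait; this suggests that after the change a plain FLUSH TABLES no longer waits on the open HANDLER, while FLUSH TABLE t1 still does. The post-patch sequence, assembled from the hunk above (connection names flush and waiter come from the test):

connection flush;
# the global flush completes without waiting on the open HANDLER
flush tables;
# the targeted flush blocks behind the HANDLER's metadata lock
send flush table t1;
connect (waiter,localhost,root,,);
connection waiter;
let $wait_condition=
  select count(*) = 1 from information_schema.processlist
  where state = "Waiting for table metadata lock";
--source include/wait_condition.inc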
diff --git a/mysql-test/suite/handler/myisam.result b/mysql-test/suite/handler/myisam.result
index 90e1767a1f3..2c5f8c3bfde 100644
--- a/mysql-test/suite/handler/myisam.result
+++ b/mysql-test/suite/handler/myisam.result
@@ -808,6 +808,7 @@ ERROR 42S02: Unknown table 't1' in HANDLER
handler t1 open;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
handler t1 read next;
ERROR 42S02: Unknown table 't1' in HANDLER
@@ -927,6 +928,7 @@ handler t1 open;
lock tables t1 write;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
unlock tables;
handler t1 read next;
@@ -1483,7 +1485,7 @@ handler t2 open;
flush tables with read lock;
handler t1 read next;
a b
-1 1
+2 1
select a from t3;
a
1
@@ -1697,6 +1699,8 @@ BEGIN
SELECT 1 FROM t2 INTO @a;
RETURN 1;
END|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT f1();
ERROR 42S02: Table 'test.t2' doesn't exist
CREATE TABLE t1(a INT);
diff --git a/mysql-test/suite/heap/heap.result b/mysql-test/suite/heap/heap.result
index 320966dd571..326142a71a4 100644
--- a/mysql-test/suite/heap/heap.result
+++ b/mysql-test/suite/heap/heap.result
@@ -66,7 +66,7 @@ a
alter table t1 engine=myisam;
explain select * from t1 where a in (869751,736494,226312,802616);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uniq_id uniq_id 4 NULL 4 Using where; Using index
+1 SIMPLE t1 index uniq_id uniq_id 4 NULL 5 Using where; Using index
drop table t1;
create table t1 (x int not null, y int not null, key x (x), unique y (y))
engine=heap;
diff --git a/mysql-test/suite/heap/heap_btree.result b/mysql-test/suite/heap/heap_btree.result
index 83d1bcb6c92..5985350a213 100644
--- a/mysql-test/suite/heap/heap_btree.result
+++ b/mysql-test/suite/heap/heap_btree.result
@@ -59,14 +59,14 @@ a
869751
select * from t1 where a in (869751,736494,226312,802616);
a
-226312
+869751
736494
+226312
802616
-869751
alter table t1 engine=myisam;
explain select * from t1 where a in (869751,736494,226312,802616);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uniq_id uniq_id 4 NULL 4 Using where; Using index
+1 SIMPLE t1 index uniq_id uniq_id 4 NULL 5 Using where; Using index
drop table t1;
create table t1 (x int not null, y int not null, key x using BTREE (x,y), unique y using BTREE (y))
engine=heap;
@@ -178,7 +178,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range btn btn 10 NULL 1 Using where
explain select * from t1 where btn like "h%";
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range btn btn 10 NULL # Using where
+1 SIMPLE t1 ALL btn NULL NULL NULL # Using where
explain select * from t1 where btn like "a%";
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range btn btn 10 NULL 1 Using where
@@ -350,11 +350,11 @@ insert into t1 values (869751),(736494),(226312),(802616),(728912);
alter table t1 add unique uniq_id using BTREE (a);
select 0+a from t1 where a > 736494;
0+a
-802616
869751
+802616
explain select 0+a from t1 where a > 736494;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uniq_id uniq_id 8 NULL 3 Using where
+1 SIMPLE t1 ALL uniq_id NULL NULL NULL 5 Using where
select 0+a from t1 where a = 736494;
0+a
736494
@@ -370,13 +370,13 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range uniq_id uniq_id 8 NULL 2 Using where
select 0+a from t1 where a in (869751,736494,226312,802616);
0+a
-226312
+869751
736494
+226312
802616
-869751
explain select 0+a from t1 where a in (869751,736494,226312,802616);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uniq_id uniq_id 8 NULL 4 Using where
+1 SIMPLE t1 ALL uniq_id NULL NULL NULL 5 Using where
drop table t1;
End of 5.3 tests
create table t1 (id int, a varchar(300) not null, key using btree(a)) engine=heap;
diff --git a/mysql-test/suite/heap/heap_btree.test b/mysql-test/suite/heap/heap_btree.test
index aca41c430b3..6e087c7ef24 100644
--- a/mysql-test/suite/heap/heap_btree.test
+++ b/mysql-test/suite/heap/heap_btree.test
@@ -37,6 +37,7 @@ create table t1 (a int not null) engine=heap;
insert into t1 values (869751),(736494),(226312),(802616),(728912);
select * from t1 where a > 736494;
alter table t1 add unique uniq_id using BTREE (a);
+--sorted_result
select * from t1 where a > 736494;
select * from t1 where a = 736494;
select * from t1 where a=869751 or a=736494;
diff --git a/mysql-test/suite/heap/heap_hash.result b/mysql-test/suite/heap/heap_hash.result
index 55d43588403..1fbfa99c61f 100644
--- a/mysql-test/suite/heap/heap_hash.result
+++ b/mysql-test/suite/heap/heap_hash.result
@@ -66,7 +66,7 @@ a
alter table t1 engine=myisam;
explain select * from t1 where a in (869751,736494,226312,802616);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uniq_id uniq_id 4 NULL 4 Using where; Using index
+1 SIMPLE t1 ALL uniq_id NULL NULL NULL 5 Using where
drop table t1;
create table t1 (x int not null, y int not null, key x using HASH (x), unique y using HASH (y))
engine=heap;
@@ -426,15 +426,16 @@ select 0+a from t1 where a=869751 or a=736494;
explain select 0+a from t1 where a=869751 or a=736494;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range uniq_id uniq_id 8 NULL 2 Using where
-select 0+a from t1 where a in (869751,736494,226312,802616);
+select 0+a from t1 where a in (869751,736494,226312,802616,728912);
0+a
-226312
+869751
736494
+226312
802616
-869751
-explain select 0+a from t1 where a in (869751,736494,226312,802616);
+728912
+explain select 0+a from t1 where a in (869751,736494,226312,802616,728912);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range uniq_id uniq_id 8 NULL 4 Using where
+1 SIMPLE t1 ALL uniq_id NULL NULL NULL 5 Using where
drop table t1;
End of 5.3 tests
#
diff --git a/mysql-test/suite/heap/heap_hash.test b/mysql-test/suite/heap/heap_hash.test
index 3fe95e14205..3a8979a2393 100644
--- a/mysql-test/suite/heap/heap_hash.test
+++ b/mysql-test/suite/heap/heap_hash.test
@@ -313,8 +313,8 @@ select 0+a from t1 where a = 736494;
explain select 0+a from t1 where a = 736494;
select 0+a from t1 where a=869751 or a=736494;
explain select 0+a from t1 where a=869751 or a=736494;
-select 0+a from t1 where a in (869751,736494,226312,802616);
-explain select 0+a from t1 where a in (869751,736494,226312,802616);
+select 0+a from t1 where a in (869751,736494,226312,802616,728912);
+explain select 0+a from t1 where a in (869751,736494,226312,802616,728912);
drop table t1;
--echo End of 5.3 tests
diff --git a/mysql-test/suite/innodb/r/101_compatibility.result b/mysql-test/suite/innodb/r/101_compatibility.result
index a1ca493a6b2..3782ea83baf 100644
--- a/mysql-test/suite/innodb/r/101_compatibility.result
+++ b/mysql-test/suite/innodb/r/101_compatibility.result
@@ -26,6 +26,7 @@ INSERT INTO tdd VALUES(1);
INSERT INTO tp VALUES(1);
INSERT INTO ti VALUES(1);
# Kill the server
+# restart
CHECK TABLE tr,tc,td,tz,tdd,tp,ti;
Table Op Msg_type Msg_text
test.tr check status OK
@@ -35,6 +36,7 @@ test.tz check status OK
test.tdd check status OK
test.tp check status OK
test.ti check status OK
+# restart: --innodb-read-only
CHECK TABLE tr,tc,td,tz,tdd,tp,ti;
Table Op Msg_type Msg_text
test.tr check status OK
@@ -44,4 +46,5 @@ test.tz check status OK
test.tdd check status OK
test.tp check status OK
test.ti check status OK
+# restart
DROP TABLE tr,tc,td,tz,tdd,tp,ti;
diff --git a/mysql-test/suite/innodb/r/alter_copy.result b/mysql-test/suite/innodb/r/alter_copy.result
index ac25c6c6c18..54b6cdd5ae5 100644
--- a/mysql-test/suite/innodb/r/alter_copy.result
+++ b/mysql-test/suite/innodb/r/alter_copy.result
@@ -48,6 +48,7 @@ ADD INDEX(a,b,c), ADD INDEX(a,c,b), ADD INDEX(a,c,d), ADD INDEX(a,d,c),
ADD INDEX(a,b,d), ADD INDEX(a,d,b), ADD INDEX(b,c,d), ADD INDEX(b,d,c),
ALGORITHM=COPY;
ERROR HY000: Lost connection to MySQL server during query
+# restart: --innodb-force-recovery=3
#sql-temporary.frm
#sql-temporary.ibd
FTS_INDEX_1.ibd
@@ -116,6 +117,7 @@ t1 CREATE TABLE `t1` (
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
+# restart: --innodb-read-only
#sql-temporary.frm
#sql-temporary.ibd
FTS_INDEX_1.ibd
@@ -184,6 +186,7 @@ t1 CREATE TABLE `t1` (
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
+# restart
#sql-temporary.frm
FTS_INDEX_1.ibd
FTS_INDEX_2.ibd
diff --git a/mysql-test/suite/innodb/r/alter_crash.result b/mysql-test/suite/innodb/r/alter_crash.result
index cb9c17e45ea..322caa66b07 100644
--- a/mysql-test/suite/innodb/r/alter_crash.result
+++ b/mysql-test/suite/innodb/r/alter_crash.result
@@ -17,6 +17,7 @@ Table Op Msg_type Msg_text
test.t1 check Warning InnoDB: Index c2 is marked as corrupted
test.t1 check Warning InnoDB: Index c3 is marked as corrupted
test.t1 check error Corrupt
+# restart
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check Warning InnoDB: Index c2 is marked as corrupted
@@ -47,6 +48,7 @@ SET DEBUG_DBUG='+d,innodb_alter_commit_crash_after_commit';
ALTER TABLE t1 ADD PRIMARY KEY (f2, f1);
ERROR HY000: Lost connection to MySQL server during query
# Restart mysqld after the crash and reconnect.
+# restart
SELECT * FROM information_schema.innodb_sys_tables
WHERE table_id = ID;
TABLE_ID NAME FLAG N_COLS SPACE ROW_FORMAT ZIP_PAGE_SIZE SPACE_TYPE
@@ -86,6 +88,7 @@ SET DEBUG_DBUG='+d,innodb_alter_commit_crash_before_commit';
ALTER TABLE t2 ADD PRIMARY KEY (f2, f1);
ERROR HY000: Lost connection to MySQL server during query
# Startup the server after the crash
+# restart
SELECT * FROM information_schema.innodb_sys_tables
WHERE name LIKE 'test/#sql-%';
TABLE_ID NAME FLAG N_COLS SPACE ROW_FORMAT ZIP_PAGE_SIZE SPACE_TYPE
@@ -124,6 +127,7 @@ SET DEBUG_DBUG='+d,innodb_alter_commit_crash_after_commit';
ALTER TABLE t1 ADD INDEX (b), CHANGE c d int, ALGORITHM=INPLACE;
ERROR HY000: Lost connection to MySQL server during query
# Restart mysqld after the crash and reconnect.
+# restart
SELECT * FROM information_schema.innodb_sys_tables
WHERE table_id = ID;
TABLE_ID NAME FLAG N_COLS SPACE ROW_FORMAT ZIP_PAGE_SIZE SPACE_TYPE
diff --git a/mysql-test/suite/innodb/r/alter_foreign_crash.result b/mysql-test/suite/innodb/r/alter_foreign_crash.result
index 34577ecf0fd..9a7a23a5b72 100644
--- a/mysql-test/suite/innodb/r/alter_foreign_crash.result
+++ b/mysql-test/suite/innodb/r/alter_foreign_crash.result
@@ -15,6 +15,7 @@ ALTER TABLE child ROW_FORMAT=DYNAMIC, ALGORITHM=COPY;
connection default;
SET DEBUG_SYNC='now WAIT_FOR s1';
SET DEBUG_SYNC='now SIGNAL s2 WAIT_FOR s1';
+# restart
disconnect con1;
show tables;
Tables_in_bug
diff --git a/mysql-test/suite/innodb/r/alter_kill.result b/mysql-test/suite/innodb/r/alter_kill.result
index 0f2e7b34837..8195d8e8ed1 100644
--- a/mysql-test/suite/innodb/r/alter_kill.result
+++ b/mysql-test/suite/innodb/r/alter_kill.result
@@ -13,14 +13,14 @@ connection default;
disconnect con1;
# Corrupt FIL_PAGE_OFFSET in bug16720368.ibd,
# and recompute innodb_checksum_algorithm=crc32
-# Restart mysqld
+# restart
SELECT COUNT(*) FROM bug16720368;
ERROR 42S02: Table 'test.bug16720368' doesn't exist in engine
INSERT INTO bug16720368 VALUES(1);
ERROR 42S02: Table 'test.bug16720368' doesn't exist in engine
INSERT INTO bug16720368_1 VALUES(1);
# Shut down the server to uncorrupt the data.
-# Restart the server after uncorrupting the file.
+# restart
INSERT INTO bug16720368 VALUES(9,1);
SELECT COUNT(*) FROM bug16720368;
COUNT(*)
@@ -44,7 +44,9 @@ connection default;
# Kill the server
disconnect con1;
# Attempt to start without an *.ibd file.
+# restart
FOUND 1 /\[ERROR\] InnoDB: Tablespace [0-9]+ was not found at .*test.bug16735660.ibd/ in mysqld.1.err
+# restart
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
SELECT * FROM bug16735660;
a
diff --git a/mysql-test/suite/innodb/r/alter_missing_tablespace.result b/mysql-test/suite/innodb/r/alter_missing_tablespace.result
index 237d0df26ff..935b8199ab9 100644
--- a/mysql-test/suite/innodb/r/alter_missing_tablespace.result
+++ b/mysql-test/suite/innodb/r/alter_missing_tablespace.result
@@ -7,6 +7,7 @@ CREATE TABLE t(a SERIAL)ENGINE=InnoDB;
CREATE TABLE `x..d` (a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
CREATE TABLE t1(a SERIAL)ENGINE=InnoDB;
INSERT INTO t1 VALUES(1),(2),(3);
+# restart
SELECT * FROM t;
ERROR 42S02: Table 'test.t' doesn't exist in engine
ALTER TABLE t ADD INDEX (a), ALGORITHM=INPLACE;
diff --git a/mysql-test/suite/innodb/r/alter_rename_files.result b/mysql-test/suite/innodb/r/alter_rename_files.result
index 7df63a051da..490f6773765 100644
--- a/mysql-test/suite/innodb/r/alter_rename_files.result
+++ b/mysql-test/suite/innodb/r/alter_rename_files.result
@@ -2,7 +2,7 @@ CREATE TABLE t1 (x INT NOT NULL UNIQUE KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES(5);
SET GLOBAL innodb_log_checkpoint_now=TRUE;
SET DEBUG_SYNC='commit_cache_rebuild SIGNAL ready WAIT_FOR finish';
-ALTER TABLE t1 ADD PRIMARY KEY(x);
+ALTER TABLE t1 FORCE;;
connect con1,localhost,root,,;
SET DEBUG_SYNC='now WAIT_FOR ready';
SET GLOBAL innodb_log_checkpoint_now=TRUE;
@@ -13,7 +13,6 @@ SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`x` int(11) NOT NULL,
- PRIMARY KEY (`x`),
UNIQUE KEY `x` (`x`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/alter_table.result b/mysql-test/suite/innodb/r/alter_table.result
index e47bfb90152..0cfd3096f3f 100644
--- a/mysql-test/suite/innodb/r/alter_table.result
+++ b/mysql-test/suite/innodb/r/alter_table.result
@@ -53,3 +53,18 @@ ALTER TABLE t1 DROP a;
ERROR HY000: Cannot drop index 'a': needed in a foreign key constraint
ALTER TABLE t1 ADD c INT;
DROP TABLE t1, tx;
+create table t1 (a int) transactional=1 engine=aria;
+create table t2 (a int) transactional=1 engine=innodb;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=1
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 TRANSACTIONAL=1
+alter table t1 engine=innodb;
+alter table t1 add column b int;
+drop table t1,t2;
diff --git a/mysql-test/suite/innodb/r/alter_varchar_change.result b/mysql-test/suite/innodb/r/alter_varchar_change.result
index 22e8d4fdf69..ddf0449a040 100644
--- a/mysql-test/suite/innodb/r/alter_varchar_change.result
+++ b/mysql-test/suite/innodb/r/alter_varchar_change.result
@@ -412,6 +412,23 @@ ALTER TABLE t1 MODIFY f2 VARCHAR(300);
CALL get_table_id("test/t1", @tbl1_id);
SELECT @tbl1_id = @tbl_id;
@tbl1_id = @tbl_id
+1
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) NOT NULL,
+ `f2` varchar(300) DEFAULT NULL,
+ KEY `idx` (`f2`(40))
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t1;
+CREATE TABLE t1(f1 INT NOT NULL,
+f2 VARCHAR(128),
+INDEX idx(f2(40)))ENGINE=InnoDB;
+CALL get_table_id("test/t1", @tbl_id);
+ALTER TABLE t1 MODIFY f2 VARCHAR(300);
+CALL get_table_id("test/t1", @tbl1_id);
+SELECT @tbl1_id = @tbl_id;
+@tbl1_id = @tbl_id
0
SHOW CREATE TABLE t1;
Table Create Table
@@ -422,6 +439,23 @@ t1 CREATE TABLE `t1` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
DROP TABLE t1;
CREATE TABLE t1(f1 INT NOT NULL,
+f2 VARCHAR(128),
+INDEX idx(f2(40)))ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+CALL get_table_id("test/t1", @tbl_id);
+ALTER TABLE t1 MODIFY f2 VARCHAR(300);
+CALL get_table_id("test/t1", @tbl1_id);
+SELECT @tbl1_id = @tbl_id;
+@tbl1_id = @tbl_id
+1
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f1` int(11) NOT NULL,
+ `f2` varchar(300) DEFAULT NULL,
+ KEY `idx` (`f2`(40))
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT
+DROP TABLE t1;
+CREATE TABLE t1(f1 INT NOT NULL,
f2 VARCHAR(100),
INDEX idx(f2(40)))ENGINE=InnoDB;
CALL get_table_id("test/t1", @tbl_id);
diff --git a/mysql-test/suite/innodb/r/analyze_table.result b/mysql-test/suite/innodb/r/analyze_table.result
index a5c25289ad1..57095b725eb 100644
--- a/mysql-test/suite/innodb/r/analyze_table.result
+++ b/mysql-test/suite/innodb/r/analyze_table.result
@@ -19,6 +19,7 @@ COUNT(*)
SET GLOBAL innodb_stats_persistent_sample_pages=2000;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
DROP PROCEDURE populate_t1;
diff --git a/mysql-test/suite/innodb/r/autoinc_debug.result b/mysql-test/suite/innodb/r/autoinc_debug.result
index eb9dfc2a028..5856c75f784 100644
--- a/mysql-test/suite/innodb/r/autoinc_debug.result
+++ b/mysql-test/suite/innodb/r/autoinc_debug.result
@@ -11,6 +11,7 @@ t1 CREATE TABLE `t1` (
`id` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1
+# restart
# SETTING auto_increment_increment IN CONNECTION1
SET AUTO_INCREMENT_INCREMENT = 2;
SET DEBUG_SYNC= 'ib_after_row_insert SIGNAL opened WAIT_FOR flushed1';
diff --git a/mysql-test/suite/innodb/r/autoinc_persist.result b/mysql-test/suite/innodb/r/autoinc_persist.result
index 083db5f7c3d..ee796160406 100644
--- a/mysql-test/suite/innodb/r/autoinc_persist.result
+++ b/mysql-test/suite/innodb/r/autoinc_persist.result
@@ -244,6 +244,7 @@ Expect 100000000000
100000000000
CREATE TABLE t13(a INT AUTO_INCREMENT PRIMARY KEY) ENGINE = InnoDB,
AUTO_INCREMENT = 1234;
+# restart
SHOW CREATE TABLE t13;
Table Create Table
t13 CREATE TABLE `t13` (
@@ -394,6 +395,7 @@ INSERT INTO t9 VALUES(0);
SELECT MAX(a) AS `Expect 100000000109` FROM t9;
Expect 100000000109
100000000109
+# restart
INSERT INTO t1 VALUES(0), (0);
SELECT a AS `Expect 110, 111` FROM t1 ORDER BY a DESC LIMIT 2;
Expect 110, 111
@@ -432,6 +434,7 @@ DELETE FROM t7 WHERE a = 100000200;
set global innodb_flush_log_at_trx_commit=1;
INSERT INTO t9 VALUES(100000000200);
DELETE FROM t9 WHERE a = 100000000200;
+# restart
INSERT INTO t1 VALUES(0);
SELECT a AS `Expect 126` FROM t1 ORDER BY a DESC LIMIT 1;
Expect 126
@@ -497,6 +500,7 @@ SELECT * FROM t19;
a
1
2
+# restart
INSERT INTO t1 VALUES(0), (0);
SELECT * FROM t1;
a
@@ -637,6 +641,7 @@ BEGIN;
# Without the fix in page_create_empty() the counter value would be lost
# when ROLLBACK deletes the last row.
ROLLBACK;
+# restart
INSERT INTO t3 VALUES(0);
SELECT MAX(a) AS `Expect 120` FROM t3;
Expect 120
@@ -738,6 +743,7 @@ test.t_inplace optimize status OK
DELETE FROM t_inplace WHERE a >= 123;
CREATE TABLE it_inplace(a INT AUTO_INCREMENT, INDEX(a)) AUTO_INCREMENT=125 ENGINE=InnoDB;
CREATE UNIQUE INDEX idx_aa ON it_inplace(a);
+# restart
INSERT INTO t_inplace VALUES(0), (0);
INSERT INTO it_inplace VALUES(0), (0);
SELECT MAX(a) AS `Expect 126` FROM t_inplace;
@@ -825,6 +831,7 @@ test.t_copy optimize status OK
DELETE FROM t_copy WHERE a >= 123;
CREATE TABLE it_copy(a INT AUTO_INCREMENT, INDEX(a)) AUTO_INCREMENT=125 ENGINE=InnoDB;
CREATE UNIQUE INDEX idx_aa ON it_copy(a);
+# restart
INSERT INTO t_copy VALUES(0), (0);
INSERT INTO it_copy VALUES(0), (0);
SELECT MAX(a) AS `Expect 126` FROM t_copy;
@@ -910,6 +917,7 @@ UPDATE t33 SET a = 10 WHERE a = 1;
INSERT INTO t33 VALUES(2, NULL);
ERROR 23000: Duplicate entry '2' for key 'PRIMARY'
COMMIT;
+# restart
# This will not insert 0
INSERT INTO t31(a) VALUES(6), (0);
SELECT * FROM t31;
diff --git a/mysql-test/suite/innodb/r/doublewrite.result b/mysql-test/suite/innodb/r/doublewrite.result
index 9a94a081abe..5adae545a71 100644
--- a/mysql-test/suite/innodb/r/doublewrite.result
+++ b/mysql-test/suite/innodb/r/doublewrite.result
@@ -4,6 +4,7 @@
# PAGE OF SYSTEM TABLESPACE
#
SET GLOBAL innodb_fast_shutdown = 0;
+# restart
show variables like 'innodb_doublewrite';
Variable_name Value
innodb_doublewrite ON
@@ -26,6 +27,8 @@ commit work;
# tablespace is full of zeroes.
select space from information_schema.innodb_sys_tables
where name = 'test/t1' into @space_id;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Ensure that dirty pages of table t1 is flushed.
flush tables t1 for export;
unlock tables;
@@ -41,6 +44,7 @@ set global innodb_buf_flush_list_now = 1;
# full of zeroes.
#
# MDEV-11623: Use old FSP_SPACE_FLAGS in the doublewrite buffer.
+# restart
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@@ -57,6 +61,8 @@ f1 f2
# tablespace is corrupted.
select space from information_schema.innodb_sys_tables
where name = 'test/t1' into @space_id;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Ensure that dirty pages of table t1 is flushed.
flush tables t1 for export;
unlock tables;
@@ -69,6 +75,7 @@ set global innodb_fil_make_page_dirty_debug = @space_id;
set global innodb_buf_flush_list_now = 1;
# Kill the server
# Corrupt the first page (page_no=0) of the user tablespace.
+# restart
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@@ -85,6 +92,8 @@ f1 f2
# tablespace is full of zeroes.
select space from information_schema.innodb_sys_tables
where name = 'test/t1' into @space_id;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Ensure that dirty pages of table t1 is flushed.
flush tables t1 for export;
unlock tables;
@@ -97,6 +106,7 @@ set global innodb_fil_make_page_dirty_debug = @space_id;
set global innodb_buf_flush_list_now = 1;
# Kill the server
# Make the 2nd page (page_no=1) of the tablespace all zeroes.
+# restart
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@@ -113,6 +123,8 @@ f1 f2
# tablespace is corrupted.
select space from information_schema.innodb_sys_tables
where name = 'test/t1' into @space_id;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# Ensure that dirty pages of table t1 is flushed.
flush tables t1 for export;
unlock tables;
@@ -125,6 +137,7 @@ set global innodb_fil_make_page_dirty_debug = @space_id;
set global innodb_buf_flush_list_now = 1;
# Kill the server
# Corrupt the 2nd page (page_no=1) of the user tablespace.
+# restart
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@@ -151,6 +164,7 @@ set global innodb_buf_flush_list_now = 1;
# Kill the server
# Make the first page (page_no=0) of the system tablespace
# all zeroes.
+# restart
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@@ -176,6 +190,7 @@ set global innodb_fil_make_page_dirty_debug = 0;
set global innodb_buf_flush_list_now = 1;
# Kill the server
# Corrupt the first page (page_no=0) of the system tablespace.
+# restart
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@@ -202,6 +217,7 @@ set global innodb_buf_flush_list_now = 1;
# Kill the server
# Make the 2nd page (page_no=1) of the system tablespace
# all zeroes.
+# restart
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@@ -228,6 +244,7 @@ set global innodb_buf_flush_list_now = 1;
# Kill the server
# Make the 2nd page (page_no=1) of the system tablespace
# all zeroes.
+# restart
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@@ -244,8 +261,10 @@ drop table t1;
# MDEV-12600 crash during install_db with innodb_page_size=32K
# and ibdata1=3M
#
+# restart: --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/doublewrite --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/doublewrite --innodb-data-file-path=ibdata1:1M;ibdata2:1M:autoextend
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /\[ERROR\] InnoDB: Cannot create doublewrite buffer/ in mysqld.1.err
+# restart
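The added Warning 1287 lines come from the trailing INTO clause, which the server still accepts but now reports as deprecated; the warning text itself names the preferred spelling. Side by side, using the same query this result file records:

-- deprecated placement (now raises warning 1287 as shown above)
select space from information_schema.innodb_sys_tables
where name = 'test/t1' into @space_id;
-- preferred placement, warning-free
select space into @space_id
from information_schema.innodb_sys_tables
where name = 'test/t1';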
diff --git a/mysql-test/suite/innodb/r/drop_table_background.result b/mysql-test/suite/innodb/r/drop_table_background.result
index e74bcd5e780..378f3ce00ab 100644
--- a/mysql-test/suite/innodb/r/drop_table_background.result
+++ b/mysql-test/suite/innodb/r/drop_table_background.result
@@ -11,6 +11,7 @@ ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
SELECT * from target;
ERROR 42S02: Table 'test.target' doesn't exist
DROP TABLE t;
+# restart
CREATE TABLE t (a INT) ENGINE=InnoDB;
DROP TABLE t;
DROP TABLE target;
diff --git a/mysql-test/suite/innodb/r/foreign_key.result b/mysql-test/suite/innodb/r/foreign_key.result
index 397b4f9304c..0015882690e 100644
--- a/mysql-test/suite/innodb/r/foreign_key.result
+++ b/mysql-test/suite/innodb/r/foreign_key.result
@@ -137,6 +137,7 @@ SELECT unique_constraint_name FROM information_schema.referential_constraints
WHERE table_name = 't2';
unique_constraint_name
PRIMARY
+# restart
SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency = 1;
SELECT unique_constraint_name FROM information_schema.referential_constraints
diff --git a/mysql-test/suite/innodb/r/full_crc32_import.result b/mysql-test/suite/innodb/r/full_crc32_import.result
new file mode 100644
index 00000000000..693ad22bab6
--- /dev/null
+++ b/mysql-test/suite/innodb/r/full_crc32_import.result
@@ -0,0 +1,129 @@
+FLUSH TABLES;
+# Treating compact format as dynamic format after import stmt
+CREATE TABLE t1
+(a int AUTO_INCREMENT PRIMARY KEY,
+b blob,
+c blob,
+KEY (b(200))) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 (b, c) values (repeat("ab", 200), repeat("bc", 200));
+INSERT INTO t1 (b, c) values (repeat("bc", 200), repeat("cd", 200));
+INSERT INTO t1 (b, c) values (repeat("cd", 200), repeat("ef", 200));
+INSERT INTO t1 (b, c) values (repeat("de", 200), repeat("fg", 200));
+INSERT INTO t1 (b, c) values (repeat("ef", 200), repeat("gh", 200));
+INSERT INTO t1 (b, c) values (repeat("fg", 200), repeat("hi", 200));
+INSERT INTO t1 (b, c) values (repeat("gh", 200), repeat("ij", 200));
+INSERT INTO t1 (b, c) values (repeat("hi", 200), repeat("jk", 200));
+INSERT INTO t1 (b, c) values (repeat("ij", 200), repeat("kl", 200));
+INSERT INTO t1 (b, c) values (repeat("jk", 200), repeat("lm", 200));
+INSERT INTO t1 (b, c) SELECT b,c FROM t1 ORDER BY a;
+INSERT INTO t1 (b, c) SELECT b,c FROM t1 ORDER BY a;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+40
+FLUSH TABLE t1 FOR EXPORT;
+# List before copying files
+db.opt
+t1.cfg
+t1.frm
+t1.ibd
+backup: t1
+UNLOCK TABLES;
+ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 DISCARD TABLESPACE;
+db.opt
+t1.frm
+restore: t1 .ibd and .cfg files
+ALTER TABLE t1 IMPORT TABLESPACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL AUTO_INCREMENT,
+ `b` blob DEFAULT NULL,
+ `c` blob DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `b` (`b`(200))
+) ENGINE=InnoDB AUTO_INCREMENT=46 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+UPDATE t1 set b = repeat("de", 100) where b = repeat("cd", 200);
+explain SELECT a FROM t1 where b = repeat("de", 100);
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref b b 203 const # Using where
+SELECT a FROM t1 where b = repeat("de", 100);
+a
+3
+13
+28
+38
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+40
+DELETE FROM t1;
+InnoDB 0 transactions not purged
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
+CREATE TABLE t1
+(c1 int AUTO_INCREMENT PRIMARY KEY,
+c2 POINT NOT NULL,
+c3 LINESTRING NOT NULL,
+SPATIAL INDEX idx1(c2)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1(c2,c3) VALUES(
+ST_GeomFromText('POINT(10 10)'),
+ST_GeomFromText('LINESTRING(5 5,20 20,30 30)'));
+INSERT INTO t1(c2,c3) VALUES(
+ST_GeomFromText('POINT(20 20)'),
+ST_GeomFromText('LINESTRING(5 15,20 10,30 20)'));
+INSERT INTO t1(c2,c3) VALUES(
+ST_GeomFromText('POINT(30 30)'),
+ST_GeomFromText('LINESTRING(10 5,20 24,30 32)'));
+INSERT INTO t1(c2,c3) VALUES(
+ST_GeomFromText('POINT(40 40)'),
+ST_GeomFromText('LINESTRING(15 5,25 20,35 30)'));
+INSERT INTO t1(c2,c3) VALUES(
+ST_GeomFromText('POINT(50 10)'),
+ST_GeomFromText('LINESTRING(15 15,24 10,31 20)'));
+INSERT INTO t1(c2,c3) VALUES(
+ST_GeomFromText('POINT(60 50)'),
+ST_GeomFromText('LINESTRING(10 15,20 44,35 32)'));
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+FLUSH TABLE t1 FOR EXPORT;
+# List before copying files
+db.opt
+t1.cfg
+t1.frm
+t1.ibd
+backup: t1
+UNLOCK TABLES;
+ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 DISCARD TABLESPACE;
+restore: t1 .ibd and .cfg files
+ALTER TABLE t1 IMPORT TABLESPACE;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` int(11) NOT NULL AUTO_INCREMENT,
+ `c2` point NOT NULL,
+ `c3` linestring NOT NULL,
+ PRIMARY KEY (`c1`),
+ SPATIAL KEY `idx1` (`c2`)
+) ENGINE=InnoDB AUTO_INCREMENT=14325 DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+UPDATE t1 SET C2 = ST_GeomFromText('POINT(0 0)');
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+12288
+DELETE FROM t1;
+CHECK TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+InnoDB 0 transactions not purged
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/ibuf_not_empty.result b/mysql-test/suite/innodb/r/ibuf_not_empty.result
index 2c898b8916d..667f0b2c90b 100644
--- a/mysql-test/suite/innodb/r/ibuf_not_empty.result
+++ b/mysql-test/suite/innodb/r/ibuf_not_empty.result
@@ -18,8 +18,10 @@ INSERT INTO t1 SELECT 0,b,c FROM t1;
INSERT INTO t1 SELECT 0,b,c FROM t1;
INSERT INTO t1 SELECT 0,b,c FROM t1;
INSERT INTO t1 SELECT 0,b,c FROM t1;
+# restart: --innodb-force-recovery=6
check table t1;
Table Op Msg_type Msg_text
test.t1 check Warning InnoDB: Index 'b' contains #### entries, should be 4096.
test.t1 check error Corrupt
+# restart
DROP TABLE t1;
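The "# restart" annotations that now appear in these result files are echoed by the test framework each time mysqld is restarted, together with any non-default options, as in the --innodb-force-recovery=6 restart above. A hedged sketch of how a .test file typically produces such an annotated restart, using the stock restart_mysqld.inc include (the recovery value here is only an example):

--let $restart_parameters= --innodb-force-recovery=6
--source include/restart_mysqld.inc
# checks that need the special mode go here
--let $restart_parameters=
--source include/restart_mysqld.inc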
diff --git a/mysql-test/suite/innodb/r/innodb-32k-crash.result b/mysql-test/suite/innodb/r/innodb-32k-crash.result
index 83b8054831a..8e5249a55d5 100644
--- a/mysql-test/suite/innodb/r/innodb-32k-crash.result
+++ b/mysql-test/suite/innodb/r/innodb-32k-crash.result
@@ -119,6 +119,7 @@ BEGIN;
INSERT INTO t1 SELECT * from t2;
BEGIN;
UPDATE t1 SET a=@e,b=@e,c=@e,d=@e,e=@e;
+# restart
UPDATE t1 SET a=@f,b=@f,c=@f,d=@f,e=@f,f=@f,g=@f,h=@f,i=@f,j=@f,
k=@f,l=@f,m=@f,n=@f,o=@f,p=@f,q=@f,r=@f,s=@f,t=@f,u=@f,
v=@f,w=@f,x=@b,y=@f,z=@f,
diff --git a/mysql-test/suite/innodb/r/innodb-32k.result b/mysql-test/suite/innodb/r/innodb-32k.result
index f4a3c835c5c..3e0671f5408 100644
--- a/mysql-test/suite/innodb/r/innodb-32k.result
+++ b/mysql-test/suite/innodb/r/innodb-32k.result
@@ -653,6 +653,7 @@ insert into t2 values(
@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,
@a,@a,@a,@a,@a,@a,@a);
update t2 set col190=@b;
+# restart
set @a = repeat('abcdefghijklmnopqrstuvwxyz', 1650);
set @b = repeat('zyxwvutsrqponmlkjihgfedcba', 2000);
show create table t2;
diff --git a/mysql-test/suite/innodb/r/innodb-64k-crash.result b/mysql-test/suite/innodb/r/innodb-64k-crash.result
index 138ad5345ed..0ff5c81f3a2 100644
--- a/mysql-test/suite/innodb/r/innodb-64k-crash.result
+++ b/mysql-test/suite/innodb/r/innodb-64k-crash.result
@@ -271,6 +271,7 @@ vb=@c,wb=@c,xb=@c,yb=@c,zb=@c,
ac=@c,bc=@c,cc=@c,dc=@c,ec=@c,fc=@c,gc=@c,hc=@c,ic=@c,jc=@c,
kc=@c,lc=@c,mc=@c,nc=@c,oc=@c,pc=@c,qc=@c,rc=@c,sc=@c,tc=@c,uc=@c,
vc=@c,wc=@c,xc=@c,yc=@c,zc=@c;
+# restart
UPDATE t1 SET a=@e,b=@e,c=@e,d=@e,e=@e,f=@e,g=@e,h=@e,i=@e,j=@e,
k=@e,l=@e,m=@e,n=@e,o=@e,p=@e,q=@e,r=@e,s=@e,t=@e,u=@e,
v=@e,w=@e,x=@e,y=@e,z=@e,
diff --git a/mysql-test/suite/innodb/r/innodb-64k.result b/mysql-test/suite/innodb/r/innodb-64k.result
index 379235c510c..8f8e031bce2 100644
--- a/mysql-test/suite/innodb/r/innodb-64k.result
+++ b/mysql-test/suite/innodb/r/innodb-64k.result
@@ -719,6 +719,7 @@ insert into t2 values(
@a,@a,@a,@a,@a,@a,@a,@a,@a,@a,
@a,@a,@a,@a,@a,@a,@a);
update t2 set col190=@b;
+# restart
set @a = repeat('abcdefghijklmnopqrstuvwxyz', 1650);
set @b = repeat('zyxwvutsrqponmlkjihgfedcba', 2000);
update t2 set col189 = @b;
diff --git a/mysql-test/suite/innodb/r/innodb-alter-table.result b/mysql-test/suite/innodb/r/innodb-alter-table.result
index 0d5afab6bae..ad3b2cb04af 100644
--- a/mysql-test/suite/innodb/r/innodb-alter-table.result
+++ b/mysql-test/suite/innodb/r/innodb-alter-table.result
@@ -218,6 +218,7 @@ t CREATE TABLE `t` (
PARTITION `p99991231` VALUES LESS THAN (MAXVALUE) ENGINE = InnoDB)
analyze table t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
select count(*) from t where d ='2017-09-15';
count(*)
diff --git a/mysql-test/suite/innodb/r/innodb-alter-tempfile.result b/mysql-test/suite/innodb/r/innodb-alter-tempfile.result
index b164c3c26b0..cfc99650db6 100644
--- a/mysql-test/suite/innodb/r/innodb-alter-tempfile.result
+++ b/mysql-test/suite/innodb/r/innodb-alter-tempfile.result
@@ -8,6 +8,7 @@ CREATE TABLE t1 (f1 INT NOT NULL, f2 INT NOT NULL) ENGINE=innodb;
SET debug_dbug='+d,innodb_alter_commit_crash_before_commit';
ALTER TABLE t1 ADD PRIMARY KEY (f2, f1);
ERROR HY000: Lost connection to MySQL server during query
+# restart
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/suite/innodb/r/innodb-alter.result b/mysql-test/suite/innodb/r/innodb-alter.result
index e7240dac770..56f80a4de3e 100644
--- a/mysql-test/suite/innodb/r/innodb-alter.result
+++ b/mysql-test/suite/innodb/r/innodb-alter.result
@@ -449,6 +449,7 @@ FULLTEXT INDEX(t)
ALTER TABLE tu ADD COLUMN c CHAR(1) NOT NULL FIRST, LOCK=NONE;
ERROR 0A000: LOCK=NONE is not supported. Reason: Fulltext index creation requires a lock. Try LOCK=SHARED
ALTER TABLE tu ADD COLUMN c CHAR(1) NOT NULL, LOCK=NONE;
+ERROR 0A000: LOCK=NONE is not supported. Reason: Fulltext index creation requires a lock. Try LOCK=SHARED
DROP TABLE tu;
CREATE TABLE tv (
pk INT PRIMARY KEY, FTS_DOC_ID BIGINT UNSIGNED NOT NULL, t TEXT,
@@ -458,6 +459,7 @@ FULLTEXT INDEX(t)
ALTER TABLE tv ADD COLUMN c CHAR(1) NOT NULL FIRST, LOCK=NONE;
ERROR 0A000: LOCK=NONE is not supported. Reason: Fulltext index creation requires a lock. Try LOCK=SHARED
ALTER TABLE tv ADD COLUMN c CHAR(1) NOT NULL, LOCK=NONE;
+ERROR 0A000: LOCK=NONE is not supported. Reason: Fulltext index creation requires a lock. Try LOCK=SHARED
DROP TABLE tv;
ALTER TABLE t1o CHANGE c1 dB_row_Id INT, ALGORITHM=COPY;
ERROR 42000: Incorrect column name 'dB_row_Id'
@@ -814,6 +816,7 @@ ADD FOREIGN KEY(dd) REFERENCES t1(d),
ALGORITHM=INPLACE;
ALTER TABLE t1 CHANGE b B INT, ALGORITHM=INPLACE;
ALTER TABLE t2 CHANGE aa AA INT, ALGORITHM=INPLACE;
+# restart
ALTER TABLE t1 CHANGE d D INT, ALGORITHM=INPLACE;
ALTER TABLE t2 CHANGE bb BB INT, ALGORITHM=INPLACE;
SHOW CREATE TABLE t1;
diff --git a/mysql-test/suite/innodb/r/innodb-autoinc-44030.result b/mysql-test/suite/innodb/r/innodb-autoinc-44030.result
index 5ec1bd38d80..857a70a03be 100644
--- a/mysql-test/suite/innodb/r/innodb-autoinc-44030.result
+++ b/mysql-test/suite/innodb/r/innodb-autoinc-44030.result
@@ -9,6 +9,7 @@ SELECT * FROM t1;
d1
1
2
+# restart
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/suite/innodb/r/innodb-autoinc.result b/mysql-test/suite/innodb/r/innodb-autoinc.result
index eb7ff026b1a..a8271dabe6f 100644
--- a/mysql-test/suite/innodb/r/innodb-autoinc.result
+++ b/mysql-test/suite/innodb/r/innodb-autoinc.result
@@ -1110,6 +1110,7 @@ Warnings:
Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(C1 DOUBLE AUTO_INCREMENT KEY, C2 CHAR(10)) ENGINE=InnoDB;
INSERT INTO t1(C1, C2) VALUES (1, 'innodb'), (3, 'innodb');
+# restart
INSERT INTO t1(C2) VALUES ('innodb');
SHOW CREATE TABLE t1;
Table Create Table
@@ -1121,6 +1122,7 @@ t1 CREATE TABLE `t1` (
DROP TABLE t1;
CREATE TABLE t1(C1 FLOAT AUTO_INCREMENT KEY, C2 CHAR(10)) ENGINE=InnoDB;
INSERT INTO t1(C1, C2) VALUES (1, 'innodb'), (3, 'innodb');
+# restart
INSERT INTO t1(C2) VALUES ('innodb');
SHOW CREATE TABLE t1;
Table Create Table
@@ -1258,6 +1260,7 @@ Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(c1 BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (NULL);
INSERT INTO t1 VALUES (18446744073709551615);
+# restart
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/suite/innodb/r/innodb-blob.result b/mysql-test/suite/innodb/r/innodb-blob.result
index c2b46661e31..70a3fcb5511 100644
--- a/mysql-test/suite/innodb/r/innodb-blob.result
+++ b/mysql-test/suite/innodb/r/innodb-blob.result
@@ -43,6 +43,7 @@ a
3
BEGIN;
INSERT INTO t2 VALUES (42);
+# restart
disconnect con1;
disconnect con2;
connection default;
@@ -103,6 +104,7 @@ DELETE FROM t1;
ROLLBACK;
disconnect con3;
connection con2;
+# restart
disconnect con2;
connection default;
ERROR HY000: Lost connection to MySQL server during query
@@ -134,6 +136,7 @@ SELECT info FROM information_schema.processlist
WHERE state = 'debug sync point: after_row_upd_extern';
info
UPDATE t3 SET c=REPEAT('j',3000) WHERE a=2
+# restart
disconnect con2;
connection default;
ERROR HY000: Lost connection to MySQL server during query
diff --git a/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result b/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result
index f03072053c3..5e513661267 100644
--- a/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result
+++ b/mysql-test/suite/innodb/r/innodb-change-buffer-recovery.result
@@ -39,6 +39,7 @@ ERROR HY000: Lost connection to MySQL server during query
disconnect con1;
connection default;
FOUND 1 /Wrote log record for ibuf update in place operation/ in my_restart.err
+# restart
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
diff --git a/mysql-test/suite/innodb/r/innodb-fk.result b/mysql-test/suite/innodb/r/innodb-fk.result
index 0a269bad828..a7b667b3839 100644
--- a/mysql-test/suite/innodb/r/innodb-fk.result
+++ b/mysql-test/suite/innodb/r/innodb-fk.result
@@ -6,6 +6,7 @@ create table t1 (f1 int primary key) engine=innodb;
insert into t1 values (5);
insert into t1 values (2882);
insert into t1 values (10);
+# restart
update t1 set f1 = 28 where f1 = 2882;
select * from fk_120;
f1
@@ -27,6 +28,7 @@ drop table t1;
# Check if restrict is working fine.
#
create table t1 (f1 int primary key) engine=innodb;
+# restart
delete from t1 where f1 = 29;
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`fk_29`, CONSTRAINT `pc29` FOREIGN KEY (`f1`) REFERENCES `t1` (`f1`))
select * from fk_29;
diff --git a/mysql-test/suite/innodb/r/innodb-get-fk.result b/mysql-test/suite/innodb/r/innodb-get-fk.result
index aa1bff8f134..2b1ed5e7c79 100644
--- a/mysql-test/suite/innodb/r/innodb-get-fk.result
+++ b/mysql-test/suite/innodb/r/innodb-get-fk.result
@@ -26,6 +26,7 @@ KEY `fk_crewRoleAssigned_roleCode` (`role_code`),
CONSTRAINT `fk_crewRoleAssigned_crewId` FOREIGN KEY (`crew_id`) REFERENCES `repro`.`crew` (`id`) ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT `fk_crewRoleAssigned_pilotId` FOREIGN KEY (`crew_id`) REFERENCES `repro`.`pilot` (`id`) ON DELETE CASCADE ON UPDATE CASCADE
) ENGINE=InnoDB COMMENT="This is a comment about tables";
+# restart: --innodb-read-only
ALTER TABLE `repro`.`crew_role_assigned` COMMENT = 'innodb_read_only';
ERROR HY000: Table 'crew_role_assigned' is read only
SHOW CREATE TABLE `repro`.`crew_role_assigned`;
@@ -43,6 +44,7 @@ crew_role_assigned CREATE TABLE `crew_role_assigned` (
SET GLOBAL innodb_buffer_pool_load_now = ON;
SET GLOBAL innodb_buffer_pool_dump_now = ON;
SET GLOBAL innodb_buffer_pool_load_abort = ON;
+# restart
ALTER TABLE `repro`.`crew_role_assigned` COMMENT = "This is a new comment about tables";
SHOW CREATE TABLE `repro`.`crew_role_assigned`;
Table Create Table
@@ -56,6 +58,7 @@ crew_role_assigned CREATE TABLE `crew_role_assigned` (
CONSTRAINT `fk_crewRoleAssigned_crewId` FOREIGN KEY (`crew_id`) REFERENCES `crew` (`id`) ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT `fk_crewRoleAssigned_pilotId` FOREIGN KEY (`crew_id`) REFERENCES `pilot` (`id`) ON DELETE CASCADE ON UPDATE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COMMENT='This is a new comment about tables'
+# restart: --innodb-read-only
SHOW CREATE TABLE `repro`.`crew_role_assigned`;
Table Create Table
crew_role_assigned CREATE TABLE `crew_role_assigned` (
@@ -68,6 +71,7 @@ crew_role_assigned CREATE TABLE `crew_role_assigned` (
CONSTRAINT `fk_crewRoleAssigned_crewId` FOREIGN KEY (`crew_id`) REFERENCES `crew` (`id`) ON DELETE CASCADE ON UPDATE CASCADE,
CONSTRAINT `fk_crewRoleAssigned_pilotId` FOREIGN KEY (`crew_id`) REFERENCES `pilot` (`id`) ON DELETE CASCADE ON UPDATE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COMMENT='This is a new comment about tables'
+# restart
DROP TABLE `repro`.`crew_role_assigned`;
DROP TABLE `repro`.`pilot`;
DROP TABLE `repro`.`crew`;
diff --git a/mysql-test/suite/innodb/r/innodb-index-online.result b/mysql-test/suite/innodb/r/innodb-index-online.result
index a0227d31393..8eebece46b5 100644
--- a/mysql-test/suite/innodb/r/innodb-index-online.result
+++ b/mysql-test/suite/innodb/r/innodb-index-online.result
@@ -127,6 +127,8 @@ ERROR 23000: Duplicate entry '4' for key 'c2'
ALTER TABLE t1 STATS_PERSISTENT=1;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'c3'
test.t1 analyze status OK
UPDATE mysql.innodb_index_stats SET stat_value = 5
WHERE database_name = 'test' AND table_name= 't1' AND index_name = 'PRIMARY'
@@ -145,6 +147,7 @@ ALTER TABLE t1_c2_stats ENGINE=INNODB;
DROP INDEX c2 ON t1;
ANALYZE TABLE t1_c2_stats;
Table Op Msg_type Msg_text
+test.t1_c2_stats analyze status Engine-independent statistics collected
test.t1_c2_stats analyze status OK
SELECT * FROM mysql.innodb_index_stats WHERE table_name IN ('t1', 't1_c2_stats');
database_name table_name index_name last_update stat_name stat_value sample_size stat_description
@@ -188,9 +191,11 @@ INSERT INTO t1 SELECT 20 + c1, c2, c3 FROM t1;
INSERT INTO t1 SELECT 40 + c1, c2, c3 FROM t1;
EXPLAIN SELECT COUNT(*) FROM t1 WHERE c2 > 3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL NULL NULL NULL NULL 80 Using where
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5 Using where
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'c3'
test.t1 analyze status OK
connection con1;
UPDATE t1_c2_stats SET index_name = 'c2d';
@@ -231,8 +236,7 @@ WHERE variable_name = 'innodb_encryption_n_rowlog_blocks_encrypted');
connection con1;
SET DEBUG_SYNC = 'row_log_apply_before SIGNAL c2e_created WAIT_FOR dml2_done';
SET lock_wait_timeout = 10;
-ALTER TABLE t1 DROP INDEX c2d, ADD INDEX c2e(c2),
-ALGORITHM = INPLACE;
+ALTER TABLE t1 CHANGE c2 c22 INT, DROP INDEX c2d, ADD INDEX c2e(c22, c3(10)), ALGORITHM = NOCOPY;
connection default;
INSERT INTO t1 SELECT 80 + c1, c2, c3 FROM t1;
INSERT INTO t1 SELECT 160 + c1, c2, c3 FROM t1;
@@ -290,6 +294,7 @@ INNER JOIN INFORMATION_SCHEMA.INNODB_SYS_FIELDS sf
ON si.index_id = sf.index_id WHERE si.name = '?c2e';
name pos
c2 0
+c3 1
SET @merge_encrypt_1=
(SELECT variable_value FROM information_schema.global_status
WHERE variable_name = 'innodb_encryption_n_merge_blocks_encrypted');
diff --git a/mysql-test/suite/innodb/r/innodb-index.result b/mysql-test/suite/innodb/r/innodb-index.result
index b64fd27fb35..fff2a9c9d3f 100644
--- a/mysql-test/suite/innodb/r/innodb-index.result
+++ b/mysql-test/suite/innodb/r/innodb-index.result
@@ -77,6 +77,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain select * from t1 force index(c) order by c;
id select_type table type possible_keys key key_len ref rows Extra
@@ -119,6 +120,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain select * from t1 force index(c) order by c;
id select_type table type possible_keys key key_len ref rows Extra
@@ -810,6 +812,7 @@ a b
2 2
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain select * from t1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -846,6 +849,7 @@ a
2
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain select * from t1;
id select_type table type possible_keys key key_len ref rows Extra
@@ -1883,11 +1887,13 @@ CREATE TABLE t1(f1 INT PRIMARY KEY)ENGINE=InnoDB;
CREATE TABLE t2(f1 INT PRIMARY KEY)ENGINE=InnoDB;
# Kill the server
# Wrong space_id in a dirty file and a missing file
+# restart
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
# Restore t1 and t2
+# restart
SELECT * FROM t1;
f1
SELECT * FROM t2;
diff --git a/mysql-test/suite/innodb/r/innodb-isolation.result b/mysql-test/suite/innodb/r/innodb-isolation.result
index ce9c530ff44..a308f10a8a9 100644
--- a/mysql-test/suite/innodb/r/innodb-isolation.result
+++ b/mysql-test/suite/innodb/r/innodb-isolation.result
@@ -963,15 +963,15 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 12 #
EXPLAIN SELECT c1, c2 FROM t1 WHERE c1 > ((SELECT COUNT(*) FROM t1) / 2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 range PRIMARY PRIMARY 4 NULL 8 #
+1 PRIMARY t1 index PRIMARY k2 5 NULL 12 #
2 SUBQUERY t1 index NULL k2 5 NULL 12 #
EXPLAIN SELECT COUNT(c2) FROM t1 WHERE c1 > ((SELECT COUNT(*) FROM t1) / 2);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 range PRIMARY PRIMARY 4 NULL 8 #
+1 PRIMARY t1 index PRIMARY k2 5 NULL 12 #
2 SUBQUERY t1 index NULL k2 5 NULL 12 #
EXPLAIN SELECT COUNT(*) FROM t1 WHERE c1 > (SELECT AVG(c1) FROM t1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t1 range PRIMARY PRIMARY 4 NULL 7 #
+1 PRIMARY t1 index PRIMARY k2 5 NULL 12 #
2 SUBQUERY t1 index NULL k2 5 NULL 12 #
#
# Make all indexes in t2 obsolete to the active repeatable read transaction
diff --git a/mysql-test/suite/innodb/r/innodb-lock-schedule-algorithm.result b/mysql-test/suite/innodb/r/innodb-lock-schedule-algorithm.result
index 928cdcfbd97..886a7f477b6 100644
--- a/mysql-test/suite/innodb/r/innodb-lock-schedule-algorithm.result
+++ b/mysql-test/suite/innodb/r/innodb-lock-schedule-algorithm.result
@@ -53,6 +53,7 @@ i2
DROP TABLE t1, t2;
disconnect con1;
# "restart: --loose-innodb-lock-schedule-algorithm=FCFS"
+# restart: --loose_innodb_lock_schedule_algorithm=FCFS
CREATE TABLE t1 (i1 INT) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1),(2);
CREATE TABLE t2 (i2 int) ENGINE=MyISAM;
diff --git a/mysql-test/suite/innodb/r/innodb-mdev-7513.result b/mysql-test/suite/innodb/r/innodb-mdev-7513.result
index 2ef54911cf1..2370b7313c8 100644
--- a/mysql-test/suite/innodb/r/innodb-mdev-7513.result
+++ b/mysql-test/suite/innodb/r/innodb-mdev-7513.result
@@ -199,6 +199,7 @@ text197 TEXT
) ENGINE = InnoDB;
INSERT INTO t1 VALUES ('abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef', 'abcdef');
DELETE FROM t1 WHERE text1 = 'abcdef';
+# restart
SELECT * from t1;
text1 text2 text3 text4 text5 text6 text7 text8 text9 text10 text11 text12 text13 text14 text15 text16 text17 text18 text19 text20 text21 text22 text23 text24 text25 text26 text27 text28 text29 text30 text31 text32 text33 text34 text35 text36 text37 text38 text39 text40 text41 text42 text43 text44 text45 text46 text47 text48 text49 text50 text51 text52 text53 text54 text55 text56 text57 text58 text59 text60 text61 text62 text63 text64 text65 text66 text67 text68 text69 text70 text71 text72 text73 text74 text75 text76 text77 text78 text79 text80 text81 text82 text83 text84 text85 text86 text87 text88 text89 text90 text91 text92 text93 text94 text95 text96 text97 text98 text99 text100 text101 text102 text103 text104 text105 text106 text107 text108 text109 text110 text111 text112 text113 text114 text115 text116 text117 text118 text119 text120 text121 text122 text123 text124 text125 text126 text127 text128 text129 text130 text131 text132 text133 text134 text135 text136 text137 text138 text139 text140 text141 text142 text143 text144 text145 text146 text147 text148 text149 text150 text151 text152 text153 text154 text155 text156 text157 text158 text159 text160 text161 text162 text163 text164 text165 text166 text167 text168 text169 text170 text171 text172 text173 text174 text175 text176 text177 text178 text179 text180 text181 text182 text183 text184 text185 text186 text187 text188 text189 text190 text191 text192 text193 text194 text195 text196 text197
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_bzip2.result b/mysql-test/suite/innodb/r/innodb-page_compression_bzip2.result
index 5411f5149d1..34b0d9431b5 100644
--- a/mysql-test/suite/innodb/r/innodb-page_compression_bzip2.result
+++ b/mysql-test/suite/innodb/r/innodb-page_compression_bzip2.result
@@ -213,6 +213,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
@@ -299,6 +300,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
@@ -385,6 +387,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_default.result b/mysql-test/suite/innodb/r/innodb-page_compression_default.result
index 8977a149935..4610d251fc0 100644
--- a/mysql-test/suite/innodb/r/innodb-page_compression_default.result
+++ b/mysql-test/suite/innodb/r/innodb-page_compression_default.result
@@ -55,6 +55,7 @@ NOT FOUND /AaAaAaAa/ in innodb_page_compressed7.ibd
NOT FOUND /AaAaAaAa/ in innodb_page_compressed8.ibd
# innodb_page_compressed9 page compressed expected NOT FOUND
NOT FOUND /AaAaAaAa/ in innodb_page_compressed9.ibd
+# restart
select count(*) from innodb_page_compressed1;
count(*)
10000
diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_lz4.result b/mysql-test/suite/innodb/r/innodb-page_compression_lz4.result
index aefb5c8a0be..8ea650c2241 100644
--- a/mysql-test/suite/innodb/r/innodb-page_compression_lz4.result
+++ b/mysql-test/suite/innodb/r/innodb-page_compression_lz4.result
@@ -213,6 +213,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
@@ -299,6 +300,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
@@ -385,6 +387,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_lzma.result b/mysql-test/suite/innodb/r/innodb-page_compression_lzma.result
index 606de870d7c..4206b0b83d4 100644
--- a/mysql-test/suite/innodb/r/innodb-page_compression_lzma.result
+++ b/mysql-test/suite/innodb/r/innodb-page_compression_lzma.result
@@ -213,6 +213,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
@@ -299,6 +300,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
@@ -385,6 +387,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_lzo.result b/mysql-test/suite/innodb/r/innodb-page_compression_lzo.result
index 66783c35d6c..c440d28ddd3 100644
--- a/mysql-test/suite/innodb/r/innodb-page_compression_lzo.result
+++ b/mysql-test/suite/innodb/r/innodb-page_compression_lzo.result
@@ -213,6 +213,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
@@ -299,6 +300,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result b/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result
index 98c353e87e3..2f675bcf1b4 100644
--- a/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result
+++ b/mysql-test/suite/innodb/r/innodb-page_compression_snappy.result
@@ -56,6 +56,7 @@ NOT FOUND /AaAaAaAa/ in innodb_page_compressed7.ibd
NOT FOUND /AaAaAaAa/ in innodb_page_compressed8.ibd
# innodb_page_compressed9 page compressed expected NOT FOUND
NOT FOUND /AaAaAaAa/ in innodb_page_compressed9.ibd
+# restart
select count(*) from innodb_page_compressed1;
count(*)
10000
diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_tables.result b/mysql-test/suite/innodb/r/innodb-page_compression_tables.result
index 439f409ea59..2aee063c1ac 100644
--- a/mysql-test/suite/innodb/r/innodb-page_compression_tables.result
+++ b/mysql-test/suite/innodb/r/innodb-page_compression_tables.result
@@ -79,6 +79,7 @@ count(*)
select count(*) from innodb_dynamic where c1 < 1500000;
count(*)
5000
+# restart
update innodb_compact set c1 = c1 + 1;
update innodb_dynamic set c1 = c1 + 1;
select count(*) from innodb_compact where c1 < 1500000;
diff --git a/mysql-test/suite/innodb/r/innodb-page_compression_zip.result b/mysql-test/suite/innodb/r/innodb-page_compression_zip.result
index 9dcf676cb3d..f57ab6a82d2 100644
--- a/mysql-test/suite/innodb/r/innodb-page_compression_zip.result
+++ b/mysql-test/suite/innodb/r/innodb-page_compression_zip.result
@@ -212,6 +212,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
@@ -298,6 +299,7 @@ count(*)
select count(*) from innodb_page_compressed9 where c1 < 500000;
count(*)
5000
+# restart
update innodb_page_compressed1 set c1 = c1 + 1;
update innodb_page_compressed2 set c1 = c1 + 1;
update innodb_page_compressed3 set c1 = c1 + 1;
diff --git a/mysql-test/suite/innodb/r/innodb-rollback.result b/mysql-test/suite/innodb/r/innodb-rollback.result
new file mode 100644
index 00000000000..5eca7ad6967
--- /dev/null
+++ b/mysql-test/suite/innodb/r/innodb-rollback.result
@@ -0,0 +1,10 @@
+CREATE TABLE t1 (a INT) ENGINE=InnoDB;
+connect con1,localhost,root,,test;
+ALTER TABLE t1;
+connect con2,localhost,root,,test;
+SELECT f() FROM t1;
+ERROR 42000: FUNCTION test.f does not exist
+disconnect con2;
+disconnect con1;
+connection default;
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/innodb-table-online.result b/mysql-test/suite/innodb/r/innodb-table-online.result
index 8b6c2324431..7872332a4a8 100644
--- a/mysql-test/suite/innodb/r/innodb-table-online.result
+++ b/mysql-test/suite/innodb/r/innodb-table-online.result
@@ -201,6 +201,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL ROWS Using where
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SET @merge_encrypt_0=
(SELECT variable_value FROM information_schema.global_status
diff --git a/mysql-test/suite/innodb/r/innodb-wl5522,crc32.rdiff b/mysql-test/suite/innodb/r/innodb-wl5522,crc32.rdiff
new file mode 100644
index 00000000000..bb902e18931
--- /dev/null
+++ b/mysql-test/suite/innodb/r/innodb-wl5522,crc32.rdiff
@@ -0,0 +1,5 @@
+120,121c120
+< Warnings:
+< Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t2.cfg', will attempt to import without schema verification
+---
+> ERROR HY000: Schema mismatch (Expected FSP_SPACE_FLAGS=0x*, .ibd file contains 0x*.)
diff --git a/mysql-test/suite/innodb/r/innodb-wl5522,strict_crc32.rdiff b/mysql-test/suite/innodb/r/innodb-wl5522,strict_crc32.rdiff
new file mode 100644
index 00000000000..bb902e18931
--- /dev/null
+++ b/mysql-test/suite/innodb/r/innodb-wl5522,strict_crc32.rdiff
@@ -0,0 +1,5 @@
+120,121c120
+< Warnings:
+< Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t2.cfg', will attempt to import without schema verification
+---
+> ERROR HY000: Schema mismatch (Expected FSP_SPACE_FLAGS=0x*, .ibd file contains 0x*.)
diff --git a/mysql-test/suite/innodb/r/innodb-wl5522-debug.result b/mysql-test/suite/innodb/r/innodb-wl5522-debug.result
index ec3856c20b8..ed7477fa033 100644
--- a/mysql-test/suite/innodb/r/innodb-wl5522-debug.result
+++ b/mysql-test/suite/innodb/r/innodb-wl5522-debug.result
@@ -479,12 +479,6 @@ ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
ERROR HY000: Index for table 't1' is corrupt; try to repair it
SET SESSION debug_dbug=@saved_debug_dbug;
restore: t1 .ibd and .cfg files
-SET SESSION debug_dbug="+d,ib_import_set_max_rowid_failure";
-ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
-ERROR HY000: Index for table 't1' is corrupt; try to repair it
-SET SESSION debug_dbug=@saved_debug_dbug;
-unlink: t1.ibd
-unlink: t1.cfg
DROP TABLE test_wl5522.t1;
CREATE TABLE test_wl5522.t1 (
c1 BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY,
diff --git a/mysql-test/suite/innodb/r/innodb-wl5522.result b/mysql-test/suite/innodb/r/innodb-wl5522.result
index 62f03292baa..50330b5b164 100644
--- a/mysql-test/suite/innodb/r/innodb-wl5522.result
+++ b/mysql-test/suite/innodb/r/innodb-wl5522.result
@@ -36,6 +36,7 @@ t1.frm
t1.ibd
t2.frm
t2.ibd
+# restart
FLUSH TABLE t1, t2 FOR EXPORT;
# List before copying files
db.opt
@@ -61,6 +62,7 @@ a b c
1462 Devotion asdfuihknaskdf
1461 Cavalry ..asdasdfaeraf
# Restarting server
+# restart
# Done restarting server
# List before t1 DISCARD
db.opt
@@ -117,7 +119,8 @@ t2.frm
ALTER TABLE t2 IMPORT TABLESPACE;
ERROR HY000: Schema mismatch (Table flags don't match, server table has 0x21 and the meta-data file has 0x1; .cfg file uses ROW_FORMAT=COMPACT)
ALTER TABLE t2 IMPORT TABLESPACE;
-ERROR HY000: Schema mismatch (Expected FSP_SPACE_FLAGS=0x*, .ibd file contains 0x*.)
+Warnings:
+Warning 1810 IO Read error: (2, No such file or directory) Error opening './test/t2.cfg', will attempt to import without schema verification
DROP TABLE t2;
SET GLOBAL innodb_file_per_table = 1;
SELECT @@innodb_file_per_table;
diff --git a/mysql-test/suite/innodb/r/innodb.result b/mysql-test/suite/innodb/r/innodb.result
index 86f90c2bd79..39130049c33 100644
--- a/mysql-test/suite/innodb/r/innodb.result
+++ b/mysql-test/suite/innodb/r/innodb.result
@@ -217,6 +217,7 @@ create index skr on t1 (a);
insert into t1 values (3,""), (4,"testing");
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -1608,7 +1609,7 @@ INSERT INTO t1 VALUES (1),(2),(3);
CREATE TABLE t2 (b_id tinyint(4) NOT NULL default '0',b_a tinyint(4) NOT NULL default '0', PRIMARY KEY (b_id), KEY (b_a),
CONSTRAINT fk_b_a FOREIGN KEY (b_a) REFERENCES t1 (a_id) ON DELETE CASCADE ON UPDATE NO ACTION) ENGINE=InnoDB DEFAULT CHARSET=latin1;
INSERT INTO t2 VALUES (1,1),(2,1),(3,1),(4,2),(5,2);
-SELECT * FROM (SELECT t1.*,GROUP_CONCAT(t2.b_id SEPARATOR ',') as b_list FROM (t1 LEFT JOIN (t2) on t1.a_id = t2.b_a) GROUP BY t1.a_id ) AS xyz;
+SELECT * FROM (SELECT t1.*,GROUP_CONCAT(t2.b_id SEPARATOR ',') as b_list FROM (t1 LEFT JOIN t2 on t1.a_id = t2.b_a) GROUP BY t1.a_id ) AS xyz;
a_id b_list
1 1,2,3
2 4,5
@@ -1687,9 +1688,9 @@ select count(*) from t1 where x = 18446744073709551601;
count(*)
1
drop table t1;
-SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total';
-variable_value
-ok
+SELECT IF(variable_value BETWEEN 488 AND 512, 'OK', variable_value) FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total';
+IF(variable_value BETWEEN 488 AND 512, 'OK', variable_value)
+OK
SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_page_size';
variable_value
16384
@@ -3236,7 +3237,7 @@ select f1 from t1;
f1
show status like "handler_read_key";
Variable_name Value
-Handler_read_key 0
+Handler_read_key 3
drop table t1;
CREATE TABLE t1 (c1 INT) ENGINE=InnoDB;
CREATE TEMPORARY TABLE t2 (c1 INT) ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb/r/innodb_28867993.result b/mysql-test/suite/innodb/r/innodb_28867993.result
index acc6734eaee..493d3ef0415 100644
--- a/mysql-test/suite/innodb/r/innodb_28867993.result
+++ b/mysql-test/suite/innodb/r/innodb_28867993.result
@@ -1,6 +1,7 @@
create table t1 (a int) engine=innodb;
insert t1 values (1),(2);
create database ib_logfile2;
+# restart
select * from t1;
a
1
diff --git a/mysql-test/suite/innodb/r/innodb_bug12902967.result b/mysql-test/suite/innodb/r/innodb_bug12902967.result
index e784c6b306a..6478cdb6c1f 100644
--- a/mysql-test/suite/innodb/r/innodb_bug12902967.result
+++ b/mysql-test/suite/innodb/r/innodb_bug12902967.result
@@ -1,4 +1,5 @@
call mtr.add_suppression("In ALTER TABLE .* has or is referenced in foreign key constraints which are not compatible with the new table definition.");
+# restart
create table t1 (f1 integer primary key) engine innodb;
alter table t1 add constraint c1 foreign key (f1) references t1(f1);
ERROR HY000: Error on rename of '#sql-temporary' to './test/t1' (errno: 150 "Foreign key constraint is incorrectly formed")
diff --git a/mysql-test/suite/innodb/r/innodb_bug14147491.result b/mysql-test/suite/innodb/r/innodb_bug14147491.result
index 15d72b74922..533eaeafeb8 100644
--- a/mysql-test/suite/innodb/r/innodb_bug14147491.result
+++ b/mysql-test/suite/innodb/r/innodb_bug14147491.result
@@ -9,6 +9,7 @@ INSERT INTO t1 (b) VALUES ('corrupt me');
# Corrupt the table
Munged a string.
Munged a string.
+# restart
# Now t1 is corrupted but we should not crash
SELECT * FROM t1;
Got one of the listed errors
diff --git a/mysql-test/suite/innodb/r/innodb_bug14676111.result b/mysql-test/suite/innodb/r/innodb_bug14676111.result
index 49d099263de..738e152b9fa 100644
--- a/mysql-test/suite/innodb/r/innodb_bug14676111.result
+++ b/mysql-test/suite/innodb/r/innodb_bug14676111.result
@@ -16,6 +16,7 @@ insert into t1 values (2);
connection default;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select CLUST_INDEX_SIZE from information_schema.INNODB_SYS_TABLESTATS where NAME = 'test/t1';
CLUST_INDEX_SIZE
@@ -26,6 +27,7 @@ disconnect con4;
connection default;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select CLUST_INDEX_SIZE from information_schema.INNODB_SYS_TABLESTATS where NAME = 'test/t1';
CLUST_INDEX_SIZE
@@ -36,6 +38,7 @@ disconnect con5;
connection default;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select CLUST_INDEX_SIZE from information_schema.INNODB_SYS_TABLESTATS where NAME = 'test/t1';
CLUST_INDEX_SIZE
@@ -47,6 +50,7 @@ disconnect con2;
connection default;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select CLUST_INDEX_SIZE from information_schema.INNODB_SYS_TABLESTATS where NAME = 'test/t1';
CLUST_INDEX_SIZE
@@ -56,6 +60,7 @@ insert into t1 values (2);
rollback;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select CLUST_INDEX_SIZE from information_schema.INNODB_SYS_TABLESTATS where NAME = 'test/t1';
CLUST_INDEX_SIZE
@@ -65,6 +70,7 @@ insert into t1 values (2);
rollback;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select CLUST_INDEX_SIZE from information_schema.INNODB_SYS_TABLESTATS where NAME = 'test/t1';
CLUST_INDEX_SIZE
diff --git a/mysql-test/suite/innodb/r/innodb_bug30423.result b/mysql-test/suite/innodb/r/innodb_bug30423.result
index c7f823a06ae..786041370ef 100644
--- a/mysql-test/suite/innodb/r/innodb_bug30423.result
+++ b/mysql-test/suite/innodb/r/innodb_bug30423.result
@@ -19,12 +19,15 @@ select @@innodb_stats_method;
nulls_equal
analyze table bug30243_1;
Table Op Msg_type Msg_text
+test.bug30243_1 analyze status Engine-independent statistics collected
test.bug30243_1 analyze status OK
analyze table bug30243_2;
Table Op Msg_type Msg_text
+test.bug30243_2 analyze status Engine-independent statistics collected
test.bug30243_2 analyze status OK
analyze table bug30243_3;
Table Op Msg_type Msg_text
+test.bug30243_3 analyze status Engine-independent statistics collected
test.bug30243_3 analyze status OK
set global innodb_stats_method = "NULL";
ERROR 42000: Variable 'innodb_stats_method' can't be set to the value of 'NULL'
@@ -34,12 +37,15 @@ select @@innodb_stats_method;
nulls_ignored
analyze table bug30243_1;
Table Op Msg_type Msg_text
+test.bug30243_1 analyze status Engine-independent statistics collected
test.bug30243_1 analyze status OK
analyze table bug30243_2;
Table Op Msg_type Msg_text
+test.bug30243_2 analyze status Engine-independent statistics collected
test.bug30243_2 analyze status OK
analyze table bug30243_3;
Table Op Msg_type Msg_text
+test.bug30243_3 analyze status Engine-independent statistics collected
test.bug30243_3 analyze status OK
explain SELECT COUNT(*), 0
FROM bug30243_1 orgs
@@ -60,12 +66,15 @@ select @@innodb_stats_method;
nulls_unequal
analyze table bug30243_1;
Table Op Msg_type Msg_text
+test.bug30243_1 analyze status Engine-independent statistics collected
test.bug30243_1 analyze status OK
analyze table bug30243_2;
Table Op Msg_type Msg_text
+test.bug30243_2 analyze status Engine-independent statistics collected
test.bug30243_2 analyze status OK
analyze table bug30243_3;
Table Op Msg_type Msg_text
+test.bug30243_3 analyze status Engine-independent statistics collected
test.bug30243_3 analyze status OK
explain SELECT COUNT(*), 0
FROM bug30243_1 orgs
@@ -83,10 +92,12 @@ COUNT(*)
set global innodb_stats_method = "nulls_unequal";
analyze table table_bug30423;
Table Op Msg_type Msg_text
+test.table_bug30423 analyze status Engine-independent statistics collected
test.table_bug30423 analyze status OK
set global innodb_stats_method = "nulls_ignored";
analyze table table_bug30423;
Table Op Msg_type Msg_text
+test.table_bug30423 analyze status Engine-independent statistics collected
test.table_bug30423 analyze status OK
set global innodb_stats_method = nulls_equal;
drop table bug30243_2;
diff --git a/mysql-test/suite/innodb/r/innodb_bug30919.result b/mysql-test/suite/innodb/r/innodb_bug30919.result
index 42aa4ff302b..0062df3f470 100644
--- a/mysql-test/suite/innodb/r/innodb_bug30919.result
+++ b/mysql-test/suite/innodb/r/innodb_bug30919.result
@@ -35,6 +35,10 @@ FROM test.part_tbl; -- debug to show the problem
SET del_count = del_count - 2;
END WHILE;
END|
+Warnings:
+Level Warning
+Code 1287
+Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL test.proc_part();
internal_count del_count
999 1000
diff --git a/mysql-test/suite/innodb/r/innodb_bug51920.result b/mysql-test/suite/innodb/r/innodb_bug51920.result
index ddb9e29fab2..9bc35174979 100644
--- a/mysql-test/suite/innodb/r/innodb_bug51920.result
+++ b/mysql-test/suite/innodb/r/innodb_bug51920.result
@@ -11,6 +11,8 @@ connection default;
SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST
WHERE INFO="UPDATE bug51920 SET i=2"
INTO @thread_id;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
KILL @thread_id;
connection con1;
Got one of the listed errors
diff --git a/mysql-test/suite/innodb/r/innodb_bug53046.result b/mysql-test/suite/innodb/r/innodb_bug53046.result
index 69be6c4e0a7..0321d5ace19 100644
--- a/mysql-test/suite/innodb/r/innodb_bug53046.result
+++ b/mysql-test/suite/innodb/r/innodb_bug53046.result
@@ -16,6 +16,7 @@ FROM bug53046_1;
INSERT INTO bug53046_2 VALUES (1), (2);
ANALYZE TABLE bug53046_1;
Table Op Msg_type Msg_text
+test.bug53046_1 analyze status Engine-independent statistics collected
test.bug53046_1 analyze status OK
SHOW TABLE STATUS LIKE 'bug53046_1';
UPDATE bug53046_1 SET c1 = c1 - 1;
diff --git a/mysql-test/suite/innodb/r/innodb_bug53756.result b/mysql-test/suite/innodb/r/innodb_bug53756.result
index 06fa96c2f81..daa6113f8d4 100644
--- a/mysql-test/suite/innodb/r/innodb_bug53756.result
+++ b/mysql-test/suite/innodb/r/innodb_bug53756.result
@@ -77,6 +77,7 @@ pk c1
4 44
START TRANSACTION;
INSERT INTO bug_53756 VALUES (666,666);
+# restart
disconnect con1;
disconnect con2;
disconnect con3;
diff --git a/mysql-test/suite/innodb/r/innodb_bug57252.result b/mysql-test/suite/innodb/r/innodb_bug57252.result
index efa50c742e0..2e371cb74ee 100644
--- a/mysql-test/suite/innodb/r/innodb_bug57252.result
+++ b/mysql-test/suite/innodb/r/innodb_bug57252.result
@@ -1,6 +1,7 @@
cardinality
10
Table Op Msg_type Msg_text
+test.bug57252 analyze status Engine-independent statistics collected
test.bug57252 analyze status OK
cardinality
10
diff --git a/mysql-test/suite/innodb/r/innodb_bug59641.result b/mysql-test/suite/innodb/r/innodb_bug59641.result
index 8bf574e2bec..634edec71dd 100644
--- a/mysql-test/suite/innodb/r/innodb_bug59641.result
+++ b/mysql-test/suite/innodb/r/innodb_bug59641.result
@@ -17,6 +17,7 @@ UPDATE t SET b=4*a WHERE a=32;
XA END '789';
XA PREPARE '789';
CONNECT con3,localhost,root,,;
+# restart
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
SELECT * FROM t;
a b
@@ -29,6 +30,7 @@ a b
16 16
32 128
COMMIT;
+# restart
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
SELECT * FROM t;
a b
diff --git a/mysql-test/suite/innodb/r/innodb_bug60049.result b/mysql-test/suite/innodb/r/innodb_bug60049.result
index f34ebc0a955..47b02dedd01 100644
--- a/mysql-test/suite/innodb/r/innodb_bug60049.result
+++ b/mysql-test/suite/innodb/r/innodb_bug60049.result
@@ -6,3 +6,4 @@ SELECT @@innodb_fast_shutdown;
0
Last record of ID_IND root page (9):
18080000180500c0000000000000000c5359535f464f524549474e5f434f4c53
+# restart
diff --git a/mysql-test/suite/innodb/r/innodb_bug68148.result b/mysql-test/suite/innodb/r/innodb_bug68148.result
index 88247053389..ee6c1d62f53 100644
--- a/mysql-test/suite/innodb/r/innodb_bug68148.result
+++ b/mysql-test/suite/innodb/r/innodb_bug68148.result
@@ -19,6 +19,7 @@ main
ref_table1
ref_table2
# restart and see if we can still access the main table
+# restart
SET FOREIGN_KEY_CHECKS=0;
ALTER TABLE `main` ADD INDEX `idx_1` (`ref_id1`);
SHOW CREATE TABLE `main`;
diff --git a/mysql-test/suite/innodb/r/innodb_defrag_stats.result b/mysql-test/suite/innodb/r/innodb_defrag_stats.result
index 7092688f07b..598124e4ccb 100644
--- a/mysql-test/suite/innodb/r/innodb_defrag_stats.result
+++ b/mysql-test/suite/innodb/r/innodb_defrag_stats.result
@@ -59,6 +59,7 @@ delete from t1 where a between 100 * 4 and 100 * 4 + 30;
delete from t1 where a between 100 * 3 and 100 * 3 + 30;
delete from t1 where a between 100 * 2 and 100 * 2 + 30;
delete from t1 where a between 100 * 1 and 100 * 1 + 30;
+# restart
# Server Restarted
# Confirm persistent stats still there after restart.
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
@@ -143,6 +144,7 @@ count(stat_value) > 0
select count(stat_value) > 0 from mysql.innodb_index_stats where table_name like '%t2%' and index_name = 'SECOND' and stat_name in ('n_leaf_pages_defrag');
count(stat_value) > 0
1
+# restart
Server Restarted
select count(stat_value) = 0 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_page_split');
count(stat_value) = 0
diff --git a/mysql-test/suite/innodb/r/innodb_defragment.result b/mysql-test/suite/innodb/r/innodb_defragment.result
index 989d3ef3397..533a39eec9d 100644
--- a/mysql-test/suite/innodb/r/innodb_defragment.result
+++ b/mysql-test/suite/innodb/r/innodb_defragment.result
@@ -40,6 +40,7 @@ disconnect con1;
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
+# restart
select count(*) from t1;
count(*)
7904
@@ -68,6 +69,7 @@ SET @@global.innodb_defragment_n_pages = 3;
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
+# restart
select count(stat_value) < 3 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
count(stat_value) < 3
1
@@ -105,6 +107,7 @@ SET @@global.innodb_defragment_n_pages = 10;
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
+# restart
select count(stat_value) > 1 from mysql.innodb_index_stats where table_name like '%t1%' and stat_name in ('n_pages_freed');
count(stat_value) > 1
1
diff --git a/mysql-test/suite/innodb/r/innodb_defragment_fill_factor.result b/mysql-test/suite/innodb/r/innodb_defragment_fill_factor.result
index 5809542db8a..8453050a92a 100644
--- a/mysql-test/suite/innodb/r/innodb_defragment_fill_factor.result
+++ b/mysql-test/suite/innodb/r/innodb_defragment_fill_factor.result
@@ -12,6 +12,7 @@ INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
INSERT INTO t1 (b) SELECT b from t1;
SET GLOBAL innodb_fast_shutdown = 0;
+# restart
optimize table t1;
Table Op Msg_type Msg_text
test.t1 optimize status OK
@@ -57,6 +58,7 @@ DROP TABLE t1;
Testing table with small records
CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY AUTO_INCREMENT, b VARCHAR(16), c VARCHAR(32), KEY SECOND(a,b,c)) ENGINE=INNODB;
SET GLOBAL innodb_fast_shutdown = 0;
+# restart
optimize table t2;
Table Op Msg_type Msg_text
test.t2 optimize status OK
diff --git a/mysql-test/suite/innodb/r/innodb_force_recovery.result b/mysql-test/suite/innodb/r/innodb_force_recovery.result
index 0220f68a626..90b48bac187 100644
--- a/mysql-test/suite/innodb/r/innodb_force_recovery.result
+++ b/mysql-test/suite/innodb/r/innodb_force_recovery.result
@@ -4,6 +4,7 @@ insert into t1 values(1, 2);
insert into t2 values(1, 2);
SET GLOBAL innodb_fast_shutdown = 0;
# Restart the server with innodb_force_recovery as 4.
+# restart: --innodb-force-recovery=4
select * from t1;
f1 f2
1 2
@@ -32,6 +33,7 @@ show tables;
Tables_in_test
t2
# Restart the server with innodb_force_recovery as 5.
+# restart: --innodb-force-recovery=5
select * from t2;
f1 f2
1 2
@@ -59,6 +61,7 @@ show tables;
Tables_in_test
t2
# Restart the server with innodb_force_recovery as 6.
+# restart: --innodb-force-recovery=6
select * from t2;
f1 f2
1 2
@@ -86,6 +89,7 @@ show tables;
Tables_in_test
t2
# Restart the server with innodb_force_recovery=2
+# restart: --innodb-force-recovery=2
select * from t2;
f1 f2
1 2
@@ -100,6 +104,7 @@ disconnect con1;
connection default;
# Kill the server
# Restart the server with innodb_force_recovery=3
+# restart: --innodb-force-recovery=3
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
select * from t2;
f1 f2
@@ -112,6 +117,7 @@ SET GLOBAL innodb_lock_wait_timeout=1;
insert into t2 values(1,2);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
insert into t2 values(9,10);
+# restart
select * from t2;
f1 f2
1 2
diff --git a/mysql-test/suite/innodb/r/innodb_gis.result b/mysql-test/suite/innodb/r/innodb_gis.result
index f8b02bb163a..162219cc6ac 100644
--- a/mysql-test/suite/innodb/r/innodb_gis.result
+++ b/mysql-test/suite/innodb/r/innodb_gis.result
@@ -560,29 +560,29 @@ id select_type table type possible_keys key key_len ref rows Extra
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
1
-INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(1 2)'));
-INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(1 2)'));
+INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(3 4)'));
+INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(3 4)'));
EXPLAIN
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
EXPLAIN
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref p p 28 const # Using where
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
EXPLAIN
SELECT COUNT(*) FROM t2 IGNORE INDEX(p) WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where
SELECT COUNT(*) FROM t2 IGNORE INDEX(p) WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
DROP TABLE t1, t2;
End of 5.0 tests
#
diff --git a/mysql-test/suite/innodb/r/innodb_max_recordsize_32k.result b/mysql-test/suite/innodb/r/innodb_max_recordsize_32k.result
index 117b7e4418e..c20c981653c 100644
--- a/mysql-test/suite/innodb/r/innodb_max_recordsize_32k.result
+++ b/mysql-test/suite/innodb/r/innodb_max_recordsize_32k.result
@@ -325,6 +325,8 @@ LENGTH(col)
FLUSH TABLE t;
ANALYZE TABLE t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
+test.t analyze Warning Engine-independent statistics are not collected for column 'col'
test.t analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t';
@@ -345,6 +347,8 @@ LENGTH(col)
FLUSH TABLE t;
ANALYZE TABLE t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
+test.t analyze Warning Engine-independent statistics are not collected for column 'col'
test.t analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t';
@@ -365,6 +369,8 @@ LENGTH(col)
FLUSH TABLE t;
ANALYZE TABLE t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
+test.t analyze Warning Engine-independent statistics are not collected for column 'col'
test.t analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t';
diff --git a/mysql-test/suite/innodb/r/innodb_max_recordsize_64k.result b/mysql-test/suite/innodb/r/innodb_max_recordsize_64k.result
index de957023ff8..a74e7826729 100644
--- a/mysql-test/suite/innodb/r/innodb_max_recordsize_64k.result
+++ b/mysql-test/suite/innodb/r/innodb_max_recordsize_64k.result
@@ -528,6 +528,8 @@ LENGTH(col)
FLUSH TABLE t;
ANALYZE TABLE t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
+test.t analyze Warning Engine-independent statistics are not collected for column 'col'
test.t analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t';
@@ -548,6 +550,8 @@ LENGTH(col)
FLUSH TABLE t;
ANALYZE TABLE t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
+test.t analyze Warning Engine-independent statistics are not collected for column 'col'
test.t analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t';
@@ -568,6 +572,8 @@ LENGTH(col)
FLUSH TABLE t;
ANALYZE TABLE t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
+test.t analyze Warning Engine-independent statistics are not collected for column 'col'
test.t analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t';
diff --git a/mysql-test/suite/innodb/r/innodb_mysql.result b/mysql-test/suite/innodb/r/innodb_mysql.result
index 3663c18ea44..7a164da41b5 100644
--- a/mysql-test/suite/innodb/r/innodb_mysql.result
+++ b/mysql-test/suite/innodb/r/innodb_mysql.result
@@ -191,8 +191,8 @@ min(7)
7
explain select min(7) from t2i join t1i;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2i ALL NULL NULL NULL NULL 1
-1 SIMPLE t1i ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
+1 SIMPLE t1i ALL NULL NULL NULL NULL 0
+1 SIMPLE t2i ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
select min(7) from t2i join t1i;
min(7)
NULL
@@ -207,8 +207,8 @@ max(7)
7
explain select max(7) from t2i join t1i;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2i ALL NULL NULL NULL NULL 1
-1 SIMPLE t1i ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
+1 SIMPLE t1i ALL NULL NULL NULL NULL 0
+1 SIMPLE t2i ALL NULL NULL NULL NULL 1 Using join buffer (flat, BNL join)
select max(7) from t2i join t1i;
max(7)
NULL
@@ -239,7 +239,7 @@ select 1, max(1) from t1i where 1=99;
explain select count(*), min(7), max(7) from t1m, t1i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1m system NULL NULL NULL NULL 0 Const row not found
-1 SIMPLE t1i ALL NULL NULL NULL NULL 1
+1 SIMPLE t1i ALL NULL NULL NULL NULL 0
select count(*), min(7), max(7) from t1m, t1i;
count(*) min(7) max(7)
0 NULL NULL
@@ -253,7 +253,7 @@ count(*) min(7) max(7)
explain select count(*), min(7), max(7) from t2m, t1i;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2m system NULL NULL NULL NULL 1
-1 SIMPLE t1i ALL NULL NULL NULL NULL 1
+1 SIMPLE t1i ALL NULL NULL NULL NULL 0
select count(*), min(7), max(7) from t2m, t1i;
count(*) min(7) max(7)
0 NULL NULL
@@ -303,6 +303,7 @@ create index idx12672_1 on t4 (a1,a2,b,c);
create index idx12672_2 on t4 (a1,a2,b);
analyze table t4;
Table Op Msg_type Msg_text
+test.t4 analyze status Engine-independent statistics collected
test.t4 analyze status OK
select distinct a1 from t4 where pk_col not in (1,2,3,4);
a1
@@ -384,7 +385,7 @@ INSERT INTO t1(id, dept, age, name) VALUES
(4020, 'cs10', 20, 'rs5'),(4027, 'cs11', 10, 'rs6'),(4028, 'cs12', 20, 'rs6');
EXPLAIN SELECT DISTINCT t1.name, t1.dept FROM t1 WHERE t1.name='rs5';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range name name 44 NULL 2 Using where; Using index for group-by
+1 SIMPLE t1 ref name name 22 const 2 Using where; Using index
SELECT DISTINCT t1.name, t1.dept FROM t1 WHERE t1.name='rs5';
name dept
rs5 cs10
@@ -2352,12 +2353,12 @@ EXPLAIN SELECT c FROM foo2 WHERE b>2;;
id 1
select_type SIMPLE
table foo2
-type range
+type index
possible_keys b
key b
key_len 5
ref NULL
-rows 5
+rows 6
Extra Using where; Using index
EXPLAIN SELECT c FROM bar WHERE c>2;;
id 1
@@ -2699,6 +2700,7 @@ INSERT INTO t1 SELECT c1+100000,c2+100000,c3+100000 from t1;
INSERT INTO t1 SELECT c1+1000000,c2+1000000,c3+1000000 from t1;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SELECT * FROM t1 WHERE c1 = 99999999 AND c3 > 1 ORDER BY c3;
c1 c2 c3
@@ -2851,6 +2853,7 @@ INSERT INTO t1 VALUES (1,1,1,1,1,1), (2,2,2,2,2,2), (3,3,3,3,3,3),
(11,11,11,11,11,11);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN SELECT COUNT(*) FROM t1;
id 1
@@ -3017,7 +3020,7 @@ EXPLAIN SELECT * FROM t1 WHERE f1 IN
3784744,4180925,4559596,3963734,3856391,4494153)
AND f5 = 'abcdefghijklmnopwrst' AND f2 = 1221457 AND f4 = 0 ;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge PRIMARY,idx1,idx2 idx2,idx1,PRIMARY 7,60,4 NULL 1 Using intersect(idx2,idx1,PRIMARY); Using where
+1 SIMPLE t1 range PRIMARY,idx1,idx2 PRIMARY 4 NULL 18 Using where
set optimizer_switch=@tmp_innodb_mysql;
DROP TABLE t1;
#
@@ -3050,7 +3053,7 @@ f1 f2 f3 f4
EXPLAIN SELECT * FROM t1 WHERE f2 = 1 AND f4 = TRUE
ORDER BY f1 DESC LIMIT 5;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range f2,f4 f4 1 NULL 22 Using where
+1 SIMPLE t1 index f2,f4 PRIMARY 4 NULL 5 Using where
DROP TABLE t1;
#
# Bug#54117 crash in thr_multi_unlock, temporary table
@@ -3126,7 +3129,7 @@ INSERT INTO t1 VALUES (0);
SET SQL_MODE='STRICT_ALL_TABLES';
CREATE TABLE t2
SELECT LEAST((SELECT '' FROM t1),NOW()) FROM `t1`;
-ERROR 22007: Incorrect datetime value: ''
+ERROR 22007: Truncated incorrect datetime value: ''
DROP TABLE t1;
SET SQL_MODE=DEFAULT;
#
diff --git a/mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result b/mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result
index a43649f4067..1e97c21c253 100644
--- a/mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result
+++ b/mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result
@@ -29,6 +29,7 @@ COUNT(*)
START TRANSACTION;
connection default;
SET GLOBAL innodb_fast_shutdown=3;
+# restart
SELECT COUNT(*) FROM worklog5743;
COUNT(*)
1
@@ -55,6 +56,7 @@ col_1_text = REPEAT("a", 3500) col_2_text = REPEAT("o", 3500)
1 1
connection default;
SET GLOBAL innodb_fast_shutdown=3;
+# restart
SELECT COUNT(*) FROM worklog5743;
COUNT(*)
1
@@ -79,6 +81,7 @@ col_1_text = REPEAT("b", 3500) col_2_text = REPEAT("o", 3500)
1 1
connection default;
SET GLOBAL innodb_fast_shutdown=3;
+# restart
SELECT COUNT(*) FROM worklog5743;
COUNT(*)
1
diff --git a/mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result b/mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result
index af0714148cf..6b69d2d9ee4 100644
--- a/mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result
+++ b/mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result
@@ -74,7 +74,6 @@ buffer_pages_written buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NUL
buffer_index_pages_written buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of index pages written (innodb_index_pages_written)
buffer_non_index_pages_written buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of non index pages written (innodb_non_index_pages_written)
buffer_pages_read buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of pages read (innodb_pages_read)
-buffer_pages0_read buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of page 0 read (innodb_pages0_read)
buffer_index_sec_rec_cluster_reads buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of secondary record reads triggered cluster read
buffer_index_sec_rec_cluster_reads_avoided buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Number of secondary record reads avoided triggering cluster read
buffer_data_reads buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled status_counter Amount of data read in bytes (innodb_data_reads)
@@ -183,7 +182,6 @@ trx_nl_ro_commits transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL N
trx_commits_insert_update transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of transactions committed with inserts and updates
trx_rollbacks transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of transactions rolled back
trx_rollbacks_savepoint transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of transactions rolled back to savepoint
-trx_rollback_active transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of resurrected active transactions rolled back
trx_active_transactions transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of active transactions
trx_rseg_history_len transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled value Length of the TRX_RSEG_HISTORY list
trx_undo_slots_used transaction 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL disabled counter Number of undo slots used
@@ -387,7 +385,7 @@ SPACE NAME ENCRYPTION_SCHEME KEYSERVER_REQUESTS MIN_KEY_VERSION CURRENT_KEY_VERS
Warnings:
Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.innodb_tablespaces_encryption but the InnoDB storage engine is not installed
select * from information_schema.innodb_tablespaces_scrubbing;
-SPACE NAME COMPRESSED LAST_SCRUB_COMPLETED CURRENT_SCRUB_STARTED CURRENT_SCRUB_ACTIVE_THREADS CURRENT_SCRUB_PAGE_NUMBER CURRENT_SCRUB_MAX_PAGE_NUMBER
+SPACE NAME COMPRESSED LAST_SCRUB_COMPLETED CURRENT_SCRUB_STARTED CURRENT_SCRUB_ACTIVE_THREADS CURRENT_SCRUB_PAGE_NUMBER CURRENT_SCRUB_MAX_PAGE_NUMBER ON_SSD
Warnings:
Warning 1012 InnoDB: SELECTing from INFORMATION_SCHEMA.innodb_tablespaces_scrubbing but the InnoDB storage engine is not installed
select * from information_schema.innodb_mutexes;
diff --git a/mysql-test/suite/innodb/r/innodb_stats.result b/mysql-test/suite/innodb/r/innodb_stats.result
index d2c3bd0127e..eadce75318b 100644
--- a/mysql-test/suite/innodb/r/innodb_stats.result
+++ b/mysql-test/suite/innodb/r/innodb_stats.result
@@ -4,6 +4,7 @@ dummy INSERT, the table should be empty
dummy INSERT, the table should be empty
ANALYZE TABLE test_innodb_stats;
Table Op Msg_type Msg_text
+test.test_innodb_stats analyze status Engine-independent statistics collected
test.test_innodb_stats analyze status OK
SELECT
stat_name,
@@ -56,6 +57,7 @@ TRUNCATE TABLE test_innodb_stats;
INSERT INTO test_innodb_stats (a) VALUES (1);
ANALYZE TABLE test_innodb_stats;
Table Op Msg_type Msg_text
+test.test_innodb_stats analyze status Engine-independent statistics collected
test.test_innodb_stats analyze status OK
SELECT
stat_name,
@@ -108,6 +110,7 @@ TRUNCATE TABLE test_innodb_stats;
INSERT INTO test_innodb_stats (a) VALUES (1), (1);
ANALYZE TABLE test_innodb_stats;
Table Op Msg_type Msg_text
+test.test_innodb_stats analyze status Engine-independent statistics collected
test.test_innodb_stats analyze status OK
SELECT
stat_name,
@@ -160,6 +163,7 @@ TRUNCATE TABLE test_innodb_stats;
INSERT INTO test_innodb_stats (a) VALUES (1), (1), (1);
ANALYZE TABLE test_innodb_stats;
Table Op Msg_type Msg_text
+test.test_innodb_stats analyze status Engine-independent statistics collected
test.test_innodb_stats analyze status OK
SELECT
stat_name,
@@ -212,6 +216,7 @@ TRUNCATE TABLE test_innodb_stats;
INSERT INTO test_innodb_stats (a) VALUES (1), (1), (1), (1), (1), (1), (1), (1), (1), (1);
ANALYZE TABLE test_innodb_stats;
Table Op Msg_type Msg_text
+test.test_innodb_stats analyze status Engine-independent statistics collected
test.test_innodb_stats analyze status OK
SELECT
stat_name,
@@ -264,6 +269,7 @@ TRUNCATE TABLE test_innodb_stats;
INSERT INTO test_innodb_stats (a) VALUES (1), (2);
ANALYZE TABLE test_innodb_stats;
Table Op Msg_type Msg_text
+test.test_innodb_stats analyze status Engine-independent statistics collected
test.test_innodb_stats analyze status OK
SELECT
stat_name,
@@ -316,6 +322,7 @@ TRUNCATE TABLE test_innodb_stats;
INSERT INTO test_innodb_stats (a) VALUES (1), (1), (2);
ANALYZE TABLE test_innodb_stats;
Table Op Msg_type Msg_text
+test.test_innodb_stats analyze status Engine-independent statistics collected
test.test_innodb_stats analyze status OK
SELECT
stat_name,
@@ -368,6 +375,7 @@ TRUNCATE TABLE test_innodb_stats;
INSERT INTO test_innodb_stats (a) VALUES (1), (2), (3);
ANALYZE TABLE test_innodb_stats;
Table Op Msg_type Msg_text
+test.test_innodb_stats analyze status Engine-independent statistics collected
test.test_innodb_stats analyze status OK
SELECT
stat_name,
@@ -420,6 +428,7 @@ TRUNCATE TABLE test_innodb_stats;
INSERT INTO test_innodb_stats (a) VALUES (1), (1), (2), (3), (3);
ANALYZE TABLE test_innodb_stats;
Table Op Msg_type Msg_text
+test.test_innodb_stats analyze status Engine-independent statistics collected
test.test_innodb_stats analyze status OK
SELECT
stat_name,
@@ -472,6 +481,7 @@ TRUNCATE TABLE test_innodb_stats;
INSERT INTO test_innodb_stats (a) VALUES (1), (2), (3), (4), (5), (1), (2), (3), (4), (5);
ANALYZE TABLE test_innodb_stats;
Table Op Msg_type Msg_text
+test.test_innodb_stats analyze status Engine-independent statistics collected
test.test_innodb_stats analyze status OK
SELECT
stat_name,
diff --git a/mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result b/mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result
index b9429046b36..66ef5ed2696 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_create_on_corrupted.result
@@ -17,6 +17,7 @@ avg_row_length 0
max_data_length 0
index_length 0
ALTER TABLE mysql.innodb_index_stats_ RENAME TO mysql.innodb_index_stats;
+# restart
SELECT seq_in_index, column_name, cardinality
FROM information_schema.statistics WHERE table_name = 'test_ps_create_on_corrupted'
ORDER BY index_name, seq_in_index;
diff --git a/mysql-test/suite/innodb/r/innodb_stats_drop_locked.result b/mysql-test/suite/innodb/r/innodb_stats_drop_locked.result
index b8f312ccd63..fc0a590934a 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_drop_locked.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_drop_locked.result
@@ -1,4 +1,5 @@
Table Op Msg_type Msg_text
+test.innodb_stats_drop_locked analyze status Engine-independent statistics collected
test.innodb_stats_drop_locked analyze status OK
SET autocommit=0;
SELECT table_name FROM mysql.innodb_table_stats
diff --git a/mysql-test/suite/innodb/r/innodb_stats_fetch.result b/mysql-test/suite/innodb/r/innodb_stats_fetch.result
index b348e41f1d9..d7b7d78ec71 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_fetch.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_fetch.result
@@ -1,3 +1,5 @@
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
CREATE TABLE test_ps_fetch
(a INT, b INT, c INT, d INT, PRIMARY KEY (a, b), INDEX idx (c, d))
ENGINE=INNODB STATS_PERSISTENT=1;
@@ -5,6 +7,10 @@ ANALYZE TABLE test_ps_fetch;
Table test.test_ps_fetch
Op analyze
Msg_type status
+Msg_text Engine-independent statistics collected
+Table test.test_ps_fetch
+Op analyze
+Msg_type status
Msg_text OK
SELECT n_rows, clustered_index_size, sum_of_other_index_sizes
FROM mysql.innodb_table_stats WHERE table_name = 'test_ps_fetch';
@@ -143,3 +149,4 @@ avg_row_length 81
max_data_length 0
index_length 16384
DROP TABLE test_ps_fetch;
+set @@use_stat_tables = @save_use_stat_tables;
diff --git a/mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result b/mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result
index 0f0e941b838..a368f6271b9 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_fetch_corrupted.result
@@ -7,6 +7,10 @@ ANALYZE TABLE test_ps_fetch_corrupted;
Table test.test_ps_fetch_corrupted
Op analyze
Msg_type status
+Msg_text Engine-independent statistics collected
+Table test.test_ps_fetch_corrupted
+Op analyze
+Msg_type status
Msg_text OK
SELECT n_rows, clustered_index_size, sum_of_other_index_sizes
FROM mysql.innodb_table_stats WHERE table_name = 'test_ps_fetch_corrupted';
@@ -29,3 +33,4 @@ max_data_length 0
index_length 0
ALTER TABLE mysql.innodb_index_stats_ RENAME TO mysql.innodb_index_stats;
DROP TABLE test_ps_fetch_corrupted;
+# restart
diff --git a/mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result b/mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result
index 6093fbae86b..91bb2bf3ecd 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_fetch_nonexistent.result
@@ -6,6 +6,10 @@ ANALYZE TABLE test_ps_fetch_nonexistent;
Table test.test_ps_fetch_nonexistent
Op analyze
Msg_type status
+Msg_text Engine-independent statistics collected
+Table test.test_ps_fetch_nonexistent
+Op analyze
+Msg_type status
Msg_text OK
SELECT COUNT(*)
FROM mysql.innodb_table_stats WHERE table_name = 'test_ps_fetch_nonexistent';
diff --git a/mysql-test/suite/innodb/r/innodb_stats_persistent.result b/mysql-test/suite/innodb/r/innodb_stats_persistent.result
index f4de4b6b82e..11a352e625d 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_persistent.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_persistent.result
@@ -19,6 +19,7 @@ COUNT(*)
16
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
connect con1, localhost, root,,;
START TRANSACTION;
@@ -29,7 +30,7 @@ connection default;
# DELETE must not affect statistics before COMMIT.
EXPLAIN SELECT * FROM t1 WHERE val=4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ref val val 4 const 16 Using index
+1 SIMPLE t1 ref val val 4 const 1 Using index
connection con1;
COUNT(*)
0
@@ -100,7 +101,7 @@ COUNT(*)
# ha_innobase::records_in_range() would count the delete-marked records.
EXPLAIN SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL val 4 NULL 1 Using index
+1 SIMPLE t1 index NULL val 4 NULL 16 Using index
ROLLBACK;
EXPLAIN SELECT * FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/mysql-test/suite/innodb/r/innodb_stats_persistent_debug.result b/mysql-test/suite/innodb/r/innodb_stats_persistent_debug.result
index 9f93f05fd56..ee1ad318d65 100644
--- a/mysql-test/suite/innodb/r/innodb_stats_persistent_debug.result
+++ b/mysql-test/suite/innodb/r/innodb_stats_persistent_debug.result
@@ -17,6 +17,7 @@ SET GLOBAL innodb_limit_optimistic_insert_debug = @save_debug;
connect con1, localhost, root,,;
ANALYZE TABLE t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
disconnect con1;
connection default;
diff --git a/mysql-test/suite/innodb/r/instant_alter.result b/mysql-test/suite/innodb/r/instant_alter.result
index 3ebc161caf1..30eddd51bfb 100644
--- a/mysql-test/suite/innodb/r/instant_alter.result
+++ b/mysql-test/suite/innodb/r/instant_alter.result
@@ -182,8 +182,8 @@ affected rows: 0
info: Records: 0 Duplicates: 0 Warnings: 0
INSERT INTO t1 SET id=9;
ALTER TABLE t1 DROP c3;
-affected rows: 9
-info: Records: 9 Duplicates: 0 Warnings: 0
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -236,6 +236,7 @@ UPDATE t2 SET c1 = repeat(id, 4000);
connection analyze;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t2';
@@ -246,6 +247,7 @@ ROLLBACK;
connection analyze;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t2';
@@ -257,6 +259,7 @@ UPDATE t2 SET d1 = repeat(id, 200);
connection analyze;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t2';
@@ -267,6 +270,7 @@ ROLLBACK;
connection analyze;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t2';
@@ -386,6 +390,7 @@ COMMIT;
connection analyze;
ANALYZE TABLE big;
Table Op Msg_type Msg_text
+test.big analyze status Engine-independent statistics collected
test.big analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/big';
@@ -409,6 +414,7 @@ test.big 385477733
connection analyze;
ANALYZE TABLE big;
Table Op Msg_type Msg_text
+test.big analyze status Engine-independent statistics collected
test.big analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/big';
@@ -422,6 +428,7 @@ test.big 1705165209
connection analyze;
ANALYZE TABLE big;
Table Op Msg_type Msg_text
+test.big analyze status Engine-independent statistics collected
test.big analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/big';
@@ -486,6 +493,391 @@ DELETE FROM t1;
COMMIT;
InnoDB 0 transactions not purged
DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT UNIQUE) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 (a) VALUES (NULL), (NULL);
+ALTER TABLE t1 DROP a, ADD COLUMN a INT;
+DELETE FROM t1;
+BEGIN;
+INSERT INTO t1 SET a=NULL;
+ROLLBACK;
+DELETE FROM t1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, t VARCHAR(33101) NOT NULL) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES(347,'');
+ALTER TABLE t1 DROP COLUMN t, ALGORITHM=INSTANT;
+SELECT * FROM t1;
+a
+347
+DROP TABLE t1;
+CREATE TABLE t1 (a INT) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1() VALUES();
+ALTER TABLE t1 ADD COLUMN b INT FIRST, ADD COLUMN c INT AFTER b;
+SELECT * FROM t1;
+b c a
+NULL NULL NULL
+DROP TABLE t1;
+CREATE TABLE t1 (t TEXT) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+SET @t=REPEAT('x',@@innodb_page_size / 2);
+INSERT INTO t1 VALUES (@t),(@t),(@t),(@t),(@t),(@t),(NULL),(@t),(@t),(@t),(@t);
+ALTER TABLE t1 ADD COLUMN a INT FIRST;
+UPDATE t1 SET a = 0;
+DROP TABLE t1;
+CREATE TABLE t1 (t TEXT) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 SET t = @x;
+ALTER TABLE t1 DROP COLUMN t, ADD COLUMN i INT NOT NULL DEFAULT 1;
+ALTER TABLE t1 ADD COLUMN t TEXT;
+SELECT * FROM t1;
+i t
+1 NULL
+DROP TABLE t1;
+CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, KEY(a)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 SET a=NULL;
+ALTER TABLE t1 DROP COLUMN b;
+ALTER TABLE t1 ADD COLUMN c INT NOT NULL DEFAULT 42;
+INSERT INTO t1 SET a=NULL;
+UPDATE t1 SET a=a+2;
+SELECT * FROM t1;
+a c
+3 42
+4 42
+DROP TABLE t1;
+CREATE TABLE t1 (i INT) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 SET i=1;
+ALTER TABLE t1 ADD COLUMN b BIT FIRST;
+ALTER TABLE t1 ADD COLUMN v INT AS (i) VIRTUAL;
+SELECT * FROM t1;
+b i v
+NULL 1 1
+DROP TABLE t1;
+CREATE TABLE t1 (ts TIMESTAMP) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+ALTER TABLE t1 ADD COLUMN f VARCHAR(8), ADD COLUMN dt DATETIME;
+ALTER TABLE t1 ADD COLUMN b BIT, DROP COLUMN f, ADD COLUMN t TIME FIRST;
+ALTER TABLE t1 ADD COLUMN ts2 TIMESTAMP;
+DROP TABLE t1;
+CREATE TABLE t1 (f1 INT, f2 INT, f3 INT) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES (4,4,4);
+ALTER TABLE t1 DROP f1, DROP f2, ADD f4 INT, ADD f5 INT;
+DELETE FROM t1;
+ALTER TABLE t1 DROP COLUMN f4;
+DROP TABLE t1;
+CREATE TABLE t1 (f1 INT, f2 INT, f3 INT) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+ALTER TABLE t1 DROP f2, ADD COLUMN f4 INT;
+ALTER TABLE t1 DROP f4;
+ALTER TABLE t1 DROP f1;
+DROP TABLE t1;
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, f INT, KEY(id)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+ALTER TABLE t1 DROP COLUMN id;
+INSERT INTO t1 () VALUES (),();
+SELECT * FROM t1;
+f
+NULL
+NULL
+ALTER TABLE t1 ADD COLUMN id INT NOT NULL AUTO_INCREMENT FIRST, ADD KEY(id);
+SELECT * FROM t1;
+id f
+1 NULL
+2 NULL
+DROP TABLE t1;
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, f INT, KEY(id)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 SET f=NULL;
+ALTER TABLE t1 DROP COLUMN id;
+INSERT INTO t1 SET f=NULL;
+SELECT * FROM t1;
+f
+NULL
+NULL
+DROP TABLE t1;
+CREATE TABLE t1(f INT, k INT NOT NULL AUTO_INCREMENT, KEY(k)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+ALTER TABLE t1 DROP COLUMN f;
+INSERT INTO t1 VALUES (1);
+DROP TABLE t1;
+CREATE TABLE t1(pk INT PRIMARY KEY, f INT, k INT AUTO_INCREMENT, KEY(k))
+ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+ALTER TABLE t1 DROP COLUMN f;
+INSERT INTO t1 (pk) VALUES (1);
+DROP TABLE t1;
+CREATE TABLE t1 (
+pk INT PRIMARY KEY,
+f1 INT,
+f2 CHAR(255),
+f3 BIGINT,
+f4 INT,
+f5 CHAR(255),
+f6 CHAR(255),
+f7 CHAR(255) NOT NULL,
+f8 INT,
+f9 CHAR(10)
+) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES
+(1, 1, 'a', 1, 1, 'a', 'a', 'a', 1, 'a'),
+(2, 2, 'b', 2, 2, 'b', 'b', 'b', 2, 'b'),
+(3, 3, 'c', 3, 3, 'c', 'c', 'c', 3, 'c'),
+(4, 4, 'd', 4, 4, 'd', 'd', 'd', 4, 'd'),
+(5, 5, 'e', 5, 5, 'e', 'e', 'e', 5, 'e'),
+(6, 6, 'f', 6, 6, 'f', 'f', 'f', 6, 'f'),
+(7, 7, 'g', 7, 7, 'g', 'g', 'g', 7, 'g'),
+(8, 8, 'h', 8, 8, 'h', 'h', 'h', 8, 'h'),
+(9, 9, 'i', 9, 9, 'i', 'i', 'i', 9, 'i'),
+(10, 0, 'j', 0, 0, 'j', 'j', 'j', 0, 'j'),
+(11, 1, 'k', 1, 1, 'k', 'k', 'k', 1, 'k'),
+(12, 2, 'l', 2, 2, 'l', 'l', 'l', 2, 'l'),
+(13, 3, 'm', 3, 3, 'm', 'm', 'm', 3, 'm'),
+(14, 4, 'n', 4, 4, 'n', 'n', 'n', 4, 'n'),
+(15, 5, 'o', 5, 5, 'o', 'o', 'o', 5, 'o');
+DELETE FROM t1 WHERE pk=1;
+InnoDB 0 transactions not purged
+INSERT INTO t1 VALUES
+(1, 1, 'a', 1, 1, 'a', 'a', 'a', 1, 'a');
+ALTER TABLE t1 DROP COLUMN f1;
+DROP TABLE t1;
+CREATE TABLE t1 (
+pk INT PRIMARY KEY,
+f1 INT, f2 CHAR(32) NOT NULL,
+f3 INT NOT NULL, f4 INT NOT NULL, f5 INT, f6 CHAR(32) NOT NULL,
+f7 CHAR(32), f8 CHAR(32)
+) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES
+(1,9,'',2,88,88,'','',''),(2,48,'',8,68,92,'','',''),
+(3,41,'',56,84,37,'','',''),(4,NULL,'',6,6,NULL,'','',''),
+(5,52,'',37,44,20,'','',''),(6,44,'',53,4,NULL,'','',''),
+(7,24,'',54,8,54,'','',''),(8,80,'',3,52,20,'','',''),
+(9,71,'',34,32,NULL,'','',''),(10,14,'',6,64,88,'','',''),
+(11,48,'',8,25,42,'','',''),(12,16,'',8,7,NULL,'','',''),
+(13,NULL,'',22,0,95,'','',''),(14,4,'',72,48,NULL,'','',''),
+(15,4,'',5,64,2,'','',''),(16,NULL,'',9,40,30,'','',''),
+(17,92,'',48,2,NULL,'','',''),(18,36,'',48,51,7,'','',''),
+(19,NULL,'',80,96,NULL,'','',''),(20,96,'',9,80,NULL,'','',''),
+(21,50,'',16,40,NULL,'','',''),(22,NULL,'',7,84,8,'','',''),
+(23,28,'',93,80,NULL,'','',''),(24,31,'',40,38,NULL,'','',''),
+(25,85,'',8,5,88,'','',''),(26,66,'',8,32,4,'','',''),
+(51,52,'',6,92,15,'','',''),(52,77,'',24,24,28,'','',''),
+(53,8,'',75,31,NULL,'','',''),(54,48,'',5,8,1,'','',''),
+(55,90,'',56,12,5,'','',''),(56,92,'',4,9,88,'','',''),
+(57,83,'',23,40,72,'','',''),(58,7,'',4,40,32,'','',''),
+(59,28,'',2,3,32,'','',''),(60,16,'',80,4,NULL,'','',''),
+(61,44,'',88,24,NULL,'','',''),(62,4,'',5,25,3,'','',''),
+(63,NULL,'',7,24,76,'','',''),(64,0,'',13,40,73,'','',''),
+(101,NULL,'',1,49,75,'','',''),(102,34,'',10,17,20,'','',''),
+(103,8,'',2,2,NULL,'','',''),(104,12,'',44,48,52,'','',''),
+(105,8,'',4,19,38,'','',''),(106,20,'',6,80,9,'','',''),
+(107,72,'',72,16,56,'','',''),(108,76,'',98,24,21,'','',''),
+(109,67,'',16,91,NULL,'','',''),(110,72,'',72,3,48,'','',''),
+(151,8,'',3,86,NULL,'','',''),(152,NULL,'',52,72,0,'','',''),
+(153,NULL,'',46,30,92,'','',''),(154,80,'',1,40,48,'','',''),
+(155,24,'',68,68,8,'','',''),(156,85,'',85,72,60,'','',''),
+(157,7,'',7,12,6,'','',''),(158,NULL,'',48,48,80,'','',''),
+(159,12,'',0,36,0,'','',''),(160,2,'',6,52,NULL,'','',''),
+(201,0,'',1,3,NULL,'','',''),(202,NULL,'',3,53,14,'','',''),
+(203,84,'',6,20,NULL,'','',''),(204,38,'',25,13,88,'','',''),
+(205,1,'',2,69,5,'','',''),(206,7,'',60,22,NULL,'','',''),
+(207,NULL,'',5,4,NULL,'','',''),(251,7,'',0,4,40,'','',''),
+(252,4,'',16,8,NULL,'','',''),(253,14,'',60,12,99,'','',''),
+(254,84,'',68,16,5,'','',''),(255,3,'',70,36,61,'','',''),
+(256,7,'',18,48,NULL,'','',''),(257,NULL,'',68,53,NULL,'','',''),
+(258,29,'',52,16,64,'','',''),(259,NULL,'',80,92,40,'','',''),
+(301,68,'',1,48,48,'','',''),(302,2,'',1,1,32,'','',''),
+(303,44,'',60,96,16,'','',''),(304,32,'',52,64,32,'','',''),
+(305,88,'',37,72,NULL,'','',''),(306,5,'',35,60,20,'','',''),
+(307,35,'',4,48,NULL,'','',''),(308,4,'',92,44,80,'','',''),
+(351,48,'',60,4,40,'','',''),(352,7,'',9,61,13,'','',''),
+(353,0,'',5,93,53,'','',''),(354,7,'',1,20,NULL,'','',''),
+(355,84,'',5,48,96,'','',''),(356,NULL,'',39,92,36,'','',''),
+(357,88,'',9,76,44,'','',''),(358,66,'',34,67,80,'','',''),
+(359,8,'',8,52,NULL,'','',''),(360,3,'',53,83,NULL,'','',''),
+(361,23,'',44,9,48,'','',''),(362,4,'',0,54,48,'','',''),
+(363,75,'',66,76,52,'','','');
+ALTER TABLE t1 ADD COLUMN x VARCHAR(255) DEFAULT ' foobar ';
+UPDATE t1 SET f1 = 0;
+ALTER TABLE t1 DROP COLUMN x;
+DROP TABLE t1;
+CREATE TABLE t1 (f1 VARCHAR(1), f2 VARCHAR(2)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+ALTER TABLE t1 MODIFY f2 VARCHAR (8) FIRST;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT UNIQUE, b INT UNIQUE, PRIMARY KEY(a,b)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+ALTER TABLE t1 DROP PRIMARY KEY;
+ALTER TABLE t1 CHANGE COLUMN a a INT;
+DELETE FROM t1 WHERE a = NULL OR a IS NULL;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT, c INT NOT NULL, d INT,
+e INT, f INT, g INT, h INT, j INT) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+ALTER TABLE t1 MODIFY COLUMN c INT, MODIFY COLUMN a INT AFTER b;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT NOT NULL, b INT NOT NULL) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES (0,0);
+ALTER TABLE t1 MODIFY a INT AFTER b;
+ALTER TABLE t1 ADD COLUMN v INT AS (a) VIRTUAL;
+ALTER TABLE t1 MODIFY b INT NOT NULL AFTER a;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT NOT NULL) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES (1);
+ALTER TABLE t1 ADD COLUMN b INT;
+ALTER TABLE t1 MODIFY COLUMN a INT NULL;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c INT) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 SET a=1;
+ALTER TABLE t1 DROP c;
+ALTER TABLE t1 DROP b, ADD v INT AS (a);
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c INT, d INT) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 SET a=1;
+ALTER TABLE t1 DROP c;
+ALTER TABLE t1 DROP b, ADD v INT AS (a);
+DROP TABLE t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, b BLOB NOT NULL) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES (1,10,REPEAT('foobar',2000));
+ALTER TABLE t1 DROP COLUMN b;
+INSERT INTO t1 VALUES (2,20);
+ALTER TABLE t1 ADD COLUMN vpk INT AS (pk);
+ALTER TABLE t1 DROP COLUMN i;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES (1,1);
+ALTER TABLE t1 ADD f DATE AFTER a;
+ALTER TABLE t1 DROP b, DROP f;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES (1,1);
+ALTER TABLE t1 ADD COLUMN f INT AFTER a;
+ALTER TABLE t1 DROP b, DROP f;
+DROP TABLE t1;
+CREATE TABLE t1(t TEXT NOT NULL, FULLTEXT(t)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+ALTER TABLE t1 MODIFY COLUMN t TEXT;
+DROP TABLE t1;
+CREATE TABLE t1 (f TINYINT, g SMALLINT UNSIGNED) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES(127,6502),(-128,33101);
+ALTER TABLE t1 MODIFY f SMALLINT DEFAULT 12345,
+MODIFY g BIGINT UNSIGNED DEFAULT 1234567;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+SELECT * FROM t1;
+f g
+127 6502
+-128 33101
+DROP TABLE t1;
+CREATE TABLE t1 (f BIT(8)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES (b'10000000'),(b'00000001');
+ALTER TABLE t1 MODIFY f BIT(16);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+INSERT INTO t1 VALUES (b'1000000010101111'),(b'10000000');
+SELECT HEX(f) FROM t1;
+HEX(f)
+80
+1
+80AF
+80
+ALTER TABLE t1 MODIFY f SMALLINT;
+ERROR 22003: Out of range value for column 'f' at row 3
+ALTER TABLE t1 MODIFY f SMALLINT UNSIGNED;
+affected rows: 4
+info: Records: 4 Duplicates: 0 Warnings: 0
+SELECT * FROM t1;
+f
+128
+1
+32943
+128
+ALTER TABLE t1 MODIFY f BIT;
+ERROR 22001: Data too long for column 'f' at row 1
+ALTER TABLE t1 MODIFY f BIT(15);
+ERROR 22001: Data too long for column 'f' at row 3
+DELETE FROM t1 LIMIT 3;
+ALTER TABLE t1 MODIFY f BIT(15);
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 MODIFY f BIT(8);
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+SELECT HEX(f) FROM t1;
+HEX(f)
+80
+DROP TABLE t1;
+CREATE TABLE t1 (b BIT NOT NULL) ENGINE=InnoDB ROW_FORMAT=REDUNDANT DEFAULT CHARSET utf16;
+INSERT INTO t1 SET b=b'1';
+ALTER TABLE t1 CHANGE b c BIT NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+SELECT HEX(c) FROM t1;
+HEX(c)
+1
+DROP TABLE t1;
+CREATE TABLE t1 (c VARCHAR(10) NOT NULL DEFAULT 'scary') ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1() VALUES();
+ALTER TABLE t1 ADD f TINYINT NOT NULL DEFAULT -42;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 MODIFY f MEDIUMINT NOT NULL DEFAULT 64802,
+MODIFY c VARCHAR(20) NOT NULL DEFAULT 'gory',
+ADD d DATETIME;
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+INSERT INTO t1() VALUES();
+INSERT INTO t1 (c,f,d) VALUES ('fury', -8388608, now());
+SELECT * FROM t1;
+c f d
+scary -42 NULL
+gory 64802 NULL
+fury -8388608 1970-01-01 03:00:42
+DROP TABLE t1;
+CREATE TABLE t1 (t TINYINT PRIMARY KEY, m MEDIUMINT UNIQUE) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+SELECT table_id INTO @table_id1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
+WHERE name = 'test/t1';
+INSERT INTO t1 VALUES (-42, -123456);
+ALTER TABLE t1 CHANGE t s SMALLINT;
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+SELECT table_id INTO @table_id2 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
+WHERE name = 'test/t1';
+affected rows: 1
+ALTER TABLE t1 CHANGE m i INT, ALGORITHM=INSTANT;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+ALTER TABLE t1 CHANGE m i INT;
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+SELECT table_id INTO @table_id3 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
+WHERE name = 'test/t1';
+affected rows: 1
+SELECT @table_id1 = @table_id2, @table_id2 = @table_id3;
+@table_id1 = @table_id2 @table_id2 = @table_id3
+0 0
+INSERT IGNORE INTO t1 VALUES (0, -123456);
+Warnings:
+Warning 1062 Duplicate entry '-123456' for key 'm'
+REPLACE INTO t1 VALUES(-42, 123456);
+INSERT IGNORE INTO t1 VALUES(32768, 2147483648);
+Warnings:
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 'i' at row 1
+SELECT * FROM t1;
+s i
+-42 123456
+32767 2147483647
+DROP TABLE t1;
+CREATE TABLE t1 (a SERIAL, b INT, c TINYINT UNIQUE) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 (c) VALUES(1),(2),(3);
+ALTER TABLE t1 MODIFY c BIGINT;
+affected rows: 3
+info: Records: 3 Duplicates: 0 Warnings: 0
+UPDATE t1 SET b=1 WHERE c=2;
+UPDATE t1 SET c=4 WHERE a=3;
+UPDATE t1 SET b=2 WHERE c>3;
+UPDATE t1 SET c=c+1;
+ERROR 23000: Duplicate entry '2' for key 'c'
+SELECT * FROM t1;
+a b c
+1 NULL 1
+2 1 2
+3 2 4
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(1)) ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+INSERT INTO t1 VALUES(1,'a');
+ALTER TABLE t1 MODIFY b VARCHAR(256), ADD COLUMN c INT;
+INSERT INTO t1 VALUES(2,'bah',3);
+SELECT * FROM t1;
+a b c
+1 a NULL
+2 bah 3
+DROP TABLE t1;
CREATE TABLE t1
(id INT PRIMARY KEY, c2 INT UNIQUE,
c3 POINT NOT NULL DEFAULT ST_GeomFromText('POINT(3 4)'),
@@ -614,8 +1006,8 @@ affected rows: 0
info: Records: 0 Duplicates: 0 Warnings: 0
INSERT INTO t1 SET id=9;
ALTER TABLE t1 DROP c3;
-affected rows: 9
-info: Records: 9 Duplicates: 0 Warnings: 0
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -668,6 +1060,7 @@ UPDATE t2 SET c1 = repeat(id, 4000);
connection analyze;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t2';
@@ -678,6 +1071,7 @@ ROLLBACK;
connection analyze;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t2';
@@ -689,6 +1083,7 @@ UPDATE t2 SET d1 = repeat(id, 200);
connection analyze;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t2';
@@ -699,6 +1094,7 @@ ROLLBACK;
connection analyze;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t2';
@@ -818,6 +1214,7 @@ COMMIT;
connection analyze;
ANALYZE TABLE big;
Table Op Msg_type Msg_text
+test.big analyze status Engine-independent statistics collected
test.big analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/big';
@@ -841,6 +1238,7 @@ test.big 385477733
connection analyze;
ANALYZE TABLE big;
Table Op Msg_type Msg_text
+test.big analyze status Engine-independent statistics collected
test.big analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/big';
@@ -854,6 +1252,7 @@ test.big 1705165209
connection analyze;
ANALYZE TABLE big;
Table Op Msg_type Msg_text
+test.big analyze status Engine-independent statistics collected
test.big analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/big';
@@ -918,6 +1317,391 @@ DELETE FROM t1;
COMMIT;
InnoDB 0 transactions not purged
DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT UNIQUE) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 (a) VALUES (NULL), (NULL);
+ALTER TABLE t1 DROP a, ADD COLUMN a INT;
+DELETE FROM t1;
+BEGIN;
+INSERT INTO t1 SET a=NULL;
+ROLLBACK;
+DELETE FROM t1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, t VARCHAR(33101) NOT NULL) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES(347,'');
+ALTER TABLE t1 DROP COLUMN t, ALGORITHM=INSTANT;
+SELECT * FROM t1;
+a
+347
+DROP TABLE t1;
+CREATE TABLE t1 (a INT) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1() VALUES();
+ALTER TABLE t1 ADD COLUMN b INT FIRST, ADD COLUMN c INT AFTER b;
+SELECT * FROM t1;
+b c a
+NULL NULL NULL
+DROP TABLE t1;
+CREATE TABLE t1 (t TEXT) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+SET @t=REPEAT('x',@@innodb_page_size / 2);
+INSERT INTO t1 VALUES (@t),(@t),(@t),(@t),(@t),(@t),(NULL),(@t),(@t),(@t),(@t);
+ALTER TABLE t1 ADD COLUMN a INT FIRST;
+UPDATE t1 SET a = 0;
+DROP TABLE t1;
+CREATE TABLE t1 (t TEXT) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 SET t = @x;
+ALTER TABLE t1 DROP COLUMN t, ADD COLUMN i INT NOT NULL DEFAULT 1;
+ALTER TABLE t1 ADD COLUMN t TEXT;
+SELECT * FROM t1;
+i t
+1 NULL
+DROP TABLE t1;
+CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, KEY(a)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 SET a=NULL;
+ALTER TABLE t1 DROP COLUMN b;
+ALTER TABLE t1 ADD COLUMN c INT NOT NULL DEFAULT 42;
+INSERT INTO t1 SET a=NULL;
+UPDATE t1 SET a=a+2;
+SELECT * FROM t1;
+a c
+3 42
+4 42
+DROP TABLE t1;
+CREATE TABLE t1 (i INT) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 SET i=1;
+ALTER TABLE t1 ADD COLUMN b BIT FIRST;
+ALTER TABLE t1 ADD COLUMN v INT AS (i) VIRTUAL;
+SELECT * FROM t1;
+b i v
+NULL 1 1
+DROP TABLE t1;
+CREATE TABLE t1 (ts TIMESTAMP) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+ALTER TABLE t1 ADD COLUMN f VARCHAR(8), ADD COLUMN dt DATETIME;
+ALTER TABLE t1 ADD COLUMN b BIT, DROP COLUMN f, ADD COLUMN t TIME FIRST;
+ALTER TABLE t1 ADD COLUMN ts2 TIMESTAMP;
+DROP TABLE t1;
+CREATE TABLE t1 (f1 INT, f2 INT, f3 INT) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES (4,4,4);
+ALTER TABLE t1 DROP f1, DROP f2, ADD f4 INT, ADD f5 INT;
+DELETE FROM t1;
+ALTER TABLE t1 DROP COLUMN f4;
+DROP TABLE t1;
+CREATE TABLE t1 (f1 INT, f2 INT, f3 INT) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+ALTER TABLE t1 DROP f2, ADD COLUMN f4 INT;
+ALTER TABLE t1 DROP f4;
+ALTER TABLE t1 DROP f1;
+DROP TABLE t1;
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, f INT, KEY(id)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+ALTER TABLE t1 DROP COLUMN id;
+INSERT INTO t1 () VALUES (),();
+SELECT * FROM t1;
+f
+NULL
+NULL
+ALTER TABLE t1 ADD COLUMN id INT NOT NULL AUTO_INCREMENT FIRST, ADD KEY(id);
+SELECT * FROM t1;
+id f
+1 NULL
+2 NULL
+DROP TABLE t1;
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, f INT, KEY(id)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 SET f=NULL;
+ALTER TABLE t1 DROP COLUMN id;
+INSERT INTO t1 SET f=NULL;
+SELECT * FROM t1;
+f
+NULL
+NULL
+DROP TABLE t1;
+CREATE TABLE t1(f INT, k INT NOT NULL AUTO_INCREMENT, KEY(k)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+ALTER TABLE t1 DROP COLUMN f;
+INSERT INTO t1 VALUES (1);
+DROP TABLE t1;
+CREATE TABLE t1(pk INT PRIMARY KEY, f INT, k INT AUTO_INCREMENT, KEY(k))
+ENGINE=InnoDB ROW_FORMAT=COMPACT;
+ALTER TABLE t1 DROP COLUMN f;
+INSERT INTO t1 (pk) VALUES (1);
+DROP TABLE t1;
+CREATE TABLE t1 (
+pk INT PRIMARY KEY,
+f1 INT,
+f2 CHAR(255),
+f3 BIGINT,
+f4 INT,
+f5 CHAR(255),
+f6 CHAR(255),
+f7 CHAR(255) NOT NULL,
+f8 INT,
+f9 CHAR(10)
+) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES
+(1, 1, 'a', 1, 1, 'a', 'a', 'a', 1, 'a'),
+(2, 2, 'b', 2, 2, 'b', 'b', 'b', 2, 'b'),
+(3, 3, 'c', 3, 3, 'c', 'c', 'c', 3, 'c'),
+(4, 4, 'd', 4, 4, 'd', 'd', 'd', 4, 'd'),
+(5, 5, 'e', 5, 5, 'e', 'e', 'e', 5, 'e'),
+(6, 6, 'f', 6, 6, 'f', 'f', 'f', 6, 'f'),
+(7, 7, 'g', 7, 7, 'g', 'g', 'g', 7, 'g'),
+(8, 8, 'h', 8, 8, 'h', 'h', 'h', 8, 'h'),
+(9, 9, 'i', 9, 9, 'i', 'i', 'i', 9, 'i'),
+(10, 0, 'j', 0, 0, 'j', 'j', 'j', 0, 'j'),
+(11, 1, 'k', 1, 1, 'k', 'k', 'k', 1, 'k'),
+(12, 2, 'l', 2, 2, 'l', 'l', 'l', 2, 'l'),
+(13, 3, 'm', 3, 3, 'm', 'm', 'm', 3, 'm'),
+(14, 4, 'n', 4, 4, 'n', 'n', 'n', 4, 'n'),
+(15, 5, 'o', 5, 5, 'o', 'o', 'o', 5, 'o');
+DELETE FROM t1 WHERE pk=1;
+InnoDB 0 transactions not purged
+INSERT INTO t1 VALUES
+(1, 1, 'a', 1, 1, 'a', 'a', 'a', 1, 'a');
+ALTER TABLE t1 DROP COLUMN f1;
+DROP TABLE t1;
+CREATE TABLE t1 (
+pk INT PRIMARY KEY,
+f1 INT, f2 CHAR(32) NOT NULL,
+f3 INT NOT NULL, f4 INT NOT NULL, f5 INT, f6 CHAR(32) NOT NULL,
+f7 CHAR(32), f8 CHAR(32)
+) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES
+(1,9,'',2,88,88,'','',''),(2,48,'',8,68,92,'','',''),
+(3,41,'',56,84,37,'','',''),(4,NULL,'',6,6,NULL,'','',''),
+(5,52,'',37,44,20,'','',''),(6,44,'',53,4,NULL,'','',''),
+(7,24,'',54,8,54,'','',''),(8,80,'',3,52,20,'','',''),
+(9,71,'',34,32,NULL,'','',''),(10,14,'',6,64,88,'','',''),
+(11,48,'',8,25,42,'','',''),(12,16,'',8,7,NULL,'','',''),
+(13,NULL,'',22,0,95,'','',''),(14,4,'',72,48,NULL,'','',''),
+(15,4,'',5,64,2,'','',''),(16,NULL,'',9,40,30,'','',''),
+(17,92,'',48,2,NULL,'','',''),(18,36,'',48,51,7,'','',''),
+(19,NULL,'',80,96,NULL,'','',''),(20,96,'',9,80,NULL,'','',''),
+(21,50,'',16,40,NULL,'','',''),(22,NULL,'',7,84,8,'','',''),
+(23,28,'',93,80,NULL,'','',''),(24,31,'',40,38,NULL,'','',''),
+(25,85,'',8,5,88,'','',''),(26,66,'',8,32,4,'','',''),
+(51,52,'',6,92,15,'','',''),(52,77,'',24,24,28,'','',''),
+(53,8,'',75,31,NULL,'','',''),(54,48,'',5,8,1,'','',''),
+(55,90,'',56,12,5,'','',''),(56,92,'',4,9,88,'','',''),
+(57,83,'',23,40,72,'','',''),(58,7,'',4,40,32,'','',''),
+(59,28,'',2,3,32,'','',''),(60,16,'',80,4,NULL,'','',''),
+(61,44,'',88,24,NULL,'','',''),(62,4,'',5,25,3,'','',''),
+(63,NULL,'',7,24,76,'','',''),(64,0,'',13,40,73,'','',''),
+(101,NULL,'',1,49,75,'','',''),(102,34,'',10,17,20,'','',''),
+(103,8,'',2,2,NULL,'','',''),(104,12,'',44,48,52,'','',''),
+(105,8,'',4,19,38,'','',''),(106,20,'',6,80,9,'','',''),
+(107,72,'',72,16,56,'','',''),(108,76,'',98,24,21,'','',''),
+(109,67,'',16,91,NULL,'','',''),(110,72,'',72,3,48,'','',''),
+(151,8,'',3,86,NULL,'','',''),(152,NULL,'',52,72,0,'','',''),
+(153,NULL,'',46,30,92,'','',''),(154,80,'',1,40,48,'','',''),
+(155,24,'',68,68,8,'','',''),(156,85,'',85,72,60,'','',''),
+(157,7,'',7,12,6,'','',''),(158,NULL,'',48,48,80,'','',''),
+(159,12,'',0,36,0,'','',''),(160,2,'',6,52,NULL,'','',''),
+(201,0,'',1,3,NULL,'','',''),(202,NULL,'',3,53,14,'','',''),
+(203,84,'',6,20,NULL,'','',''),(204,38,'',25,13,88,'','',''),
+(205,1,'',2,69,5,'','',''),(206,7,'',60,22,NULL,'','',''),
+(207,NULL,'',5,4,NULL,'','',''),(251,7,'',0,4,40,'','',''),
+(252,4,'',16,8,NULL,'','',''),(253,14,'',60,12,99,'','',''),
+(254,84,'',68,16,5,'','',''),(255,3,'',70,36,61,'','',''),
+(256,7,'',18,48,NULL,'','',''),(257,NULL,'',68,53,NULL,'','',''),
+(258,29,'',52,16,64,'','',''),(259,NULL,'',80,92,40,'','',''),
+(301,68,'',1,48,48,'','',''),(302,2,'',1,1,32,'','',''),
+(303,44,'',60,96,16,'','',''),(304,32,'',52,64,32,'','',''),
+(305,88,'',37,72,NULL,'','',''),(306,5,'',35,60,20,'','',''),
+(307,35,'',4,48,NULL,'','',''),(308,4,'',92,44,80,'','',''),
+(351,48,'',60,4,40,'','',''),(352,7,'',9,61,13,'','',''),
+(353,0,'',5,93,53,'','',''),(354,7,'',1,20,NULL,'','',''),
+(355,84,'',5,48,96,'','',''),(356,NULL,'',39,92,36,'','',''),
+(357,88,'',9,76,44,'','',''),(358,66,'',34,67,80,'','',''),
+(359,8,'',8,52,NULL,'','',''),(360,3,'',53,83,NULL,'','',''),
+(361,23,'',44,9,48,'','',''),(362,4,'',0,54,48,'','',''),
+(363,75,'',66,76,52,'','','');
+ALTER TABLE t1 ADD COLUMN x VARCHAR(255) DEFAULT ' foobar ';
+UPDATE t1 SET f1 = 0;
+ALTER TABLE t1 DROP COLUMN x;
+DROP TABLE t1;
+CREATE TABLE t1 (f1 VARCHAR(1), f2 VARCHAR(2)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+ALTER TABLE t1 MODIFY f2 VARCHAR (8) FIRST;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT UNIQUE, b INT UNIQUE, PRIMARY KEY(a,b)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+ALTER TABLE t1 DROP PRIMARY KEY;
+ALTER TABLE t1 CHANGE COLUMN a a INT;
+DELETE FROM t1 WHERE a = NULL OR a IS NULL;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT, c INT NOT NULL, d INT,
+e INT, f INT, g INT, h INT, j INT) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+ALTER TABLE t1 MODIFY COLUMN c INT, MODIFY COLUMN a INT AFTER b;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT NOT NULL, b INT NOT NULL) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES (0,0);
+ALTER TABLE t1 MODIFY a INT AFTER b;
+ALTER TABLE t1 ADD COLUMN v INT AS (a) VIRTUAL;
+ALTER TABLE t1 MODIFY b INT NOT NULL AFTER a;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT NOT NULL) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES (1);
+ALTER TABLE t1 ADD COLUMN b INT;
+ALTER TABLE t1 MODIFY COLUMN a INT NULL;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c INT) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 SET a=1;
+ALTER TABLE t1 DROP c;
+ALTER TABLE t1 DROP b, ADD v INT AS (a);
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c INT, d INT) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 SET a=1;
+ALTER TABLE t1 DROP c;
+ALTER TABLE t1 DROP b, ADD v INT AS (a);
+DROP TABLE t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, b BLOB NOT NULL) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES (1,10,REPEAT('foobar',2000));
+ALTER TABLE t1 DROP COLUMN b;
+INSERT INTO t1 VALUES (2,20);
+ALTER TABLE t1 ADD COLUMN vpk INT AS (pk);
+ALTER TABLE t1 DROP COLUMN i;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES (1,1);
+ALTER TABLE t1 ADD f DATE AFTER a;
+ALTER TABLE t1 DROP b, DROP f;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES (1,1);
+ALTER TABLE t1 ADD COLUMN f INT AFTER a;
+ALTER TABLE t1 DROP b, DROP f;
+DROP TABLE t1;
+CREATE TABLE t1(t TEXT NOT NULL, FULLTEXT(t)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+ALTER TABLE t1 MODIFY COLUMN t TEXT;
+DROP TABLE t1;
+CREATE TABLE t1 (f TINYINT, g SMALLINT UNSIGNED) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES(127,6502),(-128,33101);
+ALTER TABLE t1 MODIFY f SMALLINT DEFAULT 12345,
+MODIFY g BIGINT UNSIGNED DEFAULT 1234567;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+SELECT * FROM t1;
+f g
+127 6502
+-128 33101
+DROP TABLE t1;
+CREATE TABLE t1 (f BIT(8)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES (b'10000000'),(b'00000001');
+ALTER TABLE t1 MODIFY f BIT(16);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+INSERT INTO t1 VALUES (b'1000000010101111'),(b'10000000');
+SELECT HEX(f) FROM t1;
+HEX(f)
+80
+1
+80AF
+80
+ALTER TABLE t1 MODIFY f SMALLINT;
+ERROR 22003: Out of range value for column 'f' at row 3
+ALTER TABLE t1 MODIFY f SMALLINT UNSIGNED;
+affected rows: 4
+info: Records: 4 Duplicates: 0 Warnings: 0
+SELECT * FROM t1;
+f
+128
+1
+32943
+128
+ALTER TABLE t1 MODIFY f BIT;
+ERROR 22001: Data too long for column 'f' at row 1
+ALTER TABLE t1 MODIFY f BIT(15);
+ERROR 22001: Data too long for column 'f' at row 3
+DELETE FROM t1 LIMIT 3;
+ALTER TABLE t1 MODIFY f BIT(15);
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 MODIFY f BIT(8);
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+SELECT HEX(f) FROM t1;
+HEX(f)
+80
+DROP TABLE t1;
+CREATE TABLE t1 (b BIT NOT NULL) ENGINE=InnoDB ROW_FORMAT=COMPACT DEFAULT CHARSET utf16;
+INSERT INTO t1 SET b=b'1';
+ALTER TABLE t1 CHANGE b c BIT NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+SELECT HEX(c) FROM t1;
+HEX(c)
+1
+DROP TABLE t1;
+CREATE TABLE t1 (c VARCHAR(10) NOT NULL DEFAULT 'scary') ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1() VALUES();
+ALTER TABLE t1 ADD f TINYINT NOT NULL DEFAULT -42;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 MODIFY f MEDIUMINT NOT NULL DEFAULT 64802,
+MODIFY c VARCHAR(20) NOT NULL DEFAULT 'gory',
+ADD d DATETIME;
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+INSERT INTO t1() VALUES();
+INSERT INTO t1 (c,f,d) VALUES ('fury', -8388608, now());
+SELECT * FROM t1;
+c f d
+scary -42 NULL
+gory 64802 NULL
+fury -8388608 1970-01-01 03:00:42
+DROP TABLE t1;
+CREATE TABLE t1 (t TINYINT PRIMARY KEY, m MEDIUMINT UNIQUE) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+SELECT table_id INTO @table_id1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
+WHERE name = 'test/t1';
+INSERT INTO t1 VALUES (-42, -123456);
+ALTER TABLE t1 CHANGE t s SMALLINT;
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+SELECT table_id INTO @table_id2 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
+WHERE name = 'test/t1';
+affected rows: 1
+ALTER TABLE t1 CHANGE m i INT, ALGORITHM=INSTANT;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+ALTER TABLE t1 CHANGE m i INT;
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+SELECT table_id INTO @table_id3 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
+WHERE name = 'test/t1';
+affected rows: 1
+SELECT @table_id1 = @table_id2, @table_id2 = @table_id3;
+@table_id1 = @table_id2 @table_id2 = @table_id3
+0 0
+INSERT IGNORE INTO t1 VALUES (0, -123456);
+Warnings:
+Warning 1062 Duplicate entry '-123456' for key 'm'
+REPLACE INTO t1 VALUES(-42, 123456);
+INSERT IGNORE INTO t1 VALUES(32768, 2147483648);
+Warnings:
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 'i' at row 1
+SELECT * FROM t1;
+s i
+-42 123456
+32767 2147483647
+DROP TABLE t1;
+CREATE TABLE t1 (a SERIAL, b INT, c TINYINT UNIQUE) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 (c) VALUES(1),(2),(3);
+ALTER TABLE t1 MODIFY c BIGINT;
+affected rows: 3
+info: Records: 3 Duplicates: 0 Warnings: 0
+UPDATE t1 SET b=1 WHERE c=2;
+UPDATE t1 SET c=4 WHERE a=3;
+UPDATE t1 SET b=2 WHERE c>3;
+UPDATE t1 SET c=c+1;
+ERROR 23000: Duplicate entry '2' for key 'c'
+SELECT * FROM t1;
+a b c
+1 NULL 1
+2 1 2
+3 2 4
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(1)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+INSERT INTO t1 VALUES(1,'a');
+ALTER TABLE t1 MODIFY b VARCHAR(256), ADD COLUMN c INT;
+INSERT INTO t1 VALUES(2,'bah',3);
+SELECT * FROM t1;
+a b c
+1 a NULL
+2 bah 3
+DROP TABLE t1;
CREATE TABLE t1
(id INT PRIMARY KEY, c2 INT UNIQUE,
c3 POINT NOT NULL DEFAULT ST_GeomFromText('POINT(3 4)'),
@@ -1046,8 +1830,8 @@ affected rows: 0
info: Records: 0 Duplicates: 0 Warnings: 0
INSERT INTO t1 SET id=9;
ALTER TABLE t1 DROP c3;
-affected rows: 9
-info: Records: 9 Duplicates: 0 Warnings: 0
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1100,6 +1884,7 @@ UPDATE t2 SET c1 = repeat(id, 4000);
connection analyze;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t2';
@@ -1110,6 +1895,7 @@ ROLLBACK;
connection analyze;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t2';
@@ -1121,6 +1907,7 @@ UPDATE t2 SET d1 = repeat(id, 200);
connection analyze;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t2';
@@ -1131,6 +1918,7 @@ ROLLBACK;
connection analyze;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/t2';
@@ -1250,6 +2038,7 @@ COMMIT;
connection analyze;
ANALYZE TABLE big;
Table Op Msg_type Msg_text
+test.big analyze status Engine-independent statistics collected
test.big analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/big';
@@ -1273,6 +2062,7 @@ test.big 385477733
connection analyze;
ANALYZE TABLE big;
Table Op Msg_type Msg_text
+test.big analyze status Engine-independent statistics collected
test.big analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/big';
@@ -1286,6 +2076,7 @@ test.big 1705165209
connection analyze;
ANALYZE TABLE big;
Table Op Msg_type Msg_text
+test.big analyze status Engine-independent statistics collected
test.big analyze status OK
SELECT clust_index_size FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE name = 'test/big';
@@ -1350,10 +2141,395 @@ DELETE FROM t1;
COMMIT;
InnoDB 0 transactions not purged
DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT UNIQUE) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 (a) VALUES (NULL), (NULL);
+ALTER TABLE t1 DROP a, ADD COLUMN a INT;
+DELETE FROM t1;
+BEGIN;
+INSERT INTO t1 SET a=NULL;
+ROLLBACK;
+DELETE FROM t1;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, t VARCHAR(33101) NOT NULL) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES(347,'');
+ALTER TABLE t1 DROP COLUMN t, ALGORITHM=INSTANT;
+SELECT * FROM t1;
+a
+347
+DROP TABLE t1;
+CREATE TABLE t1 (a INT) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1() VALUES();
+ALTER TABLE t1 ADD COLUMN b INT FIRST, ADD COLUMN c INT AFTER b;
+SELECT * FROM t1;
+b c a
+NULL NULL NULL
+DROP TABLE t1;
+CREATE TABLE t1 (t TEXT) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+SET @t=REPEAT('x',@@innodb_page_size / 2);
+INSERT INTO t1 VALUES (@t),(@t),(@t),(@t),(@t),(@t),(NULL),(@t),(@t),(@t),(@t);
+ALTER TABLE t1 ADD COLUMN a INT FIRST;
+UPDATE t1 SET a = 0;
+DROP TABLE t1;
+CREATE TABLE t1 (t TEXT) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 SET t = @x;
+ALTER TABLE t1 DROP COLUMN t, ADD COLUMN i INT NOT NULL DEFAULT 1;
+ALTER TABLE t1 ADD COLUMN t TEXT;
+SELECT * FROM t1;
+i t
+1 NULL
+DROP TABLE t1;
+CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, KEY(a)) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 SET a=NULL;
+ALTER TABLE t1 DROP COLUMN b;
+ALTER TABLE t1 ADD COLUMN c INT NOT NULL DEFAULT 42;
+INSERT INTO t1 SET a=NULL;
+UPDATE t1 SET a=a+2;
+SELECT * FROM t1;
+a c
+3 42
+4 42
+DROP TABLE t1;
+CREATE TABLE t1 (i INT) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 SET i=1;
+ALTER TABLE t1 ADD COLUMN b BIT FIRST;
+ALTER TABLE t1 ADD COLUMN v INT AS (i) VIRTUAL;
+SELECT * FROM t1;
+b i v
+NULL 1 1
+DROP TABLE t1;
+CREATE TABLE t1 (ts TIMESTAMP) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 ADD COLUMN f VARCHAR(8), ADD COLUMN dt DATETIME;
+ALTER TABLE t1 ADD COLUMN b BIT, DROP COLUMN f, ADD COLUMN t TIME FIRST;
+ALTER TABLE t1 ADD COLUMN ts2 TIMESTAMP;
+DROP TABLE t1;
+CREATE TABLE t1 (f1 INT, f2 INT, f3 INT) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES (4,4,4);
+ALTER TABLE t1 DROP f1, DROP f2, ADD f4 INT, ADD f5 INT;
+DELETE FROM t1;
+ALTER TABLE t1 DROP COLUMN f4;
+DROP TABLE t1;
+CREATE TABLE t1 (f1 INT, f2 INT, f3 INT) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 DROP f2, ADD COLUMN f4 INT;
+ALTER TABLE t1 DROP f4;
+ALTER TABLE t1 DROP f1;
+DROP TABLE t1;
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, f INT, KEY(id)) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 DROP COLUMN id;
+INSERT INTO t1 () VALUES (),();
+SELECT * FROM t1;
+f
+NULL
+NULL
+ALTER TABLE t1 ADD COLUMN id INT NOT NULL AUTO_INCREMENT FIRST, ADD KEY(id);
+SELECT * FROM t1;
+id f
+1 NULL
+2 NULL
+DROP TABLE t1;
+CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, f INT, KEY(id)) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 SET f=NULL;
+ALTER TABLE t1 DROP COLUMN id;
+INSERT INTO t1 SET f=NULL;
+SELECT * FROM t1;
+f
+NULL
+NULL
+DROP TABLE t1;
+CREATE TABLE t1(f INT, k INT NOT NULL AUTO_INCREMENT, KEY(k)) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 DROP COLUMN f;
+INSERT INTO t1 VALUES (1);
+DROP TABLE t1;
+CREATE TABLE t1(pk INT PRIMARY KEY, f INT, k INT AUTO_INCREMENT, KEY(k))
+ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 DROP COLUMN f;
+INSERT INTO t1 (pk) VALUES (1);
+DROP TABLE t1;
+CREATE TABLE t1 (
+pk INT PRIMARY KEY,
+f1 INT,
+f2 CHAR(255),
+f3 BIGINT,
+f4 INT,
+f5 CHAR(255),
+f6 CHAR(255),
+f7 CHAR(255) NOT NULL,
+f8 INT,
+f9 CHAR(10)
+) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES
+(1, 1, 'a', 1, 1, 'a', 'a', 'a', 1, 'a'),
+(2, 2, 'b', 2, 2, 'b', 'b', 'b', 2, 'b'),
+(3, 3, 'c', 3, 3, 'c', 'c', 'c', 3, 'c'),
+(4, 4, 'd', 4, 4, 'd', 'd', 'd', 4, 'd'),
+(5, 5, 'e', 5, 5, 'e', 'e', 'e', 5, 'e'),
+(6, 6, 'f', 6, 6, 'f', 'f', 'f', 6, 'f'),
+(7, 7, 'g', 7, 7, 'g', 'g', 'g', 7, 'g'),
+(8, 8, 'h', 8, 8, 'h', 'h', 'h', 8, 'h'),
+(9, 9, 'i', 9, 9, 'i', 'i', 'i', 9, 'i'),
+(10, 0, 'j', 0, 0, 'j', 'j', 'j', 0, 'j'),
+(11, 1, 'k', 1, 1, 'k', 'k', 'k', 1, 'k'),
+(12, 2, 'l', 2, 2, 'l', 'l', 'l', 2, 'l'),
+(13, 3, 'm', 3, 3, 'm', 'm', 'm', 3, 'm'),
+(14, 4, 'n', 4, 4, 'n', 'n', 'n', 4, 'n'),
+(15, 5, 'o', 5, 5, 'o', 'o', 'o', 5, 'o');
+DELETE FROM t1 WHERE pk=1;
+InnoDB 0 transactions not purged
+INSERT INTO t1 VALUES
+(1, 1, 'a', 1, 1, 'a', 'a', 'a', 1, 'a');
+ALTER TABLE t1 DROP COLUMN f1;
+DROP TABLE t1;
+CREATE TABLE t1 (
+pk INT PRIMARY KEY,
+f1 INT, f2 CHAR(32) NOT NULL,
+f3 INT NOT NULL, f4 INT NOT NULL, f5 INT, f6 CHAR(32) NOT NULL,
+f7 CHAR(32), f8 CHAR(32)
+) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES
+(1,9,'',2,88,88,'','',''),(2,48,'',8,68,92,'','',''),
+(3,41,'',56,84,37,'','',''),(4,NULL,'',6,6,NULL,'','',''),
+(5,52,'',37,44,20,'','',''),(6,44,'',53,4,NULL,'','',''),
+(7,24,'',54,8,54,'','',''),(8,80,'',3,52,20,'','',''),
+(9,71,'',34,32,NULL,'','',''),(10,14,'',6,64,88,'','',''),
+(11,48,'',8,25,42,'','',''),(12,16,'',8,7,NULL,'','',''),
+(13,NULL,'',22,0,95,'','',''),(14,4,'',72,48,NULL,'','',''),
+(15,4,'',5,64,2,'','',''),(16,NULL,'',9,40,30,'','',''),
+(17,92,'',48,2,NULL,'','',''),(18,36,'',48,51,7,'','',''),
+(19,NULL,'',80,96,NULL,'','',''),(20,96,'',9,80,NULL,'','',''),
+(21,50,'',16,40,NULL,'','',''),(22,NULL,'',7,84,8,'','',''),
+(23,28,'',93,80,NULL,'','',''),(24,31,'',40,38,NULL,'','',''),
+(25,85,'',8,5,88,'','',''),(26,66,'',8,32,4,'','',''),
+(51,52,'',6,92,15,'','',''),(52,77,'',24,24,28,'','',''),
+(53,8,'',75,31,NULL,'','',''),(54,48,'',5,8,1,'','',''),
+(55,90,'',56,12,5,'','',''),(56,92,'',4,9,88,'','',''),
+(57,83,'',23,40,72,'','',''),(58,7,'',4,40,32,'','',''),
+(59,28,'',2,3,32,'','',''),(60,16,'',80,4,NULL,'','',''),
+(61,44,'',88,24,NULL,'','',''),(62,4,'',5,25,3,'','',''),
+(63,NULL,'',7,24,76,'','',''),(64,0,'',13,40,73,'','',''),
+(101,NULL,'',1,49,75,'','',''),(102,34,'',10,17,20,'','',''),
+(103,8,'',2,2,NULL,'','',''),(104,12,'',44,48,52,'','',''),
+(105,8,'',4,19,38,'','',''),(106,20,'',6,80,9,'','',''),
+(107,72,'',72,16,56,'','',''),(108,76,'',98,24,21,'','',''),
+(109,67,'',16,91,NULL,'','',''),(110,72,'',72,3,48,'','',''),
+(151,8,'',3,86,NULL,'','',''),(152,NULL,'',52,72,0,'','',''),
+(153,NULL,'',46,30,92,'','',''),(154,80,'',1,40,48,'','',''),
+(155,24,'',68,68,8,'','',''),(156,85,'',85,72,60,'','',''),
+(157,7,'',7,12,6,'','',''),(158,NULL,'',48,48,80,'','',''),
+(159,12,'',0,36,0,'','',''),(160,2,'',6,52,NULL,'','',''),
+(201,0,'',1,3,NULL,'','',''),(202,NULL,'',3,53,14,'','',''),
+(203,84,'',6,20,NULL,'','',''),(204,38,'',25,13,88,'','',''),
+(205,1,'',2,69,5,'','',''),(206,7,'',60,22,NULL,'','',''),
+(207,NULL,'',5,4,NULL,'','',''),(251,7,'',0,4,40,'','',''),
+(252,4,'',16,8,NULL,'','',''),(253,14,'',60,12,99,'','',''),
+(254,84,'',68,16,5,'','',''),(255,3,'',70,36,61,'','',''),
+(256,7,'',18,48,NULL,'','',''),(257,NULL,'',68,53,NULL,'','',''),
+(258,29,'',52,16,64,'','',''),(259,NULL,'',80,92,40,'','',''),
+(301,68,'',1,48,48,'','',''),(302,2,'',1,1,32,'','',''),
+(303,44,'',60,96,16,'','',''),(304,32,'',52,64,32,'','',''),
+(305,88,'',37,72,NULL,'','',''),(306,5,'',35,60,20,'','',''),
+(307,35,'',4,48,NULL,'','',''),(308,4,'',92,44,80,'','',''),
+(351,48,'',60,4,40,'','',''),(352,7,'',9,61,13,'','',''),
+(353,0,'',5,93,53,'','',''),(354,7,'',1,20,NULL,'','',''),
+(355,84,'',5,48,96,'','',''),(356,NULL,'',39,92,36,'','',''),
+(357,88,'',9,76,44,'','',''),(358,66,'',34,67,80,'','',''),
+(359,8,'',8,52,NULL,'','',''),(360,3,'',53,83,NULL,'','',''),
+(361,23,'',44,9,48,'','',''),(362,4,'',0,54,48,'','',''),
+(363,75,'',66,76,52,'','','');
+ALTER TABLE t1 ADD COLUMN x VARCHAR(255) DEFAULT ' foobar ';
+UPDATE t1 SET f1 = 0;
+ALTER TABLE t1 DROP COLUMN x;
+DROP TABLE t1;
+CREATE TABLE t1 (f1 VARCHAR(1), f2 VARCHAR(2)) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 MODIFY f2 VARCHAR (8) FIRST;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT UNIQUE, b INT UNIQUE, PRIMARY KEY(a,b)) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 DROP PRIMARY KEY;
+ALTER TABLE t1 CHANGE COLUMN a a INT;
+DELETE FROM t1 WHERE a = NULL OR a IS NULL;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT, c INT NOT NULL, d INT,
+e INT, f INT, g INT, h INT, j INT) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 MODIFY COLUMN c INT, MODIFY COLUMN a INT AFTER b;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT NOT NULL, b INT NOT NULL) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES (0,0);
+ALTER TABLE t1 MODIFY a INT AFTER b;
+ALTER TABLE t1 ADD COLUMN v INT AS (a) VIRTUAL;
+ALTER TABLE t1 MODIFY b INT NOT NULL AFTER a;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT NOT NULL) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES (1);
+ALTER TABLE t1 ADD COLUMN b INT;
+ALTER TABLE t1 MODIFY COLUMN a INT NULL;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c INT) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 SET a=1;
+ALTER TABLE t1 DROP c;
+ALTER TABLE t1 DROP b, ADD v INT AS (a);
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c INT, d INT) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 SET a=1;
+ALTER TABLE t1 DROP c;
+ALTER TABLE t1 DROP b, ADD v INT AS (a);
+DROP TABLE t1;
+CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, b BLOB NOT NULL) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES (1,10,REPEAT('foobar',2000));
+ALTER TABLE t1 DROP COLUMN b;
+INSERT INTO t1 VALUES (2,20);
+ALTER TABLE t1 ADD COLUMN vpk INT AS (pk);
+ALTER TABLE t1 DROP COLUMN i;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES (1,1);
+ALTER TABLE t1 ADD f DATE AFTER a;
+ALTER TABLE t1 DROP b, DROP f;
+DROP TABLE t1;
+CREATE TABLE t1 (a INT, b INT) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES (1,1);
+ALTER TABLE t1 ADD COLUMN f INT AFTER a;
+ALTER TABLE t1 DROP b, DROP f;
+DROP TABLE t1;
+CREATE TABLE t1(t TEXT NOT NULL, FULLTEXT(t)) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 MODIFY COLUMN t TEXT;
+DROP TABLE t1;
+CREATE TABLE t1 (f TINYINT, g SMALLINT UNSIGNED) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES(127,6502),(-128,33101);
+ALTER TABLE t1 MODIFY f SMALLINT DEFAULT 12345,
+MODIFY g BIGINT UNSIGNED DEFAULT 1234567;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+SELECT * FROM t1;
+f g
+127 6502
+-128 33101
+DROP TABLE t1;
+CREATE TABLE t1 (f BIT(8)) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES (b'10000000'),(b'00000001');
+ALTER TABLE t1 MODIFY f BIT(16);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+INSERT INTO t1 VALUES (b'1000000010101111'),(b'10000000');
+SELECT HEX(f) FROM t1;
+HEX(f)
+80
+1
+80AF
+80
+ALTER TABLE t1 MODIFY f SMALLINT;
+ERROR 22003: Out of range value for column 'f' at row 3
+ALTER TABLE t1 MODIFY f SMALLINT UNSIGNED;
+affected rows: 4
+info: Records: 4 Duplicates: 0 Warnings: 0
+SELECT * FROM t1;
+f
+128
+1
+32943
+128
+ALTER TABLE t1 MODIFY f BIT;
+ERROR 22001: Data too long for column 'f' at row 1
+ALTER TABLE t1 MODIFY f BIT(15);
+ERROR 22001: Data too long for column 'f' at row 3
+DELETE FROM t1 LIMIT 3;
+ALTER TABLE t1 MODIFY f BIT(15);
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 MODIFY f BIT(8);
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+SELECT HEX(f) FROM t1;
+HEX(f)
+80
+DROP TABLE t1;
+CREATE TABLE t1 (b BIT NOT NULL) ENGINE=InnoDB ROW_FORMAT=DYNAMIC DEFAULT CHARSET utf16;
+INSERT INTO t1 SET b=b'1';
+ALTER TABLE t1 CHANGE b c BIT NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+SELECT HEX(c) FROM t1;
+HEX(c)
+1
+DROP TABLE t1;
+CREATE TABLE t1 (c VARCHAR(10) NOT NULL DEFAULT 'scary') ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1() VALUES();
+ALTER TABLE t1 ADD f TINYINT NOT NULL DEFAULT -42;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 MODIFY f MEDIUMINT NOT NULL DEFAULT 64802,
+MODIFY c VARCHAR(20) NOT NULL DEFAULT 'gory',
+ADD d DATETIME;
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+INSERT INTO t1() VALUES();
+INSERT INTO t1 (c,f,d) VALUES ('fury', -8388608, now());
+SELECT * FROM t1;
+c f d
+scary -42 NULL
+gory 64802 NULL
+fury -8388608 1970-01-01 03:00:42
+DROP TABLE t1;
+CREATE TABLE t1 (t TINYINT PRIMARY KEY, m MEDIUMINT UNIQUE) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+SELECT table_id INTO @table_id1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
+WHERE name = 'test/t1';
+INSERT INTO t1 VALUES (-42, -123456);
+ALTER TABLE t1 CHANGE t s SMALLINT;
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+SELECT table_id INTO @table_id2 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
+WHERE name = 'test/t1';
+affected rows: 1
+ALTER TABLE t1 CHANGE m i INT, ALGORITHM=INSTANT;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+ALTER TABLE t1 CHANGE m i INT;
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
+SELECT table_id INTO @table_id3 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
+WHERE name = 'test/t1';
+affected rows: 1
+SELECT @table_id1 = @table_id2, @table_id2 = @table_id3;
+@table_id1 = @table_id2 @table_id2 = @table_id3
+0 0
+INSERT IGNORE INTO t1 VALUES (0, -123456);
+Warnings:
+Warning 1062 Duplicate entry '-123456' for key 'm'
+REPLACE INTO t1 VALUES(-42, 123456);
+INSERT IGNORE INTO t1 VALUES(32768, 2147483648);
+Warnings:
+Warning 1264 Out of range value for column 's' at row 1
+Warning 1264 Out of range value for column 'i' at row 1
+SELECT * FROM t1;
+s i
+-42 123456
+32767 2147483647
+DROP TABLE t1;
+CREATE TABLE t1 (a SERIAL, b INT, c TINYINT UNIQUE) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 (c) VALUES(1),(2),(3);
+ALTER TABLE t1 MODIFY c BIGINT;
+affected rows: 3
+info: Records: 3 Duplicates: 0 Warnings: 0
+UPDATE t1 SET b=1 WHERE c=2;
+UPDATE t1 SET c=4 WHERE a=3;
+UPDATE t1 SET b=2 WHERE c>3;
+UPDATE t1 SET c=c+1;
+ERROR 23000: Duplicate entry '2' for key 'c'
+SELECT * FROM t1;
+a b c
+1 NULL 1
+2 1 2
+3 2 4
+DROP TABLE t1;
+CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(1)) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
+INSERT INTO t1 VALUES(1,'a');
+ALTER TABLE t1 MODIFY b VARCHAR(256), ADD COLUMN c INT;
+INSERT INTO t1 VALUES(2,'bah',3);
+SELECT * FROM t1;
+a b c
+1 a NULL
+2 bah 3
+DROP TABLE t1;
disconnect analyze;
SELECT variable_value-@old_instant instants
FROM information_schema.global_status
WHERE variable_name = 'innodb_instant_alter_column';
instants
-51
+181
SET GLOBAL innodb_purge_rseg_truncate_frequency= @saved_frequency;
diff --git a/mysql-test/suite/innodb/r/instant_alter_bugs.result b/mysql-test/suite/innodb/r/instant_alter_bugs.result
index 14d88c64de8..626e8ebe7d8 100644
--- a/mysql-test/suite/innodb/r/instant_alter_bugs.result
+++ b/mysql-test/suite/innodb/r/instant_alter_bugs.result
@@ -120,7 +120,7 @@ INSERT INTO t1 (f1,f2,f3,f4,f5,f6,f7,f8) VALUES
INSERT INTO t1 (f1,f2,f3,f4,f5,f6,f7,f8) VALUES ('impact', 'b', 'h', 185, 'fj', 7, 7, 3);
ERROR 23000: Duplicate entry '7' for key 'f6'
ALTER TABLE t1 ADD COLUMN filler VARCHAR(255) DEFAULT '';
-SELECT * FROM t1 INTO OUTFILE 'load.data';
+SELECT * INTO OUTFILE 'load.data' FROM t1;
UPDATE IGNORE t1 SET pk = 0;
LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1;
HANDLER t1 OPEN AS h;
@@ -128,3 +128,78 @@ HANDLER h READ `PRIMARY` PREV WHERE 0;
pk f1 f2 f3 f4 f5 f6 f7 f8 filler
HANDLER h CLOSE;
DROP TABLE t1;
+create table t (
+a varchar(9),
+b int,
+c int,
+row_start bigint unsigned generated always as row start invisible,
+row_end bigint unsigned generated always as row end invisible,
+period for system_time (row_start, row_end)
+) engine=innodb row_format=compressed with system versioning;
+insert into t values (repeat('a', 9), 1, 1);
+set @@system_versioning_alter_history = keep;
+alter table t modify a varchar(10), algorithm=instant;
+alter table t change b bb int, algorithm=instant;
+alter table t modify c int without system versioning, algorithm=instant;
+set @@system_versioning_alter_history = error;
+check table t;
+Table Op Msg_type Msg_text
+test.t check status OK
+drop table t;
+#
+# MDEV-18219 Assertion `index->n_core_null_bytes <= ...' failed
+# in rec_init_offsets after instant DROP COLUMN
+#
+CREATE TABLE t1 (a INT, b INT NOT NULL) ENGINE=InnoDB;
+INSERT INTO t1 VALUES
+(0,9),(2,7),(3,1),(3,4),(8,4),(3,7),(6,1),(3,8),(1,2),(4,1),(0,8),(5,3),
+(1,3),(1,6),(2,1),(8,7),(6,0),(1,9),(9,4),(0,6),(9,3),(0,9),(9,4),(2,4),
+(2,7),(7,8),(8,2),(2,5),(6,1),(4,5),(5,3),(6,8),(4,9),(5,7),(7,5),(5,1),
+(8,8),(5,7),(3,8),(0,1),(8,4),(8,3),(9,7),(4,8),(1,1),(0,4),(2,6),(8,5),
+(8,8),(8,7),(6,7),(1,7),(9,6),(3,6),(1,9),(0,3),(5,3),(2,4),(0,6),(2,0),
+(6,5),(1,6),(2,4),(9,1),(3,0),(6,4),(1,3),(0,8),(3,5),(3,1),(8,9),(9,9),
+(7,9),(4,5),(2,2),(3,8),(0,8),(7,1),(2,0),(1,5),(7,3),(4,4),(3,9),(7,2),
+(6,2),(0,4),(2,0),(1,5),(5,7),(4,5),(3,7),(6,0),(2,1),(5,0),(1,0),(2,0),
+(8,4),(5,7),(3,5),(0,5),(7,6),(5,9),(1,2),(4,2),(8,5),(8,7),(2,8),(1,8),
+(4,3),(1,6),(7,8),(3,7),(4,6),(1,1),(3,0),(1,6),(2,0),(3,4),(4,8),(3,9),
+(8,0),(4,9),(4,0),(3,9),(6,4),(7,4),(5,8),(4,7),(7,3),(5,9),(2,3),(7,3),
+(0,4),(5,9),(9,8),(4,2),(3,6),(2,6),(1,8),(7,0),(0,0),(2,3),(1,2),(3,3),
+(2,7),(6,0),(9,0),(6,9),(4,6),(9,8),(0,7),(9,1),(9,6),(4,3),(7,7),(7,7),
+(4,1),(4,7),(7,3),(2,8),(5,8),(8,9),(3,9),(7,7),(0,8),(4,9),(3,2),(5,0),
+(1,7),(0,3),(2,9),(9,7),(7,5),(6,9),(8,5),(3,6),(1,1),(2,8),(7,9),(4,9),
+(6,6),(5,9),(5,3),(9,8),(3,3),(5,6),(0,9),(3,9),(7,9),(7,3),(5,2),(1,4),
+(4,4),(8,2),(2,2),(8,3),(9,1),(4,9),(9,8),(1,8),(1,8),(9,1),(1,1),(3,0),
+(4,6),(9,3),(3,3),(5,2),(0,1),(3,4),(3,2),(1,3),(4,4),(7,0),(4,6),(7,2),
+(4,5),(8,7),(7,8),(8,1),(3,5),(0,6),(3,5),(2,1),(4,4),(3,4),(2,1),(4,1);
+INSERT INTO t1 SELECT * FROM t1;
+ALTER TABLE t1 DROP a;
+ALTER TABLE t1 ADD vb INT AS (b) VIRTUAL;
+DROP TABLE t1;
+#
+# MDEV-19030 Assertion index->n_core_null_bytes <= ... failed
+# in rec_init_offsets after instant DROP COLUMN
+#
+CREATE TABLE t1 (a INT, b INT NOT NULL DEFAULT 0) ENGINE=InnoDB;
+INSERT INTO t1 () VALUES (),(),(),();
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+ALTER TABLE t1 FORCE;
+INSERT INTO t1 SELECT * FROM t1;
+ALTER TABLE t1 DROP a, ADD a SMALLINT NOT NULL;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+ALTER TABLE t1 ADD vb INT AS (b) VIRTUAL;
+DROP TABLE t1;
+#
+# MDEV-18623 Assertion after DROP FULLTEXT INDEX and removing NOT NULL
+#
+CREATE TABLE t1 (c TEXT NOT NULL, FULLTEXT INDEX ftidx(c)) ENGINE=InnoDB
+ROW_FORMAT=REDUNDANT;
+ALTER TABLE t1 DROP INDEX ftidx;
+ALTER TABLE t1 MODIFY c TEXT NULL, ALGORITHM=INSTANT;
+ERROR 0A000: ALGORITHM=INSTANT is not supported for this operation. Try ALGORITHM=INPLACE
+ALTER TABLE t1 MODIFY c TEXT NULL;
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/instant_alter_charset,redundant.rdiff b/mysql-test/suite/innodb/r/instant_alter_charset,redundant.rdiff
new file mode 100644
index 00000000000..935b5dbf78c
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_charset,redundant.rdiff
@@ -0,0 +1,10 @@
+--- instant_alter_charset.result
++++ instant_alter_charset,redundant.result
+@@ -254,7 +254,6 @@
+ alter table boundary_255
+ modify b varchar(200) charset utf8mb3,
+ algorithm=instant;
+-ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+ alter table boundary_255
+ modify c varchar(300) charset utf8mb3,
+ algorithm=instant;
diff --git a/mysql-test/suite/innodb/r/instant_alter_charset.result b/mysql-test/suite/innodb/r/instant_alter_charset.result
new file mode 100644
index 00000000000..6242b167412
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_charset.result
@@ -0,0 +1,1812 @@
+set names utf8;
+create table no_rebuild (
+a char(150) charset utf8mb3 collate utf8mb3_general_ci
+) engine=innodb;
+create table rebuild (
+a varchar(150) charset ascii
+) engine=innodb;
+set @id = (select table_id from information_schema.innodb_sys_tables
+where name = 'test/no_rebuild');
+select name, prtype, len from information_schema.innodb_sys_columns
+where table_id = @id;
+name prtype len
+a 2162942 450
+select c.prtype, c.len from information_schema.innodb_sys_columns as c inner join information_schema.innodb_sys_tables t on c.table_id = t.table_id
+where t.name = 'test/rebuild' and c.name = 'a';
+prtype len
+720911 150
+alter table no_rebuild
+change a a char(150) charset utf8mb3 collate utf8mb3_spanish_ci,
+algorithm=inplace;
+alter table rebuild
+change a a varchar(150) charset latin1 not null default 'asdf',
+algorithm=inplace;
+select name, prtype, len from information_schema.innodb_sys_columns
+where table_id = @id;
+name prtype len
+a 13041918 450
+select c.prtype, c.len from information_schema.innodb_sys_columns as c inner join information_schema.innodb_sys_tables t on c.table_id = t.table_id
+where t.name = 'test/rebuild' and c.name = 'a';
+prtype len
+524559 150
+drop table no_rebuild, rebuild;
+create table supported_types (
+id int primary key auto_increment,
+a varchar(150) charset ascii,
+b text(150) charset ascii,
+c text charset ascii,
+d tinytext charset ascii,
+e mediumtext charset ascii,
+f longtext charset ascii
+) engine=innodb;
+alter table supported_types
+convert to charset latin1,
+algorithm=instant;
+drop table supported_types;
+create table various_cases (
+a char(150) charset ascii,
+b varchar(150) as (a) virtual,
+c varchar(150) as (a) persistent
+) engine=innodb;
+alter table various_cases
+change a a char(150) charset latin1,
+algorithm=inplace;
+alter table various_cases
+change a a varchar(222),
+algorithm=inplace;
+ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table various_cases
+change b b varchar(150) as (a) virtual,
+algorithm=inplace;
+alter table various_cases
+change c c varchar(150) as (a) persistent,
+algorithm=inplace;
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY
+alter table various_cases
+modify a char(150) charset utf8mb4,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table various_cases;
+create table all_texts (
+a tinytext charset ascii,
+b text charset ascii,
+c mediumtext charset ascii,
+d longtext charset ascii,
+footer int
+) engine=innodb;
+alter table all_texts
+convert to charset latin1 collate latin1_general_ci,
+algorithm=instant;
+drop table all_texts;
+create table all_binaries (
+a tinyblob,
+b blob,
+c mediumblob,
+d longblob,
+e varbinary(150),
+f binary(150)
+) engine=innodb;
+alter table all_binaries modify a tinytext, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_binaries modify b text, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_binaries modify c mediumtext, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_binaries modify d longtext, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_binaries modify e varchar(150), algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_binaries modify f char(150), algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table all_binaries;
+create table all_strings (
+a tinytext,
+b text,
+c mediumtext,
+d longtext,
+e varchar(150),
+f char(150)
+) engine=innodb;
+alter table all_strings modify a tinyblob, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_strings modify b blob, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_strings modify c mediumblob, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_strings modify d longblob, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_strings modify e varbinary(150), algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_strings modify f binary(150), algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_strings modify a tinytext charset binary, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_strings modify b text charset binary, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_strings modify c mediumtext charset binary, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_strings modify d longtext charset binary, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_strings modify e varchar(150) charset binary, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table all_strings modify f char(150) charset binary, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table all_strings;
+create table key_part_change (
+a char(150) charset ascii,
+b char(150) charset ascii,
+c char(150) charset ascii,
+unique key ab (a,b)
+) engine=innodb;
+alter table key_part_change
+modify a char(150) charset utf8mb4,
+drop index ab,
+add unique key ab(a,c),
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table key_part_change;
+create table key_part_change_and_rename (
+a char(100) charset ascii,
+b char(100) charset ascii,
+unique key ab (a,b)
+) engine=innodb;
+alter table key_part_change_and_rename
+change a b char(100) charset utf8mb4,
+change b a char(100) charset utf8mb4,
+drop index ab,
+add unique key ab(a,b),
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table key_part_change_and_rename;
+create table enum_and_set (
+a enum('one', 'two') charset utf8mb3,
+b set('three', 'four') charset utf8mb3
+) engine=innodb;
+alter table enum_and_set
+modify a enum('one', 'two') charset utf8mb4,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table enum_and_set
+modify b enum('three', 'four') charset utf8mb4,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table enum_and_set;
+create table compressed (
+a varchar(255) charset utf8mb3 compressed
+) engine=innodb;
+insert into compressed values ('AAA'), ('bbb'), ('CCC');
+alter table compressed
+modify a varchar(255) charset utf8mb4 compressed,
+algorithm=instant;
+select * from compressed;
+a
+AAA
+bbb
+CCC
+check table compressed;
+Table Op Msg_type Msg_text
+test.compressed check status OK
+drop table compressed;
+create table key_part_bug (
+id int primary key auto_increment,
+a varchar(150) charset utf8mb3 unique key
+) engine=innodb;
+alter table key_part_bug
+modify a varchar(150) charset utf8mb4,
+algorithm=instant;
+drop table key_part_bug;
+create table latin1_swedish_special_case (
+copy1 varchar(150) charset ascii collate ascii_general_ci,
+copy2 char(150) charset ascii collate ascii_general_ci,
+instant1 varchar(150) charset ascii collate ascii_general_ci,
+instant2 char(150) charset ascii collate ascii_general_ci
+) engine=innodb;
+select c.name, c.prtype, c.mtype, c.len from information_schema.innodb_sys_columns as c inner join information_schema.innodb_sys_tables t on c.table_id = t.table_id
+where t.name = 'test/latin1_swedish_special_case';
+name prtype mtype len
+copy1 720911 12 150
+copy2 721150 13 150
+instant1 720911 12 150
+instant2 721150 13 150
+alter table latin1_swedish_special_case
+modify copy1 varchar(150) charset latin1 collate latin1_swedish_ci,
+modify copy2 char(150) charset latin1 collate latin1_swedish_ci,
+algorithm=copy;
+alter table latin1_swedish_special_case
+modify instant1 varchar(150) charset latin1 collate latin1_swedish_ci,
+modify instant2 char(150) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+select c.name, c.prtype, c.mtype, c.len from information_schema.innodb_sys_columns as c inner join information_schema.innodb_sys_tables t on c.table_id = t.table_id
+where t.name = 'test/latin1_swedish_special_case';
+name prtype mtype len
+copy1 524303 1 150
+copy2 524542 2 150
+instant1 524303 1 150
+instant2 524542 2 150
+alter table latin1_swedish_special_case
+modify copy1 varchar(150) charset latin1 collate latin1_general_ci,
+modify copy2 char(150) charset latin1 collate latin1_general_ci,
+algorithm=copy;
+alter table latin1_swedish_special_case
+modify instant1 varchar(150) charset latin1 collate latin1_general_ci,
+modify instant2 char(150) charset latin1 collate latin1_general_ci,
+algorithm=instant;
+select c.name, c.prtype, c.mtype, c.len from information_schema.innodb_sys_columns as c inner join information_schema.innodb_sys_tables t on c.table_id = t.table_id
+where t.name = 'test/latin1_swedish_special_case';
+name prtype mtype len
+copy1 3145743 12 150
+copy2 3145982 13 150
+instant1 3145743 12 150
+instant2 3145982 13 150
+drop table latin1_swedish_special_case;
+create table regression (a varchar(100) charset utf8mb3 primary key, b int) engine=innodb;
+alter table regression convert to character set utf8mb4;
+drop table regression;
+create table boundary_255 (
+a varchar(50) charset ascii,
+b varchar(200) charset ascii,
+c varchar(300) charset ascii
+) engine=innodb;
+alter table boundary_255
+modify a varchar(50) charset utf8mb3,
+algorithm=instant;
+alter table boundary_255
+modify b varchar(200) charset utf8mb3,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table boundary_255
+modify c varchar(300) charset utf8mb3,
+algorithm=instant;
+drop table boundary_255;
+create table fully_compatible (
+id int auto_increment unique key,
+from_charset char(255),
+from_collate char(255),
+to_charset char(255),
+to_collate char(255)
+);
+insert into fully_compatible (from_charset, from_collate, to_charset, to_collate) values
+('utf8mb3', 'utf8mb3_general_ci', 'utf8mb4', 'utf8mb4_general_ci'),
+('utf8mb3', 'utf8mb3_bin', 'utf8mb4', 'utf8mb4_bin'),
+('utf8mb3', 'utf8mb3_unicode_ci', 'utf8mb4', 'utf8mb4_unicode_ci'),
+('utf8mb3', 'utf8mb3_icelandic_ci', 'utf8mb4', 'utf8mb4_icelandic_ci'),
+('utf8mb3', 'utf8mb3_latvian_ci', 'utf8mb4', 'utf8mb4_latvian_ci'),
+('utf8mb3', 'utf8mb3_romanian_ci', 'utf8mb4', 'utf8mb4_romanian_ci'),
+('utf8mb3', 'utf8mb3_slovenian_ci', 'utf8mb4', 'utf8mb4_slovenian_ci'),
+('utf8mb3', 'utf8mb3_polish_ci', 'utf8mb4', 'utf8mb4_polish_ci'),
+('utf8mb3', 'utf8mb3_estonian_ci', 'utf8mb4', 'utf8mb4_estonian_ci'),
+('utf8mb3', 'utf8mb3_spanish_ci', 'utf8mb4', 'utf8mb4_spanish_ci'),
+('utf8mb3', 'utf8mb3_swedish_ci', 'utf8mb4', 'utf8mb4_swedish_ci'),
+('utf8mb3', 'utf8mb3_turkish_ci', 'utf8mb4', 'utf8mb4_turkish_ci'),
+('utf8mb3', 'utf8mb3_czech_ci', 'utf8mb4', 'utf8mb4_czech_ci'),
+('utf8mb3', 'utf8mb3_danish_ci', 'utf8mb4', 'utf8mb4_danish_ci'),
+('utf8mb3', 'utf8mb3_lithuanian_ci', 'utf8mb4', 'utf8mb4_lithuanian_ci'),
+('utf8mb3', 'utf8mb3_slovak_ci', 'utf8mb4', 'utf8mb4_slovak_ci'),
+('utf8mb3', 'utf8mb3_spanish2_ci', 'utf8mb4', 'utf8mb4_spanish2_ci'),
+('utf8mb3', 'utf8mb3_roman_ci', 'utf8mb4', 'utf8mb4_roman_ci'),
+('utf8mb3', 'utf8mb3_persian_ci', 'utf8mb4', 'utf8mb4_persian_ci'),
+('utf8mb3', 'utf8mb3_esperanto_ci', 'utf8mb4', 'utf8mb4_esperanto_ci'),
+('utf8mb3', 'utf8mb3_hungarian_ci', 'utf8mb4', 'utf8mb4_hungarian_ci'),
+('utf8mb3', 'utf8mb3_sinhala_ci', 'utf8mb4', 'utf8mb4_sinhala_ci'),
+('utf8mb3', 'utf8mb3_german2_ci', 'utf8mb4', 'utf8mb4_german2_ci'),
+('utf8mb3', 'utf8mb3_croatian_mysql561_ci', 'utf8mb4', 'utf8mb4_croatian_mysql561_ci'),
+('utf8mb3', 'utf8mb3_unicode_520_ci', 'utf8mb4', 'utf8mb4_unicode_520_ci'),
+('utf8mb3', 'utf8mb3_vietnamese_ci', 'utf8mb4', 'utf8mb4_vietnamese_ci'),
+('utf8mb3', 'utf8mb3_croatian_ci', 'utf8mb4', 'utf8mb4_croatian_ci'),
+('utf8mb3', 'utf8mb3_myanmar_ci', 'utf8mb4', 'utf8mb4_myanmar_ci'),
+('utf8mb3', 'utf8mb3_thai_520_w2', 'utf8mb4', 'utf8mb4_thai_520_w2'),
+('utf8mb3', 'utf8mb3_general_nopad_ci', 'utf8mb4', 'utf8mb4_general_nopad_ci'),
+('utf8mb3', 'utf8mb3_nopad_bin', 'utf8mb4', 'utf8mb4_nopad_bin'),
+('utf8mb3', 'utf8mb3_unicode_nopad_ci', 'utf8mb4', 'utf8mb4_unicode_nopad_ci'),
+('utf8mb3', 'utf8mb3_unicode_520_nopad_ci', 'utf8mb4', 'utf8mb4_unicode_520_nopad_ci'),
+('ucs2', 'ucs2_general_ci', 'utf16', 'utf16_general_ci'),
+('ucs2', 'ucs2_unicode_ci', 'utf16', 'utf16_unicode_ci'),
+('ucs2', 'ucs2_icelandic_ci', 'utf16', 'utf16_icelandic_ci'),
+('ucs2', 'ucs2_latvian_ci', 'utf16', 'utf16_latvian_ci'),
+('ucs2', 'ucs2_romanian_ci', 'utf16', 'utf16_romanian_ci'),
+('ucs2', 'ucs2_slovenian_ci', 'utf16', 'utf16_slovenian_ci'),
+('ucs2', 'ucs2_polish_ci', 'utf16', 'utf16_polish_ci'),
+('ucs2', 'ucs2_estonian_ci', 'utf16', 'utf16_estonian_ci'),
+('ucs2', 'ucs2_spanish_ci', 'utf16', 'utf16_spanish_ci'),
+('ucs2', 'ucs2_general_ci', 'utf16', 'utf16_general_ci'),
+('ascii', 'ascii_general_ci', 'utf8mb3', 'utf8mb3_general_ci'),
+('ascii', 'ascii_general_ci', 'utf8mb4', 'utf8mb4_general_ci'),
+('ascii', 'ascii_general_ci', 'latin1', 'latin1_general_ci'),
+('ascii', 'ascii_bin', 'latin1', 'latin1_bin'),
+('ascii', 'ascii_nopad_bin', 'latin1', 'latin1_nopad_bin'),
+('ascii', 'ascii_general_ci', 'latin2', 'latin2_general_ci'),
+('ascii', 'ascii_general_ci', 'latin7', 'latin7_general_ci'),
+('ascii', 'ascii_bin', 'koi8u', 'koi8u_bin'),
+('ascii', 'ascii_bin', 'ujis', 'ujis_bin'),
+('ascii', 'ascii_bin', 'big5', 'big5_bin'),
+('ascii', 'ascii_bin', 'gbk', 'gbk_bin')
+;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_general_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_general_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_general_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_general_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_bin,
+b varchar(50) charset utf8mb3 collate utf8mb3_bin primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_bin,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_bin,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_unicode_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_unicode_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_unicode_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_unicode_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_icelandic_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_icelandic_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_icelandic_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_icelandic_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_latvian_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_latvian_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_latvian_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_latvian_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_romanian_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_romanian_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_romanian_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_romanian_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_slovenian_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_slovenian_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_slovenian_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_slovenian_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_polish_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_polish_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_polish_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_polish_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_estonian_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_estonian_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_estonian_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_estonian_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_spanish_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_spanish_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_spanish_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_spanish_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_swedish_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_swedish_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_turkish_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_turkish_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_turkish_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_turkish_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_czech_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_czech_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_czech_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_czech_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_danish_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_danish_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_lithuanian_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_lithuanian_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_lithuanian_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_lithuanian_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_slovak_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_slovak_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_slovak_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_slovak_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_spanish2_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_spanish2_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_spanish2_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_spanish2_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_roman_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_roman_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_roman_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_roman_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_persian_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_persian_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_persian_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_persian_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_esperanto_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_esperanto_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_esperanto_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_esperanto_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_hungarian_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_hungarian_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_hungarian_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_hungarian_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_sinhala_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_sinhala_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_sinhala_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_sinhala_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_german2_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_german2_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_german2_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_german2_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_croatian_mysql561_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_croatian_mysql561_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_croatian_mysql561_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_croatian_mysql561_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_unicode_520_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_unicode_520_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_unicode_520_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_unicode_520_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_vietnamese_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_vietnamese_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_croatian_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_croatian_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_croatian_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_croatian_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_myanmar_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_myanmar_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_myanmar_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_myanmar_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_thai_520_w2,
+b varchar(50) charset utf8mb3 collate utf8mb3_thai_520_w2 primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_thai_520_w2,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_thai_520_w2,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_general_nopad_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_general_nopad_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_general_nopad_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_general_nopad_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_nopad_bin,
+b varchar(50) charset utf8mb3 collate utf8mb3_nopad_bin primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_nopad_bin,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_nopad_bin,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_unicode_nopad_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_unicode_nopad_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_unicode_nopad_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_unicode_nopad_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_unicode_520_nopad_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_unicode_520_nopad_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_unicode_520_nopad_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_unicode_520_nopad_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_general_ci,
+b varchar(50) charset ucs2 collate ucs2_general_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_general_ci,
+modify b varchar(50) charset utf16 collate utf16_general_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_unicode_ci,
+b varchar(50) charset ucs2 collate ucs2_unicode_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_unicode_ci,
+modify b varchar(50) charset utf16 collate utf16_unicode_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_icelandic_ci,
+b varchar(50) charset ucs2 collate ucs2_icelandic_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_icelandic_ci,
+modify b varchar(50) charset utf16 collate utf16_icelandic_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_latvian_ci,
+b varchar(50) charset ucs2 collate ucs2_latvian_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_latvian_ci,
+modify b varchar(50) charset utf16 collate utf16_latvian_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_romanian_ci,
+b varchar(50) charset ucs2 collate ucs2_romanian_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_romanian_ci,
+modify b varchar(50) charset utf16 collate utf16_romanian_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_slovenian_ci,
+b varchar(50) charset ucs2 collate ucs2_slovenian_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_slovenian_ci,
+modify b varchar(50) charset utf16 collate utf16_slovenian_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_polish_ci,
+b varchar(50) charset ucs2 collate ucs2_polish_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_polish_ci,
+modify b varchar(50) charset utf16 collate utf16_polish_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_estonian_ci,
+b varchar(50) charset ucs2 collate ucs2_estonian_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_estonian_ci,
+modify b varchar(50) charset utf16 collate utf16_estonian_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_spanish_ci,
+b varchar(50) charset ucs2 collate ucs2_spanish_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_spanish_ci,
+modify b varchar(50) charset utf16 collate utf16_spanish_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_general_ci,
+b varchar(50) charset ucs2 collate ucs2_general_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_general_ci,
+modify b varchar(50) charset utf16 collate utf16_general_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb3 collate utf8mb3_general_ci,
+modify b varchar(50) charset utf8mb3 collate utf8mb3_general_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_general_ci,
+modify b varchar(50) charset utf8mb4 collate utf8mb4_general_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset latin1 collate latin1_general_ci,
+modify b varchar(50) charset latin1 collate latin1_general_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_bin,
+b varchar(50) charset ascii collate ascii_bin primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset latin1 collate latin1_bin,
+modify b varchar(50) charset latin1 collate latin1_bin,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_nopad_bin,
+b varchar(50) charset ascii collate ascii_nopad_bin primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset latin1 collate latin1_nopad_bin,
+modify b varchar(50) charset latin1 collate latin1_nopad_bin,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset latin2 collate latin2_general_ci,
+modify b varchar(50) charset latin2 collate latin2_general_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset latin7 collate latin7_general_ci,
+modify b varchar(50) charset latin7 collate latin7_general_ci,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_bin,
+b varchar(50) charset ascii collate ascii_bin primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset koi8u collate koi8u_bin,
+modify b varchar(50) charset koi8u collate koi8u_bin,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_bin,
+b varchar(50) charset ascii collate ascii_bin primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset ujis collate ujis_bin,
+modify b varchar(50) charset ujis collate ujis_bin,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_bin,
+b varchar(50) charset ascii collate ascii_bin primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset big5 collate big5_bin,
+modify b varchar(50) charset big5 collate big5_bin,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_bin,
+b varchar(50) charset ascii collate ascii_bin primary key
+) engine=innodb;
+insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+alter table tmp
+change a a varchar(50) charset gbk collate gbk_bin,
+modify b varchar(50) charset gbk collate gbk_bin,
+algorithm=instant;
+check table tmp;
+Table Op Msg_type Msg_text
+test.tmp check status OK
+drop table tmp;
+drop table fully_compatible;
+create table compatible_without_index (
+id int auto_increment unique key,
+from_charset char(255),
+from_collate char(255),
+to_charset char(255),
+to_collate char(255)
+);
+insert into compatible_without_index (from_charset, from_collate, to_charset, to_collate) values
+('ascii', 'ascii_general_ci', 'utf8mb3', 'utf8mb3_swedish_ci'),
+('ascii', 'ascii_bin', 'latin1', 'latin1_swedish_ci'),
+('ascii', 'ascii_general_nopad_ci', 'latin1', 'latin1_swedish_ci'),
+('ascii', 'ascii_nopad_bin', 'latin1', 'latin1_swedish_ci'),
+('ascii', 'ascii_general_ci', 'koi8u', 'koi8u_bin'),
+('ascii', 'ascii_general_nopad_ci', 'koi8u', 'koi8u_bin'),
+('ascii', 'ascii_nopad_bin', 'koi8u', 'koi8u_bin'),
+('ascii', 'ascii_general_ci', 'latin1', 'latin1_swedish_ci'),
+('ascii', 'ascii_bin', 'utf8mb3', 'utf8mb3_swedish_ci'),
+('ascii', 'ascii_general_nopad_ci', 'utf8mb3', 'utf8mb3_swedish_ci'),
+('ascii', 'ascii_nopad_bin', 'utf8mb3', 'utf8mb3_swedish_ci'),
+('ascii', 'ascii_general_ci', 'utf8mb4', 'utf8mb4_danish_ci'),
+('ascii', 'ascii_bin', 'utf8mb4', 'utf8mb4_danish_ci'),
+('ascii', 'ascii_general_nopad_ci', 'utf8mb4', 'utf8mb4_danish_ci'),
+('ascii', 'ascii_nopad_bin', 'utf8mb4', 'utf8mb4_danish_ci'),
+('utf8mb3', 'utf8mb3_general_ci', 'utf8mb4', 'utf8mb4_vietnamese_ci'),
+('utf8mb3', 'utf8mb3_bin', 'utf8mb4', 'utf8mb4_vietnamese_ci'),
+('utf8mb3', 'utf8mb3_general_nopad_ci', 'utf8mb4', 'utf8mb4_vietnamese_ci'),
+('utf8mb3', 'utf8mb3_nopad_bin', 'utf8mb4', 'utf8mb4_vietnamese_ci'),
+('ascii', 'ascii_general_ci', 'gbk', 'gbk_chinese_ci'),
+('ascii', 'ascii_general_ci', 'gbk', 'gbk_chinese_nopad_ci'),
+('ucs2', 'ucs2_myanmar_ci', 'utf16', 'utf16_thai_520_w2'),
+('ucs2', 'ucs2_general_ci', 'utf16', 'utf16_unicode_nopad_ci'),
+('ucs2', 'ucs2_general_mysql500_ci', 'utf16', 'utf16_spanish2_ci'),
+('ascii', 'ascii_general_ci', 'ascii', 'ascii_bin'),
+('utf8mb3', 'utf8mb3_roman_ci', 'utf8mb3', 'utf8mb3_lithuanian_ci'),
+('utf8mb4', 'utf8mb4_thai_520_w2', 'utf8mb4', 'utf8mb4_persian_ci'),
+('utf8mb3', 'utf8mb3_myanmar_ci', 'utf8mb4', 'utf8mb4_german2_ci'),
+('utf8mb3', 'utf8mb3_general_ci', 'utf8mb3', 'utf8mb3_unicode_ci'),
+('latin1', 'latin1_general_cs', 'latin1', 'latin1_general_ci'),
+('ascii', 'ascii_general_ci', 'ujis', 'ujis_japanese_ci'),
+('ascii', 'ascii_general_ci', 'big5', 'big5_chinese_ci'),
+('ascii', 'ascii_general_ci', 'latin2', 'latin2_croatian_ci'),
+('ascii', 'ascii_general_ci', 'latin7', 'latin7_estonian_cs'),
+('utf16', 'utf16_general_ci', 'utf16', 'utf16_german2_ci')
+;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci unique key,
+c varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_bin,
+b varchar(50) charset ascii collate ascii_bin unique key,
+c varchar(50) charset ascii collate ascii_bin primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_nopad_ci,
+b varchar(50) charset ascii collate ascii_general_nopad_ci unique key,
+c varchar(50) charset ascii collate ascii_general_nopad_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_nopad_bin,
+b varchar(50) charset ascii collate ascii_nopad_bin unique key,
+c varchar(50) charset ascii collate ascii_nopad_bin primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci unique key,
+c varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset koi8u collate koi8u_bin,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset koi8u collate koi8u_bin,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset koi8u collate koi8u_bin,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_nopad_ci,
+b varchar(50) charset ascii collate ascii_general_nopad_ci unique key,
+c varchar(50) charset ascii collate ascii_general_nopad_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset koi8u collate koi8u_bin,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset koi8u collate koi8u_bin,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset koi8u collate koi8u_bin,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_nopad_bin,
+b varchar(50) charset ascii collate ascii_nopad_bin unique key,
+c varchar(50) charset ascii collate ascii_nopad_bin primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset koi8u collate koi8u_bin,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset koi8u collate koi8u_bin,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset koi8u collate koi8u_bin,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci unique key,
+c varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset latin1 collate latin1_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_bin,
+b varchar(50) charset ascii collate ascii_bin unique key,
+c varchar(50) charset ascii collate ascii_bin primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_nopad_ci,
+b varchar(50) charset ascii collate ascii_general_nopad_ci unique key,
+c varchar(50) charset ascii collate ascii_general_nopad_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_nopad_bin,
+b varchar(50) charset ascii collate ascii_nopad_bin unique key,
+c varchar(50) charset ascii collate ascii_nopad_bin primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb3 collate utf8mb3_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci unique key,
+c varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_bin,
+b varchar(50) charset ascii collate ascii_bin unique key,
+c varchar(50) charset ascii collate ascii_bin primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_nopad_ci,
+b varchar(50) charset ascii collate ascii_general_nopad_ci unique key,
+c varchar(50) charset ascii collate ascii_general_nopad_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_nopad_bin,
+b varchar(50) charset ascii collate ascii_nopad_bin unique key,
+c varchar(50) charset ascii collate ascii_nopad_bin primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb4 collate utf8mb4_danish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_general_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_general_ci unique key,
+c varchar(50) charset utf8mb3 collate utf8mb3_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_bin,
+b varchar(50) charset utf8mb3 collate utf8mb3_bin unique key,
+c varchar(50) charset utf8mb3 collate utf8mb3_bin primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_general_nopad_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_general_nopad_ci unique key,
+c varchar(50) charset utf8mb3 collate utf8mb3_general_nopad_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_nopad_bin,
+b varchar(50) charset utf8mb3 collate utf8mb3_nopad_bin unique key,
+c varchar(50) charset utf8mb3 collate utf8mb3_nopad_bin primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb4 collate utf8mb4_vietnamese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci unique key,
+c varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset gbk collate gbk_chinese_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset gbk collate gbk_chinese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset gbk collate gbk_chinese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci unique key,
+c varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset gbk collate gbk_chinese_nopad_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset gbk collate gbk_chinese_nopad_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset gbk collate gbk_chinese_nopad_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_myanmar_ci,
+b varchar(50) charset ucs2 collate ucs2_myanmar_ci unique key,
+c varchar(50) charset ucs2 collate ucs2_myanmar_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_thai_520_w2,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf16 collate utf16_thai_520_w2,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf16 collate utf16_thai_520_w2,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_general_ci,
+b varchar(50) charset ucs2 collate ucs2_general_ci unique key,
+c varchar(50) charset ucs2 collate ucs2_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_unicode_nopad_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf16 collate utf16_unicode_nopad_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf16 collate utf16_unicode_nopad_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ucs2 collate ucs2_general_mysql500_ci,
+b varchar(50) charset ucs2 collate ucs2_general_mysql500_ci unique key,
+c varchar(50) charset ucs2 collate ucs2_general_mysql500_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_spanish2_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf16 collate utf16_spanish2_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf16 collate utf16_spanish2_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci unique key,
+c varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset ascii collate ascii_bin,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset ascii collate ascii_bin,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset ascii collate ascii_bin,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_roman_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_roman_ci unique key,
+c varchar(50) charset utf8mb3 collate utf8mb3_roman_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb3 collate utf8mb3_lithuanian_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb3 collate utf8mb3_lithuanian_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb3 collate utf8mb3_lithuanian_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb4 collate utf8mb4_thai_520_w2,
+b varchar(50) charset utf8mb4 collate utf8mb4_thai_520_w2 unique key,
+c varchar(50) charset utf8mb4 collate utf8mb4_thai_520_w2 primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_persian_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb4 collate utf8mb4_persian_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb4 collate utf8mb4_persian_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_myanmar_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_myanmar_ci unique key,
+c varchar(50) charset utf8mb3 collate utf8mb3_myanmar_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb4 collate utf8mb4_german2_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb4 collate utf8mb4_german2_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb4 collate utf8mb4_german2_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf8mb3 collate utf8mb3_general_ci,
+b varchar(50) charset utf8mb3 collate utf8mb3_general_ci unique key,
+c varchar(50) charset utf8mb3 collate utf8mb3_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf8mb3 collate utf8mb3_unicode_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf8mb3 collate utf8mb3_unicode_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf8mb3 collate utf8mb3_unicode_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset latin1 collate latin1_general_cs,
+b varchar(50) charset latin1 collate latin1_general_cs unique key,
+c varchar(50) charset latin1 collate latin1_general_cs primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset latin1 collate latin1_general_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset latin1 collate latin1_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset latin1 collate latin1_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci unique key,
+c varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset ujis collate ujis_japanese_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset ujis collate ujis_japanese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset ujis collate ujis_japanese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci unique key,
+c varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset big5 collate big5_chinese_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset big5 collate big5_chinese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset big5 collate big5_chinese_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci unique key,
+c varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset latin2 collate latin2_croatian_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset latin2 collate latin2_croatian_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset latin2 collate latin2_croatian_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset ascii collate ascii_general_ci,
+b varchar(50) charset ascii collate ascii_general_ci unique key,
+c varchar(50) charset ascii collate ascii_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset latin7 collate latin7_estonian_cs,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset latin7 collate latin7_estonian_cs,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset latin7 collate latin7_estonian_cs,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(50) charset utf16 collate utf16_general_ci,
+b varchar(50) charset utf16 collate utf16_general_ci unique key,
+c varchar(50) charset utf16 collate utf16_general_ci primary key
+) engine=innodb;
+alter table tmp
+change a a varchar(50) charset utf16 collate utf16_german2_ci,
+algorithm=instant;
+alter table tmp
+modify b varchar(50) charset utf16 collate utf16_german2_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify c varchar(50) charset utf16 collate utf16_german2_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+drop table compatible_without_index;
+create table fully_incompatible (
+id int auto_increment unique key,
+from_charset char(255),
+from_collate char(255),
+to_charset char(255),
+to_collate char(255)
+);
+insert into fully_incompatible (from_charset, from_collate, to_charset, to_collate) values
+('utf8mb4', 'utf8mb4_general_ci', 'utf8mb3', 'utf8mb3_general_ci'),
+('utf8mb4', 'utf8mb4_general_ci', 'ascii', 'ascii_general_ci'),
+('utf8mb3', 'utf8mb3_general_ci', 'ascii', 'ascii_general_ci'),
+('utf8mb3', 'utf8mb3_general_ci', 'latin1', 'latin1_general_ci'),
+('utf16', 'utf16_general_ci', 'utf32', 'utf32_general_ci'),
+('latin1', 'latin1_general_ci', 'ascii', 'ascii_general_ci'),
+('ascii', 'ascii_general_ci', 'swe7', 'swe7_swedish_ci'),
+('eucjpms', 'eucjpms_japanese_nopad_ci', 'geostd8', 'geostd8_general_ci'),
+('latin1', 'latin1_general_ci', 'utf16', 'utf16_general_ci')
+;
+create table tmp (
+a varchar(150) charset utf8mb4 collate utf8mb4_general_ci,
+b text(150) charset utf8mb4 collate utf8mb4_general_ci,
+unique key b_idx (b(150))
+) engine=innodb;
+alter table tmp
+change a a varchar(150) charset utf8mb3 collate utf8mb3_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify b text charset utf8mb3 collate utf8mb3_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(150) charset utf8mb4 collate utf8mb4_general_ci,
+b text(150) charset utf8mb4 collate utf8mb4_general_ci,
+unique key b_idx (b(150))
+) engine=innodb;
+alter table tmp
+change a a varchar(150) charset ascii collate ascii_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify b text charset ascii collate ascii_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(150) charset utf8mb3 collate utf8mb3_general_ci,
+b text(150) charset utf8mb3 collate utf8mb3_general_ci,
+unique key b_idx (b(150))
+) engine=innodb;
+alter table tmp
+change a a varchar(150) charset ascii collate ascii_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify b text charset ascii collate ascii_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(150) charset utf8mb3 collate utf8mb3_general_ci,
+b text(150) charset utf8mb3 collate utf8mb3_general_ci,
+unique key b_idx (b(150))
+) engine=innodb;
+alter table tmp
+change a a varchar(150) charset latin1 collate latin1_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify b text charset latin1 collate latin1_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(150) charset utf16 collate utf16_general_ci,
+b text(150) charset utf16 collate utf16_general_ci,
+unique key b_idx (b(150))
+) engine=innodb;
+alter table tmp
+change a a varchar(150) charset utf32 collate utf32_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify b text charset utf32 collate utf32_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(150) charset latin1 collate latin1_general_ci,
+b text(150) charset latin1 collate latin1_general_ci,
+unique key b_idx (b(150))
+) engine=innodb;
+alter table tmp
+change a a varchar(150) charset ascii collate ascii_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify b text charset ascii collate ascii_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(150) charset ascii collate ascii_general_ci,
+b text(150) charset ascii collate ascii_general_ci,
+unique key b_idx (b(150))
+) engine=innodb;
+alter table tmp
+change a a varchar(150) charset swe7 collate swe7_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify b text charset swe7 collate swe7_swedish_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(150) charset eucjpms collate eucjpms_japanese_nopad_ci,
+b text(150) charset eucjpms collate eucjpms_japanese_nopad_ci,
+unique key b_idx (b(150))
+) engine=innodb;
+alter table tmp
+change a a varchar(150) charset geostd8 collate geostd8_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify b text charset geostd8 collate geostd8_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+create table tmp (
+a varchar(150) charset latin1 collate latin1_general_ci,
+b text(150) charset latin1 collate latin1_general_ci,
+unique key b_idx (b(150))
+) engine=innodb;
+alter table tmp
+change a a varchar(150) charset utf16 collate utf16_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+alter table tmp
+modify b text charset utf16 collate utf16_general_ci,
+algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+drop table tmp;
+drop table fully_incompatible;
diff --git a/mysql-test/suite/innodb/r/instant_alter_crash.result b/mysql-test/suite/innodb/r/instant_alter_crash.result
index d9e57b397f3..528bd9a905a 100644
--- a/mysql-test/suite/innodb/r/instant_alter_crash.result
+++ b/mysql-test/suite/innodb/r/instant_alter_crash.result
@@ -5,7 +5,7 @@ FLUSH TABLES;
CREATE TABLE t1(id INT PRIMARY KEY, c2 INT UNIQUE)
ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
CREATE TABLE t2 LIKE t1;
-INSERT INTO t1 VALUES(1,2);
+INSERT INTO t1 VALUES(0,2);
BEGIN;
INSERT INTO t2 VALUES(2,1);
ALTER TABLE t2 ADD COLUMN (c3 TEXT NOT NULL DEFAULT 'De finibus bonorum');
@@ -15,22 +15,53 @@ ALTER TABLE t1 ADD COLUMN (c3 TEXT NOT NULL DEFAULT ' et malorum');
connection default;
SET DEBUG_SYNC='now WAIT_FOR ddl';
SET GLOBAL innodb_flush_log_at_trx_commit=1;
-COMMIT;
+INSERT INTO t2 VALUES(3,4,'accusantium doloremque laudantium');
# Kill the server
disconnect ddl;
+# restart
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
SELECT * FROM t1;
id c2
-1 2
+0 2
SELECT * FROM t2;
id c2 c3
2 1 De finibus bonorum
+3 4 accusantium doloremque laudantium
BEGIN;
DELETE FROM t1;
ROLLBACK;
InnoDB 0 transactions not purged
+INSERT INTO t2 VALUES
+(16,1551,'Omnium enim rerum'),(128,1571,' principia parva sunt');
+connect ddl, localhost, root;
+SET DEBUG_SYNC='innodb_alter_inplace_before_commit SIGNAL ddl WAIT_FOR ever';
+ALTER TABLE t2 DROP COLUMN c3, ADD COLUMN c5 TEXT DEFAULT 'naturam abhorrere';
+connection default;
+SET DEBUG_SYNC='now WAIT_FOR ddl';
+SET GLOBAL innodb_flush_log_at_trx_commit=1;
+UPDATE t1 SET c2=c2+1;
+# Kill the server
+disconnect ddl;
+# restart
+SET @saved_frequency= @@GLOBAL.innodb_purge_rseg_truncate_frequency;
+SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
+SELECT * FROM t1;
+id c2
+0 3
+SELECT * FROM t2;
+id c2 c3
+2 1 De finibus bonorum
+3 4 accusantium doloremque laudantium
+16 1551 Omnium enim rerum
+128 1571 principia parva sunt
+BEGIN;
+INSERT INTO t1 SET id=1;
+DELETE FROM t2;
+ROLLBACK;
+InnoDB 0 transactions not purged
INSERT INTO t2 VALUES (64,42,'De finibus bonorum'), (347,33101,' et malorum');
connect ddl, localhost, root;
+ALTER TABLE t2 DROP COLUMN c3;
SET DEBUG_SYNC='innodb_alter_inplace_before_commit SIGNAL ddl WAIT_FOR ever';
ALTER TABLE t2 ADD COLUMN (c4 TEXT NOT NULL DEFAULT ' et malorum');
connection default;
@@ -39,15 +70,20 @@ SET GLOBAL innodb_flush_log_at_trx_commit=1;
DELETE FROM t1;
# Kill the server
disconnect ddl;
+# restart
SET @saved_frequency= @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
+FOUND 3 /\[Note\] InnoDB: Rolled back recovered transaction / in mysqld.1.err
SELECT * FROM t1;
id c2
SELECT * FROM t2;
-id c2 c3
-2 1 De finibus bonorum
-64 42 De finibus bonorum
-347 33101 et malorum
+id c2
+2 1
+3 4
+64 42
+16 1551
+128 1571
+347 33101
BEGIN;
INSERT INTO t1 SET id=1;
DELETE FROM t2;
@@ -59,27 +95,43 @@ N_RECS=0; LEVEL=0
header=0x010000030074 (id=0x696e66696d756d00)
header=0x010008030000 (id=0x73757072656d756d00)
t2 clustered index root page(type 18):
-N_RECS=4; LEVEL=0
-header=0x010000030088 (id=0x696e66696d756d00)
-header=0x1000100b00b9 (id=0x80000000,
+N_RECS=7; LEVEL=0
+header=0x01000003008f (id=0x0000000000000000)
+header=0x3000100c00d4 (id=0x80000000,
DB_TRX_ID=0x000000000000,
DB_ROLL_PTR=0x80000000000000,
+ BLOB=0x000000260000000000000008,
c2=NULL(4 bytes),
c3=0x44652066696e6962757320626f6e6f72756d)
-header=0x0000180900d8 (id=0x80000002,
+header=0x0000180900f4 (id=0x80000002,
DB_TRX_ID=0x000000000000,
DB_ROLL_PTR=0x80000000000000,
c2=0x80000001)
-header=0x0000200900f8 (id=0x80000040,
+header=0x0000200b0135 (id=0x80000003,
+ DB_TRX_ID=0x000000000000,
+ DB_ROLL_PTR=0x80000000000000,
+ c2=0x80000004,
+ c3=0x6163637573616e7469756d20646f6c6f72656d717565206c617564616e7469756d)
+header=0x0000280b0165 (id=0x80000010,
+ DB_TRX_ID=0x000000000000,
+ DB_ROLL_PTR=0x80000000000000,
+ c2=0x8000060f,
+ c3=0x4f6d6e69756d20656e696d20726572756d)
+header=0x000030090185 (id=0x80000040,
DB_TRX_ID=0x000000000000,
DB_ROLL_PTR=0x80000000000000,
c2=0x8000002a)
-header=0x0000280b0074 (id=0x8000015b,
+header=0x0000380b01ba (id=0x80000080,
+ DB_TRX_ID=0x000000000000,
+ DB_ROLL_PTR=0x80000000000000,
+ c2=0x80000623,
+ c3=0x207072696e63697069612070617276612073756e74)
+header=0x0000400b0074 (id=0x8000015b,
DB_TRX_ID=0x000000000000,
DB_ROLL_PTR=0x80000000000000,
c2=0x8000814d,
c3=0x206574206d616c6f72756d)
-header=0x050008030000 (id=0x73757072656d756d00)
+header=0x080008030000 (id=0x000000000000000100)
UNLOCK TABLES;
DELETE FROM t2;
InnoDB 0 transactions not purged
@@ -96,7 +148,6 @@ Table Create Table
t2 CREATE TABLE `t2` (
`id` int(11) NOT NULL,
`c2` int(11) DEFAULT NULL,
- `c3` text NOT NULL DEFAULT 'De finibus bonorum',
PRIMARY KEY (`id`),
UNIQUE KEY `c2` (`c2`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT
diff --git a/mysql-test/suite/innodb/r/instant_alter_debug.result b/mysql-test/suite/innodb/r/instant_alter_debug.result
index ebb24d2a5e9..866aeb48f67 100644
--- a/mysql-test/suite/innodb/r/instant_alter_debug.result
+++ b/mysql-test/suite/innodb/r/instant_alter_debug.result
@@ -1,5 +1,8 @@
SET @save_frequency= @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
+SET @old_instant=
+(SELECT variable_value FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column');
CREATE TABLE t1 (
pk INT AUTO_INCREMENT PRIMARY KEY,
c1 INT,
@@ -197,7 +200,8 @@ DELETE FROM t1;
connection ddl;
SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL copied WAIT_FOR logged';
ALTER TABLE t1 FORCE;
-disconnect stop_purge;
+connection stop_purge;
+COMMIT;
connection default;
SET DEBUG_SYNC = 'now WAIT_FOR copied';
InnoDB 1 transactions not purged
@@ -211,6 +215,29 @@ SELECT * FROM t1;
a b c
1 2 NULL
2 3 4
+ALTER TABLE t1 DROP b, ALGORITHM=INSTANT;
+connection stop_purge;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+connection default;
+DELETE FROM t1;
+connection ddl;
+SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL copied WAIT_FOR logged';
+ALTER TABLE t1 ADD COLUMN b INT NOT NULL DEFAULT 2 AFTER a, FORCE;
+disconnect stop_purge;
+connection default;
+SET DEBUG_SYNC = 'now WAIT_FOR copied';
+InnoDB 1 transactions not purged
+INSERT INTO t1 SET a=1;
+INSERT INTO t1 SET a=2,c=4;
+SET DEBUG_SYNC = 'now SIGNAL logged';
+connection ddl;
+UPDATE t1 SET b = b + 1 WHERE a = 2;
+connection default;
+SET DEBUG_SYNC = RESET;
+SELECT * FROM t1;
+a b c
+1 2 NULL
+2 3 4
#
# MDEV-15872 Crash in online ALTER TABLE...ADD PRIMARY KEY
# after instant ADD COLUMN ... NULL
@@ -236,4 +263,41 @@ a b c d
1 2 NULL 1
2 3 4 1
DROP TABLE t1;
+#
+# MDEV-17899 Assertion failures on rollback of instant ADD/DROP
+# MDEV-18098 Crash after rollback of instant DROP COLUMN
+#
+SET @save_dbug = @@SESSION.debug_dbug;
+SET debug_dbug='+d,ib_commit_inplace_fail_1';
+CREATE TABLE t1 (a int, b int) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1,2);
+ALTER TABLE t1 DROP COLUMN b;
+ERROR HY000: Internal error: Injected error!
+ALTER TABLE t1 DROP COLUMN b;
+ERROR HY000: Internal error: Injected error!
+ALTER TABLE t1 ADD COLUMN c INT;
+ERROR HY000: Internal error: Injected error!
+SELECT * FROM t1;
+a b
+1 2
+DROP TABLE t1;
+CREATE TABLE t1 (a int, b int) ENGINE=InnoDB;
+ALTER TABLE t1 ADD COLUMN c INT;
+ERROR HY000: Internal error: Injected error!
+BEGIN;
+INSERT INTO t1 VALUES(1, 1);
+ROLLBACK;
+ALTER TABLE t1 DROP COLUMN b;
+ERROR HY000: Internal error: Injected error!
+INSERT INTO t1 values (1,1);
+SELECT * FROM t1;
+a b
+1 1
+DROP TABLE t1;
+SET debug_dbug = @save_dbug;
+SELECT variable_value-@old_instant instants
+FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column';
+instants
+21
SET GLOBAL innodb_purge_rseg_truncate_frequency = @save_frequency;
diff --git a/mysql-test/suite/innodb/r/instant_alter_extend,utf8.rdiff b/mysql-test/suite/innodb/r/instant_alter_extend,utf8.rdiff
new file mode 100644
index 00000000000..596dfe43ab8
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_extend,utf8.rdiff
@@ -0,0 +1,29 @@
+--- instant_alter_convert.result
++++ instant_alter_convert,utf8.result
+@@ -37,7 +37,7 @@
+ test.t check status OK
+ call check_table('t');
+ name mtype prtype len
+-a 2 800FE 200
++a 13 2100FE 600
+ # CHAR enlargement
+ alter table t modify a char(220), algorithm=instant;
+ select count(a) from t where a = @bigval;
+@@ -51,7 +51,7 @@
+ test.t check status OK
+ call check_table('t');
+ name mtype prtype len
+-a 2 800FE 220
++a 13 2100FE 660
+ # Convert from VARCHAR to a bigger CHAR
+ alter table t modify a varchar(200), algorithm=instant;
+ ERROR 0A000: ALGORITHM=INSTANT is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
+@@ -72,7 +72,7 @@
+ test.t check status OK
+ call check_table('t');
+ name mtype prtype len
+-a 2 800FE 255
++a 13 2100FE 765
+ # BINARY/VARBINARY test
+ create or replace table t (a varbinary(300));
+ alter table t modify a binary(255), algorithm=instant;
diff --git a/mysql-test/suite/innodb/r/instant_alter_extend.result b/mysql-test/suite/innodb/r/instant_alter_extend.result
new file mode 100644
index 00000000000..fb03ef9a182
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_extend.result
Binary files differ
diff --git a/mysql-test/suite/innodb/r/instant_alter_import.result b/mysql-test/suite/innodb/r/instant_alter_import.result
index ab14d44bdff..2d9a39f7886 100644
--- a/mysql-test/suite/innodb/r/instant_alter_import.result
+++ b/mysql-test/suite/innodb/r/instant_alter_import.result
@@ -42,5 +42,31 @@ select * from t1;
x z
1 42
2 41
+# Remove metadata for instant DROP COLUMN, then import
+alter table t1 drop x, add column x int first, algorithm instant;
+select * from t1;
+x z
+NULL 42
+NULL 41
+alter table t1 discard tablespace;
+alter table t1 import tablespace;
+select * from t1;
+x z
+1 42
+2 41
+# Import a data file that contains instant DROP COLUMN metadata
+alter table t2 drop x;
+alter table t1 drop x, force;
+alter table t1 discard tablespace;
+flush tables t2 for export;
+unlock tables;
+alter table t1 import tablespace;
+ERROR HY000: Schema mismatch (Index field count 4 doesn't match tablespace metadata file value 5)
+select * from t1;
+ERROR HY000: Tablespace has been discarded for table `t1`
+alter table t1 import tablespace;
+ERROR HY000: Internal error: Cannot reset LSNs in table `test`.`t1` : Unsupported
+select * from t1;
+ERROR HY000: Tablespace has been discarded for table `t1`
drop table t2;
drop table t1;
diff --git a/mysql-test/suite/innodb/r/instant_alter_index_rename.result b/mysql-test/suite/innodb/r/instant_alter_index_rename.result
new file mode 100644
index 00000000000..93bbf6ee193
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_index_rename.result
@@ -0,0 +1,178 @@
+create function get_index_id(tbl_id int, index_name char(100))
+returns int
+begin
+declare res int;
+select index_id into res from information_schema.innodb_sys_indexes where
+name=index_name and table_id = tbl_id;
+return res;
+end|
+create table t (
+pk int primary key,
+a int,
+b int,
+c int,
+unique index a_key (a),
+key c_key (c)
+) engine=innodb stats_persistent=1;
+insert into t values (1, 1, 1, 1);
+set @table_id = (select table_id from information_schema.innodb_sys_tables where name='test/t');
+set @a_key_id = get_index_id(@table_id, 'a_key');
+set @c_key_id = get_index_id(@table_id, 'c_key');
+set @primary_id = get_index_id(@table_id, 'primary');
+select distinct(index_name) from mysql.innodb_index_stats where table_name = 't';
+index_name
+PRIMARY
+a_key
+c_key
+alter table t
+drop index a_key,
+add unique index a_key_strikes_back (a);
+select distinct(index_name) from mysql.innodb_index_stats where table_name = 't';
+index_name
+PRIMARY
+a_key_strikes_back
+c_key
+check table t;
+Table Op Msg_type Msg_text
+test.t check status OK
+select @a_key_id = get_index_id(@table_id, 'a_key_strikes_back'),
+@c_key_id = get_index_id(@table_id, 'c_key'),
+@primary_id = get_index_id(@table_id, 'primary');
+@a_key_id = get_index_id(@table_id, 'a_key_strikes_back') @c_key_id = get_index_id(@table_id, 'c_key') @primary_id = get_index_id(@table_id, 'primary')
+1 1 1
+set @a_key_strikes_back_id = get_index_id(@table_id, 'a_key_strikes_back');
+set @c_key_id = get_index_id(@table_id, 'c_key');
+set @primary_id = get_index_id(@table_id, 'primary');
+alter table t
+drop index a_key_strikes_back,
+add unique index a_key_returns (a),
+drop primary key,
+add primary key (pk),
+add unique index b_key (b);
+check table t;
+Table Op Msg_type Msg_text
+test.t check status OK
+select @a_key_strikes_back_id = get_index_id(@table_id, 'a_key_returns'),
+@c_key_id = get_index_id(@table_id, 'c_key'),
+@primary_id = get_index_id(@table_id, 'primary');
+@a_key_strikes_back_id = get_index_id(@table_id, 'a_key_returns') @c_key_id = get_index_id(@table_id, 'c_key') @primary_id = get_index_id(@table_id, 'primary')
+1 1 1
+set @a_key_returns_id = get_index_id(@table_id, 'a_key_returns');
+set @b_key_id = get_index_id(@table_id, 'b_key');
+set @c_key_id = get_index_id(@table_id, 'c_key');
+set @primary_id = get_index_id(@table_id, 'primary');
+alter table t
+drop key c_key,
+add key c_key2 (c);
+check table t;
+Table Op Msg_type Msg_text
+test.t check status OK
+select @a_key_returns_id = get_index_id(@table_id, 'a_key_returns'),
+@b_key_id = get_index_id(@table_id, 'b_key'),
+@c_key_id = get_index_id(@table_id, 'c_key2'),
+@primary_id = get_index_id(@table_id, 'primary');
+@a_key_returns_id = get_index_id(@table_id, 'a_key_returns') @b_key_id = get_index_id(@table_id, 'b_key') @c_key_id = get_index_id(@table_id, 'c_key2') @primary_id = get_index_id(@table_id, 'primary')
+1 1 1 1
+drop table t;
+drop function get_index_id;
+create table errors (
+a int,
+unique key a_key (a),
+b int
+) engine=innodb;
+alter table errors
+drop key a_key,
+drop key a_key,
+add unique key a_key2 (a);
+ERROR 42000: Can't DROP INDEX `a_key`; check that it exists
+alter table errors
+drop key a_key,
+drop key a_key2,
+add unique key a_key2 (a);
+ERROR 42000: Can't DROP INDEX `a_key2`; check that it exists
+alter table errors
+add key b_key (b),
+drop key b_key,
+add key bb_key (b);
+ERROR 42000: Can't DROP INDEX `b_key`; check that it exists
+alter table errors
+drop key a_key,
+add key a_key2 (a),
+drop key a_key,
+add key a_key2 (a);
+ERROR 42000: Can't DROP INDEX `a_key`; check that it exists
+drop table errors;
+create table corrupted (
+a int,
+key a_key (a)
+) engine=innodb;
+insert into corrupted values (1);
+select * from corrupted;
+a
+1
+SET @save_dbug = @@SESSION.debug_dbug;
+SET debug_dbug = '+d,dict_set_index_corrupted';
+check table corrupted;
+Table Op Msg_type Msg_text
+test.corrupted check Warning InnoDB: Index a_key is marked as corrupted
+test.corrupted check error Corrupt
+SET debug_dbug = @save_dbug;
+select * from corrupted;
+ERROR HY000: Index corrupted is corrupted
+alter table corrupted
+drop key a_key,
+add key a_key2 (a);
+ERROR HY000: Index a_key is corrupted
+alter table corrupted
+drop key a_key;
+select * from corrupted;
+a
+1
+check table corrupted;
+Table Op Msg_type Msg_text
+test.corrupted check status OK
+drop table corrupted;
+create table t (
+a int,
+unique key a_key (a)
+) engine=innodb stats_persistent=1;
+SET @save_dbug = @@SESSION.debug_dbug;
+SET debug_dbug = '+d,ib_rename_index_fail1';
+alter table t
+drop key a_key,
+add unique key a_key2 (a),
+algorithm=instant;
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+SET debug_dbug = @save_dbug;
+alter table t
+drop key a_key,
+add unique key `GEN_CLUST_INDEX` (a),
+algorithm=instant;
+ERROR 42000: Incorrect index name 'GEN_CLUST_INDEX'
+show create table t;
+Table Create Table
+t CREATE TABLE `t` (
+ `a` int(11) DEFAULT NULL,
+ UNIQUE KEY `a_key` (`a`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 STATS_PERSISTENT=1
+drop table t;
+create table rename_column_and_index (
+a int,
+unique index a_key(a)
+) engine=innodb;
+insert into rename_column_and_index values (1), (3);
+alter table rename_column_and_index
+change a aa int,
+drop key a_key,
+add unique key aa_key(aa),
+algorithm=instant;
+show create table rename_column_and_index;
+Table Create Table
+rename_column_and_index CREATE TABLE `rename_column_and_index` (
+ `aa` int(11) DEFAULT NULL,
+ UNIQUE KEY `aa_key` (`aa`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+check table rename_column_and_index;
+Table Op Msg_type Msg_text
+test.rename_column_and_index check status OK
+drop table rename_column_and_index;
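The index-id comparisons above (each returning 1) show that a paired DROP INDEX/ADD INDEX on the same column list within one ALTER is carried out as a rename, so the InnoDB index id is preserved and the operation qualifies for ALGORITHM=INSTANT. The pattern in isolation, with illustrative names:

    create table t (a int, unique key a_key (a)) engine=innodb;
    insert into t values (1);
    -- dropped and re-added on the same column: executed as a rename
    alter table t drop key a_key, add unique key a_key2 (a), algorithm=instant;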
diff --git a/mysql-test/suite/innodb/r/instant_alter_limit,32k.rdiff b/mysql-test/suite/innodb/r/instant_alter_limit,32k.rdiff
new file mode 100644
index 00000000000..5e46c66ce73
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_limit,32k.rdiff
@@ -0,0 +1,9 @@
+--- instant_alter_limit.result
++++ instant_alter_limit.result
+@@ -42,5 +42,5 @@
+ FROM information_schema.global_status
+ WHERE variable_name = 'innodb_instant_alter_column';
+ instants
+-502
++506
+ DROP TABLE t;
diff --git a/mysql-test/suite/innodb/r/instant_alter_limit,4k.rdiff b/mysql-test/suite/innodb/r/instant_alter_limit,4k.rdiff
new file mode 100644
index 00000000000..795116ffae4
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_limit,4k.rdiff
@@ -0,0 +1,9 @@
+--- instant_alter_limit.result
++++ instant_alter_limit.result
+@@ -42,5 +42,5 @@
+ FROM information_schema.global_status
+ WHERE variable_name = 'innodb_instant_alter_column';
+ instants
+-502
++474
+ DROP TABLE t;
diff --git a/mysql-test/suite/innodb/r/instant_alter_limit,64k.rdiff b/mysql-test/suite/innodb/r/instant_alter_limit,64k.rdiff
new file mode 100644
index 00000000000..5e46c66ce73
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_limit,64k.rdiff
@@ -0,0 +1,9 @@
+--- instant_alter_limit.result
++++ instant_alter_limit.result
+@@ -42,5 +42,5 @@
+ FROM information_schema.global_status
+ WHERE variable_name = 'innodb_instant_alter_column';
+ instants
+-502
++506
+ DROP TABLE t;
diff --git a/mysql-test/suite/innodb/r/instant_alter_limit,8k.rdiff b/mysql-test/suite/innodb/r/instant_alter_limit,8k.rdiff
new file mode 100644
index 00000000000..37d2ae67c4e
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_limit,8k.rdiff
@@ -0,0 +1,9 @@
+--- instant_alter_limit.result
++++ instant_alter_limit.result
+@@ -42,5 +42,5 @@
+ FROM information_schema.global_status
+ WHERE variable_name = 'innodb_instant_alter_column';
+ instants
+-502
++492
+ DROP TABLE t;
diff --git a/mysql-test/suite/innodb/r/instant_alter_limit.result b/mysql-test/suite/innodb/r/instant_alter_limit.result
new file mode 100644
index 00000000000..e169c40d462
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_limit.result
@@ -0,0 +1,46 @@
+SET @old_instant=
+(SELECT variable_value FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column');
+CREATE TABLE t(a INT PRIMARY KEY, b INT, c INT, d INT, e INT)
+ENGINE=InnoDB;
+INSERT INTO t VALUES(1,2,3,4,5);
+SELECT * FROM t;
+b d a c e
+NULL NULL 1 NULL NULL
+ALTER TABLE t DROP b, DROP c, DROP d, DROP e,
+ADD COLUMN b INT, ALGORITHM=INSTANT;
+ERROR 0A000: ALGORITHM=INSTANT is not supported for this operation. Try ALGORITHM=INPLACE
+ALTER TABLE t CHANGE COLUMN b beta INT AFTER a, ALGORITHM=INSTANT;
+ALTER TABLE t DROP e, DROP c, DROP d, ALGORITHM=INSTANT;
+SELECT * FROM t;
+a beta
+1 NULL
+ALTER TABLE t DROP COLUMN beta, ALGORITHM=INSTANT;
+ALTER TABLE t ADD COLUMN b INT NOT NULL, ALGORITHM=INSTANT;
+ERROR 0A000: ALGORITHM=INSTANT is not supported for this operation. Try ALGORITHM=INPLACE
+SELECT variable_value-@old_instant instants
+FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column';
+instants
+256
+ALTER TABLE t ADD COLUMN b INT NOT NULL;
+SELECT variable_value-@old_instant instants
+FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column';
+instants
+256
+SELECT * FROM t;
+a b
+1 0
+ALTER TABLE t ADD COLUMN (c CHAR(255) NOT NULL, d BIGINT NOT NULL),
+ALGORITHM=INSTANT;
+UPDATE t SET b=b+1,d=d+1,c='foo';
+SELECT * FROM t;
+a b c d
+1 1 foo 1
+SELECT variable_value-@old_instant instants
+FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column';
+instants
+502
+DROP TABLE t;
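The instants values above are differences of the Innodb_instant_alter_column status counter, which increments whenever the metadata-only path is taken. Comparing the counter before and after a statement is a quick way to confirm an ALTER was instant; a sketch using the same query as the result file:

    SET @old_instant=
    (SELECT variable_value FROM information_schema.global_status
     WHERE variable_name = 'innodb_instant_alter_column');
    CREATE TABLE t(a INT PRIMARY KEY, b INT) ENGINE=InnoDB;
    ALTER TABLE t ADD COLUMN c INT, ALGORITHM=INSTANT;
    -- a positive difference means the ALTER avoided a table rebuild
    SELECT variable_value-@old_instant AS instants
    FROM information_schema.global_status
    WHERE variable_name = 'innodb_instant_alter_column';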
diff --git a/mysql-test/suite/innodb/r/instant_alter_null.result b/mysql-test/suite/innodb/r/instant_alter_null.result
new file mode 100644
index 00000000000..f49d60fc301
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_null.result
@@ -0,0 +1,56 @@
+create table t (a int NOT NULL) engine=innodb row_format= compressed;
+alter table t modify a int NULL, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported for this operation. Try ALGORITHM=INPLACE
+drop table t;
+create table t (a int NOT NULL) engine=innodb row_format= dynamic;
+alter table t modify a int NULL, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported for this operation. Try ALGORITHM=INPLACE
+drop table t;
+create table t (a int NOT NULL) engine=innodb row_format= compact;
+alter table t modify a int NULL, algorithm=instant;
+ERROR 0A000: ALGORITHM=INSTANT is not supported for this operation. Try ALGORITHM=INPLACE
+drop table t;
+create table t (
+id int primary key,
+a int NOT NULL default 0,
+b int NOT NULL default 0,
+c int NOT NULL default 0,
+index idx (a,b,c)
+) engine=innodb row_format=redundant;
+insert into t (id, a) values (0, NULL);
+ERROR 23000: Column 'a' cannot be null
+insert into t (id, b) values (0, NULL);
+ERROR 23000: Column 'b' cannot be null
+insert into t (id, c) values (0, NULL);
+ERROR 23000: Column 'c' cannot be null
+insert into t values (1,1,1,1);
+set @id = (select table_id from information_schema.innodb_sys_tables
+where name = 'test/t');
+select * from information_schema.innodb_sys_columns where table_id=@id;
+TABLE_ID NAME POS MTYPE PRTYPE LEN
+TABLE_ID id 0 6 1283 4
+TABLE_ID a 1 6 1283 4
+TABLE_ID b 2 6 1283 4
+TABLE_ID c 3 6 1283 4
+alter table t modify a int NULL, algorithm=instant;
+insert into t values (2, NULL, 2, 2);
+alter table t modify b int NULL, algorithm=nocopy;
+insert into t values (3, NULL, NULL, 3);
+alter table t modify c int NULL, algorithm=inplace;
+insert into t values (4, NULL, NULL, NULL);
+select * from information_schema.innodb_sys_columns where table_id=@id;
+TABLE_ID NAME POS MTYPE PRTYPE LEN
+TABLE_ID id 0 6 1283 4
+TABLE_ID a 1 6 1027 4
+TABLE_ID b 2 6 1027 4
+TABLE_ID c 3 6 1027 4
+select * from t;
+id a b c
+4 NULL NULL NULL
+3 NULL NULL 3
+2 NULL 2 2
+1 1 1 1
+check table t;
+Table Op Msg_type Msg_text
+test.t check status OK
+drop table t;
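The result above shows that relaxing NOT NULL with ALGORITHM=INSTANT is refused for COMPRESSED, DYNAMIC and COMPACT tables but accepted for ROW_FORMAT=REDUNDANT, where the change needs no record conversion. Condensed to a sketch:

    create table t_compact (a int not null) engine=innodb row_format=compact;
    alter table t_compact modify a int null, algorithm=instant;    -- refused, try ALGORITHM=INPLACE
    create table t_redundant (a int not null) engine=innodb row_format=redundant;
    alter table t_redundant modify a int null, algorithm=instant;  -- accepted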
diff --git a/mysql-test/suite/innodb/r/instant_alter_purge,release.rdiff b/mysql-test/suite/innodb/r/instant_alter_purge,release.rdiff
new file mode 100644
index 00000000000..53d2be18f9c
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_purge,release.rdiff
@@ -0,0 +1,18 @@
+--- instant_alter_purge.result
++++ instant_alter_purge,release.result
+@@ -32,15 +32,11 @@
+ START TRANSACTION WITH CONSISTENT SNAPSHOT;
+ connection default;
+ DELETE FROM t1;
+-SET DEBUG_SYNC='innodb_commit_inplace_alter_table_enter SIGNAL go WAIT_FOR do';
+ ALTER TABLE t1 ADD COLUMN f3 INT;
+ connection purge_control;
+-SET DEBUG_SYNC='now WAIT_FOR go';
+ COMMIT;
+ InnoDB 0 transactions not purged
+-SET DEBUG_SYNC='now SIGNAL do';
+ disconnect purge_control;
+ connection default;
+-SET DEBUG_SYNC=RESET;
+ DROP TABLE t1;
+ SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
diff --git a/mysql-test/suite/innodb/r/instant_alter_purge.result b/mysql-test/suite/innodb/r/instant_alter_purge.result
new file mode 100644
index 00000000000..a3643610f04
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_alter_purge.result
@@ -0,0 +1,46 @@
+SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
+SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
+#
+# MDEV-17793 Crash in purge after instant DROP and emptying the table
+#
+connect prevent_purge,localhost,root;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+connection default;
+CREATE TABLE t1 (f1 INT, f2 INT) ENGINE=InnoDB;
+INSERT INTO t1 () VALUES ();
+ALTER TABLE t1 DROP f2, ADD COLUMN f2 INT;
+ALTER TABLE t1 DROP f1;
+DELETE FROM t1;
+connection prevent_purge;
+COMMIT;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+connection default;
+ALTER TABLE t1 ADD COLUMN extra TINYINT UNSIGNED NOT NULL DEFAULT 42;
+InnoDB 1 transactions not purged
+ALTER TABLE t1 DROP extra;
+disconnect prevent_purge;
+InnoDB 0 transactions not purged
+DROP TABLE t1;
+#
+# MDEV-17813 Crash in instant ALTER TABLE due to purge
+# concurrently emptying table
+#
+CREATE TABLE t1 (f2 INT) ENGINE=InnoDB;
+INSERT INTO t1 SET f2=1;
+ALTER TABLE t1 ADD COLUMN f1 INT;
+connect purge_control,localhost,root;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+connection default;
+DELETE FROM t1;
+SET DEBUG_SYNC='innodb_commit_inplace_alter_table_enter SIGNAL go WAIT_FOR do';
+ALTER TABLE t1 ADD COLUMN f3 INT;
+connection purge_control;
+SET DEBUG_SYNC='now WAIT_FOR go';
+COMMIT;
+InnoDB 0 transactions not purged
+SET DEBUG_SYNC='now SIGNAL do';
+disconnect purge_control;
+connection default;
+SET DEBUG_SYNC=RESET;
+DROP TABLE t1;
+SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
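The MDEV-17813 case above uses DEBUG_SYNC (available only in debug builds, which is why instant_alter_purge,release.rdiff strips these lines) to hold the ALTER inside its commit phase while a second connection controls purge. The coordination pattern, reduced to its core with the connection switches shown as comments:

    -- connection default (debug build assumed):
    SET DEBUG_SYNC='innodb_commit_inplace_alter_table_enter SIGNAL go WAIT_FOR do';
    ALTER TABLE t1 ADD COLUMN f3 INT;   -- blocks at the sync point
    -- connection purge_control:
    SET DEBUG_SYNC='now WAIT_FOR go';
    COMMIT;                             -- allows purge to empty the table
    SET DEBUG_SYNC='now SIGNAL do';     -- releases the blocked ALTER
    -- connection default, after the ALTER returns:
    SET DEBUG_SYNC=RESET;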
diff --git a/mysql-test/suite/innodb/r/instant_alter_rollback.result b/mysql-test/suite/innodb/r/instant_alter_rollback.result
index 2c2083adc46..b51e067c647 100644
--- a/mysql-test/suite/innodb/r/instant_alter_rollback.result
+++ b/mysql-test/suite/innodb/r/instant_alter_rollback.result
@@ -1,29 +1,52 @@
FLUSH TABLES;
#
# MDEV-11369: Instant ADD COLUMN for InnoDB
+# MDEV-15562: Instant DROP COLUMN or changing the order of columns
#
connect to_be_killed, localhost, root;
+SET @old_instant=
+(SELECT variable_value FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column');
CREATE TABLE empty (id INT PRIMARY KEY, c2 INT UNIQUE) ENGINE=InnoDB;
CREATE TABLE once LIKE empty;
CREATE TABLE twice LIKE empty;
+CREATE TABLE thrice LIKE empty;
INSERT INTO once SET id=1,c2=1;
INSERT INTO twice SET id=1,c2=1;
+INSERT INTO thrice SET id=1,c2=1;
ALTER TABLE empty ADD COLUMN (d1 INT DEFAULT 15);
ALTER TABLE once ADD COLUMN (d1 INT DEFAULT 20);
ALTER TABLE twice ADD COLUMN (d1 INT DEFAULT 20);
+ALTER TABLE thrice ADD COLUMN (d1 INT DEFAULT 20);
ALTER TABLE twice ADD COLUMN
(d2 INT NOT NULL DEFAULT 10,
d3 VARCHAR(15) NOT NULL DEFAULT 'var och en char');
+ALTER TABLE thrice ADD COLUMN
+(d2 INT NOT NULL DEFAULT 10,
+d3 TEXT NOT NULL DEFAULT 'con');
+ALTER TABLE thrice DROP c2, DROP d3, CHANGE d2 d3 INT NOT NULL FIRST;
+SELECT variable_value-@old_instant instants
+FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column';
+instants
+7
BEGIN;
INSERT INTO empty set id=0,c2=42;
UPDATE once set c2=c2+1;
UPDATE twice set c2=c2+1;
+UPDATE thrice set d3=d3+1;
INSERT INTO twice SET id=2,c2=0,d3='';
+INSERT INTO thrice SET id=2,d3=0;
+DELETE FROM empty;
+DELETE FROM once;
+DELETE FROM twice;
+DELETE FROM thrice;
connection default;
SET GLOBAL innodb_flush_log_at_trx_commit=1;
CREATE TABLE foo(a INT PRIMARY KEY) ENGINE=InnoDB;
# Kill the server
disconnect to_be_killed;
+# restart
SET @saved_frequency= @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
DROP TABLE foo;
@@ -37,4 +60,7 @@ id c2 d1
SELECT * FROM twice;
id c2 d1 d2 d3
1 1 20 10 var och en char
-DROP TABLE empty, once, twice;
+SELECT * FROM thrice;
+d3 id d1
+10 1 20
+DROP TABLE empty, once, twice, thrice;
diff --git a/mysql-test/suite/innodb/r/instant_auto_inc.result b/mysql-test/suite/innodb/r/instant_auto_inc.result
new file mode 100644
index 00000000000..a893b91726b
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_auto_inc.result
@@ -0,0 +1,21 @@
+create table t(id int primary key, a int) engine=InnoDB;
+insert into t (id, a) values (1, 1);
+alter table t modify column id int auto_increment;
+check table t;
+Table Op Msg_type Msg_text
+test.t check status OK
+insert into t (a) values (2);
+alter table t modify column id int, algorithm=instant;
+check table t;
+Table Op Msg_type Msg_text
+test.t check status OK
+insert into t (id, a) values (3, 3);
+select * from t;
+id a
+1 1
+2 2
+3 3
+check table t;
+Table Op Msg_type Msg_text
+test.t check status OK
+drop table t;
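The test above adds AUTO_INCREMENT without forcing an algorithm and then removes it with ALGORITHM=INSTANT, checking the table after each step. The same sequence as a sketch:

    create table t (id int primary key, a int) engine=innodb;
    insert into t (id, a) values (1, 1);
    alter table t modify column id int auto_increment;      -- algorithm chosen by the server
    insert into t (a) values (2);                           -- id is generated
    alter table t modify column id int, algorithm=instant;  -- dropping AUTO_INCREMENT is instant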
diff --git a/mysql-test/suite/innodb/r/instant_drop.result b/mysql-test/suite/innodb/r/instant_drop.result
new file mode 100644
index 00000000000..2b82eb3c805
--- /dev/null
+++ b/mysql-test/suite/innodb/r/instant_drop.result
@@ -0,0 +1,205 @@
+create table t1(f1 int not null, f2 int not null, f3 int not null)engine=innodb;
+insert into t1 values(1, 2, 3),(4, 5, 6);
+alter table t1 drop column f2, algorithm=instant;
+select * from t1;
+f1 f3
+1 3
+4 6
+insert into t1 values(1,2);
+select * from t1;
+f1 f3
+1 3
+4 6
+1 2
+alter table t1 add column f4 int not null default 5, algorithm=instant;
+select * from t1;
+f1 f3 f4
+1 3 5
+4 6 5
+1 2 5
+alter table t1 drop column f1, algorithm=instant;
+select * from t1;
+f3 f4
+3 5
+6 5
+2 5
+insert into t1 values(7, 9);
+select * from t1;
+f3 f4
+3 5
+6 5
+2 5
+7 9
+alter table t1 add column f5 blob default repeat('aaa', 950), drop column f4, algorithm=instant;
+select * from t1;
+f3 f5
+3 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+6 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+2 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+7 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+select f3 from t1;
+f3
+3
+6
+2
+7
+update t1 set f3 = 10 where f3 > 2;
+select * from t1;
+f3 f5
+10 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+10 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+2 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+10 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+delete from t1 where f3 = 10;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f3` int(11) NOT NULL,
+ `f5` blob DEFAULT repeat('aaa',950)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+select f3 from t1;
+f3
+2
+update t1 set f5 = 'world';
+select * from t1;
+f3 f5
+2 world
+drop table t1;
+create table t1(f1 int, f2 int not null, index idx(f2))engine=innodb;
+insert into t1 values(1, 2);
+alter table t1 drop column f1, add column f3 varchar(100) default 'thiru', algorithm=instant;
+select * from t1 force index (idx);
+f2 f3
+2 thiru
+alter table t1 drop column f3, algorithm=instant;
+select * from t1;
+f2
+2
+begin;
+insert into t1 values(10);
+select * from t1;
+f2
+2
+10
+update t1 set f2 = 100;
+select * from t1;
+f2
+100
+100
+delete from t1 where f2 = 100;
+select * from t1;
+f2
+rollback;
+select * from t1;
+f2
+2
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `f2` int(11) NOT NULL,
+ KEY `idx` (`f2`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+drop table t1;
+create table t1(f1 int, f2 int not null)engine=innodb;
+insert into t1 values(1, 2);
+alter table t1 drop column f2, algorithm=instant;
+insert into t1 values(NULL);
+select * from t1;
+f1
+1
+NULL
+drop table t1;
+create table t1(f1 int not null, f2 int not null)engine=innodb;
+insert into t1 values(1, 2);
+alter table t1 add column f5 int default 10, algorithm=instant;
+alter table t1 add column f3 int not null default 100, algorithm=instant;
+alter table t1 add column f4 int default 100, drop column f3, algorithm=instant;
+insert into t1 values(2, 3, 20, 100);
+select * from t1;
+f1 f2 f5 f4
+1 2 10 100
+2 3 20 100
+drop table t1;
+create table t1(f1 int not null, f2 int not null) engine=innodb;
+insert into t1 values(1, 1);
+alter table t1 drop column f2, add column f3 int default 3, algorithm=instant;
+select * from t1;
+f1 f3
+1 3
+update t1 set f3 = 19;
+select * from t1;
+f1 f3
+1 19
+alter table t1 drop column f1, add column f5 tinyint default 10 first,
+algorithm=instant;
+insert into t1 values(4, 10);
+select * from t1;
+f5 f3
+10 19
+4 10
+create table t2(f1 int, f2 int not null) engine=innodb;
+insert into t2(f1, f2) values(1, 2);
+alter table t2 drop column f2, add column f4 varchar(100) default repeat('a', 20), add column f5 int default 10, algorithm=instant;
+select * from t2;
+f1 f4 f5
+1 aaaaaaaaaaaaaaaaaaaa 10
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `f1` int(11) DEFAULT NULL,
+ `f4` varchar(100) DEFAULT repeat('a',20),
+ `f5` int(11) DEFAULT 10
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+alter table t2 add column f6 char(100) default repeat('a', 99), algorithm=instant;
+create table t3(f1 int, f2 int not null)engine=innodb;
+insert into t3 values(1, 2);
+alter table t3 drop column f2, add column f3 int default 1, add column f4 int default 4, algorithm=instant;
+create table t4(a varchar(1), b int, c int, primary key(a,b))engine=innodb;
+insert into t4 values('4',5,6);
+alter table t4 drop column c;
+# restart
+select * from t1;
+f5 f3
+10 19
+4 10
+alter table t1 add column f6 int default 9,drop column f5, algorithm = instant;
+insert into t1 values(4, 9);
+alter table t1 force, algorithm=inplace;
+select * from t1;
+f3 f6
+19 9
+10 9
+4 9
+select * from t2;
+f1 f4 f5 f6
+1 aaaaaaaaaaaaaaaaaaaa 10 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+alter table t2 force, algorithm=inplace;
+select * from t2;
+f1 f4 f5 f6
+1 aaaaaaaaaaaaaaaaaaaa 10 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `f1` int(11) DEFAULT NULL,
+ `f4` varchar(100) DEFAULT repeat('a',20),
+ `f5` int(11) DEFAULT 10,
+ `f6` char(100) DEFAULT repeat('a',99)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+select * from t3;
+f1 f3 f4
+1 1 4
+alter table t3 add column f5 char(100) default repeat('a', 99), algorithm=instant;
+select * from t4;
+a b
+4 5
+alter table t4 add column d varchar(5) default 'fubar';
+insert into t4 values('',0,'snafu');
+# restart
+select * from t3;
+f1 f3 f4 f5
+1 1 4 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+select * from t4;
+a b d
+ 0 snafu
+4 5 fubar
+drop table t1,t2,t3,t4;
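instant_drop.result above covers the MDEV-15562 behaviour: DROP COLUMN, alone or combined with ADD COLUMN, is a metadata-only change, and existing rows keep their stored format until a rebuilding ALTER is issued. The core flow as a minimal sketch:

    create table t1 (f1 int not null, f2 int not null, f3 int not null) engine=innodb;
    insert into t1 values (1, 2, 3), (4, 5, 6);
    -- metadata-only: old rows are reinterpreted through the new definition
    alter table t1 drop column f2, algorithm=instant;
    select * from t1;                   -- returns (1,3) and (4,6)
    -- a rebuilding ALTER later materialises the current definition
    alter table t1 force, algorithm=inplace;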
diff --git a/mysql-test/suite/innodb/r/log_alter_table.result b/mysql-test/suite/innodb/r/log_alter_table.result
index 9cb9ed77e33..9de89bebaa6 100644
--- a/mysql-test/suite/innodb/r/log_alter_table.result
+++ b/mysql-test/suite/innodb/r/log_alter_table.result
@@ -1,3 +1,4 @@
+# restart
#
# Bug#21801423 INNODB REDO LOG DOES NOT INDICATE WHEN
# FILES ARE CREATED
@@ -10,9 +11,11 @@ INSERT INTO t1 VALUES (1,2);
ALTER TABLE t1 ADD PRIMARY KEY(a), LOCK=SHARED, ALGORITHM=INPLACE;
ALTER TABLE t1 DROP INDEX b, ADD INDEX (b), LOCK=SHARED;
# Kill the server
+# restart: --debug=d,ib_log
FOUND 2 /scan \d+: multi-log rec MLOG_FILE_CREATE2 len \d+ page \d+:0/ in mysqld.1.err
FOUND 3 /scan \d+: log rec MLOG_INDEX_LOAD/ in mysqld.1.err
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
+# restart
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/log_corruption.result b/mysql-test/suite/innodb/r/log_corruption.result
index b174546c111..4ec34908035 100644
--- a/mysql-test/suite/innodb/r/log_corruption.result
+++ b/mysql-test/suite/innodb/r/log_corruption.result
@@ -1,10 +1,12 @@
# redo log from before MariaDB 10.2.2/MySQL 5.7.9
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2\./ in mysqld.1.err
# redo log from before MariaDB 10.2.2, with corrupted log checkpoint
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -12,12 +14,14 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and we did not find a valid checkpoint/ in mysqld.1.err
FOUND 2 /Plugin 'InnoDB' registration as a STORAGE ENGINE failed/ in mysqld.1.err
# redo log from before MariaDB 10.2.2, with corrupted log block
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Upgrade after a crash is not supported. This redo log was created before MariaDB 10\.2\.2, and it appears corrupted/ in mysqld.1.err
# empty redo log from before MariaDB 10.2.2
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5 --innodb-log-file-size=1m
SELECT COUNT(*) FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -25,24 +29,28 @@ COUNT(*)
1
FOUND 1 /InnoDB: Upgrading redo log:/ in mysqld.1.err
# redo log from "after" MariaDB 10.2.2, but with invalid header checksum
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Invalid redo log header checksum/ in mysqld.1.err
# distant future redo log format, with valid header checksum
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Unsupported redo log format. The redo log was created with malicious intentions, or perhaps\./ in mysqld.1.err
# valid header, but old-format checkpoint blocks
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: No valid checkpoint found .corrupted redo log/ in mysqld.1.err
# valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2, invalid block checksum
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -50,6 +58,7 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 3362026715 found: 144444122/ in mysqld.1.err
FOUND 1 /InnoDB: Missing MLOG_CHECKPOINT between the checkpoint 1213964 and the end 1213952\./ in mysqld.1.err
# same, but with current-version header
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -57,6 +66,7 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 2 /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 3362026715 found: 144444122/ in mysqld.1.err
FOUND 2 /InnoDB: Missing MLOG_CHECKPOINT between the checkpoint 1213964 and the end 1213952\./ in mysqld.1.err
# --innodb-force-recovery=6 (skip the entire redo log)
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=6
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -64,11 +74,13 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
InnoDB YES Supports transactions, row-level locking, foreign keys and encryption for tables YES YES YES
FOUND 1 /\[Note\] InnoDB: .* started; log sequence number 0/ in mysqld.1.err
# valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2, invalid block number
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
# --innodb-force-recovery=6 (skip the entire redo log)
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=6
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -76,6 +88,7 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
InnoDB YES Supports transactions, row-level locking, foreign keys and encryption for tables YES YES YES
# Test a corrupted MLOG_FILE_NAME record.
# valid header, valid checkpoint 1, all-zero (invalid) checkpoint 2
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -88,12 +101,14 @@ FOUND 1 /len 22. hex 38000000000012860cb7809781e80006626f67757300. asc 8
FOUND 1 /InnoDB: Set innodb_force_recovery to ignore this error/ in mysqld.1.err
# Test a corrupted MLOG_FILE_NAME record.
# valid header, invalid checkpoint 1, valid checkpoint 2, invalid block
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Invalid log block checksum. block: 2372 checkpoint no: 1 expected: 2454333373 found: 150151/ in mysqld.1.err
# valid header, invalid checkpoint 1, valid checkpoint 2, invalid log record
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -101,18 +116,21 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: MLOG_FILE_NAME incorrect:bigot/ in mysqld.1.err
FOUND 1 /len 22; hex 38000000000012860cb7809781e800066269676f7400; asc 8 bigot ;/ in mysqld.1.err
# 10.2 missing MLOG_FILE_NAME or MLOG_FILE_DELETE before MLOG_CHECKPOINT
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Missing MLOG_FILE_NAME or MLOG_FILE_DELETE before MLOG_CHECKPOINT for tablespace 42/ in mysqld.1.err
# 10.3 missing MLOG_FILE_NAME or MLOG_FILE_DELETE before MLOG_CHECKPOINT
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 2 /InnoDB: Missing MLOG_FILE_NAME or MLOG_FILE_DELETE before MLOG_CHECKPOINT for tablespace 42/ in mysqld.1.err
# Empty 10.3 redo log
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5 --innodb-log-file-size=1m
SELECT COUNT(*) FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -120,6 +138,7 @@ COUNT(*)
1
FOUND 1 /InnoDB: .* started; log sequence number 121397[09]/ in mysqld.1.err
# Empty 10.2 redo log
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5 --innodb-log-file-size=1m
SELECT COUNT(*) FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -127,6 +146,7 @@ COUNT(*)
1
FOUND 3 /InnoDB: Upgrading redo log:/ in mysqld.1.err
# Minimal MariaDB 10.1.21 encrypted redo log
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_corruption --innodb-force-recovery=5
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -137,3 +157,4 @@ ib_buffer_pool
ib_logfile0
ib_logfile1
ibdata1
+# restart
diff --git a/mysql-test/suite/innodb/r/log_file.result b/mysql-test/suite/innodb/r/log_file.result
index b93bfc0d02b..a4599ef303f 100644
--- a/mysql-test/suite/innodb/r/log_file.result
+++ b/mysql-test/suite/innodb/r/log_file.result
@@ -2,6 +2,7 @@
# Bug#16691130 - ASSERT WHEN INNODB_LOG_GROUP_HOME_DIR DOES NOT EXIST
# Bug#16418661 - CHANGING NAME IN FOR INNODB_DATA_FILE_PATH SHOULD NOT SUCCEED WITH LOG FILES
# Start mysqld without the possibility to create innodb_undo_tablespaces
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -9,6 +10,7 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /\[ERROR\] InnoDB: Could not create undo tablespace '.*undo002'/ in mysqld.1.err
# Remove undo001,undo002,ibdata1,ibdata2,ib_logfile1,ib_logfile2,ib_logfile101
# Start mysqld with non existent innodb_log_group_home_dir
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend --innodb_log_group_home_dir=/path/to/non-existent/
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -16,6 +18,7 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /File .path.to.non-existent.*ib_logfile101: 'create' returned OS error \d+/ in mysqld.1.err
# Remove ibdata1 & ibdata2
# Successfully let InnoDB create tablespaces
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT COUNT(*) `1` FROM INFORMATION_SCHEMA.ENGINES
WHERE engine='innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -23,6 +26,7 @@ AND support IN ('YES', 'DEFAULT', 'ENABLED');
1
# Backup tmp/logfile/*
# 1. With ibdata2, Without ibdata1
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -45,6 +49,7 @@ undo001
undo002
undo003
# 2. With ibdata1, without ibdata2
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -84,6 +89,7 @@ ib_logfile2
undo001
undo002
undo003
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -117,6 +123,7 @@ ib_buffer_pool
undo001
undo002
undo003
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -145,6 +152,7 @@ bak_undo003
ib_buffer_pool
undo001
undo003
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -171,6 +179,7 @@ bak_undo002
bak_undo003
ib_buffer_pool
undo003
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -203,6 +212,7 @@ ibdata1
ibdata2
undo001
undo003
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -240,6 +250,7 @@ ib_logfile2
ibdata1
ibdata2
undo003
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -272,6 +283,7 @@ bak_undo003
ib_buffer_pool
ib_logfile0
ib_logfile2
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -296,6 +308,7 @@ undo001
undo002
undo003
# 10. With ibdata*, without ib_logfile0
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -335,12 +348,14 @@ ibdata2
undo001
undo002
undo003
+# restart: --innodb-log-files-in-group=3 --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_file --innodb-undo-logs=20 --innodb-undo-tablespaces=3 --innodb-data-file-path=ibdata1:16M;ibdata2:10M:autoextend
SELECT COUNT(*) `1` FROM INFORMATION_SCHEMA.ENGINES
WHERE engine='innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
1
1
FOUND 1 /Resizing redo log from 1\*\d+ to 3\*\d+ bytes; LSN=\d+/ in mysqld.1.err
+# restart
# Cleanup
bak_ib_logfile0
bak_ib_logfile1
diff --git a/mysql-test/suite/innodb/r/log_file_name.result b/mysql-test/suite/innodb/r/log_file_name.result
index 78b97f7f0bb..1bf7f16413a 100644
--- a/mysql-test/suite/innodb/r/log_file_name.result
+++ b/mysql-test/suite/innodb/r/log_file_name.result
@@ -11,12 +11,14 @@ COMMIT;
# Kill the server
# Fault 0 (no real fault): Orphan file with duplicate space_id.
# Fault 1: Two dirty files with the same space_id.
+# restart
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. Another data file called .*t1.ibd exists with the same space ID/ in mysqld.1.err
# Fault 2: Wrong space_id in a dirty file, and a missing file.
+# restart
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -24,6 +26,7 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Ignoring data file '.*t1.ibd' with space ID/ in mysqld.1.err
FOUND 1 /InnoDB: Tablespace \d+ was not found at.*t3.ibd/ in mysqld.1.err
# Fault 3: Wrong space_id in a dirty file, and no missing file.
+# restart
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -33,6 +36,7 @@ FOUND 1 /InnoDB: Tablespace \d+ was not found at .*t1.ibd/ in mysqld.1.err
FOUND 1 /InnoDB: Tablespace \d+ was not found at .*t3.ibd/ in mysqld.1.err
FOUND 2 /InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace/ in mysqld.1.err
# Fault 4: Missing data file
+# restart
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -40,16 +44,19 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Tablespace \d+ was not found at .*t[12].ibd.
.*InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace/ in mysqld.1.err
# Fault 5: Wrong type of data file
+# restart
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
+# restart
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
NOT FOUND /\[Note\] InnoDB: Cannot read first page of .*t2.ibd/ in mysqld.1.err
FOUND 1 /\[ERROR\] InnoDB: Datafile .*t2.*\. Cannot determine the space ID from the first 64 pages/ in mysqld.1.err
+# restart
SELECT * FROM t2;
a
9
@@ -78,6 +85,7 @@ RENAME TABLE u5 TO u6;
INSERT INTO u6 VALUES(2);
# Kill the server
# Fault 6: All-zero data file and innodb_force_recovery
+# restart: --innodb-force-recovery=1
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -86,6 +94,7 @@ FOUND 1 /\[Note\] InnoDB: Header page consists of zero bytes in datafile: .*u1.i
FOUND 1 /\[ERROR\] InnoDB: Datafile .*u1.*\. Cannot determine the space ID from the first 64 pages/ in mysqld.1.err
NOT FOUND /\[Note\] InnoDB: Cannot read first page of .*u2.ibd/ in mysqld.1.err
# Fault 7: Missing or wrong data file and innodb_force_recovery
+# restart: --innodb-force-recovery=1
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
@@ -93,9 +102,11 @@ ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /\[Note\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd/ in mysqld.1.err
FOUND 1 /InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace/ in mysqld.1.err
FOUND 1 /\[ERROR\] InnoDB: Cannot rename '.*u5.ibd' to '.*u6.ibd' because the target file exists/ in mysqld.1.err
+# restart: --innodb-force-recovery=1
FOUND 1 /\[Note\] InnoDB: Header page consists of zero bytes in datafile: .*u1.ibd/ in mysqld.1.err
FOUND 1 /InnoDB: At LSN: \d+: unable to open file .*u[1-5].ibd for tablespace/ in mysqld.1.err
FOUND 1 /\[Warning\] InnoDB: Tablespace \d+ was not found at .*u[1-5].ibd, and innodb_force_recovery was set. All redo log for this tablespace will be ignored!/ in mysqld.1.err
+# restart
DROP TABLE u1,u2,u3,u6;
# List of files:
db.opt
diff --git a/mysql-test/suite/innodb/r/log_file_name_debug.result b/mysql-test/suite/innodb/r/log_file_name_debug.result
index ae7ce48fe5e..e5c1a3e198a 100644
--- a/mysql-test/suite/innodb/r/log_file_name_debug.result
+++ b/mysql-test/suite/innodb/r/log_file_name_debug.result
@@ -5,10 +5,14 @@
SET GLOBAL DEBUG_DBUG='+d,fil_names_write_bogus';
CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB;
# Kill the server
+# restart: --debug=d,innodb_log_abort_1 --innodb-log-files-in-group=1 --innodb-log-file-size=4M
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 1 /InnoDB: Tablespace 4294967280 was not found at .*, but there were no modifications either/ in mysqld.1.err
+# restart: --debug=d,innodb_log_abort_3,ib_log --innodb-log-files-in-group=1 --innodb-log-file-size=4M
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 1 /srv_prepare_to_delete_redo_log_files: ib_log: MLOG_CHECKPOINT.* written/ in mysqld.1.err
+# restart
+# restart
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/log_file_size.result b/mysql-test/suite/innodb/r/log_file_size.result
index 89d5c6ac4d6..f3e8e9bc30a 100644
--- a/mysql-test/suite/innodb/r/log_file_size.result
+++ b/mysql-test/suite/innodb/r/log_file_size.result
@@ -1,19 +1,24 @@
CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB;
+# restart: --innodb-thread-concurrency=1 --innodb-log-file-size=1m --innodb-log-files-in-group=2
SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Log file .*ib_logfile1 is of different size .* bytes than other log files 0 bytes!/ in mysqld.1.err
+# restart: --innodb-thread-concurrency=1 --innodb-log-file-size=1m --innodb-log-files-in-group=2
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
+# restart: --innodb-thread-concurrency=100 --innodb-log-file-size=10M --innodb-log-files-in-group=2
BEGIN;
INSERT INTO t1 VALUES (42);
+# restart: --innodb-log-file-size=6M
SELECT * FROM t1;
a
INSERT INTO t1 VALUES (42);
BEGIN;
DELETE FROM t1;
+# restart: --innodb-log-files-in-group=3 --innodb-log-file-size=5M
SELECT * FROM t1;
a
42
@@ -28,50 +33,66 @@ DELETE FROM t1 WHERE a=0;
disconnect con1;
connection default;
# Kill the server
+# restart: --innodb-log-group-home-dir=foo\;bar
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 1 /syntax error in innodb_log_group_home_dir/ in mysqld.1.err
+# restart: --debug=d,innodb_log_abort_1
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 1 /InnoDB: Starting crash recovery from checkpoint LSN=.*/ in mysqld.1.err
+# restart: --debug=d,innodb_log_abort_3
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
+# restart: --innodb-read-only
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 1 /InnoDB: innodb_read_only prevents crash recovery/ in mysqld.1.err
+# restart: --debug=d,innodb_log_abort_4
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 2 /redo log from 3\*[0-9]+ to 2\*[0-9]+ bytes/ in mysqld.1.err
+# restart: --debug=d,innodb_log_abort_5
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 3 /redo log from 3\*[0-9]+ to 2\*[0-9]+ bytes/ in mysqld.1.err
+# restart: --innodb-read-only
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 2 /InnoDB: innodb_read_only prevents crash recovery/ in mysqld.1.err
+# restart: --debug=d,innodb_log_abort_6
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 4 /redo log from 3\*[0-9]+ to 2\*[0-9]+ bytes/ in mysqld.1.err
+# restart: --debug=d,innodb_log_abort_7
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
+# restart: --innodb-read-only
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 1 /InnoDB: Cannot create log files in read-only mode/ in mysqld.1.err
+# restart: --debug=d,innodb_log_abort_8
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 1 /InnoDB: Setting log file .*ib_logfile[0-9]+ size to/ in mysqld.1.err
+# restart: --debug=d,innodb_log_abort_9
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 1 /InnoDB: Setting log file .*ib_logfile[0-9]+ size to/ in mysqld.1.err
+# restart: --debug=d,innodb_log_abort_9
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 1 /InnoDB: Log file .*ib_logfile0 size 7 is not a multiple of 512 bytes/ in mysqld.1.err
+# restart: --debug=d,innodb_log_abort_9
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 1 /InnoDB: Log file .*ib_logfile1 is of different size 1048576 bytes than other log files/ in mysqld.1.err
+# restart: --debug=d,innodb_log_abort_10
SELECT * FROM t1;
ERROR 42000: Unknown storage engine 'InnoDB'
FOUND 1 /InnoDB: Setting log file .*ib_logfile[0-9]+ size to/ in mysqld.1.err
FOUND 1 /InnoDB: Renaming log file .*ib_logfile101 to .*ib_logfile0/ in mysqld.1.err
+# restart
SELECT * FROM t1;
a
42
diff --git a/mysql-test/suite/innodb/r/monitor.result b/mysql-test/suite/innodb/r/monitor.result
index a40bfdac0d0..4aea9fb5b0b 100644
--- a/mysql-test/suite/innodb/r/monitor.result
+++ b/mysql-test/suite/innodb/r/monitor.result
@@ -39,7 +39,6 @@ buffer_pages_written disabled
buffer_index_pages_written disabled
buffer_non_index_pages_written disabled
buffer_pages_read disabled
-buffer_pages0_read disabled
buffer_index_sec_rec_cluster_reads disabled
buffer_index_sec_rec_cluster_reads_avoided disabled
buffer_data_reads disabled
@@ -148,7 +147,6 @@ trx_nl_ro_commits disabled
trx_commits_insert_update disabled
trx_rollbacks disabled
trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
trx_active_transactions disabled
trx_rseg_history_len disabled
trx_undo_slots_used disabled
@@ -463,7 +461,7 @@ max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
where name like "dml%";
name max_count min_count count max_count_reset min_count_reset count_reset status
-dml_reads 4 NULL 4 4 NULL 4 enabled
+dml_reads 2 NULL 2 2 NULL 2 enabled
dml_inserts 1 NULL 1 1 NULL 1 enabled
dml_deletes 0 NULL 0 0 NULL 0 enabled
dml_updates 2 NULL 2 2 NULL 2 enabled
@@ -477,7 +475,7 @@ max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
where name like "dml%";
name max_count min_count count max_count_reset min_count_reset count_reset status
-dml_reads 6 NULL 6 6 NULL 6 enabled
+dml_reads 4 NULL 4 4 NULL 4 enabled
dml_inserts 1 NULL 1 1 NULL 1 enabled
dml_deletes 2 NULL 2 2 NULL 2 enabled
dml_updates 2 NULL 2 2 NULL 2 enabled
@@ -491,7 +489,7 @@ max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
where name like "dml%";
name max_count min_count count max_count_reset min_count_reset count_reset status
-dml_reads 6 NULL 6 0 NULL 0 enabled
+dml_reads 4 NULL 4 0 NULL 0 enabled
dml_inserts 1 NULL 1 0 NULL 0 enabled
dml_deletes 2 NULL 2 0 NULL 0 enabled
dml_updates 2 NULL 2 0 NULL 0 enabled
@@ -507,7 +505,7 @@ max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
where name like "dml%";
name max_count min_count count max_count_reset min_count_reset count_reset status
-dml_reads 8 NULL 8 2 NULL 2 enabled
+dml_reads 6 NULL 6 2 NULL 2 enabled
dml_inserts 3 NULL 3 2 NULL 2 enabled
dml_deletes 4 NULL 4 2 NULL 2 enabled
dml_updates 2 NULL 2 0 NULL 0 enabled
@@ -521,7 +519,7 @@ max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
where name like "dml%";
name max_count min_count count max_count_reset min_count_reset count_reset status
-dml_reads 8 NULL 8 2 NULL 2 enabled
+dml_reads 6 NULL 6 2 NULL 2 enabled
dml_inserts 3 NULL 3 2 NULL 2 enabled
dml_deletes 4 NULL 4 2 NULL 2 enabled
dml_updates 2 NULL 2 0 NULL 0 enabled
@@ -535,7 +533,7 @@ max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
where name like "dml%";
name max_count min_count count max_count_reset min_count_reset count_reset status
-dml_reads 8 NULL 8 2 NULL 2 disabled
+dml_reads 6 NULL 6 2 NULL 2 disabled
dml_inserts 3 NULL 3 2 NULL 2 disabled
dml_deletes 4 NULL 4 2 NULL 2 disabled
dml_updates 2 NULL 2 0 NULL 0 disabled
diff --git a/mysql-test/suite/innodb/r/purge_secondary.result b/mysql-test/suite/innodb/r/purge_secondary.result
index 8f20f5baacb..1b5f2896887 100644
--- a/mysql-test/suite/innodb/r/purge_secondary.result
+++ b/mysql-test/suite/innodb/r/purge_secondary.result
@@ -134,6 +134,7 @@ test.t1 check status OK
InnoDB 0 transactions not purged
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SELECT OTHER_INDEX_SIZE FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
WHERE NAME='test/t1';
diff --git a/mysql-test/suite/innodb/r/purge_thread_shutdown.result b/mysql-test/suite/innodb/r/purge_thread_shutdown.result
index 38cefbaf3e8..a87cba89917 100644
--- a/mysql-test/suite/innodb/r/purge_thread_shutdown.result
+++ b/mysql-test/suite/innodb/r/purge_thread_shutdown.result
@@ -33,4 +33,5 @@ set global innodb_fast_shutdown=0;
ERROR 42000: Variable 'innodb_fast_shutdown' can't be set to the value of '0'
kill ID;
ERROR 70100: Connection was killed
+# restart
drop table t1;
diff --git a/mysql-test/suite/innodb/r/read_only_recover_committed.result b/mysql-test/suite/innodb/r/read_only_recover_committed.result
index d2457154d15..e4cfebd1ec0 100644
--- a/mysql-test/suite/innodb/r/read_only_recover_committed.result
+++ b/mysql-test/suite/innodb/r/read_only_recover_committed.result
@@ -20,6 +20,7 @@ SET GLOBAL innodb_flush_log_at_trx_commit=1;
BEGIN;
INSERT INTO t VALUES(-10000);
ROLLBACK;
+# restart: --innodb-force-recovery=3
disconnect con1;
disconnect con2;
SELECT * FROM t;
@@ -31,6 +32,7 @@ a
1
20
UPDATE t SET a=3 WHERE a=1;
+# restart: --innodb-read-only
# Starting with MariaDB 10.2, innodb_read_only implies READ UNCOMMITTED.
# In earlier versions, this would return the last committed version
# (only a=3; no record for a=20)!
@@ -43,6 +45,7 @@ SELECT * FROM t;
a
3
20
+# restart: --innodb-force-recovery=5
#
# MDEV-15418 innodb_force_recovery=5 displays bogus warnings
# about too new transaction identifier
@@ -62,6 +65,7 @@ SELECT * FROM t;
a
3
20
+# restart
SELECT * FROM t;
a
3
diff --git a/mysql-test/suite/innodb/r/read_only_recovery.result b/mysql-test/suite/innodb/r/read_only_recovery.result
index e639e8abacc..e83bf66432e 100644
--- a/mysql-test/suite/innodb/r/read_only_recovery.result
+++ b/mysql-test/suite/innodb/r/read_only_recovery.result
@@ -14,6 +14,7 @@ SET GLOBAL innodb_flush_log_at_trx_commit=1;
BEGIN;
INSERT INTO t VALUES(0);
ROLLBACK;
+# restart: --innodb-force-recovery=3
disconnect con1;
SELECT * FROM t;
a
@@ -23,6 +24,7 @@ SELECT * FROM t;
a
1
UPDATE t SET a=3 WHERE a=1;
+# restart: --innodb-read-only
# Starting with MariaDB 10.2, innodb_read_only implies READ UNCOMMITTED.
# In earlier versions, this would return the last committed version
# (empty table)!
@@ -33,6 +35,7 @@ SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
SELECT * FROM t;
a
3
+# restart
SELECT * FROM t;
a
3
diff --git a/mysql-test/suite/innodb/r/readahead.result b/mysql-test/suite/innodb/r/readahead.result
index f7d6e6fae31..962434c794d 100644
--- a/mysql-test/suite/innodb/r/readahead.result
+++ b/mysql-test/suite/innodb/r/readahead.result
@@ -1,5 +1,6 @@
# Bug#25330449 ASSERT SIZE==SPACE->SIZE DURING BUF_READ_AHEAD_RANDOM
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=INNODB ROW_FORMAT=COMPRESSED;
+# restart
SET @saved = @@GLOBAL.innodb_random_read_ahead;
SET GLOBAL innodb_random_read_ahead = 1;
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/recovery_shutdown.result b/mysql-test/suite/innodb/r/recovery_shutdown.result
index d26163a0c4a..b21beecc24b 100644
--- a/mysql-test/suite/innodb/r/recovery_shutdown.result
+++ b/mysql-test/suite/innodb/r/recovery_shutdown.result
@@ -63,4 +63,7 @@ connection default;
SET GLOBAL innodb_flush_log_at_trx_commit=1;
CREATE TABLE u(a SERIAL) ENGINE=INNODB;
FLUSH TABLES;
+# restart
+# restart
+# restart
DROP TABLE t,u;
diff --git a/mysql-test/suite/innodb/r/rename_table.result b/mysql-test/suite/innodb/r/rename_table.result
index 9c45117cf10..23bb8c52d4a 100644
--- a/mysql-test/suite/innodb/r/rename_table.result
+++ b/mysql-test/suite/innodb/r/rename_table.result
@@ -7,6 +7,7 @@ FROM INFORMATION_SCHEMA.INNODB_SYS_DATAFILES WHERE PATH LIKE '%test%';
path
./test_jfg2/test.ibd
DROP DATABASE test_jfg;
+# restart
DROP DATABASE test_jfg2;
CREATE DATABASE abc_def;
CREATE DATABASE abc_def2;
@@ -17,6 +18,7 @@ FROM INFORMATION_SCHEMA.INNODB_SYS_DATAFILES WHERE PATH LIKE '%test%';
path
./abc_def2/test1.ibd
DROP DATABASE abc_def;
+# restart
DROP DATABASE abc_def2;
call mtr.add_suppression("InnoDB: (Operating system error|The error means|Cannot rename file)");
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb/r/rename_table_debug.result b/mysql-test/suite/innodb/r/rename_table_debug.result
index 646bd4faf5d..d13fc84aa43 100644
--- a/mysql-test/suite/innodb/r/rename_table_debug.result
+++ b/mysql-test/suite/innodb/r/rename_table_debug.result
@@ -6,6 +6,7 @@ SET DEBUG_SYNC='before_rename_table_commit SIGNAL renamed WAIT_FOR ever';
RENAME TABLE t1 TO t2;
connection default;
SET DEBUG_SYNC='now WAIT_FOR renamed';
+# restart
disconnect con1;
SELECT * FROM t1;
a b c d
diff --git a/mysql-test/suite/innodb/r/restart.result b/mysql-test/suite/innodb/r/restart.result
index 5e3315be204..b2132bed267 100644
--- a/mysql-test/suite/innodb/r/restart.result
+++ b/mysql-test/suite/innodb/r/restart.result
@@ -3,8 +3,8 @@
#
# FIXME: Unlike MySQL, maybe MariaDB should not read the .ibd files
# of tables with .isl file or DATA DIRECTORY attribute.
-call mtr.add_suppression("\\[ERROR\\] InnoDB: Invalid flags 0x7a207879 in .*td\\.ibd");
# FIXME: This is much more noisy than MariaDB 10.1!
+call mtr.add_suppression("\\[ERROR\\] InnoDB: Tablespace flags are invalid in datafile: .*test.t[rcd]\\.ibd");
call mtr.add_suppression("\\[ERROR\\] InnoDB: Operating system error number .* in a file operation\\.");
call mtr.add_suppression("\\[ERROR\\] InnoDB: The error means the system cannot find the path specified\\.");
call mtr.add_suppression("\\[ERROR\\] InnoDB: If you are installing InnoDB, remember that you must create directories yourself, InnoDB does not create them\\.");
@@ -14,11 +14,13 @@ CREATE TABLE tc(a INT)ENGINE=InnoDB ROW_FORMAT=COMPACT
PAGE_COMPRESSED=1 PAGE_COMPRESSION_LEVEL=9;
CREATE TABLE td(a INT)ENGINE=InnoDB ROW_FORMAT=DYNAMIC
STATS_PERSISTENT=0 DATA DIRECTORY='MYSQL_TMP_DIR';
+# restart: --skip-innodb-buffer-pool-load-at-startup
SELECT COUNT(*) FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
COUNT(*)
1
+# restart
SELECT * FROM tr;
a
SELECT * FROM tc;
diff --git a/mysql-test/suite/innodb/r/row_format_redundant.result b/mysql-test/suite/innodb/r/row_format_redundant.result
index 8852bfd8e6a..03e516f4eb4 100644
--- a/mysql-test/suite/innodb/r/row_format_redundant.result
+++ b/mysql-test/suite/innodb/r/row_format_redundant.result
@@ -1,3 +1,4 @@
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-data-file-path=ibdata1:1M:autoextend --innodb-undo-tablespaces=0 --innodb-stats-persistent=0
SET GLOBAL innodb_file_per_table=1;
#
# Bug#21644827 - FTS, ASSERT !SRV_READ_ONLY_MODE || M_IMPL.M_LOG_MODE ==
@@ -33,6 +34,7 @@ row_format=redundant;
insert into t3 values(444, 'dddd', 'bbbbb', 'aaaaa');
insert into t3 values(555, 'eeee', 'ccccc', 'aaaaa');
SET GLOBAL innodb_fast_shutdown=0;
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-data-file-path=ibdata1:1M:autoextend --innodb-undo-tablespaces=0 --innodb-stats-persistent=0 --innodb-read-only
SELECT COUNT(*) FROM t1;
COUNT(*)
4096
@@ -48,12 +50,14 @@ TRUNCATE TABLE t2;
ERROR HY000: Table 't2' is read only
TRUNCATE TABLE t3;
ERROR HY000: Table 't3' is read only
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-data-file-path=ibdata1:1M:autoextend --innodb-undo-tablespaces=0 --innodb-stats-persistent=0
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;
TRUNCATE TABLE t3;
corrupted SYS_TABLES.MIX_LEN for test/t1
corrupted SYS_TABLES.MIX_LEN for test/t2
corrupted SYS_TABLES.MIX_LEN for test/t3
+# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/row_format_redundant --innodb-data-file-path=ibdata1:1M:autoextend --innodb-undo-tablespaces=0 --innodb-stats-persistent=0
TRUNCATE TABLE t1;
ERROR 42S02: Table 'test.t1' doesn't exist in engine
TRUNCATE TABLE t2;
@@ -73,6 +77,7 @@ Warnings:
Warning 1932 Table 'test.t1' doesn't exist in engine
DROP TABLE t2,t3;
FOUND 50 /\[ERROR\] InnoDB: Table `test`\.`t1` in InnoDB data dictionary contains invalid flags\. SYS_TABLES\.TYPE=1 SYS_TABLES\.MIX_LEN=511\b/ in mysqld.1.err
+# restart
ib_buffer_pool
ib_logfile0
ib_logfile1
diff --git a/mysql-test/suite/innodb/r/system_tables.result b/mysql-test/suite/innodb/r/system_tables.result
index 79a24f7e455..7ccf76f58cc 100644
--- a/mysql-test/suite/innodb/r/system_tables.result
+++ b/mysql-test/suite/innodb/r/system_tables.result
@@ -1,8 +1,9 @@
alter table mysql.time_zone_name engine=InnoDB;
create table envois3 (starttime datetime) engine=InnoDB;
insert envois3 values ('2008-08-11 22:43:00');
+# restart
select convert_tz(starttime,'UTC','Europe/Moscow') starttime from envois3;
starttime
2008-08-12 02:43:00
drop table envois3;
-alter table mysql.time_zone_name engine=MyISAM;
+alter table mysql.time_zone_name engine=Aria;
diff --git a/mysql-test/suite/innodb/r/table_flags.result b/mysql-test/suite/innodb/r/table_flags.result
index 8c4280738e1..7ede75f021a 100644
--- a/mysql-test/suite/innodb/r/table_flags.result
+++ b/mysql-test/suite/innodb/r/table_flags.result
@@ -1,3 +1,4 @@
+# restart: with restart_parameters
SET GLOBAL innodb_file_per_table=1;
CREATE TABLE tr(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
CREATE TABLE tc(a INT PRIMARY KEY)ENGINE=InnoDB ROW_FORMAT=COMPACT;
@@ -112,6 +113,7 @@ header=0x000050150074 (NAME='test/tz',
CLUSTER_NAME=NULL(0 bytes),
SPACE=0x00000004)
header=0x070008030000 (NAME=0x73757072656d756d00)
+# restart: with restart_parameters
SHOW CREATE TABLE tr;
ERROR 42S02: Table 'test.tr' doesn't exist in engine
SHOW CREATE TABLE tc;
@@ -143,6 +145,7 @@ ERROR 42S02: Table 'test.tp' doesn't exist in engine
FOUND 5 /InnoDB: Table `test`.`t[cp]` in InnoDB data dictionary contains invalid flags\. SYS_TABLES\.TYPE=(129|289|3873|1232[13]) SYS_TABLES\.N_COLS=2147483649/ in mysqld.1.err
FOUND 2 /InnoDB: Table `test`\.`tr` in InnoDB data dictionary contains invalid flags\. SYS_TABLES\.TYPE=65 SYS_TABLES\.MIX_LEN=4294967295\b/ in mysqld.1.err
Restoring SYS_TABLES clustered index root page (8)
+# restart: with restart_parameters
SHOW CREATE TABLE tr;
Table Create Table
tr CREATE TABLE `tr` (
@@ -188,6 +191,7 @@ a
SELECT * FROM tp;
a
DROP TABLE tr,tc,td,tz,tp;
+# restart
ib_logfile0
ib_logfile1
ibdata1
diff --git a/mysql-test/suite/innodb/r/temporary_table.result b/mysql-test/suite/innodb/r/temporary_table.result
index 94c2cfc63b1..d7ea25fa117 100644
--- a/mysql-test/suite/innodb/r/temporary_table.result
+++ b/mysql-test/suite/innodb/r/temporary_table.result
@@ -121,6 +121,7 @@ truncate table t1;
select * from t1;
keyc c1 c2
# test condition of full-temp-tablespace
+# restart: --innodb_temp_data_file_path=ibtmp1:12M
create temporary table t1
(keyc int, c1 char(100), c2 char(100),
primary key(keyc)) engine = innodb;
@@ -129,6 +130,7 @@ call populate_t1();
ERROR HY000: The table 't1' is full
drop procedure populate_t1;
# test read-only mode
+# restart: --innodb-read-only
# files in MYSQL_DATA_DIR
select * from t1;
ERROR 42S02: Table 'test.t1' doesn't exist
@@ -138,22 +140,27 @@ create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb
ERROR HY000: Can't create table `test`.`t1` (errno: 165 "Table is read only")
# test various bad start-up parameters
FOUND 2 /InnoDB: Unable to create temporary file/ in mysqld.1.err
+# restart: --innodb_data_file_path=ibdata1:12M:autoextend --innodb_temp_data_file_path=ibdata1:12M:autoextend
FOUND 1 /innodb_temporary and innodb_system file names seem to be the same/ in mysqld.1.err
SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
+# restart: --innodb_temp_data_file_path=foobar:3Gnewraw
FOUND 1 /support raw device/ in mysqld.1.err
SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
+# restart: --innodb_temp_data_file_path=barbar:3Graw
FOUND 2 /support raw device/ in mysqld.1.err
SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
+# restart: --innodb_temp_data_file_path=
FOUND 1 /InnoDB: syntax error in file path/ in mysqld.1.err
SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
+# restart
create temporary table t (
i int)
engine = innodb row_format = compressed;
diff --git a/mysql-test/suite/innodb/r/truncate_crash.result b/mysql-test/suite/innodb/r/truncate_crash.result
index 10ce8e92ab6..0c20e76d331 100644
--- a/mysql-test/suite/innodb/r/truncate_crash.result
+++ b/mysql-test/suite/innodb/r/truncate_crash.result
@@ -6,6 +6,7 @@ SET DEBUG_SYNC='before_trx_state_committed_in_memory SIGNAL c WAIT_FOR ever';
TRUNCATE TABLE t1;
connection default;
SET DEBUG_SYNC='now WAIT_FOR c';
+# restart
disconnect wait;
SELECT COUNT(*) FROM t1;
COUNT(*)
diff --git a/mysql-test/suite/innodb/r/truncate_missing.result b/mysql-test/suite/innodb/r/truncate_missing.result
index 263880eccd2..62a4ef5c552 100644
--- a/mysql-test/suite/innodb/r/truncate_missing.result
+++ b/mysql-test/suite/innodb/r/truncate_missing.result
@@ -10,6 +10,7 @@ t CREATE TABLE `t` (
`a` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
UNIQUE KEY `a` (`a`)
) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1
+# restart
SELECT * FROM t;
ERROR 42S02: Table 'test.t' doesn't exist in engine
TRUNCATE TABLE t;
diff --git a/mysql-test/suite/innodb/r/undo_truncate_recover.result b/mysql-test/suite/innodb/r/undo_truncate_recover.result
index bcfc136e8c0..212a1658e18 100644
--- a/mysql-test/suite/innodb/r/undo_truncate_recover.result
+++ b/mysql-test/suite/innodb/r/undo_truncate_recover.result
@@ -10,7 +10,7 @@ update t1 set c = 'InnoDB';
set global debug_dbug = '+d,ib_undo_trunc';
commit;
call mtr.add_suppression("InnoDB: innodb_undo_tablespaces=0 disables dedicated undo log tablespaces");
-call mtr.add_suppression("InnoDB: The redo log transaction size ");
SET GLOBAL innodb_fast_shutdown=0;
FOUND 1 /ib_undo_trunc/ in mysqld.1.err
+# restart: with restart_parameters
drop table t1;
diff --git a/mysql-test/suite/innodb/r/update_time.result b/mysql-test/suite/innodb/r/update_time.result
index d8b9069b1ae..c2a842b7bfc 100644
--- a/mysql-test/suite/innodb/r/update_time.result
+++ b/mysql-test/suite/innodb/r/update_time.result
@@ -43,6 +43,7 @@ XA PREPARE 'xatrx';
CONNECT con1,localhost,root,,;
call mtr.add_suppression("Found 1 prepared XA transactions");
FLUSH TABLES;
+# restart
SELECT update_time FROM information_schema.tables WHERE table_name = 't';
update_time
NULL
diff --git a/mysql-test/suite/innodb/r/update_time_wl6658.result b/mysql-test/suite/innodb/r/update_time_wl6658.result
index fcd7a53351a..fd8f8457758 100644
--- a/mysql-test/suite/innodb/r/update_time_wl6658.result
+++ b/mysql-test/suite/innodb/r/update_time_wl6658.result
@@ -69,6 +69,7 @@ table_name COUNT(update_time)
tab7 1
tab8 1
#restart the server
+# restart
SELECT table_name,update_time
FROM information_schema.tables
WHERE table_name IN ('tab1','tab2','tab3','tab4','tab5','tab7','tab8')
diff --git a/mysql-test/suite/innodb/r/xa_recovery.result b/mysql-test/suite/innodb/r/xa_recovery.result
index a93afcb07f8..b559de388e2 100644
--- a/mysql-test/suite/innodb/r/xa_recovery.result
+++ b/mysql-test/suite/innodb/r/xa_recovery.result
@@ -6,10 +6,12 @@ UPDATE t1 set a=2;
XA END 'x';
XA PREPARE 'x';
connection default;
+# restart: --innodb-force-recovery=2
disconnect con1;
connect con1,localhost,root;
SELECT * FROM t1 LOCK IN SHARE MODE;
connection default;
+# restart
disconnect con1;
SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
SELECT * FROM t1;
diff --git a/mysql-test/suite/innodb/t/alter_kill.test b/mysql-test/suite/innodb/t/alter_kill.test
index daaac4bc0ee..2313d63db69 100644
--- a/mysql-test/suite/innodb/t/alter_kill.test
+++ b/mysql-test/suite/innodb/t/alter_kill.test
@@ -2,6 +2,7 @@
# The embedded server does not support restarting in mysql-test-run.
-- source include/not_embedded.inc
-- source include/no_valgrind_without_big.inc
+-- source include/innodb_checksum_algorithm.inc
let MYSQLD_DATADIR=`select @@datadir`;
let PAGE_SIZE=`select @@innodb_page_size`;
@@ -51,20 +52,29 @@ open(FILE, "+<$file") || die "Unable to open $file";
binmode FILE;
my $ps= $ENV{PAGE_SIZE};
my $page;
+die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps;
+my $full_crc32 = unpack("N",substr($page,54,4)) & 0x10; # FIL_SPACE_FLAGS
sysseek(FILE, 3*$ps, 0) || die "Unable to seek $file\n";
die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps;
substr($page,4,4)=pack("N",0xc001cafe);
my $polynomial = 0x82f63b78; # CRC-32C
-my $ck= pack("N",mycrc32(substr($page, 4, 22), 0, $polynomial) ^
+if ($full_crc32)
+{
+ my $ck = mycrc32(substr($page, 0, $ps-4), 0, $polynomial);
+ substr($page, $ps-4, 4) = pack("N", $ck);
+}
+else
+{
+ my $ck= pack("N",mycrc32(substr($page, 4, 22), 0, $polynomial) ^
mycrc32(substr($page, 38, $ps - 38 - 8), 0, $polynomial));
-substr($page,0,4)=$ck;
-substr($page,$ps-8,4)=$ck;
+ substr($page,0,4)=$ck;
+ substr($page,$ps-8,4)=$ck;
+}
sysseek(FILE, 3*$ps, 0) || die "Unable to rewind $file\n";
syswrite(FILE, $page, $ps)==$ps || die "Unable to write $file\n";
close(FILE) || die "Unable to close $file";
EOF
--- echo # Restart mysqld
-- source include/start_mysqld.inc
--error ER_NO_SUCH_TABLE_IN_ENGINE
@@ -84,21 +94,29 @@ open(FILE, "+<$file") || die "Unable to open $file";
binmode FILE;
my $ps= $ENV{PAGE_SIZE};
my $page;
+die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps;
+my $full_crc32 = unpack("N",substr($page,54,4)) & 0x10; # FIL_SPACE_FLAGS
sysseek(FILE, 3*$ps, 0) || die "Unable to seek $file\n";
die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps;
substr($page,4,4)=pack("N",3);
my $polynomial = 0x82f63b78; # CRC-32C
-my $ck= pack("N",mycrc32(substr($page, 4, 22), 0, $polynomial) ^
+if ($full_crc32)
+{
+ my $ck = mycrc32(substr($page, 0, $ps-4), 0, $polynomial);
+ substr($page, $ps-4, 4) = pack("N", $ck);
+}
+else
+{
+ my $ck= pack("N",mycrc32(substr($page, 4, 22), 0, $polynomial) ^
mycrc32(substr($page, 38, $ps - 38 - 8), 0, $polynomial));
-substr($page,0,4)=$ck;
-substr($page,$ps-8,4)=$ck;
+ substr($page,0,4)=$ck;
+ substr($page,$ps-8,4)=$ck;
+}
sysseek(FILE, 3*$ps, 0) || die "Unable to rewind $file\n";
syswrite(FILE, $page, $ps)==$ps || die "Unable to write $file\n";
close(FILE) || die "Unable to close $file";
EOF
--- echo # Restart the server after uncorrupting the file.
-
-- source include/start_mysqld.inc
INSERT INTO bug16720368 VALUES(9,1);
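For reference, the two page-checksum layouts that the perl blocks in the hunk above switch between can be reproduced standalone. This is only a minimal sketch: the byte ranges, the 0x82f63b78 CRC-32C polynomial, and the 0x10 bit of FIL_SPACE_FLAGS at offset 54 are taken directly from the hunk, while the mycrc32 re-implementation below is an assumption standing in for the tests' include/crc32.pl helper (which the test loads with `do`) and is not guaranteed to be byte-for-byte identical to it.

#!/usr/bin/perl
use strict;
use warnings;

# Bitwise, reflected CRC with explicit initial value and final complement.
# Assumed to behave like the tree's include/crc32.pl mycrc32() when the
# initial value is 0; with polynomial 0x82f63b78 this is CRC-32C.
sub mycrc32 {
    my ($input, $initial, $polynomial) = @_;
    my $crc = $initial ^ 0xFFFFFFFF;
    foreach my $byte (unpack("C*", $input)) {
        $crc ^= $byte;
        for (1 .. 8) {
            my $lsb = $crc & 1;
            $crc >>= 1;
            $crc ^= $polynomial if $lsb;
        }
    }
    return $crc ^ 0xFFFFFFFF;
}

# Format detection as done in the hunk above: bit 0x10 of the big-endian
# FIL_SPACE_FLAGS field at byte offset 54 of the first page of the file.
sub is_full_crc32 {
    my ($page0) = @_;
    return unpack("N", substr($page0, 54, 4)) & 0x10;
}

# Recompute the stored checksum of one page of size $ps, mirroring the
# two branches of the hunk above.
sub recalc_checksum {
    my ($page, $ps, $full_crc32) = @_;
    my $polynomial = 0x82f63b78;    # CRC-32C
    if ($full_crc32) {
        # full_crc32: one CRC over bytes 0 .. $ps-5, stored big-endian
        # in the last 4 bytes of the page.
        my $ck = mycrc32(substr($page, 0, $ps - 4), 0, $polynomial);
        substr($page, $ps - 4, 4) = pack("N", $ck);
    } else {
        # classic crc32: XOR of a CRC over bytes 4 .. 25 and a CRC over
        # bytes 38 .. $ps-9, stored at offset 0 and repeated at $ps-8.
        my $ck = pack("N",
                      mycrc32(substr($page, 4, 22), 0, $polynomial) ^
                      mycrc32(substr($page, 38, $ps - 38 - 8), 0, $polynomial));
        substr($page, 0, 4) = $ck;
        substr($page, $ps - 8, 4) = $ck;
    }
    return $page;
}

# Example: stamp a blank 16KiB page with each variant of the checksum.
my $ps = 16384;
my $page = chr(0) x $ps;
printf("full_crc32 trailer: %08x\n",
       unpack("N", substr(recalc_checksum($page, $ps, 1), $ps - 4, 4)));
printf("crc32 field:        %08x\n",
       unpack("N", substr(recalc_checksum($page, $ps, 0), 0, 4)));

The hunk applies the same arithmetic after deliberately overwriting a field in page 3, so that the page carries a valid checksum for whichever innodb_checksum_algorithm combination (strict_crc32 or strict_full_crc32) the test is running under.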
diff --git a/mysql-test/suite/innodb/t/alter_rename_files.test b/mysql-test/suite/innodb/t/alter_rename_files.test
index 3ed1cb5d9fa..27408320f7d 100644
--- a/mysql-test/suite/innodb/t/alter_rename_files.test
+++ b/mysql-test/suite/innodb/t/alter_rename_files.test
@@ -11,7 +11,7 @@ SET GLOBAL innodb_log_checkpoint_now=TRUE;
# Start an ALTER TABLE and stop it before renaming the files
SET DEBUG_SYNC='commit_cache_rebuild SIGNAL ready WAIT_FOR finish';
---send ALTER TABLE t1 ADD PRIMARY KEY(x)
+--send ALTER TABLE t1 FORCE;
connect (con1,localhost,root,,);
diff --git a/mysql-test/suite/innodb/t/alter_table.test b/mysql-test/suite/innodb/t/alter_table.test
index d0943e7d407..a8b52732c91 100644
--- a/mysql-test/suite/innodb/t/alter_table.test
+++ b/mysql-test/suite/innodb/t/alter_table.test
@@ -59,3 +59,15 @@ ALTER TABLE t1 ADD b INT;
ALTER TABLE t1 DROP a;
ALTER TABLE t1 ADD c INT;
DROP TABLE t1, tx;
+
+#
+# Check that innodb supports transactional=1
+#
+
+create table t1 (a int) transactional=1 engine=aria;
+create table t2 (a int) transactional=1 engine=innodb;
+show create table t1;
+show create table t2;
+alter table t1 engine=innodb;
+alter table t1 add column b int;
+drop table t1,t2;
diff --git a/mysql-test/suite/innodb/t/alter_varchar_change.test b/mysql-test/suite/innodb/t/alter_varchar_change.test
index 23928badb24..65dc38a3411 100644
--- a/mysql-test/suite/innodb/t/alter_varchar_change.test
+++ b/mysql-test/suite/innodb/t/alter_varchar_change.test
@@ -307,6 +307,30 @@ SHOW CREATE TABLE t1;
DROP TABLE t1;
CREATE TABLE t1(f1 INT NOT NULL,
+ f2 VARCHAR(128),
+ INDEX idx(f2(40)))ENGINE=InnoDB;
+
+CALL get_table_id("test/t1", @tbl_id);
+ALTER TABLE t1 MODIFY f2 VARCHAR(300);
+CALL get_table_id("test/t1", @tbl1_id);
+
+SELECT @tbl1_id = @tbl_id;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+CREATE TABLE t1(f1 INT NOT NULL,
+ f2 VARCHAR(128),
+ INDEX idx(f2(40)))ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
+
+CALL get_table_id("test/t1", @tbl_id);
+ALTER TABLE t1 MODIFY f2 VARCHAR(300);
+CALL get_table_id("test/t1", @tbl1_id);
+
+SELECT @tbl1_id = @tbl_id;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+CREATE TABLE t1(f1 INT NOT NULL,
f2 VARCHAR(100),
INDEX idx(f2(40)))ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb/t/corrupted_during_recovery.test b/mysql-test/suite/innodb/t/corrupted_during_recovery.test
index fbfb1bbe5d5..697d6e2dce0 100644
--- a/mysql-test/suite/innodb/t/corrupted_during_recovery.test
+++ b/mysql-test/suite/innodb/t/corrupted_during_recovery.test
@@ -12,6 +12,7 @@ let INNODB_PAGE_SIZE=`select @@innodb_page_size`;
CREATE TABLE t1(a BIGINT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES(1);
# Force a redo log checkpoint.
+let $restart_noprint=2;
--source include/restart_mysqld.inc
--source ../include/no_checkpoint_start.inc
CREATE TABLE t2(a BIGINT PRIMARY KEY) ENGINE=InnoDB;
diff --git a/mysql-test/suite/innodb/t/doublewrite.combinations b/mysql-test/suite/innodb/t/doublewrite.combinations
new file mode 100644
index 00000000000..729380593f3
--- /dev/null
+++ b/mysql-test/suite/innodb/t/doublewrite.combinations
@@ -0,0 +1,5 @@
+[strict_crc32]
+--innodb-checksum-algorithm=strict_crc32
+
+[strict_full_crc32]
+--innodb-checksum-algorithm=strict_full_crc32
diff --git a/mysql-test/suite/innodb/t/doublewrite.test b/mysql-test/suite/innodb/t/doublewrite.test
index 448f934558c..87ca600aea3 100644
--- a/mysql-test/suite/innodb/t/doublewrite.test
+++ b/mysql-test/suite/innodb/t/doublewrite.test
@@ -17,11 +17,14 @@ call mtr.add_suppression("InnoDB: New log files created");
call mtr.add_suppression("InnoDB: Cannot create doublewrite buffer: the first file in innodb_data_file_path must be at least (3|6|12)M\\.");
call mtr.add_suppression("InnoDB: Database creation was aborted");
call mtr.add_suppression("Plugin 'InnoDB' (init function returned error|registration as a STORAGE ENGINE failed)");
+call mtr.add_suppression("InnoDB: A bad Space ID was found in datafile.*");
+call mtr.add_suppression("InnoDB: Checksum mismatch in datafile: .*");
--enable_query_log
--source include/restart_mysqld.inc
let INNODB_PAGE_SIZE=`select @@innodb_page_size`;
let MYSQLD_DATADIR=`select @@datadir`;
+let ALGO=`select @@innodb_checksum_algorithm`;
let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err;
show variables like 'innodb_doublewrite';
@@ -73,14 +76,22 @@ perl;
use IO::Handle;
do "$ENV{MTR_SUITE_DIR}/include/crc32.pl";
my $polynomial = 0x82f63b78; # CRC-32C
+my $algo = $ENV{ALGO};
+die "Unsupported innodb_checksum_algorithm=$algo\n" unless $algo =~ /crc32/;
my $fname= "$ENV{'MYSQLD_DATADIR'}test/t1.ibd";
my $page_size = $ENV{INNODB_PAGE_SIZE};
my $page;
+do "$ENV{MTR_SUITE_DIR}/../innodb/include/crc32.pl";
open(FILE, "+<", $fname) or die;
sysread(FILE, $page, $page_size)==$page_size||die "Unable to read $name\n";
+my $page1 = $page;
+substr($page1, 34, 4) = pack("N", 0);
+my $polynomial0 = 0x82f63b78; # CRC-32C
+my $ck0 = mycrc32(substr($page1, 0, ($page_size-4)), 0, $polynomial0);
+substr($page1, ($page_size - 4), 4) = pack("N", $ck0);
sysseek(FILE, 0, 0)||die "Unable to seek $fname\n";
-die unless syswrite(FILE, chr(0) x $page_size, $page_size) == $page_size;
+die unless syswrite(FILE, $page1, $page_size) == $page_size;
close FILE;
open(FILE, "+<", "$ENV{MYSQLD_DATADIR}ibdata1")||die "cannot open ibdata1\n";
@@ -103,12 +114,21 @@ for (my $d = $d1; $d < $d2 + 64; $d++)
$badflags |= ($flags & 15 << 6) << 7; # PAGE_SSIZE
substr ($_, 54, 4) = pack("N", $badflags);
- # Replace the innodb_checksum_algorithm=crc32 checksum
- my $ck= pack("N",
- mycrc32(substr($_, 4, 22), 0, $polynomial) ^
- mycrc32(substr($_, 38, $page_size - 38 - 8), 0, $polynomial));
- substr ($_, 0, 4) = $ck;
- substr ($_, $page_size - 8, 4) = $ck;
+ if ($algo =~ /full_crc32/)
+ {
+ my $ck = mycrc32(substr($_, 0, $page_size - 4), 0, $polynomial);
+ substr($_, $page_size - 4, 4) = pack("N", $ck);
+ }
+ else
+ {
+ # Replace the innodb_checksum_algorithm=crc32 checksum
+ my $ck= pack("N",
+ mycrc32(substr($_, 4, 22), 0, $polynomial) ^
+ mycrc32(substr($_, 38, $page_size - 38 - 8), 0,
+ $polynomial));
+ substr ($_, 0, 4) = $ck;
+ substr ($_, $page_size - 8, 4) = $ck;
+ }
syswrite(FILE, $_, $page_size)==$page_size||die;
close(FILE);
exit 0;
@@ -151,10 +171,12 @@ set global innodb_buf_flush_list_now = 1;
perl;
use IO::Handle;
my $fname= "$ENV{'MYSQLD_DATADIR'}test/t1.ibd";
+my $page_size = $ENV{INNODB_PAGE_SIZE};
open(FILE, "+<", $fname) or die;
-FILE->autoflush(1);
-binmode FILE;
-print FILE chr(0) x ($ENV{'INNODB_PAGE_SIZE'}/2);
+sysread(FILE, $page, $page_size)==$page_size||die "Unable to read $name\n";
+substr($page, 28, 4) = pack("N", 1000);
+sysseek(FILE, 0, 0)||die "Unable to seek $fname\n";
+die unless syswrite(FILE, $page, $page_size) == $page_size;
close FILE;
EOF
diff --git a/mysql-test/suite/innodb/t/full_crc32_import.opt b/mysql-test/suite/innodb/t/full_crc32_import.opt
new file mode 100644
index 00000000000..ac859973988
--- /dev/null
+++ b/mysql-test/suite/innodb/t/full_crc32_import.opt
@@ -0,0 +1 @@
+--innodb_checksum_algorithm=full_crc32
diff --git a/mysql-test/suite/innodb/t/full_crc32_import.test b/mysql-test/suite/innodb/t/full_crc32_import.test
new file mode 100644
index 00000000000..f62c68ec543
--- /dev/null
+++ b/mysql-test/suite/innodb/t/full_crc32_import.test
@@ -0,0 +1,137 @@
+-- source include/have_innodb.inc
+
+FLUSH TABLES;
+
+let $MYSQLD_TMPDIR = `SELECT @@tmpdir`;
+let $MYSQLD_DATADIR = `SELECT @@datadir`;
+
+--echo # Treating compact format as dynamic format after import stmt
+
+CREATE TABLE t1
+(a int AUTO_INCREMENT PRIMARY KEY,
+ b blob,
+ c blob,
+ KEY (b(200))) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+
+INSERT INTO t1 (b, c) values (repeat("ab", 200), repeat("bc", 200));
+INSERT INTO t1 (b, c) values (repeat("bc", 200), repeat("cd", 200));
+INSERT INTO t1 (b, c) values (repeat("cd", 200), repeat("ef", 200));
+INSERT INTO t1 (b, c) values (repeat("de", 200), repeat("fg", 200));
+INSERT INTO t1 (b, c) values (repeat("ef", 200), repeat("gh", 200));
+INSERT INTO t1 (b, c) values (repeat("fg", 200), repeat("hi", 200));
+INSERT INTO t1 (b, c) values (repeat("gh", 200), repeat("ij", 200));
+INSERT INTO t1 (b, c) values (repeat("hi", 200), repeat("jk", 200));
+INSERT INTO t1 (b, c) values (repeat("ij", 200), repeat("kl", 200));
+INSERT INTO t1 (b, c) values (repeat("jk", 200), repeat("lm", 200));
+INSERT INTO t1 (b, c) SELECT b,c FROM t1 ORDER BY a;
+INSERT INTO t1 (b, c) SELECT b,c FROM t1 ORDER BY a;
+SELECT COUNT(*) FROM t1;
+
+FLUSH TABLE t1 FOR EXPORT;
+--echo # List before copying files
+let MYSQLD_DATADIR =`SELECT @@datadir`;
+
+--list_files $MYSQLD_DATADIR/test
+perl;
+do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl";
+ib_backup_tablespaces("test", "t1");
+EOF
+
+UNLOCK TABLES;
+ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 DISCARD TABLESPACE;
+
+--list_files $MYSQLD_DATADIR/test
+perl;
+do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl";
+ib_discard_tablespaces("test", "t1");
+ib_restore_tablespaces("test", "t1");
+EOF
+
+--remove_file $MYSQLD_DATADIR/test/t1.cfg
+--disable_warnings
+ALTER TABLE t1 IMPORT TABLESPACE;
+--enable_warnings
+SHOW CREATE TABLE t1;
+UPDATE t1 set b = repeat("de", 100) where b = repeat("cd", 200);
+--replace_column 9 #
+explain SELECT a FROM t1 where b = repeat("de", 100);
+SELECT a FROM t1 where b = repeat("de", 100);
+SELECT COUNT(*) FROM t1;
+DELETE FROM t1;
+--source include/wait_all_purged.inc
+CHECK TABLE t1;
+DROP TABLE t1;
+
+CREATE TABLE t1
+(c1 int AUTO_INCREMENT PRIMARY KEY,
+ c2 POINT NOT NULL,
+ c3 LINESTRING NOT NULL,
+ SPATIAL INDEX idx1(c2)) ENGINE=InnoDB ROW_FORMAT=COMPACT;
+
+INSERT INTO t1(c2,c3) VALUES(
+ ST_GeomFromText('POINT(10 10)'),
+ ST_GeomFromText('LINESTRING(5 5,20 20,30 30)'));
+
+INSERT INTO t1(c2,c3) VALUES(
+ ST_GeomFromText('POINT(20 20)'),
+ ST_GeomFromText('LINESTRING(5 15,20 10,30 20)'));
+
+INSERT INTO t1(c2,c3) VALUES(
+ ST_GeomFromText('POINT(30 30)'),
+ ST_GeomFromText('LINESTRING(10 5,20 24,30 32)'));
+
+INSERT INTO t1(c2,c3) VALUES(
+ ST_GeomFromText('POINT(40 40)'),
+ ST_GeomFromText('LINESTRING(15 5,25 20,35 30)'));
+
+INSERT INTO t1(c2,c3) VALUES(
+ ST_GeomFromText('POINT(50 10)'),
+ ST_GeomFromText('LINESTRING(15 15,24 10,31 20)'));
+
+INSERT INTO t1(c2,c3) VALUES(
+ ST_GeomFromText('POINT(60 50)'),
+ ST_GeomFromText('LINESTRING(10 15,20 44,35 32)'));
+
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+INSERT INTO t1(c2, c3) SELECT c2, c3 FROM t1;
+
+FLUSH TABLE t1 FOR EXPORT;
+--echo # List before copying files
+let MYSQLD_DATADIR =`SELECT @@datadir`;
+
+--list_files $MYSQLD_DATADIR/test
+perl;
+do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl";
+ib_backup_tablespaces("test", "t1");
+EOF
+
+UNLOCK TABLES;
+ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
+ALTER TABLE t1 DISCARD TABLESPACE;
+perl;
+do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl";
+ib_discard_tablespaces("test", "t1");
+ib_restore_tablespaces("test", "t1");
+EOF
+
+--remove_file $MYSQLD_DATADIR/test/t1.cfg
+--disable_warnings
+ALTER TABLE t1 IMPORT TABLESPACE;
+--enable_warnings
+SHOW CREATE TABLE t1;
+UPDATE t1 SET C2 = ST_GeomFromText('POINT(0 0)');
+SELECT COUNT(*) FROM t1;
+DELETE FROM t1;
+CHECK TABLE t1;
+--source include/wait_all_purged.inc
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/innodb-alter.test b/mysql-test/suite/innodb/t/innodb-alter.test
index 14f90dec43c..961d8653e90 100644
--- a/mysql-test/suite/innodb/t/innodb-alter.test
+++ b/mysql-test/suite/innodb/t/innodb-alter.test
@@ -206,7 +206,7 @@ CREATE TABLE tu (
) ENGINE=InnoDB;
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER TABLE tu ADD COLUMN c CHAR(1) NOT NULL FIRST, LOCK=NONE;
-# Instant ADD COLUMN (adding after the visible FTS_DOC_ID)
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER TABLE tu ADD COLUMN c CHAR(1) NOT NULL, LOCK=NONE;
DROP TABLE tu;
@@ -217,7 +217,7 @@ CREATE TABLE tv (
) ENGINE=InnoDB;
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER TABLE tv ADD COLUMN c CHAR(1) NOT NULL FIRST, LOCK=NONE;
-# Instant ADD COLUMN (adding after the visible FTS_DOC_ID)
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER TABLE tv ADD COLUMN c CHAR(1) NOT NULL, LOCK=NONE;
DROP TABLE tv;
diff --git a/mysql-test/suite/innodb/t/innodb-index-online.test b/mysql-test/suite/innodb/t/innodb-index-online.test
index 4cdbdb7c584..5e21fa896a4 100644
--- a/mysql-test/suite/innodb/t/innodb-index-online.test
+++ b/mysql-test/suite/innodb/t/innodb-index-online.test
@@ -225,10 +225,7 @@ SET DEBUG_SYNC = 'row_log_apply_before SIGNAL c2e_created WAIT_FOR dml2_done';
# Ensure that the ALTER TABLE will be executed even with some concurrent DML.
SET lock_wait_timeout = 10;
--send
-# FIXME: MDEV-13668
-#ALTER TABLE t1 CHANGE c2 c22 INT, DROP INDEX c2d, ADD INDEX c2e(c22),
-ALTER TABLE t1 DROP INDEX c2d, ADD INDEX c2e(c2),
-ALGORITHM = INPLACE;
+ALTER TABLE t1 CHANGE c2 c22 INT, DROP INDEX c2d, ADD INDEX c2e(c22, c3(10)), ALGORITHM = NOCOPY;
# Generate some log (delete-mark, delete-unmark, insert etc.)
# while the index creation is blocked. Some of this may run
diff --git a/mysql-test/suite/innodb/t/innodb-index.test b/mysql-test/suite/innodb/t/innodb-index.test
index 8bd3919af91..b3c095c9b33 100644
--- a/mysql-test/suite/innodb/t/innodb-index.test
+++ b/mysql-test/suite/innodb/t/innodb-index.test
@@ -1139,7 +1139,9 @@ AND support IN ('YES', 'DEFAULT', 'ENABLED');
if ($have_debug) {
# Initiate shutdown in order to issue a redo log checkpoint and to discard
# the redo log record that was emitted due to '+d,fil_names_write_bogus'.
+--let $restart_noprint=2
--source include/restart_mysqld.inc
+--let $restart_noprint=0
}
SELECT * FROM t1;
diff --git a/mysql-test/suite/innodb/t/innodb-rollback.test b/mysql-test/suite/innodb/t/innodb-rollback.test
new file mode 100644
index 00000000000..1d3e05b021f
--- /dev/null
+++ b/mysql-test/suite/innodb/t/innodb-rollback.test
@@ -0,0 +1,17 @@
+--source include/have_innodb.inc
+
+#
+# MDEV-18632: wsrep_is_wsrep_xid: Conditional jump or move depends on uninitialised value
+#
+CREATE TABLE t1 (a INT) ENGINE=InnoDB;
+--connect (con1,localhost,root,,test)
+ALTER TABLE t1;
+--connect (con2,localhost,root,,test)
+--error ER_SP_DOES_NOT_EXIST
+SELECT f() FROM t1;
+
+# Cleanup
+--disconnect con2
+--disconnect con1
+--connection default
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/innodb-wl5522-debug.test b/mysql-test/suite/innodb/t/innodb-wl5522-debug.test
index ca37521b9cf..fb0fe8fccd1 100644
--- a/mysql-test/suite/innodb/t/innodb-wl5522-debug.test
+++ b/mysql-test/suite/innodb/t/innodb-wl5522-debug.test
@@ -15,6 +15,7 @@
-- source include/have_innodb.inc
+let $restart_noprint=2;
call mtr.add_suppression("InnoDB: Operating system error number .* in a file operation.");
call mtr.add_suppression("InnoDB: The error means the system cannot find the path specified.");
call mtr.add_suppression("InnoDB: If you are installing InnoDB, remember that you must create directories yourself, InnoDB does not create them.");
@@ -1017,20 +1018,6 @@ do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl";
ib_restore_tablespaces("test_wl5522", "t1");
EOF
-# Test failure after importing the cluster index
-SET SESSION debug_dbug="+d,ib_import_set_max_rowid_failure";
-
---error ER_NOT_KEYFILE
-ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
-
-SET SESSION debug_dbug=@saved_debug_dbug;
-
-# Left over from the failed IMPORT
-perl;
-do "$ENV{MTR_SUITE_DIR}/include/innodb-util.pl";
-ib_unlink_tablespace("test_wl5522", "t1");
-EOF
-
DROP TABLE test_wl5522.t1;
--disable_query_log
diff --git a/mysql-test/suite/innodb/t/innodb-wl5522.test b/mysql-test/suite/innodb/t/innodb-wl5522.test
index 24dad21b3af..744768a1d6c 100644
--- a/mysql-test/suite/innodb/t/innodb-wl5522.test
+++ b/mysql-test/suite/innodb/t/innodb-wl5522.test
@@ -2,6 +2,7 @@
--source include/not_embedded.inc
-- source include/have_innodb.inc
+-- source include/innodb_checksum_algorithm.inc
call mtr.add_suppression("InnoDB: Unable to import tablespace .* because it already exists. Please DISCARD the tablespace before IMPORT.");
call mtr.add_suppression("Index for table 't2' is corrupt; try to repair it");
@@ -9,6 +10,7 @@ FLUSH TABLES;
let $MYSQLD_TMPDIR = `SELECT @@tmpdir`;
let $MYSQLD_DATADIR = `SELECT @@datadir`;
+let $checksum_algorithm = `SELECT @@innodb_checksum_algorithm`;
CREATE TABLE t1
(a INT AUTO_INCREMENT PRIMARY KEY,
@@ -80,8 +82,19 @@ ALTER TABLE t2 DISCARD TABLESPACE;
--error ER_TABLE_SCHEMA_MISMATCH
ALTER TABLE t2 IMPORT TABLESPACE;
--remove_file $MYSQLD_DATADIR/test/t2.cfg
+
+let $error_code = ER_TABLE_SCHEMA_MISMATCH;
+
+if ($checksum_algorithm == "full_crc32") {
+ let $error_code = 0;
+}
+
+if ($checksum_algorithm == "strict_full_crc32") {
+ let $error_code = 0;
+}
+
--replace_regex /(FSP_SPACE_FLAGS=0x)[0-9a-f]+(,.*0x)[0-9a-f]+(.*)/\1*\2*\3/
---error ER_TABLE_SCHEMA_MISMATCH
+--error $error_code
ALTER TABLE t2 IMPORT TABLESPACE;
DROP TABLE t2;
diff --git a/mysql-test/suite/innodb/t/innodb.test b/mysql-test/suite/innodb/t/innodb.test
index 2ea6d05d9eb..0062ba72add 100644
--- a/mysql-test/suite/innodb/t/innodb.test
+++ b/mysql-test/suite/innodb/t/innodb.test
@@ -1253,7 +1253,7 @@ CREATE TABLE t2 (b_id tinyint(4) NOT NULL default '0',b_a tinyint(4) NOT NULL de
CONSTRAINT fk_b_a FOREIGN KEY (b_a) REFERENCES t1 (a_id) ON DELETE CASCADE ON UPDATE NO ACTION) ENGINE=InnoDB DEFAULT CHARSET=latin1;
--enable_warnings
INSERT INTO t2 VALUES (1,1),(2,1),(3,1),(4,2),(5,2);
-SELECT * FROM (SELECT t1.*,GROUP_CONCAT(t2.b_id SEPARATOR ',') as b_list FROM (t1 LEFT JOIN (t2) on t1.a_id = t2.b_a) GROUP BY t1.a_id ) AS xyz;
+SELECT * FROM (SELECT t1.*,GROUP_CONCAT(t2.b_id SEPARATOR ',') as b_list FROM (t1 LEFT JOIN t2 on t1.a_id = t2.b_a) GROUP BY t1.a_id ) AS xyz;
DROP TABLE t2;
DROP TABLE t1;
@@ -1321,8 +1321,7 @@ drop table t1;
# Test for testable InnoDB status variables. This test
# uses previous ones(pages_created, rows_deleted, ...).
---replace_result 511 ok 512 ok 2047 ok 513 ok 514 ok 515 ok
-SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total';
+SELECT IF(variable_value BETWEEN 488 AND 512, 'OK', variable_value) FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total';
SELECT variable_value FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_page_size';
SELECT variable_value - @innodb_rows_deleted_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_deleted';
SELECT variable_value - @innodb_rows_inserted_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_inserted';
diff --git a/mysql-test/suite/innodb/t/innodb_bulk_create_index_debug.test b/mysql-test/suite/innodb/t/innodb_bulk_create_index_debug.test
index 83a12431802..34c9336e517 100644
--- a/mysql-test/suite/innodb/t/innodb_bulk_create_index_debug.test
+++ b/mysql-test/suite/innodb/t/innodb_bulk_create_index_debug.test
@@ -5,6 +5,7 @@
# Test Restart & Crash Recovery.
-- source include/big_test.inc
-- source include/innodb_page_size_small.inc
+let $restart_noprint=2;
# Test Row Format: REDUNDANT.
let $row_format = REDUNDANT;
diff --git a/mysql-test/suite/innodb/t/innodb_stats.test b/mysql-test/suite/innodb/t/innodb_stats.test
index b9f71f8fa6f..09515ec9720 100644
--- a/mysql-test/suite/innodb/t/innodb_stats.test
+++ b/mysql-test/suite/innodb/t/innodb_stats.test
@@ -9,6 +9,9 @@
DROP TABLE IF EXISTS test_innodb_stats;
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= COMPLEMENTARY;
+
CREATE TABLE test_innodb_stats (
a INT,
KEY a_key (a)
@@ -59,3 +62,4 @@ CREATE TABLE test_innodb_stats (
-- disable_query_log
DROP TABLE test_innodb_stats;
+set @@use_stat_tables= @save_use_stat_tables;
diff --git a/mysql-test/suite/innodb/t/innodb_stats_fetch.test b/mysql-test/suite/innodb/t/innodb_stats_fetch.test
index 8544509ccad..549ad65feff 100644
--- a/mysql-test/suite/innodb/t/innodb_stats_fetch.test
+++ b/mysql-test/suite/innodb/t/innodb_stats_fetch.test
@@ -11,6 +11,9 @@
-- vertical_results
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
CREATE TABLE test_ps_fetch
(a INT, b INT, c INT, d INT, PRIMARY KEY (a, b), INDEX idx (c, d))
ENGINE=INNODB STATS_PERSISTENT=1;
@@ -77,3 +80,4 @@ table_rows, avg_row_length, max_data_length, index_length
FROM information_schema.tables WHERE table_name = 'test_ps_fetch';
DROP TABLE test_ps_fetch;
+set @@use_stat_tables = @save_use_stat_tables;
diff --git a/mysql-test/suite/innodb/t/instant_alter.test b/mysql-test/suite/innodb/t/instant_alter.test
index 3e62038e591..b6be9137532 100644
--- a/mysql-test/suite/innodb/t/instant_alter.test
+++ b/mysql-test/suite/innodb/t/instant_alter.test
@@ -361,6 +361,390 @@ COMMIT;
--source include/wait_all_purged.inc
DROP TABLE t1;
+# MDEV-15562 Instant DROP/ADD/reorder columns
+
+eval CREATE TABLE t1 (a INT, b INT UNIQUE) $engine;
+INSERT INTO t1 (a) VALUES (NULL), (NULL);
+ALTER TABLE t1 DROP a, ADD COLUMN a INT;
+DELETE FROM t1;
+BEGIN;INSERT INTO t1 SET a=NULL;ROLLBACK;
+DELETE FROM t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (a INT PRIMARY KEY, t VARCHAR(33101) NOT NULL) $engine;
+INSERT INTO t1 VALUES(347,'');
+ALTER TABLE t1 DROP COLUMN t, ALGORITHM=INSTANT;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (a INT) $engine;
+INSERT INTO t1() VALUES();
+ALTER TABLE t1 ADD COLUMN b INT FIRST, ADD COLUMN c INT AFTER b;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (t TEXT) $engine;
+SET @t=REPEAT('x',@@innodb_page_size / 2);
+INSERT INTO t1 VALUES (@t),(@t),(@t),(@t),(@t),(@t),(NULL),(@t),(@t),(@t),(@t);
+ALTER TABLE t1 ADD COLUMN a INT FIRST;
+UPDATE t1 SET a = 0;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (t TEXT) $engine;
+INSERT INTO t1 SET t = @x;
+ALTER TABLE t1 DROP COLUMN t, ADD COLUMN i INT NOT NULL DEFAULT 1;
+ALTER TABLE t1 ADD COLUMN t TEXT;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+#
+# MDEV-17735 Assertion failure in row_parse_int() on first ADD/DROP COLUMN
+# when an AUTO_INCREMENT column is not in PRIMARY KEY
+#
+eval CREATE TABLE t1 (a INT AUTO_INCREMENT, b INT, KEY(a)) $engine;
+INSERT INTO t1 SET a=NULL;
+ALTER TABLE t1 DROP COLUMN b;
+ALTER TABLE t1 ADD COLUMN c INT NOT NULL DEFAULT 42;
+INSERT INTO t1 SET a=NULL;
+UPDATE t1 SET a=a+2;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (i INT) $engine;
+INSERT INTO t1 SET i=1;
+ALTER TABLE t1 ADD COLUMN b BIT FIRST;
+ALTER TABLE t1 ADD COLUMN v INT AS (i) VIRTUAL;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+# MDEV-17721 Corrupted data dictionary after instant DROP COLUMN
+eval CREATE TABLE t1 (ts TIMESTAMP) $engine;
+ALTER TABLE t1 ADD COLUMN f VARCHAR(8), ADD COLUMN dt DATETIME;
+ALTER TABLE t1 ADD COLUMN b BIT, DROP COLUMN f, ADD COLUMN t TIME FIRST;
+ALTER TABLE t1 ADD COLUMN ts2 TIMESTAMP;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (f1 INT, f2 INT, f3 INT) $engine;
+INSERT INTO t1 VALUES (4,4,4);
+ALTER TABLE t1 DROP f1, DROP f2, ADD f4 INT, ADD f5 INT;
+DELETE FROM t1;
+ALTER TABLE t1 DROP COLUMN f4;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (f1 INT, f2 INT, f3 INT) $engine;
+ALTER TABLE t1 DROP f2, ADD COLUMN f4 INT;
+ALTER TABLE t1 DROP f4;
+ALTER TABLE t1 DROP f1;
+DROP TABLE t1;
+
+# MDEV-17901 Crash after instant DROP COLUMN of AUTO_INCREMENT column
+eval CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, f INT, KEY(id)) $engine;
+ALTER TABLE t1 DROP COLUMN id;
+INSERT INTO t1 () VALUES (),();
+SELECT * FROM t1;
+# Adding an AUTO_INCREMENT column will always require a table rebuild.
+ALTER TABLE t1 ADD COLUMN id INT NOT NULL AUTO_INCREMENT FIRST, ADD KEY(id);
+SELECT * FROM t1;
+DROP TABLE t1;
+
+# MDEV-18149 Crash after DROP COLUMN of AUTO_INCREMENT column on nonempty table
+eval CREATE TABLE t1 (id INT NOT NULL AUTO_INCREMENT, f INT, KEY(id)) $engine;
+INSERT INTO t1 SET f=NULL;
+ALTER TABLE t1 DROP COLUMN id;
+INSERT INTO t1 SET f=NULL;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+# MDEV-18076/MDEV-18077 Crash on AUTO_INCREMENT column after instant DROP
+eval CREATE TABLE t1(f INT, k INT NOT NULL AUTO_INCREMENT, KEY(k)) $engine;
+ALTER TABLE t1 DROP COLUMN f;
+INSERT INTO t1 VALUES (1);
+DROP TABLE t1;
+
+eval CREATE TABLE t1(pk INT PRIMARY KEY, f INT, k INT AUTO_INCREMENT, KEY(k))
+$engine;
+ALTER TABLE t1 DROP COLUMN f;
+INSERT INTO t1 (pk) VALUES (1);
+DROP TABLE t1;
+
+# MDEV-17763 Assertion `len == 20U' failed in rec_convert_dtuple_to_rec_comp
+# upon DROP COLUMN
+eval CREATE TABLE t1 (
+ pk INT PRIMARY KEY,
+ f1 INT,
+ f2 CHAR(255),
+ f3 BIGINT,
+ f4 INT,
+ f5 CHAR(255),
+ f6 CHAR(255),
+ f7 CHAR(255) NOT NULL,
+ f8 INT,
+ f9 CHAR(10)
+) $engine;
+
+INSERT INTO t1 VALUES
+ (1, 1, 'a', 1, 1, 'a', 'a', 'a', 1, 'a'),
+ (2, 2, 'b', 2, 2, 'b', 'b', 'b', 2, 'b'),
+ (3, 3, 'c', 3, 3, 'c', 'c', 'c', 3, 'c'),
+ (4, 4, 'd', 4, 4, 'd', 'd', 'd', 4, 'd'),
+ (5, 5, 'e', 5, 5, 'e', 'e', 'e', 5, 'e'),
+ (6, 6, 'f', 6, 6, 'f', 'f', 'f', 6, 'f'),
+ (7, 7, 'g', 7, 7, 'g', 'g', 'g', 7, 'g'),
+ (8, 8, 'h', 8, 8, 'h', 'h', 'h', 8, 'h'),
+ (9, 9, 'i', 9, 9, 'i', 'i', 'i', 9, 'i'),
+ (10, 0, 'j', 0, 0, 'j', 'j', 'j', 0, 'j'),
+ (11, 1, 'k', 1, 1, 'k', 'k', 'k', 1, 'k'),
+ (12, 2, 'l', 2, 2, 'l', 'l', 'l', 2, 'l'),
+ (13, 3, 'm', 3, 3, 'm', 'm', 'm', 3, 'm'),
+ (14, 4, 'n', 4, 4, 'n', 'n', 'n', 4, 'n'),
+ (15, 5, 'o', 5, 5, 'o', 'o', 'o', 5, 'o');
+
+DELETE FROM t1 WHERE pk=1;
+--source include/wait_all_purged.inc
+INSERT INTO t1 VALUES
+ (1, 1, 'a', 1, 1, 'a', 'a', 'a', 1, 'a');
+ALTER TABLE t1 DROP COLUMN f1;
+DROP TABLE t1;
+
+# MDEV-17820 Assertion failures on DROP COLUMN
+eval CREATE TABLE t1 (
+ pk INT PRIMARY KEY,
+ f1 INT, f2 CHAR(32) NOT NULL,
+ f3 INT NOT NULL, f4 INT NOT NULL, f5 INT, f6 CHAR(32) NOT NULL,
+ f7 CHAR(32), f8 CHAR(32)
+) $engine;
+
+INSERT INTO t1 VALUES
+ (1,9,'',2,88,88,'','',''),(2,48,'',8,68,92,'','',''),
+ (3,41,'',56,84,37,'','',''),(4,NULL,'',6,6,NULL,'','',''),
+ (5,52,'',37,44,20,'','',''),(6,44,'',53,4,NULL,'','',''),
+ (7,24,'',54,8,54,'','',''),(8,80,'',3,52,20,'','',''),
+ (9,71,'',34,32,NULL,'','',''),(10,14,'',6,64,88,'','',''),
+ (11,48,'',8,25,42,'','',''),(12,16,'',8,7,NULL,'','',''),
+ (13,NULL,'',22,0,95,'','',''),(14,4,'',72,48,NULL,'','',''),
+ (15,4,'',5,64,2,'','',''),(16,NULL,'',9,40,30,'','',''),
+ (17,92,'',48,2,NULL,'','',''),(18,36,'',48,51,7,'','',''),
+ (19,NULL,'',80,96,NULL,'','',''),(20,96,'',9,80,NULL,'','',''),
+ (21,50,'',16,40,NULL,'','',''),(22,NULL,'',7,84,8,'','',''),
+ (23,28,'',93,80,NULL,'','',''),(24,31,'',40,38,NULL,'','',''),
+ (25,85,'',8,5,88,'','',''),(26,66,'',8,32,4,'','',''),
+ (51,52,'',6,92,15,'','',''),(52,77,'',24,24,28,'','',''),
+ (53,8,'',75,31,NULL,'','',''),(54,48,'',5,8,1,'','',''),
+ (55,90,'',56,12,5,'','',''),(56,92,'',4,9,88,'','',''),
+ (57,83,'',23,40,72,'','',''),(58,7,'',4,40,32,'','',''),
+ (59,28,'',2,3,32,'','',''),(60,16,'',80,4,NULL,'','',''),
+ (61,44,'',88,24,NULL,'','',''),(62,4,'',5,25,3,'','',''),
+ (63,NULL,'',7,24,76,'','',''),(64,0,'',13,40,73,'','',''),
+ (101,NULL,'',1,49,75,'','',''),(102,34,'',10,17,20,'','',''),
+ (103,8,'',2,2,NULL,'','',''),(104,12,'',44,48,52,'','',''),
+ (105,8,'',4,19,38,'','',''),(106,20,'',6,80,9,'','',''),
+ (107,72,'',72,16,56,'','',''),(108,76,'',98,24,21,'','',''),
+ (109,67,'',16,91,NULL,'','',''),(110,72,'',72,3,48,'','',''),
+ (151,8,'',3,86,NULL,'','',''),(152,NULL,'',52,72,0,'','',''),
+ (153,NULL,'',46,30,92,'','',''),(154,80,'',1,40,48,'','',''),
+ (155,24,'',68,68,8,'','',''),(156,85,'',85,72,60,'','',''),
+ (157,7,'',7,12,6,'','',''),(158,NULL,'',48,48,80,'','',''),
+ (159,12,'',0,36,0,'','',''),(160,2,'',6,52,NULL,'','',''),
+ (201,0,'',1,3,NULL,'','',''),(202,NULL,'',3,53,14,'','',''),
+ (203,84,'',6,20,NULL,'','',''),(204,38,'',25,13,88,'','',''),
+ (205,1,'',2,69,5,'','',''),(206,7,'',60,22,NULL,'','',''),
+ (207,NULL,'',5,4,NULL,'','',''),(251,7,'',0,4,40,'','',''),
+ (252,4,'',16,8,NULL,'','',''),(253,14,'',60,12,99,'','',''),
+ (254,84,'',68,16,5,'','',''),(255,3,'',70,36,61,'','',''),
+ (256,7,'',18,48,NULL,'','',''),(257,NULL,'',68,53,NULL,'','',''),
+ (258,29,'',52,16,64,'','',''),(259,NULL,'',80,92,40,'','',''),
+ (301,68,'',1,48,48,'','',''),(302,2,'',1,1,32,'','',''),
+ (303,44,'',60,96,16,'','',''),(304,32,'',52,64,32,'','',''),
+ (305,88,'',37,72,NULL,'','',''),(306,5,'',35,60,20,'','',''),
+ (307,35,'',4,48,NULL,'','',''),(308,4,'',92,44,80,'','',''),
+ (351,48,'',60,4,40,'','',''),(352,7,'',9,61,13,'','',''),
+ (353,0,'',5,93,53,'','',''),(354,7,'',1,20,NULL,'','',''),
+ (355,84,'',5,48,96,'','',''),(356,NULL,'',39,92,36,'','',''),
+ (357,88,'',9,76,44,'','',''),(358,66,'',34,67,80,'','',''),
+ (359,8,'',8,52,NULL,'','',''),(360,3,'',53,83,NULL,'','',''),
+ (361,23,'',44,9,48,'','',''),(362,4,'',0,54,48,'','',''),
+ (363,75,'',66,76,52,'','','');
+
+ALTER TABLE t1 ADD COLUMN x VARCHAR(255) DEFAULT ' foobar ';
+UPDATE t1 SET f1 = 0;
+ALTER TABLE t1 DROP COLUMN x;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (f1 VARCHAR(1), f2 VARCHAR(2)) $engine;
+ALTER TABLE t1 MODIFY f2 VARCHAR (8) FIRST;
+DROP TABLE t1;
+
+# MDEV-18035 Failing assertion on DELETE
+eval CREATE TABLE t1 (a INT UNIQUE, b INT UNIQUE, PRIMARY KEY(a,b)) $engine;
+ALTER TABLE t1 DROP PRIMARY KEY;
+ALTER TABLE t1 CHANGE COLUMN a a INT;
+DELETE FROM t1 WHERE a = NULL OR a IS NULL;
+DROP TABLE t1;
+
+# MDEV-18048 Failing assertion on ALTER
+eval CREATE TABLE t1 (a INT, b INT, c INT NOT NULL, d INT,
+e INT, f INT, g INT, h INT, j INT) $engine;
+ALTER TABLE t1 MODIFY COLUMN c INT, MODIFY COLUMN a INT AFTER b;
+DROP TABLE t1;
+
+# MDEV-18649 Failing assertion on ALTER for ROW_FORMAT=REDUNDANT
+eval CREATE TABLE t1 (a INT NOT NULL, b INT NOT NULL) $engine;
+INSERT INTO t1 VALUES (0,0);
+ALTER TABLE t1 MODIFY a INT AFTER b;
+# Exploit MDEV-17468 to force the table definition to be reloaded
+ALTER TABLE t1 ADD COLUMN v INT AS (a) VIRTUAL;
+ALTER TABLE t1 MODIFY b INT NOT NULL AFTER a;
+DROP TABLE t1;
+
+# MDEV-18033/MDEV-18034 Failing assertion on ALTER
+eval CREATE TABLE t1 (a INT NOT NULL) $engine;
+INSERT INTO t1 VALUES (1);
+ALTER TABLE t1 ADD COLUMN b INT;
+ALTER TABLE t1 MODIFY COLUMN a INT NULL;
+DROP TABLE t1;
+
+# MDEV-18160/MDEV-18162 Failing assertion on ALTER
+eval CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c INT) $engine;
+INSERT INTO t1 SET a=1;
+ALTER TABLE t1 DROP c;
+ALTER TABLE t1 DROP b, ADD v INT AS (a);
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c INT, d INT) $engine;
+INSERT INTO t1 SET a=1;
+ALTER TABLE t1 DROP c;
+ALTER TABLE t1 DROP b, ADD v INT AS (a);
+DROP TABLE t1;
+
+# MDEV-18218 Assertion `0' failed in btr_page_reorganize_low upon DROP COLUMN
+eval CREATE TABLE t1 (pk INT PRIMARY KEY, i INT, b BLOB NOT NULL) $engine;
+INSERT INTO t1 VALUES (1,10,REPEAT('foobar',2000));
+ALTER TABLE t1 DROP COLUMN b;
+INSERT INTO t1 VALUES (2,20);
+# this evicts and reloads the table definition until MDEV-17468 is fixed
+ALTER TABLE t1 ADD COLUMN vpk INT AS (pk);
+# this would load wrong metadata from the previous DROP COLUMN b, causing a crash
+ALTER TABLE t1 DROP COLUMN i;
+DROP TABLE t1;
+
+# MDEV-18315 Assertion instant.fields[i].col->same_format(*fields[i].col)
+# failed in dict_index_t::instant_add_field
+eval CREATE TABLE t1 (a INT, b INT) $engine;
+INSERT INTO t1 VALUES (1,1);
+ALTER TABLE t1 ADD f DATE AFTER a;
+ALTER TABLE t1 DROP b, DROP f;
+DROP TABLE t1;
+
+# MDEV-18316 Assertion is_added() failed in dict_col_t::instant_value
+eval CREATE TABLE t1 (a INT, b INT) $engine;
+INSERT INTO t1 VALUES (1,1);
+ALTER TABLE t1 ADD COLUMN f INT AFTER a;
+ALTER TABLE t1 DROP b, DROP f;
+DROP TABLE t1;
+
+# MDEV-18579 Assertion !ctx->online || num_fts_index == 0
+eval CREATE TABLE t1(t TEXT NOT NULL, FULLTEXT(t)) $engine;
+ALTER TABLE t1 MODIFY COLUMN t TEXT;
+DROP TABLE t1;
+
+# MDEV-18598 Assertions and wrong results after MDEV-15563 extending INT
+eval CREATE TABLE t1 (f TINYINT, g SMALLINT UNSIGNED) $engine;
+INSERT INTO t1 VALUES(127,6502),(-128,33101);
+--enable_info
+ALTER TABLE t1 MODIFY f SMALLINT DEFAULT 12345,
+MODIFY g BIGINT UNSIGNED DEFAULT 1234567;
+--disable_info
+SELECT * FROM t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (f BIT(8)) $engine;
+INSERT INTO t1 VALUES (b'10000000'),(b'00000001');
+--enable_info
+ALTER TABLE t1 MODIFY f BIT(16);
+--disable_info
+INSERT INTO t1 VALUES (b'1000000010101111'),(b'10000000');
+SELECT HEX(f) FROM t1;
+--error ER_WARN_DATA_OUT_OF_RANGE
+ALTER TABLE t1 MODIFY f SMALLINT;
+--enable_info
+ALTER TABLE t1 MODIFY f SMALLINT UNSIGNED;
+--disable_info
+SELECT * FROM t1;
+--error ER_DATA_TOO_LONG
+ALTER TABLE t1 MODIFY f BIT;
+--error ER_DATA_TOO_LONG
+ALTER TABLE t1 MODIFY f BIT(15);
+DELETE FROM t1 LIMIT 3;
+--enable_info
+ALTER TABLE t1 MODIFY f BIT(15);
+ALTER TABLE t1 MODIFY f BIT(8);
+--disable_info
+SELECT HEX(f) FROM t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (b BIT NOT NULL) $engine DEFAULT CHARSET utf16;
+INSERT INTO t1 SET b=b'1';
+--enable_info
+ALTER TABLE t1 CHANGE b c BIT NOT NULL;
+--disable_info
+SELECT HEX(c) FROM t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (c VARCHAR(10) NOT NULL DEFAULT 'scary') $engine;
+INSERT INTO t1() VALUES();
+--enable_info
+ALTER TABLE t1 ADD f TINYINT NOT NULL DEFAULT -42;
+ALTER TABLE t1 MODIFY f MEDIUMINT NOT NULL DEFAULT 64802,
+MODIFY c VARCHAR(20) NOT NULL DEFAULT 'gory',
+ADD d DATETIME;
+--disable_info
+INSERT INTO t1() VALUES();
+INSERT INTO t1 (c,f,d) VALUES ('fury', -8388608, now());
+SELECT * FROM t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (t TINYINT PRIMARY KEY, m MEDIUMINT UNIQUE) $engine;
+SELECT table_id INTO @table_id1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
+WHERE name = 'test/t1';
+INSERT INTO t1 VALUES (-42, -123456);
+--enable_info
+ALTER TABLE t1 CHANGE t s SMALLINT;
+SELECT table_id INTO @table_id2 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
+WHERE name = 'test/t1';
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+ALTER TABLE t1 CHANGE m i INT, ALGORITHM=INSTANT;
+ALTER TABLE t1 CHANGE m i INT;
+SELECT table_id INTO @table_id3 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS
+WHERE name = 'test/t1';
+--disable_info
+SELECT @table_id1 = @table_id2, @table_id2 = @table_id3;
+INSERT IGNORE INTO t1 VALUES (0, -123456);
+REPLACE INTO t1 VALUES(-42, 123456);
+INSERT IGNORE INTO t1 VALUES(32768, 2147483648);
+SELECT * FROM t1;
+DROP TABLE t1;
+
+eval CREATE TABLE t1 (a SERIAL, b INT, c TINYINT UNIQUE) $engine;
+INSERT INTO t1 (c) VALUES(1),(2),(3);
+--enable_info
+ALTER TABLE t1 MODIFY c BIGINT;
+--disable_info
+UPDATE t1 SET b=1 WHERE c=2;
+UPDATE t1 SET c=4 WHERE a=3;
+UPDATE t1 SET b=2 WHERE c>3;
+--error ER_DUP_ENTRY
+UPDATE t1 SET c=c+1;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+# MDEV-18719 Assertion (c.prtype ^ o->prtype) & ... failed on ALTER TABLE
+eval CREATE TABLE t1 (a INT PRIMARY KEY, b VARCHAR(1)) $engine;
+INSERT INTO t1 VALUES(1,'a');
+ALTER TABLE t1 MODIFY b VARCHAR(256), ADD COLUMN c INT;
+INSERT INTO t1 VALUES(2,'bah',3);
+SELECT * FROM t1;
+DROP TABLE t1;
+
dec $format;
}
disconnect analyze;
diff --git a/mysql-test/suite/innodb/t/instant_alter_bugs.test b/mysql-test/suite/innodb/t/instant_alter_bugs.test
index 9cc0037703b..a5b1508305e 100644
--- a/mysql-test/suite/innodb/t/instant_alter_bugs.test
+++ b/mysql-test/suite/innodb/t/instant_alter_bugs.test
@@ -125,7 +125,7 @@ INSERT INTO t1 (f1,f2,f3,f4,f5,f6,f7,f8) VALUES
INSERT INTO t1 (f1,f2,f3,f4,f5,f6,f7,f8) VALUES ('impact', 'b', 'h', 185, 'fj', 7, 7, 3);
ALTER TABLE t1 ADD COLUMN filler VARCHAR(255) DEFAULT '';
-SELECT * FROM t1 INTO OUTFILE 'load.data';
+SELECT * INTO OUTFILE 'load.data' FROM t1;
UPDATE IGNORE t1 SET pk = 0;
LOAD DATA INFILE 'load.data' REPLACE INTO TABLE t1;
HANDLER t1 OPEN AS h;
@@ -136,3 +136,83 @@ HANDLER h CLOSE;
DROP TABLE t1;
--let $datadir= `select @@datadir`
--remove_file $datadir/test/load.data
+
+
+create table t (
+ a varchar(9),
+ b int,
+ c int,
+ row_start bigint unsigned generated always as row start invisible,
+ row_end bigint unsigned generated always as row end invisible,
+ period for system_time (row_start, row_end)
+) engine=innodb row_format=compressed with system versioning;
+insert into t values (repeat('a', 9), 1, 1);
+set @@system_versioning_alter_history = keep;
+alter table t modify a varchar(10), algorithm=instant;
+alter table t change b bb int, algorithm=instant;
+alter table t modify c int without system versioning, algorithm=instant;
+set @@system_versioning_alter_history = error;
+check table t;
+drop table t;
+
+--echo #
+--echo # MDEV-18219 Assertion `index->n_core_null_bytes <= ...' failed
+--echo # in rec_init_offsets after instant DROP COLUMN
+--echo #
+CREATE TABLE t1 (a INT, b INT NOT NULL) ENGINE=InnoDB;
+INSERT INTO t1 VALUES
+(0,9),(2,7),(3,1),(3,4),(8,4),(3,7),(6,1),(3,8),(1,2),(4,1),(0,8),(5,3),
+(1,3),(1,6),(2,1),(8,7),(6,0),(1,9),(9,4),(0,6),(9,3),(0,9),(9,4),(2,4),
+(2,7),(7,8),(8,2),(2,5),(6,1),(4,5),(5,3),(6,8),(4,9),(5,7),(7,5),(5,1),
+(8,8),(5,7),(3,8),(0,1),(8,4),(8,3),(9,7),(4,8),(1,1),(0,4),(2,6),(8,5),
+(8,8),(8,7),(6,7),(1,7),(9,6),(3,6),(1,9),(0,3),(5,3),(2,4),(0,6),(2,0),
+(6,5),(1,6),(2,4),(9,1),(3,0),(6,4),(1,3),(0,8),(3,5),(3,1),(8,9),(9,9),
+(7,9),(4,5),(2,2),(3,8),(0,8),(7,1),(2,0),(1,5),(7,3),(4,4),(3,9),(7,2),
+(6,2),(0,4),(2,0),(1,5),(5,7),(4,5),(3,7),(6,0),(2,1),(5,0),(1,0),(2,0),
+(8,4),(5,7),(3,5),(0,5),(7,6),(5,9),(1,2),(4,2),(8,5),(8,7),(2,8),(1,8),
+(4,3),(1,6),(7,8),(3,7),(4,6),(1,1),(3,0),(1,6),(2,0),(3,4),(4,8),(3,9),
+(8,0),(4,9),(4,0),(3,9),(6,4),(7,4),(5,8),(4,7),(7,3),(5,9),(2,3),(7,3),
+(0,4),(5,9),(9,8),(4,2),(3,6),(2,6),(1,8),(7,0),(0,0),(2,3),(1,2),(3,3),
+(2,7),(6,0),(9,0),(6,9),(4,6),(9,8),(0,7),(9,1),(9,6),(4,3),(7,7),(7,7),
+(4,1),(4,7),(7,3),(2,8),(5,8),(8,9),(3,9),(7,7),(0,8),(4,9),(3,2),(5,0),
+(1,7),(0,3),(2,9),(9,7),(7,5),(6,9),(8,5),(3,6),(1,1),(2,8),(7,9),(4,9),
+(6,6),(5,9),(5,3),(9,8),(3,3),(5,6),(0,9),(3,9),(7,9),(7,3),(5,2),(1,4),
+(4,4),(8,2),(2,2),(8,3),(9,1),(4,9),(9,8),(1,8),(1,8),(9,1),(1,1),(3,0),
+(4,6),(9,3),(3,3),(5,2),(0,1),(3,4),(3,2),(1,3),(4,4),(7,0),(4,6),(7,2),
+(4,5),(8,7),(7,8),(8,1),(3,5),(0,6),(3,5),(2,1),(4,4),(3,4),(2,1),(4,1);
+INSERT INTO t1 SELECT * FROM t1;
+ALTER TABLE t1 DROP a;
+# Exploit MDEV-17468 to force the table definition to be reloaded
+ALTER TABLE t1 ADD vb INT AS (b) VIRTUAL;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-19030 Assertion index->n_core_null_bytes <= ... failed
+--echo # in rec_init_offsets after instant DROP COLUMN
+--echo #
+CREATE TABLE t1 (a INT, b INT NOT NULL DEFAULT 0) ENGINE=InnoDB;
+INSERT INTO t1 () VALUES (),(),(),();
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+ALTER TABLE t1 FORCE;
+INSERT INTO t1 SELECT * FROM t1;
+ALTER TABLE t1 DROP a, ADD a SMALLINT NOT NULL;
+INSERT INTO t1 SELECT * FROM t1;
+INSERT INTO t1 SELECT * FROM t1;
+# Exploit MDEV-17468 to force the table definition to be reloaded
+ALTER TABLE t1 ADD vb INT AS (b) VIRTUAL;
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-18623 Assertion after DROP FULLTEXT INDEX and removing NOT NULL
+--echo #
+CREATE TABLE t1 (c TEXT NOT NULL, FULLTEXT INDEX ftidx(c)) ENGINE=InnoDB
+ROW_FORMAT=REDUNDANT;
+ALTER TABLE t1 DROP INDEX ftidx;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
+ALTER TABLE t1 MODIFY c TEXT NULL, ALGORITHM=INSTANT;
+ALTER TABLE t1 MODIFY c TEXT NULL;
+DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/instant_alter_charset.test b/mysql-test/suite/innodb/t/instant_alter_charset.test
new file mode 100644
index 00000000000..82ab9f9eb94
--- /dev/null
+++ b/mysql-test/suite/innodb/t/instant_alter_charset.test
@@ -0,0 +1,538 @@
+--source include/innodb_row_format.inc
+#--source include/innodb_page_size.inc
+
+--let $row_format= `SELECT @@GLOBAL.innodb_default_row_format`
+set names utf8;
+
+create table no_rebuild (
+ a char(150) charset utf8mb3 collate utf8mb3_general_ci
+) engine=innodb;
+create table rebuild (
+ a varchar(150) charset ascii
+) engine=innodb;
+
+set @id = (select table_id from information_schema.innodb_sys_tables
+ where name = 'test/no_rebuild');
+select name, prtype, len from information_schema.innodb_sys_columns
+ where table_id = @id;
+select c.prtype, c.len from information_schema.innodb_sys_columns as c inner join information_schema.innodb_sys_tables t on c.table_id = t.table_id
+ where t.name = 'test/rebuild' and c.name = 'a';
+alter table no_rebuild
+ change a a char(150) charset utf8mb3 collate utf8mb3_spanish_ci,
+ algorithm=inplace;
+alter table rebuild
+ change a a varchar(150) charset latin1 not null default 'asdf',
+ algorithm=inplace;
+select name, prtype, len from information_schema.innodb_sys_columns
+ where table_id = @id;
+select c.prtype, c.len from information_schema.innodb_sys_columns as c inner join information_schema.innodb_sys_tables t on c.table_id = t.table_id
+ where t.name = 'test/rebuild' and c.name = 'a';
+
+drop table no_rebuild, rebuild;
+
+create table supported_types (
+ id int primary key auto_increment,
+ a varchar(150) charset ascii,
+ b text(150) charset ascii,
+ c text charset ascii,
+ d tinytext charset ascii,
+ e mediumtext charset ascii,
+ f longtext charset ascii
+) engine=innodb;
+
+alter table supported_types
+ convert to charset latin1,
+ algorithm=instant;
+
+drop table supported_types;
+
+create table various_cases (
+ a char(150) charset ascii,
+ b varchar(150) as (a) virtual,
+ c varchar(150) as (a) persistent
+) engine=innodb;
+
+alter table various_cases
+ change a a char(150) charset latin1,
+ algorithm=inplace;
+
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table various_cases
+ change a a varchar(222),
+ algorithm=inplace;
+
+alter table various_cases
+ change b b varchar(150) as (a) virtual,
+ algorithm=inplace;
+
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
+alter table various_cases
+ change c c varchar(150) as (a) persistent,
+ algorithm=inplace;
+
+# Cannot instantly grow the storage size in bytes of a CHAR column
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table various_cases
+ modify a char(150) charset utf8mb4,
+ algorithm=instant;
+
+drop table various_cases;
+
+
+create table all_texts (
+ a tinytext charset ascii,
+ b text charset ascii,
+ c mediumtext charset ascii,
+ d longtext charset ascii,
+ footer int
+) engine=innodb;
+
+alter table all_texts
+ convert to charset latin1 collate latin1_general_ci,
+ algorithm=instant;
+
+drop table all_texts;
+
+
+create table all_binaries (
+ a tinyblob,
+ b blob,
+ c mediumblob,
+ d longblob,
+ e varbinary(150),
+ f binary(150)
+) engine=innodb;
+
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_binaries modify a tinytext, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_binaries modify b text, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_binaries modify c mediumtext, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_binaries modify d longtext, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_binaries modify e varchar(150), algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_binaries modify f char(150), algorithm=instant;
+
+drop table all_binaries;
+
+create table all_strings (
+ a tinytext,
+ b text,
+ c mediumtext,
+ d longtext,
+ e varchar(150),
+ f char(150)
+) engine=innodb;
+
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_strings modify a tinyblob, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_strings modify b blob, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_strings modify c mediumblob, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_strings modify d longblob, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_strings modify e varbinary(150), algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_strings modify f binary(150), algorithm=instant;
+
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_strings modify a tinytext charset binary, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_strings modify b text charset binary, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_strings modify c mediumtext charset binary, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_strings modify d longtext charset binary, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_strings modify e varchar(150) charset binary, algorithm=instant;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table all_strings modify f char(150) charset binary, algorithm=instant;
+
+drop table all_strings;
+
+create table key_part_change (
+ a char(150) charset ascii,
+ b char(150) charset ascii,
+ c char(150) charset ascii,
+ unique key ab (a,b)
+) engine=innodb;
+
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table key_part_change
+ modify a char(150) charset utf8mb4,
+ drop index ab,
+ add unique key ab(a,c),
+ algorithm=instant;
+
+drop table key_part_change;
+
+create table key_part_change_and_rename (
+ a char(100) charset ascii,
+ b char(100) charset ascii,
+ unique key ab (a,b)
+) engine=innodb;
+
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table key_part_change_and_rename
+ change a b char(100) charset utf8mb4,
+ change b a char(100) charset utf8mb4,
+ drop index ab,
+ add unique key ab(a,b),
+ algorithm=instant;
+
+drop table key_part_change_and_rename;
+
+create table enum_and_set (
+ a enum('one', 'two') charset utf8mb3,
+ b set('three', 'four') charset utf8mb3
+) engine=innodb;
+
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table enum_and_set
+ modify a enum('one', 'two') charset utf8mb4,
+ algorithm=instant;
+
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table enum_and_set
+ modify b enum('three', 'four') charset utf8mb4,
+ algorithm=instant;
+
+drop table enum_and_set;
+
+create table compressed (
+ a varchar(255) charset utf8mb3 compressed
+) engine=innodb;
+
+insert into compressed values ('AAA'), ('bbb'), ('CCC');
+
+alter table compressed
+ modify a varchar(255) charset utf8mb4 compressed,
+ algorithm=instant;
+
+select * from compressed;
+check table compressed;
+
+drop table compressed;
+
+create table key_part_bug (
+ id int primary key auto_increment,
+ a varchar(150) charset utf8mb3 unique key
+) engine=innodb;
+
+alter table key_part_bug
+ modify a varchar(150) charset utf8mb4,
+ algorithm=instant;
+
+drop table key_part_bug;
+
+
+create table latin1_swedish_special_case (
+ copy1 varchar(150) charset ascii collate ascii_general_ci,
+ copy2 char(150) charset ascii collate ascii_general_ci,
+ instant1 varchar(150) charset ascii collate ascii_general_ci,
+ instant2 char(150) charset ascii collate ascii_general_ci
+) engine=innodb;
+
+select c.name, c.prtype, c.mtype, c.len from information_schema.innodb_sys_columns as c inner join information_schema.innodb_sys_tables t on c.table_id = t.table_id
+ where t.name = 'test/latin1_swedish_special_case';
+alter table latin1_swedish_special_case
+ modify copy1 varchar(150) charset latin1 collate latin1_swedish_ci,
+ modify copy2 char(150) charset latin1 collate latin1_swedish_ci,
+ algorithm=copy;
+alter table latin1_swedish_special_case
+ modify instant1 varchar(150) charset latin1 collate latin1_swedish_ci,
+ modify instant2 char(150) charset latin1 collate latin1_swedish_ci,
+ algorithm=instant;
+select c.name, c.prtype, c.mtype, c.len from information_schema.innodb_sys_columns as c inner join information_schema.innodb_sys_tables t on c.table_id = t.table_id
+ where t.name = 'test/latin1_swedish_special_case';
+alter table latin1_swedish_special_case
+ modify copy1 varchar(150) charset latin1 collate latin1_general_ci,
+ modify copy2 char(150) charset latin1 collate latin1_general_ci,
+ algorithm=copy;
+alter table latin1_swedish_special_case
+ modify instant1 varchar(150) charset latin1 collate latin1_general_ci,
+ modify instant2 char(150) charset latin1 collate latin1_general_ci,
+ algorithm=instant;
+select c.name, c.prtype, c.mtype, c.len from information_schema.innodb_sys_columns as c inner join information_schema.innodb_sys_tables t on c.table_id = t.table_id
+ where t.name = 'test/latin1_swedish_special_case';
+
+drop table latin1_swedish_special_case;
+
+create table regression (a varchar(100) charset utf8mb3 primary key, b int) engine=innodb;
+alter table regression convert to character set utf8mb4;
+drop table regression;
+
+# ROW_FORMAT=DYNAMIC limitation:
+# size in bytes cannot be increased from less than or equal to 255 to more than 255
+create table boundary_255 (
+ a varchar(50) charset ascii,
+ b varchar(200) charset ascii,
+ c varchar(300) charset ascii
+) engine=innodb;
+
+alter table boundary_255
+ modify a varchar(50) charset utf8mb3,
+ algorithm=instant;
+
+if ($row_format == 'redundant') {
+alter table boundary_255
+ modify b varchar(200) charset utf8mb3,
+ algorithm=instant;
+}
+if ($row_format != 'redundant') {
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table boundary_255
+ modify b varchar(200) charset utf8mb3,
+ algorithm=instant;
+}
+
+alter table boundary_255
+ modify c varchar(300) charset utf8mb3,
+ algorithm=instant;
+
+drop table boundary_255;
+
+create table fully_compatible (
+ id int auto_increment unique key,
+ from_charset char(255),
+ from_collate char(255),
+ to_charset char(255),
+ to_collate char(255)
+);
+
+insert into fully_compatible (from_charset, from_collate, to_charset, to_collate) values
+ ('utf8mb3', 'utf8mb3_general_ci', 'utf8mb4', 'utf8mb4_general_ci'),
+ ('utf8mb3', 'utf8mb3_bin', 'utf8mb4', 'utf8mb4_bin'),
+ ('utf8mb3', 'utf8mb3_unicode_ci', 'utf8mb4', 'utf8mb4_unicode_ci'),
+ ('utf8mb3', 'utf8mb3_icelandic_ci', 'utf8mb4', 'utf8mb4_icelandic_ci'),
+ ('utf8mb3', 'utf8mb3_latvian_ci', 'utf8mb4', 'utf8mb4_latvian_ci'),
+ ('utf8mb3', 'utf8mb3_romanian_ci', 'utf8mb4', 'utf8mb4_romanian_ci'),
+ ('utf8mb3', 'utf8mb3_slovenian_ci', 'utf8mb4', 'utf8mb4_slovenian_ci'),
+ ('utf8mb3', 'utf8mb3_polish_ci', 'utf8mb4', 'utf8mb4_polish_ci'),
+ ('utf8mb3', 'utf8mb3_estonian_ci', 'utf8mb4', 'utf8mb4_estonian_ci'),
+ ('utf8mb3', 'utf8mb3_spanish_ci', 'utf8mb4', 'utf8mb4_spanish_ci'),
+ ('utf8mb3', 'utf8mb3_swedish_ci', 'utf8mb4', 'utf8mb4_swedish_ci'),
+ ('utf8mb3', 'utf8mb3_turkish_ci', 'utf8mb4', 'utf8mb4_turkish_ci'),
+ ('utf8mb3', 'utf8mb3_czech_ci', 'utf8mb4', 'utf8mb4_czech_ci'),
+ ('utf8mb3', 'utf8mb3_danish_ci', 'utf8mb4', 'utf8mb4_danish_ci'),
+ ('utf8mb3', 'utf8mb3_lithuanian_ci', 'utf8mb4', 'utf8mb4_lithuanian_ci'),
+ ('utf8mb3', 'utf8mb3_slovak_ci', 'utf8mb4', 'utf8mb4_slovak_ci'),
+ ('utf8mb3', 'utf8mb3_spanish2_ci', 'utf8mb4', 'utf8mb4_spanish2_ci'),
+ ('utf8mb3', 'utf8mb3_roman_ci', 'utf8mb4', 'utf8mb4_roman_ci'),
+ ('utf8mb3', 'utf8mb3_persian_ci', 'utf8mb4', 'utf8mb4_persian_ci'),
+ ('utf8mb3', 'utf8mb3_esperanto_ci', 'utf8mb4', 'utf8mb4_esperanto_ci'),
+ ('utf8mb3', 'utf8mb3_hungarian_ci', 'utf8mb4', 'utf8mb4_hungarian_ci'),
+ ('utf8mb3', 'utf8mb3_sinhala_ci', 'utf8mb4', 'utf8mb4_sinhala_ci'),
+ ('utf8mb3', 'utf8mb3_german2_ci', 'utf8mb4', 'utf8mb4_german2_ci'),
+ ('utf8mb3', 'utf8mb3_croatian_mysql561_ci', 'utf8mb4', 'utf8mb4_croatian_mysql561_ci'),
+ ('utf8mb3', 'utf8mb3_unicode_520_ci', 'utf8mb4', 'utf8mb4_unicode_520_ci'),
+ ('utf8mb3', 'utf8mb3_vietnamese_ci', 'utf8mb4', 'utf8mb4_vietnamese_ci'),
+ ('utf8mb3', 'utf8mb3_croatian_ci', 'utf8mb4', 'utf8mb4_croatian_ci'),
+ ('utf8mb3', 'utf8mb3_myanmar_ci', 'utf8mb4', 'utf8mb4_myanmar_ci'),
+ ('utf8mb3', 'utf8mb3_thai_520_w2', 'utf8mb4', 'utf8mb4_thai_520_w2'),
+ ('utf8mb3', 'utf8mb3_general_nopad_ci', 'utf8mb4', 'utf8mb4_general_nopad_ci'),
+ ('utf8mb3', 'utf8mb3_nopad_bin', 'utf8mb4', 'utf8mb4_nopad_bin'),
+ ('utf8mb3', 'utf8mb3_unicode_nopad_ci', 'utf8mb4', 'utf8mb4_unicode_nopad_ci'),
+ ('utf8mb3', 'utf8mb3_unicode_520_nopad_ci', 'utf8mb4', 'utf8mb4_unicode_520_nopad_ci'),
+
+ ('ucs2', 'ucs2_general_ci', 'utf16', 'utf16_general_ci'),
+ ('ucs2', 'ucs2_unicode_ci', 'utf16', 'utf16_unicode_ci'),
+ ('ucs2', 'ucs2_icelandic_ci', 'utf16', 'utf16_icelandic_ci'),
+ ('ucs2', 'ucs2_latvian_ci', 'utf16', 'utf16_latvian_ci'),
+ ('ucs2', 'ucs2_romanian_ci', 'utf16', 'utf16_romanian_ci'),
+ ('ucs2', 'ucs2_slovenian_ci', 'utf16', 'utf16_slovenian_ci'),
+ ('ucs2', 'ucs2_polish_ci', 'utf16', 'utf16_polish_ci'),
+ ('ucs2', 'ucs2_estonian_ci', 'utf16', 'utf16_estonian_ci'),
+ ('ucs2', 'ucs2_spanish_ci', 'utf16', 'utf16_spanish_ci'),
+ ('ucs2', 'ucs2_general_ci', 'utf16', 'utf16_general_ci'),
+
+ ('ascii', 'ascii_general_ci', 'utf8mb3', 'utf8mb3_general_ci'),
+ ('ascii', 'ascii_general_ci', 'utf8mb4', 'utf8mb4_general_ci'),
+ ('ascii', 'ascii_general_ci', 'latin1', 'latin1_general_ci'),
+ ('ascii', 'ascii_bin', 'latin1', 'latin1_bin'),
+ ('ascii', 'ascii_nopad_bin', 'latin1', 'latin1_nopad_bin'),
+ ('ascii', 'ascii_general_ci', 'latin2', 'latin2_general_ci'),
+ ('ascii', 'ascii_general_ci', 'latin7', 'latin7_general_ci'),
+ ('ascii', 'ascii_bin', 'koi8u', 'koi8u_bin'),
+ ('ascii', 'ascii_bin', 'ujis', 'ujis_bin'),
+ ('ascii', 'ascii_bin', 'big5', 'big5_bin'),
+ ('ascii', 'ascii_bin', 'gbk', 'gbk_bin')
+;
+
+let $data_size = `select count(*) from fully_compatible`;
+let $counter = 1;
+
+while ($counter <= $data_size) {
+ let $from_charset = `select from_charset from fully_compatible where id = $counter`;
+ let $from_collate = `select from_collate from fully_compatible where id = $counter`;
+ let $to_charset = `select to_charset from fully_compatible where id = $counter`;
+ let $to_collate = `select to_collate from fully_compatible where id = $counter`;
+
+ eval create table tmp (
+ a varchar(50) charset $from_charset collate $from_collate,
+ b varchar(50) charset $from_charset collate $from_collate primary key
+ ) engine=innodb;
+
+ insert into tmp values ('AAA', 'AAA'), ('bbb', 'bbb');
+
+ eval alter table tmp
+ change a a varchar(50) charset $to_charset collate $to_collate,
+ modify b varchar(50) charset $to_charset collate $to_collate,
+ algorithm=instant;
+
+ check table tmp;
+
+ drop table tmp;
+
+ inc $counter;
+}
+
+drop table fully_compatible;
+
+
+create table compatible_without_index (
+ id int auto_increment unique key,
+ from_charset char(255),
+ from_collate char(255),
+ to_charset char(255),
+ to_collate char(255)
+);
+
+insert into compatible_without_index (from_charset, from_collate, to_charset, to_collate) values
+ ('ascii', 'ascii_general_ci', 'utf8mb3', 'utf8mb3_swedish_ci'),
+ ('ascii', 'ascii_bin', 'latin1', 'latin1_swedish_ci'),
+ ('ascii', 'ascii_general_nopad_ci', 'latin1', 'latin1_swedish_ci'),
+ ('ascii', 'ascii_nopad_bin', 'latin1', 'latin1_swedish_ci'),
+
+ ('ascii', 'ascii_general_ci', 'koi8u', 'koi8u_bin'),
+ ('ascii', 'ascii_general_nopad_ci', 'koi8u', 'koi8u_bin'),
+ ('ascii', 'ascii_nopad_bin', 'koi8u', 'koi8u_bin'),
+
+ ('ascii', 'ascii_general_ci', 'latin1', 'latin1_swedish_ci'),
+ ('ascii', 'ascii_bin', 'utf8mb3', 'utf8mb3_swedish_ci'),
+ ('ascii', 'ascii_general_nopad_ci', 'utf8mb3', 'utf8mb3_swedish_ci'),
+ ('ascii', 'ascii_nopad_bin', 'utf8mb3', 'utf8mb3_swedish_ci'),
+
+ ('ascii', 'ascii_general_ci', 'utf8mb4', 'utf8mb4_danish_ci'),
+ ('ascii', 'ascii_bin', 'utf8mb4', 'utf8mb4_danish_ci'),
+ ('ascii', 'ascii_general_nopad_ci', 'utf8mb4', 'utf8mb4_danish_ci'),
+ ('ascii', 'ascii_nopad_bin', 'utf8mb4', 'utf8mb4_danish_ci'),
+
+ ('utf8mb3', 'utf8mb3_general_ci', 'utf8mb4', 'utf8mb4_vietnamese_ci'),
+ ('utf8mb3', 'utf8mb3_bin', 'utf8mb4', 'utf8mb4_vietnamese_ci'),
+ ('utf8mb3', 'utf8mb3_general_nopad_ci', 'utf8mb4', 'utf8mb4_vietnamese_ci'),
+ ('utf8mb3', 'utf8mb3_nopad_bin', 'utf8mb4', 'utf8mb4_vietnamese_ci'),
+
+ ('ascii', 'ascii_general_ci', 'gbk', 'gbk_chinese_ci'),
+ ('ascii', 'ascii_general_ci', 'gbk', 'gbk_chinese_nopad_ci'),
+
+ ('ucs2', 'ucs2_myanmar_ci', 'utf16', 'utf16_thai_520_w2'),
+ ('ucs2', 'ucs2_general_ci', 'utf16', 'utf16_unicode_nopad_ci'),
+ ('ucs2', 'ucs2_general_mysql500_ci', 'utf16', 'utf16_spanish2_ci'),
+
+ ('ascii', 'ascii_general_ci', 'ascii', 'ascii_bin'),
+ ('utf8mb3', 'utf8mb3_roman_ci', 'utf8mb3', 'utf8mb3_lithuanian_ci'),
+ ('utf8mb4', 'utf8mb4_thai_520_w2', 'utf8mb4', 'utf8mb4_persian_ci'),
+ ('utf8mb3', 'utf8mb3_myanmar_ci', 'utf8mb4', 'utf8mb4_german2_ci'),
+ ('utf8mb3', 'utf8mb3_general_ci', 'utf8mb3', 'utf8mb3_unicode_ci'),
+ ('latin1', 'latin1_general_cs', 'latin1', 'latin1_general_ci'),
+ ('ascii', 'ascii_general_ci', 'ujis', 'ujis_japanese_ci'),
+ ('ascii', 'ascii_general_ci', 'big5', 'big5_chinese_ci'),
+ ('ascii', 'ascii_general_ci', 'latin2', 'latin2_croatian_ci'),
+ ('ascii', 'ascii_general_ci', 'latin7', 'latin7_estonian_cs'),
+ ('utf16', 'utf16_general_ci', 'utf16', 'utf16_german2_ci')
+;
+
+let $data_size = `select count(*) from compatible_without_index`;
+let $counter = 1;
+
+while ($counter <= $data_size) {
+ let $from_charset = `select from_charset from compatible_without_index where id = $counter`;
+ let $from_collate = `select from_collate from compatible_without_index where id = $counter`;
+ let $to_charset = `select to_charset from compatible_without_index where id = $counter`;
+ let $to_collate = `select to_collate from compatible_without_index where id = $counter`;
+
+ eval create table tmp (
+ a varchar(50) charset $from_charset collate $from_collate,
+ b varchar(50) charset $from_charset collate $from_collate unique key,
+ c varchar(50) charset $from_charset collate $from_collate primary key
+ ) engine=innodb;
+
+ eval alter table tmp
+ change a a varchar(50) charset $to_charset collate $to_collate,
+ algorithm=instant;
+
+ --error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+ eval alter table tmp
+ modify b varchar(50) charset $to_charset collate $to_collate,
+ algorithm=instant;
+
+ --error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+ eval alter table tmp
+ modify c varchar(50) charset $to_charset collate $to_collate,
+ algorithm=instant;
+
+ drop table tmp;
+
+ inc $counter;
+}
+
+drop table compatible_without_index;
+
+
+create table fully_incompatible (
+ id int auto_increment unique key,
+ from_charset char(255),
+ from_collate char(255),
+ to_charset char(255),
+ to_collate char(255)
+);
+
+insert into fully_incompatible (from_charset, from_collate, to_charset, to_collate) values
+ ('utf8mb4', 'utf8mb4_general_ci', 'utf8mb3', 'utf8mb3_general_ci'),
+ ('utf8mb4', 'utf8mb4_general_ci', 'ascii', 'ascii_general_ci'),
+ ('utf8mb3', 'utf8mb3_general_ci', 'ascii', 'ascii_general_ci'),
+ ('utf8mb3', 'utf8mb3_general_ci', 'latin1', 'latin1_general_ci'),
+ ('utf16', 'utf16_general_ci', 'utf32', 'utf32_general_ci'),
+ ('latin1', 'latin1_general_ci', 'ascii', 'ascii_general_ci'),
+ ('ascii', 'ascii_general_ci', 'swe7', 'swe7_swedish_ci'),
+ ('eucjpms', 'eucjpms_japanese_nopad_ci', 'geostd8', 'geostd8_general_ci'),
+ ('latin1', 'latin1_general_ci', 'utf16', 'utf16_general_ci')
+;
+
+let $data_size = `select count(*) from fully_incompatible`;
+let $counter = 1;
+
+while ($counter <= $data_size) {
+ let $from_charset = `select from_charset from fully_incompatible where id = $counter`;
+ let $from_collate = `select from_collate from fully_incompatible where id = $counter`;
+ let $to_charset = `select to_charset from fully_incompatible where id = $counter`;
+ let $to_collate = `select to_collate from fully_incompatible where id = $counter`;
+
+ eval create table tmp (
+ a varchar(150) charset $from_charset collate $from_collate,
+ b text(150) charset $from_charset collate $from_collate,
+ unique key b_idx (b(150))
+ ) engine=innodb;
+
+ --error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+ eval alter table tmp
+ change a a varchar(150) charset $to_charset collate $to_collate,
+ algorithm=instant;
+
+ --error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+ eval alter table tmp
+ modify b text charset $to_charset collate $to_collate,
+ algorithm=instant;
+
+ drop table tmp;
+
+ inc $counter;
+}
+
+drop table fully_incompatible;
diff --git a/mysql-test/suite/innodb/t/instant_alter_crash.test b/mysql-test/suite/innodb/t/instant_alter_crash.test
index fe7301b4f78..d16ee6c929a 100644
--- a/mysql-test/suite/innodb/t/instant_alter_crash.test
+++ b/mysql-test/suite/innodb/t/instant_alter_crash.test
@@ -16,7 +16,7 @@ let MYSQLD_DATADIR=`select @@datadir`;
CREATE TABLE t1(id INT PRIMARY KEY, c2 INT UNIQUE)
ENGINE=InnoDB ROW_FORMAT=REDUNDANT;
CREATE TABLE t2 LIKE t1;
-INSERT INTO t1 VALUES(1,2);
+INSERT INTO t1 VALUES(0,2);
BEGIN;
INSERT INTO t2 VALUES(2,1);
ALTER TABLE t2 ADD COLUMN (c3 TEXT NOT NULL DEFAULT 'De finibus bonorum');
@@ -29,7 +29,7 @@ ALTER TABLE t1 ADD COLUMN (c3 TEXT NOT NULL DEFAULT ' et malorum');
connection default;
SET DEBUG_SYNC='now WAIT_FOR ddl';
SET GLOBAL innodb_flush_log_at_trx_commit=1;
-COMMIT;
+INSERT INTO t2 VALUES(3,4,'accusantium doloremque laudantium');
--source include/kill_mysqld.inc
disconnect ddl;
@@ -43,9 +43,38 @@ DELETE FROM t1;
ROLLBACK;
--source include/wait_all_purged.inc
+INSERT INTO t2 VALUES
+(16,1551,'Omnium enim rerum'),(128,1571,' principia parva sunt');
+
+connect ddl, localhost, root;
+SET DEBUG_SYNC='innodb_alter_inplace_before_commit SIGNAL ddl WAIT_FOR ever';
+--send
+ALTER TABLE t2 DROP COLUMN c3, ADD COLUMN c5 TEXT DEFAULT 'naturam abhorrere';
+
+connection default;
+SET DEBUG_SYNC='now WAIT_FOR ddl';
+SET GLOBAL innodb_flush_log_at_trx_commit=1;
+UPDATE t1 SET c2=c2+1;
+
+--source include/kill_mysqld.inc
+disconnect ddl;
+--source include/start_mysqld.inc
+
+SET @saved_frequency= @@GLOBAL.innodb_purge_rseg_truncate_frequency;
+SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
+
+SELECT * FROM t1;
+SELECT * FROM t2;
+BEGIN;
+INSERT INTO t1 SET id=1;
+DELETE FROM t2;
+ROLLBACK;
+--source include/wait_all_purged.inc
+
INSERT INTO t2 VALUES (64,42,'De finibus bonorum'), (347,33101,' et malorum');
connect ddl, localhost, root;
+ALTER TABLE t2 DROP COLUMN c3;
SET DEBUG_SYNC='innodb_alter_inplace_before_commit SIGNAL ddl WAIT_FOR ever';
--send
ALTER TABLE t2 ADD COLUMN (c4 TEXT NOT NULL DEFAULT ' et malorum');
@@ -62,6 +91,10 @@ disconnect ddl;
SET @saved_frequency= @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
+let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err;
+let SEARCH_PATTERN= \[Note\] InnoDB: Rolled back recovered transaction ;
+-- source include/search_pattern_in_file.inc
+
SELECT * FROM t1;
SELECT * FROM t2;
BEGIN;
@@ -96,16 +129,39 @@ for (my $offset= 0x65; $offset;
my $n_fields= unpack("n", substr($page,$offset-4,2)) >> 1 & 0x3ff;
my $start= 0;
my $name;
- for (my $i= 0; $i < $n_fields; $i++) {
- my $end= unpack("C", substr($page, $offset-7-$i, 1));
- print ",\n " if $i;
- print "$fields[$i]=";
- if ($end & 0x80) {
- print "NULL(", ($end & 0x7f) - $start, " bytes)"
- } else {
- print "0x", unpack("H*", substr($page,$offset+$start,$end-$start))
+ if (unpack("C", substr($page,$offset-3,1)) & 1) {
+ for (my $i= 0; $i < $n_fields; $i++) {
+ my $end= unpack("C", substr($page, $offset-7-$i, 1));
+ print ",\n " if $i;
+ print "$fields[$i]=";
+ if ($end & 0x80) {
+ print "NULL(", ($end & 0x7f) - $start, " bytes)"
+ } else {
+ print "0x", unpack("H*", substr($page,$offset+$start,$end-$start))
+ }
+ $start= $end & 0x7f;
+ }
+ } else {
+ for (my $i= 0; $i < $n_fields; $i++) {
+ my $end= unpack("n", substr($page, $offset-8-2*$i, 2));
+ print ",\n " if $i;
+ if ($i > 2 && !(~unpack("C",substr($page,$offset-6,1)) & 0x30)) {
+ if ($i == 3) {
+ print "BLOB=";
+ $start += 8; # skip the space_id,page_number
+ } else {
+ print "$fields[$i - 1]=";
+ }
+ } else {
+ print "$fields[$i]=";
+ }
+ if ($end & 0x8000) {
+ print "NULL(", ($end & 0x7fff) - $start, " bytes)"
+ } else {
+ print "0x", unpack("H*", substr($page,$offset+$start,($end-$start) & 0x3fff))
+ }
+ $start= $end & 0x3fff;
}
- $start= $end & 0x7f;
}
print ")\n";
}
diff --git a/mysql-test/suite/innodb/t/instant_alter_debug.test b/mysql-test/suite/innodb/t/instant_alter_debug.test
index e54623b9cbd..73a222cb23c 100644
--- a/mysql-test/suite/innodb/t/instant_alter_debug.test
+++ b/mysql-test/suite/innodb/t/instant_alter_debug.test
@@ -5,6 +5,10 @@
SET @save_frequency= @@GLOBAL.innodb_purge_rseg_truncate_frequency;
SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
+SET @old_instant=
+(SELECT variable_value FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column');
+
CREATE TABLE t1 (
pk INT AUTO_INCREMENT PRIMARY KEY,
c1 INT,
@@ -222,7 +226,8 @@ connection ddl;
SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL copied WAIT_FOR logged';
send ALTER TABLE t1 FORCE;
-disconnect stop_purge;
+connection stop_purge;
+COMMIT;
connection default;
SET DEBUG_SYNC = 'now WAIT_FOR copied';
@@ -238,6 +243,34 @@ reap;
connection default;
SET DEBUG_SYNC = RESET;
SELECT * FROM t1;
+ALTER TABLE t1 DROP b, ALGORITHM=INSTANT;
+connection stop_purge;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+
+connection default;
+DELETE FROM t1;
+
+connection ddl;
+SET DEBUG_SYNC='row_log_table_apply1_before SIGNAL copied WAIT_FOR logged';
+send ALTER TABLE t1 ADD COLUMN b INT NOT NULL DEFAULT 2 AFTER a, FORCE;
+
+disconnect stop_purge;
+
+connection default;
+SET DEBUG_SYNC = 'now WAIT_FOR copied';
+let $wait_all_purged = 1;
+--source include/wait_all_purged.inc
+INSERT INTO t1 SET a=1;
+INSERT INTO t1 SET a=2,c=4;
+SET DEBUG_SYNC = 'now SIGNAL logged';
+
+connection ddl;
+reap;
+UPDATE t1 SET b = b + 1 WHERE a = 2;
+
+connection default;
+SET DEBUG_SYNC = RESET;
+SELECT * FROM t1;
--echo #
--echo # MDEV-15872 Crash in online ALTER TABLE...ADD PRIMARY KEY
@@ -267,4 +300,40 @@ SET DEBUG_SYNC = RESET;
SELECT * FROM t1;
DROP TABLE t1;
+--echo #
+--echo # MDEV-17899 Assertion failures on rollback of instant ADD/DROP
+--echo # MDEV-18098 Crash after rollback of instant DROP COLUMN
+--echo #
+
+SET @save_dbug = @@SESSION.debug_dbug;
+SET debug_dbug='+d,ib_commit_inplace_fail_1';
+CREATE TABLE t1 (a int, b int) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (1,2);
+--error ER_INTERNAL_ERROR
+ALTER TABLE t1 DROP COLUMN b;
+--error ER_INTERNAL_ERROR
+ALTER TABLE t1 DROP COLUMN b;
+--error ER_INTERNAL_ERROR
+ALTER TABLE t1 ADD COLUMN c INT;
+SELECT * FROM t1;
+DROP TABLE t1;
+
+CREATE TABLE t1 (a int, b int) ENGINE=InnoDB;
+--error ER_INTERNAL_ERROR
+ALTER TABLE t1 ADD COLUMN c INT;
+BEGIN;
+INSERT INTO t1 VALUES(1, 1);
+ROLLBACK;
+--error ER_INTERNAL_ERROR
+ALTER TABLE t1 DROP COLUMN b;
+INSERT INTO t1 values (1,1);
+SELECT * FROM t1;
+DROP TABLE t1;
+
+SET debug_dbug = @save_dbug;
+
+SELECT variable_value-@old_instant instants
+FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column';
+
SET GLOBAL innodb_purge_rseg_truncate_frequency = @save_frequency;
diff --git a/mysql-test/suite/innodb/t/instant_alter_extend.combinations b/mysql-test/suite/innodb/t/instant_alter_extend.combinations
new file mode 100644
index 00000000000..1465bf59ad7
--- /dev/null
+++ b/mysql-test/suite/innodb/t/instant_alter_extend.combinations
@@ -0,0 +1,5 @@
+[latin1]
+character-set-server=latin1
+
+[utf8]
+character-set-server=utf8
diff --git a/mysql-test/suite/innodb/t/instant_alter_extend.test b/mysql-test/suite/innodb/t/instant_alter_extend.test
new file mode 100644
index 00000000000..4320d9bae05
--- /dev/null
+++ b/mysql-test/suite/innodb/t/instant_alter_extend.test
@@ -0,0 +1,249 @@
+--source include/have_innodb.inc
+--source include/innodb_row_format.inc
+--source include/maybe_debug.inc
+
+-- echo #
+-- echo # MDEV-15563: Instant ROW_FORMAT=REDUNDANT column type change&extension
+-- echo # (reverted in MDEV-18627)
+-- echo #
+
+# Create a database that picks up the configured character-set-server
+create database best;
+use best;
+
+set default_storage_engine=innodb;
+set @bigval= repeat('0123456789', 30);
+
+delimiter ~~;
+create procedure check_table(table_name varchar(255))
+begin
+ select table_id into @table_id
+ from information_schema.innodb_sys_tables
+ where name = concat('best/', table_name);
+ select name, mtype, hex(prtype) as prtype, len
+ from information_schema.innodb_sys_columns
+ where table_id = @table_id;
+end~~
+delimiter ;~~
+
+
+--echo # VARCHAR -> CHAR, VARBINARY -> BINARY conversion
+set @bigval= repeat('0123456789', 20);
+
+create table t (a varchar(300));
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table t modify a char(255), algorithm=instant;
+alter table t modify a char(255), algorithm=copy;
+
+create or replace table t (a varchar(200));
+insert into t values (@bigval);
+insert into t values ('z');
+if ($have_debug) {
+--disable_query_log
+# The ib_instant_error injection point should not be reached by the statements below.
+set @save_debug= @@SESSION.debug_dbug;
+set debug_dbug= '+d,ib_instant_error';
+--enable_query_log
+}
+--enable_info
+alter table t modify a char(200);
+--disable_info
+select count(a) from t where a = @bigval;
+select a, length(a) from t where a = 'z';
+
+check table t extended;
+call check_table('t');
+
+--echo # CHAR enlargement
+--enable_info
+alter table t modify a char(220);
+--disable_info
+select count(a) from t where a = @bigval;
+select a, length(a) from t where a = 'z';
+
+check table t extended;
+call check_table('t');
+
+--echo # Convert from VARCHAR to a bigger CHAR
+--enable_info
+alter table t modify a varchar(200);
+alter table t modify a char(255);
+--disable_info
+select count(a) from t where a = @bigval;
+select a, length(a) from t where a = 'z';
+
+select * from t;
+check table t extended;
+call check_table('t');
+
+--echo # BINARY/VARBINARY test
+create or replace table t (a varbinary(300));
+insert into t values(NULL);
+--enable_info
+alter table t modify a binary(255);
+--disable_info
+
+create or replace table t (a varbinary(200));
+insert into t values (@bigval);
+insert into t values ('z');
+--enable_info
+alter table t modify a binary(200);
+--disable_info
+select count(a) from t where a = @bigval;
+select length(a) from t where left(a, 1) = 'z';
+
+check table t extended;
+call check_table('t');
+
+--echo # BINARY enlargement
+--enable_info
+alter table t modify a binary(220);
+--disable_info
+
+check table t extended;
+call check_table('t');
+
+--echo # Convert from VARBINARY to a bigger BINARY
+--enable_info
+alter table t modify a varbinary(220);
+alter table t modify a binary(255);
+--disable_info
+select count(a) from t where a = @bigval;
+select a, length(a) from t where a = 'z';
+
+select * from t;
+check table t extended;
+call check_table('t');
+
+
+--echo # Integer conversions
+create or replace table t (x tinyint);
+insert into t values (127);
+--enable_info
+alter table t modify x smallint;
+--disable_info
+select * from t;
+check table t extended;
+call check_table('t');
+
+update t set x= 32767;
+--enable_info
+alter table t modify x mediumint;
+--disable_info
+select * from t;
+check table t extended;
+call check_table('t');
+
+update t set x= 8388607;
+--enable_info
+alter table t modify x int;
+--disable_info
+select * from t;
+check table t extended;
+call check_table('t');
+
+update t set x= 2147483647;
+--enable_info
+alter table t modify x bigint;
+--disable_info
+select * from t;
+check table t extended;
+call check_table('t');
+
+if ($have_debug) {
+--disable_query_log
+# Restore the original debug settings, removing the error injection.
+set debug_dbug= @save_debug;
+--enable_query_log
+}
+
+--echo # Check IMPORT TABLESPACE
+--let $MYSQLD_DATADIR= `select @@datadir`
+create or replace table t2 (x int);
+alter table t2 discard tablespace;
+
+create or replace table t1 (x tinyint);
+insert into t1 set x= 42;
+alter table t1 modify x int;
+flush tables t1 for export;
+--move_file $MYSQLD_DATADIR/best/t1.cfg $MYSQLD_DATADIR/best/t2.cfg
+--copy_file $MYSQLD_DATADIR/best/t1.ibd $MYSQLD_DATADIR/best/t2.ibd
+unlock tables;
+
+alter table t2 import tablespace;
+
+select * from t2;
+check table t2 extended;
+call check_table('t2');
+
+--echo # Check innobase_col_to_mysql() len < flen
+create or replace table t1 (x mediumint);
+insert into t1 values (1);
+insert into t1 values (1);
+--enable_info
+alter table t1 add column y int first, modify x int;
+--error ER_DUP_ENTRY
+alter table t1 add column z int first, add primary key (x);
+--disable_info
+
+--echo # Check assertion in wrong instant operation
+create or replace table t1 (a varchar(26) not null) default character set utf8mb4;
+insert into t1 values ('abcdef'), (repeat('x',26));
+--enable_info
+alter ignore table t1 modify a varchar(25) not null;
+--disable_info
+select * from t1;
+
+--echo # Check row_mysql_store_col_in_innobase_format()
+create or replace table t1(x int primary key, a varchar(20));
+insert into t1 (x) values (1);
+update t1 set a= 'foo' where x = 2;
+
+--echo #
+--echo # MDEV-18124 PK on inplace-enlarged type fails
+--echo #
+create or replace table t1 (x int, y int);
+insert into t1 (x, y) values (11, 22);
+--enable_info
+alter table t1 modify x bigint;
+alter table t1 add primary key (x);
+--disable_info
+select * from t1;
+check table t1;
+
+create or replace table t1 (a varchar(10), y int);
+insert into t1 (a, y) values ("0123456789", 33);
+--enable_info
+alter table t1 modify a char(15);
+alter table t1 add primary key (a);
+--disable_info
+select * from t1;
+check table t1;
+
+create or replace table t1 (x int primary key, y int);
+insert into t1 (x, y) values (44, 55);
+--enable_info
+alter table t1 modify x bigint;
+--disable_info
+select * from t1;
+check table t1;
+
+create or replace table t1 (x int primary key, y int);
+insert into t1 values (66, 77);
+--enable_info
+alter table t1 add column z int;
+alter table t1 drop column y;
+--disable_info
+select * from t1;
+check table t1;
+
+create or replace table t1 (x integer, a varchar(20));
+--enable_info
+insert into t1 (x, a) values (73, 'a');
+alter table t1 add index idx3 (a);
+alter table t1 modify a char(20);
+--disable_info
+select * from t1;
+check table t1;
+
+drop database best;
diff --git a/mysql-test/suite/innodb/t/instant_alter_import.test b/mysql-test/suite/innodb/t/instant_alter_import.test
index 5bd10a7994e..fb187debb51 100644
--- a/mysql-test/suite/innodb/t/instant_alter_import.test
+++ b/mysql-test/suite/innodb/t/instant_alter_import.test
@@ -43,10 +43,42 @@ flush tables t2 for export;
--move_file $MYSQLD_DATADIR/test/t2.cfg $MYSQLD_DATADIR/test/t1.cfg
--copy_file $MYSQLD_DATADIR/test/t2.ibd $MYSQLD_DATADIR/test/t1.ibd
unlock tables;
+--copy_file $MYSQLD_DATADIR/test/t1.cfg $MYSQLD_DATADIR/test/t1b.cfg
+--copy_file $MYSQLD_DATADIR/test/t1.ibd $MYSQLD_DATADIR/test/t1b.ibd
--echo # The instant ADD COLUMN has to be removed from the metadata.
alter table t1 import tablespace;
select * from t1;
+--echo # Remove metadata for instant DROP COLUMN, then import
+alter table t1 drop x, add column x int first, algorithm instant;
+select * from t1;
+alter table t1 discard tablespace;
+
+--move_file $MYSQLD_DATADIR/test/t1b.cfg $MYSQLD_DATADIR/test/t1.cfg
+--move_file $MYSQLD_DATADIR/test/t1b.ibd $MYSQLD_DATADIR/test/t1.ibd
+alter table t1 import tablespace;
+select * from t1;
+
+--echo # Import a data file that contains instant DROP COLUMN metadata
+alter table t2 drop x;
+alter table t1 drop x, force;
+alter table t1 discard tablespace;
+
+flush tables t2 for export;
+--move_file $MYSQLD_DATADIR/test/t2.cfg $MYSQLD_DATADIR/test/t1.cfg
+--copy_file $MYSQLD_DATADIR/test/t2.ibd $MYSQLD_DATADIR/test/t1.ibd
+unlock tables;
+
+--error ER_TABLE_SCHEMA_MISMATCH
+alter table t1 import tablespace;
+--error ER_TABLESPACE_DISCARDED
+select * from t1;
+--remove_file $MYSQLD_DATADIR/test/t1.cfg
+--error ER_INTERNAL_ERROR
+alter table t1 import tablespace;
+--error ER_TABLESPACE_DISCARDED
+select * from t1;
+
drop table t2;
drop table t1;
diff --git a/mysql-test/suite/innodb/t/instant_alter_index_rename.test b/mysql-test/suite/innodb/t/instant_alter_index_rename.test
new file mode 100644
index 00000000000..3150503c815
--- /dev/null
+++ b/mysql-test/suite/innodb/t/instant_alter_index_rename.test
@@ -0,0 +1,186 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+
+delimiter |;
+create function get_index_id(tbl_id int, index_name char(100))
+ returns int
+begin
+ declare res int;
+ select index_id into res from information_schema.innodb_sys_indexes where
+ name=index_name and table_id = tbl_id;
+ return res;
+end|
+
+delimiter ;|
+
+create table t (
+ pk int primary key,
+ a int,
+ b int,
+ c int,
+ unique index a_key (a),
+ key c_key (c)
+) engine=innodb stats_persistent=1;
+
+insert into t values (1, 1, 1, 1);
+
+set @table_id = (select table_id from information_schema.innodb_sys_tables where name='test/t');
+
+set @a_key_id = get_index_id(@table_id, 'a_key');
+set @c_key_id = get_index_id(@table_id, 'c_key');
+set @primary_id = get_index_id(@table_id, 'primary');
+
+select distinct(index_name) from mysql.innodb_index_stats where table_name = 't';
+alter table t
+ drop index a_key,
+ add unique index a_key_strikes_back (a);
+select distinct(index_name) from mysql.innodb_index_stats where table_name = 't';
+
+check table t;
+select @a_key_id = get_index_id(@table_id, 'a_key_strikes_back'),
+ @c_key_id = get_index_id(@table_id, 'c_key'),
+ @primary_id = get_index_id(@table_id, 'primary');
+
+set @a_key_strikes_back_id = get_index_id(@table_id, 'a_key_strikes_back');
+set @c_key_id = get_index_id(@table_id, 'c_key');
+set @primary_id = get_index_id(@table_id, 'primary');
+
+alter table t
+ drop index a_key_strikes_back,
+ add unique index a_key_returns (a),
+ drop primary key,
+ add primary key (pk),
+ add unique index b_key (b);
+
+check table t;
+select @a_key_strikes_back_id = get_index_id(@table_id, 'a_key_returns'),
+ @c_key_id = get_index_id(@table_id, 'c_key'),
+ @primary_id = get_index_id(@table_id, 'primary');
+
+set @a_key_returns_id = get_index_id(@table_id, 'a_key_returns');
+set @b_key_id = get_index_id(@table_id, 'b_key');
+set @c_key_id = get_index_id(@table_id, 'c_key');
+set @primary_id = get_index_id(@table_id, 'primary');
+
+alter table t
+ drop key c_key,
+ add key c_key2 (c);
+
+check table t;
+select @a_key_returns_id = get_index_id(@table_id, 'a_key_returns'),
+ @b_key_id = get_index_id(@table_id, 'b_key'),
+ @c_key_id = get_index_id(@table_id, 'c_key2'),
+ @primary_id = get_index_id(@table_id, 'primary');
+
+drop table t;
+drop function get_index_id;
+
+create table errors (
+ a int,
+ unique key a_key (a),
+ b int
+) engine=innodb;
+
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table errors
+ drop key a_key,
+ drop key a_key,
+ add unique key a_key2 (a);
+
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table errors
+ drop key a_key,
+ drop key a_key2,
+ add unique key a_key2 (a);
+
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table errors
+ add key b_key (b),
+ drop key b_key,
+ add key bb_key (b);
+
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table errors
+ drop key a_key,
+ add key a_key2 (a),
+ drop key a_key,
+ add key a_key2 (a);
+
+drop table errors;
+
+--disable_query_log
+call mtr.add_suppression("Flagged corruption of `a_key` in table `test`.`corrupted` in dict_set_index_corrupted");
+--enable_query_log
+
+create table corrupted (
+ a int,
+ key a_key (a)
+) engine=innodb;
+
+insert into corrupted values (1);
+
+select * from corrupted;
+
+SET @save_dbug = @@SESSION.debug_dbug;
+SET debug_dbug = '+d,dict_set_index_corrupted';
+check table corrupted;
+SET debug_dbug = @save_dbug;
+
+--error ER_INDEX_CORRUPT
+select * from corrupted;
+
+--error ER_INDEX_CORRUPT
+alter table corrupted
+ drop key a_key,
+ add key a_key2 (a);
+
+alter table corrupted
+ drop key a_key;
+
+select * from corrupted;
+
+check table corrupted;
+
+drop table corrupted;
+
+create table t (
+ a int,
+ unique key a_key (a)
+) engine=innodb stats_persistent=1;
+
+SET @save_dbug = @@SESSION.debug_dbug;
+SET debug_dbug = '+d,ib_rename_index_fail1';
+--error ER_LOCK_DEADLOCK
+alter table t
+ drop key a_key,
+ add unique key a_key2 (a),
+ algorithm=instant;
+SET debug_dbug = @save_dbug;
+
+--error ER_WRONG_NAME_FOR_INDEX
+alter table t
+ drop key a_key,
+ add unique key `GEN_CLUST_INDEX` (a),
+ algorithm=instant;
+
+show create table t;
+
+drop table t;
+
+
+create table rename_column_and_index (
+ a int,
+ unique index a_key(a)
+) engine=innodb;
+
+insert into rename_column_and_index values (1), (3);
+
+alter table rename_column_and_index
+ change a aa int,
+ drop key a_key,
+ add unique key aa_key(aa),
+ algorithm=instant;
+
+show create table rename_column_and_index;
+check table rename_column_and_index;
+drop table rename_column_and_index;
diff --git a/mysql-test/suite/innodb/t/instant_alter_limit.test b/mysql-test/suite/innodb/t/instant_alter_limit.test
new file mode 100644
index 00000000000..b50a1b15295
--- /dev/null
+++ b/mysql-test/suite/innodb/t/instant_alter_limit.test
@@ -0,0 +1,60 @@
+--source include/innodb_page_size.inc
+
+SET @old_instant=
+(SELECT variable_value FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column');
+
+CREATE TABLE t(a INT PRIMARY KEY, b INT, c INT, d INT, e INT)
+ENGINE=InnoDB;
+INSERT INTO t VALUES(1,2,3,4,5);
+--disable_query_log
+let $n=253;
+while ($n) {
+dec $n;
+ALTER TABLE t DROP b, DROP c, DROP d, DROP e,
+ADD COLUMN b INT FIRST, ADD COLUMN c INT, ADD COLUMN d INT AFTER b,
+ADD COLUMN e INT AFTER c, ALGORITHM=INSTANT;
+}
+--enable_query_log
+SELECT * FROM t;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
+ALTER TABLE t DROP b, DROP c, DROP d, DROP e,
+ADD COLUMN b INT, ALGORITHM=INSTANT;
+ALTER TABLE t CHANGE COLUMN b beta INT AFTER a, ALGORITHM=INSTANT;
+ALTER TABLE t DROP e, DROP c, DROP d, ALGORITHM=INSTANT;
+SELECT * FROM t;
+ALTER TABLE t DROP COLUMN beta, ALGORITHM=INSTANT;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
+ALTER TABLE t ADD COLUMN b INT NOT NULL, ALGORITHM=INSTANT;
+
+SELECT variable_value-@old_instant instants
+FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column';
+
+ALTER TABLE t ADD COLUMN b INT NOT NULL;
+
+SELECT variable_value-@old_instant instants
+FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column';
+
+SELECT * FROM t;
+ALTER TABLE t ADD COLUMN (c CHAR(255) NOT NULL, d BIGINT NOT NULL),
+ALGORITHM=INSTANT;
+
+--disable_query_log
+let $n=253;
+while ($n) {
+dec $n;
+ALTER TABLE t DROP b, DROP c, DROP d,
+ADD COLUMN (b INT NOT NULL, c CHAR(255) NOT NULL, d BIGINT NOT NULL);
+}
+--enable_query_log
+
+UPDATE t SET b=b+1,d=d+1,c='foo';
+SELECT * FROM t;
+
+SELECT variable_value-@old_instant instants
+FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column';
+
+DROP TABLE t;
diff --git a/mysql-test/suite/innodb/t/instant_alter_null.test b/mysql-test/suite/innodb/t/instant_alter_null.test
new file mode 100644
index 00000000000..69fb1ae4495
--- /dev/null
+++ b/mysql-test/suite/innodb/t/instant_alter_null.test
@@ -0,0 +1,57 @@
+--source include/have_innodb.inc
+
+create table t (a int NOT NULL) engine=innodb row_format= compressed;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
+alter table t modify a int NULL, algorithm=instant;
+drop table t;
+
+create table t (a int NOT NULL) engine=innodb row_format= dynamic;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
+alter table t modify a int NULL, algorithm=instant;
+drop table t;
+
+create table t (a int NOT NULL) engine=innodb row_format= compact;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
+alter table t modify a int NULL, algorithm=instant;
+drop table t;
+
+create table t (
+ id int primary key,
+ a int NOT NULL default 0,
+ b int NOT NULL default 0,
+ c int NOT NULL default 0,
+ index idx (a,b,c)
+) engine=innodb row_format=redundant;
+
+--error ER_BAD_NULL_ERROR
+insert into t (id, a) values (0, NULL);
+--error ER_BAD_NULL_ERROR
+insert into t (id, b) values (0, NULL);
+--error ER_BAD_NULL_ERROR
+insert into t (id, c) values (0, NULL);
+
+insert into t values (1,1,1,1);
+
+set @id = (select table_id from information_schema.innodb_sys_tables
+where name = 'test/t');
+
+--replace_column 1 TABLE_ID
+select * from information_schema.innodb_sys_columns where table_id=@id;
+
+alter table t modify a int NULL, algorithm=instant;
+insert into t values (2, NULL, 2, 2);
+
+alter table t modify b int NULL, algorithm=nocopy;
+insert into t values (3, NULL, NULL, 3);
+
+alter table t modify c int NULL, algorithm=inplace;
+insert into t values (4, NULL, NULL, NULL);
+
+--replace_column 1 TABLE_ID
+select * from information_schema.innodb_sys_columns where table_id=@id;
+
+select * from t;
+
+check table t;
+
+drop table t;
diff --git a/mysql-test/suite/innodb/t/instant_alter_purge.test b/mysql-test/suite/innodb/t/instant_alter_purge.test
new file mode 100644
index 00000000000..152d200d977
--- /dev/null
+++ b/mysql-test/suite/innodb/t/instant_alter_purge.test
@@ -0,0 +1,75 @@
+--source include/have_innodb.inc
+--source include/maybe_debug.inc
+if ($have_debug) {
+--source include/have_debug_sync.inc
+}
+
+SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency;
+SET GLOBAL innodb_purge_rseg_truncate_frequency=1;
+
+--echo #
+--echo # MDEV-17793 Crash in purge after instant DROP and emptying the table
+--echo #
+
+connect (prevent_purge,localhost,root);
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+
+connection default;
+CREATE TABLE t1 (f1 INT, f2 INT) ENGINE=InnoDB;
+INSERT INTO t1 () VALUES ();
+ALTER TABLE t1 DROP f2, ADD COLUMN f2 INT;
+ALTER TABLE t1 DROP f1;
+DELETE FROM t1;
+
+connection prevent_purge;
+COMMIT;
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+connection default;
+
+ALTER TABLE t1 ADD COLUMN extra TINYINT UNSIGNED NOT NULL DEFAULT 42;
+let $wait_all_purged= 1;
+--source include/wait_all_purged.inc
+ALTER TABLE t1 DROP extra;
+disconnect prevent_purge;
+let $wait_all_purged= 0;
+--source include/wait_all_purged.inc
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-17813 Crash in instant ALTER TABLE due to purge
+--echo # concurrently emptying table
+--echo #
+CREATE TABLE t1 (f2 INT) ENGINE=InnoDB;
+INSERT INTO t1 SET f2=1;
+ALTER TABLE t1 ADD COLUMN f1 INT;
+
+connect (purge_control,localhost,root);
+START TRANSACTION WITH CONSISTENT SNAPSHOT;
+
+connection default;
+DELETE FROM t1;
+
+if ($have_debug) {
+SET DEBUG_SYNC='innodb_commit_inplace_alter_table_enter SIGNAL go WAIT_FOR do';
+}
+send ALTER TABLE t1 ADD COLUMN f3 INT;
+
+connection purge_control;
+if ($have_debug) {
+SET DEBUG_SYNC='now WAIT_FOR go';
+}
+COMMIT;
+--source include/wait_all_purged.inc
+if ($have_debug) {
+SET DEBUG_SYNC='now SIGNAL do';
+}
+disconnect purge_control;
+
+connection default;
+reap;
+if ($have_debug) {
+SET DEBUG_SYNC=RESET;
+}
+DROP TABLE t1;
+
+SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;
diff --git a/mysql-test/suite/innodb/t/instant_alter_rollback.test b/mysql-test/suite/innodb/t/instant_alter_rollback.test
index b68a6ad3880..cfece7e0738 100644
--- a/mysql-test/suite/innodb/t/instant_alter_rollback.test
+++ b/mysql-test/suite/innodb/t/instant_alter_rollback.test
@@ -8,28 +8,49 @@ FLUSH TABLES;
--echo #
--echo # MDEV-11369: Instant ADD COLUMN for InnoDB
+--echo # MDEV-15562: Instant DROP COLUMN or changing the order of columns
--echo #
connect to_be_killed, localhost, root;
+SET @old_instant=
+(SELECT variable_value FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column');
CREATE TABLE empty (id INT PRIMARY KEY, c2 INT UNIQUE) ENGINE=InnoDB;
CREATE TABLE once LIKE empty;
CREATE TABLE twice LIKE empty;
+CREATE TABLE thrice LIKE empty;
INSERT INTO once SET id=1,c2=1;
INSERT INTO twice SET id=1,c2=1;
+INSERT INTO thrice SET id=1,c2=1;
ALTER TABLE empty ADD COLUMN (d1 INT DEFAULT 15);
ALTER TABLE once ADD COLUMN (d1 INT DEFAULT 20);
ALTER TABLE twice ADD COLUMN (d1 INT DEFAULT 20);
+ALTER TABLE thrice ADD COLUMN (d1 INT DEFAULT 20);
ALTER TABLE twice ADD COLUMN
(d2 INT NOT NULL DEFAULT 10,
d3 VARCHAR(15) NOT NULL DEFAULT 'var och en char');
+ALTER TABLE thrice ADD COLUMN
+(d2 INT NOT NULL DEFAULT 10,
+ d3 TEXT NOT NULL DEFAULT 'con');
+ALTER TABLE thrice DROP c2, DROP d3, CHANGE d2 d3 INT NOT NULL FIRST;
+
+SELECT variable_value-@old_instant instants
+FROM information_schema.global_status
+WHERE variable_name = 'innodb_instant_alter_column';
BEGIN;
INSERT INTO empty set id=0,c2=42;
UPDATE once set c2=c2+1;
UPDATE twice set c2=c2+1;
+UPDATE thrice set d3=d3+1;
INSERT INTO twice SET id=2,c2=0,d3='';
+INSERT INTO thrice SET id=2,d3=0;
+DELETE FROM empty;
+DELETE FROM once;
+DELETE FROM twice;
+DELETE FROM thrice;
connection default;
SET GLOBAL innodb_flush_log_at_trx_commit=1;
@@ -48,4 +69,5 @@ SET GLOBAL innodb_purge_rseg_truncate_frequency=@saved_frequency;
SELECT * FROM empty;
SELECT * FROM once;
SELECT * FROM twice;
-DROP TABLE empty, once, twice;
+SELECT * FROM thrice;
+DROP TABLE empty, once, twice, thrice;
diff --git a/mysql-test/suite/innodb/t/instant_auto_inc.test b/mysql-test/suite/innodb/t/instant_auto_inc.test
new file mode 100644
index 00000000000..4aea81c1c49
--- /dev/null
+++ b/mysql-test/suite/innodb/t/instant_auto_inc.test
@@ -0,0 +1,13 @@
+--source include/have_innodb.inc
+
+create table t(id int primary key, a int) engine=InnoDB;
+insert into t (id, a) values (1, 1);
+alter table t modify column id int auto_increment;
+check table t;
+insert into t (a) values (2);
+alter table t modify column id int, algorithm=instant;
+check table t;
+insert into t (id, a) values (3, 3);
+select * from t;
+check table t;
+drop table t;
diff --git a/mysql-test/suite/innodb/t/instant_drop.test b/mysql-test/suite/innodb/t/instant_drop.test
new file mode 100644
index 00000000000..566ac02b314
--- /dev/null
+++ b/mysql-test/suite/innodb/t/instant_drop.test
@@ -0,0 +1,108 @@
+--source include/have_innodb.inc
+
+create table t1(f1 int not null, f2 int not null, f3 int not null)engine=innodb;
+insert into t1 values(1, 2, 3),(4, 5, 6);
+alter table t1 drop column f2, algorithm=instant;
+select * from t1;
+insert into t1 values(1,2);
+select * from t1;
+alter table t1 add column f4 int not null default 5, algorithm=instant;
+select * from t1;
+alter table t1 drop column f1, algorithm=instant;
+select * from t1;
+insert into t1 values(7, 9);
+select * from t1;
+alter table t1 add column f5 blob default repeat('aaa', 950), drop column f4, algorithm=instant;
+select * from t1;
+select f3 from t1;
+update t1 set f3 = 10 where f3 > 2;
+select * from t1;
+delete from t1 where f3 = 10;
+show create table t1;
+select f3 from t1;
+update t1 set f5 = 'world';
+select * from t1;
+drop table t1;
+
+create table t1(f1 int, f2 int not null, index idx(f2))engine=innodb;
+insert into t1 values(1, 2);
+alter table t1 drop column f1, add column f3 varchar(100) default 'thiru', algorithm=instant;
+select * from t1 force index (idx);
+alter table t1 drop column f3, algorithm=instant;
+select * from t1;
+begin;
+insert into t1 values(10);
+select * from t1;
+update t1 set f2 = 100;
+select * from t1;
+delete from t1 where f2 = 100;
+select * from t1;
+rollback;
+select * from t1;
+show create table t1;
+drop table t1;
+
+create table t1(f1 int, f2 int not null)engine=innodb;
+insert into t1 values(1, 2);
+alter table t1 drop column f2, algorithm=instant;
+insert into t1 values(NULL);
+select * from t1;
+drop table t1;
+
+create table t1(f1 int not null, f2 int not null)engine=innodb;
+insert into t1 values(1, 2);
+alter table t1 add column f5 int default 10, algorithm=instant;
+alter table t1 add column f3 int not null default 100, algorithm=instant;
+alter table t1 add column f4 int default 100, drop column f3, algorithm=instant;
+insert into t1 values(2, 3, 20, 100);
+select * from t1;
+drop table t1;
+
+create table t1(f1 int not null, f2 int not null) engine=innodb;
+insert into t1 values(1, 1);
+alter table t1 drop column f2, add column f3 int default 3, algorithm=instant;
+select * from t1;
+update t1 set f3 = 19;
+select * from t1;
+alter table t1 drop column f1, add column f5 tinyint default 10 first,
+algorithm=instant;
+insert into t1 values(4, 10);
+select * from t1;
+
+create table t2(f1 int, f2 int not null) engine=innodb;
+insert into t2(f1, f2) values(1, 2);
+alter table t2 drop column f2, add column f4 varchar(100) default repeat('a', 20), add column f5 int default 10, algorithm=instant;
+select * from t2;
+show create table t2;
+alter table t2 add column f6 char(100) default repeat('a', 99), algorithm=instant;
+
+create table t3(f1 int, f2 int not null)engine=innodb;
+insert into t3 values(1, 2);
+alter table t3 drop column f2, add column f3 int default 1, add column f4 int default 4, algorithm=instant;
+
+create table t4(a varchar(1), b int, c int, primary key(a,b))engine=innodb;
+insert into t4 values('4',5,6);
+alter table t4 drop column c;
+
+--source include/restart_mysqld.inc
+select * from t1;
+alter table t1 add column f6 int default 9,drop column f5, algorithm = instant;
+insert into t1 values(4, 9);
+alter table t1 force, algorithm=inplace;
+select * from t1;
+
+select * from t2;
+alter table t2 force, algorithm=inplace;
+select * from t2;
+show create table t2;
+
+select * from t3;
+alter table t3 add column f5 char(100) default repeat('a', 99), algorithm=instant;
+
+select * from t4;
+alter table t4 add column d varchar(5) default 'fubar';
+insert into t4 values('',0,'snafu');
+--source include/restart_mysqld.inc
+select * from t3;
+select * from t4;
+drop table t1,t2,t3,t4;
diff --git a/mysql-test/suite/innodb/t/log_data_file_size.test b/mysql-test/suite/innodb/t/log_data_file_size.test
index f01e013ddfa..fe75b9ab236 100644
--- a/mysql-test/suite/innodb/t/log_data_file_size.test
+++ b/mysql-test/suite/innodb/t/log_data_file_size.test
@@ -57,6 +57,7 @@ for (my $d = $d1; $d < $d2 + 64; $d++) {
close FILE;
open(FILE, ">$ENV{MYSQLTEST_VARDIR}/log/start_mysqld.txt") || die;
print FILE "--let \$restart_parameters=$restart\n" if $restart;
+print FILE "--let \$restart_noprint=2\n";
print FILE "--source include/start_mysqld.inc\n";
close FILE;
open(FILE, "+<", "$ENV{'MYSQLD_DATADIR'}test/ibd4.ibd") or die;
diff --git a/mysql-test/suite/innodb/t/purge_thread_shutdown.test b/mysql-test/suite/innodb/t/purge_thread_shutdown.test
index 8a9a834454c..5be29b7a6a3 100644
--- a/mysql-test/suite/innodb/t/purge_thread_shutdown.test
+++ b/mysql-test/suite/innodb/t/purge_thread_shutdown.test
@@ -32,7 +32,9 @@ delete from t1 where a=3;
error ER_WRONG_VALUE_FOR_VAR;
set global innodb_fast_shutdown=0;
-let $me=`select connection_id()`;
+# Get the connection id with a leading space so that replace_result does not
+# also replace matching digits inside the error code
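+# (for example, an id of 19 would otherwise also match those digits inside a
+# longer error number)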
+let $me=`select concat(' ', connection_id())`;
replace_result $me ID;
error ER_CONNECTION_KILLED;
eval kill $me;
diff --git a/mysql-test/suite/innodb/t/redo_log_during_checkpoint.test b/mysql-test/suite/innodb/t/redo_log_during_checkpoint.test
index 9ff01739f60..21c3c72da1c 100644
--- a/mysql-test/suite/innodb/t/redo_log_during_checkpoint.test
+++ b/mysql-test/suite/innodb/t/redo_log_during_checkpoint.test
@@ -12,6 +12,7 @@
# is independent of the page size.
--source include/have_innodb_16k.inc
+let $restart_noprint=2;
SET GLOBAL innodb_page_cleaner_disabled_debug = 1;
SET GLOBAL innodb_dict_stats_disabled_debug = 1;
SET GLOBAL innodb_master_thread_disabled_debug = 1;
diff --git a/mysql-test/suite/innodb/t/restart.test b/mysql-test/suite/innodb/t/restart.test
index d7582306492..a7a7855ba7b 100644
--- a/mysql-test/suite/innodb/t/restart.test
+++ b/mysql-test/suite/innodb/t/restart.test
@@ -14,8 +14,8 @@ let page_size= `select @@innodb_page_size`;
--echo # FIXME: Unlike MySQL, maybe MariaDB should not read the .ibd files
--echo # of tables with .isl file or DATA DIRECTORY attribute.
-call mtr.add_suppression("\\[ERROR\\] InnoDB: Invalid flags 0x7a207879 in .*td\\.ibd");
--echo # FIXME: This is much more noisy than MariaDB 10.1!
+call mtr.add_suppression("\\[ERROR\\] InnoDB: Tablespace flags are invalid in datafile: .*test.t[rcd]\\.ibd");
call mtr.add_suppression("\\[ERROR\\] InnoDB: Operating system error number .* in a file operation\\.");
call mtr.add_suppression("\\[ERROR\\] InnoDB: The error means the system cannot find the path specified\\.");
call mtr.add_suppression("\\[ERROR\\] InnoDB: If you are installing InnoDB, remember that you must create directories yourself, InnoDB does not create them\\.");
diff --git a/mysql-test/suite/innodb/t/row_format_redundant.test b/mysql-test/suite/innodb/t/row_format_redundant.test
index fc72e5c2664..6f38835d7e9 100644
--- a/mysql-test/suite/innodb/t/row_format_redundant.test
+++ b/mysql-test/suite/innodb/t/row_format_redundant.test
@@ -96,6 +96,9 @@ do "$ENV{MTR_SUITE_DIR}/include/crc32.pl";
my $ps= $ENV{INNODB_PAGE_SIZE};
my $file= "$ENV{bugdir}/ibdata1";
open(FILE, "+<", $file) || die "Unable to open $file\n";
+die "Unable to read $file" unless sysread(FILE, $_, $ps) == $ps;
+my $full_crc32 = unpack("N",substr($_,54,4)) & 0x10; # FIL_SPACE_FLAGS;
+sysseek(FILE, 0, 0) || die "Unable to seek $file";
# Read DICT_HDR_TABLES, the root page number of CLUST_IND (SYS_TABLES.NAME).
sysseek(FILE, 7*$ps+38+32, 0) || die "Unable to seek $file";
die "Unable to read $file" unless sysread(FILE, $_, 4) == 4;
@@ -122,10 +125,16 @@ for (my $offset= 0x65; $offset;
}
}
my $polynomial = 0x82f63b78; # CRC-32C
-my $ck= pack("N",mycrc32(substr($page, 4, 22), 0, $polynomial) ^
+if ($full_crc32) {
+ my $ck = mycrc32(substr($page, 0, $ps-4), 0, $polynomial);
+ substr($page, $ps-4, 4) = pack("N", $ck);
+} else {
+ my $ck= pack("N",mycrc32(substr($page, 4, 22), 0, $polynomial) ^
mycrc32(substr($page, 38, $ps - 38 - 8), 0, $polynomial));
-substr($page,0,4)=$ck;
-substr($page,$ps-8,4)=$ck;
+ substr($page,0,4)=$ck;
+ substr($page,$ps-8,4)=$ck;
+}
+
sysseek(FILE, $sys_tables_root*$ps, 0) || die "Unable to seek $file";
syswrite(FILE, $page, $ps)==$ps || die "Unable to write $file\n";
close(FILE) || die "Unable to close $file\n";
diff --git a/mysql-test/suite/innodb/t/system_tables.test b/mysql-test/suite/innodb/t/system_tables.test
index 90cb8c59fbd..172628790f8 100644
--- a/mysql-test/suite/innodb/t/system_tables.test
+++ b/mysql-test/suite/innodb/t/system_tables.test
@@ -9,4 +9,4 @@ insert envois3 values ('2008-08-11 22:43:00');
--source include/restart_mysqld.inc
select convert_tz(starttime,'UTC','Europe/Moscow') starttime from envois3;
drop table envois3;
-alter table mysql.time_zone_name engine=MyISAM;
+alter table mysql.time_zone_name engine=Aria;
diff --git a/mysql-test/suite/innodb/t/table_flags.combinations b/mysql-test/suite/innodb/t/table_flags.combinations
new file mode 100644
index 00000000000..729380593f3
--- /dev/null
+++ b/mysql-test/suite/innodb/t/table_flags.combinations
@@ -0,0 +1,5 @@
+[strict_crc32]
+--innodb-checksum-algorithm=strict_crc32
+
+[strict_full_crc32]
+--innodb-checksum-algorithm=strict_full_crc32
diff --git a/mysql-test/suite/innodb/t/table_flags.opt b/mysql-test/suite/innodb/t/table_flags.opt
new file mode 100644
index 00000000000..c44c611ed60
--- /dev/null
+++ b/mysql-test/suite/innodb/t/table_flags.opt
@@ -0,0 +1 @@
+--innodb-checksum-algorithm=crc32
diff --git a/mysql-test/suite/innodb/t/table_flags.test b/mysql-test/suite/innodb/t/table_flags.test
index 13e1fc01dc0..238530d25c5 100644
--- a/mysql-test/suite/innodb/t/table_flags.test
+++ b/mysql-test/suite/innodb/t/table_flags.test
@@ -34,6 +34,7 @@ let bugdir= $MYSQLTEST_VARDIR/tmp/table_flags;
if ($have_debug) {
--let $d=$d --debug=d,create_and_drop_garbage
}
+--let $restart_noprint=1
--let $restart_parameters=$d --innodb-stats-persistent=0
--source include/restart_mysqld.inc
@@ -56,6 +57,8 @@ do "$ENV{MTR_SUITE_DIR}/include/crc32.pl";
my $ps= $ENV{INNODB_PAGE_SIZE};
my $file= "$ENV{bugdir}/ibdata1";
open(FILE, "+<", $file) || die "Unable to open $file\n";
+die "Unable to read $file" unless sysread(FILE, $_, 58) == 58;
+my $full_crc32 = unpack("N",substr($_,54,4)) & 0x10; # FIL_SPACE_FLAGS
# Read DICT_HDR_TABLES, the root page number of CLUST_IND (SYS_TABLES.NAME).
sysseek(FILE, 7*$ps+38+32, 0) || die "Unable to seek $file";
die "Unable to read $file" unless sysread(FILE, $_, 4) == 4;
@@ -129,10 +132,18 @@ for (my $offset= 0x65; $offset;
print ")\n";
}
my $polynomial = 0x82f63b78; # CRC-32C
-my $ck= pack("N",mycrc32(substr($page, 4, 22), 0, $polynomial) ^
+if ($full_crc32)
+{
+ my $ck = mycrc32(substr($page, 0, $ps-4), 0, $polynomial);
+ substr($page, $ps-4, 4) = pack("N", $ck);
+}
+else
+{
+ my $ck= pack("N",mycrc32(substr($page, 4, 22), 0, $polynomial) ^
mycrc32(substr($page, 38, $ps - 38 - 8), 0, $polynomial));
-substr($page,0,4)=$ck;
-substr($page,$ps-8,4)=$ck;
+ substr($page,0,4)=$ck;
+ substr($page,$ps-8,4)=$ck;
+}
sysseek(FILE, $sys_tables_root*$ps, 0) || die "Unable to seek $file";
syswrite(FILE, $page, $ps)==$ps || die "Unable to write $file\n";
close(FILE) || die "Unable to close $file\n";
diff --git a/mysql-test/suite/innodb/t/undo_truncate.opt b/mysql-test/suite/innodb/t/undo_truncate.opt
deleted file mode 100644
index 1c897ab6cfc..00000000000
--- a/mysql-test/suite/innodb/t/undo_truncate.opt
+++ /dev/null
@@ -1 +0,0 @@
---innodb-log-buffer-size=2m
diff --git a/mysql-test/suite/innodb/t/undo_truncate.test b/mysql-test/suite/innodb/t/undo_truncate.test
index 9b91c78e35a..af6ed2b4372 100644
--- a/mysql-test/suite/innodb/t/undo_truncate.test
+++ b/mysql-test/suite/innodb/t/undo_truncate.test
@@ -1,9 +1,5 @@
--source include/have_innodb.inc
-# With 32k, truncation could happen on shutdown after the test,
-# and the mtr.add_suppression() would not filter out the warning.
-# With 64k, no truncation seems to happen.
-# --source include/innodb_page_size.inc
---source include/innodb_page_size_small.inc
+--source include/innodb_page_size.inc
--source include/have_undo_tablespaces.inc
SET @save_undo_logs = @@GLOBAL.innodb_undo_logs;
diff --git a/mysql-test/suite/innodb/t/undo_truncate_recover.combinations b/mysql-test/suite/innodb/t/undo_truncate_recover.combinations
new file mode 100644
index 00000000000..729380593f3
--- /dev/null
+++ b/mysql-test/suite/innodb/t/undo_truncate_recover.combinations
@@ -0,0 +1,5 @@
+[strict_crc32]
+--innodb-checksum-algorithm=strict_crc32
+
+[strict_full_crc32]
+--innodb-checksum-algorithm=strict_full_crc32
diff --git a/mysql-test/suite/innodb/t/undo_truncate_recover.test b/mysql-test/suite/innodb/t/undo_truncate_recover.test
index 81db3086711..a143c25eaf0 100644
--- a/mysql-test/suite/innodb/t/undo_truncate_recover.test
+++ b/mysql-test/suite/innodb/t/undo_truncate_recover.test
@@ -28,6 +28,7 @@ while ($i) {
--enable_query_log
commit;
+let $checksum_algorithm = `SELECT @@GLOBAL.innodb_checksum_algorithm`;
let SEARCH_PATTERN = ib_undo_trunc;
begin;
update t1 set c = 'MariaDB';
@@ -35,13 +36,20 @@ update t1 set c = 'InnoDB';
eval set global debug_dbug = '+d,$SEARCH_PATTERN';
commit;
call mtr.add_suppression("InnoDB: innodb_undo_tablespaces=0 disables dedicated undo log tablespaces");
-# FIXME: remove this work-around, and generate less log!
-call mtr.add_suppression("InnoDB: The redo log transaction size ");
SET GLOBAL innodb_fast_shutdown=0;
--source include/shutdown_mysqld.inc
--source include/search_pattern_in_file.inc
-# FIXME: remove this work-around, and generate less log!
---let $restart_parameters= --innodb-buffer-pool-size=16m --innodb-undo-tablespaces=1
+--let $restart_parameters= --innodb-undo-tablespaces=1
+--let $restart_noprint=1
+if ($checksum_algorithm == "strict_full_crc32")
+{
+ let $restart_parameters= $restart_parameters --innodb_checksum_algorithm=strict_crc32;
+}
+
+if ($checksum_algorithm == "strict_crc32")
+{
+ let $restart_parameters= $restart_parameters --innodb_checksum_algorithm=strict_full_crc32;
+}
--source include/start_mysqld.inc
drop table t1;
diff --git a/mysql-test/suite/innodb_fts/r/crash_recovery.result b/mysql-test/suite/innodb_fts/r/crash_recovery.result
index 7d596684344..cb6a441f905 100644
--- a/mysql-test/suite/innodb_fts/r/crash_recovery.result
+++ b/mysql-test/suite/innodb_fts/r/crash_recovery.result
@@ -24,6 +24,7 @@ DELETE FROM articles LIMIT 1;
ROLLBACK;
disconnect flush_redo_log;
connection default;
+# restart
INSERT INTO articles (title,body) VALUES
('MySQL Tutorial','DBMS stands for DataBase ...');
CREATE FULLTEXT INDEX idx ON articles (title,body);
@@ -52,6 +53,7 @@ DELETE FROM articles LIMIT 1;
ROLLBACK;
disconnect flush_redo_log;
connection default;
+# restart
INSERT INTO articles (title,body) VALUES
('MySQL Tutorial','DBMS stands for DataBase ...');
SELECT * FROM articles
@@ -82,6 +84,7 @@ INSERT INTO articles VALUES
BEGIN;
INSERT INTO articles VALUES
(100, 200, 'MySQL Tutorial','DBMS stands for DataBase ...');
+# restart
INSERT INTO articles VALUES (8, 12, 'MySQL Tutorial','DBMS stands for DataBase ...');
SELECT * FROM articles WHERE MATCH (title, body)
AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE);
diff --git a/mysql-test/suite/innodb_fts/r/fulltext_order_by.result b/mysql-test/suite/innodb_fts/r/fulltext_order_by.result
index 503f117d02f..0d3a4a85b86 100644
--- a/mysql-test/suite/innodb_fts/r/fulltext_order_by.result
+++ b/mysql-test/suite/innodb_fts/r/fulltext_order_by.result
@@ -129,7 +129,7 @@ group by
a.text, b.id, b.betreff
order by
match(b.betreff) against ('+abc' in boolean mode) desc;
-ERROR 42000: Table 'b' from one of the SELECTs cannot be used in field list
+ERROR 42000: Table 'b' from one of the SELECTs cannot be used in ORDER clause
select a.text, b.id, b.betreff
from
t2 a inner join t3 b on a.id = b.forum inner join
@@ -145,7 +145,7 @@ where
match(c.beitrag) against ('+abc' in boolean mode)
order by
match(b.betreff) against ('+abc' in boolean mode) desc;
-ERROR 42000: Table 'b' from one of the SELECTs cannot be used in field list
+ERROR 42000: Table 'b' from one of the SELECTs cannot be used in ORDER clause
select a.text, b.id, b.betreff
from
t2 a inner join t3 b on a.id = b.forum inner join
diff --git a/mysql-test/suite/innodb_fts/r/innodb-fts-fic.result b/mysql-test/suite/innodb_fts/r/innodb-fts-fic.result
index 36d3826be59..f998881f11b 100644
--- a/mysql-test/suite/innodb_fts/r/innodb-fts-fic.result
+++ b/mysql-test/suite/innodb_fts/r/innodb-fts-fic.result
@@ -36,6 +36,8 @@ count
2
ANALYZE TABLE articles;
Table Op Msg_type Msg_text
+test.articles analyze status Engine-independent statistics collected
+test.articles analyze Warning Engine-independent statistics are not collected for column 'body'
test.articles analyze status OK
SELECT * FROM articles WHERE MATCH (title,body)
AGAINST ('+MySQL -YourSQL' IN BOOLEAN MODE);
diff --git a/mysql-test/suite/innodb_fts/r/innodb-fts-stopword.result b/mysql-test/suite/innodb_fts/r/innodb-fts-stopword.result
index dea2f2360da..cad1acb3b01 100644
--- a/mysql-test/suite/innodb_fts/r/innodb-fts-stopword.result
+++ b/mysql-test/suite/innodb_fts/r/innodb-fts-stopword.result
@@ -249,6 +249,8 @@ articles CREATE TABLE `articles` (
ALTER TABLE articles ADD FULLTEXT INDEX idx (title,body);
ANALYZE TABLE articles;
Table Op Msg_type Msg_text
+test.articles analyze status Engine-independent statistics collected
+test.articles analyze Warning Engine-independent statistics are not collected for column 'body'
test.articles analyze status OK
SELECT * FROM articles WHERE MATCH(title,body) AGAINST ("where will");
id title body
@@ -571,6 +573,8 @@ select @@innodb_ft_enable_stopword;
1
ANALYZE TABLE articles;
Table Op Msg_type Msg_text
+test.articles analyze status Engine-independent statistics collected
+test.articles analyze Warning Engine-independent statistics are not collected for column 'body'
test.articles analyze status OK
SELECT * FROM articles WHERE MATCH(title,body) AGAINST ("where will");
id title body
diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
index aec3d7f777d..431a95442e6 100644
--- a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
+++ b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
@@ -37,9 +37,13 @@ DELETE FROM t1;
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a2`) REFERENCES `t1` (`a1`) ON UPDATE CASCADE)
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze Warning Engine-independent statistics are not collected for column 'b1'
test.t1 analyze status OK
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze Warning Engine-independent statistics are not collected for column 'b2'
test.t2 analyze status OK
SELECT id1 FROM t1 WHERE MATCH (a1,b1) AGAINST ('tutorial') ORDER BY id1;
id1
diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_plugin.result b/mysql-test/suite/innodb_fts/r/innodb_fts_plugin.result
index dc71156b7a1..ec417af97c5 100644
--- a/mysql-test/suite/innodb_fts/r/innodb_fts_plugin.result
+++ b/mysql-test/suite/innodb_fts/r/innodb_fts_plugin.result
@@ -122,6 +122,7 @@ INSERT INTO articles (title, body) VALUES
('Optimizing MySQL','In this tutorial we will show ...'),
('1001 MySQL Tricks','How to use full-text search engine'),
('Go MySQL Tricks','How to use full text search engine');
+# restart
SELECT COUNT(*) FROM articles;
COUNT(*)
0
@@ -134,6 +135,7 @@ INSERT INTO articles (title, body) VALUES
('Optimizing MySQL','In this tutorial we will show ...'),
('1001 MySQL Tricks','How to use full-text search engine'),
('Go MariaDB Tricks','How to use full text search engine');
+# restart
SELECT * FROM articles WHERE
MATCH(title, body) AGAINST('MySQL');
id title body
diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_stopword_charset.result b/mysql-test/suite/innodb_fts/r/innodb_fts_stopword_charset.result
index c49c7f8ae6c..f2cfcd93af9 100644
--- a/mysql-test/suite/innodb_fts/r/innodb_fts_stopword_charset.result
+++ b/mysql-test/suite/innodb_fts/r/innodb_fts_stopword_charset.result
@@ -256,6 +256,7 @@ SELECT * FROM articles WHERE MATCH (title)
AGAINST ('love' IN NATURAL LANGUAGE MODE);
id title
# Shutdown and restart mysqld
+# restart
SET NAMES utf8;
INSERT IGNORE INTO articles (title) VALUES
('love'),('LOVE'),('lòve'),('LÒVE'),('löve'),('LÖVE'),('løve'),('LØVE'),
@@ -297,6 +298,7 @@ AGAINST ('love' IN NATURAL LANGUAGE MODE);
id title
DROP TABLE user_stopword;
# Shutdown and restart mysqld
+# restart
SET NAMES utf8;
INSERT IGNORE INTO articles (title) VALUES
('love'),('LOVE'),('lòve'),('LÒVE'),('löve'),('LÖVE'),('løve'),('LØVE'),
diff --git a/mysql-test/suite/innodb_fts/r/sync.result b/mysql-test/suite/innodb_fts/r/sync.result
index 82959cfea96..12d0971b828 100644
--- a/mysql-test/suite/innodb_fts/r/sync.result
+++ b/mysql-test/suite/innodb_fts/r/sync.result
@@ -98,6 +98,7 @@ INSERT INTO t1(title) VALUES('database');
SET debug_dbug = '+d,fts_instrument_sync_debug,fts_write_node_crash';
INSERT INTO t1(title) VALUES('mysql');
ERROR HY000: Lost connection to MySQL server during query
+# restart
After restart
SELECT * FROM t1 WHERE MATCH(title) AGAINST ('mysql database');
FTS_DOC_ID title
@@ -120,6 +121,7 @@ PRIMARY KEY(id)) ENGINE=InnoDB;
SET debug_dbug = '+d,fts_instrument_sync';
INSERT INTO t1(title) VALUES('mysql');
SET debug_dbug = @old_dbug;
+# restart
SET @old_global_dbug = @@GLOBAL.debug_dbug;
SET @old_dbug = @@SESSION.debug_dbug;
SET GLOBAL debug_dbug='+d,fts_instrument_sync,fts_instrument_sync_interrupted';
diff --git a/mysql-test/suite/innodb_fts/t/fulltext_table_evict.test b/mysql-test/suite/innodb_fts/t/fulltext_table_evict.test
index 2e7aa655aa1..18bce1a8105 100644
--- a/mysql-test/suite/innodb_fts/t/fulltext_table_evict.test
+++ b/mysql-test/suite/innodb_fts/t/fulltext_table_evict.test
@@ -1,6 +1,7 @@
--source include/have_innodb.inc
--source include/have_debug.inc
--source include/big_test.inc
+let $restart_noprint=2;
--echo #
--echo # Bug Bug #27304661 MYSQL CRASH DOING SYNC INDEX ]
diff --git a/mysql-test/suite/innodb_gis/r/0.result b/mysql-test/suite/innodb_gis/r/0.result
index ffe423005d2..6dd2cd16437 100644
--- a/mysql-test/suite/innodb_gis/r/0.result
+++ b/mysql-test/suite/innodb_gis/r/0.result
@@ -560,29 +560,29 @@ id select_type table type possible_keys key key_len ref rows Extra
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
1
-INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(1 2)'));
-INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(1 2)'));
+INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(3 4)'));
+INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(3 4)'));
EXPLAIN
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
EXPLAIN
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref p p 28 const # Using where
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
EXPLAIN
SELECT COUNT(*) FROM t2 IGNORE INDEX(p) WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where
SELECT COUNT(*) FROM t2 IGNORE INDEX(p) WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
DROP TABLE t1, t2;
End of 5.0 tests
#
diff --git a/mysql-test/suite/innodb_gis/r/1.result b/mysql-test/suite/innodb_gis/r/1.result
index 84e7bec53cd..8de9fd30894 100644
--- a/mysql-test/suite/innodb_gis/r/1.result
+++ b/mysql-test/suite/innodb_gis/r/1.result
@@ -943,29 +943,29 @@ id select_type table type possible_keys key key_len ref rows Extra
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
1
-INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(1 2)'));
-INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(1 2)'));
+INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(3 4)'));
+INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(3 4)'));
EXPLAIN
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
EXPLAIN
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref p p 28 const # Using where
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
EXPLAIN
SELECT COUNT(*) FROM t2 IGNORE INDEX(p) WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where
SELECT COUNT(*) FROM t2 IGNORE INDEX(p) WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
DROP TABLE t1, t2;
End of 5.0 tests
#
diff --git a/mysql-test/suite/innodb_gis/r/alter_spatial_index.result b/mysql-test/suite/innodb_gis/r/alter_spatial_index.result
index 703af3c40de..cc24c6ff84f 100644
--- a/mysql-test/suite/innodb_gis/r/alter_spatial_index.result
+++ b/mysql-test/suite/innodb_gis/r/alter_spatial_index.result
@@ -464,6 +464,7 @@ ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010
ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))'));
ANALYZE TABLE tab;
Table Op Msg_type Msg_text
+test.tab analyze status Engine-independent statistics collected
test.tab analyze status OK
ALTER TABLE tab ADD SPATIAL INDEX idx2(c2 ASC);
affected rows: 0
@@ -521,7 +522,7 @@ HEX(c8)
ROLLBACK;
ALTER TABLE tab add COLUMN c9 POINT NOT NULL AFTER c5, ALGORITHM = INPLACE, LOCK=NONE;
-ERROR 0A000: LOCK=NONE is not supported. Reason: Do not support online operation on table with GIS index. Try LOCK=SHARED
+ALTER TABLE tab DROP COLUMN c9, ALGORITHM=INSTANT;
SHOW CREATE TABLE tab;
Table Create Table
tab CREATE TABLE `tab` (
@@ -586,6 +587,7 @@ tab 1 idx5 1 c5 A # 32 NULL SPATIAL testing spatial index on Geometry
tab 1 idx6 1 c4 A # 10 NULL BTREE
ANALYZE TABLE tab;
Table Op Msg_type Msg_text
+test.tab analyze status Engine-independent statistics collected
test.tab analyze status OK
SET @g1 = ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))');
SET @g2 = ST_GeomFromText('LINESTRING(140 140,150 150,160 160)');
@@ -605,6 +607,7 @@ AND MBREquals(tab.c3,@g2) ORDER BY c1;
c1 ST_Astext(c2) ST_AsText(c3) ST_Astext(c4)
ANALYZE TABLE tab;
Table Op Msg_type Msg_text
+test.tab analyze status Engine-independent statistics collected
test.tab analyze status OK
SET @g1 = ST_GeomFromText('POLYGON((4010 4010,4020 4020,4030 4030,4040 4030,4020 4010,4010 4010))');
SET @g2 = ST_GeomFromText('LINESTRING(1 1,2 2,3 3)');
@@ -644,6 +647,7 @@ tab 1 idx5 1 c5 A # 32 NULL SPATIAL testing spatial index on Geometry
tab 1 idx6 1 c4 A # 10 NULL BTREE
ANALYZE TABLE tab;
Table Op Msg_type Msg_text
+test.tab analyze status Engine-independent statistics collected
test.tab analyze status OK
SET @g1 = ST_GeomFromText( 'POLYGON((0 0,0 30,30 40,40 50,50 30,0 0))');
SET @g2 = ST_GeomFromText('LINESTRING(1 1,2 2,3 3)');
@@ -678,6 +682,7 @@ ALTER TABLE tab ENGINE Myisam;
ALTER TABLE tab ENGINE InnoDB;
ANALYZE TABLE tab;
Table Op Msg_type Msg_text
+test.tab analyze status Engine-independent statistics collected
test.tab analyze status OK
SET @g1 = ST_GeomFromText('POLYGON((1010 1010,1020 1020,1030 1030,1040 1030,1020 1010,1010 1010))');
SET @g2 = ST_GeomFromText('LINESTRING(400 400,500 500,600 700)');
diff --git a/mysql-test/suite/innodb_gis/r/create_spatial_index.result b/mysql-test/suite/innodb_gis/r/create_spatial_index.result
index 1af03c251bf..c69d67c411f 100644
--- a/mysql-test/suite/innodb_gis/r/create_spatial_index.result
+++ b/mysql-test/suite/innodb_gis/r/create_spatial_index.result
@@ -55,6 +55,7 @@ ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010
ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))'));
ANALYZE TABLE tab;
Table Op Msg_type Msg_text
+test.tab analyze status Engine-independent statistics collected
test.tab analyze status OK
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
@@ -483,6 +484,7 @@ ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010
ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))'));
ANALYZE TABLE tab;
Table Op Msg_type Msg_text
+test.tab analyze status Engine-independent statistics collected
test.tab analyze status OK
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
@@ -900,6 +902,7 @@ ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010
ST_GeomFromText('POLYGON((5010 5010,5020 5020,5030 5030,5040 5030,5020 5010,5010 5010))'));
ANALYZE TABLE tab;
Table Op Msg_type Msg_text
+test.tab analyze status Engine-independent statistics collected
test.tab analyze status OK
SET @g1 = ST_GeomFromText( 'POLYGON((7 1,6 2,6 3,10 3,10 1,7 1))');
EXPLAIN SELECT c1,ST_Astext(c4) FROM tab WHERE MBRContains(tab.c4, @g1) ORDER BY c1;
diff --git a/mysql-test/suite/innodb_gis/r/gis.result b/mysql-test/suite/innodb_gis/r/gis.result
index 65d1e940cd3..6929afd4228 100644
--- a/mysql-test/suite/innodb_gis/r/gis.result
+++ b/mysql-test/suite/innodb_gis/r/gis.result
@@ -939,29 +939,29 @@ id select_type table type possible_keys key key_len ref rows Extra
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
1
-INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(1 2)'));
-INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(1 2)'));
+INSERT INTO t1 VALUES (POINTFROMTEXT('POINT(3 4)'));
+INSERT INTO t2 VALUES (POINTFROMTEXT('POINT(3 4)'));
EXPLAIN
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using where
SELECT COUNT(*) FROM t1 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
EXPLAIN
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ref p p 28 const # Using where
SELECT COUNT(*) FROM t2 WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
EXPLAIN
SELECT COUNT(*) FROM t2 IGNORE INDEX(p) WHERE p=POINTFROMTEXT('POINT(1 2)');
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 2 Using where
SELECT COUNT(*) FROM t2 IGNORE INDEX(p) WHERE p=POINTFROMTEXT('POINT(1 2)');
COUNT(*)
-2
+1
DROP TABLE t1, t2;
End of 5.0 tests
#
diff --git a/mysql-test/suite/innodb_gis/r/innodb_gis_rtree.result b/mysql-test/suite/innodb_gis/r/innodb_gis_rtree.result
index 5d4708dd111..2ef8757339e 100644
--- a/mysql-test/suite/innodb_gis/r/innodb_gis_rtree.result
+++ b/mysql-test/suite/innodb_gis/r/innodb_gis_rtree.result
@@ -744,6 +744,7 @@ Table Op Msg_type Msg_text
test.t1 check status OK
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
drop table t1;
CREATE TABLE t1 (
diff --git a/mysql-test/suite/innodb_gis/r/precise.result b/mysql-test/suite/innodb_gis/r/precise.result
index 3c05b814602..6cc0368fbb4 100644
--- a/mysql-test/suite/innodb_gis/r/precise.result
+++ b/mysql-test/suite/innodb_gis/r/precise.result
@@ -216,7 +216,7 @@ st_u
MULTIPOLYGON(((525400 18370,525000.9677614468 183300,525400 183300,525400 18370)),((525000 183300,525000 183700,525000.9677614468 183300,525000 183300)),((525265.58 183481.95,525263.95 183484.75,525260.7 183491.55,525276.79 183500,525278.39 183500.84,525278.63 183500.97,525280.98 183502.26,525283.17 183503.47,525289.11 183506.62,525296.42 183510.31,525296.57 183510.39,525298.67 183511.53,525302.81 183513.8,525304.5 183510.83,525307.85 183504.95,525304.45 183504.25,525301.75 183509.35,525283.55 183500,525282.2 183499.3,525282.3 183499.1,525280.35 183498.2,525275.5 183495.7,525276.5 183493.45,525278.97 183488.73,525265.58 183481.95),(525266.99 183484.33,525263.26 183491.55,525266.15 183493.04,525269.88 183485.82,525266.99 183484.33),(525272.06 183488.37,525268.94 183494.51,525271.94 183496.03,525275.06 183489.89,525272.06 183488.37)))
SET @a=0x0000000001030000000200000005000000000000000000000000000000000000000000000000002440000000000000000000000000000024400000000000002440000000000000000000000000000024400000000000000000000000000000000000000000000000000000F03F000000000000F03F0000000000000040000000000000F03F00000000000000400000000000000040000000000000F03F0000000000000040000000000000F03F000000000000F03F;
SELECT ST_ASTEXT(ST_TOUCHES(@a, ST_GEOMFROMTEXT('point(0 0)'))) t;
-ERROR HY000: Illegal parameter data type int for operation 'st_astext'
+ERROR HY000: Illegal parameter data type boolean for operation 'st_astext'
DROP TABLE IF EXISTS p1;
CREATE PROCEDURE p1(dist DOUBLE, geom TEXT)
BEGIN
diff --git a/mysql-test/suite/innodb_gis/r/rt_precise.result b/mysql-test/suite/innodb_gis/r/rt_precise.result
index 9e2c6adc2d9..c83b88b568c 100644
--- a/mysql-test/suite/innodb_gis/r/rt_precise.result
+++ b/mysql-test/suite/innodb_gis/r/rt_precise.result
@@ -18,6 +18,7 @@ count(*)
150
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN SELECT fid, ST_AsText(g) FROM t1 WHERE ST_Within(g, ST_GeomFromText('Polygon((140 140,160 140,160 160,140 140))'));
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/mysql-test/suite/innodb_gis/r/rtree.result b/mysql-test/suite/innodb_gis/r/rtree.result
index d6604314909..53719f12205 100644
--- a/mysql-test/suite/innodb_gis/r/rtree.result
+++ b/mysql-test/suite/innodb_gis/r/rtree.result
@@ -6,6 +6,7 @@ insert into t1 values (1, POINT(3.1,3.1));
insert into t1 values (1, POINT(5,5));
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
set @g1 = ST_GeomFromText('Polygon((0 0,0 3,3 3,3 0,0 0))');
explain select ST_astext(t1.g) from t1 where MBRWithin(t1.g, @g1);
@@ -150,6 +151,7 @@ insert into t1 values (4, "444", POINT(3.1,3.1));
insert into t1 values (5, "555", POINT(5,5));
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
set @g1 = ST_GeomFromText('Polygon((0 0,0 3,3 3,3 0,0 0))');
explain select ST_astext(t1.g) from t1 where MBRWithin(t1.g, @g1);
diff --git a/mysql-test/suite/innodb_gis/r/rtree_drop_index.result b/mysql-test/suite/innodb_gis/r/rtree_drop_index.result
index 8ac1a049b8e..e2e64786281 100644
--- a/mysql-test/suite/innodb_gis/r/rtree_drop_index.result
+++ b/mysql-test/suite/innodb_gis/r/rtree_drop_index.result
@@ -19,4 +19,5 @@ COMMIT;
disconnect purge_control;
connection default;
set global innodb_fast_shutdown = 0;
+# restart
drop table t1;
diff --git a/mysql-test/suite/innodb_gis/r/rtree_estimate.result b/mysql-test/suite/innodb_gis/r/rtree_estimate.result
index 251685df018..edb37778f54 100644
--- a/mysql-test/suite/innodb_gis/r/rtree_estimate.result
+++ b/mysql-test/suite/innodb_gis/r/rtree_estimate.result
@@ -20,6 +20,7 @@ POLYGON((5 5,20 5,20 21,5 21,5 5))
POLYGON((1.79769e308 1.79769e308,20 5,-1.79769e308 -1.79769e308,1.79769e308 1.79769e308))
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
EXPLAIN SELECT ST_AsText(g) FROM t1 WHERE MBRContains(g, @g1);
id select_type table type possible_keys key key_len ref rows Extra
@@ -29,7 +30,7 @@ ST_AsText(g)
POINT(10 10)
EXPLAIN SELECT ST_AsText(g) FROM t1 WHERE MBRDisjoint(g, @g1);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range g g 34 NULL 2 Using where
+1 SIMPLE t1 ALL g NULL NULL NULL 3 Using where
SELECT ST_AsText(g) FROM t1 WHERE MBRWithin(g, @g1);
ST_AsText(g)
POINT(10 10)
@@ -74,14 +75,14 @@ POINT(10 10)
POLYGON((5 5,20 5,20 21,5 21,5 5))
EXPLAIN SELECT ST_AsText(g) FROM t1 WHERE MBRIntersects(g, @g2);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range g g 34 NULL 2 Using where
+1 SIMPLE t1 ALL g NULL NULL NULL 3 Using where
SELECT ST_AsText(g) FROM t1 WHERE MBRWithin(g, @g2);
ST_AsText(g)
POINT(10 10)
POLYGON((5 5,20 5,20 21,5 21,5 5))
EXPLAIN SELECT ST_AsText(g) FROM t1 WHERE MBRWithin(g, @g2);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range g g 34 NULL 2 Using where
+1 SIMPLE t1 ALL g NULL NULL NULL 3 Using where
SELECT ST_AsText(g) FROM t1 WHERE MBRWithin(g, @g2);
ST_AsText(g)
POINT(10 10)
diff --git a/mysql-test/suite/innodb_gis/r/rtree_recovery.result b/mysql-test/suite/innodb_gis/r/rtree_recovery.result
index 4fee60caf85..6f5b02e7feb 100644
--- a/mysql-test/suite/innodb_gis/r/rtree_recovery.result
+++ b/mysql-test/suite/innodb_gis/r/rtree_recovery.result
@@ -1,3 +1,4 @@
+# restart
create table t1 (c1 int, c2 geometry not null, spatial index (c2))engine=innodb;
create procedure insert_t1(IN total int)
begin
@@ -17,6 +18,7 @@ end while;
end|
CALL insert_t1(367);
COMMIT;
+# restart
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
@@ -27,6 +29,7 @@ CALL update_t1(367);
SET @poly1 = ST_GeomFromText('POLYGON((10000 10000, 10000 10350, 10350 10350, 10350 10000, 10000 10000))');
delete from t1 where ST_Contains(@poly1, c2);
COMMIT;
+# restart
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
diff --git a/mysql-test/suite/innodb_gis/r/types.result b/mysql-test/suite/innodb_gis/r/types.result
index dc46710c365..9eb40dc4231 100644
--- a/mysql-test/suite/innodb_gis/r/types.result
+++ b/mysql-test/suite/innodb_gis/r/types.result
@@ -51,6 +51,7 @@ FLUSH TABLES;
INSERT INTO t_wl6455 VALUES(11, POINT(11,11));
BEGIN;
INSERT INTO t_wl6455 VALUES(1, POINT(1,1));
+# restart
CHECK TABLE t_wl6455;
Table Op Msg_type Msg_text
test.t_wl6455 check status OK
diff --git a/mysql-test/suite/innodb_gis/t/alter_spatial_index.test b/mysql-test/suite/innodb_gis/t/alter_spatial_index.test
index 653e250017a..5843c6fc8f6 100644
--- a/mysql-test/suite/innodb_gis/t/alter_spatial_index.test
+++ b/mysql-test/suite/innodb_gis/t/alter_spatial_index.test
@@ -491,9 +491,8 @@ FROM tab LIMIT 1;
SELECT HEX(c8) FROM tab;
ROLLBACK;
-# not instant, not supported
---error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER TABLE tab add COLUMN c9 POINT NOT NULL AFTER c5, ALGORITHM = INPLACE, LOCK=NONE;
+ALTER TABLE tab DROP COLUMN c9, ALGORITHM=INSTANT;
SHOW CREATE TABLE tab;
diff --git a/mysql-test/suite/innodb_gis/t/kill_server.test b/mysql-test/suite/innodb_gis/t/kill_server.test
index 028bbbdd40d..d6657c74bcc 100644
--- a/mysql-test/suite/innodb_gis/t/kill_server.test
+++ b/mysql-test/suite/innodb_gis/t/kill_server.test
@@ -6,6 +6,7 @@
--source include/have_innodb.inc
--source include/big_test.inc
+let $restart_noprint=2;
# Create table with R-tree index.
create table t1 (c1 int, c2 geometry not null, spatial index (c2))engine=innodb;
diff --git a/mysql-test/suite/innodb_gis/t/rtree_compress2.test b/mysql-test/suite/innodb_gis/t/rtree_compress2.test
index c02a6ff0ed4..95be8b8f357 100644
--- a/mysql-test/suite/innodb_gis/t/rtree_compress2.test
+++ b/mysql-test/suite/innodb_gis/t/rtree_compress2.test
@@ -8,6 +8,7 @@
--source include/big_test.inc
# Valgrind takes too much time on PB2 even in the --big-test runs.
--source include/not_valgrind.inc
+let $restart_noprint=2;
SET GLOBAL innodb_file_per_table=1;
diff --git a/mysql-test/suite/innodb_gis/t/rtree_estimate.test b/mysql-test/suite/innodb_gis/t/rtree_estimate.test
index 4caa5feb5b1..7038799a26a 100644
--- a/mysql-test/suite/innodb_gis/t/rtree_estimate.test
+++ b/mysql-test/suite/innodb_gis/t/rtree_estimate.test
@@ -15,6 +15,7 @@ SET @g1 = ST_GeomFromText('POINT(10 10)');
SET @g2 = ST_GeomFromText('POLYGON((5 5, 20 5, 20 21, 5 21, 5 5))');
SET @g3 = ST_GeomFromText('POLYGON((1.79769e+308 1.79769e+308, 20 5, -1.79769e+308 -1.79769e+308, 1.79769e+308 1.79769e+308))');
+
# Test empty table
EXPLAIN SELECT ST_AsText(g) FROM t1 WHERE MBRContains(g, @g1);
SELECT ST_AsText(g) FROM t1 WHERE MBRWithin(g, @g1);
diff --git a/mysql-test/suite/innodb_gis/t/rtree_split.test b/mysql-test/suite/innodb_gis/t/rtree_split.test
index caf79becbd0..856fb485234 100644
--- a/mysql-test/suite/innodb_gis/t/rtree_split.test
+++ b/mysql-test/suite/innodb_gis/t/rtree_split.test
@@ -8,6 +8,7 @@
--source include/big_test.inc
--source include/not_valgrind.inc
--source include/have_debug.inc
+let $restart_noprint=2;
# Create table with R-tree index.
create table t1 (c1 int, c2 geometry not null, spatial index (c2))engine=innodb;
diff --git a/mysql-test/suite/innodb_zip/r/16k,full_crc32.rdiff b/mysql-test/suite/innodb_zip/r/16k,full_crc32.rdiff
new file mode 100644
index 00000000000..97daeacd1e8
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/r/16k,full_crc32.rdiff
@@ -0,0 +1,16 @@
+--- 16k.result
++++ 16k.reject
+@@ -41,10 +41,10 @@
+ test/t4 5 33 PRIMARY 3 3 1 50
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t1 Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t1.ibd
+-test/t2 Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t2.ibd
++test/t1 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t1.ibd
++test/t2 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t2.ibd
+ test/t3 Single DEFAULT 8192 Compressed MYSQLD_DATADIR/test/t3.ibd
+-test/t4 Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4.ibd
++test/t4 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4.ibd
+ DROP TABLE t1, t2, t3, t4;
+ # Test 4) The maximum row size is dependent upon the page size.
+ # Redundant: 8123, Compact: 8126.
diff --git a/mysql-test/suite/innodb_zip/r/16k,strict_full_crc32.rdiff b/mysql-test/suite/innodb_zip/r/16k,strict_full_crc32.rdiff
new file mode 100644
index 00000000000..97daeacd1e8
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/r/16k,strict_full_crc32.rdiff
@@ -0,0 +1,16 @@
+--- 16k.result
++++ 16k.reject
+@@ -41,10 +41,10 @@
+ test/t4 5 33 PRIMARY 3 3 1 50
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t1 Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t1.ibd
+-test/t2 Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t2.ibd
++test/t1 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t1.ibd
++test/t2 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t2.ibd
+ test/t3 Single DEFAULT 8192 Compressed MYSQLD_DATADIR/test/t3.ibd
+-test/t4 Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4.ibd
++test/t4 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4.ibd
+ DROP TABLE t1, t2, t3, t4;
+ # Test 4) The maximum row size is dependent upon the page size.
+ # Redundant: 8123, Compact: 8126.
diff --git a/mysql-test/suite/innodb_zip/r/16k.result b/mysql-test/suite/innodb_zip/r/16k.result
index e7def759e87..49ad5609705 100644
--- a/mysql-test/suite/innodb_zip/r/16k.result
+++ b/mysql-test/suite/innodb_zip/r/16k.result
@@ -379,7 +379,7 @@ Table Op Msg_type Msg_text
test.t1 check status OK
EXPLAIN SELECT * FROM t1 WHERE b LIKE 'adfd%';
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range b b 769 NULL 12 Using where
+1 SIMPLE t1 range b b 769 NULL # Using where
DROP TABLE t1;
# Test 8) Test creating a table that could lead to undo log overflow.
CREATE TABLE t1(a blob,b blob,c blob,d blob,e blob,f blob,g blob,
@@ -686,8 +686,8 @@ SELECT COUNT(*) FROM
(SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <derived2> ALL NULL NULL NULL NULL 1537
-2 DERIVED t1 index_merge PRIMARY,idx idx,PRIMARY 5,4 NULL 1537 Using sort_union(idx,PRIMARY); Using where
+1 PRIMARY <derived2> ALL NULL NULL NULL NULL #
+2 DERIVED t1 index_merge PRIMARY,idx idx,PRIMARY 5,4 NULL # Using sort_union(idx,PRIMARY); Using where
SELECT COUNT(*) FROM
(SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
diff --git a/mysql-test/suite/innodb_zip/r/8k,full_crc32.rdiff b/mysql-test/suite/innodb_zip/r/8k,full_crc32.rdiff
new file mode 100644
index 00000000000..f0f414b3bc3
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/r/8k,full_crc32.rdiff
@@ -0,0 +1,16 @@
+--- 8k.result
++++ 8k.result
+@@ -45,10 +45,10 @@
+ test/t4 5 33 PRIMARY 3 3 1 50
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t1 Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t1.ibd
+-test/t2 Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t2.ibd
++test/t1 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t1.ibd
++test/t2 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t2.ibd
+ test/t3 Single DEFAULT 4096 Compressed MYSQLD_DATADIR/test/t3.ibd
+-test/t4 Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4.ibd
++test/t4 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4.ibd
+ DROP TABLE t1, t2, t3, t4;
+ # Test 4) The maximum row size is dependent upon the page size.
+ # Redundant: 4027, Compact: 4030.
diff --git a/mysql-test/suite/innodb_zip/r/8k,strict_full_crc32.rdiff b/mysql-test/suite/innodb_zip/r/8k,strict_full_crc32.rdiff
new file mode 100644
index 00000000000..f0f414b3bc3
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/r/8k,strict_full_crc32.rdiff
@@ -0,0 +1,16 @@
+--- 8k.result
++++ 8k.result
+@@ -45,10 +45,10 @@
+ test/t4 5 33 PRIMARY 3 3 1 50
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t1 Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t1.ibd
+-test/t2 Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t2.ibd
++test/t1 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t1.ibd
++test/t2 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t2.ibd
+ test/t3 Single DEFAULT 4096 Compressed MYSQLD_DATADIR/test/t3.ibd
+-test/t4 Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4.ibd
++test/t4 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4.ibd
+ DROP TABLE t1, t2, t3, t4;
+ # Test 4) The maximum row size is dependent upon the page size.
+ # Redundant: 4027, Compact: 4030.
diff --git a/mysql-test/suite/innodb_zip/r/bug56680.result b/mysql-test/suite/innodb_zip/r/bug56680.result
index 02ec24c98ae..1a1a5b40707 100644
--- a/mysql-test/suite/innodb_zip/r/bug56680.result
+++ b/mysql-test/suite/innodb_zip/r/bug56680.result
@@ -116,6 +116,7 @@ DF
CHECK TABLE bug56680_2;
Table Op Msg_type Msg_text
test.bug56680_2 check status OK
+# restart
CHECK TABLE bug56680_2;
Table Op Msg_type Msg_text
test.bug56680_2 check status OK
diff --git a/mysql-test/suite/innodb_zip/r/cmp_per_index.result b/mysql-test/suite/innodb_zip/r/cmp_per_index.result
index 7b27fa722b9..f73aad7e02a 100644
--- a/mysql-test/suite/innodb_zip/r/cmp_per_index.result
+++ b/mysql-test/suite/innodb_zip/r/cmp_per_index.result
@@ -72,6 +72,7 @@ index_name PRIMARY
compress_ops 65
compress_ops_ok 65
uncompress_ops 0
+# restart
SHOW CREATE TABLE t;
Table t
Create Table CREATE TABLE `t` (
diff --git a/mysql-test/suite/innodb_zip/r/index_large_prefix.result b/mysql-test/suite/innodb_zip/r/index_large_prefix.result
index 58d35cf2603..4866c152640 100644
--- a/mysql-test/suite/innodb_zip/r/index_large_prefix.result
+++ b/mysql-test/suite/innodb_zip/r/index_large_prefix.result
@@ -337,7 +337,7 @@ create index idx4 on worklog5743(a1, a2);
ERROR 42000: Specified key was too long; max key length is 3072 bytes
show warnings;
Level Code Message
-Error 1071 Specified key was too long; max key length is 3072 bytes
+Warning 1071 Specified key was too long; max key length is 3072 bytes
Error 1071 Specified key was too long; max key length is 3072 bytes
create index idx5 on worklog5743(a1, a5);
ERROR 42000: Specified key was too long; max key length is 3072 bytes
diff --git a/mysql-test/suite/innodb_zip/r/innochecksum.result b/mysql-test/suite/innodb_zip/r/innochecksum.result
index 31d9450df80..e1d3a187186 100644
--- a/mysql-test/suite/innodb_zip/r/innochecksum.result
+++ b/mysql-test/suite/innodb_zip/r/innochecksum.result
@@ -47,6 +47,7 @@ FOUND 1 /Error while setting value \'no\' to \'strict-check\'/ in my_restart.err
[14b]: when server default checksum=crc32 rewrite new checksum=innodb with innochecksum
# Also check the long form of write option.
# start the server with innodb_checksum_algorithm=InnoDB
+# restart: --innodb_checksum_algorithm=innodb
INSERT INTO tab1 VALUES(2, 'Innochecksum CRC32');
SELECT c1,c2 FROM tab1 order by c1,c2;
c1 c2
@@ -56,6 +57,7 @@ c1 c2
[15]: when server default checksum=crc32 rewrite new checksum=none with innochecksum
# Also check the short form of write option.
# Start the server with checksum algorithm=none
+# restart: --innodb_checksum_algorithm=none
INSERT INTO tab1 VALUES(3, 'Innochecksum None');
SELECT c1,c2 FROM tab1 order by c1,c2;
c1 c2
@@ -66,6 +68,7 @@ DROP TABLE t1;
# Stop the server
[16]: rewrite into new checksum=crc32 with innochecksum
# Restart the DB server with innodb_checksum_algorithm=crc32
+# restart: --innodb_checksum_algorithm=crc32
SELECT * FROM tab1;
c1 c2
1 Innochecksum InnoDB1
@@ -79,6 +82,7 @@ c1 c2
# Stop server
[17]: rewrite into new checksum=InnoDB
# Restart the DB server with innodb_checksum_algorithm=InnoDB
+# restart: --innodb_checksum_algorithm=innodb
DELETE FROM tab1 where c1=2;
SELECT * FROM tab1;
c1 c2
@@ -88,4 +92,5 @@ c1 c2
FOUND 1 /Error while setting value \'strict_crc32\' to \'write\'/ in my_restart.err
FOUND 1 /Error while setting value \'strict_innodb\' to \'write\'/ in my_restart.err
FOUND 1 /Error while setting value \'crc23\' to \'write\'/ in my_restart.err
+# restart
DROP TABLE tab1;
diff --git a/mysql-test/suite/innodb_zip/r/innochecksum_3.result b/mysql-test/suite/innodb_zip/r/innochecksum_3.result
index aaab68b3df9..946c86b2a09 100644
--- a/mysql-test/suite/innodb_zip/r/innochecksum_3.result
+++ b/mysql-test/suite/innodb_zip/r/innochecksum_3.result
@@ -39,6 +39,7 @@ SELECT * FROM tab2 ORDER BY col_7;
# stop the server
[1(a)]: Rewrite into new checksum=InnoDB for all *.ibd file and ibdata1
: start the server with innodb_checksum_algorithm=strict_innodb
+# restart: --innodb_checksum_algorithm=strict_innodb
INSERT INTO tab1 (pk, linestring_key, linestring_nokey)
VALUES (2, ST_GeomFromText('LINESTRING(10 10,20 20,30 30)'), ST_GeomFromText('LINESTRING(10 10,20 20,30 30)'));
SET @col_1 = repeat('a', 5);
@@ -55,6 +56,7 @@ SELECT * FROM tab2 ORDER BY col_7;
# stop the server
[1(b)]: Rewrite into new checksum=crc32 for all *.ibd file and ibdata1
# start the server with innodb_checksum_algorithm=strict_crc32
+# restart: --innodb_checksum_algorithm=strict_crc32
INSERT INTO tab1 (pk, linestring_key, linestring_nokey)
VALUES (3, ST_GeomFromText('POLYGON((0 0,5 5,10 10,15 15,0 0),(10 10,20 20,30 30,40 40,10 10))'),
ST_GeomFromText('POLYGON((0 0,5 5,10 10,15 15,0 0),(10 10,20 20,30 30,40 40,10 10))'));
@@ -71,6 +73,7 @@ FROM tab1 ORDER BY pk;
SELECT * FROM tab2 ORDER BY col_7;
# stop the server
[1(c)]: Rewrite into new checksum=none for all *.ibd file and ibdata1
+# restart: --innodb_checksum_algorithm=strict_none
INSERT INTO tab1 (pk, linestring_key, linestring_nokey)
VALUES (4, ST_GeomFromText('MULTIPOINT(0 0,5 5,10 10,20 20) '), ST_GeomFromText('MULTIPOINT(0 0,5 5,10 10,20 20) '));
SET @col_1 = repeat('m', 5);
@@ -224,4 +227,5 @@ NOT FOUND /Incorrect unsigned integer value: '18446744073709551616'/ in my_resta
NOT FOUND /Incorrect unsigned integer value: '18446744073709551616'/ in my_restart.err
NOT FOUND /Incorrect unsigned integer value: '18446744073709551616'/ in my_restart.err
NOT FOUND /Incorrect unsigned integer value: '18446744073709551616'/ in my_restart.err
+# restart
DROP TABLE tab1,tab2;
diff --git a/mysql-test/suite/innodb_zip/r/innodb-zip.result b/mysql-test/suite/innodb_zip/r/innodb-zip.result
index 0da1d6bbe85..e0a454b75a4 100644
--- a/mysql-test/suite/innodb_zip/r/innodb-zip.result
+++ b/mysql-test/suite/innodb_zip/r/innodb-zip.result
@@ -121,9 +121,13 @@ disconnect a;
disconnect b;
analyze table t1;
Table Op Msg_type Msg_text
+mysqltest_innodb_zip.t1 analyze status Engine-independent statistics collected
+mysqltest_innodb_zip.t1 analyze Warning Engine-independent statistics are not collected for column 'b'
mysqltest_innodb_zip.t1 analyze status OK
analyze table t2;
Table Op Msg_type Msg_text
+mysqltest_innodb_zip.t2 analyze status Engine-independent statistics collected
+mysqltest_innodb_zip.t2 analyze Warning Engine-independent statistics are not collected for column 'b'
mysqltest_innodb_zip.t2 analyze status OK
SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql' order by table_name;
table_schema table_name row_format data_length index_length
diff --git a/mysql-test/suite/innodb_zip/r/recover.result b/mysql-test/suite/innodb_zip/r/recover.result
index 97051efb645..b7452655268 100644
--- a/mysql-test/suite/innodb_zip/r/recover.result
+++ b/mysql-test/suite/innodb_zip/r/recover.result
@@ -11,6 +11,7 @@ insert into a select null, uuid() from a a, a b, a c;
insert into a select null, uuid() from a a, a b, a c;
SET GLOBAL innodb_flush_log_at_trx_commit=1;
COMMIT;
+# restart
SELECT COUNT(*) from a;
COUNT(*)
1010
diff --git a/mysql-test/suite/innodb_zip/r/restart,full_crc32.rdiff b/mysql-test/suite/innodb_zip/r/restart,full_crc32.rdiff
new file mode 100644
index 00000000000..c2260f68d42
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/r/restart,full_crc32.rdiff
@@ -0,0 +1,188 @@
+--- restart.result
++++ restart.reject
+@@ -211,18 +211,18 @@
+ test/t7_restart#p#p1#sp#s3 test/t7_restart#p#p1#sp#s3 97 8 Dynamic 0 Single
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t1_restart Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t1_restart.ibd
+-test/t2_restart Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t2_restart.ibd
++test/t1_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t1_restart.ibd
++test/t2_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t2_restart.ibd
+ test/t3_restart Single DEFAULT 2048 Compressed MYSQLD_DATADIR/test/t3_restart.ibd
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
+-test/t5_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
++test/t5_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
+ test/t6_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd
+ test/t6_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd
+ test/t6_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd
+-test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
+-test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
+-test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
+-test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
++test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
++test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
++test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
++test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
+ #
+ # Shutdown the server and list the tablespace OS files
+ #
+@@ -395,18 +395,18 @@
+ test/t7_restart#p#p1#sp#s3 test/t7_restart#p#p1#sp#s3 97 8 Dynamic 0 Single
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t1_restart Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t1_restart.ibd
+-test/t2_restart Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t2_restart.ibd
++test/t1_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t1_restart.ibd
++test/t2_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t2_restart.ibd
+ test/t3_restart Single DEFAULT 2048 Compressed MYSQLD_DATADIR/test/t3_restart.ibd
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
+-test/t5_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
++test/t5_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
+ test/t6_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd
+ test/t6_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd
+ test/t6_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd
+-test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
+-test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
+-test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
+-test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
++test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
++test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
++test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
++test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
+ DROP TABLE t1_restart;
+ DROP TABLE t2_restart;
+ DROP TABLE t3_restart;
+@@ -418,15 +418,15 @@
+ ALTER TABLE t7_restart TRUNCATE PARTITION p1;
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
+ test/t6_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd
+ test/t6_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd
+-test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
+-test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
+-test/t5_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
++test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
++test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
++test/t5_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
+ test/t6_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd
+-test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
+-test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
++test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
++test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
+ INSERT INTO t5_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot');
+ INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart);
+ INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart);
+@@ -522,15 +522,15 @@
+ innodb_file_per_table ON
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
+ test/t6_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd
+ test/t6_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd
+-test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
+-test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
+-test/t5_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
++test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
++test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
++test/t5_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
+ test/t6_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd
+-test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
+-test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
++test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
++test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
+ SELECT count(*) FROM t5_restart;
+ count(*)
+ 8
+@@ -623,15 +623,15 @@
+ RENAME TABLE t7_restart TO t77_restart;
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
+ test/t66_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p0.ibd
+ test/t66_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p1.ibd
+-test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s0.ibd
+-test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s1.ibd
+-test/t55_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd
++test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s0.ibd
++test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s1.ibd
++test/t55_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd
+ test/t66_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p2.ibd
+-test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s2.ibd
+-test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s3.ibd
++test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s2.ibd
++test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s3.ibd
+ INSERT INTO t55_restart (SELECT 0, c2, c3, c4, c5 FROM t55_restart);
+ SELECT count(*) FROM t55_restart;
+ count(*)
+@@ -720,15 +720,15 @@
+ innodb_file_per_table ON
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
+ test/t66_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p0.ibd
+ test/t66_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p1.ibd
+-test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s0.ibd
+-test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s1.ibd
+-test/t55_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd
++test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s0.ibd
++test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s1.ibd
++test/t55_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd
+ test/t66_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p2.ibd
+-test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s2.ibd
+-test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s3.ibd
++test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s2.ibd
++test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s3.ibd
+ INSERT INTO t55_restart (SELECT 0, c2, c3, c4, c5 FROM t55_restart);
+ SELECT count(*) FROM t55_restart;
+ count(*)
+@@ -853,15 +853,15 @@
+ #
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t4_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/new_dir/test/t4_restart.ibd
+ test/t66_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p0.ibd
+ test/t66_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p1.ibd
+-test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p0#sp#s0.ibd
+-test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p0#sp#s1.ibd
+-test/t55_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t55_restart.ibd
++test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p0#sp#s0.ibd
++test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p0#sp#s1.ibd
++test/t55_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/new_dir/test/t55_restart.ibd
+ test/t66_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p2.ibd
+-test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p1#sp#s2.ibd
+-test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p1#sp#s3.ibd
++test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p1#sp#s2.ibd
++test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p1#sp#s3.ibd
+ INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart);
+ SELECT count(*) FROM t4_restart;
+ count(*)
+@@ -990,15 +990,15 @@
+ #
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
+ test/t66_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQLD_DATADIR/test/t66_restart#p#p0.ibd
+ test/t66_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQLD_DATADIR/test/t66_restart#p#p1.ibd
+-test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t77_restart#p#p0#sp#s0.ibd
+-test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t77_restart#p#p0#sp#s1.ibd
+-test/t55_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t55_restart.ibd
++test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t77_restart#p#p0#sp#s0.ibd
++test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t77_restart#p#p0#sp#s1.ibd
++test/t55_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t55_restart.ibd
+ test/t66_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQLD_DATADIR/test/t66_restart#p#p2.ibd
+-test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t77_restart#p#p1#sp#s2.ibd
+-test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t77_restart#p#p1#sp#s3.ibd
++test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t77_restart#p#p1#sp#s2.ibd
++test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t77_restart#p#p1#sp#s3.ibd
+ INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart);
+ SELECT count(*) FROM t4_restart;
+ count(*)
diff --git a/mysql-test/suite/innodb_zip/r/restart,strict_full_crc32.rdiff b/mysql-test/suite/innodb_zip/r/restart,strict_full_crc32.rdiff
new file mode 100644
index 00000000000..c2260f68d42
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/r/restart,strict_full_crc32.rdiff
@@ -0,0 +1,188 @@
+--- restart.result
++++ restart.reject
+@@ -211,18 +211,18 @@
+ test/t7_restart#p#p1#sp#s3 test/t7_restart#p#p1#sp#s3 97 8 Dynamic 0 Single
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t1_restart Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t1_restart.ibd
+-test/t2_restart Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t2_restart.ibd
++test/t1_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t1_restart.ibd
++test/t2_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t2_restart.ibd
+ test/t3_restart Single DEFAULT 2048 Compressed MYSQLD_DATADIR/test/t3_restart.ibd
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
+-test/t5_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
++test/t5_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
+ test/t6_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd
+ test/t6_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd
+ test/t6_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd
+-test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
+-test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
+-test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
+-test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
++test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
++test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
++test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
++test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
+ #
+ # Shutdown the server and list the tablespace OS files
+ #
+@@ -395,18 +395,18 @@
+ test/t7_restart#p#p1#sp#s3 test/t7_restart#p#p1#sp#s3 97 8 Dynamic 0 Single
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t1_restart Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t1_restart.ibd
+-test/t2_restart Single DEFAULT DEFAULT Compact or Redundant MYSQLD_DATADIR/test/t2_restart.ibd
++test/t1_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t1_restart.ibd
++test/t2_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t2_restart.ibd
+ test/t3_restart Single DEFAULT 2048 Compressed MYSQLD_DATADIR/test/t3_restart.ibd
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
+-test/t5_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
++test/t5_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
+ test/t6_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd
+ test/t6_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd
+ test/t6_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd
+-test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
+-test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
+-test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
+-test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
++test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
++test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
++test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
++test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
+ DROP TABLE t1_restart;
+ DROP TABLE t2_restart;
+ DROP TABLE t3_restart;
+@@ -418,15 +418,15 @@
+ ALTER TABLE t7_restart TRUNCATE PARTITION p1;
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
+ test/t6_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd
+ test/t6_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd
+-test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
+-test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
+-test/t5_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
++test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
++test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
++test/t5_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
+ test/t6_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd
+-test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
+-test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
++test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
++test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
+ INSERT INTO t5_restart VALUES (1000000000, 'MySQL', 'InnoDB', '2011-11-11', 'Read this after reboot');
+ INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart);
+ INSERT INTO t5_restart (SELECT 0, c2, c3, c4, c5 FROM t5_restart);
+@@ -522,15 +522,15 @@
+ innodb_file_per_table ON
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
+ test/t6_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p0.ibd
+ test/t6_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p1.ibd
+-test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
+-test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
+-test/t5_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
++test/t7_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s0.ibd
++test/t7_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p0#sp#s1.ibd
++test/t5_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t5_restart.ibd
+ test/t6_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t6_restart#p#p2.ibd
+-test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
+-test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
++test/t7_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s2.ibd
++test/t7_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t7_restart#p#p1#sp#s3.ibd
+ SELECT count(*) FROM t5_restart;
+ count(*)
+ 8
+@@ -623,15 +623,15 @@
+ RENAME TABLE t7_restart TO t77_restart;
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
+ test/t66_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p0.ibd
+ test/t66_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p1.ibd
+-test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s0.ibd
+-test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s1.ibd
+-test/t55_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd
++test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s0.ibd
++test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s1.ibd
++test/t55_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd
+ test/t66_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p2.ibd
+-test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s2.ibd
+-test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s3.ibd
++test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s2.ibd
++test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s3.ibd
+ INSERT INTO t55_restart (SELECT 0, c2, c3, c4, c5 FROM t55_restart);
+ SELECT count(*) FROM t55_restart;
+ count(*)
+@@ -720,15 +720,15 @@
+ innodb_file_per_table ON
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
+ test/t66_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p0.ibd
+ test/t66_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p1.ibd
+-test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s0.ibd
+-test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s1.ibd
+-test/t55_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd
++test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s0.ibd
++test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p0#sp#s1.ibd
++test/t55_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t55_restart.ibd
+ test/t66_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/alt_dir/test/t66_restart#p#p2.ibd
+-test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s2.ibd
+-test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s3.ibd
++test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s2.ibd
++test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/alt_dir/test/t77_restart#p#p1#sp#s3.ibd
+ INSERT INTO t55_restart (SELECT 0, c2, c3, c4, c5 FROM t55_restart);
+ SELECT count(*) FROM t55_restart;
+ count(*)
+@@ -853,15 +853,15 @@
+ #
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t4_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/new_dir/test/t4_restart.ibd
+ test/t66_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p0.ibd
+ test/t66_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p1.ibd
+-test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p0#sp#s0.ibd
+-test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p0#sp#s1.ibd
+-test/t55_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t55_restart.ibd
++test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p0#sp#s0.ibd
++test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p0#sp#s1.ibd
++test/t55_restart Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/new_dir/test/t55_restart.ibd
+ test/t66_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQL_TMP_DIR/new_dir/test/t66_restart#p#p2.ibd
+-test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p1#sp#s2.ibd
+-test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p1#sp#s3.ibd
++test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p1#sp#s2.ibd
++test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQL_TMP_DIR/new_dir/test/t77_restart#p#p1#sp#s3.ibd
+ INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart);
+ SELECT count(*) FROM t4_restart;
+ count(*)
+@@ -990,15 +990,15 @@
+ #
+ === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
+ Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
+-test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
++test/t4_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t4_restart.ibd
+ test/t66_restart#p#p0 Single DEFAULT DEFAULT Compressed MYSQLD_DATADIR/test/t66_restart#p#p0.ibd
+ test/t66_restart#p#p1 Single DEFAULT DEFAULT Compressed MYSQLD_DATADIR/test/t66_restart#p#p1.ibd
+-test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t77_restart#p#p0#sp#s0.ibd
+-test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t77_restart#p#p0#sp#s1.ibd
+-test/t55_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t55_restart.ibd
++test/t77_restart#p#p0#sp#s0 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t77_restart#p#p0#sp#s0.ibd
++test/t77_restart#p#p0#sp#s1 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t77_restart#p#p0#sp#s1.ibd
++test/t55_restart Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t55_restart.ibd
+ test/t66_restart#p#p2 Single DEFAULT DEFAULT Compressed MYSQLD_DATADIR/test/t66_restart#p#p2.ibd
+-test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t77_restart#p#p1#sp#s2.ibd
+-test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t77_restart#p#p1#sp#s3.ibd
++test/t77_restart#p#p1#sp#s2 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t77_restart#p#p1#sp#s2.ibd
++test/t77_restart#p#p1#sp#s3 Single DEFAULT DEFAULT NULL MYSQLD_DATADIR/test/t77_restart#p#p1#sp#s3.ibd
+ INSERT INTO t4_restart (SELECT 0, c2, c3, c4, c5 FROM t4_restart);
+ SELECT count(*) FROM t4_restart;
+ count(*)
diff --git a/mysql-test/suite/innodb_zip/r/restart.result b/mysql-test/suite/innodb_zip/r/restart.result
index 262c34df5fb..cba587e0c58 100644
--- a/mysql-test/suite/innodb_zip/r/restart.result
+++ b/mysql-test/suite/innodb_zip/r/restart.result
@@ -263,6 +263,7 @@ t7_restart#p#p1#sp#s3.ibd
#
# Start the server and show that tables are still visible and accessible.
#
+# restart
SHOW VARIABLES LIKE 'innodb_file_per_table';
Variable_name Value
innodb_file_per_table ON
@@ -517,6 +518,7 @@ t7_restart#p#p1#sp#s3.ibd
#
# Start the server and show the tablespaces.
#
+# restart
SHOW VARIABLES LIKE 'innodb_file_per_table';
Variable_name Value
innodb_file_per_table ON
@@ -715,6 +717,7 @@ t77_restart#p#p1#sp#s3.ibd
#
# Restart the server
#
+# restart
SHOW VARIABLES LIKE 'innodb_file_per_table';
Variable_name Value
innodb_file_per_table ON
@@ -851,6 +854,7 @@ t77_restart#p#p1#sp#s3.ibd
#
# Start the server and check tablespaces.
#
+# restart
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQL_TMP_DIR/new_dir/test/t4_restart.ibd
@@ -988,6 +992,7 @@ t77_restart.par
#
# Start the server and check tablespaces.
#
+# restart
=== information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
Space_Name Space_Type Page_Size Zip_Size Formats_Permitted Path
test/t4_restart Single DEFAULT DEFAULT Dynamic MYSQLD_DATADIR/test/t4_restart.ibd
diff --git a/mysql-test/suite/innodb_zip/r/wl5522_debug_zip.result b/mysql-test/suite/innodb_zip/r/wl5522_debug_zip.result
index 394a2ea1f09..b3a4ad2b0ba 100644
--- a/mysql-test/suite/innodb_zip/r/wl5522_debug_zip.result
+++ b/mysql-test/suite/innodb_zip/r/wl5522_debug_zip.result
@@ -107,12 +107,6 @@ ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
ERROR HY000: Index for table 't1' is corrupt; try to repair it
SET SESSION debug_dbug=@saved_debug_dbug;
restore: t1 .ibd and .cfg files
-SET SESSION debug_dbug="+d,ib_import_set_max_rowid_failure";
-ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
-ERROR HY000: Index for table 't1' is corrupt; try to repair it
-SET SESSION debug_dbug=@saved_debug_dbug;
-unlink: t1.ibd
-unlink: t1.cfg
DROP TABLE test_wl5522.t1;
CREATE TABLE test_wl5522.t1 (
c1 BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY,
diff --git a/mysql-test/suite/innodb_zip/r/wl5522_zip.result b/mysql-test/suite/innodb_zip/r/wl5522_zip.result
index f57e2191d9f..03bfd2cac7a 100644
--- a/mysql-test/suite/innodb_zip/r/wl5522_zip.result
+++ b/mysql-test/suite/innodb_zip/r/wl5522_zip.result
@@ -33,6 +33,7 @@ db.opt
t1.frm
t1.ibd
# Restarting server
+# restart
# Done restarting server
FLUSH TABLE t1 FOR EXPORT;
# List before copying files
@@ -56,6 +57,7 @@ a b c
1462 Devotion asdfuihknaskdf
1461 Cavalry ..asdasdfaeraf
# Restarting server
+# restart
# Done restarting server
# List before t1 DISCARD
db.opt
diff --git a/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result b/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result
index 509ffe91de5..3b98527250b 100644
--- a/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result
+++ b/mysql-test/suite/innodb_zip/r/wl6347_comp_indx_stat.result
@@ -320,6 +320,7 @@ AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 5242880
+# restart
# set the flag on (default off)
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
@@ -665,6 +666,7 @@ AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 2097152
+# restart
# set the flag on (default off)
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
@@ -1962,6 +1964,7 @@ AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 65536
+# restart
# set the flag on (default off)
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
@@ -2309,6 +2312,7 @@ AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 65536
+# restart
# set the flag on (default off)
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
@@ -5109,6 +5113,7 @@ AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 65536
+# restart
# set the flag on (default off)
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
@@ -6729,6 +6734,7 @@ AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 65536
+# restart
# set the flag on (default off)
SET GLOBAL innodb_cmp_per_index_enabled=ON;
# set the flags
diff --git a/mysql-test/suite/innodb_zip/t/16k.test b/mysql-test/suite/innodb_zip/t/16k.test
index 6a829e6bbb6..c3da0de3acc 100644
--- a/mysql-test/suite/innodb_zip/t/16k.test
+++ b/mysql-test/suite/innodb_zip/t/16k.test
@@ -2,6 +2,8 @@
--source include/big_test.inc
--source include/have_innodb.inc
--source include/have_innodb_16k.inc
+--source include/innodb_checksum_algorithm.inc
+
SET default_storage_engine=InnoDB;
--disable_query_log
@@ -339,6 +341,7 @@ SELECT a,
LENGTH(b), b=LEFT(REPEAT(d,100*a), 65535),LENGTH(c), c=REPEAT(d,20*a), d FROM t1;
SHOW CREATE TABLE t1;
CHECK TABLE t1;
+--replace_column 9 #
EXPLAIN SELECT * FROM t1 WHERE b LIKE 'adfd%';
# The following tests are disabled because of the introduced timeouts for
@@ -689,6 +692,7 @@ set @optimizer_switch_saved=@@optimizer_switch;
SET SESSION optimizer_switch='derived_merge=off';
SET SESSION sort_buffer_size = 1024*36;
+--replace_column 9 #
EXPLAIN
SELECT COUNT(*) FROM
(SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
diff --git a/mysql-test/suite/innodb_zip/t/8k.test b/mysql-test/suite/innodb_zip/t/8k.test
index e10d48cb284..711eb594d4a 100644
--- a/mysql-test/suite/innodb_zip/t/8k.test
+++ b/mysql-test/suite/innodb_zip/t/8k.test
@@ -2,6 +2,8 @@
--source include/have_innodb.inc
--source include/have_innodb_8k.inc
+--source include/innodb_checksum_algorithm.inc
+
SET default_storage_engine=InnoDB;
--disable_query_log
diff --git a/mysql-test/suite/innodb_zip/t/innochecksum.combinations b/mysql-test/suite/innodb_zip/t/innochecksum.combinations
new file mode 100644
index 00000000000..79e5f7836ed
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/t/innochecksum.combinations
@@ -0,0 +1,5 @@
+[crc32]
+--innodb-checksum-algorithm=crc32
+
+[full_crc32]
+--innodb-checksum-algorithm=full_crc32
diff --git a/mysql-test/suite/innodb_zip/t/innochecksum.test b/mysql-test/suite/innodb_zip/t/innochecksum.test
index fec8acf52c4..ea5654056d9 100644
--- a/mysql-test/suite/innodb_zip/t/innochecksum.test
+++ b/mysql-test/suite/innodb_zip/t/innochecksum.test
@@ -13,6 +13,8 @@
let MYSQLD_BASEDIR= `SELECT @@basedir`;
let MYSQLD_DATADIR= `SELECT @@datadir`;
let SEARCH_FILE= $MYSQLTEST_VARDIR/log/my_restart.err;
+let $checksum_algorithm = `SELECT @@innodb_checksum_algorithm`;
+
call mtr.add_suppression("InnoDB: Unable to read tablespace .* page no .* into the buffer pool after 100 attempts");
call mtr.add_suppression("InnoDB: Warning: database page corruption or a failed");
@@ -79,22 +81,34 @@ let SEARCH_PATTERN= Error: --strict-check option cannot be used together with --
--echo [9]: check the innochecksum with full form --strict-check=innodb
# Server Default checksum = crc32
---error 1
+let $error_code = 0;
+
+if ($checksum_algorithm == "crc32")
+{
+ let $error_code = 1;
+}
+
+if ($checksum_algorithm == "strict_crc32")
+{
+ let $error_code = 1;
+}
+
+--error $error_code
--exec $INNOCHECKSUM --strict-check=innodb $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE
--echo [10]: check the innochecksum with full form --strict-check=none
--echo # when server Default checksum=crc32
---error 1
+--error $error_code
--exec $INNOCHECKSUM --strict-check=none $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE
--echo [11]: check the innochecksum with short form -C innodb
--echo # when server Default checksum=crc32
---error 1
+--error $error_code
--exec $INNOCHECKSUM -C innodb $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE
--echo [12]: check the innochecksum with short form -C none
--echo # when server Default checksum=crc32
---error 1
+--error $error_code
--exec $INNOCHECKSUM -C none $MYSQLD_DATADIR/test/tab1.ibd 2> $SEARCH_FILE
--echo [13]: check strict-check with invalid values
diff --git a/mysql-test/suite/innodb_zip/t/innochecksum_2.test b/mysql-test/suite/innodb_zip/t/innochecksum_2.test
index 330bb81ba75..1efe653cd1a 100644
--- a/mysql-test/suite/innodb_zip/t/innochecksum_2.test
+++ b/mysql-test/suite/innodb_zip/t/innochecksum_2.test
@@ -18,6 +18,7 @@ call mtr.add_suppression("\\[Warning\\] InnoDB: Difficult to find free blocks in
let MYSQLD_BASEDIR= `SELECT @@basedir`;
let MYSQLD_DATADIR= `SELECT @@datadir`;
let SEARCH_FILE= $MYSQLTEST_VARDIR/log/my_restart.err;
+let $restart_noprint=2;
SET GLOBAL innodb_compression_level=0;
SELECT @@innodb_compression_level;
diff --git a/mysql-test/suite/innodb_zip/t/innochecksum_3.combinations b/mysql-test/suite/innodb_zip/t/innochecksum_3.combinations
new file mode 100644
index 00000000000..79e5f7836ed
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/t/innochecksum_3.combinations
@@ -0,0 +1,5 @@
+[crc32]
+--innodb-checksum-algorithm=crc32
+
+[full_crc32]
+--innodb-checksum-algorithm=full_crc32
diff --git a/mysql-test/suite/innodb_zip/t/innochecksum_3.opt b/mysql-test/suite/innodb_zip/t/innochecksum_3.opt
new file mode 100644
index 00000000000..287c7ed5280
--- /dev/null
+++ b/mysql-test/suite/innodb_zip/t/innochecksum_3.opt
@@ -0,0 +1 @@
+--innodb_checksum_algorithm=crc32
diff --git a/mysql-test/suite/innodb_zip/t/restart.test b/mysql-test/suite/innodb_zip/t/restart.test
index 05ac8274278..e0e42a682ab 100644
--- a/mysql-test/suite/innodb_zip/t/restart.test
+++ b/mysql-test/suite/innodb_zip/t/restart.test
@@ -4,6 +4,8 @@
--source include/innodb_page_size_small.inc
--source include/have_partition.inc
--source include/not_embedded.inc
+--source include/innodb_checksum_algorithm.inc
+
SET default_storage_engine=InnoDB;
LET $MYSQLD_DATADIR = `select @@datadir`;
LET $INNODB_PAGE_SIZE = `select @@innodb_page_size`;
diff --git a/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test b/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test
index 986c0508891..c81eca69db8 100644
--- a/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test
+++ b/mysql-test/suite/innodb_zip/t/wl5522_debug_zip.test
@@ -282,20 +282,6 @@ do "$ENV{MTR_SUITE_DIR}/../innodb/include/innodb-util.pl";
ib_restore_tablespaces("test_wl5522", "t1");
EOF
-# Test failure after importing the cluster index
-SET SESSION debug_dbug="+d,ib_import_set_max_rowid_failure";
-
---error ER_NOT_KEYFILE
-ALTER TABLE test_wl5522.t1 IMPORT TABLESPACE;
-
-SET SESSION debug_dbug=@saved_debug_dbug;
-
-# Left over from the failed IMPORT
-perl;
-do "$ENV{MTR_SUITE_DIR}/../innodb/include/innodb-util.pl";
-ib_unlink_tablespace("test_wl5522", "t1");
-EOF
-
DROP TABLE test_wl5522.t1;
--disable_query_log
diff --git a/mysql-test/suite/large_tests/r/rpl_slave_net_timeout.result b/mysql-test/suite/large_tests/r/rpl_slave_net_timeout.result
index ad704c0c683..5f3ad54662b 100644
--- a/mysql-test/suite/large_tests/r/rpl_slave_net_timeout.result
+++ b/mysql-test/suite/large_tests/r/rpl_slave_net_timeout.result
@@ -17,7 +17,11 @@ include/start_slave.inc
include/stop_slave.inc
connection master;
select event_time from (select event_time from mysql.general_log as t_1 where command_type like 'Connect' order by event_time desc limit 2) as t_2 order by event_time desc limit 1 into @ts_last;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select event_time from (select event_time from mysql.general_log as t_1 where command_type like 'Connect' order by event_time desc limit 2) as t_2 order by event_time asc limit 1 into @ts_prev;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
include/assert.inc [time between last reconnection and the reconnection before that should be >= slave_net_timeout]
connection master;
set @@global.general_log = @save_general_log;
diff --git a/mysql-test/suite/maria/disabled.def b/mysql-test/suite/maria/disabled.def
new file mode 100644
index 00000000000..fd72451f61b
--- /dev/null
+++ b/mysql-test/suite/maria/disabled.def
@@ -0,0 +1,13 @@
+##############################################################################
+#
+# List the test cases that are to be disabled temporarily.
+#
+# Separate the test case name and the comment with ':'.
+#
+# <testcasename> : BUG#<xxxx> <date disabled> <disabler> <comment>
+#
+# Do not use any TAB characters for whitespace.
+#
+##############################################################################
+
+small_blocksize: Can't be used when Aria is used for privilege tables
diff --git a/mysql-test/suite/maria/icp.result b/mysql-test/suite/maria/icp.result
index 8fc93e861a7..14517fee47d 100644
--- a/mysql-test/suite/maria/icp.result
+++ b/mysql-test/suite/maria/icp.result
@@ -409,7 +409,7 @@ WHERE (pk BETWEEN 4 AND 5 OR pk < 2) AND c1 < 240
ORDER BY c1
LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY,k1 k1 5 NULL 4 Using where
+1 SIMPLE t1 range PRIMARY,k1 PRIMARY 4 NULL 3 Using index condition; Using where; Rowid-ordered scan; Using filesort
DROP TABLE t1;
#
#
@@ -590,6 +590,12 @@ i1 INTEGER NOT NULL,
PRIMARY KEY (pk)
);
INSERT INTO t2 VALUES (4,1);
+ANALYZE TABLE t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status OK
EXPLAIN
SELECT t1.d1, t2.pk, t2.i1 FROM t1 STRAIGHT_JOIN t2 ON t2.i1
WHERE t2.pk <> t1.d1 AND t2.pk = 4;
@@ -795,6 +801,12 @@ INSERT INTO t2 (g,h) VALUES
(0,'p'),(0,'f'),(0,'p'),(7,'d'),(7,'f'),(5,'j'),
(3,'e'),(1,'u'),(4,'v'),(9,'u'),(6,'i'),(1,'x'),
(7,'f'),(5,'j'),(3,'e'),(1,'u'),(4,'v'),(9,'u');
+ANALYZE TABLE t1,t2;
+Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
+test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze status Table is already up to date
SET @save_optimize_switch=@@optimizer_switch;
SET optimizer_switch='materialization=on';
EXPLAIN
@@ -804,7 +816,7 @@ AND (EXISTS (SELECT * FROM t1, t2 WHERE a = f AND h <= t.e AND a > t.b)
OR a = 0 AND h < 'z' );
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t ALL PRIMARY,c NULL NULL NULL 64 Using where
-1 PRIMARY t2 ref g g 5 test.t.c 19 Using where
+1 PRIMARY t2 ref g g 5 test.t.c 18 Using where
2 DEPENDENT SUBQUERY t1 index PRIMARY PRIMARY 4 NULL 64 Using where; Using index
2 DEPENDENT SUBQUERY t2 eq_ref PRIMARY PRIMARY 4 test.t1.a 1 Using where
SELECT COUNT(*) FROM t1 AS t, t2
diff --git a/mysql-test/suite/maria/maria-autozerofill.result b/mysql-test/suite/maria/maria-autozerofill.result
index 76ccc67ebb5..064ac9e6496 100644
--- a/mysql-test/suite/maria/maria-autozerofill.result
+++ b/mysql-test/suite/maria/maria-autozerofill.result
@@ -56,6 +56,7 @@ mysqltest.t3 optimize Note Zerofilling moved table ./mysqltest/t3
mysqltest.t3 optimize status OK
analyze table t4;
Table Op Msg_type Msg_text
+mysqltest.t4 analyze status Engine-independent statistics collected
mysqltest.t4 analyze Note Zerofilling moved table ./mysqltest/t4
mysqltest.t4 analyze status OK
repair table t5;
diff --git a/mysql-test/suite/maria/maria-gis-rtree-dynamic.result b/mysql-test/suite/maria/maria-gis-rtree-dynamic.result
index 8bea5edb6fb..2f8c1b9408c 100644
--- a/mysql-test/suite/maria/maria-gis-rtree-dynamic.result
+++ b/mysql-test/suite/maria/maria-gis-rtree-dynamic.result
@@ -749,6 +749,7 @@ Table Op Msg_type Msg_text
test.t1 check status OK
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
drop table t1;
CREATE TABLE t1 (
diff --git a/mysql-test/suite/maria/maria-gis-rtree-trans.result b/mysql-test/suite/maria/maria-gis-rtree-trans.result
index 4a2f97fbd41..1c0d9e756a4 100644
--- a/mysql-test/suite/maria/maria-gis-rtree-trans.result
+++ b/mysql-test/suite/maria/maria-gis-rtree-trans.result
@@ -749,6 +749,7 @@ Table Op Msg_type Msg_text
test.t1 check status OK
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
drop table t1;
CREATE TABLE t1 (
diff --git a/mysql-test/suite/maria/maria-gis-rtree.result b/mysql-test/suite/maria/maria-gis-rtree.result
index 901a7bee397..e39430a2efb 100644
--- a/mysql-test/suite/maria/maria-gis-rtree.result
+++ b/mysql-test/suite/maria/maria-gis-rtree.result
@@ -749,6 +749,7 @@ Table Op Msg_type Msg_text
test.t1 check status OK
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
drop table t1;
CREATE TABLE t1 (
diff --git a/mysql-test/suite/maria/maria-recover.result b/mysql-test/suite/maria/maria-recover.result
index 4d0b4317afe..8a33307b2b0 100644
--- a/mysql-test/suite/maria/maria-recover.result
+++ b/mysql-test/suite/maria/maria-recover.result
@@ -27,7 +27,7 @@ ThursdayMorningsMarket
ThursdayMorningsMarketb
Warnings:
Error 145 t_corrupted2' is marked as crashed and should be repaired
-Error 1034 1 client is using or hasn't closed the table properly
+Warning 1034 1 client is using or hasn't closed the table properly
Error 1034 Wrong base information on indexpage at page: 1
select * from t_corrupted2;
a
diff --git a/mysql-test/suite/maria/maria-recover.test b/mysql-test/suite/maria/maria-recover.test
index 0f9f5e9cd26..893cd5e601d 100644
--- a/mysql-test/suite/maria/maria-recover.test
+++ b/mysql-test/suite/maria/maria-recover.test
@@ -2,6 +2,18 @@
--source include/have_maria.inc
+#
+# Ensure that we don't get warnings from mysql.proc (used by check_mysqld)
+#
+
+--disable_query_log
+--disable_warnings
+--disable_result_log
+select count(*) from mysql.proc;
+--enable_result_log
+--enable_warnings
+--enable_query_log
+
--disable_query_log
# Note: \\. matches a single period. We use '.' as directory separator to
# account for Unix and Windows variation.
diff --git a/mysql-test/suite/maria/maria-ucs2.result b/mysql-test/suite/maria/maria-ucs2.result
index 7499b37279b..1a54ab78081 100644
--- a/mysql-test/suite/maria/maria-ucs2.result
+++ b/mysql-test/suite/maria/maria-ucs2.result
@@ -1,6 +1,6 @@
select * from INFORMATION_SCHEMA.ENGINES where ENGINE="ARIA";
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
-Aria YES Crash-safe tables with MyISAM heritage NO NO NO
+Aria YES Crash-safe tables with MyISAM heritage. Used for internal temporary tables and privilege tables NO NO NO
set global storage_engine=aria;
set session storage_engine=aria;
drop table if exists t1;
diff --git a/mysql-test/suite/maria/maria.result b/mysql-test/suite/maria/maria.result
index fcfecd31eb9..3e8575d2689 100644
--- a/mysql-test/suite/maria/maria.result
+++ b/mysql-test/suite/maria/maria.result
@@ -1,7 +1,7 @@
call mtr.add_suppression("Can't find record in '.*'");
select * from INFORMATION_SCHEMA.ENGINES where ENGINE="ARIA";
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
-Aria YES Crash-safe tables with MyISAM heritage NO NO NO
+Aria YES Crash-safe tables with MyISAM heritage. Used for internal temporary tables and privilege tables NO NO NO
set global storage_engine=aria;
set session storage_engine=aria;
set global aria_page_checksum=0;
@@ -353,8 +353,6 @@ test.t1 check status OK
drop table t1;
CREATE TABLE t1 (a varchar(255), b varchar(255), c varchar(255), d varchar(255), e varchar(255), KEY t1 (a, b, c, d, e));
ERROR 42000: Specified key was too long; max key length is 1000 bytes
-CREATE TABLE t1 (a varchar(32000), unique key(a));
-ERROR 42000: Specified key was too long; max key length is 1000 bytes
CREATE TABLE t1 (a varchar(1), b varchar(1), key (a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b));
ERROR 42000: Too many key parts specified; max 32 parts allowed
CREATE TABLE t1 (a varchar(255), b varchar(255), c varchar(255), d varchar(255), e varchar(255));
@@ -739,6 +737,7 @@ insert into t1 values (0),(1),(2),(3),(4);
insert into t1 select NULL from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -752,6 +751,8 @@ show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 1 a 1 a A 10 NULL NULL YES BTREE
set aria_stats_method=nulls_equal;
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= COMPLEMENTARY;
show variables like 'aria_stats_method';
Variable_name Value
aria_stats_method nulls_equal
@@ -759,6 +760,7 @@ insert into t1 values (11);
delete from t1 where a=11;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -771,6 +773,7 @@ test.t1 check status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 1 a 1 a A 5 NULL NULL YES BTREE
+set @@use_stat_tables= @save_use_stat_tables;
set aria_stats_method=DEFAULT;
show variables like 'aria_stats_method';
Variable_name Value
@@ -779,6 +782,7 @@ insert into t1 values (11);
delete from t1 where a=11;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -796,6 +800,8 @@ set aria_stats_method=nulls_ignored;
show variables like 'aria_stats_method';
Variable_name Value
aria_stats_method nulls_ignored
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= COMPLEMENTARY;
create table t1 (
a char(3), b char(4), c char(5), d char(6),
key(a,b,c,d)
@@ -806,6 +812,7 @@ insert into t1 values ('bce','def1', 'yuu', NULL);
insert into t1 values ('bce','def2', NULL, 'quux');
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -816,6 +823,7 @@ t1 1 a 4 d A 4 NULL NULL YES BTREE
delete from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show index from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -823,6 +831,7 @@ t1 1 a 1 a A 0 NULL NULL YES BTREE
t1 1 a 2 b A 0 NULL NULL YES BTREE
t1 1 a 3 c A 0 NULL NULL YES BTREE
t1 1 a 4 d A 0 NULL NULL YES BTREE
+set @@use_stat_tables= @save_use_stat_tables;
set aria_stats_method=DEFAULT;
drop table t1;
create table t1(
@@ -1690,9 +1699,11 @@ create table t1 (a int, key(a));
insert into t1 values (1),(2),(3),(4),(NULL),(NULL),(NULL),(NULL);
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status Table is already up to date
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
diff --git a/mysql-test/suite/maria/maria.test b/mysql-test/suite/maria/maria.test
index 19aab4aa944..d5b9d839699 100644
--- a/mysql-test/suite/maria/maria.test
+++ b/mysql-test/suite/maria/maria.test
@@ -374,8 +374,6 @@ drop table t1;
--error 1071
CREATE TABLE t1 (a varchar(255), b varchar(255), c varchar(255), d varchar(255), e varchar(255), KEY t1 (a, b, c, d, e));
---error 1071
-CREATE TABLE t1 (a varchar(32000), unique key(a));
--error 1070
CREATE TABLE t1 (a varchar(1), b varchar(1), key (a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b,a,b));
CREATE TABLE t1 (a varchar(255), b varchar(255), c varchar(255), d varchar(255), e varchar(255));
@@ -701,6 +699,10 @@ show index from t1;
# Set nulls to be equal:
set aria_stats_method=nulls_equal;
+
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= COMPLEMENTARY;
+
show variables like 'aria_stats_method';
insert into t1 values (11);
delete from t1 where a=11;
@@ -714,6 +716,7 @@ delete from t1 where a=11;
check table t1;
show index from t1;
+set @@use_stat_tables= @save_use_stat_tables;
# Set nulls back to be equal
set aria_stats_method=DEFAULT;
show variables like 'aria_stats_method';
@@ -735,6 +738,9 @@ drop table t1;
set aria_stats_method=nulls_ignored;
show variables like 'aria_stats_method';
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= COMPLEMENTARY;
+
create table t1 (
a char(3), b char(4), c char(5), d char(6),
key(a,b,c,d)
@@ -753,6 +759,7 @@ delete from t1;
analyze table t1;
show index from t1;
+set @@use_stat_tables= @save_use_stat_tables;
set aria_stats_method=DEFAULT;
drop table t1;
diff --git a/mysql-test/suite/maria/maria3.result b/mysql-test/suite/maria/maria3.result
index 6099dc9c982..ba8b64f9514 100644
--- a/mysql-test/suite/maria/maria3.result
+++ b/mysql-test/suite/maria/maria3.result
@@ -1,6 +1,6 @@
select * from INFORMATION_SCHEMA.ENGINES where ENGINE="ARIA";
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
-Aria YES Crash-safe tables with MyISAM heritage NO NO NO
+Aria YES Crash-safe tables with MyISAM heritage. Used for internal temporary tables and privilege tables NO NO NO
set global storage_engine=aria;
set session storage_engine=aria;
set global aria_page_checksum=0;
@@ -209,11 +209,13 @@ create table t2 like t1;
insert into t2 select * from t1;
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
delete from t2;
insert into t2 select * from t1;
analyze table t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status Table is already up to date
drop table t1,t2;
create table t1 (a bigint auto_increment, primary key(a), b char(255), c varchar(20000));
diff --git a/mysql-test/suite/maria/ps_maria.result b/mysql-test/suite/maria/ps_maria.result
index bd5f2b4c55b..6f5d557569b 100644
--- a/mysql-test/suite/maria/ps_maria.result
+++ b/mysql-test/suite/maria/ps_maria.result
@@ -1161,7 +1161,7 @@ def possible_keys 253 4_OR_8_K 0 Y 0 39 8
def key 253 64 0 Y 0 39 8
def key_len 253 4_OR_8_K 0 Y 0 39 8
def ref 253 2048 0 Y 0 39 8
-def rows 8 10 1 Y 32928 0 63
+def rows 253 64 1 Y 0 39 8
def Extra 253 255 0 N 1 39 8
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t9 ALL NULL NULL NULL NULL 2
diff --git a/mysql-test/suite/maria/system_tables.result b/mysql-test/suite/maria/system_tables.result
new file mode 100644
index 00000000000..c9944482638
--- /dev/null
+++ b/mysql-test/suite/maria/system_tables.result
@@ -0,0 +1,14 @@
+CREATE TABLE t1 (i INT) ENGINE=Aria;
+LOCK TABLE t1 WRITE;
+connect con1,localhost,root,,test;
+SET lock_wait_timeout= 2;
+FLUSH TABLES;
+FLUSH TABLES t1;
+connection default;
+CALL non_existing_sp;
+ERROR 42000: PROCEDURE test.non_existing_sp does not exist
+connection con1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+disconnect con1;
+connection default;
+DROP TABLE t1;
diff --git a/mysql-test/suite/maria/system_tables.test b/mysql-test/suite/maria/system_tables.test
new file mode 100644
index 00000000000..950989fa5ca
--- /dev/null
+++ b/mysql-test/suite/maria/system_tables.test
@@ -0,0 +1,27 @@
+#
+# Test related to Aria system tables
+#
+
+#
+# MDEV-16986 Uninitialized mutex, SIGSEGV and assorted assertion failures in
+# Aria code
+#
+
+CREATE TABLE t1 (i INT) ENGINE=Aria;
+LOCK TABLE t1 WRITE;
+
+--connect (con1,localhost,root,,test)
+SET lock_wait_timeout= 2;
+FLUSH TABLES;
+--send FLUSH TABLES t1
+--connection default
+--error ER_SP_DOES_NOT_EXIST
+CALL non_existing_sp;
+--connection con1
+--error ER_LOCK_WAIT_TIMEOUT
+--reap
+
+# Cleanup
+--disconnect con1
+--connection default
+DROP TABLE t1;
diff --git a/mysql-test/suite/mariabackup/absolute_ibdata_paths.result b/mysql-test/suite/mariabackup/absolute_ibdata_paths.result
index fe211e71f2f..a29d9af65de 100644
--- a/mysql-test/suite/mariabackup/absolute_ibdata_paths.result
+++ b/mysql-test/suite/mariabackup/absolute_ibdata_paths.result
@@ -1,9 +1,11 @@
CREATE TABLE t(i INT) ENGINE INNODB;
INSERT INTO t VALUES(1);
+# restart
# xtrabackup backup
# remove datadir
# xtrabackup copy back
# restart server
+# restart
SELECT * from t;
i
1
diff --git a/mysql-test/suite/mariabackup/apply-log-only-incr.result b/mysql-test/suite/mariabackup/apply-log-only-incr.result
index 917a9647511..f84e8ea5607 100644
--- a/mysql-test/suite/mariabackup/apply-log-only-incr.result
+++ b/mysql-test/suite/mariabackup/apply-log-only-incr.result
@@ -22,7 +22,7 @@ count(*)
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart: --innodb-force-recovery=3
SELECT COUNT(*) FROM t;
COUNT(*)
1
@@ -30,6 +30,7 @@ SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
SELECT COUNT(*) FROM t;
COUNT(*)
201
+# restart
SELECT * FROM t;
a
0
diff --git a/mysql-test/suite/mariabackup/backup_ssl.result b/mysql-test/suite/mariabackup/backup_ssl.result
index 6e59da6d43a..099b8d42a85 100644
--- a/mysql-test/suite/mariabackup/backup_ssl.result
+++ b/mysql-test/suite/mariabackup/backup_ssl.result
@@ -5,5 +5,5 @@ FLUSH PRIVILEGES;
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
DROP USER backup_user;
diff --git a/mysql-test/suite/mariabackup/compress_qpress.result b/mysql-test/suite/mariabackup/compress_qpress.result
index 69427f5fc4d..457cb2f10f9 100644
--- a/mysql-test/suite/mariabackup/compress_qpress.result
+++ b/mysql-test/suite/mariabackup/compress_qpress.result
@@ -9,7 +9,8 @@ t.ibd.qp
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
+# restart
SELECT * FROM t;
i
1
diff --git a/mysql-test/suite/mariabackup/create_during_backup.result b/mysql-test/suite/mariabackup/create_during_backup.result
index f99c2633ab5..374c3dfc137 100644
--- a/mysql-test/suite/mariabackup/create_during_backup.result
+++ b/mysql-test/suite/mariabackup/create_during_backup.result
@@ -3,7 +3,7 @@
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT COUNT(*) from t1;
COUNT(*)
10000
diff --git a/mysql-test/suite/mariabackup/create_with_data_directory_during_backup.result b/mysql-test/suite/mariabackup/create_with_data_directory_during_backup.result
index a06150bcb6a..be97675c40f 100644
--- a/mysql-test/suite/mariabackup/create_with_data_directory_during_backup.result
+++ b/mysql-test/suite/mariabackup/create_with_data_directory_during_backup.result
@@ -4,7 +4,7 @@ DROP TABLE t;
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * FROM t;
i
DROP TABLE t;
diff --git a/mysql-test/suite/mariabackup/data_directory.result b/mysql-test/suite/mariabackup/data_directory.result
index e7201918cbd..4e45127bd6a 100644
--- a/mysql-test/suite/mariabackup/data_directory.result
+++ b/mysql-test/suite/mariabackup/data_directory.result
@@ -6,7 +6,7 @@ DROP TABLE t;
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * FROM t;
a
1
diff --git a/mysql-test/suite/mariabackup/drop_table_during_backup.result b/mysql-test/suite/mariabackup/drop_table_during_backup.result
index 8a77945e586..a0fa9db5b94 100644
--- a/mysql-test/suite/mariabackup/drop_table_during_backup.result
+++ b/mysql-test/suite/mariabackup/drop_table_during_backup.result
@@ -7,7 +7,7 @@ CREATE TABLE t5 (i int) ENGINE=INNODB;
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
CREATE TABLE t1(i int);
DROP TABLE t1;
CREATE TABLE t2(i int);
diff --git a/mysql-test/suite/mariabackup/encrypted_page_compressed.result b/mysql-test/suite/mariabackup/encrypted_page_compressed.result
index 293addd2b03..de4c966caf4 100644
--- a/mysql-test/suite/mariabackup/encrypted_page_compressed.result
+++ b/mysql-test/suite/mariabackup/encrypted_page_compressed.result
@@ -1,7 +1,9 @@
call mtr.add_suppression("InnoDB: Table `test`.`t1` has an unreadable root page");
+call mtr.add_suppression("InnoDB: Encrypted page .* in file .*test.t1\\.ibd looks corrupted; key_version=1");
CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT, c char(200)) ENGINE=InnoDB page_compressed=yes encrypted=yes;
insert into t1(b, c) values("mariadb", "mariabackup");
# Corrupt the table
+# restart
# xtrabackup backup
FOUND 1 /Database page corruption detected.*/ in backup.log
drop table t1;
diff --git a/mysql-test/suite/mariabackup/encrypted_page_compressed.test b/mysql-test/suite/mariabackup/encrypted_page_compressed.test
index b0bcdd9e33b..54fffb7d08f 100644
--- a/mysql-test/suite/mariabackup/encrypted_page_compressed.test
+++ b/mysql-test/suite/mariabackup/encrypted_page_compressed.test
@@ -1,5 +1,6 @@
source include/have_file_key_management.inc;
call mtr.add_suppression("InnoDB: Table `test`.`t1` has an unreadable root page");
+call mtr.add_suppression("InnoDB: Encrypted page .* in file .*test.t1\\.ibd looks corrupted; key_version=1");
CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT, c char(200)) ENGINE=InnoDB page_compressed=yes encrypted=yes;
insert into t1(b, c) values("mariadb", "mariabackup");
diff --git a/mysql-test/suite/mariabackup/encrypted_page_corruption.opt b/mysql-test/suite/mariabackup/encrypted_page_corruption.opt
index 74a6450a1ef..bc6d53b6dd9 100644
--- a/mysql-test/suite/mariabackup/encrypted_page_corruption.opt
+++ b/mysql-test/suite/mariabackup/encrypted_page_corruption.opt
@@ -4,3 +4,4 @@
--loose-file-key-management-filekey=FILE:$MTR_SUITE_DIR/filekeys-data.key
--loose-file-key-management-filename=$MTR_SUITE_DIR/filekeys-data.enc
--loose-file-key-management-encryption-algorithm=aes_cbc
+--innodb-checksum-algorithm=crc32
diff --git a/mysql-test/suite/mariabackup/encrypted_page_corruption.result b/mysql-test/suite/mariabackup/encrypted_page_corruption.result
index 9a6202a5ea1..b328d361cd6 100644
--- a/mysql-test/suite/mariabackup/encrypted_page_corruption.result
+++ b/mysql-test/suite/mariabackup/encrypted_page_corruption.result
@@ -3,6 +3,7 @@ call mtr.add_suppression("\\[ERROR\\] InnoDB: Table `test`\\.`t1` has an unreada
CREATE TABLE t1(c VARCHAR(128)) ENGINE INNODB, encrypted=yes;
insert into t1 select repeat('a',100);
# Corrupt the table
+# restart
# xtrabackup backup
FOUND 1 /Database page corruption detected.*/ in backup.log
drop table t1;
diff --git a/mysql-test/suite/mariabackup/full_backup.result b/mysql-test/suite/mariabackup/full_backup.result
index c387f5328a7..690c5e64b4a 100644
--- a/mysql-test/suite/mariabackup/full_backup.result
+++ b/mysql-test/suite/mariabackup/full_backup.result
@@ -6,7 +6,7 @@ INSERT INTO t VALUES(2);
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * FROM t;
i
1
diff --git a/mysql-test/suite/mariabackup/huge_lsn.combinations b/mysql-test/suite/mariabackup/huge_lsn.combinations
new file mode 100644
index 00000000000..729380593f3
--- /dev/null
+++ b/mysql-test/suite/mariabackup/huge_lsn.combinations
@@ -0,0 +1,5 @@
+[strict_crc32]
+--innodb-checksum-algorithm=strict_crc32
+
+[strict_full_crc32]
+--innodb-checksum-algorithm=strict_full_crc32
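
As context for the new combinations file, a minimal sketch (not part of the commit; assumes a 10.4 server where the full_crc32 format is available) of how the checksum algorithm passed by each combination can also be set and inspected at runtime:

# Pick one of the algorithms the combinations exercise
SET GLOBAL innodb_checksum_algorithm = strict_full_crc32;
# Confirm the active setting
SELECT @@GLOBAL.innodb_checksum_algorithm;

The combinations framework runs the test once per option group, so both the traditional crc32 page format and the newer full_crc32 format (which the updated Perl block further below detects via FIL_SPACE_FLAGS) get exercised.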
diff --git a/mysql-test/suite/mariabackup/huge_lsn.result b/mysql-test/suite/mariabackup/huge_lsn.result
index e7c4cc9471d..82d743bbad4 100644
--- a/mysql-test/suite/mariabackup/huge_lsn.result
+++ b/mysql-test/suite/mariabackup/huge_lsn.result
@@ -1,6 +1,7 @@
#
# MDEV-13416 mariabackup fails with EFAULT "Bad Address"
#
+# restart
FOUND 1 /InnoDB: New log files created, LSN=175964\d{8}/ in mysqld.1.err
CREATE TABLE t(i INT) ENGINE INNODB;
INSERT INTO t VALUES(1);
@@ -11,7 +12,7 @@ INSERT INTO t VALUES(2);
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * FROM t;
i
1
diff --git a/mysql-test/suite/mariabackup/huge_lsn.test b/mysql-test/suite/mariabackup/huge_lsn.test
index dcdc118df6f..d41bb61a096 100644
--- a/mysql-test/suite/mariabackup/huge_lsn.test
+++ b/mysql-test/suite/mariabackup/huge_lsn.test
@@ -20,10 +20,12 @@ my $page;
die "Unable to read $file" unless sysread(FILE, $page, $ps) == $ps;
substr($page,26,8) = pack("NN", 4096, ~1024);
my $polynomial = 0x82f63b78; # CRC-32C
-my $ck= pack("N",mycrc32(substr($page, 4, 22), 0, $polynomial) ^
- mycrc32(substr($page, 38, $ps - 38 - 8), 0, $polynomial));
-substr($page,0,4)=$ck;
-substr($page,$ps-8,4)=$ck;
+my $full_crc32 = unpack("N",substr($page,54,4)) & 0x10; # FIL_SPACE_FLAGS
+if ($full_crc32)
+{
+ my $ck = mycrc32(substr($page, 0, $ps-4), 0, $polynomial);
+ substr($page, $ps-4, 4) = pack("N", $ck);
+}
sysseek(FILE, 0, 0) || die "Unable to rewind $file\n";
syswrite(FILE, $page, $ps)==$ps || die "Unable to write $file\n";
close(FILE) || die "Unable to close $file\n";
diff --git a/mysql-test/suite/mariabackup/include/restart_and_restore.inc b/mysql-test/suite/mariabackup/include/restart_and_restore.inc
index 9056867b07f..2d1e5493957 100644
--- a/mysql-test/suite/mariabackup/include/restart_and_restore.inc
+++ b/mysql-test/suite/mariabackup/include/restart_and_restore.inc
@@ -5,5 +5,4 @@ echo # remove datadir;
rmdir $_datadir;
echo # xtrabackup move back;
exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$_datadir --target-dir=$targetdir --parallel=2 --throttle=1;
-echo # restart server;
--source include/start_mysqld.inc
diff --git a/mysql-test/suite/mariabackup/incremental_backup.result b/mysql-test/suite/mariabackup/incremental_backup.result
index cc7277bdde9..42a7029bb31 100644
--- a/mysql-test/suite/mariabackup/incremental_backup.result
+++ b/mysql-test/suite/mariabackup/incremental_backup.result
@@ -21,7 +21,7 @@ disconnect con1;
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * FROM t;
i
1
diff --git a/mysql-test/suite/mariabackup/incremental_ddl_before_backup.result b/mysql-test/suite/mariabackup/incremental_ddl_before_backup.result
index a6273a20ff5..71f250bfd8d 100644
--- a/mysql-test/suite/mariabackup/incremental_ddl_before_backup.result
+++ b/mysql-test/suite/mariabackup/incremental_ddl_before_backup.result
@@ -25,7 +25,7 @@ count(*)
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
select count(*) from t7;
count(*)
7168
diff --git a/mysql-test/suite/mariabackup/incremental_ddl_during_backup.result b/mysql-test/suite/mariabackup/incremental_ddl_during_backup.result
index ffca1ef0e1f..505e834b4d1 100644
--- a/mysql-test/suite/mariabackup/incremental_ddl_during_backup.result
+++ b/mysql-test/suite/mariabackup/incremental_ddl_during_backup.result
@@ -9,7 +9,7 @@ INSERT into t1 values(1);
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
CREATE TABLE t1(i int);
DROP TABLE t1;
SELECT * from t1_renamed;
diff --git a/mysql-test/suite/mariabackup/incremental_encrypted.result b/mysql-test/suite/mariabackup/incremental_encrypted.result
index e9525c9c4b7..6a23f399f0e 100644
--- a/mysql-test/suite/mariabackup/incremental_encrypted.result
+++ b/mysql-test/suite/mariabackup/incremental_encrypted.result
@@ -13,7 +13,7 @@ i
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * FROM t;
i
1
diff --git a/mysql-test/suite/mariabackup/incremental_rocksdb.result b/mysql-test/suite/mariabackup/incremental_rocksdb.result
index 4e5b9c43389..f319b46669d 100644
--- a/mysql-test/suite/mariabackup/incremental_rocksdb.result
+++ b/mysql-test/suite/mariabackup/incremental_rocksdb.result
@@ -10,7 +10,7 @@ INSERT INTO t2 VALUES(2);
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * FROM t2;
i
2
diff --git a/mysql-test/suite/mariabackup/innodb_log_optimize_ddl.result b/mysql-test/suite/mariabackup/innodb_log_optimize_ddl.result
index 47b74e0f709..f27f9d0d303 100644
--- a/mysql-test/suite/mariabackup/innodb_log_optimize_ddl.result
+++ b/mysql-test/suite/mariabackup/innodb_log_optimize_ddl.result
@@ -18,7 +18,7 @@ call a();
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
DROP PROCEDURE a;
CHECK TABLE tz,tr,td;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/mariabackup/log_checksum_mismatch.result b/mysql-test/suite/mariabackup/log_checksum_mismatch.result
index 806a5e62cb6..31bd9dcc0d1 100644
--- a/mysql-test/suite/mariabackup/log_checksum_mismatch.result
+++ b/mysql-test/suite/mariabackup/log_checksum_mismatch.result
@@ -7,7 +7,7 @@ INSERT INTO t VALUES(2);
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * FROM t;
i
1
diff --git a/mysql-test/suite/mariabackup/mdev-14447.result b/mysql-test/suite/mariabackup/mdev-14447.result
index 6600c13ed74..357e883178b 100644
--- a/mysql-test/suite/mariabackup/mdev-14447.result
+++ b/mysql-test/suite/mariabackup/mdev-14447.result
@@ -14,6 +14,7 @@ FOUND 1 /Checksum mismatch in datafile/ in backup.log
# remove datadir
# xtrabackup move back
# restart server
+# restart
SELECT count(*) FROM t;
count(*)
100000
diff --git a/mysql-test/suite/mariabackup/mdev-14447.test b/mysql-test/suite/mariabackup/mdev-14447.test
index 96d12368547..54cac745dc8 100644
--- a/mysql-test/suite/mariabackup/mdev-14447.test
+++ b/mysql-test/suite/mariabackup/mdev-14447.test
@@ -34,7 +34,6 @@ exec $XTRABACKUP --prepare --verbose --apply-log-only --target-dir=$basedir --in
echo # Restore and check results;
let $targetdir=$basedir;
-#-- source include/restart_and_restore.inc
let $_datadir= `SELECT @@datadir`;
let $innodb_data_file_path=`SELECT @@innodb_data_file_path`;
diff --git a/mysql-test/suite/mariabackup/missing_ibd.result b/mysql-test/suite/mariabackup/missing_ibd.result
index 53989be7c14..40caedf6492 100644
--- a/mysql-test/suite/mariabackup/missing_ibd.result
+++ b/mysql-test/suite/mariabackup/missing_ibd.result
@@ -1,5 +1,6 @@
create table t1(c1 int) engine=InnoDB;
INSERT INTO t1 VALUES(1);
+# restart
# xtrabackup backup
select * from t1;
ERROR 42S02: Table 'test.t1' doesn't exist in engine
diff --git a/mysql-test/suite/mariabackup/mlog_index_load.result b/mysql-test/suite/mariabackup/mlog_index_load.result
index bc83981106e..1748bc920e8 100644
--- a/mysql-test/suite/mariabackup/mlog_index_load.result
+++ b/mysql-test/suite/mariabackup/mlog_index_load.result
@@ -8,7 +8,7 @@ t1.new
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT COUNT(*) from t1;
COUNT(*)
10000
diff --git a/mysql-test/suite/mariabackup/partition_datadir.result b/mysql-test/suite/mariabackup/partition_datadir.result
index 3fc5fe30907..f00db641fea 100644
--- a/mysql-test/suite/mariabackup/partition_datadir.result
+++ b/mysql-test/suite/mariabackup/partition_datadir.result
@@ -11,7 +11,7 @@ DROP TABLE t;
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * FROM t;
i
1
diff --git a/mysql-test/suite/mariabackup/recreate_table_during_backup.result b/mysql-test/suite/mariabackup/recreate_table_during_backup.result
index 4e038fee71a..821f9301ab6 100644
--- a/mysql-test/suite/mariabackup/recreate_table_during_backup.result
+++ b/mysql-test/suite/mariabackup/recreate_table_during_backup.result
@@ -7,7 +7,7 @@ INSERT INTO t3 SELECT UUID() FROM seq_1_to_1000;
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT COUNT(*) from t1;
COUNT(*)
100
diff --git a/mysql-test/suite/mariabackup/rename_during_backup.result b/mysql-test/suite/mariabackup/rename_during_backup.result
index a4cf06b7633..ba1dbec0e1b 100644
--- a/mysql-test/suite/mariabackup/rename_during_backup.result
+++ b/mysql-test/suite/mariabackup/rename_during_backup.result
@@ -18,7 +18,7 @@ INSERT INTO b1 VALUES('b1');
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
CREATE TABLE t1(i int);
DROP TABLE t1;
SELECT * from t1_renamed;
diff --git a/mysql-test/suite/mariabackup/rename_during_mdl_lock.result b/mysql-test/suite/mariabackup/rename_during_mdl_lock.result
index 3b64cdc38bc..607460f4f05 100644
--- a/mysql-test/suite/mariabackup/rename_during_mdl_lock.result
+++ b/mysql-test/suite/mariabackup/rename_during_mdl_lock.result
@@ -3,7 +3,7 @@ CREATE TABLE t1(i int) ENGINE INNODB;
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
CREATE TABLE t1(i int);
DROP TABLE t1;
SELECT * from t2;
diff --git a/mysql-test/suite/mariabackup/skip_innodb.result b/mysql-test/suite/mariabackup/skip_innodb.result
index 0d56b55bf4b..859b3c56f43 100644
--- a/mysql-test/suite/mariabackup/skip_innodb.result
+++ b/mysql-test/suite/mariabackup/skip_innodb.result
@@ -3,7 +3,7 @@ INSERT INTO t VALUES(1);
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * from t;
i
1
diff --git a/mysql-test/suite/mariabackup/system_versioning.result b/mysql-test/suite/mariabackup/system_versioning.result
index 0e1e9253dd0..83e9cf04150 100644
--- a/mysql-test/suite/mariabackup/system_versioning.result
+++ b/mysql-test/suite/mariabackup/system_versioning.result
@@ -5,7 +5,7 @@ insert into t values (3);
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
show create table t;
Table Create Table
t CREATE TABLE `t` (
@@ -30,7 +30,7 @@ insert into t values (3);
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
show create table t;
Table Create Table
t CREATE TABLE `t` (
diff --git a/mysql-test/suite/mariabackup/unencrypted_page_compressed.result b/mysql-test/suite/mariabackup/unencrypted_page_compressed.result
index daeb9ecf8b7..7edf66b027a 100644
--- a/mysql-test/suite/mariabackup/unencrypted_page_compressed.result
+++ b/mysql-test/suite/mariabackup/unencrypted_page_compressed.result
@@ -4,6 +4,7 @@ CREATE TABLE t1 (a INT AUTO_INCREMENT PRIMARY KEY, b TEXT, c char(200)) ENGINE=I
insert into t1(b, c) values("mariadb", "mariabackup");
InnoDB 0 transactions not purged
# Corrupt the table
+# restart: --skip-innodb-buffer-pool-load-at-startup
# xtrabackup backup
FOUND 1 /Database page corruption detected.*/ in backup.log
drop table t1;
diff --git a/mysql-test/suite/mariabackup/xb_aws_key_management.result b/mysql-test/suite/mariabackup/xb_aws_key_management.result
index fd12344dfa0..0ca72291b1c 100644
--- a/mysql-test/suite/mariabackup/xb_aws_key_management.result
+++ b/mysql-test/suite/mariabackup/xb_aws_key_management.result
@@ -1,13 +1,10 @@
CREATE TABLE t(c VARCHAR(10)) ENGINE INNODB encrypted=yes;
-Warnings:
-Note 1105 AWS KMS plugin: generated encrypted datakey for key id=1, version=1
-Note 1105 AWS KMS plugin: loaded key 1, version 1, key length 128 bit
INSERT INTO t VALUES('foobar1');
# xtrabackup backup
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * from t;
c
foobar1
diff --git a/mysql-test/suite/mariabackup/xb_compressed_encrypted.result b/mysql-test/suite/mariabackup/xb_compressed_encrypted.result
index 9ba332b7ca5..22a4c151b07 100644
--- a/mysql-test/suite/mariabackup/xb_compressed_encrypted.result
+++ b/mysql-test/suite/mariabackup/xb_compressed_encrypted.result
@@ -7,7 +7,7 @@ drop table t1;
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
select sum(c1) from t1;
sum(c1)
12497500
diff --git a/mysql-test/suite/mariabackup/xb_file_key_management.result b/mysql-test/suite/mariabackup/xb_file_key_management.result
index 721d10a9d91..6cedfd2213b 100644
--- a/mysql-test/suite/mariabackup/xb_file_key_management.result
+++ b/mysql-test/suite/mariabackup/xb_file_key_management.result
@@ -8,7 +8,7 @@ INSERT INTO t VALUES('foobar2');
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
ib_logfile0
SELECT * FROM t;
c
diff --git a/mysql-test/suite/mariabackup/xb_fulltext_encrypted.result b/mysql-test/suite/mariabackup/xb_fulltext_encrypted.result
index 01a99e59200..7c0a6451820 100644
--- a/mysql-test/suite/mariabackup/xb_fulltext_encrypted.result
+++ b/mysql-test/suite/mariabackup/xb_fulltext_encrypted.result
@@ -10,5 +10,5 @@ FULLTEXT KEY (title)
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
drop table film_text;
diff --git a/mysql-test/suite/mariabackup/xb_partition.result b/mysql-test/suite/mariabackup/xb_partition.result
index f5b6ae0b24d..69896371f25 100644
--- a/mysql-test/suite/mariabackup/xb_partition.result
+++ b/mysql-test/suite/mariabackup/xb_partition.result
@@ -45,7 +45,7 @@ INSERT INTO isam_p VALUES (401), (501);
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * from p;
a
201
diff --git a/mysql-test/suite/mariabackup/xb_rocksdb.result b/mysql-test/suite/mariabackup/xb_rocksdb.result
index 84476eeaba0..fafa4925e93 100644
--- a/mysql-test/suite/mariabackup/xb_rocksdb.result
+++ b/mysql-test/suite/mariabackup/xb_rocksdb.result
@@ -6,7 +6,7 @@ INSERT INTO t VALUES(2);
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * FROM t;
i
1
@@ -16,6 +16,7 @@ i
# remove datadir
# xtrabackup move back
# restart server
+# restart
SELECT * FROM t;
i
1
diff --git a/mysql-test/suite/mariabackup/xb_rocksdb_datadir.result b/mysql-test/suite/mariabackup/xb_rocksdb_datadir.result
index 9227198cbec..76130c2f041 100644
--- a/mysql-test/suite/mariabackup/xb_rocksdb_datadir.result
+++ b/mysql-test/suite/mariabackup/xb_rocksdb_datadir.result
@@ -3,6 +3,7 @@ INSERT INTO t VALUES(1);
# xtrabackup backup
INSERT INTO t VALUES(2);
# xtrabackup prepare
+# restart
SELECT * FROM t;
i
1
diff --git a/mysql-test/suite/mariabackup/xb_rocksdb_datadir_debug.result b/mysql-test/suite/mariabackup/xb_rocksdb_datadir_debug.result
index 9227198cbec..76130c2f041 100644
--- a/mysql-test/suite/mariabackup/xb_rocksdb_datadir_debug.result
+++ b/mysql-test/suite/mariabackup/xb_rocksdb_datadir_debug.result
@@ -3,6 +3,7 @@ INSERT INTO t VALUES(1);
# xtrabackup backup
INSERT INTO t VALUES(2);
# xtrabackup prepare
+# restart
SELECT * FROM t;
i
1
diff --git a/mysql-test/suite/mariabackup/xbstream.result b/mysql-test/suite/mariabackup/xbstream.result
index f340fedb861..93f68eca9ef 100644
--- a/mysql-test/suite/mariabackup/xbstream.result
+++ b/mysql-test/suite/mariabackup/xbstream.result
@@ -6,7 +6,7 @@ INSERT INTO t VALUES(1);
# shutdown server
# remove datadir
# xtrabackup move back
-# restart server
+# restart
SELECT * FROM t;
i
1
diff --git a/mysql-test/suite/multi_source/gtid_ignore_duplicates.result b/mysql-test/suite/multi_source/gtid_ignore_duplicates.result
index 96627b42c97..e142ff8b981 100644
--- a/mysql-test/suite/multi_source/gtid_ignore_duplicates.result
+++ b/mysql-test/suite/multi_source/gtid_ignore_duplicates.result
@@ -492,21 +492,21 @@ SET GLOBAL slave_parallel_threads= @old_parallel;
SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates;
connection server_1;
DROP TABLE t1;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
include/reset_master_slave.inc
disconnect server_1;
connection server_2;
DROP TABLE t1;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
include/reset_master_slave.inc
disconnect server_2;
connection server_3;
DROP TABLE t1;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
include/reset_master_slave.inc
disconnect server_3;
connection server_4;
DROP TABLE t1;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
include/reset_master_slave.inc
disconnect server_4;
diff --git a/mysql-test/suite/multi_source/gtid_ignore_duplicates.test b/mysql-test/suite/multi_source/gtid_ignore_duplicates.test
index b61da0f0f33..3d2d151bd0d 100644
--- a/mysql-test/suite/multi_source/gtid_ignore_duplicates.test
+++ b/mysql-test/suite/multi_source/gtid_ignore_duplicates.test
@@ -432,24 +432,24 @@ SET GLOBAL gtid_ignore_duplicates= @old_ignore_duplicates;
--connection server_1
DROP TABLE t1;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
--source include/reset_master_slave.inc
--disconnect server_1
--connection server_2
DROP TABLE t1;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
--source include/reset_master_slave.inc
--disconnect server_2
--connection server_3
DROP TABLE t1;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
--source include/reset_master_slave.inc
--disconnect server_3
--connection server_4
DROP TABLE t1;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
--source include/reset_master_slave.inc
--disconnect server_4
diff --git a/mysql-test/suite/parts/r/optimizer.result b/mysql-test/suite/parts/r/optimizer.result
index 465c6c7d762..42d85dbbd39 100644
--- a/mysql-test/suite/parts/r/optimizer.result
+++ b/mysql-test/suite/parts/r/optimizer.result
@@ -25,7 +25,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 2 Using where; Using index
EXPLAIN SELECT a, MAX(b) FROM t2 WHERE a IN (10,100) GROUP BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2 range a a 5 NULL 2 Using where; Using index for group-by
+1 SIMPLE t2 range a a 5 NULL 2 Using where; Using index
FLUSH status;
SELECT a, MAX(b) FROM t1 WHERE a IN (10, 100) GROUP BY a;
a MAX(b)
@@ -41,5 +41,5 @@ a MAX(b)
# Should be no more than 4 reads.
SHOW status LIKE 'handler_read_key';
Variable_name Value
-Handler_read_key 4
+Handler_read_key 2
DROP TABLE t1, t2;
diff --git a/mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result b/mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result
index 5f3610b7999..0e8db677cdf 100644
--- a/mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result
@@ -517,6 +517,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1009,6 +1010,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1516,6 +1518,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2017,6 +2020,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2516,6 +2520,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3026,6 +3031,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3538,6 +3544,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4038,6 +4045,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4531,6 +4539,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5023,6 +5032,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5530,6 +5540,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6031,6 +6042,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6530,6 +6542,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7040,6 +7053,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7552,6 +7566,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8052,6 +8067,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8562,6 +8578,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9070,6 +9087,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9593,6 +9611,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10110,6 +10129,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10625,6 +10645,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11151,6 +11172,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11679,6 +11701,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12195,6 +12218,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12704,6 +12728,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13212,6 +13237,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13735,6 +13761,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14252,6 +14279,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14767,6 +14795,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15293,6 +15322,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15821,6 +15851,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16337,6 +16368,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16832,6 +16864,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17325,6 +17358,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17833,6 +17867,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18335,6 +18370,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18835,6 +18871,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19346,6 +19383,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19859,6 +19897,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20360,6 +20399,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20854,6 +20894,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21347,6 +21388,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21855,6 +21897,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22357,6 +22400,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22857,6 +22901,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23368,6 +23413,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23881,6 +23927,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24382,6 +24429,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24876,6 +24924,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25369,6 +25418,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25877,6 +25927,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26379,6 +26430,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26879,6 +26931,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27390,6 +27443,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27903,6 +27957,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28404,6 +28459,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result b/mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result
index ce3dd51496e..0f12c15e4e9 100644
--- a/mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result
@@ -543,6 +543,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1066,6 +1067,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1610,6 +1612,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2144,6 +2147,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2680,6 +2684,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3227,6 +3232,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3776,6 +3782,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4315,6 +4322,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4833,6 +4841,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5356,6 +5365,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5900,6 +5910,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6434,6 +6445,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6970,6 +6982,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7517,6 +7530,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8066,6 +8080,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8605,6 +8620,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter1_1_innodb.result b/mysql-test/suite/parts/r/partition_alter1_1_innodb.result
index 12f028782ed..919e6344615 100644
--- a/mysql-test/suite/parts/r/partition_alter1_1_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter1_1_innodb.result
@@ -835,6 +835,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1327,6 +1328,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1834,6 +1836,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2335,6 +2338,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2836,6 +2840,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3346,6 +3351,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3858,6 +3864,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4358,6 +4365,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4851,6 +4859,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5343,6 +5352,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5850,6 +5860,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6351,6 +6362,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6852,6 +6864,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7362,6 +7375,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7874,6 +7888,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8374,6 +8389,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8884,6 +8900,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9392,6 +9409,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9915,6 +9933,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10432,6 +10451,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10949,6 +10969,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11475,6 +11496,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12003,6 +12025,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12519,6 +12542,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13028,6 +13052,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13536,6 +13561,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14059,6 +14085,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14576,6 +14603,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15093,6 +15121,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15619,6 +15648,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16147,6 +16177,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16663,6 +16694,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter1_1_myisam.result b/mysql-test/suite/parts/r/partition_alter1_1_myisam.result
index a276e1593ee..05176399246 100644
--- a/mysql-test/suite/parts/r/partition_alter1_1_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter1_1_myisam.result
@@ -702,6 +702,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1225,6 +1226,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1769,6 +1771,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2303,6 +2306,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2841,6 +2845,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3388,6 +3393,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3937,6 +3943,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4476,6 +4483,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4994,6 +5002,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5517,6 +5526,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6061,6 +6071,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6595,6 +6606,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7133,6 +7145,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7680,6 +7693,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8229,6 +8243,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8768,6 +8783,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter1_2_innodb.result b/mysql-test/suite/parts/r/partition_alter1_2_innodb.result
index 36823665fa1..3f720024f72 100644
--- a/mysql-test/suite/parts/r/partition_alter1_2_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter1_2_innodb.result
@@ -464,6 +464,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -904,6 +905,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1359,6 +1361,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1808,6 +1811,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2257,6 +2261,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2715,6 +2720,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3175,6 +3181,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3623,6 +3630,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4064,6 +4072,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4504,6 +4513,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4959,6 +4969,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5408,6 +5419,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5857,6 +5869,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6315,6 +6328,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6775,6 +6789,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7223,6 +7238,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7680,6 +7696,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8136,6 +8153,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8607,6 +8625,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9072,6 +9091,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9537,6 +9557,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10011,6 +10032,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10487,6 +10509,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10951,6 +10974,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11396,6 +11420,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11836,6 +11861,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12291,6 +12317,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12740,6 +12767,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13187,6 +13215,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13645,6 +13674,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14105,6 +14135,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14553,6 +14584,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14994,6 +15026,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15434,6 +15467,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15889,6 +15923,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16338,6 +16373,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16785,6 +16821,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17243,6 +17280,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17703,6 +17741,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18151,6 +18190,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18609,6 +18649,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19065,6 +19106,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19536,6 +19578,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20001,6 +20044,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20464,6 +20508,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20938,6 +20983,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21414,6 +21460,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21878,6 +21925,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22335,6 +22383,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22791,6 +22840,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23262,6 +23312,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23727,6 +23778,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24190,6 +24242,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24664,6 +24717,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25140,6 +25194,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25604,6 +25659,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26046,6 +26102,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26486,6 +26543,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26941,6 +26999,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27390,6 +27449,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27837,6 +27897,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28295,6 +28356,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28755,6 +28817,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29203,6 +29266,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29644,6 +29708,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30084,6 +30149,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30539,6 +30605,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30988,6 +31055,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31435,6 +31503,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31893,6 +31962,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -32353,6 +32423,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -32801,6 +32872,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -33258,6 +33330,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -33714,6 +33787,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -34185,6 +34259,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -34650,6 +34725,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -35113,6 +35189,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -35587,6 +35664,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -36063,6 +36141,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -36527,6 +36606,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter1_2_myisam.result b/mysql-test/suite/parts/r/partition_alter1_2_myisam.result
index b4e1ddc577b..ba6a7e7fbbd 100644
--- a/mysql-test/suite/parts/r/partition_alter1_2_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter1_2_myisam.result
@@ -489,6 +489,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -960,6 +961,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1452,6 +1454,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1934,6 +1937,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2420,6 +2424,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2915,6 +2920,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3412,6 +3418,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3899,6 +3906,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4369,6 +4377,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4840,6 +4849,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5332,6 +5342,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5814,6 +5825,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6298,6 +6310,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6793,6 +6806,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7290,6 +7304,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7777,6 +7792,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8243,6 +8259,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8714,6 +8731,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9206,6 +9224,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9688,6 +9707,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10172,6 +10192,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10667,6 +10688,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11164,6 +11186,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11651,6 +11674,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12117,6 +12141,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12588,6 +12613,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13080,6 +13106,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13562,6 +13589,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14046,6 +14074,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14541,6 +14570,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15038,6 +15068,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15525,6 +15556,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result b/mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result
index 586b24e4049..ff7d659613d 100644
--- a/mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result
@@ -481,6 +481,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -937,6 +938,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1408,6 +1410,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1873,6 +1876,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2338,6 +2342,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2812,6 +2817,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3288,6 +3294,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3752,6 +3759,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4248,6 +4256,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4742,6 +4751,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5251,6 +5261,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5754,6 +5765,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6257,6 +6269,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6769,6 +6782,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7283,6 +7297,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7785,6 +7800,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8280,6 +8296,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8774,6 +8791,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9283,6 +9301,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9786,6 +9805,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10289,6 +10309,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10801,6 +10822,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11315,6 +11337,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11817,6 +11840,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12327,6 +12351,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12835,6 +12860,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13358,6 +13384,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13875,6 +13902,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14392,6 +14420,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14918,6 +14947,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15446,6 +15476,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15962,6 +15993,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16471,6 +16503,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16979,6 +17012,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17502,6 +17536,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18019,6 +18054,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18536,6 +18572,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19062,6 +19099,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19590,6 +19628,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20106,6 +20145,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter2_1_2_innodb.result b/mysql-test/suite/parts/r/partition_alter2_1_2_innodb.result
index 5a71f1063f5..a537b74201d 100644
--- a/mysql-test/suite/parts/r/partition_alter2_1_2_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter2_1_2_innodb.result
@@ -477,6 +477,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -933,6 +934,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1404,6 +1406,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1869,6 +1872,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2332,6 +2336,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2806,6 +2811,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3282,6 +3288,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3746,6 +3753,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4243,6 +4251,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4738,6 +4747,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5248,6 +5258,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5752,6 +5763,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6254,6 +6266,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6767,6 +6780,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7282,6 +7296,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7785,6 +7800,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8281,6 +8297,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8776,6 +8793,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9286,6 +9304,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9790,6 +9809,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10292,6 +10312,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10805,6 +10826,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11320,6 +11342,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11823,6 +11846,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12333,6 +12357,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12841,6 +12866,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13364,6 +13390,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13881,6 +13908,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14396,6 +14424,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14922,6 +14951,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15450,6 +15480,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15966,6 +15997,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16475,6 +16507,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16983,6 +17016,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17506,6 +17540,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18023,6 +18058,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18538,6 +18574,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19064,6 +19101,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19592,6 +19630,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20108,6 +20147,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter2_1_maria.result b/mysql-test/suite/parts/r/partition_alter2_1_maria.result
index 7196be3a602..0af69f09639 100644
--- a/mysql-test/suite/parts/r/partition_alter2_1_maria.result
+++ b/mysql-test/suite/parts/r/partition_alter2_1_maria.result
@@ -491,6 +491,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -962,6 +963,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1454,6 +1456,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1936,6 +1939,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2422,6 +2426,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2917,6 +2922,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3414,6 +3420,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3901,6 +3908,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4420,6 +4428,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4943,6 +4952,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5487,6 +5497,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6021,6 +6032,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6559,6 +6571,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7106,6 +7119,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7655,6 +7669,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8194,6 +8209,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8712,6 +8728,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9235,6 +9252,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9779,6 +9797,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10313,6 +10332,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10851,6 +10871,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11398,6 +11419,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11947,6 +11969,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12486,6 +12509,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12957,6 +12981,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13428,6 +13453,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13920,6 +13946,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14402,6 +14429,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14886,6 +14914,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15381,6 +15410,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15878,6 +15908,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16365,6 +16396,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16884,6 +16916,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17407,6 +17440,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17951,6 +17985,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18485,6 +18520,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19021,6 +19057,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19568,6 +19605,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20117,6 +20155,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20656,6 +20695,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21174,6 +21214,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21697,6 +21738,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22241,6 +22283,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22775,6 +22818,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23311,6 +23355,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23858,6 +23903,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24407,6 +24453,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24946,6 +24993,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter2_1_myisam.result b/mysql-test/suite/parts/r/partition_alter2_1_myisam.result
index 59039b97f65..60cb5a2e733 100644
--- a/mysql-test/suite/parts/r/partition_alter2_1_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter2_1_myisam.result
@@ -491,6 +491,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -962,6 +963,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1454,6 +1456,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1936,6 +1939,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2422,6 +2426,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2917,6 +2922,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3414,6 +3420,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3901,6 +3908,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4420,6 +4428,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4943,6 +4952,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5487,6 +5497,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6021,6 +6032,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6559,6 +6571,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7106,6 +7119,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7655,6 +7669,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8194,6 +8209,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8712,6 +8728,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9235,6 +9252,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9779,6 +9797,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10313,6 +10332,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10851,6 +10871,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11398,6 +11419,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11947,6 +11969,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12486,6 +12509,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12957,6 +12981,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13428,6 +13453,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13920,6 +13946,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14402,6 +14429,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14886,6 +14914,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15381,6 +15410,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15878,6 +15908,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16365,6 +16396,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16884,6 +16916,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17407,6 +17440,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17951,6 +17985,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18485,6 +18520,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19021,6 +19057,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19568,6 +19605,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20117,6 +20155,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20656,6 +20695,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21174,6 +21214,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21697,6 +21738,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22241,6 +22283,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22775,6 +22818,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23311,6 +23355,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23858,6 +23903,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24407,6 +24453,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24946,6 +24993,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result b/mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result
index 1d90f8f6368..d0d4e1602fd 100644
--- a/mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result
@@ -483,6 +483,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -941,6 +942,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1414,6 +1416,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1879,6 +1882,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2346,6 +2350,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2820,6 +2825,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3298,6 +3304,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3764,6 +3771,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4261,6 +4269,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4757,6 +4766,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5268,6 +5278,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5771,6 +5782,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6276,6 +6288,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6788,6 +6801,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7304,6 +7318,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7808,6 +7823,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8305,6 +8321,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8801,6 +8818,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9312,6 +9330,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9815,6 +9834,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10320,6 +10340,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10832,6 +10853,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11348,6 +11370,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11852,6 +11875,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12364,6 +12388,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12874,6 +12899,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13399,6 +13425,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13916,6 +13943,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14435,6 +14463,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14961,6 +14990,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15491,6 +15521,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16009,6 +16040,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16520,6 +16552,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17030,6 +17063,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17555,6 +17589,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18072,6 +18107,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18591,6 +18627,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19117,6 +19154,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19647,6 +19685,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20165,6 +20204,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter2_2_2_innodb.result b/mysql-test/suite/parts/r/partition_alter2_2_2_innodb.result
index 4b0a481340f..4332588fda7 100644
--- a/mysql-test/suite/parts/r/partition_alter2_2_2_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter2_2_2_innodb.result
@@ -479,6 +479,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -938,6 +939,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1412,6 +1414,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1882,6 +1885,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2350,6 +2354,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2829,6 +2834,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3308,6 +3314,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3775,6 +3782,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4275,6 +4283,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4773,6 +4782,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5286,6 +5296,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5795,6 +5806,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6302,6 +6314,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6820,6 +6833,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7338,6 +7352,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7844,6 +7859,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8343,6 +8359,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8841,6 +8858,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9354,6 +9372,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9863,6 +9882,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10370,6 +10390,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10888,6 +10909,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11406,6 +11428,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11912,6 +11935,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12425,6 +12449,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12936,6 +12961,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13462,6 +13488,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13984,6 +14011,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14504,6 +14532,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15035,6 +15064,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15566,6 +15596,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16085,6 +16116,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16597,6 +16629,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17108,6 +17141,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17634,6 +17668,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18156,6 +18191,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18676,6 +18712,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19207,6 +19244,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19738,6 +19776,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20257,6 +20296,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter2_2_maria.result b/mysql-test/suite/parts/r/partition_alter2_2_maria.result
index 66d5770e20e..9cc64b8239b 100644
--- a/mysql-test/suite/parts/r/partition_alter2_2_maria.result
+++ b/mysql-test/suite/parts/r/partition_alter2_2_maria.result
@@ -493,6 +493,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -966,6 +967,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1460,6 +1462,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1942,6 +1945,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2430,6 +2434,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2925,6 +2930,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3424,6 +3430,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3913,6 +3920,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4434,6 +4442,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4959,6 +4968,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5505,6 +5515,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6039,6 +6050,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6579,6 +6591,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7126,6 +7139,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7677,6 +7691,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8218,6 +8233,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8738,6 +8754,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9263,6 +9280,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9809,6 +9827,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10343,6 +10362,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10883,6 +10903,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11430,6 +11451,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11981,6 +12003,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12522,6 +12545,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12995,6 +13019,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13469,6 +13494,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13964,6 +13990,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14451,6 +14478,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14940,6 +14968,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15440,6 +15469,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15940,6 +15970,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16430,6 +16461,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16952,6 +16984,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17478,6 +17511,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18025,6 +18059,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18564,6 +18599,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19105,6 +19141,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19657,6 +19694,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20209,6 +20247,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20751,6 +20790,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21272,6 +21312,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21798,6 +21839,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22345,6 +22387,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22884,6 +22927,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23425,6 +23469,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23977,6 +24022,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24529,6 +24575,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25071,6 +25118,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter2_2_myisam.result b/mysql-test/suite/parts/r/partition_alter2_2_myisam.result
index 69548fdb94b..32a840c1577 100644
--- a/mysql-test/suite/parts/r/partition_alter2_2_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter2_2_myisam.result
@@ -493,6 +493,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -966,6 +967,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1460,6 +1462,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1942,6 +1945,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2430,6 +2434,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2925,6 +2930,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3424,6 +3430,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3913,6 +3920,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4434,6 +4442,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4959,6 +4968,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5505,6 +5515,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6039,6 +6050,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6579,6 +6591,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7126,6 +7139,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7677,6 +7691,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8218,6 +8233,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8738,6 +8754,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9263,6 +9280,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9809,6 +9827,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10343,6 +10362,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10883,6 +10903,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11430,6 +11451,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11981,6 +12003,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12522,6 +12545,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12995,6 +13019,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13469,6 +13494,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13964,6 +13990,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14451,6 +14478,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14940,6 +14968,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15440,6 +15469,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15940,6 +15970,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16430,6 +16461,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16952,6 +16984,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17478,6 +17511,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18025,6 +18059,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18564,6 +18599,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19105,6 +19141,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19657,6 +19694,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20209,6 +20247,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20751,6 +20790,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21272,6 +21312,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21798,6 +21839,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22345,6 +22387,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22884,6 +22927,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23425,6 +23469,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23977,6 +24022,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24529,6 +24575,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25071,6 +25118,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_alter4_innodb.result b/mysql-test/suite/parts/r/partition_alter4_innodb.result
index 644a5f32f84..d88bdfcbe00 100644
--- a/mysql-test/suite/parts/r/partition_alter4_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter4_innodb.result
@@ -60,6 +60,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -483,6 +484,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -519,6 +521,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -945,6 +948,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -989,6 +993,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -1418,6 +1423,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1460,6 +1466,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -1885,6 +1892,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1925,6 +1933,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -2352,6 +2361,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2396,6 +2406,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -2828,6 +2839,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2872,6 +2884,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -3306,6 +3319,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3346,6 +3360,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -3772,6 +3787,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3810,6 +3826,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -4233,6 +4250,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4269,6 +4287,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -4695,6 +4714,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4739,6 +4759,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -5168,6 +5189,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5210,6 +5232,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -5635,6 +5658,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5675,6 +5699,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -6102,6 +6127,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6146,6 +6172,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -6578,6 +6605,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6622,6 +6650,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -7056,6 +7085,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7096,6 +7126,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -7522,6 +7553,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7983,6 +8015,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8445,6 +8478,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8918,6 +8952,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9385,6 +9420,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9852,6 +9888,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10328,6 +10365,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10806,6 +10844,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11272,6 +11311,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11733,6 +11773,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12195,6 +12236,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12668,6 +12710,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13135,6 +13178,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13602,6 +13646,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14078,6 +14123,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14556,6 +14602,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15022,6 +15069,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15060,6 +15108,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -15483,6 +15532,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15519,6 +15569,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -15945,6 +15996,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15989,6 +16041,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -16418,6 +16471,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16460,6 +16514,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -16885,6 +16940,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16925,6 +16981,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -17352,6 +17409,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17396,6 +17454,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -17828,6 +17887,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17872,6 +17932,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -18306,6 +18367,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18346,6 +18408,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -18772,6 +18835,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19236,6 +19300,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19698,6 +19763,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20171,6 +20237,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20638,6 +20705,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21105,6 +21173,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21581,6 +21650,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22059,6 +22129,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22525,6 +22596,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22986,6 +23058,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23448,6 +23521,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23921,6 +23995,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24388,6 +24463,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24855,6 +24931,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25331,6 +25408,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25809,6 +25887,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26275,6 +26354,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26736,6 +26816,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27198,6 +27279,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27671,6 +27753,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28138,6 +28221,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28605,6 +28689,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29081,6 +29166,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29559,6 +29645,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30025,6 +30112,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30486,6 +30574,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30948,6 +31037,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31421,6 +31511,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31888,6 +31979,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -32355,6 +32447,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -32831,6 +32924,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -33309,6 +33403,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -33775,6 +33870,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -34236,6 +34332,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -34698,6 +34795,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -35171,6 +35269,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -35638,6 +35737,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -36105,6 +36205,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -36581,6 +36682,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -37059,6 +37161,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -37525,6 +37628,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -37990,6 +38094,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -38453,6 +38558,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -38927,6 +39033,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -39395,6 +39502,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -39863,6 +39971,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -40340,6 +40449,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -40819,6 +40929,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -41286,6 +41397,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -41748,6 +41860,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -42211,6 +42324,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -42685,6 +42799,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -43153,6 +43268,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -43621,6 +43737,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -44098,6 +44215,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -44577,6 +44695,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -45044,6 +45163,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -45505,6 +45625,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -45967,6 +46088,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -46440,6 +46562,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -46907,6 +47030,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -47374,6 +47498,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -47850,6 +47975,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -48328,6 +48454,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -48794,6 +48921,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -49255,6 +49383,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -49717,6 +49846,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -50190,6 +50320,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -50657,6 +50788,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -51124,6 +51256,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -51600,6 +51733,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -52078,6 +52212,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -52544,6 +52679,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -53006,6 +53142,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -53469,6 +53606,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -53943,6 +54081,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -54411,6 +54550,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -54879,6 +55019,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -55356,6 +55497,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -55835,6 +55977,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -56302,6 +56445,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -56764,6 +56908,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -57224,6 +57369,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -57695,6 +57841,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -58160,6 +58307,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -58625,6 +58773,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -59099,6 +59248,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -59575,6 +59725,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -60039,6 +60190,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -60498,6 +60650,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -60958,6 +61111,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -61429,6 +61583,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -61894,6 +62049,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -62359,6 +62515,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -62833,6 +62990,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -63309,6 +63467,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -63773,6 +63932,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -64552,6 +64712,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -65012,6 +65173,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -65483,6 +65645,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -65948,6 +66111,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -66413,6 +66577,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -66887,6 +67052,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -67363,6 +67529,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -67827,6 +67994,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -68291,6 +68459,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -68753,6 +68922,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -69226,6 +69396,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -69693,6 +69864,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -70160,6 +70332,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -70636,6 +70809,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -71114,6 +71288,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -71580,6 +71755,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -72041,6 +72217,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -72503,6 +72680,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -72976,6 +73154,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -73443,6 +73622,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -73910,6 +74090,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -74386,6 +74567,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -74864,6 +75046,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -75330,6 +75513,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -75791,6 +75975,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -76253,6 +76438,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -76726,6 +76912,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -77193,6 +77380,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -77660,6 +77848,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -78136,6 +78325,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -78614,6 +78804,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -79080,6 +79271,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -79541,6 +79733,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -80003,6 +80196,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -80476,6 +80670,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -80943,6 +81138,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -81410,6 +81606,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -81886,6 +82083,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -82364,6 +82562,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -82830,6 +83029,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -83291,6 +83491,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -83753,6 +83954,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -84226,6 +84428,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -84693,6 +84896,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -85160,6 +85364,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -85636,6 +85841,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -86114,6 +86320,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -86580,6 +86787,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -87039,6 +87247,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -87493,6 +87702,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -87955,6 +88165,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -88415,6 +88626,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -88873,6 +89085,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -89335,6 +89548,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -89797,6 +90011,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -90255,6 +90470,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
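(Editorial aside, not part of the patch: the repetitive result-file updates in this and the following files all account for one behavioural change — ANALYZE TABLE and ALTER TABLE ... ANALYZE PARTITION now emit an extra status row when engine-independent statistics are collected, which is assumed here to be enabled by default in this branch. A minimal sketch of the expected client output, assuming use_stat_tables is not set to 'never'; table name and column are hypothetical:

-- Sketch only; not taken from the patch.
CREATE TABLE t1 (f_int1 INT) ENGINE=MyISAM;
ANALYZE TABLE t1;
-- Table    Op       Msg_type  Msg_text
-- test.t1  analyze  status    Engine-independent statistics collected
-- test.t1  analyze  status    OK
DROP TABLE t1;

Each hunk below simply inserts that "Engine-independent statistics collected" row ahead of the existing "OK" row in the recorded results.)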
diff --git a/mysql-test/suite/parts/r/partition_alter4_myisam.result b/mysql-test/suite/parts/r/partition_alter4_myisam.result
index 41723a268e1..90edd1b2210 100644
--- a/mysql-test/suite/parts/r/partition_alter4_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter4_myisam.result
@@ -60,6 +60,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -493,6 +494,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -528,6 +530,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -970,6 +973,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1013,6 +1017,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -1464,6 +1469,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1505,6 +1511,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -1948,6 +1955,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1987,6 +1995,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -2436,6 +2445,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2479,6 +2489,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -2933,6 +2944,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2976,6 +2988,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -3432,6 +3445,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3471,6 +3485,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -3921,6 +3936,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3958,6 +3974,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -4391,6 +4408,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4426,6 +4444,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -4868,6 +4887,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4911,6 +4931,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -5362,6 +5383,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5403,6 +5425,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -5846,6 +5869,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5885,6 +5909,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -6334,6 +6359,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6377,6 +6403,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -6831,6 +6858,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6874,6 +6902,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -7330,6 +7359,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7369,6 +7399,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -7819,6 +7850,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8289,6 +8321,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8766,6 +8799,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9260,6 +9294,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9744,6 +9779,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10232,6 +10268,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10729,6 +10766,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11228,6 +11266,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11717,6 +11756,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12187,6 +12227,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12664,6 +12705,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13158,6 +13200,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13642,6 +13685,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14130,6 +14174,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14627,6 +14672,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15126,6 +15172,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15615,6 +15662,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15652,6 +15700,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -16085,6 +16134,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16120,6 +16170,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -16562,6 +16613,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16605,6 +16657,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -17056,6 +17109,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17097,6 +17151,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -17540,6 +17595,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17579,6 +17635,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -18028,6 +18085,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18071,6 +18129,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -18525,6 +18584,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18568,6 +18628,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -19024,6 +19085,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19063,6 +19125,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -19513,6 +19576,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19986,6 +20050,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20463,6 +20528,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20957,6 +21023,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21441,6 +21508,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21929,6 +21997,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22426,6 +22495,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22925,6 +22995,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23414,6 +23485,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23884,6 +23956,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24361,6 +24434,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24855,6 +24929,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25339,6 +25414,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25827,6 +25903,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26324,6 +26401,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26823,6 +26901,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27312,6 +27391,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27782,6 +27862,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28259,6 +28340,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28753,6 +28835,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29237,6 +29320,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29725,6 +29809,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30222,6 +30307,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30721,6 +30807,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31210,6 +31297,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31680,6 +31768,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -32157,6 +32246,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -32651,6 +32741,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -33135,6 +33226,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -33623,6 +33715,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -34120,6 +34213,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -34619,6 +34713,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -35108,6 +35203,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -35578,6 +35674,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -36055,6 +36152,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -36549,6 +36647,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -37033,6 +37132,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -37521,6 +37621,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -38018,6 +38119,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -38517,6 +38619,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -39006,6 +39109,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -39479,6 +39583,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -39956,6 +40061,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -40450,6 +40556,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -40934,6 +41041,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -41422,6 +41530,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -41919,6 +42028,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -42418,6 +42528,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -42907,6 +43018,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -43377,6 +43489,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -43854,6 +43967,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -44348,6 +44462,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -44832,6 +44947,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -45320,6 +45436,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -45817,6 +45934,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -46316,6 +46434,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -46805,6 +46924,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -47275,6 +47395,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -47752,6 +47873,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -48246,6 +48368,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -48730,6 +48853,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -49218,6 +49342,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -49715,6 +49840,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -50214,6 +50340,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -50703,6 +50830,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -51173,6 +51301,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -51650,6 +51779,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -52144,6 +52274,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -52628,6 +52759,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -53116,6 +53248,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -53613,6 +53746,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -54112,6 +54246,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -54601,6 +54736,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -55071,6 +55207,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -55548,6 +55685,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -56042,6 +56180,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -56526,6 +56665,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -57014,6 +57154,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -57511,6 +57652,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -58010,6 +58152,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -58499,6 +58642,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -58970,6 +59114,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -59445,6 +59590,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -59937,6 +60083,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -60419,6 +60566,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -60905,6 +61053,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -61400,6 +61549,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -61897,6 +62047,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -62384,6 +62535,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -62852,6 +63004,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -63327,6 +63480,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -63819,6 +63973,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -64301,6 +64456,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -64787,6 +64943,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -65282,6 +65439,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -65779,6 +65937,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -66266,6 +66425,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -67054,6 +67214,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -67529,6 +67690,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -68021,6 +68183,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -68503,6 +68666,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -68989,6 +69153,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -69484,6 +69649,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -69981,6 +70147,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -70468,6 +70635,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -70941,6 +71109,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -71418,6 +71587,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -71912,6 +72082,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -72396,6 +72567,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -72884,6 +73056,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -73381,6 +73554,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -73880,6 +74054,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -74369,6 +74544,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -74839,6 +75015,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -75316,6 +75493,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -75810,6 +75988,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -76294,6 +76473,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -76782,6 +76962,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -77279,6 +77460,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -77778,6 +77960,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -78267,6 +78450,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -78737,6 +78921,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -79214,6 +79399,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -79708,6 +79894,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -80192,6 +80379,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -80680,6 +80868,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -81177,6 +81366,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -81676,6 +81866,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -82165,6 +82356,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -82635,6 +82827,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -83112,6 +83305,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -83606,6 +83800,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -84090,6 +84285,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -84578,6 +84774,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -85075,6 +85272,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -85574,6 +85772,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -86063,6 +86262,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -86533,6 +86733,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -87010,6 +87211,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -87504,6 +87706,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -87988,6 +88191,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -88476,6 +88680,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -88973,6 +89178,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -89472,6 +89678,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -89961,6 +90168,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -90426,6 +90634,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -90886,6 +91095,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -91354,6 +91564,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -91820,6 +92031,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -92284,6 +92496,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -92752,6 +92965,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -93220,6 +93434,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -93684,6 +93899,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_basic_innodb.result b/mysql-test/suite/parts/r/partition_basic_innodb.result
index 570a30bcd81..5ea1f98ddcb 100644
--- a/mysql-test/suite/parts/r/partition_basic_innodb.result
+++ b/mysql-test/suite/parts/r/partition_basic_innodb.result
@@ -486,6 +486,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -946,6 +947,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1424,6 +1426,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1894,6 +1897,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2366,6 +2370,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2847,6 +2852,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3334,6 +3340,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3806,6 +3813,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4265,6 +4273,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4725,6 +4734,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5203,6 +5213,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5673,6 +5684,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6143,6 +6155,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6624,6 +6637,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7107,6 +7121,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7579,6 +7594,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8079,6 +8095,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8576,6 +8593,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9091,6 +9109,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9598,6 +9617,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10107,6 +10127,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10625,6 +10646,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11149,6 +11171,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11658,6 +11681,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12153,6 +12177,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12650,6 +12675,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13165,6 +13191,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13672,6 +13699,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14181,6 +14209,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14699,6 +14728,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15223,6 +15253,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15732,6 +15763,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16243,6 +16275,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16756,6 +16789,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17287,6 +17321,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17810,6 +17845,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18335,6 +18371,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18869,6 +18906,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19409,6 +19447,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19934,6 +19973,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20434,6 +20474,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20931,6 +20972,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21446,6 +21488,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21953,6 +21996,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22460,6 +22504,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22978,6 +23023,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23498,6 +23544,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24007,6 +24054,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24502,6 +24550,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24999,6 +25048,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25514,6 +25564,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26021,6 +26072,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26528,6 +26580,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27046,6 +27099,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27566,6 +27620,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28075,6 +28130,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28586,6 +28642,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29099,6 +29156,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29630,6 +29688,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30153,6 +30212,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30676,6 +30736,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31210,6 +31271,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31746,6 +31808,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -32271,6 +32334,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_basic_myisam.result b/mysql-test/suite/parts/r/partition_basic_myisam.result
index 3b84c0295f2..06bf043b382 100644
--- a/mysql-test/suite/parts/r/partition_basic_myisam.result
+++ b/mysql-test/suite/parts/r/partition_basic_myisam.result
@@ -490,6 +490,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -956,6 +957,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1443,6 +1445,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1920,6 +1923,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2401,6 +2405,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2891,6 +2896,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3387,6 +3393,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3869,6 +3876,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4331,6 +4339,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4797,6 +4806,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5284,6 +5294,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5761,6 +5772,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6240,6 +6252,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6730,6 +6743,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7222,6 +7236,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7704,6 +7719,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8223,6 +8239,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8742,6 +8759,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9282,6 +9300,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9812,6 +9831,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10346,6 +10366,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10889,6 +10910,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11438,6 +11460,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11973,6 +11996,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12492,6 +12516,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13011,6 +13036,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13551,6 +13577,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14081,6 +14108,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14613,6 +14641,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15156,6 +15185,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15701,6 +15731,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16236,6 +16267,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_basic_symlink_myisam.result b/mysql-test/suite/parts/r/partition_basic_symlink_myisam.result
index 5e59c8f08ba..df141a93033 100644
--- a/mysql-test/suite/parts/r/partition_basic_symlink_myisam.result
+++ b/mysql-test/suite/parts/r/partition_basic_symlink_myisam.result
@@ -505,6 +505,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1000,6 +1001,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1511,6 +1513,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2012,6 +2015,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2517,6 +2521,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3027,6 +3032,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3559,6 +3565,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4065,6 +4072,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4538,6 +4546,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5033,6 +5042,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5544,6 +5554,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6045,6 +6056,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6548,6 +6560,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7058,6 +7071,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7590,6 +7604,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8096,6 +8111,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8626,6 +8642,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9174,6 +9191,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9738,6 +9756,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10292,6 +10311,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10850,6 +10870,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11413,6 +11434,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11998,6 +12020,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12557,6 +12580,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13087,6 +13111,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13635,6 +13660,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14199,6 +14225,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14753,6 +14780,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15309,6 +15337,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15872,6 +15901,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16457,6 +16487,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17016,6 +17047,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17540,6 +17572,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18078,6 +18111,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18635,6 +18669,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19177,6 +19212,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19723,6 +19759,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20279,6 +20316,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20830,6 +20868,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_engine_innodb.result b/mysql-test/suite/parts/r/partition_engine_innodb.result
index 002fe90c78a..38221951ae8 100644
--- a/mysql-test/suite/parts/r/partition_engine_innodb.result
+++ b/mysql-test/suite/parts/r/partition_engine_innodb.result
@@ -475,6 +475,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -933,6 +934,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1395,6 +1397,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1920,6 +1923,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2421,6 +2425,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2883,6 +2888,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3342,6 +3348,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3804,6 +3811,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4269,6 +4277,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4723,6 +4732,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5178,6 +5188,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_engine_myisam.result b/mysql-test/suite/parts/r/partition_engine_myisam.result
index f940b250c19..683729fa2f3 100644
--- a/mysql-test/suite/parts/r/partition_engine_myisam.result
+++ b/mysql-test/suite/parts/r/partition_engine_myisam.result
@@ -485,6 +485,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -952,6 +953,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1427,6 +1429,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1965,6 +1968,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2479,6 +2483,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2954,6 +2959,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3422,6 +3428,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3897,6 +3904,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4375,6 +4383,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4836,6 +4845,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5300,6 +5310,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc0_innodb.result b/mysql-test/suite/parts/r/partition_mgm_lc0_innodb.result
index 88469a339ad..023195e9760 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc0_innodb.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc0_innodb.result
@@ -1020,6 +1020,7 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
+MySQL_Test_DB.t1 analyze status Engine-independent statistics collected
MySQL_Test_DB.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc0_memory.result b/mysql-test/suite/parts/r/partition_mgm_lc0_memory.result
index 585bd24dca6..0b5f96696b0 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc0_memory.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc0_memory.result
@@ -1020,6 +1020,7 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
+MySQL_Test_DB.t1 analyze status Engine-independent statistics collected
MySQL_Test_DB.t1 analyze note The storage engine for the table doesn't support analyze
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc0_myisam.result b/mysql-test/suite/parts/r/partition_mgm_lc0_myisam.result
index 59181388bd0..8461d4136e0 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc0_myisam.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc0_myisam.result
@@ -1020,6 +1020,7 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
+MySQL_Test_DB.t1 analyze status Engine-independent statistics collected
MySQL_Test_DB.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc1_innodb.result b/mysql-test/suite/parts/r/partition_mgm_lc1_innodb.result
index c4d63f663e1..4c541d9467e 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc1_innodb.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc1_innodb.result
@@ -987,6 +987,7 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
+mysql_test_db.t1 analyze status Engine-independent statistics collected
mysql_test_db.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc1_memory.result b/mysql-test/suite/parts/r/partition_mgm_lc1_memory.result
index d29dfd343a6..8f934506ee2 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc1_memory.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc1_memory.result
@@ -987,6 +987,7 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
+mysql_test_db.t1 analyze status Engine-independent statistics collected
mysql_test_db.t1 analyze note The storage engine for the table doesn't support analyze
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc1_myisam.result b/mysql-test/suite/parts/r/partition_mgm_lc1_myisam.result
index f26b975b75e..47e07ecc3a6 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc1_myisam.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc1_myisam.result
@@ -987,6 +987,7 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
+mysql_test_db.t1 analyze status Engine-independent statistics collected
mysql_test_db.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc2_innodb.result b/mysql-test/suite/parts/r/partition_mgm_lc2_innodb.result
index a1dad1b34b3..6cd4c4991b7 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc2_innodb.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc2_innodb.result
@@ -987,6 +987,7 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
+mysql_test_db.t1 analyze status Engine-independent statistics collected
mysql_test_db.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc2_memory.result b/mysql-test/suite/parts/r/partition_mgm_lc2_memory.result
index 88f59da8f13..d1b7a599923 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc2_memory.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc2_memory.result
@@ -987,6 +987,7 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
+mysql_test_db.t1 analyze status Engine-independent statistics collected
mysql_test_db.t1 analyze note The storage engine for the table doesn't support analyze
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc2_myisam.result b/mysql-test/suite/parts/r/partition_mgm_lc2_myisam.result
index a498d4667b1..ad83e95929a 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc2_myisam.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc2_myisam.result
@@ -987,6 +987,7 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
+mysql_test_db.t1 analyze status Engine-independent statistics collected
mysql_test_db.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;
diff --git a/mysql-test/suite/parts/r/partition_recover_myisam.result b/mysql-test/suite/parts/r/partition_recover_myisam.result
index 4b9e3f5c283..151ff802a82 100644
--- a/mysql-test/suite/parts/r/partition_recover_myisam.result
+++ b/mysql-test/suite/parts/r/partition_recover_myisam.result
@@ -18,10 +18,10 @@ a
11
Warnings:
Error 145 Table 't1_will_crash' is marked as crashed and should be repaired
-Error 1034 1 client is using or hasn't closed the table properly
+Warning 1034 1 client is using or hasn't closed the table properly
Error 1034 Size of indexfile is: 1024 Should be: 2048
-Error 1034 Size of datafile is: 77 Should be: 7
-Error 1034 Number of rows changed from 1 to 11
+Warning 1034 Size of datafile is: 77 Should be: 7
+Warning 1034 Number of rows changed from 1 to 11
DROP TABLE t1_will_crash;
CREATE TABLE t1_will_crash (a INT, KEY (a))
ENGINE=MyISAM
@@ -47,8 +47,8 @@ a
11
Warnings:
Error 145 Table 't1_will_crash#P#p1' is marked as crashed and should be repaired
-Error 1034 1 client is using or hasn't closed the table properly
+Warning 1034 1 client is using or hasn't closed the table properly
Error 1034 Size of indexfile is: 1024 Should be: 2048
-Error 1034 Size of datafile is: 28 Should be: 7
-Error 1034 Number of rows changed from 1 to 4
+Warning 1034 Size of datafile is: 28 Should be: 7
+Warning 1034 Number of rows changed from 1 to 4
DROP TABLE t1_will_crash;
diff --git a/mysql-test/suite/parts/r/partition_repair_myisam.result b/mysql-test/suite/parts/r/partition_repair_myisam.result
index 6e99f1d3632..0521263df12 100644
--- a/mysql-test/suite/parts/r/partition_repair_myisam.result
+++ b/mysql-test/suite/parts/r/partition_repair_myisam.result
@@ -322,7 +322,8 @@ FLUSH TABLES;
# replacing p6 with a crashed MYD file (1) (splitted dynamic record)
ANALYZE TABLE t1_will_crash;
Table Op Msg_type Msg_text
-test.t1_will_crash analyze status OK
+test.t1_will_crash analyze Warning Engine-independent statistics are not collected for column 'c'
+test.t1_will_crash analyze status Operation failed
OPTIMIZE TABLE t1_will_crash;
Table Op Msg_type Msg_text
test.t1_will_crash optimize info Found row block followed by deleted block
diff --git a/mysql-test/suite/parts/r/rpl_partition.result b/mysql-test/suite/parts/r/rpl_partition.result
index 480ab219c34..dcd45ce4fe1 100644
--- a/mysql-test/suite/parts/r/rpl_partition.result
+++ b/mysql-test/suite/parts/r/rpl_partition.result
@@ -65,6 +65,10 @@ DELETE FROM t1 WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
+Warnings:
+Level Warning
+Code 1287
+Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE PROCEDURE p2()
BEGIN
DECLARE ins_count INT DEFAULT 1000;
@@ -86,6 +90,10 @@ DELETE FROM t2 WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
+Warnings:
+Level Warning
+Code 1287
+Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE PROCEDURE p3()
BEGIN
DECLARE ins_count INT DEFAULT 1000;
@@ -107,6 +115,10 @@ DELETE FROM t3 WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
+Warnings:
+Level Warning
+Code 1287
+Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
begin;
CALL p1();
commit;
diff --git a/mysql-test/suite/perfschema/r/bad_option_1.result b/mysql-test/suite/perfschema/r/bad_option_1.result
index 02fd04afd33..05ff5663882 100644
--- a/mysql-test/suite/perfschema/r/bad_option_1.result
+++ b/mysql-test/suite/perfschema/r/bad_option_1.result
@@ -1,2 +1,4 @@
+# Kill the server
Found: unknown variable 'performance-schema-enabled=maybe'
Found: Aborting
+# restart
diff --git a/mysql-test/suite/perfschema/r/bad_option_2.result b/mysql-test/suite/perfschema/r/bad_option_2.result
index 7a5ab31aabd..c2809525827 100644
--- a/mysql-test/suite/perfschema/r/bad_option_2.result
+++ b/mysql-test/suite/perfschema/r/bad_option_2.result
@@ -1 +1,3 @@
+# Kill the server
FOUND 1 /ambiguous option '--performance-schema-max_=12'/ in bad_option_2.txt
+# restart
diff --git a/mysql-test/suite/perfschema/r/bad_option_3.result b/mysql-test/suite/perfschema/r/bad_option_3.result
index ec717d6f7d2..1981f0b2044 100644
--- a/mysql-test/suite/perfschema/r/bad_option_3.result
+++ b/mysql-test/suite/perfschema/r/bad_option_3.result
@@ -1,2 +1,4 @@
+# Kill the server
Found: unknown option '-x'
Found: Aborting
+# restart
diff --git a/mysql-test/suite/perfschema/r/bad_option_4.result b/mysql-test/suite/perfschema/r/bad_option_4.result
index 812ee546200..1d50a80be3d 100644
--- a/mysql-test/suite/perfschema/r/bad_option_4.result
+++ b/mysql-test/suite/perfschema/r/bad_option_4.result
@@ -1,2 +1,4 @@
+# Kill the server
Found: Can't change dir to.*bad_option_h_param
Found: Aborting
+# restart
diff --git a/mysql-test/suite/perfschema/r/bad_option_5.result b/mysql-test/suite/perfschema/r/bad_option_5.result
index b318b6e0482..840717023ac 100644
--- a/mysql-test/suite/perfschema/r/bad_option_5.result
+++ b/mysql-test/suite/perfschema/r/bad_option_5.result
@@ -1,2 +1,4 @@
+# Kill the server
Found: unknown option '-X'
Found: Aborting
+# restart
diff --git a/mysql-test/suite/perfschema/r/dml_handler.result b/mysql-test/suite/perfschema/r/dml_handler.result
index ab850aee933..ce0f87e7733 100644
--- a/mysql-test/suite/perfschema/r/dml_handler.result
+++ b/mysql-test/suite/perfschema/r/dml_handler.result
@@ -6,6 +6,8 @@ SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA='performance_schema'
ORDER BY TABLE_NAME;
SELECT COUNT(*) FROM table_list INTO @table_count;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
# For each table in the performance schema, attempt HANDLER...OPEN,
# which should fail with an error 1031, ER_ILLEGAL_HA.
diff --git a/mysql-test/suite/perfschema/r/dml_setup_instruments.result b/mysql-test/suite/perfschema/r/dml_setup_instruments.result
index a5184782af8..a972cf02285 100644
--- a/mysql-test/suite/perfschema/r/dml_setup_instruments.result
+++ b/mysql-test/suite/perfschema/r/dml_setup_instruments.result
@@ -21,16 +21,16 @@ where name like 'Wait/Synch/Rwlock/sql/%'
'wait/synch/rwlock/sql/LOCK_named_pipe_full_access_group')
order by name limit 10;
NAME ENABLED TIMED
+wait/synch/rwlock/sql/LOCK_all_status_vars YES YES
wait/synch/rwlock/sql/LOCK_dboptions YES YES
wait/synch/rwlock/sql/LOCK_grant YES YES
wait/synch/rwlock/sql/LOCK_SEQUENCE YES YES
+wait/synch/rwlock/sql/LOCK_ssl_refresh YES YES
wait/synch/rwlock/sql/LOCK_system_variables_hash YES YES
wait/synch/rwlock/sql/LOCK_sys_init_connect YES YES
wait/synch/rwlock/sql/LOCK_sys_init_slave YES YES
wait/synch/rwlock/sql/LOGGER::LOCK_logger YES YES
wait/synch/rwlock/sql/MDL_context::LOCK_waiting_for YES YES
-wait/synch/rwlock/sql/MDL_lock::rwlock YES YES
-wait/synch/rwlock/sql/Query_cache_query::lock YES YES
select * from performance_schema.setup_instruments
where name like 'Wait/Synch/Cond/sql/%'
and name not in (
diff --git a/mysql-test/suite/perfschema/r/event_aggregate.result b/mysql-test/suite/perfschema/r/event_aggregate.result
index 9ab62329fc9..7fa08534bb1 100644
--- a/mysql-test/suite/perfschema/r/event_aggregate.result
+++ b/mysql-test/suite/perfschema/r/event_aggregate.result
@@ -235,38 +235,38 @@ wait/synch/rwlock/sql/LOCK_grant 1
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 3
-localhost stage/sql/Closing tables 4
+localhost stage/sql/Closing tables 6
localhost stage/sql/Init 5
localhost stage/sql/Init for update 1
-localhost stage/sql/Opening tables 3
+localhost stage/sql/Opening tables 4
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -363,10 +363,10 @@ wait/synch/rwlock/sql/LOCK_grant 1
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 0
user2 localhost stage/sql/Closing tables 0
user2 localhost stage/sql/Init 0
@@ -375,10 +375,10 @@ user2 localhost stage/sql/Opening tables 0
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 0
user2 stage/sql/Closing tables 0
user2 stage/sql/Init 0
@@ -387,24 +387,24 @@ user2 stage/sql/Opening tables 0
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 3
-localhost stage/sql/Closing tables 4
+localhost stage/sql/Closing tables 6
localhost stage/sql/Init 5
localhost stage/sql/Init for update 1
-localhost stage/sql/Opening tables 3
+localhost stage/sql/Opening tables 4
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -518,10 +518,10 @@ wait/synch/rwlock/sql/LOCK_grant 2
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -530,10 +530,10 @@ user2 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -542,24 +542,24 @@ user2 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 6
-localhost stage/sql/Closing tables 8
+localhost stage/sql/Closing tables 10
localhost stage/sql/Init 10
localhost stage/sql/Init for update 2
-localhost stage/sql/Opening tables 6
+localhost stage/sql/Opening tables 7
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -677,10 +677,10 @@ wait/synch/rwlock/sql/LOCK_grant 2
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -694,10 +694,10 @@ user3 localhost stage/sql/Opening tables 0
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -711,24 +711,24 @@ user3 stage/sql/Opening tables 0
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 6
-localhost stage/sql/Closing tables 8
+localhost stage/sql/Closing tables 10
localhost stage/sql/Init 10
localhost stage/sql/Init for update 2
-localhost stage/sql/Opening tables 6
+localhost stage/sql/Opening tables 7
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -863,10 +863,10 @@ wait/synch/rwlock/sql/LOCK_grant 3
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -880,10 +880,10 @@ user3 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -897,24 +897,24 @@ user3 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 9
-localhost stage/sql/Closing tables 12
+localhost stage/sql/Closing tables 14
localhost stage/sql/Init 15
localhost stage/sql/Init for update 3
-localhost stage/sql/Opening tables 9
+localhost stage/sql/Opening tables 10
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1053,10 +1053,10 @@ wait/synch/rwlock/sql/LOCK_grant 3
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -1075,10 +1075,10 @@ user4 localhost stage/sql/Opening tables 0
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -1097,24 +1097,24 @@ user4 stage/sql/Opening tables 0
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 9
-localhost stage/sql/Closing tables 12
+localhost stage/sql/Closing tables 14
localhost stage/sql/Init 15
localhost stage/sql/Init for update 3
-localhost stage/sql/Opening tables 9
+localhost stage/sql/Opening tables 10
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1270,10 +1270,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -1292,10 +1292,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -1314,24 +1314,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 20
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1478,10 +1478,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -1500,10 +1500,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -1522,24 +1522,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 21
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1684,10 +1684,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1706,10 +1706,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1728,24 +1728,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 22
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1887,10 +1887,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1909,10 +1909,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1931,24 +1931,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 23
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2087,10 +2087,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2109,10 +2109,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2131,24 +2131,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2288,10 +2288,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2310,10 +2310,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2332,24 +2332,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2488,10 +2488,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2510,10 +2510,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2532,24 +2532,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2688,10 +2688,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2710,10 +2710,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2732,24 +2732,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2888,10 +2888,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2910,10 +2910,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2932,24 +2932,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3088,10 +3088,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -3110,10 +3110,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -3132,24 +3132,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3288,10 +3288,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -3310,10 +3310,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -3332,24 +3332,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3510,10 +3510,10 @@ user4 localhost stage/sql/Opening tables 0
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -3532,24 +3532,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3732,24 +3732,24 @@ user4 stage/sql/Opening tables 0
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3939,17 +3939,17 @@ localhost stage/sql/Opening tables 0
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -4146,10 +4146,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -4346,10 +4346,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -4546,10 +4546,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -4746,10 +4746,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -4946,10 +4946,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -5146,10 +5146,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -5310,10 +5310,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -5418,10 +5418,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -5506,10 +5506,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_a.result b/mysql-test/suite/perfschema/r/event_aggregate_no_a.result
index ece0402335d..19dec52aa47 100644
--- a/mysql-test/suite/perfschema/r/event_aggregate_no_a.result
+++ b/mysql-test/suite/perfschema/r/event_aggregate_no_a.result
@@ -217,31 +217,31 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 3
-localhost stage/sql/Closing tables 4
+localhost stage/sql/Closing tables 6
localhost stage/sql/Init 5
localhost stage/sql/Init for update 1
-localhost stage/sql/Opening tables 3
+localhost stage/sql/Opening tables 4
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -326,10 +326,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 0
user2 stage/sql/Closing tables 0
user2 stage/sql/Init 0
@@ -338,24 +338,24 @@ user2 stage/sql/Opening tables 0
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 3
-localhost stage/sql/Closing tables 4
+localhost stage/sql/Closing tables 6
localhost stage/sql/Init 5
localhost stage/sql/Init for update 1
-localhost stage/sql/Opening tables 3
+localhost stage/sql/Opening tables 4
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -452,10 +452,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -464,24 +464,24 @@ user2 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 6
-localhost stage/sql/Closing tables 8
+localhost stage/sql/Closing tables 10
localhost stage/sql/Init 10
localhost stage/sql/Init for update 2
-localhost stage/sql/Opening tables 6
+localhost stage/sql/Opening tables 7
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -578,10 +578,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -595,24 +595,24 @@ user3 stage/sql/Opening tables 0
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 6
-localhost stage/sql/Closing tables 8
+localhost stage/sql/Closing tables 10
localhost stage/sql/Init 10
localhost stage/sql/Init for update 2
-localhost stage/sql/Opening tables 6
+localhost stage/sql/Opening tables 7
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -721,10 +721,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -738,24 +738,24 @@ user3 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 9
-localhost stage/sql/Closing tables 12
+localhost stage/sql/Closing tables 14
localhost stage/sql/Init 15
localhost stage/sql/Init for update 3
-localhost stage/sql/Opening tables 9
+localhost stage/sql/Opening tables 10
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -864,10 +864,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -886,24 +886,24 @@ user4 stage/sql/Opening tables 0
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 9
-localhost stage/sql/Closing tables 12
+localhost stage/sql/Closing tables 14
localhost stage/sql/Init 15
localhost stage/sql/Init for update 3
-localhost stage/sql/Opening tables 9
+localhost stage/sql/Opening tables 10
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1024,10 +1024,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -1046,24 +1046,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 20
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1175,10 +1175,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -1197,24 +1197,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 21
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1324,10 +1324,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1346,24 +1346,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 22
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1470,10 +1470,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1492,24 +1492,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 23
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1613,10 +1613,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1635,24 +1635,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1757,10 +1757,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1779,24 +1779,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1900,10 +1900,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1922,24 +1922,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2043,10 +2043,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2065,24 +2065,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2186,10 +2186,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2208,24 +2208,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2329,10 +2329,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2351,24 +2351,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2472,10 +2472,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2494,24 +2494,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2615,10 +2615,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2637,24 +2637,24 @@ user4 stage/sql/Opening tables 3
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2780,24 +2780,24 @@ user4 stage/sql/Opening tables 0
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2930,17 +2930,17 @@ localhost stage/sql/Opening tables 0
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3080,10 +3080,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3223,10 +3223,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3366,10 +3366,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3509,10 +3509,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3652,10 +3652,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3795,10 +3795,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3938,10 +3938,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -4045,10 +4045,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -4132,10 +4132,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_h.result b/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_h.result
index 083e1bffdd2..3aa27664cac 100644
--- a/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_h.result
+++ b/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_h.result
@@ -185,26 +185,26 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
execute dump_stages_host;
host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -280,10 +280,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 0
user2 stage/sql/Closing tables 0
user2 stage/sql/Init 0
@@ -294,17 +294,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -392,10 +392,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -406,17 +406,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -504,10 +504,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -523,17 +523,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -633,10 +633,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -652,17 +652,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -762,10 +762,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -786,17 +786,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -908,10 +908,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -932,17 +932,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1045,10 +1045,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -1069,17 +1069,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1180,10 +1180,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1204,17 +1204,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1312,10 +1312,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1336,17 +1336,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1441,10 +1441,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1465,17 +1465,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1571,10 +1571,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1595,17 +1595,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1700,10 +1700,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1724,17 +1724,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1829,10 +1829,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1853,17 +1853,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1958,10 +1958,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1982,17 +1982,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2087,10 +2087,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2111,17 +2111,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2216,10 +2216,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2240,17 +2240,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2345,10 +2345,10 @@ user host event_name count_star
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2369,17 +2369,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2498,17 +2498,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2627,17 +2627,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2763,10 +2763,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2892,10 +2892,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3021,10 +3021,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3150,10 +3150,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3279,10 +3279,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3408,10 +3408,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3537,10 +3537,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3630,10 +3630,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3703,10 +3703,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u.result b/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u.result
index c9b2b768691..4d4a842948f 100644
--- a/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u.result
+++ b/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u.result
@@ -199,24 +199,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 3
-localhost stage/sql/Closing tables 4
+localhost stage/sql/Closing tables 6
localhost stage/sql/Init 5
localhost stage/sql/Init for update 1
-localhost stage/sql/Opening tables 3
+localhost stage/sql/Opening tables 4
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -289,24 +289,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 3
-localhost stage/sql/Closing tables 4
+localhost stage/sql/Closing tables 6
localhost stage/sql/Init 5
localhost stage/sql/Init for update 1
-localhost stage/sql/Opening tables 3
+localhost stage/sql/Opening tables 4
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -386,24 +386,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 6
-localhost stage/sql/Closing tables 8
+localhost stage/sql/Closing tables 10
localhost stage/sql/Init 10
localhost stage/sql/Init for update 2
-localhost stage/sql/Opening tables 6
+localhost stage/sql/Opening tables 7
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -479,24 +479,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 6
-localhost stage/sql/Closing tables 8
+localhost stage/sql/Closing tables 10
localhost stage/sql/Init 10
localhost stage/sql/Init for update 2
-localhost stage/sql/Opening tables 6
+localhost stage/sql/Opening tables 7
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -579,24 +579,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 9
-localhost stage/sql/Closing tables 12
+localhost stage/sql/Closing tables 14
localhost stage/sql/Init 15
localhost stage/sql/Init for update 3
-localhost stage/sql/Opening tables 9
+localhost stage/sql/Opening tables 10
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -675,24 +675,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 9
-localhost stage/sql/Closing tables 12
+localhost stage/sql/Closing tables 14
localhost stage/sql/Init 15
localhost stage/sql/Init for update 3
-localhost stage/sql/Opening tables 9
+localhost stage/sql/Opening tables 10
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -778,24 +778,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 20
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -872,24 +872,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 21
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -964,24 +964,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 22
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1053,24 +1053,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 23
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1139,24 +1139,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1226,24 +1226,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1312,24 +1312,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1398,24 +1398,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1484,24 +1484,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1570,24 +1570,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1656,24 +1656,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1742,24 +1742,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1828,24 +1828,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1921,17 +1921,17 @@ localhost stage/sql/Opening tables 0
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2014,10 +2014,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2100,10 +2100,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2186,10 +2186,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2272,10 +2272,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2358,10 +2358,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2444,10 +2444,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2530,10 +2530,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2616,10 +2616,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2702,10 +2702,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u_no_h.result b/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u_no_h.result
index 89443515f31..db281652c0c 100644
--- a/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u_no_h.result
+++ b/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u_no_h.result
@@ -169,17 +169,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -245,17 +245,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -328,17 +328,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -407,17 +407,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -493,17 +493,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -575,17 +575,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -664,17 +664,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -744,17 +744,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -822,17 +822,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -897,17 +897,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -969,17 +969,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1042,17 +1042,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1114,17 +1114,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1186,17 +1186,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1258,17 +1258,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1330,17 +1330,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1402,17 +1402,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1474,17 +1474,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1546,17 +1546,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1618,17 +1618,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1697,10 +1697,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1769,10 +1769,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1841,10 +1841,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1913,10 +1913,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -1985,10 +1985,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2057,10 +2057,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2129,10 +2129,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2201,10 +2201,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -2273,10 +2273,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_h.result b/mysql-test/suite/perfschema/r/event_aggregate_no_h.result
index 0ea5d90ea41..c8996fa2846 100644
--- a/mysql-test/suite/perfschema/r/event_aggregate_no_h.result
+++ b/mysql-test/suite/perfschema/r/event_aggregate_no_h.result
@@ -203,33 +203,33 @@ wait/synch/rwlock/sql/LOCK_grant 1
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
execute dump_stages_host;
host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -317,10 +317,10 @@ wait/synch/rwlock/sql/LOCK_grant 1
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 0
user2 localhost stage/sql/Closing tables 0
user2 localhost stage/sql/Init 0
@@ -329,10 +329,10 @@ user2 localhost stage/sql/Opening tables 0
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 0
user2 stage/sql/Closing tables 0
user2 stage/sql/Init 0
@@ -343,17 +343,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -458,10 +458,10 @@ wait/synch/rwlock/sql/LOCK_grant 2
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -470,10 +470,10 @@ user2 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -484,17 +484,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -603,10 +603,10 @@ wait/synch/rwlock/sql/LOCK_grant 2
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -620,10 +620,10 @@ user3 localhost stage/sql/Opening tables 0
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -639,17 +639,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -775,10 +775,10 @@ wait/synch/rwlock/sql/LOCK_grant 3
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -792,10 +792,10 @@ user3 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -811,17 +811,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -951,10 +951,10 @@ wait/synch/rwlock/sql/LOCK_grant 3
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -973,10 +973,10 @@ user4 localhost stage/sql/Opening tables 0
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -997,17 +997,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1154,10 +1154,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -1176,10 +1176,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 5
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -1200,17 +1200,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1348,10 +1348,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -1370,10 +1370,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 5
@@ -1394,17 +1394,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1540,10 +1540,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1562,10 +1562,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1586,17 +1586,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1729,10 +1729,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1751,10 +1751,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1775,17 +1775,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1915,10 +1915,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1937,10 +1937,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -1961,17 +1961,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2102,10 +2102,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2124,10 +2124,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2148,17 +2148,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2288,10 +2288,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2310,10 +2310,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2334,17 +2334,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2474,10 +2474,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2496,10 +2496,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2520,17 +2520,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2660,10 +2660,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2682,10 +2682,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2706,17 +2706,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2846,10 +2846,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2868,10 +2868,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -2892,17 +2892,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3032,10 +3032,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -3054,10 +3054,10 @@ user4 localhost stage/sql/Opening tables 3
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -3078,17 +3078,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3240,10 +3240,10 @@ user4 localhost stage/sql/Opening tables 0
execute dump_stages_user;
user event_name count_star
user1 stage/sql/Checking permissions 3
-user1 stage/sql/Closing tables 4
+user1 stage/sql/Closing tables 6
user1 stage/sql/Init 6
user1 stage/sql/Init for update 1
-user1 stage/sql/Opening tables 3
+user1 stage/sql/Opening tables 4
user2 stage/sql/Checking permissions 3
user2 stage/sql/Closing tables 4
user2 stage/sql/Init 6
@@ -3264,17 +3264,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3450,17 +3450,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3636,17 +3636,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3829,10 +3829,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -4015,10 +4015,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -4201,10 +4201,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -4387,10 +4387,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -4573,10 +4573,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -4759,10 +4759,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -4909,10 +4909,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -5003,10 +5003,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -5077,10 +5077,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_u.result b/mysql-test/suite/perfschema/r/event_aggregate_no_u.result
index 52c674024bd..508a65d9ba0 100644
--- a/mysql-test/suite/perfschema/r/event_aggregate_no_u.result
+++ b/mysql-test/suite/perfschema/r/event_aggregate_no_u.result
@@ -215,33 +215,33 @@ wait/synch/rwlock/sql/LOCK_grant 1
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
execute dump_stages_user;
user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 3
-localhost stage/sql/Closing tables 4
+localhost stage/sql/Closing tables 6
localhost stage/sql/Init 5
localhost stage/sql/Init for update 1
-localhost stage/sql/Opening tables 3
+localhost stage/sql/Opening tables 4
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -324,10 +324,10 @@ wait/synch/rwlock/sql/LOCK_grant 1
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 0
user2 localhost stage/sql/Closing tables 0
user2 localhost stage/sql/Init 0
@@ -338,24 +338,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 3
-localhost stage/sql/Closing tables 4
+localhost stage/sql/Closing tables 6
localhost stage/sql/Init 5
localhost stage/sql/Init for update 1
-localhost stage/sql/Opening tables 3
+localhost stage/sql/Opening tables 4
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -450,10 +450,10 @@ wait/synch/rwlock/sql/LOCK_grant 2
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -464,24 +464,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 6
-localhost stage/sql/Closing tables 8
+localhost stage/sql/Closing tables 10
localhost stage/sql/Init 10
localhost stage/sql/Init for update 2
-localhost stage/sql/Opening tables 6
+localhost stage/sql/Opening tables 7
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -576,10 +576,10 @@ wait/synch/rwlock/sql/LOCK_grant 2
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -595,24 +595,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 6
-localhost stage/sql/Closing tables 8
+localhost stage/sql/Closing tables 10
localhost stage/sql/Init 10
localhost stage/sql/Init for update 2
-localhost stage/sql/Opening tables 6
+localhost stage/sql/Opening tables 7
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -719,10 +719,10 @@ wait/synch/rwlock/sql/LOCK_grant 3
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -738,24 +738,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 9
-localhost stage/sql/Closing tables 12
+localhost stage/sql/Closing tables 14
localhost stage/sql/Init 15
localhost stage/sql/Init for update 3
-localhost stage/sql/Opening tables 9
+localhost stage/sql/Opening tables 10
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -862,10 +862,10 @@ wait/synch/rwlock/sql/LOCK_grant 3
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -886,24 +886,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 9
-localhost stage/sql/Closing tables 12
+localhost stage/sql/Closing tables 14
localhost stage/sql/Init 15
localhost stage/sql/Init for update 3
-localhost stage/sql/Opening tables 9
+localhost stage/sql/Opening tables 10
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1022,10 +1022,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -1046,24 +1046,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 20
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1173,10 +1173,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -1197,24 +1197,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 21
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1322,10 +1322,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1346,24 +1346,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 22
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1468,10 +1468,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1492,24 +1492,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 23
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1611,10 +1611,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1635,24 +1635,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1755,10 +1755,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1779,24 +1779,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1898,10 +1898,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1922,24 +1922,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2041,10 +2041,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2065,24 +2065,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2184,10 +2184,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2208,24 +2208,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2327,10 +2327,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2351,24 +2351,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2470,10 +2470,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2494,24 +2494,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2637,24 +2637,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2780,24 +2780,24 @@ user event_name count_star
execute dump_stages_host;
host event_name count_star
localhost stage/sql/Checking permissions 12
-localhost stage/sql/Closing tables 16
+localhost stage/sql/Closing tables 18
localhost stage/sql/Init 24
localhost stage/sql/Init for update 4
-localhost stage/sql/Opening tables 12
+localhost stage/sql/Opening tables 13
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2930,17 +2930,17 @@ localhost stage/sql/Opening tables 0
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3080,10 +3080,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3223,10 +3223,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3366,10 +3366,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3509,10 +3509,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3652,10 +3652,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3795,10 +3795,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3902,10 +3902,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3989,10 +3989,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -4076,10 +4076,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_u_no_h.result b/mysql-test/suite/perfschema/r/event_aggregate_no_u_no_h.result
index a493631d71f..579fadc51c0 100644
--- a/mysql-test/suite/perfschema/r/event_aggregate_no_u_no_h.result
+++ b/mysql-test/suite/perfschema/r/event_aggregate_no_u_no_h.result
@@ -183,10 +183,10 @@ wait/synch/rwlock/sql/LOCK_grant 1
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
execute dump_stages_user;
user event_name count_star
execute dump_stages_host;
@@ -194,17 +194,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -278,10 +278,10 @@ wait/synch/rwlock/sql/LOCK_grant 1
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 0
user2 localhost stage/sql/Closing tables 0
user2 localhost stage/sql/Init 0
@@ -294,17 +294,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 3
-stage/sql/Closing tables 4
+stage/sql/Closing tables 6
stage/sql/Init 5
stage/sql/Init for update 1
-stage/sql/Opening tables 3
+stage/sql/Opening tables 4
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -390,10 +390,10 @@ wait/synch/rwlock/sql/LOCK_grant 2
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -406,17 +406,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -502,10 +502,10 @@ wait/synch/rwlock/sql/LOCK_grant 2
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -523,17 +523,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 6
-stage/sql/Closing tables 8
+stage/sql/Closing tables 10
stage/sql/Init 10
stage/sql/Init for update 2
-stage/sql/Opening tables 6
+stage/sql/Opening tables 7
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -631,10 +631,10 @@ wait/synch/rwlock/sql/LOCK_grant 3
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -652,17 +652,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -760,10 +760,10 @@ wait/synch/rwlock/sql/LOCK_grant 3
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -786,17 +786,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 9
-stage/sql/Closing tables 12
+stage/sql/Closing tables 14
stage/sql/Init 15
stage/sql/Init for update 3
-stage/sql/Opening tables 9
+stage/sql/Opening tables 10
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -906,10 +906,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 5
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -932,17 +932,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 20
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1043,10 +1043,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 5
@@ -1069,17 +1069,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 21
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1178,10 +1178,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1204,17 +1204,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 22
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1310,10 +1310,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1336,17 +1336,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 23
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1439,10 +1439,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1465,17 +1465,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1569,10 +1569,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1595,17 +1595,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1698,10 +1698,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1724,17 +1724,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1827,10 +1827,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1853,17 +1853,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -1956,10 +1956,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -1982,17 +1982,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2085,10 +2085,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2111,17 +2111,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2214,10 +2214,10 @@ wait/synch/rwlock/sql/LOCK_grant 4
execute dump_stages_account;
user host event_name count_star
user1 localhost stage/sql/Checking permissions 3
-user1 localhost stage/sql/Closing tables 4
+user1 localhost stage/sql/Closing tables 6
user1 localhost stage/sql/Init 6
user1 localhost stage/sql/Init for update 1
-user1 localhost stage/sql/Opening tables 3
+user1 localhost stage/sql/Opening tables 4
user2 localhost stage/sql/Checking permissions 3
user2 localhost stage/sql/Closing tables 4
user2 localhost stage/sql/Init 6
@@ -2240,17 +2240,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2369,17 +2369,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2498,17 +2498,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2627,17 +2627,17 @@ host event_name count_star
execute dump_stages_global;
event_name count_star
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2763,10 +2763,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -2892,10 +2892,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3021,10 +3021,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3150,10 +3150,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3279,10 +3279,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3408,10 +3408,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
user1 localhost statement/com/Error 0
@@ -3501,10 +3501,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3574,10 +3574,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3647,10 +3647,10 @@ stage/sql/Opening tables 0
execute dump_stages_history;
event_name count(event_name)
stage/sql/Checking permissions 12
-stage/sql/Closing tables 16
+stage/sql/Closing tables 18
stage/sql/Init 24
stage/sql/Init for update 4
-stage/sql/Opening tables 12
+stage/sql/Opening tables 13
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_allow.result
index a172dff7935..7963bed8213 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_allow.result
@@ -118,7 +118,7 @@ Con4 is alive
Con4 is alive
select current_user();
current_user()
-root@192.0.2.4
+root@santa.claus.ipv4.example.com
disconnect con4;
connection default;
"Dumping performance_schema.host_cache"
@@ -155,7 +155,7 @@ Con5 is alive
Con5 is alive
select current_user();
current_user()
-root@192.0.2.4
+root@santa.claus.ipv4.example.com
disconnect con5;
connection default;
"Dumping performance_schema.host_cache"
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_allow.result
index 4fdc6ef1b4c..baddc88b116 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_allow.result
@@ -118,7 +118,7 @@ Con4 is alive
Con4 is alive
select current_user();
current_user()
-root@2001:db8::6:6
+root@santa.claus.ipv6.example.com
disconnect con4;
connection default;
"Dumping performance_schema.host_cache"
@@ -155,7 +155,7 @@ Con5 is alive
Con5 is alive
select current_user();
current_user()
-root@2001:db8::6:6
+root@santa.claus.ipv6.example.com
disconnect con5;
connection default;
"Dumping performance_schema.host_cache"
diff --git a/mysql-test/suite/perfschema/r/ortho_iter.result b/mysql-test/suite/perfschema/r/ortho_iter.result
index 299551051c9..a1fb88e7059 100644
--- a/mysql-test/suite/perfschema/r/ortho_iter.result
+++ b/mysql-test/suite/perfschema/r/ortho_iter.result
@@ -95,6 +95,8 @@ close pfs_cursor;
signal sqlstate '01000' set message_text='Done', mysql_errno=12000;
end
$
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
show variables where
`Variable_name` != "performance_schema_max_statement_classes" and
`Variable_name` like "performance_schema%";
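Note on the new Warning 1287 lines in this and the following result files: they come from the trailing INTO placement used inside the test procedures. A minimal illustration of the two placements the warning text itself refers to, with @cnt as a purely illustrative variable name:

select count(*) from performance_schema.setup_instruments into @cnt;  -- deprecated trailing INTO, raises warning 1287
select count(*) into @cnt from performance_schema.setup_instruments;  -- placement recommended by the warning

Both forms store the same value; only the position of INTO relative to FROM differs.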
diff --git a/mysql-test/suite/perfschema/r/privilege_table_io.result b/mysql-test/suite/perfschema/r/privilege_table_io.result
index 5443c178502..2cdcb494d6a 100644
--- a/mysql-test/suite/perfschema/r/privilege_table_io.result
+++ b/mysql-test/suite/perfschema/r/privilege_table_io.result
@@ -1,3 +1,4 @@
+# restart
drop table if exists test.marker;
create table test.marker(a int);
update performance_schema.setup_consumers set enabled='NO';
@@ -8,7 +9,6 @@ truncate table performance_schema.events_waits_history_long;
flush status;
flush tables;
# We are forced to suppress here the server response.
-optimize table mysql.host;
optimize table mysql.user;
optimize table mysql.db;
optimize table mysql.proxies_priv;
@@ -96,12 +96,11 @@ where event_name like 'wait/io/table/%'
and object_schema in ("test", "mysql")
order by thread_id, event_id;
event_name short_source object_type object_schema pretty_name operation number_of_bytes
-wait/io/table/sql/handler handler.cc: TABLE mysql host fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE mysql user fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE mysql user fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE mysql user fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE mysql user fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE mysql user fetch NULL
+wait/io/table/sql/handler handler.cc: TABLE mysql global_priv fetch NULL
+wait/io/table/sql/handler handler.cc: TABLE mysql global_priv fetch NULL
+wait/io/table/sql/handler handler.cc: TABLE mysql global_priv fetch NULL
+wait/io/table/sql/handler handler.cc: TABLE mysql global_priv fetch NULL
+wait/io/table/sql/handler handler.cc: TABLE mysql global_priv fetch NULL
wait/io/table/sql/handler handler.cc: TABLE mysql db fetch NULL
wait/io/table/sql/handler handler.cc: TABLE mysql db fetch NULL
wait/io/table/sql/handler handler.cc: TABLE mysql db fetch NULL
@@ -112,6 +111,8 @@ wait/io/table/sql/handler handler.cc: TABLE mysql roles_mapping fetch NULL
wait/io/table/sql/handler handler.cc: TABLE mysql tables_priv fetch NULL
wait/io/table/sql/handler handler.cc: TABLE mysql procs_priv fetch NULL
wait/io/table/sql/handler handler.cc: TABLE mysql servers fetch NULL
+wait/io/table/sql/handler handler.cc: TABLE mysql table_stats fetch NULL
+wait/io/table/sql/handler handler.cc: TABLE mysql column_stats fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
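The expected table-io waits above now reference mysql.global_priv instead of mysql.user, and the mysql.host rows are gone. This is consistent with 10.4 storing privilege data in mysql.global_priv and keeping mysql.user as a compatibility view (an assumption inferred from the changed expectations, not stated in the diff). A quick way to inspect the layout on a running server:

select table_name, table_type
from information_schema.tables
where table_schema = 'mysql'
  and table_name in ('global_priv', 'user', 'host');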
diff --git a/mysql-test/suite/perfschema/r/selects.result b/mysql-test/suite/perfschema/r/selects.result
index c14d152856f..d623d45a6e8 100644
--- a/mysql-test/suite/perfschema/r/selects.result
+++ b/mysql-test/suite/perfschema/r/selects.result
@@ -93,6 +93,8 @@ SELECT thread_id FROM performance_schema.threads
WHERE PROCESSLIST_ID = conid INTO pid;
END;
|
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL t_ps_proc(connection_id(), @p_id);
DROP FUNCTION IF EXISTS t_ps_proc;
CREATE FUNCTION t_ps_func(conid INT) RETURNS int
diff --git a/mysql-test/suite/perfschema/r/server_init.result b/mysql-test/suite/perfschema/r/server_init.result
index 1bdb9884606..25f3f180735 100644
--- a/mysql-test/suite/perfschema/r/server_init.result
+++ b/mysql-test/suite/perfschema/r/server_init.result
@@ -68,10 +68,6 @@ where name like "wait/synch/mutex/sql/LOCK_crypt";
count(name)
1
select count(name) from mutex_instances
-where name like "wait/synch/mutex/sql/LOCK_slave_list";
-count(name)
-1
-select count(name) from mutex_instances
where name like "wait/synch/mutex/sql/LOCK_active_mi";
count(name)
1
diff --git a/mysql-test/suite/perfschema/r/setup_instruments_defaults.result b/mysql-test/suite/perfschema/r/setup_instruments_defaults.result
index f31bb1a8b7a..7384313917d 100644
--- a/mysql-test/suite/perfschema/r/setup_instruments_defaults.result
+++ b/mysql-test/suite/perfschema/r/setup_instruments_defaults.result
@@ -17,7 +17,6 @@ SELECT * FROM performance_schema.setup_instruments
WHERE name = 'wait/synch/mutex/sql/LOCK_thread_count'
AND enabled = 'no' AND timed = 'no';
NAME ENABLED TIMED
-wait/synch/mutex/sql/LOCK_thread_count NO NO
SELECT * FROM performance_schema.setup_instruments
WHERE name IN (
'wait/synch/mutex/sql/LOG_INFO::lock',
@@ -25,7 +24,6 @@ WHERE name IN (
AND enabled = 'yes' AND timed = 'yes'
ORDER BY name;
NAME ENABLED TIMED
-wait/synch/mutex/sql/LOG_INFO::lock YES YES
wait/synch/mutex/sql/THD::LOCK_thd_data YES YES
SELECT * FROM performance_schema.setup_instruments
WHERE name = 'wait/synch/mutex/sql/hash_filo::lock'
diff --git a/mysql-test/suite/perfschema/r/stage_mdl_global.result b/mysql-test/suite/perfschema/r/stage_mdl_global.result
index 7d15b250bd9..11531124cae 100644
--- a/mysql-test/suite/perfschema/r/stage_mdl_global.result
+++ b/mysql-test/suite/perfschema/r/stage_mdl_global.result
@@ -22,7 +22,7 @@ call dump_one_thread('user2');
username event_name sql_text
user2 statement/sql/insert insert into test.t1 values (1), (2), (3)
username event_name nesting_event_type
-user2 stage/sql/Waiting for global read lock STATEMENT
+user2 stage/sql/Waiting for backup lock STATEMENT
username event_name nesting_event_type
user2 stage/sql/Init STATEMENT
user2 stage/sql/Checking permissions STATEMENT
diff --git a/mysql-test/suite/perfschema/r/table_schema.result b/mysql-test/suite/perfschema/r/table_schema.result
index 5c4cf88e9a5..8caf2017fd2 100644
--- a/mysql-test/suite/perfschema/r/table_schema.result
+++ b/mysql-test/suite/perfschema/r/table_schema.result
@@ -799,6 +799,8 @@ def performance_schema users TOTAL_CONNECTIONS 3 NULL NO bigint NULL NULL 19 0 N
select count(*) from information_schema.columns
where table_schema="performance_schema" and data_type = "bigint"
and column_name like "%number_of_bytes" into @count_byte_columns;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select @count_byte_columns > 0;
@count_byte_columns > 0
1
@@ -806,12 +808,16 @@ select count(*) from information_schema.columns
where table_schema="performance_schema" and data_type="bigint"
and column_name like "%number_of_bytes"
and column_type not like "%unsigned" into @count_byte_signed;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select (@count_byte_columns - @count_byte_signed) = 0;
(@count_byte_columns - @count_byte_signed) = 0
1
select count(*) from information_schema.columns
where table_schema="performance_schema" and data_type = "bigint"
and column_name like "%object_instance_begin" into @count_object_columns;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select @count_object_columns > 0;
@count_object_columns > 0
1
@@ -819,6 +825,8 @@ select count(*) from information_schema.columns
where table_schema="performance_schema" and data_type="bigint"
and column_name like "%object_instance_begin"
and column_type like "%unsigned" into @count_object_unsigned;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select (@count_object_columns - @count_object_unsigned) = 0;
(@count_object_columns - @count_object_unsigned) = 0
1
diff --git a/mysql-test/suite/perfschema/t/bad_option_1.test b/mysql-test/suite/perfschema/t/bad_option_1.test
index 1c76dbdc512..bba0d6d5357 100644
--- a/mysql-test/suite/perfschema/t/bad_option_1.test
+++ b/mysql-test/suite/perfschema/t/bad_option_1.test
@@ -4,9 +4,13 @@
--source include/not_embedded.inc
--source include/have_perfschema.inc
+--source include/kill_mysqld.inc
+
let $outfile= $MYSQLTEST_VARDIR/tmp/bad_option_1.txt;
+--disable_warnings
--error 0,1
--remove_file $outfile
+--enable_warnings
--error 7
--exec $MYSQLD_BOOTSTRAP_CMD --loose-console --performance-schema-enabled=maybe > $outfile 2>&1
@@ -32,3 +36,4 @@ perl;
EOF
--remove_file $outfile
+--source include/start_mysqld.inc
diff --git a/mysql-test/suite/perfschema/t/bad_option_2.test b/mysql-test/suite/perfschema/t/bad_option_2.test
index ce3e5c3243b..d6c2b905167 100644
--- a/mysql-test/suite/perfschema/t/bad_option_2.test
+++ b/mysql-test/suite/perfschema/t/bad_option_2.test
@@ -4,9 +4,13 @@
--source include/not_embedded.inc
--source include/have_perfschema.inc
+--source include/kill_mysqld.inc
+
let $outfile= $MYSQLTEST_VARDIR/tmp/bad_option_2.txt;
+--disable_warnings
--error 0,1
--remove_file $outfile
+--enable_warnings
--error 3
--exec $MYSQLD_BOOTSTRAP_CMD --loose-console --enable-getopt-prefix-matching --performance-schema-max_=12 > $outfile 2>&1
--let SEARCH_PATTERN=ambiguous option '--performance-schema-max_=12'
@@ -14,3 +18,4 @@ let $outfile= $MYSQLTEST_VARDIR/tmp/bad_option_2.txt;
--source include/search_pattern_in_file.inc
--remove_file $outfile
+--source include/start_mysqld.inc
diff --git a/mysql-test/suite/perfschema/t/bad_option_3.test b/mysql-test/suite/perfschema/t/bad_option_3.test
index 63565df14a2..a4efcbccd74 100644
--- a/mysql-test/suite/perfschema/t/bad_option_3.test
+++ b/mysql-test/suite/perfschema/t/bad_option_3.test
@@ -4,9 +4,13 @@
--source include/not_embedded.inc
--source include/have_perfschema.inc
+--source include/kill_mysqld.inc
+
let $outfile= $MYSQLTEST_VARDIR/tmp/bad_option_3.txt;
+--disable_warnings
--error 0,1
--remove_file $outfile
+--enable_warnings
--error 2
--exec $MYSQLD_BOOTSTRAP_CMD --loose-console -a -x > $outfile 2>&1
@@ -32,3 +36,4 @@ perl;
EOF
--remove_file $outfile
+--source include/start_mysqld.inc
diff --git a/mysql-test/suite/perfschema/t/bad_option_4.test b/mysql-test/suite/perfschema/t/bad_option_4.test
index 27a32c201a9..ee8705788bc 100644
--- a/mysql-test/suite/perfschema/t/bad_option_4.test
+++ b/mysql-test/suite/perfschema/t/bad_option_4.test
@@ -4,9 +4,13 @@
--source include/not_embedded.inc
--source include/have_perfschema.inc
+--source include/kill_mysqld.inc
+
let $outfile= $MYSQLTEST_VARDIR/tmp/bad_option_4.txt;
+--disable_warnings
--error 0,1
--remove_file $outfile
+--enable_warnings
--error 1
--exec $MYSQLD_BOOTSTRAP_CMD --loose-console -a -h bad_option_h_param > $outfile 2>&1
@@ -32,3 +36,4 @@ perl;
EOF
--remove_file $outfile
+--source include/start_mysqld.inc
diff --git a/mysql-test/suite/perfschema/t/bad_option_5.test b/mysql-test/suite/perfschema/t/bad_option_5.test
index ada8228b249..c9c6fc75d52 100644
--- a/mysql-test/suite/perfschema/t/bad_option_5.test
+++ b/mysql-test/suite/perfschema/t/bad_option_5.test
@@ -4,9 +4,13 @@
--source include/not_embedded.inc
--source include/have_perfschema.inc
+--source include/kill_mysqld.inc
+
let $outfile= $MYSQLTEST_VARDIR/tmp/bad_option_5.txt;
+--disable_warnings
--error 0,1
--remove_file $outfile
+--enable_warnings
--error 2
--exec $MYSQLD_BOOTSTRAP_CMD --loose-console -aXbroken > $outfile 2>&1
@@ -35,3 +39,4 @@ perl;
EOF
--remove_file $outfile
+--source include/start_mysqld.inc
diff --git a/mysql-test/suite/perfschema/t/privilege_table_io.test b/mysql-test/suite/perfschema/t/privilege_table_io.test
index 6f729537072..a74805f5c7b 100644
--- a/mysql-test/suite/perfschema/t/privilege_table_io.test
+++ b/mysql-test/suite/perfschema/t/privilege_table_io.test
@@ -17,7 +17,6 @@
# Therefore we suppress the query_log here.
--echo # We are forced to suppress here the server response.
--disable_result_log
-optimize table mysql.host;
optimize table mysql.user;
optimize table mysql.db;
optimize table mysql.proxies_priv;
diff --git a/mysql-test/suite/perfschema/t/server_init.test b/mysql-test/suite/perfschema/t/server_init.test
index c6d25f18426..36e09adea78 100644
--- a/mysql-test/suite/perfschema/t/server_init.test
+++ b/mysql-test/suite/perfschema/t/server_init.test
@@ -71,9 +71,6 @@ select count(name) from mutex_instances
where name like "wait/synch/mutex/sql/LOCK_crypt";
select count(name) from mutex_instances
- where name like "wait/synch/mutex/sql/LOCK_slave_list";
-
-select count(name) from mutex_instances
where name like "wait/synch/mutex/sql/LOCK_active_mi";
select count(name) from mutex_instances
diff --git a/mysql-test/suite/perfschema/t/stage_mdl_global.test b/mysql-test/suite/perfschema/t/stage_mdl_global.test
index 8863d2da903..03c3d315899 100644
--- a/mysql-test/suite/perfschema/t/stage_mdl_global.test
+++ b/mysql-test/suite/perfschema/t/stage_mdl_global.test
@@ -9,7 +9,7 @@ flush tables with read lock;
connect (con2, localhost, user2, , );
-# Will wait on con1, "Waiting for global read lock"
+# Will wait on con1, "Waiting for backup lock"
--send
insert into test.t1 values (1), (2), (3);
@@ -26,7 +26,7 @@ let $wait_condition=
let $wait_condition=
select count(*) = 1 from performance_schema.threads
where `TYPE`='FOREGROUND' and PROCESSLIST_USER like 'user2'
- and PROCESSLIST_STATE = 'Waiting for global read lock';
+ and PROCESSLIST_STATE = 'Waiting for backup lock';
--source include/wait_condition.inc
call dump_one_thread('user1');
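The renamed wait state above reflects that the blocking scenario in this test now surfaces as a backup-lock wait rather than a global-read-lock wait. A condensed sketch of that scenario, assuming two client connections named con1 and con2 as in the test:

connection con1;
flush tables with read lock;
connection con2;
insert into test.t1 values (1), (2), (3);
# con2 now reports PROCESSLIST_STATE = 'Waiting for backup lock'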
diff --git a/mysql-test/suite/period/create_triggers.inc b/mysql-test/suite/period/create_triggers.inc
new file mode 100644
index 00000000000..1126ae0845b
--- /dev/null
+++ b/mysql-test/suite/period/create_triggers.inc
@@ -0,0 +1,38 @@
+create or replace table log_tbl(id int auto_increment primary key, log text) engine=myisam;
+
+create or replace procedure log(s text)
+ insert into log_tbl(log) values(s);
+
+if (!$trig_table)
+{
+ die "No $trig_table specified";
+}
+
+if (!$trig_cols)
+{
+ let $trig_cols= s, e;
+}
+
+let $old_trig_args= `select REGEXP_REPLACE('$trig_cols', '([[:word:]]+)',
+ 'old.\\\\\\\\1')`;
+let $old_trig_args= `select REPLACE('$old_trig_args', ',', ', ", ", ')`;
+let $new_trig_args= `select REGEXP_REPLACE('$trig_cols', '([[:word:]]+)',
+ 'new.\\\\\\\\1')`;
+let $new_trig_args= `select REPLACE('$new_trig_args', ',', ', ", ", ')`;
+
+eval create trigger tr1upd_$trig_table before update on $trig_table
+ for each row call log(CONCAT('>UPD: ', $old_trig_args, ' -> ', $new_trig_args));
+eval create trigger tr2upd_$trig_table after update on $trig_table
+ for each row call log(CONCAT('<UPD: ', $old_trig_args, ' -> ', $new_trig_args));
+eval create trigger tr1del_$trig_table before delete on $trig_table
+ for each row call log(CONCAT('>DEL: ', $old_trig_args));
+eval create trigger tr2del_$trig_table after delete on $trig_table
+ for each row call log(CONCAT('<DEL: ', $old_trig_args));
+eval create trigger tr1ins_$trig_table before insert on $trig_table
+ for each row call log(CONCAT('>INS: ', $new_trig_args));
+eval create trigger tr2ins_$trig_table after insert on $trig_table
+ for each row call log(CONCAT('<INS: ', $new_trig_args));
+
+
+let trig_cols= 0;
+let trig_table= 0;
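For readability: the include above is driven by two mysqltest variables, $trig_table (required, the include dies without it) and $trig_cols (optional, defaulting to "s, e"), and it creates before/after triggers that log every INSERT, UPDATE and DELETE on that table. A hedged usage sketch only; the table name t1, the column list and the source path are illustrative assumptions, not taken from the commit:

--let $trig_table= t1
--let $trig_cols= id, s, e
--source suite/period/create_triggers.inc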
diff --git a/mysql-test/suite/period/engines.combinations b/mysql-test/suite/period/engines.combinations
new file mode 100644
index 00000000000..b740ec62a42
--- /dev/null
+++ b/mysql-test/suite/period/engines.combinations
@@ -0,0 +1,6 @@
+[innodb]
+innodb
+default-storage-engine=innodb
+
+[myisam]
+default-storage-engine=myisam
diff --git a/mysql-test/suite/period/engines.inc b/mysql-test/suite/period/engines.inc
new file mode 100644
index 00000000000..9a52c7d0640
--- /dev/null
+++ b/mysql-test/suite/period/engines.inc
@@ -0,0 +1,3 @@
+#
+# see engines.combinations
+#
diff --git a/mysql-test/suite/period/r/alter.result b/mysql-test/suite/period/r/alter.result
new file mode 100644
index 00000000000..e202ba2698f
--- /dev/null
+++ b/mysql-test/suite/period/r/alter.result
@@ -0,0 +1,176 @@
+set @s= '1992-01-01';
+set @e= '1999-12-31';
+create table t (s date, e date);
+# period start/end columns are implicit NOT NULL
+alter table t add period for a(s, e);
+show create table t;
+Table Create Table
+t CREATE TABLE `t` (
+ `s` date NOT NULL,
+ `e` date NOT NULL,
+ PERIOD FOR `a` (`s`, `e`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+create or replace table t (s date, e date);
+alter table t change s s date, add period for a(s, e);
+show create table t;
+Table Create Table
+t CREATE TABLE `t` (
+ `s` date NOT NULL,
+ `e` date NOT NULL,
+ PERIOD FOR `a` (`s`, `e`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+alter table t add id int;
+show create table t;
+Table Create Table
+t CREATE TABLE `t` (
+ `s` date NOT NULL,
+ `e` date NOT NULL,
+ `id` int(11) DEFAULT NULL,
+ PERIOD FOR `a` (`s`, `e`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+alter table t drop id;
+show create table t;
+Table Create Table
+t CREATE TABLE `t` (
+ `s` date NOT NULL,
+ `e` date NOT NULL,
+ PERIOD FOR `a` (`s`, `e`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert t values(@e, @s);
+ERROR 23000: CONSTRAINT `a` failed for `test`.`t`
+alter table t drop constraint a;
+ERROR HY000: Can't DROP CONSTRAINT `a`. Use DROP PERIOD `a` for this
+# no-op
+alter table t drop period if exists for b;
+Warnings:
+Note 1091 Can't DROP PERIOD `b`; check that it exists
+# no-op
+alter table t add period if not exists for a(e, s);
+Warnings:
+Note 1060 Duplicate column name 'a'
+alter table t drop period if exists for a;
+# no-op
+alter table t drop period if exists for a;
+Warnings:
+Note 1091 Can't DROP PERIOD `a`; check that it exists
+alter table t add period for a(s, e), add period if not exists for a(e, s);
+show create table t;
+Table Create Table
+t CREATE TABLE `t` (
+ `s` date NOT NULL,
+ `e` date NOT NULL,
+ PERIOD FOR `a` (`s`, `e`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+alter table t drop period for a;
+# Constraint is dropped
+insert t values(@e, @s);
+alter table t drop period for a;
+ERROR 42000: Can't DROP PERIOD `a`; check that it exists
+alter table t add period for a(s, e), drop period for a;
+ERROR 42000: Can't DROP PERIOD `a`; check that it exists
+truncate t;
+alter table t add period for a(s, e);
+insert t values(@e, @s);
+ERROR 23000: CONSTRAINT `a` failed for `test`.`t`
+alter table t add period for a(s, e), drop period for a;
+insert t values(@e, @s);
+ERROR 23000: CONSTRAINT `a` failed for `test`.`t`
+alter table t add s1 date not null, add period for b(s1, e), drop period for a;
+show create table t;
+Table Create Table
+t CREATE TABLE `t` (
+ `s` date NOT NULL,
+ `e` date NOT NULL,
+ `s1` date NOT NULL,
+ PERIOD FOR `b` (`s1`, `e`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert t(s, s1, e) values(@e, @s, @e);
+insert t(s, s1, e) values(@e, @e, @s);
+ERROR 23000: CONSTRAINT `b` failed for `test`.`t`
+create table t1 like t;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `s` date NOT NULL,
+ `e` date NOT NULL,
+ `s1` date NOT NULL,
+ PERIOD FOR `b` (`s1`, `e`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+drop table t1;
+create table t2 (period for b(s,e)) select * from t;
+ERROR 23000: CONSTRAINT `b` failed for `test`.`t2`
+create table t2 (period for b(s1,e)) select * from t;
+drop table t2;
+# SQL16 11.27 <add table period definition>, Syntax Rules, 5)g)
+# The declared type of BC1 shall be either DATE or a timestamp type
+# and shall be equivalent to the declared type of BC2.
+create or replace table t (s timestamp not null, e timestamp(6) not null);
+alter table t add period for a(s, e);
+ERROR HY000: Fields of PERIOD FOR `a` have different types
+# SQL16 11.27 <add table period definition>, Syntax Rules, 5)c)
+# No column of T shall have a column name that is equivalent to ATPN.
+create or replace table t (a int, s date, e date);
+alter table t add period for a(s, e);
+ERROR 42S21: Duplicate column name 'a'
+# SQL16 11.27 <add table period definition>, Syntax Rules, 5)i)
+# Neither BC1 nor BC2 shall be an identity column, a generated column,
+# a system-time period start column, or a system-time period end column.
+create or replace table t (id int primary key,
+s date,
+e date generated always as (s+1));
+alter table t add period for a(s, e);
+ERROR HY000: Period field `e` cannot be GENERATED ALWAYS AS
+create or replace table t (id int primary key,
+s date,
+e date as (s+1) VIRTUAL);
+alter table t add period for a(s, e);
+ERROR HY000: Period field `e` cannot be GENERATED ALWAYS AS
+create or replace table t (id int primary key, s timestamp(6), e timestamp(6),
+st timestamp(6) as row start,
+en timestamp(6) as row end,
+period for system_time (st, en)) with system versioning;
+alter table t add period for a(s, en);
+ERROR HY000: Period field `en` cannot be GENERATED ALWAYS AS
+# SQL16 11.27 <add table period definition>, Syntax Rules, 5)b)
+# The table descriptor of T shall not include a period descriptor other
+# than a system-time period descriptor.
+alter table t add period for a(s, e);
+alter table t add period for b(s, e);
+ERROR HY000: Cannot specify more than one application-time period
+# SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)e)v)2)B)
+# Let S be the schema identified by the explicit or implicit
+# <schema name> of TN. Let IDCN be an implementation-dependent
+# <constraint name> that is not equivalent to the <constraint name> of
+# any table constraint descriptor included in S. The following
+# <table constraint definition> is implicit:
+# CONSTRAINT IDCN CHECK ( CN1 < CN2 )
+#
+# Due to the above standard limitation, the constraint name can't always
+# match the period name. So it matches when possible; and when not, it
+# is unique not taken name prefixed with period name.
+create or replace table t (x int, s date, e date,
+period for mytime(s, e));
+show create table t;
+Table Create Table
+t CREATE TABLE `t` (
+ `x` int(11) DEFAULT NULL,
+ `s` date NOT NULL,
+ `e` date NOT NULL,
+ PERIOD FOR `mytime` (`s`, `e`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+alter table t add constraint mytime check (x > 1);
+show create table t;
+Table Create Table
+t CREATE TABLE `t` (
+ `x` int(11) DEFAULT NULL,
+ `s` date NOT NULL,
+ `e` date NOT NULL,
+ PERIOD FOR `mytime` (`s`, `e`),
+ CONSTRAINT `mytime` CHECK (`x` > 1)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert t values (2, @e, @s);
+ERROR 23000: CONSTRAINT `mytime_1` failed for `test`.`t`
+alter table t add constraint mytime_1 check (x > 2);
+insert t values (3, @e, @s);
+ERROR 23000: CONSTRAINT `mytime_2` failed for `test`.`t`
+drop table t;
diff --git a/mysql-test/suite/period/r/create.result b/mysql-test/suite/period/r/create.result
new file mode 100644
index 00000000000..8cedb23465d
--- /dev/null
+++ b/mysql-test/suite/period/r/create.result
@@ -0,0 +1,98 @@
+create table t (id int primary key, s date, e date, period for mytime(s,e));
+# CONSTRAINT CHECK (s < e) is added implicitly, and shouldn't be shown
+# this is important for correct command-based replication
+show create table t;
+Table Create Table
+t CREATE TABLE `t` (
+ `id` int(11) NOT NULL,
+ `s` date NOT NULL,
+ `e` date NOT NULL,
+ PRIMARY KEY (`id`),
+ PERIOD FOR `mytime` (`s`, `e`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+create or replace table t (id int primary key, s timestamp(6), e timestamp(6),
+period for mytime(s,e));
+show create table t;
+Table Create Table
+t CREATE TABLE `t` (
+ `id` int(11) NOT NULL,
+ `s` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ `e` timestamp(6) NOT NULL DEFAULT '0000-00-00 00:00:00.000000',
+ PRIMARY KEY (`id`),
+ PERIOD FOR `mytime` (`s`, `e`)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+# SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)a)
+# 2) If a <table period definition> TPD is specified, then:
+# a) <table scope> shall not be specified.
+create or replace temporary table t (s date, e date, period for mytime(s,e));
+ERROR HY000: Application-time period table cannot be temporary
+# SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)e)iii)
+# The <data type or domain name> contained in CD1 is either DATE or a
+# timestamp type and it is equivalent to the <data type or domain name>
+# contained in CD2.
+create or replace table t (id int primary key, s datetime, e date,
+period for mytime(s,e));
+ERROR HY000: Fields of PERIOD FOR `mytime` have different types
+create or replace table t (s timestamp(2), e timestamp(6),
+period for mytime(s,e));
+ERROR HY000: Fields of PERIOD FOR `mytime` have different types
+create or replace table t (id int primary key, s int, e date,
+period for mytime(s,e));
+ERROR 42000: Incorrect column specifier for column 's'
+create or replace table t (id int primary key, s time, e time,
+period for mytime(s,e));
+ERROR 42000: Incorrect column specifier for column 's'
+create or replace table t (id int primary key, s date, e date,
+period for mytime(s,x));
+ERROR 42S22: Unknown column 'x' in 'mytime'
+create or replace table t (id int primary key, s date, e date,
+period for mytime(s,e),
+period for mytime2(s,e));
+ERROR HY000: Cannot specify more than one application-time period
+# SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)d)
+# No <column name> in any <column definition> shall be equivalent to PN.
+create or replace table t (mytime int, s date, e date,
+period for mytime(s,e));
+ERROR 42S21: Duplicate column name 'mytime'
+# SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)e)v)2)A)
+# Neither CD1 nor CD2 shall contain an <identity column specification>, a
+# <generation clause>, a <system time period start column specification>,
+# or a <system time period end column specification>.
+create or replace table t (id int primary key,
+s date,
+e date generated always as (s+1),
+period for mytime(s,e));
+ERROR HY000: Period field `e` cannot be GENERATED ALWAYS AS
+create or replace table t (id int primary key,
+s date,
+e date as (s+1) VIRTUAL,
+period for mytime(s,e));
+ERROR HY000: Period field `e` cannot be GENERATED ALWAYS AS
+create or replace table t (id int primary key, s timestamp(6), e timestamp(6),
+st timestamp(6) as row start,
+en timestamp(6) as row end,
+period for system_time (st, en),
+period for mytime(st,e)) with system versioning;
+ERROR HY000: Period field `st` cannot be GENERATED ALWAYS AS
+# SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)
+# Let IDCN be an implementation-dependent <constraint name> that is not
+# equivalent to the <constraint name> of any table constraint descriptor
+# included in S.
+create or replace table t (x int, s date, e date,
+period for mytime(s, e),
+constraint mytime check (x > 1));
+show create table t;
+Table Create Table
+t CREATE TABLE `t` (
+ `x` int(11) DEFAULT NULL,
+ `s` date NOT NULL,
+ `e` date NOT NULL,
+ PERIOD FOR `mytime` (`s`, `e`),
+ CONSTRAINT `mytime` CHECK (`x` > 1)
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+insert t values (2, '2001-01-01', '2001-01-01');
+ERROR 23000: CONSTRAINT `mytime_1` failed for `test`.`t`
+show status like "Feature_application_time_periods";
+Variable_name Value
+Feature_application_time_periods 6
+drop table t;
diff --git a/mysql-test/suite/period/r/delete,myisam.rdiff b/mysql-test/suite/period/r/delete,myisam.rdiff
new file mode 100644
index 00000000000..78fb972b0bc
--- /dev/null
+++ b/mysql-test/suite/period/r/delete,myisam.rdiff
@@ -0,0 +1,18 @@
+--- suite/period/r/delete.result 2019-02-16 11:14:23.511258191 +0100
++++ suite/period/r/delete.reject 2019-02-16 11:14:32.869258690 +0100
+@@ -250,7 +250,6 @@
+ ERROR 22003: Out of range value for column 'id' at row 1
+ select * from t;
+ id s e
+-127 1999-01-01 2018-12-12
+ # same for trigger case
+ create or replace table log_tbl(id int auto_increment primary key, log text) engine=myisam;
+ create or replace procedure log(s text)
+@@ -277,7 +276,6 @@
+ ERROR 22003: Out of range value for column 'id' at row 1
+ select * from t;
+ id s e
+-127 1999-01-01 2018-12-12
+ select * from log_tbl order by id;
+ id log
+ 1 >DEL: 1999-01-01, 2018-12-12
diff --git a/mysql-test/suite/period/r/delete.result b/mysql-test/suite/period/r/delete.result
new file mode 100644
index 00000000000..428200a4564
--- /dev/null
+++ b/mysql-test/suite/period/r/delete.result
@@ -0,0 +1,358 @@
+create table t (id int, s date, e date, period for apptime(s,e));
+insert into t values(1, '1999-01-01', '2018-12-12');
+insert into t values(1, '1999-01-01', '2017-01-01');
+insert into t values(1, '2017-01-01', '2019-01-01');
+insert into t values(2, '1998-01-01', '2018-12-12');
+insert into t values(3, '1997-01-01', '2015-01-01');
+insert into t values(4, '2016-01-01', '2020-01-01');
+insert into t values(5, '2010-01-01', '2015-01-01');
+create or replace table t1 (id int, s date, e date, period for apptime(s,e));
+insert t1 select * from t;
+create or replace table t2 (id int, s date, e date, period for apptime(s,e));
+insert t2 select * from t;
+create or replace table t3 (id int, s date, e date, period for apptime(s,e));
+insert t3 select * from t;
+create or replace table log_tbl(id int auto_increment primary key, log text) engine=myisam;
+create or replace procedure log(s text)
+insert into log_tbl(log) values(s);
+create trigger tr1upd_t1 before update on t1
+for each row call log(CONCAT('>UPD: ', old.id, ", ", old.s, ", ", old.e, ' -> ', new.id, ", ", new.s, ", ", new.e));
+create trigger tr2upd_t1 after update on t1
+for each row call log(CONCAT('<UPD: ', old.id, ", ", old.s, ", ", old.e, ' -> ', new.id, ", ", new.s, ", ", new.e));
+create trigger tr1del_t1 before delete on t1
+for each row call log(CONCAT('>DEL: ', old.id, ", ", old.s, ", ", old.e));
+create trigger tr2del_t1 after delete on t1
+for each row call log(CONCAT('<DEL: ', old.id, ", ", old.s, ", ", old.e));
+create trigger tr1ins_t1 before insert on t1
+for each row call log(CONCAT('>INS: ', new.id, ", ", new.s, ", ", new.e));
+create trigger tr2ins_t1 after insert on t1
+for each row call log(CONCAT('<INS: ', new.id, ", ", new.s, ", ", new.e));
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+delete from t1 for portion of APPTIME from '2000-01-01' to '2018-01-01';
+select * from t;
+id s e
+1 1999-01-01 2000-01-01
+1 1999-01-01 2000-01-01
+1 2018-01-01 2018-12-12
+1 2018-01-01 2019-01-01
+2 1998-01-01 2000-01-01
+2 2018-01-01 2018-12-12
+3 1997-01-01 2000-01-01
+4 2018-01-01 2020-01-01
+select * from t1;
+id s e
+1 1999-01-01 2000-01-01
+1 1999-01-01 2000-01-01
+1 2018-01-01 2018-12-12
+1 2018-01-01 2019-01-01
+2 1998-01-01 2000-01-01
+2 2018-01-01 2018-12-12
+3 1997-01-01 2000-01-01
+4 2018-01-01 2020-01-01
+select * from log_tbl order by id;
+id log
+1 >DEL: 1, 1999-01-01, 2018-12-12
+2 >INS: 1, 1999-01-01, 2000-01-01
+3 <INS: 1, 1999-01-01, 2000-01-01
+4 >INS: 1, 2018-01-01, 2018-12-12
+5 <INS: 1, 2018-01-01, 2018-12-12
+6 <DEL: 1, 1999-01-01, 2018-12-12
+7 >DEL: 1, 1999-01-01, 2017-01-01
+8 >INS: 1, 1999-01-01, 2000-01-01
+9 <INS: 1, 1999-01-01, 2000-01-01
+10 <DEL: 1, 1999-01-01, 2017-01-01
+11 >DEL: 1, 2017-01-01, 2019-01-01
+12 >INS: 1, 2018-01-01, 2019-01-01
+13 <INS: 1, 2018-01-01, 2019-01-01
+14 <DEL: 1, 2017-01-01, 2019-01-01
+15 >DEL: 2, 1998-01-01, 2018-12-12
+16 >INS: 2, 1998-01-01, 2000-01-01
+17 <INS: 2, 1998-01-01, 2000-01-01
+18 >INS: 2, 2018-01-01, 2018-12-12
+19 <INS: 2, 2018-01-01, 2018-12-12
+20 <DEL: 2, 1998-01-01, 2018-12-12
+21 >DEL: 3, 1997-01-01, 2015-01-01
+22 >INS: 3, 1997-01-01, 2000-01-01
+23 <INS: 3, 1997-01-01, 2000-01-01
+24 <DEL: 3, 1997-01-01, 2015-01-01
+25 >DEL: 4, 2016-01-01, 2020-01-01
+26 >INS: 4, 2018-01-01, 2020-01-01
+27 <INS: 4, 2018-01-01, 2020-01-01
+28 <DEL: 4, 2016-01-01, 2020-01-01
+29 >DEL: 5, 2010-01-01, 2015-01-01
+30 <DEL: 5, 2010-01-01, 2015-01-01
+# INSERT trigger only also works
+create or replace table log_tbl(id int auto_increment primary key, log text) engine=myisam;
+create or replace procedure log(s text)
+insert into log_tbl(log) values(s);
+create trigger tr1upd_t2 before update on t2
+for each row call log(CONCAT('>UPD: ', old.id, ", ", old.s, ", ", old.e, ' -> ', new.id, ", ", new.s, ", ", new.e));
+create trigger tr2upd_t2 after update on t2
+for each row call log(CONCAT('<UPD: ', old.id, ", ", old.s, ", ", old.e, ' -> ', new.id, ", ", new.s, ", ", new.e));
+create trigger tr1del_t2 before delete on t2
+for each row call log(CONCAT('>DEL: ', old.id, ", ", old.s, ", ", old.e));
+create trigger tr2del_t2 after delete on t2
+for each row call log(CONCAT('<DEL: ', old.id, ", ", old.s, ", ", old.e));
+create trigger tr1ins_t2 before insert on t2
+for each row call log(CONCAT('>INS: ', new.id, ", ", new.s, ", ", new.e));
+create trigger tr2ins_t2 after insert on t2
+for each row call log(CONCAT('<INS: ', new.id, ", ", new.s, ", ", new.e));
+drop trigger tr1del_t2;
+drop trigger tr2del_t2;
+delete from t2 for portion of APPTIME from '2000-01-01' to '2018-01-01';
+select * from log_tbl order by id;
+id log
+1 >INS: 1, 1999-01-01, 2000-01-01
+2 <INS: 1, 1999-01-01, 2000-01-01
+3 >INS: 1, 2018-01-01, 2018-12-12
+4 <INS: 1, 2018-01-01, 2018-12-12
+5 >INS: 1, 1999-01-01, 2000-01-01
+6 <INS: 1, 1999-01-01, 2000-01-01
+7 >INS: 1, 2018-01-01, 2019-01-01
+8 <INS: 1, 2018-01-01, 2019-01-01
+9 >INS: 2, 1998-01-01, 2000-01-01
+10 <INS: 2, 1998-01-01, 2000-01-01
+11 >INS: 2, 2018-01-01, 2018-12-12
+12 <INS: 2, 2018-01-01, 2018-12-12
+13 >INS: 3, 1997-01-01, 2000-01-01
+14 <INS: 3, 1997-01-01, 2000-01-01
+15 >INS: 4, 2018-01-01, 2020-01-01
+16 <INS: 4, 2018-01-01, 2020-01-01
+# removing the BEFORE INSERT trigger enables the internal substitution
+# DELETE+INSERT -> UPDATE, but without any side effects.
+# The optimization is disabled for non-transactional engines
+create or replace table log_tbl(id int auto_increment primary key, log text) engine=myisam;
+create or replace procedure log(s text)
+insert into log_tbl(log) values(s);
+create trigger tr1upd_t3 before update on t3
+for each row call log(CONCAT('>UPD: ', old.s, ", ", old.e, ' -> ', new.s, ", ", new.e));
+create trigger tr2upd_t3 after update on t3
+for each row call log(CONCAT('<UPD: ', old.s, ", ", old.e, ' -> ', new.s, ", ", new.e));
+create trigger tr1del_t3 before delete on t3
+for each row call log(CONCAT('>DEL: ', old.s, ", ", old.e));
+create trigger tr2del_t3 after delete on t3
+for each row call log(CONCAT('<DEL: ', old.s, ", ", old.e));
+create trigger tr1ins_t3 before insert on t3
+for each row call log(CONCAT('>INS: ', new.s, ", ", new.e));
+create trigger tr2ins_t3 after insert on t3
+for each row call log(CONCAT('<INS: ', new.s, ", ", new.e));
+drop trigger tr1ins_t3;
+delete from t3 for portion of APPTIME from '2000-01-01' to '2018-01-01';
+select * from log_tbl order by id;
+id log
+1 >DEL: 1999-01-01, 2018-12-12
+2 <INS: 1999-01-01, 2000-01-01
+3 <INS: 2018-01-01, 2018-12-12
+4 <DEL: 1999-01-01, 2018-12-12
+5 >DEL: 1999-01-01, 2017-01-01
+6 <INS: 1999-01-01, 2000-01-01
+7 <DEL: 1999-01-01, 2017-01-01
+8 >DEL: 2017-01-01, 2019-01-01
+9 <INS: 2018-01-01, 2019-01-01
+10 <DEL: 2017-01-01, 2019-01-01
+11 >DEL: 1998-01-01, 2018-12-12
+12 <INS: 1998-01-01, 2000-01-01
+13 <INS: 2018-01-01, 2018-12-12
+14 <DEL: 1998-01-01, 2018-12-12
+15 >DEL: 1997-01-01, 2015-01-01
+16 <INS: 1997-01-01, 2000-01-01
+17 <DEL: 1997-01-01, 2015-01-01
+18 >DEL: 2016-01-01, 2020-01-01
+19 <INS: 2018-01-01, 2020-01-01
+20 <DEL: 2016-01-01, 2020-01-01
+21 >DEL: 2010-01-01, 2015-01-01
+22 <DEL: 2010-01-01, 2015-01-01
+# multi-table DELETE is not possible
+delete t, t1 from t1, t for portion of apptime from '2000-01-01' to '2018-01-01';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'for portion of apptime from '2000-01-01' to '2018-01-01'' at line 1
+delete t for portion of apptime from '2000-01-01' to '2018-01-01', t1 from t, t1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'for portion of apptime from '2000-01-01' to '2018-01-01', t1 from t, t1' at line 1
+# Here another check fails before parsing ends
+delete t, t1 from t for portion of apptime from '2000-01-01' to '2018-01-01', t1;
+ERROR 42S02: Unknown table 't1' in MULTI DELETE
+delete history from t2 for portion of apptime from '2000-01-01' to '2018-01-01';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'for portion of apptime from '2000-01-01' to '2018-01-01'' at line 1
+delete from t for portion of othertime from '2000-01-01' to '2018-01-01';
+ERROR HY000: Period `othertime` is not found in table
+delete from t for portion of system_time from '2000-01-01' to '2018-01-01';
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'of system_time from '2000-01-01' to '2018-01-01'' at line 1
+create or replace table t (id int, str text, s date, e date,
+period for apptime(s,e));
+insert into t values(1, 'data', '1999-01-01', '2018-12-12');
+insert into t values(1, 'other data', '1999-01-01', '2018-12-12');
+insert into t values(1, 'deleted', '2000-01-01', '2018-01-01');
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+show warnings;
+Level Code Message
+select * from t;
+id str s e
+1 data 1999-01-01 2000-01-01
+1 data 2018-01-01 2018-12-12
+1 other data 1999-01-01 2000-01-01
+1 other data 2018-01-01 2018-12-12
+drop table t1;
+# SQL16, Part 2, 15.7 <Effect of deleting rows from base tables>,
+# General rules, 8)b)i)
+# If the column descriptor that corresponds to the i-th field of BR
+# describes an identity column, a generated column, a system-time period
+# start column, or a system-time period end column, then let V i be
+# DEFAULT.
+# auto_increment field is updated
+create or replace table t (id int primary key auto_increment, s date, e date,
+period for apptime(s, e));
+insert into t values (default, '1999-01-01', '2018-12-12');
+select * from t;
+id s e
+1 1999-01-01 2018-12-12
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+select * from t;
+id s e
+2 1999-01-01 2000-01-01
+3 2018-01-01 2018-12-12
+truncate t;
+# same for trigger case
+insert into t values (default, '1999-01-01', '2018-12-12');
+create or replace table log_tbl(id int auto_increment primary key, log text) engine=myisam;
+create or replace procedure log(s text)
+insert into log_tbl(log) values(s);
+create trigger tr1upd_t before update on t
+for each row call log(CONCAT('>UPD: ', old.s, ", ", old.e, ' -> ', new.s, ", ", new.e));
+create trigger tr2upd_t after update on t
+for each row call log(CONCAT('<UPD: ', old.s, ", ", old.e, ' -> ', new.s, ", ", new.e));
+create trigger tr1del_t before delete on t
+for each row call log(CONCAT('>DEL: ', old.s, ", ", old.e));
+create trigger tr2del_t after delete on t
+for each row call log(CONCAT('<DEL: ', old.s, ", ", old.e));
+create trigger tr1ins_t before insert on t
+for each row call log(CONCAT('>INS: ', new.s, ", ", new.e));
+create trigger tr2ins_t after insert on t
+for each row call log(CONCAT('<INS: ', new.s, ", ", new.e));
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+select * from t;
+id s e
+2 1999-01-01 2000-01-01
+3 2018-01-01 2018-12-12
+select * from log_tbl order by id;
+id log
+1 >DEL: 1999-01-01, 2018-12-12
+2 >INS: 1999-01-01, 2000-01-01
+3 <INS: 1999-01-01, 2000-01-01
+4 >INS: 2018-01-01, 2018-12-12
+5 <INS: 2018-01-01, 2018-12-12
+6 <DEL: 1999-01-01, 2018-12-12
+# generated columns are updated
+create or replace table t (s date, e date,
+xs date as (s) stored, xe date as (e) stored,
+period for apptime(s, e));
+insert into t values('1999-01-01', '2018-12-12', default, default);
+select * from t;
+s e xs xe
+1999-01-01 2018-12-12 1999-01-01 2018-12-12
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+select * from t;
+s e xs xe
+1999-01-01 2000-01-01 1999-01-01 2000-01-01
+2018-01-01 2018-12-12 2018-01-01 2018-12-12
+truncate t;
+# same for trigger case
+insert into t values('1999-01-01', '2018-12-12', default, default);
+create or replace table log_tbl(id int auto_increment primary key, log text) engine=myisam;
+create or replace procedure log(s text)
+insert into log_tbl(log) values(s);
+create trigger tr1upd_t before update on t
+for each row call log(CONCAT('>UPD: ', old.s, ", ", old.e, ' -> ', new.s, ", ", new.e));
+create trigger tr2upd_t after update on t
+for each row call log(CONCAT('<UPD: ', old.s, ", ", old.e, ' -> ', new.s, ", ", new.e));
+create trigger tr1del_t before delete on t
+for each row call log(CONCAT('>DEL: ', old.s, ", ", old.e));
+create trigger tr2del_t after delete on t
+for each row call log(CONCAT('<DEL: ', old.s, ", ", old.e));
+create trigger tr1ins_t before insert on t
+for each row call log(CONCAT('>INS: ', new.s, ", ", new.e));
+create trigger tr2ins_t after insert on t
+for each row call log(CONCAT('<INS: ', new.s, ", ", new.e));
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+select * from t;
+s e xs xe
+1999-01-01 2000-01-01 1999-01-01 2000-01-01
+2018-01-01 2018-12-12 2018-01-01 2018-12-12
+select * from log_tbl order by id;
+id log
+1 >DEL: 1999-01-01, 2018-12-12
+2 >INS: 1999-01-01, 2000-01-01
+3 <INS: 1999-01-01, 2000-01-01
+4 >INS: 2018-01-01, 2018-12-12
+5 <INS: 2018-01-01, 2018-12-12
+6 <DEL: 1999-01-01, 2018-12-12
+# View can't be used
+create or replace view v as select * from t;
+delete from v for portion of p from '2000-01-01' to '2018-01-01';
+ERROR 42S02: 'v' is a view
+# View can't be used
+create or replace view v as select t.* from t, t as t1;
+delete from v for portion of p from '2000-01-01' to '2018-01-01';
+ERROR HY000: Can not delete from join view 'test.v'
+# auto_increment field overflow
+create or replace table t (id tinyint auto_increment primary key,
+s date, e date, period for apptime(s,e));
+insert into t values(127, '1999-01-01', '2018-12-12');
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+ERROR 22003: Out of range value for column 'id' at row 1
+select * from t;
+id s e
+127 1999-01-01 2018-12-12
+# same for trigger case
+create or replace table log_tbl(id int auto_increment primary key, log text) engine=myisam;
+create or replace procedure log(s text)
+insert into log_tbl(log) values(s);
+create trigger tr1upd_t before update on t
+for each row call log(CONCAT('>UPD: ', old.s, ", ", old.e, ' -> ', new.s, ", ", new.e));
+create trigger tr2upd_t after update on t
+for each row call log(CONCAT('<UPD: ', old.s, ", ", old.e, ' -> ', new.s, ", ", new.e));
+create trigger tr1del_t before delete on t
+for each row call log(CONCAT('>DEL: ', old.s, ", ", old.e));
+create trigger tr2del_t after delete on t
+for each row call log(CONCAT('<DEL: ', old.s, ", ", old.e));
+create trigger tr1ins_t before insert on t
+for each row call log(CONCAT('>INS: ', new.s, ", ", new.e));
+create trigger tr2ins_t after insert on t
+for each row call log(CONCAT('<INS: ', new.s, ", ", new.e));
+# undo side effects of the non-transactional MyISAM engine
+replace into t values(127, '1999-01-01', '2018-12-12');
+select * from t;
+id s e
+127 1999-01-01 2018-12-12
+truncate table log_tbl;
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+ERROR 22003: Out of range value for column 'id' at row 1
+select * from t;
+id s e
+127 1999-01-01 2018-12-12
+select * from log_tbl order by id;
+id log
+1 >DEL: 1999-01-01, 2018-12-12
+# custom constraint for period fields
+create or replace table t(id int, s date, e date, period for apptime(s,e),
+constraint dist2days check (datediff(e, s) >= 2));
+insert into t values(1, '1999-01-01', '2018-12-12'),
+(2, '1999-01-01', '1999-12-12');
+delete from t for portion of apptime from '1999-01-02' to '2018-12-12';
+ERROR 23000: CONSTRAINT `dist2days` failed for `test`.`t`
+# undo side effects of the non-transactional MyISAM engine
+truncate t;
+insert into t values(1, '1999-01-01', '2018-12-12'),
+(2, '1999-01-01', '1999-12-12');
+delete from t for portion of apptime from '1999-01-01' to '2018-12-11';
+ERROR 23000: CONSTRAINT `dist2days` failed for `test`.`t`
+truncate t;
+insert into t values(1, '1999-01-01', '2018-12-12'),
+(2, '1999-01-01', '1999-12-12');
+delete from t for portion of apptime from '1999-01-03' to '2018-12-10';
+select *, datediff(e, s) from t;
+id s e datediff(e, s)
+1 1999-01-01 1999-01-03 2
+1 2018-12-10 2018-12-12 2
+2 1999-01-01 1999-01-03 2
+drop table t,t2,t3,log_tbl;
+drop view v;
+drop procedure log;
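To summarise the pattern this result file exercises: DELETE ... FOR PORTION OF removes only the overlap between each row's period and the [FROM, TO) range, then re-inserts the non-overlapping remainders, which is why the INSERT triggers fire. A minimal sketch with a hypothetical table:

create table d_demo (id int, s date, e date, period for apptime(s, e));
insert d_demo values (1, '1999-01-01', '2018-12-12');
delete from d_demo for portion of apptime from '2000-01-01' to '2018-01-01';
-- only the span [2000-01-01, 2018-01-01) is gone; the remainders stay:
--   1  1999-01-01  2000-01-01
--   1  2018-01-01  2018-12-12
select * from d_demo order by s;
drop table d_demo;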
diff --git a/mysql-test/suite/period/r/update.result b/mysql-test/suite/period/r/update.result
new file mode 100644
index 00000000000..b86537fc9fb
--- /dev/null
+++ b/mysql-test/suite/period/r/update.result
@@ -0,0 +1,276 @@
+create table t (id int, s date, e date, period for apptime(s,e));
+insert into t values(1, '1999-01-01', '2018-12-12');
+insert into t values(1, '1999-01-01', '2017-01-01');
+insert into t values(1, '2017-01-01', '2019-01-01');
+insert into t values(2, '1998-01-01', '2018-12-12');
+insert into t values(3, '1997-01-01', '2015-01-01');
+insert into t values(4, '2016-01-01', '2020-01-01');
+insert into t values(5, '2010-01-01', '2015-01-01');
+create or replace table t1 (id int, s date, e date, period for apptime(s,e));
+insert t1 select * from t;
+create or replace table t2 (id int, s date, e date, period for apptime(s,e));
+insert t2 select * from t;
+update t for portion of apptime from '2000-01-01' to '2018-01-01'
+ set id=id + 6;
+select * from t;
+id s e
+1 1999-01-01 2000-01-01
+1 1999-01-01 2000-01-01
+1 2018-01-01 2018-12-12
+1 2018-01-01 2019-01-01
+10 2016-01-01 2018-01-01
+11 2010-01-01 2015-01-01
+2 1998-01-01 2000-01-01
+2 2018-01-01 2018-12-12
+3 1997-01-01 2000-01-01
+4 2018-01-01 2020-01-01
+7 2000-01-01 2017-01-01
+7 2000-01-01 2018-01-01
+7 2017-01-01 2018-01-01
+8 2000-01-01 2018-01-01
+9 2000-01-01 2015-01-01
+# Check triggers
+create or replace table log_tbl(id int auto_increment primary key, log text) engine=myisam;
+create or replace procedure log(s text)
+insert into log_tbl(log) values(s);
+create trigger tr1upd_t1 before update on t1
+for each row call log(CONCAT('>UPD: ', old.id, ", ", old.s, ", ", old.e, ' -> ', new.id, ", ", new.s, ", ", new.e));
+create trigger tr2upd_t1 after update on t1
+for each row call log(CONCAT('<UPD: ', old.id, ", ", old.s, ", ", old.e, ' -> ', new.id, ", ", new.s, ", ", new.e));
+create trigger tr1del_t1 before delete on t1
+for each row call log(CONCAT('>DEL: ', old.id, ", ", old.s, ", ", old.e));
+create trigger tr2del_t1 after delete on t1
+for each row call log(CONCAT('<DEL: ', old.id, ", ", old.s, ", ", old.e));
+create trigger tr1ins_t1 before insert on t1
+for each row call log(CONCAT('>INS: ', new.id, ", ", new.s, ", ", new.e));
+create trigger tr2ins_t1 after insert on t1
+for each row call log(CONCAT('<INS: ', new.id, ", ", new.s, ", ", new.e));
+update t1 for portion of apptime from '2000-01-01' to '2018-01-01'
+ set id=id + 6;
+select * from t1;
+id s e
+1 1999-01-01 2000-01-01
+1 1999-01-01 2000-01-01
+1 2018-01-01 2018-12-12
+1 2018-01-01 2019-01-01
+10 2016-01-01 2018-01-01
+11 2010-01-01 2015-01-01
+2 1998-01-01 2000-01-01
+2 2018-01-01 2018-12-12
+3 1997-01-01 2000-01-01
+4 2018-01-01 2020-01-01
+7 2000-01-01 2017-01-01
+7 2000-01-01 2018-01-01
+7 2017-01-01 2018-01-01
+8 2000-01-01 2018-01-01
+9 2000-01-01 2015-01-01
+select * from log_tbl order by id;
+id log
+1 >UPD: 1, 1999-01-01, 2018-12-12 -> 7, 2000-01-01, 2018-01-01
+2 >INS: 1, 1999-01-01, 2000-01-01
+3 <INS: 1, 1999-01-01, 2000-01-01
+4 >INS: 1, 2018-01-01, 2018-12-12
+5 <INS: 1, 2018-01-01, 2018-12-12
+6 <UPD: 1, 1999-01-01, 2018-12-12 -> 7, 2000-01-01, 2018-01-01
+7 >UPD: 1, 1999-01-01, 2017-01-01 -> 7, 2000-01-01, 2017-01-01
+8 >INS: 1, 1999-01-01, 2000-01-01
+9 <INS: 1, 1999-01-01, 2000-01-01
+10 <UPD: 1, 1999-01-01, 2017-01-01 -> 7, 2000-01-01, 2017-01-01
+11 >UPD: 1, 2017-01-01, 2019-01-01 -> 7, 2017-01-01, 2018-01-01
+12 >INS: 1, 2018-01-01, 2019-01-01
+13 <INS: 1, 2018-01-01, 2019-01-01
+14 <UPD: 1, 2017-01-01, 2019-01-01 -> 7, 2017-01-01, 2018-01-01
+15 >UPD: 2, 1998-01-01, 2018-12-12 -> 8, 2000-01-01, 2018-01-01
+16 >INS: 2, 1998-01-01, 2000-01-01
+17 <INS: 2, 1998-01-01, 2000-01-01
+18 >INS: 2, 2018-01-01, 2018-12-12
+19 <INS: 2, 2018-01-01, 2018-12-12
+20 <UPD: 2, 1998-01-01, 2018-12-12 -> 8, 2000-01-01, 2018-01-01
+21 >UPD: 3, 1997-01-01, 2015-01-01 -> 9, 2000-01-01, 2015-01-01
+22 >INS: 3, 1997-01-01, 2000-01-01
+23 <INS: 3, 1997-01-01, 2000-01-01
+24 <UPD: 3, 1997-01-01, 2015-01-01 -> 9, 2000-01-01, 2015-01-01
+25 >UPD: 4, 2016-01-01, 2020-01-01 -> 10, 2016-01-01, 2018-01-01
+26 >INS: 4, 2018-01-01, 2020-01-01
+27 <INS: 4, 2018-01-01, 2020-01-01
+28 <UPD: 4, 2016-01-01, 2020-01-01 -> 10, 2016-01-01, 2018-01-01
+29 >UPD: 5, 2010-01-01, 2015-01-01 -> 11, 2010-01-01, 2015-01-01
+30 <UPD: 5, 2010-01-01, 2015-01-01 -> 11, 2010-01-01, 2015-01-01
+# INSERT trigger only also works
+create or replace table log_tbl(id int auto_increment primary key, log text) engine=myisam;
+create or replace procedure log(s text)
+insert into log_tbl(log) values(s);
+create trigger tr1upd_t2 before update on t2
+for each row call log(CONCAT('>UPD: ', old.id, ", ", old.s, ", ", old.e, ' -> ', new.id, ", ", new.s, ", ", new.e));
+create trigger tr2upd_t2 after update on t2
+for each row call log(CONCAT('<UPD: ', old.id, ", ", old.s, ", ", old.e, ' -> ', new.id, ", ", new.s, ", ", new.e));
+create trigger tr1del_t2 before delete on t2
+for each row call log(CONCAT('>DEL: ', old.id, ", ", old.s, ", ", old.e));
+create trigger tr2del_t2 after delete on t2
+for each row call log(CONCAT('<DEL: ', old.id, ", ", old.s, ", ", old.e));
+create trigger tr1ins_t2 before insert on t2
+for each row call log(CONCAT('>INS: ', new.id, ", ", new.s, ", ", new.e));
+create trigger tr2ins_t2 after insert on t2
+for each row call log(CONCAT('<INS: ', new.id, ", ", new.s, ", ", new.e));
+drop trigger tr1upd_t2;
+drop trigger tr2upd_t2;
+update t2 for portion of apptime from '2000-01-01' to '2018-01-01'
+ set id=id + 6;
+select * from t2;
+id s e
+1 1999-01-01 2000-01-01
+1 1999-01-01 2000-01-01
+1 2018-01-01 2018-12-12
+1 2018-01-01 2019-01-01
+10 2016-01-01 2018-01-01
+11 2010-01-01 2015-01-01
+2 1998-01-01 2000-01-01
+2 2018-01-01 2018-12-12
+3 1997-01-01 2000-01-01
+4 2018-01-01 2020-01-01
+7 2000-01-01 2017-01-01
+7 2000-01-01 2018-01-01
+7 2017-01-01 2018-01-01
+8 2000-01-01 2018-01-01
+9 2000-01-01 2015-01-01
+select * from log_tbl order by id;
+id log
+1 >INS: 1, 1999-01-01, 2000-01-01
+2 <INS: 1, 1999-01-01, 2000-01-01
+3 >INS: 1, 2018-01-01, 2018-12-12
+4 <INS: 1, 2018-01-01, 2018-12-12
+5 >INS: 1, 1999-01-01, 2000-01-01
+6 <INS: 1, 1999-01-01, 2000-01-01
+7 >INS: 1, 2018-01-01, 2019-01-01
+8 <INS: 1, 2018-01-01, 2019-01-01
+9 >INS: 2, 1998-01-01, 2000-01-01
+10 <INS: 2, 1998-01-01, 2000-01-01
+11 >INS: 2, 2018-01-01, 2018-12-12
+12 <INS: 2, 2018-01-01, 2018-12-12
+13 >INS: 3, 1997-01-01, 2000-01-01
+14 <INS: 3, 1997-01-01, 2000-01-01
+15 >INS: 4, 2018-01-01, 2020-01-01
+16 <INS: 4, 2018-01-01, 2020-01-01
+select * from t for portion of apptime from 0 to 1 for system_time all;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'portion of apptime from 0 to 1 for system_time all' at line 1
+update t for portion of apptime from 0 to 1 for system_time all set id=1;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'system_time all set id=1' at line 1
+# Modifying period start/end fields is forbidden.
+# SQL16: 14.14 <update statement: searched>, Syntax Rules, 7)a)ii)
+# Neither BSTARTCOL nor BENDCOL shall be an explicit <object column>
+# contained in the <set clause list>.
+update t for portion of apptime from '2000-01-01' to '2018-01-01'
+ set id= id + 6, s=subdate(s, 5), e=adddate(e, 5);
+ERROR HY000: Column `s` used in period `apptime` specified in update SET list
+# Precision timestamps
+create or replace table t (id int, s timestamp(5), e timestamp(5),
+period for apptime(s,e));
+insert into t values(1, '1999-01-01', '2018-12-12');
+insert into t values(1, '1999-01-01', '2017-01-01');
+update t for portion of apptime from '2000-01-01 00:00:00.00015'
+ to '2018-01-01 12:34:56.31415'
+ set id= id + 5;
+select * from t;
+id s e
+1 1999-01-01 00:00:00.00000 2000-01-01 00:00:00.00015
+1 1999-01-01 00:00:00.00000 2000-01-01 00:00:00.00015
+1 2018-01-01 12:34:56.31415 2018-12-12 00:00:00.00000
+6 2000-01-01 00:00:00.00015 2017-01-01 00:00:00.00000
+6 2000-01-01 00:00:00.00015 2018-01-01 12:34:56.31415
+# Strings
+create or replace table t (id int, str text, s date, e date,
+period for apptime(s,e));
+insert into t values(1, 'data', '1999-01-01', '2018-12-12');
+insert into t values(1, 'other data', '1999-01-01', '2018-12-12');
+update t for portion of apptime from '2000-01-01' to '2018-01-01'
+ set id= id + 5;
+select * from t;
+id str s e
+1 data 1999-01-01 2000-01-01
+1 data 2018-01-01 2018-12-12
+1 other data 1999-01-01 2000-01-01
+1 other data 2018-01-01 2018-12-12
+6 data 2000-01-01 2018-01-01
+6 other data 2000-01-01 2018-01-01
+# multi-table UPDATE is impossible
+create or replace table t1(x int);
+update t for portion of apptime from '2000-01-01' to '2018-01-01', t1
+set t.id= t.id + 5;
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ' t1
+set t.id= t.id + 5' at line 1
+update t1 set x= (select id from t for portion of apptime from '2000-01-01' to '2018-01-01');
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'portion of apptime from '2000-01-01' to '2018-01-01')' at line 1
+# single-table views
+create or replace view v1 as select * from t where id<10;
+update v1 for portion of apptime from '2000-01-01' to '2018-01-01' set id= id + 5;
+ERROR 42S02: 'v1' is a view
+# multi-table views
+create or replace view v1 as select * from t, t1 where x=id;
+update v1 for portion of apptime from '2000-01-01' to '2018-01-01' set id= id + 5;
+ERROR 42S02: 'v1' is a view
+# SQL16: 14.14 <update statement: searched>, Syntax Rules, 7)a) iii-iv)
+# Let FROMVAL be <point in time 1>. FROMVAL shall not generally contain a
+# reference to a column of T or a <routine invocation>
+# whose subject routine is an SQL-invoked routine that
+# is possibly non-deterministic or that possibly modifies SQL-data.
+# ...Same for <point in time 2> (TOVAL)
+update t for portion of apptime from 5*(5+s) to 1 set t.id= t.id + 5;
+ERROR HY000: Expression in FOR PORTION OF must be constant
+update t for portion of apptime from 1 to e set t.id= t.id + 5;
+ERROR HY000: Expression in FOR PORTION OF must be constant
+set @s= '2000-01-01';
+set @e= '2018-01-01';
+create or replace function f() returns date return @e;
+create or replace function g() returns date not deterministic return @e;
+create or replace function h() returns date deterministic return @e;
+update t for portion of apptime from @s to f() set t.id= t.id + 5;
+ERROR HY000: Expression in FOR PORTION OF must be constant
+update t for portion of apptime from @s to g() set t.id= t.id + 5;
+ERROR HY000: Expression in FOR PORTION OF must be constant
+# success
+update t for portion of apptime from @s to h() set t.id= t.id + 5;
+# select value is cached
+update t for portion of apptime from (select s from t2 limit 1) to h() set t.id= t.id + 5;
+# auto_increment field is updated
+create or replace table t (id int primary key auto_increment, x int,
+s date, e date, period for apptime(s, e));
+insert into t values (default, 1, '1999-01-01', '2018-12-12');
+update t for portion of apptime from '2000-01-01' to '2018-01-01' set x= x + 5;
+select * from t;
+id x s e
+1 6 2000-01-01 2018-01-01
+2 1 1999-01-01 2000-01-01
+3 1 2018-01-01 2018-12-12
+truncate t;
+insert into t values (default, 1, '1999-01-01', '2018-12-12');
+update t for portion of apptime from '2000-01-01' to '2018-01-01' set x= 1;
+select * from t;
+id x s e
+1 1 2000-01-01 2018-01-01
+2 1 1999-01-01 2000-01-01
+3 1 2018-01-01 2018-12-12
+# generated columns are updated
+create or replace table t (x int, s date, e date,
+xs date as (s) stored, xe date as (e) stored,
+period for apptime(s, e));
+insert into t values(1, '1999-01-01', '2018-12-12', default, default);
+select * from t;
+x s e xs xe
+1 1999-01-01 2018-12-12 1999-01-01 2018-12-12
+update t for portion of apptime from '2000-01-01' to '2018-01-01' set x= x + 5;
+select *, xs=s and xe=e from t;
+x s e xs xe xs=s and xe=e
+1 1999-01-01 2000-01-01 1999-01-01 2000-01-01 1
+1 2018-01-01 2018-12-12 2018-01-01 2018-12-12 1
+6 2000-01-01 2018-01-01 2000-01-01 2018-01-01 1
+# MDEV-18921 Server crashes in bitmap_bits_set or bitmap_is_set upon
+# UPDATE IGNORE .. FOR PORTION with binary logging
+create or replace table t1 (f int, s date, e date, period for app(s,e));
+insert into t1 values (1,'2016-09-21','2019-06-14');
+update ignore t1 for portion of app from '2019-03-13' to '2019-03-14' set f = 1;
+drop table t,t1,t2,log_tbl;
+drop view v1;
+drop function f;
+drop function g;
+drop function h;
+drop procedure log;
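The UPDATE results above follow the same splitting rule: the SET clause is applied only to the portion of each row that overlaps [FROM, TO), while the remainders are re-inserted unchanged. A minimal sketch with a hypothetical table name:

create table u_demo (id int, s date, e date, period for apptime(s, e));
insert u_demo values (1, '1999-01-01', '2018-12-12');
update u_demo for portion of apptime from '2000-01-01' to '2018-01-01' set id = id + 6;
-- the overlap gets the new id, the remainders keep the old one:
--   1  1999-01-01  2000-01-01
--   1  2018-01-01  2018-12-12
--   7  2000-01-01  2018-01-01
select * from u_demo order by id, s;
drop table u_demo;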
diff --git a/mysql-test/suite/period/r/versioning.result b/mysql-test/suite/period/r/versioning.result
new file mode 100644
index 00000000000..efb7a646e87
--- /dev/null
+++ b/mysql-test/suite/period/r/versioning.result
@@ -0,0 +1,94 @@
+# DELETE
+create table t (
+s date, e date,
+row_start SYS_TYPE as row start invisible,
+row_end SYS_TYPE as row end invisible,
+period for apptime(s, e),
+period for system_time (row_start, row_end)) with system versioning;
+insert into t values('1999-01-01', '2018-12-12'),
+('1999-01-01', '1999-12-12');
+select row_start into @ins_time from t limit 1;
+select * from t order by s, e;
+s e
+1999-01-01 1999-12-12
+1999-01-01 2018-12-12
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+select *, if(row_start = @ins_time, "OLD", "NEW"), check_row(row_start, row_end)
+from t for system_time all
+order by s, e, row_start;
+s e if(row_start = @ins_time, "OLD", "NEW") check_row(row_start, row_end)
+1999-01-01 1999-12-12 OLD CURRENT ROW
+1999-01-01 2000-01-01 NEW CURRENT ROW
+1999-01-01 2018-12-12 OLD HISTORICAL ROW
+2018-01-01 2018-12-12 NEW CURRENT ROW
+# same for trigger case
+delete from t;
+delete history from t;
+insert into t values('1999-01-01', '2018-12-12'),
+('1999-01-01', '1999-12-12');
+create or replace table log_tbl(id int auto_increment primary key, log text) engine=myisam;
+create or replace procedure log(s text)
+insert into log_tbl(log) values(s);
+create trigger tr1upd_t before update on t
+for each row call log(CONCAT('>UPD: ', old.s, ", ", old.e, ' -> ', new.s, ", ", new.e));
+create trigger tr2upd_t after update on t
+for each row call log(CONCAT('<UPD: ', old.s, ", ", old.e, ' -> ', new.s, ", ", new.e));
+create trigger tr1del_t before delete on t
+for each row call log(CONCAT('>DEL: ', old.s, ", ", old.e));
+create trigger tr2del_t after delete on t
+for each row call log(CONCAT('<DEL: ', old.s, ", ", old.e));
+create trigger tr1ins_t before insert on t
+for each row call log(CONCAT('>INS: ', new.s, ", ", new.e));
+create trigger tr2ins_t after insert on t
+for each row call log(CONCAT('<INS: ', new.s, ", ", new.e));
+select row_start into @ins_time from t limit 1;
+select * from t order by s, e;
+s e
+1999-01-01 1999-12-12
+1999-01-01 2018-12-12
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+select *, if(row_start = @ins_time, "OLD", "NEW"), check_row(row_start, row_end)
+from t for system_time all
+order by s, e, row_start;
+s e if(row_start = @ins_time, "OLD", "NEW") check_row(row_start, row_end)
+1999-01-01 1999-12-12 OLD CURRENT ROW
+1999-01-01 2000-01-01 NEW CURRENT ROW
+1999-01-01 2018-12-12 OLD HISTORICAL ROW
+2018-01-01 2018-12-12 NEW CURRENT ROW
+select * from log_tbl order by id;
+id log
+1 >DEL: 1999-01-01, 2018-12-12
+2 >INS: 1999-01-01, 2000-01-01
+3 <INS: 1999-01-01, 2000-01-01
+4 >INS: 2018-01-01, 2018-12-12
+5 <INS: 2018-01-01, 2018-12-12
+6 <DEL: 1999-01-01, 2018-12-12
+# UPDATE
+create or replace table t (x int, s date, e date,
+row_start SYS_TYPE as row start invisible,
+row_end SYS_TYPE as row end invisible,
+period for apptime(s, e),
+period for system_time(row_start, row_end)) with system versioning;
+insert into t values(1, '1999-01-01', '2018-12-12'),
+(2, '1999-01-01', '1999-12-12');
+select row_start into @ins_time from t limit 1;
+select * from t;
+x s e
+1 1999-01-01 2018-12-12
+2 1999-01-01 1999-12-12
+update t for portion of apptime from '2000-01-01' to '2018-01-01' set x= x + 5;
+select *, if(row_start = @ins_time, "OLD", "NEW"), check_row(row_start, row_end)
+from t for system_time all
+order by x, s, e, row_start;
+x s e if(row_start = @ins_time, "OLD", "NEW") check_row(row_start, row_end)
+1 1999-01-01 2000-01-01 NEW CURRENT ROW
+1 1999-01-01 2018-12-12 OLD HISTORICAL ROW
+1 2018-01-01 2018-12-12 NEW CURRENT ROW
+2 1999-01-01 1999-12-12 OLD CURRENT ROW
+6 2000-01-01 2018-01-01 NEW CURRENT ROW
+drop table t,log_tbl;
+drop function check_row;
+drop function current_row;
+drop procedure verify_trt;
+drop procedure verify_trt_dummy;
+drop procedure log;
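On a system-versioned table the portion operation is also recorded in SYSTEM_TIME history: the original full-span row becomes a historical row, while the remainders (and, for UPDATE, the modified overlap) become new current rows. A sketch assuming a timestamp(6) system_time period (the SYS_TYPE above is substituted by the test harness):

create table v_demo (s date, e date,
  row_start timestamp(6) as row start invisible,
  row_end timestamp(6) as row end invisible,
  period for apptime(s, e),
  period for system_time(row_start, row_end)) with system versioning;
insert v_demo values ('1999-01-01', '2018-12-12');
delete from v_demo for portion of apptime from '2000-01-01' to '2018-01-01';
select s, e from v_demo;                      -- the two current remainder rows
select s, e from v_demo for system_time all;  -- also shows the old 1999..2018 span as history
drop table v_demo;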
diff --git a/mysql-test/suite/period/t/alter.test b/mysql-test/suite/period/t/alter.test
new file mode 100644
index 00000000000..3f45d68cd61
--- /dev/null
+++ b/mysql-test/suite/period/t/alter.test
@@ -0,0 +1,133 @@
+set @s= '1992-01-01';
+set @e= '1999-12-31';
+
+create table t (s date, e date);
+
+--echo # period start/end columns are implicit NOT NULL
+alter table t add period for a(s, e);
+show create table t;
+
+create or replace table t (s date, e date);
+alter table t change s s date, add period for a(s, e);
+show create table t;
+
+alter table t add id int;
+show create table t;
+alter table t drop id;
+show create table t;
+
+--error ER_CONSTRAINT_FAILED
+insert t values(@e, @s);
+
+--error ER_PERIOD_CONSTRAINT_DROP
+alter table t drop constraint a;
+
+--echo # no-op
+alter table t drop period if exists for b;
+--echo # no-op
+alter table t add period if not exists for a(e, s);
+
+alter table t drop period if exists for a;
+--echo # no-op
+alter table t drop period if exists for a;
+
+alter table t add period for a(s, e), add period if not exists for a(e, s);
+show create table t;
+
+alter table t drop period for a;
+--echo # Constraint is dropped
+insert t values(@e, @s);
+
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t drop period for a;
+--error ER_CANT_DROP_FIELD_OR_KEY
+alter table t add period for a(s, e), drop period for a;
+
+truncate t;
+alter table t add period for a(s, e);
+--error ER_CONSTRAINT_FAILED
+insert t values(@e, @s);
+alter table t add period for a(s, e), drop period for a;
+--error ER_CONSTRAINT_FAILED
+insert t values(@e, @s);
+alter table t add s1 date not null, add period for b(s1, e), drop period for a;
+show create table t;
+insert t(s, s1, e) values(@e, @s, @e);
+--error ER_CONSTRAINT_FAILED
+insert t(s, s1, e) values(@e, @e, @s);
+
+create table t1 like t;
+show create table t1;
+drop table t1;
+
+--error ER_CONSTRAINT_FAILED
+create table t2 (period for b(s,e)) select * from t;
+
+create table t2 (period for b(s1,e)) select * from t;
+drop table t2;
+
+--echo # SQL16 11.27 <add table period definition>, Syntax Rules, 5)g)
+--echo # The declared type of BC1 shall be either DATE or a timestamp type
+--echo # and shall be equivalent to the declared type of BC2.
+create or replace table t (s timestamp not null, e timestamp(6) not null);
+--error ER_PERIOD_TYPES_MISMATCH
+alter table t add period for a(s, e);
+
+--echo # SQL16 11.27 <add table period definition>, Syntax Rules, 5)c)
+--echo # No column of T shall have a column name that is equivalent to ATPN.
+create or replace table t (a int, s date, e date);
+--error ER_DUP_FIELDNAME
+alter table t add period for a(s, e);
+
+--echo # SQL16 11.27 <add table period definition>, Syntax Rules, 5)i)
+--echo # Neither BC1 nor BC2 shall be an identity column, a generated column,
+--echo # a system-time period start column, or a system-time period end column.
+create or replace table t (id int primary key,
+ s date,
+ e date generated always as (s+1));
+--error ER_PERIOD_FIELD_WRONG_ATTRIBUTES
+alter table t add period for a(s, e);
+
+create or replace table t (id int primary key,
+ s date,
+ e date as (s+1) VIRTUAL);
+--error ER_PERIOD_FIELD_WRONG_ATTRIBUTES
+alter table t add period for a(s, e);
+
+create or replace table t (id int primary key, s timestamp(6), e timestamp(6),
+ st timestamp(6) as row start,
+ en timestamp(6) as row end,
+ period for system_time (st, en)) with system versioning;
+--error ER_PERIOD_FIELD_WRONG_ATTRIBUTES
+alter table t add period for a(s, en);
+
+--echo # SQL16 11.27 <add table period definition>, Syntax Rules, 5)b)
+--echo # The table descriptor of T shall not include a period descriptor other
+--echo # than a system-time period descriptor.
+alter table t add period for a(s, e);
+--error ER_MORE_THAN_ONE_PERIOD
+alter table t add period for b(s, e);
+
+--echo # SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)e)v)2)B)
+--echo # Let S be the schema identified by the explicit or implicit
+--echo # <schema name> of TN. Let IDCN be an implementation-dependent
+--echo # <constraint name> that is not equivalent to the <constraint name> of
+--echo # any table constraint descriptor included in S. The following
+--echo # <table constraint definition> is implicit:
+--echo # CONSTRAINT IDCN CHECK ( CN1 < CN2 )
+--echo #
+--echo # Due to the above standard limitation, the constraint name can't always
+--echo # match the period name. So it matches when possible; and when not, it
+--echo # is unique not taken name prefixed with period name.
+create or replace table t (x int, s date, e date,
+ period for mytime(s, e));
+show create table t;
+alter table t add constraint mytime check (x > 1);
+show create table t;
+--error ER_CONSTRAINT_FAILED
+insert t values (2, @e, @s);
+alter table t add constraint mytime_1 check (x > 2);
+--error ER_CONSTRAINT_FAILED
+insert t values (3, @e, @s);
+
+drop table t;
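One detail from the top of this test worth spelling out: ALTER TABLE ... ADD PERIOD makes the two bounding columns NOT NULL and installs the implicit CHECK (start < end) constraint, and dropping the period removes that constraint again, which is why the reversed insert starts succeeding. A quick sketch with hypothetical names:

create table a_demo (s date, e date);
alter table a_demo add period for app(s, e);
show create table a_demo;                            -- s and e are now NOT NULL
insert a_demo values ('1999-12-31', '1992-01-01');   -- rejected by the implicit CHECK (s < e)
alter table a_demo drop period for app;
insert a_demo values ('1999-12-31', '1992-01-01');   -- accepted once the period and its CHECK are gone
drop table a_demo;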
diff --git a/mysql-test/suite/period/t/create.test b/mysql-test/suite/period/t/create.test
new file mode 100644
index 00000000000..2e3de795698
--- /dev/null
+++ b/mysql-test/suite/period/t/create.test
@@ -0,0 +1,81 @@
+create table t (id int primary key, s date, e date, period for mytime(s,e));
+--echo # CONSTRAINT CHECK (s < e) is added implicitly, and shouldn't be shown
+--echo # this is important for correct command-based replication
+show create table t;
+create or replace table t (id int primary key, s timestamp(6), e timestamp(6),
+ period for mytime(s,e));
+show create table t;
+
+--echo # SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)a)
+--echo # 2) If a <table period definition> TPD is specified, then:
+--echo # a) <table scope> shall not be specified.
+--error ER_PERIOD_TEMPORARY_NOT_ALLOWED
+create or replace temporary table t (s date, e date, period for mytime(s,e));
+
+--echo # SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)e)iii)
+--echo # The <data type or domain name> contained in CD1 is either DATE or a
+--echo # timestamp type and it is equivalent to the <data type or domain name>
+--echo # contained in CD2.
+--error ER_PERIOD_TYPES_MISMATCH
+create or replace table t (id int primary key, s datetime, e date,
+ period for mytime(s,e));
+--error ER_PERIOD_TYPES_MISMATCH
+create or replace table t (s timestamp(2), e timestamp(6),
+ period for mytime(s,e));
+--error ER_WRONG_FIELD_SPEC
+create or replace table t (id int primary key, s int, e date,
+ period for mytime(s,e));
+--error ER_WRONG_FIELD_SPEC
+create or replace table t (id int primary key, s time, e time,
+ period for mytime(s,e));
+--error ER_BAD_FIELD_ERROR
+create or replace table t (id int primary key, s date, e date,
+ period for mytime(s,x));
+--error ER_MORE_THAN_ONE_PERIOD
+create or replace table t (id int primary key, s date, e date,
+ period for mytime(s,e),
+ period for mytime2(s,e));
+
+--echo # SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)d)
+--echo # No <column name> in any <column definition> shall be equivalent to PN.
+--error ER_DUP_FIELDNAME
+create or replace table t (mytime int, s date, e date,
+ period for mytime(s,e));
+
+--echo # SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)e)v)2)A)
+--echo # Neither CD1 nor CD2 shall contain an <identity column specification>, a
+--echo # <generation clause>, a <system time period start column specification>,
+--echo # or a <system time period end column specification>.
+--error ER_PERIOD_FIELD_WRONG_ATTRIBUTES
+create or replace table t (id int primary key,
+ s date,
+ e date generated always as (s+1),
+ period for mytime(s,e));
+
+--error ER_PERIOD_FIELD_WRONG_ATTRIBUTES
+create or replace table t (id int primary key,
+ s date,
+ e date as (s+1) VIRTUAL,
+ period for mytime(s,e));
+
+--error ER_PERIOD_FIELD_WRONG_ATTRIBUTES
+create or replace table t (id int primary key, s timestamp(6), e timestamp(6),
+ st timestamp(6) as row start,
+ en timestamp(6) as row end,
+ period for system_time (st, en),
+ period for mytime(st,e)) with system versioning;
+
+--echo # SQL16, Part 2, 11.3 <table definition>, Syntax Rules, 2)
+--echo # Let IDCN be an implementation-dependent <constraint name> that is not
+--echo # equivalent to the <constraint name> of any table constraint descriptor
+--echo # included in S.
+create or replace table t (x int, s date, e date,
+ period for mytime(s, e),
+ constraint mytime check (x > 1));
+show create table t;
+--error ER_CONSTRAINT_FAILED
+insert t values (2, '2001-01-01', '2001-01-01');
+
+show status like "Feature_application_time_periods";
+
+drop table t;
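The type rule quoted in the middle of this file boils down to: both period columns must be DATE, or timestamps of identical precision; any other combination is rejected before the table is created. Sketch, with illustrative table names:

create table p_ok   (s date, e date, period for p(s, e));                  -- same DATE type: accepted
create table p_bad1 (s timestamp(2), e timestamp(6), period for p(s, e));  -- rejected: ER_PERIOD_TYPES_MISMATCH
create table p_bad2 (s time, e time, period for p(s, e));                  -- rejected: ER_WRONG_FIELD_SPEC
drop table p_ok;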
diff --git a/mysql-test/suite/period/t/delete.test b/mysql-test/suite/period/t/delete.test
new file mode 100644
index 00000000000..00bc314160f
--- /dev/null
+++ b/mysql-test/suite/period/t/delete.test
@@ -0,0 +1,186 @@
+source suite/period/engines.inc;
+source include/have_log_bin.inc;
+
+create table t (id int, s date, e date, period for apptime(s,e));
+
+insert into t values(1, '1999-01-01', '2018-12-12');
+insert into t values(1, '1999-01-01', '2017-01-01');
+insert into t values(1, '2017-01-01', '2019-01-01');
+insert into t values(2, '1998-01-01', '2018-12-12');
+insert into t values(3, '1997-01-01', '2015-01-01');
+insert into t values(4, '2016-01-01', '2020-01-01');
+insert into t values(5, '2010-01-01', '2015-01-01');
+
+create or replace table t1 (id int, s date, e date, period for apptime(s,e));
+insert t1 select * from t;
+create or replace table t2 (id int, s date, e date, period for apptime(s,e));
+insert t2 select * from t;
+create or replace table t3 (id int, s date, e date, period for apptime(s,e));
+insert t3 select * from t;
+
+--let $trig_cols=id, s, e
+--let $trig_table=t1
+--source suite/period/create_triggers.inc
+
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+delete from t1 for portion of APPTIME from '2000-01-01' to '2018-01-01';
+--sorted_result
+select * from t;
+--sorted_result
+select * from t1;
+select * from log_tbl order by id;
+
+--echo # INSERT trigger only also works
+--let $trig_cols=id, s, e
+--let $trig_table=t2
+--source suite/period/create_triggers.inc
+drop trigger tr1del_t2;
+drop trigger tr2del_t2;
+delete from t2 for portion of APPTIME from '2000-01-01' to '2018-01-01';
+select * from log_tbl order by id;
+
+--echo # removing the BEFORE INSERT trigger enables the internal substitution
+--echo # DELETE+INSERT -> UPDATE, but without any side effects.
+--echo # The optimization is disabled for non-transactional engines
+--let $trig_table=t3
+--source suite/period/create_triggers.inc
+drop trigger tr1ins_t3;
+delete from t3 for portion of APPTIME from '2000-01-01' to '2018-01-01';
+select * from log_tbl order by id;
+
+--echo # multi-table DELETE is not possible
+--error ER_PARSE_ERROR
+delete t, t1 from t1, t for portion of apptime from '2000-01-01' to '2018-01-01';
+
+--error ER_PARSE_ERROR
+delete t for portion of apptime from '2000-01-01' to '2018-01-01', t1 from t, t1;
+
+--echo # Here another check fails before parsing ends
+--error ER_UNKNOWN_TABLE
+delete t, t1 from t for portion of apptime from '2000-01-01' to '2018-01-01', t1;
+
+--error ER_PARSE_ERROR
+delete history from t2 for portion of apptime from '2000-01-01' to '2018-01-01';
+
+--error ER_PERIOD_NOT_FOUND
+delete from t for portion of othertime from '2000-01-01' to '2018-01-01';
+--error ER_PARSE_ERROR
+delete from t for portion of system_time from '2000-01-01' to '2018-01-01';
+
+create or replace table t (id int, str text, s date, e date,
+ period for apptime(s,e));
+
+insert into t values(1, 'data', '1999-01-01', '2018-12-12');
+insert into t values(1, 'other data', '1999-01-01', '2018-12-12');
+insert into t values(1, 'deleted', '2000-01-01', '2018-01-01');
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+show warnings;
+--sorted_result
+select * from t;
+
+drop table t1;
+
+--echo # SQL16, Part 2, 15.7 <Effect of deleting rows from base tables>,
+--echo # General rules, 8)b)i)
+--echo # If the column descriptor that corresponds to the i-th field of BR
+--echo # describes an identity column, a generated column, a system-time period
+--echo # start column, or a system-time period end column, then let V i be
+--echo # DEFAULT.
+
+--echo # auto_increment field is updated
+create or replace table t (id int primary key auto_increment, s date, e date,
+ period for apptime(s, e));
+insert into t values (default, '1999-01-01', '2018-12-12');
+select * from t;
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+--sorted_result
+select * from t;
+truncate t;
+--echo # same for trigger case
+insert into t values (default, '1999-01-01', '2018-12-12');
+--let $trig_table=t
+--source suite/period/create_triggers.inc
+
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+--sorted_result
+select * from t;
+select * from log_tbl order by id;
+
+--echo # generated columns are updated
+create or replace table t (s date, e date,
+ xs date as (s) stored, xe date as (e) stored,
+ period for apptime(s, e));
+insert into t values('1999-01-01', '2018-12-12', default, default);
+--sorted_result
+select * from t;
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+--sorted_result
+select * from t;
+truncate t;
+--echo # same for trigger case
+insert into t values('1999-01-01', '2018-12-12', default, default);
+--let $trig_table=t
+--source suite/period/create_triggers.inc
+
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+--sorted_result
+select * from t;
+select * from log_tbl order by id;
+
+--echo # View can't be used
+create or replace view v as select * from t;
+--error ER_IT_IS_A_VIEW
+delete from v for portion of p from '2000-01-01' to '2018-01-01';
+
+--echo # View can't be used
+create or replace view v as select t.* from t, t as t1;
+--error ER_VIEW_DELETE_MERGE_VIEW
+delete from v for portion of p from '2000-01-01' to '2018-01-01';
+
+--echo # auto_increment field overflow
+create or replace table t (id tinyint auto_increment primary key,
+ s date, e date, period for apptime(s,e));
+
+insert into t values(127, '1999-01-01', '2018-12-12');
+
+--error HA_ERR_AUTOINC_ERANGE
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+select * from t;
+
+--echo # same for trigger case
+--let $trig_table=t
+--source suite/period/create_triggers.inc
+--echo # undo side effects of the non-transactional MyISAM engine
+replace into t values(127, '1999-01-01', '2018-12-12');
+select * from t;
+truncate table log_tbl;
+
+--error HA_ERR_AUTOINC_ERANGE
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+select * from t;
+select * from log_tbl order by id;
+
+--echo # custom constraint for period fields
+create or replace table t(id int, s date, e date, period for apptime(s,e),
+ constraint dist2days check (datediff(e, s) >= 2));
+insert into t values(1, '1999-01-01', '2018-12-12'),
+ (2, '1999-01-01', '1999-12-12');
+--error ER_CONSTRAINT_FAILED
+delete from t for portion of apptime from '1999-01-02' to '2018-12-12';
+--echo # undo side effects of the non-transactional MyISAM engine
+truncate t;
+insert into t values(1, '1999-01-01', '2018-12-12'),
+ (2, '1999-01-01', '1999-12-12');
+--error ER_CONSTRAINT_FAILED
+delete from t for portion of apptime from '1999-01-01' to '2018-12-11';
+truncate t;
+insert into t values(1, '1999-01-01', '2018-12-12'),
+ (2, '1999-01-01', '1999-12-12');
+
+delete from t for portion of apptime from '1999-01-03' to '2018-12-10';
+--sorted_result
+select *, datediff(e, s) from t;
+
+drop table t,t2,t3,log_tbl;
+drop view v;
+drop procedure log;
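The dist2days case above also shows that table CHECK constraints are evaluated against the generated remainder rows, not just against rows the statement touches directly; a remainder narrower than the constraint allows aborts the whole delete. Minimal sketch:

create table c_demo (id int, s date, e date, period for apptime(s, e),
                     constraint dist2days check (datediff(e, s) >= 2));
insert c_demo values (1, '1999-01-01', '2018-12-12');
-- the left remainder would span only 1999-01-01 .. 1999-01-02 (one day),
-- so the constraint fails and the statement is rejected
delete from c_demo for portion of apptime from '1999-01-02' to '2018-12-12';
drop table c_demo;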
diff --git a/mysql-test/suite/period/t/update.test b/mysql-test/suite/period/t/update.test
new file mode 100644
index 00000000000..5730387dfda
--- /dev/null
+++ b/mysql-test/suite/period/t/update.test
@@ -0,0 +1,165 @@
+source suite/period/engines.inc;
+source include/have_log_bin.inc;
+
+create table t (id int, s date, e date, period for apptime(s,e));
+
+insert into t values(1, '1999-01-01', '2018-12-12');
+insert into t values(1, '1999-01-01', '2017-01-01');
+insert into t values(1, '2017-01-01', '2019-01-01');
+insert into t values(2, '1998-01-01', '2018-12-12');
+insert into t values(3, '1997-01-01', '2015-01-01');
+insert into t values(4, '2016-01-01', '2020-01-01');
+insert into t values(5, '2010-01-01', '2015-01-01');
+
+create or replace table t1 (id int, s date, e date, period for apptime(s,e));
+insert t1 select * from t;
+create or replace table t2 (id int, s date, e date, period for apptime(s,e));
+insert t2 select * from t;
+
+update t for portion of apptime from '2000-01-01' to '2018-01-01'
+ set id=id + 6;
+--sorted_result
+select * from t;
+
+--echo # Check triggers
+--let $trig_cols=id, s, e
+--let $trig_table=t1
+--source suite/period/create_triggers.inc
+
+update t1 for portion of apptime from '2000-01-01' to '2018-01-01'
+ set id=id + 6;
+--sorted_result
+select * from t1;
+select * from log_tbl order by id;
+
+--echo # INSERT trigger only also works
+--let $trig_cols=id, s, e
+--let $trig_table=t2
+--source suite/period/create_triggers.inc
+drop trigger tr1upd_t2;
+drop trigger tr2upd_t2;
+update t2 for portion of apptime from '2000-01-01' to '2018-01-01'
+ set id=id + 6;
+--sorted_result
+select * from t2;
+select * from log_tbl order by id;
+
+--error ER_PARSE_ERROR
+select * from t for portion of apptime from 0 to 1 for system_time all;
+--error ER_PARSE_ERROR
+update t for portion of apptime from 0 to 1 for system_time all set id=1;
+
+--echo # Modifying period start/end fields is forbidden.
+--echo # SQL16: 14.14 <update statement: searched>, Syntax Rules, 7)a)ii)
+--echo # Neither BSTARTCOL nor BENDCOL shall be an explicit <object column>
+--echo # contained in the <set clause list>.
+--error ER_PERIOD_COLUMNS_UPDATED
+update t for portion of apptime from '2000-01-01' to '2018-01-01'
+ set id= id + 6, s=subdate(s, 5), e=adddate(e, 5);
+
+--echo # Precision timestamps
+create or replace table t (id int, s timestamp(5), e timestamp(5),
+ period for apptime(s,e));
+insert into t values(1, '1999-01-01', '2018-12-12');
+insert into t values(1, '1999-01-01', '2017-01-01');
+update t for portion of apptime from '2000-01-01 00:00:00.00015'
+ to '2018-01-01 12:34:56.31415'
+ set id= id + 5;
+--sorted_result
+select * from t;
+
+--echo # Strings
+create or replace table t (id int, str text, s date, e date,
+ period for apptime(s,e));
+
+insert into t values(1, 'data', '1999-01-01', '2018-12-12');
+insert into t values(1, 'other data', '1999-01-01', '2018-12-12');
+update t for portion of apptime from '2000-01-01' to '2018-01-01'
+ set id= id + 5;
+--sorted_result
+select * from t;
+
+--echo # multi-table UPDATE is impossible
+create or replace table t1(x int);
+--error ER_PARSE_ERROR
+update t for portion of apptime from '2000-01-01' to '2018-01-01', t1
+ set t.id= t.id + 5;
+
+--error ER_PARSE_ERROR
+update t1 set x= (select id from t for portion of apptime from '2000-01-01' to '2018-01-01');
+
+--echo # single-table views
+create or replace view v1 as select * from t where id<10;
+--error ER_IT_IS_A_VIEW
+update v1 for portion of apptime from '2000-01-01' to '2018-01-01' set id= id + 5;
+
+--echo # multi-table views
+create or replace view v1 as select * from t, t1 where x=id;
+--error ER_IT_IS_A_VIEW
+update v1 for portion of apptime from '2000-01-01' to '2018-01-01' set id= id + 5;
+
+--echo # SQL16: 14.14 <update statement: searched>, Syntax Rules, 7)a) iii-iv)
+--echo # Let FROMVAL be <point in time 1>. FROMVAL shall not generally contain a
+--echo # reference to a column of T or a <routine invocation>
+--echo # whose subject routine is an SQL-invoked routine that
+--echo # is possibly non-deterministic or that possibly modifies SQL-data.
+--echo # ...Same for <point in time 2> (TOVAL)
+--error ER_NOT_CONSTANT_EXPRESSION
+update t for portion of apptime from 5*(5+s) to 1 set t.id= t.id + 5;
+--error ER_NOT_CONSTANT_EXPRESSION
+update t for portion of apptime from 1 to e set t.id= t.id + 5;
+
+set @s= '2000-01-01';
+set @e= '2018-01-01';
+
+create or replace function f() returns date return @e;
+create or replace function g() returns date not deterministic return @e;
+create or replace function h() returns date deterministic return @e;
+
+--error ER_NOT_CONSTANT_EXPRESSION
+update t for portion of apptime from @s to f() set t.id= t.id + 5;
+--error ER_NOT_CONSTANT_EXPRESSION
+update t for portion of apptime from @s to g() set t.id= t.id + 5;
+
+--echo # success
+update t for portion of apptime from @s to h() set t.id= t.id + 5;
+--echo # select value is cached
+update t for portion of apptime from (select s from t2 limit 1) to h() set t.id= t.id + 5;
+
+--echo # auto_increment field is updated
+create or replace table t (id int primary key auto_increment, x int,
+ s date, e date, period for apptime(s, e));
+insert into t values (default, 1, '1999-01-01', '2018-12-12');
+update t for portion of apptime from '2000-01-01' to '2018-01-01' set x= x + 5;
+--sorted_result
+select * from t;
+
+truncate t;
+insert into t values (default, 1, '1999-01-01', '2018-12-12');
+update t for portion of apptime from '2000-01-01' to '2018-01-01' set x= 1;
+--sorted_result
+select * from t;
+
+--echo # generated columns are updated
+create or replace table t (x int, s date, e date,
+ xs date as (s) stored, xe date as (e) stored,
+ period for apptime(s, e));
+insert into t values(1, '1999-01-01', '2018-12-12', default, default);
+--sorted_result
+select * from t;
+update t for portion of apptime from '2000-01-01' to '2018-01-01' set x= x + 5;
+--sorted_result
+select *, xs=s and xe=e from t;
+
+--echo # MDEV-18921 Server crashes in bitmap_bits_set or bitmap_is_set upon
+--echo # UPDATE IGNORE .. FOR PORTION with binary logging
+create or replace table t1 (f int, s date, e date, period for app(s,e));
+insert into t1 values (1,'2016-09-21','2019-06-14');
+update ignore t1 for portion of app from '2019-03-13' to '2019-03-14' set f = 1;
+
+drop table t,t1,t2,log_tbl;
+drop view v1;
+drop function f;
+drop function g;
+drop function h;
+drop procedure log;
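As the FROMVAL/TOVAL rules quoted above require, the FOR PORTION OF bounds must be constant for the whole statement: literals, user variables, deterministic stored functions and the cached scalar subquery are accepted, while column references and non-deterministic functions are not. Sketch with hypothetical names:

create table b_demo (id int, s date, e date, period for apptime(s, e));
create function det_end() returns date deterministic return '2018-01-01';
update b_demo for portion of apptime from '2000-01-01' to det_end() set id = id + 1;  -- allowed
update b_demo for portion of apptime from '2000-01-01' to e set id = id + 1;          -- ER_NOT_CONSTANT_EXPRESSION
drop function det_end;
drop table b_demo;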
diff --git a/mysql-test/suite/period/t/versioning.test b/mysql-test/suite/period/t/versioning.test
new file mode 100644
index 00000000000..ea20344515d
--- /dev/null
+++ b/mysql-test/suite/period/t/versioning.test
@@ -0,0 +1,64 @@
+source suite/versioning/engines.inc;
+source suite/versioning/common.inc;
+
+--echo # DELETE
+--replace_result $sys_datatype_expl SYS_TYPE
+eval create table t (
+ s date, e date,
+ row_start $sys_datatype_expl as row start invisible,
+ row_end $sys_datatype_expl as row end invisible,
+ period for apptime(s, e),
+ period for system_time (row_start, row_end)) with system versioning;
+insert into t values('1999-01-01', '2018-12-12'),
+ ('1999-01-01', '1999-12-12');
+
+select row_start into @ins_time from t limit 1;
+select * from t order by s, e;
+
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+select *, if(row_start = @ins_time, "OLD", "NEW"), check_row(row_start, row_end)
+ from t for system_time all
+ order by s, e, row_start;
+
+--echo # same for trigger case
+delete from t;
+delete history from t;
+insert into t values('1999-01-01', '2018-12-12'),
+ ('1999-01-01', '1999-12-12');
+--let $trig_table=t
+--source suite/period/create_triggers.inc
+
+select row_start into @ins_time from t limit 1;
+select * from t order by s, e;
+
+delete from t for portion of apptime from '2000-01-01' to '2018-01-01';
+select *, if(row_start = @ins_time, "OLD", "NEW"), check_row(row_start, row_end)
+ from t for system_time all
+ order by s, e, row_start;
+select * from log_tbl order by id;
+
+--echo # UPDATE
+--replace_result $sys_datatype_expl SYS_TYPE
+eval create or replace table t (x int, s date, e date,
+ row_start $sys_datatype_expl as row start invisible,
+ row_end $sys_datatype_expl as row end invisible,
+ period for apptime(s, e),
+ period for system_time(row_start, row_end)) with system versioning;
+insert into t values(1, '1999-01-01', '2018-12-12'),
+ (2, '1999-01-01', '1999-12-12');
+
+select row_start into @ins_time from t limit 1;
+--sorted_result
+select * from t;
+
+update t for portion of apptime from '2000-01-01' to '2018-01-01' set x= x + 5;
+select *, if(row_start = @ins_time, "OLD", "NEW"), check_row(row_start, row_end)
+ from t for system_time all
+ order by x, s, e, row_start;
+
+drop table t,log_tbl;
+drop function check_row;
+drop function current_row;
+drop procedure verify_trt;
+drop procedure verify_trt_dummy;
+drop procedure log;
diff --git a/mysql-test/suite/plugins/r/audit_null.result b/mysql-test/suite/plugins/r/audit_null.result
index 75eb5a9f682..ada85b661ee 100644
--- a/mysql-test/suite/plugins/r/audit_null.result
+++ b/mysql-test/suite/plugins/r/audit_null.result
@@ -74,6 +74,9 @@ root[root] @ localhost [] >> create table t1 (a int)
root[root] @ localhost [] test.t1 : create
root[root] @ localhost [] >> insert t1 values (1), (2)
root[root] @ localhost [] test.t1 : write
+root[root] @ localhost [] mysql.table_stats : read
+root[root] @ localhost [] mysql.column_stats : read
+root[root] @ localhost [] mysql.index_stats : read
root[root] @ localhost [] >> select * from t1
root[root] @ localhost [] test.t1 : read
root[root] @ localhost [] >> rename table t1 to t2
@@ -90,6 +93,9 @@ root[root] @ localhost [] test.t2 : read
root[root] @ localhost [] >> select * from v1
root[root] @ localhost [] test.t2 : read
root[root] @ localhost [] test.t2 : read
+root[root] @ localhost [] mysql.table_stats : read
+root[root] @ localhost [] mysql.column_stats : read
+root[root] @ localhost [] mysql.index_stats : read
root[root] @ localhost [] >> drop view v1
root[root] @ localhost [] >> create temporary table t2 (a date)
root[root] @ localhost [] >> insert t2 values ('2020-10-09')
diff --git a/mysql-test/suite/plugins/r/audit_null_debug.result b/mysql-test/suite/plugins/r/audit_null_debug.result
index 9d5c7c4a02c..3b18edf1d0b 100644
--- a/mysql-test/suite/plugins/r/audit_null_debug.result
+++ b/mysql-test/suite/plugins/r/audit_null_debug.result
@@ -1,3 +1,6 @@
+alter table mysql.plugin engine=myisam;
+Warnings:
+Warning 1478 Table storage engine 'MyISAM' does not support the create option 'TRANSACTIONAL=1'
set @old_dbug=@@debug_dbug;
call mtr.add_suppression("Index for table.*mysql.plugin.MYI");
call mtr.add_suppression("Index for table 'plugin' is corrupt; try to repair it");
@@ -13,3 +16,4 @@ SET debug_dbug=@old_dbug;
uninstall plugin audit_null;
ERROR 42000: PLUGIN audit_null does not exist
delete from mysql.plugin where name='audit_null';
+alter table mysql.plugin engine=aria;
diff --git a/mysql-test/suite/plugins/r/auth_ed25519.result b/mysql-test/suite/plugins/r/auth_ed25519.result
index 4785bef3ef7..a7008b318ba 100644
--- a/mysql-test/suite/plugins/r/auth_ed25519.result
+++ b/mysql-test/suite/plugins/r/auth_ed25519.result
@@ -22,22 +22,45 @@ ed25519_password(NULL)
NULL
select * from information_schema.plugins where plugin_name='ed25519';
PLUGIN_NAME ed25519
-PLUGIN_VERSION 1.0
+PLUGIN_VERSION 1.1
PLUGIN_STATUS ACTIVE
PLUGIN_TYPE AUTHENTICATION
-PLUGIN_TYPE_VERSION 2.1
+PLUGIN_TYPE_VERSION 2.2
PLUGIN_LIBRARY auth_ed25519.so
-PLUGIN_LIBRARY_VERSION 1.13
+PLUGIN_LIBRARY_VERSION 1.14
PLUGIN_AUTHOR Sergei Golubchik
PLUGIN_DESCRIPTION Elliptic curve ED25519 based authentication
PLUGIN_LICENSE GPL
LOAD_OPTION ON
PLUGIN_MATURITY Stable
-PLUGIN_AUTH_VERSION 1.0
+PLUGIN_AUTH_VERSION 1.1
create user test1@localhost identified via ed25519 using 'ZIgUREUg5PVgQ6LskhXmO+eZLS0nC8be6HPjYWR4YJY';
show grants for test1@localhost;
Grants for test1@localhost
GRANT USAGE ON *.* TO 'test1'@'localhost' IDENTIFIED VIA ed25519 USING 'ZIgUREUg5PVgQ6LskhXmO+eZLS0nC8be6HPjYWR4YJY'
+drop user test1@localhost;
+create user test1@localhost identified via ed25519 using 'foo';
+ERROR HY000: Password hash should be 43 characters long
+create user test1@localhost identified via ed25519 using '>>>1234567890123456789012345678901234567890';
+ERROR HY000: Password hash should be base64 encoded
+create user test1@localhost identified via ed25519 using password('foo');
+show grants for test1@localhost;
+Grants for test1@localhost
+GRANT USAGE ON *.* TO 'test1'@'localhost' IDENTIFIED VIA ed25519 USING 'vubFBzIrapbfHct1/J72dnUryz5VS7lA6XHH8sIx4TI'
+select ed25519_password('foo');
+ed25519_password('foo')
+vubFBzIrapbfHct1/J72dnUryz5VS7lA6XHH8sIx4TI
+set password for test1@localhost = password('bar');
+show create user test1@localhost;
+CREATE USER for test1@localhost
+CREATE USER 'test1'@'localhost' IDENTIFIED VIA ed25519 USING 'pfzkeWMzkTefY1oshXS+/kATeN51M+4jxi3/cbyTd10'
+select ed25519_password('bar');
+ed25519_password('bar')
+pfzkeWMzkTefY1oshXS+/kATeN51M+4jxi3/cbyTd10
+set password for test1@localhost = 'ZIgUREUg5PVgQ6LskhXmO+eZLS0nC8be6HPjYWR4YJY';
+show create user test1@localhost;
+CREATE USER for test1@localhost
+CREATE USER 'test1'@'localhost' IDENTIFIED VIA ed25519 USING 'ZIgUREUg5PVgQ6LskhXmO+eZLS0nC8be6HPjYWR4YJY'
connect(localhost,test1,public,test,PORT,SOCKET);
connect con1, localhost, test1, public;
ERROR 28000: Access denied for user 'test1'@'localhost' (using password: YES)
diff --git a/mysql-test/suite/plugins/r/cracklib_password_check.result b/mysql-test/suite/plugins/r/cracklib_password_check.result
index 6b4e30b3d81..1194e6eef5a 100644
--- a/mysql-test/suite/plugins/r/cracklib_password_check.result
+++ b/mysql-test/suite/plugins/r/cracklib_password_check.result
@@ -6,7 +6,7 @@ PLUGIN_STATUS ACTIVE
PLUGIN_TYPE PASSWORD VALIDATION
PLUGIN_TYPE_VERSION 1.0
PLUGIN_LIBRARY cracklib_password_check.so
-PLUGIN_LIBRARY_VERSION 1.13
+PLUGIN_LIBRARY_VERSION 1.14
PLUGIN_AUTHOR Sergei Golubchik
PLUGIN_DESCRIPTION Password validation via CrackLib
PLUGIN_LICENSE GPL
diff --git a/mysql-test/suite/plugins/r/feedback_plugin_load.result b/mysql-test/suite/plugins/r/feedback_plugin_load.result
index f96b4d9b71f..2b8fc03b931 100644
--- a/mysql-test/suite/plugins/r/feedback_plugin_load.result
+++ b/mysql-test/suite/plugins/r/feedback_plugin_load.result
@@ -20,7 +20,7 @@ WHERE VARIABLE_NAME LIKE 'Collation used %'
ORDER BY VARIABLE_NAME;
VARIABLE_VALUE>0 VARIABLE_NAME
1 Collation used binary
-1 Collation used latin1_bin
1 Collation used latin1_swedish_ci
+1 Collation used utf8mb4_bin
1 Collation used utf8_bin
1 Collation used utf8_general_ci
diff --git a/mysql-test/suite/plugins/r/feedback_plugin_send.result b/mysql-test/suite/plugins/r/feedback_plugin_send.result
index 35db81fa6c1..0bdf94335d5 100644
--- a/mysql-test/suite/plugins/r/feedback_plugin_send.result
+++ b/mysql-test/suite/plugins/r/feedback_plugin_send.result
@@ -20,10 +20,11 @@ WHERE VARIABLE_NAME LIKE 'Collation used %'
ORDER BY VARIABLE_NAME;
VARIABLE_VALUE>0 VARIABLE_NAME
1 Collation used binary
-1 Collation used latin1_bin
1 Collation used latin1_swedish_ci
+1 Collation used utf8mb4_bin
1 Collation used utf8_bin
1 Collation used utf8_general_ci
set global sql_mode=ONLY_FULL_GROUP_BY;
+# restart
6: feedback plugin: report to 'http://mariadb.org/feedback_plugin/post' was sent
6: feedback plugin: server replied 'ok'
diff --git a/mysql-test/suite/plugins/r/max_password_errors_auth_named_pipe.result b/mysql-test/suite/plugins/r/max_password_errors_auth_named_pipe.result
new file mode 100644
index 00000000000..82d464e3cb2
--- /dev/null
+++ b/mysql-test/suite/plugins/r/max_password_errors_auth_named_pipe.result
@@ -0,0 +1,12 @@
+set @old_max_password_errors=@@max_password_errors;
+create user nosuchuser identified with 'named_pipe';
+set global max_password_errors=1;
+connect(localhost,nosuchuser,,test,MASTER_PORT,MASTER_SOCKET);
+connect pipe_con,localhost,nosuchuser,,,,,PIPE;
+ERROR 28000: Access denied for user 'nosuchuser'@'localhost'
+connect(localhost,nosuchuser,,test,MASTER_PORT,MASTER_SOCKET);
+connect pipe_con,localhost,nosuchuser,,,,,PIPE;
+ERROR 28000: Access denied for user 'nosuchuser'@'localhost'
+DROP USER nosuchuser;
+FLUSH PRIVILEGES;
+set global max_password_errors=@old_max_password_errors;
diff --git a/mysql-test/suite/plugins/r/max_password_errors_auth_socket.result b/mysql-test/suite/plugins/r/max_password_errors_auth_socket.result
new file mode 100644
index 00000000000..eb7cb64167b
--- /dev/null
+++ b/mysql-test/suite/plugins/r/max_password_errors_auth_socket.result
@@ -0,0 +1,12 @@
+set @old_max_password_errors=@@max_password_errors;
+create user nosuchuser identified with 'unix_socket';
+set global max_password_errors=1;
+connect(localhost,nosuchuser,,test,MASTER_PORT,MASTER_SOCKET);
+connect pipe_con,localhost,nosuchuser;
+ERROR 28000: Access denied for user 'nosuchuser'@'localhost'
+connect(localhost,nosuchuser,,test,MASTER_PORT,MASTER_SOCKET);
+connect pipe_con,localhost,nosuchuser;
+ERROR 28000: Access denied for user 'nosuchuser'@'localhost'
+DROP USER nosuchuser;
+FLUSH PRIVILEGES;
+set global max_password_errors=@old_max_password_errors;
diff --git a/mysql-test/suite/plugins/r/multiauth.result b/mysql-test/suite/plugins/r/multiauth.result
new file mode 100644
index 00000000000..56c33755b02
--- /dev/null
+++ b/mysql-test/suite/plugins/r/multiauth.result
@@ -0,0 +1,193 @@
+install soname 'auth_ed25519';
+create user USER identified via unix_socket OR mysql_native_password as password("GOOD");
+create user mysqltest1 identified via unix_socket OR mysql_native_password as password("good");
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA unix_socket OR mysql_native_password USING '*8409037B3E362D6DAE24C8E667F4D3B66716144E'
+# name match = ok
+select user(), current_user(), database();
+user() current_user() database()
+USER@localhost USER@% test
+# name does not match, password good = ok
+select user(), current_user(), database();
+user() current_user() database()
+mysqltest1@localhost mysqltest1@% test
+# name does not match, password bad = failure
+mysqltest: Could not open connection 'default': 1045 Access denied for user 'mysqltest1'@'localhost' (using password: YES)
+drop user USER, mysqltest1;
+create user USER identified via mysql_native_password as password("GOOD") OR unix_socket;
+create user mysqltest1 identified via mysql_native_password as password("good") OR unix_socket;
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA mysql_native_password USING '*8409037B3E362D6DAE24C8E667F4D3B66716144E' OR unix_socket
+# name match = ok
+select user(), current_user(), database();
+user() current_user() database()
+USER@localhost USER@% test
+# name does not match, password good = ok
+select user(), current_user(), database();
+user() current_user() database()
+mysqltest1@localhost mysqltest1@% test
+# name does not match, password bad = failure
+mysqltest: Could not open connection 'default': 1698 Access denied for user 'mysqltest1'@'localhost'
+drop user USER, mysqltest1;
+create user USER identified via unix_socket OR ed25519 as password("GOOD");
+create user mysqltest1 identified via unix_socket OR ed25519 as password("good");
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA unix_socket OR ed25519 USING 'F4aF8bw7130VaRbdLCl4f/P/wkjDmgJXwWvpJ5gmsZc'
+# name match = ok
+select user(), current_user(), database();
+user() current_user() database()
+USER@localhost USER@% test
+# name does not match, password good = ok
+select user(), current_user(), database();
+user() current_user() database()
+mysqltest1@localhost mysqltest1@% test
+# name does not match, password bad = failure
+mysqltest: Could not open connection 'default': 1045 Access denied for user 'mysqltest1'@'localhost' (using password: YES)
+drop user USER, mysqltest1;
+create user USER identified via ed25519 as password("GOOD") OR unix_socket;
+create user mysqltest1 identified via ed25519 as password("good") OR unix_socket;
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA ed25519 USING 'F4aF8bw7130VaRbdLCl4f/P/wkjDmgJXwWvpJ5gmsZc' OR unix_socket
+# name match = ok
+select user(), current_user(), database();
+user() current_user() database()
+USER@localhost USER@% test
+# name does not match, password good = ok
+select user(), current_user(), database();
+user() current_user() database()
+mysqltest1@localhost mysqltest1@% test
+# name does not match, password bad = failure
+mysqltest: Could not open connection 'default': 1698 Access denied for user 'mysqltest1'@'localhost'
+drop user USER, mysqltest1;
+create user USER identified via ed25519 as password("GOOD") OR unix_socket OR mysql_native_password as password("works");
+create user mysqltest1 identified via ed25519 as password("good") OR unix_socket OR mysql_native_password as password("works");
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA ed25519 USING 'F4aF8bw7130VaRbdLCl4f/P/wkjDmgJXwWvpJ5gmsZc' OR unix_socket OR mysql_native_password USING '*7D8C3DF236D9163B6C274A9D47704BC496988460'
+# name match = ok
+select user(), current_user(), database();
+user() current_user() database()
+USER@localhost USER@% test
+# name does not match, password good = ok
+select user(), current_user(), database();
+user() current_user() database()
+mysqltest1@localhost mysqltest1@% test
+# name does not match, second password works = ok
+select user(), current_user(), database();
+user() current_user() database()
+mysqltest1@localhost mysqltest1@% test
+# name does not match, password bad = failure
+mysqltest: Could not open connection 'default': 1045 Access denied for user 'mysqltest1'@'localhost' (using password: YES)
+drop user USER, mysqltest1;
+create user mysqltest1 identified via mysql_native_password as password("good") OR mysql_native_password as password("works");
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA mysql_native_password USING '*8409037B3E362D6DAE24C8E667F4D3B66716144E' OR mysql_native_password USING '*7D8C3DF236D9163B6C274A9D47704BC496988460'
+# password good = ok
+select user(), current_user(), database();
+user() current_user() database()
+mysqltest1@localhost mysqltest1@% test
+# second password works = ok
+select user(), current_user(), database();
+user() current_user() database()
+mysqltest1@localhost mysqltest1@% test
+# password bad = failure
+mysqltest: Could not open connection 'default': 1045 Access denied for user 'mysqltest1'@'localhost' (using password: YES)
+drop user mysqltest1;
+create user mysqltest1 identified via ed25519 as password("good") OR unix_socket OR mysql_native_password as password("works");
+show grants for mysqltest1;
+Grants for mysqltest1@%
+GRANT USAGE ON *.* TO 'mysqltest1'@'%' IDENTIFIED VIA ed25519 USING 'F4aF8bw7130VaRbdLCl4f/P/wkjDmgJXwWvpJ5gmsZc' OR unix_socket OR mysql_native_password USING '*7D8C3DF236D9163B6C274A9D47704BC496988460'
+select json_detailed(priv) from mysql.global_priv where user='mysqltest1';
+json_detailed(priv)
+{
+ "access": 0,
+ "plugin": "mysql_native_password",
+ "authentication_string": "*7D8C3DF236D9163B6C274A9D47704BC496988460",
+ "auth_or":
+ [
+
+ {
+ "plugin": "ed25519",
+ "authentication_string": "F4aF8bw7130VaRbdLCl4f/P/wkjDmgJXwWvpJ5gmsZc"
+ },
+
+ {
+ "plugin": "unix_socket"
+ },
+
+ {
+ }
+ ],
+ "password_last_changed": #
+}
+select password,plugin,authentication_string from mysql.user where user='mysqltest1';
+Password plugin authentication_string
+*7D8C3DF236D9163B6C274A9D47704BC496988460 mysql_native_password *7D8C3DF236D9163B6C274A9D47704BC496988460
+flush privileges;
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA ed25519 USING 'F4aF8bw7130VaRbdLCl4f/P/wkjDmgJXwWvpJ5gmsZc' OR unix_socket OR mysql_native_password USING '*7D8C3DF236D9163B6C274A9D47704BC496988460'
+set password for mysqltest1 = password('foobar');
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA ed25519 USING 'qv2mG6HWCuy32Slb5xhV4THStewNz2VINVPbgk+XAJ8' OR unix_socket OR mysql_native_password USING '*7D8C3DF236D9163B6C274A9D47704BC496988460'
+alter user mysqltest1 identified via unix_socket OR mysql_native_password as password("some");
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA unix_socket OR mysql_native_password USING '*BFE3F4604CFD21E6595080A261D92EF0183B5971'
+set password for mysqltest1 = password('foobar');
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA unix_socket OR mysql_native_password USING '*9B500343BC52E2911172EB52AE5CF4847604C6E5'
+alter user mysqltest1 identified via unix_socket;
+set password for mysqltest1 = password('bla');
+ERROR HY000: SET PASSWORD is ignored for users authenticating via unix_socket plugin
+alter user mysqltest1 identified via mysql_native_password as password("some") or unix_socket;
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA mysql_native_password USING '*BFE3F4604CFD21E6595080A261D92EF0183B5971' OR unix_socket
+drop user mysqltest1;
+create user mysqltest1 identified via ed25519 as password("good") OR unix_socket OR mysql_native_password as password("works");
+ERROR HY000: Column count of mysql.user is wrong. Expected 3, found 47. Created with MariaDB XX.YY.ZZ, now running XX.YY.ZZ. Please use mysql_upgrade to fix this error
+create user USER identified via mysql_native_password as '1234567890123456789012345678901234567890a' OR unix_socket;
+create user mysqltest1 identified via mysql_native_password as '1234567890123456789012345678901234567890a' OR unix_socket;
+update mysql.global_priv set priv=replace(priv, '1234567890123456789012345678901234567890a', 'invalid password');
+flush privileges;
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA mysql_native_password USING 'invalid password' OR unix_socket
+# name match = ok
+select user(), current_user(), database();
+user() current_user() database()
+USER@localhost USER@% test
+# name does not match = failure
+mysqltest: Could not open connection 'default': 1698 Access denied for user 'mysqltest1'@'localhost'
+# SET PASSWORD helps
+set password for mysqltest1 = password('bla');
+select user(), current_user(), database();
+user() current_user() database()
+mysqltest1@localhost mysqltest1@% test
+drop user USER, mysqltest1;
+create user mysqltest1 identified via ed25519 as password("good");
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA ed25519 USING 'F4aF8bw7130VaRbdLCl4f/P/wkjDmgJXwWvpJ5gmsZc'
+# no plugin = failure
+mysqltest: Could not open connection 'default': 1045 Plugin client_ed25519 could not be loaded: <PLUGINDIR>/no/client_ed25519.so: cannot open shared object file: No such file or directory
+alter user mysqltest1 identified via ed25519 as password("good") OR mysql_native_password as password("works");
+show create user mysqltest1;
+CREATE USER for mysqltest1@%
+CREATE USER 'mysqltest1'@'%' IDENTIFIED VIA ed25519 USING 'F4aF8bw7130VaRbdLCl4f/P/wkjDmgJXwWvpJ5gmsZc' OR mysql_native_password USING '*7D8C3DF236D9163B6C274A9D47704BC496988460'
+# no plugin = failure
+mysqltest: Could not open connection 'default': 1045 Access denied for user 'mysqltest1'@'localhost' (using password: YES)
+# no plugin, second password works = ok
+select user(), current_user(), database();
+user() current_user() database()
+mysqltest1@localhost mysqltest1@% test
+drop user mysqltest1;
+uninstall soname 'auth_ed25519';
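The multiauth results above come from the MDEV-11340 syntax, in which several authentication plugins are chained with OR and tried in the order given; a minimal sketch of the idea (user name and passwords are illustrative):

install soname 'auth_ed25519';
-- The first listed method that succeeds authenticates the connection.
create user app1 identified via ed25519 as password('secret1')
  OR unix_socket
  OR mysql_native_password as password('secret2');
-- The whole chain is stored as one JSON document in mysql.global_priv (note the "auth_or" array).
select json_detailed(priv) from mysql.global_priv where user = 'app1';
show create user app1;
drop user app1;
uninstall soname 'auth_ed25519';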
diff --git a/mysql-test/suite/plugins/r/pam.result b/mysql-test/suite/plugins/r/pam.result
index 86303206b3b..a16cd7f3d43 100644
--- a/mysql-test/suite/plugins/r/pam.result
+++ b/mysql-test/suite/plugins/r/pam.result
@@ -20,6 +20,13 @@ Challenge input first.
Enter: not very secret challenge
Now, the magic number!
PIN: ****
+#
+# authentication is unsuccessful
+#
+Challenge input first.
+Enter: crash pam module
+Now, the magic number!
+PIN: ***
drop user test_pam;
drop user pam_test;
uninstall plugin pam;
diff --git a/mysql-test/suite/plugins/r/pam_v1.result b/mysql-test/suite/plugins/r/pam_v1.result
new file mode 100644
index 00000000000..bf4c0242df2
--- /dev/null
+++ b/mysql-test/suite/plugins/r/pam_v1.result
@@ -0,0 +1,25 @@
+install plugin pam soname 'auth_pam_v1.so';
+create user test_pam identified via pam using 'mariadb_mtr';
+create user pam_test;
+grant proxy on pam_test to test_pam;
+#
+# authentication is successful, challenge/pin are ok
+# note that current_user() differs from user()
+#
+Challenge input first.
+Enter: not very secret challenge
+Now, the magic number!
+PIN: ****
+select user(), current_user(), database();
+user() current_user() database()
+test_pam@localhost pam_test@% test
+#
+# authentication is unsuccessful
+#
+Challenge input first.
+Enter: not very secret challenge
+Now, the magic number!
+PIN: ****
+drop user test_pam;
+drop user pam_test;
+uninstall plugin pam;
diff --git a/mysql-test/suite/plugins/r/server_audit.result b/mysql-test/suite/plugins/r/server_audit.result
index b8d2986feea..02c2b872ebc 100644
--- a/mysql-test/suite/plugins/r/server_audit.result
+++ b/mysql-test/suite/plugins/r/server_audit.result
@@ -277,11 +277,17 @@ TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'create table t2 (id int)',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_excl_users=\'odin, dva, tri\'',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'SHOW WARNINGS',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,test,t1,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,table_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,column_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,index_stats,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'insert into t1 values (1), (2)',0
TIME,HOSTNAME,root,localhost,ID,ID,READ,test,t1,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'select * from t1',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_incl_users=\'odin, root, dva, tri\'',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,test,t2,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,table_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,column_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,index_stats,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'insert into t2 values (1), (2)',0
TIME,HOSTNAME,root,localhost,ID,ID,READ,test,t2,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'select * from t2',0
@@ -306,6 +312,9 @@ TIME,HOSTNAME,root,localhost,ID,0,CONNECT,test,,0
TIME,HOSTNAME,root,localhost,ID,ID,CREATE,test,t1,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'create table t1 (id2 int)',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,test,t1,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,table_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,column_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,index_stats,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'insert into t1 values (1), (2)',0
TIME,HOSTNAME,root,localhost,ID,ID,READ,test,t1,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'select * from t1',0
@@ -318,6 +327,9 @@ TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'use sa_db',0
TIME,HOSTNAME,root,localhost,ID,ID,CREATE,sa_db,sa_t1,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'create table sa_t1(id int)',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,sa_db,sa_t1,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,table_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,column_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,index_stats,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'insert into sa_t1 values (1), (2)',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,table_stats,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,column_stats,
@@ -331,34 +343,34 @@ TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop database sa_db',0
TIME,HOSTNAME,root,localhost,ID,0,DISCONNECT,,,0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'create database sa_db',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'use sa_db',0
-TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,user,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,db,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,tables_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,columns_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,procs_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proxies_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,roles_mapping,
+TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'CREATE USER u1 IDENTIFIED BY *****',0
-TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,user,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,tables_priv,
+TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'GRANT ALL ON sa_db TO u2 IDENTIFIED BY *****',0
-TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,user,
+TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'SET PASSWORD FOR u1 = PASSWORD(*****)',0
-TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,user,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,db,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,tables_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,columns_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,procs_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proxies_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,roles_mapping,
+TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'CREATE USER u3 IDENTIFIED BY *****',0
-TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,user,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,db,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,tables_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,columns_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,procs_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proxies_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,roles_mapping,
+TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop user u1, u2, u3',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'create table t1(id int)',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop table t1',0
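The audit rows above now report writes to mysql.global_priv rather than mysql.user: account metadata (plugin, authentication string, access bits, and so on) lives in a single JSON column, with mysql.user retained as a compatibility view. A minimal sketch of inspecting the same account both ways (the account name is illustrative):

select user, host, json_detailed(priv) from mysql.global_priv where user = 'u1';
select user, host, plugin, authentication_string from mysql.user where user = 'u1';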
diff --git a/mysql-test/suite/plugins/r/show_all_plugins.result b/mysql-test/suite/plugins/r/show_all_plugins.result
index dd6cbfce4c4..3bdaf39d0d0 100644
--- a/mysql-test/suite/plugins/r/show_all_plugins.result
+++ b/mysql-test/suite/plugins/r/show_all_plugins.result
@@ -4,8 +4,8 @@ Variable_name Value
Opened_plugin_libraries 0
select * from information_schema.all_plugins where plugin_library='ha_example.so';
PLUGIN_NAME PLUGIN_VERSION PLUGIN_STATUS PLUGIN_TYPE PLUGIN_TYPE_VERSION PLUGIN_LIBRARY PLUGIN_LIBRARY_VERSION PLUGIN_AUTHOR PLUGIN_DESCRIPTION PLUGIN_LICENSE LOAD_OPTION PLUGIN_MATURITY PLUGIN_AUTH_VERSION
-EXAMPLE 0.1 NOT INSTALLED STORAGE ENGINE MYSQL_VERSION_ID ha_example.so 1.13 Brian Aker, MySQL AB Example storage engine GPL OFF Experimental 0.1
-UNUSABLE 3.14 NOT INSTALLED DAEMON MYSQL_VERSION_ID ha_example.so 1.13 Sergei Golubchik Unusable Daemon GPL OFF Experimental 3.14.15.926
+EXAMPLE 0.1 NOT INSTALLED STORAGE ENGINE MYSQL_VERSION_ID ha_example.so 1.14 Brian Aker, MySQL AB Example storage engine GPL OFF Experimental 0.1
+UNUSABLE 3.14 NOT INSTALLED DAEMON MYSQL_VERSION_ID ha_example.so 1.14 Sergei Golubchik Unusable Daemon GPL OFF Experimental 3.14.15.926
show status like '%libraries%';
Variable_name Value
Opened_plugin_libraries 1
diff --git a/mysql-test/suite/plugins/r/simple_password_check.result b/mysql-test/suite/plugins/r/simple_password_check.result
index 672d0107492..2e706115bd1 100644
--- a/mysql-test/suite/plugins/r/simple_password_check.result
+++ b/mysql-test/suite/plugins/r/simple_password_check.result
@@ -6,7 +6,7 @@ PLUGIN_STATUS ACTIVE
PLUGIN_TYPE PASSWORD VALIDATION
PLUGIN_TYPE_VERSION 1.0
PLUGIN_LIBRARY simple_password_check.so
-PLUGIN_LIBRARY_VERSION 1.13
+PLUGIN_LIBRARY_VERSION 1.14
PLUGIN_AUTHOR Sergei Golubchik
PLUGIN_DESCRIPTION Simple password strength checks
PLUGIN_LICENSE GPL
@@ -135,9 +135,11 @@ grant select on *.* to foo2 identified with mysql_old_password using '2222222222
ERROR HY000: The MariaDB server is running with the --strict-password-validation option so it cannot execute this statement
create user foo2 identified with mysql_native_password using '';
ERROR HY000: Your password does not satisfy the current policy requirements
+grant select on *.* to foo2 identified with mysql_old_password using '';
+ERROR HY000: Your password does not satisfy the current policy requirements
grant select on *.* to foo2 identified with mysql_old_password;
-ERROR 28000: Can't find any matching row in the user table
-update mysql.user set password='xxx' where user='foo1';
+ERROR HY000: Your password does not satisfy the current policy requirements
+update mysql.global_priv set priv=json_set(priv, '$.authentication_string', 'xxx') where user='foo1';
set global strict_password_validation=0;
set password for foo1 = '';
ERROR HY000: Your password does not satisfy the current policy requirements
@@ -155,6 +157,7 @@ set global strict_password_validation=1;
drop user foo1;
create role r1;
drop role r1;
+flush privileges;
uninstall plugin simple_password_check;
create user foo1 identified by 'pwd';
drop user foo1;
diff --git a/mysql-test/suite/plugins/r/thread_pool_server_audit.result b/mysql-test/suite/plugins/r/thread_pool_server_audit.result
index cf09ccb3a51..eceb2280ed4 100644
--- a/mysql-test/suite/plugins/r/thread_pool_server_audit.result
+++ b/mysql-test/suite/plugins/r/thread_pool_server_audit.result
@@ -250,11 +250,17 @@ TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'create table t2 (id int)',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_excl_users=\'odin, dva, tri\'',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'SHOW WARNINGS',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,test,t1,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,table_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,column_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,index_stats,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'insert into t1 values (1), (2)',0
TIME,HOSTNAME,root,localhost,ID,ID,READ,test,t1,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'select * from t1',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_incl_users=\'odin, root, dva, tri\'',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,test,t2,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,table_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,column_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,index_stats,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'insert into t2 values (1), (2)',0
TIME,HOSTNAME,root,localhost,ID,ID,READ,test,t2,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'select * from t2',0
@@ -279,6 +285,9 @@ TIME,HOSTNAME,root,localhost,ID,0,CONNECT,test,,0
TIME,HOSTNAME,root,localhost,ID,ID,CREATE,test,t1,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'create table t1 (id2 int)',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,test,t1,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,table_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,column_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,index_stats,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'insert into t1 values (1), (2)',0
TIME,HOSTNAME,root,localhost,ID,ID,READ,test,t1,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'select * from t1',0
@@ -291,6 +300,9 @@ TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'use sa_db',0
TIME,HOSTNAME,root,localhost,ID,ID,CREATE,sa_db,sa_t1,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'create table sa_t1(id int)',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,sa_db,sa_t1,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,table_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,column_stats,
+TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,index_stats,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'insert into sa_t1 values (1), (2)',0
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,table_stats,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,column_stats,
@@ -304,34 +316,34 @@ TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop database sa_db',0
TIME,HOSTNAME,root,localhost,ID,0,DISCONNECT,,,0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'create database sa_db',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'use sa_db',0
-TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,user,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,db,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,tables_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,columns_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,procs_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proxies_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,roles_mapping,
+TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'CREATE USER u1 IDENTIFIED BY *****',0
-TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,user,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,tables_priv,
+TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'GRANT ALL ON sa_db TO u2 IDENTIFIED BY *****',0
-TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,user,
+TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'SET PASSWORD FOR u1 = PASSWORD(*****)',0
-TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,user,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,db,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,tables_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,columns_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,procs_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proxies_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,roles_mapping,
+TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'CREATE USER u3 IDENTIFIED BY *****',0
-TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,user,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,db,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,tables_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,columns_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,procs_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proxies_priv,
TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,roles_mapping,
+TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,global_priv,
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop user u1, u2, u3',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'create table t1(id int)',0
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop table t1',0
diff --git a/mysql-test/suite/plugins/r/unix_socket.result b/mysql-test/suite/plugins/r/unix_socket.result
index 0e08794fbe6..a725583f201 100644
--- a/mysql-test/suite/plugins/r/unix_socket.result
+++ b/mysql-test/suite/plugins/r/unix_socket.result
@@ -1,4 +1,3 @@
-install plugin unix_socket soname 'auth_socket.so';
#
# with named user
#
@@ -28,4 +27,3 @@ USER@localhost @% test
#
delete from mysql.user where user='';
FLUSH PRIVILEGES;
-uninstall plugin unix_socket;
diff --git a/mysql-test/suite/plugins/suite.pm b/mysql-test/suite/plugins/suite.pm
index a2ac3957af0..cb42fb66705 100644
--- a/mysql-test/suite/plugins/suite.pm
+++ b/mysql-test/suite/plugins/suite.pm
@@ -12,7 +12,7 @@ sub cassandra_running() {
sub skip_combinations {
my %skip;
- $skip{'t/pam.test'} = 'No pam setup for mtr'
+ $skip{'t/pam_init.inc'} = 'No pam setup for mtr'
unless -e '/etc/pam.d/mariadb_mtr';
$skip{'t/cassandra.test'} = 'Cassandra is not running'
unless cassandra_running();
diff --git a/mysql-test/suite/plugins/t/audit_null_debug.test b/mysql-test/suite/plugins/t/audit_null_debug.test
index 0534108b107..da949b15f8a 100644
--- a/mysql-test/suite/plugins/t/audit_null_debug.test
+++ b/mysql-test/suite/plugins/t/audit_null_debug.test
@@ -5,6 +5,8 @@ if (!$ADT_NULL_SO) {
skip No NULL_AUDIT plugin;
}
+alter table mysql.plugin engine=myisam;
+
set @old_dbug=@@debug_dbug;
call mtr.add_suppression("Index for table.*mysql.plugin.MYI");
call mtr.add_suppression("Index for table 'plugin' is corrupt; try to repair it");
@@ -30,4 +32,4 @@ SET debug_dbug=@old_dbug;
uninstall plugin audit_null;
delete from mysql.plugin where name='audit_null';
-
+alter table mysql.plugin engine=aria;
diff --git a/mysql-test/suite/plugins/t/auth_ed25519.test b/mysql-test/suite/plugins/t/auth_ed25519.test
index 3e02bdf97d2..8e0bdd1d460 100644
--- a/mysql-test/suite/plugins/t/auth_ed25519.test
+++ b/mysql-test/suite/plugins/t/auth_ed25519.test
@@ -28,6 +28,19 @@ query_vertical select * from information_schema.plugins where plugin_name='ed255
let $pwd=`select ed25519_password("secret")`;
eval create user test1@localhost identified via ed25519 using '$pwd';
show grants for test1@localhost;
+drop user test1@localhost;
+--error ER_PASSWD_LENGTH
+create user test1@localhost identified via ed25519 using 'foo';
+--error ER_PASSWD_LENGTH
+create user test1@localhost identified via ed25519 using '>>>1234567890123456789012345678901234567890';
+create user test1@localhost identified via ed25519 using password('foo');
+show grants for test1@localhost;
+select ed25519_password('foo');
+set password for test1@localhost = password('bar');
+show create user test1@localhost;
+select ed25519_password('bar');
+eval set password for test1@localhost = '$pwd';
+show create user test1@localhost;
replace_result $MASTER_MYPORT PORT $MASTER_MYSOCK SOCKET;
error ER_ACCESS_DENIED_ERROR;
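The auth_ed25519 changes above add a password() shorthand and SET PASSWORD support on top of the raw 43-character hash; a minimal sketch (the literal hash is the one shown in the result file, the plain-text passwords are illustrative):

install soname 'auth_ed25519';
-- Either supply the pre-computed hash ...
create user u1@localhost identified via ed25519 using 'ZIgUREUg5PVgQ6LskhXmO+eZLS0nC8be6HPjYWR4YJY';
-- ... or let the server derive it from a plain-text password:
create or replace user u1@localhost identified via ed25519 using password('foo');
set password for u1@localhost = password('bar');
show create user u1@localhost;
drop user u1@localhost;
uninstall soname 'auth_ed25519';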
diff --git a/mysql-test/suite/plugins/t/feedback_plugin_load.test b/mysql-test/suite/plugins/t/feedback_plugin_load.test
index 8b4aee28362..cfaf68ce96d 100644
--- a/mysql-test/suite/plugins/t/feedback_plugin_load.test
+++ b/mysql-test/suite/plugins/t/feedback_plugin_load.test
@@ -28,11 +28,11 @@ select * from information_schema.feedback where variable_name like 'feed%'
and variable_name not like '%debug%';
# Embedded server does not use the table mysql.user and thus
-# does not automatically use latin1_bin on startup. Use it manually.
+# does not automatically use utf8mb4 on startup. Use it manually.
--disable_query_log
if (`SELECT VERSION() LIKE '%embedded%'`)
{
- DO _latin1'test' COLLATE latin1_bin;
+ create temporary table t1 (a json);
}
--enable_query_log
SELECT VARIABLE_VALUE>0, VARIABLE_NAME FROM INFORMATION_SCHEMA.FEEDBACK
diff --git a/mysql-test/suite/plugins/t/max_password_errors_auth_named_pipe.opt b/mysql-test/suite/plugins/t/max_password_errors_auth_named_pipe.opt
new file mode 100644
index 00000000000..52bf94f3511
--- /dev/null
+++ b/mysql-test/suite/plugins/t/max_password_errors_auth_named_pipe.opt
@@ -0,0 +1 @@
+--loose-enable-named-pipe --plugin-load=$AUTH_NAMED_PIPE_SO
diff --git a/mysql-test/suite/plugins/t/max_password_errors_auth_named_pipe.test b/mysql-test/suite/plugins/t/max_password_errors_auth_named_pipe.test
new file mode 100644
index 00000000000..79aeb7d3cbe
--- /dev/null
+++ b/mysql-test/suite/plugins/t/max_password_errors_auth_named_pipe.test
@@ -0,0 +1,22 @@
+# Tests that max_password_errors has no effect on login errors with
+# passwordless plugins (Windows version / auth_named_pipe)
+
+--source include/not_embedded.inc
+--source include/have_auth_named_pipe.inc
+if (`SELECT '$USERNAME' = 'nosuchuser'`) {
+ skip skipped for nosuchuser;
+}
+set @old_max_password_errors=@@max_password_errors;
+create user nosuchuser identified with 'named_pipe';
+
+set global max_password_errors=1;
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_ACCESS_DENIED_NO_PASSWORD_ERROR;
+connect(pipe_con,localhost,nosuchuser,,,,,PIPE);
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_ACCESS_DENIED_NO_PASSWORD_ERROR;
+connect(pipe_con,localhost,nosuchuser,,,,,PIPE);
+
+DROP USER nosuchuser;
+FLUSH PRIVILEGES;
+set global max_password_errors=@old_max_password_errors;
diff --git a/mysql-test/suite/plugins/t/max_password_errors_auth_socket.opt b/mysql-test/suite/plugins/t/max_password_errors_auth_socket.opt
new file mode 100644
index 00000000000..91bb73e34f7
--- /dev/null
+++ b/mysql-test/suite/plugins/t/max_password_errors_auth_socket.opt
@@ -0,0 +1 @@
+--loose-enable-named-pipe --plugin-load=$AUTH_SOCKET_SO
diff --git a/mysql-test/suite/plugins/t/max_password_errors_auth_socket.test b/mysql-test/suite/plugins/t/max_password_errors_auth_socket.test
new file mode 100644
index 00000000000..495a68a0b59
--- /dev/null
+++ b/mysql-test/suite/plugins/t/max_password_errors_auth_socket.test
@@ -0,0 +1,23 @@
+# Tests that max_password_errors has no effect on login errors with
+# passwordless plugins (Unix version / auth_unix_socket)
+
+--source include/not_embedded.inc
+--source include/have_unix_socket.inc
+
+if (`SELECT '$USER' = 'nosuchuser'`) {
+ skip USER is nosuchuser;
+}
+set @old_max_password_errors=@@max_password_errors;
+create user nosuchuser identified with 'unix_socket';
+
+set global max_password_errors=1;
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_ACCESS_DENIED_NO_PASSWORD_ERROR;
+connect(pipe_con,localhost,nosuchuser);
+--replace_result $MASTER_MYSOCK MASTER_SOCKET $MASTER_MYPORT MASTER_PORT
+error ER_ACCESS_DENIED_NO_PASSWORD_ERROR;
+connect(pipe_con,localhost,nosuchuser);
+
+DROP USER nosuchuser;
+FLUSH PRIVILEGES;
+set global max_password_errors=@old_max_password_errors;
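Both max_password_errors tests above pin down the same rule: failed connections through passwordless plugins do not count towards max_password_errors, so such accounts are never blocked. A minimal sketch of the contrast, assuming the documented blocking behaviour for password-based accounts:

set global max_password_errors = 1;
-- Password-based account: once the limit of consecutive wrong passwords is reached,
-- further attempts are rejected until FLUSH PRIVILEGES resets the counter.
create user pw_user identified by 'right_password';
-- Passwordless account (unix_socket / named_pipe): every failed attempt keeps
-- returning a plain "Access denied" error and no blocking ever kicks in.
create user socket_user identified with 'unix_socket';
flush privileges;
set global max_password_errors = default;
drop user pw_user, socket_user;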
diff --git a/mysql-test/suite/plugins/t/multiauth.test b/mysql-test/suite/plugins/t/multiauth.test
new file mode 100644
index 00000000000..ffd4851c20d
--- /dev/null
+++ b/mysql-test/suite/plugins/t/multiauth.test
@@ -0,0 +1,196 @@
+#
+# MDEV-11340 Allow multiple alternative authentication methods for the same user
+#
+--source include/have_unix_socket.inc
+if (`SELECT '$USER' = 'mysqltest1'`) {
+ skip USER is mysqltest1;
+}
+if (!$AUTH_ED25519_SO) {
+ skip No auth_ed25519 plugin;
+}
+
+--let $plugindir=`SELECT @@global.plugin_dir`
+install soname 'auth_ed25519';
+
+--let $try_auth=$MYSQL_TEST < $MYSQLTEST_VARDIR/tmp/peercred_test.txt 2>&1
+
+--write_file $MYSQLTEST_VARDIR/tmp/peercred_test.txt
+--let $replace1=$USER@localhost
+--let $replace2=$USER@%
+--replace_result $replace1 "USER@localhost" $replace2 "USER@%"
+select user(), current_user(), database();
+EOF
+
+--let $creplace=create user $USER
+--let $dreplace=drop user $USER
+
+#
+# socket,password
+#
+--replace_result $creplace "create user USER"
+eval $creplace identified via unix_socket OR mysql_native_password as password("GOOD");
+create user mysqltest1 identified via unix_socket OR mysql_native_password as password("good");
+show create user mysqltest1;
+--echo # name match = ok
+--exec $try_auth -u $USER
+--echo # name does not match, password good = ok
+--exec $try_auth -u mysqltest1 -pgood
+--echo # name does not match, password bad = failure
+--error 1
+--exec $try_auth -u mysqltest1 -pbad
+--replace_result $dreplace "drop user USER"
+eval $dreplace, mysqltest1;
+
+#
+# password,socket
+#
+--replace_result $creplace "create user USER"
+eval $creplace identified via mysql_native_password as password("GOOD") OR unix_socket;
+create user mysqltest1 identified via mysql_native_password as password("good") OR unix_socket;
+show create user mysqltest1;
+--echo # name match = ok
+--exec $try_auth -u $USER
+--echo # name does not match, password good = ok
+--exec $try_auth -u mysqltest1 -pgood
+--echo # name does not match, password bad = failure
+--error 1
+--exec $try_auth -u mysqltest1 -pbad
+--replace_result $dreplace "drop user USER"
+eval $dreplace, mysqltest1;
+
+#
+# socket,ed25519
+#
+--replace_result $creplace "create user USER"
+eval $creplace identified via unix_socket OR ed25519 as password("GOOD");
+create user mysqltest1 identified via unix_socket OR ed25519 as password("good");
+show create user mysqltest1;
+--echo # name match = ok
+--exec $try_auth -u $USER
+--echo # name does not match, password good = ok
+--exec $try_auth -u mysqltest1 -pgood
+--echo # name does not match, password bad = failure
+--error 1
+--exec $try_auth -u mysqltest1 -pbad
+--replace_result $dreplace "drop user USER"
+eval $dreplace, mysqltest1;
+
+#
+# ed25519,socket
+#
+--replace_result $creplace "create user USER"
+eval $creplace identified via ed25519 as password("GOOD") OR unix_socket;
+create user mysqltest1 identified via ed25519 as password("good") OR unix_socket;
+show create user mysqltest1;
+--echo # name match = ok
+--exec $try_auth -u $USER
+--echo # name does not match, password good = ok
+--exec $try_auth -u mysqltest1 -pgood
+--echo # name does not match, password bad = failure
+--error 1
+--exec $try_auth -u mysqltest1 -pbad
+--replace_result $dreplace "drop user USER"
+eval $dreplace, mysqltest1;
+
+#
+# ed25519,socket,password
+#
+--replace_result $creplace "create user USER"
+eval $creplace identified via ed25519 as password("GOOD") OR unix_socket OR mysql_native_password as password("works");
+create user mysqltest1 identified via ed25519 as password("good") OR unix_socket OR mysql_native_password as password("works");
+show create user mysqltest1;
+--echo # name match = ok
+--exec $try_auth -u $USER
+--echo # name does not match, password good = ok
+--exec $try_auth -u mysqltest1 -pgood
+--echo # name does not match, second password works = ok
+--exec $try_auth -u mysqltest1 -pworks
+--echo # name does not match, password bad = failure
+--error 1
+--exec $try_auth -u mysqltest1 -pbad
+--replace_result $dreplace "drop user USER"
+eval $dreplace, mysqltest1;
+
+#
+# password,password
+#
+create user mysqltest1 identified via mysql_native_password as password("good") OR mysql_native_password as password("works");
+show create user mysqltest1;
+--echo # password good = ok
+--exec $try_auth -u mysqltest1 -pgood
+--echo # second password works = ok
+--exec $try_auth -u mysqltest1 -pworks
+--echo # password bad = failure
+--error 1
+--exec $try_auth -u mysqltest1 -pbad
+drop user mysqltest1;
+
+#
+# show grants, flush privileges, set password, alter user
+#
+create user mysqltest1 identified via ed25519 as password("good") OR unix_socket OR mysql_native_password as password("works");
+show grants for mysqltest1;
+--replace_regex /password_last_changed": [0-9]*/password_last_changed": #/
+select json_detailed(priv) from mysql.global_priv where user='mysqltest1';
+select password,plugin,authentication_string from mysql.user where user='mysqltest1';
+flush privileges;
+show create user mysqltest1;
+set password for mysqltest1 = password('foobar');
+show create user mysqltest1;
+alter user mysqltest1 identified via unix_socket OR mysql_native_password as password("some");
+show create user mysqltest1;
+set password for mysqltest1 = password('foobar');
+show create user mysqltest1;
+alter user mysqltest1 identified via unix_socket;
+--error ER_SET_PASSWORD_AUTH_PLUGIN
+set password for mysqltest1 = password('bla');
+alter user mysqltest1 identified via mysql_native_password as password("some") or unix_socket;
+show create user mysqltest1;
+drop user mysqltest1;
+
+--source include/switch_to_mysql_user.inc
+--replace_regex /\d{6}/XX.YY.ZZ/
+--error ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
+create user mysqltest1 identified via ed25519 as password("good") OR unix_socket OR mysql_native_password as password("works");
+--source include/switch_to_mysql_global_priv.inc
+
+#
+# invalid password,socket
+#
+--replace_result $creplace "create user USER"
+eval $creplace identified via mysql_native_password as '1234567890123456789012345678901234567890a' OR unix_socket;
+create user mysqltest1 identified via mysql_native_password as '1234567890123456789012345678901234567890a' OR unix_socket;
+update mysql.global_priv set priv=replace(priv, '1234567890123456789012345678901234567890a', 'invalid password');
+flush privileges;
+show create user mysqltest1;
+--echo # name match = ok
+--exec $try_auth -u $USER
+--echo # name does not match = failure
+--error 1
+--exec $try_auth -u mysqltest1
+--echo # SET PASSWORD helps
+set password for mysqltest1 = password('bla');
+--exec $try_auth -u mysqltest1 -pbla
+--replace_result $dreplace "drop user USER"
+eval $dreplace, mysqltest1;
+
+#
+# missing client-side plugin
+#
+create user mysqltest1 identified via ed25519 as password("good");
+show create user mysqltest1;
+--echo # no plugin = failure
+--replace_result $plugindir <PLUGINDIR>
+--error 1
+--exec $try_auth -u mysqltest1 -pgood --plugin-dir=$plugindir/no
+alter user mysqltest1 identified via ed25519 as password("good") OR mysql_native_password as password("works");
+show create user mysqltest1;
+--echo # no plugin = failure
+--error 1
+--exec $try_auth -u mysqltest1 -pgood --plugin-dir=$plugindir/no
+--echo # no plugin, second password works = ok
+--exec $try_auth -u mysqltest1 -pworks --plugin-dir=$plugindir/no
+drop user mysqltest1;
+
+uninstall soname 'auth_ed25519';
+--remove_file $MYSQLTEST_VARDIR/tmp/peercred_test.txt
diff --git a/mysql-test/suite/plugins/t/pam.test b/mysql-test/suite/plugins/t/pam.test
index 8a95d6baed2..6bb282f68c0 100644
--- a/mysql-test/suite/plugins/t/pam.test
+++ b/mysql-test/suite/plugins/t/pam.test
@@ -1,4 +1,4 @@
-
+let $PAM_PLUGIN_VERSION= $AUTH_PAM_SO;
--source pam_init.inc
--write_file $MYSQLTEST_VARDIR/tmp/pam_good.txt
@@ -13,6 +13,12 @@ not very secret challenge
select user(), current_user(), database();
EOF
+--write_file $MYSQLTEST_VARDIR/tmp/pam_ugly.txt
+crash pam module
+666
+select user(), current_user(), database();
+EOF
+
--echo #
--echo # authentication is successful, challenge/pin are ok
--echo # note that current_user() differs from user()
@@ -25,6 +31,12 @@ EOF
--error 1
--exec $MYSQL_TEST -u test_pam --plugin-dir=$plugindir < $MYSQLTEST_VARDIR/tmp/pam_bad.txt
+--echo #
+--echo # authentication is unsuccessful
+--echo #
+--error 1
+--exec $MYSQL_TEST -u test_pam --plugin-dir=$plugindir < $MYSQLTEST_VARDIR/tmp/pam_ugly.txt
+
--remove_file $MYSQLTEST_VARDIR/tmp/pam_good.txt
--remove_file $MYSQLTEST_VARDIR/tmp/pam_bad.txt
drop user test_pam;
diff --git a/mysql-test/suite/plugins/t/pam_cleartext.test b/mysql-test/suite/plugins/t/pam_cleartext.test
index 29ed7430649..5d137e6b416 100644
--- a/mysql-test/suite/plugins/t/pam_cleartext.test
+++ b/mysql-test/suite/plugins/t/pam_cleartext.test
@@ -1,4 +1,4 @@
-
+let $PAM_PLUGIN_VERSION= $AUTH_PAM_SO;
--source pam_init.inc
show variables like 'pam_use_%';
diff --git a/mysql-test/suite/plugins/t/pam_init.inc b/mysql-test/suite/plugins/t/pam_init.inc
index 281666d51a6..131b787f6b9 100644
--- a/mysql-test/suite/plugins/t/pam_init.inc
+++ b/mysql-test/suite/plugins/t/pam_init.inc
@@ -1,11 +1,11 @@
--source include/not_embedded.inc
-if (!$AUTH_PAM_SO) {
+if (!$PAM_PLUGIN_VERSION) {
skip No pam auth plugin;
}
-eval install plugin pam soname '$AUTH_PAM_SO';
+eval install plugin pam soname '$PAM_PLUGIN_VERSION';
create user test_pam identified via pam using 'mariadb_mtr';
create user pam_test;
grant proxy on pam_test to test_pam;
diff --git a/mysql-test/suite/plugins/t/pam_v1.test b/mysql-test/suite/plugins/t/pam_v1.test
new file mode 100644
index 00000000000..d908e3a4b25
--- /dev/null
+++ b/mysql-test/suite/plugins/t/pam_v1.test
@@ -0,0 +1,34 @@
+let $PAM_PLUGIN_VERSION= $AUTH_PAM_V1_SO;
+--source pam_init.inc
+
+--write_file $MYSQLTEST_VARDIR/tmp/pam_good.txt
+not very secret challenge
+9225
+select user(), current_user(), database();
+EOF
+
+--write_file $MYSQLTEST_VARDIR/tmp/pam_bad.txt
+not very secret challenge
+9224
+select user(), current_user(), database();
+EOF
+
+--echo #
+--echo # authentication is successful, challenge/pin are ok
+--echo # note that current_user() differs from user()
+--echo #
+--exec $MYSQL_TEST -u test_pam --plugin-dir=$plugindir < $MYSQLTEST_VARDIR/tmp/pam_good.txt
+
+--echo #
+--echo # authentication is unsuccessful
+--echo #
+--error 1
+--exec $MYSQL_TEST -u test_pam --plugin-dir=$plugindir < $MYSQLTEST_VARDIR/tmp/pam_bad.txt
+
+--remove_file $MYSQLTEST_VARDIR/tmp/pam_good.txt
+--remove_file $MYSQLTEST_VARDIR/tmp/pam_bad.txt
+drop user test_pam;
+drop user pam_test;
+let $count_sessions= 1;
+--source include/wait_until_count_sessions.inc
+uninstall plugin pam;
diff --git a/mysql-test/suite/plugins/t/simple_password_check.test b/mysql-test/suite/plugins/t/simple_password_check.test
index 4965ee492d2..b7d631ab4bb 100644
--- a/mysql-test/suite/plugins/t/simple_password_check.test
+++ b/mysql-test/suite/plugins/t/simple_password_check.test
@@ -83,11 +83,13 @@ create user foo2 identified with mysql_native_password using '111111111111111111
grant select on *.* to foo2 identified with mysql_old_password using '2222222222222222';
--error ER_NOT_VALID_PASSWORD
create user foo2 identified with mysql_native_password using '';
---error ER_PASSWORD_NO_MATCH
+--error ER_NOT_VALID_PASSWORD
+grant select on *.* to foo2 identified with mysql_old_password using '';
+--error ER_NOT_VALID_PASSWORD
grant select on *.* to foo2 identified with mysql_old_password;
# direct updates are not protected
-update mysql.user set password='xxx' where user='foo1';
+update mysql.global_priv set priv=json_set(priv, '$.authentication_string', 'xxx') where user='foo1';
set global strict_password_validation=0;
@@ -113,6 +115,8 @@ drop user foo1;
create role r1;
drop role r1;
+flush privileges;
+
uninstall plugin simple_password_check;
create user foo1 identified by 'pwd';
diff --git a/mysql-test/suite/plugins/t/unix_socket.test b/mysql-test/suite/plugins/t/unix_socket.test
index bd0323c0274..9bb56aae290 100644
--- a/mysql-test/suite/plugins/t/unix_socket.test
+++ b/mysql-test/suite/plugins/t/unix_socket.test
@@ -1,13 +1,5 @@
--source include/have_unix_socket.inc
-if (!$USER) {
- skip USER variable is undefined;
-}
-
-let $plugindir=`SELECT @@global.plugin_dir`;
-
-eval install plugin unix_socket soname '$AUTH_SOCKET_SO';
-
--echo #
--echo # with named user
--echo #
@@ -26,13 +18,13 @@ EOF
--echo #
--echo # name match = ok
--echo #
---exec $MYSQL_TEST -u $USER --plugin-dir=$plugindir < $MYSQLTEST_VARDIR/tmp/peercred_test.txt
+--exec $MYSQL_TEST -u $USER < $MYSQLTEST_VARDIR/tmp/peercred_test.txt
--echo #
--echo # name does not match = failure
--echo #
--error 1
---exec $MYSQL_TEST -u foobar --plugin-dir=$plugindir < $MYSQLTEST_VARDIR/tmp/peercred_test.txt
+--exec $MYSQL_TEST -u foobar < $MYSQLTEST_VARDIR/tmp/peercred_test.txt
--let $replace=drop user $USER
--replace_result $replace "drop user USER"
@@ -45,16 +37,15 @@ grant SELECT ON test.* TO '' identified via unix_socket;
--echo #
--echo # name match = ok
--echo #
---exec $MYSQL_TEST -u $USER --plugin-dir=$plugindir < $MYSQLTEST_VARDIR/tmp/peercred_test.txt
+--exec $MYSQL_TEST -u $USER < $MYSQLTEST_VARDIR/tmp/peercred_test.txt
--echo #
--echo # name does not match = failure
--echo #
--error 1
---exec $MYSQL_TEST -u foobar --plugin-dir=$plugindir < $MYSQLTEST_VARDIR/tmp/peercred_test.txt
+--exec $MYSQL_TEST -u foobar < $MYSQLTEST_VARDIR/tmp/peercred_test.txt
# restoring mysql.user to the original state.
delete from mysql.user where user='';
FLUSH PRIVILEGES;
-uninstall plugin unix_socket;
--remove_file $MYSQLTEST_VARDIR/tmp/peercred_test.txt
diff --git a/mysql-test/suite/roles/create_and_drop_role.result b/mysql-test/suite/roles/create_and_drop_role.result
index a163ee82f42..21aecdb9bc0 100644
--- a/mysql-test/suite/roles/create_and_drop_role.result
+++ b/mysql-test/suite/roles/create_and_drop_role.result
@@ -7,7 +7,7 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
create role test_role1;
create role test_role2, test_role3;
select user, host, is_role from user where user like 'test%';
-user host is_role
+User Host is_role
test_role1 Y
test_role2 Y
test_role3 Y
@@ -19,7 +19,7 @@ ERROR HY000: Operation CREATE ROLE failed for 'test_role1'
create role test_role1, test_role2;
ERROR HY000: Operation CREATE ROLE failed for 'test_role1'
select user, host, is_role from user where user like 'test%';
-user host is_role
+User Host is_role
test_role1 Y
test_role2 Y
drop role test_role1;
@@ -34,7 +34,7 @@ drop role dummy;
ERROR HY000: Operation DROP ROLE failed for 'dummy'
drop user dummy@'';
select user, host, is_role from user where user like 'test%';
-user host is_role
+User Host is_role
disconnect mysql;
connection default;
create role '';
diff --git a/mysql-test/suite/roles/create_and_drop_role_invalid_user_table.result b/mysql-test/suite/roles/create_and_drop_role_invalid_user_table.result
index afe00ed7729..5902ae0e16c 100644
--- a/mysql-test/suite/roles/create_and_drop_role_invalid_user_table.result
+++ b/mysql-test/suite/roles/create_and_drop_role_invalid_user_table.result
@@ -15,7 +15,6 @@ create role test_role;
create user test_user@localhost;
grant test_role to test_user@localhost;
set default role test_role for root@localhost;
-ERROR HY000: Column count of mysql.user is wrong. Expected 46, found 45. Created with MariaDB MYSQL_VERSION_ID, now running MYSQL_VERSION_ID. Please use mysql_upgrade to fix this error
drop role test_role;
drop user test_user@localhost;
alter table user add column default_role char(80) binary default '' not null
diff --git a/mysql-test/suite/roles/create_and_drop_role_invalid_user_table.test b/mysql-test/suite/roles/create_and_drop_role_invalid_user_table.test
index ebd75c34ca1..9cf0d7b4aff 100644
--- a/mysql-test/suite/roles/create_and_drop_role_invalid_user_table.test
+++ b/mysql-test/suite/roles/create_and_drop_role_invalid_user_table.test
@@ -3,6 +3,7 @@
# that don't have 'default_role' column
#
source include/not_embedded.inc;
+source include/switch_to_mysql_user.inc;
connect (mysql, localhost, root,,);
use mysql;
@@ -29,8 +30,8 @@ after password_expired;
create role test_role;
create user test_user@localhost;
grant test_role to test_user@localhost;
---replace_regex /10\d\d\d\d/MYSQL_VERSION_ID/
---error ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
+#--replace_regex /10\d\d\d\d/MYSQL_VERSION_ID/
+#--error ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
set default role test_role for root@localhost;
drop role test_role;
drop user test_user@localhost;
@@ -46,3 +47,5 @@ update user set is_role='N';
flush privileges;
create role test_role;
drop role test_role;
+
+source include/switch_to_mysql_global_priv.inc;
diff --git a/mysql-test/suite/roles/default_create_user_not_role.result b/mysql-test/suite/roles/default_create_user_not_role.result
index 171015f6e82..3f32329b80c 100644
--- a/mysql-test/suite/roles/default_create_user_not_role.result
+++ b/mysql-test/suite/roles/default_create_user_not_role.result
@@ -2,7 +2,7 @@ connect mysql, localhost, root,,;
use mysql;
create user 'test'@'localhost';
select user, host, is_role from user where user='test' and host='localhost';
-user host is_role
+User Host is_role
test localhost N
drop user 'test'@'localhost';
disconnect mysql;
diff --git a/mysql-test/suite/roles/flush_roles-17898.result b/mysql-test/suite/roles/flush_roles-17898.result
index ae8fb0a27d2..d880b1fed60 100644
--- a/mysql-test/suite/roles/flush_roles-17898.result
+++ b/mysql-test/suite/roles/flush_roles-17898.result
@@ -1,12 +1,8 @@
use mysql;
insert db (db,user,select_priv) values ('foo','dwr_foo','Y'), ('bar','dwr_bar','Y');
insert roles_mapping (user,role) values ('dwr_qux_dev','dwr_foo'),('dwr_qux_dev','dwr_bar');
-insert ignore user (user,show_db_priv,is_role) values ('dwr_foo','N','Y'), ('dwr_bar','N','Y'), ('dwr_qux_dev','Y','Y');
-Warnings:
-Warning 1364 Field 'ssl_cipher' doesn't have a default value
-Warning 1364 Field 'x509_issuer' doesn't have a default value
-Warning 1364 Field 'x509_subject' doesn't have a default value
-Warning 1364 Field 'authentication_string' doesn't have a default value
+insert global_priv values ('','dwr_foo','{"is_role":true}'), ('','dwr_bar','{"is_role":true}'),
+('','dwr_qux_dev','{"access":16384,"is_role":true}');
flush privileges;
drop role dwr_foo;
drop role dwr_bar;
diff --git a/mysql-test/suite/roles/flush_roles-17898.test b/mysql-test/suite/roles/flush_roles-17898.test
index 22568196c30..8dc55a87a01 100644
--- a/mysql-test/suite/roles/flush_roles-17898.test
+++ b/mysql-test/suite/roles/flush_roles-17898.test
@@ -5,7 +5,8 @@ source include/not_embedded.inc;
use mysql;
insert db (db,user,select_priv) values ('foo','dwr_foo','Y'), ('bar','dwr_bar','Y');
insert roles_mapping (user,role) values ('dwr_qux_dev','dwr_foo'),('dwr_qux_dev','dwr_bar');
-insert ignore user (user,show_db_priv,is_role) values ('dwr_foo','N','Y'), ('dwr_bar','N','Y'), ('dwr_qux_dev','Y','Y');
+insert global_priv values ('','dwr_foo','{"is_role":true}'), ('','dwr_bar','{"is_role":true}'),
+ ('','dwr_qux_dev','{"access":16384,"is_role":true}');
flush privileges;
drop role dwr_foo;
drop role dwr_bar;
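
The flush_roles-17898 hunks above replace direct inserts into mysql.user with rows in the new mysql.global_priv table, whose priv column stores account attributes as JSON. A minimal inspection sketch, not part of the patch, assuming the 16384 access value corresponds to the SHOW DATABASES privilege bit (SHOW_DB_ACL) that the old show_db_priv='Y' column set:

  -- Sketch only: inspect the JSON privilege blob written by the test above.
  SELECT user,
         JSON_VALUE(priv, '$.is_role') AS is_role,
         JSON_VALUE(priv, '$.access')  AS access_bits   -- 16384 assumed = SHOW DATABASES
  FROM mysql.global_priv
  WHERE user LIKE 'dwr_%';
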
diff --git a/mysql-test/suite/roles/grant_revoke_current.result b/mysql-test/suite/roles/grant_revoke_current.result
index 436bec92a8f..681c0857edb 100644
--- a/mysql-test/suite/roles/grant_revoke_current.result
+++ b/mysql-test/suite/roles/grant_revoke_current.result
@@ -1,3 +1,4 @@
+select priv into @root_priv from mysql.global_priv where user='root' and host='localhost';
grant select on *.* to current_role;
ERROR 0L000: Invalid definer
revoke select on *.* from current_role;
@@ -39,5 +40,5 @@ GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY PASSWORD '*34391
GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION
GRANT USAGE ON *.* TO 'r1'
set password='';
-update mysql.user set plugin='';
drop role r1;
+update mysql.global_priv set priv=@root_priv;
diff --git a/mysql-test/suite/roles/grant_revoke_current.test b/mysql-test/suite/roles/grant_revoke_current.test
index bffc04087b1..65a0809ac9c 100644
--- a/mysql-test/suite/roles/grant_revoke_current.test
+++ b/mysql-test/suite/roles/grant_revoke_current.test
@@ -1,4 +1,5 @@
--source include/not_embedded.inc
+select priv into @root_priv from mysql.global_priv where user='root' and host='localhost';
--error ER_MALFORMED_DEFINER
grant select on *.* to current_role;
@@ -25,7 +26,7 @@ show grants;
grant r1 to current_user() identified by 'barfoo';
show grants;
set password='';
-#cleanup after MDEV-16238
-update mysql.user set plugin='';
+#cleanup
drop role r1;
+update mysql.global_priv set priv=@root_priv;
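
The grant_revoke_current hunks switch the cleanup from blanking mysql.user.plugin to saving and restoring root's whole privilege record. A sketch of that save/restore pattern; the WHERE clause on the restore is an addition here for illustration (the test itself updates every row):

  -- Save the JSON privilege blob before the test alters root's password.
  SELECT priv INTO @root_priv FROM mysql.global_priv
   WHERE user='root' AND host='localhost';
  -- ... statements that may change root's credentials ...
  -- Restore the saved blob afterwards.
  UPDATE mysql.global_priv SET priv=@root_priv
   WHERE user='root' AND host='localhost';
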
diff --git a/mysql-test/suite/roles/grant_role_auto_create_user.result b/mysql-test/suite/roles/grant_role_auto_create_user.result
index 3f6139e84d0..61ce0359c0d 100644
--- a/mysql-test/suite/roles/grant_role_auto_create_user.result
+++ b/mysql-test/suite/roles/grant_role_auto_create_user.result
@@ -23,7 +23,7 @@ grant auto_create to foo@localhost;
ERROR 28000: Can't find any matching row in the user table
grant auto_create to bar@localhost identified by 'baz';
select user, host from mysql.user where user = 'bar';
-user host
+User Host
bar localhost
set sql_mode = '';
connect con1,localhost,bar,baz,;
diff --git a/mysql-test/suite/roles/i_s_applicable_roles_is_default.result b/mysql-test/suite/roles/i_s_applicable_roles_is_default.result
index 63127f8b176..ee7d17f3a1f 100644
--- a/mysql-test/suite/roles/i_s_applicable_roles_is_default.result
+++ b/mysql-test/suite/roles/i_s_applicable_roles_is_default.result
@@ -78,3 +78,4 @@ drop role role3;
drop role role2;
drop role role1;
drop user foo;
+update mysql.global_priv set priv=json_compact(json_remove(priv, '$.default_role'));
diff --git a/mysql-test/suite/roles/i_s_applicable_roles_is_default.test b/mysql-test/suite/roles/i_s_applicable_roles_is_default.test
index 59ba1f8bf75..0e6436924a9 100644
--- a/mysql-test/suite/roles/i_s_applicable_roles_is_default.test
+++ b/mysql-test/suite/roles/i_s_applicable_roles_is_default.test
@@ -51,7 +51,6 @@ set default role role3;
--sorted_result
select * from information_schema.applicable_roles;
-
set default role none;
--sorted_result
select * from information_schema.applicable_roles;
@@ -60,3 +59,4 @@ drop role role3;
drop role role2;
drop role role1;
drop user foo;
+update mysql.global_priv set priv=json_compact(json_remove(priv, '$.default_role'));
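
The default role is no longer a dedicated column of the underlying table; it is kept under the $.default_role key of the JSON priv value and exposed through the mysql.user view, so the cleanup above strips that key. The same idea for a single, hypothetical account:

  -- Remove a stored default role for one account (account name is illustrative).
  UPDATE mysql.global_priv
     SET priv=JSON_COMPACT(JSON_REMOVE(priv, '$.default_role'))
   WHERE user='someuser' AND host='localhost';
  FLUSH PRIVILEGES;   -- reload the in-memory grant tables after the direct edit
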
diff --git a/mysql-test/suite/roles/none_public.result b/mysql-test/suite/roles/none_public.result
index 5dd1480e8c3..c253ae1478b 100644
--- a/mysql-test/suite/roles/none_public.result
+++ b/mysql-test/suite/roles/none_public.result
@@ -40,17 +40,9 @@ ERROR OP000: Invalid role specification `none`
create definer=public view test.v1 as select 1;
ERROR OP000: Invalid role specification `public`
drop role role1;
-optimize table mysql.user;
-Table Op Msg_type Msg_text
-mysql.user optimize status OK
-insert ignore mysql.user (user, is_role) values ('none', 'Y'), ('public', 'Y');
-Warnings:
-Warning 1364 Field 'ssl_cipher' doesn't have a default value
-Warning 1364 Field 'x509_issuer' doesn't have a default value
-Warning 1364 Field 'x509_subject' doesn't have a default value
-Warning 1364 Field 'authentication_string' doesn't have a default value
+insert mysql.global_priv values ('', 'none', '{"is_role":true}'), ('', 'public', '{"is_role":true}');
flush privileges;
Warnings:
Error 1959 Invalid role specification `none`
Error 1959 Invalid role specification `public`
-delete from mysql.user where is_role='Y';
+delete from mysql.global_priv where host='';
diff --git a/mysql-test/suite/roles/none_public.test b/mysql-test/suite/roles/none_public.test
index 838a4955df5..a0ec2315cfc 100644
--- a/mysql-test/suite/roles/none_public.test
+++ b/mysql-test/suite/roles/none_public.test
@@ -50,8 +50,6 @@ create definer=public view test.v1 as select 1;
drop role role1;
-optimize table mysql.user; # to remove deleted rows and have stable row order
-insert ignore mysql.user (user, is_role) values ('none', 'Y'), ('public', 'Y');
+insert mysql.global_priv values ('', 'none', '{"is_role":true}'), ('', 'public', '{"is_role":true}');
flush privileges;
-delete from mysql.user where is_role='Y';
-
+delete from mysql.global_priv where host='';
diff --git a/mysql-test/suite/roles/prepare_stmt_with_role.result b/mysql-test/suite/roles/prepare_stmt_with_role.result
index 0352502c35c..758dca735e1 100644
--- a/mysql-test/suite/roles/prepare_stmt_with_role.result
+++ b/mysql-test/suite/roles/prepare_stmt_with_role.result
@@ -13,7 +13,7 @@ EXECUTE stmtCreateRole;
#
SELECT user, host,is_role FROM mysql.user
WHERE user = 'developers';
-user host is_role
+User Host is_role
developers Y
SHOW GRANTS;
Grants for root@localhost
@@ -73,7 +73,7 @@ EXECUTE stmtDropRole;
#
SELECT user, host,is_role FROM mysql.user
WHERE user = 'developers';
-user host is_role
+User Host is_role
SELECT * FROM mysql.roles_mapping;
Host User Role Admin_option
SHOW GRANTS;
@@ -89,7 +89,7 @@ GRANT USAGE ON *.* TO 'test_user'@'%'
EXECUTE stmtCreateRole;
SELECT user, host,is_role FROM mysql.user
WHERE user = 'developers';
-user host is_role
+User Host is_role
developers Y
SELECT * FROM mysql.roles_mapping;
Host User Role Admin_option
diff --git a/mysql-test/suite/roles/rename_user.result b/mysql-test/suite/roles/rename_user.result
index 987d90a5820..9550e15953a 100644
--- a/mysql-test/suite/roles/rename_user.result
+++ b/mysql-test/suite/roles/rename_user.result
@@ -12,7 +12,7 @@ localhost root test_role2 Y
localhost test_user test_role1 N
rename user 'test_user'@'localhost' to 'test_user_rm'@'newhost';
select user, host from user where user like 'test%';
-user host
+User Host
test_role1
test_role2
test_user_rm newhost
diff --git a/mysql-test/suite/roles/role_case_sensitive-10744.result b/mysql-test/suite/roles/role_case_sensitive-10744.result
index b898310e83c..baec3c5f2a1 100644
--- a/mysql-test/suite/roles/role_case_sensitive-10744.result
+++ b/mysql-test/suite/roles/role_case_sensitive-10744.result
@@ -11,7 +11,7 @@ create role test_role;
# Test if mysql.user has the roles created.
#
select user, host from mysql.user where is_role='y' and user like 'test%';
-user host
+User Host
test_ROLE
test_role
create database secret_db;
diff --git a/mysql-test/suite/roles/set_default_role_clear.result b/mysql-test/suite/roles/set_default_role_clear.result
index 7f54b5eabcc..70628059f65 100644
--- a/mysql-test/suite/roles/set_default_role_clear.result
+++ b/mysql-test/suite/roles/set_default_role_clear.result
@@ -10,7 +10,7 @@ set default role test_role;
select user, host, default_role from mysql.user;
ERROR 42000: SELECT command denied to user 'test_user'@'localhost' for table 'user'
select user, host, default_role from mysql.user where user='test_user';
-user host default_role
+User Host default_role
test_user localhost test_role
show grants;
Grants for test_user@localhost
@@ -18,16 +18,16 @@ GRANT test_role TO 'test_user'@'localhost'
GRANT USAGE ON *.* TO 'test_user'@'localhost'
GRANT SELECT ON *.* TO 'test_role'
select user, host, default_role from mysql.user where user='test_user';
-user host default_role
+User Host default_role
test_user localhost test_role
set default role NONE;
select user, host, default_role from mysql.user where user='test_user';
-user host default_role
+User Host default_role
test_user localhost
set default role invalid_role;
ERROR OP000: Invalid role specification `invalid_role`
select user, host, default_role from mysql.user where user='test_user';
-user host default_role
+User Host default_role
test_user localhost
select user, host, default_role from mysql.user;
ERROR 42000: SELECT command denied to user 'test_user'@'localhost' for table 'user'
diff --git a/mysql-test/suite/roles/set_default_role_for.result b/mysql-test/suite/roles/set_default_role_for.result
index 9880671ba09..706ba3fba7b 100644
--- a/mysql-test/suite/roles/set_default_role_for.result
+++ b/mysql-test/suite/roles/set_default_role_for.result
@@ -22,17 +22,17 @@ GRANT role_a TO 'user_a'@'localhost'
GRANT USAGE ON *.* TO 'user_a'@'localhost'
GRANT SELECT ON *.* TO 'role_a'
select user, host, default_role from mysql.user where user like 'user_%';
-user host default_role
+User Host default_role
user_a localhost role_a
user_b localhost role_b
set default role NONE for current_user;
select user, host, default_role from mysql.user where user like 'user_%';
-user host default_role
+User Host default_role
user_a localhost
user_b localhost role_b
set default role current_role for current_user;
select user, host, default_role from mysql.user where user like 'user_%';
-user host default_role
+User Host default_role
user_a localhost role_a
user_b localhost role_b
set default role role_b for current_user;
@@ -44,12 +44,6 @@ GRANT USAGE ON *.* TO 'user_b'@'localhost'
GRANT INSERT, UPDATE ON *.* TO 'role_b'
select user, host, default_role from mysql.user where user like 'user_%';
ERROR 42000: SELECT command denied to user 'user_b'@'localhost' for table 'user'
-insert ignore into mysql.user (user, host) values ('someuser', 'somehost');
-Warnings:
-Warning 1364 Field 'ssl_cipher' doesn't have a default value
-Warning 1364 Field 'x509_issuer' doesn't have a default value
-Warning 1364 Field 'x509_subject' doesn't have a default value
-Warning 1364 Field 'authentication_string' doesn't have a default value
set default role NONE for user_a@localhost;
show grants;
Grants for user_a@localhost
@@ -60,6 +54,5 @@ select user, host, default_role from mysql.user where user like 'user_%';
ERROR 42000: SELECT command denied to user 'user_a'@'localhost' for table 'user'
drop role role_a;
drop role role_b;
-delete from mysql.user where user = 'someuser' && host = 'somehost';
drop user user_a@localhost;
drop user user_b@localhost;
diff --git a/mysql-test/suite/roles/set_default_role_for.test b/mysql-test/suite/roles/set_default_role_for.test
index 985eaa16e3d..eff999a522b 100644
--- a/mysql-test/suite/roles/set_default_role_for.test
+++ b/mysql-test/suite/roles/set_default_role_for.test
@@ -65,8 +65,6 @@ show grants;
--error ER_TABLEACCESS_DENIED_ERROR
select user, host, default_role from mysql.user where user like 'user_%';
-# Make sure the default role setting worked from root.
-insert ignore into mysql.user (user, host) values ('someuser', 'somehost');
# Since we have update privileges on the mysql.user table, we should
# be able to set a default role for a different user.
set default role NONE for user_a@localhost;
@@ -82,6 +80,5 @@ change_user 'root';
drop role role_a;
drop role role_b;
-delete from mysql.user where user = 'someuser' && host = 'somehost';
drop user user_a@localhost;
drop user user_b@localhost;
diff --git a/mysql-test/suite/roles/set_default_role_invalid.result b/mysql-test/suite/roles/set_default_role_invalid.result
index 5bcaa9acb86..77c317c6a02 100644
--- a/mysql-test/suite/roles/set_default_role_invalid.result
+++ b/mysql-test/suite/roles/set_default_role_invalid.result
@@ -17,7 +17,7 @@ set default role test_role;
select user, host, default_role from mysql.user;
ERROR 42000: SELECT command denied to user 'test_user'@'localhost' for table 'user'
select user, host, default_role from mysql.user where user='test_user';
-user host default_role
+User Host default_role
test_user localhost test_role
show grants;
Grants for test_user@localhost
@@ -25,12 +25,12 @@ GRANT test_role TO 'test_user'@'localhost'
GRANT USAGE ON *.* TO 'test_user'@'localhost'
GRANT SELECT ON *.* TO 'test_role'
select user, host, default_role from mysql.user where user='test_user';
-user host default_role
+User Host default_role
test_user localhost test_role
set default role invalid_role;
ERROR OP000: Invalid role specification `invalid_role`
select user, host, default_role from mysql.user where user='test_user';
-user host default_role
+User Host default_role
test_user localhost test_role
revoke test_role from test_user@localhost;
select user, host, default_role from mysql.user where user='test_user';
diff --git a/mysql-test/suite/roles/set_default_role_new_connection.result b/mysql-test/suite/roles/set_default_role_new_connection.result
index a59ecbd75f7..8590d33e16d 100644
--- a/mysql-test/suite/roles/set_default_role_new_connection.result
+++ b/mysql-test/suite/roles/set_default_role_new_connection.result
@@ -15,7 +15,7 @@ ERROR 42000: SELECT command denied to user 'test_user'@'localhost' for table 'us
disconnect c1;
connection default;
select user, host, default_role from mysql.user where user = 'test_user';
-user host default_role
+User Host default_role
test_user localhost test_role
connect c1, localhost, test_user,,;
show grants;
@@ -24,13 +24,13 @@ GRANT test_role TO 'test_user'@'localhost'
GRANT USAGE ON *.* TO 'test_user'@'localhost'
GRANT SELECT ON *.* TO 'test_role'
select user, host, default_role from mysql.user where user = 'test_user';
-user host default_role
+User Host default_role
test_user localhost test_role
set default role NONE;
disconnect c1;
connection default;
select user, host, default_role from mysql.user where user = 'test_user';
-user host default_role
+User Host default_role
test_user localhost
connect c1, localhost, test_user,,;
show grants;
@@ -42,7 +42,7 @@ ERROR 42000: SELECT command denied to user 'test_user'@'localhost' for table 'us
disconnect c1;
connection default;
select user, host, default_role from mysql.user where user = 'test_user';
-user host default_role
+User Host default_role
test_user localhost
set default role test_role for test_user@localhost;
connect c1, localhost, test_user,,;
@@ -52,7 +52,7 @@ GRANT test_role TO 'test_user'@'localhost'
GRANT USAGE ON *.* TO 'test_user'@'localhost'
GRANT SELECT ON *.* TO 'test_role'
select user, host, default_role from mysql.user where user = 'test_user';
-user host default_role
+User Host default_role
test_user localhost test_role
disconnect c1;
connection default;
diff --git a/mysql-test/suite/roles/set_default_role_ps-6960.result b/mysql-test/suite/roles/set_default_role_ps-6960.result
index c186e7bccb0..505861e89df 100644
--- a/mysql-test/suite/roles/set_default_role_ps-6960.result
+++ b/mysql-test/suite/roles/set_default_role_ps-6960.result
@@ -1,3 +1,4 @@
+select priv into @root_priv from mysql.global_priv where user='root' and host='localhost';
create role r1;
prepare stmt from "set password = '11111111111111111111111111111111111111111'";
execute stmt;
@@ -6,4 +7,4 @@ execute stmt;
set password = '';
set default role NONE;
drop role r1;
-update mysql.user set plugin='';
+update mysql.global_priv set priv=@root_priv where user='root' and host='localhost';
diff --git a/mysql-test/suite/roles/set_default_role_ps-6960.test b/mysql-test/suite/roles/set_default_role_ps-6960.test
index 8af95c9e8a0..fd965c2aa75 100644
--- a/mysql-test/suite/roles/set_default_role_ps-6960.test
+++ b/mysql-test/suite/roles/set_default_role_ps-6960.test
@@ -4,6 +4,8 @@
--source include/not_embedded.inc
+select priv into @root_priv from mysql.global_priv where user='root' and host='localhost';
+
create role r1;
prepare stmt from "set password = '11111111111111111111111111111111111111111'";
execute stmt;
@@ -13,5 +15,6 @@ execute stmt;
set password = '';
set default role NONE;
drop role r1;
-#cleanup after MDEV-16238
-update mysql.user set plugin='';
+
+#cleanup
+update mysql.global_priv set priv=@root_priv where user='root' and host='localhost';
diff --git a/mysql-test/suite/roles/set_role-database-recursive.result b/mysql-test/suite/roles/set_role-database-recursive.result
index 479e553c3d1..2c8c21d97c3 100644
--- a/mysql-test/suite/roles/set_role-database-recursive.result
+++ b/mysql-test/suite/roles/set_role-database-recursive.result
@@ -5,7 +5,7 @@ grant test_role1 to test_user@localhost;
grant test_role2 to test_user@localhost;
grant test_role2 to test_role1;
select user, host from mysql.user where user not like 'root';
-user host
+User Host
test_role1
test_role2
test_user localhost
diff --git a/mysql-test/suite/roles/set_role-database-simple.result b/mysql-test/suite/roles/set_role-database-simple.result
index e21a55edf2e..156a4453b69 100644
--- a/mysql-test/suite/roles/set_role-database-simple.result
+++ b/mysql-test/suite/roles/set_role-database-simple.result
@@ -2,7 +2,7 @@ create user 'test_user'@'localhost';
create role test_role1;
grant test_role1 to test_user@localhost;
select user, host from mysql.user where user not like 'root';
-user host
+User Host
test_role1
test_user localhost
select * from mysql.roles_mapping;
diff --git a/mysql-test/suite/roles/set_role-recursive.result b/mysql-test/suite/roles/set_role-recursive.result
index 2b34c3eeebe..0b37b1a3fc8 100644
--- a/mysql-test/suite/roles/set_role-recursive.result
+++ b/mysql-test/suite/roles/set_role-recursive.result
@@ -4,7 +4,7 @@ grant test_role1 to test_user@localhost;
create role test_role2;
grant test_role2 to test_role1;
select user, host from mysql.user where user not like 'root';
-user host
+User Host
test_role1
test_role2
test_user localhost
diff --git a/mysql-test/suite/roles/set_role-routine-simple.result b/mysql-test/suite/roles/set_role-routine-simple.result
index 3e17a78ad77..b86bf3045f3 100644
--- a/mysql-test/suite/roles/set_role-routine-simple.result
+++ b/mysql-test/suite/roles/set_role-routine-simple.result
@@ -6,7 +6,7 @@ grant test_role1 to test_user@localhost;
grant test_role3 to test_user@localhost;
grant test_role2 to test_role1;
select user, host from mysql.user where user not like 'root';
-user host
+User Host
test_role1
test_role2
test_role3
diff --git a/mysql-test/suite/roles/set_role-simple.result b/mysql-test/suite/roles/set_role-simple.result
index 9af698c7b09..dec5a0b4436 100644
--- a/mysql-test/suite/roles/set_role-simple.result
+++ b/mysql-test/suite/roles/set_role-simple.result
@@ -2,7 +2,7 @@ create user test_user@localhost;
create role test_role1;
grant test_role1 to test_user@localhost;
select user, host from mysql.user where user not like 'root';
-user host
+User Host
test_role1
test_user localhost
select * from mysql.roles_mapping;
diff --git a/mysql-test/suite/roles/set_role-table-column-priv.result b/mysql-test/suite/roles/set_role-table-column-priv.result
index 721bd3039a3..57c6ce01c9f 100644
--- a/mysql-test/suite/roles/set_role-table-column-priv.result
+++ b/mysql-test/suite/roles/set_role-table-column-priv.result
@@ -4,7 +4,7 @@ create role test_role2;
grant test_role1 to test_user@localhost;
grant test_role2 to test_role1;
select user, host from mysql.user where user not like 'root';
-user host
+User Host
test_role1
test_role2
test_user localhost
diff --git a/mysql-test/suite/roles/set_role-table-simple.result b/mysql-test/suite/roles/set_role-table-simple.result
index f5688dbe62e..3ecc66ba7f0 100644
--- a/mysql-test/suite/roles/set_role-table-simple.result
+++ b/mysql-test/suite/roles/set_role-table-simple.result
@@ -4,7 +4,7 @@ create role test_role2;
grant test_role1 to test_user@localhost;
grant test_role2 to test_role1;
select user, host from mysql.user where user not like 'root';
-user host
+User Host
test_role1
test_role2
test_user localhost
diff --git a/mysql-test/suite/roles/show_grants.result b/mysql-test/suite/roles/show_grants.result
index 5d46b038cf8..31df0113b8e 100644
--- a/mysql-test/suite/roles/show_grants.result
+++ b/mysql-test/suite/roles/show_grants.result
@@ -5,7 +5,7 @@ grant test_role1 to test_user@localhost;
grant test_role2 to test_user@localhost;
grant test_role2 to test_role1;
select user, host from mysql.user where user not like 'root';
-user host
+User Host
test_role1
test_role2
test_user localhost
diff --git a/mysql-test/suite/roles/show_grants_replicated.result b/mysql-test/suite/roles/show_grants_replicated.result
index cb9df65dbbd..7b090c982f0 100644
--- a/mysql-test/suite/roles/show_grants_replicated.result
+++ b/mysql-test/suite/roles/show_grants_replicated.result
@@ -17,7 +17,7 @@ connection slave;
# it's visible in mysql.user and I_S:
#
select user, host, is_role from mysql.user where user in ('u1', 'r1');
-user host is_role
+User Host is_role
r1 Y
u1 % N
select * from information_schema.applicable_roles;
diff --git a/mysql-test/suite/rpl/disabled.def b/mysql-test/suite/rpl/disabled.def
index d4617398c64..9f43bc3c339 100644
--- a/mysql-test/suite/rpl/disabled.def
+++ b/mysql-test/suite/rpl/disabled.def
@@ -19,3 +19,4 @@ rpl_row_mysqlbinlog : MDEV-11095
rpl_row_index_choice : MDEV-11666
rpl_parallel2 : fails after MDEV-16172
rpl_semi_sync_after_sync : fails after MDEV-16172
+rpl_auto_increment_update_failure : disabled for now
diff --git a/mysql-test/suite/rpl/include/rpl_EE_err.test b/mysql-test/suite/rpl/include/rpl_EE_err.test
index 0b3fec1f605..fa135d12436 100644
--- a/mysql-test/suite/rpl/include/rpl_EE_err.test
+++ b/mysql-test/suite/rpl/include/rpl_EE_err.test
@@ -15,7 +15,7 @@
-- source include/master-slave.inc
eval create table t1 (a int) engine=$engine_type;
-flush tables;
+flush tables t1;
let $MYSQLD_DATADIR= `select @@datadir`;
remove_file $MYSQLD_DATADIR/test/t1.MYI ;
drop table if exists t1;
diff --git a/mysql-test/suite/rpl/include/rpl_mixed_dml.inc b/mysql-test/suite/rpl/include/rpl_mixed_dml.inc
index 114cd53d244..bb1a2c173de 100644
--- a/mysql-test/suite/rpl/include/rpl_mixed_dml.inc
+++ b/mysql-test/suite/rpl/include/rpl_mixed_dml.inc
@@ -289,7 +289,6 @@ DROP TRIGGER tr1;
--echo
--echo
--echo ******************** EVENTS ********************
-GRANT EVENT ON *.* TO 'root'@'localhost';
INSERT INTO t1 VALUES(1, 'test1');
CREATE EVENT e1 ON SCHEDULE EVERY '1' SECOND COMMENT 'e_second_comment' DO DELETE FROM t1;
--source suite/rpl/include/rpl_mixed_check_event.inc
diff --git a/mysql-test/suite/rpl/include/rpl_row_001.test b/mysql-test/suite/rpl/include/rpl_row_001.test
deleted file mode 100644
index 4df2d793244..00000000000
--- a/mysql-test/suite/rpl/include/rpl_row_001.test
+++ /dev/null
@@ -1,96 +0,0 @@
-let $LOAD_FILE= $MYSQLTEST_VARDIR/std_data/words.dat;
-CREATE TABLE t1 (word CHAR(20) NOT NULL);
---replace_result $LOAD_FILE LOAD_FILE
-eval LOAD DATA INFILE '$LOAD_FILE' INTO TABLE t1;
---replace_result $LOAD_FILE LOAD_FILE
-eval LOAD DATA INFILE '$LOAD_FILE' INTO TABLE t1;
-SELECT * FROM t1 ORDER BY word LIMIT 10;
-
-#
-# Save password row for root
-#
-
-create temporary table tmp select * from mysql.user where host="localhost" and user="root";
-
-#
-# Test slave with wrong password
-#
-
-save_master_pos;
-connection slave;
-sync_with_master;
-STOP SLAVE;
-connection master;
-UPDATE mysql.user SET password=password('foo') WHERE host='localhost' AND user='root';
-connection slave;
-START SLAVE;
-connection master;
-#
-# Give slave time to do at least one failed connect retry
-# This one must be short so that the slave will not stop retrying
-real_sleep 2;
-UPDATE mysql.user SET password=password('') WHERE host='localhost' AND user='root';
-# Give slave time to connect (will retry every second)
-
-sleep 2;
-
-CREATE TABLE t3(n INT);
-INSERT INTO t3 VALUES(1),(2);
-sync_slave_with_master;
-SELECT * FROM t3 ORDER BY n;
-SELECT SUM(LENGTH(word)) FROM t1;
-connection master;
-DROP TABLE t1,t3;
-save_master_pos;
-connection slave;
-sync_with_master;
-
-# Test if the slave SQL thread can be more than 16K behind the slave
-# I/O thread (> IO_SIZE)
-
-connection master;
-# we'll use table-level locking to delay slave SQL thread
-eval CREATE TABLE t1 (n INT) ENGINE=$engine_type;
-sync_slave_with_master;
-connection master;
-RESET MASTER;
-connection slave;
-STOP SLAVE;
-RESET SLAVE;
-
-connection master;
-let $1=5000;
-# Generate 16K of relay log
-disable_query_log;
-while ($1)
-{
- eval INSERT INTO t1 VALUES($1);
- dec $1;
-}
-enable_query_log;
-SELECT COUNT(*) FROM t1;
-save_master_pos;
-
-# Try to cause a large relay log lag on the slave by locking t1
-connection slave;
-LOCK TABLES t1 READ;
-START SLAVE;
-UNLOCK TABLES;
-sync_with_master;
-SELECT COUNT(*) FROM t1;
-
-connection master;
-DROP TABLE t1;
-CREATE TABLE t1 (n INT);
-INSERT INTO t1 VALUES(3456);
-sync_slave_with_master;
-SELECT n FROM t1;
-
-connection master;
-DROP TABLE t1;
-
-# restore old passwords
-replace into mysql.user select * from tmp;
-drop temporary table tmp;
-
-sync_slave_with_master;
diff --git a/mysql-test/suite/rpl/include/rpl_row_annotate.test b/mysql-test/suite/rpl/include/rpl_row_annotate.test
index 317a9c86539..8b4b704cef9 100644
--- a/mysql-test/suite/rpl/include/rpl_row_annotate.test
+++ b/mysql-test/suite/rpl/include/rpl_row_annotate.test
@@ -147,7 +147,7 @@ let $start_pos= `select @binlog_start_pos`;
connection master;
SET SESSION binlog_annotate_row_events = ON;
INSERT DELAYED INTO test1.t4 VALUES (1,1);
-FLUSH TABLES;
+FLUSH TABLES test1.t4;
SELECT * FROM test1.t4 ORDER BY a;
sync_slave_with_master;
diff --git a/mysql-test/suite/rpl/include/rpl_row_delayed_ins.test b/mysql-test/suite/rpl/include/rpl_row_delayed_ins.test
index bad308ff814..03c7b5282a8 100644
--- a/mysql-test/suite/rpl/include/rpl_row_delayed_ins.test
+++ b/mysql-test/suite/rpl/include/rpl_row_delayed_ins.test
@@ -10,7 +10,7 @@ eval create table t1(a int not null primary key) engine=$engine_type;
insert delayed into t1 values (1);
insert delayed into t1 values (2);
insert delayed into t1 values (3);
-flush tables;
+flush tables t1;
SELECT * FROM t1 ORDER BY a;
sync_slave_with_master;
diff --git a/mysql-test/suite/rpl/include/rpl_shutdown_wait_slaves.inc b/mysql-test/suite/rpl/include/rpl_shutdown_wait_slaves.inc
new file mode 100644
index 00000000000..17720e94dc8
--- /dev/null
+++ b/mysql-test/suite/rpl/include/rpl_shutdown_wait_slaves.inc
@@ -0,0 +1,91 @@
+--connection server_1
+
+CREATE TABLE t1 (a INT) ENGINE=innodb;
+
+--save_master_pos
+
+--connection server_2
+--sync_with_master
+
+--connection server_3
+--sync_with_master
+
+--connection server_4
+--source include/stop_slave.inc
+
+--connection server_1
+--disable_query_log
+--let $count=1000
+while ($count)
+{
+ INSERT INTO t1 SET a=1;
+ --dec $count
+}
+--enable_query_log
+--save_master_pos
+
+# Shut down the master and restart server_4, which will be waiting for the
+# master to start replication while the master is in its shutdown phase.
+# The freshly forked server_4 dump thread must correspond to a record
+# in slave_list, and it will not start sending out binlog events
+# until it has received a signal from the shutdown thread.
+# This also proves delivery to a slave that connects in mid-shutdown.
+--connection server_1
+SET @@GLOBAL.debug_dbug="+d,simulate_delay_at_shutdown";
+
+--connection server_4
+--source include/start_slave.inc
+
+--connection server_1
+--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+wait
+EOF
+# --shutdown_server 60
+--send SHUTDOWN WAIT FOR ALL SLAVES
+--reap
+--source include/wait_until_disconnected.inc
+#
+# MDEV-18450 liveness condition:
+# Despite the shutdown, even the "late" slave #4 is in sync
+#
+--connection server_4
+--sync_with_master
+
+--connection server_3
+--sync_with_master
+
+--connection server_2
+--sync_with_master
+
+--connection server_1
+--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
+restart
+EOF
+
+--connection default
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+--connection server_1
+--enable_reconnect
+--source include/wait_until_connected_again.inc
+
+#
+# Cleanup
+#
+--connection server_1
+DROP TABLE t1;
+
+--connection server_2
+--disable_warnings
+--source include/start_slave.inc
+--enable_warnings
+
+--connection server_3
+--disable_warnings
+--source include/start_slave.inc
+--enable_warnings
+
+--connection server_4
+--disable_warnings
+--source include/start_slave.inc
+--enable_warnings
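
The new include file drives the MDEV-18450 behaviour: SHUTDOWN WAIT FOR ALL SLAVES keeps the master alive until every registered dump thread, including the one registered mid-shutdown, has sent the remaining binlog events, as the comments above describe. Stripped of the mysqltest plumbing, the server-side part reduces to the sketch below; the debug_dbug injection exists only in debug builds:

  -- On the master (debug build): delay the shutdown path so a late slave
  -- can still register its dump thread.
  SET @@GLOBAL.debug_dbug = "+d,simulate_delay_at_shutdown";
  -- Then shut down, waiting until all slaves have received the binlog.
  SHUTDOWN WAIT FOR ALL SLAVES;
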
diff --git a/mysql-test/suite/rpl/r/password_expiration.result b/mysql-test/suite/rpl/r/password_expiration.result
new file mode 100644
index 00000000000..13fc11a3b96
--- /dev/null
+++ b/mysql-test/suite/rpl/r/password_expiration.result
@@ -0,0 +1,28 @@
+include/master-slave.inc
+[connection master]
+connection slave;
+include/stop_slave.inc
+connection master;
+create user 'repl_user' password expire;
+grant replication slave on *.* to repl_user;
+flush privileges;
+set global disconnect_on_expired_password=ON;
+connection slave;
+CHANGE MASTER TO MASTER_USER= 'repl_user';
+START SLAVE;
+include/wait_for_slave_io_error.inc [errno=1862]
+include/stop_slave_sql.inc
+RESET SLAVE;
+connection master;
+set global disconnect_on_expired_password=OFF;
+connection slave;
+START SLAVE;
+include/wait_for_slave_io_error.inc [errno=1820]
+connection master;
+DROP USER 'repl_user';
+set global disconnect_on_expired_password=default;
+connection slave;
+include/stop_slave_sql.inc
+CHANGE MASTER TO MASTER_USER='root';
+RESET SLAVE;
+include/rpl_end.inc
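
The new result file records how an expired replication password behaves on the slave I/O thread: with disconnect_on_expired_password=ON the connection is refused (error 1862), and with it OFF the sandboxed connection still cannot run the replication commands (error 1820). The master-side setup, taken directly from the test, is:

  CREATE USER 'repl_user' PASSWORD EXPIRE;
  GRANT REPLICATION SLAVE ON *.* TO repl_user;
  SET GLOBAL disconnect_on_expired_password=ON;   -- slave I/O thread stops with error 1862
  -- ... later ...
  SET GLOBAL disconnect_on_expired_password=OFF;  -- slave I/O thread stops with error 1820
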
diff --git a/mysql-test/suite/rpl/r/rpl_EE_err.result b/mysql-test/suite/rpl/r/rpl_EE_err.result
index 1f605935005..0b0ee84229f 100644
--- a/mysql-test/suite/rpl/r/rpl_EE_err.result
+++ b/mysql-test/suite/rpl/r/rpl_EE_err.result
@@ -1,7 +1,7 @@
include/master-slave.inc
[connection master]
create table t1 (a int) engine=myisam;
-flush tables;
+flush tables t1;
drop table if exists t1;
Warnings:
Warning 1017 Can't find file: './test/t1.MYI' (errno: 2 "No such file or directory")
diff --git a/mysql-test/suite/rpl/r/rpl_create_drop_user.result b/mysql-test/suite/rpl/r/rpl_create_drop_user.result
index f8cc271e8cf..61b351b50df 100644
--- a/mysql-test/suite/rpl/r/rpl_create_drop_user.result
+++ b/mysql-test/suite/rpl/r/rpl_create_drop_user.result
@@ -17,15 +17,15 @@ CURRENT_USER
u2@localhost
disconnect user_a;
connection master;
-SELECT user, password FROM mysql.user WHERE user LIKE 'u%' ORDER BY user;
-user password
-u1 *D9553C4CE316A9845CE49E30A2D7E3857AF966C4
-u2
+SELECT user,password,plugin,authentication_string FROM mysql.user WHERE user LIKE 'u%' ;
+User Password plugin authentication_string
+u1 *D9553C4CE316A9845CE49E30A2D7E3857AF966C4 mysql_native_password *D9553C4CE316A9845CE49E30A2D7E3857AF966C4
+u2 mysql_native_password
connection slave;
-SELECT user, password FROM mysql.user WHERE user LIKE 'u%' ORDER BY user;
-user password
-u1 *D9553C4CE316A9845CE49E30A2D7E3857AF966C4
-u2
+SELECT user,password,plugin,authentication_string FROM mysql.user WHERE user LIKE 'u%' ;
+User Password plugin authentication_string
+u1 *D9553C4CE316A9845CE49E30A2D7E3857AF966C4 mysql_native_password *D9553C4CE316A9845CE49E30A2D7E3857AF966C4
+u2 mysql_native_password
connection master;
CREATE OR REPLACE USER u1@localhost IDENTIFIED BY 'abcdefghijk2';
connect user_a, localhost, u1,'abcdefghijk2',;
@@ -35,25 +35,25 @@ CURRENT_USER
u1@localhost
disconnect user_a;
connection master;
-SELECT user, password FROM mysql.user WHERE user LIKE 'u%' ORDER BY user;
-user password
-u1 *A9A5EF53CE2EFAA6F4A746D63A917B2370971A7E
-u2
+SELECT user,password,plugin,authentication_string FROM mysql.user WHERE user LIKE 'u%' ;
+User Password plugin authentication_string
+u1 *A9A5EF53CE2EFAA6F4A746D63A917B2370971A7E mysql_native_password *A9A5EF53CE2EFAA6F4A746D63A917B2370971A7E
+u2 mysql_native_password
connection slave;
-SELECT user, password FROM mysql.user WHERE user LIKE 'u%' ORDER BY user;
-user password
-u1 *A9A5EF53CE2EFAA6F4A746D63A917B2370971A7E
-u2
+SELECT user,password,plugin,authentication_string FROM mysql.user WHERE user LIKE 'u%' ;
+User Password plugin authentication_string
+u1 *A9A5EF53CE2EFAA6F4A746D63A917B2370971A7E mysql_native_password *A9A5EF53CE2EFAA6F4A746D63A917B2370971A7E
+u2 mysql_native_password
connection master;
CREATE USER u1@localhost;
ERROR HY000: Operation CREATE USER failed for 'u1'@'localhost'
DROP USER u3@localhost;
ERROR HY000: Operation DROP USER failed for 'u3'@'localhost'
connection slave;
-SELECT user, password FROM mysql.user WHERE user LIKE 'u%' ORDER BY user;
-user password
-u1 *A9A5EF53CE2EFAA6F4A746D63A917B2370971A7E
-u2
+SELECT user,password,plugin,authentication_string FROM mysql.user WHERE user LIKE 'u%' ;
+User Password plugin authentication_string
+u1 *A9A5EF53CE2EFAA6F4A746D63A917B2370971A7E mysql_native_password *A9A5EF53CE2EFAA6F4A746D63A917B2370971A7E
+u2 mysql_native_password
connection master;
DROP USER IF EXISTS u1@localhost;
DROP USER u2@localhost;
@@ -61,6 +61,6 @@ DROP USER IF EXISTS u3@localhost;
Warnings:
Note 1974 Can't drop user 'u3'@'localhost'; it doesn't exist
connection slave;
-SELECT user, password FROM mysql.user WHERE user LIKE 'u%' ORDER BY user;
-user password
+SELECT user,password,plugin,authentication_string FROM mysql.user WHERE user LIKE 'u%' ;
+User Password plugin authentication_string
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_create_if_not_exists.result b/mysql-test/suite/rpl/r/rpl_create_if_not_exists.result
index d74fd07189c..b31eacfc236 100644
--- a/mysql-test/suite/rpl/r/rpl_create_if_not_exists.result
+++ b/mysql-test/suite/rpl/r/rpl_create_if_not_exists.result
@@ -25,8 +25,6 @@ connection slave;
connection slave;
SHOW TABLES in mysqltest;
Tables_in_mysqltest
-t
-t1
SHOW EVENTS in mysqltest;
Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation
mysqltest e root@localhost SYSTEM ONE TIME # NULL NULL NULL NULL SLAVESIDE_DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
diff --git a/mysql-test/suite/rpl/r/rpl_ddl.result b/mysql-test/suite/rpl/r/rpl_ddl.result
index 68c5e91f42e..22ce9a288c1 100644
--- a/mysql-test/suite/rpl/r/rpl_ddl.result
+++ b/mysql-test/suite/rpl/r/rpl_ddl.result
@@ -1354,11 +1354,11 @@ MAX(f1)
TEST-INFO: SLAVE: The INSERT is committed (Succeeded)
connection master;
SELECT user FROM mysql.user WHERE user = 'user1';
-user
+User
user1
connection slave;
SELECT user FROM mysql.user WHERE user = 'user1';
-user
+User
user1
connection master;
@@ -1399,11 +1399,11 @@ MAX(f1)
TEST-INFO: SLAVE: The INSERT is committed (Succeeded)
connection master;
SELECT user FROM mysql.user WHERE user = 'rename1';
-user
+User
rename1
connection slave;
SELECT user FROM mysql.user WHERE user = 'rename1';
-user
+User
rename1
connection master;
@@ -1444,10 +1444,10 @@ MAX(f1)
TEST-INFO: SLAVE: The INSERT is committed (Succeeded)
connection master;
SELECT user FROM mysql.user WHERE user = 'rename1';
-user
+User
connection slave;
SELECT user FROM mysql.user WHERE user = 'rename1';
-user
+User
use test;
connection master;
DROP TEMPORARY TABLE mysqltest1.t22;
diff --git a/mysql-test/suite/rpl/r/rpl_do_grant.result b/mysql-test/suite/rpl/r/rpl_do_grant.result
index 9eca21b38e4..5fa1002f9ac 100644
--- a/mysql-test/suite/rpl/r/rpl_do_grant.result
+++ b/mysql-test/suite/rpl/r/rpl_do_grant.result
@@ -1,14 +1,6 @@
include/master-slave.inc
[connection master]
connection master;
-delete from mysql.user where user=_binary'rpl_do_grant';
-delete from mysql.db where user=_binary'rpl_do_grant';
-flush privileges;
-connection slave;
-delete from mysql.user where user=_binary'rpl_ignore_grant';
-delete from mysql.db where user=_binary'rpl_ignore_grant';
-flush privileges;
-connection master;
create user rpl_do_grant@localhost;
grant select on *.* to rpl_do_grant@localhost;
grant drop on test.* to rpl_do_grant@localhost;
@@ -20,11 +12,11 @@ GRANT DROP ON `test`.* TO 'rpl_do_grant'@'localhost'
connection master;
set password for rpl_do_grant@localhost=password("does it work?");
connection slave;
-select authentication_string<>_binary'' from mysql.user where user=_binary'rpl_do_grant';
-authentication_string<>_binary''
+select authentication_string<>'' from mysql.user where user='rpl_do_grant';
+authentication_string<>''
1
connection master;
-update mysql.user set authentication_string='' where user='rpl_do_grant';
+update mysql.global_priv set priv=json_remove(priv, '$.authentication_string') where user='rpl_do_grant';
flush privileges;
select authentication_string<>'' from mysql.user where user='rpl_do_grant';
authentication_string<>''
@@ -37,13 +29,8 @@ select authentication_string<>'' from mysql.user where user='rpl_do_grant';
authentication_string<>''
1
connection master;
-delete from mysql.user where user=_binary'rpl_do_grant';
-delete from mysql.db where user=_binary'rpl_do_grant';
-flush privileges;
+drop user rpl_do_grant@localhost;
connection slave;
-delete from mysql.user where user=_binary'rpl_do_grant';
-delete from mysql.db where user=_binary'rpl_do_grant';
-flush privileges;
connection master;
show grants for rpl_do_grant@localhost;
ERROR 42000: There is no such grant defined for user 'rpl_do_grant' on host 'localhost'
@@ -328,4 +315,6 @@ Grantor
root@localhost
connection master;
DROP USER user_bug27606@localhost;
+select priv into @root_priv from mysql.global_priv where user='root' and host='127.0.0.1';
+update mysql.global_priv set priv=@root_priv where user='root' and host='localhost';
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_drop_db.result b/mysql-test/suite/rpl/r/rpl_drop_db.result
index 1b132c20afc..3712527afe4 100644
--- a/mysql-test/suite/rpl/r/rpl_drop_db.result
+++ b/mysql-test/suite/rpl/r/rpl_drop_db.result
@@ -6,6 +6,8 @@ create database mysqltest1;
create table mysqltest1.t1 (n int);
insert into mysqltest1.t1 values (1);
select * from mysqltest1.t1 into outfile 'mysqltest1/f1.txt';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create table mysqltest1.t2 (n int);
create table mysqltest1.t3 (n int);
drop database mysqltest1;
diff --git a/mysql-test/suite/rpl/r/rpl_grant.result b/mysql-test/suite/rpl/r/rpl_grant.result
index 0f546f7edc4..274a8505fb8 100644
--- a/mysql-test/suite/rpl/r/rpl_grant.result
+++ b/mysql-test/suite/rpl/r/rpl_grant.result
@@ -4,7 +4,7 @@ connection master;
CREATE USER dummy@localhost;
CREATE USER dummy1@localhost, dummy2@localhost;
SELECT user, host FROM mysql.user WHERE user like 'dummy%';
-user host
+User Host
dummy localhost
dummy1 localhost
dummy2 localhost
@@ -14,7 +14,7 @@ COUNT(*)
connection slave;
**** On Slave ****
SELECT user,host FROM mysql.user WHERE user like 'dummy%';
-user host
+User Host
dummy localhost
dummy1 localhost
dummy2 localhost
@@ -28,13 +28,13 @@ DROP USER nonexisting@localhost, dummy@localhost;
ERROR HY000: Operation DROP USER failed for 'nonexisting'@'localhost'
DROP USER dummy1@localhost, dummy2@localhost;
SELECT user, host FROM mysql.user WHERE user like 'dummy%';
-user host
+User Host
SELECT COUNT(*) FROM mysql.user WHERE user like 'dummy%';
COUNT(*)
0
connection slave;
SELECT user,host FROM mysql.user WHERE user like 'dummy%';
-user host
+User Host
SELECT COUNT(*) FROM mysql.user WHERE user like 'dummy%';
COUNT(*)
0
diff --git a/mysql-test/suite/rpl/r/rpl_gtid_ignored.result b/mysql-test/suite/rpl/r/rpl_gtid_ignored.result
index ac608c3c2a3..de4a815ab60 100644
--- a/mysql-test/suite/rpl/r/rpl_gtid_ignored.result
+++ b/mysql-test/suite/rpl/r/rpl_gtid_ignored.result
@@ -79,7 +79,7 @@ a
9
connection server_1;
DROP TABLE t1;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
SET GLOBAL gtid_strict_mode= @old_gtid_strict_mode;
SET debug_sync = "reset";
connection server_2;
diff --git a/mysql-test/suite/rpl/r/rpl_gtid_mdev4484.result b/mysql-test/suite/rpl/r/rpl_gtid_mdev4484.result
index aaeb0c8f119..5dffdd9809c 100644
--- a/mysql-test/suite/rpl/r/rpl_gtid_mdev4484.result
+++ b/mysql-test/suite/rpl/r/rpl_gtid_mdev4484.result
@@ -3,7 +3,7 @@ include/master-slave.inc
connection slave;
include/stop_slave.inc
SET sql_log_bin=0;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
SET sql_log_bin=1;
include/start_slave.inc
connection master;
@@ -16,36 +16,32 @@ INSERT INTO t1 VALUES (1);
connection slave;
connection slave;
include/stop_slave.inc
+SET @old_gtid_cleanup_batch_size= @@GLOBAL.gtid_cleanup_batch_size;
+SET GLOBAL gtid_cleanup_batch_size= 2;
SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug="+d,gtid_slave_pos_simulate_failed_delete";
SET sql_log_bin= 0;
-CALL mtr.add_suppression("Can't find file");
+CALL mtr.add_suppression("<DEBUG> Error deleting old GTID row");
SET sql_log_bin= 1;
include/start_slave.inc
connection master;
-INSERT INTO t1 VALUES (2);
-connection slave;
-include/wait_for_slave_sql_error.inc [errno=1942]
-STOP SLAVE IO_THREAD;
-SELECT domain_id, server_id, seq_no FROM mysql.gtid_slave_pos
-ORDER BY domain_id, sub_id DESC LIMIT 1;
-domain_id server_id seq_no
-0 1 3
+connection slave;
+SELECT COUNT(*), MAX(seq_no) INTO @pre_count, @pre_max_seq_no
+FROM mysql.gtid_slave_pos;
+SELECT IF(@pre_count >= 20, "OK", CONCAT("Error: too few rows seen while errors injected: ", @pre_count));
+IF(@pre_count >= 20, "OK", CONCAT("Error: too few rows seen while errors injected: ", @pre_count))
+OK
SET GLOBAL debug_dbug= @old_dbug;
-include/start_slave.inc
connection master;
-INSERT INTO t1 VALUES (3);
-connection slave;
-connection slave;
-SELECT domain_id, server_id, seq_no FROM mysql.gtid_slave_pos
-ORDER BY domain_id, sub_id DESC LIMIT 1;
-domain_id server_id seq_no
-0 1 4
-SELECT * FROM t1 ORDER BY i;
-i
-1
-2
-3
+connection slave;
+connection slave;
+SELECT IF(COUNT(*) >= 1, "OK", CONCAT("Error: too few rows seen after errors no longer injected: ", COUNT(*)))
+FROM mysql.gtid_slave_pos
+WHERE seq_no <= @pre_max_seq_no;
+IF(COUNT(*) >= 1, "OK", CONCAT("Error: too few rows seen after errors no longer injected: ", COUNT(*)))
+OK
connection master;
DROP TABLE t1;
+connection slave;
+SET GLOBAL gtid_cleanup_batch_size= @old_gtid_cleanup_batch_size;
include/rpl_end.inc
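
The rewritten rpl_gtid_mdev4484 test no longer asserts exact rows in mysql.gtid_slave_pos; old rows are now purged asynchronously in batches, so it only checks lower bounds on the row counts. The batch size is the gtid_cleanup_batch_size global shown above; a sketch of how the test exercises it:

  SET @old_batch = @@GLOBAL.gtid_cleanup_batch_size;
  SET GLOBAL gtid_cleanup_batch_size = 2;   -- force frequent, small cleanup passes
  -- ... replicate transactions, then verify the surviving row counts ...
  SET GLOBAL gtid_cleanup_batch_size = @old_batch;
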
diff --git a/mysql-test/suite/rpl/r/rpl_gtid_stop_start.result b/mysql-test/suite/rpl/r/rpl_gtid_stop_start.result
index ff845794c22..50f24d56e9a 100644
--- a/mysql-test/suite/rpl/r/rpl_gtid_stop_start.result
+++ b/mysql-test/suite/rpl/r/rpl_gtid_stop_start.result
@@ -159,7 +159,7 @@ a
8
9
SET sql_log_bin= 0;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
SET sql_log_bin= 1;
include/start_slave.inc
connection server_1;
@@ -171,7 +171,7 @@ include/start_slave.inc
*** MDEV-4692: mysql.gtid_slave_pos accumulates values for a domain ***
SELECT domain_id, COUNT(*) FROM mysql.gtid_slave_pos GROUP BY domain_id;
domain_id COUNT(*)
-0 2
+0 3
1 2
connection server_1;
INSERT INTO t1 VALUES (11);
@@ -179,7 +179,7 @@ connection server_2;
FLUSH NO_WRITE_TO_BINLOG TABLES;
SELECT domain_id, COUNT(*) FROM mysql.gtid_slave_pos GROUP BY domain_id;
domain_id COUNT(*)
-0 2
+0 4
1 2
include/start_slave.inc
connection server_1;
@@ -189,8 +189,8 @@ connection server_2;
FLUSH NO_WRITE_TO_BINLOG TABLES;
SELECT domain_id, COUNT(*) FROM mysql.gtid_slave_pos GROUP BY domain_id;
domain_id COUNT(*)
-0 2
-1 2
+0 3
+1 1
*** MDEV-4650: show variables; ERROR 1946 (HY000): Failed to load replication slave GTID position ***
connection server_2;
SET sql_log_bin=0;
diff --git a/mysql-test/suite/rpl/r/rpl_ignore_revoke.result b/mysql-test/suite/rpl/r/rpl_ignore_revoke.result
index c86f2f4e4df..cc65d9dacfd 100644
--- a/mysql-test/suite/rpl/r/rpl_ignore_revoke.result
+++ b/mysql-test/suite/rpl/r/rpl_ignore_revoke.result
@@ -4,26 +4,26 @@ connection master;
grant select on *.* to 'user_foo'@'%' identified by 'user_foopass';
revoke select on *.* from 'user_foo'@'%';
select select_priv from mysql.user where user='user_foo' /* master:must be N */;
-select_priv
+Select_priv
N
connection slave;
grant select on *.* to 'user_foo'@'%' identified by 'user_foopass';
revoke select on *.* from 'user_foo'@'%';
select select_priv from mysql.user where user='user_foo' /* slave:must be N */;
-select_priv
+Select_priv
N
grant select on *.* to 'user_foo'@'%' identified by 'user_foopass';
select select_priv from mysql.user where user='user_foo' /* slave:must be Y */;
-select_priv
+Select_priv
Y
connection master;
revoke select on *.* from 'user_foo';
select select_priv from mysql.user where user='user_foo' /* master:must be N */;
-select_priv
+Select_priv
N
connection slave;
select select_priv from mysql.user where user='user_foo' /* slave:must get Y */;
-select_priv
+Select_priv
Y
connection slave;
revoke select on *.* FROM 'user_foo';
diff --git a/mysql-test/suite/rpl/r/rpl_ignore_table.result b/mysql-test/suite/rpl/r/rpl_ignore_table.result
index 4eeb333d10c..511eff51d22 100644
--- a/mysql-test/suite/rpl/r/rpl_ignore_table.result
+++ b/mysql-test/suite/rpl/r/rpl_ignore_table.result
@@ -1,7 +1,7 @@
include/master-slave.inc
[connection master]
call mtr.add_suppression("Can't find record in 't.'");
-call mtr.add_suppression("Can't find record in 'user'");
+call mtr.add_suppression("Can't find record in 'global_priv'");
call mtr.add_suppression("Can't find record in 'tables_priv'");
**** Test case for BUG#16487 ****
connection master;
@@ -32,12 +32,7 @@ to mysqltest3@localhost;
create database mysqltest2;
create table mysqltest2.t2 (id int);
GRANT SELECT ON mysqltest2.t2 TO mysqltest4@localhost IDENTIFIED BY 'pass';
-insert into mysql.user (user, host) values ("mysqltest5", "somehost");
-Warnings:
-Warning 1364 Field 'ssl_cipher' doesn't have a default value
-Warning 1364 Field 'x509_issuer' doesn't have a default value
-Warning 1364 Field 'x509_subject' doesn't have a default value
-Warning 1364 Field 'authentication_string' doesn't have a default value
+insert into mysql.global_priv (user, host) values ("mysqltest5", "somehost");
GRANT SELECT ON *.* TO mysqltest6@localhost;
GRANT INSERT ON *.* TO mysqltest6@localhost;
GRANT INSERT ON test.* TO mysqltest6@localhost;
diff --git a/mysql-test/suite/rpl/r/rpl_innodb_bug28430.result b/mysql-test/suite/rpl/r/rpl_innodb_bug28430.result
index a666ae26c51..69215ecb545 100644
--- a/mysql-test/suite/rpl/r/rpl_innodb_bug28430.result
+++ b/mysql-test/suite/rpl/r/rpl_innodb_bug28430.result
@@ -51,6 +51,10 @@ DELETE FROM test.regular_tbl WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
+Warnings:
+Level Warning
+Code 1287
+Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE PROCEDURE test.proc_bykey()
BEGIN
DECLARE ins_count INT DEFAULT 1000;
@@ -72,6 +76,10 @@ DELETE FROM test.bykey_tbl WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
+Warnings:
+Level Warning
+Code 1287
+Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE PROCEDURE test.proc_byrange()
BEGIN
DECLARE ins_count INT DEFAULT 1000;
@@ -93,6 +101,10 @@ DELETE FROM test.byrange_tbl WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
+Warnings:
+Level Warning
+Code 1287
+Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
begin;
CALL test.proc_norm();
commit;
diff --git a/mysql-test/suite/rpl/r/rpl_innodb_bug30888.result b/mysql-test/suite/rpl/r/rpl_innodb_bug30888.result
index d4640a36a7d..da6888e76a0 100644
--- a/mysql-test/suite/rpl/r/rpl_innodb_bug30888.result
+++ b/mysql-test/suite/rpl/r/rpl_innodb_bug30888.result
@@ -26,6 +26,10 @@ DELETE FROM test.regular_tbl WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
+Warnings:
+Level Warning
+Code 1287
+Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL test.proc_norm();
connection slave;
connection master;
diff --git a/mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result b/mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result
index 00b50df4a68..89f59deae73 100644
--- a/mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result
+++ b/mysql-test/suite/rpl/r/rpl_innodb_mixed_dml.result
@@ -481,77 +481,78 @@ SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
******************** CREATE USER ********************
CREATE USER 'user_test_rpl'@'localhost' IDENTIFIED BY PASSWORD '*1111111111111111111111111111111111111111';
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 N
+Host User Password plugin authentication_string Select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 mysql_native_password *1111111111111111111111111111111111111111 N
connection slave;
USE test_rpl;
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 N
+Host User Password plugin authentication_string Select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 mysql_native_password *1111111111111111111111111111111111111111 N
connection master;
******************** GRANT ********************
GRANT SELECT ON *.* TO 'user_test_rpl'@'localhost';
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 Y
+Host User Password plugin authentication_string Select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 mysql_native_password *1111111111111111111111111111111111111111 Y
connection slave;
USE test_rpl;
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 Y
+Host User Password plugin authentication_string Select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 mysql_native_password *1111111111111111111111111111111111111111 Y
connection master;
******************** REVOKE ********************
REVOKE SELECT ON *.* FROM 'user_test_rpl'@'localhost';
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 N
+Host User Password plugin authentication_string Select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 mysql_native_password *1111111111111111111111111111111111111111 N
connection slave;
USE test_rpl;
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 N
+Host User Password plugin authentication_string Select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 mysql_native_password *1111111111111111111111111111111111111111 N
connection master;
******************** SET PASSWORD ********************
SET PASSWORD FOR 'user_test_rpl'@'localhost' = '*0000000000000000000000000000000000000000';
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
+Host User Password plugin authentication_string Select_priv
localhost user_test_rpl *0000000000000000000000000000000000000000 mysql_native_password *0000000000000000000000000000000000000000 N
connection slave;
USE test_rpl;
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
+Host User Password plugin authentication_string Select_priv
localhost user_test_rpl *0000000000000000000000000000000000000000 mysql_native_password *0000000000000000000000000000000000000000 N
connection master;
******************** RENAME USER ********************
RENAME USER 'user_test_rpl'@'localhost' TO 'user_test_rpl_2'@'localhost';
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
+Host User Password plugin authentication_string Select_priv
localhost user_test_rpl_2 *0000000000000000000000000000000000000000 mysql_native_password *0000000000000000000000000000000000000000 N
connection slave;
USE test_rpl;
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
+Host User Password plugin authentication_string Select_priv
localhost user_test_rpl_2 *0000000000000000000000000000000000000000 mysql_native_password *0000000000000000000000000000000000000000 N
connection master;
******************** DROP USER ********************
DROP USER 'user_test_rpl_2'@'localhost';
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
+Host User Password plugin authentication_string Select_priv
connection slave;
USE test_rpl;
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
+Host User Password plugin authentication_string Select_priv
connection master;
INSERT INTO t1 VALUES(100, 'test');
******************** ANALYZE ********************
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test_rpl.t1 analyze status Engine-independent statistics collected
test_rpl.t1 analyze status OK
******************** CHECK TABLE ********************
@@ -676,7 +677,6 @@ DROP TRIGGER tr1;
******************** EVENTS ********************
-GRANT EVENT ON *.* TO 'root'@'localhost';
INSERT INTO t1 VALUES(1, 'test1');
CREATE EVENT e1 ON SCHEDULE EVERY '1' SECOND COMMENT 'e_second_comment' DO DELETE FROM t1;
SHOW EVENTS;
@@ -1098,8 +1098,6 @@ master-bin.000001 # Query # # use `test_rpl`; DELETE FROM t2
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test_rpl`; DROP TRIGGER tr1
-master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test_rpl`; GRANT EVENT ON *.* TO 'root'@'localhost'
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Query # # use `test_rpl`; INSERT INTO t1 VALUES(1, 'test1')
master-bin.000001 # Xid # # COMMIT /* XID */
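Note on the expected-output changes above: in 10.4 the authentication data is stored per plugin, so for mysql_native_password the hash is reported through both the Password and authentication_string columns of the mysql.user compatibility view, with plugin filled in. A quick illustrative check, sketch only, reusing the account name from the rows above:

SHOW CREATE USER 'user_test_rpl'@'localhost';
# expected shape (assumption, not recorded in this patch):
# CREATE USER 'user_test_rpl'@'localhost' IDENTIFIED BY PASSWORD '*1111111111111111111111111111111111111111'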
diff --git a/mysql-test/suite/rpl/r/rpl_mdev10863.result b/mysql-test/suite/rpl/r/rpl_mdev10863.result
index 6accd1ee830..0326316563c 100644
--- a/mysql-test/suite/rpl/r/rpl_mdev10863.result
+++ b/mysql-test/suite/rpl/r/rpl_mdev10863.result
@@ -46,6 +46,6 @@ SET GLOBAL slave_parallel_threads=@old_parallel_threads;
SET GLOBAL max_relay_log_size= @old_max_relay;
include/start_slave.inc
connection server_1;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
DROP TABLE t1;
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_mdev12179.result b/mysql-test/suite/rpl/r/rpl_mdev12179.result
index 8373eb43774..dcda036cdfb 100644
--- a/mysql-test/suite/rpl/r/rpl_mdev12179.result
+++ b/mysql-test/suite/rpl/r/rpl_mdev12179.result
@@ -49,7 +49,7 @@ a
1
include/stop_slave.inc
SET sql_log_bin=0;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
CREATE TABLE mysql.gtid_slave_pos_innodb LIKE mysql.gtid_slave_pos;
ALTER TABLE mysql.gtid_slave_pos_innodb ENGINE=InnoDB;
INSERT INTO mysql.gtid_slave_pos_innodb SELECT * FROM mysql.gtid_slave_pos;
@@ -77,7 +77,7 @@ SELECT table_name, engine FROM information_schema.tables
WHERE table_schema='mysql' AND table_name LIKE 'gtid_slave_pos%'
ORDER BY table_name;
table_name engine
-gtid_slave_pos MyISAM
+gtid_slave_pos Aria
gtid_slave_pos_innodb InnoDB
SELECT @@gtid_pos_auto_engines;
@@gtid_pos_auto_engines
@@ -122,7 +122,7 @@ table_name engine
gtid_slave_pos InnoDB
include/stop_slave.inc
SET sql_log_bin=0;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
SET sql_log_bin=1;
connection server_1;
INSERT INTO t1 VALUES (5);
@@ -157,7 +157,7 @@ SELECT lower(table_name), engine FROM information_schema.tables
WHERE table_schema='mysql' AND table_name LIKE 'gtid_slave_pos%'
ORDER BY table_name;
lower(table_name) engine
-gtid_slave_pos MyISAM
+gtid_slave_pos Aria
gtid_slave_pos_innodb InnoDB
include/stop_slave.inc
SET sql_log_bin=0;
@@ -202,7 +202,7 @@ SELECT table_name, engine FROM information_schema.tables
WHERE table_schema='mysql' AND table_name LIKE 'gtid_slave_pos%'
ORDER BY table_name;
table_name engine
-gtid_slave_pos MyISAM
+gtid_slave_pos Aria
SELECT domain_id, max(seq_no) FROM mysql.gtid_slave_pos GROUP BY domain_id;
domain_id max(seq_no)
0 11
@@ -250,7 +250,7 @@ SELECT lower(table_name), engine FROM information_schema.tables
WHERE table_schema='mysql' AND table_name LIKE 'gtid_slave_pos%'
ORDER BY table_name;
lower(table_name) engine
-gtid_slave_pos MyISAM
+gtid_slave_pos Aria
gtid_slave_pos_innodb InnoDB
SELECT domain_id, max(seq_no) FROM mysql.gtid_slave_pos GROUP BY domain_id;
domain_id max(seq_no)
@@ -259,6 +259,8 @@ connection server_2;
*** Restart the slave server to prove 'gtid_slave_pos_innodb' autodiscovery ***
connection server_2;
SELECT max(seq_no) FROM mysql.gtid_slave_pos_InnoDB into @seq_no;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
connection server_1;
INSERT INTO t2(a) SELECT 1+MAX(a) FROM t2;
include/save_master_gtid.inc
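The MyISAM-to-Aria switch above applies to the default mysql.gtid_slave_pos table; the per-engine copy checked here (gtid_slave_pos_innodb) is driven by @@gtid_pos_auto_engines. A minimal sketch of that mechanism, illustrative only and reusing the query from this result file:

SET GLOBAL gtid_pos_auto_engines = 'InnoDB';
# after the next replicated InnoDB transaction a per-engine copy shows up:
SELECT table_name, engine FROM information_schema.tables
WHERE table_schema='mysql' AND table_name LIKE 'gtid_slave_pos%'
ORDER BY table_name;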
diff --git a/mysql-test/suite/rpl/r/rpl_misc_functions.result b/mysql-test/suite/rpl/r/rpl_misc_functions.result
index 6c20623d62b..302cf2351c2 100644
--- a/mysql-test/suite/rpl/r/rpl_misc_functions.result
+++ b/mysql-test/suite/rpl/r/rpl_misc_functions.result
@@ -42,6 +42,8 @@ INSERT INTO t1 (col_a) VALUES (test_replication_sf());
INSERT INTO t1 (col_a) VALUES (test_replication_sf());
connection slave;
select * from t1 into outfile "../../tmp/t1_slave.txt";
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
connection master;
create temporary table t1_slave select * from t1 where 1=0;
load data infile '../../tmp/t1_slave.txt' into table t1_slave;
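The new warning recorded above spells out the preferred syntax: place INTO before FROM. A short sketch of both forms, the deprecated one being the statement from this result file:

# deprecated, still accepted (raises warning 1287):
select * from t1 into outfile "../../tmp/t1_slave.txt";
# preferred form per the warning text:
SELECT * INTO OUTFILE "../../tmp/t1_slave.txt" FROM t1;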
diff --git a/mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result b/mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result
index f900a8b0e9a..e768c5c6f7c 100644
--- a/mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result
+++ b/mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result
@@ -30,6 +30,7 @@ test.tt_2 preload_keys note The storage engine for the table doesn't support pre
INSERT INTO tt_1(ddl_case) VALUES (39);
ANALYZE TABLE nt_1;
Table Op Msg_type Msg_text
+test.nt_1 analyze status Engine-independent statistics collected
test.nt_1 analyze status Table is already up to date
INSERT INTO tt_1(ddl_case) VALUES (38);
CHECK TABLE nt_1;
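The extra "Engine-independent statistics collected" row added throughout these result files comes from ANALYZE TABLE now also gathering engine-independent statistics. A sketch of where those statistics can be inspected, assuming the standard mysql.table_stats and mysql.column_stats tables:

ANALYZE TABLE nt_1;
SELECT * FROM mysql.table_stats WHERE table_name = 'nt_1';
SELECT db_name, table_name, column_name FROM mysql.column_stats WHERE table_name = 'nt_1';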
diff --git a/mysql-test/suite/rpl/r/rpl_mysql57_stm_temporal_round.result b/mysql-test/suite/rpl/r/rpl_mysql57_stm_temporal_round.result
new file mode 100644
index 00000000000..bedd103c2a0
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_mysql57_stm_temporal_round.result
@@ -0,0 +1,22 @@
+#
+# MDEV-8894 Inserting fractional seconds into MySQL 5.6 master breaks consistency on MariaDB 10 slave
+#
+include/master-slave.inc
+[connection master]
+connection slave;
+CREATE TABLE t1 (id SERIAL, a DATETIME(3));
+include/stop_slave.inc
+connection master;
+include/rpl_stop_server.inc [server_number=1]
+include/rpl_start_server.inc [server_number=1]
+connection slave;
+CHANGE MASTER TO master_host='127.0.0.1', master_port=SERVER_MYPORT_1, master_user='root', master_log_file='master-bin.000001', master_log_pos=4;
+include/start_slave.inc
+connection master;
+connection slave;
+SELECT * FROM t1 ORDER BY id;
+id a
+1 2001-01-01 00:00:01.000
+include/stop_slave.inc
+DROP TABLE t1;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_mysql80_stm_temporal_round.result b/mysql-test/suite/rpl/r/rpl_mysql80_stm_temporal_round.result
new file mode 100644
index 00000000000..23b3217895a
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_mysql80_stm_temporal_round.result
@@ -0,0 +1,23 @@
+#
+# MDEV-8894 Inserting fractional seconds into MySQL 5.6 master breaks consistency on MariaDB 10 slave
+#
+include/master-slave.inc
+[connection master]
+connection slave;
+CREATE TABLE t1 (id SERIAL, a DATETIME(3));
+include/stop_slave.inc
+connection master;
+include/rpl_stop_server.inc [server_number=1]
+include/rpl_start_server.inc [server_number=1]
+connection slave;
+CHANGE MASTER TO master_host='127.0.0.1', master_port=SERVER_MYPORT_1, master_user='root', master_log_file='master-bin.000001', master_log_pos=4;
+include/start_slave.inc
+connection master;
+connection slave;
+SELECT * FROM t1 ORDER BY id;
+id a
+1 2001-01-01 00:00:01.000
+2 2001-01-01 00:00:00.999
+include/stop_slave.inc
+DROP TABLE t1;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_parallel.result b/mysql-test/suite/rpl/r/rpl_parallel.result
index d994e4fdef6..9258deadaca 100644
--- a/mysql-test/suite/rpl/r/rpl_parallel.result
+++ b/mysql-test/suite/rpl/r/rpl_parallel.result
@@ -1517,6 +1517,7 @@ SET SESSION debug_dbug="+d,binlog_force_commit_id";
SET @commit_id= 10000;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
INSERT INTO t3 VALUES (120, 0);
SET @commit_id= 10001;
diff --git a/mysql-test/suite/rpl/r/rpl_parallel_optimistic.result b/mysql-test/suite/rpl/r/rpl_parallel_optimistic.result
index ca202a66b0e..bf7a8192f56 100644
--- a/mysql-test/suite/rpl/r/rpl_parallel_optimistic.result
+++ b/mysql-test/suite/rpl/r/rpl_parallel_optimistic.result
@@ -12,6 +12,8 @@ SET GLOBAL slave_parallel_threads=10;
CHANGE MASTER TO master_use_gtid=slave_pos;
SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode;
SET GLOBAL slave_parallel_mode='optimistic';
+SET @old_gtid_cleanup_batch_size= @@GLOBAL.gtid_cleanup_batch_size;
+SET GLOBAL gtid_cleanup_batch_size= 1000000;
connection server_1;
INSERT INTO t1 VALUES(1,1);
BEGIN;
@@ -131,6 +133,11 @@ c
204
205
206
+SELECT IF(COUNT(*) >= 30, "OK", CONCAT("Error: too few old rows found: ", COUNT(*)))
+FROM mysql.gtid_slave_pos;
+IF(COUNT(*) >= 30, "OK", CONCAT("Error: too few old rows found: ", COUNT(*)))
+OK
+SET GLOBAL gtid_cleanup_batch_size=1;
*** Test @@skip_parallel_replication. ***
connection server_2;
include/stop_slave.inc
@@ -358,6 +365,7 @@ connection server_1;
ALTER TABLE t2 COMMENT "123abc";
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
INSERT INTO t1 VALUES (1,2);
INSERT INTO t1 VALUES (2,2);
@@ -478,6 +486,7 @@ SET @old_server_id= @@SESSION.server_id;
SET SESSION server_id= 100;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
SET SESSION server_id= @old_server_id;
INSERT INTO t1 VALUES (37,0);
@@ -651,9 +660,10 @@ DROP TABLE t1, t2, t3;
include/save_master_gtid.inc
connection server_2;
include/sync_with_master_gtid.inc
-Check that no more than the expected last four GTIDs are in mysql.gtid_slave_pos
-select count(4) <= 4 from mysql.gtid_slave_pos order by domain_id, sub_id;
-count(4) <= 4
+SELECT COUNT(*) <= 5*@@GLOBAL.gtid_cleanup_batch_size
+FROM mysql.gtid_slave_pos;
+COUNT(*) <= 5*@@GLOBAL.gtid_cleanup_batch_size
1
+SET GLOBAL gtid_cleanup_batch_size= @old_gtid_cleanup_batch_size;
connection server_1;
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_row_001.result b/mysql-test/suite/rpl/r/rpl_row_001.result
index f7684d5ad97..976ac0996bf 100644
--- a/mysql-test/suite/rpl/r/rpl_row_001.result
+++ b/mysql-test/suite/rpl/r/rpl_row_001.result
@@ -1,44 +1,6 @@
include/master-slave.inc
[connection master]
-CREATE TABLE t1 (word CHAR(20) NOT NULL);
-LOAD DATA INFILE 'LOAD_FILE' INTO TABLE t1;
-LOAD DATA INFILE 'LOAD_FILE' INTO TABLE t1;
-SELECT * FROM t1 ORDER BY word LIMIT 10;
-word
-Aarhus
-Aarhus
-Aarhus
-Aarhus
-Aaron
-Aaron
-Aaron
-Aaron
-Ababa
-Ababa
-create temporary table tmp select * from mysql.user where host="localhost" and user="root";
-connection slave;
-STOP SLAVE;
-connection master;
-UPDATE mysql.user SET password=password('foo') WHERE host='localhost' AND user='root';
-connection slave;
-START SLAVE;
-connection master;
-UPDATE mysql.user SET password=password('') WHERE host='localhost' AND user='root';
-CREATE TABLE t3(n INT);
-INSERT INTO t3 VALUES(1),(2);
-connection slave;
-SELECT * FROM t3 ORDER BY n;
-n
-1
-2
-SELECT SUM(LENGTH(word)) FROM t1;
-SUM(LENGTH(word))
-1022
-connection master;
-DROP TABLE t1,t3;
-connection slave;
-connection master;
-CREATE TABLE t1 (n INT) ENGINE=MYISAM;
+CREATE TABLE t1 (n INT);
connection slave;
connection master;
RESET MASTER;
@@ -66,7 +28,5 @@ n
3456
connection master;
DROP TABLE t1;
-replace into mysql.user select * from tmp;
-drop temporary table tmp;
connection slave;
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_row_annotate_do.result b/mysql-test/suite/rpl/r/rpl_row_annotate_do.result
index 52f7b180fae..ba1922566bf 100644
--- a/mysql-test/suite/rpl/r/rpl_row_annotate_do.result
+++ b/mysql-test/suite/rpl/r/rpl_row_annotate_do.result
@@ -180,7 +180,7 @@ slave-bin.000001 # Rotate 2 # slave-bin.000002;pos=4
connection master;
SET SESSION binlog_annotate_row_events = ON;
INSERT DELAYED INTO test1.t4 VALUES (1,1);
-FLUSH TABLES;
+FLUSH TABLES test1.t4;
SELECT * FROM test1.t4 ORDER BY a;
a b
1 1
diff --git a/mysql-test/suite/rpl/r/rpl_row_annotate_dont.result b/mysql-test/suite/rpl/r/rpl_row_annotate_dont.result
index c657cf2fbb5..65535d5d417 100644
--- a/mysql-test/suite/rpl/r/rpl_row_annotate_dont.result
+++ b/mysql-test/suite/rpl/r/rpl_row_annotate_dont.result
@@ -160,7 +160,7 @@ slave-bin.000001 # Rotate 2 # slave-bin.000002;pos=4
connection master;
SET SESSION binlog_annotate_row_events = ON;
INSERT DELAYED INTO test1.t4 VALUES (1,1);
-FLUSH TABLES;
+FLUSH TABLES test1.t4;
SELECT * FROM test1.t4 ORDER BY a;
a b
1 1
diff --git a/mysql-test/suite/rpl/r/rpl_row_big_table_id,32bit.rdiff b/mysql-test/suite/rpl/r/rpl_row_big_table_id,32bit.rdiff
index 0849c5dcf64..2ff34004702 100644
--- a/mysql-test/suite/rpl/r/rpl_row_big_table_id,32bit.rdiff
+++ b/mysql-test/suite/rpl/r/rpl_row_big_table_id,32bit.rdiff
@@ -4,28 +4,28 @@
master-bin.000002 # Query 1 # use `test`; ALTER TABLE t comment ''
master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
master-bin.000002 # Annotate_rows 1 # INSERT INTO t SET a= 1
--master-bin.000002 # Table_map 1 # table_id: 4294967295 (test.t)
--master-bin.000002 # Write_rows_v1 1 # table_id: 4294967295 flags: STMT_END_F
-+master-bin.000002 # Table_map 1 # table_id: 1 (test.t)
-+master-bin.000002 # Write_rows_v1 1 # table_id: 1 flags: STMT_END_F
+-master-bin.000002 # Table_map 1 # table_id: 4294967298 (test.t)
+-master-bin.000002 # Write_rows_v1 1 # table_id: 4294967298 flags: STMT_END_F
++master-bin.000002 # Table_map 1 # table_id: 4294967294 (test.t)
++master-bin.000002 # Write_rows_v1 1 # table_id: 4294967294 flags: STMT_END_F
master-bin.000002 # Query 1 # COMMIT
master-bin.000002 # Gtid 1 # GTID #-#-#
master-bin.000002 # Query 1 # use `test`; ALTER TABLE t comment ''
master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
master-bin.000002 # Annotate_rows 1 # INSERT INTO t SET a= 2
--master-bin.000002 # Table_map 1 # table_id: 4294967296 (test.t)
--master-bin.000002 # Write_rows_v1 1 # table_id: 4294967296 flags: STMT_END_F
-+master-bin.000002 # Table_map 1 # table_id: 2 (test.t)
-+master-bin.000002 # Write_rows_v1 1 # table_id: 2 flags: STMT_END_F
+-master-bin.000002 # Table_map 1 # table_id: 4294967299 (test.t)
+-master-bin.000002 # Write_rows_v1 1 # table_id: 4294967299 flags: STMT_END_F
++master-bin.000002 # Table_map 1 # table_id: 1 (test.t)
++master-bin.000002 # Write_rows_v1 1 # table_id: 1 flags: STMT_END_F
master-bin.000002 # Query 1 # COMMIT
master-bin.000002 # Gtid 1 # GTID #-#-#
master-bin.000002 # Query 1 # use `test`; ALTER TABLE t comment ''
master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
master-bin.000002 # Annotate_rows 1 # INSERT INTO t SET a= 3
--master-bin.000002 # Table_map 1 # table_id: 4294967297 (test.t)
--master-bin.000002 # Write_rows_v1 1 # table_id: 4294967297 flags: STMT_END_F
-+master-bin.000002 # Table_map 1 # table_id: 3 (test.t)
-+master-bin.000002 # Write_rows_v1 1 # table_id: 3 flags: STMT_END_F
+-master-bin.000002 # Table_map 1 # table_id: 4294967300 (test.t)
+-master-bin.000002 # Write_rows_v1 1 # table_id: 4294967300 flags: STMT_END_F
++master-bin.000002 # Table_map 1 # table_id: 4294967294 (test.t)
++master-bin.000002 # Write_rows_v1 1 # table_id: 4294967294 flags: STMT_END_F
master-bin.000002 # Query 1 # COMMIT
connection slave;
connection master;
diff --git a/mysql-test/suite/rpl/r/rpl_row_big_table_id.result b/mysql-test/suite/rpl/r/rpl_row_big_table_id.result
index 6fece52dda3..7a0a964dc5e 100644
--- a/mysql-test/suite/rpl/r/rpl_row_big_table_id.result
+++ b/mysql-test/suite/rpl/r/rpl_row_big_table_id.result
@@ -22,22 +22,22 @@ master-bin.000002 # Gtid 1 # GTID #-#-#
master-bin.000002 # Query 1 # use `test`; ALTER TABLE t comment ''
master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
master-bin.000002 # Annotate_rows 1 # INSERT INTO t SET a= 1
-master-bin.000002 # Table_map 1 # table_id: 4294967295 (test.t)
-master-bin.000002 # Write_rows_v1 1 # table_id: 4294967295 flags: STMT_END_F
+master-bin.000002 # Table_map 1 # table_id: 4294967298 (test.t)
+master-bin.000002 # Write_rows_v1 1 # table_id: 4294967298 flags: STMT_END_F
master-bin.000002 # Query 1 # COMMIT
master-bin.000002 # Gtid 1 # GTID #-#-#
master-bin.000002 # Query 1 # use `test`; ALTER TABLE t comment ''
master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
master-bin.000002 # Annotate_rows 1 # INSERT INTO t SET a= 2
-master-bin.000002 # Table_map 1 # table_id: 4294967296 (test.t)
-master-bin.000002 # Write_rows_v1 1 # table_id: 4294967296 flags: STMT_END_F
+master-bin.000002 # Table_map 1 # table_id: 4294967299 (test.t)
+master-bin.000002 # Write_rows_v1 1 # table_id: 4294967299 flags: STMT_END_F
master-bin.000002 # Query 1 # COMMIT
master-bin.000002 # Gtid 1 # GTID #-#-#
master-bin.000002 # Query 1 # use `test`; ALTER TABLE t comment ''
master-bin.000002 # Gtid 1 # BEGIN GTID #-#-#
master-bin.000002 # Annotate_rows 1 # INSERT INTO t SET a= 3
-master-bin.000002 # Table_map 1 # table_id: 4294967297 (test.t)
-master-bin.000002 # Write_rows_v1 1 # table_id: 4294967297 flags: STMT_END_F
+master-bin.000002 # Table_map 1 # table_id: 4294967300 (test.t)
+master-bin.000002 # Write_rows_v1 1 # table_id: 4294967300 flags: STMT_END_F
master-bin.000002 # Query 1 # COMMIT
connection slave;
connection master;
diff --git a/mysql-test/suite/rpl/r/rpl_row_delayed_ins.result b/mysql-test/suite/rpl/r/rpl_row_delayed_ins.result
index 978c3d30dbd..4d439c202f5 100644
--- a/mysql-test/suite/rpl/r/rpl_row_delayed_ins.result
+++ b/mysql-test/suite/rpl/r/rpl_row_delayed_ins.result
@@ -5,7 +5,7 @@ create table t1(a int not null primary key) engine=myisam;
insert delayed into t1 values (1);
insert delayed into t1 values (2);
insert delayed into t1 values (3);
-flush tables;
+flush tables t1;
SELECT * FROM t1 ORDER BY a;
a
1
diff --git a/mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result b/mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result
index ef393873b97..a2f3bb44ae1 100644
--- a/mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result
+++ b/mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result
@@ -30,6 +30,7 @@ test.tt_2 preload_keys note The storage engine for the table doesn't support pre
INSERT INTO tt_1(ddl_case) VALUES (39);
ANALYZE TABLE nt_1;
Table Op Msg_type Msg_text
+test.nt_1 analyze status Engine-independent statistics collected
test.nt_1 analyze status Table is already up to date
INSERT INTO tt_1(ddl_case) VALUES (38);
CHECK TABLE nt_1;
diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result b/mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result
index 404f0890e7c..a0ea06afa89 100644
--- a/mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result
+++ b/mysql-test/suite/rpl/r/rpl_semi_sync_wait_point.result
@@ -68,6 +68,7 @@ i
1
2
# Now restart server
+# restart
# Done restarting server
# Reset setting that were lost in restart
SET @@global.rpl_semi_sync_master_timeout = 60000;
@@ -141,6 +142,7 @@ i
2
3
# Now restart server
+# restart
# Done restarting server
# Reset setting that were lost in restart
SET @@global.rpl_semi_sync_master_timeout = 60000;
diff --git a/mysql-test/suite/rpl/r/rpl_shutdown_wait_semisync_slaves.result b/mysql-test/suite/rpl/r/rpl_shutdown_wait_semisync_slaves.result
new file mode 100644
index 00000000000..cecc3daf340
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_shutdown_wait_semisync_slaves.result
@@ -0,0 +1,52 @@
+include/rpl_init.inc [topology=1->2, 1->3, 1->4]
+connection server_1;
+call mtr.add_suppression("Timeout waiting for reply of binlog");
+SET @@GLOBAL.rpl_semi_sync_master_enabled = 1;
+connection server_2;
+set global rpl_semi_sync_slave_enabled = 1;
+include/stop_slave.inc
+include/start_slave.inc
+set global rpl_semi_sync_slave_enabled = 1;
+connection server_3;
+set global rpl_semi_sync_slave_enabled = 1;
+include/stop_slave.inc
+include/start_slave.inc
+set global rpl_semi_sync_slave_enabled = 1;
+connection server_1;
+CREATE TABLE t1 (a INT) ENGINE=innodb;
+connection server_2;
+connection server_3;
+connection server_4;
+include/stop_slave.inc
+connection server_1;
+connection server_1;
+SET @@GLOBAL.debug_dbug="+d,simulate_delay_at_shutdown";
+connection server_4;
+include/start_slave.inc
+connection server_1;
+SHUTDOWN WAIT FOR ALL SLAVES;
+connection server_4;
+connection server_3;
+connection server_2;
+connection server_1;
+connection default;
+connection server_1;
+connection server_1;
+DROP TABLE t1;
+connection server_2;
+include/start_slave.inc
+connection server_3;
+include/start_slave.inc
+connection server_4;
+include/start_slave.inc
+connection server_2;
+include/stop_slave.inc
+include/start_slave.inc
+SET @@GLOBAL.rpl_semi_sync_slave_enabled = 0;;
+connection server_3;
+include/stop_slave.inc
+include/start_slave.inc
+SET @@GLOBAL.rpl_semi_sync_slave_enabled = 0;;
+connection server_1;
+SET @@GLOBAL.rpl_semi_sync_master_enabled = 0;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_shutdown_wait_slaves.result b/mysql-test/suite/rpl/r/rpl_shutdown_wait_slaves.result
new file mode 100644
index 00000000000..3b238332462
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_shutdown_wait_slaves.result
@@ -0,0 +1,29 @@
+include/rpl_init.inc [topology=1->2, 1->3, 1->4]
+connection server_1;
+CREATE TABLE t1 (a INT) ENGINE=innodb;
+connection server_2;
+connection server_3;
+connection server_4;
+include/stop_slave.inc
+connection server_1;
+connection server_1;
+SET @@GLOBAL.debug_dbug="+d,simulate_delay_at_shutdown";
+connection server_4;
+include/start_slave.inc
+connection server_1;
+SHUTDOWN WAIT FOR ALL SLAVES;
+connection server_4;
+connection server_3;
+connection server_2;
+connection server_1;
+connection default;
+connection server_1;
+connection server_1;
+DROP TABLE t1;
+connection server_2;
+include/start_slave.inc
+connection server_3;
+include/start_slave.inc
+connection server_4;
+include/start_slave.inc
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_stm_000001.result b/mysql-test/suite/rpl/r/rpl_stm_000001.result
index 0b9ed6fc09c..9ef2ca3bc53 100644
--- a/mysql-test/suite/rpl/r/rpl_stm_000001.result
+++ b/mysql-test/suite/rpl/r/rpl_stm_000001.result
@@ -19,11 +19,16 @@ abandons
connection slave;
stop slave;
connection master;
-UPDATE mysql.user SET password=password('foo') WHERE host='localhost' AND user='root';
+create temporary table tmp select * from mysql.global_priv where host="localhost" and user="root";
+set password for root@"localhost" = password('foo');
connection slave;
start slave;
connection master;
-UPDATE mysql.user SET password=password('') WHERE host='localhost' AND user='root';
+replace into mysql.global_priv select * from tmp;
+Warnings:
+Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave
+drop temporary table tmp;
+flush privileges;
create table t3(n int);
insert into t3 values(1),(2);
connection slave;
@@ -38,7 +43,7 @@ connection master;
drop table t1,t3;
connection slave;
connection master;
-create table t1 (n int) engine=myisam;
+create table t1 (n int);
connection slave;
connection master;
reset master;
@@ -76,31 +81,4 @@ count(*)
5000
connection master1;
drop table t1;
-create table t1 (n int);
-insert into t1 values(3456);
-insert ignore into mysql.user (Host, User, Password)
-VALUES ("10.10.10.%", "blafasel2", password("blafasel2"));
-Warnings:
-Warning 1364 Field 'ssl_cipher' doesn't have a default value
-Warning 1364 Field 'x509_issuer' doesn't have a default value
-Warning 1364 Field 'x509_subject' doesn't have a default value
-Warning 1364 Field 'authentication_string' doesn't have a default value
-select select_priv,user from mysql.user where user = _binary'blafasel2';
-select_priv user
-N blafasel2
-update mysql.user set Select_priv = "Y" where User= _binary"blafasel2";
-select select_priv,user from mysql.user where user = _binary'blafasel2';
-select_priv user
-Y blafasel2
-connection slave;
-select n from t1;
-n
-3456
-select select_priv,user from mysql.user where user = _binary'blafasel2';
-select_priv user
-Y blafasel2
-connection master1;
-drop table t1;
-delete from mysql.user where user="blafasel2";
-connection slave;
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result b/mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result
index f900a8b0e9a..e768c5c6f7c 100644
--- a/mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result
+++ b/mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result
@@ -30,6 +30,7 @@ test.tt_2 preload_keys note The storage engine for the table doesn't support pre
INSERT INTO tt_1(ddl_case) VALUES (39);
ANALYZE TABLE nt_1;
Table Op Msg_type Msg_text
+test.nt_1 analyze status Engine-independent statistics collected
test.nt_1 analyze status Table is already up to date
INSERT INTO tt_1(ddl_case) VALUES (38);
CHECK TABLE nt_1;
diff --git a/mysql-test/suite/rpl/r/rpl_switch_stm_row_mixed.result b/mysql-test/suite/rpl/r/rpl_switch_stm_row_mixed.result
index 2f7f1b07cb4..936f604be2e 100644
--- a/mysql-test/suite/rpl/r/rpl_switch_stm_row_mixed.result
+++ b/mysql-test/suite/rpl/r/rpl_switch_stm_row_mixed.result
@@ -140,7 +140,7 @@ create table t4 select * from t1 where 3 in (select 1 union select 2 union selec
SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR
create table t5 select * from t1 where 3 in (select 1 union select 2 union select curdate() union select 3);
Warnings:
-Warning 1292 Incorrect datetime value: '3'
+Warning 1292 Truncated incorrect datetime value: '3'
insert ignore into t5 select UUID() from t1 where 3 in (select 1 union select 2 union select 3 union select * from t4);
create procedure foo()
begin
diff --git a/mysql-test/suite/rpl/r/rpl_temporal_round.result b/mysql-test/suite/rpl/r/rpl_temporal_round.result
new file mode 100644
index 00000000000..df8cc431a74
--- /dev/null
+++ b/mysql-test/suite/rpl/r/rpl_temporal_round.result
@@ -0,0 +1,50 @@
+include/master-slave.inc
+[connection master]
+SET sql_mode=TIME_ROUND_FRACTIONAL;
+SET time_zone='+00:00';
+SET timestamp=UNIX_TIMESTAMP('2010-12-31 23:59:59.999999');
+CREATE TABLE t1 (id SERIAL, a TIMESTAMP(4));
+INSERT INTO t1 (a) VALUES (now(6));
+INSERT INTO t1 (a) VALUES ('2011-01-01 23:59:59.999999');
+CREATE TABLE t2 (id SERIAL, a DATETIME(4));
+INSERT INTO t2 (a) VALUES (now(6));
+INSERT INTO t2 (a) VALUES ('2011-01-01 23:59:59.999999');
+CREATE TABLE t3 (id SERIAL, a TIME(4));
+INSERT INTO t3 (a) VALUES (now(6));
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+INSERT INTO t3 (a) VALUES ('2011-01-01 23:59:59.999999');
+Warnings:
+Note 1265 Data truncated for column 'a' at row 1
+SELECT * FROM t1;
+id a
+1 2011-01-01 00:00:00.0000
+2 2011-01-02 00:00:00.0000
+SELECT * FROM t2;
+id a
+1 2011-01-01 00:00:00.0000
+2 2011-01-02 00:00:00.0000
+SELECT * FROM t3;
+id a
+1 24:00:00.0000
+2 24:00:00.0000
+connection slave;
+connection slave;
+SET time_zone='+00:00';
+SELECT * FROM t1;
+id a
+1 2011-01-01 00:00:00.0000
+2 2011-01-02 00:00:00.0000
+SELECT * FROM t2;
+id a
+1 2011-01-01 00:00:00.0000
+2 2011-01-02 00:00:00.0000
+SELECT * FROM t3;
+id a
+1 24:00:00.0000
+2 24:00:00.0000
+connection master;
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_tmp_table_and_DDL.result b/mysql-test/suite/rpl/r/rpl_tmp_table_and_DDL.result
index e8e95ab7c4c..45070949f79 100644
--- a/mysql-test/suite/rpl/r/rpl_tmp_table_and_DDL.result
+++ b/mysql-test/suite/rpl/r/rpl_tmp_table_and_DDL.result
@@ -145,43 +145,43 @@ UNLOCK TABLE;
DROP DATABASE mysqltest2;
LOCK TABLE t1 WRITE;
CREATE USER test_1@localhost;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'db' was not locked with LOCK TABLES
INSERT INTO t2 VALUES ("CREATE USER test_1@localhost with table locked");
UNLOCK TABLE;
CREATE USER test_2@localhost;
LOCK TABLE t1 WRITE;
GRANT SELECT ON t1 TO test_2@localhost;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'tables_priv' was not locked with LOCK TABLES
INSERT INTO t2 VALUES ("GRANT select on table to user with table locked");
GRANT ALL ON f2 TO test_2@localhost;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'tables_priv' was not locked with LOCK TABLES
INSERT INTO t2 VALUES ("GRANT ALL ON f2 TO test_2 with table locked");
GRANT ALL ON p2 TO test_2@localhost;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'tables_priv' was not locked with LOCK TABLES
INSERT INTO t2 VALUES ("GRANT ALL ON p2 TO test_2 with table locked");
GRANT USAGE ON *.* TO test_2@localhost;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'db' was not locked with LOCK TABLES
INSERT INTO t2 VALUES ("GRANT USAGE ON *.* TO test_2 with table locked");
REVOKE ALL PRIVILEGES ON f2 FROM test_2@localhost;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'tables_priv' was not locked with LOCK TABLES
INSERT INTO t2 VALUES ("REVOKE ALL PRIVILEGES on function to user with table locked");
REVOKE ALL PRIVILEGES ON p2 FROM test_2@localhost;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'tables_priv' was not locked with LOCK TABLES
INSERT INTO t2 VALUES ("REVOKE ALL PRIVILEGES on procedure to user with table locked");
REVOKE ALL PRIVILEGES ON t1 FROM test_2@localhost;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'tables_priv' was not locked with LOCK TABLES
INSERT INTO t2 VALUES ("REVOKE ALL PRIVILEGES on table to user with table locked");
REVOKE USAGE ON *.* FROM test_2@localhost;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'db' was not locked with LOCK TABLES
INSERT INTO t2 VALUES ("REVOKE USAGE ON *.* TO test_2 with table locked");
RENAME USER test_2@localhost TO test_3@localhost;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'db' was not locked with LOCK TABLES
INSERT INTO t2 VALUES ("RENAME USER test_2 TO test_3 with table locked");
UNLOCK TABLE;
RENAME USER test_2@localhost TO test_3@localhost;
LOCK TABLE t1 WRITE;
DROP USER test_3@localhost;
-ERROR HY000: Table 'user' was not locked with LOCK TABLES
+ERROR HY000: Table 'db' was not locked with LOCK TABLES
INSERT INTO t2 VALUES ("DROP USER test_3@localhost with table locked");
UNLOCK TABLE;
CREATE DATABASE db;
diff --git a/mysql-test/suite/rpl/t/password_expiration.test b/mysql-test/suite/rpl/t/password_expiration.test
new file mode 100644
index 00000000000..6934500cb40
--- /dev/null
+++ b/mysql-test/suite/rpl/t/password_expiration.test
@@ -0,0 +1,53 @@
+#
+# Test a slave connection is properly handled when the replication
+# user has an expired password
+#
+
+--source include/not_embedded.inc
+--source include/have_binlog_format_mixed.inc
+--source include/master-slave.inc
+
+--connection slave
+--source include/stop_slave.inc
+
+--connection master
+create user 'repl_user' password expire;
+grant replication slave on *.* to repl_user;
+flush privileges;
+set global disconnect_on_expired_password=ON;
+
+--connection slave
+--let $master_user= query_get_value(SHOW SLAVE STATUS, Master_User, 1)
+CHANGE MASTER TO MASTER_USER= 'repl_user';
+
+START SLAVE;
+# ER_MUST_CHANGE_PASSWORD_LOGIN
+--let $slave_io_errno= 1862
+--source include/wait_for_slave_io_error.inc
+
+# restart slave
+--source include/stop_slave_sql.inc
+RESET SLAVE;
+
+--connection master
+# force sandbox mode for repl_user
+set global disconnect_on_expired_password=OFF;
+
+--connection slave
+START SLAVE;
+# ER_MUST_CHANGE_PASSWORD
+--let $slave_io_errno= 1820
+--source include/wait_for_slave_io_error.inc
+
+--connection master
+DROP USER 'repl_user';
+set global disconnect_on_expired_password=default;
+
+--connection slave
+--source include/stop_slave_sql.inc
+eval CHANGE MASTER TO MASTER_USER='$master_user';
+RESET SLAVE;
+
+--let $rpl_only_running_threads= 1
+--source include/rpl_end.inc
+
diff --git a/mysql-test/suite/rpl/t/rpl_create_drop_user.test b/mysql-test/suite/rpl/t/rpl_create_drop_user.test
index 5fcf0a14c36..c5f193a0d0c 100644
--- a/mysql-test/suite/rpl/t/rpl_create_drop_user.test
+++ b/mysql-test/suite/rpl/t/rpl_create_drop_user.test
@@ -15,9 +15,11 @@ SELECT CURRENT_USER;
disconnect user_a;
connection master;
-SELECT user, password FROM mysql.user WHERE user LIKE 'u%' ORDER BY user;
+--sorted_result
+SELECT user,password,plugin,authentication_string FROM mysql.user WHERE user LIKE 'u%' ;
sync_slave_with_master;
-SELECT user, password FROM mysql.user WHERE user LIKE 'u%' ORDER BY user;
+--sorted_result
+SELECT user,password,plugin,authentication_string FROM mysql.user WHERE user LIKE 'u%' ;
connection master;
CREATE OR REPLACE USER u1@localhost IDENTIFIED BY 'abcdefghijk2';
@@ -26,9 +28,11 @@ connection user_a;
SELECT CURRENT_USER;
disconnect user_a;
connection master;
-SELECT user, password FROM mysql.user WHERE user LIKE 'u%' ORDER BY user;
+--sorted_result
+SELECT user,password,plugin,authentication_string FROM mysql.user WHERE user LIKE 'u%' ;
sync_slave_with_master;
-SELECT user, password FROM mysql.user WHERE user LIKE 'u%' ORDER BY user;
+--sorted_result
+SELECT user,password,plugin,authentication_string FROM mysql.user WHERE user LIKE 'u%' ;
connection master;
--error ER_CANNOT_USER
@@ -38,7 +42,8 @@ CREATE USER u1@localhost;
DROP USER u3@localhost;
sync_slave_with_master;
-SELECT user, password FROM mysql.user WHERE user LIKE 'u%' ORDER BY user;
+--sorted_result
+SELECT user,password,plugin,authentication_string FROM mysql.user WHERE user LIKE 'u%' ;
connection master;
DROP USER IF EXISTS u1@localhost;
@@ -46,6 +51,7 @@ DROP USER u2@localhost;
DROP USER IF EXISTS u3@localhost;
sync_slave_with_master;
-SELECT user, password FROM mysql.user WHERE user LIKE 'u%' ORDER BY user;
+--sorted_result
+SELECT user,password,plugin,authentication_string FROM mysql.user WHERE user LIKE 'u%' ;
--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_do_grant.test b/mysql-test/suite/rpl/t/rpl_do_grant.test
index 0024c7039e4..1350585ff93 100644
--- a/mysql-test/suite/rpl/t/rpl_do_grant.test
+++ b/mysql-test/suite/rpl/t/rpl_do_grant.test
@@ -3,18 +3,6 @@
-- source include/master-slave.inc
-# do not be influenced by other tests.
-connection master;
-delete from mysql.user where user=_binary'rpl_do_grant';
-delete from mysql.db where user=_binary'rpl_do_grant';
-flush privileges;
-sync_slave_with_master;
-# if these DELETE did nothing on the master, we need to do them manually on the
-# slave.
-delete from mysql.user where user=_binary'rpl_ignore_grant';
-delete from mysql.db where user=_binary'rpl_ignore_grant';
-flush privileges;
-
# test replication of GRANT
connection master;
create user rpl_do_grant@localhost;
@@ -27,13 +15,13 @@ show grants for rpl_do_grant@localhost;
connection master;
set password for rpl_do_grant@localhost=password("does it work?");
sync_slave_with_master;
-select authentication_string<>_binary'' from mysql.user where user=_binary'rpl_do_grant';
+select authentication_string<>'' from mysql.user where user='rpl_do_grant';
#
# Bug#24158 SET PASSWORD in binary log fails under ANSI_QUOTES
#
connection master;
-update mysql.user set authentication_string='' where user='rpl_do_grant';
+update mysql.global_priv set priv=json_remove(priv, '$.authentication_string') where user='rpl_do_grant';
flush privileges;
select authentication_string<>'' from mysql.user where user='rpl_do_grant';
set sql_mode='ANSI_QUOTES';
@@ -42,18 +30,10 @@ set sql_mode='';
sync_slave_with_master;
select authentication_string<>'' from mysql.user where user='rpl_do_grant';
-
# clear what we have done, to not influence other tests.
connection master;
-delete from mysql.user where user=_binary'rpl_do_grant';
-delete from mysql.db where user=_binary'rpl_do_grant';
-flush privileges;
+drop user rpl_do_grant@localhost;
sync_slave_with_master;
-# The mysql database is not replicated, so we have to do the deletes
-# manually on the slave as well.
-delete from mysql.user where user=_binary'rpl_do_grant';
-delete from mysql.db where user=_binary'rpl_do_grant';
-flush privileges;
# End of 4.1 tests
@@ -363,5 +343,8 @@ SELECT Grantor FROM mysql.tables_priv WHERE User='user_bug27606';
--connection master
DROP USER user_bug27606@localhost;
+select priv into @root_priv from mysql.global_priv where user='root' and host='127.0.0.1';
+update mysql.global_priv set priv=@root_priv where user='root' and host='localhost';
+
--source include/rpl_end.inc
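The cleanup above moves from editing mysql.user directly to DROP USER, and the remaining direct edits target mysql.global_priv, which in 10.4 stores each account as a JSON document (mysql.user stays available as a compatibility view). An illustrative sketch of reading attributes out of that JSON, assuming the standard priv column layout used by the json_remove() call in this hunk:

SELECT host, user,
       JSON_VALUE(priv, '$.plugin') AS plugin,
       JSON_VALUE(priv, '$.authentication_string') AS auth
FROM mysql.global_priv
WHERE user = 'rpl_do_grant';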
diff --git a/mysql-test/suite/rpl/t/rpl_gtid_ignored.test b/mysql-test/suite/rpl/t/rpl_gtid_ignored.test
index 6e927bd5a77..3c1324b2ea6 100644
--- a/mysql-test/suite/rpl/t/rpl_gtid_ignored.test
+++ b/mysql-test/suite/rpl/t/rpl_gtid_ignored.test
@@ -129,7 +129,7 @@ SELECT * FROM t1 ORDER BY a;
# Clean up.
--connection server_1
DROP TABLE t1;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
SET GLOBAL gtid_strict_mode= @old_gtid_strict_mode;
SET debug_sync = "reset";
diff --git a/mysql-test/suite/rpl/t/rpl_gtid_mdev4484.test b/mysql-test/suite/rpl/t/rpl_gtid_mdev4484.test
index e1f5696f5a1..5c17653da8a 100644
--- a/mysql-test/suite/rpl/t/rpl_gtid_mdev4484.test
+++ b/mysql-test/suite/rpl/t/rpl_gtid_mdev4484.test
@@ -9,7 +9,7 @@
# back). So fix it to make sure we are consistent, in case an earlier test case
# left it as InnoDB.
SET sql_log_bin=0;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
SET sql_log_bin=1;
--source include/start_slave.inc
@@ -28,37 +28,79 @@ INSERT INTO t1 VALUES (1);
# Inject an artificial error deleting entries, and check that the error handling code works.
--connection slave
--source include/stop_slave.inc
+SET @old_gtid_cleanup_batch_size= @@GLOBAL.gtid_cleanup_batch_size;
+SET GLOBAL gtid_cleanup_batch_size= 2;
SET @old_dbug= @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug="+d,gtid_slave_pos_simulate_failed_delete";
SET sql_log_bin= 0;
-CALL mtr.add_suppression("Can't find file");
+CALL mtr.add_suppression("<DEBUG> Error deleting old GTID row");
SET sql_log_bin= 1;
--source include/start_slave.inc
--connection master
-INSERT INTO t1 VALUES (2);
+--disable_query_log
+let $i = 20;
+while ($i) {
+ eval INSERT INTO t1 VALUES ($i+10);
+ dec $i;
+}
+--enable_query_log
+--save_master_pos
--connection slave
---let $slave_sql_errno= 1942
---source include/wait_for_slave_sql_error.inc
-STOP SLAVE IO_THREAD;
-SELECT domain_id, server_id, seq_no FROM mysql.gtid_slave_pos
- ORDER BY domain_id, sub_id DESC LIMIT 1;
+--sync_with_master
+
+# Now wait for the slave background thread to try to delete old rows and
+# hit the error injection.
+--let _TEST_MYSQLD_ERROR_LOG=$MYSQLTEST_VARDIR/log/mysqld.2.err
+--perl
+ open F, '<', $ENV{'_TEST_MYSQLD_ERROR_LOG'} or die;
+ outer: while (1) {
+ inner: while (<F>) {
+ last outer if /<DEBUG> Error deleting old GTID row/;
+ }
+ # Easy way to do sub-second sleep without extra modules.
+ select(undef, undef, undef, 0.1);
+ }
+EOF
+
+# Since we injected error in the cleanup code, the rows should remain in
+# mysql.gtid_slave_pos. Check that we have at least 20 (more robust against
+# non-deterministic cleanup and future changes than checking for exact number).
+SELECT COUNT(*), MAX(seq_no) INTO @pre_count, @pre_max_seq_no
+ FROM mysql.gtid_slave_pos;
+SELECT IF(@pre_count >= 20, "OK", CONCAT("Error: too few rows seen while errors injected: ", @pre_count));
SET GLOBAL debug_dbug= @old_dbug;
---source include/start_slave.inc
--connection master
-INSERT INTO t1 VALUES (3);
+--disable_query_log
+let $i = 20;
+while ($i) {
+ eval INSERT INTO t1 VALUES ($i+40);
+ dec $i;
+}
+--enable_query_log
--sync_slave_with_master
--connection slave
-SELECT domain_id, server_id, seq_no FROM mysql.gtid_slave_pos
- ORDER BY domain_id, sub_id DESC LIMIT 1;
-SELECT * FROM t1 ORDER BY i;
-
+# Now check that 1) rows are being deleted again after removing error
+# injection, and 2) old rows are left that failed their delete while errors
+# where injected (again compensating for non-deterministic deletion).
+# Deletion is async and slightly non-deterministic, so we wait for at
+# least 10 of the 20 new rows to be deleted.
+let $wait_condition=
+ SELECT COUNT(*) <= 20-10
+ FROM mysql.gtid_slave_pos
+ WHERE seq_no > @pre_max_seq_no;
+--source include/wait_condition.inc
+SELECT IF(COUNT(*) >= 1, "OK", CONCAT("Error: too few rows seen after errors no longer injected: ", COUNT(*)))
+ FROM mysql.gtid_slave_pos
+ WHERE seq_no <= @pre_max_seq_no;
# Clean up
--connection master
DROP TABLE t1;
+--connection slave
+SET GLOBAL gtid_cleanup_batch_size= @old_gtid_cleanup_batch_size;
--source include/rpl_end.inc
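The rewritten test above depends on the new @@gtid_cleanup_batch_size variable: old rows in mysql.gtid_slave_pos are now deleted asynchronously, in batches, by a background thread, so the test injects a delete error and counts surviving rows instead of waiting for an SQL thread error. A minimal sketch of the knob itself, illustrative only:

# a small batch size trims old GTID rows almost immediately;
# a huge value effectively postpones cleanup (as rpl_parallel_optimistic.test does)
SET GLOBAL gtid_cleanup_batch_size = 2;
SELECT COUNT(*) FROM mysql.gtid_slave_pos;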
diff --git a/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test b/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test
index 309debd87c5..53d62805c58 100644
--- a/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test
+++ b/mysql-test/suite/rpl/t/rpl_gtid_stop_start.test
@@ -230,7 +230,7 @@ EOF
--enable_reconnect
--source include/wait_until_connected_again.inc
SET sql_log_bin= 0;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
SET sql_log_bin= 1;
# Do a second restart to get the mysql.gtid_slave_pos table loaded with
# the right engine.
diff --git a/mysql-test/suite/rpl/t/rpl_ignore_table.test b/mysql-test/suite/rpl/t/rpl_ignore_table.test
index 3360b789475..a3fcfc95901 100644
--- a/mysql-test/suite/rpl/t/rpl_ignore_table.test
+++ b/mysql-test/suite/rpl/t/rpl_ignore_table.test
@@ -3,7 +3,7 @@ source include/have_collation.inc;
source include/master-slave.inc;
call mtr.add_suppression("Can't find record in 't.'");
-call mtr.add_suppression("Can't find record in 'user'");
+call mtr.add_suppression("Can't find record in 'global_priv'");
call mtr.add_suppression("Can't find record in 'tables_priv'");
#
@@ -69,7 +69,7 @@ create table mysqltest2.t2 (id int);
GRANT SELECT ON mysqltest2.t2 TO mysqltest4@localhost IDENTIFIED BY 'pass';
# Create a grant manually
-insert into mysql.user (user, host) values ("mysqltest5", "somehost");
+insert into mysql.global_priv (user, host) values ("mysqltest5", "somehost");
# Partial replicate 3 with *.*
GRANT SELECT ON *.* TO mysqltest6@localhost;
diff --git a/mysql-test/suite/rpl/t/rpl_mdev10863.test b/mysql-test/suite/rpl/t/rpl_mdev10863.test
index 81cdfd84dbe..73062df861e 100644
--- a/mysql-test/suite/rpl/t/rpl_mdev10863.test
+++ b/mysql-test/suite/rpl/t/rpl_mdev10863.test
@@ -99,7 +99,7 @@ SET GLOBAL max_relay_log_size= @old_max_relay;
--source include/start_slave.inc
--connection server_1
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
DROP TABLE t1;
--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_mdev12179.test b/mysql-test/suite/rpl/t/rpl_mdev12179.test
index eb0f6c04b42..db1ec3d4d22 100644
--- a/mysql-test/suite/rpl/t/rpl_mdev12179.test
+++ b/mysql-test/suite/rpl/t/rpl_mdev12179.test
@@ -44,7 +44,7 @@ SELECT * FROM t1 ORDER BY a;
SET sql_log_bin=0;
# Reset storage engine for mysql.gtid_slave_pos in case an earlier test
# might have changed it to InnoDB.
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
CREATE TABLE mysql.gtid_slave_pos_innodb LIKE mysql.gtid_slave_pos;
ALTER TABLE mysql.gtid_slave_pos_innodb ENGINE=InnoDB;
INSERT INTO mysql.gtid_slave_pos_innodb SELECT * FROM mysql.gtid_slave_pos;
@@ -128,7 +128,7 @@ SELECT table_name, engine FROM information_schema.tables
--source include/stop_slave.inc
SET sql_log_bin=0;
-ALTER TABLE mysql.gtid_slave_pos ENGINE=MyISAM;
+ALTER TABLE mysql.gtid_slave_pos ENGINE=Aria;
SET sql_log_bin=1;
--write_file $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
diff --git a/mysql-test/suite/rpl/t/rpl_mysql57_stm_temporal_round.test b/mysql-test/suite/rpl/t/rpl_mysql57_stm_temporal_round.test
new file mode 100644
index 00000000000..675b7db0603
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_mysql57_stm_temporal_round.test
@@ -0,0 +1,58 @@
+--source include/have_binlog_format_statement.inc
+
+--echo #
+--echo # MDEV-8894 Inserting fractional seconds into MySQL 5.6 master breaks consistency on MariaDB 10 slave
+--echo #
+
+--source include/have_innodb.inc
+--source include/master-slave.inc
+
+--connection slave
+CREATE TABLE t1 (id SERIAL, a DATETIME(3));
+--source include/stop_slave.inc
+
+--connection master
+--let $datadir= `SELECT @@datadir`
+
+--let $rpl_server_number= 1
+--source include/rpl_stop_server.inc
+
+--remove_file $datadir/master-bin.000001
+
+#
+# Simulate MySQL 5.7.x master
+#
+# mysql-5.7.11-stm-temporal-round-binlog.000001 was recorded against a
+# table with this structure:
+#CREATE TABLE t1 (id SERIAL, a DATETIME(3));
+# (note, the CREATE statement is not inside the binary log)
+#
+# using this command line:
+# mysqld --log-bin --binlog-format=statement
+# with the following single SQL statement:
+#
+#INSERT INTO t1 (a) VALUES ('2001-01-01 00:00:00.999999');
+#
+
+--copy_file $MYSQL_TEST_DIR/std_data/rpl/mysql-5.7.11-stm-temporal-round-binlog.000001 $datadir/master-bin.000001
+
+--let $rpl_server_number= 1
+--source include/rpl_start_server.inc
+
+--source include/wait_until_connected_again.inc
+
+--connection slave
+--replace_result $SERVER_MYPORT_1 SERVER_MYPORT_1
+eval CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1, master_user='root', master_log_file='master-bin.000001', master_log_pos=4;
+
+--source include/start_slave.inc
+
+--connection master
+--sync_slave_with_master
+SELECT * FROM t1 ORDER BY id;
+
+--source include/stop_slave.inc
+DROP TABLE t1;
+
+--let $rpl_only_running_threads= 1
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_mysql80_stm_temporal_round.test b/mysql-test/suite/rpl/t/rpl_mysql80_stm_temporal_round.test
new file mode 100644
index 00000000000..ad6df9d9993
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_mysql80_stm_temporal_round.test
@@ -0,0 +1,62 @@
+--source include/have_binlog_format_statement.inc
+
+--echo #
+--echo # MDEV-8894 Inserting fractional seconds into MySQL 5.6 master breaks consistency on MariaDB 10 slave
+--echo #
+
+--source include/have_innodb.inc
+--source include/master-slave.inc
+
+--connection slave
+CREATE TABLE t1 (id SERIAL, a DATETIME(3));
+--source include/stop_slave.inc
+
+--connection master
+--let $datadir= `SELECT @@datadir`
+
+--let $rpl_server_number= 1
+--source include/rpl_stop_server.inc
+
+--remove_file $datadir/master-bin.000001
+
+#
+# Simulate MySQL 8.0.x master
+#
+# mysql-8.0.13-stm-temporal-round-binlog.000001 was recorded against a
+# table with this structure:
+#CREATE TABLE t1 (id SERIAL, a DATETIME(3));
+# (note, the CREATE statement is not inside the binary log)
+#
+# using this command line:
+# mysqld --log-bin --binlog-format=statement --server-id=1 --character-set-server=latin1
+# with the following SQL script:
+#
+#SET NAMES latin1 COLLATE latin1_swedish_ci;
+#SET sql_mode='';
+#INSERT INTO t1 (a) VALUES ('2001-01-01 00:00:00.999999');
+#SET sql_mode=TIME_TRUNCATE_FRACTIONAL;
+#INSERT INTO t1 (a) VALUES ('2001-01-01 00:00:00.999999');
+#
+
+--copy_file $MYSQL_TEST_DIR/std_data/rpl/mysql-8.0.13-stm-temporal-round-binlog.000001 $datadir/master-bin.000001
+
+--let $rpl_server_number= 1
+--source include/rpl_start_server.inc
+
+--source include/wait_until_connected_again.inc
+
+--connection slave
+--replace_result $SERVER_MYPORT_1 SERVER_MYPORT_1
+eval CHANGE MASTER TO master_host='127.0.0.1', master_port=$SERVER_MYPORT_1, master_user='root', master_log_file='master-bin.000001', master_log_pos=4;
+
+--source include/start_slave.inc
+
+--connection master
+--sync_slave_with_master
+SELECT * FROM t1 ORDER BY id;
+
+--source include/stop_slave.inc
+DROP TABLE t1;
+
+--let $rpl_only_running_threads= 1
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_parallel_optimistic.test b/mysql-test/suite/rpl/t/rpl_parallel_optimistic.test
index e08472d5f51..0060cf4416c 100644
--- a/mysql-test/suite/rpl/t/rpl_parallel_optimistic.test
+++ b/mysql-test/suite/rpl/t/rpl_parallel_optimistic.test
@@ -21,6 +21,10 @@ SET GLOBAL slave_parallel_threads=10;
CHANGE MASTER TO master_use_gtid=slave_pos;
SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode;
SET GLOBAL slave_parallel_mode='optimistic';
+# Run the first part of the test with high batch size and see that
+# old rows remain in the table.
+SET @old_gtid_cleanup_batch_size= @@GLOBAL.gtid_cleanup_batch_size;
+SET GLOBAL gtid_cleanup_batch_size= 1000000;
--connection server_1
@@ -108,7 +112,12 @@ SELECT * FROM t3 ORDER BY c;
SELECT * FROM t1 ORDER BY a;
SELECT * FROM t2 ORDER BY a;
SELECT * FROM t3 ORDER BY c;
-#SHOW STATUS LIKE 'Slave_retried_transactions';
+# Check that we have a bunch of old rows left-over - they were not deleted
+# due to high @@gtid_cleanup_batch_size. Then set a low
+# @@gtid_cleanup_batch_size so we can test that rows start being deleted.
+SELECT IF(COUNT(*) >= 30, "OK", CONCAT("Error: too few old rows found: ", COUNT(*)))
+ FROM mysql.gtid_slave_pos;
+SET GLOBAL gtid_cleanup_batch_size=1;
--echo *** Test @@skip_parallel_replication. ***
@@ -557,25 +566,18 @@ DROP TABLE t1, t2, t3;
--connection server_2
--source include/sync_with_master_gtid.inc
-# Check for left-over rows in table mysql.gtid_slave_pos (MDEV-12147).
-#
-# There was a bug when a transaction got a conflict and was rolled back. It
-# might have also handled deletion of some old rows, and these deletions would
-# then also be rolled back. And since the deletes were never re-tried, old no
-# longer needed rows would accumulate in the table without limit.
-#
-# The earlier part of this test file have plenty of transactions being rolled
-# back. But the last DROP TABLE statement runs on its own and should never
-# conflict, thus at this point the mysql.gtid_slave_pos table should be clean.
-#
-# To support @@gtid_pos_auto_engines, when a row is inserted in the table, it
-# is associated with the engine of the table at insertion time, and it will
-# only be deleted during record_gtid from a table of the same engine. Since we
-# alter the table from MyISAM to InnoDB at the start of this test, we should
-# end up with 4 rows: two left-over from when the table was MyISAM, and two
-# left-over from the InnoDB part.
---echo Check that no more than the expected last four GTIDs are in mysql.gtid_slave_pos
-select count(4) <= 4 from mysql.gtid_slave_pos order by domain_id, sub_id;
+# Check that old rows are deleted from mysql.gtid_slave_pos.
+# Deletion is asynchronous, so use wait_condition.inc.
+# Also, there is a small amount of non-determinism in the deletion of old
+# rows, so it is not guaranteed that there can never be more than
+# @@gtid_cleanup_batch_size rows in the table; so allow a bit of slack
+# here.
+let $wait_condition=
+ SELECT COUNT(*) <= 5*@@GLOBAL.gtid_cleanup_batch_size
+ FROM mysql.gtid_slave_pos;
+--source include/wait_condition.inc
+eval $wait_condition;
+SET GLOBAL gtid_cleanup_batch_size= @old_gtid_cleanup_batch_size;
--connection server_1
--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_row_001.test b/mysql-test/suite/rpl/t/rpl_row_001.test
index 06d01f2476e..f66c61ffb6e 100644
--- a/mysql-test/suite/rpl/t/rpl_row_001.test
+++ b/mysql-test/suite/rpl/t/rpl_row_001.test
@@ -2,10 +2,50 @@
# By JBM 2005-02-15 Wrapped to allow reuse of test code#
########################################################
-- source include/have_binlog_format_row.inc
-# Slow test, don't run during staging part
--- source include/not_staging.inc
-- source include/master-slave.inc
-let $engine_type=MYISAM;
--- source include/rpl_row_001.test
+# Test if the slave SQL thread can be more than 16K behind the slave
+# I/O thread (> IO_SIZE)
+
+# we'll use table-level locking to delay slave SQL thread
+eval CREATE TABLE t1 (n INT);
+sync_slave_with_master;
+connection master;
+RESET MASTER;
+connection slave;
+STOP SLAVE;
+RESET SLAVE;
+
+connection master;
+let $1=5000;
+# Generate 16K of relay log
+disable_query_log;
+while ($1)
+{
+ eval INSERT INTO t1 VALUES($1);
+ dec $1;
+}
+enable_query_log;
+SELECT COUNT(*) FROM t1;
+save_master_pos;
+
+# Try to cause a large relay log lag on the slave by locking t1
+connection slave;
+LOCK TABLES t1 READ;
+START SLAVE;
+UNLOCK TABLES;
+sync_with_master;
+SELECT COUNT(*) FROM t1;
+
+connection master;
+DROP TABLE t1;
+CREATE TABLE t1 (n INT);
+INSERT INTO t1 VALUES(3456);
+sync_slave_with_master;
+SELECT n FROM t1;
+
+connection master;
+DROP TABLE t1;
+
+sync_slave_with_master;
--source include/rpl_end.inc
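The inlined test above builds more than 16K of relay log and holds the slave SQL thread back with a table lock. A sketch of how the resulting lag could be observed while t1 is still locked; the SHOW SLAVE STATUS field names are an assumption, not taken from this patch:

LOCK TABLES t1 READ;
START SLAVE;
# the I/O thread keeps downloading while the SQL thread waits on the lock:
# compare Read_Master_Log_Pos against Exec_Master_Log_Pos in the output
SHOW SLAVE STATUS;
UNLOCK TABLES;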
diff --git a/mysql-test/suite/rpl/t/rpl_shutdown_wait_semisync_slaves.cnf b/mysql-test/suite/rpl/t/rpl_shutdown_wait_semisync_slaves.cnf
new file mode 100644
index 00000000000..8ff9df0384d
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_shutdown_wait_semisync_slaves.cnf
@@ -0,0 +1,16 @@
+!include ../my.cnf
+
+[mysqld.1]
+log_warnings=3
+[mysqld.2]
+
+[mysqld.3]
+
+[mysqld.4]
+
+[ENV]
+SERVER_MYPORT_3= @mysqld.3.port
+SERVER_MYSOCK_3= @mysqld.3.socket
+
+SERVER_MYPORT_4= @mysqld.4.port
+SERVER_MYSOCK_4= @mysqld.4.socket
diff --git a/mysql-test/suite/rpl/t/rpl_shutdown_wait_semisync_slaves.test b/mysql-test/suite/rpl/t/rpl_shutdown_wait_semisync_slaves.test
new file mode 100644
index 00000000000..2c63df30fde
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_shutdown_wait_semisync_slaves.test
@@ -0,0 +1,46 @@
+#
+# MDEV-18450 "Slow" shutdown to wait for slaves that are to be fed
+# with everything in the master binlog before shutdown completes.
+#
+# This is a semisync version of basic tests.
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--let $rpl_topology=1->2, 1->3, 1->4
+--source include/rpl_init.inc
+
+--connection server_1
+call mtr.add_suppression("Timeout waiting for reply of binlog");
+--let $sav_enabled_master=`SELECT @@GLOBAL.rpl_semi_sync_master_enabled`
+SET @@GLOBAL.rpl_semi_sync_master_enabled = 1;
+
+--let slaves= 3
+--let i= 2
+while (`SELECT $i <= $slaves`)
+{
+ --connection server_$i
+ --let $sav_enabled_slave=`SELECT @@GLOBAL.rpl_semi_sync_slave_enabled`
+ set global rpl_semi_sync_slave_enabled = 1;
+
+ source include/stop_slave.inc;
+ source include/start_slave.inc;
+ set global rpl_semi_sync_slave_enabled = 1;
+
+ --inc $i
+}
+
+--source include/rpl_shutdown_wait_slaves.inc
+--let i= 2
+while (`SELECT $i <= $slaves`)
+{
+ --connection server_$i
+ source include/stop_slave.inc;
+ source include/start_slave.inc;
+ --eval SET @@GLOBAL.rpl_semi_sync_slave_enabled = $sav_enabled_slave;
+
+ --inc $i
+}
+
+--connection server_1
+--eval SET @@GLOBAL.rpl_semi_sync_master_enabled = $sav_enabled_master
+
+--source include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/t/rpl_shutdown_wait_slaves.cnf b/mysql-test/suite/rpl/t/rpl_shutdown_wait_slaves.cnf
new file mode 100644
index 00000000000..8ff9df0384d
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_shutdown_wait_slaves.cnf
@@ -0,0 +1,16 @@
+!include ../my.cnf
+
+[mysqld.1]
+log_warnings=3
+[mysqld.2]
+
+[mysqld.3]
+
+[mysqld.4]
+
+[ENV]
+SERVER_MYPORT_3= @mysqld.3.port
+SERVER_MYSOCK_3= @mysqld.3.socket
+
+SERVER_MYPORT_4= @mysqld.4.port
+SERVER_MYSOCK_4= @mysqld.4.socket
diff --git a/mysql-test/suite/rpl/t/rpl_shutdown_wait_slaves.test b/mysql-test/suite/rpl/t/rpl_shutdown_wait_slaves.test
new file mode 100644
index 00000000000..97363206776
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_shutdown_wait_slaves.test
@@ -0,0 +1,11 @@
+#
+# MDEV-18450 "Slow" shutdown to wait for slaves that are to be fed
+# with everything in the master binlog before shutdown completes.
+#
+--source include/have_innodb.inc
+--source include/have_debug.inc
+--let $rpl_topology=1->2, 1->3, 1->4
+--source include/rpl_init.inc
+
+--source include/rpl_shutdown_wait_slaves.inc
+--source include/rpl_end.inc
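Both new shutdown tests exercise the MDEV-18450 statement itself; the debug injection (simulate_delay_at_shutdown) only makes the wait observable. The core usage, as recorded in the result files above, is simply:

# on the master, once the slaves that must be fed are connected:
SHUTDOWN WAIT FOR ALL SLAVES;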
diff --git a/mysql-test/suite/rpl/t/rpl_stm_000001.test b/mysql-test/suite/rpl/t/rpl_stm_000001.test
index 119fd6168e0..62b5c5b1cd0 100644
--- a/mysql-test/suite/rpl/t/rpl_stm_000001.test
+++ b/mysql-test/suite/rpl/t/rpl_stm_000001.test
@@ -4,7 +4,6 @@
-- source include/master-slave.inc
CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
---let $engine_type= myisam
# Load some data into t1
create table t1 (word char(20) not null);
@@ -19,7 +18,8 @@ select * from t1 limit 10;
sync_slave_with_master;
stop slave;
connection master;
-UPDATE mysql.user SET password=password('foo') WHERE host='localhost' AND user='root';
+create temporary table tmp select * from mysql.global_priv where host="localhost" and user="root";
+set password for root@"localhost" = password('foo');
connection slave;
start slave;
connection master;
@@ -27,7 +27,9 @@ connection master;
# Give slave time to do at last one failed connect retry
# This one must be short so that the slave will not stop retrying
real_sleep 2;
-UPDATE mysql.user SET password=password('') WHERE host='localhost' AND user='root';
+replace into mysql.global_priv select * from tmp;
+drop temporary table tmp;
+flush privileges;
# Give slave time to connect (will retry every second)
sleep 2;
@@ -45,7 +47,7 @@ sync_slave_with_master;
connection master;
# we'll use table-level locking to delay slave SQL thread
-eval create table t1 (n int) engine=$engine_type;
+eval create table t1 (n int);
sync_slave_with_master;
connection master;
reset master;
@@ -111,20 +113,6 @@ connection slave;
select count(*) from t1;
connection master1;
drop table t1;
-create table t1 (n int);
-insert into t1 values(3456);
-insert ignore into mysql.user (Host, User, Password)
- VALUES ("10.10.10.%", "blafasel2", password("blafasel2"));
-select select_priv,user from mysql.user where user = _binary'blafasel2';
-update mysql.user set Select_priv = "Y" where User= _binary"blafasel2";
-select select_priv,user from mysql.user where user = _binary'blafasel2';
-sync_slave_with_master;
-select n from t1;
-select select_priv,user from mysql.user where user = _binary'blafasel2';
-connection master1;
-drop table t1;
-delete from mysql.user where user="blafasel2";
-sync_slave_with_master;
# End of 4.1 tests
--source include/rpl_end.inc
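
The notable change in rpl_stm_000001.test is the password handling: in 10.4 the authentication data lives in mysql.global_priv (mysql.user is kept as a compatibility view), so the test now changes the root password with SET PASSWORD and stashes the original global_priv row in a temporary table instead of updating mysql.user directly. The save/restore idiom, as used above:

    create temporary table tmp
      select * from mysql.global_priv where host='localhost' and user='root';
    set password for root@'localhost' = password('foo');
    # ... work that requires the temporary password ...
    replace into mysql.global_priv select * from tmp;
    drop temporary table tmp;
    flush privileges;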
diff --git a/mysql-test/suite/rpl/t/rpl_temporal_round.test b/mysql-test/suite/rpl/t/rpl_temporal_round.test
new file mode 100644
index 00000000000..c13c18bddb5
--- /dev/null
+++ b/mysql-test/suite/rpl/t/rpl_temporal_round.test
@@ -0,0 +1,35 @@
+--source include/master-slave.inc
+
+SET sql_mode=TIME_ROUND_FRACTIONAL;
+SET time_zone='+00:00';
+SET timestamp=UNIX_TIMESTAMP('2010-12-31 23:59:59.999999');
+
+CREATE TABLE t1 (id SERIAL, a TIMESTAMP(4));
+INSERT INTO t1 (a) VALUES (now(6));
+INSERT INTO t1 (a) VALUES ('2011-01-01 23:59:59.999999');
+
+CREATE TABLE t2 (id SERIAL, a DATETIME(4));
+INSERT INTO t2 (a) VALUES (now(6));
+INSERT INTO t2 (a) VALUES ('2011-01-01 23:59:59.999999');
+
+CREATE TABLE t3 (id SERIAL, a TIME(4));
+INSERT INTO t3 (a) VALUES (now(6));
+INSERT INTO t3 (a) VALUES ('2011-01-01 23:59:59.999999');
+
+SELECT * FROM t1;
+SELECT * FROM t2;
+SELECT * FROM t3;
+
+sync_slave_with_master;
+connection slave;
+SET time_zone='+00:00';
+SELECT * FROM t1;
+SELECT * FROM t2;
+SELECT * FROM t3;
+
+connection master;
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+
+--source include/rpl_end.inc
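
rpl_temporal_round.test checks that TIME_ROUND_FRACTIONAL has the same effect on master and slave: with the mode set, a value with more fractional digits than the column rounds instead of truncating, so '2011-01-01 23:59:59.999999' stored into a DATETIME(4) is expected to carry over into '2011-01-02 00:00:00.0000'. A short standalone sketch of the same effect (the exact stored value and any warnings depend on the server version):

    SET sql_mode=TIME_ROUND_FRACTIONAL;
    CREATE TABLE t (a DATETIME(4));
    INSERT INTO t VALUES ('2011-01-01 23:59:59.999999');
    SELECT a FROM t;   # expected: 2011-01-02 00:00:00.0000 with rounding enabled
    DROP TABLE t;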
diff --git a/mysql-test/suite/sql_sequence/read_only.result b/mysql-test/suite/sql_sequence/read_only.result
index 38edcc0894b..cd8d498bb81 100644
--- a/mysql-test/suite/sql_sequence/read_only.result
+++ b/mysql-test/suite/sql_sequence/read_only.result
@@ -1,4 +1,5 @@
create sequence s1 cache 2 engine=innodb;
+# restart: --innodb-read-only
connection default;
show global variables like 'innodb_read_only';
Variable_name Value
@@ -26,6 +27,7 @@ ERROR HY000: Table 's1' is read only
select * from s1;
next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count
1 1 9223372036854775806 1 1 2 0 0
+# restart
select * from s1;
next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count
1 1 9223372036854775806 1 1 2 0 0
diff --git a/mysql-test/suite/sys_vars/r/histogram_size_basic.result b/mysql-test/suite/sys_vars/r/histogram_size_basic.result
index 1f310600d00..05a52309d9f 100644
--- a/mysql-test/suite/sys_vars/r/histogram_size_basic.result
+++ b/mysql-test/suite/sys_vars/r/histogram_size_basic.result
@@ -1,20 +1,20 @@
SET @start_global_value = @@global.histogram_size;
SELECT @start_global_value;
@start_global_value
-0
+254
SET @start_session_value = @@session.histogram_size;
SELECT @start_session_value;
@start_session_value
-0
+254
'#--------------------FN_DYNVARS_053_01-------------------------#'
SET @@global.histogram_size = DEFAULT;
SELECT @@global.histogram_size;
@@global.histogram_size
-0
+254
SET @@session.histogram_size = DEFAULT;
SELECT @@session.histogram_size;
@@session.histogram_size
-0
+254
'#--------------------FN_DYNVARS_053_03-------------------------#'
SET @@global.histogram_size = 1;
SELECT @@global.histogram_size;
@@ -129,8 +129,8 @@ SELECT @@local.histogram_size = @@session.histogram_size;
SET @@global.histogram_size = @start_global_value;
SELECT @@global.histogram_size;
@@global.histogram_size
-0
+254
SET @@session.histogram_size = @start_session_value;
SELECT @@session.histogram_size;
@@session.histogram_size
-0
+254
diff --git a/mysql-test/suite/sys_vars/r/histogram_type_basic.result b/mysql-test/suite/sys_vars/r/histogram_type_basic.result
index f688a2a15fd..db42204ac1f 100644
--- a/mysql-test/suite/sys_vars/r/histogram_type_basic.result
+++ b/mysql-test/suite/sys_vars/r/histogram_type_basic.result
@@ -1,16 +1,16 @@
SET @start_global_value = @@global.histogram_type;
SELECT @start_global_value;
@start_global_value
-SINGLE_PREC_HB
+DOUBLE_PREC_HB
SET @start_session_value = @@session.histogram_type;
SELECT @start_session_value;
@start_session_value
-SINGLE_PREC_HB
+DOUBLE_PREC_HB
SET @@global.histogram_type = 1;
SET @@global.histogram_type = DEFAULT;
SELECT @@global.histogram_type;
@@global.histogram_type
-SINGLE_PREC_HB
+DOUBLE_PREC_HB
SET @@global.histogram_type = 0;
SELECT @@global.histogram_type;
@@global.histogram_type
@@ -71,9 +71,9 @@ HISTOGRAM_TYPE DOUBLE_PREC_HB
SET @@global.histogram_type = @start_global_value;
SELECT @@global.histogram_type;
@@global.histogram_type
-SINGLE_PREC_HB
+DOUBLE_PREC_HB
SET @@session.histogram_type = @start_session_value;
SELECT @@session.histogram_type;
@@session.histogram_type
-SINGLE_PREC_HB
+DOUBLE_PREC_HB
set sql_mode='';
diff --git a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_abort_loads.result b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_abort_loads.result
index 1e2beea707e..55dc9a9d57a 100644
--- a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_abort_loads.result
+++ b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_abort_loads.result
@@ -40,6 +40,7 @@ INSERT INTO t1 VALUES (@a,@a,@a,@a,@a,
SET GLOBAL innodb_buffer_pool_dump_now=1;
# Restart server
+# restart
# Abort after 16 pages
SET GLOBAL innodb_buffer_pool_load_pages_abort=16,
@@ -54,6 +55,7 @@ INNODB_BUFFER_POOL_LOAD_INCOMPLETE ON
INNODB_BUFFER_POOL_LOAD_STATUS Buffer pool(s) load aborted on request
# Restart server
+# restart
# Load buffer pool
SET GLOBAL innodb_buffer_pool_load_now=1;
@@ -82,6 +84,7 @@ INNODB_BUFFER_POOL_DUMP_STATUS Buffer pool(s) dump completed at
INNODB_BUFFER_POOL_LOAD_INCOMPLETE OFF
# Restart server
+# restart
# Load buffer pool
SET GLOBAL innodb_buffer_pool_load_now=1;
diff --git a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_at_shutdown_basic.result b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_at_shutdown_basic.result
index ad329cd336f..d16e8d446d3 100644
--- a/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_at_shutdown_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_buffer_pool_dump_at_shutdown_basic.result
@@ -16,4 +16,5 @@ SET GLOBAL innodb_buffer_pool_dump_at_shutdown = "string";
Got one of the listed errors
SET GLOBAL innodb_buffer_pool_dump_at_shutdown = 5;
Got one of the listed errors
+# restart
SET GLOBAL innodb_buffer_pool_dump_at_shutdown = default;
diff --git a/mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result b/mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result
index 9c2e95b3c7c..eec16411144 100644
--- a/mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_checksum_algorithm_basic.result
@@ -26,21 +26,29 @@ SET GLOBAL innodb_checksum_algorithm = 'strict_none';
SELECT @@global.innodb_checksum_algorithm;
@@global.innodb_checksum_algorithm
strict_none
+SET GLOBAL innodb_checksum_algorithm = 'full_crc32';
+SELECT @@global.innodb_checksum_algorithm;
+@@global.innodb_checksum_algorithm
+full_crc32
+SET GLOBAL innodb_checksum_algorithm = 'strict_full_crc32';
+SELECT @@global.innodb_checksum_algorithm;
+@@global.innodb_checksum_algorithm
+strict_full_crc32
SET GLOBAL innodb_checksum_algorithm = '';
ERROR 42000: Variable 'innodb_checksum_algorithm' can't be set to the value of ''
SELECT @@global.innodb_checksum_algorithm;
@@global.innodb_checksum_algorithm
-strict_none
+strict_full_crc32
SET GLOBAL innodb_checksum_algorithm = 'foobar';
ERROR 42000: Variable 'innodb_checksum_algorithm' can't be set to the value of 'foobar'
SELECT @@global.innodb_checksum_algorithm;
@@global.innodb_checksum_algorithm
-strict_none
+strict_full_crc32
SET GLOBAL innodb_checksum_algorithm = 123;
ERROR 42000: Variable 'innodb_checksum_algorithm' can't be set to the value of '123'
SELECT @@global.innodb_checksum_algorithm;
@@global.innodb_checksum_algorithm
-strict_none
+strict_full_crc32
SET GLOBAL innodb_checksum_algorithm = @orig;
SELECT @@global.innodb_checksum_algorithm;
@@global.innodb_checksum_algorithm
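
The extra result lines come from the two checksum settings added for the 10.4 full_crc32 page format; they are ordinary values of the same enum and are set like the existing ones, for example:

    SET GLOBAL innodb_checksum_algorithm = 'full_crc32';
    SELECT @@global.innodb_checksum_algorithm;
    SET GLOBAL innodb_checksum_algorithm = 'strict_full_crc32';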
diff --git a/mysql-test/suite/sys_vars/r/innodb_fil_make_page_dirty_debug_basic.result b/mysql-test/suite/sys_vars/r/innodb_fil_make_page_dirty_debug_basic.result
index abf2cdaf1c4..477eb7fcb61 100644
--- a/mysql-test/suite/sys_vars/r/innodb_fil_make_page_dirty_debug_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_fil_make_page_dirty_debug_basic.result
@@ -17,6 +17,8 @@ ERROR HY000: Variable 'innodb_fil_make_page_dirty_debug' is a GLOBAL variable an
create table t1 (f1 int primary key) engine = innodb;
select space from information_schema.innodb_sys_tables
where name = 'test/t1' into @space_id;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
set global innodb_saved_page_number_debug = 0;
set global innodb_fil_make_page_dirty_debug = @space_id;
drop table t1;
diff --git a/mysql-test/suite/sys_vars/r/innodb_flush_method_func.result b/mysql-test/suite/sys_vars/r/innodb_flush_method_func.result
index a7c8a2d3cc6..afecd9ab6cc 100644
--- a/mysql-test/suite/sys_vars/r/innodb_flush_method_func.result
+++ b/mysql-test/suite/sys_vars/r/innodb_flush_method_func.result
@@ -4,11 +4,13 @@ select @@innodb_flush_method;
fsync
create table t(a serial) engine=innodb;
FLUSH TABLES;
+# restart: --innodb-flush-method=5
select @@innodb_flush_method;
@@innodb_flush_method
O_DIRECT_NO_FSYNC
insert into t values(0);
FLUSH TABLES;
+# restart: --innodb-flush-method=0
select @@innodb_flush_method;
@@innodb_flush_method
fsync
diff --git a/mysql-test/suite/sys_vars/r/innodb_saved_page_number_debug_basic.result b/mysql-test/suite/sys_vars/r/innodb_saved_page_number_debug_basic.result
index 20e2b78e640..b306749dcb0 100644
--- a/mysql-test/suite/sys_vars/r/innodb_saved_page_number_debug_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_saved_page_number_debug_basic.result
@@ -17,6 +17,8 @@ ERROR HY000: Variable 'innodb_saved_page_number_debug' is a GLOBAL variable and
create table t1 (f1 int primary key) engine = innodb;
select space from information_schema.innodb_sys_tables
where name = 'test/t1' into @space_id;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
set global innodb_saved_page_number_debug = 0;
set global innodb_fil_make_page_dirty_debug = @space_id;
drop table t1;
diff --git a/mysql-test/suite/sys_vars/r/max_seeks_for_key_func.result b/mysql-test/suite/sys_vars/r/max_seeks_for_key_func.result
index 1798bd5d6e0..f833f417290 100644
--- a/mysql-test/suite/sys_vars/r/max_seeks_for_key_func.result
+++ b/mysql-test/suite/sys_vars/r/max_seeks_for_key_func.result
@@ -70,6 +70,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 ALL NULL NULL NULL NULL 17 Using where; Using join buffer (flat, BNL join)
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SET MAX_SEEKS_FOR_KEY=1;
EXPLAIN SELECT STRAIGHT_JOIN * FROM t1,t1 AS t2 WHERE t1.b = t2.b;
diff --git a/mysql-test/suite/sys_vars/r/myisam_stats_method_func.result b/mysql-test/suite/sys_vars/r/myisam_stats_method_func.result
index cae0e68765b..1de1d195460 100644
--- a/mysql-test/suite/sys_vars/r/myisam_stats_method_func.result
+++ b/mysql-test/suite/sys_vars/r/myisam_stats_method_func.result
@@ -20,6 +20,7 @@ INSERT INTO t1 SELECT NULL FROM t1;
SET myisam_stats_method = nulls_unequal;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SHOW INDEX FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -34,10 +35,13 @@ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_par
t1 1 a 1 a A 10 NULL NULL YES BTREE
'Set nulls to be equal'
SET myisam_stats_method = nulls_equal;
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= COMPLEMENTARY;
INSERT INTO t1 VALUES (11);
DELETE FROM t1 WHERE a = 11;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SHOW INDEX FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -66,6 +70,7 @@ INSERT INTO t1 VALUES ('bce','def1', 'yuu', NULL);
INSERT INTO t1 VALUES ('bce','def2', NULL, 'quux');
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SHOW INDEX FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -76,6 +81,7 @@ t1 1 a 4 d A 4 NULL NULL YES BTREE
DELETE FROM t1;
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SHOW INDEX FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -84,5 +90,6 @@ t1 1 a 2 b A 0 NULL NULL YES BTREE
t1 1 a 3 c A 0 NULL NULL YES BTREE
t1 1 a 4 d A 0 NULL NULL YES BTREE
SET myisam_stats_method = DEFAULT;
+set @@use_stat_tables= @save_use_stat_tables;
DROP TABLE t1;
SET @@global.myisam_stats_method= @start_value;
diff --git a/mysql-test/suite/sys_vars/r/optimizer_switch_basic.result b/mysql-test/suite/sys_vars/r/optimizer_switch_basic.result
index 87c837986ac..74f4fd1ee07 100644
--- a/mysql-test/suite/sys_vars/r/optimizer_switch_basic.result
+++ b/mysql-test/suite/sys_vars/r/optimizer_switch_basic.result
@@ -1,63 +1,63 @@
SET @start_global_value = @@global.optimizer_switch;
SELECT @start_global_value;
@start_global_value
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
select @@global.optimizer_switch;
@@global.optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
select @@session.optimizer_switch;
@@session.optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
show global variables like 'optimizer_switch';
Variable_name Value
-optimizer_switch index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+optimizer_switch index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
show session variables like 'optimizer_switch';
Variable_name Value
-optimizer_switch index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+optimizer_switch index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
select * from information_schema.global_variables where variable_name='optimizer_switch';
VARIABLE_NAME VARIABLE_VALUE
-OPTIMIZER_SWITCH index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+OPTIMIZER_SWITCH index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
select * from information_schema.session_variables where variable_name='optimizer_switch';
VARIABLE_NAME VARIABLE_VALUE
-OPTIMIZER_SWITCH index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+OPTIMIZER_SWITCH index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
set global optimizer_switch=10;
set session optimizer_switch=5;
select @@global.optimizer_switch;
@@global.optimizer_switch
-index_merge=off,index_merge_union=on,index_merge_sort_union=off,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off
+index_merge=off,index_merge_union=on,index_merge_sort_union=off,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off,condition_pushdown_for_subquery=off,rowid_filter=off,condition_pushdown_from_having=off
select @@session.optimizer_switch;
@@session.optimizer_switch
-index_merge=on,index_merge_union=off,index_merge_sort_union=on,index_merge_intersection=off,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off
+index_merge=on,index_merge_union=off,index_merge_sort_union=on,index_merge_intersection=off,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off,condition_pushdown_for_subquery=off,rowid_filter=off,condition_pushdown_from_having=off
set global optimizer_switch="index_merge_sort_union=on";
set session optimizer_switch="index_merge=off";
select @@global.optimizer_switch;
@@global.optimizer_switch
-index_merge=off,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off
+index_merge=off,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off,condition_pushdown_for_subquery=off,rowid_filter=off,condition_pushdown_from_having=off
select @@session.optimizer_switch;
@@session.optimizer_switch
-index_merge=off,index_merge_union=off,index_merge_sort_union=on,index_merge_intersection=off,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off
+index_merge=off,index_merge_union=off,index_merge_sort_union=on,index_merge_intersection=off,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off,condition_pushdown_for_subquery=off,rowid_filter=off,condition_pushdown_from_having=off
show global variables like 'optimizer_switch';
Variable_name Value
-optimizer_switch index_merge=off,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off
+optimizer_switch index_merge=off,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off,condition_pushdown_for_subquery=off,rowid_filter=off,condition_pushdown_from_having=off
show session variables like 'optimizer_switch';
Variable_name Value
-optimizer_switch index_merge=off,index_merge_union=off,index_merge_sort_union=on,index_merge_intersection=off,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off
+optimizer_switch index_merge=off,index_merge_union=off,index_merge_sort_union=on,index_merge_intersection=off,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off,condition_pushdown_for_subquery=off,rowid_filter=off,condition_pushdown_from_having=off
select * from information_schema.global_variables where variable_name='optimizer_switch';
VARIABLE_NAME VARIABLE_VALUE
-OPTIMIZER_SWITCH index_merge=off,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off
+OPTIMIZER_SWITCH index_merge=off,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off,condition_pushdown_for_subquery=off,rowid_filter=off,condition_pushdown_from_having=off
select * from information_schema.session_variables where variable_name='optimizer_switch';
VARIABLE_NAME VARIABLE_VALUE
-OPTIMIZER_SWITCH index_merge=off,index_merge_union=off,index_merge_sort_union=on,index_merge_intersection=off,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off
+OPTIMIZER_SWITCH index_merge=off,index_merge_union=off,index_merge_sort_union=on,index_merge_intersection=off,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off,condition_pushdown_for_subquery=off,rowid_filter=off,condition_pushdown_from_having=off
set session optimizer_switch="default";
select @@session.optimizer_switch;
@@session.optimizer_switch
-index_merge=off,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off
+index_merge=off,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=off,derived_merge=off,derived_with_keys=off,firstmatch=off,loosescan=off,materialization=off,in_to_exists=off,semijoin=off,partial_match_rowid_merge=off,partial_match_table_scan=off,subquery_cache=off,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=off,semijoin_with_cache=off,join_cache_incremental=off,join_cache_hashed=off,join_cache_bka=off,optimize_join_buffer_size=off,table_elimination=off,extended_keys=off,exists_to_in=off,orderby_uses_equalities=off,condition_pushdown_for_derived=off,split_materialized=off,condition_pushdown_for_subquery=off,rowid_filter=off,condition_pushdown_from_having=off
set optimizer_switch = replace(@@optimizer_switch, '=off', '=on');
Warnings:
Warning 1681 'engine_condition_pushdown=on' is deprecated and will be removed in a future release
select @@optimizer_switch;
@@optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=on,mrr_cost_based=on,mrr_sort_keys=on,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=on,mrr_cost_based=on,mrr_sort_keys=on,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
set global optimizer_switch=1.1;
ERROR 42000: Incorrect argument type to variable 'optimizer_switch'
set global optimizer_switch=1e1;
@@ -69,4 +69,4 @@ ERROR 42000: Variable 'optimizer_switch' can't be set to the value of 'foobar'
SET @@global.optimizer_switch = @start_global_value;
SELECT @@global.optimizer_switch;
@@global.optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
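
The new optimizer_switch baseline adds condition_pushdown_for_subquery, rowid_filter and condition_pushdown_from_having, and flips optimize_join_buffer_size to on by default. Individual flags are still toggled and reset the same way the test does it, e.g.:

    SET SESSION optimizer_switch='rowid_filter=off';
    SELECT @@session.optimizer_switch LIKE '%rowid_filter=off%';
    SET SESSION optimizer_switch='default';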
diff --git a/mysql-test/suite/sys_vars/r/optimizer_use_condition_selectivity_basic.result b/mysql-test/suite/sys_vars/r/optimizer_use_condition_selectivity_basic.result
index a030bae3750..941b7699747 100644
--- a/mysql-test/suite/sys_vars/r/optimizer_use_condition_selectivity_basic.result
+++ b/mysql-test/suite/sys_vars/r/optimizer_use_condition_selectivity_basic.result
@@ -1,33 +1,33 @@
SET @start_global_value = @@global.optimizer_use_condition_selectivity;
SELECT @start_global_value;
@start_global_value
-1
+4
SET @start_session_value = @@session.optimizer_use_condition_selectivity;
SELECT @start_session_value;
@start_session_value
-1
+4
'#--------------------FN_DYNVARS_115_01-------------------------#'
SET @@global.optimizer_use_condition_selectivity = DEFAULT;
SELECT @@global.optimizer_use_condition_selectivity;
@@global.optimizer_use_condition_selectivity
-1
+4
SET @@session.optimizer_use_condition_selectivity = DEFAULT;
SELECT @@session.optimizer_use_condition_selectivity;
@@session.optimizer_use_condition_selectivity
-1
+4
'#--------------------FN_DYNVARS_115_02-------------------------#'
SET @@global.optimizer_use_condition_selectivity = DEFAULT;
SELECT @@global.optimizer_use_condition_selectivity = 1;
@@global.optimizer_use_condition_selectivity = 1
-1
+0
SET @@session.optimizer_use_condition_selectivity = DEFAULT;
SELECT @@session.optimizer_use_condition_selectivity = 1;
@@session.optimizer_use_condition_selectivity = 1
-1
+0
'#--------------------FN_DYNVARS_115_03-------------------------#'
SELECT @@global.optimizer_use_condition_selectivity;
@@global.optimizer_use_condition_selectivity
-1
+4
SET @@global.optimizer_use_condition_selectivity = 0;
Warnings:
Warning 1292 Truncated incorrect optimizer_use_condition_selectiv value: '0'
@@ -63,7 +63,7 @@ SELECT @@global.optimizer_use_condition_selectivity;
'#--------------------FN_DYNVARS_115_04-------------------------#'
SELECT @@session.optimizer_use_condition_selectivity;
@@session.optimizer_use_condition_selectivity
-1
+4
SET @@session.optimizer_use_condition_selectivity = 0;
Warnings:
Warning 1292 Truncated incorrect optimizer_use_condition_selectiv value: '0'
@@ -134,8 +134,8 @@ SELECT @@local.optimizer_use_condition_selectivity = @@session.optimizer_use_con
SET @@global.optimizer_use_condition_selectivity = @start_global_value;
SELECT @@global.optimizer_use_condition_selectivity;
@@global.optimizer_use_condition_selectivity
-1
+4
SET @@session.optimizer_use_condition_selectivity = @start_session_value;
SELECT @@session.optimizer_use_condition_selectivity;
@@session.optimizer_use_condition_selectivity
-1
+4
diff --git a/mysql-test/suite/sys_vars/r/secure_file_priv.result b/mysql-test/suite/sys_vars/r/secure_file_priv.result
index eeeb9a58c0f..74f816df59d 100644
--- a/mysql-test/suite/sys_vars/r/secure_file_priv.result
+++ b/mysql-test/suite/sys_vars/r/secure_file_priv.result
@@ -6,6 +6,8 @@ INSERT INTO t1 VALUES ("one"),("two"),("three"),("four"),("five");
SHOW VARIABLES LIKE 'secure_file_priv';
Variable_name Value
secure_file_priv
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
c1
one
two
diff --git a/mysql-test/suite/sys_vars/r/shared_memory_base_name_basic.result b/mysql-test/suite/sys_vars/r/shared_memory_base_name_basic.result
deleted file mode 100644
index 356b3c9e879..00000000000
--- a/mysql-test/suite/sys_vars/r/shared_memory_base_name_basic.result
+++ /dev/null
@@ -1,21 +0,0 @@
-select @@global.shared_memory_base_name;
-@@global.shared_memory_base_name
-MYSQL_TMP_DIR/mysqld.1.sock
-select @@session.shared_memory_base_name;
-ERROR HY000: Variable 'shared_memory_base_name' is a GLOBAL variable
-show global variables like 'shared_memory_base_name';
-Variable_name Value
-shared_memory_base_name MYSQL_TMP_DIR/mysqld.1.sock
-show session variables like 'shared_memory_base_name';
-Variable_name Value
-shared_memory_base_name MYSQL_TMP_DIR/mysqld.1.sock
-select * from information_schema.global_variables where variable_name='shared_memory_base_name';
-VARIABLE_NAME VARIABLE_VALUE
-SHARED_MEMORY_BASE_NAME MYSQL_TMP_DIR/mysqld.1.sock
-select * from information_schema.session_variables where variable_name='shared_memory_base_name';
-VARIABLE_NAME VARIABLE_VALUE
-SHARED_MEMORY_BASE_NAME MYSQL_TMP_DIR/mysqld.1.sock
-set global shared_memory_base_name=1;
-ERROR HY000: Variable 'shared_memory_base_name' is a read only variable
-set session shared_memory_base_name=1;
-ERROR HY000: Variable 'shared_memory_base_name' is a read only variable
diff --git a/mysql-test/suite/sys_vars/r/shared_memory_basic.result b/mysql-test/suite/sys_vars/r/shared_memory_basic.result
deleted file mode 100644
index ab671af610c..00000000000
--- a/mysql-test/suite/sys_vars/r/shared_memory_basic.result
+++ /dev/null
@@ -1,21 +0,0 @@
-select @@global.shared_memory;
-@@global.shared_memory
-0
-select @@session.shared_memory;
-ERROR HY000: Variable 'shared_memory' is a GLOBAL variable
-show global variables like 'shared_memory';
-Variable_name Value
-shared_memory OFF
-show session variables like 'shared_memory';
-Variable_name Value
-shared_memory OFF
-select * from information_schema.global_variables where variable_name='shared_memory';
-VARIABLE_NAME VARIABLE_VALUE
-SHARED_MEMORY OFF
-select * from information_schema.session_variables where variable_name='shared_memory';
-VARIABLE_NAME VARIABLE_VALUE
-SHARED_MEMORY OFF
-set global shared_memory=1;
-ERROR HY000: Variable 'shared_memory' is a read only variable
-set session shared_memory=1;
-ERROR HY000: Variable 'shared_memory' is a read only variable
diff --git a/mysql-test/suite/sys_vars/r/slave_transaction_retry_errors.result b/mysql-test/suite/sys_vars/r/slave_transaction_retry_errors.result
index a7815bb3f78..1e2f21ddcd1 100644
--- a/mysql-test/suite/sys_vars/r/slave_transaction_retry_errors.result
+++ b/mysql-test/suite/sys_vars/r/slave_transaction_retry_errors.result
@@ -1,20 +1,20 @@
select @@global.slave_transaction_retry_errors;
@@global.slave_transaction_retry_errors
-1213,1205,10,20,400
+1158,1159,1160,1161,1205,1213,1429,2013,12701,10,20,400
select @@session.slave_transaction_retry_errors;
ERROR HY000: Variable 'slave_transaction_retry_errors' is a GLOBAL variable
show global variables like 'slave_transaction_retry_errors';
Variable_name Value
-slave_transaction_retry_errors 1213,1205,10,20,400
+slave_transaction_retry_errors 1158,1159,1160,1161,1205,1213,1429,2013,12701,10,20,400
show session variables like 'slave_transaction_retry_errors';
Variable_name Value
-slave_transaction_retry_errors 1213,1205,10,20,400
+slave_transaction_retry_errors 1158,1159,1160,1161,1205,1213,1429,2013,12701,10,20,400
select * from information_schema.global_variables where variable_name='slave_transaction_retry_errors';
VARIABLE_NAME VARIABLE_VALUE
-SLAVE_TRANSACTION_RETRY_ERRORS 1213,1205,10,20,400
+SLAVE_TRANSACTION_RETRY_ERRORS 1158,1159,1160,1161,1205,1213,1429,2013,12701,10,20,400
select * from information_schema.session_variables where variable_name='slave_transaction_retry_errors';
VARIABLE_NAME VARIABLE_VALUE
-SLAVE_TRANSACTION_RETRY_ERRORS 1213,1205,10,20,400
+SLAVE_TRANSACTION_RETRY_ERRORS 1158,1159,1160,1161,1205,1213,1429,2013,12701,10,20,400
set global slave_transaction_retry_errors=1;
ERROR HY000: Variable 'slave_transaction_retry_errors' is a read only variable
set session slave_transaction_retry_errors=1;
diff --git a/mysql-test/suite/sys_vars/r/sql_mode_basic.result b/mysql-test/suite/sys_vars/r/sql_mode_basic.result
index a200f620a7c..d911e80b780 100644
--- a/mysql-test/suite/sys_vars/r/sql_mode_basic.result
+++ b/mysql-test/suite/sys_vars/r/sql_mode_basic.result
@@ -367,7 +367,15 @@ SELECT @@global.sql_mode;
@@global.sql_mode
REAL_AS_FLOAT,PIPES_AS_CONCAT,ANSI_QUOTES,IGNORE_SPACE,IGNORE_BAD_TABLE_OPTIONS,ONLY_FULL_GROUP_BY,NO_UNSIGNED_SUBTRACTION,NO_DIR_IN_CREATE,POSTGRESQL,ORACLE,MSSQL,DB2,MAXDB,NO_KEY_OPTIONS,NO_TABLE_OPTIONS,NO_FIELD_OPTIONS,MYSQL323,MYSQL40,ANSI,NO_AUTO_VALUE_ON_ZERO,NO_BACKSLASH_ESCAPES,STRICT_TRANS_TABLES,STRICT_ALL_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ALLOW_INVALID_DATES,ERROR_FOR_DIVISION_BY_ZERO,TRADITIONAL,NO_AUTO_CREATE_USER,HIGH_NOT_PRECEDENCE,NO_ENGINE_SUBSTITUTION,PAD_CHAR_TO_FULL_LENGTH,EMPTY_STRING_IS_NULL,SIMULTANEOUS_ASSIGNMENT
SET @@global.sql_mode = 17179869184;
-ERROR 42000: Variable 'sql_mode' can't be set to the value of '17179869184'
+SELECT @@global.sql_mode;
+@@global.sql_mode
+TIME_ROUND_FRACTIONAL
+SET @@global.sql_mode = 34359738367;
+SELECT @@global.sql_mode;
+@@global.sql_mode
+REAL_AS_FLOAT,PIPES_AS_CONCAT,ANSI_QUOTES,IGNORE_SPACE,IGNORE_BAD_TABLE_OPTIONS,ONLY_FULL_GROUP_BY,NO_UNSIGNED_SUBTRACTION,NO_DIR_IN_CREATE,POSTGRESQL,ORACLE,MSSQL,DB2,MAXDB,NO_KEY_OPTIONS,NO_TABLE_OPTIONS,NO_FIELD_OPTIONS,MYSQL323,MYSQL40,ANSI,NO_AUTO_VALUE_ON_ZERO,NO_BACKSLASH_ESCAPES,STRICT_TRANS_TABLES,STRICT_ALL_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ALLOW_INVALID_DATES,ERROR_FOR_DIVISION_BY_ZERO,TRADITIONAL,NO_AUTO_CREATE_USER,HIGH_NOT_PRECEDENCE,NO_ENGINE_SUBSTITUTION,PAD_CHAR_TO_FULL_LENGTH,EMPTY_STRING_IS_NULL,SIMULTANEOUS_ASSIGNMENT,TIME_ROUND_FRACTIONAL
+SET @@global.sql_mode = 34359738368;
+ERROR 42000: Variable 'sql_mode' can't be set to the value of '34359738368'
SET @@global.sql_mode = 0.4;
ERROR 42000: Incorrect argument type to variable 'sql_mode'
'#---------------------FN_DYNVARS_152_08----------------------#'
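
The new boundary values in this result follow from the sql_mode bitmask: TIME_ROUND_FRACTIONAL is bit 34, so 17179869184 (2^34) selects exactly that flag, 34359738367 (2^35 - 1) sets every defined flag, and 34359738368 (2^35) is the first out-of-range value, hence the error. The arithmetic can be checked directly:

    SELECT 1<<34 AS time_round_fractional_bit, (1<<35)-1 AS all_flags, 1<<35 AS first_invalid;
    # 17179869184, 34359738367, 34359738368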
diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
index 22105923764..159d8ca7ae1 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result
@@ -421,11 +421,11 @@ GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE crc32
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE ENUM
-VARIABLE_COMMENT The algorithm InnoDB uses for page checksumming. Possible values are CRC32 (hardware accelerated if the CPU supports it) write crc32, allow any of the other checksums to match when reading; STRICT_CRC32 write crc32, do not allow other algorithms to match when reading; INNODB write a software calculated checksum, allow any other checksums to match when reading; STRICT_INNODB write a software calculated checksum, do not allow other algorithms to match when reading; NONE write a constant magic number, do not do any checksum verification when reading (same as innodb_checksums=OFF); STRICT_NONE write a constant magic number, do not allow values other than that magic number when reading; Files updated when this option is set to crc32 or strict_crc32 will not be readable by MariaDB versions older than 10.0.4
+VARIABLE_COMMENT The algorithm InnoDB uses for page checksumming. Possible values are FULL_CRC32 for new files, always use CRC-32C; for old, see CRC32 below; STRICT_FULL_CRC32 for new files, always use CRC-32C; for old, see STRICT_CRC32 below; CRC32 write crc32, allow any of the other checksums to match when reading; STRICT_CRC32 write crc32, do not allow other algorithms to match when reading; INNODB write a software calculated checksum, allow any other checksums to match when reading; STRICT_INNODB write a software calculated checksum, do not allow other algorithms to match when reading; NONE write a constant magic number, do not do any checksum verification when reading (same as innodb_checksums=OFF); STRICT_NONE write a constant magic number, do not allow values other than that magic number when reading; Files updated when this option is set to crc32 or strict_crc32 will not be readable by MariaDB versions older than 10.0.4; new files created with full_crc32 are readable by MariaDB 10.4.3+
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
-ENUM_VALUE_LIST crc32,strict_crc32,innodb,strict_innodb,none,strict_none
+ENUM_VALUE_LIST crc32,strict_crc32,innodb,strict_innodb,none,strict_none,full_crc32,strict_full_crc32
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME INNODB_CMP_PER_INDEX_ENABLED
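
The widened comment and enum list for innodb_checksum_algorithm can also be inspected at runtime through information_schema.system_variables, which these sysvars results are built from. A plain query sketch (not part of the test):

    SELECT variable_name, enum_value_list
    FROM information_schema.system_variables
    WHERE variable_name = 'INNODB_CHECKSUM_ALGORITHM';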
diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_server_embedded,32bit.rdiff
index 5d47090a875..36254fa104b 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded,32bit.rdiff
+++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded,32bit.rdiff
@@ -1,6 +1,6 @@
---- sysvars_server_embedded.result 2017-11-17 17:00:22.462630239 +0100
-+++ sysvars_server_embedded,32bit.reject 2017-11-17 19:09:55.472258411 +0100
-@@ -58,7 +58,7 @@
+--- sysvars_server_embedded.result
++++ sysvars_server_embedded,32bit.result
+@@ -74,7 +74,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE 1
VARIABLE_SCOPE SESSION
@@ -9,7 +9,7 @@
VARIABLE_COMMENT Auto-increment columns are incremented by this
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 65535
-@@ -72,7 +72,7 @@
+@@ -88,7 +88,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1
VARIABLE_SCOPE SESSION
@@ -18,7 +18,7 @@
VARIABLE_COMMENT Offset added to Auto-increment columns. Used when auto-increment-increment != 1
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 65535
-@@ -86,7 +86,7 @@
+@@ -102,7 +102,7 @@
GLOBAL_VALUE_ORIGIN AUTO
DEFAULT_VALUE 150
VARIABLE_SCOPE GLOBAL
@@ -27,7 +27,7 @@
VARIABLE_COMMENT The number of outstanding connection requests MariaDB can have. This comes into play when the main MariaDB thread gets very many connection requests in a very short time
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 65535
-@@ -159,7 +159,7 @@
+@@ -175,7 +175,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The size of the transactional cache for updates to transactional engines for the binary log. If you often use transactions containing many statements, you can increase this to get more performance
NUMERIC_MIN_VALUE 4096
@@ -36,7 +36,7 @@
NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -184,10 +184,10 @@
+@@ -200,10 +200,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE GLOBAL
@@ -49,7 +49,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -198,10 +198,10 @@
+@@ -214,10 +214,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 100000
VARIABLE_SCOPE GLOBAL
@@ -62,7 +62,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -229,7 +229,7 @@
+@@ -245,7 +245,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The size of file cache for the binary log
NUMERIC_MIN_VALUE 8192
@@ -71,7 +71,7 @@
NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -285,7 +285,7 @@
+@@ -301,7 +301,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The size of the statement cache for updates to non-transactional engines for the binary log. If you often use statements updating a great number of rows, you can increase this to get more performance.
NUMERIC_MIN_VALUE 4096
@@ -80,7 +80,7 @@
NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -299,7 +299,7 @@
+@@ -315,7 +315,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread!
NUMERIC_MIN_VALUE 0
@@ -89,7 +89,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -562,7 +562,7 @@
+@@ -578,7 +578,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE 10
VARIABLE_SCOPE GLOBAL
@@ -98,7 +98,7 @@
VARIABLE_COMMENT The number of seconds the mysqld server is waiting for a connect packet before responding with 'Bad handshake'
NUMERIC_MIN_VALUE 2
NUMERIC_MAX_VALUE 31536000
-@@ -618,7 +618,7 @@
+@@ -648,7 +648,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 15
VARIABLE_SCOPE SESSION
@@ -107,7 +107,7 @@
VARIABLE_COMMENT Long search depth for the two-step deadlock detection
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 33
-@@ -632,7 +632,7 @@
+@@ -662,7 +662,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4
VARIABLE_SCOPE SESSION
@@ -116,7 +116,7 @@
VARIABLE_COMMENT Short search depth for the two-step deadlock detection
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 32
-@@ -646,7 +646,7 @@
+@@ -676,7 +676,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 50000000
VARIABLE_SCOPE SESSION
@@ -125,7 +125,7 @@
VARIABLE_COMMENT Long timeout for the two-step deadlock detection (in microseconds)
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -660,7 +660,7 @@
+@@ -690,7 +690,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 10000
VARIABLE_SCOPE SESSION
@@ -134,7 +134,7 @@
VARIABLE_COMMENT Short timeout for the two-step deadlock detection (in microseconds)
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -716,7 +716,7 @@
+@@ -746,7 +746,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE SESSION
@@ -143,7 +143,7 @@
VARIABLE_COMMENT The default week format used by WEEK() functions
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 7
-@@ -730,7 +730,7 @@
+@@ -760,7 +760,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 100
VARIABLE_SCOPE GLOBAL
@@ -152,7 +152,7 @@
VARIABLE_COMMENT After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -744,7 +744,7 @@
+@@ -774,7 +774,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 300
VARIABLE_SCOPE GLOBAL
@@ -161,7 +161,7 @@
VARIABLE_COMMENT How long a INSERT DELAYED thread should wait for INSERT statements before terminating
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 31536000
-@@ -758,7 +758,7 @@
+@@ -788,7 +788,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1000
VARIABLE_SCOPE GLOBAL
@@ -170,7 +170,7 @@
VARIABLE_COMMENT What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -786,7 +786,7 @@
+@@ -816,7 +816,7 @@
GLOBAL_VALUE_ORIGIN SQL
DEFAULT_VALUE 4
VARIABLE_SCOPE SESSION
@@ -179,7 +179,7 @@
VARIABLE_COMMENT Precision of the result of '/' operator will be increased on that value
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 38
-@@ -884,7 +884,7 @@
+@@ -928,7 +928,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE GLOBAL
@@ -188,7 +188,7 @@
VARIABLE_COMMENT If non-zero, binary logs will be purged after expire_logs_days days; possible purges happen at startup and at binary log rotation
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 99
-@@ -926,7 +926,7 @@
+@@ -970,7 +970,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1
VARIABLE_SCOPE GLOBAL
@@ -197,7 +197,7 @@
VARIABLE_COMMENT The number of connections on extra-port
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 100000
-@@ -968,7 +968,7 @@
+@@ -1012,7 +1012,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE GLOBAL
@@ -206,7 +206,7 @@
VARIABLE_COMMENT A dedicated thread is created to flush all tables at the given interval
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 31536000
-@@ -1010,7 +1010,7 @@
+@@ -1054,7 +1054,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 84
VARIABLE_SCOPE GLOBAL
@@ -215,7 +215,7 @@
VARIABLE_COMMENT The maximum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable
NUMERIC_MIN_VALUE 10
NUMERIC_MAX_VALUE 84
-@@ -1024,7 +1024,7 @@
+@@ -1068,7 +1068,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4
VARIABLE_SCOPE GLOBAL
@@ -224,7 +224,7 @@
VARIABLE_COMMENT The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 84
-@@ -1038,7 +1038,7 @@
+@@ -1082,7 +1082,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 20
VARIABLE_SCOPE GLOBAL
@@ -233,7 +233,7 @@
VARIABLE_COMMENT Number of best matches to use for query expansion
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1000
-@@ -1097,7 +1097,7 @@
+@@ -1141,7 +1141,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The maximum length of the result of function GROUP_CONCAT()
NUMERIC_MIN_VALUE 4
@@ -242,7 +242,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -1248,7 +1248,7 @@
+@@ -1292,7 +1292,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE SESSION
@@ -251,7 +251,7 @@
VARIABLE_COMMENT Number of bytes used for a histogram. If set to 0, no histograms are created by ANALYZE.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 255
-@@ -1276,7 +1276,7 @@
+@@ -1320,7 +1320,7 @@
GLOBAL_VALUE_ORIGIN AUTO
DEFAULT_VALUE 128
VARIABLE_SCOPE GLOBAL
@@ -260,7 +260,7 @@
VARIABLE_COMMENT How many host names should be cached to avoid resolving.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 65536
-@@ -1430,7 +1430,7 @@
+@@ -1474,7 +1474,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 28800
VARIABLE_SCOPE SESSION
@@ -269,20 +269,7 @@
VARIABLE_COMMENT The number of seconds the server waits for activity on an interactive connection before closing it
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 31536000
-@@ -1444,10 +1444,10 @@
- GLOBAL_VALUE_ORIGIN COMPILE-TIME
- DEFAULT_VALUE 1000
- VARIABLE_SCOPE SESSION
--VARIABLE_TYPE BIGINT UNSIGNED
-+VARIABLE_TYPE INT UNSIGNED
- VARIABLE_COMMENT The minimum number of scalar elements in the value list of IN predicate that triggers its conversion to IN subquery
- NUMERIC_MIN_VALUE 0
--NUMERIC_MAX_VALUE 18446744073709551615
-+NUMERIC_MAX_VALUE 4294967295
- NUMERIC_BLOCK_SIZE 1
- ENUM_VALUE_LIST NULL
- READ_ONLY NO
-@@ -1475,7 +1475,7 @@
+@@ -1505,7 +1505,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The size of the buffer that is used for joins
NUMERIC_MIN_VALUE 128
@@ -291,7 +278,7 @@
NUMERIC_BLOCK_SIZE 128
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -1500,7 +1500,7 @@
+@@ -1530,7 +1530,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 2
VARIABLE_SCOPE SESSION
@@ -300,7 +287,7 @@
VARIABLE_COMMENT Controls what join operations can be executed with join buffers. Odd numbers are used for plain join buffers while even numbers are used for linked buffers
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 8
-@@ -1531,7 +1531,7 @@
+@@ -1561,7 +1561,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The size of the buffer used for index blocks for MyISAM tables. Increase this to get better index handling (for all reads and multiple writes) to as much as you can afford
NUMERIC_MIN_VALUE 0
@@ -309,7 +296,7 @@
NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -1738,7 +1738,7 @@
+@@ -1768,7 +1768,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 86400
VARIABLE_SCOPE SESSION
@@ -318,7 +305,7 @@
VARIABLE_COMMENT Timeout in seconds to wait for a lock before returning an error.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 31536000
-@@ -1906,7 +1906,7 @@
+@@ -1936,7 +1936,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1
VARIABLE_SCOPE SESSION
@@ -327,7 +314,7 @@
VARIABLE_COMMENT Write to slow log every #th slow query. Set to 1 to log everything. Increase it to reduce the size of the slow or the performance impact of slow logging
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -1948,7 +1948,7 @@
+@@ -1978,7 +1978,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 2
VARIABLE_SCOPE SESSION
@@ -336,7 +323,7 @@
VARIABLE_COMMENT Log some not critical warnings to the general log file.Value can be between 0 and 11. Higher values mean more verbosity
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -1990,7 +1990,7 @@
+@@ -2020,7 +2020,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 16777216
VARIABLE_SCOPE SESSION
@@ -345,7 +332,7 @@
VARIABLE_COMMENT Max packet length to send to or receive from the server
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 1073741824
-@@ -2000,14 +2000,14 @@
+@@ -2030,14 +2030,14 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME MAX_BINLOG_CACHE_SIZE
SESSION_VALUE NULL
@@ -363,7 +350,7 @@
NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -2018,7 +2018,7 @@
+@@ -2048,7 +2048,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1073741824
VARIABLE_SCOPE GLOBAL
@@ -372,7 +359,7 @@
VARIABLE_COMMENT Binary log will be rotated automatically when the size exceeds this value.
NUMERIC_MIN_VALUE 4096
NUMERIC_MAX_VALUE 1073741824
-@@ -2028,14 +2028,14 @@
+@@ -2058,14 +2058,14 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME MAX_BINLOG_STMT_CACHE_SIZE
SESSION_VALUE NULL
@@ -390,16 +377,16 @@
NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -2046,7 +2046,7 @@
+@@ -2076,7 +2076,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 151
VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The number of simultaneous clients allowed
- NUMERIC_MIN_VALUE 1
+ NUMERIC_MIN_VALUE 10
NUMERIC_MAX_VALUE 100000
-@@ -2060,7 +2060,7 @@
+@@ -2090,7 +2090,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 100
VARIABLE_SCOPE GLOBAL
@@ -408,7 +395,7 @@
VARIABLE_COMMENT If there is more than this number of interrupted connections from a host this host will be blocked from further connections
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -2074,7 +2074,7 @@
+@@ -2104,7 +2104,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 20
VARIABLE_SCOPE SESSION
@@ -417,7 +404,7 @@
VARIABLE_COMMENT Don't start more than this number of threads to handle INSERT DELAYED statements. If set to zero INSERT DELAYED will be not used
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16384
-@@ -2102,7 +2102,7 @@
+@@ -2132,7 +2132,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 64
VARIABLE_SCOPE SESSION
@@ -426,7 +413,7 @@
VARIABLE_COMMENT Max number of errors/warnings to store for a statement
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 65535
-@@ -2119,7 +2119,7 @@
+@@ -2149,7 +2149,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Don't allow creation of heap tables bigger than this
NUMERIC_MIN_VALUE 16384
@@ -435,7 +422,7 @@
NUMERIC_BLOCK_SIZE 1024
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -2130,7 +2130,7 @@
+@@ -2160,7 +2160,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 20
VARIABLE_SCOPE SESSION
@@ -444,7 +431,7 @@
VARIABLE_COMMENT Don't start more than this number of threads to handle INSERT DELAYED statements. If set to zero INSERT DELAYED will be not used
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16384
-@@ -2158,7 +2158,7 @@
+@@ -2188,7 +2188,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1024
VARIABLE_SCOPE SESSION
@@ -453,7 +440,7 @@
VARIABLE_COMMENT Max number of bytes in sorted records
NUMERIC_MIN_VALUE 4
NUMERIC_MAX_VALUE 8388608
-@@ -2172,7 +2172,7 @@
+@@ -2202,7 +2202,7 @@
GLOBAL_VALUE_ORIGIN AUTO
DEFAULT_VALUE 1048576
VARIABLE_SCOPE GLOBAL
@@ -462,16 +449,7 @@
VARIABLE_COMMENT The maximum BLOB length to send to server from mysql_send_long_data API. Deprecated option; use max_allowed_packet instead.
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 4294967295
-@@ -2186,7 +2186,7 @@
- GLOBAL_VALUE_ORIGIN COMPILE-TIME
- DEFAULT_VALUE 16382
- VARIABLE_SCOPE GLOBAL
--VARIABLE_TYPE BIGINT UNSIGNED
-+VARIABLE_TYPE INT UNSIGNED
- VARIABLE_COMMENT Maximum number of prepared statements in the server
- NUMERIC_MIN_VALUE 0
- NUMERIC_MAX_VALUE 1048576
-@@ -2200,7 +2200,7 @@
+@@ -2244,7 +2244,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4294967295
VARIABLE_SCOPE SESSION
@@ -480,7 +458,16 @@
VARIABLE_COMMENT Maximum number of iterations when executing recursive queries
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -2214,7 +2214,7 @@
+@@ -2261,7 +2261,7 @@
+ VARIABLE_TYPE BIGINT UNSIGNED
+ VARIABLE_COMMENT The maximum size of the container of a rowid filter
+ NUMERIC_MIN_VALUE 1024
+-NUMERIC_MAX_VALUE 18446744073709551615
++NUMERIC_MAX_VALUE 4294967295
+ NUMERIC_BLOCK_SIZE 1
+ ENUM_VALUE_LIST NULL
+ READ_ONLY NO
+@@ -2272,7 +2272,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4294967295
VARIABLE_SCOPE SESSION
@@ -489,7 +476,7 @@
VARIABLE_COMMENT Limit assumed max number of seeks when looking up rows based on a key
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -2242,7 +2242,7 @@
+@@ -2300,7 +2300,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1024
VARIABLE_SCOPE SESSION
@@ -498,7 +485,7 @@
VARIABLE_COMMENT The number of bytes to use when sorting BLOB or TEXT values (only the first max_sort_length bytes of each value are used; the rest are ignored)
NUMERIC_MIN_VALUE 4
NUMERIC_MAX_VALUE 8388608
-@@ -2256,7 +2256,7 @@
+@@ -2314,7 +2314,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE SESSION
@@ -507,7 +494,7 @@
VARIABLE_COMMENT Maximum stored procedure recursion depth
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 255
-@@ -2284,7 +2284,7 @@
+@@ -2342,7 +2342,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 32
VARIABLE_SCOPE SESSION
@@ -516,7 +503,7 @@
VARIABLE_COMMENT Unused, will be removed.
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -2312,7 +2312,7 @@
+@@ -2370,7 +2370,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4294967295
VARIABLE_SCOPE GLOBAL
@@ -525,7 +512,7 @@
VARIABLE_COMMENT After this many write locks, allow some read locks to run in between
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -2326,7 +2326,7 @@
+@@ -2384,7 +2384,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1024
VARIABLE_SCOPE GLOBAL
@@ -534,7 +521,7 @@
VARIABLE_COMMENT Unused
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 1048576
-@@ -2340,7 +2340,7 @@
+@@ -2398,7 +2398,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 8
VARIABLE_SCOPE GLOBAL
@@ -543,7 +530,7 @@
VARIABLE_COMMENT Unused
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 1024
-@@ -2354,7 +2354,7 @@
+@@ -2412,7 +2412,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE SESSION
@@ -552,7 +539,7 @@
VARIABLE_COMMENT Don't write queries to slow log that examine fewer rows than that
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -2368,7 +2368,7 @@
+@@ -2426,7 +2426,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 262144
VARIABLE_SCOPE SESSION
@@ -561,7 +548,7 @@
VARIABLE_COMMENT Size of buffer to use when using MRR with range access
NUMERIC_MIN_VALUE 8192
NUMERIC_MAX_VALUE 2147483647
-@@ -2382,10 +2382,10 @@
+@@ -2440,10 +2440,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 256
VARIABLE_SCOPE SESSION
@@ -574,7 +561,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -2396,7 +2396,7 @@
+@@ -2454,7 +2454,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1024
VARIABLE_SCOPE GLOBAL
@@ -583,7 +570,7 @@
VARIABLE_COMMENT Block size to be used for MyISAM index pages
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 16384
-@@ -2410,7 +2410,7 @@
+@@ -2468,7 +2468,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 6
VARIABLE_SCOPE GLOBAL
@@ -592,7 +579,7 @@
VARIABLE_COMMENT Default pointer size to be used for MyISAM tables
NUMERIC_MIN_VALUE 2
NUMERIC_MAX_VALUE 7
-@@ -2420,9 +2420,9 @@
+@@ -2478,9 +2478,9 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME MYISAM_MAX_SORT_FILE_SIZE
SESSION_VALUE NULL
@@ -604,7 +591,7 @@
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Don't use the fast sort index method to created index if the temporary file would get bigger than this
-@@ -2434,14 +2434,14 @@
+@@ -2492,14 +2492,14 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME MYISAM_MMAP_SIZE
SESSION_VALUE NULL
@@ -622,7 +609,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY YES
-@@ -2466,10 +2466,10 @@
+@@ -2524,10 +2524,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1
VARIABLE_SCOPE SESSION
@@ -635,7 +622,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -2483,7 +2483,7 @@
+@@ -2541,7 +2541,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The buffer that is allocated when sorting the index when doing a REPAIR or when creating indexes with CREATE INDEX or ALTER TABLE
NUMERIC_MIN_VALUE 4096
@@ -644,7 +631,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -2536,7 +2536,7 @@
+@@ -2594,7 +2594,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 16384
VARIABLE_SCOPE SESSION
@@ -653,7 +640,7 @@
VARIABLE_COMMENT Buffer length for TCP/IP and socket communication
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 1048576
-@@ -2550,7 +2550,7 @@
+@@ -2608,7 +2608,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 30
VARIABLE_SCOPE SESSION
@@ -662,7 +649,7 @@
VARIABLE_COMMENT Number of seconds to wait for more data from a connection before aborting the read
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 31536000
-@@ -2564,7 +2564,7 @@
+@@ -2622,7 +2622,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 10
VARIABLE_SCOPE SESSION
@@ -671,7 +658,7 @@
VARIABLE_COMMENT If a read on a communication port is interrupted, retry this many times before giving up
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -2578,7 +2578,7 @@
+@@ -2636,7 +2636,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 60
VARIABLE_SCOPE SESSION
@@ -680,7 +667,7 @@
VARIABLE_COMMENT Number of seconds to wait for a block to be written to a connection before aborting the write
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 31536000
-@@ -2648,7 +2648,7 @@
+@@ -2706,7 +2706,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1
VARIABLE_SCOPE SESSION
@@ -689,7 +676,7 @@
VARIABLE_COMMENT Controls the heuristic(s) applied during query optimization to prune less-promising partial plans from the optimizer search space. Meaning: 0 - do not apply any heuristic, thus perform exhaustive search; 1 - prune plans based on number of retrieved rows
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1
-@@ -2662,7 +2662,7 @@
+@@ -2720,7 +2720,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 62
VARIABLE_SCOPE SESSION
@@ -698,7 +685,7 @@
VARIABLE_COMMENT Maximum depth of search performed by the query optimizer. Values larger than the number of relations in a query result in better query plans, but take longer to compile a query. Values smaller than the number of tables in a relation result in faster optimization, but may produce very bad query plans. If set to 0, the system will automatically pick a reasonable value.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 62
-@@ -2676,7 +2676,7 @@
+@@ -2734,7 +2734,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 100
VARIABLE_SCOPE SESSION
@@ -707,16 +694,29 @@
VARIABLE_COMMENT Controls number of record samples to check condition selectivity
NUMERIC_MIN_VALUE 10
NUMERIC_MAX_VALUE 4294967295
-@@ -2704,7 +2704,7 @@
+@@ -2762,10 +2762,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
- DEFAULT_VALUE 1
+ DEFAULT_VALUE 1048576
+ VARIABLE_SCOPE SESSION
+-VARIABLE_TYPE BIGINT UNSIGNED
++VARIABLE_TYPE INT UNSIGNED
+ VARIABLE_COMMENT Maximum allowed size of an optimizer trace
+ NUMERIC_MIN_VALUE 0
+-NUMERIC_MAX_VALUE 18446744073709551615
++NUMERIC_MAX_VALUE 4294967295
+ NUMERIC_BLOCK_SIZE 1
+ ENUM_VALUE_LIST NULL
+ READ_ONLY NO
+@@ -2790,7 +2790,7 @@
+ GLOBAL_VALUE_ORIGIN COMPILE-TIME
+ DEFAULT_VALUE 4
VARIABLE_SCOPE SESSION
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT Controls selectivity of which conditions the optimizer takes into account to calculate cardinality of a partial join when it searches for the best execution plan Meaning: 1 - use selectivity of index backed range conditions to calculate the cardinality of a partial join if the last joined table is accessed by full table scan or an index scan, 2 - use selectivity of index backed range conditions to calculate the cardinality of a partial join in any case, 3 - additionally always use selectivity of range conditions that are not backed by any index to calculate the cardinality of a partial join, 4 - use histograms to calculate selectivity of range conditions that are not backed by any index to calculate the cardinality of a partial join.5 - additionally use selectivity of certain non-range predicates calculated on record samples
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 5
-@@ -2732,7 +2732,7 @@
+@@ -2818,7 +2818,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -725,7 +725,7 @@
VARIABLE_COMMENT Maximum number of instrumented user@host accounts. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -2746,7 +2746,7 @@
+@@ -2832,7 +2832,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -734,7 +734,7 @@
VARIABLE_COMMENT Size of the statement digest. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 200
-@@ -2760,7 +2760,7 @@
+@@ -2846,7 +2846,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -743,7 +743,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_STAGES_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -2774,7 +2774,7 @@
+@@ -2860,7 +2860,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -752,7 +752,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_STAGES_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
-@@ -2788,7 +2788,7 @@
+@@ -2874,7 +2874,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -761,7 +761,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_STATEMENTS_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -2802,7 +2802,7 @@
+@@ -2898,7 +2898,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -770,7 +770,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_STATEMENTS_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
-@@ -2816,7 +2816,7 @@
+@@ -2902,7 +2902,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -779,7 +779,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_WAITS_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -2830,7 +2830,7 @@
+@@ -2916,7 +2916,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -788,7 +788,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_WAITS_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
-@@ -2844,7 +2844,7 @@
+@@ -2930,7 +2930,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -797,7 +797,7 @@
VARIABLE_COMMENT Maximum number of instrumented hosts. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -2858,7 +2858,7 @@
+@@ -2944,7 +2944,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 80
VARIABLE_SCOPE GLOBAL
@@ -806,7 +806,7 @@
VARIABLE_COMMENT Maximum number of condition instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -2872,7 +2872,7 @@
+@@ -2958,7 +2958,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -815,7 +815,7 @@
VARIABLE_COMMENT Maximum number of instrumented condition objects. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -2886,7 +2886,7 @@
+@@ -2972,7 +2972,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1024
VARIABLE_SCOPE GLOBAL
@@ -824,7 +824,7 @@
VARIABLE_COMMENT Maximum length considered for digest text, when stored in performance_schema tables.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1048576
-@@ -2900,7 +2900,7 @@
+@@ -2986,7 +2986,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 50
VARIABLE_SCOPE GLOBAL
@@ -833,7 +833,7 @@
VARIABLE_COMMENT Maximum number of file instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -2914,7 +2914,7 @@
+@@ -3000,7 +3000,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 32768
VARIABLE_SCOPE GLOBAL
@@ -842,7 +842,7 @@
VARIABLE_COMMENT Maximum number of opened instrumented files.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1048576
-@@ -2928,7 +2928,7 @@
+@@ -3014,7 +3014,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -851,7 +851,7 @@
VARIABLE_COMMENT Maximum number of instrumented files. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -2942,7 +2942,7 @@
+@@ -3028,7 +3028,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 200
VARIABLE_SCOPE GLOBAL
@@ -860,7 +860,7 @@
VARIABLE_COMMENT Maximum number of mutex instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -2956,7 +2956,7 @@
+@@ -3042,7 +3042,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -869,7 +869,7 @@
VARIABLE_COMMENT Maximum number of instrumented MUTEX objects. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 104857600
-@@ -2970,7 +2970,7 @@
+@@ -3056,7 +3056,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 40
VARIABLE_SCOPE GLOBAL
@@ -878,7 +878,7 @@
VARIABLE_COMMENT Maximum number of rwlock instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -2984,7 +2984,7 @@
+@@ -3070,7 +3070,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -887,7 +887,7 @@
VARIABLE_COMMENT Maximum number of instrumented RWLOCK objects. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 104857600
-@@ -2998,7 +2998,7 @@
+@@ -3084,7 +3084,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 10
VARIABLE_SCOPE GLOBAL
@@ -896,7 +896,7 @@
VARIABLE_COMMENT Maximum number of socket instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -3012,7 +3012,7 @@
+@@ -3098,7 +3098,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -905,7 +905,7 @@
VARIABLE_COMMENT Maximum number of opened instrumented sockets. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3026,7 +3026,7 @@
+@@ -3112,7 +3112,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 160
VARIABLE_SCOPE GLOBAL
@@ -914,16 +914,16 @@
VARIABLE_COMMENT Maximum number of stage instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -3040,7 +3040,7 @@
+@@ -3126,7 +3126,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
- DEFAULT_VALUE 191
+ DEFAULT_VALUE 202
VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT Maximum number of statement instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -3054,7 +3054,7 @@
+@@ -3140,7 +3140,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -932,7 +932,7 @@
VARIABLE_COMMENT Maximum number of opened instrumented tables. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3068,7 +3068,7 @@
+@@ -3154,7 +3154,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -941,7 +941,7 @@
VARIABLE_COMMENT Maximum number of instrumented tables. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3082,7 +3082,7 @@
+@@ -3168,7 +3168,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 50
VARIABLE_SCOPE GLOBAL
@@ -950,7 +950,7 @@
VARIABLE_COMMENT Maximum number of thread instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -3096,7 +3096,7 @@
+@@ -3182,7 +3182,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -959,7 +959,7 @@
VARIABLE_COMMENT Maximum number of instrumented threads. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3110,7 +3110,7 @@
+@@ -3196,7 +3196,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -968,7 +968,7 @@
VARIABLE_COMMENT Size of session attribute string buffer per thread. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3124,7 +3124,7 @@
+@@ -3210,7 +3210,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 100
VARIABLE_SCOPE GLOBAL
@@ -977,7 +977,7 @@
VARIABLE_COMMENT Maximum number of rows in SETUP_ACTORS.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1024
-@@ -3138,7 +3138,7 @@
+@@ -3224,7 +3224,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 100
VARIABLE_SCOPE GLOBAL
@@ -986,7 +986,7 @@
VARIABLE_COMMENT Maximum number of rows in SETUP_OBJECTS.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1048576
-@@ -3152,7 +3152,7 @@
+@@ -3238,7 +3238,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -995,7 +995,7 @@
VARIABLE_COMMENT Maximum number of instrumented users. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3222,7 +3222,7 @@
+@@ -3294,7 +3294,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 32768
VARIABLE_SCOPE SESSION
@@ -1004,7 +1004,7 @@
VARIABLE_COMMENT The size of the buffer that is allocated when preloading indexes
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 1073741824
-@@ -3250,7 +3250,7 @@
+@@ -3322,7 +3322,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 15
VARIABLE_SCOPE SESSION
@@ -1013,7 +1013,7 @@
VARIABLE_COMMENT Number of statements about which profiling information is maintained. If set to 0, no profiles are stored. See SHOW PROFILES.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 100
-@@ -3264,7 +3264,7 @@
+@@ -3336,7 +3336,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 5
VARIABLE_SCOPE SESSION
@@ -1022,7 +1022,7 @@
VARIABLE_COMMENT Seconds between sending progress reports to the client for time-consuming statements. Set to 0 to disable progress reporting.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -3348,7 +3348,7 @@
+@@ -3420,7 +3420,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 16384
VARIABLE_SCOPE SESSION
@@ -1031,7 +1031,7 @@
VARIABLE_COMMENT Allocation block size for query parsing and execution
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 4294967295
-@@ -3362,7 +3362,7 @@
+@@ -3434,7 +3434,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1048576
VARIABLE_SCOPE GLOBAL
@@ -1040,7 +1040,7 @@
VARIABLE_COMMENT Don't cache results that are bigger than this
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -3376,7 +3376,7 @@
+@@ -3458,7 +3458,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4096
VARIABLE_SCOPE GLOBAL
@@ -1049,7 +1049,7 @@
VARIABLE_COMMENT The minimum size for blocks allocated by the query cache
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -3393,7 +3393,7 @@
+@@ -3465,7 +3465,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The memory allocated to store results from old queries
NUMERIC_MIN_VALUE 0
@@ -1058,7 +1058,7 @@
NUMERIC_BLOCK_SIZE 1024
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -3446,7 +3446,7 @@
+@@ -3518,7 +3518,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 24576
VARIABLE_SCOPE SESSION
@@ -1067,7 +1067,7 @@
VARIABLE_COMMENT Persistent buffer for query parsing and execution
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 4294967295
-@@ -3460,7 +3460,7 @@
+@@ -3532,7 +3532,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4096
VARIABLE_SCOPE SESSION
@@ -1076,7 +1076,7 @@
VARIABLE_COMMENT Allocation block size for storing ranges during optimization
NUMERIC_MIN_VALUE 4096
NUMERIC_MAX_VALUE 4294967295
-@@ -3474,7 +3474,7 @@
+@@ -3546,7 +3546,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 131072
VARIABLE_SCOPE SESSION
@@ -1085,7 +1085,7 @@
VARIABLE_COMMENT Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value
NUMERIC_MIN_VALUE 8192
NUMERIC_MAX_VALUE 2147483647
-@@ -3502,7 +3502,7 @@
+@@ -3574,7 +3574,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 262144
VARIABLE_SCOPE SESSION
@@ -1094,7 +1094,7 @@
VARIABLE_COMMENT When reading rows in sorted order after a sort, the rows are read through this buffer to avoid a disk seeks
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 2147483647
-@@ -3516,10 +3516,10 @@
+@@ -3598,10 +3598,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 8388608
VARIABLE_SCOPE SESSION
@@ -1107,7 +1107,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -3558,7 +3558,7 @@
+@@ -3644,7 +3644,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE 1
VARIABLE_SCOPE SESSION
@@ -1116,7 +1116,7 @@
VARIABLE_COMMENT Uniquely identifies the server instance in the community of replication partners
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -3656,7 +3656,7 @@
+@@ -3742,7 +3742,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1073741824
VARIABLE_SCOPE GLOBAL
@@ -1125,7 +1125,7 @@
VARIABLE_COMMENT The maximum packet length to sent successfully from the master to slave.
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 1073741824
-@@ -3670,7 +3670,7 @@
+@@ -3756,7 +3756,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 2
VARIABLE_SCOPE GLOBAL
@@ -1134,7 +1134,7 @@
VARIABLE_COMMENT If creating the thread takes longer than this value (in seconds), the Slow_launch_threads counter will be incremented
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 31536000
-@@ -3729,7 +3729,7 @@
+@@ -3715,7 +3715,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Each thread that needs to do a sort allocates a buffer of this size
NUMERIC_MIN_VALUE 1024
@@ -1143,7 +1143,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -4020,7 +4020,7 @@
+@@ -4106,7 +4106,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 256
VARIABLE_SCOPE GLOBAL
@@ -1152,7 +1152,7 @@
VARIABLE_COMMENT The soft upper limit for number of cached stored routines for one connection.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 524288
-@@ -4090,7 +4090,7 @@
+@@ -4204,7 +4204,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 400
VARIABLE_SCOPE GLOBAL
@@ -1160,17 +1160,17 @@
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The number of cached table definitions
NUMERIC_MIN_VALUE 400
- NUMERIC_MAX_VALUE 524288
-@@ -4104,7 +4104,7 @@
- GLOBAL_VALUE_ORIGIN COMPILE-TIME
+ NUMERIC_MAX_VALUE 2097152
+@@ -4218,7 +4218,7 @@
+ GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE 2000
VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The number of cached open tables
- NUMERIC_MIN_VALUE 1
+ NUMERIC_MIN_VALUE 10
NUMERIC_MAX_VALUE 1048576
-@@ -4174,7 +4174,7 @@
+@@ -4302,7 +4302,7 @@
GLOBAL_VALUE_ORIGIN AUTO
DEFAULT_VALUE 256
VARIABLE_SCOPE GLOBAL
@@ -1179,7 +1179,7 @@
VARIABLE_COMMENT How many threads we should keep in a cache for reuse. These are freed after 5 minutes of idle time
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16384
-@@ -4188,7 +4188,7 @@
+@@ -4316,7 +4316,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 10
VARIABLE_SCOPE GLOBAL
@@ -1188,7 +1188,7 @@
VARIABLE_COMMENT Permits the application to give the threads system a hint for the desired number of threads that should be run at the same time.This variable has no effect, and is deprecated. It will be removed in a future release.
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 512
-@@ -4295,15 +4295,15 @@
+@@ -4423,15 +4423,15 @@
READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME TMP_DISK_TABLE_SIZE
@@ -1208,7 +1208,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -4317,7 +4317,7 @@
+@@ -4445,7 +4445,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT If an internal in-memory temporary table exceeds this size, MariaDB will automatically convert it to an on-disk MyISAM or Aria table. Same as tmp_table_size.
NUMERIC_MIN_VALUE 1024
@@ -1217,7 +1217,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -4331,7 +4331,7 @@
+@@ -4459,7 +4459,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Alias for tmp_memory_table_size. If an internal in-memory temporary table exceeds this size, MariaDB will automatically convert it to an on-disk MyISAM or Aria table.
NUMERIC_MIN_VALUE 1024
@@ -1226,7 +1226,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -4342,7 +4342,7 @@
+@@ -4470,7 +4470,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 8192
VARIABLE_SCOPE SESSION
@@ -1235,7 +1235,7 @@
VARIABLE_COMMENT Allocation block size for transactions to be stored in binary log
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 134217728
-@@ -4356,7 +4356,7 @@
+@@ -4484,7 +4484,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4096
VARIABLE_SCOPE SESSION
@@ -1244,7 +1244,7 @@
VARIABLE_COMMENT Persistent buffer for transactions to be stored in binary log
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 134217728
-@@ -4454,7 +4454,7 @@
+@@ -4582,7 +4582,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 28800
VARIABLE_SCOPE SESSION
@@ -1253,7 +1253,7 @@
VARIABLE_COMMENT The number of seconds the server waits for activity on a connection before closing it
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 31536000
-@@ -4559,7 +4559,7 @@
+@@ -4687,7 +4687,7 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME OPEN_FILES_LIMIT
VARIABLE_SCOPE GLOBAL
@@ -1262,7 +1262,7 @@
VARIABLE_COMMENT If this is not 0, then mysqld will use this value to reserve file descriptors to use with setrlimit(). If this value is 0 or autoset then mysqld will reserve max_connections*5 or max_connections + table_cache*2 (whichever is larger) number of file descriptors
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -4572,7 +4572,7 @@
+@@ -4700,7 +4700,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Sets the internal state of the RAND() generator for replication purposes
NUMERIC_MIN_VALUE 0
@@ -1271,7 +1271,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -4582,7 +4582,7 @@
+@@ -4710,7 +4710,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Sets the internal state of the RAND() generator for replication purposes
NUMERIC_MIN_VALUE 0
@@ -1280,7 +1280,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -4677,7 +4677,7 @@
+@@ -4805,7 +4805,7 @@
VARIABLE_NAME LOG_TC_SIZE
GLOBAL_VALUE_ORIGIN AUTO
VARIABLE_SCOPE GLOBAL
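Note: the 32-bit rdiff above mostly narrows 64-bit variables from BIGINT UNSIGNED to INT UNSIGNED, capping NUMERIC_MAX_VALUE at 4294967295. The metadata these tests record comes from information_schema; an illustrative query (not part of the rdiff) against the same view, assuming a running 10.4 server:

    SELECT VARIABLE_NAME, VARIABLE_TYPE, NUMERIC_MAX_VALUE
      FROM information_schema.SYSTEM_VARIABLES
     WHERE VARIABLE_NAME IN ('MAX_ROWID_FILTER_SIZE', 'OPTIMIZER_TRACE_MAX_MEM_SIZE');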
diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result
index 89e5fef60e6..1c23154c210 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_server_embedded.result
@@ -40,6 +40,20 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST DEFAULT,COPY,INPLACE,NOCOPY,INSTANT
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
+VARIABLE_NAME ANALYZE_SAMPLE_PERCENTAGE
+SESSION_VALUE 100.000000
+GLOBAL_VALUE 100.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 100.000000
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Percentage of rows from the table ANALYZE TABLE will sample to collect table statistics. Set to 0 to let MariaDB decide what percentage of rows to sample.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 100
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME AUTOCOMMIT
SESSION_VALUE ON
GLOBAL_VALUE ON
@@ -698,6 +712,20 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME DEFAULT_PASSWORD_LIFETIME
+SESSION_VALUE NULL
+GLOBAL_VALUE 0
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE INT UNSIGNED
+VARIABLE_COMMENT This defines the global password expiration policy. 0 means automatic password expiration is disabled. If the value is a positive integer N, the passwords must be changed every N days. This behavior can be overriden using the password expiration options in ALTER USER.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 4294967295
+NUMERIC_BLOCK_SIZE 1
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME DEFAULT_REGEX_FLAGS
SESSION_VALUE
GLOBAL_VALUE
@@ -810,6 +838,20 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON,ALL
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
+VARIABLE_NAME DISCONNECT_ON_EXPIRED_PASSWORD
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT This variable controls how the server handles clients that are not aware of the sandbox mode. If enabled, the server disconnects the client, otherwise the server puts the client in a sandbox mode.
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME DIV_PRECISION_INCREMENT
SESSION_VALUE 4
GLOBAL_VALUE 5
@@ -881,10 +923,10 @@ ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME EQ_RANGE_INDEX_DIVE_LIMIT
-SESSION_VALUE 0
-GLOBAL_VALUE 0
+SESSION_VALUE 200
+GLOBAL_VALUE 200
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE 0
+DEFAULT_VALUE 200
VARIABLE_SCOPE SESSION
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The optimizer will use existing index statistics instead of doing index dives for equality ranges if the number of equality ranges for the index is larger than or equal to this number. If set to 0, index dives are always used.
@@ -1287,10 +1329,10 @@ ENUM_VALUE_LIST NULL
READ_ONLY YES
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME HISTOGRAM_SIZE
-SESSION_VALUE 0
-GLOBAL_VALUE 0
+SESSION_VALUE 254
+GLOBAL_VALUE 254
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE 0
+DEFAULT_VALUE 254
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Number of bytes used for a histogram. If set to 0, no histograms are created by ANALYZE.
@@ -1301,10 +1343,10 @@ ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME HISTOGRAM_TYPE
-SESSION_VALUE SINGLE_PREC_HB
-GLOBAL_VALUE SINGLE_PREC_HB
+SESSION_VALUE DOUBLE_PREC_HB
+GLOBAL_VALUE DOUBLE_PREC_HB
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE SINGLE_PREC_HB
+DEFAULT_VALUE DOUBLE_PREC_HB
VARIABLE_SCOPE SESSION
VARIABLE_TYPE ENUM
VARIABLE_COMMENT Specifies type of the histograms created by ANALYZE. Possible values are: SINGLE_PREC_HB - single precision height-balanced, DOUBLE_PREC_HB - double precision height-balanced.
@@ -2210,6 +2252,20 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME MAX_PASSWORD_ERRORS
+SESSION_VALUE NULL
+GLOBAL_VALUE 4294967295
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 4294967295
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE INT UNSIGNED
+VARIABLE_COMMENT If there is more than this number of failed connect attempts due to invalid password, user will be blocked from further connections until FLUSH_PRIVILEGES.
+NUMERIC_MIN_VALUE 1
+NUMERIC_MAX_VALUE 4294967295
+NUMERIC_BLOCK_SIZE 1
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME MAX_PREPARED_STMT_COUNT
SESSION_VALUE NULL
GLOBAL_VALUE 16382
@@ -2238,6 +2294,20 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
+VARIABLE_NAME MAX_ROWID_FILTER_SIZE
+SESSION_VALUE 131072
+GLOBAL_VALUE 131072
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 131072
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_COMMENT The maximum size of the container of a rowid filter
+NUMERIC_MIN_VALUE 1024
+NUMERIC_MAX_VALUE 18446744073709551615
+NUMERIC_BLOCK_SIZE 1
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME MAX_SEEKS_FOR_KEY
SESSION_VALUE 4294967295
GLOBAL_VALUE 4294967295
@@ -2715,24 +2785,52 @@ ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_SWITCH
-SESSION_VALUE index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
-GLOBAL_VALUE index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+SESSION_VALUE index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
+GLOBAL_VALUE index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+DEFAULT_VALUE index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
VARIABLE_SCOPE SESSION
VARIABLE_TYPE FLAGSET
VARIABLE_COMMENT Fine-tune the optimizer behavior
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
-ENUM_VALUE_LIST index_merge,index_merge_union,index_merge_sort_union,index_merge_intersection,index_merge_sort_intersection,engine_condition_pushdown,index_condition_pushdown,derived_merge,derived_with_keys,firstmatch,loosescan,materialization,in_to_exists,semijoin,partial_match_rowid_merge,partial_match_table_scan,subquery_cache,mrr,mrr_cost_based,mrr_sort_keys,outer_join_with_cache,semijoin_with_cache,join_cache_incremental,join_cache_hashed,join_cache_bka,optimize_join_buffer_size,table_elimination,extended_keys,exists_to_in,orderby_uses_equalities,condition_pushdown_for_derived,split_materialized,default
+ENUM_VALUE_LIST index_merge,index_merge_union,index_merge_sort_union,index_merge_intersection,index_merge_sort_intersection,engine_condition_pushdown,index_condition_pushdown,derived_merge,derived_with_keys,firstmatch,loosescan,materialization,in_to_exists,semijoin,partial_match_rowid_merge,partial_match_table_scan,subquery_cache,mrr,mrr_cost_based,mrr_sort_keys,outer_join_with_cache,semijoin_with_cache,join_cache_incremental,join_cache_hashed,join_cache_bka,optimize_join_buffer_size,table_elimination,extended_keys,exists_to_in,orderby_uses_equalities,condition_pushdown_for_derived,split_materialized,condition_pushdown_for_subquery,rowid_filter,condition_pushdown_from_having,default
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_TRACE
+SESSION_VALUE enabled=off
+GLOBAL_VALUE enabled=off
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE enabled=off
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE FLAGSET
+VARIABLE_COMMENT Controls tracing of the Optimizer: optimizer_trace=option=val[,option=val...], where option is one of {enabled} and val is one of {on, off, default}
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST enabled,default
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_TRACE_MAX_MEM_SIZE
+SESSION_VALUE 1048576
+GLOBAL_VALUE 1048576
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 1048576
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_COMMENT Maximum allowed size of an optimizer trace
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 18446744073709551615
+NUMERIC_BLOCK_SIZE 1
+ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_USE_CONDITION_SELECTIVITY
-SESSION_VALUE 1
-GLOBAL_VALUE 1
+SESSION_VALUE 4
+GLOBAL_VALUE 4
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE 1
+DEFAULT_VALUE 4
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Controls selectivity of which conditions the optimizer takes into account to calculate cardinality of a partial join when it searches for the best execution plan Meaning: 1 - use selectivity of index backed range conditions to calculate the cardinality of a partial join if the last joined table is accessed by full table scan or an index scan, 2 - use selectivity of index backed range conditions to calculate the cardinality of a partial join in any case, 3 - additionally always use selectivity of range conditions that are not backed by any index to calculate the cardinality of a partial join, 4 - use histograms to calculate selectivity of range conditions that are not backed by any index to calculate the cardinality of a partial join.5 - additionally use selectivity of certain non-range predicates calculated on record samples
@@ -3066,9 +3164,9 @@ READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_STATEMENT_CLASSES
SESSION_VALUE NULL
-GLOBAL_VALUE 200
+GLOBAL_VALUE 202
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE 200
+DEFAULT_VALUE 202
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Maximum number of statement instruments.
@@ -3845,7 +3943,7 @@ VARIABLE_COMMENT Sets the sql mode
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
-ENUM_VALUE_LIST REAL_AS_FLOAT,PIPES_AS_CONCAT,ANSI_QUOTES,IGNORE_SPACE,IGNORE_BAD_TABLE_OPTIONS,ONLY_FULL_GROUP_BY,NO_UNSIGNED_SUBTRACTION,NO_DIR_IN_CREATE,POSTGRESQL,ORACLE,MSSQL,DB2,MAXDB,NO_KEY_OPTIONS,NO_TABLE_OPTIONS,NO_FIELD_OPTIONS,MYSQL323,MYSQL40,ANSI,NO_AUTO_VALUE_ON_ZERO,NO_BACKSLASH_ESCAPES,STRICT_TRANS_TABLES,STRICT_ALL_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ALLOW_INVALID_DATES,ERROR_FOR_DIVISION_BY_ZERO,TRADITIONAL,NO_AUTO_CREATE_USER,HIGH_NOT_PRECEDENCE,NO_ENGINE_SUBSTITUTION,PAD_CHAR_TO_FULL_LENGTH,EMPTY_STRING_IS_NULL,SIMULTANEOUS_ASSIGNMENT
+ENUM_VALUE_LIST REAL_AS_FLOAT,PIPES_AS_CONCAT,ANSI_QUOTES,IGNORE_SPACE,IGNORE_BAD_TABLE_OPTIONS,ONLY_FULL_GROUP_BY,NO_UNSIGNED_SUBTRACTION,NO_DIR_IN_CREATE,POSTGRESQL,ORACLE,MSSQL,DB2,MAXDB,NO_KEY_OPTIONS,NO_TABLE_OPTIONS,NO_FIELD_OPTIONS,MYSQL323,MYSQL40,ANSI,NO_AUTO_VALUE_ON_ZERO,NO_BACKSLASH_ESCAPES,STRICT_TRANS_TABLES,STRICT_ALL_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ALLOW_INVALID_DATES,ERROR_FOR_DIVISION_BY_ZERO,TRADITIONAL,NO_AUTO_CREATE_USER,HIGH_NOT_PRECEDENCE,NO_ENGINE_SUBSTITUTION,PAD_CHAR_TO_FULL_LENGTH,EMPTY_STRING_IS_NULL,SIMULTANEOUS_ASSIGNMENT,TIME_ROUND_FRACTIONAL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME SQL_NOTES
@@ -4226,6 +4324,20 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME TCP_NODELAY
+SESSION_VALUE ON
+GLOBAL_VALUE ON
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE ON
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Set option TCP_NODELAY (disable Nagle's algorithm) on socket
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME THREAD_CACHE_SIZE
SESSION_VALUE NULL
GLOBAL_VALUE 151
@@ -4493,17 +4605,17 @@ ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME USE_STAT_TABLES
-SESSION_VALUE NEVER
-GLOBAL_VALUE NEVER
-GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE NEVER
+SESSION_VALUE PREFERABLY
+GLOBAL_VALUE PREFERABLY
+GLOBAL_VALUE_ORIGIN CONFIG
+DEFAULT_VALUE PREFERABLY_FOR_QUERIES
VARIABLE_SCOPE SESSION
VARIABLE_TYPE ENUM
VARIABLE_COMMENT Specifies how to use system statistics tables
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
-ENUM_VALUE_LIST NEVER,COMPLEMENTARY,PREFERABLY
+ENUM_VALUE_LIST NEVER,COMPLEMENTARY,PREFERABLY,COMPLEMENTARY_FOR_QUERIES,PREFERABLY_FOR_QUERIES
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
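With the enum extended and the default moving to the *_FOR_QUERIES variants, use_stat_tables remains adjustable per session. A minimal sketch, not taken from this patch:

# prefer engine-independent statistics when optimizing queries; the value must
# be one of the enum members listed above
SET SESSION use_stat_tables = 'PREFERABLY_FOR_QUERIES';
SELECT @@use_stat_tables;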
VARIABLE_NAME WAIT_TIMEOUT
diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff
index cb9e84b81c8..13f9c786f20 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff
+++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded,32bit.rdiff
@@ -1,6 +1,6 @@
---- sysvars_server_notembedded.result 2017-12-15 20:57:40.174654761 +0200
-+++ sysvars_server_notembedded,32bit.reject 2017-12-15 21:02:20.476044700 +0200
-@@ -58,7 +58,7 @@
+--- sysvars_server_notembedded.result
++++ sysvars_server_notembedded,32bit.result
+@@ -74,7 +74,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE 1
VARIABLE_SCOPE SESSION
@@ -9,7 +9,7 @@
VARIABLE_COMMENT Auto-increment columns are incremented by this
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 65535
-@@ -72,7 +72,7 @@
+@@ -88,7 +88,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1
VARIABLE_SCOPE SESSION
@@ -18,7 +18,7 @@
VARIABLE_COMMENT Offset added to Auto-increment columns. Used when auto-increment-increment != 1
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 65535
-@@ -86,7 +86,7 @@
+@@ -102,7 +102,7 @@
GLOBAL_VALUE_ORIGIN AUTO
DEFAULT_VALUE 150
VARIABLE_SCOPE GLOBAL
@@ -27,7 +27,7 @@
VARIABLE_COMMENT The number of outstanding connection requests MariaDB can have. This comes into play when the main MariaDB thread gets very many connection requests in a very short time
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 65535
-@@ -159,7 +159,7 @@
+@@ -175,7 +175,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The size of the transactional cache for updates to transactional engines for the binary log. If you often use transactions containing many statements, you can increase this to get more performance
NUMERIC_MIN_VALUE 4096
@@ -36,7 +36,7 @@
NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -184,10 +184,10 @@
+@@ -200,10 +200,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE GLOBAL
@@ -49,7 +49,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -198,10 +198,10 @@
+@@ -214,10 +214,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 100000
VARIABLE_SCOPE GLOBAL
@@ -62,7 +62,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -229,7 +229,7 @@
+@@ -245,7 +245,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The size of file cache for the binary log
NUMERIC_MIN_VALUE 8192
@@ -71,7 +71,7 @@
NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -285,7 +285,7 @@
+@@ -301,7 +301,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The size of the statement cache for updates to non-transactional engines for the binary log. If you often use statements updating a great number of rows, you can increase this to get more performance.
NUMERIC_MIN_VALUE 4096
@@ -80,7 +80,7 @@
NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -299,7 +299,7 @@
+@@ -315,7 +315,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Size of tree cache used in bulk insert optimisation. Note that this is a limit per thread!
NUMERIC_MIN_VALUE 0
@@ -89,7 +89,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -562,7 +562,7 @@
+@@ -578,7 +578,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE 10
VARIABLE_SCOPE GLOBAL
@@ -98,7 +98,7 @@
VARIABLE_COMMENT The number of seconds the mysqld server is waiting for a connect packet before responding with 'Bad handshake'
NUMERIC_MIN_VALUE 2
NUMERIC_MAX_VALUE 31536000
-@@ -618,7 +618,7 @@
+@@ -648,7 +648,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 15
VARIABLE_SCOPE SESSION
@@ -107,7 +107,7 @@
VARIABLE_COMMENT Long search depth for the two-step deadlock detection
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 33
-@@ -632,7 +632,7 @@
+@@ -662,7 +662,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4
VARIABLE_SCOPE SESSION
@@ -116,7 +116,7 @@
VARIABLE_COMMENT Short search depth for the two-step deadlock detection
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 32
-@@ -646,7 +646,7 @@
+@@ -676,7 +676,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 50000000
VARIABLE_SCOPE SESSION
@@ -125,7 +125,7 @@
VARIABLE_COMMENT Long timeout for the two-step deadlock detection (in microseconds)
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -660,7 +660,7 @@
+@@ -690,7 +690,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 10000
VARIABLE_SCOPE SESSION
@@ -134,7 +134,7 @@
VARIABLE_COMMENT Short timeout for the two-step deadlock detection (in microseconds)
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -730,7 +730,7 @@
+@@ -760,7 +760,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE SESSION
@@ -143,7 +143,7 @@
VARIABLE_COMMENT The default week format used by WEEK() functions
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 7
-@@ -744,7 +744,7 @@
+@@ -774,7 +774,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 100
VARIABLE_SCOPE GLOBAL
@@ -152,7 +152,7 @@
VARIABLE_COMMENT After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -758,7 +758,7 @@
+@@ -788,7 +788,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 300
VARIABLE_SCOPE GLOBAL
@@ -161,7 +161,7 @@
VARIABLE_COMMENT How long a INSERT DELAYED thread should wait for INSERT statements before terminating
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 31536000
-@@ -772,7 +772,7 @@
+@@ -802,7 +802,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1000
VARIABLE_SCOPE GLOBAL
@@ -170,7 +170,7 @@
VARIABLE_COMMENT What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -800,7 +800,7 @@
+@@ -830,7 +830,7 @@
GLOBAL_VALUE_ORIGIN SQL
DEFAULT_VALUE 4
VARIABLE_SCOPE SESSION
@@ -179,7 +179,7 @@
VARIABLE_COMMENT Precision of the result of '/' operator will be increased on that value
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 38
-@@ -912,7 +912,7 @@
+@@ -956,7 +956,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE GLOBAL
@@ -188,7 +188,7 @@
VARIABLE_COMMENT If non-zero, binary logs will be purged after expire_logs_days days; possible purges happen at startup and at binary log rotation
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 99
-@@ -954,7 +954,7 @@
+@@ -998,7 +998,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1
VARIABLE_SCOPE GLOBAL
@@ -197,7 +197,7 @@
VARIABLE_COMMENT The number of connections on extra-port
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 100000
-@@ -996,7 +996,7 @@
+@@ -1040,7 +1040,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE GLOBAL
@@ -206,7 +206,7 @@
VARIABLE_COMMENT A dedicated thread is created to flush all tables at the given interval
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 31536000
-@@ -1038,7 +1038,7 @@
+@@ -1082,7 +1082,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 84
VARIABLE_SCOPE GLOBAL
@@ -215,7 +215,7 @@
VARIABLE_COMMENT The maximum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable
NUMERIC_MIN_VALUE 10
NUMERIC_MAX_VALUE 84
-@@ -1052,7 +1052,7 @@
+@@ -1096,7 +1096,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4
VARIABLE_SCOPE GLOBAL
@@ -224,7 +224,7 @@
VARIABLE_COMMENT The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 84
-@@ -1066,7 +1066,7 @@
+@@ -1110,7 +1110,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 20
VARIABLE_SCOPE GLOBAL
@@ -233,7 +233,7 @@
VARIABLE_COMMENT Number of best matches to use for query expansion
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1000
-@@ -1125,7 +1125,7 @@
+@@ -1169,7 +1169,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The maximum length of the result of function GROUP_CONCAT()
NUMERIC_MIN_VALUE 4
@@ -242,7 +242,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -1374,7 +1374,7 @@
+@@ -1432,7 +1432,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE SESSION
@@ -251,7 +251,7 @@
VARIABLE_COMMENT Number of bytes used for a histogram. If set to 0, no histograms are created by ANALYZE.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 255
-@@ -1402,7 +1402,7 @@
+@@ -1460,7 +1460,7 @@
GLOBAL_VALUE_ORIGIN AUTO
DEFAULT_VALUE 128
VARIABLE_SCOPE GLOBAL
@@ -260,7 +260,7 @@
VARIABLE_COMMENT How many host names should be cached to avoid resolving.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 65536
-@@ -1556,7 +1556,7 @@
+@@ -1614,7 +1614,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 28800
VARIABLE_SCOPE SESSION
@@ -269,20 +269,7 @@
VARIABLE_COMMENT The number of seconds the server waits for activity on an interactive connection before closing it
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 31536000
-@@ -1570,10 +1570,10 @@
- GLOBAL_VALUE_ORIGIN COMPILE-TIME
- DEFAULT_VALUE 1000
- VARIABLE_SCOPE SESSION
--VARIABLE_TYPE BIGINT UNSIGNED
-+VARIABLE_TYPE INT UNSIGNED
- VARIABLE_COMMENT The minimum number of scalar elements in the value list of IN predicate that triggers its conversion to IN subquery
- NUMERIC_MIN_VALUE 0
--NUMERIC_MAX_VALUE 18446744073709551615
-+NUMERIC_MAX_VALUE 4294967295
- NUMERIC_BLOCK_SIZE 1
- ENUM_VALUE_LIST NULL
- READ_ONLY NO
-@@ -1601,7 +1601,7 @@
+@@ -1645,7 +1645,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The size of the buffer that is used for joins
NUMERIC_MIN_VALUE 128
@@ -291,7 +278,7 @@
NUMERIC_BLOCK_SIZE 128
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -1626,7 +1626,7 @@
+@@ -1670,7 +1670,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 2
VARIABLE_SCOPE SESSION
@@ -300,7 +287,7 @@
VARIABLE_COMMENT Controls what join operations can be executed with join buffers. Odd numbers are used for plain join buffers while even numbers are used for linked buffers
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 8
-@@ -1657,7 +1657,7 @@
+@@ -1701,7 +1701,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The size of the buffer used for index blocks for MyISAM tables. Increase this to get better index handling (for all reads and multiple writes) to as much as you can afford
NUMERIC_MIN_VALUE 0
@@ -309,7 +296,7 @@
NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -1878,7 +1878,7 @@
+@@ -1922,7 +1922,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 86400
VARIABLE_SCOPE SESSION
@@ -318,7 +305,7 @@
VARIABLE_COMMENT Timeout in seconds to wait for a lock before returning an error.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 31536000
-@@ -2088,7 +2088,7 @@
+@@ -2132,7 +2132,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1
VARIABLE_SCOPE SESSION
@@ -327,7 +314,7 @@
VARIABLE_COMMENT Write to slow log every #th slow query. Set to 1 to log everything. Increase it to reduce the size of the slow or the performance impact of slow logging
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -2130,7 +2130,7 @@
+@@ -2174,7 +2174,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 2
VARIABLE_SCOPE SESSION
@@ -336,7 +323,7 @@
VARIABLE_COMMENT Log some not critical warnings to the general log file.Value can be between 0 and 11. Higher values mean more verbosity
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -2186,7 +2186,7 @@
+@@ -2230,7 +2230,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 16777216
VARIABLE_SCOPE SESSION
@@ -345,7 +332,7 @@
VARIABLE_COMMENT Max packet length to send to or receive from the server
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 1073741824
-@@ -2196,14 +2196,14 @@
+@@ -2240,14 +2240,14 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME MAX_BINLOG_CACHE_SIZE
SESSION_VALUE NULL
@@ -363,7 +350,7 @@
NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -2214,7 +2214,7 @@
+@@ -2258,7 +2258,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1073741824
VARIABLE_SCOPE GLOBAL
@@ -372,7 +359,7 @@
VARIABLE_COMMENT Binary log will be rotated automatically when the size exceeds this value.
NUMERIC_MIN_VALUE 4096
NUMERIC_MAX_VALUE 1073741824
-@@ -2224,14 +2224,14 @@
+@@ -2268,14 +2268,14 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME MAX_BINLOG_STMT_CACHE_SIZE
SESSION_VALUE NULL
@@ -390,16 +377,16 @@
NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -2242,7 +2242,7 @@
+@@ -2286,7 +2286,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 151
VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The number of simultaneous clients allowed
- NUMERIC_MIN_VALUE 1
+ NUMERIC_MIN_VALUE 10
NUMERIC_MAX_VALUE 100000
-@@ -2256,7 +2256,7 @@
+@@ -2300,7 +2300,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 100
VARIABLE_SCOPE GLOBAL
@@ -408,7 +395,7 @@
VARIABLE_COMMENT If there is more than this number of interrupted connections from a host this host will be blocked from further connections
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -2270,7 +2270,7 @@
+@@ -2314,7 +2314,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 20
VARIABLE_SCOPE SESSION
@@ -417,7 +404,7 @@
VARIABLE_COMMENT Don't start more than this number of threads to handle INSERT DELAYED statements. If set to zero INSERT DELAYED will be not used
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16384
-@@ -2298,7 +2298,7 @@
+@@ -2342,7 +2342,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 64
VARIABLE_SCOPE SESSION
@@ -426,7 +413,7 @@
VARIABLE_COMMENT Max number of errors/warnings to store for a statement
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 65535
-@@ -2315,7 +2315,7 @@
+@@ -2359,7 +2359,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Don't allow creation of heap tables bigger than this
NUMERIC_MIN_VALUE 16384
@@ -435,7 +422,7 @@
NUMERIC_BLOCK_SIZE 1024
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -2326,7 +2326,7 @@
+@@ -2370,7 +2370,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 20
VARIABLE_SCOPE SESSION
@@ -444,7 +431,7 @@
VARIABLE_COMMENT Don't start more than this number of threads to handle INSERT DELAYED statements. If set to zero INSERT DELAYED will be not used
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16384
-@@ -2354,7 +2354,7 @@
+@@ -2398,7 +2398,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1024
VARIABLE_SCOPE SESSION
@@ -453,7 +440,7 @@
VARIABLE_COMMENT Max number of bytes in sorted records
NUMERIC_MIN_VALUE 4
NUMERIC_MAX_VALUE 8388608
-@@ -2368,7 +2368,7 @@
+@@ -2412,7 +2412,7 @@
GLOBAL_VALUE_ORIGIN AUTO
DEFAULT_VALUE 1048576
VARIABLE_SCOPE GLOBAL
@@ -462,16 +449,7 @@
VARIABLE_COMMENT The maximum BLOB length to send to server from mysql_send_long_data API. Deprecated option; use max_allowed_packet instead.
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 4294967295
-@@ -2382,7 +2382,7 @@
- GLOBAL_VALUE_ORIGIN COMPILE-TIME
- DEFAULT_VALUE 16382
- VARIABLE_SCOPE GLOBAL
--VARIABLE_TYPE BIGINT UNSIGNED
-+VARIABLE_TYPE INT UNSIGNED
- VARIABLE_COMMENT Maximum number of prepared statements in the server
- NUMERIC_MIN_VALUE 0
- NUMERIC_MAX_VALUE 1048576
-@@ -2396,7 +2396,7 @@
+@@ -2454,7 +2454,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4294967295
VARIABLE_SCOPE SESSION
@@ -480,7 +458,16 @@
VARIABLE_COMMENT Maximum number of iterations when executing recursive queries
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -2424,7 +2424,7 @@
+@@ -2485,7 +2485,7 @@
+ VARIABLE_TYPE BIGINT UNSIGNED
+ VARIABLE_COMMENT The maximum size of the container of a rowid filter
+ NUMERIC_MIN_VALUE 1024
+-NUMERIC_MAX_VALUE 18446744073709551615
++NUMERIC_MAX_VALUE 4294967295
+ NUMERIC_BLOCK_SIZE 1
+ ENUM_VALUE_LIST NULL
+ READ_ONLY NO
+@@ -2496,7 +2496,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4294967295
VARIABLE_SCOPE SESSION
@@ -489,7 +476,7 @@
VARIABLE_COMMENT Limit assumed max number of seeks when looking up rows based on a key
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -2452,7 +2452,7 @@
+@@ -2524,7 +2524,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1024
VARIABLE_SCOPE SESSION
@@ -498,7 +485,7 @@
VARIABLE_COMMENT The number of bytes to use when sorting BLOB or TEXT values (only the first max_sort_length bytes of each value are used; the rest are ignored)
NUMERIC_MIN_VALUE 4
NUMERIC_MAX_VALUE 8388608
-@@ -2466,7 +2466,7 @@
+@@ -2538,7 +2538,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE SESSION
@@ -507,7 +494,7 @@
VARIABLE_COMMENT Maximum stored procedure recursion depth
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 255
-@@ -2494,7 +2494,7 @@
+@@ -2566,7 +2566,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 32
VARIABLE_SCOPE SESSION
@@ -516,7 +503,7 @@
VARIABLE_COMMENT Unused, will be removed.
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -2522,7 +2522,7 @@
+@@ -2594,7 +2594,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4294967295
VARIABLE_SCOPE GLOBAL
@@ -525,7 +512,7 @@
VARIABLE_COMMENT After this many write locks, allow some read locks to run in between
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -2536,7 +2536,7 @@
+@@ -2608,7 +2608,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1024
VARIABLE_SCOPE GLOBAL
@@ -534,7 +521,7 @@
VARIABLE_COMMENT Unused
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 1048576
-@@ -2550,7 +2550,7 @@
+@@ -2622,7 +2622,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 8
VARIABLE_SCOPE GLOBAL
@@ -543,7 +530,7 @@
VARIABLE_COMMENT Unused
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 1024
-@@ -2564,7 +2564,7 @@
+@@ -2636,7 +2636,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE SESSION
@@ -552,7 +539,7 @@
VARIABLE_COMMENT Don't write queries to slow log that examine fewer rows than that
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -2578,7 +2578,7 @@
+@@ -2650,7 +2650,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 262144
VARIABLE_SCOPE SESSION
@@ -561,7 +548,7 @@
VARIABLE_COMMENT Size of buffer to use when using MRR with range access
NUMERIC_MIN_VALUE 8192
NUMERIC_MAX_VALUE 2147483647
-@@ -2592,10 +2592,10 @@
+@@ -2664,10 +2664,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 256
VARIABLE_SCOPE SESSION
@@ -574,7 +561,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -2606,7 +2606,7 @@
+@@ -2678,7 +2678,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1024
VARIABLE_SCOPE GLOBAL
@@ -583,7 +570,7 @@
VARIABLE_COMMENT Block size to be used for MyISAM index pages
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 16384
-@@ -2620,7 +2620,7 @@
+@@ -2692,7 +2692,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 6
VARIABLE_SCOPE GLOBAL
@@ -592,7 +579,7 @@
VARIABLE_COMMENT Default pointer size to be used for MyISAM tables
NUMERIC_MIN_VALUE 2
NUMERIC_MAX_VALUE 7
-@@ -2630,9 +2630,9 @@
+@@ -2702,9 +2702,9 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME MYISAM_MAX_SORT_FILE_SIZE
SESSION_VALUE NULL
@@ -604,7 +591,7 @@
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Don't use the fast sort index method to created index if the temporary file would get bigger than this
-@@ -2644,14 +2644,14 @@
+@@ -2716,14 +2716,14 @@
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME MYISAM_MMAP_SIZE
SESSION_VALUE NULL
@@ -622,7 +609,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY YES
-@@ -2676,10 +2676,10 @@
+@@ -2748,10 +2748,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1
VARIABLE_SCOPE SESSION
@@ -635,7 +622,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -2693,7 +2693,7 @@
+@@ -2765,7 +2765,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The buffer that is allocated when sorting the index when doing a REPAIR or when creating indexes with CREATE INDEX or ALTER TABLE
NUMERIC_MIN_VALUE 4096
@@ -644,7 +631,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -2746,7 +2746,7 @@
+@@ -2818,7 +2818,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 16384
VARIABLE_SCOPE SESSION
@@ -653,7 +640,7 @@
VARIABLE_COMMENT Buffer length for TCP/IP and socket communication
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 1048576
-@@ -2760,7 +2760,7 @@
+@@ -2832,7 +2832,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 30
VARIABLE_SCOPE SESSION
@@ -662,7 +649,7 @@
VARIABLE_COMMENT Number of seconds to wait for more data from a connection before aborting the read
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 31536000
-@@ -2774,7 +2774,7 @@
+@@ -2846,7 +2846,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 10
VARIABLE_SCOPE SESSION
@@ -671,7 +658,7 @@
VARIABLE_COMMENT If a read on a communication port is interrupted, retry this many times before giving up
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -2788,7 +2788,7 @@
+@@ -2860,7 +2860,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 60
VARIABLE_SCOPE SESSION
@@ -680,7 +667,7 @@
VARIABLE_COMMENT Number of seconds to wait for a block to be written to a connection before aborting the write
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 31536000
-@@ -2858,7 +2858,7 @@
+@@ -2930,7 +2930,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1
VARIABLE_SCOPE SESSION
@@ -689,7 +676,7 @@
VARIABLE_COMMENT Controls the heuristic(s) applied during query optimization to prune less-promising partial plans from the optimizer search space. Meaning: 0 - do not apply any heuristic, thus perform exhaustive search; 1 - prune plans based on number of retrieved rows
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1
-@@ -2872,7 +2872,7 @@
+@@ -2944,7 +2944,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 62
VARIABLE_SCOPE SESSION
@@ -698,7 +685,7 @@
VARIABLE_COMMENT Maximum depth of search performed by the query optimizer. Values larger than the number of relations in a query result in better query plans, but take longer to compile a query. Values smaller than the number of tables in a relation result in faster optimization, but may produce very bad query plans. If set to 0, the system will automatically pick a reasonable value.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 62
-@@ -2886,7 +2886,7 @@
+@@ -2958,7 +2958,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 100
VARIABLE_SCOPE SESSION
@@ -707,16 +694,29 @@
VARIABLE_COMMENT Controls number of record samples to check condition selectivity
NUMERIC_MIN_VALUE 10
NUMERIC_MAX_VALUE 4294967295
-@@ -2914,7 +2914,7 @@
+@@ -2986,10 +2986,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
- DEFAULT_VALUE 1
+ DEFAULT_VALUE 1048576
+ VARIABLE_SCOPE SESSION
+-VARIABLE_TYPE BIGINT UNSIGNED
++VARIABLE_TYPE INT UNSIGNED
+ VARIABLE_COMMENT Maximum allowed size of an optimizer trace
+ NUMERIC_MIN_VALUE 0
+-NUMERIC_MAX_VALUE 18446744073709551615
++NUMERIC_MAX_VALUE 4294967295
+ NUMERIC_BLOCK_SIZE 1
+ ENUM_VALUE_LIST NULL
+ READ_ONLY NO
+@@ -2996,7 +2996,7 @@
+ GLOBAL_VALUE_ORIGIN COMPILE-TIME
+ DEFAULT_VALUE 4
VARIABLE_SCOPE SESSION
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT Controls selectivity of which conditions the optimizer takes into account to calculate cardinality of a partial join when it searches for the best execution plan Meaning: 1 - use selectivity of index backed range conditions to calculate the cardinality of a partial join if the last joined table is accessed by full table scan or an index scan, 2 - use selectivity of index backed range conditions to calculate the cardinality of a partial join in any case, 3 - additionally always use selectivity of range conditions that are not backed by any index to calculate the cardinality of a partial join, 4 - use histograms to calculate selectivity of range conditions that are not backed by any index to calculate the cardinality of a partial join.5 - additionally use selectivity of certain non-range predicates calculated on record samples
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 5
-@@ -2942,7 +2942,7 @@
+@@ -3042,7 +3042,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -725,7 +725,7 @@
VARIABLE_COMMENT Maximum number of instrumented user@host accounts. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -2956,7 +2956,7 @@
+@@ -3056,7 +3056,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -734,7 +734,7 @@
VARIABLE_COMMENT Size of the statement digest. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 200
-@@ -2970,7 +2970,7 @@
+@@ -3070,7 +3070,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -743,7 +743,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_STAGES_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -2984,7 +2984,7 @@
+@@ -3084,7 +3084,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -752,7 +752,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_STAGES_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
-@@ -2998,7 +2998,7 @@
+@@ -3098,7 +3098,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -761,7 +761,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_STATEMENTS_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3012,7 +3012,7 @@
+@@ -3112,7 +3112,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -770,7 +770,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_STATEMENTS_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
-@@ -3026,7 +3026,7 @@
+@@ -3126,7 +3126,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -779,7 +779,7 @@
VARIABLE_COMMENT Number of rows in EVENTS_WAITS_HISTORY_LONG. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3040,7 +3040,7 @@
+@@ -3140,7 +3140,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -788,7 +788,7 @@
VARIABLE_COMMENT Number of rows per thread in EVENTS_WAITS_HISTORY. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1024
-@@ -3054,7 +3054,7 @@
+@@ -3154,7 +3154,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -797,7 +797,7 @@
VARIABLE_COMMENT Maximum number of instrumented hosts. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3068,7 +3068,7 @@
+@@ -3168,7 +3168,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 80
VARIABLE_SCOPE GLOBAL
@@ -806,7 +806,7 @@
VARIABLE_COMMENT Maximum number of condition instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -3082,7 +3082,7 @@
+@@ -3182,7 +3182,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -815,7 +815,7 @@
VARIABLE_COMMENT Maximum number of instrumented condition objects. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3096,7 +3096,7 @@
+@@ -3196,7 +3196,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1024
VARIABLE_SCOPE GLOBAL
@@ -824,7 +824,7 @@
VARIABLE_COMMENT Maximum length considered for digest text, when stored in performance_schema tables.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1048576
-@@ -3110,7 +3110,7 @@
+@@ -3210,7 +3210,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 50
VARIABLE_SCOPE GLOBAL
@@ -833,7 +833,7 @@
VARIABLE_COMMENT Maximum number of file instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -3124,7 +3124,7 @@
+@@ -3196,7 +3196,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 32768
VARIABLE_SCOPE GLOBAL
@@ -842,7 +842,7 @@
VARIABLE_COMMENT Maximum number of opened instrumented files.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1048576
-@@ -3138,7 +3138,7 @@
+@@ -3210,7 +3210,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -851,7 +851,7 @@
VARIABLE_COMMENT Maximum number of instrumented files. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3152,7 +3152,7 @@
+@@ -3252,7 +3252,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 200
VARIABLE_SCOPE GLOBAL
@@ -860,7 +860,7 @@
VARIABLE_COMMENT Maximum number of mutex instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -3166,7 +3166,7 @@
+@@ -3266,7 +3266,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -869,7 +869,7 @@
VARIABLE_COMMENT Maximum number of instrumented MUTEX objects. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 104857600
-@@ -3180,7 +3180,7 @@
+@@ -3280,7 +3280,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 40
VARIABLE_SCOPE GLOBAL
@@ -878,7 +878,7 @@
VARIABLE_COMMENT Maximum number of rwlock instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -3194,7 +3194,7 @@
+@@ -3294,7 +3294,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -887,7 +887,7 @@
VARIABLE_COMMENT Maximum number of instrumented RWLOCK objects. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 104857600
-@@ -3208,7 +3208,7 @@
+@@ -3308,7 +3308,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 10
VARIABLE_SCOPE GLOBAL
@@ -896,7 +896,7 @@
VARIABLE_COMMENT Maximum number of socket instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -3222,7 +3222,7 @@
+@@ -3322,7 +3322,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -905,7 +905,7 @@
VARIABLE_COMMENT Maximum number of opened instrumented sockets. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3236,7 +3236,7 @@
+@@ -3336,7 +3336,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 160
VARIABLE_SCOPE GLOBAL
@@ -914,16 +914,16 @@
VARIABLE_COMMENT Maximum number of stage instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -3250,7 +3250,7 @@
+@@ -3350,7 +3350,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
- DEFAULT_VALUE 191
+ DEFAULT_VALUE 202
VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT Maximum number of statement instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -3264,7 +3264,7 @@
+@@ -3364,7 +3364,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -932,7 +932,7 @@
VARIABLE_COMMENT Maximum number of opened instrumented tables. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3278,7 +3278,7 @@
+@@ -3378,7 +3378,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -941,7 +941,7 @@
VARIABLE_COMMENT Maximum number of instrumented tables. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3292,7 +3292,7 @@
+@@ -3392,7 +3392,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 50
VARIABLE_SCOPE GLOBAL
@@ -950,7 +950,7 @@
VARIABLE_COMMENT Maximum number of thread instruments.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 256
-@@ -3306,7 +3306,7 @@
+@@ -3406,7 +3406,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -959,7 +959,7 @@
VARIABLE_COMMENT Maximum number of instrumented threads. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3320,7 +3320,7 @@
+@@ -3420,7 +3420,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -968,7 +968,7 @@
VARIABLE_COMMENT Size of session attribute string buffer per thread. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3334,7 +3334,7 @@
+@@ -3434,7 +3434,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 100
VARIABLE_SCOPE GLOBAL
@@ -977,7 +977,7 @@
VARIABLE_COMMENT Maximum number of rows in SETUP_ACTORS.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1024
-@@ -3348,7 +3348,7 @@
+@@ -3448,7 +3448,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 100
VARIABLE_SCOPE GLOBAL
@@ -986,7 +986,7 @@
VARIABLE_COMMENT Maximum number of rows in SETUP_OBJECTS.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 1048576
-@@ -3362,7 +3362,7 @@
+@@ -3462,7 +3462,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE -1
VARIABLE_SCOPE GLOBAL
@@ -995,7 +995,7 @@
VARIABLE_COMMENT Maximum number of instrumented users. Use 0 to disable, -1 for automated sizing.
NUMERIC_MIN_VALUE -1
NUMERIC_MAX_VALUE 1048576
-@@ -3432,7 +3432,7 @@
+@@ -3518,7 +3518,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 32768
VARIABLE_SCOPE SESSION
@@ -1004,7 +1004,7 @@
VARIABLE_COMMENT The size of the buffer that is allocated when preloading indexes
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 1073741824
-@@ -3460,7 +3460,7 @@
+@@ -3546,7 +3546,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 15
VARIABLE_SCOPE SESSION
@@ -1013,7 +1013,7 @@
VARIABLE_COMMENT Number of statements about which profiling information is maintained. If set to 0, no profiles are stored. See SHOW PROFILES.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 100
-@@ -3474,7 +3474,7 @@
+@@ -3560,7 +3560,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 5
VARIABLE_SCOPE SESSION
@@ -1022,7 +1022,7 @@
VARIABLE_COMMENT Seconds between sending progress reports to the client for time-consuming statements. Set to 0 to disable progress reporting.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -3558,7 +3558,7 @@
+@@ -3644,7 +3644,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 16384
VARIABLE_SCOPE SESSION
@@ -1031,7 +1031,7 @@
VARIABLE_COMMENT Allocation block size for query parsing and execution
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 4294967295
-@@ -3572,7 +3572,7 @@
+@@ -3658,7 +3658,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1048576
VARIABLE_SCOPE GLOBAL
@@ -1040,7 +1040,7 @@
VARIABLE_COMMENT Don't cache results that are bigger than this
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -3586,7 +3586,7 @@
+@@ -3672,7 +3672,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4096
VARIABLE_SCOPE GLOBAL
@@ -1049,7 +1049,7 @@
VARIABLE_COMMENT The minimum size for blocks allocated by the query cache
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -3603,7 +3603,7 @@
+@@ -3689,7 +3689,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT The memory allocated to store results from old queries
NUMERIC_MIN_VALUE 0
@@ -1058,7 +1058,7 @@
NUMERIC_BLOCK_SIZE 1024
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -3656,7 +3656,7 @@
+@@ -3742,7 +3742,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 24576
VARIABLE_SCOPE SESSION
@@ -1067,7 +1067,7 @@
VARIABLE_COMMENT Persistent buffer for query parsing and execution
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 4294967295
-@@ -3670,7 +3670,7 @@
+@@ -3756,7 +3756,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4096
VARIABLE_SCOPE SESSION
@@ -1076,7 +1076,7 @@
VARIABLE_COMMENT Allocation block size for storing ranges during optimization
NUMERIC_MIN_VALUE 4096
NUMERIC_MAX_VALUE 4294967295
-@@ -3687,7 +3687,7 @@
+@@ -3773,7 +3773,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Maximum speed(KB/s) to read binlog from master (0 = no limit)
NUMERIC_MIN_VALUE 0
@@ -1085,7 +1085,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -3698,7 +3698,7 @@
+@@ -3784,7 +3784,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 131072
VARIABLE_SCOPE SESSION
@@ -1094,7 +1094,7 @@
VARIABLE_COMMENT Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value
NUMERIC_MIN_VALUE 8192
NUMERIC_MAX_VALUE 2147483647
-@@ -3726,7 +3726,7 @@
+@@ -3812,7 +3812,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 262144
VARIABLE_SCOPE SESSION
@@ -1103,7 +1103,7 @@
VARIABLE_COMMENT When reading rows in sorted order after a sort, the rows are read through this buffer to avoid a disk seeks
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 2147483647
-@@ -4006,10 +4006,10 @@
+@@ -4092,10 +4092,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 8388608
VARIABLE_SCOPE SESSION
@@ -1116,7 +1116,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -4034,10 +4034,10 @@
+@@ -4120,10 +4120,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 10000
VARIABLE_SCOPE GLOBAL
@@ -1129,7 +1129,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -4048,10 +4048,10 @@
+@@ -4134,10 +4134,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 32
VARIABLE_SCOPE GLOBAL
@@ -1142,7 +1142,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -4132,10 +4132,10 @@
+@@ -4218,10 +4218,10 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 32
VARIABLE_SCOPE GLOBAL
@@ -1155,7 +1155,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -4174,7 +4174,7 @@
+@@ -4274,7 +4274,7 @@
GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE 1
VARIABLE_SCOPE SESSION
@@ -1164,7 +1164,7 @@
VARIABLE_COMMENT Uniquely identifies the server instance in the community of replication partners
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 4294967295
-@@ -4356,7 +4356,7 @@
+@@ -4456,7 +4456,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE GLOBAL
@@ -1173,7 +1173,7 @@
VARIABLE_COMMENT Maximum number of parallel threads to use on slave for events in a single replication domain. When using multiple domains, this can be used to limit a single domain from grabbing all threads and thus stalling other domains. The default of 0 means to allow a domain to grab as many threads as it wants, up to the value of slave_parallel_threads.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16383
-@@ -4398,7 +4398,7 @@
+@@ -4498,7 +4498,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 1073741824
VARIABLE_SCOPE GLOBAL
@@ -1182,7 +1182,7 @@
VARIABLE_COMMENT The maximum packet length to sent successfully from the master to slave.
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 1073741824
-@@ -4426,7 +4426,7 @@
+@@ -4526,7 +4526,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 131072
VARIABLE_SCOPE GLOBAL
@@ -1191,7 +1191,7 @@
VARIABLE_COMMENT Limit on how much memory SQL threads should use per parallel replication thread when reading ahead in the relay log looking for opportunities for parallel replication. Only used when --slave-parallel-threads > 0.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 2147483647
-@@ -4454,7 +4454,7 @@
+@@ -4554,7 +4554,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE GLOBAL
@@ -1200,7 +1200,7 @@
VARIABLE_COMMENT If non-zero, number of threads to spawn to apply in parallel events on the slave that were group-committed on the master or were logged with GTID in different replication domains. Note that these threads are in addition to the IO and SQL threads, which are always created by a replication slave
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16383
-@@ -4468,7 +4468,7 @@
+@@ -4568,7 +4568,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE GLOBAL
@@ -1209,7 +1209,7 @@
VARIABLE_COMMENT Alias for slave_parallel_threads
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16383
-@@ -4524,7 +4524,7 @@
+@@ -4624,7 +4624,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 10
VARIABLE_SCOPE GLOBAL
@@ -1218,7 +1218,7 @@
VARIABLE_COMMENT Number of times the slave SQL thread will retry a transaction in case it failed with a deadlock, elapsed lock wait timeout or listed in slave_transaction_retry_errors, before giving up and stopping
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -4552,7 +4552,7 @@
+@@ -4652,7 +4652,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 0
VARIABLE_SCOPE GLOBAL
@@ -1227,7 +1227,7 @@
VARIABLE_COMMENT Interval of the slave SQL thread will retry a transaction in case it failed with a deadlock or elapsed lock wait timeout or listed in slave_transaction_retry_errors
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 3600
-@@ -4580,7 +4580,7 @@
+@@ -4680,7 +4680,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 2
VARIABLE_SCOPE GLOBAL
@@ -1236,7 +1236,7 @@
VARIABLE_COMMENT If creating the thread takes longer than this value (in seconds), the Slow_launch_threads counter will be incremented
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 31536000
-@@ -4639,7 +4639,7 @@
+@@ -4739,7 +4739,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Each thread that needs to do a sort allocates a buffer of this size
NUMERIC_MIN_VALUE 1024
@@ -1245,7 +1245,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -4944,7 +4944,7 @@
+@@ -5044,7 +5044,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 256
VARIABLE_SCOPE GLOBAL
@@ -1254,7 +1254,7 @@
VARIABLE_COMMENT The soft upper limit for number of cached stored routines for one connection.
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 524288
-@@ -5042,7 +5042,7 @@
+@@ -5170,7 +5170,7 @@
GLOBAL_VALUE_ORIGIN AUTO
DEFAULT_VALUE 400
VARIABLE_SCOPE GLOBAL
@@ -1262,17 +1262,17 @@
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The number of cached table definitions
NUMERIC_MIN_VALUE 400
- NUMERIC_MAX_VALUE 524288
-@@ -5056,7 +5056,7 @@
- GLOBAL_VALUE_ORIGIN COMPILE-TIME
+ NUMERIC_MAX_VALUE 2097152
+@@ -5184,7 +5184,7 @@
+ GLOBAL_VALUE_ORIGIN CONFIG
DEFAULT_VALUE 2000
VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The number of cached open tables
- NUMERIC_MIN_VALUE 1
+ NUMERIC_MIN_VALUE 10
NUMERIC_MAX_VALUE 1048576
-@@ -5126,7 +5126,7 @@
+@@ -5268,7 +5268,7 @@
GLOBAL_VALUE_ORIGIN AUTO
DEFAULT_VALUE 256
VARIABLE_SCOPE GLOBAL
@@ -1281,7 +1281,7 @@
VARIABLE_COMMENT How many threads we should keep in a cache for reuse. These are freed after 5 minutes of idle time
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 16384
-@@ -5140,7 +5140,7 @@
+@@ -5282,7 +5282,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 10
VARIABLE_SCOPE GLOBAL
@@ -1290,7 +1290,7 @@
VARIABLE_COMMENT Permits the application to give the threads system a hint for the desired number of threads that should be run at the same time.This variable has no effect, and is deprecated. It will be removed in a future release.
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 512
-@@ -5345,15 +5345,15 @@
+@@ -5487,15 +5487,15 @@
READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME TMP_DISK_TABLE_SIZE
@@ -1310,7 +1310,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -5367,7 +5367,7 @@
+@@ -5509,7 +5509,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT If an internal in-memory temporary table exceeds this size, MariaDB will automatically convert it to an on-disk MyISAM or Aria table. Same as tmp_table_size.
NUMERIC_MIN_VALUE 1024
@@ -1319,7 +1319,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -5381,7 +5381,7 @@
+@@ -5523,7 +5523,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Alias for tmp_memory_table_size. If an internal in-memory temporary table exceeds this size, MariaDB will automatically convert it to an on-disk MyISAM or Aria table.
NUMERIC_MIN_VALUE 1024
@@ -1328,7 +1328,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -5392,7 +5392,7 @@
+@@ -5534,7 +5534,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 8192
VARIABLE_SCOPE SESSION
@@ -1337,7 +1337,7 @@
VARIABLE_COMMENT Allocation block size for transactions to be stored in binary log
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 134217728
-@@ -5406,7 +5406,7 @@
+@@ -5548,7 +5548,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 4096
VARIABLE_SCOPE SESSION
@@ -1346,7 +1346,7 @@
VARIABLE_COMMENT Persistent buffer for transactions to be stored in binary log
NUMERIC_MIN_VALUE 1024
NUMERIC_MAX_VALUE 134217728
-@@ -5504,7 +5504,7 @@
+@@ -5646,7 +5646,7 @@
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE 28800
VARIABLE_SCOPE SESSION
@@ -1355,7 +1355,7 @@
VARIABLE_COMMENT The number of seconds the server waits for activity on a connection before closing it
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 31536000
-@@ -5609,7 +5609,7 @@
+@@ -5751,7 +5751,7 @@
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME OPEN_FILES_LIMIT
VARIABLE_SCOPE GLOBAL
@@ -1364,7 +1364,7 @@
VARIABLE_COMMENT If this is not 0, then mysqld will use this value to reserve file descriptors to use with setrlimit(). If this value is 0 or autoset then mysqld will reserve max_connections*5 or max_connections + table_cache*2 (whichever is larger) number of file descriptors
NUMERIC_MIN_VALUE 0
NUMERIC_MAX_VALUE 4294967295
-@@ -5622,7 +5622,7 @@
+@@ -5764,7 +5764,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Sets the internal state of the RAND() generator for replication purposes
NUMERIC_MIN_VALUE 0
@@ -1373,7 +1373,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -5632,7 +5632,7 @@
+@@ -5774,7 +5774,7 @@
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Sets the internal state of the RAND() generator for replication purposes
NUMERIC_MIN_VALUE 0
@@ -1382,7 +1382,7 @@
NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
-@@ -5727,7 +5727,7 @@
+@@ -5869,7 +5869,7 @@
VARIABLE_NAME LOG_TC_SIZE
GLOBAL_VALUE_ORIGIN AUTO
VARIABLE_SCOPE GLOBAL
diff --git a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result
index ca875e7f644..6a1dacfe67b 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_server_notembedded.result
@@ -40,6 +40,20 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST DEFAULT,COPY,INPLACE,NOCOPY,INSTANT
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
+VARIABLE_NAME ANALYZE_SAMPLE_PERCENTAGE
+SESSION_VALUE 100.000000
+GLOBAL_VALUE 100.000000
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 100.000000
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE DOUBLE
+VARIABLE_COMMENT Percentage of rows from the table ANALYZE TABLE will sample to collect table statistics. Set to 0 to let MariaDB decide what percentage of rows to sample.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 100
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
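A minimal sketch of how the new ANALYZE_SAMPLE_PERCENTAGE variable is meant to be used; the table name t1 and the 10% value are hypothetical and not part of this patch:

# sample roughly 10% of the rows while collecting statistics, or set 0 to let
# the server choose the sample size
SET SESSION analyze_sample_percentage = 10;
ANALYZE TABLE t1 PERSISTENT FOR ALL;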
VARIABLE_NAME AUTOCOMMIT
SESSION_VALUE ON
GLOBAL_VALUE ON
@@ -712,6 +726,20 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT NULL
+VARIABLE_NAME DEFAULT_PASSWORD_LIFETIME
+SESSION_VALUE NULL
+GLOBAL_VALUE 0
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE INT UNSIGNED
+VARIABLE_COMMENT This defines the global password expiration policy. 0 means automatic password expiration is disabled. If the value is a positive integer N, the passwords must be changed every N days. This behavior can be overriden using the password expiration options in ALTER USER.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 4294967295
+NUMERIC_BLOCK_SIZE 1
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
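A minimal sketch for the new DEFAULT_PASSWORD_LIFETIME policy; the account name app@localhost and the 90-day value are only illustrative:

# expire all passwords after 90 days by default
SET GLOBAL default_password_lifetime = 90;
# override the global policy for one account, as the comment above allows
ALTER USER 'app'@'localhost' PASSWORD EXPIRE NEVER;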
VARIABLE_NAME DEFAULT_REGEX_FLAGS
SESSION_VALUE
GLOBAL_VALUE
@@ -824,6 +852,20 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON,ALL
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
+VARIABLE_NAME DISCONNECT_ON_EXPIRED_PASSWORD
+SESSION_VALUE NULL
+GLOBAL_VALUE OFF
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE OFF
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT This variable controls how the server handles clients that are not aware of the sandbox mode. If enabled, the server disconnects the client, otherwise the server puts the client in a sandbox mode.
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT OPTIONAL
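A minimal sketch for the new DISCONNECT_ON_EXPIRED_PASSWORD switch, not taken from this patch:

# disconnect clients that cannot handle sandbox mode once their password has
# expired, instead of placing them in sandbox mode
SET GLOBAL disconnect_on_expired_password = ON;
SELECT @@global.disconnect_on_expired_password;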
VARIABLE_NAME DIV_PRECISION_INCREMENT
SESSION_VALUE 4
GLOBAL_VALUE 5
@@ -895,10 +937,10 @@ ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME EQ_RANGE_INDEX_DIVE_LIMIT
-SESSION_VALUE 0
-GLOBAL_VALUE 0
+SESSION_VALUE 200
+GLOBAL_VALUE 200
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE 0
+DEFAULT_VALUE 200
VARIABLE_SCOPE SESSION
VARIABLE_TYPE INT UNSIGNED
VARIABLE_COMMENT The optimizer will use existing index statistics instead of doing index dives for equality ranges if the number of equality ranges for the index is larger than or equal to this number. If set to 0, index dives are always used.
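Since the default above changes from 0 (always dive) to 200, a session can still choose a different threshold. A minimal sketch; the value 50 is only illustrative:

# fall back to index statistics once a condition produces 50 or more equality
# ranges; 0 would force index dives for every range
SET SESSION eq_range_index_dive_limit = 50;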
@@ -1202,6 +1244,20 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT NULL
+VARIABLE_NAME GTID_CLEANUP_BATCH_SIZE
+SESSION_VALUE NULL
+GLOBAL_VALUE 64
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 64
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE INT UNSIGNED
+VARIABLE_COMMENT Normally does not need tuning. How many old rows must accumulate in the mysql.gtid_slave_pos table before a background job will be run to delete them. Can be increased to reduce number of commits if using many different engines with --gtid_pos_auto_engines, or to reduce CPU overhead if using a huge number of different gtid_domain_ids. Can be decreased to reduce number of old rows in the table.
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 2147483647
+NUMERIC_BLOCK_SIZE 1
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
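A minimal sketch for the new GTID_CLEANUP_BATCH_SIZE knob; the value 1024 is only illustrative:

# let more old rows accumulate in mysql.gtid_slave_pos before the background
# delete job runs, trading table size for fewer commits
SET GLOBAL gtid_cleanup_batch_size = 1024;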
VARIABLE_NAME GTID_CURRENT_POS
SESSION_VALUE NULL
GLOBAL_VALUE
@@ -1413,10 +1469,10 @@ ENUM_VALUE_LIST NULL
READ_ONLY YES
COMMAND_LINE_ARGUMENT NULL
VARIABLE_NAME HISTOGRAM_SIZE
-SESSION_VALUE 0
-GLOBAL_VALUE 0
+SESSION_VALUE 254
+GLOBAL_VALUE 254
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE 0
+DEFAULT_VALUE 254
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Number of bytes used for a histogram. If set to 0, no histograms are created by ANALYZE.
@@ -1427,10 +1483,10 @@ ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME HISTOGRAM_TYPE
-SESSION_VALUE SINGLE_PREC_HB
-GLOBAL_VALUE SINGLE_PREC_HB
+SESSION_VALUE DOUBLE_PREC_HB
+GLOBAL_VALUE DOUBLE_PREC_HB
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE SINGLE_PREC_HB
+DEFAULT_VALUE DOUBLE_PREC_HB
VARIABLE_SCOPE SESSION
VARIABLE_TYPE ENUM
VARIABLE_COMMENT Specifies type of the histograms created by ANALYZE. Possible values are: SINGLE_PREC_HB - single precision height-balanced, DOUBLE_PREC_HB - double precision height-balanced.
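Given the new defaults above (histogram_size 254, DOUBLE_PREC_HB), a session can still request them explicitly when collecting statistics. A minimal sketch; the table name t1 is hypothetical:

# collect double-precision height-balanced histograms for all columns of t1
SET SESSION histogram_size = 254, histogram_type = 'DOUBLE_PREC_HB';
ANALYZE TABLE t1 PERSISTENT FOR ALL;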
@@ -2406,6 +2462,20 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME MAX_PASSWORD_ERRORS
+SESSION_VALUE NULL
+GLOBAL_VALUE 4294967295
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 4294967295
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE INT UNSIGNED
+VARIABLE_COMMENT If there is more than this number of failed connect attempts due to invalid password, user will be blocked from further connections until FLUSH_PRIVILEGES.
+NUMERIC_MIN_VALUE 1
+NUMERIC_MAX_VALUE 4294967295
+NUMERIC_BLOCK_SIZE 1
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
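A minimal sketch for the new MAX_PASSWORD_ERRORS limit; the value 5 is only illustrative:

# block a user after 5 consecutive failed password attempts; per the comment
# above, FLUSH PRIVILEGES lifts the block again
SET GLOBAL max_password_errors = 5;
FLUSH PRIVILEGES;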
VARIABLE_NAME MAX_PREPARED_STMT_COUNT
SESSION_VALUE NULL
GLOBAL_VALUE 16382
@@ -2448,6 +2518,20 @@ NUMERIC_BLOCK_SIZE 4096
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME MAX_ROWID_FILTER_SIZE
+SESSION_VALUE 131072
+GLOBAL_VALUE 131072
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 131072
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_COMMENT The maximum size of the container of a rowid filter
+NUMERIC_MIN_VALUE 1024
+NUMERIC_MAX_VALUE 18446744073709551615
+NUMERIC_BLOCK_SIZE 1
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
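A minimal sketch for the new MAX_ROWID_FILTER_SIZE variable, which pairs with the rowid_filter optimizer_switch flag added in this result file; the value is only illustrative:

# allow larger rowid filter containers and confirm the optimization is enabled
SET SESSION max_rowid_filter_size = 262144;
SELECT @@optimizer_switch LIKE '%rowid_filter=on%';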
VARIABLE_NAME MAX_SEEKS_FOR_KEY
SESSION_VALUE 4294967295
GLOBAL_VALUE 4294967295
@@ -2925,24 +3009,52 @@ ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_SWITCH
-SESSION_VALUE index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
-GLOBAL_VALUE index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+SESSION_VALUE index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
+GLOBAL_VALUE index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+DEFAULT_VALUE index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
VARIABLE_SCOPE SESSION
VARIABLE_TYPE FLAGSET
VARIABLE_COMMENT Fine-tune the optimizer behavior
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
-ENUM_VALUE_LIST index_merge,index_merge_union,index_merge_sort_union,index_merge_intersection,index_merge_sort_intersection,engine_condition_pushdown,index_condition_pushdown,derived_merge,derived_with_keys,firstmatch,loosescan,materialization,in_to_exists,semijoin,partial_match_rowid_merge,partial_match_table_scan,subquery_cache,mrr,mrr_cost_based,mrr_sort_keys,outer_join_with_cache,semijoin_with_cache,join_cache_incremental,join_cache_hashed,join_cache_bka,optimize_join_buffer_size,table_elimination,extended_keys,exists_to_in,orderby_uses_equalities,condition_pushdown_for_derived,split_materialized,default
+ENUM_VALUE_LIST index_merge,index_merge_union,index_merge_sort_union,index_merge_intersection,index_merge_sort_intersection,engine_condition_pushdown,index_condition_pushdown,derived_merge,derived_with_keys,firstmatch,loosescan,materialization,in_to_exists,semijoin,partial_match_rowid_merge,partial_match_table_scan,subquery_cache,mrr,mrr_cost_based,mrr_sort_keys,outer_join_with_cache,semijoin_with_cache,join_cache_incremental,join_cache_hashed,join_cache_bka,optimize_join_buffer_size,table_elimination,extended_keys,exists_to_in,orderby_uses_equalities,condition_pushdown_for_derived,split_materialized,condition_pushdown_for_subquery,rowid_filter,condition_pushdown_from_having,default
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_TRACE
+SESSION_VALUE enabled=off
+GLOBAL_VALUE enabled=off
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE enabled=off
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE FLAGSET
+VARIABLE_COMMENT Controls tracing of the Optimizer: optimizer_trace=option=val[,option=val...], where option is one of {enabled} and val is one of {on, off, default}
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST enabled,default
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME OPTIMIZER_TRACE_MAX_MEM_SIZE
+SESSION_VALUE 1048576
+GLOBAL_VALUE 1048576
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 1048576
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_COMMENT Maximum allowed size of an optimizer trace
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 18446744073709551615
+NUMERIC_BLOCK_SIZE 1
+ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME OPTIMIZER_USE_CONDITION_SELECTIVITY
-SESSION_VALUE 1
-GLOBAL_VALUE 1
+SESSION_VALUE 4
+GLOBAL_VALUE 4
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE 1
+DEFAULT_VALUE 4
VARIABLE_SCOPE SESSION
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Controls selectivity of which conditions the optimizer takes into account to calculate cardinality of a partial join when it searches for the best execution plan Meaning: 1 - use selectivity of index backed range conditions to calculate the cardinality of a partial join if the last joined table is accessed by full table scan or an index scan, 2 - use selectivity of index backed range conditions to calculate the cardinality of a partial join in any case, 3 - additionally always use selectivity of range conditions that are not backed by any index to calculate the cardinality of a partial join, 4 - use histograms to calculate selectivity of range conditions that are not backed by any index to calculate the cardinality of a partial join.5 - additionally use selectivity of certain non-range predicates calculated on record samples
@@ -3276,9 +3388,9 @@ READ_ONLY YES
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME PERFORMANCE_SCHEMA_MAX_STATEMENT_CLASSES
SESSION_VALUE NULL
-GLOBAL_VALUE 200
+GLOBAL_VALUE 202
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE 200
+DEFAULT_VALUE 202
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT Maximum number of statement instruments.
@@ -4564,12 +4676,12 @@ READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME SLAVE_TRANSACTION_RETRY_ERRORS
SESSION_VALUE NULL
-GLOBAL_VALUE 1213,1205
+GLOBAL_VALUE 1158,1159,1160,1161,1205,1213,1429,2013,12701
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE VARCHAR
-VARIABLE_COMMENT Tells the slave thread to retry transaction for replication when a query event returns an error from the provided list. Deadlock and elapsed lock wait timeout errors are automatically added to this list
+VARIABLE_COMMENT Tells the slave thread to retry transaction for replication when a query event returns an error from the provided list. Deadlock error, elapsed lock wait timeout, net read error, net read timeout, net write error, net write timeout, connect error and 2 types of lost connection error are automatically added to this list
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
@@ -4755,7 +4867,7 @@ VARIABLE_COMMENT Sets the sql mode
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
-ENUM_VALUE_LIST REAL_AS_FLOAT,PIPES_AS_CONCAT,ANSI_QUOTES,IGNORE_SPACE,IGNORE_BAD_TABLE_OPTIONS,ONLY_FULL_GROUP_BY,NO_UNSIGNED_SUBTRACTION,NO_DIR_IN_CREATE,POSTGRESQL,ORACLE,MSSQL,DB2,MAXDB,NO_KEY_OPTIONS,NO_TABLE_OPTIONS,NO_FIELD_OPTIONS,MYSQL323,MYSQL40,ANSI,NO_AUTO_VALUE_ON_ZERO,NO_BACKSLASH_ESCAPES,STRICT_TRANS_TABLES,STRICT_ALL_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ALLOW_INVALID_DATES,ERROR_FOR_DIVISION_BY_ZERO,TRADITIONAL,NO_AUTO_CREATE_USER,HIGH_NOT_PRECEDENCE,NO_ENGINE_SUBSTITUTION,PAD_CHAR_TO_FULL_LENGTH,EMPTY_STRING_IS_NULL,SIMULTANEOUS_ASSIGNMENT
+ENUM_VALUE_LIST REAL_AS_FLOAT,PIPES_AS_CONCAT,ANSI_QUOTES,IGNORE_SPACE,IGNORE_BAD_TABLE_OPTIONS,ONLY_FULL_GROUP_BY,NO_UNSIGNED_SUBTRACTION,NO_DIR_IN_CREATE,POSTGRESQL,ORACLE,MSSQL,DB2,MAXDB,NO_KEY_OPTIONS,NO_TABLE_OPTIONS,NO_FIELD_OPTIONS,MYSQL323,MYSQL40,ANSI,NO_AUTO_VALUE_ON_ZERO,NO_BACKSLASH_ESCAPES,STRICT_TRANS_TABLES,STRICT_ALL_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ALLOW_INVALID_DATES,ERROR_FOR_DIVISION_BY_ZERO,TRADITIONAL,NO_AUTO_CREATE_USER,HIGH_NOT_PRECEDENCE,NO_ENGINE_SUBSTITUTION,PAD_CHAR_TO_FULL_LENGTH,EMPTY_STRING_IS_NULL,SIMULTANEOUS_ASSIGNMENT,TIME_ROUND_FRACTIONAL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME SQL_NOTES
@@ -5178,6 +5290,20 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME TCP_NODELAY
+SESSION_VALUE ON
+GLOBAL_VALUE ON
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE ON
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE BOOLEAN
+VARIABLE_COMMENT Set option TCP_NODELAY (disable Nagle's algorithm) on socket
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST OFF,ON
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME THREAD_CACHE_SIZE
SESSION_VALUE NULL
GLOBAL_VALUE 151
@@ -5543,17 +5669,17 @@ ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
VARIABLE_NAME USE_STAT_TABLES
-SESSION_VALUE NEVER
-GLOBAL_VALUE NEVER
-GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE NEVER
+SESSION_VALUE PREFERABLY
+GLOBAL_VALUE PREFERABLY
+GLOBAL_VALUE_ORIGIN CONFIG
+DEFAULT_VALUE PREFERABLY_FOR_QUERIES
VARIABLE_SCOPE SESSION
VARIABLE_TYPE ENUM
VARIABLE_COMMENT Specifies how to use system statistics tables
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
-ENUM_VALUE_LIST NEVER,COMPLEMENTARY,PREFERABLY
+ENUM_VALUE_LIST NEVER,COMPLEMENTARY,PREFERABLY,COMPLEMENTARY_FOR_QUERIES,PREFERABLY_FOR_QUERIES
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME WAIT_TIMEOUT
diff --git a/mysql-test/suite/sys_vars/r/sysvars_wsrep.result b/mysql-test/suite/sys_vars/r/sysvars_wsrep.result
index e54afd2d64a..5ec5b9ccf30 100644
--- a/mysql-test/suite/sys_vars/r/sysvars_wsrep.result
+++ b/mysql-test/suite/sys_vars/r/sysvars_wsrep.result
@@ -129,18 +129,18 @@ READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME WSREP_DEBUG
SESSION_VALUE NULL
-GLOBAL_VALUE OFF
+GLOBAL_VALUE NONE
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE OFF
+DEFAULT_VALUE NONE
VARIABLE_SCOPE GLOBAL
-VARIABLE_TYPE BOOLEAN
-VARIABLE_COMMENT To enable debug level logging
+VARIABLE_TYPE ENUM
+VARIABLE_COMMENT WSREP debug level logging
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
-ENUM_VALUE_LIST OFF,ON
+ENUM_VALUE_LIST NONE,SERVER,TRANSACTION,STREAMING,CLIENT
READ_ONLY NO
-COMMAND_LINE_ARGUMENT OPTIONAL
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME WSREP_DESYNC
SESSION_VALUE NULL
GLOBAL_VALUE OFF
@@ -225,14 +225,28 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
+VARIABLE_NAME WSREP_IGNORE_APPLY_ERRORS
+SESSION_VALUE NULL
+GLOBAL_VALUE 7
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 7
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE INT UNSIGNED
+VARIABLE_COMMENT Ignore replication errors
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 7
+NUMERIC_BLOCK_SIZE 1
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME WSREP_LOAD_DATA_SPLITTING
SESSION_VALUE NULL
-GLOBAL_VALUE ON
+GLOBAL_VALUE OFF
GLOBAL_VALUE_ORIGIN COMPILE-TIME
-DEFAULT_VALUE ON
+DEFAULT_VALUE OFF
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
-VARIABLE_COMMENT To commit LOAD DATA transaction after every 10K rows inserted
+VARIABLE_COMMENT To commit LOAD DATA transaction after every 10K rows inserted (deprecated)
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
@@ -533,6 +547,20 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
+VARIABLE_NAME WSREP_SR_STORE
+SESSION_VALUE NULL
+GLOBAL_VALUE table
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE table
+VARIABLE_SCOPE GLOBAL
+VARIABLE_TYPE ENUM
+VARIABLE_COMMENT Storage for streaming replication fragments
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST none,table
+READ_ONLY YES
+COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME WSREP_SST_AUTH
SESSION_VALUE NULL
GLOBAL_VALUE
@@ -631,3 +659,31 @@ NUMERIC_BLOCK_SIZE 1
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT OPTIONAL
+VARIABLE_NAME WSREP_TRX_FRAGMENT_SIZE
+SESSION_VALUE 0
+GLOBAL_VALUE 0
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE 0
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE BIGINT UNSIGNED
+VARIABLE_COMMENT Size of transaction fragments for streaming replication (measured in units of 'wsrep_trx_fragment_unit')
+NUMERIC_MIN_VALUE 0
+NUMERIC_MAX_VALUE 2147483647
+NUMERIC_BLOCK_SIZE 1
+ENUM_VALUE_LIST NULL
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
+VARIABLE_NAME WSREP_TRX_FRAGMENT_UNIT
+SESSION_VALUE bytes
+GLOBAL_VALUE bytes
+GLOBAL_VALUE_ORIGIN COMPILE-TIME
+DEFAULT_VALUE bytes
+VARIABLE_SCOPE SESSION
+VARIABLE_TYPE ENUM
+VARIABLE_COMMENT Unit for streaming replication transaction fragments' size: bytes, rows, statements
+NUMERIC_MIN_VALUE NULL
+NUMERIC_MAX_VALUE NULL
+NUMERIC_BLOCK_SIZE NULL
+ENUM_VALUE_LIST bytes,rows,statements
+READ_ONLY NO
+COMMAND_LINE_ARGUMENT REQUIRED
diff --git a/mysql-test/suite/sys_vars/r/tcp_nodelay.result b/mysql-test/suite/sys_vars/r/tcp_nodelay.result
new file mode 100644
index 00000000000..ebb027e8cf6
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/tcp_nodelay.result
@@ -0,0 +1,5 @@
+SET tcp_nodelay=0;
+SET tcp_nodelay=1;
+SET tcp_nodelay=default;
+SET GLOBAL tcp_nodelay=0;
+ERROR HY000: Variable 'tcp_nodelay' is a SESSION variable
diff --git a/mysql-test/suite/sys_vars/r/use_stat_tables_basic.result b/mysql-test/suite/sys_vars/r/use_stat_tables_basic.result
index 64f6d868fa6..1de75b65caf 100644
--- a/mysql-test/suite/sys_vars/r/use_stat_tables_basic.result
+++ b/mysql-test/suite/sys_vars/r/use_stat_tables_basic.result
@@ -1,16 +1,16 @@
SET @start_global_value = @@global.use_stat_tables;
SELECT @start_global_value;
@start_global_value
-NEVER
+PREFERABLY
SET @start_session_value = @@session.use_stat_tables;
SELECT @start_session_value;
@start_session_value
-NEVER
+PREFERABLY
SET @@global.use_stat_tables = 2;
SET @@global.use_stat_tables = DEFAULT;
SELECT @@global.use_stat_tables;
@@global.use_stat_tables
-NEVER
+PREFERABLY_FOR_QUERIES
SET @@global.use_stat_tables = 0;
SELECT @@global.use_stat_tables;
@@global.use_stat_tables
@@ -87,9 +87,9 @@ USE_STAT_TABLES COMPLEMENTARY
SET @@global.use_stat_tables = @start_global_value;
SELECT @@global.use_stat_tables;
@@global.use_stat_tables
-NEVER
+PREFERABLY
SET @@session.use_stat_tables = @start_session_value;
SELECT @@session.use_stat_tables;
@@session.use_stat_tables
-NEVER
+PREFERABLY
set sql_mode='';
diff --git a/mysql-test/suite/sys_vars/r/wsrep_debug_basic.result b/mysql-test/suite/sys_vars/r/wsrep_debug_basic.result
index 96c262c110c..47d00f5dede 100644
--- a/mysql-test/suite/sys_vars/r/wsrep_debug_basic.result
+++ b/mysql-test/suite/sys_vars/r/wsrep_debug_basic.result
@@ -6,39 +6,41 @@ SET @wsrep_debug_global_saved = @@global.wsrep_debug;
# default
SELECT @@global.wsrep_debug;
@@global.wsrep_debug
-0
+NONE
# scope
SELECT @@session.wsrep_debug;
ERROR HY000: Variable 'wsrep_debug' is a GLOBAL variable
-SET @@global.wsrep_debug=OFF;
+SET @@global.wsrep_debug=0;
SELECT @@global.wsrep_debug;
@@global.wsrep_debug
-0
-SET @@global.wsrep_debug=ON;
+NONE
+SET @@global.wsrep_debug=1;
SELECT @@global.wsrep_debug;
@@global.wsrep_debug
-1
+SERVER
# valid values
-SET @@global.wsrep_debug='OFF';
+SET @@global.wsrep_debug=NONE;
SELECT @@global.wsrep_debug;
@@global.wsrep_debug
-0
-SET @@global.wsrep_debug=ON;
+NONE
+SET @@global.wsrep_debug=SERVER;
SELECT @@global.wsrep_debug;
@@global.wsrep_debug
-1
+SERVER
SET @@global.wsrep_debug=default;
SELECT @@global.wsrep_debug;
@@global.wsrep_debug
-0
+NONE
# invalid values
SET @@global.wsrep_debug=NULL;
ERROR 42000: Variable 'wsrep_debug' can't be set to the value of 'NULL'
SET @@global.wsrep_debug='junk';
ERROR 42000: Variable 'wsrep_debug' can't be set to the value of 'junk'
+SET @@global.wsrep_debug=ON;
+ERROR 42000: Variable 'wsrep_debug' can't be set to the value of 'ON'
# restore the initial value
SET @@global.wsrep_debug = @wsrep_debug_global_saved;
diff --git a/mysql-test/suite/sys_vars/r/wsrep_load_data_splitting_basic.result b/mysql-test/suite/sys_vars/r/wsrep_load_data_splitting_basic.result
index 687934a7705..3171a690486 100644
--- a/mysql-test/suite/sys_vars/r/wsrep_load_data_splitting_basic.result
+++ b/mysql-test/suite/sys_vars/r/wsrep_load_data_splitting_basic.result
@@ -6,33 +6,43 @@ SET @wsrep_load_data_splitting_global_saved = @@global.wsrep_load_data_splitting
# default
SELECT @@global.wsrep_load_data_splitting;
@@global.wsrep_load_data_splitting
-1
+0
# scope
SELECT @@session.wsrep_load_data_splitting;
ERROR HY000: Variable 'wsrep_load_data_splitting' is a GLOBAL variable
SET @@global.wsrep_load_data_splitting=OFF;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
SELECT @@global.wsrep_load_data_splitting;
@@global.wsrep_load_data_splitting
0
SET @@global.wsrep_load_data_splitting=ON;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
SELECT @@global.wsrep_load_data_splitting;
@@global.wsrep_load_data_splitting
1
# valid values
SET @@global.wsrep_load_data_splitting='OFF';
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
SELECT @@global.wsrep_load_data_splitting;
@@global.wsrep_load_data_splitting
0
SET @@global.wsrep_load_data_splitting=ON;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
SELECT @@global.wsrep_load_data_splitting;
@@global.wsrep_load_data_splitting
1
SET @@global.wsrep_load_data_splitting=default;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
SELECT @@global.wsrep_load_data_splitting;
@@global.wsrep_load_data_splitting
-1
+0
# invalid values
SET @@global.wsrep_load_data_splitting=NULL;
@@ -42,4 +52,6 @@ ERROR 42000: Variable 'wsrep_load_data_splitting' can't be set to the value of '
# restore the initial value
SET @@global.wsrep_load_data_splitting = @wsrep_load_data_splitting_global_saved;
+Warnings:
+Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
# End of test
diff --git a/mysql-test/suite/sys_vars/r/wsrep_provider_options_basic.result b/mysql-test/suite/sys_vars/r/wsrep_provider_options_basic.result
index b2e07c55b38..15949a14e39 100644
--- a/mysql-test/suite/sys_vars/r/wsrep_provider_options_basic.result
+++ b/mysql-test/suite/sys_vars/r/wsrep_provider_options_basic.result
@@ -2,9 +2,6 @@
# wsrep_provider_options
#
call mtr.add_suppression("WSREP: Failed to get provider options");
-SET @@global.wsrep_provider = @@global.wsrep_provider;
-# save the initial value
-SET @wsrep_provider_options_global_saved = @@global.wsrep_provider_options;
# default
SELECT @@global.wsrep_provider_options;
@@global.wsrep_provider_options
@@ -16,18 +13,21 @@ ERROR HY000: Variable 'wsrep_provider_options' is a GLOBAL variable
SET @@global.wsrep_provider_options='option1';
SELECT @@global.wsrep_provider_options;
@@global.wsrep_provider_options
-option1
+
# valid values
SET @@global.wsrep_provider_options='name1=value1;name2=value2';
+ERROR HY000: WSREP (galera) not started
SELECT @@global.wsrep_provider_options;
@@global.wsrep_provider_options
-name1=value1;name2=value2
+
SET @@global.wsrep_provider_options='hyphenated-name:value';
+ERROR HY000: WSREP (galera) not started
SELECT @@global.wsrep_provider_options;
@@global.wsrep_provider_options
-hyphenated-name:value
+
SET @@global.wsrep_provider_options=default;
+ERROR HY000: WSREP (galera) not started
SELECT @@global.wsrep_provider_options;
@@global.wsrep_provider_options
@@ -42,8 +42,5 @@ SET @@global.wsrep_provider_options=NULL;
Got one of the listed errors
SELECT @@global.wsrep_provider_options;
@@global.wsrep_provider_options
-NULL
-# restore the initial value
-SET @@global.wsrep_provider_options = @wsrep_provider_options_global_saved;
# End of test
diff --git a/mysql-test/suite/sys_vars/t/innodb_checksum_algorithm_basic.test b/mysql-test/suite/sys_vars/t/innodb_checksum_algorithm_basic.test
index bb0f3417f87..947007a5dd1 100644
--- a/mysql-test/suite/sys_vars/t/innodb_checksum_algorithm_basic.test
+++ b/mysql-test/suite/sys_vars/t/innodb_checksum_algorithm_basic.test
@@ -22,6 +22,12 @@ SELECT @@global.innodb_checksum_algorithm;
SET GLOBAL innodb_checksum_algorithm = 'strict_none';
SELECT @@global.innodb_checksum_algorithm;
+SET GLOBAL innodb_checksum_algorithm = 'full_crc32';
+SELECT @@global.innodb_checksum_algorithm;
+
+SET GLOBAL innodb_checksum_algorithm = 'strict_full_crc32';
+SELECT @@global.innodb_checksum_algorithm;
+
-- error ER_WRONG_VALUE_FOR_VAR
SET GLOBAL innodb_checksum_algorithm = '';
SELECT @@global.innodb_checksum_algorithm;
diff --git a/mysql-test/suite/sys_vars/t/myisam_stats_method_func.test b/mysql-test/suite/sys_vars/t/myisam_stats_method_func.test
index 42335f00c41..5cb01958aff 100644
--- a/mysql-test/suite/sys_vars/t/myisam_stats_method_func.test
+++ b/mysql-test/suite/sys_vars/t/myisam_stats_method_func.test
@@ -65,6 +65,8 @@ SHOW INDEX FROM t1;
--echo 'Set nulls to be equal'
#=====================================
SET myisam_stats_method = nulls_equal;
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= COMPLEMENTARY;
INSERT INTO t1 VALUES (11);
DELETE FROM t1 WHERE a = 11;
@@ -75,6 +77,7 @@ DELETE FROM t1 WHERE a = 11;
CHECK TABLE t1;
SHOW INDEX FROM t1;
+
#=====================================
--echo 'Set nulls to be ignored'
#=====================================
@@ -98,6 +101,7 @@ ANALYZE TABLE t1;
SHOW INDEX FROM t1;
SET myisam_stats_method = DEFAULT;
+set @@use_stat_tables= @save_use_stat_tables;
DROP TABLE t1;
SET @@global.myisam_stats_method= @start_value;
diff --git a/mysql-test/suite/sys_vars/t/shared_memory_base_name_basic.test b/mysql-test/suite/sys_vars/t/shared_memory_base_name_basic.test
deleted file mode 100644
index da165564791..00000000000
--- a/mysql-test/suite/sys_vars/t/shared_memory_base_name_basic.test
+++ /dev/null
@@ -1,25 +0,0 @@
---source include/windows.inc
---source include/not_embedded.inc
-#
-# only global
-#
---replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
-select @@global.shared_memory_base_name;
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-select @@session.shared_memory_base_name;
---replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
-show global variables like 'shared_memory_base_name';
---replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
-show session variables like 'shared_memory_base_name';
---replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
-select * from information_schema.global_variables where variable_name='shared_memory_base_name';
---replace_result $MYSQL_TMP_DIR MYSQL_TMP_DIR
-select * from information_schema.session_variables where variable_name='shared_memory_base_name';
-
-#
-# show that it's read-only
-#
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-set global shared_memory_base_name=1;
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-set session shared_memory_base_name=1;
diff --git a/mysql-test/suite/sys_vars/t/shared_memory_basic.test b/mysql-test/suite/sys_vars/t/shared_memory_basic.test
deleted file mode 100644
index 57be4ef7ecd..00000000000
--- a/mysql-test/suite/sys_vars/t/shared_memory_basic.test
+++ /dev/null
@@ -1,20 +0,0 @@
---source include/windows.inc
---source include/not_embedded.inc
-#
-# only global
-#
-select @@global.shared_memory;
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-select @@session.shared_memory;
-show global variables like 'shared_memory';
-show session variables like 'shared_memory';
-select * from information_schema.global_variables where variable_name='shared_memory';
-select * from information_schema.session_variables where variable_name='shared_memory';
-
-#
-# show that it's read-only
-#
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-set global shared_memory=1;
---error ER_INCORRECT_GLOBAL_LOCAL_VAR
-set session shared_memory=1;
diff --git a/mysql-test/suite/sys_vars/t/sql_mode_basic.test b/mysql-test/suite/sys_vars/t/sql_mode_basic.test
index b4841ecd3ff..68be8ba969b 100644
--- a/mysql-test/suite/sys_vars/t/sql_mode_basic.test
+++ b/mysql-test/suite/sys_vars/t/sql_mode_basic.test
@@ -310,8 +310,14 @@ SELECT @@global.sql_mode;
SET @@global.sql_mode = 17179869183;
SELECT @@global.sql_mode;
---Error ER_WRONG_VALUE_FOR_VAR
SET @@global.sql_mode = 17179869184;
+SELECT @@global.sql_mode;
+
+SET @@global.sql_mode = 34359738367;
+SELECT @@global.sql_mode;
+
+--Error ER_WRONG_VALUE_FOR_VAR
+SET @@global.sql_mode = 34359738368;
# use of decimal values
diff --git a/mysql-test/suite/sys_vars/t/tcp_nodelay.test b/mysql-test/suite/sys_vars/t/tcp_nodelay.test
new file mode 100644
index 00000000000..780bad68039
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/tcp_nodelay.test
@@ -0,0 +1,7 @@
+--source include/not_embedded.inc
+# A smoke test for tcp_nodelay option
+SET tcp_nodelay=0;
+SET tcp_nodelay=1;
+SET tcp_nodelay=default;
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SET GLOBAL tcp_nodelay=0;
diff --git a/mysql-test/suite/sys_vars/t/wsrep_debug_basic.test b/mysql-test/suite/sys_vars/t/wsrep_debug_basic.test
index 50576ff064e..165560bb25a 100644
--- a/mysql-test/suite/sys_vars/t/wsrep_debug_basic.test
+++ b/mysql-test/suite/sys_vars/t/wsrep_debug_basic.test
@@ -14,16 +14,16 @@ SELECT @@global.wsrep_debug;
--echo # scope
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
SELECT @@session.wsrep_debug;
-SET @@global.wsrep_debug=OFF;
+SET @@global.wsrep_debug=0;
SELECT @@global.wsrep_debug;
-SET @@global.wsrep_debug=ON;
+SET @@global.wsrep_debug=1;
SELECT @@global.wsrep_debug;
--echo
--echo # valid values
-SET @@global.wsrep_debug='OFF';
+SET @@global.wsrep_debug=NONE;
SELECT @@global.wsrep_debug;
-SET @@global.wsrep_debug=ON;
+SET @@global.wsrep_debug=SERVER;
SELECT @@global.wsrep_debug;
SET @@global.wsrep_debug=default;
SELECT @@global.wsrep_debug;
@@ -34,6 +34,8 @@ SELECT @@global.wsrep_debug;
SET @@global.wsrep_debug=NULL;
--error ER_WRONG_VALUE_FOR_VAR
SET @@global.wsrep_debug='junk';
+--error ER_WRONG_VALUE_FOR_VAR
+SET @@global.wsrep_debug=ON;
--echo
--echo # restore the initial value
diff --git a/mysql-test/suite/sys_vars/t/wsrep_provider_options_basic.test b/mysql-test/suite/sys_vars/t/wsrep_provider_options_basic.test
index d2ea32a0637..6eb3a94b6a4 100644
--- a/mysql-test/suite/sys_vars/t/wsrep_provider_options_basic.test
+++ b/mysql-test/suite/sys_vars/t/wsrep_provider_options_basic.test
@@ -6,11 +6,6 @@
call mtr.add_suppression("WSREP: Failed to get provider options");
-SET @@global.wsrep_provider = @@global.wsrep_provider;
-
---echo # save the initial value
-SET @wsrep_provider_options_global_saved = @@global.wsrep_provider_options;
-
--echo # default
SELECT @@global.wsrep_provider_options;
@@ -24,13 +19,13 @@ SELECT @@global.wsrep_provider_options;
--echo
--echo # valid values
---error 0,ER_WRONG_ARGUMENTS
+--error ER_WRONG_ARGUMENTS
SET @@global.wsrep_provider_options='name1=value1;name2=value2';
SELECT @@global.wsrep_provider_options;
---error 0,ER_WRONG_ARGUMENTS
+--error ER_WRONG_ARGUMENTS
SET @@global.wsrep_provider_options='hyphenated-name:value';
SELECT @@global.wsrep_provider_options;
---error 0,ER_WRONG_ARGUMENTS
+--error ER_WRONG_ARGUMENTS
SET @@global.wsrep_provider_options=default;
SELECT @@global.wsrep_provider_options;
@@ -43,9 +38,4 @@ SELECT @@global.wsrep_provider_options;
SET @@global.wsrep_provider_options=NULL;
SELECT @@global.wsrep_provider_options;
---echo
---echo # restore the initial value
---error 0,ER_WRONG_ARGUMENTS
-SET @@global.wsrep_provider_options = @wsrep_provider_options_global_saved;
-
--echo # End of test
diff --git a/mysql-test/suite/unit/suite.pm b/mysql-test/suite/unit/suite.pm
index c8180c59240..b7a1f9ae871 100644
--- a/mysql-test/suite/unit/suite.pm
+++ b/mysql-test/suite/unit/suite.pm
@@ -44,6 +44,8 @@ sub start_test {
my (@ctest_list)= `cd "$bin" && ctest $ctest_vs --show-only --verbose`;
return "No ctest" if $?;
+ $ENV{MYSQL_TEST_PLUGINDIR}=$::plugindir;
+
my ($command, %tests, $prefix);
for (@ctest_list) {
chomp;
diff --git a/mysql-test/suite/vcol/inc/vcol_select.inc b/mysql-test/suite/vcol/inc/vcol_select.inc
index 0641e14564a..cbd1f2cdd26 100644
--- a/mysql-test/suite/vcol/inc/vcol_select.inc
+++ b/mysql-test/suite/vcol/inc/vcol_select.inc
@@ -35,7 +35,7 @@ insert into t2 (a) values (1);
create table t3 (a int primary key,
b int as (-a),
c int as (-a) persistent unique);
-insert into t3 (a) values (2),(1),(3);
+insert into t3 (a) values (2),(1),(3),(5),(4),(7);
--echo # select_type=SIMPLE, type=system
diff --git a/mysql-test/suite/vcol/r/update.result b/mysql-test/suite/vcol/r/update.result
index 2576859b00b..5a1caaeb5eb 100644
--- a/mysql-test/suite/vcol/r/update.result
+++ b/mysql-test/suite/vcol/r/update.result
@@ -122,7 +122,7 @@ select * from t;
a b c d e
10 5 5 5 5
replace delayed t (a,b,d) values (10,6,6);
-flush tables;
+flush tables t;
check table t;
Table Op Msg_type Msg_text
test.t check status OK
@@ -130,7 +130,7 @@ select * from t;
a b c d e
10 6 6 6 6
insert delayed t(a,b,d) values (10,6,6) on duplicate key update b=7, d=7;
-flush tables;
+flush tables t;
check table t;
Table Op Msg_type Msg_text
test.t check status OK
diff --git a/mysql-test/suite/vcol/r/update_binlog.result b/mysql-test/suite/vcol/r/update_binlog.result
index 9a22005f062..828452bf084 100644
--- a/mysql-test/suite/vcol/r/update_binlog.result
+++ b/mysql-test/suite/vcol/r/update_binlog.result
@@ -124,7 +124,7 @@ select * from t;
a b c d e
10 5 5 5 5
replace delayed t (a,b,d) values (10,6,6);
-flush tables;
+flush tables t;
check table t;
Table Op Msg_type Msg_text
test.t check status OK
@@ -132,7 +132,7 @@ select * from t;
a b c d e
10 6 6 6 6
insert delayed t(a,b,d) values (10,6,6) on duplicate key update b=7, d=7;
-flush tables;
+flush tables t;
check table t;
Table Op Msg_type Msg_text
test.t check status OK
@@ -304,7 +304,7 @@ select * from t;
a b c d e
10 5 5 5 5
replace delayed t (a,b,d) values (10,6,6);
-flush tables;
+flush tables t;
check table t;
Table Op Msg_type Msg_text
test.t check status OK
@@ -312,7 +312,7 @@ select * from t;
a b c d e
10 6 6 6 6
insert delayed t(a,b,d) values (10,6,6) on duplicate key update b=7, d=7;
-flush tables;
+flush tables t;
check table t;
Table Op Msg_type Msg_text
test.t check status OK
diff --git a/mysql-test/suite/vcol/r/vcol_keys_myisam.result b/mysql-test/suite/vcol/r/vcol_keys_myisam.result
index b7086600ab1..9400127211c 100644
--- a/mysql-test/suite/vcol/r/vcol_keys_myisam.result
+++ b/mysql-test/suite/vcol/r/vcol_keys_myisam.result
@@ -277,6 +277,7 @@ a b c d
delete from t1 where b=12;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
show keys from t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -389,7 +390,7 @@ drop table t1;
CREATE TABLE t1 (i INT, d1 DATE, d2 DATE NOT NULL, t TIMESTAMP, KEY(t)) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,'2023-03-16','2023-03-15','2012-12-12 12:12:12');
ALTER TABLE t1 MODIFY t FLOAT AS (i) PERSISTENT;
-SELECT i, d1, d2 FROM t1 INTO OUTFILE 'load_t1';
+SELECT i, d1, d2 INTO OUTFILE 'load_t1' FROM t1;
DELETE FROM t1;
LOAD DATA INFILE 'load_t1' INTO TABLE t1 (i,d1,d2);
SELECT * FROM t1 WHERE d2 < d1;
diff --git a/mysql-test/suite/vcol/r/vcol_misc.result b/mysql-test/suite/vcol/r/vcol_misc.result
index ef2e948a004..c3321872a45 100644
--- a/mysql-test/suite/vcol/r/vcol_misc.result
+++ b/mysql-test/suite/vcol/r/vcol_misc.result
@@ -182,16 +182,36 @@ a b c
2 3 y
0 1 y,n
drop table t1,t2;
-CREATE TABLE t1 (
+SET @old_debug= @@global.debug;
+SET @old_debug= @@global.debug;
+SET GLOBAL debug_dbug= "+d,write_delay_wakeup";
+CREATE TABLE t1 (a int,
ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
tsv TIMESTAMP AS (ADDDATE(ts, INTERVAL 1 DAY)) VIRTUAL
) ENGINE=MyISAM;
-INSERT INTO t1 (tsv) VALUES (DEFAULT);
-INSERT DELAYED INTO t1 (tsv) VALUES (DEFAULT);
+# First test FLUSH TABLES
+INSERT INTO t1 (a,tsv) VALUES (1,DEFAULT);
+INSERT DELAYED INTO t1 (a,tsv) VALUES (2,DEFAULT);
FLUSH TABLES;
+SELECT COUNT(*) > 0 FROM t1;
+COUNT(*) > 0
+1
+# Then test FLUSH TABLES t1;
+INSERT INTO t1 (a,tsv) VALUES (3,DEFAULT);
+INSERT DELAYED INTO t1 (a,tsv) VALUES (4,DEFAULT);
+FLUSH TABLES t1;
+SELECT COUNT(*) FROM t1;
+COUNT(*)
+4
+# Then test FLUSH TABLES WITH READ LOCK;
+INSERT INTO t1 (a,tsv) VALUES (5,DEFAULT);
+INSERT DELAYED INTO t1 (a,tsv) VALUES (6,DEFAULT);
+FLUSH TABLES WITH READ LOCK;
SELECT COUNT(*) FROM t1;
COUNT(*)
-2
+6
+set GLOBAL debug_dbug= @old_debug;
+unlock tables;
DROP TABLE t1;
#
# MDEV-4823 Server crashes in Item_func_not::fix_fields on
diff --git a/mysql-test/suite/vcol/r/vcol_select_innodb.result b/mysql-test/suite/vcol/r/vcol_select_innodb.result
index 63c35bade07..6ebdd87029b 100644
--- a/mysql-test/suite/vcol/r/vcol_select_innodb.result
+++ b/mysql-test/suite/vcol/r/vcol_select_innodb.result
@@ -9,7 +9,7 @@ insert into t2 (a) values (1);
create table t3 (a int primary key,
b int as (-a),
c int as (-a) persistent unique);
-insert into t3 (a) values (2),(1),(3);
+insert into t3 (a) values (2),(1),(3),(5),(4),(7);
# select_type=SIMPLE, type=system
select * from t2;
a b c
@@ -63,8 +63,8 @@ a b c
3 -3 -3
explain select * from t1 where b in (select c from t3);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 index c c 5 NULL 3 Using index
-1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t3 ref c c 5 test.t1.b 1 Using index
# select_type=PRIMARY, type=range,ref
select * from t1 where c in (select c from t3 where c between -2 and -1);
a b c
@@ -73,7 +73,7 @@ a b c
1 -1 -1
explain select * from t1 where c in (select c from t3 where c between -2 and -1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 index c c 5 NULL 3 Using where; Using index
+1 PRIMARY t3 index c c 5 NULL 6 Using where; Using index
1 PRIMARY t1 ALL c NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
# select_type=UNION, type=system
# select_type=UNION RESULT, type=<union1,2>
@@ -160,7 +160,7 @@ a b c
2 -2 -2
explain select * from t3 where b between -2 and -1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where
# SELECT * FROM tbl_name WHERE <indexed vcol expr>
select * from t3 where c between -2 and -1;
a b c
@@ -192,7 +192,7 @@ a b c
2 -2 -2
explain select * from t3 where b between -2 and -1 order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 index NULL PRIMARY 4 NULL 3 Using where
+1 SIMPLE t3 index NULL PRIMARY 4 NULL 6 Using where
# SELECT * FROM tbl_name WHERE <non-indexed vcol expr> ORDER BY <non-indexed vcol>
select * from t3 where b between -2 and -1 order by b;
a b c
@@ -200,7 +200,7 @@ a b c
1 -1 -1
explain select * from t3 where b between -2 and -1 order by b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using filesort
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where; Using filesort
# SELECT * FROM tbl_name WHERE <indexed vcol expr> ORDER BY <non-indexed vcol>
select * from t3 where c between -2 and -1 order by b;
a b c
@@ -216,7 +216,7 @@ a b c
1 -1 -1
explain select * from t3 where b between -2 and -1 order by c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using filesort
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where; Using filesort
# SELECT * FROM tbl_name WHERE <indexed vcol expr> ORDER BY <indexed vcol>
select * from t3 where c between -2 and -1 order by c;
a b c
diff --git a/mysql-test/suite/vcol/r/vcol_select_myisam.result b/mysql-test/suite/vcol/r/vcol_select_myisam.result
index d6a8babc045..5e79a12d17e 100644
--- a/mysql-test/suite/vcol/r/vcol_select_myisam.result
+++ b/mysql-test/suite/vcol/r/vcol_select_myisam.result
@@ -9,7 +9,7 @@ insert into t2 (a) values (1);
create table t3 (a int primary key,
b int as (-a),
c int as (-a) persistent unique);
-insert into t3 (a) values (2),(1),(3);
+insert into t3 (a) values (2),(1),(3),(5),(4),(7);
# select_type=SIMPLE, type=system
select * from t2;
a b c
@@ -44,7 +44,7 @@ a b c
1 -1 -1
explain select * from t3 where c>=-1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range c c 5 NULL 2 Using index condition
+1 SIMPLE t3 range c c 5 NULL 1 Using index condition
# select_type=SIMPLE, type=ref
select * from t1,t3 where t1.c=t3.c and t3.c=-1;
a b c a b c
@@ -63,8 +63,8 @@ a b c
3 -3 -3
explain select * from t1 where b in (select c from t3);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 index c c 5 NULL 3 Using index
-1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t1 ALL NULL NULL NULL NULL 5 Using where
+1 PRIMARY t3 ref c c 5 test.t1.b 2 Using index
# select_type=PRIMARY, type=range,ref
select * from t1 where c in (select c from t3 where c between -2 and -1);
a b c
@@ -73,8 +73,8 @@ a b c
1 -1 -1
explain select * from t1 where c in (select c from t3 where c between -2 and -1);
id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY t3 index c c 5 NULL 3 Using where; Using index
-1 PRIMARY t1 ref c c 5 test.t3.c 2
+1 PRIMARY t1 range c c 5 NULL 3 Using index condition
+1 PRIMARY t3 index c c 5 NULL 6 Using where; Using index; Using join buffer (flat, BNL join)
# select_type=UNION, type=system
# select_type=UNION RESULT, type=<union1,2>
select * from t1 union select * from t2;
@@ -152,7 +152,7 @@ a b c
2 -2 -2
explain select * from t3 where a between 1 and 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range PRIMARY PRIMARY 4 NULL 1 Using index condition
+1 SIMPLE t3 range PRIMARY PRIMARY 4 NULL 2 Using index condition
# SELECT * FROM tbl_name WHERE <non-indexed vcol expr>
select * from t3 where b between -2 and -1;
a b c
@@ -160,7 +160,7 @@ a b c
1 -1 -1
explain select * from t3 where b between -2 and -1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where
# SELECT * FROM tbl_name WHERE <indexed vcol expr>
select * from t3 where c between -2 and -1;
a b c
@@ -168,7 +168,7 @@ a b c
1 -1 -1
explain select * from t3 where c between -2 and -1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range c c 5 NULL 1 Using index condition
+1 SIMPLE t3 range c c 5 NULL 2 Using index condition
# SELECT * FROM tbl_name WHERE <non-vcol expr> ORDER BY <indexed vcol>
select * from t3 where a between 1 and 2 order by c;
a b c
@@ -176,7 +176,7 @@ a b c
1 -1 -1
explain select * from t3 where a between 1 and 2 order by c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range PRIMARY PRIMARY 4 NULL 1 Using index condition; Using filesort
+1 SIMPLE t3 range PRIMARY PRIMARY 4 NULL 2 Using index condition; Using filesort
# SELECT * FROM tbl_name WHERE <non-indexed vcol expr> ORDER BY <non-vcol>
select * from t3 where b between -2 and -1 order by a;
a b c
@@ -184,7 +184,7 @@ a b c
2 -2 -2
explain select * from t3 where b between -2 and -1 order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using filesort
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where; Using filesort
# SELECT * FROM tbl_name WHERE <indexed vcol expr> ORDER BY <non-vcol>
select * from t3 where c between -2 and -1 order by a;
a b c
@@ -192,7 +192,7 @@ a b c
2 -2 -2
explain select * from t3 where c between -2 and -1 order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range c c 5 NULL 1 Using index condition; Using filesort
+1 SIMPLE t3 range c c 5 NULL 2 Using index condition; Using filesort
# SELECT * FROM tbl_name WHERE <non-indexed vcol expr> ORDER BY <non-indexed vcol>
select * from t3 where b between -2 and -1 order by b;
a b c
@@ -200,7 +200,7 @@ a b c
1 -1 -1
explain select * from t3 where b between -2 and -1 order by b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using filesort
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where; Using filesort
# SELECT * FROM tbl_name WHERE <indexed vcol expr> ORDER BY <non-indexed vcol>
select * from t3 where c between -2 and -1 order by b;
a b c
@@ -208,7 +208,7 @@ a b c
1 -1 -1
explain select * from t3 where c between -2 and -1 order by b;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range c c 5 NULL 1 Using index condition; Using filesort
+1 SIMPLE t3 range c c 5 NULL 2 Using index condition; Using filesort
# SELECT * FROM tbl_name WHERE <non-indexed vcol expr> ORDER BY <indexed vcol>
select * from t3 where b between -2 and -1 order by c;
a b c
@@ -216,7 +216,7 @@ a b c
1 -1 -1
explain select * from t3 where b between -2 and -1 order by c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 ALL NULL NULL NULL NULL 3 Using where; Using filesort
+1 SIMPLE t3 ALL NULL NULL NULL NULL 6 Using where; Using filesort
# SELECT * FROM tbl_name WHERE <indexed vcol expr> ORDER BY <indexed vcol>
select * from t3 where c between -2 and -1 order by c;
a b c
@@ -224,7 +224,7 @@ a b c
1 -1 -1
explain select * from t3 where c between -2 and -1 order by c;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t3 range c c 5 NULL 1 Using index condition
+1 SIMPLE t3 range c c 5 NULL 2 Using index condition
# SELECT sum(<non-indexed vcol>) FROM tbl_name GROUP BY <non-indexed vcol>
select sum(b) from t1 group by b;
sum(b)
diff --git a/mysql-test/suite/vcol/r/wrong_arena.result b/mysql-test/suite/vcol/r/wrong_arena.result
index c105a069b7f..cd36801f601 100644
--- a/mysql-test/suite/vcol/r/wrong_arena.result
+++ b/mysql-test/suite/vcol/r/wrong_arena.result
@@ -7,9 +7,9 @@ d int as ((a,a) in ((1,1),(2,1),(NULL,1))), # cmp_item_datetime
e int as ((a,1) in ((1,1),(2,1),(NULL,1))) # cmp_item_row::alloc_comparators()
);
Warnings:
-Warning 1292 Incorrect datetime value: '1'
-Warning 1292 Incorrect datetime value: '2'
-Warning 1292 Incorrect datetime value: '3'
+Warning 1292 Truncated incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '2'
+Warning 1292 Truncated incorrect datetime value: '3'
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -20,21 +20,21 @@ t1 CREATE TABLE `t1` (
`e` int(11) GENERATED ALWAYS AS ((`a`,1) in ((1,1),(2,1),(NULL,1))) VIRTUAL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
Warnings:
-Warning 1292 Incorrect datetime value: '1'
-Warning 1292 Incorrect datetime value: '2'
-Warning 1292 Incorrect datetime value: '3'
+Warning 1292 Truncated incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '2'
+Warning 1292 Truncated incorrect datetime value: '3'
connect con1, localhost, root;
insert t1 (a) values ('2010-10-10 10:10:10');
select * from t1;
a b c d e
2010-10-10 10:10:10 1 0 0 NULL
Warnings:
-Warning 1292 Incorrect datetime value: '1'
-Warning 1292 Incorrect datetime value: '1'
-Warning 1292 Incorrect datetime value: '2'
-Warning 1292 Incorrect datetime value: '1'
-Warning 1292 Incorrect datetime value: '1'
-Warning 1292 Incorrect datetime value: '2'
+Warning 1292 Truncated incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '2'
+Warning 1292 Truncated incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '2'
disconnect con1;
connection default;
select * from t1;
@@ -50,14 +50,14 @@ select * from t1;
a b
2010-10-10 10:10:10 0000-00-00 00:00:00
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
disconnect con1;
connection default;
select * from t1;
a b
2010-10-10 10:10:10 0000-00-00 00:00:00
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
drop table t1;
create table t1 (
id int not null ,
diff --git a/mysql-test/suite/vcol/t/update.test b/mysql-test/suite/vcol/t/update.test
index 53189ee3219..e1351986968 100644
--- a/mysql-test/suite/vcol/t/update.test
+++ b/mysql-test/suite/vcol/t/update.test
@@ -93,10 +93,10 @@ check table t; select * from t;
insert t(a,b,d) select 10,4,4 on duplicate key update b=5, d=5;
check table t; select * from t;
replace delayed t (a,b,d) values (10,6,6);
-flush tables;
+flush tables t;
check table t; select * from t;
insert delayed t(a,b,d) values (10,6,6) on duplicate key update b=7, d=7;
-flush tables;
+flush tables t;
check table t; select * from t;
--write_file $MYSQLTEST_VARDIR/tmp/vblobs.txt
10 8 foo 8 foo
diff --git a/mysql-test/suite/vcol/t/vcol_keys_myisam.test b/mysql-test/suite/vcol/t/vcol_keys_myisam.test
index 99b1c9a444b..86fb8aecbe2 100644
--- a/mysql-test/suite/vcol/t/vcol_keys_myisam.test
+++ b/mysql-test/suite/vcol/t/vcol_keys_myisam.test
@@ -281,7 +281,7 @@ drop table t1;
CREATE TABLE t1 (i INT, d1 DATE, d2 DATE NOT NULL, t TIMESTAMP, KEY(t)) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1,'2023-03-16','2023-03-15','2012-12-12 12:12:12');
ALTER TABLE t1 MODIFY t FLOAT AS (i) PERSISTENT;
-SELECT i, d1, d2 FROM t1 INTO OUTFILE 'load_t1';
+SELECT i, d1, d2 INTO OUTFILE 'load_t1' FROM t1;
DELETE FROM t1;
LOAD DATA INFILE 'load_t1' INTO TABLE t1 (i,d1,d2);
SELECT * FROM t1 WHERE d2 < d1;
diff --git a/mysql-test/suite/vcol/t/vcol_misc.test b/mysql-test/suite/vcol/t/vcol_misc.test
index 255621845fb..4e6bd169b7d 100644
--- a/mysql-test/suite/vcol/t/vcol_misc.test
+++ b/mysql-test/suite/vcol/t/vcol_misc.test
@@ -1,4 +1,5 @@
--source include/have_ucs2.inc
+--source include/have_debug.inc
let $MYSQLD_DATADIR= `select @@datadir`;
@@ -184,19 +185,35 @@ drop table t1,t2;
# Bug mdev-3938: INSERT DELAYED for a table with virtual columns
#
-CREATE TABLE t1 (
+SET @old_debug= @@global.debug;
+SET @old_debug= @@global.debug;
+SET GLOBAL debug_dbug= "+d,write_delay_wakeup";
+CREATE TABLE t1 (a int,
ts TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
tsv TIMESTAMP AS (ADDDATE(ts, INTERVAL 1 DAY)) VIRTUAL
) ENGINE=MyISAM;
-INSERT INTO t1 (tsv) VALUES (DEFAULT);
-
-INSERT DELAYED INTO t1 (tsv) VALUES (DEFAULT);
-
+--echo # First test FLUSH TABLES
+INSERT INTO t1 (a,tsv) VALUES (1,DEFAULT);
+INSERT DELAYED INTO t1 (a,tsv) VALUES (2,DEFAULT);
FLUSH TABLES;
+# Count may be 1 or 2, depending on FLUSH happened before or after delayed
+SELECT COUNT(*) > 0 FROM t1;
+--echo # Then test FLUSH TABLES t1;
+INSERT INTO t1 (a,tsv) VALUES (3,DEFAULT);
+INSERT DELAYED INTO t1 (a,tsv) VALUES (4,DEFAULT);
+FLUSH TABLES t1;
SELECT COUNT(*) FROM t1;
+--echo # Then test FLUSH TABLES WITH READ LOCK;
+
+INSERT INTO t1 (a,tsv) VALUES (5,DEFAULT);
+INSERT DELAYED INTO t1 (a,tsv) VALUES (6,DEFAULT);
+FLUSH TABLES WITH READ LOCK;
+SELECT COUNT(*) FROM t1;
+set GLOBAL debug_dbug= @old_debug;
+unlock tables;
DROP TABLE t1;
--echo #
diff --git a/mysql-test/suite/versioning/r/alter.result b/mysql-test/suite/versioning/r/alter.result
index 528c6ca7c2f..c30aab6eb6d 100644
--- a/mysql-test/suite/versioning/r/alter.result
+++ b/mysql-test/suite/versioning/r/alter.result
@@ -195,6 +195,8 @@ a
2
1
select row_start from t where a=3 into @tm;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
alter table t add column b int;
select @tm=row_start from t where a=3;
@tm=row_start
@@ -525,7 +527,7 @@ ERROR HY000: Table `t` is already system-versioned
use mysql;
create or replace table t (x int) with system versioning;
ERROR HY000: System versioning tables in the `mysql` database are not suported
-alter table user add system versioning;
+alter table db add system versioning;
ERROR HY000: System versioning tables in the `mysql` database are not suported
use test;
# MDEV-15956 Strange ER_UNSUPPORTED_ACTION_ON_GENERATED_COLUMN upon ALTER on versioning column
diff --git a/mysql-test/suite/versioning/r/commit_id.result b/mysql-test/suite/versioning/r/commit_id.result
index abf2eaf91ba..8815613292e 100644
--- a/mysql-test/suite/versioning/r/commit_id.result
+++ b/mysql-test/suite/versioning/r/commit_id.result
@@ -10,6 +10,8 @@ insert into t1 values ();
set @ts0= now(6);
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx0;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select transaction_id = @tx0 from mysql.transaction_registry
order by transaction_id desc limit 1;
transaction_id = @tx0
@@ -17,6 +19,8 @@ transaction_id = @tx0
set @ts1= now(6);
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx1;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select transaction_id = @tx1 from mysql.transaction_registry
order by transaction_id desc limit 1;
transaction_id = @tx1
@@ -24,6 +28,8 @@ transaction_id = @tx1
set @ts2= now(6);
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx2;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select transaction_id = @tx2 from mysql.transaction_registry
order by transaction_id desc limit 1;
transaction_id = @tx2
@@ -66,24 +72,32 @@ trt_trx_sees(0, @tx2)
set transaction isolation level read uncommitted;
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx3;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select isolation_level = 'READ-UNCOMMITTED' from mysql.transaction_registry where transaction_id = @tx3;
isolation_level = 'READ-UNCOMMITTED'
1
set transaction isolation level read committed;
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx4;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select isolation_level = 'READ-COMMITTED' from mysql.transaction_registry where transaction_id = @tx4;
isolation_level = 'READ-COMMITTED'
1
set transaction isolation level serializable;
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx5;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select isolation_level = 'SERIALIZABLE' from mysql.transaction_registry where transaction_id = @tx5;
isolation_level = 'SERIALIZABLE'
1
set transaction isolation level repeatable read;
insert into t1 values ();
select sys_trx_start from t1 where id = last_insert_id() into @tx6;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select isolation_level = 'REPEATABLE-READ' from mysql.transaction_registry where transaction_id = @tx6;
isolation_level = 'REPEATABLE-READ'
1
diff --git a/mysql-test/suite/versioning/r/create.result b/mysql-test/suite/versioning/r/create.result
index 231aae66482..18fe74d8171 100644
--- a/mysql-test/suite/versioning/r/create.result
+++ b/mysql-test/suite/versioning/r/create.result
@@ -273,8 +273,12 @@ t3 CREATE TABLE `t3` (
## For versioned table
insert into t1 values (1);
select row_start from t1 into @row_start;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert into t0 (y) values (2);
select st from t0 into @st;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
create or replace table t2 with system versioning as select * from t1;
show create table t2;
Table Create Table
@@ -337,8 +341,12 @@ ERROR 42S21: Duplicate column name 'row_end'
# Prepare checking for historical row
delete from t1;
select row_end from t1 for system_time all into @row_end;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
delete from t0;
select en from t0 for system_time all into @en;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
## Combinations of versioned + non-versioned
create or replace table t2 (y int);
insert into t2 values (3);
@@ -359,10 +367,14 @@ insert into t2 (y) values (1), (2);
delete from t2 where y = 2;
create or replace table t3 select * from t2 for system_time all;
select st, en from t3 where y = 1 into @st, @en;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select y from t2 for system_time all where st = @st and en = @en;
y
1
select st, en from t3 where y = 2 into @st, @en;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select y from t2 for system_time all where st = @st and en = @en;
y
2
diff --git a/mysql-test/suite/versioning/r/cte.result b/mysql-test/suite/versioning/r/cte.result
index fc070a70120..44135a83eb6 100644
--- a/mysql-test/suite/versioning/r/cte.result
+++ b/mysql-test/suite/versioning/r/cte.result
@@ -138,7 +138,7 @@ where e.mgr = a.emp_id
select name from emp where emp_id in (select emp_id from ancestors for system_time as of timestamp @ts_1);
id select_type table type possible_keys key key_len ref rows filtered Extra
1 PRIMARY <subquery4> ALL distinct_key NULL NULL NULL 4 100.00
-1 PRIMARY emp ALL PRIMARY NULL NULL NULL 4 75.00 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY emp ALL PRIMARY NULL NULL NULL 4 100.00 Using where; Using join buffer (flat, BNL join)
4 MATERIALIZED <derived2> ALL NULL NULL NULL NULL 4 100.00
2 DERIVED e ALL NULL NULL NULL NULL 4 100.00 Using where
3 RECURSIVE UNION e ALL mgr-fk NULL NULL NULL 4 100.00 Using where
diff --git a/mysql-test/suite/versioning/r/foreign.result b/mysql-test/suite/versioning/r/foreign.result
index e6e2d710fcf..30e65de5316 100644
--- a/mysql-test/suite/versioning/r/foreign.result
+++ b/mysql-test/suite/versioning/r/foreign.result
@@ -266,6 +266,8 @@ on update cascade
) engine=innodb;
insert into parent (value) values (23);
select id, value from parent into @id, @value;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert into child values (default, @id, @value);
insert into subchild values (default, @id, @value);
select parent_id from subchild;
@@ -347,7 +349,7 @@ key(f)
INSERT INTO t2 VALUES (1,'against'),(2,'q');
SET SQL_MODE= '';
SET timestamp = 2;
-SELECT * FROM t1 INTO OUTFILE 't1.data';
+SELECT * INTO OUTFILE 't1.data' FROM t1;
SET timestamp = 3;
UPDATE t1 SET f13 = 'q';
SET timestamp = 4;
@@ -356,7 +358,7 @@ Warnings:
Warning 1265 Data truncated for column 'f12' at row 2
Warning 1265 Data truncated for column 'f12' at row 4
Warning 1265 Data truncated for column 'f12' at row 7
-SELECT * FROM t1 INTO OUTFILE 't1.data.2' ;
+SELECT * INTO OUTFILE 't1.data.2' FROM t1;
SET timestamp = 5;
LOAD DATA INFILE 't1.data.2' REPLACE INTO TABLE t1;
Warnings:
@@ -365,7 +367,7 @@ Warning 1265 Data truncated for column 'f12' at row 2
Warning 1265 Data truncated for column 'f12' at row 4
Warning 1265 Data truncated for column 'f12' at row 7
Warning 1265 Data truncated for column 'f1' at row 10
-SELECT * FROM t2 INTO OUTFILE 't2.data';
+SELECT * INTO OUTFILE 't2.data' FROM t2;
SET timestamp = 6;
LOAD DATA INFILE 't2.data' REPLACE INTO TABLE t2;
SET FOREIGN_KEY_CHECKS = OFF;
diff --git a/mysql-test/suite/versioning/r/insert.result b/mysql-test/suite/versioning/r/insert.result
index 01d829d3430..2645d0184e8 100644
--- a/mysql-test/suite/versioning/r/insert.result
+++ b/mysql-test/suite/versioning/r/insert.result
@@ -54,6 +54,8 @@ drop view vt1_1;
create or replace table t1( id bigint primary key, a int, b int) with system versioning;
insert into t1 values(1, 1, 1);
select row_start, row_end from t1 into @sys_start, @sys_end;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select id, a, b from t1;
id a b
1 1 1
diff --git a/mysql-test/suite/versioning/r/load_data.result b/mysql-test/suite/versioning/r/load_data.result
index 5e7b36c9a6a..1fcde73e565 100644
--- a/mysql-test/suite/versioning/r/load_data.result
+++ b/mysql-test/suite/versioning/r/load_data.result
@@ -1,6 +1,8 @@
CREATE TABLE t1 (a INT, b INT, c INT, vc INT AS (c), UNIQUE(a), UNIQUE(b)) WITH SYSTEM VERSIONING;
INSERT IGNORE INTO t1 (a,b,c) VALUES (1,2,3);
SELECT a, b, c FROM t1 INTO OUTFILE '15330.data';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
LOAD DATA INFILE '15330.data' IGNORE INTO TABLE t1 (a,b,c);
Warnings:
Warning 1062 Duplicate entry '1' for key 'a'
diff --git a/mysql-test/suite/versioning/r/online.result b/mysql-test/suite/versioning/r/online.result
index 21441acb6b2..41a2556628a 100644
--- a/mysql-test/suite/versioning/r/online.result
+++ b/mysql-test/suite/versioning/r/online.result
@@ -67,6 +67,7 @@ change a a int with system versioning,
add primary key pk (a);
affected rows: 0
info: Records: 0 Duplicates: 0 Warnings: 0
+# restart
update t1 set a=2;
select count(*) from t1 for system_time all;
count(*)
@@ -112,5 +113,45 @@ update t set b=11;
select count(*) from t for system_time all;
count(*)
2
+# Start of 10.4 tests
+create or replace table t (a int, b int) engine=innodb;
+alter table t
+add s bigint unsigned as row start,
+add e bigint unsigned as row end,
+add period for system_time(s, e),
+add system versioning;
+alter table t drop column b, algorithm=instant;
+alter table t add index idx(a), lock=none;
+alter table t drop column s, drop column e;
+alter table t drop system versioning, lock=none;
+ERROR 0A000: LOCK=NONE is not supported. Reason: Not implemented for system-versioned operations. Try LOCK=SHARED
+#
+# MDEV-17697 Broken versioning info after instant drop column
+#
+set @@system_versioning_alter_history= keep;
+create or replace table t1 (a int, b int) with system versioning;
+insert into t1 values (1, 1);
+alter table t1 drop column b, algorithm=instant;
+alter table t1 drop system versioning;
+create or replace table t1 (a int, b int) with system versioning;
+insert into t1 values (1, 1);
+alter table t1 drop system versioning;
+#
+# MDEV-18173 Assertion `o->ind == vers_end' or `o->ind == vers_start' failed in dict_table_t::instant_column
+#
+set @@system_versioning_alter_history= keep;
+create or replace table t1 (pk integer primary key, a int, b int, v int as (a))
+with system versioning;
+alter table t1 force;
+alter table t1 drop column b;
+#
+# MDEV-18122 Assertion `table->versioned() == m_prebuilt->table->versioned()' failed in ha_innobase::open
+#
+create or replace table t1 (
+x int,
+v int as (x) virtual,
+y int
+) with system versioning;
+alter table t1 drop system versioning;
drop database test;
create database test;
diff --git a/mysql-test/suite/versioning/r/partition.result b/mysql-test/suite/versioning/r/partition.result
index 3c33967b780..9b6f2201c22 100644
--- a/mysql-test/suite/versioning/r/partition.result
+++ b/mysql-test/suite/versioning/r/partition.result
@@ -149,6 +149,8 @@ x C D
1 1 1
set @str= concat('select row_start from t1 partition (pn) into @ts0');
prepare stmt from @str;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt;
drop prepare stmt;
set @now= now(6);
@@ -160,6 +162,8 @@ execute select_pn;
x C D
set @str= concat('select row_start from t1 partition (p0) into @ts1');
prepare stmt from @str;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt;
drop prepare stmt;
select @ts0 = @ts1;
@@ -175,6 +179,8 @@ x C D
2 1 1
set @str= concat('select row_start from t1 partition (pn) into @ts0');
prepare stmt from @str;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt;
drop prepare stmt;
set @now= now(6);
@@ -190,14 +196,20 @@ drop prepare select_p0;
drop prepare select_pn;
set @str= concat('select row_start from t1 partition (p0) where x = 2 into @ts1');
prepare stmt from @str;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt;
drop prepare stmt;
set @str= concat('select row_end from t1 partition (p0) where x = 2 into @ts2');
prepare stmt from @str;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt;
drop prepare stmt;
set @str= concat('select row_start from t1 partition (pn) into @ts3');
prepare stmt from @str;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
execute stmt;
drop prepare stmt;
select @ts0 = @ts1;
@@ -370,6 +382,8 @@ create or replace table t2 (f int);
create or replace trigger tr before insert on t2
for each row select table_rows from information_schema.tables
where table_name = 't1' into @a;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert into t2 values (1);
# MDEV-14740 Locking assertion for system_time partitioning
create or replace table t1 (i int) with system versioning
@@ -379,6 +393,8 @@ partition pn current);
create or replace table t2 (f int);
create or replace trigger tr before insert on t2
for each row select count(*) from t1 into @a;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
insert into t2 values (1);
# MDEV-14741 Assertion `(trx)->start_file == 0' failed in row_truncate_table_for_mysql()
create or replace table t1 (i int) with system versioning
@@ -440,7 +456,7 @@ partition_name partition_ordinal_position partition_method timediff(partition_de
p0 1 SYSTEM_TIME 00:00:00.000000
pn 2 SYSTEM_TIME NULL
Warnings:
-Warning 1292 Truncated incorrect time value: 'CURRENT'
+Warning 1292 Incorrect time value: 'CURRENT'
alter table t1 add partition (partition p1 history, partition p2 history);
select partition_name,partition_ordinal_position,partition_method,timediff(partition_description, @ts) from information_schema.partitions where table_schema='test' and table_name='t1';
partition_name partition_ordinal_position partition_method timediff(partition_description, @ts)
@@ -449,7 +465,7 @@ p1 2 SYSTEM_TIME 01:00:00.000000
p2 3 SYSTEM_TIME 02:00:00.000000
pn 4 SYSTEM_TIME NULL
Warnings:
-Warning 1292 Truncated incorrect time value: 'CURRENT'
+Warning 1292 Incorrect time value: 'CURRENT'
alter table t1 drop partition p0;
select partition_name,partition_ordinal_position,partition_method,timediff(partition_description, @ts) from information_schema.partitions where table_schema='test' and table_name='t1';
partition_name partition_ordinal_position partition_method timediff(partition_description, @ts)
@@ -457,7 +473,7 @@ p1 1 SYSTEM_TIME 01:00:00.000000
p2 2 SYSTEM_TIME 02:00:00.000000
pn 3 SYSTEM_TIME NULL
Warnings:
-Warning 1292 Truncated incorrect time value: 'CURRENT'
+Warning 1292 Incorrect time value: 'CURRENT'
alter table t1 drop partition p2;
ERROR HY000: Can only drop oldest partitions when rotating by INTERVAL
select partition_name,partition_ordinal_position,partition_method,timediff(partition_description, @ts) from information_schema.partitions where table_schema='test' and table_name='t1';
@@ -466,7 +482,7 @@ p1 1 SYSTEM_TIME 01:00:00.000000
p2 2 SYSTEM_TIME 02:00:00.000000
pn 3 SYSTEM_TIME NULL
Warnings:
-Warning 1292 Truncated incorrect time value: 'CURRENT'
+Warning 1292 Incorrect time value: 'CURRENT'
#
# MDEV-15103 Assertion in ha_partition::part_records() for updating VIEW
#
diff --git a/mysql-test/suite/versioning/r/partition_innodb.result b/mysql-test/suite/versioning/r/partition_innodb.result
index de3521b6aa3..f5945304899 100644
--- a/mysql-test/suite/versioning/r/partition_innodb.result
+++ b/mysql-test/suite/versioning/r/partition_innodb.result
@@ -20,7 +20,7 @@ alter table t1 partition by system_time (
partition p0 history,
partition pn current
);
-ERROR HY000: `row_start` must be of type TIMESTAMP(6) for system-versioned table `#sql-temporary`
+ERROR HY000: `row_start` must be of type TIMESTAMP(6) for system-versioned table `t1`
create or replace table t (
a int primary key,
row_start bigint unsigned as row start invisible,
diff --git a/mysql-test/suite/versioning/r/select,trx_id.rdiff b/mysql-test/suite/versioning/r/select,trx_id.rdiff
new file mode 100644
index 00000000000..8906007a348
--- /dev/null
+++ b/mysql-test/suite/versioning/r/select,trx_id.rdiff
@@ -0,0 +1,11 @@
+--- select.result 2018-06-29 18:09:17.962447067 +0200
++++ select.reject 2018-06-29 18:10:04.618808616 +0200
+@@ -17,6 +17,8 @@
+ (8, 108),
+ (9, 109);
+ set @t0= now(6);
++Warnings:
++Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+ delete from t1 where x = 3;
+ delete from t1 where x > 7;
+ insert into t1(x, y) values(3, 33);
diff --git a/mysql-test/suite/versioning/r/select.result b/mysql-test/suite/versioning/r/select.result
index c887e524e63..3569268ce1d 100644
--- a/mysql-test/suite/versioning/r/select.result
+++ b/mysql-test/suite/versioning/r/select.result
@@ -21,6 +21,8 @@ delete from t1 where x = 3;
delete from t1 where x > 7;
insert into t1(x, y) values(3, 33);
select sys_trx_start from t1 where x = 3 and y = 33 into @t1;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select x, y from t1;
x y
0 100
@@ -192,6 +194,40 @@ NULL NULL 2 1
NULL NULL 3 1
drop table t1;
drop table t2;
+create or replace table t1(x int) with system versioning;
+insert into t1 values (1);
+delete from t1;
+insert into t1 values (2);
+delete from t1;
+insert into t1 values (3);
+delete from t1;
+select row_start into @start1 from t1 for system_time all where x = 1;
+select row_end into @end1 from t1 for system_time all where x = 1;
+select row_start into @start2 from t1 for system_time all where x = 2;
+select row_end into @end2 from t1 for system_time all where x = 2;
+select row_start into @start3 from t1 for system_time all where x = 3;
+select row_end into @end3 from t1 for system_time all where x = 3;
+select x as ASOF_x from t1 for system_time as of @start2;
+ASOF_x
+2
+select x as ASOF_x from t1 for system_time as of @end2;
+ASOF_x
+select x as FROMTO_x from t1 for system_time from @start1 to @end3;
+FROMTO_x
+1
+2
+3
+select x as FROMTO_x from t1 for system_time from @end1 to @start2;
+FROMTO_x
+select x as BETWAND_x from t1 for system_time between @start1 and @end3;
+BETWAND_x
+1
+2
+3
+select x as BETWAND_x from t1 for system_time between @end1 and @start2;
+BETWAND_x
+2
+drop table t1;
create table t1(
A int
) with system versioning;
@@ -330,6 +366,8 @@ insert into t1 values (1);
set @ts= now(6);
delete from t1;
select sys_trx_start from t1 for system_time all into @trx_start;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
## ensure @trx_start is much lower than unix timestamp
select @trx_start < unix_timestamp(@ts) - 100 as trx_start_good;
trx_start_good
@@ -526,7 +564,11 @@ period for system_time (row_start, row_end)
insert into t1 values (1);
delete from t1;
select row_start from t1 for system_time all into @t1;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select row_end from t1 for system_time all into @t2;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select * from t1 for system_time between @t1 and @t2;
a
1
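The new queries above exercise the boundary rules of FOR SYSTEM_TIME, and the expected output follows from them: a row is visible AS OF a point in time only while row_start <= point < row_end (so AS OF @end2 finds nothing), FROM ... TO excludes its upper bound, and BETWEEN ... AND includes both bounds (which is why BETWEEN @end1 AND @start2 still finds the row whose row_start equals @start2). A condensed sketch of the same rules, assuming a versioned table t whose single history row has the period [@s, @e):

  select * from t for system_time as of @e;           # empty: row_end is exclusive
  select * from t for system_time from @s to @e;      # returns the row: upper bound excluded, but row_start = @s qualifies
  select * from t for system_time between @s and @e;  # returns the row: both bounds inclusive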
diff --git a/mysql-test/suite/versioning/r/select2,trx_id.rdiff b/mysql-test/suite/versioning/r/select2,trx_id.rdiff
index a657b94c031..d23eb5afbc0 100644
--- a/mysql-test/suite/versioning/r/select2,trx_id.rdiff
+++ b/mysql-test/suite/versioning/r/select2,trx_id.rdiff
@@ -1,15 +1,15 @@
---- select2.result
-+++ select2,trx_id.result~
-@@ -22,6 +22,8 @@
- delete from t1 where x > 7;
- insert into t1(x, y) values(3, 33);
+--- select2.result 2018-06-29 17:51:17.142172085 +0200
++++ select2,trx_id.reject 2018-06-29 18:03:49.034273090 +0200
+@@ -26,6 +26,8 @@
select sys_start from t1 where x = 3 and y = 33 into @t1;
+ Warnings:
+ Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+set @x1= @t1;
+select trt_commit_ts(@x1) into @t1;
select x, y from t1;
x y
0 100
-@@ -82,7 +84,7 @@
+@@ -86,7 +88,7 @@
8 108
9 109
3 33
@@ -18,7 +18,7 @@
ASOF2_x y
0 100
1 101
-@@ -94,7 +96,7 @@
+@@ -98,7 +100,7 @@
7 107
8 108
9 109
@@ -27,7 +27,7 @@
FROMTO2_x y
0 100
1 101
-@@ -106,7 +108,7 @@
+@@ -110,7 +112,7 @@
7 107
8 108
9 109
diff --git a/mysql-test/suite/versioning/r/select2.result b/mysql-test/suite/versioning/r/select2.result
index bb5c82ee444..22388359885 100644
--- a/mysql-test/suite/versioning/r/select2.result
+++ b/mysql-test/suite/versioning/r/select2.result
@@ -18,10 +18,14 @@ insert into t1 (x, y) values
(9, 109);
set @t0= now(6);
select sys_start from t1 limit 1 into @x0;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
delete from t1 where x = 3;
delete from t1 where x > 7;
insert into t1(x, y) values(3, 33);
select sys_start from t1 where x = 3 and y = 33 into @t1;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select x, y from t1;
x y
0 100
@@ -331,7 +335,7 @@ x y
select * from (select * from t1 for system_time all, t2 for system_time all) for system_time all as t;
ERROR HY000: Table `t` is not system-versioned
select * from (t1 for system_time all join t2 for system_time all) for system_time all;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near '' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'system_time all' at line 1
# MDEV-16043 Assertion thd->Item_change_list::is_empty() failed in mysql_parse upon SELECT from a view reading from a versioned table
create or replace table t1 (a int) with system versioning;
create or replace view v1 as select * from t1;
diff --git a/mysql-test/suite/versioning/r/sysvars.result b/mysql-test/suite/versioning/r/sysvars.result
index b23742462d1..79aa8fce746 100644
--- a/mysql-test/suite/versioning/r/sysvars.result
+++ b/mysql-test/suite/versioning/r/sysvars.result
@@ -53,7 +53,7 @@ Variable_name Value
system_versioning_asof 1900-01-01 00:00:00.000000
set global system_versioning_asof= timestamp'1911-11-11 11:11:11.1111119';
Warnings:
-Note 1292 Truncated incorrect datetime value: '1911-11-11 11:11:11.1111119'
+Note 1292 Truncated incorrect DATETIME value: '1911-11-11 11:11:11.1111119'
show global variables like 'system_versioning_asof';
Variable_name Value
system_versioning_asof 1911-11-11 11:11:11.111111
@@ -80,7 +80,7 @@ Variable_name Value
system_versioning_asof 1900-01-01 00:00:00.000000
set system_versioning_asof= timestamp'1911-11-11 11:11:11.1111119';
Warnings:
-Note 1292 Truncated incorrect datetime value: '1911-11-11 11:11:11.1111119'
+Note 1292 Truncated incorrect DATETIME value: '1911-11-11 11:11:11.1111119'
show variables like 'system_versioning_asof';
Variable_name Value
system_versioning_asof 1911-11-11 11:11:11.111111
@@ -130,3 +130,14 @@ show status like "Feature_system_versioning";
Variable_name Value
Feature_system_versioning 2
drop table t;
+#
+# MDEV-16991 Rounding vs truncation for TIME, DATETIME, TIMESTAMP
+#
+SET sql_mode=TIME_ROUND_FRACTIONAL;
+SET @@global.system_versioning_asof= timestamp'2001-12-31 23:59:59.9999999';
+Warnings:
+Note 1292 Truncated incorrect DATETIME value: '2001-12-31 23:59:59.9999999'
+SELECT @@global.system_versioning_asof;
+@@global.system_versioning_asof
+2002-01-01 00:00:00.000000
+SET @@global.system_versioning_asof= DEFAULT;
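The MDEV-16991 addition shows the new rounding mode in action: under sql_mode=TIME_ROUND_FRACTIONAL the seventh fractional digit is rounded rather than truncated, so '2001-12-31 23:59:59.9999999' rounds up to the next second, which is the '2002-01-01 00:00:00.000000' value in the expected output. Roughly the same effect should be observable with a plain conversion (assuming the rounding rule also applies to CAST, which is an assumption here):

  SET sql_mode=TIME_ROUND_FRACTIONAL;
  SELECT CAST('2001-12-31 23:59:59.9999999' AS DATETIME(6));
  # expected under this mode: 2002-01-01 00:00:00.000000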
diff --git a/mysql-test/suite/versioning/r/trx_id.result b/mysql-test/suite/versioning/r/trx_id.result
index 951e5ce3dd9..333b8b3efa0 100644
--- a/mysql-test/suite/versioning/r/trx_id.result
+++ b/mysql-test/suite/versioning/r/trx_id.result
@@ -17,11 +17,15 @@ add period for system_time(s, e),
add system versioning,
algorithm=inplace;
select s from t1 into @trx_start;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select count(*) = 1 from mysql.transaction_registry where transaction_id = @trx_start;
count(*) = 1
1
create or replace table t1 (x int);
select count(*) from mysql.transaction_registry into @tmp;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
alter table t1
add column s bigint unsigned as row start,
add column e bigint unsigned as row end,
@@ -40,11 +44,15 @@ add period for system_time(s, e),
add system versioning,
algorithm=copy;
select s from t1 into @trx_start;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select count(*) = 1 from mysql.transaction_registry where transaction_id = @trx_start;
count(*) = 1
1
create or replace table t1 (x int);
select count(*) from mysql.transaction_registry into @tmp;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
alter table t1
add column s bigint unsigned as row start,
add column e bigint unsigned as row end,
@@ -96,8 +104,14 @@ set @ts2= sysdate(6);
commit;
set @ts3= sysdate(6);
select sys_start from t1 where x = 1 into @trx_id1;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select sys_start from t1 where x = 2 into @trx_id2;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select sys_start from t1 where x = 3 into @trx_id3;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select @trx_id1 < @trx_id2, @trx_id2 < @trx_id3;
@trx_id1 < @trx_id2 @trx_id2 < @trx_id3
1 1
@@ -178,6 +192,8 @@ set @ts1= now(6);
insert into t1 values (1);
commit;
select row_start from t1 into @trx_id;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
select trt_begin_ts(@trx_id) <= @ts1 as BEGIN_TS_GOOD;
BEGIN_TS_GOOD
1
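The trt_begin_ts()/trt_commit_ts() helpers used in this result file appear to be stored routines defined in the versioning suite's shared include files rather than server built-ins; conceptually they map a transaction id to its timestamps via mysql.transaction_registry. Assuming the standard columns of that table, the begin-timestamp lookup amounts to:

  SELECT begin_timestamp
  FROM mysql.transaction_registry
  WHERE transaction_id = @trx_id;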
diff --git a/mysql-test/suite/versioning/t/alter.test b/mysql-test/suite/versioning/t/alter.test
index 4af937b96e7..89379837fac 100644
--- a/mysql-test/suite/versioning/t/alter.test
+++ b/mysql-test/suite/versioning/t/alter.test
@@ -450,7 +450,7 @@ use mysql;
--error ER_VERS_DB_NOT_SUPPORTED
create or replace table t (x int) with system versioning;
--error ER_VERS_DB_NOT_SUPPORTED
-alter table user add system versioning;
+alter table db add system versioning;
use test;
--echo # MDEV-15956 Strange ER_UNSUPPORTED_ACTION_ON_GENERATED_COLUMN upon ALTER on versioning column
diff --git a/mysql-test/suite/versioning/t/foreign.test b/mysql-test/suite/versioning/t/foreign.test
index 1f899773128..ddff3945259 100644
--- a/mysql-test/suite/versioning/t/foreign.test
+++ b/mysql-test/suite/versioning/t/foreign.test
@@ -380,15 +380,15 @@ INSERT INTO t2 VALUES (1,'against'),(2,'q');
SET SQL_MODE= '';
SET timestamp = 2;
-SELECT * FROM t1 INTO OUTFILE 't1.data';
+SELECT * INTO OUTFILE 't1.data' FROM t1;
SET timestamp = 3;
UPDATE t1 SET f13 = 'q';
SET timestamp = 4;
LOAD DATA INFILE 't1.data' REPLACE INTO TABLE t1;
-SELECT * FROM t1 INTO OUTFILE 't1.data.2' ;
+SELECT * INTO OUTFILE 't1.data.2' FROM t1;
SET timestamp = 5;
LOAD DATA INFILE 't1.data.2' REPLACE INTO TABLE t1;
-SELECT * FROM t2 INTO OUTFILE 't2.data';
+SELECT * INTO OUTFILE 't2.data' FROM t2;
SET timestamp = 6;
LOAD DATA INFILE 't2.data' REPLACE INTO TABLE t2;
SET FOREIGN_KEY_CHECKS = OFF;
diff --git a/mysql-test/suite/versioning/t/online.test b/mysql-test/suite/versioning/t/online.test
index 37405bb99ce..02fde474d55 100644
--- a/mysql-test/suite/versioning/t/online.test
+++ b/mysql-test/suite/versioning/t/online.test
@@ -134,5 +134,63 @@ select count(*) from t for system_time all;
update t set b=11;
select count(*) from t for system_time all;
+--echo # Start of 10.4 tests
+
+create or replace table t (a int, b int) engine=innodb;
+alter table t
+ add s bigint unsigned as row start,
+ add e bigint unsigned as row end,
+ add period for system_time(s, e),
+ add system versioning;
+alter table t drop column b, algorithm=instant;
+alter table t add index idx(a), lock=none;
+alter table t drop column s, drop column e;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+alter table t drop system versioning, lock=none;
+
+--echo #
+--echo # MDEV-17697 Broken versioning info after instant drop column
+--echo #
+set @@system_versioning_alter_history= keep;
+create or replace table t1 (a int, b int) with system versioning;
+insert into t1 values (1, 1);
+alter table t1 drop column b, algorithm=instant;
+alter table t1 drop system versioning;
+
+create or replace table t1 (a int, b int) with system versioning;
+insert into t1 values (1, 1);
+
+if ($have_debug) {
+--disable_query_log
+--disable_result_log
+set debug_dbug='+d,ib_commit_inplace_fail_1';
+--error ER_INTERNAL_ERROR
+alter table t1 drop column b, algorithm=instant;
+set debug_dbug= default;
+--enable_query_log
+--enable_result_log
+}
+alter table t1 drop system versioning;
+
+--echo #
+--echo # MDEV-18173 Assertion `o->ind == vers_end' or `o->ind == vers_start' failed in dict_table_t::instant_column
+--echo #
+set @@system_versioning_alter_history= keep;
+create or replace table t1 (pk integer primary key, a int, b int, v int as (a))
+with system versioning;
+
+alter table t1 force;
+alter table t1 drop column b;
+
+--echo #
+--echo # MDEV-18122 Assertion `table->versioned() == m_prebuilt->table->versioned()' failed in ha_innobase::open
+--echo #
+create or replace table t1 (
+ x int,
+ v int as (x) virtual,
+ y int
+) with system versioning;
+alter table t1 drop system versioning;
+
drop database test;
create database test;
diff --git a/mysql-test/suite/versioning/t/partition_innodb.test b/mysql-test/suite/versioning/t/partition_innodb.test
index bb4fe50ce91..d7527ff7410 100644
--- a/mysql-test/suite/versioning/t/partition_innodb.test
+++ b/mysql-test/suite/versioning/t/partition_innodb.test
@@ -21,7 +21,6 @@ create or replace table t1(
period for system_time(row_start, row_end)
) engine=InnoDB with system versioning;
---replace_regex /#sql-[0-9a-f_]*/#sql-temporary/
--error ER_VERS_FIELD_WRONG_TYPE
alter table t1 partition by system_time (
partition p0 history,
diff --git a/mysql-test/suite/versioning/t/select.test b/mysql-test/suite/versioning/t/select.test
index b7c2d500da1..ebe4503401d 100644
--- a/mysql-test/suite/versioning/t/select.test
+++ b/mysql-test/suite/versioning/t/select.test
@@ -107,6 +107,32 @@ for system_time as of timestamp @t0 as t;
drop table t1;
drop table t2;
+# Query conditions check
+
+create or replace table t1(x int) with system versioning;
+insert into t1 values (1);
+delete from t1;
+insert into t1 values (2);
+delete from t1;
+insert into t1 values (3);
+delete from t1;
+
+select row_start into @start1 from t1 for system_time all where x = 1;
+select row_end into @end1 from t1 for system_time all where x = 1;
+select row_start into @start2 from t1 for system_time all where x = 2;
+select row_end into @end2 from t1 for system_time all where x = 2;
+select row_start into @start3 from t1 for system_time all where x = 3;
+select row_end into @end3 from t1 for system_time all where x = 3;
+
+select x as ASOF_x from t1 for system_time as of @start2;
+select x as ASOF_x from t1 for system_time as of @end2;
+select x as FROMTO_x from t1 for system_time from @start1 to @end3;
+select x as FROMTO_x from t1 for system_time from @end1 to @start2;
+select x as BETWAND_x from t1 for system_time between @start1 and @end3;
+select x as BETWAND_x from t1 for system_time between @end1 and @start2;
+
+drop table t1;
+
# Wildcard expansion on hidden fields
create table t1(
diff --git a/mysql-test/suite/versioning/t/sysvars.test b/mysql-test/suite/versioning/t/sysvars.test
index 160af12fe02..52fab81b8e6 100644
--- a/mysql-test/suite/versioning/t/sysvars.test
+++ b/mysql-test/suite/versioning/t/sysvars.test
@@ -87,3 +87,13 @@ select * from t for system_time between '0-0-0' and current_timestamp(6);
show status like "Feature_system_versioning";
drop table t;
+
+
+--echo #
+--echo # MDEV-16991 Rounding vs truncation for TIME, DATETIME, TIMESTAMP
+--echo #
+
+SET sql_mode=TIME_ROUND_FRACTIONAL;
+SET @@global.system_versioning_asof= timestamp'2001-12-31 23:59:59.9999999';
+SELECT @@global.system_versioning_asof;
+SET @@global.system_versioning_asof= DEFAULT;
diff --git a/mysql-test/suite/wsrep/disabled.def b/mysql-test/suite/wsrep/disabled.def
index c7c8f2c6216..61142398372 100644
--- a/mysql-test/suite/wsrep/disabled.def
+++ b/mysql-test/suite/wsrep/disabled.def
@@ -1,2 +1,3 @@
wsrep.foreign_key : Sporadic failure "WSREP has not yet prepared node for application use"
-
+wsrep.pool_of_threads : Sporadic failure "WSREP has not yet prepared node for application use"
+wsrep.variables : Global wsrep_on manipulation causes debug asserts
diff --git a/mysql-test/suite/wsrep/my.cnf b/mysql-test/suite/wsrep/my.cnf
index 7e51b0750a1..e90686850a9 100644
--- a/mysql-test/suite/wsrep/my.cnf
+++ b/mysql-test/suite/wsrep/my.cnf
@@ -1,10 +1,8 @@
# Use default setting for mysqld processes
!include include/default_mysqld.cnf
-[mysqld]
-wsrep-on=1
-
[mysqld.1]
+wsrep-on=OFF
#galera_port=@OPT.port
#ist_port=@OPT.port
#sst_port=@OPT.port
diff --git a/mysql-test/suite/wsrep/r/mdev_7798.result b/mysql-test/suite/wsrep/r/mdev_7798.result
index 83a02f3a606..ec906ccd8a2 100644
--- a/mysql-test/suite/wsrep/r/mdev_7798.result
+++ b/mysql-test/suite/wsrep/r/mdev_7798.result
@@ -7,6 +7,7 @@ SELECT @@GLOBAL.WSREP_ON;
1
SET GLOBAL WSREP_ON= 0;
Restart the node.
+# restart
SELECT @@GLOBAL.WSREP_ON;
@@GLOBAL.WSREP_ON
1
diff --git a/mysql-test/suite/wsrep/r/wsrep-recover-v25,binlogon.rdiff b/mysql-test/suite/wsrep/r/wsrep-recover-v25,binlogon.rdiff
index 5bf6502d0d8..596abf9c681 100644
--- a/mysql-test/suite/wsrep/r/wsrep-recover-v25,binlogon.rdiff
+++ b/mysql-test/suite/wsrep/r/wsrep-recover-v25,binlogon.rdiff
@@ -1,17 +1,19 @@
---- r/wsrep-recover-v25.result 2019-01-27 15:38:58.819204748 +0200
-+++ r/wsrep-recover-v25.reject 2019-01-27 15:39:49.967358994 +0200
-@@ -18,11 +18,10 @@
- connection default;
- SET DEBUG_SYNC = "now WAIT_FOR after_prepare_reached";
- # Kill the server
--Expect seqno 3
--3
--Expect 5 7
-+Expect seqno 2
-+2
+--- r/wsrep-recover-v25.result 2019-02-28 09:20:56.153775856 +0200
++++ r/wsrep-recover-v25.reject 2019-02-28 09:22:16.578113115 +0200
+@@ -12,4 +12,16 @@
+ SELECT VARIABLE_VALUE `expect 6` FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
+ expect 6
+ 6
++connect con1, localhost, root;
++SET DEBUG_SYNC = "ha_commit_trans_after_prepare SIGNAL after_prepare_reached WAIT_FOR continue";
++INSERT INTO t1 VALUES (7);
++connection default;
++SET DEBUG_SYNC = "now WAIT_FOR after_prepare_reached";
++# Kill the server
++Expect seqno 6
++6
+Expect 5
- SELECT * FROM t1;
- f1
- 5
--7
++SELECT * FROM t1;
++f1
++5
DROP TABLE t1;
diff --git a/mysql-test/suite/wsrep/r/wsrep-recover-v25.result b/mysql-test/suite/wsrep/r/wsrep-recover-v25.result
index 6d146f67bdf..c6e9246a753 100644
--- a/mysql-test/suite/wsrep/r/wsrep-recover-v25.result
+++ b/mysql-test/suite/wsrep/r/wsrep-recover-v25.result
@@ -1,28 +1,15 @@
# Kill the server
-Expect seqno 0
-0
-CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
-# Kill the server
Expect seqno 1
1
-INSERT INTO t1 VALUES (5);
-# Kill the server
-Expect seqno 2
-2
-SELECT VARIABLE_VALUE `expect 2` FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
-expect 2
-2
-connect con1, localhost, root;
-SET DEBUG_SYNC = "ha_commit_trans_after_prepare SIGNAL after_prepare_reached WAIT_FOR continue";
-INSERT INTO t1 VALUES (7);
-connection default;
-SET DEBUG_SYNC = "now WAIT_FOR after_prepare_reached";
+CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
# Kill the server
Expect seqno 3
3
-Expect 5 7
-SELECT * FROM t1;
-f1
+INSERT INTO t1 VALUES (5);
+# Kill the server
+Expect seqno 5
5
-7
+SELECT VARIABLE_VALUE `expect 6` FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
+expect 6
+6
DROP TABLE t1;
diff --git a/mysql-test/suite/wsrep/r/wsrep-recover.result b/mysql-test/suite/wsrep/r/wsrep-recover.result
new file mode 100644
index 00000000000..44b50a752d9
--- /dev/null
+++ b/mysql-test/suite/wsrep/r/wsrep-recover.result
@@ -0,0 +1,64 @@
+# Kill the server
+Expect seqno 1
+1
+CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
+# Kill the server
+Expect seqno 3
+3
+INSERT INTO t1 VALUES (5);
+# Kill the server
+Expect seqno 5
+5
+SELECT VARIABLE_VALUE `expect 6` FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
+expect 6
+6
+connect con1, localhost, root;
+SET DEBUG_SYNC = "wsrep_after_certification SIGNAL after_certification_reached WAIT_FOR continue";
+INSERT INTO t1 VALUES (7);
+connect con_ctrl, localhost, root;
+SET DEBUG_SYNC = "now WAIT_FOR after_certification_reached";
+connect con2, localhost, root;
+SET DEBUG_SYNC = "wsrep_before_commit_order_enter SIGNAL before_commit_order_reached WAIT_FOR continue";
+INSERT INTO t1 VALUES (8);
+connection con_ctrl;
+SET DEBUG_SYNC = "now WAIT_FOR before_commit_order_reached";
+connection default;
+# Kill the server
+Expect seqno 6
+6
+disconnect con1;
+disconnect con2;
+disconnect con_ctrl;
+connection default;
+SELECT VARIABLE_VALUE `expect 7` FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
+expect 7
+7
+connect con1, localhost, root;
+SET DEBUG_SYNC = "wsrep_after_certification SIGNAL after_certification_reached WAIT_FOR continue_after_certification";
+SET DEBUG_SYNC = "wsrep_before_commit_order_enter SIGNAL before_commit_order_reached_1 WAIT_FOR continue_before_commit_order_1";
+INSERT INTO t1 VALUES (9);
+connect con_ctrl, localhost, root;
+SET DEBUG_SYNC = "now WAIT_FOR after_certification_reached";
+connect con2, localhost, root;
+SET DEBUG_SYNC = "wsrep_before_commit_order_enter SIGNAL before_commit_order_reached_2 WAIT_FOR continue_before_commit_order_2";
+INSERT INTO t1 VALUES (10);
+connection con_ctrl;
+SET DEBUG_SYNC = "now WAIT_FOR before_commit_order_reached_2";
+SET DEBUG_SYNC = "now SIGNAL continue_after_certification";
+SET DEBUG_SYNC = "now WAIT_FOR before_commit_order_reached_1";
+connection default;
+# Kill the server
+Expect seqno 7
+7
+disconnect con1;
+disconnect con2;
+disconnect con_ctrl;
+connection default;
+SELECT VARIABLE_VALUE `expect 8` FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
+expect 8
+8
+Expect row 5
+SELECT * FROM t1;
+f1
+5
+DROP TABLE t1;
diff --git a/mysql-test/suite/wsrep/suite.pm b/mysql-test/suite/wsrep/suite.pm
index 03e23b8d7cb..fbaf5aa2b22 100644
--- a/mysql-test/suite/wsrep/suite.pm
+++ b/mysql-test/suite/wsrep/suite.pm
@@ -9,9 +9,9 @@ return "Not run for embedded server" if $::opt_embedded_server;
return "WSREP is not compiled in" unless defined $::mysqld_variables{'wsrep-on'};
my ($provider) = grep { -f $_ } $ENV{WSREP_PROVIDER},
- "/usr/lib64/galera-3/libgalera_smm.so",
+ "/usr/lib64/galera-4/libgalera_smm.so",
"/usr/lib64/galera/libgalera_smm.so",
- "/usr/lib/galera-3/libgalera_smm.so",
+ "/usr/lib/galera-4/libgalera_smm.so",
"/usr/lib/galera/libgalera_smm.so";
return "No wsrep provider library" unless -f $provider;
diff --git a/mysql-test/suite/wsrep/t/binlog_format.cnf b/mysql-test/suite/wsrep/t/binlog_format.cnf
new file mode 100644
index 00000000000..7ec24c14d80
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/binlog_format.cnf
@@ -0,0 +1,8 @@
+!include ../my.cnf
+
+[mysqld.1]
+wsrep-on=ON
+wsrep-provider=@ENV.WSREP_PROVIDER
+wsrep-cluster-address=gcomm://
+innodb_autoinc_lock_mode=2
+
diff --git a/mysql-test/suite/wsrep/t/binlog_format.opt b/mysql-test/suite/wsrep/t/binlog_format.opt
deleted file mode 100644
index e3f2470c6e5..00000000000
--- a/mysql-test/suite/wsrep/t/binlog_format.opt
+++ /dev/null
@@ -1 +0,0 @@
---innodb_autoinc_lock_mode=2 --wsrep-provider=$WSREP_PROVIDER --wsrep-cluster-address=gcomm://
diff --git a/mysql-test/suite/wsrep/t/binlog_format.test b/mysql-test/suite/wsrep/t/binlog_format.test
index 07001b17a84..695859a2ad3 100644
--- a/mysql-test/suite/wsrep/t/binlog_format.test
+++ b/mysql-test/suite/wsrep/t/binlog_format.test
@@ -1,3 +1,4 @@
+--source include/have_innodb.inc
--source include/have_wsrep_provider.inc
--source include/have_binlog_format_row.inc
diff --git a/mysql-test/suite/wsrep/t/mdev_10186.cnf b/mysql-test/suite/wsrep/t/mdev_10186.cnf
new file mode 100644
index 00000000000..3c4ca003f76
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/mdev_10186.cnf
@@ -0,0 +1,6 @@
+!include ../my.cnf
+
+[mysqld.1]
+wsrep-on=OFF
+wsrep-provider=@ENV.WSREP_PROVIDER
+wsrep-cluster-address=gcomm://
diff --git a/mysql-test/suite/wsrep/t/mdev_10186.opt b/mysql-test/suite/wsrep/t/mdev_10186.opt
deleted file mode 100644
index e2655959c62..00000000000
--- a/mysql-test/suite/wsrep/t/mdev_10186.opt
+++ /dev/null
@@ -1 +0,0 @@
---wsrep-provider=$WSREP_PROVIDER --wsrep-cluster-address=gcomm:// --wsrep-on=0
diff --git a/mysql-test/suite/wsrep/t/mdev_10186.test b/mysql-test/suite/wsrep/t/mdev_10186.test
index 98ea5192634..f86c69f8a5b 100644
--- a/mysql-test/suite/wsrep/t/mdev_10186.test
+++ b/mysql-test/suite/wsrep/t/mdev_10186.test
@@ -1,3 +1,4 @@
+--source include/have_innodb.inc
--source include/have_wsrep_provider.inc
--source include/have_binlog_format_row.inc
diff --git a/mysql-test/suite/wsrep/t/mdev_6832.cnf b/mysql-test/suite/wsrep/t/mdev_6832.cnf
new file mode 100644
index 00000000000..0bf01f81fc5
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/mdev_6832.cnf
@@ -0,0 +1,7 @@
+!include ../my.cnf
+
+[mysqld.1]
+wsrep-on=ON
+wsrep-provider=@ENV.WSREP_PROVIDER
+wsrep-cluster-address=gcomm://
+
diff --git a/mysql-test/suite/wsrep/t/mdev_6832.opt b/mysql-test/suite/wsrep/t/mdev_6832.opt
deleted file mode 100644
index 16f8962dba2..00000000000
--- a/mysql-test/suite/wsrep/t/mdev_6832.opt
+++ /dev/null
@@ -1 +0,0 @@
---wsrep-provider=$WSREP_PROVIDER --wsrep-cluster-address=gcomm:// --wsrep-on=1
diff --git a/mysql-test/suite/wsrep/t/mdev_6832.test b/mysql-test/suite/wsrep/t/mdev_6832.test
index 9efccface57..226be1b788c 100644
--- a/mysql-test/suite/wsrep/t/mdev_6832.test
+++ b/mysql-test/suite/wsrep/t/mdev_6832.test
@@ -1,3 +1,4 @@
+--source include/have_innodb.inc
--source include/have_wsrep_provider.inc
--source include/have_binlog_format_row.inc
diff --git a/mysql-test/suite/wsrep/t/mdev_7798.cnf b/mysql-test/suite/wsrep/t/mdev_7798.cnf
new file mode 100644
index 00000000000..0bf01f81fc5
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/mdev_7798.cnf
@@ -0,0 +1,7 @@
+!include ../my.cnf
+
+[mysqld.1]
+wsrep-on=ON
+wsrep-provider=@ENV.WSREP_PROVIDER
+wsrep-cluster-address=gcomm://
+
diff --git a/mysql-test/suite/wsrep/t/mdev_7798.opt b/mysql-test/suite/wsrep/t/mdev_7798.opt
deleted file mode 100644
index 1007d5c0b78..00000000000
--- a/mysql-test/suite/wsrep/t/mdev_7798.opt
+++ /dev/null
@@ -1 +0,0 @@
---wsrep-provider=$WSREP_PROVIDER --wsrep-cluster-address=gcomm:// --wsrep-on=1
diff --git a/mysql-test/suite/wsrep/t/mdev_7798.test b/mysql-test/suite/wsrep/t/mdev_7798.test
index 9dfff0959bc..b9938d936cc 100644
--- a/mysql-test/suite/wsrep/t/mdev_7798.test
+++ b/mysql-test/suite/wsrep/t/mdev_7798.test
@@ -1,3 +1,4 @@
+--source include/have_innodb.inc
--source include/have_wsrep_provider.inc
--source include/have_binlog_format_row.inc
diff --git a/mysql-test/suite/wsrep/t/pool_of_threads.cnf b/mysql-test/suite/wsrep/t/pool_of_threads.cnf
new file mode 100644
index 00000000000..b63e3324796
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/pool_of_threads.cnf
@@ -0,0 +1,8 @@
+!include ../my.cnf
+
+[mysqld.1]
+wsrep-on=ON
+wsrep-provider=@ENV.WSREP_PROVIDER
+wsrep-cluster-address=gcomm://
+thread_handling=pool-of-threads
+
diff --git a/mysql-test/suite/wsrep/t/pool_of_threads.opt b/mysql-test/suite/wsrep/t/pool_of_threads.opt
deleted file mode 100644
index 814417e5b0f..00000000000
--- a/mysql-test/suite/wsrep/t/pool_of_threads.opt
+++ /dev/null
@@ -1 +0,0 @@
---innodb_autoinc_lock_mode=2 --wsrep-provider=$WSREP_PROVIDER --wsrep-cluster-address=gcomm:// --thread_handling=pool-of-threads
diff --git a/mysql-test/suite/wsrep/t/wsrep-recover-v25.test b/mysql-test/suite/wsrep/t/wsrep-recover-v25.test
index cfd77fbdef4..743e2795c1c 100644
--- a/mysql-test/suite/wsrep/t/wsrep-recover-v25.test
+++ b/mysql-test/suite/wsrep/t/wsrep-recover-v25.test
@@ -37,9 +37,11 @@ if ($log_bin) {
# The expected recovered seqno is 1 corresponding to initial cluster
# configuration change.
#
+let $restart_noprint=2;
+
--source include/kill_mysqld.inc
--source wsrep-recover-step.inc
---echo Expect seqno 0
+--echo Expect seqno 1
--echo $wsrep_recover_start_position_seqno
--let $restart_parameters = --wsrep-start-position=$wsrep_recover_start_position_uuid:$wsrep_recover_start_position_seqno
@@ -48,13 +50,15 @@ if ($log_bin) {
#
# Scenario 2
-# The expected recovered seqno is 1 corresponding to CREATE TABLE
+# The expected recovered seqno is 3 corresponding to two configuration
+# changes and CREATE TABLE
#
CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
+let $restart_noprint=2;
--source include/kill_mysqld.inc
--source wsrep-recover-step.inc
---echo Expect seqno 1
+--echo Expect seqno 3
--echo $wsrep_recover_start_position_seqno
--let $restart_parameters = --wsrep-start-position=$wsrep_recover_start_position_uuid:$wsrep_recover_start_position_seqno
@@ -63,57 +67,53 @@ CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
#
# Scenario 3
-# The expected recovered seqno is 2 corresponding to CREATE TABLE and INSERT.
+# The expected recovered seqno is 5 corresponding to three configuration
+# changes, CREATE TABLE and INSERT.
#
-# The expected wsrep_last_committed after the server is restarted is 2.
+# The expected wsrep_last_committed after the server is restarted is 6.
#
INSERT INTO t1 VALUES (5);
+let $restart_noprint=2;
--source include/kill_mysqld.inc
--source wsrep-recover-step.inc
---echo Expect seqno 2
+--echo Expect seqno 5
--echo $wsrep_recover_start_position_seqno
--let $restart_parameters = --wsrep-start-position=$wsrep_recover_start_position_uuid:$wsrep_recover_start_position_seqno
--source include/start_mysqld.inc
--source include/wait_wsrep_ready.inc
-SELECT VARIABLE_VALUE `expect 2` FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
+SELECT VARIABLE_VALUE `expect 6` FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
#
# Scenario 4
#
# The INSERT gets prepared but not committed.
#
-# If binlog is off, the expected outcome is that the INSERT gets committed
-# since it is already committed in the cluster. If binlog is on, the INSERT
+# This scenario is not applicable if the binlog is off, since the
+# commit is then not 2PC.
+#
+# If binlog is on, the INSERT
# should be rolled back during recovery phase since it has not yet
# been logged into binlog.
#
---connect con1, localhost, root
-SET DEBUG_SYNC = "ha_commit_trans_after_prepare SIGNAL after_prepare_reached WAIT_FOR continue";
---send INSERT INTO t1 VALUES (7)
-
---connection default
-SET DEBUG_SYNC = "now WAIT_FOR after_prepare_reached";
---source include/kill_mysqld.inc
---source wsrep-recover-step.inc
if ($log_bin) {
- --echo Expect seqno 2
-}
-if (!$log_bin) {
- --echo Expect seqno 3
-}
---echo $wsrep_recover_start_position_seqno
---let $restart_parameters = --wsrep-start-position=$wsrep_recover_start_position_uuid:$wsrep_recover_start_position_seqno
---source include/start_mysqld.inc
---source include/wait_wsrep_ready.inc
+ --connect con1, localhost, root
+ SET DEBUG_SYNC = "ha_commit_trans_after_prepare SIGNAL after_prepare_reached WAIT_FOR continue";
+ --send INSERT INTO t1 VALUES (7)
-if ($log_bin) {
- --echo Expect 5
-}
-if (!$log_bin) {
- --echo Expect 5 7
+ --connection default
+ let $restart_noprint=2;
+ SET DEBUG_SYNC = "now WAIT_FOR after_prepare_reached";
+ --source include/kill_mysqld.inc
+ --source wsrep-recover-step.inc
+ --echo Expect seqno 6
+ --echo $wsrep_recover_start_position_seqno
+ --let $restart_parameters = --wsrep-start-position=$wsrep_recover_start_position_uuid:$wsrep_recover_start_position_seqno
+ --source include/start_mysqld.inc
+ --source include/wait_wsrep_ready.inc
+ --echo Expect 5
+ SELECT * FROM t1;
}
-SELECT * FROM t1;
DROP TABLE t1;
diff --git a/mysql-test/suite/wsrep/t/wsrep-recover.cnf b/mysql-test/suite/wsrep/t/wsrep-recover.cnf
new file mode 100644
index 00000000000..19986cd97bc
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep-recover.cnf
@@ -0,0 +1,9 @@
+!include ../my.cnf
+
+[mysqld.1]
+wsrep-on=ON
+binlog-format=ROW
+innodb-flush-log-at-trx-commit=1
+wsrep-cluster-address=gcomm://
+wsrep-provider=@ENV.WSREP_PROVIDER
+innodb-autoinc-lock-mode=2
\ No newline at end of file
diff --git a/mysql-test/suite/wsrep/t/wsrep-recover.combinations b/mysql-test/suite/wsrep/t/wsrep-recover.combinations
new file mode 100644
index 00000000000..1ce3b45aa1a
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep-recover.combinations
@@ -0,0 +1,4 @@
+[binlogon]
+log-bin
+
+[binlogoff]
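The .combinations file makes mtr run wsrep-recover once per bracketed section, with the options listed under the active section appended to the server command line; the [binlogoff] section is intentionally empty. When a combination needs different expected output, the convention used elsewhere in this change is a <test>,<combination>.rdiff overlay on the base .result file (see select,trx_id.rdiff and wsrep-recover-v25,binlogon.rdiff above). The files added for the new test therefore fit together as:

  mysql-test/suite/wsrep/t/wsrep-recover.test          # the scenarios
  mysql-test/suite/wsrep/t/wsrep-recover.cnf           # shared server options
  mysql-test/suite/wsrep/t/wsrep-recover.combinations  # [binlogon] log-bin / [binlogoff]
  mysql-test/suite/wsrep/r/wsrep-recover.result        # baseline expected output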
diff --git a/mysql-test/suite/wsrep/t/wsrep-recover.test b/mysql-test/suite/wsrep/t/wsrep-recover.test
new file mode 100644
index 00000000000..75271e45b15
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep-recover.test
@@ -0,0 +1,194 @@
+#
+# Verify that the wsrep XID gets updated in InnoDB rollback segment
+# properly and can be recovered with --wsrep-recover
+#
+# The test runs the following scenarios:
+#
+# 1) The server is started but no SQL is run
+# 2) DDL is executed
+# 3) INSERT is executed
+# 4) Two INSERTs are executed so that the first one in order will be
+# blocked after certification and the second one before entering
+# commit order critical section.
+# 5) Two DMLs are executed so that the prepare step is run out of order.
+# Both transactions are blocked before commit order critical section.
+#
+# After each scenario the server is killed and the recovered position
+# is validated.
+#
+
+--source include/have_wsrep.inc
+--source include/have_innodb.inc
+--source include/have_wsrep_provider.inc
+--source include/have_debug_sync.inc
+
+# Binlog option for the recovery run. This must be set in the test because
+# the combinations file causes the log-bin option to be set on the command
+# line, not via my.cnf.
+#
+--let $log_bin = `SELECT @@log_bin`
+if ($log_bin) {
+--let $wsrep_recover_binlog_opt = --log-bin
+}
+
+#
+# Scenario 1
+# The expected recovered seqno is 1 corresponding to initial cluster
+# configuration change.
+#
+let $restart_noprint=2;
+--source include/kill_mysqld.inc
+--source wsrep-recover-step.inc
+--echo Expect seqno 1
+--echo $wsrep_recover_start_position_seqno
+
+--let $restart_parameters = --wsrep-start-position=$wsrep_recover_start_position_uuid:$wsrep_recover_start_position_seqno
+--source include/start_mysqld.inc
+--source include/wait_wsrep_ready.inc
+
+#
+# Scenario 2
+# The expected recovered seqno is 3 corresponding to two configuration
+# change events and CREATE TABLE.
+#
+let $restart_noprint=2;
+
+CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB;
+--source include/kill_mysqld.inc
+--source wsrep-recover-step.inc
+--echo Expect seqno 3
+--echo $wsrep_recover_start_position_seqno
+
+--let $restart_parameters = --wsrep-start-position=$wsrep_recover_start_position_uuid:$wsrep_recover_start_position_seqno
+--source include/start_mysqld.inc
+--source include/wait_wsrep_ready.inc
+
+#
+# Scenario 3
+# The expected recovered seqno is 5 corresponding to three configuration
+# change events, CREATE TABLE and INSERT.
+#
+# The expected wsrep_last_committed after the server is restarted is 6.
+#
+let $restart_noprint=2;
+
+INSERT INTO t1 VALUES (5);
+--source include/kill_mysqld.inc
+--source wsrep-recover-step.inc
+--echo Expect seqno 5
+--echo $wsrep_recover_start_position_seqno
+--let $restart_parameters = --wsrep-start-position=$wsrep_recover_start_position_uuid:$wsrep_recover_start_position_seqno
+--source include/start_mysqld.inc
+--source include/wait_wsrep_ready.inc
+
+SELECT VARIABLE_VALUE `expect 6` FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
+
+#
+# Scenario 4
+#
+# This will cause the following
+#
+# Seqno 7 - the first INSERT is blocked after it is certified but before
+# it gets prepared
+# Seqno 8 - the second INSERT is blocked before it will be ordered for
+# commit, so it becomes prepared
+#
+# As an outcome, the recovery process should return seqno 6 because
+# the range of prepared transactions found after the crash recovery
+# is not continuous up to 8.
+#
+# The expected wsrep_last_committed after the server is restarted is 7.
+#
+
+# Send INSERT which will block after certification
+--connect con1, localhost, root
+SET DEBUG_SYNC = "wsrep_after_certification SIGNAL after_certification_reached WAIT_FOR continue";
+--send INSERT INTO t1 VALUES (7)
+
+--connect con_ctrl, localhost, root
+SET DEBUG_SYNC = "now WAIT_FOR after_certification_reached";
+
+# Send INSERT which will block before commit order critical section
+--connect con2, localhost, root
+SET DEBUG_SYNC = "wsrep_before_commit_order_enter SIGNAL before_commit_order_reached WAIT_FOR continue";
+--send INSERT INTO t1 VALUES (8)
+
+--connection con_ctrl
+SET DEBUG_SYNC = "now WAIT_FOR before_commit_order_reached";
+
+--connection default
+let $restart_noprint=2;
+--source include/kill_mysqld.inc
+--source wsrep-recover-step.inc
+--echo Expect seqno 6
+--echo $wsrep_recover_start_position_seqno
+--let $restart_parameters = --wsrep-start-position=$wsrep_recover_start_position_uuid:$wsrep_recover_start_position_seqno
+--source include/start_mysqld.inc
+--source include/wait_wsrep_ready.inc
+
+--disconnect con1
+--disconnect con2
+--disconnect con_ctrl
+--connection default
+
+SELECT VARIABLE_VALUE `expect 7` FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
+
+#
+# Scenario 5
+#
+# This scenario will run two INSERTs in parallel so that they are
+# prepared out of order. The execution is stopped before commit
+# and the server is killed.
+#
+# The transactions will be recovered from InnoDB but
+# will be rolled back:
+# - If binlog is on, the binlog acts as a transaction coordinator.
+# The transaction is not logged into binlog, so the transaction is rolled
+# back.
+# - If binlog is not on, the transaction is 1PC and the wsrep XID
+# is not persisted before commit is complete.
+#
+
+--connect con1, localhost, root
+SET DEBUG_SYNC = "wsrep_after_certification SIGNAL after_certification_reached WAIT_FOR continue_after_certification";
+SET DEBUG_SYNC = "wsrep_before_commit_order_enter SIGNAL before_commit_order_reached_1 WAIT_FOR continue_before_commit_order_1";
+--send INSERT INTO t1 VALUES (9)
+
+--connect con_ctrl, localhost, root
+SET DEBUG_SYNC = "now WAIT_FOR after_certification_reached";
+
+--connect con2, localhost, root
+SET DEBUG_SYNC = "wsrep_before_commit_order_enter SIGNAL before_commit_order_reached_2 WAIT_FOR continue_before_commit_order_2";
+--send INSERT INTO t1 VALUES (10)
+
+--connection con_ctrl
+SET DEBUG_SYNC = "now WAIT_FOR before_commit_order_reached_2";
+SET DEBUG_SYNC = "now SIGNAL continue_after_certification";
+SET DEBUG_SYNC = "now WAIT_FOR before_commit_order_reached_1";
+
+--connection default
+let $restart_noprint=2;
+--source include/kill_mysqld.inc
+--source wsrep-recover-step.inc
+--echo Expect seqno 7
+
+--echo $wsrep_recover_start_position_seqno
+--let $restart_parameters = --wsrep-start-position=$wsrep_recover_start_position_uuid:$wsrep_recover_start_position_seqno
+--source include/start_mysqld.inc
+--source include/wait_wsrep_ready.inc
+
+--disconnect con1
+--disconnect con2
+--disconnect con_ctrl
+--connection default
+
+
+SELECT VARIABLE_VALUE `expect 8` FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_last_committed';
+
+#
+# Final sanity check: the successful inserts into t1 should result in a single row
+#
+--echo Expect row 5
+SELECT * FROM t1;
+
+DROP TABLE t1;
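Every kill-and-recover scenario in the new test relies on the same DEBUG_SYNC idiom: one connection sends a statement that parks at a named sync point, a control connection waits for the matching signal, and only then is the server killed, so the transaction is frozen at a precisely known stage when recovery runs. A generic sketch of the idiom in mysqltest form (the sync point and signal names here are placeholders; real sync points must exist in a debug build):

  --connect con1, localhost, root
  SET DEBUG_SYNC = 'some_sync_point SIGNAL reached WAIT_FOR go';
  --send INSERT INTO t1 VALUES (1)

  --connection default
  SET DEBUG_SYNC = 'now WAIT_FOR reached';
  # con1 is now parked at some_sync_point; kill the server or inspect state here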
diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests
index fd63a1a0523..045e220ac55 100644
--- a/mysql-test/unstable-tests
+++ b/mysql-test/unstable-tests
@@ -23,7 +23,7 @@
#
##############################################################################
-# Based on 10.3 ed661a0e59c0a41cee2ee44fc219febdf49de10e
+# Based on 10.3 9556d56da2a87b47f545ce4c9cbd09f7778ad527
#-----------------------------------------------------------------------
@@ -31,27 +31,25 @@ archive.archive_bitfield : MDEV-11771
archive.archive_symlink : MDEV-12170 - unexpected error on rmdir
archive.discover : MDEV-10510 - Table is marked as crashed
archive.mysqlhotcopy_archive : MDEV-10995 - Hang on debug
-archive.partition_archive : Modified in 10.3.13
archive-test_sql_discovery.discover : MDEV-16817 - Table marked as crashed
#-----------------------------------------------------------------------
-binlog.binlog_base64_flag : Modified in 10.3.13
-binlog.binlog_mysqlbinlog_row_frag : Added in 10.3.13
binlog.binlog_commit_wait : MDEV-10150 - Mismatch
-binlog.binlog_innodb_stm : Added in 10.3.14
+binlog.binlog_flush_binlogs_delete_domain : MDEV-14431 - Wrong exit code
+binlog.binlog_incident : Modified in 10.1.36
binlog.binlog_killed : MDEV-12925 - Wrong result
-binlog.binlog_mysqlbinlog2 : Modified in 10.3.14
binlog.binlog_xa_recover : MDEV-8517 - Extra checkpoint
binlog.load_data_stm_view : MDEV-16948 - Wrong result
+binlog_tmp_table_row : Added in 10.3.9
#-----------------------------------------------------------------------
+binlog_encryption.binlog_incident : Modified in 10.1.36
binlog_encryption.binlog_xa_recover : MDEV-12908 - Extra checkpoint
binlog_encryption.encrypted_master : MDEV-14201 - Extra warnings
binlog_encryption.encrypted_master_switch_to_unencrypted : MDEV-14190 - Can't init tc log
-binlog_encryption.encrypted_slave : MDEV-18135 - SSL error: key too small
binlog_encryption.encryption_combo : MDEV-14199 - Table is marked as crashed
binlog_encryption.rpl_binlog_errors : MDEV-12742 - Crash
binlog_encryption.rpl_checksum : MDEV-16951 - Wrong result
@@ -68,14 +66,14 @@ binlog_encryption.rpl_typeconv : MDEV-14362
#-----------------------------------------------------------------------
-connect.alter : MDEV-18135 - SSL error: key too small
-connect.drop-open-error : MDEV-18135 - SSL error: key too small
-connect.json : MDEV-18135 - SSL error: key too small
-connect.part_file : MDEV-18135 - SSL error: key too small
-connect.part_table : MDEV-18135 - SSL error: key too small; modified in 10.3.13
+compat/oracle.events : Added in 10.3.9
+compat/oracle.table_value_constr : Modified in 10.3.10
+
+#-----------------------------------------------------------------------
+
connect.pivot : MDEV-14803 - Failed to discover table
-connect.secure_file_priv : MDEV-18135 - SSL error: key too small
connect.vcol : MDEV-12374 - Fails on Windows
+connect.xml2 : Modified in 10.3.10
connect.zip : MDEV-13884 - Wrong result
#-----------------------------------------------------------------------
@@ -84,11 +82,9 @@ encryption.create_or_replace : MDEV-12694
encryption.debug_key_management : MDEV-13841 - Timeout
encryption.encrypt_and_grep : MDEV-13765 - Wrong result
encryption.innochecksum : MDEV-13644 - Assertion failure
-encryption.innodb-bad-key-change2 : MDEV-19118 - Can't connect to local MySQL server through socket
encryption.innodb-checksum-algorithm : MDEV-12898 - Deadlock of threads; MDEV-16896 - Server crash
encryption.innodb-compressed-blob : MDEV-14728 - Unable to get certificate
-encryption.innodb-discard-import : MDEV-19113 - Timeout
-encryption.innodb-encryption-alter : MDEV-13566 - Lock wait timeout; modified in 10.3.14
+encryption.innodb-encryption-alter : MDEV-13566 - Lock wait timeout
encryption.innodb_encryption_tables : MDEV-17339 - Crash on restart
encryption.innodb-first-page-read : MDEV-14356 - Timeout in wait condition
encryption.innodb-force-corrupt : MDEV-17286 - SSL error
@@ -98,7 +94,7 @@ encryption.innodb-page_encryption_log_encryption : MDEV-17339
encryption.innodb-read-only : MDEV-16563 - Crash on startup
encryption.innodb-redo-badkey : MDEV-13893 - Page cannot be decrypted
encryption.innodb-remove-encryption : MDEV-16493 - Timeout in wait condition
-encryption.innodb-spatial-index : MDEV-13746 - Wrong result; modified in 10.3.13
+encryption.innodb-spatial-index : MDEV-13746 - Wrong result
encryption.innodb_encrypt_log : MDEV-13725 - Wrong result
encryption.innodb_encryption : MDEV-15675 - Timeout
encryption.innodb_encryption-page-compression : MDEV-12630 - crash or assertion failure
@@ -129,12 +125,10 @@ federated.federatedx : MDEV-10617
#-----------------------------------------------------------------------
-funcs_1.is_check_constraint : Added in 10.3.13
+funcs_1.is_check_constraints : Added in 10.3.10
funcs_1.memory_views : MDEV-11773 - timeout
-funcs_1.processlist_priv_ps : Include file modified in 10.3.14
-funcs_1.processlist_priv_no_prot : Include file modified in 10.3.14
-funcs_1.processlist_val_no_prot : MDEV-11223 - Wrong result; include file modified in 10.3.14
-funcs_1.processlist_val_ps : MDEV-12175 - Wrong plan; include file modified in 10.3.14
+funcs_1.processlist_val_no_prot : MDEV-11223 - Wrong result
+funcs_1.processlist_val_ps : MDEV-12175 - Wrong plan
#-----------------------------------------------------------------------
@@ -149,64 +143,56 @@ galera_3nodes.* : Suite is no
#-----------------------------------------------------------------------
-gcol.gcol_column_def_options_innodb : Include file modified in 10.3.13
-gcol.gcol_column_def_options_myisam : Include file modified in 10.3.13
-gcol.gcol_keys_innodb : Include file modified in 10.3.13
-gcol.gcol_keys_myisam : Include file modified in 10.3.13
gcol.gcol_rollback : MDEV-16954 - Unknown storage engine 'InnoDB'
+gcol.gcol_update : Include file modified in 10.3.9
gcol.innodb_virtual_basic : MDEV-16950 - Failing assertion
-gcol.innodb_virtual_debug : MDEV-19114 - Assertion failure
-gcol.innodb_virtual_debug_purge : MDEV-16952 - Wrong result
-gcol.innodb_virtual_fk : Modified in 10.3.13
-gcol.innodb_virtual_fk_restart : MDEV-17466 - Assertion failure; modified in 10.3.13
-gcol.innodb_virtual_index : Modified in 10.3.13
+gcol.innodb_virtual_debug : MDEV-14134 - Crash, assertion failure
+gcol.innodb_virtual_debug_purge : MDEV-16952 - Wrong result; modified in 10.3.10
+gcol.innodb_virtual_index : Include file modified in 10.3.9
+gcol.innodb_virtual_purge : Include file modified in 10.3.9
#-----------------------------------------------------------------------
-innodb.101_compatibility : MDEV-13891 - Wrong result; modified in 10.3.13
-innodb.add_constraint : Modified in 10.3.13
-innodb.alter_candidate_key : Added in 10.3.13
+innodb.101_compatibility : MDEV-13891 - Wrong result
+innodb.alter_algorithm : Modified in 10.3.10
innodb.alter_copy : MDEV-16181 - Assertion failure
-innodb.alter_crash : MDEV-16944 - The process cannot access the file; modified in 10.3.14
-innodb.alter_inplace_perfschema : Modified in 10.3.13
-innodb.alter_kill : Modified in 10.3.14
-innodb.alter_table : Modified in 10.3.14
-innodb.alter_varchar_change : Added in 10.3.13
+innodb.alter_crash : MDEV-16944 - The process cannot access the file
+innodb.alter_kill : MDEV-16273 - Unknown storage engine 'InnoDB', MDEV-16946 - Wrong result
+innodb.alter_not_null : Modified in 10.3.9
+innodb.alter_not_null_debug : Added in 10.3.9
innodb.autoinc_persist : MDEV-15282 - Assertion failure
innodb.binlog_consistent : MDEV-10618 - Server fails to start
-innodb.doublewrite : MDEV-12905 - Server crash; modified in 10.3.14
-innodb.foreign_key : Modified in 10.3.14
-innodb.foreign_keys : Modified in 10.3.14
+innodb.dml_purge : Include file modified in 10.3.9
+innodb.doublewrite : MDEV-12905 - Server crash
+innodb.foreign_key : Modified in 10.3.10
+innodb.foreign-keys : Modified in 10.3.10
innodb.group_commit_crash : MDEV-14191 - InnoDB registration failed
innodb.group_commit_crash_no_optimize_thread : MDEV-13830 - Assertion failure
-innodb.ibuf_not_empty : MDEV-19021 - Wrong result
+innodb.index_merge_threshold : Include files modified in 10.3.9
+innodb.innodb-16k : Modified in 10.3.9
innodb.innodb-32k-crash : MDEV-16953 - Corrupt log record found
innodb.innodb-64k-crash : MDEV-13872 - Failure and crash on startup
-innodb.innodb-alter : Modified in 10.3.13
innodb.innodb-alter-debug : MDEV-13182 - InnoDB: adjusting FSP_SPACE_FLAGS
-innodb.innodb-alter-nullable : Modified in 10.3.14
innodb.innodb-alter-table : MDEV-10619 - Testcase timeout
innodb.innodb-alter-tempfile : MDEV-15285 - Table already exists
-innodb.innodb-alter-timestamp : Modified in 10.3.14
-innodb.innodb-bigblob : MDEV-18655 - ASAN unknown crash
innodb.innodb-blob : MDEV-12053 - Client crash
-innodb.innodb-change-buffer-recovery : MDEV-19115 - Lost connection to MySQL server during query
-innodb.innodb-corrupted-table : Modified in 10.3.14
-innodb.innodb-fk : MDEV-13832 - Assertion failure on shutdown; modified in 10.3.13
-innodb.innodb-fk-warnings : Modified in 10.3.13
+innodb.innodb-corrupted-table : Modified in 10.3.9
+innodb.innodb-fk : MDEV-13832 - Assertion failure on shutdown
innodb.innodb-get-fk : MDEV-13276 - Server crash
-innodb.innodb-index : Modified in 10.3.14
-innodb.innodb-index-online : MDEV-14809 - Cannot save statistics; modified in 10.3.14
+innodb.innodb-index-online : MDEV-14809 - Cannot save statistics
+innodb.innodb-lock : Modified in 10.3.10
+innodb.innodb-mdev-7513 : Modified in 10.3.9
+innodb.innodb-page_compression_bzip2 : Modified in 10.3.10
innodb.innodb-page_compression_default : MDEV-13644 - Assertion failure
-innodb.innodb-page_compression_lzma : MDEV-14353 - Wrong result
-innodb.innodb-page_compression_tables : Modified in 10.3.13
+innodb.innodb-page_compression_lz4 : Modified in 10.3.10
+innodb.innodb-page_compression_lzma : MDEV-14353 - Wrong result; modified in 10.3.10
+innodb.innodb-page_compression_lzo : Modified in 10.3.10
innodb.innodb-page_compression_snappy : MDEV-13644 - Assertion failure
-innodb.innodb-page_compression_zip : MDEV-10641 - mutex problem
-innodb.innodb-table-online : MDEV-13894 - Wrong result; modified in 10.3.14
-innodb.innodb-virtual-columns : Modified in 10.3.13
-innodb.innodb-wl5522 : MDEV-13644 - Assertion failure
+innodb.innodb-page_compression_tables : Modified in 10.3.10
+innodb.innodb-page_compression_zip : MDEV-10641 - mutex problem; modified in 10.3.10
+innodb.innodb-table-online : MDEV-13894 - Wrong result
+innodb.innodb-wl5522 : MDEV-13644 - Assertion failure; modified in 10.3.9
innodb.innodb-wl5522-debug : MDEV-14200 - Wrong errno
-innodb.innodb_28867993 : Added in 10.3.13
innodb.innodb_bug14147491 : MDEV-11808 - Index is corrupt
innodb.innodb_bug30423 : MDEV-7311 - Wrong result
innodb.innodb_bug48024 : MDEV-14352 - Assertion failure
@@ -215,245 +201,245 @@ innodb.innodb_buffer_pool_resize : MDEV-16964
innodb.innodb_buffer_pool_resize_with_chunks : MDEV-16964 - Assertion failure
innodb.innodb_bulk_create_index_replication : MDEV-15273 - Slave failed to start
innodb.innodb_defrag_stats_many_tables : MDEV-14198 - Table is full
-innodb.innodb_force_recovery : Modified in 10.3.13
innodb.innodb_information_schema : MDEV-8851 - Wrong result
innodb.innodb_max_recordsize_32k : MDEV-14801 - Operation failed
innodb.innodb_max_recordsize_64k : MDEV-15203 - Wrong result
innodb.innodb_monitor : MDEV-10939 - Testcase timeout
innodb.innodb-page_compression_tables : MDEV-13644 - Assertion failure
innodb.innodb_stats : MDEV-10682 - wrong result
-innodb.innodb_stats_persistent : MDEV-17745 - Wrong result
+innodb.innodb_stats_persistent : Include file modified in 10.3.9
innodb.innodb_stats_persistent_debug : MDEV-14801 - Operation failed
innodb.innodb_sys_semaphore_waits : MDEV-10331 - Semaphore wait
-innodb.innodb_zip_innochecksum2 : MDEV-13882 - Warning: difficult to find free blocks
-innodb.instant_alter_bugs : Added in 10.3.13
-innodb.instant_alter_debug : Modified in 10.3.14
-innodb.instant_alter_import : Added in 10.3.13
-innodb.log_corruption : MDEV-13251 - Wrong result; modified in 10.3.13
+innodb.innodb_zip_innochecksum2 : MDEV-13882 - Extra warnings
+innodb.innodb_zip_innochecksum3 : MDEV-14486 - Resource temporarily unavailable
+innodb.instant_alter : Modified in 10.3.10
+innodb.instant_alter_crash : Include file modified in 10.3.9
+innodb.instant_alter_debug : Modified in 10.3.10
+innodb.instant_alter_rollback : Include file modified in 10.3.9
+innodb.log_corruption : MDEV-13251 - Wrong result
innodb.log_data_file_size : MDEV-14204 - Server failed to start
-innodb.log_file_name : MDEV-14193 - Exception; modified in 10.3.14
+innodb.log_file_name : MDEV-14193 - Exception
innodb.log_file_size : MDEV-15668 - Not found pattern
-innodb.monitor : MDEV-16179 - Wrong result
-innodb.purge_secondary : MDEV-15681 - Wrong result
+innodb.monitor : MDEV-16179 - Wrong result; modified in 10.3.9
+innodb.purge_secondary : MDEV-15681 - Wrong result; include file modified in 10.3.9
innodb.purge_thread_shutdown : MDEV-13792 - Wrong result
innodb.read_only_recovery : MDEV-13886 - Server crash
-innodb.recovery_shutdown : MDEV-15671 - Checksum mismatch in datafile; modified in 10.3.13
-innodb.restart : Modified in 10.3.14
-innodb.row_format_redundant : MDEV-15192 - Trying to access missing tablespace; modified in 10.3.13
+innodb.recovery_shutdown : MDEV-15671 - Checksum mismatch in datafile
+innodb.row_format_redundant : MDEV-15192 - Trying to access missing tablespace
+innodb.strict_mode : Modified in 10.3.9
innodb.table_definition_cache_debug : MDEV-14206 - Extra warning
-innodb.table_flags : MDEV-13572 - Wrong result; modified in 10.3.13
+innodb.table_flags : MDEV-13572 - Wrong result
innodb.temp_table_savepoint : MDEV-16182 - Wrong result
-innodb.temporary_table : MDEV-13265 - Wrong result
+innodb.temporary_table : MDEV-13265 - Wrong result; modified in 10.3.9
+innodb.truncate : Added in 10.3.10
+innodb.truncate_crash : Added in 10.3.10
+innodb.truncate_foreign : Added in 10.3.10
+innodb.truncate_missing : Added in 10.3.10
innodb.update_time : MDEV-14804 - Wrong result
-innodb.undo_truncate : MDEV-17340 - Server hung
-innodb.undo_truncate_recover : MDEV-13080 - Missing checkpoint; MDEV-17679 - Server has gone away; modified in 10.3.13
+innodb.undo_log : Include file modified in 10.3.9
+innodb.undo_truncate : MDEV-17340 - Server hung; added in 10.3.10
+innodb.undo_truncate_recover : MDEV-13080 - Missing checkpoint; added in 10.3.10
innodb.xa_recovery : MDEV-15279 - mysqld got exception
#-----------------------------------------------------------------------
-innodb_fts.fulltext3 : Modified in 10.3.14
-innodb_fts.innodb-fts-ddl : Modified in 10.3.13
+innodb_fts.fts_kill_query : Added in 10.3.9
innodb_fts.innodb-fts-fic : MDEV-14154 - Assertion failure
innodb_fts.innodb_fts_misc_debug : MDEV-14156 - Unexpected warning
+innodb_fts.innodb_fts_multiple_index : Modified in 10.3.9
innodb_fts.innodb_fts_plugin : MDEV-13888 - Errors in server log
innodb_fts.innodb_fts_stopword_charset : MDEV-13259 - Table crashed
innodb_fts.sync : MDEV-14808 - Wrong result
-innodb_fts.sync_ddl : MDEV-18654 - Assertion failure
+innodb_fts.sync_ddl : MDEV-17296 - Server crash; added in 10.3.9
#-----------------------------------------------------------------------
-innodb_gis.alter_spatial_index : MDEV-13745 - Server crash
+innodb_gis.create_spatial_index : Modified in 10.3.9
innodb_gis.kill_server : MDEV-16941 - Checksum mismatch
-innodb_gis.multi_pk : MDEV-13942 - ASAN use-after-poison
-innodb_gis.point_basic : Modified in 10.3.13
-innodb_gis.rtree_compress : MDEV-13942 - ASAN use-after-poison
-innodb_gis.rtree_compress2 : MDEV-16269 - Wrong result; MDEV-13942 - ASAN use-after-poison
+innodb_gis.rtree_compress : Include file modified in 10.3.9
+innodb_gis.rtree_compress2 : MDEV-16269 - Wrong result
innodb_gis.rtree_concurrent_srch : MDEV-15284 - Wrong result with embedded
-innodb_gis.rtree_debug : MDEV-13942 - ASAN use-after-poison
-innodb_gis.rtree_purge : MDEV-15275 - Timeout
+innodb_gis.rtree_purge : MDEV-15275 - Timeout; include file modified in 10.3.9
innodb_gis.rtree_recovery : MDEV-15274 - Error on check
-innodb_gis.rtree_rollback1 : MDEV-13942 - ASAN use-after-poison
-innodb_gis.rtree_split : MDEV-14208 - Too many arguments; MDEV-13942 - ASAN use-after-poison
-innodb_gis.rtree_undo : MDEV-14456 - Timeout in include file
+innodb_gis.rtree_split : MDEV-14208 - Too many arguments
+innodb_gis.rtree_undo : MDEV-14456 - Timeout in include file; include file modified in 10.3.9
innodb_gis.types : MDEV-15679 - Table is marked as crashed
-innodb_gis.update_root : MDEV-13942 - ASAN use-after-poison
#-----------------------------------------------------------------------
innodb_zip.cmp_per_index : MDEV-14490 - Table is marked as crashed
innodb_zip.innochecksum_3 : MDEV-13279 - Extra warnings
-innodb_zip.prefix_index_liftedlimit : Modified in 10.3.13
innodb_zip.wl5522_debug_zip : MDEV-11600 - Operating system error number 2
innodb_zip.wl6470_1 : MDEV-14240 - Assertion failure
-innodb_zip.wl6501_1 : MDEV-10891 - Can't create UNIX socket
-innodb_zip.wl6501_scale_1 : MDEV-13254 - Timeout, MDEV-14104 - Error 192
#-----------------------------------------------------------------------
-main.alter_table : Modified in 10.3.13
-main.alter_table_errors : Modified in 10.3.13
main.alter_table_trans : MDEV-12084 - timeout
-main.analyze_stmt : Modified in 10.3.14
main.analyze_stmt_slow_query_log : MDEV-12237 - Wrong result
main.auth_named_pipe : MDEV-14724 - System error 2
-main.check : Modified in 10.3.13
-main.check_constraint : Modified in 10.3.14
-main.check_constraint_innodb : Added in 10.3.13
-main.connect : MDEV-17282 - Wrong result; modified in 10.3.13
+main.bootstrap : Modified in 10.3.9
+main.check_constraint : Modified in 10.3.9
+main.column_compression : Modified in 10.3.9
+main.connect : MDEV-16270, MDEV-17282 - Wrong result
main.connect2 : MDEV-13885 - Server crash
-main.constraints : Modified in 10.3.13
+main.constraints : Modified in 10.3.10
main.count_distinct2 : MDEV-11768 - timeout
main.create_delayed : MDEV-10605 - failed with timeout
main.create_drop_event : MDEV-16271 - Wrong result
-main.create_drop_role : Modified in 10.3.14
-main.ctype_ucs : MDEV-17681 - Data too long for column
+main.create_or_replace : Modified in 10.3.10
+main.create_replace_tmp : Added in 10.3.9
+main.cte_nonrecursive : Modified in 10.3.10
+main.cte_recursive : Modified in 10.3.10
+main.cte_recursive_not_embedded : Added in 10.3.9
+main.ctype_binary : Modified in 10.3.9
+main.ctype_eucjpms : Modified in 10.3.9
+main.ctype_euckr : Modified in 10.3.9
+main.ctype_gbk : Modified in 10.3.9
+main.ctype_latin1 : Modified in 10.3.9
+main.ctype_ucs : Modified in 10.3.9
+main.ctype_ujis : Modified in 10.3.9
main.ctype_upgrade : MDEV-16945 - Error upon mysql_upgrade
-main.ctype_utf16le : MDEV-10675: timeout or extra warnings
-main.ctype_utf16 : MDEV-10675: timeout or extra warnings
-main.ctype_utf8mb4_innodb : MDEV-17744 - Timeout; MDEV-18567 - ASAN use-after-poison
-main.ddl_i18n_koi8r : Modified in 10.3.14
-main.ddl_i18n_utf8 : Modified in 10.3.14
+main.ctype_utf16le : MDEV-10675: timeout or extra warnings; modified in 10.3.9
+main.ctype_utf16 : MDEV-10675: timeout or extra warnings; modified in 10.3.9
+main.ctype_utf32 : Modified in 10.3.9
+main.ctype_utf8mb4 : Modified in 10.3.9
+main.ctype_utf8 : Modified in 10.3.9
main.debug_sync : MDEV-10607 - internal error
-main.derived_cond_pushdown : Modified in 10.3.14
+main.derived : Modified in 10.3.9
+main.derived_cond_pushdown : Modified in 10.3.10
main.derived_opt : MDEV-11768 - timeout
-main.derived_split_innodb : Modified in 10.3.14
+main.derived_split_innodb : Modified in 10.3.10
main.distinct : MDEV-14194 - Crash
main.drop_bad_db_type : MDEV-15676 - Wrong result
-main.error_simulation : Modified in 10.3.13
-main.events_1 : Modified in 10.3.14
main.events_2 : MDEV-13277 - Crash
main.events_bugs : MDEV-12892 - Crash
main.events_restart : MDEV-12236 - Server shutdown problem
main.events_slowlog : MDEV-12821 - Wrong result
-main.func_debug : Modified in 10.3.14
-main.func_json : Modified in 10.3.14
-main.func_math : Modified in 10.3.14
-main.func_misc : Modified in 10.3.13
-main.func_str : Modified in 10.3.14
-main.func_time : Modified in 10.3.14
-main.gis : MDEV-13411 - wrong result on P8; modified in 10.3.13
+main.flush : Modified in 10.3.10
+main.func_isnull : Modified in 10.3.10
+main.func_json : Modified in 10.3.10
+main.func_time : Modified in 10.3.10
+main.gis : MDEV-13411 - wrong result on P8; modified in 10.3.10
+main.gis-precise : Modified in 10.3.10
+main.grant : Modified in 10.3.10
+main.group_min_max : Modified in 10.3.10
main.host_cache_size_functionality : MDEV-10606 - sporadic failure on shutdown
-main.huge_frm-6224 : Modified in 10.3.13
main.index_intersect_innodb : MDEV-10643 - failed with timeout
main.index_merge_innodb : MDEV-7142 - Plan mismatch
main.innodb_mysql_lock : MDEV-7861 - Wrong result
-main.innodb_mysql_sync : Modified in 10.3.13
-main.join_cache : MDEV-17743 - Bad address from storage engine MyISAM
+main.invisible_field_debug : Modified in 10.3.9
+main.join : Modified in 10.3.10
+main.join_cache : Modified in 10.3.9
+main.join_outer : Modified in 10.3.9
main.kill-2 : MDEV-13257 - Wrong result
-main.kill_processlist-6619 : MDEV-10793 - Wrong result; modified in 10.3.14
-main.loaddata : Modified in 10.3.14
-main.log_slow : MDEV-13263 - Wrong result; modified in 10.3.14
-main.log_slow_debug : Added in 10.3.14
+main.kill_processlist-6619 : MDEV-10793 - Wrong result
+main.log_slow : MDEV-13263 - Wrong result
main.log_tables-big : MDEV-13408 - wrong result
-main.lowercase_table : Modified in 10.3.13
main.mdev375 : MDEV-10607 - sporadic "can't connect"
main.mdev-504 : MDEV-15171 - warning
main.merge : MDEV-10607 - sporadic "can't connect"
-main.mysql : Modified in 10.3.14
-main.mysqld--help : Modified in 10.3.14
+main.mysql : Modified in 10.3.10
main.mysql_client_test_comp : MDEV-16641 - Error in exec
main.mysql_client_test_nonblock : CONC-208 - Error on Power; MDEV-15096 - exec failed
-main.mysqlbinlog_row_minimal : Modified in 10.3.13
-main.mysqld_option_err : Modified in 10.3.13
+main.mysql_not_windows : Modified in 10.3.10
+main.mysqld_option_err : MDEV-12747 - Timeout
main.mysqldump : MDEV-14800 - Stack smashing detected
main.mysqlhotcopy_myisam : MDEV-10995 - Hang on debug
main.mysqlslap : MDEV-11801 - timeout
main.mysqltest : MDEV-13887 - Wrong result
main.mysql_upgrade_noengine : MDEV-14355 - Wrong result
main.mysql_upgrade_ssl : MDEV-13492 - Unknown SSL error
-main.old-mode : Modified in 10.3.14
-main.openssl_1 : MDEV-13492 - Unknown SSL error; modified in 10.3.13
+main.openssl_1 : MDEV-13492 - Unknown SSL error
main.openssl_6975 : MDEV-17184 - Failures with OpenSSL 1.1.1
+main.opt_tvc : Modified in 10.3.10
main.order_by_optimizer_innodb : MDEV-10683 - Wrong result
-main.partition : Modified in 10.3.14
main.partition_debug_sync : MDEV-15669 - Deadlock found when trying to get lock
+main.partition_error : Modified in 10.3.10
main.partition_innodb_plugin : MDEV-12901 - Valgrind warnings
-main.pool_of_threads : MDEV-18135 - SSL error: key too small
-main.ps : MDEV-11017 - sporadic wrong Prepared_stmt_count
+main.ps : MDEV-11017 - sporadic wrong Prepared_stmt_count; modified in 10.3.10
main.query_cache_debug : MDEV-15281 - Query cache is disabled
main.query_cache : MDEV-16180 - Wrong result
-main.range_innodb : Modified in 10.3.13
+main.range : Modified in 10.3.10
+main.range_debug : Added in 10.3.10
main.range_vs_index_merge_innodb : MDEV-15283 - Server has gone away
-main.reopen_temp_table : Modified in 10.3.14
-main.reset_connection : Modified in 10.3.13
-main.row-checksum : Modified in 10.3.13
+main.rename : Modified in 10.3.9
+main.selectivity : Modified in 10.3.10
main.set_statement : MDEV-13183 - Wrong result
main.shm : MDEV-12727 - Mismatch, ERROR 2013
-main.show_check : Modified in 10.3.14
main.show_explain : MDEV-10674 - Wrong result code
-main.show_explain_ps : Modified in 10.3.14
-main.sp : MDEV-7866 - Mismatch; modified in 10.3.14
-main.sp_notembedded : MDEV-10607 - internal error; modified in 10.3.14
-main.sp-security : MDEV-10607 - sporadic "can't connect"
-main.sp_trans : Modified in 10.3.14
-main.sp-ucs2 : Modified in 10.3.14
-main.ssl : MDEV-17184 - Failures with OpenSSL 1.1.1; modified in 10.3.13
+main.sp : MDEV-7866 - Mismatch; modified in 10.3.10
+main.sp_notembedded : MDEV-10607 - internal error
+main.sp-security : MDEV-10607 - sporadic "can't connect"; modified in 10.3.10
+main.ssl : MDEV-17184 - Failures with OpenSSL 1.1.1
main.ssl_ca : MDEV-10895 - SSL connection error on Power
-main.ssl_cipher : MDEV-17184 - Failures with OpenSSL 1.1.1; modified in 10.3.13
+main.ssl_cert_verify : MDEV-13735 - Server crash
+main.ssl_cipher : MDEV-17184 - Failures with OpenSSL 1.1.1
main.ssl_connect : MDEV-13492 - Unknown SSL error
-main.ssl_crl : MDEV-19119 - Wrong error code; modified in 10.3.14
-main.ssl_crl_clients : Modified in 10.3.14
main.ssl_timeout : MDEV-11244 - Crash
-main.stat_tables : Modified in 10.3.14
+main.stat_tables : Modified in 10.3.10
main.stat_tables_par_innodb : MDEV-14155 - Wrong rounding
main.stat_tables_par : MDEV-13266 - Wrong result
main.status : MDEV-13255 - Wrong result
-main.subselect2 : Modified in 10.3.13
+main.subselect_extra_no_semijoin : Modified in 10.3.10
main.subselect_innodb : MDEV-10614 - Wrong result
-main.subselect_mat : Modified in 10.3.13
-main.subselect_sj : Modified in 10.3.14
+main.subselect : Modified in 10.3.9
+main.subselect_sj2_mat : Modified in 10.3.9
+main.subselect_sj_mat : Modified in 10.3.9
+main.table_value_constr : Modified in 10.3.10
main.tc_heuristic_recover : MDEV-14189 - Wrong result
-main.type_blob : MDEV-15195 - Wrong result; modified in 10.3.14
+main.temp_table : Modified in 10.3.10
+main.type_blob : MDEV-15195 - Wrong result
+main.type_datetime : MDEV-14322 - wrong result
main.type_datetime_hires : MDEV-10687 - Timeout
-main.type_decimal : Modified in 10.3.14
-main.type_year : Modified in 10.3.14
-main.union : Modified in 10.3.14
-main.update : Modified in 10.3.14
-main.userstat : MDEV-12904 - SSL errors; modified in 10.3.14
-main.view : Modified in 10.3.14
-main.wait_timeout : MDEV-19023 - Lost connection to MySQL server during query
-main.win : Modified in 10.3.14
-main.xa : MDEV-11769 - lock wait timeout
+main.type_float : Modified in 10.3.10
+main.union : Modified in 10.3.9
+main.userstat : MDEV-12904 - SSL errors
+main.win : Modified in 10.3.10
+main.win_lead_lag : Modified in 10.3.10
+main.xa : MDEV-11769 - lock wait timeout; modified in 10.3.9
#-----------------------------------------------------------------------
+maria.concurrent : Added in 10.3.10
+maria.create : Added in 10.3.10
maria.insert_select : MDEV-12757 - Timeout
maria.insert_select-7314 : MDEV-16492 - Timeout
-maria.maria : MDEV-14430 - Extra warning
-maria.maria-recovery : Modified in 10.3.14
+maria.maria : MDEV-14430 - Extra warning; modified in 10.3.10
#-----------------------------------------------------------------------
-mariabackup.absolute_ibdata_paths : MDEV-16571 - Wrong result; modified in 10.3.14
+mariabackup.absolute_ibdata_paths : MDEV-16571 - Wrong result; opt file modified in 10.3.10
mariabackup.apply-log-only : MDEV-14192 - Assertion failure
mariabackup.apply-log-only-incr : MDEV-14192 - Assertion failure
mariabackup.backup_ssl : MDEV-14192 - Assertion failure
-mariabackup.create_with_data_directory_during_backup : MDEV-14192 - Assertion failure
+mariabackup.create_during_backup : Added in 10.3.10
+mariabackup.create_with_data_directory_during_backup : Added in 10.3.10
mariabackup.data_directory : MDEV-15270 - Error on exec
-mariabackup.drop_table_during_backup : MDEV-14192 - Assertion failure; modified in 10.3.13
-mariabackup.encrypted_page_compressed : Modified in 10.3.14
-mariabackup.encrypted_page_corruption : Modified in 10.3.14
+mariabackup.drop_table_during_backup : Added in 10.3.10
mariabackup.full_backup : MDEV-16571 - Wrong result
-mariabackup.huge_lsn : MDEV-15662 - Sequence number is in the future; MDEV-18569 - Table doesn't exist; modified in 10.3.13
+mariabackup.huge_lsn : MDEV-15662 - Sequence number is in the future
mariabackup.incremental_backup : MDEV-14192 - Assertion failure
-mariabackup.incremental_ddl_before_backup : Added in 10.3.13
-mariabackup.incremental_encrypted : MDEV-15667 - timeout
-mariabackup.incremental_rocksdb : Added in 10.3.13
-mariabackup.innodb_log_optimize_ddl : MDEV-14192 - Assertion failure
-mariabackup.log_checksum_mismatch : MDEV-16571 - Wrong result; modified in 10.3.14
-mariabackup.mdev-14447 : MDEV-15201 - Timeout; modified in 10.3.13
-mariabackup.mlog_index_load : MDEV-14192 - Assertion failure
-mariabackup.page_compression_level : Added in 10.3.14
+mariabackup.incremental_ddl_during_backup : Added in 10.3.10
+mariabackup.incremental_encrypted : MDEV-15667 - timeout; modified in 10.3.9
+mariabackup.innodb_log_optimize_ddl : Added in 10.3.9
+mariabackup.log_checksum_mismatch : MDEV-16571 - Wrong result
+mariabackup.mdev-14447 : MDEV-15201 - Timeout
+mariabackup.mlog_index_load : MDEV-14192 - Assertion failure; added in 10.3.10
+mariabackup.nolock_ddl_during_backup_end : Added in 10.3.10
mariabackup.partial_exclude : MDEV-15270 - Error on exec
-mariabackup.rename_during_backup : MDEV-14192 - Assertion failure; modified in 10.3.13
-mariabackup.rename_during_mdl_lock : MDEV-14192 - Assertion failure
-mariabackup.system_versioning : MDEV-14192 - Assertion failure
-mariabackup.unencrypted_page_compressed : MDEV-18653 - Wrong error; modified in 10.3.14
-mariabackup.unsupported_redo : MDEV-14192 - Crash
+mariabackup.recreate_table_during_backup : Added in 10.3.10
+mariabackup.rename_during_backup : Added in 10.3.10
+mariabackup.rename_during_mdl_lock : Modified in 10.3.10
+mariabackup.skip_innodb : Added in 10.3.10
+mariabackup.truncate_during_backup : Added in 10.3.10
+mariabackup.undo_space_id : Opt file modified in 10.3.10
+mariabackup.unsupported_redo : MDEV-14192 - Crash; modified in 10.3.10
+mariabackup.xb_aws_key_management : MDEV-17341 - Missing warning
mariabackup.xb_compressed_encrypted : MDEV-14812 - Segmentation fault
mariabackup.xb_file_key_management : MDEV-16571 - Wrong result
+mariabackup.xb_history : MDEV-16268 - Error on exec
mariabackup.xb_page_compress : MDEV-14810 - status: 1, errno: 11
-mariabackup.xb_partition : MDEV-14192 - Crash; MDEV-17584 - Crash upon shutdown
+mariabackup.xb_partition : MDEV-14192 - Crash
mariabackup.xb_rocksdb : MDEV-17338 - Server hung on shutdown
mariabackup.xbstream : MDEV-14192 - Crash
@@ -463,10 +449,17 @@ mroonga/storage.column_datetime_32bit_2038 : Wrong resul
mroonga/storage.column_datetime_32bit_before_unix_epoch : Wrong result on Alpha
mroonga/storage.column_datetime_32bit_max : Wrong result on Alpha
mroonga/storage.column_datetime_32bit_out_of_range : Wrong result on Alpha
-mroonga/storage.column_generated_stored_add_column : Modified in 10.3.14
+mroonga/storage.index_multiple_column_range_all_used_less_than : MDEV-16127 - Wrong result with GCC 8
+mroonga/storage.index_multiple_column_range_all_used_less_than_or_equal : MDEV-16127 - Wrong result with GCC 8
+mroonga/storage.index_multiple_column_range_partially_used_have_prefix_less_than : MDEV-16127 - Wrong result with GCC 8
+mroonga/storage.index_multiple_column_range_partially_used_have_prefix_less_than_or_equal : MDEV-16127 - Wrong result with GCC 8
+mroonga/storage.index_multiple_column_range_partially_used_no_prefix_less_than : MDEV-16127 - Wrong result with GCC 8
+mroonga/storage.index_multiple_column_range_partially_used_no_prefix_less_than_or_equal : MDEV-16127 - Wrong result with GCC 8
mroonga/storage.index_multiple_column_unique_date_32bit_equal : Wrong result on Alpha
mroonga/storage.index_multiple_column_unique_date_order_32bit_desc : Wrong result on Alpha
mroonga/storage.index_multiple_column_unique_datetime_index_read : MDEV-8643 - Valgrind
+mroonga/storage.optimization_order_limit_optimized_datetime_less_than : MDEV-16127 - Wrong result with GCC 8
+mroonga/storage.optimization_order_limit_optimized_datetime_less_than_or_equal : MDEV-16127 - Wrong result with GCC 8
mroonga/storage.repair_table_no_index_file : MDEV-9364 - wrong result, MDEV-14807 - wrong error message
mroonga/wrapper.repair_table_no_index_file : MDEV-14807 - Wrong error message
@@ -482,17 +475,16 @@ multi_source.status_vars : MDEV-4632 -
#-----------------------------------------------------------------------
-parts.partition_alter1_1_2_innodb : MDEV-18655 - ASAN unknown crash
-parts.partition_alter1_1_innodb : MDEV-18655 - ASAN unknown crash
-parts.partition_alter1_2_innodb : MDEV-18655 - ASAN unknown crash
parts.partition_alter2_2_maria : MDEV-14364 - Lost connection to MySQL server during query
parts.partition_auto_increment_archive : MDEV-16491 - Marked as crashed and should be repaired
parts.partition_auto_increment_maria : MDEV-14430 - Extra warning
-parts.partition_auto_increment_max : Added in 10.3.14
parts.partition_debug_innodb : MDEV-10891 - Can't create UNIX socket; MDEV-15095 - Table doesn't exist
parts.partition_exch_qa_10 : MDEV-11765 - wrong result
parts.partition_innodb_status_file : MDEV-12901 - Valgrind
parts.partition_special_innodb : MDEV-16942 - Timeout
+parts.show_create : Modified in 10.3.10
+parts.truncate_locked : Added in 10.3.9
+parts.update_and_cache : Added in 10.3.10
#-----------------------------------------------------------------------
@@ -500,10 +492,11 @@ percona.* : MDEV-10997
#-----------------------------------------------------------------------
+perfschema.bad_option_1 : MDEV-13892 - Timeout
+perfschema.bad_option_3 : MDEV-12728 - Timeout on Power
+perfschema.bad_option_5 : MDEV-14197 - Timeout
perfschema.connect_attrs : MDEV-17283 - Wrong result
perfschema.dml_file_instances : MDEV-15179 - Wrong result
-perfschema.dml_setup_instruments : Modified in 10.3.13
-perfschema.dml_threads : MDEV-17746 - Wrong errno
perfschema.func_file_io : MDEV-5708 - fails for s390x
perfschema.func_mutex : MDEV-5708 - fails for s390x
perfschema.hostcache_ipv4_addrinfo_again_allow : MDEV-12759 - Crash
@@ -511,8 +504,8 @@ perfschema.hostcache_ipv6_addrinfo_again_allow : MDEV-12752
perfschema.hostcache_ipv6_addrinfo_bad_allow : MDEV-13260 - Crash
perfschema.hostcache_ipv6_ssl : MDEV-10696 - Crash
perfschema.privilege_table_io : MDEV-13184 - Extra lines
-perfschema.relaylog : MDEV-18134 - Wrong result
perfschema.rpl_gtid_func : MDEV-16897 - Wrong result
+perfschema.socket_connect : MDEV-15677 - Wrong result
perfschema.socket_summary_by_event_name_func : MDEV-10622 - Wrong result
perfschema.stage_mdl_global : MDEV-11803 - wrong result on slow builders
perfschema.stage_mdl_procedure : MDEV-11545 - Missing row
@@ -525,7 +518,6 @@ perfschema_stress.* : MDEV-10996
#-----------------------------------------------------------------------
-plugins.audit_null : Modified in 10.3.13
plugins.feedback_plugin_send : MDEV-7932, MDEV-11118 - Connection problems and such
plugins.processlist : MDEV-16574 - Wrong result
plugins.server_audit : MDEV-14295 - Wrong result
@@ -535,31 +527,6 @@ plugins.thread_pool_server_audit : MDEV-14295
rocksdb.* : Too many crashes in various tests
-rocksdb.2pc_group_commit : MDEV-14455 - Wrong result
-rocksdb.allow_no_primary_key_with_sk : MDEV-16639 - Server crash
-rocksdb.autoinc_crash_safe_partition : MDEV-16639, MDEV-16637 - Server crash
-rocksdb.autoinc_vars_thread : MDEV-16573 - Debug sync timed out
-rocksdb.bloomfilter2 : MDEV-16564 - Wrong result
-rocksdb.deadlock : MDEV-16033 - Timeout
-rocksdb.drop_index_inplace : MDEV-14162 - Crash on shutdown
-rocksdb.drop_table : MDEV-14308 - Timeout
-rocksdb.drop_table3 : MDEV-16949 - Server crash
-rocksdb.dup_key_update : MDEV-17284 - Wrong result
-rocksdb.locking_issues : MDEV-14464 - Wrong result
-rocksdb.mariadb_ignore_dirs : MDEV-16639 - Server crash
-rocksdb.mariadb_port_fixes : MDEV-16387 - Wrong plan
-rocksdb.max_open_files : MDEV-16639 - Server crash
-rocksdb.perf_context : MDEV-17285 - Wrong results
-rocksdb.rocksdb_cf_options : MDEV-16639 - Server crash
-rocksdb.rocksdb_cf_per_partition : MDEV-16636 - Wrong result
-rocksdb.rocksdb_parts : MDEV-13843 - Wrong result
-rocksdb.ttl_primary_read_filtering : MDEV-16560 - Wrong result
-rocksdb.ttl_secondary : MDEV-16943 - Timeout
-rocksdb.ttl_secondary_read_filtering : MDEV-16560 - Wrong result
-rocksdb.unique_check : MDEV-16576 - Wrong errno
-rocksdb.use_direct_reads_writes : MDEV-16646 - Server crash
-rocksdb.write_sync : MDEV-16965 - WRong result
-
#-----------------------------------------------------------------------
rocksdb_rpl.mdev12179 : MDEV-16632 - Crash
@@ -582,6 +549,7 @@ rpl-tokudb.* : MDEV-14354
rpl-tokudb.rpl_tokudb_commit_after_flush : MDEV-16966 - Server crash
rpl.last_insert_id : MDEV-10625 - warnings in error log
+rpl.rpl_15867 : Added in 10.3.9
rpl.rpl_auto_increment : MDEV-10417 - Fails on Mips
rpl.rpl_auto_increment_bug45679 : MDEV-10417 - Fails on Mips
rpl.rpl_auto_increment_update_failure : MDEV-10625 - warnings in error log
@@ -591,16 +559,15 @@ rpl.rpl_colSize : MDEV-16112
rpl.rpl_ctype_latin1 : MDEV-14813 - Wrong result on Mac
rpl.rpl_ddl : MDEV-10417 - Fails on Mips
rpl.rpl_domain_id_filter_io_crash : MDEV-12729 - Timeout in include file, MDEV-13677 - Server crash
-rpl.rpl_domain_id_filter_master_crash : MDEV-19043 - Table marked as crashed
-rpl.rpl_domain_id_filter_restart : MDEV-10684 - Wrong result; MDEV-19043 - Table marked as crashed
+rpl.rpl_domain_id_filter_restart : MDEV-10684 - Wrong result
rpl.rpl_drop_db_fail : MDEV-16898 - Slave fails to start
rpl.rpl_extra_col_master_innodb : MDEV-16570 - Extra warning
rpl.rpl_extra_col_master_myisam : MDEV-14203 - Extra warning
+rpl.rpl_foreign_key_innodb : Modified in 10.3.10
rpl.rpl_gtid_basic : MDEV-10681 - server startup problem
rpl.rpl_gtid_crash : MDEV-9501 - Failed registering on master, MDEV-13643 - Lost connection
rpl.rpl_gtid_delete_domain : MDEV-14463 - Timeout
rpl.rpl_gtid_errorhandling : MDEV-13261 - Crash
-rpl.rpl_gtid_excess_initial_delay : Added in 10.3.14
rpl.rpl_gtid_mdev9033 : MDEV-10680 - warnings
rpl.rpl_gtid_reconnect : MDEV-14497 - Crash
rpl.rpl_gtid_stop_start : MDEV-10629 - Crash on shutdown, MDEV-12629 - Valgrind warnings
@@ -613,39 +580,37 @@ rpl.rpl_insert_id_pk : MDEV-16567
rpl.rpl_insert_ignore : MDEV-14365 - Lost connection to MySQL server during query
rpl.rpl_invoked_features : MDEV-10417 - Fails on Mips
rpl.rpl_mariadb_slave_capability : MDEV-11018 - Extra lines in binlog
-rpl.rpl_mdev12179 : MDEV-19043 - Table marked as crashed
rpl.rpl_mdev6020 : MDEV-15272 - Server crash
rpl.rpl_mixed_mixing_engines : MDEV-14489 - Sync slave with master failed
rpl.rpl_non_direct_mixed_mixing_engines : MDEV-14489 - Sync slave with master failed
rpl.rpl_non_direct_row_mixing_engines : MDEV-16561 - Timeout in master_pos_wait
rpl.rpl_non_direct_stm_mixing_engines : MDEV-14489 - Failed sync_slave_with_master
rpl.rpl_parallel : MDEV-10653 - Timeouts
-rpl.rpl_parallel2 : MDEV-17390 - Operation cannot be performed
rpl.rpl_parallel_conflicts : MDEV-15272 - Server crash
rpl.rpl_parallel_mdev6589 : MDEV-12979 - Assertion failure
rpl.rpl_parallel_multilevel2 : MDEV-14723 - Timeout
rpl.rpl_parallel_optimistic : MDEV-15278 - Failed to sync with master
rpl.rpl_parallel_optimistic_nobinlog : MDEV-15278 - Failed to sync with master
rpl.rpl_parallel_retry : MDEV-11119 - Crash; MDEV-17109 - Timeout
-rpl.rpl_parallel_temptable : MDEV-10356 - Crash; MDEV-19076 - Wrong result
+rpl.rpl_parallel_temptable : MDEV-10356 - Crash
rpl.rpl_partition_innodb : MDEV-10417 - Fails on Mips
rpl.rpl_password_boundaries : MDEV-11534 - Slave IO warnings
-rpl.rpl_rewrt_db : Modified in 10.3.14
-rpl.rpl_row_001 : MDEV-16653 - MTR's internal check fails
+rpl.rpl_row_001 : MDEV-16653 - MTR's internal check fails; modified in 10.3.9
rpl.rpl_row_basic_11bugs : MDEV-12171 - Server failed to start
rpl.rpl_row_basic_2myisam : MDEV-13875 - command "diff_files" failed
-rpl.rpl_row_big_table_id : Added in 10.3.13
rpl.rpl_row_drop_create_temp_table : MDEV-14487 - Wrong result
rpl.rpl_row_img_blobs : MDEV-13875 - command "diff_files" failed
rpl.rpl_row_img_eng_min : MDEV-13875 - diff_files failed
rpl.rpl_row_img_eng_noblob : MDEV-13875 - command "diff_files" failed
rpl.rpl_row_index_choice : MDEV-15196 - Slave crash
rpl.rpl_row_sp001 : MDEV-9329 - Fails on Ubuntu/s390x
+rpl.rpl_row_spatial : Added in 10.3.10
rpl.rpl_row_until : MDEV-14052 - Master will not send events with checksum
rpl.rpl_semi_sync : MDEV-11220 - Wrong result
rpl.rpl_semi_sync_after_sync : MDEV-14366 - Wrong result
rpl.rpl_semi_sync_after_sync_row : MDEV-14366 - Wrong result
rpl.rpl_semi_sync_event_after_sync : MDEV-11806 - warnings
+rpl.rpl_semi_sync_master_shutdown : Added in 10.3.9
rpl.rpl_semi_sync_uninstall_plugin : MDEV-7140 - Assorted failures
rpl.rpl_semi_sync_wait_point : MDEV-11807 - timeout in wait condition
rpl.rpl_semisync_ali_issues : MDEV-16272 - Wrong result
@@ -653,22 +618,20 @@ rpl.rpl_set_statement_default_master : MDEV-13258
rpl.rpl_show_slave_hosts : MDEV-10681 - Crash
rpl.rpl_skip_replication : MDEV-13258 - Extra warning
rpl.rpl_slave_grp_exec : MDEV-10514 - Deadlock
-rpl.rpl_slave_invalid_external_user : Added in 10.3.14
rpl.rpl_slave_load_tmpdir_not_exist : MDEV-14203 - Extra warning
rpl.rpl_slow_query_log : MDEV-13250 - Test abort
rpl.rpl_sp_effects : MDEV-13249 - Crash
rpl.rpl_start_stop_slave : MDEV-13567 - Sync slave timeout
+rpl.rpl_stm_000001 : MDEV-16274 - Connection attributes were truncated; modified in 10.3.9
rpl.rpl_stm_mixing_engines : MDEV-14489 - Sync slave with master failed
rpl.rpl_stm_multi_query : MDEV-9501 - Failed registering on master
rpl.rpl_stm_relay_ign_space : MDEV-14360 - Test assertion
+rpl.rpl_stm_reset_slave : MDEV-16274 - Connection attributes were truncated
rpl.rpl_stm_stop_middle_group : MDEV-13791 - Server crash
rpl.rpl_sync : MDEV-13830 - Assertion failure
rpl.rpl_temporal_mysql56_to_mariadb53 : MDEV-9501 - Failed registering on master
rpl.rpl_temporary_error2 : MDEV-10634 - Wrong number of retries
-rpl.rpl_trigger : MDEV-18055 - Wrong result
-rpl.rpl_user : Modified in 10.3.13
rpl.sec_behind_master-5114 : MDEV-13878 - Wrong result
-rpl.show_status_stop_slave_race-7126 : MDEV-17438 - Timeout
rpl/extra/rpl_tests.* : MDEV-10994 - Not maintained
@@ -681,8 +644,9 @@ sphinx.union-5539 : MDEV-10986
#-----------------------------------------------------------------------
-spider.* : MDEV-9329, MDEV-18737 - tests are too memory-consuming; init_spider.inc modified in 10.3.13
+spider.* : MDEV-9329 - tests are too memory-consuming
spider.basic_sql : MDEV-11186 - Internal check fails
+spider.timestamp : Added in 10.3.9
spider/bg.direct_aggregate : MDEV-7098 - Packets out of order
spider/bg.direct_aggregate_part : MDEV-7098 - Trying to unlock mutex that wasn't locked
@@ -693,13 +657,13 @@ spider/bg.spider_fixes : MDEV-7098 -
spider/bg.spider_fixes_part : MDEV-7098 - Trying to unlock mutex that wasn't locked
spider/bg.vp_fixes : MDEV-9329 - Fails on Ubuntu/s390x
-spider/bugfix.* : Added in 10.3.13
-
spider/handler.* : MDEV-10987, MDEV-10990 - Tests have not been maintained
#-----------------------------------------------------------------------
+sql_sequence.auto_increment : Added in 10.3.10
sql_sequence.concurrent_create : MDEV-16635 - Server crash
+sql_sequence.next : Modified in 10.3.10
#-----------------------------------------------------------------------
@@ -713,27 +677,23 @@ stress.ddl_innodb : MDEV-10635
#-----------------------------------------------------------------------
sys_vars.autocommit_func2 : MDEV-9329 - Fails on Ubuntu/s390x
-sys_vars.delayed_insert_limit_func : MDEV-17683 - Wrong result
sys_vars.innodb_buffer_pool_dump_at_shutdown_basic : MDEV-14280 - Unexpected error
+sys_vars.innodb_log_optimize_ddl_basic : Added in 10.3.9
sys_vars.keep_files_on_create_basic : MDEV-10676 - timeout
sys_vars.log_slow_admin_statements_func : MDEV-12235 - Server crash
-sys_vars.max_prepared_stmt_count_func : Modified in 10.3.14
sys_vars.rpl_init_slave_func : MDEV-10149 - Test assertion
sys_vars.slow_query_log_func : MDEV-14273 - Wrong result
-sys_vars.sql_buffer_result_func : Modified in 10.3.14
-sys_vars.table_definition_cache_basic : Modified in 10.3.13
sys_vars.thread_cache_size_func : MDEV-11775 - Wrong result
-sys_vars.thread_stack_basic : Modified in 10.3.14
-sys_vars.tmp_disk_table_size_func : Modified in 10.3.13
-sys_vars.transaction_prealloc_size_bug27322 : MDEV-19024 - Wrong result
sys_vars.wait_timeout_func : MDEV-12896 - Wrong result
+sys_vars.wsrep_start_position_basic : Modified in 10.3.10
#-----------------------------------------------------------------------
+tokudb.* : suite.pm and massive modifications in 10.3.10
+
tokudb.change_column_all_1000_10 : MDEV-12640 - Lost connection
tokudb.change_column_bin : MDEV-12640 - Lost connection
tokudb.change_column_char : MDEV-12822 - Lost connection
-tokudb.change_column_varbin : MDEV-17682 - Timeout
tokudb.cluster_filter : MDEV-10678 - Wrong execution plan
tokudb.cluster_filter_hidden : MDEV-10678 - Wrong execution plan
tokudb.cluster_filter_unpack_varchar : MDEV-10636 - Wrong execution plan
@@ -762,12 +722,13 @@ tokudb_backup.* : MDEV-11001
#-----------------------------------------------------------------------
-tokudb_bugs.PS-4979 : Added in 10.3.13
+tokudb_bugs.alter_table_comment_rebuild_data : Added in 10.1.36
tokudb_bugs.checkpoint_lock : MDEV-10637 - Wrong processlist output
tokudb_bugs.checkpoint_lock_3 : MDEV-10637 - Wrong processlist output
tokudb_bugs.frm_store : MDEV-12823 - Valgrind
tokudb_bugs.frm_store2 : MDEV-12823 - Valgrind
tokudb_bugs.frm_store3 : MDEV-12823 - Valgrind
+tokudb_bugs.PS-3773 : Added in 10.1.36
tokudb_bugs.xa : MDEV-11804 - Lock wait timeout
tokudb_bugs.xa-3 : MDEV-16953 - Corrupt log record found
@@ -786,39 +747,29 @@ tokudb_sys_vars.* : MDEV-11001
#-----------------------------------------------------------------------
unit.conc_basic-t : MDEV-15286 - not ok 7 - test_reconnect_maxpackage
-unit.conc_errors : MDEV-18634 - ASAN global-buffer-overflow
unit.conc_misc : MDEV-14811 - not ok 12 - test_conc49
unit.conc_ps_bugs : MDEV-13252 - not ok 44 test_bug4236
-unit.lf : MDEV-18416 - object was probably modified after being freed
+unit.lf : MDEV-12897 - Signal 11 thrown
unit.ma_test_loghandler : MDEV-10638 - record read not ok
-unit.my_atomic : MDEV-18472 - Signal 11
+unit.my_atomic : MDEV-15670 - Signal 11 thrown
#-----------------------------------------------------------------------
+vcol.index : Modified in 10.3.10
vcol.not_supported : MDEV-10639 - Testcase timeout
-vcol.vcol_keys_innodb : MDEV-10639 - Testcase timeout; modified in 10.3.13
-vcol.vcol_keys_myisam : Modified in 10.3.14
+vcol.vcol_keys_innodb : MDEV-10639 - Testcase timeout
vcol.vcol_misc : MDEV-16651 - Wrong error message
-vcol.wrong_arena : Modified in 10.3.14
#-----------------------------------------------------------------------
-versioning.alter : Modified in 10.3.14
-versioning.foreign : Modified in 10.3.14
-versioning.partition : Modified in 10.3.14
-versioning.partition_innodb : Modified in 10.3.14
-versioning.rpl : Modified in 10.3.14
-versioning.rpl_row : Added in 10.3.14
-versioning.trx_id : Modified in 10.3.14
-versioning.update : Modified in 10.3.14
+versioning.truncate : Modified in 10.3.10
#-----------------------------------------------------------------------
wsrep.foreign_key : MDEV-14725 - WSREP has not yet prepared node
wsrep.mdev_6832 : MDEV-14195 - Check testcase failed
wsrep.pool_of_threads : MDEV-17345 - WSREP has not yet prepared node for application use
-wsrep.variables : MDEV-14311 - Wrong result; MDEV-17585 - Deadlock; modified in 10.3.13
-wsrep.wsrep-recover-v25 : Added in 10.3.13
+wsrep.variables : MDEV-14311 - Wrong result; modified in 10.3.10
#-----------------------------------------------------------------------
diff --git a/mysql-test/valgrind.supp b/mysql-test/valgrind.supp
index a5b65f5315c..2ad9eb7532f 100644
--- a/mysql-test/valgrind.supp
+++ b/mysql-test/valgrind.supp
@@ -1779,3 +1779,23 @@
fun:CRYPTO_malloc
fun:ENGINE_new
}
+
+#
+# OpenSSL 1.0.1l problems
+#
+
+{
+ OpenSSL 1.0.1l wrong jump
+ Memcheck:Cond
+ fun:bcmp
+ obj:/usr/lib64/libcrypto.so*
+ fun:FIPS_selftest
+}
+
+{
+ OpenSSL 1.0.1l wrong jump 2
+ Memcheck:Cond
+ obj:/usr/lib64/libcrypto.so*
+ fun:FIPS_mode_set
+ obj:/usr/lib64/libcrypto.so*
+}
diff --git a/mysys/errors.c b/mysys/errors.c
index 0c6942c5b82..d8c6811fbbe 100644
--- a/mysys/errors.c
+++ b/mysys/errors.c
@@ -107,12 +107,12 @@ void init_glob_errs()
void wait_for_free_space(const char *filename, int errors)
{
if (errors == 0)
- my_error(EE_DISK_FULL,MYF(ME_BELL | ME_NOREFRESH | ME_JUST_WARNING),
+ my_error(EE_DISK_FULL,MYF(ME_BELL | ME_ERROR_LOG | ME_WARNING),
filename,my_errno,MY_WAIT_FOR_USER_TO_FIX_PANIC);
if (!(errors % MY_WAIT_GIVE_USER_A_MESSAGE))
my_printf_error(EE_DISK_FULL,
"Retry in %d secs. Message reprinted in %d secs",
- MYF(ME_BELL | ME_NOREFRESH | ME_JUST_WARNING),
+ MYF(ME_BELL | ME_ERROR_LOG | ME_WARNING),
MY_WAIT_FOR_USER_TO_FIX_PANIC,
MY_WAIT_GIVE_USER_A_MESSAGE * MY_WAIT_FOR_USER_TO_FIX_PANIC );
(void) sleep(MY_WAIT_FOR_USER_TO_FIX_PANIC);
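
The errors.c hunk above is representative of the error-flag rename applied throughout the rest of this patch: ME_NOREFRESH becomes ME_ERROR_LOG, ME_JUST_WARNING becomes ME_WARNING, ME_JUST_INFO becomes ME_NOTE, ME_FATALERROR becomes ME_FATAL, and the ME_WAITTANG bit is dropped everywhere. A minimal standalone sketch of the new calling style follows; the bit values and the my_error_sketch() stub are placeholders for illustration only, not the real my_sys.h definitions.

#include <stdarg.h>
#include <stdio.h>

#define ME_BELL      4UL   /* placeholder value, not the real my_sys.h bit */
#define ME_ERROR_LOG 64UL  /* replaces the old ME_NOREFRESH */
#define ME_WARNING   16UL  /* replaces the old ME_JUST_WARNING */
#define MYF(v)       (v)

/* Stand-in for my_error(): just formats the message to stderr. */
static void my_error_sketch(int nr, unsigned long flags, const char *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  fprintf(stderr, "error %d (flags 0x%lx): ", nr, flags);
  vfprintf(stderr, fmt, ap);
  fputc('\n', stderr);
  va_end(ap);
}

int main(void)
{
  /* Old call site: MYF(ME_BELL | ME_NOREFRESH | ME_JUST_WARNING)
     New call site: MYF(ME_BELL | ME_ERROR_LOG | ME_WARNING)      */
  my_error_sketch(28 /* arbitrary error number for the sketch */,
                  MYF(ME_BELL | ME_ERROR_LOG | ME_WARNING),
                  "Disk is full writing '%s'", "./ibdata1");
  return 0;
}
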
diff --git a/mysys/mf_cache.c b/mysys/mf_cache.c
index 478900ddb2a..6c5a76dc770 100644
--- a/mysys/mf_cache.c
+++ b/mysys/mf_cache.c
@@ -82,7 +82,7 @@ void close_cached_file(IO_CACHE *cache)
#ifdef CANT_DELETE_OPEN_FILES
if (cache->file_name)
{
- (void) my_delete(cache->file_name,MYF(MY_WME | ME_NOINPUT));
+ (void) my_delete(cache->file_name, MYF(MY_WME));
my_free(cache->file_name);
}
#endif
diff --git a/mysys/mf_iocache.c b/mysys/mf_iocache.c
index b09760cd98d..b1f67375903 100644
--- a/mysys/mf_iocache.c
+++ b/mysys/mf_iocache.c
@@ -496,10 +496,17 @@ my_bool reinit_io_cache(IO_CACHE *info, enum cache_type type,
{
/*
If we change from WRITE_CACHE to READ_CACHE, assume that everything
- after the current positions should be ignored
+ after the current positions should be ignored. In other cases we
+ update end_of_file as it may have changed since last init.
*/
- if (info->type == WRITE_CACHE && type == READ_CACHE)
- info->end_of_file=my_b_tell(info);
+ if (type == READ_CACHE)
+ {
+ if (info->type == WRITE_CACHE)
+ info->end_of_file= my_b_tell(info);
+ else
+ info->end_of_file= mysql_file_seek(info->file, 0L, MY_SEEK_END,
+ MYF(0));
+ }
/* flush cache if we want to reuse it */
if (!clear_cache && my_b_flush_io_cache(info,1))
DBUG_RETURN(1);
@@ -1546,8 +1553,7 @@ int _my_b_async_read(IO_CACHE *info, uchar *Buffer, size_t Count)
if (info->aio_result.result.aio_errno)
{
if (info->myflags & MY_WME)
- my_error(EE_READ, MYF(ME_BELL+ME_WAITTANG),
- my_filename(info->file),
+ my_error(EE_READ, MYF(ME_BELL), my_filename(info->file),
info->aio_result.result.aio_errno);
my_errno=info->aio_result.result.aio_errno;
info->error= -1;
@@ -1640,8 +1646,7 @@ int _my_b_async_read(IO_CACHE *info, uchar *Buffer, size_t Count)
if (Count != use_length)
{ /* Didn't find hole block */
if (info->myflags & (MY_WME | MY_FAE | MY_FNABP) && Count != org_Count)
- my_error(EE_EOFERR, MYF(ME_BELL+ME_WAITTANG),
- my_filename(info->file),my_errno);
+ my_error(EE_EOFERR, MYF(ME_BELL), my_filename(info->file), my_errno);
info->error=(int) (read_length+left_length);
return 1;
}
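
The reinit_io_cache() change above means that whenever a cache is re-initialized as a READ_CACHE, end_of_file is refreshed: from the current write position when coming from a WRITE_CACHE, otherwise from the file's actual size, which may have grown since the last init. A compilable sketch of just that decision, using plain lseek() and simplified types in place of the mysys ones:

#include <sys/types.h>
#include <unistd.h>

enum cache_type { READ_CACHE, WRITE_CACHE };

/* Returns the end_of_file value the patched code would pick. */
off_t new_end_of_file(enum cache_type old_type, enum cache_type new_type,
                      off_t write_pos, int fd, off_t old_end)
{
  if (new_type == READ_CACHE)
  {
    if (old_type == WRITE_CACHE)
      return write_pos;              /* ignore bytes after the current write position */
    return lseek(fd, 0, SEEK_END);   /* the file may have grown since the last init */
  }
  return old_end;                    /* other transitions keep the previous value */
}
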
diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c
index edf8cd3be8a..a394f2f669a 100644
--- a/mysys/mf_keycache.c
+++ b/mysys/mf_keycache.c
@@ -576,7 +576,7 @@ int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache,
if (blocks < 8)
{
my_errno= ENOMEM;
- my_error(EE_OUTOFMEMORY, MYF(ME_FATALERROR),
+ my_error(EE_OUTOFMEMORY, MYF(ME_FATAL),
blocks * keycache->key_cache_block_size);
goto err;
}
diff --git a/mysys/mf_tempfile.c b/mysys/mf_tempfile.c
index 0ff7066fd95..4df856c8b14 100644
--- a/mysys/mf_tempfile.c
+++ b/mysys/mf_tempfile.c
@@ -65,7 +65,7 @@ File create_temp_file(char *to, const char *dir, const char *prefix,
File file= -1;
DBUG_ENTER("create_temp_file");
- DBUG_PRINT("enter", ("dir: %s, prefix: %s", dir, prefix));
+ DBUG_PRINT("enter", ("dir: %s, prefix: %s", dir ? dir : "(null)", prefix));
DBUG_ASSERT((mode & (O_EXCL | O_TRUNC | O_CREAT | O_RDWR)) == 0);
mode|= O_TRUNC | O_CREAT | O_RDWR; /* not O_EXCL, see Windows code below */
@@ -110,6 +110,35 @@ File create_temp_file(char *to, const char *dir, const char *prefix,
}
}
#elif defined(HAVE_MKSTEMP)
+ if (!dir && ! (dir =getenv("TMPDIR")))
+ dir= DEFAULT_TMPDIR;
+#ifdef O_TMPFILE
+ {
+ static int O_TMPFILE_works= 1;
+
+ if ((MyFlags & MY_TEMPORARY) && O_TMPFILE_works)
+ {
+ /* explicitly don't use O_EXCL here as it has a different
+ meaning with O_TMPFILE
+ */
+ if ((file= open(dir, mode | O_TMPFILE | O_CLOEXEC,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP)) >= 0)
+ {
+ my_snprintf(to, FN_REFLEN, "%s/#sql/fd=%d", dir, file);
+ file=my_register_filename(file, to, FILE_BY_O_TMPFILE,
+ EE_CANTCREATEFILE, MyFlags);
+ }
+ else if (errno == EOPNOTSUPP || errno == EINVAL)
+ {
+ my_printf_error(EE_CANTCREATEFILE, "O_TMPFILE is not supported on %s "
+ "(disabling future attempts)",
+ MYF(ME_NOTE | ME_ERROR_LOG_ONLY), dir);
+ O_TMPFILE_works= 0;
+ }
+ }
+ }
+ if (file == -1)
+#endif /* O_TMPFILE */
{
char prefix_buff[30];
uint pfx_len;
@@ -119,8 +148,6 @@ File create_temp_file(char *to, const char *dir, const char *prefix,
prefix ? prefix : "tmp.",
sizeof(prefix_buff)-7),"XXXXXX") -
prefix_buff);
- if (!dir && ! (dir =getenv("TMPDIR")))
- dir= DEFAULT_TMPDIR;
if (strlen(dir)+ pfx_len > FN_REFLEN-2)
{
errno=my_errno= ENAMETOOLONG;
@@ -137,7 +164,7 @@ File create_temp_file(char *to, const char *dir, const char *prefix,
{
int tmp=my_errno;
close(org_file);
- (void) my_delete(to, MYF(MY_WME | ME_NOINPUT));
+ (void) my_delete(to, MYF(MY_WME));
my_errno=tmp;
}
}
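
The new mf_tempfile.c branch tries Linux's O_TMPFILE first, which creates an unnamed temporary file directly in the target directory, and falls back to the existing mkstemp() path when the kernel or filesystem reports EOPNOTSUPP or EINVAL (remembering the failure so later calls skip the attempt). A standalone sketch of the same pattern; open_temp() and the fixed mode bits are illustrative simplifications, not the mysys API:

#define _GNU_SOURCE         /* for O_TMPFILE on glibc */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

int open_temp(const char *dir)
{
  int fd;
#ifdef O_TMPFILE
  /* Unnamed file: it never appears in the directory and vanishes on close. */
  fd= open(dir, O_RDWR | O_TMPFILE | O_CLOEXEC, S_IRUSR | S_IWUSR);
  if (fd >= 0)
    return fd;
  if (errno != EOPNOTSUPP && errno != EINVAL)
    return -1;                       /* real failure, not just "unsupported" */
#endif
  /* Fallback: a named file from mkstemp(), unlinked right away. */
  char path[4096];
  snprintf(path, sizeof(path), "%s/tmp.XXXXXX", dir);
  fd= mkstemp(path);
  if (fd >= 0)
    unlink(path);
  return fd;
}
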
diff --git a/mysys/my_alloc.c b/mysys/my_alloc.c
index f519cbf105f..576142343f4 100644
--- a/mysys/my_alloc.c
+++ b/mysys/my_alloc.c
@@ -192,7 +192,7 @@ void *alloc_root(MEM_ROOT *mem_root, size_t length)
length+=ALIGN_SIZE(sizeof(USED_MEM));
if (!(next = (USED_MEM*) my_malloc(length,
- MYF(MY_WME | ME_FATALERROR |
+ MYF(MY_WME | ME_FATAL |
MALLOC_FLAG(mem_root->block_size)))))
{
if (mem_root->error_handler)
@@ -248,7 +248,7 @@ void *alloc_root(MEM_ROOT *mem_root, size_t length)
get_size= MY_MAX(get_size, block_size);
if (!(next = (USED_MEM*) my_malloc(get_size,
- MYF(MY_WME | ME_FATALERROR |
+ MYF(MY_WME | ME_FATAL |
MALLOC_FLAG(mem_root->
block_size)))))
{
@@ -492,3 +492,14 @@ void *memdup_root(MEM_ROOT *root, const void *str, size_t len)
memcpy(pos,str,len);
return pos;
}
+
+LEX_CSTRING safe_lexcstrdup_root(MEM_ROOT *root, const LEX_CSTRING str)
+{
+ LEX_CSTRING res;
+ if (str.length)
+ res.str= strmake_root(root, str.str, str.length);
+ else
+ res.str= (const char *)"";
+ res.length= str.length;
+ return res;
+}
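
safe_lexcstrdup_root(), added above, copies a LEX_CSTRING into a MEM_ROOT and, for a zero-length input, points the result at a static empty string instead of returning a NULL pointer. A self-contained sketch of that contract, with malloc() standing in for strmake_root() and a local struct standing in for LEX_CSTRING:

#include <stdlib.h>
#include <string.h>

typedef struct { const char *str; size_t length; } lex_cstring_sketch;

lex_cstring_sketch lexcstrdup_sketch(lex_cstring_sketch src)
{
  lex_cstring_sketch res;
  res.length= src.length;
  if (src.length)
  {
    char *copy= malloc(src.length + 1);
    if (!copy)
    {
      res.str= NULL;                 /* allocation failure, unlike the "" case below */
      res.length= 0;
      return res;
    }
    memcpy(copy, src.str, src.length);
    copy[src.length]= '\0';
    res.str= copy;
  }
  else
    res.str= "";                     /* zero-length input still gets a valid pointer */
  return res;
}
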
diff --git a/mysys/my_chsize.c b/mysys/my_chsize.c
index 51da6be7935..33ed230a4ce 100644
--- a/mysys/my_chsize.c
+++ b/mysys/my_chsize.c
@@ -96,6 +96,6 @@ int my_chsize(File fd, my_off_t newlength, int filler, myf MyFlags)
err:
DBUG_PRINT("error", ("errno: %d", errno));
if (MyFlags & MY_WME)
- my_error(EE_CANT_CHSIZE, MYF(ME_BELL+ME_WAITTANG), my_errno);
+ my_error(EE_CANT_CHSIZE, MYF(ME_BELL), my_errno);
DBUG_RETURN(1);
} /* my_chsize */
diff --git a/mysys/my_copy.c b/mysys/my_copy.c
index bd23dfc48cd..3360b41f64d 100644
--- a/mysys/my_copy.c
+++ b/mysys/my_copy.c
@@ -112,7 +112,7 @@ int my_copy(const char *from, const char *to, myf MyFlags)
{
my_errno= errno;
if (MyFlags & MY_WME)
- my_error(EE_CHANGE_PERMISSIONS, MYF(ME_BELL+ME_WAITTANG), to, errno);
+ my_error(EE_CHANGE_PERMISSIONS, MYF(ME_BELL), to, errno);
if (MyFlags & MY_FAE)
goto err;
}
@@ -122,7 +122,7 @@ int my_copy(const char *from, const char *to, myf MyFlags)
{
my_errno= errno;
if (MyFlags & MY_WME)
- my_error(EE_CANT_COPY_OWNERSHIP, MYF(ME_BELL+ME_WAITTANG), to, errno);
+ my_error(EE_CANT_COPY_OWNERSHIP, MYF(ME_BELL), to, errno);
if (MyFlags & MY_FAE)
goto err;
}
diff --git a/mysys/my_delete.c b/mysys/my_delete.c
index c7023b61df0..044c55d8fb1 100644
--- a/mysys/my_delete.c
+++ b/mysys/my_delete.c
@@ -47,11 +47,9 @@ int my_delete(const char *name, myf MyFlags)
{
my_errno=errno;
if (MyFlags & (MY_FAE+MY_WME))
- my_error(EE_DELETE,MYF(ME_BELL+ME_WAITTANG+(MyFlags & ME_NOINPUT)),
- name,errno);
+ my_error(EE_DELETE, MYF(ME_BELL), name, errno);
}
- else if ((MyFlags & MY_SYNC_DIR) &&
- my_sync_dir_by_file(name, MyFlags))
+ else if ((MyFlags & MY_SYNC_DIR) && my_sync_dir_by_file(name, MyFlags))
err= -1;
DBUG_RETURN(err);
} /* my_delete */
diff --git a/mysys/my_fopen.c b/mysys/my_fopen.c
index fbd84049700..79b7e32cbb1 100644
--- a/mysys/my_fopen.c
+++ b/mysys/my_fopen.c
@@ -76,7 +76,7 @@ FILE *my_fopen(const char *filename, int flags, myf MyFlags)
DBUG_PRINT("error",("Got error %d on open",my_errno));
if (MyFlags & (MY_FFNF | MY_FAE | MY_WME))
my_error((flags & O_RDONLY) ? EE_FILENOTFOUND : EE_CANTCREATEFILE,
- MYF(ME_BELL+ME_WAITTANG), filename, my_errno);
+ MYF(ME_BELL), filename, my_errno);
DBUG_RETURN((FILE*) 0);
} /* my_fopen */
@@ -179,8 +179,7 @@ int my_fclose(FILE *fd, myf MyFlags)
{
my_errno=errno;
if (MyFlags & (MY_FAE | MY_WME))
- my_error(EE_BADCLOSE, MYF(ME_BELL+ME_WAITTANG),
- name,errno);
+ my_error(EE_BADCLOSE, MYF(ME_BELL), name, errno);
}
else
statistic_decrement(my_stream_opened, &THR_LOCK_open);
@@ -214,7 +213,7 @@ FILE *my_fdopen(File Filedes, const char *name, int Flags, myf MyFlags)
{
my_errno=errno;
if (MyFlags & (MY_FAE | MY_WME))
- my_error(EE_CANT_OPEN_STREAM, MYF(ME_BELL+ME_WAITTANG),errno);
+ my_error(EE_CANT_OPEN_STREAM, MYF(ME_BELL), errno);
}
else
{
diff --git a/mysys/my_fstream.c b/mysys/my_fstream.c
index bfcf24bfa2e..7d49564a01f 100644
--- a/mysys/my_fstream.c
+++ b/mysys/my_fstream.c
@@ -55,11 +55,11 @@ size_t my_fread(FILE *stream, uchar *Buffer, size_t Count, myf MyFlags)
if (MyFlags & (MY_WME | MY_FAE | MY_FNABP))
{
if (ferror(stream))
- my_error(EE_READ, MYF(ME_BELL+ME_WAITTANG),
+ my_error(EE_READ, MYF(ME_BELL),
my_filename(my_fileno(stream)),errno);
else
if (MyFlags & (MY_NABP | MY_FNABP))
- my_error(EE_EOFERR, MYF(ME_BELL+ME_WAITTANG),
+ my_error(EE_EOFERR, MYF(ME_BELL),
my_filename(my_fileno(stream)),errno);
}
my_errno=errno ? errno : -1;
@@ -140,8 +140,8 @@ size_t my_fwrite(FILE *stream, const uchar *Buffer, size_t Count, myf MyFlags)
{
if (MyFlags & (MY_WME | MY_FAE | MY_FNABP))
{
- my_error(EE_WRITE, MYF(ME_BELL+ME_WAITTANG),
- my_filename(my_fileno(stream)),errno);
+ my_error(EE_WRITE, MYF(ME_BELL),
+ my_filename(my_fileno(stream)), errno);
}
writtenbytes= (size_t) -1; /* Return that we got error */
break;
diff --git a/mysys/my_getwd.c b/mysys/my_getwd.c
index 46710e79f62..978f34f2e89 100644
--- a/mysys/my_getwd.c
+++ b/mysys/my_getwd.c
@@ -64,7 +64,7 @@ int my_getwd(char * buf, size_t size, myf MyFlags)
if (!getcwd(buf,(uint) (size-2)) && MyFlags & MY_WME)
{
my_errno=errno;
- my_error(EE_GETWD,MYF(ME_BELL+ME_WAITTANG),errno);
+ my_error(EE_GETWD,MYF(ME_BELL),errno);
DBUG_RETURN(-1);
}
#elif defined(HAVE_GETWD)
@@ -104,7 +104,7 @@ int my_setwd(const char *dir, myf MyFlags)
{
my_errno=errno;
if (MyFlags & MY_WME)
- my_error(EE_SETWD,MYF(ME_BELL+ME_WAITTANG),start,errno);
+ my_error(EE_SETWD,MYF(ME_BELL),start,errno);
}
else
{
diff --git a/mysys/my_lib.c b/mysys/my_lib.c
index dc6b7cfd292..7b734182499 100644
--- a/mysys/my_lib.c
+++ b/mysys/my_lib.c
@@ -185,7 +185,7 @@ MY_DIR *my_dir(const char *path, myf MyFlags)
(void) closedir(dirp);
my_dirend(&dirh->dir);
if (MyFlags & (MY_FAE | MY_WME))
- my_error(EE_DIR, MYF(ME_BELL | ME_WAITTANG), path, my_errno);
+ my_error(EE_DIR, MYF(ME_BELL), path, my_errno);
DBUG_RETURN(NULL);
} /* my_dir */
@@ -308,7 +308,7 @@ error:
_findclose(handle);
my_dirend(&dirh->dir);
if (MyFlags & (MY_FAE | MY_WME))
- my_error(EE_DIR,MYF(ME_BELL | ME_WAITTANG), path, errno);
+ my_error(EE_DIR,MYF(ME_BELL), path, errno);
DBUG_RETURN(NULL);
} /* my_dir */
@@ -358,7 +358,7 @@ MY_STAT *my_stat(const char *path, MY_STAT *stat_area, myf my_flags)
error:
if (my_flags & (MY_FAE+MY_WME))
{
- my_error(EE_STAT, MYF(ME_BELL+ME_WAITTANG),path,my_errno);
+ my_error(EE_STAT, MYF(ME_BELL), path, my_errno);
DBUG_RETURN((MY_STAT *) NULL);
}
DBUG_RETURN((MY_STAT *) NULL);
diff --git a/mysys/my_lock.c b/mysys/my_lock.c
index 34b1723e13c..fb3f1ceaba9 100644
--- a/mysys/my_lock.c
+++ b/mysys/my_lock.c
@@ -219,9 +219,9 @@ int my_lock(File fd, int locktype, my_off_t start, my_off_t length,
if (MyFlags & MY_WME)
{
if (locktype == F_UNLCK)
- my_error(EE_CANTUNLOCK,MYF(ME_BELL+ME_WAITTANG),my_errno);
+ my_error(EE_CANTUNLOCK,MYF(ME_BELL),my_errno);
else
- my_error(EE_CANTLOCK,MYF(ME_BELL+ME_WAITTANG),my_errno);
+ my_error(EE_CANTLOCK,MYF(ME_BELL),my_errno);
}
DBUG_PRINT("error",("my_errno: %d (%d)",my_errno,errno));
DBUG_RETURN(-1);
diff --git a/mysys/my_lockmem.c b/mysys/my_lockmem.c
index 3e27564f100..674f4700f79 100644
--- a/mysys/my_lockmem.c
+++ b/mysys/my_lockmem.c
@@ -43,7 +43,7 @@ uchar *my_malloc_lock(uint size,myf MyFlags)
if (!(ptr=memalign(pagesize,size)))
{
if (MyFlags & (MY_FAE+MY_WME))
- my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_WAITTANG+ME_FATALERROR), size);
+ my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_FATAL), size);
DBUG_RETURN(0);
}
success = mlock((uchar*) ptr,size);
diff --git a/mysys/my_malloc.c b/mysys/my_malloc.c
index 78b61e8f761..05bc5349b35 100644
--- a/mysys/my_malloc.c
+++ b/mysys/my_malloc.c
@@ -106,8 +106,7 @@ void *my_malloc(size_t size, myf my_flags)
if (my_flags & MY_FAE)
error_handler_hook=fatal_error_handler_hook;
if (my_flags & (MY_FAE+MY_WME))
- my_error(EE_OUTOFMEMORY, MYF(ME_BELL + ME_WAITTANG +
- ME_NOREFRESH + ME_FATALERROR),size);
+ my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_ERROR_LOG+ME_FATAL),size);
if (my_flags & MY_FAE)
abort();
}
@@ -176,7 +175,7 @@ void *my_realloc(void *oldpoint, size_t size, myf my_flags)
DBUG_RETURN(oldpoint);
my_errno=errno;
if (my_flags & (MY_FAE+MY_WME))
- my_error(EE_OUTOFMEMORY, MYF(ME_BELL + ME_WAITTANG + ME_FATALERROR), size);
+ my_error(EE_OUTOFMEMORY, MYF(ME_BELL + ME_FATAL), size);
}
else
{
diff --git a/mysys/my_mkdir.c b/mysys/my_mkdir.c
index 5e9691f5b91..505c312ad56 100644
--- a/mysys/my_mkdir.c
+++ b/mysys/my_mkdir.c
@@ -36,7 +36,7 @@ int my_mkdir(const char *dir, int Flags, myf MyFlags)
my_errno=errno;
DBUG_PRINT("error",("error %d when creating direcory %s",my_errno,dir));
if (MyFlags & (MY_FFNF | MY_FAE | MY_WME))
- my_error(EE_CANT_MKDIR, MYF(ME_BELL+ME_WAITTANG), dir, my_errno);
+ my_error(EE_CANT_MKDIR, MYF(ME_BELL), dir, my_errno);
DBUG_RETURN(-1);
}
DBUG_RETURN(0);
diff --git a/mysys/my_once.c b/mysys/my_once.c
index dfd5de81ac7..1a719a62fbf 100644
--- a/mysys/my_once.c
+++ b/mysys/my_once.c
@@ -59,7 +59,7 @@ void* my_once_alloc(size_t Size, myf MyFlags)
{
my_errno=errno;
if (MyFlags & (MY_FAE+MY_WME))
- my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_WAITTANG+ME_FATALERROR), get_size);
+ my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_FATAL), get_size);
return((uchar*) 0);
}
DBUG_PRINT("test",("my_once_malloc %lu byte malloced", (ulong) get_size));
diff --git a/mysys/my_open.c b/mysys/my_open.c
index 92e46610100..37cde868b61 100644
--- a/mysys/my_open.c
+++ b/mysys/my_open.c
@@ -98,7 +98,7 @@ int my_close(File fd, myf MyFlags)
DBUG_PRINT("error",("Got error %d on close",err));
my_errno=errno;
if (MyFlags & (MY_FAE | MY_WME))
- my_error(EE_BADCLOSE, MYF(ME_BELL | ME_WAITTANG | (MyFlags & (ME_JUST_INFO | ME_NOREFRESH))),
+ my_error(EE_BADCLOSE, MYF(ME_BELL | (MyFlags & (ME_NOTE | ME_ERROR_LOG))),
name,errno);
}
if (name)
@@ -153,7 +153,7 @@ File my_register_filename(File fd, const char *FileName, enum file_type
if (my_errno == EMFILE)
error_message_number= EE_OUT_OF_FILERESOURCES;
my_error(error_message_number,
- MYF(ME_BELL | ME_WAITTANG | (MyFlags & (ME_JUST_INFO | ME_NOREFRESH))),
+ MYF(ME_BELL | (MyFlags & (ME_NOTE | ME_ERROR_LOG))),
FileName, my_errno);
}
DBUG_RETURN(-1);
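
In my_open.c above (and likewise in my_pread.c, my_read.c and my_write.c below), only the ME_NOTE and ME_ERROR_LOG bits of the caller's MyFlags are forwarded into the my_error() flags, replacing the old ME_JUST_INFO/ME_NOREFRESH pair. A tiny sketch of that masking; the bit values are placeholders, not the real my_sys.h definitions:

#define ME_BELL      4UL
#define ME_NOTE      1024UL   /* replaces the old ME_JUST_INFO */
#define ME_ERROR_LOG 64UL     /* replaces the old ME_NOREFRESH */
#define MYF(v)       (v)

/* Only the note/error-log bits of the caller's flags pass through;
   ME_BELL is always set by the I/O wrappers themselves. */
unsigned long report_flags(unsigned long MyFlags)
{
  return MYF(ME_BELL | (MyFlags & (ME_NOTE | ME_ERROR_LOG)));
}
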
diff --git a/mysys/my_pread.c b/mysys/my_pread.c
index 2b3bfdc9e3d..2b7ce4bc0d6 100644
--- a/mysys/my_pread.c
+++ b/mysys/my_pread.c
@@ -96,11 +96,11 @@ size_t my_pread(File Filedes, uchar *Buffer, size_t Count, my_off_t offset,
{
if (readbytes == (size_t) -1)
my_error(EE_READ,
- MYF(ME_BELL | ME_WAITTANG | (MyFlags & (ME_JUST_INFO | ME_NOREFRESH))),
+ MYF(ME_BELL | (MyFlags & (ME_NOTE | ME_ERROR_LOG))),
my_filename(Filedes),my_errno);
else if (MyFlags & (MY_NABP | MY_FNABP))
my_error(EE_EOFERR,
- MYF(ME_BELL | ME_WAITTANG | (MyFlags & (ME_JUST_INFO | ME_NOREFRESH))),
+ MYF(ME_BELL | (MyFlags & (ME_NOTE | ME_ERROR_LOG))),
my_filename(Filedes),my_errno);
}
if (readbytes == (size_t) -1 || (MyFlags & (MY_FNABP | MY_NABP)))
@@ -185,7 +185,7 @@ size_t my_pwrite(int Filedes, const uchar *Buffer, size_t Count,
if (MyFlags & (MY_NABP | MY_FNABP))
{
if (MyFlags & (MY_WME | MY_FAE | MY_FNABP))
- my_error(EE_WRITE, MYF(ME_BELL | ME_WAITTANG | (MyFlags & (ME_JUST_INFO | ME_NOREFRESH))),
+ my_error(EE_WRITE, MYF(ME_BELL | (MyFlags & (ME_NOTE | ME_ERROR_LOG))),
my_filename(Filedes),my_errno);
DBUG_RETURN(MY_FILE_ERROR); /* Error on write */
}
diff --git a/mysys/my_read.c b/mysys/my_read.c
index 89b368e9800..b0a03ac03b6 100644
--- a/mysys/my_read.c
+++ b/mysys/my_read.c
@@ -90,11 +90,11 @@ size_t my_read(File Filedes, uchar *Buffer, size_t Count, myf MyFlags)
{
if (readbytes == (size_t) -1)
my_error(EE_READ,
- MYF(ME_BELL | ME_WAITTANG | (MyFlags & (ME_JUST_INFO | ME_NOREFRESH))),
+ MYF(ME_BELL | (MyFlags & (ME_NOTE | ME_ERROR_LOG))),
my_filename(Filedes), got_errno);
else if (MyFlags & (MY_NABP | MY_FNABP))
my_error(EE_EOFERR,
- MYF(ME_BELL | ME_WAITTANG | (MyFlags & (ME_JUST_INFO | ME_NOREFRESH))),
+ MYF(ME_BELL | (MyFlags & (ME_NOTE | ME_ERROR_LOG))),
my_filename(Filedes), got_errno);
}
if (readbytes == (size_t) -1 ||
diff --git a/mysys/my_redel.c b/mysys/my_redel.c
index 9091c74e6b5..8bf81ae2876 100644
--- a/mysys/my_redel.c
+++ b/mysys/my_redel.c
@@ -102,7 +102,7 @@ int my_copystat(const char *from, const char *to, int MyFlags)
{
my_errno= errno;
if (MyFlags & (MY_FAE+MY_WME))
- my_error(EE_CHANGE_PERMISSIONS, MYF(ME_BELL+ME_WAITTANG), from, errno);
+ my_error(EE_CHANGE_PERMISSIONS, MYF(ME_BELL), from, errno);
return -1;
}
@@ -110,14 +110,14 @@ int my_copystat(const char *from, const char *to, int MyFlags)
if (statbuf.st_nlink > 1 && MyFlags & MY_LINK_WARNING)
{
if (MyFlags & MY_LINK_WARNING)
- my_error(EE_LINK_WARNING,MYF(ME_BELL+ME_WAITTANG),from,statbuf.st_nlink);
+ my_error(EE_LINK_WARNING,MYF(ME_BELL),from,statbuf.st_nlink);
}
/* Copy ownership */
if (chown(to, statbuf.st_uid, statbuf.st_gid))
{
my_errno= errno;
if (MyFlags & MY_WME)
- my_error(EE_CHANGE_OWNERSHIP, MYF(ME_BELL+ME_WAITTANG), from, errno);
+ my_error(EE_CHANGE_OWNERSHIP, MYF(ME_BELL), from, errno);
if (MyFlags & MY_FAE)
return -1;
}
diff --git a/mysys/my_rename.c b/mysys/my_rename.c
index 17f693629a8..f3831c2e73e 100644
--- a/mysys/my_rename.c
+++ b/mysys/my_rename.c
@@ -45,7 +45,7 @@ int my_rename(const char *from, const char *to, myf MyFlags)
my_errno= errno;
error = -1;
if (MyFlags & (MY_FAE+MY_WME))
- my_error(EE_LINK, MYF(ME_BELL+ME_WAITTANG),from,to,my_errno);
+ my_error(EE_LINK, MYF(ME_BELL),from,to,my_errno);
}
else if (MyFlags & MY_SYNC_DIR)
{
diff --git a/mysys/my_setuser.c b/mysys/my_setuser.c
index 14ab04dd10f..e35d6602aca 100644
--- a/mysys/my_setuser.c
+++ b/mysys/my_setuser.c
@@ -27,7 +27,7 @@ struct passwd *my_check_user(const char *user, myf MyFlags)
my_errno= EPERM;
if (MyFlags & MY_WME)
my_printf_error(my_errno, "One can only use the --user switch if "
- "running as root", MYF(ME_JUST_WARNING|ME_NOREFRESH));
+ "running as root", MYF(ME_WARNING|ME_ERROR_LOG));
}
}
DBUG_RETURN(NULL);
@@ -38,7 +38,7 @@ struct passwd *my_check_user(const char *user, myf MyFlags)
{
my_errno= EINVAL;
my_printf_error(my_errno, "Please consult the Knowledge Base to find "
- "out how to run mysqld as root!", MYF(ME_NOREFRESH));
+ "out how to run mysqld as root!", MYF(ME_ERROR_LOG));
}
DBUG_RETURN(NULL);
}
@@ -54,7 +54,7 @@ struct passwd *my_check_user(const char *user, myf MyFlags)
{
my_errno= EINVAL;
my_printf_error(my_errno, "Can't change to run as user '%s'. Please "
- "check that the user exists!", MYF(ME_NOREFRESH), user);
+ "check that the user exists!", MYF(ME_ERROR_LOG), user);
DBUG_RETURN(NULL);
}
}
@@ -74,7 +74,7 @@ int my_set_user(const char *user, struct passwd *user_info, myf MyFlags)
{
my_errno= errno;
if (MyFlags & MY_WME)
- my_printf_error(errno, "Cannot change uid/gid (errno: %d)", MYF(ME_NOREFRESH),
+ my_printf_error(errno, "Cannot change uid/gid (errno: %d)", MYF(ME_ERROR_LOG),
errno);
DBUG_RETURN(my_errno);
}
diff --git a/mysys/my_static.c b/mysys/my_static.c
index f2a9fbb7335..7b69deea8e1 100644
--- a/mysys/my_static.c
+++ b/mysys/my_static.c
@@ -102,7 +102,7 @@ my_bool my_disable_copystat_in_redel=0;
/* Typelib by all clients */
const char *sql_protocol_names_lib[] =
-{ "TCP", "SOCKET", "PIPE", "MEMORY", NullS };
+{ "TCP", "SOCKET", "PIPE", NullS };
TYPELIB sql_protocol_typelib ={ array_elements(sql_protocol_names_lib) - 1, "",
sql_protocol_names_lib, NULL };
diff --git a/mysys/my_sync.c b/mysys/my_sync.c
index cf8dfb6a8c8..575bbb9f748 100644
--- a/mysys/my_sync.c
+++ b/mysys/my_sync.c
@@ -122,7 +122,7 @@ int my_sync(File fd, myf my_flags)
res= 0;
}
else if (my_flags & MY_WME)
- my_error(EE_SYNC, MYF(ME_BELL+ME_WAITTANG), my_filename(fd), my_errno);
+ my_error(EE_SYNC, MYF(ME_BELL), my_filename(fd), my_errno);
}
else
{
diff --git a/mysys/my_write.c b/mysys/my_write.c
index 43735c18f0a..f6e304f9a4e 100644
--- a/mysys/my_write.c
+++ b/mysys/my_write.c
@@ -107,7 +107,7 @@ size_t my_write(File Filedes, const uchar *Buffer, size_t Count, myf MyFlags)
{
if (MyFlags & (MY_WME | MY_FAE | MY_FNABP))
{
- my_error(EE_WRITE, MYF(ME_BELL | ME_WAITTANG | (MyFlags & (ME_JUST_INFO | ME_NOREFRESH))),
+ my_error(EE_WRITE, MYF(ME_BELL | (MyFlags & (ME_NOTE | ME_ERROR_LOG))),
my_filename(Filedes),my_errno);
}
DBUG_RETURN(MY_FILE_ERROR); /* Error on read */
diff --git a/mysys/thr_alarm.c b/mysys/thr_alarm.c
index 357923cf388..09c4984c069 100644
--- a/mysys/thr_alarm.c
+++ b/mysys/thr_alarm.c
@@ -731,7 +731,8 @@ static void *signal_hand(void *arg __attribute__((unused)))
DBUG_PRINT("info",("Starting signal and alarm handling thread"));
for(;;)
{
- while ((error=my_sigwait(&set,&sig)) == EINTR)
+ int code;
+ while ((error=my_sigwait(&set,&sig,&code)) == EINTR)
printf("sigwait restarted\n");
if (error)
{
@@ -805,8 +806,7 @@ int main(int argc __attribute__((unused)),char **argv __attribute__((unused)))
/* Start signal thread and wait for it to start */
mysql_mutex_lock(&LOCK_thread_count);
- mysql_thread_create(0,
- &tid, &thr_attr, signal_hand, NULL);
+ mysql_thread_create(0, &tid, &thr_attr, signal_hand, NULL);
mysql_cond_wait(&COND_thread_count, &LOCK_thread_count);
mysql_mutex_unlock(&LOCK_thread_count);
DBUG_PRINT("info",("signal thread created"));
diff --git a/pcre/pcre_dfa_exec.c b/pcre/pcre_dfa_exec.c
index f333381d088..830c4e4be48 100644
--- a/pcre/pcre_dfa_exec.c
+++ b/pcre/pcre_dfa_exec.c
@@ -2198,6 +2198,7 @@ for (;;)
case 0x2029:
#endif /* Not EBCDIC */
if ((md->moptions & PCRE_BSR_ANYCRLF) != 0) break;
+ /* fall through */
case CHAR_LF:
ADD_NEW(state_offset + 1, 0);
diff --git a/pcre/pcre_exec.c b/pcre/pcre_exec.c
index 93256d32455..88a9a79e489 100644
--- a/pcre/pcre_exec.c
+++ b/pcre/pcre_exec.c
@@ -2086,7 +2086,7 @@ for (;;)
case OP_CIRC:
if (md->notbol && eptr == md->start_subject) RRETURN(MATCH_NOMATCH);
- /* Start of subject assertion */
+ /* Fall through. Start of subject assertion */
case OP_SOD:
if (eptr != md->start_subject) RRETURN(MATCH_NOMATCH);
diff --git a/pcre/pcregrep.c b/pcre/pcregrep.c
index 79d9e286c75..22c43bcda00 100644
--- a/pcre/pcregrep.c
+++ b/pcre/pcregrep.c
@@ -2407,7 +2407,7 @@ handle_option(int letter, int options)
switch(letter)
{
case N_FOFFSETS: file_offsets = TRUE; break;
- case N_HELP: help(); pcregrep_exit(0);
+ case N_HELP: help(); pcregrep_exit(0); break;
case N_LBUFFER: line_buffered = TRUE; break;
case N_LOFFSETS: line_offsets = number = TRUE; break;
case N_NOJIT: study_options &= ~PCRE_STUDY_JIT_COMPILE; break;
diff --git a/plugin/auth_ed25519/CMakeLists.txt b/plugin/auth_ed25519/CMakeLists.txt
index 73d8eeb208b..ffd2523d1af 100644
--- a/plugin/auth_ed25519/CMakeLists.txt
+++ b/plugin/auth_ed25519/CMakeLists.txt
@@ -24,8 +24,8 @@ ENDIF()
MYSQL_ADD_PLUGIN(auth_ed25519 server_ed25519.c ${REF10_SOURCES} MODULE_ONLY)
# client plugin and unit test ed25519-t can use the library
-MYSQL_ADD_PLUGIN(client_ed25519 client_ed25519.c MODULE_ONLY
- CLIENT LINK_LIBRARIES mysys_ssl ref10 COMPONENT ClientPlugins)
+#MYSQL_ADD_PLUGIN(client_ed25519 client_ed25519.c MODULE_ONLY
+# CLIENT LINK_LIBRARIES mysys_ssl ref10 COMPONENT ClientPlugins)
IF(WITH_UNIT_TESTS)
MY_ADD_TESTS(ed25519 LINK_LIBRARIES mysys ref10)
diff --git a/plugin/auth_ed25519/server_ed25519.c b/plugin/auth_ed25519/server_ed25519.c
index 23b4e7389c7..d2e9e70a9b9 100644
--- a/plugin/auth_ed25519/server_ed25519.c
+++ b/plugin/auth_ed25519/server_ed25519.c
@@ -15,6 +15,7 @@
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
#include <mysql/plugin_auth.h>
+#include <mysqld_error.h>
#include "common.h"
#if !defined(__attribute__) && !defined(__GNUC__)
@@ -36,16 +37,6 @@ static int auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info)
int pkt_len;
unsigned long nonce[CRYPTO_LONGS + NONCE_LONGS];
unsigned char *pkt, *reply= (unsigned char*)nonce;
- unsigned char pk[PASSWORD_LEN_BUF/4*3];
- char pw[PASSWORD_LEN_BUF];
-
- /* prepare the pk */
- if (info->auth_string_length != PASSWORD_LEN)
- return CR_AUTH_USER_CREDENTIALS;
- memcpy(pw, info->auth_string, PASSWORD_LEN);
- pw[PASSWORD_LEN]= '=';
- if (my_base64_decode(pw, PASSWORD_LEN_BUF, pk, NULL, 0) != CRYPTO_PUBLICKEYBYTES)
- return CR_AUTH_USER_CREDENTIALS;
info->password_used= PASSWORD_USED_YES;
@@ -62,17 +53,52 @@ static int auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info)
return CR_AUTH_HANDSHAKE;
memcpy(reply, pkt, CRYPTO_BYTES);
- if (crypto_sign_open(reply, CRYPTO_BYTES + NONCE_BYTES, pk))
+ if (crypto_sign_open(reply, CRYPTO_BYTES + NONCE_BYTES,
+ (unsigned char*)info->auth_string))
return CR_ERROR;
return CR_OK;
}
+static int compute_password_digest(const char *pw, size_t pwlen,
+ char *d, size_t *dlen)
+{
+ unsigned char pk[CRYPTO_PUBLICKEYBYTES];
+ if (*dlen < PASSWORD_LEN || pwlen == 0)
+ return 1;
+ *dlen= PASSWORD_LEN;
+ crypto_sign_keypair(pk, (unsigned char*)pw, pwlen);
+ my_base64_encode(pk, CRYPTO_PUBLICKEYBYTES, d);
+ return 0;
+}
+
+static int digest_to_binary(const char *d, size_t dlen,
+ unsigned char *b, size_t *blen)
+{
+ char pw[PASSWORD_LEN_BUF];
+
+ if (*blen < CRYPTO_PUBLICKEYBYTES || dlen != PASSWORD_LEN)
+ {
+ my_printf_error(ER_PASSWD_LENGTH, "Password hash should be %d characters long", 0, PASSWORD_LEN);
+ return 1;
+ }
+
+ *blen= CRYPTO_PUBLICKEYBYTES;
+ memcpy(pw, d, PASSWORD_LEN);
+ pw[PASSWORD_LEN]= '=';
+ if (my_base64_decode(pw, PASSWORD_LEN_BUF, b, 0, 0) == CRYPTO_PUBLICKEYBYTES)
+ return 0;
+ my_printf_error(ER_PASSWD_LENGTH, "Password hash should be base64 encoded", 0);
+ return 1;
+}
+
static struct st_mysql_auth info =
{
MYSQL_AUTHENTICATION_INTERFACE_VERSION,
"client_ed25519",
- auth
+ auth,
+ compute_password_digest,
+ digest_to_binary
};
static int init(void *p __attribute__((unused)))
@@ -97,10 +123,10 @@ maria_declare_plugin(ed25519)
PLUGIN_LICENSE_GPL,
init,
deinit,
- 0x0100,
+ 0x0101,
NULL,
NULL,
- "1.0",
+ "1.1",
MariaDB_PLUGIN_MATURITY_STABLE
}
maria_declare_plugin_end;
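
[Editor's note] The version bump to 1.1 reflects the two callbacks added above: compute_password_digest() hashes a plaintext password into the base64 string stored in mysql.user, and digest_to_binary() converts that string back into the raw public key, which then reaches auth() via info->auth_string (hence the removal of the per-login base64 decoding). A hedged sketch of the intended round trip; it would only compile inside server_ed25519.c, where the static callbacks and the PASSWORD_LEN_BUF / CRYPTO_PUBLICKEYBYTES macros are visible:

  /* Illustrative only; not part of the patch. */
  static int ed25519_example(void)
  {
    char stored[PASSWORD_LEN_BUF];            /* base64 hash kept in mysql.user */
    size_t stored_len= sizeof(stored);
    unsigned char key[CRYPTO_PUBLICKEYBYTES]; /* raw public key                 */
    size_t key_len= sizeof(key);

    if (compute_password_digest("secret", 6, stored, &stored_len))
      return 1;                               /* PASSWORD('secret') path        */
    if (digest_to_binary(stored, stored_len, key, &key_len))
      return 1;                               /* stored hash -> public key      */
    /* The server keeps this binary form in info->auth_string, which auth()
       above feeds straight into crypto_sign_open(). */
    return 0;
  }
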
diff --git a/plugin/auth_examples/dialog_examples.c b/plugin/auth_examples/dialog_examples.c
index 067244d6f7d..1c96c8d7faf 100644
--- a/plugin/auth_examples/dialog_examples.c
+++ b/plugin/auth_examples/dialog_examples.c
@@ -81,7 +81,8 @@ static struct st_mysql_auth two_handler=
{
MYSQL_AUTHENTICATION_INTERFACE_VERSION,
"dialog", /* requires dialog client plugin */
- two_questions
+ two_questions,
+ NULL, NULL /* no PASSWORD() */
};
/* dialog demo where the number of questions is not known in advance */
@@ -118,7 +119,8 @@ static struct st_mysql_auth three_handler=
{
MYSQL_AUTHENTICATION_INTERFACE_VERSION,
"dialog", /* requires dialog client plugin */
- three_attempts
+ three_attempts,
+ NULL, NULL /* no PASSWORD() */
};
mysql_declare_plugin(dialog)
diff --git a/plugin/auth_examples/qa_auth_interface.c b/plugin/auth_examples/qa_auth_interface.c
index 08ddbf7f30a..70050cf0d91 100644
--- a/plugin/auth_examples/qa_auth_interface.c
+++ b/plugin/auth_examples/qa_auth_interface.c
@@ -136,7 +136,8 @@ static struct st_mysql_auth qa_auth_test_handler=
{
MYSQL_AUTHENTICATION_INTERFACE_VERSION,
"qa_auth_interface", /* requires test_plugin client's plugin */
- qa_auth_interface
+ qa_auth_interface,
+ NULL, NULL /* no PASSWORD() */
};
mysql_declare_plugin(test_plugin)
diff --git a/plugin/auth_examples/qa_auth_server.c b/plugin/auth_examples/qa_auth_server.c
index 59b926b63dc..0ed16b692cf 100644
--- a/plugin/auth_examples/qa_auth_server.c
+++ b/plugin/auth_examples/qa_auth_server.c
@@ -56,7 +56,8 @@ static struct st_mysql_auth qa_auth_test_handler=
{
MYSQL_AUTHENTICATION_INTERFACE_VERSION,
"qa_auth_interface", /* requires test_plugin client's plugin */
- qa_auth_interface
+ qa_auth_interface,
+ NULL, NULL /* no PASSWORD() */
};
mysql_declare_plugin(test_plugin)
diff --git a/plugin/auth_examples/test_plugin.c b/plugin/auth_examples/test_plugin.c
index 8cc17894be4..e2d79d753f4 100644
--- a/plugin/auth_examples/test_plugin.c
+++ b/plugin/auth_examples/test_plugin.c
@@ -69,7 +69,8 @@ static struct st_mysql_auth auth_test_handler=
{
MYSQL_AUTHENTICATION_INTERFACE_VERSION,
"auth_test_plugin", /* requires test_plugin client's plugin */
- auth_test_plugin
+ auth_test_plugin,
+ NULL, NULL /* no PASSWORD() */
};
/**
@@ -99,7 +100,8 @@ static struct st_mysql_auth auth_cleartext_handler=
{
MYSQL_AUTHENTICATION_INTERFACE_VERSION,
"mysql_clear_password", /* requires the clear text plugin */
- auth_cleartext_plugin
+ auth_cleartext_plugin,
+ NULL, NULL /* no PASSWORD() */
};
mysql_declare_plugin(test_plugin)
diff --git a/plugin/auth_gssapi/gssapi_server.cc b/plugin/auth_gssapi/gssapi_server.cc
index a498aba982d..8aa13aac6c9 100644
--- a/plugin/auth_gssapi/gssapi_server.cc
+++ b/plugin/auth_gssapi/gssapi_server.cc
@@ -145,7 +145,7 @@ int plugin_deinit()
}
-int auth_server(MYSQL_PLUGIN_VIO *vio,const char *user, size_t userlen, int use_full_name)
+int auth_server(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *auth_info)
{
int rc= CR_ERROR; /* return code */
@@ -157,6 +157,9 @@ int auth_server(MYSQL_PLUGIN_VIO *vio,const char *user, size_t userlen, int use_
gss_name_t client_name;
gss_buffer_desc client_name_buf, input, output;
char *client_name_str;
+ const char *user= 0;
+ size_t userlen;
+ int use_full_name;
/* server acquires credential */
major= gss_acquire_cred(&minor, service_name, GSS_C_INDEFINITE,
@@ -180,6 +183,21 @@ int auth_server(MYSQL_PLUGIN_VIO *vio,const char *user, size_t userlen, int use_
log_error(0, 0, "fail to read token from client");
goto cleanup;
}
+ if (!user)
+ {
+ if (auth_info->auth_string_length > 0)
+ {
+ use_full_name= 1;
+ user= auth_info->auth_string;
+ userlen= auth_info->auth_string_length;
+ }
+ else
+ {
+ use_full_name= 0;
+ user= auth_info->user_name;
+ userlen= auth_info->user_name_length;
+ }
+ }
input.length= len;
major= gss_accept_sec_context(&minor, &ctxt, cred, &input,
diff --git a/plugin/auth_gssapi/mysql-test/auth_gssapi/multiauth.result b/plugin/auth_gssapi/mysql-test/auth_gssapi/multiauth.result
new file mode 100644
index 00000000000..c65eb7a8634
--- /dev/null
+++ b/plugin/auth_gssapi/mysql-test/auth_gssapi/multiauth.result
@@ -0,0 +1,34 @@
+INSTALL SONAME 'auth_gssapi';
+Warnings:
+Note 1105 SSPI: using principal name 'localhost', mech 'Negotiate'
+CREATE USER 'nosuchuser' IDENTIFIED WITH gssapi OR mysql_native_password as password("good");
+connect(localhost,nosuchuser,,test,MASTER_MYPORT,MASTER_MYSOCK);
+connect con1,localhost,nosuchuser,,;
+ERROR 28000: Access denied for user 'nosuchuser'@'localhost' (using password: NO)
+connect con1,localhost,nosuchuser,good,;
+SELECT USER(),CURRENT_USER();
+USER() CURRENT_USER()
+nosuchuser@localhost nosuchuser@%
+disconnect con1;
+connection default;
+DROP USER nosuchuser;
+CREATE USER 'nosuchuser' IDENTIFIED WITH mysql_native_password as password("good") OR gssapi;
+connect(localhost,nosuchuser,,test,MASTER_MYPORT,MASTER_MYSOCK);
+connect con1,localhost,nosuchuser,,;
+ERROR 28000: GSSAPI name mismatch, requested 'nosuchuser', actual name 'GSSAPI_SHORTNAME'
+connect con1,localhost,nosuchuser,good,;
+SELECT USER(),CURRENT_USER();
+USER() CURRENT_USER()
+nosuchuser@localhost nosuchuser@%
+disconnect con1;
+connection default;
+DROP USER nosuchuser;
+CREATE USER 'GSSAPI_SHORTNAME' IDENTIFIED WITH mysql_native_password as password("good") OR gssapi;
+connect con1,localhost,$GSSAPI_SHORTNAME,,;
+SELECT USER(),CURRENT_USER();
+USER() CURRENT_USER()
+GSSAPI_SHORTNAME@localhost GSSAPI_SHORTNAME@%
+disconnect con1;
+connection default;
+DROP USER 'GSSAPI_SHORTNAME';
+UNINSTALL SONAME 'auth_gssapi';
diff --git a/plugin/auth_gssapi/mysql-test/auth_gssapi/multiauth.test b/plugin/auth_gssapi/mysql-test/auth_gssapi/multiauth.test
new file mode 100644
index 00000000000..10e1e80907e
--- /dev/null
+++ b/plugin/auth_gssapi/mysql-test/auth_gssapi/multiauth.test
@@ -0,0 +1,36 @@
+--replace_regex /name '[^']+'/name 'localhost'/
+INSTALL SONAME 'auth_gssapi';
+
+# gssapi,password
+CREATE USER 'nosuchuser' IDENTIFIED WITH gssapi OR mysql_native_password as password("good");
+replace_result $MASTER_MYSOCK MASTER_MYSOCK $MASTER_MYPORT MASTER_MYPORT;
+error ER_ACCESS_DENIED_ERROR;
+connect (con1,localhost,nosuchuser,,);
+connect (con1,localhost,nosuchuser,good,);
+SELECT USER(),CURRENT_USER();
+disconnect con1;
+connection default;
+DROP USER nosuchuser;
+
+# password,gssapi
+CREATE USER 'nosuchuser' IDENTIFIED WITH mysql_native_password as password("good") OR gssapi;
+replace_result $MASTER_MYSOCK MASTER_MYSOCK $MASTER_MYPORT MASTER_MYPORT $GSSAPI_SHORTNAME GSSAPI_SHORTNAME;
+error ER_ACCESS_DENIED_ERROR;
+connect (con1,localhost,nosuchuser,,);
+connect (con1,localhost,nosuchuser,good,);
+SELECT USER(),CURRENT_USER();
+disconnect con1;
+connection default;
+DROP USER nosuchuser;
+
+replace_result $GSSAPI_SHORTNAME GSSAPI_SHORTNAME;
+eval CREATE USER '$GSSAPI_SHORTNAME' IDENTIFIED WITH mysql_native_password as password("good") OR gssapi;
+connect (con1,localhost,$GSSAPI_SHORTNAME,,);
+replace_result $GSSAPI_SHORTNAME GSSAPI_SHORTNAME;
+SELECT USER(),CURRENT_USER();
+disconnect con1;
+connection default;
+replace_result $GSSAPI_SHORTNAME GSSAPI_SHORTNAME;
+eval DROP USER '$GSSAPI_SHORTNAME';
+
+UNINSTALL SONAME 'auth_gssapi';
diff --git a/plugin/auth_gssapi/server_plugin.cc b/plugin/auth_gssapi/server_plugin.cc
index 5db86cffbe4..bce6a812d12 100644
--- a/plugin/auth_gssapi/server_plugin.cc
+++ b/plugin/auth_gssapi/server_plugin.cc
@@ -64,41 +64,11 @@ unsigned long srv_mech;
*/
static int gssapi_auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *auth_info)
{
- int use_full_name;
- const char *user;
- int user_len;
-
- /* No user name yet ? Read the client handshake packet with the user name. */
- if (auth_info->user_name == 0)
- {
- unsigned char *pkt;
- if (vio->read_packet(vio, &pkt) < 0)
- return CR_ERROR;
- }
-
/* Send first packet with target name and mech name */
if (vio->write_packet(vio, (unsigned char *)first_packet, first_packet_len))
- {
return CR_ERROR;
- }
-
- /* Figure out whether to use full name (as given in IDENTIFIED AS clause)
- * or just short username auth_string
- */
- if (auth_info->auth_string_length > 0)
- {
- use_full_name= 1;
- user= auth_info->auth_string;
- user_len= auth_info->auth_string_length;
- }
- else
- {
- use_full_name= 0;
- user= auth_info->user_name;
- user_len= auth_info->user_name_length;
- }
-
- return auth_server(vio, user, user_len, use_full_name);
+
+ return auth_server(vio, auth_info);
}
static int initialize_plugin(void *unused)
@@ -169,7 +139,7 @@ static struct st_mysql_sys_var *system_variables[]= {
static struct st_mysql_auth server_handler= {
MYSQL_AUTHENTICATION_INTERFACE_VERSION,
"auth_gssapi_client",
- gssapi_auth
+ gssapi_auth, NULL, NULL
};
maria_declare_plugin(gssapi_server)
diff --git a/plugin/auth_gssapi/server_plugin.h b/plugin/auth_gssapi/server_plugin.h
index 6284a319d03..84552d3a263 100644
--- a/plugin/auth_gssapi/server_plugin.h
+++ b/plugin/auth_gssapi/server_plugin.h
@@ -48,4 +48,4 @@ extern char *srv_keytab_path;
int plugin_init();
int plugin_deinit();
-int auth_server(MYSQL_PLUGIN_VIO *vio, const char *username, size_t username_len, int use_full_name);
+int auth_server(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *auth_info);
diff --git a/plugin/auth_gssapi/sspi_server.cc b/plugin/auth_gssapi/sspi_server.cc
index af78829df6e..44aa5051472 100644
--- a/plugin/auth_gssapi/sspi_server.cc
+++ b/plugin/auth_gssapi/sspi_server.cc
@@ -140,7 +140,7 @@ static int get_client_name_from_context(CtxtHandle *ctxt,
}
-int auth_server(MYSQL_PLUGIN_VIO *vio, const char *user, size_t user_len, int compare_full_name)
+int auth_server(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *auth_info)
{
int ret;
SECURITY_STATUS sspi_ret;
@@ -155,6 +155,8 @@ int auth_server(MYSQL_PLUGIN_VIO *vio, const char *user, size_t user_len, int co
SecBuffer outbuf;
void* out= NULL;
char client_name[MYSQL_USERNAME_LENGTH + 1];
+ const char *user= 0;
+ int compare_full_name;
ret= CR_ERROR;
SecInvalidateHandle(&cred);
@@ -207,6 +209,19 @@ int auth_server(MYSQL_PLUGIN_VIO *vio, const char *user, size_t user_len, int co
log_error(SEC_E_OK, "communication error(read)");
goto cleanup;
}
+ if (!user)
+ {
+ if (auth_info->auth_string_length > 0)
+ {
+ compare_full_name= 1;
+ user= auth_info->auth_string;
+ }
+ else
+ {
+ compare_full_name= 0;
+ user= auth_info->user_name;
+ }
+ }
inbuf.cbBuffer= len;
outbuf.cbBuffer= SSPI_MAX_TOKEN_SIZE;
sspi_ret= AcceptSecurityContext(
diff --git a/plugin/auth_pam/CMakeLists.txt b/plugin/auth_pam/CMakeLists.txt
index 606fef002e7..10f7e47eb40 100644
--- a/plugin/auth_pam/CMakeLists.txt
+++ b/plugin/auth_pam/CMakeLists.txt
@@ -9,6 +9,14 @@ IF(HAVE_PAM_APPL_H)
ADD_DEFINITIONS(-DHAVE_STRNDUP)
ENDIF(HAVE_STRNDUP)
FIND_LIBRARY(PAM_LIBRARY pam)
- MYSQL_ADD_PLUGIN(auth_pam auth_pam.c LINK_LIBRARIES pam MODULE_ONLY)
+ ADD_DEFINITIONS(-D_GNU_SOURCE)
+ MYSQL_ADD_PLUGIN(auth_pam_v1 auth_pam_v1.c LINK_LIBRARIES pam MODULE_ONLY)
+ MYSQL_ADD_PLUGIN(auth_pam auth_pam.c LINK_LIBRARIES pam dl MODULE_ONLY)
+ MYSQL_ADD_EXECUTABLE(auth_pam_tool auth_pam_tool.c DESTINATION ${INSTALL_PLUGINDIR}/auth_pam_tool_dir COMPONENT Server)
+ TARGET_LINK_LIBRARIES(auth_pam_tool pam)
+ INSTALL(CODE "EXECUTE_PROCESS(
+ COMMAND chmod u=rwx,g=,o= auth_pam_tool_dir
+ COMMAND chmod u=rwxs,g=rx,o=rx auth_pam_tool_dir/auth_pam_tool
+ WORKING_DIRECTORY \$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/${INSTALL_PLUGINDIR}/)"
+ COMPONENT Server)
ENDIF(HAVE_PAM_APPL_H)
-
diff --git a/plugin/auth_pam/auth_pam.c b/plugin/auth_pam/auth_pam.c
index a6a981f9641..779f6ced1ad 100644
--- a/plugin/auth_pam/auth_pam.c
+++ b/plugin/auth_pam/auth_pam.c
@@ -14,36 +14,12 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
-#define _GNU_SOURCE 1 /* for strndup */
-#include <mysql/plugin_auth.h>
-#include <stdio.h>
+#include <unistd.h>
#include <string.h>
-#include <security/pam_appl.h>
-#include <security/pam_modules.h>
-
-struct param {
- unsigned char buf[10240], *ptr;
- MYSQL_PLUGIN_VIO *vio;
-};
-
-/* It least solaris doesn't have strndup */
-
-#ifndef HAVE_STRNDUP
-char *strndup(const char *from, size_t length)
-{
- char *ptr;
- size_t max_length= strlen(from);
- if (length > max_length)
- length= max_length;
- if ((ptr= (char*) malloc(length+1)) != 0)
- {
- memcpy((char*) ptr, (char*) from, length);
- ptr[length]=0;
- }
- return ptr;
-}
-#endif
+#include <mysql/plugin_auth.h>
+#include "auth_pam_tool.h"
+#include <my_global.h>
#ifndef DBUG_OFF
static char pam_debug = 0;
@@ -52,158 +28,175 @@ static char pam_debug = 0;
#define PAM_DEBUG(X) /* no-op */
#endif
-static int conv(int n, const struct pam_message **msg,
- struct pam_response **resp, void *data)
+static char *opt_plugin_dir; /* Server's plugin directory, resolved via dlsym() in init(). */
+static const char *tool_name= "auth_pam_tool_dir/auth_pam_tool";
+static const int tool_name_len= 31;
+
+static int pam_auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info)
{
- struct param *param = (struct param *)data;
- unsigned char *end = param->buf + sizeof(param->buf) - 1;
- int i;
+ int p_to_c[2], c_to_p[2]; /* Parent-to-child and child-to-parent pipes. */
+ pid_t proc_id;
+ int result= CR_ERROR, pkt_len;
+ unsigned char field, *pkt;
- *resp = 0;
+ PAM_DEBUG((stderr, "PAM: opening pipes.\n"));
+ if (pipe(p_to_c) < 0 || pipe(c_to_p) < 0)
+ {
+ /* Error creating pipes. */
+ return CR_ERROR;
+ }
+ PAM_DEBUG((stderr, "PAM: forking.\n"));
+ if ((proc_id= fork()) < 0)
+ {
+ /* Error forking. */
+ close(p_to_c[0]);
+ close(c_to_p[1]);
+ goto error_ret;
+ }
- for (i = 0; i < n; i++)
+ if (proc_id == 0)
{
- /* if there's a message - append it to the buffer */
- if (msg[i]->msg)
+ /* The 'sandbox' process started. */
+ char toolpath[FN_REFLEN];
+ size_t plugin_dir_len= strlen(opt_plugin_dir);
+
+ PAM_DEBUG((stderr, "PAM: Child process prepares pipes.\n"));
+
+ if (close(p_to_c[1]) < 0 ||
+ close(c_to_p[0]) < 0 ||
+ dup2(p_to_c[0], 0) < 0 || /* Parent's pipe to STDIN. */
+ dup2(c_to_p[1], 1) < 0) /* Sandbox's pipe to STDOUT. */
{
- int len = strlen(msg[i]->msg);
- if (len > end - param->ptr)
- len = end - param->ptr;
- if (len > 0)
- {
- memcpy(param->ptr, msg[i]->msg, len);
- param->ptr+= len;
- *(param->ptr)++ = '\n';
- }
+ exit(-1);
}
- /* if the message style is *_PROMPT_*, meaning PAM asks a question,
- send the accumulated text to the client, read the reply */
- if (msg[i]->msg_style == PAM_PROMPT_ECHO_OFF ||
- msg[i]->msg_style == PAM_PROMPT_ECHO_ON)
+
+ PAM_DEBUG((stderr, "PAM: check tool directory: %s, %s.\n",
+ opt_plugin_dir, tool_name));
+ if (plugin_dir_len + tool_name_len + 2 > sizeof(toolpath))
{
- int pkt_len;
- unsigned char *pkt;
+ /* Tool path too long. */
+ exit(-1);
+ }
- /* allocate the response array.
- freeing it is the responsibility of the caller */
- if (*resp == 0)
- {
- *resp = calloc(sizeof(struct pam_response), n);
- if (*resp == 0)
- return PAM_BUF_ERR;
- }
+ memcpy(toolpath, opt_plugin_dir, plugin_dir_len);
+ if (plugin_dir_len && toolpath[plugin_dir_len-1] != FN_LIBCHAR)
+ toolpath[plugin_dir_len++]= FN_LIBCHAR;
+ memcpy(toolpath+plugin_dir_len, tool_name, tool_name_len+1);
- /* dialog plugin interprets the first byte of the packet
- as the magic number.
- 2 means "read the input with the echo enabled"
- 4 means "password-like input, echo disabled"
- C'est la vie. */
- param->buf[0] = msg[i]->msg_style == PAM_PROMPT_ECHO_ON ? 2 : 4;
- PAM_DEBUG((stderr, "PAM: conv: send(%.*s)\n", (int)(param->ptr - param->buf - 1), param->buf));
- if (param->vio->write_packet(param->vio, param->buf, param->ptr - param->buf - 1))
- return PAM_CONV_ERR;
-
- pkt_len = param->vio->read_packet(param->vio, &pkt);
- if (pkt_len < 0)
- {
- PAM_DEBUG((stderr, "PAM: conv: recv() ERROR\n"));
- return PAM_CONV_ERR;
- }
- PAM_DEBUG((stderr, "PAM: conv: recv(%.*s)\n", pkt_len, pkt));
- /* allocate and copy the reply to the response array */
- if (!((*resp)[i].resp= strndup((char*) pkt, pkt_len)))
- return PAM_CONV_ERR;
- param->ptr = param->buf + 1;
- }
+ PAM_DEBUG((stderr, "PAM: execute pam sandbox [%s].\n", toolpath));
+ (void) execl(toolpath, toolpath, NULL);
+ PAM_DEBUG((stderr, "PAM: exec() failed.\n"));
+ exit(-1);
}
- return PAM_SUCCESS;
-}
-#define DO(X) if ((status = (X)) != PAM_SUCCESS) goto end
+ /* Parent process continues. */
-#if defined(SOLARIS) || defined(__sun)
-typedef void** pam_get_item_3_arg;
+ PAM_DEBUG((stderr, "PAM: parent continues.\n"));
+ if (close(p_to_c[0]) < 0 ||
+ close(c_to_p[1]) < 0)
+ goto error_ret;
+
+ /* no user name yet ? read the client handshake packet with the user name */
+ if (info->user_name == 0)
+ {
+ if ((pkt_len= vio->read_packet(vio, &pkt)) < 0)
+ return CR_ERROR;
+ }
+ else
+ pkt= NULL;
+
+ PAM_DEBUG((stderr, "PAM: parent sends user data [%s], [%s].\n",
+ info->user_name, info->auth_string));
+
+#ifndef DBUG_OFF
+ field= pam_debug;
#else
-typedef const void** pam_get_item_3_arg;
+ field= 0;
#endif
+ if (write(p_to_c[1], &field, 1) != 1 ||
+ write_string(p_to_c[1], (const uchar *) info->user_name,
+ info->user_name_length) ||
+ write_string(p_to_c[1], (const uchar *) info->auth_string,
+ info->auth_string_length))
+ goto error_ret;
+
+ for (;;)
+ {
+ PAM_DEBUG((stderr, "PAM: listening to the sandbox.\n"));
+ if (read(c_to_p[0], &field, 1) < 1)
+ {
+ PAM_DEBUG((stderr, "PAM: read failed.\n"));
+ goto error_ret;
+ }
-static int pam_auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info)
-{
- pam_handle_t *pamh = NULL;
- int status;
- const char *new_username= NULL;
- struct param param;
- /* The following is written in such a way to make also solaris happy */
- struct pam_conv pam_start_arg = { &conv, (char*) &param };
+ if (field == AP_EOF)
+ {
+ PAM_DEBUG((stderr, "PAM: auth OK returned.\n"));
+ break;
+ }
- /*
- get the service name, as specified in
+ switch (field)
+ {
+ case AP_AUTHENTICATED_AS:
+ PAM_DEBUG((stderr, "PAM: reading authenticated_as string.\n"));
+ if (read_string(c_to_p[0], info->authenticated_as,
+ sizeof(info->authenticated_as) - 1) < 0)
+ goto error_ret;
+ break;
+
+ case AP_CONV:
+ {
+ unsigned char buf[10240];
+ int buf_len;
- CREATE USER ... IDENTIFIED WITH pam AS "service"
- */
- const char *service = info->auth_string && info->auth_string[0]
- ? info->auth_string : "mysql";
+ PAM_DEBUG((stderr, "PAM: getting CONV string.\n"));
+ if ((buf_len= read_string(c_to_p[0], (char *) buf, sizeof(buf))) < 0)
+ goto error_ret;
- param.ptr = param.buf + 1;
- param.vio = vio;
+ if (!pkt || (buf[0] >> 1) != 2)
+ {
+ PAM_DEBUG((stderr, "PAM: sending CONV string.\n"));
+ if (vio->write_packet(vio, buf, buf_len))
+ goto error_ret;
- PAM_DEBUG((stderr, "PAM: pam_start(%s, %s)\n", service, info->user_name));
- DO( pam_start(service, info->user_name, &pam_start_arg, &pamh) );
+ PAM_DEBUG((stderr, "PAM: reading CONV answer.\n"));
+ if ((pkt_len= vio->read_packet(vio, &pkt)) < 0)
+ goto error_ret;
+ }
- PAM_DEBUG((stderr, "PAM: pam_authenticate(0)\n"));
- DO( pam_authenticate (pamh, 0) );
+ PAM_DEBUG((stderr, "PAM: answering CONV.\n"));
+ if (write_string(p_to_c[1], pkt, pkt_len))
+ goto error_ret;
- PAM_DEBUG((stderr, "PAM: pam_acct_mgmt(0)\n"));
- DO( pam_acct_mgmt(pamh, 0) );
+ pkt= NULL;
+ }
+ break;
- PAM_DEBUG((stderr, "PAM: pam_get_item(PAM_USER)\n"));
- DO( pam_get_item(pamh, PAM_USER, (pam_get_item_3_arg) &new_username) );
+ default:
+ PAM_DEBUG((stderr, "PAM: unknown sandbox field.\n"));
+ goto error_ret;
+ }
+ }
+ result= CR_OK;
- if (new_username && strcmp(new_username, info->user_name))
- strncpy(info->authenticated_as, new_username,
- sizeof(info->authenticated_as)-1);
- info->authenticated_as[sizeof(info->authenticated_as)-1]= 0;
+error_ret:
+ close(p_to_c[1]);
+ close(c_to_p[0]);
-end:
- pam_end(pamh, status);
- PAM_DEBUG((stderr, "PAM: status = %d user = %s\n", status, info->authenticated_as));
- return status == PAM_SUCCESS ? CR_OK : CR_ERROR;
+ PAM_DEBUG((stderr, "PAM: auth result %d.\n", result));
+ return result;
}
-static struct st_mysql_auth info =
-{
- MYSQL_AUTHENTICATION_INTERFACE_VERSION,
- "dialog",
- pam_auth
-};
-
-static char use_cleartext_plugin;
-static MYSQL_SYSVAR_BOOL(use_cleartext_plugin, use_cleartext_plugin,
- PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
- "Use mysql_cleartext_plugin on the client side instead of the dialog "
- "plugin. This may be needed for compatibility reasons, but it only "
- "supports simple PAM policies that don't require anything besides "
- "a password", NULL, NULL, 0);
-
-#ifndef DBUG_OFF
-static MYSQL_SYSVAR_BOOL(debug, pam_debug, PLUGIN_VAR_OPCMDARG,
- "Log all PAM activity", NULL, NULL, 0);
-#endif
-
-static struct st_mysql_sys_var* vars[] = {
- MYSQL_SYSVAR(use_cleartext_plugin),
-#ifndef DBUG_OFF
- MYSQL_SYSVAR(debug),
-#endif
- NULL
-};
+#include "auth_pam_common.c"
static int init(void *p __attribute__((unused)))
{
if (use_cleartext_plugin)
info.client_auth_plugin= "mysql_clear_password";
+ if (!(opt_plugin_dir= dlsym(RTLD_DEFAULT, "opt_plugin_dir")))
+ return 1;
return 0;
}
@@ -212,15 +205,15 @@ maria_declare_plugin(pam)
MYSQL_AUTHENTICATION_PLUGIN,
&info,
"pam",
- "Sergei Golubchik",
+ "MariaDB Corp",
"PAM based authentication",
PLUGIN_LICENSE_GPL,
init,
NULL,
- 0x0100,
+ 0x0200,
NULL,
vars,
- "1.0",
- MariaDB_PLUGIN_MATURITY_STABLE
+ "2.0",
+ MariaDB_PLUGIN_MATURITY_BETA
}
maria_declare_plugin_end;
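
[Editor's note] The rewritten plugin above no longer runs PAM inside the server: pam_auth() forks the setuid auth_pam_tool helper (installed by the CMakeLists.txt change earlier in this patch) and proxies the PAM dialog over two pipes, using the one-byte field codes defined below in auth_pam_tool.h (AP_CONV for one dialog round trip, AP_AUTHENTICATED_AS for a proxied user name, AP_EOF for success), with every string framed by a two-byte length prefix. A minimal, self-contained sketch of that wire format with a child that simply reports success (illustrative only; no PAM or MariaDB code involved):

  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  #define AP_EOF 'E'   /* same value as in auth_pam_tool.h below */

  static int send_str(int fd, const char *s)   /* 2-byte big-endian length prefix */
  {
    size_t len= strlen(s);
    unsigned char hdr[2]= { (unsigned char)(len >> 8), (unsigned char)(len & 0xFF) };
    return write(fd, hdr, 2) != 2 || write(fd, s, len) != (ssize_t) len;
  }

  static int recv_str(int fd, char *buf, int size)
  {
    unsigned char hdr[2];
    int len;
    if (read(fd, hdr, 2) != 2)
      return -1;
    len= (hdr[0] << 8) + hdr[1];
    if (len > size - 1 || read(fd, buf, len) != len)
      return -1;
    buf[len]= 0;
    return len;
  }

  int main(void)
  {
    int p_to_c[2], c_to_p[2];
    unsigned char field= 0;
    char buf[256];

    if (pipe(p_to_c) || pipe(c_to_p))
      return 1;
    if (fork() == 0)                           /* "sandbox" child   */
    {
      read(p_to_c[0], &field, 1);              /* debug flag        */
      recv_str(p_to_c[0], buf, sizeof(buf));   /* user name         */
      recv_str(p_to_c[0], buf, sizeof(buf));   /* auth string       */
      field= AP_EOF;                           /* report success    */
      write(c_to_p[1], &field, 1);
      _exit(0);
    }
    write(p_to_c[1], &field, 1);               /* debug flag off    */
    send_str(p_to_c[1], "alice");              /* user name         */
    send_str(p_to_c[1], "");                   /* auth string       */
    if (read(c_to_p[0], &field, 1) == 1 && field == AP_EOF)
      puts("authenticated");
    return 0;
  }
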
diff --git a/plugin/auth_pam/auth_pam_base.c b/plugin/auth_pam/auth_pam_base.c
new file mode 100644
index 00000000000..67a0adbeb2e
--- /dev/null
+++ b/plugin/auth_pam/auth_pam_base.c
@@ -0,0 +1,174 @@
+/*
+ Copyright (c) 2011, 2018 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
+
+/*
+ This file contains code to interact with the PAM module.
+ To be included into auth_pam_tool.c and auth_pam_v1.c.
+
+ Before the #include these should be defined:
+
+ struct param {
+ unsigned char buf[10240], *ptr;
+ ... other arbitrary fields allowed.
+ };
+ static int roundtrip(struct param *param, const unsigned char *buf,
+ int buf_len, unsigned char **pkt)
+ which sends buf (buf_len bytes) to the client and reads the reply
+ into *pkt, returning the reply length or a negative value on error.
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include <security/pam_appl.h>
+#include <security/pam_modules.h>
+
+/* At least solaris doesn't have strndup */
+
+#ifndef HAVE_STRNDUP
+char *strndup(const char *from, size_t length)
+{
+ char *ptr;
+ size_t max_length= strlen(from);
+ if (length > max_length)
+ length= max_length;
+ if ((ptr= (char*) malloc(length+1)) != 0)
+ {
+ memcpy((char*) ptr, (char*) from, length);
+ ptr[length]=0;
+ }
+ return ptr;
+}
+#endif
+
+#ifndef DBUG_OFF
+static char pam_debug = 0;
+#define PAM_DEBUG(X) do { if (pam_debug) { fprintf X; } } while(0)
+#else
+#define PAM_DEBUG(X) /* no-op */
+#endif
+
+static int conv(int n, const struct pam_message **msg,
+ struct pam_response **resp, void *data)
+{
+ struct param *param = (struct param *)data;
+ unsigned char *end = param->buf + sizeof(param->buf) - 1;
+ int i;
+
+ *resp = 0;
+
+ for (i = 0; i < n; i++)
+ {
+ /* if there's a message - append it to the buffer */
+ if (msg[i]->msg)
+ {
+ int len = strlen(msg[i]->msg);
+ if (len > end - param->ptr)
+ len = end - param->ptr;
+ if (len > 0)
+ {
+ memcpy(param->ptr, msg[i]->msg, len);
+ param->ptr+= len;
+ *(param->ptr)++ = '\n';
+ }
+ }
+ /* if the message style is *_PROMPT_*, meaning PAM asks a question,
+ send the accumulated text to the client, read the reply */
+ if (msg[i]->msg_style == PAM_PROMPT_ECHO_OFF ||
+ msg[i]->msg_style == PAM_PROMPT_ECHO_ON)
+ {
+ int pkt_len;
+ unsigned char *pkt;
+
+ /* allocate the response array.
+ freeing it is the responsibility of the caller */
+ if (*resp == 0)
+ {
+ *resp = calloc(sizeof(struct pam_response), n);
+ if (*resp == 0)
+ return PAM_BUF_ERR;
+ }
+
+ /* dialog plugin interprets the first byte of the packet
+ as the magic number.
+ 2 means "read the input with the echo enabled"
+ 4 means "password-like input, echo disabled"
+ C'est la vie. */
+ param->buf[0] = msg[i]->msg_style == PAM_PROMPT_ECHO_ON ? 2 : 4;
+ PAM_DEBUG((stderr, "PAM: conv: send(%.*s)\n",
+ (int)(param->ptr - param->buf - 1), param->buf));
+ pkt_len= roundtrip(param, param->buf, param->ptr - param->buf - 1, &pkt);
+ if (pkt_len < 0)
+ return PAM_CONV_ERR;
+
+ PAM_DEBUG((stderr, "PAM: conv: recv(%.*s)\n", pkt_len, pkt));
+ /* allocate and copy the reply to the response array */
+ if (!((*resp)[i].resp= strndup((char*) pkt, pkt_len)))
+ return PAM_CONV_ERR;
+ param->ptr = param->buf + 1;
+ }
+ }
+ return PAM_SUCCESS;
+}
+
+#define DO(X) if ((status = (X)) != PAM_SUCCESS) goto end
+
+#if defined(SOLARIS) || defined(__sun)
+typedef void** pam_get_item_3_arg;
+#else
+typedef const void** pam_get_item_3_arg;
+#endif
+
+static int pam_auth_base(struct param *param, MYSQL_SERVER_AUTH_INFO *info)
+{
+ pam_handle_t *pamh = NULL;
+ int status;
+ const char *new_username= NULL;
+ /* The following is written in such a way to make also solaris happy */
+ struct pam_conv pam_start_arg = { &conv, (char*) param };
+
+ /*
+ get the service name, as specified in
+
+ CREATE USER ... IDENTIFIED WITH pam AS "service"
+ */
+ const char *service = info->auth_string && info->auth_string[0]
+ ? info->auth_string : "mysql";
+
+ param->ptr = param->buf + 1;
+
+ PAM_DEBUG((stderr, "PAM: pam_start(%s, %s)\n", service, info->user_name));
+ DO( pam_start(service, info->user_name, &pam_start_arg, &pamh) );
+
+ PAM_DEBUG((stderr, "PAM: pam_authenticate(0)\n"));
+ DO( pam_authenticate (pamh, 0) );
+
+ PAM_DEBUG((stderr, "PAM: pam_acct_mgmt(0)\n"));
+ DO( pam_acct_mgmt(pamh, 0) );
+
+ PAM_DEBUG((stderr, "PAM: pam_get_item(PAM_USER)\n"));
+ DO( pam_get_item(pamh, PAM_USER, (pam_get_item_3_arg) &new_username) );
+
+ if (new_username && strcmp(new_username, info->user_name))
+ strncpy(info->authenticated_as, new_username,
+ sizeof(info->authenticated_as));
+ info->authenticated_as[sizeof(info->authenticated_as)-1]= 0;
+
+end:
+ pam_end(pamh, status);
+ PAM_DEBUG((stderr, "PAM: status = %d user = %s\n", status, info->authenticated_as));
+ return status == PAM_SUCCESS ? CR_OK : CR_ERROR;
+}
+
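
[Editor's note] conv() and pam_auth_base() above do no client I/O themselves; every prompt goes through a roundtrip() function that each includer of this file has to provide (auth_pam_v1.c later in this patch implements it on the plugin VIO, auth_pam_tool.c implements it over stdin/stdout towards the parent process). A loopback stub illustrating the expected contract, for documentation only and not one of the real implementations:

  #include <string.h>

  struct param {
    unsigned char buf[10240], *ptr;
    /* real includers may add more fields, e.g. a MYSQL_PLUGIN_VIO pointer */
  };

  /* Echo the prompt back as the "answer", showing the shape
     auth_pam_base.c expects from roundtrip(). */
  static int roundtrip(struct param *param, const unsigned char *buf,
                       int buf_len, unsigned char **pkt)
  {
    /* buf[0] is the dialog "magic" byte (2 = echo on, 4 = echo off),
       the rest is the prompt text; return the reply and its length,
       or a negative value on error. */
    if (buf_len < 0 || buf_len > (int) sizeof(param->buf))
      return -1;
    memcpy(param->buf, buf, buf_len);
    *pkt= param->buf;
    return buf_len;
  }
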
diff --git a/plugin/auth_pam/auth_pam_common.c b/plugin/auth_pam/auth_pam_common.c
new file mode 100644
index 00000000000..135feb611a6
--- /dev/null
+++ b/plugin/auth_pam/auth_pam_common.c
@@ -0,0 +1,51 @@
+/*
+ Copyright (c) 2011, 2018 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
+
+/*
+ In this file we gather the plugin interface definitions
+ that are the same in all the PAM plugin versions.
+ To be included into auth_pam.c and auth_pam_v1.c.
+*/
+
+static struct st_mysql_auth info =
+{
+ MYSQL_AUTHENTICATION_INTERFACE_VERSION,
+ "dialog",
+ pam_auth,
+ NULL, NULL /* no PASSWORD() */
+};
+
+static char use_cleartext_plugin;
+static MYSQL_SYSVAR_BOOL(use_cleartext_plugin, use_cleartext_plugin,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+ "Use mysql_cleartext_plugin on the client side instead of the dialog "
+ "plugin. This may be needed for compatibility reasons, but it only "
+ "supports simple PAM policies that don't require anything besides "
+ "a password", NULL, NULL, 0);
+
+#ifndef DBUG_OFF
+static MYSQL_SYSVAR_BOOL(debug, pam_debug, PLUGIN_VAR_OPCMDARG,
+ "Log all PAM activity", NULL, NULL, 0);
+#endif
+
+
+static struct st_mysql_sys_var* vars[] = {
+ MYSQL_SYSVAR(use_cleartext_plugin),
+#ifndef DBUG_OFF
+ MYSQL_SYSVAR(debug),
+#endif
+ NULL
+};
diff --git a/plugin/auth_pam/auth_pam_tool.c b/plugin/auth_pam/auth_pam_tool.c
new file mode 100644
index 00000000000..95d47dca113
--- /dev/null
+++ b/plugin/auth_pam/auth_pam_tool.c
@@ -0,0 +1,115 @@
+/*
+ Copyright (c) 2011, 2018 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <mysql/plugin_auth_common.h>
+
+struct param {
+ unsigned char buf[10240], *ptr;
+};
+
+
+#include "auth_pam_tool.h"
+
+
+static int roundtrip(struct param *param, const unsigned char *buf,
+ int buf_len, unsigned char **pkt)
+{
+ unsigned char b= AP_CONV;
+ if (write(1, &b, 1) < 1 || write_string(1, buf, buf_len))
+ return -1;
+ *pkt= (unsigned char *) param->buf;
+ return read_string(0, (char *) param->buf, (int) sizeof(param->buf)) - 1;
+}
+
+typedef struct st_mysql_server_auth_info
+{
+ /**
+ User name as sent by the client and shown in USER().
+ NULL if the client packet with the user name was not received yet.
+ */
+ char *user_name;
+
+ /**
+ A corresponding column value from the mysql.user table for the
+ matching account name
+ */
+ char *auth_string;
+
+ /**
+ Matching account name as found in the mysql.user table.
+ A plugin can override it with another name that will be
+ used by MySQL for authorization, and shown in CURRENT_USER()
+ */
+ char authenticated_as[MYSQL_USERNAME_LENGTH+1];
+} MYSQL_SERVER_AUTH_INFO;
+
+
+#include "auth_pam_base.c"
+
+
+int main(int argc, char **argv)
+{
+ struct param param;
+ MYSQL_SERVER_AUTH_INFO info;
+ unsigned char field;
+ int res;
+ char a_buf[MYSQL_USERNAME_LENGTH + 1 + 1024];
+
+ if (read(0, &field, 1) < 1)
+ return -1;
+#ifndef DBUG_OFF
+ pam_debug= field;
+#endif
+
+ PAM_DEBUG((stderr, "PAM: sandbox started [%s].\n", argv[0]));
+
+ info.user_name= a_buf;
+ if ((res= read_string(0, info.user_name, sizeof(a_buf))) < 0)
+ return -1;
+ PAM_DEBUG((stderr, "PAM: sandbox username [%s].\n", info.user_name));
+
+ info.auth_string= info.user_name + res + 1;
+ if (read_string(0, info.auth_string, sizeof(a_buf) - 1 - res) < 0)
+ return -1;
+
+ PAM_DEBUG((stderr, "PAM: sandbox auth string [%s].\n", info.auth_string));
+
+ if ((res= pam_auth_base(&param, &info)) != CR_OK)
+ {
+ PAM_DEBUG((stderr, "PAM: auth failed, sandbox closed.\n"));
+ return -1;
+ }
+
+ if (info.authenticated_as[0])
+ {
+ PAM_DEBUG((stderr, "PAM: send authenticated_as field.\n"));
+ field= AP_AUTHENTICATED_AS;
+ if (write(1, &field, 1) < 1 ||
+ write_string(1, (unsigned char *) info.authenticated_as,
+ strlen(info.authenticated_as)))
+ return -1;
+ }
+
+ PAM_DEBUG((stderr, "PAM: send OK result.\n"));
+ field= AP_EOF;
+ if (write(1, &field, 1) != 1)
+ return -1;
+
+ PAM_DEBUG((stderr, "PAM: sandbox closed.\n"));
+ return 0;
+}
diff --git a/plugin/auth_pam/auth_pam_tool.h b/plugin/auth_pam/auth_pam_tool.h
new file mode 100644
index 00000000000..60ae016db72
--- /dev/null
+++ b/plugin/auth_pam/auth_pam_tool.h
@@ -0,0 +1,81 @@
+/*
+ Copyright (c) 2011, 2018 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
+
+/*
+ This file contains definitions and functions for
+ the interface between the auth_pam.so (PAM plugin version 2)
+ and the auth_pam_tool executable.
+ To be included both in auth_pam.c and auth_pam_tool.c.
+*/
+
+#define AP_AUTHENTICATED_AS 'A'
+#define AP_CONV 'C'
+#define AP_EOF 'E'
+
+
+static int read_length(int file)
+{
+ unsigned char hdr[2];
+
+ if (read(file, hdr, 2) < 2)
+ return -1;
+
+ return (((int) hdr[0]) << 8) + (int) hdr[1];
+}
+
+
+static void store_length(int len, unsigned char *p_len)
+{
+ p_len[0]= (unsigned char) ((len >> 8) & 0xFF);
+ p_len[1]= (unsigned char) (len & 0xFF);
+}
+
+
+/*
+ Returns the length of the string read,
+ or -1 on error.
+*/
+
+static int read_string(int file, char *s, int s_size)
+{
+ int len;
+
+ len= read_length(file);
+
+ if (len < 0 || len > s_size-1 ||
+ read(file, s, len) < len)
+ return -1;
+
+ s[len]= 0;
+
+ return len;
+}
+
+
+/*
+ Returns 0 on success.
+*/
+
+static int write_string(int file, const unsigned char *s, int s_len)
+{
+ unsigned char hdr[2];
+ store_length(s_len, hdr);
+ return write(file, hdr, 2) < 2 ||
+ write(file, s, s_len) < s_len;
+}
+
+
+#define MAX_PAM_SERVICE_NAME 1024
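
[Editor's note] read_string() and write_string() above implement the framing used on both sides of the sandbox pipes: a two-byte big-endian length followed by the payload, NUL-terminated on read. A small usage sketch over a local pipe (illustrative; in the plugin one end is the server and the other is auth_pam_tool, and unistd.h must be included before this header, as both real users do):

  #include <stdio.h>
  #include <unistd.h>
  #include "auth_pam_tool.h"

  int main(void)
  {
    int fds[2];
    char buf[64];

    if (pipe(fds))
      return 1;
    if (write_string(fds[1], (const unsigned char *) "hello", 5))
      return 1;                      /* sends 00 05 'h' 'e' 'l' 'l' 'o' */
    if (read_string(fds[0], buf, sizeof(buf)) != 5)
      return 1;                      /* buf now holds "hello"           */
    printf("%s\n", buf);
    return 0;
  }
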
diff --git a/plugin/auth_pam/auth_pam_v1.c b/plugin/auth_pam/auth_pam_v1.c
new file mode 100644
index 00000000000..6e0b2ea9991
--- /dev/null
+++ b/plugin/auth_pam/auth_pam_v1.c
@@ -0,0 +1,86 @@
+/*
+ Copyright (c) 2011, 2018 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
+
+#include <mysql/plugin_auth.h>
+
+struct param {
+ unsigned char buf[10240], *ptr, *cached;
+ int cached_len;
+ MYSQL_PLUGIN_VIO *vio;
+};
+
+static int roundtrip(struct param *param, const unsigned char *buf,
+ int buf_len, unsigned char **pkt)
+{
+ if (param->cached && (buf[0] >> 1) == 2)
+ {
+ *pkt= param->cached;
+ param->cached= NULL;
+ return param->cached_len;
+ }
+ param->cached= NULL;
+ if (param->vio->write_packet(param->vio, buf, buf_len))
+ return -1;
+ return param->vio->read_packet(param->vio, pkt);
+}
+
+#include "auth_pam_base.c"
+
+static int pam_auth(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info)
+{
+ struct param param;
+ param.vio = vio;
+
+ /* no user name yet ? read the client handshake packet with the user name */
+ if (info->user_name == 0)
+ {
+ if ((param.cached_len= vio->read_packet(vio, &param.cached)) < 0)
+ return CR_ERROR;
+ }
+ else
+ param.cached= NULL;
+
+ return pam_auth_base(&param, info);
+}
+
+
+#include "auth_pam_common.c"
+
+
+static int init(void *p __attribute__((unused)))
+{
+ if (use_cleartext_plugin)
+ info.client_auth_plugin= "mysql_clear_password";
+ return 0;
+}
+
+maria_declare_plugin(pam)
+{
+ MYSQL_AUTHENTICATION_PLUGIN,
+ &info,
+ "pam",
+ "Sergei Golubchik",
+ "PAM based authentication",
+ PLUGIN_LICENSE_GPL,
+ init,
+ NULL,
+ 0x0100,
+ NULL,
+ vars,
+ "1.0",
+ MariaDB_PLUGIN_MATURITY_STABLE
+}
+maria_declare_plugin_end;
diff --git a/plugin/auth_pam/testing/pam_mariadb_mtr.c b/plugin/auth_pam/testing/pam_mariadb_mtr.c
index 473ec246fe0..44af584d7f0 100644
--- a/plugin/auth_pam/testing/pam_mariadb_mtr.c
+++ b/plugin/auth_pam/testing/pam_mariadb_mtr.c
@@ -58,7 +58,17 @@ int pam_sm_authenticate(pam_handle_t *pamh, int flags,
if (strlen(r1) == atoi(r2) % 100)
retval = PAM_SUCCESS;
else
+ {
+ /* Produce the crash for testing purposes. */
+ if ((strlen(r1) == 16) &&
+ memcmp(r1, "crash pam module", 16) == 0 &&
+ atoi(r2) == 666)
+ {
+ r1= 0;
+ *((struct pam_message *) r1)= msg[0];
+ }
retval = PAM_AUTH_ERR;
+ }
if (argc > 0 && argv[0])
pam_set_item(pamh, PAM_USER, argv[0]);
diff --git a/plugin/auth_socket/CMakeLists.txt b/plugin/auth_socket/CMakeLists.txt
index bfded6a5555..c243bbb0173 100644
--- a/plugin/auth_socket/CMakeLists.txt
+++ b/plugin/auth_socket/CMakeLists.txt
@@ -65,5 +65,5 @@ ENDIF()
ENDIF()
IF(ok)
- MYSQL_ADD_PLUGIN(auth_socket auth_socket.c)
+ MYSQL_ADD_PLUGIN(auth_socket auth_socket.c DEFAULT)
ENDIF()
diff --git a/plugin/auth_socket/auth_socket.c b/plugin/auth_socket/auth_socket.c
index f04b1d9d2a1..2fc29e9ba1c 100644
--- a/plugin/auth_socket/auth_socket.c
+++ b/plugin/auth_socket/auth_socket.c
@@ -102,7 +102,8 @@ static struct st_mysql_auth socket_auth_handler=
{
MYSQL_AUTHENTICATION_INTERFACE_VERSION,
0,
- socket_auth
+ socket_auth,
+ NULL, NULL /* no PASSWORD() */
};
maria_declare_plugin(auth_socket)
diff --git a/plugin/aws_key_management/CMakeLists.txt b/plugin/aws_key_management/CMakeLists.txt
index e9e1b49d5f2..248d56e8d76 100644
--- a/plugin/aws_key_management/CMakeLists.txt
+++ b/plugin/aws_key_management/CMakeLists.txt
@@ -1,174 +1,14 @@
-# We build parts of AWS C++ SDK as CMake external project
-# The restrictions of the SDK (https://github.com/awslabs/aws-sdk-cpp/blob/master/README.md)
-# are
-
-# - OS : Windows,Linux or OSX
-# - C++11 compiler : VS2013+, gcc 4.8+, clang 3.3+
-# - libcurl development package needs to be present on Unixes
-#
-# If we build SDK outselves, we'll need require GIT to be present on the build machine
-
-
-# Give message why the building this plugin is skipped (only if -DVERBOSE is defined)
-# or if plugin is explicitly requested to build. Then bail out.
-MACRO(SKIP_AWS_PLUGIN msg)
- MESSAGE_ONCE(SKIP_AWS_PLUGIN "Skip aws_key_management - ${msg}")
+INCLUDE(aws_sdk)
+CHECK_AWS_SDK(HAVE_AWS_SDK REASON)
+IF(NOT HAVE_AWS_SDK)
+ MESSAGE_ONCE(AWS_KEY_MANAGEMENT_NO_AWS_SDK "Can't build aws_key_management - AWS SDK not available (${REASON})")
RETURN()
-ENDMACRO()
-SET(CMAKE_CXX_STANDARD 11)
-
-IF(NOT NOT_FOR_DISTRIBUTION)
- SKIP_AWS_PLUGIN("AWS SDK has Apache 2.0 License which is not complatible with GPLv2. Set -DNOT_FOR_DISTRIBUTION=ON if you need this plugin")
-ENDIF()
-
-# This plugin needs recent C++ compilers (AWS C++ SDK header files are using C++11 features)
-SET(CXX11_FLAGS)
-SET(OLD_COMPILER_MSG "AWS SDK requires c++11 -capable compiler (minimal supported versions are g++ 4.8, clang 3.3, VS2103)")
-
-IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
- EXECUTE_PROCESS(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion OUTPUT_VARIABLE GCC_VERSION)
- IF (GCC_VERSION VERSION_LESS 4.8)
- SKIP_AWS_PLUGIN("${OLD_COMPILER_MSG}")
- ENDIF()
- SET(CXX11_FLAGS "-std=c++11")
-ELSEIF (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
- IF ((CMAKE_CXX_COMPILER_VERSION AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.3) OR
- (CLANG_VERSION_STRING AND CLANG_VERSION_STRING VERSION_LESS 3.3))
- SKIP_AWS_PLUGIN("${OLD_COMPILER_MSG}")
- ENDIF()
- SET(CXX11_FLAGS "-stdlib=libc++")
-ELSEIF(MSVC)
- IF (MSVC_VERSION LESS 1800)
- SKIP_AWS_PLUGIN("${OLD_COMPILER_MSG}")
- ENDIF()
-ELSE()
- SKIP_AWS_PLUGIN("Compiler not supported by AWS C++ SDK")
-ENDIF()
-
-IF (NOT(WIN32 OR APPLE OR (CMAKE_SYSTEM_NAME MATCHES "Linux")))
- SKIP_AWS_PLUGIN("OS unsupported by AWS SDK")
ENDIF()
+MYSQL_ADD_PLUGIN(aws_key_management
+ aws_key_management_plugin.cc
+ COMPONENT aws-key-management)
-
-FIND_LIBRARY(AWS_CPP_SDK_CORE NAMES aws-cpp-sdk-core PATH_SUFFIXES "${SDK_INSTALL_BINARY_PREFIX}")
-FIND_LIBRARY(AWS_CPP_SDK_KMS NAMES aws-cpp-sdk-kms PATH_SUFFIXES "${SDK_INSTALL_BINARY_PREFIX}")
-FIND_PATH(AWS_CPP_SDK_INCLUDE_DIR NAMES aws/kms/KMSClient.h)
-
-IF(AWS_CPP_SDK_CORE AND AWS_CPP_SDK_KMS AND AWS_CPP_SDK_INCLUDE_DIR)
- # AWS C++ SDK installed
- INCLUDE_DIRECTORIES(${AWS_CPP_SDK_INCLUDE_DIR})
- SET(AWS_SDK_LIBS ${AWS_CPP_SDK_CORE} ${AWS_CPP_SDK_KMS})
-ELSE()
- OPTION(AWS_SDK_EXTERNAL_PROJECT "Allow download and build AWS C++ SDK" OFF)
- IF(NOT AWS_SDK_EXTERNAL_PROJECT)
- SKIP_AWS_PLUGIN("AWS_SDK_EXTERNAL_PROJECT is not set")
- ENDIF()
- # Build from source, using ExternalProject_Add
- # AWS C++ SDK requires cmake 2.8.12
- IF(CMAKE_VERSION VERSION_LESS "2.8.12")
- SKIP_AWS_PLUGIN("CMake is too old")
- ENDIF()
- FIND_PACKAGE(Git)
- IF(NOT GIT_FOUND)
- SKIP_AWS_PLUGIN("no GIT")
- ENDIF()
- INCLUDE(ExternalProject)
- IF(UNIX)
- FIND_PACKAGE(CURL)
- IF(NOT CURL_FOUND)
- SKIP_AWS_PLUGIN("AWS C++ SDK requires libcurl development package")
- ENDIF()
- SET(PIC_FLAG -fPIC)
- FIND_PATH(UUID_INCLUDE_DIR uuid/uuid.h)
- IF(NOT UUID_INCLUDE_DIR)
- SKIP_AWS_PLUGIN("AWS C++ SDK requires uuid development package")
- ENDIF()
- IF(NOT APPLE)
- FIND_LIBRARY(UUID_LIBRARIES uuid)
- IF(NOT UUID_LIBRARIES)
- SKIP_AWS_PLUGIN("AWS C++ SDK requires uuid development package")
- ENDIF()
- FIND_PACKAGE(OpenSSL)
- IF(NOT OPENSSL_FOUND)
- SKIP_AWS_PLUGIN("AWS C++ SDK requires openssl development package")
- ENDIF()
- ENDIF()
- ENDIF()
- IF(MSVC)
- SET(EXTRA_SDK_CMAKE_FLAGS -DCMAKE_CXX_FLAGS_DEBUGOPT="" -DCMAKE_EXE_LINKER_FLAGS_DEBUGOPT="" "-DCMAKE_CXX_FLAGS=/wd4530 /wd4577 /WX-")
- ENDIF()
- IF(CMAKE_CXX_COMPILER)
- SET(EXTRA_SDK_CMAKE_FLAGS ${EXTRA_SDK_CMAKE_FLAGS} -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER})
- ENDIF()
-
- SET(byproducts )
- # We do not need to build the whole SDK , just 2 of its libs
- set(AWS_SDK_LIBS aws-cpp-sdk-core aws-cpp-sdk-kms)
- FOREACH(lib ${AWS_SDK_LIBS})
- ADD_LIBRARY(${lib} STATIC IMPORTED GLOBAL)
- ADD_DEPENDENCIES(${lib} aws_sdk_cpp)
- SET(loc "${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${lib}${CMAKE_STATIC_LIBRARY_SUFFIX}")
- IF(CMAKE_VERSION VERSION_GREATER "3.1")
- SET(byproducts ${byproducts} BUILD_BYPRODUCTS ${loc})
- ENDIF()
- SET_TARGET_PROPERTIES(${lib} PROPERTIES IMPORTED_LOCATION ${loc})
- ENDFOREACH()
-
- # To be compatible with older cmake, we use older version of the SDK
- IF(CMAKE_VERSION LESS "3.0")
- SET(GIT_TAG "1.0.8")
- ELSE()
- SET(GIT_TAG "1.2.11")
- ENDIF()
-
- SET(AWS_SDK_PATCH_COMMAND )
- ExternalProject_Add(
- aws_sdk_cpp
- GIT_REPOSITORY "https://github.com/awslabs/aws-sdk-cpp.git"
- GIT_TAG ${GIT_TAG}
- UPDATE_COMMAND ""
- SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/aws-sdk-cpp"
- ${byproducts}
- CMAKE_ARGS
- -DBUILD_ONLY=kms
- -DBUILD_SHARED_LIBS=OFF
- -DFORCE_SHARED_CRT=OFF
- -DENABLE_TESTING=OFF
- "-DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG} ${PIC_FLAG}"
- "-DCMAKE_CXX_FLAGS_RELWITHDEBINFO=${CMAKE_CXX_FLAGS_RELWITHDEBINFO} ${PIC_FLAG}"
- "-DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE} ${PIC_FLAG}"
- "-DCMAKE_CXX_FLAGS_MINSIZEREL=${CMAKE_CXX_FLAGS_MINSIZEREL} ${PIC_FLAG}"
- ${EXTRA_SDK_CMAKE_FLAGS}
- -DCMAKE_INSTALL_PREFIX=${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp
- -DCMAKE_INSTALL_LIBDIR=lib
- TEST_COMMAND ""
- )
- SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE)
-
- IF(CMAKE_SYSTEM_NAME MATCHES "Linux")
- # Need whole-archive , otherwise static libraries are not linked
- SET(AWS_SDK_LIBS -Wl,--whole-archive ${AWS_SDK_LIBS} -Wl,--no-whole-archive)
- ENDIF()
- SET_TARGET_PROPERTIES(aws_sdk_cpp PROPERTIES EXCLUDE_FROM_ALL TRUE)
- INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}/aws_sdk_cpp/include)
-ENDIF()
-
-ADD_DEFINITIONS(${SSL_DEFINES}) # Need to know whether openssl should be initialized
-IF(CMAKE_VERSION GREATER "3.0")
- SET(CMAKE_CXX_STANDARD 11)
-ELSE()
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CXX11_FLAGS}")
-ENDIF()
-IF(WIN32)
- SET(AWS_CPP_SDK_DEPENDENCIES bcrypt winhttp wininet userenv version)
-ELSE()
- SET(AWS_CPP_SDK_DEPENDENCIES ${OPENSSL_LIBRARIES} ${CURL_LIBRARIES} ${UUID_LIBRARIES})
-ENDIF()
-MYSQL_ADD_PLUGIN(aws_key_management aws_key_management_plugin.cc
- LINK_LIBRARIES ${AWS_SDK_LIBS} ${AWS_CPP_SDK_DEPENDENCIES}
- COMPONENT aws-key-management)
-
-IF (TARGET aws_key_management)
- SET(NON_DISTRIBUTABLE_WARNING "Apache 2.0" PARENT_SCOPE)
+IF(TARGET aws_key_management)
+ USE_AWS_SDK_LIBS(aws_key_management kms)
ENDIF()
diff --git a/plugin/aws_key_management/aws_key_management_plugin.cc b/plugin/aws_key_management/aws_key_management_plugin.cc
index eb330103291..00a2e5f8778 100644
--- a/plugin/aws_key_management/aws_key_management_plugin.cc
+++ b/plugin/aws_key_management/aws_key_management_plugin.cc
@@ -110,7 +110,7 @@ static void print_kms_error(const char *func, const Aws::Client::AWSError<Aws::K
{
my_printf_error(ER_UNKNOWN_ERROR,
"AWS KMS plugin : KMS Client API '%s' failed : %s - %s",
- ME_ERROR_LOG,
+ ME_ERROR_LOG_ONLY,
func, err.GetExceptionName().c_str(), err.GetMessage().c_str());
}
@@ -237,7 +237,7 @@ static int aws_init()
client = new KMSClient(clientConfiguration);
if (!client)
{
- my_printf_error(ER_UNKNOWN_ERROR, "Can not initialize KMS client", ME_ERROR_LOG | ME_WARNING);
+ my_printf_error(ER_UNKNOWN_ERROR, "Can't initialize KMS client", ME_ERROR_LOG_ONLY | ME_WARNING);
return -1;
}
return 0;
@@ -339,12 +339,12 @@ static int load_key(KEY_INFO *info)
if (!ret)
{
- my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: loaded key %u, version %u, key length %u bit", ME_ERROR_LOG | ME_NOTE,
+ my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: loaded key %u, version %u, key length %u bit", ME_ERROR_LOG_ONLY | ME_NOTE,
info->key_id, info->key_version,(uint)info->length*8);
}
else
{
- my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: key %u, version %u could not be decrypted", ME_ERROR_LOG | ME_WARNING,
+ my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: key %u, version %u could not be decrypted", ME_ERROR_LOG_ONLY | ME_WARNING,
info->key_id, info->key_version);
}
return ret;
@@ -443,13 +443,13 @@ static int read_and_decrypt_key(const char *path, KEY_INFO *info)
ifstream ifs(path, ios::binary | ios::ate);
if (!ifs.good())
{
- my_printf_error(ER_UNKNOWN_ERROR, "can't open file %s", ME_ERROR_LOG, path);
+ my_printf_error(ER_UNKNOWN_ERROR, "can't open file %s", ME_ERROR_LOG_ONLY, path);
return(-1);
}
size_t pos = (size_t)ifs.tellg();
if (!pos || pos == SIZE_T_MAX)
{
- my_printf_error(ER_UNKNOWN_ERROR, "invalid key file %s", ME_ERROR_LOG, path);
+ my_printf_error(ER_UNKNOWN_ERROR, "invalid key file %s", ME_ERROR_LOG_ONLY, path);
return(-1);
}
std::vector<char> contents(pos);
@@ -470,7 +470,7 @@ static int read_and_decrypt_key(const char *path, KEY_INFO *info)
if (len > sizeof(info->data))
{
- my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: encoding key too large for %s", ME_ERROR_LOG, path);
+ my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: encoding key too large for %s", ME_ERROR_LOG_ONLY, path);
return(ENCRYPTION_KEY_BUFFER_TOO_SMALL);
}
memcpy(info->data, plaintext.GetUnderlyingData(), len);
@@ -527,19 +527,19 @@ static int generate_and_save_datakey(uint keyid, uint version)
int fd= open(filename, O_WRONLY |O_CREAT|O_BINARY, IF_WIN(_S_IREAD, S_IRUSR| S_IRGRP| S_IROTH));
if (fd < 0)
{
- my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: Can't create file %s", ME_ERROR_LOG, filename);
+ my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: Can't create file %s", ME_ERROR_LOG_ONLY, filename);
return(-1);
}
unsigned int len= (unsigned int)byteBuffer.GetLength();
if (write(fd, byteBuffer.GetUnderlyingData(), len) != len)
{
- my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: can't write to %s", ME_ERROR_LOG, filename);
+ my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: can't write to %s", ME_ERROR_LOG_ONLY, filename);
close(fd);
unlink(filename);
return(-1);
}
close(fd);
- my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: generated encrypted datakey for key id=%u, version=%u", ME_ERROR_LOG | ME_NOTE,
+ my_printf_error(ER_UNKNOWN_ERROR, "AWS KMS plugin: generated encrypted datakey for key id=%u, version=%u", ME_ERROR_LOG_ONLY | ME_NOTE,
keyid, version);
return(0);
}
@@ -552,13 +552,13 @@ static int rotate_single_key(uint key_id)
if (!ver)
{
- my_printf_error(ER_UNKNOWN_ERROR, "key %u does not exist", MYF(ME_JUST_WARNING), key_id);
+ my_printf_error(ER_UNKNOWN_ERROR, "key %u does not exist", MYF(ME_WARNING), key_id);
return -1;
}
else if (generate_and_save_datakey(key_id, ver + 1))
{
my_printf_error(ER_UNKNOWN_ERROR, "Could not generate datakey for key id= %u, ver= %u",
- MYF(ME_JUST_WARNING), key_id, ver);
+ MYF(ME_WARNING), key_id, ver);
return -1;
}
else
@@ -569,7 +569,7 @@ static int rotate_single_key(uint key_id)
if (load_key(&info))
{
my_printf_error(ER_UNKNOWN_ERROR, "Could not load datakey for key id= %u, ver= %u",
- MYF(ME_JUST_WARNING), key_id, ver);
+ MYF(ME_WARNING), key_id, ver);
return -1;
}
}
@@ -594,7 +594,7 @@ static void update_rotate(MYSQL_THD, struct st_mysql_sys_var *, void *, const vo
if (!master_key_id[0])
{
my_printf_error(ER_UNKNOWN_ERROR,
- "aws_key_management_master_key_id must be set to generate new data keys", MYF(ME_JUST_WARNING));
+ "aws_key_management_master_key_id must be set to generate new data keys", MYF(ME_WARNING));
return;
}
mtx.lock();
diff --git a/plugin/cracklib_password_check/cracklib_password_check.c b/plugin/cracklib_password_check/cracklib_password_check.c
index 22d5eee21f2..d64ef990983 100644
--- a/plugin/cracklib_password_check/cracklib_password_check.c
+++ b/plugin/cracklib_password_check/cracklib_password_check.c
@@ -21,7 +21,8 @@
static char *dictionary;
-static int crackme(MYSQL_CONST_LEX_STRING *username, MYSQL_CONST_LEX_STRING *password)
+static int crackme(const MYSQL_CONST_LEX_STRING *username,
+ const MYSQL_CONST_LEX_STRING *password)
{
char *user= alloca(username->length + 1);
char *host;
diff --git a/plugin/feedback/sender_thread.cc b/plugin/feedback/sender_thread.cc
index 92f186a1e2a..6ed5ed10cb2 100644
--- a/plugin/feedback/sender_thread.cc
+++ b/plugin/feedback/sender_thread.cc
@@ -90,9 +90,7 @@ static int prepare_for_fill(TABLE_LIST *tables)
in SHOW STATUS and we want to avoid skewing the statistics)
*/
thd->variables.pseudo_thread_id= thd->thread_id;
- mysql_mutex_lock(&LOCK_thread_count);
- threads.append(thd);
- mysql_mutex_unlock(&LOCK_thread_count);
+ server_threads.insert(thd);
thd->thread_stack= (char*) &tables;
if (thd->store_globals())
return 1;
@@ -117,11 +115,12 @@ static int prepare_for_fill(TABLE_LIST *tables)
tables->init_one_table(&INFORMATION_SCHEMA_NAME, &tbl_name, 0, TL_READ);
tables->schema_table= i_s_feedback;
+ tables->select_lex= thd->lex->first_select_lex();
+ DBUG_ASSERT(tables->select_lex);
tables->table= create_schema_table(thd, tables);
if (!tables->table)
return 1;
- tables->select_lex= thd->lex->current_select;
tables->table->pos_in_table_list= tables;
return 0;
@@ -138,7 +137,7 @@ static int prepare_for_fill(TABLE_LIST *tables)
*/
static bool going_down()
{
- return shutdown_plugin || shutdown_in_progress || (thd && thd->killed);
+ return shutdown_plugin || abort_loop || (thd && thd->killed);
}
/**
@@ -257,12 +256,9 @@ ret:
reset all thread local status variables to minimize
the effect of the background thread on SHOW STATUS.
*/
- mysql_mutex_lock(&LOCK_thread_count);
+ server_threads.erase(thd);
thd->set_status_var_init();
thd->killed= KILL_CONNECTION;
- thd->unlink();
- mysql_cond_broadcast(&COND_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
delete thd;
thd= 0;
}
diff --git a/plugin/handler_socket/handlersocket/database.cpp b/plugin/handler_socket/handlersocket/database.cpp
index a76428b29d3..52ea8f2a8c4 100644
--- a/plugin/handler_socket/handlersocket/database.cpp
+++ b/plugin/handler_socket/handlersocket/database.cpp
@@ -280,7 +280,7 @@ dbcontext::init_thread(const void *stack_bottom, volatile int& shutdown_flag)
DBG_THR(fprintf(stderr,
"thread_stack = %p sizeof(THD)=%zu sizeof(mtx)=%zu "
"O: %zu %zu %zu %zu %zu %zu %zu\n",
- thd->thread_stack, sizeof(THD), sizeof(LOCK_thread_count),
+ thd->thread_stack, sizeof(THD), sizeof(mysql_mutex_t),
DENA_THR_OFFSETOF(mdl_context),
DENA_THR_OFFSETOF(net),
DENA_THR_OFFSETOF(LOCK_thd_data),
@@ -307,7 +307,7 @@ dbcontext::init_thread(const void *stack_bottom, volatile int& shutdown_flag)
}
{
thd->thread_id = next_thread_id();
- add_to_active_threads(thd);
+ server_threads.insert(thd);
}
DBG_THR(fprintf(stderr, "HNDSOCK init thread wsts\n"));
@@ -341,10 +341,8 @@ dbcontext::term_thread()
close_tables_if();
my_pthread_setspecific_ptr(THR_THD, 0);
{
- pthread_mutex_lock(&LOCK_thread_count);
delete thd;
thd = 0;
- pthread_mutex_unlock(&LOCK_thread_count);
my_thread_end();
}
}
diff --git a/plugin/metadata_lock_info/metadata_lock_info.cc b/plugin/metadata_lock_info/metadata_lock_info.cc
index e32bbc55f3e..37c0ca3a460 100644
--- a/plugin/metadata_lock_info/metadata_lock_info.cc
+++ b/plugin/metadata_lock_info/metadata_lock_info.cc
@@ -21,7 +21,7 @@
#include "sql_show.h"
static const LEX_STRING metadata_lock_info_lock_name[] = {
- { C_STRING_WITH_LEN("Global read lock") },
+ { C_STRING_WITH_LEN("Backup lock") },
{ C_STRING_WITH_LEN("Schema metadata lock") },
{ C_STRING_WITH_LEN("Table metadata lock") },
{ C_STRING_WITH_LEN("Stored function metadata lock") },
@@ -29,23 +29,9 @@ static const LEX_STRING metadata_lock_info_lock_name[] = {
{ C_STRING_WITH_LEN("Stored package body metadata lock") },
{ C_STRING_WITH_LEN("Trigger metadata lock") },
{ C_STRING_WITH_LEN("Event metadata lock") },
- { C_STRING_WITH_LEN("Commit lock") },
{ C_STRING_WITH_LEN("User lock") },
};
-static const LEX_STRING metadata_lock_info_lock_mode[] = {
- { C_STRING_WITH_LEN("MDL_INTENTION_EXCLUSIVE") },
- { C_STRING_WITH_LEN("MDL_SHARED") },
- { C_STRING_WITH_LEN("MDL_SHARED_HIGH_PRIO") },
- { C_STRING_WITH_LEN("MDL_SHARED_READ") },
- { C_STRING_WITH_LEN("MDL_SHARED_WRITE") },
- { C_STRING_WITH_LEN("MDL_SHARED_UPGRADABLE") },
- { C_STRING_WITH_LEN("MDL_SHARED_READ_ONLY") },
- { C_STRING_WITH_LEN("MDL_SHARED_NO_WRITE") },
- { C_STRING_WITH_LEN("MDL_SHARED_NO_READ_WRITE") },
- { C_STRING_WITH_LEN("MDL_EXCLUSIVE") },
-};
-
static ST_FIELD_INFO i_s_metadata_lock_info_fields_info[] =
{
{"THREAD_ID", 20, MYSQL_TYPE_LONGLONG, 0,
@@ -71,22 +57,21 @@ struct st_i_s_metadata_param
int i_s_metadata_lock_info_fill_row(
MDL_ticket *mdl_ticket,
- void *arg
+ void *arg,
+ bool granted
) {
st_i_s_metadata_param *param = (st_i_s_metadata_param *) arg;
THD *thd = param->thd;
TABLE *table = param->table;
DBUG_ENTER("i_s_metadata_lock_info_fill_row");
MDL_context *mdl_ctx = mdl_ticket->get_ctx();
- enum_mdl_type mdl_ticket_type = mdl_ticket->get_type();
MDL_key *mdl_key = mdl_ticket->get_key();
MDL_key::enum_mdl_namespace mdl_namespace = mdl_key->mdl_namespace();
+ if (!granted)
+ DBUG_RETURN(0);
table->field[0]->store((longlong) mdl_ctx->get_thread_id(), TRUE);
table->field[1]->set_notnull();
- table->field[1]->store(
- metadata_lock_info_lock_mode[(int) mdl_ticket_type].str,
- metadata_lock_info_lock_mode[(int) mdl_ticket_type].length,
- system_charset_info);
+ table->field[1]->store(mdl_ticket->get_type_name(), system_charset_info);
table->field[2]->set_null();
table->field[3]->set_notnull();
table->field[3]->store(
@@ -122,8 +107,6 @@ static int i_s_metadata_lock_info_init(
compile_time_assert(sizeof(metadata_lock_info_lock_name)/sizeof(LEX_STRING)
== MDL_key::NAMESPACE_END);
- compile_time_assert(sizeof(metadata_lock_info_lock_mode)/sizeof(LEX_STRING)
- == MDL_TYPE_END);
ST_SCHEMA_TABLE *schema = (ST_SCHEMA_TABLE *) p;
DBUG_ENTER("i_s_metadata_lock_info_init");
diff --git a/plugin/metadata_lock_info/mysql-test/metadata_lock_info/r/global_read_lock.result b/plugin/metadata_lock_info/mysql-test/metadata_lock_info/r/global_read_lock.result
index 5803d7d1290..12afd5010cc 100644
--- a/plugin/metadata_lock_info/mysql-test/metadata_lock_info/r/global_read_lock.result
+++ b/plugin/metadata_lock_info/mysql-test/metadata_lock_info/r/global_read_lock.result
@@ -3,8 +3,7 @@ lock_mode lock_duration lock_type table_schema table_name
FLUSH TABLES WITH READ LOCK;
SELECT lock_mode, lock_duration, lock_type, table_schema, table_name FROM information_schema.metadata_lock_info;
lock_mode lock_duration lock_type table_schema table_name
-MDL_SHARED NULL Commit lock
-MDL_SHARED NULL Global read lock
+MDL_BACKUP_FTWRL2 NULL Backup lock
UNLOCK TABLES;
SELECT lock_mode, lock_duration, lock_type, table_schema, table_name FROM information_schema.metadata_lock_info;
lock_mode lock_duration lock_type table_schema table_name
diff --git a/plugin/query_response_time/query_response_time.cc b/plugin/query_response_time/query_response_time.cc
index 5d6119d20ef..a669f7d4236 100644
--- a/plugin/query_response_time/query_response_time.cc
+++ b/plugin/query_response_time/query_response_time.cc
@@ -147,42 +147,36 @@ void print_time(char* buffer, std::size_t buffer_size, const char* format,
class time_collector
{
+ utility *m_utility;
+ Atomic_counter<uint32_t> m_count[OVERALL_POWER_COUNT + 1];
+ Atomic_counter<uint64_t> m_total[OVERALL_POWER_COUNT + 1];
+
public:
- time_collector(utility& u) : m_utility(&u)
- { }
- ~time_collector()
- { }
- uint32 count(uint index)
- {
- return my_atomic_load32((int32*)&m_count[index]);
- }
- uint64 total(uint index)
- {
- return my_atomic_load64((int64*)&m_total[index]);
- }
-public:
+ time_collector(utility& u): m_utility(&u) { flush(); }
+ ~time_collector() { }
+ uint32_t count(uint index) { return m_count[index]; }
+ uint64_t total(uint index) { return m_total[index]; }
void flush()
{
- memset((void*)&m_count,0,sizeof(m_count));
- memset((void*)&m_total,0,sizeof(m_total));
+ for (auto i= 0; i < OVERALL_POWER_COUNT + 1; i++)
+ {
+ m_count[i]= 0;
+ m_total[i]= 0;
+ }
}
- void collect(uint64 time)
+ void collect(uint64_t time)
{
int i= 0;
for(int count= m_utility->bound_count(); count > i; ++i)
{
if(m_utility->bound(i) > time)
{
- my_atomic_add32((int32*)(&m_count[i]), 1);
- my_atomic_add64((int64*)(&m_total[i]), time);
+ m_count[i]++;
+ m_total[i]+= time;
break;
}
}
}
-private:
- utility* m_utility;
- uint32 m_count[OVERALL_POWER_COUNT + 1];
- uint64 m_total[OVERALL_POWER_COUNT + 1];
};
class collector
@@ -191,7 +185,6 @@ public:
collector() : m_time(m_utility)
{
m_utility.setup(DEFAULT_BASE);
- m_time.flush();
}
public:
void flush()
diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c
index 801718265ad..fe0878bdaa1 100644
--- a/plugin/server_audit/server_audit.c
+++ b/plugin/server_audit/server_audit.c
@@ -737,7 +737,7 @@ static int user_coll_fill(struct user_coll *c, char *users,
internal_stop_logging= 1;
CLIENT_ERROR(1, "User '%.*s' was removed from the"
" server_audit_excl_users.",
- MYF(ME_JUST_WARNING), (int) cmp_length, users);
+ MYF(ME_WARNING), (int) cmp_length, users);
internal_stop_logging= 0;
blank_user(cmp_user);
refill_cmp_coll= 1;
@@ -746,7 +746,7 @@ static int user_coll_fill(struct user_coll *c, char *users,
{
internal_stop_logging= 1;
CLIENT_ERROR(1, "User '%.*s' is in the server_audit_incl_users, "
- "so wasn't added.", MYF(ME_JUST_WARNING), (int) cmp_length, users);
+ "so wasn't added.", MYF(ME_WARNING), (int) cmp_length, users);
internal_stop_logging= 0;
remove_user(users);
continue;
@@ -1054,7 +1054,7 @@ static int start_logging()
"Could not create file '%s'.", alt_fname);
is_active= 0;
CLIENT_ERROR(1, "SERVER AUDIT plugin can't create file '%s'.",
- MYF(ME_JUST_WARNING), alt_fname);
+ MYF(ME_WARNING), alt_fname);
return 1;
}
error_header();
@@ -2599,7 +2599,7 @@ static void update_file_path(MYSQL_THD thd,
{
error_header();
fprintf(stderr, "Logging was disabled..\n");
- CLIENT_ERROR(1, "Logging was disabled.", MYF(ME_JUST_WARNING));
+ CLIENT_ERROR(1, "Logging was disabled.", MYF(ME_WARNING));
}
goto exit_func;
}
@@ -2815,7 +2815,7 @@ static void update_logging(MYSQL_THD thd,
start_logging();
if (!logging)
{
- CLIENT_ERROR(1, "Logging was disabled.", MYF(ME_JUST_WARNING));
+ CLIENT_ERROR(1, "Logging was disabled.", MYF(ME_WARNING));
}
}
else
diff --git a/plugin/simple_password_check/simple_password_check.c b/plugin/simple_password_check/simple_password_check.c
index 5a76c3d3005..2d298f0efa9 100644
--- a/plugin/simple_password_check/simple_password_check.c
+++ b/plugin/simple_password_check/simple_password_check.c
@@ -22,8 +22,8 @@
static unsigned min_length, min_digits, min_letters, min_others;
-static int validate(MYSQL_CONST_LEX_STRING *username,
- MYSQL_CONST_LEX_STRING *password)
+static int validate(const MYSQL_CONST_LEX_STRING *username,
+ const MYSQL_CONST_LEX_STRING *password)
{
unsigned digits=0 , uppers=0 , lowers=0, others=0, length= (unsigned)password->length;
const char *ptr= password->str, *end= ptr + length;
diff --git a/plugin/wsrep_info/mysql-test/wsrep_info/my.cnf b/plugin/wsrep_info/mysql-test/wsrep_info/my.cnf
index 70682178ca1..1c7a8ad4122 100644
--- a/plugin/wsrep_info/mysql-test/wsrep_info/my.cnf
+++ b/plugin/wsrep_info/mysql-test/wsrep_info/my.cnf
@@ -2,23 +2,26 @@
!include include/default_mysqld.cnf
[mysqld]
-wsrep-on=1
binlog-format=row
innodb-autoinc-lock-mode=2
innodb-locks-unsafe-for-binlog=1
-wsrep-cluster-address=gcomm://
wsrep_provider=@ENV.WSREP_PROVIDER
[mysqld.1]
#galera_port=@OPT.port
+#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
+wsrep-cluster-address=gcomm://
wsrep_provider_options='base_port=@mysqld.1.#galera_port'
wsrep_sst_receive_address='127.0.0.1:@mysqld.1.#sst_port'
wsrep_node_name=test-node-1
[mysqld.2]
#galera_port=@OPT.port
+#ist_port=@OPT.port
#sst_port=@OPT.port
+wsrep-on=1
wsrep_cluster_address='gcomm://127.0.0.1:@mysqld.1.#galera_port'
wsrep_provider_options='base_port=@mysqld.2.#galera_port'
wsrep_sst_receive_address='127.0.0.1:@mysqld.2.#sst_port'
diff --git a/plugin/wsrep_info/mysql-test/wsrep_info/r/plugin.result b/plugin/wsrep_info/mysql-test/wsrep_info/r/plugin.result
index f33a628d428..f99f27f3539 100644
--- a/plugin/wsrep_info/mysql-test/wsrep_info/r/plugin.result
+++ b/plugin/wsrep_info/mysql-test/wsrep_info/r/plugin.result
@@ -1,8 +1,10 @@
+connection node_2;
+connection node_1;
# On node 1
connection node_1;
SELECT * FROM INFORMATION_SCHEMA.WSREP_STATUS;
-NODE_INDEX NODE_STATUS CLUSTER_STATUS CLUSTER_SIZE CLUSTER_STATE_UUID CLUSTER_STATE_SEQNO CLUSTER_CONF_ID GAP PROTOCOL_VERSION
-<IDX> Synced Primary 2 <CLUSTER_STATE_UUID> 0 <CLUSTER_CONF_ID> NO 3
+NODE_INDEX NODE_STATUS CLUSTER_STATUS CLUSTER_SIZE CLUSTER_STATE_UUID CLUSTER_STATE_SEQNO CLUSTER_CONF_ID PROTOCOL_VERSION
+<IDX> synced primary 2 <CLUSTER_STATE_UUID> 2 <CLUSTER_CONF_ID> 4
SELECT * FROM INFORMATION_SCHEMA.WSREP_MEMBERSHIP ORDER BY NAME;
INDEX UUID NAME ADDRESS
<IDX> <MEMBER_ID> test-node-1 <ADDRESS>
@@ -10,8 +12,8 @@ INDEX UUID NAME ADDRESS
# On node 2
connection node_2;
SELECT * FROM INFORMATION_SCHEMA.WSREP_STATUS;
-NODE_INDEX NODE_STATUS CLUSTER_STATUS CLUSTER_SIZE CLUSTER_STATE_UUID CLUSTER_STATE_SEQNO CLUSTER_CONF_ID GAP PROTOCOL_VERSION
-<IDX> Synced Primary 2 <CLUSTER_STATE_UUID> 0 <CLUSTER_CONF_ID> YES 3
+NODE_INDEX NODE_STATUS CLUSTER_STATUS CLUSTER_SIZE CLUSTER_STATE_UUID CLUSTER_STATE_SEQNO CLUSTER_CONF_ID PROTOCOL_VERSION
+<IDX> synced primary 2 <CLUSTER_STATE_UUID> 2 <CLUSTER_CONF_ID> 4
SELECT * FROM INFORMATION_SCHEMA.WSREP_MEMBERSHIP ORDER BY NAME;
INDEX UUID NAME ADDRESS
<IDX> <MEMBER_ID> test-node-1 <ADDRESS>
diff --git a/plugin/wsrep_info/mysql-test/wsrep_info/suite.pm b/plugin/wsrep_info/mysql-test/wsrep_info/suite.pm
index 96a0c706e01..a095dace6cb 100644
--- a/plugin/wsrep_info/mysql-test/wsrep_info/suite.pm
+++ b/plugin/wsrep_info/mysql-test/wsrep_info/suite.pm
@@ -9,9 +9,9 @@ return "Not run for embedded server" if $::opt_embedded_server;
return "WSREP is not compiled in" unless defined $::mysqld_variables{'wsrep-on'};
my ($provider) = grep { -f $_ } $ENV{WSREP_PROVIDER},
- "/usr/lib64/galera-3/libgalera_smm.so",
+ "/usr/lib64/galera-4/libgalera_smm.so",
"/usr/lib64/galera/libgalera_smm.so",
- "/usr/lib/galera-3/libgalera_smm.so",
+ "/usr/lib/galera-4/libgalera_smm.so",
"/usr/lib/galera/libgalera_smm.so";
return "No wsrep provider library" unless -f $provider;
diff --git a/plugin/wsrep_info/plugin.cc b/plugin/wsrep_info/plugin.cc
index 428bcc5dcfc..18e41b20c98 100644
--- a/plugin/wsrep_info/plugin.cc
+++ b/plugin/wsrep_info/plugin.cc
@@ -52,37 +52,9 @@
#define COLUMN_WSREP_STATUS_CLUSTER_STATE_SEQNO 5
/* Cluster membership changes */
#define COLUMN_WSREP_STATUS_CLUSTER_CONF_ID 6
-/* Gap between global and local states ? */
-#define COLUMN_WSREP_STATUS_GAP 7
/* Application protocol version */
-#define COLUMN_WSREP_STATUS_PROTO_VERSION 8
+#define COLUMN_WSREP_STATUS_PROTO_VERSION 7
-static const char* get_member_status(wsrep_member_status_t status)
-{
- switch (status)
- {
- case WSREP_MEMBER_UNDEFINED: return "Undefined";
- case WSREP_MEMBER_JOINER: return "Joiner";
- case WSREP_MEMBER_DONOR: return "Donor";
- case WSREP_MEMBER_JOINED: return "Joined";
- case WSREP_MEMBER_SYNCED: return "Synced";
- case WSREP_MEMBER_ERROR: return "Error";
- default: break;
- }
- return "UNKNOWN";
-}
-
-static const char* get_cluster_status(wsrep_view_status_t status)
-{
- switch (status)
- {
- case WSREP_VIEW_PRIMARY: return "Primary";
- case WSREP_VIEW_NON_PRIMARY: return "Non-primary";
- case WSREP_VIEW_DISCONNECTED: return "Disconnected";
- default: break;
- }
- return "UNKNOWN";
-}
static ST_FIELD_INFO wsrep_memb_fields[]=
{
@@ -107,7 +79,6 @@ static ST_FIELD_INFO wsrep_status_fields[]=
0, 0, 0, 0},
{"CLUSTER_CONF_ID", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG,
0, 0, 0, 0},
- {"GAP", 10, MYSQL_TYPE_STRING, 0, 0, 0, 0},
{"PROTOCOL_VERSION", MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG,
0, 0, 0, 0},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0, 0}
@@ -122,25 +93,26 @@ static int wsrep_memb_fill_table(THD *thd, TABLE_LIST *tables, COND *cond)
wsrep_config_state->lock();
- Dynamic_array<wsrep_member_info_t> *memb_arr=
- wsrep_config_state->get_member_info();
+ const wsrep::view& view(wsrep_config_state->get_view_info());
+ const std::vector<wsrep::view::member>& members(view.members());
+
TABLE *table= tables->table;
- for (unsigned int i= 0; i < memb_arr->elements(); i ++)
+ for (unsigned int i= 0; i < members.size(); i++)
{
- wsrep_member_info_t memb= memb_arr->at(i);
-
table->field[COLUMN_WSREP_MEMB_INDEX]->store(i, 0);
- char uuid[40];
- wsrep_uuid_print(&memb.id, uuid, sizeof(uuid));
- table->field[COLUMN_WSREP_MEMB_UUID]->store(uuid, sizeof(uuid),
+ std::ostringstream os;
+ os << members[i].id();
+ table->field[COLUMN_WSREP_MEMB_UUID]->store(os.str().c_str(),
+ os.str().length(),
system_charset_info);
- table->field[COLUMN_WSREP_MEMB_NAME]->store(memb.name, strlen(memb.name),
+ table->field[COLUMN_WSREP_MEMB_NAME]->store(members[i].name().c_str(),
+ members[i].name().length(),
system_charset_info);
- table->field[COLUMN_WSREP_MEMB_ADDRESS]->store(memb.incoming,
- strlen(memb.incoming),
+ table->field[COLUMN_WSREP_MEMB_ADDRESS]->store(members[i].incoming().c_str(),
+ members[i].incoming().length(),
system_charset_info);
if (schema_table_store_record(thd, table))
@@ -177,35 +149,34 @@ static int wsrep_status_fill_table(THD *thd, TABLE_LIST *tables, COND *cond)
wsrep_config_state->lock();
- wsrep_view_info_t view= wsrep_config_state->get_view_info();
- wsrep_member_status_t status= wsrep_config_state->get_status();
+ const wsrep::view& view= wsrep_config_state->get_view_info();
+ enum wsrep::server_state::state status= wsrep_config_state->get_status();
TABLE *table= tables->table;
table->field[COLUMN_WSREP_STATUS_NODE_INDEX]
- ->store(view.my_idx, 0);
+ ->store(view.own_index(), 0);
table->field[COLUMN_WSREP_STATUS_NODE_STATUS]
- ->store(get_member_status(status), strlen(get_member_status(status)),
+ ->store(to_c_string(status),
+ strlen(to_c_string(status)),
system_charset_info);
table->field[COLUMN_WSREP_STATUS_CLUSTER_STATUS]
- ->store(get_cluster_status(view.status),
- strlen(get_cluster_status(view.status)),
+ ->store(to_c_string(view.status()),
+ strlen(to_c_string(view.status())),
system_charset_info);
- table->field[COLUMN_WSREP_STATUS_CLUSTER_SIZE]->store(view.memb_num, 0);
+ table->field[COLUMN_WSREP_STATUS_CLUSTER_SIZE]->store(view.members().size(), 0);
- char uuid[40];
- wsrep_uuid_print(&view.state_id.uuid, uuid, sizeof(uuid));
+ std::ostringstream os;
+ os << view.state_id().id();
table->field[COLUMN_WSREP_STATUS_CLUSTER_STATE_UUID]
- ->store(uuid, sizeof(uuid), system_charset_info);
+ ->store(os.str().c_str(), os.str().length(), system_charset_info);
table->field[COLUMN_WSREP_STATUS_CLUSTER_STATE_SEQNO]
- ->store(view.state_id.seqno, 0);
- table->field[COLUMN_WSREP_STATUS_CLUSTER_CONF_ID]->store(view.view, 0);
-
- const char *gap= (view.state_gap == true) ? "YES" : "NO";
- table->field[COLUMN_WSREP_STATUS_GAP]->store(gap, strlen(gap),
- system_charset_info);
- table->field[COLUMN_WSREP_STATUS_PROTO_VERSION]->store(view.proto_ver, 0);
+ ->store(view.state_id().seqno().get(), 0);
+ table->field[COLUMN_WSREP_STATUS_CLUSTER_CONF_ID]
+ ->store(view.view_seqno().get(), 0);
+ table->field[COLUMN_WSREP_STATUS_PROTO_VERSION]
+ ->store(view.protocol_version(), 0);
if (schema_table_store_record(thd, table))
rc= 1;
diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt
index 901a774be4b..eca5b7ad3e5 100644
--- a/scripts/CMakeLists.txt
+++ b/scripts/CMakeLists.txt
@@ -31,7 +31,7 @@ ENDIF()
IF(CAT_EXECUTABLE)
SET(CAT_COMMAND COMMAND
${CMAKE_COMMAND} -E chdir ${CMAKE_CURRENT_SOURCE_DIR}
- ${CAT_EXECUTABLE} mysql_system_tables.sql mysql_system_tables_fix.sql mysql_performance_tables.sql >
+ ${CAT_EXECUTABLE} mysql_system_tables_fix.sql mysql_system_tables.sql mysql_performance_tables.sql >
${CMAKE_CURRENT_BINARY_DIR}/mysql_fix_privilege_tables.sql
)
ELSEIF(WIN32)
@@ -39,7 +39,7 @@ ELSEIF(WIN32)
native_outfile )
SET(CAT_COMMAND
COMMAND ${CMAKE_COMMAND} -E chdir ${CMAKE_CURRENT_SOURCE_DIR}
- cmd /c copy /b mysql_system_tables.sql + mysql_system_tables_fix.sql + mysql_performance_tables.sql
+ cmd /c copy /b mysql_system_tables_fix.sql + mysql_system_tables.sql + mysql_performance_tables.sql
${native_outfile} )
ELSE()
MESSAGE(FATAL_ERROR "Cannot concatenate files")
diff --git a/scripts/fill_help_tables.sql b/scripts/fill_help_tables.sql
index f8055867b5c..1dc2ca1f9cb 100644
--- a/scripts/fill_help_tables.sql
+++ b/scripts/fill_help_tables.sql
@@ -31,6 +31,7 @@ delete from help_category;
delete from help_keyword;
delete from help_relation;
+lock tables help_topic write, help_category write, help_keyword write, help_relation write;
insert into help_category (help_category_id,name,parent_category_id,url) values (1,'Geographic',0,'');
insert into help_category (help_category_id,name,parent_category_id,url) values (2,'Polygon properties',34,'');
insert into help_category (help_category_id,name,parent_category_id,url) values (3,'WKT',34,'');
@@ -2073,3 +2074,4 @@ insert into help_relation (help_topic_id,help_keyword_id) values (209,461);
insert into help_relation (help_topic_id,help_keyword_id) values (468,461);
insert into help_relation (help_topic_id,help_keyword_id) values (201,462);
insert into help_relation (help_topic_id,help_keyword_id) values (468,463);
+unlock tables;
diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh
index 52107405525..59607d7f508 100644
--- a/scripts/mysql_install_db.sh
+++ b/scripts/mysql_install_db.sh
@@ -37,8 +37,8 @@ force=0
in_rpm=0
ip_only=0
cross_bootstrap=0
-auth_root_authentication_method=normal
-auth_root_socket_user='root'
+auth_root_authentication_method=socket
+auth_root_socket_user=""
skip_test_db=0
dirname0=`dirname $0 2>/dev/null`
@@ -49,17 +49,17 @@ usage()
cat <<EOF
Usage: $0 [OPTIONS]
--auth-root-authentication-method=normal|socket
- Chooses the authentication method for the created initial
- root user. The default is 'normal' to creates a root user
- that can login without password, which can be insecure.
- The alternative 'socket' allows only the system root user
- to login as MariaDB root; this requires the unix socket
- authentication plugin.
+ Chooses the authentication method for the created
+ initial root user. The historical behavior is 'normal'
+ to creates a root user that can login without password,
+ which can be insecure. The default behavior 'socket'
+ sets an invalid root password but allows the system root
+ user to login as MariaDB root without a password.
--auth-root-socket-user=user
Used with --auth-root-authentication-method=socket. It
- specifies the name of the MariaDB root account, as well
- as of the system account allowed to access it. Defaults
- to 'root'.
+ specifies the name of the second MariaDB root account,
+ as well as of the system account allowed to access it.
+ Defaults to the value of --user.
--basedir=path The path to the MariaDB installation directory.
--builddir=path If using --srcdir with out-of-directory builds, you
will need to set this to the location of the build
@@ -240,7 +240,7 @@ cannot_find_file()
fi
echo
- echo "If you compiled from source, you need to either run 'make install' to"
+ echo "If you compiledx from source, you need to either run 'make install' to"
echo "copy the software into the correct location ready for operation."
echo "If you don't want to do a full install, you can use the --srcdir"
echo "option to only install the mysql database and privilege tables"
@@ -316,6 +316,7 @@ then
srcpkgdatadir="$srcdir/scripts"
buildpkgdatadir="$builddir/scripts"
plugindir="$builddir/plugin/auth_socket"
+ pamtooldir="$builddir/plugin/auth_pam"
elif test -n "$basedir"
then
bindir="$basedir/bin" # only used in the help text
@@ -344,7 +345,8 @@ then
cannot_find_file fill_help_tables.sql @pkgdata_locations@
exit 1
fi
- plugindir=`find_in_dirs --dir auth_socket.so $basedir/lib*/plugin $basedir/lib*/mysql/plugin`
+ plugindir=`find_in_dirs --dir auth_pam.so $basedir/lib*/plugin $basedir/lib*/mysql/plugin`
+ pamtooldir=$plugindir
# relative from where the script was run for a relocatable install
elif test -n "$dirname0" -a -x "$rel_mysqld" -a ! "$rel_mysqld" -ef "@sbindir@/mysqld"
then
@@ -355,6 +357,7 @@ then
srcpkgdatadir="$basedir/@INSTALL_MYSQLSHAREDIR@"
buildpkgdatadir="$basedir/@INSTALL_MYSQLSHAREDIR@"
plugindir="$basedir/@INSTALL_PLUGINDIR@"
+ pamtooldir=$plugindir
else
basedir="@prefix@"
bindir="@bindir@"
@@ -363,6 +366,7 @@ else
srcpkgdatadir="@pkgdatadir@"
buildpkgdatadir="@pkgdatadir@"
plugindir="@pkgplugindir@"
+ pamtooldir="@pkgplugindir@"
fi
# Set up paths to SQL scripts required for bootstrap
@@ -463,9 +467,33 @@ done
if test -n "$user"
then
+ chown $user "$pamtooldir/auth_pam_tool_dir"
+ if test $? -ne 0
+ then
+ echo "Cannot change ownership of the '$pamtooldir/auth_pam_tool_dir' directory"
+ echo " to the '$user' user. Check that you have the necessary permissions and try again."
+ exit 1
+ fi
+ if test -z "$srcdir"
+ then
+ chown 0 "$pamtooldir/auth_pam_tool_dir/auth_pam_tool"
+ if test $? -ne 0
+ then
+ echo "Couldn't set an owner to '$pamtooldir/auth_pam_tool_dir/auth_pam_tool'."
+ echo " It must be root, the PAM authentication plugin doesn't work otherwise.."
+ echo
+ fi
+ fi
args="$args --user=$user"
fi
+if test -f "$ldata/mysql/user.frm"
+then
+ echo "mysql.user table already exists!"
+ echo "Run mysql_upgrade, not mysql_install_db"
+ exit 0
+fi
+
# When doing a "cross bootstrap" install, no reference to the current
# host should be added to the system tables. So we filter out any
# lines which contain the current host name.
@@ -487,17 +515,20 @@ mysqld_install_cmd_line()
--net_buffer_length=16K
}
+# Use $auth_root_socket_user if explicitly specified.
+# Otherwise use the owner of datadir - ${user:-$USER}
+# Use 'root' as a fallback
+auth_root_socket_user=${auth_root_socket_user:-${user:-${USER:-root}}}
+
cat_sql()
{
echo "use mysql;"
case "$auth_root_authentication_method" in
normal)
- echo "SET @skip_auth_root_nopasswd=NULL;"
echo "SET @auth_root_socket=NULL;"
;;
socket)
- echo "SET @skip_auth_root_nopasswd=1;"
echo "SET @auth_root_socket='$auth_root_socket_user';"
;;
esac
@@ -576,6 +607,16 @@ then
echo "which will also give you the option of removing the test"
echo "databases and anonymous user created by default. This is"
echo "strongly recommended for production servers."
+ else
+ echo
+ echo
+ echo "Two all-privilege accounts were created."
+ echo "One is root@localhost, it has no password, but you need to"
+ echo "be system 'root' user to connect. Use, for example, sudo mysql"
+ echo "The second is $auth_root_socket_user@localhost, it has no password either, but"
+ echo "you need to be the system '$auth_root_socket_user' user to connect."
+ echo "After connecting you can set the password, if you would need to be"
+ echo "able to connect as any of these users with a password and without sudo"
fi
echo
diff --git a/scripts/mysql_secure_installation.sh b/scripts/mysql_secure_installation.sh
index 57e4d43dfad..6441bbdabbd 100644
--- a/scripts/mysql_secure_installation.sh
+++ b/scripts/mysql_secure_installation.sh
@@ -17,6 +17,7 @@
config=".my.cnf.$$"
command=".mysql.$$"
+output=".my.output.$$"
trap "interrupt" 1 2 3 6 15
@@ -216,7 +217,7 @@ prepare() {
do_query() {
echo "$1" >$command
#sed 's,^,> ,' < $command # Debugging
- $mysql_command --defaults-file=$config $defaults_extra_file $no_defaults $args <$command
+ $mysql_command --defaults-file=$config $defaults_extra_file $no_defaults $args <$command >$output
return $?
}
@@ -268,15 +269,18 @@ get_root_password() {
echo
stty echo
if [ "x$password" = "x" ]; then
- hadpass=0
+ emptypass=1
else
- hadpass=1
+ emptypass=0
fi
rootpass=$password
make_config
- do_query ""
+ do_query "show create user root@localhost"
status=$?
done
+ if grep -q unix_socket $output; then
+ emptypass=0
+ fi
echo "OK, successfully used password, moving on..."
echo
}
@@ -304,7 +308,7 @@ set_root_password() {
fi
esc_pass=`basic_single_escape "$password1"`
- do_query "UPDATE mysql.user SET Password=PASSWORD('$esc_pass') WHERE User='root';"
+ do_query "UPDATE mysql.global_priv SET priv=json_set(priv, '$.plugin', 'mysql_native_password', '$.authentication_string', PASSWORD('$esc_pass')) WHERE User='root';"
if [ $? -eq 0 ]; then
echo "Password updated successfully!"
echo "Reloading privilege tables.."
@@ -324,7 +328,7 @@ set_root_password() {
}
remove_anonymous_users() {
- do_query "DELETE FROM mysql.user WHERE User='';"
+ do_query "DELETE FROM mysql.global_priv WHERE User='';"
if [ $? -eq 0 ]; then
echo " ... Success!"
else
@@ -336,7 +340,7 @@ remove_anonymous_users() {
}
remove_remote_root() {
- do_query "DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1');"
+ do_query "DELETE FROM mysql.global_priv WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1');"
if [ $? -eq 0 ]; then
echo " ... Success!"
else
@@ -386,7 +390,7 @@ interrupt() {
cleanup() {
echo "Cleaning up..."
- rm -f $config $command
+ rm -f $config $command $output
}
# Remove the files before exiting.
@@ -405,9 +409,8 @@ echo "NOTE: RUNNING ALL PARTS OF THIS SCRIPT IS RECOMMENDED FOR ALL MariaDB"
echo " SERVERS IN PRODUCTION USE! PLEASE READ EACH STEP CAREFULLY!"
echo
echo "In order to log into MariaDB to secure it, we'll need the current"
-echo "password for the root user. If you've just installed MariaDB, and"
-echo "you haven't set the root password yet, the password will be blank,"
-echo "so you should just press enter here."
+echo "password for the root user. If you've just installed MariaDB, and"
+echo "haven't set the root password yet, you should just press enter here."
echo
get_root_password
@@ -417,15 +420,47 @@ get_root_password
# Set the root password
#
-echo "Setting the root password ensures that nobody can log into the MariaDB"
-echo "root user without the proper authorisation."
+echo "Setting the root password or using the unix_socket ensures that nobody"
+echo "can log into the MariaDB root user without the proper authorisation."
+echo
+
+while true ; do
+ if [ $emptypass -eq 1 ]; then
+ echo $echo_n "Enable unix_socket authentication? [Y/n] $echo_c"
+ else
+ echo "You already have your root account protected, so you can safely answer 'n'."
+ echo
+ echo $echo_n "Switch to unix_socket authentication [Y/n] $echo_c"
+ fi
+ read reply
+ validate_reply $reply && break
+done
+
+if [ "$reply" = "n" ]; then
+ echo " ... skipping."
+else
+ emptypass=0
+ do_query "UPDATE mysql.global_priv SET priv=json_set(priv, '$.plugin', 'mysql_native_password', '$.authentication_string', 'invalid', '$.auth_or', json_array(json_object(), json_object('plugin', 'unix_socket'))) WHERE User='root';"
+ if [ $? -eq 0 ]; then
+ echo "Enabled successfully!"
+ echo "Reloading privilege tables.."
+ reload_privilege_tables
+ if [ $? -eq 1 ]; then
+ clean_and_exit
+ fi
+ echo
+ else
+ echo "Failed!"
+ clean_and_exit
+ fi
+fi
echo
while true ; do
- if [ $hadpass -eq 0 ]; then
+ if [ $emptypass -eq 1 ]; then
echo $echo_n "Set root password? [Y/n] $echo_c"
else
- echo "You already have a root password set, so you can safely answer 'n'."
+ echo "You already have your root account protected, so you can safely answer 'n'."
echo
echo $echo_n "Change the root password? [Y/n] $echo_c"
fi
diff --git a/scripts/mysql_system_tables.sql b/scripts/mysql_system_tables.sql
index 9f8af61752f..f788f5d67d5 100644
--- a/scripts/mysql_system_tables.sql
+++ b/scripts/mysql_system_tables.sql
@@ -21,70 +21,118 @@
set sql_mode='';
set @orig_storage_engine=@@storage_engine;
-set storage_engine=myisam;
+set storage_engine=Aria;
set system_versioning_alter_history=keep;
set @have_innodb= (select count(engine) from information_schema.engines where engine='INNODB' and support != 'NO');
-SET @innodb_or_myisam=IF(@have_innodb <> 0, 'InnoDB', 'MyISAM');
+SET @innodb_or_aria=IF(@have_innodb <> 0, 'InnoDB', 'Aria');
-CREATE TABLE IF NOT EXISTS db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_history_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges';
+CREATE TABLE IF NOT EXISTS db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_history_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=Aria transactional=1 CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges';
-- Remember for later if db table already existed
set @had_db_table= @@warning_count != 0;
-CREATE TABLE IF NOT EXISTS host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges';
-
-CREATE TABLE IF NOT EXISTS user ( Host char(60) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Password char(41) character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tablespace_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_history_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) DEFAULT 0 NOT NULL, plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL, authentication_string TEXT NOT NULL, password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, default_role char(80) binary DEFAULT '' NOT NULL, max_statement_time decimal(12,6) DEFAULT 0 NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
+CREATE TABLE IF NOT EXISTS global_priv (Host char(60) binary DEFAULT '', User char(80) binary DEFAULT '', Priv JSON NOT NULL DEFAULT '{}' CHECK(JSON_VALID(Priv)), PRIMARY KEY Host (Host,User)) engine=Aria transactional=1 CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
+
+CREATE DEFINER=root@localhost SQL SECURITY DEFINER VIEW IF NOT EXISTS user AS SELECT
+ Host,
+ User,
+ IF(JSON_VALUE(Priv, '$.plugin') IN ('mysql_native_password', 'mysql_old_password'), IFNULL(JSON_VALUE(Priv, '$.authentication_string'), ''), '') AS Password,
+ IF(JSON_VALUE(Priv, '$.access') & 1, 'Y', 'N') AS Select_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 2, 'Y', 'N') AS Insert_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 4, 'Y', 'N') AS Update_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 8, 'Y', 'N') AS Delete_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 16, 'Y', 'N') AS Create_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 32, 'Y', 'N') AS Drop_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 64, 'Y', 'N') AS Reload_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 128, 'Y', 'N') AS Shutdown_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 256, 'Y', 'N') AS Process_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 512, 'Y', 'N') AS File_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 1024, 'Y', 'N') AS Grant_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 2048, 'Y', 'N') AS References_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 4096, 'Y', 'N') AS Index_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 8192, 'Y', 'N') AS Alter_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 16384, 'Y', 'N') AS Show_db_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 32768, 'Y', 'N') AS Super_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 65536, 'Y', 'N') AS Create_tmp_table_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 131072, 'Y', 'N') AS Lock_tables_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 262144, 'Y', 'N') AS Execute_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 524288, 'Y', 'N') AS Repl_slave_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 1048576, 'Y', 'N') AS Repl_client_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 2097152, 'Y', 'N') AS Create_view_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 4194304, 'Y', 'N') AS Show_view_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 8388608, 'Y', 'N') AS Create_routine_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 16777216, 'Y', 'N') AS Alter_routine_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 33554432, 'Y', 'N') AS Create_user_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 67108864, 'Y', 'N') AS Event_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 134217728, 'Y', 'N') AS Trigger_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 268435456, 'Y', 'N') AS Create_tablespace_priv,
+ IF(JSON_VALUE(Priv, '$.access') & 536870912, 'Y', 'N') AS Delete_history_priv,
+ ELT(IFNULL(JSON_VALUE(Priv, '$.ssl_type'), 0) + 1, '', 'ANY','X509', 'SPECIFIED') AS ssl_type,
+ IFNULL(JSON_VALUE(Priv, '$.ssl_cipher'), '') AS ssl_cipher,
+ IFNULL(JSON_VALUE(Priv, '$.x509_issuer'), '') AS x509_issuer,
+ IFNULL(JSON_VALUE(Priv, '$.x509_subject'), '') AS x509_subject,
+ CAST(IFNULL(JSON_VALUE(Priv, '$.max_questions'), 0) AS UNSIGNED) AS max_questions,
+ CAST(IFNULL(JSON_VALUE(Priv, '$.max_updates'), 0) AS UNSIGNED) AS max_updates,
+ CAST(IFNULL(JSON_VALUE(Priv, '$.max_connections'), 0) AS UNSIGNED) AS max_connections,
+ CAST(IFNULL(JSON_VALUE(Priv, '$.max_user_connections'), 0) AS SIGNED) AS max_user_connections,
+ IFNULL(JSON_VALUE(Priv, '$.plugin'), '') AS plugin,
+ IFNULL(JSON_VALUE(Priv, '$.authentication_string'), '') AS authentication_string,
+ 'N' AS password_expired,
+ ELT(IFNULL(JSON_VALUE(Priv, '$.is_role'), 0) + 1, 'N', 'Y') AS is_role,
+ IFNULL(JSON_VALUE(Priv, '$.default_role'), '') AS default_role,
+ CAST(IFNULL(JSON_VALUE(Priv, '$.max_statement_time'), 0.0) AS DECIMAL(12,6)) AS max_statement_time
+ FROM global_priv;
-- Remember for later if user table already existed
set @had_user_table= @@warning_count != 0;
-CREATE TABLE IF NOT EXISTS roles_mapping ( Host char(60) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Role char(80) binary DEFAULT '' NOT NULL, Admin_option enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, UNIQUE (Host, User, Role)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Granted roles';
+CREATE TABLE IF NOT EXISTS roles_mapping ( Host char(60) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Role char(80) binary DEFAULT '' NOT NULL, Admin_option enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, UNIQUE (Host, User, Role)) engine=Aria transactional=1 CHARACTER SET utf8 COLLATE utf8_bin comment='Granted roles';
-CREATE TABLE IF NOT EXISTS func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions';
+CREATE TABLE IF NOT EXISTS func ( name char(64) binary DEFAULT '' NOT NULL, ret tinyint(1) DEFAULT '0' NOT NULL, dl char(128) DEFAULT '' NOT NULL, type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL, PRIMARY KEY (name) ) engine=Aria transactional=1 CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions';
-CREATE TABLE IF NOT EXISTS plugin ( name varchar(64) DEFAULT '' NOT NULL, dl varchar(128) DEFAULT '' NOT NULL, PRIMARY KEY (name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_general_ci comment='MySQL plugins';
+CREATE TABLE IF NOT EXISTS plugin ( name varchar(64) DEFAULT '' NOT NULL, dl varchar(128) DEFAULT '' NOT NULL, PRIMARY KEY (name) ) engine=Aria transactional=1 CHARACTER SET utf8 COLLATE utf8_general_ci comment='MySQL plugins';
-CREATE TABLE IF NOT EXISTS servers ( Server_name char(64) NOT NULL DEFAULT '', Host char(64) NOT NULL DEFAULT '', Db char(64) NOT NULL DEFAULT '', Username char(80) NOT NULL DEFAULT '', Password char(64) NOT NULL DEFAULT '', Port INT(4) NOT NULL DEFAULT '0', Socket char(64) NOT NULL DEFAULT '', Wrapper char(64) NOT NULL DEFAULT '', Owner char(64) NOT NULL DEFAULT '', PRIMARY KEY (Server_name)) CHARACTER SET utf8 comment='MySQL Foreign Servers table';
+CREATE TABLE IF NOT EXISTS servers ( Server_name char(64) NOT NULL DEFAULT '', Host char(64) NOT NULL DEFAULT '', Db char(64) NOT NULL DEFAULT '', Username char(80) NOT NULL DEFAULT '', Password char(64) NOT NULL DEFAULT '', Port INT(4) NOT NULL DEFAULT '0', Socket char(64) NOT NULL DEFAULT '', Wrapper char(64) NOT NULL DEFAULT '', Owner char(64) NOT NULL DEFAULT '', PRIMARY KEY (Server_name)) engine=Aria transactional=1 CHARACTER SET utf8 comment='MySQL Foreign Servers table';
-CREATE TABLE IF NOT EXISTS tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(141) DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger','Delete versioning rows') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
+CREATE TABLE IF NOT EXISTS tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(141) DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger','Delete versioning rows') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=Aria transactional=1 CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
-CREATE TABLE IF NOT EXISTS columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
+CREATE TABLE IF NOT EXISTS columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=Aria transactional=1 CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
-CREATE TABLE IF NOT EXISTS help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url text not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics';
+CREATE TABLE IF NOT EXISTS help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url text not null, primary key (help_topic_id), unique index (name) ) engine=Aria transactional=0 CHARACTER SET utf8 comment='help topics';
-CREATE TABLE IF NOT EXISTS help_category ( help_category_id smallint unsigned not null, name char(64) not null, parent_category_id smallint unsigned null, url text not null, primary key (help_category_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help categories';
+CREATE TABLE IF NOT EXISTS help_category ( help_category_id smallint unsigned not null, name char(64) not null, parent_category_id smallint unsigned null, url text not null, primary key (help_category_id), unique index (name) ) engine=Aria transactional=0 CHARACTER SET utf8 comment='help categories';
-CREATE TABLE IF NOT EXISTS help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation';
+CREATE TABLE IF NOT EXISTS help_relation ( help_topic_id int unsigned not null references help_topic, help_keyword_id int unsigned not null references help_keyword, primary key (help_keyword_id, help_topic_id) ) engine=Aria transactional=0 CHARACTER SET utf8 comment='keyword-topic relation';
-CREATE TABLE IF NOT EXISTS help_keyword ( help_keyword_id int unsigned not null, name char(64) not null, primary key (help_keyword_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help keywords';
+CREATE TABLE IF NOT EXISTS help_keyword ( help_keyword_id int unsigned not null, name char(64) not null, primary key (help_keyword_id), unique index (name) ) engine=Aria transactional=0 CHARACTER SET utf8 comment='help keywords';
-CREATE TABLE IF NOT EXISTS time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone names';
+CREATE TABLE IF NOT EXISTS time_zone_name ( Name char(64) NOT NULL, Time_zone_id int unsigned NOT NULL, PRIMARY KEY Name (Name) ) engine=Aria transactional=1 CHARACTER SET utf8 comment='Time zone names';
-CREATE TABLE IF NOT EXISTS time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zones';
+CREATE TABLE IF NOT EXISTS time_zone ( Time_zone_id int unsigned NOT NULL auto_increment, Use_leap_seconds enum('Y','N') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY TzId (Time_zone_id) ) engine=Aria transactional=1 CHARACTER SET utf8 comment='Time zones';
-CREATE TABLE IF NOT EXISTS time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions';
+CREATE TABLE IF NOT EXISTS time_zone_transition ( Time_zone_id int unsigned NOT NULL, Transition_time bigint signed NOT NULL, Transition_type_id int unsigned NOT NULL, PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time) ) engine=Aria transactional=1 CHARACTER SET utf8 comment='Time zone transitions';
-CREATE TABLE IF NOT EXISTS time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types';
+CREATE TABLE IF NOT EXISTS time_zone_transition_type ( Time_zone_id int unsigned NOT NULL, Transition_type_id int unsigned NOT NULL, Offset int signed DEFAULT 0 NOT NULL, Is_DST tinyint unsigned DEFAULT 0 NOT NULL, Abbreviation char(8) DEFAULT '' NOT NULL, PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id) ) engine=Aria transactional=1 CHARACTER SET utf8 comment='Time zone transition types';
-CREATE TABLE IF NOT EXISTS time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
+CREATE TABLE IF NOT EXISTS time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=Aria transactional=1 CHARACTER SET utf8 comment='Leap seconds information for time zones';
-CREATE TABLE IF NOT EXISTS proc (db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE','PACKAGE','PACKAGE BODY') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum( 'CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA') DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns longblob NOT NULL, body longblob NOT NULL, definer char(141) collate utf8_bin DEFAULT '' NOT NULL, created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'IGNORE_BAD_TABLE_OPTIONS', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE', 'NO_ENGINE_SUBSTITUTION', 'PAD_CHAR_TO_FULL_LENGTH', 'EMPTY_STRING_IS_NULL', 'SIMULTANEOUS_ASSIGNMENT') DEFAULT '' NOT NULL, comment text collate utf8_bin NOT NULL, character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, aggregate enum('NONE', 'GROUP') DEFAULT 'NONE' NOT NULL, PRIMARY KEY (db,name,type)) engine=MyISAM character set utf8 comment='Stored Procedures';
+CREATE TABLE IF NOT EXISTS proc (db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE','PACKAGE','PACKAGE BODY') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum( 'CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA') DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns longblob NOT NULL, body longblob NOT NULL, definer char(141) collate utf8_bin DEFAULT '' NOT NULL, created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'IGNORE_BAD_TABLE_OPTIONS', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE', 'NO_ENGINE_SUBSTITUTION', 'PAD_CHAR_TO_FULL_LENGTH', 'EMPTY_STRING_IS_NULL', 'SIMULTANEOUS_ASSIGNMENT', 'TIME_ROUND_FRACTIONAL') DEFAULT '' NOT NULL, comment text collate utf8_bin NOT NULL, character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, aggregate enum('NONE', 'GROUP') DEFAULT 'NONE' NOT NULL, PRIMARY KEY (db,name,type)) engine=Aria transactional=1 character set utf8 comment='Stored Procedures';
-CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Routine_name char(64) COLLATE utf8_general_ci DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE','PACKAGE','PACKAGE BODY') NOT NULL, Grantor char(141) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges';
+CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Routine_name char(64) COLLATE utf8_general_ci DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE','PACKAGE','PACKAGE BODY') NOT NULL, Grantor char(141) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=Aria transactional=1 CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges';
-- Create general_log if CSV is enabled.
@@ -103,7 +151,7 @@ PREPARE stmt FROM @str;
EXECUTE stmt;
DROP PREPARE stmt;
-CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(141) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00', last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', originator INTEGER UNSIGNED NOT NULL, time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events';
+CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(141) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00', last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT','TIME_ROUND_FRACTIONAL') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', originator INTEGER UNSIGNED NOT NULL, time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db, name) ) engine=Aria transactional=1 DEFAULT CHARSET=utf8 COMMENT 'Events';
SET @create_innodb_table_stats="CREATE TABLE IF NOT EXISTS innodb_table_stats (
database_name VARCHAR(64) NOT NULL,
@@ -170,7 +218,7 @@ SET @cmd="CREATE TABLE IF NOT EXISTS slave_relay_log_info (
Id INTEGER UNSIGNED NOT NULL COMMENT 'Internal Id that uniquely identifies this record.',
PRIMARY KEY(Id)) DEFAULT CHARSET=utf8 STATS_PERSISTENT=0 COMMENT 'Relay Log Information'";
-SET @str=CONCAT(@cmd, ' ENGINE=', @innodb_or_myisam);
+SET @str=CONCAT(@cmd, ' ENGINE=', @innodb_or_aria);
-- Don't create the table; MariaDB will have another implementation
#PREPARE stmt FROM @str;
#EXECUTE stmt;
@@ -202,7 +250,7 @@ SET @cmd= "CREATE TABLE IF NOT EXISTS slave_master_info (
Enabled_auto_position BOOLEAN NOT NULL COMMENT 'Indicates whether GTIDs will be used to retrieve events from the master.',
PRIMARY KEY(Host, Port)) DEFAULT CHARSET=utf8 STATS_PERSISTENT=0 COMMENT 'Master Information'";
-SET @str=CONCAT(@cmd, ' ENGINE=', @innodb_or_myisam);
+SET @str=CONCAT(@cmd, ' ENGINE=', @innodb_or_aria);
-- Don't create the table; MariaDB will have another implementation
#PREPARE stmt FROM @str;
#EXECUTE stmt;
@@ -223,26 +271,33 @@ SET @cmd= "CREATE TABLE IF NOT EXISTS slave_worker_info (
Checkpoint_group_bitmap BLOB NOT NULL,
PRIMARY KEY(Id)) DEFAULT CHARSET=utf8 STATS_PERSISTENT=0 COMMENT 'Worker Information'";
-SET @str=CONCAT(@cmd, ' ENGINE=', @innodb_or_myisam);
+SET @str=CONCAT(@cmd, ' ENGINE=', @innodb_or_aria);
-- Don't create the table; MariaDB will have another implementation
#PREPARE stmt FROM @str;
#EXECUTE stmt;
#DROP PREPARE stmt;
-CREATE TABLE IF NOT EXISTS proxies_priv (Host char(60) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Proxied_host char(60) binary DEFAULT '' NOT NULL, Proxied_user char(80) binary DEFAULT '' NOT NULL, With_grant BOOL DEFAULT 0 NOT NULL, Grantor char(141) DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY Host (Host,User,Proxied_host,Proxied_user), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User proxy privileges';
+CREATE TABLE IF NOT EXISTS proxies_priv (Host char(60) binary DEFAULT '' NOT NULL, User char(80) binary DEFAULT '' NOT NULL, Proxied_host char(60) binary DEFAULT '' NOT NULL, Proxied_user char(80) binary DEFAULT '' NOT NULL, With_grant BOOL DEFAULT 0 NOT NULL, Grantor char(141) DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY Host (Host,User,Proxied_host,Proxied_user), KEY Grantor (Grantor) ) engine=Aria transactional=1 CHARACTER SET utf8 COLLATE utf8_bin comment='User proxy privileges';
-- Remember for later if proxies_priv table already existed
set @had_proxies_priv_table= @@warning_count != 0;
+-- The following needs to be done both for new installations
+-- and for upgrades
+CREATE TEMPORARY TABLE tmp_proxies_priv LIKE proxies_priv;
+INSERT INTO tmp_proxies_priv VALUES ('localhost', 'root', '', '', TRUE, '', now());
+INSERT INTO proxies_priv SELECT * FROM tmp_proxies_priv WHERE @had_proxies_priv_table=0;
+DROP TABLE tmp_proxies_priv;
+
--
-- Tables unique for MariaDB
--
-CREATE TABLE IF NOT EXISTS table_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, cardinality bigint(21) unsigned DEFAULT NULL, PRIMARY KEY (db_name,table_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Tables';
+CREATE TABLE IF NOT EXISTS table_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, cardinality bigint(21) unsigned DEFAULT NULL, PRIMARY KEY (db_name,table_name) ) engine=Aria transactional=0 CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Tables';
-CREATE TABLE IF NOT EXISTS column_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, column_name varchar(64) NOT NULL, min_value varbinary(255) DEFAULT NULL, max_value varbinary(255) DEFAULT NULL, nulls_ratio decimal(12,4) DEFAULT NULL, avg_length decimal(12,4) DEFAULT NULL, avg_frequency decimal(12,4) DEFAULT NULL, hist_size tinyint unsigned, hist_type enum('SINGLE_PREC_HB','DOUBLE_PREC_HB'), histogram varbinary(255), PRIMARY KEY (db_name,table_name,column_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Columns';
+CREATE TABLE IF NOT EXISTS column_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, column_name varchar(64) NOT NULL, min_value varbinary(255) DEFAULT NULL, max_value varbinary(255) DEFAULT NULL, nulls_ratio decimal(12,4) DEFAULT NULL, avg_length decimal(12,4) DEFAULT NULL, avg_frequency decimal(12,4) DEFAULT NULL, hist_size tinyint unsigned, hist_type enum('SINGLE_PREC_HB','DOUBLE_PREC_HB'), histogram varbinary(255), PRIMARY KEY (db_name,table_name,column_name) ) engine=Aria transactional=0 CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Columns';
-CREATE TABLE IF NOT EXISTS index_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, index_name varchar(64) NOT NULL, prefix_arity int(11) unsigned NOT NULL, avg_frequency decimal(12,4) DEFAULT NULL, PRIMARY KEY (db_name,table_name,index_name,prefix_arity) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Indexes';
+CREATE TABLE IF NOT EXISTS index_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, index_name varchar(64) NOT NULL, prefix_arity int(11) unsigned NOT NULL, avg_frequency decimal(12,4) DEFAULT NULL, PRIMARY KEY (db_name,table_name,index_name,prefix_arity) ) engine=Aria transactional=0 CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Indexes';
-- Note: This definition must be kept in sync with the one used in
-- build_gtid_pos_create_query() in sql/slave.cc
@@ -253,7 +308,7 @@ SET @cmd= "CREATE TABLE IF NOT EXISTS gtid_slave_pos (
seq_no BIGINT UNSIGNED NOT NULL,
PRIMARY KEY (domain_id, sub_id)) CHARSET=latin1
COMMENT='Replication slave GTID position'";
-SET @str=CONCAT(@cmd, ' ENGINE=', @innodb_or_myisam);
+SET @str=CONCAT(@cmd, ' ENGINE=', @innodb_or_aria);
PREPARE stmt FROM @str;
EXECUTE stmt;
DROP PREPARE stmt;
@@ -262,6 +317,7 @@ set storage_engine=@orig_storage_engine;
--
-- Drop some tables not used anymore in MariaDB
----
+--
drop table if exists mysql.ndb_binlog_index;
+drop table if exists mysql.host;
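In the hunk above the mysql.* privilege and metadata tables move from engine=MyISAM to engine=Aria transactional=1, the statistics tables (table_stats, column_stats, index_stats) become Aria transactional=0, the replication helpers switch their fallback from @innodb_or_myisam to @innodb_or_aria, the proc and event sql_mode sets gain TIME_ROUND_FRACTIONAL, and the obsolete mysql.host table is dropped. A minimal verification sketch, assuming the pymysql client and an account that can read information_schema (the CREATE_OPTIONS column is where Aria's transactional flag shows up):

    import pymysql  # assumption: any MariaDB/MySQL client library would do

    conn = pymysql.connect(host="localhost", user="root", db="information_schema")
    with conn.cursor() as cur:
        cur.execute(
            "SELECT table_name, engine, create_options "
            "FROM tables WHERE table_schema = 'mysql' ORDER BY table_name"
        )
        for name, engine, options in cur.fetchall():
            # Expect engine 'Aria' with 'transactional=1' (or =0 for the
            # statistics tables) once the new scripts have run.
            print(f"{name:30} {engine:10} {options}")
    conn.close()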
diff --git a/scripts/mysql_system_tables_data.sql b/scripts/mysql_system_tables_data.sql
index 4761fe51dcc..9d0088aa333 100644
--- a/scripts/mysql_system_tables_data.sql
+++ b/scripts/mysql_system_tables_data.sql
@@ -25,28 +25,31 @@
-- add escape character in front of wildcard character to convert "_" or "%" to
-- a plain character
SELECT LOWER( REPLACE((SELECT REPLACE(@@hostname,'_','\_')),'%','\%') )INTO @current_hostname;
+SELECT '{"access":18446744073709551615}' INTO @all_privileges;
+SELECT '{"access":18446744073709551615,"plugin":"mysql_native_password","authentication_string":"invalid","auth_or":[{},{"plugin":"unix_socket"}]}' into @all_with_auth;
--- Fill "user" table with default users allowing root access
--- from local machine if "user" table didn't exist before
-CREATE TEMPORARY TABLE tmp_user_nopasswd LIKE user;
-CREATE TEMPORARY TABLE tmp_user_socket LIKE user;
+
+-- Fill "global_priv" table with default users allowing root access
+-- from local machine if "global_priv" table didn't exist before
+CREATE TEMPORARY TABLE tmp_user_nopasswd LIKE global_priv;
+CREATE TEMPORARY TABLE tmp_user_socket LIKE global_priv;
-- Classic passwordless root account.
-INSERT INTO tmp_user_nopasswd VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','','N', 'N','', 0);
-REPLACE INTO tmp_user_nopasswd SELECT @current_hostname,'root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','','N','N','',0 FROM dual WHERE @current_hostname != 'localhost';
-REPLACE INTO tmp_user_nopasswd VALUES ('127.0.0.1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','','N','N','',0);
-REPLACE INTO tmp_user_nopasswd VALUES ('::1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','','N','N', '', 0);
+INSERT INTO tmp_user_nopasswd VALUES ('localhost','root',@all_privileges);
+REPLACE INTO tmp_user_nopasswd SELECT @current_hostname,'root',@all_privileges FROM dual WHERE @current_hostname != 'localhost';
+REPLACE INTO tmp_user_nopasswd VALUES ('127.0.0.1','root',@all_privileges);
+REPLACE INTO tmp_user_nopasswd VALUES ('::1','root',@all_privileges);
-- More secure root account using unix socket auth.
-INSERT INTO tmp_user_socket VALUES ('localhost',IFNULL(@auth_root_socket, 'root'),'','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'unix_socket','','N', 'N','', 0);
+INSERT INTO tmp_user_socket VALUES ('localhost', 'root',@all_with_auth);
+REPLACE INTO tmp_user_socket VALUES ('localhost',IFNULL(@auth_root_socket, 'root'),@all_with_auth);
IF @auth_root_socket is not null THEN
IF not exists(select 1 from information_schema.plugins where plugin_name='unix_socket') THEN
INSTALL SONAME 'auth_socket'; END IF; END IF;
-INSERT INTO user SELECT * FROM tmp_user_nopasswd WHERE @had_user_table=0 AND @skip_auth_root_nopasswd IS NULL;
-INSERT INTO user SELECT * FROM tmp_user_socket WHERE @had_user_table=0 AND @auth_root_socket IS NOT NULL;
+INSERT INTO global_priv SELECT * FROM tmp_user_nopasswd WHERE @had_user_table=0 AND @auth_root_socket IS NULL;
+INSERT INTO global_priv SELECT * FROM tmp_user_socket WHERE @had_user_table=0 AND @auth_root_socket IS NOT NULL;
DROP TABLE tmp_user_nopasswd, tmp_user_socket;
CREATE TEMPORARY TABLE tmp_proxies_priv LIKE proxies_priv;
-INSERT INTO tmp_proxies_priv VALUES ('localhost', 'root', '', '', TRUE, '', now());
-REPLACE INTO tmp_proxies_priv SELECT @current_hostname, 'root', '', '', TRUE, '', now() FROM DUAL WHERE @current_hostname != 'localhost';
+INSERT INTO tmp_proxies_priv SELECT @current_hostname, 'root', '', '', TRUE, '', now() FROM DUAL WHERE @current_hostname != 'localhost';
INSERT INTO proxies_priv SELECT * FROM tmp_proxies_priv WHERE @had_proxies_priv_table=0;
DROP TABLE tmp_proxies_priv;
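The new @all_privileges and @all_with_auth defaults encode "every privilege" as access 18446744073709551615, which is simply a 64-bit mask with all bits set (2^64 - 1); these values feed the global_priv rows that replace the old wide mysql.user inserts. A small sketch of that arithmetic and of assembling the same compact JSON strings, with the key names copied from the hunk above:

    import json

    ALL_ACCESS = (1 << 64) - 1          # every bit of the 64-bit privilege mask
    assert ALL_ACCESS == 18446744073709551615

    all_privileges = json.dumps({"access": ALL_ACCESS}, separators=(",", ":"))
    all_with_auth = json.dumps(
        {
            "access": ALL_ACCESS,
            "plugin": "mysql_native_password",
            "authentication_string": "invalid",
            "auth_or": [{}, {"plugin": "unix_socket"}],
        },
        separators=(",", ":"),
    )
    print(all_privileges)   # {"access":18446744073709551615}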
diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql
index 945b569e52a..2af8ee49adb 100644
--- a/scripts/mysql_system_tables_fix.sql
+++ b/scripts/mysql_system_tables_fix.sql
@@ -25,9 +25,11 @@
# adding a 'SHOW WARNINGS' after the statement.
set sql_mode='';
-set storage_engine=MyISAM;
+set storage_engine=Aria;
set enforce_storage_engine=NULL;
+set @have_innodb= (select count(engine) from information_schema.engines where engine='INNODB' and support != 'NO');
+
ALTER TABLE user add File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
# Detect whether or not we had the Grant_priv column
@@ -38,10 +40,6 @@ ALTER TABLE user add Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N
add References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
add Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
add Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
-ALTER TABLE host add Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- add References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- add Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- add Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
ALTER TABLE db add Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
add References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
add Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
@@ -50,8 +48,6 @@ ALTER TABLE db add Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N'
# Fix privileges for old tables
UPDATE user SET Grant_priv=File_priv,References_priv=Create_priv,Index_priv=Create_priv,Alter_priv=Create_priv WHERE @hadGrantPriv = 0;
UPDATE db SET References_priv=Create_priv,Index_priv=Create_priv,Alter_priv=Create_priv WHERE @hadGrantPriv = 0;
-UPDATE host SET References_priv=Create_priv,Index_priv=Create_priv,Alter_priv=Create_priv WHERE @hadGrantPriv = 0;
-
#
# The second alter changes ssl_type to new 4.0.2 format
# Adding columns needed by GRANT .. REQUIRE (openssl)
@@ -75,7 +71,7 @@ ALTER TABLE tables_priv
MODIFY User char(80) binary NOT NULL default '',
MODIFY Table_name char(64) NOT NULL default '',
MODIFY Grantor char(141) COLLATE utf8_bin NOT NULL default '',
- ENGINE=MyISAM,
+ ENGINE=Aria,
CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
ALTER TABLE tables_priv
@@ -103,7 +99,7 @@ ALTER TABLE columns_priv
MODIFY User char(80) binary NOT NULL default '',
MODIFY Table_name char(64) NOT NULL default '',
MODIFY Column_name char(64) NOT NULL default '',
- ENGINE=MyISAM,
+ ENGINE=Aria,
CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin,
COMMENT='Column privileges';
@@ -155,15 +151,11 @@ ADD max_connections int(11) unsigned NOT NULL DEFAULT 0 AFTER max_updates;
ALTER TABLE db
ADD Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
ADD Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
-ALTER TABLE host
-ADD Create_tmp_table_priv enum('N','Y') DEFAULT 'N' NOT NULL,
-ADD Lock_tables_priv enum('N','Y') DEFAULT 'N' NOT NULL;
alter table user change max_questions max_questions int(11) unsigned DEFAULT 0 NOT NULL;
alter table db comment='Database privileges';
-alter table host comment='Host privileges; Merged with database privileges';
alter table user comment='Users and global privileges';
alter table func comment='User defined functions';
@@ -172,7 +164,7 @@ alter table func comment='User defined functions';
ALTER TABLE user
MODIFY Host char(60) NOT NULL default '',
MODIFY User char(80) binary NOT NULL default '',
- ENGINE=MyISAM, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
+ ENGINE=Aria, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
# In MySQL 5.7.6 the Password column is removed. Recreate it to preserve the number
# of columns MariaDB expects in the user table.
@@ -208,7 +200,7 @@ ALTER TABLE db
MODIFY Host char(60) NOT NULL default '',
MODIFY Db char(64) NOT NULL default '',
MODIFY User char(80) binary NOT NULL default '',
- ENGINE=MyISAM, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
+ ENGINE=Aria, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
ALTER TABLE db
MODIFY Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
MODIFY Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
@@ -223,26 +215,9 @@ ALTER TABLE db
MODIFY Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
MODIFY Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
-ALTER TABLE host
- MODIFY Host char(60) NOT NULL default '',
- MODIFY Db char(64) NOT NULL default '',
- ENGINE=MyISAM, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
-ALTER TABLE host
- MODIFY Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- MODIFY Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- MODIFY Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- MODIFY Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- MODIFY Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- MODIFY Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- MODIFY Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- MODIFY References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- MODIFY Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- MODIFY Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- MODIFY Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL,
- MODIFY Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
ALTER TABLE func
- ENGINE=MyISAM, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
+ ENGINE=Aria, CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
ALTER TABLE func
MODIFY type enum ('function','aggregate') COLLATE utf8_general_ci NOT NULL;
@@ -300,8 +275,6 @@ SELECT @hadCreateViewPriv:=1 FROM user WHERE Create_view_priv LIKE '%';
ALTER TABLE db ADD Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Lock_tables_priv;
ALTER TABLE db MODIFY Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Lock_tables_priv;
-ALTER TABLE host ADD Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Lock_tables_priv;
-ALTER TABLE host MODIFY Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Lock_tables_priv;
ALTER TABLE user ADD Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Repl_client_priv;
ALTER TABLE user MODIFY Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Repl_client_priv;
@@ -312,9 +285,6 @@ ALTER TABLE user MODIFY Create_view_priv enum('N','Y') COLLATE utf8_general_ci D
ALTER TABLE db ADD Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Create_view_priv;
ALTER TABLE db MODIFY Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Create_view_priv;
-ALTER TABLE host ADD Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Create_view_priv;
-ALTER TABLE host MODIFY Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Create_view_priv;
-
ALTER TABLE user ADD Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Create_view_priv;
ALTER TABLE user MODIFY Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Create_view_priv;
@@ -335,9 +305,6 @@ SELECT @hadCreateRoutinePriv:=1 FROM user WHERE Create_routine_priv LIKE '%';
ALTER TABLE db ADD Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Show_view_priv;
ALTER TABLE db MODIFY Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Show_view_priv;
-ALTER TABLE host ADD Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Show_view_priv;
-ALTER TABLE host MODIFY Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Show_view_priv;
-
ALTER TABLE user ADD Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Show_view_priv;
ALTER TABLE user MODIFY Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Show_view_priv;
@@ -347,24 +314,17 @@ ALTER TABLE user MODIFY Create_routine_priv enum('N','Y') COLLATE utf8_general_c
ALTER TABLE db ADD Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Create_routine_priv;
ALTER TABLE db MODIFY Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Create_routine_priv;
-ALTER TABLE host ADD Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Create_routine_priv;
-ALTER TABLE host MODIFY Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Create_routine_priv;
-
ALTER TABLE user ADD Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Create_routine_priv;
ALTER TABLE user MODIFY Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Create_routine_priv;
ALTER TABLE db ADD Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Alter_routine_priv;
ALTER TABLE db MODIFY Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Alter_routine_priv;
-ALTER TABLE host ADD Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Alter_routine_priv;
-ALTER TABLE host MODIFY Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Alter_routine_priv;
-
#
# Assign create/alter routine privileges to people who have create privileges
#
UPDATE user SET Create_routine_priv=Create_priv, Alter_routine_priv=Alter_priv where user<>"" AND @hadCreateRoutinePriv = 0;
UPDATE db SET Create_routine_priv=Create_priv, Alter_routine_priv=Alter_priv, Execute_priv=Select_priv where user<>"" AND @hadCreateRoutinePriv = 0;
-UPDATE host SET Create_routine_priv=Create_priv, Alter_routine_priv=Alter_priv, Execute_priv=Select_priv where @hadCreateRoutinePriv = 0;
#
# Add max_user_connections resource limit
@@ -392,7 +352,7 @@ UPDATE user LEFT JOIN db USING (Host,User) SET Create_user_priv='Y'
#
ALTER TABLE procs_priv
- ENGINE=MyISAM,
+ ENGINE=Aria,
CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;
ALTER TABLE procs_priv
@@ -459,7 +419,8 @@ ALTER TABLE proc MODIFY name char(64) DEFAULT '' NOT NULL,
'NO_ENGINE_SUBSTITUTION',
'PAD_CHAR_TO_FULL_LENGTH',
'EMPTY_STRING_IS_NULL',
- 'SIMULTANEOUS_ASSIGNMENT'
+ 'SIMULTANEOUS_ASSIGNMENT',
+ 'TIME_ROUND_FRACTIONAL'
) DEFAULT '' NOT NULL,
DEFAULT CHARACTER SET utf8;
@@ -595,7 +556,8 @@ ALTER TABLE event MODIFY sql_mode
'NO_ENGINE_SUBSTITUTION',
'PAD_CHAR_TO_FULL_LENGTH',
'EMPTY_STRING_IS_NULL',
- 'SIMULTANEOUS_ASSIGNMENT'
+ 'SIMULTANEOUS_ASSIGNMENT',
+ 'TIME_ROUND_FRACTIONAL'
) DEFAULT '' NOT NULL AFTER on_completion;
ALTER TABLE event MODIFY name char(64) CHARACTER SET utf8 NOT NULL default '';
@@ -642,9 +604,6 @@ SELECT @hadTriggerPriv :=1 FROM user WHERE Trigger_priv LIKE '%';
ALTER TABLE user ADD Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Event_priv;
ALTER TABLE user MODIFY Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER Event_priv;
-ALTER TABLE host ADD Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
-ALTER TABLE host MODIFY Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
-
ALTER TABLE db ADD Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
ALTER TABLE db MODIFY Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
@@ -684,6 +643,9 @@ ALTER TABLE user ADD plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL,
ALTER TABLE user MODIFY plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL,
MODIFY authentication_string TEXT NOT NULL;
ALTER TABLE user ADD password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
+ALTER TABLE user ADD password_last_changed timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL after password_expired;
+ALTER TABLE user ADD password_lifetime smallint unsigned DEFAULT NULL after password_last_changed;
+ALTER TABLE user ADD account_locked enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL after password_lifetime;
ALTER TABLE user ADD is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
ALTER TABLE user ADD default_role char(80) binary DEFAULT '' NOT NULL;
ALTER TABLE user ADD max_statement_time decimal(12,6) DEFAULT 0 NOT NULL;
@@ -692,63 +654,35 @@ ALTER TABLE user ADD max_statement_time decimal(12,6) DEFAULT 0 NOT NULL;
ALTER TABLE user MODIFY password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
ALTER TABLE user MODIFY is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
--- Need to pre-fill mysql.proxies_priv with access for root even when upgrading from
--- older versions
-
-CREATE TEMPORARY TABLE tmp_proxies_priv LIKE proxies_priv;
-INSERT INTO tmp_proxies_priv VALUES ('localhost', 'root', '', '', TRUE, '', now());
-INSERT INTO proxies_priv SELECT * FROM tmp_proxies_priv WHERE @had_proxies_priv_table=0;
-DROP TABLE tmp_proxies_priv;
-
-- Check whether any duplicate hostname and username combinations exist.
-- If they do, throw an error.
-DROP PROCEDURE IF EXISTS mysql.count_duplicate_host_names;
DELIMITER //
-CREATE PROCEDURE mysql.count_duplicate_host_names()
-BEGIN
+BEGIN NOT ATOMIC
SET @duplicate_hosts=(SELECT count(*) FROM mysql.user GROUP BY user, lower(host) HAVING count(*) > 1 LIMIT 1);
IF @duplicate_hosts > 1 THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = 'Multiple accounts exist for @user_name, @host_name that differ only in Host lettercase; remove all except one of them';
END IF;
END //
DELIMITER ;
-CALL mysql.count_duplicate_host_names();
-- Get warnings (if any)
SHOW WARNINGS;
-DROP PROCEDURE mysql.count_duplicate_host_names;
# Converting the host name to lower case for existing users
UPDATE user SET host=LOWER( host ) WHERE LOWER( host ) <> host;
-# fix bad data when upgrading from unfixed InnoDB (MDEV-13360)
-set @str="delete from innodb_index_stats where length(table_name) > 64";
-set @str=if(@have_innodb <> 0, @str, "set @dummy = 0");
-prepare stmt from @str;
-execute stmt;
-set @str=replace(@str, "innodb_index_stats", "innodb_table_stats");
-prepare stmt from @str;
-execute stmt;
-
-# update table_name and timestamp fields in the innodb stat tables
-set @str="alter table mysql.innodb_index_stats modify last_update timestamp not null default current_timestamp on update current_timestamp, modify table_name varchar(199)";
-set @str=if(@have_innodb <> 0, @str, "set @dummy = 0");
-prepare stmt from @str;
-execute stmt;
-
-set @str="alter table mysql.innodb_table_stats modify last_update timestamp not null default current_timestamp on update current_timestamp, modify table_name varchar(199)";
-set @str=if(@have_innodb <> 0, @str, "set @dummy = 0");
-prepare stmt from @str;
-execute stmt;
-
-set @str=replace(@str, "innodb_index_stats", "innodb_table_stats");
-prepare stmt from @str;
-execute stmt;
-
-SET @innodb_index_stats_fk= (select count(*) from information_schema.referential_constraints where constraint_schema='mysql' and table_name = 'innodb_index_stats' and referenced_table_name = 'innodb_table_stats' and constraint_name = 'innodb_index_stats_ibfk_1');
-SET @str=IF(@innodb_index_stats_fk > 0 and @have_innodb > 0, "ALTER TABLE mysql.innodb_index_stats DROP FOREIGN KEY `innodb_index_stats_ibfk_1`", "SET @dummy = 0");
-PREPARE stmt FROM @str;
-EXECUTE stmt;
-DROP PREPARE stmt;
+DELIMITER //
+if @have_innodb then
+ # fix bad data when upgrading from unfixed InnoDB (MDEV-13360)
+ delete from innodb_index_stats where length(table_name) > 64;
+ delete from innodb_table_stats where length(table_name) > 64;
+
+ # update table_name and timestamp fields in the innodb stat tables
+ alter table innodb_index_stats modify last_update timestamp not null default current_timestamp on update current_timestamp, modify table_name varchar(199);
+ alter table innodb_table_stats modify last_update timestamp not null default current_timestamp on update current_timestamp, modify table_name varchar(199);
+
+ alter table innodb_index_stats drop foreign key if exists innodb_index_stats_ibfk_1;
+end if //
+DELIMITER ;
# MDEV-4332 longer user names
alter table user modify User char(80) binary not null default '';
@@ -780,3 +714,89 @@ ALTER TABLE help_topic MODIFY url TEXT NOT NULL;
# MDEV-7383 - varbinary on mix/max of column_stats
alter table column_stats modify min_value varbinary(255) DEFAULT NULL, modify max_value varbinary(255) DEFAULT NULL;
+
+--
+-- Ensure that all tables are of type Aria and transactional
+--
+
+ALTER TABLE user ENGINE=Aria transactional=1;
+ALTER TABLE db ENGINE=Aria transactional=1;
+ALTER TABLE func ENGINE=Aria transactional=1;
+ALTER TABLE procs_priv ENGINE=Aria transactional=1;
+ALTER TABLE tables_priv ENGINE=Aria transactional=1;
+ALTER TABLE columns_priv ENGINE=Aria transactional=1;
+ALTER TABLE roles_mapping ENGINE=Aria transactional=1;
+ALTER TABLE plugin ENGINE=Aria transactional=1;
+ALTER TABLE servers ENGINE=Aria transactional=1;
+ALTER TABLE time_zone_name ENGINE=Aria transactional=1;
+ALTER TABLE time_zone ENGINE=Aria transactional=1;
+ALTER TABLE time_zone_transition ENGINE=Aria transactional=1;
+ALTER TABLE time_zone_transition_type ENGINE=Aria transactional=1;
+ALTER TABLE time_zone_leap_second ENGINE=Aria transactional=1;
+ALTER TABLE proc ENGINE=Aria transactional=1;
+ALTER TABLE event ENGINE=Aria transactional=1;
+ALTER TABLE proxies_priv ENGINE=Aria transactional=1;
+
+-- The following tables don't have to be transactional
+ALTER TABLE help_topic ENGINE=Aria transactional=0;
+ALTER TABLE help_category ENGINE=Aria transactional=0;
+ALTER TABLE help_relation ENGINE=Aria transactional=0;
+ALTER TABLE help_keyword ENGINE=Aria transactional=0;
+ALTER TABLE table_stats ENGINE=Aria transactional=0;
+ALTER TABLE column_stats ENGINE=Aria transactional=0;
+ALTER TABLE index_stats ENGINE=Aria transactional=0;
+
+DELIMITER //
+IF 'BASE TABLE' = (select table_type from information_schema.tables where table_schema=database() and table_name='user') THEN
+ CREATE TABLE IF NOT EXISTS global_priv (Host char(60) binary DEFAULT '', User char(80) binary DEFAULT '', Priv JSON NOT NULL DEFAULT '{}' CHECK(JSON_VALID(Priv)), PRIMARY KEY Host (Host,User)) engine=Aria transactional=1 CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges'
+ SELECT Host, User, JSON_COMPACT(JSON_OBJECT('access',
+ 1*('Y'=Select_priv)+
+ 2*('Y'=Insert_priv)+
+ 4*('Y'=Update_priv)+
+ 8*('Y'=Delete_priv)+
+ 16*('Y'=Create_priv)+
+ 32*('Y'=Drop_priv)+
+ 64*('Y'=Reload_priv)+
+ 128*('Y'=Shutdown_priv)+
+ 256*('Y'=Process_priv)+
+ 512*('Y'=File_priv)+
+ 1024*('Y'=Grant_priv)+
+ 2048*('Y'=References_priv)+
+ 4096*('Y'=Index_priv)+
+ 8192*('Y'=Alter_priv)+
+ 16384*('Y'=Show_db_priv)+
+ 32768*('Y'=Super_priv)+
+ 65536*('Y'=Create_tmp_table_priv)+
+ 131072*('Y'=Lock_tables_priv)+
+ 262144*('Y'=Execute_priv)+
+ 524288*('Y'=Repl_slave_priv)+
+ 1048576*('Y'=Repl_client_priv)+
+ 2097152*('Y'=Create_view_priv)+
+ 4194304*('Y'=Show_view_priv)+
+ 8388608*('Y'=Create_routine_priv)+
+ 16777216*('Y'=Alter_routine_priv)+
+ 33554432*('Y'=Create_user_priv)+
+ 67108864*('Y'=Event_priv)+
+ 134217728*('Y'=Trigger_priv)+
+ 268435456*('Y'=Create_tablespace_priv)+
+ 536870912*('Y'=Delete_history_priv),
+ 'ssl_type', ssl_type-1,
+ 'ssl_cipher', ssl_cipher,
+ 'x509_issuer', x509_issuer,
+ 'x509_subject', x509_subject,
+ 'max_questions', max_questions,
+ 'max_updates', max_updates,
+ 'max_connections', max_connections,
+ 'max_user_connections', max_user_connections,
+ 'max_statement_time', max_statement_time,
+ 'plugin', if(plugin>'',plugin,if(length(password)=16,'mysql_old_password','mysql_native_password')),
+ 'authentication_string', if(plugin>'' and authentication_string>'',authentication_string,password),
+ 'password_last_changed', if(password_expired='Y', 0, UNIX_TIMESTAMP(password_last_changed)),
+ 'password_lifetime', ifnull(password_lifetime, -1),
+ 'account_locked', 'Y'=account_locked,
+ 'default_role', default_role,
+ 'is_role', 'Y'=is_role)) as Priv
+ FROM user;
+ DROP TABLE user;
+END IF//
+DELIMITER ;
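The CREATE TABLE ... SELECT above folds the old per-column 'Y'/'N' flags of mysql.user into a single integer stored under the JSON key 'access', one bit per privilege, from 1 for Select_priv up to 536870912 for Delete_history_priv. A sketch of the same mapping in Python; the bit weights are copied from the hunk, and access_mask is an illustrative helper, not part of the upgrade script:

    # Bit weights as used in the JSON_OBJECT('access', ...) expression above.
    PRIV_BITS = {
        "Select": 1, "Insert": 2, "Update": 4, "Delete": 8,
        "Create": 16, "Drop": 32, "Reload": 64, "Shutdown": 128,
        "Process": 256, "File": 512, "Grant": 1024, "References": 2048,
        "Index": 4096, "Alter": 8192, "Show_db": 16384, "Super": 32768,
        "Create_tmp_table": 65536, "Lock_tables": 131072, "Execute": 262144,
        "Repl_slave": 524288, "Repl_client": 1048576, "Create_view": 2097152,
        "Show_view": 4194304, "Create_routine": 8388608,
        "Alter_routine": 16777216, "Create_user": 33554432, "Event": 67108864,
        "Trigger": 134217728, "Create_tablespace": 268435456,
        "Delete_history": 536870912,
    }

    def access_mask(row):
        """row maps privilege name to 'Y'/'N', as in the old mysql.user columns."""
        return sum(bit for name, bit in PRIV_BITS.items() if row.get(name) == "Y")

    # A row with every flag set yields the sum of all 30 bits, i.e. 2**30 - 1.
    assert access_mask({name: "Y" for name in PRIV_BITS}) == (1 << 30) - 1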
diff --git a/scripts/mysql_test_db.sql b/scripts/mysql_test_db.sql
index c1bb3661ec3..9f8a0cf604c 100644
--- a/scripts/mysql_test_db.sql
+++ b/scripts/mysql_test_db.sql
@@ -24,8 +24,8 @@ INSERT INTO db SELECT * FROM tmp_db WHERE @had_db_table=0;
DROP TABLE tmp_db;
-- Anonymous user with no privileges.
-CREATE TEMPORARY TABLE tmp_user_anonymous LIKE user;
+CREATE TEMPORARY TABLE tmp_user_anonymous LIKE global_priv;
INSERT INTO tmp_user_anonymous (host,user) VALUES ('localhost','');
INSERT INTO tmp_user_anonymous (host,user) SELECT @current_hostname,'' FROM dual WHERE @current_hostname != 'localhost';
-INSERT INTO user SELECT * FROM tmp_user_anonymous WHERE @had_user_table=0;
+INSERT INTO global_priv SELECT * FROM tmp_user_anonymous WHERE @had_user_table=0;
DROP TABLE tmp_user_anonymous;
diff --git a/scripts/mysqld_safe.sh b/scripts/mysqld_safe.sh
index 5797bdc68d7..5f09ac3c235 100644
--- a/scripts/mysqld_safe.sh
+++ b/scripts/mysqld_safe.sh
@@ -265,7 +265,16 @@ wsrep_recover_position() {
wsrep_start_position_opt="--wsrep_start_position=$start_pos"
fi
- [ $ret -eq 0 ] && rm $wr_logfile
+ if [ $ret -eq 0 ] ; then
+ local wr_logfile_permanent="$DATADIR/wsrep_recovery.ok"
+ else
+ local wr_logfile_permanent="$DATADIR/wsrep_recovery.fail"
+ fi
+ touch $wr_logfile_permanent
+ [ "$euid" = "0" ] && chown $user $wr_logfile_permanent
+ chmod 600 $wr_logfile_permanent
+ cat "$wr_logfile" >> $wr_logfile_permanent
+ rm -f "$wr_logfile"
return $ret
}
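Instead of removing the temporary wsrep recovery log on success, mysqld_safe now appends it to a permanent wsrep_recovery.ok or wsrep_recovery.fail file in the datadir, chowns it to the server user when running as root, and restricts it to mode 600. A rough Python analogue of that bookkeeping, with a hypothetical helper name (the real logic stays in the shell script):

    import os
    import shutil

    def preserve_wsrep_recovery_log(datadir, wr_logfile, ret, user=None):
        suffix = "ok" if ret == 0 else "fail"
        permanent = os.path.join(datadir, f"wsrep_recovery.{suffix}")
        with open(permanent, "a") as dst, open(wr_logfile) as src:
            shutil.copyfileobj(src, dst)        # cat "$wr_logfile" >> permanent file
        os.chmod(permanent, 0o600)              # chmod 600 $wr_logfile_permanent
        if user is not None and os.geteuid() == 0:
            shutil.chown(permanent, user=user)  # chown only when effective uid is root
        os.remove(wr_logfile)                   # rm -f "$wr_logfile"
        return ret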
diff --git a/scripts/wsrep_sst_mysqldump.sh b/scripts/wsrep_sst_mysqldump.sh
index faa3f10639b..d36deb5759f 100644
--- a/scripts/wsrep_sst_mysqldump.sh
+++ b/scripts/wsrep_sst_mysqldump.sh
@@ -25,6 +25,7 @@ EINVAL=22
local_ip()
{
[ "$1" = "127.0.0.1" ] && return 0
+ [ "$1" = "127.0.0.2" ] && return 0
[ "$1" = "localhost" ] && return 0
[ "$1" = "[::1]" ] && return 0
[ "$1" = "$(hostname -s)" ] && return 0
@@ -138,8 +139,8 @@ then
# turned off for the session so that gtid state does not get altered while
# the dump gets replayed on joiner.
if [[ "$LOG_BIN" == 'ON' ]]; then
- RESET_MASTER="RESET MASTER;"
- SET_GTID_BINLOG_STATE="SET @@global.gtid_binlog_state='$GTID_BINLOG_STATE';"
+ RESET_MASTER="SET GLOBAL wsrep_on=OFF; RESET MASTER; SET GLOBAL wsrep_on=ON;"
+ SET_GTID_BINLOG_STATE="SET GLOBAL wsrep_on=OFF; SET @@global.gtid_binlog_state='$GTID_BINLOG_STATE'; SET GLOBAL wsrep_on=ON;"
SQL_LOG_BIN_OFF="SET @@session.sql_log_bin=OFF;"
fi
fi
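Wrapping RESET MASTER and the gtid_binlog_state assignment in SET GLOBAL wsrep_on=OFF ... SET GLOBAL wsrep_on=ON keeps those statements from being applied cluster-wide, so the donor can reset its local binlog/GTID state without disturbing the other nodes; this matches the surrounding comment about not altering gtid state while the dump is replayed on the joiner. A one-line sketch of how such a wrapper could be written; wrap_local_only is an illustrative name, not part of the script:

    def wrap_local_only(stmt):
        # Disable wsrep replication around a statement that must stay local,
        # then switch it back on.
        return f"SET GLOBAL wsrep_on=OFF; {stmt} SET GLOBAL wsrep_on=ON;"

    reset_master = wrap_local_only("RESET MASTER;")
    # -> "SET GLOBAL wsrep_on=OFF; RESET MASTER; SET GLOBAL wsrep_on=ON;"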
diff --git a/scripts/wsrep_sst_rsync.sh b/scripts/wsrep_sst_rsync.sh
index b5dca7e6c25..f4547bcc0bb 100644
--- a/scripts/wsrep_sst_rsync.sh
+++ b/scripts/wsrep_sst_rsync.sh
@@ -147,6 +147,7 @@ if ! [ -z $WSREP_SST_OPT_BINLOG_INDEX ]
then
BINLOG_INDEX_DIRNAME=$(dirname $WSREP_SST_OPT_BINLOG_INDEX)
BINLOG_INDEX_FILENAME=$(basename $WSREP_SST_OPT_BINLOG_INDEX)
+ BINLOG_INDEX_FILENAME=${BINLOG_INDEX_FILENAME%.index}.index
fi
WSREP_LOG_DIR=${WSREP_LOG_DIR:-""}
@@ -265,11 +266,12 @@ EOF
OLD_PWD="$(pwd)"
cd $BINLOG_DIRNAME
- if ! [ -z $WSREP_SST_OPT_BINLOG_INDEX ]
- binlog_files_full=$(tail -n $BINLOG_N_FILES ${BINLOG_FILENAME}.index)
+ if [ -z $WSREP_SST_OPT_BINLOG_INDEX ]
then
- cd $BINLOG_INDEX_DIRNAME
- binlog_files_full=$(tail -n $BINLOG_N_FILES ${BINLOG_INDEX_FILENAME}.index)
+ binlog_files_full=$(tail -n $BINLOG_N_FILES ${BINLOG_FILENAME}.index)
+ else
+ cd $BINLOG_INDEX_DIRNAME
+ binlog_files_full=$(tail -n $BINLOG_N_FILES ${BINLOG_INDEX_FILENAME})
fi
cd $BINLOG_DIRNAME
@@ -325,11 +327,11 @@ EOF
exit 255 # unknown error
fi
- # second, we transfer InnoDB log files
+ # second, we transfer InnoDB and Aria log files
rsync ${STUNNEL:+--rsh="$STUNNEL"} \
--owner --group --perms --links --specials \
--ignore-times --inplace --dirs --delete --quiet \
- $WHOLE_FILE_OPT -f '+ /ib_logfile[0-9]*' -f '- **' "$WSREP_LOG_DIR/" \
+ $WHOLE_FILE_OPT -f '+ /ib_logfile[0-9]*' -f '+ /aria_log.*' -f '+ /aria_log_control' -f '- **' "$WSREP_LOG_DIR/" \
rsync://$WSREP_SST_OPT_ADDR-log_dir >&2 || RC=$?
if [ $RC -ne 0 ]; then
@@ -350,7 +352,7 @@ EOF
rsync ${STUNNEL:+--rsh="$STUNNEL"} \
--owner --group --perms --links --specials \
--ignore-times --inplace --recursive --delete --quiet \
- $WHOLE_FILE_OPT --exclude '*/ib_logfile*' "$WSREP_SST_OPT_DATA"/{}/ \
+ $WHOLE_FILE_OPT --exclude '*/ib_logfile*' --exclude "*/aria_log.*" --exclude "*/aria_log_control" "$WSREP_SST_OPT_DATA"/{}/ \
rsync://$WSREP_SST_OPT_ADDR/{} >&2 || RC=$?
cd "$OLD_PWD"
@@ -506,15 +508,16 @@ EOF
# Clean up old binlog files first
rm -f ${BINLOG_FILENAME}.*
wsrep_log_info "Extracting binlog files:"
- tar -xvf $BINLOG_TAR_FILE >&2
- for ii in $(ls -1 ${BINLOG_FILENAME}.*)
- do
- if ! [ -z $WSREP_SST_OPT_BINLOG_INDEX ]
- echo ${BINLOG_DIRNAME}/${ii} >> ${BINLOG_FILENAME}.index
- then
- echo ${BINLOG_DIRNAME}/${ii} >> ${BINLOG_INDEX_DIRNAME}/${BINLOG_INDEX_FILENAME}.index
+ tar -xvf $BINLOG_TAR_FILE >> _binlog_tmp_files_$!
+ while read bin_file; do
+ if [ -z $WSREP_SST_OPT_BINLOG_INDEX ]
+ then
+ echo ${BINLOG_DIRNAME}/${bin_file} >> ${BINLOG_FILENAME}.index
+ else
+ echo ${BINLOG_DIRNAME}/${bin_file} >> ${BINLOG_INDEX_DIRNAME}/${BINLOG_INDEX_FILENAME}
fi
- done
+ done < _binlog_tmp_files_$!
+ rm -f _binlog_tmp_files_$!
fi
cd "$OLD_PWD"
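One detail worth noting in the rsync SST changes: ${BINLOG_INDEX_FILENAME%.index}.index strips a trailing ".index" if present and then appends it again, so the variable always ends in exactly one ".index" and the later tail/echo calls can use it as-is instead of blindly adding the suffix. The same normalization expressed in Python, with an illustrative function name:

    def normalize_index_name(name: str) -> str:
        # Equivalent of ${name%.index}.index in the shell script.
        if name.endswith(".index"):
            name = name[: -len(".index")]
        return name + ".index"

    assert normalize_index_name("mariadb-bin") == "mariadb-bin.index"
    assert normalize_index_name("mariadb-bin.index") == "mariadb-bin.index"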
diff --git a/sql-common/client.c b/sql-common/client.c
index 160a85fd7b5..c66cb1a749d 100644
--- a/sql-common/client.c
+++ b/sql-common/client.c
@@ -127,10 +127,6 @@ char *mysql_unix_port= 0;
const char *unknown_sqlstate= "HY000";
const char *not_error_sqlstate= "00000";
const char *cant_connect_sqlstate= "08001";
-#ifdef HAVE_SMEM
-char *shared_memory_base_name= 0;
-const char *def_shared_memory_base_name= default_shared_memory_base_name;
-#endif
static void mysql_close_free_options(MYSQL *mysql);
static void mysql_close_free(MYSQL *mysql);
@@ -326,248 +322,6 @@ HANDLE create_named_pipe(MYSQL *mysql, uint connect_timeout, char **arg_host,
#endif
-/*
- Create new shared memory connection, return handler of connection
-
- SYNOPSIS
- create_shared_memory()
- mysql Pointer of mysql structure
- net Pointer of net structure
- connect_timeout Timeout of connection
-*/
-
-#ifdef HAVE_SMEM
-HANDLE create_shared_memory(MYSQL *mysql,NET *net, uint connect_timeout)
-{
- ulong smem_buffer_length = shared_memory_buffer_length + 4;
- /*
- event_connect_request is event object for start connection actions
- event_connect_answer is event object for confirm, that server put data
- handle_connect_file_map is file-mapping object, use for create shared
- memory
- handle_connect_map is pointer on shared memory
- handle_map is pointer on shared memory for client
- event_server_wrote,
- event_server_read,
- event_client_wrote,
- event_client_read are events for transfer data between server and client
- handle_file_map is file-mapping object, use for create shared memory
- */
- HANDLE event_connect_request = NULL;
- HANDLE event_connect_answer = NULL;
- HANDLE handle_connect_file_map = NULL;
- char *handle_connect_map = NULL;
-
- char *handle_map = NULL;
- HANDLE event_server_wrote = NULL;
- HANDLE event_server_read = NULL;
- HANDLE event_client_wrote = NULL;
- HANDLE event_client_read = NULL;
- HANDLE event_conn_closed = NULL;
- HANDLE handle_file_map = NULL;
- ulong connect_number;
- char connect_number_char[22], *p;
- char *tmp= NULL;
- char *suffix_pos;
- DWORD error_allow = 0;
- DWORD error_code = 0;
- DWORD event_access_rights= SYNCHRONIZE | EVENT_MODIFY_STATE;
- char *shared_memory_base_name = mysql->options.shared_memory_base_name;
- static const char *name_prefixes[] = {"","Global\\"};
- const char *prefix;
- uint i;
-
- /*
- If this is NULL, somebody freed the MYSQL* options. mysql_close()
- is a good candidate. We don't just silently (re)set it to
- def_shared_memory_base_name as that would create really confusing/buggy
- behavior if the user passed in a different name on the command-line or
- in a my.cnf.
- */
- DBUG_ASSERT(shared_memory_base_name != NULL);
-
- /*
- get enough space base-name + '_' + longest suffix we might ever send
- */
- if (!(tmp= (char *)my_malloc(strlen(shared_memory_base_name) + 32L, MYF(MY_FAE))))
- goto err;
-
- /*
- The name of event and file-mapping events create agree next rule:
- shared_memory_base_name+unique_part
- Where:
- shared_memory_base_name is unique value for each server
- unique_part is uniquel value for each object (events and file-mapping)
- */
- for (i = 0; i< array_elements(name_prefixes); i++)
- {
- prefix= name_prefixes[i];
- suffix_pos = strxmov(tmp, prefix , shared_memory_base_name, "_", NullS);
- strmov(suffix_pos, "CONNECT_REQUEST");
- event_connect_request= OpenEvent(event_access_rights, FALSE, tmp);
- if (event_connect_request)
- {
- break;
- }
- }
- if (!event_connect_request)
- {
- error_allow = CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR;
- goto err;
- }
- strmov(suffix_pos, "CONNECT_ANSWER");
- if (!(event_connect_answer= OpenEvent(event_access_rights,FALSE,tmp)))
- {
- error_allow = CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR;
- goto err;
- }
- strmov(suffix_pos, "CONNECT_DATA");
- if (!(handle_connect_file_map= OpenFileMapping(FILE_MAP_WRITE,FALSE,tmp)))
- {
- error_allow = CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR;
- goto err;
- }
- if (!(handle_connect_map= MapViewOfFile(handle_connect_file_map,
- FILE_MAP_WRITE,0,0,sizeof(DWORD))))
- {
- error_allow = CR_SHARED_MEMORY_CONNECT_MAP_ERROR;
- goto err;
- }
-
- /* Send to server request of connection */
- if (!SetEvent(event_connect_request))
- {
- error_allow = CR_SHARED_MEMORY_CONNECT_SET_ERROR;
- goto err;
- }
-
- /* Wait of answer from server */
- if (WaitForSingleObject(event_connect_answer,connect_timeout*1000) !=
- WAIT_OBJECT_0)
- {
- error_allow = CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR;
- goto err;
- }
-
- /* Get number of connection */
- connect_number = uint4korr(handle_connect_map);/*WAX2*/
- p= int10_to_str(connect_number, connect_number_char, 10);
-
- /*
- The name of event and file-mapping events create agree next rule:
- shared_memory_base_name+unique_part+number_of_connection
-
- Where:
- shared_memory_base_name is uniquel value for each server
- unique_part is uniquel value for each object (events and file-mapping)
- number_of_connection is number of connection between server and client
- */
- suffix_pos = strxmov(tmp, prefix , shared_memory_base_name, "_", connect_number_char,
- "_", NullS);
- strmov(suffix_pos, "DATA");
- if ((handle_file_map = OpenFileMapping(FILE_MAP_WRITE,FALSE,tmp)) == NULL)
- {
- error_allow = CR_SHARED_MEMORY_FILE_MAP_ERROR;
- goto err2;
- }
- if ((handle_map = MapViewOfFile(handle_file_map,FILE_MAP_WRITE,0,0,
- smem_buffer_length)) == NULL)
- {
- error_allow = CR_SHARED_MEMORY_MAP_ERROR;
- goto err2;
- }
-
- strmov(suffix_pos, "SERVER_WROTE");
- if ((event_server_wrote = OpenEvent(event_access_rights,FALSE,tmp)) == NULL)
- {
- error_allow = CR_SHARED_MEMORY_EVENT_ERROR;
- goto err2;
- }
-
- strmov(suffix_pos, "SERVER_READ");
- if ((event_server_read = OpenEvent(event_access_rights,FALSE,tmp)) == NULL)
- {
- error_allow = CR_SHARED_MEMORY_EVENT_ERROR;
- goto err2;
- }
-
- strmov(suffix_pos, "CLIENT_WROTE");
- if ((event_client_wrote = OpenEvent(event_access_rights,FALSE,tmp)) == NULL)
- {
- error_allow = CR_SHARED_MEMORY_EVENT_ERROR;
- goto err2;
- }
-
- strmov(suffix_pos, "CLIENT_READ");
- if ((event_client_read = OpenEvent(event_access_rights,FALSE,tmp)) == NULL)
- {
- error_allow = CR_SHARED_MEMORY_EVENT_ERROR;
- goto err2;
- }
-
- strmov(suffix_pos, "CONNECTION_CLOSED");
- if ((event_conn_closed = OpenEvent(event_access_rights,FALSE,tmp)) == NULL)
- {
- error_allow = CR_SHARED_MEMORY_EVENT_ERROR;
- goto err2;
- }
- /*
- Set event that server should send data
- */
- SetEvent(event_server_read);
-
-err2:
- if (error_allow == 0)
- {
- net->vio= vio_new_win32shared_memory(handle_file_map,handle_map,
- event_server_wrote,
- event_server_read,event_client_wrote,
- event_client_read,event_conn_closed);
- }
- else
- {
- error_code = GetLastError();
- if (event_server_read)
- CloseHandle(event_server_read);
- if (event_server_wrote)
- CloseHandle(event_server_wrote);
- if (event_client_read)
- CloseHandle(event_client_read);
- if (event_client_wrote)
- CloseHandle(event_client_wrote);
- if (event_conn_closed)
- CloseHandle(event_conn_closed);
- if (handle_map)
- UnmapViewOfFile(handle_map);
- if (handle_file_map)
- CloseHandle(handle_file_map);
- }
-err:
- my_free(tmp);
- if (error_allow)
- error_code = GetLastError();
- if (event_connect_request)
- CloseHandle(event_connect_request);
- if (event_connect_answer)
- CloseHandle(event_connect_answer);
- if (handle_connect_map)
- UnmapViewOfFile(handle_connect_map);
- if (handle_connect_file_map)
- CloseHandle(handle_connect_file_map);
- if (error_allow)
- {
- if (error_allow == CR_SHARED_MEMORY_EVENT_ERROR)
- set_mysql_extended_error(mysql, error_allow, unknown_sqlstate,
- ER(error_allow), suffix_pos, error_code);
- else
- set_mysql_extended_error(mysql, error_allow, unknown_sqlstate,
- ER(error_allow), error_code);
- return(INVALID_HANDLE_VALUE);
- }
- return(handle_map);
-}
-#endif
-
/**
Read a packet from server. Give error message if socket was down
or packet is an error message
@@ -987,7 +741,7 @@ static const char *default_options[]=
"ssl-key" ,"ssl-cert" ,"ssl-ca" ,"ssl-capath",
"character-sets-dir", "default-character-set", "interactive-timeout",
"connect-timeout", "local-infile", "disable-local-infile",
- "ssl-cipher", "max-allowed-packet", "protocol", "shared-memory-base-name",
+ "ssl-cipher", "max-allowed-packet", "protocol",
"multi-results", "multi-statements", "multi-queries", "secure-auth",
"report-data-truncation", "plugin-dir", "default-auth",
"bind-address", "ssl-crl", "ssl-crlpath",
@@ -1000,7 +754,7 @@ enum option_id {
OPT_ssl_key, OPT_ssl_cert, OPT_ssl_ca, OPT_ssl_capath,
OPT_character_sets_dir, OPT_default_character_set, OPT_interactive_timeout,
OPT_connect_timeout, OPT_local_infile, OPT_disable_local_infile,
- OPT_ssl_cipher, OPT_max_allowed_packet, OPT_protocol, OPT_shared_memory_base_name,
+ OPT_ssl_cipher, OPT_max_allowed_packet, OPT_protocol,
OPT_multi_results, OPT_multi_statements, OPT_multi_queries, OPT_secure_auth,
OPT_report_data_truncation, OPT_plugin_dir, OPT_default_auth,
OPT_bind_address, OPT_ssl_crl, OPT_ssl_crlpath,
@@ -1241,13 +995,6 @@ void mysql_read_default_options(struct st_mysql_options *options,
options->protocol= UINT_MAX32;
}
break;
- case OPT_shared_memory_base_name:
-#ifdef HAVE_SMEM
- if (options->shared_memory_base_name != def_shared_memory_base_name)
- my_free(options->shared_memory_base_name);
- options->shared_memory_base_name=my_strdup(opt_arg,MYF(MY_WME));
-#endif
- break;
case OPT_multi_results:
options->client_flag|= CLIENT_MULTI_RESULTS;
break;
@@ -1640,10 +1387,6 @@ mysql_init(MYSQL *mysql)
? WAIT_FOR_QUERY : ALWAYS_ACCEPT;
#endif
-#ifdef HAVE_SMEM
- mysql->options.shared_memory_base_name= (char*) def_shared_memory_base_name;
-#endif
-
mysql->options.methods_to_use= MYSQL_OPT_GUESS_CONNECTION;
mysql->options.report_data_truncation= TRUE; /* default */
@@ -2848,12 +2591,6 @@ void mpvio_info(Vio *vio, MYSQL_PLUGIN_VIO_INFO *info)
info->protocol= MYSQL_VIO_PIPE;
info->handle= vio->hPipe;
return;
- case VIO_TYPE_SHARED_MEMORY:
- info->protocol= MYSQL_VIO_MEMORY;
-#ifdef HAVE_SMEM
- info->handle= vio->handle_file_map; /* or what ? */
-#endif
- return;
#endif
default: DBUG_ASSERT(0);
}
@@ -3188,42 +2925,7 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user,
/*
Part 0: Grab a socket and connect it to the server
*/
-#if defined(HAVE_SMEM)
- if ((!mysql->options.protocol ||
- mysql->options.protocol == MYSQL_PROTOCOL_MEMORY) &&
- (!host || !strcmp(host,LOCAL_HOST)) &&
- mysql->options.shared_memory_base_name)
- {
- DBUG_PRINT("info", ("Using shared memory"));
- if ((create_shared_memory(mysql,net, mysql->options.connect_timeout)) ==
- INVALID_HANDLE_VALUE)
- {
- DBUG_PRINT("error",
- ("host: '%s' socket: '%s' shared memory: %s have_tcpip: %d",
- host ? host : "<null>",
- unix_socket ? unix_socket : "<null>",
- mysql->options.shared_memory_base_name,
- (int) have_tcpip));
- if (mysql->options.protocol == MYSQL_PROTOCOL_MEMORY)
- goto error;
- /*
- Try also with PIPE or TCP/IP. Clear the error from
- create_shared_memory().
- */
-
- net_clear_error(net);
- }
- else
- {
- mysql->options.protocol=MYSQL_PROTOCOL_MEMORY;
- unix_socket = 0;
- host=mysql->options.shared_memory_base_name;
- my_snprintf(host_info=buff, sizeof(buff)-1,
- ER(CR_SHARED_MEMORY_CONNECTION), host);
- }
- }
-#endif /* HAVE_SMEM */
#if defined(HAVE_SYS_UN_H)
if (!net->vio &&
(!mysql->options.protocol ||
@@ -3836,10 +3538,6 @@ static void mysql_close_free_options(MYSQL *mysql)
#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
mysql_ssl_free(mysql);
#endif /* HAVE_OPENSSL && !EMBEDDED_LIBRARY */
-#ifdef HAVE_SMEM
- if (mysql->options.shared_memory_base_name != def_shared_memory_base_name)
- my_free(mysql->options.shared_memory_base_name);
-#endif /* HAVE_SMEM */
if (mysql->options.extension)
{
struct mysql_async_context *ctxt= mysql->options.extension->async_context;
@@ -4338,13 +4036,6 @@ mysql_options(MYSQL *mysql,enum mysql_option option, const void *arg)
case MYSQL_OPT_PROTOCOL:
mysql->options.protocol= *(uint*) arg;
break;
- case MYSQL_SHARED_MEMORY_BASE_NAME:
-#ifdef HAVE_SMEM
- if (mysql->options.shared_memory_base_name != def_shared_memory_base_name)
- my_free(mysql->options.shared_memory_base_name);
- mysql->options.shared_memory_base_name=my_strdup(arg,MYF(MY_WME));
-#endif
- break;
case MYSQL_OPT_USE_REMOTE_CONNECTION:
case MYSQL_OPT_USE_EMBEDDED_CONNECTION:
case MYSQL_OPT_GUESS_CONNECTION:
@@ -4482,8 +4173,8 @@ mysql_options(MYSQL *mysql,enum mysql_option option, const void *arg)
}
}
break;
+ case MYSQL_SHARED_MEMORY_BASE_NAME:
default:
- break;
DBUG_RETURN(1);
}
DBUG_RETURN(0);
diff --git a/sql-common/my_time.c b/sql-common/my_time.c
index c4731d6b601..60df4ef118d 100644
--- a/sql-common/my_time.c
+++ b/sql-common/my_time.c
@@ -58,6 +58,19 @@ uint calc_days_in_year(uint year)
366 : 365);
}
+
+#ifdef DBUG_ASSERT_EXISTS
+
+
+static const ulonglong C_KNOWN_FLAGS= C_TIME_NO_ZERO_IN_DATE |
+ C_TIME_NO_ZERO_DATE |
+ C_TIME_INVALID_DATES;
+
+#define C_FLAGS_OK(flags) (((flags) & ~C_KNOWN_FLAGS) == 0)
+
+#endif
+
+
/**
@brief Check datetime value for validity according to flags.
@@ -82,13 +95,14 @@ uint calc_days_in_year(uint year)
my_bool check_date(const MYSQL_TIME *ltime, my_bool not_zero_date,
ulonglong flags, int *was_cut)
{
+ DBUG_ASSERT(C_FLAGS_OK(flags));
if (ltime->time_type == MYSQL_TIMESTAMP_TIME)
return FALSE;
if (not_zero_date)
{
- if (((flags & TIME_NO_ZERO_IN_DATE) &&
+ if (((flags & C_TIME_NO_ZERO_IN_DATE) &&
(ltime->month == 0 || ltime->day == 0)) || ltime->neg ||
- (!(flags & TIME_INVALID_DATES) &&
+ (!(flags & C_TIME_INVALID_DATES) &&
ltime->month && ltime->day > days_in_month[ltime->month-1] &&
(ltime->month != 2 || calc_days_in_year(ltime->year) != 366 ||
ltime->day != 29)))
@@ -97,12 +111,13 @@ my_bool check_date(const MYSQL_TIME *ltime, my_bool not_zero_date,
return TRUE;
}
}
- else if (flags & TIME_NO_ZERO_DATE)
+ else if (flags & C_TIME_NO_ZERO_DATE)
{
/*
We don't set *was_cut here to signal that the problem was a zero date
and not an invalid date
*/
+ *was_cut|= MYSQL_TIME_WARN_ZERO_DATE;
return TRUE;
}
return FALSE;
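
For illustration only (not part of the patch), a minimal caller-side sketch of the stricter check_date() contract above; the helper name is hypothetical and the flag combination is chosen arbitrarily:

    /* assumes #include "my_time.h" */
    static my_bool date_is_strictly_valid(const MYSQL_TIME *lt)
    {
      int was_cut= 0;
      my_bool not_zero= lt->year || lt->month || lt->day;
      /* check_date() returns TRUE when the value is rejected; was_cut now
         carries MYSQL_TIME_WARN_ZERO_DATE when the reason was a zero date. */
      return !check_date(lt, not_zero,
                         C_TIME_NO_ZERO_DATE | C_TIME_NO_ZERO_IN_DATE,
                         &was_cut);
    }
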
@@ -145,7 +160,8 @@ static int get_punct(const char **str, const char *end)
return 1;
}
-static int get_date_time_separator(uint *number_of_fields, ulonglong flags,
+static int get_date_time_separator(uint *number_of_fields,
+ my_bool punct_is_date_time_separator,
const char **str, const char *end)
{
const char *s= *str;
@@ -164,11 +180,11 @@ static int get_date_time_separator(uint *number_of_fields, ulonglong flags,
but
cast("11:11:11.12.12.12" as time) should give 11:11:11.12
that is, a punctuation character can be accepted as a date/time separator
- only if TIME_DATETIME_ONLY (see str_to_time) is not set.
+ only if "punct_is_date_time_separator" is set.
*/
if (my_ispunct(&my_charset_latin1, *s))
{
- if (flags & TIME_DATETIME_ONLY)
+ if (!punct_is_date_time_separator)
{
/* see above, returning 1 is not enough, we need hard abort here */
*number_of_fields= 0;
@@ -241,25 +257,220 @@ static void get_microseconds(ulong *val, MYSQL_TIME_STATUS *status,
*val= (ulong) (tmp * log_10_int[6 - (*str - start)]);
else
*val= tmp;
+ if (str[0] < end && my_isdigit(&my_charset_latin1, str[0][0]))
+ {
+ /*
+ We don't need the exact nanoseconds value.
+ Knowing the first digit is enough for rounding.
+ */
+ status->nanoseconds= 100 * (uint)(str[0][0] - '0');
+ }
if (skip_digits(str, end))
status->warnings|= MYSQL_TIME_NOTE_TRUNCATED;
}
+static int check_time_range_internal(MYSQL_TIME *ltime,
+ ulong max_hour, ulong err_hour,
+ uint dec, int *warning);
+
+int check_time_range(MYSQL_TIME *ltime, uint dec, int *warning)
+{
+ return check_time_range_internal(ltime, TIME_MAX_HOUR, UINT_MAX32,
+ dec, warning);
+}
+
+
+static my_bool
+set_neg(my_bool neg, MYSQL_TIME_STATUS *st, MYSQL_TIME *ltime)
+{
+ if ((ltime->neg= neg) && ltime->time_type != MYSQL_TIMESTAMP_TIME)
+ {
+ st->warnings|= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+ /* Remove trailing spaces and garbage */
+static my_bool get_suffix(const char *str, size_t length, size_t *new_length)
+{
+ /*
+ QQ: perhaps 'T' should be considered as a date/time delimiter only
+ if it's followed by a digit. Learn ISO 8601 details.
+ */
+ my_bool garbage= FALSE;
+ for ( ; length > 0 ; length--)
+ {
+ char ch= str[length - 1];
+ if (my_isdigit(&my_charset_latin1, ch) ||
+ my_ispunct(&my_charset_latin1, ch))
+ break;
+ if (my_isspace(&my_charset_latin1, ch))
+ continue;
+ if (ch == 'T')
+ {
+      /* 'T' has a meaning only after a digit. Otherwise it's garbage */
+ if (length >= 2 && my_isdigit(&my_charset_latin1, str[length - 2]))
+ break;
+ }
+ garbage= TRUE;
+ }
+ *new_length= length;
+ return garbage;
+}
+
+
+static size_t get_prefix(const char *str, size_t length, const char **endptr)
+{
+ const char *str0= str, *end= str + length;
+ for (; str < end && my_isspace(&my_charset_latin1, *str) ; str++)
+ { }
+ *endptr= str;
+ return str - str0;
+}
+
+
+static size_t get_sign(my_bool *neg, const char *str, size_t length,
+ const char **endptr)
+{
+ const char *str0= str;
+ if (length)
+ {
+ if ((*neg= (*str == '-')) || (*str == '+'))
+ str++;
+ }
+ else
+ *neg= FALSE;
+ *endptr= str;
+ return str - str0;
+}
+
+
+static my_bool find_body(my_bool *neg, const char *str, size_t length,
+ MYSQL_TIME *to, int *warn,
+ const char **new_str, size_t *new_length)
+{
+ size_t sign_length;
+ *warn= 0;
+ length-= get_prefix(str, length, &str);
+ sign_length= get_sign(neg, str, length, &str);
+ length-= sign_length;
+ /* There can be a space after a sign again: '- 10:20:30' or '- 1 10:20:30' */
+ length-= get_prefix(str, length, &str);
+ if (get_suffix(str, length, &length))
+ *warn|= MYSQL_TIME_WARN_TRUNCATED;
+ *new_str= str;
+ *new_length= length;
+ if (!length || !my_isdigit(&my_charset_latin1, *str))
+ {
+ *warn|= MYSQL_TIME_WARN_EDOM;
+ set_zero_time(to, MYSQL_TIMESTAMP_ERROR);
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+typedef struct
+{
+ uint count_punct;
+ uint count_colon;
+ uint count_iso_date_time_separator;
+} MYSQL_TIME_USED_CHAR_STATISTICS;
+
+
+static void
+mysql_time_used_char_statistics_init(MYSQL_TIME_USED_CHAR_STATISTICS *to,
+ const char *str, const char *end)
+{
+ const char *s;
+ bzero((void *) to, sizeof(MYSQL_TIME_USED_CHAR_STATISTICS));
+ for (s= str; s < end; s++)
+ {
+ if (my_ispunct(&my_charset_latin1, *s))
+ to->count_punct++;
+ if (*s == ':')
+ to->count_colon++;
+ if (*s == 'T')
+ to->count_iso_date_time_separator++;
+ }
+}
+
+
+static my_bool
+is_datetime_body_candidate(const char *str, size_t length,
+ my_bool allow_dates_delimited,
+ my_bool allow_dates_numeric)
+{
+ static uint min_date_length= 5; /* '1-1-1' -> '0001-01-01' */
+ uint pos, count_punct= 0;
+ uint date_time_separator_length= MY_TEST(!allow_dates_delimited);
+ if (length >= 12)
+ return TRUE;
+ /*
+ The shortest possible DATE is '1-1-1', which is 5 characters.
+ To make a full datetime it should be at least followed by a space or a 'T'.
+    To make a date it should be not less than 5 characters.
+ */
+ if (length < min_date_length + date_time_separator_length &&
+ !allow_dates_numeric)
+ return FALSE;
+ for (pos= 0; pos < length; pos++)
+ {
+ if (str[pos] == 'T') /* Date/time separator */
+ return TRUE;
+ if (str[pos] == ' ')
+ {
+ /*
+        We found a space. It can be a DATE/TIME separator:
+           TIME('1-1-1 1:1:1.0') -> '0001-01-01 01:01:01.0'
+
+        But it can also be a DAY/TIME separator:
+ TIME('1 11') -> 35:00:00 = 1 day 11 hours
+ TIME('1 111') -> 135:00:00 = 1 day 111 hours
+ TIME('11 11') -> 275:00:00 = 11 days 11 hours
+ TIME('111 11') -> 838:59:59 = 111 days 11 hours with overflow
+ TIME('1111 11') -> 838:59:59 = 1111 days 11 hours with overflow
+ */
+      return count_punct > 0; /* Can be a DATE if it already had separators */
+ }
+ if (my_ispunct(&my_charset_latin1, str[pos]))
+ {
+ if (allow_dates_delimited && str[pos] != ':')
+ return TRUE;
+ count_punct++;
+ }
+ }
+ return allow_dates_numeric && count_punct == 0;
+}
+
+
+static my_bool
+str_to_DDhhmmssff_internal(my_bool neg, const char *str, size_t length,
+ MYSQL_TIME *l_time,
+ ulong max_hour, ulong err_hour,
+ MYSQL_TIME_STATUS *status,
+ const char **endptr);
+
+
/*
Convert a timestamp string to a MYSQL_TIME value.
SYNOPSIS
- str_to_datetime()
+ str_to_datetime_or_date_body()
str String to parse
length Length of string
l_time Date is stored here
flags Bitmap of following items
- TIME_FUZZY_DATE
TIME_DATETIME_ONLY Set if we only allow full datetimes.
TIME_NO_ZERO_IN_DATE Don't allow partial dates
TIME_NO_ZERO_DATE Don't allow 0000-00-00 date
TIME_INVALID_DATES Allow 2000-02-31
+ punct_is_date_time_separator
+ Allow punctuation as a date/time separator,
+ or return a hard error.
status Conversion status
@@ -292,32 +503,22 @@ static void get_microseconds(ulong *val, MYSQL_TIME_STATUS *status,
#define MAX_DATE_PARTS 8
-my_bool
-str_to_datetime(const char *str, size_t length, MYSQL_TIME *l_time,
- ulonglong flags, MYSQL_TIME_STATUS *status)
+static my_bool
+str_to_datetime_or_date_body(const char *str, size_t length, MYSQL_TIME *l_time,
+ ulonglong flags,
+ my_bool punct_is_date_time_separator,
+ MYSQL_TIME_STATUS *status,
+ uint *number_of_fields,
+ const char **endptr)
{
const char *end=str+length, *pos;
- uint number_of_fields= 0, digits, year_length, not_zero_date;
- DBUG_ENTER("str_to_datetime");
+ uint digits, year_length, not_zero_date;
+ int warn= 0;
+ DBUG_ENTER("str_to_datetime_or_date_body");
+ DBUG_ASSERT(C_FLAGS_OK(flags));
bzero(l_time, sizeof(*l_time));
-
- if (flags & TIME_TIME_ONLY)
- {
- my_bool ret= str_to_time(str, length, l_time, flags, status);
- DBUG_RETURN(ret);
- }
-
- my_time_status_init(status);
-
- /* Skip space at start */
- for (; str != end && my_isspace(&my_charset_latin1, *str) ; str++)
- ;
- if (str == end || ! my_isdigit(&my_charset_latin1, *str))
- {
- status->warnings= MYSQL_TIME_WARN_TRUNCATED;
- l_time->time_type= MYSQL_TIMESTAMP_NONE;
- DBUG_RETURN(1);
- }
+ *number_of_fields= 0;
+ *endptr= str;
/*
Calculate number of digits in first part.
@@ -345,49 +546,53 @@ str_to_datetime(const char *str, size_t length, MYSQL_TIME *l_time,
(only numbers like [YY]YYMMDD[T][hhmmss[.uuuuuu]])
*/
year_length= (digits == 4 || digits == 8 || digits >= 14) ? 4 : 2;
- if (get_digits(&l_time->year, &number_of_fields, &str, end, year_length)
- || get_digits(&l_time->month, &number_of_fields, &str, end, 2)
- || get_digits(&l_time->day, &number_of_fields, &str, end, 2)
+ if (get_digits(&l_time->year, number_of_fields, &str, end, year_length)
+ || get_digits(&l_time->month, number_of_fields, &str, end, 2)
+ || get_digits(&l_time->day, number_of_fields, &str, end, 2)
|| get_maybe_T(&str, end)
- || get_digits(&l_time->hour, &number_of_fields, &str, end, 2)
- || get_digits(&l_time->minute, &number_of_fields, &str, end, 2)
- || get_digits(&l_time->second, &number_of_fields, &str, end, 2))
- status->warnings|= MYSQL_TIME_WARN_TRUNCATED;
+ || get_digits(&l_time->hour, number_of_fields, &str, end, 2)
+ || get_digits(&l_time->minute, number_of_fields, &str, end, 2)
+ || get_digits(&l_time->second, number_of_fields, &str, end, 2))
+ warn|= MYSQL_TIME_WARN_TRUNCATED;
}
else
{
const char *start= str;
- if (get_number(&l_time->year, &number_of_fields, &str, end))
- status->warnings|= MYSQL_TIME_WARN_TRUNCATED;
+ if (get_number(&l_time->year, number_of_fields, &str, end))
+ warn|= MYSQL_TIME_WARN_TRUNCATED;
year_length= (uint)(str - start);
- if (!status->warnings &&
+ if (!warn &&
(get_punct(&str, end)
- || get_number(&l_time->month, &number_of_fields, &str, end)
+ || get_number(&l_time->month, number_of_fields, &str, end)
|| get_punct(&str, end)
- || get_number(&l_time->day, &number_of_fields, &str, end)
- || get_date_time_separator(&number_of_fields, flags, &str, end)
- || get_number(&l_time->hour, &number_of_fields, &str, end)
+ || get_number(&l_time->day, number_of_fields, &str, end)
+ || get_date_time_separator(number_of_fields,
+ punct_is_date_time_separator, &str, end)
+ || get_number(&l_time->hour, number_of_fields, &str, end)
|| get_punct(&str, end)
- || get_number(&l_time->minute, &number_of_fields, &str, end)
+ || get_number(&l_time->minute, number_of_fields, &str, end)
|| get_punct(&str, end)
- || get_number(&l_time->second, &number_of_fields, &str, end)))
- status->warnings|= MYSQL_TIME_WARN_TRUNCATED;
+ || get_number(&l_time->second, number_of_fields, &str, end)))
+ warn|= MYSQL_TIME_WARN_TRUNCATED;
}
+ status->warnings|= warn;
+ *endptr= str;
/* we're ok if date part is correct. even if the rest is truncated */
- if (number_of_fields < 3)
+ if (*number_of_fields < 3)
{
l_time->time_type= MYSQL_TIMESTAMP_NONE;
status->warnings|= MYSQL_TIME_WARN_TRUNCATED;
DBUG_RETURN(TRUE);
}
- if (!status->warnings && str < end && *str == '.')
+ if (!warn && str < end && *str == '.')
{
str++;
get_microseconds(&l_time->second_part, status,
- &number_of_fields, &str, end);
+ number_of_fields, &str, end);
+ *endptr= str;
}
not_zero_date = l_time->year || l_time->month || l_time->day ||
@@ -407,23 +612,16 @@ str_to_datetime(const char *str, size_t length, MYSQL_TIME *l_time,
if (check_date(l_time, not_zero_date, flags, &status->warnings))
goto err;
- l_time->time_type= (number_of_fields <= 3 ?
+ l_time->time_type= (*number_of_fields <= 3 ?
MYSQL_TIMESTAMP_DATE : MYSQL_TIMESTAMP_DATETIME);
- for (; str != end ; str++)
- {
- if (!my_isspace(&my_charset_latin1,*str))
- {
- status->warnings= MYSQL_TIME_WARN_TRUNCATED;
- break;
- }
- }
+ if (str != end)
+ status->warnings= MYSQL_TIME_WARN_TRUNCATED;
DBUG_RETURN(FALSE);
err:
- bzero((char*) l_time, sizeof(*l_time));
- l_time->time_type= MYSQL_TIMESTAMP_ERROR;
+ set_zero_time(l_time, MYSQL_TIMESTAMP_ERROR);
DBUG_RETURN(TRUE);
}
@@ -432,7 +630,7 @@ err:
Convert a time string to a MYSQL_TIME struct.
SYNOPSIS
- str_to_time()
+ str_to_datetime_or_date_or_time_body()
str A string in full TIMESTAMP format or
[-] DAYS [H]H:MM:SS, [H]H:MM:SS, [M]M:SS, [H]HMMSS,
[M]MSS or [S]S
@@ -457,45 +655,280 @@ err:
TRUE on error
*/
-my_bool str_to_time(const char *str, size_t length, MYSQL_TIME *l_time,
- ulonglong fuzzydate, MYSQL_TIME_STATUS *status)
+static my_bool
+str_to_datetime_or_date_or_time_body(const char *str, size_t length,
+ MYSQL_TIME *l_time,
+ ulonglong fuzzydate,
+ MYSQL_TIME_STATUS *status,
+ ulong time_max_hour,
+ ulong time_err_hour,
+ my_bool allow_dates_delimited,
+ my_bool allow_dates_numeric)
{
- ulong date[5];
- ulonglong value;
- const char *end=str+length, *end_of_days;
- my_bool found_days,found_hours, neg= 0;
- uint UNINIT_VAR(state);
-
- my_time_status_init(status);
- for (; str != end && my_isspace(&my_charset_latin1,*str) ; str++)
- length--;
- if (str != end && *str == '-')
- {
- neg=1;
- str++;
- length--;
- }
- if (str == end)
- {
- status->warnings|= MYSQL_TIME_WARN_TRUNCATED;
- goto err;
- }
+ const char *endptr;
+ DBUG_ASSERT(C_FLAGS_OK(fuzzydate));
/* Check first if this is a full TIMESTAMP */
- if (length >= 12)
+ if (is_datetime_body_candidate(str, length,
+ allow_dates_delimited,
+ allow_dates_numeric))
{ /* Probably full timestamp */
- (void) str_to_datetime(str, length, l_time,
- (fuzzydate & ~TIME_TIME_ONLY) | TIME_DATETIME_ONLY,
- status);
- if (l_time->time_type >= MYSQL_TIMESTAMP_ERROR)
- return l_time->time_type == MYSQL_TIMESTAMP_ERROR;
+ int warn_copy= status->warnings; /* could already be set by find_body() */
+ uint number_of_fields;
+ (void) str_to_datetime_or_date_body(str, length, l_time, fuzzydate,
+ FALSE, status,
+ &number_of_fields, &endptr);
+ DBUG_ASSERT(endptr >= str);
+ DBUG_ASSERT(endptr <= str + length);
+ switch (l_time->time_type) {
+ case MYSQL_TIMESTAMP_DATETIME:
+ return FALSE;
+ case MYSQL_TIMESTAMP_DATE:
+ {
+ /*
+ Successfully parsed as DATE, but it can also be a TIME:
+ '24:02:03' - continue and parse as TIME
+ '24:02:03 garbage /////' - continue and parse as TIME
+ '24:02:03T' - return DATE
+ '24-02-03' - return DATE
+ '24/02/03' - return DATE
+ '11111' - return DATE
+ */
+ MYSQL_TIME_USED_CHAR_STATISTICS used_chars;
+ mysql_time_used_char_statistics_init(&used_chars, str, endptr);
+ if (used_chars.count_iso_date_time_separator || !used_chars.count_colon)
+ return FALSE;
+ }
+ break;
+ case MYSQL_TIMESTAMP_ERROR:
+ {
+ MYSQL_TIME_USED_CHAR_STATISTICS used_chars;
+ /*
+ Check if it parsed as DATETIME but then failed as out of range:
+ '2011-02-32 8:46:06.23434' - return error
+ */
+ if (number_of_fields > 3)
+ return TRUE;
+ /*
+ Check if it parsed as DATE but then failed as out of range:
+ '100000:02:03' - continue and parse as TIME
+ '100000:02:03T' - return error
+ '100000/02/03' - return error
+ '100000-02-03' - return error
+ */
+ mysql_time_used_char_statistics_init(&used_chars, str, endptr);
+ if (used_chars.count_iso_date_time_separator || !used_chars.count_colon)
+ return TRUE;
+ }
+ break;
+ case MYSQL_TIMESTAMP_NONE:
+ {
+ if (allow_dates_numeric && endptr >= str + length)
+ {
+ /*
+ For backward compatibility this parses as DATE and fails:
+ EXTRACT(DAY FROM '1111') -- return error
+ EXTRACT(DAY FROM '1') -- return error
+ */
+ MYSQL_TIME_USED_CHAR_STATISTICS used_chars;
+ mysql_time_used_char_statistics_init(&used_chars, str, endptr);
+ if (!used_chars.count_iso_date_time_separator &&
+ !used_chars.count_colon &&
+ !used_chars.count_punct)
+ return TRUE;
+ }
+ /*
+ - '256 10:30:30' - continue and parse as TIME
+ - '4294967296:59:59.123456456' - continue and parse as TIME
+ */
+ }
+ break;
+ case MYSQL_TIMESTAMP_TIME:
+ DBUG_ASSERT(0);
+ break;
+ }
+ my_time_status_init(status);
+ status->warnings= warn_copy;
+ }
+
+ if (!str_to_DDhhmmssff_internal(FALSE, str, length, l_time,
+ time_max_hour, time_err_hour,
+ status, &endptr))
+ return FALSE;
+
+ set_zero_time(l_time, MYSQL_TIMESTAMP_ERROR);
+ return TRUE;
+}
+
+
+/*
+ Convert a string with INTERVAL DAY TO SECOND to MYSQL_TIME.
+ Input format: [-][DD ]hh:mm:ss.ffffff
+
+ If the input string appears to be a DATETIME, error is returned.
+*/
+my_bool str_to_DDhhmmssff(const char *str, size_t length, MYSQL_TIME *ltime,
+ ulong max_hour, MYSQL_TIME_STATUS *status)
+{
+ my_bool neg;
+ const char *endptr;
+
+ my_time_status_init(status);
+ if (find_body(&neg, str, length, ltime, &status->warnings, &str, &length))
+ return TRUE;
+
+ /* Reject anything that might be parsed as a full TIMESTAMP */
+ if (is_datetime_body_candidate(str, length, FALSE, FALSE))
+ {
+ uint number_of_fields;
+ (void) str_to_datetime_or_date_body(str, length, ltime, 0, FALSE,
+ status, &number_of_fields, &endptr);
+ if (ltime->time_type > MYSQL_TIMESTAMP_ERROR)
+ {
+ status->warnings|= MYSQL_TIME_WARN_TRUNCATED;
+ ltime->time_type= MYSQL_TIMESTAMP_NONE;
+ return TRUE;
+ }
my_time_status_init(status);
}
+ /*
+    Scan DDhhmmssff, then reject anything that resembles a date or datetime.
+ For example, in case of '2001-01-01', str_to_DDhhmmssff_internal()
+ will scan only '2001'.
+ */
+ if (str_to_DDhhmmssff_internal(neg, str, length, ltime, max_hour,
+ UINT_MAX32, status, &endptr) ||
+ (endptr < str + length && endptr[0] == '-'))
+ return TRUE;
+ return FALSE;
+}
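
A minimal usage sketch (not part of the patch) of the new entry point; the wrapper name is hypothetical and the sketch assumes the usual headers:

    /* assumes #include "my_time.h" and <string.h> */
    static my_bool parse_day_to_second(const char *s, MYSQL_TIME *ltime,
                                       MYSQL_TIME_STATUS *status)
    {
      /* Accepts '[-][DD ]hh:mm:ss.ffffff', e.g. "2 10:20:30.123"; returns
         TRUE for datetime-looking or malformed input, with the reason
         reported in status->warnings. */
      return str_to_DDhhmmssff(s, strlen(s), ltime, TIME_MAX_HOUR, status);
    }
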
+
+
+my_bool
+str_to_datetime_or_date_or_time(const char *str, size_t length,
+ MYSQL_TIME *to, ulonglong mode,
+ MYSQL_TIME_STATUS *status,
+ ulong time_max_hour,
+ ulong time_err_hour)
+{
+ my_bool neg;
+ DBUG_ASSERT(C_FLAGS_OK(mode));
+ my_time_status_init(status);
+ return
+ find_body(&neg, str, length, to, &status->warnings, &str, &length) ||
+ str_to_datetime_or_date_or_time_body(str, length, to, mode, status,
+ time_max_hour, time_err_hour,
+ FALSE, FALSE) ||
+ set_neg(neg, status, to);
+}
+
+
+my_bool
+str_to_datetime_or_date_or_interval_hhmmssff(const char *str, size_t length,
+ MYSQL_TIME *to, ulonglong mode,
+ MYSQL_TIME_STATUS *status,
+ ulong time_max_hour,
+ ulong time_err_hour)
+{
+ my_bool neg;
+ DBUG_ASSERT(C_FLAGS_OK(mode));
+ my_time_status_init(status);
+ return
+ find_body(&neg, str, length, to, &status->warnings, &str, &length) ||
+ str_to_datetime_or_date_or_time_body(str, length, to, mode, status,
+ time_max_hour, time_err_hour,
+ TRUE, FALSE) ||
+ set_neg(neg, status, to);
+}
+
+
+my_bool
+str_to_datetime_or_date_or_interval_day(const char *str, size_t length,
+ MYSQL_TIME *to, ulonglong mode,
+ MYSQL_TIME_STATUS *status,
+ ulong time_max_hour,
+ ulong time_err_hour)
+{
+ my_bool neg;
+ DBUG_ASSERT(C_FLAGS_OK(mode));
+ my_time_status_init(status);
+ /*
+    For backward compatibility we allow parsing non-delimited
+ values as DATE rather than as TIME:
+ EXTRACT(DAY FROM '11111')
+ */
+ return
+ find_body(&neg, str, length, to, &status->warnings, &str, &length) ||
+ str_to_datetime_or_date_or_time_body(str, length, to, mode, status,
+ time_max_hour, time_err_hour,
+ TRUE, TRUE) ||
+ set_neg(neg, status, to);
+}
+
+
+my_bool
+str_to_datetime_or_date(const char *str, size_t length, MYSQL_TIME *l_time,
+ ulonglong flags, MYSQL_TIME_STATUS *status)
+{
+ my_bool neg;
+ uint number_of_fields;
+ const char *endptr;
+ DBUG_ASSERT(C_FLAGS_OK(flags));
+ my_time_status_init(status);
+ return
+ find_body(&neg, str, length, l_time, &status->warnings, &str, &length) ||
+ str_to_datetime_or_date_body(str, length, l_time, flags, TRUE,
+ status, &number_of_fields, &endptr) ||
+ set_neg(neg, status, l_time);
+}
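
For illustration only, the wrapper family above replaces the old str_to_datetime()/str_to_time() entry points; a sketch of the plain date/datetime case, with a hypothetical helper name and an arbitrary flag combination:

    /* assumes #include "my_time.h" and <string.h> */
    static my_bool parse_datetime_strict(const char *s, MYSQL_TIME *tm,
                                         MYSQL_TIME_STATUS *st)
    {
      /* On success tm->time_type is MYSQL_TIMESTAMP_DATETIME (or _DATE for a
         date-only string); st->warnings carries MYSQL_TIME_WARN_* bits. */
      return str_to_datetime_or_date(s, strlen(s), tm,
                                     C_TIME_NO_ZERO_DATE |
                                     C_TIME_NO_ZERO_IN_DATE, st);
    }
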
+
+
+
+/**
+ Convert a string to INTERVAL DAY TO SECOND.
+ Input format: [DD ]hh:mm:ss.ffffff
+
+ Datetime or date formats are not understood.
+
+ Optional leading spaces and signs must be scanned by the caller.
+ "str" should point to the first digit.
+
+ @param neg - set the value to be negative
+ @param str - the input string
+ @param length - length of "str"
+ @param[OUT] l_time - write the result here
+ @param max_hour - if the result hour value appears to be greater than
+                    max_hour, then cut the result to 'max_hour:59:59.999999'
+ @param err_hour - if the hour appears to be greater than err_hour,
+ return an error (without cut)
+ @param status
+ @param endptr
+*/
+static my_bool
+str_to_DDhhmmssff_internal(my_bool neg, const char *str, size_t length,
+ MYSQL_TIME *l_time,
+ ulong max_hour, ulong err_hour,
+ MYSQL_TIME_STATUS *status, const char **endptr)
+{
+ ulong date[5];
+ ulonglong value;
+ const char *end=str + length, *end_of_days;
+ my_bool found_days, found_hours;
+ uint UNINIT_VAR(state);
+
+ *endptr= str;
l_time->neg= neg;
- /* Not a timestamp. Try to get this as a DAYS_TO_SECOND string */
+  /* Not a timestamp. Try to get this as a DAY TO SECOND string */
for (value=0; str != end && my_isdigit(&my_charset_latin1,*str) ; str++)
+ {
value=value*10L + (long) (*str - '0');
+ if (value >= 42949672955959ULL) /* i.e. UINT_MAX32 : 59 : 59 */
+ {
+ status->warnings|= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ goto err;
+ }
+ }
/* Skip all space after 'days' */
end_of_days= str;
@@ -514,6 +947,11 @@ my_bool str_to_time(const char *str, size_t length, MYSQL_TIME *l_time,
my_isdigit(&my_charset_latin1, str[1]))
{
date[0]= 0; /* Assume we found hours */
+ if (value >= UINT_MAX32)
+ {
+ status->warnings|= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ goto err;
+ }
date[1]= (ulong) value;
state=2;
found_hours=1;
@@ -523,6 +961,7 @@ my_bool str_to_time(const char *str, size_t length, MYSQL_TIME *l_time,
{
/* String given as one number; assume HHMMSS format */
date[0]= 0;
+ DBUG_ASSERT(value <= ((ulonglong) UINT_MAX32) * 10000ULL);
date[1]= (ulong) (value/10000);
date[2]= (ulong) (value/100 % 100);
date[3]= (ulong) (value % 100);
@@ -557,7 +996,7 @@ my_bool str_to_time(const char *str, size_t length, MYSQL_TIME *l_time,
fractional:
/* Get fractional second part */
- if (!status->warnings && str < end && *str == '.')
+ if (str < end && *str == '.')
{
uint number_of_fields= 0;
str++;
@@ -605,6 +1044,11 @@ fractional:
goto err;
}
+ if ((ulonglong) date[0] * 24 + date[1] > (ulonglong) UINT_MAX32)
+ {
+ status->warnings|= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ goto err;
+ }
l_time->year= 0; /* For protocol::store_time */
l_time->month= 0;
l_time->day= 0;
@@ -614,27 +1058,20 @@ fractional:
l_time->second_part= date[4];
l_time->time_type= MYSQL_TIMESTAMP_TIME;
+ *endptr= str;
+
/* Check if the value is valid and fits into MYSQL_TIME range */
- if (check_time_range(l_time, 6, &status->warnings))
+ if (check_time_range_internal(l_time, max_hour, err_hour,
+ 6, &status->warnings))
return TRUE;
/* Check if there is garbage at end of the MYSQL_TIME specification */
if (str != end)
- {
- do
- {
- if (!my_isspace(&my_charset_latin1,*str))
- {
- status->warnings|= MYSQL_TIME_WARN_TRUNCATED;
- break;
- }
- } while (++str != end);
- }
+ status->warnings|= MYSQL_TIME_WARN_TRUNCATED;
return FALSE;
err:
- bzero((char*) l_time, sizeof(*l_time));
- l_time->time_type= MYSQL_TIMESTAMP_ERROR;
+ *endptr= str;
return TRUE;
}
@@ -643,8 +1080,11 @@ err:
Check 'time' value to lie in the MYSQL_TIME range
SYNOPSIS:
- check_time_range()
+ check_time_range_internal()
time pointer to MYSQL_TIME value
+ ulong max_hour - maximum allowed hour value. if the hour is greater,
+ cut the time value to 'max_hour:59:59.999999'
+ ulong err_hour - if hour is greater than this value, return an error
uint dec
warning set MYSQL_TIME_WARN_OUT_OF_RANGE flag if the value is out of range
@@ -658,13 +1098,16 @@ err:
1 time value is invalid
*/
-int check_time_range(struct st_mysql_time *my_time, uint dec, int *warning)
+int check_time_range_internal(struct st_mysql_time *my_time,
+ ulong max_hour, ulong err_hour,
+ uint dec, int *warning)
{
- longlong hour;
+ ulonglong hour;
static ulong max_sec_part[TIME_SECOND_PART_DIGITS+1]= {000000, 900000, 990000,
999000, 999900, 999990, 999999};
- if (my_time->minute >= 60 || my_time->second >= 60)
+ if (my_time->minute >= 60 || my_time->second >= 60 ||
+ my_time->hour > err_hour)
{
*warning|= MYSQL_TIME_WARN_TRUNCATED;
return 1;
@@ -675,14 +1118,14 @@ int check_time_range(struct st_mysql_time *my_time, uint dec, int *warning)
if (dec == AUTO_SEC_PART_DIGITS)
dec= TIME_SECOND_PART_DIGITS;
- if (hour <= TIME_MAX_HOUR &&
- (hour != TIME_MAX_HOUR || my_time->minute != TIME_MAX_MINUTE ||
+ if (hour <= max_hour &&
+ (hour != max_hour || my_time->minute != TIME_MAX_MINUTE ||
my_time->second != TIME_MAX_SECOND ||
my_time->second_part <= max_sec_part[dec]))
return 0;
my_time->day= 0;
- my_time->hour= TIME_MAX_HOUR;
+ my_time->hour= max_hour;
my_time->minute= TIME_MAX_MINUTE;
my_time->second= TIME_MAX_SECOND;
my_time->second_part= max_sec_part[dec];
@@ -1031,6 +1474,46 @@ static char* fmt_number(uint val, char *out, uint digits)
}
+static int my_mmssff_to_str(const MYSQL_TIME *ltime, char *to, uint fsp)
+{
+ char *pos= to;
+ if (fsp == AUTO_SEC_PART_DIGITS)
+ fsp= ltime->second_part ? TIME_SECOND_PART_DIGITS : 0;
+ DBUG_ASSERT(fsp <= TIME_SECOND_PART_DIGITS);
+ pos= fmt_number(ltime->minute, pos, 2);
+ *pos++= ':';
+ pos= fmt_number(ltime->second, pos, 2);
+ if (fsp)
+ {
+ *pos++= '.';
+ pos= fmt_number((uint)sec_part_shift(ltime->second_part, fsp), pos, fsp);
+ }
+ return (int) (pos - to);
+}
+
+
+int my_interval_DDhhmmssff_to_str(const MYSQL_TIME *ltime, char *to, uint fsp)
+{
+ uint hour= ltime->day * 24 + ltime->hour;
+ char *pos= to;
+ DBUG_ASSERT(!ltime->year);
+ DBUG_ASSERT(!ltime->month);
+
+ if(ltime->neg)
+ *pos++= '-';
+ if (hour >= 24)
+ {
+ pos= longlong10_to_str((longlong) hour / 24, pos, 10);
+ *pos++= ' ';
+ }
+ pos= fmt_number(hour % 24, pos, 2);
+ *pos++= ':';
+ pos+= my_mmssff_to_str(ltime, pos, fsp);
+ *pos= 0;
+ return (int) (pos-to);
+}
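
A small sketch (not part of the patch) of the printing helper added above; the function name and field values are made up:

    /* assumes #include "my_time.h"; buf must be large enough for the result */
    static int interval_to_text(const MYSQL_TIME *ltime, char *buf)
    {
      /* With neg=0, day=2, hour=10, minute=20, second=30, second_part=500000
         and fsp=2 this writes "2 10:20:30.50" and returns its length. */
      return my_interval_DDhhmmssff_to_str(ltime, buf, 2);
    }
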
+
+
/*
Functions to convert time/date/datetime value to a string,
using default format.
@@ -1048,11 +1531,6 @@ int my_time_to_str(const MYSQL_TIME *l_time, char *to, uint digits)
uint hour= day * 24 + l_time->hour;
char*pos= to;
- if (digits == AUTO_SEC_PART_DIGITS)
- digits= l_time->second_part ? TIME_SECOND_PART_DIGITS : 0;
-
- DBUG_ASSERT(digits <= TIME_SECOND_PART_DIGITS);
-
if(l_time->neg)
*pos++= '-';
@@ -1063,17 +1541,7 @@ int my_time_to_str(const MYSQL_TIME *l_time, char *to, uint digits)
pos= fmt_number(hour, pos, 2);
*pos++= ':';
- pos= fmt_number(l_time->minute, pos, 2);
- *pos++= ':';
- pos= fmt_number(l_time->second, pos, 2);
-
- if (digits)
- {
- *pos++= '.';
- pos= fmt_number((uint)sec_part_shift(l_time->second_part, digits),
- pos, digits);
- }
-
+ pos+= my_mmssff_to_str(l_time, pos, digits);
*pos= 0;
return (int) (pos-to);
}
@@ -1095,12 +1563,6 @@ int my_date_to_str(const MYSQL_TIME *l_time, char *to)
int my_datetime_to_str(const MYSQL_TIME *l_time, char *to, uint digits)
{
char *pos= to;
-
- if (digits == AUTO_SEC_PART_DIGITS)
- digits= l_time->second_part ? TIME_SECOND_PART_DIGITS : 0;
-
- DBUG_ASSERT(digits <= TIME_SECOND_PART_DIGITS);
-
pos= fmt_number(l_time->year, pos, 4);
*pos++='-';
pos= fmt_number(l_time->month, pos, 2);
@@ -1109,17 +1571,7 @@ int my_datetime_to_str(const MYSQL_TIME *l_time, char *to, uint digits)
*pos++=' ';
pos= fmt_number(l_time->hour, pos, 2);
*pos++= ':';
- pos= fmt_number(l_time->minute, pos, 2);
- *pos++= ':';
- pos= fmt_number(l_time->second, pos, 2);
-
- if (digits)
- {
- *pos++='.';
- pos= fmt_number((uint) sec_part_shift(l_time->second_part, digits), pos,
- digits);
- }
-
+ pos+= my_mmssff_to_str(l_time, pos, digits);
*pos= 0;
return (int)(pos - to);
}
@@ -1185,7 +1637,7 @@ int my_timeval_to_str(const struct timeval *tm, char *to, uint dec)
representation and form value of DATETIME type as side-effect.
SYNOPSIS
- number_to_datetime()
+ number_to_datetime_or_date()
nr - datetime value as number
time_res - pointer for structure for broken-down representation
flags - flags to use in validating date, as in str_to_datetime()
@@ -1206,10 +1658,12 @@ int my_timeval_to_str(const struct timeval *tm, char *to, uint dec)
Datetime value in YYYYMMDDHHMMSS format.
*/
-longlong number_to_datetime(longlong nr, ulong sec_part, MYSQL_TIME *time_res,
- ulonglong flags, int *was_cut)
+longlong number_to_datetime_or_date(longlong nr, ulong sec_part,
+ MYSQL_TIME *time_res,
+ ulonglong flags, int *was_cut)
{
long part1,part2;
+ DBUG_ASSERT(C_FLAGS_OK(flags));
*was_cut= 0;
time_res->time_type=MYSQL_TIMESTAMP_DATE;
@@ -1279,8 +1733,8 @@ longlong number_to_datetime(longlong nr, ulong sec_part, MYSQL_TIME *time_res,
}
/* Don't want to have was_cut get set if NO_ZERO_DATE was violated. */
- if (nr || !(flags & TIME_NO_ZERO_DATE))
- *was_cut= 1;
+ if (nr || !(flags & C_TIME_NO_ZERO_DATE))
+ *was_cut= MYSQL_TIME_WARN_TRUNCATED;
return -1;
err:
@@ -1289,7 +1743,7 @@ longlong number_to_datetime(longlong nr, ulong sec_part, MYSQL_TIME *time_res,
enum enum_mysql_timestamp_type save= time_res->time_type;
bzero((char*) time_res, sizeof(*time_res));
time_res->time_type= save; /* Restore range */
- *was_cut= 1; /* Found invalid date */
+ *was_cut= MYSQL_TIME_WARN_TRUNCATED; /* Found invalid date */
}
return -1;
}
@@ -1310,22 +1764,20 @@ longlong number_to_datetime(longlong nr, ulong sec_part, MYSQL_TIME *time_res,
0 time value is valid, but was possibly truncated
-1 time value is invalid
*/
-int number_to_time(my_bool neg, ulonglong nr, ulong sec_part,
- MYSQL_TIME *ltime, int *was_cut)
+int number_to_time_only(my_bool neg, ulonglong nr, ulong sec_part,
+ ulong max_hour, MYSQL_TIME *ltime, int *was_cut)
{
- if (nr > 9999999 && nr < 99991231235959ULL && neg == 0)
- return number_to_datetime(nr, sec_part, ltime,
- TIME_INVALID_DATES, was_cut) < 0 ? -1 : 0;
-
+ static const ulonglong TIME_MAX_mmss= TIME_MAX_MINUTE*100 + TIME_MAX_SECOND;
+ ulonglong time_max_value= max_hour * 10000ULL + TIME_MAX_mmss;
*was_cut= 0;
ltime->year= ltime->month= ltime->day= 0;
ltime->time_type= MYSQL_TIMESTAMP_TIME;
ltime->neg= neg;
- if (nr > TIME_MAX_VALUE)
+ if (nr > time_max_value)
{
- nr= TIME_MAX_VALUE;
+ nr= time_max_value;
sec_part= TIME_MAX_SECOND_PART;
*was_cut= MYSQL_TIME_WARN_OUT_OF_RANGE;
}
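
Sketch only: the renamed numeric conversion now takes an explicit max_hour instead of falling back to a datetime interpretation of large values; the wrapper name is hypothetical:

    /* assumes #include "my_time.h" */
    static int number_as_time(ulonglong nr, MYSQL_TIME *tm, int *was_cut)
    {
      /* 8385959 (i.e. 838:59:59) is the classic TIME limit when max_hour is
         TIME_MAX_HOUR; larger values are clamped to it and
         MYSQL_TIME_WARN_OUT_OF_RANGE is set in *was_cut. */
      return number_to_time_only(0, nr, 0, TIME_MAX_HOUR, tm, was_cut);
    }
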
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index c6910f469f9..2879970e9ff 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -16,21 +16,27 @@
IF(WITH_WSREP AND NOT EMBEDDED_LIBRARY)
- SET(WSREP_INCLUDES ${CMAKE_SOURCE_DIR}/wsrep)
SET(WSREP_SOURCES
+ wsrep_client_service.cc
+ wsrep_high_priority_service.cc
+ wsrep_server_service.cc
+ wsrep_storage_service.cc
+ wsrep_server_state.cc
+ wsrep_utils.cc
+ wsrep_xid.cc
wsrep_check_opts.cc
- wsrep_hton.cc
- wsrep_mysqld.cc
+ wsrep_mysqld.cc
wsrep_notify.cc
wsrep_sst.cc
- wsrep_utils.cc
wsrep_var.cc
wsrep_binlog.cc
wsrep_applier.cc
wsrep_thd.cc
- wsrep_xid.cc
+ wsrep_schema.cc
+ wsrep_plugin.cc
+ service_wsrep.cc
)
- SET(WSREP_LIB wsrep)
+ SET(WSREP_LIB wsrep-lib wsrep_api_v26)
ELSE()
SET(WSREP_SOURCES wsrep_dummy.cc)
ENDIF()
@@ -42,7 +48,6 @@ ${PCRE_INCLUDES}
${ZLIB_INCLUDE_DIR}
${SSL_INCLUDE_DIRS}
${CMAKE_BINARY_DIR}/sql
-${WSREP_INCLUDES}
)
@@ -96,7 +101,7 @@ SET (SQL_SOURCE
sql_partition.cc sql_plugin.cc sql_prepare.cc sql_rename.cc
debug_sync.cc
sql_repl.cc sql_select.cc sql_show.cc sql_state.c
- group_by_handler.cc
+ group_by_handler.cc derived_handler.cc select_handler.cc
sql_statistics.cc sql_string.cc lex_string.h
sql_table.cc sql_test.cc sql_trigger.cc sql_udf.cc sql_union.cc
sql_update.cc sql_view.cc strfunc.cc table.cc thr_malloc.cc
@@ -121,22 +126,24 @@ SET (SQL_SOURCE
opt_index_cond_pushdown.cc opt_subselect.cc
opt_table_elimination.cc sql_expression_cache.cc
gcalc_slicescan.cc gcalc_tools.cc
- threadpool_common.cc ../sql-common/mysql_async.c
+ ../sql-common/mysql_async.c
my_apc.cc mf_iocache_encr.cc item_jsonfunc.cc
my_json_writer.cc
rpl_gtid.cc rpl_parallel.cc
semisync.cc semisync_master.cc semisync_slave.cc
semisync_master_ack_receiver.cc
- sql_type.cc
+ sql_type.cc sql_type_json.cc
item_windowfunc.cc sql_window.cc
sql_cte.cc
item_vers.cc
sql_sequence.cc sql_sequence.h ha_sequence.h
sql_tvc.cc sql_tvc.h
opt_split.cc
+ rowid_filter.cc rowid_filter.h
+ opt_trace.cc
${WSREP_SOURCES}
table_cache.cc encryption.cc temporary_tables.cc
- proxy_protocol.cc
+ proxy_protocol.cc backup.cc xa.cc
${CMAKE_CURRENT_BINARY_DIR}/sql_builtin.cc
${CMAKE_CURRENT_BINARY_DIR}/sql_yacc.cc
${CMAKE_CURRENT_BINARY_DIR}/sql_yacc_ora.cc
@@ -145,16 +152,21 @@ SET (SQL_SOURCE
${MYSYS_LIBWRAP_SOURCE}
)
-IF (CMAKE_SYSTEM_NAME MATCHES "Linux" OR
- CMAKE_SYSTEM_NAME MATCHES "Windows" OR
- CMAKE_SYSTEM_NAME MATCHES "SunOS" OR
- HAVE_KQUEUE)
+IF ((CMAKE_SYSTEM_NAME MATCHES "Linux" OR
+ CMAKE_SYSTEM_NAME MATCHES "SunOS" OR
+ WIN32 OR
+ HAVE_KQUEUE)
+ AND (NOT DISABLE_THREADPOOL))
ADD_DEFINITIONS(-DHAVE_POOL_OF_THREADS)
IF(WIN32)
SET(SQL_SOURCE ${SQL_SOURCE} threadpool_win.cc)
ENDIF()
SET(SQL_SOURCE ${SQL_SOURCE} threadpool_generic.cc)
+ SET(SQL_SOURCE ${SQL_SOURCE} threadpool_common.cc)
+ENDIF()
+IF(WIN32)
+ SET(SQL_SOURCE ${SQL_SOURCE} handle_connections_win.cc)
ENDIF()
MYSQL_ADD_PLUGIN(partition ha_partition.cc STORAGE_ENGINE DEFAULT STATIC_ONLY
@@ -440,12 +452,6 @@ IF(WIN32 AND TARGET mysqld AND NOT CMAKE_CROSSCOMPILING)
ALL
DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/initdb.dep
)
- INSTALL(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/data DESTINATION .
- COMPONENT DataFiles
- PATTERN "initdb.dep" EXCLUDE
- PATTERN "bootstrap.sql" EXCLUDE
- PATTERN "aria*" EXCLUDE
- )
ELSE()
# Not windows or cross compiling, just install an empty directory
INSTALL(FILES ${DUMMY_FILE} DESTINATION data/mysql COMPONENT DataFiles)
diff --git a/sql/MSG00001.bin b/sql/MSG00001.bin
index 89f547694f5..5c1cd0badeb 100644
--- a/sql/MSG00001.bin
+++ b/sql/MSG00001.bin
Binary files differ
diff --git a/sql/backup.cc b/sql/backup.cc
new file mode 100644
index 00000000000..99c18e1260b
--- /dev/null
+++ b/sql/backup.cc
@@ -0,0 +1,385 @@
+/* Copyright (c) 2018, MariaDB Corporation
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+/*
+ Implementation of BACKUP STAGE, an interface for external backup tools.
+
+ TODO:
+ - At backup_start() we call ha_prepare_for_backup() for all active
+ storage engines. If someone tries to load a new storage engine
+    that requires prepare_for_backup() for it to work, that storage
+    engine has to be blocked from loading until the backup finishes.
+    As we currently don't have any loadable storage engine that
+    requires this, that part has not been implemented yet.
+    This can easily be done by adding a
+    PLUGIN_CANT_BE_LOADED_WHILE_BACKUP_IS_RUNNING flag to
+    maria_declare_plugin and checking it before calling
+ plugin_initialize()
+*/
+
+#include "mariadb.h"
+#include "sql_class.h"
+#include "sql_base.h" // flush_tables
+#include "sql_insert.h" // kill_delayed_threads
+#include "sql_handler.h" // mysql_ha_cleanup_no_free
+#include <my_sys.h>
+
+static const char *stage_names[]=
+{"START", "FLUSH", "BLOCK_DDL", "BLOCK_COMMIT", "END", 0};
+
+TYPELIB backup_stage_names=
+{ array_elements(stage_names)-1, "", stage_names, 0 };
+
+static MDL_ticket *backup_flush_ticket;
+
+static bool backup_start(THD *thd);
+static bool backup_flush(THD *thd);
+static bool backup_block_ddl(THD *thd);
+static bool backup_block_commit(THD *thd);
+
+/**
+ Run next stage of backup
+*/
+
+void backup_init()
+{
+ backup_flush_ticket= 0;
+}
+
+bool run_backup_stage(THD *thd, backup_stages stage)
+{
+ backup_stages next_stage;
+ DBUG_ENTER("run_backup_stage");
+
+ if (thd->current_backup_stage == BACKUP_FINISHED)
+ {
+ if (stage != BACKUP_START)
+ {
+ my_error(ER_BACKUP_NOT_RUNNING, MYF(0));
+ DBUG_RETURN(1);
+ }
+ next_stage= BACKUP_START;
+ }
+ else
+ {
+ if ((uint) thd->current_backup_stage >= (uint) stage)
+ {
+ my_error(ER_BACKUP_WRONG_STAGE, MYF(0), stage_names[stage],
+ stage_names[thd->current_backup_stage]);
+ DBUG_RETURN(1);
+ }
+ if (stage == BACKUP_END)
+ {
+ /*
+ If end is given, jump directly to stage end. This is to allow one
+ to abort backup quickly.
+ */
+ next_stage= stage;
+ }
+ else
+ {
+      /* Go through all unused stages until we reach 'stage' */
+ next_stage= (backup_stages) ((uint) thd->current_backup_stage + 1);
+ }
+ }
+
+ do
+ {
+ bool res;
+ backup_stages previous_stage= thd->current_backup_stage;
+ thd->current_backup_stage= next_stage;
+ switch (next_stage) {
+ case BACKUP_START:
+ if (!(res= backup_start(thd)))
+ break;
+ /* Reset backup stage to start for next backup try */
+ previous_stage= BACKUP_FINISHED;
+ break;
+ case BACKUP_FLUSH:
+ res= backup_flush(thd);
+ break;
+ case BACKUP_WAIT_FOR_FLUSH:
+ res= backup_block_ddl(thd);
+ break;
+ case BACKUP_LOCK_COMMIT:
+ res= backup_block_commit(thd);
+ break;
+ case BACKUP_END:
+ res= backup_end(thd);
+ break;
+ case BACKUP_FINISHED:
+ DBUG_ASSERT(0);
+ res= 0;
+ }
+ if (res)
+ {
+ thd->current_backup_stage= previous_stage;
+ my_error(ER_BACKUP_STAGE_FAILED, MYF(0), stage_names[(uint) stage]);
+ DBUG_RETURN(1);
+ }
+ next_stage= (backup_stages) ((uint) next_stage + 1);
+ } while ((uint) next_stage <= (uint) stage);
+
+ DBUG_RETURN(0);
+}
+
+
+/**
+ Start the backup
+
+ - Wait for previous backup to stop running
+ - Start service to log changed tables (TODO)
+ - Block purge of redo files (Required at least for Aria)
+   - A handler can optionally do a checkpoint of all tables,
+ to speed up the recovery stage of the backup.
+*/
+
+static bool backup_start(THD *thd)
+{
+ MDL_request mdl_request;
+ DBUG_ENTER("backup_start");
+
+ thd->current_backup_stage= BACKUP_FINISHED; // For next test
+ if (thd->has_read_only_protection())
+ DBUG_RETURN(1);
+ thd->current_backup_stage= BACKUP_START;
+
+ if (thd->locked_tables_mode)
+ {
+ my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0));
+ DBUG_RETURN(1);
+ }
+
+ mdl_request.init(MDL_key::BACKUP, "", "", MDL_BACKUP_START, MDL_EXPLICIT);
+ if (thd->mdl_context.acquire_lock(&mdl_request,
+ thd->variables.lock_wait_timeout))
+ DBUG_RETURN(1);
+
+ backup_flush_ticket= mdl_request.ticket;
+
+ ha_prepare_for_backup();
+ DBUG_RETURN(0);
+}
+
+/**
+ backup_flush()
+
+  - FLUSH all changes for non-active, non-transactional tables, except
+    for statistics and log tables. Close the tables to ensure they
+ are marked as closed after backup.
+
+ - BLOCK all NEW write locks for all non transactional tables
+ (except statistics and log tables). Already granted locks are
+    not affected (Running statements with non-transactional tables will
+ continue running).
+
+  - The following DDLs don't have to be blocked as they can't leave
+    the table in an inconsistent state:
+ CREATE, RENAME, DROP
+*/
+
+static bool backup_flush(THD *thd)
+{
+ DBUG_ENTER("backup_flush");
+ /*
+    Lock all non-transactional normal tables to be used in new DMLs
+ */
+ if (thd->mdl_context.upgrade_shared_lock(backup_flush_ticket,
+ MDL_BACKUP_FLUSH,
+ thd->variables.lock_wait_timeout))
+ DBUG_RETURN(1);
+
+ /*
+ Free unused tables and table shares so that mariabackup knows what
+ is safe to copy
+ */
+ tc_purge(false);
+ tdc_purge(true);
+
+ DBUG_RETURN(0);
+}
+
+/**
+ backup_block_ddl()
+
+  - Kill all delayed insert handlers, to ensure that all non-transactional
+ tables are closed (can be improved in the future).
+
+ - Close handlers as other threads may wait for these, which can cause deadlocks.
+
+ - Wait for all statements using write locked non-transactional tables to end.
+
+ - Mark all not used active non transactional tables (except
+ statistics and log tables) to be closed with
+ handler->extra(HA_EXTRA_FLUSH)
+
+ - Block TRUNCATE TABLE, CREATE TABLE, DROP TABLE and RENAME
+    TABLE. Also block the start of a new ALTER TABLE and the final rename
+    phase of ALTER TABLE. Running ALTER TABLEs are not blocked. Both normal
+    and inline ALTER TABLEs should be blocked when copying is completed but
+    before the final renaming of the tables / activation of the new table.
+ This will probably require a callback from the InnoDB code.
+*/
+
+static bool backup_block_ddl(THD *thd)
+{
+ DBUG_ENTER("backup_block_ddl");
+
+ kill_delayed_threads();
+ mysql_ha_cleanup_no_free(thd);
+
+  /* Wait until all non-transactional statements have ended */
+ if (thd->mdl_context.upgrade_shared_lock(backup_flush_ticket,
+ MDL_BACKUP_WAIT_FLUSH,
+ thd->variables.lock_wait_timeout))
+ DBUG_RETURN(1);
+
+ /*
+ Remove not used tables from the table share. Flush all changes to
+    non-transactional tables and mark those that are not in use in write
+    operations as closed. For backup purposes it's not critical if
+    flush_tables() returns an error. It's ok to continue with the next
+ backup stage even if we got an error.
+ */
+ (void) flush_tables(thd, FLUSH_NON_TRANS_TABLES);
+
+ /*
+    Block new DDLs, in addition to all previous blocks.
+    We didn't take this lock above, as we wanted DDLs to be executed while
+    we wait for non-transactional tables (which may take a while).
+ */
+ if (thd->mdl_context.upgrade_shared_lock(backup_flush_ticket,
+ MDL_BACKUP_WAIT_DDL,
+ thd->variables.lock_wait_timeout))
+ {
+ /*
+      Could be a timeout. Downgrade the lock to what it was before this function
+ was called so that this function can be called again
+ */
+ backup_flush_ticket->downgrade_lock(MDL_BACKUP_FLUSH);
+ DBUG_RETURN(1);
+ }
+ DBUG_RETURN(0);
+}
+
+/**
+ backup_block_commit()
+
+ Block commits, writes to log and statistics tables and binary log
+*/
+
+static bool backup_block_commit(THD *thd)
+{
+ DBUG_ENTER("backup_block_commit");
+ if (thd->mdl_context.upgrade_shared_lock(backup_flush_ticket,
+ MDL_BACKUP_WAIT_COMMIT,
+ thd->variables.lock_wait_timeout))
+ DBUG_RETURN(1);
+ flush_tables(thd, FLUSH_SYS_TABLES);
+ DBUG_RETURN(0);
+}
+
+/**
+ backup_end()
+
+ Safe to run, even if backup has not been run by this thread.
+ This is for example the case when a THD ends.
+*/
+
+bool backup_end(THD *thd)
+{
+ DBUG_ENTER("backup_end");
+
+ if (thd->current_backup_stage != BACKUP_FINISHED)
+ {
+ ha_end_backup();
+ thd->current_backup_stage= BACKUP_FINISHED;
+ thd->mdl_context.release_lock(backup_flush_ticket);
+ }
+ DBUG_RETURN(0);
+}
+
+
+/**
+ backup_set_alter_copy_lock()
+
+ @param thd
+ @param table From table that is part of ALTER TABLE. This is only used
+ for the assert to ensure we use this function correctly.
+
+ Downgrades the MDL_BACKUP_DDL lock to MDL_BACKUP_ALTER_COPY to allow
+ copy of altered table to proceed under MDL_BACKUP_WAIT_DDL
+
+  Note that in some cases, when using non-transactional tables,
+ the lock may be of type MDL_BACKUP_DML.
+*/
+
+void backup_set_alter_copy_lock(THD *thd, TABLE *table)
+{
+ MDL_ticket *ticket= thd->mdl_backup_ticket;
+
+  /* The ticket may be NULL in case of LOCK TABLES or for temporary tables */
+ DBUG_ASSERT(ticket || thd->locked_tables_mode ||
+ table->s->tmp_table != NO_TMP_TABLE);
+ if (ticket)
+ ticket->downgrade_lock(MDL_BACKUP_ALTER_COPY);
+}
+
+/**
+ backup_reset_alter_copy_lock
+
+ Upgrade the lock of the original ALTER table MDL_BACKUP_DDL
+ Can fail if MDL lock was killed
+*/
+
+bool backup_reset_alter_copy_lock(THD *thd)
+{
+ bool res= 0;
+ MDL_ticket *ticket= thd->mdl_backup_ticket;
+
+  /* The ticket may be NULL in case of LOCK TABLES or for temporary tables */
+ if (ticket)
+ res= thd->mdl_context.upgrade_shared_lock(ticket, MDL_BACKUP_DDL,
+ thd->variables.lock_wait_timeout);
+ return res;
+}
+
+
+/*****************************************************************************
+ Backup locks
+  These functions are used by mariabackup to ensure that there are no active
+  DDLs on the object the backup is going to copy
+*****************************************************************************/
+
+
+bool backup_lock(THD *thd, TABLE_LIST *table)
+{
+ backup_unlock(thd);
+ table->mdl_request.duration= MDL_EXPLICIT;
+ if (thd->mdl_context.acquire_lock(&table->mdl_request,
+ thd->variables.lock_wait_timeout))
+ return 1;
+ thd->mdl_backup_lock= table->mdl_request.ticket;
+ return 0;
+}
+
+
+/* Release old backup lock if it exists */
+
+void backup_unlock(THD *thd)
+{
+ if (thd->mdl_backup_lock)
+ thd->mdl_context.release_lock(thd->mdl_backup_lock);
+ thd->mdl_backup_lock= 0;
+}
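
Purely as an illustration of the stage machinery above (normally driven by the BACKUP STAGE SQL statement); a hypothetical in-server caller could step through the stages like this:

    static bool example_backup_cycle(THD *thd)
    {
      /* run_backup_stage() executes any skipped intermediate stages itself,
         so BACKUP_FLUSH does not need to be requested explicitly. */
      if (run_backup_stage(thd, BACKUP_START) ||
          run_backup_stage(thd, BACKUP_WAIT_FOR_FLUSH) ||  /* stage BLOCK_DDL */
          run_backup_stage(thd, BACKUP_LOCK_COMMIT))       /* stage BLOCK_COMMIT */
      {
        backup_end(thd);               /* safe even if nothing was started */
        return true;                   /* error */
      }
      /* ... copy the data here ... */
      return run_backup_stage(thd, BACKUP_END);
    }
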
diff --git a/sql/backup.h b/sql/backup.h
new file mode 100644
index 00000000000..8d8a28b6082
--- /dev/null
+++ b/sql/backup.h
@@ -0,0 +1,34 @@
+#ifndef BACKUP_INCLUDED
+#define BACKUP_INCLUDED
+/* Copyright (c) 2018, MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+enum backup_stages
+{
+ BACKUP_START, BACKUP_FLUSH, BACKUP_WAIT_FOR_FLUSH, BACKUP_LOCK_COMMIT,
+ BACKUP_END, BACKUP_FINISHED
+};
+
+extern TYPELIB backup_stage_names;
+
+void backup_init();
+bool run_backup_stage(THD *thd, backup_stages stage);
+bool backup_end(THD *thd);
+void backup_set_alter_copy_lock(THD *thd, TABLE *altered_table);
+bool backup_reset_alter_copy_lock(THD *thd);
+
+bool backup_lock(THD *thd, TABLE_LIST *table);
+void backup_unlock(THD *thd);
+#endif /* BACKUP_INCLUDED */
diff --git a/sql/compat56.cc b/sql/compat56.cc
index d1cb8b0042c..1285de9fd12 100644
--- a/sql/compat56.cc
+++ b/sql/compat56.cc
@@ -20,6 +20,19 @@
#include "myisampack.h"
#include "my_time.h"
+
+static const int my_max_usec_value[7]
+{
+ 0,
+ 900000,
+ 990000,
+ 999000,
+ 999900,
+ 999990,
+ 999999
+};
+
+
/*** MySQL56 TIME low-level memory and disk representation routines ***/
/*
@@ -397,19 +410,21 @@ void my_timestamp_from_binary(struct timeval *tm, const uchar *ptr, uint dec)
case 0:
default:
tm->tv_usec= 0;
- break;
+ return;
case 1:
case 2:
tm->tv_usec= ((int) ptr[4]) * 10000;
break;
case 3:
case 4:
- tm->tv_usec= mi_sint2korr(ptr + 4) * 100;
+ tm->tv_usec= (uint) mi_uint2korr(ptr + 4) * 100;
break;
case 5:
case 6:
- tm->tv_usec= mi_sint3korr(ptr + 4);
+ tm->tv_usec= (uint) mi_uint3korr(ptr + 4);
}
+  // The binary data may be corrupt. Cut fractional seconds to the valid range.
+ set_if_smaller(tm->tv_usec, my_max_usec_value[dec]);
}
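
For illustration, a hedged sketch of the effect of the clamping added above; the helper name is hypothetical and ptr is assumed to point at a binary TIMESTAMP(6) value:

    /* assumes the declaration of my_timestamp_from_binary() (compat56.h) */
    static void read_ts6(const uchar *ptr, struct timeval *tv)
    {
      /* dec=6: 4 bytes of seconds followed by 3 bytes of microseconds;
         corrupt microsecond bytes are now clamped to 999999 instead of
         leaking an out-of-range tv_usec. */
      my_timestamp_from_binary(tv, ptr, 6);
    }
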
diff --git a/sql/compat56.h b/sql/compat56.h
index bb5e2670f7d..ff887ebf1bb 100644
--- a/sql/compat56.h
+++ b/sql/compat56.h
@@ -19,6 +19,15 @@
/** MySQL56 routines and macros **/
+
+/*
+  Buffer size for a native TIMESTAMP representation, for use with NativeBuffer.
+ 4 bytes for seconds
+ 3 bytes for microseconds
+ 1 byte for the trailing '\0' (class Native reserves extra 1 byte for '\0')
+*/
+#define STRING_BUFFER_TIMESTAMP_BINARY_SIZE 8 /* 4 + 3 + 1 */
+
#define MY_PACKED_TIME_GET_INT_PART(x) ((x) >> 24)
#define MY_PACKED_TIME_GET_FRAC_PART(x) ((x) % (1LL << 24))
#define MY_PACKED_TIME_MAKE(i, f) ((((longlong) (i)) << 24) + (f))
diff --git a/sql/datadict.cc b/sql/datadict.cc
index 231e7ea22ca..dae2de9b393 100644
--- a/sql/datadict.cc
+++ b/sql/datadict.cc
@@ -181,39 +181,24 @@ err:
@param thd Thread context.
@param db Name of the database to which the table belongs to.
@param name Table name.
- @param path For temporary tables only - path to table files.
- Otherwise NULL (the path is calculated from db and table names).
@retval FALSE Success.
@retval TRUE Error.
*/
-bool dd_recreate_table(THD *thd, const char *db, const char *table_name,
- const char *path)
+bool dd_recreate_table(THD *thd, const char *db, const char *table_name)
{
- bool error= TRUE;
HA_CREATE_INFO create_info;
char path_buf[FN_REFLEN + 1];
DBUG_ENTER("dd_recreate_table");
+  /* There should be an exclusive metadata lock on the table. */
+ DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, table_name,
+ MDL_EXCLUSIVE));
create_info.init();
-
- if (path)
- create_info.options|= HA_LEX_CREATE_TMP_TABLE;
- else
- {
- build_table_filename(path_buf, sizeof(path_buf) - 1,
- db, table_name, "", 0);
- path= path_buf;
-
- /* There should be a exclusive metadata lock on the table. */
- DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, table_name,
- MDL_EXCLUSIVE));
- }
-
+ build_table_filename(path_buf, sizeof(path_buf) - 1,
+ db, table_name, "", 0);
/* Attempt to reconstruct the table. */
- error= ha_create_table(thd, path, db, table_name, &create_info, NULL);
-
- DBUG_RETURN(error);
+ DBUG_RETURN(ha_create_table(thd, path_buf, db, table_name, &create_info, 0));
}
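
Caller-side sketch (hypothetical wrapper and table names): with the path parameter gone, TRUNCATE-style callers pass only the database and table name and must already hold the exclusive MDL lock asserted above:

    static bool recreate_t1(THD *thd)
    {
      /* An exclusive MDL lock on test.t1 must already be held by thd. */
      return dd_recreate_table(thd, "test", "t1");
    }
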
diff --git a/sql/datadict.h b/sql/datadict.h
index e102618c2f7..d4547a1f5f1 100644
--- a/sql/datadict.h
+++ b/sql/datadict.h
@@ -47,7 +47,6 @@ static inline bool dd_frm_is_view(THD *thd, char *path)
return dd_frm_type(thd, path, NULL, &not_used2) == TABLE_TYPE_VIEW;
}
-bool dd_recreate_table(THD *thd, const char *db, const char *table_name,
- const char *path = NULL);
+bool dd_recreate_table(THD *thd, const char *db, const char *table_name);
#endif // DATADICT_INCLUDED
diff --git a/sql/derived_handler.cc b/sql/derived_handler.cc
new file mode 100644
index 00000000000..76fd736de2b
--- /dev/null
+++ b/sql/derived_handler.cc
@@ -0,0 +1,127 @@
+/*
+ Copyright (c) 2018, 2019 MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
+
+#include "mariadb.h"
+#include "sql_priv.h"
+#include "sql_select.h"
+#include "derived_handler.h"
+
+
+/**
+ The methods of the Pushdown_derived class.
+
+  The objects of this class are used for pushdown of derived tables
+  into engines. The main method of the class is Pushdown_derived::execute(),
+  which initiates execution of the query specifying a derived table by a
+  foreign engine, receives the rows of the result set and puts them in a
+  temporary table on the server side.
+
+  The method uses only the functions of the derived_handler interface to do
+  this. The constructor of the class gets this interface as a parameter.
+
+  Currently a derived table pushed into an engine is always materialized.
+  This could be changed for the cases when the table is used as a driving table.
+*/
+
+
+Pushdown_derived::Pushdown_derived(TABLE_LIST *tbl, derived_handler *h)
+ : derived(tbl), handler(h)
+{
+ is_analyze= handler->thd->lex->analyze_stmt;
+}
+
+
+Pushdown_derived::~Pushdown_derived()
+{
+ delete handler;
+}
+
+
+int Pushdown_derived::execute()
+{
+ int err;
+ THD *thd= handler->thd;
+ TABLE *table= handler->table;
+ TMP_TABLE_PARAM *tmp_table_param= handler->tmp_table_param;
+
+ DBUG_ENTER("Pushdown_query::execute");
+
+ if ((err= handler->init_scan()))
+ goto error;
+
+ if (is_analyze)
+ {
+ handler->end_scan();
+ DBUG_RETURN(0);
+ }
+
+ while (!(err= handler->next_row()))
+ {
+ if (unlikely(thd->check_killed()))
+ {
+ handler->end_scan();
+ DBUG_RETURN(-1);
+ }
+
+ if ((err= table->file->ha_write_tmp_row(table->record[0])))
+ {
+ bool is_duplicate;
+ if (likely(!table->file->is_fatal_error(err, HA_CHECK_DUP)))
+ continue; // Distinct elimination
+
+ if (create_internal_tmp_table_from_heap(thd, table,
+ tmp_table_param->start_recinfo,
+ &tmp_table_param->recinfo,
+ err, 1, &is_duplicate))
+ DBUG_RETURN(1);
+ if (is_duplicate)
+ continue;
+ }
+ }
+
+ if (err != 0 && err != HA_ERR_END_OF_FILE)
+ goto error;
+
+ if ((err= handler->end_scan()))
+ goto error_2;
+
+ DBUG_RETURN(0);
+
+error:
+ handler->end_scan();
+error_2:
+ handler->print_error(err, MYF(0));
+ DBUG_RETURN(-1); // Error not sent to client
+}
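
For illustration only (the real call site lives elsewhere in the optimizer): once an engine has supplied a derived_handler, materializing the pushed-down derived table is roughly as below; the helper name is hypothetical and the Pushdown_derived declaration is assumed to be visible:

    /* dh was created by the engine for tbl and is deleted by
       Pushdown_derived's destructor. */
    static int materialize_pushed_derived(TABLE_LIST *tbl, derived_handler *dh)
    {
      Pushdown_derived pushdown(tbl, dh);
      return pushdown.execute();   /* fills the temporary table on the server */
    }
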
+
+
+void derived_handler::print_error(int error, myf errflag)
+{
+ my_error(ER_GET_ERRNO, MYF(0), error, hton_name(ht)->str);
+}
+
+
+void derived_handler::set_derived(TABLE_LIST *tbl)
+{
+ derived= tbl;
+ table= tbl->table;
+ unit= tbl->derived;
+ select= unit->first_select();
+ tmp_table_param= select->next_select() ?
+ ((select_unit *)(unit->result))->get_tmp_table_param() :
+ &select->join->tmp_table_param;
+}
+
diff --git a/sql/derived_handler.h b/sql/derived_handler.h
new file mode 100644
index 00000000000..171165bbe6f
--- /dev/null
+++ b/sql/derived_handler.h
@@ -0,0 +1,85 @@
+/*
+ Copyright (c) 2016, 2017 MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef DERIVED_HANDLER_INCLUDED
+#define DERIVED_HANDLER_INCLUDED
+
+#include "mariadb.h"
+#include "sql_priv.h"
+
+class TMP_TABLE_PARAM;
+
+typedef class st_select_lex_unit SELECT_LEX_UNIT;
+
+/**
+ @class derived_handler
+
+ This interface class is to be used for execution of queries that specify
+  a derived table by foreign engines
+*/
+
+class derived_handler
+{
+public:
+ THD *thd;
+ handlerton *ht;
+
+ TABLE_LIST *derived;
+
+ /*
+ Temporary table where all results should be stored in record[0]
+ The table has a field for every item from the select list of
+ the specification of derived.
+ */
+ TABLE *table;
+
+  /* The parameters of the temporary table used at its creation */
+ TMP_TABLE_PARAM *tmp_table_param;
+
+ SELECT_LEX_UNIT *unit; // Specifies the derived table
+
+ SELECT_LEX *select; // The first select of the specification
+
+ derived_handler(THD *thd_arg, handlerton *ht_arg)
+ : thd(thd_arg), ht(ht_arg), derived(0),table(0), tmp_table_param(0),
+ unit(0), select(0) {}
+ virtual ~derived_handler() {}
+
+ /*
+    Functions to scan data. All of these return 0 if ok, or an error code in case
+ of error
+ */
+
+ /* Initialize the process of producing rows of the derived table */
+ virtual int init_scan()= 0;
+
+ /*
+ Put the next produced row of the derived in table->record[0] and return 0.
+ Return HA_ERR_END_OF_FILE if there are no more rows, return other error
+ number in case of fatal error.
+ */
+ virtual int next_row()= 0;
+
+  /* End producing rows */
+ virtual int end_scan()=0;
+
+ /* Report errors */
+ virtual void print_error(int error, myf errflag);
+
+ void set_derived(TABLE_LIST *tbl);
+};
+
+#endif /* DERIVED_HANDLER_INCLUDED */
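
A minimal sketch of what an engine-side implementation of this interface could look like; the example_* functions are hypothetical stand-ins for a real engine's remote-scan API and are not part of the tree:

    int example_open_scan(SELECT_LEX *sel, void **cursor);   /* hypothetical */
    int example_fetch_row(void *cursor, uchar *record);      /* hypothetical */
    int example_close_scan(void *cursor);                    /* hypothetical */

    class example_derived_handler: public derived_handler
    {
      void *cursor;                            /* engine-specific scan state */
    public:
      example_derived_handler(THD *thd_arg, handlerton *ht_arg)
        : derived_handler(thd_arg, ht_arg), cursor(NULL) {}
      int init_scan() { return example_open_scan(select, &cursor); }
      /* must return HA_ERR_END_OF_FILE when the result set is exhausted */
      int next_row()  { return example_fetch_row(cursor, table->record[0]); }
      int end_scan()  { return example_close_scan(cursor); }
    };
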
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc
index 86a710f87c6..6327cd138de 100644
--- a/sql/event_data_objects.cc
+++ b/sql/event_data_objects.cc
@@ -32,7 +32,9 @@
#include "event_db_repository.h"
#include "sp_head.h"
#include "sql_show.h" // append_definer, append_identifier
-
+#ifdef WITH_WSREP
+#include "wsrep_trans_observer.h"
+#endif /* WITH_WSREP */
/**
@addtogroup Event_Scheduler
@{
@@ -479,14 +481,24 @@ Event_queue_element::load_from_row(THD *thd, TABLE *table)
uint not_used;
if (!starts_null)
{
- table->field[ET_FIELD_STARTS]->get_date(&time, TIME_NO_ZERO_DATE);
+ /*
+ The expected data type for these columns in mysql.events:
+ starts, ends, execute_at, last_executed
+ is DATETIME. No nanosecond truncation should normally be needed,
+ unless the DBA changes them, e.g. to VARCHAR, DECIMAL, etc.
+      For this unexpected case let's use the default rounding mode,
+ according to the current session settings.
+ */
+ table->field[ET_FIELD_STARTS]->get_date(&time, TIME_NO_ZERO_DATE |
+ thd->temporal_round_mode());
starts= my_tz_OFFSET0->TIME_to_gmt_sec(&time,&not_used);
}
ends_null= table->field[ET_FIELD_ENDS]->is_null();
if (!ends_null)
{
- table->field[ET_FIELD_ENDS]->get_date(&time, TIME_NO_ZERO_DATE);
+ table->field[ET_FIELD_ENDS]->get_date(&time, TIME_NO_ZERO_DATE |
+ thd->temporal_round_mode());
ends= my_tz_OFFSET0->TIME_to_gmt_sec(&time,&not_used);
}
@@ -502,8 +514,8 @@ Event_queue_element::load_from_row(THD *thd, TABLE *table)
DBUG_ASSERT(!(starts_null && ends_null && !expression && execute_at_null));
if (!expression && !execute_at_null)
{
- if (table->field[ET_FIELD_EXECUTE_AT]->get_date(&time,
- TIME_NO_ZERO_DATE))
+ if (table->field[ET_FIELD_EXECUTE_AT]->get_date(&time, TIME_NO_ZERO_DATE |
+ thd->temporal_round_mode()))
DBUG_RETURN(TRUE);
execute_at= my_tz_OFFSET0->TIME_to_gmt_sec(&time,&not_used);
}
@@ -535,8 +547,8 @@ Event_queue_element::load_from_row(THD *thd, TABLE *table)
if (!table->field[ET_FIELD_LAST_EXECUTED]->is_null())
{
- table->field[ET_FIELD_LAST_EXECUTED]->get_date(&time,
- TIME_NO_ZERO_DATE);
+ table->field[ET_FIELD_LAST_EXECUTED]->get_date(&time, TIME_NO_ZERO_DATE |
+ thd->temporal_round_mode());
last_executed= my_tz_OFFSET0->TIME_to_gmt_sec(&time,&not_used);
}
@@ -654,7 +666,7 @@ my_time_t
add_interval(MYSQL_TIME *ltime, const Time_zone *time_zone,
interval_type scale, INTERVAL interval)
{
- if (date_add_interval(ltime, scale, interval))
+ if (date_add_interval(current_thd, ltime, scale, interval))
return 0;
uint not_used;
@@ -758,8 +770,8 @@ bool get_next_time(const Time_zone *time_zone, my_time_t *next,
if (seconds)
{
- longlong seconds_diff;
- long microsec_diff;
+ ulonglong seconds_diff;
+ ulong microsec_diff;
bool negative= calc_time_diff(&local_now, &local_start, 1,
&seconds_diff, &microsec_diff);
if (!negative)
@@ -1343,6 +1355,10 @@ Event_job_data::execute(THD *thd, bool drop)
thd->reset_for_next_command();
+#ifdef WITH_WSREP
+ wsrep_open(thd);
+ wsrep_before_command(thd);
+#endif /* WITH_WSREP */
/*
MySQL parser currently assumes that current database is either
present in THD or all names in all statements are fully specified.
@@ -1517,6 +1533,10 @@ end:
if (save_sctx)
event_sctx.restore_security_context(thd, save_sctx);
#endif
+#ifdef WITH_WSREP
+ wsrep_after_command_ignore_result(thd);
+ wsrep_close(thd);
+#endif /* WITH_WSREP */
thd->lex->unit.cleanup();
thd->end_statement();
thd->cleanup_after_query();
diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc
index b2ff80626db..00d625879de 100644
--- a/sql/event_parse_data.cc
+++ b/sql/event_parse_data.cc
@@ -216,7 +216,13 @@ Event_parse_data::init_execute_at(THD *thd)
(starts_null && ends_null)));
DBUG_ASSERT(starts_null && ends_null);
- if (item_execute_at->get_date(&ltime, TIME_NO_ZERO_DATE))
+ /*
+ The expected data type is DATETIME. No nanosecond truncation should
+ normally be needed. Using the default rounding mode.
+ See more comments in event_data_objects.cc.
+ */
+ if (item_execute_at->get_date(thd, &ltime, TIME_NO_ZERO_DATE |
+ thd->temporal_round_mode()))
goto wrong_value;
ltime_utc= TIME_to_timestamp(thd,&ltime,&not_used);
@@ -275,7 +281,7 @@ Event_parse_data::init_interval(THD *thd)
if (item_expression->fix_fields(thd, &item_expression))
goto wrong_value;
- if (get_interval_value(item_expression, interval, &interval_tmp))
+ if (get_interval_value(thd, item_expression, interval, &interval_tmp))
goto wrong_value;
expression= 0;
@@ -378,7 +384,8 @@ Event_parse_data::init_starts(THD *thd)
if (item_starts->fix_fields(thd, &item_starts))
goto wrong_value;
- if (item_starts->get_date(&ltime, TIME_NO_ZERO_DATE))
+ if (item_starts->get_date(thd, &ltime, TIME_NO_ZERO_DATE |
+ thd->temporal_round_mode()))
goto wrong_value;
ltime_utc= TIME_to_timestamp(thd, &ltime, &not_used);
@@ -433,7 +440,8 @@ Event_parse_data::init_ends(THD *thd)
goto error_bad_params;
DBUG_PRINT("info", ("convert to TIME"));
- if (item_ends->get_date(&ltime, TIME_NO_ZERO_DATE))
+ if (item_ends->get_date(thd, &ltime, TIME_NO_ZERO_DATE |
+ thd->temporal_round_mode()))
goto error_bad_params;
ltime_utc= TIME_to_timestamp(thd, &ltime, &not_used);
@@ -472,7 +480,7 @@ Event_parse_data::report_bad_value(const char *item_name, Item *bad_item)
{
char buff[120];
String str(buff,(uint32) sizeof(buff), system_charset_info);
- String *str2= bad_item->fixed? bad_item->val_str(&str):NULL;
+ String *str2= bad_item->is_fixed() ? bad_item->val_str(&str) : NULL;
my_error(ER_WRONG_VALUE, MYF(0), item_name, str2? str2->c_ptr_safe():"NULL");
}
diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc
index f459fd34aee..99b3c9b93fb 100644
--- a/sql/event_scheduler.cc
+++ b/sql/event_scheduler.cc
@@ -150,7 +150,7 @@ deinit_event_thread(THD *thd)
{
thd->proc_info= "Clearing";
DBUG_PRINT("exit", ("Event thread finishing"));
- unlink_not_visible_thd(thd);
+ server_threads.erase(thd);
delete thd;
}
@@ -185,7 +185,7 @@ pre_init_event_thread(THD* thd)
thd->net.read_timeout= slave_net_timeout;
thd->variables.option_bits|= OPTION_AUTO_IS_NULL;
thd->client_capabilities|= CLIENT_MULTI_RESULTS;
- add_to_active_threads(thd);
+ server_threads.insert(thd);
/*
Guarantees that we will see the thread in SHOW PROCESSLIST though its
@@ -679,20 +679,20 @@ end:
Event_scheduler::workers_count()
*/
+static my_bool workers_count_callback(THD *thd, uint32_t *count)
+{
+ if (thd->system_thread == SYSTEM_THREAD_EVENT_WORKER)
+ ++*count;
+ return 0;
+}
+
+
uint
Event_scheduler::workers_count()
{
- THD *tmp;
- uint count= 0;
-
+ uint32_t count= 0;
DBUG_ENTER("Event_scheduler::workers_count");
- mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
- I_List_iterator<THD> it(threads);
- while ((tmp=it++))
- if (tmp->system_thread == SYSTEM_THREAD_EVENT_WORKER)
- ++count;
- mysql_mutex_unlock(&LOCK_thread_count);
- DBUG_PRINT("exit", ("%d", count));
+ server_threads.iterate(workers_count_callback, &count);
DBUG_RETURN(count);
}
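The same callback pattern generalizes to any per-THD aggregation. A sketch follows; the SYSTEM_THREAD_SLAVE_SQL filter is only an illustration, while the iterate() call and the callback signature are the ones used in workers_count() above.

static my_bool slave_sql_count_callback(THD *thd, uint32_t *count)
{
  if (thd->system_thread == SYSTEM_THREAD_SLAVE_SQL)
    ++*count;
  return 0;              // 0: keep iterating, as in workers_count_callback
}

static uint32_t count_slave_sql_threads()
{
  uint32_t count= 0;
  server_threads.iterate(slave_sql_count_callback, &count);
  return count;
}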
diff --git a/sql/events.cc b/sql/events.cc
index c3a578f1097..196c8df591d 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -336,7 +336,7 @@ Events::create_event(THD *thd, Event_parse_data *parse_data)
if (check_access(thd, EVENT_ACL, parse_data->dbname.str, NULL, NULL, 0, 0))
DBUG_RETURN(TRUE);
- WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL)
+ WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL);
if (lock_object_name(thd, MDL_key::EVENT,
parse_data->dbname.str, parse_data->name.str))
@@ -401,7 +401,7 @@ Events::create_event(THD *thd, Event_parse_data *parse_data)
my_message_sql(ER_STARTUP,
"Event Error: An error occurred while creating query "
"string, before writing it into binary log.",
- MYF(ME_NOREFRESH));
+ MYF(ME_ERROR_LOG));
ret= true;
}
else
@@ -419,10 +419,10 @@ Events::create_event(THD *thd, Event_parse_data *parse_data)
thd->restore_stmt_binlog_format(save_binlog_format);
DBUG_RETURN(ret);
-
-WSREP_ERROR_LABEL:
- DBUG_RETURN(TRUE);
-
+#ifdef WITH_WSREP
+wsrep_error_label:
+ DBUG_RETURN(true);
+#endif
}
@@ -550,9 +550,10 @@ Events::update_event(THD *thd, Event_parse_data *parse_data,
thd->restore_stmt_binlog_format(save_binlog_format);
DBUG_RETURN(ret);
-
-WSREP_ERROR_LABEL:
- DBUG_RETURN(TRUE);
+#ifdef WITH_WSREP
+wsrep_error_label:
+ DBUG_RETURN(true);
+#endif
}
@@ -617,9 +618,10 @@ Events::drop_event(THD *thd, const LEX_CSTRING *dbname,
thd->restore_stmt_binlog_format(save_binlog_format);
DBUG_RETURN(ret);
-
-WSREP_ERROR_LABEL:
- DBUG_RETURN(TRUE);
+#ifdef WITH_WSREP
+wsrep_error_label:
+ DBUG_RETURN(true);
+#endif
}
@@ -824,12 +826,13 @@ Events::fill_schema_events(THD *thd, TABLE_LIST *tables, COND * /* cond */)
*/
if (thd->lex->sql_command == SQLCOM_SHOW_EVENTS)
{
- DBUG_ASSERT(thd->lex->select_lex.db.str);
- if (!is_infoschema_db(&thd->lex->select_lex.db) && // There is no events in I_S
- check_access(thd, EVENT_ACL, thd->lex->select_lex.db.str,
+ DBUG_ASSERT(thd->lex->first_select_lex()->db.str);
+ if (!is_infoschema_db(&thd->lex->first_select_lex()->db) && // There are no events in I_S
+ check_access(thd, EVENT_ACL, thd->lex->first_select_lex()->db.str,
NULL, NULL, 0, 0))
DBUG_RETURN(1);
- db= normalize_db_name(thd->lex->select_lex.db.str, db_tmp, sizeof(db_tmp));
+ db= normalize_db_name(thd->lex->first_select_lex()->db.str,
+ db_tmp, sizeof(db_tmp));
}
ret= db_repository->fill_schema_events(thd, tables, db);
@@ -924,7 +927,7 @@ Events::init(THD *thd, bool opt_noacl_or_bootstrap)
my_message(ER_STARTUP,
"Event Scheduler: An error occurred when initializing "
"system tables. Disabling the Event Scheduler.",
- MYF(ME_NOREFRESH));
+ MYF(ME_ERROR_LOG));
/* Disable the scheduler since the system tables are not up to date */
opt_event_scheduler= EVENTS_OFF;
goto end;
@@ -946,7 +949,7 @@ Events::init(THD *thd, bool opt_noacl_or_bootstrap)
{
my_message_sql(ER_STARTUP,
"Event Scheduler: Error while loading from mysql.event table.",
- MYF(ME_NOREFRESH));
+ MYF(ME_ERROR_LOG));
res= TRUE; /* fatal error: request unireg_abort */
goto end;
}
@@ -1163,7 +1166,7 @@ Events::load_events_from_db(THD *thd)
{
my_message_sql(ER_STARTUP,
"Event Scheduler: Failed to open table mysql.event",
- MYF(ME_NOREFRESH));
+ MYF(ME_ERROR_LOG));
DBUG_RETURN(TRUE);
}
@@ -1189,7 +1192,7 @@ Events::load_events_from_db(THD *thd)
"Event Scheduler: "
"Error while loading events from mysql.event. "
"The table probably contains bad data or is corrupted",
- MYF(ME_NOREFRESH));
+ MYF(ME_ERROR_LOG));
delete et;
goto end;
}
@@ -1228,9 +1231,9 @@ Events::load_events_from_db(THD *thd)
}
my_printf_error(ER_STARTUP,
"Event Scheduler: Loaded %d event%s",
- MYF(ME_NOREFRESH |
+ MYF(ME_ERROR_LOG |
(global_system_variables.log_warnings) ?
- ME_JUST_INFO: 0),
+ ME_NOTE: 0),
count, (count == 1) ? "" : "s");
ret= FALSE;
diff --git a/sql/field.cc b/sql/field.cc
index 0f75772e485..0de5c377703 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2017, Oracle and/or its affiliates.
- Copyright (c) 2008, 2017, MariaDB
+ Copyright (c) 2008, 2019, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -33,11 +33,6 @@
#include "rpl_rli.h" // Pull in Relay_log_info
#include "slave.h" // Pull in rpl_master_has_bug()
#include "strfunc.h" // find_type2, find_set
-#include "sql_time.h" // str_to_datetime_with_warn,
- // str_to_time_with_warn,
- // TIME_to_timestamp,
- // make_time, make_date,
- // make_truncated_value_warning
#include "tztime.h" // struct Time_zone
#include "filesort.h" // change_double_for_sort
#include "log_event.h" // class Table_map_log_event
@@ -63,20 +58,30 @@ const char field_separator=',';
((ulong) ((1LL << MY_MIN(arg, 4) * 8) - 1))
// Column marked for read or the field set to read out or record[0] or [1]
-#define ASSERT_COLUMN_MARKED_FOR_READ \
- DBUG_ASSERT(!table || \
- (!table->read_set || \
- bitmap_is_set(table->read_set, field_index) || \
- (!(ptr >= table->record[0] && \
- ptr < table->record[0] + table->s->reclength))))
-
-#define ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED \
- DBUG_ASSERT(is_stat_field || !table || \
- (!table->write_set || \
- bitmap_is_set(table->write_set, field_index) || \
- (!(ptr >= table->record[0] && \
- ptr < table->record[0] + table->s->reclength))) || \
- (table->vcol_set && bitmap_is_set(table->vcol_set, field_index)))
+inline bool Field::marked_for_read() const
+{
+ return !table ||
+ (!table->read_set ||
+ bitmap_is_set(table->read_set, field_index) ||
+ (!(ptr >= table->record[0] &&
+ ptr < table->record[0] + table->s->reclength)));
+}
+
+/*
+ The name of this function is a bit misleading as in 10.4 we no longer
+ have to test whether the field is computed. Instead we mark
+ changed fields with DBUG_FIX_WRITE_SET() in table.cc
+*/
+
+inline bool Field::marked_for_write_or_computed() const
+{
+ return (is_stat_field || !table ||
+ (!table->write_set ||
+ bitmap_is_set(table->write_set, field_index) ||
+ (!(ptr >= table->record[0] &&
+ ptr < table->record[0] + table->s->reclength))));
+}
+
#define FLAGSTR(S,F) ((S) & (F) ? #F " " : "")
@@ -93,15 +98,14 @@ const int FIELDTYPE_LAST= 254;
const int FIELDTYPE_NUM= FIELDTYPE_TEAR_FROM + (FIELDTYPE_LAST -
FIELDTYPE_TEAR_TO);
-static inline int field_type2index (enum_field_types field_type)
+static inline int merge_type2index(enum_field_types merge_type)
{
- DBUG_ASSERT(real_type_to_type(field_type) < FIELDTYPE_TEAR_FROM ||
- real_type_to_type(field_type) > FIELDTYPE_TEAR_TO);
- DBUG_ASSERT(field_type <= FIELDTYPE_LAST);
- field_type= real_type_to_type(field_type);
- if (field_type < FIELDTYPE_TEAR_FROM)
- return field_type;
- return FIELDTYPE_TEAR_FROM + (field_type - FIELDTYPE_TEAR_TO) - 1;
+ DBUG_ASSERT(merge_type < FIELDTYPE_TEAR_FROM ||
+ merge_type > FIELDTYPE_TEAR_TO);
+ DBUG_ASSERT(merge_type <= FIELDTYPE_LAST);
+ if (merge_type < FIELDTYPE_TEAR_FROM)
+ return merge_type;
+ return FIELDTYPE_TEAR_FROM + (merge_type - FIELDTYPE_TEAR_TO) - 1;
}
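A worked illustration of the index compaction done by merge_type2index(); the tear-point values below are assumptions made for the arithmetic only, the real constants are defined earlier in field.cc and are not part of this hunk.

/*
  Assume FIELDTYPE_TEAR_FROM == 17 and FIELDTYPE_TEAR_TO == 244
  (illustrative values only). Then:
    merge_type2index(16)  == 16                           (below the gap)
    merge_type2index(245) == 17 + (245 - 244) - 1 == 17   (first type above the gap)
    merge_type2index(254) == 17 + (254 - 244) - 1 == 26   (FIELDTYPE_LAST)
  so field_types_merge_rules[][] needs only
  FIELDTYPE_NUM == 17 + (254 - 244) == 27 rows/columns instead of 255.
*/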
@@ -926,31 +930,37 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]=
}
};
-/**
- Return type of which can carry value of both given types in UNION result.
-
- @param a type for merging
- @param b type for merging
-
- @return
- type of field
-*/
-
-enum_field_types Field::field_type_merge(enum_field_types a,
- enum_field_types b)
-{
- return field_types_merge_rules[field_type2index(a)]
- [field_type2index(b)];
-}
const Type_handler *
Type_handler::aggregate_for_result_traditional(const Type_handler *a,
const Type_handler *b)
{
- enum_field_types ta= a->real_field_type();
- enum_field_types tb= b->real_field_type();
- return
- Type_handler::get_handler_by_real_type(Field::field_type_merge(ta, tb));
+ if (a == b)
+ {
+ /*
+ If two traditional handlers are equal, quickly return "a".
+ Some handlers (e.g. Type_handler_bool) pretend to be traditional,
+ but in fact they are not traditional to the full extent; they are
+ only sub-types for now (and don't have a corresponding Field_xxx yet).
+ Here we preserve such handlers during aggregation.
+ As a result, COALESCE(true,true) preserves the "boolean" data type.
+
+ We still need to convert deprecated data types,
+ similar to what field_types_merge_rules[][] does.
+ */
+ switch (a->field_type()) {
+ case MYSQL_TYPE_DECIMAL: return &type_handler_newdecimal;
+ case MYSQL_TYPE_DATE: return &type_handler_newdate;
+ case MYSQL_TYPE_VAR_STRING: return &type_handler_varchar;
+ default: break;
+ }
+ return a;
+ }
+ enum_field_types ta= a->traditional_merge_field_type();
+ enum_field_types tb= b->traditional_merge_field_type();
+ enum_field_types res= field_types_merge_rules[merge_type2index(ta)]
+ [merge_type2index(tb)];
+ return Type_handler::get_handler_by_real_type(res);
}
@@ -1372,6 +1382,14 @@ error:
}
+bool Field::make_empty_rec_store_default_value(THD *thd, Item *item)
+{
+ DBUG_ASSERT(!(flags & BLOB_FLAG));
+ int res= item->save_in_field(this, true);
+ return res != 0 && res != 3;
+}
+
+
/**
Numeric fields base class constructor.
*/
@@ -1687,7 +1705,7 @@ int Field::warn_if_overflow(int op_result)
String *Field::val_int_as_str(String *val_buffer, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
CHARSET_INFO *cs= &my_charset_bin;
uint length;
longlong value= val_int();
@@ -1832,12 +1850,9 @@ int Field::store(const char *to, size_t length, CHARSET_INFO *cs,
}
-int Field::store_timestamp(my_time_t ts, ulong sec_part)
+int Field::store_timestamp_dec(const timeval &ts, uint dec)
{
- MYSQL_TIME ltime;
- THD *thd= get_thd();
- thd->timestamp_to_TIME(&ltime, ts, sec_part, 0);
- return store_time_dec(&ltime, decimals());
+ return store_time_dec(Datetime(get_thd(), ts).get_mysql_time(), dec);
}
/**
@@ -1937,14 +1952,6 @@ Field::unpack(uchar* to, const uchar *from, const uchar *from_end,
}
-my_decimal *Field::val_decimal(my_decimal *decimal)
-{
- /* This never have to be called */
- DBUG_ASSERT(0);
- return 0;
-}
-
-
void Field_num::add_zerofill_and_unsigned(String &res) const
{
if (unsigned_flag)
@@ -1980,7 +1987,7 @@ void Field::make_send_field(Send_field *field)
}
field->col_name= field_name;
field->length=field_length;
- field->type=type();
+ field->set_handler(type_handler());
field->flags=table->maybe_null ? (flags & ~NOT_NULL_FLAG) : flags;
field->decimals= 0;
}
@@ -2046,7 +2053,7 @@ longlong Field::convert_decimal2longlong(const my_decimal *val,
int Field_int::store_decimal(const my_decimal *val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int err= 0;
longlong i= convert_decimal2longlong(val, unsigned_flag, &err);
return MY_TEST(err | store(i, unsigned_flag));
@@ -2069,26 +2076,25 @@ int Field_int::store_decimal(const my_decimal *val)
my_decimal* Field_int::val_decimal(my_decimal *decimal_value)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
longlong nr= val_int();
int2my_decimal(E_DEC_FATAL_ERROR, nr, unsigned_flag, decimal_value);
return decimal_value;
}
-bool Field_int::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
+bool Field_int::get_date(MYSQL_TIME *ltime,date_mode_t fuzzydate)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
- longlong nr= val_int();
- bool neg= !(flags & UNSIGNED_FLAG) && nr < 0;
- return int_to_datetime_with_warn(neg, neg ? -nr : nr, ltime, fuzzydate,
- table->s, field_name.str);
+ DBUG_ASSERT(marked_for_read());
+ Longlong_hybrid nr(val_int(), (flags & UNSIGNED_FLAG));
+ return int_to_datetime_with_warn(get_thd(), nr, ltime,
+ fuzzydate, table->s, field_name.str);
}
-bool Field_vers_trx_id::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate, ulonglong trx_id)
+bool Field_vers_trx_id::get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate, ulonglong trx_id)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
DBUG_ASSERT(ltime);
if (!table || !table->s)
return true;
@@ -2214,7 +2220,7 @@ void Field_num::make_send_field(Send_field *field)
int Field_str::store_decimal(const my_decimal *d)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
double val;
/* TODO: use decimal2string? */
int err= warn_if_overflow(my_decimal2double(E_DEC_FATAL_ERROR &
@@ -2225,7 +2231,7 @@ int Field_str::store_decimal(const my_decimal *d)
my_decimal *Field_str::val_decimal(my_decimal *decimal_value)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
longlong nr= val_int();
int2my_decimal(E_DEC_FATAL_ERROR, nr, 0, decimal_value);
return decimal_value;
@@ -2266,15 +2272,13 @@ uint Field::fill_cache_field(CACHE_FIELD *copy)
}
-bool Field::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
+bool Field::get_date(MYSQL_TIME *to, date_mode_t mode)
{
- char buff[40];
- String tmp(buff,sizeof(buff),&my_charset_bin),*res;
- if (!(res=val_str(&tmp)) ||
- str_to_datetime_with_warn(res->charset(), res->ptr(), res->length(),
- ltime, fuzzydate))
- return 1;
- return 0;
+ StringBuffer<40> tmp;
+ Temporal::Warn_push warn(get_thd(), NULL, NullS, to, mode);
+ Temporal_hybrid *t= new(to) Temporal_hybrid(get_thd(), &warn,
+ val_str(&tmp), mode);
+ return !t->is_valid_temporal();
}
/**
@@ -2286,7 +2290,7 @@ bool Field::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
int Field::store_time_dec(const MYSQL_TIME *ltime, uint dec)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
char buff[MAX_DATE_STRING_REP_LENGTH];
uint length= (uint) my_TIME_to_str(ltime, buff, dec);
/* Avoid conversion when field character set is ASCII compatible */
@@ -2344,6 +2348,36 @@ Field *Field::new_key_field(MEM_ROOT *root, TABLE *new_table,
}
+/**
+ Create field for temporary table from given field.
+
+ @param mem_root Memory root to allocate the new field on
+ @param new_table Temporary table the new field is created for
+ @param maybe_null_arg If the result field should be NULL-able,
+ even if the original field is NOT NULL, e.g. for:
+ - OUTER JOIN fields
+ - WITH ROLLUP fields
+ - arguments of aggregate functions, e.g. SUM(column1)
+ @retval NULL, on error
+ @retval pointer to the new field created, on success.
+*/
+
+Field *Field::create_tmp_field(MEM_ROOT *mem_root, TABLE *new_table,
+ bool maybe_null_arg)
+{
+ Field *new_field;
+
+ if ((new_field= make_new_field(mem_root, new_table, new_table == table)))
+ {
+ new_field->init_for_tmp_table(this, new_table);
+ new_field->flags|= flags & NO_DEFAULT_VALUE_FLAG;
+ if (maybe_null_arg)
+ new_field->flags&= ~NOT_NULL_FLAG; // Because of outer join
+ }
+ return new_field;
+}
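A sketch of the intended call pattern; the caller-side names tmp_table, src_field and used_in_outer_join are placeholders, only create_tmp_field() itself comes from the hunk above.

static Field *make_tmp_field_for(THD *thd, TABLE *tmp_table,
                                 Field *src_field, bool used_in_outer_join)
{
  /* NULL-ability is forced for outer-join / ROLLUP / aggregate arguments */
  return src_field->create_tmp_field(thd->mem_root, tmp_table,
                                     src_field->maybe_null() ||
                                     used_in_outer_join);
}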
+
+
/* This is used to generate a field in TABLE from TABLE_SHARE */
Field *Field::clone(MEM_ROOT *root, TABLE *new_table)
@@ -2415,6 +2449,15 @@ void Field_null::sql_type(String &res) const
}
+uint Field_null::is_equal(Create_field *new_field)
+{
+ DBUG_ASSERT(!compression_method());
+ return new_field->type_handler() == type_handler() &&
+ new_field->charset == field_charset &&
+ new_field->length == max_display_length();
+}
+
+
/****************************************************************************
Field_row, e.g. for ROW-type SP variables
****************************************************************************/
@@ -2513,7 +2556,7 @@ void Field_decimal::overflow(bool negative)
int Field_decimal::store(const char *from_arg, size_t len, CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
char buff[STRING_BUFFER_USUAL_SIZE];
String tmp(buff,sizeof(buff), &my_charset_bin);
const uchar *from= (uchar*) from_arg;
@@ -2879,7 +2922,7 @@ int Field_decimal::store(const char *from_arg, size_t len, CHARSET_INFO *cs)
int Field_decimal::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
if (unsigned_flag && nr < 0)
{
overflow(1);
@@ -2917,7 +2960,7 @@ int Field_decimal::store(double nr)
int Field_decimal::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
char buff[22];
uint length, int_part;
char fyllchar;
@@ -2953,7 +2996,7 @@ int Field_decimal::store(longlong nr, bool unsigned_val)
double Field_decimal::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
int not_used;
char *end_not_used;
return my_strntod(&my_charset_bin, (char*) ptr, field_length, &end_not_used,
@@ -2962,7 +3005,7 @@ double Field_decimal::val_real(void)
longlong Field_decimal::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
int not_used;
if (unsigned_flag)
return my_strntoull(&my_charset_bin, (char*) ptr, field_length, 10, NULL,
@@ -2975,7 +3018,7 @@ longlong Field_decimal::val_int(void)
String *Field_decimal::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
uchar *str;
size_t tmp_length;
@@ -3149,7 +3192,7 @@ void Field_new_decimal::set_value_on_overflow(my_decimal *decimal_value,
Otherwise sets maximal number that can be stored in the field.
@param decimal_value my_decimal
- @param [OUT] native_error the error returned by my_decimal2binary().
+ @param [OUT] native_error the error returned by my_decimal::to_binary().
@retval
0 ok
@@ -3160,7 +3203,7 @@ void Field_new_decimal::set_value_on_overflow(my_decimal *decimal_value,
bool Field_new_decimal::store_value(const my_decimal *decimal_value,
int *native_error)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
DBUG_ENTER("Field_new_decimal::store_value");
#ifndef DBUG_OFF
@@ -3187,8 +3230,8 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value,
}
#endif
- *native_error= my_decimal2binary(E_DEC_FATAL_ERROR & ~E_DEC_OVERFLOW,
- decimal_value, ptr, precision, dec);
+ *native_error= decimal_value->to_binary(ptr, precision, dec,
+ E_DEC_FATAL_ERROR & ~E_DEC_OVERFLOW);
if (unlikely(*native_error == E_DEC_OVERFLOW))
{
@@ -3196,7 +3239,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value,
DBUG_PRINT("info", ("overflow"));
set_warning(ER_WARN_DATA_OUT_OF_RANGE, 1);
set_value_on_overflow(&buff, decimal_value->sign());
- my_decimal2binary(E_DEC_FATAL_ERROR, &buff, ptr, precision, dec);
+ buff.to_binary(ptr, precision, dec);
error= 1;
}
DBUG_EXECUTE("info", print_decimal_buff(decimal_value, (uchar *) ptr,
@@ -3218,7 +3261,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value)
int Field_new_decimal::store(const char *from, size_t length,
CHARSET_INFO *charset_arg)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
my_decimal decimal_value;
THD *thd= get_thd();
DBUG_ENTER("Field_new_decimal::store(char*)");
@@ -3302,7 +3345,7 @@ int Field_new_decimal::store(const char *from, size_t length,
int Field_new_decimal::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
my_decimal decimal_value;
int err;
THD *thd= get_thd();
@@ -3327,7 +3370,7 @@ int Field_new_decimal::store(double nr)
int Field_new_decimal::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
my_decimal decimal_value;
int err;
@@ -3349,7 +3392,7 @@ int Field_new_decimal::store(longlong nr, bool unsigned_val)
int Field_new_decimal::store_decimal(const my_decimal *decimal_value)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
return store_value(decimal_value);
}
@@ -3361,40 +3404,9 @@ int Field_new_decimal::store_time_dec(const MYSQL_TIME *ltime, uint dec_arg)
}
-double Field_new_decimal::val_real(void)
-{
- ASSERT_COLUMN_MARKED_FOR_READ;
- double dbl;
- my_decimal decimal_value;
- my_decimal2double(E_DEC_FATAL_ERROR, val_decimal(&decimal_value), &dbl);
- return dbl;
-}
-
-
-longlong Field_new_decimal::val_int(void)
-{
- ASSERT_COLUMN_MARKED_FOR_READ;
- longlong i;
- my_decimal decimal_value;
- my_decimal2int(E_DEC_FATAL_ERROR, val_decimal(&decimal_value),
- unsigned_flag, &i);
- return i;
-}
-
-
-ulonglong Field_new_decimal::val_uint(void)
-{
- ASSERT_COLUMN_MARKED_FOR_READ;
- longlong i;
- my_decimal decimal_value;
- my_decimal2int(E_DEC_FATAL_ERROR, val_decimal(&decimal_value), true, &i);
- return i;
-}
-
-
my_decimal* Field_new_decimal::val_decimal(my_decimal *decimal_value)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
DBUG_ENTER("Field_new_decimal::val_decimal");
binary2my_decimal(E_DEC_FATAL_ERROR, ptr, decimal_value,
precision, dec);
@@ -3404,28 +3416,6 @@ my_decimal* Field_new_decimal::val_decimal(my_decimal *decimal_value)
}
-String *Field_new_decimal::val_str(String *val_buffer,
- String *val_ptr __attribute__((unused)))
-{
- ASSERT_COLUMN_MARKED_FOR_READ;
- my_decimal decimal_value;
- uint fixed_precision= zerofill ? precision : 0;
- my_decimal2string(E_DEC_FATAL_ERROR, val_decimal(&decimal_value),
- fixed_precision, dec, '0', val_buffer);
- val_buffer->set_charset(&my_charset_numeric);
- return val_buffer;
-}
-
-
-bool Field_new_decimal::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
-{
- my_decimal value;
- return decimal_to_datetime_with_warn(val_decimal(&value),
- ltime, fuzzydate, table->s,
- field_name.str);
-}
-
-
int Field_new_decimal::cmp(const uchar *a,const uchar*b)
{
return memcmp(a, b, bin_size);
@@ -3507,7 +3497,7 @@ uint Field_new_decimal::is_equal(Create_field *new_field)
return ((new_field->type_handler() == type_handler()) &&
((new_field->flags & UNSIGNED_FLAG) ==
(uint) (flags & UNSIGNED_FLAG)) &&
- ((new_field->flags & AUTO_INCREMENT_FLAG) ==
+ ((new_field->flags & AUTO_INCREMENT_FLAG) <=
(uint) (flags & AUTO_INCREMENT_FLAG)) &&
(new_field->length == max_display_length()) &&
(new_field->decimals == dec));
@@ -3580,8 +3570,8 @@ Item *Field_new_decimal::get_equal_const_item(THD *thd, const Context &ctx,
if (const_item->field_type() != MYSQL_TYPE_NEWDECIMAL ||
const_item->decimal_scale() != decimals())
{
- my_decimal *val, val_buffer, val_buffer2;
- if (!(val= const_item->val_decimal(&val_buffer)))
+ VDec val(const_item);
+ if (val.is_null())
{
DBUG_ASSERT(0);
return const_item;
@@ -3591,9 +3581,9 @@ Item *Field_new_decimal::get_equal_const_item(THD *thd, const Context &ctx,
See comments about truncation in the same place in
Field_time::get_equal_const_item().
*/
- my_decimal_round(E_DEC_FATAL_ERROR, val, decimals(), true, &val_buffer2);
- return new (thd->mem_root) Item_decimal(thd, field_name.str,
- &val_buffer2,
+ my_decimal tmp;
+ val.round_to(&tmp, decimals(), TRUNCATE);
+ return new (thd->mem_root) Item_decimal(thd, field_name.str, &tmp,
decimals(), field_length);
}
break;
@@ -3619,7 +3609,7 @@ int Field_int::store_time_dec(const MYSQL_TIME *ltime, uint dec_arg)
int Field_tiny::store(const char *from,size_t len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error;
longlong rnd;
@@ -3631,7 +3621,7 @@ int Field_tiny::store(const char *from,size_t len,CHARSET_INFO *cs)
int Field_tiny::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
nr=rint(nr);
if (unsigned_flag)
@@ -3674,7 +3664,7 @@ int Field_tiny::store(double nr)
int Field_tiny::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
if (unsigned_flag)
@@ -3719,7 +3709,7 @@ int Field_tiny::store(longlong nr, bool unsigned_val)
double Field_tiny::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
int tmp= unsigned_flag ? (int) ptr[0] :
(int) ((signed char*) ptr)[0];
return (double) tmp;
@@ -3728,7 +3718,7 @@ double Field_tiny::val_real(void)
longlong Field_tiny::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
int tmp= unsigned_flag ? (int) ptr[0] :
(int) ((signed char*) ptr)[0];
return (longlong) tmp;
@@ -3738,7 +3728,7 @@ longlong Field_tiny::val_int(void)
String *Field_tiny::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
long nr= unsigned_flag ? (long) ptr[0] : (long) ((signed char*) ptr)[0];
return val_str_from_long(val_buffer, 5, -10, nr);
}
@@ -3779,7 +3769,7 @@ void Field_tiny::sql_type(String &res) const
int Field_short::store(const char *from,size_t len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int store_tmp;
int error;
longlong rnd;
@@ -3793,7 +3783,7 @@ int Field_short::store(const char *from,size_t len,CHARSET_INFO *cs)
int Field_short::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
int16 res;
nr=rint(nr);
@@ -3838,7 +3828,7 @@ int Field_short::store(double nr)
int Field_short::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
int16 res;
@@ -3886,7 +3876,7 @@ int Field_short::store(longlong nr, bool unsigned_val)
double Field_short::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
short j;
j=sint2korr(ptr);
return unsigned_flag ? (double) (unsigned short) j : (double) j;
@@ -3894,7 +3884,7 @@ double Field_short::val_real(void)
longlong Field_short::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
short j;
j=sint2korr(ptr);
return unsigned_flag ? (longlong) (unsigned short) j : (longlong) j;
@@ -3904,7 +3894,7 @@ longlong Field_short::val_int(void)
String *Field_short::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
short j= sint2korr(ptr);
long nr= unsigned_flag ? (long) (unsigned short) j : (long) j;
return val_str_from_long(val_buffer, 7, -10, nr);
@@ -3953,7 +3943,7 @@ void Field_short::sql_type(String &res) const
int Field_medium::store(const char *from,size_t len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int store_tmp;
int error;
longlong rnd;
@@ -3967,7 +3957,7 @@ int Field_medium::store(const char *from,size_t len,CHARSET_INFO *cs)
int Field_medium::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
nr=rint(nr);
if (unsigned_flag)
@@ -4013,7 +4003,7 @@ int Field_medium::store(double nr)
int Field_medium::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
if (unsigned_flag)
@@ -4062,7 +4052,7 @@ int Field_medium::store(longlong nr, bool unsigned_val)
double Field_medium::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr);
return (double) j;
}
@@ -4070,7 +4060,7 @@ double Field_medium::val_real(void)
longlong Field_medium::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr);
return (longlong) j;
}
@@ -4079,7 +4069,7 @@ longlong Field_medium::val_int(void)
String *Field_medium::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
long nr= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr);
return val_str_from_long(val_buffer, 10, -10, nr);
}
@@ -4105,7 +4095,7 @@ String *Field_int::val_str_from_long(String *val_buffer,
bool Field_medium::send_binary(Protocol *protocol)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
return protocol->store_long(Field_medium::val_int());
}
@@ -4151,7 +4141,7 @@ void Field_medium::sql_type(String &res) const
int Field_long::store(const char *from,size_t len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
long store_tmp;
int error;
longlong rnd;
@@ -4165,7 +4155,7 @@ int Field_long::store(const char *from,size_t len,CHARSET_INFO *cs)
int Field_long::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
int32 res;
nr=rint(nr);
@@ -4210,7 +4200,7 @@ int Field_long::store(double nr)
int Field_long::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
int32 res;
@@ -4256,7 +4246,7 @@ int Field_long::store(longlong nr, bool unsigned_val)
double Field_long::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
int32 j;
j=sint4korr(ptr);
return unsigned_flag ? (double) (uint32) j : (double) j;
@@ -4264,7 +4254,7 @@ double Field_long::val_real(void)
longlong Field_long::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
int32 j;
/* See the comment in Field_long::store(long long) */
DBUG_ASSERT(!table || table->in_use == current_thd);
@@ -4276,7 +4266,7 @@ longlong Field_long::val_int(void)
String *Field_long::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
long nr= unsigned_flag ? (long) uint4korr(ptr) : sint4korr(ptr);
return val_str_from_long(val_buffer, 12, unsigned_flag ? 10 : -10, nr);
}
@@ -4284,7 +4274,7 @@ String *Field_long::val_str(String *val_buffer,
bool Field_long::send_binary(Protocol *protocol)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
return protocol->store_long(Field_long::val_int());
}
@@ -4324,7 +4314,7 @@ void Field_long::sql_type(String &res) const
int Field_longlong::store(const char *from,size_t len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
char *end;
ulonglong tmp;
@@ -4347,7 +4337,7 @@ int Field_longlong::store(const char *from,size_t len,CHARSET_INFO *cs)
int Field_longlong::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
Converter_double_to_longlong conv(nr, unsigned_flag);
if (unlikely(conv.error()))
@@ -4360,7 +4350,7 @@ int Field_longlong::store(double nr)
int Field_longlong::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
if (unlikely(nr < 0)) // Only possible error
@@ -4384,7 +4374,7 @@ int Field_longlong::store(longlong nr, bool unsigned_val)
double Field_longlong::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
longlong j;
j=sint8korr(ptr);
/* The following is open coded to avoid a bug in gcc 3.3 */
@@ -4399,7 +4389,7 @@ double Field_longlong::val_real(void)
longlong Field_longlong::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
longlong j;
j=sint8korr(ptr);
return j;
@@ -4429,7 +4419,7 @@ String *Field_longlong::val_str(String *val_buffer,
bool Field_longlong::send_binary(Protocol *protocol)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
return protocol->store_longlong(Field_longlong::val_int(), unsigned_flag);
}
@@ -4471,14 +4461,14 @@ void Field_longlong::sql_type(String &res) const
void Field_longlong::set_max()
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
set_notnull();
int8store(ptr, unsigned_flag ? ULONGLONG_MAX : LONGLONG_MAX);
}
bool Field_longlong::is_max()
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
if (unsigned_flag)
{
ulonglong j;
@@ -4508,7 +4498,7 @@ int Field_float::store(const char *from,size_t len,CHARSET_INFO *cs)
int Field_float::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= truncate_double(&nr, field_length,
not_fixed ? NOT_FIXED_DEC : dec,
unsigned_flag, FLT_MAX);
@@ -4537,7 +4527,7 @@ int Field_float::store(longlong nr, bool unsigned_val)
double Field_float::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
float j;
float4get(j,ptr);
return ((double) j);
@@ -4554,7 +4544,7 @@ longlong Field_float::val_int(void)
String *Field_float::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
DBUG_ASSERT(!zerofill || field_length <= MAX_FIELD_CHARLENGTH);
float nr;
float4get(nr,ptr);
@@ -4636,7 +4626,7 @@ void Field_float::sort_string(uchar *to,uint length __attribute__((unused)))
bool Field_float::send_binary(Protocol *protocol)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
return protocol->store((float) Field_float::val_real(), dec, (String*) 0);
}
@@ -4687,7 +4677,7 @@ int Field_double::store(const char *from,size_t len,CHARSET_INFO *cs)
int Field_double::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= truncate_double(&nr, field_length,
not_fixed ? NOT_FIXED_DEC : dec,
unsigned_flag, DBL_MAX);
@@ -4833,13 +4823,6 @@ Converter_double_to_longlong::push_warning(THD *thd,
}
-int Field_real::store_decimal(const my_decimal *dm)
-{
- double dbl;
- my_decimal2double(E_DEC_FATAL_ERROR, dm, &dbl);
- return store(dbl);
-}
-
int Field_real::store_time_dec(const MYSQL_TIME *ltime, uint dec_arg)
{
return store(TIME_to_double(ltime));
@@ -4848,7 +4831,7 @@ int Field_real::store_time_dec(const MYSQL_TIME *ltime, uint dec_arg)
double Field_double::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
double j;
float8get(j,ptr);
return j;
@@ -4866,17 +4849,17 @@ longlong Field_double::val_int_from_real(bool want_unsigned_result)
my_decimal *Field_real::val_decimal(my_decimal *decimal_value)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
double2my_decimal(E_DEC_FATAL_ERROR, val_real(), decimal_value);
return decimal_value;
}
-bool Field_real::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
+bool Field_real::get_date(MYSQL_TIME *ltime,date_mode_t fuzzydate)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
double nr= val_real();
- return double_to_datetime_with_warn(nr, ltime, fuzzydate,
+ return double_to_datetime_with_warn(get_thd(), nr, ltime, fuzzydate,
table->s, field_name.str);
}
@@ -4904,7 +4887,7 @@ Item *Field_real::get_equal_const_item(THD *thd, const Context &ctx,
String *Field_double::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
DBUG_ASSERT(!zerofill || field_length <= MAX_FIELD_CHARLENGTH);
double nr;
float8get(nr,ptr);
@@ -5056,147 +5039,180 @@ int Field_timestamp::save_in_field(Field *to)
{
ulong sec_part;
my_time_t ts= get_timestamp(&sec_part);
- return to->store_timestamp(ts, sec_part);
+ return to->store_timestamp_dec(Timeval(ts, sec_part), decimals());
}
my_time_t Field_timestamp::get_timestamp(const uchar *pos,
ulong *sec_part) const
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
*sec_part= 0;
return sint4korr(pos);
}
-int Field_timestamp::store_TIME_with_warning(THD *thd, MYSQL_TIME *l_time,
- const ErrConv *str,
- int was_cut,
- bool have_smth_to_conv)
+bool Field_timestamp::val_native(Native *to)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
- uint error = 0;
- my_time_t timestamp;
+ DBUG_ASSERT(marked_for_read());
+ my_time_t sec= (my_time_t) sint4korr(ptr);
+ return Timestamp_or_zero_datetime(Timestamp(sec, 0), sec == 0).
+ to_native(to, 0);
+}
- if (MYSQL_TIME_WARN_HAVE_WARNINGS(was_cut) || !have_smth_to_conv)
- {
- error= 1;
- set_datetime_warning(WARN_DATA_TRUNCATED,
- str, MYSQL_TIMESTAMP_DATETIME, 1);
- }
- else if (MYSQL_TIME_WARN_HAVE_NOTES(was_cut))
+
+int Field_timestamp::store_TIME_with_warning(THD *thd, const Datetime *dt,
+ const ErrConv *str, int was_cut)
+{
+ DBUG_ASSERT(marked_for_write_or_computed());
+ static const Timestamp zero(0, 0);
+
+ // Handle totally bad values
+ if (!dt->is_valid_datetime())
{
- error= 3;
- set_datetime_warning(Sql_condition::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED,
- str, MYSQL_TIMESTAMP_DATETIME, 1);
+ set_datetime_warning(WARN_DATA_TRUNCATED, str, "datetime", 1);
+ store_TIMESTAMP(zero);
+ return 1;
}
- /* Only convert a correct date (not a zero date) */
- if (have_smth_to_conv && l_time->month)
+
+ // Handle values that do not need DATETIME to TIMESTAMP conversion
+ if (!dt->get_mysql_time()->month)
{
- uint conversion_error;
- timestamp= TIME_to_timestamp(thd, l_time, &conversion_error);
- if (timestamp == 0 && l_time->second_part == 0)
- conversion_error= ER_WARN_DATA_OUT_OF_RANGE;
- if (unlikely(conversion_error))
- {
- set_datetime_warning(conversion_error,
- str, MYSQL_TIMESTAMP_DATETIME, !error);
- error= 1;
- }
+ /*
+ Zero date is allowed by the current sql_mode. Store zero timestamp.
+ Return success or a warning about non-fatal truncation, e.g.:
+ INSERT INTO t1 (ts) VALUES ('0000-00-00 00:00:00 some tail');
+ */
+ store_TIMESTAMP(zero);
+ return store_TIME_return_code_with_warnings(was_cut, str, "datetime");
}
- else
+
+ // Convert DATETIME to TIMESTAMP
+ uint conversion_error;
+ const MYSQL_TIME *l_time= dt->get_mysql_time();
+ my_time_t timestamp= TIME_to_timestamp(thd, l_time, &conversion_error);
+ if (timestamp == 0 && l_time->second_part == 0)
{
- timestamp= 0;
- l_time->second_part= 0;
+ set_datetime_warning(ER_WARN_DATA_OUT_OF_RANGE, str, "datetime", 1);
+ store_TIMESTAMP(zero);
+ return 1; // date was fine but pointed to a DST gap
}
- store_TIME(timestamp, l_time->second_part);
- return error;
-}
+ // Store the value
+ DBUG_ASSERT(!dt->fraction_remainder(decimals()));
+ store_TIMESTAMP(Timestamp(timestamp, l_time->second_part));
-static bool
-copy_or_convert_to_datetime(THD *thd, const MYSQL_TIME *from, MYSQL_TIME *to)
-{
- if (from->time_type == MYSQL_TIMESTAMP_TIME)
- return time_to_datetime(thd, from, to);
- *to= *from;
- return false;
+ // Calculate return value and send warnings if needed
+ if (unlikely(conversion_error)) // e.g. DATETIME in the DST gap
+ {
+ set_datetime_warning(conversion_error, str, "datetime", 1);
+ return 1;
+ }
+ return store_TIME_return_code_with_warnings(was_cut, str, "datetime");
}
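To summarize the branches above with concrete, purely illustrative inputs:

/*
  Illustrative inputs and the branch they take in store_TIME_with_warning():
    '2001-02-31 10:20:30'  -> invalid Datetime  -> warn, store zero, return 1
    '0000-00-00 00:00:00'  -> month == 0        -> store zero (when the zero
                              date is allowed by sql_mode); the return value
                              depends on "was_cut"
    '2011-03-27 02:30:00'  -> may fall into a DST gap in some time zones;
                              TIME_to_timestamp() reports conversion_error
                              -> warn, return 1
    '2001-01-01 10:20:30'  -> normal conversion -> store, return
                              store_TIME_return_code_with_warnings(...)
*/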
-sql_mode_t Field_timestamp::sql_mode_for_timestamp(THD *thd) const
+date_conv_mode_t Timestamp::sql_mode_for_timestamp(THD *thd)
{
// We don't want to store invalid or fuzzy datetime values in TIMESTAMP
- return (thd->variables.sql_mode & MODE_NO_ZERO_DATE) | MODE_NO_ZERO_IN_DATE;
+ return date_conv_mode_t((thd->variables.sql_mode & MODE_NO_ZERO_DATE) |
+ MODE_NO_ZERO_IN_DATE);
}
int Field_timestamp::store_time_dec(const MYSQL_TIME *ltime, uint dec)
{
- int unused;
+ int warn;
ErrConvTime str(ltime);
THD *thd= get_thd();
- MYSQL_TIME l_time;
- bool valid= !copy_or_convert_to_datetime(thd, ltime, &l_time) &&
- !check_date(&l_time, pack_time(&l_time) != 0,
- sql_mode_for_timestamp(thd), &unused);
- return store_TIME_with_warning(thd, &l_time, &str, false, valid);
+ Datetime dt(thd, &warn, ltime, Timestamp::DatetimeOptions(thd), decimals());
+ return store_TIME_with_warning(thd, &dt, &str, warn);
}
int Field_timestamp::store(const char *from,size_t len,CHARSET_INFO *cs)
{
- MYSQL_TIME l_time;
- MYSQL_TIME_STATUS status;
- bool have_smth_to_conv;
ErrConvString str(from, len, cs);
THD *thd= get_thd();
-
- have_smth_to_conv= !str_to_datetime(cs, from, len, &l_time,
- sql_mode_for_timestamp(thd), &status);
- return store_TIME_with_warning(thd, &l_time, &str,
- status.warnings, have_smth_to_conv);
+ MYSQL_TIME_STATUS st;
+ Datetime dt(thd, &st, from, len, cs, Timestamp::DatetimeOptions(thd), decimals());
+ return store_TIME_with_warning(thd, &dt, &str, st.warnings);
}
int Field_timestamp::store(double nr)
{
- MYSQL_TIME l_time;
int error;
ErrConvDouble str(nr);
THD *thd= get_thd();
-
- longlong tmp= double_to_datetime(nr, &l_time, sql_mode_for_timestamp(thd),
- &error);
- return store_TIME_with_warning(thd, &l_time, &str, error, tmp != -1);
+ Datetime dt(thd, &error, nr, Timestamp::DatetimeOptions(thd), decimals());
+ return store_TIME_with_warning(thd, &dt, &str, error);
}
int Field_timestamp::store(longlong nr, bool unsigned_val)
{
- MYSQL_TIME l_time;
int error;
- ErrConvInteger str(nr, unsigned_val);
+ Longlong_hybrid tmp(nr, unsigned_val);
+ ErrConvInteger str(tmp);
THD *thd= get_thd();
+ Datetime dt(&error, tmp, Timestamp::DatetimeOptions(thd));
+ return store_TIME_with_warning(thd, &dt, &str, error);
+}
+
- longlong tmp= number_to_datetime(nr, 0, &l_time, sql_mode_for_timestamp(thd),
- &error);
- return store_TIME_with_warning(thd, &l_time, &str, error, tmp != -1);
+int Field_timestamp::store_timestamp_dec(const timeval &ts, uint dec)
+{
+ int warn= 0;
+ time_round_mode_t mode= Datetime::default_round_mode(get_thd());
+ store_TIMESTAMP(Timestamp(ts).round(decimals(), mode, &warn));
+ if (warn)
+ {
+ /*
+ We're here if rounding would overflow the supported TIMESTAMP
+ range, so truncation happened instead:
+ CREATE TABLE t1 (a TIMESTAMP(6));
+ INSERT INTO t1 VALUES ('maximum-possible-timestamp.999999');
+ ALTER TABLE t1 MODIFY a TIMESTAMP(5);
+ SELECT * FROM t1; --> 'maximum-possible-timestamp.99999' (5 digits)
+ Raise a warning, like DATETIME does for '9999-12-31 23:59:59.999999'.
+ */
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ }
+ if (ts.tv_sec == 0 && ts.tv_usec == 0 &&
+ get_thd()->variables.sql_mode & (ulonglong) TIME_NO_ZERO_DATE)
+ return zero_time_stored_return_code_with_warning();
+ return 0;
}
-int Field_timestamp::store_timestamp(my_time_t ts, ulong sec_part)
+int Field_timestamp::zero_time_stored_return_code_with_warning()
{
- store_TIME(ts, sec_part);
- if (ts == 0 && sec_part == 0 &&
- get_thd()->variables.sql_mode & TIME_NO_ZERO_DATE)
+ if (get_thd()->variables.sql_mode & (ulonglong) TIME_NO_ZERO_DATE)
{
ErrConvString s(
STRING_WITH_LEN("0000-00-00 00:00:00.000000") - (decimals() ? 6 - decimals() : 7),
system_charset_info);
- set_datetime_warning(WARN_DATA_TRUNCATED, &s, MYSQL_TIMESTAMP_DATETIME, 1);
+ set_datetime_warning(WARN_DATA_TRUNCATED, &s, "datetime", 1);
return 1;
}
return 0;
+
+}
+
+
+int Field_timestamp::store_native(const Native &value)
+{
+ if (!value.length()) // Zero datetime
+ {
+ reset();
+ return zero_time_stored_return_code_with_warning();
+ }
+ /*
+ The exact second precision is not important here.
+ The Field_timestamp*::store_timestamp_dec() implementations do not use
+ the "dec" parameter. Passing TIME_SECOND_PART_DIGITS is OK.
+ */
+ return store_timestamp_dec(Timestamp(value).tv(), TIME_SECOND_PART_DIGITS);
}
@@ -5209,7 +5225,7 @@ double Field_timestamp::val_real(void)
longlong Field_timestamp::val_int(void)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, TIME_NO_ZERO_DATE))
+ if (get_date(&ltime, Datetime::Options(TIME_NO_ZERO_DATE, get_thd())))
return 0;
return ltime.year * 10000000000LL + ltime.month * 100000000LL +
@@ -5229,7 +5245,7 @@ String *Field_timestamp::val_str(String *val_buffer, String *val_ptr)
to= (char*) val_buffer->ptr();
val_buffer->length(field_length);
- if (get_date(&ltime, TIME_NO_ZERO_DATE))
+ if (get_date(&ltime, Datetime::Options(TIME_NO_ZERO_DATE, get_thd())))
{ /* Zero time is "000000" */
val_ptr->set(zero_timestamp, field_length, &my_charset_numeric);
return val_ptr;
@@ -5297,11 +5313,11 @@ Field_timestamp::validate_value_in_record(THD *thd, const uchar *record) const
DBUG_ASSERT(!is_null_in_record(record));
ulong sec_part;
return !get_timestamp(ptr_in_record(record), &sec_part) && !sec_part &&
- (sql_mode_for_dates(thd) & TIME_NO_ZERO_DATE) != 0;
+ bool(sql_mode_for_dates(thd) & TIME_NO_ZERO_DATE) != false;
}
-bool Field_timestamp::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Field_timestamp::get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
ulong sec_part;
my_time_t ts= get_timestamp(&sec_part);
@@ -5312,7 +5328,7 @@ bool Field_timestamp::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
bool Field_timestamp::send_binary(Protocol *protocol)
{
MYSQL_TIME ltime;
- Field_timestamp::get_date(&ltime, 0);
+ Field_timestamp::get_date(&ltime, date_mode_t(0));
return protocol->store(&ltime, 0);
}
@@ -5351,7 +5367,7 @@ void Field_timestamp::sql_type(String &res) const
int Field_timestamp::set_time()
{
set_notnull();
- store_TIME(get_thd()->query_start(), 0);
+ store_TIMESTAMP(Timestamp(get_thd()->query_start(), 0));
return 0;
}
@@ -5442,24 +5458,36 @@ static longlong read_lowendian(const uchar *from, uint bytes)
}
}
-void Field_timestamp_hires::store_TIME(my_time_t timestamp, ulong sec_part)
+void Field_timestamp_hires::store_TIMEVAL(const timeval &tv)
{
- mi_int4store(ptr, timestamp);
- store_bigendian(sec_part_shift(sec_part, dec), ptr+4, sec_part_bytes(dec));
+ mi_int4store(ptr, tv.tv_sec);
+ store_bigendian(sec_part_shift(tv.tv_usec, dec), ptr+4, sec_part_bytes(dec));
}
my_time_t Field_timestamp_hires::get_timestamp(const uchar *pos,
ulong *sec_part) const
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
*sec_part= (long)sec_part_unshift(read_bigendian(pos+4, sec_part_bytes(dec)), dec);
return mi_uint4korr(pos);
}
+
+bool Field_timestamp_hires::val_native(Native *to)
+{
+ DBUG_ASSERT(marked_for_read());
+ struct timeval tm;
+ tm.tv_sec= mi_uint4korr(ptr);
+ tm.tv_usec= (ulong) sec_part_unshift(read_bigendian(ptr+4, sec_part_bytes(dec)), dec);
+ return Timestamp_or_zero_datetime(Timestamp(tm), tm.tv_sec == 0).
+ to_native(to, dec);
+}
+
+
double Field_timestamp_with_dec::val_real(void)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, TIME_NO_ZERO_DATE))
+ if (get_date(&ltime, Datetime::Options(TIME_NO_ZERO_DATE, get_thd())))
return 0;
return ltime.year * 1e10 + ltime.month * 1e8 +
@@ -5470,29 +5498,17 @@ double Field_timestamp_with_dec::val_real(void)
my_decimal *Field_timestamp_with_dec::val_decimal(my_decimal *d)
{
MYSQL_TIME ltime;
- get_date(&ltime, 0);
+ get_date(&ltime, date_mode_t(0));
return TIME_to_my_decimal(&ltime, d);
}
int Field_timestamp::store_decimal(const my_decimal *d)
{
- ulonglong nr;
- ulong sec_part;
int error;
- MYSQL_TIME ltime;
- longlong tmp;
THD *thd= get_thd();
ErrConvDecimal str(d);
-
- if (my_decimal2seconds(d, &nr, &sec_part))
- {
- tmp= -1;
- error= 2;
- }
- else
- tmp= number_to_datetime(nr, sec_part, &ltime, sql_mode_for_timestamp(thd),
- &error);
- return store_TIME_with_warning(thd, &ltime, &str, error, tmp != -1);
+ Datetime dt(thd, &error, d, Timestamp::DatetimeOptions(thd), decimals());
+ return store_TIME_with_warning(thd, &dt, &str, error);
}
int Field_timestamp_with_dec::set_time()
@@ -5500,14 +5516,15 @@ int Field_timestamp_with_dec::set_time()
THD *thd= get_thd();
set_notnull();
// Avoid writing microseconds into binlog for FSP=0
- store_TIME(thd->query_start(), decimals() ? thd->query_start_sec_part() : 0);
+ ulong msec= decimals() ? thd->query_start_sec_part() : 0;
+ store_TIMESTAMP(Timestamp(thd->query_start(), msec).trunc(decimals()));
return 0;
}
bool Field_timestamp_with_dec::send_binary(Protocol *protocol)
{
MYSQL_TIME ltime;
- Field_timestamp::get_date(&ltime, 0);
+ Field_timestamp::get_date(&ltime, date_mode_t(0));
return protocol->store(&ltime, dec);
}
@@ -5536,19 +5553,15 @@ void Field_timestamp_with_dec::make_send_field(Send_field *field)
** MySQL-5.6 compatible TIMESTAMP(N)
**************************************************************/
-void Field_timestampf::store_TIME(my_time_t timestamp, ulong sec_part)
+void Field_timestampf::store_TIMEVAL(const timeval &tm)
{
- struct timeval tm;
- tm.tv_sec= timestamp;
- tm.tv_usec= sec_part;
- my_timeval_trunc(&tm, dec);
my_timestamp_to_binary(&tm, ptr, dec);
}
void Field_timestampf::set_max()
{
DBUG_ENTER("Field_timestampf::set_max");
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
DBUG_ASSERT(dec == TIME_SECOND_PART_DIGITS);
set_notnull();
@@ -5561,7 +5574,7 @@ void Field_timestampf::set_max()
bool Field_timestampf::is_max()
{
DBUG_ENTER("Field_timestampf::is_max");
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
DBUG_RETURN(mi_sint4korr(ptr) == TIMESTAMP_MAX_VALUE &&
mi_sint3korr(ptr + 4) == TIME_MAX_SECOND_PART);
@@ -5577,6 +5590,19 @@ my_time_t Field_timestampf::get_timestamp(const uchar *pos,
}
+bool Field_timestampf::val_native(Native *to)
+{
+ DBUG_ASSERT(marked_for_read());
+ // Check if it's '0000-00-00 00:00:00' rather than a real timestamp
+ if (ptr[0] == 0 && ptr[1] == 0 && ptr[2] == 0 && ptr[3] == 0)
+ {
+ to->length(0);
+ return false;
+ }
+ return Field::val_native(to);
+}
+
+
/*************************************************************/
uint Field_temporal::is_equal(Create_field *new_field)
{
@@ -5587,7 +5613,7 @@ uint Field_temporal::is_equal(Create_field *new_field)
void Field_temporal::set_warnings(Sql_condition::enum_warning_level trunc_level,
const ErrConv *str, int was_cut,
- timestamp_type ts_type)
+ const char *typestr)
{
/*
error code logic:
@@ -5600,9 +5626,9 @@ void Field_temporal::set_warnings(Sql_condition::enum_warning_level trunc_level,
a DATE field and non-zero time part is thrown away.
*/
if (was_cut & MYSQL_TIME_WARN_TRUNCATED)
- set_datetime_warning(trunc_level, WARN_DATA_TRUNCATED, str, ts_type, 1);
+ set_datetime_warning(trunc_level, WARN_DATA_TRUNCATED, str, typestr, 1);
if (was_cut & MYSQL_TIME_WARN_OUT_OF_RANGE)
- set_datetime_warning(ER_WARN_DATA_OUT_OF_RANGE, str, ts_type, 1);
+ set_datetime_warning(ER_WARN_DATA_OUT_OF_RANGE, str, typestr, 1);
}
@@ -5616,107 +5642,68 @@ void Field_temporal::set_warnings(Sql_condition::enum_warning_level trunc_level,
3 Datetime value that was cut (warning level NOTE)
This is used by opt_range.cc:get_mm_leaf().
*/
-int Field_temporal_with_date::store_TIME_with_warning(MYSQL_TIME *ltime,
- const ErrConv *str,
- int was_cut,
- int have_smth_to_conv)
+int Field_datetime::store_TIME_with_warning(const Datetime *dt,
+ const ErrConv *str,
+ int was_cut)
{
- Sql_condition::enum_warning_level trunc_level= Sql_condition::WARN_LEVEL_WARN;
- int ret= 2;
-
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
-
- if (was_cut == 0 && have_smth_to_conv == 0) // special case: zero date
- {
- was_cut= MYSQL_TIME_WARN_OUT_OF_RANGE;
- }
- else if (!have_smth_to_conv)
- {
- bzero(ltime, sizeof(*ltime));
- was_cut= MYSQL_TIME_WARN_TRUNCATED;
- ret= 1;
- }
- else if (!MYSQL_TIME_WARN_HAVE_WARNINGS(was_cut) &&
- (MYSQL_TIME_WARN_HAVE_NOTES(was_cut) ||
- (type_handler()->mysql_timestamp_type() == MYSQL_TIMESTAMP_DATE &&
- (ltime->hour || ltime->minute || ltime->second || ltime->second_part))))
- {
- trunc_level= Sql_condition::WARN_LEVEL_NOTE;
- was_cut|= MYSQL_TIME_WARN_TRUNCATED;
- ret= 3;
- }
- set_warnings(trunc_level, str, was_cut,
- type_handler()->mysql_timestamp_type());
- store_TIME(ltime);
- return was_cut ? ret : 0;
+ DBUG_ASSERT(marked_for_write_or_computed());
+ // Handle totally bad values
+ if (!dt->is_valid_datetime())
+ return store_invalid_with_warning(str, was_cut, "datetime");
+ // Store the value
+ DBUG_ASSERT(!dt->fraction_remainder(decimals()));
+ store_datetime(*dt);
+ // Caclulate return value and send warnings if needed
+ return store_TIME_return_code_with_warnings(was_cut, str, "datetime");
}
-int Field_temporal_with_date::store(const char *from, size_t len, CHARSET_INFO *cs)
+int Field_datetime::store(const char *from, size_t len, CHARSET_INFO *cs)
{
- MYSQL_TIME ltime;
- MYSQL_TIME_STATUS status;
- THD *thd= get_thd();
+ MYSQL_TIME_STATUS st;
ErrConvString str(from, len, cs);
- bool func_res= !str_to_datetime(cs, from, len, &ltime,
- sql_mode_for_dates(thd),
- &status);
- return store_TIME_with_warning(&ltime, &str, status.warnings, func_res);
+ THD *thd= get_thd();
+ Datetime dt(thd, &st, from, len, cs, Datetime::Options(thd), decimals());
+ return store_TIME_with_warning(&dt, &str, st.warnings);
}
-
-int Field_temporal_with_date::store(double nr)
+int Field_datetime::store(double nr)
{
- int error= 0;
- MYSQL_TIME ltime;
- THD *thd= get_thd();
+ int error;
ErrConvDouble str(nr);
-
- longlong tmp= double_to_datetime(nr, &ltime,
- (uint) sql_mode_for_dates(thd), &error);
- return store_TIME_with_warning(&ltime, &str, error, tmp != -1);
+ THD *thd= get_thd();
+ Datetime dt(thd, &error, nr, Datetime::Options(thd), decimals());
+ return store_TIME_with_warning(&dt, &str, error);
}
-int Field_temporal_with_date::store(longlong nr, bool unsigned_val)
+int Field_datetime::store(longlong nr, bool unsigned_val)
{
int error;
- MYSQL_TIME ltime;
- longlong tmp;
+ Longlong_hybrid tmp(nr, unsigned_val);
+ ErrConvInteger str(tmp);
THD *thd= get_thd();
- ErrConvInteger str(nr, unsigned_val);
-
- tmp= number_to_datetime(nr, 0, &ltime, sql_mode_for_dates(thd), &error);
-
- return store_TIME_with_warning(&ltime, &str, error, tmp != -1);
+ Datetime dt(&error, tmp, Datetime::Options(thd));
+ return store_TIME_with_warning(&dt, &str, error);
}
-
-int Field_temporal_with_date::store_time_dec(const MYSQL_TIME *ltime, uint dec)
+int Field_datetime::store_time_dec(const MYSQL_TIME *ltime, uint dec)
{
- int error= 0, have_smth_to_conv= 1;
+ int error;
ErrConvTime str(ltime);
- MYSQL_TIME l_time;
+ THD *thd= get_thd();
+ Datetime dt(thd, &error, ltime, Datetime::Options(thd), decimals());
+ return store_TIME_with_warning(&dt, &str, error);
+}
- if (copy_or_convert_to_datetime(get_thd(), ltime, &l_time))
- {
- /*
- Set have_smth_to_conv and error in a way to have
- store_TIME_with_warning do bzero().
- */
- have_smth_to_conv= false;
- error= MYSQL_TIME_WARN_OUT_OF_RANGE;
- }
- else
- {
- /*
- We don't perform range checking here since values stored in TIME
- structure always fit into DATETIME range.
- */
- have_smth_to_conv= !check_date(&l_time, pack_time(&l_time) != 0,
- sql_mode_for_dates(get_thd()), &error);
- }
- return store_TIME_with_warning(&l_time, &str, error, have_smth_to_conv);
+
+int Field_datetime::store_decimal(const my_decimal *d)
+{
+ int error;
+ ErrConvDecimal str(d);
+ THD *thd= get_thd();
+ Datetime tm(thd, &error, d, Datetime::Options(thd), decimals());
+ return store_TIME_with_warning(&tm, &str, error);
}
@@ -5726,14 +5713,14 @@ Field_temporal_with_date::validate_value_in_record(THD *thd,
{
DBUG_ASSERT(!is_null_in_record(record));
MYSQL_TIME ltime;
- return get_TIME(&ltime, ptr_in_record(record), sql_mode_for_dates(thd));
+ return get_TIME(&ltime, ptr_in_record(record), Datetime::Options(thd));
}
my_decimal *Field_temporal::val_decimal(my_decimal *d)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, 0))
+ if (get_date(&ltime, date_mode_t(0)))
{
bzero(&ltime, sizeof(ltime));
ltime.time_type= type_handler()->mysql_timestamp_type();
@@ -5766,7 +5753,8 @@ Item *Field_temporal::get_equal_const_item_datetime(THD *thd,
const_item->field_type() != MYSQL_TYPE_TIMESTAMP) ||
const_item->decimals != decimals())
{
- Datetime dt(thd, const_item, 0);
+ Datetime::Options opt(TIME_CONV_NONE, thd);
+ Datetime dt(thd, const_item, opt, decimals());
if (!dt.is_valid_datetime())
return NULL;
/*
@@ -5781,7 +5769,7 @@ Item *Field_temporal::get_equal_const_item_datetime(THD *thd,
case ANY_SUBST:
if (!is_temporal_type_with_date(const_item->field_type()))
{
- Datetime dt(thd, const_item, TIME_FUZZY_DATES | TIME_INVALID_DATES);
+ Datetime dt(thd, const_item, Datetime::Options_cmp(thd));
if (!dt.is_valid_datetime())
return NULL;
return new (thd->mem_root)
@@ -5802,36 +5790,18 @@ Item *Field_temporal::get_equal_const_item_datetime(THD *thd,
** In number context: HHMMSS
** Stored as a 3 byte unsigned int
****************************************************************************/
-int Field_time::store_TIME_with_warning(MYSQL_TIME *ltime,
- const ErrConv *str,
- int was_cut,
- int have_smth_to_conv)
+int Field_time::store_TIME_with_warning(const Time *t,
+ const ErrConv *str, int warn)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
-
- if (!have_smth_to_conv)
- {
- bzero(ltime, sizeof(*ltime));
- store_TIME(ltime);
- set_warnings(Sql_condition::WARN_LEVEL_WARN, str, MYSQL_TIME_WARN_TRUNCATED);
- return 1;
- }
- if (ltime->year != 0 || ltime->month != 0)
- {
- ltime->year= ltime->month= ltime->day= 0;
- was_cut|= MYSQL_TIME_NOTE_TRUNCATED;
- }
- my_time_trunc(ltime, decimals());
- store_TIME(ltime);
- if (!MYSQL_TIME_WARN_HAVE_WARNINGS(was_cut) &&
- MYSQL_TIME_WARN_HAVE_NOTES(was_cut))
- {
- set_warnings(Sql_condition::WARN_LEVEL_NOTE, str,
- was_cut | MYSQL_TIME_WARN_TRUNCATED);
- return 3;
- }
- set_warnings(Sql_condition::WARN_LEVEL_WARN, str, was_cut);
- return was_cut ? 2 : 0;
+ DBUG_ASSERT(marked_for_write_or_computed());
+ // Handle totally bad values
+ if (!t->is_valid_time())
+ return store_invalid_with_warning(str, warn, "time");
+ // Store the value
+ DBUG_ASSERT(!t->fraction_remainder(decimals()));
+ store_TIME(*t);
+ // Calculate return value and send warnings if needed
+ return store_TIME_return_code_with_warnings(warn, str, "time");
}
@@ -5848,88 +5818,56 @@ void Field_time::store_TIME(const MYSQL_TIME *ltime)
int Field_time::store(const char *from,size_t len,CHARSET_INFO *cs)
{
- MYSQL_TIME ltime;
- MYSQL_TIME_STATUS status;
ErrConvString str(from, len, cs);
- bool have_smth_to_conv=
- !str_to_time(cs, from, len, &ltime, sql_mode_for_dates(get_thd()),
- &status);
-
- return store_TIME_with_warning(&ltime, &str,
- status.warnings, have_smth_to_conv);
-}
-
-
-/**
- subtract a given number of days from DATETIME, return TIME
-
- optimized version of calc_time_diff()
-
- @note it might generate TIME values outside of the valid TIME range!
-*/
-static void calc_datetime_days_diff(MYSQL_TIME *ltime, long days)
-{
- long daydiff= calc_daynr(ltime->year, ltime->month, ltime->day) - days;
- ltime->year= ltime->month= 0;
- if (daydiff >=0 )
- {
- ltime->day= daydiff;
- ltime->time_type= MYSQL_TIMESTAMP_TIME;
- }
- else
- {
- longlong timediff= ((((daydiff * 24LL +
- ltime->hour) * 60LL +
- ltime->minute) * 60LL +
- ltime->second) * 1000000LL +
- ltime->second_part);
- unpack_time(timediff, ltime, MYSQL_TIMESTAMP_TIME);
- }
+ MYSQL_TIME_STATUS st;
+ THD *thd= get_thd();
+ /*
+ Unlike number-to-time conversion, we need to additionally pass
+ MODE_NO_ZERO_DATE here (if it is present in the current sql_mode):
+ SET sql_mode='STRICT_ALL_TABLES,NO_ZERO_DATE';
+ INSERT INTO t1 VALUES ('0000-00-00 00:00:00'); -- error
+ INSERT INTO t1 VALUES (0); -- ok
+ In the first INSERT we have a zero date.
+ In the second INSERT we don't have a zero date (it is just a zero time).
+ */
+ Time::Options opt(sql_mode_for_dates(thd), thd);
+ Time tm(thd, &st, from, len, cs, opt, decimals());
+ return store_TIME_with_warning(&tm, &str, st.warnings);
}
int Field_time::store_time_dec(const MYSQL_TIME *ltime, uint dec)
{
- MYSQL_TIME l_time= *ltime;
ErrConvTime str(ltime);
- int was_cut= 0;
-
- if (curdays && l_time.time_type != MYSQL_TIMESTAMP_TIME)
- calc_datetime_days_diff(&l_time, curdays);
-
- int have_smth_to_conv= !check_time_range(&l_time, decimals(), &was_cut);
- return store_TIME_with_warning(&l_time, &str, was_cut, have_smth_to_conv);
+ int warn;
+ Time tm(&warn, ltime, curdays, Time::Options(get_thd()), decimals());
+ return store_TIME_with_warning(&tm, &str, warn);
}
int Field_time::store(double nr)
{
- MYSQL_TIME ltime;
ErrConvDouble str(nr);
int was_cut;
- bool neg= nr < 0;
- if (neg)
- nr= -nr;
- int have_smth_to_conv= !number_to_time(neg, (ulonglong) nr,
- (ulong)((nr - floor(nr)) * TIME_SECOND_PART_FACTOR),
- &ltime, &was_cut);
-
- return store_TIME_with_warning(&ltime, &str, was_cut, have_smth_to_conv);
+ Time tm(get_thd(), &was_cut, nr, Time::Options(get_thd()), decimals());
+ return store_TIME_with_warning(&tm, &str, was_cut);
}
int Field_time::store(longlong nr, bool unsigned_val)
{
- MYSQL_TIME ltime;
- ErrConvInteger str(nr, unsigned_val);
+ Longlong_hybrid tmp(nr, unsigned_val);
+ ErrConvInteger str(tmp);
int was_cut;
- if (nr < 0 && unsigned_val)
- nr= 99991231235959LL + 1;
- int have_smth_to_conv= !number_to_time(nr < 0,
- (ulonglong) (nr < 0 ? -nr : nr),
- 0, &ltime, &was_cut);
-
- return store_TIME_with_warning(&ltime, &str, was_cut, have_smth_to_conv);
+ THD *thd= get_thd();
+ /*
+ Need fractional digit truncation if nr overflows to '838:59:59.999999'.
+ The constructor used below will always truncate (never round).
+ There is no need to override the default session rounding mode
+ from HALF_UP to TRUNCATE.
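+ For example (hypothetical value, assuming decimals()==0): an out-of-range
+ integer such as 9999999999 would presumably be clipped to the maximum
+ '838:59:59.999999' and then truncated to '838:59:59', since rounding up
+ would overflow the TIME range.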
+ */
+ Time tm(thd, &was_cut, tmp, Time::Options(thd), decimals());
+ return store_TIME_with_warning(&tm, &str, was_cut);
}
@@ -5957,14 +5895,14 @@ Field *Field_time::new_key_field(MEM_ROOT *root, TABLE *new_table,
double Field_time::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
uint32 j= (uint32) uint3korr(ptr);
return (double) j;
}
longlong Field_time::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
return (longlong) sint3korr(ptr);
}
@@ -5978,9 +5916,9 @@ longlong Field_time::val_int(void)
String *Field_time::val_str(String *str,
String *unused __attribute__((unused)))
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
MYSQL_TIME ltime;
- get_date(&ltime, TIME_TIME_ONLY);
+ get_date(&ltime, Datetime::Options(TIME_TIME_ONLY, get_thd()));
str->alloc(field_length + 1);
str->length(my_time_to_str(&ltime, const_cast<char*>(str->ptr()), decimals()));
str->set_charset(&my_charset_numeric);
@@ -5988,9 +5926,10 @@ String *Field_time::val_str(String *str,
}
-bool Field_time::check_zero_in_date_with_warn(ulonglong fuzzydate)
+bool Field_time::check_zero_in_date_with_warn(date_mode_t fuzzydate)
{
- if (!(fuzzydate & TIME_TIME_ONLY) && (fuzzydate & TIME_NO_ZERO_IN_DATE))
+ date_conv_mode_t tmp= date_conv_mode_t(fuzzydate);
+ if (!(tmp & TIME_TIME_ONLY) && (tmp & TIME_NO_ZERO_IN_DATE))
{
THD *thd= get_thd();
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
@@ -6010,7 +5949,7 @@ bool Field_time::check_zero_in_date_with_warn(ulonglong fuzzydate)
DATE_FORMAT(time, "%l.%i %p")
*/
-bool Field_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Field_time::get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
if (check_zero_in_date_with_warn(fuzzydate))
return true;
@@ -6035,7 +5974,7 @@ bool Field_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
bool Field_time::send_binary(Protocol *protocol)
{
MYSQL_TIME ltime;
- get_date(&ltime, TIME_TIME_ONLY);
+ get_date(&ltime, Time::Options(TIME_TIME_ONLY, get_thd()));
return protocol->store_time(&ltime, decimals());
}
@@ -6084,16 +6023,10 @@ void Field_time_hires::store_TIME(const MYSQL_TIME *ltime)
int Field_time::store_decimal(const my_decimal *d)
{
- ulonglong nr;
- ulong sec_part;
ErrConvDecimal str(d);
- MYSQL_TIME ltime;
int was_cut;
- bool neg= my_decimal2seconds(d, &nr, &sec_part);
-
- int have_smth_to_conv= !number_to_time(neg, nr, sec_part, &ltime, &was_cut);
-
- return store_TIME_with_warning(&ltime, &str, was_cut, have_smth_to_conv);
+ Time tm(get_thd(), &was_cut, d, Time::Options(get_thd()), decimals());
+ return store_TIME_with_warning(&tm, &str, was_cut);
}
@@ -6133,14 +6066,27 @@ bool Field_time::can_be_substituted_to_equal_item(const Context &ctx,
Item *Field_time::get_equal_const_item(THD *thd, const Context &ctx,
Item *const_item)
{
+ /*
+ Old mode conversion from DATETIME with non-zero YYYYMMDD part
+ to TIME works very inconsistently. Possible variants:
+ - truncate the YYYYMMDD part
+ - add (MM*33+DD)*24 to hours
+ - add (MM*31+DD)*24 to hours
+ Let's disallow propagation of DATETIME with non-zero YYYYMMDD
+ as an equal constant for a TIME field.
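+ For example, with OLD_MODE_ZERO_DATE_TIME_CAST a comparison such as
+ time_col=TIMESTAMP'2001-01-01 10:20:30' (hypothetical TIME column) is
+ presumably left as is rather than rewritten to time_col=TIME'10:20:30'.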
+ */
+ Time::datetime_to_time_mode_t mode=
+ (thd->variables.old_behavior & OLD_MODE_ZERO_DATE_TIME_CAST) ?
+ Time::DATETIME_TO_TIME_YYYYMMDD_00000000_ONLY :
+ Time::DATETIME_TO_TIME_MINUS_CURRENT_DATE;
+
switch (ctx.subst_constraint()) {
case ANY_SUBST:
if (const_item->field_type() != MYSQL_TYPE_TIME)
{
- MYSQL_TIME ltime;
// Get the value of const_item with conversion from DATETIME to TIME
- ulonglong fuzzydate= Time::comparison_flags_for_get_date();
- if (const_item->get_time_with_conversion(thd, &ltime, fuzzydate))
+ Time tm(get_thd(), const_item, Time::Options_cmp(thd, mode));
+ if (!tm.is_valid_time())
return NULL;
/*
Replace a DATE/DATETIME constant to a TIME constant:
@@ -6152,8 +6098,9 @@ Item *Field_time::get_equal_const_item(THD *thd, const Context &ctx,
(assuming CURRENT_DATE is '2015-08-30')
*/
- return new (thd->mem_root) Item_time_literal(thd, &ltime,
- ltime.second_part ?
+ return new (thd->mem_root) Item_time_literal(thd, tm.get_mysql_time(),
+ tm.get_mysql_time()->
+ second_part ?
TIME_SECOND_PART_DIGITS :
0);
}
@@ -6162,9 +6109,6 @@ Item *Field_time::get_equal_const_item(THD *thd, const Context &ctx,
if (const_item->field_type() != MYSQL_TYPE_TIME ||
const_item->decimals != decimals())
{
- MYSQL_TIME ltime;
- if (const_item->get_time_with_conversion(thd, &ltime, TIME_TIME_ONLY))
- return NULL;
/*
Note, the value returned in "ltime" can have more fractional
digits than decimals(). The Item_time_literal constructor will
@@ -6179,7 +6123,12 @@ Item *Field_time::get_equal_const_item(THD *thd, const Context &ctx,
The optimized WHERE will return with "Impossible WHERE", without
having to do the full table scan.
*/
- return new (thd->mem_root) Item_time_literal(thd, &ltime, decimals());
+ Time tm(thd, const_item, Time::Options(TIME_TIME_ONLY, thd, mode),
+ decimals());
+ if (!tm.is_valid_time())
+ return NULL;
+ return new (thd->mem_root) Item_time_literal(thd, tm.get_mysql_time(),
+ decimals());
}
break;
}
@@ -6189,22 +6138,22 @@ Item *Field_time::get_equal_const_item(THD *thd, const Context &ctx,
longlong Field_time_with_dec::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
MYSQL_TIME ltime;
- get_date(&ltime, TIME_TIME_ONLY);
+ get_date(&ltime, Time::Options(TIME_TIME_ONLY, get_thd()));
longlong val= TIME_to_ulonglong_time(&ltime);
return ltime.neg ? -val : val;
}
double Field_time_with_dec::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
MYSQL_TIME ltime;
- get_date(&ltime, TIME_TIME_ONLY);
+ get_date(&ltime, Time::Options(TIME_TIME_ONLY, get_thd()));
return TIME_to_double(&ltime);
}
-bool Field_time_hires::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Field_time_hires::get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
if (check_zero_in_date_with_warn(fuzzydate))
return true;
@@ -6256,7 +6205,7 @@ void Field_timef::store_TIME(const MYSQL_TIME *ltime)
my_time_packed_to_binary(tmp, ptr, dec);
}
-bool Field_timef::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Field_timef::get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
if (check_zero_in_date_with_warn(fuzzydate))
return true;
@@ -6273,7 +6222,7 @@ bool Field_timef::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
int Field_year::store(const char *from, size_t len,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
char *end;
int error;
longlong nr= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error);
@@ -6321,7 +6270,7 @@ int Field_year::store(double nr)
int Field_year::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
if (nr < 0 || (nr >= 100 && nr <= 1900) || nr > 2155)
{
*ptr= 0;
@@ -6346,13 +6295,14 @@ int Field_year::store_time_dec(const MYSQL_TIME *ltime, uint dec_arg)
if (Field_year::store(ltime->year, 0))
return 1;
- set_datetime_warning(WARN_DATA_TRUNCATED, &str, ltime->time_type, 1);
+ const char *typestr= Temporal::type_name_by_timestamp_type(ltime->time_type);
+ set_datetime_warning(WARN_DATA_TRUNCATED, &str, typestr, 1);
return 0;
}
bool Field_year::send_binary(Protocol *protocol)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
ulonglong tmp= Field_year::val_int();
return protocol->store_short(tmp);
}
@@ -6366,7 +6316,7 @@ double Field_year::val_real(void)
longlong Field_year::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
DBUG_ASSERT(field_length == 2 || field_length == 4);
int tmp= (int) ptr[0];
if (field_length != 4)
@@ -6390,12 +6340,13 @@ String *Field_year::val_str(String *val_buffer,
}
-bool Field_year::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
+bool Field_year::get_date(MYSQL_TIME *ltime,date_mode_t fuzzydate)
{
int tmp= (int) ptr[0];
if (tmp || field_length != 4)
tmp+= 1900;
- return int_to_datetime_with_warn(false, tmp * 10000,
+ return int_to_datetime_with_warn(get_thd(),
+ Longlong_hybrid(tmp * 10000, true),
ltime, fuzzydate, table->s, field_name.str);
}
@@ -6408,6 +6359,71 @@ void Field_year::sql_type(String &res) const
}
+/*****************************************************************************/
+
+int Field_date_common::store_TIME_with_warning(const Datetime *dt,
+ const ErrConv *str,
+ int was_cut)
+{
+ DBUG_ASSERT(marked_for_write_or_computed());
+ // Handle totally bad values
+ if (!dt->is_valid_datetime())
+ return store_invalid_with_warning(str, was_cut, "date");
+ // Store the value
+ if (!dt->hhmmssff_is_zero())
+ was_cut|= MYSQL_TIME_NOTE_TRUNCATED;
+ store_datetime(*dt);
+ // Calculate return value and send warnings if needed
+ return store_TIME_return_code_with_warnings(was_cut, str, "date");
+}
+
+int Field_date_common::store(const char *from, size_t len, CHARSET_INFO *cs)
+{
+ MYSQL_TIME_STATUS st;
+ ErrConvString str(from, len, cs);
+ THD *thd= get_thd();
+ Datetime dt(thd, &st, from, len, cs, Date::Options(thd), 0);
+ return store_TIME_with_warning(&dt, &str, st.warnings);
+}
+
+int Field_date_common::store(double nr)
+{
+ int error;
+ ErrConvDouble str(nr);
+ THD *thd= get_thd();
+ Datetime dt(thd, &error, nr, Date::Options(thd), 0);
+ return store_TIME_with_warning(&dt, &str, error);
+}
+
+int Field_date_common::store(longlong nr, bool unsigned_val)
+{
+ int error;
+ Longlong_hybrid tmp(nr, unsigned_val);
+ ErrConvInteger str(tmp);
+ THD *thd= get_thd();
+ Datetime dt(&error, tmp, Date::Options(thd));
+ return store_TIME_with_warning(&dt, &str, error);
+}
+
+int Field_date_common::store_time_dec(const MYSQL_TIME *ltime, uint dec)
+{
+ int error;
+ ErrConvTime str(ltime);
+ THD *thd= get_thd();
+ Datetime dt(thd, &error, ltime, Date::Options(thd), 0);
+ return store_TIME_with_warning(&dt, &str, error);
+}
+
+int Field_date_common::store_decimal(const my_decimal *d)
+{
+ int error;
+ ErrConvDecimal str(d);
+ THD *thd= get_thd();
+ Datetime tm(thd, &error, d, Date::Options(thd), 0);
+ return store_TIME_with_warning(&tm, &str, error);
+}
+
+
/****************************************************************************
** date type
** In string context: YYYY-MM-DD
@@ -6415,7 +6431,7 @@ void Field_year::sql_type(String &res) const
** Stored as a 4 byte unsigned int
****************************************************************************/
-void Field_date::store_TIME(MYSQL_TIME *ltime)
+void Field_date::store_TIME(const MYSQL_TIME *ltime)
{
uint tmp= ltime->year*10000L + ltime->month*100+ltime->day;
int4store(ptr,tmp);
@@ -6434,7 +6450,7 @@ bool Field_date::send_binary(Protocol *protocol)
double Field_date::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
int32 j;
j=sint4korr(ptr);
return (double) (uint32) j;
@@ -6443,7 +6459,7 @@ double Field_date::val_real(void)
longlong Field_date::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
int32 j;
j=sint4korr(ptr);
return (longlong) (uint32) j;
@@ -6451,9 +6467,9 @@ longlong Field_date::val_int(void)
bool Field_date::get_TIME(MYSQL_TIME *ltime, const uchar *pos,
- ulonglong fuzzydate) const
+ date_mode_t fuzzydate) const
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
int32 tmp= sint4korr(pos);
ltime->year= (int) ((uint32) tmp/10000L % 10000);
ltime->month= (int) ((uint32) tmp/100 % 100);
@@ -6468,7 +6484,7 @@ String *Field_date::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
MYSQL_TIME ltime;
- get_TIME(&ltime, ptr, 0);
+ get_TIME(&ltime, ptr, date_mode_t(0));
val_buffer->alloc(MAX_DATE_STRING_REP_LENGTH);
uint length= (uint) my_date_to_str(&ltime,
const_cast<char*>(val_buffer->ptr()));
@@ -6508,7 +6524,7 @@ void Field_date::sql_type(String &res) const
** In number context: YYYYMMDD
****************************************************************************/
-void Field_newdate::store_TIME(MYSQL_TIME *ltime)
+void Field_newdate::store_TIME(const MYSQL_TIME *ltime)
{
uint tmp= ltime->year*16*32 + ltime->month*32+ltime->day;
int3store(ptr,tmp);
@@ -6518,21 +6534,21 @@ void Field_newdate::store_TIME(MYSQL_TIME *ltime)
bool Field_newdate::send_binary(Protocol *protocol)
{
MYSQL_TIME tm;
- Field_newdate::get_date(&tm,0);
+ Field_newdate::get_date(&tm, date_mode_t(0));
return protocol->store_date(&tm);
}
double Field_newdate::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
return (double) Field_newdate::val_int();
}
longlong Field_newdate::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
ulong j= uint3korr(ptr);
j= (j % 32L)+(j / 32L % 16L)*100L + (j/(16L*32L))*10000L;
return (longlong) j;
@@ -6542,7 +6558,7 @@ longlong Field_newdate::val_int(void)
String *Field_newdate::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
val_buffer->alloc(field_length);
val_buffer->length(field_length);
uint32 tmp=(uint32) uint3korr(ptr);
@@ -6570,9 +6586,9 @@ String *Field_newdate::val_str(String *val_buffer,
bool Field_newdate::get_TIME(MYSQL_TIME *ltime, const uchar *pos,
- ulonglong fuzzydate) const
+ date_mode_t fuzzydate) const
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
uint32 tmp=(uint32) uint3korr(pos);
ltime->day= tmp & 31;
ltime->month= (tmp >> 5) & 15;
@@ -6613,8 +6629,14 @@ Item *Field_newdate::get_equal_const_item(THD *thd, const Context &ctx,
case ANY_SUBST:
if (!is_temporal_type_with_date(const_item->field_type()))
{
- // Get the value of const_item with conversion from TIME to DATETIME
- Datetime dt(thd, const_item, TIME_FUZZY_DATES | TIME_INVALID_DATES);
+ /*
+ DATE is compared to DATETIME-alike non-temporal values
+ (such as VARCHAR, DECIMAL) as DATETIME, e.g.:
+ WHERE date_column=20010101235959.0000009
+ So here we convert the constant to DATETIME normally.
+ If TIME_ROUND_FRACTIONAL is enabled, nanoseconds will be rounded.
+ */
+ Datetime dt(thd, const_item, Datetime::Options_cmp(thd));
if (!dt.is_valid_datetime())
return NULL;
/*
@@ -6641,10 +6663,17 @@ Item *Field_newdate::get_equal_const_item(THD *thd, const Context &ctx,
case IDENTITY_SUBST:
if (const_item->field_type() != MYSQL_TYPE_DATE)
{
- Date d(thd, const_item, 0);
- if (!d.is_valid_date())
+ /*
+ DATE is compared to non-temporal values as DATETIME.
+ We need to convert to DATETIME first, taking into account the
+ current session rounding mode (even though this is IDENTITY_SUBST!),
+ then convert the result to DATE.
+ */
+ Datetime dt(thd, const_item, Datetime::Options(TIME_CONV_NONE, thd));
+ if (!dt.is_valid_datetime())
return NULL;
- return new (thd->mem_root) Item_date_literal(thd, d.get_mysql_time());
+ return new (thd->mem_root)
+ Item_date_literal(thd, Date(&dt).get_mysql_time());
}
break;
}
@@ -6659,7 +6688,7 @@ Item *Field_newdate::get_equal_const_item(THD *thd, const Context &ctx,
** Stored as an 8 byte unsigned int. Should sometimes be changed to a 6 byte int.
****************************************************************************/
-void Field_datetime::store_TIME(MYSQL_TIME *ltime)
+void Field_datetime::store_TIME(const MYSQL_TIME *ltime)
{
ulonglong tmp= TIME_to_ulonglong_datetime(ltime);
int8store(ptr,tmp);
@@ -6668,7 +6697,7 @@ void Field_datetime::store_TIME(MYSQL_TIME *ltime)
bool Field_datetime::send_binary(Protocol *protocol)
{
MYSQL_TIME tm;
- Field_datetime::get_date(&tm, 0);
+ Field_datetime::get_date(&tm, date_mode_t(0));
return protocol->store(&tm, 0);
}
@@ -6680,7 +6709,7 @@ double Field_datetime::val_real(void)
longlong Field_datetime::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
longlong j;
j=sint8korr(ptr);
return j;
@@ -6693,7 +6722,7 @@ String *Field_datetime::val_str(String *val_buffer,
val_buffer->alloc(field_length);
val_buffer->length(field_length);
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
ulonglong tmp;
long part1,part2;
char *pos;
@@ -6734,9 +6763,9 @@ String *Field_datetime::val_str(String *val_buffer,
}
bool Field_datetime::get_TIME(MYSQL_TIME *ltime, const uchar *pos,
- ulonglong fuzzydate) const
+ date_mode_t fuzzydate) const
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
longlong tmp= sint8korr(pos);
uint32 part1,part2;
part1=(uint32) (tmp/1000000LL);
@@ -6793,48 +6822,28 @@ void Field_datetime::sql_type(String &res) const
int Field_datetime::set_time()
{
THD *thd= table->in_use;
- MYSQL_TIME now_time;
- thd->variables.time_zone->gmt_sec_to_TIME(&now_time, thd->query_start());
- now_time.second_part= thd->query_start_sec_part();
set_notnull();
- store_TIME(&now_time);
- thd->time_zone_used= 1;
+ // Here we always truncate (not round), no matter what sql_mode is
+ if (decimals())
+ store_datetime(Datetime(thd, Timeval(thd->query_start(),
+ thd->query_start_sec_part())
+ ).trunc(decimals()));
+ else
+ store_datetime(Datetime(thd, Timeval(thd->query_start(), 0)));
return 0;
}
-void Field_datetime_hires::store_TIME(MYSQL_TIME *ltime)
+void Field_datetime_hires::store_TIME(const MYSQL_TIME *ltime)
{
ulonglong packed= sec_part_shift(pack_time(ltime), dec);
store_bigendian(packed, ptr, Field_datetime_hires::pack_length());
}
-int Field_temporal_with_date::store_decimal(const my_decimal *d)
-{
- ulonglong nr;
- ulong sec_part;
- int error;
- MYSQL_TIME ltime;
- longlong tmp;
- THD *thd= get_thd();
- ErrConvDecimal str(d);
-
- if (my_decimal2seconds(d, &nr, &sec_part))
- {
- tmp= -1;
- error= 2;
- }
- else
- tmp= number_to_datetime(nr, sec_part, &ltime, sql_mode_for_dates(thd),
- &error);
-
- return store_TIME_with_warning(&ltime, &str, error, tmp != -1);
-}
-
bool Field_datetime_with_dec::send_binary(Protocol *protocol)
{
MYSQL_TIME ltime;
- get_date(&ltime, 0);
+ get_date(&ltime, date_mode_t(0));
return protocol->store(&ltime, dec);
}
@@ -6842,14 +6851,14 @@ bool Field_datetime_with_dec::send_binary(Protocol *protocol)
double Field_datetime_with_dec::val_real(void)
{
MYSQL_TIME ltime;
- get_date(&ltime, 0);
+ get_date(&ltime, date_mode_t(0));
return TIME_to_double(&ltime);
}
longlong Field_datetime_with_dec::val_int(void)
{
MYSQL_TIME ltime;
- get_date(&ltime, 0);
+ get_date(&ltime, date_mode_t(0));
return TIME_to_ulonglong_datetime(&ltime);
}
@@ -6858,7 +6867,7 @@ String *Field_datetime_with_dec::val_str(String *str,
String *unused __attribute__((unused)))
{
MYSQL_TIME ltime;
- get_date(&ltime, 0);
+ get_date(&ltime, date_mode_t(0));
str->alloc(field_length+1);
str->length(field_length);
my_datetime_to_str(&ltime, (char*) str->ptr(), dec);
@@ -6868,9 +6877,9 @@ String *Field_datetime_with_dec::val_str(String *str,
bool Field_datetime_hires::get_TIME(MYSQL_TIME *ltime, const uchar *pos,
- ulonglong fuzzydate) const
+ date_mode_t fuzzydate) const
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
ulonglong packed= read_bigendian(pos, Field_datetime_hires::pack_length());
unpack_time(sec_part_unshift(packed, dec), ltime, MYSQL_TIMESTAMP_DATETIME);
return validate_MMDD(packed, ltime->month, ltime->day, fuzzydate);
@@ -6901,17 +6910,16 @@ int Field_datetimef::reset()
return 0;
}
-void Field_datetimef::store_TIME(MYSQL_TIME *ltime)
+void Field_datetimef::store_TIME(const MYSQL_TIME *ltime)
{
- my_time_trunc(ltime, decimals());
longlong tmp= TIME_to_longlong_datetime_packed(ltime);
my_datetime_packed_to_binary(tmp, ptr, dec);
}
bool Field_datetimef::get_TIME(MYSQL_TIME *ltime, const uchar *pos,
- ulonglong fuzzydate) const
+ date_mode_t fuzzydate) const
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
longlong tmp= my_datetime_packed_from_binary(pos, dec);
TIME_from_longlong_datetime_packed(ltime, tmp);
return validate_MMDD(tmp, ltime->month, ltime->day, fuzzydate);
@@ -7016,7 +7024,7 @@ Field_longstr::report_if_important_data(const char *pstr, const char *end,
int Field_string::store(const char *from, size_t length,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
uint copy_length;
int rc;
@@ -7062,7 +7070,7 @@ int Field_str::store(longlong nr, bool unsigned_val)
int Field_str::store(double nr)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
uint local_char_length= MY_MIN(sizeof(buff),
field_length / field_charset->mbmaxlen);
@@ -7083,25 +7091,34 @@ int Field_str::store(double nr)
return store(buff, (uint)length, &my_charset_numeric);
}
-uint Field::is_equal(Create_field *new_field)
+
+uint Field_string::is_equal(Create_field *new_field)
{
- return new_field->type_handler() == type_handler();
-}
+ DBUG_ASSERT(!compression_method());
+ if (new_field->type_handler() != type_handler())
+ return IS_EQUAL_NO;
+ if (new_field->length < max_display_length())
+ return IS_EQUAL_NO;
+ if (new_field->char_length < char_length())
+ return IS_EQUAL_NO;
+ const bool part_of_a_key= !new_field->field->part_of_key.is_clear_all();
+ if (!Type_handler::Charsets_are_compatible(field_charset, new_field->charset,
+ part_of_a_key))
+ return IS_EQUAL_NO;
-uint Field_str::is_equal(Create_field *new_field)
-{
- return new_field->type_handler() == type_handler() &&
- new_field->charset == field_charset &&
- new_field->length == max_display_length();
+ if (new_field->length == max_display_length())
+ return new_field->charset == field_charset
+ ? IS_EQUAL_YES : IS_EQUAL_PACK_LENGTH;
+
+ return IS_EQUAL_NO;
}
int Field_longstr::store_decimal(const my_decimal *d)
{
- char buff[DECIMAL_MAX_STR_LENGTH+1];
- String str(buff, sizeof(buff), &my_charset_numeric);
- my_decimal2string(E_DEC_FATAL_ERROR, d, 0, 0, 0, &str);
+ StringBuffer<DECIMAL_MAX_STR_LENGTH+1> str;
+ d->to_string(&str);
return store(str.ptr(), str.length(), str.charset());
}
@@ -7185,7 +7202,7 @@ Field_string::Warn_filter_string::Warn_filter_string(const THD *thd,
double Field_string::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
return Converter_strntod_with_warn(get_thd(),
Warn_filter_string(thd, this),
@@ -7197,7 +7214,7 @@ double Field_string::val_real(void)
longlong Field_string::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
return Converter_strntoll_with_warn(thd, Warn_filter_string(thd, this),
Field_string::charset(),
@@ -7209,7 +7226,7 @@ longlong Field_string::val_int(void)
String *Field_string::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
/* See the comment for Field_long::store(long long) */
DBUG_ASSERT(!table || table->in_use == current_thd);
size_t length;
@@ -7227,7 +7244,7 @@ String *Field_string::val_str(String *val_buffer __attribute__((unused)),
my_decimal *Field_string::val_decimal(my_decimal *decimal_value)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
Converter_str2my_decimal_with_warn(thd,
Warn_filter_string(thd, this),
@@ -7318,11 +7335,12 @@ void Field_string::sql_type(String &res) const
size_t length;
length= cs->cset->snprintf(cs,(char*) res.ptr(),
- res.alloced_length(), "%s(%d)",
+ res.alloced_length(), "%s(%d)%s",
(type() == MYSQL_TYPE_VAR_STRING ?
(has_charset() ? "varchar" : "varbinary") :
(has_charset() ? "char" : "binary")),
- (int) field_length / charset()->mbmaxlen);
+ (int) field_length / charset()->mbmaxlen,
+ type() == MYSQL_TYPE_VAR_STRING ? "/*old*/" : "");
res.length(length);
if ((thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) &&
has_charset() && (charset()->state & MY_CS_BINSORT))
@@ -7559,7 +7577,7 @@ int Field_varstring::save_field_metadata(uchar *metadata_ptr)
int Field_varstring::store(const char *from,size_t length,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
uint copy_length;
int rc;
@@ -7576,7 +7594,7 @@ int Field_varstring::store(const char *from,size_t length,CHARSET_INFO *cs)
double Field_varstring::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
return Converter_strntod_with_warn(thd, Warn_filter(thd),
Field_varstring::charset(),
@@ -7587,7 +7605,7 @@ double Field_varstring::val_real(void)
longlong Field_varstring::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
return Converter_strntoll_with_warn(thd, Warn_filter(thd),
Field_varstring::charset(),
@@ -7599,7 +7617,7 @@ longlong Field_varstring::val_int(void)
String *Field_varstring::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
val_ptr->set((const char*) get_data(), get_length(), field_charset);
return val_ptr;
}
@@ -7607,7 +7625,7 @@ String *Field_varstring::val_str(String *val_buffer __attribute__((unused)),
my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
Converter_str2my_decimal_with_warn(thd, Warn_filter(thd),
E_DEC_FATAL_ERROR,
@@ -7929,17 +7947,31 @@ Field *Field_varstring::new_key_field(MEM_ROOT *root, TABLE *new_table,
uint Field_varstring::is_equal(Create_field *new_field)
{
- if (new_field->type_handler() == type_handler() &&
- new_field->charset == field_charset &&
- !new_field->compression_method() == !compression_method())
+ if (new_field->length < field_length)
+ return IS_EQUAL_NO;
+ if (new_field->char_length < char_length())
+ return IS_EQUAL_NO;
+ if (!new_field->compression_method() != !compression_method())
+ return IS_EQUAL_NO;
+
+ bool part_of_a_key= !new_field->field->part_of_key.is_clear_all();
+ if (!Type_handler::Charsets_are_compatible(field_charset, new_field->charset,
+ part_of_a_key))
+ return IS_EQUAL_NO;
+
+ const Type_handler *new_type_handler= new_field->type_handler();
+ if (new_type_handler == type_handler())
{
if (new_field->length == field_length)
- return IS_EQUAL_YES;
- if (new_field->length > field_length &&
- ((new_field->length <= 255 && field_length <= 255) ||
- (new_field->length > 255 && field_length > 255)))
- return IS_EQUAL_PACK_LENGTH; // VARCHAR, longer variable length
+ return new_field->charset == field_charset
+ ? IS_EQUAL_YES : IS_EQUAL_PACK_LENGTH;
+ if (field_length <= 127 ||
+ new_field->length <= 255 ||
+ field_length > 255 ||
+ (table->file->ha_table_flags() & HA_EXTENDED_TYPES_CONVERSION))
+ return IS_EQUAL_PACK_LENGTH; // VARCHAR, longer length
}
+
return IS_EQUAL_NO;
}
@@ -8096,7 +8128,7 @@ String *Field_longstr::uncompress(String *val_buffer, String *val_ptr,
int Field_varstring_compressed::store(const char *from, size_t length,
CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
uint compressed_length;
int rc= compress((char*) get_data(), field_length, from, (uint) length,
Field_varstring_compressed::max_display_length(),
@@ -8109,14 +8141,14 @@ int Field_varstring_compressed::store(const char *from, size_t length,
String *Field_varstring_compressed::val_str(String *val_buffer, String *val_ptr)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
return uncompress(val_buffer, val_ptr, get_data(), get_length());
}
double Field_varstring_compressed::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
String buf;
val_str(&buf, &buf);
@@ -8127,7 +8159,7 @@ double Field_varstring_compressed::val_real(void)
longlong Field_varstring_compressed::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
String buf;
val_str(&buf, &buf);
@@ -8228,7 +8260,7 @@ int Field_blob::copy_value(Field_blob *from)
int Field_blob::store(const char *from,size_t length,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
size_t copy_length, new_length;
uint copy_len;
char *tmp;
@@ -8324,7 +8356,7 @@ oom_error:
double Field_blob::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
char *blob;
memcpy(&blob, ptr+packlength, sizeof(char*));
if (!blob)
@@ -8338,7 +8370,7 @@ double Field_blob::val_real(void)
longlong Field_blob::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
char *blob;
memcpy(&blob, ptr+packlength, sizeof(char*));
if (!blob)
@@ -8353,7 +8385,7 @@ longlong Field_blob::val_int(void)
String *Field_blob::val_str(String *val_buffer __attribute__((unused)),
String *val_ptr)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
char *blob;
memcpy(&blob, ptr+packlength, sizeof(char*));
if (!blob)
@@ -8366,7 +8398,7 @@ String *Field_blob::val_str(String *val_buffer __attribute__((unused)),
my_decimal *Field_blob::val_decimal(my_decimal *decimal_value)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
const char *blob;
size_t length;
memcpy(&blob, ptr+packlength, sizeof(const uchar*));
@@ -8706,17 +8738,65 @@ uint Field_blob::max_packed_col_length(uint max_length)
uint Field_blob::is_equal(Create_field *new_field)
{
- return new_field->type_handler() == type_handler() &&
- new_field->charset == field_charset &&
- new_field->pack_length == pack_length() &&
- !new_field->compression_method() == !compression_method();
+ if (new_field->type_handler() != type_handler())
+ {
+ return IS_EQUAL_NO;
+ }
+ if (!new_field->compression_method() != !compression_method())
+ {
+ return IS_EQUAL_NO;
+ }
+ if (new_field->pack_length != pack_length())
+ {
+ return IS_EQUAL_NO;
+ }
+
+ bool part_of_a_key= !new_field->field->part_of_key.is_clear_all();
+ if (!Type_handler::Charsets_are_compatible(field_charset, new_field->charset,
+ part_of_a_key))
+ {
+ return IS_EQUAL_NO;
+ }
+
+ if (field_charset != new_field->charset)
+ {
+ return IS_EQUAL_PACK_LENGTH;
+ }
+
+ return IS_EQUAL_YES;
+}
+
+
+void Field_blob::make_send_field(Send_field *field)
+{
+ /*
+ Historically all BLOB variant Fields are displayed as MYSQL_TYPE_BLOB
+ in the result set metadata. Note, Item can work differently and
+ display the exact BLOB type, such as
+ MYSQL_TYPE_{TINY_BLOB|BLOB|MEDIUM_BLOB|LONG_BLOB}.
+ QQ: this should be made consistent eventually.
+ */
+ Field_longstr::make_send_field(field);
+ field->set_handler(&type_handler_blob);
+}
+
+
+bool Field_blob::make_empty_rec_store_default_value(THD *thd, Item *item)
+{
+ DBUG_ASSERT(flags & BLOB_FLAG);
+ int res= item->save_in_field(this, true);
+ DBUG_ASSERT(res != 3); // Field_blob never returns 3
+ if (res)
+ return true; // E.g. truncation happened
+ reset(); // Clear the pointer to a String, it should not be written to frm
+ return false;
}
int Field_blob_compressed::store(const char *from, size_t length,
CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
uint compressed_length;
uint max_length= max_data_length();
uint to_length= (uint) MY_MIN(max_length,
@@ -8743,14 +8823,14 @@ oom:
String *Field_blob_compressed::val_str(String *val_buffer, String *val_ptr)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
return uncompress(val_buffer, val_ptr, get_ptr(), get_length());
}
double Field_blob_compressed::val_real(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
String buf;
val_str(&buf, &buf);
@@ -8761,7 +8841,7 @@ double Field_blob_compressed::val_real(void)
longlong Field_blob_compressed::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
THD *thd= get_thd();
String buf;
val_str(&buf, &buf);
@@ -9067,7 +9147,7 @@ void Field_enum::store_type(ulonglong value)
int Field_enum::store(const char *from,size_t length,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int err= 0;
char buff[STRING_BUFFER_USUAL_SIZE];
String tmpstr(buff,sizeof(buff), &my_charset_bin);
@@ -9119,7 +9199,7 @@ int Field_enum::store(double nr)
int Field_enum::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
if ((ulonglong) nr > typelib->count || nr == 0)
{
@@ -9143,7 +9223,7 @@ double Field_enum::val_real(void)
longlong Field_enum::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
return read_lowendian(ptr, packlength);
}
@@ -9250,7 +9330,7 @@ Field *Field_enum::make_new_field(MEM_ROOT *root, TABLE *new_table,
int Field_set::store(const char *from,size_t length,CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
bool got_warning= 0;
int err= 0;
char *not_used;
@@ -9290,7 +9370,7 @@ int Field_set::store(const char *from,size_t length,CHARSET_INFO *cs)
int Field_set::store(longlong nr, bool unsigned_val)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int error= 0;
ulonglong max_nr;
@@ -9508,12 +9588,33 @@ bool Field_num::eq_def(const Field *field) const
uint Field_num::is_equal(Create_field *new_field)
{
- return ((new_field->type_handler() == type_handler()) &&
- ((new_field->flags & UNSIGNED_FLAG) ==
- (uint) (flags & UNSIGNED_FLAG)) &&
- ((new_field->flags & AUTO_INCREMENT_FLAG) ==
- (uint) (flags & AUTO_INCREMENT_FLAG)) &&
- (new_field->pack_length == pack_length()));
+ if (((new_field->flags & UNSIGNED_FLAG) != (flags & UNSIGNED_FLAG)) ||
+ ((new_field->flags & AUTO_INCREMENT_FLAG) > (flags & AUTO_INCREMENT_FLAG)))
+ return IS_EQUAL_NO;
+
+ const Type_handler *th= type_handler(), *new_th = new_field->type_handler();
+
+ if (th == new_th && new_field->pack_length == pack_length())
+ return IS_EQUAL_YES;
+ /* FIXME: Test and consider returning IS_EQUAL_YES for the following:
+ TINYINT UNSIGNED to BIT(8)
+ SMALLINT UNSIGNED to BIT(16)
+ MEDIUMINT UNSIGNED to BIT(24)
+ INT UNSIGNED to BIT(32)
+ BIGINT UNSIGNED to BIT(64)
+
+ BIT(1..7) to TINYINT, or BIT(1..8) to TINYINT UNSIGNED
+ BIT(9..15) to SMALLINT, or BIT(9..16) to SMALLINT UNSIGNED
+ BIT(17..23) to MEDIUMINT, or BIT(17..24) to MEDIUMINT UNSIGNED
+ BIT(25..31) to INT, or BIT(25..32) to INT UNSIGNED
+ BIT(57..63) to BIGINT, or BIT(57..64) to BIGINT UNSIGNED
+
+ Note: InnoDB stores integers in big-endian format, and BIT appears
+ to use big-endian format. For storage engines that use little-endian
+ format for integers, we can only return IS_EQUAL_YES for the TINYINT
+ conversion. */
+
+ return IS_EQUAL_NO;
}
@@ -9669,7 +9770,7 @@ uint Field_bit::is_equal(Create_field *new_field)
int Field_bit::store(const char *from, size_t length, CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int delta;
for (; length && !*from; from++, length--) // skip left 0's
@@ -9746,7 +9847,7 @@ double Field_bit::val_real(void)
longlong Field_bit::val_int(void)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
ulonglong bits= 0;
if (bit_len)
{
@@ -9771,7 +9872,7 @@ longlong Field_bit::val_int(void)
String *Field_bit::val_str(String *val_buffer,
String *val_ptr __attribute__((unused)))
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
char buff[sizeof(longlong)];
uint length= MY_MIN(pack_length(), sizeof(longlong));
ulonglong bits= val_int();
@@ -9787,7 +9888,7 @@ String *Field_bit::val_str(String *val_buffer,
my_decimal *Field_bit::val_decimal(my_decimal *deciaml_value)
{
- ASSERT_COLUMN_MARKED_FOR_READ;
+ DBUG_ASSERT(marked_for_read());
int2my_decimal(E_DEC_FATAL_ERROR, val_int(), 1, deciaml_value);
return deciaml_value;
}
@@ -9839,7 +9940,7 @@ int Field_bit::key_cmp(const uchar *str, uint length)
}
-int Field_bit::cmp_offset(uint row_offset)
+int Field_bit::cmp_offset(my_ptrdiff_t row_offset)
{
if (bit_len)
{
@@ -10105,7 +10206,7 @@ Field_bit_as_char::Field_bit_as_char(uchar *ptr_arg, uint32 len_arg,
int Field_bit_as_char::store(const char *from, size_t length, CHARSET_INFO *cs)
{
- ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
+ DBUG_ASSERT(marked_for_write_or_computed());
int delta;
uchar bits= (uchar) (field_length & 7);
@@ -10456,6 +10557,13 @@ bool Column_definition::fix_attributes_temporal_with_time(uint int_part_length)
}
+bool Column_definition::validate_check_constraint(THD *thd)
+{
+ return check_constraint &&
+ check_expression(check_constraint, &field_name, VCOL_CHECK_FIELD);
+}
+
+
bool Column_definition::check(THD *thd)
{
DBUG_ENTER("Column_definition::check");
@@ -10464,15 +10572,14 @@ bool Column_definition::check(THD *thd)
if (vcol_info)
{
DBUG_ASSERT(vcol_info->expr);
- vcol_info->set_field_type(real_field_type());
+ vcol_info->set_handler(type_handler());
if (check_expression(vcol_info, &field_name, vcol_info->stored_in_db
? VCOL_GENERATED_STORED : VCOL_GENERATED_VIRTUAL))
DBUG_RETURN(TRUE);
}
- if (check_constraint &&
- check_expression(check_constraint, &field_name, VCOL_CHECK_FIELD))
- DBUG_RETURN(1);
+ if (type_handler()->Column_definition_validate_check_constraint(thd, this))
+ DBUG_RETURN(TRUE);
if (default_value)
{
@@ -10596,322 +10703,90 @@ uint pack_length_to_packflag(uint type)
}
-Field *make_field(TABLE_SHARE *share,
- MEM_ROOT *mem_root,
- uchar *ptr, uint32 field_length,
- uchar *null_pos, uchar null_bit,
- uint pack_flag,
- const Type_handler *handler,
- CHARSET_INFO *field_charset,
- Field::geometry_type geom_type, uint srid,
- Field::utype unireg_check,
- TYPELIB *interval,
- const LEX_CSTRING *field_name,
- uint32 flags)
+uint Column_definition_attributes::pack_flag_to_pack_length() const
{
- uchar *UNINIT_VAR(bit_ptr);
- uchar UNINIT_VAR(bit_offset);
+ uint type= f_packtype(pack_flag); // 0..15
+ DBUG_ASSERT(type < 16);
+ switch (type) {
+ case MYSQL_TYPE_TINY: return 1;
+ case MYSQL_TYPE_SHORT: return 2;
+ case MYSQL_TYPE_LONG: return 4;
+ case MYSQL_TYPE_LONGLONG: return 8;
+ case MYSQL_TYPE_INT24: return 3;
+ }
+ return 0; // This should not happen
+}
+
+Field *Column_definition_attributes::make_field(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const Record_addr *rec,
+ const Type_handler *handler,
+ const LEX_CSTRING *field_name,
+ uint32 flags)
+ const
+{
+ DBUG_ASSERT(length <= UINT_MAX32);
DBUG_PRINT("debug", ("field_type: %s, field_length: %u, interval: %p, pack_flag: %s%s%s%s%s",
- handler->name().ptr(), field_length, interval,
+ handler->name().ptr(), (uint) length, interval,
FLAGSTR(pack_flag, FIELDFLAG_BINARY),
FLAGSTR(pack_flag, FIELDFLAG_INTERVAL),
FLAGSTR(pack_flag, FIELDFLAG_NUMBER),
FLAGSTR(pack_flag, FIELDFLAG_PACK),
FLAGSTR(pack_flag, FIELDFLAG_BLOB)));
- if (handler == &type_handler_row)
- {
- DBUG_ASSERT(field_length == 0);
- DBUG_ASSERT(f_maybe_null(pack_flag));
- return new (mem_root) Field_row(ptr, field_name);
- }
-
- if (handler->real_field_type() == MYSQL_TYPE_BIT && !f_bit_as_char(pack_flag))
- {
- bit_ptr= null_pos;
- bit_offset= null_bit;
- if (f_maybe_null(pack_flag)) // if null field
- {
- bit_ptr+= (null_bit == 7); // shift bit_ptr and bit_offset
- bit_offset= (bit_offset + 1) & 7;
- }
- }
-
- if (!f_maybe_null(pack_flag))
- {
- null_pos=0;
- null_bit=0;
- }
- else
- {
- null_bit= ((uchar) 1) << null_bit;
- }
-
-
- if (f_is_alpha(pack_flag))
- {
- if (!f_is_packed(pack_flag))
- {
- enum_field_types field_type= handler->real_field_type();
- if (field_type == MYSQL_TYPE_STRING ||
- field_type == MYSQL_TYPE_DECIMAL || // 3.23 or 4.0 string
- field_type == MYSQL_TYPE_VAR_STRING)
- return new (mem_root)
- Field_string(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name,
- field_charset);
- if (field_type == MYSQL_TYPE_VARCHAR)
- {
- if (unireg_check == Field::TMYSQL_COMPRESSED)
- return new (mem_root)
- Field_varstring_compressed(
- ptr, field_length,
- HA_VARCHAR_PACKLENGTH(field_length),
- null_pos, null_bit,
- unireg_check, field_name,
- share, field_charset, zlib_compression_method);
-
- return new (mem_root)
- Field_varstring(ptr,field_length,
- HA_VARCHAR_PACKLENGTH(field_length),
- null_pos,null_bit,
- unireg_check, field_name,
- share,
- field_charset);
- }
- return 0; // Error
- }
-
- // MYSQL_TYPE_VAR_STRING is handled above
- DBUG_ASSERT(f_packtype(pack_flag) != MYSQL_TYPE_VAR_STRING);
- const Type_handler *tmp;
- tmp= Type_handler::get_handler_by_real_type((enum_field_types)
- f_packtype(pack_flag));
- uint pack_length= tmp->calc_pack_length(field_length);
-
-#ifdef HAVE_SPATIAL
- if (f_is_geom(pack_flag))
- {
- status_var_increment(current_thd->status_var.feature_gis);
- return new (mem_root)
- Field_geom(ptr,null_pos,null_bit,
- unireg_check, field_name, share,
- pack_length, geom_type, srid);
- }
-#endif
- if (f_is_blob(pack_flag))
- {
- if (unireg_check == Field::TMYSQL_COMPRESSED)
- return new (mem_root)
- Field_blob_compressed(ptr, null_pos, null_bit,
- unireg_check, field_name, share,
- pack_length, field_charset, zlib_compression_method);
-
- return new (mem_root)
- Field_blob(ptr,null_pos,null_bit,
- unireg_check, field_name, share,
- pack_length, field_charset);
- }
- if (interval)
- {
- if (f_is_enum(pack_flag))
- return new (mem_root)
- Field_enum(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name,
- pack_length, interval, field_charset);
- else
- return new (mem_root)
- Field_set(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name,
- pack_length, interval, field_charset);
- }
- }
-
- switch (handler->real_field_type()) {
- case MYSQL_TYPE_DECIMAL:
- return new (mem_root)
- Field_decimal(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name,
- f_decimals(pack_flag),
- f_is_zerofill(pack_flag) != 0,
- f_is_dec(pack_flag) == 0);
- case MYSQL_TYPE_NEWDECIMAL:
- return new (mem_root)
- Field_new_decimal(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name,
- f_decimals(pack_flag),
- f_is_zerofill(pack_flag) != 0,
- f_is_dec(pack_flag) == 0);
- case MYSQL_TYPE_FLOAT:
- {
- int decimals= f_decimals(pack_flag);
- if (decimals == FLOATING_POINT_DECIMALS)
- decimals= NOT_FIXED_DEC;
- return new (mem_root)
- Field_float(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name,
- decimals,
- f_is_zerofill(pack_flag) != 0,
- f_is_dec(pack_flag)== 0);
- }
- case MYSQL_TYPE_DOUBLE:
- {
- int decimals= f_decimals(pack_flag);
- if (decimals == FLOATING_POINT_DECIMALS)
- decimals= NOT_FIXED_DEC;
- return new (mem_root)
- Field_double(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name,
- decimals,
- f_is_zerofill(pack_flag) != 0,
- f_is_dec(pack_flag)== 0);
- }
- case MYSQL_TYPE_TINY:
- return new (mem_root)
- Field_tiny(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name,
- f_is_zerofill(pack_flag) != 0,
- f_is_dec(pack_flag) == 0);
- case MYSQL_TYPE_SHORT:
- return new (mem_root)
- Field_short(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name,
- f_is_zerofill(pack_flag) != 0,
- f_is_dec(pack_flag) == 0);
- case MYSQL_TYPE_INT24:
- return new (mem_root)
- Field_medium(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name,
- f_is_zerofill(pack_flag) != 0,
- f_is_dec(pack_flag) == 0);
- case MYSQL_TYPE_LONG:
- return new (mem_root)
- Field_long(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name,
- f_is_zerofill(pack_flag) != 0,
- f_is_dec(pack_flag) == 0);
- case MYSQL_TYPE_LONGLONG:
- if (flags & (VERS_SYS_START_FLAG|VERS_SYS_END_FLAG))
- {
- return new (mem_root)
- Field_vers_trx_id(ptr, field_length, null_pos, null_bit,
- unireg_check, field_name,
- f_is_zerofill(pack_flag) != 0,
- f_is_dec(pack_flag) == 0);
- }
- else
- {
- return new (mem_root)
- Field_longlong(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name,
- f_is_zerofill(pack_flag) != 0,
- f_is_dec(pack_flag) == 0);
- }
- case MYSQL_TYPE_TIMESTAMP:
- {
- uint dec= field_length > MAX_DATETIME_WIDTH ?
- field_length - MAX_DATETIME_WIDTH - 1: 0;
- return new_Field_timestamp(mem_root, ptr, null_pos, null_bit, unireg_check,
- field_name, share, dec);
- }
- case MYSQL_TYPE_TIMESTAMP2:
- {
- uint dec= field_length > MAX_DATETIME_WIDTH ?
- field_length - MAX_DATETIME_WIDTH - 1: 0;
- return new (mem_root)
- Field_timestampf(ptr, null_pos, null_bit, unireg_check,
- field_name, share, dec);
- }
- case MYSQL_TYPE_YEAR:
- return new (mem_root)
- Field_year(ptr,field_length,null_pos,null_bit,
- unireg_check, field_name);
- case MYSQL_TYPE_DATE:
- return new (mem_root)
- Field_date(ptr,null_pos,null_bit,
- unireg_check, field_name);
- case MYSQL_TYPE_NEWDATE:
- return new (mem_root)
- Field_newdate(ptr,null_pos,null_bit,
- unireg_check, field_name);
- case MYSQL_TYPE_TIME:
- {
- uint dec= field_length > MIN_TIME_WIDTH ?
- field_length - MIN_TIME_WIDTH - 1: 0;
- return new_Field_time(mem_root, ptr, null_pos, null_bit, unireg_check,
- field_name, dec);
- }
- case MYSQL_TYPE_TIME2:
- {
- uint dec= field_length > MIN_TIME_WIDTH ?
- field_length - MIN_TIME_WIDTH - 1: 0;
- return new (mem_root)
- Field_timef(ptr, null_pos, null_bit, unireg_check,
- field_name, dec);
- }
- case MYSQL_TYPE_DATETIME:
- {
- uint dec= field_length > MAX_DATETIME_WIDTH ?
- field_length - MAX_DATETIME_WIDTH - 1: 0;
- return new_Field_datetime(mem_root, ptr, null_pos, null_bit, unireg_check,
- field_name, dec);
- }
- case MYSQL_TYPE_DATETIME2:
- {
- uint dec= field_length > MAX_DATETIME_WIDTH ?
- field_length - MAX_DATETIME_WIDTH - 1: 0;
- return new (mem_root)
- Field_datetimef(ptr, null_pos, null_bit, unireg_check,
- field_name, dec);
- }
- case MYSQL_TYPE_NULL:
- return new (mem_root)
- Field_null(ptr, field_length, unireg_check, field_name,
- field_charset);
- case MYSQL_TYPE_BIT:
- return (f_bit_as_char(pack_flag) ?
- new (mem_root)
- Field_bit_as_char(ptr, field_length, null_pos, null_bit,
- unireg_check, field_name) :
- new (mem_root)
- Field_bit(ptr, field_length, null_pos, null_bit, bit_ptr,
- bit_offset, unireg_check, field_name));
-
- default: // Impossible (Wrong version)
- break;
- }
- return 0;
+ Record_addr addr(rec->ptr(), f_maybe_null(pack_flag) ? rec->null() :
+ Bit_addr());
+ /*
+ Special code for the BIT-alike data types
+ which store data bits together with NULL-bits.
+ */
+ Bit_addr bit(rec->null());
+ if (f_maybe_null(pack_flag))
+ bit.inc();
+ return handler->make_table_field_from_def(share, mem_root, field_name,
+ addr, bit, this, flags);
}
+
bool Field_vers_trx_id::test_if_equality_guarantees_uniqueness(const Item* item) const
{
- return item->type() == Item::DATE_ITEM;
+ return item->is_of_type(Item::CONST_ITEM, TIME_RESULT);
}
+Column_definition_attributes::Column_definition_attributes(const Field *field)
+ :length(field->character_octet_length() / field->charset()->mbmaxlen),
+ unireg_check(field->unireg_check),
+ interval(NULL),
+ charset(field->charset()), // May be NULL ptr
+ srid(0),
+ geom_type(Field::GEOM_GEOMETRY),
+ pack_flag(0)
+{}
+
+
/** Create a field suitable for create of table. */
Column_definition::Column_definition(THD *thd, Field *old_field,
Field *orig_field)
+ :Column_definition_attributes(old_field)
{
on_update= NULL;
field_name= old_field->field_name;
- length= old_field->field_length;
flags= old_field->flags;
- unireg_check=old_field->unireg_check;
pack_length=old_field->pack_length();
key_length= old_field->key_length();
set_handler(old_field->type_handler());
- charset= old_field->charset(); // May be NULL ptr
comment= old_field->comment;
decimals= old_field->decimals();
vcol_info= old_field->vcol_info;
option_list= old_field->option_list;
- pack_flag= 0;
compression_method_ptr= 0;
versioning= VERSIONING_NOT_SET;
invisible= old_field->invisible;
+ interval_list.empty(); // prepare_interval_field() needs this
+ char_length= (uint) length;
if (orig_field)
{
@@ -10929,66 +10804,9 @@ Column_definition::Column_definition(THD *thd, Field *old_field,
check_constraint= 0;
}
- switch (real_field_type()) {
- case MYSQL_TYPE_TINY_BLOB:
- case MYSQL_TYPE_BLOB:
- case MYSQL_TYPE_MEDIUM_BLOB:
- case MYSQL_TYPE_LONG_BLOB:
- length/= charset->mbmaxlen;
- key_length/= charset->mbmaxlen;
- break;
- case MYSQL_TYPE_STRING:
- /* Change CHAR -> VARCHAR if dynamic record length */
- if (old_field->type() == MYSQL_TYPE_VAR_STRING)
- set_handler(&type_handler_varchar);
- /* fall through */
-
- case MYSQL_TYPE_ENUM:
- case MYSQL_TYPE_SET:
- case MYSQL_TYPE_VARCHAR:
- case MYSQL_TYPE_VAR_STRING:
- /* This is corrected in create_length_to_internal_length */
- length= (length+charset->mbmaxlen-1) / charset->mbmaxlen -
- MY_TEST(old_field->compression_method());
- break;
-#ifdef HAVE_SPATIAL
- case MYSQL_TYPE_GEOMETRY:
- geom_type= ((Field_geom*)old_field)->geom_type;
- srid= ((Field_geom*)old_field)->srid;
- break;
-#endif
- case MYSQL_TYPE_YEAR:
- if (length != 4)
- {
- char buff[sizeof("YEAR()") + MY_INT64_NUM_DECIMAL_DIGITS + 1];
- my_snprintf(buff, sizeof(buff), "YEAR(%llu)", length);
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
- ER_WARN_DEPRECATED_SYNTAX,
- ER_THD(thd, ER_WARN_DEPRECATED_SYNTAX),
- buff, "YEAR(4)");
- }
- break;
- case MYSQL_TYPE_FLOAT:
- case MYSQL_TYPE_DOUBLE:
- /*
- Floating points are stored with FLOATING_POINT_DECIMALS but internally
- in MariaDB used with NOT_FIXED_DEC, which is >= FLOATING_POINT_DECIMALS.
- */
- if (decimals >= FLOATING_POINT_DECIMALS)
- decimals= NOT_FIXED_DEC;
- break;
- default:
- break;
- }
+ type_handler()->Column_definition_reuse_fix_attributes(thd, this, old_field);
- if (flags & (ENUM_FLAG | SET_FLAG))
- interval= ((Field_enum*) old_field)->typelib;
- else
- interval=0;
-
- interval_list.empty(); // prepare_interval_field() needs this
-
- char_length= (uint)length;
+ type_handler()->Column_definition_implicit_upgrade(this);
/*
Copy the default (constant/function) from the column object orig_field, if
@@ -11051,6 +10869,9 @@ Column_definition::redefine_stage1_common(const Column_definition *dup_field,
vcol_info= dup_field->vcol_info;
invisible= dup_field->invisible;
check_constraint= dup_field->check_constraint;
+ comment= dup_field->comment;
+ option_list= dup_field->option_list;
+ versioning= dup_field->versioning;
}
@@ -11071,11 +10892,11 @@ Column_definition::redefine_stage1_common(const Column_definition *dup_field,
uint32 Field_blob::char_length() const
{
- return Field_blob::octet_length();
+ return Field_blob::character_octet_length();
}
-uint32 Field_blob::octet_length() const
+uint32 Field_blob::character_octet_length() const
{
switch (packlength)
{
@@ -11144,6 +10965,13 @@ bool Column_definition::set_compressed(const char *method)
}
+Send_field::Send_field(THD *thd, Item *item)
+{
+ item->make_send_field(thd, this);
+ normalize();
+}
+
+
/**
maximum possible display length for blob.
@@ -11237,13 +11065,23 @@ Field::set_warning(Sql_condition::enum_warning_level level, uint code,
void Field::set_datetime_warning(Sql_condition::enum_warning_level level,
uint code, const ErrConv *str,
- timestamp_type ts_type, int cuted_increment)
+ const char *typestr, int cuted_increment)
const
{
THD *thd= get_thd();
if (thd->really_abort_on_warning() && level >= Sql_condition::WARN_LEVEL_WARN)
- make_truncated_value_warning(thd, level, str, ts_type,
- table->s, field_name.str);
+ {
+ /*
+ field_name.str can be NULL when the field is not in the select list:
+ SET SESSION SQL_MODE= 'STRICT_ALL_TABLES,NO_ZERO_DATE';
+ CREATE OR REPLACE TABLE t2 SELECT 1 AS f FROM t1 GROUP BY FROM_DAYS(d);
+ Can't call push_warning_truncated_value_for_field() directly here,
+ as it expects a non-NULL name.
+ */
+ thd->push_warning_wrong_or_truncated_value(level, false, typestr,
+ str->ptr(), table->s,
+ field_name.str);
+ }
else
set_warning(level, code, cuted_increment);
}
diff --git a/sql/field.h b/sql/field.h
index 422a223ed87..676c85abbef 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -48,6 +48,9 @@ class Item_equal;
class Virtual_tmp_table;
class Qualified_column_ident;
class Table_ident;
+class SEL_ARG;
+class RANGE_OPT_PARAM;
+struct KEY_PART;
enum enum_check_fields
{
@@ -461,37 +464,13 @@ inline bool is_temporal_type_with_date(enum_field_types type)
case MYSQL_TYPE_DATETIME2:
case MYSQL_TYPE_TIMESTAMP2:
DBUG_ASSERT(0); // field->real_type() should not get to here.
+ return false;
default:
return false;
}
}
-/**
- Convert temporal real types as retuned by field->real_type()
- to field type as returned by field->type().
-
- @param real_type Real type.
- @retval Field type.
-*/
-inline enum_field_types real_type_to_type(enum_field_types real_type)
-{
- switch (real_type)
- {
- case MYSQL_TYPE_TIME2:
- return MYSQL_TYPE_TIME;
- case MYSQL_TYPE_DATETIME2:
- return MYSQL_TYPE_DATETIME;
- case MYSQL_TYPE_TIMESTAMP2:
- return MYSQL_TYPE_TIMESTAMP;
- case MYSQL_TYPE_NEWDATE:
- return MYSQL_TYPE_DATE;
- /* Note: NEWDECIMAL is a type, not only a real_type */
- default: return real_type;
- }
-}
-
-
enum enum_vcol_info_type
{
VCOL_GENERATED_VIRTUAL, VCOL_GENERATED_STORED,
@@ -545,7 +524,8 @@ static inline const char *vcol_type_name(enum_vcol_info_type type)
- whether the field is used in a partitioning expression
*/
-class Virtual_column_info: public Sql_alloc
+class Virtual_column_info: public Sql_alloc,
+ private Type_handler_hybrid_field_type
{
private:
enum_vcol_info_type vcol_type; /* Virtual column expression type */
@@ -553,7 +533,6 @@ private:
The following data is only updated by the parser and read
when a Create_field object is created/initialized.
*/
- enum_field_types field_type; /* Real field type*/
/* Flag indicating that the field used in a partitioning expression */
bool in_partitioning_expr;
@@ -562,20 +541,20 @@ public:
bool stored_in_db;
bool utf8; /* Already in utf8 */
Item *expr;
- LEX_CSTRING name; /* Name of constraint */
+ Lex_ident name; /* Name of constraint */
/* see VCOL_* (VCOL_FIELD_REF, ...) */
uint flags;
Virtual_column_info()
- : vcol_type((enum_vcol_info_type)VCOL_TYPE_NONE),
- field_type((enum enum_field_types)MYSQL_TYPE_VIRTUAL),
+ :Type_handler_hybrid_field_type(&type_handler_null),
+ vcol_type((enum_vcol_info_type)VCOL_TYPE_NONE),
in_partitioning_expr(FALSE), stored_in_db(FALSE),
utf8(TRUE), expr(NULL), flags(0)
{
name.str= NULL;
name.length= 0;
};
- ~Virtual_column_info() {}
+ ~Virtual_column_info() {};
enum_vcol_info_type get_vcol_type() const
{
return vcol_type;
@@ -589,14 +568,11 @@ public:
DBUG_ASSERT(vcol_type != VCOL_TYPE_NONE);
return vcol_type_name(vcol_type);
}
- enum_field_types get_real_type() const
- {
- return field_type;
- }
- void set_field_type(enum_field_types fld_type)
+ void set_handler(const Type_handler *handler)
{
/* Calling this function can only be done once. */
- field_type= fld_type;
+ DBUG_ASSERT(type_handler() == &type_handler_null);
+ Type_handler_hybrid_field_type::set_handler(handler);
}
bool is_stored() const
{
@@ -632,7 +608,9 @@ protected:
static void do_field_int(Copy_field *copy);
static void do_field_real(Copy_field *copy);
static void do_field_string(Copy_field *copy);
- static void do_field_temporal(Copy_field *copy);
+ static void do_field_date(Copy_field *copy);
+ static void do_field_temporal(Copy_field *copy, date_mode_t fuzzydate);
+ static void do_field_datetime(Copy_field *copy);
static void do_field_timestamp(Copy_field *copy);
static void do_field_decimal(Copy_field *copy);
public:
@@ -647,6 +625,9 @@ public:
static void operator delete(void *ptr, MEM_ROOT *mem_root)
{ DBUG_ASSERT(0); }
+ bool marked_for_read() const;
+ bool marked_for_write_or_computed() const;
+
/**
Used by System Versioning.
*/
@@ -798,13 +779,32 @@ public:
@retval false - conversion is needed
*/
virtual bool memcpy_field_possible(const Field *from) const= 0;
+ virtual bool make_empty_rec_store_default_value(THD *thd, Item *item);
+ virtual void make_empty_rec_reset(THD *thd)
+ {
+ reset();
+ }
virtual int store(const char *to, size_t length,CHARSET_INFO *cs)=0;
virtual int store_hex_hybrid(const char *str, size_t length);
virtual int store(double nr)=0;
virtual int store(longlong nr, bool unsigned_val)=0;
virtual int store_decimal(const my_decimal *d)=0;
virtual int store_time_dec(const MYSQL_TIME *ltime, uint dec);
- virtual int store_timestamp(my_time_t timestamp, ulong sec_part);
+ virtual int store_timestamp_dec(const timeval &ts, uint dec);
+ int store_timestamp(my_time_t timestamp, ulong sec_part)
+ {
+ return store_timestamp_dec(Timeval(timestamp, sec_part),
+ TIME_SECOND_PART_DIGITS);
+ }
+ /**
+ Store a value represented in native format
+ */
+ virtual int store_native(const Native &value)
+ {
+ DBUG_ASSERT(0);
+ reset();
+ return 0;
+ }
int store_time(const MYSQL_TIME *ltime)
{ return store_time_dec(ltime, TIME_SECOND_PART_DIGITS); }
int store(const char *to, size_t length, CHARSET_INFO *cs,
@@ -836,7 +836,7 @@ public:
return nr < 0 ? 0 : (ulonglong) nr;
}
virtual bool val_bool(void)= 0;
- virtual my_decimal *val_decimal(my_decimal *);
+ virtual my_decimal *val_decimal(my_decimal *)=0;
inline String *val_str(String *str) { return val_str(str, str); }
/*
val_str(buf1, buf2) gets two buffers and should use them as follows:
@@ -851,6 +851,11 @@ public:
This trickery is used to decrease a number of malloc calls.
*/
virtual String *val_str(String*,String *)=0;
+ virtual bool val_native(Native *to)
+ {
+ DBUG_ASSERT(!is_null());
+ return to->copy((const char *) ptr, pack_length());
+ }
String *val_int_as_str(String *val_buffer, bool unsigned_flag);
/*
Return the field value as a LEX_CSTRING, without padding to full length
@@ -873,6 +878,10 @@ public:
to be quoted when used in constructing an SQL query.
*/
virtual bool str_needs_quotes() { return FALSE; }
+ const Type_handler *type_handler_for_comparison() const
+ {
+ return type_handler()->type_handler_for_comparison();
+ }
Item_result result_type () const
{
return type_handler()->result_type();
@@ -881,7 +890,6 @@ public:
{
return type_handler()->cmp_type();
}
- static enum_field_types field_type_merge(enum_field_types, enum_field_types);
virtual bool eq(Field *field)
{
return (ptr == field->ptr && null_ptr == field->null_ptr &&
@@ -1081,7 +1089,7 @@ public:
virtual int cmp(const uchar *,const uchar *)=0;
virtual int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U)
{ return memcmp(a,b,pack_length()); }
- virtual int cmp_offset(uint row_offset)
+ virtual int cmp_offset(my_ptrdiff_t row_offset)
{ return cmp(ptr,ptr+row_offset); }
virtual int cmp_binary_offset(uint row_offset)
{ return cmp_binary(ptr, ptr+row_offset); };
@@ -1246,6 +1254,12 @@ public:
virtual Field *new_key_field(MEM_ROOT *root, TABLE *new_table,
uchar *new_ptr, uint32 length,
uchar *new_null_ptr, uint new_null_bit);
+ Field *create_tmp_field(MEM_ROOT *root, TABLE *new_table,
+ bool maybe_null_arg);
+ Field *create_tmp_field(MEM_ROOT *root, TABLE *new_table)
+ {
+ return create_tmp_field(root, new_table, maybe_null());
+ }
Field *clone(MEM_ROOT *mem_root, TABLE *new_table);
Field *clone(MEM_ROOT *mem_root, TABLE *new_table, my_ptrdiff_t diff,
bool stat_flag= FALSE);
@@ -1356,8 +1370,7 @@ public:
}
void copy_from_tmp(int offset);
uint fill_cache_field(struct st_cache_field *copy);
- virtual bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool get_time(MYSQL_TIME *ltime) { return get_date(ltime, TIME_TIME_ONLY); }
+ virtual bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate);
virtual TYPELIB *get_typelib() const { return NULL; }
virtual CHARSET_INFO *charset(void) const { return &my_charset_bin; }
virtual CHARSET_INFO *charset_for_protocol(void) const
@@ -1380,13 +1393,13 @@ protected:
return set_warning(Sql_condition::WARN_LEVEL_NOTE, code, cuted_increment);
}
void set_datetime_warning(Sql_condition::enum_warning_level, uint code,
- const ErrConv *str, timestamp_type ts_type,
+ const ErrConv *str, const char *typestr,
int cuted_increment) const;
void set_datetime_warning(uint code,
- const ErrConv *str, timestamp_type ts_type,
+ const ErrConv *str, const char *typestr,
int cuted_increment) const
{
- set_datetime_warning(Sql_condition::WARN_LEVEL_WARN, code, str, ts_type,
+ set_datetime_warning(Sql_condition::WARN_LEVEL_WARN, code, str, typestr,
cuted_increment);
}
void set_warning_truncated_wrong_value(const char *type, const char *value);
@@ -1396,6 +1409,59 @@ protected:
}
int warn_if_overflow(int op_result);
Copy_func *get_identical_copy_func() const;
+ bool can_optimize_scalar_range(const RANGE_OPT_PARAM *param,
+ const KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op,
+ const Item *value) const;
+ uchar *make_key_image(MEM_ROOT *mem_root, const KEY_PART *key_part);
+ SEL_ARG *get_mm_leaf_int(RANGE_OPT_PARAM *param, KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op, Item *value,
+ bool unsigned_field);
+ /*
+ Make a leaf tree for the cases when the value was stored
+ to the field exactly, without any truncation, rounding or adjustments.
+ For example, if we stored an INT value into an INT column,
+ and value->save_in_field_no_warnings() returned 0,
+ we know that the value was stored exactly.
+ */
+ SEL_ARG *stored_field_make_mm_leaf_exact(RANGE_OPT_PARAM *param,
+ KEY_PART *key_part,
+ scalar_comparison_op op,
+ Item *value);
+ /*
+ Make a leaf tree for the cases when we don't know if
+ the value was stored to the field without any data loss,
+ or was modified to a smaller or a greater value.
+ Used for the data types whose methods Field::store*()
+ silently adjust the value. This is the most typical case.
+ */
+ SEL_ARG *stored_field_make_mm_leaf(RANGE_OPT_PARAM *param,
+ KEY_PART *key_part,
+ scalar_comparison_op op, Item *value);
+ /*
+ Make a leaf tree when an INT value was stored into a field of INT type,
+ and some truncation happened. Tries to adjust the range search condition
+ when possible, e.g. "tinyint < 300" -> "tinyint <= 127".
+ Can also return SEL_ARG_IMPOSSIBLE() or NULL (not sargable).
+ */
+ SEL_ARG *stored_field_make_mm_leaf_bounded_int(RANGE_OPT_PARAM *param,
+ KEY_PART *key_part,
+ scalar_comparison_op op,
+ Item *value,
+ bool unsigned_field);
+ /*
+ Make a leaf tree when some truncation happened during
+ value->save_in_field_no_warnings(this), and we cannot yet adjust the range
+ search condition for the current combination of the field and the value
+ data types.
+ Returns SEL_ARG_IMPOSSIBLE() for "=" and "<=>".
+ Returns NULL (not sargable) for other comparison operations.
+ */
+ SEL_ARG *stored_field_make_mm_leaf_truncated(RANGE_OPT_PARAM *prm,
+ scalar_comparison_op,
+ Item *value);
public:
void set_table_name(String *alias)
{
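The comments on stored_field_make_mm_leaf_bounded_int() above describe clamping an out-of-range constant to the column's bounds before building the range leaf. A compact sketch of that idea under simplified assumptions (signed 8-bit bounds and made-up names; not the actual SEL_ARG code):

#include <optional>
#include <utility>

enum class Cmp { LT, LE, GT, GE, EQ };

// Clamp "col <op> constant" against the column's storage bounds.
// Returns the adjusted condition, or nullopt when it can never be true.
static std::optional<std::pair<Cmp, long long>>
clamp_int_condition(Cmp op, long long value,
                    long long min_val= -128, long long max_val= 127)
{
  if (value > max_val)
  {
    if (op == Cmp::LT || op == Cmp::LE)
      return std::pair<Cmp, long long>{Cmp::LE, max_val};  // "x < 300" -> "x <= 127"
    return std::nullopt;                                    // "x > 300", "x = 300": impossible
  }
  if (value < min_val)
  {
    if (op == Cmp::GT || op == Cmp::GE)
      return std::pair<Cmp, long long>{Cmp::GE, min_val};
    return std::nullopt;
  }
  return std::pair<Cmp, long long>{op, value};              // value fits: keep the condition as is
}

int main()
{
  auto r= clamp_int_condition(Cmp::LT, 300);
  return (r && r->first == Cmp::LE && r->second == 127) ? 0 : 1;
}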
@@ -1406,6 +1472,19 @@ public:
orig_table= table= table_arg;
set_table_name(&table_arg->alias);
}
+ virtual void init_for_tmp_table(Field *org_field, TABLE *new_table)
+ {
+ init(new_table);
+ orig_table= org_field->orig_table;
+ vcol_info= 0;
+ cond_selectivity= 1.0;
+ next_equal_field= NULL;
+ option_list= NULL;
+ option_struct= NULL;
+ if (org_field->type() == MYSQL_TYPE_VAR_STRING ||
+ org_field->type() == MYSQL_TYPE_VARCHAR)
+ new_table->s->db_create_options|= HA_OPTION_PACK_RECORD;
+ }
void init_for_make_new_field(TABLE *new_table_arg, TABLE *orig_table_arg)
{
init(new_table_arg);
@@ -1428,16 +1507,25 @@ public:
of a table is compatible with the old definition so that it can
determine if data needs to be copied over (table data change).
*/
- virtual uint is_equal(Create_field *new_field);
+ virtual uint is_equal(Create_field *new_field)= 0;
/* convert decimal to longlong with overflow check */
longlong convert_decimal2longlong(const my_decimal *val, bool unsigned_flag,
int *err);
+ /*
+ Maximum number of bytes in character representation.
+ - For string types it is equal to the field capacity, in bytes.
+ - For non-string types it represents the longest possible string length
+ after conversion to string.
+ */
+ virtual uint32 character_octet_length() const
+ {
+ return field_length;
+ }
/* The max. number of characters */
virtual uint32 char_length() const
{
return field_length / charset()->mbmaxlen;
}
-
virtual geometry_type get_geometry_type()
{
/* shouldn't get here. */
@@ -1566,6 +1654,10 @@ public:
const Item *item,
bool is_eq_func) const;
+ virtual SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op, Item *value)= 0;
+
bool can_optimize_outer_join_table_elimination(const Item_bool_func *cond,
const Item *item) const
{
@@ -1727,6 +1819,9 @@ public:
{
return pos_in_interval_val_real(min, max);
}
+ SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op, Item *value);
};
@@ -1764,6 +1859,7 @@ public:
enum Derivation derivation(void) const { return field_derivation; }
bool binary() const { return field_charset == &my_charset_bin; }
uint32 max_display_length() const { return field_length; }
+ uint32 character_octet_length() const { return field_length; }
uint32 char_length() const { return field_length / field_charset->mbmaxlen; }
Information_schema_character_attributes
information_schema_character_attributes() const
@@ -1775,7 +1871,6 @@ public:
my_decimal *val_decimal(my_decimal *);
bool val_bool() { return val_real() != 0e0; }
virtual bool str_needs_quotes() { return TRUE; }
- uint is_equal(Create_field *new_field);
bool eq_cmp_as_binary() { return MY_TEST(flags & BINARY_FLAG); }
virtual uint length_size() { return 0; }
double pos_in_interval(Field *min, Field *max)
@@ -1783,6 +1878,9 @@ public:
return pos_in_interval_val_str(min, max, length_size());
}
bool test_if_equality_guarantees_uniqueness(const Item *const_item) const;
+ SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op, Item *value);
};
/* base class for Field_string, Field_varstring and Field_blob */
@@ -1895,9 +1993,9 @@ public:
return Field_num::memcpy_field_possible(from) &&
field_length >= from->field_length;
}
- int store_decimal(const my_decimal *);
+ int store_decimal(const my_decimal *dec) { return store(dec->to_double()); }
int store_time_dec(const MYSQL_TIME *ltime, uint dec);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate);
my_decimal *val_decimal(my_decimal *);
bool val_bool() { return val_real() != 0e0; }
uint32 max_display_length() const { return field_length; }
@@ -1978,8 +2076,8 @@ public:
}
int save_in_field(Field *to)
{
- my_decimal buff;
- return to->store_decimal(val_decimal(&buff));
+ my_decimal tmp(ptr, precision, dec);
+ return to->store_decimal(&tmp);
}
bool memcpy_field_possible(const Field *from) const
{
@@ -1995,17 +2093,34 @@ public:
int store(longlong nr, bool unsigned_val);
int store_time_dec(const MYSQL_TIME *ltime, uint dec);
int store_decimal(const my_decimal *);
- double val_real(void);
- longlong val_int(void);
- ulonglong val_uint(void);
+ double val_real(void)
+ {
+ return my_decimal(ptr, precision, dec).to_double();
+ }
+ longlong val_int(void)
+ {
+ return my_decimal(ptr, precision, dec).to_longlong(unsigned_flag);
+ }
+ ulonglong val_uint(void)
+ {
+ return (ulonglong) my_decimal(ptr, precision, dec).to_longlong(true);
+ }
my_decimal *val_decimal(my_decimal *);
- String *val_str(String*, String *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ String *val_str(String *val_buffer, String *val_ptr __attribute__((unused)))
+ {
+ uint fixed_precision= zerofill ? precision : 0;
+ return my_decimal(ptr, precision, dec).
+ to_string(val_buffer, fixed_precision, dec, '0');
+ }
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ {
+ my_decimal nr(ptr, precision, dec);
+ return decimal_to_datetime_with_warn(get_thd(), &nr, ltime,
+ fuzzydate, table->s, field_name.str);
+ }
bool val_bool()
{
- my_decimal decimal_value;
- my_decimal *val= val_decimal(&decimal_value);
- return val ? !my_decimal_is_zero(val) : 0;
+ return my_decimal(ptr, precision, dec).to_bool();
}
int cmp(const uchar *, const uchar *);
void sort_string(uchar *buff, uint length);
@@ -2050,7 +2165,7 @@ public:
return nr < 0 && !unsigned_flag ? 0 : (ulonglong) nr;
}
int store_time_dec(const MYSQL_TIME *ltime, uint dec);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate);
virtual const Type_limits_int *type_limits_int() const= 0;
uint32 max_display_length() const
{
@@ -2080,6 +2195,12 @@ public:
uint32 prec= type_limits_int()->precision();
return Information_schema_numeric_attributes(prec, 0);
}
+ SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op, Item *value)
+ {
+ return get_mm_leaf_int(param, key_part, cond, op, value, unsigned_flag);
+ }
};
@@ -2347,8 +2468,8 @@ public:
{}
const Type_handler *type_handler() const { return &type_handler_vers_trx_id; }
uint size_of() const { return sizeof(*this); }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate, ulonglong trx_id);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate, ulonglong trx_id);
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
return get_date(ltime, fuzzydate, (ulonglong) val_int());
}
@@ -2455,6 +2576,11 @@ public:
if (dec_arg >= FLOATING_POINT_DECIMALS)
dec_arg= NOT_FIXED_DEC;
}
+ void init_for_tmp_table(Field *org_field, TABLE *new_table)
+ {
+ Field::init_for_tmp_table(org_field, new_table);
+ not_fixed= true;
+ }
const Type_handler *type_handler() const { return &type_handler_double; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_DOUBLE; }
int store(const char *to,size_t length,CHARSET_INFO *charset);
@@ -2516,6 +2642,7 @@ public:
my_decimal *val_decimal(my_decimal *) { return 0; }
String *val_str(String *value,String *value2)
{ value2->length(0); return value2;}
+ uint is_equal(Create_field *new_field);
int cmp(const uchar *a, const uchar *b) { return 0;}
void sort_string(uchar *buff, uint length) {}
uint32 pack_length() const { return 0; }
@@ -2540,6 +2667,35 @@ class Field_temporal: public Field {
protected:
Item *get_equal_const_item_datetime(THD *thd, const Context &ctx,
Item *const_item);
+ void set_warnings(Sql_condition::enum_warning_level trunc_level,
+ const ErrConv *str, int was_cut, const char *typestr);
+ int store_TIME_return_code_with_warnings(int warn, const ErrConv *str,
+ const char *typestr)
+ {
+ if (!MYSQL_TIME_WARN_HAVE_WARNINGS(warn) &&
+ MYSQL_TIME_WARN_HAVE_NOTES(warn))
+ {
+ set_warnings(Sql_condition::WARN_LEVEL_NOTE, str,
+ warn | MYSQL_TIME_WARN_TRUNCATED, typestr);
+ return 3;
+ }
+ set_warnings(Sql_condition::WARN_LEVEL_WARN, str, warn, typestr);
+ return warn ? 2 : 0;
+ }
+ int store_invalid_with_warning(const ErrConv *str, int was_cut,
+ const char *typestr)
+ {
+ DBUG_ASSERT(was_cut);
+ reset();
+ Sql_condition::enum_warning_level level= Sql_condition::WARN_LEVEL_WARN;
+ if (was_cut & MYSQL_TIME_WARN_ZERO_DATE)
+ {
+ set_warnings(level, str, MYSQL_TIME_WARN_OUT_OF_RANGE, typestr);
+ return 2;
+ }
+ set_warnings(level, str, MYSQL_TIME_WARN_TRUNCATED, typestr);
+ return 1;
+ }
public:
Field_temporal(uchar *ptr_arg,uint32 len_arg, uchar *null_ptr_arg,
uchar null_bit_arg, utype unireg_check_arg,
@@ -2555,7 +2711,8 @@ public:
int save_in_field(Field *to)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, 0))
+ // For temporal types no truncation is needed; the rounding mode is not important.
+ if (get_date(&ltime, TIME_CONV_NONE | TIME_FRAC_NONE))
return to->reset();
return to->store_time_dec(&ltime, decimals());
}
@@ -2574,8 +2731,6 @@ public:
return (Field::eq_def(field) && decimals() == field->decimals());
}
my_decimal *val_decimal(my_decimal*);
- void set_warnings(Sql_condition::enum_warning_level trunc_level,
- const ErrConv *str, int was_cut, timestamp_type ts_type);
double pos_in_interval(Field *min, Field *max)
{
return pos_in_interval_val_real(min, max);
@@ -2590,6 +2745,9 @@ public:
{
return true;
}
+ SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op, Item *value);
};
@@ -2602,18 +2760,20 @@ public:
*/
class Field_temporal_with_date: public Field_temporal {
protected:
- int store_TIME_with_warning(MYSQL_TIME *ltime, const ErrConv *str,
- int was_cut, int have_smth_to_conv);
- virtual void store_TIME(MYSQL_TIME *ltime) = 0;
+ virtual void store_TIME(const MYSQL_TIME *ltime) = 0;
+ void store_datetime(const Datetime &dt)
+ {
+ return store_TIME(dt.get_mysql_time());
+ }
virtual bool get_TIME(MYSQL_TIME *ltime, const uchar *pos,
- ulonglong fuzzydate) const = 0;
+ date_mode_t fuzzydate) const = 0;
bool validate_MMDD(bool not_zero_date, uint month, uint day,
- ulonglong fuzzydate) const
+ date_mode_t fuzzydate) const
{
if (!not_zero_date)
- return fuzzydate & TIME_NO_ZERO_DATE;
+ return bool(fuzzydate & TIME_NO_ZERO_DATE);
if (!month || !day)
- return fuzzydate & TIME_NO_ZERO_IN_DATE;
+ return bool(fuzzydate & TIME_NO_ZERO_IN_DATE);
return false;
}
public:
@@ -2624,20 +2784,23 @@ public:
:Field_temporal(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg)
{}
- int store(const char *to, size_t length, CHARSET_INFO *charset);
- int store(double nr);
- int store(longlong nr, bool unsigned_val);
- int store_time_dec(const MYSQL_TIME *ltime, uint dec);
- int store_decimal(const my_decimal *);
bool validate_value_in_record(THD *thd, const uchar *record) const;
};
class Field_timestamp :public Field_temporal {
protected:
- sql_mode_t sql_mode_for_timestamp(THD *thd) const;
- int store_TIME_with_warning(THD *, MYSQL_TIME *, const ErrConv *,
- int warnings, bool have_smth_to_conv);
+ int store_TIME_with_warning(THD *, const Datetime *,
+ const ErrConv *, int warn);
+ virtual void store_TIMEVAL(const timeval &tv)
+ {
+ int4store(ptr, tv.tv_sec);
+ }
+ void store_TIMESTAMP(const Timestamp &ts)
+ {
+ store_TIMEVAL(ts.tv());
+ }
+ int zero_time_stored_return_code_with_warning();
public:
Field_timestamp(uchar *ptr_arg, uint32 len_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
@@ -2652,7 +2815,7 @@ public:
int store(longlong nr, bool unsigned_val);
int store_time_dec(const MYSQL_TIME *ltime, uint dec);
int store_decimal(const my_decimal *);
- int store_timestamp(my_time_t timestamp, ulong sec_part);
+ int store_timestamp_dec(const timeval &ts, uint dec);
int save_in_field(Field *to);
double val_real(void);
longlong val_int(void);
@@ -2677,11 +2840,19 @@ public:
{
return get_timestamp(ptr, sec_part);
}
- virtual void store_TIME(my_time_t timestamp, ulong sec_part)
+ /*
+ This method is used by storage/perfschema and
+ Item_func_now_local::save_in_field().
+ */
+ void store_TIME(my_time_t ts, ulong sec_part)
{
- int4store(ptr,timestamp);
+ int warn;
+ time_round_mode_t mode= Datetime::default_round_mode(get_thd());
+ store_TIMESTAMP(Timestamp(ts, sec_part).round(decimals(), mode, &warn));
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ int store_native(const Native &value);
+ bool val_native(Native *to);
uchar *pack(uchar *to, const uchar *from,
uint max_length __attribute__((unused)))
{
@@ -2749,6 +2920,7 @@ class Field_timestamp_hires :public Field_timestamp_with_dec {
{
return Type_handler_timestamp::sec_part_bytes(dec);
}
+ void store_TIMEVAL(const timeval &tv);
public:
Field_timestamp_hires(uchar *ptr_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
@@ -2760,8 +2932,8 @@ public:
{
DBUG_ASSERT(dec);
}
+ bool val_native(Native *to);
my_time_t get_timestamp(const uchar *pos, ulong *sec_part) const;
- void store_TIME(my_time_t timestamp, ulong sec_part);
int cmp(const uchar *,const uchar *);
uint32 pack_length() const { return 4 + sec_part_bytes(dec); }
uint size_of() const { return sizeof(*this); }
@@ -2777,6 +2949,7 @@ class Field_timestampf :public Field_timestamp_with_dec {
*metadata_ptr= (uchar) decimals();
return 1;
}
+ void store_TIMEVAL(const timeval &tv);
public:
Field_timestampf(uchar *ptr_arg,
uchar *null_ptr_arg, uchar null_bit_arg,
@@ -2805,12 +2978,12 @@ public:
}
void set_max();
bool is_max();
- void store_TIME(my_time_t timestamp, ulong sec_part);
my_time_t get_timestamp(const uchar *pos, ulong *sec_part) const;
my_time_t get_timestamp(ulong *sec_part) const
{
return get_timestamp(ptr, sec_part);
}
+ bool val_native(Native *to);
uint size_of() const { return sizeof(*this); }
};
@@ -2823,7 +2996,10 @@ public:
:Field_tiny(ptr_arg, len_arg, null_ptr_arg, null_bit_arg,
unireg_check_arg, field_name_arg, 1, 1)
{}
- const Type_handler *type_handler() const { return &type_handler_year; }
+ const Type_handler *type_handler() const
+ {
+ return field_length == 2 ? &type_handler_year2 : &type_handler_year;
+ }
Copy_func *get_copy_func(const Field *from) const
{
if (eq_def(from))
@@ -2837,7 +3013,7 @@ public:
return do_field_string;
}
case TIME_RESULT:
- return do_field_temporal;
+ return do_field_date;
case DECIMAL_RESULT:
return do_field_decimal;
case REAL_RESULT:
@@ -2858,7 +3034,7 @@ public:
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate);
bool send_binary(Protocol *protocol);
Information_schema_numeric_attributes
information_schema_numeric_attributes() const
@@ -2870,18 +3046,44 @@ public:
};
-class Field_date :public Field_temporal_with_date {
- void store_TIME(MYSQL_TIME *ltime);
- bool get_TIME(MYSQL_TIME *ltime, const uchar *pos, ulonglong fuzzydate) const;
+class Field_date_common: public Field_temporal_with_date
+{
+protected:
+ int store_TIME_with_warning(const Datetime *ltime, const ErrConv *str,
+ int was_cut);
+public:
+ Field_date_common(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
+ enum utype unireg_check_arg,
+ const LEX_CSTRING *field_name_arg)
+ :Field_temporal_with_date(ptr_arg, MAX_DATE_WIDTH,
+ null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg)
+ {}
+ Copy_func *get_copy_func(const Field *from) const;
+ SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op, Item *value);
+ int store(const char *to, size_t length, CHARSET_INFO *charset);
+ int store(double nr);
+ int store(longlong nr, bool unsigned_val);
+ int store_time_dec(const MYSQL_TIME *ltime, uint dec);
+ int store_decimal(const my_decimal *);
+};
+
+
+class Field_date :public Field_date_common
+{
+ void store_TIME(const MYSQL_TIME *ltime);
+ bool get_TIME(MYSQL_TIME *ltime, const uchar *pos, date_mode_t fuzzydate) const;
public:
Field_date(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg)
- :Field_temporal_with_date(ptr_arg, MAX_DATE_WIDTH, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg) {}
+ :Field_date_common(ptr_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg) {}
const Type_handler *type_handler() const { return &type_handler_date; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; }
int reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=0; return 0; }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate)
{ return Field_date::get_TIME(ltime, ptr, fuzzydate); }
double val_real(void);
longlong val_int(void);
@@ -2905,14 +3107,15 @@ public:
};
-class Field_newdate :public Field_temporal_with_date {
- void store_TIME(MYSQL_TIME *ltime);
- bool get_TIME(MYSQL_TIME *ltime, const uchar *pos, ulonglong fuzzydate) const;
+class Field_newdate :public Field_date_common
+{
+ void store_TIME(const MYSQL_TIME *ltime);
+ bool get_TIME(MYSQL_TIME *ltime, const uchar *pos, date_mode_t fuzzydate) const;
public:
Field_newdate(uchar *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg,
enum utype unireg_check_arg, const LEX_CSTRING *field_name_arg)
- :Field_temporal_with_date(ptr_arg, MAX_DATE_WIDTH, null_ptr_arg, null_bit_arg,
- unireg_check_arg, field_name_arg)
+ :Field_date_common(ptr_arg, null_ptr_arg, null_bit_arg,
+ unireg_check_arg, field_name_arg)
{}
const Type_handler *type_handler() const { return &type_handler_newdate; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_UINT24; }
@@ -2925,7 +3128,7 @@ public:
void sort_string(uchar *buff,uint length);
uint32 pack_length() const { return 3; }
void sql_type(String &str) const;
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate)
{ return Field_newdate::get_TIME(ltime, ptr, fuzzydate); }
uint size_of() const { return sizeof(*this); }
Item *get_equal_const_item(THD *thd, const Context &ctx, Item *const_item);
@@ -2941,14 +3144,12 @@ class Field_time :public Field_temporal {
long curdays;
protected:
virtual void store_TIME(const MYSQL_TIME *ltime);
- int store_TIME_with_warning(MYSQL_TIME *ltime, const ErrConv *str,
- int was_cut, int have_smth_to_conv);
- void set_warnings(Sql_condition::enum_warning_level level,
- const ErrConv *str, int was_cut)
+ void store_TIME(const Time &t)
{
- Field_temporal::set_warnings(level, str, was_cut, MYSQL_TIMESTAMP_TIME);
+ return store_TIME(t.get_mysql_time());
}
- bool check_zero_in_date_with_warn(ulonglong fuzzydate);
+ int store_TIME_with_warning(const Time *ltime, const ErrConv *str, int warn);
+ bool check_zero_in_date_with_warn(date_mode_t fuzzydate);
static void do_field_time(Copy_field *copy);
public:
Field_time(uchar *ptr_arg, uint length_arg, uchar *null_ptr_arg,
@@ -2982,7 +3183,7 @@ public:
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate);
bool send_binary(Protocol *protocol);
int cmp(const uchar *,const uchar *);
void sort_string(uchar *buff,uint length);
@@ -3043,7 +3244,7 @@ public:
((TIME_MAX_VALUE_SECONDS+1LL)*TIME_SECOND_PART_FACTOR), dec);
}
int reset(void);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate);
int cmp(const uchar *,const uchar *);
void sort_string(uchar *buff,uint length);
uint32 pack_length() const { return Type_handler_time::hires_bytes(dec); }
@@ -3094,14 +3295,17 @@ public:
return memcmp(a_ptr, b_ptr, pack_length());
}
int reset();
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate);
uint size_of() const { return sizeof(*this); }
};
class Field_datetime :public Field_temporal_with_date {
- void store_TIME(MYSQL_TIME *ltime);
- bool get_TIME(MYSQL_TIME *ltime, const uchar *pos, ulonglong fuzzydate) const;
+ void store_TIME(const MYSQL_TIME *ltime);
+ bool get_TIME(MYSQL_TIME *ltime, const uchar *pos, date_mode_t fuzzydate) const;
+protected:
+ int store_TIME_with_warning(const Datetime *ltime, const ErrConv *str,
+ int was_cut);
public:
Field_datetime(uchar *ptr_arg, uint length_arg, uchar *null_ptr_arg,
uchar null_bit_arg, enum utype unireg_check_arg,
@@ -3115,6 +3319,11 @@ public:
}
const Type_handler *type_handler() const { return &type_handler_datetime; }
enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONGLONG; }
+ int store(const char *to, size_t length, CHARSET_INFO *charset);
+ int store(double nr);
+ int store(longlong nr, bool unsigned_val);
+ int store_time_dec(const MYSQL_TIME *ltime, uint dec);
+ int store_decimal(const my_decimal *);
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
@@ -3123,7 +3332,7 @@ public:
void sort_string(uchar *buff,uint length);
uint32 pack_length() const { return 8; }
void sql_type(String &str) const;
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate)
{ return Field_datetime::get_TIME(ltime, ptr, fuzzydate); }
int set_time();
int evaluate_update_default_function()
@@ -3193,8 +3402,8 @@ public:
DATETIME(1..6)
*/
class Field_datetime_hires :public Field_datetime_with_dec {
- void store_TIME(MYSQL_TIME *ltime);
- bool get_TIME(MYSQL_TIME *ltime, const uchar *pos, ulonglong fuzzydate) const;
+ void store_TIME(const MYSQL_TIME *ltime);
+ bool get_TIME(MYSQL_TIME *ltime, const uchar *pos, date_mode_t fuzzydate) const;
public:
Field_datetime_hires(uchar *ptr_arg, uchar *null_ptr_arg,
uchar null_bit_arg, enum utype unireg_check_arg,
@@ -3206,7 +3415,7 @@ public:
}
int cmp(const uchar *,const uchar *);
uint32 pack_length() const { return Type_handler_datetime::hires_bytes(dec); }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate)
{ return Field_datetime_hires::get_TIME(ltime, ptr, fuzzydate); }
uint size_of() const { return sizeof(*this); }
};
@@ -3216,8 +3425,8 @@ public:
DATETIME(0..6) - MySQL56 version
*/
class Field_datetimef :public Field_datetime_with_dec {
- void store_TIME(MYSQL_TIME *ltime);
- bool get_TIME(MYSQL_TIME *ltime, const uchar *pos, ulonglong fuzzydate) const;
+ void store_TIME(const MYSQL_TIME *ltime);
+ bool get_TIME(MYSQL_TIME *ltime, const uchar *pos, date_mode_t fuzzydate) const;
int save_field_metadata(uchar *metadata_ptr)
{
*metadata_ptr= (uchar) decimals();
@@ -3248,7 +3457,7 @@ public:
return memcmp(a_ptr, b_ptr, pack_length());
}
int reset();
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(MYSQL_TIME *ltime, date_mode_t fuzzydate)
{ return Field_datetimef::get_TIME(ltime, ptr, fuzzydate); }
uint size_of() const { return sizeof(*this); }
};
@@ -3357,6 +3566,7 @@ public:
int cmp(const uchar *,const uchar *);
void sort_string(uchar *buff,uint length);
void sql_type(String &str) const;
+ uint is_equal(Create_field *new_field);
virtual uchar *pack(uchar *to, const uchar *from,
uint max_length);
virtual const uchar *unpack(uchar* to, const uchar *from,
@@ -3521,6 +3731,7 @@ private:
str.append(STRING_WITH_LEN(" /*!100301 COMPRESSED*/"));
}
uint32 max_display_length() const { return field_length - 1; }
+ uint32 character_octet_length() const { return field_length - 1; }
uint32 char_length() const
{
return (field_length - 1) / field_charset->mbmaxlen;
@@ -3653,10 +3864,11 @@ public:
Information_schema_character_attributes
information_schema_character_attributes() const
{
- uint32 octets= Field_blob::octet_length();
+ uint32 octets= Field_blob::character_octet_length();
uint32 chars= octets / field_charset->mbminlen;
return Information_schema_character_attributes(octets, chars);
}
+ void make_send_field(Send_field *);
Copy_func *get_copy_func(const Field *from) const
{
/*
@@ -3685,6 +3897,7 @@ public:
!compression_method() == !from->compression_method() &&
!table->copy_blobs;
}
+ bool make_empty_rec_store_default_value(THD *thd, Item *item);
int store(const char *to, size_t length, CHARSET_INFO *charset);
using Field_str::store;
double val_real(void);
@@ -3819,7 +4032,7 @@ public:
{ return charset() == &my_charset_bin ? FALSE : TRUE; }
uint32 max_display_length() const;
uint32 char_length() const;
- uint32 octet_length() const;
+ uint32 character_octet_length() const;
uint is_equal(Create_field *new_field);
friend void TABLE::remember_blob_values(String *blob_storage);
@@ -3911,6 +4124,10 @@ public:
{
return Information_schema_character_attributes();
}
+ void make_send_field(Send_field *to)
+ {
+ Field_longstr::make_send_field(to);
+ }
bool can_optimize_range(const Item_bool_func *cond,
const Item *item,
bool is_eq_func) const;
@@ -3998,6 +4215,16 @@ public:
return save_in_field_str(to);
}
bool memcpy_field_possible(const Field *from) const { return false; }
+ void make_empty_rec_reset(THD *thd)
+ {
+ if (flags & NOT_NULL_FLAG)
+ {
+ set_notnull();
+ store((longlong) 1, true);
+ }
+ else
+ reset();
+ }
int store(const char *to,size_t length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr, bool unsigned_val);
@@ -4064,6 +4291,11 @@ public:
{
flags=(flags & ~ENUM_FLAG) | SET_FLAG;
}
+ void make_empty_rec_reset(THD *thd)
+ {
+ Field::make_empty_rec_reset(thd);
+ }
+
int store_field(Field *from) { return from->save_in_field(this); }
int store(const char *to,size_t length,CHARSET_INFO *charset);
int store(double nr) { return Field_set::store((longlong) nr, FALSE); }
@@ -4152,7 +4384,7 @@ public:
int key_cmp(const uchar *a, const uchar *b)
{ return cmp_binary((uchar *) a, (uchar *) b); }
int key_cmp(const uchar *str, uint length);
- int cmp_offset(uint row_offset);
+ int cmp_offset(my_ptrdiff_t row_offset);
bool update_min(Field *min_val, bool force_update)
{
longlong val= val_int();
@@ -4227,6 +4459,12 @@ public:
}
void hash(ulong *nr, ulong *nr2);
+ SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op, Item *value)
+ {
+ return get_mm_leaf_int(param, key_part, cond, op, value, true);
+ }
private:
virtual size_t do_last_null_byte() const;
int save_field_metadata(uchar *first_byte);
@@ -4271,21 +4509,53 @@ public:
extern const LEX_CSTRING null_clex_str;
-Field *make_field(TABLE_SHARE *share, MEM_ROOT *mem_root,
- uchar *ptr, uint32 field_length,
- uchar *null_pos, uchar null_bit,
- uint pack_flag, const Type_handler *handler,
- CHARSET_INFO *cs,
- Field::geometry_type geom_type, uint srid,
- Field::utype unireg_check,
- TYPELIB *interval, const LEX_CSTRING *field_name,
- uint32 flags);
+class Column_definition_attributes
+{
+public:
+ /*
+ At various stages in execution this can be the length of the field in bytes
+ or the maximum number of characters.
+ */
+ ulonglong length;
+ Field::utype unireg_check;
+ TYPELIB *interval; // Which interval to use
+ CHARSET_INFO *charset;
+ uint32 srid;
+ Field::geometry_type geom_type;
+ uint pack_flag;
+ Column_definition_attributes()
+ :length(0),
+ unireg_check(Field::NONE),
+ interval(NULL),
+ charset(&my_charset_bin),
+ srid(0),
+ geom_type(Field::GEOM_GEOMETRY),
+ pack_flag(0)
+ { }
+ Column_definition_attributes(const Field *field);
+ Field *make_field(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const Record_addr *rec,
+ const Type_handler *handler,
+ const LEX_CSTRING *field_name,
+ uint32 flags) const;
+ uint temporal_dec(uint intlen) const
+ {
+ return (uint) (length > intlen ? length - intlen - 1 : 0);
+ }
+ uint pack_flag_to_pack_length() const;
+ void frm_pack_basic(uchar *buff) const;
+ void frm_pack_charset(uchar *buff) const;
+ void frm_unpack_basic(const uchar *buff);
+ bool frm_unpack_charset(TABLE_SHARE *share, const uchar *buff);
+};
+
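temporal_dec() above recovers the fractional-second precision from the packed display length: everything past the integer part and the '.' separator. A small self-contained illustration, assuming the usual 19-character "YYYY-MM-DD HH:MM:SS" integer part for DATETIME:

#include <cassert>

// Mirrors Column_definition_attributes::temporal_dec().
static unsigned temporal_dec(unsigned long long length, unsigned intlen)
{
  return (unsigned) (length > intlen ? length - intlen - 1 : 0);
}

int main()
{
  assert(temporal_dec(19, 19) == 0);  // DATETIME     -> "YYYY-MM-DD HH:MM:SS"
  assert(temporal_dec(23, 19) == 3);  // DATETIME(3)  -> "YYYY-MM-DD HH:MM:SS.fff"
  assert(temporal_dec(26, 19) == 6);  // DATETIME(6)
  return 0;
}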
/*
Create field class for CREATE TABLE
*/
class Column_definition: public Sql_alloc,
- public Type_handler_hybrid_field_type
+ public Type_handler_hybrid_field_type,
+ public Column_definition_attributes
{
/**
Create "interval" from "interval_list".
@@ -4340,11 +4610,6 @@ public:
WITHOUT_VERSIONING
};
Item *on_update; // ON UPDATE NOW()
- /*
- At various stages in execution this can be length of field in bytes or
- max number of characters.
- */
- ulonglong length;
field_visibility_t invisible;
/*
The value of `length' as set by parser: is the number of characters
@@ -4352,15 +4617,9 @@ public:
*/
uint32 char_length;
uint decimals, flags, pack_length, key_length;
- Field::utype unireg_check;
- TYPELIB *interval; // Which interval to use
List<String> interval_list;
- CHARSET_INFO *charset;
- uint32 srid;
- Field::geometry_type geom_type;
engine_option_value *option_list;
- uint pack_flag;
/*
This is additional data provided for any computed (virtual) field.
@@ -4374,17 +4633,17 @@ public:
enum_column_versioning versioning;
+ Table_period_info *period;
+
Column_definition()
:Type_handler_hybrid_field_type(&type_handler_null),
compression_method_ptr(0),
comment(null_clex_str),
- on_update(NULL), length(0), invisible(VISIBLE), decimals(0),
- flags(0), pack_length(0), key_length(0), unireg_check(Field::NONE),
- interval(0), charset(&my_charset_bin),
- srid(0), geom_type(Field::GEOM_GEOMETRY),
- option_list(NULL), pack_flag(0),
+ on_update(NULL), invisible(VISIBLE), char_length(0), decimals(0),
+ flags(0), pack_length(0), key_length(0),
+ option_list(NULL),
vcol_info(0), default_value(0), check_constraint(0),
- versioning(VERSIONING_NOT_SET)
+ versioning(VERSIONING_NOT_SET), period(NULL)
{
interval_list.empty();
}
@@ -4491,6 +4750,7 @@ public:
bool fix_attributes_bit();
bool check(THD *thd);
+ bool validate_check_constraint(THD *thd);
bool stored_in_db() const { return !vcol_info || vcol_info->stored_in_db; }
@@ -4512,20 +4772,18 @@ public:
}
Field *make_field(TABLE_SHARE *share, MEM_ROOT *mem_root,
- uchar *ptr, uchar *null_pos, uchar null_bit,
+ const Record_addr *addr,
const LEX_CSTRING *field_name_arg) const
{
- return ::make_field(share, mem_root, ptr,
- (uint32)length, null_pos, null_bit,
- pack_flag, type_handler(), charset,
- geom_type, srid, unireg_check, interval,
- field_name_arg, flags);
+ return Column_definition_attributes::make_field(share, mem_root, addr,
+ type_handler(),
+ field_name_arg, flags);
}
Field *make_field(TABLE_SHARE *share, MEM_ROOT *mem_root,
const LEX_CSTRING *field_name_arg) const
{
- return make_field(share, mem_root, (uchar *) 0, (uchar *) "", 0,
- field_name_arg);
+ Record_addr addr(true);
+ return make_field(share, mem_root, &addr, field_name_arg);
}
/* Return true if default is an expression that must be saved explicitely */
bool has_default_expression();
@@ -4737,7 +4995,7 @@ public:
/** structure with parsed options (for comparing fields in ALTER TABLE) */
ha_field_option_struct *option_struct;
uint offset;
- uint8 interval_id; // For rea_create_table
+ uint8 interval_id;
bool create_if_not_exists; // Used in ALTER TABLE IF NOT EXISTS
Create_field():
@@ -4764,15 +5022,86 @@ public:
A class for sending info to the client
*/
-class Send_field :public Sql_alloc {
- public:
+class Send_field :public Sql_alloc,
+ public Type_handler_hybrid_field_type
+{
+public:
const char *db_name;
const char *table_name,*org_table_name;
LEX_CSTRING col_name, org_col_name;
ulong length;
uint flags, decimals;
- enum_field_types type;
Send_field() {}
+ Send_field(Field *field)
+ {
+ field->make_send_field(this);
+ DBUG_ASSERT(table_name != 0);
+ normalize();
+ }
+ Send_field(THD *thd, Item *item);
+ Send_field(Field *field,
+ const char *db_name_arg,
+ const char *table_name_arg)
+ :Type_handler_hybrid_field_type(field->type_handler()),
+ db_name(db_name_arg),
+ table_name(table_name_arg),
+ org_table_name(table_name_arg),
+ col_name(field->field_name),
+ org_col_name(field->field_name),
+ length(field->field_length),
+ flags(field->table->maybe_null ?
+ (field->flags & ~NOT_NULL_FLAG) : field->flags),
+ decimals(field->decimals())
+ {
+ normalize();
+ }
+
+private:
+ void normalize()
+ {
+ /* limit number of decimals for float and double */
+ if (type_handler()->field_type() == MYSQL_TYPE_FLOAT ||
+ type_handler()->field_type() == MYSQL_TYPE_DOUBLE)
+ set_if_smaller(decimals, FLOATING_POINT_DECIMALS);
+ }
+public:
+ // This should move to Type_handler eventually
+ uint32 max_char_length(CHARSET_INFO *cs) const
+ {
+ return type_handler()->field_type() >= MYSQL_TYPE_TINY_BLOB &&
+ type_handler()->field_type() <= MYSQL_TYPE_BLOB ?
+ length / cs->mbminlen :
+ length / cs->mbmaxlen;
+ }
+ uint32 max_octet_length(CHARSET_INFO *from, CHARSET_INFO *to) const
+ {
+ /*
+ For TEXT/BLOB columns, field_length describes the maximum data
+ length in bytes. There is no limit to the number of characters
+ that a TEXT column can store, as long as the data fits into
+ the designated space.
+ For the rest of textual columns, field_length is evaluated as
+ char_count * mbmaxlen, where character count is taken from the
+ definition of the column. In other words, the maximum number
+ of characters here is limited by the column definition.
+
+ When one has a LONG TEXT column with a single-byte
+ character set, and the connection character set is multi-byte, the
+ client may get fields longer than UINT_MAX32, due to
+ <character set column> -> <character set connection> conversion.
+ In that case column max length would not fit into the 4 bytes
+ reserved for it in the protocol. So we cut it here to UINT_MAX32.
+ */
+ return char_to_byte_length_safe(max_char_length(from), to->mbmaxlen);
+ }
+
+ // This should move to Type_handler eventually
+ bool is_sane() const
+ {
+ return (decimals <= FLOATING_POINT_DECIMALS ||
+ (type_handler()->field_type() != MYSQL_TYPE_FLOAT &&
+ type_handler()->field_type() != MYSQL_TYPE_DOUBLE));
+ }
};
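The max_octet_length() comment above boils down to "characters times mbmaxlen, saturated at UINT_MAX32". A hedged sketch of that saturation; the helper below only approximates what char_to_byte_length_safe() is assumed to do and is not the server's implementation:

#include <cassert>

static const unsigned long UINT_MAX32_SKETCH= 0xFFFFFFFFUL;

// Multiply a character count by the target charset's mbmaxlen without
// overflowing the 4-byte length slot in the client/server protocol.
static unsigned long char_to_byte_length_saturated(unsigned long char_length,
                                                   unsigned mbmaxlen)
{
  unsigned long long bytes= (unsigned long long) char_length * mbmaxlen;
  return bytes > UINT_MAX32_SKETCH ? UINT_MAX32_SKETCH : (unsigned long) bytes;
}

int main()
{
  // A LONGTEXT column in a single-byte charset, converted to a 4-byte
  // connection charset, would overflow 32 bits; the advertised length is clamped.
  assert(char_to_byte_length_saturated(0xFFFFFFFFUL, 4) == UINT_MAX32_SKETCH);
  assert(char_to_byte_length_saturated(255, 4) == 1020);
  return 0;
}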
@@ -4847,7 +5176,7 @@ bool check_expression(Virtual_column_info *vcol, LEX_CSTRING *name,
#define FIELDFLAG_DEC_SHIFT 8
#define FIELDFLAG_MAX_DEC 63U
-#define MTYP_TYPENR(type) (type & 127U) /* Remove bits from type */
+#define MTYP_TYPENR(type) ((type) & 127U) // Remove bits from type
#define f_is_dec(x) ((x) & FIELDFLAG_DECIMAL)
#define f_is_num(x) ((x) & FIELDFLAG_NUMBER)
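The one-character MTYP_TYPENR change above is the usual macro-hygiene fix: without parentheses around the argument, an expression argument binds to '&' by precedence and the mask applies to only part of it. A minimal demonstration with made-up values:

#include <cassert>

#define MTYP_TYPENR_OLD(type) (type & 127U)   /* argument not parenthesized */
#define MTYP_TYPENR_NEW(type) ((type) & 127U) /* the fixed form */

int main()
{
  unsigned high= 128U, low= 3U;
  // '|' binds more loosely than '&', so the old form masks only 'low'
  // and lets the high bit leak through.
  assert(MTYP_TYPENR_OLD(high | low) == 131U);
  assert(MTYP_TYPENR_NEW(high | low) == 3U);
  return 0;
}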
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index dddb2182051..2f56be60dd6 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -413,8 +413,8 @@ void Field::do_field_real(Copy_field *copy)
void Field::do_field_decimal(Copy_field *copy)
{
- my_decimal value;
- copy->to_field->store_decimal(copy->from_field->val_decimal(&value));
+ my_decimal value(copy->from_field);
+ copy->to_field->store_decimal(&value);
}
@@ -425,24 +425,32 @@ void Field::do_field_timestamp(Copy_field *copy)
}
-void Field::do_field_temporal(Copy_field *copy)
+void Field::do_field_temporal(Copy_field *copy, date_mode_t fuzzydate)
{
MYSQL_TIME ltime;
// TODO: we now need to check result
- if (copy->from_field->get_date(&ltime, 0))
+ if (copy->from_field->get_date(&ltime, fuzzydate))
copy->to_field->reset();
else
copy->to_field->store_time_dec(&ltime, copy->from_field->decimals());
}
+void Field::do_field_datetime(Copy_field *copy)
+{
+ return do_field_temporal(copy, Datetime::Options(TIME_CONV_NONE, current_thd));
+}
+
+
+void Field::do_field_date(Copy_field *copy)
+{
+ return do_field_temporal(copy, Date::Options(TIME_CONV_NONE));
+}
+
+
void Field_time::do_field_time(Copy_field *copy)
{
- MYSQL_TIME ltime;
- if (copy->from_field->get_date(&ltime, TIME_TIME_ONLY))
- copy->to_field->reset();
- else
- copy->to_field->store_time_dec(&ltime, copy->from_field->decimals());
+ return do_field_temporal(copy, Time::Options(current_thd));
}
@@ -720,13 +728,20 @@ void Copy_field::set(Field *to,Field *from,bool save)
Field::Copy_func *Field_timestamp::get_copy_func(const Field *from) const
{
Field::Copy_func *copy= Field_temporal::get_copy_func(from);
- if (copy == do_field_temporal && from->type() == MYSQL_TYPE_TIMESTAMP)
+ if (copy == do_field_datetime && from->type() == MYSQL_TYPE_TIMESTAMP)
return do_field_timestamp;
else
return copy;
}
+Field::Copy_func *Field_date_common::get_copy_func(const Field *from) const
+{
+ Field::Copy_func *copy= Field_temporal::get_copy_func(from);
+ return copy == do_field_datetime ? do_field_date : copy;
+}
+
+
Field::Copy_func *Field_temporal::get_copy_func(const Field *from) const
{
/* If types are not 100% identical then convert through get_date() */
@@ -739,7 +754,7 @@ Field::Copy_func *Field_temporal::get_copy_func(const Field *from) const
if (!eq_def(from) ||
(table->in_use->variables.sql_mode &
(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE)))
- return do_field_temporal;
+ return do_field_datetime;
return get_identical_copy_func();
}
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 98c4e3b860b..924e51b58cc 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -55,9 +55,7 @@ static bool save_index(Sort_param *param, uint count,
static uint suffix_length(ulong string_length);
static uint sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
bool *multi_byte_charset);
-static SORT_ADDON_FIELD *get_addon_fields(ulong max_length_for_sort_data,
- Field **ptabfield,
- uint sortlength,
+static SORT_ADDON_FIELD *get_addon_fields(TABLE *table, uint sortlength,
LEX_STRING *addon_buf);
static void unpack_addon_fields(struct st_sort_addon_field *addon_field,
uchar *buff, uchar *buff_end);
@@ -66,7 +64,6 @@ static bool check_if_pq_applicable(Sort_param *param, SORT_INFO *info,
ha_rows records, size_t memory_available);
void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
- ulong max_length_for_sort_data,
ha_rows maxrows, bool sort_positions)
{
DBUG_ASSERT(addon_field == 0 && addon_buf.length == 0);
@@ -80,8 +77,7 @@ void Sort_param::init_for_filesort(uint sortlen, TABLE *table,
Get the descriptors of all fields whose values are appended
to sorted fields and get its total length in addon_buf.length
*/
- addon_field= get_addon_fields(max_length_for_sort_data,
- table->field, sort_length, &addon_buf);
+ addon_field= get_addon_fields(table, sort_length, &addon_buf);
}
if (addon_field)
{
@@ -185,9 +181,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
param.init_for_filesort(sortlength(thd, filesort->sortorder, s_length,
&multi_byte_charset),
- table,
- thd->variables.max_length_for_sort_data,
- max_rows, filesort->sort_positions);
+ table, max_rows, filesort->sort_positions);
sort->addon_buf= param.addon_buf;
sort->addon_field= param.addon_field;
@@ -253,7 +247,7 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
}
if (memory_available < min_sort_memory)
{
- my_error(ER_OUT_OF_SORTMEMORY,MYF(ME_ERROR + ME_FATALERROR));
+ my_error(ER_OUT_OF_SORTMEMORY,MYF(ME_ERROR_LOG + ME_FATAL));
goto err;
}
tracker->report_sort_buffer_size(sort->sort_buffer_size());
@@ -705,7 +699,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
uchar *ref_pos, *next_pos, ref_buff[MAX_REFLENGTH];
TABLE *sort_form;
handler *file;
- MY_BITMAP *save_read_set, *save_write_set, *save_vcol_set;
+ MY_BITMAP *save_read_set, *save_write_set;
Item *sort_cond;
ha_rows retval;
DBUG_ENTER("find_all_keys");
@@ -740,13 +734,11 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
/* Remember original bitmaps */
save_read_set= sort_form->read_set;
save_write_set= sort_form->write_set;
- save_vcol_set= sort_form->vcol_set;
/* Set up temporary column read map for columns used by sort */
DBUG_ASSERT(save_read_set != &sort_form->tmp_set);
bitmap_clear_all(&sort_form->tmp_set);
- sort_form->column_bitmaps_set(&sort_form->tmp_set, &sort_form->tmp_set,
- &sort_form->tmp_set);
+ sort_form->column_bitmaps_set(&sort_form->tmp_set, &sort_form->tmp_set);
register_used_fields(param);
if (quick_select)
select->quick->add_used_key_part_to_set();
@@ -804,16 +796,12 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
*/
MY_BITMAP *tmp_read_set= sort_form->read_set;
MY_BITMAP *tmp_write_set= sort_form->write_set;
- MY_BITMAP *tmp_vcol_set= sort_form->vcol_set;
if (select->cond->with_subquery())
- sort_form->column_bitmaps_set(save_read_set, save_write_set,
- save_vcol_set);
+ sort_form->column_bitmaps_set(save_read_set, save_write_set);
write_record= (select->skip_record(thd) > 0);
if (select->cond->with_subquery())
- sort_form->column_bitmaps_set(tmp_read_set,
- tmp_write_set,
- tmp_vcol_set);
+ sort_form->column_bitmaps_set(tmp_read_set, tmp_write_set);
}
else
write_record= true;
@@ -859,7 +847,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
}
/* Signal we should use original column read and write maps */
- sort_form->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
+ sort_form->column_bitmaps_set(save_read_set, save_write_set);
if (unlikely(thd->is_error()))
DBUG_RETURN(HA_POS_ERROR);
@@ -867,8 +855,8 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos));
if (unlikely(error != HA_ERR_END_OF_FILE))
{
- file->print_error(error,MYF(ME_ERROR | ME_WAITTANG)); // purecov: inspected
- DBUG_RETURN(HA_POS_ERROR); /* purecov: inspected */
+ file->print_error(error,MYF(ME_ERROR_LOG));
+ DBUG_RETURN(HA_POS_ERROR);
}
if (indexpos && idx &&
write_keys(param, fs_info, idx, buffpek_pointers, tempfile))
@@ -880,7 +868,7 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
DBUG_RETURN(retval);
err:
- sort_form->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
+ sort_form->column_bitmaps_set(save_read_set, save_write_set);
DBUG_RETURN(HA_POS_ERROR);
} /* find_all_keys */
@@ -1058,7 +1046,10 @@ Type_handler_temporal_result::make_sort_key(uchar *to, Item *item,
Sort_param *param) const
{
MYSQL_TIME buf;
- if (item->get_date_result(&buf, TIME_INVALID_DATES))
+ // This is a temporal type. No nanoseconds. Rounding mode is not important.
+ DBUG_ASSERT(item->cmp_type() == TIME_RESULT);
+ static const Temporal::Options opt(TIME_INVALID_DATES, TIME_FRAC_NONE);
+ if (item->get_date_result(current_thd, &buf, opt))
{
DBUG_ASSERT(item->maybe_null);
DBUG_ASSERT(item->null_value);
@@ -1072,6 +1063,39 @@ Type_handler_temporal_result::make_sort_key(uchar *to, Item *item,
void
+Type_handler_timestamp_common::make_sort_key(uchar *to, Item *item,
+ const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const
+{
+ THD *thd= current_thd;
+ uint binlen= my_timestamp_binary_length(item->decimals);
+ Timestamp_or_zero_datetime_native_null native(thd, item);
+ if (native.is_null() || native.is_zero_datetime())
+ {
+ // NULL or '0000-00-00 00:00:00'
+ bzero(to, item->maybe_null ? binlen + 1 : binlen);
+ }
+ else
+ {
+ if (item->maybe_null)
+ *to++= 1;
+ if (native.length() != binlen)
+ {
+ /*
+ Some items can return native representation with a different
+ number of fractional digits, e.g.: GREATEST(ts_3, ts_4) can
+ return a value with 3 fractional digits, although its fractional
+ precision is 4. Re-pack with a proper precision now.
+ */
+ Timestamp(native).to_native(&native, item->datetime_precision(thd));
+ }
+ DBUG_ASSERT(native.length() == binlen);
+ memcpy((char *) to, native.ptr(), binlen);
+ }
+}
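The sort image written above is the native binary TIMESTAMP representation, whose length depends only on the declared fractional precision. A standalone sketch of that length computation follows (the 4 + (decimals+1)/2 layout is stated here as an assumption about what my_timestamp_binary_length() returns, not taken from this patch):

#include <cstdio>

// Assumption: the native TIMESTAMP image is 4 bytes of seconds plus
// (decimals + 1) / 2 bytes of packed fractional digits.
static unsigned timestamp_binary_length(unsigned decimals)
{
  return 4 + (decimals + 1) / 2;
}

int main()
{
  for (unsigned dec= 0; dec <= 6; dec++)
    printf("decimals=%u -> %u bytes\n", dec, timestamp_binary_length(dec));
  return 0;
}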
+
+
+void
Type_handler::make_sort_key_longlong(uchar *to,
bool maybe_null,
bool null_value,
@@ -1117,9 +1141,8 @@ Type_handler_decimal_result::make_sort_key(uchar *to, Item *item,
}
*to++= 1;
}
- my_decimal2binary(E_DEC_FATAL_ERROR, dec_val, to,
- item->max_length - (item->decimals ? 1 : 0),
- item->decimals);
+ dec_val->to_binary(to, item->max_length - (item->decimals ? 1 : 0),
+ item->decimals);
}
@@ -1879,6 +1902,15 @@ Type_handler_temporal_result::sortlength(THD *thd,
void
+Type_handler_timestamp_common::sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *sortorder) const
+{
+ sortorder->length= my_timestamp_binary_length(item->decimals);
+}
+
+
+void
Type_handler_int_result::sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *sortorder) const
@@ -1965,6 +1997,30 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
return length;
}
+bool filesort_use_addons(TABLE *table, uint sortlength,
+ uint *length, uint *fields, uint *null_fields)
+{
+ Field **pfield, *field;
+ *length= *fields= *null_fields= 0;
+
+ for (pfield= table->field; (field= *pfield) ; pfield++)
+ {
+ if (!bitmap_is_set(table->read_set, field->field_index))
+ continue;
+ if (field->flags & BLOB_FLAG)
+ return false;
+ (*length)+= field->max_packed_col_length(field->pack_length());
+ if (field->maybe_null())
+ (*null_fields)++;
+ (*fields)++;
+ }
+ if (!*fields)
+ return false;
+ (*length)+= (*null_fields+7)/8;
+
+ return *length + sortlength <
+ table->in_use->variables.max_length_for_sort_data;
+}
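filesort_use_addons() above centralises the check for whether whole rows can be carried along with the sort keys instead of row ids. A standalone sketch of the same arithmetic, with made-up column lengths and a made-up max_length_for_sort_data value:

#include <cstdio>
#include <vector>

int main()
{
  // Hypothetical packed lengths of the columns present in the read_set.
  std::vector<unsigned> packed_field_lengths= {4, 32, 8};
  unsigned null_fields= 1;                     // one nullable column
  unsigned sortlength= 12;                     // length of the sort key
  unsigned max_length_for_sort_data= 1024;     // assumed session variable value

  unsigned length= 0;
  for (unsigned l : packed_field_lengths)
    length+= l;
  length+= (null_fields + 7) / 8;              // bytes for the null bitmap

  bool use_addons= length + sortlength < max_length_for_sort_data;
  printf("addon length=%u, use addon fields=%d\n", length, (int) use_addons);
  return 0;
}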
/**
Get descriptors of fields appended to sorted fields and
@@ -1972,7 +2028,7 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
The function first finds out what fields are used in the result set.
Then it calculates the length of the buffer to store the values of
- these fields together with the value of sort values.
+ these fields together with the value of sort values.
If the calculated length is not greater than max_length_for_sort_data
the function allocates memory for an array of descriptors containing
layouts for the values of the non-sorted fields in the buffer and
@@ -1994,16 +2050,13 @@ sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length,
*/
static SORT_ADDON_FIELD *
-get_addon_fields(ulong max_length_for_sort_data,
- Field **ptabfield, uint sortlength, LEX_STRING *addon_buf)
+get_addon_fields(TABLE *table, uint sortlength, LEX_STRING *addon_buf)
{
Field **pfield;
Field *field;
SORT_ADDON_FIELD *addonf;
- uint length= 0;
- uint fields= 0;
- uint null_fields= 0;
- MY_BITMAP *read_set= (*ptabfield)->table->read_set;
+ uint length, fields, null_fields;
+ MY_BITMAP *read_set= table->read_set;
DBUG_ENTER("get_addon_fields");
/*
@@ -2012,40 +2065,28 @@ get_addon_fields(ulong max_length_for_sort_data,
Note for future refinement:
This is too strong a condition.
Actually we need only the fields referred in the
- result set. And for some of them it makes sense to use
+ result set. And for some of them it makes sense to use
the values directly from sorted fields.
But beware the case when item->cmp_type() != item->result_type()
*/
addon_buf->str= 0;
addon_buf->length= 0;
- for (pfield= ptabfield; (field= *pfield) ; pfield++)
- {
- if (!bitmap_is_set(read_set, field->field_index))
- continue;
- if (field->flags & BLOB_FLAG)
- DBUG_RETURN(0);
- length+= field->max_packed_col_length(field->pack_length());
- if (field->maybe_null())
- null_fields++;
- fields++;
- }
- if (!fields)
- DBUG_RETURN(0);
- length+= (null_fields+7)/8;
+ // see remove_const() for HA_SLOW_RND_POS explanation
+ if (table->file->ha_table_flags() & HA_SLOW_RND_POS)
+ sortlength= 0;
- if (length+sortlength > max_length_for_sort_data ||
- !my_multi_malloc(MYF(MY_WME | MY_THREAD_SPECIFIC),
- &addonf, sizeof(SORT_ADDON_FIELD) * (fields+1),
- &addon_buf->str, length,
- NullS))
+ if (!filesort_use_addons(table, sortlength, &length, &fields, &null_fields) ||
+ !my_multi_malloc(MYF(MY_WME | MY_THREAD_SPECIFIC), &addonf,
+ sizeof(SORT_ADDON_FIELD) * (fields+1),
+ &addon_buf->str, length, NullS))
DBUG_RETURN(0);
addon_buf->length= length;
length= (null_fields+7)/8;
null_fields= 0;
- for (pfield= ptabfield; (field= *pfield) ; pfield++)
+ for (pfield= table->field; (field= *pfield) ; pfield++)
{
if (!bitmap_is_set(read_set, field->field_index))
continue;
@@ -2067,7 +2108,7 @@ get_addon_fields(ulong max_length_for_sort_data,
addonf++;
}
addonf->field= 0; // Put end marker
-
+
DBUG_PRINT("info",("addon_length: %d",length));
DBUG_RETURN(addonf-fields);
}
diff --git a/sql/filesort.h b/sql/filesort.h
index bd1d81f91ef..359f44a3907 100644
--- a/sql/filesort.h
+++ b/sql/filesort.h
@@ -161,6 +161,9 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort,
Filesort_tracker* tracker, JOIN *join=NULL,
table_map first_table_bit=0);
+bool filesort_use_addons(TABLE *table, uint sortlength,
+ uint *length, uint *fields, uint *null_fields);
+
void change_double_for_sort(double nr,uchar *to);
#endif /* FILESORT_INCLUDED */
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 98a3259a39b..9dcd57a5f82 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1367,7 +1367,7 @@ bool print_admin_msg(THD* thd, uint len,
protocol->store(msgbuf, msg_length, system_charset_info);
if (protocol->write())
{
- sql_print_error("Failed on my_net_write, writing to stderr instead: %s\n",
+ sql_print_error("Failed on my_net_write, writing to stderr instead: %s",
msgbuf);
goto err;
}
@@ -2828,6 +2828,8 @@ bool ha_partition::create_handler_file(const char *name)
}
}
(void) mysql_file_close(file, MYF(0));
+ if (result)
+ mysql_file_delete(key_file_partition, file_name, MYF(MY_WME));
}
else
result= TRUE;
@@ -5281,7 +5283,7 @@ bool ha_partition::init_record_priority_queue()
/* Initialize priority queue, initialized to reading forward. */
int (*cmp_func)(void *, uchar *, uchar *);
void *cmp_arg= (void*) this;
- if (!m_using_extended_keys && !(table_flags() & HA_CMP_REF_IS_EXPENSIVE))
+ if (!m_using_extended_keys && !(table_flags() & HA_SLOW_CMP_REF))
cmp_func= cmp_key_rowid_part_id;
else
cmp_func= cmp_key_part_id;
@@ -5759,12 +5761,6 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index,
get_partition_set(table, buf, index, &m_start_key, &m_part_spec);
- /*
- We have either found exactly 1 partition
- (in which case start_part == end_part)
- or no matching partitions (start_part > end_part)
- */
- DBUG_ASSERT(m_part_spec.start_part >= m_part_spec.end_part);
/* The start part must be marked as used. */
DBUG_ASSERT(m_part_spec.start_part > m_part_spec.end_part ||
bitmap_is_set(&(m_part_info->read_partitions),
@@ -6266,19 +6262,22 @@ ha_rows ha_partition::multi_range_read_info_const(uint keyno,
uint ret_mrr_mode= 0;
range_seq_t seq_it;
part_id_range save_part_spec;
+ Cost_estimate part_cost;
DBUG_ENTER("ha_partition::multi_range_read_info_const");
DBUG_PRINT("enter", ("partition this: %p", this));
m_mrr_new_full_buffer_size= 0;
save_part_spec= m_part_spec;
+ cost->reset();
+
seq_it= seq->init(seq_init_param, n_ranges, *mrr_mode);
if (unlikely((error= multi_range_key_create_key(seq, seq_it))))
{
if (likely(error == HA_ERR_END_OF_FILE)) // No keys in range
{
rows= 0;
- goto calc_cost;
+ goto end;
}
/*
This error means that we can't do multi_range_read for the moment
@@ -6307,18 +6306,20 @@ ha_rows ha_partition::multi_range_read_info_const(uint keyno,
ha_rows tmp_rows;
uint tmp_mrr_mode;
m_mrr_buffer_size[i]= 0;
+ part_cost.reset();
tmp_mrr_mode= *mrr_mode;
tmp_rows= (*file)->
multi_range_read_info_const(keyno, &m_part_seq_if,
&m_partition_part_key_multi_range_hld[i],
m_part_mrr_range_length[i],
&m_mrr_buffer_size[i],
- &tmp_mrr_mode, cost);
+ &tmp_mrr_mode, &part_cost);
if (tmp_rows == HA_POS_ERROR)
{
m_part_spec= save_part_spec;
DBUG_RETURN(HA_POS_ERROR);
}
+ cost->add(&part_cost);
rows+= tmp_rows;
ret_mrr_mode|= tmp_mrr_mode;
m_mrr_new_full_buffer_size+= m_mrr_buffer_size[i];
@@ -6326,15 +6327,8 @@ ha_rows ha_partition::multi_range_read_info_const(uint keyno,
} while (*(++file));
*mrr_mode= ret_mrr_mode;
-calc_cost:
+end:
m_part_spec= save_part_spec;
- cost->reset();
- cost->avg_io_cost= 1;
- if ((*mrr_mode & HA_MRR_INDEX_ONLY) && rows > 2)
- cost->io_count= keyread_time(keyno, n_ranges, (uint) rows);
- else
- cost->io_count= read_time(keyno, n_ranges, rows);
- cost->cpu_cost= (double) rows / TIME_FOR_COMPARE + 0.01;
DBUG_RETURN(rows);
}
@@ -6347,10 +6341,13 @@ ha_rows ha_partition::multi_range_read_info(uint keyno, uint n_ranges,
{
uint i;
handler **file;
- ha_rows rows;
+ ha_rows rows= 0;
+ Cost_estimate part_cost;
DBUG_ENTER("ha_partition::multi_range_read_info");
DBUG_PRINT("enter", ("partition this: %p", this));
+ cost->reset();
+
m_mrr_new_full_buffer_size= 0;
file= m_file;
do
@@ -6358,22 +6355,20 @@ ha_rows ha_partition::multi_range_read_info(uint keyno, uint n_ranges,
i= (uint)(file - m_file);
if (bitmap_is_set(&(m_part_info->read_partitions), (i)))
{
+ ha_rows tmp_rows;
m_mrr_buffer_size[i]= 0;
- if ((rows= (*file)->multi_range_read_info(keyno, n_ranges, keys,
- key_parts,
- &m_mrr_buffer_size[i],
- mrr_mode, cost)))
+ part_cost.reset();
+ if ((tmp_rows= (*file)->multi_range_read_info(keyno, n_ranges, keys,
+ key_parts,
+ &m_mrr_buffer_size[i],
+ mrr_mode, &part_cost)))
DBUG_RETURN(rows);
+ cost->add(&part_cost);
+ rows+= tmp_rows;
m_mrr_new_full_buffer_size+= m_mrr_buffer_size[i];
}
} while (*(++file));
- cost->reset();
- cost->avg_io_cost= 1;
- if (*mrr_mode & HA_MRR_INDEX_ONLY)
- cost->io_count= keyread_time(keyno, n_ranges, (uint) rows);
- else
- cost->io_count= read_time(keyno, n_ranges, rows);
DBUG_RETURN(0);
}
@@ -6815,7 +6810,7 @@ FT_INFO *ha_partition::ft_init_ext(uint flags, uint inx, String *key)
sizeof(FT_INFO *) * m_tot_parts,
NullS)))
{
- my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
+ my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATAL));
DBUG_RETURN(NULL);
}
ft_target->part_ft_info= tmp_ft_info;
@@ -9342,6 +9337,43 @@ double ha_partition::scan_time()
/**
+ @brief
+ Calculate time to scan the given index (index only scan)
+
+ @param inx Index number to scan
+
+ @return time for scanning index inx
+*/
+
+double ha_partition::key_scan_time(uint inx)
+{
+ double scan_time= 0;
+ uint i;
+ DBUG_ENTER("ha_partition::key_scan_time");
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
+ scan_time+= m_file[i]->key_scan_time(inx);
+ DBUG_RETURN(scan_time);
+}
+
+
+double ha_partition::keyread_time(uint inx, uint ranges, ha_rows rows)
+{
+ double read_time= 0;
+ uint i;
+ DBUG_ENTER("ha_partition::keyread_time");
+ if (!ranges)
+ DBUG_RETURN(handler::keyread_time(inx, ranges, rows));
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
+ read_time+= m_file[i]->keyread_time(inx, ranges, rows);
+ DBUG_RETURN(read_time);
+}
+
+
+/**
Find number of records in a range.
@param inx Index number
@param min_key Start of range
@@ -9778,7 +9810,7 @@ void ha_partition::print_error(int error, myf errflag)
append_row_to_str(str);
/* Log this error, so the DBA can notice it and fix it! */
- sql_print_error("Table '%-192s' corrupted: row in wrong partition: %s\n"
+ sql_print_error("Table '%-192s' corrupted: row in wrong partition: %s"
"Please REPAIR the table!",
table->s->table_name.str,
str.c_ptr_safe());
@@ -10695,8 +10727,6 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair)
{
/* Only need to read the partitioning fields. */
bitmap_union(table->read_set, &m_part_info->full_part_field_set);
- if (table->vcol_set)
- bitmap_union(table->vcol_set, &m_part_info->full_part_field_set);
}
if ((result= m_file[read_part_id]->ha_rnd_init(1)))
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index e1f1503f8ec..5913b3d2aa8 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -928,6 +928,10 @@ public:
*/
virtual double scan_time();
+ virtual double key_scan_time(uint inx);
+
+ virtual double keyread_time(uint inx, uint ranges, ha_rows rows);
+
/*
The next method will never be called if you do not implement indexes.
*/
@@ -1056,10 +1060,6 @@ public:
with hidden primary key)
(No handler has this limitation currently)
- HA_WANTS_PRIMARY_KEY:
- Can't define a table without primary key except sequences
- (Only InnoDB has this when using innodb_force_primary_key == ON)
-
HA_STATS_RECORDS_IS_EXACT:
Does the counter of records after the info call specify an exact
value or not. If it does this flag is set.
diff --git a/sql/handle_connections_win.cc b/sql/handle_connections_win.cc
new file mode 100644
index 00000000000..e5b601d7fe0
--- /dev/null
+++ b/sql/handle_connections_win.cc
@@ -0,0 +1,560 @@
+/* Copyright (c) 2018 MariaDB Corporation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
+
+/* Accepting connections on Windows */
+
+#include <my_global.h>
+#include <sql_class.h>
+#include <sql_connect.h>
+#include <mysqld.h>
+#include <mswsock.h>
+#include <mysql/psi/mysql_socket.h>
+#include <sddl.h>
+
+#include <handle_connections_win.h>
+
+/* From mysqld.cc */
+extern HANDLE hEventShutdown;
+extern MYSQL_SOCKET base_ip_sock, extra_ip_sock;
+#ifdef HAVE_POOL_OF_THREADS
+extern PTP_CALLBACK_ENVIRON get_threadpool_win_callback_environ();
+extern void tp_win_callback_prolog();
+#else
+#define get_threadpool_win_callback_environ() 0
+#define tp_win_callback_prolog() do{}while(0)
+#endif
+static SECURITY_ATTRIBUTES pipe_security;
+
+/**
+ Abstract base class for accepting a new connection asynchronously
+ (i.e. the accept() operation can be posted and the result retrieved later),
+ and for creating a new connection.
+*/
+
+struct Listener
+{
+ /** Windows handle of the Listener.
+ Subclasses would use SOCKET or named pipe handle
+ */
+ HANDLE m_handle;
+ /** Required for all async IO*/
+ OVERLAPPED m_overlapped;
+
+ /** Create new listener
+ @param handle - @see m_handle
+ @param wait_handle - usually, event handle or INVALID_HANDLE_VALUE
+ @see wait_handle
+ */
+ Listener(HANDLE handle, HANDLE wait_handle):
+ m_handle(handle), m_overlapped()
+ {
+ m_overlapped.hEvent= wait_handle;
+ }
+
+ /**
+ If not NULL, this handle can be used in WaitForSingleObject()/WaitForMultipleObjects().
+ This handle will be closed when the object is destroyed.
+
+ If NULL, the completion notification happens in the threadpool.
+ */
+ HANDLE wait_handle()
+ {
+ return m_overlapped.hEvent;
+ }
+
+ /* Start waiting for new client connection. */
+ virtual void begin_accept()= 0;
+
+ /**
+ Completion callback, called whenever the IO posted by begin_accept is finished.
+ The Listener then needs to create a new THD (or call the scheduler so it creates one).
+
+ @param success - whether the IO completed successfully
+ */
+ virtual void completion_callback(bool success)= 0;
+
+ /**
+ Completion callback for a Listener that uses events to wait
+ for IO. Not suitable for threadpool etc. Retrieves the status of
+ the completed IO from the OVERLAPPED structure.
+ */
+ void completion_callback()
+ {
+ DBUG_ASSERT(wait_handle() && (wait_handle() != INVALID_HANDLE_VALUE));
+ DWORD bytes;
+ return completion_callback(
+ GetOverlappedResult(wait_handle(), &m_overlapped, &bytes, FALSE));
+ }
+
+ /** Cancel an in-progress IO. Useful for threadpool-bound IO */
+ void cancel()
+ {
+ CancelIoEx(m_handle, &m_overlapped);
+ }
+
+ /* Destructor. Closes wait handle, if it was passed in constructor */
+ virtual ~Listener()
+ {
+ if (m_overlapped.hEvent)
+ CloseHandle(m_overlapped.hEvent);
+ };
+};
+
+/* Winsock extension functions. */
+static LPFN_ACCEPTEX my_AcceptEx;
+static LPFN_GETACCEPTEXSOCKADDRS my_GetAcceptExSockaddrs;
+
+/**
+ Listener that handles socket connections.
+ Can be threadpool-bound (i.e. the completion is executed in a threadpool thread),
+ or use events for waits.
+
+ A threadpool-bound listener should be used with the threadpool scheduler, for better
+ performance.
+*/
+struct Socket_Listener: public Listener
+{
+ /** Client socket passed to AcceptEx() call.*/
+ SOCKET m_client_socket;
+
+ /** Buffer for sockaddrs passed to AcceptEx()/GetAcceptExSockaddrs() */
+ char m_buffer[2 * sizeof(sockaddr_storage) + 32];
+
+ /* Threadpool IO struct.*/
+ PTP_IO m_tp_io;
+
+ /**
+ Callback for Windows threadpool's StartThreadpoolIo() function.
+ */
+ static void CALLBACK tp_accept_completion_callback(
+ PTP_CALLBACK_INSTANCE, PVOID context, PVOID , ULONG io_result,
+ ULONG_PTR, PTP_IO io)
+ {
+ tp_win_callback_prolog();
+ Listener *listener= (Listener *)context;
+
+ if (io_result == ERROR_OPERATION_ABORTED)
+ {
+ /* ERROR_OPERATION_ABORTED caused by CancelIoEx()*/
+ CloseThreadpoolIo(io);
+ delete listener;
+ return;
+ }
+ listener->completion_callback(io_result == 0);
+ }
+
+ /**
+ Constructor
+ @param listen_socket - listening socket
+ @param callback_environ - threadpool environment, or NULL
+ if threadpool is not used for completion callbacks.
+ */
+ Socket_Listener(MYSQL_SOCKET listen_socket, PTP_CALLBACK_ENVIRON callback_environ) :
+ Listener((HANDLE)listen_socket.fd,0),
+ m_client_socket(INVALID_SOCKET)
+ {
+ if (callback_environ)
+ {
+ /* Accept executed in threadpool. */
+ m_tp_io= CreateThreadpoolIo(m_handle,
+ tp_accept_completion_callback, this, callback_environ);
+ }
+ else
+ {
+ /* Completion signaled via event. */
+ m_tp_io= 0;
+ m_overlapped.hEvent= CreateEvent(0, FALSE , FALSE, 0);
+ }
+ }
+
+ /*
+ Use AcceptEx to asynchronously wait for a new connection.
+ */
+ void begin_accept()
+ {
+retry :
+ m_client_socket= socket(server_socket_ai_family, SOCK_STREAM, IPPROTO_TCP);
+ if (m_client_socket == INVALID_SOCKET)
+ {
+ sql_perror("socket() call failed.");
+ unireg_abort(1);
+ }
+
+ DWORD bytes_received;
+ if (m_tp_io)
+ StartThreadpoolIo(m_tp_io);
+
+ BOOL ret= my_AcceptEx(
+ (SOCKET)m_handle,
+ m_client_socket,
+ m_buffer,
+ 0,
+ sizeof(sockaddr_storage) + 16,
+ sizeof(sockaddr_storage) + 16,
+ &bytes_received,
+ &m_overlapped);
+
+ DWORD last_error= ret? 0: WSAGetLastError();
+ if (last_error == WSAECONNRESET)
+ {
+ if (m_tp_io)
+ CancelThreadpoolIo(m_tp_io);
+ goto retry;
+ }
+
+ if (ret || last_error == ERROR_IO_PENDING || abort_loop)
+ return;
+
+ sql_print_error("my_AcceptEx failed, last error %u", last_error);
+ abort();
+ }
+
+ /* Create new socket connection.*/
+ void completion_callback(bool success)
+ {
+ if (!success)
+ {
+ /* my_AcceptEx() returned error */
+ closesocket(m_client_socket);
+ begin_accept();
+ return;
+ }
+
+ MYSQL_SOCKET s_client{m_client_socket};
+ MYSQL_SOCKET s_listen{(SOCKET)m_handle};
+
+#ifdef HAVE_PSI_SOCKET_INTERFACE
+ /* Parse socket addresses buffer filled by AcceptEx(),
+ only needed for PSI instrumentation. */
+ sockaddr *local_addr, *remote_addr;
+ int local_addr_len, remote_addr_len;
+
+ my_GetAcceptExSockaddrs(m_buffer,
+ 0, sizeof(sockaddr_storage) + 16, sizeof(sockaddr_storage) + 16,
+ &local_addr, &local_addr_len, &remote_addr, &remote_addr_len);
+
+ s_client.m_psi= PSI_SOCKET_CALL(init_socket)
+ (key_socket_client_connection, (const my_socket*)&s_listen.fd, remote_addr, remote_addr_len);
+#endif
+
+ /* Start accepting new connection. After this point, do not use
+ any member data, they could be used by a different (threadpool) thread. */
+ begin_accept();
+
+ /* Some post-AcceptEx() chores that we need to do to get a normal socket. */
+ if (setsockopt(s_client.fd, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
+ (char *)&s_listen.fd, sizeof(s_listen.fd)))
+ {
+ if (!abort_loop)
+ {
+ sql_perror("setsockopt(SO_UPDATE_ACCEPT_CONTEXT) failed.");
+ abort();
+ }
+ }
+
+ /* Create a new connection.*/
+ handle_accepted_socket(s_client, s_listen);
+ }
+
+ ~Socket_Listener()
+ {
+ if (m_client_socket != INVALID_SOCKET)
+ closesocket(m_client_socket);
+ }
+
+ /*
+ Retrieve the pointers to the Winsock extension functions
+ AcceptEx and GetAcceptExSockaddrs.
+ */
+ static void init_winsock_extensions()
+ {
+ SOCKET s= mysql_socket_getfd(base_ip_sock);
+ if (s == INVALID_SOCKET)
+ s= mysql_socket_getfd(extra_ip_sock);
+ if (s == INVALID_SOCKET)
+ {
+ /* --skip-networking was used*/
+ return;
+ }
+ GUID guid_AcceptEx= WSAID_ACCEPTEX;
+ GUID guid_GetAcceptExSockaddrs= WSAID_GETACCEPTEXSOCKADDRS;
+
+ GUID *guids[]= { &guid_AcceptEx, &guid_GetAcceptExSockaddrs };
+ void *funcs[]= { &my_AcceptEx, &my_GetAcceptExSockaddrs };
+ DWORD bytes;
+ for (int i= 0; i < array_elements(guids); i++)
+ {
+ if (WSAIoctl(s,
+ SIO_GET_EXTENSION_FUNCTION_POINTER,
+ guids[i], sizeof(GUID),
+ funcs[i], sizeof(void *),
+ &bytes, 0, 0) == -1)
+ {
+ sql_print_error("WSAIoctl(SIO_GET_EXTENSION_FUNCTION_POINTER) failed");
+ unireg_abort(1);
+ }
+ }
+ }
+};
+
+
+/**
+ Pipe Listener.
+ Only event notification mode is implemented, no threadpool
+*/
+struct Pipe_Listener : public Listener
+{
+ PTP_CALLBACK_ENVIRON m_tp_env;
+ Pipe_Listener():
+ Listener(INVALID_HANDLE_VALUE, CreateEvent(0, FALSE, FALSE, 0)),
+ m_tp_env(get_threadpool_win_callback_environ())
+ {
+ }
+
+ /*
+ Creates local named pipe instance \\.\pipe\$socket for named pipe connection.
+ */
+ static HANDLE create_named_pipe()
+ {
+ static bool first_instance= true;
+ static char pipe_name[512];
+ DWORD open_mode= PIPE_ACCESS_DUPLEX |
+ FILE_FLAG_OVERLAPPED;
+
+ if (first_instance)
+ {
+ snprintf(pipe_name, sizeof(pipe_name), "\\\\.\\pipe\\%s", mysqld_unix_port);
+ open_mode |= FILE_FLAG_FIRST_PIPE_INSTANCE;
+ if (!ConvertStringSecurityDescriptorToSecurityDescriptorA(
+ "S:(ML;; NW;;; LW) D:(A;; FRFW;;; WD)",
+ 1, &pipe_security.lpSecurityDescriptor, NULL))
+ {
+ sql_perror("Can't start server : Initialize security descriptor");
+ unireg_abort(1);
+ }
+ pipe_security.nLength= sizeof(SECURITY_ATTRIBUTES);
+ pipe_security.bInheritHandle= FALSE;
+ }
+ HANDLE pipe_handle= CreateNamedPipe(pipe_name,
+ open_mode,
+ PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
+ PIPE_UNLIMITED_INSTANCES,
+ (int)global_system_variables.net_buffer_length,
+ (int)global_system_variables.net_buffer_length,
+ NMPWAIT_USE_DEFAULT_WAIT,
+ &pipe_security);
+ if (pipe_handle == INVALID_HANDLE_VALUE)
+ {
+ sql_perror("Create named pipe failed");
+ sql_print_error("Aborting");
+ exit(1);
+ }
+ first_instance= false;
+ return pipe_handle;
+ }
+
+ static void create_pipe_connection(HANDLE pipe)
+ {
+ CONNECT *connect;
+ if (!(connect= new CONNECT) || !(connect->vio= vio_new_win32pipe(pipe)))
+ {
+ CloseHandle(pipe);
+ delete connect;
+ statistic_increment(aborted_connects, &LOCK_status);
+ statistic_increment(connection_errors_internal, &LOCK_status);
+ return;
+ }
+ connect->host= my_localhost;
+ create_new_thread(connect);
+ }
+
+ /* Threadpool callback.*/
+ static void CALLBACK tp_create_pipe_connection(
+ PTP_CALLBACK_INSTANCE,void *Context)
+ {
+ tp_win_callback_prolog();
+ create_pipe_connection(Context);
+ }
+
+ void begin_accept()
+ {
+ m_handle= create_named_pipe();
+ BOOL connected= ConnectNamedPipe(m_handle, &m_overlapped);
+ if (connected)
+ {
+ /* Overlapped ConnectNamedPipe should return zero. */
+ sql_perror("Overlapped ConnectNamedPipe() already connected.");
+ abort();
+ }
+ DWORD last_error= GetLastError();
+ switch (last_error)
+ {
+ case ERROR_PIPE_CONNECTED:
+ /* Client is already connected, so signal an event.*/
+ {
+ /*
+ Clean up the OVERLAPPED structure (so that a subsequent GetOverlappedResult()
+ does not show results of a previous IO).
+ */
+ HANDLE e= m_overlapped.hEvent;
+ memset(&m_overlapped, 0, sizeof(m_overlapped));
+ m_overlapped.hEvent = e;
+ }
+ if (!SetEvent(m_overlapped.hEvent))
+ {
+ sql_perror("SetEvent() failed for connected pipe.");
+ abort();
+ }
+ break;
+ case ERROR_IO_PENDING:
+ break;
+ default:
+ sql_perror("ConnectNamedPipe() failed.");
+ abort();
+ break;
+ }
+ }
+
+ void completion_callback(bool success)
+ {
+ if (!success)
+ {
+#ifdef DBUG_OFF
+ sql_print_warning("ConnectNamedPipe completed with %u", GetLastError());
+#endif
+ CloseHandle(m_handle);
+ m_handle= INVALID_HANDLE_VALUE;
+ begin_accept();
+ return;
+ }
+ HANDLE pipe= m_handle;
+ begin_accept();
+ // If threadpool is on, create connection in threadpool thread
+ if (!m_tp_env || !TrySubmitThreadpoolCallback(tp_create_pipe_connection, pipe, m_tp_env))
+ create_pipe_connection(pipe);
+ }
+
+ ~Pipe_Listener()
+ {
+ if (m_handle != INVALID_HANDLE_VALUE)
+ {
+ CloseHandle(m_handle);
+ }
+ }
+
+ static void cleanup()
+ {
+ LocalFree(pipe_security.lpSecurityDescriptor);
+ }
+};
+
+/**
+ Accept new client connections on Windows.
+
+ Since we deal with both pipes and sockets, they cannot be put into a single
+ select() loop. But we can use asynchronous IO and a WaitForMultipleObjects() loop.
+
+ In addition, for slightly better performance, if we're using threadpool,
+ socket connections are accepted directly in the threadpool.
+
+ The mode of operation is therefore
+
+ 1. There is a WaitForMultipleObjects() loop that waits for the shutdown notification
+ (hEventShutdown), and possibly for pipes and sockets (e.g. if threadpool is not used).
+ This loop ends when the shutdown notification is detected.
+
+ 2. If threadpool is used, new socket connections are accepted there.
+*/
+
+
+#define MAX_WAIT_HANDLES 32
+#define NUM_PIPE_LISTENERS 24
+#define SHUTDOWN_IDX 0
+#define LISTENER_START_IDX 1
+
+void handle_connections_win()
+{
+ Listener* all_listeners[MAX_WAIT_HANDLES]= {};
+ HANDLE wait_events[MAX_WAIT_HANDLES]= {};
+ int n_listeners= 0;
+ int n_waits= 0;
+
+ Socket_Listener::init_winsock_extensions();
+
+ /* Listen for TCP connections on "extra-port" (no threadpool).*/
+ if (extra_ip_sock.fd != INVALID_SOCKET)
+ all_listeners[n_listeners++]= new Socket_Listener(extra_ip_sock, 0);
+
+ /* Listen for named pipe connections */
+ if (mysqld_unix_port[0] && !opt_bootstrap && opt_enable_named_pipe)
+ {
+ /*
+ Use several listeners for pipe, to reduce ERROR_PIPE_BUSY on client side.
+ */
+ for (int i= 0; i < NUM_PIPE_LISTENERS; i++)
+ all_listeners[n_listeners++]= new Pipe_Listener();
+ }
+
+ if (base_ip_sock.fd != INVALID_SOCKET)
+ {
+ /* Wait for TCP connections.*/
+ SetFileCompletionNotificationModes((HANDLE)base_ip_sock.fd, FILE_SKIP_SET_EVENT_ON_HANDLE);
+ all_listeners[n_listeners++]= new Socket_Listener(base_ip_sock, get_threadpool_win_callback_environ());
+ }
+
+ if (!n_listeners && !opt_bootstrap)
+ {
+ sql_print_error("Either TCP connections or named pipe connections must be enabled.");
+ unireg_abort(1);
+ }
+
+ wait_events[SHUTDOWN_IDX]= hEventShutdown;
+ n_waits = 1;
+
+ for (int i= 0; i < n_listeners; i++)
+ {
+ HANDLE wait_handle= all_listeners[i]->wait_handle();
+ if(wait_handle)
+ {
+ DBUG_ASSERT((i == 0) || (all_listeners[i-1]->wait_handle() != 0));
+ wait_events[n_waits++]= wait_handle;
+ }
+ all_listeners[i]->begin_accept();
+ }
+
+ for (;;)
+ {
+ DWORD idx = WaitForMultipleObjects(n_waits ,wait_events, FALSE, INFINITE);
+ DBUG_ASSERT((int)idx >= 0 && (int)idx < n_waits);
+
+ if (idx == SHUTDOWN_IDX)
+ break;
+
+ all_listeners[idx - LISTENER_START_IDX]->completion_callback();
+ }
+
+ /* Cleanup */
+ for (int i= 0; i < n_listeners; i++)
+ {
+ Listener *listener= all_listeners[i];
+ if (listener->wait_handle())
+ delete listener;
+ else
+ // A threadpool-bound listener will be deleted in the threadpool.
+ // Do not call the destructor, because a callback may be running.
+ listener->cancel();
+ }
+ Pipe_Listener::cleanup();
+}
diff --git a/sql/handle_connections_win.h b/sql/handle_connections_win.h
new file mode 100644
index 00000000000..a81f4346fb2
--- /dev/null
+++ b/sql/handle_connections_win.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2018 MariaDB Corporation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
+
+/**
+ Handles incoming socket and pipe connections on Windows.
+ Creates new (THD) connections.
+*/
+extern void handle_connections_win();
diff --git a/sql/handler.cc b/sql/handler.cc
index f39cb55c7f6..12a05aba90d 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -43,6 +43,7 @@
#include "debug_sync.h" // DEBUG_SYNC
#include "sql_audit.h"
#include "ha_sequence.h"
+#include "rowid_filter.h"
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
@@ -54,8 +55,12 @@
#include "semisync_master.h"
#include "wsrep_mysqld.h"
-#include "wsrep.h"
+#ifdef WITH_WSREP
+#include "wsrep_binlog.h"
#include "wsrep_xid.h"
+#include "wsrep_thd.h"
+#include "wsrep_trans_observer.h" /* wsrep transaction hooks */
+#endif /* WITH_WSREP */
/*
While we have legacy_db_type, we have this array to
@@ -251,6 +256,9 @@ handlerton *ha_checktype(THD *thd, handlerton *hton, bool no_substitute)
if (no_substitute)
return NULL;
+#ifdef WITH_WSREP
+ (void)wsrep_after_rollback(thd, false);
+#endif /* WITH_WSREP */
return ha_default_handlerton(thd);
} /* ha_checktype */
@@ -296,7 +304,7 @@ handler *get_ha_partition(partition_info *part_info)
}
else
{
- my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR),
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATAL),
static_cast<int>(sizeof(ha_partition)));
}
DBUG_RETURN(((handler*) partition));
@@ -683,7 +691,7 @@ int ha_init()
binary log (which is considered a transaction-capable storage engine in
counting total_ha)
*/
- opt_using_transactions= total_ha>(ulong)opt_bin_log;
+ opt_using_transactions= total_ha > (ulong) opt_bin_log;
savepoint_alloc_size+= sizeof(SAVEPOINT);
DBUG_RETURN(error);
}
@@ -693,7 +701,6 @@ int ha_end()
int error= 0;
DBUG_ENTER("ha_end");
-
/*
This should be eventualy based on the graceful shutdown flag.
So if flag is equal to HA_PANIC_CLOSE, the deallocate
@@ -823,6 +830,43 @@ void ha_kill_query(THD* thd, enum thd_kill_levels level)
}
+/*****************************************************************************
+ Backup functions
+******************************************************************************/
+
+static my_bool plugin_prepare_for_backup(THD *unused1, plugin_ref plugin,
+ void *not_used)
+{
+ handlerton *hton= plugin_hton(plugin);
+ if (hton->state == SHOW_OPTION_YES && hton->prepare_for_backup)
+ hton->prepare_for_backup();
+ return FALSE;
+}
+
+void ha_prepare_for_backup()
+{
+ plugin_foreach_with_mask(0, plugin_prepare_for_backup,
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ PLUGIN_IS_DELETED|PLUGIN_IS_READY, 0);
+}
+
+static my_bool plugin_end_backup(THD *unused1, plugin_ref plugin,
+ void *not_used)
+{
+ handlerton *hton= plugin_hton(plugin);
+ if (hton->state == SHOW_OPTION_YES && hton->end_backup)
+ hton->end_backup();
+ return FALSE;
+}
+
+void ha_end_backup()
+{
+ plugin_foreach_with_mask(0, plugin_end_backup,
+ MYSQL_STORAGE_ENGINE_PLUGIN,
+ PLUGIN_IS_DELETED|PLUGIN_IS_READY, 0);
+}
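ha_prepare_for_backup()/ha_end_backup() above only fan out to the loaded engines; the actual work happens in the new handlerton hooks. A hypothetical engine-side sketch (all names below are invented for illustration, not part of this patch) of how an engine would register them:

// Sketch only: assumes a storage engine plugin whose init function receives
// the handlerton, as usual for MariaDB engines.
static void example_prepare_for_backup()
{
  // Pause purge/checkpointing so the data files stay in a copyable state.
}

static void example_end_backup()
{
  // Resume normal background activity.
}

static int example_engine_init(void *p)
{
  handlerton *hton= static_cast<handlerton*>(p);
  hton->prepare_for_backup= example_prepare_for_backup;
  hton->end_backup= example_end_backup;
  return 0;
}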
+
+
/* ========================================================================
======================= TRANSACTIONS ===================================*/
@@ -1155,25 +1199,40 @@ void trans_register_ha(THD *thd, bool all, handlerton *ht_arg)
ha_info->register_ha(trans, ht_arg);
trans->no_2pc|=(ht_arg->prepare==0);
- if (thd->transaction.xid_state.xid.is_null())
- thd->transaction.xid_state.xid.set(thd->query_id);
+
+ /* Set implicit xid even if there's explicit XA, it will be ignored anyway. */
+ if (thd->transaction.implicit_xid.is_null())
+ thd->transaction.implicit_xid.set(thd->query_id);
+
DBUG_VOID_RETURN;
}
static int prepare_or_error(handlerton *ht, THD *thd, bool all)
{
+#ifdef WITH_WSREP
+ const bool run_wsrep_hooks= wsrep_run_commit_hook(thd, all);
+ if (run_wsrep_hooks && ht->flags & HTON_WSREP_REPLICATION &&
+ wsrep_before_prepare(thd, all))
+ {
+ return(1);
+ }
+#endif /* WITH_WSREP */
+
int err= ht->prepare(ht, thd, all);
status_var_increment(thd->status_var.ha_prepare_count);
if (err)
{
- /* avoid sending error, if we're going to replay the transaction */
-#ifdef WITH_WSREP
- if (ht != wsrep_hton ||
- err == EMSGSIZE || thd->wsrep_conflict_state != MUST_REPLAY)
-#endif
my_error(ER_ERROR_DURING_COMMIT, MYF(0), err);
}
+#ifdef WITH_WSREP
+ if (run_wsrep_hooks && !err && ht->flags & HTON_WSREP_REPLICATION &&
+ wsrep_after_prepare(thd, all))
+ {
+ err= 1;
+ }
+#endif /* WITH_WSREP */
+
return err;
}
@@ -1314,6 +1373,9 @@ int ha_commit_trans(THD *thd, bool all)
Ha_trx_info *ha_info= trans->ha_list;
bool need_prepare_ordered, need_commit_ordered;
my_xid xid;
+#ifdef WITH_WSREP
+ const bool run_wsrep_hooks= wsrep_run_commit_hook(thd, all);
+#endif /* WITH_WSREP */
DBUG_ENTER("ha_commit_trans");
DBUG_PRINT("info",("thd: %p option_bits: %lu all: %d",
thd, (ulong) thd->variables.option_bits, all));
@@ -1358,7 +1420,7 @@ int ha_commit_trans(THD *thd, bool all)
}
#ifdef WITH_ARIA_STORAGE_ENGINE
- ha_maria::implicit_commit(thd, TRUE);
+ ha_maria::implicit_commit(thd, TRUE);
#endif
if (!ha_info)
@@ -1368,6 +1430,12 @@ int ha_commit_trans(THD *thd, bool all)
*/
if (is_real_trans)
thd->transaction.cleanup();
+#ifdef WITH_WSREP
+ if (wsrep_is_active(thd) && is_real_trans && !error)
+ {
+ wsrep_commit_empty(thd, all);
+ }
+#endif /* WITH_WSREP */
DBUG_RETURN(0);
}
@@ -1395,8 +1463,7 @@ int ha_commit_trans(THD *thd, bool all)
We allow the owner of FTWRL to COMMIT; we assume that it knows
what it does.
*/
- mdl_request.init(MDL_key::COMMIT, "", "", MDL_INTENTION_EXCLUSIVE,
- MDL_EXPLICIT);
+ mdl_request.init(MDL_key::BACKUP, "", "", MDL_BACKUP_COMMIT, MDL_EXPLICIT);
if (!WSREP(thd) &&
thd->mdl_context.acquire_lock(&mdl_request,
@@ -1454,13 +1521,33 @@ int ha_commit_trans(THD *thd, bool all)
if (trans->no_2pc || (rw_ha_count <= 1))
{
+#ifdef WITH_WSREP
+ /*
+ This commit will not go through log_and_order() where wsrep commit
+ ordering is normally done. Commit ordering must be done here.
+ */
+ if (run_wsrep_hooks)
+ error= wsrep_before_commit(thd, all);
+ if (error)
+ {
+ ha_rollback_trans(thd, FALSE);
+ goto wsrep_err;
+ }
+#endif /* WITH_WSREP */
error= ha_commit_one_phase(thd, all);
+#ifdef WITH_WSREP
+ if (run_wsrep_hooks)
+ error= error || wsrep_after_commit(thd, all);
+#endif /* WITH_WSREP */
goto done;
}
need_prepare_ordered= FALSE;
need_commit_ordered= FALSE;
- xid= thd->transaction.xid_state.xid.get_my_xid();
+ DBUG_ASSERT(thd->transaction.implicit_xid.get_my_xid() ==
+ thd->transaction.implicit_xid.quick_get_my_xid());
+ xid= thd->transaction.xid_state.is_explicit_XA() ? 0 :
+ thd->transaction.implicit_xid.quick_get_my_xid();
for (Ha_trx_info *hi= ha_info; hi; hi= hi->next())
{
@@ -1486,10 +1573,14 @@ int ha_commit_trans(THD *thd, bool all)
DBUG_EXECUTE_IF("crash_commit_after_prepare", DBUG_SUICIDE(););
#ifdef WITH_WSREP
- if (!error && WSREP_ON && wsrep_is_wsrep_xid(&thd->transaction.xid_state.xid))
+ if (run_wsrep_hooks && !error)
{
- // xid was rewritten by wsrep
- xid= wsrep_xid_seqno(thd->transaction.xid_state.xid);
+ wsrep::seqno const s= wsrep_xid_seqno(thd->wsrep_xid);
+ if (!s.is_undefined())
+ {
+ // xid was rewritten by wsrep
+ xid= s.get();
+ }
}
#endif /* WITH_WSREP */
@@ -1498,18 +1589,35 @@ int ha_commit_trans(THD *thd, bool all)
error= commit_one_phase_2(thd, all, trans, is_real_trans);
goto done;
}
-
+#ifdef WITH_WSREP
+ if (run_wsrep_hooks && (error = wsrep_before_commit(thd, all)))
+ goto wsrep_err;
+#endif /* WITH_WSREP */
DEBUG_SYNC(thd, "ha_commit_trans_before_log_and_order");
cookie= tc_log->log_and_order(thd, xid, all, need_prepare_ordered,
need_commit_ordered);
if (!cookie)
+ {
+ WSREP_DEBUG("log_and_order has failed %llu %d", thd->thread_id, cookie);
goto err;
-
+ }
DEBUG_SYNC(thd, "ha_commit_trans_after_log_and_order");
DBUG_EXECUTE_IF("crash_commit_after_log", DBUG_SUICIDE(););
error= commit_one_phase_2(thd, all, trans, is_real_trans) ? 2 : 0;
-
+#ifdef WITH_WSREP
+ if (run_wsrep_hooks && (error || (error = wsrep_after_commit(thd, all))))
+ {
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ if (wsrep_must_abort(thd))
+ {
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ (void)tc_log->unlog(cookie, xid);
+ goto wsrep_err;
+ }
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ }
+#endif /* WITH_WSREP */
DBUG_EXECUTE_IF("crash_commit_before_unlog", DBUG_SUICIDE(););
if (tc_log->unlog(cookie, xid))
{
@@ -1531,6 +1639,19 @@ done:
goto end;
/* Come here if error and we need to rollback. */
+#ifdef WITH_WSREP
+wsrep_err:
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ if (run_wsrep_hooks && wsrep_must_abort(thd))
+ {
+ WSREP_DEBUG("BF abort has happened after prepare & certify");
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ ha_rollback_trans(thd, TRUE);
+ }
+ else
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+
+#endif /* WITH_WSREP */
err:
error= 1; /* Transaction was rolled back */
/*
@@ -1540,7 +1661,11 @@ err:
*/
if (!(thd->rgi_slave && thd->rgi_slave->is_parallel_exec))
ha_rollback_trans(thd, all);
-
+ else
+ {
+ WSREP_DEBUG("rollback skipped %p %d",thd->rgi_slave,
+ thd->rgi_slave->is_parallel_exec);
+ }
end:
if (rw_trans && mdl_request.ticket)
{
@@ -1552,6 +1677,14 @@ end:
*/
thd->mdl_context.release_lock(mdl_request.ticket);
}
+#ifdef WITH_WSREP
+ if (wsrep_is_active(thd) && is_real_trans && !error && (rw_ha_count == 0) &&
+ wsrep_not_committed(thd))
+ {
+ wsrep_commit_empty(thd, all);
+ }
+#endif /* WITH_WSREP */
+
DBUG_RETURN(error);
}
@@ -1709,6 +1842,9 @@ int ha_rollback_trans(THD *thd, bool all)
DBUG_RETURN(1);
}
+#ifdef WITH_WSREP
+ (void) wsrep_before_rollback(thd, all);
+#endif /* WITH_WSREP */
if (ha_info)
{
/* Close all cursors that can not survive ROLLBACK */
@@ -1724,9 +1860,9 @@ int ha_rollback_trans(THD *thd, bool all)
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
#ifdef WITH_WSREP
- WSREP_WARN("handlerton rollback failed, thd %llu %lld conf %d SQL %s",
- thd->thread_id, thd->query_id, thd->wsrep_conflict_state,
- thd->query());
+ WSREP_WARN("handlerton rollback failed, thd %lld %lld conf %d SQL %s",
+ thd->thread_id, thd->query_id, thd->wsrep_trx().state(),
+ thd->query());
#endif /* WITH_WSREP */
}
status_var_increment(thd->status_var.ha_rollback_count);
@@ -1737,17 +1873,25 @@ int ha_rollback_trans(THD *thd, bool all)
trans->no_2pc=0;
}
- /*
- Thanks to possibility of MDL deadlock rollback request can come even if
- transaction hasn't been started in any transactional storage engine.
- */
- if (is_real_trans && thd->transaction_rollback_request &&
- thd->transaction.xid_state.xa_state != XA_NOTR)
- thd->transaction.xid_state.rm_error= thd->get_stmt_da()->sql_errno();
-
+#ifdef WITH_WSREP
+ if (thd->is_error())
+ {
+ WSREP_DEBUG("ha_rollback_trans(%lld, %s) rolled back: %s: %s; is_real %d",
+ thd->thread_id, all?"TRUE":"FALSE", WSREP_QUERY(thd),
+ thd->get_stmt_da()->message(), is_real_trans);
+ }
+ (void) wsrep_after_rollback(thd, all);
+#endif /* WITH_WSREP */
/* Always cleanup. Even if nht==0. There may be savepoints. */
if (is_real_trans)
{
+ /*
+ Thanks to possibility of MDL deadlock rollback request can come even if
+ transaction hasn't been started in any transactional storage engine.
+ */
+ if (thd->transaction_rollback_request)
+ thd->transaction.xid_state.set_error(thd->get_stmt_da()->sql_errno());
+
thd->has_waiter= false;
thd->transaction.cleanup();
}
@@ -1882,18 +2026,12 @@ static char* xid_to_str(char *buf, XID *xid)
static my_xid wsrep_order_and_check_continuity(XID *list, int len)
{
wsrep_sort_xid_array(list, len);
- wsrep_uuid_t uuid;
- wsrep_seqno_t seqno;
- if (wsrep_get_SE_checkpoint(uuid, seqno))
- {
- WSREP_ERROR("Could not read wsrep SE checkpoint for recovery");
- return 0;
- }
- long long cur_seqno= seqno;
+ wsrep::gtid cur_position= wsrep_get_SE_checkpoint();
+ long long cur_seqno= cur_position.seqno().get();
for (int i= 0; i < len; ++i)
{
if (!wsrep_is_wsrep_xid(list + i) ||
- wsrep_xid_seqno(*(list + i)) != cur_seqno + 1)
+ wsrep_xid_seqno(list + i) != cur_seqno + 1)
{
WSREP_WARN("Discovered discontinuity in recovered wsrep "
"transaction XIDs. Truncating the recovery list to "
@@ -1906,7 +2044,6 @@ static my_xid wsrep_order_and_check_continuity(XID *list, int len)
return (cur_seqno < 0 ? 0 : cur_seqno);
}
#endif /* WITH_WSREP */
-
/**
recover() step of xa.
@@ -1949,18 +2086,27 @@ static my_bool xarecover_handlerton(THD *unused, plugin_ref plugin,
recovered XIDs is checked for continuity. All the XIDs which
are in continuous range can be safely committed if binlog
is off since they have already ordered and certified in the
- cluster. */
+ cluster.
+
+ The discontinuity of wsrep XIDs may happen because the GTID
+ is assigned for transaction in wsrep_before_prepare(), but the
+ commit order is entered in wsrep_before_commit(). This means that
+ transactions may run prepare step out of order and may
+ result in gap in wsrep XIDs. This can be the case for example
+ if we have T1 with seqno 1 and T2 with seqno 2 and the server
+ crashes after T2 finishes prepare step but before T1 starts
+ the prepare.
+ */
my_xid wsrep_limit= 0;
if (WSREP_ON)
{
wsrep_limit= wsrep_order_and_check_continuity(info->list, got);
}
#endif /* WITH_WSREP */
-
for (int i=0; i < got; i ++)
{
my_xid x= IF_WSREP(WSREP_ON && wsrep_is_wsrep_xid(&info->list[i]) ?
- wsrep_xid_seqno(info->list[i]) :
+ wsrep_xid_seqno(&info->list[i]) :
info->list[i].get_my_xid(),
info->list[i].get_my_xid());
if (!x) // not "mine" - that is generated by external TM
@@ -1969,7 +2115,7 @@ static my_bool xarecover_handlerton(THD *unused, plugin_ref plugin,
char buf[XIDDATASIZE*4+6]; // see xid_to_str
DBUG_PRINT("info", ("ignore xid %s", xid_to_str(buf, info->list+i)));
#endif
- xid_cache_insert(info->list+i, XA_PREPARED);
+ xid_cache_insert(info->list + i);
info->found_foreign_xids++;
continue;
}
@@ -2081,186 +2227,6 @@ int ha_recover(HASH *commit_list)
DBUG_RETURN(0);
}
-/**
- return the XID as it appears in the SQL function's arguments.
- So this string can be passed to XA START, XA PREPARE etc...
-
- @note
- the 'buf' has to have space for at least SQL_XIDSIZE bytes.
-*/
-
-
-/*
- 'a'..'z' 'A'..'Z', '0'..'9'
- and '-' '_' ' ' symbols don't have to be
- converted.
-*/
-
-static const char xid_needs_conv[128]=
-{
- 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
- 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
- 0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,
- 0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
- 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,
- 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
- 0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1
-};
-
-uint get_sql_xid(XID *xid, char *buf)
-{
- int tot_len= xid->gtrid_length + xid->bqual_length;
- int i;
- const char *orig_buf= buf;
-
- for (i=0; i<tot_len; i++)
- {
- uchar c= ((uchar *) xid->data)[i];
- if (c >= 128 || xid_needs_conv[c])
- break;
- }
-
- if (i >= tot_len)
- {
- /* No need to convert characters to hexadecimals. */
- *buf++= '\'';
- memcpy(buf, xid->data, xid->gtrid_length);
- buf+= xid->gtrid_length;
- *buf++= '\'';
- if (xid->bqual_length > 0 || xid->formatID != 1)
- {
- *buf++= ',';
- *buf++= '\'';
- memcpy(buf, xid->data+xid->gtrid_length, xid->bqual_length);
- buf+= xid->bqual_length;
- *buf++= '\'';
- }
- }
- else
- {
- *buf++= 'X';
- *buf++= '\'';
- for (i= 0; i < xid->gtrid_length; i++)
- {
- *buf++=_dig_vec_lower[((uchar*) xid->data)[i] >> 4];
- *buf++=_dig_vec_lower[((uchar*) xid->data)[i] & 0x0f];
- }
- *buf++= '\'';
- if (xid->bqual_length > 0 || xid->formatID != 1)
- {
- *buf++= ',';
- *buf++= 'X';
- *buf++= '\'';
- for (; i < tot_len; i++)
- {
- *buf++=_dig_vec_lower[((uchar*) xid->data)[i] >> 4];
- *buf++=_dig_vec_lower[((uchar*) xid->data)[i] & 0x0f];
- }
- *buf++= '\'';
- }
- }
-
- if (xid->formatID != 1)
- {
- *buf++= ',';
- buf+= my_longlong10_to_str_8bit(&my_charset_bin, buf,
- MY_INT64_NUM_DECIMAL_DIGITS, -10, xid->formatID);
- }
-
- return (uint)(buf - orig_buf);
-}
-
-
-/**
- return the list of XID's to a client, the same way SHOW commands do.
-
- @note
- I didn't find in XA specs that an RM cannot return the same XID twice,
- so mysql_xa_recover does not filter XID's to ensure uniqueness.
- It can be easily fixed later, if necessary.
-*/
-
-static my_bool xa_recover_callback(XID_STATE *xs, Protocol *protocol,
- char *data, uint data_len, CHARSET_INFO *data_cs)
-{
- if (xs->xa_state == XA_PREPARED)
- {
- protocol->prepare_for_resend();
- protocol->store_longlong((longlong) xs->xid.formatID, FALSE);
- protocol->store_longlong((longlong) xs->xid.gtrid_length, FALSE);
- protocol->store_longlong((longlong) xs->xid.bqual_length, FALSE);
- protocol->store(data, data_len, data_cs);
- if (protocol->write())
- return TRUE;
- }
- return FALSE;
-}
-
-
-static my_bool xa_recover_callback_short(XID_STATE *xs, Protocol *protocol)
-{
- return xa_recover_callback(xs, protocol, xs->xid.data,
- xs->xid.gtrid_length + xs->xid.bqual_length, &my_charset_bin);
-}
-
-
-static my_bool xa_recover_callback_verbose(XID_STATE *xs, Protocol *protocol)
-{
- char buf[SQL_XIDSIZE];
- uint len= get_sql_xid(&xs->xid, buf);
- return xa_recover_callback(xs, protocol, buf, len,
- &my_charset_utf8_general_ci);
-}
-
-
-bool mysql_xa_recover(THD *thd)
-{
- List<Item> field_list;
- Protocol *protocol= thd->protocol;
- MEM_ROOT *mem_root= thd->mem_root;
- my_hash_walk_action action;
- DBUG_ENTER("mysql_xa_recover");
-
- field_list.push_back(new (mem_root)
- Item_int(thd, "formatID", 0,
- MY_INT32_NUM_DECIMAL_DIGITS), mem_root);
- field_list.push_back(new (mem_root)
- Item_int(thd, "gtrid_length", 0,
- MY_INT32_NUM_DECIMAL_DIGITS), mem_root);
- field_list.push_back(new (mem_root)
- Item_int(thd, "bqual_length", 0,
- MY_INT32_NUM_DECIMAL_DIGITS), mem_root);
- {
- uint len;
- CHARSET_INFO *cs;
-
- if (thd->lex->verbose)
- {
- len= SQL_XIDSIZE;
- cs= &my_charset_utf8_general_ci;
- action= (my_hash_walk_action) xa_recover_callback_verbose;
- }
- else
- {
- len= XIDDATASIZE;
- cs= &my_charset_bin;
- action= (my_hash_walk_action) xa_recover_callback_short;
- }
-
- field_list.push_back(new (mem_root)
- Item_empty_string(thd, "data", len, cs), mem_root);
- }
-
- if (protocol->send_result_set_metadata(&field_list,
- Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
- DBUG_RETURN(1);
-
- if (xid_cache_iterate(thd, action, protocol))
- DBUG_RETURN(1);
- my_eof(thd);
- DBUG_RETURN(0);
-}
/*
Called by engine to notify TC that a new commit checkpoint has been reached.
@@ -2346,11 +2312,26 @@ int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv)
{
int err;
handlerton *ht= ha_info->ht();
+#ifdef WITH_WSREP
+ if (WSREP(thd) && ht->flags & HTON_WSREP_REPLICATION)
+ {
+ WSREP_DEBUG("ha_rollback_to_savepoint: run before_rollbackha_rollback_trans hook");
+ (void) wsrep_before_rollback(thd, !thd->in_sub_stmt);
+
+ }
+#endif // WITH_WSREP
if ((err= ht->rollback(ht, thd, !thd->in_sub_stmt)))
{ // cannot happen
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
}
+#ifdef WITH_WSREP
+ if (WSREP(thd) && ht->flags & HTON_WSREP_REPLICATION)
+ {
+ WSREP_DEBUG("ha_rollback_to_savepoint: run after_rollback hook");
+ (void) wsrep_after_rollback(thd, !thd->in_sub_stmt);
+ }
+#endif // WITH_WSREP
status_var_increment(thd->status_var.ha_rollback_count);
ha_info_next= ha_info->next();
ha_info->reset(); /* keep it conveniently zero-filled */
@@ -2367,6 +2348,16 @@ int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv)
*/
int ha_savepoint(THD *thd, SAVEPOINT *sv)
{
+#ifdef WITH_WSREP
+ /*
+ Register binlog hton for savepoint processing if wsrep binlog
+ emulation is on.
+ */
+ if (WSREP_EMULATE_BINLOG(thd) && wsrep_thd_is_local(thd))
+ {
+ wsrep_register_binlog_handler(thd, thd->in_multi_stmt_transaction_mode());
+ }
+#endif /* WITH_WSREP */
int error=0;
THD_TRANS *trans= (thd->in_sub_stmt ? &thd->transaction.stmt :
&thd->transaction.all);
@@ -2593,7 +2584,7 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
dummy_share.table_name= *alias;
dummy_table.alias.set(alias->str, alias->length, table_alias_charset);
file->change_table_ptr(&dummy_table, &dummy_share);
- file->print_error(error, MYF(intercept ? ME_JUST_WARNING : 0));
+ file->print_error(error, MYF(intercept ? ME_WARNING : 0));
}
if (intercept)
error= 0;
@@ -2651,23 +2642,26 @@ LEX_CSTRING *handler::engine_name()
}
+/*
+ It is assumed that the value of the parameter 'ranges' can be only 0 or 1.
+ If ranges == 1 then the function returns the cost of index only scan
+ by index 'keyno' of one range containing 'rows' key entries.
+ If ranges == 0 then the function returns only the cost of copying
+ those key entries into the engine buffers.
+*/
+
double handler::keyread_time(uint index, uint ranges, ha_rows rows)
{
- /*
- It is assumed that we will read trough the whole key range and that all
- key blocks are half full (normally things are much better). It is also
- assumed that each time we read the next key from the index, the handler
- performs a random seek, thus the cost is proportional to the number of
- blocks read. This model does not take into account clustered indexes -
- engines that support that (e.g. InnoDB) may want to overwrite this method.
- The model counts in the time to read index entries from cache.
- */
+ DBUG_ASSERT(ranges == 0 || ranges == 1);
size_t len= table->key_info[index].key_length + ref_length;
if (index == table->s->primary_key && table->file->primary_key_is_clustered())
len= table->s->stored_rec_length;
- double keys_per_block= (stats.block_size/2.0/len+1);
- return (rows + keys_per_block-1)/ keys_per_block +
- len*rows/(stats.block_size+1)/TIME_FOR_COMPARE ;
+ uint keys_per_block= (uint) (stats.block_size/2.0/len+1);
+ ulonglong blocks= !rows ? 0 : (rows-1) / keys_per_block + 1;
+ double cost= (double)rows*len/(stats.block_size+1)*IDX_BLOCK_COPY_COST;
+ if (ranges)
+ cost+= blocks;
+ return cost;
}
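A standalone illustration of the reworked keyread_time() formula with made-up numbers (the IDX_BLOCK_COPY_COST value below is an assumption; the real constant is defined in the server headers):

#include <cstdio>

int main()
{
  // Assumed inputs: 8 KB index blocks, 20-byte key+ref, one range of 1000 rows.
  const double IDX_BLOCK_COPY_COST= 0.2;   // assumption, not the server's definition
  unsigned block_size= 8192, len= 20, ranges= 1;
  unsigned long long rows= 1000;

  unsigned keys_per_block= (unsigned) (block_size / 2.0 / len + 1);
  unsigned long long blocks= !rows ? 0 : (rows - 1) / keys_per_block + 1;
  double cost= (double) rows * len / (block_size + 1) * IDX_BLOCK_COPY_COST;
  if (ranges)
    cost+= blocks;
  printf("keys/block=%u blocks=%llu cost=%.3f\n", keys_per_block, blocks, cost);
  return 0;
}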
void **handler::ha_data(THD *thd) const
@@ -3646,6 +3640,8 @@ void print_keydup_error(TABLE *table, KEY *key, const char *msg, myf errflag)
}
else
{
+ if (key->algorithm == HA_KEY_ALG_LONG_HASH)
+ setup_keyinfo_hash(key);
/* Table is opened and defined at this point */
key_unpack(&str,table, key);
uint max_length=MYSQL_ERRMSG_SIZE-(uint) strlen(msg);
@@ -3656,6 +3652,8 @@ void print_keydup_error(TABLE *table, KEY *key, const char *msg, myf errflag)
}
my_printf_error(ER_DUP_ENTRY, msg, errflag, str.c_ptr_safe(),
key->name.str);
+ if (key->algorithm == HA_KEY_ALG_LONG_HASH)
+ re_setup_keyinfo_hash(key);
}
}
@@ -3673,7 +3671,6 @@ void print_keydup_error(TABLE *table, KEY *key, myf errflag)
errflag);
}
-
/**
Print error that we got from handler function.
@@ -3695,7 +3692,7 @@ void handler::print_error(int error, myf errflag)
if (ha_thd()->transaction_rollback_request)
{
/* Ensure this becomes a true error */
- errflag&= ~(ME_JUST_WARNING | ME_JUST_INFO);
+ errflag&= ~(ME_WARNING | ME_NOTE);
}
int textno= -1; // impossible value
@@ -3830,14 +3827,14 @@ void handler::print_error(int error, myf errflag)
{
textno=ER_RECORD_FILE_FULL;
/* Write the error message to error log */
- errflag|= ME_NOREFRESH;
+ errflag|= ME_ERROR_LOG;
break;
}
case HA_ERR_INDEX_FILE_FULL:
{
textno=ER_INDEX_FILE_FULL;
/* Write the error message to error log */
- errflag|= ME_NOREFRESH;
+ errflag|= ME_ERROR_LOG;
break;
}
case HA_ERR_LOCK_WAIT_TIMEOUT:
@@ -3964,14 +3961,14 @@ void handler::print_error(int error, myf errflag)
if (unlikely(fatal_error))
{
/* Ensure this becomes a true error */
- errflag&= ~(ME_JUST_WARNING | ME_JUST_INFO);
+ errflag&= ~(ME_WARNING | ME_NOTE);
if ((debug_assert_if_crashed_table ||
global_system_variables.log_warnings > 1))
{
/*
Log error to log before we crash or if extended warnings are requested
*/
- errflag|= ME_NOREFRESH;
+ errflag|= ME_ERROR_LOG;
}
}
@@ -4143,7 +4140,8 @@ static bool update_frm_version(TABLE *table)
int4store(version, MYSQL_VERSION_ID);
- if ((result= (int)mysql_file_pwrite(file, (uchar*) version, 4, 51L, MYF_RW)))
+ if ((result= (int)mysql_file_pwrite(file, (uchar*) version, 4, 51L,
+ MYF(MY_WME+MY_NABP))))
goto err;
table->s->mysql_version= MYSQL_VERSION_ID;
@@ -4162,9 +4160,10 @@ err:
*/
uint handler::get_dup_key(int error)
{
- DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
- m_lock_type != F_UNLCK);
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || m_lock_type != F_UNLCK);
DBUG_ENTER("handler::get_dup_key");
+ if (table->s->long_unique_table && table->file->errkey < table->s->keys)
+ DBUG_RETURN(table->file->errkey);
table->file->errkey = (uint) -1;
if (error == HA_ERR_FOUND_DUPP_KEY ||
error == HA_ERR_FOREIGN_DUPLICATE_KEY ||
@@ -4613,6 +4612,29 @@ handler::check_if_supported_inplace_alter(TABLE *altered_table,
DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
}
+Alter_inplace_info::Alter_inplace_info(HA_CREATE_INFO *create_info_arg,
+ Alter_info *alter_info_arg,
+ KEY *key_info_arg, uint key_count_arg,
+ partition_info *modified_part_info_arg,
+ bool ignore_arg)
+ : create_info(create_info_arg),
+ alter_info(alter_info_arg),
+ key_info_buffer(key_info_arg),
+ key_count(key_count_arg),
+ index_drop_count(0),
+ index_drop_buffer(nullptr),
+ index_add_count(0),
+ index_add_buffer(nullptr),
+ rename_keys(current_thd->mem_root),
+ handler_ctx(nullptr),
+ group_commit_ctx(nullptr),
+ handler_flags(0),
+ modified_part_info(modified_part_info_arg),
+ ignore(ignore_arg),
+ online(false),
+ unsupported_reason(nullptr)
+ {}
+
void Alter_inplace_info::report_unsupported_error(const char *not_supported,
const char *try_instead) const
{
@@ -5065,7 +5087,7 @@ int ha_create_table(THD *thd, const char *path,
{
if (!thd->is_error())
my_error(ER_CANT_CREATE_TABLE, MYF(0), db, table_name, error);
- table.file->print_error(error, MYF(ME_JUST_WARNING));
+ table.file->print_error(error, MYF(ME_WARNING));
PSI_CALL_drop_table_share(temp_table, share.db.str, (uint)share.db.length,
share.table_name.str, (uint)share.table_name.length);
}
@@ -5846,6 +5868,35 @@ extern "C" enum icp_result handler_index_cond_check(void* h_arg)
return res;
}
+
+/**
+ Rowid filter callback - to be called by an engine to check rowid / primary
+ keys of the rows whose data is to be fetched against the used rowid filter
+*/
+
+extern "C" int handler_rowid_filter_check(void *h_arg)
+{
+ handler *h= (handler*) h_arg;
+ TABLE *tab= h->get_table();
+ h->position(tab->record[0]);
+ return h->pushed_rowid_filter->check((char *) h->ref);
+}
+
+
+/**
+ Callback function for an engine to check whether the used rowid filter
+ has been already built
+*/
+
+extern "C" int handler_rowid_filter_is_active(void *h_arg)
+{
+ if (!h_arg)
+ return false;
+ handler *h= (handler*) h_arg;
+ return h->rowid_filter_is_active;
+}
+
+
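For context, a hedged sketch of how an engine-side fetch path could consult these two callbacks (the wrapper below is invented for illustration and is not part of this patch):

// Sketch only: 'h' is the handler whose rowid filter was pushed down.
// In a real engine the check happens inside its row-fetch path.
static bool row_passes_pushed_filter(handler *h)
{
  if (!handler_rowid_filter_is_active(h))
    return true;                               // no active filter: fetch everything
  return handler_rowid_filter_check(h) != 0;   // filter decides by rowid/primary key
}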
int handler::index_read_idx_map(uchar * buf, uint index, const uchar * key,
key_part_map keypart_map,
enum ha_rkey_function find_flag)
@@ -6026,6 +6077,12 @@ bool handler::check_table_binlog_row_based(bool binlog_row)
return false;
if (unlikely((table->in_use->variables.sql_log_bin_off)))
return 0; /* Called by partitioning engine */
+#ifdef WITH_WSREP
+ if (!table->in_use->variables.sql_log_bin &&
+ wsrep_thd_is_applying(table->in_use))
+ return 0; /* wsrep patch sets sql_log_bin to silence binlogging
+ from high priority threads */
+#endif /* WITH_WSREP */
if (unlikely((!check_table_binlog_row_based_done)))
{
check_table_binlog_row_based_done= 1;
@@ -6056,12 +6113,12 @@ bool handler::check_table_binlog_row_based_internal(bool binlog_row)
Otherwise, return 'true' if binary logging is on.
*/
IF_WSREP(((WSREP_EMULATE_BINLOG(thd) &&
- (thd->wsrep_exec_mode != REPL_RECV)) ||
+ wsrep_thd_is_local(thd)) ||
((WSREP(thd) ||
(thd->variables.option_bits & OPTION_BIN_LOG)) &&
mysql_bin_log.is_open())),
- (thd->variables.option_bits & OPTION_BIN_LOG) &&
- mysql_bin_log.is_open()));
+ (thd->variables.option_bits & OPTION_BIN_LOG) &&
+ mysql_bin_log.is_open()));
}
@@ -6098,7 +6155,9 @@ static int write_locked_table_maps(THD *thd)
MYSQL_LOCK *locks[2];
locks[0]= thd->extra_lock;
locks[1]= thd->lock;
- my_bool with_annotate= thd->variables.binlog_annotate_row_events &&
+ my_bool with_annotate= IF_WSREP(!wsrep_fragments_certified_for_stmt(thd),
+ true) &&
+ thd->variables.binlog_annotate_row_events &&
thd->query() && thd->query_length();
for (uint i= 0 ; i < sizeof(locks)/sizeof(*locks) ; ++i )
@@ -6186,23 +6245,9 @@ int binlog_log_row(TABLE* table, const uchar *before_record,
/* only InnoDB tables will be replicated through binlog emulation */
if ((WSREP_EMULATE_BINLOG(thd) &&
- table->file->partition_ht()->db_type != DB_TYPE_INNODB) ||
- (thd->wsrep_ignore_table == true))
+ !(table->file->partition_ht()->flags & HTON_WSREP_REPLICATION)) ||
+ thd->wsrep_ignore_table == true)
return 0;
-
- /* enforce wsrep_max_ws_rows */
- if (WSREP(thd) && table->s->tmp_table == NO_TMP_TABLE)
- {
- thd->wsrep_affected_rows++;
- if (wsrep_max_ws_rows &&
- thd->wsrep_exec_mode != REPL_RECV &&
- thd->wsrep_affected_rows > wsrep_max_ws_rows)
- {
- trans_rollback_stmt(thd) || trans_rollback(thd);
- my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0));
- return ER_ERROR_DURING_COMMIT;
- }
- }
#endif
if (!table->file->check_table_binlog_row_based(1))
@@ -6310,10 +6355,191 @@ int handler::ha_reset()
/* Reset information about pushed engine conditions */
cancel_pushed_idx_cond();
/* Reset information about pushed index conditions */
+ cancel_pushed_rowid_filter();
clear_top_table_fields();
DBUG_RETURN(reset());
}
+#ifdef WITH_WSREP
+static int wsrep_after_row(THD *thd)
+{
+ DBUG_ENTER("wsrep_after_row");
+ /* enforce wsrep_max_ws_rows */
+ thd->wsrep_affected_rows++;
+ if (wsrep_max_ws_rows &&
+ wsrep_thd_is_local(thd) &&
+ thd->wsrep_affected_rows > wsrep_max_ws_rows)
+ {
+ trans_rollback_stmt(thd) || trans_rollback(thd);
+ my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0));
+ DBUG_RETURN(ER_ERROR_DURING_COMMIT);
+ }
+ else if (wsrep_after_row(thd, false))
+ {
+ DBUG_RETURN(ER_LOCK_DEADLOCK);
+ }
+ DBUG_RETURN(0);
+}
+#endif /* WITH_WSREP */
+
+static int check_duplicate_long_entry_key(TABLE *table, handler *h,
+ uchar *new_rec, uint key_no)
+{
+ Field *hash_field;
+ int result, error= 0;
+ KEY *key_info= table->key_info + key_no;
+ hash_field= key_info->key_part->field;
+ uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL];
+
+ DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY &&
+ key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL)
+ || key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL);
+
+ if (hash_field->is_real_null())
+ return 0;
+
+ key_copy(ptr, new_rec, key_info, key_info->key_length, false);
+
+ if (!table->check_unique_buf)
+ table->check_unique_buf= (uchar *)alloc_root(&table->mem_root,
+ table->s->reclength);
+
+ result= h->ha_index_init(key_no, 0);
+ if (result)
+ return result;
+ store_record(table, check_unique_buf);
+ result= h->ha_index_read_map(table->record[0],
+ ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT);
+ if (!result)
+ {
+ bool is_same;
+ Field * t_field;
+ Item_func_hash * temp= (Item_func_hash *)hash_field->vcol_info->expr;
+ Item ** arguments= temp->arguments();
+ uint arg_count= temp->argument_count();
+ do
+ {
+ my_ptrdiff_t diff= table->check_unique_buf - new_rec;
+ is_same= true;
+ for (uint j=0; is_same && j < arg_count; j++)
+ {
+ DBUG_ASSERT(arguments[j]->type() == Item::FIELD_ITEM ||
+ // this one for left(fld_name,length)
+ arguments[j]->type() == Item::FUNC_ITEM);
+ if (arguments[j]->type() == Item::FIELD_ITEM)
+ {
+ t_field= static_cast<Item_field *>(arguments[j])->field;
+ if (t_field->cmp_offset(diff))
+ is_same= false;
+ }
+ else
+ {
+ Item_func_left *fnc= static_cast<Item_func_left *>(arguments[j]);
+ DBUG_ASSERT(!my_strcasecmp(system_charset_info, "left", fnc->func_name()));
+ DBUG_ASSERT(fnc->arguments()[0]->type() == Item::FIELD_ITEM);
+ t_field= static_cast<Item_field *>(fnc->arguments()[0])->field;
+ uint length= (uint)fnc->arguments()[1]->val_int();
+ if (t_field->cmp_max(t_field->ptr, t_field->ptr + diff, length))
+ is_same= false;
+ }
+ }
+ }
+ while (!is_same && !(result= h->ha_index_next_same(table->record[0],
+ ptr, key_info->key_length)));
+ if (is_same)
+ error= HA_ERR_FOUND_DUPP_KEY;
+ goto exit;
+ }
+ if (result != HA_ERR_KEY_NOT_FOUND)
+ error= result;
+exit:
+ if (error == HA_ERR_FOUND_DUPP_KEY)
+ {
+ table->file->errkey= key_no;
+ if (h->ha_table_flags() & HA_DUPLICATE_POS)
+ {
+ h->position(table->record[0]);
+ memcpy(table->file->dup_ref, h->ref, h->ref_length);
+ }
+ }
+ restore_record(table, check_unique_buf);
+ h->ha_index_end();
+ return error;
+}
+
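The hash probe above has to be followed by the field-by-field comparison because different key values can map to the same hash. A self-contained illustration of the same collision-recheck pattern (plain C++, not MariaDB code):

    #include <string>
    #include <unordered_map>

    /* Returns true if 'val' would violate the emulated unique constraint,
       otherwise records the value and returns false. */
    static bool violates_long_unique(std::unordered_multimap<size_t, std::string> &idx,
                                     const std::string &val)
    {
      size_t h= std::hash<std::string>()(val);
      auto range= idx.equal_range(h);      /* all stored rows with the same hash */
      for (auto it= range.first; it != range.second; ++it)
        if (it->second == val)             /* re-compare real values: true duplicate */
          return true;
      idx.emplace(h, val);                 /* no equal value under this hash: accept */
      return false;
    }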
+/** @brief
+ Check whether an inserted record breaks the
+ unique constraint on long columns.
+ @returns 0 if there is no duplicate, otherwise an error
+ */
+static int check_duplicate_long_entries(TABLE *table, handler *h, uchar *new_rec)
+{
+ table->file->errkey= -1;
+ int result;
+ for (uint i= 0; i < table->s->keys; i++)
+ {
+ if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH &&
+ (result= check_duplicate_long_entry_key(table, h, new_rec, i)))
+ return result;
+ }
+ return 0;
+}
+
+/** @brief
+ Check whether an updated record breaks the
+ unique constraint on long columns.
+ For UPDATE we only need to check the keys whose fields have actually
+ changed. Consider
+   create table t1(a blob, b blob, x blob, y blob,
+                   unique(a,b), unique(x,y))
+ and the statement
+   update t1 set a=23+a;
+ If we probed every long unique key, the index scan on x_y would report
+ a duplicate because the (x,y) data is unchanged. So for UPDATE we
+ compare the old and new values of each key's fields and probe only the
+ keys that changed (a plain INSERT checks all long unique keys).
+ @returns 0 if no duplicate, otherwise an error
+ */
+static int check_duplicate_long_entries_update(TABLE *table, handler *h, uchar *new_rec)
+{
+ Field *field;
+ uint key_parts;
+ int error= 0;
+ KEY *keyinfo;
+ KEY_PART_INFO *keypart;
+ /*
+ Here we check whether the new record and the old record are the same
+ with respect to the fields covered by the long unique (hash) keys
+ */
+ uint reclength= (uint) (table->record[1] - table->record[0]);
+ table->clone_handler_for_update();
+ for (uint i= 0; i < table->s->keys; i++)
+ {
+ keyinfo= table->key_info + i;
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ {
+ key_parts= fields_in_hash_keyinfo(keyinfo);
+ keypart= keyinfo->key_part - key_parts;
+ for (uint j= 0; j < key_parts; j++, keypart++)
+ {
+ field= keypart->field;
+ /* Compare the fields; if any of them differs, check for duplicates */
+ if(field->cmp_binary_offset(reclength))
+ {
+ if((error= check_duplicate_long_entry_key(table, table->update_handler,
+ new_rec, i)))
+ goto exit;
+ /*
+ break because check_duplicate_long_entry_key() has already
+ checked the whole key; the remaining fields need not be compared
+ */
+ break;
+ }
+ }
+ }
+ }
+ exit:
+ return error;
+}
int handler::ha_write_row(uchar *buf)
{
@@ -6328,6 +6554,14 @@ int handler::ha_write_row(uchar *buf)
mark_trx_read_write();
increment_statistics(&SSV::ha_write_count);
+ if (table->s->long_unique_table)
+ {
+ if (this->inited == RND)
+ table->clone_handler_for_update();
+ handler *h= table->update_handler ? table->update_handler : table->file;
+ if ((error= check_duplicate_long_entries(table, h, buf)))
+ DBUG_RETURN(error);
+ }
TABLE_IO_WAIT(tracker, m_psi, PSI_TABLE_WRITE_ROW, MAX_KEY, 0,
{ error= write_row(buf); })
@@ -6336,7 +6570,15 @@ int handler::ha_write_row(uchar *buf)
{
rows_changed++;
error= binlog_log_row(table, 0, buf, log_func);
+#ifdef WITH_WSREP
+ if (table_share->tmp_table == NO_TMP_TABLE &&
+ WSREP(ha_thd()) && (error= wsrep_after_row(ha_thd())))
+ {
+ DBUG_RETURN(error);
+ }
+#endif /* WITH_WSREP */
}
+
DEBUG_SYNC_C("ha_write_row_end");
DBUG_RETURN(error);
}
@@ -6359,6 +6601,11 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str);
mark_trx_read_write();
increment_statistics(&SSV::ha_update_count);
+ if (table->s->long_unique_table &&
+ (error= check_duplicate_long_entries_update(table, table->file, (uchar *)new_data)))
+ {
+ return error;
+ }
TABLE_IO_WAIT(tracker, m_psi, PSI_TABLE_UPDATE_ROW, active_index, 0,
{ error= update_row(old_data, new_data);})
@@ -6368,6 +6615,13 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
{
rows_changed++;
error= binlog_log_row(table, old_data, new_data, log_func);
+#ifdef WITH_WSREP
+ if (table_share->tmp_table == NO_TMP_TABLE &&
+ WSREP(ha_thd()) && (error= wsrep_after_row(ha_thd())))
+ {
+ return error;
+ }
+#endif /* WITH_WSREP */
}
return error;
}
@@ -6423,6 +6677,13 @@ int handler::ha_delete_row(const uchar *buf)
{
rows_changed++;
error= binlog_log_row(table, buf, 0, log_func);
+#ifdef WITH_WSREP
+ if (table_share->tmp_table == NO_TMP_TABLE &&
+ WSREP(ha_thd()) && (error= wsrep_after_row(ha_thd())))
+ {
+ return error;
+ }
+#endif /* WITH_WSREP */
}
return error;
}
@@ -6612,7 +6873,7 @@ int ha_abort_transaction(THD *bf_thd, THD *victim_thd, my_bool signal)
DBUG_ENTER("ha_abort_transaction");
if (!WSREP(bf_thd) &&
!(bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU &&
- bf_thd->wsrep_exec_mode == TOTAL_ORDER)) {
+ wsrep_thd_is_toi(bf_thd))) {
DBUG_RETURN(0);
}
@@ -6628,54 +6889,6 @@ int ha_abort_transaction(THD *bf_thd, THD *victim_thd, my_bool signal)
DBUG_RETURN(0);
}
-
-void ha_fake_trx_id(THD *thd)
-{
- DBUG_ENTER("ha_fake_trx_id");
-
- bool no_fake_trx_id= true;
-
- if (!WSREP(thd))
- {
- DBUG_VOID_RETURN;
- }
-
- if (thd->wsrep_ws_handle.trx_id != WSREP_UNDEFINED_TRX_ID)
- {
- WSREP_DEBUG("fake trx id skipped: %" PRIu64, thd->wsrep_ws_handle.trx_id);
- DBUG_VOID_RETURN;
- }
-
- /* Try statement transaction if standard one is not set. */
- THD_TRANS *trans= (thd->transaction.all.ha_list) ? &thd->transaction.all :
- &thd->transaction.stmt;
-
- Ha_trx_info *ha_info= trans->ha_list, *ha_info_next;
-
- for (; ha_info; ha_info= ha_info_next)
- {
- handlerton *hton= ha_info->ht();
- if (hton->fake_trx_id)
- {
- hton->fake_trx_id(hton, thd);
-
- /* Got a fake trx id. */
- no_fake_trx_id= false;
-
- /*
- We need transaction ID from just one storage engine providing
- fake_trx_id (which will most likely be the case).
- */
- break;
- }
- ha_info_next= ha_info->next();
- }
-
- if (unlikely(no_fake_trx_id))
- WSREP_WARN("Cannot get fake transaction ID from storage engine.");
-
- DBUG_VOID_RETURN;
-}
#endif /* WITH_WSREP */
@@ -6963,6 +7176,10 @@ int del_global_index_stat(THD *thd, TABLE* table, KEY* key_info)
DBUG_RETURN(res);
}
+/*****************************************************************************
+ VERSIONING functions
+******************************************************************************/
+
bool Vers_parse_info::is_start(const char *name) const
{
DBUG_ASSERT(name);
@@ -7036,8 +7253,8 @@ bool Vers_parse_info::fix_implicit(THD *thd, Alter_info *alter_info)
alter_info->flags|= ALTER_PARSER_ADD_COLUMN;
- system_time= start_end_t(default_start, default_end);
- as_row= system_time;
+ period= start_end_t(default_start, default_end);
+ as_row= period;
if (vers_create_sys_field(thd, default_start, alter_info, VERS_SYS_START_FLAG) ||
vers_create_sys_field(thd, default_end, alter_info, VERS_SYS_END_FLAG))
@@ -7206,7 +7423,7 @@ bool Vers_parse_info::fix_alter_info(THD *thd, Alter_info *alter_info,
DBUG_ASSERT(end.str);
as_row= start_end_t(start, end);
- system_time= as_row;
+ period= as_row;
if (alter_info->create_list.elements)
{
@@ -7291,7 +7508,7 @@ Vers_parse_info::fix_create_like(Alter_info &alter_info, HA_CREATE_INFO &create_
}
as_row= start_end_t(f_start->field_name, f_end->field_name);
- system_time= as_row;
+ period= as_row;
create_info.options|= HA_VERSIONED_TABLE;
return false;
@@ -7316,14 +7533,14 @@ bool Vers_parse_info::check_conditions(const Lex_table_name &table_name,
return true;
}
- if (!system_time.start || !system_time.end)
+ if (!period.start || !period.end)
{
my_error(ER_MISSING, MYF(0), table_name.str, "PERIOD FOR SYSTEM_TIME");
return true;
}
- if (!as_row.start.streq(system_time.start) ||
- !as_row.end.streq(system_time.end))
+ if (!as_row.start.streq(period.start) ||
+ !as_row.end.streq(period.end))
{
my_error(ER_VERS_PERIOD_COLUMNS, MYF(0), as_row.start.str, as_row.end.str);
return true;
@@ -7412,3 +7629,107 @@ bool Vers_parse_info::check_sys_fields(const Lex_table_name &table_name,
"ROW END" : found_flag ? "ROW START" : "ROW START/END");
return true;
}
+
+bool Table_period_info::check_field(const Create_field* f,
+ const Lex_ident& f_name) const
+{
+ bool res= false;
+ if (!f)
+ {
+ my_error(ER_BAD_FIELD_ERROR, MYF(0), f_name.str, name.str);
+ res= true;
+ }
+ else if (f->type_handler()->mysql_timestamp_type() != MYSQL_TIMESTAMP_DATE &&
+ f->type_handler()->mysql_timestamp_type() != MYSQL_TIMESTAMP_DATETIME)
+ {
+ my_error(ER_WRONG_FIELD_SPEC, MYF(0), f->field_name.str);
+ res= true;
+ }
+ else if (f->vcol_info || f->flags & VERS_SYSTEM_FIELD)
+ {
+ my_error(ER_PERIOD_FIELD_WRONG_ATTRIBUTES, MYF(0),
+ f->field_name.str, "GENERATED ALWAYS AS");
+ }
+
+ return res;
+}
+
+bool Table_scope_and_contents_source_st::check_fields(
+ THD *thd, Alter_info *alter_info, TABLE_LIST &create_table)
+{
+ return vers_check_system_fields(thd, alter_info, create_table)
+ || check_period_fields(thd, alter_info);
+}
+
+bool Table_scope_and_contents_source_st::check_period_fields(
+ THD *thd, Alter_info *alter_info)
+{
+ if (!period_info.name)
+ return false;
+
+ if (tmp_table())
+ {
+ my_error(ER_PERIOD_TEMPORARY_NOT_ALLOWED, MYF(0));
+ return true;
+ }
+
+ Table_period_info::start_end_t &period= period_info.period;
+ const Create_field *row_start= NULL;
+ const Create_field *row_end= NULL;
+ List_iterator<Create_field> it(alter_info->create_list);
+ while (const Create_field *f= it++)
+ {
+ if (period.start.streq(f->field_name)) row_start= f;
+ else if (period.end.streq(f->field_name)) row_end= f;
+
+ if (period_info.name.streq(f->field_name))
+ {
+ my_error(ER_DUP_FIELDNAME, MYF(0), f->field_name.str);
+ return true;
+ }
+ }
+
+ bool res= period_info.check_field(row_start, period.start.str)
+ || period_info.check_field(row_end, period.end.str);
+ if (res)
+ return true;
+
+ if (row_start->type_handler() != row_end->type_handler()
+ || row_start->length != row_end->length)
+ {
+ my_error(ER_PERIOD_TYPES_MISMATCH, MYF(0), period_info.name.str);
+ res= true;
+ }
+
+ return res;
+}
+
+bool
+Table_scope_and_contents_source_st::fix_create_fields(THD *thd,
+ Alter_info *alter_info,
+ const TABLE_LIST &create_table,
+ bool create_select)
+{
+ return vers_fix_system_fields(thd, alter_info, create_table, create_select)
+ || fix_period_fields(thd, alter_info);
+}
+
+bool
+Table_scope_and_contents_source_st::fix_period_fields(THD *thd,
+ Alter_info *alter_info)
+{
+ if (!period_info.name)
+ return false;
+
+ Table_period_info::start_end_t &period= period_info.period;
+ List_iterator<Create_field> it(alter_info->create_list);
+ while (Create_field *f= it++)
+ {
+ if (period.start.streq(f->field_name) || period.end.streq(f->field_name))
+ {
+ f->period= &period_info;
+ f->flags|= NOT_NULL_FLAG;
+ }
+ }
+ return false;
+}
diff --git a/sql/handler.h b/sql/handler.h
index 384166cf5c4..fb6862e4ce1 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -2,7 +2,7 @@
#define HANDLER_INCLUDED
/*
Copyright (c) 2000, 2016, Oracle and/or its affiliates.
- Copyright (c) 2009, 2018, MariaDB
+ Copyright (c) 2009, 2019, MariaDB
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
@@ -43,10 +43,12 @@
#include <keycache.h>
#include <mysql/psi/mysql_table.h>
#include "sql_sequence.h"
+#include "mem_root_array.h"
class Alter_info;
class Virtual_column_info;
class sequence_definition;
+class Rowid_filter;
// the following is for checking tables
@@ -115,7 +117,13 @@ enum enum_alter_inplace_result {
#define HA_NO_BLOBS (1ULL << 9) /* Doesn't support blobs */
#define HA_CAN_INDEX_BLOBS (1ULL << 10)
#define HA_AUTO_PART_KEY (1ULL << 11) /* auto-increment in multi-part key */
-#define HA_REQUIRE_PRIMARY_KEY (1ULL << 12) /* .. and can't create a hidden one */
+/*
+ The engine requires every table to have a user-specified PRIMARY KEY.
+ Do not set the flag if the engine can generate a hidden primary key internally.
+ This flag is ignored if a SEQUENCE is created (which, in turn, needs
+ HA_CAN_TABLES_WITHOUT_ROLLBACK flag)
+*/
+#define HA_REQUIRE_PRIMARY_KEY (1ULL << 12)
#define HA_STATS_RECORDS_IS_EXACT (1ULL << 13) /* stats.records is exact */
/*
INSERT_DELAYED only works with handlers that uses MySQL internal table
@@ -299,10 +307,27 @@ enum enum_alter_inplace_result {
#define HA_CAN_MULTISTEP_MERGE (1LL << 53)
/* calling cmp_ref() on the engine is expensive */
-#define HA_CMP_REF_IS_EXPENSIVE (1ULL << 54)
+#define HA_SLOW_CMP_REF (1ULL << 54)
+#define HA_CMP_REF_IS_EXPENSIVE HA_SLOW_CMP_REF
+
+/**
+ Some engines are unable to provide an efficient implementation for rnd_pos().
+ The server will try to avoid it, if possible.
+
+ TODO better to do it with cost estimates, not with an explicit flag
+*/
+#define HA_SLOW_RND_POS (1ULL << 55)
+
+/* Safe for online backup */
+#define HA_CAN_ONLINE_BACKUPS (1ULL << 56)
+
+/** whether every data field explicitly stores length
+(holds for InnoDB ROW_FORMAT=REDUNDANT) */
+#define HA_EXTENDED_TYPES_CONVERSION (1ULL << 57)
-/* Engine wants primary keys for everything except sequences */
-#define HA_WANTS_PRIMARY_KEY (1ULL << 55)
+/* Support native hash index */
+#define HA_CAN_HASH_KEYS (1ULL << 58)
+#define HA_LAST_TABLE_FLAG HA_CAN_HASH_KEYS
/* bits in index_flags(index_number) for what you can do with index */
#define HA_READ_NEXT 1 /* TODO really use this flag */
@@ -324,6 +349,8 @@ enum enum_alter_inplace_result {
*/
#define HA_CLUSTERED_INDEX 512
+#define HA_DO_RANGE_FILTER_PUSHDOWN 1024
+
/*
bits in alter_table_flags:
*/
@@ -607,6 +634,10 @@ typedef ulonglong alter_table_operations;
#define ALTER_KEYS_ONOFF (1ULL << 9)
// Set for FORCE, ENGINE(same engine), by mysql_recreate_table()
#define ALTER_RECREATE (1ULL << 10)
+// Set for CONVERT TO
+#define ALTER_CONVERT_TO (1ULL << 11)
+// Set for DROP ... ADD some_index
+#define ALTER_RENAME_INDEX (1ULL << 12)
// Set for ADD FOREIGN KEY
#define ALTER_ADD_FOREIGN_KEY (1ULL << 21)
// Set for DROP FOREIGN KEY
@@ -861,15 +892,6 @@ struct xid_t {
};
typedef struct xid_t XID;
-/*
- The size of XID string representation in the form
- 'gtrid', 'bqual', formatID
- see xid_t::get_sql_string() for details.
-*/
-#define SQL_XIDSIZE (XIDDATASIZE * 2 + 8 + MY_INT64_NUM_DECIMAL_DIGITS)
-/* The 'buf' has to have space for at least SQL_XIDSIZE bytes. */
-uint get_sql_xid(XID *xid, char *buf);
-
/* for recover() handlerton call */
#define MIN_XID_LIST_SIZE 128
#define MAX_XID_LIST_SIZE (1024*128)
@@ -978,6 +1000,7 @@ enum enum_schema_tables
SCH_KEY_CACHES,
SCH_KEY_COLUMN_USAGE,
SCH_OPEN_TABLES,
+ SCH_OPT_TRACE,
SCH_PARAMETERS,
SCH_PARTITIONS,
SCH_PLUGINS,
@@ -1196,6 +1219,8 @@ struct handler_iterator {
class handler;
class group_by_handler;
+class derived_handler;
+class select_handler;
struct Query;
typedef class st_select_lex SELECT_LEX;
typedef struct st_order ORDER;
@@ -1475,7 +1500,6 @@ struct handlerton
THD *victim_thd, my_bool signal);
int (*set_checkpoint)(handlerton *hton, const XID* xid);
int (*get_checkpoint)(handlerton *hton, XID* xid);
- void (*fake_trx_id)(handlerton *hton, THD *thd);
/*
Optional clauses in the CREATE/ALTER TABLE
*/
@@ -1515,6 +1539,21 @@ struct handlerton
*/
group_by_handler *(*create_group_by)(THD *thd, Query *query);
+ /*
+ Create and return a derived_handler if the storage engine can execute
+ the derived table 'derived', otherwise return NULL.
+ In a general case 'derived' may contain tables not from the engine.
+ If the engine cannot handle or does not want to handle such a pushed derived
+ table, the function create_derived has to return NULL.
+ */
+ derived_handler *(*create_derived)(THD *thd, TABLE_LIST *derived);
+
+ /*
+ Create and return a select_handler if the storage engine can execute
+ the select statement 'select', otherwise return NULL.
+ */
+ select_handler *(*create_select) (THD *thd, SELECT_LEX *select);
+
/*********************************************************************
Table discovery API.
It allows the server to "discover" tables that exist in the storage
@@ -1612,6 +1651,10 @@ struct handlerton
@return transaction commit ID
@retval 0 if no system-versioned data was affected by the transaction */
ulonglong (*prepare_commit_versioned)(THD *thd, ulonglong *trx_id);
+
+ /* backup */
+ void (*prepare_for_backup)(void);
+ void (*end_backup)(void);
};
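A minimal sketch, assuming the usual plugin initialization signature (the handlerton arrives as the void* argument), of how an engine could wire up the two new backup hooks; the example_* names are hypothetical:

    static void example_prepare_for_backup(void) { /* quiesce background writes */ }
    static void example_end_backup(void)         { /* resume normal operation   */ }

    static int example_init(void *p)
    {
      handlerton *hton= (handlerton *) p;
      hton->prepare_for_backup= example_prepare_for_backup;
      hton->end_backup=         example_end_backup;
      return 0;
    }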
@@ -1668,6 +1711,9 @@ handlerton *ha_default_tmp_handlerton(THD *thd);
// Engine needs to access the main connect string in partitions
#define HTON_CAN_READ_CONNECT_STRING_IN_PARTITION (1 <<12)
+/* can be replicated by wsrep replication provider plugin */
+#define HTON_WSREP_REPLICATION (1 << 13)
+
class Ha_trx_info;
struct THD_TRANS
@@ -1919,57 +1965,68 @@ enum vers_sys_type_t
VERS_TRX_ID
};
-extern const LEX_CSTRING null_clex_str;
-
-struct Vers_parse_info
+struct Table_period_info: Sql_alloc
{
- Vers_parse_info() :
- check_unit(VERS_UNDEFINED),
- versioned_fields(false),
- unversioned_fields(false)
- {}
+ Table_period_info() :
+ create_if_not_exists(false),
+ constr(NULL) {}
+ Table_period_info(const char *name_arg, size_t size) :
+ name(name_arg, size),
+ create_if_not_exists(false),
+ constr(NULL) {}
- void init() // Deep initialization
- {
- system_time= start_end_t(null_clex_str, null_clex_str);
- as_row= start_end_t(null_clex_str, null_clex_str);
- check_unit= VERS_UNDEFINED;
- versioned_fields= false;
- unversioned_fields= false;
- }
+ Lex_ident name;
struct start_end_t
{
- start_end_t()
- {}
- start_end_t(LEX_CSTRING _start, LEX_CSTRING _end) :
+ start_end_t() {};
+ start_end_t(const LEX_CSTRING& _start, const LEX_CSTRING& _end) :
start(_start),
end(_end) {}
Lex_ident start;
Lex_ident end;
};
+ start_end_t period;
+ bool create_if_not_exists;
+ Virtual_column_info *constr;
- start_end_t system_time;
- start_end_t as_row;
- vers_sys_type_t check_unit;
+ bool is_set() const
+ {
+ DBUG_ASSERT(bool(period.start) == bool(period.end));
+ return period.start;
+ }
- void set_system_time(Lex_ident start, Lex_ident end)
+ void set_period(const Lex_ident& start, const Lex_ident& end)
{
- system_time.start= start;
- system_time.end= end;
+ period.start= start;
+ period.end= end;
}
+ bool check_field(const Create_field* f, const Lex_ident& f_name) const;
+};
+
+struct Vers_parse_info: public Table_period_info
+{
+ Vers_parse_info() :
+ Table_period_info(STRING_WITH_LEN("SYSTEM_TIME")),
+ check_unit(VERS_UNDEFINED),
+ versioned_fields(false),
+ unversioned_fields(false)
+ {}
+
+ Table_period_info::start_end_t as_row;
+ vers_sys_type_t check_unit;
protected:
friend struct Table_scope_and_contents_source_st;
void set_start(const LEX_CSTRING field_name)
{
as_row.start= field_name;
- system_time.start= field_name;
+ period.start= field_name;
}
void set_end(const LEX_CSTRING field_name)
{
as_row.end= field_name;
- system_time.end= field_name;
+ period.end= field_name;
}
bool is_start(const char *name) const;
bool is_end(const char *name) const;
@@ -1978,7 +2035,7 @@ protected:
bool fix_implicit(THD *thd, Alter_info *alter_info);
operator bool() const
{
- return as_row.start || as_row.end || system_time.start || system_time.end;
+ return as_row.start || as_row.end || period.start || period.end;
}
bool need_check(const Alter_info *alter_info) const;
bool check_conditions(const Lex_table_name &table_name,
@@ -2099,20 +2156,28 @@ struct Table_scope_and_contents_source_st:
public Table_scope_and_contents_source_pod_st
{
Vers_parse_info vers_info;
+ Table_period_info period_info;
void init()
{
Table_scope_and_contents_source_pod_st::init();
- vers_info.init();
+ vers_info= {};
+ period_info= {};
}
- bool vers_fix_system_fields(THD *thd, Alter_info *alter_info,
+ bool fix_create_fields(THD *thd, Alter_info *alter_info,
const TABLE_LIST &create_table,
bool create_select= false);
+ bool fix_period_fields(THD *thd, Alter_info *alter_info);
+ bool check_fields(THD *thd, Alter_info *alter_info, TABLE_LIST &create_table);
+ bool check_period_fields(THD *thd, Alter_info *alter_info);
+
+ bool vers_fix_system_fields(THD *thd, Alter_info *alter_info,
+ const TABLE_LIST &create_table,
+ bool create_select= false);
bool vers_check_system_fields(THD *thd, Alter_info *alter_info,
const TABLE_LIST &create_table);
-
};
@@ -2297,6 +2362,29 @@ public:
uint *index_add_buffer;
/**
+ Old and new index names. Used for index rename.
+ */
+ struct Rename_key_pair
+ {
+ Rename_key_pair(const KEY *old_key, const KEY *new_key)
+ : old_key(old_key), new_key(new_key)
+ {
+ }
+ const KEY *old_key;
+ const KEY *new_key;
+ };
+ /**
+ Vector of key pairs from DROP/ADD index which can be renamed.
+ */
+ typedef Mem_root_array<Rename_key_pair, true> Rename_keys_vector;
+
+ /**
+ A list of indexes which should be renamed.
+ Index definitions stay the same.
+ */
+ Rename_keys_vector rename_keys;
+
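A minimal sketch, assuming the Mem_root_array accessors size() and at(), of how an engine's inplace-ALTER code could walk the new rename list:

    /* Illustrative only: ha_alter_info is the Alter_inplace_info passed by the server. */
    for (size_t i= 0; i < ha_alter_info->rename_keys.size(); i++)
    {
      const Alter_inplace_info::Rename_key_pair &pair= ha_alter_info->rename_keys.at(i);
      /* rename the index described by pair.old_key to the name carried by
         pair.new_key in the engine's own dictionary */
    }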
+ /**
Context information to allow handlers to keep context between in-place
alter API calls.
@@ -2357,23 +2445,7 @@ public:
Alter_info *alter_info_arg,
KEY *key_info_arg, uint key_count_arg,
partition_info *modified_part_info_arg,
- bool ignore_arg)
- : create_info(create_info_arg),
- alter_info(alter_info_arg),
- key_info_buffer(key_info_arg),
- key_count(key_count_arg),
- index_drop_count(0),
- index_drop_buffer(NULL),
- index_add_count(0),
- index_add_buffer(NULL),
- handler_ctx(NULL),
- group_commit_ctx(NULL),
- handler_flags(0),
- modified_part_info(modified_part_info_arg),
- ignore(ignore_arg),
- online(false),
- unsupported_reason(NULL)
- {}
+ bool ignore_arg);
~Alter_inplace_info()
{
@@ -2568,11 +2640,14 @@ typedef bool (*SKIP_INDEX_TUPLE_FUNC) (range_seq_t seq, range_id_t range_info);
class Cost_estimate
{
public:
- double io_count; /* number of I/O */
- double avg_io_cost; /* cost of an average I/O oper. */
- double cpu_cost; /* cost of operations in CPU */
- double import_cost; /* cost of remote operations */
- double mem_cost; /* cost of used memory */
+ double io_count; /* number of I/O to fetch records */
+ double avg_io_cost; /* cost of an average I/O oper. to fetch records */
+ double idx_io_count; /* number of I/O to read keys */
+ double idx_avg_io_cost; /* cost of an average I/O oper. to read keys */
+ double cpu_cost; /* total cost of operations in CPU */
+ double idx_cpu_cost; /* cost of operations in CPU for index */
+ double import_cost; /* cost of remote operations */
+ double mem_cost; /* cost of used memory */
enum { IO_COEFF=1 };
enum { CPU_COEFF=1 };
@@ -2586,10 +2661,18 @@ public:
double total_cost()
{
- return IO_COEFF*io_count*avg_io_cost + CPU_COEFF * cpu_cost +
+ return IO_COEFF*io_count*avg_io_cost +
+ IO_COEFF*idx_io_count*idx_avg_io_cost +
+ CPU_COEFF*cpu_cost +
MEM_COEFF*mem_cost + IMPORT_COEFF*import_cost;
}
+ double index_only_cost()
+ {
+ return IO_COEFF*idx_io_count*idx_avg_io_cost +
+ CPU_COEFF*idx_cpu_cost;
+ }
+
/**
Whether or not all costs in the object are zero
@@ -2597,30 +2680,48 @@ public:
*/
bool is_zero() const
{
- return io_count == 0.0 && cpu_cost == 0.0 &&
+ return io_count == 0.0 && idx_io_count == 0.0 && cpu_cost == 0.0 &&
import_cost == 0.0 && mem_cost == 0.0;
}
void reset()
{
avg_io_cost= 1.0;
- io_count= cpu_cost= mem_cost= import_cost= 0.0;
+ idx_avg_io_cost= 1.0;
+ io_count= idx_io_count= cpu_cost= idx_cpu_cost= mem_cost= import_cost= 0.0;
}
void multiply(double m)
{
io_count *= m;
cpu_cost *= m;
+ idx_io_count *= m;
+ idx_cpu_cost *= m;
import_cost *= m;
/* Don't multiply mem_cost */
}
void add(const Cost_estimate* cost)
{
- double io_count_sum= io_count + cost->io_count;
- add_io(cost->io_count, cost->avg_io_cost);
- io_count= io_count_sum;
+ if (cost->io_count)
+ {
+ double io_count_sum= io_count + cost->io_count;
+ avg_io_cost= (io_count * avg_io_cost +
+ cost->io_count * cost->avg_io_cost)
+ /io_count_sum;
+ io_count= io_count_sum;
+ }
+ if (cost->idx_io_count)
+ {
+ double idx_io_count_sum= idx_io_count + cost->idx_io_count;
+ idx_avg_io_cost= (idx_io_count * idx_avg_io_cost +
+ cost->idx_io_count * cost->idx_avg_io_cost)
+ /idx_io_count_sum;
+ idx_io_count= idx_io_count_sum;
+ }
cpu_cost += cost->cpu_cost;
+ idx_cpu_cost += cost->idx_cpu_cost;
+ import_cost += cost->import_cost;
}
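A minimal usage sketch of the split data/index cost fields introduced above; the numbers are arbitrary:

    Cost_estimate cost;
    cost.reset();                 /* avg_io_cost= idx_avg_io_cost= 1.0, counters 0 */
    cost.io_count= 100.0;         /* I/Os needed to fetch records */
    cost.idx_io_count= 10.0;      /* I/Os needed to read keys */
    cost.idx_cpu_cost= 5.0;       /* CPU spent on the index part */
    cost.cpu_cost= 25.0;          /* total CPU cost, including the index part */
    double full= cost.total_cost();            /* data I/O + index I/O + CPU + mem + import */
    double index_only= cost.index_only_cost(); /* index I/O + index CPU only */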
void add_io(double add_io_cnt, double add_avg_cost)
@@ -2797,6 +2898,9 @@ public:
extern "C" enum icp_result handler_index_cond_check(void* h_arg);
+extern "C" int handler_rowid_filter_check(void* h_arg);
+extern "C" int handler_rowid_filter_is_active(void* h_arg);
+
uint calculate_key_len(TABLE *, uint, const uchar *, key_part_map);
/*
bitmap with first N+1 bits set
@@ -2959,10 +3063,16 @@ private:
Exec_time_tracker *tracker;
public:
void set_time_tracker(Exec_time_tracker *tracker_arg) { tracker=tracker_arg;}
+ Exec_time_tracker *get_time_tracker() { return tracker; }
Item *pushed_idx_cond;
uint pushed_idx_cond_keyno; /* The index which the above condition is for */
+ /* Rowid filter pushed into the engine */
+ Rowid_filter *pushed_rowid_filter;
+ /* true when the pushed rowid filter has already been filled */
+ bool rowid_filter_is_active;
+
Discrete_interval auto_inc_interval_for_cur_row;
/**
Number of reserved auto-increment intervals. Serves as a heuristic
@@ -3018,7 +3128,7 @@ public:
check_table_binlog_row_based_done(0),
check_table_binlog_row_based_result(0),
row_already_logged(0),
- in_range_check_pushed_down(FALSE),
+ in_range_check_pushed_down(FALSE), errkey(-1),
key_used_on_scan(MAX_KEY),
active_index(MAX_KEY), keyread(MAX_KEY),
ref_length(sizeof(my_off_t)),
@@ -3027,6 +3137,8 @@ public:
tracker(NULL),
pushed_idx_cond(NULL),
pushed_idx_cond_keyno(MAX_KEY),
+ pushed_rowid_filter(NULL),
+ rowid_filter_is_active(0),
auto_inc_intervals_count(0),
m_psi(NULL), set_top_table_fields(FALSE), top_table(0),
top_table_field(0), top_table_fields(0),
@@ -3115,7 +3227,11 @@ public:
/**
The cached_table_flags is set at ha_open and ha_external_lock
*/
- Table_flags ha_table_flags() const { return cached_table_flags; }
+ Table_flags ha_table_flags() const
+ {
+ DBUG_ASSERT(cached_table_flags < (HA_LAST_TABLE_FLAG << 1));
+ return cached_table_flags;
+ }
/**
These functions represent the public interface to *users* of the
handler class, hence they are *not* virtual. For the inheritance
@@ -3233,6 +3349,11 @@ public:
virtual double scan_time()
{ return ulonglong2double(stats.data_file_length) / IO_SIZE + 2; }
+ virtual double key_scan_time(uint index)
+ {
+ return keyread_time(index, 1, records());
+ }
+
/**
The cost of reading a set of ranges from the table using an index
to access it.
@@ -3260,7 +3381,7 @@ public:
/*
True if changes to the table is persistent (no rollback)
- This is manly used to decide how to log changes to the table in
+ This is mainly used to decide how to log changes to the table in
the binary log.
*/
bool has_transactions()
@@ -4055,6 +4176,14 @@ public:
in_range_check_pushed_down= false;
}
+ virtual void cancel_pushed_rowid_filter()
+ {
+ pushed_rowid_filter= NULL;
+ rowid_filter_is_active= false;
+ }
+
+ virtual bool rowid_filter_push(Rowid_filter *rowid_filter) { return true; }
+
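A minimal sketch, not taken from the patch, of an engine accepting the pushdown; ha_example is hypothetical, and the assumed convention (matching the default above, which returns true) is that returning false means the filter was accepted:

    bool ha_example::rowid_filter_push(Rowid_filter *rowid_filter)
    {
      pushed_rowid_filter= rowid_filter;  /* keep the filter for later row checks */
      return false;                       /* pushdown accepted */
    }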
/* Needed for partition / spider */
virtual TABLE_LIST *get_next_global_for_child() { return NULL; }
@@ -4674,6 +4803,9 @@ public:
{ DBUG_ASSERT(ht); return partition_ht()->flags & HTON_NATIVE_SYS_VERSIONING; }
virtual void update_partition(uint part_id)
{}
+
+ virtual bool is_clustering_key(uint index) { return false; }
+
protected:
Handler_share *get_ha_share_ptr();
void set_ha_share_ptr(Handler_share *arg_ha_share);
@@ -4752,6 +4884,8 @@ int ha_create_table(THD *thd, const char *path,
HA_CREATE_INFO *create_info, LEX_CUSTRING *frm);
int ha_delete_table(THD *thd, handlerton *db_type, const char *path,
const LEX_CSTRING *db, const LEX_CSTRING *alias, bool generate_warning);
+void ha_prepare_for_backup();
+void ha_end_backup();
/* statistics and info */
bool ha_show_status(THD *thd, handlerton *db_type, enum ha_stat_type stat);
@@ -4819,9 +4953,6 @@ int ha_savepoint(THD *thd, SAVEPOINT *sv);
int ha_release_savepoint(THD *thd, SAVEPOINT *sv);
#ifdef WITH_WSREP
int ha_abort_transaction(THD *bf_thd, THD *victim_thd, my_bool signal);
-void ha_fake_trx_id(THD *thd);
-#else
-inline void ha_fake_trx_id(THD *thd) { }
#endif
/* these are called by storage engines */
@@ -4837,7 +4968,6 @@ void trans_register_ha(THD *thd, bool all, handlerton *ht);
const char *get_canonical_filename(handler *file, const char *path,
char *tmp_path);
-bool mysql_xa_recover(THD *thd);
void commit_checkpoint_notify_ha(handlerton *hton, void *cookie);
inline const LEX_CSTRING *table_case_name(HA_CREATE_INFO *info, const LEX_CSTRING *name)
diff --git a/sql/init.cc b/sql/init.cc
index 0d00e3cf846..024cbb79d81 100644
--- a/sql/init.cc
+++ b/sql/init.cc
@@ -24,7 +24,7 @@
#include "mariadb.h"
#include "sql_priv.h"
#include "init.h"
-#include "mysqld.h" // abort_loop, ...
+#include "mysqld.h"
#include "my_time.h" // my_init_time
#include "unireg.h" // SPECIAL_SAME_DB_NAME
#include <m_ctype.h>
@@ -34,8 +34,6 @@ void unireg_init(ulong options)
DBUG_ENTER("unireg_init");
error_handler_hook = my_message_stderr;
- abort_loop=0;
-
my_disable_async_io=1; /* aioread is only in shared library */
wild_many='%'; wild_one='_'; wild_prefix='\\'; /* Change to sql syntax */
diff --git a/sql/init.h b/sql/init.h
index e8dec0c1e2e..0bb67b293ed 100644
--- a/sql/init.h
+++ b/sql/init.h
@@ -17,6 +17,5 @@
#define INIT_INCLUDED
void unireg_init(ulong options);
-ATTRIBUTE_NORETURN void unireg_end(void);
#endif /* INIT_INCLUDED */
diff --git a/sql/innodb_priv.h b/sql/innodb_priv.h
index 7fbaa7cfc2f..b78724d04b0 100644
--- a/sql/innodb_priv.h
+++ b/sql/innodb_priv.h
@@ -19,15 +19,13 @@
/** @file Declaring server-internal functions that are used by InnoDB. */
#include <sql_priv.h>
+#include <strfunc.h> /* strconvert */
class THD;
int get_quote_char_for_identifier(THD *thd, const char *name, size_t length);
bool schema_table_store_record(THD *thd, TABLE *table);
void localtime_to_TIME(MYSQL_TIME *to, struct tm *from);
-uint strconvert(CHARSET_INFO *from_cs, const char *from, size_t from_length,
- CHARSET_INFO *to_cs, char *to, size_t to_length,
- uint *errors);
void sql_print_error(const char *format, ...);
diff --git a/sql/item.cc b/sql/item.cc
index a9139ceb0b1..72231497a22 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2018, Oracle and/or its affiliates.
- Copyright (c) 2010, 2018, MariaDB Corporation
+ Copyright (c) 2010, 2019, MariaDB Corporation
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -115,60 +115,31 @@ void Item::push_note_converted_to_positive_complement(THD *thd)
}
-longlong Item::val_datetime_packed_result()
+longlong Item::val_datetime_packed_result(THD *thd)
{
MYSQL_TIME ltime, tmp;
- if (get_date_result(&ltime, TIME_FUZZY_DATES | TIME_INVALID_DATES))
+ if (get_date_result(thd, &ltime, Datetime::Options_cmp(thd)))
return 0;
if (ltime.time_type != MYSQL_TIMESTAMP_TIME)
return pack_time(&ltime);
- if ((null_value= time_to_datetime_with_warn(current_thd, &ltime, &tmp, 0)))
+ if ((null_value= time_to_datetime_with_warn(thd, &ltime, &tmp,
+ TIME_CONV_NONE)))
return 0;
return pack_time(&tmp);
}
-/**
- Get date/time/datetime.
- If DATETIME or DATE result is returned, it's converted to TIME.
-*/
-bool Item::get_time_with_conversion(THD *thd, MYSQL_TIME *ltime,
- ulonglong fuzzydate)
+longlong Item::val_time_packed_result(THD *thd)
{
- if (get_date(ltime, fuzzydate))
- return true;
- if (ltime->time_type != MYSQL_TIMESTAMP_TIME)
- {
- MYSQL_TIME ltime2;
- if ((thd->variables.old_behavior & OLD_MODE_ZERO_DATE_TIME_CAST) &&
- (ltime->year || ltime->day || ltime->month))
- {
- /*
- Old mode conversion from DATETIME with non-zero YYYYMMDD part
- to TIME works very inconsistently. Possible variants:
- - truncate the YYYYMMDD part
- - add (MM*33+DD)*24 to hours
- - add (MM*31+DD)*24 to hours
- Let's return TRUE here, to disallow equal field propagation.
- Note, If we start to use this method in more pieces of the code other
- than equal field propagation, we should probably return
- TRUE only if some flag in fuzzydate is set.
- */
- return true;
- }
- if (datetime_to_time_with_warn(thd, ltime, &ltime2, TIME_SECOND_PART_DIGITS))
- {
- /*
- If the time difference between CURRENT_DATE and ltime
- did not fit into the supported TIME range, then we set the
- difference to the maximum possible value in the supported TIME range
- */
- DBUG_ASSERT(0);
- return (null_value= true);
- }
- *ltime= ltime2;
- }
- return false;
+ MYSQL_TIME ltime;
+ if (get_date_result(thd, &ltime, Time::Options_cmp(thd)))
+ return 0;
+ if (ltime.time_type == MYSQL_TIMESTAMP_TIME)
+ return pack_time(&ltime);
+ int warn= 0;
+ Time tmp(&warn, &ltime, 0);
+ DBUG_ASSERT(tmp.is_valid_time());
+ return tmp.to_packed();
}
@@ -253,36 +224,6 @@ String *Item::val_string_from_int(String *str)
}
-String *Item::val_string_from_decimal(String *str)
-{
- my_decimal dec_buf, *dec= val_decimal(&dec_buf);
- if (null_value)
- return 0;
- my_decimal_round(E_DEC_FATAL_ERROR, dec, decimals, FALSE, &dec_buf);
- my_decimal2string(E_DEC_FATAL_ERROR, &dec_buf, 0, 0, 0, str);
- return str;
-}
-
-
-/*
- All val_xxx_from_date() must call this method, to expose consistent behaviour
- regarding SQL_MODE when converting DATE/DATETIME to other data types.
-*/
-bool Item::get_temporal_with_sql_mode(MYSQL_TIME *ltime)
-{
- return get_date(ltime, field_type() == MYSQL_TYPE_TIME
- ? TIME_TIME_ONLY
- : sql_mode_for_dates(current_thd));
-}
-
-
-bool Item::is_null_from_temporal()
-{
- MYSQL_TIME ltime;
- return get_temporal_with_sql_mode(&ltime);
-}
-
-
longlong Item::val_int_from_str(int *error)
{
char buff[MAX_FIELD_WIDTH];
@@ -324,15 +265,6 @@ longlong Item::val_int_unsigned_typecast_from_str()
}
-longlong Item::val_int_unsigned_typecast_from_int()
-{
- longlong value= val_int();
- if (!null_value && unsigned_flag == 0 && value < 0)
- push_note_converted_to_positive_complement(current_thd);
- return value;
-}
-
-
longlong Item::val_int_signed_typecast_from_int()
{
longlong value= val_int();
@@ -342,18 +274,12 @@ longlong Item::val_int_signed_typecast_from_int()
}
-String *Item::val_string_from_date(String *str)
+longlong Item::val_int_unsigned_typecast_from_int()
{
- MYSQL_TIME ltime;
- if (get_temporal_with_sql_mode(&ltime) ||
- str->alloc(MAX_DATE_STRING_REP_LENGTH))
- {
- null_value= 1;
- return (String *) 0;
- }
- str->length(my_TIME_to_str(&ltime, const_cast<char*>(str->ptr()), decimals));
- str->set_charset(&my_charset_numeric);
- return str;
+ longlong value= val_int();
+ if (!null_value && unsigned_flag == 0 && value < 0)
+ push_note_converted_to_positive_complement(current_thd);
+ return value;
}
@@ -388,93 +314,10 @@ my_decimal *Item::val_decimal_from_string(my_decimal *decimal_value)
}
-my_decimal *Item::val_decimal_from_date(my_decimal *decimal_value)
-{
- DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- if (get_temporal_with_sql_mode(&ltime))
- {
- my_decimal_set_zero(decimal_value);
- null_value= 1; // set NULL, stop processing
- return 0;
- }
- return date2my_decimal(&ltime, decimal_value);
-}
-
-
-my_decimal *Item::val_decimal_from_time(my_decimal *decimal_value)
-{
- DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- if (get_time(&ltime))
- {
- my_decimal_set_zero(decimal_value);
- return 0;
- }
- return date2my_decimal(&ltime, decimal_value);
-}
-
-
-longlong Item::val_int_from_date()
-{
- DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- if (get_temporal_with_sql_mode(&ltime))
- return 0;
- longlong v= TIME_to_ulonglong(&ltime);
- return ltime.neg ? -v : v;
-}
-
-
-double Item::val_real_from_date()
-{
- DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- if (get_temporal_with_sql_mode(&ltime))
- return 0;
- return TIME_to_double(&ltime);
-}
-
-
-double Item::val_real_from_decimal()
-{
- /* Note that fix_fields may not be called for Item_avg_field items */
- double result;
- my_decimal value_buff, *dec_val= val_decimal(&value_buff);
- if (null_value)
- return 0.0;
- my_decimal2double(E_DEC_FATAL_ERROR, dec_val, &result);
- return result;
-}
-
-
-longlong Item::val_int_from_decimal()
-{
- /* Note that fix_fields may not be called for Item_avg_field items */
- longlong result;
- my_decimal value, *dec_val= val_decimal(&value);
- if (null_value)
- return 0;
- my_decimal2int(E_DEC_FATAL_ERROR, dec_val, unsigned_flag, &result);
- return result;
-}
-
-
-longlong Item::val_int_unsigned_typecast_from_decimal()
-{
- longlong result;
- my_decimal tmp, *dec= val_decimal(&tmp);
- if (null_value)
- return 0;
- my_decimal2int(E_DEC_FATAL_ERROR, dec, 1, &result);
- return result;
-}
-
-
int Item::save_time_in_field(Field *field, bool no_conversions)
{
MYSQL_TIME ltime;
- if (get_time(&ltime))
+ if (get_time(field->table->in_use, &ltime))
return set_field_to_null_with_conversions(field, no_conversions);
field->set_notnull();
return field->store_time_dec(&ltime, decimals);
@@ -484,7 +327,8 @@ int Item::save_time_in_field(Field *field, bool no_conversions)
int Item::save_date_in_field(Field *field, bool no_conversions)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, sql_mode_for_dates(field->table->in_use)))
+ THD *thd= field->table->in_use;
+ if (get_date(thd, &ltime, Datetime::Options(thd)))
return set_field_to_null_with_conversions(field, no_conversions);
field->set_notnull();
return field->store_time_dec(&ltime, decimals);
@@ -524,11 +368,11 @@ int Item::save_str_value_in_field(Field *field, String *result)
Item::Item(THD *thd):
is_expensive_cache(-1), rsize(0), name(null_clex_str), orig_name(0),
- fixed(0), is_autogenerated_name(TRUE)
+ is_autogenerated_name(TRUE)
{
DBUG_ASSERT(thd);
marker= 0;
- maybe_null=null_value=with_sum_func=with_window_func=with_field=0;
+ maybe_null= null_value= with_window_func= with_field= false;
in_rollup= 0;
with_param= 0;
@@ -581,11 +425,9 @@ Item::Item(THD *thd, Item *item):
maybe_null(item->maybe_null),
in_rollup(item->in_rollup),
null_value(item->null_value),
- with_sum_func(item->with_sum_func),
with_param(item->with_param),
with_window_func(item->with_window_func),
with_field(item->with_field),
- fixed(item->fixed),
is_autogenerated_name(item->is_autogenerated_name)
{
next= thd->free_list; // Put in free list
@@ -655,7 +497,6 @@ void Item::cleanup()
{
DBUG_ENTER("Item::cleanup");
DBUG_PRINT("enter", ("this: %p", this));
- fixed= 0;
marker= 0;
join_tab_idx= MAX_TABLES;
if (orig_name)
@@ -675,7 +516,7 @@ void Item::cleanup()
bool Item::cleanup_processor(void *arg)
{
- if (fixed)
+ if (is_fixed())
cleanup();
return FALSE;
}
@@ -767,7 +608,8 @@ Item_ident::Item_ident(THD *thd, TABLE_LIST *view_arg,
:Item_result_field(thd), orig_db_name(NullS),
orig_table_name(view_arg->table_name.str),
orig_field_name(*field_name_arg),
- context(&view_arg->view->select_lex.context),
+ /* TODO: suspicious use of first_select_lex */
+ context(&view_arg->view->first_select_lex()->context),
db_name(NullS), table_name(view_arg->alias.str),
field_name(*field_name_arg),
alias_name_used(FALSE), cached_field_index(NO_CACHED_FIELD_INDEX),
@@ -961,12 +803,15 @@ bool Item_field::register_field_in_read_map(void *arg)
{
TABLE *table= (TABLE *) arg;
int res= 0;
+ if (table && table != field->table)
+ return res;
+
if (field->vcol_info &&
- !bitmap_fast_test_and_set(field->table->vcol_set, field->field_index))
+ !bitmap_fast_test_and_set(field->table->read_set, field->field_index))
{
res= field->vcol_info->expr->walk(&Item::register_field_in_read_map,1,arg);
}
- if (field->table == table || !table)
+ else
bitmap_set_bit(field->table->read_set, field->field_index);
return res;
}
@@ -1194,7 +1039,7 @@ bool Item::check_type_scalar(const char *opname) const
This hack in Item_outer_ref should probably be refactored eventually.
Discuss with Sanja.
*/
- DBUG_ASSERT(fixed || type() == REF_ITEM);
+ DBUG_ASSERT(is_fixed() || type() == REF_ITEM);
const Type_handler *handler= type_handler();
if (handler->is_scalar_type())
return false;
@@ -1343,7 +1188,6 @@ Item *Item_cache::safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
unlikely(!(cache= new (thd->mem_root) Item_cache_str(thd, conv))))
return NULL; // Safe conversion is not possible, or OEM
cache->setup(thd, conv);
- cache->fixed= false; // Make Item::fix_fields() happy
return cache;
}
@@ -1394,7 +1238,7 @@ Item *Item::const_charset_converter(THD *thd, CHARSET_INFO *tocs,
const char *func_name)
{
DBUG_ASSERT(const_item());
- DBUG_ASSERT(fixed);
+ DBUG_ASSERT(is_fixed());
StringBuffer<64>tmp;
String *s= val_str(&tmp);
MEM_ROOT *mem_root= thd->mem_root;
@@ -1462,116 +1306,35 @@ Item *Item_param::safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
As an extra convenience, the time structure is reset on error or NULL values!
*/
-bool Item::get_date_from_int(MYSQL_TIME *ltime, ulonglong fuzzydate)
-{
- longlong value= val_int();
- bool neg= !unsigned_flag && value < 0;
- if (null_value || int_to_datetime_with_warn(neg, neg ? -value : value,
- ltime, fuzzydate,
- field_table_or_null(),
- field_name_or_null()))
- return null_value|= make_zero_date(ltime, fuzzydate);
- return null_value= false;
-}
-
-
-bool Item::get_date_from_year(MYSQL_TIME *ltime, ulonglong fuzzydate)
-{
- longlong value= val_int();
- DBUG_ASSERT(unsigned_flag || value >= 0);
- if (max_length == 2)
- {
- if (value < 70)
- value+= 2000;
- else if (value <= 1900)
- value+= 1900;
- }
- value*= 10000; /* make it YYYYMMHH */
- if (null_value || int_to_datetime_with_warn(false, value,
- ltime, fuzzydate,
- field_table_or_null(),
- field_name_or_null()))
- return null_value|= make_zero_date(ltime, fuzzydate);
- return null_value= false;
-}
-
-
-bool Item::get_date_from_real(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item::get_date_from_int(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- double value= val_real();
- if (null_value || double_to_datetime_with_warn(value, ltime, fuzzydate,
+ Longlong_hybrid value(val_int(), unsigned_flag);
+ return null_value || int_to_datetime_with_warn(thd, value,
+ ltime, fuzzydate,
field_table_or_null(),
- field_name_or_null()))
- return null_value|= make_zero_date(ltime, fuzzydate);
- return null_value= false;
+ field_name_or_null());
}
-bool Item::get_date_from_decimal(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item::get_date_from_real(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- my_decimal value, *res;
- if (!(res= val_decimal(&value)) ||
- decimal_to_datetime_with_warn(res, ltime, fuzzydate,
- field_table_or_null(),
- field_name_or_null()))
- return null_value|= make_zero_date(ltime, fuzzydate);
- return null_value= false;
-}
-
-
-bool Item::get_date_from_string(MYSQL_TIME *ltime, ulonglong fuzzydate)
-{
- char buff[40];
- String tmp(buff,sizeof(buff), &my_charset_bin),*res;
- if (!(res=val_str(&tmp)) ||
- str_to_datetime_with_warn(res->charset(), res->ptr(), res->length(),
- ltime, fuzzydate))
- return null_value|= make_zero_date(ltime, fuzzydate);
- return null_value= false;
+ double value= val_real();
+ return null_value || double_to_datetime_with_warn(thd, value,
+ ltime, fuzzydate,
+ field_table_or_null(),
+ field_name_or_null());
}
-bool Item::make_zero_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item::get_date_from_string(THD *thd, MYSQL_TIME *to, date_mode_t mode)
{
- /*
- if the item was not null and convertion failed, we return a zero date
- if allowed, otherwise - null.
- */
- bzero((char*) ltime,sizeof(*ltime));
- if (fuzzydate & TIME_TIME_ONLY)
- {
- /*
- In the following scenario:
- - The caller expected to get a TIME value
- - Item returned a not NULL string or numeric value
- - But then conversion from string or number to TIME failed
- we need to change the default time_type from MYSQL_TIMESTAMP_DATE
- (which was set in bzero) to MYSQL_TIMESTAMP_TIME and therefore
- return TIME'00:00:00' rather than DATE'0000-00-00'.
- If we don't do this, methods like Item::get_time_with_conversion()
- will erroneously subtract CURRENT_DATE from '0000-00-00 00:00:00'
- and return TIME'-838:59:59' instead of TIME'00:00:00' as a result.
- */
- ltime->time_type= MYSQL_TIMESTAMP_TIME;
- }
- return !(fuzzydate & TIME_FUZZY_DATES);
+ StringBuffer<40> tmp;
+ Temporal::Warn_push warn(thd, field_table_or_null(), field_name_or_null(),
+ to, mode);
+ Temporal_hybrid *t= new(to) Temporal_hybrid(thd, &warn, val_str(&tmp), mode);
+ return !t->is_valid_temporal();
}
-bool Item::get_seconds(ulonglong *sec, ulong *sec_part)
-{
- if (decimals == 0)
- { // optimize for an important special case
- longlong val= val_int();
- bool neg= val < 0 && !unsigned_flag;
- *sec= neg ? -val : val;
- *sec_part= 0;
- return neg;
- }
- my_decimal tmp, *dec= val_decimal(&tmp);
- if (!dec)
- return 0;
- return my_decimal2seconds(dec, sec, sec_part);
-}
const MY_LOCALE *Item::locale_from_val_str()
{
@@ -1715,7 +1478,7 @@ Query_fragment::Query_fragment(THD *thd, sp_head *sphead,
*****************************************************************************/
Item_sp_variable::Item_sp_variable(THD *thd, const LEX_CSTRING *sp_var_name)
- :Item(thd), m_thd(0), m_name(*sp_var_name)
+ :Item_fixed_hybrid(thd), m_thd(0), m_name(*sp_var_name)
#ifndef DBUG_OFF
, m_sp(0)
#endif
@@ -1727,7 +1490,7 @@ bool Item_sp_variable::fix_fields_from_item(THD *thd, Item **, const Item *it)
{
m_thd= thd; /* NOTE: this must be set before any this_xxx() */
- DBUG_ASSERT(it->fixed);
+ DBUG_ASSERT(it->is_fixed());
max_length= it->max_length;
decimals= it->decimals;
@@ -1798,6 +1561,12 @@ String *Item_sp_variable::val_str(String *sp)
}
+bool Item_sp_variable::val_native(THD *thd, Native *to)
+{
+ return val_native_from_item(thd, this_item(), to);
+}
+
+
my_decimal *Item_sp_variable::val_decimal(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed);
@@ -1808,11 +1577,11 @@ my_decimal *Item_sp_variable::val_decimal(my_decimal *decimal_value)
}
-bool Item_sp_variable::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_sp_variable::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed);
Item *it= this_item();
- bool val= it->get_date(ltime, fuzzydate);
+ bool val= it->get_date(thd, ltime, fuzzydate);
null_value= it->null_value;
return val;
}
@@ -1848,10 +1617,10 @@ Item_splocal::Item_splocal(THD *thd,
Rewritable_query_parameter(pos_in_q, len_in_q),
Type_handler_hybrid_field_type(handler),
m_rcontext_handler(rh),
- m_var_idx(sp_var_idx)
+ m_var_idx(sp_var_idx),
+ m_type(handler == &type_handler_row ? ROW_ITEM : CONST_ITEM)
{
maybe_null= TRUE;
- m_type= sp_map_item_type(handler);
}
@@ -2189,10 +1958,10 @@ my_decimal *Item_name_const::val_decimal(my_decimal *decimal_value)
return val;
}
-bool Item_name_const::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_name_const::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed);
- bool rc= value_item->get_date(ltime, fuzzydate);
+ bool rc= value_item->get_date(thd, ltime, fuzzydate);
null_value= value_item->null_value;
return rc;
}
@@ -2204,58 +1973,30 @@ bool Item_name_const::is_null()
Item_name_const::Item_name_const(THD *thd, Item *name_arg, Item *val):
- Item(thd), value_item(val), name_item(name_arg)
+ Item_fixed_hybrid(thd), value_item(val), name_item(name_arg)
{
StringBuffer<128> name_buffer;
String *name_str;
Item::maybe_null= TRUE;
- valid_args= true;
- if (!name_item->basic_const_item() ||
- !(name_str= name_item->val_str(&name_buffer))) // Can't have a NULL name
- goto err;
- set_name(thd, name_str->ptr(), name_str->length(), name_str->charset());
-
- if (value_item->basic_const_item())
- return; // ok
-
- if (value_item->type() == FUNC_ITEM)
- {
- Item_func *value_func= (Item_func *) value_item;
- if (value_func->functype() != Item_func::COLLATE_FUNC &&
- value_func->functype() != Item_func::NEG_FUNC)
- goto err;
-
- if (value_func->key_item()->basic_const_item())
- return; // ok
- }
-
-err:
- valid_args= false;
- my_error(ER_WRONG_ARGUMENTS, MYF(0), "NAME_CONST");
+ if (name_item->basic_const_item() &&
+ (name_str= name_item->val_str(&name_buffer))) // Can't have a NULL name
+ set_name(thd, name_str->ptr(), name_str->length(), name_str->charset());
}
Item::Type Item_name_const::type() const
{
/*
- As
- 1. one can try to create the Item_name_const passing non-constant
- arguments, although it's incorrect and
- 2. the type() method can be called before the fix_fields() to get
- type information for a further type cast, e.g.
- if (item->type() == FIELD_ITEM)
- ((Item_field *) item)->...
- we return NULL_ITEM in the case to avoid wrong casting.
-
- valid_args guarantees value_item->basic_const_item(); if type is
- FUNC_ITEM, then we have a fudged item_func_neg() on our hands
- and return the underlying type.
+
+ We are guaranteed that value_item->basic_const_item() is true;
+ otherwise an error (wrong arguments supplied to the NAME_CONST
+ function) is raised.
+ If type is FUNC_ITEM, then we have a fudged item_func_neg()
+ on our hands and return the underlying type.
For Item_func_set_collation()
e.g. NAME_CONST('name', 'value' COLLATE collation) we return its
'value' argument type.
*/
- if (!valid_args)
- return NULL_ITEM;
Item::Type value_type= value_item->type();
if (value_type == FUNC_ITEM)
{
@@ -2275,10 +2016,8 @@ Item::Type Item_name_const::type() const
bool Item_name_const::fix_fields(THD *thd, Item **ref)
{
- if ((!value_item->fixed &&
- value_item->fix_fields(thd, &value_item)) ||
- (!name_item->fixed &&
- name_item->fix_fields(thd, &name_item)) ||
+ if (value_item->fix_fields_if_needed(thd, &value_item) ||
+ name_item->fix_fields_if_needed(thd, &name_item) ||
!value_item->const_item() ||
!name_item->const_item())
{
@@ -2389,7 +2128,7 @@ void Item::split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array,
else
{
/* Not a SUM() function */
- if (unlikely((!with_sum_func && !(split_flags & SPLIT_SUM_SELECT))))
+ if (unlikely((!with_sum_func() && !(split_flags & SPLIT_SUM_SELECT))))
{
/*
This is not a SUM function and there are no SUM functions inside.
@@ -2397,7 +2136,7 @@ void Item::split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array,
*/
return;
}
- if (likely(with_sum_func ||
+ if (likely(with_sum_func() ||
(type() == FUNC_ITEM &&
(((Item_func *) this)->functype() ==
Item_func::ISNOTNULLTEST_FUNC ||
@@ -2771,7 +2510,7 @@ bool Type_std_attributes::agg_item_set_converter(const DTCollation &coll,
else
thd->change_item_tree(arg, conv);
- if (conv->fix_fields(thd, arg))
+ if (conv->fix_fields_if_needed(thd, arg))
{
res= TRUE;
break; // we cannot return here, we need to restore "arena".
@@ -2920,7 +2659,7 @@ bool Item_sp::execute(THD *thd, bool *null_value, Item **args, uint arg_count)
if (unlikely(execute_impl(thd, args, arg_count)))
{
*null_value= 1;
- context->process_error(thd);
+ process_error(thd);
if (thd->killed)
thd->send_kill_message();
return true;
@@ -2953,7 +2692,7 @@ Item_sp::execute_impl(THD *thd, Item **args, uint arg_count)
DBUG_ENTER("Item_sp::execute_impl");
- if (context->security_ctx)
+ if (context && context->security_ctx)
{
/* Set view definer security context */
thd->security_ctx= context->security_ctx;
@@ -3112,18 +2851,6 @@ Item* Item_ref::build_clone(THD *thd)
}
-void Item_ident_for_show::make_send_field(THD *thd, Send_field *tmp_field)
-{
- tmp_field->table_name= tmp_field->org_table_name= table_name;
- tmp_field->db_name= db_name;
- tmp_field->col_name= tmp_field->org_col_name= field->field_name;
- tmp_field->length=field->field_length;
- tmp_field->type=field->type();
- tmp_field->flags= field->table->maybe_null ?
- (field->flags & ~NOT_NULL_FLAG) : field->flags;
- tmp_field->decimals= field->decimals();
-}
-
/**********************************************/
Item_field::Item_field(THD *thd, Field *f)
@@ -3132,7 +2859,10 @@ Item_field::Item_field(THD *thd, Field *f)
have_privileges(0), any_privileges(0)
{
set_field(f);
-
+ /*
+ field_name and table_name should not point to garbage
+ if this item is to be reused
+ */
orig_table_name= table_name;
orig_field_name= field_name;
with_field= 1;
@@ -3449,7 +3179,7 @@ String *Item_field::str_result(String *str)
return result_field->val_str(str,&str_value);
}
-bool Item_field::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
+bool Item_field::get_date(THD *thd, MYSQL_TIME *ltime,date_mode_t fuzzydate)
{
if ((null_value=field->is_null()) || field->get_date(ltime,fuzzydate))
{
@@ -3459,7 +3189,7 @@ bool Item_field::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
return 0;
}
-bool Item_field::get_date_result(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_field::get_date_result(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
if (result_field->is_null() || result_field->get_date(ltime,fuzzydate))
{
@@ -3470,6 +3200,18 @@ bool Item_field::get_date_result(MYSQL_TIME *ltime, ulonglong fuzzydate)
}
+bool Item_field::val_native(THD *thd, Native *to)
+{
+ return val_native_from_field(field, to);
+}
+
+
+bool Item_field::val_native_result(THD *thd, Native *to)
+{
+ return val_native_from_field(result_field, to);
+}
+
+
void Item_field::save_result(Field *to)
{
save_field_in_field(result_field, &null_value, to, TRUE);
@@ -3668,6 +3410,48 @@ longlong Item_field::val_int_endpoint(bool left_endp, bool *incl_endp)
return null_value? LONGLONG_MIN : res;
}
+
+bool Item_basic_value::eq(const Item *item, bool binary_cmp) const
+{
+ const Item_const *c0, *c1;
+ const Type_handler *h0, *h1;
+ /*
+ - Testing get_item_const() for NULL filters out Item_param items
+ bound in a way that needs a data type conversion
+ (e.g. a non-integer value in a LIMIT clause).
+ Item_param::get_item_const() returns NULL in such cases.
+ - Test for type_handler_for_comparison() equality makes sure
+ that values of different data type groups do not get detected
+ as equal (e.g. numbers vs strings, time vs datetime).
+ - Test for cast_to_int_type_handler() equality distinguishes
+ values with dual properties. For example, VARCHAR 'abc' and hex
+ hybrid 0x616263 are equal in string context, but they are not equal
+ if the hybrid appears in integer context (it behaves as integer then).
+ Here we do not have full information about the context, so we treat
+ them as not equal.
+ QQ: We could pass Value_source::Context here instead of
+ "bool binary_cmp", to make substitution more delicate.
+ See Field::get_equal_const_item().
+ */
+ bool res= (c0= get_item_const()) &&
+ (c1= item->get_item_const()) &&
+ (h0= type_handler())->type_handler_for_comparison() ==
+ (h1= item->type_handler())->type_handler_for_comparison() &&
+ h0->cast_to_int_type_handler()->type_handler_for_comparison() ==
+ h1->cast_to_int_type_handler()->type_handler_for_comparison() &&
+ h0->Item_const_eq(c0, c1, binary_cmp);
+ DBUG_EXECUTE_IF("Item_basic_value",
+ push_warning_printf(current_thd,
+ Sql_condition::WARN_LEVEL_NOTE,
+ ER_UNKNOWN_ERROR, "%seq=%d a=%s b=%s",
+ binary_cmp ? "bin_" : "", (int) res,
+ DbugStringItemTypeValue(current_thd, this).c_ptr(),
+ DbugStringItemTypeValue(current_thd, item).c_ptr()
+ ););
+ return res;
+}
+
+
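The constant-equality test added above boils down to three gates: both items must expose a constant, their comparison-type handlers must match, and their cast-to-int handlers must match, before the values themselves are compared. The standalone sketch below mimics only that chain of gates; Handler and Const are made-up stand-ins, not the server's Type_handler or Item_const classes.

// Standalone model of the three gates used above; Handler and Const are
// hypothetical stand-ins for Type_handler / Item_const.
#include <cstring>
#include <iostream>
#include <string>

struct Handler
{
  const char *cmp_group;       // analogue of type_handler_for_comparison()
  const char *int_cast_group;  // analogue of cast_to_int_type_handler()
};

struct Const
{
  const Handler *handler;
  std::string value;           // simplified payload, compared as a string
};

// True only when both constants exist, share a comparison group, agree on
// their integer-cast group and carry the same payload -- the same order of
// tests as in Item_basic_value::eq().
static bool const_eq(const Const *a, const Const *b)
{
  return a && b &&
         std::strcmp(a->handler->cmp_group, b->handler->cmp_group) == 0 &&
         std::strcmp(a->handler->int_cast_group,
                     b->handler->int_cast_group) == 0 &&
         a->value == b->value;
}

int main()
{
  Handler varchar_h=    { "string", "string" };
  Handler hex_hybrid_h= { "string", "int" };   // dual string/integer nature
  Const abc=     { &varchar_h, "abc" };
  Const hex_abc= { &hex_hybrid_h, "abc" };
  // Same bytes, but the hex hybrid casts to integer differently: not equal.
  std::cout << const_eq(&abc, &hex_abc) << "\n";  // prints 0
  std::cout << const_eq(&abc, &abc) << "\n";      // prints 1
  return 0;
}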
/**
Create an item from a string we KNOW points to a valid longlong
and \\0 terminated number string.
@@ -3687,7 +3471,6 @@ Item_int::Item_int(THD *thd, const char *str_arg, size_t length):
the field name.
*/
name.length= !str_arg[max_length] ? max_length : strlen(str_arg);
- fixed= 1;
}
@@ -3699,8 +3482,6 @@ my_decimal *Item_int::val_decimal(my_decimal *decimal_value)
String *Item_int::val_str(String *str)
{
- // following assert is redundant, because fixed=1 assigned in constructor
- DBUG_ASSERT(fixed == 1);
str->set_int(value, unsigned_flag, collation.collation);
return str;
}
@@ -3737,8 +3518,6 @@ Item_uint::Item_uint(THD *thd, const char *str_arg, longlong i, uint length):
String *Item_uint::val_str(String *str)
{
- // following assert is redundant, because fixed=1 assigned in constructor
- DBUG_ASSERT(fixed == 1);
str->set((ulonglong) value, collation.collation);
return str;
}
@@ -3760,7 +3539,6 @@ Item_decimal::Item_decimal(THD *thd, const char *str_arg, size_t length,
name.str= str_arg;
name.length= safe_strlen(str_arg);
decimals= (uint8) decimal_value.frac;
- fixed= 1;
max_length= my_decimal_precision_to_length_no_truncation(decimal_value.intg +
decimals,
decimals,
@@ -3772,7 +3550,6 @@ Item_decimal::Item_decimal(THD *thd, longlong val, bool unsig):
{
int2my_decimal(E_DEC_FATAL_ERROR, val, unsig, &decimal_value);
decimals= (uint8) decimal_value.frac;
- fixed= 1;
max_length= my_decimal_precision_to_length_no_truncation(decimal_value.intg +
decimals,
decimals,
@@ -3785,7 +3562,6 @@ Item_decimal::Item_decimal(THD *thd, double val, int precision, int scale):
{
double2my_decimal(E_DEC_FATAL_ERROR, val, &decimal_value);
decimals= (uint8) decimal_value.frac;
- fixed= 1;
max_length= my_decimal_precision_to_length_no_truncation(decimal_value.intg +
decimals,
decimals,
@@ -3802,16 +3578,14 @@ Item_decimal::Item_decimal(THD *thd, const char *str, const my_decimal *val_arg,
name.length= safe_strlen(str);
decimals= (uint8) decimal_par;
max_length= length;
- fixed= 1;
}
-Item_decimal::Item_decimal(THD *thd, my_decimal *value_par):
+Item_decimal::Item_decimal(THD *thd, const my_decimal *value_par):
Item_num(thd)
{
my_decimal2decimal(value_par, &decimal_value);
decimals= (uint8) decimal_value.frac;
- fixed= 1;
max_length= my_decimal_precision_to_length_no_truncation(decimal_value.intg +
decimals,
decimals,
@@ -3820,63 +3594,15 @@ Item_decimal::Item_decimal(THD *thd, my_decimal *value_par):
Item_decimal::Item_decimal(THD *thd, const uchar *bin, int precision, int scale):
- Item_num(thd)
+ Item_num(thd),
+ decimal_value(bin, precision, scale)
{
- binary2my_decimal(E_DEC_FATAL_ERROR, bin,
- &decimal_value, precision, scale);
decimals= (uint8) decimal_value.frac;
- fixed= 1;
max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
unsigned_flag);
}
-longlong Item_decimal::val_int()
-{
- longlong result;
- my_decimal2int(E_DEC_FATAL_ERROR, &decimal_value, unsigned_flag, &result);
- return result;
-}
-
-double Item_decimal::val_real()
-{
- double result;
- my_decimal2double(E_DEC_FATAL_ERROR, &decimal_value, &result);
- return result;
-}
-
-String *Item_decimal::val_str(String *result)
-{
- result->set_charset(&my_charset_numeric);
- my_decimal2string(E_DEC_FATAL_ERROR, &decimal_value, 0, 0, 0, result);
- return result;
-}
-
-void Item_decimal::print(String *str, enum_query_type query_type)
-{
- my_decimal2string(E_DEC_FATAL_ERROR, &decimal_value, 0, 0, 0, &str_value);
- str->append(str_value);
-}
-
-
-bool Item_decimal::eq(const Item *item, bool binary_cmp) const
-{
- if (type() == item->type() && item->basic_const_item())
- {
- /*
- We need to cast off const to call val_decimal(). This should
- be OK for a basic constant. Additionally, we can pass 0 as
- a true decimal constant will return its internal decimal
- storage and ignore the argument.
- */
- Item *arg= (Item*) item;
- my_decimal *value= arg->val_decimal(0);
- return !my_decimal_cmp(&decimal_value, value);
- }
- return 0;
-}
-
-
void Item_decimal::set_decimal_value(my_decimal *value_par)
{
my_decimal2decimal(value_par, &decimal_value);
@@ -3898,8 +3624,6 @@ Item *Item_decimal::clone_item(THD *thd)
String *Item_float::val_str(String *str)
{
- // following assert is redundant, because fixed=1 assigned in constructor
- DBUG_ASSERT(fixed == 1);
str->set_real(value, decimals, &my_charset_numeric);
return str;
}
@@ -3907,8 +3631,6 @@ String *Item_float::val_str(String *str)
my_decimal *Item_float::val_decimal(my_decimal *decimal_value)
{
- // following assert is redundant, because fixed=1 assigned in constructor
- DBUG_ASSERT(fixed == 1);
double2my_decimal(E_DEC_FATAL_ERROR, value, decimal_value);
return (decimal_value);
}
@@ -3969,7 +3691,6 @@ void Item_string::print(String *str, enum_query_type query_type)
double Item_string::val_real()
{
- DBUG_ASSERT(fixed == 1);
return double_from_string_with_check(&str_value);
}
@@ -3980,7 +3701,6 @@ double Item_string::val_real()
*/
longlong Item_string::val_int()
{
- DBUG_ASSERT(fixed == 1);
return longlong_from_string_with_check(&str_value);
}
@@ -3993,23 +3713,17 @@ my_decimal *Item_string::val_decimal(my_decimal *decimal_value)
double Item_null::val_real()
{
- // following assert is redundant, because fixed=1 assigned in constructor
- DBUG_ASSERT(fixed == 1);
null_value=1;
return 0.0;
}
longlong Item_null::val_int()
{
- // following assert is redundant, because fixed=1 assigned in constructor
- DBUG_ASSERT(fixed == 1);
null_value=1;
return 0;
}
/* ARGSUSED */
String *Item_null::val_str(String *str)
{
- // following assert is redundant, because fixed=1 assigned in constructor
- DBUG_ASSERT(fixed == 1);
null_value=1;
return 0;
}
@@ -4020,11 +3734,9 @@ my_decimal *Item_null::val_decimal(my_decimal *decimal_value)
}
-bool Item_null::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_null::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- // following assert is redundant, because fixed=1 assigned in constructor
- DBUG_ASSERT(fixed == 1);
- make_zero_date(ltime, fuzzydate);
+ set_zero_time(ltime, MYSQL_TIMESTAMP_NONE);
return (null_value= true);
}
@@ -4073,8 +3785,6 @@ Item_param::Item_param(THD *thd, const LEX_CSTRING *name_arg,
*/
Type_handler_hybrid_field_type(&type_handler_null),
state(NO_VALUE),
- /* Don't pretend to be a literal unless value for this item is set. */
- item_type(PARAM_ITEM),
m_empty_string_is_null(false),
indicator(STMT_INDICATOR_NONE),
m_out_param_info(NULL),
@@ -4133,7 +3843,6 @@ void Item_param::sync_clones()
c->Type_geometry_attributes::operator=(*this);
c->state= state;
- c->item_type= item_type;
c->m_empty_string_is_null= m_empty_string_is_null;
c->value.PValue_simple::operator=(value);
@@ -4168,7 +3877,6 @@ void Item_param::set_null()
max_length= 0;
decimals= 0;
state= NULL_VALUE;
- fix_type(Item::NULL_ITEM);
DBUG_VOID_RETURN;
}
@@ -4183,7 +3891,6 @@ void Item_param::set_int(longlong i, uint32 max_length_arg)
decimals= 0;
maybe_null= 0;
null_value= 0;
- fix_type(Item::INT_ITEM);
DBUG_VOID_RETURN;
}
@@ -4198,7 +3905,6 @@ void Item_param::set_double(double d)
decimals= NOT_FIXED_DEC;
maybe_null= 0;
null_value= 0;
- fix_type(Item::REAL_ITEM);
DBUG_VOID_RETURN;
}
@@ -4231,7 +3937,6 @@ void Item_param::set_decimal(const char *str, ulong length)
decimals, unsigned_flag);
maybe_null= 0;
null_value= 0;
- fix_type(Item::DECIMAL_ITEM);
DBUG_VOID_RETURN;
}
@@ -4249,7 +3954,6 @@ void Item_param::set_decimal(const my_decimal *dv, bool unsigned_arg)
decimals, unsigned_flag);
maybe_null= 0;
null_value= 0;
- fix_type(Item::DECIMAL_ITEM);
}
@@ -4261,7 +3965,6 @@ void Item_param::fix_temporal(uint32 max_length_arg, uint decimals_arg)
decimals= decimals_arg;
maybe_null= 0;
null_value= 0;
- fix_type(Item::DATE_ITEM);
}
@@ -4346,7 +4049,6 @@ bool Item_param::set_str(const char *str, ulong length,
null_value= 0;
/* max_length and decimals are set after charset conversion */
/* sic: str may be not null-terminated, don't add DBUG_PRINT here */
- fix_type(Item::STRING_ITEM);
DBUG_RETURN(FALSE);
}
@@ -4380,7 +4082,6 @@ bool Item_param::set_longdata(const char *str, ulong length)
state= LONG_DATA_VALUE;
maybe_null= 0;
null_value= 0;
- fix_type(Item::STRING_ITEM);
DBUG_RETURN(FALSE);
}
@@ -4444,7 +4145,7 @@ bool Item_param::set_from_item(THD *thd, Item *item)
}
}
struct st_value tmp;
- if (!item->save_in_value(&tmp))
+ if (!item->save_in_value(thd, &tmp))
{
const Type_handler *h= item->type_handler();
set_handler(h);
@@ -4482,16 +4183,6 @@ void Item_param::reset()
state= NO_VALUE;
maybe_null= 1;
null_value= 0;
- fixed= false;
- /*
- Don't reset item_type to PARAM_ITEM: it's only needed to guard
- us from item optimizations at prepare stage, when item doesn't yet
- contain a literal of some kind.
- In all other cases when this object is accessed its value is
- set (this assumption is guarded by 'state' and
- DBUG_ASSERTS(state != NO_VALUE) in all Item_param::get_*
- methods).
- */
DBUG_VOID_RETURN;
}
@@ -4557,7 +4248,7 @@ void Item_param::invalid_default_param() const
}
-bool Item_param::get_date(MYSQL_TIME *res, ulonglong fuzzydate)
+bool Item_param::get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate)
{
/*
LIMIT clause parameter should not call get_date()
@@ -4571,7 +4262,7 @@ bool Item_param::get_date(MYSQL_TIME *res, ulonglong fuzzydate)
*res= value.time;
return 0;
}
- return type_handler()->Item_get_date(this, res, fuzzydate);
+ return type_handler()->Item_get_date_with_warn(thd, this, res, fuzzydate);
}
@@ -4583,11 +4274,7 @@ double Item_param::PValue::val_real() const
case INT_RESULT:
return (double) integer;
case DECIMAL_RESULT:
- {
- double result;
- my_decimal2double(E_DEC_FATAL_ERROR, &m_decimal, &result);
- return result;
- }
+ return m_decimal.to_double();
case STRING_RESULT:
return double_from_string_with_check(&m_string);
case TIME_RESULT:
@@ -4612,11 +4299,7 @@ longlong Item_param::PValue::val_int(const Type_std_attributes *attr) const
case INT_RESULT:
return integer;
case DECIMAL_RESULT:
- {
- longlong i;
- my_decimal2int(E_DEC_FATAL_ERROR, &m_decimal, attr->unsigned_flag, &i);
- return i;
- }
+ return m_decimal.to_longlong(attr->unsigned_flag);
case STRING_RESULT:
return longlong_from_string_with_check(&m_string);
case TIME_RESULT:
@@ -4666,7 +4349,7 @@ String *Item_param::PValue::val_str(String *str,
str->set(integer, &my_charset_bin);
return str;
case DECIMAL_RESULT:
- if (my_decimal2string(E_DEC_FATAL_ERROR, &m_decimal, 0, 0, 0, str) <= 1)
+ if (m_decimal.to_string_native(str, 0, 0, 0) <= 1)
return str;
return NULL;
case TIME_RESULT:
@@ -4707,8 +4390,7 @@ const String *Item_param::value_query_val_str(THD *thd, String *str) const
str->set_real(value.real, NOT_FIXED_DEC, &my_charset_bin);
return str;
case DECIMAL_RESULT:
- if (my_decimal2string(E_DEC_FATAL_ERROR, &value.m_decimal,
- 0, 0, 0, str) > 1)
+ if (value.m_decimal.to_string_native(str, 0, 0, 0) > 1)
return &my_null_string;
return str;
case TIME_RESULT:
@@ -4813,11 +4495,20 @@ bool Item_param::convert_str_value(THD *thd)
bool Item_param::basic_const_item() const
{
- DBUG_ASSERT(fixed || state == NO_VALUE);
- if (state == NO_VALUE ||
- (state == SHORT_DATA_VALUE && type_handler()->cmp_type() == TIME_RESULT))
- return FALSE;
- return TRUE;
+ switch (state) {
+ case LONG_DATA_VALUE:
+ case NULL_VALUE:
+ return true;
+ case SHORT_DATA_VALUE:
+ return type_handler()->cmp_type() != TIME_RESULT;
+ case DEFAULT_VALUE:
+ case IGNORE_VALUE:
+ invalid_default_param();
+ return false;
+ case NO_VALUE:
+ break;
+ }
+ return false;
}
@@ -4878,48 +4569,6 @@ Item_param::clone_item(THD *thd)
}
-bool Item_param::value_eq(const Item *item, bool binary_cmp) const
-{
- switch (value.type_handler()->cmp_type()) {
- case INT_RESULT:
- return int_eq(value.integer, item);
- case REAL_RESULT:
- return real_eq(value.real, item);
- case STRING_RESULT:
- return str_eq(&value.m_string, item, binary_cmp);
- case DECIMAL_RESULT:
- case TIME_RESULT:
- case ROW_RESULT:
- break;
- }
- return false;
-}
-
-
-bool
-Item_param::eq(const Item *item, bool binary_cmp) const
-{
- if (!basic_const_item())
- return FALSE;
-
- // There's no "default". See comments in Item_param::save_in_field().
- switch (state) {
- case IGNORE_VALUE:
- case DEFAULT_VALUE:
- invalid_default_param();
- return false;
- case NULL_VALUE:
- return null_eq(item);
- case SHORT_DATA_VALUE:
- case LONG_DATA_VALUE:
- return value_eq(item, binary_cmp);
- case NO_VALUE:
- return false;
- }
- DBUG_ASSERT(0); // Garbage
- return FALSE;
-}
-
/* End of Item_param related */
void Item_param::print(String *str, enum_query_type query_type)
@@ -4973,12 +4622,10 @@ Item_param::set_param_type_and_swap_value(Item_param *src)
{
Type_std_attributes::set(src);
set_handler(src->type_handler());
- item_type= src->item_type;
maybe_null= src->maybe_null;
null_value= src->null_value;
state= src->state;
- fixed= src->fixed;
value.swap(src->value);
}
@@ -4988,7 +4635,6 @@ void Item_param::set_default()
{
m_is_settable_routine_parameter= false;
state= DEFAULT_VALUE;
- fixed= true;
/*
When Item_param is set to DEFAULT_VALUE:
- its val_str() and val_decimal() return NULL
@@ -5004,7 +4650,6 @@ void Item_param::set_ignore()
{
m_is_settable_routine_parameter= false;
state= IGNORE_VALUE;
- fixed= true;
null_value= true;
}
@@ -5033,7 +4678,7 @@ Item_param::set_value(THD *thd, sp_rcontext *ctx, Item **it)
correctly fetches the value from the client-server protocol,
using set_param_func().
*/
- if (arg->save_in_value(&tmp) ||
+ if (arg->save_in_value(thd, &tmp) ||
set_value(thd, arg, &tmp, arg->type_handler()))
{
set_null();
@@ -5058,7 +4703,7 @@ void
Item_param::set_out_param_info(Send_field *info)
{
m_out_param_info= info;
- set_handler_by_field_type(m_out_param_info->type);
+ set_handler(m_out_param_info->type_handler());
}
@@ -5099,16 +4744,7 @@ void Item_param::make_send_field(THD *thd, Send_field *field)
OUT-parameter info to fill out the names.
*/
- field->db_name= m_out_param_info->db_name;
- field->table_name= m_out_param_info->table_name;
- field->org_table_name= m_out_param_info->org_table_name;
- field->col_name= m_out_param_info->col_name;
- field->org_col_name= m_out_param_info->org_col_name;
-
- field->length= m_out_param_info->length;
- field->flags= m_out_param_info->flags;
- field->decimals= m_out_param_info->decimals;
- field->type= m_out_param_info->type;
+ *field= *m_out_param_info;
}
bool Item_param::append_for_log(THD *thd, String *str)
@@ -5179,17 +4815,6 @@ my_decimal *Item_copy_string::val_decimal(my_decimal *decimal_value)
Functions to convert item to field (for send_result_set_metadata)
*/
-/* ARGSUSED */
-bool Item::fix_fields(THD *thd, Item **ref)
-{
-
- // We do not check fields which are fixed during construction
- DBUG_ASSERT(fixed == 0 || basic_const_item());
- fixed= 1;
- return FALSE;
-}
-
-
void Item_ref_null_helper::save_val(Field *to)
{
DBUG_ASSERT(fixed == 1);
@@ -5243,9 +4868,15 @@ String* Item_ref_null_helper::val_str(String* s)
}
-bool Item_ref_null_helper::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_ref_null_helper::val_native(THD *thd, Native *to)
+{
+ return (owner->was_null|= val_native_from_item(thd, *ref, to));
+}
+
+
+bool Item_ref_null_helper::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return (owner->was_null|= null_value= (*ref)->get_date_result(ltime, fuzzydate));
+ return (owner->was_null|= null_value= (*ref)->get_date_result(thd, ltime, fuzzydate));
}
@@ -5538,7 +5169,7 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select)
ref->alias_name_used= TRUE;
/* If this is a non-aggregated field inside HAVING, search in GROUP BY. */
- if (select->having_fix_field && !ref->with_sum_func && group_list)
+ if (select->having_fix_field && !ref->with_sum_func() && group_list)
{
group_by_ref= find_field_in_group_list(ref, group_list);
@@ -5580,7 +5211,7 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select)
ref->name.str, "forward reference in item list");
return NULL;
}
- DBUG_ASSERT((*select_ref)->fixed);
+ DBUG_ASSERT((*select_ref)->is_fixed());
return &select->ref_pointer_array[counter];
}
if (group_by_ref)
@@ -5703,7 +5334,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
Name_resolution_context *outer_context= 0;
SELECT_LEX *select= 0;
/* Currently derived tables cannot be correlated */
- if (current_sel->master_unit()->first_select()->linkage !=
+ if (current_sel->master_unit()->first_select()->get_linkage() !=
DERIVED_TABLE_TYPE)
outer_context= context->outer_context;
@@ -5857,7 +5488,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
return -1; /* Some error occurred (e.g. ambiguous names). */
if (ref != not_found_item)
{
- DBUG_ASSERT(*ref && (*ref)->fixed);
+ DBUG_ASSERT(*ref && (*ref)->is_fixed());
prev_subselect_item->used_tables_and_const_cache_join(*ref);
break;
}
@@ -5899,7 +5530,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
Item_ref *rf;
/* Should have been checked in resolve_ref_in_select_and_group(). */
- DBUG_ASSERT(*ref && (*ref)->fixed);
+ DBUG_ASSERT(*ref && (*ref)->is_fixed());
/*
Here, a subset of actions performed by Item_ref::set_properties
is not enough. So we pass ptr to NULL into Item_[direct]_ref
@@ -6475,7 +6106,7 @@ Item *Item_field::replace_equal_field(THD *thd, uchar *arg)
comparison context, and it's safe to replace it to the constant from
item_equal.
*/
- DBUG_ASSERT(type_handler()->type_handler_for_comparison()->cmp_type() ==
+ DBUG_ASSERT(type_handler_for_comparison()->cmp_type() ==
item_equal->compare_type_handler()->cmp_type());
return const_item2;
}
@@ -6491,7 +6122,7 @@ Item *Item_field::replace_equal_field(THD *thd, uchar *arg)
void Item::init_make_send_field(Send_field *tmp_field,
- enum enum_field_types field_type_arg)
+ const Type_handler *h)
{
tmp_field->db_name= "";
tmp_field->org_table_name= "";
@@ -6501,7 +6132,7 @@ void Item::init_make_send_field(Send_field *tmp_field,
tmp_field->flags= (maybe_null ? 0 : NOT_NULL_FLAG) |
(my_binary_compare(charset_for_protocol()) ?
BINARY_FLAG : 0);
- tmp_field->type= field_type_arg;
+ tmp_field->set_handler(h);
tmp_field->length=max_length;
tmp_field->decimals=decimals;
if (unsigned_flag)
@@ -6510,13 +6141,13 @@ void Item::init_make_send_field(Send_field *tmp_field,
void Item::make_send_field(THD *thd, Send_field *tmp_field)
{
- init_make_send_field(tmp_field, field_type());
+ init_make_send_field(tmp_field, type_handler());
}
void Item_empty_string::make_send_field(THD *thd, Send_field *tmp_field)
{
- init_make_send_field(tmp_field, string_type_handler()->field_type());
+ init_make_send_field(tmp_field, string_type_handler());
}
@@ -6847,12 +6478,11 @@ int Item::save_real_in_field(Field *field, bool no_conversions)
int Item::save_decimal_in_field(Field *field, bool no_conversions)
{
- my_decimal decimal_value;
- my_decimal *value= val_decimal(&decimal_value);
- if (null_value)
+ VDec value(this);
+ if (value.is_null())
return set_field_to_null_with_conversions(field, no_conversions);
field->set_notnull();
- return field->store_decimal(value);
+ return field->store_decimal(value.ptr());
}
@@ -6919,14 +6549,18 @@ Item_string::make_string_literal_concat(THD *thd, const LEX_CSTRING *str)
*/
Item *Item_string::make_odbc_literal(THD *thd, const LEX_CSTRING *typestr)
{
- enum_field_types type= odbc_temporal_literal_type(typestr);
- Item *res= type == MYSQL_TYPE_STRING ? this :
- create_temporal_literal(thd, val_str(NULL), type, false);
+ Item_literal *res;
+ const Type_handler *h;
+ if (collation.repertoire == MY_REPERTOIRE_ASCII &&
+ str_value.length() < MAX_DATE_STRING_REP_LENGTH * 4 &&
+ (h= Type_handler::odbc_literal_type_handler(typestr)) &&
+ (res= h->create_literal_item(thd, val_str(NULL), false)))
+ return res;
/*
- create_temporal_literal() returns NULL if failed to parse the string,
+ h->create_literal_item() returns NULL if it failed to parse the string,
or the string format did not match the type, e.g.: {d'2001-01-01 10:10:10'}
*/
- return res ? res : this;
+ return this;
}
@@ -7129,7 +6763,6 @@ Item_float::Item_float(THD *thd, const char *str_arg, size_t length):
name.length= strlen(str_arg);
decimals=(uint8) nr_of_decimals(str_arg, str_arg+length);
max_length=(uint32)length;
- fixed= 1;
}
@@ -7185,7 +6818,6 @@ void Item_hex_constant::hex_string_init(THD *thd, const char *str, size_t str_le
}
*ptr=0; // Keep purify happy
collation.set(&my_charset_bin, DERIVATION_COERCIBLE);
- fixed= 1;
unsigned_flag= 1;
}
@@ -7264,19 +6896,9 @@ Item_bin_string::Item_bin_string(THD *thd, const char *str, size_t str_length):
ptr[0]= 0;
collation.set(&my_charset_bin, DERIVATION_COERCIBLE);
- fixed= 1;
}
-bool Item_temporal_literal::eq(const Item *item, bool binary_cmp) const
-{
- return
- item->basic_const_item() && type() == item->type() &&
- field_type() == ((Item_temporal_literal *) item)->field_type() &&
- !my_time_compare(&cached_time,
- &((Item_temporal_literal *) item)->cached_time);
-}
-
void Item_date_literal::print(String *str, enum_query_type query_type)
{
str->append("DATE'");
@@ -7293,12 +6915,11 @@ Item *Item_date_literal::clone_item(THD *thd)
}
-bool Item_date_literal::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_date_literal::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- DBUG_ASSERT(fixed);
- fuzzy_date |= sql_mode_for_dates(current_thd);
+ fuzzydate |= sql_mode_for_dates(thd);
*ltime= cached_time;
- return (null_value= check_date_with_warn(ltime, fuzzy_date,
+ return (null_value= check_date_with_warn(thd, ltime, fuzzydate,
MYSQL_TIMESTAMP_ERROR));
}
@@ -7319,12 +6940,11 @@ Item *Item_datetime_literal::clone_item(THD *thd)
}
-bool Item_datetime_literal::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_datetime_literal::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- DBUG_ASSERT(fixed);
- fuzzy_date |= sql_mode_for_dates(current_thd);
+ fuzzydate |= sql_mode_for_dates(thd);
*ltime= cached_time;
- return (null_value= check_date_with_warn(ltime, fuzzy_date,
+ return (null_value= check_date_with_warn(thd, ltime, fuzzydate,
MYSQL_TIMESTAMP_ERROR));
}
@@ -7345,13 +6965,12 @@ Item *Item_time_literal::clone_item(THD *thd)
}
-bool Item_time_literal::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_time_literal::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- DBUG_ASSERT(fixed);
*ltime= cached_time;
- if (fuzzy_date & TIME_TIME_ONLY)
+ if (fuzzydate & TIME_TIME_ONLY)
return (null_value= false);
- return (null_value= check_date_with_warn(ltime, fuzzy_date,
+ return (null_value= check_date_with_warn(thd, ltime, fuzzydate,
MYSQL_TIMESTAMP_ERROR));
}
@@ -7467,7 +7086,7 @@ void Item_field::update_null_value()
no_errors= thd->no_errors;
thd->no_errors= 1;
- Item::update_null_value();
+ type_handler()->Item_update_null_value(this);
thd->no_errors= no_errors;
}
@@ -7518,6 +7137,198 @@ Item *Item_field::update_value_transformer(THD *thd, uchar *select_arg)
}
+/**
+ @brief
+ Prepare AND/OR formula for extraction of a pushable condition
+
+ @param checker the checker callback function to be applied to the nodes
+ of the tree of the object
+ @param arg parameter to be passed to the checker
+
+ @details
+ This method recursively traverses this AND/OR condition and for each
+ subformula of the condition checks whether it can be used for the
+ extraction of a pushable condition. The pushability of a subformula is
+ checked by the callback function 'checker' with one parameter arg.
+ The subformulas that are not usable are marked with the flag
+ NO_EXTRACTION_FL.
+ @note
+ This method is called before any call of build_pushable_cond.
+ The flag NO_EXTRACTION_FL set in a subformula makes it possible to avoid
+ building clones for the subformulas that are not used in the pushable
+ condition.
+ @note
+ This method is used by the optimization that pushes conditions into
+ materialized derived tables/views.
+ Item::pushable_cond_checker_for_derived() is passed as the actual callback
+ function.
+ It is also used by the optimization that pushes conditions into
+ materialized IN subqueries.
+ Item::pushable_cond_checker_for_subquery() is passed as the actual
+ callback function.
+*/
+
+void Item::check_pushable_cond(Pushdown_checker checker, uchar *arg)
+{
+ clear_extraction_flag();
+ if (type() == Item::COND_ITEM)
+ {
+ bool and_cond= ((Item_cond*) this)->functype() == Item_func::COND_AND_FUNC;
+ List_iterator<Item> li(*((Item_cond*) this)->argument_list());
+ uint count= 0;
+ Item *item;
+ while ((item=li++))
+ {
+ item->check_pushable_cond(checker, arg);
+ if (item->get_extraction_flag() != NO_EXTRACTION_FL)
+ count++;
+ else if (!and_cond)
+ break;
+ }
+ if ((and_cond && count == 0) || item)
+ {
+ set_extraction_flag(NO_EXTRACTION_FL);
+ if (and_cond)
+ li.rewind();
+ while ((item= li++))
+ item->clear_extraction_flag();
+ }
+ }
+ else if (!((this->*checker) (arg)))
+ set_extraction_flag(NO_EXTRACTION_FL);
+}
+
+
+/**
+ @brief
+ Build condition extractable from this condition for pushdown
+
+ @param thd the thread handle
+ @param checker the checker callback function to be applied to the nodes
+ of the tree of the object to check if multiple equality
+ elements can be used to create equalities
+ @param arg parameter to be passed to the checker
+
+ @details
+ This method finds out what pushable condition can be extracted from
+ this condition. If such a condition C exists, the method builds the
+ item for it. The method uses the flag NO_EXTRACTION_FL set by the
+ preliminary call of the method check_pushable_cond() to figure
+ out whether a subformula is pushable or not.
+ In the case when this item is a multiple equality, a checker method is
+ called to find the equal fields to build a new equality that can be
+ pushed down.
+ @note
+ The built condition C is always implied by the condition cond
+ (cond => C). The method tries to build the most restrictive such
+ condition (i.e. for any other condition C' such that cond => C'
+ we have C => C').
+ @note
+ The built item is not ready for use: substitution for the field items
+ has to be done and it has to be re-fixed.
+ @note
+ This method is used by the optimization that pushes conditions into
+ materialized derived tables/views.
+ Item::pushable_equality_checker_for_derived() is passed as the actual
+ callback function.
+ It is also used by the optimization that pushes conditions into
+ materialized IN subqueries.
+ Item::pushable_equality_checker_for_subquery() is passed as the actual
+ callback function.
+
+ @retval
+ the built condition that can be pushed down, if such a condition exists
+ NULL if there is no such condition
+*/
+
+Item *Item::build_pushable_cond(THD *thd,
+ Pushdown_checker checker,
+ uchar *arg)
+{
+ bool is_multiple_equality= type() == Item::FUNC_ITEM &&
+ ((Item_func*) this)->functype() == Item_func::MULT_EQUAL_FUNC;
+
+ if (get_extraction_flag() == NO_EXTRACTION_FL)
+ return 0;
+
+ if (type() == Item::COND_ITEM)
+ {
+ bool cond_and= false;
+ Item_cond *new_cond;
+ if (((Item_cond*) this)->functype() == Item_func::COND_AND_FUNC)
+ {
+ cond_and= true;
+ new_cond= new (thd->mem_root) Item_cond_and(thd);
+ }
+ else
+ new_cond= new (thd->mem_root) Item_cond_or(thd);
+ if (!new_cond)
+ return 0;
+ List_iterator<Item> li(*((Item_cond*) this)->argument_list());
+ Item *item;
+ bool is_fix_needed= false;
+
+ while ((item=li++))
+ {
+ if (item->get_extraction_flag() == NO_EXTRACTION_FL)
+ {
+ if (!cond_and)
+ return 0;
+ continue;
+ }
+ Item *fix= item->build_pushable_cond(thd, checker, arg);
+ if (!fix && !cond_and)
+ return 0;
+ if (!fix)
+ continue;
+
+ if (fix->type() == Item::COND_ITEM &&
+ ((Item_cond*) fix)->functype() == Item_func::COND_AND_FUNC)
+ is_fix_needed= true;
+
+ if (new_cond->argument_list()->push_back(fix, thd->mem_root))
+ return 0;
+ }
+ if (is_fix_needed && new_cond->fix_fields(thd, 0))
+ return 0;
+
+ switch (new_cond->argument_list()->elements)
+ {
+ case 0:
+ return 0;
+ case 1:
+ return new_cond->argument_list()->head();
+ default:
+ return new_cond;
+ }
+ }
+ else if (is_multiple_equality)
+ {
+ List<Item> equalities;
+ Item *new_cond= NULL;
+ if (((Item_equal *)this)->create_pushable_equalities(thd, &equalities,
+ checker, arg) ||
+ (equalities.elements == 0))
+ return 0;
+
+ switch (equalities.elements)
+ {
+ case 0:
+ return 0;
+ case 1:
+ new_cond= equalities.head();
+ break;
+ default:
+ new_cond= new (thd->mem_root) Item_cond_and(thd, equalities);
+ break;
+ }
+ if (new_cond && new_cond->fix_fields(thd, &new_cond))
+ return 0;
+ return new_cond;
+ }
+ else if (get_extraction_flag() != NO_EXTRACTION_FL)
+ return build_clone(thd);
+ return 0;
+}
+
+
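A rough standalone illustration of the two-phase scheme documented above: first mark the subformulas that cannot be pushed, then clone only what survived. The Node tree, the checker lambda and both helper functions below are toy stand-ins that only mimic the shape of check_pushable_cond() and build_pushable_cond(), not the real Item hierarchy.

// Toy sketch of the mark-then-build pushdown scheme described above.
// Node is a hypothetical stand-in for the Item/Item_cond hierarchy.
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Node
{
  enum Kind { AND_NODE, OR_NODE, LEAF } kind;
  std::string label;     // leaf predicate, e.g. "t.a = 1"
  bool pushable;         // analogue of "NO_EXTRACTION_FL is not set"
  std::vector<std::shared_ptr<Node>> args;
  Node(Kind k, std::string l= "", std::vector<std::shared_ptr<Node>> a= {})
    :kind(k), label(std::move(l)), pushable(true), args(std::move(a)) {}
};

// Phase 1 (cf. check_pushable_cond): an AND is usable if at least one
// branch is usable, an OR only if all branches are usable.
static bool mark(Node &n, bool (*checker)(const std::string &))
{
  if (n.kind == Node::LEAF)
    return n.pushable= checker(n.label);
  size_t usable= 0;
  for (auto &a : n.args)
    if (mark(*a, checker))
      usable++;
  n.pushable= (n.kind == Node::AND_NODE) ? usable > 0
                                         : usable == n.args.size();
  return n.pushable;
}

// Phase 2 (cf. build_pushable_cond): clone only the marked parts,
// collapsing an AND/OR that ends up with a single surviving argument.
static std::shared_ptr<Node> build(const Node &n)
{
  if (!n.pushable)
    return nullptr;
  if (n.kind == Node::LEAF)
    return std::make_shared<Node>(Node::LEAF, n.label);
  auto res= std::make_shared<Node>(n.kind);
  for (auto &a : n.args)
    if (auto part= build(*a))
      res->args.push_back(part);
  return res->args.size() == 1 ? res->args[0] : res;
}

int main()
{
  // (a AND b) where only "a" passes the checker: only "a" is pushed down.
  Node cond(Node::AND_NODE, "",
            { std::make_shared<Node>(Node::LEAF, "a"),
              std::make_shared<Node>(Node::LEAF, "b") });
  mark(cond, [](const std::string &s) { return s == "a"; });
  auto pushed= build(cond);
  std::cout << (pushed ? pushed->label : std::string("<none>")) << "\n"; // "a"
  return 0;
}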
static
Item *get_field_item_for_having(THD *thd, Item *item, st_select_lex *sel)
{
@@ -7651,50 +7462,15 @@ Item *Item_direct_view_ref::derived_field_transformer_for_where(THD *thd,
return (*ref);
}
-static
-Grouping_tmp_field *find_matching_grouping_field(Item *item,
- st_select_lex *sel)
-{
- DBUG_ASSERT(item->type() == Item::FIELD_ITEM ||
- (item->type() == Item::REF_ITEM &&
- ((Item_ref *) item)->ref_type() == Item_ref::VIEW_REF));
- List_iterator<Grouping_tmp_field> li(sel->grouping_tmp_fields);
- Grouping_tmp_field *gr_field;
- Item_field *field_item= (Item_field *) (item->real_item());
- while ((gr_field= li++))
- {
- if (field_item->field == gr_field->tmp_field)
- return gr_field;
- }
- Item_equal *item_equal= item->get_item_equal();
- if (item_equal)
- {
- Item_equal_fields_iterator it(*item_equal);
- Item *equal_item;
- while ((equal_item= it++))
- {
- field_item= (Item_field *) (equal_item->real_item());
- li.rewind();
- while ((gr_field= li++))
- {
- if (field_item->field == gr_field->tmp_field)
- return gr_field;
- }
- }
- }
- return NULL;
-}
-
-Item *Item_field::derived_grouping_field_transformer_for_where(THD *thd,
- uchar *arg)
+Item *Item_field::grouping_field_transformer_for_where(THD *thd, uchar *arg)
{
st_select_lex *sel= (st_select_lex *)arg;
- Grouping_tmp_field *gr_field= find_matching_grouping_field(this, sel);
+ Field_pair *gr_field= find_matching_field_pair(this, sel->grouping_tmp_fields);
if (gr_field)
{
Item *producing_clone=
- gr_field->producing_item->build_clone(thd);
+ gr_field->corresponding_item->build_clone(thd);
if (producing_clone)
producing_clone->marker|= SUBSTITUTION_FL;
return producing_clone;
@@ -7704,8 +7480,8 @@ Item *Item_field::derived_grouping_field_transformer_for_where(THD *thd,
Item *
-Item_direct_view_ref::derived_grouping_field_transformer_for_where(THD *thd,
- uchar *arg)
+Item_direct_view_ref::grouping_field_transformer_for_where(THD *thd,
+ uchar *arg)
{
if ((*ref)->marker & SUBSTITUTION_FL)
{
@@ -7715,8 +7491,9 @@ Item_direct_view_ref::derived_grouping_field_transformer_for_where(THD *thd,
if (!item_equal)
return this;
st_select_lex *sel= (st_select_lex *)arg;
- Grouping_tmp_field *gr_field= find_matching_grouping_field(this, sel);
- return gr_field->producing_item->build_clone(thd);
+ Field_pair *gr_field= find_matching_field_pair(this,
+ sel->grouping_tmp_fields);
+ return gr_field->corresponding_item->build_clone(thd);
}
void Item_field::print(String *str, enum_query_type query_type)
@@ -7752,7 +7529,7 @@ Item_ref::Item_ref(THD *thd, Name_resolution_context *context_arg,
/*
This constructor is used to create some internal references over fixed items
*/
- if ((set_properties_only= (ref && *ref && (*ref)->fixed)))
+ if ((set_properties_only= (ref && *ref && (*ref)->is_fixed())))
set_properties();
}
@@ -7801,7 +7578,7 @@ Item_ref::Item_ref(THD *thd, TABLE_LIST *view_arg, Item **item,
/*
This constructor is used to create some internal references over fixed items
*/
- if ((set_properties_only= (ref && *ref && (*ref)->fixed)))
+ if ((set_properties_only= (ref && *ref && (*ref)->is_fixed())))
set_properties();
}
@@ -7927,7 +7704,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
goto error; /* Some error occurred (e.g. ambiguous names). */
if (ref != not_found_item)
{
- DBUG_ASSERT(*ref && (*ref)->fixed);
+ DBUG_ASSERT(*ref && (*ref)->is_fixed());
prev_subselect_item->used_tables_and_const_cache_join(*ref);
break;
}
@@ -8050,7 +7827,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
goto error;
}
/* Should be checked in resolve_ref_in_select_and_group(). */
- DBUG_ASSERT(*ref && (*ref)->fixed);
+ DBUG_ASSERT(*ref && (*ref)->is_fixed());
mark_as_dependent(thd, last_checked_context->select_lex,
context->select_lex, this, this);
/*
@@ -8075,13 +7852,13 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
*/
if (!((*ref)->type() == REF_ITEM &&
((Item_ref *)(*ref))->ref_type() == OUTER_REF) &&
- (((*ref)->with_sum_func && name.str &&
- !(current_sel->linkage != GLOBAL_OPTIONS_TYPE &&
+ (((*ref)->with_sum_func() && name.str &&
+ !(current_sel->get_linkage() != GLOBAL_OPTIONS_TYPE &&
current_sel->having_fix_field)) ||
- !(*ref)->fixed))
+ !(*ref)->is_fixed()))
{
my_error(ER_ILLEGAL_REFERENCE, MYF(0),
- name.str, ((*ref)->with_sum_func?
+ name.str, ((*ref)->with_sum_func() ?
"reference to group function":
"forward reference in item list"));
goto error;
@@ -8107,7 +7884,7 @@ void Item_ref::set_properties()
We have to remember if we refer to a sum function, to ensure that
split_sum_func() doesn't try to change the reference.
*/
- with_sum_func= (*ref)->with_sum_func;
+ copy_with_sum_func(*ref);
with_param= (*ref)->with_param;
with_window_func= (*ref)->with_window_func;
with_field= (*ref)->with_field;
@@ -8293,6 +8070,14 @@ String *Item_ref::str_result(String* str)
}
+bool Item_ref::val_native_result(THD *thd, Native *to)
+{
+ return result_field ?
+ val_native_from_field(result_field, to) :
+ val_native(thd, to);
+}
+
+
my_decimal *Item_ref::val_decimal_result(my_decimal *decimal_value)
{
if (result_field)
@@ -8381,9 +8166,15 @@ bool Item_ref::is_null()
}
-bool Item_ref::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
+bool Item_ref::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+{
+ return (null_value=(*ref)->get_date_result(thd, ltime, fuzzydate));
+}
+
+
+bool Item_ref::val_native(THD *thd, Native *to)
{
- return (null_value=(*ref)->get_date_result(ltime,fuzzydate));
+ return val_native_from_item(thd, *ref, to);
}
@@ -8518,9 +8309,15 @@ bool Item_direct_ref::is_null()
}
-bool Item_direct_ref::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
+bool Item_direct_ref::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+{
+ return (null_value=(*ref)->get_date(thd, ltime, fuzzydate));
+}
+
+
+bool Item_direct_ref::val_native(THD *thd, Native *to)
{
- return (null_value=(*ref)->get_date(ltime,fuzzydate));
+ return val_native_from_item(thd, *ref, to);
}
@@ -8532,10 +8329,10 @@ Item_cache_wrapper::~Item_cache_wrapper()
Item_cache_wrapper::Item_cache_wrapper(THD *thd, Item *item_arg):
Item_result_field(thd), orig_item(item_arg), expr_cache(NULL), expr_value(NULL)
{
- DBUG_ASSERT(orig_item->fixed);
+ DBUG_ASSERT(orig_item->is_fixed());
Type_std_attributes::set(orig_item);
maybe_null= orig_item->maybe_null;
- with_sum_func= orig_item->with_sum_func;
+ copy_with_sum_func(orig_item);
with_param= orig_item->with_param;
with_field= orig_item->with_field;
name= item_arg->name;
@@ -8594,7 +8391,7 @@ void Item_cache_wrapper::print(String *str, enum_query_type query_type)
bool Item_cache_wrapper::fix_fields(THD *thd __attribute__((unused)),
Item **it __attribute__((unused)))
{
- DBUG_ASSERT(orig_item->fixed);
+ DBUG_ASSERT(orig_item->is_fixed());
DBUG_ASSERT(fixed);
return FALSE;
}
@@ -8813,6 +8610,28 @@ String *Item_cache_wrapper::val_str(String* str)
/**
+ Get the native value of the possibly cached item
+*/
+
+bool Item_cache_wrapper::val_native(THD *thd, Native* to)
+{
+ Item *cached_value;
+ DBUG_ENTER("Item_cache_wrapper::val_native");
+ if (!expr_cache)
+ DBUG_RETURN(val_native_from_item(thd, orig_item, to));
+
+ if ((cached_value= check_cache()))
+ DBUG_RETURN(val_native_from_item(thd, cached_value, to));
+
+ cache();
+ if ((null_value= expr_value->null_value))
+ DBUG_RETURN(true);
+ DBUG_RETURN(expr_value->val_native(thd, to));
+}
+
+
+
+/**
Get the decimal value of the possibly cached item
*/
@@ -8897,18 +8716,18 @@ bool Item_cache_wrapper::is_null()
Get the date value of the possibly cached item
*/
-bool Item_cache_wrapper::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_cache_wrapper::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
Item *cached_value;
DBUG_ENTER("Item_cache_wrapper::get_date");
if (!expr_cache)
- DBUG_RETURN((null_value= orig_item->get_date(ltime, fuzzydate)));
+ DBUG_RETURN((null_value= orig_item->get_date(thd, ltime, fuzzydate)));
if ((cached_value= check_cache()))
- DBUG_RETURN((null_value= cached_value->get_date(ltime, fuzzydate)));
+ DBUG_RETURN((null_value= cached_value->get_date(thd, ltime, fuzzydate)));
cache();
- DBUG_RETURN((null_value= expr_value->get_date(ltime, fuzzydate)));
+ DBUG_RETURN((null_value= expr_value->get_date(thd, ltime, fuzzydate)));
}
@@ -8924,7 +8743,7 @@ int Item_cache_wrapper::save_in_field(Field *to, bool no_conversions)
Item* Item_cache_wrapper::get_tmp_table_item(THD *thd)
{
- if (!orig_item->with_sum_func && !orig_item->const_item())
+ if (!orig_item->with_sum_func() && !orig_item->const_item())
return new (thd->mem_root) Item_temptable_field(thd, result_field);
return copy_or_same(thd);
}
@@ -8954,7 +8773,7 @@ bool Item_direct_view_ref::fix_fields(THD *thd, Item **reference)
/* view field reference must be defined */
DBUG_ASSERT(*ref);
/* (*ref)->check_cols() will be done in Item_direct_ref::fix_fields */
- if ((*ref)->fixed)
+ if ((*ref)->is_fixed())
{
Item *ref_item= (*ref)->real_item();
if (ref_item->type() == Item::FIELD_ITEM)
@@ -9150,6 +8969,19 @@ Item *Item_direct_view_ref::propagate_equal_fields(THD *thd,
}
+Item *Item_ref::propagate_equal_fields(THD *thd, const Context &ctx,
+ COND_EQUAL *cond)
+{
+ Item *field_item= real_item();
+ if (field_item->type() != FIELD_ITEM)
+ return this;
+ Item *item= field_item->propagate_equal_fields(thd, ctx, cond);
+ if (item != field_item)
+ return item;
+ return this;
+}
+
+
/**
Replace an Item_direct_view_ref for an equal Item_field evaluated earlier
(if any).
@@ -9192,6 +9024,20 @@ Item *Item_direct_view_ref::replace_equal_field(THD *thd, uchar *arg)
}
+bool Item_field::excl_dep_on_table(table_map tab_map)
+{
+ return used_tables() == tab_map ||
+ (item_equal && (item_equal->used_tables() & tab_map));
+}
+
+
+bool
+Item_field::excl_dep_on_grouping_fields(st_select_lex *sel)
+{
+ return find_matching_field_pair(this, sel->grouping_tmp_fields) != NULL;
+}
+
+
bool Item_direct_view_ref::excl_dep_on_table(table_map tab_map)
{
table_map used= used_tables();
@@ -9207,17 +9053,34 @@ bool Item_direct_view_ref::excl_dep_on_table(table_map tab_map)
return (*ref)->excl_dep_on_table(tab_map);
}
+
bool Item_direct_view_ref::excl_dep_on_grouping_fields(st_select_lex *sel)
{
if (item_equal)
{
DBUG_ASSERT(real_item()->type() == Item::FIELD_ITEM);
- return find_matching_grouping_field(this, sel) != NULL;
+ return (find_matching_field_pair(this, sel->grouping_tmp_fields) != NULL);
}
return (*ref)->excl_dep_on_grouping_fields(sel);
}
+bool Item_args::excl_dep_on_grouping_fields(st_select_lex *sel)
+{
+ for (uint i= 0; i < arg_count; i++)
+ {
+ if (args[i]->type() == Item::FUNC_ITEM &&
+ ((Item_func *)args[i])->functype() == Item_func::UDF_FUNC)
+ return false;
+ if (args[i]->const_item())
+ continue;
+ if (!args[i]->excl_dep_on_grouping_fields(sel))
+ return false;
+ }
+ return true;
+}
+
+
bool Item_default_value::eq(const Item *item, bool binary_cmp) const
{
return item->type() == DEFAULT_VALUE_ITEM &&
@@ -9255,7 +9118,6 @@ bool Item_default_value::fix_fields(THD *thd, Item **items)
}
thd->column_usage= save_column_usage;
-
real_arg= arg->real_item();
if (real_arg->type() != FIELD_ITEM)
{
@@ -9351,10 +9213,10 @@ my_decimal *Item_default_value::val_decimal(my_decimal *decimal_value)
return Item_field::val_decimal(decimal_value);
}
-bool Item_default_value::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
+bool Item_default_value::get_date(THD *thd, MYSQL_TIME *ltime,date_mode_t fuzzydate)
{
calculate();
- return Item_field::get_date(ltime, fuzzydate);
+ return Item_field::get_date(thd, ltime, fuzzydate);
}
bool Item_default_value::send(Protocol *protocol, st_value *buffer)
@@ -9457,7 +9319,7 @@ my_decimal *Item_ignore_value::val_decimal(my_decimal *decimal_value)
return 0;
}
-bool Item_ignore_value::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_ignore_value::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(0); // never should be called
null_value= 1;
@@ -9481,7 +9343,7 @@ bool Item_insert_value::fix_fields(THD *thd, Item **items)
{
DBUG_ASSERT(fixed == 0);
/* We should only check that arg is in first table */
- if (!arg->fixed)
+ if (!arg->is_fixed())
{
bool res;
TABLE_LIST *orig_next_table= context->last_name_resolution_table;
@@ -9693,7 +9555,7 @@ void Item_trigger_field::cleanup()
Due to the special nature of Item_trigger_field we should not do most of
the things from Item_field::cleanup() or Item_ident::cleanup() here.
*/
- Item::cleanup();
+ Item_fixed_hybrid::cleanup();
}
@@ -9752,73 +9614,14 @@ void resolve_const_item(THD *thd, Item **ref, Item *comp_item)
int stored_field_cmp_to_item(THD *thd, Field *field, Item *item)
{
- Item_result res_type=item_cmp_type(field->result_type(),
- item->result_type());
- /*
- We have to check field->cmp_type() instead of res_type,
- as result_type() - and thus res_type - can never be TIME_RESULT (yet).
- */
- if (field->cmp_type() == TIME_RESULT)
- {
- MYSQL_TIME field_time, item_time, item_time2, *item_time_cmp= &item_time;
- if (field->type() == MYSQL_TYPE_TIME)
- {
- field->get_time(&field_time);
- item->get_time(&item_time);
- }
- else
- {
- field->get_date(&field_time, TIME_INVALID_DATES);
- item->get_date(&item_time, TIME_INVALID_DATES);
- if (item_time.time_type == MYSQL_TIMESTAMP_TIME)
- if (time_to_datetime(thd, &item_time, item_time_cmp= &item_time2))
- return 1;
- }
- return my_time_compare(&field_time, item_time_cmp);
- }
- if (res_type == STRING_RESULT)
+ Type_handler_hybrid_field_type cmp(field->type_handler_for_comparison());
+ if (cmp.aggregate_for_comparison(item->type_handler_for_comparison()))
{
- char item_buff[MAX_FIELD_WIDTH];
- char field_buff[MAX_FIELD_WIDTH];
-
- String item_tmp(item_buff,sizeof(item_buff),&my_charset_bin);
- String field_tmp(field_buff,sizeof(field_buff),&my_charset_bin);
- String *item_result= item->val_str(&item_tmp);
- /*
- Some implementations of Item::val_str(String*) actually modify
- the field Item::null_value, hence we can't check it earlier.
- */
- if (item->null_value)
- return 0;
- String *field_result= field->val_str(&field_tmp);
- return sortcmp(field_result, item_result, field->charset());
- }
- if (res_type == INT_RESULT)
- return 0; // Both are of type int
- if (res_type == DECIMAL_RESULT)
- {
- my_decimal item_buf, *item_val,
- field_buf, *field_val;
- item_val= item->val_decimal(&item_buf);
- if (item->null_value)
- return 0;
- field_val= field->val_decimal(&field_buf);
- return my_decimal_cmp(field_val, item_val);
- }
- /*
- The patch for Bug#13463415 started using this function for comparing
- BIGINTs. That uncovered a bug in Visual Studio 32bit optimized mode.
- Prefixing the auto variables with volatile fixes the problem....
- */
- volatile double result= item->val_real();
- if (item->null_value)
+ // At fix_fields() time we checked that "field" and "item" are comparable
+ DBUG_ASSERT(0);
return 0;
- volatile double field_result= field->val_real();
- if (field_result < result)
- return -1;
- else if (field_result > result)
- return 1;
- return 0;
+ }
+ return cmp.type_handler()->stored_field_cmp_to_item(thd, field, item);
}
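The rewritten stored_field_cmp_to_item() no longer branches on result types itself; it aggregates the two sides into a single comparison handler and lets that handler perform the comparison. The self-contained sketch below shows only this dispatch idea, with made-up handler names and a deliberately simplified aggregation rule, not the server's Type_handler logic.

// Sketch of "aggregate both sides' handlers, then let the aggregated
// handler compare". All names and rules here are illustrative.
#include <iostream>
#include <string>

struct CmpHandler
{
  const char *name;
  int (*cmp)(const std::string &field_val, const std::string &item_val);
};

static int cmp_as_int(const std::string &a, const std::string &b)
{
  long long x= std::stoll(a), y= std::stoll(b);
  return (x > y) - (x < y);
}

static int cmp_as_string(const std::string &a, const std::string &b)
{ return a.compare(b); }

static const CmpHandler int_handler{ "int", cmp_as_int };
static const CmpHandler str_handler{ "string", cmp_as_string };

// Toy aggregation rule: two integer sides compare as integers,
// everything else falls back to string comparison.
static const CmpHandler *aggregate_for_comparison(const CmpHandler *a,
                                                  const CmpHandler *b)
{
  return (a == &int_handler && b == &int_handler) ? &int_handler
                                                  : &str_handler;
}

int main()
{
  const CmpHandler *h= aggregate_for_comparison(&int_handler, &str_handler);
  // Negative result: as strings, "10" sorts before "9".
  std::cout << h->name << " " << h->cmp("10", "9") << "\n";
  return 0;
}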
@@ -9881,7 +9684,6 @@ bool Item_cache_int::cache_value()
String *Item_cache_int::val_str(String *str)
{
- DBUG_ASSERT(fixed == 1);
if (!has_value())
return NULL;
str->set_int(value, unsigned_flag, default_charset());
@@ -9891,7 +9693,6 @@ String *Item_cache_int::val_str(String *str)
my_decimal *Item_cache_int::val_decimal(my_decimal *decimal_val)
{
- DBUG_ASSERT(fixed == 1);
if (!has_value())
return NULL;
int2my_decimal(E_DEC_FATAL_ERROR, value, unsigned_flag, decimal_val);
@@ -9900,7 +9701,6 @@ my_decimal *Item_cache_int::val_decimal(my_decimal *decimal_val)
double Item_cache_int::val_real()
{
- DBUG_ASSERT(fixed == 1);
if (!has_value())
return 0.0;
return (double) value;
@@ -9908,7 +9708,6 @@ double Item_cache_int::val_real()
longlong Item_cache_int::val_int()
{
- DBUG_ASSERT(fixed == 1);
if (!has_value())
return 0;
return value;
@@ -9948,88 +9747,12 @@ Item_cache_temporal::Item_cache_temporal(THD *thd, const Type_handler *handler)
}
-longlong Item_cache_temporal::val_datetime_packed()
-{
- DBUG_ASSERT(fixed == 1);
- if (Item_cache_temporal::field_type() == MYSQL_TYPE_TIME)
- return Item::val_datetime_packed(); // TIME-to-DATETIME conversion needed
- if ((!value_cached && !cache_value()) || null_value)
- {
- null_value= TRUE;
- return 0;
- }
- return value;
-}
-
-
-longlong Item_cache_temporal::val_time_packed()
-{
- DBUG_ASSERT(fixed == 1);
- if (Item_cache_temporal::field_type() != MYSQL_TYPE_TIME)
- return Item::val_time_packed(); // DATETIME-to-TIME conversion needed
- if ((!value_cached && !cache_value()) || null_value)
- {
- null_value= TRUE;
- return 0;
- }
- return value;
-}
-
-
-String *Item_cache_temporal::val_str(String *str)
-{
- DBUG_ASSERT(fixed == 1);
- if (!has_value())
- {
- null_value= true;
- return NULL;
- }
- return val_string_from_date(str);
-}
-
-
-my_decimal *Item_cache_temporal::val_decimal(my_decimal *decimal_value)
-{
- DBUG_ASSERT(fixed == 1);
- if ((!value_cached && !cache_value()) || null_value)
- {
- null_value= true;
- return NULL;
- }
- return val_decimal_from_date(decimal_value);
-}
-
-
-longlong Item_cache_temporal::val_int()
-{
- DBUG_ASSERT(fixed == 1);
- if ((!value_cached && !cache_value()) || null_value)
- {
- null_value= true;
- return 0;
- }
- return val_int_from_date();
-}
-
-
-double Item_cache_temporal::val_real()
-{
- DBUG_ASSERT(fixed == 1);
- if ((!value_cached && !cache_value()) || null_value)
- {
- null_value= true;
- return 0;
- }
- return val_real_from_date();
-}
-
-
bool Item_cache_temporal::cache_value()
{
if (!example)
return false;
value_cached= true;
- value= example->val_datetime_packed_result();
+ value= example->val_datetime_packed_result(current_thd);
null_value= example->null_value;
return true;
}
@@ -10040,16 +9763,14 @@ bool Item_cache_time::cache_value()
if (!example)
return false;
value_cached= true;
- value= example->val_time_packed_result();
+ value= example->val_time_packed_result(current_thd);
null_value= example->null_value;
return true;
}
-bool Item_cache_temporal::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_cache_temporal::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- ErrConvInteger str(value);
-
if (!has_value())
{
bzero((char*) ltime,sizeof(*ltime));
@@ -10064,7 +9785,8 @@ bool Item_cache_temporal::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
int Item_cache_temporal::save_in_field(Field *field, bool no_conversions)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, 0))
+ // This is a temporal type. No nanoseconds, so round mode is not important.
+ if (get_date(field->get_thd(), &ltime, TIME_CONV_NONE | TIME_FRAC_NONE))
return set_field_to_null_with_conversions(field, no_conversions);
field->set_notnull();
int error= field->store_time_dec(&ltime, decimals);
@@ -10107,24 +9829,80 @@ Item *Item_cache_temporal::convert_to_basic_const_item(THD *thd)
Item *Item_cache_datetime::make_literal(THD *thd)
{
MYSQL_TIME ltime;
- unpack_time(val_datetime_packed(), &ltime, MYSQL_TIMESTAMP_DATETIME);
+ unpack_time(val_datetime_packed(thd), &ltime, MYSQL_TIMESTAMP_DATETIME);
return new (thd->mem_root) Item_datetime_literal(thd, &ltime, decimals);
}
Item *Item_cache_date::make_literal(THD *thd)
{
MYSQL_TIME ltime;
- unpack_time(val_datetime_packed(), &ltime, MYSQL_TIMESTAMP_DATE);
+ unpack_time(val_datetime_packed(thd), &ltime, MYSQL_TIMESTAMP_DATE);
return new (thd->mem_root) Item_date_literal(thd, &ltime);
}
Item *Item_cache_time::make_literal(THD *thd)
{
MYSQL_TIME ltime;
- unpack_time(val_time_packed(), &ltime, MYSQL_TIMESTAMP_TIME);
+ unpack_time(val_time_packed(thd), &ltime, MYSQL_TIMESTAMP_TIME);
return new (thd->mem_root) Item_time_literal(thd, &ltime, decimals);
}
+
+int Item_cache_timestamp::save_in_field(Field *field, bool no_conversions)
+{
+ if (!has_value())
+ return set_field_to_null_with_conversions(field, no_conversions);
+ return m_native.save_in_field(field, decimals);
+}
+
+
+bool Item_cache_timestamp::val_native(THD *thd, Native *to)
+{
+ if (!has_value())
+ {
+ null_value= true;
+ return true;
+ }
+ return null_value= to->copy(m_native);
+}
+
+
+Datetime Item_cache_timestamp::to_datetime(THD *thd)
+{
+ DBUG_ASSERT(is_fixed() == 1);
+ if (!has_value())
+ {
+ null_value= true;
+ return Datetime();
+ }
+ return Datetime(thd, Timestamp_or_zero_datetime(m_native).tv());
+}
+
+
+bool Item_cache_timestamp::get_date(THD *thd, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate)
+{
+ if (!has_value())
+ {
+ set_zero_time(ltime, MYSQL_TIMESTAMP_DATETIME);
+ return true;
+ }
+ Timestamp_or_zero_datetime tm(m_native);
+ return (null_value= tm.to_TIME(thd, ltime, fuzzydate));
+}
+
+
+bool Item_cache_timestamp::cache_value()
+{
+ if (!example)
+ return false;
+ value_cached= true;
+ null_value= example->val_native_with_conversion_result(current_thd, &m_native,
+ type_handler());
+ return true;
+}
+
+
bool Item_cache_real::cache_value()
{
if (!example)
@@ -10138,7 +9916,6 @@ bool Item_cache_real::cache_value()
double Item_cache_real::val_real()
{
- DBUG_ASSERT(fixed == 1);
if (!has_value())
return 0.0;
return value;
@@ -10146,7 +9923,6 @@ double Item_cache_real::val_real()
longlong Item_cache_real::val_int()
{
- DBUG_ASSERT(fixed == 1);
if (!has_value())
return 0;
return Converter_double_to_longlong(value, unsigned_flag).result();
@@ -10155,7 +9931,6 @@ longlong Item_cache_real::val_int()
String* Item_cache_real::val_str(String *str)
{
- DBUG_ASSERT(fixed == 1);
if (!has_value())
return NULL;
str->set_real(value, decimals, default_charset());
@@ -10165,7 +9940,6 @@ String* Item_cache_real::val_str(String *str)
my_decimal *Item_cache_real::val_decimal(my_decimal *decimal_val)
{
- DBUG_ASSERT(fixed == 1);
if (!has_value())
return NULL;
double2my_decimal(E_DEC_FATAL_ERROR, value, decimal_val);
@@ -10200,38 +9974,22 @@ bool Item_cache_decimal::cache_value()
double Item_cache_decimal::val_real()
{
- DBUG_ASSERT(fixed);
- double res;
- if (!has_value())
- return 0.0;
- my_decimal2double(E_DEC_FATAL_ERROR, &decimal_value, &res);
- return res;
+ return !has_value() ? 0.0 : decimal_value.to_double();
}
longlong Item_cache_decimal::val_int()
{
- DBUG_ASSERT(fixed);
- longlong res;
- if (!has_value())
- return 0;
- my_decimal2int(E_DEC_FATAL_ERROR, &decimal_value, unsigned_flag, &res);
- return res;
+ return !has_value() ? 0 : decimal_value.to_longlong(unsigned_flag);
}
String* Item_cache_decimal::val_str(String *str)
{
- DBUG_ASSERT(fixed);
- if (!has_value())
- return NULL;
- my_decimal_round(E_DEC_FATAL_ERROR, &decimal_value, decimals, FALSE,
- &decimal_value);
- my_decimal2string(E_DEC_FATAL_ERROR, &decimal_value, 0, 0, 0, str);
- return str;
+ return !has_value() ? NULL :
+ decimal_value.to_string_round(str, decimals, &decimal_value);
}
my_decimal *Item_cache_decimal::val_decimal(my_decimal *val)
{
- DBUG_ASSERT(fixed);
if (!has_value())
return NULL;
return &decimal_value;
@@ -10248,9 +10006,8 @@ Item *Item_cache_decimal::convert_to_basic_const_item(THD *thd)
new_item= (Item*) new (thd->mem_root) Item_null(thd);
else
{
- my_decimal decimal_value;
- my_decimal *result= val_decimal(&decimal_value);
- new_item= (Item*) new (thd->mem_root) Item_decimal(thd, result);
+ VDec tmp(this);
+ new_item= (Item*) new (thd->mem_root) Item_decimal(thd, tmp.ptr());
}
return new_item;
}
@@ -10283,7 +10040,6 @@ bool Item_cache_str::cache_value()
double Item_cache_str::val_real()
{
- DBUG_ASSERT(fixed == 1);
if (!has_value())
return 0.0;
return value ? double_from_string_with_check(value) : 0.0;
@@ -10292,7 +10048,6 @@ double Item_cache_str::val_real()
longlong Item_cache_str::val_int()
{
- DBUG_ASSERT(fixed == 1);
if (!has_value())
return 0;
return value ? longlong_from_string_with_check(value) : 0;
@@ -10301,7 +10056,6 @@ longlong Item_cache_str::val_int()
String* Item_cache_str::val_str(String *str)
{
- DBUG_ASSERT(fixed == 1);
if (!has_value())
return 0;
return value;
@@ -10310,7 +10064,6 @@ String* Item_cache_str::val_str(String *str)
my_decimal *Item_cache_str::val_decimal(my_decimal *decimal_val)
{
- DBUG_ASSERT(fixed == 1);
if (!has_value())
return NULL;
return value ? decimal_from_string_with_check(decimal_val, value) : 0;
@@ -10494,7 +10247,7 @@ String *Item_type_holder::val_str(String*)
return 0;
}
-bool Item_type_holder::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_type_holder::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(0); // should never be called
return true;
@@ -10503,7 +10256,7 @@ bool Item_type_holder::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
void Item_result_field::cleanup()
{
DBUG_ENTER("Item_result_field::cleanup()");
- Item::cleanup();
+ Item_fixed_hybrid::cleanup();
result_field= 0;
DBUG_VOID_RETURN;
}
@@ -10676,17 +10429,6 @@ const char *dbug_print(SELECT_LEX_UNIT *x) { return dbug_print_unit(x); }
#endif /*DBUG_OFF*/
-bool Item_field::excl_dep_on_table(table_map tab_map)
-{
- return used_tables() == tab_map ||
- (item_equal && (item_equal->used_tables() & tab_map));
-}
-
-bool
-Item_field::excl_dep_on_grouping_fields(st_select_lex *sel)
-{
- return find_matching_grouping_field(this, sel) != NULL;
-}
void Item::register_in(THD *thd)
diff --git a/sql/item.h b/sql/item.h
index 4261ef64950..ec474b81a08 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -100,7 +100,10 @@ class sp_head;
class Protocol;
struct TABLE_LIST;
void item_init(void); /* Init item functions */
+class Item_basic_value;
+class Item_result_field;
class Item_field;
+class Item_ref;
class Item_param;
class user_var_entry;
class JOIN;
@@ -108,6 +111,7 @@ struct KEY_FIELD;
struct SARGABLE_PARAM;
class RANGE_OPT_PARAM;
class SEL_TREE;
+class With_sum_func_cache;
enum precedence {
LOWEST_PRECEDENCE,
@@ -147,8 +151,9 @@ bool mark_unsupported_function(const char *w1, const char *w2,
#define NO_EXTRACTION_FL (1 << 6)
#define FULL_EXTRACTION_FL (1 << 7)
-#define SUBSTITUTION_FL (1 << 8)
-#define EXTRACTION_MASK (NO_EXTRACTION_FL | FULL_EXTRACTION_FL)
+#define DELETION_FL (1 << 8)
+#define SUBSTITUTION_FL (1 << 9)
+#define EXTRACTION_MASK (NO_EXTRACTION_FL | FULL_EXTRACTION_FL | DELETION_FL)
extern const char *item_empty_name;
@@ -597,6 +602,7 @@ typedef bool (Item::*Item_processor) (void *arg);
typedef bool (Item::*Item_analyzer) (uchar **argp);
typedef Item* (Item::*Item_transformer) (THD *thd, uchar *arg);
typedef void (*Cond_traverser) (const Item *item, void *arg);
+typedef bool (Item::*Pushdown_checker) (uchar *arg);
struct st_cond_statistic;
@@ -629,6 +635,87 @@ public:
String_copier_for_item(THD *thd): m_thd(thd) { }
};
+
+/**
+ A helper class describing what kind of Item created a temporary field.
+ - If m_field is set, then the temporary field was created from Field
+ (e.g. when the Item was Item_field, or Item_ref pointing to Item_field)
+ - If m_default_field is set, then there is a usable DEFAULT value.
+ (e.g. when the Item is Item_field)
+ - If m_item_result_field is set, then the temporary field was created
+ from certain sub-types of Item_result_field (e.g. Item_func)
+ See create_tmp_field() in sql_select.cc for details.
+*/
+
+class Tmp_field_src
+{
+ Field *m_field;
+ Field *m_default_field;
+ Item_result_field *m_item_result_field;
+public:
+ Tmp_field_src()
+ :m_field(0),
+ m_default_field(0),
+ m_item_result_field(0)
+ { }
+ Field *field() const { return m_field; }
+ Field *default_field() const { return m_default_field; }
+ Item_result_field *item_result_field() const { return m_item_result_field; }
+ void set_field(Field *field) { m_field= field; }
+ void set_default_field(Field *field) { m_default_field= field; }
+ void set_item_result_field(Item_result_field *item)
+ { m_item_result_field= item; }
+};
+
+
+/**
+ Parameters for create_tmp_field_ex().
+ See create_tmp_field() in sql_select.cc for details.
+*/
+
+class Tmp_field_param
+{
+ bool m_group;
+ bool m_modify_item;
+ bool m_table_cant_handle_bit_fields;
+ bool m_make_copy_field;
+public:
+ Tmp_field_param(bool group,
+ bool modify_item,
+ bool table_cant_handle_bit_fields,
+ bool make_copy_field)
+ :m_group(group),
+ m_modify_item(modify_item),
+ m_table_cant_handle_bit_fields(table_cant_handle_bit_fields),
+ m_make_copy_field(make_copy_field)
+ { }
+ bool group() const { return m_group; }
+ bool modify_item() const { return m_modify_item; }
+ bool table_cant_handle_bit_fields() const
+ { return m_table_cant_handle_bit_fields; }
+ bool make_copy_field() const { return m_make_copy_field; }
+ void set_modify_item(bool to) { m_modify_item= to; }
+};
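
For orientation, here is a minimal self-contained sketch of the calling convention these two helper classes imply (all names below are hypothetical stand-ins, not the MariaDB types): the caller fills in a parameter object, hands over an empty source descriptor, and inspects the descriptor afterwards to learn what kind of Item produced the temporary field.

    // Hypothetical stand-ins; not MariaDB code.
    struct Table {};
    struct Field {};

    struct TmpFieldSrc {                 // plays the role of Tmp_field_src
      Field *field= nullptr;
      Field *default_field= nullptr;
    };

    struct TmpFieldParam {               // plays the role of Tmp_field_param
      bool group;
      bool modify_item;
    };

    struct Expr {                        // plays the role of Item
      virtual Field *create_tmp_field_ex(Table *t, TmpFieldSrc *src,
                                         const TmpFieldParam *param)= 0;
      virtual ~Expr() {}
    };

    Field *make_tmp_field(Expr *e, Table *t)
    {
      TmpFieldSrc src;                               // out-parameters
      TmpFieldParam param{ /*group*/ false, /*modify_item*/ true };
      Field *f= e->create_tmp_field_ex(t, &src, &param);
      // src.field / src.default_field now describe who created the field.
      return f;
    }
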
+
+
+class Item_const
+{
+public:
+ virtual ~Item_const() {}
+ virtual const Type_all_attributes *get_type_all_attributes_from_const() const= 0;
+ virtual bool const_is_null() const { return false; }
+ virtual const longlong *const_ptr_longlong() const { return NULL; }
+ virtual const double *const_ptr_double() const { return NULL; }
+ virtual const my_decimal *const_ptr_my_decimal() const { return NULL; }
+ virtual const MYSQL_TIME *const_ptr_mysql_time() const { return NULL; }
+ virtual const String *const_ptr_string() const { return NULL; }
+};
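
The Item_const interface above lets callers read a constant's value directly, by type, without evaluating the Item. A self-contained sketch of the const_ptr_xxx() pattern (hypothetical names, not the real classes):

    #include <cstdio>

    struct ConstValue {                       // plays the role of Item_const
      virtual bool const_is_null() const { return false; }
      virtual const long long *const_ptr_longlong() const { return nullptr; }
      virtual const double *const_ptr_double() const { return nullptr; }
      virtual ~ConstValue() {}
    };

    struct IntConst : ConstValue {            // like Item_int's overrides
      long long value;
      explicit IntConst(long long v) : value(v) {}
      const long long *const_ptr_longlong() const override { return &value; }
    };

    void describe(const ConstValue &c)
    {
      if (c.const_is_null())
        std::puts("NULL constant");
      else if (const long long *v= c.const_ptr_longlong())
        std::printf("integer constant: %lld\n", *v);
      else
        std::puts("constant of some other type");
    }
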
+
+
+/****************************************************************************/
+
+#define STOP_PTR ((void *) 1)
+
class Item: public Value_source,
public Type_all_attributes
{
@@ -653,16 +740,26 @@ public:
static void operator delete(void *ptr, MEM_ROOT *mem_root) {}
enum Type {FIELD_ITEM= 0, FUNC_ITEM, SUM_FUNC_ITEM,
- WINDOW_FUNC_ITEM, STRING_ITEM,
- INT_ITEM, REAL_ITEM, NULL_ITEM, VARBIN_ITEM,
- COPY_STR_ITEM, FIELD_AVG_ITEM, DEFAULT_VALUE_ITEM,
- PROC_ITEM,COND_ITEM, REF_ITEM, FIELD_STD_ITEM,
- FIELD_VARIANCE_ITEM, INSERT_VALUE_ITEM,
+ WINDOW_FUNC_ITEM,
+ /*
+ NOT NULL literal-like constants, which do not change their
+ value during the execution of an SQL statement, but can optionally
+ change their value between statements:
+ - Item_literal - real NOT NULL constants
+ - Item_param - can change between statements
+ - Item_splocal - can change between statements
+ - Item_user_var_as_out_param - hack
+ Note, Item_user_var_as_out_param actually abuses the type code.
+ It should be moved out of the Item tree eventually.
+ */
+ CONST_ITEM,
+ NULL_ITEM, // Item_null or Item_param bound to NULL
+ COPY_STR_ITEM, FIELD_AVG_ITEM, DEFAULT_VALUE_ITEM,
+ PROC_ITEM,COND_ITEM, REF_ITEM, FIELD_STD_ITEM,
+ FIELD_VARIANCE_ITEM, INSERT_VALUE_ITEM,
SUBSELECT_ITEM, ROW_ITEM, CACHE_ITEM, TYPE_HOLDER,
- PARAM_ITEM, TRIGGER_FIELD_ITEM, DECIMAL_ITEM,
- XPATH_NODESET, XPATH_NODESET_CMP,
- VIEW_FIXER_ITEM, EXPR_CACHE_ITEM,
- DATE_ITEM};
+ PARAM_ITEM, TRIGGER_FIELD_ITEM,
+ EXPR_CACHE_ITEM};
enum cond_result { COND_UNDEF,COND_OK,COND_TRUE,COND_FALSE };
@@ -704,11 +801,34 @@ protected:
*/
Field *tmp_table_field_from_field_type(TABLE *table)
{
+ DBUG_ASSERT(is_fixed());
const Type_handler *h= type_handler()->type_handler_for_tmp_table(this);
return h->make_and_init_table_field(&name, Record_addr(maybe_null),
*this, table);
}
+ /**
+ Create a temporary field for a simple Item, which does not
+ need any special action after the field creation:
+ - is not an Item_field descendant (and not a reference to Item_field)
+ - is not an Item_result_field descendant
+ - does not need to copy any DEFAULT value to the result Field
+ - does not need to set Field::is_created_from_null_item for the result
+ See create_tmp_field_ex() for details on parameters and return values.
+ */
+ Field *create_tmp_field_ex_simple(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ DBUG_ASSERT(!param->make_copy_field());
+ DBUG_ASSERT(!is_result_field());
+ DBUG_ASSERT(type() != NULL_ITEM);
+ return tmp_table_field_from_field_type(table);
+ }
Field *create_tmp_field_int(TABLE *table, uint convert_int_length);
+ Field *tmp_table_field_from_field_type_maybe_null(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param,
+ bool is_explicit_null);
void push_note_converted_to_negative_complement(THD *thd);
void push_note_converted_to_positive_complement(THD *thd);
@@ -716,21 +836,21 @@ protected:
/* Helper methods, to get an Item value from another Item */
double val_real_from_item(Item *item)
{
- DBUG_ASSERT(fixed == 1);
+ DBUG_ASSERT(is_fixed());
double value= item->val_real();
null_value= item->null_value;
return value;
}
longlong val_int_from_item(Item *item)
{
- DBUG_ASSERT(fixed == 1);
+ DBUG_ASSERT(is_fixed());
longlong value= item->val_int();
null_value= item->null_value;
return value;
}
String *val_str_from_item(Item *item, String *str)
{
- DBUG_ASSERT(fixed == 1);
+ DBUG_ASSERT(is_fixed());
String *res= item->val_str(str);
if (res)
res->set_charset(collation.collation);
@@ -738,33 +858,48 @@ protected:
res= NULL;
return res;
}
+ bool val_native_from_item(THD *thd, Item *item, Native *to)
+ {
+ DBUG_ASSERT(is_fixed());
+ null_value= item->val_native(thd, to);
+ DBUG_ASSERT(null_value == item->null_value);
+ return null_value;
+ }
+ bool val_native_from_field(Field *field, Native *to)
+ {
+ if ((null_value= field->is_null()))
+ return true;
+ return (null_value= field->val_native(to));
+ }
+ bool val_native_with_conversion_from_item(THD *thd, Item *item, Native *to,
+ const Type_handler *handler)
+ {
+ DBUG_ASSERT(is_fixed());
+ return null_value= item->val_native_with_conversion(thd, to, handler);
+ }
my_decimal *val_decimal_from_item(Item *item, my_decimal *decimal_value)
{
- DBUG_ASSERT(fixed == 1);
+ DBUG_ASSERT(is_fixed());
my_decimal *value= item->val_decimal(decimal_value);
if ((null_value= item->null_value))
value= NULL;
return value;
}
- bool get_date_from_item(Item *item, MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date_from_item(THD *thd, Item *item,
+ MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- bool rc= item->get_date(ltime, fuzzydate);
+ bool rc= item->get_date(thd, ltime, fuzzydate);
null_value= MY_TEST(rc || item->null_value);
return rc;
}
- /*
- This method is used if the item was not null but convertion to
- TIME/DATE/DATETIME failed. We return a zero date if allowed,
- otherwise - null.
- */
- bool make_zero_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
-
public:
+
/*
Cache val_str() into the own buffer, e.g. to evaluate constant
expressions with subqueries in the ORDER/GROUP clauses.
*/
String *val_str() { return val_str(&str_value); }
+ virtual Item_func *get_item_func() { return NULL; }
const MY_LOCALE *locale_from_val_str();
@@ -784,14 +919,12 @@ public:
bool in_rollup; /* If used in GROUP BY list
of a query with ROLLUP */
bool null_value; /* if item is null */
- bool with_sum_func; /* True if item contains a sum func */
bool with_param; /* True if contains an SP parameter */
bool with_window_func; /* True if item contains a window func */
/**
True if any item except Item_sum contains a field. Set during parsing.
*/
bool with_field;
- bool fixed; /* If item fixed with fix_fields */
bool is_autogenerated_name; /* indicate was name of this Item
autogenerated or set by user */
// alloc & destruct is done as start of select on THD::mem_root
@@ -815,13 +948,13 @@ public:
void set_name(THD *thd, const char *str, size_t length, CHARSET_INFO *cs);
void set_name_no_truncate(THD *thd, const char *str, uint length,
CHARSET_INFO *cs);
- void init_make_send_field(Send_field *tmp_field,enum enum_field_types type);
+ void init_make_send_field(Send_field *tmp_field, const Type_handler *h);
virtual void cleanup();
virtual void make_send_field(THD *thd, Send_field *field);
bool fix_fields_if_needed(THD *thd, Item **ref)
{
- return fixed ? false : fix_fields(thd, ref);
+ return is_fixed() ? false : fix_fields(thd, ref);
}
bool fix_fields_if_needed_for_scalar(THD *thd, Item **ref)
{
@@ -835,7 +968,27 @@ public:
{
return fix_fields_if_needed_for_scalar(thd, ref);
}
- virtual bool fix_fields(THD *, Item **);
+ /*
+ By default we assume that an Item is fixed by the constructor.
+ */
+ virtual bool fix_fields(THD *, Item **)
+ {
+ /*
+ This should not normally be called, because usually before
+ fix_fields() we check that is_fixed() is false.
+ But historically we allow fix_fields() to be called for Items
+ that return basic_const_item()==true.
+ */
+ DBUG_ASSERT(is_fixed());
+ DBUG_ASSERT(basic_const_item());
+ return false;
+ }
+ virtual bool is_fixed() const { return true; }
+ virtual void unfix_fields()
+ {
+ DBUG_ASSERT(0);
+ }
+
/*
Fix after some tables has been pulled out. Basically re-calculate all
attributes that are dependent on the tables.
@@ -855,11 +1008,14 @@ public:
but rather uses intermediate type conversion items. Then the method is
supposed to be applied recursively.
*/
- virtual inline void quick_fix_field() { fixed= 1; }
+ virtual void quick_fix_field()
+ {
+ DBUG_ASSERT(0);
+ }
- bool save_in_value(struct st_value *value)
+ bool save_in_value(THD *thd, struct st_value *value)
{
- return type_handler()->Item_save_in_value(this, value);
+ return type_handler()->Item_save_in_value(thd, this, value);
}
/* Function returns 1 on overflow and -1 on fatal errors */
@@ -884,6 +1040,21 @@ public:
return type_handler()->field_type();
}
virtual const Type_handler *type_handler() const= 0;
+ /**
+ Detects if an Item has a fixed data type which is known
+ even before fix_fields().
+ Currently it's important only to find Items with a fixed boolean
+ data type. More item types can be marked in the future as having
+ a fixed data type (e.g. all literals, all fixed type functions, etc).
+
+ @retval NULL if the Item type is not known before fix_fields()
+ @retval the pointer to the data type handler, if the data type
+ is known before fix_fields().
+ */
+ virtual const Type_handler *fixed_type_handler() const
+ {
+ return NULL;
+ }
const Type_handler *type_handler_for_comparison() const
{
return type_handler()->type_handler_for_comparison();
@@ -892,13 +1063,9 @@ public:
{
return type_handler();
}
- virtual const Type_handler *cast_to_int_type_handler() const
- {
- return type_handler();
- }
- virtual const Type_handler *type_handler_for_system_time() const
+ const Type_handler *cast_to_int_type_handler() const
{
- return real_type_handler();
+ return real_type_handler()->cast_to_int_type_handler();
}
/* result_type() of an item specifies how the value should be returned */
Item_result result_type() const
@@ -954,6 +1121,10 @@ public:
return type_handler()->Item_get_cache(thd, this);
}
virtual enum Type type() const =0;
+ bool is_of_type(Type t, Item_result cmp) const
+ {
+ return type() == t && cmp_type() == cmp;
+ }
/*
real_type() is the type of base item. This is same as type() for
most items, except Item_ref() and Item_cache_wrapper() where it
@@ -1018,6 +1189,12 @@ public:
If value is not null null_value flag will be reset to FALSE.
*/
virtual double val_real()=0;
+ Double_null to_double_null()
+ {
+ // val_real() must be called on a separate line. See to_longlong_null()
+ double nr= val_real();
+ return Double_null(nr, null_value);
+ }
/*
Return integer representation of item.
@@ -1033,6 +1210,20 @@ public:
{
return Longlong_hybrid(val_int(), unsigned_flag);
}
+ Longlong_null to_longlong_null()
+ {
+ longlong nr= val_int();
+ /*
+ C++ does not guarantee the order of parameter evaluation,
+ so to make sure "null_value" is passed to the constructor
+ after the val_int() call, val_int() is called on a separate line.
+ */
+ return Longlong_null(nr, null_value);
+ }
+ Longlong_hybrid_null to_longlong_hybrid_null()
+ {
+ return Longlong_hybrid_null(to_longlong_null(), unsigned_flag);
+ }
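
The evaluation-order comment in to_longlong_null() above is worth spelling out: with a parenthesized constructor call, C++ does not specify the order in which the arguments are evaluated, so reading null_value in the same expression that calls val_int() can pick up a stale flag. A standalone illustration (hypothetical names, not the real Item):

    struct LonglongNull {
      long long value;
      bool is_null;
      LonglongNull(long long v, bool n) : value(v), is_null(n) {}
    };

    struct Value {
      bool null_value= false;
      long long eval() { null_value= true; return 0; }   // may set null_value

      LonglongNull to_longlong_null()
      {
        // Wrong: return LonglongNull(eval(), null_value); -- the compiler
        // may read null_value before eval() has run.
        long long nr= eval();                  // evaluate first ...
        return LonglongNull(nr, null_value);   // ... then read the flag
      }
    };
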
/**
Get a value for CAST(x AS SIGNED).
Too large positive unsigned integer values are converted
@@ -1054,7 +1245,6 @@ public:
{
return cast_to_int_type_handler()->Item_val_int_unsigned_typecast(this);
}
- longlong val_int_unsigned_typecast_from_decimal();
longlong val_int_unsigned_typecast_from_int();
longlong val_int_unsigned_typecast_from_str();
@@ -1101,6 +1291,59 @@ public:
*/
virtual String *val_str(String *str)=0;
+
+ bool val_native_with_conversion(THD *thd, Native *to, const Type_handler *th)
+ {
+ return th->Item_val_native_with_conversion(thd, this, to);
+ }
+ bool val_native_with_conversion_result(THD *thd, Native *to,
+ const Type_handler *th)
+ {
+ return th->Item_val_native_with_conversion_result(thd, this, to);
+ }
+
+ virtual bool val_native(THD *thd, Native *to)
+ {
+ /*
+ The default implementation for the Items that do not need native format:
+ - Item_basic_value
+ - Item_copy
+ - Item_exists_subselect
+ - Item_sum_field
+ - Item_sum_or_func (default implementation)
+ - Item_proc
+ - Item_type_holder (as val_xxx() are never called for it);
+ - TODO: Item_name_const will need val_native() in the future,
+ when we add this syntax:
+ TIMESTAMP WITH LOCAL TIMEZONE'2001-01-01 00:00:00'
+
+ These hybrid Item types override val_native():
+ - Item_field
+ - Item_param
+ - Item_sp_variable
+ - Item_ref
+ - Item_cache_wrapper
+ - Item_direct_ref
+ - Item_direct_view_ref
+ - Item_ref_null_helper
+ - Item_sum_or_func
+ Note, these hybrid type Item_sum_or_func descendants
+ override the default implementation:
+ * Item_sum_hybrid
+ * Item_func_hybrid_field_type
+ * Item_func_min_max
+ * Item_func_sp
+ * Item_func_last_value
+ * Item_func_rollup_const
+ */
+ DBUG_ASSERT(0);
+ return null_value= true;
+ }
+ virtual bool val_native_result(THD *thd, Native *to)
+ {
+ return val_native(thd, to);
+ }
+
/*
Returns string representation of this item in ASCII format.
@@ -1213,7 +1456,7 @@ public:
{
return type_handler()->Item_val_bool(this);
}
- virtual String *val_nodeset(String*) { return 0; }
+ virtual String *val_raw(String*) { return 0; }
bool eval_const_cond()
{
@@ -1235,28 +1478,15 @@ public:
/* Helper functions, see item_sum.cc */
String *val_string_from_real(String *str);
String *val_string_from_int(String *str);
- String *val_string_from_decimal(String *str);
- String *val_string_from_date(String *str);
my_decimal *val_decimal_from_real(my_decimal *decimal_value);
my_decimal *val_decimal_from_int(my_decimal *decimal_value);
my_decimal *val_decimal_from_string(my_decimal *decimal_value);
- my_decimal *val_decimal_from_date(my_decimal *decimal_value);
- my_decimal *val_decimal_from_time(my_decimal *decimal_value);
- longlong val_int_from_decimal();
- longlong val_int_from_date();
longlong val_int_from_real()
{
- DBUG_ASSERT(fixed == 1);
+ DBUG_ASSERT(is_fixed());
return Converter_double_to_longlong_with_warn(val_real(), false).result();
}
longlong val_int_from_str(int *error);
- double val_real_from_decimal();
- double val_real_from_date();
-
- // Get TIME, DATE or DATETIME using proper sql_mode flags for the field type
- bool get_temporal_with_sql_mode(MYSQL_TIME *ltime);
- // Check NULL value for a TIME, DATE or DATETIME expression
- bool is_null_from_temporal();
int save_time_in_field(Field *field, bool no_conversions);
int save_date_in_field(Field *field, bool no_conversions);
@@ -1317,6 +1547,14 @@ public:
a constant expression. Used in the optimizer to propagate basic constants.
*/
virtual bool basic_const_item() const { return 0; }
+ /*
+ Test if "this" is an ORDER position (rather than an expression).
+ Notes:
+ - can be called before fix_fields().
+ - local SP variables (even of integer types) are always expressions, not
+ positions. (And they can't be used before fix_fields is called for them).
+ */
+ virtual bool is_order_clause_position() const { return false; }
/* cloning of constant items (0 if it is not const) */
virtual Item *clone_item(THD *thd) { return 0; }
virtual Item* build_clone(THD *thd) { return get_copy(thd); }
@@ -1364,14 +1602,14 @@ public:
/**
TIME or DATETIME precision of the item: 0..6
*/
- uint time_precision()
+ uint time_precision(THD *thd)
{
- return const_item() ? type_handler()->Item_time_precision(this) :
+ return const_item() ? type_handler()->Item_time_precision(thd, this) :
MY_MIN(decimals, TIME_SECOND_PART_DIGITS);
}
- uint datetime_precision()
+ uint datetime_precision(THD *thd)
{
- return const_item() ? type_handler()->Item_datetime_precision(this) :
+ return const_item() ? type_handler()->Item_datetime_precision(thd, this) :
MY_MIN(decimals, TIME_SECOND_PART_DIGITS);
}
virtual longlong val_int_min() const
@@ -1479,78 +1717,28 @@ public:
void split_sum_func2(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields,
Item **ref, uint flags);
- virtual bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)= 0;
- bool get_date_from_int(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool get_date_from_year(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool get_date_from_real(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool get_date_from_decimal(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool get_date_from_string(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool get_time(MYSQL_TIME *ltime)
- { return get_date(ltime, Time::flags_for_get_date()); }
- /*
- Get time with automatic DATE/DATETIME to TIME conversion,
- by subtracting CURRENT_DATE.
-
- Performce a reverse operation to CAST(time AS DATETIME)
- Suppose:
- - we have a set of items (typically with the native MYSQL_TYPE_TIME type)
- whose item->get_date() return TIME1 value, and
- - CAST(AS DATETIME) for the same Items return DATETIME1,
- after applying time-to-datetime conversion to TIME1.
-
- then all items (typically of the native MYSQL_TYPE_{DATE|DATETIME} types)
- whose get_date() return DATETIME1 must also return TIME1 from
- get_time_with_conversion()
-
- @param thd - the thread, its variables.old_mode is checked
- to decide if use simple YYYYMMDD truncation (old mode),
- or perform full DATETIME-to-TIME conversion with
- CURRENT_DATE subtraction.
- @param[out] ltime - store the result here
- @param fuzzydate - flags to be used for the get_date() call.
- Normally, should include TIME_TIME_ONLY, to let
- the called low-level routines, e.g. str_to_date(),
- know that we prefer TIME rather that DATE/DATETIME
- and do less conversion outside of the low-level
- routines.
-
- @returns true - on error, e.g. get_date() returned NULL value,
- or get_date() returned DATETIME/DATE with non-zero
- YYYYMMDD part.
- @returns false - on success
- */
- bool get_time_with_conversion(THD *thd, MYSQL_TIME *ltime,
- ulonglong fuzzydate);
+ virtual bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)= 0;
+ bool get_date_from_int(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ bool get_date_from_real(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ bool get_date_from_string(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ bool get_time(THD *thd, MYSQL_TIME *ltime)
+ { return get_date(thd, ltime, Time::Options(thd)); }
// Get a DATE or DATETIME value in numeric packed format for comparison
- virtual longlong val_datetime_packed()
+ virtual longlong val_datetime_packed(THD *thd)
{
- ulonglong fuzzydate= TIME_FUZZY_DATES | TIME_INVALID_DATES;
- Datetime dt(current_thd, this, fuzzydate);
- return dt.is_valid_datetime() ? pack_time(dt.get_mysql_time()) : 0;
+ return Datetime(thd, this, Datetime::Options_cmp(thd)).to_packed();
}
// Get a TIME value in numeric packed format for comparison
- virtual longlong val_time_packed()
- {
- Time tm(this, Time::comparison_flags_for_get_date());
- return tm.is_valid_time() ? pack_time(tm.get_mysql_time()) : 0;
- }
- longlong val_datetime_packed_result();
- longlong val_time_packed_result()
+ virtual longlong val_time_packed(THD *thd)
{
- MYSQL_TIME ltime;
- ulonglong fuzzydate= Time::comparison_flags_for_get_date();
- return get_date_result(&ltime, fuzzydate) ? 0 : pack_time(&ltime);
+ return Time(thd, this, Time::Options_cmp(thd)).to_packed();
}
+ longlong val_datetime_packed_result(THD *thd);
+ longlong val_time_packed_result(THD *thd);
+
+ virtual bool get_date_result(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ { return get_date(thd, ltime,fuzzydate); }
- // Get a temporal value in packed DATE/DATETIME or TIME format
- longlong val_temporal_packed(enum_field_types f_type)
- {
- return f_type == MYSQL_TYPE_TIME ? val_time_packed() :
- val_datetime_packed();
- }
- bool get_seconds(ulonglong *sec, ulong *sec_part);
- virtual bool get_date_result(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date(ltime,fuzzydate); }
/*
The method allows to determine nullness of a complex expression
without fully evaluating it, instead of calling val/result*() then
@@ -1565,35 +1753,7 @@ public:
*/
virtual void update_null_value ()
{
- switch (cmp_type()) {
- case INT_RESULT:
- (void) val_int();
- break;
- case REAL_RESULT:
- (void) val_real();
- break;
- case DECIMAL_RESULT:
- {
- my_decimal tmp;
- (void) val_decimal(&tmp);
- }
- break;
- case TIME_RESULT:
- {
- MYSQL_TIME ltime;
- (void) get_temporal_with_sql_mode(&ltime);
- }
- break;
- case STRING_RESULT:
- {
- StringBuffer<MAX_FIELD_WIDTH> tmp;
- (void) val_str(&tmp);
- }
- break;
- case ROW_RESULT:
- DBUG_ASSERT(0);
- null_value= true;
- }
+ return type_handler()->Item_update_null_value(this);
}
/*
@@ -1611,10 +1771,9 @@ public:
set field of temporary table for Item which can be switched on temporary
table during query processing (grouping and so on)
*/
- virtual void set_result_field(Field *field) {}
virtual bool is_result_field() { return 0; }
- virtual bool is_bool_type() { return false; }
virtual bool is_json_type() { return false; }
+ virtual bool is_bool_literal() const { return false; }
/* This is to handle printing of default values */
virtual bool need_parentheses_in_default() { return false; }
virtual void save_in_result_field(bool no_conversions) {}
@@ -1679,8 +1838,10 @@ public:
/*========= Item processors, to be used with Item::walk() ========*/
virtual bool remove_dependence_processor(void *arg) { return 0; }
virtual bool cleanup_processor(void *arg);
- virtual bool cleanup_excluding_fields_processor(void *arg) { return cleanup_processor(arg); }
- virtual bool cleanup_excluding_const_fields_processor(void *arg) { return cleanup_processor(arg); }
+ virtual bool cleanup_excluding_fields_processor (void *arg)
+ { return cleanup_processor(arg); }
+ virtual bool cleanup_excluding_const_fields_processor (void *arg)
+ { return cleanup_processor(arg); }
virtual bool collect_item_field_processor(void *arg) { return 0; }
virtual bool collect_outer_ref_processor(void *arg) {return 0; }
virtual bool check_inner_refs_processor(void *arg) { return 0; }
@@ -1720,12 +1881,22 @@ public:
Not to be used for AND/OR formulas.
*/
virtual bool excl_dep_on_table(table_map tab_map) { return false; }
- /*
+ /*
TRUE if the expression depends only on grouping fields of sel
- or can be converted to such an exression using equalities.
+ or can be converted to such an expression using equalities.
+ It also checks that the expression does not contain stored procedures,
+ subqueries or randomly generated elements.
+ Not to be used for AND/OR formulas.
+ */
+ virtual bool excl_dep_on_grouping_fields(st_select_lex *sel)
+ { return false; }
+ /*
+ TRUE if the expression depends only on fields from the left part of
+ an IN subquery or can be converted to such an expression using equalities.
Not to be used for AND/OR formulas.
*/
- virtual bool excl_dep_on_grouping_fields(st_select_lex *sel) { return false; }
+ virtual bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred)
+ { return false; }
virtual bool switch_to_nullable_fields_processor(void *arg) { return 0; }
virtual bool find_function_processor (void *arg) { return 0; }
@@ -1783,6 +1954,12 @@ public:
virtual bool check_partition_func_processor(void *arg) { return 1;}
virtual bool post_fix_fields_part_expr_processor(void *arg) { return 0; }
virtual bool rename_fields_processor(void *arg) { return 0; }
+ /*
+ TRUE if the function is known to be TRUE or FALSE.
+ Not to be used for AND/OR formulas.
+ */
+ virtual bool is_simplified_cond_processor(void *arg) { return false; }
+
/** Processor used to check acceptability of an item in the defining
expression for a virtual column
@@ -1816,6 +1993,12 @@ public:
virtual bool check_valid_arguments_processor(void *arg) { return 0; }
virtual bool update_vcol_processor(void *arg) { return 0; }
virtual bool set_fields_as_dependent_processor(void *arg) { return 0; }
+ /*
+ Find out whether some of the key parts of the table's keys (a reference to
+ the table is passed as an argument) participate in the expression.
+ If any do, set a bit for that key in the proper key map.
+ */
+ virtual bool check_index_dependence(void *arg) { return 0; }
/*============== End of Item processor list ======================*/
virtual Item *get_copy(THD *thd)=0;
@@ -1885,11 +2068,17 @@ public:
return Type_handler::type_handler_long_or_longlong(max_char_length());
}
- virtual Field *create_tmp_field(bool group, TABLE *table)
- {
- return tmp_table_field_from_field_type(table);
- }
-
+ /**
+ Create field for temporary table.
+ @param table Temporary table
+ @param [OUT] src Who created the fields
+ @param param Create parameters
+ @retval NULL (on error)
+ @retval a pointer to a newly created Field (on success)
+ */
+ virtual Field *create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)= 0;
virtual Item_field *field_for_view_update() { return 0; }
virtual Item *neg_transformer(THD *thd) { return NULL; }
@@ -1901,11 +2090,19 @@ public:
{ return this; }
virtual Item *derived_field_transformer_for_where(THD *thd, uchar *arg)
{ return this; }
- virtual Item *derived_grouping_field_transformer_for_where(THD *thd,
- uchar *arg)
+ virtual Item *grouping_field_transformer_for_where(THD *thd, uchar *arg)
+ { return this; }
+ /* Currently not used. */
+ virtual Item *in_subq_field_transformer_for_where(THD *thd, uchar *arg)
+ { return this; }
+ virtual Item *in_subq_field_transformer_for_having(THD *thd, uchar *arg)
{ return this; }
virtual Item *in_predicate_to_in_subs_transformer(THD *thd, uchar *arg)
{ return this; }
+ virtual Item *field_transformer_for_having_pushdown(THD *thd, uchar *arg)
+ { return this; }
+ virtual Item *multiple_equality_transformer(THD *thd, uchar *arg)
+ { return this; }
virtual bool expr_cache_is_needed(THD *) { return FALSE; }
virtual Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs);
bool needs_charset_converter(uint32 length, CHARSET_INFO *tocs) const
@@ -1956,6 +2153,7 @@ public:
delete this;
}
+ virtual const Item_const *get_item_const() const { return NULL; }
virtual Item_splocal *get_item_splocal() { return 0; }
virtual Rewritable_query_parameter *get_rewritable_query_parameter()
{ return 0; }
@@ -2029,13 +2227,16 @@ public:
/*
Return TRUE if the item points to a column of an outer-joined table.
*/
- virtual bool is_outer_field() const { DBUG_ASSERT(fixed); return FALSE; }
+ virtual bool is_outer_field() const { DBUG_ASSERT(is_fixed()); return FALSE; }
/**
Checks if this item or any of its descendants contains a subquery. This is a
replacement of the former Item::has_subquery() and Item::with_subselect.
*/
- virtual bool with_subquery() const { DBUG_ASSERT(fixed); return false; }
+ virtual bool with_subquery() const { DBUG_ASSERT(is_fixed()); return false; }
+
+ virtual bool with_sum_func() const { return false; }
+ virtual With_sum_func_cache* get_with_sum_func_cache() { return NULL; }
Item* set_expr_cache(THD *thd);
@@ -2082,6 +2283,11 @@ public:
*/
virtual void under_not(Item_func_not * upper
__attribute__((unused))) {};
+ /*
+ If an Item_field is wrapped in an Item_direct_ref, remove that
+ Item_direct_ref wrapper.
+ */
+ virtual Item *remove_item_direct_ref() { return this; }
void register_in(THD *thd);
@@ -2099,6 +2305,29 @@ public:
{
marker &= ~EXTRACTION_MASK;
}
+ void check_pushable_cond(Pushdown_checker excl_dep_func, uchar *arg);
+ bool pushable_cond_checker_for_derived(uchar *arg)
+ {
+ return excl_dep_on_table(*((table_map *)arg));
+ }
+ bool pushable_cond_checker_for_subquery(uchar *arg)
+ {
+ return excl_dep_on_in_subq_left_part((Item_in_subselect *)arg);
+ }
+ Item *build_pushable_cond(THD *thd,
+ Pushdown_checker checker,
+ uchar *arg);
+ /*
+ Checks if this item depends only on the arg table
+ */
+ bool pushable_equality_checker_for_derived(uchar *arg)
+ {
+ return (used_tables() == *((table_map *)arg));
+ }
+ /*
+ Checks if this item is contained in the left part of the arg IN subquery predicate
+ */
+ bool pushable_equality_checker_for_subquery(uchar *arg);
};
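
The new Pushdown_checker typedef (a pointer to an Item member function) and the check_pushable_cond()/build_pushable_cond() entry points rest on one C++ idiom: invoking a member through ->*. A self-contained sketch with hypothetical names:

    #include <cstdint>

    struct Node;                                          // plays the role of Item
    typedef bool (Node::*Checker)(unsigned char *arg);    // like Pushdown_checker

    struct Node {
      std::uint64_t tables;                          // stand-in for used_tables()
      bool depends_only_on(unsigned char *arg)       // like the *_checker_* methods
      {
        return tables == *reinterpret_cast<std::uint64_t *>(arg);
      }
    };

    bool is_pushable(Node *n, Checker checker, unsigned char *arg)
    {
      return (n->*checker)(arg);                     // how the checker is invoked
    }

    // Usage:
    //   std::uint64_t map= 0x2;
    //   bool ok= is_pushable(&node, &Node::depends_only_on,
    //                        reinterpret_cast<unsigned char *>(&map));
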
MEM_ROOT *get_thd_memroot(THD *thd);
@@ -2113,6 +2342,66 @@ inline Item* get_item_copy (THD *thd, T* item)
}
+#ifndef DBUG_OFF
+/**
+ A helper class to print the data type and the value for an Item
+ in debug builds.
+*/
+class DbugStringItemTypeValue: public StringBuffer<128>
+{
+public:
+ DbugStringItemTypeValue(THD *thd, const Item *item)
+ {
+ append('(');
+ append(item->type_handler()->name().ptr());
+ append(')');
+ const_cast<Item*>(item)->print(this, QT_EXPLAIN);
+ }
+};
+#endif
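
DbugStringItemTypeValue only formats "(type-name)printed-value" into its own buffer. A tiny standalone analogue of the same pattern (std::string instead of StringBuffer, hypothetical names, and mirroring the original's inheritance from its buffer class):

    #include <string>
    #include <cstdio>

    struct DebugTypeValue : std::string {       // analogue of the helper above
      DebugTypeValue(const char *type_name, const char *printed_value)
      {
        append("(");
        append(type_name);
        append(")");
        append(printed_value);
      }
    };

    // Usage in a debug trace:
    //   DebugTypeValue s("bigint", "42");
    //   std::printf("%s\n", s.c_str());        // prints "(bigint)42"
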
+
+class With_sum_func_cache
+{
+protected:
+ bool m_with_sum_func; // True if the owner item contains a sum func
+public:
+ With_sum_func_cache()
+ :m_with_sum_func(false)
+ { }
+ With_sum_func_cache(const Item *a)
+ :m_with_sum_func(a->with_sum_func())
+ { }
+ With_sum_func_cache(const Item *a, const Item *b)
+ :m_with_sum_func(a->with_sum_func() || b->with_sum_func())
+ { }
+ With_sum_func_cache(const Item *a, const Item *b, const Item *c)
+ :m_with_sum_func(a->with_sum_func() || b->with_sum_func() ||
+ c->with_sum_func())
+ { }
+ With_sum_func_cache(const Item *a, const Item *b, const Item *c,
+ const Item *d)
+ :m_with_sum_func(a->with_sum_func() || b->with_sum_func() ||
+ c->with_sum_func() || d->with_sum_func())
+ { }
+ With_sum_func_cache(const Item *a, const Item *b, const Item *c,
+ const Item *d, const Item *e)
+ :m_with_sum_func(a->with_sum_func() || b->with_sum_func() ||
+ c->with_sum_func() || d->with_sum_func() ||
+ e->with_sum_func())
+ { }
+ void set_with_sum_func() { m_with_sum_func= true; }
+ void reset_with_sum_func() { m_with_sum_func= false; }
+ void copy_with_sum_func(const Item *item)
+ {
+ m_with_sum_func= item->with_sum_func();
+ }
+ void join_with_sum_func(const Item *item)
+ {
+ m_with_sum_func|= item->with_sum_func();
+ }
+};
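
With_sum_func_cache replaces the removed Item::with_sum_func member: the flag now lives only in the classes that need it and is combined from the constructor arguments. A self-contained sketch of the mixin pattern (hypothetical names):

    struct Expr {                                // plays the role of Item
      virtual bool with_sum_func() const { return false; }
      virtual ~Expr() {}
    };

    struct SumFuncCache {                        // like With_sum_func_cache
      bool m_with_sum_func= false;
      void join_with_sum_func(const Expr *e)
      { m_with_sum_func|= e->with_sum_func(); }
    };

    struct PlusExpr : Expr, SumFuncCache {       // an Item_func-like owner
      PlusExpr(const Expr *a, const Expr *b)
      { join_with_sum_func(a); join_with_sum_func(b); }
      bool with_sum_func() const override { return m_with_sum_func; }
    };
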
+
+
/*
This class is a replacement for the former member Item::with_subselect.
Determines if the descendant Item is a subselect or some of
@@ -2216,22 +2505,23 @@ protected:
}
return true;
}
- bool excl_dep_on_grouping_fields(st_select_lex *sel)
+ bool excl_dep_on_grouping_fields(st_select_lex *sel);
+ bool eq(const Item_args *other, bool binary_cmp) const
{
- for (uint i= 0; i < arg_count; i++)
+ for (uint i= 0; i < arg_count ; i++)
{
- if (args[i]->const_item())
- continue;
- if (!args[i]->excl_dep_on_grouping_fields(sel))
+ if (!args[i]->eq(other->args[i], binary_cmp))
return false;
}
return true;
}
- bool eq(const Item_args *other, bool binary_cmp) const
+ bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred)
{
- for (uint i= 0; i < arg_count ; i++)
+ for (uint i= 0; i < arg_count; i++)
{
- if (!args[i]->eq(other->args[i], binary_cmp))
+ if (args[i]->const_item())
+ continue;
+ if (!args[i]->excl_dep_on_in_subq_left_part(subq_pred))
return false;
}
return true;
@@ -2287,6 +2577,30 @@ public:
{
args[arg_count++]= item;
}
+ /**
+ Extract row elements from the given position.
+ For example, for this input: (1,2),(3,4),(5,6)
+ pos=0 will extract (1,3,5)
+ pos=1 will extract (2,4,6)
+ @param thd - current thread, to allocate memory on its mem_root
+ @param rows - an array of compatible ROW-type items
+ @param pos - the element position to extract
+ */
+ bool alloc_and_extract_row_elements(THD *thd, const Item_args *rows, uint pos)
+ {
+ DBUG_ASSERT(rows->argument_count() > 0);
+ DBUG_ASSERT(rows->arguments()[0]->cols() > pos);
+ if (alloc_arguments(thd, rows->argument_count()))
+ return true;
+ for (uint i= 0; i < rows->argument_count(); i++)
+ {
+ DBUG_ASSERT(rows->arguments()[0]->cols() == rows->arguments()[i]->cols());
+ Item *arg= rows->arguments()[i]->element_index(pos);
+ add_argument(arg);
+ }
+ DBUG_ASSERT(argument_count() == rows->argument_count());
+ return false;
+ }
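
The (1,2),(3,4),(5,6) example from the doc comment above, written out as a standalone function over plain vectors (not the Item_args API):

    #include <cstddef>
    #include <vector>

    std::vector<int> extract_column(const std::vector<std::vector<int> > &rows,
                                    std::size_t pos)
    {
      std::vector<int> out;
      out.reserve(rows.size());
      for (const std::vector<int> &row : rows)
        out.push_back(row[pos]);          // element "pos" from every row
      return out;
    }

    // extract_column({{1,2},{3,4},{5,6}}, 0) yields {1,3,5}
    // extract_column({{1,2},{3,4},{5,6}}, 1) yields {2,4,6}
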
inline Item **arguments() const { return args; }
inline uint argument_count() const { return arg_count; }
inline void remove_arguments() { arg_count=0; }
@@ -2321,27 +2635,39 @@ public:
class Item_string;
-/**
- A common class for Item_basic_constant and Item_param
-*/
-class Item_basic_value :public Item
+class Item_fixed_hybrid: public Item
{
- bool is_basic_value(const Item *item, Type type_arg) const
- {
- return item->basic_const_item() && item->type() == type_arg;
- }
- bool is_basic_value(Type type_arg) const
+public:
+ bool fixed; // If item was fixed with fix_fields
+public:
+ Item_fixed_hybrid(THD *thd): Item(thd), fixed(false)
+ { }
+ Item_fixed_hybrid(THD *thd, Item_fixed_hybrid *item)
+ :Item(thd, item), fixed(item->fixed)
+ { }
+ bool fix_fields(THD *thd, Item **ref)
{
- return basic_const_item() && type() == type_arg;
+ DBUG_ASSERT(!fixed);
+ fixed= true;
+ return false;
}
- bool str_eq(const String *value,
- const String *other, CHARSET_INFO *cs, bool binary_cmp) const
+ void cleanup()
{
- return binary_cmp ?
- value->bin_eq(other) :
- collation.collation == cs && value->eq(other, collation.collation);
+ Item::cleanup();
+ fixed= false;
}
+ void quick_fix_field() { fixed= true; }
+ void unfix_fields() { fixed= false; }
+ bool is_fixed() const { return fixed; }
+};
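
Item_fixed_hybrid is where the old Item::fixed flag survives: items that are fixed by construction keep the new default is_fixed()==true from Item, while items that still need a fix_fields() pass derive from this class. A minimal sketch of the split (hypothetical names):

    struct Expr {                                     // plays the role of Item
      virtual bool is_fixed() const { return true; }  // fixed by construction
      virtual ~Expr() {}
    };

    struct HybridExpr : Expr {                        // like Item_fixed_hybrid
      bool fixed= false;
      bool fix_fields() { fixed= true; return false; }   // false = success
      bool is_fixed() const override { return fixed; }
      void cleanup() { fixed= false; }                // re-fixed on next execution
    };
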
+
+/**
+ A common class for Item_basic_constant and Item_param
+*/
+class Item_basic_value :public Item,
+ public Item_const
+{
protected:
// Value metadata, e.g. to make string processing easier
class Metadata: private MY_STRING_METADATA
@@ -2378,66 +2704,40 @@ protected:
fix_charset_and_length(str.charset(), dv, Metadata(&str));
}
Item_basic_value(THD *thd): Item(thd) {}
- /*
- In the xxx_eq() methods below we need to cast off "const" to
- call val_xxx(). This is OK for Item_basic_constant and Item_param.
- */
- bool null_eq(const Item *item) const
- {
- DBUG_ASSERT(is_basic_value(NULL_ITEM));
- return item->type() == NULL_ITEM;
- }
- bool str_eq(const String *value, const Item *item, bool binary_cmp) const
- {
- DBUG_ASSERT(is_basic_value(STRING_ITEM));
- return is_basic_value(item, STRING_ITEM) &&
- str_eq(value, ((Item_basic_value*)item)->val_str(NULL),
- item->collation.collation, binary_cmp);
- }
- bool real_eq(double value, const Item *item) const
- {
- DBUG_ASSERT(is_basic_value(REAL_ITEM));
- return is_basic_value(item, REAL_ITEM) &&
- value == ((Item_basic_value*)item)->val_real();
- }
- bool int_eq(longlong value, const Item *item) const
+public:
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
{
- DBUG_ASSERT(is_basic_value(INT_ITEM));
- return is_basic_value(item, INT_ITEM) &&
- value == ((Item_basic_value*)item)->val_int() &&
- (value >= 0 || item->unsigned_flag == unsigned_flag);
+
+ /*
+ create_tmp_field_ex() for this type of Item is called for:
+ - CREATE TABLE ... SELECT
+ - In ORDER BY: SELECT max(a) FROM t1 GROUP BY a ORDER BY 'const';
+ - In CURSORS:
+ DECLARE c CURSOR FOR SELECT 'test';
+ OPEN c;
+ */
+ return tmp_table_field_from_field_type_maybe_null(table, src, param,
+ type() == Item::NULL_ITEM);
}
+ bool eq(const Item *item, bool binary_cmp) const;
+ const Type_all_attributes *get_type_all_attributes_from_const() const
+ { return this; }
};
class Item_basic_constant :public Item_basic_value
{
- table_map used_table_map;
public:
- Item_basic_constant(THD *thd): Item_basic_value(thd), used_table_map(0) {};
- void set_used_tables(table_map map) { used_table_map= map; }
- table_map used_tables() const { return used_table_map; }
- bool check_vcol_func_processor(void *arg) { return FALSE;}
+ Item_basic_constant(THD *thd): Item_basic_value(thd) {};
+ bool check_vcol_func_processor(void *arg) { return false; }
+ const Item_const *get_item_const() const { return this; }
virtual Item_basic_constant *make_string_literal_concat(THD *thd,
const LEX_CSTRING *)
{
DBUG_ASSERT(0);
return this;
}
- /* to prevent drop fixed flag (no need parent cleanup call) */
- void cleanup()
- {
- /*
- Restore the original field name as it might not have been allocated
- in the statement memory. If the name is auto generated, it must be
- done again between subsequent executions of a prepared statement.
- */
- if (orig_name)
- {
- name.str= orig_name;
- name.length= strlen(orig_name);
- }
- }
};
@@ -2448,7 +2748,7 @@ public:
- CASE expression (Item_case_expr);
*****************************************************************************/
-class Item_sp_variable :public Item
+class Item_sp_variable :public Item_fixed_hybrid
{
protected:
/*
@@ -2480,7 +2780,8 @@ public:
longlong val_int();
String *val_str(String *sp);
my_decimal *val_decimal(my_decimal *decimal_value);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ bool val_native(THD *thd, Native *to);
bool is_null();
public:
@@ -2488,6 +2789,11 @@ public:
inline bool const_item() const;
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ return create_tmp_field_ex_simple(table, src, param);
+ }
inline int save_in_field(Field *field, bool no_conversions);
inline bool send(Protocol *protocol, st_value *buffer);
bool check_vcol_func_processor(void *arg)
@@ -2591,6 +2897,20 @@ public:
*/
Field *create_field_for_create_select(TABLE *table)
{ return create_table_field_from_handler(table); }
+
+ bool is_valid_limit_clause_variable_with_error() const
+ {
+ /*
+ In case if the variable has an anchored data type, e.g.:
+ DECLARE a TYPE OF t1.a;
+ type_handler() is set to &type_handler_null and this
+ function detects such variable as not valid in LIMIT.
+ */
+ if (type_handler()->is_limit_clause_valid_type())
+ return true;
+ my_error(ER_WRONG_SPVAR_TYPE_IN_LIMIT, MYF(0));
+ return false;
+ }
};
@@ -2737,11 +3057,10 @@ inline enum Item::Type Item_case_expr::type() const
extract a common base with class Item_ref, too.
*/
-class Item_name_const : public Item
+class Item_name_const : public Item_fixed_hybrid
{
Item *value_item;
Item *name_item;
- bool valid_args;
public:
Item_name_const(THD *thd, Item *name_arg, Item *val);
@@ -2752,7 +3071,7 @@ public:
longlong val_int();
String *val_str(String *sp);
my_decimal *val_decimal(my_decimal *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
bool is_null();
virtual void print(String *str, enum_query_type query_type);
@@ -2766,6 +3085,17 @@ public:
return TRUE;
}
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ /*
+ We can get here when using a CURSOR for a query with NAME_CONST():
+ DECLARE c CURSOR FOR SELECT NAME_CONST('x','y') FROM t1;
+ OPEN c;
+ */
+ return tmp_table_field_from_field_type_maybe_null(table, src, param,
+ type() == Item::NULL_ITEM);
+ }
int save_in_field(Field *field, bool no_conversions)
{
return value_item->save_in_field(field, no_conversions);
@@ -2783,15 +3113,27 @@ public:
{ return get_item_copy<Item_name_const>(thd, this); }
};
-class Item_num: public Item_basic_constant
+
+class Item_literal: public Item_basic_constant
+{
+public:
+ Item_literal(THD *thd): Item_basic_constant(thd)
+ { }
+ enum Type type() const { return CONST_ITEM; }
+ bool check_partition_func_processor(void *int_arg) { return false;}
+ bool const_item() const { return true; }
+ bool basic_const_item() const { return true; }
+};
+
+
+class Item_num: public Item_literal
{
public:
- Item_num(THD *thd): Item_basic_constant(thd) { collation.set_numeric(); }
+ Item_num(THD *thd): Item_literal(thd) { collation.set_numeric(); }
Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs);
- bool check_partition_func_processor(void *int_arg) { return FALSE;}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return type_handler()->Item_get_date(this, ltime, fuzzydate);
+ return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate);
}
};
@@ -2800,24 +3142,26 @@ public:
class st_select_lex;
-class Item_result_field :public Item /* Item with result field */
+class Item_result_field :public Item_fixed_hybrid /* Item with result field */
{
public:
Field *result_field; /* Save result here */
- Item_result_field(THD *thd): Item(thd), result_field(0) {}
+ Item_result_field(THD *thd): Item_fixed_hybrid(thd), result_field(0) {}
// Constructor used for Item_sum/Item_cond_and/or (see Item comment)
Item_result_field(THD *thd, Item_result_field *item):
- Item(thd, item), result_field(item->result_field)
+ Item_fixed_hybrid(thd, item), result_field(item->result_field)
{}
~Item_result_field() {} /* Required with gcc 2.95 */
Field *get_tmp_table_field() { return result_field; }
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param);
+ void get_tmp_field_src(Tmp_field_src *src, const Tmp_field_param *param);
/*
This implementation of used_tables() is used by Item_avg_field and
Item_variance_field, which work when only the temporary table is left, so they
return the table map of the temporary table.
*/
table_map used_tables() const { return 1; }
- void set_result_field(Field *field) { result_field= field; }
bool is_result_field() { return true; }
void save_in_result_field(bool no_conversions)
{
@@ -2896,39 +3240,6 @@ public:
};
-class Item_ident_for_show :public Item
-{
-public:
- Field *field;
- const char *db_name;
- const char *table_name;
-
- Item_ident_for_show(THD *thd, Field *par_field, const char *db_arg,
- const char *table_name_arg):
- Item(thd), field(par_field), db_name(db_arg), table_name(table_name_arg)
- {
- Type_std_attributes::set(par_field->type_std_attributes());
- }
- enum Type type() const { return FIELD_ITEM; }
- double val_real() { return field->val_real(); }
- longlong val_int() { return field->val_int(); }
- String *val_str(String *str) { return field->val_str(str); }
- my_decimal *val_decimal(my_decimal *dec) { return field->val_decimal(dec); }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- {
- return field->get_date(ltime, fuzzydate);
- }
- void make_send_field(THD *thd, Send_field *tmp_field);
- const Type_handler *type_handler() const
- {
- const Type_handler *handler= field->type_handler();
- return handler->type_handler_for_item_field();
- }
- Item* get_copy(THD *thd)
- { return get_item_copy<Item_ident_for_show>(thd, this); }
-};
-
-
class Item_field :public Item_ident,
public Load_data_outvar
{
@@ -2972,6 +3283,8 @@ public:
void save_result(Field *to);
double val_result();
longlong val_int_result();
+ bool val_native(THD *thd, Native *to);
+ bool val_native_result(THD *thd, Native *to);
String *str_result(String* tmp);
my_decimal *val_decimal_result(my_decimal *);
bool val_bool_result();
@@ -3015,24 +3328,25 @@ public:
const Type_handler *handler= field->type_handler();
return handler->type_handler_for_item_field();
}
- const Type_handler *cast_to_int_type_handler() const
- {
- return field->type_handler()->cast_to_int_type_handler();
- }
const Type_handler *real_type_handler() const
{
if (field->is_created_from_null_item)
return &type_handler_null;
return field->type_handler();
}
+ Field *create_tmp_field_from_item_field(TABLE *new_table,
+ Item_ref *orig_item,
+ const Tmp_field_param *param);
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param);
TYPELIB *get_typelib() const { return field->get_typelib(); }
enum_monotonicity_info get_monotonicity_info() const
{
return MONOTONIC_STRICT_INCREASING;
}
longlong val_int_endpoint(bool left_endp, bool *incl_endp);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool get_date_result(MYSQL_TIME *ltime,ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ bool get_date_result(THD *thd, MYSQL_TIME *ltime,date_mode_t fuzzydate);
bool is_null() { return field->is_null(); }
void update_null_value();
void update_table_bitmaps()
@@ -3042,13 +3356,7 @@ public:
TABLE *tab= field->table;
tab->covering_keys.intersect(field->part_of_key);
if (tab->read_set)
- bitmap_fast_test_and_set(tab->read_set, field->field_index);
- /*
- Do not mark a self-referecing virtual column.
- Such virtual columns are reported as invalid.
- */
- if (field->vcol_info && tab->vcol_set)
- tab->mark_virtual_col(field);
+ tab->mark_column_with_deps(field);
}
}
void update_used_tables()
@@ -3125,10 +3433,13 @@ public:
virtual Item *update_value_transformer(THD *thd, uchar *select_arg);
Item *derived_field_transformer_for_having(THD *thd, uchar *arg);
Item *derived_field_transformer_for_where(THD *thd, uchar *arg);
- Item *derived_grouping_field_transformer_for_where(THD *thd, uchar *arg);
+ Item *grouping_field_transformer_for_where(THD *thd, uchar *arg);
+ Item *in_subq_field_transformer_for_where(THD *thd, uchar *arg);
+ Item *in_subq_field_transformer_for_having(THD *thd, uchar *arg);
virtual void print(String *str, enum_query_type query_type);
bool excl_dep_on_table(table_map tab_map);
bool excl_dep_on_grouping_fields(st_select_lex *sel);
+ bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred);
bool cleanup_excluding_fields_processor(void *arg)
{ return field ? 0 : cleanup_processor(arg); }
bool cleanup_excluding_const_fields_processor(void *arg)
@@ -3146,6 +3457,7 @@ public:
DBUG_ASSERT(field_type() == MYSQL_TYPE_GEOMETRY);
return field->get_geometry_type();
}
+ bool check_index_dependence(void *arg);
friend class Item_default_value;
friend class Item_insert_value;
friend class st_select_lex_unit;
@@ -3233,22 +3545,21 @@ public:
max_length= 0;
name.str= name_par ? name_par : "NULL";
name.length= strlen(name.str);
- fixed= 1;
collation.set(cs, DERIVATION_IGNORABLE, MY_REPERTOIRE_ASCII);
}
enum Type type() const { return NULL_ITEM; }
- bool eq(const Item *item, bool binary_cmp) const { return null_eq(item); }
double val_real();
longlong val_int();
String *val_str(String *str);
my_decimal *val_decimal(my_decimal *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
int save_in_field(Field *field, bool no_conversions);
int save_safe_in_field(Field *field);
bool send(Protocol *protocol, st_value *buffer);
const Type_handler *type_handler() const { return &type_handler_null; }
bool basic_const_item() const { return 1; }
Item *clone_item(THD *thd);
+ bool const_is_null() const { return true; }
bool is_null() { return 1; }
virtual inline void print(String *str, enum_query_type query_type)
@@ -3274,6 +3585,12 @@ public:
{
return result_field->type();
}
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ DBUG_ASSERT(0);
+ return NULL;
+ }
void save_in_result_field(bool no_conversions)
{
save_in_field(result_field, no_conversions);
@@ -3341,8 +3658,8 @@ class Item_param :public Item_basic_value,
All Item_param::set_xxx() make sure to do so.
In the state with an assigned value:
- Item_param::basic_const_item() returns true
- - Item::type() returns NULL_ITEM, INT_ITEM, REAL_ITEM, DECIMAL_ITEM,
- DATE_ITEM, STRING_ITEM, depending on the value assigned.
+ - Item::type() returns NULL_ITEM or CONST_ITEM,
+ depending on the value assigned.
So in this state Item_param behaves in many cases like a literal.
When Item_param::cleanup() is called:
@@ -3365,14 +3682,6 @@ class Item_param :public Item_basic_value,
DEFAULT_VALUE, IGNORE_VALUE
} state;
- enum Type item_type;
-
- void fix_type(Type type)
- {
- item_type= type;
- fixed= true;
- }
-
void fix_temporal(uint32 max_length_arg, uint decimals_arg);
struct CONVERSION_INFO
@@ -3471,7 +3780,6 @@ class Item_param :public Item_basic_value,
PValue value;
const String *value_query_val_str(THD *thd, String* str) const;
- bool value_eq(const Item *item, bool binary_cmp) const;
Item *value_clone_item(THD *thd);
bool can_return_value() const;
@@ -3495,10 +3803,58 @@ public:
enum Type type() const
{
- DBUG_ASSERT(fixed || state == NO_VALUE);
- return item_type;
+ // Don't pretend to be a constant unless a value for this item is set.
+ switch (state) {
+ case NO_VALUE: return PARAM_ITEM;
+ case NULL_VALUE: return NULL_ITEM;
+ case SHORT_DATA_VALUE: return CONST_ITEM;
+ case LONG_DATA_VALUE: return CONST_ITEM;
+ case DEFAULT_VALUE: return PARAM_ITEM;
+ case IGNORE_VALUE: return PARAM_ITEM;
+ }
+ DBUG_ASSERT(0);
+ return PARAM_ITEM;
}
+ bool is_order_clause_position() const
+ {
+ return state == SHORT_DATA_VALUE &&
+ type_handler()->is_order_clause_position_type();
+ }
+
+ const Item_const *get_item_const() const
+ {
+ switch (state) {
+ case SHORT_DATA_VALUE:
+ case LONG_DATA_VALUE:
+ case NULL_VALUE:
+ return this;
+ case IGNORE_VALUE:
+ case DEFAULT_VALUE:
+ case NO_VALUE:
+ break;
+ }
+ return NULL;
+ }
+
+ bool const_is_null() const { return state == NULL_VALUE; }
+ bool can_return_const_value(Item_result type) const
+ {
+ return can_return_value() &&
+ value.type_handler()->cmp_type() == type &&
+ type_handler()->cmp_type() == type;
+ }
+ const longlong *const_ptr_longlong() const
+ { return can_return_const_value(INT_RESULT) ? &value.integer : NULL; }
+ const double *const_ptr_double() const
+ { return can_return_const_value(REAL_RESULT) ? &value.real : NULL; }
+ const my_decimal *const_ptr_my_decimal() const
+ { return can_return_const_value(DECIMAL_RESULT) ? &value.m_decimal : NULL; }
+ const MYSQL_TIME *const_ptr_mysql_time() const
+ { return can_return_const_value(TIME_RESULT) ? &value.time : NULL; }
+ const String *const_ptr_string() const
+ { return can_return_const_value(STRING_RESULT) ? &value.m_string : NULL; }
+
double val_real()
{
return can_return_value() ? value.val_real() : 0e0;
@@ -3515,7 +3871,12 @@ public:
{
return can_return_value() ? value.val_str(str, this) : NULL;
}
- bool get_date(MYSQL_TIME *tm, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *tm, date_mode_t fuzzydate);
+ bool val_native(THD *thd, Native *to)
+ {
+ return Item_param::type_handler()->Item_param_val_native(thd, this, to);
+ }
+
int save_in_field(Field *field, bool no_conversions);
void set_default();
@@ -3592,8 +3953,14 @@ public:
so no one will use parameters value in fix_fields still
parameter is constant during execution.
*/
+ bool const_item() const
+ {
+ return state != NO_VALUE;
+ }
virtual table_map used_tables() const
- { return state != NO_VALUE ? (table_map)0 : PARAM_TABLE_BIT; }
+ {
+ return state != NO_VALUE ? (table_map)0 : PARAM_TABLE_BIT;
+ }
virtual void print(String *str, enum_query_type query_type);
bool is_null()
{ DBUG_ASSERT(state != NO_VALUE); return state == NULL_VALUE; }
@@ -3623,12 +3990,6 @@ public:
*/
Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs);
Item *clone_item(THD *thd);
- /*
- Implement by-value equality evaluation if parameter value
- is set and is a basic constant (integer, real or string).
- Otherwise return FALSE.
- */
- bool eq(const Item *item, bool binary_cmp) const;
void set_param_type_and_swap_value(Item_param *from);
Rewritable_query_parameter *get_rewritable_query_parameter()
@@ -3676,50 +4037,44 @@ public:
longlong value;
Item_int(THD *thd, int32 i,size_t length= MY_INT32_NUM_DECIMAL_DIGITS):
Item_num(thd), value((longlong) i)
- { max_length=(uint32)length; fixed= 1; }
+ { max_length=(uint32)length; }
Item_int(THD *thd, longlong i,size_t length= MY_INT64_NUM_DECIMAL_DIGITS):
Item_num(thd), value(i)
- { max_length=(uint32)length; fixed= 1; }
+ { max_length=(uint32)length; }
Item_int(THD *thd, ulonglong i, size_t length= MY_INT64_NUM_DECIMAL_DIGITS):
Item_num(thd), value((longlong)i)
- { max_length=(uint32)length; fixed= 1; unsigned_flag= 1; }
+ { max_length=(uint32)length; unsigned_flag= 1; }
Item_int(THD *thd, const char *str_arg,longlong i,size_t length):
Item_num(thd), value(i)
{
max_length=(uint32)length;
name.str= str_arg; name.length= safe_strlen(name.str);
- fixed= 1;
}
Item_int(THD *thd, const char *str_arg,longlong i,size_t length, bool flag):
Item_num(thd), value(i)
{
max_length=(uint32)length;
name.str= str_arg; name.length= safe_strlen(name.str);
- fixed= 1;
unsigned_flag= flag;
}
Item_int(THD *thd, const char *str_arg, size_t length=64);
- enum Type type() const { return INT_ITEM; }
const Type_handler *type_handler() const
{ return type_handler_long_or_longlong(); }
- Field *create_tmp_field(bool group, TABLE *table)
- { return tmp_table_field_from_field_type(table); }
Field *create_field_for_create_select(TABLE *table)
{ return tmp_table_field_from_field_type(table); }
- longlong val_int() { DBUG_ASSERT(fixed == 1); return value; }
- longlong val_int_min() const { DBUG_ASSERT(fixed == 1); return value; }
- double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; }
+ const longlong *const_ptr_longlong() const { return &value; }
+ longlong val_int() { return value; }
+ longlong val_int_min() const { return value; }
+ double val_real() { return (double) value; }
my_decimal *val_decimal(my_decimal *);
String *val_str(String*);
int save_in_field(Field *field, bool no_conversions);
- bool basic_const_item() const { return 1; }
+ bool is_order_clause_position() const { return true; }
Item *clone_item(THD *thd);
virtual void print(String *str, enum_query_type query_type);
Item *neg(THD *thd);
uint decimal_precision() const
{ return (uint) (max_length - MY_TEST(value < 0)); }
- bool eq(const Item *item, bool binary_cmp) const
- { return int_eq(value, item); }
Item *get_copy(THD *thd)
{ return get_item_copy<Item_int>(thd, this); }
};
@@ -3735,8 +4090,20 @@ class Item_bool :public Item_int
public:
Item_bool(THD *thd, const char *str_arg, longlong i):
Item_int(thd, str_arg, i, 1) {}
- bool is_bool_type() { return true; }
+ Item_bool(THD *thd, bool i) :Item_int(thd, (longlong) i, 1) { }
+ bool is_bool_literal() const { return true; }
Item *neg_transformer(THD *thd);
+ const Type_handler *type_handler() const
+ { return &type_handler_bool; }
+ const Type_handler *fixed_type_handler() const
+ { return &type_handler_bool; }
+ void quick_fix_field()
+ {
+ /*
+ We can get here when Item_bool is created instead of a constant
+ predicate at various condition optimization stages in sql_select.
+ */
+ }
};
@@ -3746,8 +4113,7 @@ public:
Item_uint(THD *thd, const char *str_arg, size_t length);
Item_uint(THD *thd, ulonglong i): Item_int(thd, i, 10) {}
Item_uint(THD *thd, const char *str_arg, longlong i, uint length);
- double val_real()
- { DBUG_ASSERT(fixed == 1); return ulonglong2double((ulonglong)value); }
+ double val_real() { return ulonglong2double((ulonglong)value); }
String *val_str(String*);
Item *clone_item(THD *thd);
virtual void print(String *str, enum_query_type query_type);
@@ -3768,7 +4134,7 @@ public:
longlong val_int();
double val_real() { return (double)val_int(); }
void set(longlong packed, enum_mysql_timestamp_type ts_type);
- bool get_date(MYSQL_TIME *to, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *to, date_mode_t fuzzydate)
{
*to= ltime;
return false;
@@ -3786,24 +4152,26 @@ public:
CHARSET_INFO *charset);
Item_decimal(THD *thd, const char *str, const my_decimal *val_arg,
uint decimal_par, uint length);
- Item_decimal(THD *thd, my_decimal *value_par);
+ Item_decimal(THD *thd, const my_decimal *value_par);
Item_decimal(THD *thd, longlong val, bool unsig);
Item_decimal(THD *thd, double val, int precision, int scale);
Item_decimal(THD *thd, const uchar *bin, int precision, int scale);
- enum Type type() const { return DECIMAL_ITEM; }
const Type_handler *type_handler() const { return &type_handler_newdecimal; }
- longlong val_int();
- double val_real();
- String *val_str(String*);
+ longlong val_int() { return decimal_value.to_longlong(unsigned_flag); }
+ double val_real() { return decimal_value.to_double(); }
+ String *val_str(String *to) { return decimal_value.to_string(to); }
my_decimal *val_decimal(my_decimal *val) { return &decimal_value; }
+ const my_decimal *const_ptr_my_decimal() const { return &decimal_value; }
int save_in_field(Field *field, bool no_conversions);
- bool basic_const_item() const { return 1; }
Item *clone_item(THD *thd);
- virtual void print(String *str, enum_query_type query_type);
+ virtual void print(String *str, enum_query_type query_type)
+ {
+ decimal_value.to_string(&str_value);
+ str->append(str_value);
+ }
Item *neg(THD *thd);
uint decimal_precision() const { return decimal_value.precision(); }
- bool eq(const Item *, bool binary_cmp) const;
void set_decimal_value(my_decimal *value_par);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_decimal>(thd, this); }
@@ -3823,21 +4191,18 @@ public:
name.length= safe_strlen(str);
decimals=(uint8) decimal_par;
max_length= length;
- fixed= 1;
}
Item_float(THD *thd, double value_par, uint decimal_par):
Item_num(thd), presentation(0), value(value_par)
{
decimals= (uint8) decimal_par;
- fixed= 1;
}
int save_in_field(Field *field, bool no_conversions);
- enum Type type() const { return REAL_ITEM; }
const Type_handler *type_handler() const { return &type_handler_double; }
- double val_real() { DBUG_ASSERT(fixed == 1); return value; }
+ const double *const_ptr_double() const { return &value; }
+ double val_real() { return value; }
longlong val_int()
{
- DBUG_ASSERT(fixed == 1);
if (value <= (double) LONGLONG_MIN)
{
return LONGLONG_MIN;
@@ -3850,12 +4215,9 @@ public:
}
String *val_str(String*);
my_decimal *val_decimal(my_decimal *);
- bool basic_const_item() const { return 1; }
Item *clone_item(THD *thd);
Item *neg(THD *thd);
virtual void print(String *str, enum_query_type query_type);
- bool eq(const Item *item, bool binary_cmp) const
- { return real_eq(value, item); }
Item *get_copy(THD *thd)
{ return get_item_copy<Item_float>(thd, this); }
};
@@ -3882,14 +4244,12 @@ public:
};
-class Item_string :public Item_basic_constant
+class Item_string :public Item_literal
{
protected:
void fix_from_value(Derivation dv, const Metadata metadata)
{
fix_charset_and_length(str_value.charset(), dv, metadata);
- // it is constant => can be used without fix_fields (and frequently used)
- fixed= 1;
}
void fix_and_set_name_from_value(THD *thd, Derivation dv,
const Metadata metadata)
@@ -3900,41 +4260,41 @@ protected:
protected:
/* Just create an item and do not fill string representation */
Item_string(THD *thd, CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE):
- Item_basic_constant(thd)
+ Item_literal(thd)
{
collation.set(cs, dv);
max_length= 0;
set_name(thd, NULL, 0, system_charset_info);
decimals= NOT_FIXED_DEC;
- fixed= 1;
}
public:
- Item_string(THD *thd, CHARSET_INFO *csi, const char *str_arg, uint length_arg):
- Item_basic_constant(thd)
+ Item_string(THD *thd, CHARSET_INFO *csi, const char *str_arg, uint length_arg)
+ :Item_literal(thd)
{
collation.set(csi, DERIVATION_COERCIBLE);
set_name(thd, NULL, 0, system_charset_info);
decimals= NOT_FIXED_DEC;
- fixed= 1;
str_value.copy(str_arg, length_arg, csi);
max_length= str_value.numchars() * csi->mbmaxlen;
}
// Constructors with the item name set from its value
Item_string(THD *thd, const char *str, uint length, CHARSET_INFO *cs,
- Derivation dv, uint repertoire): Item_basic_constant(thd)
+ Derivation dv, uint repertoire)
+ :Item_literal(thd)
{
str_value.set_or_copy_aligned(str, length, cs);
fix_and_set_name_from_value(thd, dv, Metadata(&str_value, repertoire));
}
Item_string(THD *thd, const char *str, size_t length,
- CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE):
- Item_basic_constant(thd)
+ CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE)
+ :Item_literal(thd)
{
str_value.set_or_copy_aligned(str, length, cs);
fix_and_set_name_from_value(thd, dv, Metadata(&str_value));
}
Item_string(THD *thd, const String *str, CHARSET_INFO *tocs, uint *conv_errors,
- Derivation dv, uint repertoire): Item_basic_constant(thd)
+ Derivation dv, uint repertoire)
+ :Item_literal(thd)
{
if (str_value.copy(str, tocs, conv_errors))
str_value.set("", 0, tocs); // EOM ?
@@ -3943,16 +4303,16 @@ public:
}
// Constructors with an externally provided item name
Item_string(THD *thd, const char *name_par, const char *str, size_t length,
- CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE):
- Item_basic_constant(thd)
+ CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE)
+ :Item_literal(thd)
{
str_value.set_or_copy_aligned(str, length, cs);
fix_from_value(dv, Metadata(&str_value));
set_name(thd, name_par,safe_strlen(name_par), system_charset_info);
}
Item_string(THD *thd, const char *name_par, const char *str, size_t length,
- CHARSET_INFO *cs, Derivation dv, uint repertoire):
- Item_basic_constant(thd)
+ CHARSET_INFO *cs, Derivation dv, uint repertoire)
+ :Item_literal(thd)
{
str_value.set_or_copy_aligned(str, length, cs);
fix_from_value(dv, Metadata(&str_value, repertoire));
@@ -3962,26 +4322,23 @@ public:
{
str_value.print(to);
}
- enum Type type() const { return STRING_ITEM; }
double val_real();
longlong val_int();
+ const String *const_ptr_string() const
+ {
+ return &str_value;
+ }
String *val_str(String*)
{
- DBUG_ASSERT(fixed == 1);
return (String*) &str_value;
}
my_decimal *val_decimal(my_decimal *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return get_date_from_string(ltime, fuzzydate);
+ return get_date_from_string(thd, ltime, fuzzydate);
}
int save_in_field(Field *field, bool no_conversions);
const Type_handler *type_handler() const { return &type_handler_varchar; }
- bool basic_const_item() const { return 1; }
- bool eq(const Item *item, bool binary_cmp) const
- {
- return str_eq(&str_value, item, binary_cmp);
- }
Item *clone_item(THD *thd);
Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
{
@@ -3993,7 +4350,6 @@ public:
max_length= str_value.numchars() * collation.collation->mbmaxlen;
}
virtual void print(String *str, enum_query_type query_type);
- bool check_partition_func_processor(void *int_arg) {return FALSE;}
/**
Return TRUE if character-set-introducer was explicitly specified in the
@@ -4022,34 +4378,6 @@ public:
String *check_well_formed_result(bool send_error)
{ return Item::check_well_formed_result(&str_value, send_error); }
- enum_field_types odbc_temporal_literal_type(const LEX_CSTRING *type_str) const
- {
- /*
- If string is a reasonably short pure ASCII string literal,
- try to parse known ODBC style date, time or timestamp literals,
- e.g:
- SELECT {d'2001-01-01'};
- SELECT {t'10:20:30'};
- SELECT {ts'2001-01-01 10:20:30'};
- */
- if (collation.repertoire == MY_REPERTOIRE_ASCII &&
- str_value.length() < MAX_DATE_STRING_REP_LENGTH * 4)
- {
- if (type_str->length == 1)
- {
- if (type_str->str[0] == 'd') /* {d'2001-01-01'} */
- return MYSQL_TYPE_DATE;
- else if (type_str->str[0] == 't') /* {t'10:20:30'} */
- return MYSQL_TYPE_TIME;
- }
- else if (type_str->length == 2) /* {ts'2001-01-01 10:20:30'} */
- {
- if (type_str->str[0] == 't' && type_str->str[1] == 's')
- return MYSQL_TYPE_DATETIME;
- }
- }
- return MYSQL_TYPE_STRING; // Not a temporal literal
- }
Item_basic_constant *make_string_literal_concat(THD *thd,
const LEX_CSTRING *);
Item *make_odbc_literal(THD *thd, const LEX_CSTRING *typestr);
@@ -4236,38 +4564,30 @@ public:
/**
Item_hex_constant -- a common class for hex literals: X'HHHH' and 0xHHHH
*/
-class Item_hex_constant: public Item_basic_constant
+class Item_hex_constant: public Item_literal
{
private:
void hex_string_init(THD *thd, const char *str, size_t str_length);
public:
- Item_hex_constant(THD *thd): Item_basic_constant(thd)
+ Item_hex_constant(THD *thd): Item_literal(thd)
{
hex_string_init(thd, "", 0);
}
Item_hex_constant(THD *thd, const char *str, size_t str_length):
- Item_basic_constant(thd)
+ Item_literal(thd)
{
hex_string_init(thd, str, str_length);
}
- enum Type type() const { return VARBIN_ITEM; }
const Type_handler *type_handler() const { return &type_handler_varchar; }
virtual Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
{
return const_charset_converter(thd, tocs, true);
}
- bool check_partition_func_processor(void *int_arg) {return FALSE;}
- bool basic_const_item() const { return 1; }
- bool eq(const Item *item, bool binary_cmp) const
- {
- return item->basic_const_item() && item->type() == type() &&
- item->cast_to_int_type_handler() == cast_to_int_type_handler() &&
- str_value.bin_eq(&((Item_hex_constant*)item)->str_value);
- }
- String *val_str(String*) { DBUG_ASSERT(fixed == 1); return &str_value; }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ const String *const_ptr_string() const { return &str_value; }
+ String *val_str(String*) { return &str_value; }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return type_handler()->Item_get_date(this, ltime, fuzzydate);
+ return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate);
}
};
@@ -4283,22 +4603,18 @@ public:
Item_hex_hybrid(THD *thd): Item_hex_constant(thd) {}
Item_hex_hybrid(THD *thd, const char *str, size_t str_length):
Item_hex_constant(thd, str, str_length) {}
+ const Type_handler *type_handler() const { return &type_handler_hex_hybrid; }
uint decimal_precision() const;
double val_real()
{
- DBUG_ASSERT(fixed == 1);
return (double) (ulonglong) Item_hex_hybrid::val_int();
}
longlong val_int()
{
- // following assert is redundant, because fixed=1 assigned in constructor
- DBUG_ASSERT(fixed == 1);
return longlong_from_hex_hybrid(str_value.ptr(), str_value.length());
}
my_decimal *val_decimal(my_decimal *decimal_value)
{
- // following assert is redundant, because fixed=1 assigned in constructor
- DBUG_ASSERT(fixed == 1);
longlong value= Item_hex_hybrid::val_int();
int2my_decimal(E_DEC_FATAL_ERROR, value, TRUE, decimal_value);
return decimal_value;
@@ -4308,14 +4624,6 @@ public:
field->set_notnull();
return field->store_hex_hybrid(str_value.ptr(), str_value.length());
}
- const Type_handler *cast_to_int_type_handler() const
- {
- return &type_handler_longlong;
- }
- const Type_handler *type_handler_for_system_time() const
- {
- return &type_handler_longlong;
- }
void print(String *str, enum_query_type query_type);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_hex_hybrid>(thd, this); }
@@ -4339,12 +4647,10 @@ public:
Item_hex_constant(thd, str, str_length) {}
longlong val_int()
{
- DBUG_ASSERT(fixed == 1);
return longlong_from_string_with_check(&str_value);
}
double val_real()
{
- DBUG_ASSERT(fixed == 1);
return double_from_string_with_check(&str_value);
}
my_decimal *val_decimal(my_decimal *decimal_value)
@@ -4370,7 +4676,55 @@ public:
};
-class Item_temporal_literal :public Item_basic_constant
+class Item_timestamp_literal: public Item_literal
+{
+ Timestamp_or_zero_datetime m_value;
+public:
+ Item_timestamp_literal(THD *thd)
+ :Item_literal(thd)
+ { }
+ const Type_handler *type_handler() const { return &type_handler_timestamp2; }
+ int save_in_field(Field *field, bool no_conversions)
+ {
+ Timestamp_or_zero_datetime_native native(m_value, decimals);
+ return native.save_in_field(field, decimals);
+ }
+ longlong val_int()
+ {
+ return m_value.to_datetime(current_thd).to_longlong();
+ }
+ double val_real()
+ {
+ return m_value.to_datetime(current_thd).to_double();
+ }
+ String *val_str(String *to)
+ {
+ return m_value.to_datetime(current_thd).to_string(to, decimals);
+ }
+ my_decimal *val_decimal(my_decimal *to)
+ {
+ return m_value.to_datetime(current_thd).to_decimal(to);
+ }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ {
+ bool res= m_value.to_TIME(thd, ltime, fuzzydate);
+ DBUG_ASSERT(!res);
+ return res;
+ }
+ bool val_native(THD *thd, Native *to)
+ {
+ return m_value.to_native(to, decimals);
+ }
+ void set_value(const Timestamp_or_zero_datetime &value)
+ {
+ m_value= value;
+ }
+ Item *get_copy(THD *thd)
+ { return get_item_copy<Item_timestamp_literal>(thd, this); }
+};
+
+
+class Item_temporal_literal :public Item_literal
{
protected:
MYSQL_TIME cached_time;
@@ -4380,37 +4734,21 @@ public:
@param ltime DATE value.
*/
Item_temporal_literal(THD *thd, const MYSQL_TIME *ltime)
- :Item_basic_constant(thd)
+ :Item_literal(thd)
{
collation.set(&my_charset_numeric, DERIVATION_NUMERIC, MY_REPERTOIRE_ASCII);
decimals= 0;
cached_time= *ltime;
}
Item_temporal_literal(THD *thd, const MYSQL_TIME *ltime, uint dec_arg):
- Item_basic_constant(thd)
+ Item_literal(thd)
{
collation.set(&my_charset_numeric, DERIVATION_NUMERIC, MY_REPERTOIRE_ASCII);
decimals= dec_arg;
cached_time= *ltime;
}
- bool basic_const_item() const { return true; }
- bool const_item() const { return true; }
- enum Type type() const { return DATE_ITEM; }
- bool eq(const Item *item, bool binary_cmp) const;
- bool check_partition_func_processor(void *int_arg) {return FALSE;}
-
- bool is_null()
- { return is_null_from_temporal(); }
- bool get_date_with_sql_mode(MYSQL_TIME *to);
- String *val_str(String *str)
- { return val_string_from_date(str); }
- longlong val_int()
- { return val_int_from_date(); }
- double val_real()
- { return val_real_from_date(); }
- my_decimal *val_decimal(my_decimal *decimal_value)
- { return val_decimal_from_date(decimal_value); }
+ const MYSQL_TIME *const_ptr_mysql_time() const { return &cached_time; }
int save_in_field(Field *field, bool no_conversions)
{ return save_date_in_field(field, no_conversions); }
};
@@ -4426,7 +4764,6 @@ public:
:Item_temporal_literal(thd, ltime)
{
max_length= MAX_DATE_WIDTH;
- fixed= 1;
/*
If date has zero month or day, it can return NULL in case of
NO_ZERO_DATE or NO_ZERO_IN_DATE.
@@ -4439,7 +4776,11 @@ public:
const Type_handler *type_handler() const { return &type_handler_newdate; }
void print(String *str, enum_query_type query_type);
Item *clone_item(THD *thd);
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ longlong val_int() { return Date(this).to_longlong(); }
+ double val_real() { return Date(this).to_double(); }
+ String *val_str(String *to) { return Date(this).to_string(to); }
+ my_decimal *val_decimal(my_decimal *to) { return Date(this).to_decimal(to); }
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_date_literal>(thd, this); }
};
@@ -4455,12 +4796,15 @@ public:
Item_temporal_literal(thd, ltime, dec_arg)
{
max_length= MIN_TIME_WIDTH + (decimals ? decimals + 1 : 0);
- fixed= 1;
}
const Type_handler *type_handler() const { return &type_handler_time2; }
void print(String *str, enum_query_type query_type);
Item *clone_item(THD *thd);
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ longlong val_int() { return Time(this).to_longlong(); }
+ double val_real() { return Time(this).to_double(); }
+ String *val_str(String *to) { return Time(this).to_string(to, decimals); }
+ my_decimal *val_decimal(my_decimal *to) { return Time(this).to_decimal(to); }
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_time_literal>(thd, this); }
};
@@ -4476,14 +4820,23 @@ public:
Item_temporal_literal(thd, ltime, dec_arg)
{
max_length= MAX_DATETIME_WIDTH + (decimals ? decimals + 1 : 0);
- fixed= 1;
// See the comment on maybe_null in Item_date_literal
maybe_null= !ltime->month || !ltime->day;
}
const Type_handler *type_handler() const { return &type_handler_datetime2; }
void print(String *str, enum_query_type query_type);
Item *clone_item(THD *thd);
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ longlong val_int() { return Datetime(this).to_longlong(); }
+ double val_real() { return Datetime(this).to_double(); }
+ String *val_str(String *to)
+ {
+ return Datetime(this).to_string(to, decimals);
+ }
+ my_decimal *val_decimal(my_decimal *to)
+ {
+ return Datetime(this).to_decimal(to);
+ }
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_datetime_literal>(thd, this); }
};
@@ -4520,7 +4873,7 @@ class Item_date_literal_for_invalid_dates: public Item_date_literal
public:
Item_date_literal_for_invalid_dates(THD *thd, const MYSQL_TIME *ltime)
:Item_date_literal(thd, ltime) { }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
*ltime= cached_time;
return (null_value= false);
@@ -4538,7 +4891,7 @@ public:
Item_datetime_literal_for_invalid_dates(THD *thd,
const MYSQL_TIME *ltime, uint dec_arg)
:Item_datetime_literal(thd, ltime, dec_arg) { }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
*ltime= cached_time;
return (null_value= false);
@@ -4733,8 +5086,10 @@ struct st_sp_security_context;
class Item_sp
{
-public:
+protected:
+ // Can be NULL in some non-SELECT queries
Name_resolution_context *context;
+public:
sp_name *m_name;
sp_head *m_sp;
TABLE *dummy_table;
@@ -4756,9 +5111,15 @@ public:
bool execute_impl(THD *thd, Item **args, uint arg_count);
bool init_result_field(THD *thd, uint max_length, uint maybe_null,
bool *null_value, LEX_CSTRING *name);
+ void process_error(THD *thd)
+ {
+ if (context)
+ context->process_error(thd);
+ }
};
-class Item_ref :public Item_ident
+class Item_ref :public Item_ident,
+ protected With_sum_func_cache
{
protected:
void set_properties();
@@ -4794,7 +5155,8 @@ public:
/* Constructor need to process subselect with temporary tables (see Item) */
Item_ref(THD *thd, Item_ref *item)
- :Item_ident(thd, item), set_properties_only(0), ref(item->ref) {}
+ :Item_ident(thd, item), With_sum_func_cache(*item),
+ set_properties_only(0), ref(item->ref) {}
enum Type type() const { return REF_ITEM; }
enum Type real_type() const { return ref ? (*ref)->type() :
REF_ITEM; }
@@ -4810,11 +5172,13 @@ public:
my_decimal *val_decimal(my_decimal *);
bool val_bool();
String *val_str(String* tmp);
+ bool val_native(THD *thd, Native *to);
bool is_null();
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
double val_result();
longlong val_int_result();
String *str_result(String* tmp);
+ bool val_native_result(THD *thd, Native *to);
my_decimal *val_decimal_result(my_decimal *);
bool val_bool_result();
bool is_null_result();
@@ -4832,6 +5196,9 @@ public:
Field *get_tmp_table_field()
{ return result_field ? result_field : (*ref)->get_tmp_table_field(); }
Item *get_tmp_table_item(THD *thd);
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param);
+ Item* propagate_equal_fields(THD *, const Context &, COND_EQUAL *);
table_map used_tables() const;
void update_used_tables();
COND *build_equal_items(THD *thd, COND_EQUAL *inherited,
@@ -4961,6 +5328,8 @@ public:
}
bool excl_dep_on_grouping_fields(st_select_lex *sel)
{ return (*ref)->excl_dep_on_grouping_fields(sel); }
+ bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred)
+ { return (*ref)->excl_dep_on_in_subq_left_part(subq_pred); }
bool cleanup_excluding_fields_processor(void *arg)
{
Item *item= real_item();
@@ -4977,6 +5346,15 @@ public:
return 0;
return cleanup_processor(arg);
}
+ bool with_sum_func() const { return m_with_sum_func; }
+ With_sum_func_cache* get_with_sum_func_cache() { return this; }
+ Item *field_transformer_for_having_pushdown(THD *thd, uchar *arg)
+ { return (*ref)->field_transformer_for_having_pushdown(thd, arg); }
+ Item *remove_item_direct_ref()
+ {
+ *ref= (*ref)->remove_item_direct_ref();
+ return this;
+ }
};
@@ -5013,13 +5391,16 @@ public:
double val_real();
longlong val_int();
String *val_str(String* tmp);
+ bool val_native(THD *thd, Native *to);
my_decimal *val_decimal(my_decimal *);
bool val_bool();
bool is_null();
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
virtual Ref_Type ref_type() { return DIRECT_REF; }
Item *get_copy(THD *thd)
{ return get_item_copy<Item_direct_ref>(thd, this); }
+ Item *remove_item_direct_ref()
+ { return (*ref)->remove_item_direct_ref(); }
};
@@ -5064,7 +5445,8 @@ class Expression_cache_tracker;
*/
class Item_cache_wrapper :public Item_result_field,
- public With_subquery_cache
+ public With_subquery_cache,
+ protected With_sum_func_cache
{
private:
/* Pointer on the cached expression */
@@ -5092,6 +5474,8 @@ public:
enum Type type() const { return EXPR_CACHE_ITEM; }
enum Type real_type() const { return orig_item->type(); }
bool with_subquery() const { DBUG_ASSERT(fixed); return m_with_subquery; }
+ bool with_sum_func() const { return m_with_sum_func; }
+ With_sum_func_cache* get_with_sum_func_cache() { return this; }
bool set_cache(THD *thd);
Expression_cache_tracker* init_tracker(MEM_ROOT *mem_root);
@@ -5106,10 +5490,11 @@ public:
double val_real();
longlong val_int();
String *val_str(String* tmp);
+ bool val_native(THD *thd, Native *to);
my_decimal *val_decimal(my_decimal *);
bool val_bool();
bool is_null();
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
bool send(Protocol *protocol, st_value *buffer);
void save_org_in_field(Field *field,
fast_field_copier data __attribute__ ((__unused__)))
@@ -5267,10 +5652,12 @@ public:
}
bool excl_dep_on_table(table_map tab_map);
bool excl_dep_on_grouping_fields(st_select_lex *sel);
+ bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred);
Item *derived_field_transformer_for_having(THD *thd, uchar *arg);
Item *derived_field_transformer_for_where(THD *thd, uchar *arg);
- Item *derived_grouping_field_transformer_for_where(THD *thd,
- uchar *arg);
+ Item *grouping_field_transformer_for_where(THD *thd, uchar *arg);
+ Item *in_subq_field_transformer_for_where(THD *thd, uchar *arg);
+ Item *in_subq_field_transformer_for_having(THD *thd, uchar *arg);
void save_val(Field *to)
{
@@ -5300,6 +5687,12 @@ public:
else
return Item_direct_ref::val_str(tmp);
}
+ bool val_native(THD *thd, Native *to)
+ {
+ if (check_null_ref())
+ return true;
+ return Item_direct_ref::val_native(thd, to);
+ }
my_decimal *val_decimal(my_decimal *tmp)
{
if (check_null_ref())
@@ -5321,14 +5714,14 @@ public:
else
return Item_direct_ref::is_null();
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
if (check_null_ref())
{
bzero((char*) ltime,sizeof(*ltime));
return 1;
}
- return Item_direct_ref::get_date(ltime, fuzzydate);
+ return Item_direct_ref::get_date(thd, ltime, fuzzydate);
}
bool send(Protocol *protocol, st_value *buffer);
void save_org_in_field(Field *field,
@@ -5355,6 +5748,9 @@ public:
}
Item *get_copy(THD *thd)
{ return get_item_copy<Item_direct_view_ref>(thd, this); }
+ Item *field_transformer_for_having_pushdown(THD *thd, uchar *arg)
+ { return this; }
+ Item *remove_item_direct_ref() { return this; }
};
@@ -5444,7 +5840,8 @@ public:
String* val_str(String* s);
my_decimal *val_decimal(my_decimal *);
bool val_bool();
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ bool val_native(THD *thd, Native *to);
virtual void print(String *str, enum_query_type query_type);
table_map used_tables() const;
Item *get_copy(THD *thd)
@@ -5536,12 +5933,12 @@ protected:
*/
Item_copy(THD *thd, Item *i): Item(thd)
{
+ DBUG_ASSERT(i->is_fixed());
item= i;
null_value=maybe_null=item->maybe_null;
Type_std_attributes::set(item);
name= item->name;
set_handler(item->type_handler());
- fixed= item->fixed;
}
public:
@@ -5562,6 +5959,12 @@ public:
const Type_handler *type_handler() const
{ return Type_handler_hybrid_field_type::type_handler(); }
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ DBUG_ASSERT(0);
+ return NULL;
+ }
void make_send_field(THD *thd, Send_field *field)
{ item->make_send_field(thd, field); }
table_map used_tables() const { return (table_map) 1L; }
@@ -5603,8 +6006,8 @@ public:
my_decimal *val_decimal(my_decimal *);
double val_real();
longlong val_int();
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date_from_string(ltime, fuzzydate); }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ { return get_date_from_string(thd, ltime, fuzzydate); }
void copy();
int save_in_field(Field *field, bool no_conversions);
Item *get_copy(THD *thd)
@@ -5612,6 +6015,76 @@ public:
};
+/**
+ We need a separate class Item_copy_timestamp because
+ TIMESTAMP->string->TIMESTAMP conversion is not round trip safe
+ near the DST change, e.g. '2010-10-31 02:25:26' can mean:
+ - my_time_t(1288477526) - summer time in Moscow
+ - my_time_t(1288481126) - winter time in Moscow, one hour later
+*/
+class Item_copy_timestamp: public Item_copy
+{
+ Timestamp_or_zero_datetime m_value;
+ bool sane() const { return !null_value || m_value.is_zero_datetime(); }
+public:
+ Item_copy_timestamp(THD *thd, Item *arg): Item_copy(thd, arg) { }
+ const Type_handler *type_handler() const { return &type_handler_timestamp2; }
+ void copy()
+ {
+ Timestamp_or_zero_datetime_native_null tmp(current_thd, item, false);
+ null_value= tmp.is_null();
+ m_value= tmp.is_null() ? Timestamp_or_zero_datetime() :
+ Timestamp_or_zero_datetime(tmp);
+ }
+ int save_in_field(Field *field, bool no_conversions)
+ {
+ DBUG_ASSERT(sane());
+ if (null_value)
+ return set_field_to_null(field);
+ Timestamp_or_zero_datetime_native native(m_value, decimals);
+ return native.save_in_field(field, decimals);
+ }
+ longlong val_int()
+ {
+ DBUG_ASSERT(sane());
+ return null_value ? 0 :
+ m_value.to_datetime(current_thd).to_longlong();
+ }
+ double val_real()
+ {
+ DBUG_ASSERT(sane());
+ return null_value ? 0e0 :
+ m_value.to_datetime(current_thd).to_double();
+ }
+ String *val_str(String *to)
+ {
+ DBUG_ASSERT(sane());
+ return null_value ? NULL :
+ m_value.to_datetime(current_thd).to_string(to, decimals);
+ }
+ my_decimal *val_decimal(my_decimal *to)
+ {
+ DBUG_ASSERT(sane());
+ return null_value ? NULL :
+ m_value.to_datetime(current_thd).to_decimal(to);
+ }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ {
+ DBUG_ASSERT(sane());
+ bool res= m_value.to_TIME(thd, ltime, fuzzydate);
+ DBUG_ASSERT(!res);
+ return null_value || res;
+ }
+ bool val_native(THD *thd, Native *to)
+ {
+ DBUG_ASSERT(sane());
+ return null_value || m_value.to_native(to, decimals);
+ }
+ Item *get_copy(THD *thd)
+ { return get_item_copy<Item_copy_timestamp>(thd, this); }
+};
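The Moscow example in the class comment can be reproduced directly. The sketch below is an illustration only — it assumes a POSIX system with the IANA time zone database and is not part of the patch. Both epoch values render as the same wall-clock string, which is exactly why Item_copy_timestamp caches the native value instead of a string.

#include <cstdio>
#include <cstdlib>
#include <ctime>

int main()
{
  setenv("TZ", "Europe/Moscow", 1);        // the zone used in the comment above
  tzset();
  const time_t samples[]= { 1288477526,    // summer time, before clocks go back
                            1288481126 };  // winter time, one hour later
  for (time_t t : samples)
  {
    struct tm tm_buf;
    char buf[64];
    localtime_r(&t, &tm_buf);
    std::strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", &tm_buf);
    std::printf("%lld -> %s\n", (long long) t, buf);
  }
  return 0;                                // both lines print 2010-10-31 02:25:26
}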
+
+
/*
Cached_item_XXX objects are not exactly caches. They do the following:
@@ -5745,7 +6218,7 @@ public:
double val_real();
longlong val_int();
my_decimal *val_decimal(my_decimal *decimal_value);
- bool get_date(MYSQL_TIME *ltime,ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime,date_mode_t fuzzydate);
bool send(Protocol *protocol, st_value *buffer);
int save_in_field(Field *field_arg, bool no_conversions);
bool save_in_param(THD *thd, Item_param *param)
@@ -5802,7 +6275,7 @@ public:
double val_real();
longlong val_int();
my_decimal *val_decimal(my_decimal *decimal_value);
- bool get_date(MYSQL_TIME *ltime,ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime,date_mode_t fuzzydate);
bool send(Protocol *protocol, st_value *buffer);
};
@@ -5948,7 +6421,7 @@ public:
for any value.
*/
-class Item_cache: public Item_basic_constant,
+class Item_cache: public Item,
public Type_handler_hybrid_field_type
{
protected:
@@ -5967,25 +6440,27 @@ protected:
cache_value() will set this flag to TRUE.
*/
bool value_cached;
+
+ table_map used_table_map;
public:
Item_cache(THD *thd):
- Item_basic_constant(thd),
+ Item(thd),
Type_handler_hybrid_field_type(&type_handler_string),
example(0), cached_field(0),
- value_cached(0)
+ value_cached(0),
+ used_table_map(0)
{
- fixed= 1;
maybe_null= 1;
null_value= 1;
}
protected:
Item_cache(THD *thd, const Type_handler *handler):
- Item_basic_constant(thd),
+ Item(thd),
Type_handler_hybrid_field_type(handler),
example(0), cached_field(0),
- value_cached(0)
+ value_cached(0),
+ used_table_map(0)
{
- fixed= 1;
maybe_null= 1;
null_value= 1;
}
@@ -6000,10 +6475,18 @@ public:
cached_field= ((Item_field *)item)->field;
return 0;
};
+
+ void set_used_tables(table_map map) { used_table_map= map; }
+ table_map used_tables() const { return used_table_map; }
enum Type type() const { return CACHE_ITEM; }
const Type_handler *type_handler() const
{ return Type_handler_hybrid_field_type::type_handler(); }
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ return create_tmp_field_ex_simple(table, src, param);
+ }
virtual void keep_array() {}
virtual void print(String *str, enum_query_type query_type);
@@ -6034,7 +6517,7 @@ public:
void cleanup()
{
clear();
- Item_basic_constant::cleanup();
+ Item::cleanup();
}
/**
Check if saved item has a non-NULL value.
@@ -6069,6 +6552,8 @@ public:
virtual void set_null();
bool walk(Item_processor processor, bool walk_subquery, void *arg)
{
+ if (arg == STOP_PTR)
+ return FALSE;
if (example && example->walk(processor, walk_subquery, arg))
return TRUE;
return (this->*processor)(arg);
@@ -6086,7 +6571,11 @@ public:
{ return convert_to_basic_const_item(thd); }
Item *derived_field_transformer_for_where(THD *thd, uchar *arg)
{ return convert_to_basic_const_item(thd); }
- Item *derived_grouping_field_transformer_for_where(THD *thd, uchar *arg)
+ Item *grouping_field_transformer_for_where(THD *thd, uchar *arg)
+ { return convert_to_basic_const_item(thd); }
+ Item *in_subq_field_transformer_for_where(THD *thd, uchar *arg)
+ { return convert_to_basic_const_item(thd); }
+ Item *in_subq_field_transformer_for_having(THD *thd, uchar *arg)
{ return convert_to_basic_const_item(thd); }
};
@@ -6105,8 +6594,8 @@ public:
longlong val_int();
String* val_str(String *str);
my_decimal *val_decimal(my_decimal *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date_from_int(ltime, fuzzydate); }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ { return get_date_from_int(thd, ltime, fuzzydate); }
bool cache_value();
int save_in_field(Field *field, bool no_conversions);
Item *convert_to_basic_const_item(THD *thd);
@@ -6118,9 +6607,12 @@ public:
class Item_cache_year: public Item_cache_int
{
public:
- Item_cache_year(THD *thd): Item_cache_int(thd, &type_handler_year) { }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date_from_year(ltime, fuzzydate); }
+ Item_cache_year(THD *thd, const Type_handler *handler)
+ :Item_cache_int(thd, handler) { }
+ bool get_date(THD *thd, MYSQL_TIME *to, date_mode_t mode)
+ {
+ return type_handler_year.Item_get_date_with_warn(thd, this, to, mode);
+ }
};
@@ -6129,14 +6621,8 @@ class Item_cache_temporal: public Item_cache_int
protected:
Item_cache_temporal(THD *thd, const Type_handler *handler);
public:
- String* val_str(String *str);
- my_decimal *val_decimal(my_decimal *);
- longlong val_int();
- longlong val_datetime_packed();
- longlong val_time_packed();
- double val_real();
bool cache_value();
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
int save_in_field(Field *field, bool no_conversions);
void store_packed(longlong val_arg, Item *example);
/*
@@ -6159,6 +6645,31 @@ public:
Item *get_copy(THD *thd)
{ return get_item_copy<Item_cache_time>(thd, this); }
Item *make_literal(THD *);
+ longlong val_datetime_packed(THD *thd)
+ {
+ Datetime::Options_cmp opt(thd);
+ return has_value() ? Datetime(thd, this, opt).to_packed() : 0;
+ }
+ longlong val_time_packed(THD *thd)
+ {
+ return has_value() ? value : 0;
+ }
+ longlong val_int()
+ {
+ return has_value() ? Time(this).to_longlong() : 0;
+ }
+ double val_real()
+ {
+ return has_value() ? Time(this).to_double() : 0;
+ }
+ String *val_str(String *to)
+ {
+ return has_value() ? Time(this).to_string(to, decimals) : NULL;
+ }
+ my_decimal *val_decimal(my_decimal *to)
+ {
+ return has_value() ? Time(this).to_decimal(to) : NULL;
+ }
};
@@ -6170,6 +6681,30 @@ public:
Item *get_copy(THD *thd)
{ return get_item_copy<Item_cache_datetime>(thd, this); }
Item *make_literal(THD *);
+ longlong val_datetime_packed(THD *thd)
+ {
+ return has_value() ? value : 0;
+ }
+ longlong val_time_packed(THD *thd)
+ {
+ return Time(thd, this, Time::Options_cmp(thd)).to_packed();
+ }
+ longlong val_int()
+ {
+ return has_value() ? Datetime(this).to_longlong() : 0;
+ }
+ double val_real()
+ {
+ return has_value() ? Datetime(this).to_double() : 0;
+ }
+ String *val_str(String *to)
+ {
+ return has_value() ? Datetime(this).to_string(to, decimals) : NULL;
+ }
+ my_decimal *val_decimal(my_decimal *to)
+ {
+ return has_value() ? Datetime(this).to_decimal(to) : NULL;
+ }
};
@@ -6181,6 +6716,65 @@ public:
Item *get_copy(THD *thd)
{ return get_item_copy<Item_cache_date>(thd, this); }
Item *make_literal(THD *);
+ longlong val_datetime_packed(THD *thd)
+ {
+ return has_value() ? value : 0;
+ }
+ longlong val_time_packed(THD *thd)
+ {
+ return Time(thd, this, Time::Options_cmp(thd)).to_packed();
+ }
+ longlong val_int() { return has_value() ? Date(this).to_longlong() : 0; }
+ double val_real() { return has_value() ? Date(this).to_double() : 0; }
+ String *val_str(String *to)
+ {
+ return has_value() ? Date(this).to_string(to) : NULL;
+ }
+ my_decimal *val_decimal(my_decimal *to)
+ {
+ return has_value() ? Date(this).to_decimal(to) : NULL;
+ }
+};
+
+
+class Item_cache_timestamp: public Item_cache
+{
+ Timestamp_or_zero_datetime_native m_native;
+ Datetime to_datetime(THD *thd);
+public:
+ Item_cache_timestamp(THD *thd)
+ :Item_cache(thd, &type_handler_timestamp2) { }
+ Item *get_copy(THD *thd)
+ { return get_item_copy<Item_cache_timestamp>(thd, this); }
+ bool cache_value();
+ String* val_str(String *to)
+ {
+ return to_datetime(current_thd).to_string(to, decimals);
+ }
+ my_decimal *val_decimal(my_decimal *to)
+ {
+ return to_datetime(current_thd).to_decimal(to);
+ }
+ longlong val_int()
+ {
+ return to_datetime(current_thd).to_longlong();
+ }
+ double val_real()
+ {
+ return to_datetime(current_thd).to_double();
+ }
+ longlong val_datetime_packed(THD *thd)
+ {
+ return to_datetime(current_thd).to_packed();
+ }
+ longlong val_time_packed(THD *thd)
+ {
+ DBUG_ASSERT(0);
+ return 0;
+ }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ int save_in_field(Field *field, bool no_conversions);
+ bool val_native(THD *thd, Native *to);
};
@@ -6195,8 +6789,8 @@ public:
longlong val_int();
String* val_str(String *str);
my_decimal *val_decimal(my_decimal *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date_from_real(ltime, fuzzydate); }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ { return get_date_from_real(thd, ltime, fuzzydate); }
bool cache_value();
Item *convert_to_basic_const_item(THD *thd);
Item *get_copy(THD *thd)
@@ -6215,8 +6809,11 @@ public:
longlong val_int();
String* val_str(String *str);
my_decimal *val_decimal(my_decimal *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date_from_decimal(ltime, fuzzydate); }
+ bool get_date(THD *thd, MYSQL_TIME *to, date_mode_t mode)
+ {
+ return decimal_to_datetime_with_warn(thd, VDec(this).ptr(), to, mode,
+ NULL, NULL);
+ }
bool cache_value();
Item *convert_to_basic_const_item(THD *thd);
Item *get_copy(THD *thd)
@@ -6243,8 +6840,8 @@ public:
longlong val_int();
String* val_str(String *);
my_decimal *val_decimal(my_decimal *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date_from_string(ltime, fuzzydate); }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ { return get_date_from_string(thd, ltime, fuzzydate); }
CHARSET_INFO *charset() const { return value->charset(); };
int save_in_field(Field *field, bool no_conversions);
bool cache_value();
@@ -6325,7 +6922,7 @@ public:
illegal_method_call((const char*)"val_decimal");
return 0;
};
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
illegal_method_call((const char*)"val_decimal");
return true;
@@ -6374,7 +6971,7 @@ public:
Type_handler_hybrid_field_type(item->real_type_handler()),
enum_set_typelib(0)
{
- DBUG_ASSERT(item->fixed);
+ DBUG_ASSERT(item->is_fixed());
maybe_null= item->maybe_null;
}
Item_type_holder(THD *thd,
@@ -6408,8 +7005,9 @@ public:
longlong val_int();
my_decimal *val_decimal(my_decimal *);
String *val_str(String*);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
- Field *create_tmp_field(bool group, TABLE *table)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
{
return Item_type_holder::real_type_handler()->
make_and_init_table_field(&name, Record_addr(maybe_null),
@@ -6539,7 +7137,7 @@ bool fix_escape_item(THD *thd, Item *escape_item, String *tmp_str,
inline bool Virtual_column_info::is_equal(const Virtual_column_info* vcol) const
{
- return field_type == vcol->get_real_type()
+ return type_handler() == vcol->type_handler()
&& stored_in_db == vcol->is_stored()
&& expr->eq(vcol->expr, true);
}
@@ -6549,4 +7147,39 @@ inline void Virtual_column_info::print(String* str)
expr->print_for_table_def(str);
}
+inline bool TABLE::mark_column_with_deps(Field *field)
+{
+ bool res;
+ if (!(res= bitmap_fast_test_and_set(read_set, field->field_index)))
+ {
+ if (field->vcol_info)
+ mark_virtual_column_deps(field);
+ }
+ return res;
+}
+
+inline bool TABLE::mark_virtual_column_with_deps(Field *field)
+{
+ bool res;
+ DBUG_ASSERT(field->vcol_info);
+ if (!(res= bitmap_fast_test_and_set(read_set, field->field_index)))
+ mark_virtual_column_deps(field);
+ return res;
+}
+
+inline void TABLE::mark_virtual_column_deps(Field *field)
+{
+ DBUG_ASSERT(field->vcol_info);
+ DBUG_ASSERT(field->vcol_info->expr);
+ field->vcol_info->expr->walk(&Item::register_field_in_read_map, 1, 0);
+}
+
+inline void TABLE::use_all_stored_columns()
+{
+ bitmap_set_all(read_set);
+ if (Field **vf= vfield)
+ for (; *vf; vf++)
+ bitmap_clear_bit(read_set, (*vf)->field_index);
+}
+
#endif /* SQL_ITEM_INCLUDED */
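A minimal model of the new TABLE::mark_column_with_deps() helpers that close out item.h above — simplified containers instead of the server's bitmap API, names hypothetical — to show the test-and-set guard: a virtual column's dependencies are walked only the first time the column is marked.

#include <cstdio>
#include <vector>

struct MiniTable
{
  std::vector<bool> read_set;
  // for each column, the columns its (virtual) expression reads;
  // empty for stored columns -- a stand-in for vcol_info->expr->walk()
  std::vector<std::vector<int>> deps;

  bool test_and_set(int col)
  {
    bool old= read_set[col];
    read_set[col]= true;
    return old;                            // previous value, as in bitmap_fast_test_and_set()
  }
  void mark_column_with_deps(int col)
  {
    if (!test_and_set(col))                // first time this column is marked
      for (int d : deps[col])
        mark_column_with_deps(d);          // pull in what the expression depends on
  }
};

int main()
{
  MiniTable t;
  t.read_set.assign(3, false);
  t.deps= {{}, {}, {0, 1}};                // column 2 is "virtual": depends on 0 and 1
  t.mark_column_with_deps(2);
  for (int i= 0; i < 3; i++)
    std::printf("col %d marked: %d\n", i, (int) t.read_set[i]);   // all 1
  return 0;
}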
diff --git a/sql/item_buff.cc b/sql/item_buff.cc
index 4d03462d7c3..3467fda79c7 100644
--- a/sql/item_buff.cc
+++ b/sql/item_buff.cc
@@ -225,16 +225,15 @@ Cached_item_decimal::Cached_item_decimal(Item *it)
bool Cached_item_decimal::cmp()
{
- my_decimal tmp;
- my_decimal *ptmp= item->val_decimal(&tmp);
- if (null_value != item->null_value ||
- (!item->null_value && my_decimal_cmp(&value, ptmp)))
+ VDec tmp(item);
+ if (null_value != tmp.is_null() ||
+ (!tmp.is_null() && tmp.cmp(&value)))
{
- null_value= item->null_value;
+ null_value= tmp.is_null();
/* Save only not null values */
if (!null_value)
{
- my_decimal2decimal(ptmp, &value);
+ my_decimal2decimal(tmp.ptr(), &value);
return TRUE;
}
return FALSE;
@@ -245,17 +244,9 @@ bool Cached_item_decimal::cmp()
int Cached_item_decimal::cmp_read_only()
{
- my_decimal tmp;
- my_decimal *ptmp= item->val_decimal(&tmp);
+ VDec tmp(item);
if (null_value)
- {
- if (item->null_value)
- return 0;
- else
- return -1;
- }
- if (item->null_value)
- return 1;
- return my_decimal_cmp(&value, ptmp);
+ return tmp.is_null() ? 0 : -1;
+ return tmp.is_null() ? 1 : value.cmp(tmp.ptr());
}
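My reading of the Cached_item_decimal hunks: VDec fetches the decimal value and its NULL flag once, so the comparison no longer juggles a temporary my_decimal buffer plus a separate item->null_value check. A rough stand-in for that pattern (double instead of my_decimal, names hypothetical):

#include <cstdio>
#include <optional>

struct Source                      // stand-in for an Item producing a decimal
{
  bool is_null;
  double value;                    // double used here purely for illustration
};

class ValRef                       // stand-in for VDec
{
  std::optional<double> m_val;
public:
  explicit ValRef(const Source &s)
  { if (!s.is_null) m_val= s.value; }
  bool is_null() const { return !m_val.has_value(); }
  int cmp(const ValRef &other) const
  {
    // only meaningful when both sides are non-NULL, as in the caller above
    return (*m_val > *other.m_val) - (*m_val < *other.m_val);
  }
};

int main()
{
  Source a{false, 1.5}, b{false, 2.5}, n{true, 0};
  ValRef va(a), vb(b), vn(n);
  std::printf("cmp=%d null=%d\n", va.cmp(vb), (int) vn.is_null());  // cmp=-1 null=1
  return 0;
}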
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index ed5910bd172..feb41940865 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -31,32 +31,8 @@
#include <m_ctype.h>
#include "sql_select.h"
#include "sql_parse.h" // check_stack_overrun
-#include "sql_time.h" // make_truncated_value_warning
#include "sql_base.h" // dynamic_column_error_message
-/**
- find an temporal type (item) that others will be converted to
- for the purpose of comparison.
-
- this is the type that will be used in warnings like
- "Incorrect <<TYPE>> value".
-*/
-static Item *find_date_time_item(Item **args, uint nargs, uint col)
-{
- Item *date_arg= 0, **arg, **arg_end;
- for (arg= args, arg_end= args + nargs; arg != arg_end ; arg++)
- {
- Item *item= arg[0]->element_index(col);
- if (item->cmp_type() != TIME_RESULT)
- continue;
- if (item->field_type() == MYSQL_TYPE_DATETIME)
- return item;
- if (!date_arg)
- date_arg= item;
- }
- return date_arg;
-}
-
/*
Compare row signature of two expressions
@@ -591,6 +567,18 @@ bool Arg_comparator::set_cmp_func_datetime()
}
+bool Arg_comparator::set_cmp_func_native()
+{
+ THD *thd= current_thd;
+ m_compare_collation= &my_charset_numeric;
+ func= is_owner_equal_func() ? &Arg_comparator::compare_e_native :
+ &Arg_comparator::compare_native;
+ a= cache_converted_constant(thd, a, &a_cache, compare_type_handler());
+ b= cache_converted_constant(thd, b, &b_cache, compare_type_handler());
+ return false;
+}
+
+
bool Arg_comparator::set_cmp_func_int()
{
THD *thd= current_thd;
@@ -707,10 +695,11 @@ Item** Arg_comparator::cache_converted_constant(THD *thd_arg, Item **value,
int Arg_comparator::compare_time()
{
- longlong val1= (*a)->val_time_packed();
+ THD *thd= current_thd;
+ longlong val1= (*a)->val_time_packed(thd);
if (!(*a)->null_value)
{
- longlong val2= (*b)->val_time_packed();
+ longlong val2= (*b)->val_time_packed(thd);
if (!(*b)->null_value)
return compare_not_null_values(val1, val2);
}
@@ -722,8 +711,9 @@ int Arg_comparator::compare_time()
int Arg_comparator::compare_e_time()
{
- longlong val1= (*a)->val_time_packed();
- longlong val2= (*b)->val_time_packed();
+ THD *thd= current_thd;
+ longlong val1= (*a)->val_time_packed(thd);
+ longlong val2= (*b)->val_time_packed(thd);
if ((*a)->null_value || (*b)->null_value)
return MY_TEST((*a)->null_value && (*b)->null_value);
return MY_TEST(val1 == val2);
@@ -733,10 +723,11 @@ int Arg_comparator::compare_e_time()
int Arg_comparator::compare_datetime()
{
- longlong val1= (*a)->val_datetime_packed();
+ THD *thd= current_thd;
+ longlong val1= (*a)->val_datetime_packed(thd);
if (!(*a)->null_value)
{
- longlong val2= (*b)->val_datetime_packed();
+ longlong val2= (*b)->val_datetime_packed(thd);
if (!(*b)->null_value)
return compare_not_null_values(val1, val2);
}
@@ -748,8 +739,9 @@ int Arg_comparator::compare_datetime()
int Arg_comparator::compare_e_datetime()
{
- longlong val1= (*a)->val_datetime_packed();
- longlong val2= (*b)->val_datetime_packed();
+ THD *thd= current_thd;
+ longlong val1= (*a)->val_datetime_packed(thd);
+ longlong val2= (*b)->val_datetime_packed(thd);
if ((*a)->null_value || (*b)->null_value)
return MY_TEST((*a)->null_value && (*b)->null_value);
return MY_TEST(val1 == val2);
@@ -790,6 +782,39 @@ int Arg_comparator::compare_e_string()
}
+int Arg_comparator::compare_native()
+{
+ THD *thd= current_thd;
+ if (!(*a)->val_native_with_conversion(thd, &m_native1,
+ compare_type_handler()))
+ {
+ if (!(*b)->val_native_with_conversion(thd, &m_native2,
+ compare_type_handler()))
+ {
+ if (set_null)
+ owner->null_value= 0;
+ return compare_type_handler()->cmp_native(m_native1, m_native2);
+ }
+ }
+ if (set_null)
+ owner->null_value= 1;
+ return -1;
+}
+
+
+int Arg_comparator::compare_e_native()
+{
+ THD *thd= current_thd;
+ bool res1= (*a)->val_native_with_conversion(thd, &m_native1,
+ compare_type_handler());
+ bool res2= (*b)->val_native_with_conversion(thd, &m_native2,
+ compare_type_handler());
+ if (res1 || res2)
+ return MY_TEST(res1 == res2);
+ return MY_TEST(compare_type_handler()->cmp_native(m_native1, m_native2) == 0);
+}
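compare_native()/compare_e_native() delegate the actual ordering to the type handler's cmp_native() on the converted byte strings. The sketch below is only an assumption about what such a native comparison can look like for a fixed-width type — a big-endian key compared with memcmp — and is not MariaDB's Type_handler API.

#include <array>
#include <cstdint>
#include <cstdio>
#include <cstring>

using Native= std::array<unsigned char, 7>;   // e.g. a TIMESTAMP(6)-sized key

static Native to_native(uint32_t epoch, uint32_t usec)
{
  // big-endian layout so that memcmp() order equals chronological order
  Native n{};
  for (int i= 0; i < 4; i++) n[i]= (unsigned char) (epoch >> (24 - 8 * i));
  for (int i= 0; i < 3; i++) n[4 + i]= (unsigned char) (usec >> (16 - 8 * i));
  return n;
}

static int cmp_native(const Native &a, const Native &b)
{
  return std::memcmp(a.data(), b.data(), a.size());
}

int main()
{
  Native a= to_native(1288477526, 0);
  Native b= to_native(1288481126, 0);
  std::printf("%d\n", cmp_native(a, b) < 0);  // 1: a sorts before b
  return 0;
}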
+
+
int Arg_comparator::compare_real()
{
/*
@@ -818,17 +843,15 @@ int Arg_comparator::compare_real()
int Arg_comparator::compare_decimal()
{
- my_decimal decimal1;
- my_decimal *val1= (*a)->val_decimal(&decimal1);
- if (!(*a)->null_value)
+ VDec val1(*a);
+ if (!val1.is_null())
{
- my_decimal decimal2;
- my_decimal *val2= (*b)->val_decimal(&decimal2);
- if (!(*b)->null_value)
+ VDec val2(*b);
+ if (!val2.is_null())
{
if (set_null)
owner->null_value= 0;
- return my_decimal_cmp(val1, val2);
+ return val1.cmp(val2);
}
}
if (set_null)
@@ -847,12 +870,10 @@ int Arg_comparator::compare_e_real()
int Arg_comparator::compare_e_decimal()
{
- my_decimal decimal1, decimal2;
- my_decimal *val1= (*a)->val_decimal(&decimal1);
- my_decimal *val2= (*b)->val_decimal(&decimal2);
- if ((*a)->null_value || (*b)->null_value)
- return MY_TEST((*a)->null_value && (*b)->null_value);
- return MY_TEST(my_decimal_cmp(val1, val2) == 0);
+ VDec val1(*a), val2(*b);
+ if (val1.is_null() || val2.is_null())
+ return MY_TEST(val1.is_null() && val2.is_null());
+ return MY_TEST(val1.cmp(val2) == 0);
}
@@ -1161,6 +1182,8 @@ longlong Item_func_truth::val_int()
bool Item_in_optimizer::is_top_level_item()
{
+ if (invisible_mode())
+ return FALSE;
return ((Item_in_subselect *)args[1])->is_top_level_item();
}
@@ -1194,8 +1217,13 @@ bool Item_in_optimizer::eval_not_null_tables(void *opt_arg)
void Item_in_optimizer::print(String *str, enum_query_type query_type)
{
- restore_first_argument();
- Item_func::print(str, query_type);
+ if (query_type & QT_PARSABLE)
+ args[1]->print(str, query_type);
+ else
+ {
+ restore_first_argument();
+ Item_func::print(str, query_type);
+ }
}
@@ -1211,8 +1239,7 @@ void Item_in_optimizer::print(String *str, enum_query_type query_type)
void Item_in_optimizer::restore_first_argument()
{
- if (args[1]->type() == Item::SUBSELECT_ITEM &&
- ((Item_subselect *)args[1])->is_in_predicate())
+ if (!invisible_mode())
{
args[0]= ((Item_in_subselect *)args[1])->left_expr;
}
@@ -1229,8 +1256,7 @@ bool Item_in_optimizer::fix_left(THD *thd)
it is args[0].
*/
Item **ref0= args;
- if (args[1]->type() == Item::SUBSELECT_ITEM &&
- ((Item_subselect *)args[1])->is_in_predicate())
+ if (!invisible_mode())
{
/*
left_expr->fix_fields() may cause left_expr to be substituted for
@@ -1292,7 +1318,7 @@ bool Item_in_optimizer::fix_left(THD *thd)
used_tables_cache= args[0]->used_tables();
}
eval_not_null_tables(NULL);
- with_sum_func= args[0]->with_sum_func;
+ copy_with_sum_func(args[0]);
with_param= args[0]->with_param || args[1]->with_param;
with_field= args[0]->with_field;
if ((const_item_cache= args[0]->const_item()))
@@ -1300,11 +1326,11 @@ bool Item_in_optimizer::fix_left(THD *thd)
cache->store(args[0]);
cache->cache_value();
}
- if (args[1]->fixed)
+ if (args[1]->is_fixed())
{
/* to avoid overriding is called to update left expression */
used_tables_and_const_cache_join(args[1]);
- with_sum_func= with_sum_func || args[1]->with_sum_func;
+ join_with_sum_func(args[1]);
}
DBUG_RETURN(0);
}
@@ -1340,7 +1366,7 @@ bool Item_in_optimizer::fix_fields(THD *thd, Item **ref)
if (args[1]->maybe_null)
maybe_null=1;
m_with_subquery= true;
- with_sum_func= with_sum_func || args[1]->with_sum_func;
+ join_with_sum_func(args[1]);
with_field= with_field || args[1]->with_field;
with_param= args[0]->with_param || args[1]->with_param;
used_tables_and_const_cache_join(args[1]);
@@ -1892,7 +1918,7 @@ bool Item_func_interval::fix_length_and_dec()
max_length= 2;
used_tables_and_const_cache_join(row);
not_null_tables_cache= row->not_null_tables();
- with_sum_func= with_sum_func || row->with_sum_func;
+ join_with_sum_func(row);
with_param= with_param || row->with_param;
with_field= with_field || row->with_field;
return FALSE;
@@ -1971,11 +1997,11 @@ longlong Item_func_interval::val_int()
((el->result_type() == DECIMAL_RESULT) ||
(el->result_type() == INT_RESULT)))
{
- my_decimal e_dec_buf, *e_dec= el->val_decimal(&e_dec_buf);
+ VDec e_dec(el);
/* Skip NULL ranges. */
- if (el->null_value)
+ if (e_dec.is_null())
continue;
- if (my_decimal_cmp(e_dec, dec) > 0)
+ if (e_dec.cmp(dec) > 0)
return i - 1;
}
else
@@ -2121,22 +2147,49 @@ bool Item_func_between::fix_length_and_dec_temporal(THD *thd)
}
-longlong Item_func_between::val_int_cmp_temporal()
+longlong Item_func_between::val_int_cmp_datetime()
{
- enum_field_types f_type= m_comparator.type_handler()->field_type();
- longlong value= args[0]->val_temporal_packed(f_type), a, b;
+ THD *thd= current_thd;
+ longlong value= args[0]->val_datetime_packed(thd), a, b;
if ((null_value= args[0]->null_value))
return 0;
- a= args[1]->val_temporal_packed(f_type);
- b= args[2]->val_temporal_packed(f_type);
- if (!args[1]->null_value && !args[2]->null_value)
- return (longlong) ((value >= a && value <= b) != negated);
- if (args[1]->null_value && args[2]->null_value)
+ a= args[1]->val_datetime_packed(thd);
+ b= args[2]->val_datetime_packed(thd);
+ return val_int_cmp_int_finalize(value, a, b);
+}
+
+
+longlong Item_func_between::val_int_cmp_time()
+{
+ THD *thd= current_thd;
+ longlong value= args[0]->val_time_packed(thd), a, b;
+ if ((null_value= args[0]->null_value))
+ return 0;
+ a= args[1]->val_time_packed(thd);
+ b= args[2]->val_time_packed(thd);
+ return val_int_cmp_int_finalize(value, a, b);
+}
+
+
+longlong Item_func_between::val_int_cmp_native()
+{
+ THD *thd= current_thd;
+ const Type_handler *h= m_comparator.type_handler();
+ NativeBuffer<STRING_BUFFER_USUAL_SIZE> value, a, b;
+ if (val_native_with_conversion_from_item(thd, args[0], &value, h))
+ return 0;
+ bool ra= args[1]->val_native_with_conversion(thd, &a, h);
+ bool rb= args[2]->val_native_with_conversion(thd, &b, h);
+ if (!ra && !rb)
+ return (longlong)
+ ((h->cmp_native(value, a) >= 0 &&
+ h->cmp_native(value, b) <= 0) != negated);
+ if (ra && rb)
null_value= true;
- else if (args[1]->null_value)
- null_value= value <= b; // not null if false range.
+ else if (ra)
+ null_value= h->cmp_native(value, b) <= 0;
else
- null_value= value >= a;
+ null_value= h->cmp_native(value, a) >= 0;
return (longlong) (!null_value && negated);
}
@@ -2188,23 +2241,37 @@ longlong Item_func_between::val_int_cmp_int()
}
-longlong Item_func_between::val_int_cmp_decimal()
+bool Item_func_between::val_int_cmp_int_finalize(longlong value,
+ longlong a,
+ longlong b)
{
- my_decimal dec_buf, *dec= args[0]->val_decimal(&dec_buf),
- a_buf, *a_dec, b_buf, *b_dec;
- if ((null_value=args[0]->null_value))
- return 0; /* purecov: inspected */
- a_dec= args[1]->val_decimal(&a_buf);
- b_dec= args[2]->val_decimal(&b_buf);
if (!args[1]->null_value && !args[2]->null_value)
- return (longlong) ((my_decimal_cmp(dec, a_dec) >= 0 &&
- my_decimal_cmp(dec, b_dec) <= 0) != negated);
+ return (longlong) ((value >= a && value <= b) != negated);
if (args[1]->null_value && args[2]->null_value)
null_value= true;
else if (args[1]->null_value)
- null_value= (my_decimal_cmp(dec, b_dec) <= 0);
+ null_value= value <= b; // not null if false range.
+ else
+ null_value= value >= a;
+ return (longlong) (!null_value && negated);
+}
+
+
+longlong Item_func_between::val_int_cmp_decimal()
+{
+ VDec dec(args[0]);
+ if ((null_value= dec.is_null()))
+ return 0; /* purecov: inspected */
+ VDec a_dec(args[1]), b_dec(args[2]);
+ if (!a_dec.is_null() && !b_dec.is_null())
+ return (longlong) ((dec.cmp(a_dec) >= 0 &&
+ dec.cmp(b_dec) <= 0) != negated);
+ if (a_dec.is_null() && b_dec.is_null())
+ null_value= true;
+ else if (a_dec.is_null())
+ null_value= (dec.cmp(b_dec) <= 0);
else
- null_value= (my_decimal_cmp(dec, a_dec) >= 0);
+ null_value= (dec.cmp(a_dec) >= 0);
return (longlong) (!null_value && negated);
}
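The NULL handling factored into val_int_cmp_int_finalize() (and mirrored in the decimal and native variants) follows SQL's three-valued BETWEEN: a NULL endpoint makes the result NULL only when the other endpoint cannot already decide the outcome. A small model of that logic — my simplification to plain int and the non-negated case, not the server code:

#include <cassert>
#include <optional>

static std::optional<bool> between(std::optional<int> v,
                                   std::optional<int> a,
                                   std::optional<int> b)
{
  if (!v) return std::nullopt;
  if (a && b) return *v >= *a && *v <= *b;
  if (!a && !b) return std::nullopt;
  if (!a)                                  // lower bound unknown
    return *v <= *b ? std::optional<bool>() : std::optional<bool>(false);
  return *v >= *a ? std::optional<bool>() : std::optional<bool>(false);
}

int main()
{
  assert(between(1, std::nullopt, 0) == false);      // 1 > 0, so FALSE, not NULL
  assert(!between(1, std::nullopt, 2).has_value());  // undecidable -> NULL
  assert(between(5, 1, 9) == true);
  return 0;
}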
@@ -2312,12 +2379,22 @@ Item_func_ifnull::str_op(String *str)
}
-bool Item_func_ifnull::date_op(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_func_ifnull::native_op(THD *thd, Native *to)
+{
+ DBUG_ASSERT(fixed == 1);
+ if (!val_native_with_conversion_from_item(thd, args[0], to, type_handler()))
+ return false;
+ return val_native_with_conversion_from_item(thd, args[1], to, type_handler());
+}
+
+
+bool Item_func_ifnull::date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed == 1);
for (uint i= 0; i < 2; i++)
{
- Datetime dt(current_thd, args[i], fuzzydate & ~TIME_FUZZY_DATES);
+ Datetime_truncation_not_needed dt(thd, args[i],
+ fuzzydate & ~TIME_FUZZY_DATES);
if (!(dt.copy_to_mysql_time(ltime, mysql_timestamp_type())))
return (null_value= false);
}
@@ -2325,12 +2402,12 @@ bool Item_func_ifnull::date_op(MYSQL_TIME *ltime, ulonglong fuzzydate)
}
-bool Item_func_ifnull::time_op(MYSQL_TIME *ltime)
+bool Item_func_ifnull::time_op(THD *thd, MYSQL_TIME *ltime)
{
DBUG_ASSERT(fixed == 1);
for (uint i= 0; i < 2; i++)
{
- if (!Time(args[i]).copy_to_mysql_time(ltime))
+ if (!Time(thd, args[i]).copy_to_mysql_time(ltime))
return (null_value= false);
}
return (null_value= true);
@@ -2812,28 +2889,38 @@ Item_func_nullif::decimal_op(my_decimal * decimal_value)
bool
-Item_func_nullif::date_op(MYSQL_TIME *ltime, ulonglong fuzzydate)
+Item_func_nullif::date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed == 1);
if (!compare())
return (null_value= true);
- Datetime dt(current_thd, args[2], fuzzydate);
+ Datetime_truncation_not_needed dt(thd, args[2], fuzzydate);
return (null_value= dt.copy_to_mysql_time(ltime, mysql_timestamp_type()));
}
bool
-Item_func_nullif::time_op(MYSQL_TIME *ltime)
+Item_func_nullif::time_op(THD *thd, MYSQL_TIME *ltime)
{
DBUG_ASSERT(fixed == 1);
if (!compare())
return (null_value= true);
- return (null_value= Time(args[2]).copy_to_mysql_time(ltime));
+ return (null_value= Time(thd, args[2]).copy_to_mysql_time(ltime));
}
bool
+Item_func_nullif::native_op(THD *thd, Native *to)
+{
+ DBUG_ASSERT(fixed == 1);
+ if (!compare())
+ return (null_value= true);
+ return val_native_with_conversion_from_item(thd, args[2], to, type_handler());
+}
+
+
+bool
Item_func_nullif::is_null()
{
return (null_value= (!compare() ? 1 : args[2]->is_null()));
@@ -2910,7 +2997,7 @@ Item *Item_func_case_simple::find_item()
Item *Item_func_decode_oracle::find_item()
{
uint idx;
- if (!Predicant_to_list_comparator::cmp_nulls_equal(this, &idx))
+ if (!Predicant_to_list_comparator::cmp_nulls_equal(current_thd, this, &idx))
return args[idx + when_count()];
Item **pos= Item_func_decode_oracle::else_expr_addr();
return pos ? pos[0] : 0;
@@ -2986,24 +3073,34 @@ my_decimal *Item_func_case::decimal_op(my_decimal *decimal_value)
}
-bool Item_func_case::date_op(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_func_case::date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed == 1);
Item *item= find_item();
if (!item)
return (null_value= true);
- Datetime dt(current_thd, item, fuzzydate);
+ Datetime_truncation_not_needed dt(thd, item, fuzzydate);
return (null_value= dt.copy_to_mysql_time(ltime, mysql_timestamp_type()));
}
-bool Item_func_case::time_op(MYSQL_TIME *ltime)
+bool Item_func_case::time_op(THD *thd, MYSQL_TIME *ltime)
{
DBUG_ASSERT(fixed == 1);
Item *item= find_item();
if (!item)
return (null_value= true);
- return (null_value= Time(item).copy_to_mysql_time(ltime));
+ return (null_value= Time(thd, item).copy_to_mysql_time(ltime));
+}
+
+
+bool Item_func_case::native_op(THD *thd, Native *to)
+{
+ DBUG_ASSERT(fixed == 1);
+ Item *item= find_item();
+ if (!item)
+ return (null_value= true);
+ return val_native_with_conversion_from_item(thd, item, to, type_handler());
}
@@ -3339,12 +3436,13 @@ double Item_func_coalesce::real_op()
}
-bool Item_func_coalesce::date_op(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_func_coalesce::date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed == 1);
for (uint i= 0; i < arg_count; i++)
{
- Datetime dt(current_thd, args[i], fuzzydate & ~TIME_FUZZY_DATES);
+ Datetime_truncation_not_needed dt(thd, args[i],
+ fuzzydate & ~TIME_FUZZY_DATES);
if (!dt.copy_to_mysql_time(ltime, mysql_timestamp_type()))
return (null_value= false);
}
@@ -3352,18 +3450,30 @@ bool Item_func_coalesce::date_op(MYSQL_TIME *ltime, ulonglong fuzzydate)
}
-bool Item_func_coalesce::time_op(MYSQL_TIME *ltime)
+bool Item_func_coalesce::time_op(THD *thd, MYSQL_TIME *ltime)
{
DBUG_ASSERT(fixed == 1);
for (uint i= 0; i < arg_count; i++)
{
- if (!Time(args[i]).copy_to_mysql_time(ltime))
+ if (!Time(thd, args[i]).copy_to_mysql_time(ltime))
return (null_value= false);
}
return (null_value= true);
}
+bool Item_func_coalesce::native_op(THD *thd, Native *to)
+{
+ DBUG_ASSERT(fixed == 1);
+ for (uint i= 0; i < arg_count; i++)
+ {
+ if (!val_native_with_conversion_from_item(thd, args[i], to, type_handler()))
+ return false;
+ }
+ return (null_value= true);
+}
+
+
my_decimal *Item_func_coalesce::decimal_op(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
@@ -3641,11 +3751,58 @@ Item *in_longlong::create_item(THD *thd)
}
+static int cmp_timestamp(void *cmp_arg,
+ Timestamp_or_zero_datetime *a,
+ Timestamp_or_zero_datetime *b)
+{
+ return a->cmp(*b);
+}
+
+
+in_timestamp::in_timestamp(THD *thd, uint elements)
+ :in_vector(thd, elements, sizeof(Value), (qsort2_cmp) cmp_timestamp, 0)
+{}
+
+
+void in_timestamp::set(uint pos, Item *item)
+{
+ Timestamp_or_zero_datetime *buff= &((Timestamp_or_zero_datetime *) base)[pos];
+ Timestamp_or_zero_datetime_native_null native(current_thd, item, true);
+ if (native.is_null())
+ *buff= Timestamp_or_zero_datetime();
+ else
+ *buff= Timestamp_or_zero_datetime(native);
+}
+
+
+uchar *in_timestamp::get_value(Item *item)
+{
+ Timestamp_or_zero_datetime_native_null native(current_thd, item, true);
+ if (native.is_null())
+ return 0;
+ tmp= Timestamp_or_zero_datetime(native);
+ return (uchar*) &tmp;
+}
+
+
+Item *in_timestamp::create_item(THD *thd)
+{
+ return new (thd->mem_root) Item_timestamp_literal(thd);
+}
+
+
+void in_timestamp::value_to_item(uint pos, Item *item)
+{
+ const Timestamp_or_zero_datetime &buff= (((Timestamp_or_zero_datetime*) base)[pos]);
+ static_cast<Item_timestamp_literal*>(item)->set_value(buff);
+}
+
+
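
in_timestamp above follows the usual in_vector pattern: each IN-list constant is normalized into a fixed-size value (here Timestamp_or_zero_datetime), the vector is sorted once, and membership is checked by bisection using the comparator passed to the base class (cmp_timestamp). A rough standalone sketch of that pattern, with a hypothetical pair<sec,usec> standing in for the server's native timestamp type:

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // Hypothetical normalized timestamp: seconds + microseconds.
    using Ts = std::pair<int64_t, uint32_t>;

    struct InTimestampVector
    {
      std::vector<Ts> values;

      void set(const Ts &v) { values.push_back(v); }             // in_timestamp::set analogue
      void sort() { std::sort(values.begin(), values.end()); }   // fix_in_vector step
      bool find(const Ts &v) const                               // bisection lookup
      { return std::binary_search(values.begin(), values.end(), v); }
    };

    int main()
    {
      InTimestampVector v;
      v.set({1000, 0});
      v.set({2000, 500000});
      v.sort();
      return v.find({2000, 500000}) ? 0 : 1;                     // found -> exit code 0
    }
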
void in_datetime::set(uint pos,Item *item)
{
struct packed_longlong *buff= &((packed_longlong*) base)[pos];
- buff->val= item->val_datetime_packed();
+ buff->val= item->val_datetime_packed(current_thd);
buff->unsigned_flag= 1L;
}
@@ -3653,13 +3810,22 @@ void in_time::set(uint pos,Item *item)
{
struct packed_longlong *buff= &((packed_longlong*) base)[pos];
- buff->val= item->val_time_packed();
+ buff->val= item->val_time_packed(current_thd);
buff->unsigned_flag= 1L;
}
-uchar *in_temporal::get_value_internal(Item *item, enum_field_types f_type)
+uchar *in_datetime::get_value(Item *item)
+{
+ tmp.val= item->val_datetime_packed(current_thd);
+ if (item->null_value)
+ return 0;
+ tmp.unsigned_flag= 1L;
+ return (uchar*) &tmp;
+}
+
+uchar *in_time::get_value(Item *item)
{
- tmp.val= item->val_temporal_packed(f_type);
+ tmp.val= item->val_time_packed(current_thd);
if (item->null_value)
return 0;
tmp.unsigned_flag= 1L;
@@ -3871,39 +4037,15 @@ bool cmp_item_row::alloc_comparators(THD *thd, uint cols)
void cmp_item_row::store_value(Item *item)
{
DBUG_ENTER("cmp_item_row::store_value");
- THD *thd= current_thd;
- if (!alloc_comparators(thd, item->cols()))
+ DBUG_ASSERT(comparators);
+ DBUG_ASSERT(n == item->cols());
+ item->bring_value();
+ item->null_value= 0;
+ for (uint i=0; i < n; i++)
{
- item->bring_value();
- item->null_value= 0;
- for (uint i=0; i < n; i++)
- {
- if (!comparators[i])
- {
- /**
- Comparators for the row elements that have temporal data types
- are installed at initialization time by prepare_comparators().
- Here we install comparators for the other data types.
- There is a bug in the below code. See MDEV-11511.
- When performing:
- (predicant0,predicant1) IN ((value00,value01),(value10,value11))
- It uses only the data type and the collation of the predicant
- elements only. It should be fixed to aggregate the data type and
- the collation for all elements at the N-th positions of the
- predicate and all values:
- - predicate0, value00, value01
- - predicate1, value10, value11
- */
- Item *elem= item->element_index(i);
- const Type_handler *handler= elem->type_handler();
- DBUG_ASSERT(elem->cmp_type() != TIME_RESULT);
- if (!(comparators[i]=
- handler->make_cmp_item(thd, elem->collation.collation)))
- break; // new failed
- }
- comparators[i]->store_value(item->element_index(i));
- item->null_value|= item->element_index(i)->null_value;
- }
+ DBUG_ASSERT(comparators[i]);
+ comparators[i]->store_value(item->element_index(i));
+ item->null_value|= item->element_index(i)->null_value;
}
DBUG_VOID_RETURN;
}
@@ -3996,9 +4138,8 @@ int cmp_item_decimal::cmp_not_null(const Value *val)
int cmp_item_decimal::cmp(Item *arg)
{
- my_decimal tmp_buf, *tmp= arg->val_decimal(&tmp_buf);
- return (m_null_value || arg->null_value) ?
- UNKNOWN : (my_decimal_cmp(&value, tmp) != 0);
+ VDec tmp(arg);
+ return m_null_value || tmp.is_null() ? UNKNOWN : (tmp.cmp(&value) != 0);
}
@@ -4015,14 +4156,6 @@ cmp_item* cmp_item_decimal::make_same()
}
-void cmp_item_temporal::store_value_internal(Item *item,
- enum_field_types f_type)
-{
- value= item->val_temporal_packed(f_type);
- m_null_value= item->null_value;
-}
-
-
int cmp_item_datetime::cmp_not_null(const Value *val)
{
DBUG_ASSERT(!val->is_null());
@@ -4033,7 +4166,7 @@ int cmp_item_datetime::cmp_not_null(const Value *val)
int cmp_item_datetime::cmp(Item *arg)
{
- const bool rc= value != arg->val_datetime_packed();
+ const bool rc= value != arg->val_datetime_packed(current_thd);
return (m_null_value || arg->null_value) ? UNKNOWN : rc;
}
@@ -4048,7 +4181,7 @@ int cmp_item_time::cmp_not_null(const Value *val)
int cmp_item_time::cmp(Item *arg)
{
- const bool rc= value != arg->val_time_packed();
+ const bool rc= value != arg->val_time_packed(current_thd);
return (m_null_value || arg->null_value) ? UNKNOWN : rc;
}
@@ -4072,6 +4205,49 @@ cmp_item *cmp_item_time::make_same()
}
+void cmp_item_timestamp::store_value(Item *item)
+{
+ item->val_native_with_conversion(current_thd, &m_native,
+ &type_handler_timestamp2);
+ m_null_value= item->null_value;
+}
+
+
+int cmp_item_timestamp::cmp_not_null(const Value *val)
+{
+ /*
+ This method will be implemented when we add this syntax:
+ SELECT TIMESTAMP WITH LOCAL TIME ZONE '2001-01-01 10:20:30'
+ For now TIMESTAMP is compared to non-TIMESTAMP using DATETIME.
+ */
+ DBUG_ASSERT(0);
+ return 0;
+}
+
+
+int cmp_item_timestamp::cmp(Item *arg)
+{
+ THD *thd= current_thd;
+ Timestamp_or_zero_datetime_native_null tmp(thd, arg, true);
+ return m_null_value || tmp.is_null() ? UNKNOWN :
+ type_handler_timestamp2.cmp_native(m_native, tmp) != 0;
+}
+
+
+int cmp_item_timestamp::compare(cmp_item *arg)
+{
+ cmp_item_timestamp *tmp= static_cast<cmp_item_timestamp*>(arg);
+ return type_handler_timestamp2.cmp_native(m_native, tmp->m_native);
+}
+
+
+cmp_item* cmp_item_timestamp::make_same()
+{
+ return new cmp_item_timestamp();
+}
+
+
+
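
cmp_item_timestamp stores the left operand once in native form and then compares every right operand natively through type_handler_timestamp2.cmp_native(), so TIMESTAMP-to-TIMESTAMP comparison does not go through DATETIME. A minimal sketch of that "store once, compare many" shape, with a hypothetical (sec, usec) value instead of the server's native type; the UNKNOWN result mirrors the NULL handling in the diff:

    #include <cstdint>
    #include <optional>

    struct TsNative { int64_t sec; uint32_t usec; };   // illustrative only

    static int cmp_native(const TsNative &a, const TsNative &b)
    {
      if (a.sec != b.sec)   return a.sec < b.sec ? -1 : 1;
      if (a.usec != b.usec) return a.usec < b.usec ? -1 : 1;
      return 0;
    }

    struct CmpTimestamp
    {
      static constexpr int UNKNOWN= -1;
      std::optional<TsNative> stored;                  // left operand, or NULL

      void store_value(std::optional<TsNative> v) { stored= v; }

      // UNKNOWN if either side is NULL, otherwise 1 when the values differ.
      int cmp(std::optional<TsNative> arg) const
      {
        if (!stored || !arg)
          return UNKNOWN;
        return cmp_native(*stored, *arg) != 0;
      }
    };

    int main()
    {
      CmpTimestamp c;
      c.store_value(TsNative{100, 5});
      return c.cmp(TsNative{100, 5}) == 0 ? 0 : 1;     // equal -> 0
    }
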
bool Item_func_in::count_sargable_conds(void *arg)
{
((SELECT_LEX*) arg)->cond_count++;
@@ -4292,25 +4468,84 @@ bool Item_func_in::value_list_convert_const_to_int(THD *thd)
}
-/**
- Historically this code installs comparators at initialization time
- for temporal ROW elements only. All other comparators are installed later,
- during the first store_value(). This causes the bug MDEV-11511.
- See also comments in cmp_item_row::store_value().
-*/
-bool cmp_item_row::prepare_comparators(THD *thd, Item **args, uint arg_count)
+bool cmp_item_row::
+ aggregate_row_elements_for_comparison(THD *thd,
+ Type_handler_hybrid_field_type *cmp,
+ Item_args *tmp,
+ const char *funcname,
+ uint col,
+ uint level)
{
+ DBUG_EXECUTE_IF("cmp_item",
+ {
+ for (uint i= 0 ; i < tmp->argument_count(); i++)
+ {
+ Item *arg= tmp->arguments()[i];
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_UNKNOWN_ERROR, "DBUG: %s[%d,%d] handler=%s",
+ String_space(level).c_ptr(), col, i,
+ arg->type_handler()->name().ptr());
+ }
+ }
+ );
+ bool err= cmp->aggregate_for_comparison(funcname, tmp->arguments(),
+ tmp->argument_count(), true);
+ DBUG_EXECUTE_IF("cmp_item",
+ {
+ if (!err)
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_UNKNOWN_ERROR, "DBUG: %s=> handler=%s",
+ String_space(level).c_ptr(),
+ cmp->type_handler()->name().ptr());
+ }
+ );
+ return err;
+}
+
+
+bool cmp_item_row::prepare_comparators(THD *thd, const char *funcname,
+ const Item_args *args, uint level)
+{
+ DBUG_EXECUTE_IF("cmp_item",
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_UNKNOWN_ERROR, "DBUG: %sROW(%d args) level=%d",
+ String_space(level).c_ptr(),
+ args->argument_count(), level););
+ DBUG_ASSERT(args->argument_count() > 0);
+ if (alloc_comparators(thd, args->arguments()[0]->cols()))
+ return true;
+ DBUG_ASSERT(n == args->arguments()[0]->cols());
for (uint col= 0; col < n; col++)
{
- Item *date_arg= find_date_time_item(args, arg_count, col);
- if (date_arg)
+ Item_args tmp;
+ Type_handler_hybrid_field_type cmp;
+
+ if (tmp.alloc_and_extract_row_elements(thd, args, col) ||
+ aggregate_row_elements_for_comparison(thd, &cmp, &tmp,
+ funcname, col, level + 1))
+ return true;
+
+ /*
+ There is a legacy bug (MDEV-11511) in the code below,
+ which should be fixed eventually.
+ When performing:
+ (predicant0,predicant1) IN ((value00,value01),(value10,value11))
+ It uses only the data type and the collation of the predicant
+ elements. It should be fixed to take into account the data type and
+ the collation for all elements at the N-th positions of the
+ predicate and all values:
+ - predicant0, value00, value10
+ - predicant1, value01, value11
+ */
+ Item *item0= args->arguments()[0]->element_index(col);
+ CHARSET_INFO *collation= item0->collation.collation;
+ if (!(comparators[col]= cmp.type_handler()->make_cmp_item(thd, collation)))
+ return true;
+ if (cmp.type_handler() == &type_handler_row)
{
- // TODO: do like the scalar comparators do
- const Type_handler *h= date_arg->type_handler();
- comparators[col]= h->field_type() == MYSQL_TYPE_TIME ?
- (cmp_item *) new (thd->mem_root) cmp_item_time() :
- (cmp_item *) new (thd->mem_root) cmp_item_datetime();
- if (!comparators[col])
+ // Prepare comparators for ROW elements recursively
+ cmp_item_row *row= static_cast<cmp_item_row*>(comparators[col]);
+ if (row->prepare_comparators(thd, funcname, &tmp, level + 1))
return true;
}
}
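
prepare_comparators() now builds one comparator per ROW column by aggregating a comparison type over the column's items via Type_handler_hybrid_field_type::aggregate_for_comparison(), recursing when a column is itself a ROW. A toy sketch of per-column aggregation over a column of type tags (the "widen to the largest tag" rule below is an illustration, not the server's aggregation rules):

    #include <vector>

    // Hypothetical scalar comparison types, ordered from narrow to wide.
    enum CmpType { CMP_INT, CMP_DECIMAL, CMP_REAL, CMP_STRING };

    // Aggregate one column: the predicant element at position `col` and the
    // values at the same position all contribute their type; the result
    // decides which comparator to install for that column.
    CmpType aggregate_column(const std::vector<std::vector<CmpType>> &rows,
                             size_t col)
    {
      CmpType agg= rows[0][col];                 // predicant element at `col`
      for (size_t r= 1; r < rows.size(); r++)    // values at the same position
        if (rows[r][col] > agg)
          agg= rows[r][col];                     // widen (toy rule)
      return agg;
    }

    int main()
    {
      // (p0,p1) IN ((v00,v01),(v10,v11)): rows[0] is the predicant row.
      std::vector<std::vector<CmpType>> rows= {
        { CMP_INT,     CMP_STRING },
        { CMP_DECIMAL, CMP_STRING },
        { CMP_INT,     CMP_STRING },
      };
      return aggregate_column(rows, 0) == CMP_DECIMAL ? 0 : 1;
    }
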
@@ -4320,19 +4555,10 @@ bool cmp_item_row::prepare_comparators(THD *thd, Item **args, uint arg_count)
bool Item_func_in::fix_for_row_comparison_using_bisection(THD *thd)
{
- uint cols= args[0]->cols();
if (unlikely(!(array= new (thd->mem_root) in_row(thd, arg_count-1, 0))))
return true;
cmp_item_row *cmp= &((in_row*)array)->tmp;
- if (cmp->alloc_comparators(thd, cols) ||
- cmp->prepare_comparators(thd, args, arg_count))
- return true;
- /*
- Only DATETIME items comparators were initialized.
- Call store_value() to setup others.
- */
- cmp->store_value(args[0]);
- if (unlikely(thd->is_fatal_error)) // OOM
+ if (cmp->prepare_comparators(thd, func_name(), this, 0))
return true;
fix_in_vector();
return false;
@@ -4371,8 +4597,7 @@ bool Item_func_in::fix_for_row_comparison_using_cmp_items(THD *thd)
DBUG_ASSERT(get_comparator_type_handler(0) == &type_handler_row);
DBUG_ASSERT(get_comparator_cmp_item(0));
cmp_item_row *cmp_row= (cmp_item_row*) get_comparator_cmp_item(0);
- return cmp_row->alloc_comparators(thd, args[0]->cols()) ||
- cmp_row->prepare_comparators(thd, args, arg_count);
+ return cmp_row->prepare_comparators(thd, func_name(), this, 0);
}
@@ -4643,7 +4868,7 @@ Item_cond::fix_fields(THD *thd, Item **ref)
const_item_cache= FALSE;
}
- with_sum_func|= item->with_sum_func;
+ join_with_sum_func(item);
with_param|= item->with_param;
with_field|= item->with_field;
m_with_subquery|= item->with_subquery();
@@ -5003,6 +5228,8 @@ bool Item_cond::excl_dep_on_table(table_map tab_map)
bool Item_cond::excl_dep_on_grouping_fields(st_select_lex *sel)
{
+ if (has_rand_bit())
+ return false;
List_iterator_fast<Item> li(list);
Item *item;
while ((item= li++))
@@ -6314,76 +6541,53 @@ void Item_equal::add_const(THD *thd, Item *c)
equal_items.push_front(c, thd->mem_root);
return;
}
- Item *const_item= get_const();
- switch (Item_equal::compare_type_handler()->cmp_type()) {
- case TIME_RESULT:
- {
- enum_field_types f_type= context_field->field_type();
- longlong value0= c->val_temporal_packed(f_type);
- longlong value1= const_item->val_temporal_packed(f_type);
- cond_false= c->null_value || const_item->null_value || value0 != value1;
- break;
- }
- case STRING_RESULT:
- {
- String *str1, *str2;
- /*
- Suppose we have an expression (with a string type field) like this:
- WHERE field=const1 AND field=const2 ...
-
- For all pairs field=constXXX we know that:
-
- - Item_func_eq::fix_length_and_dec() performed collation and character
- set aggregation and added character set converters when needed.
- Note, the case like:
- WHERE field=const1 COLLATE latin1_bin AND field=const2
- is not handled here, because the field would be replaced to
- Item_func_set_collation, which cannot get into Item_equal.
- So all constXXX that are handled by Item_equal
- already have compatible character sets with "field".
-
- - Also, Field_str::test_if_equality_guarantees_uniqueness() guarantees
- that the comparison collation of all equalities handled by Item_equal
- match the the collation of the field.
-
- Therefore, at Item_equal::add_const() time all constants constXXX
- should be directly comparable to each other without an additional
- character set conversion.
- It's safe to do val_str() for "const_item" and "c" and compare
- them according to the collation of the *field*.
-
- So in a script like this:
- CREATE TABLE t1 (a VARCHAR(10) COLLATE xxx);
- INSERT INTO t1 VALUES ('a'),('A');
- SELECT * FROM t1 WHERE a='a' AND a='A';
- Item_equal::add_const() effectively rewrites the condition to:
- SELECT * FROM t1 WHERE a='a' AND 'a' COLLATE xxx='A';
- and then to:
- SELECT * FROM t1 WHERE a='a'; // if the two constants were equal
- // e.g. in case of latin1_swedish_ci
- or to:
- SELECT * FROM t1 WHERE FALSE; // if the two constants were not equal
- // e.g. in case of latin1_bin
-
- Note, both "const_item" and "c" can return NULL, e.g.:
- SELECT * FROM t1 WHERE a=NULL AND a='const';
- SELECT * FROM t1 WHERE a='const' AND a=NULL;
- SELECT * FROM t1 WHERE a='const' AND a=(SELECT MAX(a) FROM t2)
- */
- cond_false= !(str1= const_item->val_str(&cmp_value1)) ||
- !(str2= c->val_str(&cmp_value2)) ||
- !str1->eq(str2, compare_collation());
- break;
- }
- default:
- {
- Item_func_eq *func= new (thd->mem_root) Item_func_eq(thd, c, const_item);
- if (func->set_cmp_func())
- return;
- func->quick_fix_field();
- cond_false= !func->val_int();
- }
- }
+
+ /*
+ Suppose we have an expression (with a string type field) like this:
+ WHERE field=const1 AND field=const2 ...
+
+ For all pairs field=constXXX we know that:
+
+ - Item_func_eq::fix_length_and_dec() performed collation and character
+ set aggregation and added character set converters when needed.
+ Note, the case like:
+ WHERE field=const1 COLLATE latin1_bin AND field=const2
+ is not handled here, because the field would be replaced with
+ Item_func_set_collation, which cannot get into Item_equal.
+ So all constXXX that are handled by Item_equal
+ already have compatible character sets with "field".
+
+ - Also, Field_str::test_if_equality_guarantees_uniqueness() guarantees
+ that the comparison collation of all equalities handled by Item_equal
+ matches the collation of the field.
+
+ Therefore, at Item_equal::add_const() time all constants constXXX
+ should be directly comparable to each other without an additional
+ character set conversion.
+ It's safe to do val_str() for "const_item" and "c" and compare
+ them according to the collation of the *field*.
+
+ So in a script like this:
+ CREATE TABLE t1 (a VARCHAR(10) COLLATE xxx);
+ INSERT INTO t1 VALUES ('a'),('A');
+ SELECT * FROM t1 WHERE a='a' AND a='A';
+ Item_equal::add_const() effectively rewrites the condition to:
+ SELECT * FROM t1 WHERE a='a' AND 'a' COLLATE xxx='A';
+ and then to:
+ SELECT * FROM t1 WHERE a='a'; // if the two constants were equal
+ // e.g. in case of latin1_swedish_ci
+ or to:
+ SELECT * FROM t1 WHERE FALSE; // if the two constants were not equal
+ // e.g. in case of latin1_bin
+
+ Note, both "const_item" and "c" can return NULL, e.g.:
+ SELECT * FROM t1 WHERE a=NULL AND a='const';
+ SELECT * FROM t1 WHERE a='const' AND a=NULL;
+ SELECT * FROM t1 WHERE a='const' AND a=(SELECT MAX(a) FROM t2)
+ */
+
+ cond_false= !Item_equal::compare_type_handler()->Item_eq_value(thd, this, c,
+ get_const());
if (with_const && equal_items.elements == 1)
cond_true= TRUE;
if (cond_false || cond_true)
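
Item_equal::add_const() now folds the old per-type switch into a single Item_eq_value() call on the comparison type handler; the comment above describes the string case in detail. A toy model of the outcome it describes, with a case-insensitive vs binary "collation" deciding whether the two constants collapse or the condition folds to FALSE (purely illustrative, not the server's collation machinery):

    #include <cctype>
    #include <string>

    // Toy collation: case-insensitive (like latin1_swedish_ci) or binary.
    static bool str_eq(const std::string &a, const std::string &b,
                       bool case_insensitive)
    {
      if (a.size() != b.size())
        return false;
      for (size_t i= 0; i < a.size(); i++)
      {
        char x= a[i], y= b[i];
        if (case_insensitive)
        {
          x= (char) std::tolower((unsigned char) x);
          y= (char) std::tolower((unsigned char) y);
        }
        if (x != y)
          return false;
      }
      return true;
    }

    int main()
    {
      // WHERE a='a' AND a='A'
      bool cond_false_ci=  !str_eq("a", "A", true);   // ci: constants equal, keep a='a'
      bool cond_false_bin= !str_eq("a", "A", false);  // binary: folds to FALSE
      return (!cond_false_ci && cond_false_bin) ? 0 : 1;
    }
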
@@ -6688,7 +6892,7 @@ bool Item_equal::fix_fields(THD *thd, Item **ref)
used_tables_cache|= item->used_tables();
tmp_table_map= item->not_null_tables();
not_null_tables_cache|= tmp_table_map;
- DBUG_ASSERT(!item->with_sum_func && !item->with_subquery());
+ DBUG_ASSERT(!item->with_sum_func() && !item->with_subquery());
if (item->maybe_null)
maybe_null= 1;
if (!item->get_item_equal())
@@ -7138,3 +7342,171 @@ Item_bool_rowready_func2* Le_creator::create_swap(THD *thd, Item *a, Item *b) co
{
return new(thd->mem_root) Item_func_ge(thd, b, a);
}
+
+
+bool
+Item_equal::excl_dep_on_grouping_fields(st_select_lex *sel)
+{
+ Item_equal_fields_iterator it(*this);
+ Item *item;
+
+ while ((item=it++))
+ {
+ if (item->excl_dep_on_grouping_fields(sel))
+ {
+ set_extraction_flag(FULL_EXTRACTION_FL);
+ return true;
+ }
+ }
+ return false;
+}
+
+
+/**
+ @brief
+ Transform multiple equality into list of equalities
+
+ @param thd the thread handle
+ @param equalities the list where created equalities are stored
+ @param checker the checker callback function to be applied to the nodes
+ of the tree of the object to check if multiple equality
+ elements can be used to create equalities
+ @param arg parameter to be passed to the checker
+
+ @details
+ How the method works on examples:
+
+ Example 1:
+ It takes MULT_EQ(x,a,b) and tries to create from its elements a set of
+ equalities {(x=a),(x=b)}.
+
+ Example 2:
+ It takes MULT_EQ(1,a,b) and tries to create from its elements a set of
+ equalities {(1=a),(1=b)}.
+
+ How it is done:
+
+ 1. The method finds the left part of the equalities to be built. It will
+ be the same for all equalities. It is either:
+ a. A constant if there is any
+ b. The first element in the multiple equality that satisfies
+ the checker function
+
+ For example 1 the left element is the field 'x'.
+ For example 2 it is the constant '1'.
+
+ 2. If the left element is found, the remaining elements of the multiple
+ equality are checked with the checker function to see whether they can
+ be right parts of equalities.
+ If an element can be a right part, an equality is built from the
+ left part found at step 1 and the right part found at this step (step 2).
+
+ Suppose for the examples above that both the 'a' and 'b' fields can be used
+ to build equalities:
+
+ Example 1:
+ for 'a' field (x=a) is built
+ for 'b' field (x=b) is built
+
+ Example 2:
+ for 'a' field (1=a) is built
+ for 'b' field (1=b) is built
+
+ 3. As a result we get a set of equalities built with the elements of
+ this multiple equality. They are saved in the equality list.
+
+ Example 1:
+ {(x=a),(x=b)}
+
+ Example 2:
+ {(1=a),(1=b)}
+
+ @note
+ This method is called for condition pushdown into a materialized
+ derived table/view or an IN subquery, and for pushdown from HAVING into WHERE.
+ When it is called for pushdown from HAVING, an empty checker is passed:
+ the elements of this multiple equality don't need to be checked, because
+ every element can be used to build an equality.
+
+ @retval true if an error occurs
+ @retval false otherwise
+*/
+
+bool Item_equal::create_pushable_equalities(THD *thd,
+ List<Item> *equalities,
+ Pushdown_checker checker,
+ uchar *arg)
+{
+ Item *item;
+ Item_equal_fields_iterator it(*this);
+ Item *left_item = get_const();
+ if (!left_item)
+ {
+ while ((item=it++))
+ {
+ left_item= item;
+ if (checker && !((item->*checker) (arg)))
+ continue;
+ break;
+ }
+ }
+ if (!left_item)
+ return false;
+
+ while ((item=it++))
+ {
+ if (checker && !((item->*checker) (arg)))
+ continue;
+ Item_func_eq *eq= 0;
+ Item *left_item_clone= left_item->build_clone(thd);
+ Item *right_item_clone= item->build_clone(thd);
+ if (left_item_clone && right_item_clone)
+ {
+ left_item_clone->set_item_equal(NULL);
+ right_item_clone->set_item_equal(NULL);
+ eq= new (thd->mem_root) Item_func_eq(thd,
+ right_item_clone,
+ left_item_clone);
+ }
+ if (eq && equalities->push_back(eq, thd->mem_root))
+ return true;
+ }
+ return false;
+}
+
+
+/**
+ Transform multiple equality into the AND condition of equalities.
+
+ Example:
+ MULT_EQ(x,a,b)
+ =>
+ (x=a) AND (x=b)
+
+ Equalities are built by the Item_equal::create_pushable_equalities() method
+ from the elements of this multiple equality and are collected in an
+ equality list.
+ This method returns the condition in which the elements of that list
+ are ANDed.
+*/
+
+Item *Item_equal::multiple_equality_transformer(THD *thd, uchar *arg)
+{
+ List<Item> equalities;
+ if (create_pushable_equalities(thd, &equalities, 0, 0))
+ return 0;
+
+ switch (equalities.elements)
+ {
+ case 0:
+ return 0;
+ case 1:
+ return equalities.head();
+ break;
+ default:
+ return new (thd->mem_root) Item_cond_and(thd, equalities);
+ break;
+ }
+}
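
multiple_equality_transformer() above chains the two steps: create_pushable_equalities() expands MULT_EQ(x,a,b) into the list {(x=a),(x=b)}, and that list is then returned as-is (single element) or wrapped in an AND item. A small sketch of the same expansion over plain strings, not the server's Item tree:

    #include <string>
    #include <vector>

    // MULT_EQ(left, m1, m2, ...) -> "(left=m1) AND (left=m2) AND ..."
    std::string expand_multiple_equality(const std::string &left,
                                         const std::vector<std::string> &members)
    {
      std::vector<std::string> equalities;
      for (const std::string &m : members)          // create_pushable_equalities step
        equalities.push_back("(" + left + "=" + m + ")");

      if (equalities.empty())
        return "";                                   // nothing to push
      std::string cond= equalities[0];
      for (size_t i= 1; i < equalities.size(); i++)  // Item_cond_and step
        cond+= " AND " + equalities[i];
      return cond;
    }

    int main()
    {
      // MULT_EQ(x,a,b) => (x=a) AND (x=b)
      return expand_multiple_equality("x", {"a", "b"}) == "(x=a) AND (x=b)" ? 0 : 1;
    }
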
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 06f15503258..5793dda9e9a 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -68,6 +68,7 @@ class Arg_comparator: public Sql_alloc
if (val1 == val2) return 0;
return 1;
}
+ NativeBuffer<STRING_BUFFER_USUAL_SIZE> m_native1, m_native2;
public:
/* Allow owner function to use string buffers. */
String value1, value2;
@@ -89,6 +90,7 @@ public:
bool set_cmp_func_string();
bool set_cmp_func_time();
bool set_cmp_func_datetime();
+ bool set_cmp_func_native();
bool set_cmp_func_int();
bool set_cmp_func_real();
bool set_cmp_func_decimal();
@@ -121,6 +123,8 @@ public:
int compare_e_datetime();
int compare_time();
int compare_e_time();
+ int compare_native();
+ int compare_e_native();
int compare_json_str_basic(Item *j, Item *s);
int compare_json_str();
int compare_str_json();
@@ -152,7 +156,8 @@ public:
class SEL_ARG;
struct KEY_PART;
-class Item_bool_func :public Item_int_func
+class Item_bool_func :public Item_int_func,
+ public Type_cmp_attributes
{
protected:
/*
@@ -215,9 +220,9 @@ public:
Item_bool_func(THD *thd, Item *a, Item *b, Item *c): Item_int_func(thd, a, b, c) {}
Item_bool_func(THD *thd, List<Item> &list): Item_int_func(thd, list) { }
Item_bool_func(THD *thd, Item_bool_func *item) :Item_int_func(thd, item) {}
- const Type_handler *type_handler() const { return &type_handler_long; }
- bool is_bool_type() { return true; }
- virtual CHARSET_INFO *compare_collation() const { return NULL; }
+ const Type_handler *type_handler() const { return &type_handler_bool; }
+ const Type_handler *fixed_type_handler() const { return &type_handler_bool; }
+ CHARSET_INFO *compare_collation() const { return NULL; }
bool fix_length_and_dec() { decimals=0; max_length=1; return FALSE; }
uint decimal_precision() const { return 1; }
bool need_parentheses_in_default() { return true; }
@@ -891,6 +896,7 @@ class Item_func_between :public Item_func_opt_neg
protected:
SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param,
Field *field, Item *value);
+ bool val_int_cmp_int_finalize(longlong value, longlong a, longlong b);
public:
String value0,value1,value2;
Item_func_between(THD *thd, Item *a, Item *b, Item *c):
@@ -931,7 +937,9 @@ public:
{ return get_item_copy<Item_func_between>(thd, this); }
longlong val_int_cmp_string();
- longlong val_int_cmp_temporal();
+ longlong val_int_cmp_datetime();
+ longlong val_int_cmp_time();
+ longlong val_int_cmp_native();
longlong val_int_cmp_int();
longlong val_int_cmp_real();
longlong val_int_cmp_decimal();
@@ -954,7 +962,7 @@ public:
{
if (agg_arg_charsets_for_comparison(cmp_collation, args, 2))
return TRUE;
- fix_char_length(2);
+ fix_char_length(2); // returns "1" or "0" or "-1"
return FALSE;
}
Item *get_copy(THD *thd)
@@ -1008,8 +1016,9 @@ public:
longlong int_op();
String *str_op(String *);
my_decimal *decimal_op(my_decimal *);
- bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool time_op(MYSQL_TIME *ltime);
+ bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ bool time_op(THD *thd, MYSQL_TIME *ltime);
+ bool native_op(THD *thd, Native *to);
bool fix_length_and_dec()
{
if (aggregate_for_result(func_name(), args, arg_count, true))
@@ -1087,8 +1096,9 @@ public:
longlong int_op();
String *str_op(String *str);
my_decimal *decimal_op(my_decimal *);
- bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool time_op(MYSQL_TIME *ltime);
+ bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ bool time_op(THD *thd, MYSQL_TIME *ltime);
+ bool native_op(THD *thd, Native *to);
bool fix_length_and_dec()
{
if (Item_func_case_abbreviation2::fix_length_and_dec2(args))
@@ -1122,12 +1132,12 @@ public:
:Item_func_case_abbreviation2(thd, a, b, c)
{ }
- bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- Datetime dt(current_thd, find_item(), fuzzydate);
+ Datetime_truncation_not_needed dt(thd, find_item(), fuzzydate);
return (null_value= dt.copy_to_mysql_time(ltime, mysql_timestamp_type()));
}
- bool time_op(MYSQL_TIME *ltime)
+ bool time_op(THD *thd, MYSQL_TIME *ltime)
{
return (null_value= Time(find_item()).copy_to_mysql_time(ltime));
}
@@ -1147,6 +1157,11 @@ public:
{
return val_str_from_item(find_item(), str);
}
+ bool native_op(THD *thd, Native *to)
+ {
+ return val_native_with_conversion_from_item(thd, find_item(), to,
+ type_handler());
+ }
};
@@ -1240,12 +1255,13 @@ public:
Item_func_hybrid_field_type::cleanup();
arg_count= 2; // See the comment to the constructor
}
- bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool time_op(MYSQL_TIME *ltime);
+ bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ bool time_op(THD *thd, MYSQL_TIME *ltime);
double real_op();
longlong int_op();
String *str_op(String *str);
my_decimal *decimal_op(my_decimal *);
+ bool native_op(THD *thd, Native *to);
bool fix_length_and_dec();
bool walk(Item_processor processor, bool walk_subquery, void *arg);
const char *func_name() const { return "nullif"; }
@@ -1282,7 +1298,11 @@ public:
{ reset_first_arg_if_needed(); return this; }
Item *derived_field_transformer_for_where(THD *thd, uchar *arg)
{ reset_first_arg_if_needed(); return this; }
- Item *derived_grouping_field_transformer_for_where(THD *thd, uchar *arg)
+ Item *grouping_field_transformer_for_where(THD *thd, uchar *arg)
+ { reset_first_arg_if_needed(); return this; }
+ Item *in_subq_field_transformer_for_where(THD *thd, uchar *arg)
+ { reset_first_arg_if_needed(); return this; }
+ Item *in_subq_field_transformer_for_having(THD *thd, uchar *arg)
{ reset_first_arg_if_needed(); return this; }
};
@@ -1405,13 +1425,24 @@ public:
};
+class in_timestamp :public in_vector
+{
+ Timestamp_or_zero_datetime tmp;
+public:
+ in_timestamp(THD *thd, uint elements);
+ void set(uint pos,Item *item);
+ uchar *get_value(Item *item);
+ Item* create_item(THD *thd);
+ void value_to_item(uint pos, Item *item);
+ const Type_handler *type_handler() const { return &type_handler_timestamp2; }
+};
+
+
/*
Class to represent a vector of constant DATE/DATETIME values.
*/
class in_temporal :public in_longlong
{
-protected:
- uchar *get_value_internal(Item *item, enum_field_types f_type);
public:
/* Cache for the left item. */
@@ -1424,8 +1455,6 @@ public:
Item_datetime *dt= static_cast<Item_datetime*>(item);
dt->set(val->val, type_handler()->mysql_timestamp_type());
}
- uchar *get_value(Item *item)
- { return get_value_internal(item, type_handler()->field_type()); }
friend int cmp_longlong(void *cmp_arg, packed_longlong *a,packed_longlong *b);
};
@@ -1437,6 +1466,7 @@ public:
:in_temporal(thd, elements)
{}
void set(uint pos,Item *item);
+ uchar *get_value(Item *item);
const Type_handler *type_handler() const { return &type_handler_datetime2; }
};
@@ -1448,6 +1478,7 @@ public:
:in_temporal(thd, elements)
{}
void set(uint pos,Item *item);
+ uchar *get_value(Item *item);
const Type_handler *type_handler() const { return &type_handler_time2; }
};
@@ -1622,7 +1653,6 @@ class cmp_item_temporal: public cmp_item_scalar
{
protected:
longlong value;
- void store_value_internal(Item *item, enum_field_types type);
public:
cmp_item_temporal() {}
int compare(cmp_item *ci);
@@ -1637,7 +1667,8 @@ public:
{ }
void store_value(Item *item)
{
- store_value_internal(item, MYSQL_TYPE_DATETIME);
+ value= item->val_datetime_packed(current_thd);
+ m_null_value= item->null_value;
}
int cmp_not_null(const Value *val);
int cmp(Item *arg);
@@ -1653,13 +1684,28 @@ public:
{ }
void store_value(Item *item)
{
- store_value_internal(item, MYSQL_TYPE_TIME);
+ value= item->val_time_packed(current_thd);
+ m_null_value= item->null_value;
}
int cmp_not_null(const Value *val);
int cmp(Item *arg);
cmp_item *make_same();
};
+
+class cmp_item_timestamp: public cmp_item_scalar
+{
+ Timestamp_or_zero_datetime_native m_native;
+public:
+ cmp_item_timestamp() :cmp_item_scalar() { }
+ void store_value(Item *item);
+ int cmp_not_null(const Value *val);
+ int cmp(Item *arg);
+ int compare(cmp_item *ci);
+ cmp_item *make_same();
+};
+
+
class cmp_item_real : public cmp_item_scalar
{
double value;
@@ -1891,7 +1937,7 @@ class Predicant_to_list_comparator
return UNKNOWN;
return in_item->cmp(args->arguments()[m_comparators[i].m_arg_index]);
}
- int cmp_args_nulls_equal(Item_args *args, uint i)
+ int cmp_args_nulls_equal(THD *thd, Item_args *args, uint i)
{
Predicant_to_value_comparator *cmp=
&m_comparators[m_comparators[i].m_handler_index];
@@ -1902,7 +1948,7 @@ class Predicant_to_list_comparator
ValueBuffer<MAX_FIELD_WIDTH> val;
if (m_comparators[i].m_handler_index == i)
in_item->store_value(predicant);
- m_comparators[i].m_handler->Item_save_in_value(arg, &val);
+ m_comparators[i].m_handler->Item_save_in_value(thd, arg, &val);
if (predicant->null_value && val.is_null())
return FALSE; // Two nulls are equal
if (predicant->null_value || val.is_null())
@@ -2083,12 +2129,12 @@ public:
/*
Same as above, but treats two NULLs as equal, e.g. as in DECODE_ORACLE().
*/
- bool cmp_nulls_equal(Item_args *args, uint *idx)
+ bool cmp_nulls_equal(THD *thd, Item_args *args, uint *idx)
{
for (uint i= 0 ; i < m_comparator_count ; i++)
{
DBUG_ASSERT(m_comparators[i].m_handler != NULL);
- if (cmp_args_nulls_equal(args, i) == FALSE)
+ if (cmp_args_nulls_equal(thd, args, i) == FALSE)
{
*idx= m_comparators[i].m_arg_index;
return false; // Found a matching value
@@ -2124,8 +2170,9 @@ public:
longlong int_op();
String *str_op(String *);
my_decimal *decimal_op(my_decimal *);
- bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool time_op(MYSQL_TIME *ltime);
+ bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ bool time_op(THD *thd, MYSQL_TIME *ltime);
+ bool native_op(THD *thd, Native *to);
bool fix_fields(THD *thd, Item **ref);
table_map not_null_tables() const { return 0; }
const char *func_name() const { return "case"; }
@@ -2421,12 +2468,19 @@ class cmp_item_row :public cmp_item
{
cmp_item **comparators;
uint n;
+ bool alloc_comparators(THD *thd, uint n);
+ bool aggregate_row_elements_for_comparison(THD *thd,
+ Type_handler_hybrid_field_type *cmp,
+ Item_args *tmp,
+ const char *funcname,
+ uint col,
+ uint level);
public:
cmp_item_row(): comparators(0), n(0) {}
~cmp_item_row();
void store_value(Item *item);
- bool alloc_comparators(THD *thd, uint n);
- bool prepare_comparators(THD *, Item **args, uint arg_count);
+ bool prepare_comparators(THD *, const char *funcname,
+ const Item_args *args, uint level);
int cmp(Item *arg);
int cmp_not_null(const Value *val)
{
@@ -2507,9 +2561,8 @@ public:
{
Field *field=((Item_field*) args[0]->real_item())->field;
- if (((field->type() == MYSQL_TYPE_DATE) ||
- (field->type() == MYSQL_TYPE_DATETIME)) &&
- (field->flags & NOT_NULL_FLAG))
+ if ((field->flags & NOT_NULL_FLAG) &&
+ field->type_handler()->cond_notnull_field_isnull_to_field_eq_zero())
return true;
}
return false;
@@ -2608,7 +2661,6 @@ class Item_func_like :public Item_bool_func2
bool escape_used_in_parsing;
bool use_sampling;
- bool negated;
DTCollation cmp_collation;
String cmp_value1, cmp_value2;
@@ -2625,11 +2677,15 @@ protected:
Item_func::Functype type, Item *value);
public:
int escape;
+ bool negated;
Item_func_like(THD *thd, Item *a, Item *b, Item *escape_arg, bool escape_used):
Item_bool_func2(thd, a, b), canDoTurboBM(FALSE), pattern(0), pattern_len(0),
bmGs(0), bmBc(0), escape_item(escape_arg),
escape_used_in_parsing(escape_used), use_sampling(0), negated(0) {}
+
+ bool get_negated() const { return negated; } // Used by ColumnStore
+
longlong val_int();
enum Functype functype() const { return LIKE_FUNC; }
void print(String *str, enum_query_type query_type);
@@ -3085,7 +3141,6 @@ class Item_equal: public Item_bool_func
const Type_handler *m_compare_handler;
CHARSET_INFO *m_compare_collation;
- String cmp_value1, cmp_value2;
public:
COND_EQUAL *upper_levels; /* multiple equalities of upper and levels */
@@ -3143,8 +3198,15 @@ public:
{
return used_tables() & tab_map;
}
+ bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred);
+ bool excl_dep_on_grouping_fields(st_select_lex *sel);
+ bool create_pushable_equalities(THD *thd, List<Item> *equalities,
+ Pushdown_checker checker, uchar *arg);
+ /* Return the number of elements in this multiple equality */
+ uint elements_count() { return equal_items.elements; }
friend class Item_equal_fields_iterator;
bool count_sargable_conds(void *arg);
+ Item *multiple_equality_transformer(THD *thd, uchar *arg);
friend class Item_equal_iterator<List_iterator_fast,Item>;
friend class Item_equal_iterator<List_iterator,Item>;
friend Item *eliminate_item_equal(THD *thd, COND *cond,
@@ -3180,6 +3242,10 @@ public:
else
current_level= cond_equal.current_level;
}
+ bool is_empty()
+ {
+ return (current_level.elements == 0);
+ }
};
@@ -3294,11 +3360,8 @@ public:
inline bool is_cond_and(Item *item)
{
- if (item->type() != Item::COND_ITEM)
- return FALSE;
-
- Item_cond *cond_item= (Item_cond*) item;
- return (cond_item->functype() == Item_func::COND_AND_FUNC);
+ Item_func *func_item= item->get_item_func();
+ return func_item && func_item->functype() == Item_func::COND_AND_FUNC;
}
class Item_cond_or :public Item_cond
@@ -3399,11 +3462,8 @@ public:
inline bool is_cond_or(Item *item)
{
- if (item->type() != Item::COND_ITEM)
- return FALSE;
-
- Item_cond *cond_item= (Item_cond*) item;
- return (cond_item->functype() == Item_func::COND_OR_FUNC);
+ Item_func *func_item= item->get_item_func();
+ return func_item && func_item->functype() == Item_func::COND_OR_FUNC;
}
Item *and_expressions(Item *a, Item *b, Item **org_item);
diff --git a/sql/item_create.cc b/sql/item_create.cc
index 84f2c91ba54..ba7a704e29b 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -3193,6 +3193,45 @@ protected:
};
#endif
+#ifdef WITH_WSREP
+class Create_func_wsrep_last_written_gtid : public Create_func_arg0
+{
+public:
+ virtual Item *create_builder(THD *thd);
+
+ static Create_func_wsrep_last_written_gtid s_singleton;
+
+protected:
+ Create_func_wsrep_last_written_gtid() {}
+ virtual ~Create_func_wsrep_last_written_gtid() {}
+};
+
+
+class Create_func_wsrep_last_seen_gtid : public Create_func_arg0
+{
+public:
+ virtual Item *create_builder(THD *thd);
+
+ static Create_func_wsrep_last_seen_gtid s_singleton;
+
+protected:
+ Create_func_wsrep_last_seen_gtid() {}
+ virtual ~Create_func_wsrep_last_seen_gtid() {}
+};
+
+
+class Create_func_wsrep_sync_wait_upto : public Create_native_func
+{
+public:
+ virtual Item *create_native(THD *thd, LEX_CSTRING *name, List<Item> *item_list);
+
+ static Create_func_wsrep_sync_wait_upto s_singleton;
+
+protected:
+ Create_func_wsrep_sync_wait_upto() {}
+ virtual ~Create_func_wsrep_sync_wait_upto() {}
+};
+#endif /* WITH_WSREP */
#ifdef HAVE_SPATIAL
class Create_func_x : public Create_func_arg1
@@ -3487,12 +3526,11 @@ Create_sp_func::create_with_db(THD *thd, LEX_CSTRING *db, LEX_CSTRING *name,
sph->add_used_routine(lex, thd, qname);
if (pkgname.m_name.length)
sp_handler_package_body.add_used_routine(lex, thd, &pkgname);
+ Name_resolution_context *ctx= lex->current_context();
if (arg_count > 0)
- func= new (thd->mem_root) Item_func_sp(thd, lex->current_context(),
- qname, sph, *item_list);
+ func= new (thd->mem_root) Item_func_sp(thd, ctx, qname, sph, *item_list);
else
- func= new (thd->mem_root) Item_func_sp(thd, lex->current_context(),
- qname, sph);
+ func= new (thd->mem_root) Item_func_sp(thd, ctx, qname, sph);
lex->safe_to_cache_query= 0;
return func;
@@ -3637,7 +3675,7 @@ Create_func_addtime Create_func_addtime::s_singleton;
Item*
Create_func_addtime::create_2_arg(THD *thd, Item *arg1, Item *arg2)
{
- return new (thd->mem_root) Item_func_add_time(thd, arg1, arg2, 0, 0);
+ return new (thd->mem_root) Item_func_add_time(thd, arg1, arg2, false);
}
@@ -6104,7 +6142,26 @@ Create_func_name_const Create_func_name_const::s_singleton;
Item*
Create_func_name_const::create_2_arg(THD *thd, Item *arg1, Item *arg2)
{
- return new (thd->mem_root) Item_name_const(thd, arg1, arg2);
+ if (!arg1->basic_const_item())
+ goto err;
+
+ if (arg2->basic_const_item())
+ return new (thd->mem_root) Item_name_const(thd, arg1, arg2);
+
+ if (arg2->type() == Item::FUNC_ITEM)
+ {
+ Item_func *value_func= (Item_func *) arg2;
+ if (value_func->functype() != Item_func::COLLATE_FUNC &&
+ value_func->functype() != Item_func::NEG_FUNC)
+ goto err;
+
+ if (!value_func->key_item()->basic_const_item())
+ goto err;
+ return new (thd->mem_root) Item_name_const(thd, arg1, arg2);
+ }
+err:
+ my_error(ER_WRONG_ARGUMENTS, MYF(0), "NAME_CONST");
+ return NULL;
}
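
The NAME_CONST builder above now rejects non-constant arguments up front: the name must be a basic constant, and the value must be either a basic constant or a COLLATE/negation wrapper around one; anything else raises ER_WRONG_ARGUMENTS. A compact sketch of that acceptance rule over a hypothetical expression tag, not the real Item hierarchy:

    enum ExprKind { BASIC_CONST, COLLATE_OF_CONST, NEG_OF_CONST, OTHER };

    // Mirrors the checks in Create_func_name_const::create_2_arg():
    // arg1 must be a basic constant; arg2 may additionally be COLLATE(...)
    // or -(...) applied to a basic constant.
    bool name_const_args_ok(ExprKind name, ExprKind value)
    {
      if (name != BASIC_CONST)
        return false;
      return value == BASIC_CONST ||
             value == COLLATE_OF_CONST ||
             value == NEG_OF_CONST;
    }

    int main()
    {
      // NAME_CONST('n', -1) is accepted, NAME_CONST('n', RAND()) is not.
      return (name_const_args_ok(BASIC_CONST, NEG_OF_CONST) &&
              !name_const_args_ok(BASIC_CONST, OTHER)) ? 0 : 1;
    }
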
@@ -6658,7 +6715,7 @@ Create_func_subtime Create_func_subtime::s_singleton;
Item*
Create_func_subtime::create_2_arg(THD *thd, Item *arg1, Item *arg2)
{
- return new (thd->mem_root) Item_func_add_time(thd, arg1, arg2, 0, 1);
+ return new (thd->mem_root) Item_func_add_time(thd, arg1, arg2, true);
}
@@ -6887,6 +6944,63 @@ Create_func_within::create_2_arg(THD *thd, Item *arg1, Item *arg2)
}
#endif
+#ifdef WITH_WSREP
+Create_func_wsrep_last_written_gtid
+Create_func_wsrep_last_written_gtid::s_singleton;
+
+Item*
+Create_func_wsrep_last_written_gtid::create_builder(THD *thd)
+{
+ thd->lex->safe_to_cache_query= 0;
+ return new (thd->mem_root) Item_func_wsrep_last_written_gtid(thd);
+}
+
+
+Create_func_wsrep_last_seen_gtid
+Create_func_wsrep_last_seen_gtid::s_singleton;
+
+Item*
+Create_func_wsrep_last_seen_gtid::create_builder(THD *thd)
+{
+ thd->lex->safe_to_cache_query= 0;
+ return new (thd->mem_root) Item_func_wsrep_last_seen_gtid(thd);
+}
+
+
+Create_func_wsrep_sync_wait_upto
+Create_func_wsrep_sync_wait_upto::s_singleton;
+
+Item*
+Create_func_wsrep_sync_wait_upto::create_native(THD *thd,
+ LEX_CSTRING *name,
+ List<Item> *item_list)
+{
+ Item *func= NULL;
+ int arg_count= 0;
+ Item *param_1, *param_2;
+
+ if (item_list != NULL)
+ arg_count= item_list->elements;
+
+ switch (arg_count)
+ {
+ case 1:
+ param_1= item_list->pop();
+ func= new (thd->mem_root) Item_func_wsrep_sync_wait_upto(thd, param_1);
+ break;
+ case 2:
+ param_1= item_list->pop();
+ param_2= item_list->pop();
+ func= new (thd->mem_root) Item_func_wsrep_sync_wait_upto(thd, param_1, param_2);
+ break;
+ default:
+ my_error(ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT, MYF(0), name->str);
+ break;
+ }
+ thd->lex->safe_to_cache_query= 0;
+ return func;
+}
+#endif /* WITH_WSREP */
#ifdef HAVE_SPATIAL
Create_func_x Create_func_x::s_singleton;
@@ -7329,6 +7443,11 @@ static Native_func_registry func_array[] =
{ { STRING_WITH_LEN("WEEKDAY") }, BUILDER(Create_func_weekday)},
{ { STRING_WITH_LEN("WEEKOFYEAR") }, BUILDER(Create_func_weekofyear)},
{ { STRING_WITH_LEN("WITHIN") }, GEOM_BUILDER(Create_func_within)},
+#ifdef WITH_WSREP
+ { { STRING_WITH_LEN("WSREP_LAST_WRITTEN_GTID") }, BUILDER(Create_func_wsrep_last_written_gtid)},
+ { { STRING_WITH_LEN("WSREP_LAST_SEEN_GTID") }, BUILDER(Create_func_wsrep_last_seen_gtid)},
+ { { STRING_WITH_LEN("WSREP_SYNC_WAIT_UPTO_GTID") }, BUILDER(Create_func_wsrep_sync_wait_upto)},
+#endif /* WITH_WSREP */
{ { STRING_WITH_LEN("X") }, GEOM_BUILDER(Create_func_x)},
{ { STRING_WITH_LEN("Y") }, GEOM_BUILDER(Create_func_y)},
{ { STRING_WITH_LEN("YEARWEEK") }, BUILDER(Create_func_year_week)},
@@ -7433,84 +7552,6 @@ find_qualified_function_builder(THD *thd)
}
-static bool
-have_important_literal_warnings(const MYSQL_TIME_STATUS *status)
-{
- return (status->warnings & ~MYSQL_TIME_NOTE_TRUNCATED) != 0;
-}
-
-
-/**
- Builder for datetime literals:
- TIME'00:00:00', DATE'2001-01-01', TIMESTAMP'2001-01-01 00:00:00'.
- @param thd The current thread
- @param str Character literal
- @param length Length of str
- @param type Type of literal (TIME, DATE or DATETIME)
- @param send_error Whether to generate an error on failure
-*/
-
-Item *create_temporal_literal(THD *thd,
- const char *str, size_t length,
- CHARSET_INFO *cs,
- enum_field_types type,
- bool send_error)
-{
- MYSQL_TIME_STATUS status;
- MYSQL_TIME ltime;
- Item *item= NULL;
- sql_mode_t flags= sql_mode_for_dates(thd);
-
- switch(type)
- {
- case MYSQL_TYPE_DATE:
- case MYSQL_TYPE_NEWDATE:
- if (!str_to_datetime(cs, str, length, &ltime, flags, &status) &&
- ltime.time_type == MYSQL_TIMESTAMP_DATE && !status.warnings)
- item= new (thd->mem_root) Item_date_literal(thd, &ltime);
- break;
- case MYSQL_TYPE_DATETIME:
- if (!str_to_datetime(cs, str, length, &ltime, flags, &status) &&
- ltime.time_type == MYSQL_TIMESTAMP_DATETIME &&
- !have_important_literal_warnings(&status))
- item= new (thd->mem_root) Item_datetime_literal(thd, &ltime,
- status.precision);
- break;
- case MYSQL_TYPE_TIME:
- if (!str_to_time(cs, str, length, &ltime, 0, &status) &&
- ltime.time_type == MYSQL_TIMESTAMP_TIME &&
- !have_important_literal_warnings(&status))
- item= new (thd->mem_root) Item_time_literal(thd, &ltime,
- status.precision);
- break;
- default:
- DBUG_ASSERT(0);
- }
-
- if (likely(item))
- {
- if (status.warnings) // e.g. a note on nanosecond truncation
- {
- ErrConvString err(str, length, cs);
- make_truncated_value_warning(thd,
- Sql_condition::time_warn_level(status.warnings),
- &err, ltime.time_type, 0, 0);
- }
- return item;
- }
-
- if (send_error)
- {
- const char *typestr=
- (type == MYSQL_TYPE_DATE) ? "DATE" :
- (type == MYSQL_TYPE_TIME) ? "TIME" : "DATETIME";
- ErrConvString err(str, length, thd->variables.character_set_client);
- my_error(ER_WRONG_VALUE, MYF(0), typestr, err.ptr());
- }
- return NULL;
-}
-
-
static List<Item> *create_func_dyncol_prepare(THD *thd,
DYNCALL_CREATE_DEF **dfs,
List<DYNCALL_CREATE_DEF> &list)
diff --git a/sql/item_create.h b/sql/item_create.h
index 5983a092cdc..4fb3c07c4ae 100644
--- a/sql/item_create.h
+++ b/sql/item_create.h
@@ -191,21 +191,6 @@ protected:
#endif
-Item *create_temporal_literal(THD *thd,
- const char *str, size_t length,
- CHARSET_INFO *cs,
- enum_field_types type,
- bool send_error);
-inline
-Item *create_temporal_literal(THD *thd, const String *str,
- enum_field_types type,
- bool send_error)
-{
- return create_temporal_literal(thd,
- str->ptr(), str->length(), str->charset(),
- type, send_error);
-}
-
struct Native_func_registry
{
LEX_CSTRING name;
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 0a7693daaae..04fa20a8abf 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -133,7 +133,7 @@ void Item_func::sync_with_sum_func_and_with_field(List<Item> &list)
Item *item;
while ((item= li++))
{
- with_sum_func|= item->with_sum_func;
+ join_with_sum_func(item);
with_window_func|= item->with_window_func;
with_field|= item->with_field;
with_param|= item->with_param;
@@ -355,7 +355,7 @@ Item_func::fix_fields(THD *thd, Item **ref)
if (item->maybe_null)
maybe_null=1;
- with_sum_func= with_sum_func || item->with_sum_func;
+ join_with_sum_func(item);
with_param= with_param || item->with_param;
with_window_func= with_window_func || item->with_window_func;
with_field= with_field || item->with_field;
@@ -379,7 +379,7 @@ Item_func::quick_fix_field()
{
for (arg=args, arg_end=args+arg_count; arg != arg_end ; arg++)
{
- if (!(*arg)->fixed)
+ if (!(*arg)->is_fixed())
(*arg)->quick_fix_field();
}
}
@@ -722,7 +722,7 @@ void Item_func::signal_divide_by_null()
Item *Item_func::get_tmp_table_item(THD *thd)
{
- if (!with_sum_func && !const_item())
+ if (!Item_func::with_sum_func() && !const_item())
return new (thd->mem_root) Item_temptable_field(thd, result_field);
return copy_or_same(thd);
}
@@ -794,51 +794,6 @@ bool Item_func_plus::fix_length_and_dec(void)
}
-String *Item_func_hybrid_field_type::val_str_from_decimal_op(String *str)
-{
- my_decimal decimal_value, *val;
- if (!(val= decimal_op_with_null_check(&decimal_value)))
- return 0; // null is set
- DBUG_ASSERT(!null_value);
- my_decimal_round(E_DEC_FATAL_ERROR, val, decimals, FALSE, val);
- str->set_charset(collation.collation);
- my_decimal2string(E_DEC_FATAL_ERROR, val, 0, 0, 0, str);
- return str;
-}
-
-double Item_func_hybrid_field_type::val_real_from_decimal_op()
-{
- my_decimal decimal_value, *val;
- if (!(val= decimal_op_with_null_check(&decimal_value)))
- return 0.0; // null is set
- double result;
- my_decimal2double(E_DEC_FATAL_ERROR, val, &result);
- return result;
-}
-
-longlong Item_func_hybrid_field_type::val_int_from_decimal_op()
-{
- my_decimal decimal_value, *val;
- if (!(val= decimal_op_with_null_check(&decimal_value)))
- return 0; // null is set
- longlong result;
- my_decimal2int(E_DEC_FATAL_ERROR, val, unsigned_flag, &result);
- return result;
-}
-
-bool Item_func_hybrid_field_type::get_date_from_decimal_op(MYSQL_TIME *ltime,
- ulonglong fuzzydate)
-{
- my_decimal value, *res;
- if (!(res= decimal_op_with_null_check(&value)) ||
- decimal_to_datetime_with_warn(res, ltime, fuzzydate,
- field_table_or_null(),
- field_name_or_null()))
- return make_zero_mysql_time(ltime, fuzzydate);
- return (null_value= 0);
-}
-
-
String *Item_func_hybrid_field_type::val_str_from_int_op(String *str)
{
longlong nr= int_op();
@@ -864,19 +819,6 @@ Item_func_hybrid_field_type::val_decimal_from_int_op(my_decimal *dec)
return dec;
}
-bool Item_func_hybrid_field_type::get_date_from_int_op(MYSQL_TIME *ltime,
- ulonglong fuzzydate)
-{
- longlong value= int_op();
- bool neg= !unsigned_flag && value < 0;
- if (null_value || int_to_datetime_with_warn(neg, neg ? -value : value,
- ltime, fuzzydate,
- field_table_or_null(),
- field_name_or_null()))
- return make_zero_mysql_time(ltime, fuzzydate);
- return (null_value= 0);
-}
-
String *Item_func_hybrid_field_type::val_str_from_real_op(String *str)
{
@@ -902,22 +844,11 @@ Item_func_hybrid_field_type::val_decimal_from_real_op(my_decimal *dec)
return dec;
}
-bool Item_func_hybrid_field_type::get_date_from_real_op(MYSQL_TIME *ltime,
- ulonglong fuzzydate)
-{
- double value= real_op();
- if (null_value || double_to_datetime_with_warn(value, ltime, fuzzydate,
- field_table_or_null(),
- field_name_or_null()))
- return make_zero_mysql_time(ltime, fuzzydate);
- return (null_value= 0);
-}
-
String *Item_func_hybrid_field_type::val_str_from_date_op(String *str)
{
MYSQL_TIME ltime;
- if (date_op_with_null_check(&ltime) ||
+ if (date_op_with_null_check(current_thd, &ltime) ||
(null_value= str->alloc(MAX_DATE_STRING_REP_LENGTH)))
return (String *) 0;
str->length(my_TIME_to_str(&ltime, const_cast<char*>(str->ptr()), decimals));
@@ -929,7 +860,7 @@ String *Item_func_hybrid_field_type::val_str_from_date_op(String *str)
double Item_func_hybrid_field_type::val_real_from_date_op()
{
MYSQL_TIME ltime;
- if (date_op_with_null_check(&ltime))
+ if (date_op_with_null_check(current_thd, &ltime))
return 0;
return TIME_to_double(&ltime);
}
@@ -937,7 +868,7 @@ double Item_func_hybrid_field_type::val_real_from_date_op()
longlong Item_func_hybrid_field_type::val_int_from_date_op()
{
MYSQL_TIME ltime;
- if (date_op_with_null_check(&ltime))
+ if (date_op_with_null_check(current_thd, &ltime))
return 0;
return TIME_to_ulonglong(&ltime);
}
@@ -946,7 +877,7 @@ my_decimal *
Item_func_hybrid_field_type::val_decimal_from_date_op(my_decimal *dec)
{
MYSQL_TIME ltime;
- if (date_op_with_null_check(&ltime))
+ if (date_op_with_null_check(current_thd, &ltime))
{
my_decimal_set_zero(dec);
return 0;
@@ -958,7 +889,7 @@ Item_func_hybrid_field_type::val_decimal_from_date_op(my_decimal *dec)
String *Item_func_hybrid_field_type::val_str_from_time_op(String *str)
{
MYSQL_TIME ltime;
- if (time_op_with_null_check(&ltime) ||
+ if (time_op_with_null_check(current_thd, &ltime) ||
(null_value= my_TIME_to_str(&ltime, str, decimals)))
return NULL;
return str;
@@ -967,20 +898,22 @@ String *Item_func_hybrid_field_type::val_str_from_time_op(String *str)
double Item_func_hybrid_field_type::val_real_from_time_op()
{
MYSQL_TIME ltime;
- return time_op_with_null_check(&ltime) ? 0 : TIME_to_double(&ltime);
+ return time_op_with_null_check(current_thd, &ltime) ? 0 :
+ TIME_to_double(&ltime);
}
longlong Item_func_hybrid_field_type::val_int_from_time_op()
{
MYSQL_TIME ltime;
- return time_op_with_null_check(&ltime) ? 0 : TIME_to_ulonglong(&ltime);
+ return time_op_with_null_check(current_thd, &ltime) ? 0 :
+ TIME_to_ulonglong(&ltime);
}
my_decimal *
Item_func_hybrid_field_type::val_decimal_from_time_op(my_decimal *dec)
{
MYSQL_TIME ltime;
- if (time_op_with_null_check(&ltime))
+ if (time_op_with_null_check(current_thd, &ltime))
{
my_decimal_set_zero(dec);
return 0;
@@ -1008,18 +941,6 @@ Item_func_hybrid_field_type::val_decimal_from_str_op(my_decimal *decimal_value)
return res ? decimal_from_string_with_check(decimal_value, res) : 0;
}
-bool Item_func_hybrid_field_type::get_date_from_str_op(MYSQL_TIME *ltime,
- ulonglong fuzzydate)
-{
- StringBuffer<40> tmp;
- String *res;
- if (!(res= str_op_with_null_check(&tmp)) ||
- str_to_datetime_with_warn(res->charset(), res->ptr(), res->length(),
- ltime, fuzzydate))
- return make_zero_mysql_time(ltime, fuzzydate);
- return (null_value= 0);
-}
-
void Item_func_signed::print(String *str, enum_query_type query_type)
{
@@ -1039,47 +960,15 @@ void Item_func_unsigned::print(String *str, enum_query_type query_type)
}
-String *Item_decimal_typecast::val_str(String *str)
-{
- my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf);
- if (null_value)
- return NULL;
- my_decimal2string(E_DEC_FATAL_ERROR, tmp, 0, 0, 0, str);
- return str;
-}
-
-
-double Item_decimal_typecast::val_real()
-{
- my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf);
- double res;
- if (null_value)
- return 0.0;
- my_decimal2double(E_DEC_FATAL_ERROR, tmp, &res);
- return res;
-}
-
-
-longlong Item_decimal_typecast::val_int()
-{
- my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf);
- longlong res;
- if (null_value)
- return 0;
- my_decimal2int(E_DEC_FATAL_ERROR, tmp, unsigned_flag, &res);
- return res;
-}
-
-
my_decimal *Item_decimal_typecast::val_decimal(my_decimal *dec)
{
- my_decimal tmp_buf, *tmp= args[0]->val_decimal(&tmp_buf);
+ VDec tmp(args[0]);
bool sign;
uint precision;
- if ((null_value= args[0]->null_value))
+ if ((null_value= tmp.is_null()))
return NULL;
- my_decimal_round(E_DEC_FATAL_ERROR, tmp, decimals, FALSE, dec);
+ tmp.round_to(dec, decimals, HALF_UP);
sign= dec->sign();
if (unsigned_flag)
{
@@ -1263,17 +1152,13 @@ err:
my_decimal *Item_func_plus::decimal_op(my_decimal *decimal_value)
{
- my_decimal value1, *val1;
- my_decimal value2, *val2;
- val1= args[0]->val_decimal(&value1);
- if ((null_value= args[0]->null_value))
- return 0;
- val2= args[1]->val_decimal(&value2);
- if (!(null_value= (args[1]->null_value ||
+ VDec2_lazy val(args[0], args[1]);
+ if (!(null_value= (val.has_null() ||
check_decimal_overflow(my_decimal_add(E_DEC_FATAL_ERROR &
~E_DEC_OVERFLOW,
decimal_value,
- val1, val2)) > 3)))
+ val.m_a.ptr(),
+ val.m_b.ptr())) > 3)))
return decimal_value;
return 0;
}
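
The arithmetic decimal_op() methods now use VDec2_lazy instead of two explicit val_decimal()/null_value sequences. Its definition is not part of this diff; as a rough picture, it can be thought of as an "evaluate both, stop early on NULL" helper like the hypothetical sketch below (plain C++ optionals stand in for my_decimal, and the names Dec2Lazy/has_null are only mirrored from the usage shown here):

    #include <functional>
    #include <optional>

    using Dec = long double;                        // stand-in for my_decimal

    // Evaluates the first operand eagerly and the second only when the first
    // is non-NULL, then exposes a single has_null() check.
    struct Dec2Lazy
    {
      std::optional<Dec> m_a, m_b;

      Dec2Lazy(const std::function<std::optional<Dec>()> &a,
               const std::function<std::optional<Dec>()> &b)
      {
        m_a= a();
        if (m_a)
          m_b= b();                                 // skipped when m_a is NULL
      }
      bool has_null() const { return !m_a || !m_b; }
    };

    int main()
    {
      Dec2Lazy val([]{ return std::optional<Dec>(1.5L); },
                   []{ return std::optional<Dec>(2.25L); });
      if (val.has_null())
        return 1;
      return (*val.m_a + *val.m_b == 3.75L) ? 0 : 1;  // Item_func_plus analogue
    }
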
@@ -1403,18 +1288,13 @@ err:
my_decimal *Item_func_minus::decimal_op(my_decimal *decimal_value)
{
- my_decimal value1, *val1;
- my_decimal value2, *val2=
-
- val1= args[0]->val_decimal(&value1);
- if ((null_value= args[0]->null_value))
- return 0;
- val2= args[1]->val_decimal(&value2);
- if (!(null_value= (args[1]->null_value ||
- (check_decimal_overflow(my_decimal_sub(E_DEC_FATAL_ERROR &
- ~E_DEC_OVERFLOW,
- decimal_value, val1,
- val2)) > 3))))
+ VDec2_lazy val(args[0], args[1]);
+ if (!(null_value= (val.has_null() ||
+ check_decimal_overflow(my_decimal_sub(E_DEC_FATAL_ERROR &
+ ~E_DEC_OVERFLOW,
+ decimal_value,
+ val.m_a.ptr(),
+ val.m_b.ptr())) > 3)))
return decimal_value;
return 0;
}
@@ -1513,17 +1393,13 @@ err:
my_decimal *Item_func_mul::decimal_op(my_decimal *decimal_value)
{
- my_decimal value1, *val1;
- my_decimal value2, *val2;
- val1= args[0]->val_decimal(&value1);
- if ((null_value= args[0]->null_value))
- return 0;
- val2= args[1]->val_decimal(&value2);
- if (!(null_value= (args[1]->null_value ||
- (check_decimal_overflow(my_decimal_mul(E_DEC_FATAL_ERROR &
- ~E_DEC_OVERFLOW,
- decimal_value, val1,
- val2)) > 3))))
+ VDec2_lazy val(args[0], args[1]);
+ if (!(null_value= (val.has_null() ||
+ check_decimal_overflow(my_decimal_mul(E_DEC_FATAL_ERROR &
+ ~E_DEC_OVERFLOW,
+ decimal_value,
+ val.m_a.ptr(),
+ val.m_b.ptr())) > 3)))
return decimal_value;
return 0;
}
@@ -1574,21 +1450,15 @@ double Item_func_div::real_op()
my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value)
{
- my_decimal value1, *val1;
- my_decimal value2, *val2;
int err;
-
- val1= args[0]->val_decimal(&value1);
- if ((null_value= args[0]->null_value))
- return 0;
- val2= args[1]->val_decimal(&value2);
- if ((null_value= args[1]->null_value))
+ VDec2_lazy val(args[0], args[1]);
+ if ((null_value= val.has_null()))
return 0;
if ((err= check_decimal_overflow(my_decimal_div(E_DEC_FATAL_ERROR &
~E_DEC_OVERFLOW &
~E_DEC_DIV_ZERO,
decimal_value,
- val1, val2,
+ val.m_a.ptr(), val.m_b.ptr(),
prec_increment))) > 3)
{
if (err == E_DEC_DIV_ZERO)
@@ -1678,20 +1548,14 @@ longlong Item_func_int_div::val_int()
if (args[0]->result_type() != INT_RESULT ||
args[1]->result_type() != INT_RESULT)
{
- my_decimal tmp;
- my_decimal *val0p= args[0]->val_decimal(&tmp);
- if ((null_value= args[0]->null_value))
- return 0;
- my_decimal val0= *val0p;
-
- my_decimal *val1p= args[1]->val_decimal(&tmp);
- if ((null_value= args[1]->null_value))
+ VDec2_lazy val(args[0], args[1]);
+ if ((null_value= val.has_null()))
return 0;
- my_decimal val1= *val1p;
int err;
+ my_decimal tmp;
if ((err= my_decimal_div(E_DEC_FATAL_ERROR & ~E_DEC_DIV_ZERO, &tmp,
- &val0, &val1, 0)) > 3)
+ val.m_a.ptr(), val.m_b.ptr(), 0)) > 3)
{
if (err == E_DEC_DIV_ZERO)
signal_divide_by_null();
@@ -1699,8 +1563,7 @@ longlong Item_func_int_div::val_int()
}
my_decimal truncated;
- const bool do_truncate= true;
- if (my_decimal_round(E_DEC_FATAL_ERROR, &tmp, 0, do_truncate, &truncated))
+ if (tmp.round_to(&truncated, 0, TRUNCATE))
DBUG_ASSERT(false);
longlong res;
@@ -1798,17 +1661,11 @@ double Item_func_mod::real_op()
my_decimal *Item_func_mod::decimal_op(my_decimal *decimal_value)
{
- my_decimal value1, *val1;
- my_decimal value2, *val2;
-
- val1= args[0]->val_decimal(&value1);
- if ((null_value= args[0]->null_value))
- return 0;
- val2= args[1]->val_decimal(&value2);
- if ((null_value= args[1]->null_value))
+ VDec2_lazy val(args[0], args[1]);
+ if ((null_value= val.has_null()))
return 0;
switch (my_decimal_mod(E_DEC_FATAL_ERROR & ~E_DEC_DIV_ZERO, decimal_value,
- val1, val2)) {
+ val.m_a.ptr(), val.m_b.ptr())) {
case E_DEC_TRUNCATED:
case E_DEC_OK:
return decimal_value;
@@ -1845,6 +1702,46 @@ bool Item_func_mod::fix_length_and_dec()
DBUG_RETURN(FALSE);
}
+static void calc_hash_for_unique(ulong &nr1, ulong &nr2, String *str)
+{
+ CHARSET_INFO *cs;
+ uchar l[4];
+ int4store(l, str->length());
+ cs= str->charset();
+ cs->coll->hash_sort(cs, l, sizeof(l), &nr1, &nr2);
+ cs= str->charset();
+ cs->coll->hash_sort(cs, (uchar *)str->ptr(), str->length(), &nr1, &nr2);
+}
+
+longlong Item_func_hash::val_int()
+{
+ DBUG_EXECUTE_IF("same_long_unique_hash", return 9;);
+ unsigned_flag= true;
+ ulong nr1= 1,nr2= 4;
+ String * str;
+ for(uint i= 0;i<arg_count;i++)
+ {
+ str = args[i]->val_str();
+ if(args[i]->null_value)
+ {
+ null_value= 1;
+ return 0;
+ }
+ calc_hash_for_unique(nr1, nr2, str);
+ }
+ null_value= 0;
+ return (longlong)nr1;
+}
+
+
+bool Item_func_hash::fix_length_and_dec()
+{
+ decimals= 0;
+ max_length= 8;
+ return false;
+}
+
+
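
Item_func_hash above feeds each argument's 4-byte length followed by its bytes into the collation's hash_sort(), which helps keep keys that only differ in how the data is split between columns (e.g. ('ab','c') vs ('a','bc')) from colliding trivially. A standalone analogue of the length-prefix-then-bytes pattern using FNV-1a instead of the collation-aware hash (assumption: any mixing function is fine for the illustration; the length is copied in native byte order, whereas int4store is little-endian):

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    static void fnv1a(uint64_t &h, const unsigned char *p, size_t len)
    {
      for (size_t i= 0; i < len; i++)
      {
        h^= p[i];
        h*= 1099511628211ULL;                 // FNV-1a prime
      }
    }

    // Hash the length prefix, then the bytes, for every column of the key.
    uint64_t hash_for_unique(const std::vector<std::string> &cols)
    {
      uint64_t h= 14695981039346656037ULL;    // FNV-1a offset basis
      for (const std::string &s : cols)
      {
        unsigned char l[4];
        uint32_t len= (uint32_t) s.size();
        std::memcpy(l, &len, 4);              // int4store analogue
        fnv1a(h, l, 4);
        fnv1a(h, (const unsigned char *) s.data(), s.size());
      }
      return h;
    }

    int main()
    {
      return hash_for_unique({"ab", "c"}) != hash_for_unique({"a", "bc"}) ? 0 : 1;
    }
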
double Item_func_neg::real_op()
{
@@ -1878,10 +1775,10 @@ longlong Item_func_neg::int_op()
my_decimal *Item_func_neg::decimal_op(my_decimal *decimal_value)
{
- my_decimal val, *value= args[0]->val_decimal(&val);
- if (!(null_value= args[0]->null_value))
+ VDec value(args[0]);
+ if (!(null_value= value.is_null()))
{
- my_decimal2decimal(value, decimal_value);
+ my_decimal2decimal(value.ptr(), decimal_value);
my_decimal_neg(decimal_value);
return decimal_value;
}
@@ -1905,7 +1802,7 @@ void Item_func_neg::fix_length_and_dec_int()
longlong val= args[0]->val_int();
if ((ulonglong) val >= (ulonglong) LONGLONG_MIN &&
((ulonglong) val != (ulonglong) LONGLONG_MIN ||
- args[0]->type() != INT_ITEM))
+ !args[0]->is_of_type(CONST_ITEM, INT_RESULT)))
{
/*
Ensure that result is converted to DECIMAL, as longlong can't hold
@@ -1976,10 +1873,10 @@ longlong Item_func_abs::int_op()
my_decimal *Item_func_abs::decimal_op(my_decimal *decimal_value)
{
- my_decimal val, *value= args[0]->val_decimal(&val);
- if (!(null_value= args[0]->null_value))
+ VDec value(args[0]);
+ if (!(null_value= value.is_null()))
{
- my_decimal2decimal(value, decimal_value);
+ my_decimal2decimal(value.ptr(), decimal_value);
if (decimal_value->sign())
my_decimal_neg(decimal_value);
return decimal_value;
@@ -2301,25 +2198,15 @@ bool Item_func_int_val::fix_length_and_dec()
longlong Item_func_ceiling::int_op()
{
- longlong result;
switch (args[0]->result_type()) {
case INT_RESULT:
- result= args[0]->val_int();
- null_value= args[0]->null_value;
- break;
+ return val_int_from_item(args[0]);
case DECIMAL_RESULT:
- {
- my_decimal dec_buf, *dec;
- if ((dec= Item_func_ceiling::decimal_op(&dec_buf)))
- my_decimal2int(E_DEC_FATAL_ERROR, dec, unsigned_flag, &result);
- else
- result= 0;
+ return VDec_op(this).to_longlong(unsigned_flag);
+ default:
break;
}
- default:
- result= (longlong)Item_func_ceiling::real_op();
- };
- return result;
+ return (longlong) Item_func_ceiling::real_op();
}
@@ -2337,10 +2224,9 @@ double Item_func_ceiling::real_op()
my_decimal *Item_func_ceiling::decimal_op(my_decimal *decimal_value)
{
- my_decimal val, *value= args[0]->val_decimal(&val);
- if (!(null_value= (args[0]->null_value ||
- my_decimal_ceiling(E_DEC_FATAL_ERROR, value,
- decimal_value) > 1)))
+ VDec value(args[0]);
+ if (!(null_value= (value.is_null() ||
+ value.round_to(decimal_value, 0, CEILING) > 1)))
return decimal_value;
return 0;
}
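VDec (and the two-argument VDec2_lazy) bundle an argument's val_decimal() result with its null flag, which is what lets the rewritten decimal_op() bodies collapse the evaluate/check/use pattern into a couple of lines. A hedged sketch of the calling pattern (the wrapper function is invented for illustration):

static bool decimal_arg_is_negative(Item *arg)
{
  VDec val(arg);                 // evaluates arg->val_decimal() into its own buffer
  return !val.is_null() && val.ptr()->sign();
}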
@@ -2348,25 +2234,19 @@ my_decimal *Item_func_ceiling::decimal_op(my_decimal *decimal_value)
longlong Item_func_floor::int_op()
{
- longlong result;
switch (args[0]->result_type()) {
case INT_RESULT:
- result= args[0]->val_int();
- null_value= args[0]->null_value;
- break;
+ return val_int_from_item(args[0]);
case DECIMAL_RESULT:
{
my_decimal dec_buf, *dec;
- if ((dec= Item_func_floor::decimal_op(&dec_buf)))
- my_decimal2int(E_DEC_FATAL_ERROR, dec, unsigned_flag, &result);
- else
- result= 0;
- break;
+ return (!(dec= Item_func_floor::decimal_op(&dec_buf))) ? 0 :
+ dec->to_longlong(unsigned_flag);
}
default:
- result= (longlong)Item_func_floor::real_op();
- };
- return result;
+ break;
+ }
+ return (longlong) Item_func_floor::real_op();
}
@@ -2384,10 +2264,9 @@ double Item_func_floor::real_op()
my_decimal *Item_func_floor::decimal_op(my_decimal *decimal_value)
{
- my_decimal val, *value= args[0]->val_decimal(&val);
- if (!(null_value= (args[0]->null_value ||
- my_decimal_floor(E_DEC_FATAL_ERROR, value,
- decimal_value) > 1)))
+ VDec value(args[0]);
+ if (!(null_value= (value.is_null() ||
+ value.round_to(decimal_value, 0, FLOOR) > 1)))
return decimal_value;
return 0;
}
@@ -2574,16 +2453,16 @@ longlong Item_func_round::int_op()
my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value)
{
- my_decimal val, *value= args[0]->val_decimal(&val);
+ VDec value(args[0]);
longlong dec= args[1]->val_int();
if (dec >= 0 || args[1]->unsigned_flag)
dec= MY_MIN((ulonglong) dec, decimals);
else if (dec < INT_MIN)
dec= INT_MIN;
- if (!(null_value= (args[0]->null_value || args[1]->null_value ||
- my_decimal_round(E_DEC_FATAL_ERROR, value, (int) dec,
- truncate, decimal_value) > 1)))
+ if (!(null_value= (value.is_null() || args[1]->null_value ||
+ value.round_to(decimal_value, (uint) dec,
+ truncate ? TRUNCATE : HALF_UP) > 1)))
return decimal_value;
return 0;
}
@@ -2600,7 +2479,7 @@ void Item_func_rand::seed_random(Item *arg)
THD *thd= current_thd;
if (WSREP(thd))
{
- if (thd->wsrep_exec_mode==REPL_RECV)
+ if (wsrep_thd_is_applying(thd))
tmp= thd->wsrep_rand;
else
tmp= thd->wsrep_rand= (uint32) arg->val_int();
@@ -2721,14 +2600,15 @@ bool Item_func_min_max::fix_attributes(Item **items, uint nitems)
0 Otherwise
*/
-bool Item_func_min_max::get_date_native(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_func_min_max::get_date_native(THD *thd, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate)
{
longlong UNINIT_VAR(min_max);
DBUG_ASSERT(fixed == 1);
for (uint i=0; i < arg_count ; i++)
{
- longlong res= args[i]->val_datetime_packed();
+ longlong res= args[i]->val_datetime_packed(thd);
/* Check if we need to stop (because of error or KILL) and stop the loop */
if (unlikely(args[i]->null_value))
@@ -2739,8 +2619,8 @@ bool Item_func_min_max::get_date_native(MYSQL_TIME *ltime, ulonglong fuzzy_date)
}
unpack_time(min_max, ltime, mysql_timestamp_type());
- if (!(fuzzy_date & TIME_TIME_ONLY) &&
- unlikely((null_value= check_date_with_warn(ltime, fuzzy_date,
+ if (!(fuzzydate & TIME_TIME_ONLY) &&
+ unlikely((null_value= check_date_with_warn(thd, ltime, fuzzydate,
MYSQL_TIMESTAMP_ERROR))))
return true;
@@ -2748,17 +2628,17 @@ bool Item_func_min_max::get_date_native(MYSQL_TIME *ltime, ulonglong fuzzy_date)
}
-bool Item_func_min_max::get_time_native(MYSQL_TIME *ltime)
+bool Item_func_min_max::get_time_native(THD *thd, MYSQL_TIME *ltime)
{
DBUG_ASSERT(fixed == 1);
- Time value(args[0]);
+ Time value(thd, args[0], Time::Options(thd), decimals);
if (!value.is_valid_time())
return (null_value= true);
for (uint i= 1; i < arg_count ; i++)
{
- Time tmp(args[i]);
+ Time tmp(thd, args[i], Time::Options(thd), decimals);
if (!tmp.is_valid_time())
return (null_value= true);
@@ -2872,6 +2752,28 @@ my_decimal *Item_func_min_max::val_decimal_native(my_decimal *dec)
}
+bool Item_func_min_max::val_native(THD *thd, Native *native)
+{
+ DBUG_ASSERT(fixed == 1);
+ const Type_handler *handler= Item_hybrid_func::type_handler();
+ NativeBuffer<STRING_BUFFER_USUAL_SIZE> cur;
+ for (uint i= 0; i < arg_count; i++)
+ {
+ if (val_native_with_conversion_from_item(thd, args[i],
+ i == 0 ? native : &cur,
+ handler))
+ return true;
+ if (i > 0)
+ {
+ int cmp= handler->cmp_native(*native, cur);
+ if ((cmp_sign < 0 ? cmp : -cmp) < 0 && native->copy(cur))
+ return null_value= true;
+ }
+ }
+ return null_value= false;
+}
+
+
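The val_native() loop keeps the running extreme in "native" and only copies a candidate in when the sign-adjusted comparison says it wins; cmp_sign is what distinguishes LEAST from GREATEST. The same selection logic over plain integers, as an illustration only (the function and variable names are invented):

// cmp_sign < 0 behaves like GREATEST, cmp_sign > 0 like LEAST
static long pick_extreme(const long *vals, uint count, int cmp_sign)
{
  long best= vals[0];
  for (uint i= 1; i < count; i++)
  {
    int cmp= best < vals[i] ? -1 : best > vals[i] ? 1 : 0;
    if ((cmp_sign < 0 ? cmp : -cmp) < 0)
      best= vals[i];                       // candidate replaces current extreme
  }
  return best;
}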
longlong Item_func_bit_length::val_int()
{
DBUG_ASSERT(fixed == 1);
@@ -3003,14 +2905,14 @@ longlong Item_func_field::val_int()
}
else if (cmp_type == DECIMAL_RESULT)
{
- my_decimal dec_arg_buf, *dec_arg,
- dec_buf, *dec= args[0]->val_decimal(&dec_buf);
- if (args[0]->null_value)
+ VDec dec(args[0]);
+ if (dec.is_null())
return 0;
+ my_decimal dec_arg_buf;
for (uint i=1; i < arg_count; i++)
{
- dec_arg= args[i]->val_decimal(&dec_arg_buf);
- if (!args[i]->null_value && !my_decimal_cmp(dec_arg, dec))
+ my_decimal *dec_arg= args[i]->val_decimal(&dec_arg_buf);
+ if (!args[i]->null_value && !dec.cmp(dec_arg))
return (longlong) (i);
}
}
@@ -3263,6 +3165,7 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func,
}
uint i;
Item **arg,**arg_end;
+ With_sum_func_cache *with_sum_func_cache= func->get_with_sum_func_cache();
for (i=0, arg=arguments, arg_end=arguments+arg_count;
arg != arg_end ;
arg++,i++)
@@ -3286,7 +3189,8 @@ udf_handler::fix_fields(THD *thd, Item_func_or_sum *func,
func->collation.set(&my_charset_bin);
if (item->maybe_null)
func->maybe_null=1;
- func->with_sum_func= func->with_sum_func || item->with_sum_func;
+ if (with_sum_func_cache)
+ with_sum_func_cache->join_with_sum_func(item);
func->with_window_func= func->with_window_func ||
item->with_window_func;
func->with_field= func->with_field || item->with_field;
@@ -3592,32 +3496,6 @@ String *Item_func_udf_int::val_str(String *str)
}
-longlong Item_func_udf_decimal::val_int()
-{
- my_bool tmp_null_value;
- longlong result;
- my_decimal dec_buf, *dec= udf.val_decimal(&tmp_null_value, &dec_buf);
- null_value= tmp_null_value;
- if (null_value)
- return 0;
- my_decimal2int(E_DEC_FATAL_ERROR, dec, unsigned_flag, &result);
- return result;
-}
-
-
-double Item_func_udf_decimal::val_real()
-{
- my_bool tmp_null_value;
- double result;
- my_decimal dec_buf, *dec= udf.val_decimal(&tmp_null_value, &dec_buf);
- null_value= tmp_null_value;
- if (null_value)
- return 0.0;
- my_decimal2double(E_DEC_FATAL_ERROR, dec, &result);
- return result;
-}
-
-
my_decimal *Item_func_udf_decimal::val_decimal(my_decimal *dec_buf)
{
my_decimal *res;
@@ -3633,21 +3511,6 @@ my_decimal *Item_func_udf_decimal::val_decimal(my_decimal *dec_buf)
}
-String *Item_func_udf_decimal::val_str(String *str)
-{
- my_bool tmp_null_value;
- my_decimal dec_buf, *dec= udf.val_decimal(&tmp_null_value, &dec_buf);
- null_value= tmp_null_value;
- if (null_value)
- return 0;
- if (str->length() < DECIMAL_MAX_STR_LENGTH)
- str->length(DECIMAL_MAX_STR_LENGTH);
- my_decimal_round(E_DEC_FATAL_ERROR, dec, decimals, FALSE, &dec_buf);
- my_decimal2string(E_DEC_FATAL_ERROR, &dec_buf, 0, 0, '0', str);
- return str;
-}
-
-
/* Default max_length is max argument length */
bool Item_func_udf_str::fix_length_and_dec()
@@ -3714,7 +3577,7 @@ longlong Item_master_pos_wait::val_int()
connection_name.length= con->length();
if (check_master_connection_name(&connection_name))
{
- my_error(ER_WRONG_ARGUMENTS, MYF(ME_JUST_WARNING),
+ my_error(ER_WRONG_ARGUMENTS, MYF(ME_WARNING),
"MASTER_CONNECTION_NAME");
goto err;
}
@@ -4425,7 +4288,7 @@ user_var_entry *get_variable(HASH *hash, LEX_CSTRING *name,
if (!my_hash_inited(hash))
return 0;
if (!(entry = (user_var_entry*) my_malloc(size,
- MYF(MY_WME | ME_FATALERROR |
+ MYF(MY_WME | ME_FATAL |
MY_THREAD_SPECIFIC))))
return 0;
entry->name.str=(char*) entry+ ALIGN_SIZE(sizeof(user_var_entry))+
@@ -4680,7 +4543,7 @@ update_hash(user_var_entry *entry, bool set_null, void *ptr, size_t length,
entry->value=0;
entry->value= (char*) my_realloc(entry->value, length,
MYF(MY_ALLOW_ZERO_PTR | MY_WME |
- ME_FATALERROR |
+ ME_FATAL |
MY_THREAD_SPECIFIC));
if (!entry->value)
return 1;
@@ -4745,11 +4608,7 @@ double user_var_entry::val_real(bool *null_value)
case INT_RESULT:
return (double) *(longlong*) value;
case DECIMAL_RESULT:
- {
- double result;
- my_decimal2double(E_DEC_FATAL_ERROR, (my_decimal *)value, &result);
- return result;
- }
+ return ((my_decimal *)value)->to_double();
case STRING_RESULT:
return my_atof(value); // This is null terminated
case ROW_RESULT:
@@ -4774,11 +4633,7 @@ longlong user_var_entry::val_int(bool *null_value) const
case INT_RESULT:
return *(longlong*) value;
case DECIMAL_RESULT:
- {
- longlong result;
- my_decimal2int(E_DEC_FATAL_ERROR, (my_decimal *)value, 0, &result);
- return result;
- }
+ return ((my_decimal *)value)->to_longlong(false);
case STRING_RESULT:
{
int error;
@@ -5515,10 +5370,9 @@ bool Item_func_get_user_var::set_value(THD *thd,
bool Item_user_var_as_out_param::fix_fields(THD *thd, Item **ref)
{
- DBUG_ASSERT(fixed == 0);
+ DBUG_ASSERT(!is_fixed());
DBUG_ASSERT(thd->lex->exchange);
- if (Item::fix_fields(thd, ref) ||
- !(entry= get_variable(&thd->user_vars, &org_name, 1)))
+ if (!(entry= get_variable(&thd->user_vars, &org_name, 1)))
return TRUE;
entry->type= STRING_RESULT;
/*
@@ -5576,7 +5430,8 @@ my_decimal* Item_user_var_as_out_param::val_decimal(my_decimal *decimal_buffer)
}
-bool Item_user_var_as_out_param::get_date(MYSQL_TIME *ltime, ulonglong fuzzy)
+bool Item_user_var_as_out_param::get_date(THD *thd, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate)
{
DBUG_ASSERT(0);
return true;
@@ -5615,7 +5470,7 @@ void Item_func_get_system_var::update_null_value()
THD *thd= current_thd;
int save_no_errors= thd->no_errors;
thd->no_errors= TRUE;
- Item::update_null_value();
+ type_handler()->Item_update_null_value(this);
thd->no_errors= save_no_errors;
}
@@ -6455,7 +6310,7 @@ Item_func_sp::fix_fields(THD *thd, Item **ref)
(thd->lex->sql_command == SQLCOM_CREATE_VIEW))
{
Security_context *save_security_ctx= thd->security_ctx;
- if (context->security_ctx)
+ if (context && context->security_ctx)
thd->security_ctx= context->security_ctx;
/*
@@ -6470,7 +6325,7 @@ Item_func_sp::fix_fields(THD *thd, Item **ref)
if (res)
{
- context->process_error(thd);
+ process_error(thd);
DBUG_RETURN(res);
}
}
@@ -6487,7 +6342,7 @@ Item_func_sp::fix_fields(THD *thd, Item **ref)
if (!(m_sp= sp))
{
my_missing_function_error(m_name->m_name, ErrConvDQName(m_name).ptr());
- context->process_error(thd);
+ process_error(thd);
DBUG_RETURN(TRUE);
}
@@ -6643,6 +6498,14 @@ String *Item_func_last_value::val_str(String *str)
return tmp;
}
+
+bool Item_func_last_value::val_native(THD *thd, Native *to)
+{
+ evaluate_sideeffects();
+ return val_native_from_item(thd, last_value, to);
+}
+
+
longlong Item_func_last_value::val_int()
{
longlong tmp;
@@ -6671,10 +6534,10 @@ my_decimal *Item_func_last_value::val_decimal(my_decimal *decimal_value)
}
-bool Item_func_last_value::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_func_last_value::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
evaluate_sideeffects();
- bool tmp= last_value->get_date(ltime, fuzzydate);
+ bool tmp= last_value->get_date(thd, ltime, fuzzydate);
null_value= last_value->null_value;
return tmp;
}
diff --git a/sql/item_func.h b/sql/item_func.h
index 80d90ae21c5..27cb245db6b 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -35,12 +35,11 @@ extern "C" /* Bug in BSDI include file */
#include <cmath>
-class Item_func :public Item_func_or_sum
+class Item_func :public Item_func_or_sum,
+ protected With_sum_func_cache
{
void sync_with_sum_func_and_with_field(List<Item> &list);
protected:
- String *val_str_from_val_str_ascii(String *str, String *str2);
-
virtual bool check_arguments() const
{
return check_argument_types_scalar(0, arg_count);
@@ -56,6 +55,7 @@ protected:
bool check_argument_types_can_return_text(uint start, uint end) const;
bool check_argument_types_can_return_date(uint start, uint end) const;
bool check_argument_types_can_return_time(uint start, uint end) const;
+ void print_cast_temporal(String *str, enum_query_type query_type);
public:
table_map not_null_tables_cache;
@@ -80,49 +80,56 @@ public:
CASE_SEARCHED_FUNC, // Used by ColumnStore/Spider
CASE_SIMPLE_FUNC // Used by ColumnStore/spider
};
+ static scalar_comparison_op functype_to_scalar_comparison_op(Functype type)
+ {
+ switch (type) {
+ case EQ_FUNC: return SCALAR_CMP_EQ;
+ case EQUAL_FUNC: return SCALAR_CMP_EQUAL;
+ case LT_FUNC: return SCALAR_CMP_LT;
+ case LE_FUNC: return SCALAR_CMP_LE;
+ case GE_FUNC: return SCALAR_CMP_GE;
+ case GT_FUNC: return SCALAR_CMP_GT;
+ default: break;
+ }
+ DBUG_ASSERT(0);
+ return SCALAR_CMP_EQ;
+ }

enum Type type() const { return FUNC_ITEM; }
virtual enum Functype functype() const { return UNKNOWN_FUNC; }
Item_func(THD *thd): Item_func_or_sum(thd)
{
- with_sum_func= 0;
with_field= 0;
with_param= 0;
}
- Item_func(THD *thd, Item *a): Item_func_or_sum(thd, a)
+ Item_func(THD *thd, Item *a)
+ :Item_func_or_sum(thd, a), With_sum_func_cache(a)
{
- with_sum_func= a->with_sum_func;
with_param= a->with_param;
with_field= a->with_field;
}
- Item_func(THD *thd, Item *a, Item *b):
- Item_func_or_sum(thd, a, b)
+ Item_func(THD *thd, Item *a, Item *b)
+ :Item_func_or_sum(thd, a, b), With_sum_func_cache(a, b)
{
- with_sum_func= a->with_sum_func || b->with_sum_func;
with_param= a->with_param || b->with_param;
with_field= a->with_field || b->with_field;
}
- Item_func(THD *thd, Item *a, Item *b, Item *c):
- Item_func_or_sum(thd, a, b, c)
+ Item_func(THD *thd, Item *a, Item *b, Item *c)
+ :Item_func_or_sum(thd, a, b, c), With_sum_func_cache(a, b, c)
{
- with_sum_func= a->with_sum_func || b->with_sum_func || c->with_sum_func;
with_field= a->with_field || b->with_field || c->with_field;
with_param= a->with_param || b->with_param || c->with_param;
}
- Item_func(THD *thd, Item *a, Item *b, Item *c, Item *d):
- Item_func_or_sum(thd, a, b, c, d)
+ Item_func(THD *thd, Item *a, Item *b, Item *c, Item *d)
+ :Item_func_or_sum(thd, a, b, c, d), With_sum_func_cache(a, b, c, d)
{
- with_sum_func= a->with_sum_func || b->with_sum_func ||
- c->with_sum_func || d->with_sum_func;
with_field= a->with_field || b->with_field ||
c->with_field || d->with_field;
with_param= a->with_param || b->with_param ||
c->with_param || d->with_param;
}
- Item_func(THD *thd, Item *a, Item *b, Item *c, Item *d, Item* e):
- Item_func_or_sum(thd, a, b, c, d, e)
+ Item_func(THD *thd, Item *a, Item *b, Item *c, Item *d, Item* e)
+ :Item_func_or_sum(thd, a, b, c, d, e), With_sum_func_cache(a, b, c, d, e)
{
- with_sum_func= a->with_sum_func || b->with_sum_func ||
- c->with_sum_func || d->with_sum_func || e->with_sum_func;
with_field= a->with_field || b->with_field ||
c->with_field || d->with_field || e->with_field;
with_param= a->with_param || b->with_param ||
@@ -134,11 +141,10 @@ public:
set_arguments(thd, list);
}
// Constructor used for Item_cond_and/or (see Item comment)
- Item_func(THD *thd, Item_func *item):
- Item_func_or_sum(thd, item),
+ Item_func(THD *thd, Item_func *item)
+ :Item_func_or_sum(thd, item), With_sum_func_cache(item),
not_null_tables_cache(item->not_null_tables_cache)
- {
- }
+ { }
bool fix_fields(THD *, Item **ref);
void cleanup()
{
@@ -174,16 +180,12 @@ public:
virtual void print(String *str, enum_query_type query_type);
void print_op(String *str, enum_query_type query_type);
void print_args(String *str, uint from, enum_query_type query_type);
- inline bool get_arg0_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
- {
- DBUG_ASSERT(!(fuzzy_date & TIME_TIME_ONLY));
- Datetime dt(current_thd, args[0], fuzzy_date);
- return (null_value= dt.copy_to_mysql_time(ltime));
- }
bool is_null() {
update_null_value();
return null_value;
}
+ String *val_str_from_val_str_ascii(String *str, String *str2);
+
void signal_divide_by_null();
friend class udf_handler;
Field *create_field_for_create_select(TABLE *table)
@@ -325,6 +327,11 @@ public:
return this;
}
+ bool has_rand_bit()
+ {
+ return used_tables() & RAND_TABLE_BIT;
+ }
+
bool excl_dep_on_table(table_map tab_map)
{
if (used_tables() & OUTER_REF_TABLE_BIT)
@@ -335,9 +342,16 @@ public:
bool excl_dep_on_grouping_fields(st_select_lex *sel)
{
+ if (has_rand_bit() || with_subquery())
+ return false;
return Item_args::excl_dep_on_grouping_fields(sel);
}
+ bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred)
+ {
+ return Item_args::excl_dep_on_in_subq_left_part(subq_pred);
+ }
+
/*
We assume the result of any function that has a TIMESTAMP argument to be
timezone-dependent, since a TIMESTAMP value in both numeric and string
@@ -380,6 +394,12 @@ public:
- or replaced to an Item_int_with_ref
*/
bool setup_args_and_comparator(THD *thd, Arg_comparator *cmp);
+
+ bool with_sum_func() const { return m_with_sum_func; }
+ With_sum_func_cache* get_with_sum_func_cache() { return this; }
+ Item_func *get_item_func() { return this; }
+ bool is_simplified_cond_processor(void *arg)
+ { return const_item() && !val_int(); }
};
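The static functype_to_scalar_comparison_op() helper added to Item_func maps a comparison Functype back to the generic scalar_comparison_op used by the type handlers; it asserts if given anything other than the six comparison functypes. A hedged usage fragment (the variable name is invented):

  // 'cond' is assumed to be one of =, <=>, <, <=, >=, >
  scalar_comparison_op op=
    Item_func::functype_to_scalar_comparison_op(cond->functype());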
@@ -400,8 +420,8 @@ public:
DBUG_ASSERT(fixed == 1);
return Converter_double_to_longlong(val_real(), unsigned_flag).result();
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date_from_real(ltime, fuzzydate); }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ { return get_date_from_real(thd, ltime, fuzzydate); }
const Type_handler *type_handler() const { return &type_handler_double; }
bool fix_length_and_dec()
{
@@ -441,6 +461,193 @@ public:
};
+class Item_handled_func: public Item_func
+{
+public:
+ class Handler
+ {
+ public:
+ virtual ~Handler() { }
+ virtual String *val_str(Item_handled_func *, String *) const= 0;
+ virtual String *val_str_ascii(Item_handled_func *, String *) const= 0;
+ virtual double val_real(Item_handled_func *) const= 0;
+ virtual longlong val_int(Item_handled_func *) const= 0;
+ virtual my_decimal *val_decimal(Item_handled_func *, my_decimal *) const= 0;
+ virtual bool get_date(THD *thd, Item_handled_func *, MYSQL_TIME *, date_mode_t fuzzydate) const= 0;
+ virtual const Type_handler *return_type_handler() const= 0;
+ virtual bool fix_length_and_dec(Item_handled_func *) const= 0;
+ };
+
+ /**
+ Abstract class for functions returning TIME, DATE, DATETIME or string values,
+ whose data type depends on parameters and is set at fix_fields time.
+ */
+ class Handler_temporal: public Handler
+ {
+ public:
+ String *val_str(Item_handled_func *item, String *to) const
+ {
+ StringBuffer<MAX_FIELD_WIDTH> ascii_buf;
+ return item->val_str_from_val_str_ascii(to, &ascii_buf);
+ }
+ };
+
+ /**
+ Abstract class for functions returning strings,
+ which are generated from get_date() results,
+ when get_date() can return different MYSQL_TIMESTAMP_XXX per row.
+ */
+ class Handler_temporal_string: public Handler_temporal
+ {
+ public:
+ const Type_handler *return_type_handler() const
+ {
+ return &type_handler_string;
+ }
+ double val_real(Item_handled_func *item) const
+ {
+ return Temporal_hybrid(item).to_double();
+ }
+ longlong val_int(Item_handled_func *item) const
+ {
+ return Temporal_hybrid(item).to_longlong();
+ }
+ my_decimal *val_decimal(Item_handled_func *item, my_decimal *to) const
+ {
+ return Temporal_hybrid(item).to_decimal(to);
+ }
+ String *val_str_ascii(Item_handled_func *item, String *to) const
+ {
+ return Temporal_hybrid(item).to_string(to, item->decimals);
+ }
+ };
+
+
+ class Handler_date: public Handler_temporal
+ {
+ public:
+ const Type_handler *return_type_handler() const
+ {
+ return &type_handler_newdate;
+ }
+ bool fix_length_and_dec(Item_handled_func *item) const
+ {
+ item->fix_attributes_date();
+ return false;
+ }
+ double val_real(Item_handled_func *item) const
+ {
+ return Date(item).to_double();
+ }
+ longlong val_int(Item_handled_func *item) const
+ {
+ return Date(item).to_longlong();
+ }
+ my_decimal *val_decimal(Item_handled_func *item, my_decimal *to) const
+ {
+ return Date(item).to_decimal(to);
+ }
+ String *val_str_ascii(Item_handled_func *item, String *to) const
+ {
+ return Date(item).to_string(to);
+ }
+ };
+
+
+ class Handler_time: public Handler_temporal
+ {
+ public:
+ const Type_handler *return_type_handler() const
+ {
+ return &type_handler_time2;
+ }
+ double val_real(Item_handled_func *item) const
+ {
+ return Time(item).to_double();
+ }
+ longlong val_int(Item_handled_func *item) const
+ {
+ return Time(item).to_longlong();
+ }
+ my_decimal *val_decimal(Item_handled_func *item, my_decimal *to) const
+ {
+ return Time(item).to_decimal(to);
+ }
+ String *val_str_ascii(Item_handled_func *item, String *to) const
+ {
+ return Time(item).to_string(to, item->decimals);
+ }
+ };
+
+
+ class Handler_datetime: public Handler_temporal
+ {
+ public:
+ const Type_handler *return_type_handler() const
+ {
+ return &type_handler_datetime2;
+ }
+ double val_real(Item_handled_func *item) const
+ {
+ return Datetime(item).to_double();
+ }
+ longlong val_int(Item_handled_func *item) const
+ {
+ return Datetime(item).to_longlong();
+ }
+ my_decimal *val_decimal(Item_handled_func *item, my_decimal *to) const
+ {
+ return Datetime(item).to_decimal(to);
+ }
+ String *val_str_ascii(Item_handled_func *item, String *to) const
+ {
+ return Datetime(item).to_string(to, item->decimals);
+ }
+ };
+
+
+protected:
+ const Handler *m_func_handler;
+public:
+ Item_handled_func(THD *thd, Item *a)
+ :Item_func(thd, a), m_func_handler(NULL) { }
+ Item_handled_func(THD *thd, Item *a, Item *b)
+ :Item_func(thd, a, b), m_func_handler(NULL) { }
+ void set_func_handler(const Handler *handler)
+ {
+ m_func_handler= handler;
+ }
+ const Type_handler *type_handler() const
+ {
+ return m_func_handler->return_type_handler();
+ }
+ String *val_str(String *to)
+ {
+ return m_func_handler->val_str(this, to);
+ }
+ String *val_str_ascii(String *to)
+ {
+ return m_func_handler->val_str_ascii(this, to);
+ }
+ double val_real()
+ {
+ return m_func_handler->val_real(this);
+ }
+ longlong val_int()
+ {
+ return m_func_handler->val_int(this);
+ }
+ my_decimal *val_decimal(my_decimal *to)
+ {
+ return m_func_handler->val_decimal(this, to);
+ }
+ bool get_date(THD *thd, MYSQL_TIME *to, date_mode_t fuzzydate)
+ {
+ return m_func_handler->get_date(thd, this, to, fuzzydate);
+ }
+};
+
+
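Item_handled_func delegates every val_xxx() call to a per-instance Handler, so a function whose return type is only known at fix time can pick its behaviour once instead of branching in every getter. A hedged sketch of how a derived function might wire this up; the handler and item classes below are invented for illustration, and the get_date() body simply forwards to the first argument:

class Handler_example_date: public Item_handled_func::Handler_date
{
public:
  bool get_date(THD *thd, Item_handled_func *item,
                MYSQL_TIME *to, date_mode_t fuzzydate) const
  {
    // Illustration only: take the date from the first argument
    return (item->null_value=
              item->arguments()[0]->get_date(thd, to, fuzzydate));
  }
};

static const Handler_example_date handler_example_date_singleton;

class Item_func_example_date: public Item_handled_func
{
public:
  Item_func_example_date(THD *thd, Item *a) :Item_handled_func(thd, a) { }
  const char *func_name() const { return "example_date"; }
  bool fix_length_and_dec()
  {
    set_func_handler(&handler_example_date_singleton);  // chosen once, at fix time
    return m_func_handler->fix_length_and_dec(this);
  }
  Item *get_copy(THD *thd)
  { return get_item_copy<Item_func_example_date>(thd, this); }
};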
/**
Functions that at fix_fields() time determine the returned field type,
trying to preserve the exact data type of the arguments.
@@ -463,15 +670,15 @@ class Item_func_hybrid_field_type: public Item_hybrid_func
Helper methods to make sure that the result of
decimal_op(), str_op() and date_op() is properly synched with null_value.
*/
- bool date_op_with_null_check(MYSQL_TIME *ltime)
+ bool date_op_with_null_check(THD *thd, MYSQL_TIME *ltime)
{
- bool rc= date_op(ltime, 0);
+ bool rc= date_op(thd, ltime, date_mode_t(0));
DBUG_ASSERT(!rc ^ null_value);
return rc;
}
- bool time_op_with_null_check(MYSQL_TIME *ltime)
+ bool time_op_with_null_check(THD *thd, MYSQL_TIME *ltime)
{
- bool rc= time_op(ltime);
+ bool rc= time_op(thd, ltime);
DBUG_ASSERT(!rc ^ null_value);
DBUG_ASSERT(rc || ltime->time_type == MYSQL_TIMESTAMP_TIME);
return rc;
@@ -482,17 +689,6 @@ class Item_func_hybrid_field_type: public Item_hybrid_func
DBUG_ASSERT((res != NULL) ^ null_value);
return res;
}
- my_decimal *decimal_op_with_null_check(my_decimal *decimal_buffer)
- {
- my_decimal *res= decimal_op(decimal_buffer);
- DBUG_ASSERT((res != NULL) ^ null_value);
- return res;
- }
- bool make_zero_mysql_time(MYSQL_TIME *ltime, ulonglong fuzzydate)
- {
- bzero(ltime, sizeof(*ltime));
- return null_value|= !(fuzzydate & TIME_FUZZY_DATES);
- }
public:
// Value methods that involve no conversion
@@ -500,10 +696,6 @@ public:
{
return str_op_with_null_check(&str_value);
}
- my_decimal *val_decimal_from_decimal_op(my_decimal *dec)
- {
- return decimal_op_with_null_check(dec);
- }
longlong val_int_from_int_op()
{
return int_op();
@@ -514,7 +706,6 @@ public:
}
// Value methods that involve conversion
- String *val_str_from_decimal_op(String *str);
String *val_str_from_real_op(String *str);
String *val_str_from_int_op(String *str);
String *val_str_from_date_op(String *str);
@@ -528,21 +719,14 @@ public:
longlong val_int_from_str_op();
longlong val_int_from_real_op();
- longlong val_int_from_decimal_op();
longlong val_int_from_date_op();
longlong val_int_from_time_op();
double val_real_from_str_op();
- double val_real_from_decimal_op();
double val_real_from_date_op();
double val_real_from_time_op();
double val_real_from_int_op();
- bool get_date_from_str_op(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool get_date_from_real_op(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool get_date_from_decimal_op(MYSQL_TIME *ltime, ulonglong fuzzydate);
- bool get_date_from_int_op(MYSQL_TIME *ltime, ulonglong fuzzydate);
-
public:
Item_func_hybrid_field_type(THD *thd):
Item_hybrid_func(thd)
@@ -586,11 +770,17 @@ public:
DBUG_ASSERT(null_value == (res == NULL));
return res;
}
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date)
+ bool get_date(THD *thd, MYSQL_TIME *to, date_mode_t mode)
{
DBUG_ASSERT(fixed);
return Item_func_hybrid_field_type::type_handler()->
- Item_func_hybrid_field_type_get_date(this, res, fuzzy_date);
+ Item_func_hybrid_field_type_get_date_with_warn(thd, this, to, mode);
+ }
+
+ bool val_native(THD *thd, Native *to)
+ {
+ DBUG_ASSERT(fixed);
+ return native_op(thd, to);
}
/**
@@ -600,6 +790,20 @@ public:
@return The result of the operation.
*/
virtual longlong int_op()= 0;
+ Longlong_null to_longlong_null_op()
+ {
+ longlong nr= int_op();
+ /*
+ C++ does not guarantee the order of parameter evaluation,
+ so to make sure "null_value" is passed to the constructor
+      after the int_op() call, int_op() is called on a separate line.
+ */
+ return Longlong_null(nr, null_value);
+ }
+ Longlong_hybrid_null to_longlong_hybrid_null_op()
+ {
+ return Longlong_hybrid_null(to_longlong_null_op(), unsigned_flag);
+ }
/**
@brief Performs the operation that this functions implements when the
@@ -608,6 +812,12 @@ public:
@return The result of the operation.
*/
virtual double real_op()= 0;
+ Double_null to_double_null_op()
+ {
+    // real_op() must be called on a separate line. See to_longlong_null_op().
+ double nr= real_op();
+ return Double_null(nr, null_value);
+ }
/**
@brief Performs the operation that this functions implements when the
@@ -634,15 +844,16 @@ public:
field type is DATETIME or DATE.
@return The result of the operation.
*/
- virtual bool date_op(MYSQL_TIME *res, ulonglong fuzzy_date)= 0;
+ virtual bool date_op(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate)= 0;
/**
@brief Performs the operation that this functions implements when
field type is TIME.
@return The result of the operation.
*/
- virtual bool time_op(MYSQL_TIME *res)= 0;
+ virtual bool time_op(THD *thd, MYSQL_TIME *res)= 0;
+ virtual bool native_op(THD *thd, Native *native)= 0;
};
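The evaluation-order caveat in to_longlong_null_op() and to_double_null_op() is easy to trip over: written inline, the compiler may read null_value before the operation has set it, because C++ leaves the order of argument evaluation unspecified. A minimal illustration of the two spellings (sketch only, not from the patch):

  // Unsafe: null_value may be read before int_op() runs
  // return Longlong_null(int_op(), null_value);

  // Safe: force the sequencing explicitly, as the methods above do
  longlong nr= int_op();
  return Longlong_null(nr, null_value);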
@@ -700,12 +911,17 @@ public:
Item_func_hybrid_field_type(thd, list)
{ }
String *str_op(String *str) { DBUG_ASSERT(0); return 0; }
- bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(0);
return true;
}
- bool time_op(MYSQL_TIME *ltime)
+ bool time_op(THD *thd, MYSQL_TIME *ltime)
+ {
+ DBUG_ASSERT(0);
+ return true;
+ }
+ bool native_op(THD *thd, Native *to)
{
DBUG_ASSERT(0);
return true;
@@ -791,8 +1007,8 @@ public:
{ collation.set_numeric(); }
double val_real();
String *val_str(String*str);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date_from_int(ltime, fuzzydate); }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ { return get_date_from_int(thd, ltime, fuzzydate); }
const Type_handler *type_handler() const= 0;
bool fix_length_and_dec() { return FALSE; }
};
@@ -812,6 +1028,19 @@ public:
};
+class Item_func_hash: public Item_int_func
+{
+public:
+ Item_func_hash(THD *thd, List<Item> &item): Item_int_func(thd, item)
+ {}
+ longlong val_int();
+ bool fix_length_and_dec();
+ const Type_handler *type_handler() const { return &type_handler_long; }
+ Item *get_copy(THD *thd)
+ { return get_item_copy<Item_func_hash>(thd, this); }
+ const char *func_name() const { return "<hash>"; }
+};
+
class Item_longlong_func: public Item_int_func
{
public:
@@ -982,12 +1211,15 @@ public:
fix_char_length(my_decimal_precision_to_length_no_truncation(len, dec,
unsigned_flag));
}
- String *val_str(String *str);
- double val_real();
- longlong val_int();
+ String *val_str(String *str) { return VDec(this).to_string(str); }
+ double val_real() { return VDec(this).to_double(); }
+ longlong val_int() { return VDec(this).to_longlong(unsigned_flag); }
my_decimal *val_decimal(my_decimal*);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date_from_decimal(ltime, fuzzydate); }
+ bool get_date(THD *thd, MYSQL_TIME *to, date_mode_t mode)
+ {
+ return decimal_to_datetime_with_warn(thd, VDec(this).ptr(), to, mode,
+ NULL, NULL);
+ }
const Type_handler *type_handler() const { return &type_handler_newdecimal; }
void fix_length_and_dec_generic() {}
bool fix_length_and_dec()
@@ -1541,8 +1773,8 @@ public:
double val_real_native();
longlong val_int_native();
my_decimal *val_decimal_native(my_decimal *);
- bool get_date_native(MYSQL_TIME *res, ulonglong fuzzydate);
- bool get_time_native(MYSQL_TIME *res);
+ bool get_date_native(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
+ bool get_time_native(THD *thd, MYSQL_TIME *res);
double val_real()
{
@@ -1568,12 +1800,13 @@ public:
return Item_func_min_max::type_handler()->
Item_func_min_max_val_decimal(this, dec);
}
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date)
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed);
return Item_func_min_max::type_handler()->
- Item_func_min_max_get_date(this, res, fuzzy_date);
+ Item_func_min_max_get_date(thd, this, res, fuzzydate);
}
+ bool val_native(THD *thd, Native *to);
void aggregate_attributes_real(Item **items, uint nitems)
{
/*
@@ -1637,10 +1870,12 @@ public:
double val_real() { return val_real_from_item(args[0]); }
longlong val_int() { return val_int_from_item(args[0]); }
String *val_str(String *str) { return val_str_from_item(args[0], str); }
+ bool val_native(THD *thd, Native *to)
+ { return val_native_from_item(thd, args[0], to); }
my_decimal *val_decimal(my_decimal *dec)
{ return val_decimal_from_item(args[0], dec); }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date_from_item(args[0], ltime, fuzzydate); }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ { return get_date_from_item(thd, args[0], ltime, fuzzydate); }
const char *func_name() const { return "rollup_const"; }
bool const_item() const { return 0; }
const Type_handler *type_handler() const { return args[0]->type_handler(); }
@@ -2007,6 +2242,18 @@ protected:
udf_handler udf;
bool is_expensive_processor(void *arg) { return TRUE; }
+ class VDec_udf: public Dec_ptr_and_buffer
+ {
+ public:
+ VDec_udf(Item_udf_func *func, udf_handler *udf)
+ {
+ my_bool tmp_null_value;
+ m_ptr= udf->val_decimal(&tmp_null_value, &m_buffer);
+ DBUG_ASSERT(is_null() == (tmp_null_value != 0));
+ func->null_value= is_null();
+ }
+ };
+
public:
Item_udf_func(THD *thd, udf_func *udf_arg):
Item_func(thd), udf(udf_arg) {}
@@ -2084,10 +2331,12 @@ public:
{
return mark_unsupported_function(func_name(), "()", arg, VCOL_NON_DETERMINISTIC);
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return type_handler()->Item_get_date(this, ltime, fuzzydate);
+ return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate);
}
+ bool excl_dep_on_grouping_fields(st_select_lex *sel)
+ { return false; }
};
@@ -2147,10 +2396,19 @@ public:
Item_udf_func(thd, udf_arg) {}
Item_func_udf_decimal(THD *thd, udf_func *udf_arg, List<Item> &list):
Item_udf_func(thd, udf_arg, list) {}
- longlong val_int();
- double val_real();
+ longlong val_int()
+ {
+ return VDec_udf(this, &udf).to_longlong(unsigned_flag);
+ }
+ double val_real()
+ {
+ return VDec_udf(this, &udf).to_double();
+ }
my_decimal *val_decimal(my_decimal *);
- String *val_str(String *str);
+ String *val_str(String *str)
+ {
+ return VDec_udf(this, &udf).to_string_round(str, decimals);
+ }
const Type_handler *type_handler() const { return &type_handler_newdecimal; }
bool fix_length_and_dec() { fix_num_length_and_dec(); return FALSE; }
Item *get_copy(THD *thd)
@@ -2381,13 +2639,15 @@ public:
Item_func_user_var(THD *thd, Item_func_user_var *item)
:Item_hybrid_func(thd, item),
m_var_entry(item->m_var_entry), name(item->name) { }
- Field *create_tmp_field(bool group, TABLE *table)
- { return create_table_field_from_handler(table); }
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param);
Field *create_field_for_create_select(TABLE *table)
{ return create_table_field_from_handler(table); }
bool check_vcol_func_processor(void *arg);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return type_handler()->Item_get_date(this, ltime, fuzzydate); }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ {
+ return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate);
+ }
};
@@ -2514,14 +2774,14 @@ public:
in List<Item> and desire to place this code somewhere near other functions
working with user variables.
*/
-class Item_user_var_as_out_param :public Item,
+class Item_user_var_as_out_param :public Item_fixed_hybrid,
public Load_data_outvar
{
LEX_CSTRING org_name;
user_var_entry *entry;
public:
Item_user_var_as_out_param(THD *thd, const LEX_CSTRING *a)
- :Item(thd)
+ :Item_fixed_hybrid(thd)
{
DBUG_ASSERT(a->length < UINT_MAX32);
org_name= *a;
@@ -2556,12 +2816,18 @@ public:
{
return 0;
}
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ DBUG_ASSERT(0);
+ return NULL;
+ }
/* We should return something different from FIELD_ITEM here */
- enum Type type() const { return STRING_ITEM;}
+ enum Type type() const { return CONST_ITEM;}
double val_real();
longlong val_int();
String *val_str(String *str);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
my_decimal *val_decimal(my_decimal *decimal_buffer);
/* fix_fields() binds variable name with its entry structure */
bool fix_fields(THD *thd, Item **ref);
@@ -2608,9 +2874,9 @@ public:
String* val_str(String*);
my_decimal *val_decimal(my_decimal *dec_buf)
{ return val_decimal_from_real(dec_buf); }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return type_handler()->Item_get_date(this, ltime, fuzzydate);
+ return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate);
}
/* TODO: fix to support views */
const char *func_name() const { return "get_system_var"; }
@@ -2869,6 +3135,8 @@ public:
const Type_handler *type_handler() const;
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param);
Field *create_field_for_create_select(TABLE *table)
{
return result_type() != STRING_RESULT ?
@@ -2898,7 +3166,7 @@ public:
return sp_result_field->val_decimal(dec_buf);
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
if (execute())
return true;
@@ -2924,6 +3192,13 @@ public:
return str;
}
+ bool val_native(THD *thd, Native *to)
+ {
+ if (execute())
+ return true;
+ return null_value= sp_result_field->val_native(to);
+ }
+
void update_null_value()
{
execute();
@@ -2958,6 +3233,8 @@ public:
not_null_tables_cache= 0;
return 0;
}
+ bool excl_dep_on_grouping_fields(st_select_lex *sel)
+ { return false; }
};
@@ -3052,7 +3329,8 @@ public:
longlong val_int();
String *val_str(String *);
my_decimal *val_decimal(my_decimal *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
+ bool val_native(THD *thd, Native *);
bool fix_length_and_dec();
const char *func_name() const { return "last_value"; }
const Type_handler *type_handler() const { return last_value->type_handler(); }
diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc
index 4c2a2fa8b11..1f1b5a6ceed 100644
--- a/sql/item_geofunc.cc
+++ b/sql/item_geofunc.cc
@@ -916,7 +916,7 @@ String *Item_func_point::val_str(String *str)
if ((null_value= (args[0]->null_value ||
args[1]->null_value ||
- str->realloc(4/*SRID*/ + 1 + 4 + SIZEOF_STORED_DOUBLE * 2))))
+ str->alloc(4/*SRID*/ + 1 + 4 + SIZEOF_STORED_DOUBLE * 2))))
return 0;
str->set_charset(&my_charset_bin);
diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h
index 0e727829ce7..e6c198fb8b2 100644
--- a/sql/item_geofunc.h
+++ b/sql/item_geofunc.h
@@ -512,7 +512,7 @@ public:
return TRUE;
for (unsigned int i= 0; i < arg_count; ++i)
{
- if (args[i]->fixed && args[i]->field_type() != MYSQL_TYPE_GEOMETRY)
+ if (args[i]->is_fixed() && args[i]->field_type() != MYSQL_TYPE_GEOMETRY)
{
String str;
args[i]->print(&str, QT_NO_DATA_EXPANSION);
diff --git a/sql/item_inetfunc.cc b/sql/item_inetfunc.cc
index 8a3345ecc81..379897ac8c1 100644
--- a/sql/item_inetfunc.cc
+++ b/sql/item_inetfunc.cc
@@ -21,9 +21,18 @@
///////////////////////////////////////////////////////////////////////////
-static const int IN_ADDR_SIZE= sizeof (in_addr);
-static const int IN6_ADDR_SIZE= sizeof (in6_addr);
-static const int IN6_ADDR_NUM_WORDS= IN6_ADDR_SIZE / 2;
+static const size_t IN_ADDR_SIZE= 4;
+static const size_t IN_ADDR_MAX_CHAR_LENGTH= 15;
+
+static const size_t IN6_ADDR_SIZE= 16;
+static const size_t IN6_ADDR_NUM_WORDS= IN6_ADDR_SIZE / 2;
+
+/**
+ Non-abbreviated syntax is 8 groups, up to 4 digits each,
+ plus 7 delimiters between the groups.
+ Abbreviated syntax is even shorter.
+*/
+static const uint IN6_ADDR_MAX_CHAR_LENGTH= 8 * 4 + 7;
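As a quick check of the arithmetic above: the longest plain hexadecimal form, e.g. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff, has 8 x 4 = 32 digits plus 7 colons, i.e. 39 characters, which is what the expression evaluates to (an illustrative compile-time check, not part of the patch):

static_assert(8 * 4 + 7 == 39, "longest non-abbreviated IPv6 text form");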
static const char HEX_DIGITS[]= "0123456789abcdef";
@@ -89,7 +98,6 @@ err:
return 0;
}
-///////////////////////////////////////////////////////////////////////////
String* Item_func_inet_ntoa::val_str(String* str)
{
@@ -139,109 +147,264 @@ String* Item_func_inet_ntoa::val_str(String* str)
///////////////////////////////////////////////////////////////////////////
-/**
- Check the function argument, handle errors properly.
-
- @return The function value.
-*/
-longlong Item_func_inet_bool_base::val_int()
+class Inet4
{
- DBUG_ASSERT(fixed);
-
- // String argument expected
- if (unlikely(args[0]->result_type() != STRING_RESULT))
- return 0;
-
- String buffer;
- String *arg_str= args[0]->val_str(&buffer);
-
- if (unlikely(!arg_str)) // Out-of memory happened. error has been reported.
- return 0; // Or: the underlying field is NULL
+ char m_buffer[IN_ADDR_SIZE];
+protected:
+ bool ascii_to_ipv4(const char *str, size_t length);
+ bool character_string_to_ipv4(const char *str, size_t str_length,
+ CHARSET_INFO *cs)
+ {
+ if (cs->state & MY_CS_NONASCII)
+ {
+ char tmp[IN_ADDR_MAX_CHAR_LENGTH];
+ String_copier copier;
+ uint length= copier.well_formed_copy(&my_charset_latin1, tmp, sizeof(tmp),
+ cs, str, str_length);
+ return ascii_to_ipv4(tmp, length);
+ }
+ return ascii_to_ipv4(str, str_length);
+ }
+ bool binary_to_ipv4(const char *str, size_t length)
+ {
+ if (length != sizeof(m_buffer))
+ return true;
+ memcpy(m_buffer, str, length);
+ return false;
+ }
+ // Non-initializing constructor
+ Inet4() { }
+public:
+ void to_binary(char *dst, size_t dstsize) const
+ {
+ DBUG_ASSERT(dstsize >= sizeof(m_buffer));
+ memcpy(dst, m_buffer, sizeof(m_buffer));
+ }
+ bool to_binary(String *to) const
+ {
+ return to->copy(m_buffer, sizeof(m_buffer), &my_charset_bin);
+ }
+ size_t to_string(char *dst, size_t dstsize) const;
+ bool to_string(String *to) const
+ {
+ to->set_charset(&my_charset_latin1);
+ if (to->alloc(INET_ADDRSTRLEN))
+ return true;
+ to->length((uint32) to_string((char*) to->ptr(), INET_ADDRSTRLEN));
+ return false;
+ }
+};
- return calc_value(arg_str) ? 1 : 0;
-}
-///////////////////////////////////////////////////////////////////////////
+class Inet4_null: public Inet4, public Null_flag
+{
+public:
+ // Initialize from a text representation
+ Inet4_null(const char *str, size_t length, CHARSET_INFO *cs)
+ :Null_flag(character_string_to_ipv4(str, length, cs))
+ { }
+ Inet4_null(const String &str)
+ :Inet4_null(str.ptr(), str.length(), str.charset())
+ { }
+ // Initialize from a binary representation
+ Inet4_null(const char *str, size_t length)
+ :Null_flag(binary_to_ipv4(str, length))
+ { }
+ Inet4_null(const Binary_string &str)
+ :Inet4_null(str.ptr(), str.length())
+ { }
+public:
+ const Inet4& to_inet4() const
+ {
+ DBUG_ASSERT(!is_null());
+ return *this;
+ }
+ void to_binary(char *dst, size_t dstsize) const
+ {
+ to_inet4().to_binary(dst, dstsize);
+ }
+ bool to_binary(String *to) const
+ {
+ return to_inet4().to_binary(to);
+ }
+ size_t to_string(char *dst, size_t dstsize) const
+ {
+ return to_inet4().to_string(dst, dstsize);
+ }
+ bool to_string(String *to) const
+ {
+ return to_inet4().to_string(to);
+ }
+};
-/**
- Check the function argument, handle errors properly.
- @param [out] buffer Buffer for string operations.
+class Inet6
+{
+ char m_buffer[IN6_ADDR_SIZE];
+protected:
+ bool make_from_item(Item *item);
+ bool ascii_to_ipv6(const char *str, size_t str_length);
+ bool character_string_to_ipv6(const char *str, size_t str_length,
+ CHARSET_INFO *cs)
+ {
+ if (cs->state & MY_CS_NONASCII)
+ {
+ char tmp[IN6_ADDR_MAX_CHAR_LENGTH];
+ String_copier copier;
+ uint length= copier.well_formed_copy(&my_charset_latin1, tmp, sizeof(tmp),
+ cs, str, str_length);
+ return ascii_to_ipv6(tmp, length);
+ }
+ return ascii_to_ipv6(str, str_length);
+ }
+ bool binary_to_ipv6(const char *str, size_t length)
+ {
+ if (length != sizeof(m_buffer))
+ return true;
+ memcpy(m_buffer, str, length);
+ return false;
+ }
+ // Non-initializing constructor
+ Inet6() { }
+public:
+ bool to_binary(String *to) const
+ {
+ return to->copy(m_buffer, sizeof(m_buffer), &my_charset_bin);
+ }
+ size_t to_string(char *dst, size_t dstsize) const;
+ bool to_string(String *to) const
+ {
+ to->set_charset(&my_charset_latin1);
+ if (to->alloc(INET6_ADDRSTRLEN))
+ return true;
+ to->length((uint32) to_string((char*) to->ptr(), INET6_ADDRSTRLEN));
+ return false;
+ }
+ bool is_v4compat() const
+ {
+ static_assert(sizeof(in6_addr) == IN6_ADDR_SIZE, "unexpected in6_addr size");
+ return IN6_IS_ADDR_V4COMPAT((struct in6_addr *) m_buffer);
+ }
+ bool is_v4mapped() const
+ {
+ static_assert(sizeof(in6_addr) == IN6_ADDR_SIZE, "unexpected in6_addr size");
+ return IN6_IS_ADDR_V4MAPPED((struct in6_addr *) m_buffer);
+ }
+};
- @return The function value.
-*/
-String *Item_func_inet_str_base::val_str_ascii(String *buffer)
+class Inet6_null: public Inet6, public Null_flag
{
- DBUG_ASSERT(fixed);
-
- // String argument expected
- if (unlikely(args[0]->result_type() != STRING_RESULT))
+public:
+ // Initialize from a text representation
+ Inet6_null(const char *str, size_t length, CHARSET_INFO *cs)
+ :Null_flag(character_string_to_ipv6(str, length, cs))
+ { }
+ Inet6_null(const String &str)
+ :Inet6_null(str.ptr(), str.length(), str.charset())
+ { }
+ // Initialize from a binary representation
+ Inet6_null(const char *str, size_t length)
+ :Null_flag(binary_to_ipv6(str, length))
+ { }
+ Inet6_null(const Binary_string &str)
+ :Inet6_null(str.ptr(), str.length())
+ { }
+ // Initialize from an Item
+ Inet6_null(Item *item)
+ :Null_flag(make_from_item(item))
+ { }
+public:
+ const Inet6& to_inet6() const
{
- null_value= true;
- return NULL;
+ DBUG_ASSERT(!is_null());
+ return *this;
}
-
- StringBuffer<STRING_BUFFER_USUAL_SIZE> tmp;
- String *arg_str= args[0]->val_str(&tmp);
- if (unlikely(!arg_str))
+ bool to_binary(String *to) const
{
- // Out-of memory happened. error has been reported.
- // Or: the underlying field is NULL
- null_value= true;
- return NULL;
+ DBUG_ASSERT(!is_null());
+ return to_inet6().to_binary(to);
+ }
+ size_t to_string(char *dst, size_t dstsize) const
+ {
+ return to_inet6().to_string(dst, dstsize);
}
+ bool to_string(String *to) const
+ {
+ return to_inet6().to_string(to);
+ }
+ bool is_v4compat() const
+ {
+ return to_inet6().is_v4compat();
+ }
+ bool is_v4mapped() const
+ {
+ return to_inet6().is_v4mapped();
+ }
+};
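The *_null wrappers bundle the conversion result with a Null_flag, so callers construct one, test is_null(), and only then read the value. A small hedged usage sketch (the surrounding helper function is invented):

static bool text_ipv6_to_binary(const String &addr_text, String *out)
{
  Inet6_null ipv6(addr_text);        // parses the text form, sets the null flag on failure
  if (ipv6.is_null())
    return true;                     // not a valid IPv6 address
  return ipv6.to_binary(out);        // 16-byte binary form in my_charset_bin
}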
- null_value= !calc_value(arg_str, buffer);
- return unlikely(null_value) ? NULL : buffer;
-}
+bool Inet6::make_from_item(Item *item)
+{
+ String tmp(m_buffer, sizeof(m_buffer), &my_charset_bin);
+ String *str= item->val_str(&tmp);
+ /*
+ Charset could be tested in item->collation.collation before the val_str()
+ call, but traditionally Inet6 functions still call item->val_str()
+ for non-binary arguments and therefore execute side effects.
+ */
+ if (!str || str->length() != sizeof(m_buffer) ||
+ str->charset() != &my_charset_bin)
+ return true;
+ if (str->ptr() != m_buffer)
+ memcpy(m_buffer, str->ptr(), sizeof(m_buffer));
+ return false;
+};
-///////////////////////////////////////////////////////////////////////////
/**
Tries to convert given string to binary IPv4-address representation.
This is a portable alternative to inet_pton(AF_INET).
@param str String to convert.
- @param str_len String length.
- @param[out] ipv4_address Buffer to store IPv4-address.
+ @param str_length String length.
@return Completion status.
- @retval false Given string does not represent an IPv4-address.
- @retval true The string has been converted sucessfully.
+ @retval true - error, the given string does not represent an IPv4-address.
+  @retval false - ok, the string has been converted successfully.
@note The problem with inet_pton() is that it treats leading zeros in
IPv4-part differently on different platforms.
*/
-static bool str_to_ipv4(const char *str, size_t str_length, in_addr *ipv4_address)
+bool Inet4::ascii_to_ipv4(const char *str, size_t str_length)
{
if (str_length < 7)
{
- DBUG_PRINT("error", ("str_to_ipv4(%.*s): "
+ DBUG_PRINT("error", ("ascii_to_ipv4(%.*s): "
"invalid IPv4 address: too short.",
(int) str_length, str));
- return false;
+ return true;
}
- if (str_length > 15)
+ if (str_length > IN_ADDR_MAX_CHAR_LENGTH)
{
- DBUG_PRINT("error", ("str_to_ipv4(%.*s): "
+ DBUG_PRINT("error", ("ascii_to_ipv4(%.*s): "
"invalid IPv4 address: too long.",
(int) str_length, str));
- return false;
+ return true;
}
- unsigned char *ipv4_bytes= (unsigned char *) ipv4_address;
+ unsigned char *ipv4_bytes= (unsigned char *) &m_buffer;
+ const char *str_end= str + str_length;
const char *p= str;
int byte_value= 0;
int chars_in_group= 0;
int dot_count= 0;
char c= 0;
- while (((p - str) < (int)str_length) && *p)
+ while (p < str_end && *p)
{
c= *p++;
@@ -251,30 +414,30 @@ static bool str_to_ipv4(const char *str, size_t str_length, in_addr *ipv4_addres
if (chars_in_group > 3)
{
- DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
+ DBUG_PRINT("error", ("ascii_to_ipv4(%.*s): invalid IPv4 address: "
"too many characters in a group.",
(int) str_length, str));
- return false;
+ return true;
}
byte_value= byte_value * 10 + (c - '0');
if (byte_value > 255)
{
- DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
+ DBUG_PRINT("error", ("ascii_to_ipv4(%.*s): invalid IPv4 address: "
"invalid byte value.",
(int) str_length, str));
- return false;
+ return true;
}
}
else if (c == '.')
{
if (chars_in_group == 0)
{
- DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
+ DBUG_PRINT("error", ("ascii_to_ipv4(%.*s): invalid IPv4 address: "
"too few characters in a group.",
(int) str_length, str));
- return false;
+ return true;
}
ipv4_bytes[dot_count]= (unsigned char) byte_value;
@@ -285,79 +448,77 @@ static bool str_to_ipv4(const char *str, size_t str_length, in_addr *ipv4_addres
if (dot_count > 3)
{
- DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
+ DBUG_PRINT("error", ("ascii_to_ipv4(%.*s): invalid IPv4 address: "
"too many dots.", (int) str_length, str));
- return false;
+ return true;
}
}
else
{
- DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
+ DBUG_PRINT("error", ("ascii_to_ipv4(%.*s): invalid IPv4 address: "
"invalid character at pos %d.",
(int) str_length, str, (int) (p - str)));
- return false;
+ return true;
}
}
if (c == '.')
{
- DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
+ DBUG_PRINT("error", ("ascii_to_ipv4(%.*s): invalid IPv4 address: "
"ending at '.'.", (int) str_length, str));
- return false;
+ return true;
}
if (dot_count != 3)
{
- DBUG_PRINT("error", ("str_to_ipv4(%.*s): invalid IPv4 address: "
+ DBUG_PRINT("error", ("ascii_to_ipv4(%.*s): invalid IPv4 address: "
"too few groups.",
(int) str_length, str));
- return false;
+ return true;
}
ipv4_bytes[3]= (unsigned char) byte_value;
- DBUG_PRINT("info", ("str_to_ipv4(%.*s): valid IPv4 address: %d.%d.%d.%d",
+ DBUG_PRINT("info", ("ascii_to_ipv4(%.*s): valid IPv4 address: %d.%d.%d.%d",
(int) str_length, str,
ipv4_bytes[0], ipv4_bytes[1],
ipv4_bytes[2], ipv4_bytes[3]));
- return true;
+ return false;
}
-///////////////////////////////////////////////////////////////////////////
/**
Tries to convert given string to binary IPv6-address representation.
This is a portable alternative to inet_pton(AF_INET6).
@param str String to convert.
- @param str_len String length.
- @param[out] ipv6_address Buffer to store IPv6-address.
+ @param str_length String length.
@return Completion status.
- @retval false Given string does not represent an IPv6-address.
- @retval true The string has been converted sucessfully.
+ @retval true - error, the given string does not represent an IPv6-address.
+  @retval false - ok, the string has been converted successfully.
@note The problem with inet_pton() is that it treats leading zeros in
IPv4-part differently on different platforms.
*/
-static bool str_to_ipv6(const char *str, int str_length, in6_addr *ipv6_address)
+bool Inet6::ascii_to_ipv6(const char *str, size_t str_length)
{
if (str_length < 2)
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: too short.",
- str_length, str));
- return false;
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: too short.",
+ (int) str_length, str));
+ return true;
}
- if (str_length > 8 * 4 + 7)
+ if (str_length > IN6_ADDR_MAX_CHAR_LENGTH)
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: too long.",
- str_length, str));
- return false;
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: too long.",
+ (int) str_length, str));
+ return true;
}
- memset(ipv6_address, 0, IN6_ADDR_SIZE);
+ memset(m_buffer, 0, sizeof(m_buffer));
const char *p= str;
@@ -367,21 +528,21 @@ static bool str_to_ipv6(const char *str, int str_length, in6_addr *ipv6_address)
if (*p != ':')
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: "
- "can not start with ':x'.", str_length, str));
- return false;
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: "
+ "can not start with ':x'.", (int) str_length, str));
+ return true;
}
}
- char *ipv6_bytes= (char *) ipv6_address;
- char *ipv6_bytes_end= ipv6_bytes + IN6_ADDR_SIZE;
- char *dst= ipv6_bytes;
+ const char *str_end= str + str_length;
+ char *ipv6_bytes_end= m_buffer + sizeof(m_buffer);
+ char *dst= m_buffer;
char *gap_ptr= NULL;
const char *group_start_ptr= p;
int chars_in_group= 0;
int group_value= 0;
- while (((p - str) < str_length) && *p)
+ while (p < str_end && *p)
{
char c= *p++;
@@ -393,27 +554,27 @@ static bool str_to_ipv6(const char *str, int str_length, in6_addr *ipv6_address)
{
if (gap_ptr)
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: "
- "too many gaps(::).", str_length, str));
- return false;
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: "
+ "too many gaps(::).", (int) str_length, str));
+ return true;
}
gap_ptr= dst;
continue;
}
- if (!*p || ((p - str) >= str_length))
+ if (!*p || p >= str_end)
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: "
- "ending at ':'.", str_length, str));
- return false;
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: "
+ "ending at ':'.", (int) str_length, str));
+ return true;
}
if (dst + 2 > ipv6_bytes_end)
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: "
- "too many groups (1).", str_length, str));
- return false;
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: "
+ "too many groups (1).", (int) str_length, str));
+ return true;
}
dst[0]= (unsigned char) (group_value >> 8) & 0xff;
@@ -427,20 +588,21 @@ static bool str_to_ipv6(const char *str, int str_length, in6_addr *ipv6_address)
{
if (dst + IN_ADDR_SIZE > ipv6_bytes_end)
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: "
- "unexpected IPv4-part.", str_length, str));
- return false;
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: "
+ "unexpected IPv4-part.", (int) str_length, str));
+ return true;
}
- if (!str_to_ipv4(group_start_ptr,
- str + str_length - group_start_ptr,
- (in_addr *) dst))
+ Inet4_null tmp(group_start_ptr, (size_t) (str_end - group_start_ptr),
+ &my_charset_latin1);
+ if (tmp.is_null())
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: "
- "invalid IPv4-part.", str_length, str));
- return false;
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: "
+ "invalid IPv4-part.", (int) str_length, str));
+ return true;
}
+ tmp.to_binary(dst, IN_ADDR_SIZE);
dst += IN_ADDR_SIZE;
chars_in_group= 0;
@@ -452,18 +614,18 @@ static bool str_to_ipv6(const char *str, int str_length, in6_addr *ipv6_address)
if (!hdp)
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: "
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: "
"invalid character at pos %d.",
- str_length, str, (int) (p - str)));
- return false;
+ (int) str_length, str, (int) (p - str)));
+ return true;
}
if (chars_in_group >= 4)
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: "
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: "
"too many digits in group.",
- str_length, str));
- return false;
+ (int) str_length, str));
+ return true;
}
group_value <<= 4;
@@ -479,9 +641,9 @@ static bool str_to_ipv6(const char *str, int str_length, in6_addr *ipv6_address)
{
if (dst + 2 > ipv6_bytes_end)
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: "
- "too many groups (2).", str_length, str));
- return false;
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: "
+ "too many groups (2).", (int) str_length, str));
+ return true;
}
dst[0]= (unsigned char) (group_value >> 8) & 0xff;
@@ -493,9 +655,9 @@ static bool str_to_ipv6(const char *str, int str_length, in6_addr *ipv6_address)
{
if (dst == ipv6_bytes_end)
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: "
- "no room for a gap (::).", str_length, str));
- return false;
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: "
+ "no room for a gap (::).", (int) str_length, str));
+ return true;
}
int bytes_to_move= (int)(dst - gap_ptr);
@@ -511,50 +673,49 @@ static bool str_to_ipv6(const char *str, int str_length, in6_addr *ipv6_address)
if (dst < ipv6_bytes_end)
{
- DBUG_PRINT("error", ("str_to_ipv6(%.*s): invalid IPv6 address: "
- "too few groups.", str_length, str));
- return false;
+ DBUG_PRINT("error", ("ascii_to_ipv6(%.*s): invalid IPv6 address: "
+ "too few groups.", (int) str_length, str));
+ return true;
}
- return true;
+ return false;
}
-///////////////////////////////////////////////////////////////////////////
/**
Converts IPv4-binary-address to a string. This function is a portable
alternative to inet_ntop(AF_INET).
@param[in] ipv4 IPv4-address data (byte array)
- @param[out] str A buffer to store string representation of IPv4-address.
- It must be at least of INET_ADDRSTRLEN.
+ @param[out] dst A buffer to store string representation of IPv4-address.
+  @param[in]  dstsize     Number of bytes available in "dst".
@note The problem with inet_ntop() is that it is available starting from
Windows Vista, but the minimum supported version is Windows 2000.
*/
-static void ipv4_to_str(const in_addr *ipv4, char *str)
+size_t Inet4::to_string(char *dst, size_t dstsize) const
{
- const unsigned char *ipv4_bytes= (const unsigned char *) ipv4;
-
- sprintf(str, "%d.%d.%d.%d",
- ipv4_bytes[0], ipv4_bytes[1], ipv4_bytes[2], ipv4_bytes[3]);
+ return (size_t) my_snprintf(dst, dstsize, "%d.%d.%d.%d",
+ (uchar) m_buffer[0], (uchar) m_buffer[1],
+ (uchar) m_buffer[2], (uchar) m_buffer[3]);
}
-///////////////////////////////////////////////////////////////////////////
+
/**
Converts IPv6-binary-address to a string. This function is a portable
alternative to inet_ntop(AF_INET6).
@param[in] ipv6 IPv6-address data (byte array)
- @param[out] str A buffer to store string representation of IPv6-address.
+ @param[out] dst A buffer to store string representation of IPv6-address.
It must be at least of INET6_ADDRSTRLEN.
+ @param[in] dstsize Number of bytes available in "dst".
@note The problem with inet_ntop() is that it is available starting from
Windows Vista, but the minimum supported version is Windows 2000.
*/
-static void ipv6_to_str(const in6_addr *ipv6, char *str)
+size_t Inet6::to_string(char *dst, size_t dstsize) const
{
struct Region
{
@@ -562,6 +723,8 @@ static void ipv6_to_str(const in6_addr *ipv6, char *str)
int length;
};
+ const char *ipv6= m_buffer;
+ char *dstend= dst + dstsize;
const unsigned char *ipv6_bytes= (const unsigned char *) ipv6;
// 1. Translate IPv6-address bytes to words.
@@ -570,7 +733,8 @@ static void ipv6_to_str(const in6_addr *ipv6, char *str)
uint16 ipv6_words[IN6_ADDR_NUM_WORDS];
- for (int i= 0; i < IN6_ADDR_NUM_WORDS; ++i)
+ DBUG_ASSERT(dstsize > 0); // Need a space at least for the trailing '\0'
+ for (size_t i= 0; i < IN6_ADDR_NUM_WORDS; ++i)
ipv6_words[i]= (ipv6_bytes[2 * i] << 8) + ipv6_bytes[2 * i + 1];
// 2. Find "the gap" -- longest sequence of zeros in IPv6-address.
@@ -580,7 +744,7 @@ static void ipv6_to_str(const in6_addr *ipv6, char *str)
{
Region rg= { -1, -1 };
- for (int i = 0; i < IN6_ADDR_NUM_WORDS; ++i)
+ for (size_t i= 0; i < IN6_ADDR_NUM_WORDS; ++i)
{
if (ipv6_words[i] != 0)
{
@@ -601,7 +765,7 @@ static void ipv6_to_str(const in6_addr *ipv6, char *str)
}
else
{
- rg.pos= i;
+ rg.pos= (int) i;
rg.length= 1;
}
}
@@ -616,10 +780,14 @@ static void ipv6_to_str(const in6_addr *ipv6, char *str)
// 3. Convert binary data to string.
- char *p= str;
+ char *p= dst;
- for (int i = 0; i < IN6_ADDR_NUM_WORDS; ++i)
+ for (int i= 0; i < (int) IN6_ADDR_NUM_WORDS; ++i)
{
+ DBUG_ASSERT(dstend >= p);
+ size_t dstsize_available= dstend - p;
+ if (dstsize_available < 5)
+ break;
if (i == gap.pos)
{
// We're at the gap position. We should put trailing ':' and jump to
@@ -646,10 +814,11 @@ static void ipv6_to_str(const in6_addr *ipv6, char *str)
{
// The data represents either IPv4-compatible or IPv4-mapped address.
// The IPv6-part (zeros or zeros + ffff) has been already put into
- // the string (str). Now it's time to dump IPv4-part.
+ // the string (dst). Now it's time to dump IPv4-part.
- ipv4_to_str((const in_addr *) (ipv6_bytes + 12), p);
- return;
+ return (size_t) (p - dst) +
+ Inet4_null((const char *) (ipv6_bytes + 12), 4).
+ to_string(p, dstsize_available);
}
else
{
@@ -660,7 +829,7 @@ static void ipv6_to_str(const in6_addr *ipv6, char *str)
p += sprintf(p, "%x", ipv6_words[i]);
- if (i != IN6_ADDR_NUM_WORDS - 1)
+ if (i + 1 != IN6_ADDR_NUM_WORDS)
{
*p= ':';
++p;
@@ -669,6 +838,7 @@ static void ipv6_to_str(const in6_addr *ipv6, char *str)
}
*p= 0;
+ return (size_t) (p - dst);
}
///////////////////////////////////////////////////////////////////////////
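The formatter above follows the usual IPv6 shortening rules: split the 16 bytes into eight 16-bit words, find the longest run of zero words and print it as "::", and fall back to the dotted IPv4 form for the last four bytes of v4-compatible/v4-mapped addresses. A minimal standalone sketch of the zero-run compression (illustrative only, not the server's Inet6::to_string, which additionally enforces the dstsize bound and the IPv4 tail handling shown above):

    // Illustrative only: compress eight 16-bit IPv6 words, replacing the
    // longest run (>= 2) of zero words with "::".
    #include <cstdint>
    #include <cstdio>
    #include <string>

    static std::string toy_ipv6_format(const uint16_t words[8])
    {
      int best_pos= -1, best_len= 0;
      for (int i= 0; i < 8; )                  // find the longest zero run
      {
        if (words[i] != 0) { i++; continue; }
        int j= i;
        while (j < 8 && words[j] == 0)
          j++;
        if (j - i > best_len) { best_pos= i; best_len= j - i; }
        i= j;
      }
      if (best_len < 2)                        // a lone zero word stays "0"
        best_pos= -1;
      std::string out;
      char buf[8];
      for (int i= 0; i < 8; i++)
      {
        if (i == best_pos)
        {
          out+= (i == 0) ? "::" : ":";         // a leading gap needs both colons
          i+= best_len - 1;                    // skip the run; the loop adds one
          if (i == 7)
            break;                             // gap reaches the end: done
          continue;
        }
        snprintf(buf, sizeof(buf), "%x", (unsigned) words[i]);
        out+= buf;
        if (i != 7)
          out+= ':';
      }
      return out;          // e.g. {0x2001,0xdb8,0,0,0,0,0,1} -> "2001:db8::1"
    }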
@@ -676,161 +846,122 @@ static void ipv6_to_str(const in6_addr *ipv6, char *str)
/**
Converts IP-address-string to IP-address-data.
- @param arg IP-address-string.
- @param [out] buffer Buffer to store IP-address-data.
+ ipv4-string -> varbinary(4)
+ ipv6-string -> varbinary(16)
@return Completion status.
- @retval false Given string does not represent an IP-address.
- @retval true The string has been converted sucessfully.
+ @retval NULL Given string does not represent an IP-address.
+ @retval !NULL The string has been converted successfully.
*/
-bool Item_func_inet6_aton::calc_value(const String *arg, String *buffer)
+String *Item_func_inet6_aton::val_str(String *buffer)
{
- // ipv4-string -> varbinary(4)
- // ipv6-string -> varbinary(16)
+ DBUG_ASSERT(fixed);
- in_addr ipv4_address;
- in6_addr ipv6_address;
+ Ascii_ptr_and_buffer<STRING_BUFFER_USUAL_SIZE> tmp(args[0]);
+ if ((null_value= tmp.is_null()))
+ return NULL;
- if (str_to_ipv4(arg->ptr(), arg->length(), &ipv4_address))
+ Inet4_null ipv4(*tmp.string());
+ if (!ipv4.is_null())
{
- buffer->length(0);
- buffer->append((char *) &ipv4_address, sizeof (in_addr), &my_charset_bin);
-
- return true;
+ ipv4.to_binary(buffer);
+ return buffer;
}
- if (str_to_ipv6(arg->ptr(), arg->length(), &ipv6_address))
+ Inet6_null ipv6(*tmp.string());
+ if (!ipv6.is_null())
{
- buffer->length(0);
- buffer->append((char *) &ipv6_address, sizeof (in6_addr), &my_charset_bin);
-
- return true;
+ ipv6.to_binary(buffer);
+ return buffer;
}
- return false;
+ null_value= true;
+ return NULL;
}
-///////////////////////////////////////////////////////////////////////////
/**
Converts IP-address-data to IP-address-string.
-
- @param arg IP-address-data.
- @param [out] buffer Buffer to store IP-address-string.
-
- @return Completion status.
- @retval false The argument does not correspond to IP-address.
- @retval true The string has been converted sucessfully.
*/
-bool Item_func_inet6_ntoa::calc_value(const String *arg, String *buffer)
+String *Item_func_inet6_ntoa::val_str_ascii(String *buffer)
{
- if (arg->charset() != &my_charset_bin)
- return false;
+ DBUG_ASSERT(fixed);
- if ((int) arg->length() == IN_ADDR_SIZE)
+ // Binary string argument expected
+ if (unlikely(args[0]->result_type() != STRING_RESULT ||
+ args[0]->collation.collation != &my_charset_bin))
{
- char str[INET_ADDRSTRLEN];
-
- ipv4_to_str((const in_addr *) arg->ptr(), str);
-
- buffer->length(0);
- buffer->append(str, (uint32) strlen(str), &my_charset_latin1);
-
- return true;
+ null_value= true;
+ return NULL;
}
- else if ((int) arg->length() == IN6_ADDR_SIZE)
- {
- char str[INET6_ADDRSTRLEN];
- ipv6_to_str((const in6_addr *) arg->ptr(), str);
+ String_ptr_and_buffer<STRING_BUFFER_USUAL_SIZE> tmp(args[0]);
+ if ((null_value= tmp.is_null()))
+ return NULL;
- buffer->length(0);
- buffer->append(str, (uint32) strlen(str), &my_charset_latin1);
+ Inet4_null ipv4(static_cast<const Binary_string&>(*tmp.string()));
+ if (!ipv4.is_null())
+ {
+ ipv4.to_string(buffer);
+ return buffer;
+ }
- return true;
+ Inet6_null ipv6(static_cast<const Binary_string&>(*tmp.string()));
+ if (!ipv6.is_null())
+ {
+ ipv6.to_string(buffer);
+ return buffer;
}
- DBUG_PRINT("info",
- ("INET6_NTOA(): varbinary(4) or varbinary(16) expected."));
- return false;
+ DBUG_PRINT("info", ("INET6_NTOA(): varbinary(4) or varbinary(16) expected."));
+ null_value= true;
+ return NULL;
}
-///////////////////////////////////////////////////////////////////////////
/**
Checks if the passed string represents an IPv4-address.
-
- @param arg The string to check.
-
- @return Check status.
- @retval false The passed string does not represent an IPv4-address.
- @retval true The passed string represents an IPv4-address.
*/
-bool Item_func_is_ipv4::calc_value(const String *arg)
+longlong Item_func_is_ipv4::val_int()
{
- in_addr ipv4_address;
-
- return str_to_ipv4(arg->ptr(), arg->length(), &ipv4_address);
+ DBUG_ASSERT(fixed);
+ String_ptr_and_buffer<STRING_BUFFER_USUAL_SIZE> tmp(args[0]);
+ return !tmp.is_null() && !Inet4_null(*tmp.string()).is_null();
}
-///////////////////////////////////////////////////////////////////////////
/**
Checks if the passed string represents an IPv6-address.
-
- @param arg The string to check.
-
- @return Check status.
- @retval false The passed string does not represent an IPv6-address.
- @retval true The passed string represents an IPv6-address.
*/
-bool Item_func_is_ipv6::calc_value(const String *arg)
+longlong Item_func_is_ipv6::val_int()
{
- in6_addr ipv6_address;
-
- return str_to_ipv6(arg->ptr(), arg->length(), &ipv6_address);
+ DBUG_ASSERT(fixed);
+ String_ptr_and_buffer<STRING_BUFFER_USUAL_SIZE> tmp(args[0]);
+ return !tmp.is_null() && !Inet6_null(*tmp.string()).is_null();
}
-///////////////////////////////////////////////////////////////////////////
/**
Checks if the passed IPv6-address is an IPv4-compat IPv6-address.
-
- @param arg The IPv6-address to check.
-
- @return Check status.
- @retval false The passed IPv6-address is not an IPv4-compatible IPv6-address.
- @retval true The passed IPv6-address is an IPv4-compatible IPv6-address.
*/
-bool Item_func_is_ipv4_compat::calc_value(const String *arg)
+longlong Item_func_is_ipv4_compat::val_int()
{
- if ((int) arg->length() != IN6_ADDR_SIZE || arg->charset() != &my_charset_bin)
- return false;
-
- return IN6_IS_ADDR_V4COMPAT((struct in6_addr *) arg->ptr());
+ Inet6_null ip6(args[0]);
+ return !ip6.is_null() && ip6.is_v4compat();
}
-///////////////////////////////////////////////////////////////////////////
/**
Checks if the passed IPv6-address is an IPv4-mapped IPv6-address.
-
- @param arg The IPv6-address to check.
-
- @return Check status.
- @retval false The passed IPv6-address is not an IPv4-mapped IPv6-address.
- @retval true The passed IPv6-address is an IPv4-mapped IPv6-address.
*/
-bool Item_func_is_ipv4_mapped::calc_value(const String *arg)
+longlong Item_func_is_ipv4_mapped::val_int()
{
- if ((int) arg->length() != IN6_ADDR_SIZE || arg->charset() != &my_charset_bin)
- return false;
-
- return IN6_IS_ADDR_V4MAPPED((struct in6_addr *) arg->ptr());
+ Inet6_null ip6(args[0]);
+ return !ip6.is_null() && ip6.is_v4mapped();
}
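The pattern throughout this file is the same: instead of the old calc_value() virtuals, each SQL function builds a value object (Inet4_null / Inet6_null) whose constructor parses the argument and whose is_null() reports failure, then asks it for the binary or string form. A hypothetical standalone sketch of that parse-into-a-nullable-object shape (Ipv4 below is illustrative only; it is not the server's Inet4_null and does no character-set handling):

    // Sketch: constructor parses, is_null() reports failure, accessors
    // expose the parsed value. Names here are made up for illustration.
    #include <cstdio>

    class Ipv4
    {
      unsigned char m_buf[4];
      bool m_null;                       // true when parsing failed
    public:
      explicit Ipv4(const char *str) : m_null(true)
      {
        unsigned b[4];
        char tail;
        if (sscanf(str, "%u.%u.%u.%u%c", &b[0], &b[1], &b[2], &b[3], &tail) == 4 &&
            b[0] < 256 && b[1] < 256 && b[2] < 256 && b[3] < 256)
        {
          for (int i= 0; i < 4; i++)
            m_buf[i]= (unsigned char) b[i];
          m_null= false;
        }
      }
      bool is_null() const { return m_null; }
      const unsigned char *binary() const { return m_buf; }
    };

    int main()
    {
      Ipv4 ok("192.0.2.1"), bad("not-an-ip");
      printf("%d %d\n", ok.is_null(), bad.is_null());   // prints: 0 1
      return 0;
    }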
diff --git a/sql/item_inetfunc.h b/sql/item_inetfunc.h
index 024ff8ce4f0..feeac9fa457 100644
--- a/sql/item_inetfunc.h
+++ b/sql/item_inetfunc.h
@@ -81,33 +81,7 @@ public:
{
null_value= false;
}
-
-public:
- virtual longlong val_int();
bool need_parentheses_in_default() { return false; }
-
-protected:
- virtual bool calc_value(const String *arg) = 0;
-};
-
-
-/*************************************************************************
- Item_func_inet_str_base implements common code for INET6/IP-related
- functions returning string value.
-*************************************************************************/
-
-class Item_func_inet_str_base : public Item_str_ascii_func
-{
-public:
- inline Item_func_inet_str_base(THD *thd, Item *arg):
- Item_str_ascii_func(thd, arg)
- { }
-
-public:
- virtual String *val_str_ascii(String *buffer);
-
-protected:
- virtual bool calc_value(const String *arg, String *buffer) = 0;
};
@@ -115,11 +89,11 @@ protected:
Item_func_inet6_aton implements INET6_ATON() SQL-function.
*************************************************************************/
-class Item_func_inet6_aton : public Item_func_inet_str_base
+class Item_func_inet6_aton : public Item_str_func
{
public:
inline Item_func_inet6_aton(THD *thd, Item *ip_addr):
- Item_func_inet_str_base(thd, ip_addr)
+ Item_str_func(thd, ip_addr)
{ }
public:
@@ -136,8 +110,7 @@ public:
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_inet6_aton>(thd, this); }
-protected:
- virtual bool calc_value(const String *arg, String *buffer);
+ String *val_str(String *to);
};
@@ -145,11 +118,11 @@ protected:
Item_func_inet6_ntoa implements INET6_NTOA() SQL-function.
*************************************************************************/
-class Item_func_inet6_ntoa : public Item_func_inet_str_base
+class Item_func_inet6_ntoa : public Item_str_ascii_func
{
public:
inline Item_func_inet6_ntoa(THD *thd, Item *ip_addr):
- Item_func_inet_str_base(thd, ip_addr)
+ Item_str_ascii_func(thd, ip_addr)
{ }
public:
@@ -168,11 +141,9 @@ public:
maybe_null= 1;
return FALSE;
}
+ String *val_str_ascii(String *to);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_inet6_ntoa>(thd, this); }
-
-protected:
- virtual bool calc_value(const String *arg, String *buffer);
};
@@ -193,8 +164,7 @@ public:
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_is_ipv4>(thd, this); }
-protected:
- virtual bool calc_value(const String *arg);
+ longlong val_int();
};
@@ -209,14 +179,12 @@ public:
Item_func_inet_bool_base(thd, ip_addr)
{ }
-public:
virtual const char *func_name() const
{ return "is_ipv6"; }
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_is_ipv6>(thd, this); }
-protected:
- virtual bool calc_value(const String *arg);
+ longlong val_int();
};
@@ -230,15 +198,11 @@ public:
inline Item_func_is_ipv4_compat(THD *thd, Item *ip_addr):
Item_func_inet_bool_base(thd, ip_addr)
{ }
-
-public:
virtual const char *func_name() const
{ return "is_ipv4_compat"; }
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_is_ipv4_compat>(thd, this); }
-
-protected:
- virtual bool calc_value(const String *arg);
+ longlong val_int();
};
@@ -252,15 +216,11 @@ public:
inline Item_func_is_ipv4_mapped(THD *thd, Item *ip_addr):
Item_func_inet_bool_base(thd, ip_addr)
{ }
-
-public:
virtual const char *func_name() const
{ return "is_ipv4_mapped"; }
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_is_ipv4_mapped>(thd, this); }
-
-protected:
- virtual bool calc_value(const String *arg);
+ longlong val_int();
};
#endif // ITEM_INETFUNC_INCLUDED
diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc
index 960a6e9ccc8..40237ab46a6 100644
--- a/sql/item_jsonfunc.cc
+++ b/sql/item_jsonfunc.cc
@@ -374,17 +374,11 @@ static int path_setup_nwc(json_path_t *p, CHARSET_INFO *i_cs,
longlong Item_func_json_valid::val_int()
{
String *js= args[0]->val_json(&tmp_value);
- json_engine_t je;
if ((null_value= args[0]->null_value))
return 0;
- json_scan_start(&je, js->charset(), (const uchar *) js->ptr(),
- (const uchar *) js->ptr()+js->length());
-
- while (json_scan_next(&je) == 0) {}
-
- return je.s.error == 0;
+ return json_valid(js->ptr(), js->length(), js->charset());
}
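The new body delegates to json_valid(), which amounts to the same check the removed loop performed: drive the scanner over the whole document and call the value valid only if no error was raised. A toy standalone illustration of that "scan to the end, valid iff no error" shape — it only balances brackets and braces and is not a JSON parser; it merely mirrors the control flow:

    // Toy validity check: consume the whole input, valid iff nothing
    // went wrong along the way and nothing is left open at the end.
    #include <cstddef>

    static bool toy_valid(const char *s, size_t len)
    {
      char stack[64];
      size_t depth= 0;
      for (size_t i= 0; i < len; i++)
      {
        char c= s[i];
        if (c == '[' || c == '{')
        {
          if (depth == sizeof(stack))
            return false;                     // too deep: treat as an error
          stack[depth++]= c;
        }
        else if (c == ']' || c == '}')
        {
          if (depth == 0)
            return false;                     // closing with nothing open
          char open= stack[--depth];
          if ((c == ']') != (open == '['))
            return false;                     // mismatched pair
        }
      }
      return depth == 0;                      // fully consumed, no error
    }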
@@ -1422,7 +1416,7 @@ null_return:
static int append_json_value(String *str, Item *item, String *tmp_val)
{
- if (item->is_bool_type())
+ if (item->type_handler()->is_bool_type())
{
longlong v_int= item->val_int();
const char *t_f;
diff --git a/sql/item_row.cc b/sql/item_row.cc
index 8233ba00f06..665c900cb3a 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -60,7 +60,7 @@ bool Item_row::fix_fields(THD *thd, Item **ref)
}
}
maybe_null|= item->maybe_null;
- with_sum_func= with_sum_func || item->with_sum_func;
+ join_with_sum_func(item);
with_window_func = with_window_func || item->with_window_func;
with_field= with_field || item->with_field;
m_with_subquery|= item->with_subquery();
@@ -91,7 +91,7 @@ void Item_row::cleanup()
{
DBUG_ENTER("Item_row::cleanup");
- Item::cleanup();
+ Item_fixed_hybrid::cleanup();
/* Reset to the original values */
used_tables_and_const_cache_init();
with_null= 0;
diff --git a/sql/item_row.h b/sql/item_row.h
index e0d54403730..4f60a33ab9f 100644
--- a/sql/item_row.h
+++ b/sql/item_row.h
@@ -33,10 +33,11 @@
Item which stores (x,y,...) and ROW(x,y,...).
Note that this can be recursive: ((x,y),(z,t)) is a ROW of ROWs.
*/
-class Item_row: public Item,
+class Item_row: public Item_fixed_hybrid,
private Item_args,
private Used_tables_and_const_cache,
- private With_subquery_cache
+ private With_subquery_cache,
+ private With_sum_func_cache
{
table_map not_null_tables_cache;
/**
@@ -45,17 +46,25 @@ class Item_row: public Item,
*/
bool with_null;
public:
- Item_row(THD *thd, List<Item> &list):
- Item(thd), Item_args(thd, list), not_null_tables_cache(0), with_null(0)
+ Item_row(THD *thd, List<Item> &list)
+ :Item_fixed_hybrid(thd), Item_args(thd, list),
+ not_null_tables_cache(0), with_null(0)
{ }
- Item_row(THD *thd, Item_row *row):
- Item(thd), Item_args(thd, static_cast<Item_args*>(row)), Used_tables_and_const_cache(),
+ Item_row(THD *thd, Item_row *row)
+ :Item_fixed_hybrid(thd), Item_args(thd, static_cast<Item_args*>(row)),
+ Used_tables_and_const_cache(),
+ With_sum_func_cache(*row),
not_null_tables_cache(0), with_null(0)
{ }
bool with_subquery() const { DBUG_ASSERT(fixed); return m_with_subquery; }
enum Type type() const { return ROW_ITEM; };
const Type_handler *type_handler() const { return &type_handler_row; }
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ return NULL; // Check with Vicentiu why it's called for Item_row
+ }
void illegal_method_call(const char *);
bool is_null() { return null_value; }
void make_send_field(THD *thd, Send_field *)
@@ -82,7 +91,7 @@ public:
illegal_method_call((const char*)"val_decimal");
return 0;
};
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
illegal_method_call((const char*)"get_date");
return true;
@@ -92,6 +101,8 @@ public:
void cleanup();
void split_sum_func(THD *thd, Ref_ptr_array ref_pointer_array,
List<Item> &fields, uint flags);
+ bool with_sum_func() const { return m_with_sum_func; }
+ With_sum_func_cache* get_with_sum_func_cache() { return this; }
table_map used_tables() const { return used_tables_cache; };
bool const_item() const { return const_item_cache; };
void update_used_tables()
@@ -134,6 +145,11 @@ public:
return Item_args::excl_dep_on_grouping_fields(sel);
}
+ bool excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred)
+ {
+ return Item_args::excl_dep_on_in_subq_left_part(subq_pred);
+ }
+
bool check_vcol_func_processor(void *arg) {return FALSE; }
Item *get_copy(THD *thd)
{ return get_item_copy<Item_row>(thd, this); }
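Item_row now takes its aggregate-function flag from the With_sum_func_cache mixin (private inheritance plus the with_sum_func()/get_with_sum_func_cache() accessors) instead of a loose member, and join_with_sum_func() folds a child's flag into the parent's. A hypothetical sketch of that mixin shape, with made-up names rather than the server's classes:

    // Flag-cache mixin via private inheritance; the derived class decides
    // how much of the cache to expose. Illustrative names only.
    class Flag_cache
    {
    protected:
      bool m_flag= false;
    public:
      void set_flag() { m_flag= true; }
      void join_flag(const Flag_cache &other) { m_flag|= other.m_flag; }
    };

    class Row_like : private Flag_cache
    {
    public:
      bool has_flag() const { return m_flag; }
      Flag_cache *cache() { return this; }         // expose the mixin on demand
      void absorb(const Row_like &child) { join_flag(child); }
    };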
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index d6f3a282671..818914e8df3 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -255,7 +255,7 @@ String *Item_func_sha2::val_str_ascii(String *str)
Since we're subverting the usual String methods, we must make sure that
the destination has space for the bytes we're about to write.
*/
- str->realloc((uint) digest_length*2 + 1); /* Each byte as two nybbles */
+ str->alloc((uint) digest_length*2 + 1); /* Each byte as two nybbles */
/* Convert the large number to a string-hex representation. */
array_to_hex((char *) str->ptr(), digest_buf, (uint)digest_length);
@@ -762,7 +762,7 @@ String *Item_func_des_encrypt::val_str(String *str)
tail= 8 - (res_length % 8); // 1..8 marking extra length
res_length+=tail;
- if (tmp_arg.realloc(res_length))
+ if (tmp_arg.alloc(res_length))
goto error;
tmp_arg.length(0);
tmp_arg.append(res->ptr(), res->length());
@@ -770,7 +770,6 @@ String *Item_func_des_encrypt::val_str(String *str)
if (tmp_arg.append(append_str, tail) || str->alloc(res_length+1))
goto error;
tmp_arg[res_length-1]=tail; // save extra length
- str->realloc(res_length+1);
str->length(res_length+1);
str->set_charset(&my_charset_bin);
(*str)[0]=(char) (128 | key_number);
@@ -1017,7 +1016,7 @@ String *Item_func_concat_ws::val_str(String *str)
{
uint new_len = MY_MAX(tmp_value.alloced_length() * 2, concat_len);
- if (tmp_value.realloc(new_len))
+ if (tmp_value.alloc(new_len))
goto null;
}
}
@@ -1072,8 +1071,7 @@ String *Item_func_reverse::val_str(String *str)
/* An empty string is a special case as the string pointer may be null */
if (!res->length())
return make_empty_result();
- if (str->alloced_length() < res->length() &&
- str->realloc(res->length()))
+ if (str->alloc(res->length()))
{
null_value= 1;
return 0;
@@ -2659,12 +2657,10 @@ String *Item_func_format::val_str_ascii(String *str)
if (args[0]->result_type() == DECIMAL_RESULT ||
args[0]->result_type() == INT_RESULT)
{
- my_decimal dec_val, rnd_dec, *res;
- res= args[0]->val_decimal(&dec_val);
- if ((null_value=args[0]->null_value))
+ VDec res(args[0]);
+ if ((null_value= res.is_null()))
return 0; /* purecov: inspected */
- my_decimal_round(E_DEC_FATAL_ERROR, res, dec, false, &rnd_dec);
- my_decimal2string(E_DEC_FATAL_ERROR, &rnd_dec, 0, 0, 0, str);
+ res.to_string_round(str, dec);
str_length= str->length();
}
else
@@ -4183,7 +4179,7 @@ String *Item_func_compress::val_str(String *str)
// Check new_size overflow: new_size <= res->length()
if (((uint32) (new_size+5) <= res->length()) ||
- str->realloc((uint32) new_size + 4 + 1))
+ str->alloc((uint32) new_size + 4 + 1))
{
null_value= 1;
return 0;
@@ -4255,7 +4251,7 @@ String *Item_func_uncompress::val_str(String *str)
max_allowed_packet));
goto err;
}
- if (str->realloc((uint32)new_size))
+ if (str->alloc((uint32)new_size))
goto err;
if ((err= uncompress((Byte*)str->ptr(), &new_size,
@@ -4284,7 +4280,7 @@ String *Item_func_uuid::val_str(String *str)
DBUG_ASSERT(fixed == 1);
uchar guid[MY_UUID_SIZE];
- str->realloc(MY_UUID_STRING_LENGTH+1);
+ str->alloc(MY_UUID_STRING_LENGTH+1);
str->length(MY_UUID_STRING_LENGTH);
str->set_charset(system_charset_info);
my_uuid(guid);
@@ -4550,11 +4546,11 @@ bool Item_func_dyncol_create::prepare_arguments(THD *thd, bool force_names_arg)
break;
case DYN_COL_DATETIME:
case DYN_COL_DATE:
- args[valpos]->get_date(&vals[i].x.time_value,
- sql_mode_for_dates(thd));
+ args[valpos]->get_date(thd, &vals[i].x.time_value,
+ Datetime::Options(thd));
break;
case DYN_COL_TIME:
- args[valpos]->get_time(&vals[i].x.time_value);
+ args[valpos]->get_time(thd, &vals[i].x.time_value);
break;
default:
DBUG_ASSERT(0);
@@ -5120,7 +5116,7 @@ null:
}
-bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_dyncol_get::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DYNAMIC_COLUMN_VALUE val;
char buff[STRING_BUFFER_USUAL_SIZE];
@@ -5141,10 +5137,8 @@ bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
if (signed_value || val.x.ulong_value <= LONGLONG_MAX)
{
longlong llval = (longlong)val.x.ulong_value;
- bool neg = llval < 0;
- if (int_to_datetime_with_warn(neg, (ulonglong)(neg ? -llval :
- llval),
- ltime, fuzzy_date, 0, 0 /* TODO */))
+ if (int_to_datetime_with_warn(thd, Longlong_hybrid(llval, !signed_value),
+ ltime, fuzzydate, 0, 0 /* TODO */))
goto null;
return 0;
}
@@ -5152,20 +5146,20 @@ bool Item_dyncol_get::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
val.x.double_value= static_cast<double>(ULONGLONG_MAX);
/* fall through */
case DYN_COL_DOUBLE:
- if (double_to_datetime_with_warn(val.x.double_value, ltime, fuzzy_date,
+ if (double_to_datetime_with_warn(thd, val.x.double_value, ltime, fuzzydate,
0, 0 /* TODO */))
goto null;
return 0;
case DYN_COL_DECIMAL:
- if (decimal_to_datetime_with_warn((my_decimal*)&val.x.decimal.value, ltime,
- fuzzy_date, 0, 0 /* TODO */))
+ if (decimal_to_datetime_with_warn(thd, (my_decimal*)&val.x.decimal.value,
+ ltime, fuzzydate, 0, 0 /* TODO */))
goto null;
return 0;
case DYN_COL_STRING:
- if (str_to_datetime_with_warn(&my_charset_numeric,
+ if (str_to_datetime_with_warn(thd, &my_charset_numeric,
val.x.string.value.str,
val.x.string.value.length,
- ltime, fuzzy_date))
+ ltime, fuzzydate))
goto null;
return 0;
case DYN_COL_DATETIME:
@@ -5273,3 +5267,102 @@ String *Item_temptable_rowid::val_str(String *str)
str_value.set((char*)(table->file->ref), max_length, &my_charset_bin);
return &str_value;
}
+#ifdef WITH_WSREP
+
+#include "wsrep_mysqld.h"
+
+String *Item_func_wsrep_last_written_gtid::val_str_ascii(String *str)
+{
+ wsrep::gtid gtid= current_thd->wsrep_cs().last_written_gtid();
+ if (gtid_str.alloc(wsrep::gtid_c_str_len()))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), wsrep::gtid_c_str_len());
+ null_value= true;
+ return NULL;
+ }
+
+ ssize_t gtid_len= gtid_print_to_c_str(gtid, (char*) gtid_str.ptr(),
+ wsrep::gtid_c_str_len());
+ if (gtid_len < 0)
+ {
+ my_error(ER_ERROR_WHEN_EXECUTING_COMMAND, MYF(0), func_name(),
+ "wsrep_gtid_print failed");
+ null_value= true;
+ return NULL;
+ }
+ gtid_str.length(gtid_len);
+ return &gtid_str;
+}
+
+String *Item_func_wsrep_last_seen_gtid::val_str_ascii(String *str)
+{
+ /* TODO: Should call Wsrep_server_state.instance().last_committed_gtid()
+ instead. */
+ wsrep::gtid gtid= Wsrep_server_state::instance().provider().last_committed_gtid();
+ if (gtid_str.alloc(wsrep::gtid_c_str_len()))
+ {
+ my_error(ER_OUTOFMEMORY, MYF(0), wsrep::gtid_c_str_len());
+ null_value= true;
+ return NULL;
+ }
+ ssize_t gtid_len= wsrep::gtid_print_to_c_str(gtid, (char*) gtid_str.ptr(),
+ wsrep::gtid_c_str_len());
+ if (gtid_len < 0)
+ {
+ my_error(ER_ERROR_WHEN_EXECUTING_COMMAND, MYF(0), func_name(),
+ "wsrep_gtid_print failed");
+ null_value= true;
+ return NULL;
+ }
+ gtid_str.length(gtid_len);
+ return &gtid_str;
+}
+
+longlong Item_func_wsrep_sync_wait_upto::val_int()
+{
+ int timeout= -1;
+ String* gtid_str= args[0]->val_str(&value);
+ if (gtid_str == NULL)
+ {
+ my_error(ER_WRONG_ARGUMENTS, MYF(0), func_name());
+ return 0LL;
+ }
+
+ if (arg_count == 2)
+ {
+ timeout= args[1]->val_int();
+ }
+
+ wsrep_gtid_t gtid;
+ int gtid_len= wsrep_gtid_scan(gtid_str->ptr(), gtid_str->length(), &gtid);
+ if (gtid_len < 0)
+ {
+ my_error(ER_WRONG_ARGUMENTS, MYF(0), func_name());
+ return 0LL;
+ }
+
+ if (gtid.seqno == WSREP_SEQNO_UNDEFINED &&
+ wsrep_uuid_compare(&gtid.uuid, &WSREP_UUID_UNDEFINED) == 0)
+ {
+ return 1LL;
+ }
+
+ enum wsrep::provider::status status=
+ wsrep_sync_wait_upto(current_thd, &gtid, timeout);
+
+ if (status)
+ {
+ int err;
+ switch (status) {
+ case wsrep::provider::error_transaction_missing:
+ err= ER_WRONG_ARGUMENTS;
+ break;
+ default:
+ err= ER_LOCK_WAIT_TIMEOUT;
+ }
+ my_error(err, MYF(0), func_name());
+ return 0LL;
+ }
+ return 1LL;
+}
+#endif /* WITH_WSREP */
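The GTID helpers above exchange the textual "<uuid>:<seqno>" form: the *_gtid functions print it into gtid_str, and WSREP_SYNC_WAIT_UPTO_GTID feeds it back through wsrep_gtid_scan() before waiting. A hypothetical standalone parser for that textual shape (it only stands in for what wsrep_gtid_scan() is assumed to do; the real validation lives in the wsrep library):

    // Toy "<uuid>:<seqno>" splitter; Toy_gtid and toy_gtid_scan are
    // illustrative names, not part of the server or the wsrep API.
    #include <cstdio>
    #include <cstring>
    #include <cstdlib>

    struct Toy_gtid { char uuid[37]; long long seqno; };

    static bool toy_gtid_scan(const char *str, Toy_gtid *out)
    {
      const char *colon= strrchr(str, ':');
      if (!colon || colon == str || (size_t)(colon - str) >= sizeof(out->uuid))
        return false;
      memcpy(out->uuid, str, colon - str);
      out->uuid[colon - str]= '\0';
      char *end= NULL;
      out->seqno= strtoll(colon + 1, &end, 10);
      return end && *end == '\0' && end != colon + 1;   // digits present, nothing trailing
    }

    int main()
    {
      Toy_gtid g;
      if (toy_gtid_scan("6aa7fab1-9f4c-11e9-8a22-7e3f41fc9128:1234", &g))
        printf("uuid=%s seqno=%lld\n", g.uuid, g.seqno);
      return 0;
    }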
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 56417f6d638..6207e1a7754 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -62,16 +62,11 @@ public:
longlong val_int();
double val_real();
my_decimal *val_decimal(my_decimal *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date_from_string(ltime, fuzzydate); }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ { return get_date_from_string(thd, ltime, fuzzydate); }
const Type_handler *type_handler() const { return string_type_handler(); }
void left_right_max_length();
bool fix_fields(THD *thd, Item **ref);
- void update_null_value()
- {
- StringBuffer<MAX_FIELD_WIDTH> tmp;
- (void) val_str(&tmp);
- }
};
@@ -1471,11 +1466,11 @@ public:
return NULL;
return res;
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
if (args[0]->result_type() == STRING_RESULT)
- return Item_str_func::get_date(ltime, fuzzydate);
- bool res= args[0]->get_date(ltime, fuzzydate);
+ return Item_str_func::get_date(thd, ltime, fuzzydate);
+ bool res= args[0]->get_date(thd, ltime, fuzzydate);
if ((null_value= args[0]->null_value))
return 1;
return res;
@@ -1770,7 +1765,7 @@ public:
double val_real();
my_decimal *val_decimal(my_decimal *);
bool get_dyn_value(THD *thd, DYNAMIC_COLUMN_VALUE *val, String *tmp);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
void print(String *str, enum_query_type query_type);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_dyncol_get>(thd, this); }
@@ -1810,5 +1805,56 @@ public:
Item *get_copy(THD *thd)
{ return get_item_copy<Item_temptable_rowid>(thd, this); }
};
+#ifdef WITH_WSREP
+
+#include "wsrep_api.h"
+
+class Item_func_wsrep_last_written_gtid: public Item_str_ascii_func
+{
+ String gtid_str;
+public:
+ Item_func_wsrep_last_written_gtid(THD *thd): Item_str_ascii_func(thd) {}
+ const char *func_name() const { return "wsrep_last_written_gtid"; }
+ String *val_str_ascii(String *);
+ bool fix_length_and_dec()
+ {
+ max_length= WSREP_GTID_STR_LEN;
+ maybe_null= true;
+ return FALSE;
+ }
+ Item *get_copy(THD *thd)
+ { return get_item_copy<Item_func_wsrep_last_written_gtid>(thd, this); }
+};
+
+class Item_func_wsrep_last_seen_gtid: public Item_str_ascii_func
+{
+ String gtid_str;
+public:
+ Item_func_wsrep_last_seen_gtid(THD *thd): Item_str_ascii_func(thd) {}
+ const char *func_name() const { return "wsrep_last_seen_gtid"; }
+ String *val_str_ascii(String *);
+ bool fix_length_and_dec()
+ {
+ max_length= WSREP_GTID_STR_LEN;
+ maybe_null= true;
+ return FALSE;
+ }
+ Item *get_copy(THD *thd)
+ { return get_item_copy<Item_func_wsrep_last_seen_gtid>(thd, this); }
+};
+
+class Item_func_wsrep_sync_wait_upto: public Item_int_func
+{
+ String value;
+public:
+ Item_func_wsrep_sync_wait_upto(THD *thd, Item *a): Item_int_func(thd, a) {}
+ Item_func_wsrep_sync_wait_upto(THD *thd, Item *a, Item* b): Item_int_func(thd, a, b) {}
+ const Type_handler *type_handler() const { return &type_handler_string; }
+ const char *func_name() const { return "wsrep_sync_wait_upto_gtid"; }
+ longlong val_int();
+ Item *get_copy(THD *thd)
+ { return get_item_copy<Item_func_wsrep_sync_wait_upto>(thd, this); }
+};
+#endif /* WITH_WSREP */
#endif /* ITEM_STRFUNC_INCLUDED */
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 99bba5a98d5..e32a730214a 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -54,7 +54,8 @@ Item_subselect::Item_subselect(THD *thd_arg):
value_assigned(0), own_engine(0), thd(0), old_engine(0),
have_to_be_excluded(0),
inside_first_fix_fields(0), done_first_fix_fields(FALSE),
- expr_cache(0), forced_const(FALSE), substitution(0), engine(0), eliminated(FALSE),
+ expr_cache(0), forced_const(FALSE), expensive_fl(FALSE),
+ substitution(0), engine(0), eliminated(FALSE),
changed(0), is_correlated(FALSE), with_recursive_reference(0)
{
DBUG_ENTER("Item_subselect::Item_subselect");
@@ -85,6 +86,9 @@ void Item_subselect::init(st_select_lex *select_lex,
DBUG_ENTER("Item_subselect::init");
DBUG_PRINT("enter", ("select_lex: %p this: %p",
select_lex, this));
+
+ select_lex->parent_lex->relink_hack(select_lex);
+
unit= select_lex->master_unit();
if (unit->item)
@@ -123,13 +127,6 @@ void Item_subselect::init(st_select_lex *select_lex,
else
engine= new subselect_single_select_engine(select_lex, result, this);
}
- {
- SELECT_LEX *upper= unit->outer_select();
- if (upper->parsing_place == IN_HAVING)
- upper->subquery_in_having= 1;
- /* The subquery is an expression cache candidate */
- upper->expr_cache_may_be_used[upper->parsing_place]= TRUE;
- }
DBUG_PRINT("info", ("engine: %p", engine));
DBUG_VOID_RETURN;
}
@@ -220,7 +217,8 @@ Item_subselect::~Item_subselect()
if (own_engine)
delete engine;
else
- engine->cleanup();
+ if (engine) // can be empty in case of EOM
+ engine->cleanup();
engine= NULL;
DBUG_VOID_RETURN;
}
@@ -244,6 +242,14 @@ bool Item_subselect::fix_fields(THD *thd_param, Item **ref)
DBUG_ASSERT(unit->thd == thd);
+ {
+ SELECT_LEX *upper= unit->outer_select();
+ if (upper->parsing_place == IN_HAVING)
+ upper->subquery_in_having= 1;
+ /* The subquery is an expression cache candidate */
+ upper->expr_cache_may_be_used[upper->parsing_place]= TRUE;
+ }
+
status_var_increment(thd_param->status_var.feature_subquery);
DBUG_ASSERT(fixed == 0);
@@ -580,6 +586,9 @@ bool Item_subselect::is_expensive()
double examined_rows= 0;
bool all_are_simple= true;
+ if (!expensive_fl && is_evaluated())
+ return false;
+
/* check extremely simple select */
if (!unit->first_select()->next_select()) // no union
{
@@ -590,7 +599,7 @@ bool Item_subselect::is_expensive()
SELECT_LEX *sl= unit->first_select();
JOIN *join = sl->join;
if (join && !join->tables_list && !sl->first_inner_unit())
- return false;
+ return (expensive_fl= false);
}
@@ -600,14 +609,14 @@ bool Item_subselect::is_expensive()
/* not optimized subquery */
if (!cur_join)
- return true;
+ return (expensive_fl= true);
/*
If the subquery is not optimised or in the process of optimization
it supposed to be expensive
*/
if (cur_join->optimization_state != JOIN::OPTIMIZATION_DONE)
- return true;
+ return (expensive_fl= true);
if (!cur_join->tables_list && !sl->first_inner_unit())
continue;
@@ -629,7 +638,7 @@ bool Item_subselect::is_expensive()
considered optimized if it has a join plan.
*/
if (!cur_join->join_tab)
- return true;
+ return (expensive_fl= true);
if (sl->first_inner_unit())
{
@@ -637,15 +646,15 @@ bool Item_subselect::is_expensive()
Subqueries that contain subqueries are considered expensive.
@todo: accumulate the cost of subqueries.
*/
- return true;
+ return (expensive_fl= true);
}
examined_rows+= cur_join->get_examined_rows();
}
// here we are sure that subquery is optimized so thd is set
- return !all_are_simple &&
- (examined_rows > thd->variables.expensive_subquery_limit);
+ return (expensive_fl= !all_are_simple &&
+ (examined_rows > thd->variables.expensive_subquery_limit));
}
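The new expensive_fl member simply memoizes the verdict: every exit path stores its result, and the early return added at the top of is_expensive() skips the whole plan walk once the subquery has been evaluated and was last judged cheap. A minimal sketch of that caching shape, with illustrative names only:

    // Remember the last verdict so repeated calls can short-circuit once
    // the object has already been evaluated. Not server code.
    struct Costly_check
    {
      bool evaluated= false;
      bool expensive_fl= false;

      bool compute_cost() const { return /* pretend this walks a plan */ false; }

      bool is_expensive()
      {
        if (!expensive_fl && evaluated)   // cheap before and already run:
          return false;                   // skip recomputing the estimate
        return (expensive_fl= compute_cost());
      }
    };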
@@ -945,7 +954,7 @@ bool Item_subselect::const_item() const
Item *Item_subselect::get_tmp_table_item(THD *thd_arg)
{
- if (!with_sum_func && !const_item())
+ if (!Item_subselect::with_sum_func() && !const_item())
return new (thd->mem_root) Item_temptable_field(thd_arg, result_field);
return copy_or_same(thd_arg);
}
@@ -1085,7 +1094,7 @@ void Item_maxmin_subselect::no_rows_in_result()
*/
if (parsing_place != SELECT_LIST || const_item())
return;
- value= (new (thd->mem_root) Item_null(thd))->get_cache(thd);
+ value= get_cache(thd);
null_value= 0;
was_values= 0;
make_const();
@@ -1103,7 +1112,7 @@ void Item_singlerow_subselect::no_rows_in_result()
*/
if (parsing_place != SELECT_LIST || const_item())
return;
- value= (new (thd->mem_root) Item_null(thd))->get_cache(thd);
+ value= get_cache(thd);
reset();
make_const();
}
@@ -1148,7 +1157,7 @@ Item_singlerow_subselect::select_transformer(JOIN *join)
if (!select_lex->master_unit()->is_unit_op() &&
!select_lex->table_list.elements &&
select_lex->item_list.elements == 1 &&
- !select_lex->item_list.head()->with_sum_func &&
+ !select_lex->item_list.head()->with_sum_func() &&
/*
We cant change name of Item_field or Item_ref, because it will
prevent it's correct resolving, but we should save name of
@@ -1353,6 +1362,24 @@ String *Item_singlerow_subselect::val_str(String *str)
}
+bool Item_singlerow_subselect::val_native(THD *thd, Native *to)
+{
+ DBUG_ASSERT(fixed == 1);
+ if (forced_const)
+ return value->val_native(thd, to);
+ if (!exec() && !value->null_value)
+ {
+ null_value= false;
+ return value->val_native(thd, to);
+ }
+ else
+ {
+ reset();
+ return true;
+ }
+}
+
+
my_decimal *Item_singlerow_subselect::val_decimal(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
@@ -1389,15 +1416,15 @@ bool Item_singlerow_subselect::val_bool()
}
-bool Item_singlerow_subselect::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate)
+bool Item_singlerow_subselect::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed == 1);
if (forced_const)
- return value->get_date(ltime, fuzzydate);
+ return value->get_date(thd, ltime, fuzzydate);
if (!exec() && !value->null_value)
{
null_value= FALSE;
- return value->get_date(ltime, fuzzydate);
+ return value->get_date(thd, ltime, fuzzydate);
}
else
{
@@ -1413,6 +1440,8 @@ Item_exists_subselect::Item_exists_subselect(THD *thd,
emb_on_expr_nest(NULL), optimizer(0), exists_transformed(0)
{
DBUG_ENTER("Item_exists_subselect::Item_exists_subselect");
+
+
init(select_lex, new (thd->mem_root) select_exists_subselect(thd, this));
max_columns= UINT_MAX;
null_value= FALSE; //can't be NULL
@@ -1455,6 +1484,7 @@ Item_in_subselect::Item_in_subselect(THD *thd, Item * left_exp,
{
DBUG_ENTER("Item_in_subselect::Item_in_subselect");
DBUG_PRINT("info", ("in_strategy: %u", (uint)in_strategy));
+
left_expr_orig= left_expr= left_exp;
/* prepare to possible disassembling the item in convert_subq_to_sj() */
if (left_exp->type() == Item::ROW_ITEM)
@@ -2044,7 +2074,7 @@ bool Item_in_subselect::fix_having(Item *having, SELECT_LEX *select_lex)
{
bool fix_res= 0;
DBUG_ASSERT(thd);
- if (!having->fixed)
+ if (!having->is_fixed())
{
select_lex->having_fix_field= 1;
fix_res= having->fix_fields(thd, 0);
@@ -2381,9 +2411,9 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join,
Item *item_having_part2= 0;
for (uint i= 0; i < cols_num; i++)
{
- DBUG_ASSERT((left_expr->fixed &&
+ DBUG_ASSERT((left_expr->is_fixed() &&
- select_lex->ref_pointer_array[i]->fixed) ||
+ select_lex->ref_pointer_array[i]->is_fixed()) ||
(select_lex->ref_pointer_array[i]->type() == REF_ITEM &&
((Item_ref*)(select_lex->ref_pointer_array[i]))->ref_type() ==
Item_ref::OUTER_REF));
@@ -2452,8 +2482,8 @@ Item_in_subselect::create_row_in_to_exists_cond(JOIN * join,
for (uint i= 0; i < cols_num; i++)
{
Item *item, *item_isnull;
- DBUG_ASSERT((left_expr->fixed &&
- select_lex->ref_pointer_array[i]->fixed) ||
+ DBUG_ASSERT((left_expr->is_fixed() &&
+ select_lex->ref_pointer_array[i]->is_fixed()) ||
(select_lex->ref_pointer_array[i]->type() == REF_ITEM &&
((Item_ref*)(select_lex->ref_pointer_array[i]))->ref_type() ==
Item_ref::OUTER_REF));
@@ -3252,7 +3282,8 @@ out:
void Item_in_subselect::print(String *str, enum_query_type query_type)
{
- if (test_strategy(SUBS_IN_TO_EXISTS))
+ if (test_strategy(SUBS_IN_TO_EXISTS) &&
+ !(query_type & QT_PARSABLE))
str->append(STRING_WITH_LEN("<exists>"));
else
{
@@ -3479,7 +3510,8 @@ Item_allany_subselect::select_transformer(JOIN *join)
void Item_allany_subselect::print(String *str, enum_query_type query_type)
{
- if (test_strategy(SUBS_IN_TO_EXISTS))
+ if (test_strategy(SUBS_IN_TO_EXISTS) &&
+ !(query_type & QT_PARSABLE))
str->append(STRING_WITH_LEN("<exists>"));
else
{
@@ -5793,7 +5825,7 @@ int
Ordered_key::cmp_keys_by_row_data(ha_rows a, ha_rows b)
{
uchar *rowid_a, *rowid_b;
- int __attribute__((unused)) error;
+ int error;
int cmp_res;
/* The length in bytes of the rowids (positions) of tmp_table. */
uint rowid_length= tbl->file->ref_length;
@@ -5807,14 +5839,14 @@ Ordered_key::cmp_keys_by_row_data(ha_rows a, ha_rows b)
if (unlikely((error= tbl->file->ha_rnd_pos(tbl->record[0], rowid_a))))
{
/* purecov: begin inspected */
- tbl->file->print_error(error, MYF(ME_FATALERROR)); // Sets fatal_error
+ tbl->file->print_error(error, MYF(ME_FATAL)); // Sets fatal_error
return 0;
/* purecov: end */
}
if (unlikely((error= tbl->file->ha_rnd_pos(tbl->record[1], rowid_b))))
{
/* purecov: begin inspected */
- tbl->file->print_error(error, MYF(ME_FATALERROR)); // Sets fatal_error
+ tbl->file->print_error(error, MYF(ME_FATAL)); // Sets fatal_error
return 0;
/* purecov: end */
}
@@ -5894,13 +5926,13 @@ int Ordered_key::cmp_key_with_search_key(rownum_t row_num)
/* The length in bytes of the rowids (positions) of tmp_table. */
uint rowid_length= tbl->file->ref_length;
uchar *cur_rowid= row_num_to_rowid + row_num * rowid_length;
- int __attribute__((unused)) error;
+ int error;
int cmp_res;
if (unlikely((error= tbl->file->ha_rnd_pos(tbl->record[0], cur_rowid))))
{
/* purecov: begin inspected */
- tbl->file->print_error(error, MYF(ME_FATALERROR)); // Sets fatal_error
+ tbl->file->print_error(error, MYF(ME_FATAL)); // Sets fatal_error
return 0;
/* purecov: end */
}
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index e0b09b9484b..6b66ccc8fe7 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -33,6 +33,7 @@ class subselect_hash_sj_engine;
class Item_bool_func2;
class Comp_creator;
class With_element;
+class Field_pair;
typedef class st_select_lex SELECT_LEX;
@@ -46,7 +47,8 @@ class Cached_item;
/* base class for subselects */
class Item_subselect :public Item_result_field,
- protected Used_tables_and_const_cache
+ protected Used_tables_and_const_cache,
+ protected With_sum_func_cache
{
bool value_assigned; /* value already assigned to subselect */
bool own_engine; /* the engine was not taken from other Item_subselect */
@@ -71,6 +73,8 @@ protected:
to substitute 'this' with a constant item.
*/
bool forced_const;
+ /* Set to the result of the last call of is_expensive() */
+ bool expensive_fl;
#ifndef DBUG_OFF
/* Count the number of times this subquery predicate has been executed. */
uint exec_counter;
@@ -183,6 +187,8 @@ public:
}
bool fix_fields(THD *thd, Item **ref);
bool with_subquery() const { DBUG_ASSERT(fixed); return true; }
+ bool with_sum_func() const { return m_with_sum_func; }
+ With_sum_func_cache* get_with_sum_func_cache() { return this; }
bool mark_as_dependent(THD *thd, st_select_lex *select, Item *item);
void fix_after_pullout(st_select_lex *new_parent, Item **ref, bool merge);
void recalc_used_tables(st_select_lex *new_parent, bool after_pullout);
@@ -302,9 +308,10 @@ public:
double val_real();
longlong val_int ();
String *val_str (String *);
+ bool val_native(THD *thd, Native *);
my_decimal *val_decimal(my_decimal *);
bool val_bool();
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
const Type_handler *type_handler() const;
bool fix_length_and_dec();
@@ -395,14 +402,14 @@ public:
}
void no_rows_in_result();
- const Type_handler *type_handler() const { return &type_handler_longlong; }
+ const Type_handler *type_handler() const { return &type_handler_bool; }
longlong val_int();
double val_real();
String *val_str(String*);
my_decimal *val_decimal(my_decimal *);
bool val_bool();
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
- { return get_date_from_int(ltime, fuzzydate); }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ { return get_date_from_int(thd, ltime, fuzzydate); }
bool fix_fields(THD *thd, Item **ref);
bool fix_length_and_dec();
void print(String *str, enum_query_type query_type);
@@ -570,6 +577,8 @@ public:
*/
bool is_registered_semijoin;
+ List<Field_pair> corresponding_fields;
+
/*
Used to determine how this subselect item is represented in the item tree,
in case there is a need to locate it there and replace with something else.
@@ -621,7 +630,6 @@ public:
double val_real();
String *val_str(String*);
my_decimal *val_decimal(my_decimal *);
- void update_null_value () { (void) val_bool(); }
bool val_bool();
bool test_limit(st_select_lex_unit *unit);
void print(String *str, enum_query_type query_type);
@@ -741,6 +749,8 @@ public:
return 0;
};
+ bool pushdown_cond_for_in_subquery(THD *thd, Item *cond);
+
friend class Item_ref_null_helper;
friend class Item_is_not_null_test;
friend class Item_in_optimizer;
@@ -852,7 +862,6 @@ protected:
bool set_row(List<Item> &item_list, Item_cache **row);
};
-
class subselect_single_select_engine: public subselect_engine
{
bool prepared; /* simple subselect is prepared */
@@ -886,9 +895,10 @@ public:
friend class subselect_hash_sj_engine;
friend class Item_in_subselect;
- friend bool setup_jtbm_semi_joins(JOIN *join, List<TABLE_LIST> *join_list,
- Item **join_where);
-
+ friend bool execute_degenerate_jtbm_semi_join(THD *thd,
+ TABLE_LIST *tbl,
+ Item_in_subselect *subq_pred,
+ List<Item> &eq_list);
};
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index ce0c9d3e944..e1c8af98dd7 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -406,7 +406,7 @@ bool Item_sum::register_sum_func(THD *thd, Item **ref)
for (sl= thd->lex->current_select;
sl && sl != aggr_sel && sl->master_unit()->item;
sl= sl->master_unit()->outer_select() )
- sl->master_unit()->item->with_sum_func= 1;
+ sl->master_unit()->item->get_with_sum_func_cache()->set_with_sum_func();
}
thd->lex->current_select->mark_as_dependent(thd, aggr_sel, NULL);
@@ -486,7 +486,6 @@ void Item_sum::mark_as_sum_func()
cur_select->n_sum_items++;
cur_select->with_sum_func= 1;
const_item_cache= false;
- with_sum_func= 1;
with_field= 0;
window_func_sum_expr_flag= false;
}
@@ -892,7 +891,7 @@ bool Aggregator_distinct::setup(THD *thd)
item_sum->null_value= item_sum->maybe_null= 1;
item_sum->quick_group= 0;
- DBUG_ASSERT(item_sum->get_arg(0)->fixed);
+ DBUG_ASSERT(item_sum->get_arg(0)->is_fixed());
arg= item_sum->get_arg(0);
if (arg->const_item())
@@ -1239,9 +1238,11 @@ Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table)
if (args[0]->type() == Item::FIELD_ITEM)
{
Field *field= ((Item_field*) args[0])->field;
- if ((field= create_tmp_field_from_field(table->in_use, field, &name,
- table, NULL)))
- field->flags&= ~NOT_NULL_FLAG;
+ if ((field= field->create_tmp_field(table->in_use->mem_root, table, true)))
+ {
+ DBUG_ASSERT((field->flags & NOT_NULL_FLAG) == 0);
+ field->field_name= name;
+ }
DBUG_RETURN(field);
}
DBUG_RETURN(tmp_table_field_from_field_type(table));
@@ -1289,7 +1290,7 @@ Item_sum_sp::fix_fields(THD *thd, Item **ref)
if (!m_sp)
{
my_missing_function_error(m_name->m_name, ErrConvDQName(m_name).ptr());
- context->process_error(thd);
+ process_error(thd);
return TRUE;
}
@@ -1641,12 +1642,7 @@ longlong Item_sum_sum::val_int()
if (aggr)
aggr->endup();
if (result_type() == DECIMAL_RESULT)
- {
- longlong result;
- my_decimal2int(E_DEC_FATAL_ERROR, dec_buffs + curr_dec_buff, unsigned_flag,
- &result);
- return result;
- }
+ return dec_buffs[curr_dec_buff].to_longlong(unsigned_flag);
return val_int_from_real();
}
@@ -1657,7 +1653,7 @@ double Item_sum_sum::val_real()
if (aggr)
aggr->endup();
if (result_type() == DECIMAL_RESULT)
- my_decimal2double(E_DEC_FATAL_ERROR, dec_buffs + curr_dec_buff, &sum);
+ sum= dec_buffs[curr_dec_buff].to_double();
return sum;
}
@@ -1667,7 +1663,7 @@ String *Item_sum_sum::val_str(String *str)
if (aggr)
aggr->endup();
if (result_type() == DECIMAL_RESULT)
- return val_string_from_decimal(str);
+ return VDec(this).to_string_round(str, decimals);
return val_string_from_real(str);
}
@@ -2033,7 +2029,7 @@ String *Item_sum_avg::val_str(String *str)
if (aggr)
aggr->endup();
if (result_type() == DECIMAL_RESULT)
- return val_string_from_decimal(str);
+ return VDec(this).to_string_round(str, decimals);
return val_string_from_real(str);
}
@@ -2093,46 +2089,42 @@ Item *Item_sum_std::result_item(THD *thd, Field *field)
variance. The difference between the two classes is that the first is used
for a mundane SELECT, while the latter is used in a GROUPing SELECT.
*/
-static void variance_fp_recurrence_next(double *m, double *s, ulonglong *count, double nr)
+void Stddev::recurrence_next(double nr)
{
- *count += 1;
-
- if (*count == 1)
+ if (!m_count++)
{
- *m= nr;
- *s= 0;
+ DBUG_ASSERT(m_m == 0);
+ DBUG_ASSERT(m_s == 0);
+ m_m= nr;
}
else
{
- double m_kminusone= *m;
+ double m_kminusone= m_m;
volatile double diff= nr - m_kminusone;
- *m= m_kminusone + diff / (double) *count;
- *s= *s + diff * (nr - *m);
+ m_m= m_kminusone + diff / (double) m_count;
+ m_s= m_s + diff * (nr - m_m);
}
}
-static double variance_fp_recurrence_result(double s, ulonglong count, bool is_sample_variance)
+double Stddev::result(bool is_sample_variance)
{
- if (count == 1)
+ if (m_count == 1)
return 0.0;
if (is_sample_variance)
- return s / (count - 1);
+ return m_s / (m_count - 1);
/* else, is a population variance */
- return s / count;
+ return m_s / m_count;
}
Item_sum_variance::Item_sum_variance(THD *thd, Item_sum_variance *item):
Item_sum_num(thd, item),
- count(item->count), sample(item->sample),
+ m_stddev(item->m_stddev), sample(item->sample),
prec_increment(item->prec_increment)
-{
- recurrence_m= item->recurrence_m;
- recurrence_s= item->recurrence_s;
-}
+{ }
void Item_sum_variance::fix_length_and_dec_double()
@@ -2195,8 +2187,7 @@ Field *Item_sum_variance::create_tmp_field(bool group, TABLE *table)
The easiest way is to do this is to store both value in a string
and unpack on access.
*/
- field= new Field_string(sizeof(double)*2 + sizeof(longlong), 0,
- &name, &my_charset_bin);
+ field= new Field_string(Stddev::binary_size(), 0, &name, &my_charset_bin);
}
else
field= new Field_double(max_length, maybe_null, &name, decimals,
@@ -2211,7 +2202,7 @@ Field *Item_sum_variance::create_tmp_field(bool group, TABLE *table)
void Item_sum_variance::clear()
{
- count= 0;
+ m_stddev= Stddev();
}
bool Item_sum_variance::add()
@@ -2223,7 +2214,7 @@ bool Item_sum_variance::add()
double nr= args[0]->val_real();
if (!args[0]->null_value)
- variance_fp_recurrence_next(&recurrence_m, &recurrence_s, &count, nr);
+ m_stddev.recurrence_next(nr);
return 0;
}
@@ -2241,14 +2232,14 @@ double Item_sum_variance::val_real()
below which a 'count' number of items is called NULL.
*/
DBUG_ASSERT((sample == 0) || (sample == 1));
- if (count <= sample)
+ if (m_stddev.count() <= sample)
{
null_value=1;
return 0.0;
}
null_value=0;
- return variance_fp_recurrence_result(recurrence_s, count, sample);
+ return m_stddev.result(sample);
}
@@ -2267,24 +2258,32 @@ void Item_sum_variance::reset_field()
nr= args[0]->val_real(); /* sets null_value as side-effect */
if (args[0]->null_value)
- bzero(res,sizeof(double)*2+sizeof(longlong));
+ bzero(res,Stddev::binary_size());
else
- {
- /* Serialize format is (double)m, (double)s, (longlong)count */
- ulonglong tmp_count;
- double tmp_s;
- float8store(res, nr); /* recurrence variable m */
- tmp_s= 0.0;
- float8store(res + sizeof(double), tmp_s);
- tmp_count= 1;
- int8store(res + sizeof(double)*2, tmp_count);
- }
+ Stddev(nr).to_binary(res);
+}
+
+
+Stddev::Stddev(const uchar *ptr)
+{
+ float8get(m_m, ptr);
+ float8get(m_s, ptr + sizeof(double));
+ m_count= sint8korr(ptr + sizeof(double) * 2);
+}
+
+
+void Stddev::to_binary(uchar *ptr) const
+{
+ /* Serialize format is (double)m, (double)s, (longlong)count */
+ float8store(ptr, m_m);
+ float8store(ptr + sizeof(double), m_s);
+ ptr+= sizeof(double)*2;
+ int8store(ptr, m_count);
}
void Item_sum_variance::update_field()
{
- ulonglong field_count;
uchar *res=result_field->ptr;
double nr= args[0]->val_real(); /* sets null_value as side-effect */
@@ -2293,17 +2292,9 @@ void Item_sum_variance::update_field()
return;
/* Serialize format is (double)m, (double)s, (longlong)count */
- double field_recurrence_m, field_recurrence_s;
- float8get(field_recurrence_m, res);
- float8get(field_recurrence_s, res + sizeof(double));
- field_count=sint8korr(res+sizeof(double)*2);
-
- variance_fp_recurrence_next(&field_recurrence_m, &field_recurrence_s, &field_count, nr);
-
- float8store(res, field_recurrence_m);
- float8store(res + sizeof(double), field_recurrence_s);
- res+= sizeof(double)*2;
- int8store(res,field_count);
+ Stddev field_stddev(res);
+ field_stddev.recurrence_next(nr);
+ field_stddev.to_binary(res);
}
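The Stddev class packages the Welford recurrence that the old free functions implemented: m_m tracks the running mean, m_s accumulates squared deviations via m_k = m_{k-1} + (x_k - m_{k-1})/k and s_k = s_{k-1} + (x_k - m_{k-1})(x_k - m_k), and result() divides by n or n-1 for the population or sample variance; as the comment in item_sum.h notes, this avoids the catastrophic cancellation of the naive sum / sum-of-squares formula. A standalone sketch of the same recurrence (the Welford struct and the sample data below are illustrative, not server code):

    // Welford's online mean/variance; add() folds one value into the state.
    #include <cstdio>
    #include <initializer_list>

    struct Welford
    {
      double m= 0, s= 0;
      unsigned long long count= 0;

      void add(double x)
      {
        count++;
        double delta= x - m;
        m+= delta / (double) count;
        s+= delta * (x - m);          // uses the *updated* mean, as above
      }
      double variance(bool sample) const
      {
        if (count <= 1) return 0.0;
        return sample ? s / (count - 1) : s / count;
      }
    };

    int main()
    {
      Welford w;
      for (double x : {2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0})
        w.add(x);
      printf("%f %f\n", w.variance(false), w.variance(true));  // 4.000000 ~4.571429
      return 0;
    }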
@@ -2324,12 +2315,12 @@ void Item_sum_hybrid::clear()
bool
-Item_sum_hybrid::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+Item_sum_hybrid::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed == 1);
if (null_value)
return true;
- bool retval= value->get_date(ltime, fuzzydate);
+ bool retval= value->get_date(thd, ltime, fuzzydate);
if ((null_value= value->null_value))
DBUG_ASSERT(retval == true);
return retval;
@@ -2398,6 +2389,15 @@ Item_sum_hybrid::val_str(String *str)
}
+bool Item_sum_hybrid::val_native(THD *thd, Native *to)
+{
+ DBUG_ASSERT(fixed == 1);
+ if (null_value)
+ return true;
+ return val_native_from_item(thd, value, to);
+}
+
+
void Item_sum_hybrid::cleanup()
{
DBUG_ENTER("Item_sum_hybrid::cleanup");
@@ -2674,25 +2674,6 @@ bool Item_sum_and::add()
** reset result of a Item_sum with is saved in a tmp_table
*************************************************************************/
-void Item_sum_num::reset_field()
-{
- double nr= args[0]->val_real();
- uchar *res=result_field->ptr;
-
- if (maybe_null)
- {
- if (args[0]->null_value)
- {
- nr=0.0;
- result_field->set_null();
- }
- else
- result_field->set_notnull();
- }
- float8store(res,nr);
-}
-
-
void Item_sum_hybrid::reset_field()
{
Item *UNINIT_VAR(tmp_item), *arg0;
@@ -2763,11 +2744,11 @@ void Item_sum_hybrid::reset_field()
}
case DECIMAL_RESULT:
{
- my_decimal value_buff, *arg_dec= arg0->val_decimal(&value_buff);
+ VDec arg_dec(arg0);
if (maybe_null)
{
- if (arg0->null_value)
+ if (arg_dec.is_null())
result_field->set_null();
else
result_field->set_notnull();
@@ -2776,9 +2757,7 @@ void Item_sum_hybrid::reset_field()
We must store zero in the field as we will use the field value in
add()
*/
- if (!arg_dec) // Null
- arg_dec= &decimal_zero;
- result_field->store_decimal(arg_dec);
+ result_field->store_decimal(arg_dec.ptr_or(&decimal_zero));
break;
}
case ROW_RESULT:
@@ -2801,15 +2780,10 @@ void Item_sum_sum::reset_field()
DBUG_ASSERT (aggr->Aggrtype() != Aggregator::DISTINCT_AGGREGATOR);
if (result_type() == DECIMAL_RESULT)
{
- my_decimal value, *arg_val;
if (unlikely(direct_added))
- arg_val= &direct_sum_decimal;
+ result_field->store_decimal(&direct_sum_decimal);
else
- {
- if (!(arg_val= args[0]->val_decimal(&value)))
- arg_val= &decimal_zero; // Null
- }
- result_field->store_decimal(arg_val);
+ result_field->store_decimal(VDec(args[0]).ptr_or(&decimal_zero));
}
else
{
@@ -2862,15 +2836,9 @@ void Item_sum_avg::reset_field()
if (result_type() == DECIMAL_RESULT)
{
longlong tmp;
- my_decimal value, *arg_dec= args[0]->val_decimal(&value);
- if (args[0]->null_value)
- {
- arg_dec= &decimal_zero;
- tmp= 0;
- }
- else
- tmp= 1;
- my_decimal2binary(E_DEC_FATAL_ERROR, arg_dec, res, f_precision, f_scale);
+ VDec value(args[0]);
+ tmp= value.is_null() ? 0 : 1;
+ value.to_binary(res, f_precision, f_scale);
res+= dec_bin_size;
int8store(res, tmp);
}
@@ -2937,9 +2905,8 @@ void Item_sum_sum::update_field()
{
if (!result_field->is_null())
{
- my_decimal field_value;
- my_decimal *field_val= result_field->val_decimal(&field_value);
- my_decimal_add(E_DEC_FATAL_ERROR, dec_buffs, arg_val, field_val);
+ my_decimal field_value(result_field);
+ my_decimal_add(E_DEC_FATAL_ERROR, dec_buffs, arg_val, &field_value);
result_field->store_decimal(dec_buffs);
}
else
@@ -3006,15 +2973,14 @@ void Item_sum_avg::update_field()
if (result_type() == DECIMAL_RESULT)
{
- my_decimal value, *arg_val= args[0]->val_decimal(&value);
- if (!args[0]->null_value)
+ VDec tmp(args[0]);
+ if (!tmp.is_null())
{
binary2my_decimal(E_DEC_FATAL_ERROR, res,
dec_buffs + 1, f_precision, f_scale);
field_count= sint8korr(res + dec_bin_size);
- my_decimal_add(E_DEC_FATAL_ERROR, dec_buffs, arg_val, dec_buffs + 1);
- my_decimal2binary(E_DEC_FATAL_ERROR, dec_buffs,
- res, f_precision, f_scale);
+ my_decimal_add(E_DEC_FATAL_ERROR, dec_buffs, tmp.ptr(), dec_buffs + 1);
+ dec_buffs->to_binary(res, f_precision, f_scale);
res+= dec_bin_size;
field_count++;
int8store(res, field_count);
@@ -3209,9 +3175,7 @@ my_decimal *Item_avg_field_decimal::val_decimal(my_decimal *dec_buf)
if ((null_value= !count))
return 0;
- my_decimal dec_count, dec_field;
- binary2my_decimal(E_DEC_FATAL_ERROR,
- field->ptr, &dec_field, f_precision, f_scale);
+ my_decimal dec_count, dec_field(field->ptr, f_precision, f_scale);
int2my_decimal(E_DEC_FATAL_ERROR, count, 0, &dec_count);
my_decimal_div(E_DEC_FATAL_ERROR, dec_buf,
&dec_field, &dec_count, prec_increment);
@@ -3232,15 +3196,11 @@ double Item_std_field::val_real()
double Item_variance_field::val_real()
{
// fix_fields() never calls for this Item
- double recurrence_s;
- ulonglong count;
- float8get(recurrence_s, (field->ptr + sizeof(double)));
- count=sint8korr(field->ptr+sizeof(double)*2);
-
- if ((null_value= (count <= sample)))
+ Stddev tmp(field->ptr);
+ if ((null_value= (tmp.count() <= sample)))
return 0.0;
- return variance_fp_recurrence_result(recurrence_s, count, sample);
+ return tmp.result(sample);
}
@@ -3269,6 +3229,25 @@ bool Item_udf_sum::add()
DBUG_RETURN(0);
}
+
+bool Item_udf_sum::supports_removal() const
+{
+ DBUG_ENTER("Item_udf_sum::supports_remove");
+ DBUG_PRINT("info", ("support: %d", udf.supports_removal()));
+ DBUG_RETURN(udf.supports_removal());
+}
+
+
+void Item_udf_sum::remove()
+{
+ my_bool tmp_null_value;
+ DBUG_ENTER("Item_udf_sum::remove");
+ udf.remove(&tmp_null_value);
+ null_value= tmp_null_value;
+ DBUG_VOID_RETURN;
+}
+
+
void Item_udf_sum::cleanup()
{
/*
@@ -3325,24 +3304,6 @@ my_decimal *Item_sum_udf_float::val_decimal(my_decimal *dec)
}
-String *Item_sum_udf_decimal::val_str(String *str)
-{
- return val_string_from_decimal(str);
-}
-
-
-double Item_sum_udf_decimal::val_real()
-{
- return val_real_from_decimal();
-}
-
-
-longlong Item_sum_udf_decimal::val_int()
-{
- return val_int_from_decimal();
-}
-
-
my_decimal *Item_sum_udf_decimal::val_decimal(my_decimal *dec_buf)
{
my_decimal *res;
@@ -4023,6 +3984,7 @@ bool Item_func_group_concat::setup(THD *thd)
if (!ref_pointer_array)
DBUG_RETURN(TRUE);
memcpy(ref_pointer_array, args, arg_count * sizeof(Item*));
+ DBUG_ASSERT(context);
if (setup_order(thd, Ref_ptr_array(ref_pointer_array, n_elems),
context->table_list, list, all_fields, *order))
DBUG_RETURN(TRUE);
diff --git a/sql/item_sum.h b/sql/item_sum.h
index b400ebd5f80..abe6192fcd1 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -384,7 +384,9 @@ protected:
Item **orig_args, *tmp_orig_args[2];
static size_t ram_limitation(THD *thd);
-
+public:
+ // Methods used by ColumnStore
+ Item **get_orig_args() const { return orig_args; }
public:
void mark_as_sum_func();
@@ -511,7 +513,12 @@ public:
}
virtual void make_unique() { force_copy_fields= TRUE; }
Item *get_tmp_table_item(THD *thd);
- Field *create_tmp_field(bool group, TABLE *table);
+ virtual Field *create_tmp_field(bool group, TABLE *table);
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ return create_tmp_field(param->group(), table);
+ }
virtual bool collect_outer_ref_processor(void *param);
bool init_sum_func_check(THD *thd);
bool check_sum_func(THD *thd, Item **ref);
@@ -578,6 +585,8 @@ public:
void mark_as_window_func_sum_expr() { window_func_sum_expr_flag= true; }
bool is_window_func_sum_expr() { return window_func_sum_expr_flag; }
virtual void setup_caches(THD *thd) {};
+
+ bool with_sum_func() const { return true; }
};
@@ -713,33 +722,24 @@ public:
class Item_sum_num :public Item_sum
{
-protected:
- /*
- val_xxx() functions may be called several times during the execution of a
- query. Derived classes that require extensive calculation in val_xxx()
- maintain cache of aggregate value. This variable governs the validity of
- that cache.
- */
- bool is_evaluated;
public:
- Item_sum_num(THD *thd): Item_sum(thd), is_evaluated(FALSE) {}
+ Item_sum_num(THD *thd): Item_sum(thd) {}
Item_sum_num(THD *thd, Item *item_par):
- Item_sum(thd, item_par), is_evaluated(FALSE) {}
+ Item_sum(thd, item_par) {}
Item_sum_num(THD *thd, Item *a, Item* b):
- Item_sum(thd, a, b), is_evaluated(FALSE) {}
+ Item_sum(thd, a, b) {}
Item_sum_num(THD *thd, List<Item> &list):
- Item_sum(thd, list), is_evaluated(FALSE) {}
+ Item_sum(thd, list) {}
Item_sum_num(THD *thd, Item_sum_num *item):
- Item_sum(thd, item),is_evaluated(item->is_evaluated) {}
+ Item_sum(thd, item) {}
bool fix_fields(THD *, Item **);
longlong val_int() { return val_int_from_real(); /* Real as default */ }
String *val_str(String*str);
my_decimal *val_decimal(my_decimal *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return type_handler()->Item_get_date(this, ltime, fuzzydate);
+ return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate);
}
- void reset_field();
};
@@ -964,18 +964,38 @@ But, this falls prey to catastrophic cancellation. Instead, use the recurrence
*/
+class Stddev
+{
+ double m_m;
+ double m_s;
+ ulonglong m_count;
+public:
+ Stddev() :m_m(0), m_s(0), m_count(0) { }
+ Stddev(double nr) :m_m(nr), m_s(0.0), m_count(1) { }
+ Stddev(const uchar *);
+ void to_binary(uchar *) const;
+ void recurrence_next(double nr);
+ double result(bool is_simple_variance);
+ ulonglong count() const { return m_count; }
+ static uint32 binary_size()
+ {
+ return (uint32) (sizeof(double) * 2 + sizeof(ulonglong));
+ };
+};
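The Stddev class introduced above bundles the recurrence that the surrounding comment block ("Instead, use the recurrence") relies on. Below is a minimal standalone sketch of one plausible shape of that recurrence; the member bodies and the meaning of the bool flag are assumptions for illustration, not code taken from this patch.

    #include <stdint.h>

    /*
      Illustration only: a Welford-style mean/variance recurrence,
      kept apart from the real Stddev members declared above.
    */
    struct StddevSketch
    {
      double m= 0;        // running mean
      double s= 0;        // running sum of squared deviations from the mean
      uint64_t count= 0;

      void recurrence_next(double nr)
      {
        count++;
        double delta= nr - m;
        m+= delta / (double) count;
        s+= delta * (nr - m);        // uses the already-updated mean
      }

      // sample == false: population variance (divide by N)
      // sample == true:  sample variance   (divide by N - 1)
      double result(bool sample) const
      {
        if (count < (sample ? 2u : 1u))
          return 0.0;
        return s / (double) (count - (sample ? 1 : 0));
      }
    };

In SQL terms the two divisors correspond to VAR_POP()/STDDEV_POP() versus VAR_SAMP()/STDDEV_SAMP(), which is consistent with the count() <= sample NULL check kept in Item_variance_field::val_real() earlier in this diff.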
+
+
+
class Item_sum_variance : public Item_sum_num
{
+ Stddev m_stddev;
bool fix_length_and_dec();
public:
- double recurrence_m, recurrence_s; /* Used in recurrence relation. */
- ulonglong count;
uint sample;
uint prec_increment;
Item_sum_variance(THD *thd, Item *item_par, uint sample_arg):
- Item_sum_num(thd, item_par), count(0),
+ Item_sum_num(thd, item_par),
sample(sample_arg)
{}
Item_sum_variance(THD *thd, Item_sum_variance *item);
@@ -997,7 +1017,7 @@ public:
const Type_handler *type_handler() const { return &type_handler_double; }
void cleanup()
{
- count= 0;
+ m_stddev= Stddev();
Item_sum_num::cleanup();
}
Item *get_copy(THD *thd)
@@ -1060,9 +1080,10 @@ protected:
double val_real();
longlong val_int();
my_decimal *val_decimal(my_decimal *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
void reset_field();
String *val_str(String *);
+ bool val_native(THD *thd, Native *);
const Type_handler *real_type_handler() const
{
return get_arg(0)->real_type_handler();
@@ -1356,7 +1377,7 @@ public:
void update_field(){DBUG_ASSERT(0);}
void clear();
void cleanup();
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
return execute() || sp_result_field->get_date(ltime, fuzzydate);
}
@@ -1384,17 +1405,21 @@ public:
decimals= item->decimals;
max_length= item->max_length;
unsigned_flag= item->unsigned_flag;
- fixed= true;
}
table_map used_tables() const { return (table_map) 1L; }
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ return create_tmp_field_ex_simple(table, src, param);
+ }
void save_in_result_field(bool no_conversions) { DBUG_ASSERT(0); }
bool check_vcol_func_processor(void *arg)
{
return mark_unsupported_function(name.str, arg, VCOL_IMPOSSIBLE);
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return type_handler()->Item_get_date(this, ltime, fuzzydate);
+ return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate);
}
};
@@ -1439,9 +1464,18 @@ public:
dec_bin_size(item->dec_bin_size)
{ }
const Type_handler *type_handler() const { return &type_handler_newdecimal; }
- double val_real() { return val_real_from_decimal(); }
- longlong val_int() { return val_int_from_decimal(); }
- String *val_str(String *str) { return val_string_from_decimal(str); }
+ double val_real()
+ {
+ return VDec(this).to_double();
+ }
+ longlong val_int()
+ {
+ return VDec(this).to_longlong(unsigned_flag);
+ }
+ String *val_str(String *str)
+ {
+ return VDec(this).to_string_round(str, decimals);
+ }
my_decimal *val_decimal(my_decimal *);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_avg_field_decimal>(thd, this); }
@@ -1541,13 +1575,15 @@ public:
void clear();
bool add();
+ bool supports_removal() const;
+ void remove();
void reset_field() {};
void update_field() {};
void cleanup();
virtual void print(String *str, enum_query_type query_type);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return type_handler()->Item_get_date(this, ltime, fuzzydate);
+ return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate);
}
};
@@ -1645,9 +1681,18 @@ public:
Item_udf_sum(thd, udf_arg, list) {}
Item_sum_udf_decimal(THD *thd, Item_sum_udf_decimal *item)
:Item_udf_sum(thd, item) {}
- String *val_str(String *);
- double val_real();
- longlong val_int();
+ String *val_str(String *str)
+ {
+ return VDec(this).to_string_round(str, decimals);
+ }
+ double val_real()
+ {
+ return VDec(this).to_double();
+ }
+ longlong val_int()
+ {
+ return VDec(this).to_longlong(unsigned_flag);
+ }
my_decimal *val_decimal(my_decimal *);
const Type_handler *type_handler() const { return &type_handler_newdecimal; }
bool fix_length_and_dec() { fix_num_length_and_dec(); return FALSE; }
@@ -1671,6 +1716,7 @@ class Item_sum_udf_float :public Item_sum_num
double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; }
void clear() {}
bool add() { return 0; }
+ void reset_field() { DBUG_ASSERT(0); };
void update_field() {}
};
@@ -1689,6 +1735,7 @@ public:
double val_real() { DBUG_ASSERT(fixed == 1); return 0; }
void clear() {}
bool add() { return 0; }
+ void reset_field() { DBUG_ASSERT(0); };
void update_field() {}
};
@@ -1707,6 +1754,7 @@ class Item_sum_udf_decimal :public Item_sum_num
my_decimal *val_decimal(my_decimal *) { DBUG_ASSERT(fixed == 1); return 0; }
void clear() {}
bool add() { return 0; }
+ void reset_field() { DBUG_ASSERT(0); };
void update_field() {}
};
@@ -1728,6 +1776,7 @@ public:
enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; }
void clear() {}
bool add() { return 0; }
+ void reset_field() { DBUG_ASSERT(0); };
void update_field() {}
};
@@ -1797,6 +1846,14 @@ class Item_func_group_concat : public Item_sum
element_count count __attribute__((unused)),
void* item_arg);
public:
+ // Methods used by ColumnStore
+ bool get_distinct() const { return distinct; }
+ uint get_count_field() const { return arg_count_field; }
+ uint get_order_field() const { return arg_count_order; }
+ const String* get_separator() const { return separator; }
+ ORDER** get_order() const { return order; }
+
+public:
Item_func_group_concat(THD *thd, Name_resolution_context *context_arg,
bool is_distinct, List<Item> *is_select,
const SQL_I_List<ORDER> &is_order, String *is_separator,
@@ -1845,9 +1902,9 @@ public:
{
return val_decimal_from_string(decimal_value);
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return get_date_from_string(ltime, fuzzydate);
+ return get_date_from_string(thd, ltime, fuzzydate);
}
String* val_str(String* str);
Item *copy_or_same(THD* thd);
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 2339c6afbdc..2627d5413a2 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -59,6 +59,29 @@
/** Day number for Dec 31st, 9999. */
#define MAX_DAY_NUMBER 3652424L
+
+Func_handler_date_add_interval_datetime_arg0_time
+ func_handler_date_add_interval_datetime_arg0_time;
+
+Func_handler_date_add_interval_datetime func_handler_date_add_interval_datetime;
+Func_handler_date_add_interval_date func_handler_date_add_interval_date;
+Func_handler_date_add_interval_time func_handler_date_add_interval_time;
+Func_handler_date_add_interval_string func_handler_date_add_interval_string;
+
+Func_handler_add_time_datetime func_handler_add_time_datetime_add(1);
+Func_handler_add_time_datetime func_handler_add_time_datetime_sub(-1);
+Func_handler_add_time_time func_handler_add_time_time_add(1);
+Func_handler_add_time_time func_handler_add_time_time_sub(-1);
+Func_handler_add_time_string func_handler_add_time_string_add(1);
+Func_handler_add_time_string func_handler_add_time_string_sub(-1);
+
+Func_handler_str_to_date_datetime_sec func_handler_str_to_date_datetime_sec;
+Func_handler_str_to_date_datetime_usec func_handler_str_to_date_datetime_usec;
+Func_handler_str_to_date_date func_handler_str_to_date_date;
+Func_handler_str_to_date_time_sec func_handler_str_to_date_time_sec;
+Func_handler_str_to_date_time_usec func_handler_str_to_date_time_usec;
+
+
/*
Date formats corresponding to compound %r and %T conversion specifiers
@@ -103,12 +126,12 @@ static DATE_TIME_FORMAT time_24hrs_format= {{0}, '\0', 0,
1 error
*/
-static bool extract_date_time(DATE_TIME_FORMAT *format,
+static bool extract_date_time(THD *thd, DATE_TIME_FORMAT *format,
const char *val, uint length, MYSQL_TIME *l_time,
timestamp_type cached_timestamp_type,
const char **sub_pattern_end,
const char *date_time_type,
- ulonglong fuzzy_date)
+ date_conv_mode_t fuzzydate)
{
int weekday= 0, yearday= 0, daypart= 0;
int week_number= -1;
@@ -303,17 +326,17 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
We can't just set error here, as we don't want to generate two
warnings in case of errors
*/
- if (extract_date_time(&time_ampm_format, val,
+ if (extract_date_time(thd, &time_ampm_format, val,
(uint)(val_end - val), l_time,
- cached_timestamp_type, &val, "time", fuzzy_date))
+ cached_timestamp_type, &val, "time", fuzzydate))
DBUG_RETURN(1);
break;
/* Time in 24-hour notation */
case 'T':
- if (extract_date_time(&time_24hrs_format, val,
+ if (extract_date_time(thd, &time_24hrs_format, val,
(uint)(val_end - val), l_time,
- cached_timestamp_type, &val, "time", fuzzy_date))
+ cached_timestamp_type, &val, "time", fuzzydate))
DBUG_RETURN(1);
break;
@@ -419,7 +442,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
goto err;
int was_cut;
- if (check_date(l_time, fuzzy_date | TIME_INVALID_DATES, &was_cut))
+ if (check_date(l_time, fuzzydate | TIME_INVALID_DATES, &was_cut))
goto err;
if (val != val_end)
@@ -428,10 +451,9 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
{
if (!my_isspace(&my_charset_latin1,*val))
{
- make_truncated_value_warning(current_thd,
- Sql_condition::WARN_LEVEL_WARN,
- val_begin, length,
- cached_timestamp_type, 0, NullS);
+ ErrConvString err(val_begin, length, &my_charset_bin);
+ make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ &err, cached_timestamp_type, 0, NullS);
break;
}
} while (++val != val_end);
@@ -440,7 +462,6 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
err:
{
- THD *thd= current_thd;
char buff[128];
strmake(buff, val_begin, MY_MIN(length, sizeof(buff)-1));
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
@@ -787,10 +808,9 @@ longlong Item_func_period_diff::val_int()
longlong Item_func_to_days::val_int()
{
DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- if (get_arg0_date(&ltime, TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE))
- return 0;
- return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day);
+ THD *thd= current_thd;
+ Datetime d(thd, args[0], Datetime::Options(TIME_NO_ZEROS, thd));
+ return (null_value= !d.is_valid_datetime()) ? 0 : d.daynr();
}
@@ -798,42 +818,31 @@ longlong Item_func_to_seconds::val_int_endpoint(bool left_endp,
bool *incl_endp)
{
DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- longlong seconds;
- longlong days;
- int dummy; /* unused */
- if (get_arg0_date(&ltime, TIME_FUZZY_DATES))
+ // val_int_endpoint() is called only if args[0] is a temporal Item_field
+ Datetime_from_temporal dt(current_thd, args[0], TIME_FUZZY_DATES);
+ if ((null_value= !dt.is_valid_datetime()))
{
/* got NULL, leave the incl_endp intact */
return LONGLONG_MIN;
}
- seconds= ltime.hour * 3600L + ltime.minute * 60 + ltime.second;
- seconds= ltime.neg ? -seconds : seconds;
- days= (longlong) calc_daynr(ltime.year, ltime.month, ltime.day);
- seconds+= days * 24L * 3600L;
/* Set to NULL if invalid date, but keep the value */
- null_value= check_date(&ltime,
- (ltime.year || ltime.month || ltime.day),
- (TIME_NO_ZERO_IN_DATE | TIME_NO_ZERO_DATE),
- &dummy);
+ null_value= dt.check_date(TIME_NO_ZEROS);
/*
Even if the evaluation returns NULL, seconds is useful for pruning
*/
- return seconds;
+ return dt.to_seconds();
}
longlong Item_func_to_seconds::val_int()
{
DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- longlong seconds;
- longlong days;
- if (get_arg0_date(&ltime, TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE))
- return 0;
- seconds= ltime.hour * 3600L + ltime.minute * 60 + ltime.second;
- seconds=ltime.neg ? -seconds : seconds;
- days= (longlong) calc_daynr(ltime.year, ltime.month, ltime.day);
- return seconds + days * 24L * 3600L;
+ THD *thd= current_thd;
+ /*
+ Unlike val_int_endpoint(), we cannot use Datetime_from_temporal here.
+ The argument can be of a non-temporal data type.
+ */
+ Datetime dt(thd, args[0], Datetime::Options(TIME_NO_ZEROS, thd));
+ return (null_value= !dt.is_valid_datetime()) ? 0 : dt.to_seconds();
}
/*
@@ -877,19 +886,17 @@ enum_monotonicity_info Item_func_to_seconds::get_monotonicity_info() const
longlong Item_func_to_days::val_int_endpoint(bool left_endp, bool *incl_endp)
{
DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
+ // val_int_endpoint() is only called if args[0] is a temporal Item_field
+ Datetime_from_temporal dt(current_thd, args[0], TIME_CONV_NONE);
longlong res;
- int dummy; /* unused */
- if (get_arg0_date(&ltime, 0))
+ if ((null_value= !dt.is_valid_datetime()))
{
/* got NULL, leave the incl_endp intact */
return LONGLONG_MIN;
}
- res=(longlong) calc_daynr(ltime.year,ltime.month,ltime.day);
+ res= (longlong) dt.daynr();
/* Set to NULL if invalid date, but keep the value */
- null_value= check_date(&ltime,
- (TIME_NO_ZERO_IN_DATE | TIME_NO_ZERO_DATE),
- &dummy);
+ null_value= dt.check_date(TIME_NO_ZEROS);
if (null_value)
{
/*
@@ -918,8 +925,8 @@ longlong Item_func_to_days::val_int_endpoint(bool left_endp, bool *incl_endp)
col < '2007-09-15 12:34:56' -> TO_DAYS(col) <= TO_DAYS('2007-09-15')
*/
- if ((!left_endp && !(ltime.hour || ltime.minute || ltime.second ||
- ltime.second_part)) ||
+ const MYSQL_TIME &ltime= dt.get_mysql_time()[0];
+ if ((!left_endp && dt.hhmmssff_is_zero()) ||
(left_endp && ltime.hour == 23 && ltime.minute == 59 &&
ltime.second == 59))
/* do nothing */
@@ -933,25 +940,25 @@ longlong Item_func_to_days::val_int_endpoint(bool left_endp, bool *incl_endp)
longlong Item_func_dayofyear::val_int()
{
DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- if (get_arg0_date(&ltime, TIME_NO_ZERO_IN_DATE | TIME_NO_ZERO_DATE))
- return 0;
- return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day) -
- calc_daynr(ltime.year,1,1) + 1;
+ THD *thd= current_thd;
+ Datetime d(thd, args[0], Datetime::Options(TIME_NO_ZEROS, thd));
+ return (null_value= !d.is_valid_datetime()) ? 0 : d.dayofyear();
}
longlong Item_func_dayofmonth::val_int()
{
DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- return get_arg0_date(&ltime, 0) ? 0 : (longlong) ltime.day;
+ THD *thd= current_thd;
+ Datetime d(thd, args[0], Datetime::Options(TIME_CONV_NONE, thd));
+ return (null_value= !d.is_valid_datetime()) ? 0 : d.get_mysql_time()->day;
}
longlong Item_func_month::val_int()
{
DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- return get_arg0_date(&ltime, 0) ? 0 : (longlong) ltime.month;
+ THD *thd= current_thd;
+ Datetime d(thd, args[0], Datetime::Options(TIME_CONV_NONE, thd));
+ return (null_value= !d.is_valid_datetime()) ? 0 : d.get_mysql_time()->month;
}
@@ -973,12 +980,12 @@ String* Item_func_monthname::val_str(String* str)
DBUG_ASSERT(fixed == 1);
const char *month_name;
uint err;
- MYSQL_TIME ltime;
-
- if ((null_value= (get_arg0_date(&ltime, 0) || !ltime.month)))
+ THD *thd= current_thd;
+ Datetime d(thd, args[0], Datetime::Options(TIME_CONV_NONE, thd));
+ if ((null_value= (!d.is_valid_datetime() || !d.get_mysql_time()->month)))
return (String *) 0;
- month_name= locale->month_names->type_names[ltime.month - 1];
+ month_name= locale->month_names->type_names[d.get_mysql_time()->month - 1];
str->copy(month_name, (uint) strlen(month_name), &my_charset_utf8_bin,
collation.collation, &err);
return str;
@@ -992,23 +999,24 @@ String* Item_func_monthname::val_str(String* str)
longlong Item_func_quarter::val_int()
{
DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- if (get_arg0_date(&ltime, 0))
- return 0;
- return (longlong) ((ltime.month+2)/3);
+ THD *thd= current_thd;
+ Datetime d(thd, args[0], Datetime::Options(TIME_CONV_NONE, thd));
+ return (null_value= !d.is_valid_datetime()) ? 0 : d.quarter();
}
longlong Item_func_hour::val_int()
{
DBUG_ASSERT(fixed == 1);
- Time tm(args[0], Time::Options_for_cast());
+ THD *thd= current_thd;
+ Time tm(thd, args[0], Time::Options_for_cast(thd));
return (null_value= !tm.is_valid_time()) ? 0 : tm.get_mysql_time()->hour;
}
longlong Item_func_minute::val_int()
{
DBUG_ASSERT(fixed == 1);
- Time tm(args[0], Time::Options_for_cast());
+ THD *thd= current_thd;
+ Time tm(thd, args[0], Time::Options_for_cast(thd));
return (null_value= !tm.is_valid_time()) ? 0 : tm.get_mysql_time()->minute;
}
@@ -1018,7 +1026,8 @@ longlong Item_func_minute::val_int()
longlong Item_func_second::val_int()
{
DBUG_ASSERT(fixed == 1);
- Time tm(args[0], Time::Options_for_cast());
+ THD *thd= current_thd;
+ Time tm(thd, args[0], Time::Options_for_cast(thd));
return (null_value= !tm.is_valid_time()) ? 0 : tm.get_mysql_time()->second;
}
@@ -1065,43 +1074,36 @@ uint week_mode(uint mode)
longlong Item_func_week::val_int()
{
DBUG_ASSERT(fixed == 1);
- uint year, week_format;
- MYSQL_TIME ltime;
- if (get_arg0_date(&ltime, TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE))
+ uint week_format;
+ THD *thd= current_thd;
+ Datetime d(thd, args[0], Datetime::Options(TIME_NO_ZEROS, thd));
+ if ((null_value= !d.is_valid_datetime()))
return 0;
if (arg_count > 1)
week_format= (uint)args[1]->val_int();
else
- week_format= current_thd->variables.default_week_format;
- return (longlong) calc_week(&ltime, week_mode(week_format), &year);
+ week_format= thd->variables.default_week_format;
+ return d.week(week_mode(week_format));
}
longlong Item_func_yearweek::val_int()
{
DBUG_ASSERT(fixed == 1);
- uint year,week;
- MYSQL_TIME ltime;
- if (get_arg0_date(&ltime, TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE))
- return 0;
- week= calc_week(&ltime,
- (week_mode((uint) args[1]->val_int()) | WEEK_YEAR),
- &year);
- return week+year*100;
+ THD *thd= current_thd;
+ Datetime d(thd, args[0], Datetime::Options(TIME_NO_ZEROS, thd));
+ return (null_value= !d.is_valid_datetime()) ? 0 :
+ d.yearweek((week_mode((uint) args[1]->val_int()) | WEEK_YEAR));
}
longlong Item_func_weekday::val_int()
{
DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
-
- if (get_arg0_date(&ltime, TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE))
- return 0;
-
- return (longlong) calc_weekday(calc_daynr(ltime.year, ltime.month,
- ltime.day),
- odbc_type) + MY_TEST(odbc_type);
+ THD *thd= current_thd;
+ Datetime d(thd, args[0], Datetime::Options(TIME_NO_ZEROS, thd));
+ return ((null_value= !d.is_valid_datetime())) ? 0 :
+ calc_weekday(d.daynr(), odbc_type) + MY_TEST(odbc_type);
}
bool Item_func_dayname::fix_length_and_dec()
@@ -1137,8 +1139,9 @@ String* Item_func_dayname::val_str(String* str)
longlong Item_func_year::val_int()
{
DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- return get_arg0_date(&ltime, 0) ? 0 : (longlong) ltime.year;
+ THD *thd= current_thd;
+ Datetime d(thd, args[0], Datetime::Options(TIME_CONV_NONE, thd));
+ return (null_value= !d.is_valid_datetime()) ? 0 : d.get_mysql_time()->year;
}
@@ -1169,8 +1172,9 @@ enum_monotonicity_info Item_func_year::get_monotonicity_info() const
longlong Item_func_year::val_int_endpoint(bool left_endp, bool *incl_endp)
{
DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
- if (get_arg0_date(&ltime, 0))
+ // val_int_endpoint() is called only if args[0] is a temporal Item_field
+ Datetime_from_temporal dt(current_thd, args[0], TIME_CONV_NONE);
+ if ((null_value= !dt.is_valid_datetime()))
{
/* got NULL, leave the incl_endp intact */
return LONGLONG_MIN;
@@ -1187,8 +1191,9 @@ longlong Item_func_year::val_int_endpoint(bool left_endp, bool *incl_endp)
col < '2007-09-15 23:00:00' -> YEAR(col) <= 2007
*/
+ const MYSQL_TIME &ltime= dt.get_mysql_time()[0];
if (!left_endp && ltime.day == 1 && ltime.month == 1 &&
- !(ltime.hour || ltime.minute || ltime.second || ltime.second_part))
+ dt.hhmmssff_is_zero())
; /* do nothing */
else
*incl_endp= TRUE;
@@ -1212,14 +1217,13 @@ bool Item_func_unix_timestamp::get_timestamp_value(my_time_t *seconds,
}
}
- MYSQL_TIME ltime;
- if (get_arg0_date(&ltime, TIME_NO_ZERO_IN_DATE))
- return 1;
-
- uint error_code;
- *seconds= TIME_to_timestamp(current_thd, &ltime, &error_code);
- *second_part= ltime.second_part;
- return (null_value= (error_code == ER_WARN_DATA_OUT_OF_RANGE));
+ Timestamp_or_zero_datetime_native_null native(current_thd, args[0], true);
+ if ((null_value= native.is_null() || native.is_zero_datetime()))
+ return true;
+ Timestamp_or_zero_datetime tm(native);
+ *seconds= tm.tv().tv_sec;
+ *second_part= tm.tv().tv_usec;
+ return false;
}
@@ -1276,7 +1280,8 @@ longlong Item_func_unix_timestamp::val_int_endpoint(bool left_endp, bool *incl_e
longlong Item_func_time_to_sec::int_op()
{
DBUG_ASSERT(fixed == 1);
- Time tm(args[0], Time::Options_for_cast());
+ THD *thd= current_thd;
+ Time tm(thd, args[0], Time::Options_for_cast(thd));
return ((null_value= !tm.is_valid_time())) ? 0 : tm.to_seconds();
}
@@ -1284,7 +1289,8 @@ longlong Item_func_time_to_sec::int_op()
my_decimal *Item_func_time_to_sec::decimal_op(my_decimal* buf)
{
DBUG_ASSERT(fixed == 1);
- Time tm(args[0], Time::Options_for_cast());
+ THD *thd= current_thd;
+ Time tm(thd, args[0], Time::Options_for_cast(thd));
if ((null_value= !tm.is_valid_time()))
return 0;
const MYSQL_TIME *ltime= tm.get_mysql_time();
@@ -1299,7 +1305,8 @@ my_decimal *Item_func_time_to_sec::decimal_op(my_decimal* buf)
To keep the code simple, allow interval objects without separators.
*/
-bool get_interval_value(Item *args,interval_type int_type, INTERVAL *interval)
+bool get_interval_value(THD *thd, Item *args,
+ interval_type int_type, INTERVAL *interval)
{
ulonglong array[5];
longlong UNINIT_VAR(value);
@@ -1312,25 +1319,19 @@ bool get_interval_value(Item *args,interval_type int_type, INTERVAL *interval)
bzero((char*) interval,sizeof(*interval));
if (int_type == INTERVAL_SECOND && args->decimals)
{
- my_decimal decimal_value, *val;
- ulonglong second;
- ulong second_part;
- if (!(val= args->val_decimal(&decimal_value)))
+ VDec val(args);
+ if (val.is_null())
return true;
- interval->neg= my_decimal2seconds(val, &second, &second_part);
- if (second == LONGLONG_MAX)
+ Sec6 d(val.ptr());
+ interval->neg= d.neg();
+ if (d.sec() >= LONGLONG_MAX)
{
- THD *thd= current_thd;
- ErrConvDecimal err(val);
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_TRUNCATED_WRONG_VALUE,
- ER_THD(thd, ER_TRUNCATED_WRONG_VALUE), "DECIMAL",
- err.ptr());
+ ErrConvDecimal err(val.ptr());
+ thd->push_warning_truncated_wrong_value("seconds", err.ptr());
return true;
}
-
- interval->second= second;
- interval->second_part= second_part;
+ interval->second= d.sec();
+ interval->second_part= d.usec();
return false;
}
else if ((int) int_type <= INTERVAL_MICROSECOND)
@@ -1476,90 +1477,11 @@ bool get_interval_value(Item *args,interval_type int_type, INTERVAL *interval)
}
-String *Item_temporal_func::val_str(String *str)
-{
- DBUG_ASSERT(fixed == 1);
- return val_string_from_date(str);
-}
-
-
-bool Item_temporal_hybrid_func::fix_temporal_type(MYSQL_TIME *ltime)
-{
- if (ltime->time_type < 0) /* MYSQL_TIMESTAMP_NONE, MYSQL_TIMESTAMP_ERROR */
- return false;
-
- if (ltime->time_type != MYSQL_TIMESTAMP_TIME)
- goto date_or_datetime_value;
-
- /* Convert TIME to DATE or DATETIME */
- switch (field_type())
- {
- case MYSQL_TYPE_DATE:
- case MYSQL_TYPE_DATETIME:
- case MYSQL_TYPE_TIMESTAMP:
- {
- MYSQL_TIME tmp;
- if (time_to_datetime_with_warn(current_thd, ltime, &tmp, 0))
- return (null_value= true);
- *ltime= tmp;
- if (field_type() == MYSQL_TYPE_DATE)
- datetime_to_date(ltime);
- return false;
- }
- case MYSQL_TYPE_TIME:
- case MYSQL_TYPE_STRING: /* DATE_ADD, ADDTIME can return VARCHAR */
- return false;
- default:
- DBUG_ASSERT(0);
- return (null_value= true);
- }
-
-date_or_datetime_value:
- /* Convert DATE or DATETIME to TIME, DATE, or DATETIME */
- switch (field_type())
- {
- case MYSQL_TYPE_TIME:
- datetime_to_time(ltime);
- return false;
- case MYSQL_TYPE_DATETIME:
- case MYSQL_TYPE_TIMESTAMP:
- date_to_datetime(ltime);
- return false;
- case MYSQL_TYPE_DATE:
- datetime_to_date(ltime);
- return false;
- case MYSQL_TYPE_STRING: /* DATE_ADD, ADDTIME can return VARCHAR */
- return false;
- default:
- DBUG_ASSERT(0);
- return (null_value= true);
- }
- return false;
-}
-
-
-String *Item_temporal_hybrid_func::val_str_ascii(String *str)
-{
- DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
-
- if (get_date(&ltime, 0) || fix_temporal_type(&ltime) ||
- (null_value= my_TIME_to_str(&ltime, str, decimals)))
- return (String *) 0;
-
- /* Check that the returned timestamp type matches to the function type */
- DBUG_ASSERT(field_type() == MYSQL_TYPE_STRING ||
- ltime.time_type == MYSQL_TIMESTAMP_NONE ||
- ltime.time_type == mysql_timestamp_type());
- return str;
-}
-
-
-bool Item_func_from_days::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_func_from_days::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
longlong value=args[0]->val_int();
if ((null_value= (args[0]->null_value ||
- ((fuzzy_date & TIME_NO_ZERO_DATE) && value == 0))))
+ ((fuzzydate & TIME_NO_ZERO_DATE) && value == 0))))
return true;
bzero(ltime, sizeof(MYSQL_TIME));
if (get_date_from_daynr((long) value, &ltime->year, &ltime->month,
@@ -1596,10 +1518,9 @@ void Item_func_curdate_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
}
-bool Item_func_curdate::get_date(MYSQL_TIME *res,
- ulonglong fuzzy_date __attribute__((unused)))
+bool Item_func_curdate::get_date(THD *thd, MYSQL_TIME *res,
+ date_mode_t fuzzydate __attribute__((unused)))
{
- THD *thd= current_thd;
query_id_t query_id= thd->query_id;
/* Cache value for this query */
if (last_query_id != query_id)
@@ -1626,10 +1547,9 @@ bool Item_func_curtime::fix_fields(THD *thd, Item **items)
return Item_timefunc::fix_fields(thd, items);
}
-bool Item_func_curtime::get_date(MYSQL_TIME *res,
- ulonglong fuzzy_date __attribute__((unused)))
+bool Item_func_curtime::get_date(THD *thd, MYSQL_TIME *res,
+ date_mode_t fuzzydate __attribute__((unused)))
{
- THD *thd= current_thd;
query_id_t query_id= thd->query_id;
/* Cache value for this query */
if (last_query_id != query_id)
@@ -1700,7 +1620,7 @@ bool Item_func_now::fix_fields(THD *thd, Item **items)
func_name(), TIME_SECOND_PART_DIGITS);
return 1;
}
- return Item_temporal_func::fix_fields(thd, items);
+ return Item_datetimefunc::fix_fields(thd, items);
}
void Item_func_now::print(String *str, enum_query_type query_type)
@@ -1719,15 +1639,14 @@ int Item_func_now_local::save_in_field(Field *field, bool no_conversions)
{
THD *thd= field->get_thd();
my_time_t ts= thd->query_start();
- uint dec= MY_MIN(decimals, field->decimals());
- ulong sec_part= dec ? thd->query_start_sec_part() : 0;
- sec_part-= my_time_fraction_remainder(sec_part, dec);
+ ulong sec_part= decimals ? thd->query_start_sec_part() : 0;
+ sec_part-= my_time_fraction_remainder(sec_part, decimals);
field->set_notnull();
((Field_timestamp*)field)->store_TIME(ts, sec_part);
return 0;
}
else
- return Item_temporal_func::save_in_field(field, no_conversions);
+ return Item_datetimefunc::save_in_field(field, no_conversions);
}
@@ -1758,10 +1677,9 @@ void Item_func_now_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
}
-bool Item_func_now::get_date(MYSQL_TIME *res,
- ulonglong fuzzy_date __attribute__((unused)))
+bool Item_func_now::get_date(THD *thd, MYSQL_TIME *res,
+ date_mode_t fuzzydate __attribute__((unused)))
{
- THD *thd= current_thd;
query_id_t query_id= thd->query_id;
/* Cache value for this query */
if (last_query_id != query_id)
@@ -1787,62 +1705,23 @@ void Item_func_sysdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
}
-bool Item_func_sysdate_local::get_date(MYSQL_TIME *res,
- ulonglong fuzzy_date __attribute__((unused)))
+bool Item_func_sysdate_local::get_date(THD *thd, MYSQL_TIME *res,
+ date_mode_t fuzzydate __attribute__((unused)))
{
- store_now_in_TIME(current_thd, res);
+ store_now_in_TIME(thd, res);
return 0;
}
-bool Item_func_sec_to_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_func_sec_to_time::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed == 1);
- bool sign;
- ulonglong sec;
- ulong sec_part;
-
- bzero((char *)ltime, sizeof(*ltime));
- ltime->time_type= MYSQL_TIMESTAMP_TIME;
-
- sign= args[0]->get_seconds(&sec, &sec_part);
-
- if ((null_value= args[0]->null_value))
- return 1;
-
- ltime->neg= sign;
- if (sec > TIME_MAX_VALUE_SECONDS)
- goto overflow;
-
- DBUG_ASSERT(sec_part <= TIME_MAX_SECOND_PART);
-
- ltime->hour= (uint) (sec/3600);
- ltime->minute= (uint) (sec % 3600) /60;
- ltime->second= (uint) sec % 60;
- ltime->second_part= sec_part;
-
- return 0;
-
-overflow:
- /* use check_time_range() to set ltime to the max value depending on dec */
- int unused;
- char buf[100];
- String tmp(buf, sizeof(buf), &my_charset_bin), *err= args[0]->val_str(&tmp);
-
- ltime->hour= TIME_MAX_HOUR+1;
- check_time_range(ltime, decimals, &unused);
- if (!err)
- {
- ErrConvInteger err2(sec, unsigned_flag);
- make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- &err2, MYSQL_TIMESTAMP_TIME, 0, NullS);
- }
- else
- {
- ErrConvString err2(err);
- make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- &err2, MYSQL_TIMESTAMP_TIME, 0, NullS);
- }
- return 0;
+ VSec9 sec(thd, args[0], "seconds", LONGLONG_MAX);
+ if ((null_value= sec.is_null()))
+ return true;
+ sec.round(decimals, thd->temporal_round_mode());
+ if (sec.sec_to_time(ltime, decimals) && !sec.truncated())
+ sec.make_truncated_warning(thd, "seconds");
+ return false;
}
bool Item_func_date_format::fix_length_and_dec()
@@ -1998,8 +1877,10 @@ String *Item_func_date_format::val_str(String *str)
uint size;
const MY_LOCALE *lc= 0;
DBUG_ASSERT(fixed == 1);
-
- if ((null_value= args[0]->get_date(&l_time, is_time_format ? TIME_TIME_ONLY : 0)))
+ date_conv_mode_t mode= is_time_format ? TIME_TIME_ONLY : TIME_CONV_NONE;
+ THD *thd= current_thd;
+ if ((null_value= args[0]->get_date(thd, &l_time,
+ Temporal::Options(mode, thd))))
return 0;
if (!(format= args[1]->val_str(&format_buffer)) || !format->length())
@@ -2045,35 +1926,34 @@ bool Item_func_from_unixtime::fix_length_and_dec()
}
-bool Item_func_from_unixtime::get_date(MYSQL_TIME *ltime,
- ulonglong fuzzy_date __attribute__((unused)))
+bool Item_func_from_unixtime::get_date(THD *thd, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate __attribute__((unused)))
{
- bool sign;
- ulonglong sec;
- ulong sec_part;
-
bzero((char *)ltime, sizeof(*ltime));
ltime->time_type= MYSQL_TIMESTAMP_TIME;
- sign= args[0]->get_seconds(&sec, &sec_part);
+ VSec9 sec(thd, args[0], "unixtime", TIMESTAMP_MAX_VALUE);
+ DBUG_ASSERT(sec.sec() <= TIMESTAMP_MAX_VALUE);
- if (args[0]->null_value || sign || sec > TIMESTAMP_MAX_VALUE)
+ if (sec.is_null() || sec.truncated() || sec.neg())
return (null_value= 1);
- tz->gmt_sec_to_TIME(ltime, (my_time_t)sec);
+ sec.round(MY_MIN(decimals, TIME_SECOND_PART_DIGITS), thd->temporal_round_mode());
+ if (sec.sec() > TIMESTAMP_MAX_VALUE)
+ return (null_value= true); // Went out of range after rounding
- ltime->second_part= sec_part;
+ tz->gmt_sec_to_TIME(ltime, (my_time_t) sec.sec());
+ ltime->second_part= sec.usec();
return (null_value= 0);
}
-bool Item_func_convert_tz::get_date(MYSQL_TIME *ltime,
- ulonglong fuzzy_date __attribute__((unused)))
+bool Item_func_convert_tz::get_date(THD *thd, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate __attribute__((unused)))
{
my_time_t my_time_tmp;
String str;
- THD *thd= current_thd;
if (!from_tz_cached)
{
@@ -2087,9 +1967,13 @@ bool Item_func_convert_tz::get_date(MYSQL_TIME *ltime,
to_tz_cached= args[2]->const_item();
}
- if (from_tz==0 || to_tz==0 ||
- get_arg0_date(ltime, TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE))
- return (null_value= 1);
+ if ((null_value= (from_tz == 0 || to_tz == 0)))
+ return true;
+
+ Datetime::Options opt(TIME_NO_ZEROS, thd);
+ Datetime *dt= new(ltime) Datetime(thd, args[0], opt);
+ if ((null_value= !dt->is_valid_datetime()))
+ return true;
{
uint not_used;
@@ -2109,7 +1993,7 @@ bool Item_func_convert_tz::get_date(MYSQL_TIME *ltime,
void Item_func_convert_tz::cleanup()
{
from_tz_cached= to_tz_cached= 0;
- Item_temporal_func::cleanup();
+ Item_datetimefunc::cleanup();
}
@@ -2140,81 +2024,44 @@ bool Item_date_add_interval::fix_length_and_dec()
MYSQL_TIME or DATETIME argument)
*/
arg0_field_type= args[0]->field_type();
- uint interval_dec= 0;
- if (int_type == INTERVAL_MICROSECOND ||
- (int_type >= INTERVAL_DAY_MICROSECOND &&
- int_type <= INTERVAL_SECOND_MICROSECOND))
- interval_dec= TIME_SECOND_PART_DIGITS;
- else if (int_type == INTERVAL_SECOND && args[1]->decimals > 0)
- interval_dec= MY_MIN(args[1]->decimals, TIME_SECOND_PART_DIGITS);
if (arg0_field_type == MYSQL_TYPE_DATETIME ||
arg0_field_type == MYSQL_TYPE_TIMESTAMP)
{
- uint dec= MY_MAX(args[0]->datetime_precision(), interval_dec);
- set_handler(&type_handler_datetime);
- fix_attributes_datetime(dec);
+ set_func_handler(&func_handler_date_add_interval_datetime);
}
else if (arg0_field_type == MYSQL_TYPE_DATE)
{
if (int_type <= INTERVAL_DAY || int_type == INTERVAL_YEAR_MONTH)
- {
- set_handler(&type_handler_newdate);
- fix_attributes_date();
- }
+ set_func_handler(&func_handler_date_add_interval_date);
else
- {
- set_handler(&type_handler_datetime2);
- fix_attributes_datetime(interval_dec);
- }
+ set_func_handler(&func_handler_date_add_interval_datetime);
}
else if (arg0_field_type == MYSQL_TYPE_TIME)
{
- uint dec= MY_MAX(args[0]->time_precision(), interval_dec);
if (int_type >= INTERVAL_DAY && int_type != INTERVAL_YEAR_MONTH)
- {
- set_handler(&type_handler_time2);
- fix_attributes_time(dec);
- }
+ set_func_handler(&func_handler_date_add_interval_time);
else
- {
- set_handler(&type_handler_datetime2);
- fix_attributes_datetime(dec);
- }
+ set_func_handler(&func_handler_date_add_interval_datetime_arg0_time);
}
else
{
- uint dec= MY_MAX(args[0]->datetime_precision(), interval_dec);
- set_handler(&type_handler_string);
- collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
- fix_char_length_temporal_not_fixed_dec(MAX_DATETIME_WIDTH, dec);
+ set_func_handler(&func_handler_date_add_interval_string);
}
maybe_null= true;
- return FALSE;
+ return m_func_handler->fix_length_and_dec(this);
}
-bool Item_date_add_interval::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Func_handler_date_add_interval_datetime_arg0_time::
+ get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const
{
- INTERVAL interval;
-
- if (args[0]->get_date(ltime,
- field_type() == MYSQL_TYPE_TIME ?
- TIME_TIME_ONLY : 0) ||
- get_interval_value(args[1], int_type, &interval))
- return (null_value=1);
-
- if (ltime->time_type != MYSQL_TIMESTAMP_TIME &&
- check_date_with_warn(ltime, TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE,
- MYSQL_TIMESTAMP_ERROR))
- return (null_value=1);
-
- if (date_sub_interval)
- interval.neg = !interval.neg;
-
- if (date_add_interval(ltime, int_type, interval))
- return (null_value=1);
- return (null_value= 0);
+ // time_expr + INTERVAL {YEAR|QUARTER|MONTH|WEEK|YEAR_MONTH}
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_DATETIME_FUNCTION_OVERFLOW,
+ ER_THD(thd, ER_DATETIME_FUNCTION_OVERFLOW), "time");
+ return (item->null_value= true);
}
@@ -2264,16 +2111,18 @@ void Item_extract::print(String *str, enum_query_type query_type)
bool Item_extract::fix_length_and_dec()
{
maybe_null=1; // If wrong date
+ uint32 daylen= args[0]->cmp_type() == TIME_RESULT ? 2 :
+ TIME_MAX_INTERVAL_DAY_CHAR_LENGTH;
switch (int_type) {
case INTERVAL_YEAR: set_date_length(4); break; // YYYY
case INTERVAL_YEAR_MONTH: set_date_length(6); break; // YYYYMM
case INTERVAL_QUARTER: set_date_length(2); break; // 1..4
case INTERVAL_MONTH: set_date_length(2); break; // MM
case INTERVAL_WEEK: set_date_length(2); break; // 0..52
- case INTERVAL_DAY: set_date_length(2); break; // DD
- case INTERVAL_DAY_HOUR: set_time_length(4); break; // DDhh
- case INTERVAL_DAY_MINUTE: set_time_length(6); break; // DDhhmm
- case INTERVAL_DAY_SECOND: set_time_length(8); break; // DDhhmmss
+ case INTERVAL_DAY: set_day_length(daylen); break; // DD
+ case INTERVAL_DAY_HOUR: set_day_length(daylen+2); break; // DDhh
+ case INTERVAL_DAY_MINUTE: set_day_length(daylen+4); break; // DDhhmm
+ case INTERVAL_DAY_SECOND: set_day_length(daylen+6); break; // DDhhmmss
case INTERVAL_HOUR: set_time_length(2); break; // hh
case INTERVAL_HOUR_MINUTE: set_time_length(4); break; // hhmm
case INTERVAL_HOUR_SECOND: set_time_length(6); break; // hhmmss
@@ -2281,7 +2130,7 @@ bool Item_extract::fix_length_and_dec()
case INTERVAL_MINUTE_SECOND: set_time_length(4); break; // mmss
case INTERVAL_SECOND: set_time_length(2); break; // ss
case INTERVAL_MICROSECOND: set_time_length(6); break; // ffffff
- case INTERVAL_DAY_MICROSECOND: set_time_length(14); break; // DDhhmmssffffff
+ case INTERVAL_DAY_MICROSECOND: set_time_length(daylen+12); break; // DDhhmmssffffff
case INTERVAL_HOUR_MICROSECOND: set_time_length(12); break; // hhmmssffffff
case INTERVAL_MINUTE_MICROSECOND: set_time_length(10); break; // mmssffffff
case INTERVAL_SECOND_MICROSECOND: set_time_length(8); break; // ssffffff
@@ -2291,69 +2140,46 @@ bool Item_extract::fix_length_and_dec()
}
-longlong Item_extract::val_int()
+uint Extract_source::week(THD *thd) const
{
- DBUG_ASSERT(fixed == 1);
- MYSQL_TIME ltime;
+ DBUG_ASSERT(is_valid_extract_source());
uint year;
- ulong week_format;
- long neg;
- int is_time_flag = date_value ? 0 : TIME_TIME_ONLY;
-
- // Not using get_arg0_date to avoid automatic TIME to DATETIME conversion
- if ((null_value= args[0]->get_date(&ltime, is_time_flag)))
- return 0;
-
- neg= ltime.neg ? -1 : 1;
+ ulong week_format= current_thd->variables.default_week_format;
+ return calc_week(this, week_mode(week_format), &year);
+}
- DBUG_ASSERT(ltime.time_type != MYSQL_TIMESTAMP_TIME || ltime.day == 0);
- if (ltime.time_type == MYSQL_TIMESTAMP_TIME)
- time_to_daytime_interval(&ltime);
+longlong Item_extract::val_int()
+{
+ DBUG_ASSERT(fixed == 1);
+ THD *thd= current_thd;
+ Extract_source dt(thd, args[0], m_date_mode);
+ if ((null_value= !dt.is_valid_extract_source()))
+ return 0;
switch (int_type) {
- case INTERVAL_YEAR: return ltime.year;
- case INTERVAL_YEAR_MONTH: return ltime.year*100L+ltime.month;
- case INTERVAL_QUARTER: return (ltime.month+2)/3;
- case INTERVAL_MONTH: return ltime.month;
- case INTERVAL_WEEK:
- {
- week_format= current_thd->variables.default_week_format;
- return calc_week(&ltime, week_mode(week_format), &year);
- }
- case INTERVAL_DAY: return ltime.day;
- case INTERVAL_DAY_HOUR: return (long) (ltime.day*100L+ltime.hour)*neg;
- case INTERVAL_DAY_MINUTE: return (long) (ltime.day*10000L+
- ltime.hour*100L+
- ltime.minute)*neg;
- case INTERVAL_DAY_SECOND: return ((longlong) ltime.day*1000000L+
- (longlong) (ltime.hour*10000L+
- ltime.minute*100+
- ltime.second))*neg;
- case INTERVAL_HOUR: return (long) ltime.hour*neg;
- case INTERVAL_HOUR_MINUTE: return (long) (ltime.hour*100+ltime.minute)*neg;
- case INTERVAL_HOUR_SECOND: return (long) (ltime.hour*10000+ltime.minute*100+
- ltime.second)*neg;
- case INTERVAL_MINUTE: return (long) ltime.minute*neg;
- case INTERVAL_MINUTE_SECOND: return (long) (ltime.minute*100+ltime.second)*neg;
- case INTERVAL_SECOND: return (long) ltime.second*neg;
- case INTERVAL_MICROSECOND: return (long) ltime.second_part*neg;
- case INTERVAL_DAY_MICROSECOND: return (((longlong)ltime.day*1000000L +
- (longlong)ltime.hour*10000L +
- ltime.minute*100 +
- ltime.second)*1000000L +
- ltime.second_part)*neg;
- case INTERVAL_HOUR_MICROSECOND: return (((longlong)ltime.hour*10000L +
- ltime.minute*100 +
- ltime.second)*1000000L +
- ltime.second_part)*neg;
- case INTERVAL_MINUTE_MICROSECOND: return (((longlong)(ltime.minute*100+
- ltime.second))*1000000L+
- ltime.second_part)*neg;
- case INTERVAL_SECOND_MICROSECOND: return ((longlong)ltime.second*1000000L+
- ltime.second_part)*neg;
+ case INTERVAL_YEAR: return dt.year();
+ case INTERVAL_YEAR_MONTH: return dt.year_month();
+ case INTERVAL_QUARTER: return dt.quarter();
+ case INTERVAL_MONTH: return dt.month();
+ case INTERVAL_WEEK: return dt.week(thd);
+ case INTERVAL_DAY: return dt.day();
+ case INTERVAL_DAY_HOUR: return dt.day_hour();
+ case INTERVAL_DAY_MINUTE: return dt.day_minute();
+ case INTERVAL_DAY_SECOND: return dt.day_second();
+ case INTERVAL_HOUR: return dt.hour();
+ case INTERVAL_HOUR_MINUTE: return dt.hour_minute();
+ case INTERVAL_HOUR_SECOND: return dt.hour_second();
+ case INTERVAL_MINUTE: return dt.minute();
+ case INTERVAL_MINUTE_SECOND: return dt.minute_second();
+ case INTERVAL_SECOND: return dt.second();
+ case INTERVAL_MICROSECOND: return dt.microsecond();
+ case INTERVAL_DAY_MICROSECOND: return dt.day_microsecond();
+ case INTERVAL_HOUR_MICROSECOND: return dt.hour_microsecond();
+ case INTERVAL_MINUTE_MICROSECOND: return dt.minute_microsecond();
+ case INTERVAL_SECOND_MICROSECOND: return dt.second_microsecond();
case INTERVAL_LAST: DBUG_ASSERT(0); break; /* purecov: deadcode */
}
- return 0; // Impossible
+ return 0; // Impossible
}
bool Item_extract::eq(const Item *item, bool binary_cmp) const
@@ -2392,13 +2218,14 @@ bool Item_char_typecast::eq(const Item *item, bool binary_cmp) const
return 1;
}
-void Item_temporal_typecast::print(String *str, enum_query_type query_type)
+void Item_func::print_cast_temporal(String *str, enum_query_type query_type)
{
char buf[32];
str->append(STRING_WITH_LEN("cast("));
args[0]->print(str, query_type);
str->append(STRING_WITH_LEN(" as "));
- str->append(cast_type());
+ const Name name= type_handler()->name();
+ str->append(name.ptr(), name.length());
if (decimals && decimals != NOT_FIXED_DEC)
{
str->append('(');
@@ -2611,45 +2438,31 @@ void Item_char_typecast::fix_length_and_dec_internal(CHARSET_INFO *from_cs)
}
-bool Item_time_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_time_typecast::get_date(THD *thd, MYSQL_TIME *to, date_mode_t mode)
{
- Time tm(args[0], Time::Options_for_cast());
- if ((null_value= !tm.is_valid_time()))
- return true;
- tm.copy_to_mysql_time(ltime);
- if (decimals < TIME_SECOND_PART_DIGITS)
- my_time_trunc(ltime, decimals);
- return (fuzzy_date & TIME_TIME_ONLY) ? 0 :
- (null_value= check_date_with_warn(ltime, fuzzy_date,
- MYSQL_TIMESTAMP_ERROR));
+ Time *tm= new(to) Time(thd, args[0], Time::Options_for_cast(mode, thd),
+ MY_MIN(decimals, TIME_SECOND_PART_DIGITS));
+ return (null_value= !tm->is_valid_time());
}
-bool Item_date_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_date_typecast::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- fuzzy_date |= sql_mode_for_dates(current_thd);
- if (get_arg0_date(ltime, fuzzy_date & ~TIME_TIME_ONLY))
- return 1;
-
- if (make_date_with_warn(ltime, fuzzy_date, MYSQL_TIMESTAMP_DATE))
- return (null_value= 1);
-
- return 0;
+ date_mode_t tmp= (fuzzydate | sql_mode_for_dates(thd)) & ~TIME_TIME_ONLY;
+ // Force truncation
+ Date *d= new(ltime) Date(thd, args[0], Date::Options(date_conv_mode_t(tmp)));
+ return (null_value= !d->is_valid_date());
}
-bool Item_datetime_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_datetime_typecast::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- fuzzy_date |= sql_mode_for_dates(current_thd);
- if (get_arg0_date(ltime, fuzzy_date & ~TIME_TIME_ONLY))
- return 1;
-
- if (decimals < TIME_SECOND_PART_DIGITS)
- my_time_trunc(ltime, decimals);
-
- DBUG_ASSERT(ltime->time_type != MYSQL_TIMESTAMP_TIME);
- ltime->time_type= MYSQL_TIMESTAMP_DATETIME;
- return 0;
+ date_mode_t tmp= (fuzzydate | sql_mode_for_dates(thd)) & ~TIME_TIME_ONLY;
+ // Force rounding if the current sql_mode says so
+ Datetime::Options opt(date_conv_mode_t(tmp), thd);
+ Datetime *dt= new(ltime) Datetime(thd, args[0], opt,
+ MY_MIN(decimals, TIME_SECOND_PART_DIGITS));
+ return (null_value= !dt->is_valid_datetime());
}
@@ -2664,20 +2477,17 @@ bool Item_datetime_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
0099-12-31
*/
-bool Item_func_makedate::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_func_makedate::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed == 1);
- long daynr= (long) args[1]->val_int();
- long year= (long) args[0]->val_int();
- long days;
+ long year, days, daynr= (long) args[1]->val_int();
- if (args[0]->null_value || args[1]->null_value ||
- year < 0 || year > 9999 || daynr <= 0)
+ VYear vyear(args[0]);
+ if (vyear.is_null() || args[1]->null_value || vyear.truncated() || daynr <= 0)
goto err;
- if (year < 100)
+ if ((year= (long) vyear.year()) < 100)
year= year_2000_handling(year);
-
days= calc_daynr(year,1,1) + daynr - 1;
if (get_date_from_daynr(days, &ltime->year, &ltime->month, &ltime->day))
goto err;
@@ -2716,101 +2526,24 @@ bool Item_func_add_time::fix_length_and_dec()
arg0_field_type= args[0]->field_type();
if (arg0_field_type == MYSQL_TYPE_DATE ||
arg0_field_type == MYSQL_TYPE_DATETIME ||
- arg0_field_type == MYSQL_TYPE_TIMESTAMP ||
- is_date)
+ arg0_field_type == MYSQL_TYPE_TIMESTAMP)
{
- uint dec= MY_MAX(args[0]->datetime_precision(), args[1]->time_precision());
- set_handler(&type_handler_datetime2);
- fix_attributes_datetime(dec);
+ set_func_handler(sign > 0 ? &func_handler_add_time_datetime_add :
+ &func_handler_add_time_datetime_sub);
}
else if (arg0_field_type == MYSQL_TYPE_TIME)
{
- uint dec= MY_MAX(args[0]->time_precision(), args[1]->time_precision());
- set_handler(&type_handler_time2);
- fix_attributes_time(dec);
+ set_func_handler(sign > 0 ? &func_handler_add_time_time_add :
+ &func_handler_add_time_time_sub);
}
else
{
- uint dec= MY_MAX(args[0]->decimals, args[1]->decimals);
- set_handler(&type_handler_string);
- collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
- fix_char_length_temporal_not_fixed_dec(MAX_DATETIME_WIDTH, dec);
+ set_func_handler(sign > 0 ? &func_handler_add_time_string_add :
+ &func_handler_add_time_string_sub);
}
- maybe_null= true;
- return FALSE;
-}
-/**
- ADDTIME(t,a) and SUBTIME(t,a) are time functions that calculate a
- time/datetime value
-
- t: time_or_datetime_expression
- a: time_expression
-
- Result: Time value or datetime value
-*/
-
-bool Item_func_add_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
-{
- DBUG_ASSERT(fixed == 1);
- MYSQL_TIME l_time1, l_time2;
- bool is_time= 0;
- long days, microseconds;
- longlong seconds;
- int l_sign= sign;
-
- if (Item_func_add_time::field_type() == MYSQL_TYPE_DATETIME)
- {
- // TIMESTAMP function OR the first argument is DATE/DATETIME/TIMESTAMP
- if (get_arg0_date(&l_time1, 0) ||
- args[1]->get_time(&l_time2) ||
- l_time1.time_type == MYSQL_TIMESTAMP_TIME ||
- l_time2.time_type != MYSQL_TIMESTAMP_TIME)
- return (null_value= 1);
- }
- else
- {
- // ADDTIME function AND the first argument is TIME
- if (args[0]->get_time(&l_time1) ||
- args[1]->get_time(&l_time2) ||
- l_time2.time_type != MYSQL_TIMESTAMP_TIME)
- return (null_value= 1);
- is_time= (l_time1.time_type == MYSQL_TIMESTAMP_TIME);
- }
- if (l_time1.neg != l_time2.neg)
- l_sign= -l_sign;
-
- bzero(ltime, sizeof(*ltime));
-
- ltime->neg= calc_time_diff(&l_time1, &l_time2, -l_sign,
- &seconds, &microseconds);
-
- /*
- If first argument was negative and diff between arguments
- is non-zero we need to swap sign to get proper result.
- */
- if (l_time1.neg && (seconds || microseconds))
- ltime->neg= 1-ltime->neg; // Swap sign of result
-
- if (!is_time && ltime->neg)
- return (null_value= 1);
-
- days= (long) (seconds / SECONDS_IN_24H);
-
- calc_time_from_sec(ltime, (long)(seconds % SECONDS_IN_24H), microseconds);
-
- ltime->time_type= is_time ? MYSQL_TIMESTAMP_TIME : MYSQL_TIMESTAMP_DATETIME;
-
- if (!is_time)
- {
- if (get_date_from_daynr(days,&ltime->year,&ltime->month,&ltime->day) ||
- !ltime->day)
- return (null_value= 1);
- return (null_value= 0);
- }
-
- ltime->hour+= days*24;
- return (null_value= adjust_time_range_with_warn(ltime, decimals));
+ maybe_null= true;
+ return m_func_handler->fix_length_and_dec(this);
}
@@ -2822,7 +2555,7 @@ bool Item_func_add_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
Result: Time value
*/
-bool Item_func_timediff::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_func_timediff::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed == 1);
int l_sign= 1;
@@ -2830,55 +2563,47 @@ bool Item_func_timediff::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
ErrConvTime str(&l_time3);
/* the following may be true in, for example, date_add(timediff(...), ...) */
- if (fuzzy_date & TIME_NO_ZERO_IN_DATE)
+ if (fuzzydate & TIME_NO_ZERO_IN_DATE)
return (null_value= 1);
- if (args[0]->get_time(&l_time1) ||
- args[1]->get_time(&l_time2) ||
+ if (args[0]->get_time(thd, &l_time1) ||
+ args[1]->get_time(thd, &l_time2) ||
l_time1.time_type != l_time2.time_type)
return (null_value= 1);
if (l_time1.neg != l_time2.neg)
l_sign= -l_sign;
- if (calc_time_diff(&l_time1, &l_time2, l_sign, &l_time3, fuzzy_date))
+ if (calc_time_diff(&l_time1, &l_time2, l_sign, &l_time3, fuzzydate))
return (null_value= 1);
*ltime= l_time3;
- return (null_value= adjust_time_range_with_warn(ltime, decimals));
+ return (null_value= adjust_time_range_with_warn(thd, ltime, decimals));
}
+
/**
MAKETIME(h,m,s) is a time function that calculates a time value
from the total number of hours, minutes, and seconds.
Result: Time value
*/
-bool Item_func_maketime::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_func_maketime::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(fixed == 1);
Longlong_hybrid hour(args[0]->val_int(), args[0]->unsigned_flag);
longlong minute= args[1]->val_int();
- ulonglong second;
- ulong microsecond;
- bool neg= args[2]->get_seconds(&second, &microsecond);
+ VSec9 sec(thd, args[2], "seconds", 59);
- if (args[0]->null_value || args[1]->null_value || args[2]->null_value ||
- minute < 0 || minute > 59 || neg || second > 59)
+ DBUG_ASSERT(sec.is_null() || sec.sec() <= 59);
+ if (args[0]->null_value || args[1]->null_value || sec.is_null() ||
+ minute < 0 || minute > 59 || sec.neg() || sec.truncated())
return (null_value= 1);
- bzero(ltime, sizeof(*ltime));
- ltime->time_type= MYSQL_TIMESTAMP_TIME;
- ltime->neg= hour.neg();
-
- if (hour.abs() <= TIME_MAX_HOUR)
- {
- ltime->hour= (uint) hour.abs();
- ltime->minute= (uint) minute;
- ltime->second= (uint) second;
- ltime->second_part= microsecond;
- }
- else
+ int warn;
+ new(ltime) Time(&warn, hour.neg(), hour.abs(), (uint) minute, sec,
+ thd->temporal_round_mode(), decimals);
+ if (warn)
{
// use check_time_range() to set ltime to the max value depending on dec
int unused;
@@ -2886,10 +2611,10 @@ bool Item_func_maketime::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
check_time_range(ltime, decimals, &unused);
char buf[28];
char *ptr= longlong10_to_str(hour.value(), buf, hour.is_unsigned() ? 10 : -10);
- int len = (int)(ptr - buf) + sprintf(ptr, ":%02u:%02u", (uint)minute, (uint)second);
- make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- buf, len, MYSQL_TIMESTAMP_TIME,
- 0, NullS);
+ int len = (int)(ptr - buf) + sprintf(ptr, ":%02u:%02u",
+ (uint) minute, (uint) sec.sec());
+ ErrConvString err(buf, len, &my_charset_bin);
+ thd->push_warning_truncated_wrong_value("time", err.ptr());
}
return (null_value= 0);
@@ -2907,7 +2632,8 @@ bool Item_func_maketime::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
longlong Item_func_microsecond::val_int()
{
DBUG_ASSERT(fixed == 1);
- Time tm(args[0], Time::Options_for_cast());
+ THD *thd= current_thd;
+ Time tm(thd, args[0], Time::Options_for_cast(thd));
return ((null_value= !tm.is_valid_time())) ?
0 : tm.get_mysql_time()->second_part;
}
@@ -2916,17 +2642,17 @@ longlong Item_func_microsecond::val_int()
longlong Item_func_timestamp_diff::val_int()
{
MYSQL_TIME ltime1, ltime2;
- longlong seconds;
- long microseconds;
+ ulonglong seconds;
+ ulong microseconds;
long months= 0;
int neg= 1;
THD *thd= current_thd;
- ulonglong fuzzydate= TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE;
+ Datetime::Options opt(TIME_NO_ZEROS, thd);
null_value= 0;
- if (Datetime(thd, args[0], fuzzydate).copy_to_mysql_time(&ltime1) ||
- Datetime(thd, args[1], fuzzydate).copy_to_mysql_time(&ltime2))
+ if (Datetime(thd, args[0], opt).copy_to_mysql_time(&ltime1) ||
+ Datetime(thd, args[1], opt).copy_to_mysql_time(&ltime2))
goto null_date;
if (calc_time_diff(&ltime2,&ltime1, 1,
@@ -2996,21 +2722,21 @@ longlong Item_func_timestamp_diff::val_int()
case INTERVAL_MONTH:
return months*neg;
case INTERVAL_WEEK:
- return seconds / SECONDS_IN_24H / 7L * neg;
+ return ((longlong) (seconds / SECONDS_IN_24H / 7L)) * neg;
case INTERVAL_DAY:
- return seconds / SECONDS_IN_24H * neg;
+ return ((longlong) (seconds / SECONDS_IN_24H)) * neg;
case INTERVAL_HOUR:
- return seconds/3600L*neg;
+ return ((longlong) (seconds / 3600L)) * neg;
case INTERVAL_MINUTE:
- return seconds/60L*neg;
+ return ((longlong) (seconds / 60L)) * neg;
case INTERVAL_SECOND:
- return seconds*neg;
+ return ((longlong) seconds) * neg;
case INTERVAL_MICROSECOND:
/*
In MySQL, the difference between any two valid datetime values
in microseconds fits into a longlong.
*/
- return (seconds*1000000L+microseconds)*neg;
+ return ((longlong) ((ulonglong) seconds * 1000000L + microseconds)) * neg;
default:
break;
}
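The INTERVAL_MICROSECOND branch leans on the comment's claim that the microsecond difference fits in a longlong. A quick bound check, assuming the 0001..9999 DATETIME range and reusing the MAX_DAY_NUMBER value defined near the top of this file:

    #include <stdint.h>

    /*
      3652424 days (MAX_DAY_NUMBER) * 86400 s/day * 1000000 us/s is about
      3.16e17, comfortably below INT64_MAX (about 9.22e18), so the
      microsecond spread between two valid DATETIME values cannot overflow.
    */
    static_assert(3652424LL * 86400LL * 1000000LL < INT64_MAX,
                  "datetime range in microseconds fits in a signed 64-bit value");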
@@ -3140,15 +2866,10 @@ void Item_func_get_format::print(String *str, enum_query_type query_type)
specifiers supported by extract_date_time() function.
@return
- One of date_time_format_types values:
- - DATE_TIME_MICROSECOND
- - DATE_TIME
- - DATE_ONLY
- - TIME_MICROSECOND
- - TIME_ONLY
+ A function handler corresponding to the given format
*/
-static date_time_format_types
+static const Item_handled_func::Handler *
get_date_time_result_type(const char *format, uint length)
{
const char *time_part_frms= "HISThiklrs";
@@ -3175,21 +2896,21 @@ get_date_time_result_type(const char *format, uint length)
frac_second_used implies time_part_used, and thus we already
have all types of date-time components and can end our search.
*/
- return DATE_TIME_MICROSECOND;
+ return &func_handler_str_to_date_datetime_usec;
}
}
}
/* We don't have all three types of date-time components */
if (frac_second_used)
- return TIME_MICROSECOND;
+ return &func_handler_str_to_date_time_usec;
if (time_part_used)
{
if (date_part_used)
- return DATE_TIME;
- return TIME_ONLY;
+ return &func_handler_str_to_date_datetime_sec;
+ return &func_handler_str_to_date_time_sec;
}
- return DATE_ONLY;
+ return &func_handler_str_to_date_date;
}
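With the handler objects declared near the top of this diff, the selector above maps a format mask to one of the new STR_TO_DATE handlers. A few illustrative mappings, assuming the usual specifiers (%Y/%m/%d for the date part, %H/%i/%s for the time part, %f for microseconds); these examples are inferred from the visible branches, not listed in the patch itself:

    /*
      "%Y-%m-%d"               -> func_handler_str_to_date_date
      "%H:%i:%s"               -> func_handler_str_to_date_time_sec
      "%H:%i:%s.%f"            -> func_handler_str_to_date_time_usec
      "%Y-%m-%d %H:%i:%s"      -> func_handler_str_to_date_datetime_sec
      "%Y-%m-%d %H:%i:%s.%f"   -> func_handler_str_to_date_datetime_usec
    */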
@@ -3209,56 +2930,27 @@ bool Item_func_str_to_date::fix_length_and_dec()
internal_charset= &my_charset_utf8mb4_general_ci;
maybe_null= true;
- set_handler(&type_handler_datetime2);
- fix_attributes_datetime(TIME_SECOND_PART_DIGITS);
+ set_func_handler(&func_handler_str_to_date_datetime_usec);
if ((const_item= args[1]->const_item()))
{
- char format_buff[64];
- String format_str(format_buff, sizeof(format_buff), &my_charset_bin);
+ StringBuffer<64> format_str;
String *format= args[1]->val_str(&format_str, &format_converter,
internal_charset);
- decimals= 0;
if (!args[1]->null_value)
- {
- date_time_format_types cached_format_type=
- get_date_time_result_type(format->ptr(), format->length());
- switch (cached_format_type) {
- case DATE_ONLY:
- set_handler(&type_handler_newdate);
- fix_attributes_date();
- break;
- case TIME_MICROSECOND:
- set_handler(&type_handler_time2);
- fix_attributes_time(TIME_SECOND_PART_DIGITS);
- break;
- case TIME_ONLY:
- set_handler(&type_handler_time2);
- fix_attributes_time(0);
- break;
- case DATE_TIME_MICROSECOND:
- set_handler(&type_handler_datetime2);
- fix_attributes_datetime(TIME_SECOND_PART_DIGITS);
- break;
- case DATE_TIME:
- set_handler(&type_handler_datetime2);
- fix_attributes_datetime(0);
- break;
- }
- }
+ set_func_handler(get_date_time_result_type(format->ptr(), format->length()));
}
- cached_timestamp_type= mysql_timestamp_type();
- return FALSE;
+ return m_func_handler->fix_length_and_dec(this);
}
-bool Item_func_str_to_date::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_func_str_to_date::get_date_common(THD *thd, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate,
+ timestamp_type tstype)
{
DATE_TIME_FORMAT date_time_format;
- char val_buff[64], format_buff[64];
- String val_string(val_buff, sizeof(val_buff), &my_charset_bin), *val;
- String format_str(format_buff, sizeof(format_buff), &my_charset_bin),
- *format;
+ StringBuffer<64> val_string, format_str;
+ String *val, *format;
val= args[0]->val_str(&val_string, &subject_converter, internal_charset);
format= args[1]->val_str(&format_str, &format_converter, internal_charset);
@@ -3267,29 +2959,22 @@ bool Item_func_str_to_date::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
date_time_format.format.str= (char*) format->ptr();
date_time_format.format.length= format->length();
- if (extract_date_time(&date_time_format, val->ptr(), val->length(),
- ltime, cached_timestamp_type, 0, "datetime",
- fuzzy_date | sql_mode_for_dates(current_thd)))
+ if (extract_date_time(thd, &date_time_format, val->ptr(), val->length(),
+ ltime, tstype, 0, "datetime",
+ date_conv_mode_t(fuzzydate) |
+ sql_mode_for_dates(thd)))
return (null_value=1);
- if (cached_timestamp_type == MYSQL_TIMESTAMP_TIME && ltime->day)
- {
- /*
- Day part for time type can be nonzero value and so
- we should add hours from day part to hour part to
- keep valid time value.
- */
- ltime->hour+= ltime->day*24;
- ltime->day= 0;
- }
return (null_value= 0);
}
-bool Item_func_last_day::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
+bool Item_func_last_day::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- if (get_arg0_date(ltime, fuzzy_date & ~TIME_TIME_ONLY) ||
- (ltime->month == 0))
- return (null_value=1);
+ Datetime::Options opt(date_conv_mode_t(fuzzydate & ~TIME_TIME_ONLY),
+ time_round_mode_t(fuzzydate));
+ Datetime *d= new(ltime) Datetime(thd, args[0], opt);
+ if ((null_value= (!d->is_valid_datetime() || ltime->month == 0)))
+ return true;
uint month_idx= ltime->month-1;
ltime->day= days_in_month[month_idx];
if ( month_idx == 1 && calc_days_in_year(ltime->year) == 366)
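
A quick behavioural check of the rewritten LAST_DAY() path above (the
leap-year branch relies on calc_days_in_year(); results are the standard,
documented ones):

    SELECT LAST_DAY('2020-02-10');   -- '2020-02-29' (leap year)
    SELECT LAST_DAY('2019-02-10');   -- '2019-02-28'
    SELECT LAST_DAY('2019-00-10');   -- NULL (month part is 0)
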
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index 7aacdec85e0..d9ab45ed46d 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -25,13 +25,9 @@
class MY_LOCALE;
-enum date_time_format_types
-{
- TIME_ONLY= 0, TIME_MICROSECOND, DATE_ONLY, DATE_TIME, DATE_TIME_MICROSECOND
-};
-
-bool get_interval_value(Item *args,interval_type int_type, INTERVAL *interval);
+bool get_interval_value(THD *thd, Item *args,
+ interval_type int_type, INTERVAL *interval);
class Item_long_func_date_field: public Item_long_func
@@ -186,9 +182,9 @@ public:
str->set(nr, collation.collation);
return str;
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return get_date_from_int(ltime, fuzzydate);
+ return get_date_from_int(thd, ltime, fuzzydate);
}
const char *func_name() const { return "month"; }
const Type_handler *type_handler() const { return &type_handler_long; }
@@ -459,9 +455,9 @@ public:
{
return (odbc_type ? "dayofweek" : "weekday");
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return type_handler()->Item_get_date(this, ltime, fuzzydate);
+ return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate);
}
const Type_handler *type_handler() const { return &type_handler_long; }
bool fix_length_and_dec()
@@ -516,7 +512,7 @@ public:
}
double real_op() { DBUG_ASSERT(0); return 0; }
String *str_op(String *str) { DBUG_ASSERT(0); return 0; }
- bool date_op(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool date_op(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
DBUG_ASSERT(0);
return true;
@@ -552,7 +548,8 @@ public:
}
bool fix_length_and_dec()
{
- fix_length_and_dec_generic(arg_count ? args[0]->datetime_precision() : 0);
+ fix_length_and_dec_generic(arg_count ?
+ args[0]->datetime_precision(current_thd) : 0);
return FALSE;
}
longlong int_op();
@@ -576,7 +573,7 @@ public:
}
bool fix_length_and_dec()
{
- fix_length_and_dec_generic(args[0]->time_precision());
+ fix_length_and_dec_generic(args[0]->time_precision(current_thd));
return FALSE;
}
longlong int_op();
@@ -586,66 +583,17 @@ public:
};
-class Item_temporal_func: public Item_func
-{
-public:
- Item_temporal_func(THD *thd): Item_func(thd) {}
- Item_temporal_func(THD *thd, Item *a): Item_func(thd, a) {}
- Item_temporal_func(THD *thd, Item *a, Item *b): Item_func(thd, a, b) {}
- Item_temporal_func(THD *thd, Item *a, Item *b, Item *c): Item_func(thd, a, b, c) {}
- String *val_str(String *str);
- longlong val_int() { return val_int_from_date(); }
- double val_real() { return val_real_from_date(); }
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date) { DBUG_ASSERT(0); return 1; }
- my_decimal *val_decimal(my_decimal *decimal_value)
- { return val_decimal_from_date(decimal_value); }
-};
-
-
-/**
- Abstract class for functions returning TIME, DATE, DATETIME or string values,
- whose data type depends on parameters and is set at fix_fields time.
-*/
-class Item_temporal_hybrid_func: public Item_hybrid_func
-{
-protected:
- String ascii_buf; // Conversion buffer
-public:
- Item_temporal_hybrid_func(THD *thd, Item *a, Item *b):
- Item_hybrid_func(thd, a, b) {}
-
- longlong val_int() { return val_int_from_date(); }
- double val_real() { return val_real_from_date(); }
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date)= 0;
- my_decimal *val_decimal(my_decimal *decimal_value)
- { return val_decimal_from_date(decimal_value); }
-
- /**
- Fix the returned timestamp to match field_type(),
- which is important for val_str().
- */
- bool fix_temporal_type(MYSQL_TIME *ltime);
- /**
- Return string value in ASCII character set.
- */
- String *val_str_ascii(String *str);
- /**
- Return string value in @@character_set_connection.
- */
- String *val_str(String *str)
- {
- return val_str_from_val_str_ascii(str, &ascii_buf);
- }
-};
-
-
-class Item_datefunc :public Item_temporal_func
+class Item_datefunc :public Item_func
{
public:
- Item_datefunc(THD *thd): Item_temporal_func(thd) { }
- Item_datefunc(THD *thd, Item *a): Item_temporal_func(thd, a) { }
- Item_datefunc(THD *thd, Item *a, Item *b): Item_temporal_func(thd, a, b) { }
+ Item_datefunc(THD *thd): Item_func(thd) { }
+ Item_datefunc(THD *thd, Item *a): Item_func(thd, a) { }
+ Item_datefunc(THD *thd, Item *a, Item *b): Item_func(thd, a, b) { }
const Type_handler *type_handler() const { return &type_handler_newdate; }
+ longlong val_int() { return Date(this).to_longlong(); }
+ double val_real() { return Date(this).to_double(); }
+ String *val_str(String *to) { return Date(this).to_string(to); }
+ my_decimal *val_decimal(my_decimal *to) { return Date(this).to_decimal(to); }
bool fix_length_and_dec()
{
fix_attributes_date();
@@ -655,26 +603,34 @@ public:
};
-class Item_timefunc :public Item_temporal_func
+class Item_timefunc :public Item_func
{
public:
- Item_timefunc(THD *thd): Item_temporal_func(thd) {}
- Item_timefunc(THD *thd, Item *a): Item_temporal_func(thd, a) {}
- Item_timefunc(THD *thd, Item *a, Item *b): Item_temporal_func(thd, a, b) {}
- Item_timefunc(THD *thd, Item *a, Item *b, Item *c):
- Item_temporal_func(thd, a, b ,c) {}
+ Item_timefunc(THD *thd): Item_func(thd) {}
+ Item_timefunc(THD *thd, Item *a): Item_func(thd, a) {}
+ Item_timefunc(THD *thd, Item *a, Item *b): Item_func(thd, a, b) {}
+ Item_timefunc(THD *thd, Item *a, Item *b, Item *c): Item_func(thd, a, b ,c) {}
const Type_handler *type_handler() const { return &type_handler_time2; }
+ longlong val_int() { return Time(this).to_longlong(); }
+ double val_real() { return Time(this).to_double(); }
+ String *val_str(String *to) { return Time(this).to_string(to, decimals); }
+ my_decimal *val_decimal(my_decimal *to) { return Time(this).to_decimal(to); }
};
-class Item_datetimefunc :public Item_temporal_func
+class Item_datetimefunc :public Item_func
{
public:
- Item_datetimefunc(THD *thd): Item_temporal_func(thd) {}
- Item_datetimefunc(THD *thd, Item *a): Item_temporal_func(thd, a) {}
+ Item_datetimefunc(THD *thd): Item_func(thd) {}
+ Item_datetimefunc(THD *thd, Item *a): Item_func(thd, a) {}
+ Item_datetimefunc(THD *thd, Item *a, Item *b): Item_func(thd, a, b) {}
Item_datetimefunc(THD *thd, Item *a, Item *b, Item *c):
- Item_temporal_func(thd, a, b ,c) {}
+ Item_func(thd, a, b ,c) {}
const Type_handler *type_handler() const { return &type_handler_datetime2; }
+ longlong val_int() { return Datetime(this).to_longlong(); }
+ double val_real() { return Datetime(this).to_double(); }
+ String *val_str(String *to) { return Datetime(this).to_string(to, decimals); }
+ my_decimal *val_decimal(my_decimal *to) { return Datetime(this).to_decimal(to); }
};
@@ -689,7 +645,7 @@ public:
{ decimals= dec; }
bool fix_fields(THD *, Item **);
bool fix_length_and_dec() { fix_attributes_time(decimals); return FALSE; }
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
/*
Abstract method that defines which time zone is used for conversion.
Converts time current time in my_time_t representation to broken-down
@@ -734,7 +690,7 @@ class Item_func_curdate :public Item_datefunc
MYSQL_TIME ltime;
public:
Item_func_curdate(THD *thd): Item_datefunc(thd), last_query_id(0) {}
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)=0;
bool check_vcol_func_processor(void *arg)
{
@@ -777,7 +733,7 @@ public:
bool fix_fields(THD *, Item **);
bool fix_length_and_dec()
{ fix_attributes_datetime(decimals); return FALSE;}
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
virtual void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)=0;
bool check_vcol_func_processor(void *arg)
{
@@ -832,7 +788,7 @@ public:
bool const_item() const { return 0; }
const char *func_name() const { return "sysdate"; }
void store_now_in_TIME(THD *thd, MYSQL_TIME *now_time);
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
table_map used_tables() const { return RAND_TABLE_BIT; }
bool check_vcol_func_processor(void *arg)
{
@@ -852,7 +808,7 @@ class Item_func_from_days :public Item_datefunc
public:
Item_func_from_days(THD *thd, Item *a): Item_datefunc(thd, a) {}
const char *func_name() const { return "from_days"; }
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
bool check_partition_func_processor(void *int_arg) {return FALSE;}
bool check_vcol_func_processor(void *arg) { return FALSE;}
bool check_valid_arguments_processor(void *int_arg)
@@ -917,7 +873,7 @@ class Item_func_from_unixtime :public Item_datetimefunc
Item_func_from_unixtime(THD *thd, Item *a): Item_datetimefunc(thd, a) {}
const char *func_name() const { return "from_unixtime"; }
bool fix_length_and_dec();
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_from_unixtime>(thd, this); }
};
@@ -958,11 +914,11 @@ class Item_func_convert_tz :public Item_datetimefunc
const char *func_name() const { return "convert_tz"; }
bool fix_length_and_dec()
{
- fix_attributes_datetime(args[0]->datetime_precision());
+ fix_attributes_datetime(args[0]->datetime_precision(current_thd));
maybe_null= true;
return FALSE;
}
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
void cleanup();
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_convert_tz>(thd, this); }
@@ -975,7 +931,7 @@ class Item_func_sec_to_time :public Item_timefunc
{ return args[0]->check_type_can_return_decimal(func_name()); }
public:
Item_func_sec_to_time(THD *thd, Item *item): Item_timefunc(thd, item) {}
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
bool fix_length_and_dec()
{
fix_attributes_time(args[0]->decimals);
@@ -988,18 +944,17 @@ public:
};
-class Item_date_add_interval :public Item_temporal_hybrid_func
+class Item_date_add_interval :public Item_handled_func
{
public:
const interval_type int_type; // keep it public
const bool date_sub_interval; // keep it public
Item_date_add_interval(THD *thd, Item *a, Item *b, interval_type type_arg,
bool neg_arg):
- Item_temporal_hybrid_func(thd, a, b),int_type(type_arg),
+ Item_handled_func(thd, a, b), int_type(type_arg),
date_sub_interval(neg_arg) {}
const char *func_name() const { return "date_add_interval"; }
bool fix_length_and_dec();
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
bool eq(const Item *item, bool binary_cmp) const;
void print(String *str, enum_query_type query_type);
enum precedence precedence() const { return ADDINTERVAL_PRECEDENCE; }
@@ -1009,9 +964,17 @@ public:
};
-class Item_extract :public Item_int_func
+class Item_extract :public Item_int_func,
+ public Type_handler_hybrid_field_type
{
- bool date_value;
+ date_mode_t m_date_mode;
+ const Type_handler_int_result *handler_by_length(uint32 length,
+ uint32 threshold)
+ {
+ if (length >= threshold)
+ return &type_handler_longlong;
+ return &type_handler_long;
+ }
void set_date_length(uint32 length)
{
/*
@@ -1020,48 +983,34 @@ class Item_extract :public Item_int_func
because all around the code we assume that max_length is sign inclusive.
Another option is to set unsigned_flag to "true".
*/
- max_length= length; //QQ: see above
- date_value= true;
+ set_handler(handler_by_length(max_length= length, 10)); // QQ: see above
+ m_date_mode= date_mode_t(0);
+ }
+ void set_day_length(uint32 length)
+ {
+ /*
+ Units starting with DAY can be negative:
+ EXTRACT(DAY FROM '-24:00:00') -> -1
+ */
+ set_handler(handler_by_length(max_length= length + 1/*sign*/, 11));
+ m_date_mode= Temporal::Options(TIME_INTERVAL_DAY, current_thd);
}
void set_time_length(uint32 length)
{
- max_length= length + 1/*sign*/;
- date_value= false;
+ set_handler(handler_by_length(max_length= length + 1/*sign*/, 11));
+ m_date_mode= Temporal::Options(TIME_INTERVAL_hhmmssff, current_thd);
}
public:
const interval_type int_type; // keep it public
Item_extract(THD *thd, interval_type type_arg, Item *a):
- Item_int_func(thd, a), int_type(type_arg) {}
+ Item_int_func(thd, a),
+ Type_handler_hybrid_field_type(&type_handler_longlong),
+ m_date_mode(date_mode_t(0)),
+ int_type(type_arg)
+ { }
const Type_handler *type_handler() const
{
- switch (int_type) {
- case INTERVAL_YEAR:
- case INTERVAL_YEAR_MONTH:
- case INTERVAL_QUARTER:
- case INTERVAL_MONTH:
- case INTERVAL_WEEK:
- case INTERVAL_DAY:
- case INTERVAL_DAY_HOUR:
- case INTERVAL_DAY_MINUTE:
- case INTERVAL_DAY_SECOND:
- case INTERVAL_HOUR:
- case INTERVAL_HOUR_MINUTE:
- case INTERVAL_HOUR_SECOND:
- case INTERVAL_MINUTE:
- case INTERVAL_MINUTE_SECOND:
- case INTERVAL_SECOND:
- case INTERVAL_MICROSECOND:
- case INTERVAL_SECOND_MICROSECOND:
- return &type_handler_long;
- case INTERVAL_DAY_MICROSECOND:
- case INTERVAL_HOUR_MICROSECOND:
- case INTERVAL_MINUTE_MICROSECOND:
- return &type_handler_longlong;
- case INTERVAL_LAST:
- break;
- }
- DBUG_ASSERT(0);
- return &type_handler_longlong;
+ return Type_handler_hybrid_field_type::type_handler();
}
longlong val_int();
enum Functype functype() const { return EXTRACT_FUNC; }
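
Illustrative EXTRACT() results matching the sign handling described in
set_date_length()/set_day_length() above (the negative-DAY case restates the
example from the comment; the others are standard behaviour):

    SELECT EXTRACT(DAY  FROM '2019-07-02');   -- 2
    SELECT EXTRACT(HOUR FROM '10:20:30');     -- 10
    SELECT EXTRACT(DAY  FROM '-24:00:00');    -- -1 (units starting with DAY may be negative)
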
@@ -1131,6 +1080,9 @@ class Item_char_typecast :public Item_str_func
void check_truncation_with_warn(String *src, size_t dstlen);
void fix_length_and_dec_internal(CHARSET_INFO *fromcs);
public:
+ // Methods used by ColumnStore
+ uint get_cast_length() const { return cast_length; }
+public:
Item_char_typecast(THD *thd, Item *a, uint length_arg, CHARSET_INFO *cs_arg):
Item_str_func(thd, a), cast_length(length_arg), cast_cs(cs_arg),
m_suppress_warning_to_error_escalation(false) {}
@@ -1157,22 +1109,34 @@ public:
};
-class Item_temporal_typecast: public Item_temporal_func
+class Item_interval_DDhhmmssff_typecast :public Item_char_typecast
{
+ uint m_fsp;
public:
- Item_temporal_typecast(THD *thd, Item *a): Item_temporal_func(thd, a) {}
- virtual const char *cast_type() const = 0;
- void print(String *str, enum_query_type query_type);
+ Item_interval_DDhhmmssff_typecast(THD *thd, Item *a, uint fsp)
+ :Item_char_typecast(thd, a,Interval_DDhhmmssff::max_char_length(fsp),
+ &my_charset_latin1),
+ m_fsp(fsp)
+ { }
+ String *val_str(String *to)
+ {
+ Interval_DDhhmmssff it(current_thd, args[0], m_fsp);
+ null_value= !it.is_valid_interval_DDhhmmssff();
+ return it.to_string(to, m_fsp);
+ }
};
-class Item_date_typecast :public Item_temporal_typecast
+
+class Item_date_typecast :public Item_datefunc
{
public:
- Item_date_typecast(THD *thd, Item *a): Item_temporal_typecast(thd, a) {}
+ Item_date_typecast(THD *thd, Item *a): Item_datefunc(thd, a) {}
const char *func_name() const { return "cast_as_date"; }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
- const char *cast_type() const { return "date"; }
- const Type_handler *type_handler() const { return &type_handler_newdate; }
+ void print(String *str, enum_query_type query_type)
+ {
+ print_cast_temporal(str, query_type);
+ }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
bool fix_length_and_dec()
{
return args[0]->type_handler()->Item_date_typecast_fix_length_and_dec(this);
@@ -1182,15 +1146,17 @@ public:
};
-class Item_time_typecast :public Item_temporal_typecast
+class Item_time_typecast :public Item_timefunc
{
public:
Item_time_typecast(THD *thd, Item *a, uint dec_arg):
- Item_temporal_typecast(thd, a) { decimals= dec_arg; }
+ Item_timefunc(thd, a) { decimals= dec_arg; }
const char *func_name() const { return "cast_as_time"; }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
- const char *cast_type() const { return "time"; }
- const Type_handler *type_handler() const { return &type_handler_time2; }
+ void print(String *str, enum_query_type query_type)
+ {
+ print_cast_temporal(str, query_type);
+ }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
bool fix_length_and_dec()
{
return args[0]->type_handler()->
@@ -1201,15 +1167,17 @@ public:
};
-class Item_datetime_typecast :public Item_temporal_typecast
+class Item_datetime_typecast :public Item_datetimefunc
{
public:
Item_datetime_typecast(THD *thd, Item *a, uint dec_arg):
- Item_temporal_typecast(thd, a) { decimals= dec_arg; }
+ Item_datetimefunc(thd, a) { decimals= dec_arg; }
const char *func_name() const { return "cast_as_datetime"; }
- const char *cast_type() const { return "datetime"; }
- const Type_handler *type_handler() const { return &type_handler_datetime2; }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
+ void print(String *str, enum_query_type query_type)
+ {
+ print_cast_temporal(str, query_type);
+ }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
bool fix_length_and_dec()
{
return args[0]->type_handler()->
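
The three typecast Items above implement CAST(expr AS DATE/TIME/DATETIME);
a minimal illustration, assuming the default sql_mode:

    SELECT CAST('2019-01-02 10:20:30' AS DATE);         -- '2019-01-02'
    SELECT CAST('2019-01-02 10:20:30' AS TIME);         -- '10:20:30'
    SELECT CAST('2019-01-02 10:20:30' AS DATETIME(6));  -- '2019-01-02 10:20:30.000000'
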
@@ -1228,31 +1196,76 @@ public:
Item_func_makedate(THD *thd, Item *a, Item *b):
Item_datefunc(thd, a, b) {}
const char *func_name() const { return "makedate"; }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_makedate>(thd, this); }
};
-class Item_func_add_time :public Item_temporal_hybrid_func
+class Item_func_timestamp :public Item_datetimefunc
{
- const bool is_date;
- int sign;
-
+ bool check_arguments() const
+ {
+ return args[0]->check_type_can_return_date(func_name()) ||
+ args[1]->check_type_can_return_time(func_name());
+ }
public:
- Item_func_add_time(THD *thd, Item *a, Item *b, bool type_arg, bool neg_arg):
- Item_temporal_hybrid_func(thd, a, b), is_date(type_arg)
- { sign= neg_arg ? -1 : 1; }
- bool fix_length_and_dec();
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
- const char *func_name() const
+ Item_func_timestamp(THD *thd, Item *a, Item *b)
+ :Item_datetimefunc(thd, a, b)
+ { }
+ const char *func_name() const { return "timestamp"; }
+ bool fix_length_and_dec()
{
- return is_date ? "timestamp" : sign > 0 ? "addtime" : "subtime";
+ THD *thd= current_thd;
+ uint dec0= args[0]->datetime_precision(thd);
+ uint dec1= Interval_DDhhmmssff::fsp(thd, args[1]);
+ fix_attributes_datetime(MY_MAX(dec0, dec1));
+ maybe_null= true;
+ return false;
+ }
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+ {
+ Datetime dt(thd, args[0], Datetime::Options(TIME_CONV_NONE, thd));
+ if (!dt.is_valid_datetime())
+ return null_value= true;
+ Interval_DDhhmmssff it(thd, args[1]);
+ if (!it.is_valid_interval_DDhhmmssff())
+ return null_value= true;
+ return (null_value= Sec6_add(dt.get_mysql_time(), it.get_mysql_time(), 1).
+ to_datetime(ltime));
}
Item *get_copy(THD *thd)
+ { return get_item_copy<Item_func_timestamp>(thd, this); }
+};
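
The new Item_func_timestamp combines a datetime argument with a
DD hh:mm:ss interval via Sec6_add; in SQL terms (documented two-argument
TIMESTAMP() behaviour, shown here only for illustration):

    SELECT TIMESTAMP('2003-12-31', '12:00:00');            -- '2003-12-31 12:00:00'
    SELECT TIMESTAMP('2003-12-31 12:00:00', '12:00:00');   -- '2004-01-01 00:00:00'
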
+
+
+/**
+ ADDTIME(t,a) and SUBTIME(t,a) are time functions that calculate a
+ time/datetime value
+
+ t: time_or_datetime_expression
+ a: time_expression
+
+ Result: Time value or datetime value
+*/
+
+class Item_func_add_time :public Item_handled_func
+{
+ int sign;
+public:
+ // Methods used by ColumnStore
+ int get_sign() const { return sign; }
+public:
+ Item_func_add_time(THD *thd, Item *a, Item *b, bool neg_arg)
+ :Item_handled_func(thd, a, b), sign(neg_arg ? -1 : 1)
+ { }
+ bool fix_length_and_dec();
+ const char *func_name() const { return sign > 0 ? "addtime" : "subtime"; }
+ Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_add_time>(thd, this); }
};
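
Examples of the addtime/subtime behaviour that the handler classes further
below implement for this Item (values as documented for ADDTIME()/SUBTIME();
not generated by this patch):

    SELECT ADDTIME('2007-12-31 23:59:59.999999', '1 1:1:1.000002');  -- '2008-01-02 01:01:01.000001'
    SELECT SUBTIME('01:00:00', '02:30:00');                          -- '-01:30:00'
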
+
class Item_func_timediff :public Item_timefunc
{
bool check_arguments() const
@@ -1262,12 +1275,14 @@ public:
const char *func_name() const { return "timediff"; }
bool fix_length_and_dec()
{
- uint dec= MY_MAX(args[0]->time_precision(), args[1]->time_precision());
+ THD *thd= current_thd;
+ uint dec= MY_MAX(args[0]->time_precision(thd),
+ args[1]->time_precision(thd));
fix_attributes_time(dec);
maybe_null= true;
return FALSE;
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_timediff>(thd, this); }
};
@@ -1290,7 +1305,7 @@ public:
return FALSE;
}
const char *func_name() const { return "maketime"; }
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_maketime>(thd, this); }
};
@@ -1326,6 +1341,9 @@ class Item_func_timestamp_diff :public Item_longlong_func
{ return check_argument_types_can_return_date(0, arg_count); }
const interval_type int_type;
public:
+ // Methods used by ColumnStore
+ interval_type get_int_type() const { return int_type; };
+public:
Item_func_timestamp_diff(THD *thd, Item *a, Item *b, interval_type type_arg):
Item_longlong_func(thd, a, b), int_type(type_arg) {}
const char *func_name() const { return "timestampdiff"; }
@@ -1369,19 +1387,19 @@ public:
};
-class Item_func_str_to_date :public Item_temporal_hybrid_func
+class Item_func_str_to_date :public Item_handled_func
{
- timestamp_type cached_timestamp_type;
bool const_item;
String subject_converter;
String format_converter;
CHARSET_INFO *internal_charset;
public:
Item_func_str_to_date(THD *thd, Item *a, Item *b):
- Item_temporal_hybrid_func(thd, a, b), const_item(false),
+ Item_handled_func(thd, a, b), const_item(false),
internal_charset(NULL)
{}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
+ bool get_date_common(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate,
+ timestamp_type);
const char *func_name() const { return "str_to_date"; }
bool fix_length_and_dec();
Item *get_copy(THD *thd)
@@ -1396,9 +1414,365 @@ class Item_func_last_day :public Item_datefunc
public:
Item_func_last_day(THD *thd, Item *a): Item_datefunc(thd, a) {}
const char *func_name() const { return "last_day"; }
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_last_day>(thd, this); }
};
+
+/*****************************************************************************/
+
+class Func_handler_date_add_interval
+{
+protected:
+ static uint interval_dec(const Item *item, interval_type int_type)
+ {
+ if (int_type == INTERVAL_MICROSECOND ||
+ (int_type >= INTERVAL_DAY_MICROSECOND &&
+ int_type <= INTERVAL_SECOND_MICROSECOND))
+ return TIME_SECOND_PART_DIGITS;
+ if (int_type == INTERVAL_SECOND && item->decimals > 0)
+ return MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS);
+ return 0;
+ }
+ interval_type int_type(const Item_handled_func *item) const
+ {
+ return static_cast<const Item_date_add_interval*>(item)->int_type;
+ }
+ bool sub(const Item_handled_func *item) const
+ {
+ return static_cast<const Item_date_add_interval*>(item)->date_sub_interval;
+ }
+ bool add(THD *thd, Item *item, interval_type type, bool sub, MYSQL_TIME *to) const
+ {
+ INTERVAL interval;
+ if (get_interval_value(thd, item, type, &interval))
+ return true;
+ if (sub)
+ interval.neg = !interval.neg;
+ return date_add_interval(thd, to, type, interval);
+ }
+};
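
Func_handler_date_add_interval::add() is the common step behind
DATE_ADD()/DATE_SUB(); for orientation (standard, documented results):

    SELECT DATE_ADD('2018-05-01', INTERVAL 1 DAY);              -- '2018-05-02'
    SELECT DATE_ADD('2018-05-01', INTERVAL 1 HOUR);             -- '2018-05-01 01:00:00' (DATE argument promotes to DATETIME)
    SELECT DATE_SUB('2018-05-01 00:00:00', INTERVAL 1 SECOND);  -- '2018-04-30 23:59:59'
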
+
+
+class Func_handler_date_add_interval_datetime:
+ public Item_handled_func::Handler_datetime,
+ public Func_handler_date_add_interval
+{
+public:
+ bool fix_length_and_dec(Item_handled_func *item) const
+ {
+ uint dec= MY_MAX(item->arguments()[0]->datetime_precision(current_thd),
+ interval_dec(item->arguments()[1], int_type(item)));
+ item->fix_attributes_datetime(dec);
+ return false;
+ }
+ bool get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const
+ {
+ Datetime::Options opt(TIME_CONV_NONE, thd);
+ Datetime dt(thd, item->arguments()[0], opt);
+ if (!dt.is_valid_datetime() ||
+ dt.check_date_with_warn(thd, TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE))
+ return (item->null_value= true);
+ dt.copy_to_mysql_time(to);
+ return (item->null_value= add(thd, item->arguments()[1],
+ int_type(item), sub(item), to));
+ }
+};
+
+
+class Func_handler_date_add_interval_datetime_arg0_time:
+ public Func_handler_date_add_interval_datetime
+{
+public:
+ bool get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const;
+};
+
+
+class Func_handler_date_add_interval_date:
+ public Item_handled_func::Handler_date,
+ public Func_handler_date_add_interval
+{
+public:
+ bool get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const
+ {
+ /*
+ The first argument is known to be of the DATE data type (not DATETIME).
+ We don't need rounding here.
+ */
+ Date d(thd, item->arguments()[0], TIME_CONV_NONE);
+ if (!d.is_valid_date() ||
+ d.check_date_with_warn(thd, TIME_NO_ZERO_DATE | TIME_NO_ZERO_IN_DATE))
+ return (item->null_value= true);
+ d.copy_to_mysql_time(to);
+ return (item->null_value= add(thd, item->arguments()[1],
+ int_type(item), sub(item), to));
+ }
+};
+
+
+class Func_handler_date_add_interval_time:
+ public Item_handled_func::Handler_time,
+ public Func_handler_date_add_interval
+{
+public:
+ bool fix_length_and_dec(Item_handled_func *item) const
+ {
+ uint dec= MY_MAX(item->arguments()[0]->time_precision(current_thd),
+ interval_dec(item->arguments()[1], int_type(item)));
+ item->fix_attributes_time(dec);
+ return false;
+ }
+ bool get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const
+ {
+ Time t(thd, item->arguments()[0]);
+ if (!t.is_valid_time())
+ return (item->null_value= true);
+ t.copy_to_mysql_time(to);
+ return (item->null_value= add(thd, item->arguments()[1],
+ int_type(item), sub(item), to));
+ }
+};
+
+
+class Func_handler_date_add_interval_string:
+ public Item_handled_func::Handler_temporal_string,
+ public Func_handler_date_add_interval
+{
+public:
+ bool fix_length_and_dec(Item_handled_func *item) const
+ {
+ uint dec= MY_MAX(item->arguments()[0]->datetime_precision(current_thd),
+ interval_dec(item->arguments()[1], int_type(item)));
+ item->collation.set(item->default_charset(),
+ DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
+ item->fix_char_length_temporal_not_fixed_dec(MAX_DATETIME_WIDTH, dec);
+ return false;
+ }
+ bool get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const
+ {
+ if (item->arguments()[0]->
+ get_date(thd, to, Datetime::Options(TIME_CONV_NONE, thd)) ||
+ (to->time_type != MYSQL_TIMESTAMP_TIME &&
+ check_date_with_warn(thd, to, TIME_NO_ZEROS, MYSQL_TIMESTAMP_ERROR)))
+ return (item->null_value= true);
+ return (item->null_value= add(thd, item->arguments()[1],
+ int_type(item), sub(item), to));
+ }
+};
+
+
+class Func_handler_sign
+{
+protected:
+ int m_sign;
+ Func_handler_sign(int sign) :m_sign(sign) { }
+};
+
+
+class Func_handler_add_time_datetime:
+ public Item_handled_func::Handler_datetime,
+ public Func_handler_sign
+{
+public:
+ Func_handler_add_time_datetime(int sign)
+ :Func_handler_sign(sign)
+ { }
+ bool fix_length_and_dec(Item_handled_func *item) const
+ {
+ THD *thd= current_thd;
+ uint dec0= item->arguments()[0]->datetime_precision(thd);
+ uint dec1= Interval_DDhhmmssff::fsp(thd, item->arguments()[1]);
+ item->fix_attributes_datetime(MY_MAX(dec0, dec1));
+ return false;
+ }
+ bool get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const
+ {
+ DBUG_ASSERT(item->is_fixed());
+ Datetime::Options opt(TIME_CONV_NONE, thd);
+ Datetime dt(thd, item->arguments()[0], opt);
+ if (!dt.is_valid_datetime())
+ return item->null_value= true;
+ Interval_DDhhmmssff it(thd, item->arguments()[1]);
+ if (!it.is_valid_interval_DDhhmmssff())
+ return item->null_value= true;
+ return (item->null_value= (Sec6_add(dt.get_mysql_time(),
+ it.get_mysql_time(), m_sign).
+ to_datetime(to)));
+ }
+};
+
+
+class Func_handler_add_time_time:
+ public Item_handled_func::Handler_time,
+ public Func_handler_sign
+{
+public:
+ Func_handler_add_time_time(int sign)
+ :Func_handler_sign(sign)
+ { }
+ bool fix_length_and_dec(Item_handled_func *item) const
+ {
+ THD *thd= current_thd;
+ uint dec0= item->arguments()[0]->time_precision(thd);
+ uint dec1= Interval_DDhhmmssff::fsp(thd, item->arguments()[1]);
+ item->fix_attributes_time(MY_MAX(dec0, dec1));
+ return false;
+ }
+ bool get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const
+ {
+ DBUG_ASSERT(item->is_fixed());
+ Time t(thd, item->arguments()[0]);
+ if (!t.is_valid_time())
+ return item->null_value= true;
+ Interval_DDhhmmssff i(thd, item->arguments()[1]);
+ if (!i.is_valid_interval_DDhhmmssff())
+ return item->null_value= true;
+ return (item->null_value= (Sec6_add(t.get_mysql_time(),
+ i.get_mysql_time(), m_sign).
+ to_time(thd, to, item->decimals)));
+ }
+};
+
+
+class Func_handler_add_time_string:
+ public Item_handled_func::Handler_temporal_string,
+ public Func_handler_sign
+{
+public:
+ Func_handler_add_time_string(int sign)
+ :Func_handler_sign(sign)
+ { }
+ bool fix_length_and_dec(Item_handled_func *item) const
+ {
+ uint dec0= item->arguments()[0]->decimals;
+ uint dec1= Interval_DDhhmmssff::fsp(current_thd, item->arguments()[1]);
+ uint dec= MY_MAX(dec0, dec1);
+ item->collation.set(item->default_charset(),
+ DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII);
+ item->fix_char_length_temporal_not_fixed_dec(MAX_DATETIME_WIDTH, dec);
+ return false;
+ }
+ bool get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const
+ {
+ DBUG_ASSERT(item->is_fixed());
+ // Detect a proper timestamp type based on the argument values
+ Temporal_hybrid l_time1(thd, item->arguments()[0],
+ Temporal::Options(TIME_TIME_ONLY, thd));
+ if (!l_time1.is_valid_temporal())
+ return (item->null_value= true);
+ Interval_DDhhmmssff l_time2(thd, item->arguments()[1]);
+ if (!l_time2.is_valid_interval_DDhhmmssff())
+ return (item->null_value= true);
+ Sec6_add add(l_time1.get_mysql_time(), l_time2.get_mysql_time(), m_sign);
+ return (item->null_value= (l_time1.get_mysql_time()->time_type ==
+ MYSQL_TIMESTAMP_TIME ?
+ add.to_time(thd, to, item->decimals) :
+ add.to_datetime(to)));
+ }
+};
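
The string handler above picks TIME vs DATETIME semantics from the first
argument at evaluation time; illustrative results per documented ADDTIME()
behaviour:

    SELECT ADDTIME('01:00:00', '02:00:00');              -- '03:00:00'            (TIME semantics)
    SELECT ADDTIME('2001-01-01 01:00:00', '02:00:00');   -- '2001-01-01 03:00:00' (DATETIME semantics)
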
+
+
+class Func_handler_str_to_date_datetime_sec:
+ public Item_handled_func::Handler_datetime
+{
+public:
+ bool fix_length_and_dec(Item_handled_func *item) const
+ {
+ item->fix_attributes_datetime(0);
+ return false;
+ }
+ bool get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const
+ {
+ return static_cast<Item_func_str_to_date*>(item)->
+ get_date_common(thd, to, fuzzy, MYSQL_TIMESTAMP_DATETIME);
+ }
+};
+
+
+class Func_handler_str_to_date_datetime_usec:
+ public Item_handled_func::Handler_datetime
+{
+public:
+ bool fix_length_and_dec(Item_handled_func *item) const
+ {
+ item->fix_attributes_datetime(TIME_SECOND_PART_DIGITS);
+ return false;
+ }
+ bool get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const
+ {
+ return static_cast<Item_func_str_to_date*>(item)->
+ get_date_common(thd, to, fuzzy, MYSQL_TIMESTAMP_DATETIME);
+ }
+};
+
+
+class Func_handler_str_to_date_date: public Item_handled_func::Handler_date
+{
+public:
+ bool get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const
+ {
+ return static_cast<Item_func_str_to_date*>(item)->
+ get_date_common(thd, to, fuzzy, MYSQL_TIMESTAMP_DATE);
+ }
+};
+
+
+class Func_handler_str_to_date_time: public Item_handled_func::Handler_time
+{
+public:
+ bool get_date(THD *thd, Item_handled_func *item,
+ MYSQL_TIME *to, date_mode_t fuzzy) const
+ {
+ if (static_cast<Item_func_str_to_date*>(item)->
+ get_date_common(thd, to, fuzzy, MYSQL_TIMESTAMP_TIME))
+ return true;
+ if (to->day)
+ {
+ /*
+ The day part of a TIME value can be non-zero, so add the hours
+ from the day part to the hour part to keep the time value valid.
+ */
+ to->hour+= to->day * 24;
+ to->day= 0;
+ }
+ return false;
+ }
+};
+
+
+class Func_handler_str_to_date_time_sec: public Func_handler_str_to_date_time
+{
+public:
+ bool fix_length_and_dec(Item_handled_func *item) const
+ {
+ item->fix_attributes_time(0);
+ return false;
+ }
+};
+
+
+class Func_handler_str_to_date_time_usec: public Func_handler_str_to_date_time
+{
+public:
+ bool fix_length_and_dec(Item_handled_func *item) const
+ {
+ item->fix_attributes_time(TIME_SECOND_PART_DIGITS);
+ return false;
+ }
+};
+
+
#endif /* ITEM_TIMEFUNC_INCLUDED */
diff --git a/sql/item_vers.cc b/sql/item_vers.cc
index cfedc6b0f81..c8f1c793895 100644
--- a/sql/item_vers.cc
+++ b/sql/item_vers.cc
@@ -37,9 +37,8 @@ Item_func_trt_ts::Item_func_trt_ts(THD *thd, Item* a, TR_table::field_id_t _trt_
bool
-Item_func_trt_ts::get_date(MYSQL_TIME *res, ulonglong fuzzy_date)
+Item_func_trt_ts::get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate)
{
- THD *thd= current_thd; // can it differ from constructor's?
DBUG_ASSERT(thd);
DBUG_ASSERT(args[0]);
if (args[0]->result_type() != INT_RESULT)
@@ -64,7 +63,7 @@ Item_func_trt_ts::get_date(MYSQL_TIME *res, ulonglong fuzzy_date)
if (null_value)
return true;
- return trt[trt_field]->get_date(res, fuzzy_date);
+ return trt[trt_field]->get_date(res, fuzzydate);
}
@@ -140,7 +139,9 @@ Item_func_trt_id::val_int()
else
{
MYSQL_TIME commit_ts;
- if (args[0]->get_date(&commit_ts, 0))
+ THD *thd= current_thd;
+ Datetime::Options opt(TIME_CONV_NONE, thd);
+ if (args[0]->get_date(thd, &commit_ts, opt))
{
null_value= true;
return 0;
diff --git a/sql/item_vers.h b/sql/item_vers.h
index 8b9c0e6056c..a42b5a033f2 100644
--- a/sql/item_vers.h
+++ b/sql/item_vers.h
@@ -35,7 +35,7 @@ public:
}
return "trt_commit_ts";
}
- bool get_date(MYSQL_TIME *res, ulonglong fuzzy_date);
+ bool get_date(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_func_trt_ts>(thd, this); }
bool fix_length_and_dec()
diff --git a/sql/item_windowfunc.cc b/sql/item_windowfunc.cc
index 2db396d3065..adff1c3612b 100644
--- a/sql/item_windowfunc.cc
+++ b/sql/item_windowfunc.cc
@@ -120,7 +120,6 @@ Item_window_func::fix_fields(THD *thd, Item **ref)
const_item_cache= false;
with_window_func= true;
- with_sum_func= false;
if (fix_length_and_dec())
return TRUE;
@@ -440,12 +439,20 @@ Item_sum_hybrid_simple::val_str(String *str)
return retval;
}
-bool Item_sum_hybrid_simple::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+bool Item_sum_hybrid_simple::val_native(THD *thd, Native *to)
{
DBUG_ASSERT(fixed == 1);
if (null_value)
return true;
- bool retval= value->get_date(ltime, fuzzydate);
+ return val_native_from_item(thd, value, to);
+}
+
+bool Item_sum_hybrid_simple::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
+{
+ DBUG_ASSERT(fixed == 1);
+ if (null_value)
+ return true;
+ bool retval= value->get_date(thd, ltime, fuzzydate);
if ((null_value= value->null_value))
DBUG_ASSERT(retval == true);
return retval;
@@ -514,11 +521,11 @@ void Item_sum_hybrid_simple::reset_field()
}
case DECIMAL_RESULT:
{
- my_decimal value_buff, *arg_dec= args[0]->val_decimal(&value_buff);
+ VDec arg_dec(args[0]);
if (maybe_null)
{
- if (args[0]->null_value)
+ if (arg_dec.is_null())
result_field->set_null();
else
result_field->set_notnull();
@@ -527,9 +534,7 @@ void Item_sum_hybrid_simple::reset_field()
We must store zero in the field as we will use the field value in
add()
*/
- if (!arg_dec) // Null
- arg_dec= &decimal_zero;
- result_field->store_decimal(arg_dec);
+ result_field->store_decimal(arg_dec.ptr_or(&decimal_zero));
break;
}
case ROW_RESULT:
diff --git a/sql/item_windowfunc.h b/sql/item_windowfunc.h
index 9ba60c3956d..8c6a661366f 100644
--- a/sql/item_windowfunc.h
+++ b/sql/item_windowfunc.h
@@ -130,6 +130,7 @@ public:
return false;
}
+ void reset_field() { DBUG_ASSERT(0); }
void update_field() {}
enum Sumfunctype sum_func() const
@@ -193,11 +194,8 @@ public:
return cur_rank;
}
+ void reset_field() { DBUG_ASSERT(0); }
void update_field() {}
- /*
- void reset_field();
- TODO: ^^ what does this do ? It is not called ever?
- */
enum Sumfunctype sum_func () const
{
@@ -261,6 +259,7 @@ class Item_sum_dense_rank: public Item_sum_int
first_add= true;
}
bool add();
+ void reset_field() { DBUG_ASSERT(0); }
void update_field() {}
longlong val_int()
{
@@ -319,7 +318,8 @@ class Item_sum_hybrid_simple : public Item_sum,
my_decimal *val_decimal(my_decimal *);
void reset_field();
String *val_str(String *);
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
+ bool val_native(THD *thd, Native *to);
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate);
const Type_handler *type_handler() const
{ return Type_handler_hybrid_field_type::type_handler(); }
void update_field();
@@ -460,6 +460,7 @@ class Item_sum_window_with_row_count : public Item_sum_num
void set_row_count(ulonglong count) { partition_row_count_ = count; }
+ void reset_field() { DBUG_ASSERT(0); }
protected:
longlong get_row_count() { return partition_row_count_; }
private:
@@ -1250,6 +1251,15 @@ public:
return res;
}
+ bool val_native(THD *thd, Native *to)
+ {
+ if (force_return_blank)
+ return null_value= true;
+ if (read_value_from_result_field)
+ return val_native_from_field(result_field, to);
+ return val_native_from_item(thd, window_func(), to);
+ }
+
my_decimal* val_decimal(my_decimal* dec)
{
my_decimal *res;
@@ -1273,7 +1283,7 @@ public:
return res;
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
bool res;
if (force_return_blank)
@@ -1290,7 +1300,7 @@ public:
}
else
{
- res= window_func()->get_date(ltime, fuzzydate);
+ res= window_func()->get_date(thd, ltime, fuzzydate);
null_value= window_func()->null_value;
}
return res;
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index 63734ecf9ac..146c5aa57fe 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -29,10 +29,8 @@
/*
TODO: future development directions:
- 1. add real constants for XPATH_NODESET_CMP and XPATH_NODESET
- into enum Type in item.h.
- 2. add nodeset_to_nodeset_comparator
- 3. add lacking functions:
+ 1. add nodeset_to_nodeset_comparator
+ 2. add lacking functions:
- name()
- lang()
- string()
@@ -44,7 +42,7 @@
- substring-after()
- normalize-space()
- substring-before()
- 4. add lacking axis:
+ 3. add lacking axis:
- following-sibling
- following,
- preceding-sibling
@@ -151,6 +149,9 @@ public:
};
+static Type_handler_long_blob type_handler_xpath_nodeset;
+
+
/*
Common features of the functions returning a node set.
*/
@@ -181,16 +182,29 @@ public:
void prepare(String *nodeset)
{
prepare_nodes();
- String *res= args[0]->val_nodeset(&tmp_value);
+ String *res= args[0]->val_raw(&tmp_value);
fltbeg= (MY_XPATH_FLT*) res->ptr();
fltend= (MY_XPATH_FLT*) (res->ptr() + res->length());
nodeset->length(0);
}
- enum Type type() const { return XPATH_NODESET; }
+ const Type_handler *type_handler() const
+ {
+ return &type_handler_xpath_nodeset;
+ }
+ const Type_handler *fixed_type_handler() const
+ {
+ return &type_handler_xpath_nodeset;
+ }
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ DBUG_ASSERT(0);
+ return NULL;
+ }
String *val_str(String *str)
{
prepare_nodes();
- String *res= val_nodeset(&tmp2_value);
+ String *res= val_raw(&tmp2_value);
fltbeg= (MY_XPATH_FLT*) res->ptr();
fltend= (MY_XPATH_FLT*) (res->ptr() + res->length());
String active;
@@ -247,7 +261,7 @@ public:
Item_nodeset_func_rootelement(THD *thd, String *pxml):
Item_nodeset_func(thd, pxml) {}
const char *func_name() const { return "xpath_rootelement"; }
- String *val_nodeset(String *nodeset);
+ String *val_raw(String *nodeset);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_nodeset_func_rootelement>(thd, this); }
};
@@ -260,7 +274,7 @@ public:
Item_nodeset_func_union(THD *thd, Item *a, Item *b, String *pxml):
Item_nodeset_func(thd, a, b, pxml) {}
const char *func_name() const { return "xpath_union"; }
- String *val_nodeset(String *nodeset);
+ String *val_raw(String *nodeset);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_nodeset_func_union>(thd, this); }
};
@@ -294,7 +308,7 @@ public:
String *pxml):
Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {}
const char *func_name() const { return "xpath_selfbyname"; }
- String *val_nodeset(String *nodeset);
+ String *val_raw(String *nodeset);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_nodeset_func_selfbyname>(thd, this); }
};
@@ -308,7 +322,7 @@ public:
String *pxml):
Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {}
const char *func_name() const { return "xpath_childbyname"; }
- String *val_nodeset(String *nodeset);
+ String *val_raw(String *nodeset);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_nodeset_func_childbyname>(thd, this); }
};
@@ -324,7 +338,7 @@ public:
Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml),
need_self(need_self_arg) {}
const char *func_name() const { return "xpath_descendantbyname"; }
- String *val_nodeset(String *nodeset);
+ String *val_raw(String *nodeset);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_nodeset_func_descendantbyname>(thd, this); }
};
@@ -340,7 +354,7 @@ public:
Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml),
need_self(need_self_arg) {}
const char *func_name() const { return "xpath_ancestorbyname"; }
- String *val_nodeset(String *nodeset);
+ String *val_raw(String *nodeset);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_nodeset_func_ancestorbyname>(thd, this); }
};
@@ -354,7 +368,7 @@ public:
String *pxml):
Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {}
const char *func_name() const { return "xpath_parentbyname"; }
- String *val_nodeset(String *nodeset);
+ String *val_raw(String *nodeset);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_nodeset_func_parentbyname>(thd, this); }
};
@@ -368,7 +382,7 @@ public:
uint l_arg, String *pxml):
Item_nodeset_func_axisbyname(thd, a, n_arg, l_arg, pxml) {}
const char *func_name() const { return "xpath_attributebyname"; }
- String *val_nodeset(String *nodeset);
+ String *val_raw(String *nodeset);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_nodeset_func_attributebyname>(thd, this); }
};
@@ -385,7 +399,7 @@ public:
Item_nodeset_func_predicate(THD *thd, Item *a, Item *b, String *pxml):
Item_nodeset_func(thd, a, b, pxml) {}
const char *func_name() const { return "xpath_predicate"; }
- String *val_nodeset(String *nodeset);
+ String *val_raw(String *nodeset);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_nodeset_func_predicate>(thd, this); }
};
@@ -398,7 +412,7 @@ public:
Item_nodeset_func_elementbyindex(THD *thd, Item *a, Item *b, String *pxml):
Item_nodeset_func(thd, a, b, pxml) { }
const char *func_name() const { return "xpath_elementbyindex"; }
- String *val_nodeset(String *nodeset);
+ String *val_raw(String *nodeset);
Item *get_copy(THD *thd)
{ return get_item_copy<Item_nodeset_func_elementbyindex>(thd, this); }
};
@@ -420,9 +434,9 @@ public:
const char *func_name() const { return "xpath_cast_bool"; }
longlong val_int()
{
- if (args[0]->type() == XPATH_NODESET)
+ if (args[0]->fixed_type_handler() == &type_handler_xpath_nodeset)
{
- String *flt= args[0]->val_nodeset(&tmp_value);
+ String *flt= args[0]->val_raw(&tmp_value);
return flt->length() == sizeof(MY_XPATH_FLT) ? 1 : 0;
}
return args[0]->val_real() ? 1 : 0;
@@ -455,7 +469,7 @@ public:
String *string_cache;
Item_nodeset_context_cache(THD *thd, String *str_arg, String *pxml):
Item_nodeset_func(thd, pxml), string_cache(str_arg) { }
- String *val_nodeset(String *res)
+ String *val_raw(String *res)
{ return string_cache; }
bool fix_length_and_dec() { max_length= MAX_BLOB_WIDTH; return FALSE; }
Item *get_copy(THD *thd)
@@ -474,7 +488,7 @@ public:
bool fix_length_and_dec() { max_length=10; return FALSE; }
longlong val_int()
{
- String *flt= args[0]->val_nodeset(&tmp_value);
+ String *flt= args[0]->val_raw(&tmp_value);
if (flt->length() == sizeof(MY_XPATH_FLT))
return ((MY_XPATH_FLT*)flt->ptr())->pos + 1;
return 0;
@@ -496,7 +510,7 @@ public:
longlong val_int()
{
uint predicate_supplied_context_size;
- String *res= args[0]->val_nodeset(&tmp_value);
+ String *res= args[0]->val_raw(&tmp_value);
if (res->length() == sizeof(MY_XPATH_FLT) &&
(predicate_supplied_context_size= ((MY_XPATH_FLT*)res->ptr())->size))
return predicate_supplied_context_size;
@@ -519,7 +533,7 @@ public:
double val_real()
{
double sum= 0;
- String *res= args[0]->val_nodeset(&tmp_value);
+ String *res= args[0]->val_raw(&tmp_value);
MY_XPATH_FLT *fltbeg= (MY_XPATH_FLT*) res->ptr();
MY_XPATH_FLT *fltend= (MY_XPATH_FLT*) (res->ptr() + res->length());
uint numnodes= pxml->length() / sizeof(MY_XML_NODE);
@@ -587,19 +601,23 @@ public:
Item_nodeset_to_const_comparator(THD *thd, Item *nodeset, Item *cmpfunc,
String *p):
Item_bool_func(thd, nodeset, cmpfunc), pxml(p) {}
- enum Type type() const { return XPATH_NODESET_CMP; };
const char *func_name() const { return "xpath_nodeset_to_const_comparator"; }
bool check_vcol_func_processor(void *arg)
{
return mark_unsupported_function(func_name(), arg, VCOL_IMPOSSIBLE);
}
-
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ DBUG_ASSERT(0);
+ return NULL;
+ }
longlong val_int()
{
Item_func *comp= (Item_func*)args[1];
Item_string_xml_non_const *fake=
(Item_string_xml_non_const*)(comp->arguments()[0]);
- String *res= args[0]->val_nodeset(&tmp_nodeset);
+ String *res= args[0]->val_raw(&tmp_nodeset);
MY_XPATH_FLT *fltbeg= (MY_XPATH_FLT*) res->ptr();
MY_XPATH_FLT *fltend= (MY_XPATH_FLT*) (res->ptr() + res->length());
MY_XML_NODE *nodebeg= (MY_XML_NODE*) pxml->ptr();
@@ -630,7 +648,7 @@ public:
};
-String *Item_nodeset_func_rootelement::val_nodeset(String *nodeset)
+String *Item_nodeset_func_rootelement::val_raw(String *nodeset)
{
nodeset->length(0);
((XPathFilter*)nodeset)->append_element(0, 0);
@@ -638,11 +656,11 @@ String *Item_nodeset_func_rootelement::val_nodeset(String *nodeset)
}
-String * Item_nodeset_func_union::val_nodeset(String *nodeset)
+String * Item_nodeset_func_union::val_raw(String *nodeset)
{
uint num_nodes= pxml->length() / sizeof(MY_XML_NODE);
- String set0, *s0= args[0]->val_nodeset(&set0);
- String set1, *s1= args[1]->val_nodeset(&set1);
+ String set0, *s0= args[0]->val_raw(&set0);
+ String set1, *s1= args[1]->val_raw(&set1);
String both_str;
both_str.alloc(num_nodes);
char *both= (char*) both_str.ptr();
@@ -669,7 +687,7 @@ String * Item_nodeset_func_union::val_nodeset(String *nodeset)
}
-String *Item_nodeset_func_selfbyname::val_nodeset(String *nodeset)
+String *Item_nodeset_func_selfbyname::val_raw(String *nodeset)
{
prepare(nodeset);
for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
@@ -683,7 +701,7 @@ String *Item_nodeset_func_selfbyname::val_nodeset(String *nodeset)
}
-String *Item_nodeset_func_childbyname::val_nodeset(String *nodeset)
+String *Item_nodeset_func_childbyname::val_raw(String *nodeset)
{
prepare(nodeset);
for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
@@ -704,7 +722,7 @@ String *Item_nodeset_func_childbyname::val_nodeset(String *nodeset)
}
-String *Item_nodeset_func_descendantbyname::val_nodeset(String *nodeset)
+String *Item_nodeset_func_descendantbyname::val_raw(String *nodeset)
{
prepare(nodeset);
for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
@@ -726,7 +744,7 @@ String *Item_nodeset_func_descendantbyname::val_nodeset(String *nodeset)
}
-String *Item_nodeset_func_ancestorbyname::val_nodeset(String *nodeset)
+String *Item_nodeset_func_ancestorbyname::val_raw(String *nodeset)
{
char *active;
String active_str;
@@ -768,7 +786,7 @@ String *Item_nodeset_func_ancestorbyname::val_nodeset(String *nodeset)
}
-String *Item_nodeset_func_parentbyname::val_nodeset(String *nodeset)
+String *Item_nodeset_func_parentbyname::val_raw(String *nodeset)
{
char *active;
String active_str;
@@ -791,7 +809,7 @@ String *Item_nodeset_func_parentbyname::val_nodeset(String *nodeset)
}
-String *Item_nodeset_func_attributebyname::val_nodeset(String *nodeset)
+String *Item_nodeset_func_attributebyname::val_raw(String *nodeset)
{
prepare(nodeset);
for (MY_XPATH_FLT *flt= fltbeg; flt < fltend; flt++)
@@ -812,7 +830,7 @@ String *Item_nodeset_func_attributebyname::val_nodeset(String *nodeset)
}
-String *Item_nodeset_func_predicate::val_nodeset(String *str)
+String *Item_nodeset_func_predicate::val_raw(String *str)
{
Item_nodeset_func *nodeset_func= (Item_nodeset_func*) args[0];
Item_func *comp_func= (Item_func*)args[1];
@@ -832,7 +850,7 @@ String *Item_nodeset_func_predicate::val_nodeset(String *str)
}
-String *Item_nodeset_func_elementbyindex::val_nodeset(String *nodeset)
+String *Item_nodeset_func_elementbyindex::val_raw(String *nodeset)
{
Item_nodeset_func *nodeset_func= (Item_nodeset_func*) args[0];
prepare(nodeset);
@@ -845,7 +863,9 @@ String *Item_nodeset_func_elementbyindex::val_nodeset(String *nodeset)
flt->pos,
size);
int index= (int) (args[1]->val_int()) - 1;
- if (index >= 0 && (flt->pos == (uint) index || args[1]->is_bool_type()))
+ if (index >= 0 &&
+ (flt->pos == (uint) index ||
+ (args[1]->type_handler()->is_bool_type())))
((XPathFilter*)nodeset)->append_element(flt->num, pos++);
}
return nodeset;
@@ -858,7 +878,7 @@ String *Item_nodeset_func_elementbyindex::val_nodeset(String *nodeset)
*/
static Item* nodeset2bool(MY_XPATH *xpath, Item *item)
{
- if (item->type() == Item::XPATH_NODESET)
+ if (item->fixed_type_handler() == &type_handler_xpath_nodeset)
return new (xpath->thd->mem_root)
Item_xpath_cast_bool(xpath->thd, item, xpath->pxml);
return item;
@@ -988,13 +1008,13 @@ static Item *create_comparator(MY_XPATH *xpath,
int oper, MY_XPATH_LEX *context,
Item *a, Item *b)
{
- if (a->type() != Item::XPATH_NODESET &&
- b->type() != Item::XPATH_NODESET)
+ if (a->fixed_type_handler() != &type_handler_xpath_nodeset &&
+ b->fixed_type_handler() != &type_handler_xpath_nodeset)
{
return eq_func(xpath->thd, oper, a, b); // two scalar arguments
}
- else if (a->type() == Item::XPATH_NODESET &&
- b->type() == Item::XPATH_NODESET)
+ else if (a->fixed_type_handler() == &type_handler_xpath_nodeset &&
+ b->fixed_type_handler() == &type_handler_xpath_nodeset)
{
uint len= (uint)(xpath->query.end - context->beg);
set_if_smaller(len, 32);
@@ -1019,7 +1039,7 @@ static Item *create_comparator(MY_XPATH *xpath,
Item_string_xml_non_const(thd, "", 0, xpath->cs));
Item_nodeset_func *nodeset;
Item *scalar, *comp;
- if (a->type() == Item::XPATH_NODESET)
+ if (a->fixed_type_handler() == &type_handler_xpath_nodeset)
{
nodeset= (Item_nodeset_func*) a;
scalar= b;
@@ -1053,7 +1073,7 @@ static Item* nametestfunc(MY_XPATH *xpath,
MEM_ROOT *mem_root= thd->mem_root;
DBUG_ASSERT(arg != 0);
- DBUG_ASSERT(arg->type() == Item::XPATH_NODESET);
+ DBUG_ASSERT(arg->fixed_type_handler() == &type_handler_xpath_nodeset);
DBUG_ASSERT(beg != 0);
DBUG_ASSERT(len > 0);
@@ -1306,7 +1326,7 @@ static Item *create_func_substr(MY_XPATH *xpath, Item **args, uint nargs)
static Item *create_func_count(MY_XPATH *xpath, Item **args, uint nargs)
{
- if (args[0]->type() != Item::XPATH_NODESET)
+ if (args[0]->fixed_type_handler() != &type_handler_xpath_nodeset)
return 0;
return new (xpath->thd->mem_root) Item_func_xpath_count(xpath->thd, args[0], xpath->pxml);
}
@@ -1314,7 +1334,7 @@ static Item *create_func_count(MY_XPATH *xpath, Item **args, uint nargs)
static Item *create_func_sum(MY_XPATH *xpath, Item **args, uint nargs)
{
- if (args[0]->type() != Item::XPATH_NODESET)
+ if (args[0]->fixed_type_handler() != &type_handler_xpath_nodeset)
return 0;
return new (xpath->thd->mem_root)
Item_func_xpath_sum(xpath->thd, args[0], xpath->pxml);
@@ -1793,7 +1813,8 @@ my_xpath_parse_AxisSpecifier_NodeTest_opt_Predicate_list(MY_XPATH *xpath)
xpath->item= nodeset2bool(xpath, xpath->item);
- if (xpath->item->is_bool_type())
+ const Type_handler *fh;
+ if ((fh= xpath->item->fixed_type_handler()) && fh->is_bool_type())
{
xpath->context= new (xpath->thd->mem_root)
Item_nodeset_func_predicate(xpath->thd, prev_context,
@@ -2047,11 +2068,11 @@ static int my_xpath_parse_UnionExpr(MY_XPATH *xpath)
while (my_xpath_parse_term(xpath, MY_XPATH_LEX_VLINE))
{
Item *prev= xpath->item;
- if (prev->type() != Item::XPATH_NODESET)
+ if (prev->fixed_type_handler() != &type_handler_xpath_nodeset)
return 0;
if (!my_xpath_parse_PathExpr(xpath)
- || xpath->item->type() != Item::XPATH_NODESET)
+ || xpath->item->fixed_type_handler() != &type_handler_xpath_nodeset)
{
xpath->error= 1;
return 0;
@@ -2089,7 +2110,7 @@ my_xpath_parse_FilterExpr_opt_slashes_RelativeLocationPath(MY_XPATH *xpath)
if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_SLASH))
return 1;
- if (xpath->item->type() != Item::XPATH_NODESET)
+ if (xpath->item->fixed_type_handler() != &type_handler_xpath_nodeset)
{
xpath->lasttok= xpath->prevtok;
xpath->error= 1;
@@ -3054,7 +3075,7 @@ String *Item_func_xml_update::val_str(String *str)
null_value= 0;
if (!nodeset_func || get_xml(&xml) ||
!(rep= args[2]->val_str(&tmp_value3)) ||
- !(nodeset= nodeset_func->val_nodeset(&tmp_value2)))
+ !(nodeset= nodeset_func->val_raw(&tmp_value2)))
{
null_value= 1;
return 0;
diff --git a/sql/lex.h b/sql/lex.h
index da20468d593..bb57f308475 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -55,6 +55,7 @@ static SYMBOL symbols[] = {
{ ">>", SYM(SHIFT_RIGHT)},
{ "<=>", SYM(EQUAL_SYM)},
{ "ACCESSIBLE", SYM(ACCESSIBLE_SYM)},
+ { "ACCOUNT", SYM(ACCOUNT_SYM)},
{ "ACTION", SYM(ACTION)},
{ "ADD", SYM(ADD)},
{ "ADMIN", SYM(ADMIN_SYM)},
@@ -230,6 +231,7 @@ static SYMBOL symbols[] = {
{ "EXISTS", SYM(EXISTS)},
{ "EXIT", SYM(EXIT_MARIADB_SYM)},
{ "EXPANSION", SYM(EXPANSION_SYM)},
+ { "EXPIRE", SYM(EXPIRE_SYM)},
{ "EXPORT", SYM(EXPORT_SYM)},
{ "EXPLAIN", SYM(DESCRIBE)},
{ "EXTENDED", SYM(EXTENDED_SYM)},
@@ -418,6 +420,7 @@ static SYMBOL symbols[] = {
{ "NATIONAL", SYM(NATIONAL_SYM)},
{ "NATURAL", SYM(NATURAL)},
{ "NCHAR", SYM(NCHAR_SYM)},
+ { "NEVER", SYM(NEVER_SYM)},
{ "NEW", SYM(NEW_SYM)},
{ "NEXT", SYM(NEXT_SYM)},
{ "NEXTVAL", SYM(NEXTVAL_SYM)},
@@ -476,6 +479,7 @@ static SYMBOL symbols[] = {
{ "POINT", SYM(POINT_SYM)},
{ "POLYGON", SYM(POLYGON)},
{ "PORT", SYM(PORT_SYM)},
+ { "PORTION", SYM(PORTION_SYM)},
{ "PRECEDES", SYM(PRECEDES_SYM)},
{ "PRECEDING", SYM(PRECEDING_SYM)},
{ "PRECISION", SYM(PRECISION)},
@@ -589,6 +593,7 @@ static SYMBOL symbols[] = {
{ "SONAME", SYM(SONAME_SYM)},
{ "SOUNDS", SYM(SOUNDS_SYM)},
{ "SOURCE", SYM(SOURCE_SYM)},
+ { "STAGE", SYM(STAGE_SYM)},
{ "STORED", SYM(STORED_SYM)},
{ "SPATIAL", SYM(SPATIAL_SYM)},
{ "SPECIFIC", SYM(SPECIFIC_SYM)},
diff --git a/sql/lock.cc b/sql/lock.cc
index aeba6cc7504..d53aad5a2ba 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -77,6 +77,7 @@
#include "sql_base.h" // close_tables_for_reopen
#include "sql_parse.h" // is_log_table_write_query
#include "sql_acl.h" // SUPER_ACL
+#include "sql_handler.h"
#include <hash.h>
#include "wsrep_mysqld.h"
@@ -855,10 +856,9 @@ bool lock_schema_name(THD *thd, const char *db)
return TRUE;
}
- if (thd->global_read_lock.can_acquire_protection())
+ if (thd->has_read_only_protection())
return TRUE;
- global_request.init(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE,
- MDL_STATEMENT);
+ global_request.init(MDL_key::BACKUP, "", "", MDL_BACKUP_DDL, MDL_STATEMENT);
mdl_request.init(MDL_key::SCHEMA, db, "", MDL_EXCLUSIVE, MDL_TRANSACTION);
mdl_requests.push_front(&mdl_request);
@@ -914,10 +914,9 @@ bool lock_object_name(THD *thd, MDL_key::enum_mdl_namespace mdl_type,
DBUG_ASSERT(name);
DEBUG_SYNC(thd, "before_wait_locked_pname");
- if (thd->global_read_lock.can_acquire_protection())
+ if (thd->has_read_only_protection())
return TRUE;
- global_request.init(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE,
- MDL_STATEMENT);
+ global_request.init(MDL_key::BACKUP, "", "", MDL_BACKUP_DDL, MDL_STATEMENT);
schema_request.init(MDL_key::SCHEMA, db, "", MDL_INTENTION_EXCLUSIVE,
MDL_TRANSACTION);
mdl_request.init(mdl_type, db, name, MDL_EXCLUSIVE, MDL_TRANSACTION);
@@ -948,10 +947,10 @@ bool lock_object_name(THD *thd, MDL_key::enum_mdl_namespace mdl_type,
semi-automatic. We assume that any statement which should be blocked
by global read lock will either open and acquires write-lock on tables
or acquires metadata locks on objects it is going to modify. For any
- such statement global IX metadata lock is automatically acquired for
- its duration (in case of LOCK TABLES until end of LOCK TABLES mode).
- And lock_global_read_lock() simply acquires global S metadata lock
- and thus prohibits execution of statements which modify data (unless
+ such statement MDL_BACKUP_STMT metadata lock is automatically acquired
+ for its duration (in case of LOCK TABLES until end of LOCK TABLES mode).
+ And lock_global_read_lock() simply acquires MDL_BACKUP_FTWRL1 metadata
+ lock and thus prohibits execution of statements which modify data (unless
they modify only temporary tables). If deadlock happens it is detected
by MDL subsystem and resolved in the standard fashion (by backing-off
metadata locks acquired so far and restarting open tables process
@@ -992,11 +991,23 @@ bool lock_object_name(THD *thd, MDL_key::enum_mdl_namespace mdl_type,
/**
Take global read lock, wait if there is protection against lock.
- If the global read lock is already taken by this thread, then nothing is done.
+ If the global read lock is already taken by this thread, then nothing is
+ done.
+
+ A concurrent thread can acquire protection against the global read lock
+ either before or after it has acquired a table metadata lock. This may lead
+ to a deadlock if there is a pending global read lock request. E.g.
+ t1 does DML, holds SHARED table lock, waiting for t3 (GRL protection)
+ t2 does DDL, holds GRL protection, waiting for t1 (EXCLUSIVE)
+ t3 does FTWRL, has pending GRL, waiting for t2 (GRL)
+
+ Since this deadlock is very rare and the FTWRL connection must not hold any
+ other locks, the FTWRL connection is made the deadlock victim and the
+ attempt to acquire the GRL is retried.
See also "Handling of global read locks" above.
- @param thd Reference to thread.
+ @param thd Reference to thread.
@retval False Success, global read lock set, commits are NOT blocked.
@retval True Failure, thread was killed.
@@ -1008,18 +1019,40 @@ bool Global_read_lock::lock_global_read_lock(THD *thd)
if (!m_state)
{
+ MDL_deadlock_and_lock_abort_error_handler mdl_deadlock_handler;
MDL_request mdl_request;
+ bool result;
- DBUG_ASSERT(! thd->mdl_context.is_lock_owner(MDL_key::GLOBAL, "", "",
- MDL_SHARED));
- mdl_request.init(MDL_key::GLOBAL, "", "", MDL_SHARED, MDL_EXPLICIT);
-
- if (thd->mdl_context.acquire_lock(&mdl_request,
- thd->variables.lock_wait_timeout))
+ if (thd->current_backup_stage != BACKUP_FINISHED)
+ {
+ my_error(ER_BACKUP_LOCK_IS_ACTIVE, MYF(0));
DBUG_RETURN(1);
+ }
- m_mdl_global_shared_lock= mdl_request.ticket;
+ DBUG_ASSERT(! thd->mdl_context.is_lock_owner(MDL_key::BACKUP, "", "",
+ MDL_BACKUP_FTWRL1));
+ DBUG_ASSERT(! thd->mdl_context.is_lock_owner(MDL_key::BACKUP, "", "",
+ MDL_BACKUP_FTWRL2));
+ mdl_request.init(MDL_key::BACKUP, "", "", MDL_BACKUP_FTWRL1,
+ MDL_EXPLICIT);
+
+ do
+ {
+ mdl_deadlock_handler.init();
+ thd->push_internal_handler(&mdl_deadlock_handler);
+ result= thd->mdl_context.acquire_lock(&mdl_request,
+ thd->variables.lock_wait_timeout);
+ thd->pop_internal_handler();
+ } while (mdl_deadlock_handler.need_reopen());
+
+ if (result)
+ DBUG_RETURN(true);
+
+ m_mdl_global_read_lock= mdl_request.ticket;
m_state= GRL_ACQUIRED;
+
+ /* Release HANDLER OPEN after we have got our MDL lock */
+ mysql_ha_cleanup_no_free(thd);
}
/*
We DON'T set global_read_lock_blocks_commit now, it will be set after
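The acquisition code in this hunk retries whenever the FTWRL connection is chosen as the deadlock victim. Below is a minimal standalone sketch of that retry shape; the handler and context types are simplified stand-ins, not the server's MDL_context or MDL_deadlock_and_lock_abort_error_handler.

#include <cstdio>

struct Deadlock_handler {
  bool victim= false;
  void init() { victim= false; }
  bool need_reopen() const { return victim; }
};

struct Lock_context {
  int failed_attempts;               // simulate being picked as victim N times
  bool acquire(Deadlock_handler *h)  // returns true on a hard error
  {
    if (failed_attempts-- > 0)
    {
      h->victim= true;               // deadlock detected: back off and retry
      return false;
    }
    return false;                    // lock acquired
  }
};

static bool acquire_with_retry(Lock_context *ctx)
{
  Deadlock_handler handler;
  bool result;
  do
  {
    handler.init();
    result= ctx->acquire(&handler);
  } while (handler.need_reopen());   // retry while we keep being the victim
  return result;
}

int main()
{
  Lock_context ctx{2};
  std::printf("acquired: %d\n", !acquire_with_retry(&ctx));
}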
@@ -1047,7 +1080,7 @@ void Global_read_lock::unlock_global_read_lock(THD *thd)
{
DBUG_ENTER("unlock_global_read_lock");
- DBUG_ASSERT(m_mdl_global_shared_lock && m_state);
+ DBUG_ASSERT(m_mdl_global_read_lock && m_state);
if (thd->global_disable_checkpoint)
{
@@ -1058,31 +1091,27 @@ void Global_read_lock::unlock_global_read_lock(THD *thd)
}
}
- if (m_mdl_blocks_commits_lock)
- {
- thd->mdl_context.release_lock(m_mdl_blocks_commits_lock);
- m_mdl_blocks_commits_lock= NULL;
+ thd->mdl_context.release_lock(m_mdl_global_read_lock);
+
#ifdef WITH_WSREP
- if (WSREP(thd) || wsrep_node_is_donor())
+ if (m_state == GRL_ACQUIRED_AND_BLOCKS_COMMIT)
+ {
+ Wsrep_server_state& server_state= Wsrep_server_state::instance();
+ if (server_state.state() == Wsrep_server_state::s_donor ||
+ (wsrep_on(thd) && server_state.state() != Wsrep_server_state::s_synced))
{
+ /* TODO: maybe redundant here? */
wsrep_locked_seqno= WSREP_SEQNO_UNDEFINED;
- wsrep->resume(wsrep);
- /* resync here only if we did implicit desync earlier */
- if (!wsrep_desync && wsrep_node_is_synced())
- {
- int ret = wsrep->resync(wsrep);
- if (ret != WSREP_OK)
- {
- WSREP_WARN("resync failed %d for FTWRL: db: %s, query: %s",
- ret, thd->get_db(), thd->query());
- DBUG_VOID_RETURN;
- }
- }
+ server_state.resume();
+ }
+ else if (wsrep_on(thd) && server_state.state() == Wsrep_server_state::s_synced)
+ {
+ server_state.resume_and_resync();
}
-#endif /* WITH_WSREP */
}
- thd->mdl_context.release_lock(m_mdl_global_shared_lock);
- m_mdl_global_shared_lock= NULL;
+#endif /* WITH_WSREP */
+
+ m_mdl_global_read_lock= NULL;
m_state= GRL_NONE;
DBUG_VOID_RETURN;
@@ -1106,7 +1135,6 @@ void Global_read_lock::unlock_global_read_lock(THD *thd)
bool Global_read_lock::make_global_read_lock_block_commit(THD *thd)
{
- MDL_request mdl_request;
DBUG_ENTER("make_global_read_lock_block_commit");
/*
If we didn't succeed lock_global_read_lock(), or if we already succeeded
@@ -1116,77 +1144,38 @@ bool Global_read_lock::make_global_read_lock_block_commit(THD *thd)
if (m_state != GRL_ACQUIRED)
DBUG_RETURN(0);
-#ifdef WITH_WSREP
- if (WSREP(thd) && m_mdl_blocks_commits_lock)
- {
- WSREP_DEBUG("GRL was in block commit mode when entering "
- "make_global_read_lock_block_commit");
- DBUG_RETURN(FALSE);
- }
-#endif /* WITH_WSREP */
-
- mdl_request.init(MDL_key::COMMIT, "", "", MDL_SHARED, MDL_EXPLICIT);
-
- if (thd->mdl_context.acquire_lock(&mdl_request,
- thd->variables.lock_wait_timeout))
+ if (thd->mdl_context.upgrade_shared_lock(m_mdl_global_read_lock,
+ MDL_BACKUP_FTWRL2,
+ thd->variables.lock_wait_timeout))
DBUG_RETURN(TRUE);
- m_mdl_blocks_commits_lock= mdl_request.ticket;
m_state= GRL_ACQUIRED_AND_BLOCKS_COMMIT;
#ifdef WITH_WSREP
- /* Native threads should bail out before wsrep oprations to follow.
- Donor servicing thread is an exception, it should pause provider but not desync,
- as it is already desynced in donor state
+ /* Native threads should bail out before wsrep operations to follow.
+ Donor servicing thread is an exception, it should pause provider
+ but not desync, as it is already desynced in donor state.
+ Desync should be called only when we are in synced state.
*/
- if (!WSREP(thd) && !wsrep_node_is_donor())
+ Wsrep_server_state& server_state= Wsrep_server_state::instance();
+ wsrep::seqno paused_seqno;
+ if (server_state.state() == Wsrep_server_state::s_donor ||
+ (wsrep_on(thd) && server_state.state() != Wsrep_server_state::s_synced))
{
- DBUG_RETURN(FALSE);
+ paused_seqno= server_state.pause();
}
-
- /* if already desynced or donor, avoid double desyncing
- if not in PC and synced, desyncing is not possible either
- */
- if (wsrep_desync || !wsrep_node_is_synced())
+ else if (wsrep_on(thd) && server_state.state() == Wsrep_server_state::s_synced)
{
- WSREP_DEBUG("desync set upfont, skipping implicit desync for FTWRL: %d",
- wsrep_desync);
+ paused_seqno= server_state.desync_and_pause();
}
else
{
- int rcode;
- WSREP_DEBUG("running implicit desync for node");
- rcode = wsrep->desync(wsrep);
- if (rcode != WSREP_OK)
- {
- WSREP_WARN("FTWRL desync failed %d for schema: %s, query: %s",
- rcode, thd->get_db(), thd->query());
- my_message(ER_LOCK_DEADLOCK, "wsrep desync failed for FTWRL", MYF(0));
- DBUG_RETURN(TRUE);
- }
- }
-
- long long ret = wsrep->pause(wsrep);
- if (ret >= 0)
- {
- wsrep_locked_seqno= ret;
+ DBUG_RETURN(FALSE);
}
- else if (ret != -ENOSYS) /* -ENOSYS - no provider */
+ WSREP_INFO("Server paused at: %lld", paused_seqno.get());
+ if (paused_seqno.get() >= 0)
{
- long long ret = wsrep->pause(wsrep);
- if (ret >= 0)
- {
- wsrep_locked_seqno= ret;
- }
- else if (ret != -ENOSYS) /* -ENOSYS - no provider */
- {
- WSREP_ERROR("Failed to pause provider: %lld (%s)", -ret, strerror(-ret));
-
- DBUG_ASSERT(m_mdl_blocks_commits_lock == NULL);
- wsrep_locked_seqno= WSREP_SEQNO_UNDEFINED;
- my_error(ER_LOCK_DEADLOCK, MYF(0));
- DBUG_RETURN(TRUE);
- }
+ wsrep_locked_seqno= paused_seqno.get();
}
#endif /* WITH_WSREP */
DBUG_RETURN(FALSE);
@@ -1201,10 +1190,8 @@ bool Global_read_lock::make_global_read_lock_block_commit(THD *thd)
void Global_read_lock::set_explicit_lock_duration(THD *thd)
{
- if (m_mdl_global_shared_lock)
- thd->mdl_context.set_lock_duration(m_mdl_global_shared_lock, MDL_EXPLICIT);
- if (m_mdl_blocks_commits_lock)
- thd->mdl_context.set_lock_duration(m_mdl_blocks_commits_lock, MDL_EXPLICIT);
+ if (m_mdl_global_read_lock)
+ thd->mdl_context.set_lock_duration(m_mdl_global_read_lock, MDL_EXPLICIT);
}
/**
diff --git a/sql/log.cc b/sql/log.cc
index ac885746c62..aa63736b796 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -55,10 +55,14 @@
#include "sql_show.h"
#include "my_pthread.h"
#include "semisync_master.h"
-#include "wsrep_mysqld.h"
#include "sp_rcontext.h"
#include "sp_head.h"
+#include "wsrep_mysqld.h"
+#ifdef WITH_WSREP
+#include "wsrep_trans_observer.h"
+#endif /* WITH_WSREP */
+
/* max size of the log message */
#define MAX_LOG_BUFFER_SIZE 1024
#define MAX_TIME_SIZE 32
@@ -866,10 +870,10 @@ bool Log_to_csv_event_handler::
Open_tables_backup open_tables_backup;
CHARSET_INFO *client_cs= thd->variables.character_set_client;
bool save_time_zone_used;
- long query_time= (long) MY_MIN(query_utime/1000000, TIME_MAX_VALUE_SECONDS);
- long lock_time= (long) MY_MIN(lock_utime/1000000, TIME_MAX_VALUE_SECONDS);
- long query_time_micro= (long) (query_utime % 1000000);
- long lock_time_micro= (long) (lock_utime % 1000000);
+ ulong query_time= (ulong) MY_MIN(query_utime/1000000, TIME_MAX_VALUE_SECONDS);
+ ulong lock_time= (ulong) MY_MIN(lock_utime/1000000, TIME_MAX_VALUE_SECONDS);
+ ulong query_time_micro= (ulong) (query_utime % 1000000);
+ ulong lock_time_micro= (ulong) (lock_utime % 1000000);
DBUG_ENTER("Log_to_csv_event_handler::log_slow");
@@ -1162,6 +1166,10 @@ bool LOGGER::error_log_print(enum loglevel level, const char *format,
{
bool error= FALSE;
Log_event_handler **current_handler;
+ THD *thd= current_thd;
+
+ if (likely(thd))
+ thd->error_printed_to_log= 1;
/* currently we don't need locking here as there is no error_log table */
for (current_handler= error_log_handler_list ; *current_handler ;)
@@ -1699,7 +1707,7 @@ static int binlog_close_connection(handlerton *hton, THD *thd)
(binlog_cache_mngr*) thd_get_ha_data(thd, binlog_hton);
#ifdef WITH_WSREP
if (cache_mngr && !cache_mngr->trx_cache.empty()) {
- IO_CACHE* cache= get_trans_log(thd);
+ IO_CACHE* cache= cache_mngr->get_binlog_cache_log(true);
uchar *buf;
size_t len=0;
wsrep_write_cache_buf(cache, &buf, &len);
@@ -2183,18 +2191,30 @@ void MYSQL_BIN_LOG::set_write_error(THD *thd, bool is_transactional)
{
if (is_transactional)
{
- my_message(ER_TRANS_CACHE_FULL, ER_THD(thd, ER_TRANS_CACHE_FULL), MYF(MY_WME));
+ my_message(ER_TRANS_CACHE_FULL, ER_THD(thd, ER_TRANS_CACHE_FULL), MYF(0));
}
else
{
- my_message(ER_STMT_CACHE_FULL, ER_THD(thd, ER_STMT_CACHE_FULL), MYF(MY_WME));
+ my_message(ER_STMT_CACHE_FULL, ER_THD(thd, ER_STMT_CACHE_FULL), MYF(0));
}
}
else
{
- my_error(ER_ERROR_ON_WRITE, MYF(MY_WME), name, errno);
+ my_error(ER_ERROR_ON_WRITE, MYF(0), name, errno);
}
-
+#ifdef WITH_WSREP
+ /* If a wsrep transaction is active and binlog emulation is on, a
+ binlog write error may leave the transaction without any registered
+ htons. This causes the wsrep rollback hooks to be skipped and the
+ transaction will remain alive in the wsrep world after rollback.
+ Register the binlog hton here to ensure that rollback happens in full. */
+ if (WSREP_EMULATE_BINLOG(thd))
+ {
+ if (is_transactional)
+ trans_register_ha(thd, TRUE, binlog_hton);
+ trans_register_ha(thd, FALSE, binlog_hton);
+ }
+#endif /* WITH_WSREP */
DBUG_VOID_RETURN;
}
@@ -2293,8 +2313,17 @@ static int binlog_savepoint_rollback(handlerton *hton, THD *thd, void *sv)
non-transactional table. Otherwise, truncate the binlog cache starting
from the SAVEPOINT command.
*/
+#ifdef WITH_WSREP
+ /* for streaming replication, we must replicate savepoint rollback so that
+ slaves can maintain SR transactions
+ */
+ if (unlikely(thd->wsrep_trx().is_streaming() ||
+ (trans_has_updated_non_trans_table(thd)) ||
+ (thd->variables.option_bits & OPTION_KEEP_LOG)))
+#else
if (unlikely(trans_has_updated_non_trans_table(thd) ||
(thd->variables.option_bits & OPTION_KEEP_LOG)))
+#endif /* WITH_WSREP */
{
char buf[1024];
String log_query(buf, sizeof(buf), &my_charset_bin);
@@ -2420,7 +2449,7 @@ static void setup_windows_event_source()
// Create the event source registry key
dwError= RegCreateKey(HKEY_LOCAL_MACHINE,
- "SYSTEM\\CurrentControlSet\\Services\\EventLog\\Application\\MySQL",
+ "SYSTEM\\CurrentControlSet\\Services\\EventLog\\Application\\MariaDB",
&hRegKey);
/* Name of the PE module that contains the message resource */
@@ -2452,11 +2481,19 @@ static void setup_windows_event_source()
exceeds FN_REFLEN; (ii) if the number of extensions is exhausted;
or (iii) some other error happened while examining the filesystem.
+ @param name Base name of file
+ @param min_log_number_to_use minimum log number to choose. Set by
+ CHANGE MASTER .. TO
+ @param last_used_log_number If 0, find the log number based on files.
+ If not 0, then use *last_used_log_number +1.
+ Will be updated to the newly generated number.
@return
+ 0 ok
nonzero if not possible to get unique filename.
*/
-static int find_uniq_filename(char *name, ulong next_log_number)
+static int find_uniq_filename(char *name, ulong min_log_number_to_use,
+ ulong *last_used_log_number)
{
uint i;
char buff[FN_REFLEN], ext_buf[FN_REFLEN];
@@ -2475,24 +2512,34 @@ static int find_uniq_filename(char *name, ulong next_log_number)
*end='.';
length= (size_t) (end - start + 1);
- if ((DBUG_EVALUATE_IF("error_unique_log_filename", 1,
- unlikely(!(dir_info= my_dir(buff,
- MYF(MY_DONT_SORT)))))))
- { // This shouldn't happen
- strmov(end,".1"); // use name+1
- DBUG_RETURN(1);
- }
- file_info= dir_info->dir_entry;
- max_found= next_log_number ? next_log_number-1 : 0;
- for (i= dir_info->number_of_files ; i-- ; file_info++)
+ /* The following matches the code for my_dir () below */
+ DBUG_EXECUTE_IF("error_unique_log_filename",
+ {
+ strmov(end,".1");
+ DBUG_RETURN(1);
+ });
+
+ if (*last_used_log_number)
+ max_found= *last_used_log_number;
+ else
{
- if (strncmp(file_info->name, start, length) == 0 &&
- test_if_number(file_info->name+length, &number,0))
+ if (unlikely(!(dir_info= my_dir(buff, MYF(MY_DONT_SORT)))))
+ { // This shouldn't happen
+ strmov(end,".1"); // use name+1
+ DBUG_RETURN(1);
+ }
+ file_info= dir_info->dir_entry;
+ max_found= min_log_number_to_use ? min_log_number_to_use-1 : 0;
+ for (i= dir_info->number_of_files ; i-- ; file_info++)
{
- set_if_bigger(max_found,(ulong) number);
+ if (strncmp(file_info->name, start, length) == 0 &&
+ test_if_number(file_info->name+length, &number,0))
+ {
+ set_if_bigger(max_found,(ulong) number);
+ }
}
+ my_dirend(dir_info);
}
- my_dirend(dir_info);
/* check if reached the maximum possible extension number */
if (max_found >= MAX_LOG_UNIQUE_FN_EXT)
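The rewritten find_uniq_filename() above scans the log directory only when no number has been cached and otherwise continues from *last_used_log_number. A standalone sketch of that caching logic; the directory scan is replaced by a stub that returns a hypothetical highest existing extension.

#include <cstdio>

typedef unsigned long ulong;

// Stub for the my_dir() scan: pretend the highest existing extension is 7.
static ulong scan_dir_for_max(ulong min_log_number_to_use)
{
  ulong max_found= min_log_number_to_use ? min_log_number_to_use - 1 : 0;
  ulong highest_on_disk= 7;
  return highest_on_disk > max_found ? highest_on_disk : max_found;
}

static ulong next_log_number(ulong min_log_number_to_use,
                             ulong *last_used_log_number)
{
  ulong max_found;
  if (*last_used_log_number)
    max_found= *last_used_log_number;        // reuse the cached value
  else
    max_found= scan_dir_for_max(min_log_number_to_use);
  ulong next= max_found + 1;
  *last_used_log_number= next;               // cache for the next rotation
  return next;
}

int main()
{
  ulong cache= 0;
  std::printf("%lu\n", next_log_number(0, &cache));  // 8, from the scan
  std::printf("%lu\n", next_log_number(0, &cache));  // 9, from the cache
}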
@@ -2531,6 +2578,7 @@ index files.", name, ext_buf, (strlen(ext_buf) + (end - name)));
error= 1;
goto end;
}
+ *last_used_log_number= next;
/* print warning if reaching the end of available extensions. */
if ((next > (MAX_LOG_UNIQUE_FN_EXT - LOG_WARN_UNIQUE_FN_EXT_LEFT)))
@@ -2649,7 +2697,7 @@ bool MYSQL_LOG::open(
#endif
if ((file= mysql_file_open(log_file_key, log_file_name, open_flags,
- MYF(MY_WME | ME_WAITTANG))) < 0)
+ MYF(MY_WME))) < 0)
goto err;
if (is_fifo)
@@ -2782,19 +2830,24 @@ int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name,
ulong next_log_number)
{
fn_format(new_name, log_name, mysql_data_home, "", 4);
- if (log_type == LOG_BIN)
+ return 0;
+}
+
+int MYSQL_BIN_LOG::generate_new_name(char *new_name, const char *log_name,
+ ulong next_log_number)
+{
+ fn_format(new_name, log_name, mysql_data_home, "", 4);
+ if (!fn_ext(log_name)[0])
{
- if (!fn_ext(log_name)[0])
+ if (DBUG_EVALUATE_IF("binlog_inject_new_name_error", TRUE, FALSE) ||
+ unlikely(find_uniq_filename(new_name, next_log_number,
+ &last_used_log_number)))
{
- if (DBUG_EVALUATE_IF("binlog_inject_new_name_error", TRUE, FALSE) ||
- unlikely(find_uniq_filename(new_name, next_log_number)))
- {
- THD *thd= current_thd;
- if (unlikely(thd))
- my_error(ER_NO_UNIQUE_LOGFILE, MYF(ME_FATALERROR), log_name);
- sql_print_error(ER_DEFAULT(ER_NO_UNIQUE_LOGFILE), log_name);
- return 1;
- }
+ THD *thd= current_thd;
+ if (unlikely(thd))
+ my_error(ER_NO_UNIQUE_LOGFILE, MYF(ME_FATAL), log_name);
+ sql_print_error(ER_DEFAULT(ER_NO_UNIQUE_LOGFILE), log_name);
+ return 1;
}
}
return 0;
@@ -3194,7 +3247,8 @@ const char *MYSQL_LOG::generate_name(const char *log_name,
MYSQL_BIN_LOG::MYSQL_BIN_LOG(uint *sync_period)
:reset_master_pending(0), mark_xid_done_waiting(0),
- bytes_written(0), file_id(1), open_count(1),
+ bytes_written(0), last_used_log_number(0),
+ file_id(1), open_count(1),
group_commit_queue(0), group_commit_queue_busy(FALSE),
num_commits(0), num_group_commits(0),
group_commit_trigger_count(0), group_commit_trigger_timeout(0),
@@ -4174,6 +4228,8 @@ bool MYSQL_BIN_LOG::reset_logs(THD *thd, bool create_new_log,
name=0; // Protect against free
close(LOG_CLOSE_TO_BE_OPENED);
+ last_used_log_number= 0; // Reset log number cache
+
/*
First delete all old log files and then update the index file.
As we first delete the log files and do not use sort of logging,
@@ -4627,7 +4683,7 @@ int MYSQL_BIN_LOG::open_purge_index_file(bool destroy)
if (!my_b_inited(&purge_index_file))
{
if ((file= my_open(purge_index_file_name, O_RDWR | O_CREAT | O_BINARY,
- MYF(MY_WME | ME_WAITTANG))) < 0 ||
+ MYF(MY_WME))) < 0 ||
init_io_cache(&purge_index_file, file, IO_SIZE,
(destroy ? WRITE_CACHE : READ_CACHE),
0, 0, MYF(MY_WME | MY_NABP | MY_WAIT_IF_FULL)))
@@ -5109,7 +5165,11 @@ bool MYSQL_BIN_LOG::is_active(const char *log_file_name_arg)
int MYSQL_BIN_LOG::new_file()
{
- return new_file_impl(1);
+ int res;
+ mysql_mutex_lock(&LOCK_log);
+ res= new_file_impl();
+ mysql_mutex_unlock(&LOCK_log);
+ return res;
}
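new_file() above now takes LOCK_log itself, while new_file_impl() only asserts ownership. A rough standalone sketch of that wrapper/impl split; std::mutex and a boolean flag stand in for mysql_mutex_t and mysql_mutex_assert_owner().

#include <cassert>
#include <cstdio>
#include <mutex>

static std::mutex LOCK_log;
static bool lock_log_owned= false;   // stand-in for mysql_mutex_assert_owner()

static int new_file_impl()
{
  assert(lock_log_owned);            // caller must already hold LOCK_log
  std::puts("rotating under LOCK_log");
  return 0;
}

static int new_file()
{
  std::lock_guard<std::mutex> guard(LOCK_log);
  lock_log_owned= true;
  int res= new_file_impl();
  lock_log_owned= false;
  return res;
}

// new_file_without_locking() would call new_file_impl() directly, relying on
// the caller to hold LOCK_log already.
int main() { return new_file(); }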
/*
@@ -5118,7 +5178,7 @@ int MYSQL_BIN_LOG::new_file()
*/
int MYSQL_BIN_LOG::new_file_without_locking()
{
- return new_file_impl(0);
+ return new_file_impl();
}
@@ -5134,7 +5194,7 @@ int MYSQL_BIN_LOG::new_file_without_locking()
The new file name is stored last in the index file
*/
-int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
+int MYSQL_BIN_LOG::new_file_impl()
{
int error= 0, close_on_error= FALSE;
char new_name[FN_REFLEN], *new_name_ptr, *old_name, *file_to_open;
@@ -5143,14 +5203,12 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
File UNINIT_VAR(old_file);
DBUG_ENTER("MYSQL_BIN_LOG::new_file_impl");
- if (need_lock)
- mysql_mutex_lock(&LOCK_log);
+ DBUG_ASSERT(log_type == LOG_BIN);
mysql_mutex_assert_owner(&LOCK_log);
if (!is_open())
{
DBUG_PRINT("info",("log is closed"));
- mysql_mutex_unlock(&LOCK_log);
DBUG_RETURN(error);
}
@@ -5169,7 +5227,7 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
#ifdef ENABLE_AND_FIX_HANG
close_on_error= TRUE;
#endif
- goto end;
+ goto end2;
}
new_name_ptr=new_name;
@@ -5196,7 +5254,7 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
close_on_error= TRUE;
my_printf_error(ER_ERROR_ON_WRITE,
ER_THD_OR_DEFAULT(current_thd, ER_CANT_OPEN_FILE),
- MYF(ME_FATALERROR), name, errno);
+ MYF(ME_FATAL), name, errno);
goto end;
}
bytes_written += r.data_written;
@@ -5265,14 +5323,21 @@ int MYSQL_BIN_LOG::new_file_impl(bool need_lock)
/* handle reopening errors */
if (unlikely(error))
{
- my_error(ER_CANT_OPEN_FILE, MYF(ME_FATALERROR), file_to_open, error);
+ my_error(ER_CANT_OPEN_FILE, MYF(ME_FATAL), file_to_open, error);
close_on_error= TRUE;
}
my_free(old_name);
end:
+ /* In case of errors, reuse the last generated log file name */
+ if (unlikely(error))
+ {
+ DBUG_ASSERT(last_used_log_number > 0);
+ last_used_log_number--;
+ }
+end2:
if (delay_close)
{
clear_inuse_flag_when_closing(old_file);
@@ -5298,8 +5363,6 @@ end:
}
mysql_mutex_unlock(&LOCK_index);
- if (need_lock)
- mysql_mutex_unlock(&LOCK_log);
DBUG_RETURN(error);
}
@@ -5659,7 +5722,18 @@ THD::binlog_start_trans_and_stmt()
this->binlog_set_stmt_begin();
bool mstmt_mode= in_multi_stmt_transaction_mode();
#ifdef WITH_WSREP
- /* Write Gtid
+ /*
+ With wsrep binlog emulation we can skip the rest because the
+ binlog cache will not be written into binlog. Note however that
+ because of this the hton callbacks will not get called to clean
+ up the cache, so this must be done explicitly when the transaction
+ terminates.
+ */
+ if (WSREP_EMULATE_BINLOG_NNULL(this))
+ {
+ DBUG_VOID_RETURN;
+ }
+ /* Write Gtid
Get domain id only when gtid mode is set
If this event is replicate through a master then ,
we will forward the same gtid another nodes
@@ -5764,10 +5838,10 @@ int THD::binlog_write_table_map(TABLE *table, bool is_transactional,
/* Ensure that all events in a GTID group are in the same cache */
if (variables.option_bits & OPTION_GTID_BEGIN)
is_transactional= 1;
-
+
/* Pre-conditions */
DBUG_ASSERT(is_current_stmt_binlog_format_row());
- DBUG_ASSERT(WSREP_EMULATE_BINLOG(this) || mysql_bin_log.is_open());
+ DBUG_ASSERT(WSREP_EMULATE_BINLOG_NNULL(this) || mysql_bin_log.is_open());
DBUG_ASSERT(table->s->table_map_id != ULONG_MAX);
Table_map_log_event
@@ -5966,7 +6040,9 @@ MYSQL_BIN_LOG::write_gtid_event(THD *thd, bool standalone,
DBUG_PRINT("enter", ("standalone: %d", standalone));
#ifdef WITH_WSREP
- if (WSREP(thd) && thd->wsrep_trx_meta.gtid.seqno != -1 && wsrep_gtid_mode && !thd->variables.gtid_seq_no)
+ if (WSREP(thd) &&
+ (wsrep_thd_trx_seqno(thd) > 0) &&
+ wsrep_gtid_mode && !thd->variables.gtid_seq_no)
{
domain_id= wsrep_gtid_domain_id;
} else {
@@ -6069,7 +6145,7 @@ MYSQL_BIN_LOG::write_state_to_file()
goto end;
err:
- sql_print_error("Error writing binlog state to file '%s'.\n", buf);
+ sql_print_error("Error writing binlog state to file '%s'.", buf);
if (log_inited)
end_io_cache(&cache);
end:
@@ -6129,7 +6205,7 @@ MYSQL_BIN_LOG::read_state_from_file()
goto end;
err:
- sql_print_error("Error reading binlog GTID state from file '%s'.\n", buf);
+ sql_print_error("Error reading binlog GTID state from file '%s'.", buf);
end:
if (log_inited)
end_io_cache(&cache);
@@ -6283,7 +6359,7 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info, my_bool *with_annotate)
*/
/* applier and replayer can skip writing binlog events */
if ((WSREP_EMULATE_BINLOG(thd) &&
- IF_WSREP(thd->wsrep_exec_mode != REPL_RECV, 0)) || is_open())
+ IF_WSREP(thd->wsrep_cs().mode() == wsrep::client_state::m_local, 0)) || is_open())
{
my_off_t UNINIT_VAR(my_org_b_tell);
#ifdef HAVE_REPLICATION
@@ -6474,25 +6550,8 @@ err:
it's list before dump-thread tries to send it
*/
update_binlog_end_pos(offset);
- /*
- If a transaction with the LOAD DATA statement is divided
- into logical mini-transactions (of the 10K rows) and binlog
- is rotated, then the last portion of data may be lost due to
- wsrep handler re-registration at the boundary of the split.
- Since splitting of the LOAD DATA into mini-transactions is
- logical, we should not allow these mini-transactions to fall
- into separate binlogs. Therefore, it is necessary to prohibit
- the rotation of binlog in the middle of processing LOAD DATA:
- */
-#ifdef WITH_WSREP
- if (!thd->wsrep_split_flag)
- {
-#endif /* WITH_WSREP */
if (unlikely((error= rotate(false, &check_purge))))
check_purge= false;
-#ifdef WITH_WSREP
- }
-#endif /* WITH_WSREP */
}
}
}
@@ -7218,25 +7277,8 @@ bool MYSQL_BIN_LOG::write_incident(THD *thd)
likely(!(error= flush_and_sync(0))))
{
update_binlog_end_pos();
- /*
- If a transaction with the LOAD DATA statement is divided
- into logical mini-transactions (of the 10K rows) and binlog
- is rotated, then the last portion of data may be lost due to
- wsrep handler re-registration at the boundary of the split.
- Since splitting of the LOAD DATA into mini-transactions is
- logical, we should not allow these mini-transactions to fall
- into separate binlogs. Therefore, it is necessary to prohibit
- the rotation of binlog in the middle of processing LOAD DATA:
- */
-#ifdef WITH_WSREP
- if (!thd->wsrep_split_flag)
- {
-#endif /* WITH_WSREP */
if (unlikely((error= rotate(false, &check_purge))))
check_purge= false;
-#ifdef WITH_WSREP
- }
-#endif /* WITH_WSREP */
}
offset= my_b_tell(&log_file);
@@ -7287,7 +7329,7 @@ MYSQL_BIN_LOG::write_binlog_checkpoint_event_already_locked(const char *name_arg
ability to do crash recovery - crash recovery will just have to scan a
bit more of the binlog than strictly necessary.
*/
- sql_print_error("Failed to write binlog checkpoint event to binary log\n");
+ sql_print_error("Failed to write binlog checkpoint event to binary log");
}
offset= my_b_tell(&log_file);
@@ -7700,7 +7742,26 @@ bool
MYSQL_BIN_LOG::write_transaction_to_binlog_events(group_commit_entry *entry)
{
int is_leader= queue_for_group_commit(entry);
-
+#ifdef WITH_WSREP
+ if (wsrep_is_active(entry->thd) &&
+ wsrep_run_commit_hook(entry->thd, entry->all))
+ {
+ /*
+ Release commit order and if leader, wait for prior commit to
+ complete. This establishes total order for group leaders.
+ */
+ if (wsrep_ordered_commit(entry->thd, entry->all, wsrep_apply_error()))
+ {
+ entry->thd->wakeup_subsequent_commits(1);
+ return 1;
+ }
+ if (is_leader)
+ {
+ if (entry->thd->wait_for_prior_commit())
+ return 1;
+ }
+ }
+#endif /* WITH_WSREP */
/*
The first in the queue handles group commit for all; the others just wait
to be signalled when group commit is done.
@@ -7782,10 +7843,10 @@ MYSQL_BIN_LOG::write_transaction_to_binlog_events(group_commit_entry *entry)
switch (entry->error)
{
case ER_ERROR_ON_WRITE:
- my_error(ER_ERROR_ON_WRITE, MYF(ME_NOREFRESH), name, entry->commit_errno);
+ my_error(ER_ERROR_ON_WRITE, MYF(ME_ERROR_LOG), name, entry->commit_errno);
break;
case ER_ERROR_ON_READ:
- my_error(ER_ERROR_ON_READ, MYF(ME_NOREFRESH),
+ my_error(ER_ERROR_ON_READ, MYF(ME_ERROR_LOG),
entry->error_cache->file_name, entry->commit_errno);
break;
default:
@@ -7796,7 +7857,7 @@ MYSQL_BIN_LOG::write_transaction_to_binlog_events(group_commit_entry *entry)
*/
my_printf_error(entry->error,
"Error writing transaction to binary log: %d",
- MYF(ME_NOREFRESH), entry->error);
+ MYF(ME_ERROR_LOG), entry->error);
}
/*
@@ -8005,20 +8066,6 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
mark_xids_active(binlog_id, xid_count);
}
- /*
- If a transaction with the LOAD DATA statement is divided
- into logical mini-transactions (of the 10K rows) and binlog
- is rotated, then the last portion of data may be lost due to
- wsrep handler re-registration at the boundary of the split.
- Since splitting of the LOAD DATA into mini-transactions is
- logical, we should not allow these mini-transactions to fall
- into separate binlogs. Therefore, it is necessary to prohibit
- the rotation of binlog in the middle of processing LOAD DATA:
- */
-#ifdef WITH_WSREP
- if (!leader->thd->wsrep_split_flag)
- {
-#endif /* WITH_WSREP */
if (rotate(false, &check_purge))
{
/*
@@ -8035,12 +8082,9 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader)
when the transaction has been safely committed in the engine.
*/
leader->cache_mngr->delayed_error= true;
- my_error(ER_ERROR_ON_WRITE, MYF(ME_NOREFRESH), name, errno);
+ my_error(ER_ERROR_ON_WRITE, MYF(ME_ERROR_LOG), name, errno);
check_purge= false;
}
-#ifdef WITH_WSREP
- }
-#endif /* WITH_WSREP */
/* In case of binlog rotate, update the correct current binlog offset. */
commit_offset= my_b_write_tell(&log_file);
}
@@ -8689,18 +8733,35 @@ bool flush_error_log()
}
#ifdef _WIN32
+struct eventlog_source
+{
+ HANDLE handle;
+ eventlog_source()
+ {
+ setup_windows_event_source();
+ handle = RegisterEventSource(NULL, "MariaDB");
+ }
+
+ ~eventlog_source()
+ {
+ if (handle)
+ DeregisterEventSource(handle);
+ }
+};
+
+static eventlog_source eventlog;
+
static void print_buffer_to_nt_eventlog(enum loglevel level, char *buff,
size_t length, size_t buffLen)
{
- HANDLE event;
+ HANDLE event= eventlog.handle;
char *buffptr= buff;
DBUG_ENTER("print_buffer_to_nt_eventlog");
/* Add ending CR/LF's to string, overwrite last chars if necessary */
strmov(buffptr+MY_MIN(length, buffLen-5), "\r\n\r\n");
- setup_windows_event_source();
- if ((event= RegisterEventSource(NULL,"MySQL")))
+ if (event)
{
switch (level) {
case ERROR_LEVEL:
@@ -8716,7 +8777,6 @@ static void print_buffer_to_nt_eventlog(enum loglevel level, char *buff,
0, (LPCSTR*) &buffptr, NULL);
break;
}
- DeregisterEventSource(event);
}
DBUG_VOID_RETURN;
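The eventlog_source struct above registers the Windows event source once at startup and deregisters it from its destructor, instead of doing both around every message. A portable sketch of that RAII shape; the register/deregister calls are stand-ins for the Win32 RegisterEventSource()/DeregisterEventSource() APIs.

#include <cstdio>

// Stand-ins for the Win32 event source registration calls.
static void *register_source(const char *name)
{
  std::printf("registered %s\n", name);
  static int token;
  return &token;
}
static void deregister_source(void *h) { if (h) std::puts("deregistered"); }

struct eventlog_source
{
  void *handle;
  eventlog_source()  { handle= register_source("MariaDB"); }
  ~eventlog_source() { deregister_source(handle); }
};

// One static instance: registered before main(), deregistered at exit.
static eventlog_source eventlog;

static void log_line(const char *msg)
{
  if (eventlog.handle)                  // reuse the long-lived handle
    std::printf("event: %s\n", msg);
}

int main() { log_line("server started"); }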
@@ -10516,7 +10576,7 @@ set_binlog_snapshot_file(const char *src)
Copy out current values of status variables, for SHOW STATUS or
information_schema.global_status.
- This is called only under LOCK_show_status, so we can fill in a static array.
+ This is called only under LOCK_all_status_vars, so we can fill in a static array.
*/
void
TC_LOG_BINLOG::set_status_variables(THD *thd)
@@ -10641,7 +10701,10 @@ maria_declare_plugin(binlog)
maria_declare_plugin_end;
#ifdef WITH_WSREP
-IO_CACHE * get_trans_log(THD * thd)
+#include "wsrep_trans_observer.h"
+#include "wsrep_mysqld.h"
+
+IO_CACHE *wsrep_get_trans_cache(THD * thd)
{
DBUG_ASSERT(binlog_hton->slot != HA_SLOT_UNDEF);
binlog_cache_mngr *cache_mngr = (binlog_cache_mngr*)
@@ -10654,17 +10717,10 @@ IO_CACHE * get_trans_log(THD * thd)
return NULL;
}
-
-bool wsrep_trans_cache_is_empty(THD *thd)
-{
- binlog_cache_mngr *const cache_mngr=
- (binlog_cache_mngr*) thd_get_ha_data(thd, binlog_hton);
- return (!cache_mngr || cache_mngr->trx_cache.empty());
-}
-
-
-void thd_binlog_trx_reset(THD * thd)
+void wsrep_thd_binlog_trx_reset(THD * thd)
{
+ DBUG_ENTER("wsrep_thd_binlog_trx_reset");
+ WSREP_DEBUG("wsrep_thd_binlog_reset");
/*
todo: fix autocommit select to not call the caller
*/
@@ -10683,6 +10739,7 @@ void thd_binlog_trx_reset(THD * thd)
}
}
thd->clear_binlog_table_maps();
+ DBUG_VOID_RETURN;
}
@@ -10695,4 +10752,78 @@ void thd_binlog_rollback_stmt(THD * thd)
if (cache_mngr)
cache_mngr->trx_cache.set_prev_position(MY_OFF_T_UNDEF);
}
+
+bool wsrep_stmt_rollback_is_safe(THD* thd)
+{
+ bool ret(true);
+
+ DBUG_ENTER("wsrep_binlog_stmt_rollback_is_safe");
+
+ binlog_cache_mngr *cache_mngr=
+ (binlog_cache_mngr*) thd_get_ha_data(thd, binlog_hton);
+
+
+ if (binlog_hton && cache_mngr)
+ {
+ binlog_cache_data * trx_cache = &cache_mngr->trx_cache;
+ if (thd->wsrep_sr().fragments_certified() > 0 &&
+ (trx_cache->get_prev_position() == MY_OFF_T_UNDEF ||
+ trx_cache->get_prev_position() < thd->wsrep_sr().bytes_certified()))
+ {
+ WSREP_DEBUG("statement rollback is not safe for streaming replication"
+ " pre-stmt_pos: %llu, frag repl pos: %zu\n"
+ "Thread: %llu, SQL: %s",
+ trx_cache->get_prev_position(),
+ thd->wsrep_sr().bytes_certified(),
+ thd->thread_id, thd->query());
+ ret = false;
+ }
+ }
+ DBUG_RETURN(ret);
+}
+
+void wsrep_register_binlog_handler(THD *thd, bool trx)
+{
+ DBUG_ENTER("register_binlog_handler");
+ /*
+ If this is the first call to this function while processing a statement,
+ the transactional cache does not have a savepoint defined. So, in what
+ follows:
+ . an implicit savepoint is defined;
+ . callbacks are registered;
+ . binary log is set as read/write.
+
+ The savepoint allows the trx-cache transactional changes to be truncated
+ if the statement fails. Callbacks are necessary to flush caches upon
+ committing or rolling back a statement or a transaction. However,
+ notifications do not happen if the binary log is set as read/write.
+ */
+ //binlog_cache_mngr *cache_mngr= thd_get_cache_mngr(thd);
+ binlog_cache_mngr *cache_mngr=
+ (binlog_cache_mngr*) thd_get_ha_data(thd, binlog_hton);
+ /* cache_mngr may be missing e.g. in mtr test ev51914.test */
+ if (cache_mngr && cache_mngr->trx_cache.get_prev_position() == MY_OFF_T_UNDEF)
+ {
+ /*
+ Set an implicit savepoint in order to be able to truncate a trx-cache.
+ */
+ my_off_t pos= 0;
+ binlog_trans_log_savepos(thd, &pos);
+ cache_mngr->trx_cache.set_prev_position(pos);
+
+ /*
+ Set callbacks in order to be able to call commit or rollback.
+ */
+ if (trx)
+ trans_register_ha(thd, TRUE, binlog_hton);
+ trans_register_ha(thd, FALSE, binlog_hton);
+
+ /*
+ Set the binary log as read/write otherwise callbacks are not called.
+ */
+ thd->ha_data[binlog_hton->slot].ha_info[0].set_trx_read_write();
+ }
+ DBUG_VOID_RETURN;
+}
+
#endif /* WITH_WSREP */
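wsrep_register_binlog_handler() above records the current trx-cache position as an implicit savepoint so a failing statement can later be truncated away. A standalone sketch of that save-position/truncate idea; a std::string byte buffer stands in for the binlog IO_CACHE.

#include <cstdio>
#include <string>

typedef unsigned long long my_off_t;
static const my_off_t MY_OFF_T_UNDEF= ~0ULL;

struct trx_cache
{
  std::string bytes;
  my_off_t prev_position= MY_OFF_T_UNDEF;

  void start_stmt()                       // set the implicit savepoint once
  {
    if (prev_position == MY_OFF_T_UNDEF)
      prev_position= bytes.size();
  }
  void rollback_stmt()                    // truncate back to the savepoint
  {
    if (prev_position != MY_OFF_T_UNDEF)
      bytes.resize((size_t) prev_position);
    prev_position= MY_OFF_T_UNDEF;
  }
};

int main()
{
  trx_cache cache;
  cache.bytes= "committed-part|";
  cache.start_stmt();
  cache.bytes+= "failed-statement-rows";
  cache.rollback_stmt();
  std::printf("%s\n", cache.bytes.c_str());   // committed-part|
}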
diff --git a/sql/log.h b/sql/log.h
index 7dfdb36c442..4cdbf300fb9 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -18,7 +18,6 @@
#define LOG_H
#include "handler.h" /* my_xid */
-#include "wsrep.h"
#include "wsrep_mysqld.h"
#include "rpl_constants.h"
@@ -248,10 +247,6 @@ extern TC_LOG_DUMMY tc_log_dummy;
class Relay_log_info;
-#ifdef HAVE_PSI_INTERFACE
-extern PSI_mutex_key key_LOG_INFO_lock;
-#endif
-
/*
Note that we destroy the lock mutex in the destructor here.
This means that object instances cannot be destroyed/go out of scope,
@@ -263,19 +258,11 @@ typedef struct st_log_info
my_off_t index_file_offset, index_file_start_offset;
my_off_t pos;
bool fatal; // if the purge happens to give us a negative offset
- mysql_mutex_t lock;
st_log_info() : index_file_offset(0), index_file_start_offset(0),
pos(0), fatal(0)
{
DBUG_ENTER("LOG_INFO");
log_file_name[0] = '\0';
- mysql_mutex_init(key_LOG_INFO_lock, &lock, MY_MUTEX_INIT_FAST);
- DBUG_VOID_RETURN;
- }
- ~st_log_info()
- {
- DBUG_ENTER("~LOG_INFO");
- mysql_mutex_destroy(&lock);
DBUG_VOID_RETURN;
}
} LOG_INFO;
@@ -306,6 +293,7 @@ class MYSQL_LOG
{
public:
MYSQL_LOG();
+ virtual ~MYSQL_LOG() {}
void init_pthread_objects();
void cleanup();
bool open(
@@ -328,8 +316,8 @@ public:
const char *generate_name(const char *log_name,
const char *suffix,
bool strip_ext, char *buff);
- int generate_new_name(char *new_name, const char *log_name,
- ulong next_log_number);
+ virtual int generate_new_name(char *new_name, const char *log_name,
+ ulong next_log_number);
protected:
/* LOCK_log is inited by init_pthread_objects() */
mysql_mutex_t LOCK_log;
@@ -516,6 +504,11 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
GLOBAL MAX_BINLOG_SIZE|MAX_RELAY_LOG_SIZE) from sys_vars.cc
*/
ulong max_size;
+ /*
+ Number generated by last call of find_uniq_filename(). Corresponds
+ closely with current_binlog_id
+ */
+ ulong last_used_log_number;
// current file sequence number for load data infile binary logging
uint file_id;
uint open_count; // For replication
@@ -562,7 +555,7 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG
LOCK_log.
*/
int new_file_without_locking();
- int new_file_impl(bool need_lock);
+ int new_file_impl();
void do_checkpoint_request(ulong binlog_id);
void purge();
int write_transaction_or_stmt(group_commit_entry *entry, uint64 commit_id);
@@ -695,6 +688,8 @@ public:
int open(const char *opt_name);
void close();
+ virtual int generate_new_name(char *new_name, const char *log_name,
+ ulong next_log_number);
int log_and_order(THD *thd, my_xid xid, bool all,
bool need_prepare_ordered, bool need_commit_ordered);
int unlog(ulong cookie, my_xid xid);
@@ -1212,6 +1207,10 @@ static inline TC_LOG *get_tc_log_implementation()
return &tc_log_mmap;
}
+#ifdef WITH_WSREP
+IO_CACHE* wsrep_get_trans_cache(THD *);
+void wsrep_thd_binlog_trx_reset(THD * thd);
+#endif /* WITH_WSREP */
class Gtid_list_log_event;
const char *
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 52556cddd12..1246330f7bb 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -53,7 +53,6 @@
#include "rpl_constants.h"
#include "sql_digest.h"
#include "zlib.h"
-#include "my_atomic.h"
#define my_b_write_string(A, B) my_b_write((A), (uchar*)(B), (uint) (sizeof(B) - 1))
@@ -101,16 +100,11 @@ TYPELIB binlog_checksum_typelib=
TODO: correct the constant when it has been determined
(which main tree to push and when)
*/
-const uchar checksum_version_split_mysql[3]= {5, 6, 1};
-const ulong checksum_version_product_mysql=
- (checksum_version_split_mysql[0] * 256 +
- checksum_version_split_mysql[1]) * 256 +
- checksum_version_split_mysql[2];
-const uchar checksum_version_split_mariadb[3]= {5, 3, 0};
-const ulong checksum_version_product_mariadb=
- (checksum_version_split_mariadb[0] * 256 +
- checksum_version_split_mariadb[1]) * 256 +
- checksum_version_split_mariadb[2];
+const Version checksum_version_split_mysql(5, 6, 1);
+const Version checksum_version_split_mariadb(5, 3, 0);
+
+// First MySQL version with fraction seconds
+const Version fsp_version_split_mysql(5, 6, 0);
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
static int rows_event_stmt_cleanup(rpl_group_info *rgi, THD* thd);
@@ -2753,9 +2747,7 @@ log_event_print_value(IO_CACHE *file, PRINT_EVENT_INFO *print_event_info,
goto return_null;
uint bin_size= my_decimal_get_binary_size(precision, decimals);
- my_decimal dec;
- binary2my_decimal(E_DEC_FATAL_ERROR, (uchar*) ptr, &dec,
- precision, decimals);
+ my_decimal dec((const uchar *) ptr, precision, decimals);
int length= DECIMAL_MAX_STR_LENGTH;
char buff[DECIMAL_MAX_STR_LENGTH + 1];
decimal2string(&dec, buff, &length, 0, 0, 0);
@@ -4426,7 +4418,7 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, size_t que
have to use the transactional cache to ensure we don't
calculate any checksum for the CREATE part.
*/
- trx_cache= (lex->select_lex.item_list.elements &&
+ trx_cache= (lex->first_select_lex()->item_list.elements &&
thd->is_current_stmt_binlog_format_row()) ||
(thd->variables.option_bits & OPTION_GTID_BEGIN);
use_cache= (lex->tmp_table() &&
@@ -4558,6 +4550,7 @@ code_name(int code)
}
#endif
+
/**
Macro to check that there is enough space to read from memory.
@@ -4781,6 +4774,30 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
}
}
+#if !defined(MYSQL_CLIENT)
+ if (description_event->server_version_split.kind ==
+ Format_description_log_event::master_version_split::KIND_MYSQL)
+ {
+ // Handle MariaDB/MySQL incompatible sql_mode bits
+ sql_mode_t mysql_sql_mode= sql_mode;
+ sql_mode&= MODE_MASK_MYSQL_COMPATIBLE; // Unset MySQL specific bits
+
+ /*
+ sql_mode flags related to fraction second rounding/truncation
+ have opposite meaning in MySQL vs MariaDB.
+ MySQL:
+ - rounds fractional seconds by default
+ - truncates if TIME_TRUNCATE_FRACTIONAL is set
+ MariaDB:
+ - truncates fractional seconds by default
+ - rounds if TIME_ROUND_FRACTIONAL is set
+ */
+ if (description_event->server_version_split >= fsp_version_split_mysql &&
+ !(mysql_sql_mode & MODE_MYSQL80_TIME_TRUNCATE_FRACTIONAL))
+ sql_mode|= MODE_TIME_ROUND_FRACTIONAL;
+ }
+#endif
+
/**
Layout for the data buffer is as follows
+--------+-----------+------+------+---------+----+-------+
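The hunk above masks out MySQL-specific sql_mode bits and inverts the fractional-seconds flag: MySQL rounds by default and truncates only with TIME_TRUNCATE_FRACTIONAL, while MariaDB truncates by default and rounds only with TIME_ROUND_FRACTIONAL. A standalone sketch of that bit translation; the flag values below are illustrative placeholders, not the server's real constants.

#include <cstdio>

typedef unsigned long long sql_mode_t;

// Illustrative placeholder bits, not the real server constants.
static const sql_mode_t MODE_MASK_COMPATIBLE                = 0x00FF;
static const sql_mode_t MODE_MYSQL_TIME_TRUNCATE_FRACTIONAL = 0x0100;
static const sql_mode_t MODE_TIME_ROUND_FRACTIONAL          = 0x0200;

static sql_mode_t translate_mysql_sql_mode(sql_mode_t mysql_mode,
                                           bool master_has_fsp)
{
  sql_mode_t mode= mysql_mode & MODE_MASK_COMPATIBLE;   // drop MySQL-only bits
  // MySQL rounds unless TIME_TRUNCATE_FRACTIONAL is set; map that onto
  // MariaDB's opt-in rounding flag.
  if (master_has_fsp && !(mysql_mode & MODE_MYSQL_TIME_TRUNCATE_FRACTIONAL))
    mode|= MODE_TIME_ROUND_FRACTIONAL;
  return mode;
}

int main()
{
  std::printf("%#llx\n", translate_mysql_sql_mode(0x0001, true));  // 0x201
  std::printf("%#llx\n", translate_mysql_sql_mode(0x0101, true));  // 0x1
}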
@@ -5614,7 +5631,7 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi,
gtid= rgi->current_gtid;
if (unlikely(rpl_global_gtid_slave_state->record_gtid(thd, &gtid,
sub_id,
- rgi, false,
+ true, false,
&hton)))
{
int errcode= thd->get_stmt_da()->sql_errno();
@@ -5794,6 +5811,14 @@ compare_errors:
"unexpected success or fatal error"),
thd->get_db(), query_arg);
thd->is_slave_error= 1;
+#ifdef WITH_WSREP
+ if (thd->wsrep_apply_toi && wsrep_must_ignore_error(thd))
+ {
+ thd->clear_error(1);
+ thd->killed= NOT_KILLED;
+ thd->wsrep_has_ignored_error= true;
+ }
+#endif /* WITH_WSREP */
}
/*
@@ -6589,26 +6614,24 @@ bool Format_description_log_event::start_decryption(Start_encryption_log_event*
return crypto_data.init(sele->crypto_scheme, sele->key_version);
}
-static inline void
-do_server_version_split(char* version,
- Format_description_log_event::master_version_split *split_versions)
+
+Version::Version(const char *version, const char **endptr)
{
- char *p= version, *r;
+ const char *p= version;
ulong number;
for (uint i= 0; i<=2; i++)
{
+ char *r;
number= strtoul(p, &r, 10);
/*
It is an invalid version if any version number greater than 255 or
first number is not followed by '.'.
*/
if (number < 256 && (*r == '.' || i != 0))
- split_versions->ver[i]= (uchar) number;
+ m_ver[i]= (uchar) number;
else
{
- split_versions->ver[0]= 0;
- split_versions->ver[1]= 0;
- split_versions->ver[2]= 0;
+ *this= Version();
break;
}
@@ -6616,12 +6639,19 @@ do_server_version_split(char* version,
if (*r == '.')
p++; // skip the dot
}
+ endptr[0]= p;
+}
+
+
+Format_description_log_event::
+ master_version_split::master_version_split(const char *version)
+{
+ const char *p;
+ static_cast<Version*>(this)[0]= Version(version, &p);
if (strstr(p, "MariaDB") != 0 || strstr(p, "-maria-") != 0)
- split_versions->kind=
- Format_description_log_event::master_version_split::KIND_MARIADB;
+ kind= KIND_MARIADB;
else
- split_versions->kind=
- Format_description_log_event::master_version_split::KIND_MYSQL;
+ kind= KIND_MYSQL;
}
@@ -6635,20 +6665,14 @@ do_server_version_split(char* version,
*/
void Format_description_log_event::calc_server_version_split()
{
- do_server_version_split(server_version, &server_version_split);
+ server_version_split= master_version_split(server_version);
DBUG_PRINT("info",("Format_description_log_event::server_version_split:"
" '%s' %d %d %d", server_version,
- server_version_split.ver[0],
- server_version_split.ver[1], server_version_split.ver[2]));
+ server_version_split[0],
+ server_version_split[1], server_version_split[2]));
}
-static inline ulong
-version_product(const Format_description_log_event::master_version_split* version_split)
-{
- return ((version_split->ver[0] * 256 + version_split->ver[1]) * 256
- + version_split->ver[2]);
-}
/**
@return TRUE is the event's version is earlier than one that introduced
@@ -6658,9 +6682,9 @@ bool
Format_description_log_event::is_version_before_checksum(const master_version_split
*version_split)
{
- return version_product(version_split) <
+ return *version_split <
(version_split->kind == master_version_split::KIND_MARIADB ?
- checksum_version_product_mariadb : checksum_version_product_mysql);
+ checksum_version_split_mariadb : checksum_version_split_mysql);
}
/**
@@ -6676,7 +6700,6 @@ enum enum_binlog_checksum_alg get_checksum_alg(const char* buf, ulong len)
{
enum enum_binlog_checksum_alg ret;
char version[ST_SERVER_VER_LEN];
- Format_description_log_event::master_version_split version_split;
DBUG_ENTER("get_checksum_alg");
DBUG_ASSERT(buf[EVENT_TYPE_OFFSET] == FORMAT_DESCRIPTION_EVENT);
@@ -6686,7 +6709,7 @@ enum enum_binlog_checksum_alg get_checksum_alg(const char* buf, ulong len)
ST_SERVER_VER_LEN);
version[ST_SERVER_VER_LEN - 1]= 0;
- do_server_version_split(version, &version_split);
+ Format_description_log_event::master_version_split version_split(version);
ret= Format_description_log_event::is_version_before_checksum(&version_split)
? BINLOG_CHECKSUM_ALG_UNDEF
: (enum_binlog_checksum_alg)buf[len - BINLOG_CHECKSUM_LEN - BINLOG_CHECKSUM_ALG_DESC_LEN];
@@ -7437,8 +7460,9 @@ int Load_log_event::do_apply_event(NET* net, rpl_group_info *rgi,
ex.skip_lines = skip_lines;
List<Item> field_list;
- thd->lex->select_lex.context.resolve_in_table_list_only(&tables);
- set_fields(tables.db.str, field_list, &thd->lex->select_lex.context);
+ thd->lex->first_select_lex()->context.resolve_in_table_list_only(&tables);
+ set_fields(tables.db.str,
+ field_list, &thd->lex->first_select_lex()->context);
thd->variables.pseudo_thread_id= thread_id;
if (net)
{
@@ -8076,16 +8100,13 @@ Gtid_log_event::do_apply_event(rpl_group_info *rgi)
switch (flags2 & (FL_DDL | FL_TRANSACTIONAL))
{
case FL_TRANSACTIONAL:
- my_atomic_add64_explicit((volatile int64 *)&mi->total_trans_groups, 1,
- MY_MEMORY_ORDER_RELAXED);
+ mi->total_trans_groups++;
break;
case FL_DDL:
- my_atomic_add64_explicit((volatile int64 *)&mi->total_ddl_groups, 1,
- MY_MEMORY_ORDER_RELAXED);
+ mi->total_ddl_groups++;
break;
default:
- my_atomic_add64_explicit((volatile int64 *)&mi->total_non_trans_groups, 1,
- MY_MEMORY_ORDER_RELAXED);
+ mi->total_non_trans_groups++;
}
if (flags2 & FL_STANDALONE)
@@ -8417,7 +8438,7 @@ Gtid_list_log_event::do_apply_event(rpl_group_info *rgi)
{
if ((ret= rpl_global_gtid_slave_state->record_gtid(thd, &list[i],
sub_id_list[i],
- NULL, false, &hton)))
+ false, false, &hton)))
return ret;
rpl_global_gtid_slave_state->update_state_hash(sub_id_list[i], &list[i],
hton, NULL);
@@ -8954,7 +8975,7 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi)
rgi->gtid_pending= false;
gtid= rgi->current_gtid;
- err= rpl_global_gtid_slave_state->record_gtid(thd, &gtid, sub_id, rgi,
+ err= rpl_global_gtid_slave_state->record_gtid(thd, &gtid, sub_id, true,
false, &hton);
if (unlikely(err))
{
@@ -9092,11 +9113,8 @@ void User_var_log_event::pack_info(Protocol* protocol)
String buf(buf_mem, sizeof(buf_mem), system_charset_info);
char buf2[DECIMAL_MAX_STR_LENGTH+1];
String str(buf2, sizeof(buf2), &my_charset_bin);
- my_decimal dec;
buf.length(0);
- binary2my_decimal(E_DEC_FATAL_ERROR, (uchar*) (val+2), &dec, val[0],
- val[1]);
- my_decimal2string(E_DEC_FATAL_ERROR, &dec, 0, 0, 0, &str);
+ my_decimal((const uchar *) (val + 2), val[0], val[1]).to_string(&str);
if (user_var_append_name_part(protocol->thd, &buf, name, name_len) ||
buf.append(buf2))
return;
@@ -11309,13 +11327,13 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
{
WSREP_WARN("BF applier failed to open_and_lock_tables: %u, fatal: %d "
"wsrep = (exec_mode: %d conflict_state: %d seqno: %lld)",
- thd->get_stmt_da()->sql_errno(),
- thd->is_fatal_error,
- thd->wsrep_exec_mode,
- thd->wsrep_conflict_state,
- (long long)wsrep_thd_trx_seqno(thd));
+ thd->get_stmt_da()->sql_errno(),
+ thd->is_fatal_error,
+ thd->wsrep_cs().mode(),
+ thd->wsrep_trx().state(),
+ (long long) wsrep_thd_trx_seqno(thd));
}
-#endif
+#endif /* WITH_WSREP */
if ((thd->is_slave_error || thd->is_fatal_error) &&
!is_parallel_retry_error(rgi, actual_error))
{
@@ -11452,10 +11470,10 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
#ifdef HAVE_QUERY_CACHE
#ifdef WITH_WSREP
/*
- Moved invalidation right before the call to rows_event_stmt_cleanup(),
- to avoid query cache being polluted with stale entries.
+ Moved invalidation right before the call to rows_event_stmt_cleanup(),
+ to avoid the query cache being polluted with stale entries.
*/
- if (! (WSREP(thd) && (thd->wsrep_exec_mode == REPL_RECV)))
+ if (! (WSREP(thd) && wsrep_thd_is_applying(thd)))
{
#endif /* WITH_WSREP */
query_cache.invalidate_locked_for_write(thd, rgi->tables_to_lock);
@@ -11568,6 +11586,13 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
bool ignored_error= (idempotent_error == 0 ?
ignored_error_code(actual_error) : 0);
+#ifdef WITH_WSREP
+ if (WSREP(thd) && wsrep_ignored_error_code(this, actual_error))
+ {
+ idempotent_error= true;
+ thd->wsrep_has_ignored_error= true;
+ }
+#endif /* WITH_WSREP */
if (idempotent_error || ignored_error)
{
if (global_system_variables.log_warnings)
@@ -11655,7 +11680,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
restore_empty_query_table_list(thd->lex);
#if defined(WITH_WSREP) && defined(HAVE_QUERY_CACHE)
- if (WSREP(thd) && thd->wsrep_exec_mode == REPL_RECV)
+ if (WSREP(thd) && wsrep_thd_is_applying(thd))
{
query_cache.invalidate_locked_for_write(thd, rgi->tables_to_lock);
}
diff --git a/sql/log_event.h b/sql/log_event.h
index 9f598012e8a..339db756aab 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -2729,6 +2729,38 @@ protected:
};
+class Version
+{
+protected:
+ uchar m_ver[3];
+ int cmp(const Version &other) const
+ {
+ return memcmp(m_ver, other.m_ver, 3);
+ }
+public:
+ Version()
+ {
+ m_ver[0]= m_ver[1]= m_ver[2]= '\0';
+ }
+ Version(uchar v0, uchar v1, uchar v2)
+ {
+ m_ver[0]= v0;
+ m_ver[1]= v1;
+ m_ver[2]= v2;
+ }
+ Version(const char *version, const char **endptr);
+ const uchar& operator [] (size_t i) const
+ {
+ DBUG_ASSERT(i < 3);
+ return m_ver[i];
+ }
+ bool operator<(const Version &other) const { return cmp(other) < 0; }
+ bool operator>(const Version &other) const { return cmp(other) > 0; }
+ bool operator<=(const Version &other) const { return cmp(other) <= 0; }
+ bool operator>=(const Version &other) const { return cmp(other) >= 0; }
+};
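The Version helper above stores a three-byte version and orders instances with a single memcmp. Below is a self-contained mirror of those comparison semantics, a simplified standalone copy rather than the server class; the 10.4.3 master version is a made-up example.

#include <cassert>
#include <cstring>

struct MiniVersion
{
  unsigned char v[3];
  MiniVersion(unsigned char a, unsigned char b, unsigned char c) : v{a, b, c} {}
  int cmp(const MiniVersion &o) const { return std::memcmp(v, o.v, 3); }
  bool operator<(const MiniVersion &o) const { return cmp(o) < 0; }
  bool operator>=(const MiniVersion &o) const { return cmp(o) >= 0; }
};

int main()
{
  // The checksum-introducing releases quoted in log_event.cc.
  MiniVersion mariadb_checksum(5, 3, 0), mysql_checksum(5, 6, 1);
  MiniVersion master(10, 4, 3);                 // hypothetical master version
  assert(master >= mariadb_checksum);           // not "before checksum"
  assert(mariadb_checksum < mysql_checksum);    // byte-wise ordering
  return 0;
}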
+
+
/**
@class Format_description_log_event
@@ -2755,10 +2787,17 @@ public:
by the checksum alg description byte
*/
uint8 *post_header_len;
- struct master_version_split {
+ class master_version_split: public Version {
+ public:
enum {KIND_MYSQL, KIND_MARIADB};
int kind;
- uchar ver[3];
+ master_version_split() :kind(KIND_MARIADB) { }
+ master_version_split(const char *version);
+ bool version_is_valid() const
+ {
+ /* It is invalid only when all version numbers are 0 */
+ return !(m_ver[0] == 0 && m_ver[1] == 0 && m_ver[2] == 0);
+ }
};
master_version_split server_version_split;
const uint8 *event_type_permutation;
@@ -2782,17 +2821,9 @@ public:
(post_header_len != NULL));
}
- bool version_is_valid() const
- {
- /* It is invalid only when all version numbers are 0 */
- return !(server_version_split.ver[0] == 0 &&
- server_version_split.ver[1] == 0 &&
- server_version_split.ver[2] == 0);
- }
-
bool is_valid() const
{
- return header_is_valid() && version_is_valid();
+ return header_is_valid() && server_version_split.version_is_valid();
}
int get_data_size()
diff --git a/sql/mdl.cc b/sql/mdl.cc
index f03fc89fcc1..ccd7a71e9f4 100644
--- a/sql/mdl.cc
+++ b/sql/mdl.cc
@@ -24,9 +24,6 @@
#include <mysql/plugin.h>
#include <mysql/service_thd_wait.h>
#include <mysql/psi/mysql_stage.h>
-#include "wsrep_mysqld.h"
-#include "wsrep_thd.h"
-
#ifdef HAVE_PSI_INTERFACE
static PSI_mutex_key key_MDL_wait_LOCK_wait_status;
@@ -80,7 +77,7 @@ static void init_mdl_psi_keys(void)
PSI_stage_info MDL_key::m_namespace_to_wait_state_name[NAMESPACE_END]=
{
- {0, "Waiting for global read lock", 0},
+ {0, "Waiting for backup lock", 0},
{0, "Waiting for schema metadata lock", 0},
{0, "Waiting for table metadata lock", 0},
{0, "Waiting for stored function metadata lock", 0},
@@ -88,10 +85,44 @@ PSI_stage_info MDL_key::m_namespace_to_wait_state_name[NAMESPACE_END]=
{0, "Waiting for stored package body metadata lock", 0},
{0, "Waiting for trigger metadata lock", 0},
{0, "Waiting for event metadata lock", 0},
- {0, "Waiting for commit lock", 0},
{0, "User lock", 0} /* Be compatible with old status. */
};
+
+static const LEX_STRING lock_types[]=
+{
+ { C_STRING_WITH_LEN("MDL_INTENTION_EXCLUSIVE") },
+ { C_STRING_WITH_LEN("MDL_SHARED") },
+ { C_STRING_WITH_LEN("MDL_SHARED_HIGH_PRIO") },
+ { C_STRING_WITH_LEN("MDL_SHARED_READ") },
+ { C_STRING_WITH_LEN("MDL_SHARED_WRITE") },
+ { C_STRING_WITH_LEN("MDL_SHARED_UPGRADABLE") },
+ { C_STRING_WITH_LEN("MDL_SHARED_READ_ONLY") },
+ { C_STRING_WITH_LEN("MDL_SHARED_NO_WRITE") },
+ { C_STRING_WITH_LEN("MDL_SHARED_NO_READ_WRITE") },
+ { C_STRING_WITH_LEN("MDL_EXCLUSIVE") },
+};
+
+
+static const LEX_STRING backup_lock_types[]=
+{
+ { C_STRING_WITH_LEN("MDL_BACKUP_START") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_FLUSH") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_WAIT_FLUSH") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_WAIT_DDL") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_WAIT_COMMIT") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_FTWRL1") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_FTWRL2") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_DML") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_TRANS_DML") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_SYS_DML") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_DDL") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_BLOCK_DDL") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_ALTER_COPY") },
+ { C_STRING_WITH_LEN("MDL_BACKUP_COMMIT") }
+};
+
+
#ifdef HAVE_PSI_INTERFACE
void MDL_key::init_psi_keys()
{
@@ -128,11 +159,9 @@ public:
LF_PINS *get_pins() { return lf_hash_get_pins(&m_locks); }
private:
LF_HASH m_locks; /**< All acquired locks in the server. */
- /** Pre-allocated MDL_lock object for GLOBAL namespace. */
- MDL_lock *m_global_lock;
- /** Pre-allocated MDL_lock object for COMMIT namespace. */
- MDL_lock *m_commit_lock;
- friend int mdl_iterate(int (*)(MDL_ticket *, void *), void *);
+ /** Pre-allocated MDL_lock object for BACKUP namespace. */
+ MDL_lock *m_backup_lock;
+ friend int mdl_iterate(mdl_iterator_callback, void *);
};
@@ -279,8 +308,6 @@ Deadlock_detection_visitor::opt_change_victim_to(MDL_context *new_victim)
and compatibility matrices.
*/
-#define MDL_BIT(A) static_cast<MDL_lock::bitmap_t>(1U << A)
-
/**
The lock context. Created internally for an acquired lock.
For a given name, there exists only one MDL_lock instance,
@@ -295,7 +322,7 @@ Deadlock_detection_visitor::opt_change_victim_to(MDL_context *new_victim)
class MDL_lock
{
public:
- typedef unsigned short bitmap_t;
+ typedef mdl_bitmap_t bitmap_t;
class Ticket_list
{
@@ -328,9 +355,10 @@ public:
/**
Helper struct which defines how different types of locks are handled
- for a specific MDL_lock. In practice we use only two strategies: "scoped"
- lock strategy for locks in GLOBAL, COMMIT and SCHEMA namespaces and
- "object" lock strategy for all other namespaces.
+ for a specific MDL_lock. In practice we use only three strategies:
+ "backup" lock strategy for locks in BACKUP namespace, "scoped" lock
+ strategy for locks in SCHEMA namespace and "object" lock strategy for
+ all other namespaces.
*/
struct MDL_lock_strategy
{
@@ -394,9 +422,10 @@ public:
{ return m_waiting_incompatible; }
virtual bool needs_notification(const MDL_ticket *ticket) const
{
- return ticket->get_type() == MDL_SHARED_NO_WRITE ||
- ticket->get_type() == MDL_SHARED_NO_READ_WRITE ||
- ticket->get_type() == MDL_EXCLUSIVE;
+ return (MDL_BIT(ticket->get_type()) &
+ (MDL_BIT(MDL_SHARED_NO_WRITE) |
+ MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
+ MDL_BIT(MDL_EXCLUSIVE)));
}
/**
@@ -427,6 +456,43 @@ public:
static const bitmap_t m_waiting_incompatible[MDL_TYPE_END];
};
+
+ struct MDL_backup_lock: public MDL_lock_strategy
+ {
+ MDL_backup_lock() {}
+ virtual const bitmap_t *incompatible_granted_types_bitmap() const
+ { return m_granted_incompatible; }
+ virtual const bitmap_t *incompatible_waiting_types_bitmap() const
+ { return m_waiting_incompatible; }
+ virtual bool needs_notification(const MDL_ticket *ticket) const
+ {
+ return (MDL_BIT(ticket->get_type()) & MDL_BIT(MDL_BACKUP_FTWRL1));
+ }
+
+ /**
+ Insert delayed threads may hold DML or TRANS_DML lock.
+ We need to kill such threads in order to get lock for FTWRL statements.
+ We do this by calling code outside of MDL.
+ */
+ virtual bool conflicting_locks(const MDL_ticket *ticket) const
+ {
+ return (MDL_BIT(ticket->get_type()) &
+ (MDL_BIT(MDL_BACKUP_DML) |
+ MDL_BIT(MDL_BACKUP_TRANS_DML)));
+ }
+
+ /*
+ In the backup namespace DML/DDL may starve because of concurrent FTWRL or
+ BACKUP statements. This scenario has little practical relevance in the
+ real world, so we just return 0 here.
+ */
+ virtual bitmap_t hog_lock_types_bitmap() const
+ { return 0; }
+ private:
+ static const bitmap_t m_granted_incompatible[MDL_BACKUP_END];
+ static const bitmap_t m_waiting_incompatible[MDL_BACKUP_END];
+ };
+
public:
/** The key of the object (data) being protected. */
MDL_key key;
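The MDL_backup_lock strategy above expresses compatibility as per-type bitmaps indexed by the requested lock type and checked against the set of currently granted types. A standalone sketch of that bitmap check using a toy three-type lattice, not the server's real MDL_BACKUP_* matrix.

#include <cstdio>

typedef unsigned short bitmap_t;
#define BIT(A) static_cast<bitmap_t>(1U << (A))

// Toy lock types: 0 = DML, 1 = DDL, 2 = FTWRL-style blocker.
enum { LT_DML= 0, LT_DDL= 1, LT_BLOCK= 2, LT_END= 3 };

// incompatible_granted[i] lists the granted types that block a request of i.
static const bitmap_t incompatible_granted[LT_END]=
{
  /* DML   */ BIT(LT_BLOCK),
  /* DDL   */ BIT(LT_BLOCK),
  /* BLOCK */ BIT(LT_DML) | BIT(LT_DDL)
};

static bool can_grant(int requested, bitmap_t granted_types)
{
  return !(incompatible_granted[requested] & granted_types);
}

int main()
{
  bitmap_t granted= BIT(LT_DML);                       // a DML lock is held
  std::printf("%d\n", can_grant(LT_DDL, granted));     // 1: DDL is compatible
  std::printf("%d\n", can_grant(LT_BLOCK, granted));   // 0: blocker must wait
}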
@@ -538,10 +604,9 @@ public:
MDL_lock(const MDL_key *key_arg)
: key(key_arg),
m_hog_lock_count(0),
- m_strategy(&m_scoped_lock_strategy)
+ m_strategy(&m_backup_lock_strategy)
{
- DBUG_ASSERT(key_arg->mdl_namespace() == MDL_key::GLOBAL ||
- key_arg->mdl_namespace() == MDL_key::COMMIT);
+ DBUG_ASSERT(key_arg->mdl_namespace() == MDL_key::BACKUP);
mysql_prlock_init(key_MDL_lock_rwlock, &m_rwlock);
}
@@ -557,8 +622,7 @@ public:
static void lf_hash_initializer(LF_HASH *hash __attribute__((unused)),
MDL_lock *lock, MDL_key *key_arg)
{
- DBUG_ASSERT(key_arg->mdl_namespace() != MDL_key::GLOBAL &&
- key_arg->mdl_namespace() != MDL_key::COMMIT);
+ DBUG_ASSERT(key_arg->mdl_namespace() != MDL_key::BACKUP);
new (&lock->key) MDL_key(key_arg);
if (key_arg->mdl_namespace() == MDL_key::SCHEMA)
lock->m_strategy= &m_scoped_lock_strategy;
@@ -568,11 +632,13 @@ public:
const MDL_lock_strategy *m_strategy;
private:
+ static const MDL_backup_lock m_backup_lock_strategy;
static const MDL_scoped_lock m_scoped_lock_strategy;
static const MDL_object_lock m_object_lock_strategy;
};
+const MDL_lock::MDL_backup_lock MDL_lock::m_backup_lock_strategy;
const MDL_lock::MDL_scoped_lock MDL_lock::m_scoped_lock_strategy;
const MDL_lock::MDL_object_lock MDL_lock::m_object_lock_strategy;
@@ -636,7 +702,7 @@ void mdl_destroy()
struct mdl_iterate_arg
{
- int (*callback)(MDL_ticket *ticket, void *arg);
+ mdl_iterator_callback callback;
void *argument;
};
@@ -649,16 +715,19 @@ static my_bool mdl_iterate_lock(MDL_lock *lock, mdl_iterate_arg *arg)
must be empty for such locks anyway.
*/
mysql_prlock_rdlock(&lock->m_rwlock);
- MDL_lock::Ticket_iterator ticket_it(lock->m_granted);
+ MDL_lock::Ticket_iterator granted_it(lock->m_granted);
+ MDL_lock::Ticket_iterator waiting_it(lock->m_waiting);
MDL_ticket *ticket;
- while ((ticket= ticket_it++) && !(res= arg->callback(ticket, arg->argument)))
+ while ((ticket= granted_it++) && !(res= arg->callback(ticket, arg->argument, true)))
+ /* no-op */;
+ while ((ticket= waiting_it++) && !(res= arg->callback(ticket, arg->argument, false)))
/* no-op */;
mysql_prlock_unlock(&lock->m_rwlock);
return MY_TEST(res);
}
-int mdl_iterate(int (*callback)(MDL_ticket *ticket, void *arg), void *arg)
+int mdl_iterate(mdl_iterator_callback callback, void *arg)
{
DBUG_ENTER("mdl_iterate");
mdl_iterate_arg argument= { callback, arg };
@@ -667,8 +736,7 @@ int mdl_iterate(int (*callback)(MDL_ticket *ticket, void *arg), void *arg)
if (pins)
{
- res= mdl_iterate_lock(mdl_locks.m_global_lock, &argument) ||
- mdl_iterate_lock(mdl_locks.m_commit_lock, &argument) ||
+ res= mdl_iterate_lock(mdl_locks.m_backup_lock, &argument) ||
lf_hash_iterate(&mdl_locks.m_locks, pins,
(my_hash_walk_action) mdl_iterate_lock, &argument);
lf_hash_put_pins(pins);
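A minimal sketch of a caller-side callback matching the new mdl_iterator_callback signature; the counting logic and names are illustrative only (mdl_dbug_print_lock() further down in this patch is the real in-tree user), and a non-zero return value stops the iteration:

  static int count_waiting_tickets(MDL_ticket *ticket, void *arg, bool granted)
  {
    if (!granted)                            /* ticket sits in m_waiting */
      (*static_cast<ulong*>(arg))++;
    return 0;                                /* non-zero aborts the iteration */
  }

  ulong waiting= 0;
  mdl_iterate(count_waiting_tickets, &waiting);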
@@ -689,11 +757,9 @@ my_hash_value_type mdl_hash_function(CHARSET_INFO *cs,
void MDL_map::init()
{
- MDL_key global_lock_key(MDL_key::GLOBAL, "", "");
- MDL_key commit_lock_key(MDL_key::COMMIT, "", "");
+ MDL_key backup_lock_key(MDL_key::BACKUP, "", "");
- m_global_lock= new (std::nothrow) MDL_lock(&global_lock_key);
- m_commit_lock= new (std::nothrow) MDL_lock(&commit_lock_key);
+ m_backup_lock= new (std::nothrow) MDL_lock(&backup_lock_key);
lf_hash_init(&m_locks, sizeof(MDL_lock), LF_HASH_UNIQUE, 0, 0,
mdl_locks_key, &my_charset_bin);
@@ -711,10 +777,9 @@ void MDL_map::init()
void MDL_map::destroy()
{
- delete m_global_lock;
- delete m_commit_lock;
+ delete m_backup_lock;
- DBUG_ASSERT(!my_atomic_load32(&m_locks.count));
+ DBUG_ASSERT(!lf_hash_size(&m_locks));
lf_hash_destroy(&m_locks);
}
@@ -732,26 +797,18 @@ MDL_lock* MDL_map::find_or_insert(LF_PINS *pins, const MDL_key *mdl_key)
{
MDL_lock *lock;
- if (mdl_key->mdl_namespace() == MDL_key::GLOBAL ||
- mdl_key->mdl_namespace() == MDL_key::COMMIT)
+ if (mdl_key->mdl_namespace() == MDL_key::BACKUP)
{
/*
- Avoid locking any m_mutex when lock for GLOBAL or COMMIT namespace is
- requested. Return pointer to pre-allocated MDL_lock instance instead.
- Such an optimization allows to save one mutex lock/unlock for any
- statement changing data.
+ Return pointer to pre-allocated MDL_lock instance. Such an optimization
+ allows us to save one hash lookup for any statement changing data.
- It works since these namespaces contain only one element so keys
+ It works since this namespace contains only one element so keys
for them look like '<namespace-id>\0\0'.
*/
DBUG_ASSERT(mdl_key->length() == 3);
-
- lock= (mdl_key->mdl_namespace() == MDL_key::GLOBAL) ? m_global_lock :
- m_commit_lock;
-
- mysql_prlock_wrlock(&lock->m_rwlock);
-
- return lock;
+ mysql_prlock_wrlock(&m_backup_lock->m_rwlock);
+ return m_backup_lock;
}
retry:
@@ -780,22 +837,18 @@ retry:
unsigned long
MDL_map::get_lock_owner(LF_PINS *pins, const MDL_key *mdl_key)
{
- MDL_lock *lock;
unsigned long res= 0;
- if (mdl_key->mdl_namespace() == MDL_key::GLOBAL ||
- mdl_key->mdl_namespace() == MDL_key::COMMIT)
+ if (mdl_key->mdl_namespace() == MDL_key::BACKUP)
{
- lock= (mdl_key->mdl_namespace() == MDL_key::GLOBAL) ? m_global_lock :
- m_commit_lock;
- mysql_prlock_rdlock(&lock->m_rwlock);
- res= lock->get_lock_owner();
- mysql_prlock_unlock(&lock->m_rwlock);
+ mysql_prlock_rdlock(&m_backup_lock->m_rwlock);
+ res= m_backup_lock->get_lock_owner();
+ mysql_prlock_unlock(&m_backup_lock->m_rwlock);
}
else
{
- lock= (MDL_lock*) lf_hash_search(&m_locks, pins, mdl_key->ptr(),
- mdl_key->length());
+ MDL_lock *lock= (MDL_lock*) lf_hash_search(&m_locks, pins, mdl_key->ptr(),
+ mdl_key->length());
if (lock)
{
/*
@@ -820,13 +873,9 @@ MDL_map::get_lock_owner(LF_PINS *pins, const MDL_key *mdl_key)
void MDL_map::remove(LF_PINS *pins, MDL_lock *lock)
{
- if (lock->key.mdl_namespace() == MDL_key::GLOBAL ||
- lock->key.mdl_namespace() == MDL_key::COMMIT)
+ if (lock->key.mdl_namespace() == MDL_key::BACKUP)
{
- /*
- Never destroy pre-allocated MDL_lock objects for GLOBAL and
- COMMIT namespaces.
- */
+ /* Never destroy pre-allocated MDL_lock object in BACKUP namespace. */
mysql_prlock_unlock(&lock->m_rwlock);
return;
}
@@ -975,9 +1024,14 @@ void MDL_ticket::destroy(MDL_ticket *ticket)
uint MDL_ticket::get_deadlock_weight() const
{
- return (m_lock->key.mdl_namespace() == MDL_key::GLOBAL ||
- m_type >= MDL_SHARED_UPGRADABLE ?
- DEADLOCK_WEIGHT_DDL : DEADLOCK_WEIGHT_DML);
+ if (m_lock->key.mdl_namespace() == MDL_key::BACKUP)
+ {
+ if (m_type == MDL_BACKUP_FTWRL1)
+ return DEADLOCK_WEIGHT_FTWRL1;
+ return DEADLOCK_WEIGHT_DDL;
+ }
+ return m_type >= MDL_SHARED_UPGRADABLE ?
+ DEADLOCK_WEIGHT_DDL : DEADLOCK_WEIGHT_DML;
}
@@ -1161,10 +1215,9 @@ void MDL_lock::Ticket_list::add_ticket(MDL_ticket *ticket)
wsrep_thd_is_BF(ticket->get_ctx()->get_thd(), false))
{
Ticket_iterator itw(ticket->get_lock()->m_waiting);
- Ticket_iterator itg(ticket->get_lock()->m_granted);
DBUG_ASSERT(WSREP_ON);
- MDL_ticket *waiting, *granted;
+ MDL_ticket *waiting;
MDL_ticket *prev=NULL;
bool added= false;
@@ -1183,20 +1236,8 @@ void MDL_lock::Ticket_list::add_ticket(MDL_ticket *ticket)
}
/* Otherwise, insert the ticket at the back of the waiting list. */
- if (!added) m_list.push_back(ticket);
-
- while ((granted= itg++))
- {
- if (granted->get_ctx() != ticket->get_ctx() &&
- granted->is_incompatible_when_granted(ticket->get_type()))
- {
- if (!wsrep_grant_mdl_exception(ticket->get_ctx(), granted,
- &ticket->get_lock()->key))
- {
- WSREP_DEBUG("MDL victim killed at add_ticket");
- }
- }
- }
+ if (!added)
+ m_list.push_back(ticket);
}
else
#endif /* WITH_WSREP */
@@ -1368,17 +1409,23 @@ void MDL_lock::reschedule_waiters()
/**
Compatibility (or rather "incompatibility") matrices for scoped metadata
- lock. Arrays of bitmaps which elements specify which granted/waiting locks
+ lock.
+ Scoped locks are database (or schema) locks.
+ Arrays of bitmaps whose elements specify which granted/waiting locks
are incompatible with type of lock being requested.
The first array specifies if particular type of request can be satisfied
if there is granted scoped lock of certain type.
+ (*) Since intention shared scoped locks (IS) are compatible with all other
+ types of locks, they don't need to be implemented and there is no code
+ for them.
+
| Type of active |
Request | scoped lock |
type | IS(*) IX S X |
---------+------------------+
- IS | + + + + |
+ IS(*) | + + + + |
IX | + + - - |
S | + - + - |
X | + - - - |
@@ -1391,7 +1438,7 @@ void MDL_lock::reschedule_waiters()
Request | scoped lock |
type | IS(*) IX S X |
---------+-----------------+
- IS | + + + + |
+ IS(*) | + + + + |
IX | + + - - |
S | + + + - |
X | + + + + |
@@ -1399,9 +1446,6 @@ void MDL_lock::reschedule_waiters()
Here: "+" -- means that request can be satisfied
"-" -- means that request can't be satisfied and should wait
- (*) Since intention shared scoped locks are compatible with all other
- type of locks we don't even have any accounting for them.
-
Note that relation between scoped locks and objects locks requested
by statement is not straightforward and is therefore fully defined
by SQL-layer.
@@ -1440,41 +1484,41 @@ MDL_lock::MDL_scoped_lock::m_waiting_incompatible[MDL_TYPE_END]=
The first array specifies if particular type of request can be satisfied
if there is granted lock of certain type.
- Request | Granted requests for lock |
- type | S SH SR SW SU SRO SNW SNRW X |
- ----------+---------------------------------------+
- S | + + + + + + + + - |
- SH | + + + + + + + + - |
- SR | + + + + + + + - - |
- SW | + + + + + - - - - |
- SU | + + + + - + - - - |
- SRO | + + + - + + + - - |
- SNW | + + + - - + - - - |
- SNRW | + + - - - - - - - |
- X | - - - - - - - - - |
- SU -> X | - - - - 0 - 0 0 0 |
- SNW -> X | - - - 0 0 - 0 0 0 |
- SNRW -> X | - - 0 0 0 0 0 0 0 |
+ Request | Granted requests for lock |
+ type | S SH SR SW SU SRO SNW SNRW X |
+ ----------+------------------------------------+
+ S | + + + + + + + + - |
+ SH | + + + + + + + + - |
+ SR | + + + + + + + - - |
+ SW | + + + + + - - - - |
+ SU | + + + + - + - - - |
+ SRO | + + + - + + + - - |
+ SNW | + + + - - + - - - |
+ SNRW | + + - - - - - - - |
+ X | - - - - - - - - - |
+ SU -> X | - - - - 0 - 0 0 0 |
+ SNW -> X | - - - 0 0 - 0 0 0 |
+ SNRW -> X | - - 0 0 0 0 0 0 0 |
The second array specifies if particular type of request can be satisfied
if there is waiting request for the same lock of certain type. In other
words it specifies what is the priority of different lock types.
- Request | Pending requests for lock |
- type | S SH SR SW SU SRO SNW SNRW X |
- ----------+--------------------------------------+
- S | + + + + + + + + - |
- SH | + + + + + + + + + |
- SR | + + + + + + + - - |
- SW | + + + + + + - - - |
- SU | + + + + + + + + - |
- SRO | + + + - + + + - - |
- SNW | + + + + + + + + - |
- SNRW | + + + + + + + + - |
- X | + + + + + + + + + |
- SU -> X | + + + + + + + + + |
- SNW -> X | + + + + + + + + + |
- SNRW -> X | + + + + + + + + + |
+ Request | Pending requests for lock |
+ type | S SH SR SW SU SRO SNW SNRW X |
+ ----------+-----------------------------------+
+ S | + + + + + + + + - |
+ SH | + + + + + + + + + |
+ SR | + + + + + + + - - |
+ SW | + + + + + + - - - |
+ SU | + + + + + + + + - |
+ SRO | + + + - + + + - - |
+ SNW | + + + + + + + + - |
+ SNRW | + + + + + + + + - |
+ X | + + + + + + + + + |
+ SU -> X | + + + + + + + + + |
+ SNW -> X | + + + + + + + + + |
+ SNRW -> X | + + + + + + + + + |
Here: "+" -- means that request can be satisfied
"-" -- means that request can't be satisfied and should wait
@@ -1535,9 +1579,126 @@ MDL_lock::MDL_object_lock::m_waiting_incompatible[MDL_TYPE_END]=
/**
+ Compatibility (or rather "incompatibility") matrices for backup metadata
+ lock. Arrays of bitmaps whose elements specify which granted/waiting locks
+ are incompatible with type of lock being requested.
+
+ The first array specifies if particular type of request can be satisfied
+ if there is granted backup lock of certain type.
+
+ Request | Type of active backup lock |
+ type | S0 S1 S2 S3 S4 F1 F2 D TD SD DD BL AC C |
+ ----------+---------------------------------------------------------+
+ S0 | - - - - - + + + + + + + + + |
+ S1 | - + + + + + + + + + + + + + |
+ S2 | - + + + + + + - + + + + + + |
+ S3 | - + + + + + + - + + - + + + |
+ S4 | - + + + + + + - + - - + + - |
+ FTWRL1 | + + + + + + + - - - - + - + |
+ FTWRL2 | + + + + + + + - - - - + - - |
+ D | + - - - - - - + + + + + + + |
+ TD | + + + + + - - + + + + + + + |
+ SD | + + + + - - - + + + + + + + |
+ DDL | + + + - - - - + + + + - + + |
+ BLOCK_DDL | + + + + + + + + + + - + + + |
+ ALTER_COP | + + + + + - - + + + + + + + |
+ COMMIT | + + + + - + - + + + + + + + |
+
+ The second array specifies if particular type of request can be satisfied
+ if there is already a waiting request for a backup lock of a certain type.
+ I.e. it specifies the priority of the different lock types.
+
+ Request | Pending backup lock |
+ type | S0 S1 S2 S3 S4 F1 F2 D TD SD DD BL AC C |
+ ----------+---------------------------------------------------------+
+ S0 | + - - - - + + + + + + + + + |
+ S1 | + + + + + + + + + + + + + + |
+ S2 | + + + + + + + + + + + + + + |
+ S3 | + + + + + + + + + + + + + + |
+ S4 | + + + + + + + + + + + + + + |
+ FTWRL1 | + + + + + + + + + + + + + + |
+ FTWRL2 | + + + + + + + + + + + + + + |
+ D | + - - - - - - + + + + + + + |
+ TD | + + + + + - - + + + + + + + |
+ SD | + + + + - - - + + + + + + + |
+ DDL | + + + - - - - + + + + - + + |
+ BLOCK_DDL | + + + + + + + + + + + + + + |
+ ALTER_COP | + + + + + - - + + + + + + + |
+ COMMIT | + + + + - + - + + + + + + + |
+
+ Here: "+" -- means that request can be satisfied
+ "-" -- means that request can't be satisfied and should wait
+*/
+
+/*
+ NOTE: If you add a new MDL_BACKUP_XXX level lock, you have to also add it
+ to MDL_BACKUP_START in the two arrays below!
+*/
+
+const MDL_lock::bitmap_t
+MDL_lock::MDL_backup_lock::m_granted_incompatible[MDL_BACKUP_END]=
+{
+ /* MDL_BACKUP_START */
+ MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT),
+ MDL_BIT(MDL_BACKUP_START),
+ MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_DML),
+ MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_DML) | MDL_BIT(MDL_BACKUP_DDL),
+ MDL_BIT(MDL_BACKUP_START) | MDL_BIT(MDL_BACKUP_DML) | MDL_BIT(MDL_BACKUP_SYS_DML) | MDL_BIT(MDL_BACKUP_DDL) | MDL_BIT(MDL_BACKUP_COMMIT),
+
+ /* MDL_BACKUP_FTWRL1 */
+ MDL_BIT(MDL_BACKUP_DML) | MDL_BIT(MDL_BACKUP_TRANS_DML) | MDL_BIT(MDL_BACKUP_SYS_DML) | MDL_BIT(MDL_BACKUP_DDL) | MDL_BIT(MDL_BACKUP_ALTER_COPY),
+ MDL_BIT(MDL_BACKUP_DML) | MDL_BIT(MDL_BACKUP_TRANS_DML) | MDL_BIT(MDL_BACKUP_SYS_DML) | MDL_BIT(MDL_BACKUP_DDL) | MDL_BIT(MDL_BACKUP_ALTER_COPY) | MDL_BIT(MDL_BACKUP_COMMIT),
+ /* MDL_BACKUP_DML */
+ MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
+ MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
+ MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
+ /* MDL_BACKUP_DDL */
+ MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2) | MDL_BIT(MDL_BACKUP_BLOCK_DDL),
+ /* MDL_BACKUP_BLOCK_DDL */
+ MDL_BIT(MDL_BACKUP_DDL),
+ MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
+ /* MDL_BACKUP_COMMIT */
+ MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL2)
+};
+
+
+const MDL_lock::bitmap_t
+MDL_lock::MDL_backup_lock::m_waiting_incompatible[MDL_BACKUP_END]=
+{
+ /* MDL_BACKUP_START */
+ MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT),
+ 0,
+ 0,
+ 0,
+ 0,
+ /* MDL_BACKUP_FTWRL1 */
+ 0,
+ 0,
+
+ /* MDL_BACKUP_DML */
+ MDL_BIT(MDL_BACKUP_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_FLUSH) | MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
+ MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
+ MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
+ /* MDL_BACKUP_DDL */
+ MDL_BIT(MDL_BACKUP_WAIT_DDL) | MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2) | MDL_BIT(MDL_BACKUP_BLOCK_DDL),
+ /* MDL_BACKUP_BLOCK_DDL */
+ 0,
+ MDL_BIT(MDL_BACKUP_FTWRL1) | MDL_BIT(MDL_BACKUP_FTWRL2),
+ /* MDL_BACKUP_COMMIT */
+ MDL_BIT(MDL_BACKUP_WAIT_COMMIT) | MDL_BIT(MDL_BACKUP_FTWRL2)
+};
+
+
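A minimal sketch of how these rows are consulted at grant time; the member calls mirror MDL_lock::can_grant_lock() below, the local names are illustrative:

  /* Would granting 'type_arg' conflict with locks that are already granted? */
  bitmap_t granted_incompat= incompatible_granted_types_bitmap()[type_arg];
  bool blocked_by_granted= (m_granted.bitmap() & granted_incompat) != 0;

For example, the MDL_BACKUP_FTWRL1 row above makes blocked_by_granted true whenever a DML, TRANS_DML, SYS_DML, DDL or ALTER_COPY lock is currently held.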
+/**
Check if request for the metadata lock can be satisfied given its
current state.
+ New lock request can be satisfied iff:
+ - There are no incompatible types of satisfied requests
+ in other contexts
+ - There are no waiting requests which have higher priority
+ than this request, unless lock priority is ignored.
+
@param type_arg The requested lock type.
@param requestor_ctx The MDL context of the requestor.
@param ignore_lock_priority Ignore lock priority.
@@ -1555,78 +1716,72 @@ MDL_lock::can_grant_lock(enum_mdl_type type_arg,
MDL_context *requestor_ctx,
bool ignore_lock_priority) const
{
- bool can_grant= FALSE;
bitmap_t waiting_incompat_map= incompatible_waiting_types_bitmap()[type_arg];
bitmap_t granted_incompat_map= incompatible_granted_types_bitmap()[type_arg];
- bool wsrep_can_grant= TRUE;
+#ifdef WITH_WSREP
/*
- New lock request can be satisfied iff:
- - There are no incompatible types of satisfied requests
- in other contexts
- - There are no waiting requests which have higher priority
- than this request when priority was not ignored.
+ Approve lock request in BACKUP namespace for BF threads.
+ We should get rid of this code and forbid FTWRL/BACKUP statements
+ when wsrep is active.
*/
- if (ignore_lock_priority || !(m_waiting.bitmap() & waiting_incompat_map))
+ if ((wsrep_thd_is_toi(requestor_ctx->get_thd()) ||
+ wsrep_thd_is_applying(requestor_ctx->get_thd())) &&
+ key.mdl_namespace() == MDL_key::BACKUP)
{
- if (! (m_granted.bitmap() & granted_incompat_map))
- can_grant= TRUE;
- else
+ bool waiting_incompatible= m_waiting.bitmap() & waiting_incompat_map;
+ bool granted_incompatible= m_granted.bitmap() & granted_incompat_map;
+ if (waiting_incompatible || granted_incompatible)
{
- Ticket_iterator it(m_granted);
- MDL_ticket *ticket;
+ WSREP_DEBUG("global lock granted for BF%s: %lu %s",
+ waiting_incompatible ? " (waiting queue)" : "",
+ thd_get_thread_id(requestor_ctx->get_thd()),
+ wsrep_thd_query(requestor_ctx->get_thd()));
+ }
+ return true;
+ }
+#endif /* WITH_WSREP */
+
+ if (!ignore_lock_priority && (m_waiting.bitmap() & waiting_incompat_map))
+ return false;
+
+ if (m_granted.bitmap() & granted_incompat_map)
+ {
+ Ticket_iterator it(m_granted);
+ bool can_grant= true;
- /* Check that the incompatible lock belongs to some other context. */
- while ((ticket= it++))
+ /* Check that the incompatible lock belongs to some other context. */
+ while (auto ticket= it++)
+ {
+ if (ticket->get_ctx() != requestor_ctx &&
+ ticket->is_incompatible_when_granted(type_arg))
{
- if (ticket->get_ctx() != requestor_ctx &&
- ticket->is_incompatible_when_granted(type_arg))
- {
+ can_grant= false;
#ifdef WITH_WSREP
- if (wsrep_thd_is_BF(requestor_ctx->get_thd(),false) &&
- key.mdl_namespace() == MDL_key::GLOBAL)
- {
- WSREP_DEBUG("global lock granted for BF: %lu %s",
- thd_get_thread_id(requestor_ctx->get_thd()),
- wsrep_thd_query(requestor_ctx->get_thd()));
- can_grant = true;
- }
- else if (!wsrep_grant_mdl_exception(requestor_ctx, ticket, &key))
+ /*
+ non-WSREP threads must report the conflict immediately
+ note: wsrep threads processing RSU have wsrep_on==OFF
+ */
+ if (WSREP(requestor_ctx->get_thd()) ||
+ requestor_ctx->get_thd()->wsrep_cs().mode() ==
+ wsrep::client_state::m_rsu)
+ {
+ wsrep_handle_mdl_conflict(requestor_ctx, ticket, &key);
+ if (wsrep_log_conflicts)
{
- wsrep_can_grant= FALSE;
- if (wsrep_log_conflicts)
- {
- MDL_lock * lock = ticket->get_lock();
- WSREP_INFO(
- "MDL conflict db=%s table=%s ticket=%d solved by %s",
- lock->key.db_name(), lock->key.name(), ticket->get_type(),
- "abort" );
- }
+ auto key= ticket->get_key();
+ WSREP_INFO("MDL conflict db=%s table=%s ticket=%d solved by abort",
+ key->db_name(), key->name(), ticket->get_type());
}
- else
- can_grant= TRUE;
- /* Continue loop */
-#else
- break;
-#endif /* WITH_WSREP */
+ continue;
}
+#endif /* WITH_WSREP */
+ break;
}
- if ((ticket == NULL) && wsrep_can_grant)
- can_grant= TRUE; /* Incompatible locks are our own. */
}
+ return can_grant;
}
- else
- {
- if (wsrep_thd_is_BF(requestor_ctx->get_thd(), false) &&
- key.mdl_namespace() == MDL_key::GLOBAL)
- {
- WSREP_DEBUG("global lock granted for BF (waiting queue): %lu %s",
- thd_get_thread_id(requestor_ctx->get_thd()),
- wsrep_thd_query(requestor_ctx->get_thd()));
- can_grant = true;
- }
- }
- return can_grant;
+ return true;
}
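Stripped of the WSREP special case, the decision above reduces to the following sketch (names as in this function, control flow condensed for illustration):

  if (!ignore_lock_priority && (m_waiting.bitmap() & waiting_incompat_map))
    return false;                    /* a higher-priority request is waiting */
  if (!(m_granted.bitmap() & granted_incompat_map))
    return true;                     /* nothing incompatible is granted */
  /* otherwise grant only if every incompatible granted ticket is our own */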
@@ -1739,6 +1894,27 @@ bool MDL_ticket::is_incompatible_when_waiting(enum_mdl_type type) const
}
+static const LEX_STRING
+*get_mdl_lock_name(MDL_key::enum_mdl_namespace mdl_namespace,
+ enum_mdl_type type)
+{
+ return mdl_namespace == MDL_key::BACKUP ?
+ &backup_lock_types[type] :
+ &lock_types[type];
+}
+
+
+const LEX_STRING *MDL_ticket::get_type_name() const
+{
+ return get_mdl_lock_name(get_key()->mdl_namespace(), m_type);
+}
+
+const LEX_STRING *MDL_ticket::get_type_name(enum_mdl_type type) const
+{
+ return get_mdl_lock_name(get_key()->mdl_namespace(), type);
+}
+
+
/**
Check whether the context already holds a compatible lock ticket
on an object.
@@ -1772,8 +1948,10 @@ MDL_context::find_ticket(MDL_request *mdl_request,
if (mdl_request->key.is_equal(&ticket->m_lock->key) &&
ticket->has_stronger_or_equal_type(mdl_request->type))
{
- DBUG_PRINT("info", ("Adding mdl lock %d to %d",
- mdl_request->type, ticket->m_type));
+ DBUG_PRINT("info", ("Adding mdl lock %s to %s",
+ get_mdl_lock_name(mdl_request->key.mdl_namespace(),
+ mdl_request->type)->str,
+ ticket->get_type_name()->str));
*result_duration= duration;
return ticket;
}
@@ -1859,11 +2037,8 @@ MDL_context::try_acquire_lock_impl(MDL_request *mdl_request,
MDL_ticket *ticket;
enum_mdl_duration found_duration;
- DBUG_ASSERT(mdl_request->type != MDL_EXCLUSIVE ||
- is_lock_owner(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE));
- DBUG_ASSERT(mdl_request->ticket == NULL);
-
/* Don't take chances in production. */
+ DBUG_ASSERT(mdl_request->ticket == NULL);
mdl_request->ticket= NULL;
/*
@@ -2064,7 +2239,10 @@ MDL_context::acquire_lock(MDL_request *mdl_request, double lock_wait_timeout)
MDL_ticket *ticket;
MDL_wait::enum_wait_status wait_status;
DBUG_ENTER("MDL_context::acquire_lock");
- DBUG_PRINT("enter", ("lock_type: %d", mdl_request->type));
+ DBUG_PRINT("enter", ("lock_type: %s timeout: %f",
+ get_mdl_lock_name(mdl_request->key.mdl_namespace(),
+ mdl_request->type)->str,
+ lock_wait_timeout));
if (try_acquire_lock_impl(mdl_request, &ticket))
DBUG_RETURN(TRUE);
@@ -2180,6 +2358,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, double lock_wait_timeout)
switch (wait_status)
{
case MDL_wait::VICTIM:
+ mdl_dbug_print_locks();
my_error(ER_LOCK_DEADLOCK, MYF(0));
break;
case MDL_wait::TIMEOUT:
@@ -2319,15 +2498,23 @@ MDL_context::upgrade_shared_lock(MDL_ticket *mdl_ticket,
MDL_savepoint mdl_svp= mdl_savepoint();
bool is_new_ticket;
DBUG_ENTER("MDL_context::upgrade_shared_lock");
- DBUG_PRINT("enter",("new_type: %d lock_wait_timeout: %f", new_type,
+ DBUG_PRINT("enter",("old_type: %s new_type: %s lock_wait_timeout: %f",
+ mdl_ticket->get_type_name()->str,
+ mdl_ticket->get_type_name(new_type)->str,
lock_wait_timeout));
DEBUG_SYNC(get_thd(), "mdl_upgrade_lock");
/*
Do nothing if already upgraded. Used when we FLUSH TABLE under
LOCK TABLES and a table is listed twice in LOCK TABLES list.
+
+ In BACKUP namespace upgrade must always happen. Even though
+ MDL_BACKUP_START is not stronger than MDL_BACKUP_FLUSH from
+ has_stronger_or_equal_type(), the latter effectively blocks
+ new MDL_BACKUP_DML while the former doesn't.
*/
- if (mdl_ticket->has_stronger_or_equal_type(new_type))
+ if (mdl_ticket->has_stronger_or_equal_type(new_type) &&
+ mdl_ticket->get_key()->mdl_namespace() != MDL_key::BACKUP)
DBUG_RETURN(FALSE);
mdl_xlock_request.init(&mdl_ticket->m_lock->key, new_type,
@@ -2728,9 +2915,13 @@ void MDL_ticket::downgrade_lock(enum_mdl_type type)
if (m_type == type || !has_stronger_or_equal_type(type))
return;
- /* Only allow downgrade from EXCLUSIVE and SHARED_NO_WRITE. */
- DBUG_ASSERT(m_type == MDL_EXCLUSIVE ||
- m_type == MDL_SHARED_NO_WRITE);
+ /* Only allow downgrade in some specific known cases */
+ DBUG_ASSERT((get_key()->mdl_namespace() != MDL_key::BACKUP &&
+ (m_type == MDL_EXCLUSIVE ||
+ m_type == MDL_SHARED_NO_WRITE)) ||
+ (get_key()->mdl_namespace() == MDL_key::BACKUP &&
+ (m_type == MDL_BACKUP_DDL ||
+ m_type == MDL_BACKUP_WAIT_FLUSH)));
mysql_prlock_wrlock(&m_lock->m_rwlock);
/*
@@ -3016,30 +3207,11 @@ bool MDL_context::has_explicit_locks()
#ifdef WITH_WSREP
static
-const char *wsrep_get_mdl_type_name(enum_mdl_type type)
-{
- switch (type)
- {
- case MDL_INTENTION_EXCLUSIVE : return "intention exclusive";
- case MDL_SHARED : return "shared";
- case MDL_SHARED_HIGH_PRIO : return "shared high prio";
- case MDL_SHARED_READ : return "shared read";
- case MDL_SHARED_WRITE : return "shared write";
- case MDL_SHARED_UPGRADABLE : return "shared upgradable";
- case MDL_SHARED_NO_WRITE : return "shared no write";
- case MDL_SHARED_NO_READ_WRITE : return "shared no read write";
- case MDL_EXCLUSIVE : return "exclusive";
- default: break;
- }
- return "UNKNOWN";
-}
-
-static
const char *wsrep_get_mdl_namespace_name(MDL_key::enum_mdl_namespace ns)
{
switch (ns)
{
- case MDL_key::GLOBAL : return "GLOBAL";
+ case MDL_key::BACKUP : return "BACKUP";
case MDL_key::SCHEMA : return "SCHEMA";
case MDL_key::TABLE : return "TABLE";
case MDL_key::FUNCTION : return "FUNCTION";
@@ -3047,7 +3219,6 @@ const char *wsrep_get_mdl_namespace_name(MDL_key::enum_mdl_namespace ns)
case MDL_key::PACKAGE_BODY: return "PACKAGE BODY";
case MDL_key::TRIGGER : return "TRIGGER";
case MDL_key::EVENT : return "EVENT";
- case MDL_key::COMMIT : return "COMMIT";
case MDL_key::USER_LOCK : return "USER_LOCK";
default: break;
}
@@ -3060,10 +3231,41 @@ void MDL_ticket::wsrep_report(bool debug)
const PSI_stage_info *psi_stage= m_lock->key.get_wait_state_name();
WSREP_DEBUG("MDL ticket: type: %s space: %s db: %s name: %s (%s)",
- wsrep_get_mdl_type_name(get_type()),
+ get_type_name()->str,
wsrep_get_mdl_namespace_name(m_lock->key.mdl_namespace()),
m_lock->key.db_name(),
m_lock->key.name(),
psi_stage->m_name);
}
#endif /* WITH_WSREP */
+
+
+#ifndef DBUG_OFF
+
+/*
+ Print a list of all locks to DBUG trace to help with debugging
+*/
+
+static int mdl_dbug_print_lock(MDL_ticket *mdl_ticket, void *arg, bool granted)
+{
+ String *tmp= (String*) arg;
+ char buffer[128];
+ MDL_key *mdl_key= mdl_ticket->get_key();
+ size_t length;
+ length= my_snprintf(buffer, sizeof(buffer)-1,
+ "\nname: %s db: %.*s key_name: %.*s (%s)",
+ mdl_ticket->get_type_name()->str,
+ (int) mdl_key->db_name_length(), mdl_key->db_name(),
+ (int) mdl_key->name_length(), mdl_key->name(),
+ granted ? "granted" : "waiting");
+ tmp->append(buffer, length);
+ return 0;
+}
+
+void mdl_dbug_print_locks()
+{
+ String tmp;
+ mdl_iterate(mdl_dbug_print_lock, (void*) &tmp);
+ DBUG_PRINT("mdl_locks", ("%s", tmp.c_ptr()));
+}
+#endif /* DBUG_OFF */
diff --git a/sql/mdl.h b/sql/mdl.h
index 848b2497f43..3d0c86f8f1c 100644
--- a/sql/mdl.h
+++ b/sql/mdl.h
@@ -28,6 +28,10 @@ class MDL_lock;
class MDL_ticket;
bool ok_for_lower_case_names(const char *name);
+typedef unsigned short mdl_bitmap_t;
+#define MDL_BIT(A) static_cast<mdl_bitmap_t>(1U << A)
+
+
/**
@def ENTER_COND(C, M, S, O)
Start a wait on a condition.
@@ -112,19 +116,25 @@ public:
@sa Comments for MDL_object_lock::can_grant_lock() and
MDL_scoped_lock::can_grant_lock() for details.
+
+ Scoped locks are database (or schema) locks.
+ The object locks are for tables, triggers etc.
*/
enum enum_mdl_type {
/*
- An intention exclusive metadata lock. Used only for scoped locks.
+ An intention exclusive metadata lock (IX). Used only for scoped locks.
Owner of this type of lock can acquire upgradable exclusive locks on
individual objects.
Compatible with other IX locks, but is incompatible with scoped S and
X locks.
+ IX lock is taken in SCHEMA namespace when we intend to modify
+ object metadata. The object may be a table, stored procedure, trigger,
+ view, etc.
*/
MDL_INTENTION_EXCLUSIVE= 0,
/*
- A shared metadata lock.
+ A shared metadata lock (S).
To be used in cases when we are interested in object metadata only
and there is no intention to access object data (e.g. for stored
routines or during preparing prepared statements).
@@ -144,6 +154,9 @@ enum enum_mdl_type {
use SNRW locks for them. It also does not arise when S locks are used
during PREPARE calls as table-level locks are not acquired in this
case.
+ This lock is taken for global read lock, when caching a stored
+ procedure in memory for the duration of the transaction and for
+ tables used by prepared statements.
*/
MDL_SHARED,
/*
@@ -164,8 +177,8 @@ enum enum_mdl_type {
*/
MDL_SHARED_HIGH_PRIO,
/*
- A shared metadata lock for cases when there is an intention to read data
- from table.
+ A shared metadata lock (SR) for cases when there is an intention to read
+ data from table.
A connection holding this kind of lock can read table metadata and read
table data (after acquiring appropriate table and row-level locks).
This means that one can only acquire TL_READ, TL_READ_NO_INSERT, and
@@ -175,7 +188,7 @@ enum enum_mdl_type {
*/
MDL_SHARED_READ,
/*
- A shared metadata lock for cases when there is an intention to modify
+ A shared metadata lock (SW) for cases when there is an intention to modify
(and not just read) data in the table.
A connection holding SW lock can read table metadata and modify or read
table data (after acquiring appropriate table and row-level locks).
@@ -185,8 +198,8 @@ enum enum_mdl_type {
*/
MDL_SHARED_WRITE,
/*
- An upgradable shared metadata lock for cases when there is an intention
- to modify (and not just read) data in the table.
+ An upgradable shared metadata lock for cases when there is an
+ intention to modify (and not just read) data in the table.
Can be upgraded to MDL_SHARED_NO_WRITE and MDL_EXCLUSIVE.
A connection holding SU lock can read table metadata and modify or read
table data (after acquiring appropriate table and row-level locks).
@@ -226,7 +239,7 @@ enum enum_mdl_type {
*/
MDL_SHARED_NO_READ_WRITE,
/*
- An exclusive metadata lock.
+ An exclusive metadata lock (X).
A connection holding this lock can modify both table's metadata and data.
No other type of metadata lock can be granted while this lock is held.
To be used for CREATE/DROP/RENAME TABLE statements and for execution of
@@ -234,7 +247,75 @@ enum enum_mdl_type {
*/
MDL_EXCLUSIVE,
/* This should be the last !!! */
- MDL_TYPE_END};
+ MDL_TYPE_END
+};
+
+
+/** Backup locks */
+
+/**
+ Block concurrent backup
+*/
+#define MDL_BACKUP_START enum_mdl_type(0)
+/**
+ Block new write requests to non transactional tables
+*/
+#define MDL_BACKUP_FLUSH enum_mdl_type(1)
+/**
+ In addition to previous locks, blocks running requests to non-transactional tables.
+ Used to wait until all DML usage of non-transactional tables is finished
+*/
+#define MDL_BACKUP_WAIT_FLUSH enum_mdl_type(2)
+/**
+ In addition to previous locks, blocks new DDLs from starting
+*/
+#define MDL_BACKUP_WAIT_DDL enum_mdl_type(3)
+/**
+ In addition to previous locks, blocks commits
+*/
+#define MDL_BACKUP_WAIT_COMMIT enum_mdl_type(4)
+
+/**
+ Blocks (or is blocked by) statements that intend to modify data. Acquired
+ before commit lock by FLUSH TABLES WITH READ LOCK.
+*/
+#define MDL_BACKUP_FTWRL1 enum_mdl_type(5)
+
+/**
+ Blocks (or is blocked by) commits. Acquired after global read lock by
+ FLUSH TABLES WITH READ LOCK.
+*/
+#define MDL_BACKUP_FTWRL2 enum_mdl_type(6)
+
+#define MDL_BACKUP_DML enum_mdl_type(7)
+#define MDL_BACKUP_TRANS_DML enum_mdl_type(8)
+#define MDL_BACKUP_SYS_DML enum_mdl_type(9)
+
+/**
+ Must be acquired by DDL statements that intend to modify data.
+ Currently it's also used for LOCK TABLES.
+*/
+#define MDL_BACKUP_DDL enum_mdl_type(10)
+
+/**
+ Blocks new DDLs. Used by the backup code to enable DDL logging
+*/
+#define MDL_BACKUP_BLOCK_DDL enum_mdl_type(11)
+
+/*
+ Statement is modifying data, but will not block MDL_BACKUP_DDL or earlier
+ BACKUP stages.
+ ALTER TABLE is started with MDL_BACKUP_DDL, but changed to
+ MDL_BACKUP_ALTER_COPY while ALTER TABLE is copying or modifying data.
+*/
+
+#define MDL_BACKUP_ALTER_COPY enum_mdl_type(12)
+
+/**
+ Must be acquired during commit.
+*/
+#define MDL_BACKUP_COMMIT enum_mdl_type(13)
+#define MDL_BACKUP_END enum_mdl_type(14)
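A hedged sketch of how a statement would request one of the backup lock types above; the five-argument MDL_request::init() overload and the MDL_STATEMENT duration used here are assumptions, they are not part of this hunk:

  MDL_request mdl_request;
  mdl_request.init(MDL_key::BACKUP, "", "", MDL_BACKUP_DML, MDL_STATEMENT);
  if (thd->mdl_context.acquire_lock(&mdl_request,
                                    thd->variables.lock_wait_timeout))
    return true;             /* deadlock, timeout or the thread was killed */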
/** Duration of metadata lock. */
@@ -282,10 +363,13 @@ public:
/**
Object namespaces.
Sic: when adding a new member to this enum make sure to
- update m_namespace_to_wait_state_name array in mdl.cc!
+ update m_namespace_to_wait_state_name array in mdl.cc and
+ metadata_lock_info_lock_name in metadata_lock_info.cc!
Different types of objects exist in different namespaces
+ - SCHEMA is for databases (to protect against DROP DATABASE)
- TABLE is for tables and views.
+ - BACKUP is for locking DML, DDL and COMMITs during BACKUP STAGES
- FUNCTION is for stored functions.
- PROCEDURE is for stored procedures.
- TRIGGER is for triggers.
@@ -294,7 +378,7 @@ public:
it's necessary to have a separate namespace for them since
MDL_key is also used outside of the MDL subsystem.
*/
- enum enum_mdl_namespace { GLOBAL=0,
+ enum enum_mdl_namespace { BACKUP=0,
SCHEMA,
TABLE,
FUNCTION,
@@ -302,7 +386,6 @@ public:
PACKAGE_BODY,
TRIGGER,
EVENT,
- COMMIT,
USER_LOCK, /* user level locks. */
/* This should be the last ! */
NAMESPACE_END };
@@ -562,7 +645,8 @@ public:
enum enum_deadlock_weight
{
- DEADLOCK_WEIGHT_DML= 0,
+ DEADLOCK_WEIGHT_FTWRL1= 0,
+ DEADLOCK_WEIGHT_DML= 1,
DEADLOCK_WEIGHT_DDL= 100
};
/* A helper used to determine which lock request should be aborted. */
@@ -620,6 +704,8 @@ public:
m_type == MDL_EXCLUSIVE;
}
enum_mdl_type get_type() const { return m_type; }
+ const LEX_STRING *get_type_name() const;
+ const LEX_STRING *get_type_name(enum_mdl_type type) const;
MDL_lock *get_lock() const { return m_lock; }
MDL_key *get_key() const;
void downgrade_lock(enum_mdl_type type);
@@ -1011,6 +1097,13 @@ extern "C" int thd_is_connected(MYSQL_THD thd);
*/
extern "C" ulong max_write_lock_count;
+typedef int (*mdl_iterator_callback)(MDL_ticket *ticket, void *arg,
+ bool granted);
extern MYSQL_PLUGIN_IMPORT
-int mdl_iterate(int (*callback)(MDL_ticket *ticket, void *arg), void *arg);
-#endif
+int mdl_iterate(mdl_iterator_callback callback, void *arg);
+#ifndef DBUG_OFF
+void mdl_dbug_print_locks();
+#else
+ static inline void mdl_dbug_print_locks() {}
+#endif /* DBUG_OFF */
+#endif /* MDL_H */
diff --git a/sql/mem_root_array.h b/sql/mem_root_array.h
index 5daeedadcba..bf266a60334 100644
--- a/sql/mem_root_array.h
+++ b/sql/mem_root_array.h
@@ -87,9 +87,11 @@ public:
// Returns a pointer to the first element in the array.
Element_type *begin() { return &m_array[0]; }
+ const Element_type *begin() const { return &m_array[0]; }
// Returns a pointer to the past-the-end element in the array.
Element_type *end() { return &m_array[size()]; }
+ const Element_type *end() const { return &m_array[size()]; }
// Erases all of the elements.
void clear()
@@ -226,6 +228,7 @@ public:
size_t element_size() const { return sizeof(Element_type); }
bool empty() const { return size() == 0; }
size_t size() const { return m_size; }
+ const MEM_ROOT *mem_root() const { return m_root; }
private:
MEM_ROOT *const m_root;
diff --git a/sql/message.h b/sql/message.h
index 6641453a965..a6491736877 100644
--- a/sql/message.h
+++ b/sql/message.h
@@ -1,21 +1,3 @@
-#ifndef MESSAGE_INCLUDED
-#define MESSAGE_INCLUDED
-/* Copyright (c) 2008, 2009 Sun Microsystems, Inc.
- Use is subject to license terms.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-
/*
To change or add messages mysqld writes to the Windows error log, run
mc.exe message.mc
@@ -24,10 +6,8 @@
mc.exe can be installed with Windows SDK, some Visual Studio distributions
do not include it.
*/
-
-
//
-// Values are 32 bit values layed out as follows:
+// Values are 32 bit values laid out as follows:
//
// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
@@ -67,11 +47,8 @@
//
// MessageText:
//
-// %1For more information, see Help and Support Center at http://www.mysql.com.
-//
-//
+// %1
+//
//
#define MSG_DEFAULT 0xC0000064L
-#endif /* MESSAGE_INCLUDED */
-
diff --git a/sql/message.mc b/sql/message.mc
index 8d68d599365..c009b048d05 100644
--- a/sql/message.mc
+++ b/sql/message.mc
@@ -11,6 +11,5 @@ Severity = Error
Facility = Application
SymbolicName = MSG_DEFAULT
Language = English
-%1For more information, see Help and Support Center at http://www.mysql.com.
-
+%1
diff --git a/sql/message.rc b/sql/message.rc
index 0885a897e6f..0abcb0fa2c5 100644
--- a/sql/message.rc
+++ b/sql/message.rc
@@ -1,2 +1,2 @@
LANGUAGE 0x9,0x1
-1 11 MSG00001.bin
+1 11 "MSG00001.bin"
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc
index d6952e71899..027d2f71d85 100644
--- a/sql/multi_range_read.cc
+++ b/sql/multi_range_read.cc
@@ -20,6 +20,18 @@
#include "key.h"
#include "sql_statistics.h"
+static ulonglong key_block_no(TABLE *table, uint keyno, ha_rows keyentry_pos)
+{
+ size_t len= table->key_info[keyno].key_length + table->file->ref_length;
+ if (keyno == table->s->primary_key &&
+ table->file->primary_key_is_clustered())
+ len= table->s->stored_rec_length;
+ uint keys_per_block= (uint) (table->file->stats.block_size/2.0/len+1);
+ ulonglong block_no= !keyentry_pos ? 0 :
+ (keyentry_pos - 1) / keys_per_block + 1;
+ return block_no;
+}
+
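A hypothetical worked example for the helper above; the block and key sizes are assumptions, not measurements:

  /* block_size = 16384, key_length + ref_length = 100 (secondary index) */
  /* keys_per_block = (uint) (16384 / 2.0 / 100 + 1) = 82                */
  /* keyentry_pos = 200  =>  block_no = (200 - 1) / 82 + 1 = 3           */
  /* keyentry_pos = 0    =>  block_no = 0                                */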
/****************************************************************************
* Default MRR implementation (MRR to non-MRR converter)
***************************************************************************/
@@ -47,6 +59,24 @@
for a user to be able to interrupt the calculation by killing the
connection/query.
+ @note
+ Starting from 10.4 the implementation of this method tries to take into
+ account gaps between range intervals. Before this we had such paradoxical
+ cases where, for example, the cost of the index scan over the range [1..3]
+ was almost half the cost of the index scan over the two intervals
+ [1..1] and [3..3].
+
+ @note
+ The current implementation of the method is not efficient, as it
+ requires extra index dives for the gaps. Although these dives are cheap
+ (they touch index nodes that are very likely already in the cache),
+ they are still undesirable. We could avoid them if records_in_range()
+ also returned the positions of the ends of range intervals. This would
+ not be hard to implement for MyISAM, as that engine provides a function
+ returning an approximation of the relative position of a key tuple
+ among the other index key tuples. Unfortunately, InnoDB currently does
+ not provide such a function.
+
@retval
HA_POS_ERROR Error or the engine is unable to perform the requested
scan. Values of OUT parameters are undefined.
@@ -61,12 +91,21 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
uint *bufsz, uint *flags, Cost_estimate *cost)
{
KEY_MULTI_RANGE range;
+ key_range prev_start_key;
range_seq_t seq_it;
- ha_rows rows, total_rows= 0;
+ ha_rows min_pos= 0;
+ ha_rows total_rows= 0;
uint n_ranges=0;
+ uint n_eq_ranges= 0;
+ ulonglong total_touched_blocks= 0;
+ key_range *prev_min_endp= 0;
+ ulonglong prev_max_block_no=0;
+ ha_rows max_rows= stats.records;
THD *thd= table->in_use;
- uint limit= thd->variables.eq_range_index_dive_limit;
+ StringBuffer<64> key_value;
+ uint limit= thd->variables.eq_range_index_dive_limit;
+
bool use_statistics_for_eq_range= eq_ranges_exceeds_limit(seq,
seq_init_param,
limit);
@@ -77,10 +116,15 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
seq_it= seq->init(seq_init_param, n_ranges, *flags);
while (!seq->next(seq_it, &range))
{
+ ha_rows rows;
+ ulonglong new_touched_blocks= 0;
+
if (unlikely(thd->killed != 0))
return HA_POS_ERROR;
n_ranges++;
+ if (range.range_flag & EQ_RANGE)
+ n_eq_ranges++;
key_range *min_endp, *max_endp;
if (range.range_flag & GEOM_FLAG)
{
@@ -95,38 +139,96 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
max_endp= range.end_key.length? &range.end_key : NULL;
}
int keyparts_used= my_count_bits(range.start_key.keypart_map);
- if ((range.range_flag & UNIQUE_RANGE) && !(range.range_flag & NULL_RANGE))
- rows= 1; /* there can be at most one row */
- else if (use_statistics_for_eq_range &&
- !(range.range_flag & NULL_RANGE) &&
- (range.range_flag & EQ_RANGE) &&
- table->key_info[keyno].actual_rec_per_key(keyparts_used - 1) > 0.5)
- rows=
- (ha_rows) table->key_info[keyno].actual_rec_per_key(keyparts_used - 1);
+ if (use_statistics_for_eq_range &&
+ !(range.range_flag & NULL_RANGE) &&
+ (range.range_flag & EQ_RANGE) &&
+ table->key_info[keyno].actual_rec_per_key(keyparts_used - 1) > 0.5)
+ {
+ if ((range.range_flag & UNIQUE_RANGE) && !(range.range_flag & NULL_RANGE))
+ rows= 1; /* there can be at most one row */
+ else
+ rows=
+ (ha_rows) table->key_info[keyno].actual_rec_per_key(keyparts_used-1);
+ }
else
{
- if (HA_POS_ERROR == (rows= this->records_in_range(keyno, min_endp,
+ ulonglong min_block_no;
+ ulonglong max_block_no;
+ if ((range.range_flag & UNIQUE_RANGE) && !(range.range_flag & NULL_RANGE))
+ rows= 1; /* there can be at most one row */
+ else if (HA_POS_ERROR == (rows= this->records_in_range(keyno, min_endp,
max_endp)))
{
/* Can't scan one range => can't do MRR scan at all */
total_rows= HA_POS_ERROR;
break;
}
+ if (!max_endp && !(prev_min_endp && prev_min_endp->length))
+ min_pos+= max_rows - rows;
+ else
+ {
+ key_range *start_endp= prev_min_endp;
+ if (start_endp && !start_endp->keypart_map)
+ start_endp= 0;
+ /*
+ Get the estimate of rows in the previous gap
+ and two ranges surrounding this gap
+ */
+ ha_rows r= this->records_in_range(keyno,start_endp,max_endp);
+ if (r == HA_POS_ERROR)
+ {
+ /* Some engine cannot estimate such ranges */
+ total_rows += rows;
+ continue;
+ }
+ min_pos+= r - rows;
+ }
+ min_block_no= key_block_no(this->table, keyno, min_pos);
+ max_block_no= key_block_no(this->table, keyno, min_pos + rows);
+ new_touched_blocks= max_block_no - min_block_no +
+ MY_TEST(min_block_no != prev_max_block_no);
+ prev_max_block_no= max_block_no;
+ if (!prev_min_endp)
+ prev_min_endp= &prev_start_key;
+ /* Save range.start_key for the next iteration step */
+ prev_start_key= range.start_key;
+ key_value.copy((const char *) prev_start_key.key, prev_start_key.length,
+ key_value.charset());
+ prev_start_key.key= (const uchar *) key_value.ptr();
}
total_rows += rows;
+ total_touched_blocks+= new_touched_blocks;
}
if (total_rows != HA_POS_ERROR)
{
+ set_if_smaller(total_rows, max_rows);
/* The following calculation is the same as in multi_range_read_info(): */
*flags |= HA_MRR_USE_DEFAULT_IMPL;
cost->reset();
cost->avg_io_cost= 1; /* assume random seeks */
- if ((*flags & HA_MRR_INDEX_ONLY) && total_rows > 2)
- cost->io_count= keyread_time(keyno, n_ranges, (uint)total_rows);
+ cost->idx_avg_io_cost= 1;
+ if (!((keyno == table->s->primary_key && primary_key_is_clustered()) ||
+ is_clustering_key(keyno)))
+ {
+ cost->idx_io_count= total_touched_blocks +
+ keyread_time(keyno, 0, total_rows);
+ cost->cpu_cost= cost->idx_cpu_cost=
+ (double) total_rows / TIME_FOR_COMPARE_IDX +
+ (2 * n_ranges - n_eq_ranges) * IDX_LOOKUP_COST;
+ if (!(*flags & HA_MRR_INDEX_ONLY))
+ {
+ cost->io_count= read_time(keyno, 0, total_rows);
+ cost->cpu_cost+= (double) total_rows / TIME_FOR_COMPARE;
+ }
+ }
else
- cost->io_count= read_time(keyno, n_ranges, total_rows);
- cost->cpu_cost= (double) total_rows / TIME_FOR_COMPARE + 0.01;
+ {
+ cost->io_count= read_time(keyno,
+ (uint)total_touched_blocks,
+ (uint) total_rows);
+ cost->cpu_cost= (double) total_rows / TIME_FOR_COMPARE + 0.01;
+ }
}
return total_rows;
}
@@ -183,10 +285,22 @@ ha_rows handler::multi_range_read_info(uint keyno, uint n_ranges, uint n_rows,
cost->avg_io_cost= 1; /* assume random seeks */
/* Produce the same cost as non-MRR code does */
- if (*flags & HA_MRR_INDEX_ONLY)
- cost->io_count= keyread_time(keyno, n_ranges, n_rows);
+ if (!(keyno == table->s->primary_key && primary_key_is_clustered()))
+ {
+ cost->idx_io_count= n_ranges + keyread_time(keyno, 0, n_rows);
+ cost->cpu_cost= cost->idx_cpu_cost=
+ (double) n_rows / TIME_FOR_COMPARE_IDX + n_ranges * IDX_LOOKUP_COST;
+ if (!(*flags & HA_MRR_INDEX_ONLY))
+ {
+ cost->io_count= read_time(keyno, 0, n_rows);
+ cost->cpu_cost+= (double) n_rows / TIME_FOR_COMPARE;
+ }
+ }
else
- cost->io_count= read_time(keyno, n_ranges, n_rows);
+ {
+ cost->io_count= read_time(keyno, n_ranges, (uint)n_rows);
+ cost->cpu_cost= (double) n_rows / TIME_FOR_COMPARE + 0.01;
+ }
return 0;
}
diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc
index 338f78d8f08..b57ac234fdc 100644
--- a/sql/my_decimal.cc
+++ b/sql/my_decimal.cc
@@ -20,6 +20,7 @@
#ifndef MYSQL_CLIENT
#include "sql_class.h" // THD
+#include "field.h"
#endif
#define DIG_BASE 1000000000
@@ -95,9 +96,8 @@ int decimal_operation_results(int result, const char *value, const char *type)
@retval E_DEC_OOM
*/
-int my_decimal2string(uint mask, const my_decimal *d,
- uint fixed_prec, uint fixed_dec,
- char filler, String *str)
+int my_decimal::to_string_native(String *str, uint fixed_prec, uint fixed_dec,
+ char filler, uint mask) const
{
/*
Calculate the size of the string: For DECIMAL(a,b), fixed_prec==a
@@ -113,11 +113,11 @@ int my_decimal2string(uint mask, const my_decimal *d,
*/
int length= (fixed_prec
? (fixed_prec + ((fixed_prec == fixed_dec) ? 1 : 0) + 1)
- : my_decimal_string_length(d));
+ : my_decimal_string_length(this));
int result;
if (str->alloc(length))
return check_result(mask, E_DEC_OOM);
- result= decimal2string((decimal_t*) d, (char*) str->ptr(),
+ result= decimal2string(this, (char*) str->ptr(),
&length, (int)fixed_prec, fixed_dec,
filler);
str->length(length);
@@ -156,8 +156,8 @@ str_set_decimal(uint mask, const my_decimal *val,
{
if (!(cs->state & MY_CS_NONASCII))
{
- /* For ASCII-compatible character sets we can use my_decimal2string */
- my_decimal2string(mask, val, fixed_prec, fixed_dec, filler, str);
+ // For ASCII-compatible character sets we can use to_string_native()
+ val->to_string_native(str, fixed_prec, fixed_dec, filler, mask);
str->set_charset(cs);
return FALSE;
}
@@ -165,14 +165,13 @@ str_set_decimal(uint mask, const my_decimal *val,
{
/*
For ASCII-incompatible character sets (like UCS2) we
- call my_decimal2string() on a temporary buffer first,
+ call to_string_native() on a temporary buffer first,
and then convert the result to the target character
with help of str->copy().
*/
uint errors;
- char buf[DECIMAL_MAX_STR_LENGTH];
- String tmp(buf, sizeof(buf), &my_charset_latin1);
- my_decimal2string(mask, val, fixed_prec, fixed_dec, filler, &tmp);
+ StringBuffer<DECIMAL_MAX_STR_LENGTH> tmp;
+ val->to_string_native(&tmp, fixed_prec, fixed_dec, filler, mask);
return str->copy(tmp.ptr(), tmp.length(), &my_charset_latin1, cs, &errors);
}
}
@@ -182,7 +181,7 @@ str_set_decimal(uint mask, const my_decimal *val,
Convert from decimal to binary representation
SYNOPSIS
- my_decimal2binary()
+ to_binary()
mask error processing mask
d number for conversion
bin pointer to buffer where to write result
@@ -199,12 +198,11 @@ str_set_decimal(uint mask, const my_decimal *val,
E_DEC_OVERFLOW
*/
-int my_decimal2binary(uint mask, const my_decimal *d, uchar *bin, int prec,
- int scale)
+int my_decimal::to_binary(uchar *bin, int prec, int scale, uint mask) const
{
int err1= E_DEC_OK, err2;
my_decimal rounded;
- my_decimal2decimal(d, &rounded);
+ my_decimal2decimal(this, &rounded);
rounded.frac= decimal_actual_fraction(&rounded);
if (scale < rounded.frac)
{
@@ -270,7 +268,8 @@ int str2my_decimal(uint mask, const char *from, size_t length,
integer part cannot be larger that 1e18 (otherwise it's an overflow).
fractional part is microseconds.
*/
-bool my_decimal2seconds(const my_decimal *d, ulonglong *sec, ulong *microsec)
+bool my_decimal2seconds(const my_decimal *d, ulonglong *sec,
+ ulong *microsec, ulong *nanosec)
{
int pos;
@@ -288,6 +287,7 @@ bool my_decimal2seconds(const my_decimal *d, ulonglong *sec, ulong *microsec)
}
*microsec= d->frac ? static_cast<longlong>(d->buf[pos+1]) / (DIG_BASE/1000000) : 0;
+ *nanosec= d->frac ? static_cast<longlong>(d->buf[pos+1]) % (DIG_BASE/1000000) : 0;
if (pos > 1)
{
@@ -368,6 +368,26 @@ int my_decimal2int(uint mask, const decimal_t *d, bool unsigned_flag,
}
+longlong my_decimal::to_longlong(bool unsigned_flag) const
+{
+ longlong result;
+ my_decimal2int(E_DEC_FATAL_ERROR, this, unsigned_flag, &result);
+ return result;
+}
+
+
+my_decimal::my_decimal(Field *field)
+{
+ init();
+ DBUG_ASSERT(!field->is_null());
+#ifdef DBUG_ASSERT_EXISTS
+ my_decimal *dec=
+#endif
+ field->val_decimal(this);
+ DBUG_ASSERT(dec == this);
+}
+
+
#ifndef DBUG_OFF
/* routines for debugging print */
diff --git a/sql/my_decimal.h b/sql/my_decimal.h
index 22800c24338..c196d43e001 100644
--- a/sql/my_decimal.h
+++ b/sql/my_decimal.h
@@ -29,6 +29,8 @@
#ifndef my_decimal_h
#define my_decimal_h
+#include "sql_basic_types.h"
+
#if defined(MYSQL_SERVER) || defined(EMBEDDED_LIBRARY)
#include "sql_string.h" /* String */
#endif
@@ -39,6 +41,7 @@ C_MODE_START
C_MODE_END
class String;
+class Field;
typedef struct st_mysql_time MYSQL_TIME;
/**
@@ -63,6 +66,25 @@ inline int my_decimal_int_part(uint precision, uint decimals)
}
+#ifndef MYSQL_CLIENT
+int decimal_operation_results(int result, const char *value, const char *type);
+#else
+inline int decimal_operation_results(int result, const char *value,
+ const char *type)
+{
+ return result;
+}
+#endif /*MYSQL_CLIENT*/
+
+
+inline int check_result(uint mask, int result)
+{
+ if (result & mask)
+ decimal_operation_results(result, "", "DECIMAL");
+ return result;
+}
+
+
/**
my_decimal class limits 'decimal_t' type to what we need in MySQL.
@@ -125,6 +147,12 @@ public:
{
init();
}
+ my_decimal(const uchar *bin, int prec, int scale)
+ {
+ init();
+ check_result(E_DEC_FATAL_ERROR, bin2decimal(bin, this, prec, scale));
+ }
+ my_decimal(Field *field);
~my_decimal()
{
sanity_check();
@@ -141,7 +169,57 @@ public:
bool sign() const { return decimal_t::sign; }
void sign(bool s) { decimal_t::sign= s; }
uint precision() const { return intg + frac; }
+ void set_zero()
+ {
+ /*
+ We need the up-cast here, since my_decimal has sign() member functions,
+ which conflicts with decimal_t::sign
+ (and decimal_make_zero is a macro, rather than a function).
+ */
+ decimal_make_zero(static_cast<decimal_t*>(this));
+ }
+ int cmp(const my_decimal *other) const
+ {
+ return decimal_cmp(this, other);
+ }
+#ifndef MYSQL_CLIENT
+ bool to_bool() const
+ {
+ return !decimal_is_zero(this);
+ }
+ double to_double() const
+ {
+ double res;
+ decimal2double(this, &res);
+ return res;
+ }
+ longlong to_longlong(bool unsigned_flag) const;
+ // Convert to string returning decimal2string() error code
+ int to_string_native(String *to, uint prec, uint dec, char filler,
+ uint mask= E_DEC_FATAL_ERROR) const;
+ // Convert to string returning the String pointer
+ String *to_string(String *to, uint prec, uint dec, char filler) const
+ {
+ return to_string_native(to, prec, dec, filler) ? NULL : to;
+ }
+ String *to_string(String *to) const
+ {
+ return to_string(to, 0, 0, 0);
+ }
+ String *to_string_round(String *to, uint scale, my_decimal *round_buff) const
+ {
+ (void) round_to(round_buff, scale, HALF_UP); // QQ: check result?
+ return round_buff->to_string(to);
+ }
+ int round_to(my_decimal *to, uint scale, decimal_round_mode mode,
+ int mask= E_DEC_FATAL_ERROR) const
+ {
+ return check_result(mask, decimal_round(this, to, (int) scale, mode));
+ }
+ int to_binary(uchar *bin, int prec, int scale,
+ uint mask= E_DEC_FATAL_ERROR) const;
+#endif
/** Swap two my_decimal values */
void swap(my_decimal &rhs)
{
@@ -164,16 +242,6 @@ bool str_set_decimal(uint mask, const my_decimal *val, uint fixed_prec,
extern my_decimal decimal_zero;
-#ifndef MYSQL_CLIENT
-int decimal_operation_results(int result, const char *value, const char *type);
-#else
-inline int decimal_operation_results(int result, const char *value,
- const char *type)
-{
- return result;
-}
-#endif /*MYSQL_CLIENT*/
-
inline
void max_my_decimal(my_decimal *to, int precision, int frac)
{
@@ -187,13 +255,6 @@ inline void max_internal_decimal(my_decimal *to)
max_my_decimal(to, DECIMAL_MAX_PRECISION, 0);
}
-inline int check_result(uint mask, int result)
-{
- if (result & mask)
- decimal_operation_results(result, "", "DECIMAL");
- return result;
-}
-
inline int check_result_and_overflow(uint mask, int result, my_decimal *val)
{
if (check_result(mask, result) & E_DEC_OVERFLOW)
@@ -271,10 +332,6 @@ void my_decimal2decimal(const my_decimal *from, my_decimal *to)
}
-int my_decimal2binary(uint mask, const my_decimal *d, uchar *bin, int prec,
- int scale);
-
-
inline
int binary2my_decimal(uint mask, const uchar *bin, my_decimal *d, int prec,
int scale)
@@ -286,12 +343,7 @@ int binary2my_decimal(uint mask, const uchar *bin, my_decimal *d, int prec,
inline
int my_decimal_set_zero(my_decimal *d)
{
- /*
- We need the up-cast here, since my_decimal has sign() member functions,
- which conflicts with decimal_t::sign
- (and decimal_make_zero is a macro, rather than a funcion).
- */
- decimal_make_zero(static_cast<decimal_t*>(d));
+ d->set_zero();
return 0;
}
@@ -303,42 +355,15 @@ bool my_decimal_is_zero(const my_decimal *decimal_value)
}
-inline
-int my_decimal_round(uint mask, const my_decimal *from, int scale,
- bool truncate, my_decimal *to)
-{
- return check_result(mask, decimal_round(from, to, scale,
- (truncate ? TRUNCATE : HALF_UP)));
-}
-
-
-inline
-int my_decimal_floor(uint mask, const my_decimal *from, my_decimal *to)
-{
- return check_result(mask, decimal_round(from, to, 0, FLOOR));
-}
-
-
-inline
-int my_decimal_ceiling(uint mask, const my_decimal *from, my_decimal *to)
-{
- return check_result(mask, decimal_round(from, to, 0, CEILING));
-}
-
-
inline bool str_set_decimal(const my_decimal *val, String *str,
CHARSET_INFO *cs)
{
return str_set_decimal(E_DEC_FATAL_ERROR, val, 0, 0, 0, str, cs);
}
-#ifndef MYSQL_CLIENT
-class String;
-int my_decimal2string(uint mask, const my_decimal *d, uint fixed_prec,
- uint fixed_dec, char filler, String *str);
-#endif
-bool my_decimal2seconds(const my_decimal *d, ulonglong *sec, ulong *microsec);
+bool my_decimal2seconds(const my_decimal *d, ulonglong *sec,
+ ulong *microsec, ulong *nanosec);
my_decimal *seconds2my_decimal(bool sign, ulonglong sec, ulong microsec,
my_decimal *d);
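A minimal usage sketch for the my_decimal methods introduced in this header; the calling context and the DECIMAL(10,2) figures are assumptions:

  my_decimal d;
  d.set_zero();
  StringBuffer<DECIMAL_MAX_STR_LENGTH> buf;
  d.to_string(&buf);                     /* replaces my_decimal2string()   */
  uchar bin[32];                         /* large enough for DECIMAL(10,2) */
  d.to_binary(bin, 10, 2);               /* replaces my_decimal2binary()   */
  my_decimal restored(bin, 10, 2);       /* new binary constructor         */
  bool equal= (d.cmp(&restored) == 0);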
diff --git a/sql/my_json_writer.cc b/sql/my_json_writer.cc
index d219e88b98b..3755f8d4bcb 100644
--- a/sql/my_json_writer.cc
+++ b/sql/my_json_writer.cc
@@ -16,7 +16,6 @@
#include "mariadb.h"
#include "sql_priv.h"
#include "sql_string.h"
-
#include "my_json_writer.h"
void Json_writer::append_indent()
@@ -62,6 +61,7 @@ void Json_writer::end_object()
indent_level-=INDENT_SIZE;
if (!first_child)
append_indent();
+ first_child= false;
output.append("}");
}
@@ -129,7 +129,6 @@ void Json_writer::add_ll(longlong val)
add_unquoted_str(buf);
}
-
/* Add a memory size, printing in Kb, Kb, Gb if necessary */
void Json_writer::add_size(longlong val)
{
@@ -173,7 +172,7 @@ void Json_writer::add_null()
void Json_writer::add_unquoted_str(const char* str)
{
- if (fmt_helper.on_add_str(str))
+ if (fmt_helper.on_add_str(str, 0))
return;
if (!element_started)
@@ -183,10 +182,9 @@ void Json_writer::add_unquoted_str(const char* str)
element_started= false;
}
-
void Json_writer::add_str(const char *str)
{
- if (fmt_helper.on_add_str(str))
+ if (fmt_helper.on_add_str(str, 0))
return;
if (!element_started)
@@ -198,12 +196,82 @@ void Json_writer::add_str(const char *str)
element_started= false;
}
+/*
+ This function is used to add only num_bytes of str to the output string
+*/
+
+void Json_writer::add_str(const char* str, size_t num_bytes)
+{
+ if (fmt_helper.on_add_str(str, num_bytes))
+ return;
+
+ if (!element_started)
+ start_element();
+
+ output.append('"');
+ output.append(str, num_bytes);
+ output.append('"');
+ element_started= false;
+}
void Json_writer::add_str(const String &str)
{
- add_str(str.ptr());
+ add_str(str.ptr(), str.length());
+}
+
+Json_writer_object::Json_writer_object(THD *thd) :
+ Json_writer_struct(thd)
+{
+ if (my_writer)
+ my_writer->start_object();
+}
+
+Json_writer_object::Json_writer_object(THD* thd, const char *str) :
+ Json_writer_struct(thd)
+{
+ if (my_writer)
+ my_writer->add_member(str).start_object();
+}
+
+Json_writer_object::~Json_writer_object()
+{
+ if (!closed && my_writer)
+ my_writer->end_object();
+ closed= TRUE;
+}
+
+Json_writer_array::Json_writer_array(THD *thd) :
+ Json_writer_struct(thd)
+{
+ if (my_writer)
+ my_writer->start_array();
+}
+
+Json_writer_array::Json_writer_array(THD *thd, const char *str) :
+ Json_writer_struct(thd)
+{
+ if (my_writer)
+ my_writer->add_member(str).start_array();
+
+}
+Json_writer_array::~Json_writer_array()
+{
+ if (!closed && my_writer)
+ {
+ my_writer->end_array();
+ closed= TRUE;
+ }
}
+Json_writer_temp_disable::Json_writer_temp_disable(THD *thd_arg)
+{
+ thd= thd_arg;
+ thd->opt_trace.disable_tracing_if_required();
+}
+Json_writer_temp_disable::~Json_writer_temp_disable()
+{
+ thd->opt_trace.enable_tracing_if_required();
+}
bool Single_line_formatting_helper::on_add_member(const char *name)
{
@@ -267,11 +335,12 @@ void Single_line_formatting_helper::on_start_object()
}
-bool Single_line_formatting_helper::on_add_str(const char *str)
+bool Single_line_formatting_helper::on_add_str(const char *str,
+ size_t num_bytes)
{
if (state == IN_ARRAY)
{
- size_t len= strlen(str);
+ size_t len= num_bytes ? num_bytes : strlen(str);
// New length will be:
// "$string",
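A quick sketch of how the two add_str() overloads above differ once this change is applied (illustrative values; passing num_bytes == 0 makes the formatting helper fall back to strlen()):

  Json_writer w;
  w.start_array();
  w.add_str("hello");            // num_bytes == 0 path: length comes from strlen()
  w.add_str("hello world", 5);   // length-bounded path: only "hello" is emitted
  w.end_array();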
diff --git a/sql/my_json_writer.h b/sql/my_json_writer.h
index 3c127bd178c..8f86212ac30 100644
--- a/sql/my_json_writer.h
+++ b/sql/my_json_writer.h
@@ -13,7 +13,15 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
+#ifndef JSON_WRITER_INCLUDED
+#define JSON_WRITER_INCLUDED
+#include "my_base.h"
+#include "sql_select.h"
+class Opt_trace_stmt;
+class Opt_trace_context;
class Json_writer;
+struct TABLE_LIST;
+
/*
Single_line_formatting_helper is used by Json_writer to do better formatting
@@ -85,7 +93,7 @@ public:
void on_start_object();
// on_end_object() is not needed.
- bool on_add_str(const char *str);
+ bool on_add_str(const char *str, size_t num_bytes);
void flush_on_one_line();
void disable_and_flush();
@@ -93,6 +101,80 @@ public:
/*
+ Something that looks like class String, but has an internal limit of
+ how many bytes one can append to it.
+
+ Bytes that were truncated due to the size limitation are counted.
+*/
+
+class String_with_limit
+{
+public:
+
+ String_with_limit() : size_limit(SIZE_T_MAX), truncated_len(0)
+ {
+ str.length(0);
+ }
+
+ size_t get_truncated_bytes() const { return truncated_len; }
+ size_t get_size_limit() { return size_limit; }
+
+ void set_size_limit(size_t limit_arg)
+ {
+ // Setting size limit to be shorter than length will not have the desired
+ // effect
+ DBUG_ASSERT(str.length() < size_limit);
+ size_limit= limit_arg;
+ }
+
+ void append(const char *s, size_t size)
+ {
+ if (str.length() + size <= size_limit)
+ {
+ // Whole string can be added, just do it
+ str.append(s, size);
+ }
+ else
+ {
+ // We cannot add the whole string
+ if (str.length() < size_limit)
+ {
+ // But we can still add something
+ size_t bytes_to_add = size_limit - str.length();
+ str.append(s, bytes_to_add);
+ truncated_len += size - bytes_to_add;
+ }
+ else
+ truncated_len += size;
+ }
+ }
+
+ void append(const char *s)
+ {
+ append(s, strlen(s));
+ }
+
+ void append(char c)
+ {
+ if (str.length() + 1 > size_limit)
+ truncated_len++;
+ else
+ str.append(c);
+ }
+
+ const String *get_string() { return &str; }
+ size_t length() { return str.length(); }
+private:
+ String str;
+
+ // str must not get longer than this many bytes.
+ size_t size_limit;
+
+ // How many bytes were truncated from the string
+ size_t truncated_len;
+};
+
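A condensed trace of the truncation accounting above (the 4-byte limit is purely hypothetical):

  String_with_limit buf;          // limit starts at SIZE_T_MAX
  buf.set_size_limit(4);
  buf.append("abcdef", 6);        // stores "abcd", counts 2 truncated bytes
  buf.append('x');                // over the limit: nothing stored, count grows to 3
  // buf.length() == 4, buf.get_truncated_bytes() == 3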
+/*
A class to write well-formed JSON documents. The documents are also formatted
for human readability.
*/
@@ -105,7 +187,11 @@ public:
/* Add atomic values */
void add_str(const char* val);
+ void add_str(const char* val, size_t num_bytes);
void add_str(const String &str);
+ void add_str(Item *item);
+ void add_table_name(const JOIN_TAB *tab);
+ void add_table_name(const TABLE* table);
void add_ll(longlong val);
void add_size(longlong val);
@@ -123,6 +209,14 @@ public:
void end_object();
void end_array();
+ /*
+    One can set a limit on how large the JSON document may grow.
+    Writes beyond that size are counted, but not stored.
+ */
+ void set_size_limit(size_t mem_size) { output.set_size_limit(mem_size); }
+
+ size_t get_truncated_bytes() { return output.get_truncated_bytes(); }
+
Json_writer() :
indent_level(0), document_start(true), element_started(false),
first_child(true)
@@ -146,13 +240,338 @@ private:
void start_element();
void start_sub_element();
- //const char *new_member_name;
public:
- String output;
+ String_with_limit output;
+};
+
+/* A class to add values to Json_writer_object and Json_writer_array */
+class Json_value_helper
+{
+ Json_writer* writer;
+
+public:
+ void init(Json_writer *my_writer) { writer= my_writer; }
+ void add_str(const char* val)
+ {
+ if (writer)
+ writer->add_str(val);
+ }
+ void add_str(const char* val, size_t length)
+ {
+ if (writer)
+ writer->add_str(val, length);
+ }
+ void add_str(const String &str)
+ {
+ if (writer)
+ writer->add_str(str);
+ }
+ void add_str(LEX_CSTRING str)
+ {
+ if (writer)
+ writer->add_str(str.str);
+ }
+ void add_str(Item *item)
+ {
+ if (writer)
+ writer->add_str(item);
+ }
+
+ void add_ll(longlong val)
+ {
+ if (writer)
+ writer->add_ll(val);
+ }
+ void add_size(longlong val)
+ {
+ if (writer)
+ writer->add_size(val);
+ }
+ void add_double(double val)
+ {
+ if (writer)
+ writer->add_double(val);
+ }
+ void add_bool(bool val)
+ {
+ if (writer)
+ writer->add_bool(val);
+ }
+ void add_null()
+ {
+ if (writer)
+ writer->add_null();
+ }
+ void add_table_name(const JOIN_TAB *tab)
+ {
+ if (writer)
+ writer->add_table_name(tab);
+ }
+ void add_table_name(const TABLE* table)
+ {
+ if (writer)
+ writer->add_table_name(table);
+ }
+};
+
+/* A common base for Json_writer_object and Json_writer_array */
+class Json_writer_struct
+{
+protected:
+ Json_writer* my_writer;
+ Json_value_helper context;
+ /*
+    True if this Json_writer_struct has already been closed
+ */
+ bool closed;
+
+public:
+ explicit Json_writer_struct(THD *thd)
+ {
+ my_writer= thd->opt_trace.get_current_json();
+ context.init(my_writer);
+ closed= false;
+ }
+};
+
+
+/*
+ RAII-based class to start/end writing a JSON object into the JSON document
+
+  There is an "ignore mode": when the THD has no active optimizer trace, the
+  underlying Json_writer is NULL and every call on this object does nothing.
+  This is how optimizer tracing is switched on and off.
+*/
+
+class Json_writer_object : public Json_writer_struct
+{
+private:
+ void add_member(const char *name)
+ {
+ if (my_writer)
+ my_writer->add_member(name);
+ }
+public:
+ explicit Json_writer_object(THD *thd);
+ explicit Json_writer_object(THD *thd, const char *str);
+
+ Json_writer_object& add(const char *name, bool value)
+ {
+ DBUG_ASSERT(!closed);
+ add_member(name);
+ context.add_bool(value);
+ return *this;
+ }
+ Json_writer_object& add(const char *name, ulonglong value)
+ {
+ DBUG_ASSERT(!closed);
+ add_member(name);
+ context.add_ll(static_cast<longlong>(value));
+ return *this;
+ }
+ Json_writer_object& add(const char *name, longlong value)
+ {
+ DBUG_ASSERT(!closed);
+ add_member(name);
+ context.add_ll(value);
+ return *this;
+ }
+ Json_writer_object& add(const char *name, double value)
+ {
+ DBUG_ASSERT(!closed);
+ add_member(name);
+ context.add_double(value);
+ return *this;
+ }
+ #ifndef _WIN64
+ Json_writer_object& add(const char *name, size_t value)
+ {
+ DBUG_ASSERT(!closed);
+ add_member(name);
+ context.add_ll(static_cast<longlong>(value));
+ return *this;
+ }
+ #endif
+ Json_writer_object& add(const char *name, const char *value)
+ {
+ DBUG_ASSERT(!closed);
+ add_member(name);
+ context.add_str(value);
+ return *this;
+ }
+ Json_writer_object& add(const char *name, const char *value, size_t num_bytes)
+ {
+ add_member(name);
+ context.add_str(value, num_bytes);
+ return *this;
+ }
+ Json_writer_object& add(const char *name, LEX_CSTRING value)
+ {
+ DBUG_ASSERT(!closed);
+ add_member(name);
+ context.add_str(value.str);
+ return *this;
+ }
+ Json_writer_object& add(const char *name, Item *value)
+ {
+ DBUG_ASSERT(!closed);
+ add_member(name);
+ context.add_str(value);
+ return *this;
+ }
+ Json_writer_object& add_null(const char*name)
+ {
+ DBUG_ASSERT(!closed);
+ add_member(name);
+ context.add_null();
+ return *this;
+ }
+ Json_writer_object& add_table_name(const JOIN_TAB *tab)
+ {
+ DBUG_ASSERT(!closed);
+ add_member("table");
+ context.add_table_name(tab);
+ return *this;
+ }
+ Json_writer_object& add_table_name(const TABLE *table)
+ {
+ DBUG_ASSERT(!closed);
+ add_member("table");
+ context.add_table_name(table);
+ return *this;
+ }
+ Json_writer_object& add_select_number(uint select_number)
+ {
+ DBUG_ASSERT(!closed);
+ add_member("select_id");
+ if (unlikely(select_number >= INT_MAX))
+ context.add_str("fake");
+ else
+ context.add_ll(static_cast<longlong>(select_number));
+ return *this;
+ }
+ void end()
+ {
+ DBUG_ASSERT(!closed);
+ if (my_writer)
+ my_writer->end_object();
+ closed= TRUE;
+ }
+ ~Json_writer_object();
};
/*
+ RAII-based class to start/end writing a JSON array into the JSON document
+
+  There is an "ignore mode": when the THD has no active optimizer trace, the
+  underlying Json_writer is NULL and every call on this array does nothing.
+  This is how optimizer tracing is switched on and off.
+*/
+
+class Json_writer_array : public Json_writer_struct
+{
+public:
+ Json_writer_array(THD *thd);
+ Json_writer_array(THD *thd, const char *str);
+ void end()
+ {
+ DBUG_ASSERT(!closed);
+ if (my_writer)
+ my_writer->end_array();
+ closed= TRUE;
+ }
+
+ Json_writer_array& add(bool value)
+ {
+ DBUG_ASSERT(!closed);
+ context.add_bool(value);
+ return *this;
+ }
+ Json_writer_array& add(ulonglong value)
+ {
+ DBUG_ASSERT(!closed);
+ context.add_ll(static_cast<longlong>(value));
+ return *this;
+ }
+ Json_writer_array& add(longlong value)
+ {
+ DBUG_ASSERT(!closed);
+ context.add_ll(value);
+ return *this;
+ }
+ Json_writer_array& add(double value)
+ {
+ DBUG_ASSERT(!closed);
+ context.add_double(value);
+ return *this;
+ }
+ #ifndef _WIN64
+ Json_writer_array& add(size_t value)
+ {
+ DBUG_ASSERT(!closed);
+ context.add_ll(static_cast<longlong>(value));
+ return *this;
+ }
+ #endif
+ Json_writer_array& add(const char *value)
+ {
+ DBUG_ASSERT(!closed);
+ context.add_str(value);
+ return *this;
+ }
+ Json_writer_array& add(const char *value, size_t num_bytes)
+ {
+ DBUG_ASSERT(!closed);
+ context.add_str(value, num_bytes);
+ return *this;
+ }
+ Json_writer_array& add(LEX_CSTRING value)
+ {
+ DBUG_ASSERT(!closed);
+ context.add_str(value.str);
+ return *this;
+ }
+ Json_writer_array& add(Item *value)
+ {
+ DBUG_ASSERT(!closed);
+ context.add_str(value);
+ return *this;
+ }
+ Json_writer_array& add_null()
+ {
+ DBUG_ASSERT(!closed);
+ context.add_null();
+ return *this;
+ }
+ Json_writer_array& add_table_name(const JOIN_TAB *tab)
+ {
+ DBUG_ASSERT(!closed);
+ context.add_table_name(tab);
+ return *this;
+ }
+ Json_writer_array& add_table_name(const TABLE *table)
+ {
+ DBUG_ASSERT(!closed);
+ context.add_table_name(table);
+ return *this;
+ }
+ ~Json_writer_array();
+};
+
+/*
+ RAII-based class to disable writing into the JSON document
+*/
+
+class Json_writer_temp_disable
+{
+public:
+ Json_writer_temp_disable(THD *thd_arg);
+ ~Json_writer_temp_disable();
+ THD *thd;
+};
+
+/*
RAII-based helper class to detect incorrect use of Json_writer.
The idea is that a function typically must leave Json_writer at the same
@@ -192,4 +611,4 @@ public:
#endif
};
-
+#endif
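A minimal usage sketch of the RAII wrappers declared in this header, assuming a THD whose optimizer trace is active; the member names are illustrative, not taken from the patch:

  void trace_example(THD *thd)
  {
    Json_writer_object obj(thd, "example_step");   // no-op when tracing is disabled
    obj.add("cost", 1.5).add("chosen", true);
    {
      Json_writer_array rows(thd, "row_estimates");
      rows.add(10LL).add(20LL);
    }                                              // end_array() runs here
  }                                                // end_object() runs in ~Json_writer_object()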
diff --git a/sql/mysql_install_db.cc b/sql/mysql_install_db.cc
index fc8bce08276..93a3b54bdc2 100644
--- a/sql/mysql_install_db.cc
+++ b/sql/mysql_install_db.cc
@@ -39,7 +39,8 @@ struct IUnknown;
extern "C" const char* mysql_bootstrap_sql[];
-char default_os_user[]= "NT AUTHORITY\\NetworkService";
+static char default_os_user[]= "NT AUTHORITY\\NetworkService";
+static char default_datadir[MAX_PATH];
static int create_db_instance();
static uint opt_silent;
static char datadir_buffer[FN_REFLEN];
@@ -169,8 +170,27 @@ int main(int argc, char **argv)
exit(error);
if (!opt_datadir)
{
- my_print_help(my_long_options);
- die("parameter --datadir=# is mandatory");
+ /*
+      Figure out the default data directory: the "data" directory next to the "bin" directory where
+ mysql_install_db.exe resides.
+ */
+ strcpy(default_datadir, self_name);
+ p = strrchr(default_datadir, FN_LIBCHAR);
+ if (p)
+ {
+ *p= 0;
+ p= strrchr(default_datadir, FN_LIBCHAR);
+ if (p)
+ *p= 0;
+ }
+ if (!p)
+ {
+ die("--datadir option not provided, and default datadir not found");
+ my_print_help(my_long_options);
+ }
+ strncat(default_datadir, "\\data", sizeof(default_datadir));
+ opt_datadir= default_datadir;
+ printf("Default data directory is %s\n",opt_datadir);
}
/* Print some help on errors */
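A worked example of the default-datadir derivation above, for a hypothetical install location:

  //  self_name                  = C:\mariadb\bin\mysql_install_db.exe
  //  strip the executable name -> C:\mariadb\bin
  //  strip the "bin" component -> C:\mariadb
  //  append "\data"            -> C:\mariadb\data   (used as opt_datadir)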
@@ -198,7 +218,7 @@ int main(int argc, char **argv)
die("database creation failed");
}
- printf("Creation of the database was successful");
+ printf("Creation of the database was successful\n");
return 0;
}
@@ -343,17 +363,19 @@ static int create_myini()
static const char update_root_passwd_part1[]=
- "UPDATE mysql.user SET Password = PASSWORD(";
+ "UPDATE mysql.global_priv SET priv=json_set(priv,"
+ "'$.plugin','mysql_native_password',"
+ "'$.authentication_string',PASSWORD(";
static const char update_root_passwd_part2[]=
- ") where User='root';\n";
+ ")) where User='root';\n";
static const char remove_default_user_cmd[]=
"DELETE FROM mysql.user where User='';\n";
static const char allow_remote_root_access_cmd[]=
- "CREATE TEMPORARY TABLE tmp_user LIKE user;\n"
- "INSERT INTO tmp_user SELECT * from user where user='root' "
+ "CREATE TEMPORARY TABLE tmp_user LIKE global_priv;\n"
+ "INSERT INTO tmp_user SELECT * from global_priv where user='root' "
" AND host='localhost';\n"
"UPDATE tmp_user SET host='%';\n"
- "INSERT INTO user SELECT * FROM tmp_user;\n"
+ "INSERT INTO global_priv SELECT * FROM tmp_user;\n"
"DROP TABLE tmp_user;\n";
static const char end_of_script[]="-- end.";
diff --git a/sql/mysql_upgrade_service.cc b/sql/mysql_upgrade_service.cc
index 9ea78accf44..58383df9c56 100644
--- a/sql/mysql_upgrade_service.cc
+++ b/sql/mysql_upgrade_service.cc
@@ -495,7 +495,7 @@ int main(int argc, char **argv)
old_mysqld_exe_exists?",this can take some time":"(skipped)");
char socket_param[FN_REFLEN];
- sprintf_s(socket_param, "--socket=mysql_upgrade_service_%d",
+ sprintf_s(socket_param, "--socket=mysql_upgrade_service_%u",
GetCurrentProcessId());
DWORD start_duration_ms = 0;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 4f5026fd3b5..dbe6055d387 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -72,8 +72,10 @@
#include "debug_sync.h"
#include "wsrep_mysqld.h"
#include "wsrep_var.h"
+#ifdef WITH_WSREP
#include "wsrep_thd.h"
#include "wsrep_sst.h"
+#endif /* WITH_WSREP */
#include "proxy_protocol.h"
#include "sql_callback.h"
@@ -117,13 +119,17 @@
#include <poll.h>
#endif
+#ifdef _WIN32
+#include <handle_connections_win.h>
+#endif
+
#include <my_service_manager.h>
#define mysqld_charset &my_charset_latin1
/* We have HAVE_valgrind below as this speeds up the shutdown of MySQL */
-#if defined(SIGNALS_DONT_BREAK_READ) || defined(HAVE_valgrind) && defined(__linux__)
+#if defined(HAVE_valgrind) && defined(__linux__)
#define HAVE_CLOSE_SERVER_SOCK 1
#endif
@@ -319,23 +325,6 @@ MY_TIMER_INFO sys_timer_info;
/* static variables */
#ifdef HAVE_PSI_INTERFACE
-#if (defined(_WIN32) || defined(HAVE_SMEM)) && !defined(EMBEDDED_LIBRARY)
-static PSI_thread_key key_thread_handle_con_namedpipes;
-static PSI_cond_key key_COND_handler_count;
-#endif /* _WIN32 || HAVE_SMEM && !EMBEDDED_LIBRARY */
-
-#if defined(HAVE_SMEM) && !defined(EMBEDDED_LIBRARY)
-static PSI_thread_key key_thread_handle_con_sharedmem;
-#endif /* HAVE_SMEM && !EMBEDDED_LIBRARY */
-
-#if (defined(_WIN32) || defined(HAVE_SMEM)) && !defined(EMBEDDED_LIBRARY)
-static PSI_thread_key key_thread_handle_con_sockets;
-#endif /* _WIN32 || HAVE_SMEM && !EMBEDDED_LIBRARY */
-
-#ifdef __WIN__
-static PSI_thread_key key_thread_handle_shutdown;
-#endif /* __WIN__ */
-
#ifdef HAVE_OPENSSL10
static PSI_rwlock_key key_rwlock_openssl;
#endif
@@ -356,7 +345,6 @@ PSI_statement_info stmt_info_rpl;
static bool lower_case_table_names_used= 0;
static bool max_long_data_size_used= false;
static bool volatile select_thread_in_use, signal_thread_in_use;
-static volatile bool ready_to_exit;
static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0;
static my_bool opt_short_log_format= 0, opt_silent_startup= 0;
bool my_disable_leak_check= false;
@@ -371,6 +359,7 @@ static char *character_set_filesystem_name;
static char *lc_messages;
static char *lc_time_names_name;
char *my_bind_addr_str;
+int server_socket_ai_family;
static char *default_collation_name;
char *default_storage_engine, *default_tmp_storage_engine;
char *enforced_storage_engine=NULL;
@@ -405,25 +394,10 @@ bool opt_endinfo, using_udf_functions;
my_bool locked_in_memory;
bool opt_using_transactions;
bool volatile abort_loop;
-bool volatile shutdown_in_progress;
uint volatile global_disable_checkpoint;
#if defined(_WIN32) && !defined(EMBEDDED_LIBRARY)
ulong slow_start_timeout;
#endif
-/*
- True if the bootstrap thread is running. Protected by LOCK_start_thread.
- Used in bootstrap() function to determine if the bootstrap thread
- has completed. Note, that we can't use 'thread_count' instead,
- since in 5.1, in presence of the Event Scheduler, there may be
- event threads running in parallel, so it's impossible to know
- what value of 'thread_count' is a sign of completion of the
- bootstrap thread.
-
- At the same time, we can't start the event scheduler after
- bootstrap either, since we want to be able to process event-related
- SQL commands in the init file and in --bootstrap mode.
-*/
-bool volatile in_bootstrap= FALSE;
/**
@brief 'grant_option' is used to indicate if privileges needs
to be checked, in which case the lock, LOCK_grant, is used
@@ -491,7 +465,8 @@ ulong delay_key_write_options;
uint protocol_version;
uint lower_case_table_names;
ulong tc_heuristic_recover= 0;
-int32 thread_count, service_thread_count;
+Atomic_counter<uint32_t> thread_count;
+bool shutdown_wait_for_slaves;
int32 slave_open_temp_tables;
ulong thread_created;
ulong back_log, connect_timeout, concurrency, server_id;
@@ -525,6 +500,7 @@ ulong specialflag=0;
ulong binlog_cache_use= 0, binlog_cache_disk_use= 0;
ulong binlog_stmt_cache_use= 0, binlog_stmt_cache_disk_use= 0;
ulong max_connections, max_connect_errors;
+uint max_password_errors;
ulong extra_max_connections;
uint max_digest_length= 0;
ulong slave_retried_transactions;
@@ -536,6 +512,8 @@ ulong feature_files_opened_with_delayed_keys= 0, feature_check_constraint= 0;
ulonglong denied_connections;
my_decimal decimal_zero;
long opt_secure_timestamp;
+uint default_password_lifetime;
+my_bool disconnect_on_expired_password;
/*
Maximum length of parameter value which can be set through
@@ -566,7 +544,6 @@ ulong slow_launch_threads = 0;
uint sync_binlog_period= 0, sync_relaylog_period= 0,
sync_relayloginfo_period= 0, sync_masterinfo_period= 0;
ulong expire_logs_days = 0;
-ulong rpl_recovery_rank=0;
/**
Soft upper limit for number of sp_head objects that can be stored
in the sp_cache for one connection.
@@ -580,6 +557,7 @@ ulong opt_binlog_commit_wait_count= 0;
ulong opt_binlog_commit_wait_usec= 0;
ulong opt_slave_parallel_max_queued= 131072;
my_bool opt_gtid_ignore_duplicates= FALSE;
+uint opt_gtid_cleanup_batch_size= 64;
const double log_10[] = {
1e000, 1e001, 1e002, 1e003, 1e004, 1e005, 1e006, 1e007, 1e008, 1e009,
@@ -671,29 +649,11 @@ Lt_creator lt_creator;
Ge_creator ge_creator;
Le_creator le_creator;
-MYSQL_FILE *bootstrap_file;
-int bootstrap_error;
-
-I_List<THD> threads;
+THD_list server_threads;
Rpl_filter* cur_rpl_filter;
Rpl_filter* global_rpl_filter;
Rpl_filter* binlog_filter;
-THD *first_global_thread()
-{
- if (threads.is_empty())
- return NULL;
- return threads.head();
-}
-
-THD *next_global_thread(THD *thd)
-{
- if (threads.is_last(thd))
- return NULL;
- struct ilink *next= thd->next;
- return static_cast<THD*>(next);
-}
-
struct system_variables global_system_variables;
/**
Following is just for options parsing, used with a difference against
@@ -723,29 +683,17 @@ SHOW_COMP_OPTION have_crypt, have_compress;
SHOW_COMP_OPTION have_profiling;
SHOW_COMP_OPTION have_openssl;
+static std::atomic<char*> shutdown_user;
+
/* Thread specific variables */
pthread_key(THD*, THR_THD);
/*
- LOCK_thread_count protects the following variables:
- thread_count Number of threads with THD that servers queries.
- threads Linked list of active THD's.
- The effect of this is that one can't unlink and
- delete a THD as long as one has locked
- LOCK_thread_count.
- ready_to_exit
- delayed_insert_threads
-*/
-mysql_mutex_t LOCK_thread_count;
-
-/*
  LOCK_start_thread is used to synchronize thread start and stop with
other threads.
It also protects these variables:
- handler_count
- in_bootstrap
select_thread_in_use
slave_init_thread_running
check_temp_dir() call
@@ -754,13 +702,12 @@ mysql_mutex_t LOCK_start_thread;
mysql_mutex_t LOCK_thread_cache;
mysql_mutex_t
- LOCK_status, LOCK_show_status, LOCK_error_log, LOCK_short_uuid_generator,
+ LOCK_status, LOCK_error_log, LOCK_short_uuid_generator,
LOCK_delayed_insert, LOCK_delayed_status, LOCK_delayed_create,
LOCK_crypt,
LOCK_global_system_variables,
- LOCK_user_conn, LOCK_slave_list,
+ LOCK_user_conn,
LOCK_connection_count, LOCK_error_messages, LOCK_slave_background;
-
mysql_mutex_t LOCK_stats, LOCK_global_user_client_stats,
LOCK_global_table_stats, LOCK_global_index_stats;
@@ -782,8 +729,10 @@ mysql_mutex_t LOCK_prepared_stmt_count;
mysql_mutex_t LOCK_des_key_file;
#endif
mysql_rwlock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
+mysql_rwlock_t LOCK_ssl_refresh;
+mysql_rwlock_t LOCK_all_status_vars;
mysql_prlock_t LOCK_system_variables_hash;
-mysql_cond_t COND_thread_count, COND_start_thread;
+mysql_cond_t COND_start_thread;
pthread_t signal_thread;
pthread_attr_t connection_attrib;
mysql_mutex_t LOCK_server_started;
@@ -803,7 +752,6 @@ char *opt_binlog_index_name=0;
/* Static variables */
-static volatile sig_atomic_t kill_in_progress;
my_bool opt_stack_trace;
my_bool opt_expect_abort= 0, opt_bootstrap= 0;
static my_bool opt_myisam_log;
@@ -923,7 +871,7 @@ PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_LOCK_manager,
key_LOCK_prepared_stmt_count,
key_LOCK_rpl_status, key_LOCK_server_started,
- key_LOCK_status, key_LOCK_show_status,
+ key_LOCK_status,
key_LOCK_system_variables_hash, key_LOCK_thd_data, key_LOCK_thd_kill,
key_LOCK_user_conn, key_LOCK_uuid_short_generator, key_LOG_LOCK_log,
key_master_info_data_lock, key_master_info_run_lock,
@@ -932,9 +880,9 @@ PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_rpl_group_info_sleep_lock,
key_relay_log_info_log_space_lock, key_relay_log_info_run_lock,
key_structure_guard_mutex, key_TABLE_SHARE_LOCK_ha_data,
- key_LOCK_error_messages, key_LOG_INFO_lock,
+ key_LOCK_error_messages,
key_LOCK_start_thread,
- key_LOCK_thread_count, key_LOCK_thread_cache,
+ key_LOCK_thread_cache,
key_PARTITION_LOCK_auto_inc;
PSI_mutex_key key_RELAYLOG_LOCK_index;
PSI_mutex_key key_LOCK_relaylog_end_pos;
@@ -996,7 +944,6 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_LOCK_rpl_status, "LOCK_rpl_status", PSI_FLAG_GLOBAL},
{ &key_LOCK_server_started, "LOCK_server_started", PSI_FLAG_GLOBAL},
{ &key_LOCK_status, "LOCK_status", PSI_FLAG_GLOBAL},
- { &key_LOCK_show_status, "LOCK_show_status", PSI_FLAG_GLOBAL},
{ &key_LOCK_system_variables_hash, "LOCK_system_variables_hash", PSI_FLAG_GLOBAL},
{ &key_LOCK_stats, "LOCK_stats", PSI_FLAG_GLOBAL},
{ &key_LOCK_global_user_client_stats, "LOCK_global_user_client_stats", PSI_FLAG_GLOBAL},
@@ -1028,8 +975,6 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_LOCK_after_binlog_sync, "LOCK_after_binlog_sync", PSI_FLAG_GLOBAL},
{ &key_LOCK_commit_ordered, "LOCK_commit_ordered", PSI_FLAG_GLOBAL},
{ &key_LOCK_slave_background, "LOCK_slave_background", PSI_FLAG_GLOBAL},
- { &key_LOG_INFO_lock, "LOG_INFO::lock", 0},
- { &key_LOCK_thread_count, "LOCK_thread_count", PSI_FLAG_GLOBAL},
{ &key_LOCK_thread_cache, "LOCK_thread_cache", PSI_FLAG_GLOBAL},
{ &key_PARTITION_LOCK_auto_inc, "HA_DATA_PARTITION::LOCK_auto_inc", 0},
{ &key_LOCK_slave_state, "LOCK_slave_state", 0},
@@ -1047,7 +992,10 @@ PSI_rwlock_key key_rwlock_LOCK_grant, key_rwlock_LOCK_logger,
key_rwlock_LOCK_sys_init_connect, key_rwlock_LOCK_sys_init_slave,
key_rwlock_LOCK_system_variables_hash, key_rwlock_query_cache_query_lock,
key_LOCK_SEQUENCE,
- key_rwlock_LOCK_vers_stats, key_rwlock_LOCK_stat_serial;
+ key_rwlock_LOCK_vers_stats, key_rwlock_LOCK_stat_serial,
+ key_rwlock_LOCK_ssl_refresh,
+ key_rwlock_THD_list,
+ key_rwlock_LOCK_all_status_vars;
static PSI_rwlock_info all_server_rwlocks[]=
{
@@ -1062,7 +1010,10 @@ static PSI_rwlock_info all_server_rwlocks[]=
{ &key_rwlock_LOCK_system_variables_hash, "LOCK_system_variables_hash", PSI_FLAG_GLOBAL},
{ &key_rwlock_query_cache_query_lock, "Query_cache_query::lock", 0},
{ &key_rwlock_LOCK_vers_stats, "Vers_field_stats::lock", 0},
- { &key_rwlock_LOCK_stat_serial, "TABLE_SHARE::LOCK_stat_serial", 0}
+ { &key_rwlock_LOCK_stat_serial, "TABLE_SHARE::LOCK_stat_serial", 0},
+ { &key_rwlock_LOCK_ssl_refresh, "LOCK_ssl_refresh", PSI_FLAG_GLOBAL },
+ { &key_rwlock_THD_list, "THD_list::lock", PSI_FLAG_GLOBAL },
+ { &key_rwlock_LOCK_all_status_vars, "LOCK_all_status_vars", PSI_FLAG_GLOBAL }
};
#ifdef HAVE_MMAP
@@ -1083,7 +1034,7 @@ PSI_cond_key key_BINLOG_COND_xid_list,
key_relay_log_info_start_cond, key_relay_log_info_stop_cond,
key_rpl_group_info_sleep_cond,
key_TABLE_SHARE_cond, key_user_level_lock_cond,
- key_COND_thread_count, key_COND_thread_cache, key_COND_flush_thread_cache,
+ key_COND_thread_cache, key_COND_flush_thread_cache,
key_COND_start_thread, key_COND_binlog_send,
key_BINLOG_COND_queue_busy;
PSI_cond_key key_RELAYLOG_COND_relay_log_updated,
@@ -1100,9 +1051,6 @@ PSI_cond_key key_COND_ack_receiver;
static PSI_cond_info all_server_conds[]=
{
-#if (defined(_WIN32) || defined(HAVE_SMEM)) && !defined(EMBEDDED_LIBRARY)
- { &key_COND_handler_count, "COND_handler_count", PSI_FLAG_GLOBAL},
-#endif /* _WIN32 || HAVE_SMEM && !EMBEDDED_LIBRARY */
#ifdef HAVE_MMAP
{ &key_PAGE_cond, "PAGE::cond", 0},
{ &key_COND_active, "TC_LOG_MMAP::COND_active", 0},
@@ -1136,7 +1084,6 @@ static PSI_cond_info all_server_conds[]=
{ &key_rpl_group_info_sleep_cond, "Rpl_group_info::sleep_cond", 0},
{ &key_TABLE_SHARE_cond, "TABLE_SHARE::cond", 0},
{ &key_user_level_lock_cond, "User_level_lock::cond", 0},
- { &key_COND_thread_count, "COND_thread_count", PSI_FLAG_GLOBAL},
{ &key_COND_thread_cache, "COND_thread_cache", PSI_FLAG_GLOBAL},
{ &key_COND_flush_thread_cache, "COND_flush_thread_cache", PSI_FLAG_GLOBAL},
{ &key_COND_rpl_thread, "COND_rpl_thread", 0},
@@ -1155,7 +1102,7 @@ static PSI_cond_info all_server_conds[]=
{ &key_TABLE_SHARE_COND_rotation, "TABLE_SHARE::COND_rotation", 0}
};
-PSI_thread_key key_thread_bootstrap, key_thread_delayed_insert,
+PSI_thread_key key_thread_delayed_insert,
key_thread_handle_manager, key_thread_main,
key_thread_one_connection, key_thread_signal_hand,
key_thread_slave_background, key_rpl_parallel_thread;
@@ -1163,23 +1110,6 @@ PSI_thread_key key_thread_ack_receiver;
static PSI_thread_info all_server_threads[]=
{
-#if (defined(_WIN32) || defined(HAVE_SMEM)) && !defined(EMBEDDED_LIBRARY)
- { &key_thread_handle_con_namedpipes, "con_named_pipes", PSI_FLAG_GLOBAL},
-#endif /* _WIN32 || HAVE_SMEM && !EMBEDDED_LIBRARY */
-
-#if defined(HAVE_SMEM) && !defined(EMBEDDED_LIBRARY)
- { &key_thread_handle_con_sharedmem, "con_shared_mem", PSI_FLAG_GLOBAL},
-#endif /* HAVE_SMEM && !EMBEDDED_LIBRARY */
-
-#if (defined(_WIN32) || defined(HAVE_SMEM)) && !defined(EMBEDDED_LIBRARY)
- { &key_thread_handle_con_sockets, "con_sockets", PSI_FLAG_GLOBAL},
-#endif /* _WIN32 || HAVE_SMEM && !EMBEDDED_LIBRARY */
-
-#ifdef __WIN__
- { &key_thread_handle_shutdown, "shutdown", PSI_FLAG_GLOBAL},
-#endif /* __WIN__ */
-
- { &key_thread_bootstrap, "bootstrap", PSI_FLAG_GLOBAL},
{ &key_thread_delayed_insert, "delayed_insert", 0},
{ &key_thread_handle_manager, "manager", PSI_FLAG_GLOBAL},
{ &key_thread_main, "main", PSI_FLAG_GLOBAL},
@@ -1332,10 +1262,10 @@ void Buffered_log::print()
switch(m_level)
{
case ERROR_LEVEL:
- sql_print_error("Buffered error: %s\n", m_message.c_ptr_safe());
+ sql_print_error("Buffered error: %s", m_message.c_ptr_safe());
break;
case WARNING_LEVEL:
- sql_print_warning("Buffered warning: %s\n", m_message.c_ptr_safe());
+ sql_print_warning("Buffered warning: %s", m_message.c_ptr_safe());
break;
case INFORMATION_LEVEL:
/*
@@ -1421,10 +1351,10 @@ void Buffered_logs::print()
/** Logs reported before a logger is available. */
static Buffered_logs buffered_logs;
-static MYSQL_SOCKET unix_sock, base_ip_sock, extra_ip_sock;
struct my_rnd_struct sql_rand; ///< used by sql_class.cc:THD::THD()
#ifndef EMBEDDED_LIBRARY
+MYSQL_SOCKET unix_sock, base_ip_sock, extra_ip_sock;
/**
Error reporter that buffer log messages.
@param level log message level
@@ -1480,27 +1410,18 @@ static pthread_t select_thread;
#undef getpid
#include <process.h>
-static mysql_cond_t COND_handler_count;
-static uint handler_count;
static bool start_mode=0, use_opt_args;
static int opt_argc;
static char **opt_argv;
#if !defined(EMBEDDED_LIBRARY)
-static HANDLE hEventShutdown;
+HANDLE hEventShutdown;
static char shutdown_event_name[40];
#include "nt_servc.h"
static NTService Service; ///< Service object for WinNT
#endif /* EMBEDDED_LIBRARY */
#endif /* __WIN__ */
-#ifdef _WIN32
-#include <sddl.h> /* ConvertStringSecurityDescriptorToSecurityDescriptor */
-static char pipe_name[512];
-static SECURITY_ATTRIBUTES saPipeSecurity;
-static HANDLE hPipe = INVALID_HANDLE_VALUE;
-#endif
-
#ifndef EMBEDDED_LIBRARY
bool mysqld_embedded=0;
#else
@@ -1521,11 +1442,7 @@ int deny_severity = LOG_WARNING;
ulong query_cache_min_res_unit= QUERY_CACHE_MIN_RESULT_DATA_SIZE;
Query_cache query_cache;
#endif
-#ifdef HAVE_SMEM
-const char *shared_memory_base_name= default_shared_memory_base_name;
-my_bool opt_enable_shared_memory;
-HANDLE smem_event_connect_request= 0;
-#endif
+
my_bool opt_use_ssl = 0;
char *opt_ssl_ca= NULL, *opt_ssl_capath= NULL, *opt_ssl_cert= NULL,
@@ -1577,19 +1494,11 @@ extern "C" my_bool mysqld_get_one_option(int, const struct my_option *, char *);
static int init_thread_environment();
static char *get_relative_path(const char *path);
static int fix_paths(void);
+#ifndef _WIN32
void handle_connections_sockets();
-#ifdef _WIN32
-pthread_handler_t handle_connections_sockets_thread(void *arg);
#endif
-pthread_handler_t kill_server_thread(void *arg);
-static void bootstrap(MYSQL_FILE *file);
+
static bool read_init_file(char *file_name);
-#ifdef _WIN32
-pthread_handler_t handle_connections_namedpipes(void *arg);
-#endif
-#ifdef HAVE_SMEM
-pthread_handler_t handle_connections_shared_memory(void *arg);
-#endif
pthread_handler_t handle_slave(void *arg);
static void clean_up(bool print_message);
static int test_if_case_insensitive(const char *dir_name);
@@ -1613,19 +1522,128 @@ static void end_ssl();
** Code to end mysqld
****************************************************************************/
-static void close_connections(void)
+/* common callee of two shutdown phases */
+static void kill_thread(THD *thd)
+{
+ if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data);
+ mysql_mutex_lock(&thd->LOCK_thd_kill);
+ if (thd->mysys_var)
+ {
+ thd->mysys_var->abort= 1;
+ mysql_mutex_lock(&thd->mysys_var->mutex);
+ if (thd->mysys_var->current_cond)
+ {
+ for (uint i= 0; i < 2; i++)
+ {
+ int ret= mysql_mutex_trylock(thd->mysys_var->current_mutex);
+ mysql_cond_broadcast(thd->mysys_var->current_cond);
+ if (!ret)
+ {
+ /* Thread has surely got the signal, unlock and abort */
+ mysql_mutex_unlock(thd->mysys_var->current_mutex);
+ break;
+ }
+ sleep(1);
+ }
+ }
+ mysql_mutex_unlock(&thd->mysys_var->mutex);
+ }
+ mysql_mutex_unlock(&thd->LOCK_thd_kill);
+ if (WSREP(thd)) mysql_mutex_unlock(&thd->LOCK_thd_data);
+}
+
+
+/**
+  Shutdown phase 1: kill everything except slave threads and binlog dump connections
+*/
+static my_bool kill_thread_phase_1(THD *thd, void *)
+{
+ DBUG_PRINT("quit", ("Informing thread %ld that it's time to die",
+ (ulong) thd->thread_id));
+ if (thd->slave_thread || thd->is_binlog_dump_thread())
+ return 0;
+
+ if (DBUG_EVALUATE_IF("only_kill_system_threads", !thd->system_thread, 0))
+ return 0;
+
+ thd->set_killed(KILL_SERVER_HARD);
+ MYSQL_CALLBACK(thread_scheduler, post_kill_notification, (thd));
+ kill_thread(thd);
+ return 0;
+}
+
+
+/**
+  Shutdown phase 2: kill the remaining binlog dump connections
+*/
+static my_bool kill_thread_phase_2(THD *thd, void *)
+{
+ if (shutdown_wait_for_slaves)
+ {
+ thd->set_killed(KILL_SERVER);
+ }
+ else
+ {
+ thd->set_killed(KILL_SERVER_HARD);
+ MYSQL_CALLBACK(thread_scheduler, post_kill_notification, (thd));
+ }
+ kill_thread(thd);
+ return 0;
+}
+
+
+/* associated with the kill thread phase 1 */
+static my_bool warn_threads_active_after_phase_1(THD *thd, void *)
+{
+ if (!thd->is_binlog_dump_thread())
+ sql_print_warning("%s: Thread %llu (user : '%s') did not exit\n", my_progname,
+ (ulonglong) thd->thread_id,
+ (thd->main_security_ctx.user ?
+ thd->main_security_ctx.user : ""));
+ return 0;
+}
+
+
+/* associated with the kill thread phase 2 */
+static my_bool warn_threads_active_after_phase_2(THD *thd, void *)
+{
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ // dump thread may not have yet (or already) current_linfo set
+ sql_print_warning("Dump thread %llu last sent to server %lu "
+ "binlog file:pos %s:%llu",
+ thd->thread_id, thd->variables.server_id,
+ thd->current_linfo ?
+ my_basename(thd->current_linfo->log_file_name) : "NULL",
+ thd->current_linfo ? thd->current_linfo->pos : 0);
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+
+ return 0;
+}
+
+
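Taken together, the helpers above are driven in two passes from close_connections() later in this patch; roughly:

  server_threads.iterate(kill_thread_phase_1);   // everyone except slave/binlog dump threads
  // ... wait for the ordinary connections to go away ...
  server_threads.iterate(kill_thread_phase_2);   // finally the binlog dump threads
  if (global_system_variables.log_warnings > 2)
    server_threads.iterate(warn_threads_active_after_phase_2);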
+/**
+  Breaks the connect loop in the main thread.
+
+  @note this function is responsible for setting abort_loop and breaking
+  poll() in the main thread. Shutdown as such is supposed to be performed by
+  the main thread itself.
+*/
+
+static void break_connect_loop()
{
#ifdef EXTRA_DEBUG
int count=0;
#endif
- DBUG_ENTER("close_connections");
- /* Clear thread cache */
- kill_cached_threads++;
- flush_thread_cache();
+ abort_loop= 1;
- /* kill connection thread */
-#if !defined(__WIN__)
+#if defined(__WIN__)
+ if (!SetEvent(hEventShutdown))
+ DBUG_PRINT("error", ("Got error: %ld from SetEvent", GetLastError()));
+#else
+ /* Avoid waiting for ourselves when thread-handling=no-threads. */
+ if (pthread_equal(pthread_self(), select_thread))
+ return;
DBUG_PRINT("quit", ("waiting for select thread: %lu",
(ulong)select_thread));
@@ -1646,7 +1664,7 @@ static void close_connections(void)
error= mysql_cond_timedwait(&COND_start_thread, &LOCK_start_thread,
&abstime);
if (error != EINTR)
- break;
+ break;
}
#ifdef EXTRA_DEBUG
if (error != 0 && error != ETIMEDOUT && !count++)
@@ -1656,7 +1674,58 @@ static void close_connections(void)
}
mysql_mutex_unlock(&LOCK_start_thread);
#endif /* __WIN__ */
+}
+
+
+/**
+  A wrapper around break_connect_loop().
+
+  Sets the shutdown user. This function may be called by multiple threads
+  concurrently, thus it performs a safe update of shutdown_user
+  (first thread wins).
+*/
+void kill_mysql(THD *thd)
+{
+ char user_host_buff[MAX_USER_HOST_SIZE + 1];
+ char *user, *expected_shutdown_user= 0;
+
+ make_user_name(thd, user_host_buff);
+
+ if ((user= my_strdup(user_host_buff, MYF(0))) &&
+ !shutdown_user.compare_exchange_strong(expected_shutdown_user,
+ user,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed))
+ {
+ my_free(user);
+ }
+
+ DBUG_EXECUTE_IF("mysql_admin_shutdown_wait_for_slaves",
+ thd->lex->is_shutdown_wait_for_slaves= true;);
+ DBUG_EXECUTE_IF("simulate_delay_at_shutdown",
+ {
+ DBUG_ASSERT(binlog_dump_thread_count == 3);
+ const char act[]=
+ "now "
+ "SIGNAL greetings_from_kill_mysql";
+ DBUG_ASSERT(!debug_sync_set_action(thd,
+ STRING_WITH_LEN(act)));
+ };);
+
+ if (thd->lex->is_shutdown_wait_for_slaves)
+ shutdown_wait_for_slaves= true;
+ break_connect_loop();
+}
+
+
+static void close_connections(void)
+{
+ DBUG_ENTER("close_connections");
+
+ /* Clear thread cache */
+ kill_cached_threads++;
+ flush_thread_cache();
/* Abort listening to new connections */
DBUG_PRINT("quit",("Closing sockets"));
@@ -1673,30 +1742,7 @@ static void close_connections(void)
extra_ip_sock= MYSQL_INVALID_SOCKET;
}
}
-#ifdef _WIN32
- if (hPipe != INVALID_HANDLE_VALUE && opt_enable_named_pipe)
- {
- HANDLE temp;
- DBUG_PRINT("quit", ("Closing named pipes") );
-
- /* Create connection to the handle named pipe handler to break the loop */
- if ((temp = CreateFile(pipe_name,
- GENERIC_READ | GENERIC_WRITE,
- 0,
- NULL,
- OPEN_EXISTING,
- 0,
- NULL )) != INVALID_HANDLE_VALUE)
- {
- WaitNamedPipe(pipe_name, 1000);
- DWORD dwMode = PIPE_READMODE_BYTE | PIPE_WAIT;
- SetNamedPipeHandleState(temp, &dwMode, NULL, NULL);
- CancelIo(temp);
- DisconnectNamedPipe(temp);
- CloseHandle(temp);
- }
- }
-#endif
+
#ifdef HAVE_SYS_UN_H
if (mysql_socket_getfd(unix_sock) != INVALID_SOCKET)
{
@@ -1712,56 +1758,7 @@ static void close_connections(void)
This will give the threads some time to gracefully abort their
statements and inform their clients that the server is about to die.
*/
-
- THD *tmp;
- mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
-
- I_List_iterator<THD> it(threads);
- while ((tmp=it++))
- {
- DBUG_PRINT("quit",("Informing thread %ld that it's time to die",
- (ulong) tmp->thread_id));
- /* We skip slave threads on this first loop through. */
- if (tmp->slave_thread)
- continue;
-
- /* cannot use 'continue' inside DBUG_EXECUTE_IF()... */
- if (DBUG_EVALUATE_IF("only_kill_system_threads", !tmp->system_thread, 0))
- continue;
-
-#ifdef WITH_WSREP
- /* skip wsrep system threads as well */
- if (WSREP(tmp) && (tmp->wsrep_exec_mode==REPL_RECV || tmp->wsrep_applier))
- continue;
-#endif
- tmp->set_killed(KILL_SERVER_HARD);
- MYSQL_CALLBACK(thread_scheduler, post_kill_notification, (tmp));
- mysql_mutex_lock(&tmp->LOCK_thd_kill);
- if (tmp->mysys_var)
- {
- tmp->mysys_var->abort=1;
- mysql_mutex_lock(&tmp->mysys_var->mutex);
- if (tmp->mysys_var->current_cond)
- {
- uint i;
- for (i=0; i < 2; i++)
- {
- int ret= mysql_mutex_trylock(tmp->mysys_var->current_mutex);
- mysql_cond_broadcast(tmp->mysys_var->current_cond);
- if (!ret)
- {
- /* Thread has surely got the signal, unlock and abort */
- mysql_mutex_unlock(tmp->mysys_var->current_mutex);
- break;
- }
- sleep(1);
- }
- }
- mysql_mutex_unlock(&tmp->mysys_var->mutex);
- }
- mysql_mutex_unlock(&tmp->LOCK_thd_kill);
- }
- mysql_mutex_unlock(&LOCK_thread_count); // For unlink from list
+ server_threads.iterate(kill_thread_phase_1);
Events::deinit();
slave_prepare_for_shutdown();
@@ -1782,85 +1779,40 @@ static void close_connections(void)
much smaller than even 2 seconds, this is only a safety fallback against
stuck threads so server shutdown is not held up forever.
*/
- DBUG_PRINT("info", ("thread_count: %d", thread_count));
+ DBUG_PRINT("info", ("thread_count: %u", uint32_t(thread_count)));
- for (int i= 0; *(volatile int32*) &thread_count && i < 1000; i++)
+ for (int i= 0; (thread_count - binlog_dump_thread_count) && i < 1000; i++)
my_sleep(20000);
- /*
- Force remaining threads to die by closing the connection to the client
- This will ensure that threads that are waiting for a command from the
- client on a blocking read call are aborted.
- */
-
- for (;;)
- {
- mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
- if (!(tmp=threads.get()))
- {
- mysql_mutex_unlock(&LOCK_thread_count);
- break;
- }
-#ifndef __bsdi__ // Bug in BSDI kernel
- if (tmp->vio_ok())
- {
- if (global_system_variables.log_warnings)
- sql_print_warning(ER_DEFAULT(ER_FORCING_CLOSE),my_progname,
- (ulong) tmp->thread_id,
- (tmp->main_security_ctx.user ?
- tmp->main_security_ctx.user : ""));
- /*
- close_connection() might need a valid current_thd
- for memory allocation tracking.
- */
- THD* save_thd= current_thd;
- set_current_thd(tmp);
- close_connection(tmp);
- set_current_thd(save_thd);
- }
-#endif
+ if (global_system_variables.log_warnings)
+ server_threads.iterate(warn_threads_active_after_phase_1);
#ifdef WITH_WSREP
- /*
- * WSREP_TODO:
- * this code block may turn out redundant. wsrep->disconnect()
- * should terminate slave threads gracefully, and we don't need
- * to signal them here.
- * The code here makes sure mysqld will not hang during shutdown
- * even if wsrep provider has problems in shutting down.
- */
- if (WSREP(tmp) && tmp->wsrep_exec_mode==REPL_RECV)
- {
- sql_print_information("closing wsrep system thread");
- tmp->set_killed(KILL_CONNECTION);
- MYSQL_CALLBACK(thread_scheduler, post_kill_notification, (tmp));
- if (tmp->mysys_var)
- {
- tmp->mysys_var->abort=1;
- mysql_mutex_lock(&tmp->mysys_var->mutex);
- if (tmp->mysys_var->current_cond)
- {
- mysql_mutex_lock(tmp->mysys_var->current_mutex);
- mysql_cond_broadcast(tmp->mysys_var->current_cond);
- mysql_mutex_unlock(tmp->mysys_var->current_mutex);
- }
- mysql_mutex_unlock(&tmp->mysys_var->mutex);
- }
- }
-#endif
- DBUG_PRINT("quit",("Unlocking LOCK_thread_count"));
- mysql_mutex_unlock(&LOCK_thread_count);
+ if (wsrep_inited == 1)
+ {
+ wsrep_deinit(true);
}
- end_slave();
+#endif
/* All threads has now been aborted */
- DBUG_PRINT("quit",("Waiting for threads to die (count=%u)",thread_count));
- mysql_mutex_lock(&LOCK_thread_count);
- while (thread_count || service_thread_count)
+ DBUG_PRINT("quit", ("Waiting for threads to die (count=%u)",
+ uint32_t(thread_count)));
+
+ while (thread_count - binlog_dump_thread_count)
+ my_sleep(1000);
+
+ /* Kill phase 2 */
+ server_threads.iterate(kill_thread_phase_2);
+ for (uint64 i= 0; thread_count; i++)
{
- mysql_cond_wait(&COND_thread_count, &LOCK_thread_count);
- DBUG_PRINT("quit",("One thread died (count=%u)",thread_count));
+ /*
+ This time the warnings are emitted within the loop to provide a
+      This time the warnings are emitted within the loop to provide a
+      running view of the shutdown progress in the error log.
+ if (global_system_variables.log_warnings > 2 && i % 60000 == 0)
+ server_threads.iterate(warn_threads_active_after_phase_2);
+ my_sleep(1000);
}
- mysql_mutex_unlock(&LOCK_thread_count);
+ /* End of kill phase 2 */
DBUG_PRINT("quit",("close_connections thread"));
DBUG_VOID_RETURN;
@@ -1902,187 +1854,6 @@ static void close_server_sock()
#endif /*EMBEDDED_LIBRARY*/
-/**
- Set shutdown user
-
- @note this function may be called by multiple threads concurrently, thus
- it performs safe update of shutdown_user (first thread wins).
-*/
-
-static volatile char *shutdown_user;
-static void set_shutdown_user(THD *thd)
-{
- char user_host_buff[MAX_USER_HOST_SIZE + 1];
- char *user, *expected_shutdown_user= 0;
-
- make_user_name(thd, user_host_buff);
-
- if ((user= my_strdup(user_host_buff, MYF(0))) &&
- !my_atomic_casptr((void **) &shutdown_user,
- (void **) &expected_shutdown_user, user))
- my_free(user);
-}
-
-
-void kill_mysql(THD *thd)
-{
- DBUG_ENTER("kill_mysql");
-
- if (thd)
- set_shutdown_user(thd);
-
-#if defined(SIGNALS_DONT_BREAK_READ) && !defined(EMBEDDED_LIBRARY)
- abort_loop=1; // Break connection loops
- close_server_sock(); // Force accept to wake up
-#endif
-
-#if defined(__WIN__)
-#if !defined(EMBEDDED_LIBRARY)
- {
- if (!SetEvent(hEventShutdown))
- {
- DBUG_PRINT("error",("Got error: %ld from SetEvent",GetLastError()));
- }
- /*
- or:
- HANDLE hEvent=OpenEvent(0, FALSE, "MySqlShutdown");
- SetEvent(hEventShutdown);
- CloseHandle(hEvent);
- */
- }
-#endif
-#elif defined(HAVE_PTHREAD_KILL)
- if (pthread_kill(signal_thread, MYSQL_KILL_SIGNAL))
- {
- DBUG_PRINT("error",("Got error %d from pthread_kill",errno)); /* purecov: inspected */
- }
-#elif !defined(SIGNALS_DONT_BREAK_READ)
- kill(current_pid, MYSQL_KILL_SIGNAL);
-#endif
- DBUG_PRINT("quit",("After pthread_kill"));
- shutdown_in_progress=1; // Safety if kill didn't work
-#ifdef SIGNALS_DONT_BREAK_READ
- if (!kill_in_progress)
- {
- pthread_t tmp;
- int error;
- abort_loop=1;
- if (unlikely((error= mysql_thread_create(0, /* Not instrumented */
- &tmp, &connection_attrib,
- kill_server_thread, (void*) 0))))
- sql_print_error("Can't create thread to kill server (errno= %d).",
- error);
- }
-#endif
- DBUG_VOID_RETURN;
-}
-
-/**
- Force server down. Kill all connections and threads and exit.
-
- @param sig_ptr Signal number that caused kill_server to be called.
-
- @note
- A signal number of 0 mean that the function was not called
- from a signal handler and there is thus no signal to block
- or stop, we just want to kill the server.
-*/
-
-#if !defined(__WIN__)
-static void *kill_server(void *sig_ptr)
-#define RETURN_FROM_KILL_SERVER return 0
-#else
-static void __cdecl kill_server(int sig_ptr)
-#define RETURN_FROM_KILL_SERVER return
-#endif
-{
- DBUG_ENTER("kill_server");
-#ifndef EMBEDDED_LIBRARY
- int sig=(int) (long) sig_ptr; // This is passed a int
- // if there is a signal during the kill in progress, ignore the other
- if (kill_in_progress) // Safety
- {
- DBUG_LEAVE;
- RETURN_FROM_KILL_SERVER;
- }
- kill_in_progress=TRUE;
- abort_loop=1; // This should be set
- if (sig != 0) // 0 is not a valid signal number
- my_sigset(sig, SIG_IGN); /* purify inspected */
- if (sig == MYSQL_KILL_SIGNAL || sig == 0)
- {
- char *user= (char *) my_atomic_loadptr((void**) &shutdown_user);
- sql_print_information(ER_DEFAULT(ER_NORMAL_SHUTDOWN), my_progname,
- user ? user : "unknown");
- if (user)
- my_free(user);
- }
- else
- sql_print_error(ER_DEFAULT(ER_GOT_SIGNAL),my_progname,sig); /* purecov: inspected */
-
-#ifdef HAVE_SMEM
- /*
- Send event to smem_event_connect_request for aborting
- */
- if (opt_enable_shared_memory)
- {
- if (!SetEvent(smem_event_connect_request))
- {
- DBUG_PRINT("error",
- ("Got error: %ld from SetEvent of smem_event_connect_request",
- GetLastError()));
- }
- }
-#endif
-
- /* Stop wsrep threads in case they are running. */
- if (wsrep_running_threads > 0)
- {
- wsrep_stop_replication(NULL);
- }
-
- close_connections();
-
- if (wsrep_inited == 1)
- wsrep_deinit(true);
-
- if (sig != MYSQL_KILL_SIGNAL &&
- sig != 0)
- unireg_abort(1); /* purecov: inspected */
- else
- unireg_end();
-
- /* purecov: begin deadcode */
- DBUG_LEAVE; // Must match DBUG_ENTER()
- my_thread_end();
- pthread_exit(0);
- /* purecov: end */
-
- RETURN_FROM_KILL_SERVER; // Avoid compiler warnings
-
-#else /* EMBEDDED_LIBRARY*/
-
- DBUG_LEAVE;
- RETURN_FROM_KILL_SERVER;
-
-#endif /* EMBEDDED_LIBRARY */
-}
-
-
-#if defined(USE_ONE_SIGNAL_HAND)
-pthread_handler_t kill_server_thread(void *arg __attribute__((unused)))
-{
- my_thread_init(); // Initialize new thread
- kill_server(0);
- /* purecov: begin deadcode */
- my_thread_end();
- pthread_exit(0);
- return 0;
- /* purecov: end */
-}
-#endif
-
-
extern "C" sig_handler print_signal_warning(int sig)
{
if (global_system_variables.log_warnings)
@@ -2098,42 +1869,6 @@ extern "C" sig_handler print_signal_warning(int sig)
}
#ifndef EMBEDDED_LIBRARY
-
-static void init_error_log_mutex()
-{
- mysql_mutex_init(key_LOCK_error_log, &LOCK_error_log, MY_MUTEX_INIT_FAST);
-}
-
-
-static void clean_up_error_log_mutex()
-{
- mysql_mutex_destroy(&LOCK_error_log);
-}
-
-
-/**
- cleanup all memory and end program nicely.
-
- If SIGNALS_DONT_BREAK_READ is defined, this function is called
- by the main thread. To get MySQL to shut down nicely in this case
- (Mac OS X) we have to call exit() instead if pthread_exit().
-
- @note
- This function never returns.
-*/
-void unireg_end(void)
-{
- clean_up(1);
- my_thread_end();
- sd_notify(0, "STATUS=MariaDB server is down");
-#if defined(SIGNALS_DONT_BREAK_READ)
- exit(0);
-#else
- pthread_exit(0); // Exit is in main thread
-#endif
-}
-
-
extern "C" void unireg_abort(int exit_code)
{
DBUG_ENTER("unireg_abort");
@@ -2141,29 +1876,32 @@ extern "C" void unireg_abort(int exit_code)
if (opt_help)
usage();
if (exit_code)
- sql_print_error("Aborting\n");
+ sql_print_error("Aborting");
/* Don't write more notes to the log to not hide error message */
disable_log_notes= 1;
#ifdef WITH_WSREP
- /* Check if wsrep class is used. If yes, then cleanup wsrep */
- if (wsrep)
+ if (WSREP_ON &&
+ Wsrep_server_state::instance().state() != wsrep::server_state::s_disconnected)
{
/*
      This is an abort situation; we cannot expect to gracefully close all
      wsrep threads here, we can only disconnect from the service
*/
wsrep_close_client_connections(FALSE);
- shutdown_in_progress= 1;
- wsrep->disconnect(wsrep);
+ Wsrep_server_state::instance().disconnect();
WSREP_INFO("Service disconnected.");
wsrep_close_threads(NULL); /* this won't close all threads */
sleep(1); /* so give some time to exit for those which can */
WSREP_INFO("Some threads may fail to exit.");
-
+ }
+ if (WSREP_ON)
+ {
/* In bootstrap mode we deinitialize wsrep here. */
- if (opt_bootstrap && wsrep_inited)
- wsrep_deinit(true);
+ if (opt_bootstrap || wsrep_recovery)
+ {
+ if (wsrep_inited) wsrep_deinit(true);
+ }
}
#endif // WITH_WSREP
@@ -2191,9 +1929,11 @@ static void mysqld_exit(int exit_code)
rpl_deinit_gtid_waiting();
rpl_deinit_gtid_slave_state();
wait_for_signal_thread_to_end();
+#ifdef WITH_WSREP
+ wsrep_deinit_server();
+#endif /* WITH_WSREP */
mysql_audit_finalize();
clean_up_mutexes();
- clean_up_error_log_mutex();
my_end((opt_endinfo ? MY_CHECK_ERROR | MY_GIVE_INFO : 0));
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
shutdown_performance_schema(); // we do it as late as possible
@@ -2218,7 +1958,7 @@ static void mysqld_exit(int exit_code)
#endif /* !EMBEDDED_LIBRARY */
-void clean_up(bool print_message)
+static void clean_up(bool print_message)
{
DBUG_PRINT("exit",("clean_up"));
if (cleanup_done++)
@@ -2285,9 +2025,6 @@ void clean_up(bool print_message)
free_global_index_stats();
delete_dynamic(&all_options); // This should be empty
free_all_rpl_filters();
-#ifdef HAVE_REPLICATION
- end_slave_list();
-#endif
wsrep_thr_deinit();
my_uuid_end();
delete type_handler_data;
@@ -2317,16 +2054,6 @@ void clean_up(bool print_message)
sys_var_end();
free_charsets();
- /*
- Signal mysqld_main() that it can exit
- do the broadcast inside the lock to ensure that my_end() is not called
- during broadcast()
- */
- mysql_mutex_lock(&LOCK_thread_count);
- ready_to_exit=1;
- mysql_cond_broadcast(&COND_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
-
my_free(const_cast<char*>(log_bin_basename));
my_free(const_cast<char*>(log_bin_index));
#ifndef EMBEDDED_LIBRARY
@@ -2369,12 +2096,12 @@ static void wait_for_signal_thread_to_end()
static void clean_up_mutexes()
{
DBUG_ENTER("clean_up_mutexes");
+ server_threads.destroy();
mysql_rwlock_destroy(&LOCK_grant);
- mysql_mutex_destroy(&LOCK_thread_count);
mysql_mutex_destroy(&LOCK_thread_cache);
mysql_mutex_destroy(&LOCK_start_thread);
mysql_mutex_destroy(&LOCK_status);
- mysql_mutex_destroy(&LOCK_show_status);
+ mysql_rwlock_destroy(&LOCK_all_status_vars);
mysql_mutex_destroy(&LOCK_delayed_insert);
mysql_mutex_destroy(&LOCK_delayed_status);
mysql_mutex_destroy(&LOCK_delayed_create);
@@ -2398,6 +2125,7 @@ static void clean_up_mutexes()
mysql_mutex_destroy(&LOCK_rpl_status);
#endif /* HAVE_REPLICATION */
mysql_mutex_destroy(&LOCK_active_mi);
+ mysql_rwlock_destroy(&LOCK_ssl_refresh);
mysql_rwlock_destroy(&LOCK_sys_init_connect);
mysql_rwlock_destroy(&LOCK_sys_init_slave);
mysql_mutex_destroy(&LOCK_global_system_variables);
@@ -2405,7 +2133,6 @@ static void clean_up_mutexes()
mysql_mutex_destroy(&LOCK_short_uuid_generator);
mysql_mutex_destroy(&LOCK_prepared_stmt_count);
mysql_mutex_destroy(&LOCK_error_messages);
- mysql_cond_destroy(&COND_thread_count);
mysql_cond_destroy(&COND_thread_cache);
mysql_cond_destroy(&COND_start_thread);
mysql_cond_destroy(&COND_flush_thread_cache);
@@ -2417,6 +2144,9 @@ static void clean_up_mutexes()
mysql_mutex_destroy(&LOCK_commit_ordered);
mysql_mutex_destroy(&LOCK_slave_background);
mysql_cond_destroy(&COND_slave_background);
+#ifndef EMBEDDED_LIBRARY
+ mysql_mutex_destroy(&LOCK_error_log);
+#endif
DBUG_VOID_RETURN;
}
@@ -2426,9 +2156,6 @@ static void clean_up_mutexes()
****************************************************************************/
#ifdef EMBEDDED_LIBRARY
-static void set_ports()
-{
-}
void close_connection(THD *thd, uint sql_errno)
{
}
@@ -2625,6 +2352,7 @@ static MYSQL_SOCKET activate_tcp_port(uint port)
}
else
{
+ server_socket_ai_family= a->ai_family;
sql_print_information("Server socket created on IP: '%s'.",
(const char *) ip_addr);
break;
@@ -2751,53 +2479,16 @@ static void network_init(void)
extra_ip_sock= activate_tcp_port(mysqld_extra_port);
}
-#ifdef _WIN32
- /* create named pipe */
- if (mysqld_unix_port[0] && !opt_bootstrap &&
- opt_enable_named_pipe)
- {
-
- strxnmov(pipe_name, sizeof(pipe_name)-1, "\\\\.\\pipe\\",
- mysqld_unix_port, NullS);
- /*
- Create a security descriptor for pipe.
- - Use low integrity level, so that it is possible to connect
- from any process.
- - Give Everyone read/write access to pipe.
- */
- if (!ConvertStringSecurityDescriptorToSecurityDescriptor(
- "S:(ML;; NW;;; LW) D:(A;; FRFW;;; WD)",
- SDDL_REVISION_1, &saPipeSecurity.lpSecurityDescriptor, NULL))
- {
- sql_perror("Can't start server : Initialize security descriptor");
- unireg_abort(1);
- }
- saPipeSecurity.nLength = sizeof(SECURITY_ATTRIBUTES);
- saPipeSecurity.bInheritHandle = FALSE;
- if ((hPipe= CreateNamedPipe(pipe_name,
- PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE,
- PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
- PIPE_UNLIMITED_INSTANCES,
- (int) global_system_variables.net_buffer_length,
- (int) global_system_variables.net_buffer_length,
- NMPWAIT_USE_DEFAULT_WAIT,
- &saPipeSecurity)) == INVALID_HANDLE_VALUE)
- {
- sql_perror("Create named pipe failed");
- unireg_abort(1);
- }
- }
-#endif
-
#if defined(HAVE_SYS_UN_H)
/*
** Create the UNIX socket
*/
if (mysqld_unix_port[0] && !opt_bootstrap)
{
+ size_t port_len;
DBUG_PRINT("general",("UNIX Socket is %s",mysqld_unix_port));
- if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1))
+ if ((port_len= strlen(mysqld_unix_port)) > sizeof(UNIXaddr.sun_path) - 1)
{
sql_print_error("The socket file path is too long (> %u): %s",
(uint) sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
@@ -2815,14 +2506,26 @@ static void network_init(void)
bzero((char*) &UNIXaddr, sizeof(UNIXaddr));
UNIXaddr.sun_family = AF_UNIX;
strmov(UNIXaddr.sun_path, mysqld_unix_port);
- (void) unlink(mysqld_unix_port);
+#if defined(__linux__)
+ /* Abstract socket */
+ if (mysqld_unix_port[0] == '@')
+ {
+ UNIXaddr.sun_path[0]= '\0';
+ port_len+= offsetof(struct sockaddr_un, sun_path);
+ }
+ else
+#endif
+ {
+ (void) unlink(mysqld_unix_port);
+ port_len= sizeof(UNIXaddr);
+ }
arg= 1;
(void) mysql_socket_setsockopt(unix_sock,SOL_SOCKET,SO_REUSEADDR,
(char*)&arg, sizeof(arg));
umask(0);
if (mysql_socket_bind(unix_sock,
reinterpret_cast<struct sockaddr *>(&UNIXaddr),
- sizeof(UNIXaddr)) < 0)
+ port_len) < 0)
{
sql_perror("Can't start server : Bind on unix socket"); /* purecov: tested */
sql_print_error("Do you already have another mysqld server running on socket: %s ?",mysqld_unix_port);
@@ -2876,7 +2579,6 @@ void close_connection(THD *thd, uint sql_errno)
mysql_audit_notify_connection_disconnect(thd, sql_errno);
DBUG_VOID_RETURN;
}
-#endif /* EMBEDDED_LIBRARY */
/** Called when mysqld is aborted with ^C */
@@ -2884,11 +2586,12 @@ void close_connection(THD *thd, uint sql_errno)
extern "C" sig_handler end_mysqld_signal(int sig __attribute__((unused)))
{
DBUG_ENTER("end_mysqld_signal");
- /* Don't call kill_mysql() if signal thread is not running */
+ /* Don't kill if signal thread is not running */
if (signal_thread_in_use)
- kill_mysql(); // Take down mysqld nicely
+ break_connect_loop(); // Take down mysqld nicely
DBUG_VOID_RETURN; /* purecov: deadcode */
}
+#endif /* EMBEDDED_LIBRARY */
/*
Decrease number of connections
@@ -2906,30 +2609,6 @@ void dec_connection_count(scheduler_functions *scheduler)
/*
- Send a signal to unblock close_conneciton() if there is no more
- threads running with a THD attached
-
- It's safe to check for thread_count and service_thread_count outside
- of a mutex as we are only interested to see if they where decremented
- to 0 by a previous unlink_thd() call.
-
- We should only signal COND_thread_count if both variables are 0,
- false positives are ok.
-*/
-
-void signal_thd_deleted()
-{
- if (!thread_count && !service_thread_count)
- {
- /* Signal close_connections() that all THD's are freed */
- mysql_mutex_lock(&LOCK_thread_count);
- mysql_cond_broadcast(&COND_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
- }
-}
-
-
-/*
Unlink thd from global list of available connections
SYNOPSIS
@@ -2944,14 +2623,16 @@ void unlink_thd(THD *thd)
thd->cleanup();
thd->add_status_to_global();
- unlink_not_visible_thd(thd);
+ server_threads.erase(thd);
+#ifdef WITH_WSREP
/*
Do not decrement when its wsrep system thread. wsrep_applier is set for
applier as well as rollbacker threads.
*/
- if (IF_WSREP(!thd->wsrep_applier, 1))
- dec_connection_count(thd->scheduler);
+ if (!thd->wsrep_applier)
+#endif /* WITH_WSREP */
+ dec_connection_count(thd->scheduler);
thd->free_connection();
@@ -3053,7 +2734,7 @@ static bool cache_thread(THD *thd)
thd->thr_create_utime= microsecond_interval_timer();
thd->start_utime= thd->thr_create_utime;
- add_to_active_threads(thd);
+ server_threads.insert(thd);
DBUG_RETURN(1);
}
}
@@ -3155,7 +2836,7 @@ static BOOL WINAPI console_event_handler( DWORD type )
*/
#ifndef EMBEDDED_LIBRARY
if(hEventShutdown)
- kill_mysql();
+ break_connect_loop();
else
#endif
sql_print_warning("CTRL-C ignored during startup");
@@ -3494,6 +3175,18 @@ static void start_signal_handler(void)
}
+#if defined(USE_ONE_SIGNAL_HAND)
+pthread_handler_t kill_server_thread(void *arg __attribute__((unused)))
+{
+ my_thread_init(); // Initialize new thread
+ break_connect_loop();
+ my_thread_end();
+ pthread_exit(0);
+ return 0;
+}
+#endif
+
+
/** This thread handles all signals and alarms. */
/* ARGSUSED */
pthread_handler_t signal_hand(void *arg __attribute__((unused)))
@@ -3547,14 +3240,10 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
(void) pthread_sigmask(SIG_BLOCK,&set,NULL);
for (;;)
{
- int error; // Used when debugging
- if (shutdown_in_progress && !abort_loop)
- {
- sig= SIGTERM;
- error=0;
- }
- else
- while ((error=my_sigwait(&set,&sig)) == EINTR) ;
+ int error;
+ int origin;
+
+ while ((error= my_sigwait(&set, &sig, &origin)) == EINTR) /* no-op */;
if (cleanup_done)
{
DBUG_PRINT("quit",("signal_handler: calling my_thread_end()"));
@@ -3577,7 +3266,6 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
DBUG_PRINT("info",("Got signal: %d abort_loop: %d",sig,abort_loop));
if (!abort_loop)
{
- abort_loop=1; // mark abort for threads
/* Delete the instrumentation for the signal thread */
PSI_CALL_delete_current_thread();
#ifdef USE_ONE_SIGNAL_HAND
@@ -3589,12 +3277,13 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
sql_print_error("Can't create thread to kill server (errno= %d)",
error);
#else
- kill_server((void*) sig); // MIT THREAD has a alarm thread
+ my_sigset(sig, SIG_IGN);
+ break_connect_loop(); // MIT THREAD has an alarm thread
#endif
}
break;
case SIGHUP:
- if (!abort_loop)
+ if (!abort_loop && origin != SI_KERNEL)
{
int not_used;
mysql_print_status(); // Print some debug info
@@ -3603,21 +3292,14 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
REFRESH_GRANT |
REFRESH_THREADS | REFRESH_HOSTS),
(TABLE_LIST*) 0, &not_used); // Flush logs
- }
- /* reenable logs after the options were reloaded */
- if (log_output_options & LOG_NONE)
- {
- logger.set_handlers(LOG_FILE,
- global_system_variables.sql_log_slow ?
- LOG_TABLE : LOG_NONE,
- opt_log ? LOG_TABLE : LOG_NONE);
- }
- else
- {
- logger.set_handlers(LOG_FILE,
- global_system_variables.sql_log_slow ?
- log_output_options : LOG_NONE,
- opt_log ? log_output_options : LOG_NONE);
+
+ /* re-enable logs after the options were reloaded */
+ ulonglong fixed_log_output_options=
+ log_output_options & LOG_NONE ? LOG_TABLE : log_output_options;
+
+ logger.set_handlers(LOG_FILE, global_system_variables.sql_log_slow
+ ? fixed_log_output_options : LOG_NONE,
+ opt_log ? fixed_log_output_options : LOG_NONE);
}
break;
#ifdef USE_ONE_SIGNAL_HAND
@@ -3651,7 +3333,7 @@ extern "C" void my_message_sql(uint error, const char *str, myf MyFlags);
void my_message_sql(uint error, const char *str, myf MyFlags)
{
- THD *thd= current_thd;
+ THD *thd= MyFlags & ME_ERROR_LOG_ONLY ? NULL : current_thd;
Sql_condition::enum_warning_level level;
sql_print_message_func func;
DBUG_ENTER("my_message_sql");
@@ -3660,13 +3342,15 @@ void my_message_sql(uint error, const char *str, myf MyFlags)
DBUG_ASSERT(str != NULL);
DBUG_ASSERT(error != 0);
+ DBUG_ASSERT((MyFlags & ~(ME_BELL | ME_ERROR_LOG | ME_ERROR_LOG_ONLY |
+ ME_NOTE | ME_WARNING | ME_FATAL)) == 0);
- if (MyFlags & ME_JUST_INFO)
+ if (MyFlags & ME_NOTE)
{
level= Sql_condition::WARN_LEVEL_NOTE;
func= sql_print_information;
}
- else if (MyFlags & ME_JUST_WARNING)
+ else if (MyFlags & ME_WARNING)
{
level= Sql_condition::WARN_LEVEL_WARN;
func= sql_print_warning;
@@ -3679,7 +3363,7 @@ void my_message_sql(uint error, const char *str, myf MyFlags)
if (likely(thd))
{
- if (unlikely(MyFlags & ME_FATALERROR))
+ if (unlikely(MyFlags & ME_FATAL))
thd->is_fatal_error= 1;
(void) thd->raise_condition(error, NULL, level, str);
}
@@ -3689,7 +3373,7 @@ void my_message_sql(uint error, const char *str, myf MyFlags)
/* When simulating OOM, skip writing to error log to avoid mtr errors */
DBUG_EXECUTE_IF("simulate_out_of_memory", DBUG_VOID_RETURN;);
- if (unlikely(!thd) || thd->log_all_errors || (MyFlags & ME_NOREFRESH))
+ if (unlikely(!thd) || thd->log_all_errors || (MyFlags & ME_ERROR_LOG))
(*func)("%s: %s", my_progname_short, str); /* purecov: inspected */
DBUG_VOID_RETURN;
}
@@ -3703,23 +3387,6 @@ void *my_str_malloc_mysqld(size_t size)
}
-#ifdef __WIN__
-
-pthread_handler_t handle_shutdown(void *arg)
-{
- MSG msg;
- my_thread_init();
-
- /* this call should create the message queue for this thread */
- PeekMessage(&msg, NULL, 1, 65534,PM_NOREMOVE);
-#if !defined(EMBEDDED_LIBRARY)
- if (WaitForSingleObject(hEventShutdown,INFINITE)==WAIT_OBJECT_0)
-#endif /* EMBEDDED_LIBRARY */
- kill_server(MYSQL_KILL_SIGNAL);
- return 0;
-}
-#endif
-
#include <mysqld_default_groups.h>
#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY)
@@ -3827,6 +3494,8 @@ SHOW_VAR com_status_vars[]= {
{"alter_user", STMT_STATUS(SQLCOM_ALTER_USER)},
{"analyze", STMT_STATUS(SQLCOM_ANALYZE)},
{"assign_to_keycache", STMT_STATUS(SQLCOM_ASSIGN_TO_KEYCACHE)},
+ {"backup", STMT_STATUS(SQLCOM_BACKUP)},
+ {"backup_lock", STMT_STATUS(SQLCOM_BACKUP_LOCK)},
{"begin", STMT_STATUS(SQLCOM_BEGIN)},
{"binlog", STMT_STATUS(SQLCOM_BINLOG_BASE64_EVENT)},
{"call_procedure", STMT_STATUS(SQLCOM_CALL)},
@@ -4122,8 +3791,27 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific)
else
update_global_memory_status(size);
}
+
+int json_escape_string(const char *str,const char *str_end,
+ char *json, char *json_end)
+{
+ return json_escape(system_charset_info,
+ (const uchar *) str, (const uchar *) str_end,
+ &my_charset_utf8mb4_bin,
+ (uchar *) json, (uchar *) json_end);
+}
+
+
+int json_unescape_json(const char *json_str, const char *json_end,
+ char *res, char *res_end)
+{
+ return json_unescape(&my_charset_utf8mb4_bin,
+ (const uchar *) json_str, (const uchar *) json_end,
+ system_charset_info, (uchar *) res, (uchar *) res_end);
}
+} /*extern "C"*/
+
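The two wrappers above give C-linkage callers a way to convert between system_charset_info text and utf8mb4 JSON text. The return-value convention is not visible in this hunk; the helper below assumes, and labels as an assumption, that a negative result signals failure:

    /* Hypothetical helper; the "negative return means error" convention is an
       assumption, not taken from this diff. */
    static bool escape_to_json(const char *src, size_t src_len,
                               char *dst, size_t dst_size, int *out_len)
    {
      *out_len= json_escape_string(src, src + src_len, dst, dst + dst_size);
      return *out_len >= 0;
    }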
/**
Create a replication file name or base for file names.
@@ -4786,11 +4474,10 @@ static int init_common_variables()
static int init_thread_environment()
{
DBUG_ENTER("init_thread_environment");
- mysql_mutex_init(key_LOCK_thread_count, &LOCK_thread_count, MY_MUTEX_INIT_FAST);
+ server_threads.init();
mysql_mutex_init(key_LOCK_thread_cache, &LOCK_thread_cache, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_start_thread, &LOCK_start_thread, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_status, &LOCK_status, MY_MUTEX_INIT_FAST);
- mysql_mutex_init(key_LOCK_show_status, &LOCK_show_status, MY_MUTEX_INIT_SLOW);
mysql_mutex_init(key_LOCK_delayed_insert,
&LOCK_delayed_insert, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_delayed_status,
@@ -4803,7 +4490,6 @@ static int init_thread_environment()
mysql_mutex_init(key_LOCK_global_system_variables,
&LOCK_global_system_variables, MY_MUTEX_INIT_FAST);
mysql_mutex_record_order(&LOCK_active_mi, &LOCK_global_system_variables);
- mysql_mutex_record_order(&LOCK_status, &LOCK_thread_count);
mysql_prlock_init(key_rwlock_LOCK_system_variables_hash,
&LOCK_system_variables_hash);
mysql_mutex_init(key_LOCK_prepared_stmt_count,
@@ -4850,8 +4536,9 @@ static int init_thread_environment()
#endif /* HAVE_OPENSSL */
mysql_rwlock_init(key_rwlock_LOCK_sys_init_connect, &LOCK_sys_init_connect);
mysql_rwlock_init(key_rwlock_LOCK_sys_init_slave, &LOCK_sys_init_slave);
+ mysql_rwlock_init(key_rwlock_LOCK_ssl_refresh, &LOCK_ssl_refresh);
mysql_rwlock_init(key_rwlock_LOCK_grant, &LOCK_grant);
- mysql_cond_init(key_COND_thread_count, &COND_thread_count, NULL);
+ mysql_rwlock_init(key_rwlock_LOCK_all_status_vars, &LOCK_all_status_vars);
mysql_cond_init(key_COND_thread_cache, &COND_thread_cache, NULL);
mysql_cond_init(key_COND_start_thread, &COND_start_thread, NULL);
mysql_cond_init(key_COND_flush_thread_cache, &COND_flush_thread_cache, NULL);
@@ -4943,6 +4630,60 @@ static void openssl_lock(int mode, openssl_lock_t *lock, const char *file,
}
#endif /* HAVE_OPENSSL10 */
+
+struct SSL_ACCEPTOR_STATS
+{
+ long accept;
+ long accept_good;
+ long cache_size;
+ long verify_mode;
+ long verify_depth;
+ long zero;
+ const char *session_cache_mode;
+
+ SSL_ACCEPTOR_STATS():
+ accept(),accept_good(),cache_size(),verify_mode(),verify_depth(),zero(),
+ session_cache_mode("NONE")
+ {
+ }
+
+ void init()
+ {
+ DBUG_ASSERT(ssl_acceptor_fd !=0 && ssl_acceptor_fd->ssl_context != 0);
+ SSL_CTX *ctx= ssl_acceptor_fd->ssl_context;
+ accept= 0;
+ accept_good= 0;
+ verify_mode= SSL_CTX_get_verify_mode(ctx);
+ verify_depth= SSL_CTX_get_verify_depth(ctx);
+ cache_size= SSL_CTX_sess_get_cache_size(ctx);
+ switch (SSL_CTX_get_session_cache_mode(ctx))
+ {
+ case SSL_SESS_CACHE_OFF:
+ session_cache_mode= "OFF"; break;
+ case SSL_SESS_CACHE_CLIENT:
+ session_cache_mode= "CLIENT"; break;
+ case SSL_SESS_CACHE_SERVER:
+ session_cache_mode= "SERVER"; break;
+ case SSL_SESS_CACHE_BOTH:
+ session_cache_mode= "BOTH"; break;
+ case SSL_SESS_CACHE_NO_AUTO_CLEAR:
+ session_cache_mode= "NO_AUTO_CLEAR"; break;
+ case SSL_SESS_CACHE_NO_INTERNAL_LOOKUP:
+ session_cache_mode= "NO_INTERNAL_LOOKUP"; break;
+ default:
+ session_cache_mode= "Unknown"; break;
+ }
+ }
+};
+
+static SSL_ACCEPTOR_STATS ssl_acceptor_stats;
+void ssl_acceptor_stats_update(int sslaccept_ret)
+{
+ statistic_increment(ssl_acceptor_stats.accept, &LOCK_status);
+ if (!sslaccept_ret)
+ statistic_increment(ssl_acceptor_stats.accept_good,&LOCK_status);
+}
+
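ssl_acceptor_stats_update() replaces the per-CTX OpenSSL counters removed later in this patch; it is presumably called from the TLS handshake path right after the server-side accept. A hypothetical call site (the handshake function name is a placeholder, not taken from this diff):

    int err= do_tls_server_handshake(vio);  /* placeholder name; 0 on success */
    ssl_acceptor_stats_update(err);         /* bumps accept, and accept_good when err == 0 */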
static void init_ssl()
{
#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
@@ -4963,6 +4704,9 @@ static void init_ssl()
opt_use_ssl = 0;
have_ssl= SHOW_OPTION_DISABLED;
}
+ else
+ ssl_acceptor_stats.init();
+
if (global_system_variables.log_warnings > 0)
{
ulong err;
@@ -4981,6 +4725,34 @@ static void init_ssl()
#endif /* HAVE_OPENSSL && ! EMBEDDED_LIBRARY */
}
+/* Reinitialize SSL (FLUSH SSL) */
+int reinit_ssl()
+{
+#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
+ if (!opt_use_ssl)
+ return 0;
+
+ enum enum_ssl_init_error error = SSL_INITERR_NOERROR;
+ st_VioSSLFd *new_fd = new_VioSSLAcceptorFd(opt_ssl_key, opt_ssl_cert,
+ opt_ssl_ca, opt_ssl_capath, opt_ssl_cipher, &error, opt_ssl_crl, opt_ssl_crlpath);
+
+ if (!new_fd)
+ {
+ my_printf_error(ER_UNKNOWN_ERROR, "Failed to refresh SSL, error: %s", MYF(0),
+ sslGetErrString(error));
+#ifndef HAVE_YASSL
+ ERR_clear_error();
+#endif
+ return 1;
+ }
+ mysql_rwlock_wrlock(&LOCK_ssl_refresh);
+ free_vio_ssl_acceptor_fd(ssl_acceptor_fd);
+ ssl_acceptor_fd= new_fd;
+ ssl_acceptor_stats.init();
+ mysql_rwlock_unlock(&LOCK_ssl_refresh);
+#endif
+ return 0;
+}
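reinit_ssl() swaps ssl_acceptor_fd under a write lock on LOCK_ssl_refresh; the read side (connection setup using the acceptor context) presumably holds the corresponding read lock so an in-flight handshake never sees a freed context. The pattern in isolation, sketched with a plain pthread rwlock rather than the server's wrappers:

    #include <pthread.h>

    static pthread_rwlock_t ctx_lock= PTHREAD_RWLOCK_INITIALIZER;
    static void *acceptor_ctx;               /* stands in for ssl_acceptor_fd */

    /* Reader: use the current context without racing a concurrent refresh. */
    static void with_current_ctx(void (*fn)(void *))
    {
      pthread_rwlock_rdlock(&ctx_lock);
      fn(acceptor_ctx);
      pthread_rwlock_unlock(&ctx_lock);
    }

    /* Writer: free the old context and install the new one, in the same
       order as the refresh code above. */
    static void refresh_ctx(void *new_ctx, void (*free_ctx)(void *))
    {
      pthread_rwlock_wrlock(&ctx_lock);
      free_ctx(acceptor_ctx);
      acceptor_ctx= new_ctx;
      pthread_rwlock_unlock(&ctx_lock);
    }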
static void end_ssl()
{
@@ -5111,6 +4883,7 @@ static int init_server_components()
my_rnd_init(&sql_rand,(ulong) server_start_time,(ulong) server_start_time/2);
setup_fpu();
init_thr_lock();
+ backup_init();
#ifndef EMBEDDED_LIBRARY
if (init_thr_timer(thread_scheduler->max_threads + extra_max_connections))
@@ -5121,9 +4894,6 @@ static int init_server_components()
#endif
my_uuid_init((ulong) (my_rnd(&sql_rand))*12345,12345);
-#ifdef HAVE_REPLICATION
- init_slave_list();
-#endif
wt_init();
/* Setup logs */
@@ -5308,7 +5078,9 @@ static int init_server_components()
wsrep_thr_init();
#endif
- if (WSREP_ON && !wsrep_recovery && !opt_abort) /* WSREP BEFORE SE */
+#ifdef WITH_WSREP
+ if (wsrep_init_server()) unireg_abort(1);
+ if (WSREP_ON && !wsrep_recovery && !opt_abort)
{
if (opt_bootstrap) // bootstrap option given - disable wsrep functionality
{
@@ -5341,6 +5113,7 @@ static int init_server_components()
}
}
}
+#endif /* WITH_WSREP */
if (opt_bin_log)
{
@@ -5635,100 +5408,17 @@ static int init_server_components()
#ifndef EMBEDDED_LIBRARY
-
-static void create_shutdown_thread()
+#ifdef _WIN32
+static void create_shutdown_event()
{
-#ifdef __WIN__
hEventShutdown=CreateEvent(0, FALSE, FALSE, shutdown_event_name);
- pthread_t hThread;
- int error;
- if (unlikely((error= mysql_thread_create(key_thread_handle_shutdown,
- &hThread, &connection_attrib,
- handle_shutdown, 0))))
- sql_print_warning("Can't create thread to handle shutdown requests"
- " (errno= %d)", error);
-
// On "Stop Service" we have to do regular shutdown
Service.SetShutdownEvent(hEventShutdown);
-#endif /* __WIN__ */
}
-
-#endif /* EMBEDDED_LIBRARY */
-
-#if (defined(_WIN32) || defined(HAVE_SMEM)) && !defined(EMBEDDED_LIBRARY)
-static void handle_connections_methods()
-{
- pthread_t hThread;
- int error;
- DBUG_ENTER("handle_connections_methods");
- if (hPipe == INVALID_HANDLE_VALUE &&
- (!have_tcpip || opt_disable_networking) &&
- !opt_enable_shared_memory)
- {
- sql_print_error("TCP/IP, --shared-memory, or --named-pipe should be configured on NT OS");
- unireg_abort(1); // Will not return
- }
-
- mysql_mutex_lock(&LOCK_start_thread);
- mysql_cond_init(key_COND_handler_count, &COND_handler_count, NULL);
- handler_count=0;
- if (hPipe != INVALID_HANDLE_VALUE)
- {
- handler_count++;
- if ((error= mysql_thread_create(key_thread_handle_con_namedpipes,
- &hThread, &connection_attrib,
- handle_connections_namedpipes, 0)))
- {
- sql_print_warning("Can't create thread to handle named pipes"
- " (errno= %d)", error);
- handler_count--;
- }
- }
- if (have_tcpip && !opt_disable_networking)
- {
- handler_count++;
- if ((error= mysql_thread_create(key_thread_handle_con_sockets,
- &hThread, &connection_attrib,
- handle_connections_sockets_thread, 0)))
- {
- sql_print_warning("Can't create thread to handle TCP/IP",
- " (errno= %d)", error);
- handler_count--;
- }
- }
-#ifdef HAVE_SMEM
- if (opt_enable_shared_memory)
- {
- handler_count++;
- if ((error= mysql_thread_create(key_thread_handle_con_sharedmem,
- &hThread, &connection_attrib,
- handle_connections_shared_memory, 0)))
- {
- sql_print_warning("Can't create thread to handle shared memory",
- " (errno= %d)", error);
- handler_count--;
- }
- }
+#else /*_WIN32*/
+#define create_shutdown_event()
#endif
-
- while (handler_count > 0)
- mysql_cond_wait(&COND_handler_count, &LOCK_start_thread);
- mysql_mutex_unlock(&LOCK_start_thread);
- DBUG_VOID_RETURN;
-}
-
-void decrement_handler_count()
-{
- mysql_mutex_lock(&LOCK_start_thread);
- if (--handler_count == 0)
- mysql_cond_signal(&COND_handler_count);
- mysql_mutex_unlock(&LOCK_start_thread);
- my_thread_end();
-}
-#else
-#define decrement_handler_count()
-#endif /* defined(_WIN32) || defined(HAVE_SMEM) */
-
+#endif /* EMBEDDED_LIBRARY */
#ifndef EMBEDDED_LIBRARY
@@ -5908,7 +5598,7 @@ int mysqld_main(int argc, char **argv)
}
#endif /* HAVE_PSI_INTERFACE */
- init_error_log_mutex();
+ mysql_mutex_init(key_LOCK_error_log, &LOCK_error_log, MY_MUTEX_INIT_FAST);
/* Initialize audit interface globals. Audit plugins are inited later. */
mysql_audit_initialize();
@@ -5995,8 +5685,7 @@ int mysqld_main(int argc, char **argv)
set_user(mysqld_user, user_info);
}
- if (WSREP_ON && wsrep_check_opts())
- global_system_variables.wsrep_on= 0;
+ if (WSREP_ON && wsrep_check_opts()) unireg_abort(1);
/*
The subsequent calls may take a long time : e.g. innodb log read.
@@ -6046,18 +5735,7 @@ int mysqld_main(int argc, char **argv)
if (mysql_rm_tmp_tables() || acl_init(opt_noacl) ||
my_tz_init((THD *)0, default_tz_name, opt_bootstrap))
- {
- abort_loop=1;
- select_thread_in_use=0;
-
- (void) pthread_kill(signal_thread, MYSQL_KILL_SIGNAL);
-
- delete_pid_file(MYF(MY_WME));
-
- if (mysql_socket_getfd(unix_sock) != INVALID_SOCKET)
- unlink(mysqld_unix_port);
- exit(1);
- }
+ unireg_abort(1);
if (!opt_noacl)
(void) grant_init();
@@ -6096,24 +5774,11 @@ int mysqld_main(int argc, char **argv)
}
else
{
- wsrep_SE_initialized();
-
- if (wsrep_before_SE())
- {
- /*! in case of no SST wsrep waits in view handler callback */
- wsrep_SE_init_grab();
- wsrep_SE_init_done();
- /*! in case of SST wsrep waits for wsrep->sst_received */
- if (wsrep_sst_continue())
- {
- WSREP_ERROR("Failed to signal the wsrep provider to continue.");
- }
- }
- else
+ wsrep_init_globals();
+ if (!wsrep_before_SE())
{
wsrep_init_startup (false);
}
-
wsrep_create_appliers(wsrep_slave_threads - 1);
}
}
@@ -6121,9 +5786,9 @@ int mysqld_main(int argc, char **argv)
if (opt_bootstrap)
{
select_thread_in_use= 0; // Allow 'kill' to work
- bootstrap(mysql_stdin);
- if (!kill_in_progress)
- unireg_abort(bootstrap_error ? 1 : 0);
+ int bootstrap_error= bootstrap(mysql_stdin);
+ if (!abort_loop)
+ unireg_abort(bootstrap_error);
else
{
sleep(2); // Wait for kill
@@ -6131,7 +5796,7 @@ int mysqld_main(int argc, char **argv)
}
}
- create_shutdown_thread();
+ create_shutdown_event();
start_handle_manager();
/* Copy default global rpl_filter to global_rpl_filter */
@@ -6200,22 +5865,40 @@ int mysqld_main(int argc, char **argv)
/* Memory used when everything is setup */
start_memory_used= global_status_var.global_memory_used;
-#if defined(_WIN32) || defined(HAVE_SMEM)
- handle_connections_methods();
+#ifdef _WIN32
+ handle_connections_win();
#else
handle_connections_sockets();
-#endif /* _WIN32 || HAVE_SMEM */
-
- /* (void) pthread_attr_destroy(&connection_attrib); */
- DBUG_PRINT("quit",("Exiting main thread"));
-
-#ifndef __WIN__
mysql_mutex_lock(&LOCK_start_thread);
- select_thread_in_use=0; // For close_connections
+ select_thread_in_use=0;
mysql_cond_broadcast(&COND_start_thread);
mysql_mutex_unlock(&LOCK_start_thread);
-#endif /* __WIN__ */
+#endif /* _WIN32 */
+
+ /* Shutdown requested */
+ char *user= shutdown_user.load(std::memory_order_relaxed);
+ sql_print_information(ER_DEFAULT(ER_NORMAL_SHUTDOWN), my_progname,
+ user ? user : "unknown");
+ if (user)
+ my_free(user);
+
+#ifdef WITH_WSREP
+ /* Stop wsrep threads in case they are running. */
+ if (wsrep_running_threads > 0)
+ {
+ wsrep_shutdown_replication();
+ }
+#endif
+
+ close_connections();
+
+ clean_up(1);
+ sd_notify(0, "STATUS=MariaDB server is down");
+
+ /* (void) pthread_attr_destroy(&connection_attrib); */
+
+ DBUG_PRINT("quit",("Exiting main thread"));
/*
Disable the main thread instrumentation,
@@ -6223,12 +5906,6 @@ int mysqld_main(int argc, char **argv)
*/
PSI_CALL_delete_current_thread();
- /* Wait until cleanup is done */
- mysql_mutex_lock(&LOCK_thread_count);
- while (!ready_to_exit)
- mysql_cond_wait(&COND_thread_count, &LOCK_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
-
#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY)
if (start_mode)
Service.Stop();
@@ -6461,54 +6138,6 @@ int mysqld_main(int argc, char **argv)
#endif
-/**
- Execute all commands from a file. Used by the mysql_install_db script to
- create MySQL privilege tables without having to start a full MySQL server
- and by read_init_file() if mysqld was started with the option --init-file.
-*/
-
-static void bootstrap(MYSQL_FILE *file)
-{
- DBUG_ENTER("bootstrap");
-
- THD *thd= new THD(next_thread_id());
-#ifdef WITH_WSREP
- thd->variables.wsrep_on= 0;
-#endif
- thd->bootstrap=1;
- my_net_init(&thd->net,(st_vio*) 0, thd, MYF(0));
- thd->max_client_packet_length= thd->net.max_packet;
- thd->security_ctx->master_access= ~(ulong)0;
- in_bootstrap= TRUE;
-
- bootstrap_file=file;
-#ifndef EMBEDDED_LIBRARY // TODO: Enable this
- int error;
- if ((error= mysql_thread_create(key_thread_bootstrap,
- &thd->real_id, &connection_attrib,
- handle_bootstrap,
- (void*) thd)))
- {
- sql_print_warning("Can't create thread to handle bootstrap (errno= %d)",
- error);
- bootstrap_error=-1;
- delete thd;
- DBUG_VOID_RETURN;
- }
- /* Wait for thread to die */
- mysql_mutex_lock(&LOCK_thread_count);
- while (in_bootstrap)
- mysql_cond_wait(&COND_thread_count, &LOCK_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
-#else
- thd->mysql= 0;
- do_handle_bootstrap(thd);
-#endif
-
- DBUG_VOID_RETURN;
-}
-
-
static bool read_init_file(char *file_name)
{
MYSQL_FILE *file;
@@ -6613,7 +6242,7 @@ void create_thread_to_handle_connection(CONNECT *connect)
@param[in,out] thd Thread handle of future thread.
*/
-static void create_new_thread(CONNECT *connect)
+void create_new_thread(CONNECT *connect)
{
DBUG_ENTER("create_new_thread");
@@ -6658,41 +6287,111 @@ static void create_new_thread(CONNECT *connect)
#endif /* EMBEDDED_LIBRARY */
-#ifdef SIGNALS_DONT_BREAK_READ
-inline void kill_broken_server()
+ /* Handle new connections and spawn new process to handle them */
+
+#ifndef EMBEDDED_LIBRARY
+
+void handle_accepted_socket(MYSQL_SOCKET new_sock, MYSQL_SOCKET sock)
{
- /* hack to get around signals ignored in syscalls for problem OS's */
- if (mysql_socket_getfd(unix_sock) == INVALID_SOCKET ||
- (!opt_disable_networking &&
- mysql_socket_getfd(base_ip_sock) == INVALID_SOCKET))
+ CONNECT *connect;
+ bool is_unix_sock;
+
+#ifdef FD_CLOEXEC
+ (void) fcntl(mysql_socket_getfd(new_sock), F_SETFD, FD_CLOEXEC);
+#endif
+
+#ifdef HAVE_LIBWRAP
{
- select_thread_in_use = 0;
- /* The following call will never return */
- DBUG_PRINT("general", ("killing server because socket is closed"));
- kill_server((void*) MYSQL_KILL_SIGNAL);
+ if (mysql_socket_getfd(sock) == mysql_socket_getfd(base_ip_sock) ||
+ mysql_socket_getfd(sock) == mysql_socket_getfd(extra_ip_sock))
+ {
+ struct request_info req;
+ signal(SIGCHLD, SIG_DFL);
+ request_init(&req, RQ_DAEMON, libwrapName, RQ_FILE,
+ mysql_socket_getfd(new_sock), NULL);
+ my_fromhost(&req);
+ if (!my_hosts_access(&req))
+ {
+ /*
+ This may be stupid but refuse() includes an exit(0)
+ which we surely don't want...
+ clean_exit() - same stupid thing ...
+ */
+ syslog(deny_severity, "refused connect from %s",
+ my_eval_client(&req));
+
+ /*
+ C++ sucks (the gibberish in front just translates the supplied
+ sink function pointer in the req structure from a void (*sink)();
+ to a void(*sink)(int) if you omit the cast, the C++ compiler
+ will cry...
+ */
+ if (req.sink)
+ ((void(*)(int))req.sink)(req.fd);
+
+ (void)mysql_socket_shutdown(new_sock, SHUT_RDWR);
+ (void)mysql_socket_close(new_sock);
+ /*
+ The connection was refused by TCP wrappers.
+ There are no details (by client IP) available to update the
+ host_cache.
+ */
+ statistic_increment(connection_errors_tcpwrap, &LOCK_status);
+ return;
+ }
+ }
}
-}
-#define MAYBE_BROKEN_SYSCALL kill_broken_server();
-#else
-#define MAYBE_BROKEN_SYSCALL
-#endif
+#endif /* HAVE_LIBWRAP */
- /* Handle new connections and spawn new process to handle them */
+ DBUG_PRINT("info", ("Creating CONNECT for new connection"));
-#ifndef EMBEDDED_LIBRARY
+ if ((connect= new CONNECT()))
+ {
+ is_unix_sock= (mysql_socket_getfd(sock) ==
+ mysql_socket_getfd(unix_sock));
+ if (!(connect->vio=
+ mysql_socket_vio_new(new_sock,
+ is_unix_sock ? VIO_TYPE_SOCKET :
+ VIO_TYPE_TCPIP,
+ is_unix_sock ? VIO_LOCALHOST : 0)))
+ {
+ delete connect;
+ connect= 0; // Error handling below
+ }
+ }
+
+ if (!connect)
+ {
+ /* Connect failure */
+ (void)mysql_socket_close(new_sock);
+ statistic_increment(aborted_connects, &LOCK_status);
+ statistic_increment(connection_errors_internal, &LOCK_status);
+ return;
+ }
+
+ if (is_unix_sock)
+ connect->host= my_localhost;
+
+ if (mysql_socket_getfd(sock) == mysql_socket_getfd(extra_ip_sock))
+ {
+ connect->extra_port= 1;
+ connect->scheduler= extra_thread_scheduler;
+ }
+ create_new_thread(connect);
+}
+
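handle_accepted_socket() above now carries everything that used to live inline in the accept loop (libwrap check, CONNECT/Vio construction, extra-port scheduler selection), so the poll/accept loop below and the Windows listener can share it. The resulting shape, reduced to a standalone POSIX sketch:

    #include <sys/socket.h>

    static void handle_accepted(int listen_fd, int conn_fd);  /* per-connection work */

    static void accept_loop(int listen_fd, volatile int *abort_flag)
    {
      while (!*abort_flag)
      {
        int conn_fd= accept(listen_fd, NULL, NULL);
        if (conn_fd < 0)
          continue;               /* EINTR/EAGAIN and friends: just retry */
        handle_accepted(listen_fd, conn_fd);
      }
    }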
+#ifndef _WIN32
void handle_connections_sockets()
{
MYSQL_SOCKET sock= mysql_socket_invalid();
MYSQL_SOCKET new_sock= mysql_socket_invalid();
uint error_count=0;
- CONNECT *connect;
struct sockaddr_storage cAddr;
int ip_flags __attribute__((unused))=0;
int socket_flags __attribute__((unused))= 0;
int extra_ip_flags __attribute__((unused))=0;
int flags=0,retval;
- bool is_unix_sock;
#ifdef HAVE_POLL
int socket_count= 0;
struct pollfd fds[3]; // for ip_sock, unix_sock and extra_ip_sock
@@ -6730,7 +6429,6 @@ void handle_connections_sockets()
"STATUS=Taking your SQL requests now...\n");
DBUG_PRINT("general",("Waiting for connections."));
- MAYBE_BROKEN_SYSCALL;
while (!abort_loop)
{
#ifdef HAVE_POLL
@@ -6753,15 +6451,11 @@ void handle_connections_sockets()
if (!select_errors++ && !abort_loop) /* purecov: inspected */
sql_print_error("mysqld: Got error %d from select",socket_errno); /* purecov: inspected */
}
- MAYBE_BROKEN_SYSCALL
continue;
}
if (abort_loop)
- {
- MAYBE_BROKEN_SYSCALL;
break;
- }
/* Is this a new connection request ? */
#ifdef HAVE_POLL
@@ -6812,7 +6506,6 @@ void handle_connections_sockets()
if (mysql_socket_getfd(new_sock) != INVALID_SOCKET ||
(socket_errno != SOCKET_EINTR && socket_errno != SOCKET_EAGAIN))
break;
- MAYBE_BROKEN_SYSCALL;
#if !defined(NO_FCNTL_NONBLOCK)
if (!(test_flags & TEST_BLOCKING))
{
@@ -6824,10 +6517,7 @@ void handle_connections_sockets()
}
#endif
}
-#if !defined(NO_FCNTL_NONBLOCK)
- if (!(test_flags & TEST_BLOCKING))
- fcntl(mysql_socket_getfd(sock), F_SETFL, flags);
-#endif
+
if (mysql_socket_getfd(new_sock) == INVALID_SOCKET)
{
/*
@@ -6838,448 +6528,22 @@ void handle_connections_sockets()
statistic_increment(connection_errors_accept, &LOCK_status);
if ((error_count++ & 255) == 0) // This can happen often
sql_perror("Error in accept");
- MAYBE_BROKEN_SYSCALL;
if (socket_errno == SOCKET_ENFILE || socket_errno == SOCKET_EMFILE)
sleep(1); // Give other threads some time
continue;
}
-#ifdef FD_CLOEXEC
- (void) fcntl(mysql_socket_getfd(new_sock), F_SETFD, FD_CLOEXEC);
+#if !defined(NO_FCNTL_NONBLOCK)
+ if (!(test_flags & TEST_BLOCKING))
+ fcntl(mysql_socket_getfd(sock), F_SETFL, flags);
#endif
-
-#ifdef HAVE_LIBWRAP
- {
- if (mysql_socket_getfd(sock) == mysql_socket_getfd(base_ip_sock) ||
- mysql_socket_getfd(sock) == mysql_socket_getfd(extra_ip_sock))
- {
- struct request_info req;
- signal(SIGCHLD, SIG_DFL);
- request_init(&req, RQ_DAEMON, libwrapName, RQ_FILE,
- mysql_socket_getfd(new_sock), NULL);
- my_fromhost(&req);
- if (!my_hosts_access(&req))
- {
- /*
- This may be stupid but refuse() includes an exit(0)
- which we surely don't want...
- clean_exit() - same stupid thing ...
- */
- syslog(deny_severity, "refused connect from %s",
- my_eval_client(&req));
-
- /*
- C++ sucks (the gibberish in front just translates the supplied
- sink function pointer in the req structure from a void (*sink)();
- to a void(*sink)(int) if you omit the cast, the C++ compiler
- will cry...
- */
- if (req.sink)
- ((void (*)(int))req.sink)(req.fd);
-
- (void) mysql_socket_shutdown(new_sock, SHUT_RDWR);
- (void) mysql_socket_close(new_sock);
- /*
- The connection was refused by TCP wrappers.
- There are no details (by client IP) available to update the
- host_cache.
- */
- statistic_increment(connection_errors_tcpwrap, &LOCK_status);
- continue;
- }
- }
- }
-#endif /* HAVE_LIBWRAP */
-
- DBUG_PRINT("info", ("Creating CONNECT for new connection"));
-
- if ((connect= new CONNECT()))
- {
- is_unix_sock= (mysql_socket_getfd(sock) ==
- mysql_socket_getfd(unix_sock));
-
- if (!(connect->vio=
- mysql_socket_vio_new(new_sock,
- is_unix_sock ? VIO_TYPE_SOCKET :
- VIO_TYPE_TCPIP,
- is_unix_sock ? VIO_LOCALHOST: 0)))
- {
- delete connect;
- connect= 0; // Error handling below
- }
- }
-
- if (!connect)
- {
- /* Connect failure */
- (void) mysql_socket_shutdown(new_sock, SHUT_RDWR);
- (void) mysql_socket_close(new_sock);
- statistic_increment(aborted_connects,&LOCK_status);
- statistic_increment(connection_errors_internal, &LOCK_status);
- continue;
- }
-
- if (is_unix_sock)
- connect->host= my_localhost;
-
- if (mysql_socket_getfd(sock) == mysql_socket_getfd(extra_ip_sock))
- {
- connect->extra_port= 1;
- connect->scheduler= extra_thread_scheduler;
- }
- create_new_thread(connect);
+ handle_accepted_socket(new_sock, sock);
}
sd_notify(0, "STOPPING=1\n"
"STATUS=Shutdown in progress\n");
DBUG_VOID_RETURN;
}
-
-#ifdef _WIN32
-pthread_handler_t handle_connections_sockets_thread(void *arg)
-{
- my_thread_init();
- handle_connections_sockets();
- decrement_handler_count();
- return 0;
-}
-
-pthread_handler_t handle_connections_namedpipes(void *arg)
-{
- HANDLE hConnectedPipe;
- OVERLAPPED connectOverlapped= {0};
- my_thread_init();
- DBUG_ENTER("handle_connections_namedpipes");
- connectOverlapped.hEvent= CreateEvent(NULL, TRUE, FALSE, NULL);
- if (!connectOverlapped.hEvent)
- {
- sql_print_error("Can't create event, last error=%u", GetLastError());
- unireg_abort(1);
- }
- DBUG_PRINT("general",("Waiting for named pipe connections."));
- while (!abort_loop)
- {
- /* wait for named pipe connection */
- BOOL fConnected= ConnectNamedPipe(hPipe, &connectOverlapped);
- if (!fConnected && (GetLastError() == ERROR_IO_PENDING))
- {
- /*
- ERROR_IO_PENDING says async IO has started but not yet finished.
- GetOverlappedResult will wait for completion.
- */
- DWORD bytes;
- fConnected= GetOverlappedResult(hPipe, &connectOverlapped,&bytes, TRUE);
- }
- if (abort_loop)
- break;
- if (!fConnected)
- fConnected = GetLastError() == ERROR_PIPE_CONNECTED;
- if (!fConnected)
- {
- CloseHandle(hPipe);
- if ((hPipe= CreateNamedPipe(pipe_name,
- PIPE_ACCESS_DUPLEX |
- FILE_FLAG_OVERLAPPED,
- PIPE_TYPE_BYTE |
- PIPE_READMODE_BYTE |
- PIPE_WAIT,
- PIPE_UNLIMITED_INSTANCES,
- (int) global_system_variables.
- net_buffer_length,
- (int) global_system_variables.
- net_buffer_length,
- NMPWAIT_USE_DEFAULT_WAIT,
- &saPipeSecurity)) ==
- INVALID_HANDLE_VALUE)
- {
- sql_perror("Can't create new named pipe!");
- break; // Abort
- }
- }
- hConnectedPipe = hPipe;
- /* create new pipe for new connection */
- if ((hPipe = CreateNamedPipe(pipe_name,
- PIPE_ACCESS_DUPLEX |
- FILE_FLAG_OVERLAPPED,
- PIPE_TYPE_BYTE |
- PIPE_READMODE_BYTE |
- PIPE_WAIT,
- PIPE_UNLIMITED_INSTANCES,
- (int) global_system_variables.net_buffer_length,
- (int) global_system_variables.net_buffer_length,
- NMPWAIT_USE_DEFAULT_WAIT,
- &saPipeSecurity)) ==
- INVALID_HANDLE_VALUE)
- {
- sql_perror("Can't create new named pipe!");
- hPipe=hConnectedPipe;
- continue; // We have to try again
- }
- CONNECT *connect;
- if (!(connect= new CONNECT) ||
- !(connect->vio= vio_new_win32pipe(hConnectedPipe)))
- {
- DisconnectNamedPipe(hConnectedPipe);
- CloseHandle(hConnectedPipe);
- delete connect;
- statistic_increment(aborted_connects,&LOCK_status);
- statistic_increment(connection_errors_internal, &LOCK_status);
- continue;
- }
- connect->host= my_localhost;
- create_new_thread(connect);
- }
- LocalFree(saPipeSecurity.lpSecurityDescriptor);
- CloseHandle(connectOverlapped.hEvent);
- DBUG_LEAVE;
- decrement_handler_count();
- return 0;
-}
-#endif /* _WIN32 */
-
-
-#ifdef HAVE_SMEM
-
-/**
- Thread of shared memory's service.
-
- @param arg Arguments of thread
-*/
-pthread_handler_t handle_connections_shared_memory(void *arg)
-{
- /* file-mapping object, use for create shared memory */
- HANDLE handle_connect_file_map= 0;
- char *handle_connect_map= 0; // pointer on shared memory
- HANDLE event_connect_answer= 0;
- ulong smem_buffer_length= shared_memory_buffer_length + 4;
- ulong connect_number= 1;
- char *tmp= NULL;
- char *suffix_pos;
- char connect_number_char[22], *p;
- const char *errmsg= 0;
- SECURITY_ATTRIBUTES *sa_event= 0, *sa_mapping= 0;
- my_thread_init();
- DBUG_ENTER("handle_connections_shared_memorys");
- DBUG_PRINT("general",("Waiting for allocated shared memory."));
-
- /*
- get enough space base-name + '_' + longest suffix we might ever send
- */
- if (!(tmp= (char *)my_malloc(strlen(shared_memory_base_name) + 32L,
- MYF(MY_FAE))))
- goto error;
-
- if (my_security_attr_create(&sa_event, &errmsg,
- GENERIC_ALL, SYNCHRONIZE | EVENT_MODIFY_STATE))
- goto error;
-
- if (my_security_attr_create(&sa_mapping, &errmsg,
- GENERIC_ALL, FILE_MAP_READ | FILE_MAP_WRITE))
- goto error;
-
- /*
- The name of event and file-mapping events create agree next rule:
- shared_memory_base_name+unique_part
- Where:
- shared_memory_base_name is unique value for each server
- unique_part is unique value for each object (events and file-mapping)
- */
- suffix_pos= strxmov(tmp,shared_memory_base_name,"_",NullS);
- strmov(suffix_pos, "CONNECT_REQUEST");
- if ((smem_event_connect_request= CreateEvent(sa_event,
- FALSE, FALSE, tmp)) == 0)
- {
- errmsg= "Could not create request event";
- goto error;
- }
- strmov(suffix_pos, "CONNECT_ANSWER");
- if ((event_connect_answer= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0)
- {
- errmsg="Could not create answer event";
- goto error;
- }
- strmov(suffix_pos, "CONNECT_DATA");
- if ((handle_connect_file_map=
- CreateFileMapping(INVALID_HANDLE_VALUE, sa_mapping,
- PAGE_READWRITE, 0, sizeof(connect_number), tmp)) == 0)
- {
- errmsg= "Could not create file mapping";
- goto error;
- }
- if ((handle_connect_map= (char *)MapViewOfFile(handle_connect_file_map,
- FILE_MAP_WRITE,0,0,
- sizeof(DWORD))) == 0)
- {
- errmsg= "Could not create shared memory service";
- goto error;
- }
-
- while (!abort_loop)
- {
- /* Wait a request from client */
- WaitForSingleObject(smem_event_connect_request,INFINITE);
-
- /*
- it can be after shutdown command
- */
- if (abort_loop)
- goto error;
-
- HANDLE handle_client_file_map= 0;
- char *handle_client_map= 0;
- HANDLE event_client_wrote= 0;
- HANDLE event_client_read= 0; // for transfer data server <-> client
- HANDLE event_server_wrote= 0;
- HANDLE event_server_read= 0;
- HANDLE event_conn_closed= 0;
- CONNECT *connect= 0;
-
- p= int10_to_str(connect_number, connect_number_char, 10);
- /*
- The name of event and file-mapping events create agree next rule:
- shared_memory_base_name+unique_part+number_of_connection
- Where:
- shared_memory_base_name is uniquel value for each server
- unique_part is unique value for each object (events and file-mapping)
- number_of_connection is connection-number between server and client
- */
- suffix_pos= strxmov(tmp,shared_memory_base_name,"_",connect_number_char,
- "_",NullS);
- strmov(suffix_pos, "DATA");
- if ((handle_client_file_map=
- CreateFileMapping(INVALID_HANDLE_VALUE, sa_mapping,
- PAGE_READWRITE, 0, smem_buffer_length, tmp)) == 0)
- {
- errmsg= "Could not create file mapping";
- goto errorconn;
- }
- if ((handle_client_map= (char*)MapViewOfFile(handle_client_file_map,
- FILE_MAP_WRITE,0,0,
- smem_buffer_length)) == 0)
- {
- errmsg= "Could not create memory map";
- goto errorconn;
- }
- strmov(suffix_pos, "CLIENT_WROTE");
- if ((event_client_wrote= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0)
- {
- errmsg= "Could not create client write event";
- goto errorconn;
- }
- strmov(suffix_pos, "CLIENT_READ");
- if ((event_client_read= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0)
- {
- errmsg= "Could not create client read event";
- goto errorconn;
- }
- strmov(suffix_pos, "SERVER_READ");
- if ((event_server_read= CreateEvent(sa_event, FALSE, FALSE, tmp)) == 0)
- {
- errmsg= "Could not create server read event";
- goto errorconn;
- }
- strmov(suffix_pos, "SERVER_WROTE");
- if ((event_server_wrote= CreateEvent(sa_event,
- FALSE, FALSE, tmp)) == 0)
- {
- errmsg= "Could not create server write event";
- goto errorconn;
- }
- strmov(suffix_pos, "CONNECTION_CLOSED");
- if ((event_conn_closed= CreateEvent(sa_event,
- TRUE, FALSE, tmp)) == 0)
- {
- errmsg= "Could not create closed connection event";
- goto errorconn;
- }
- if (abort_loop)
- goto errorconn;
-
- if (!(connect= new CONNECT))
- {
- errmsg= "Could not create CONNECT object";
- goto errorconn;
- }
-
- /* Send number of connection to client */
- int4store(handle_connect_map, connect_number);
- if (!SetEvent(event_connect_answer))
- {
- errmsg= "Could not send answer event";
- goto errorconn;
- }
- /* Set event that client should receive data */
- if (!SetEvent(event_client_read))
- {
- errmsg= "Could not set client to read mode";
- goto errorconn;
- }
- if (!(connect->vio= vio_new_win32shared_memory(handle_client_file_map,
- handle_client_map,
- event_client_wrote,
- event_client_read,
- event_server_wrote,
- event_server_read,
- event_conn_closed)))
- {
- errmsg= "Could not create VIO object";
- goto errorconn;
- }
- connect->host= my_localhost; /* Host is unknown */
- create_new_thread(connect);
- connect_number++;
- continue;
-
-errorconn:
- /* Could not form connection; Free used handlers/memort and retry */
- if (errmsg)
- {
- char buff[180];
- strxmov(buff, "Can't create shared memory connection: ", errmsg, ".",
- NullS);
- sql_perror(buff);
- }
- if (handle_client_file_map)
- CloseHandle(handle_client_file_map);
- if (handle_client_map)
- UnmapViewOfFile(handle_client_map);
- if (event_server_wrote)
- CloseHandle(event_server_wrote);
- if (event_server_read)
- CloseHandle(event_server_read);
- if (event_client_wrote)
- CloseHandle(event_client_wrote);
- if (event_client_read)
- CloseHandle(event_client_read);
- if (event_conn_closed)
- CloseHandle(event_conn_closed);
-
- delete connect;
- statistic_increment(aborted_connects,&LOCK_status);
- statistic_increment(connection_errors_internal, &LOCK_status);
- }
-
- /* End shared memory handling */
-error:
- if (tmp)
- my_free(tmp);
-
- if (errmsg)
- {
- char buff[180];
- strxmov(buff, "Can't create shared memory service: ", errmsg, ".", NullS);
- sql_perror(buff);
- }
- my_security_attr_free(sa_event);
- my_security_attr_free(sa_mapping);
- if (handle_connect_map) UnmapViewOfFile(handle_connect_map);
- if (handle_connect_file_map) CloseHandle(handle_connect_file_map);
- if (event_connect_answer) CloseHandle(event_connect_answer);
- if (smem_event_connect_request) CloseHandle(smem_event_connect_request);
- DBUG_LEAVE;
- decrement_handler_count();
- return 0;
-}
-#endif /* HAVE_SMEM */
+#endif /* _WIN32*/
#endif /* EMBEDDED_LIBRARY */
@@ -7805,11 +7069,9 @@ struct my_option my_long_options[]=
MYSQL_COMPATIBILITY_OPTION("binlog-order-commits"),
MYSQL_TO_BE_IMPLEMENTED_OPTION("log-throttle-queries-not-using-indexes"),
MYSQL_TO_BE_IMPLEMENTED_OPTION("end-markers-in-json"),
- MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace"), // OPTIMIZER_TRACE
MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-features"), // OPTIMIZER_TRACE
MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-offset"), // OPTIMIZER_TRACE
MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-limit"), // OPTIMIZER_TRACE
- MYSQL_TO_BE_IMPLEMENTED_OPTION("optimizer-trace-max-mem-size"), // OPTIMIZER_TRACE
MYSQL_TO_BE_IMPLEMENTED_OPTION("eq-range-index-dive-limit"),
MYSQL_COMPATIBILITY_OPTION("server-id-bits"),
MYSQL_TO_BE_IMPLEMENTED_OPTION("slave-rows-search-algorithms"), // HAVE_REPLICATION
@@ -7817,7 +7079,6 @@ struct my_option my_long_options[]=
MYSQL_COMPATIBILITY_OPTION("slave-checkpoint-period"), // HAVE_REPLICATION
MYSQL_COMPATIBILITY_OPTION("slave-checkpoint-group"), // HAVE_REPLICATION
MYSQL_SUGGEST_ANALOG_OPTION("slave-pending-jobs-size-max", "--slave-parallel-max-queued"), // HAVE_REPLICATION
- MYSQL_TO_BE_IMPLEMENTED_OPTION("disconnect-on-expired-password"),
MYSQL_TO_BE_IMPLEMENTED_OPTION("sha256-password-private-key-path"), // HAVE_OPENSSL && !HAVE_YASSL
MYSQL_TO_BE_IMPLEMENTED_OPTION("sha256-password-public-key-path"), // HAVE_OPENSSL && !HAVE_YASSL
@@ -7912,11 +7173,7 @@ static int show_slaves_connected(THD *thd, SHOW_VAR *var, char *buff)
var->type= SHOW_LONGLONG;
var->value= buff;
- mysql_mutex_lock(&LOCK_slave_list);
-
- *((longlong *)buff)= slave_list.records;
-
- mysql_mutex_unlock(&LOCK_slave_list);
+ *((longlong*) buff)= uint32_t(binlog_dump_thread_count);
return 0;
}
@@ -8018,187 +7275,6 @@ static int show_flush_commands(THD *thd, SHOW_VAR *var, char *buff,
#if defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY)
-/* Functions relying on CTX */
-static int show_ssl_ctx_sess_accept(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_accept(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_sess_accept_good(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_accept_good(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_sess_connect_good(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_connect_good(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_sess_accept_renegotiate(THD *thd, SHOW_VAR *var,
- char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_accept_renegotiate(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_sess_connect_renegotiate(THD *thd, SHOW_VAR *var,
- char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_connect_renegotiate(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_sess_cb_hits(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_cb_hits(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_sess_hits(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_hits(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_sess_cache_full(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_cache_full(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_sess_misses(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_misses(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_sess_timeouts(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_timeouts(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_sess_number(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_number(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_sess_connect(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_connect(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_sess_get_cache_size(THD *thd, SHOW_VAR *var,
- char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_sess_get_cache_size(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_get_verify_mode(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_get_verify_mode(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_get_verify_depth(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- *((long *)buff)= (!ssl_acceptor_fd ? 0 :
- SSL_CTX_get_verify_depth(ssl_acceptor_fd->ssl_context));
- return 0;
-}
-
-static int show_ssl_ctx_get_session_cache_mode(THD *thd, SHOW_VAR *var,
- char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_CHAR;
- if (!ssl_acceptor_fd)
- var->value= const_cast<char*>("NONE");
- else
- switch (SSL_CTX_get_session_cache_mode(ssl_acceptor_fd->ssl_context))
- {
- case SSL_SESS_CACHE_OFF:
- var->value= const_cast<char*>("OFF"); break;
- case SSL_SESS_CACHE_CLIENT:
- var->value= const_cast<char*>("CLIENT"); break;
- case SSL_SESS_CACHE_SERVER:
- var->value= const_cast<char*>("SERVER"); break;
- case SSL_SESS_CACHE_BOTH:
- var->value= const_cast<char*>("BOTH"); break;
- case SSL_SESS_CACHE_NO_AUTO_CLEAR:
- var->value= const_cast<char*>("NO_AUTO_CLEAR"); break;
- case SSL_SESS_CACHE_NO_INTERNAL_LOOKUP:
- var->value= const_cast<char*>("NO_INTERNAL_LOOKUP"); break;
- default:
- var->value= const_cast<char*>("Unknown"); break;
- }
- return 0;
-}
/*
Functions relying on SSL
@@ -8219,18 +7295,6 @@ static int show_ssl_get_version(THD *thd, SHOW_VAR *var, char *buff,
return 0;
}
-static int show_ssl_session_reused(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
-{
- var->type= SHOW_LONG;
- var->value= buff;
- if( thd->vio_ok() && thd->net.vio->ssl_arg )
- *((long *)buff)= (long)SSL_session_reused((SSL*) thd->net.vio->ssl_arg);
- else
- *((long *)buff)= 0;
- return 0;
-}
-
static int show_ssl_get_default_timeout(THD *thd, SHOW_VAR *var, char *buff,
enum enum_var_type scope)
{
@@ -8248,10 +7312,14 @@ static int show_ssl_get_verify_mode(THD *thd, SHOW_VAR *var, char *buff,
{
var->type= SHOW_LONG;
var->value= buff;
+#ifndef HAVE_YASSL
if( thd->net.vio && thd->net.vio->ssl_arg )
*((long *)buff)= (long)SSL_get_verify_mode((SSL*)thd->net.vio->ssl_arg);
else
*((long *)buff)= 0;
+#else
+ *((long *)buff) = 0;
+#endif
return 0;
}
@@ -8260,10 +7328,15 @@ static int show_ssl_get_verify_depth(THD *thd, SHOW_VAR *var, char *buff,
{
var->type= SHOW_LONG;
var->value= buff;
+#ifndef HAVE_YASSL
if( thd->vio_ok() && thd->net.vio->ssl_arg )
*((long *)buff)= (long)SSL_get_verify_depth((SSL*)thd->net.vio->ssl_arg);
else
*((long *)buff)= 0;
+#else
+ *((long *)buff)= 0;
+#endif
+
return 0;
}
@@ -8588,6 +7661,7 @@ SHOW_VAR status_vars[]= {
{"Feature_locale", (char*) offsetof(STATUS_VAR, feature_locale), SHOW_LONG_STATUS},
{"Feature_subquery", (char*) offsetof(STATUS_VAR, feature_subquery), SHOW_LONG_STATUS},
{"Feature_system_versioning", (char*) offsetof(STATUS_VAR, feature_system_versioning), SHOW_LONG_STATUS},
+ {"Feature_application_time_periods", (char*) offsetof(STATUS_VAR, feature_application_time_periods), SHOW_LONG_STATUS},
{"Feature_timezone", (char*) offsetof(STATUS_VAR, feature_timezone), SHOW_LONG_STATUS},
{"Feature_trigger", (char*) offsetof(STATUS_VAR, feature_trigger), SHOW_LONG_STATUS},
{"Feature_window_functions", (char*) offsetof(STATUS_VAR, feature_window_functions), SHOW_LONG_STATUS},
@@ -8703,28 +7777,28 @@ SHOW_VAR status_vars[]= {
{"Sort_scan", (char*) offsetof(STATUS_VAR, filesort_scan_count_), SHOW_LONG_STATUS},
#ifdef HAVE_OPENSSL
#ifndef EMBEDDED_LIBRARY
- {"Ssl_accept_renegotiates", (char*) &show_ssl_ctx_sess_accept_renegotiate, SHOW_SIMPLE_FUNC},
- {"Ssl_accepts", (char*) &show_ssl_ctx_sess_accept, SHOW_SIMPLE_FUNC},
- {"Ssl_callback_cache_hits", (char*) &show_ssl_ctx_sess_cb_hits, SHOW_SIMPLE_FUNC},
+ {"Ssl_accept_renegotiates", (char*) &ssl_acceptor_stats.zero, SHOW_LONG},
+ {"Ssl_accepts", (char*) &ssl_acceptor_stats.accept, SHOW_LONG},
+ {"Ssl_callback_cache_hits", (char*) &ssl_acceptor_stats.zero, SHOW_LONG},
{"Ssl_cipher", (char*) &show_ssl_get_cipher, SHOW_SIMPLE_FUNC},
{"Ssl_cipher_list", (char*) &show_ssl_get_cipher_list, SHOW_SIMPLE_FUNC},
- {"Ssl_client_connects", (char*) &show_ssl_ctx_sess_connect, SHOW_SIMPLE_FUNC},
- {"Ssl_connect_renegotiates", (char*) &show_ssl_ctx_sess_connect_renegotiate, SHOW_SIMPLE_FUNC},
- {"Ssl_ctx_verify_depth", (char*) &show_ssl_ctx_get_verify_depth, SHOW_SIMPLE_FUNC},
- {"Ssl_ctx_verify_mode", (char*) &show_ssl_ctx_get_verify_mode, SHOW_SIMPLE_FUNC},
+ {"Ssl_client_connects", (char*) &ssl_acceptor_stats.zero, SHOW_LONG},
+ {"Ssl_connect_renegotiates", (char*) &ssl_acceptor_stats.zero, SHOW_LONG},
+ {"Ssl_ctx_verify_depth", (char*) &ssl_acceptor_stats.verify_depth, SHOW_LONG},
+ {"Ssl_ctx_verify_mode", (char*) &ssl_acceptor_stats.verify_mode, SHOW_LONG},
{"Ssl_default_timeout", (char*) &show_ssl_get_default_timeout, SHOW_SIMPLE_FUNC},
- {"Ssl_finished_accepts", (char*) &show_ssl_ctx_sess_accept_good, SHOW_SIMPLE_FUNC},
- {"Ssl_finished_connects", (char*) &show_ssl_ctx_sess_connect_good, SHOW_SIMPLE_FUNC},
+ {"Ssl_finished_accepts", (char*) &ssl_acceptor_stats.accept_good, SHOW_LONG},
+ {"Ssl_finished_connects", (char*) &ssl_acceptor_stats.zero, SHOW_LONG},
{"Ssl_server_not_after", (char*) &show_ssl_get_server_not_after, SHOW_SIMPLE_FUNC},
{"Ssl_server_not_before", (char*) &show_ssl_get_server_not_before, SHOW_SIMPLE_FUNC},
- {"Ssl_session_cache_hits", (char*) &show_ssl_ctx_sess_hits, SHOW_SIMPLE_FUNC},
- {"Ssl_session_cache_misses", (char*) &show_ssl_ctx_sess_misses, SHOW_SIMPLE_FUNC},
- {"Ssl_session_cache_mode", (char*) &show_ssl_ctx_get_session_cache_mode, SHOW_SIMPLE_FUNC},
- {"Ssl_session_cache_overflows", (char*) &show_ssl_ctx_sess_cache_full, SHOW_SIMPLE_FUNC},
- {"Ssl_session_cache_size", (char*) &show_ssl_ctx_sess_get_cache_size, SHOW_SIMPLE_FUNC},
- {"Ssl_session_cache_timeouts", (char*) &show_ssl_ctx_sess_timeouts, SHOW_SIMPLE_FUNC},
- {"Ssl_sessions_reused", (char*) &show_ssl_session_reused, SHOW_SIMPLE_FUNC},
- {"Ssl_used_session_cache_entries",(char*) &show_ssl_ctx_sess_number, SHOW_SIMPLE_FUNC},
+ {"Ssl_session_cache_hits", (char*) &ssl_acceptor_stats.zero, SHOW_LONG},
+ {"Ssl_session_cache_misses", (char*) &ssl_acceptor_stats.zero, SHOW_LONG},
+ {"Ssl_session_cache_mode", (char*) &ssl_acceptor_stats.session_cache_mode, SHOW_CHAR_PTR},
+ {"Ssl_session_cache_overflows", (char*) &ssl_acceptor_stats.zero, SHOW_LONG},
+ {"Ssl_session_cache_size", (char*) &ssl_acceptor_stats.cache_size, SHOW_LONG},
+ {"Ssl_session_cache_timeouts", (char*) &ssl_acceptor_stats.zero, SHOW_LONG},
+ {"Ssl_sessions_reused", (char*) &ssl_acceptor_stats.zero, SHOW_LONG},
+ {"Ssl_used_session_cache_entries",(char*) &ssl_acceptor_stats.zero, SHOW_LONG},
{"Ssl_verify_depth", (char*) &show_ssl_get_verify_depth, SHOW_SIMPLE_FUNC},
{"Ssl_verify_mode", (char*) &show_ssl_get_verify_mode, SHOW_SIMPLE_FUNC},
{"Ssl_version", (char*) &show_ssl_get_version, SHOW_SIMPLE_FUNC},
@@ -8765,6 +7839,20 @@ SHOW_VAR status_vars[]= {
{"Uptime_since_flush_status",(char*) &show_flushstatustime, SHOW_SIMPLE_FUNC},
#endif
#ifdef WITH_WSREP
+ {"wsrep_connected", (char*) &wsrep_connected, SHOW_BOOL},
+ {"wsrep_ready", (char*) &wsrep_show_ready, SHOW_FUNC},
+ {"wsrep_cluster_state_uuid",(char*) &wsrep_cluster_state_uuid,SHOW_CHAR_PTR},
+ {"wsrep_cluster_conf_id", (char*) &wsrep_cluster_conf_id, SHOW_LONGLONG},
+ {"wsrep_cluster_status", (char*) &wsrep_cluster_status, SHOW_CHAR_PTR},
+ {"wsrep_cluster_size", (char*) &wsrep_cluster_size, SHOW_LONG_NOFLUSH},
+ {"wsrep_local_index", (char*) &wsrep_local_index, SHOW_LONG_NOFLUSH},
+ {"wsrep_local_bf_aborts", (char*) &wsrep_show_bf_aborts, SHOW_FUNC},
+ {"wsrep_provider_name", (char*) &wsrep_provider_name, SHOW_CHAR_PTR},
+ {"wsrep_provider_version", (char*) &wsrep_provider_version, SHOW_CHAR_PTR},
+ {"wsrep_provider_vendor", (char*) &wsrep_provider_vendor, SHOW_CHAR_PTR},
+ {"wsrep_provider_capabilities", (char*) &wsrep_provider_capabilities, SHOW_CHAR_PTR},
+ {"wsrep_thread_count", (char*) &wsrep_running_threads, SHOW_LONG_NOFLUSH},
+ {"wsrep_cluster_capabilities", (char*) &wsrep_cluster_capabilities, SHOW_CHAR_PTR},
{"wsrep", (char*) &wsrep_show_status, SHOW_FUNC},
#endif
{NullS, NullS, SHOW_LONG}
@@ -8929,17 +8017,15 @@ static int mysql_init_variables(void)
opt_bootstrap= opt_myisam_log= 0;
disable_log_notes= 0;
mqh_used= 0;
- kill_in_progress= 0;
cleanup_done= 0;
test_flags= select_errors= dropping_tables= ha_open_options=0;
thread_count= kill_cached_threads= wake_thread= 0;
- service_thread_count= 0;
slave_open_temp_tables= 0;
cached_thread_count= 0;
opt_endinfo= using_udf_functions= 0;
opt_using_transactions= 0;
abort_loop= select_thread_in_use= signal_thread_in_use= 0;
- ready_to_exit= shutdown_in_progress= grant_option= 0;
+ grant_option= 0;
aborted_threads= aborted_connects= 0;
subquery_cache_miss= subquery_cache_hit= 0;
delayed_insert_threads= delayed_insert_writes= delayed_rows_in_use= 0;
@@ -8969,7 +8055,9 @@ static int mysql_init_variables(void)
character_set_filesystem= &my_charset_bin;
opt_specialflag= SPECIAL_ENGLISH;
+#ifndef EMBEDDED_LIBRARY
unix_sock= base_ip_sock= extra_ip_sock= MYSQL_INVALID_SOCKET;
+#endif
mysql_home_ptr= mysql_home;
log_error_file_ptr= log_error_file;
protocol_version= PROTOCOL_VERSION;
@@ -8979,7 +8067,6 @@ static int mysql_init_variables(void)
global_query_id= 1;
global_thread_id= 0;
strnmov(server_version, MYSQL_SERVER_VERSION, sizeof(server_version)-1);
- threads.empty();
thread_cache.empty();
key_caches.empty();
if (!(dflt_key_cache= get_or_create_key_cache(default_key_cache_base.str,
@@ -9083,9 +8170,6 @@ static int mysql_init_variables(void)
ssl_acceptor_fd= 0;
#endif /* ! EMBEDDED_LIBRARY */
#endif /* HAVE_OPENSSL */
-#ifdef HAVE_SMEM
- shared_memory_base_name= default_shared_memory_base_name;
-#endif
#if defined(__WIN__)
/* Allow Win32 users to move MySQL anywhere */
@@ -9296,7 +8380,7 @@ mysqld_get_one_option(int optid, const struct my_option *opt, char *argument)
if (!(p= strstr(argument, "->")))
{
- sql_print_error("Bad syntax in replicate-rewrite-db - missing '->'!\n");
+ sql_print_error("Bad syntax in replicate-rewrite-db - missing '->'!");
return 1;
}
val= p--;
@@ -9305,7 +8389,7 @@ mysqld_get_one_option(int optid, const struct my_option *opt, char *argument)
/* Db name can be one char also */
if (p == argument && my_isspace(mysqld_charset, *p))
{
- sql_print_error("Bad syntax in replicate-rewrite-db - empty FROM db!\n");
+ sql_print_error("Bad syntax in replicate-rewrite-db - empty FROM db!");
return 1;
}
*val= 0;
@@ -9314,7 +8398,7 @@ mysqld_get_one_option(int optid, const struct my_option *opt, char *argument)
val++;
if (!*val)
{
- sql_print_error("Bad syntax in replicate-rewrite-db - empty TO db!\n");
+ sql_print_error("Bad syntax in replicate-rewrite-db - empty TO db!");
return 1;
}
@@ -9343,7 +8427,7 @@ mysqld_get_one_option(int optid, const struct my_option *opt, char *argument)
{
if (cur_rpl_filter->add_do_table(argument))
{
- sql_print_error("Could not add do table rule '%s'!\n", argument);
+ sql_print_error("Could not add do table rule '%s'!", argument);
return 1;
}
break;
@@ -9352,7 +8436,7 @@ mysqld_get_one_option(int optid, const struct my_option *opt, char *argument)
{
if (cur_rpl_filter->add_wild_do_table(argument))
{
- sql_print_error("Could not add do table rule '%s'!\n", argument);
+ sql_print_error("Could not add do table rule '%s'!", argument);
return 1;
}
break;
@@ -9361,7 +8445,7 @@ mysqld_get_one_option(int optid, const struct my_option *opt, char *argument)
{
if (cur_rpl_filter->add_wild_ignore_table(argument))
{
- sql_print_error("Could not add ignore table rule '%s'!\n", argument);
+ sql_print_error("Could not add ignore table rule '%s'!", argument);
return 1;
}
break;
@@ -9370,7 +8454,7 @@ mysqld_get_one_option(int optid, const struct my_option *opt, char *argument)
{
if (cur_rpl_filter->add_ignore_table(argument))
{
- sql_print_error("Could not add ignore table rule '%s'!\n", argument);
+ sql_print_error("Could not add ignore table rule '%s'!", argument);
return 1;
}
break;
@@ -9727,7 +8811,7 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
if (ft_boolean_check_syntax_string((uchar*) ft_boolean_syntax))
{
- sql_print_error("Invalid ft-boolean-syntax string: %s\n",
+ sql_print_error("Invalid ft-boolean-syntax string: %s",
ft_boolean_syntax);
return 1;
}
@@ -9829,10 +8913,10 @@ static int get_options(int *argc_ptr, char ***argv_ptr)
errors.
*/
if (global_system_variables.log_warnings >= 10)
- my_global_flags= MY_WME | ME_JUST_INFO;
+ my_global_flags= MY_WME | ME_NOTE;
/* Log all errors not handled by thd->handle_error() to my_message_sql() */
if (global_system_variables.log_warnings >= 11)
- my_global_flags|= ME_NOREFRESH;
+ my_global_flags|= ME_ERROR_LOG;
if (my_assert_on_error)
debug_assert_if_crashed_table= 1;
@@ -10229,7 +9313,9 @@ void refresh_status(THD *thd)
reset_status_vars();
#ifdef WITH_WSREP
if (WSREP_ON)
- wsrep->stats_reset(wsrep);
+ {
+ Wsrep_server_state::instance().provider().reset_status();
+ }
#endif /* WITH_WSREP */
/* Reset the counters of all key caches (default and named). */
@@ -10689,6 +9775,14 @@ static my_thread_id thread_id_max= UINT_MAX32;
@param[out] low - lower bound for the range
@param[out] high - upper bound for the range
*/
+
+static my_bool recalculate_callback(THD *thd, std::vector<my_thread_id> *ids)
+{
+ ids->push_back(thd->thread_id);
+ return 0;
+}
+
+
static void recalculate_thread_id_range(my_thread_id *low, my_thread_id *high)
{
std::vector<my_thread_id> ids;
@@ -10696,15 +9790,7 @@ static void recalculate_thread_id_range(my_thread_id *low, my_thread_id *high)
// Add sentinels
ids.push_back(0);
ids.push_back(UINT_MAX32);
-
- mysql_mutex_lock(&LOCK_thread_count);
-
- I_List_iterator<THD> it(threads);
- THD *thd;
- while ((thd=it++))
- ids.push_back(thd->thread_id);
-
- mysql_mutex_unlock(&LOCK_thread_count);
+ server_threads.iterate(recalculate_callback, &ids);
std::sort(ids.begin(), ids.end());
my_thread_id max_gap= 0;
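server_threads.iterate() replaces the manual LOCK_thread_count / I_List<THD> walk; judging from recalculate_callback() above, it takes a callback plus an opaque argument, and returning 0 keeps the iteration going (that return convention is an assumption here). Another callback of the same shape, counting live connections:

    /* Illustrative only; assumes the callback contract described above. */
    static my_bool count_callback(THD *thd __attribute__((unused)), uint *count)
    {
      (*count)++;
      return 0;
    }

    static uint count_server_threads()
    {
      uint count= 0;
      server_threads.iterate(count_callback, &count);
      return count;
    }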
diff --git a/sql/mysqld.h b/sql/mysqld.h
index 9cb0a0fda39..b8c6e5f79bb 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -23,7 +23,9 @@
#include "my_decimal.h" /* my_decimal */
#include "mysql_com.h" /* SERVER_VERSION_LENGTH */
#include "my_atomic.h"
+#include "my_counter.h"
#include "mysql/psi/mysql_file.h" /* MYSQL_FILE */
+#include "mysql/psi/mysql_socket.h" /* MYSQL_SOCKET */
#include "sql_list.h" /* I_List */
#include "sql_cmd.h"
#include <my_rnd.h>
@@ -80,11 +82,10 @@ enum enum_slave_parallel_mode {
};
/* Function prototypes */
-void kill_mysql(THD *thd= 0);
+void kill_mysql(THD *thd);
void close_connection(THD *thd, uint sql_errno= 0);
void handle_connection_in_main_thread(CONNECT *thd);
void create_thread_to_handle_connection(CONNECT *connect);
-void signal_thd_deleted();
void unlink_thd(THD *thd);
bool one_thread_per_connection_end(THD *thd, bool put_in_cache);
void flush_thread_cache();
@@ -92,6 +93,11 @@ void refresh_status(THD *thd);
bool is_secure_file_path(char *path);
void dec_connection_count(scheduler_functions *scheduler);
extern void init_net_server_extension(THD *thd);
+extern void handle_accepted_socket(MYSQL_SOCKET new_sock, MYSQL_SOCKET sock);
+extern void create_new_thread(CONNECT *connect);
+
+extern void ssl_acceptor_stats_update(int sslaccept_ret);
+extern int reinit_ssl();
extern "C" MYSQL_PLUGIN_IMPORT CHARSET_INFO *system_charset_info;
extern MYSQL_PLUGIN_IMPORT CHARSET_INFO *files_charset_info ;
@@ -121,7 +127,6 @@ extern bool opt_ignore_builtin_innodb;
extern my_bool opt_character_set_client_handshake;
extern my_bool debug_assert_on_not_freed_memory;
extern bool volatile abort_loop;
-extern bool volatile in_bootstrap;
extern uint connection_count;
extern my_bool opt_safe_user_create;
extern my_bool opt_safe_show_db, opt_local_infile, opt_myisam_use_mmap;
@@ -152,6 +157,7 @@ extern ulong opt_replicate_events_marked_for_skip;
extern char *default_tz_name;
extern Time_zone *default_tz;
extern char *my_bind_addr_str;
+extern int server_socket_ai_family;
extern char *default_storage_engine, *default_tmp_storage_engine;
extern char *enforced_storage_engine;
extern char *gtid_pos_auto_engines;
@@ -230,6 +236,7 @@ extern ulong slow_launch_threads, slow_launch_time;
extern MYSQL_PLUGIN_IMPORT ulong max_connections;
extern uint max_digest_length;
extern ulong max_connect_errors, connect_timeout;
+extern uint max_password_errors;
extern my_bool slave_allow_batching;
extern my_bool allow_slave_start;
extern LEX_CSTRING reason_slave_blocked;
@@ -246,7 +253,7 @@ extern ulonglong max_binlog_cache_size, max_binlog_stmt_cache_size;
extern ulong max_binlog_size;
extern ulong slave_max_allowed_packet;
extern ulong opt_binlog_rows_event_max_size;
-extern ulong rpl_recovery_rank, thread_cache_size;
+extern ulong thread_cache_size;
extern ulong stored_program_cache_size;
extern ulong opt_slave_parallel_threads;
extern ulong opt_slave_domain_parallel_threads;
@@ -255,6 +262,7 @@ extern ulong opt_slave_parallel_mode;
extern ulong opt_binlog_commit_wait_count;
extern ulong opt_binlog_commit_wait_usec;
extern my_bool opt_gtid_ignore_duplicates;
+extern uint opt_gtid_cleanup_batch_size;
extern ulong back_log;
extern ulong executed_events;
extern char language[FN_REFLEN];
@@ -284,11 +292,8 @@ extern int mysqld_server_started, mysqld_server_initialized;
extern "C" MYSQL_PLUGIN_IMPORT int orig_argc;
extern "C" MYSQL_PLUGIN_IMPORT char **orig_argv;
extern pthread_attr_t connection_attrib;
-extern MYSQL_FILE *bootstrap_file;
extern my_bool old_mode;
extern LEX_STRING opt_init_connect, opt_init_slave;
-extern int bootstrap_error;
-extern I_List<THD> threads;
extern char err_shared_dir[];
extern ulong connection_errors_select;
extern ulong connection_errors_accept;
@@ -302,6 +307,8 @@ extern my_bool encrypt_tmp_disk_tables, encrypt_tmp_files;
extern ulong encryption_algorithm;
extern const char *encryption_algorithm_names[];
extern long opt_secure_timestamp;
+extern uint default_password_lifetime;
+extern my_bool disconnect_on_expired_password;
enum secure_timestamp { SECTIME_NO, SECTIME_SUPER, SECTIME_REPL, SECTIME_YES };
@@ -325,7 +332,7 @@ extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_LOCK_logger, key_LOCK_manager,
key_LOCK_prepared_stmt_count,
key_LOCK_rpl_status, key_LOCK_server_started,
- key_LOCK_status, key_LOCK_show_status,
+ key_LOCK_status,
key_LOCK_thd_data, key_LOCK_thd_kill,
key_LOCK_user_conn, key_LOG_LOCK_log,
key_master_info_data_lock, key_master_info_run_lock,
@@ -335,7 +342,8 @@ extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
key_rpl_group_info_sleep_lock,
key_structure_guard_mutex, key_TABLE_SHARE_LOCK_ha_data,
key_LOCK_start_thread,
- key_LOCK_error_messages, key_LOCK_thread_count, key_PARTITION_LOCK_auto_inc;
+ key_LOCK_error_messages,
+ key_PARTITION_LOCK_auto_inc;
extern PSI_mutex_key key_RELAYLOG_LOCK_index;
extern PSI_mutex_key key_LOCK_relaylog_end_pos;
extern PSI_mutex_key key_LOCK_slave_state, key_LOCK_binlog_state,
@@ -351,7 +359,8 @@ extern PSI_rwlock_key key_rwlock_LOCK_grant, key_rwlock_LOCK_logger,
key_rwlock_LOCK_sys_init_connect, key_rwlock_LOCK_sys_init_slave,
key_rwlock_LOCK_system_variables_hash, key_rwlock_query_cache_query_lock,
key_LOCK_SEQUENCE,
- key_rwlock_LOCK_vers_stats, key_rwlock_LOCK_stat_serial;
+ key_rwlock_LOCK_vers_stats, key_rwlock_LOCK_stat_serial,
+ key_rwlock_THD_list;
#ifdef HAVE_MMAP
extern PSI_cond_key key_PAGE_cond, key_COND_active, key_COND_pool;
@@ -371,7 +380,7 @@ extern PSI_cond_key key_BINLOG_COND_xid_list, key_BINLOG_update_cond,
key_rpl_group_info_sleep_cond,
key_TABLE_SHARE_cond, key_user_level_lock_cond,
key_COND_start_thread,
- key_COND_thread_count, key_COND_thread_cache, key_COND_flush_thread_cache;
+ key_COND_thread_cache, key_COND_flush_thread_cache;
extern PSI_cond_key key_RELAYLOG_COND_relay_log_updated,
key_RELAYLOG_COND_bin_log_updated, key_COND_wakeup_ready,
key_COND_wait_commit;
@@ -383,7 +392,7 @@ extern PSI_cond_key key_COND_rpl_thread, key_COND_rpl_thread_queue,
extern PSI_cond_key key_COND_wait_gtid, key_COND_gtid_ignore_duplicates;
extern PSI_cond_key key_TABLE_SHARE_COND_rotation;
-extern PSI_thread_key key_thread_bootstrap, key_thread_delayed_insert,
+extern PSI_thread_key key_thread_delayed_insert,
key_thread_handle_manager, key_thread_kill_server, key_thread_main,
key_thread_one_connection, key_thread_signal_hand,
key_thread_slave_background, key_rpl_parallel_thread;
@@ -609,14 +618,14 @@ extern MYSQL_PLUGIN_IMPORT key_map key_map_full; /* Should be threaded
Server mutex locks and condition variables.
*/
extern mysql_mutex_t
- LOCK_item_func_sleep, LOCK_status, LOCK_show_status,
+ LOCK_item_func_sleep, LOCK_status,
LOCK_error_log, LOCK_delayed_insert, LOCK_short_uuid_generator,
LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone,
- LOCK_slave_list, LOCK_active_mi, LOCK_manager,
+ LOCK_active_mi, LOCK_manager,
LOCK_global_system_variables, LOCK_user_conn,
LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count,
LOCK_slave_background;
-extern MYSQL_PLUGIN_IMPORT mysql_mutex_t LOCK_thread_count;
+extern mysql_rwlock_t LOCK_all_status_vars;
extern mysql_mutex_t LOCK_start_thread;
#ifdef HAVE_OPENSSL
extern char* des_key_file;
@@ -625,11 +634,12 @@ extern mysql_mutex_t LOCK_des_key_file;
extern mysql_mutex_t LOCK_server_started;
extern mysql_cond_t COND_server_started;
extern mysql_rwlock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave;
+extern mysql_rwlock_t LOCK_ssl_refresh;
extern mysql_prlock_t LOCK_system_variables_hash;
-extern mysql_cond_t COND_thread_count, COND_start_thread;
+extern mysql_cond_t COND_start_thread;
extern mysql_cond_t COND_manager;
extern mysql_cond_t COND_slave_background;
-extern int32 thread_count, service_thread_count;
+extern Atomic_counter<uint32_t> thread_count;
extern char *opt_ssl_ca, *opt_ssl_capath, *opt_ssl_cert, *opt_ssl_cipher,
*opt_ssl_key, *opt_ssl_crl, *opt_ssl_crlpath;
@@ -732,6 +742,8 @@ enum enum_query_type
/// SHOW CREATE {VIEW|PROCEDURE|FUNCTION} and other cases where the
/// original representation is required, should set this flag.
QT_ITEM_ORIGINAL_FUNC_NULLIF= (1 << 7),
+ /// Print in a form that can be parsed back
+ QT_PARSABLE= (1 << 8),
/// This value means focus on readability, not on ability to parse back, etc.
QT_EXPLAIN= QT_TO_SYSTEM_CHARSET |
@@ -757,8 +769,6 @@ enum enum_query_type
/* query_id */
extern query_id_t global_query_id;
-ATTRIBUTE_NORETURN void unireg_end(void);
-
/* increment query_id and return it. */
inline __attribute__((warn_unused_result)) query_id_t next_query_id()
{
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index b19504cff22..ed9cd541f70 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -119,6 +119,7 @@
#include "sql_select.h"
#include "sql_statistics.h"
#include "uniques.h"
+#include "my_json_writer.h"
#ifndef EXTRA_DEBUG
#define test_rb_tree(A,B) {}
@@ -429,6 +430,18 @@ static int and_range_trees(RANGE_OPT_PARAM *param,
SEL_TREE *result);
static bool remove_nonrange_trees(RANGE_OPT_PARAM *param, SEL_TREE *tree);
+static void print_key_value(String *out, const KEY_PART_INFO *key_part,
+ const uchar *key);
+
+static void append_range_all_keyparts(Json_writer_array *range_trace,
+ String *range_string,
+ String *range_so_far, const SEL_ARG *keypart,
+ const KEY_PART_INFO *key_parts);
+
+static
+void append_range(String *out, const KEY_PART_INFO *key_parts,
+ const uchar *min_key, const uchar *max_key, const uint flag);
+
/*
SEL_IMERGE is a list of possible ways to do index merge, i.e. it is
@@ -662,7 +675,7 @@ int SEL_IMERGE::or_sel_tree_with_checks(RANGE_OPT_PARAM *param,
{
bool was_ored= FALSE;
*is_last_check_pass= is_first_check_pass;
- SEL_TREE** or_tree = trees;
+ SEL_TREE** or_tree= trees;
for (uint i= 0; i < n_trees; i++, or_tree++)
{
SEL_TREE *result= 0;
@@ -859,7 +872,7 @@ SEL_IMERGE::SEL_IMERGE(SEL_IMERGE *arg, uint cnt,
trees_next= trees + (cnt ? cnt : arg->trees_next-arg->trees);
trees_end= trees + elements;
- for (SEL_TREE **tree = trees, **arg_tree= arg->trees; tree < trees_next;
+ for (SEL_TREE **tree= trees, **arg_tree= arg->trees; tree < trees_next;
tree++, arg_tree++)
{
if (!(*tree= new SEL_TREE(*arg_tree, TRUE, param)))
@@ -1477,7 +1490,6 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler,
{
handler *save_file= file, *org_file;
THD *thd= head->in_use;
- MY_BITMAP * const save_vcol_set= head->vcol_set;
MY_BITMAP * const save_read_set= head->read_set;
MY_BITMAP * const save_write_set= head->write_set;
DBUG_ENTER("QUICK_RANGE_SELECT::init_ror_merged_scan");
@@ -1535,14 +1547,14 @@ end:
org_file= head->file;
head->file= file;
- head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap, &column_bitmap);
+ head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap);
head->prepare_for_keyread(index, &column_bitmap);
head->prepare_for_position();
head->file= org_file;
/* Restore head->read_set (and write_set) to what they had before the call */
- head->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
+ head->column_bitmaps_set(save_read_set, save_write_set);
if (reset())
{
@@ -1557,7 +1569,7 @@ end:
DBUG_RETURN(0);
failure:
- head->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
+ head->column_bitmaps_set(save_read_set, save_write_set);
delete file;
file= save_file;
free_file= false;
@@ -1892,6 +1904,118 @@ SEL_ARG::SEL_ARG(Field *field_,uint8 part_,
left=right= &null_element;
}
+
+/*
+ A number of helper classes:
+ SEL_ARG_LE, SEL_ARG_LT, SEL_ARG_GT, SEL_ARG_GE,
+ to share the code between:
+ Field::stored_field_make_mm_leaf()
+ Field::stored_field_make_mm_leaf_exact()
+*/
+class SEL_ARG_LE: public SEL_ARG
+{
+public:
+ SEL_ARG_LE(const uchar *key, Field *field)
+ :SEL_ARG(field, key, key)
+ {
+ if (!field->real_maybe_null())
+ min_flag= NO_MIN_RANGE; // From start
+ else
+ {
+ min_value= is_null_string;
+ min_flag= NEAR_MIN; // > NULL
+ }
+ }
+};
+
+
+class SEL_ARG_LT: public SEL_ARG_LE
+{
+public:
+ /*
+ Use this constructor if value->save_in_field() went precisely,
+ without any data rounding or truncation.
+ */
+ SEL_ARG_LT(const uchar *key, Field *field)
+ :SEL_ARG_LE(key, field)
+ { max_flag= NEAR_MAX; }
+ /*
+ Use this constructor if value->save_in_field() returned success,
+ but we don't know if rounding or truncation happened
+ (as some Field::store() do not report minor data changes).
+ */
+ SEL_ARG_LT(THD *thd, const uchar *key, Field *field, Item *value)
+ :SEL_ARG_LE(key, field)
+ {
+ if (stored_field_cmp_to_item(thd, field, value) == 0)
+ max_flag= NEAR_MAX;
+ }
+};
+
+
+class SEL_ARG_GT: public SEL_ARG
+{
+public:
+ /*
+ Use this constructor if value->save_in_field() went precisely,
+ without any data rounding or truncation.
+ */
+ SEL_ARG_GT(const uchar *key, const KEY_PART *key_part, Field *field)
+ :SEL_ARG(field, key, key)
+ {
+ // Don't use open ranges for partial key_segments
+ if (!(key_part->flag & HA_PART_KEY_SEG))
+ min_flag= NEAR_MIN;
+ max_flag= NO_MAX_RANGE;
+ }
+ /*
+ Use this constructor if value->save_in_field() returned success,
+ but we don't know if rounding or truncation happened
+ (as some Field::store() do not report minor data changes).
+ */
+ SEL_ARG_GT(THD *thd, const uchar *key,
+ const KEY_PART *key_part, Field *field, Item *value)
+ :SEL_ARG(field, key, key)
+ {
+ // Don't use open ranges for partial key_segments
+ if ((!(key_part->flag & HA_PART_KEY_SEG)) &&
+ (stored_field_cmp_to_item(thd, field, value) <= 0))
+ min_flag= NEAR_MIN;
+ max_flag= NO_MAX_RANGE;
+ }
+};
+
+
+class SEL_ARG_GE: public SEL_ARG
+{
+public:
+ /*
+ Use this constructor if value->save_in_field() went precisely,
+ without any data rounding or truncation.
+ */
+ SEL_ARG_GE(const uchar *key, Field *field)
+ :SEL_ARG(field, key, key)
+ {
+ max_flag= NO_MAX_RANGE;
+ }
+ /*
+ Use this constructor if value->save_in_field() returned success,
+ but we don't know if rounding or truncation happened
+ (as some Field::store() do not report minor data changes).
+ */
+ SEL_ARG_GE(THD *thd, const uchar *key,
+ const KEY_PART *key_part, Field *field, Item *value)
+ :SEL_ARG(field, key, key)
+ {
+ // Don't use open ranges for partial key_segments
+ if ((!(key_part->flag & HA_PART_KEY_SEG)) &&
+ (stored_field_cmp_to_item(thd, field, value) < 0))
+ min_flag= NEAR_MIN;
+ max_flag= NO_MAX_RANGE;
+ }
+};
+
+
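The helper classes above encode the open/closed endpoints for <, <=, > and >=. As a rough sketch of how the Field::stored_field_make_mm_leaf() dispatch mentioned in the comment might choose among them (SCALAR_CMP_LT/LE/GT/GE are assumed enumerator names; only SCALAR_CMP_EQ and SCALAR_CMP_EQUAL appear in the hunks below):

// Illustration only -- not the code this patch actually adds.
static SEL_ARG *make_leaf_sketch(THD *thd, MEM_ROOT *mem_root, Field *field,
                                 const KEY_PART *key_part, const uchar *key,
                                 Item *value, scalar_comparison_op op)
{
  switch (op) {
  case SCALAR_CMP_LT: return new (mem_root) SEL_ARG_LT(thd, key, field, value);
  case SCALAR_CMP_LE: return new (mem_root) SEL_ARG_LE(key, field);
  case SCALAR_CMP_GT: return new (mem_root) SEL_ARG_GT(thd, key, key_part, field, value);
  case SCALAR_CMP_GE: return new (mem_root) SEL_ARG_GE(thd, key, key_part, field, value);
  default:            return new (mem_root) SEL_ARG(field, key, key); // =, <=>
  }
}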
SEL_ARG *SEL_ARG::clone(RANGE_OPT_PARAM *param, SEL_ARG *new_parent,
SEL_ARG **next_arg)
{
@@ -2078,6 +2202,14 @@ public:
static void operator delete(void *ptr,size_t size) { TRASH_FREE(ptr, size); }
static void operator delete(void *ptr, MEM_ROOT *mem_root) { /* Never called */ }
virtual ~TABLE_READ_PLAN() {} /* Remove gcc warning */
+ /**
+ Add basic info for this TABLE_READ_PLAN to the optimizer trace.
+
+ @param param Parameters for range analysis of this table
+ @param trace_object The optimizer trace object the info is appended to
+ */
+ virtual void trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const= 0;
};
@@ -2119,8 +2251,34 @@ public:
}
DBUG_RETURN(quick);
}
+ void trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const;
};
+void TRP_RANGE::trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const
+{
+ DBUG_ASSERT(param->using_real_indexes);
+ const uint keynr_in_table= param->real_keynr[key_idx];
+
+ const KEY &cur_key= param->table->key_info[keynr_in_table];
+ const KEY_PART_INFO *key_part= cur_key.key_part;
+
+ trace_object->add("type", "range_scan")
+ .add("index", cur_key.name)
+ .add("rows", records);
+
+ Json_writer_array trace_range(param->thd, "ranges");
+
+ // TRP_RANGE should not be created if there are no range intervals
+ DBUG_ASSERT(key);
+
+ String range_info;
+ range_info.length(0);
+ range_info.set_charset(system_charset_info);
+ append_range_all_keyparts(&trace_range, NULL, &range_info, key, key_part);
+}
+
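All of the trace_basic_info() implementations added in this file rely on the RAII convention of Json_writer_object / Json_writer_array: each one opens a JSON node when constructed and closes it when destroyed (or when end() is called explicitly), so C++ scope nesting mirrors the nesting of the emitted trace. A minimal sketch using only the calls already exercised above:

void trace_sketch(THD *thd, const KEY &key, ha_rows rows)
{
  Json_writer_object obj(thd);                // opens an anonymous "{ ... }"
  obj.add("type", "range_scan")
     .add("index", key.name)
     .add("rows", rows);
  Json_writer_array ranges(thd, "ranges");    // opens "ranges": [ ... ]
  // range strings would be appended here, e.g. by append_range_all_keyparts()
}                                             // destructors close the array and the object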
/* Plan for QUICK_ROR_INTERSECT_SELECT scan. */
@@ -2138,9 +2296,12 @@ public:
struct st_ror_scan_info *cpk_scan; /* Clustered PK scan, if there is one */
bool is_covering; /* TRUE if no row retrieval phase is necessary */
double index_scan_costs; /* SUM(cost(index_scan)) */
+ void trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const;
};
+
/*
Plan for QUICK_ROR_UNION_SELECT scan.
QUICK_ROR_UNION_SELECT always retrieves full rows, so retrieve_full_rows
@@ -2156,8 +2317,22 @@ public:
MEM_ROOT *parent_alloc);
TABLE_READ_PLAN **first_ror; /* array of ptrs to plans for merged scans */
TABLE_READ_PLAN **last_ror; /* end of the above array */
+ void trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const;
};
+void TRP_ROR_UNION::trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const
+{
+ THD *thd= param->thd;
+ trace_object->add("type", "index_roworder_union");
+ Json_writer_array smth_trace(thd, "union_of");
+ for (TABLE_READ_PLAN **current= first_ror; current != last_ror; current++)
+ {
+ Json_writer_object trp_info(thd);
+ (*current)->trace_basic_info(param, &trp_info);
+ }
+}
/*
Plan for QUICK_INDEX_INTERSECT_SELECT scan.
@@ -2175,9 +2350,25 @@ public:
TRP_RANGE **range_scans; /* array of ptrs to plans of intersected scans */
TRP_RANGE **range_scans_end; /* end of the array */
/* keys whose scans are to be filtered by cpk conditions */
- key_map filtered_scans;
+ key_map filtered_scans;
+ void trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const;
+
};
+void TRP_INDEX_INTERSECT::trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const
+{
+ THD *thd= param->thd;
+ trace_object->add("type", "index_sort_intersect");
+ Json_writer_array smth_trace(thd, "index_sort_intersect_of");
+ for (TRP_RANGE **current= range_scans; current != range_scans_end;
+ current++)
+ {
+ Json_writer_object trp_info(thd);
+ (*current)->trace_basic_info(param, &trp_info);
+ }
+}
/*
Plan for QUICK_INDEX_MERGE_SELECT scan.
@@ -2194,8 +2385,22 @@ public:
MEM_ROOT *parent_alloc);
TRP_RANGE **range_scans; /* array of ptrs to plans of merged scans */
TRP_RANGE **range_scans_end; /* end of the array */
+ void trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const;
};
+void TRP_INDEX_MERGE::trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const
+{
+ THD *thd= param->thd;
+ trace_object->add("type", "index_merge");
+ Json_writer_array smth_trace(thd, "index_merge_of");
+ for (TRP_RANGE **current= range_scans; current != range_scans_end; current++)
+ {
+ Json_writer_object trp_info(thd);
+ (*current)->trace_basic_info(param, &trp_info);
+ }
+}
/*
Plan for a QUICK_GROUP_MIN_MAX_SELECT scan.
@@ -2247,9 +2452,51 @@ public:
QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows,
MEM_ROOT *parent_alloc);
void use_index_scan() { is_index_scan= TRUE; }
+ void trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const;
};
+void TRP_GROUP_MIN_MAX::trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const
+{
+ THD *thd= param->thd;
+ trace_object->add("type", "index_group").add("index", index_info->name);
+
+ if (min_max_arg_part)
+ trace_object->add("min_max_arg", min_max_arg_part->field->field_name);
+ else
+ trace_object->add_null("min_max_arg");
+
+ trace_object->add("min_aggregate", have_min)
+ .add("max_aggregate", have_max)
+ .add("distinct_aggregate", have_agg_distinct)
+ .add("rows", records)
+ .add("cost", read_cost);
+
+ const KEY_PART_INFO *key_part= index_info->key_part;
+ {
+ Json_writer_array trace_keyparts(thd, "key_parts_used_for_access");
+ for (uint partno= 0; partno < used_key_parts; partno++)
+ {
+ const KEY_PART_INFO *cur_key_part= key_part + partno;
+ trace_keyparts.add(cur_key_part->field->field_name);
+ }
+ }
+
+ Json_writer_array trace_range(thd, "ranges");
+
+ // can have group quick without ranges
+ if (index_tree)
+ {
+ String range_info;
+ range_info.set_charset(system_charset_info);
+ append_range_all_keyparts(&trace_range, NULL, &range_info, index_tree,
+ key_part);
+ }
+}
+
+
typedef struct st_index_scan_info
{
uint idx; /* # of used key in param->keys */
@@ -2337,6 +2584,9 @@ static int fill_used_fields_bitmap(PARAM *param)
limit Query limit
force_quick_range Prefer to use range (instead of full table scan) even
if it is more expensive.
+ remove_false_parts_of_where Remove parts of OR-clauses for which range
+ analysis produced SEL_TREE(IMPOSSIBLE)
+ only_single_index_range_scan Evaluate only single index range scans
NOTES
Updates the following in the select parameter:
@@ -2395,7 +2645,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
table_map prev_tables,
ha_rows limit, bool force_quick_range,
bool ordered_output,
- bool remove_false_parts_of_where)
+ bool remove_false_parts_of_where,
+ bool only_single_index_range_scan)
{
uint idx;
double scan_time;
@@ -2408,6 +2659,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
quick=0;
needed_reg.clear_all();
quick_keys.clear_all();
+ head->with_impossible_ranges.clear_all();
DBUG_ASSERT(!head->is_filled_at_execution());
if (keys_to_use.is_clear_all() || head->is_filled_at_execution())
DBUG_RETURN(0);
@@ -2425,6 +2677,17 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_PRINT("info",("Time to scan table: %g", read_time));
+ Json_writer_object table_records(thd);
+ if (head->reginfo.join_tab)
+ table_records.add_table_name(head->reginfo.join_tab);
+ else
+ table_records.add_table_name(head);
+ Json_writer_object trace_range(thd, "range_analysis");
+ {
+ Json_writer_object table_rec(thd, "table_scan");
+ table_rec.add("rows", records).add("cost", read_time);
+ }
+
keys_to_use.intersect(head->keys_in_use_for_query);
if (!keys_to_use.is_clear_all())
{
@@ -2478,19 +2741,33 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
*/
key_info= head->key_info;
uint max_key_len= 0;
+
+ Json_writer_array trace_idx(thd, "potential_range_indexes");
+
for (idx=0 ; idx < head->s->keys ; idx++, key_info++)
{
+ Json_writer_object trace_idx_details(thd);
+ trace_idx_details.add("index", key_info->name);
KEY_PART_INFO *key_part_info;
uint n_key_parts= head->actual_n_key_parts(key_info);
if (!keys_to_use.is_set(idx))
- continue;
+ {
+ trace_idx_details.add("usable", false)
+ .add("cause", "not applicable");
+ continue;
+ }
if (key_info->flags & HA_FULLTEXT)
- continue; // ToDo: ft-keys in non-ft ranges, if possible SerG
+ {
+ trace_idx_details.add("usable", false).add("cause", "fulltext");
+ continue; // ToDo: ft-keys in non-ft ranges, if possible SerG
+ }
+ trace_idx_details.add("usable", true);
param.key[param.keys]=key_parts;
key_part_info= key_info->key_part;
uint cur_key_len= 0;
+ Json_writer_array trace_keypart(thd, "key_parts");
for (uint part= 0 ; part < n_key_parts ;
part++, key_parts++, key_part_info++)
{
@@ -2505,11 +2782,14 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
(key_info->flags & HA_SPATIAL) ? Field::itMBR : Field::itRAW;
/* Only HA_PART_KEY_SEG is used */
key_parts->flag= (uint8) key_part_info->key_part_flag;
+ trace_keypart.add(key_parts->field->field_name);
}
param.real_keynr[param.keys++]=idx;
if (cur_key_len > max_key_len)
max_key_len= cur_key_len;
}
+ trace_idx.end();
+
param.key_parts_end=key_parts;
param.alloced_sel_args= 0;
@@ -2527,26 +2807,42 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
if (!force_quick_range && !head->covering_keys.is_clear_all())
{
int key_for_use= find_shortest_key(head, &head->covering_keys);
- double key_read_time= head->file->keyread_time(key_for_use, 1, records) +
- (double) records / TIME_FOR_COMPARE;
+ double key_read_time= head->file->key_scan_time(key_for_use) +
+ (double) records / TIME_FOR_COMPARE_IDX;
DBUG_PRINT("info", ("'all'+'using index' scan will be using key %d, "
"read time %g", key_for_use, key_read_time));
+
+ Json_writer_object trace_cov(thd, "best_covering_index_scan");
+ bool chosen= FALSE;
if (key_read_time < read_time)
+ {
read_time= key_read_time;
+ chosen= TRUE;
+ }
+ trace_cov.add("index", head->key_info[key_for_use].name)
+ .add("cost", key_read_time).add("chosen", chosen);
+ if (!chosen)
+ trace_cov.add("cause", "cost");
}
TABLE_READ_PLAN *best_trp= NULL;
- TRP_GROUP_MIN_MAX *group_trp;
+ TRP_GROUP_MIN_MAX *group_trp= NULL;
double best_read_time= read_time;
if (cond)
{
- if ((tree= cond->get_mm_tree(&param, &cond)))
+ {
+ Json_writer_array trace_range_summary(thd,
+ "setup_range_conditions");
+ tree= cond->get_mm_tree(&param, &cond);
+ }
+ if (tree)
{
if (tree->type == SEL_TREE::IMPOSSIBLE)
{
records=0L; /* Return -1 from this function. */
read_time= (double) HA_POS_ERROR;
+ trace_range.add("impossible_range", true);
goto free_mem;
}
/*
@@ -2554,7 +2850,10 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
can construct a group-min-max quick select
*/
if (tree->type != SEL_TREE::KEY && tree->type != SEL_TREE::KEY_SMALLER)
+ {
+ trace_range.add("range_scan_possible", false);
tree= NULL;
+ }
}
}
@@ -2562,16 +2861,25 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
Try to construct a QUICK_GROUP_MIN_MAX_SELECT.
Notice that it can be constructed no matter if there is a range tree.
*/
- group_trp= get_best_group_min_max(&param, tree, best_read_time);
+ if (!only_single_index_range_scan)
+ group_trp= get_best_group_min_max(&param, tree, best_read_time);
if (group_trp)
{
param.table->quick_condition_rows= MY_MIN(group_trp->records,
head->stat_records());
+ Json_writer_object grp_summary(thd, "best_group_range_summary");
+
+ if (unlikely(thd->trace_started()))
+ group_trp->trace_basic_info(&param, &grp_summary);
+
if (group_trp->read_cost < best_read_time)
{
+ grp_summary.add("chosen", true);
best_trp= group_trp;
best_read_time= best_trp->read_cost;
}
+ else
+ grp_summary.add("chosen", false).add("cause", "cost");
}
if (tree)
@@ -2584,7 +2892,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
TRP_ROR_INTERSECT *rori_trp;
TRP_INDEX_INTERSECT *intersect_trp;
bool can_build_covering= FALSE;
-
+ Json_writer_object trace_range(thd, "analyzing_range_alternatives");
+
remove_nonrange_trees(&param, tree);
/* Get best 'range' plan and prepare data for making other plans */
@@ -2601,7 +2910,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
table deletes.
*/
if ((thd->lex->sql_command != SQLCOM_DELETE) &&
- optimizer_flag(thd, OPTIMIZER_SWITCH_INDEX_MERGE))
+ optimizer_flag(thd, OPTIMIZER_SWITCH_INDEX_MERGE) &&
+ !only_single_index_range_scan)
{
/*
Get best non-covering ROR-intersection plan and prepare data for
@@ -2629,7 +2939,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
*/
if (param.table->covering_keys.is_clear_all() &&
optimizer_flag(thd, OPTIMIZER_SWITCH_INDEX_MERGE) &&
- optimizer_flag(thd, OPTIMIZER_SWITCH_INDEX_MERGE_SORT_INTERSECT))
+ optimizer_flag(thd, OPTIMIZER_SWITCH_INDEX_MERGE_SORT_INTERSECT) &&
+ !only_single_index_range_scan)
{
if ((intersect_trp= get_best_index_intersect(&param, tree,
best_read_time)))
@@ -2642,7 +2953,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
}
if (optimizer_flag(thd, OPTIMIZER_SWITCH_INDEX_MERGE) &&
- head->stat_records() != 0)
+ head->stat_records() != 0 && !only_single_index_range_scan)
{
/* Try creating index_merge/ROR-union scan. */
SEL_IMERGE *imerge;
@@ -2651,6 +2962,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
DBUG_PRINT("info",("No range reads possible,"
" trying to construct index_merge"));
List_iterator_fast<SEL_IMERGE> it(tree->merges);
+ Json_writer_array trace_idx_merge(thd, "analyzing_index_merge_union");
while ((imerge= it++))
{
new_conj_trp= get_best_disjunct_quick(&param, imerge, best_read_time);
@@ -2685,6 +2997,19 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
possible_keys= param.possible_keys;
free_mem:
+ if (unlikely(quick && best_trp && thd->trace_started()))
+ {
+ Json_writer_object trace_range_summary(thd,
+ "chosen_range_access_summary");
+ {
+ Json_writer_object trace_range_plan(thd, "range_access_plan");
+ best_trp->trace_basic_info(&param, &trace_range_plan);
+ }
+ trace_range_summary.add("rows_for_plan", quick->records)
+ .add("cost_for_plan", quick->read_time)
+ .add("chosen", true);
+ }
+
free_root(&alloc,MYF(0)); // Return memory & allocator
thd->mem_root= param.old_root;
thd->no_errors=0;
@@ -2947,6 +3272,9 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
estimate sources.
*/
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_array selectivity_for_indexes(thd, "selectivity_for_indexes");
+
for (keynr= 0; keynr < table->s->keys; keynr++)
{
if (table->quick_keys.is_set(keynr))
@@ -2996,6 +3324,10 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
not yet been accounted for.
*/
table->cond_selectivity*= quick_cond_selectivity;
+ Json_writer_object selectivity_for_index(thd);
+ selectivity_for_index.add("index_name", key_info->name)
+ .add("selectivity_from_index",
+ quick_cond_selectivity);
if (i != used_key_parts)
{
/*
@@ -3015,7 +3347,9 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
*/
selectivity_mult= ((double)(i+1)) / i;
}
- table->cond_selectivity*= selectivity_mult;
+ table->cond_selectivity*= selectivity_mult;
+ selectivity_for_index.add("selectivity_multiplier",
+ selectivity_mult);
}
/*
We need to set selectivity for fields supported by indexes.
@@ -3036,12 +3370,14 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
}
}
}
+ selectivity_for_indexes.end();
/*
Second step: calculate the selectivity of the range conditions not
supported by any index and selectivity of the range condition
over the fields whose selectivity has not been set yet.
*/
+ Json_writer_array selectivity_for_columns(thd, "selectivity_for_columns");
if (thd->variables.optimizer_use_condition_selectivity > 2 &&
!bitmap_is_clear_all(used_fields) &&
@@ -3101,17 +3437,25 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
SEL_ARG *key= tree->keys[idx];
if (key)
{
+ Json_writer_object selectivity_for_column(thd);
+ selectivity_for_column.add("column_name", key->field->field_name);
if (key->type == SEL_ARG::IMPOSSIBLE)
- {
+ {
rows= 0;
table->reginfo.impossible_range= 1;
+ selectivity_for_column.add("selectivity_from_histogram", rows);
+ selectivity_for_column.add("cause", "impossible range");
goto free_alloc;
}
else
{
rows= records_in_column_ranges(&param, idx, key);
if (rows != DBL_MAX)
+ {
key->field->cond_selectivity= rows/table_records;
+ selectivity_for_column.add("selectivity_from_histogram",
+ key->field->cond_selectivity);
+ }
}
}
}
@@ -3133,6 +3477,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
free_root(&alloc, MYF(0));
}
+ selectivity_for_columns.end();
if (quick && (quick->get_type() == QUICK_SELECT_I::QS_TYPE_ROR_UNION ||
quick->get_type() == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE))
@@ -3207,7 +3552,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
table->cond_selectivity_sampling_explain= &dt->list;
}
}
-
+ trace_wrapper.add("cond_selectivity", table->cond_selectivity);
DBUG_RETURN(FALSE);
}
@@ -4569,7 +4914,7 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records)
if (max_cost != DBL_MAX && (busy_blocks+index_reads_cost) >= n_blocks)
return 1;
*/
- JOIN *join= param->thd->lex->select_lex.join;
+ JOIN *join= param->thd->lex->first_select_lex()->join;
if (!join || join->table_count == 1)
{
/* No join, assume reading is done in one 'sweep' */
@@ -4677,7 +5022,9 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
double roru_index_costs;
ha_rows roru_total_records;
double roru_intersect_part= 1.0;
+ double limit_read_time= read_time;
size_t n_child_scans;
+ THD *thd= param->thd;
DBUG_ENTER("get_best_disjunct_quick");
DBUG_PRINT("info", ("Full table scan cost: %g", read_time));
@@ -4703,6 +5050,8 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
sizeof(TRP_RANGE*)*
n_child_scans)))
DBUG_RETURN(NULL);
+ Json_writer_object trace_best_disjunct(thd);
+ Json_writer_array to_merge(thd, "indexes_to_merge");
/*
Collect best 'range' scan for each of disjuncts, and, while doing so,
analyze possibility of ROR scans. Also calculate some values needed by
@@ -4714,6 +5063,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
{
DBUG_EXECUTE("info", print_sel_tree(param, *ptree, &(*ptree)->keys_map,
"tree in SEL_IMERGE"););
+ Json_writer_object trace_idx(thd);
if (!(*cur_child= get_key_scans_params(param, *ptree, TRUE, FALSE, read_time)))
{
/*
@@ -4725,8 +5075,11 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
imerge_too_expensive= TRUE;
}
if (imerge_too_expensive)
+ {
+ trace_idx.add("chosen", false).add("cause", "cost");
continue;
-
+ }
+ const uint keynr_in_table= param->real_keynr[(*cur_child)->key_idx];
imerge_cost += (*cur_child)->read_cost;
all_scans_ror_able &= ((*ptree)->n_ror_scans > 0);
all_scans_rors &= (*cur_child)->is_ror;
@@ -4739,9 +5092,16 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
}
else
non_cpk_scan_records += (*cur_child)->records;
+ trace_idx.add("index_to_merge",
+ param->table->key_info[keynr_in_table].name)
+ .add("cumulated_cost", imerge_cost);
}
+ to_merge.end();
+
DBUG_PRINT("info", ("index_merge scans cost %g", imerge_cost));
+ trace_best_disjunct.add("cost_of_reading_ranges", imerge_cost);
+
if (imerge_too_expensive || (imerge_cost > read_time) ||
((non_cpk_scan_records+cpk_scan_records >=
param->table->stat_records()) &&
@@ -4753,6 +5113,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
*/
DBUG_PRINT("info", ("Sum of index_merge scans is more expensive than "
"full table scan, bailing out"));
+ trace_best_disjunct.add("chosen", false).add("cause", "cost");
DBUG_RETURN(NULL);
}
@@ -4765,6 +5126,9 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
optimizer_flag(param->thd, OPTIMIZER_SWITCH_INDEX_MERGE_UNION))
{
roru_read_plans= (TABLE_READ_PLAN**)range_scans;
+ trace_best_disjunct.add("use_roworder_union", true)
+ .add("cause",
+ "always cheaper than non roworder retrieval");
goto skip_to_ror_scan;
}
@@ -4774,16 +5138,26 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
Add one ROWID comparison for each row retrieved on non-CPK scan. (it
is done in QUICK_RANGE_SELECT::row_in_ranges)
*/
- imerge_cost += non_cpk_scan_records / TIME_FOR_COMPARE_ROWID;
+ double rid_comp_cost= static_cast<double>(non_cpk_scan_records) /
+ TIME_FOR_COMPARE_ROWID;
+ imerge_cost+= rid_comp_cost;
+ trace_best_disjunct.add("cost_of_mapping_rowid_in_non_clustered_pk_scan",
+ rid_comp_cost);
}
/* Calculate cost(rowid_to_row_scan) */
- imerge_cost += get_sweep_read_cost(param, non_cpk_scan_records);
+ {
+ double sweep_cost= get_sweep_read_cost(param, non_cpk_scan_records);
+ imerge_cost+= sweep_cost;
+ trace_best_disjunct.add("cost_sort_rowid_and_read_disk", sweep_cost);
+ }
DBUG_PRINT("info",("index_merge cost with rowid-to-row scan: %g",
imerge_cost));
if (imerge_cost > read_time ||
!optimizer_flag(param->thd, OPTIMIZER_SWITCH_INDEX_MERGE_SORT_UNION))
{
+ trace_best_disjunct.add("use_roworder_index_merge", true);
+ trace_best_disjunct.add("cause", "cost");
goto build_ror_index_merge;
}
@@ -4800,12 +5174,18 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
param->imerge_cost_buff_size= unique_calc_buff_size;
}
- imerge_cost +=
- Unique::get_use_cost(param->imerge_cost_buff, (uint)non_cpk_scan_records,
- param->table->file->ref_length,
- (size_t)param->thd->variables.sortbuff_size,
- TIME_FOR_COMPARE_ROWID,
- FALSE, NULL);
+ {
+ const double dup_removal_cost= Unique::get_use_cost(
+ param->imerge_cost_buff, (uint)non_cpk_scan_records,
+ param->table->file->ref_length,
+ (size_t)param->thd->variables.sortbuff_size,
+ TIME_FOR_COMPARE_ROWID,
+ FALSE, NULL);
+ imerge_cost+= dup_removal_cost;
+ trace_best_disjunct.add("cost_duplicate_removal", dup_removal_cost)
+ .add("total_cost", imerge_cost);
+ }
+
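/*
  Worked example of the cost composition traced above (all numbers invented,
  and TIME_FOR_COMPARE_ROWID assumed to be 10):
    cost_of_reading_ranges          = 120.0
    rowid mapping cost              = non_cpk_scan_records / TIME_FOR_COMPARE_ROWID
                                    = 1000 / 10 = 100.0
    cost_sort_rowid_and_read_disk   = 250.0   (get_sweep_read_cost())
    cost_duplicate_removal          = 80.0    (Unique::get_use_cost())
    -------------------------------------------------
    imerge_cost                     = 550.0
  The index_merge plan survives only if imerge_cost stays below read_time,
  i.e. only if it beats the best plan found so far.
*/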
DBUG_PRINT("info",("index_merge total cost: %g (wanted: less then %g)",
imerge_cost, read_time));
if (imerge_cost < read_time)
@@ -4823,7 +5203,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
if (imerge_trp)
{
TABLE_READ_PLAN *trp= merge_same_index_scans(param, imerge, imerge_trp,
- read_time);
+ limit_read_time);
if (trp != imerge_trp)
DBUG_RETURN(trp);
}
@@ -4848,11 +5228,16 @@ skip_to_ror_scan:
roru_total_records= 0;
cur_roru_plan= roru_read_plans;
+ Json_writer_array trace_analyze_ror(thd, "analyzing_roworder_scans");
+
/* Find 'best' ROR scan for each of trees in disjunction */
for (ptree= imerge->trees, cur_child= range_scans;
ptree != imerge->trees_next;
ptree++, cur_child++, cur_roru_plan++)
{
+ Json_writer_object trp_info(thd);
+ if (unlikely(thd->trace_started()))
+ (*cur_child)->trace_basic_info(param, &trp_info);
/*
Assume the best ROR scan is the one that has cheapest full-row-retrieval
scan cost.
@@ -4888,7 +5273,7 @@ skip_to_ror_scan:
roru_intersect_part *= (*cur_roru_plan)->records /
param->table->stat_records();
}
-
+ trace_analyze_ror.end();
/*
rows to retrieve=
SUM(rows_in_scan_i) - table_rows * PROD(rows_in_scan_i / table_rows).
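A worked instance of the formula above, with invented numbers:

/*
  With table_rows = 1000 and two ROR scans expected to return 100 and 200
  rows, rows to retrieve = 100 + 200 - 1000 * (100/1000) * (200/1000)
                         = 300 - 20 = 280,
  i.e. inclusion-exclusion assuming the scans filter rows independently.
*/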
@@ -4914,11 +5299,14 @@ skip_to_ror_scan:
DBUG_PRINT("info", ("ROR-union: cost %g, %zu members",
roru_total_cost, n_child_scans));
+ trace_best_disjunct.add("index_roworder_union_cost", roru_total_cost)
+ .add("members", n_child_scans);
TRP_ROR_UNION* roru;
if (roru_total_cost < read_time)
{
if ((roru= new (param->mem_root) TRP_ROR_UNION))
{
+ trace_best_disjunct.add("chosen", true);
roru->first_ror= roru_read_plans;
roru->last_ror= roru_read_plans + n_child_scans;
roru->read_cost= roru_total_cost;
@@ -4926,7 +5314,9 @@ skip_to_ror_scan:
DBUG_RETURN(roru);
}
}
- DBUG_RETURN(imerge_trp);
+ else
+ trace_best_disjunct.add("chosen", false);
+ DBUG_RETURN(imerge_trp);
}
@@ -5197,6 +5587,15 @@ ha_rows get_table_cardinality_for_index_intersect(TABLE *table)
}
}
+static
+void print_keyparts(THD *thd, KEY *key, uint key_parts)
+{
+ KEY_PART_INFO *part= key->key_part;
+ Json_writer_array keyparts(thd, "keyparts");
+ for (uint i= 0; i < key_parts; i++, part++)
+ keyparts.add(part->field->field_name);
+}
+
static
ha_rows records_in_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
@@ -5249,8 +5648,9 @@ bool prepare_search_best_index_intersect(PARAM *param,
INDEX_SCAN_INFO *cpk_scan= NULL;
TABLE *table= param->table;
uint n_index_scans= (uint)(tree->index_scans_end - tree->index_scans);
+ THD *thd= param->thd;
- if (!n_index_scans)
+ if (n_index_scans <= 1)
return 1;
init->init();
@@ -5266,9 +5666,6 @@ bool prepare_search_best_index_intersect(PARAM *param,
common->table_cardinality=
get_table_cardinality_for_index_intersect(table);
- if (n_index_scans <= 1)
- return TRUE;
-
if (table->file->primary_key_is_clustered())
{
INDEX_SCAN_INFO **index_scan_end;
@@ -5293,23 +5690,38 @@ bool prepare_search_best_index_intersect(PARAM *param,
bzero(common->search_scans, sizeof(INDEX_SCAN_INFO *) * i);
INDEX_SCAN_INFO **selected_index_scans= common->search_scans;
-
+ Json_writer_array potential_idx_scans(thd, "potential_index_scans");
for (i=0, index_scan= tree->index_scans; i < n_index_scans; i++, index_scan++)
{
+ Json_writer_object idx_scan(thd);
uint used_key_parts= (*index_scan)->used_key_parts;
KEY *key_info= (*index_scan)->key_info;
+ idx_scan.add("index", key_info->name);
if (*index_scan == cpk_scan)
+ {
+ idx_scan.add("chosen", "false")
+ .add("cause", "clustered index used for filtering");
continue;
+ }
if (cpk_scan && cpk_scan->used_key_parts >= used_key_parts &&
same_index_prefix(cpk_scan->key_info, key_info, used_key_parts))
+ {
+ idx_scan.add("chosen", "false")
+ .add("cause", "clustered index used for filtering");
continue;
+ }
+
+ cost= table->quick_index_only_costs[(*index_scan)->keynr];
+
+ idx_scan.add("cost", cost);
- cost= table->file->keyread_time((*index_scan)->keynr,
- (*index_scan)->range_count,
- (*index_scan)->records);
if (cost >= cutoff_cost)
+ {
+ idx_scan.add("chosen", false);
+ idx_scan.add("cause", "cost");
continue;
+ }
for (scan_ptr= selected_index_scans; *scan_ptr ; scan_ptr++)
{
@@ -5326,10 +5738,20 @@ bool prepare_search_best_index_intersect(PARAM *param,
}
if (!*scan_ptr || cost < (*scan_ptr)->index_read_cost)
{
+ idx_scan.add("chosen", true);
+ if (!*scan_ptr)
+ idx_scan.add("cause", "first occurence of index prefix");
+ else
+ idx_scan.add("cause", "better cost for same idx prefix");
*scan_ptr= *index_scan;
(*scan_ptr)->index_read_cost= cost;
}
- }
+ else
+ {
+ idx_scan.add("chosen", false).add("cause", "cost");
+ }
+ }
+ potential_idx_scans.end();
ha_rows records_in_scans= 0;
@@ -5339,6 +5761,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
return TRUE;
records_in_scans+= (*scan_ptr)->records;
}
+
n_search_scans= i;
if (cpk_scan && create_fields_bitmap(param, &cpk_scan->used_fields))
@@ -5368,6 +5791,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
my_qsort(selected_index_scans, n_search_scans, sizeof(INDEX_SCAN_INFO *),
(qsort_cmp) cmp_intersect_index_scan);
+ Json_writer_array selected_idx_scans(thd, "selected_index_scans");
if (cpk_scan)
{
PARTIAL_INDEX_INTERSECT_INFO curr;
@@ -5380,16 +5804,36 @@ bool prepare_search_best_index_intersect(PARAM *param,
curr.length= 1;
for (scan_ptr=selected_index_scans; *scan_ptr; scan_ptr++)
{
+ KEY *key_info= (*scan_ptr)->key_info;
ha_rows scan_records= (*scan_ptr)->records;
ha_rows records= records_in_index_intersect_extension(&curr, *scan_ptr);
(*scan_ptr)->filtered_out= records >= scan_records ?
- 0 : scan_records-records;
+ 0 : scan_records-records;
+ if (thd->trace_started())
+ {
+ Json_writer_object selected_idx(thd);
+ selected_idx.add("index", key_info->name);
+ print_keyparts(thd, key_info, (*scan_ptr)->used_key_parts);
+ selected_idx.add("records", (*scan_ptr)->records)
+ .add("filtered_records", (*scan_ptr)->filtered_out);
+ }
}
}
else
{
for (scan_ptr=selected_index_scans; *scan_ptr; scan_ptr++)
+ {
+ KEY *key_info= (*scan_ptr)->key_info;
(*scan_ptr)->filtered_out= 0;
+ if (thd->trace_started())
+ {
+ Json_writer_object selected_idx(thd);
+ selected_idx.add("index", key_info->name);
+ print_keyparts(thd, key_info, (*scan_ptr)->used_key_parts);
+ selected_idx.add("records", (*scan_ptr)->records)
+ .add("filtered_records", (*scan_ptr)->filtered_out);
+ }
+ }
}
return FALSE;
@@ -5846,10 +6290,12 @@ TRP_INDEX_INTERSECT *get_best_index_intersect(PARAM *param, SEL_TREE *tree,
PARTIAL_INDEX_INTERSECT_INFO init;
TRP_INDEX_INTERSECT *intersect_trp= NULL;
TABLE *table= param->table;
-
+ THD *thd= param->thd;
DBUG_ENTER("get_best_index_intersect");
+ Json_writer_object trace_idx_intersect(thd, "analyzing_sort_intersect");
+
if (prepare_search_best_index_intersect(param, tree, &common, &init,
read_time))
DBUG_RETURN(NULL);
@@ -5911,11 +6357,15 @@ TRP_INDEX_INTERSECT *get_best_index_intersect(PARAM *param, SEL_TREE *tree,
if ((intersect_trp= new (param->mem_root)TRP_INDEX_INTERSECT))
{
+
intersect_trp->read_cost= common.best_cost;
intersect_trp->records= common.best_records;
intersect_trp->range_scans= range_scans;
intersect_trp->range_scans_end= cur_range;
intersect_trp->filtered_scans= common.filtered_scans;
+ trace_idx_interect.add("rows", intersect_trp->records)
+ .add("cost", intersect_trp->read_cost)
+ .add("chosen",true);
}
DBUG_RETURN(intersect_trp);
}
@@ -5925,6 +6375,46 @@ typedef struct st_ror_scan_info : INDEX_SCAN_INFO
{
} ROR_SCAN_INFO;
+void TRP_ROR_INTERSECT::trace_basic_info(const PARAM *param,
+ Json_writer_object *trace_object) const
+{
+ THD *thd= param->thd;
+ trace_object->add("type", "index_roworder_intersect");
+ trace_object->add("rows", records);
+ trace_object->add("cost", read_cost);
+ trace_object->add("covering", is_covering);
+ trace_object->add("clustered_pk_scan", cpk_scan != NULL);
+
+ Json_writer_array smth_trace(thd, "intersect_of");
+ for (ROR_SCAN_INFO **cur_scan= first_scan; cur_scan != last_scan;
+ cur_scan++)
+ {
+ const KEY &cur_key= param->table->key_info[(*cur_scan)->keynr];
+ const KEY_PART_INFO *key_part= cur_key.key_part;
+
+ Json_writer_object trace_isect_idx(thd);
+ trace_isect_idx.add("type", "range_scan");
+ trace_isect_idx.add("index", cur_key.name);
+ trace_isect_idx.add("rows", (*cur_scan)->records);
+
+ Json_writer_array trace_range(thd, "ranges");
+ for (const SEL_ARG *current= (*cur_scan)->sel_arg->first(); current;
+ current= current->next)
+ {
+ String range_info;
+ range_info.set_charset(system_charset_info);
+ for (const SEL_ARG *part= current; part;
+ part= part->next_key_part ? part->next_key_part : nullptr)
+ {
+ const KEY_PART_INFO *cur_key_part= key_part + part->part;
+ append_range(&range_info, cur_key_part, part->min_value,
+ part->max_value, part->min_flag | part->max_flag);
+ }
+ trace_range.add(range_info.ptr(), range_info.length());
+ }
+ }
+}
+
/*
Create ROR_SCAN_INFO* structure with a single ROR scan on index idx using
@@ -6310,7 +6800,9 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
*/
static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
- ROR_SCAN_INFO* ror_scan, bool is_cpk_scan)
+ ROR_SCAN_INFO* ror_scan,
+ Json_writer_object *trace_costs,
+ bool is_cpk_scan)
{
double selectivity_mult= 1.0;
@@ -6337,13 +6829,16 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
each record of every scan. Assuming 1/TIME_FOR_COMPARE_ROWID
per check this gives us:
*/
- info->index_scan_costs += rows2double(info->index_records) /
+ const double idx_cost= rows2double(info->index_records) /
TIME_FOR_COMPARE_ROWID;
+ info->index_scan_costs+= idx_cost;
+ trace_costs->add("index_scan_cost", idx_cost);
}
else
{
info->index_records += info->param->quick_rows[ror_scan->keynr];
info->index_scan_costs += ror_scan->index_read_cost;
+ trace_costs->add("index_scan_cost", ror_scan->index_read_cost);
bitmap_union(&info->covered_fields, &ror_scan->covered_fields);
if (!info->is_covering && bitmap_is_subset(&info->param->needed_fields,
&info->covered_fields))
@@ -6354,13 +6849,19 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
}
info->total_cost= info->index_scan_costs;
+ trace_costs->add("cumulated_index_scan_cost", info->index_scan_costs);
DBUG_PRINT("info", ("info->total_cost: %g", info->total_cost));
if (!info->is_covering)
{
- info->total_cost +=
- get_sweep_read_cost(info->param, double2rows(info->out_rows));
+ double sweep_cost= get_sweep_read_cost(info->param,
+ double2rows(info->out_rows));
+ info->total_cost+= sweep_cost;
+ trace_costs->add("disk_sweep_cost", sweep_cost);
DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
}
+ else
+ trace_costs->add("disk_sweep_cost", static_cast<longlong>(0));
+
DBUG_PRINT("info", ("New out_rows: %g", info->out_rows));
DBUG_PRINT("info", ("New cost: %g, %scovering", info->total_cost,
info->is_covering?"" : "non-"));
@@ -6440,10 +6941,16 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
uint idx;
double min_cost= DBL_MAX;
DBUG_ENTER("get_best_ror_intersect");
+ THD *thd= param->thd;
+ Json_writer_object trace_ror(thd, "analyzing_roworder_intersect");
if ((tree->n_ror_scans < 2) || !param->table->stat_records() ||
!optimizer_flag(param->thd, OPTIMIZER_SWITCH_INDEX_MERGE_INTERSECT))
- DBUG_RETURN(NULL);
+ {
+ if (tree->n_ror_scans < 2)
+ trace_ror.add("cause", "too few roworder scans");
+ DBUG_RETURN(NULL);
+ }
/*
Step1: Collect ROR-able SEL_ARGs and create ROR_SCAN_INFO for each of
@@ -6518,15 +7025,27 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
ROR_SCAN_INFO **intersect_scans_best;
cur_ror_scan= tree->ror_scans;
intersect_scans_best= intersect_scans;
+ Json_writer_array trace_isect_idx(thd, "intersecting_indexes");
while (cur_ror_scan != tree->ror_scans_end && !intersect->is_covering)
{
+ Json_writer_object trace_idx(thd);
+ trace_idx.add("index",
+ param->table->key_info[(*cur_ror_scan)->keynr].name);
+
/* S= S + first(R); R= R - first(R); */
- if (!ror_intersect_add(intersect, *cur_ror_scan, FALSE))
+ if (!ror_intersect_add(intersect, *cur_ror_scan, &trace_idx, FALSE))
{
+ trace_idx.add("usable", false)
+ .add("cause", "does not reduce cost of intersect");
cur_ror_scan++;
continue;
}
+ trace_idx.add("cumulative_total_cost", intersect->total_cost)
+ .add("usable", true)
+ .add("matching_rows_now", intersect->out_rows)
+ .add("intersect_covering_with_this_index", intersect->is_covering);
+
*(intersect_scans_end++)= *(cur_ror_scan++);
if (intersect->total_cost < min_cost)
@@ -6535,12 +7054,21 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
ror_intersect_cpy(intersect_best, intersect);
intersect_scans_best= intersect_scans_end;
min_cost = intersect->total_cost;
+ trace_idx.add("chosen", true);
+ }
+ else
+ {
+ trace_idx.add("chosen", false)
+ .add("cause", "does not reduce cost");
}
}
+ trace_isect_idx.end();
if (intersect_scans_best == intersect_scans)
{
DBUG_PRINT("info", ("None of scans increase selectivity"));
+ trace_ror.add("chosen", false)
+ .add("cause","does not increase selectivity");
DBUG_RETURN(NULL);
}
@@ -6558,16 +7086,31 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
Check if we should add a CPK scan. If the obtained ROR-intersection is
covering, it doesn't make sense to add CPK scan.
*/
+ Json_writer_object trace_cpk(thd, "clustered_pk");
if (cpk_scan && !intersect->is_covering)
{
- if (ror_intersect_add(intersect, cpk_scan, TRUE) &&
+ if (ror_intersect_add(intersect, cpk_scan, &trace_cpk, TRUE) &&
(intersect->total_cost < min_cost))
+ {
+ trace_cpk.add("clustered_pk_scan_added_to_intersect", true)
+ .add("cumulated_cost", intersect->total_cost);
intersect_best= intersect; //just set pointer here
+ }
else
+ {
+ trace_cpk.add("clustered_pk_added_to_intersect", false)
+ .add("cause", "cost");
cpk_scan= 0; // Don't use cpk_scan
+ }
}
else
+ {
+ trace_cpk.add("clustered_pk_added_to_intersect", false)
+ .add("cause", cpk_scan ? "roworder is covering"
+ : "no clustered pk index");
cpk_scan= 0; // Don't use cpk_scan
+ }
+ trace_cpk.end();
/* Ok, return ROR-intersect plan if we have found one */
TRP_ROR_INTERSECT *trp= NULL;
@@ -6594,6 +7137,17 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
DBUG_PRINT("info", ("Returning non-covering ROR-intersect plan:"
"cost %g, records %lu",
trp->read_cost, (ulong) trp->records));
+ trace_ror.add("rows", trp->records)
+ .add("cost", trp->read_cost)
+ .add("covering", trp->is_covering)
+ .add("chosen", true);
+ }
+ else
+ {
+ trace_ror.add("chosen", false)
+ .add("cause", (read_time > min_cost)
+ ? "too few indexes to merge"
+ : "cost");
}
DBUG_RETURN(trp);
}
@@ -6781,6 +7335,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
UNINIT_VAR(best_buf_size); /* protected by key_to_read */
TRP_RANGE* read_plan= NULL;
DBUG_ENTER("get_key_scans_params");
+ THD *thd= param->thd;
/*
Note that there may be trees that have type SEL_TREE::KEY but contain no
key reads at all, e.g. tree for expression "key1 is not null" where key1
@@ -6788,6 +7343,8 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
*/
DBUG_EXECUTE("info", print_sel_tree(param, tree, &tree->keys_map,
"tree scans"););
+ Json_writer_array range_scan_alt(thd, "range_scan_alternatives");
+
tree->ror_scans_map.clear_all();
tree->n_ror_scans= 0;
tree->index_scans= 0;
@@ -6816,6 +7373,9 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
bool read_index_only= index_read_must_be_used ? TRUE :
(bool) param->table->covering_keys.is_set(keynr);
+ Json_writer_object trace_idx(thd);
+ trace_idx.add("index", param->table->key_info[keynr].name);
+
found_records= check_quick_select(param, idx, read_index_only, key,
update_tbl_stats, &mrr_flags,
&buf_size, &cost);
@@ -6824,6 +7384,14 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
(index_scan= (INDEX_SCAN_INFO *)alloc_root(param->mem_root,
sizeof(INDEX_SCAN_INFO))))
{
+ Json_writer_array trace_range(thd, "ranges");
+
+ const KEY &cur_key= param->table->key_info[keynr];
+ const KEY_PART_INFO *key_part= cur_key.key_part;
+
+ String range_info;
+ range_info.set_charset(system_charset_info);
+
index_scan->idx= idx;
index_scan->keynr= keynr;
index_scan->key_info= &param->table->key_info[keynr];
@@ -6832,6 +7400,17 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
index_scan->records= found_records;
index_scan->sel_arg= key;
*tree->index_scans_end++= index_scan;
+
+ if (unlikely(thd->trace_started()))
+ append_range_all_keyparts(&trace_range, NULL, &range_info, key,
+ key_part);
+ trace_range.end();
+
+ trace_idx.add("rowid_ordered", param->is_ror_scan)
+ .add("using_mrr", !(mrr_flags & HA_MRR_USE_DEFAULT_IMPL))
+ .add("index_only", read_index_only)
+ .add("rows", found_records)
+ .add("cost", cost.total_cost());
}
if ((found_records != HA_POS_ERROR) && param->is_ror_scan)
{
@@ -6847,6 +7426,18 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
best_idx= idx;
best_mrr_flags= mrr_flags;
best_buf_size= buf_size;
+ trace_idx.add("chosen", true);
+ }
+ else
+ {
+ trace_idx.add("chosen", false);
+ if (found_records == HA_POS_ERROR)
+ if (key->type == SEL_ARG::Type::MAYBE_KEY)
+ trace_idx.add("cause", "depends on unread values");
+ else
+ trace_idx.add("cause", "unknown");
+ else
+ trace_idx.add("cause", "cost");
}
}
}
@@ -6951,10 +7542,11 @@ QUICK_SELECT_I *TRP_ROR_INTERSECT::make_quick(PARAM *param,
"creating ROR-intersect",
first_scan, last_scan););
alloc= parent_alloc? parent_alloc: &quick_intrsect->alloc;
- for (; first_scan != last_scan;++first_scan)
+ for (ROR_SCAN_INFO **curr_scan= first_scan; curr_scan != last_scan;
+ ++curr_scan)
{
- if (!(quick= get_quick_select(param, (*first_scan)->idx,
- (*first_scan)->sel_arg,
+ if (!(quick= get_quick_select(param, (*curr_scan)->idx,
+ (*curr_scan)->sel_arg,
HA_MRR_USE_DEFAULT_IMPL | HA_MRR_SORTED,
0, alloc)) ||
quick_intrsect->push_quick_back(alloc, quick))
@@ -8049,52 +8641,112 @@ Item_func_like::get_mm_leaf(RANGE_OPT_PARAM *param,
SEL_ARG *
Item_bool_func::get_mm_leaf(RANGE_OPT_PARAM *param,
Field *field, KEY_PART *key_part,
- Item_func::Functype type, Item *value)
+ Item_func::Functype functype, Item *value)
{
- uint maybe_null=(uint) field->real_maybe_null();
- SEL_ARG *tree= 0;
- MEM_ROOT *alloc= param->mem_root;
- uchar *str;
- int err;
DBUG_ENTER("Item_bool_func::get_mm_leaf");
-
DBUG_ASSERT(value); // IS NULL and IS NOT NULL are handled separately
-
if (key_part->image_type != Field::itRAW)
DBUG_RETURN(0); // e.g. SPATIAL index
+ DBUG_RETURN(field->get_mm_leaf(param, key_part, this,
+ functype_to_scalar_comparison_op(functype),
+ value));
+}
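Item_bool_func::get_mm_leaf() is reduced here to translating the item functype into a scalar_comparison_op and delegating to the Field. The translation helper is only referenced, not defined, in these hunks; a sketch of what it presumably does (SCALAR_CMP_LT/LE/GT/GE are assumed names, only SCALAR_CMP_EQ and SCALAR_CMP_EQUAL are visible in this patch):

// Illustration only -- not part of this patch.
static scalar_comparison_op functype_to_scalar_cmp_sketch(Item_func::Functype type)
{
  switch (type) {
  case Item_func::EQ_FUNC:    return SCALAR_CMP_EQ;
  case Item_func::EQUAL_FUNC: return SCALAR_CMP_EQUAL;   // NULL-safe <=>
  case Item_func::LT_FUNC:    return SCALAR_CMP_LT;      // assumed enumerator
  case Item_func::LE_FUNC:    return SCALAR_CMP_LE;      // assumed enumerator
  case Item_func::GT_FUNC:    return SCALAR_CMP_GT;      // assumed enumerator
  case Item_func::GE_FUNC:    return SCALAR_CMP_GE;      // assumed enumerator
  default:
    DBUG_ASSERT(0);
    return SCALAR_CMP_EQ;
  }
}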
- if (param->using_real_indexes &&
- !field->optimize_range(param->real_keynr[key_part->key],
- key_part->part) &&
- type != EQ_FUNC &&
- type != EQUAL_FUNC)
- goto end; // Can't optimize this
- if (!field->can_optimize_range(this, value,
- type == EQUAL_FUNC || type == EQ_FUNC))
- goto end;
+bool Field::can_optimize_scalar_range(const RANGE_OPT_PARAM *param,
+ const KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op,
+ const Item *value) const
+{
+ bool is_eq_func= op == SCALAR_CMP_EQ || op == SCALAR_CMP_EQUAL;
+ if ((param->using_real_indexes &&
+ !optimize_range(param->real_keynr[key_part->key],
+ key_part->part) && !is_eq_func) ||
+ !can_optimize_range(cond, value, is_eq_func))
+ return false;
+ return true;
+}
+
- err= value->save_in_field_no_warnings(field, 1);
+uchar *Field::make_key_image(MEM_ROOT *mem_root, const KEY_PART *key_part)
+{
+ DBUG_ENTER("Field::make_key_image");
+ uint maybe_null= (uint) real_maybe_null();
+ uchar *str;
+ if (!(str= (uchar*) alloc_root(mem_root, key_part->store_length + 1)))
+ DBUG_RETURN(0);
+ if (maybe_null)
+ *str= (uchar) is_real_null(); // Set to 1 if null
+ get_key_image(str + maybe_null, key_part->length, key_part->image_type);
+ DBUG_RETURN(str);
+}
+
+
+SEL_ARG *Field::stored_field_make_mm_leaf_truncated(RANGE_OPT_PARAM *param,
+ scalar_comparison_op op,
+ Item *value)
+{
+ DBUG_ENTER("Field::stored_field_make_mm_leaf_truncated");
+ if ((op == SCALAR_CMP_EQ || op == SCALAR_CMP_EQUAL) &&
+ value->result_type() == item_cmp_type(result_type(),
+ value->result_type()))
+ DBUG_RETURN(new (param->mem_root) SEL_ARG_IMPOSSIBLE(this));
+ /*
+ TODO: We should return trees of the type SEL_ARG::IMPOSSIBLE
+ for the cases like int_field > 999999999999999999999999 as well.
+ */
+ DBUG_RETURN(0);
+}
+
+
+SEL_ARG *Field_num::get_mm_leaf(RANGE_OPT_PARAM *prm, KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op, Item *value)
+{
+ DBUG_ENTER("Field_num::get_mm_leaf");
+ if (!can_optimize_scalar_range(prm, key_part, cond, op, value))
+ DBUG_RETURN(0);
+ int err= value->save_in_field_no_warnings(this, 1);
+ if ((op != SCALAR_CMP_EQUAL && is_real_null()) || err < 0)
+ DBUG_RETURN(&null_element);
+ if (err > 0 && cmp_type() != value->result_type())
+ DBUG_RETURN(stored_field_make_mm_leaf_truncated(prm, op, value));
+ DBUG_RETURN(stored_field_make_mm_leaf(prm, key_part, op, value));
+}
+
+
+SEL_ARG *Field_temporal::get_mm_leaf(RANGE_OPT_PARAM *prm, KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op, Item *value)
+{
+ DBUG_ENTER("Field_temporal::get_mm_leaf");
+ if (!can_optimize_scalar_range(prm, key_part, cond, op, value))
+ DBUG_RETURN(0);
+ int err= value->save_in_field_no_warnings(this, 1);
+ if ((op != SCALAR_CMP_EQUAL && is_real_null()) || err < 0)
+ DBUG_RETURN(&null_element);
if (err > 0)
- {
- if (field->type_handler() == &type_handler_enum ||
- field->type_handler() == &type_handler_set)
- {
- if (type == EQ_FUNC || type == EQUAL_FUNC)
- tree= new (alloc) SEL_ARG_IMPOSSIBLE(field);
- goto end;
- }
+ DBUG_RETURN(stored_field_make_mm_leaf_truncated(prm, op, value));
+ DBUG_RETURN(stored_field_make_mm_leaf(prm, key_part, op, value));
+}
- if (err == 2 && field->cmp_type() == STRING_RESULT)
- {
- if (type == EQ_FUNC || type == EQUAL_FUNC)
- tree= new (alloc) SEL_ARG_IMPOSSIBLE(field);
- else
- tree= NULL; /* Cannot infer anything */
- goto end;
- }
- if (err == 3 && field->type() == FIELD_TYPE_DATE)
+SEL_ARG *Field_date_common::get_mm_leaf(RANGE_OPT_PARAM *prm,
+ KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op,
+ Item *value)
+{
+ DBUG_ENTER("Field_date_common::get_mm_leaf");
+ if (!can_optimize_scalar_range(prm, key_part, cond, op, value))
+ DBUG_RETURN(0);
+ int err= value->save_in_field_no_warnings(this, 1);
+ if ((op != SCALAR_CMP_EQUAL && is_real_null()) || err < 0)
+ DBUG_RETURN(&null_element);
+ if (err > 0)
+ {
+ if (err == 3)
{
/*
We were saving DATETIME into a DATE column, the conversion went ok
@@ -8114,76 +8766,86 @@ Item_bool_func::get_mm_leaf(RANGE_OPT_PARAM *param,
be done together with other types at the end of this function
(grep for stored_field_cmp_to_item)
*/
- if (type == EQ_FUNC || type == EQUAL_FUNC)
- {
- tree= new (alloc) SEL_ARG_IMPOSSIBLE(field);
- goto end;
- }
- // Continue with processing non-equality ranges
- }
- else if (field->cmp_type() != value->result_type())
- {
- if ((type == EQ_FUNC || type == EQUAL_FUNC) &&
- value->result_type() == item_cmp_type(field->result_type(),
- value->result_type()))
- {
- tree= new (alloc) SEL_ARG_IMPOSSIBLE(field);
- goto end;
- }
- else
- {
- /*
- TODO: We should return trees of the type SEL_ARG::IMPOSSIBLE
- for the cases like int_field > 999999999999999999999999 as well.
- */
- tree= 0;
- goto end;
- }
- }
-
- /*
- guaranteed at this point: err > 0; field and const of same type
- If an integer got bounded (e.g. to within 0..255 / -128..127)
- for < or >, set flags as for <= or >= (no NEAR_MAX / NEAR_MIN)
- */
- else if (err == 1 && field->result_type() == INT_RESULT)
- {
- if (type == LT_FUNC && (value->val_int() > 0))
- type= LE_FUNC;
- else if (type == GT_FUNC &&
- (field->type() != FIELD_TYPE_BIT) &&
- !((Field_num*)field)->unsigned_flag &&
- !((Item_int*)value)->unsigned_flag &&
- (value->val_int() < 0))
- type= GE_FUNC;
+ if (op == SCALAR_CMP_EQ || op == SCALAR_CMP_EQUAL)
+ DBUG_RETURN(new (prm->mem_root) SEL_ARG_IMPOSSIBLE(this));
+ DBUG_RETURN(stored_field_make_mm_leaf(prm, key_part, op, value));
}
+ DBUG_RETURN(stored_field_make_mm_leaf_truncated(prm, op, value));
}
- else if (err < 0)
+ DBUG_RETURN(stored_field_make_mm_leaf(prm, key_part, op, value));
+}
+
+
+SEL_ARG *Field_str::get_mm_leaf(RANGE_OPT_PARAM *prm, KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op, Item *value)
+{
+ DBUG_ENTER("Field_str::get_mm_leaf");
+ if (!can_optimize_scalar_range(prm, key_part, cond, op, value))
+ DBUG_RETURN(0);
+ int err= value->save_in_field_no_warnings(this, 1);
+ if ((op != SCALAR_CMP_EQUAL && is_real_null()) || err < 0)
+ DBUG_RETURN(&null_element);
+ if (err > 0)
{
- /* This happens when we try to insert a NULL field in a not null column */
- tree= &null_element; // cmp with NULL is never TRUE
- goto end;
+ if (op == SCALAR_CMP_EQ || op == SCALAR_CMP_EQUAL)
+ DBUG_RETURN(new (prm->mem_root) SEL_ARG_IMPOSSIBLE(this));
+ DBUG_RETURN(NULL); /* Cannot infer anything */
}
+ DBUG_RETURN(stored_field_make_mm_leaf(prm, key_part, op, value));
+}
- /*
- Any sargable predicate except "<=>" involving NULL as a constant is always
- FALSE
- */
- if (type != EQUAL_FUNC && field->is_real_null())
+
+SEL_ARG *Field::get_mm_leaf_int(RANGE_OPT_PARAM *prm, KEY_PART *key_part,
+ const Item_bool_func *cond,
+ scalar_comparison_op op, Item *value,
+ bool unsigned_field)
+{
+ DBUG_ENTER("Field::get_mm_leaf_int");
+ if (!can_optimize_scalar_range(prm, key_part, cond, op, value))
+ DBUG_RETURN(0);
+ int err= value->save_in_field_no_warnings(this, 1);
+ if ((op != SCALAR_CMP_EQUAL && is_real_null()) || err < 0)
+ DBUG_RETURN(&null_element);
+ if (err > 0)
{
- tree= &null_element;
- goto end;
+ if (value->result_type() != INT_RESULT)
+ DBUG_RETURN(stored_field_make_mm_leaf_truncated(prm, op, value));
+ else
+ DBUG_RETURN(stored_field_make_mm_leaf_bounded_int(prm, key_part,
+ op, value,
+ unsigned_field));
}
-
- str= (uchar*) alloc_root(alloc, key_part->store_length+1);
- if (!str)
- goto end;
- if (maybe_null)
- *str= (uchar) field->is_real_null(); // Set to 1 if null
- field->get_key_image(str+maybe_null, key_part->length,
- key_part->image_type);
- if (!(tree= new (alloc) SEL_ARG(field, str, str)))
- goto end; // out of memory
+ if (value->result_type() != INT_RESULT)
+ DBUG_RETURN(stored_field_make_mm_leaf(prm, key_part, op, value));
+ DBUG_RETURN(stored_field_make_mm_leaf_exact(prm, key_part, op, value));
+}
+
+
+/*
+ This method is called when:
+ - value->save_in_field_no_warnings() returned err > 0
+ - and both field and "value" are of integer data types
+ If an integer got bounded (e.g. to within 0..255 / -128..127)
+ for < or >, set flags as for <= or >= (no NEAR_MAX / NEAR_MIN)
+*/
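+/*
+  A sketch of the scenario (illustrative, not part of the original patch):
+  for a signed TINYINT column t1.a with an index, the predicate
+    WHERE t1.a < 1000
+  stores 1000 into the field, save_in_field_no_warnings() reports
+  truncation (err > 0), and the range produced here is effectively
+    t1.a <= 127
+*/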
+
+SEL_ARG *Field::stored_field_make_mm_leaf_bounded_int(RANGE_OPT_PARAM *param,
+ KEY_PART *key_part,
+ scalar_comparison_op op,
+ Item *value,
+ bool unsigned_field)
+{
+ DBUG_ENTER("Field::stored_field_make_mm_leaf_bounded_int");
+ if (op == SCALAR_CMP_EQ || op == SCALAR_CMP_EQUAL) // e.g. tinyint = 200
+ DBUG_RETURN(new (param->mem_root) SEL_ARG_IMPOSSIBLE(this));
+ longlong item_val= value->val_int();
+
+ if (op == SCALAR_CMP_LT && item_val > 0)
+ op= SCALAR_CMP_LE; // e.g. rewrite (tinyint < 200) to (tinyint <= 127)
+ else if (op == SCALAR_CMP_GT && !unsigned_field &&
+ !value->unsigned_flag && item_val < 0)
+ op= SCALAR_CMP_GE; // e.g. rewrite (tinyint > -200) to (tinyint >= -128)
/*
Check if we are comparing an UNSIGNED integer with a negative constant.
@@ -8196,66 +8858,74 @@ Item_bool_func::get_mm_leaf(RANGE_OPT_PARAM *param,
negative integers (which otherwise fails because at query execution time
negative integers are cast to unsigned if compared with unsigned).
*/
- if (field->result_type() == INT_RESULT &&
- value->result_type() == INT_RESULT &&
- ((field->type() == FIELD_TYPE_BIT ||
- ((Field_num *) field)->unsigned_flag) &&
- !((Item_int*) value)->unsigned_flag))
+ if (unsigned_field && !value->unsigned_flag && item_val < 0)
{
- longlong item_val= value->val_int();
- if (item_val < 0)
- {
- if (type == LT_FUNC || type == LE_FUNC)
- {
- tree->type= SEL_ARG::IMPOSSIBLE;
- goto end;
- }
- if (type == GT_FUNC || type == GE_FUNC)
- {
- tree= 0;
- goto end;
- }
- }
+ if (op == SCALAR_CMP_LT || op == SCALAR_CMP_LE) // e.g. uint < -1
+ DBUG_RETURN(new (param->mem_root) SEL_ARG_IMPOSSIBLE(this));
+ if (op == SCALAR_CMP_GT || op == SCALAR_CMP_GE) // e.g. uint > -1
+ DBUG_RETURN(0);
}
+ DBUG_RETURN(stored_field_make_mm_leaf_exact(param, key_part, op, value));
+}
- switch (type) {
- case LT_FUNC:
- if (stored_field_cmp_to_item(param->thd, field, value) == 0)
- tree->max_flag=NEAR_MAX;
- /* fall through */
- case LE_FUNC:
- if (!maybe_null)
- tree->min_flag=NO_MIN_RANGE; /* From start */
- else
- { // > NULL
- tree->min_value=is_null_string;
- tree->min_flag=NEAR_MIN;
- }
- break;
- case GT_FUNC:
- /* Don't use open ranges for partial key_segments */
- if ((!(key_part->flag & HA_PART_KEY_SEG)) &&
- (stored_field_cmp_to_item(param->thd, field, value) <= 0))
- tree->min_flag=NEAR_MIN;
- tree->max_flag= NO_MAX_RANGE;
- break;
- case GE_FUNC:
- /* Don't use open ranges for partial key_segments */
- if ((!(key_part->flag & HA_PART_KEY_SEG)) &&
- (stored_field_cmp_to_item(param->thd, field, value) < 0))
- tree->min_flag= NEAR_MIN;
- tree->max_flag=NO_MAX_RANGE;
- break;
- case EQ_FUNC:
- case EQUAL_FUNC:
- break;
- default:
- DBUG_ASSERT(0);
+
+SEL_ARG *Field::stored_field_make_mm_leaf(RANGE_OPT_PARAM *param,
+ KEY_PART *key_part,
+ scalar_comparison_op op,
+ Item *value)
+{
+ DBUG_ENTER("Field::stored_field_make_mm_leaf");
+ THD *thd= param->thd;
+ MEM_ROOT *mem_root= param->mem_root;
+ uchar *str;
+ if (!(str= make_key_image(param->mem_root, key_part)))
+ DBUG_RETURN(0);
+
+ switch (op) {
+ case SCALAR_CMP_LE:
+ DBUG_RETURN(new (mem_root) SEL_ARG_LE(str, this));
+ case SCALAR_CMP_LT:
+ DBUG_RETURN(new (mem_root) SEL_ARG_LT(thd, str, this, value));
+ case SCALAR_CMP_GT:
+ DBUG_RETURN(new (mem_root) SEL_ARG_GT(thd, str, key_part, this, value));
+ case SCALAR_CMP_GE:
+ DBUG_RETURN(new (mem_root) SEL_ARG_GE(thd, str, key_part, this, value));
+ case SCALAR_CMP_EQ:
+ case SCALAR_CMP_EQUAL:
+ DBUG_RETURN(new (mem_root) SEL_ARG(this, str, str));
break;
}
+ DBUG_ASSERT(0);
+ DBUG_RETURN(NULL);
+}
-end:
- DBUG_RETURN(tree);
+
+SEL_ARG *Field::stored_field_make_mm_leaf_exact(RANGE_OPT_PARAM *param,
+ KEY_PART *key_part,
+ scalar_comparison_op op,
+ Item *value)
+{
+ DBUG_ENTER("Field::stored_field_make_mm_leaf_exact");
+ uchar *str;
+ if (!(str= make_key_image(param->mem_root, key_part)))
+ DBUG_RETURN(0);
+
+ switch (op) {
+ case SCALAR_CMP_LE:
+ DBUG_RETURN(new (param->mem_root) SEL_ARG_LE(str, this));
+ case SCALAR_CMP_LT:
+ DBUG_RETURN(new (param->mem_root) SEL_ARG_LT(str, this));
+ case SCALAR_CMP_GT:
+ DBUG_RETURN(new (param->mem_root) SEL_ARG_GT(str, key_part, this));
+ case SCALAR_CMP_GE:
+ DBUG_RETURN(new (param->mem_root) SEL_ARG_GE(str, this));
+ case SCALAR_CMP_EQ:
+ case SCALAR_CMP_EQUAL:
+ DBUG_RETURN(new (param->mem_root) SEL_ARG(this, str, str));
+ break;
+ }
+ DBUG_ASSERT(0);
+ DBUG_RETURN(NULL);
}
@@ -8374,6 +9044,11 @@ int and_range_trees(RANGE_OPT_PARAM *param, SEL_TREE *tree1, SEL_TREE *tree2,
if (key && key->type == SEL_ARG::IMPOSSIBLE)
{
result->type= SEL_TREE::IMPOSSIBLE;
+ if (param->using_real_indexes)
+ {
+ param->table->with_impossible_ranges.set_bit(param->
+ real_keynr[key_no]);
+ }
DBUG_RETURN(1);
}
result_keys.set_bit(key_no);
@@ -10417,6 +11092,10 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
MY_MIN(param->table->quick_condition_rows, rows);
param->table->quick_rows[keynr]= rows;
param->table->quick_costs[keynr]= cost->total_cost();
+ if (keynr == param->table->s->primary_key && pk_is_clustered)
+ param->table->quick_index_only_costs[keynr]= 0;
+ else
+ param->table->quick_index_only_costs[keynr]= cost->index_only_cost();
}
}
/* Figure out if the key scan is ROR (returns rows in ROWID order) or not */
@@ -11385,7 +12064,6 @@ int QUICK_RANGE_SELECT::reset()
HANDLER_BUFFER empty_buf;
MY_BITMAP * const save_read_set= head->read_set;
MY_BITMAP * const save_write_set= head->write_set;
- MY_BITMAP * const save_vcol_set= head->vcol_set;
DBUG_ENTER("QUICK_RANGE_SELECT::reset");
last_range= NULL;
cur_range= (QUICK_RANGE**) ranges.buffer;
@@ -11399,8 +12077,7 @@ int QUICK_RANGE_SELECT::reset()
}
if (in_ror_merged_scan)
- head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap,
- &column_bitmap);
+ head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap);
if (file->inited == handler::NONE)
{
@@ -11446,8 +12123,7 @@ int QUICK_RANGE_SELECT::reset()
err:
/* Restore bitmaps set on entry */
if (in_ror_merged_scan)
- head->column_bitmaps_set_no_signal(save_read_set, save_write_set,
- save_vcol_set);
+ head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
DBUG_RETURN(error);
}
@@ -11478,16 +12154,13 @@ int QUICK_RANGE_SELECT::get_next()
MY_BITMAP * const save_read_set= head->read_set;
MY_BITMAP * const save_write_set= head->write_set;
- MY_BITMAP * const save_vcol_set= head->vcol_set;
/*
We don't need to signal the bitmap change as the bitmap is always the
same for this head->file
*/
- head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap,
- &column_bitmap);
+ head->column_bitmaps_set_no_signal(&column_bitmap, &column_bitmap);
result= file->multi_range_read_next(&dummy);
- head->column_bitmaps_set_no_signal(save_read_set, save_write_set,
- save_vcol_set);
+ head->column_bitmaps_set_no_signal(save_read_set, save_write_set);
DBUG_RETURN(result);
}
@@ -12408,16 +13081,27 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
DBUG_ENTER("get_best_group_min_max");
+ Json_writer_object trace_group(thd, "group_index_range");
+ const char* cause= NULL;
+
/* Perform few 'cheap' tests whether this access method is applicable. */
- if (!join)
- DBUG_RETURN(NULL); /* This is not a select statement. */
- if ((join->table_count != 1) || /* The query must reference one table. */
- (join->select_lex->olap == ROLLUP_TYPE)) /* Check (B3) for ROLLUP */
- DBUG_RETURN(NULL);
- if (table->s->keys == 0) /* There are no indexes to use. */
+ if (!join) /* This is not a select statement. */
+ cause= "no join";
+ else if (join->table_count != 1) /* The query must reference one table. */
+ cause= "not single_table";
+ else if (join->select_lex->olap == ROLLUP_TYPE) /* Check (B3) for ROLLUP */
+ cause= "rollup";
+ else if (table->s->keys == 0) /* There are no indexes to use. */
+ cause= "no index";
+ else if (join->conds && join->conds->used_tables()
+ & OUTER_REF_TABLE_BIT) /* Cannot execute with correlated conditions. */
+ cause= "correlated conditions";
+
+ if (cause)
+ {
+ trace_group.add("chosen", false).add("cause", cause);
DBUG_RETURN(NULL);
- if (join->conds && join->conds->used_tables() & OUTER_REF_TABLE_BIT)
- DBUG_RETURN(NULL); /* Cannot execute with correlated conditions. */
+ }
/* Check (SA1,SA4) and store the only MIN/MAX argument - the C attribute.*/
List_iterator<Item> select_items_it(join->fields_list);
@@ -12426,7 +13110,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
if ((!join->group_list) && /* Neither GROUP BY nor a DISTINCT query. */
(!join->select_distinct) &&
!is_agg_distinct)
+ {
+ trace_group.add("chosen", false).add("cause","no group by or distinct");
DBUG_RETURN(NULL);
+ }
/* Analyze the query in more detail. */
if (join->sum_funcs[0])
@@ -12445,7 +13132,11 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
min_max_item->sum_func() == Item_sum::AVG_DISTINCT_FUNC))
continue;
else
+ {
+ trace_group.add("chosen", false)
+ .add("cause", "not applicable aggregate function");
DBUG_RETURN(NULL);
+ }
/* The argument of MIN/MAX. */
Item *expr= min_max_item->get_arg(0)->real_item();
@@ -12454,26 +13145,41 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
if (! min_max_arg_item)
min_max_arg_item= (Item_field*) expr;
else if (! min_max_arg_item->eq(expr, 1))
+ {
+ trace_group.add("chosen", false)
+ .add("cause", "arguments different in min max function");
DBUG_RETURN(NULL);
+ }
}
else
+ {
+ trace_group.add("chosen", false)
+ .add("cause", "no field item in min max function");
DBUG_RETURN(NULL);
+ }
}
}
/* Check (SA7). */
if (is_agg_distinct && (have_max || have_min))
{
+ trace_group.add("chosen", false)
+ .add("cause", "have both agg distinct and min max");
DBUG_RETURN(NULL);
}
/* Check (SA5). */
if (join->select_distinct)
{
+ trace_group.add("distinct_query", true);
while ((item= select_items_it++))
{
if (item->real_item()->type() != Item::FIELD_ITEM)
+ {
+ trace_group.add("chosen", false)
+ .add("cause", "distinct field is expression");
DBUG_RETURN(NULL);
+ }
}
}
@@ -12482,7 +13188,11 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
for (tmp_group= join->group_list; tmp_group; tmp_group= tmp_group->next)
{
if ((*tmp_group->item)->real_item()->type() != Item::FIELD_ITEM)
+ {
+ trace_group.add("chosen", false)
+ .add("cause", "group field is expression");
DBUG_RETURN(NULL);
+ }
elements_in_group++;
}
@@ -12504,10 +13214,16 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
ha_rows cur_quick_prefix_records= 0;
// We go through allowed indexes
+ Json_writer_array trace_indexes(thd, "potential_group_range_indexes");
+
for (uint cur_param_idx= 0; cur_param_idx < param->keys ; ++cur_param_idx)
{
const uint cur_index= param->real_keynr[cur_param_idx];
KEY *const cur_index_info= &table->key_info[cur_index];
+
+ Json_writer_object trace_idx(thd);
+ trace_idx.add("index", cur_index_info->name);
+
KEY_PART_INFO *cur_part;
KEY_PART_INFO *end_part; /* Last part for loops. */
/* Last index part. */
@@ -12532,7 +13248,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
*/
if (!table->covering_keys.is_set(cur_index) ||
!table->keys_in_use_for_group_by.is_set(cur_index))
- continue;
+ {
+ cause= "not covering";
+ goto next_index;
+ }
/*
This function is called on the precondition that the index is covering.
@@ -12540,7 +13259,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
these are duplicates. The GROUP BY list cannot be a prefix of the index.
*/
if (elements_in_group > table->actual_n_key_parts(cur_index_info))
- continue;
+ {
+ cause= "group key parts greater than index key parts";
+ goto next_index;
+ }
/*
Unless extended keys can be used for cur_index:
@@ -12566,10 +13288,15 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
*/
if (bitmap_is_set(table->read_set, cur_field->field_index) &&
!cur_field->part_of_key_not_clustered.is_set(cur_index))
+ {
+ cause= "not covering";
goto next_index; // Field was not part of key
+ }
}
}
+ trace_idx.add("covering", true);
+
max_key_part= 0;
used_key_parts_map.clear_all();
@@ -12600,7 +13327,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
used_key_parts_map.set_bit(max_key_part);
}
else
+ {
+ cause= "group attribute not prefix in index";
goto next_index;
+ }
}
}
/*
@@ -12629,7 +13359,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
/* not doing loose index scan for derived tables */
if (!item_field->field)
+ {
+ cause= "derived table";
goto next_index;
+ }
/* Find the order of the key part in the index. */
key_part_nr= get_field_keypart(cur_index_info, item_field->field);
@@ -12641,7 +13374,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
continue;
if (key_part_nr < 1 ||
(!is_agg_distinct && key_part_nr > join->fields_list.elements))
+ {
+ cause= "select attribute not prefix in index";
goto next_index;
+ }
cur_part= cur_index_info->key_part + key_part_nr - 1;
cur_group_prefix_len+= cur_part->store_length;
used_key_parts_map.set_bit(key_part_nr);
@@ -12666,7 +13402,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
{
key_part_nr= get_field_keypart(cur_index_info, min_max_arg_item->field);
if (key_part_nr <= cur_group_key_parts)
+ {
+ cause= "aggregate column not suffix in idx";
goto next_index;
+ }
min_max_arg_part= cur_index_info->key_part + key_part_nr - 1;
}
@@ -12677,6 +13416,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
if (cur_index_info->flags & HA_NOSAME &&
cur_group_key_parts == cur_index_info->user_defined_key_parts)
{
+ cause= "using unique index";
goto next_index;
}
@@ -12716,7 +13456,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
last_part, thd, cur_key_infix,
&cur_key_infix_len,
&first_non_infix_part))
+ {
+ cause= "nonconst equality gap attribute";
goto next_index;
+ }
}
else if (min_max_arg_part &&
(min_max_arg_part - first_non_group_part > 0))
@@ -12725,6 +13468,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
There is a gap but no range tree, thus no predicates at all for the
non-group keyparts.
*/
+ cause= "no nongroup keypart predicate";
goto next_index;
}
else if (first_non_group_part && join->conds)
@@ -12748,7 +13492,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
/* Check if cur_part is referenced in the WHERE clause. */
if (join->conds->walk(&Item::find_item_in_field_list_processor, 0,
key_part_range))
+ {
+ cause= "keypart reference from where clause";
goto next_index;
+ }
}
}
@@ -12763,7 +13510,10 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
for (; cur_part != last_part; cur_part++)
{
if (bitmap_is_set(table->read_set, cur_part->field->field_index))
+ {
+ cause= "keypart after infix in query";
goto next_index;
+ }
}
}
@@ -12780,6 +13530,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
index_range_tree, &cur_range) ||
(cur_range && cur_range->type != SEL_ARG::KEY_RANGE))
{
+ cause= "minmax keypart in disjunctive query";
goto next_index;
}
}
@@ -12802,6 +13553,17 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
cur_index_tree, TRUE,
&mrr_flags, &mrr_bufsize,
&dummy_cost);
+ if (unlikely(cur_index_tree && thd->trace_started()))
+ {
+ Json_writer_array trace_range(thd, "ranges");
+
+ const KEY_PART_INFO *key_part= cur_index_info->key_part;
+
+ String range_info;
+ range_info.set_charset(system_charset_info);
+ append_range_all_keyparts(&trace_range, NULL, &range_info,
+ cur_index_tree, key_part);
+ }
}
cost_group_min_max(table, cur_index_info, cur_used_key_parts,
cur_group_key_parts, tree, cur_index_tree,
@@ -12812,6 +13574,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
Do not compare doubles directly because they may have different
representations (64 vs. 80 bits).
*/
+ trace_idx.add("rows", cur_records).add("cost", cur_read_cost);
+
if (cur_read_cost < best_read_cost - (DBL_EPSILON * cur_read_cost))
{
index_info= cur_index_info;
@@ -12829,8 +13593,16 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
used_key_parts= cur_used_key_parts;
}
- next_index:;
+ next_index:
+ if (cause)
+ {
+ trace_idx.add("usable", false).add("cause", cause);
+ cause= NULL;
+ }
}
+
+ trace_indexes.end();
+
if (!index_info) /* No usable index found. */
DBUG_RETURN(NULL);
@@ -12841,14 +13613,22 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
(index_info->flags & HA_SPATIAL) ?
Field::itMBR : Field::itRAW,
&has_min_max_fld, &has_other_fld))
+ {
+ trace_group.add("usable", false)
+ .add("cause", "unsupported predicate on agg attribute");
DBUG_RETURN(NULL);
+ }
/*
Check (SA6) if clustered key is used
*/
if (is_agg_distinct && index == table->s->primary_key &&
table->file->primary_key_is_clustered())
+ {
+ trace_group.add("usable", false)
+ .add("cause", "index is clustered");
DBUG_RETURN(NULL);
+ }
/* The query passes all tests, so construct a new TRP object. */
read_plan= new (param->mem_root)
@@ -12869,6 +13649,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
read_plan->records= best_records;
if (read_time < best_read_cost && is_agg_distinct)
{
+ trace_group.add("index_scan", true);
read_plan->read_cost= 0;
read_plan->use_index_scan();
}
@@ -13062,7 +13843,8 @@ check_group_min_max_predicates(Item *cond, Item_field *min_max_arg_item,
if (args[0] && args[1]) // this is a binary function or BETWEEN
{
- DBUG_ASSERT(pred->is_bool_type());
+ DBUG_ASSERT(pred->fixed_type_handler());
+ DBUG_ASSERT(pred->fixed_type_handler()->is_bool_type());
Item_bool_func *bool_func= (Item_bool_func*) pred;
Field *field= min_max_arg_item->field;
if (!args[2]) // this is a binary function
@@ -13476,7 +14258,7 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
1/double(2*TIME_FOR_COMPARE);
const double cpu_cost= num_groups *
- (tree_traversal_cost + 1/double(TIME_FOR_COMPARE));
+ (tree_traversal_cost + 1/double(TIME_FOR_COMPARE_IDX));
*read_cost= io_cost + cpu_cost;
*records= num_groups;
@@ -14825,7 +15607,6 @@ static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg)
DBUG_VOID_RETURN;
}
-
void QUICK_RANGE_SELECT::dbug_dump(int indent, bool verbose)
{
/* purecov: begin inspected */
@@ -14953,3 +15734,178 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose)
}
#endif /* !DBUG_OFF */
+static
+void append_range(String *out, const KEY_PART_INFO *key_part,
+ const uchar *min_key, const uchar *max_key, const uint flag)
+{
+ if (out->length() > 0)
+ out->append(STRING_WITH_LEN(" AND "));
+
+ if (flag & GEOM_FLAG)
+ {
+ /*
+ The flags of GEOM ranges do not work the same way as for other
+ range types, so printing "col < some_geom" doesn't make sense.
+ Just print the column name, not operator.
+ */
+ out->append(key_part->field->field_name);
+ out->append(STRING_WITH_LEN(" "));
+ print_key_value(out, key_part, min_key);
+ return;
+ }
+
+ if (!(flag & NO_MIN_RANGE))
+ {
+ print_key_value(out, key_part, min_key);
+ if (flag & NEAR_MIN)
+ out->append(STRING_WITH_LEN(" < "));
+ else
+ out->append(STRING_WITH_LEN(" <= "));
+ }
+
+ out->append(key_part->field->field_name);
+
+ if (!(flag & NO_MAX_RANGE))
+ {
+ if (flag & NEAR_MAX)
+ out->append(STRING_WITH_LEN(" < "));
+ else
+ out->append(STRING_WITH_LEN(" <= "));
+ print_key_value(out, key_part, max_key);
+ }
+}
+
+/*
+  Add ranges to the optimizer trace.
+
+  For example, for the query
+    SELECT * FROM t1 WHERE a=2;
+  with an index on column a, the range
+    2 <= a <= 2
+  is created and added to the trace.
+*/
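+/*
+  A sketch of the resulting optimizer trace fragment (the exact JSON
+  layout may vary between versions):
+    "ranges": ["2 <= a <= 2"]
+*/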
+
+static void append_range_all_keyparts(Json_writer_array *range_trace,
+ String *range_string,
+ String *range_so_far, const SEL_ARG *keypart,
+ const KEY_PART_INFO *key_parts)
+{
+
+ DBUG_ASSERT(keypart);
+ DBUG_ASSERT(keypart && keypart != &null_element);
+
+ // Navigate to first interval in red-black tree
+ const KEY_PART_INFO *cur_key_part= key_parts + keypart->part;
+ const SEL_ARG *keypart_range= keypart->first();
+ const size_t save_range_so_far_length= range_so_far->length();
+
+
+ while (keypart_range)
+ {
+ // Append the current range predicate to the range String
+ switch (keypart->type)
+ {
+ case SEL_ARG::Type::KEY_RANGE:
+ append_range(range_so_far, cur_key_part, keypart_range->min_value,
+ keypart_range->max_value,
+ keypart_range->min_flag | keypart_range->max_flag);
+ break;
+ case SEL_ARG::Type::MAYBE_KEY:
+ range_so_far->append("MAYBE_KEY");
+ break;
+ case SEL_ARG::Type::IMPOSSIBLE:
+ range_so_far->append("IMPOSSIBLE");
+ break;
+ default:
+ DBUG_ASSERT(false);
+ break;
+ }
+
+ if (keypart_range->next_key_part &&
+ keypart_range->next_key_part->part ==
+ keypart_range->part + 1 &&
+ keypart_range->is_singlepoint())
+ {
+ append_range_all_keyparts(range_trace, range_string, range_so_far,
+ keypart_range->next_key_part, key_parts);
+ }
+ else
+ range_trace->add(range_so_far->c_ptr_safe(), range_so_far->length());
+ keypart_range= keypart_range->next;
+ range_so_far->length(save_range_so_far_length);
+ }
+}
+
+/**
+ Print a key to a string
+
+ @param[out] out String the key is appended to
+ @param[in] key_part Index components description
+ @param[in] key Key tuple
+*/
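+/*
+  Illustrative output forms (a sketch, not exhaustive): a NULL key part
+  prints as "NULL", a blob or geometry key prints a placeholder such as
+  "unprintable_blob_value", a binary key prints as a lowercase hex string
+  like "0xab12", and other keys print their value via Field::val_str().
+*/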
+static void print_key_value(String *out, const KEY_PART_INFO *key_part,
+ const uchar *key)
+{
+ Field *field= key_part->field;
+
+ if (field->flags & BLOB_FLAG)
+ {
+ // Byte 0 of a nullable key is the null-byte. If set, key is NULL.
+ if (field->real_maybe_null() && *key)
+ out->append(STRING_WITH_LEN("NULL"));
+ else
+ (field->type() == MYSQL_TYPE_GEOMETRY)
+ ? out->append(STRING_WITH_LEN("unprintable_geometry_value"))
+ : out->append(STRING_WITH_LEN("unprintable_blob_value"));
+ return;
+ }
+
+ uint store_length= key_part->store_length;
+
+ if (field->real_maybe_null())
+ {
+ /*
+ Byte 0 of key is the null-byte. If set, key is NULL.
+ Otherwise, print the key value starting immediately after the
+ null-byte
+ */
+ if (*key)
+ {
+ out->append(STRING_WITH_LEN("NULL"));
+ return;
+ }
+ key++; // Skip null byte
+ store_length--;
+ }
+
+ /*
+ Binary data cannot be converted to UTF8 which is what the
+ optimizer trace expects. If the column is binary, the hex
+ representation is printed to the trace instead.
+ */
+ if (field->flags & BINARY_FLAG)
+ {
+ out->append("0x");
+ for (uint i = 0; i < store_length; i++)
+ {
+ out->append(_dig_vec_lower[*(key + i) >> 4]);
+ out->append(_dig_vec_lower[*(key + i) & 0x0F]);
+ }
+ return;
+ }
+
+ StringBuffer<128> tmp(system_charset_info);
+ TABLE *table= field->table;
+ my_bitmap_map *old_sets[2];
+
+ dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set);
+
+ field->set_key_image(key, key_part->length);
+ if (field->type() == MYSQL_TYPE_BIT)
+ (void)field->val_int_as_str(&tmp, 1); // may change tmp's charset
+ else
+ field->val_str(&tmp); // may change tmp's charset
+ out->append(tmp.ptr(), tmp.length(), tmp.charset());
+
+ dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets);
+}
diff --git a/sql/opt_range.h b/sql/opt_range.h
index d5416988b88..2dab90b9f69 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -566,7 +566,7 @@ public:
FALSE Otherwise
*/
- bool is_singlepoint()
+ bool is_singlepoint() const
{
/*
Check for NEAR_MIN ("strictly less") and NO_MIN_RANGE (-inf < field)
@@ -1645,7 +1645,8 @@ class SQL_SELECT :public Sql_alloc {
{
key_map tmp;
tmp.set_all();
- return test_quick_select(thd, tmp, 0, limit, force_quick_range, FALSE, FALSE) < 0;
+ return test_quick_select(thd, tmp, 0, limit, force_quick_range,
+ FALSE, FALSE, FALSE) < 0;
}
/*
RETURN
@@ -1662,7 +1663,8 @@ class SQL_SELECT :public Sql_alloc {
}
int test_quick_select(THD *thd, key_map keys, table_map prev_tables,
ha_rows limit, bool force_quick_range,
- bool ordered_output, bool remove_false_parts_of_where);
+ bool ordered_output, bool remove_false_parts_of_where,
+ bool only_single_index_range_scan);
};
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index e66695ab9b0..2fedd8a4ed3 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -33,6 +33,7 @@
#include "opt_subselect.h"
#include "sql_test.h"
#include <my_bit.h>
+#include "opt_trace.h"
/*
This file contains optimizations for semi-join subqueries.
@@ -437,7 +438,7 @@ Currently, solution #2 is implemented.
LEX_CSTRING weedout_key= {STRING_WITH_LEN("weedout_key")};
static
-bool subquery_types_allow_materialization(Item_in_subselect *in_subs);
+bool subquery_types_allow_materialization(THD *thd, Item_in_subselect *in_subs);
static bool replace_where_subcondition(JOIN *, Item **, Item *, Item *, bool);
static int subq_sj_candidate_cmp(Item_in_subselect* el1, Item_in_subselect* el2,
void *arg);
@@ -455,6 +456,7 @@ void best_access_path(JOIN *join, JOIN_TAB *s,
table_map remaining_tables, uint idx,
bool disable_jbuf, double record_count,
POSITION *pos, POSITION *loose_scan_pos);
+void trace_plan_prefix(JOIN *join, uint idx, table_map remaining_tables);
static Item *create_subq_in_equalities(THD *thd, SJ_MATERIALIZATION_INFO *sjm,
Item_in_subselect *subq_pred);
@@ -519,8 +521,9 @@ bool is_materialization_applicable(THD *thd, Item_in_subselect *in_subs,
if (optimizer_flag(thd, OPTIMIZER_SWITCH_MATERIALIZATION) && // 0
!child_select->is_part_of_union() && // 1
parent_unit->first_select()->leaf_tables.elements && // 2
+ child_select->outer_select() &&
child_select->outer_select()->leaf_tables.elements && // 2A
- subquery_types_allow_materialization(in_subs) &&
+ subquery_types_allow_materialization(thd, in_subs) &&
(in_subs->is_top_level_item() || //3
optimizer_flag(thd,
OPTIMIZER_SWITCH_PARTIAL_MATCH_ROWID_MERGE) || //3
@@ -681,7 +684,7 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
{
DBUG_PRINT("info", ("Subquery is semi-join conversion candidate"));
- (void)subquery_types_allow_materialization(in_subs);
+ (void)subquery_types_allow_materialization(thd, in_subs);
in_subs->is_flattenable_semijoin= TRUE;
@@ -695,6 +698,10 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
if (arena)
thd->restore_active_arena(arena, &backup);
in_subs->is_registered_semijoin= TRUE;
+ OPT_TRACE_TRANSFORM(thd, trace_wrapper, trace_transform,
+ select_lex->select_number,
+ "IN (SELECT)", "semijoin");
+ trace_transform.add("chosen", true);
}
}
else
@@ -822,17 +829,22 @@ int check_and_do_in_subquery_rewrites(JOIN *join)
*/
static
-bool subquery_types_allow_materialization(Item_in_subselect *in_subs)
+bool subquery_types_allow_materialization(THD* thd, Item_in_subselect *in_subs)
{
DBUG_ENTER("subquery_types_allow_materialization");
- DBUG_ASSERT(in_subs->left_expr->fixed);
+ DBUG_ASSERT(in_subs->left_expr->is_fixed());
List_iterator<Item> it(in_subs->unit->first_select()->item_list);
uint elements= in_subs->unit->first_select()->item_list.elements;
+ const char* cause= NULL;
in_subs->types_allow_materialization= FALSE; // Assign default values
in_subs->sjm_scan_allowed= FALSE;
+
+ OPT_TRACE_TRANSFORM(thd, trace_wrapper, trace_transform,
+ in_subs->get_select_lex()->select_number,
+ "IN (SELECT)", "materialization");
bool all_are_fields= TRUE;
uint32 total_key_length = 0;
@@ -845,7 +857,11 @@ bool subquery_types_allow_materialization(Item_in_subselect *in_subs)
total_key_length += inner->max_length;
if (!inner->type_handler()->subquery_type_allows_materialization(inner,
outer))
+ {
+ trace_transform.add("possible", false);
+ trace_transform.add("cause", "types mismatch");
DBUG_RETURN(FALSE);
+ }
}
/*
@@ -855,14 +871,23 @@ bool subquery_types_allow_materialization(Item_in_subselect *in_subs)
Make sure that the length of the key for the temp_table is
greater than 0.
*/
- if (!total_key_length || total_key_length > tmp_table_max_key_length() ||
- elements > tmp_table_max_key_parts())
- DBUG_RETURN(FALSE);
-
- in_subs->types_allow_materialization= TRUE;
- in_subs->sjm_scan_allowed= all_are_fields;
- DBUG_PRINT("info",("subquery_types_allow_materialization: ok, allowed"));
- DBUG_RETURN(TRUE);
+ if (!total_key_length)
+ cause= "zero length key for materialized table";
+ else if (total_key_length > tmp_table_max_key_length())
+ cause= "length of key greater than allowed key length for materialized tables";
+ else if (elements > tmp_table_max_key_parts())
+ cause= "#keyparts greater than allowed key parts for materialized tables";
+ else
+ {
+ in_subs->types_allow_materialization= TRUE;
+ in_subs->sjm_scan_allowed= all_are_fields;
+ trace_transform.add("sjm_scan_allowed", all_are_fields)
+ .add("possible", true);
+ DBUG_PRINT("info",("subquery_types_allow_materialization: ok, allowed"));
+ DBUG_RETURN(TRUE);
+ }
+ trace_transform.add("possible", false).add("cause", cause);
+ DBUG_RETURN(FALSE);
}
@@ -902,7 +927,7 @@ bool make_in_exists_conversion(THD *thd, JOIN *join, Item_in_subselect *item)
/*
We're going to finalize IN->EXISTS conversion.
Normally, IN->EXISTS conversion takes place inside the
- Item_subselect::fix_fields() call, where item_subselect->fixed==FALSE (as
+ Item_subselect::fix_fields() call, where item_subselect->is_fixed()==FALSE (as
fix_fields() hasn't finished yet) and item_subselect->changed==FALSE (as
the conversion hasn't been finalized)
@@ -929,7 +954,7 @@ bool make_in_exists_conversion(THD *thd, JOIN *join, Item_in_subselect *item)
item->fixed= 1;
Item *substitute= item->substitution;
- bool do_fix_fields= !item->substitution->fixed;
+ bool do_fix_fields= !item->substitution->is_fixed();
/*
The Item_subselect has already been wrapped with Item_in_optimizer, so we
should search for item->optimizer, not 'item'.
@@ -1212,15 +1237,31 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
/* Stop processing if we've reached a subquery that's attached to the ON clause */
if (in_subq->do_not_convert_to_sj)
+ {
+ OPT_TRACE_TRANSFORM(thd, trace_wrapper, trace_transform,
+ in_subq->get_select_lex()->select_number,
+ "IN (SELECT)", "semijoin");
+ trace_transform.add("converted_to_semi_join", false)
+ .add("cause", "subquery attached to the ON clause");
break;
+ }
if (in_subq->is_flattenable_semijoin)
{
+ OPT_TRACE_TRANSFORM(thd, trace_wrapper, trace_transform,
+ in_subq->get_select_lex()->select_number,
+ "IN (SELECT)", "semijoin");
if (join->table_count +
in_subq->unit->first_select()->join->table_count >= MAX_TABLES)
+ {
+ trace_transform.add("converted_to_semi_join", false);
+ trace_transform.add("cause",
+ "table in parent join now exceeds MAX_TABLES");
break;
+ }
if (convert_subq_to_sj(join, in_subq))
goto restore_arena_and_fail;
+ trace_transform.add("converted_to_semi_join", true);
}
else
{
@@ -1265,7 +1306,7 @@ bool convert_join_subqueries_to_semijoins(JOIN *join)
in_subq->fixed= 1;
Item *substitute= in_subq->substitution;
- bool do_fix_fields= !in_subq->substitution->fixed;
+ bool do_fix_fields= !in_subq->substitution->is_fixed();
Item **tree= (in_subq->emb_on_expr_nest == NO_JOIN_NEST)?
&join->conds : &(in_subq->emb_on_expr_nest->on_expr);
Item *replace_me= in_subq->original_item();
@@ -1800,7 +1841,7 @@ static bool convert_subq_to_sj(JOIN *parent_join, Item_in_subselect *subq_pred)
subq_lex->ref_pointer_array[i]);
if (!item_eq)
DBUG_RETURN(TRUE);
- DBUG_ASSERT(subq_pred->left_expr->element_index(i)->fixed);
+ DBUG_ASSERT(subq_pred->left_expr->element_index(i)->is_fixed());
if (subq_pred->left_expr_orig->element_index(i) !=
subq_pred->left_expr->element_index(i))
thd->change_item_tree(item_eq->arguments(),
@@ -2339,8 +2380,15 @@ int pull_out_semijoin_tables(JOIN *join)
bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
{
DBUG_ENTER("optimize_semijoin_nests");
+ THD *thd= join->thd;
List_iterator<TABLE_LIST> sj_list_it(join->select_lex->sj_nests);
TABLE_LIST *sj_nest;
+ if (!join->select_lex->sj_nests.elements)
+ DBUG_RETURN(FALSE);
+ Json_writer_object wrapper(thd);
+ Json_writer_object trace_semijoin_nest(thd,
+ "execution_plan_for_potential_materialization");
+ Json_writer_array trace_steps_array(thd, "steps");
while ((sj_nest= sj_list_it++))
{
/* semi-join nests with only constant tables are not valid */
@@ -2896,6 +2944,7 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
{
bool sjm_scan;
SJ_MATERIALIZATION_INFO *mat_info;
+ THD *thd= join->thd;
if ((mat_info= at_sjmat_pos(join, remaining_tables,
new_join_tab, idx, &sjm_scan)))
{
@@ -2997,6 +3046,7 @@ bool Sj_materialization_picker::check_qep(JOIN *join,
POSITION curpos, dummy;
/* Need to re-run best-access-path as we prefix_rec_count has changed */
bool disable_jbuf= (join->thd->variables.join_cache_level == 0);
+ Json_writer_temp_disable trace_semijoin_mat_scan(thd);
for (i= first_tab + mat_info->tables; i <= idx; i++)
{
best_access_path(join, join->positions[i].table, rem_tables, i,
@@ -3547,6 +3597,12 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
table_map handled_tabs= 0;
join->sjm_lookup_tables= 0;
join->sjm_scan_tables= 0;
+ THD *thd= join->thd;
+ if (!join->select_lex->sj_nests.elements)
+ return;
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_array trace_semijoin_strategies(thd,
+ "fix_semijoin_strategies_for_picked_join_order");
for (tablenr= table_count - 1 ; tablenr != join->const_tables - 1; tablenr--)
{
POSITION *pos= join->best_positions + tablenr;
@@ -3571,8 +3627,18 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
first= tablenr - sjm->tables + 1;
join->best_positions[first].n_sj_tables= sjm->tables;
join->best_positions[first].sj_strategy= SJ_OPT_MATERIALIZE;
+ Json_writer_object semijoin_strategy(thd);
+ semijoin_strategy.add("semi_join_strategy","sj_materialize");
+ Json_writer_array semijoin_plan(thd, "join_order");
for (uint i= first; i < first+ sjm->tables; i++)
+ {
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(join->best_positions[i].table);
+ }
join->sjm_lookup_tables |= join->best_positions[i].table->table->map;
+ }
}
else if (pos->sj_strategy == SJ_OPT_MATERIALIZE_SCAN)
{
@@ -3610,8 +3676,16 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
POSITION dummy;
join->cur_sj_inner_tables= 0;
+ Json_writer_object semijoin_strategy(thd);
+ semijoin_strategy.add("semi_join_strategy","sj_materialize_scan");
+ Json_writer_array semijoin_plan(thd, "join_order");
for (i= first + sjm->tables; i <= tablenr; i++)
{
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(join->best_positions[i].table);
+ }
best_access_path(join, join->best_positions[i].table, rem_tables, i,
FALSE, prefix_rec_count,
join->best_positions + i, &dummy);
@@ -3640,8 +3714,16 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
join buffering
*/
join->cur_sj_inner_tables= 0;
+ Json_writer_object semijoin_strategy(thd);
+ semijoin_strategy.add("semi_join_strategy","firstmatch");
+ Json_writer_array semijoin_plan(thd, "join_order");
for (idx= first; idx <= tablenr; idx++)
{
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(join->best_positions[idx].table);
+ }
if (join->best_positions[idx].use_join_buffer)
{
best_access_path(join, join->best_positions[idx].table,
@@ -3670,8 +3752,16 @@ void fix_semijoin_strategies_for_picked_join_order(JOIN *join)
join buffering
*/
join->cur_sj_inner_tables= 0;
+ Json_writer_object semijoin_strategy(thd);
+ semijoin_strategy.add("semi_join_strategy","sj_materialize");
+ Json_writer_array semijoin_plan(thd, "join_order");
for (idx= first; idx <= tablenr; idx++)
{
+ if (unlikely(thd->trace_started()))
+ {
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(join->best_positions[idx].table);
+ }
if (join->best_positions[idx].use_join_buffer || (idx == first))
{
best_access_path(join, join->best_positions[idx].table,
@@ -5509,31 +5599,515 @@ int select_value_catcher::send_data(List<Item> &items)
}
-/*
- Setup JTBM join tabs for execution
+/**
+ @brief
+ Attach conditions to already optimized condition
+
+ @param thd the thread handle
+  @param cond the condition to which new conditions are added
+ @param cond_eq IN/OUT the multiple equalities of cond
+ @param new_conds the list of conditions to be added
+ @param cond_value the returned value of the condition
+ if it can be evaluated
+
+ @details
+  The method creates a new condition through the union of cond and
+  the conditions from the new_conds list.
+  The method is called after optimize_cond() for cond. The result
+  of the union should be the same as if it had been done before
+  the optimize_cond() call.
+
+ @retval otherwise the created condition
+ @retval NULL if an error occurs
*/
-bool setup_jtbm_semi_joins(JOIN *join, List<TABLE_LIST> *join_list,
- Item **join_where)
+Item *and_new_conditions_to_optimized_cond(THD *thd, Item *cond,
+ COND_EQUAL **cond_eq,
+ List<Item> &new_conds,
+ Item::cond_result *cond_value)
+{
+ COND_EQUAL new_cond_equal;
+ Item *item;
+ Item_equal *mult_eq;
+ bool is_simplified_cond= false;
+ /* The list where parts of the new condition are stored. */
+ List_iterator<Item> li(new_conds);
+ List_iterator_fast<Item_equal> it(new_cond_equal.current_level);
+
+ /*
+ Create multiple equalities from the equalities of the list new_conds.
+ Save the created multiple equalities in new_cond_equal.
+ If multiple equality can't be created or the condition
+ from new_conds list isn't an equality leave it in new_conds
+ list.
+
+ The equality can't be converted into the multiple equality if it
+ is a knowingly false or true equality.
+ For example, (3 = 1) equality.
+ */
+ while ((item=li++))
+ {
+ if (item->type() == Item::FUNC_ITEM &&
+ ((Item_func *) item)->functype() == Item_func::EQ_FUNC &&
+ check_simple_equality(thd,
+ Item::Context(Item::ANY_SUBST,
+ ((Item_func_equal *)item)->compare_type_handler(),
+ ((Item_func_equal *)item)->compare_collation()),
+ ((Item_func *)item)->arguments()[0],
+ ((Item_func *)item)->arguments()[1],
+ &new_cond_equal))
+ li.remove();
+ }
+
+ it.rewind();
+ if (cond && cond->type() == Item::COND_ITEM &&
+ ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
+ {
+ /*
+ Case when cond is an AND-condition.
+ Union AND-condition cond, created multiple equalities from
+ new_cond_equal and remaining conditions from new_conds.
+ */
+ COND_EQUAL *cond_equal= &((Item_cond_and *) cond)->m_cond_equal;
+ List<Item_equal> *cond_equalities= &cond_equal->current_level;
+ List<Item> *and_args= ((Item_cond_and *)cond)->argument_list();
+
+ /*
+ Disjoin multiple equalities of cond.
+ Merge these multiple equalities with the multiple equalities of
+ new_cond_equal. Save the result in new_cond_equal.
+ Check if after the merge some multiple equalities are knowingly
+ true or false.
+ */
+ and_args->disjoin((List<Item> *) cond_equalities);
+ while ((mult_eq= it++))
+ {
+ mult_eq->upper_levels= 0;
+ mult_eq->merge_into_list(thd, cond_equalities, false, false);
+ }
+ List_iterator_fast<Item_equal> ei(*cond_equalities);
+ while ((mult_eq= ei++))
+ {
+ if (mult_eq->const_item() && !mult_eq->val_int())
+ is_simplified_cond= true;
+ else
+ {
+ mult_eq->unfix_fields();
+ if (mult_eq->fix_fields(thd, NULL))
+ return NULL;
+ }
+ }
+
+ li.rewind();
+ while ((item=li++))
+ {
+ /*
+ There still can be some equalities at not top level of new_conds
+ conditions that are not transformed into multiple equalities.
+ To transform them build_item_equal() is called.
+
+ Examples of not top level equalities:
+
+ 1. (t1.a = 3) OR (t1.b > 5)
+ (t1.a = 3) - not top level equality.
+ It is inside OR condition
+
+ 2. ((t3.d = t3.c) AND (t3.c < 15)) OR (t3.d > 1)
+         (t3.d = t3.c) - not top level equality.
+ It is inside AND condition which is a part of OR condition
+ */
+ if (item->type() == Item::COND_ITEM &&
+ ((Item_cond *)item)->functype() == Item_func::COND_OR_FUNC)
+ {
+ item= item->build_equal_items(thd,
+ &((Item_cond_and *) cond)->m_cond_equal,
+ false, NULL);
+ }
+ and_args->push_back(item, thd->mem_root);
+ }
+ and_args->append((List<Item> *) cond_equalities);
+ *cond_eq= &((Item_cond_and *) cond)->m_cond_equal;
+ }
+ else
+ {
+ /*
+ Case when cond isn't an AND-condition or is NULL.
+ There can be several cases:
+
+ 1. cond is a multiple equality.
+ In this case merge cond with the multiple equalities of
+ new_cond_equal.
+ Create new condition from the created multiple equalities
+ and new_conds list conditions.
+ 2. cond is NULL
+ Create new condition from new_conds list conditions
+ and multiple equalities from new_cond_equal.
+ 3. Otherwise
+ Create new condition through union of cond, conditions from new_conds
+ list and created multiple equalities from new_cond_equal.
+ */
+ List<Item> new_conds_list;
+ /* Flag is set to true if cond is a multiple equality */
+ bool is_mult_eq= (cond && cond->type() == Item::FUNC_ITEM &&
+ ((Item_func*) cond)->functype() == Item_func::MULT_EQUAL_FUNC);
+
+ /*
+ If cond is non-empty and is not multiple equality save it as
+ a part of a new condition.
+ */
+ if (cond && !is_mult_eq &&
+ new_conds_list.push_back(cond, thd->mem_root))
+ return NULL;
+
+ /*
+ If cond is a multiple equality merge it with new_cond_equal
+ multiple equalities.
+ */
+ if (is_mult_eq)
+ {
+ Item_equal *eq_cond= (Item_equal *)cond;
+ eq_cond->upper_levels= 0;
+ eq_cond->merge_into_list(thd, &new_cond_equal.current_level,
+ false, false);
+ }
+
+ /**
+ Fix created multiple equalities and check if they are knowingly
+ true or false.
+ */
+ List_iterator_fast<Item_equal> ei(new_cond_equal.current_level);
+ while ((mult_eq=ei++))
+ {
+ if (mult_eq->const_item() && !mult_eq->val_int())
+ is_simplified_cond= true;
+ else
+ {
+ mult_eq->unfix_fields();
+ if (mult_eq->fix_fields(thd, NULL))
+ return NULL;
+ }
+ }
+
+ /*
+ Create AND condition if new condition will have two or
+ more elements.
+ */
+ Item_cond_and *and_cond= 0;
+ COND_EQUAL *inherited= 0;
+ if (new_conds_list.elements +
+ new_conds.elements +
+ new_cond_equal.current_level.elements > 1)
+ {
+ and_cond= new (thd->mem_root) Item_cond_and(thd);
+ and_cond->m_cond_equal.copy(new_cond_equal);
+ inherited= &and_cond->m_cond_equal;
+ }
+
+ li.rewind();
+ while ((item=li++))
+ {
+ /*
+ Look for the comment in the case when cond is an
+ AND condition above the build_equal_items() call.
+ */
+ if (item->type() == Item::COND_ITEM &&
+ ((Item_cond *)item)->functype() == Item_func::COND_OR_FUNC)
+ {
+ item= item->build_equal_items(thd, inherited, false, NULL);
+ }
+ new_conds_list.push_back(item, thd->mem_root);
+ }
+ new_conds_list.append((List<Item> *)&new_cond_equal.current_level);
+
+ if (and_cond)
+ {
+ and_cond->argument_list()->append(&new_conds_list);
+ cond= (Item *)and_cond;
+ *cond_eq= &((Item_cond_and *) cond)->m_cond_equal;
+ }
+ else
+ {
+ List_iterator_fast<Item> iter(new_conds_list);
+ cond= iter++;
+ if (cond->type() == Item::FUNC_ITEM &&
+ ((Item_func *)cond)->functype() == Item_func::MULT_EQUAL_FUNC)
+ {
+ if (!(*cond_eq))
+ *cond_eq= new COND_EQUAL();
+ (*cond_eq)->copy(new_cond_equal);
+ }
+ else
+ *cond_eq= 0;
+ }
+ }
+
+ if (!cond)
+ return NULL;
+
+ if (*cond_eq)
+ {
+ /*
+ The multiple equalities are attached only to the upper level
+ of AND-condition cond.
+ Push them down to the bottom levels of cond AND-condition if needed.
+ */
+ propagate_new_equalities(thd, cond,
+ &(*cond_eq)->current_level,
+ 0,
+ &is_simplified_cond);
+ cond= cond->propagate_equal_fields(thd,
+ Item::Context_boolean(),
+ *cond_eq);
+ cond->update_used_tables();
+ }
+  /* Check if cond has knowingly true or false parts. */
+ if (cond &&
+ !is_simplified_cond &&
+ cond->walk(&Item::is_simplified_cond_processor, 0, 0))
+ is_simplified_cond= true;
+
+
+ /*
+ If it was found that there are some knowingly true or false equalities
+ remove them from cond and set cond_value to the appropriate value.
+ */
+ if (cond && is_simplified_cond)
+ cond= cond->remove_eq_conds(thd, cond_value, true);
+
+ if (cond && cond->fix_fields_if_needed(thd, NULL))
+ return NULL;
+
+ return cond;
+}
+
+
+/**
+ @brief Materialize a degenerate jtbm semi join
+
+ @param thd thread handler
+ @param tbl table list for the target jtbm semi join table
+ @param subq_pred IN subquery predicate with the degenerate jtbm semi join
+ @param eq_list IN/OUT the list where to add produced equalities
+
+ @details
+ The method materializes the degenerate jtbm semi join for the
+ subquery from the IN subquery predicate subq_pred taking table
+ as the target for materialization.
+ Any degenerate table is guaranteed to produce 0 or 1 record.
+ Examples of both cases:
+
+ select * from ot where col in (select ... from it where 2>3)
+ select * from ot where col in (select MY_MIN(it.key) from it)
+
+  In this case there is no need to create a temp.table for
+  materialization.
+ We now just need to
+ 1. Check whether 1 or 0 records are produced, setup this as a
+ constant join tab.
+ 2. Create a dummy temporary table, because all of the join
+ optimization code relies on TABLE object being present.
+
+ In the case when materialization produces one row the function
+ additionally creates equalities between the expressions from the
+ left part of the IN subquery predicate and the corresponding
+ columns of the produced row. These equalities are added to the
+ list eq_list. They are supposed to be conjuncted with the condition
+ of the WHERE clause.
+
+ @retval TRUE if an error occurs
+ @retval FALSE otherwise
+*/
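+/*
+  Illustration (a sketch with hypothetical data): for
+    select * from ot where col in (select MY_MIN(it.key) from it)
+  a subselect returning the single value 7 causes the equality
+    ot.col = 7
+  to be appended to eq_list, while a subselect returning no rows only
+  sets jtbm_const_row_found= FALSE.
+*/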
+
+bool execute_degenerate_jtbm_semi_join(THD *thd,
+ TABLE_LIST *tbl,
+ Item_in_subselect *subq_pred,
+ List<Item> &eq_list)
+{
+ DBUG_ENTER("execute_degenerate_jtbm_semi_join");
+ select_value_catcher *new_sink;
+
+ DBUG_ASSERT(subq_pred->engine->engine_type() ==
+ subselect_engine::SINGLE_SELECT_ENGINE);
+ subselect_single_select_engine *engine=
+ (subselect_single_select_engine*)subq_pred->engine;
+ if (!(new_sink= new (thd->mem_root) select_value_catcher(thd, subq_pred)))
+ DBUG_RETURN(TRUE);
+ if (new_sink->setup(&engine->select_lex->join->fields_list) ||
+ engine->select_lex->join->change_result(new_sink, NULL) ||
+ engine->exec())
+ {
+ DBUG_RETURN(TRUE);
+ }
+ subq_pred->is_jtbm_const_tab= TRUE;
+
+ if (new_sink->assigned)
+ {
+ /*
+ Subselect produced one row, which is saved in new_sink->row.
+ Save "left_expr[i] == row[i]" equalities into the eq_list.
+ */
+ subq_pred->jtbm_const_row_found= TRUE;
+
+ Item *eq_cond;
+ for (uint i= 0; i < subq_pred->left_expr->cols(); i++)
+ {
+ eq_cond=
+ new (thd->mem_root) Item_func_eq(thd,
+ subq_pred->left_expr->element_index(i),
+ new_sink->row[i]);
+ if (!eq_cond || eq_cond->fix_fields(thd, NULL) ||
+ eq_list.push_back(eq_cond, thd->mem_root))
+ DBUG_RETURN(TRUE);
+ }
+ }
+ else
+ {
+ /* Subselect produced no rows. Just set the flag */
+ subq_pred->jtbm_const_row_found= FALSE;
+ }
+
+ TABLE *dummy_table;
+ if (!(dummy_table= create_dummy_tmp_table(thd)))
+ DBUG_RETURN(TRUE);
+ tbl->table= dummy_table;
+ tbl->table->pos_in_table_list= tbl;
+ /*
+ Note: the table created above may be freed by:
+ 1. JOIN_TAB::cleanup(), when the parent join is a regular join.
+ 2. cleanup_empty_jtbm_semi_joins(), when the parent join is a
+ degenerate join (e.g. one with "Impossible where").
+ */
+ setup_table_map(tbl->table, tbl, tbl->jtbm_table_no);
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
+ @brief
+ Execute degenerate jtbm semi joins before optimize_cond() for parent
+
+ @param join the parent join for jtbm semi joins
+ @param join_list the list of tables where jtbm semi joins are processed
+ @param eq_list IN/OUT the list where to add equalities produced after
+ materialization of single-row degenerate jtbm semi joins
+
+ @details
+ The method traverses join_list trying to find any degenerate jtbm semi
+ joins for subqueries of IN predicates. For each degenerate jtbm
+ semi join execute_degenerate_jtbm_semi_join() is called. As a result
+ of this call new equalities that substitute for single-row materialized
+ jtbm semi join are added to eq_list.
+
+ In the case when a table is nested in another table 'nested_join' the
+ method is recursively called for the join_list of the 'nested_join' trying
+ to find in the list any degenerate jtbm semi joins. Currently a jtbm semi
+ join may occur in a mergeable semi join nest.
+
+ @retval TRUE if an error occurs
+ @retval FALSE otherwise
+*/
+
+bool setup_degenerate_jtbm_semi_joins(JOIN *join,
+ List<TABLE_LIST> *join_list,
+ List<Item> &eq_list)
+{
+ TABLE_LIST *table;
+ NESTED_JOIN *nested_join;
+ List_iterator<TABLE_LIST> li(*join_list);
+ THD *thd= join->thd;
+ DBUG_ENTER("setup_degenerate_jtbm_semi_joins");
+
+ while ((table= li++))
+ {
+ Item_in_subselect *subq_pred;
+
+ if ((subq_pred= table->jtbm_subselect))
+ {
+ JOIN *subq_join= subq_pred->unit->first_select()->join;
+
+ if (!subq_join->tables_list || !subq_join->table_count)
+ {
+ if (execute_degenerate_jtbm_semi_join(thd,
+ table,
+ subq_pred,
+ eq_list))
+ DBUG_RETURN(TRUE);
+ join->is_orig_degenerated= true;
+ }
+ }
+ if ((nested_join= table->nested_join))
+ {
+ if (setup_degenerate_jtbm_semi_joins(join,
+ &nested_join->join_list,
+ eq_list))
+ DBUG_RETURN(TRUE);
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
+ @brief
+ Optimize jtbm semi joins for materialization
+
+ @param join the parent join for jtbm semi joins
+ @param join_list the list of TABLE_LIST objects where jtbm semi join
+ can occur
+ @param eq_list IN/OUT the list where to add produced equalities
+
+ @details
+ This method is called by the optimizer after the call of
+ optimize_cond() for parent select.
+ The method traverses join_list trying to find any jtbm semi joins for
+ subqueries from IN predicates and optimizes them.
+ After the optimization some of jtbm semi joins may become degenerate.
+ For example the subquery 'SELECT MAX(b) FROM t2' from the query
+
+ SELECT * FROM t1 WHERE 4 IN (SELECT MAX(b) FROM t2);
+
+ will become degenerate if there is an index on t2.b.
+ If a subquery becomes degenerate it is handled by the function
+ execute_degenerate_jtbm_semi_join().
+
+ Otherwise the method creates a temporary table in which the subquery
+  of the jtbm semi join will be materialized.
+
+ The function saves the equalities between all pairs of the expressions
+ from the left part of the IN subquery predicate and the corresponding
+  columns of the subquery from the predicate, appending them to eq_list.
+  The equalities of eq_list will later be conjuncted with the
+ condition of the WHERE clause.
+
+ In the case when a table is nested in another table 'nested_join' the
+ method is recursively called for the join_list of the 'nested_join' trying
+ to find in the list any degenerate jtbm semi joins. Currently a jtbm semi
+ join may occur in a mergeable semi join nest.
+
+ @retval TRUE if an error occurs
+ @retval FALSE otherwise
+*/
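+/*
+  For instance (a sketch, assuming a non-degenerate, materialized
+  subquery): for
+    SELECT * FROM t1 WHERE t1.a IN (SELECT b FROM t2)
+  the equality between t1.a and the corresponding column of the
+  materialized temporary table, taken from
+  hash_sj_engine->semi_join_conds, is appended to eq_list.
+*/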
+
+bool setup_jtbm_semi_joins(JOIN *join, List<TABLE_LIST> *join_list,
+ List<Item> &eq_list)
{
TABLE_LIST *table;
NESTED_JOIN *nested_join;
List_iterator<TABLE_LIST> li(*join_list);
THD *thd= join->thd;
DBUG_ENTER("setup_jtbm_semi_joins");
-
+
while ((table= li++))
{
- Item_in_subselect *item;
+ Item_in_subselect *subq_pred;
- if ((item= table->jtbm_subselect))
+ if ((subq_pred= table->jtbm_subselect))
{
- Item_in_subselect *subq_pred= item;
double rows;
double read_time;
/*
- Perform optimization of the subquery, so that we know estmated
+ Perform optimization of the subquery, so that we know estimated
- cost of materialization process
- how many records will be in the materialized temp.table
*/
@@ -5546,102 +6120,37 @@ bool setup_jtbm_semi_joins(JOIN *join, List<TABLE_LIST> *join_list,
if (!subq_join->tables_list || !subq_join->table_count)
{
- /*
- A special case; subquery's join is degenerate, and it either produces
- 0 or 1 record. Examples of both cases:
-
- select * from ot where col in (select ... from it where 2>3)
- select * from ot where col in (select MY_MIN(it.key) from it)
-
- in this case, the subquery predicate has not been setup for
- materialization. In particular, there is no materialized temp.table.
- We'll now need to
- 1. Check whether 1 or 0 records are produced, setup this as a
- constant join tab.
- 2. Create a dummy temporary table, because all of the join
- optimization code relies on TABLE object being present (here we
- follow a bad tradition started by derived tables)
- */
- DBUG_ASSERT(subq_pred->engine->engine_type() ==
- subselect_engine::SINGLE_SELECT_ENGINE);
- subselect_single_select_engine *engine=
- (subselect_single_select_engine*)subq_pred->engine;
- select_value_catcher *new_sink;
- if (!(new_sink=
- new (thd->mem_root) select_value_catcher(thd, subq_pred)))
+ if (!join->is_orig_degenerated &&
+ execute_degenerate_jtbm_semi_join(thd, table, subq_pred,
+ eq_list))
DBUG_RETURN(TRUE);
- if (new_sink->setup(&engine->select_lex->join->fields_list) ||
- engine->select_lex->join->change_result(new_sink, NULL) ||
- engine->exec())
- {
- DBUG_RETURN(TRUE);
- }
- subq_pred->is_jtbm_const_tab= TRUE;
-
- if (new_sink->assigned)
- {
- subq_pred->jtbm_const_row_found= TRUE;
- /*
- Subselect produced one row, which is saved in new_sink->row.
- Inject "left_expr[i] == row[i] equalities into parent's WHERE.
- */
- Item *eq_cond;
- for (uint i= 0; i < subq_pred->left_expr->cols(); i++)
- {
- eq_cond= new (thd->mem_root)
- Item_func_eq(thd, subq_pred->left_expr->element_index(i),
- new_sink->row[i]);
- if (!eq_cond)
- DBUG_RETURN(1);
-
- if (!((*join_where)= and_items(thd, *join_where, eq_cond)) ||
- (*join_where)->fix_fields(thd, join_where))
- DBUG_RETURN(1);
- }
- }
- else
- {
- /* Subselect produced no rows. Just set the flag, */
- subq_pred->jtbm_const_row_found= FALSE;
- }
-
- /* Set up a dummy TABLE*, optimizer code needs JOIN_TABs to have TABLE */
- TABLE *dummy_table;
- if (!(dummy_table= create_dummy_tmp_table(thd)))
- DBUG_RETURN(1);
- table->table= dummy_table;
- table->table->pos_in_table_list= table;
- /*
- Note: the table created above may be freed by:
- 1. JOIN_TAB::cleanup(), when the parent join is a regular join.
- 2. cleanup_empty_jtbm_semi_joins(), when the parent join is a
- degenerate join (e.g. one with "Impossible where").
- */
- setup_table_map(table->table, table, table->jtbm_table_no);
}
else
{
DBUG_ASSERT(subq_pred->test_set_strategy(SUBS_MATERIALIZATION));
subq_pred->is_jtbm_const_tab= FALSE;
subselect_hash_sj_engine *hash_sj_engine=
- ((subselect_hash_sj_engine*)item->engine);
+ ((subselect_hash_sj_engine*)subq_pred->engine);
table->table= hash_sj_engine->tmp_table;
table->table->pos_in_table_list= table;
setup_table_map(table->table, table, table->jtbm_table_no);
- Item *sj_conds= hash_sj_engine->semi_join_conds;
-
- (*join_where)= and_items(thd, *join_where, sj_conds);
- (*join_where)->fix_fields_if_needed(thd, join_where);
+ List_iterator<Item> li(*hash_sj_engine->semi_join_conds->argument_list());
+ Item *item;
+ while ((item=li++))
+ {
+ item->update_used_tables();
+ if (eq_list.push_back(item, thd->mem_root))
+ DBUG_RETURN(TRUE);
+ }
}
table->table->maybe_null= MY_TEST(join->mixed_implicit_grouping);
}
-
if ((nested_join= table->nested_join))
{
- if (setup_jtbm_semi_joins(join, &nested_join->join_list, join_where))
+ if (setup_jtbm_semi_joins(join, &nested_join->join_list, eq_list))
DBUG_RETURN(TRUE);
}
}
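As a rough illustration of where eq_list goes afterwards (an assumption based on the IN/OUT description of the parameter above; the caller is outside this hunk): the collected equalities end up ANDed onto the parent WHERE condition. A toy sketch with strings standing in for Item trees:

#include <iostream>
#include <string>
#include <vector>

// Simplified and_items(): conjoin two condition strings.
static std::string and_items(const std::string &a, const std::string &b)
{
  if (a.empty()) return b;
  return "(" + a + ") AND (" + b + ")";
}

int main()
{
  std::string where= "t1.a > 3";
  std::vector<std::string> eq_list= { "t1.col = <subquery2>.max_b",
                                      "t1.col2 = <subquery2>.x" };
  for (const std::string &eq : eq_list)
    where= and_items(where, eq);          // conjunct each produced equality
  std::cout << where << "\n";
}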
@@ -5749,8 +6258,8 @@ bool JOIN::choose_subquery_plan(table_map join_tables)
/* A strategy must be chosen earlier. */
DBUG_ASSERT(in_subs->has_strategy());
DBUG_ASSERT(in_to_exists_where || in_to_exists_having);
- DBUG_ASSERT(!in_to_exists_where || in_to_exists_where->fixed);
- DBUG_ASSERT(!in_to_exists_having || in_to_exists_having->fixed);
+ DBUG_ASSERT(!in_to_exists_where || in_to_exists_where->is_fixed());
+ DBUG_ASSERT(!in_to_exists_having || in_to_exists_having->is_fixed());
/* The original QEP of the subquery. */
Join_plan_state save_qep(table_count);
@@ -6037,3 +6546,428 @@ bool JOIN::choose_tableless_subquery_plan()
exec_const_cond= zero_result_cause ? 0 : conds;
return FALSE;
}
+
+
+bool Item::pushable_equality_checker_for_subquery(uchar *arg)
+{
+ return
+ get_corresponding_field_pair(this,
+ ((Item_in_subselect *)arg)->corresponding_fields);
+}
+
+
+/*
+ Checks whether 'item', or some item equal to it, matches the field of
+ some Field_pair in 'pair_list'. Returns the matching Field_pair, or
+ NULL if no matching Field_pair was found.
+*/
+
+Field_pair *find_matching_field_pair(Item *item, List<Field_pair> pair_list)
+{
+ Field_pair *field_pair= get_corresponding_field_pair(item, pair_list);
+ if (field_pair)
+ return field_pair;
+
+ Item_equal *item_equal= item->get_item_equal();
+ if (item_equal)
+ {
+ Item_equal_fields_iterator it(*item_equal);
+ Item *equal_item;
+ while ((equal_item= it++))
+ {
+ if (equal_item->const_item())
+ continue;
+ field_pair= get_corresponding_field_pair(equal_item, pair_list);
+ if (field_pair)
+ return field_pair;
+ }
+ }
+ return NULL;
+}
+
+
+bool Item_field::excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred)
+{
+ if (find_matching_field_pair(((Item *) this), subq_pred->corresponding_fields))
+ return true;
+ return false;
+}
+
+
+bool Item_direct_view_ref::excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred)
+{
+ if (item_equal)
+ {
+ DBUG_ASSERT(real_item()->type() == Item::FIELD_ITEM);
+ if (get_corresponding_field_pair(((Item *)this), subq_pred->corresponding_fields))
+ return true;
+ }
+ return (*ref)->excl_dep_on_in_subq_left_part(subq_pred);
+}
+
+
+bool Item_equal::excl_dep_on_in_subq_left_part(Item_in_subselect *subq_pred)
+{
+ Item *left_item = get_const();
+ Item_equal_fields_iterator it(*this);
+ Item *item;
+ if (!left_item)
+ {
+ while ((item=it++))
+ {
+ if (item->excl_dep_on_in_subq_left_part(subq_pred))
+ {
+ left_item= item;
+ break;
+ }
+ }
+ }
+ if (!left_item)
+ return false;
+ while ((item=it++))
+ {
+ if (item->excl_dep_on_in_subq_left_part(subq_pred))
+ return true;
+ }
+ return false;
+}
+
+
+/**
+ @brief
+ Get corresponding item from the select of the right part of IN subquery
+
+ @param thd the thread handle
+ @param item the item from the left part of subq_pred for which
+ corresponding item should be found
+ @param subq_pred the IN subquery predicate
+
+ @details
+ This method looks through the fields of the select of the right part of
+ the IN subquery predicate subq_pred trying to find the corresponding
+ item 'new_item' for item. If item has equal items it looks through
+ the fields of the select of the right part of subq_pred for each equal
+ item trying to find the corresponding item.
+ The method assumes that the given item is either a field item or
+ a reference to a field item.
+
+ @retval <item*> reference to the corresponding item
+ @retval NULL if item was not found
+*/
+
+static
+Item *get_corresponding_item(THD *thd, Item *item,
+ Item_in_subselect *subq_pred)
+{
+ DBUG_ASSERT(item->type() == Item::FIELD_ITEM ||
+ (item->type() == Item::REF_ITEM &&
+ ((Item_ref *) item)->ref_type() == Item_ref::VIEW_REF));
+
+ Field_pair *field_pair;
+ Item_equal *item_equal= item->get_item_equal();
+
+ if (item_equal)
+ {
+ Item_equal_fields_iterator it(*item_equal);
+ Item *equal_item;
+ while ((equal_item= it++))
+ {
+ field_pair=
+ get_corresponding_field_pair(equal_item, subq_pred->corresponding_fields);
+ if (field_pair)
+ return field_pair->corresponding_item;
+ }
+ }
+ else
+ {
+ field_pair=
+ get_corresponding_field_pair(item, subq_pred->corresponding_fields);
+ if (field_pair)
+ return field_pair->corresponding_item;
+ }
+ return NULL;
+}
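A compact model of the lookup above: map a left-part field to the matching select-list item of the subquery, falling back to the field's equality class. FieldPair and the helper below are illustrative stand-ins, not the server's Field_pair/Item_equal machinery:

#include <iostream>
#include <string>
#include <vector>

struct FieldPair { std::string left_field; std::string corresponding_item; };

// Return the subquery item corresponding to 'item', trying the item itself
// first and then every member of its equality class.
static const std::string *find_corresponding(
    const std::string &item,
    const std::vector<std::string> &equal_to_item,
    const std::vector<FieldPair> &pairs)
{
  for (const FieldPair &p : pairs)
    if (p.left_field == item)
      return &p.corresponding_item;
  for (const std::string &eq : equal_to_item)
    for (const FieldPair &p : pairs)
      if (p.left_field == eq)
        return &p.corresponding_item;
  return nullptr;
}

int main()
{
  std::vector<FieldPair> pairs= { {"t1.a", "t2.x"}, {"t1.b", "MAX(t2.y)"} };
  // v1.a is not in the pair list itself, but it is equal to t1.a.
  const std::string *hit= find_corresponding("v1.a", {"t1.a"}, pairs);
  std::cout << (hit ? *hit : std::string("<none>")) << "\n";   // t2.x
}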
+
+
+Item *Item_field::in_subq_field_transformer_for_where(THD *thd, uchar *arg)
+{
+ Item_in_subselect *subq_pred= (Item_in_subselect *)arg;
+ Item *producing_item= get_corresponding_item(thd, this, subq_pred);
+ if (producing_item)
+ return producing_item->build_clone(thd);
+ return this;
+}
+
+
+Item *Item_direct_view_ref::in_subq_field_transformer_for_where(THD *thd,
+ uchar *arg)
+{
+ if (item_equal)
+ {
+ Item_in_subselect *subq_pred= (Item_in_subselect *)arg;
+ Item *producing_item= get_corresponding_item(thd, this, subq_pred);
+ DBUG_ASSERT (producing_item != NULL);
+ return producing_item->build_clone(thd);
+ }
+ return this;
+}
+
+
+/**
+ @brief
+ Transforms item so it can be pushed into the IN subquery HAVING clause
+
+ @param thd the thread handle
+ @param in_item the item for which pushable item should be created
+ @param subq_pred the IN subquery predicate
+
+ @details
+ For in_item, a field from the left part of the IN subquery predicate
+ subq_pred, this method finds the corresponding item from the right part
+ of subq_pred.
+ If a corresponding item is found, a shell for this item is created.
+ This shell can be pushed into the HAVING part of subq_pred select.
+
+ @retval <item*> reference to the created corresponding item shell for in_item
+ @retval NULL if an error occurs
+*/
+
+static Item*
+get_corresponding_item_for_in_subq_having(THD *thd, Item *in_item,
+ Item_in_subselect *subq_pred)
+{
+ Item *new_item= get_corresponding_item(thd, in_item, subq_pred);
+
+ if (new_item)
+ {
+ Item_ref *ref=
+ new (thd->mem_root) Item_ref(thd,
+ &subq_pred->unit->first_select()->context,
+ NullS, NullS,
+ &new_item->name);
+ if (!ref)
+ DBUG_ASSERT(0);
+ return ref;
+ }
+ return new_item;
+}
+
+
+Item *Item_field::in_subq_field_transformer_for_having(THD *thd, uchar *arg)
+{
+ return get_corresponding_item_for_in_subq_having(thd, this,
+ (Item_in_subselect *)arg);
+}
+
+
+Item *Item_direct_view_ref::in_subq_field_transformer_for_having(THD *thd,
+ uchar *arg)
+{
+ if (!item_equal)
+ return this;
+ else
+ {
+ Item *new_item= get_corresponding_item_for_in_subq_having(thd, this,
+ (Item_in_subselect *)arg);
+ if (!new_item)
+ return this;
+ return new_item;
+ }
+}
+
+
+/**
+ @brief
+ Find fields that are used in the GROUP BY of the select
+
+ @param thd the thread handle
+ @param sel the select of the IN subquery predicate
+ @param fields fields of the left part of the IN subquery predicate
+ @param grouping_list GROUP BY clause
+
+ @details
+ This method traverses the fields used in the GROUP BY of sel and saves
+ them together with their corresponding items from fields.
+*/
+
+bool grouping_fields_in_the_in_subq_left_part(THD *thd,
+ st_select_lex *sel,
+ List<Field_pair> *fields,
+ ORDER *grouping_list)
+{
+ DBUG_ENTER("grouping_fields_in_the_in_subq_left_part");
+ sel->grouping_tmp_fields.empty();
+ List_iterator<Field_pair> it(*fields);
+ Field_pair *item;
+ while ((item= it++))
+ {
+ for (ORDER *ord= grouping_list; ord; ord= ord->next)
+ {
+ if ((*ord->item)->eq(item->corresponding_item, 0))
+ {
+ if (sel->grouping_tmp_fields.push_back(item, thd->mem_root))
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ DBUG_RETURN(FALSE);
+}
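The GROUP BY intersection above, reduced to its essentials: keep only the pairs whose corresponding subquery item appears in the grouping list. Plain strings stand in for ORDER/Item objects; this is a sketch, not the server code:

#include <iostream>
#include <string>
#include <vector>

struct FieldPair { std::string left_field; std::string corresponding_item; };

int main()
{
  std::vector<FieldPair> pairs= { {"t1.a", "t2.x"}, {"t1.b", "MAX(t2.y)"} };
  std::vector<std::string> group_by= { "t2.x" };

  // Collect the pairs whose subquery side is a grouping column.
  std::vector<FieldPair> grouping_fields;
  for (const FieldPair &p : pairs)
    for (const std::string &g : group_by)
      if (g == p.corresponding_item)
        grouping_fields.push_back(p);

  for (const FieldPair &p : grouping_fields)
    std::cout << p.left_field << " -> " << p.corresponding_item << "\n"; // t1.a -> t2.x
}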
+
+
+/**
+ @brief
+ Extract condition that can be pushed into select of this IN subquery
+
+ @param thd the thread handle
+ @param cond current condition
+
+ @details
+ This function builds the most restrictive condition depending only on
+ the list of fields of the left part of this IN subquery predicate
+ (directly or indirectly through equality) that can be extracted from the
+ given condition cond and pushes it into this IN subquery.
+
+ Example of the transformation:
+
+ SELECT * FROM t1
+ WHERE a>3 AND b>10 AND
+ (a,b) IN (SELECT x,MAX(y) FROM t2 GROUP BY x);
+
+ =>
+
+ SELECT * FROM t1
+ WHERE a>3 AND b>10 AND
+ (a,b) IN (SELECT x,max(y)
+ FROM t2
+ WHERE x>3
+ GROUP BY x
+ HAVING MAX(y)>10);
+
+
+ In detail:
+ 1. Check what pushable formula can be extracted from cond
+ 2. Build a clone PC of the formula that can be extracted
+ (the clone is built only if the extracted formula is an AND subformula
+ of cond or a conjunction of such subformulas)
+ 3. If there is no HAVING clause prepare PC to be conjuncted with
+ WHERE clause of this subquery. Otherwise do 4-7.
+ 4. Check what formula PC_where can be extracted from PC to be pushed
+ into the WHERE clause of the subquery
+ 5. Build PC_where and if PC_where is a conjunct(s) of PC remove it from PC
+ getting PC_having
+ 6. Prepare PC_where to be conjuncted with the WHERE clause of
+ the IN subquery
+ 7. Prepare PC_having to be conjuncted with the HAVING clause of
+ the IN subquery
+
+ @note
+ This method is similar to pushdown_cond_for_derived()
+
+ @retval TRUE if an error occurs
+ @retval FALSE otherwise
+*/
+
+bool Item_in_subselect::pushdown_cond_for_in_subquery(THD *thd, Item *cond)
+{
+ DBUG_ENTER("Item_in_subselect::pushdown_cond_for_in_subquery");
+ Item *remaining_cond= NULL;
+
+ if (!cond)
+ DBUG_RETURN(FALSE);
+
+ st_select_lex *sel = unit->first_select();
+
+ if (is_jtbm_const_tab)
+ DBUG_RETURN(FALSE);
+
+ if (!sel->cond_pushdown_is_allowed())
+ DBUG_RETURN(FALSE);
+
+ /*
+ Create a list of Field_pair items for this IN subquery.
+ It consists of the pairs of fields from the left part of this IN subquery
+ predicate 'left_part' and the respective fields from the select of the
+ right part of the IN subquery 'sel' (the field from left_part with the
+ corresponding field from the sel projection list).
+ Attach this list to the IN subquery.
+ */
+ corresponding_fields.empty();
+ List_iterator_fast<Item> it(sel->join->fields_list);
+ Item *item;
+ for (uint i= 0; i < left_expr->cols(); i++)
+ {
+ item= it++;
+ Item *elem= left_expr->element_index(i);
+
+ if (elem->real_item()->type() != Item::FIELD_ITEM)
+ continue;
+
+ if (corresponding_fields.push_back(
+ new Field_pair(((Item_field *)(elem->real_item()))->field,
+ item)))
+ DBUG_RETURN(TRUE);
+ }
+
+ /* 1. Check what pushable formula can be extracted from cond */
+ Item *extracted_cond;
+ cond->check_pushable_cond(&Item::pushable_cond_checker_for_subquery,
+ (uchar *)this);
+ /* 2. Build a clone PC of the formula that can be extracted */
+ extracted_cond=
+ cond->build_pushable_cond(thd,
+ &Item::pushable_equality_checker_for_subquery,
+ (uchar *)this);
+ /* Nothing to push */
+ if (!extracted_cond)
+ {
+ DBUG_RETURN(FALSE);
+ }
+
+ /* Collect fields that are used in the GROUP BY of sel */
+ st_select_lex *save_curr_select= thd->lex->current_select;
+ if (sel->have_window_funcs())
+ {
+ if (sel->group_list.first || sel->join->implicit_grouping)
+ goto exit;
+ ORDER *common_partition_fields=
+ sel->find_common_window_func_partition_fields(thd);
+ if (!common_partition_fields)
+ goto exit;
+
+ if (grouping_fields_in_the_in_subq_left_part(thd, sel, &corresponding_fields,
+ common_partition_fields))
+ DBUG_RETURN(TRUE);
+ }
+ else if (grouping_fields_in_the_in_subq_left_part(thd, sel,
+ &corresponding_fields,
+ sel->group_list.first))
+ DBUG_RETURN(TRUE);
+
+ /* Do 4-6 */
+ sel->pushdown_cond_into_where_clause(thd, extracted_cond,
+ &remaining_cond,
+ &Item::in_subq_field_transformer_for_where,
+ (uchar *) this);
+ if (!remaining_cond)
+ goto exit;
+ /*
+ 7. Prepare PC_having to be conjuncted with the HAVING clause of
+ the IN subquery
+ */
+ remaining_cond=
+ remaining_cond->transform(thd,
+ &Item::in_subq_field_transformer_for_having,
+ (uchar *)this);
+ if (!remaining_cond ||
+ remaining_cond->walk(&Item::cleanup_excluding_const_fields_processor,
+ 0, 0))
+ goto exit;
+
+ mark_or_conds_to_avoid_pushdown(remaining_cond);
+
+ sel->cond_pushed_into_having= remaining_cond;
+
+exit:
+ thd->lex->current_select= save_curr_select;
+ DBUG_RETURN(FALSE);
+}
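A simplified model of the split performed in steps 4-7 above: among the pushable conjuncts, those that depend only on grouping columns go into the subquery's WHERE, the rest into its HAVING. Cloning, fix_fields() and item transformation are ignored; the classification flag below is an assumption standing in for the real dependency check:

#include <iostream>
#include <string>
#include <vector>

struct Conjunct { std::string text; bool uses_only_grouping_cols; };

int main()
{
  // From: ... WHERE a>3 AND b>10 AND (a,b) IN (SELECT x, MAX(y) FROM t2 GROUP BY x)
  std::vector<Conjunct> pushable= { {"x > 3",       true},    // a maps to x
                                    {"MAX(y) > 10", false} }; // b maps to MAX(y)
  std::string where_part, having_part;
  for (const Conjunct &c : pushable)
  {
    std::string &dst= c.uses_only_grouping_cols ? where_part : having_part;
    dst= dst.empty() ? c.text : dst + " AND " + c.text;
  }
  std::cout << "pushed WHERE : " << where_part  << "\n"    // x > 3
            << "pushed HAVING: " << having_part << "\n";   // MAX(y) > 10
}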
diff --git a/sql/opt_subselect.h b/sql/opt_subselect.h
index 9cb19e0cc6c..7af818bd62d 100644
--- a/sql/opt_subselect.h
+++ b/sql/opt_subselect.h
@@ -26,8 +26,11 @@ int check_and_do_in_subquery_rewrites(JOIN *join);
bool convert_join_subqueries_to_semijoins(JOIN *join);
int pull_out_semijoin_tables(JOIN *join);
bool optimize_semijoin_nests(JOIN *join, table_map all_table_map);
-bool setup_jtbm_semi_joins(JOIN *join, List<TABLE_LIST> *join_list,
- Item **join_where);
+bool setup_degenerate_jtbm_semi_joins(JOIN *join,
+ List<TABLE_LIST> *join_list,
+ List<Item> &eq_list);
+bool setup_jtbm_semi_joins(JOIN *join, List<TABLE_LIST> *join_list,
+ List<Item> &eq_list);
void cleanup_empty_jtbm_semi_joins(JOIN *join, List<TABLE_LIST> *join_list);
// used by Loose_scan_opt
@@ -296,6 +299,7 @@ public:
pos->loosescan_picker.loosescan_parts= best_max_loose_keypart + 1;
pos->use_join_buffer= FALSE;
pos->table= tab;
+ pos->range_rowid_filter_info= tab->range_rowid_filter_info;
// todo need ref_depend_map ?
DBUG_PRINT("info", ("Produced a LooseScan plan, key %s, %s",
tab->table->key_info[best_loose_scan_key].name.str,
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index 82946709166..ecede5903a2 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -318,7 +318,7 @@ int opt_sum_query(THD *thd,
error= tl->table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
if (unlikely(error))
{
- tl->table->file->print_error(error, MYF(ME_FATALERROR));
+ tl->table->file->print_error(error, MYF(ME_FATAL));
DBUG_RETURN(error);
}
count*= tl->table->file->stats.records;
diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc
index ef9b07cca47..422b21cb541 100644
--- a/sql/opt_table_elimination.cc
+++ b/sql/opt_table_elimination.cc
@@ -31,6 +31,8 @@
#include "mariadb.h"
#include "my_bit.h"
#include "sql_select.h"
+#include "opt_trace.h"
+#include "my_json_writer.h"
/*
OVERVIEW
@@ -522,7 +524,8 @@ eliminate_tables_for_list(JOIN *join,
List<TABLE_LIST> *join_list,
table_map tables_in_list,
Item *on_expr,
- table_map tables_used_elsewhere);
+ table_map tables_used_elsewhere,
+ Json_writer_array* trace_eliminate_tables);
static
bool check_func_dependency(JOIN *join,
table_map dep_tables,
@@ -541,7 +544,8 @@ static
Dep_module_expr *merge_eq_mods(Dep_module_expr *start,
Dep_module_expr *new_fields,
Dep_module_expr *end, uint and_level);
-static void mark_as_eliminated(JOIN *join, TABLE_LIST *tbl);
+static void mark_as_eliminated(JOIN *join, TABLE_LIST *tbl,
+ Json_writer_array* trace_eliminate_tables);
static
void add_module_expr(Dep_analysis_context *dac, Dep_module_expr **eq_mod,
uint and_level, Dep_value_field *field_val, Item *right,
@@ -608,6 +612,8 @@ void eliminate_tables(JOIN *join)
if (!optimizer_flag(thd, OPTIMIZER_SWITCH_TABLE_ELIMINATION))
DBUG_VOID_RETURN; /* purecov: inspected */
+ Json_writer_object trace_wrapper(thd);
+
/* Find the tables that are referred to from WHERE/HAVING */
used_tables= (join->conds? join->conds->used_tables() : 0) |
(join->having? join->having->used_tables() : 0);
@@ -617,12 +623,12 @@ void eliminate_tables(JOIN *join)
we should also take into account tables mentioned in "val".
*/
if (join->thd->lex->sql_command == SQLCOM_INSERT_SELECT &&
- join->select_lex == &thd->lex->select_lex)
+ join->select_lex == thd->lex->first_select_lex())
{
List_iterator<Item> val_it(thd->lex->value_list);
while ((item= val_it++))
{
- DBUG_ASSERT(item->fixed);
+ DBUG_ASSERT(item->is_fixed());
used_tables |= item->used_tables();
}
}
@@ -640,7 +646,7 @@ void eliminate_tables(JOIN *join)
used_tables |= (*(cur_list->item))->used_tables();
}
- if (join->select_lex == &thd->lex->select_lex)
+ if (join->select_lex == thd->lex->first_select_lex())
{
/* Multi-table UPDATE: don't eliminate tables referred from SET statement */
@@ -663,13 +669,14 @@ void eliminate_tables(JOIN *join)
}
}
}
-
+
table_map all_tables= join->all_tables_map();
+ Json_writer_array trace_eliminated_tables(thd,"eliminated_tables");
if (all_tables & ~used_tables)
{
/* There are some tables that we probably could eliminate. Try it. */
eliminate_tables_for_list(join, join->join_list, all_tables, NULL,
- used_tables);
+ used_tables, &trace_eliminated_tables);
}
DBUG_VOID_RETURN;
}
@@ -712,7 +719,8 @@ void eliminate_tables(JOIN *join)
static bool
eliminate_tables_for_list(JOIN *join, List<TABLE_LIST> *join_list,
table_map list_tables, Item *on_expr,
- table_map tables_used_elsewhere)
+ table_map tables_used_elsewhere,
+ Json_writer_array *trace_eliminate_tables)
{
TABLE_LIST *tbl;
List_iterator<TABLE_LIST> it(*join_list);
@@ -734,9 +742,10 @@ eliminate_tables_for_list(JOIN *join, List<TABLE_LIST> *join_list,
&tbl->nested_join->join_list,
tbl->nested_join->used_tables,
tbl->on_expr,
- outside_used_tables))
+ outside_used_tables,
+ trace_eliminate_tables))
{
- mark_as_eliminated(join, tbl);
+ mark_as_eliminated(join, tbl, trace_eliminate_tables);
}
else
all_eliminated= FALSE;
@@ -748,7 +757,7 @@ eliminate_tables_for_list(JOIN *join, List<TABLE_LIST> *join_list,
check_func_dependency(join, tbl->table->map, NULL, tbl,
tbl->on_expr))
{
- mark_as_eliminated(join, tbl);
+ mark_as_eliminated(join, tbl, trace_eliminate_tables);
}
else
all_eliminated= FALSE;
@@ -1788,7 +1797,8 @@ Dep_module* Dep_value_field::get_next_unbound_module(Dep_analysis_context *dac,
Mark one table or the whole join nest as eliminated.
*/
-static void mark_as_eliminated(JOIN *join, TABLE_LIST *tbl)
+static void mark_as_eliminated(JOIN *join, TABLE_LIST *tbl,
+ Json_writer_array* trace_eliminate_tables)
{
TABLE *table;
/*
@@ -1801,7 +1811,7 @@ static void mark_as_eliminated(JOIN *join, TABLE_LIST *tbl)
TABLE_LIST *child;
List_iterator<TABLE_LIST> it(tbl->nested_join->join_list);
while ((child= it++))
- mark_as_eliminated(join, child);
+ mark_as_eliminated(join, child, trace_eliminate_tables);
}
else if ((table= tbl->table))
{
@@ -1812,6 +1822,7 @@ static void mark_as_eliminated(JOIN *join, TABLE_LIST *tbl)
tab->type= JT_CONST;
tab->table->const_table= 1;
join->eliminated_tables |= table->map;
+ trace_eliminate_tables->add(table->alias.c_ptr_safe());
join->const_table_map|= table->map;
set_position(join, join->const_tables++, tab, (KEYUSE*)0);
}
diff --git a/sql/opt_trace.cc b/sql/opt_trace.cc
new file mode 100644
index 00000000000..befc7934a3a
--- /dev/null
+++ b/sql/opt_trace.cc
@@ -0,0 +1,698 @@
+/* This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "mariadb.h"
+#include "sql_array.h"
+#include "sql_string.h"
+#include "sql_class.h"
+#include "sql_show.h"
+#include "field.h"
+#include "table.h"
+#include "opt_trace.h"
+#include "sql_parse.h"
+#include "set_var.h"
+#include "my_json_writer.h"
+#include "sp_head.h"
+
+const char I_S_table_name[]= "OPTIMIZER_TRACE";
+
+/**
+ Whether a list of tables contains information_schema.OPTIMIZER_TRACE.
+ @param tbl list of tables
+
+ Can we do better than this here??
+ @note this does not catch that a stored routine or view accesses
+ the OPTIMIZER_TRACE table. So using a stored routine or view to read
+ OPTIMIZER_TRACE will overwrite OPTIMIZER_TRACE as it runs and provide
+ uninteresting info.
+*/
+bool list_has_optimizer_trace_table(const TABLE_LIST *tbl)
+{
+ for (; tbl; tbl= tbl->next_global)
+ {
+ if (tbl->schema_table &&
+ 0 == strcmp(tbl->schema_table->table_name, I_S_table_name))
+ return true;
+ }
+ return false;
+}
+
+/*
+ Returns whether a query has a SET command that switches optimizer_trace on/off.
+ True: don't trace the query (uninteresting)
+*/
+
+bool sets_var_optimizer_trace(enum enum_sql_command sql_command,
+ List<set_var_base> *set_vars)
+{
+ if (sql_command == SQLCOM_SET_OPTION)
+ {
+ List_iterator_fast<set_var_base> it(*set_vars);
+ const set_var_base *var;
+ while ((var= it++))
+ if (var->is_var_optimizer_trace()) return true;
+ }
+ return false;
+}
+
+
+ST_FIELD_INFO optimizer_trace_info[]=
+{
+ /* name, length, type, value, maybe_null, old_name, open_method */
+ {"QUERY", 65535, MYSQL_TYPE_STRING, 0, false, NULL, SKIP_OPEN_TABLE},
+ {"TRACE", 65535, MYSQL_TYPE_STRING, 0, false, NULL, SKIP_OPEN_TABLE},
+ {"MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 20, MYSQL_TYPE_LONG, 0, false, NULL,
+ SKIP_OPEN_TABLE},
+ {"INSUFFICIENT_PRIVILEGES", 1, MYSQL_TYPE_TINY, 0, false, NULL,
+ SKIP_OPEN_TABLE},
+ {NULL, 0, MYSQL_TYPE_STRING, 0, true, NULL, 0}
+};
+
+/*
+ TODO: one-line needs to be implemented separately
+*/
+const char *Opt_trace_context::flag_names[]= {"enabled", "default",
+ NullS};
+
+/*
+ Returns whether a particular command will be traced or not
+*/
+
+inline bool sql_command_can_be_traced(enum enum_sql_command sql_command)
+{
+ /*
+ For first iteration we are only allowing select queries.
+ TODO: change to allow other queries.
+ */
+ return sql_command == SQLCOM_SELECT ||
+ sql_command == SQLCOM_UPDATE ||
+ sql_command == SQLCOM_DELETE ||
+ sql_command == SQLCOM_DELETE_MULTI ||
+ sql_command == SQLCOM_UPDATE_MULTI;
+}
+
+void opt_trace_print_expanded_query(THD *thd, SELECT_LEX *select_lex,
+ Json_writer_object *writer)
+
+{
+ if (!thd->trace_started())
+ return;
+ StringBuffer<1024> str(system_charset_info);
+ ulonglong save_option_bits= thd->variables.option_bits;
+ thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE;
+ select_lex->print(thd, &str,
+ enum_query_type(QT_TO_SYSTEM_CHARSET |
+ QT_SHOW_SELECT_NUMBER |
+ QT_ITEM_IDENT_SKIP_DB_NAMES |
+ QT_VIEW_INTERNAL));
+ thd->variables.option_bits= save_option_bits;
+ /*
+ The output is not very pretty: lots of back-ticks. It is the same as
+ the output of EXPLAIN EXTENDED; let's try to improve it here.
+ */
+ writer->add("expanded_query", str.c_ptr_safe(), str.length());
+}
+
+void opt_trace_disable_if_no_security_context_access(THD *thd)
+{
+ if (likely(!(thd->variables.optimizer_trace &
+ Opt_trace_context::FLAG_ENABLED)) || // (1)
+ thd->system_thread) // (2)
+ {
+ /*
+ (1) We know that the routine's execution starts with "enabled=off".
+ If it stays so until the routine ends, we needn't do security checks on
+ the routine.
+ If it does not stay so, it means the definer sets it to "on" somewhere
+ in the routine's body. Then it is his conscious decision to generate
+ traces, thus it is still correct to skip the security check.
+
+ (2) Threads of the Events Scheduler have an unusual security context
+ (thd->m_main_security_ctx.priv_user==NULL, see comment in
+ Security_context::change_security_context()).
+ */
+ return;
+ }
+ Opt_trace_context *const trace= &thd->opt_trace;
+ if (!thd->trace_started())
+ {
+ /*
+ @@optimizer_trace has "enabled=on" but trace is not started.
+ Either Opt_trace_start ctor was not called for our statement (3), or it
+ was called but at that time, the variable had "enabled=off" (4).
+
+ There are no known cases of (3).
+
+ (4) suggests that the user managed to change the variable during
+ execution of the statement, and this statement is using
+ view/routine (note that we have not been able to provoke this, maybe
+ this is impossible). If it happens it is suspicious.
+
+ We disable I_S output. And we cannot do otherwise: we have no place to
+ store a possible "missing privilege" information (no Opt_trace_stmt, as
+ is_started() is false), so cannot do security checks, so cannot safely
+ do tracing, so have to disable I_S output. And even then, we don't know
+ when to re-enable I_S output, as we have no place to store the
+ information "re-enable tracing at the end of this statement", and we
+ don't even have a notion of statement here (statements in the optimizer
+ trace world mean an Opt_trace_stmt object, and there is none here). So
+ we must disable for the session's life.
+
+ COM_FIELD_LIST opens views, thus used to be a case of (3). To avoid
+ disabling I_S output for the session's life when this command is issued
+ (like in: "SET OPTIMIZER_TRACE='ENABLED=ON';USE somedb;" in the 'mysql'
+ command-line client), we have decided to create a Opt_trace_start for
+ this command. The command itself is not traced though
+ (SQLCOM_SHOW_FIELDS does not have CF_OPTIMIZER_TRACE).
+ */
+ return;
+ }
+ /*
+ Note that thd->main_security_ctx.master_access is probably invariant
+ across the life of THD: GRANT/REVOKE don't affect global privileges of an
+ existing connection, per the manual.
+ */
+ if (!(thd->main_security_ctx.check_access(GLOBAL_ACLS & ~GRANT_ACL)) &&
+ (0 != strcmp(thd->main_security_ctx.priv_user,
+ thd->security_context()->priv_user) ||
+ 0 != my_strcasecmp(system_charset_info,
+ thd->main_security_ctx.priv_host,
+ thd->security_context()->priv_host)))
+ trace->missing_privilege();
+}
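The last check above boils down to one rule: hide the trace when the security context was changed to a different SUID user, unless the connected user holds all global privileges. A tiny sketch of just that rule, with hypothetical helper names rather than the server's Security_context API:

#include <iostream>
#include <string>

// Returns true when the trace must be marked privilege-restricted.
static bool must_hide_trace(const std::string &connected_user,
                            const std::string &suid_user,
                            bool connected_has_all_global_privs)
{
  if (connected_has_all_global_privs)
    return false;                       // can obtain the information anyway
  return connected_user != suid_user;   // SUID context change => hide trace
}

int main()
{
  std::cout << must_hide_trace("app@%",  "definer@%", false) << "\n"; // 1: hide
  std::cout << must_hide_trace("root@%", "definer@%", true)  << "\n"; // 0: show
}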
+
+void opt_trace_disable_if_no_stored_proc_func_access(THD *thd, sp_head *sp)
+{
+ if (likely(!(thd->variables.optimizer_trace &
+ Opt_trace_context::FLAG_ENABLED)) ||
+ thd->system_thread)
+ return;
+
+ Opt_trace_context *const trace= &thd->opt_trace;
+ if (!thd->trace_started())
+ return;
+ bool full_access;
+ Security_context *const backup_thd_sctx= thd->security_context();
+ thd->set_security_context(&thd->main_security_ctx);
+ const bool rc= check_show_routine_access(thd, sp, &full_access) || !full_access;
+ thd->set_security_context(backup_thd_sctx);
+ if (rc)
+ trace->missing_privilege();
+}
+
+/**
+ If tracing is on, checks additional privileges on a list of tables/views,
+ to make sure that the user has the right to do SHOW CREATE TABLE/VIEW and
+ "SELECT *". For that:
+ - this function checks table-level SELECT
+ - which is sufficient for SHOW CREATE TABLE and "SELECT *", if a base table
+ - if it is a view that has not yet been identified as such,
+ opt_trace_disable_if_no_view_access() will be called later and check SHOW
+ VIEW; otherwise we check SHOW VIEW here; SHOW VIEW + SELECT is sufficient
+ for SHOW CREATE VIEW.
+ If a privilege is missing, notifies the trace system.
+
+ @param thd
+ @param tbl list of tables to check
+*/
+
+void opt_trace_disable_if_no_tables_access(THD *thd, TABLE_LIST *tbl)
+{
+ if (likely(!(thd->variables.optimizer_trace &
+ Opt_trace_context::FLAG_ENABLED)) || thd->system_thread)
+ return;
+ Opt_trace_context *const trace= &thd->opt_trace;
+
+ if (!thd->trace_started())
+ return;
+
+ Security_context *const backup_thd_sctx= thd->security_context();
+ thd->set_security_context(&thd->main_security_ctx);
+ const TABLE_LIST *const first_not_own_table= thd->lex->first_not_own_table();
+ for (TABLE_LIST *t= tbl; t != NULL && t != first_not_own_table;
+ t= t->next_global)
+ {
+ /*
+ Anonymous derived tables (as in
+ "SELECT ... FROM (SELECT ...)") don't have their grant.privilege set.
+ */
+ if (!t->is_anonymous_derived_table())
+ {
+ const GRANT_INFO backup_grant_info= t->grant;
+ Security_context *const backup_table_sctx= t->security_ctx;
+ t->security_ctx= NULL;
+ /*
+ (1) check_table_access() fills t->grant.privilege.
+ (2) Because SELECT privileges can be column-based,
+ check_table_access() will return 'false' as long as there is SELECT
+ privilege on one column. But we want a table-level privilege.
+ */
+
+ bool rc =
+ check_table_access(thd, SELECT_ACL, t, false, 1, true) || // (1)
+ ((t->grant.privilege & SELECT_ACL) == 0); // (2)
+ if (t->is_view())
+ {
+ /*
+ It's a view which has already been opened: we are executing a
+ prepared statement. The view has been unfolded in the global list of
+ tables. So underlying tables will be automatically checked in the
+ present function, but we need an explicit check of SHOW VIEW:
+ */
+ rc |= check_table_access(thd, SHOW_VIEW_ACL, t, false, 1, true);
+ }
+ t->security_ctx= backup_table_sctx;
+ t->grant= backup_grant_info;
+ if (rc)
+ {
+ trace->missing_privilege();
+ break;
+ }
+ }
+ }
+ thd->set_security_context(backup_thd_sctx);
+ return;
+}
+
+void opt_trace_disable_if_no_view_access(THD *thd, TABLE_LIST *view,
+ TABLE_LIST *underlying_tables)
+{
+
+ if (likely(!(thd->variables.optimizer_trace &
+ Opt_trace_context::FLAG_ENABLED)) ||
+ thd->system_thread)
+ return;
+ Opt_trace_context *const trace= &thd->opt_trace;
+ if (!thd->trace_started())
+ return;
+
+ Security_context *const backup_table_sctx= view->security_ctx;
+ Security_context *const backup_thd_sctx= thd->security_context();
+ const GRANT_INFO backup_grant_info= view->grant;
+
+ view->security_ctx= NULL; // no SUID context for view
+ // no SUID context for THD
+ thd->set_security_context(&thd->main_security_ctx);
+ const int rc= check_table_access(thd, SHOW_VIEW_ACL, view, false, 1, true);
+
+ view->security_ctx= backup_table_sctx;
+ thd->set_security_context(backup_thd_sctx);
+ view->grant= backup_grant_info;
+
+ if (rc)
+ {
+ trace->missing_privilege();
+ return;
+ }
+ /*
+ We needn't check SELECT privilege on this view. Some
+ opt_trace_disable_if_no_tables_access() call has or will check it.
+
+ Now we check underlying tables/views of our view:
+ */
+ opt_trace_disable_if_no_tables_access(thd, underlying_tables);
+ return;
+}
+
+
+/**
+ @class Opt_trace_stmt
+
+ The trace of one statement.
+*/
+
+class Opt_trace_stmt {
+ public:
+ /**
+ Constructor, starts a trace for information_schema and dbug.
+ @param ctx_arg context
+ */
+ Opt_trace_stmt(Opt_trace_context *ctx_arg)
+ {
+ ctx= ctx_arg;
+ current_json= new Json_writer();
+ missing_priv= false;
+ I_S_disabled= 0;
+ }
+ ~Opt_trace_stmt()
+ {
+ delete current_json;
+ }
+ void set_query(const char *query_ptr, size_t length, const CHARSET_INFO *charset);
+ void open_struct(const char *key, char opening_bracket);
+ void close_struct(const char *saved_key, char closing_bracket);
+ void fill_info(Opt_trace_info* info);
+ void add(const char *key, char *opening_bracket, size_t val_length);
+ Json_writer* get_current_json() {return current_json;}
+ void missing_privilege();
+ void disable_tracing_for_children();
+ void enable_tracing_for_children();
+ bool is_enabled();
+
+ void set_allowed_mem_size(size_t mem_size);
+ size_t get_length() { return current_json->output.length(); }
+ size_t get_truncated_bytes() { return current_json->get_truncated_bytes(); }
+ bool get_missing_priv() { return missing_priv; }
+
+private:
+ Opt_trace_context *ctx;
+ String query; // store the query sent by the user
+ Json_writer *current_json; // stores the trace
+ bool missing_priv; ///< whether user lacks privilege to see this trace
+ /*
+ 0 <=> this trace should be in information_schema.
+ !=0 <=> tracing is disabled; this currently happens when we want to trace a
+ sub-statement. For now traces are only collected for the top statement,
+ not for the sub-statements.
+ */
+ uint I_S_disabled;
+};
+
+void Opt_trace_stmt::set_query(const char *query_ptr, size_t length,
+ const CHARSET_INFO *charset)
+{
+ query.append(query_ptr, length, charset);
+}
+
+Json_writer* Opt_trace_context::get_current_json()
+{
+ if (!is_started())
+ return NULL;
+ return current_trace->get_current_json();
+}
+
+void Opt_trace_context::missing_privilege()
+{
+ if (current_trace)
+ current_trace->missing_privilege();
+}
+
+void Opt_trace_context::set_allowed_mem_size(size_t mem_size)
+{
+ current_trace->set_allowed_mem_size(mem_size);
+}
+
+/*
+ TODO: In future when we would be saving multiple trace,
+ this function would return
+ max_mem_size - memory_occupied_by_the_saved_traces
+*/
+
+size_t Opt_trace_context::remaining_mem_size()
+{
+ return max_mem_size;
+}
+
+bool Opt_trace_context::disable_tracing_if_required()
+{
+ if (current_trace)
+ {
+ current_trace->disable_tracing_for_children();
+ return true;
+ }
+ return false;
+}
+
+bool Opt_trace_context::enable_tracing_if_required()
+{
+ if (current_trace)
+ {
+ current_trace->enable_tracing_for_children();
+ return true;
+ }
+ return false;
+}
+
+bool Opt_trace_context::is_enabled()
+{
+ if (current_trace)
+ return current_trace->is_enabled();
+ return false;
+}
+
+Opt_trace_context::Opt_trace_context()
+{
+ current_trace= NULL;
+ max_mem_size= 0;
+}
+Opt_trace_context::~Opt_trace_context()
+{
+ delete_traces();
+}
+
+void Opt_trace_context::set_query(const char *query, size_t length, const CHARSET_INFO *charset)
+{
+ current_trace->set_query(query, length, charset);
+}
+
+void Opt_trace_context::start(THD *thd, TABLE_LIST *tbl,
+ enum enum_sql_command sql_command,
+ const char *query,
+ size_t query_length,
+ const CHARSET_INFO *query_charset,
+ ulong max_mem_size_arg)
+{
+ /*
+ This is done currently because we don't want to have multiple
+ traces open at the same time, so as soon as a new trace is created
+ we forcefully end the previous one, if it has not ended by itself.
+ This would mostly happen with stored functions or procedures.
+
+ TODO: handle multiple traces
+ */
+ DBUG_ASSERT(!current_trace);
+ current_trace= new Opt_trace_stmt(this);
+ max_mem_size= max_mem_size_arg;
+ set_allowed_mem_size(remaining_mem_size());
+}
+
+void Opt_trace_context::end()
+{
+ if (current_trace)
+ traces.push(current_trace);
+
+ if (!traces.elements())
+ return;
+ if (traces.elements() > 1)
+ {
+ Opt_trace_stmt *prev= traces.at(0);
+ delete prev;
+ traces.del(0);
+ }
+ current_trace= NULL;
+}
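end() effectively keeps only the most recent finished trace. A sketch of that eviction policy with a plain deque; the real Dynamic_array<Opt_trace_stmt*> is not needed to show the idea:

#include <deque>
#include <iostream>
#include <string>

int main()
{
  std::deque<std::string> traces;
  const char *stmts[]= { "trace#1", "trace#2", "trace#3" };
  for (const char *stmt : stmts)
  {
    traces.push_back(stmt);       // what end() does with current_trace
    if (traces.size() > 1)
      traces.pop_front();         // drop (delete) the previous trace
  }
  std::cout << traces.front() << "\n";   // trace#3
}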
+
+Opt_trace_start::Opt_trace_start(THD *thd, TABLE_LIST *tbl,
+ enum enum_sql_command sql_command,
+ List<set_var_base> *set_vars,
+ const char *query,
+ size_t query_length,
+ const CHARSET_INFO *query_charset):ctx(&thd->opt_trace)
+{
+ /*
+ if optimizer trace is enabled and the statement we have is traceable,
+ then we start the context.
+ */
+ const ulonglong var= thd->variables.optimizer_trace;
+ traceable= FALSE;
+ if (unlikely(var & Opt_trace_context::FLAG_ENABLED) &&
+ sql_command_can_be_traced(sql_command) &&
+ !list_has_optimizer_trace_table(tbl) &&
+ !sets_var_optimizer_trace(sql_command, set_vars) &&
+ !thd->system_thread &&
+ !ctx->disable_tracing_if_required())
+ {
+ ctx->start(thd, tbl, sql_command, query, query_length, query_charset,
+ thd->variables.optimizer_trace_max_mem_size);
+ ctx->set_query(query, query_length, query_charset);
+ traceable= TRUE;
+ opt_trace_disable_if_no_tables_access(thd, tbl);
+ }
+}
+
+Opt_trace_start::~Opt_trace_start()
+{
+ if (traceable)
+ {
+ ctx->end();
+ traceable= FALSE;
+ }
+ else
+ {
+ ctx->enable_tracing_if_required();
+ }
+}
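Opt_trace_start is a plain RAII guard: the constructor starts the trace when the statement qualifies, the destructor ends it. A generic standalone sketch of the pattern with a dummy context (not the server classes):

#include <iostream>

struct TraceContext {
  bool open= false;
  void start() { open= true;  std::cout << "trace started\n"; }
  void end()   { open= false; std::cout << "trace ended\n"; }
};

class TraceGuard {
public:
  TraceGuard(TraceContext &ctx, bool traceable)
    : ctx_(ctx), traceable_(traceable)
  { if (traceable_) ctx_.start(); }         // start only for qualifying statements
  ~TraceGuard() { if (traceable_) ctx_.end(); }
private:
  TraceContext &ctx_;
  bool traceable_;
};

int main()
{
  TraceContext ctx;
  {
    TraceGuard guard(ctx, /*traceable=*/true);   // statement execution scope
    std::cout << "optimizing and executing the statement\n";
  }                                              // trace ends here
}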
+
+void Opt_trace_stmt::fill_info(Opt_trace_info* info)
+{
+ if (unlikely(info->missing_priv= get_missing_priv()))
+ {
+ info->trace_ptr= info->query_ptr= "";
+ info->trace_length= info->query_length= 0;
+ info->query_charset= &my_charset_bin;
+ info->missing_bytes= 0;
+ }
+ else
+ {
+ info->trace_ptr= current_json->output.get_string()->ptr();
+ info->trace_length= get_length();
+ info->query_ptr= query.ptr();
+ info->query_length= query.length();
+ info->query_charset= query.charset();
+ info->missing_bytes= get_truncated_bytes();
+ info->missing_priv= get_missing_priv();
+ }
+}
+
+void Opt_trace_stmt::missing_privilege()
+{
+ missing_priv= true;
+}
+
+void Opt_trace_stmt::disable_tracing_for_children()
+{
+ ++I_S_disabled;
+}
+
+void Opt_trace_stmt::enable_tracing_for_children()
+{
+ if (I_S_disabled)
+ --I_S_disabled;
+}
+
+bool Opt_trace_stmt::is_enabled()
+{
+ return I_S_disabled == 0;
+}
+
+void Opt_trace_stmt::set_allowed_mem_size(size_t mem_size)
+{
+ current_json->set_size_limit(mem_size);
+}
+
+/*
+ Prefer this when you are iterating over JOIN_TABs
+*/
+
+void Json_writer::add_table_name(const JOIN_TAB *tab)
+{
+ if (tab != NULL)
+ {
+ char table_name_buffer[SAFE_NAME_LEN];
+ if (tab->table && tab->table->derived_select_number)
+ {
+ /* Derived table name generation */
+ size_t len= my_snprintf(table_name_buffer, sizeof(table_name_buffer)-1,
+ "<derived%u>",
+ tab->table->derived_select_number);
+ add_str(table_name_buffer, len);
+ }
+ else if (tab->bush_children)
+ {
+ JOIN_TAB *ctab= tab->bush_children->start;
+ size_t len= my_snprintf(table_name_buffer,
+ sizeof(table_name_buffer)-1,
+ "<subquery%d>",
+ ctab->emb_sj_nest->sj_subq_pred->get_identifier());
+ add_str(table_name_buffer, len);
+ }
+ else
+ {
+ TABLE_LIST *real_table= tab->table->pos_in_table_list;
+ add_str(real_table->alias.str, real_table->alias.length);
+ }
+ }
+ else
+ DBUG_ASSERT(0);
+}
+
+void Json_writer::add_table_name(const TABLE *table)
+{
+ add_str(table->pos_in_table_list->alias.str);
+}
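The name generation above, shown in isolation: derived tables print as <derivedN>, materialized subqueries as <subqueryN>, and ordinary tables by alias. A standalone sketch using plain snprintf() in place of my_snprintf():

#include <cstdio>
#include <iostream>

static const char *trace_table_name(char *buf, size_t buf_size,
                                    const char *alias,
                                    unsigned derived_select_number,
                                    int subquery_id)
{
  if (derived_select_number)
    snprintf(buf, buf_size, "<derived%u>", derived_select_number);
  else if (subquery_id >= 0)
    snprintf(buf, buf_size, "<subquery%d>", subquery_id);
  else
    snprintf(buf, buf_size, "%s", alias);
  return buf;
}

int main()
{
  char buf[64];
  std::cout << trace_table_name(buf, sizeof(buf), "t1", 0, -1) << "\n"; // t1
  std::cout << trace_table_name(buf, sizeof(buf), "",   3, -1) << "\n"; // <derived3>
  std::cout << trace_table_name(buf, sizeof(buf), "",   0,  2) << "\n"; // <subquery2>
}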
+
+
+void add_table_scan_values_to_trace(THD *thd, JOIN_TAB *tab)
+{
+ Json_writer_object table_records(thd);
+ table_records.add_table_name(tab);
+ Json_writer_object table_rec(thd, "table_scan");
+ table_rec.add("rows", tab->found_records)
+ .add("cost", tab->read_time);
+}
+/*
+ Introduce an enum_query_type flags parameter; maybe also allow
+ EXPLAIN to use this function.
+*/
+
+void Json_writer::add_str(Item *item)
+{
+ if (item)
+ {
+ THD *thd= current_thd;
+ StringBuffer<256> str(system_charset_info);
+
+ ulonglong save_option_bits= thd->variables.option_bits;
+ thd->variables.option_bits &= ~OPTION_QUOTE_SHOW_CREATE;
+ item->print(&str,
+ enum_query_type(QT_TO_SYSTEM_CHARSET | QT_SHOW_SELECT_NUMBER
+ | QT_ITEM_IDENT_SKIP_DB_NAMES));
+ thd->variables.option_bits= save_option_bits;
+ add_str(str.c_ptr_safe());
+ }
+ else
+ add_null();
+}
+
+void Opt_trace_context::delete_traces()
+{
+ if (traces.elements())
+ {
+ while (traces.elements())
+ {
+ Opt_trace_stmt *prev= traces.at(0);
+ delete prev;
+ traces.del(0);
+ }
+ }
+}
+
+
+int fill_optimizer_trace_info(THD *thd, TABLE_LIST *tables, Item *)
+{
+ TABLE *table= tables->table;
+ Opt_trace_info info;
+
+ /* Get the values of trace, query, missing bytes and missing_priv.
+
+ @todo: Need an iterator here to walk over all the traces
+ */
+ Opt_trace_context* ctx= &thd->opt_trace;
+
+ if (!thd->opt_trace.empty())
+ {
+ Opt_trace_stmt *stmt= ctx->get_top_trace();
+ stmt->fill_info(&info);
+
+ table->field[0]->store(info.query_ptr, static_cast<uint>(info.query_length),
+ info.query_charset);
+ table->field[1]->store(info.trace_ptr, static_cast<uint>(info.trace_length),
+ system_charset_info);
+ table->field[2]->store(info.missing_bytes, true);
+ table->field[3]->store(info.missing_priv, true);
+ // Store in IS
+ if (schema_table_store_record(thd, table))
+ return 1;
+ }
+ return 0;
+}
diff --git a/sql/opt_trace.h b/sql/opt_trace.h
new file mode 100644
index 00000000000..52318bc6b7f
--- /dev/null
+++ b/sql/opt_trace.h
@@ -0,0 +1,208 @@
+#ifndef OPT_TRACE_INCLUDED
+#define OPT_TRACE_INCLUDED
+/* This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "opt_trace_context.h" // Opt_trace_context
+#include "sql_lex.h"
+#include "my_json_writer.h"
+#include "sql_select.h"
+class Item;
+class THD;
+struct TABLE_LIST;
+
+class Opt_trace_stmt;
+
+/*
+ User-visible information about a trace.
+*/
+
+struct Opt_trace_info
+{
+ /**
+ String containing trace.
+ If trace has been end()ed, this is 0-terminated, which is only to aid
+ debugging or unit testing; this property is not relied upon in normal
+ server usage.
+ If trace has not been ended, this is not 0-terminated. That rare case can
+ happen when a substatement reads OPTIMIZER_TRACE (at that stage, the top
+ statement is still executing so its trace is not ended yet, but may still
+ be read by the sub-statement).
+ */
+ const char *trace_ptr;
+ size_t trace_length;
+ /// String containing the original query.
+ const char *query_ptr;
+ size_t query_length;
+ const CHARSET_INFO *query_charset; ///< charset of query string
+ /**
+ How many bytes this trace is missing (for traces which were truncated
+ because of @@@@optimizer-trace-max-mem-size).
+ The trace is not extended beyond trace-max-mem-size.
+ */
+ size_t missing_bytes;
+ /*
+ Whether user lacks privilege to see this trace.
+ If this is set to TRUE, then we return an empty trace
+ */
+ bool missing_priv;
+};
+
+/**
+ Instantiate this class to start tracing a THD's actions (generally at a
+ statement's start), and to set the "original" query (not transformed, as
+ sent by client) for the new trace. Destructor will end the trace.
+
+ @param thd the THD
+ @param tbl list of tables read/written by the statement.
+ @param sql_command SQL command being prepared or executed
+ @param set_vars what variables are set by this command (only used if
+ sql_command is SQLCOM_SET_OPTION)
+ @param query query
+ @param length query's length
+ @param charset charset which was used to encode this query
+*/
+
+
+class Opt_trace_start {
+ public:
+ Opt_trace_start(THD *thd_arg, TABLE_LIST *tbl,
+ enum enum_sql_command sql_command,
+ List<set_var_base> *set_vars,
+ const char *query,
+ size_t query_length,
+ const CHARSET_INFO *query_charset);
+ ~Opt_trace_start();
+
+ private:
+ Opt_trace_context *const ctx;
+ /*
+ True: the query will be traced
+ False: otherwise
+ */
+ bool traceable;
+};
+
+/**
+ Prints SELECT query to optimizer trace. It is not the original query (as in
+ @c Opt_trace_context::set_query()) but a printout of the parse tree
+ (Item-s).
+ @param thd the THD
+ @param select_lex query's parse tree
+ @param trace_object Json_writer object to which the query will be added
+*/
+void opt_trace_print_expanded_query(THD *thd, SELECT_LEX *select_lex,
+ Json_writer_object *trace_object);
+
+void add_table_scan_values_to_trace(THD *thd, JOIN_TAB *tab);
+
+/*
+ Security related (need to add a proper comment here)
+*/
+
+/**
+ If the security context is not that of the connected user, inform the trace
+ system that a privilege is missing. With one exception: see below.
+
+ @param thd
+
+ This serves to eliminate the following issue.
+ Any information readable by a SELECT may theoretically end up in
+ the trace. And a SELECT may read information from other places than tables:
+ - from views (reading their bodies)
+ - from stored routines (reading their bodies)
+ - from files (reading their content), with LOAD_FILE()
+ - from the list of connections (reading their queries...), with
+ I_S.PROCESSLIST.
+ If the connected user has EXECUTE privilege on a routine which does a
+ security context change, the routine can retrieve information internally
+ (if allowed by the SUID context's privileges), and present only a portion
+ of it to the connected user. But with tracing on, all information is
+ possibly in the trace. So the connected user receives more information than
+ the routine's definer intended to provide. Fixing this issue would require
+ adding, near many privilege checks in the server, a new
+ optimizer-trace-specific check done against the connected user's context,
+ to verify that the connected user has the right to see the retrieved
+ information.
+
+ Instead, our chosen simpler solution is that if we see a security context
+ change where SUID user is not the connected user, we disable tracing. With
+ only one safe exception: if the connected user has all global privileges
+ (because then she/he can find any information anyway). By "all global
+ privileges" we mean everything but WITH GRANT OPTION (that latter one isn't
+ related to information gathering).
+
+ Read access to I_S.OPTIMIZER_TRACE by another user than the connected user
+ is restricted: @see fill_optimizer_trace_info().
+*/
+void opt_trace_disable_if_no_security_context_access(THD *thd);
+
+void opt_trace_disable_if_no_tables_access(THD *thd, TABLE_LIST *tbl);
+
+/**
+ If tracing is on, checks additional privileges for a view, to make sure
+ that the user has the right to do SHOW CREATE VIEW. For that:
+ - this function checks SHOW VIEW
+ - SELECT is tested in opt_trace_disable_if_no_tables_access()
+ - SELECT + SHOW VIEW is sufficient for SHOW CREATE VIEW.
+ We also check underlying tables.
+ If a privilege is missing, notifies the trace system.
+ This function should be called when the view's underlying tables have not
+ yet been merged.
+
+ @param thd THD context
+ @param view view to check
+ @param underlying_tables underlying tables/views of 'view'
+ */
+
+void opt_trace_disable_if_no_view_access(THD *thd, TABLE_LIST *view,
+ TABLE_LIST *underlying_tables);
+
+/**
+ If tracing is on, checks additional privileges on a stored routine, to make
+ sure that the user has the right to do SHOW CREATE PROCEDURE/FUNCTION. For
+ that, we use the same checks as in those SHOW commands.
+ If a privilege is missing, notifies the trace system.
+
+ This function is not redundant with
+ opt_trace_disable_if_no_security_context_access().
+ Indeed, for a SQL SECURITY INVOKER routine, there is no context change, but
+ we must still verify that the invoker can do SHOW CREATE.
+
+ For triggers, see note in sp_head::execute_trigger().
+
+ @param thd
+ @param sp routine to check
+ */
+void opt_trace_disable_if_no_stored_proc_func_access(THD *thd, sp_head *sp);
+
+/**
+ Fills information_schema.OPTIMIZER_TRACE with rows (one per trace)
+ @retval 0 ok
+ @retval 1 error
+*/
+int fill_optimizer_trace_info(THD *thd, TABLE_LIST *tables, Item *);
+
+#define OPT_TRACE_TRANSFORM(thd, object_level0, object_level1, \
+ select_number, from, to) \
+ Json_writer_object object_level0(thd); \
+ Json_writer_object object_level1(thd, "transformation"); \
+ object_level1.add_select_number(select_number).add("from", from).add("to", to);
+
+#define OPT_TRACE_VIEWS_TRANSFORM(thd, object_level0, object_level1, \
+ derived, name, select_number, algorithm) \
+ Json_writer_object trace_wrapper(thd); \
+ Json_writer_object trace_derived(thd, derived); \
+ trace_derived.add("table", name).add_select_number(select_number) \
+ .add("algorithm", algorithm);
+#endif
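What OPT_TRACE_TRANSFORM amounts to is a pair of scope-bound JSON objects whose lifetimes mirror the C++ scopes. The toy writer below illustrates only the nesting; it is not Json_writer, and the output format is approximate:

#include <iostream>
#include <string>

class JsonObject {
public:
  explicit JsonObject(const std::string &key= "")
  {
    if (!key.empty())
      std::cout << "\"" << key << "\": ";
    std::cout << "{";                    // open on construction
  }
  JsonObject &add(const std::string &k, const std::string &v)
  {
    std::cout << (first_ ? " " : ", ") << "\"" << k << "\": \"" << v << "\"";
    first_= false;
    return *this;
  }
  ~JsonObject() { std::cout << " }"; }   // close on destruction
private:
  bool first_= true;
};

int main()
{
  {
    JsonObject wrapper;                        // OPT_TRACE_TRANSFORM's object_level0
    JsonObject transform("transformation");    // object_level1
    transform.add("select#", "2")
             .add("from", "IN (SELECT)")
             .add("to", "materialization");
  }   // destructors close "transformation", then the wrapper
  std::cout << "\n";
}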
diff --git a/sql/opt_trace_context.h b/sql/opt_trace_context.h
new file mode 100644
index 00000000000..e5df16b1e3b
--- /dev/null
+++ b/sql/opt_trace_context.h
@@ -0,0 +1,87 @@
+#ifndef OPT_TRACE_CONTEXT_INCLUDED
+#define OPT_TRACE_CONTEXT_INCLUDED
+
+#include "sql_array.h"
+
+class Opt_trace_stmt;
+
+class Opt_trace_context
+{
+public:
+ Opt_trace_context();
+ ~Opt_trace_context();
+
+ void start(THD *thd, TABLE_LIST *tbl,
+ enum enum_sql_command sql_command,
+ const char *query,
+ size_t query_length,
+ const CHARSET_INFO *query_charset,
+ ulong max_mem_size_arg);
+ void end();
+ void set_query(const char *query, size_t length, const CHARSET_INFO *charset);
+ void delete_traces();
+ void set_allowed_mem_size(size_t mem_size);
+ size_t remaining_mem_size();
+
+private:
+ Opt_trace_stmt* top_trace()
+ {
+ return *(traces.front());
+ }
+
+public:
+
+ /*
+ This returns the top trace from the list of traces. This function
+ is used when we want to see the contents of the INFORMATION_SCHEMA.OPTIMIZER_TRACE
+ table.
+ */
+
+ Opt_trace_stmt* get_top_trace()
+ {
+ if (!traces.elements())
+ return NULL;
+ return top_trace();
+ }
+
+ /*
+ This returns the current trace, which is still being written to and has not yet been finished
+ */
+
+ Json_writer* get_current_json();
+
+ bool empty()
+ {
+ return static_cast<uint>(traces.elements()) == 0;
+ }
+
+ bool is_started()
+ {
+ return current_trace && is_enabled();
+ }
+
+ bool disable_tracing_if_required();
+
+ bool enable_tracing_if_required();
+
+ bool is_enabled();
+
+ void missing_privilege();
+
+ static const char *flag_names[];
+ enum
+ {
+ FLAG_DEFAULT = 0,
+ FLAG_ENABLED = 1 << 0
+ };
+
+private:
+ /*
+ List of traces (currently it stores only 1 trace)
+ */
+ Dynamic_array<Opt_trace_stmt*> traces;
+ Opt_trace_stmt *current_trace;
+ size_t max_mem_size;
+};
+
+#endif /* OPT_TRACE_CONTEXT_INCLUDED */
diff --git a/sql/partition_info.h b/sql/partition_info.h
index 95700dac517..c7d8e16dfeb 100644
--- a/sql/partition_info.h
+++ b/sql/partition_info.h
@@ -395,12 +395,13 @@ public:
bool field_in_partition_expr(Field *field) const;
bool vers_init_info(THD *thd);
- bool vers_set_interval(Item *item, interval_type int_type, my_time_t start)
+ bool vers_set_interval(THD *thd, Item *item,
+ interval_type int_type, my_time_t start)
{
DBUG_ASSERT(part_type == VERSIONING_PARTITION);
vers_info->interval.type= int_type;
vers_info->interval.start= start;
- return get_interval_value(item, int_type, &vers_info->interval.step) ||
+ return get_interval_value(thd, item, int_type, &vers_info->interval.step) ||
vers_info->interval.step.neg || vers_info->interval.step.second_part ||
!(vers_info->interval.step.year || vers_info->interval.step.month ||
vers_info->interval.step.day || vers_info->interval.step.hour ||
diff --git a/sql/procedure.h b/sql/procedure.h
index 1ece31223ad..2bbdd906151 100644
--- a/sql/procedure.h
+++ b/sql/procedure.h
@@ -44,24 +44,30 @@ public:
this->name.length= strlen(name_par);
}
enum Type type() const { return Item::PROC_ITEM; }
+ Field *create_tmp_field_ex(TABLE *table, Tmp_field_src *src,
+ const Tmp_field_param *param)
+ {
+ /*
+ We can get to here when using a CURSOR for a query with PROCEDURE:
+ DECLARE c CURSOR FOR SELECT * FROM t1 PROCEDURE analyse();
+ OPEN c;
+ */
+ return create_tmp_field_ex_simple(table, src, param);
+ }
virtual void set(double nr)=0;
virtual void set(const char *str,uint length,CHARSET_INFO *cs)=0;
virtual void set(longlong nr)=0;
const Type_handler *type_handler() const=0;
void set(const char *str) { set(str,(uint) strlen(str), default_charset()); }
- void make_send_field(THD *thd, Send_field *tmp_field)
- {
- init_make_send_field(tmp_field,field_type());
- }
unsigned int size_of() { return sizeof(*this);}
bool check_vcol_func_processor(void *arg)
{
DBUG_ASSERT(0); // impossible
return mark_unsupported_function("proc", arg, VCOL_IMPOSSIBLE);
}
- bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
+ bool get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate)
{
- return type_handler()->Item_get_date(this, ltime, fuzzydate);
+ return type_handler()->Item_get_date_with_warn(thd, this, ltime, fuzzydate);
}
Item* get_copy(THD *thd) { return 0; }
};
diff --git a/sql/protocol.cc b/sql/protocol.cc
index c4c243ea166..ffed17634c0 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -551,8 +551,26 @@ static uchar *net_store_length_fast(uchar *packet, size_t length)
void Protocol::end_statement()
{
- /* sanity check*/
- DBUG_ASSERT_IF_WSREP(!(WSREP(thd) && thd->wsrep_conflict_state == REPLAYING));
+#ifdef WITH_WSREP
+ /*
+ Commented out: This sanity check does not hold in general.
+ Thd->LOCK_thd_data() must be unlocked before sending response
+ to client, so BF abort may sneak in here.
+ DBUG_ASSERT(!WSREP(thd) || thd->wsrep_conflict_state() == NO_CONFLICT);
+ */
+
+ /*
+ sanity check, don't send end statement while replaying
+ */
+ DBUG_ASSERT(thd->wsrep_trx().state() != wsrep::transaction::s_replaying);
+ if (WSREP(thd) && thd->wsrep_trx().state() ==
+ wsrep::transaction::s_replaying)
+ {
+ WSREP_ERROR("attempting net_end_statement while replaying");
+ return;
+ }
+#endif /* WITH_WSREP */
+
DBUG_ENTER("Protocol::end_statement");
DBUG_ASSERT(! thd->get_stmt_da()->is_sent());
bool error= FALSE;
@@ -738,7 +756,7 @@ void Protocol::init(THD *thd_arg)
packet= &thd->packet;
convert= &thd->convert_buffer;
#ifndef DBUG_OFF
- field_types= 0;
+ field_handlers= 0;
#endif
}
@@ -770,6 +788,73 @@ bool Protocol::flush()
#ifndef EMBEDDED_LIBRARY
+bool Protocol_text::store_field_metadata(const THD * thd,
+ const Send_field &field,
+ CHARSET_INFO *charset_for_protocol,
+ uint fieldnr)
+{
+ CHARSET_INFO *thd_charset= thd->variables.character_set_results;
+ char *pos;
+ CHARSET_INFO *cs= system_charset_info;
+ DBUG_ASSERT(field.is_sane());
+
+ if (thd->client_capabilities & CLIENT_PROTOCOL_41)
+ {
+ if (store(STRING_WITH_LEN("def"), cs, thd_charset) ||
+ store_str(field.db_name, cs, thd_charset) ||
+ store_str(field.table_name, cs, thd_charset) ||
+ store_str(field.org_table_name, cs, thd_charset) ||
+ store_str(field.col_name, cs, thd_charset) ||
+ store_str(field.org_col_name, cs, thd_charset) ||
+ packet->realloc(packet->length() + 12))
+ return true;
+ /* Store fixed length fields */
+ pos= (char*) packet->end();
+ *pos++= 12; // Length of packed fields
+ /* inject a NULL to test the client */
+ DBUG_EXECUTE_IF("poison_rs_fields", pos[-1]= (char) 0xfb;);
+ if (charset_for_protocol == &my_charset_bin || thd_charset == NULL)
+ {
+ /* No conversion */
+ int2store(pos, charset_for_protocol->number);
+ int4store(pos + 2, field.length);
+ }
+ else
+ {
+ /* With conversion */
+ int2store(pos, thd_charset->number);
+ uint32 field_length= field.max_octet_length(charset_for_protocol,
+ thd_charset);
+ int4store(pos + 2, field_length);
+ }
+ pos[6]= field.type_handler()->type_code_for_protocol();
+ int2store(pos + 7, field.flags);
+ pos[9]= (char) field.decimals;
+ pos[10]= 0; // For the future
+ pos[11]= 0; // For the future
+ pos+= 12;
+ }
+ else
+ {
+ if (store_str(field.table_name, cs, thd_charset) ||
+ store_str(field.col_name, cs, thd_charset) ||
+ packet->realloc(packet->length() + 10))
+ return true;
+ pos= (char*) packet->end();
+ pos[0]= 3;
+ int3store(pos + 1, field.length);
+ pos[4]= 1;
+ pos[5]= field.type_handler()->type_code_for_protocol();
+ pos[6]= 3;
+ int2store(pos + 7, field.flags);
+ pos[9]= (char) field.decimals;
+ pos+= 10;
+ }
+ packet->length((uint) (pos - packet->ptr()));
+ return false;
+}
+
+
/**
Send name and type of result to client.
@@ -792,10 +877,7 @@ bool Protocol::send_result_set_metadata(List<Item> *list, uint flags)
{
List_iterator_fast<Item> it(*list);
Item *item;
- ValueBuffer<MAX_FIELD_WIDTH> tmp;
- Protocol_text prot(thd);
- String *local_packet= prot.storage_packet();
- CHARSET_INFO *thd_charset= thd->variables.character_set_results;
+ Protocol_text prot(thd, thd->variables.net_buffer_length);
DBUG_ENTER("Protocol::send_result_set_metadata");
if (flags & SEND_NUM_ROWS)
@@ -808,119 +890,19 @@ bool Protocol::send_result_set_metadata(List<Item> *list, uint flags)
}
#ifndef DBUG_OFF
- field_types= (enum_field_types*) thd->alloc(sizeof(field_types) *
- list->elements);
- uint count= 0;
+ field_handlers= (const Type_handler**) thd->alloc(sizeof(field_handlers[0]) *
+ list->elements);
#endif
- /* We have to reallocate it here as a stored procedure may have reset it */
- (void) local_packet->alloc(thd->variables.net_buffer_length);
-
- while ((item=it++))
+ for (uint pos= 0; (item=it++); pos++)
{
- char *pos;
- CHARSET_INFO *cs= system_charset_info;
- Send_field field;
- item->make_send_field(thd, &field);
-
- /* limit number of decimals for float and double */
- if (field.type == MYSQL_TYPE_FLOAT || field.type == MYSQL_TYPE_DOUBLE)
- set_if_smaller(field.decimals, FLOATING_POINT_DECIMALS);
-
- /* Keep things compatible for old clients */
- if (field.type == MYSQL_TYPE_VARCHAR)
- field.type= MYSQL_TYPE_VAR_STRING;
-
prot.prepare_for_resend();
-
- if (thd->client_capabilities & CLIENT_PROTOCOL_41)
- {
- if (prot.store(STRING_WITH_LEN("def"), cs, thd_charset) ||
- prot.store(field.db_name, (uint) strlen(field.db_name),
- cs, thd_charset) ||
- prot.store(field.table_name, (uint) strlen(field.table_name),
- cs, thd_charset) ||
- prot.store(field.org_table_name, (uint) strlen(field.org_table_name),
- cs, thd_charset) ||
- prot.store(field.col_name.str, (uint) field.col_name.length,
- cs, thd_charset) ||
- prot.store(field.org_col_name.str, (uint) field.org_col_name.length,
- cs, thd_charset) ||
- local_packet->realloc(local_packet->length()+12))
- goto err;
- /* Store fixed length fields */
- pos= (char*) local_packet->ptr()+local_packet->length();
- *pos++= 12; // Length of packed fields
- /* inject a NULL to test the client */
- DBUG_EXECUTE_IF("poison_rs_fields", pos[-1]= (char) 0xfb;);
- if (item->charset_for_protocol() == &my_charset_bin || thd_charset == NULL)
- {
- /* No conversion */
- int2store(pos, item->charset_for_protocol()->number);
- int4store(pos+2, field.length);
- }
- else
- {
- /* With conversion */
- uint32 field_length, max_length;
- int2store(pos, thd_charset->number);
- /*
- For TEXT/BLOB columns, field_length describes the maximum data
- length in bytes. There is no limit to the number of characters
- that a TEXT column can store, as long as the data fits into
- the designated space.
- For the rest of textual columns, field_length is evaluated as
- char_count * mbmaxlen, where character count is taken from the
- definition of the column. In other words, the maximum number
- of characters here is limited by the column definition.
-
- When one has a LONG TEXT column with a single-byte
- character set, and the connection character set is multi-byte, the
- client may get fields longer than UINT_MAX32, due to
- <character set column> -> <character set connection> conversion.
- In that case column max length does not fit into the 4 bytes
- reserved for it in the protocol.
- */
- max_length= (field.type >= MYSQL_TYPE_TINY_BLOB &&
- field.type <= MYSQL_TYPE_BLOB) ?
- field.length / item->collation.collation->mbminlen :
- field.length / item->collation.collation->mbmaxlen;
- field_length= char_to_byte_length_safe(max_length,
- thd_charset->mbmaxlen);
- int4store(pos + 2, field_length);
- }
- pos[6]= field.type;
- int2store(pos+7,field.flags);
- pos[9]= (char) field.decimals;
- pos[10]= 0; // For the future
- pos[11]= 0; // For the future
- pos+= 12;
- }
- else
- {
- if (prot.store(field.table_name, (uint) strlen(field.table_name),
- cs, thd_charset) ||
- prot.store(field.col_name.str, (uint) field.col_name.length,
- cs, thd_charset) ||
- local_packet->realloc(local_packet->length()+10))
- goto err;
- pos= (char*) local_packet->ptr()+local_packet->length();
- pos[0]=3;
- int3store(pos+1,field.length);
- pos[4]=1;
- pos[5]=field.type;
- pos[6]=3;
- int2store(pos+7,field.flags);
- pos[9]= (char) field.decimals;
- pos+= 10;
- }
- local_packet->length((uint) (pos - local_packet->ptr()));
- if (flags & SEND_DEFAULTS)
- item->send(&prot, &tmp); // Send default value
+ if (prot.store_field_metadata(thd, item, pos))
+ goto err;
if (prot.write())
DBUG_RETURN(1);
#ifndef DBUG_OFF
- field_types[count++]= field.type;
+ field_handlers[pos]= item->type_handler();
#endif
}
@@ -949,6 +931,43 @@ err:
}
+bool Protocol::send_list_fields(List<Field> *list, const TABLE_LIST *table_list)
+{
+ DBUG_ENTER("Protocol::send_list_fields");
+ List_iterator_fast<Field> it(*list);
+ Field *fld;
+ Protocol_text prot(thd, thd->variables.net_buffer_length);
+
+#ifndef DBUG_OFF
+ field_handlers= (const Type_handler **) thd->alloc(sizeof(field_handlers[0]) *
+ list->elements);
+#endif
+
+ for (uint pos= 0; (fld= it++); pos++)
+ {
+ prot.prepare_for_resend();
+ if (prot.store_field_metadata_for_list_fields(thd, fld, table_list, pos))
+ goto err;
+ prot.store(fld); // Send default value
+ if (prot.write())
+ DBUG_RETURN(1);
+#ifndef DBUG_OFF
+ /*
+ Historically all BLOB variant Fields are displayed as
+ MYSQL_TYPE_BLOB in metadata.
+ See Field_blob::make_send_field() for more comments.
+ */
+ field_handlers[pos]= Send_field(fld).type_handler();
+#endif
+ }
+ DBUG_RETURN(prepare_for_send(list->elements));
+
+err:
+ my_message(ER_OUT_OF_RESOURCES, ER_THD(thd, ER_OUT_OF_RESOURCES), MYF(0));
+ DBUG_RETURN(1);
+}
+
+
bool Protocol::write()
{
DBUG_ENTER("Protocol::write");
@@ -958,6 +977,25 @@ bool Protocol::write()
#endif /* EMBEDDED_LIBRARY */
+bool Protocol_text::store_field_metadata(THD *thd, Item *item, uint pos)
+{
+ Send_field field(thd, item);
+ return store_field_metadata(thd, field, item->charset_for_protocol(), pos);
+}
+
+
+bool Protocol_text::store_field_metadata_for_list_fields(const THD *thd,
+ Field *fld,
+ const TABLE_LIST *tl,
+ uint pos)
+{
+ Send_field field= tl->view ?
+ Send_field(fld, tl->view_db.str, tl->view_name.str) :
+ Send_field(fld);
+ return store_field_metadata(thd, field, fld->charset_for_protocol(), pos);
+}
+
+
/**
Send one result set row.
@@ -1098,12 +1136,7 @@ bool Protocol_text::store(const char *from, size_t length,
CHARSET_INFO *fromcs, CHARSET_INFO *tocs)
{
#ifndef DBUG_OFF
- DBUG_ASSERT(field_types == 0 ||
- field_types[field_pos] == MYSQL_TYPE_DECIMAL ||
- field_types[field_pos] == MYSQL_TYPE_BIT ||
- field_types[field_pos] == MYSQL_TYPE_NEWDECIMAL ||
- (field_types[field_pos] >= MYSQL_TYPE_ENUM &&
- field_types[field_pos] <= MYSQL_TYPE_GEOMETRY));
+ DBUG_ASSERT(valid_handler(field_pos, PROTOCOL_SEND_STRING));
field_pos++;
#endif
return store_string_aux(from, length, fromcs, tocs);
@@ -1117,14 +1150,8 @@ bool Protocol_text::store(const char *from, size_t length,
#ifndef DBUG_OFF
DBUG_PRINT("info", ("Protocol_text::store field %u (%u): %.*s", field_pos,
field_count, (int) length, (length == 0 ? "" : from)));
- DBUG_ASSERT(field_types == 0 || field_pos < field_count);
- DBUG_ASSERT(field_types == 0 ||
- field_types[field_pos] == MYSQL_TYPE_DECIMAL ||
- field_types[field_pos] == MYSQL_TYPE_BIT ||
- field_types[field_pos] == MYSQL_TYPE_NEWDECIMAL ||
- field_types[field_pos] == MYSQL_TYPE_NEWDATE ||
- (field_types[field_pos] >= MYSQL_TYPE_ENUM &&
- field_types[field_pos] <= MYSQL_TYPE_GEOMETRY));
+ DBUG_ASSERT(field_handlers == 0 || field_pos < field_count);
+ DBUG_ASSERT(valid_handler(field_pos, PROTOCOL_SEND_STRING));
field_pos++;
#endif
return store_string_aux(from, length, fromcs, tocs);
@@ -1134,7 +1161,7 @@ bool Protocol_text::store(const char *from, size_t length,
bool Protocol_text::store_tiny(longlong from)
{
#ifndef DBUG_OFF
- DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_TINY);
+ DBUG_ASSERT(valid_handler(field_pos, PROTOCOL_SEND_TINY));
field_pos++;
#endif
char buff[22];
@@ -1146,9 +1173,7 @@ bool Protocol_text::store_tiny(longlong from)
bool Protocol_text::store_short(longlong from)
{
#ifndef DBUG_OFF
- DBUG_ASSERT(field_types == 0 ||
- field_types[field_pos] == MYSQL_TYPE_YEAR ||
- field_types[field_pos] == MYSQL_TYPE_SHORT);
+ DBUG_ASSERT(valid_handler(field_pos, PROTOCOL_SEND_SHORT));
field_pos++;
#endif
char buff[22];
@@ -1161,9 +1186,7 @@ bool Protocol_text::store_short(longlong from)
bool Protocol_text::store_long(longlong from)
{
#ifndef DBUG_OFF
- DBUG_ASSERT(field_types == 0 ||
- field_types[field_pos] == MYSQL_TYPE_INT24 ||
- field_types[field_pos] == MYSQL_TYPE_LONG);
+ DBUG_ASSERT(valid_handler(field_pos, PROTOCOL_SEND_LONG));
field_pos++;
#endif
char buff[22];
@@ -1176,8 +1199,7 @@ bool Protocol_text::store_long(longlong from)
bool Protocol_text::store_longlong(longlong from, bool unsigned_flag)
{
#ifndef DBUG_OFF
- DBUG_ASSERT(field_types == 0 ||
- field_types[field_pos] == MYSQL_TYPE_LONGLONG);
+ DBUG_ASSERT(valid_handler(field_pos, PROTOCOL_SEND_LONGLONG));
field_pos++;
#endif
char buff[22];
@@ -1191,13 +1213,11 @@ bool Protocol_text::store_longlong(longlong from, bool unsigned_flag)
bool Protocol_text::store_decimal(const my_decimal *d)
{
#ifndef DBUG_OFF
- DBUG_ASSERT(field_types == 0 ||
- field_types[field_pos] == MYSQL_TYPE_NEWDECIMAL);
+ DBUG_ASSERT(0); // This method is not used yet
field_pos++;
#endif
- char buff[DECIMAL_MAX_STR_LENGTH];
- String str(buff, sizeof(buff), &my_charset_bin);
- (void) my_decimal2string(E_DEC_FATAL_ERROR, d, 0, 0, 0, &str);
+ StringBuffer<DECIMAL_MAX_STR_LENGTH> str;
+ (void) d->to_string(&str);
return net_store_data((uchar*) str.ptr(), str.length());
}
@@ -1205,8 +1225,7 @@ bool Protocol_text::store_decimal(const my_decimal *d)
bool Protocol_text::store(float from, uint32 decimals, String *buffer)
{
#ifndef DBUG_OFF
- DBUG_ASSERT(field_types == 0 ||
- field_types[field_pos] == MYSQL_TYPE_FLOAT);
+ DBUG_ASSERT(valid_handler(field_pos, PROTOCOL_SEND_FLOAT));
field_pos++;
#endif
buffer->set_real((double) from, decimals, thd->charset());
@@ -1217,8 +1236,7 @@ bool Protocol_text::store(float from, uint32 decimals, String *buffer)
bool Protocol_text::store(double from, uint32 decimals, String *buffer)
{
#ifndef DBUG_OFF
- DBUG_ASSERT(field_types == 0 ||
- field_types[field_pos] == MYSQL_TYPE_DOUBLE);
+ DBUG_ASSERT(valid_handler(field_pos, PROTOCOL_SEND_DOUBLE));
field_pos++;
#endif
buffer->set_real(from, decimals, thd->charset());
@@ -1256,9 +1274,7 @@ bool Protocol_text::store(Field *field)
bool Protocol_text::store(MYSQL_TIME *tm, int decimals)
{
#ifndef DBUG_OFF
- DBUG_ASSERT(field_types == 0 ||
- field_types[field_pos] == MYSQL_TYPE_DATETIME ||
- field_types[field_pos] == MYSQL_TYPE_TIMESTAMP);
+ DBUG_ASSERT(valid_handler(field_pos, PROTOCOL_SEND_DATETIME));
field_pos++;
#endif
char buff[MAX_DATE_STRING_REP_LENGTH];
@@ -1270,8 +1286,7 @@ bool Protocol_text::store(MYSQL_TIME *tm, int decimals)
bool Protocol_text::store_date(MYSQL_TIME *tm)
{
#ifndef DBUG_OFF
- DBUG_ASSERT(field_types == 0 ||
- field_types[field_pos] == MYSQL_TYPE_DATE);
+ DBUG_ASSERT(valid_handler(field_pos, PROTOCOL_SEND_DATE));
field_pos++;
#endif
char buff[MAX_DATE_STRING_REP_LENGTH];
@@ -1283,8 +1298,7 @@ bool Protocol_text::store_date(MYSQL_TIME *tm)
bool Protocol_text::store_time(MYSQL_TIME *tm, int decimals)
{
#ifndef DBUG_OFF
- DBUG_ASSERT(field_types == 0 ||
- field_types[field_pos] == MYSQL_TYPE_TIME);
+ DBUG_ASSERT(valid_handler(field_pos, PROTOCOL_SEND_TIME));
field_pos++;
#endif
char buff[MAX_DATE_STRING_REP_LENGTH];
@@ -1304,11 +1318,10 @@ bool Protocol_text::store_time(MYSQL_TIME *tm, int decimals)
bool Protocol_text::send_out_parameters(List<Item_param> *sp_params)
{
- DBUG_ASSERT(sp_params->elements ==
- thd->lex->prepared_stmt_params.elements);
+ DBUG_ASSERT(sp_params->elements == thd->lex->prepared_stmt.param_count());
List_iterator_fast<Item_param> item_param_it(*sp_params);
- List_iterator_fast<Item> param_it(thd->lex->prepared_stmt_params);
+ List_iterator_fast<Item> param_it(thd->lex->prepared_stmt.params());
while (true)
{
@@ -1442,13 +1455,11 @@ bool Protocol_binary::store_longlong(longlong from, bool unsigned_flag)
bool Protocol_binary::store_decimal(const my_decimal *d)
{
#ifndef DBUG_OFF
- DBUG_ASSERT(field_types == 0 ||
- field_types[field_pos] == MYSQL_TYPE_NEWDECIMAL);
+ DBUG_ASSERT(0); // This method is not used yet
field_pos++;
#endif
- char buff[DECIMAL_MAX_STR_LENGTH];
- String str(buff, sizeof(buff), &my_charset_bin);
- (void) my_decimal2string(E_DEC_FATAL_ERROR, d, 0, 0, 0, &str);
+ StringBuffer<DECIMAL_MAX_STR_LENGTH> str;
+ (void) d->to_string(&str);
return store(str.ptr(), str.length(), str.charset());
}
diff --git a/sql/protocol.h b/sql/protocol.h
index 1a6cb3bdc3c..70a097c9e32 100644
--- a/sql/protocol.h
+++ b/sql/protocol.h
@@ -22,11 +22,14 @@
#include "sql_error.h"
#include "my_decimal.h" /* my_decimal */
+#include "sql_type.h"
class i_string;
class Field;
+class Send_field;
class THD;
class Item_param;
+struct TABLE_LIST;
typedef struct st_mysql_field MYSQL_FIELD;
typedef struct st_mysql_rows MYSQL_ROWS;
@@ -38,7 +41,12 @@ protected:
String *convert;
uint field_pos;
#ifndef DBUG_OFF
- enum enum_field_types *field_types;
+ const Type_handler **field_handlers;
+ bool valid_handler(uint pos, protocol_send_type_t type) const
+ {
+ return field_handlers == 0 ||
+ field_handlers[field_pos]->protocol_send_type() == type;
+ }
#endif
uint field_count;
#ifndef EMBEDDED_LIBRARY
@@ -75,8 +83,9 @@ public:
virtual ~Protocol() {}
void init(THD* thd_arg);
- enum { SEND_NUM_ROWS= 1, SEND_DEFAULTS= 2, SEND_EOF= 4 };
+ enum { SEND_NUM_ROWS= 1, SEND_EOF= 2 };
virtual bool send_result_set_metadata(List<Item> *list, uint flags);
+ bool send_list_fields(List<Field> *list, const TABLE_LIST *table_list);
bool send_result_set_row(List<Item> *row_items);
bool store(I_List<i_string> *str_list);
@@ -113,6 +122,15 @@ public:
virtual bool store(const char *from, size_t length, CHARSET_INFO *cs)=0;
virtual bool store(const char *from, size_t length,
CHARSET_INFO *fromcs, CHARSET_INFO *tocs)=0;
+ bool store_str(const char *s, CHARSET_INFO *fromcs, CHARSET_INFO *tocs)
+ {
+ DBUG_ASSERT(s);
+ return store(s, (uint) strlen(s), fromcs, tocs);
+ }
+ bool store_str(const LEX_CSTRING &s, CHARSET_INFO *fromcs, CHARSET_INFO *tocs)
+ {
+ return store(s.str, (uint) s.length, fromcs, tocs);
+ }
virtual bool store(float from, uint32 decimals, String *buffer)=0;
virtual bool store(double from, uint32 decimals, String *buffer)=0;
virtual bool store(MYSQL_TIME *time, int decimals)=0;
@@ -122,7 +140,8 @@ public:
virtual bool send_out_parameters(List<Item_param> *sp_params)=0;
#ifdef EMBEDDED_LIBRARY
- int begin_dataset();
+ bool begin_dataset();
+ bool begin_dataset(THD *thd, uint numfields);
virtual void remove_last_row() {}
#else
void remove_last_row() {}
@@ -150,7 +169,12 @@ public:
class Protocol_text :public Protocol
{
public:
- Protocol_text(THD *thd_arg) :Protocol(thd_arg) {}
+ Protocol_text(THD *thd_arg, ulong prealloc= 0)
+ :Protocol(thd_arg)
+ {
+ if (prealloc)
+ packet->alloc(prealloc);
+ }
virtual void prepare_for_resend();
virtual bool store_null();
virtual bool store_tiny(longlong from);
@@ -172,6 +196,13 @@ public:
#ifdef EMBEDDED_LIBRARY
void remove_last_row();
#endif
+ bool store_field_metadata(const THD *thd, const Send_field &field,
+ CHARSET_INFO *charset_for_protocol,
+ uint pos);
+ bool store_field_metadata(THD *thd, Item *item, uint pos);
+ bool store_field_metadata_for_list_fields(const THD *thd, Field *field,
+ const TABLE_LIST *table_list,
+ uint pos);
virtual enum enum_protocol_type type() { return PROTOCOL_TEXT; };
};
diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc
index 68c7158e9e5..e7873b185c5 100644
--- a/sql/repl_failsafe.cc
+++ b/sql/repl_failsafe.cc
@@ -38,13 +38,21 @@
#include "log_event.h"
#include <mysql.h>
-#define SLAVE_LIST_CHUNK 128
-#define SLAVE_ERRMSG_SIZE (FN_REFLEN+64)
+
+struct Slave_info
+{
+ uint32 server_id;
+ uint32 master_id;
+ char host[HOSTNAME_LENGTH*SYSTEM_CHARSET_MBMAXLEN+1];
+ char user[USERNAME_LENGTH+1];
+ char password[MAX_PASSWORD_LENGTH*SYSTEM_CHARSET_MBMAXLEN+1];
+ uint16 port;
+};
+Atomic_counter<uint32_t> binlog_dump_thread_count;
ulong rpl_status=RPL_NULL;
mysql_mutex_t LOCK_rpl_status;
-HASH slave_list;
const char *rpl_role_type[] = {"MASTER","SLAVE",NullS};
TYPELIB rpl_role_typelib = {array_elements(rpl_role_type)-1,"",
@@ -81,33 +89,26 @@ void change_rpl_status(ulong from_status, ulong to_status)
errmsg= msg;\
goto err; \
}\
- strmake(obj,(char*) p,len); \
+ ::strmake(obj, (char*) p, len); \
p+= len; \
}\
-void unregister_slave(THD* thd, bool only_mine, bool need_mutex)
+void THD::unregister_slave()
{
- uint32 thd_server_id= thd->variables.server_id;
- if (thd_server_id)
+ if (auto old_si= slave_info)
{
- if (need_mutex)
- mysql_mutex_lock(&LOCK_slave_list);
-
- SLAVE_INFO* old_si;
- if ((old_si = (SLAVE_INFO*)my_hash_search(&slave_list,
- (uchar*)&thd_server_id, 4)) &&
- (!only_mine || old_si->thd == thd))
- my_hash_delete(&slave_list, (uchar*)old_si);
-
- if (need_mutex)
- mysql_mutex_unlock(&LOCK_slave_list);
+ mysql_mutex_lock(&LOCK_thd_data);
+ slave_info= 0;
+ mysql_mutex_unlock(&LOCK_thd_data);
+ delete old_si;
+ binlog_dump_thread_count--;
}
}
/**
- Register slave in 'slave_list' hash table.
+ Register slave
@return
0 ok
@@ -115,19 +116,18 @@ void unregister_slave(THD* thd, bool only_mine, bool need_mutex)
1 Error. Error message sent to client
*/
-int register_slave(THD* thd, uchar* packet, size_t packet_length)
+int THD::register_slave(uchar *packet, size_t packet_length)
{
- int res;
- SLAVE_INFO *si;
+ Slave_info *si;
uchar *p= packet, *p_end= packet + packet_length;
const char *errmsg= "Wrong parameters to function register_slave";
- if (check_access(thd, REPL_SLAVE_ACL, any_db, NULL, NULL, 0, 0))
+ if (check_access(this, REPL_SLAVE_ACL, any_db, NULL, NULL, 0, 0))
+ return 1;
+ if (!(si= new Slave_info))
return 1;
- if (!(si = (SLAVE_INFO*)my_malloc(sizeof(SLAVE_INFO), MYF(MY_WME))))
- goto err2;
- thd->variables.server_id= si->server_id= uint4korr(p);
+ variables.server_id= si->server_id= uint4korr(p);
p+= 4;
get_object(p,si->host, "Failed to register slave: too long 'report-host'");
get_object(p,si->user, "Failed to register slave: too long 'report-user'");
@@ -146,77 +146,54 @@ int register_slave(THD* thd, uchar* packet, size_t packet_length)
p += 4;
if (!(si->master_id= uint4korr(p)))
si->master_id= global_system_variables.server_id;
- si->thd= thd;
- mysql_mutex_lock(&LOCK_slave_list);
- unregister_slave(thd,0,0);
- res= my_hash_insert(&slave_list, (uchar*) si);
- mysql_mutex_unlock(&LOCK_slave_list);
- return res;
+ unregister_slave();
+ mysql_mutex_lock(&LOCK_thd_data);
+ slave_info= si;
+ mysql_mutex_unlock(&LOCK_thd_data);
+ binlog_dump_thread_count++;
+ return 0;
err:
- my_free(si);
+ delete si;
my_message(ER_UNKNOWN_ERROR, errmsg, MYF(0)); /* purecov: inspected */
-err2:
return 1;
}
-extern "C" uint32
-*slave_list_key(SLAVE_INFO* si, size_t *len,
- my_bool not_used __attribute__((unused)))
-{
- *len = 4;
- return &si->server_id;
-}
-extern "C" void slave_info_free(void *s)
+bool THD::is_binlog_dump_thread()
{
- my_free(s);
-}
-
-#ifdef HAVE_PSI_INTERFACE
-static PSI_mutex_key key_LOCK_slave_list;
+ mysql_mutex_lock(&LOCK_thd_data);
+ bool res= slave_info != NULL;
+ mysql_mutex_unlock(&LOCK_thd_data);
-static PSI_mutex_info all_slave_list_mutexes[]=
-{
- { &key_LOCK_slave_list, "LOCK_slave_list", PSI_FLAG_GLOBAL}
-};
-
-static void init_all_slave_list_mutexes(void)
-{
- const char* category= "sql";
- int count;
-
- if (PSI_server == NULL)
- return;
-
- count= array_elements(all_slave_list_mutexes);
- PSI_server->register_mutex(category, all_slave_list_mutexes, count);
+ return res;
}
-#endif /* HAVE_PSI_INTERFACE */
-void init_slave_list()
-{
-#ifdef HAVE_PSI_INTERFACE
- init_all_slave_list_mutexes();
-#endif
-
- my_hash_init(&slave_list, system_charset_info, SLAVE_LIST_CHUNK, 0, 0,
- (my_hash_get_key) slave_list_key,
- (my_hash_free_key) slave_info_free, 0);
- mysql_mutex_init(key_LOCK_slave_list, &LOCK_slave_list, MY_MUTEX_INIT_FAST);
-}
-void end_slave_list()
+static my_bool show_slave_hosts_callback(THD *thd, Protocol *protocol)
{
- /* No protection by a mutex needed as we are only called at shutdown */
- if (my_hash_inited(&slave_list))
+ my_bool res= FALSE;
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ if (auto si= thd->slave_info)
{
- my_hash_free(&slave_list);
- mysql_mutex_destroy(&LOCK_slave_list);
+ protocol->prepare_for_resend();
+ protocol->store(si->server_id);
+ protocol->store(si->host, &my_charset_bin);
+ if (opt_show_slave_auth_info)
+ {
+ protocol->store(si->user, &my_charset_bin);
+ protocol->store(si->password, &my_charset_bin);
+ }
+ protocol->store((uint32) si->port);
+ protocol->store(si->master_id);
+ res= protocol->write();
}
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ return res;
}
+
/**
Execute a SHOW SLAVE HOSTS statement.
@@ -258,28 +235,9 @@ bool show_slave_hosts(THD* thd)
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
- mysql_mutex_lock(&LOCK_slave_list);
+ if (server_threads.iterate(show_slave_hosts_callback, protocol))
+ DBUG_RETURN(true);
- for (uint i = 0; i < slave_list.records; ++i)
- {
- SLAVE_INFO* si = (SLAVE_INFO*) my_hash_element(&slave_list, i);
- protocol->prepare_for_resend();
- protocol->store((uint32) si->server_id);
- protocol->store(si->host, &my_charset_bin);
- if (opt_show_slave_auth_info)
- {
- protocol->store(si->user, &my_charset_bin);
- protocol->store(si->password, &my_charset_bin);
- }
- protocol->store((uint32) si->port);
- protocol->store((uint32) si->master_id);
- if (protocol->write())
- {
- mysql_mutex_unlock(&LOCK_slave_list);
- DBUG_RETURN(TRUE);
- }
- }
- mysql_mutex_unlock(&LOCK_slave_list);
my_eof(thd);
DBUG_RETURN(FALSE);
}
diff --git a/sql/repl_failsafe.h b/sql/repl_failsafe.h
index 967d81bcf0d..7f81b98303e 100644
--- a/sql/repl_failsafe.h
+++ b/sql/repl_failsafe.h
@@ -22,6 +22,7 @@
#include <my_sys.h>
#include "slave.h"
+extern Atomic_counter<uint32_t> binlog_dump_thread_count;
typedef enum {RPL_AUTH_MASTER=0,RPL_IDLE_SLAVE,RPL_ACTIVE_SLAVE,
RPL_LOST_SOLDIER,RPL_TROOP_SOLDIER,
RPL_RECOVERY_CAPTAIN,RPL_NULL /* inactive */,
@@ -36,13 +37,7 @@ extern const char* rpl_role_type[], *rpl_status_type[];
void change_rpl_status(ulong from_status, ulong to_status);
int find_recovery_captain(THD* thd, MYSQL* mysql);
-extern HASH slave_list;
-
bool show_slave_hosts(THD* thd);
-void init_slave_list();
-void end_slave_list();
-int register_slave(THD* thd, uchar* packet, size_t packet_length);
-void unregister_slave(THD* thd, bool only_mine, bool need_mutex);
#endif /* HAVE_REPLICATION */
#endif /* REPL_FAILSAFE_INCLUDED */
diff --git a/sql/rowid_filter.cc b/sql/rowid_filter.cc
new file mode 100644
index 00000000000..865f22b431a
--- /dev/null
+++ b/sql/rowid_filter.cc
@@ -0,0 +1,626 @@
+/*
+ Copyright (c) 2018, 2019 MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "mariadb.h"
+#include "table.h"
+#include "sql_class.h"
+#include "opt_range.h"
+#include "rowid_filter.h"
+#include "sql_select.h"
+
+
+inline
+double Range_rowid_filter_cost_info::lookup_cost(
+ Rowid_filter_container_type cont_type)
+{
+ switch (cont_type) {
+ case SORTED_ARRAY_CONTAINER:
+ return log(est_elements)*0.01;
+ default:
+ DBUG_ASSERT(0);
+ return 0;
+ }
+}
+
+
+/**
+ @brief
+ The average gain in cost per row to use the range filter with this cost info
+*/
+
+inline
+double Range_rowid_filter_cost_info::avg_access_and_eval_gain_per_row(
+ Rowid_filter_container_type cont_type)
+{
+ return (1+1.0/TIME_FOR_COMPARE) * (1 - selectivity) -
+ lookup_cost(cont_type);
+}
+
+
+/**
+ @brief
+ The average adjusted gain in cost per row of using the filter
+
+ @param access_cost_factor the adjusted cost of accessing a row
+
+ @details
+ The current code to estimate the cost of a ref access is quite inconsistent:
+ in some cases the effect of page buffers is taken into account, for others
+ just the engine dependent read_time() is employed. That's why the average
+ cost of one random seek might differ from 1.
+ The parameter access_cost_factor can be considered as the cost of the random
+ seek that is used for the given ref access. When the cost of a random seek
+ changes, the first coefficient in the linear formula by which we calculate
+ the gain of using the given filter has to change as well. This function
+ calculates that adjusted coefficient, a_adj.
+
+ @note
+ Currently we require that access_cost_factor should be a number between
+ 0.0 and 1.0
+*/
+
+inline
+double Range_rowid_filter_cost_info::avg_adjusted_gain_per_row(
+ double access_cost_factor)
+{
+ return a - (1 - access_cost_factor) * (1 - selectivity);
+}
+
+
+/**
+ @brief
+ Set the parameters used to choose the filter with the best adjusted gain
+
+ @note
+ This function must be called before the call of get_adjusted_gain()
+ for the given filter.
+*/
+
+inline void
+Range_rowid_filter_cost_info::set_adjusted_gain_param(double access_cost_factor)
+{
+ a_adj= avg_adjusted_gain_per_row(access_cost_factor);
+ cross_x_adj= b / a_adj;
+}
+
+
+/**
+ @brief
+ Initialize the cost info structure for a range filter
+
+ @param cont_type The type of the container of the range filter
+ @param tab The table for which the range filter is evaluated
+ @param idx The index used to create this range filter
+*/
+
+void Range_rowid_filter_cost_info::init(Rowid_filter_container_type cont_type,
+ TABLE *tab, uint idx)
+{
+ container_type= cont_type;
+ table= tab;
+ key_no= idx;
+ est_elements= (ulonglong) (table->quick_rows[key_no]);
+ b= build_cost(container_type);
+ selectivity= est_elements/((double) table->stat_records());
+ a= avg_access_and_eval_gain_per_row(container_type);
+ if (a > 0)
+ cross_x= b/a;
+ else
+ cross_x= b+1;
+ abs_independent.clear_all();
+}
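
To make the cost model above concrete, here is a small self-contained sketch (not MariaDB code) that reproduces the arithmetic of init(), build_cost() and lookup_cost() with invented numbers; the ARRAY_WRITE_COST and ARRAY_SORT_C constants are the ones defined in rowid_filter.h, while the index-only scan cost and the TIME_FOR_COMPARE value are assumptions chosen only for illustration.

#include <cmath>
#include <cstdio>

int main()
{
  // Hypothetical inputs: 1000 rows expected from the range scan out of
  // 100000 rows in the table, an assumed index-only scan cost of 20 and
  // an assumed TIME_FOR_COMPARE of 5.
  double est_elements= 1000, stat_records= 100000;
  double index_only_cost= 20, time_for_compare= 5;

  double selectivity= est_elements / stat_records;              // 0.01
  double lookup= std::log(est_elements) * 0.01;                 // lookup_cost()
  double b= index_only_cost +
            0.005 * est_elements +                              // ARRAY_WRITE_COST
            0.01 * est_elements * std::log(est_elements);       // ARRAY_SORT_C
  double a= (1 + 1.0 / time_for_compare) * (1 - selectivity) - lookup;
  double cross_x= b / a;

  // The filter pays off once the access examines more than cross_x key
  // tuples: gain(r) = r*a - b > 0  <=>  r > cross_x.
  std::printf("a=%.3f  b=%.1f  cross_x=%.1f\n", a, b, cross_x);
  return 0;
}
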
+
+
+/**
+ @brief
+ Return the cost of building a range filter of a certain type
+*/
+
+double
+Range_rowid_filter_cost_info::build_cost(Rowid_filter_container_type cont_type)
+{
+ double cost= 0;
+
+ cost+= table->quick_index_only_costs[key_no];
+
+ switch (cont_type) {
+
+ case SORTED_ARRAY_CONTAINER:
+ cost+= ARRAY_WRITE_COST * est_elements; /* cost filling the container */
+ cost+= ARRAY_SORT_C * est_elements * log(est_elements); /* sorting cost */
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+
+ return cost;
+}
+
+
+Rowid_filter_container *Range_rowid_filter_cost_info::create_container()
+{
+ THD *thd= table->in_use;
+ uint elem_sz= table->file->ref_length;
+ Rowid_filter_container *res= 0;
+
+ switch (container_type) {
+ case SORTED_ARRAY_CONTAINER:
+ res= new (thd->mem_root) Rowid_filter_sorted_array((uint) est_elements,
+ elem_sz);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+ return res;
+}
+
+
+static
+int compare_range_rowid_filter_cost_info_by_a(
+ Range_rowid_filter_cost_info **filter_ptr_1,
+ Range_rowid_filter_cost_info **filter_ptr_2)
+{
+ double diff= (*filter_ptr_2)->get_a() - (*filter_ptr_1)->get_a();
+ return (diff < 0 ? -1 : (diff > 0 ? 1 : 0));
+}
+
+
+/**
+ @brief
+ Prepare the array with cost info on range filters to be used by optimizer
+
+ @details
+ The function removes from the array of cost info on range filters the
+ elements for those range filters that will never be chosen as the best
+ filter, no matter what index is used to access the table and at what step
+ the table is joined.
+*/
+
+void TABLE::prune_range_rowid_filters()
+{
+ /*
+ For the elements of the array with cost info on range filters
+ build a bit matrix of absolutely independent elements.
+ Two elements are absolutely independent if they use such indexes that
+ there is no other index that overlaps both of them or is constraint
+ correlated with both of them. Use abs_independent key maps to store
+ the rows of this bit matrix.
+ */
+
+ Range_rowid_filter_cost_info **filter_ptr_1= range_rowid_filter_cost_info_ptr;
+ for (uint i= 0;
+ i < range_rowid_filter_cost_info_elems;
+ i++, filter_ptr_1++)
+ {
+ uint key_no= (*filter_ptr_1)->key_no;
+ Range_rowid_filter_cost_info **filter_ptr_2= filter_ptr_1 + 1;
+ for (uint j= i+1;
+ j < range_rowid_filter_cost_info_elems;
+ j++, filter_ptr_2++)
+ {
+ key_map map_1= key_info[key_no].overlapped;
+ map_1.merge(key_info[key_no].constraint_correlated);
+ key_map map_2= key_info[(*filter_ptr_2)->key_no].overlapped;
+ map_2.merge(key_info[(*filter_ptr_2)->key_no].constraint_correlated);
+ map_1.intersect(map_2);
+ if (map_1.is_clear_all())
+ {
+ (*filter_ptr_1)->abs_independent.set_bit((*filter_ptr_2)->key_no);
+ (*filter_ptr_2)->abs_independent.set_bit(key_no);
+ }
+ }
+ }
+
+ /* Sort the array range_filter_cost_info by 'a' in descending order */
+ my_qsort(range_rowid_filter_cost_info_ptr,
+ range_rowid_filter_cost_info_elems,
+ sizeof(Range_rowid_filter_cost_info *),
+ (qsort_cmp) compare_range_rowid_filter_cost_info_by_a);
+
+ /*
+ For each element check whether it is created for a filter that
+ can ever be chosen as the best one. If that is not the case, remove
+ it from the array. Otherwise put it in the array in such a place
+ that all already checked elements left in the array are ordered by
+ cross_x.
+ */
+
+ Range_rowid_filter_cost_info **cand_filter_ptr=
+ range_rowid_filter_cost_info_ptr;
+ for (uint i= 0;
+ i < range_rowid_filter_cost_info_elems;
+ i++, cand_filter_ptr++)
+ {
+ bool is_pruned= false;
+ Range_rowid_filter_cost_info **usable_filter_ptr=
+ range_rowid_filter_cost_info_ptr;
+ key_map abs_indep;
+ abs_indep.clear_all();
+ for (uint j= 0; j < i; j++, usable_filter_ptr++)
+ {
+ if ((*cand_filter_ptr)->cross_x >= (*usable_filter_ptr)->cross_x)
+ {
+ if (abs_indep.is_set((*usable_filter_ptr)->key_no))
+ {
+ /*
+ The following is true here for the element e being checked:
+ There are at least 2 elements e1 and e2 among those already selected
+ such that
+ e1.cross_x < e.cross_x and e1.a > e.a
+ and
+ e2.cross_x < e.cross_x and e2.a > e.a,
+ i.e. the range filters f1, f2 of both e1 and e2 always promise
+ better gains than the range filter of e.
+ As e1 and e2 are absolutely independent, one of the range filters
+ f1, f2 will always be a better choice than the filter of e no matter
+ what index is chosen to access the table. Because of this the element e
+ can be safely removed from the array.
+ */
+
+ is_pruned= true;
+ break;
+ }
+ abs_indep.merge((*usable_filter_ptr)->abs_independent);
+ }
+ else
+ {
+ /*
+ Move the element being checked to the proper position to have all
+ elements that have been already checked to be sorted by cross_x
+ */
+ Range_rowid_filter_cost_info *moved= *cand_filter_ptr;
+ memmove(usable_filter_ptr+1, usable_filter_ptr,
+ sizeof(Range_rowid_filter_cost_info *) * (i-j-1));
+ *usable_filter_ptr= moved;
+ }
+ }
+ if (is_pruned)
+ {
+ /* Remove the checked element from the array */
+ memmove(cand_filter_ptr, cand_filter_ptr+1,
+ sizeof(Range_rowid_filter_cost_info *) *
+ (range_rowid_filter_cost_info_elems - 1 - i));
+ range_rowid_filter_cost_info_elems--;
+ }
+ }
+}
+
+
+/**
+ @brief
+ Return the maximum number of elements that a container is allowed to have
+ */
+
+static ulonglong
+get_max_range_rowid_filter_elems_for_table(
+ THD *thd, TABLE *tab,
+ Rowid_filter_container_type cont_type)
+{
+ switch (cont_type) {
+ case SORTED_ARRAY_CONTAINER :
+ return thd->variables.max_rowid_filter_size/tab->file->ref_length;
+ default :
+ DBUG_ASSERT(0);
+ return 0;
+ }
+}
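
As a rough illustration of the cap computed above (a sketch with purely hypothetical values, not actual defaults):

#include <cstdio>

int main()
{
  // Both numbers are assumptions for illustration only.
  unsigned long long max_rowid_filter_size= 128 * 1024;  // bytes allowed for the filter
  unsigned ref_length= 8;                                // size of one rowid / primary key
  std::printf("max elements: %llu\n", max_rowid_filter_size / ref_length);  // 16384
  return 0;
}
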
+
+
+/**
+ @brief
+ Prepare info on possible range filters used by optimizer
+
+ @param thd The thread handler
+
+ @details
+ The function first selects the indexes of the table that potentially
+ can be used for range filters and allocates an array of the objects
+ of the Range_rowid_filter_cost_info type to store cost info on
+ possible range filters and an array of pointers to these objects.
+ The latter is created for easy sorting of the objects with cost info
+ by different sort criteria. Then the function initializes the allocated
+ array with cost info for each possible range filter. After this
+ the function calls the method TABLE::prune_range_rowid_filters().
+ The method removes the elements of the array for the filters that
+ promise less gain than others remaining in the array in any situation
+ and optimizes the order of the elements for faster choice of the best
+ range filter.
+*/
+
+void TABLE::init_cost_info_for_usable_range_rowid_filters(THD *thd)
+{
+ uint key_no;
+ key_map usable_range_filter_keys;
+ usable_range_filter_keys.clear_all();
+ key_map::Iterator it(quick_keys);
+
+ /*
+ From all indexes that can be used for range accesses select only such that
+ - range filter pushdown is supported by the engine for them (1)
+ - they are not clustered primary (2)
+ - the range filter containers for them are not too large (3)
+ */
+ while ((key_no= it++) != key_map::Iterator::BITMAP_END)
+ {
+ if (!(file->index_flags(key_no, 0, 1) & HA_DO_RANGE_FILTER_PUSHDOWN)) // !1
+ continue;
+ if (key_no == s->primary_key && file->primary_key_is_clustered()) // !2
+ continue;
+ if (quick_rows[key_no] >
+ get_max_range_rowid_filter_elems_for_table(thd, this,
+ SORTED_ARRAY_CONTAINER)) // !3
+ continue;
+ usable_range_filter_keys.set_bit(key_no);
+ }
+
+ /*
+ Allocate an array of objects to store cost info for the selected filters
+ and allocate an array of pointers to these objects
+ */
+
+ range_rowid_filter_cost_info_elems= usable_range_filter_keys.bits_set();
+ if (!range_rowid_filter_cost_info_elems)
+ return;
+
+ range_rowid_filter_cost_info_ptr=
+ (Range_rowid_filter_cost_info **)
+ thd->calloc(sizeof(Range_rowid_filter_cost_info *) *
+ range_rowid_filter_cost_info_elems);
+ range_rowid_filter_cost_info=
+ new (thd->mem_root)
+ Range_rowid_filter_cost_info[range_rowid_filter_cost_info_elems];
+ if (!range_rowid_filter_cost_info_ptr || !range_rowid_filter_cost_info)
+ {
+ range_rowid_filter_cost_info_elems= 0;
+ return;
+ }
+
+ /* Fill the allocated array with cost info on the selected range filters */
+
+ Range_rowid_filter_cost_info **curr_ptr= range_rowid_filter_cost_info_ptr;
+ Range_rowid_filter_cost_info *curr_filter_cost_info=
+ range_rowid_filter_cost_info;
+
+ key_map::Iterator li(usable_range_filter_keys);
+ while ((key_no= li++) != key_map::Iterator::BITMAP_END)
+ {
+ *curr_ptr= curr_filter_cost_info;
+ curr_filter_cost_info->init(SORTED_ARRAY_CONTAINER, this, key_no);
+ curr_ptr++;
+ curr_filter_cost_info++;
+ }
+
+ prune_range_rowid_filters();
+}
+
+
+/**
+ @brief
+ Choose the best range filter for the given access of the table
+
+ @param access_key_no The index by which the table is accessed
+ @param records The estimated total number of key tuples with this access
+ @param access_cost_factor the cost of a random seek to access the table
+
+ @details
+ The function looks through the array of cost info for range filters
+ and chooses the element for the range filter that promise the greatest
+ gain with the the ref or range access of the table by access_key_no.
+ As the array is sorted by cross_x in ascending order the function stops
+ the look through as soon as it reaches the first element with
+ cross_x_adj > records because the range filter for this element and the
+ range filters for all remaining elements do not promise positive gains.
+
+ @note
+ It is easy to see that if cross_x[i] > cross_x[j] then
+ cross_x_adj[i] > cross_x_adj[j]
+
+ @retval Pointer to the cost info for the range filter that promises
+ the greatest gain, NULL if there is no such range filter
+*/
+
+Range_rowid_filter_cost_info *
+TABLE::best_range_rowid_filter_for_partial_join(uint access_key_no,
+ double records,
+ double access_cost_factor)
+{
+ if (range_rowid_filter_cost_info_elems == 0 ||
+ covering_keys.is_set(access_key_no))
+ return 0;
+
+ /*
+ Currently we do not support usage of range filters if the table
+ is accessed by the clustered primary key. It does not make sense
+ if a full key is used. If the table is accessed by a partial
+ clustered primary key it would, but the current InnoDB code does not
+ allow it. Later this limitation will be lifted
+ */
+ if (access_key_no == s->primary_key && file->primary_key_is_clustered())
+ return 0;
+
+ Range_rowid_filter_cost_info *best_filter= 0;
+ double best_filter_gain= 0;
+
+ key_map no_filter_usage= key_info[access_key_no].overlapped;
+ no_filter_usage.merge(key_info[access_key_no].constraint_correlated);
+ for (uint i= 0; i < range_rowid_filter_cost_info_elems ; i++)
+ {
+ double curr_gain = 0;
+ Range_rowid_filter_cost_info *filter= range_rowid_filter_cost_info_ptr[i];
+
+ /*
+ Do not use a range filter that uses an index correlated with
+ the index by which the table is accessed
+ */
+ if ((filter->key_no == access_key_no) ||
+ no_filter_usage.is_set(filter->key_no))
+ continue;
+
+ filter->set_adjusted_gain_param(access_cost_factor);
+
+ if (records < filter->cross_x_adj)
+ {
+ /* Does not make sense to look through the remaining filters */
+ break;
+ }
+
+ curr_gain= filter->get_adjusted_gain(records);
+ if (best_filter_gain < curr_gain)
+ {
+ best_filter_gain= curr_gain;
+ best_filter= filter;
+ }
+ }
+ return best_filter;
+}
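
A hedged, self-contained sketch of the selection rule used above: candidates are assumed to be sorted so that cross_x (and hence cross_x_adj) grows, gains are computed with the get_adjusted_gain() formula r*a_adj - b, and the scan stops at the first candidate whose cross_x_adj exceeds the estimated number of key tuples. All numbers are invented for illustration.

#include <cstdio>

struct FilterCost { double a_adj, b, cross_x_adj; };  // simplified stand-in

int main()
{
  // Three hypothetical candidates, already ordered by cross_x_adj.
  FilterCost filters[]= { {1.1, 90.0, 82.0}, {0.8, 70.0, 88.0}, {0.5, 60.0, 120.0} };
  double records= 100;              // estimated key tuples for the ref/range access
  double best_gain= 0;
  int best= -1;

  for (int i= 0; i < 3; i++)
  {
    if (records < filters[i].cross_x_adj)
      break;                        // no remaining filter can give a positive gain
    double gain= records * filters[i].a_adj - filters[i].b;   // get_adjusted_gain()
    if (gain > best_gain)
    {
      best_gain= gain;
      best= i;
    }
  }
  std::printf("best filter: %d, gain: %.1f\n", best, best_gain);  // 0, 20.0
  return 0;
}
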
+
+
+/**
+ @brief
+ Fill the range rowid filter performing the associated range index scan
+
+ @details
+ This function performs the range index scan associated with this
+ range filter and places into the filter the rowids / primary keys
+ read from key tuples when doing this scan.
+ @retval
+ false on success
+ true otherwise
+
+ @note
+ The function assumes that the quick select object to perform
+ the index range scan has been already created.
+
+ @note
+ Currently the same table handler is used to access the joined table
+ and to perform the range index scan that fills the filter.
+ In the future two different handlers will be used for these
+ purposes to facilitate lazy building of the filter.
+*/
+
+bool Range_rowid_filter::fill()
+{
+ int rc= 0;
+ handler *file= table->file;
+ THD *thd= table->in_use;
+ QUICK_RANGE_SELECT* quick= (QUICK_RANGE_SELECT*) select->quick;
+
+ uint table_status_save= table->status;
+ Item *pushed_idx_cond_save= file->pushed_idx_cond;
+ uint pushed_idx_cond_keyno_save= file->pushed_idx_cond_keyno;
+ bool in_range_check_pushed_down_save= file->in_range_check_pushed_down;
+
+ table->status= 0;
+ file->pushed_idx_cond= 0;
+ file->pushed_idx_cond_keyno= MAX_KEY;
+ file->in_range_check_pushed_down= false;
+
+ /* We're going to just read rowids / primary keys */
+ table->prepare_for_position();
+
+ table->file->ha_start_keyread(quick->index);
+
+ if (quick->init() || quick->reset())
+ rc= 1;
+
+ while (!rc)
+ {
+ rc= quick->get_next();
+ if (thd->killed)
+ rc= 1;
+ if (!rc)
+ {
+ file->position(quick->record);
+ if (container->add(NULL, (char*) file->ref))
+ rc= 1;
+ else
+ tracker->increment_container_elements_count();
+ }
+ }
+
+ quick->range_end();
+ table->file->ha_end_keyread();
+
+ table->status= table_status_save;
+ file->pushed_idx_cond= pushed_idx_cond_save;
+ file->pushed_idx_cond_keyno= pushed_idx_cond_keyno_save;
+ file->in_range_check_pushed_down= in_range_check_pushed_down_save;
+ tracker->report_container_buff_size(table->file->ref_length);
+
+ if (rc != HA_ERR_END_OF_FILE)
+ return 1;
+ table->file->rowid_filter_is_active= true;
+ return 0;
+}
+
+
+/**
+ @brief
+ Binary search in the sorted array of a rowid filter
+
+ @param ctxt context of the search
+ @param elem rowid / primary key to look for
+
+ @details
+ The function looks for the rowid / primary key 'elem' in this container
+ assuming that ctxt contains a pointer to the TABLE structure created
+ for the table to whose row elem refers.
+
+ @retval
+ true elem is found in the container
+ false otherwise
+*/
+
+bool Rowid_filter_sorted_array::check(void *ctxt, char *elem)
+{
+ TABLE *table= (TABLE *) ctxt;
+ if (!is_checked)
+ {
+ refpos_container.sort(refpos_order_cmp, (void *) (table->file));
+ is_checked= true;
+ }
+ int l= 0;
+ int r= refpos_container.elements()-1;
+ while (l <= r)
+ {
+ int m= (l + r) / 2;
+ int cmp= refpos_order_cmp((void *) (table->file),
+ refpos_container.get_pos(m), elem);
+ if (cmp == 0)
+ return true;
+ if (cmp < 0)
+ l= m + 1;
+ else
+ r= m-1;
+ }
+ return false;
+}
+
+
+Range_rowid_filter::~Range_rowid_filter()
+{
+ delete container;
+ container= 0;
+ if (select)
+ {
+ if (select->quick)
+ {
+ delete select->quick;
+ select->quick= 0;
+ }
+ delete select;
+ select= 0;
+ }
+}
diff --git a/sql/rowid_filter.h b/sql/rowid_filter.h
new file mode 100644
index 00000000000..a9930dcbca8
--- /dev/null
+++ b/sql/rowid_filter.h
@@ -0,0 +1,468 @@
+/*
+ Copyright (c) 2018, 2019 MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef ROWID_FILTER_INCLUDED
+#define ROWID_FILTER_INCLUDED
+
+
+#include "mariadb.h"
+#include "sql_array.h"
+
+/*
+
+ What rowid / primary filters are
+ --------------------------------
+
+ Consider a join query Q of the form
+ SELECT * FROM T1, ... , Tk WHERE P.
+
+ For any table reference Ti(Q) from the FROM clause of Q different
+ rowid / primary key filters (pk-filters for short) can be built.
+ A pk-filter F built for Ti(Q) is a set of rowids / primary keys of Ti
+ F= {pk1,...,pkN} such that for any row r=r1||...||rk from the result set of Q
+ ri's rowid / primary key pk(ri) is contained in F.
+
+ When pk-filters are useful
+ --------------------------
+
+ If building a pk-filter F for Ti(Q) is not too costly and its cardinality #F
+ is much less than the cardinality #Ti of Ti, then using the pk-filter when
+ executing Q might be quite beneficial.
+
+ Let r be a random row from Ti. Let s(F) be the probability that pk(r)
+ belongs to F. Let BC(F) be the cost of building F.
+
+ Suppose that the optimizer has chosen for Q a plan with this join order
+ T1 => ... Tk and that the table Ti is accessed by a ref access using index I.
+ Let K = {k1,...,kM} be the set of all rowid / primary key values used to access
+ rows of Ti when looking for matches in this table to join Ti by index I.
+
+ Let's assume that the two sets K and F are uncorrelated. With this assumption,
+ if before accessing data from Ti by the rowid / primary key k we first
+ check whether k is in F then we can expect to save M*(1-s(F)) accesses of
+ data rows from Ti. If we can guarantee that the test of whether k is in F is
+ relatively cheap then we can gain a lot, assuming that BC(F) is much less
+ than the cost of fetching M*(1-s(F)) records from Ti and the following
+ evaluation of conditions pushed into Ti.
+
+ Making pk-filter test cheap
+ ---------------------------
+
+ If the search structure to test whether an element is in F can be fully
+ placed in RAM then this test is expected to be much cheaper than a random
+ access of a record from Ti. We'll consider two search structures for
+ pk-filters: ordered array and bloom filter. Ordered array is easy to
+ implement, but it's space consuming. If a filter contains primary keys
+ then at least space for each primary key from the filter must be allocated
+ in the search structure. In contrast, a bloom filter requires only a
+ fixed number of bits per element, and this number does not depend on the
+ size of the keys (10 bits per element will serve a pk-filter of any key size).
+
+*/
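
A back-of-the-envelope illustration of the trade-off described above, with purely hypothetical numbers for M, s(F), BC(F) and a unit cost per fetched row:

#include <cstdio>

int main()
{
  double M= 10000;        // key values used to probe Ti
  double s= 0.05;         // probability that a probed key is in F
  double BC= 300;         // cost of building the filter F
  double fetch_cost= 1.0; // assumed cost of fetching one row from Ti

  double saved_fetches= M * (1 - s);                 // rows we never have to read
  double net_gain= saved_fetches * fetch_cost - BC;  // positive => the filter pays off
  std::printf("saved fetches: %.0f, net gain: %.0f\n", saved_fetches, net_gain);
  return 0;
}
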
+
+/*
+
+ How and when the optimizer builds and uses range rowid filters
+ --------------------------------------------------------------
+
+ 1. In make_join_statistics()
+ for each join table s
+ after the call of get_quick_record_count()
+ the method TABLE::init_cost_info_for_usable_range_rowid_filters()
+ is called.
+ The method builds an array of Range_rowid_filter_cost_info elements
+ containing the cost info on possible range filters for s->table.
+ The array is optimized for further usage.
+
+ 2. For each partial join order when the optimizer considers joining
+ table s to this partial join
+ In the function best_access_path()
+ a. When evaluating a ref access r by index idx to join s
+ the optimizer estimates the effect of usage of each possible
+ range filter f and chooses one with the best gain. The gain
+ is taken into account when the cost of the ref access r is
+ calculated. If it turns out that this is the best ref access
+ to join s then the info about the chosen filter together
+ with the info on r is remembered in the corresponding element
+ of the array of POSITION structures.
+ [We evaluate every pair (ref access, range filter) rather than
+ every pair (best ref access, range filter) because if the index
+ ref_idx used for ref access r correlates with the index rf_idx
+ used by the filter f then the pair (r,f) is not evaluated
+ at all as we don't know how to estimate the effect of correlation
+ between ref_idx and rf_idx.]
+ b. When evaluating the best range access to join table s the
+ optimizer estimates the effect of usage of each possible
+ range filter f and chooses one with the best gain.
+ [Here we should have evaluated every pair (range access,
+ range filter) as well, but it's not done yet.]
+
+ 3. When the cheapest execution plan has been chosen and after the
+ call of JOIN::get_best_combination()
+ The method JOIN::make_range_rowid_filters() is called
+ For each range rowid filter used in the chosen execution plan
+ the method creates a quick select object to be able to perform
+ index range scan to fill the filter at the execution stage.
+ The method also creates Range_rowid_filter objects that are
+ used at the execution stage.
+
+ 4. Just before the execution stage
+ The method JOIN::init_range_rowid_filters() is called.
+ For each join table s that is to be accessed with usage of a range
+ filter the method allocates containers for the range filter and
+ it lets the engine know that the filter will be used when
+ accessing s.
+
+ 5. At the execution stage
+ In the function sub_select() just before the first access of a join
+ table s employing a range filter
+ The method JOIN_TAB::build_range_rowid_filter_if_needed() is called
+ The method fills the filter using the quick select created by
+ JOIN::make_range_rowid_filters().
+
+ 6. The accessed key tuples are checked against the filter within the engine
+ using the info pushed into it.
+
+*/
+
+struct TABLE;
+class SQL_SELECT;
+class Rowid_filter_container;
+class Range_rowid_filter_cost_info;
+
+/* Cost to write rowid into array */
+#define ARRAY_WRITE_COST 0.005
+/* Factor used to calculate cost of sorting rowids in array */
+#define ARRAY_SORT_C 0.01
+/* Cost to evaluate condition */
+#define COST_COND_EVAL 0.2
+
+typedef enum
+{
+ SORTED_ARRAY_CONTAINER,
+ BLOOM_FILTER_CONTAINER // Not used yet
+} Rowid_filter_container_type;
+
+/**
+ @class Rowid_filter_container
+
+ The interface for different types of containers to store info on the set
+ of rowids / primary keys that defines a pk-filter.
+
+ There will be two implementations of this abstract class.
+ - sorted array
+ - bloom filter
+*/
+
+class Rowid_filter_container : public Sql_alloc
+{
+public:
+
+ virtual Rowid_filter_container_type get_type() = 0;
+
+ /* Allocate memory for the container */
+ virtual bool alloc() = 0;
+
+ /*
+ @brief Add info on a rowid / primary key to the container
+ @param ctxt The context info (opaque)
+ @param elem The rowid / primary key to be added to the container
+ @retval true if elem is successfully added
+ */
+ virtual bool add(void *ctxt, char *elem) = 0;
+
+ /*
+ @brief Check whether a rowid / primary key is in container
+ @param ctxt The context info (opaque)
+ @param elem The rowid / primary key to be checked against the container
+ @retval False if elem is definitely not in the container
+ */
+ virtual bool check(void *ctxt, char *elem) = 0;
+
+ virtual ~Rowid_filter_container() {}
+};
+
+
+/**
+ @class Rowid_filter
+
+ The interface for different types of pk-filters
+
+ Currently we support only range pk filters.
+*/
+
+class Rowid_filter : public Sql_alloc
+{
+protected:
+
+ /* The container to store info on the set of elements in the filter */
+ Rowid_filter_container *container;
+
+ Rowid_filter_tracker *tracker;
+
+public:
+ Rowid_filter(Rowid_filter_container *container_arg)
+ : container(container_arg) {}
+
+ /*
+ Build the filter :
+ fill it with info on the set of elements placed there
+ */
+ virtual bool build() = 0;
+
+ /*
+ Check whether an element is in the filter.
+ Returns false if the element is definitely not in the filter.
+ */
+ virtual bool check(char *elem) = 0;
+
+ virtual ~Rowid_filter() {}
+
+ Rowid_filter_container *get_container() { return container; }
+
+ void set_tracker(Rowid_filter_tracker *track_arg) { tracker= track_arg; }
+ Rowid_filter_tracker *get_tracker() { return tracker; }
+};
+
+
+/**
+ @class Range_rowid_filter
+
+ The implementation of the Rowid_filter interface used for pk-filters
+ that are filled when performing range index scans.
+*/
+
+class Range_rowid_filter: public Rowid_filter
+{
+ /* The table for which the rowid filter is built */
+ TABLE *table;
+ /* The select to perform the range scan to fill the filter */
+ SQL_SELECT *select;
+ /* The cost info on the filter (used for EXPLAIN/ANALYZE) */
+ Range_rowid_filter_cost_info *cost_info;
+
+public:
+ Range_rowid_filter(TABLE *tab,
+ Range_rowid_filter_cost_info *cost_arg,
+ Rowid_filter_container *container_arg,
+ SQL_SELECT *sel)
+ : Rowid_filter(container_arg), table(tab), select(sel), cost_info(cost_arg)
+ {}
+
+ ~Range_rowid_filter();
+
+ bool build() { return fill(); }
+
+ bool check(char *elem)
+ {
+ bool was_checked= container->check(table, elem);
+ tracker->increment_checked_elements_count(was_checked);
+ return was_checked;
+ }
+
+ bool fill();
+
+ SQL_SELECT *get_select() { return select; }
+};
+
+
+/**
+ @class Refpos_container_sorted_array
+
+ The wrapper class over Dynamic_array<char> to facilitate operations over
+ array of elements of the type char[N] where N is the same for all elements
+*/
+
+class Refpos_container_sorted_array : public Sql_alloc
+{
+ /*
+ Maximum number of elements in the array
+ (Now is used only at the initialization of the dynamic array)
+ */
+ uint max_elements;
+ /* Number of bytes allocated for an element */
+ uint elem_size;
+ /* The dynamic array over which the wrapper is built */
+ Dynamic_array<char> *array;
+
+public:
+
+ Refpos_container_sorted_array(uint max_elems, uint elem_sz)
+ : max_elements(max_elems), elem_size(elem_sz), array(0) {}
+
+ ~Refpos_container_sorted_array()
+ {
+ delete array;
+ array= 0;
+ }
+
+ bool alloc()
+ {
+ array= new Dynamic_array<char> (elem_size * max_elements,
+ elem_size * max_elements/sizeof(char) + 1);
+ return array == NULL;
+ }
+
+ bool add(char *elem)
+ {
+ for (uint i= 0; i < elem_size; i++)
+ {
+ if (array->append(elem[i]))
+ return true;
+ }
+ return false;
+ }
+
+ char *get_pos(uint n)
+ {
+ return array->get_pos(n * elem_size);
+ }
+
+ uint elements() { return (uint) (array->elements() / elem_size); }
+
+ void sort (int (*cmp) (void *ctxt, const void *el1, const void *el2),
+ void *cmp_arg)
+ {
+ my_qsort2(array->front(), array->elements()/elem_size,
+ elem_size, (qsort2_cmp) cmp, cmp_arg);
+ }
+};
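
A plain-C++ sketch (assumptions only, not the MariaDB class) of the packing scheme the wrapper above relies on: fixed-size rowids appended back to back into one byte array, addressed as index * elem_size and sorted as opaque byte strings.

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>

static const unsigned elem_size= 4;    // stands in for handler::ref_length

static int cmp_refpos(const void *a, const void *b)
{
  return std::memcmp(a, b, elem_size); // the real code uses an engine-supplied cmp
}

int main()
{
  std::vector<char> packed;            // stands in for Dynamic_array<char>
  const char rowids[3][elem_size]= { {2,0,0,1}, {1,0,0,9}, {1,0,0,2} };

  for (const char *r : rowids)         // add(): append elem_size bytes per element
    packed.insert(packed.end(), r, r + elem_size);

  std::qsort(packed.data(), packed.size() / elem_size, elem_size, cmp_refpos);

  // get_pos(n) is then simply &packed[n * elem_size]
  std::printf("elements: %zu, first byte of smallest rowid: %d\n",
              packed.size() / elem_size, packed[0]);
  return 0;
}
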
+
+
+/**
+ @class Rowid_filter_sorted_array
+
+ The implementation of the Rowid_filter_container interface as
+ a sorted array container of rowids / primary keys
+*/
+
+class Rowid_filter_sorted_array: public Rowid_filter_container
+{
+ /* The dynamic array to store rowids / primary keys */
+ Refpos_container_sorted_array refpos_container;
+ /* Initially false, becomes true after the first call of check() */
+ bool is_checked;
+
+public:
+ Rowid_filter_sorted_array(uint elems, uint elem_size)
+ : refpos_container(elems, elem_size), is_checked(false) {}
+
+ Rowid_filter_container_type get_type()
+ { return SORTED_ARRAY_CONTAINER; }
+
+ bool alloc() { return refpos_container.alloc(); }
+
+ bool add(void *ctxt, char *elem) { return refpos_container.add(elem); }
+
+ bool check(void *ctxt, char *elem);
+};
+
+/**
+ @class Range_rowid_filter_cost_info
+
+ An object of this class is created for each potentially usable
+ range filter. It contains the info that allows one to figure out
+ whether usage of the range filter promises some gain.
+*/
+
+class Range_rowid_filter_cost_info : public Sql_alloc
+{
+ /* The table for which the range filter is to be built (if needed) */
+ TABLE *table;
+ /* Estimated number of elements in the filter */
+ ulonglong est_elements;
+ /* The cost of building the range filter */
+ double b;
+ /*
+ a*N-b yields the gain of the filter
+ for N key tuples of the index key_no
+ */
+ double a;
+ /* The value of N where the gain is 0 */
+ double cross_x;
+ /* Used for pruning of the potential range filters */
+ key_map abs_independent;
+
+ /*
+ These two parameters are used to choose the best range filter
+ in the function TABLE::best_range_rowid_filter_for_partial_join
+ */
+ double a_adj;
+ double cross_x_adj;
+
+public:
+ /* The type of the container of the range filter */
+ Rowid_filter_container_type container_type;
+ /* The index whose range scan would be used to build the range filter */
+ uint key_no;
+ /* The selectivity of the range filter */
+ double selectivity;
+
+ Range_rowid_filter_cost_info() : table(0), key_no(0) {}
+
+ void init(Rowid_filter_container_type cont_type,
+ TABLE *tab, uint key_no);
+
+ double build_cost(Rowid_filter_container_type container_type);
+
+ inline double lookup_cost(Rowid_filter_container_type cont_type);
+
+ inline double
+ avg_access_and_eval_gain_per_row(Rowid_filter_container_type cont_type);
+
+ inline double avg_adjusted_gain_per_row(double access_cost_factor);
+
+ inline void set_adjusted_gain_param(double access_cost_factor);
+
+ /* Get the gain that usage of filter promises for r key tuples */
+ inline double get_gain(double r)
+ {
+ return r * a - b;
+ }
+
+ /* Get the adjusted gain that usage of filter promises for r key tuples */
+ inline double get_adjusted_gain(double r)
+ {
+ return r * a_adj - b;
+ }
+
+ /*
+ The gain promised by usage of the filter for r key tuples
+ due to less condition evaluations
+ */
+ inline double get_cmp_gain(double r)
+ {
+ return r * (1 - selectivity) / TIME_FOR_COMPARE;
+ }
+
+ Rowid_filter_container *create_container();
+
+ double get_a() { return a; }
+
+ friend
+ void TABLE::prune_range_rowid_filters();
+
+ friend
+ void TABLE::init_cost_info_for_usable_range_rowid_filters(THD *thd);
+
+ friend
+ Range_rowid_filter_cost_info *
+ TABLE::best_range_rowid_filter_for_partial_join(uint access_key_no,
+ double records,
+ double access_cost_factor);
+};
+
+#endif /* ROWID_FILTER_INCLUDED */
diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc
index 322b84130f2..17f474c2acf 100644
--- a/sql/rpl_gtid.cc
+++ b/sql/rpl_gtid.cc
@@ -79,7 +79,7 @@ rpl_slave_state::record_and_update_gtid(THD *thd, rpl_group_info *rgi)
rgi->gtid_pending= false;
if (rgi->gtid_ignore_duplicate_state!=rpl_group_info::GTID_DUPLICATE_IGNORE)
{
- if (record_gtid(thd, &rgi->current_gtid, sub_id, NULL, false, &hton))
+ if (record_gtid(thd, &rgi->current_gtid, sub_id, false, false, &hton))
DBUG_RETURN(1);
update_state_hash(sub_id, &rgi->current_gtid, hton, rgi);
}
@@ -244,7 +244,7 @@ rpl_slave_state_free_element(void *arg)
rpl_slave_state::rpl_slave_state()
- : last_sub_id(0), gtid_pos_tables(0), loaded(false)
+ : pending_gtid_count(0), last_sub_id(0), gtid_pos_tables(0), loaded(false)
{
mysql_mutex_init(key_LOCK_slave_state, &LOCK_slave_state,
MY_MUTEX_INIT_SLOW);
@@ -331,14 +331,11 @@ rpl_slave_state::update(uint32 domain_id, uint32 server_id, uint64 sub_id,
}
}
rgi->gtid_ignore_duplicate_state= rpl_group_info::GTID_DUPLICATE_NULL;
-
-#ifdef HAVE_REPLICATION
- rgi->pending_gtid_deletes_clear();
-#endif
}
if (!(list_elem= (list_element *)my_malloc(sizeof(*list_elem), MYF(MY_WME))))
return 1;
+ list_elem->domain_id= domain_id;
list_elem->server_id= server_id;
list_elem->sub_id= sub_id;
list_elem->seq_no= seq_no;
@@ -348,6 +345,15 @@ rpl_slave_state::update(uint32 domain_id, uint32 server_id, uint64 sub_id,
if (last_sub_id < sub_id)
last_sub_id= sub_id;
+#ifdef HAVE_REPLICATION
+ ++pending_gtid_count;
+ if (pending_gtid_count >= opt_gtid_cleanup_batch_size)
+ {
+ pending_gtid_count = 0;
+ slave_background_gtid_pending_delete_request();
+ }
+#endif
+
return 0;
}
@@ -382,20 +388,22 @@ rpl_slave_state::get_element(uint32 domain_id)
int
-rpl_slave_state::put_back_list(uint32 domain_id, list_element *list)
+rpl_slave_state::put_back_list(list_element *list)
{
- element *e;
+ element *e= NULL;
int err= 0;
mysql_mutex_lock(&LOCK_slave_state);
- if (!(e= (element *)my_hash_search(&hash, (const uchar *)&domain_id, 0)))
- {
- err= 1;
- goto end;
- }
while (list)
{
list_element *next= list->next;
+
+ if ((!e || e->domain_id != list->domain_id) &&
+ !(e= (element *)my_hash_search(&hash, (const uchar *)&list->domain_id, 0)))
+ {
+ err= 1;
+ goto end;
+ }
e->add(list);
list= next;
}
@@ -572,12 +580,12 @@ rpl_slave_state::select_gtid_pos_table(THD *thd, LEX_CSTRING *out_tablename)
/*
Write a gtid to the replication slave state table.
+ Do it as part of the transaction, to get slave crash safety, or as a separate
+ transaction if !in_transaction (e.g. MyISAM or DDL).
+
gtid The global transaction id for this event group.
sub_id Value allocated within the sub_id when the event group was
read (sub_id must be consistent with commit order in master binlog).
- rgi rpl_group_info context, if we are recording the gtid transactionally
- as part of replicating a transactional event. NULL if called from
- outside of a replicated transaction.
Note that caller must later ensure that the new gtid and sub_id is inserted
into the appropriate HASH element with rpl_slave_state.add(), so that it can
@@ -585,16 +593,13 @@ rpl_slave_state::select_gtid_pos_table(THD *thd, LEX_CSTRING *out_tablename)
*/
int
rpl_slave_state::record_gtid(THD *thd, const rpl_gtid *gtid, uint64 sub_id,
- rpl_group_info *rgi, bool in_statement,
+ bool in_transaction, bool in_statement,
void **out_hton)
{
TABLE_LIST tlist;
int err= 0, not_sql_thread;
bool table_opened= false;
TABLE *table;
- list_element *delete_list= 0, *next, *cur, **next_ptr_ptr, **best_ptr_ptr;
- uint64 best_sub_id;
- element *elem;
ulonglong thd_saved_option= thd->variables.option_bits;
Query_tables_list lex_backup;
wait_for_commit* suspended_wfc;
@@ -684,7 +689,7 @@ rpl_slave_state::record_gtid(THD *thd, const rpl_gtid *gtid, uint64 sub_id,
thd->wsrep_ignore_table= true;
#endif
- if (!rgi)
+ if (!in_transaction)
{
DBUG_PRINT("info", ("resetting OPTION_BEGIN"));
thd->variables.option_bits&=
@@ -716,168 +721,280 @@ rpl_slave_state::record_gtid(THD *thd, const rpl_gtid *gtid, uint64 sub_id,
my_error(ER_OUT_OF_RESOURCES, MYF(0));
goto end;
}
+end:
- mysql_mutex_lock(&LOCK_slave_state);
- if ((elem= get_element(gtid->domain_id)) == NULL)
+#ifdef WITH_WSREP
+ thd->wsrep_ignore_table= false;
+#endif
+
+ if (table_opened)
{
- mysql_mutex_unlock(&LOCK_slave_state);
- my_error(ER_OUT_OF_RESOURCES, MYF(0));
- err= 1;
- goto end;
+ if (err || (err= ha_commit_trans(thd, FALSE)))
+ ha_rollback_trans(thd, FALSE);
+
+ close_thread_tables(thd);
+ if (in_transaction)
+ thd->mdl_context.release_statement_locks();
+ else
+ thd->mdl_context.release_transactional_locks();
}
+ thd->lex->restore_backup_query_tables_list(&lex_backup);
+ thd->variables.option_bits= thd_saved_option;
+ thd->resume_subsequent_commits(suspended_wfc);
+ DBUG_EXECUTE_IF("inject_record_gtid_serverid_100_sleep",
+ {
+ if (gtid->server_id == 100)
+ my_sleep(500000);
+ });
+ DBUG_RETURN(err);
+}
- /* Now pull out all GTIDs that were recorded in this engine. */
- delete_list = NULL;
- next_ptr_ptr= &elem->list;
- cur= elem->list;
- best_sub_id= 0;
- best_ptr_ptr= NULL;
- while (cur)
+
+/*
+ Return a list of all old GTIDs in any mysql.gtid_slave_pos* table that are
+ no longer needed and can be deleted from the table.
+
+ Within each domain, we need to keep around the latest GTID (the one with the
+ highest sub_id), but any others in that domain can be deleted.
+*/
+rpl_slave_state::list_element *
+rpl_slave_state::gtid_grab_pending_delete_list()
+{
+ uint32 i;
+ list_element *full_list;
+
+ mysql_mutex_lock(&LOCK_slave_state);
+ full_list= NULL;
+ for (i= 0; i < hash.records; ++i)
{
- list_element *next= cur->next;
- if (cur->hton == hton)
- {
- /* Belongs to same engine, so move it to the delete list. */
- cur->next= delete_list;
- delete_list= cur;
- if (cur->sub_id > best_sub_id)
+ element *elem= (element *)my_hash_element(&hash, i);
+ list_element *elist= elem->list;
+ list_element *last_elem, **best_ptr_ptr, *cur, *next;
+ uint64 best_sub_id;
+
+ if (!elist)
+ continue; /* Nothing here */
+
+ /* Delete any old stuff, but keep around the most recent one. */
+ cur= elist;
+ best_sub_id= cur->sub_id;
+ best_ptr_ptr= &elist;
+ last_elem= cur;
+ while ((next= cur->next)) {
+ last_elem= next;
+ if (next->sub_id > best_sub_id)
{
- best_sub_id= cur->sub_id;
- best_ptr_ptr= &delete_list;
- }
- else if (best_ptr_ptr == &delete_list)
+ best_sub_id= next->sub_id;
best_ptr_ptr= &cur->next;
- }
- else
- {
- /* Another engine, leave it in the list. */
- if (cur->sub_id > best_sub_id)
- {
- best_sub_id= cur->sub_id;
- /* Current best is not on the delete list. */
- best_ptr_ptr= NULL;
}
- *next_ptr_ptr= cur;
- next_ptr_ptr= &cur->next;
+ cur= next;
}
- cur= next;
- }
- *next_ptr_ptr= NULL;
- /*
- If the highest sub_id element is on the delete list, put it back on the
- original list, to preserve the highest sub_id element in the table for
- GTID position recovery.
- */
- if (best_ptr_ptr)
- {
+ /*
+ Append the new elements to the full list. Note the order is important;
+ we do it here so that we do not break the list if best_sub_id is the
+ last of the new elements.
+ */
+ last_elem->next= full_list;
+ /*
+ Delete the highest sub_id element from the old list, and put it back as
+ the single-element new list.
+ */
cur= *best_ptr_ptr;
*best_ptr_ptr= cur->next;
- cur->next= elem->list;
+ cur->next= NULL;
elem->list= cur;
+
+ /*
+ Collect the full list so far here. Note that elist may have moved if we
+ deleted the first element, so order is again important.
+ */
+ full_list= elist;
}
mysql_mutex_unlock(&LOCK_slave_state);
- if (!delete_list)
- goto end;
+ return full_list;
+}
+
- /* Now delete any already committed GTIDs. */
- bitmap_set_bit(table->read_set, table->field[0]->field_index);
- bitmap_set_bit(table->read_set, table->field[1]->field_index);
+/* Find the mysql.gtid_slave_posXXX table associated with a given hton. */
+LEX_CSTRING *
+rpl_slave_state::select_gtid_pos_table(void *hton)
+{
+ struct gtid_pos_table *table_entry;
- if ((err= table->file->ha_index_init(0, 0)))
+ /*
+ See comments on rpl_slave_state::gtid_pos_tables for rules around proper
+ access to the list.
+ */
+ table_entry= (struct gtid_pos_table *)
+ my_atomic_loadptr_explicit(&gtid_pos_tables, MY_MEMORY_ORDER_ACQUIRE);
+
+ while (table_entry)
{
- table->file->print_error(err, MYF(0));
- goto end;
+ if (table_entry->table_hton == hton)
+ {
+ if (likely(table_entry->state == GTID_POS_AVAILABLE))
+ return &table_entry->table_name;
+ }
+ table_entry= table_entry->next;
}
- cur = delete_list;
- while (cur)
- {
- uchar key_buffer[4+8];
- DBUG_EXECUTE_IF("gtid_slave_pos_simulate_failed_delete",
- { err= ENOENT;
- table->file->print_error(err, MYF(0));
- /* `break' does not work inside DBUG_EXECUTE_IF */
- goto dbug_break; });
+ table_entry= (struct gtid_pos_table *)
+ my_atomic_loadptr_explicit(&default_gtid_pos_table, MY_MEMORY_ORDER_ACQUIRE);
+ return &table_entry->table_name;
+}
- next= cur->next;
- table->field[1]->store(cur->sub_id, true);
- /* domain_id is already set in table->record[0] from write_row() above. */
- key_copy(key_buffer, table->record[0], &table->key_info[0], 0, false);
- if (table->file->ha_index_read_map(table->record[1], key_buffer,
- HA_WHOLE_KEY, HA_READ_KEY_EXACT))
- /* We cannot find the row, assume it is already deleted. */
- ;
- else if ((err= table->file->ha_delete_row(table->record[1])))
- table->file->print_error(err, MYF(0));
- /*
- In case of error, we still discard the element from the list. We do
- not want to endlessly error on the same element in case of table
- corruption or such.
- */
- cur= next;
- if (err)
- break;
- }
-IF_DBUG(dbug_break:, )
- table->file->ha_index_end();
+void
+rpl_slave_state::gtid_delete_pending(THD *thd,
+ rpl_slave_state::list_element **list_ptr)
+{
+ int err= 0;
+ ulonglong thd_saved_option;
-end:
+ if (unlikely(!loaded))
+ return;
#ifdef WITH_WSREP
- thd->wsrep_ignore_table= false;
+ /*
+ Updates to the slave state table should not be appended to the galera
+ transaction writeset.
+ */
+ thd->wsrep_ignore_table= true;
#endif
- if (table_opened)
+ thd_saved_option= thd->variables.option_bits;
+ thd->variables.option_bits&=
+ ~(ulonglong)(OPTION_NOT_AUTOCOMMIT |OPTION_BEGIN |OPTION_BIN_LOG |
+ OPTION_GTID_BEGIN);
+
+ while (*list_ptr)
{
- if (err || (err= ha_commit_trans(thd, FALSE)))
- {
- /*
- If error, we need to put any remaining delete_list back into the HASH
- so we can do another delete attempt later.
- */
- if (delete_list)
- {
- put_back_list(gtid->domain_id, delete_list);
- delete_list = 0;
- }
+ LEX_CSTRING *gtid_pos_table_name, *tmp_table_name;
+ Query_tables_list lex_backup;
+ TABLE_LIST tlist;
+ TABLE *table;
+ handler::Table_flags direct_pos;
+ list_element *cur, **cur_ptr_ptr;
+ bool table_opened= false;
+ void *hton= (*list_ptr)->hton;
- ha_rollback_trans(thd, FALSE);
+ thd->reset_for_next_command();
+
+ /*
+ Only the SQL thread can call select_gtid_pos_table without a mutex
+ Other threads needs to use a mutex and take into account that the
+ result may change during execution, so we have to make a copy.
+ */
+ mysql_mutex_lock(&LOCK_slave_state);
+ tmp_table_name= select_gtid_pos_table(hton);
+ gtid_pos_table_name= thd->make_clex_string(tmp_table_name->str,
+ tmp_table_name->length);
+ mysql_mutex_unlock(&LOCK_slave_state);
+ if (!gtid_pos_table_name)
+ {
+ /* Out of memory - we can try again later. */
+ break;
}
- close_thread_tables(thd);
- if (rgi)
+
+ thd->lex->reset_n_backup_query_tables_list(&lex_backup);
+ tlist.init_one_table(&MYSQL_SCHEMA_NAME, gtid_pos_table_name, NULL, TL_WRITE);
+ if ((err= open_and_lock_tables(thd, &tlist, FALSE, 0)))
+ goto end;
+ table_opened= true;
+ table= tlist.table;
+
+ if ((err= gtid_check_rpl_slave_state_table(table)))
+ goto end;
+
+ direct_pos= table->file->ha_table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION;
+ bitmap_set_all(table->write_set);
+ table->rpl_write_set= table->write_set;
+
+ /* Now delete any already committed GTIDs. */
+ bitmap_set_bit(table->read_set, table->field[0]->field_index);
+ bitmap_set_bit(table->read_set, table->field[1]->field_index);
+
+ if (!direct_pos && (err= table->file->ha_index_init(0, 0)))
{
- thd->mdl_context.release_statement_locks();
- /*
- Save the list of old gtid entries we deleted. If this transaction
- fails later for some reason and is rolled back, the deletion of those
- entries will be rolled back as well, and we will need to put them back
- on the to-be-deleted list so we can re-do the deletion. Otherwise
- redundant rows in mysql.gtid_slave_pos may accumulate if transactions
- are rolled back and retried after record_gtid().
- */
-#ifdef HAVE_REPLICATION
- rgi->pending_gtid_deletes_save(gtid->domain_id, delete_list);
-#endif
+ table->file->print_error(err, MYF(0));
+ goto end;
}
- else
+
+ cur = *list_ptr;
+ cur_ptr_ptr = list_ptr;
+ do
{
- thd->mdl_context.release_transactional_locks();
-#ifdef HAVE_REPLICATION
- rpl_group_info::pending_gtid_deletes_free(delete_list);
-#endif
+ uchar key_buffer[4+8];
+ list_element *next= cur->next;
+
+ if (cur->hton == hton)
+ {
+ int res;
+
+ table->field[0]->store((ulonglong)cur->domain_id, true);
+ table->field[1]->store(cur->sub_id, true);
+ if (direct_pos)
+ {
+ res= table->file->ha_rnd_pos_by_record(table->record[0]);
+ }
+ else
+ {
+ key_copy(key_buffer, table->record[0], &table->key_info[0], 0, false);
+ res= table->file->ha_index_read_map(table->record[0], key_buffer,
+ HA_WHOLE_KEY, HA_READ_KEY_EXACT);
+ }
+ DBUG_EXECUTE_IF("gtid_slave_pos_simulate_failed_delete",
+ { res= 1;
+ err= ENOENT;
+ sql_print_error("<DEBUG> Error deleting old GTID row");
+ });
+ if (res)
+ /* We cannot find the row, assume it is already deleted. */
+ ;
+ else if ((err= table->file->ha_delete_row(table->record[0])))
+ {
+ sql_print_error("Error deleting old GTID row: %s",
+ thd->get_stmt_da()->message());
+ /*
+ In case of error, we still discard the element from the list. We do
+ not want to endlessly error on the same element in case of table
+ corruption or such.
+ */
+ }
+ *cur_ptr_ptr= next;
+ my_free(cur);
+ }
+ else
+ {
+ /* Leave this one in the list until we get to the table for its hton. */
+ cur_ptr_ptr= &cur->next;
+ }
+ cur= next;
+ if (err)
+ break;
+ } while (cur);
+end:
+ if (table_opened)
+ {
+ if (!direct_pos)
+ table->file->ha_index_end();
+
+ if (err || (err= ha_commit_trans(thd, FALSE)))
+ ha_rollback_trans(thd, FALSE);
}
+ close_thread_tables(thd);
+ thd->mdl_context.release_transactional_locks();
+ thd->lex->restore_backup_query_tables_list(&lex_backup);
+
+ if (err)
+ break;
}
- thd->lex->restore_backup_query_tables_list(&lex_backup);
thd->variables.option_bits= thd_saved_option;
- thd->resume_subsequent_commits(suspended_wfc);
- DBUG_EXECUTE_IF("inject_record_gtid_serverid_100_sleep",
- {
- if (gtid->server_id == 100)
- my_sleep(500000);
- });
- DBUG_RETURN(err);
+
+#ifdef WITH_WSREP
+ thd->wsrep_ignore_table= false;
+#endif
}
@@ -1251,7 +1368,7 @@ rpl_slave_state::load(THD *thd, const char *state_from_master, size_t len,
if (gtid_parser_helper(&state_from_master, end, &gtid) ||
!(sub_id= next_sub_id(gtid.domain_id)) ||
- record_gtid(thd, &gtid, sub_id, NULL, in_statement, &hton) ||
+ record_gtid(thd, &gtid, sub_id, false, in_statement, &hton) ||
update(gtid.domain_id, gtid.server_id, sub_id, gtid.seq_no, hton, NULL))
return 1;
if (state_from_master == end)
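Taken together, these hunks move the cleanup of old mysql.gtid_slave_pos rows out of record_gtid(): update() now only counts pending GTIDs and, once opt_gtid_cleanup_batch_size is reached, wakes the slave background thread, which grabs the deletable entries and removes them in one batch. The selection rule in gtid_grab_pending_delete_list() (keep the entry with the highest sub_id per domain; everything else may go) can be sketched outside the server roughly as below. Standalone C++; the flat containers are illustrative, the real code walks intrusive per-domain lists.

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    struct Gtid { uint32_t domain_id; uint64_t sub_id; };

    // Keep only the newest (highest sub_id) entry per domain, needed for
    // GTID position recovery; everything else is safe to delete.
    static std::vector<Gtid> grab_deletable(const std::vector<Gtid> &recorded)
    {
      std::unordered_map<uint32_t, uint64_t> newest;   // domain_id -> max sub_id
      for (const Gtid &g : recorded)
      {
        auto it= newest.find(g.domain_id);
        if (it == newest.end() || it->second < g.sub_id)
          newest[g.domain_id]= g.sub_id;
      }
      std::vector<Gtid> deletable;
      for (const Gtid &g : recorded)
        if (newest[g.domain_id] != g.sub_id)
          deletable.push_back(g);
      return deletable;
    }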
diff --git a/sql/rpl_gtid.h b/sql/rpl_gtid.h
index 0fc92d5e33c..60d822f7b0d 100644
--- a/sql/rpl_gtid.h
+++ b/sql/rpl_gtid.h
@@ -118,8 +118,9 @@ struct rpl_slave_state
{
struct list_element *next;
uint64 sub_id;
- uint64 seq_no;
+ uint32 domain_id;
uint32 server_id;
+ uint64 seq_no;
/*
hton of mysql.gtid_slave_pos* table used to record this GTID.
Can be NULL if the gtid table failed to load (eg. missing
@@ -191,6 +192,8 @@ struct rpl_slave_state
/* Mapping from domain_id to its element. */
HASH hash;
+ /* GTIDs added since last purge of old mysql.gtid_slave_pos rows. */
+ uint32 pending_gtid_count;
/* Mutex protecting access to the state. */
mysql_mutex_t LOCK_slave_state;
/* Auxiliary buffer to sort gtid list. */
@@ -233,7 +236,10 @@ struct rpl_slave_state
int truncate_state_table(THD *thd);
void select_gtid_pos_table(THD *thd, LEX_CSTRING *out_tablename);
int record_gtid(THD *thd, const rpl_gtid *gtid, uint64 sub_id,
- rpl_group_info *rgi, bool in_statement, void **out_hton);
+ bool in_transaction, bool in_statement, void **out_hton);
+ list_element *gtid_grab_pending_delete_list();
+ LEX_CSTRING *select_gtid_pos_table(void *hton);
+ void gtid_delete_pending(THD *thd, rpl_slave_state::list_element **list_ptr);
uint64 next_sub_id(uint32 domain_id);
int iterate(int (*cb)(rpl_gtid *, void *), void *data,
rpl_gtid *extra_gtids, uint32 num_extra,
@@ -245,7 +251,7 @@ struct rpl_slave_state
bool is_empty();
element *get_element(uint32 domain_id);
- int put_back_list(uint32 domain_id, list_element *list);
+ int put_back_list(list_element *list);
void update_state_hash(uint64 sub_id, rpl_gtid *gtid, void *hton,
rpl_group_info *rgi);
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
index 6f659aa12ad..8d3e146f4c5 100644
--- a/sql/rpl_mi.cc
+++ b/sql/rpl_mi.cc
@@ -1091,7 +1091,7 @@ bool Master_info_index::init_all_master_info()
if ((index_file_nr= my_open(index_file_name,
O_RDWR | O_CREAT | O_BINARY ,
- MYF(MY_WME | ME_NOREFRESH))) < 0 ||
+ MYF(MY_WME | ME_ERROR_LOG))) < 0 ||
my_sync(index_file_nr, MYF(MY_WME)) ||
init_io_cache(&index_file, index_file_nr,
IO_SIZE, READ_CACHE,
@@ -1307,7 +1307,7 @@ Master_info *get_master_info(const LEX_CSTRING *connection_name,
if (warning != Sql_condition::WARN_LEVEL_NOTE)
my_error(WARN_NO_MASTER_INFO,
MYF(warning == Sql_condition::WARN_LEVEL_WARN ?
- ME_JUST_WARNING : 0),
+ ME_WARNING : 0),
(int) connection_name->length, connection_name->str);
mysql_mutex_unlock(&LOCK_active_mi);
DBUG_RETURN(0);
@@ -1377,7 +1377,7 @@ Master_info_index::get_master_info(const LEX_CSTRING *connection_name,
if (!mi && warning != Sql_condition::WARN_LEVEL_NOTE)
{
my_error(WARN_NO_MASTER_INFO,
- MYF(warning == Sql_condition::WARN_LEVEL_WARN ? ME_JUST_WARNING :
+ MYF(warning == Sql_condition::WARN_LEVEL_WARN ? ME_WARNING :
0),
(int) connection_name->length,
connection_name->str);
@@ -1436,7 +1436,7 @@ bool Master_info_index::add_master_info(Master_info *mi, bool write_to_file)
We have to protect against shutdown to ensure we are not calling
my_hash_insert() while my_hash_free() is in progress
*/
- if (unlikely(shutdown_in_progress) ||
+ if (unlikely(abort_loop) ||
!my_hash_insert(&master_info_hash, (uchar*) mi))
{
if (global_system_variables.log_warnings > 1)
@@ -1579,7 +1579,7 @@ uint any_slave_sql_running(bool already_locked)
if (!already_locked)
mysql_mutex_lock(&LOCK_active_mi);
- if (unlikely(shutdown_in_progress || !master_info_index))
+ if (unlikely(abort_loop || !master_info_index))
count= 1;
else
{
diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h
index 54d6b5be592..5de73254ed9 100644
--- a/sql/rpl_mi.h
+++ b/sql/rpl_mi.h
@@ -329,13 +329,13 @@ class Master_info : public Slave_reporting_capability
/* No of DDL event group */
- volatile uint64 total_ddl_groups;
+ Atomic_counter<uint64> total_ddl_groups;
/* No of non-transactional event group*/
- volatile uint64 total_non_trans_groups;
+ Atomic_counter<uint64> total_non_trans_groups;
/* No of transactional event group*/
- volatile uint64 total_trans_groups;
+ Atomic_counter<uint64> total_trans_groups;
/* domain-id based filter */
Domain_id_filter domain_id_filter;
diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc
index 144b12a9fdf..dc5e3ff1fbf 100644
--- a/sql/rpl_parallel.cc
+++ b/sql/rpl_parallel.cc
@@ -1023,7 +1023,7 @@ handle_rpl_parallel_thread(void *arg)
my_thread_init();
thd = new THD(next_thread_id());
thd->thread_stack = (char*)&thd;
- add_to_active_threads(thd);
+ server_threads.insert(thd);
set_current_thd(thd);
pthread_detach_this_thread();
thd->init_for_queries();
@@ -1432,7 +1432,7 @@ handle_rpl_parallel_thread(void *arg)
thd->temporary_tables= 0;
THD_CHECK_SENTRY(thd);
- unlink_not_visible_thd(thd);
+ server_threads.erase(thd);
delete thd;
mysql_mutex_lock(&rpt->LOCK_rpl_thread);
diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc
index db579a63ce0..84661fa513d 100644
--- a/sql/rpl_record.cc
+++ b/sql/rpl_record.cc
@@ -329,6 +329,7 @@ unpack_row(rpl_group_info *rgi,
(int) (pack_ptr - old_pack_ptr)));
if (!pack_ptr)
{
+#ifdef WITH_WSREP
if (WSREP_ON)
{
/*
@@ -344,7 +345,7 @@ unpack_row(rpl_group_info *rgi,
(table_found) ? "found" : "not found", row_end
);
}
-
+#endif /* WITH_WSREP */
rgi->rli->report(ERROR_LEVEL, ER_SLAVE_CORRUPT_EVENT,
rgi->gtid_info(),
"Could not read field '%s' of table '%s.%s'",
@@ -497,7 +498,9 @@ int prepare_record(TABLE *const table, const uint skip, const bool check)
DBUG_RETURN(0);
}
/**
- Fills @c table->record[0] with computed values of extra persistent column which are present on slave but not on master.
+ Fills @c table->record[0] with computed values of extra persistent columns
+ which are present on the slave but not on the master.
+
@param table Table whose record[0] buffer is prepared.
@param master_cols No of columns on master
@returns 0 on success
@@ -514,10 +517,8 @@ int fill_extra_persistent_columns(TABLE *table, int master_cols)
vfield= *vfield_ptr;
if (vfield->field_index >= master_cols && vfield->stored_in_db())
{
- /*Set bitmap for writing*/
- bitmap_set_bit(table->vcol_set, vfield->field_index);
+ bitmap_set_bit(table->write_set, vfield->field_index);
error= vfield->vcol_info->expr->save_in_field(vfield,0);
- bitmap_clear_bit(table->vcol_set, vfield->field_index);
}
}
return error;
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index b275ad884bd..2d91620c898 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -1820,6 +1820,7 @@ rpl_load_gtid_slave_state(THD *thd)
int err= 0;
uint32 i;
load_gtid_state_cb_data cb_data;
+ rpl_slave_state::list_element *old_gtids_list;
DBUG_ENTER("rpl_load_gtid_slave_state");
mysql_mutex_lock(&rpl_global_gtid_slave_state->LOCK_slave_state);
@@ -1905,6 +1906,13 @@ rpl_load_gtid_slave_state(THD *thd)
rpl_global_gtid_slave_state->loaded= true;
mysql_mutex_unlock(&rpl_global_gtid_slave_state->LOCK_slave_state);
+ /* Clear out no longer needed elements now. */
+ old_gtids_list=
+ rpl_global_gtid_slave_state->gtid_grab_pending_delete_list();
+ rpl_global_gtid_slave_state->gtid_delete_pending(thd, &old_gtids_list);
+ if (old_gtids_list)
+ rpl_global_gtid_slave_state->put_back_list(old_gtids_list);
+
end:
if (array_inited)
delete_dynamic(&array);
@@ -2086,7 +2094,6 @@ rpl_group_info::reinit(Relay_log_info *rli)
long_find_row_note_printed= false;
did_mark_start_commit= false;
gtid_ev_flags2= 0;
- pending_gtid_delete_list= NULL;
last_master_timestamp = 0;
gtid_ignore_duplicate_state= GTID_DUPLICATE_NULL;
speculation= SPECULATE_NO;
@@ -2217,12 +2224,6 @@ void rpl_group_info::cleanup_context(THD *thd, bool error)
erroneously update the GTID position.
*/
gtid_pending= false;
-
- /*
- Rollback will have undone any deletions of old rows we might have made
- in mysql.gtid_slave_pos. Put those rows back on the list to be deleted.
- */
- pending_gtid_deletes_put_back();
}
m_table_map.clear_tables();
slave_close_thread_tables(thd);
@@ -2448,78 +2449,6 @@ rpl_group_info::unmark_start_commit()
}
-/*
- When record_gtid() has deleted any old rows from the table
- mysql.gtid_slave_pos as part of a replicated transaction, save the list of
- rows deleted here.
-
- If later the transaction fails (eg. optimistic parallel replication), the
- deletes will be undone when the transaction is rolled back. Then we can
- put back the list of rows into the rpl_global_gtid_slave_state, so that
- we can re-do the deletes and avoid accumulating old rows in the table.
-*/
-void
-rpl_group_info::pending_gtid_deletes_save(uint32 domain_id,
- rpl_slave_state::list_element *list)
-{
- /*
- We should never get to a state where we try to save a new pending list of
- gtid deletes while we still have an old one. But make sure we handle it
- anyway just in case, so we avoid leaving stray entries in the
- mysql.gtid_slave_pos table.
- */
- DBUG_ASSERT(!pending_gtid_delete_list);
- if (unlikely(pending_gtid_delete_list))
- pending_gtid_deletes_put_back();
-
- pending_gtid_delete_list= list;
- pending_gtid_delete_list_domain= domain_id;
-}
-
-
-/*
- Take the list recorded by pending_gtid_deletes_save() and put it back into
- rpl_global_gtid_slave_state. This is needed if deletion of the rows was
- rolled back due to transaction failure.
-*/
-void
-rpl_group_info::pending_gtid_deletes_put_back()
-{
- if (pending_gtid_delete_list)
- {
- rpl_global_gtid_slave_state->put_back_list(pending_gtid_delete_list_domain,
- pending_gtid_delete_list);
- pending_gtid_delete_list= NULL;
- }
-}
-
-
-/*
- Free the list recorded by pending_gtid_deletes_save(). Done when the deletes
- in the list have been permanently committed.
-*/
-void
-rpl_group_info::pending_gtid_deletes_clear()
-{
- pending_gtid_deletes_free(pending_gtid_delete_list);
- pending_gtid_delete_list= NULL;
-}
-
-
-void
-rpl_group_info::pending_gtid_deletes_free(rpl_slave_state::list_element *list)
-{
- rpl_slave_state::list_element *next;
-
- while (list)
- {
- next= list->next;
- my_free(list);
- list= next;
- }
-}
-
-
rpl_sql_thread_info::rpl_sql_thread_info(Rpl_filter *filter)
: rpl_filter(filter)
{
diff --git a/sql/rpl_rli.h b/sql/rpl_rli.h
index d9f0e0e5d3b..b8b153c34be 100644
--- a/sql/rpl_rli.h
+++ b/sql/rpl_rli.h
@@ -757,11 +757,6 @@ struct rpl_group_info
/* Needs room for "Gtid D-S-N\x00". */
char gtid_info_buf[5+10+1+10+1+20+1];
- /* List of not yet committed deletions in mysql.gtid_slave_pos. */
- rpl_slave_state::list_element *pending_gtid_delete_list;
- /* Domain associated with pending_gtid_delete_list. */
- uint32 pending_gtid_delete_list_domain;
-
/*
The timestamp, from the master, of the commit event.
Used to do delayed update of rli->last_master_timestamp, for getting
@@ -903,12 +898,6 @@ struct rpl_group_info
char *gtid_info();
void unmark_start_commit();
- static void pending_gtid_deletes_free(rpl_slave_state::list_element *list);
- void pending_gtid_deletes_save(uint32 domain_id,
- rpl_slave_state::list_element *list);
- void pending_gtid_deletes_put_back();
- void pending_gtid_deletes_clear();
-
longlong get_row_stmt_start_timestamp()
{
return row_stmt_start_timestamp;
diff --git a/sql/select_handler.cc b/sql/select_handler.cc
new file mode 100644
index 00000000000..f020d2f6b80
--- /dev/null
+++ b/sql/select_handler.cc
@@ -0,0 +1,188 @@
+/*
+ Copyright (c) 2018, 2019 MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
+
+#include "mariadb.h"
+#include "sql_priv.h"
+#include "sql_select.h"
+#include "select_handler.h"
+
+
+/**
+ The methods of the Pushdown_select class.
+
+ The objects of this class are used for pushdown of select queries
+ into engines. The main method of the class is Pushdown_select::execute().
+ It initiates execution of a select query by a foreign engine, receives the
+ rows of the result set, puts them into the buffer of a temporary table and
+ sends them from the buffer directly to the output.
+
+ The method uses the functions of the select_handler interface to do this.
+ It also employs some helper functions to create the needed temporary
+ table and to send rows from the temporary table to the output.
+ The constructor of the class gets the select_handler interface as a parameter.
+*/
+
+
+Pushdown_select::Pushdown_select(SELECT_LEX *sel, select_handler *h)
+ : select(sel), handler(h)
+{
+ is_analyze= handler->thd->lex->analyze_stmt;
+}
+
+
+Pushdown_select::~Pushdown_select()
+{
+ delete handler;
+ select->select_h= NULL;
+}
+
+
+bool Pushdown_select::init()
+{
+ List<Item> types;
+ TMP_TABLE_PARAM tmp_table_param;
+ THD *thd= handler->thd;
+ DBUG_ENTER("Pushdown_select::init");
+ if (select->master_unit()->join_union_item_types(thd, types, 1))
+ DBUG_RETURN(true);
+ tmp_table_param.init();
+ tmp_table_param.field_count= types.elements;
+
+ handler->table= create_tmp_table(thd, &tmp_table_param, types,
+ (ORDER *) 0, false, 0,
+ TMP_TABLE_ALL_COLUMNS, 1,
+ &empty_clex_str, true, false);
+ if (!handler->table)
+ DBUG_RETURN(true);
+ if (handler->table->fill_item_list(&result_columns))
+ DBUG_RETURN(true);
+ DBUG_RETURN(false);
+}
+
+
+bool Pushdown_select::send_result_set_metadata()
+{
+ THD *thd= handler->thd;
+ Protocol *protocol= thd->protocol;
+ DBUG_ENTER("Pushdown_select::send_result_set_metadata");
+
+#ifdef WITH_WSREP
+ if (WSREP(thd) && thd->wsrep_retry_query)
+ {
+ WSREP_DEBUG("skipping select metadata");
+ DBUG_RETURN(false);
+ }
+ #endif /* WITH_WSREP */
+ if (protocol->send_result_set_metadata(&result_columns,
+ Protocol::SEND_NUM_ROWS |
+ Protocol::SEND_EOF))
+ DBUG_RETURN(true);
+
+ DBUG_RETURN(false);
+}
+
+
+bool Pushdown_select::send_data()
+{
+ THD *thd= handler->thd;
+ Protocol *protocol= thd->protocol;
+ DBUG_ENTER("Pushdown_select::send_data");
+
+ if (thd->killed == ABORT_QUERY)
+ DBUG_RETURN(false);
+
+ protocol->prepare_for_resend();
+ if (protocol->send_result_set_row(&result_columns))
+ {
+ protocol->remove_last_row();
+ DBUG_RETURN(true);
+ }
+
+ thd->inc_sent_row_count(1);
+
+ if (thd->vio_ok())
+ DBUG_RETURN(protocol->write());
+
+ DBUG_RETURN(false);
+}
+
+
+bool Pushdown_select::send_eof()
+{
+ THD *thd= handler->thd;
+ DBUG_ENTER("Pushdown_select::send_eof");
+
+ /*
+ Don't send EOF if we're in an error condition (which implies we've already
+ sent or are sending an error).
+ */
+ if (thd->is_error())
+ DBUG_RETURN(true);
+ ::my_eof(thd);
+ DBUG_RETURN(false);
+}
+
+
+int Pushdown_select::execute()
+{
+ int err;
+ THD *thd= handler->thd;
+
+ DBUG_ENTER("Pushdown_select::execute");
+
+ if ((err= handler->init_scan()))
+ goto error;
+
+ if (is_analyze)
+ {
+ handler->end_scan();
+ DBUG_RETURN(0);
+ }
+
+ if (send_result_set_metadata())
+ DBUG_RETURN(-1);
+
+ while (!(err= handler->next_row()))
+ {
+ if (thd->check_killed() || send_data())
+ {
+ handler->end_scan();
+ DBUG_RETURN(-1);
+ }
+ }
+
+ if (err != 0 && err != HA_ERR_END_OF_FILE)
+ goto error;
+
+ if ((err= handler->end_scan()))
+ goto error_2;
+
+ if (send_eof())
+ DBUG_RETURN(-1);
+
+ DBUG_RETURN(0);
+
+error:
+ handler->end_scan();
+error_2:
+ handler->print_error(err, MYF(0));
+ DBUG_RETURN(-1); // Error not sent to client
+}
+
+void select_handler::print_error(int error, myf errflag)
+{
+ my_error(ER_GET_ERRNO, MYF(0), error, hton_name(ht)->str);
+}
diff --git a/sql/select_handler.h b/sql/select_handler.h
new file mode 100644
index 00000000000..e2ad13b7cdf
--- /dev/null
+++ b/sql/select_handler.h
@@ -0,0 +1,72 @@
+/*
+ Copyright (c) 2018, 2019 MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef SELECT_HANDLER_INCLUDED
+#define SELECT_HANDLER_INCLUDED
+
+#include "mariadb.h"
+#include "sql_priv.h"
+
+/**
+ @class select_handler
+
+ This interface class is to be used for execution of select queries
+ by foreign engines.
+*/
+
+class select_handler
+{
+ public:
+ THD *thd;
+ handlerton *ht;
+
+ SELECT_LEX *select; // Select to be executed
+
+ /*
+ Temporary table where all results should be stored in record[0].
+ The table has a field for every item from the select_lex::item_list.
+ The table is actually never filled. Only its record buffer is used.
+ */
+ TABLE *table;
+
+ select_handler(THD *thd_arg, handlerton *ht_arg)
+ : thd(thd_arg), ht(ht_arg), table(0) {}
+
+ virtual ~select_handler() {}
+
+ /*
+ Functions to scan the select result set.
+ All of these return 0 if ok, or an error code in case of error.
+ */
+
+ /* Initialize the process of producing rows of result set */
+ virtual int init_scan() = 0;
+
+ /*
+ Put the next produced row of the result set in table->record[0]
+ and return 0. Return HA_ERR_END_OF_FILE if there are no more rows,
+ or another error number in case of a fatal error.
+ */
+ virtual int next_row() = 0;
+
+ /* Finish scanning */
+ virtual int end_scan() = 0;
+
+ /* Report errors */
+ virtual void print_error(int error, myf errflag);
+};
+
+#endif /* SELECT_HANDLER_INCLUDED */
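For illustration, a concrete handler only has to drive an engine-side cursor and place each row's values into the fields of table->record[0] that Pushdown_select created. A minimal sketch, where everything prefixed my_ is hypothetical and only the three virtual methods come from the interface above:

    class my_select_handler : public select_handler
    {
      my_cursor *cursor;                 // hypothetical engine-specific cursor
    public:
      my_select_handler(THD *thd_arg, handlerton *ht_arg, SELECT_LEX *sel,
                        my_cursor *cur)
        : select_handler(thd_arg, ht_arg), cursor(cur) { select= sel; }

      int init_scan() { return cursor->open(); }       // start producing rows

      int next_row()
      {
        // The engine fills the fields of table->record[0]; returning
        // HA_ERR_END_OF_FILE tells Pushdown_select::execute() to stop.
        if (!cursor->fetch_into(table))
          return HA_ERR_END_OF_FILE;
        return 0;
      }

      int end_scan() { return cursor->close(); }       // release the cursor
    };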
diff --git a/sql/semisync_master_ack_receiver.cc b/sql/semisync_master_ack_receiver.cc
index 81f494c9d34..e189fc5f631 100644
--- a/sql/semisync_master_ack_receiver.cc
+++ b/sql/semisync_master_ack_receiver.cc
@@ -204,7 +204,6 @@ void Ack_receiver::run()
thd->thread_stack= (char*) &thd;
thd->store_globals();
thd->security_ctx->skip_grants();
- thread_safe_increment32(&service_thread_count);
thd->set_command(COM_DAEMON);
init_net(&net, net_buff, REPLY_MESSAGE_MAX_LENGTH);
@@ -283,8 +282,6 @@ end:
sql_print_information("Stopping ack receiver thread");
m_status= ST_DOWN;
delete thd;
- thread_safe_decrement32(&service_thread_count);
- signal_thd_deleted();
mysql_cond_broadcast(&m_cond);
mysql_mutex_unlock(&m_mutex);
DBUG_VOID_RETURN;
diff --git a/sql/service_wsrep.cc b/sql/service_wsrep.cc
new file mode 100644
index 00000000000..8583897e064
--- /dev/null
+++ b/sql/service_wsrep.cc
@@ -0,0 +1,272 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+#include "mariadb.h"
+
+#include "mysql/service_wsrep.h"
+#include "wsrep/key.hpp"
+#include "wsrep_thd.h"
+#include "wsrep_trans_observer.h"
+#include "sql_class.h"
+#include "debug_sync.h"
+#include "log.h"
+
+extern "C" my_bool wsrep_on(const THD *thd)
+{
+ return my_bool(WSREP(thd));
+}
+
+extern "C" void wsrep_thd_LOCK(const THD *thd)
+{
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+}
+
+extern "C" void wsrep_thd_UNLOCK(const THD *thd)
+{
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+}
+
+extern "C" const char* wsrep_thd_client_state_str(const THD *thd)
+{
+ return wsrep::to_c_string(thd->wsrep_cs().state());
+}
+
+extern "C" const char* wsrep_thd_client_mode_str(const THD *thd)
+{
+ return wsrep::to_c_string(thd->wsrep_cs().mode());
+}
+
+extern "C" const char* wsrep_thd_transaction_state_str(const THD *thd)
+{
+ return wsrep::to_c_string(thd->wsrep_cs().transaction().state());
+}
+
+
+extern "C" const char *wsrep_thd_query(const THD *thd)
+{
+ return thd ? thd->query() : NULL;
+}
+
+extern "C" query_id_t wsrep_thd_transaction_id(const THD *thd)
+{
+ return thd->wsrep_cs().transaction().id().get();
+}
+
+extern "C" long long wsrep_thd_trx_seqno(const THD *thd)
+{
+ const wsrep::client_state& cs= thd->wsrep_cs();
+ if (cs.mode() == wsrep::client_state::m_toi)
+ {
+ return cs.toi_meta().seqno().get();
+ }
+ else
+ {
+ return cs.transaction().ws_meta().seqno().get();
+ }
+}
+
+extern "C" void wsrep_thd_self_abort(THD *thd)
+{
+ thd->wsrep_cs().bf_abort(wsrep::seqno(0));
+}
+
+extern "C" const char* wsrep_get_sr_table_name()
+{
+ return wsrep_sr_table_name_full;
+}
+
+extern "C" my_bool wsrep_get_debug()
+{
+ return wsrep_debug;
+}
+
+extern "C" my_bool wsrep_thd_is_local(const THD *thd)
+{
+ return thd->wsrep_cs().mode() == wsrep::client_state::m_local;
+}
+
+extern "C" my_bool wsrep_thd_is_applying(const THD *thd)
+{
+ return thd->wsrep_cs().mode() == wsrep::client_state::m_high_priority;
+}
+
+extern "C" my_bool wsrep_thd_is_toi(const THD *thd)
+{
+ return thd->wsrep_cs().mode() == wsrep::client_state::m_toi;
+}
+
+extern "C" my_bool wsrep_thd_is_local_toi(const THD *thd)
+{
+ return thd->wsrep_cs().mode() == wsrep::client_state::m_toi &&
+ thd->wsrep_cs().toi_mode() == wsrep::client_state::m_local;
+
+}
+
+extern "C" my_bool wsrep_thd_is_in_rsu(const THD *thd)
+{
+ return thd->wsrep_cs().mode() == wsrep::client_state::m_rsu;
+}
+
+extern "C" my_bool wsrep_thd_is_BF(const THD *thd, my_bool sync)
+{
+ my_bool status = FALSE;
+ if (thd && WSREP(thd))
+ {
+ if (sync) mysql_mutex_lock(&thd->LOCK_thd_data);
+ status = (wsrep_thd_is_applying(thd) || wsrep_thd_is_toi(thd));
+ if (sync) mysql_mutex_unlock(&thd->LOCK_thd_data);
+ }
+ return status;
+}
+
+extern "C" my_bool wsrep_thd_is_SR(const THD *thd)
+{
+ return thd && thd->wsrep_cs().transaction().is_streaming();
+}
+
+extern "C" void wsrep_handle_SR_rollback(THD *bf_thd,
+ THD *victim_thd)
+{
+ DBUG_ASSERT(victim_thd);
+ if (!victim_thd || !wsrep_on(bf_thd)) return;
+
+ WSREP_DEBUG("handle rollback, for deadlock: thd %llu trx_id %" PRIu64 " frags %zu conf %s",
+ victim_thd->thread_id,
+ victim_thd->wsrep_trx_id(),
+ victim_thd->wsrep_sr().fragments_certified(),
+ wsrep_thd_transaction_state_str(victim_thd));
+ if (bf_thd && bf_thd != victim_thd)
+ {
+ victim_thd->store_globals();
+ }
+ else
+ {
+ DEBUG_SYNC(victim_thd, "wsrep_before_SR_rollback");
+ }
+ if (bf_thd)
+ {
+ wsrep_bf_abort(bf_thd, victim_thd);
+ }
+ else
+ {
+ wsrep_thd_self_abort(victim_thd);
+ }
+ if (bf_thd && bf_thd != victim_thd)
+ {
+ bf_thd->store_globals();
+ }
+}
+
+extern "C" my_bool wsrep_thd_bf_abort(const THD *bf_thd, THD *victim_thd,
+ my_bool signal)
+{
+ if (WSREP(victim_thd) && !victim_thd->wsrep_trx().active())
+ {
+ WSREP_DEBUG("BF abort for non active transaction");
+ wsrep_start_transaction(victim_thd, victim_thd->wsrep_next_trx_id());
+ }
+ my_bool ret= wsrep_bf_abort(bf_thd, victim_thd);
+ /*
+ Send awake signal if victim was BF aborted or does not
+ have wsrep on. Note that this should never interrupt RSU
+ as RSU has paused the provider.
+ */
+ if ((ret || !wsrep_on(victim_thd)) && signal)
+ victim_thd->awake(KILL_QUERY);
+ return ret;
+}
+
+extern "C" my_bool wsrep_thd_skip_locking(const THD *thd)
+{
+ return thd && thd->wsrep_skip_locking;
+}
+
+extern "C" my_bool wsrep_thd_order_before(const THD *left, const THD *right)
+{
+ if (wsrep_thd_trx_seqno(left) < wsrep_thd_trx_seqno(right)) {
+ WSREP_DEBUG("BF conflict, order: %lld %lld\n",
+ (long long)wsrep_thd_trx_seqno(left),
+ (long long)wsrep_thd_trx_seqno(right));
+ return TRUE;
+ }
+ WSREP_DEBUG("waiting for BF, trx order: %lld %lld\n",
+ (long long)wsrep_thd_trx_seqno(left),
+ (long long)wsrep_thd_trx_seqno(right));
+ return FALSE;
+}
+
+extern "C" my_bool wsrep_thd_is_aborting(const MYSQL_THD thd)
+{
+ mysql_mutex_assert_owner(&thd->LOCK_thd_data);
+ if (thd != 0)
+ {
+ const wsrep::client_state& cs(thd->wsrep_cs());
+ const enum wsrep::transaction::state tx_state(cs.transaction().state());
+ switch (tx_state)
+ {
+ case wsrep::transaction::s_must_abort:
+ return (cs.state() == wsrep::client_state::s_exec ||
+ cs.state() == wsrep::client_state::s_result);
+ case wsrep::transaction::s_aborting:
+ case wsrep::transaction::s_aborted:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+static inline enum wsrep::key::type
+map_key_type(enum Wsrep_service_key_type type)
+{
+ switch (type)
+ {
+ case WSREP_SERVICE_KEY_SHARED: return wsrep::key::shared;
+ case WSREP_SERVICE_KEY_REFERENCE: return wsrep::key::reference;
+ case WSREP_SERVICE_KEY_UPDATE: return wsrep::key::update;
+ case WSREP_SERVICE_KEY_EXCLUSIVE: return wsrep::key::exclusive;
+ }
+ return wsrep::key::exclusive;
+}
+
+extern "C" int wsrep_thd_append_key(THD *thd,
+ const struct wsrep_key* key,
+ int n_keys,
+ enum Wsrep_service_key_type key_type)
+{
+ Wsrep_client_state& client_state(thd->wsrep_cs());
+ DBUG_ASSERT(client_state.transaction().active());
+ int ret= 0;
+ for (int i= 0; i < n_keys && ret == 0; ++i)
+ {
+ wsrep::key wsrep_key(map_key_type(key_type));
+ for (size_t kp= 0; kp < key[i].key_parts_num; ++kp)
+ {
+ wsrep_key.append_key_part(key[i].key_parts[kp].ptr, key[i].key_parts[kp].len);
+ }
+ ret= client_state.append_key(wsrep_key);
+ }
+ return ret;
+}
+
+extern "C" void wsrep_commit_ordered(THD *thd)
+{
+ if (wsrep_is_active(thd) &&
+ thd->wsrep_trx().state() == wsrep::transaction::s_committing &&
+ !wsrep_commit_will_write_binlog(thd))
+ {
+ thd->wsrep_cs().ordered_commit();
+ }
+}
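As a usage sketch, a storage engine could hand a two-part key to the replication layer through wsrep_thd_append_key() roughly as follows. The wsrep_buf_t/wsrep_key_t type names are an assumption inferred from the field accesses above (key_parts, key_parts_num, ptr, len); check the actual service header before relying on them.

    #include <cstring>

    // Hypothetical helper: append a shared key built from db and table name.
    static int append_table_key(THD *thd, const char *db, const char *table_name)
    {
      wsrep_buf_t parts[2]= {                // assumed part type: { ptr, len }
        { db,         strlen(db)         },
        { table_name, strlen(table_name) }
      };
      wsrep_key_t key= { parts, 2 };         // assumed key type: { parts, count }
      return wsrep_thd_append_key(thd, &key, 1, WSREP_SERVICE_KEY_SHARED);
    }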
diff --git a/sql/session_tracker.cc b/sql/session_tracker.cc
index f4dab11bb42..1566c2d7ade 100644
--- a/sql/session_tracker.cc
+++ b/sql/session_tracker.cc
@@ -734,7 +734,7 @@ bool Transaction_state_tracker::store(THD *thd, String *buf)
if ((thd->variables.session_track_transaction_info == TX_TRACK_CHISTICS) &&
(tx_changed & TX_CHG_CHISTICS))
{
- bool is_xa= (thd->transaction.xid_state.xa_state != XA_NOTR);
+ bool is_xa= thd->transaction.xid_state.is_explicit_XA();
size_t start;
/* 2 length by 1 byte and code */
@@ -911,7 +911,7 @@ bool Transaction_state_tracker::store(THD *thd, String *buf)
if ((tx_curr_state & TX_EXPLICIT) && is_xa)
{
- XID *xid= &thd->transaction.xid_state.xid;
+ XID *xid= thd->transaction.xid_state.get_xid();
long glen, blen;
buf->append(STRING_WITH_LEN("XA START"));
diff --git a/sql/set_var.cc b/sql/set_var.cc
index ddaf747908c..ae4e712c77d 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -742,7 +742,7 @@ int sql_set_variables(THD *thd, List<set_var_base> *var_list, bool free)
err:
if (free)
- free_underlaid_joins(thd, &thd->lex->select_lex);
+ free_underlaid_joins(thd, thd->lex->first_select_lex());
DBUG_RETURN(error);
}
diff --git a/sql/set_var.h b/sql/set_var.h
index 6e673cffefb..5f9720f0d5a 100644
--- a/sql/set_var.h
+++ b/sql/set_var.h
@@ -275,6 +275,10 @@ public:
virtual int update(THD *thd)=0; /* To set the value */
virtual int light_check(THD *thd) { return check(thd); } /* for PS */
virtual bool is_system() { return FALSE; }
+ /**
+ @returns whether this variable is @@@@optimizer_trace.
+ */
+ virtual bool is_var_optimizer_trace() const { return false; }
};
@@ -306,6 +310,11 @@ public:
int check(THD *thd);
int update(THD *thd);
int light_check(THD *thd);
+ virtual bool is_var_optimizer_trace() const
+ {
+ extern sys_var *Sys_optimizer_trace_ptr;
+ return var == Sys_optimizer_trace_ptr;
+ }
};
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index 74aed792f1d..feb47f9af94 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -5815,10 +5815,10 @@ ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR
eng "Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed"
ger "Konstante oder Random-Ausdrücke in (Unter-)Partitionsfunktionen sind nicht erlaubt"
swe "Konstanta uttryck eller slumpmässiga uttryck är inte tillåtna (sub)partitioneringsfunktioner"
-ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR
- eng "Expression in RANGE/LIST VALUES must be constant"
- ger "Ausdrücke in RANGE/LIST VALUES müssen konstant sein"
- swe "Uttryck i RANGE/LIST VALUES måste vara ett konstant uttryck"
+ER_NOT_CONSTANT_EXPRESSION
+ eng "Expression in %s must be constant"
+ ger "Ausdrücke in %s müssen konstant sein"
+ swe "Uttryck i %s måste vara ett konstant uttryck"
ER_FIELD_NOT_FOUND_PART_ERROR
eng "Field in list of fields for partition function not found in table"
ger "Felder in der Feldliste der Partitionierungsfunktion wurden in der Tabelle nicht gefunden"
@@ -6547,7 +6547,7 @@ ER_ACCESS_DENIED_NO_PASSWORD_ERROR 28000
ukr "ДоÑтуп заборонено Ð´Ð»Ñ ÐºÐ¾Ñ€Ð¸Ñтувача: '%s'@'%s'"
ER_SET_PASSWORD_AUTH_PLUGIN
- eng "SET PASSWORD has no significance for users authenticating via plugins"
+ eng "SET PASSWORD is ignored for users authenticating via %s plugin"
ER_GRANT_PLUGIN_USER_EXISTS
eng "GRANT with IDENTIFIED WITH is illegal because the user %-.*s already exists"
@@ -6919,6 +6919,7 @@ ER_NOT_VALID_PASSWORD
ER_MUST_CHANGE_PASSWORD
eng "You must SET PASSWORD before executing this statement"
bgn "ТрÑбва първо да Ñи Ñмените паролата ÑÑŠÑ SET PASSWORD за да можете да изпълните тази команда"
+ rum "Trebuie sa iti schimbi parola folosind SET PASSWORD inainte de a executa aceasta comanda"
ER_FK_NO_INDEX_CHILD
eng "Failed to add the foreign key constaint. Missing index for constraint '%s' in the foreign table '%s'"
@@ -7071,6 +7072,7 @@ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL
ER_MUST_CHANGE_PASSWORD_LOGIN
eng "Your password has expired. To log in you must change it using a client that supports expired passwords"
bgn "Паролата ви е изтекла. За да влезете Ñ‚Ñ€Ñбва да Ñ Ñмените използвайки клиент който поддрържа такива пароли"
+ rum "Parola ta a expirat. Pentru a te loga, trebuie sa o schimbi folosind un client ce suporta parole expirate"
ER_ROW_IN_WRONG_PARTITION
eng "Found a row in wrong partition %s"
@@ -7786,7 +7788,7 @@ ER_ARGUMENT_OUT_OF_RANGE
ER_WRONG_TYPE_OF_ARGUMENT
eng "%s function only accepts arguments that can be converted to numerical types"
ER_NOT_AGGREGATE_FUNCTION
- eng "Non-aggregate function contains aggregate specific instructions: (FETCH GROUP NEXT ROW)"
+ eng "Aggregate specific instruction (FETCH GROUP NEXT ROW) used in a wrong context"
ER_INVALID_AGGREGATE_FUNCTION
eng "Aggregate specific instruction(FETCH GROUP NEXT ROW) missing from the aggregate function"
ER_INVALID_VALUE_TO_LIMIT
@@ -7898,3 +7900,34 @@ ER_KEY_DOESNT_SUPPORT
eng "%s index %`s does not support this operation"
ER_ALTER_OPERATION_TABLE_OPTIONS_NEED_REBUILD
eng "Changing table options requires the table to be rebuilt"
+ER_BACKUP_LOCK_IS_ACTIVE
+ eng "Can't execute the command as you have a BACKUP STAGE active"
+ER_BACKUP_NOT_RUNNING
+ eng "You must start backup with \"BACKUP STAGE START\""
+ER_BACKUP_WRONG_STAGE
+ eng "Backup stage '%s' is same or before current backup stage '%s'"
+ER_BACKUP_STAGE_FAILED
+ eng "Backup stage '%s' failed"
+ER_BACKUP_UNKNOWN_STAGE
+ eng "Unknown backup stage: '%s'. Stage should be one of START, FLUSH, BLOCK_DDL, BLOCK_COMMIT or END"
+ER_USER_IS_BLOCKED
+ eng "User is blocked because of too many credential errors; unblock with 'FLUSH PRIVILEGES'"
+ER_ACCOUNT_HAS_BEEN_LOCKED
+ eng "Access denied, this account is locked"
+ rum "Acces refuzat, acest cont este blocat"
+ER_PERIOD_TEMPORARY_NOT_ALLOWED
+ eng "Application-time period table cannot be temporary"
+ER_PERIOD_TYPES_MISMATCH
+ eng "Fields of PERIOD FOR %`s have different types"
+ER_MORE_THAN_ONE_PERIOD
+ eng "Cannot specify more than one application-time period"
+ER_PERIOD_FIELD_WRONG_ATTRIBUTES
+ eng "Period field %`s cannot be %s"
+ER_PERIOD_NOT_FOUND
+ eng "Period %`s is not found in table"
+ER_PERIOD_COLUMNS_UPDATED
+ eng "Column %`s used in period %`s specified in update SET list"
+ER_PERIOD_CONSTRAINT_DROP
+ eng "Can't DROP CONSTRAINT `%s`. Use DROP PERIOD `%s` for this"
+ER_TOO_LONG_KEYPART 42000 S1009
+ eng "Specified key part was too long; max key part length is %u bytes"
diff --git a/sql/slave.cc b/sql/slave.cc
index 604c1de29a7..1430c9fa153 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -53,6 +53,9 @@
// Create_file_log_event,
// Format_description_log_event
#include "wsrep_mysqld.h"
+#ifdef WITH_WSREP
+#include "wsrep_trans_observer.h"
+#endif
#ifdef HAVE_REPLICATION
@@ -465,6 +468,8 @@ static struct slave_background_gtid_pos_create_t {
void *hton;
} *slave_background_gtid_pos_create_list;
+static volatile bool slave_background_gtid_pending_delete_flag;
+
pthread_handler_t
handle_slave_background(void *arg __attribute__((unused)))
@@ -477,7 +482,6 @@ handle_slave_background(void *arg __attribute__((unused)))
thd= new THD(next_thread_id());
thd->thread_stack= (char*) &thd; /* Set approximate stack start */
thd->system_thread = SYSTEM_THREAD_SLAVE_BACKGROUND;
- thread_safe_increment32(&service_thread_count);
thd->store_globals();
thd->security_ctx->skip_grants();
thd->set_command(COM_DAEMON);
@@ -499,22 +503,25 @@ handle_slave_background(void *arg __attribute__((unused)))
{
slave_background_kill_t *kill_list;
slave_background_gtid_pos_create_t *create_list;
+ bool pending_deletes;
thd->ENTER_COND(&COND_slave_background, &LOCK_slave_background,
&stage_slave_background_wait_request,
&old_stage);
for (;;)
{
- stop= abort_loop || thd->killed || slave_background_thread_stop;
+ stop= thd->killed || slave_background_thread_stop;
kill_list= slave_background_kill_list;
create_list= slave_background_gtid_pos_create_list;
- if (stop || kill_list || create_list)
+ pending_deletes= slave_background_gtid_pending_delete_flag;
+ if (stop || kill_list || create_list || pending_deletes)
break;
mysql_cond_wait(&COND_slave_background, &LOCK_slave_background);
}
slave_background_kill_list= NULL;
slave_background_gtid_pos_create_list= NULL;
+ slave_background_gtid_pending_delete_flag= false;
thd->EXIT_COND(&old_stage);
while (kill_list)
@@ -541,6 +548,17 @@ handle_slave_background(void *arg __attribute__((unused)))
create_list= next;
}
+ if (pending_deletes)
+ {
+ rpl_slave_state::list_element *list;
+
+ slave_background_gtid_pending_delete_flag= false;
+ list= rpl_global_gtid_slave_state->gtid_grab_pending_delete_list();
+ rpl_global_gtid_slave_state->gtid_delete_pending(thd, &list);
+ if (list)
+ rpl_global_gtid_slave_state->put_back_list(list);
+ }
+
mysql_mutex_lock(&LOCK_slave_background);
} while (!stop);
@@ -549,8 +567,6 @@ handle_slave_background(void *arg __attribute__((unused)))
mysql_mutex_unlock(&LOCK_slave_background);
delete thd;
- thread_safe_decrement32(&service_thread_count);
- signal_thd_deleted();
my_thread_end();
return 0;
@@ -615,6 +631,23 @@ slave_background_gtid_pos_create_request(
/*
+ Request the slave background thread to delete no longer used rows from the
+ mysql.gtid_slave_pos* tables.
+
+ This is called from the time-critical rpl_slave_state::update(), so we avoid
+ taking any locks here. This means we may race with the background thread
+ and occasionally lose a signal. This is not a problem; any pending rows to
+ be deleted will just be deleted a bit later as part of the next batch.
+*/
+void
+slave_background_gtid_pending_delete_request(void)
+{
+ slave_background_gtid_pending_delete_flag= true;
+ mysql_cond_signal(&COND_slave_background);
+}
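The signalling pattern described in the comment above (set a flag and signal without taking a lock, let the consumer re-check the flag under its mutex on every wakeup) can be sketched in isolation as below. A lost signal merely postpones the cleanup until the next wakeup, which is exactly the behaviour the comment accepts. Standalone C++ with illustrative names; the server itself uses a plain flag and its own mutex/condition primitives rather than std::atomic.

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    std::mutex bg_mutex;
    std::condition_variable bg_cond;
    std::atomic<bool> pending_deletes{false};

    void request_cleanup()                    // hot path: no lock taken
    {
      pending_deletes.store(true, std::memory_order_relaxed);
      bg_cond.notify_one();                   // may be lost; that is acceptable
    }

    void background_loop()
    {
      std::unique_lock<std::mutex> lk(bg_mutex);
      for (;;)
      {
        bg_cond.wait(lk, [] { return pending_deletes.load(); });
        pending_deletes.store(false);
        lk.unlock();
        /* ... delete the pending gtid_slave_pos rows in one batch ... */
        lk.lock();
      }
    }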
+
+
+/*
Start the slave background thread.
This thread is currently used for two purposes:
@@ -962,6 +995,8 @@ static void make_slave_transaction_retry_errors_printable(void)
}
+#define DEFAULT_SLAVE_RETRY_ERRORS 9
+
bool init_slave_transaction_retry_errors(const char* arg)
{
const char *p;
@@ -973,7 +1008,7 @@ bool init_slave_transaction_retry_errors(const char* arg)
if (!arg)
arg= "";
- slave_transaction_retry_error_length= 2;
+ slave_transaction_retry_error_length= DEFAULT_SLAVE_RETRY_ERRORS;
for (;my_isspace(system_charset_info,*arg);++arg)
/* empty */;
for (p= arg; *p; )
@@ -996,11 +1031,18 @@ bool init_slave_transaction_retry_errors(const char* arg)
currently, InnoDB deadlock detected by InnoDB or lock
wait timeout (innodb_lock_wait_timeout exceeded
*/
- slave_transaction_retry_errors[0]= ER_LOCK_DEADLOCK;
- slave_transaction_retry_errors[1]= ER_LOCK_WAIT_TIMEOUT;
+ slave_transaction_retry_errors[0]= ER_NET_READ_ERROR;
+ slave_transaction_retry_errors[1]= ER_NET_READ_INTERRUPTED;
+ slave_transaction_retry_errors[2]= ER_NET_ERROR_ON_WRITE;
+ slave_transaction_retry_errors[3]= ER_NET_WRITE_INTERRUPTED;
+ slave_transaction_retry_errors[4]= ER_LOCK_WAIT_TIMEOUT;
+ slave_transaction_retry_errors[5]= ER_LOCK_DEADLOCK;
+ slave_transaction_retry_errors[6]= ER_CONNECT_TO_FOREIGN_DATA_SOURCE;
+ slave_transaction_retry_errors[7]= 2013; /* CR_SERVER_LOST */
+ slave_transaction_retry_errors[8]= 12701; /* ER_SPIDER_REMOTE_SERVER_GONE_AWAY_NUM */
/* Add user codes after this */
- for (p= arg, i= 2; *p; )
+ for (p= arg, i= DEFAULT_SLAVE_RETRY_ERRORS; *p; )
{
if (!(p= str2int(p, 10, 0, LONG_MAX, &err_code)))
break;
@@ -1168,6 +1210,11 @@ terminate_slave_thread(THD *thd,
int error __attribute__((unused));
DBUG_PRINT("loop", ("killing slave thread"));
+#ifdef WITH_WSREP
+ /* awake_no_mutex() requires LOCK_thd_data to be locked if wsrep
+ is enabled */
+ if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data);
+#endif /* WITH_WSREP */
mysql_mutex_lock(&thd->LOCK_thd_kill);
#ifndef DONT_USE_THR_ALARM
/*
@@ -1181,6 +1228,9 @@ terminate_slave_thread(THD *thd,
thd->awake_no_mutex(NOT_KILLED);
mysql_mutex_unlock(&thd->LOCK_thd_kill);
+#ifdef WITH_WSREP
+ if (WSREP(thd)) mysql_mutex_unlock(&thd->LOCK_thd_data);
+#endif /* WITH_WSREP */
/*
There is a small chance that slave thread might miss the first
@@ -1433,7 +1483,7 @@ static bool io_slave_killed(Master_info* mi)
DBUG_ENTER("io_slave_killed");
DBUG_ASSERT(mi->slave_running); // tracking buffer overrun
- DBUG_RETURN(mi->abort_slave || abort_loop || mi->io_thd->killed);
+ DBUG_RETURN(mi->abort_slave || mi->io_thd->killed);
}
/**
@@ -1458,7 +1508,7 @@ static bool sql_slave_killed(rpl_group_info *rgi)
DBUG_ASSERT(rli->sql_driver_thd == thd);
DBUG_ASSERT(rli->slave_running == 1);// tracking buffer overrun
- if (abort_loop || rli->sql_driver_thd->killed || rli->abort_slave)
+ if (rli->sql_driver_thd->killed || rli->abort_slave)
{
/*
The transaction should always be binlogged if OPTION_KEEP_LOG is
@@ -3374,16 +3424,9 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full,
// Slave_SQL_Running_State
protocol->store(slave_sql_running_state, &my_charset_bin);
- uint64 events;
- events= (uint64)my_atomic_load64_explicit((volatile int64 *)
- &mi->total_ddl_groups, MY_MEMORY_ORDER_RELAXED);
- protocol->store(events);
- events= (uint64)my_atomic_load64_explicit((volatile int64 *)
- &mi->total_non_trans_groups, MY_MEMORY_ORDER_RELAXED);
- protocol->store(events);
- events= (uint64)my_atomic_load64_explicit((volatile int64 *)
- &mi->total_trans_groups, MY_MEMORY_ORDER_RELAXED);
- protocol->store(events);
+ protocol->store(mi->total_ddl_groups);
+ protocol->store(mi->total_non_trans_groups);
+ protocol->store(mi->total_trans_groups);
if (full)
{
@@ -3541,7 +3584,6 @@ static int init_slave_thread(THD* thd, Master_info *mi,
thd->system_thread = (thd_type == SLAVE_THD_SQL) ?
SYSTEM_THREAD_SLAVE_SQL : SYSTEM_THREAD_SLAVE_IO;
- thread_safe_increment32(&service_thread_count);
/* We must call store_globals() before doing my_net_init() */
if (init_thr_lock() || thd->store_globals() ||
@@ -3911,14 +3953,20 @@ apply_event_and_update_pos_apply(Log_event* ev, THD* thd, rpl_group_info *rgi,
exec_res= ev->apply_event(rgi);
#ifdef WITH_WSREP
- if (exec_res && thd->wsrep_conflict_state != NO_CONFLICT)
+ if (WSREP_ON)
+ {
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ if (exec_res &&
+ thd->wsrep_trx().state() != wsrep::transaction::s_executing)
{
- WSREP_DEBUG("SQL apply failed, res %d conflict state: %d",
- exec_res, thd->wsrep_conflict_state);
+ WSREP_DEBUG("SQL apply failed, res %d conflict state: %s",
+ exec_res, wsrep_thd_transaction_state_str(thd));
rli->abort_slave= 1;
rli->report(ERROR_LEVEL, ER_UNKNOWN_COM_ERROR, rgi->gtid_info(),
"Node has dropped from cluster");
}
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ }
#endif
#ifndef DBUG_OFF
@@ -4211,6 +4259,13 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
}
if (ev)
{
+#ifdef WITH_WSREP
+ if (wsrep_before_statement(thd))
+ {
+ WSREP_INFO("Wsrep before statement error");
+ DBUG_RETURN(1);
+ }
+#endif /* WITH_WSREP */
int exec_res;
Log_event_type typ= ev->get_type_code();
@@ -4242,9 +4297,9 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
rli->until_condition == Relay_log_info::UNTIL_RELAY_POS) &&
(ev->server_id != global_system_variables.server_id ||
rli->replicate_same_server_id) &&
- rli->is_until_satisfied((rli->get_flag(Relay_log_info::IN_TRANSACTION) || !ev->log_pos)
- ? rli->group_master_log_pos
- : ev->log_pos - ev->data_written))
+ rli->is_until_satisfied((rli->get_flag(Relay_log_info::IN_TRANSACTION) || !ev->log_pos)
+ ? rli->group_master_log_pos
+ : ev->log_pos - ev->data_written))
{
sql_print_information("Slave SQL thread stopped because it reached its"
" UNTIL position %llu", rli->until_pos());
@@ -4255,6 +4310,9 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
rli->abort_slave= 1;
rli->stop_for_until= true;
mysql_mutex_unlock(&rli->data_lock);
+#ifdef WITH_WSREP
+ wsrep_after_statement(thd);
+#endif /* WITH_WSREP */
delete ev;
DBUG_RETURN(1);
}
@@ -4292,7 +4350,12 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
if (res == 0)
rli->event_relay_log_pos= rli->future_event_relay_log_pos;
if (res >= 0)
+ {
+#ifdef WITH_WSREP
+ wsrep_after_statement(thd);
+#endif /* WITH_WSREP */
DBUG_RETURN(res);
+ }
/*
Else we proceed to execute the event non-parallel.
This is the case for pre-10.0 events without GTID, and for handling
@@ -4327,6 +4390,9 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
"aborted because of out-of-memory error");
mysql_mutex_unlock(&rli->data_lock);
delete ev;
+#ifdef WITH_WSREP
+ wsrep_after_statement(thd);
+#endif /* WITH_WSREP */
DBUG_RETURN(1);
}
@@ -4341,6 +4407,9 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
"thread aborted because of out-of-memory error");
mysql_mutex_unlock(&rli->data_lock);
delete ev;
+#ifdef WITH_WSREP
+ wsrep_after_statement(thd);
+#endif /* WITH_WSREP */
DBUG_RETURN(1);
}
/*
@@ -4369,13 +4438,17 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
retry.
*/
if (unlikely(exec_res == 2))
+ {
+#ifdef WITH_WSREP
+ wsrep_after_statement(thd);
+#endif /* WITH_WSREP */
DBUG_RETURN(1);
-
+ }
#ifdef WITH_WSREP
mysql_mutex_lock(&thd->LOCK_thd_data);
- if (thd->wsrep_conflict_state == NO_CONFLICT)
- {
- mysql_mutex_unlock(&thd->LOCK_thd_data);
+ enum wsrep::client_error wsrep_error= thd->wsrep_cs().current_error();
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ if (wsrep_error == wsrep::e_success)
#endif /* WITH_WSREP */
if (slave_trans_retries)
{
@@ -4388,8 +4461,8 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
We were in a transaction which has been rolled back because of a
temporary error;
let's seek back to BEGIN log event and retry it all again.
- Note, if lock wait timeout (innodb_lock_wait_timeout exceeded)
- there is no rollback since 5.0.13 (ref: manual).
+ Note, if lock wait timeout (innodb_lock_wait_timeout exceeded)
+ there is no rollback since 5.0.13 (ref: manual).
We have to not only seek but also
a) init_master_info(), to seek back to hot relay log's start
@@ -4450,13 +4523,11 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli,
serial_rgi->trans_retries));
}
}
-#ifdef WITH_WSREP
- }
- else
- mysql_mutex_unlock(&thd->LOCK_thd_data);
-#endif /* WITH_WSREP */
thread_safe_increment64(&rli->executed_entries);
+#ifdef WITH_WSREP
+ wsrep_after_statement(thd);
+#endif /* WITH_WSREP */
DBUG_RETURN(exec_res);
}
mysql_mutex_unlock(&rli->data_lock);
@@ -4626,7 +4697,7 @@ pthread_handler_t handle_slave_io(void *arg)
goto err_during_init;
}
thd->system_thread_info.rpl_io_info= &io_info;
- add_to_active_threads(thd);
+ server_threads.insert(thd);
mi->slave_running = MYSQL_SLAVE_RUN_NOT_CONNECT;
mi->abort_slave = 0;
mysql_mutex_unlock(&mi->run_lock);
@@ -5008,7 +5079,7 @@ err:
flush_master_info(mi, TRUE, TRUE);
THD_STAGE_INFO(thd, stage_waiting_for_slave_mutex_on_exit);
thd->add_status_to_global();
- unlink_not_visible_thd(thd);
+ server_threads.erase(thd);
mysql_mutex_lock(&mi->run_lock);
err_during_init:
@@ -5020,8 +5091,6 @@ err_during_init:
thd->assert_not_linked();
delete thd;
- thread_safe_decrement32(&service_thread_count);
- signal_thd_deleted();
mi->abort_slave= 0;
mi->slave_running= MYSQL_SLAVE_NOT_RUN;
@@ -5296,7 +5365,7 @@ pthread_handler_t handle_slave_sql(void *arg)
/* Ensure that slave can execute any alter table it gets from master */
thd->variables.alter_algorithm= (ulong) Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT;
- add_to_active_threads(thd);
+ server_threads.insert(thd);
/*
We are going to set slave_running to 1. Assuming slave I/O thread is
alive and connected, this is going to make Seconds_Behind_Master be 0
@@ -5383,12 +5452,6 @@ pthread_handler_t handle_slave_sql(void *arg)
}
#endif
-#ifdef WITH_WSREP
- thd->wsrep_exec_mode= LOCAL_STATE;
- /* synchronize with wsrep replication */
- if (WSREP_ON)
- wsrep_ready_wait();
-#endif
DBUG_PRINT("master_info",("log_file_name: %s position: %llu",
rli->group_master_log_name,
rli->group_master_log_pos));
@@ -5485,7 +5548,14 @@ pthread_handler_t handle_slave_sql(void *arg)
goto err;
}
mysql_mutex_unlock(&rli->data_lock);
-
+#ifdef WITH_WSREP
+ wsrep_open(thd);
+ if (wsrep_before_command(thd))
+ {
+ WSREP_WARN("Slave SQL wsrep_before_command() failed");
+ goto err;
+ }
+#endif /* WITH_WSREP */
/* Read queries from the IO/THREAD until this thread is killed */
thd->set_command(COM_SLAVE_SQL);
@@ -5522,10 +5592,16 @@ pthread_handler_t handle_slave_sql(void *arg)
if (exec_relay_log_event(thd, rli, serial_rgi))
{
#ifdef WITH_WSREP
- if (thd->wsrep_conflict_state != NO_CONFLICT)
+ if (WSREP_ON)
{
- wsrep_node_dropped= TRUE;
- rli->abort_slave= TRUE;
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+
+ if (thd->wsrep_cs().current_error())
+ {
+ wsrep_node_dropped = TRUE;
+ rli->abort_slave = TRUE;
+ }
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
}
#endif /* WITH_WSREP */
@@ -5558,6 +5634,10 @@ pthread_handler_t handle_slave_sql(void *arg)
"log '%s' at position %llu%s", RPL_LOG_NAME,
rli->group_master_log_pos, tmp.c_ptr_safe());
}
+#ifdef WITH_WSREP
+ wsrep_after_command_before_result(thd);
+ wsrep_after_command_after_result(thd);
+#endif /* WITH_WSREP */
err_before_start:
@@ -5631,7 +5711,7 @@ pthread_handler_t handle_slave_sql(void *arg)
}
THD_STAGE_INFO(thd, stage_waiting_for_slave_mutex_on_exit);
thd->add_status_to_global();
- unlink_not_visible_thd(thd);
+ server_threads.erase(thd);
mysql_mutex_lock(&rli->run_lock);
err_during_init:
@@ -5676,17 +5756,17 @@ err_during_init:
"SQL slave will continue");
wsrep_node_dropped= FALSE;
mysql_mutex_unlock(&rli->run_lock);
- WSREP_DEBUG("wsrep_conflict_state now: %d", thd->wsrep_conflict_state);
- WSREP_INFO("slave restart: %d", thd->wsrep_conflict_state);
- thd->wsrep_conflict_state= NO_CONFLICT;
goto wsrep_restart_point;
- } else {
+ }
+ else
+ {
WSREP_INFO("Slave error due to node going non-primary");
WSREP_INFO("wsrep_restart_slave was set and therefore slave will be "
- "automatically restarted when node joins back to cluster.");
+ "automatically restarted when node joins back to cluster");
wsrep_restart_slave_activated= TRUE;
}
}
+ wsrep_close(thd);
#endif /* WITH_WSREP */
/*
@@ -5703,8 +5783,6 @@ err_during_init:
delete serial_rgi;
delete thd;
- thread_safe_decrement32(&service_thread_count);
- signal_thd_deleted();
DBUG_LEAVE; // Must match DBUG_ENTER()
my_thread_end();
@@ -7904,8 +7982,8 @@ bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
{
struct st_version_range_for_one_bug {
uint bug_id;
- const uchar introduced_in[3]; // first version with bug
- const uchar fixed_in[3]; // first version with fix
+ Version introduced_in; // first version with bug
+ Version fixed_in; // first version with fix
};
static struct st_version_range_for_one_bug versions_for_all_bugs[]=
{
@@ -7915,19 +7993,17 @@ bool rpl_master_has_bug(const Relay_log_info *rli, uint bug_id, bool report,
{33029, { 5, 1, 0 }, { 5, 1, 12 } },
{37426, { 5, 1, 0 }, { 5, 1, 26 } },
};
- const uchar *master_ver=
- rli->relay_log.description_event_for_exec->server_version_split.ver;
-
- DBUG_ASSERT(sizeof(rli->relay_log.description_event_for_exec->server_version_split.ver) == 3);
+ const Version &master_ver=
+ rli->relay_log.description_event_for_exec->server_version_split;
for (uint i= 0;
i < sizeof(versions_for_all_bugs)/sizeof(*versions_for_all_bugs);i++)
{
- const uchar *introduced_in= versions_for_all_bugs[i].introduced_in,
- *fixed_in= versions_for_all_bugs[i].fixed_in;
+ const Version &introduced_in= versions_for_all_bugs[i].introduced_in;
+ const Version &fixed_in= versions_for_all_bugs[i].fixed_in;
if ((versions_for_all_bugs[i].bug_id == bug_id) &&
- (memcmp(introduced_in, master_ver, 3) <= 0) &&
- (memcmp(fixed_in, master_ver, 3) > 0) &&
+ introduced_in <= master_ver &&
+ fixed_in > master_ver &&
(pred == NULL || (*pred)(param)))
{
if (!report)
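For context, the Version comparisons introduced above replace raw memcmp() calls on uchar[3] version triples. A minimal standalone sketch of the equivalent lexicographic comparison (the struct below is an illustrative stand-in, not the server's actual Version class):

// Illustrative sketch only: lexicographic {major, minor, patch} comparison,
// i.e. the behaviour the old memcmp() calls emulated on raw uchar[3] arrays.
#include <cstdio>
#include <tuple>

struct VersionSketch                     // hypothetical stand-in type
{
  unsigned major, minor, patch;
  bool operator<=(const VersionSketch &o) const
  { return std::tie(major, minor, patch) <= std::tie(o.major, o.minor, o.patch); }
  bool operator>(const VersionSketch &o) const
  { return std::tie(major, minor, patch) > std::tie(o.major, o.minor, o.patch); }
};

int main()
{
  VersionSketch introduced_in{5, 1, 0}, fixed_in{5, 1, 12}, master{5, 1, 5};
  // The master is affected if it is at or past the version that introduced
  // the bug and strictly before the version that fixed it.
  bool affected= introduced_in <= master && fixed_in > master;
  std::printf("affected: %d\n", affected);   // prints 1
  return 0;
}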
diff --git a/sql/slave.h b/sql/slave.h
index 649d55b45b9..646fa178f81 100644
--- a/sql/slave.h
+++ b/sql/slave.h
@@ -276,8 +276,8 @@ bool net_request_file(NET* net, const char* fname);
void slave_background_kill_request(THD *to_kill);
void slave_background_gtid_pos_create_request
(rpl_slave_state::gtid_pos_table *table_entry);
+void slave_background_gtid_pending_delete_request(void);
-extern bool volatile abort_loop;
extern Master_info *active_mi; /* active_mi for multi-master */
extern Master_info *default_master_info; /* To replace active_mi */
extern Master_info_index *master_info_index;
diff --git a/sql/sp.cc b/sql/sp.cc
index af86737ebb9..6b38a0ddeb5 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -200,7 +200,8 @@ TABLE_FIELD_TYPE proc_table_fields[MYSQL_PROC_FIELD_COUNT] =
"'STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES',"
"'ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER',"
"'HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH',"
- "'EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT')") },
+ "'EMPTY_STRING_IS_NULL','SIMULTANEOUS_ASSIGNMENT',"
+ "'TIME_ROUND_FRACTIONAL')") },
{ NULL, 0 }
},
{
@@ -1468,7 +1469,7 @@ log:
log_query.ptr(), log_query.length(),
FALSE, FALSE, FALSE, 0))
{
- my_error(ER_ERROR_ON_WRITE, MYF(MY_WME), "binary log", -1);
+ my_error(ER_ERROR_ON_WRITE, MYF(0), "binary log", -1);
goto done;
}
thd->variables.sql_mode= 0;
@@ -1793,8 +1794,8 @@ bool lock_db_routines(THD *thd, const char *db)
close_system_tables(thd, &open_tables_state_backup);
/* We should already hold a global IX lock and a schema X lock. */
- DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::GLOBAL, "", "",
- MDL_INTENTION_EXCLUSIVE) &&
+ DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::BACKUP, "", "",
+ MDL_BACKUP_DDL) &&
thd->mdl_context.is_lock_owner(MDL_key::SCHEMA, db, "",
MDL_EXCLUSIVE));
DBUG_RETURN(thd->mdl_context.acquire_locks(&mdl_requests,
diff --git a/sql/sp.h b/sql/sp.h
index 380dd69d3a1..a72d5b78262 100644
--- a/sql/sp.h
+++ b/sql/sp.h
@@ -522,12 +522,11 @@ inline const Sp_handler *Sp_handler::handler(MDL_key::enum_mdl_namespace type)
return &sp_handler_procedure;
case MDL_key::PACKAGE_BODY:
return &sp_handler_package_body;
- case MDL_key::GLOBAL:
+ case MDL_key::BACKUP:
case MDL_key::SCHEMA:
case MDL_key::TABLE:
case MDL_key::TRIGGER:
case MDL_key::EVENT:
- case MDL_key::COMMIT:
case MDL_key::USER_LOCK:
case MDL_key::NAMESPACE_END:
break;
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 924fd04007e..f996c057908 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -29,6 +29,8 @@
#include "sql_derived.h" // mysql_handle_derived
#include "sql_cte.h"
#include "sql_select.h" // Virtual_tmp_table
+#include "opt_trace.h"
+#include "my_json_writer.h"
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation
@@ -44,6 +46,9 @@
#include "transaction.h" // trans_commit_stmt
#include "sql_audit.h"
#include "debug_sync.h"
+#ifdef WITH_WSREP
+#include "wsrep_trans_observer.h"
+#endif /* WITH_WSREP */
/*
Sufficient max length of printed destinations and frame offsets (all uints).
@@ -71,33 +76,6 @@ static void reset_start_time_for_sp(THD *thd)
}
-Item::Type
-sp_map_item_type(const Type_handler *handler)
-{
- if (handler == &type_handler_row)
- return Item::ROW_ITEM;
- enum_field_types type= real_type_to_type(handler->real_field_type());
-
- switch (type) {
- case MYSQL_TYPE_BIT:
- case MYSQL_TYPE_TINY:
- case MYSQL_TYPE_SHORT:
- case MYSQL_TYPE_LONG:
- case MYSQL_TYPE_LONGLONG:
- case MYSQL_TYPE_INT24:
- return Item::INT_ITEM;
- case MYSQL_TYPE_DECIMAL:
- case MYSQL_TYPE_NEWDECIMAL:
- return Item::DECIMAL_ITEM;
- case MYSQL_TYPE_FLOAT:
- case MYSQL_TYPE_DOUBLE:
- return Item::REAL_ITEM;
- default:
- return Item::STRING_ITEM;
- }
-}
-
-
bool Item_splocal::append_for_log(THD *thd, String *str)
{
if (fix_fields_if_needed(thd, NULL))
@@ -319,7 +297,7 @@ sp_get_flags_for_command(LEX *lex)
- EXPLAIN DELETE ...
- ANALYZE DELETE ...
*/
- if (lex->select_lex.item_list.is_empty() &&
+ if (lex->first_select_lex()->item_list.is_empty() &&
!lex->describe && !lex->analyze_stmt)
flags= 0;
else
@@ -512,7 +490,8 @@ sp_head::operator delete(void *ptr, size_t size) throw()
}
-sp_head::sp_head(sp_package *parent, const Sp_handler *sph)
+sp_head::sp_head(sp_package *parent, const Sp_handler *sph,
+ enum_sp_aggregate_type agg_type)
:Query_arena(&main_mem_root, STMT_INITIALIZED_FOR_SP),
Database_qualified_name(&null_clex_str, &null_clex_str),
m_parent(parent),
@@ -545,6 +524,7 @@ sp_head::sp_head(sp_package *parent, const Sp_handler *sph)
m_pcont(new (&main_mem_root) sp_pcontext()),
m_cont_level(0)
{
+ set_chistics_agg_type(agg_type);
m_first_instance= this;
m_first_free_instance= this;
m_last_cached_sp= this;
@@ -562,6 +542,7 @@ sp_head::sp_head(sp_package *parent, const Sp_handler *sph)
my_hash_init(&m_sptabs, system_charset_info, 0, 0, 0, sp_table_key, 0, 0);
my_hash_init(&m_sroutines, system_charset_info, 0, 0, 0, sp_sroutine_key,
0, 0);
+ m_security_ctx.init();
DBUG_VOID_RETURN;
}
@@ -570,7 +551,7 @@ sp_head::sp_head(sp_package *parent, const Sp_handler *sph)
sp_package::sp_package(LEX *top_level_lex,
const sp_name *name,
const Sp_handler *sph)
- :sp_head(NULL, sph),
+ :sp_head(NULL, sph, DEFAULT_AGGREGATE),
m_current_routine(NULL),
m_top_level_lex(top_level_lex),
m_rcontext(NULL),
@@ -1172,6 +1153,8 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
if (check_stack_overrun(thd, 7 * STACK_MIN_SIZE, (uchar*)&old_packet))
DBUG_RETURN(TRUE);
+ opt_trace_disable_if_no_security_context_access(thd);
+
/* init per-instruction memroot */
init_sql_alloc(&execute_mem_root, "per_instruction_memroot",
MEM_ROOT_BLOCK_SIZE, 0, MYF(0));
@@ -1353,6 +1336,13 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
sql_digest_state *parent_digest= thd->m_digest;
thd->m_digest= NULL;
+#ifdef WITH_WSREP
+ if (WSREP(thd) && thd->wsrep_next_trx_id() == WSREP_UNDEFINED_TRX_ID)
+ {
+ thd->set_wsrep_next_trx_id(thd->query_id);
+ WSREP_DEBUG("assigned new next trx ID for SP, trx id: %" PRIu64, thd->wsrep_next_trx_id());
+ }
+#endif /* WITH_WSREP */
err_status= i->execute(thd, &ip);
thd->m_digest= parent_digest;
@@ -1924,7 +1914,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
for (arg_no= 0; arg_no < argcount; arg_no++)
{
/* Arguments must be fixed in Item_func_sp::fix_fields */
- DBUG_ASSERT(argp[arg_no]->fixed);
+ DBUG_ASSERT(argp[arg_no]->is_fixed());
if ((err_status= (*func_ctx)->set_parameter(thd, arg_no, &(argp[arg_no]))))
goto err_with_cleanup;
@@ -2001,6 +1991,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
thd->variables.option_bits&= ~OPTION_BIN_LOG;
}
+ opt_trace_disable_if_no_stored_proc_func_access(thd, this);
/*
Switch to call arena/mem_root so objects like sp_cursor or
Item_cache holders for case expressions can be allocated on it.
@@ -2291,9 +2282,11 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
err_status= set_routine_security_ctx(thd, this, &save_security_ctx);
#endif
+ opt_trace_disable_if_no_stored_proc_func_access(thd, this);
if (!err_status)
{
err_status= execute(thd, TRUE);
+ DBUG_PRINT("info", ("execute returned %d", (int) err_status));
}
if (save_log_general)
@@ -2692,6 +2685,17 @@ sp_head::set_chistics(const st_sp_chistics &chistics)
m_chistics.comment.length);
}
+
+void
+sp_head::set_c_chistics(const st_sp_chistics &chistics)
+{
+ // Set all chistics but preserve agg_type.
+ enum_sp_aggregate_type save_agg_type= agg_type();
+ set_chistics(chistics);
+ set_chistics_agg_type(save_agg_type);
+}
+
+
void
sp_head::set_info(longlong created, longlong modified,
const st_sp_chistics &chistics, sql_mode_t sql_mode)
@@ -3315,6 +3319,13 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
thd->lex->safe_to_cache_query= 0;
#endif
+ Opt_trace_start ots(thd, m_lex->query_tables,
+ SQLCOM_SELECT, &m_lex->var_list,
+ NULL, 0,
+ thd->variables.character_set_client);
+
+ Json_writer_object trace_command(thd);
+ Json_writer_array trace_command_steps(thd, "steps");
if (open_tables)
res= check_dependencies_in_with_clauses(m_lex->with_clauses_list) ||
instr->exec_open_and_lock_tables(thd, m_lex->query_tables);
@@ -3594,6 +3605,49 @@ sp_instr_stmt::exec_core(THD *thd, uint *nextp)
(char *)thd->security_ctx->host_or_ip,
3);
int res= mysql_execute_command(thd);
+#ifdef WITH_WSREP
+ if (WSREP(thd))
+ {
+ if ((thd->is_fatal_error || thd->killed_errno()) &&
+ (thd->wsrep_trx().state() == wsrep::transaction::s_executing))
+ {
+ /*
+ The SP was killed, and not because of a wsrep conflict.
+ We skip the after_statement hook at this point because
+ otherwise it would clear the error and clean up the
+ whole transaction. For now we just return and finish
+ our handling once we are back in mysql_parse.
+ */
+ WSREP_DEBUG("Skipping after_command hook for killed SP");
+ }
+ else
+ {
+ const bool must_replay= wsrep_must_replay(thd);
+ (void) wsrep_after_statement(thd);
+ /*
+ Reset the return code to zero if the transaction was
+ replayed successfully.
+ */
+ if (res && must_replay && !wsrep_current_error(thd))
+ res= 0;
+ /*
+ The final wsrep error status for the statement is known only after
+ the wsrep_after_statement() call. If the error is set, override the
+ error in the THD diagnostics area and reset the wsrep client_state
+ error so that it does not get propagated via the client-server protocol.
+ */
+ if (wsrep_current_error(thd))
+ {
+ wsrep_override_error(thd, wsrep_current_error(thd),
+ wsrep_current_error_status(thd));
+ thd->wsrep_cs().reset_error();
+ /* Reset also thd->killed if it has been set during BF abort. */
+ if (thd->killed == KILL_QUERY)
+ thd->reset_killed();
+ }
+ }
+ }
+#endif /* WITH_WSREP */
MYSQL_QUERY_EXEC_DONE(res);
*nextp= m_ip+1;
return res;
@@ -4531,8 +4585,8 @@ int
sp_instr_error::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_error::execute");
-
my_message(m_errcode, ER_THD(thd, m_errcode), MYF(0));
+ WSREP_DEBUG("sp_instr_error: %s %d", ER_THD(thd, m_errcode), thd->is_error());
*nextp= m_ip+1;
DBUG_RETURN(-1);
}
@@ -4580,7 +4634,7 @@ sp_instr_set_case_expr::exec_core(THD *thd, uint *nextp)
thd->spcont->set_case_expr(thd, m_case_expr_id, &null_item))
{
/* If this also failed, we have to abort. */
- my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
+ my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATAL));
}
}
else
@@ -5120,6 +5174,36 @@ bool sp_head::spvar_fill_table_rowtype_reference(THD *thd,
}
+bool sp_head::check_group_aggregate_instructions_forbid() const
+{
+ if (unlikely(m_flags & sp_head::HAS_AGGREGATE_INSTR))
+ {
+ my_error(ER_NOT_AGGREGATE_FUNCTION, MYF(0));
+ return true;
+ }
+ return false;
+}
+
+
+bool sp_head::check_group_aggregate_instructions_require() const
+{
+ if (unlikely(!(m_flags & HAS_AGGREGATE_INSTR)))
+ {
+ my_error(ER_INVALID_AGGREGATE_FUNCTION, MYF(0));
+ return true;
+ }
+ return false;
+}
+
+
+bool sp_head::check_group_aggregate_instructions_function() const
+{
+ return agg_type() == GROUP_AGGREGATE ?
+ check_group_aggregate_instructions_require() :
+ check_group_aggregate_instructions_forbid();
+}
+
+
/*
In Oracle mode stored routines have an optional name
at the end of a declaration:
@@ -5154,6 +5238,19 @@ err:
}
+bool
+sp_head::check_standalone_routine_end_name(const sp_name *end_name) const
+{
+ if (end_name && !end_name->eq(this))
+ {
+ my_error(ER_END_IDENTIFIER_DOES_NOT_MATCH, MYF(0),
+ ErrConvDQName(end_name).ptr(), ErrConvDQName(this).ptr());
+ return true;
+ }
+ return false;
+}
+
+
ulong sp_head::sp_cache_version() const
{
return m_parent ? m_parent->sp_cache_version() : m_sp_cache_version;
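For context, the replay handling added to sp_instr_stmt::exec_core above reduces to one rule: a statement failure is suppressed when it was caused only by a mandatory replay that then completed without leaving a wsrep error. A standalone sketch of that decision (illustrative only, not server code):

// Illustrative sketch: mirrors the
// `if (res && must_replay && !wsrep_current_error(thd)) res= 0;`
// adjustment performed after wsrep_after_statement().
#include <cstdio>

static int adjust_exec_result(int res, bool must_replay, bool wsrep_error_left)
{
  if (res && must_replay && !wsrep_error_left)
    res= 0;                 // replay succeeded: hide the transient failure
  return res;
}

int main()
{
  std::printf("%d\n", adjust_exec_result(1, true,  false)); // 0: replayed successfully
  std::printf("%d\n", adjust_exec_result(1, true,  true));  // 1: replay left an error
  std::printf("%d\n", adjust_exec_result(1, false, false)); // 1: ordinary failure
  return 0;
}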
diff --git a/sql/sp_head.h b/sql/sp_head.h
index cf934603cf0..3365bf4883f 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -39,9 +39,6 @@
@{
*/
-Item::Type
-sp_map_item_type(const Type_handler *handler);
-
uint
sp_get_flags_for_command(LEX *lex);
@@ -186,6 +183,11 @@ private:
set_chistics() makes sure this.
*/
Sp_chistics m_chistics;
+ void set_chistics(const st_sp_chistics &chistics);
+ inline void set_chistics_agg_type(enum enum_sp_aggregate_type type)
+ {
+ m_chistics.agg_type= type;
+ }
public:
sql_mode_t m_sql_mode; ///< For SHOW CREATE and execution
bool m_explicit_name; /**< Prepend the db name? */
@@ -322,7 +324,8 @@ public:
static void
operator delete(void *ptr, size_t size) throw ();
- sp_head(sp_package *parent, const Sp_handler *handler);
+ sp_head(sp_package *parent, const Sp_handler *handler,
+ enum_sp_aggregate_type);
/// Initialize after we have reset mem_root
void
@@ -415,6 +418,10 @@ public:
const LEX_CSTRING *field_name,
Item *val, LEX *lex);
bool check_package_routine_end_name(const LEX_CSTRING &end_name) const;
+ bool check_standalone_routine_end_name(const sp_name *end_name) const;
+ bool check_group_aggregate_instructions_function() const;
+ bool check_group_aggregate_instructions_forbid() const;
+ bool check_group_aggregate_instructions_require() const;
private:
/**
Generate a code to set a single cursor parameter variable.
@@ -592,7 +599,8 @@ public:
if (!oldlex)
DBUG_RETURN(false); // Nothing to restore
LEX *sublex= thd->lex;
- if (thd->restore_from_local_lex_to_old_lex(oldlex))// This restores thd->lex
+ // This restores thd->lex and thd->stmt_lex
+ if (thd->restore_from_local_lex_to_old_lex(oldlex))
DBUG_RETURN(true);
if (!sublex->sp_lex_in_use)
{
@@ -731,11 +739,7 @@ public:
const LEX_CSTRING &db,
const LEX_CSTRING &table);
- void set_chistics(const st_sp_chistics &chistics);
- inline void set_chistics_agg_type(enum enum_sp_aggregate_type type)
- {
- m_chistics.agg_type= type;
- }
+ void set_c_chistics(const st_sp_chistics &chistics);
void set_info(longlong created, longlong modified,
const st_sp_chistics &chistics, sql_mode_t sql_mode);
@@ -2026,6 +2030,7 @@ private:
}; // class sp_instr_set_case_expr : public sp_instr_opt_meta
+bool check_show_routine_access(THD *thd, sp_head *sp, bool *full_access);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
bool
diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h
index e607315cdaf..7b38177236a 100644
--- a/sql/sp_pcontext.h
+++ b/sql/sp_pcontext.h
@@ -22,7 +22,6 @@
#endif
#include "sql_string.h" // LEX_STRING
-#include "mysql_com.h" // enum_field_types
#include "field.h" // Create_field
#include "sql_array.h" // Dynamic_array
diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc
index 3e77d8c357b..e71a529bc07 100644
--- a/sql/sp_rcontext.cc
+++ b/sql/sp_rcontext.cc
@@ -228,9 +228,10 @@ bool Qualified_column_ident::resolve_type_ref(THD *thd, Column_definition *def)
// Make %TYPE variables see temporary tables that shadow permanent tables
thd->temporary_tables= open_tables_state_backup.temporary_tables;
- if ((table_list= lex.select_lex.add_table_to_list(thd, this, NULL, 0,
- TL_READ_NO_INSERT,
- MDL_SHARED_READ)) &&
+ if ((table_list=
+ lex.first_select_lex()->add_table_to_list(thd, this, NULL, 0,
+ TL_READ_NO_INSERT,
+ MDL_SHARED_READ)) &&
!check_table_access(thd, SELECT_ACL, table_list, TRUE, UINT_MAX, FALSE) &&
!open_tables_only_view_structure(thd, table_list,
thd->mdl_context.has_locks()))
@@ -286,9 +287,10 @@ bool Table_ident::resolve_table_rowtype_ref(THD *thd,
// Make %ROWTYPE variables see temporary tables that shadow permanent tables
thd->temporary_tables= open_tables_state_backup.temporary_tables;
- if ((table_list= lex.select_lex.add_table_to_list(thd, this, NULL, 0,
- TL_READ_NO_INSERT,
- MDL_SHARED_READ)) &&
+ if ((table_list=
+ lex.first_select_lex()->add_table_to_list(thd, this, NULL, 0,
+ TL_READ_NO_INSERT,
+ MDL_SHARED_READ)) &&
!check_table_access(thd, SELECT_ACL, table_list, TRUE, UINT_MAX, FALSE) &&
!open_tables_only_view_structure(thd, table_list,
thd->mdl_context.has_locks()))
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index ed200bba763..87cfb2b95bb 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2018, Oracle and/or its affiliates.
- Copyright (c) 2009, 2018, MariaDB
+ Copyright (c) 2009, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -57,7 +57,10 @@
#include "sql_plugin_compat.h"
+#define MAX_SCRAMBLE_LENGTH 1024
+
bool mysql_user_table_is_in_short_password_format= false;
+bool using_global_priv_table= true;
static LEX_CSTRING native_password_plugin_name= {
STRING_WITH_LEN("mysql_native_password")
@@ -85,11 +88,19 @@ LEX_CSTRING current_role= { STRING_WITH_LEN("*current_role") };
LEX_CSTRING current_user_and_current_role= { STRING_WITH_LEN("*current_user_and_current_role") };
-#ifndef NO_EMBEDDED_ACCESS_CHECKS
static plugin_ref old_password_plugin;
-#endif
static plugin_ref native_password_plugin;
+static plugin_ref get_auth_plugin(THD *thd, const LEX_CSTRING &name, bool *locked)
+{
+ if (name.str == native_password_plugin_name.str)
+ return native_password_plugin;
+ else if (name.str == old_password_plugin_name.str)
+ return old_password_plugin;
+ *locked=true;
+ return my_plugin_lock_by_name(thd, &name, MYSQL_AUTHENTICATION_PLUGIN);
+}
+
/* Classes */
struct acl_host_and_ip
@@ -108,6 +119,9 @@ class ACL_ACCESS {
public:
ulong sort;
ulong access;
+ ACL_ACCESS()
+ :sort(0), access(0)
+ { }
};
/* ACL_HOST is used if no host is specified */
@@ -119,53 +133,88 @@ public:
char *db;
};
-class ACL_USER_BASE :public ACL_ACCESS
+class ACL_USER_BASE :public ACL_ACCESS, public Sql_alloc
{
public:
- static void *operator new(size_t size, MEM_ROOT *mem_root)
- { return (void*) alloc_root(mem_root, size); }
- static void operator delete(void *, MEM_ROOT *){}
+ ACL_USER_BASE()
+ :flags(0), user(null_clex_str)
+ {
+ bzero(&role_grants, sizeof(role_grants));
+ }
uchar flags; // field used to store various state information
LEX_CSTRING user;
/* list to hold references to granted roles (ACL_ROLE instances) */
DYNAMIC_ARRAY role_grants;
+ const char *get_username() { return user.str; }
};
-class ACL_USER :public ACL_USER_BASE
+class ACL_USER_PARAM
{
public:
+ ACL_USER_PARAM()
+ {
+ bzero(this, sizeof(*this));
+ }
acl_host_and_ip host;
size_t hostname_length;
USER_RESOURCES user_resource;
- uint8 salt[SCRAMBLE_LENGTH + 1]; // scrambled password in binary form
- uint8 salt_len; // 0 - no password, 4 - 3.20, 8 - 4.0, 20 - 4.1.1
enum SSL_type ssl_type;
+ uint password_errors;
const char *ssl_cipher, *x509_issuer, *x509_subject;
- LEX_CSTRING plugin;
- LEX_CSTRING auth_string;
LEX_CSTRING default_rolename;
+ struct AUTH { LEX_CSTRING plugin, auth_string, salt; } *auth;
+ uint nauth;
+ bool account_locked;
+ bool password_expired;
+ my_time_t password_last_changed;
+ longlong password_lifetime;
+
+ bool alloc_auth(MEM_ROOT *root, uint n)
+ {
+ return !(auth= (AUTH*) alloc_root(root, (nauth= n)*sizeof(AUTH)));
+ }
+};
+
+
+class ACL_USER :public ACL_USER_BASE,
+ public ACL_USER_PARAM
+{
+public:
+
+ ACL_USER() { }
+ ACL_USER(THD *thd, const LEX_USER &combo,
+ const Account_options &options,
+ const ulong privileges);
ACL_USER *copy(MEM_ROOT *root)
{
- ACL_USER *dst= (ACL_USER *) alloc_root(root, sizeof(ACL_USER));
- if (!dst)
+ ACL_USER *dst;
+ AUTH *dauth;
+ if (!multi_alloc_root(root, &dst, sizeof(ACL_USER),
+ &dauth, sizeof(AUTH)*nauth, NULL))
return 0;
*dst= *this;
- dst->user.str= safe_strdup_root(root, user.str);
- dst->user.length= user.length;
+ dst->user= safe_lexcstrdup_root(root, user);
dst->ssl_cipher= safe_strdup_root(root, ssl_cipher);
dst->x509_issuer= safe_strdup_root(root, x509_issuer);
dst->x509_subject= safe_strdup_root(root, x509_subject);
- if (plugin.str == native_password_plugin_name.str ||
- plugin.str == old_password_plugin_name.str)
- dst->plugin= plugin;
- else
- dst->plugin.str= strmake_root(root, plugin.str, plugin.length);
- dst->auth_string.str= safe_strdup_root(root, auth_string.str);
+ dst->auth= dauth;
+ for (uint i=0; i < nauth; i++, dauth++)
+ {
+ if (auth[i].plugin.str == native_password_plugin_name.str ||
+ auth[i].plugin.str == old_password_plugin_name.str)
+ dauth->plugin= auth[i].plugin;
+ else
+ dauth->plugin= safe_lexcstrdup_root(root, auth[i].plugin);
+ dauth->auth_string= safe_lexcstrdup_root(root, auth[i].auth_string);
+ if (auth[i].salt.length == 0)
+ dauth->salt= auth[i].salt;
+ else
+ dauth->salt= safe_lexcstrdup_root(root, auth[i].salt);
+ }
dst->host.hostname= safe_strdup_root(root, host.hostname);
- dst->default_rolename.str= safe_strdup_root(root, default_rolename.str);
- dst->default_rolename.length= default_rolename.length;
+ dst->default_rolename= safe_lexcstrdup_root(root, default_rolename);
bzero(&dst->role_grants, sizeof(role_grants));
return dst;
}
@@ -174,7 +223,7 @@ public:
{
CHARSET_INFO *cs= system_charset_info;
int res;
- res= strcmp(safe_str(user.str), safe_str(user2));
+ res= strcmp(user.str, user2);
if (!res)
res= my_strcasecmp(cs, host.hostname, host2);
return res;
@@ -184,7 +233,7 @@ public:
bool wild_eq(const char *user2, const char *host2, const char *ip2)
{
- if (strcmp(safe_str(user.str), safe_str(user2)))
+ if (strcmp(user.str, user2))
return false;
return compare_hostname(&host, host2, ip2 ? ip2 : host2);
@@ -223,6 +272,8 @@ public:
acl_host_and_ip host;
const char *user,*db;
ulong initial_access; /* access bits present in the table */
+
+ const char *get_username() { return user; }
};
#ifndef DBUG_OFF
@@ -232,7 +283,6 @@ ulong role_global_merges= 0, role_db_merges= 0, role_table_merges= 0,
#endif
#ifndef NO_EMBEDDED_ACCESS_CHECKS
-static bool fix_and_copy_user(LEX_USER *to, LEX_USER *from, THD *thd);
static void update_hostname(acl_host_and_ip *host, const char *hostname);
static ulong get_sort(uint count,...);
static bool show_proxy_grants (THD *, const char *, const char *,
@@ -275,10 +325,9 @@ public:
const char *proxied_host_arg, const char *proxied_user_arg,
bool with_grant_arg)
{
- user= (user_arg && *user_arg) ? user_arg : NULL;
+ user= user_arg;
update_hostname (&host, (host_arg && *host_arg) ? host_arg : NULL);
- proxied_user= (proxied_user_arg && *proxied_user_arg) ?
- proxied_user_arg : NULL;
+ proxied_user= proxied_user_arg;
update_hostname (&proxied_host,
(proxied_host_arg && *proxied_host_arg) ?
proxied_host_arg : NULL);
@@ -291,11 +340,10 @@ public:
bool with_grant_arg)
{
init ((host_arg && *host_arg) ? strdup_root (mem, host_arg) : NULL,
- (user_arg && *user_arg) ? strdup_root (mem, user_arg) : NULL,
+ strdup_root (mem, user_arg),
(proxied_host_arg && *proxied_host_arg) ?
strdup_root (mem, proxied_host_arg) : NULL,
- (proxied_user_arg && *proxied_user_arg) ?
- strdup_root (mem, proxied_user_arg) : NULL,
+ strdup_root (mem, proxied_user_arg),
with_grant_arg);
}
@@ -308,7 +356,7 @@ public:
const char *get_proxied_host() { return proxied_host.hostname; }
void set_user(MEM_ROOT *mem, const char *user_arg)
{
- user= user_arg && *user_arg ? strdup_root(mem, user_arg) : NULL;
+ user= *user_arg ? strdup_root(mem, user_arg) : "";
}
void set_host(MEM_ROOT *mem, const char *host_arg)
{
@@ -323,9 +371,8 @@ public:
{
sql_print_warning("'proxies_priv' entry '%s@%s %s@%s' "
"ignored in --skip-name-resolve mode.",
- safe_str(proxied_user),
- safe_str(proxied_host.hostname),
- safe_str(user),
+ proxied_user,
+ safe_str(proxied_host.hostname), user,
safe_str(host.hostname));
return TRUE;
}
@@ -345,11 +392,10 @@ public:
proxied_user_arg, proxied_user));
DBUG_RETURN(compare_hostname(&host, host_arg, ip_arg) &&
compare_hostname(&proxied_host, host_arg, ip_arg) &&
- (!user ||
+ (!*user ||
(user_arg && !wild_compare(user_arg, user, TRUE))) &&
- (!proxied_user ||
- (proxied_user && !wild_compare(proxied_user_arg,
- proxied_user, TRUE))));
+ (!*proxied_user ||
+ !wild_compare(proxied_user_arg, proxied_user, TRUE)));
}
@@ -381,8 +427,7 @@ public:
bool granted_on(const char *host_arg, const char *user_arg)
{
- return (((!user && (!user_arg || !user_arg[0])) ||
- (user && user_arg && !strcmp(user, user_arg))) &&
+ return (!strcmp(user, user_arg) &&
((!host.hostname && (!host_arg || !host_arg[0])) ||
(host.hostname && host_arg && !strcmp(host.hostname, host_arg))));
}
@@ -391,17 +436,15 @@ public:
void print_grant(String *str)
{
str->append(STRING_WITH_LEN("GRANT PROXY ON '"));
- if (proxied_user)
- str->append(proxied_user, strlen(proxied_user));
+ str->append(proxied_user);
str->append(STRING_WITH_LEN("'@'"));
if (proxied_host.hostname)
str->append(proxied_host.hostname, strlen(proxied_host.hostname));
str->append(STRING_WITH_LEN("' TO '"));
- if (user)
- str->append(user, strlen(user));
+ str->append(user);
str->append(STRING_WITH_LEN("'@'"));
if (host.hostname)
- str->append(host.hostname, strlen(host.hostname));
+ str->append(host.hostname);
str->append(STRING_WITH_LEN("'"));
if (with_grant)
str->append(STRING_WITH_LEN(" WITH GRANT OPTION"));
@@ -614,8 +657,11 @@ static DYNAMIC_ARRAY acl_wild_hosts;
static Hash_filo<acl_entry> *acl_cache;
static uint grant_version=0; /* Version of priv tables. incremented by acl_load */
static ulong get_access(TABLE *form,uint fieldnr, uint *next_field=0);
-static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b);
-static ulong get_sort(uint count,...);
+static int acl_compare(const ACL_ACCESS *a, const ACL_ACCESS *b);
+static int acl_user_compare(const ACL_USER *a, const ACL_USER *b);
+static void rebuild_acl_users();
+static int acl_db_compare(const ACL_DB *a, const ACL_DB *b);
+static void rebuild_acl_dbs();
static void init_check_host(void);
static void rebuild_check_host(void);
static void rebuild_role_grants(void);
@@ -624,8 +670,7 @@ static ACL_USER *find_user_wild(const char *host, const char *user, const char *
static ACL_ROLE *find_acl_role(const char *user);
static ROLE_GRANT_PAIR *find_role_grant_pair(const LEX_CSTRING *u, const LEX_CSTRING *h, const LEX_CSTRING *r);
static ACL_USER_BASE *find_acl_user_base(const char *user, const char *host);
-static bool update_user_table(THD *, const User_table &, const char *, const char *, const
- char *, size_t new_password_len);
+static bool update_user_table_password(THD *, const User_table&, const ACL_USER&);
static bool acl_load(THD *thd, const Grant_tables& grant_tables);
static inline void get_grantor(THD *thd, char* grantor);
static bool add_role_user_mapping(const char *uname, const char *hname, const char *rname);
@@ -670,7 +715,6 @@ HASH *Sp_handler_package_body::get_priv_hash() const
*/
enum enum_acl_tables
{
- USER_TABLE,
DB_TABLE,
TABLES_PRIV_TABLE,
COLUMNS_PRIV_TABLE,
@@ -679,7 +723,7 @@ enum enum_acl_tables
PROCS_PRIV_TABLE,
PROXIES_PRIV_TABLE,
ROLES_MAPPING_TABLE,
- TABLES_MAX // <== always the last
+ USER_TABLE // <== always the last
};
static const int Table_user= 1 << USER_TABLE;
@@ -691,6 +735,31 @@ static const int Table_procs_priv= 1 << PROCS_PRIV_TABLE;
static const int Table_proxies_priv= 1 << PROXIES_PRIV_TABLE;
static const int Table_roles_mapping= 1 << ROLES_MAPPING_TABLE;
+static LEX_CSTRING MYSQL_TABLE_NAME[USER_TABLE+1]= {
+ {STRING_WITH_LEN("db")},
+ {STRING_WITH_LEN("tables_priv")},
+ {STRING_WITH_LEN("columns_priv")},
+ {STRING_WITH_LEN("host")},
+ {STRING_WITH_LEN("procs_priv")},
+ {STRING_WITH_LEN("proxies_priv")},
+ {STRING_WITH_LEN("roles_mapping")},
+ {STRING_WITH_LEN("global_priv")}
+};
+static LEX_CSTRING MYSQL_TABLE_NAME_USER={STRING_WITH_LEN("user")};
+
+/**
+ Choose from either native or old password plugins when assigning a password
+*/
+
+static LEX_CSTRING &guess_auth_plugin(THD *thd, size_t password_len)
+{
+ if (thd->variables.old_passwords == 1 ||
+ password_len == SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
+ return old_password_plugin_name;
+ else
+ return native_password_plugin_name;
+}
+
/**
Base class representing a generic grant table from the mysql database.
@@ -705,105 +774,43 @@ class Grant_table_base
{
public:
/* Number of fields for this Grant Table. */
- uint num_fields() const { return tl.table->s->fields; }
+ uint num_fields() const { return m_table->s->fields; }
/* Check if the table exists after an attempt to open it was made.
Some tables, such as the host table in MySQL 5.6.7+ are missing. */
- bool table_exists() const { return tl.table; };
+ bool table_exists() const { return m_table; };
/* Initializes the READ_RECORD structure provided as a parameter
to read through the whole table, with all columns available. Cleaning up
is the caller's job. */
- bool init_read_record(READ_RECORD* info, THD* thd) const
+ bool init_read_record(READ_RECORD* info) const
{
- DBUG_ASSERT(tl.table);
- bool result= ::init_read_record(info, thd, tl.table, NULL, NULL, 1,
- true, false);
+ DBUG_ASSERT(m_table);
+ bool result= ::init_read_record(info, m_table->in_use, m_table,
+ NULL, NULL, 1, true, false);
if (!result)
- tl.table->use_all_columns();
+ m_table->use_all_columns();
return result;
}
- /* Return the number of privilege columns for this table. */
- uint num_privileges() const { return num_privilege_cols; }
- /* Return a privilege column by index. */
- Field* priv_field(uint privilege_idx) const
- {
- DBUG_ASSERT(privilege_idx < num_privileges());
- return tl.table->field[start_privilege_column + privilege_idx];
- }
-
- /* Fetch the privileges from the table as a set of bits. The first column
- is represented by the first bit in the result, the second column by the
- second bit, etc. */
- ulong get_access() const
- {
- return get_access(start_privilege_column,
- start_privilege_column + num_privileges() - 1);
- }
-
/* Return the underlying TABLE handle. */
- TABLE* table() const
- {
- return tl.table;
- }
-
- /** Check if the table was opened, issue an error otherwise. */
- int no_such_table() const
- {
- if (table_exists())
- return 0;
-
- my_error(ER_NO_SUCH_TABLE, MYF(0), tl.db.str, tl.alias.str);
- return 1;
- }
-
-
- protected:
- friend class Grant_tables;
+ TABLE* table() const { return m_table; }
- Grant_table_base() : start_privilege_column(0), num_privilege_cols(0)
- {
- tl.reset();
- };
-
- /* Initialization sequence common for all grant tables. This should be called
- after all table-specific initialization is performed. */
- void init(enum thr_lock_type lock_type, bool is_optional)
- {
- tl.open_type= OT_BASE_ONLY;
- if (lock_type >= TL_WRITE_ALLOW_WRITE)
- tl.updating= 1;
- if (is_optional)
- tl.open_strategy= TABLE_LIST::OPEN_IF_EXISTS;
- }
-
- /*
- Get all access bits from table between start_field and end_field indices.
-
- IMPLEMENTATION
- The record should be already read in table->record[0]. All privileges
- are specified as an ENUM(Y,N).
-
- SYNOPSIS
- get_access()
- start_field_idx The field index at which the first privilege
- specification begins.
- end_field_idx The field index at which the last privilege
- specification is located.
-
- RETURN VALUE
- privilege mask
- */
- ulong get_access(uint start_field_idx, uint end_field_idx) const
+ ulong get_access() const
{
ulong access_bits= 0, bit= 1;
- for (uint i = start_field_idx; i <= end_field_idx; i++, bit<<=1)
+ for (uint i = start_priv_columns; i < end_priv_columns; i++, bit<<=1)
{
- if (get_YN_as_bool(tl.table->field[i]))
+ if (get_YN_as_bool(m_table->field[i]))
access_bits|= bit;
}
return access_bits;
}
+ protected:
+ friend class Grant_tables;
+
+ Grant_table_base() : start_priv_columns(0), end_priv_columns(0), m_table(0)
+ { }
+
/* Compute how many privilege columns this table has. This method
can only be called after the table has been opened.
@@ -811,115 +818,424 @@ class Grant_table_base
A privilege column is of type enum('Y', 'N'). Privilege columns are
expected to be one after another.
*/
- void compute_num_privilege_cols()
+ void set_table(TABLE *table)
{
- if (!table_exists()) // Table does not exist or not opened.
+ if (!(m_table= table)) // Table does not exist or not opened.
return;
- num_privilege_cols= 0;
- for (uint i= 0; i < num_fields(); i++)
+ for (end_priv_columns= 0; end_priv_columns < num_fields(); end_priv_columns++)
{
- Field *field= tl.table->field[i];
- if (num_privilege_cols > 0 && field->real_type() != MYSQL_TYPE_ENUM)
- return;
+ Field *field= m_table->field[end_priv_columns];
if (field->real_type() == MYSQL_TYPE_ENUM &&
static_cast<Field_enum*>(field)->typelib->count == 2)
{
- num_privilege_cols++;
- if (num_privilege_cols == 1)
- start_privilege_column= i;
+ if (!start_priv_columns)
+ start_priv_columns= end_priv_columns;
}
+ else if (start_priv_columns)
+ break;
}
}
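For context, set_table() above locates the privilege columns as the first contiguous run of two-valued ENUM fields, recorded as the half-open range [start_priv_columns, end_priv_columns). A standalone sketch of the same scan over a plain boolean vector (illustrative only):

// Illustrative sketch: find the first contiguous run of privilege columns.
#include <cstdio>
#include <vector>

int main()
{
  // true = the field is a two-valued ENUM('N','Y'), i.e. a privilege column
  std::vector<bool> is_priv_enum= {false, false, false, true, true, true, false, false};
  size_t start= 0, end= 0;
  for (end= 0; end < is_priv_enum.size(); end++)
  {
    if (is_priv_enum[end])
    {
      if (!start)           // like the original, index 0 is never a privilege column
        start= end;
    }
    else if (start)
      break;                // the run has ended
  }
  std::printf("privilege columns: [%zu, %zu)\n", start, end); // prints [3, 6)
  return 0;
}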
/* The index at which privilege columns start. */
- uint start_privilege_column;
- /* The number of privilege columns in the table. */
- uint num_privilege_cols;
+ uint start_priv_columns;
+ /* The index after the last privilege column */
+ uint end_priv_columns;
- TABLE_LIST tl;
+ TABLE *m_table;
};
class User_table: public Grant_table_base
{
public:
- /* Field getters return NULL if the column is not present in the table.
- This is consistent only if the table is in a supported version. We do
- not guard against corrupt tables. (yet) */
- Field* host() const
- { return get_field(0); }
- Field* user() const
- { return get_field(1); }
- Field* password() const
- { return have_password() ? NULL : tl.table->field[2]; }
- /* Columns after privilege columns. */
- Field* ssl_type() const
- { return get_field(start_privilege_column + num_privileges()); }
- Field* ssl_cipher() const
- { return get_field(start_privilege_column + num_privileges() + 1); }
- Field* x509_issuer() const
- { return get_field(start_privilege_column + num_privileges() + 2); }
- Field* x509_subject() const
- { return get_field(start_privilege_column + num_privileges() + 3); }
- Field* max_questions() const
- { return get_field(start_privilege_column + num_privileges() + 4); }
- Field* max_updates() const
- { return get_field(start_privilege_column + num_privileges() + 5); }
- Field* max_connections() const
- { return get_field(start_privilege_column + num_privileges() + 6); }
- Field* max_user_connections() const
- { return get_field(start_privilege_column + num_privileges() + 7); }
- Field* plugin() const
- { return get_field(start_privilege_column + num_privileges() + 8); }
- Field* authentication_string() const
- { return get_field(start_privilege_column + num_privileges() + 9); }
- Field* password_expired() const
- { return get_field(start_privilege_column + num_privileges() + 10); }
- Field* is_role() const
- { return get_field(start_privilege_column + num_privileges() + 11); }
- Field* default_role() const
- { return get_field(start_privilege_column + num_privileges() + 12); }
- Field* max_statement_time() const
- { return get_field(start_privilege_column + num_privileges() + 13); }
+ bool init_read_record(READ_RECORD* info) const
+ {
+ return Grant_table_base::init_read_record(info) || setup_sysvars();
+ }
+
+ virtual LEX_CSTRING& name() const = 0;
+ virtual int get_auth(THD *, MEM_ROOT *, ACL_USER *u) const= 0;
+ virtual bool set_auth(const ACL_USER &u) const = 0;
+ virtual ulong get_access() const = 0;
+ virtual void set_access(ulong rights, bool revoke) const = 0;
+
+ char *get_host(MEM_ROOT *root) const
+ { return ::get_field(root, m_table->field[0]); }
+ int set_host(const char *s, size_t l) const
+ { return m_table->field[0]->store(s, l, system_charset_info); };
+ char *get_user(MEM_ROOT *root) const
+ { return ::get_field(root, m_table->field[1]); }
+ int set_user(const char *s, size_t l) const
+ { return m_table->field[1]->store(s, l, system_charset_info); };
+
+ virtual SSL_type get_ssl_type () const = 0;
+ virtual int set_ssl_type (SSL_type x) const = 0;
+ virtual const char* get_ssl_cipher (MEM_ROOT *root) const = 0;
+ virtual int set_ssl_cipher (const char *s, size_t l) const = 0;
+ virtual const char* get_x509_issuer (MEM_ROOT *root) const = 0;
+ virtual int set_x509_issuer (const char *s, size_t l) const = 0;
+ virtual const char* get_x509_subject (MEM_ROOT *root) const = 0;
+ virtual int set_x509_subject (const char *s, size_t l) const = 0;
+ virtual longlong get_max_questions () const = 0;
+ virtual int set_max_questions (longlong x) const = 0;
+ virtual longlong get_max_updates () const = 0;
+ virtual int set_max_updates (longlong x) const = 0;
+ virtual longlong get_max_connections () const = 0;
+ virtual int set_max_connections (longlong x) const = 0;
+ virtual longlong get_max_user_connections () const = 0;
+ virtual int set_max_user_connections (longlong x) const = 0;
+ virtual double get_max_statement_time () const = 0;
+ virtual int set_max_statement_time (double x) const = 0;
+ virtual bool get_is_role () const = 0;
+ virtual int set_is_role (bool x) const = 0;
+ virtual const char* get_default_role (MEM_ROOT *root) const = 0;
+ virtual int set_default_role (const char *s, size_t l) const = 0;
+ virtual bool get_account_locked () const = 0;
+ virtual int set_account_locked (bool x) const = 0;
+ virtual bool get_password_expired () const = 0;
+ virtual int set_password_expired (bool x) const = 0;
+ virtual my_time_t get_password_last_changed () const = 0;
+ virtual int set_password_last_changed (my_time_t x) const = 0;
+ virtual longlong get_password_lifetime () const = 0;
+ virtual int set_password_lifetime (longlong x) const = 0;
+
+ virtual ~User_table() {}
+ private:
+ friend class Grant_tables;
+ virtual int setup_sysvars() const = 0;
+};
- /*
- Check if a user entry in the user table is marked as being a role entry
+/* MySQL-3.23 to MariaDB 10.3 `user` table */
+class User_table_tabular: public User_table
+{
+ public:
- IMPLEMENTATION
- Access the coresponding column and check the coresponding ENUM of the form
- ENUM('N', 'Y')
+ LEX_CSTRING& name() const { return MYSQL_TABLE_NAME_USER; }
- SYNOPSIS
- check_is_role()
- form an open table to read the entry from.
- The record should be already read in table->record[0]
+ int get_auth(THD *thd, MEM_ROOT *root, ACL_USER *u) const
+ {
+ u->alloc_auth(root, 1);
+ if (have_password())
+ {
+ const char *as= safe_str(::get_field(&acl_memroot, password()));
+ u->auth->auth_string.str= as;
+ u->auth->auth_string.length= strlen(as);
+ u->auth->plugin= guess_auth_plugin(thd, u->auth->auth_string.length);
+ }
+ else
+ {
+ u->auth->plugin= native_password_plugin_name;
+ u->auth->auth_string= empty_clex_str;
+ }
+ if (plugin() && authstr())
+ {
+ char *tmpstr= ::get_field(&acl_memroot, plugin());
+ if (tmpstr)
+ {
+ const char *pw= u->auth->auth_string.str;
+ const char *as= safe_str(::get_field(&acl_memroot, authstr()));
+ if (*pw)
+ {
+ if (*as && strcmp(as, pw))
+ {
+ sql_print_warning("'user' entry '%s@%s' has both a password and an "
+ "authentication plugin specified. The password will be ignored.",
+ safe_str(get_user(thd->mem_root)), safe_str(get_host(thd->mem_root)));
+ }
+ else
+ as= pw;
+ }
+ u->auth->plugin.str= tmpstr;
+ u->auth->plugin.length= strlen(tmpstr);
+ u->auth->auth_string.str= as;
+ u->auth->auth_string.length= strlen(as);
+ }
+ }
+ return 0;
+ }
- RETURN VALUE
- TRUE if the user is marked as a role
- FALSE otherwise
- */
- bool check_is_role() const
+ bool set_auth(const ACL_USER &u) const
{
- /* Table version does not support roles */
- if (!is_role())
- return false;
+ if (u.nauth != 1)
+ return 1;
+ if (plugin())
+ {
+ if (have_password())
+ password()->reset();
+ plugin()->store(u.auth->plugin.str, u.auth->plugin.length, system_charset_info);
+ authstr()->store(u.auth->auth_string.str, u.auth->auth_string.length, system_charset_info);
+ }
+ else
+ {
+ if (u.auth->plugin.str != native_password_plugin_name.str &&
+ u.auth->plugin.str != old_password_plugin_name.str)
+ return 1;
+ password()->store(u.auth->auth_string.str, u.auth->auth_string.length, system_charset_info);
+ }
+ return 0;
+ }
+
+ ulong get_access() const
+ {
+ ulong access= Grant_table_base::get_access();
+ if ((num_fields() <= 13) && (access & CREATE_ACL))
+ access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL;
- return get_YN_as_bool(is_role());
+ if (num_fields() <= 18)
+ {
+ access|= LOCK_TABLES_ACL | CREATE_TMP_ACL | SHOW_DB_ACL;
+ if (access & FILE_ACL)
+ access|= REPL_CLIENT_ACL | REPL_SLAVE_ACL;
+ if (access & PROCESS_ACL)
+ access|= SUPER_ACL | EXECUTE_ACL;
+ }
+
+ if (num_fields() <= 31 && (access & CREATE_ACL))
+ access|= (CREATE_VIEW_ACL | SHOW_VIEW_ACL);
+
+ if (num_fields() <= 33)
+ {
+ if (access & CREATE_ACL)
+ access|= CREATE_PROC_ACL;
+ if (access & ALTER_ACL)
+ access|= ALTER_PROC_ACL;
+ }
+
+ if (num_fields() <= 36 && (access & GRANT_ACL))
+ access|= CREATE_USER_ACL;
+
+ if (num_fields() <= 37 && (access & SUPER_ACL))
+ access|= EVENT_ACL;
+
+ if (num_fields() <= 38 && (access & SUPER_ACL))
+ access|= TRIGGER_ACL;
+
+ if (num_fields() <= 46 && (access & DELETE_ACL))
+ access|= DELETE_HISTORY_ACL;
+
+ return access & GLOBAL_ACLS;
}
+ void set_access(ulong rights, bool revoke) const
+ {
+ ulong priv= SELECT_ACL;
+ for (uint i= start_priv_columns; i < end_priv_columns; i++, priv <<= 1)
+ {
+ if (priv & rights)
+ m_table->field[i]->store(1 + !revoke, 0);
+ }
+ }
- private:
- friend class Grant_tables;
+ SSL_type get_ssl_type () const
+ {
+ Field *f= get_field(end_priv_columns, MYSQL_TYPE_ENUM);
+ return (SSL_type)(f ? f->val_int()-1 : 0);
+ }
+ int set_ssl_type (SSL_type x) const
+ {
+ if (Field *f= get_field(end_priv_columns, MYSQL_TYPE_ENUM))
+ return f->store(x+1, 0);
+ else
+ return 1;
+ }
+ const char* get_ssl_cipher (MEM_ROOT *root) const
+ {
+ Field *f= get_field(end_priv_columns + 1, MYSQL_TYPE_BLOB);
+ return f ? ::get_field(root,f) : 0;
+ }
+ int set_ssl_cipher (const char *s, size_t l) const
+ {
+ if (Field *f= get_field(end_priv_columns + 1, MYSQL_TYPE_BLOB))
+ return f->store(s, l, &my_charset_latin1);
+ else
+ return 1;
+ }
+ const char* get_x509_issuer (MEM_ROOT *root) const
+ {
+ Field *f= get_field(end_priv_columns + 2, MYSQL_TYPE_BLOB);
+ return f ? ::get_field(root,f) : 0;
+ }
+ int set_x509_issuer (const char *s, size_t l) const
+ {
+ if (Field *f= get_field(end_priv_columns + 2, MYSQL_TYPE_BLOB))
+ return f->store(s, l, &my_charset_latin1);
+ else
+ return 1;
+ }
+ const char* get_x509_subject (MEM_ROOT *root) const
+ {
+ Field *f= get_field(end_priv_columns + 3, MYSQL_TYPE_BLOB);
+ return f ? ::get_field(root,f) : 0;
+ }
+ int set_x509_subject (const char *s, size_t l) const
+ {
+ if (Field *f= get_field(end_priv_columns + 3, MYSQL_TYPE_BLOB))
+ return f->store(s, l, &my_charset_latin1);
+ else
+ return 1;
+ }
+ longlong get_max_questions () const
+ {
+ Field *f= get_field(end_priv_columns + 4, MYSQL_TYPE_LONG);
+ return f ? f->val_int() : 0;
+ }
+ int set_max_questions (longlong x) const
+ {
+ if (Field *f= get_field(end_priv_columns + 4, MYSQL_TYPE_LONG))
+ return f->store(x, 0);
+ else
+ return 1;
+ }
+ longlong get_max_updates () const
+ {
+ Field *f= get_field(end_priv_columns + 5, MYSQL_TYPE_LONG);
+ return f ? f->val_int() : 0;
+ }
+ int set_max_updates (longlong x) const
+ {
+ if (Field *f= get_field(end_priv_columns + 5, MYSQL_TYPE_LONG))
+ return f->store(x, 0);
+ else
+ return 1;
+ }
+ longlong get_max_connections () const
+ {
+ Field *f= get_field(end_priv_columns + 6, MYSQL_TYPE_LONG);
+ return f ? f->val_int() : 0;
+ }
+ int set_max_connections (longlong x) const
+ {
+ if (Field *f= get_field(end_priv_columns + 6, MYSQL_TYPE_LONG))
+ return f->store(x, 0);
+ else
+ return 1;
+ }
+ longlong get_max_user_connections () const
+ {
+ Field *f= get_field(end_priv_columns + 7, MYSQL_TYPE_LONG);
+ return f ? f->val_int() : 0;
+ }
+ int set_max_user_connections (longlong x) const
+ {
+ if (Field *f= get_field(end_priv_columns + 7, MYSQL_TYPE_LONG))
+ return f->store(x, 0);
+ else
+ return 1;
+ }
+ double get_max_statement_time () const
+ {
+ Field *f= get_field(end_priv_columns + 13, MYSQL_TYPE_NEWDECIMAL);
+ return f ? f->val_real() : 0;
+ }
+ int set_max_statement_time (double x) const
+ {
+ if (Field *f= get_field(end_priv_columns + 13, MYSQL_TYPE_NEWDECIMAL))
+ return f->store(x);
+ else
+ return 1;
+ }
+ bool get_is_role () const
+ {
+ Field *f= get_field(end_priv_columns + 11, MYSQL_TYPE_ENUM);
+ return f ? f->val_int()-1 : 0;
+ }
+ int set_is_role (bool x) const
+ {
+ if (Field *f= get_field(end_priv_columns + 11, MYSQL_TYPE_ENUM))
+ return f->store(x+1, 0);
+ else
+ return 1;
+ }
+ const char* get_default_role (MEM_ROOT *root) const
+ {
+ Field *f= get_field(end_priv_columns + 12, MYSQL_TYPE_STRING);
+ return f ? ::get_field(root,f) : 0;
+ }
+ int set_default_role (const char *s, size_t l) const
+ {
+ if (Field *f= get_field(end_priv_columns + 12, MYSQL_TYPE_STRING))
+ return f->store(s, l, system_charset_info);
+ else
+ return 1;
+ }
+ /* On a MariaDB 10.3 user table, the account locking accessors will try to
+ get the content of the max_statement_time column, but they will fail due
+ to the typecheck in get_field. */
+ bool get_account_locked () const
+ {
+ Field *f= get_field(end_priv_columns + 13, MYSQL_TYPE_ENUM);
+ return f ? f->val_int()-1 : 0;
+ }
+ int set_account_locked (bool x) const
+ {
+ if (Field *f= get_field(end_priv_columns + 13, MYSQL_TYPE_ENUM))
+ return f->store(x+1, 0);
- /* Only Grant_tables can instantiate this class. */
- User_table() {};
+ return 1;
+ }
- void init(enum thr_lock_type lock_type)
+ bool get_password_expired () const
{
- /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
- tl.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_USER_NAME, NULL, lock_type);
- Grant_table_base::init(lock_type, false);
+ uint field_num= end_priv_columns + 10;
+
+ Field *f= get_field(field_num, MYSQL_TYPE_ENUM);
+ return f ? f->val_int()-1 : 0;
}
+ int set_password_expired (bool x) const
+ {
+ uint field_num= end_priv_columns + 10;
+
+ if (Field *f= get_field(field_num, MYSQL_TYPE_ENUM))
+ return f->store(x+1, 0);
+ return 1;
+ }
+ my_time_t get_password_last_changed () const
+ {
+ ulong unused_dec;
+ if (Field *f= get_field(end_priv_columns + 11, MYSQL_TYPE_TIMESTAMP2))
+ return f->get_timestamp(&unused_dec);
+ return 0;
+ }
+ int set_password_last_changed (my_time_t x) const
+ {
+ if (Field *f= get_field(end_priv_columns + 11, MYSQL_TYPE_TIMESTAMP2))
+ {
+ f->set_notnull();
+ return f->store_timestamp(x, 0);
+ }
+ return 1;
+ }
+ longlong get_password_lifetime () const
+ {
+ if (Field *f= get_field(end_priv_columns + 12, MYSQL_TYPE_SHORT))
+ {
+ if (f->is_null())
+ return -1;
+ return f->val_int();
+ }
+ return 0;
+ }
+ int set_password_lifetime (longlong x) const
+ {
+ if (Field *f= get_field(end_priv_columns + 12, MYSQL_TYPE_SHORT))
+ {
+ if (x < 0)
+ {
+ f->set_null();
+ return 0;
+ }
+ f->set_notnull();
+ return f->store(x, 0);
+ }
+ return 1;
+ }
+
+ virtual ~User_table_tabular() {}
+ private:
+ friend class Grant_tables;
+
+ /* Only Grant_tables can instantiate this class. */
+ User_table_tabular() {}
/* The user table is a bit different compared to the other Grant tables.
Usually, we only add columns to the grant tables when adding functionality.
@@ -931,183 +1247,541 @@ class User_table: public Grant_table_base
doesn't exist. This simplifies checking of table "version", as we don't
have to make use of num_fields() any more.
*/
- inline Field* get_field(uint field_num) const
+ inline Field* get_field(uint field_num, enum enum_field_types type) const
{
if (field_num >= num_fields())
return NULL;
+ Field *f= m_table->field[field_num];
+ return f->real_type() == type ? f : NULL;
+ }
- return tl.table->field[field_num];
+ int setup_sysvars() const
+ {
+ username_char_length= MY_MIN(m_table->field[1]->char_length(),
+ USERNAME_CHAR_LENGTH);
+ using_global_priv_table= false;
+
+ if (have_password()) // Password column might be missing. (MySQL 5.7.6+)
+ {
+ int password_length= password()->field_length /
+ password()->charset()->mbmaxlen;
+ if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
+ {
+ sql_print_error("Fatal error: mysql.user table is damaged or in "
+ "unsupported 3.20 format.");
+ return 1;
+ }
+
+ mysql_mutex_lock(&LOCK_global_system_variables);
+ if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH)
+ {
+ if (opt_secure_auth)
+ {
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ sql_print_error("Fatal error: mysql.user table is in old format, "
+ "but server started with --secure-auth option.");
+ return 1;
+ }
+ mysql_user_table_is_in_short_password_format= true;
+ if (global_system_variables.old_passwords)
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ else
+ {
+ extern sys_var *Sys_old_passwords_ptr;
+ Sys_old_passwords_ptr->value_origin= sys_var::AUTO;
+ global_system_variables.old_passwords= 1;
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ sql_print_warning("mysql.user table is not updated to new password format; "
+ "Disabling new password usage until "
+ "mysql_fix_privilege_tables is run");
+ }
+ m_table->in_use->variables.old_passwords= 1;
+ }
+ else
+ {
+ mysql_user_table_is_in_short_password_format= false;
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+ }
+ }
+ return 0;
}
/* Normally password column is the third column in the table. If privileges
start on the third column instead, we are missing the password column.
This means we are using a MySQL 5.7.6+ data directory. */
- bool have_password() const { return start_privilege_column == 2; }
+ bool have_password() const { return start_priv_columns == 3; }
+ Field* password() const { return m_table->field[2]; }
+ Field* plugin() const { return get_field(end_priv_columns + 8, MYSQL_TYPE_STRING); }
+ Field* authstr() const { return get_field(end_priv_columns + 9, MYSQL_TYPE_BLOB); }
+};
+
+/*
+ MariaDB 10.4 and up `global_priv` table
+
+ TODO possible optimizations:
+ * update json in-place if the new value can fit
+ * don't repeat get_value for every key, but use a streaming parser
+ to convert json into an in-memory object (ACL_USER?) in one json scan.
+ - this makes sense for acl_load(), but hardly for GRANT
+ * similarly, pack ACL_USER (?) into json in one go.
+ - doesn't make sense? GRANT rarely updates more than one field.
+*/
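For context, the comment above describes the JSON Priv column that the accessors below read and write. When a user has more than one authentication method, set_auth() serializes them into an "auth_or" array with one object per method, leaving an empty object {} for the entry whose values are promoted to the top-level "plugin"/"authentication_string" keys. A standalone sketch of that shape (illustrative values only; the real rows are produced by the server and JSON-escaped via append_str_value()):

// Illustrative sketch of the "auth_or" layout written by set_auth() below.
#include <cstdio>
#include <string>
#include <vector>

struct AuthSketch { std::string plugin, auth_string; };   // hypothetical helper

int main()
{
  // Hypothetical methods; index 0 plays the role of the entry promoted to
  // the top-level keys (a native/old password plugin, or the last entry).
  std::vector<AuthSketch> auth= { {"mysql_native_password", "*HASH"},
                                  {"unix_socket", ""} };
  std::string json= "[";
  for (size_t i= 0; i < auth.size(); i++)
  {
    if (i)
      json+= ",";
    if (i == 0)
      json+= "{}";                         // promoted entry: values stored at top level
    else
    {
      json+= "{\"plugin\":\"" + auth[i].plugin + "\"";
      if (!auth[i].auth_string.empty())    // empty auth strings are omitted
        json+= ",\"authentication_string\":\"" + auth[i].auth_string + "\"";
      json+= "}";
    }
  }
  json+= "]";
  std::printf("auth_or = %s\n", json.c_str());   // auth_or = [{},{"plugin":"unix_socket"}]
  return 0;
}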
+class User_table_json: public User_table
+{
+ LEX_CSTRING& name() const { return MYSQL_TABLE_NAME[USER_TABLE]; }
+
+ int get_auth(THD *thd, MEM_ROOT *root, ACL_USER *u) const
+ {
+ size_t array_len;
+ const char *array;
+ int vl;
+ const char *v;
+
+ if (get_value("auth_or", JSV_ARRAY, &array, &array_len))
+ {
+ u->alloc_auth(root, 1);
+ return get_auth1(thd, root, u, 0);
+ }
+
+ if (json_get_array_item(array, array + array_len, (int)array_len,
+ &v, &vl) != JSV_NOTHING)
+ return 1;
+ u->alloc_auth(root, vl);
+ for (uint i=0; i < u->nauth; i++)
+ {
+ if (json_get_array_item(array, array + array_len, i, &v, &vl) != JSV_OBJECT)
+ return 1;
+
+ const char *p, *a;
+ int pl, al;
+ switch (json_get_object_key(v, v + vl, "plugin", &p, &pl)) {
+ case JSV_STRING: u->auth[i].plugin.str= strmake_root(root, p, pl);
+ u->auth[i].plugin.length= pl;
+ break;
+ case JSV_NOTHING: if (get_auth1(thd, root, u, i))
+ return 1;
+ else
+ continue;
+ default: return 1;
+ }
+ switch (json_get_object_key(v, v + vl, "authentication_string", &a, &al)) {
+ case JSV_NOTHING: u->auth[i].auth_string= empty_clex_str;
+ break;
+ case JSV_STRING: u->auth[i].auth_string.str= strmake_root(root, a, al);
+ u->auth[i].auth_string.length= al;
+ break;
+ default: return 1;
+ }
+ }
+ return 0;
+ }
+
+ int get_auth1(THD *thd, MEM_ROOT *root, ACL_USER *u, uint n) const
+ {
+ const char *authstr= get_str_value(root, "authentication_string");
+ const char *plugin= get_str_value(root, "plugin");
+ if (plugin && authstr)
+ {
+ if (plugin && *plugin)
+ {
+ u->auth[n].plugin.str= plugin;
+ u->auth[n].plugin.length= strlen(plugin);
+ }
+ else
+ u->auth[n].plugin= native_password_plugin_name;
+ u->auth[n].auth_string.str= authstr;
+ u->auth[n].auth_string.length= strlen(authstr);
+ return 0;
+ }
+ return 1;
+ }
+
+ bool append_str_value(String *to, const LEX_CSTRING &str) const
+ {
+ to->append('"');
+ to->reserve(str.length*2);
+ int len= json_escape(system_charset_info, (uchar*)str.str, (uchar*)str.str + str.length,
+ to->charset(), (uchar*)to->end(), (uchar*)to->end() + str.length*2);
+ if (len < 0)
+ return 1;
+ to->length(to->length() + len);
+ to->append('"');
+ return 0;
+ }
+
+ bool set_auth(const ACL_USER &u) const
+ {
+ StringBuffer<JSON_SIZE> json(m_table->field[2]->charset());
+ if (u.nauth == 1)
+ return set_auth1(u, 0);
+ bool top_done = false;
+ json.append('[');
+ for (uint i=0; i < u.nauth; i++)
+ {
+ ACL_USER::AUTH * const auth= u.auth + i;
+ if (i)
+ json.append(',');
+ json.append('{');
+ if (!top_done &&
+ (auth->plugin.str == native_password_plugin_name.str ||
+ auth->plugin.str == old_password_plugin_name.str ||
+ i == u.nauth - 1))
+ {
+ if (set_auth1(u, i))
+ return 1;
+ top_done= true;
+ }
+ else
+ {
+ json.append(STRING_WITH_LEN("\"plugin\":"));
+ if (append_str_value(&json, auth->plugin))
+ return 1;
+ if (auth->auth_string.length)
+ {
+ json.append(STRING_WITH_LEN(",\"authentication_string\":"));
+ if (append_str_value(&json, auth->auth_string))
+ return 1;
+ }
+ }
+ json.append('}');
+ }
+ json.append(']');
+ return set_value("auth_or", json.ptr(), json.length(), false) == JSV_BAD_JSON;
+ }
+ bool set_auth1(const ACL_USER &u, uint i) const
+ {
+ return set_str_value("plugin",
+ u.auth[i].plugin.str, u.auth[i].plugin.length) ||
+ set_str_value("authentication_string",
+ u.auth[i].auth_string.str, u.auth[i].auth_string.length);
+ }
+ ulong get_access() const
+ {
+ /*
+ When new privileges are added, we'll start storing GLOBAL_ACLS
+ (or, for example, my_count_bits(GLOBAL_ACLS))
+ in the json too, which will allow us to do privilege upgrades.
+ */
+ return get_int_value("access") & GLOBAL_ACLS;
+ }
+ void set_access(ulong rights, bool revoke) const
+ {
+ ulong access= get_access();
+ if (revoke)
+ access&= ~rights;
+ else
+ access|= rights;
+ set_int_value("access", access & GLOBAL_ACLS);
+ }
+ const char *unsafe_str(const char *s) const
+ { return s[0] ? s : NULL; }
+
+ SSL_type get_ssl_type () const
+ { return (SSL_type)get_int_value("ssl_type"); }
+ int set_ssl_type (SSL_type x) const
+ { return set_int_value("ssl_type", x); }
+ const char* get_ssl_cipher (MEM_ROOT *root) const
+ { return unsafe_str(get_str_value(root, "ssl_cipher")); }
+ int set_ssl_cipher (const char *s, size_t l) const
+ { return set_str_value("ssl_cipher", s, l); }
+ const char* get_x509_issuer (MEM_ROOT *root) const
+ { return unsafe_str(get_str_value(root, "x509_issuer")); }
+ int set_x509_issuer (const char *s, size_t l) const
+ { return set_str_value("x509_issuer", s, l); }
+ const char* get_x509_subject (MEM_ROOT *root) const
+ { return unsafe_str(get_str_value(root, "x509_subject")); }
+ int set_x509_subject (const char *s, size_t l) const
+ { return set_str_value("x509_subject", s, l); }
+ longlong get_max_questions () const
+ { return get_int_value("max_questions"); }
+ int set_max_questions (longlong x) const
+ { return set_int_value("max_questions", x); }
+ longlong get_max_updates () const
+ { return get_int_value("max_updates"); }
+ int set_max_updates (longlong x) const
+ { return set_int_value("max_updates", x); }
+ longlong get_max_connections () const
+ { return get_int_value("max_connections"); }
+ int set_max_connections (longlong x) const
+ { return set_int_value("max_connections", x); }
+ longlong get_max_user_connections () const
+ { return get_int_value("max_user_connections"); }
+ int set_max_user_connections (longlong x) const
+ { return set_int_value("max_user_connections", x); }
+ double get_max_statement_time () const
+ { return get_double_value("max_statement_time"); }
+ int set_max_statement_time (double x) const
+ { return set_double_value("max_statement_time", x); }
+ bool get_is_role () const
+ { return get_bool_value("is_role"); }
+ int set_is_role (bool x) const
+ { return set_bool_value("is_role", x); }
+ const char* get_default_role (MEM_ROOT *root) const
+ { return get_str_value(root, "default_role"); }
+ int set_default_role (const char *s, size_t l) const
+ { return set_str_value("default_role", s, l); }
+ bool get_account_locked () const
+ { return get_bool_value("account_locked"); }
+ int set_account_locked (bool x) const
+ { return set_bool_value("account_locked", x); }
+ my_time_t get_password_last_changed () const
+ { return static_cast<my_time_t>(get_int_value("password_last_changed")); }
+ int set_password_last_changed (my_time_t x) const
+ { return set_int_value("password_last_changed", static_cast<longlong>(x)); }
+ int set_password_lifetime (longlong x) const
+ { return set_int_value("password_lifetime", x); }
+ longlong get_password_lifetime () const
+ { return get_int_value("password_lifetime", -1); }
+ /*
+ password_last_changed=0 means the password is manually expired.
+ In MySQL 5.7+ this state is described using the password_expired column
+ in mysql.user
+ */
+ bool get_password_expired () const
+ { return get_int_value("password_last_changed", -1) == 0; }
+ int set_password_expired (bool x) const
+ { return x ? set_password_last_changed(0) : 0; }
+
+ ~User_table_json() {}
+ private:
+ friend class Grant_tables;
+ static const uint JSON_SIZE=1024;
+ int setup_sysvars() const
+ {
+ using_global_priv_table= true;
+ username_char_length= MY_MIN(m_table->field[1]->char_length(),
+ USERNAME_CHAR_LENGTH);
+ return 0;
+ }
+ bool get_value(const char *key,
+ enum json_types vt, const char **v, size_t *vl) const
+ {
+ enum json_types value_type;
+ int int_vl;
+ String str, *res= m_table->field[2]->val_str(&str);
+ if (!res ||
+ (value_type= json_get_object_key(res->ptr(), res->end(), key,
+ v, &int_vl)) == JSV_BAD_JSON)
+ return 1; // invalid
+ *vl= int_vl;
+ return value_type != vt;
+ }
+ const char *get_str_value(MEM_ROOT *root, const char *key) const
+ {
+ size_t value_len;
+ const char *value_start;
+ if (get_value(key, JSV_STRING, &value_start, &value_len))
+ return "";
+ char *ptr= (char*)alloca(value_len);
+ int len= json_unescape(m_table->field[2]->charset(),
+ (const uchar*)value_start,
+ (const uchar*)value_start + value_len,
+ system_charset_info,
+ (uchar*)ptr, (uchar*)ptr + value_len);
+ if (len < 0)
+ return NULL;
+ return strmake_root(root, ptr, len);
+ }
+ longlong get_int_value(const char *key, longlong def_val= 0) const
+ {
+ int err;
+ size_t value_len;
+ const char *value_start;
+ if (get_value(key, JSV_NUMBER, &value_start, &value_len))
+ return def_val;
+ const char *value_end= value_start + value_len;
+ return my_strtoll10(value_start, (char**)&value_end, &err);
+ }
+ double get_double_value(const char *key) const
+ {
+ int err;
+ size_t value_len;
+ const char *value_start;
+ if (get_value(key, JSV_NUMBER, &value_start, &value_len))
+ return 0;
+ const char *value_end= value_start + value_len;
+ return my_strtod(value_start, (char**)&value_end, &err);
+ }
+ bool get_bool_value(const char *key) const
+ {
+ size_t value_len;
+ const char *value_start;
+ if (get_value(key, JSV_TRUE, &value_start, &value_len))
+ return false;
+ return true;
+ }
+ enum json_types set_value(const char *key,
+ const char *val, size_t vlen, bool string) const
+ {
+ int value_len;
+ const char *value_start;
+ enum json_types value_type;
+ String str, *res= m_table->field[2]->val_str(&str);
+ if (!res || !res->length())
+ (res= &str)->set(STRING_WITH_LEN("{}"), m_table->field[2]->charset());
+ value_type= json_get_object_key(res->ptr(), res->end(), key,
+ &value_start, &value_len);
+ if (value_type == JSV_BAD_JSON)
+ return value_type; // invalid
+ StringBuffer<JSON_SIZE> json(res->charset());
+ json.copy(res->ptr(), value_start - res->ptr(), res->charset());
+ if (value_type == JSV_NOTHING)
+ {
+ if (value_len)
+ json.append(',');
+ json.append('"');
+ json.append(key);
+ json.append(STRING_WITH_LEN("\":"));
+ if (string)
+ json.append('"');
+ }
+ else
+ value_start+= value_len;
+ json.append(val, vlen);
+ if (!value_type && string)
+ json.append('"');
+ json.append(value_start, res->end() - value_start);
+ DBUG_ASSERT(json_valid(json.ptr(), json.length(), json.charset()));
+ m_table->field[2]->store(json.ptr(), json.length(), json.charset());
+ return value_type;
+ }
+ bool set_str_value(const char *key, const char *val, size_t vlen) const
+ {
+ char buf[JSON_SIZE];
+ int blen= json_escape(system_charset_info,
+ (const uchar*)val, (const uchar*)val + vlen,
+ m_table->field[2]->charset(),
+ (uchar*)buf, (uchar*)buf+sizeof(buf));
+ if (blen < 0)
+ return 1;
+ return set_value(key, buf, blen, true) == JSV_BAD_JSON;
+ }
+ bool set_int_value(const char *key, longlong val) const
+ {
+ char v[MY_INT64_NUM_DECIMAL_DIGITS+1];
+ size_t vlen= longlong10_to_str(val, v, -10) - v;
+ return set_value(key, v, vlen, false) == JSV_BAD_JSON;
+ }
+ bool set_double_value(const char *key, double val) const
+ {
+ char v[FLOATING_POINT_BUFFER+1];
+ size_t vlen= my_fcvt(val, TIME_SECOND_PART_DIGITS, v, NULL);
+ return set_value(key, v, vlen, false) == JSV_BAD_JSON;
+ }
+ bool set_bool_value(const char *key, bool val) const
+ {
+ return set_value(key, val ? "true" : "false", val ? 4 : 5, false) == JSV_BAD_JSON;
+ }
};
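
As the class above shows, the third column of mysql.global_priv (field[2]) holds one flat JSON object per account, with keys such as "access", "plugin", "authentication_string", "ssl_type", "max_questions", "is_role", "default_role", "account_locked" and "password_last_changed". When an account has several authentication methods, set_auth() stores them in an "auth_or" array; the entry promoted to the top-level "plugin"/"authentication_string" keys is left as an empty {} placeholder in the array, which get_auth() resolves back through get_auth1(). The set_value() helper updates the serialized object with plain string surgery rather than a parsed document: copy everything up to the old value (or up to the closing brace if the key is absent), emit the new value, then copy the tail. A standalone sketch of the same read-modify-write idea, using std::string and a deliberately naive key lookup in place of json_get_object_key():

#include <iostream>
#include <string>

// Naive helpers for a flat, single-level JSON object with unescaped keys --
// just enough to illustrate the splice performed by set_value() above.
static bool find_value(const std::string &json, const std::string &key,
                       size_t *vbeg, size_t *vend)
{
  std::string needle = "\"" + key + "\":";
  size_t pos = json.find(needle);
  if (pos == std::string::npos)
    return false;
  *vbeg = pos + needle.size();
  size_t i = *vbeg;
  if (json[i] == '"')                       // string value: scan to closing quote
  {
    for (++i; i < json.size() && json[i] != '"'; ++i) {}
    ++i;                                    // include the closing quote
  }
  else                                      // number or boolean value
  {
    while (i < json.size() && json[i] != ',' && json[i] != '}')
      ++i;
  }
  *vend = i;
  return true;
}

// Replace the value of `key`, or append "key":value before the closing brace.
static std::string set_value(std::string json, const std::string &key,
                             const std::string &value)
{
  size_t vbeg, vend;
  if (find_value(json, key, &vbeg, &vend))
    return json.substr(0, vbeg) + value + json.substr(vend);
  const char *sep = (json.size() > 2) ? "," : "";   // "{}" needs no leading comma
  return json.substr(0, json.size() - 1) + sep +
         "\"" + key + "\":" + value + "}";
}

int main()
{
  std::string priv = "{\"access\":1073741823,"
                     "\"plugin\":\"mysql_native_password\","
                     "\"authentication_string\":\"\"}";
  priv = set_value(priv, "max_questions", "100");   // add a missing key
  priv = set_value(priv, "access", "0");            // overwrite an existing key
  std::cout << priv << "\n";
  return 0;
}
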
class Db_table: public Grant_table_base
{
public:
- Field* host() const { return tl.table->field[0]; }
- Field* db() const { return tl.table->field[1]; }
- Field* user() const { return tl.table->field[2]; }
+ Field* host() const { return m_table->field[0]; }
+ Field* db() const { return m_table->field[1]; }
+ Field* user() const { return m_table->field[2]; }
private:
friend class Grant_tables;
- Db_table() {};
-
- void init(enum thr_lock_type lock_type)
- {
- /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
- tl.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_DB_NAME, NULL, lock_type);
- Grant_table_base::init(lock_type, false);
- }
+ Db_table() {}
};
class Tables_priv_table: public Grant_table_base
{
public:
- Field* host() const { return tl.table->field[0]; }
- Field* db() const { return tl.table->field[1]; }
- Field* user() const { return tl.table->field[2]; }
- Field* table_name() const { return tl.table->field[3]; }
- Field* grantor() const { return tl.table->field[4]; }
- Field* timestamp() const { return tl.table->field[5]; }
- Field* table_priv() const { return tl.table->field[6]; }
- Field* column_priv() const { return tl.table->field[7]; }
+ Field* host() const { return m_table->field[0]; }
+ Field* db() const { return m_table->field[1]; }
+ Field* user() const { return m_table->field[2]; }
+ Field* table_name() const { return m_table->field[3]; }
+ Field* grantor() const { return m_table->field[4]; }
+ Field* timestamp() const { return m_table->field[5]; }
+ Field* table_priv() const { return m_table->field[6]; }
+ Field* column_priv() const { return m_table->field[7]; }
private:
friend class Grant_tables;
- Tables_priv_table() {};
-
- void init(enum thr_lock_type lock_type, Grant_table_base *next_table= NULL)
- {
- /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
- LEX_CSTRING MYSQL_TABLES_PRIV_NAME={STRING_WITH_LEN("tables_priv") };
- tl.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_TABLES_PRIV_NAME, NULL, lock_type);
- Grant_table_base::init(lock_type, false);
- }
+ Tables_priv_table() {}
};
class Columns_priv_table: public Grant_table_base
{
public:
- Field* host() const { return tl.table->field[0]; }
- Field* db() const { return tl.table->field[1]; }
- Field* user() const { return tl.table->field[2]; }
- Field* table_name() const { return tl.table->field[3]; }
- Field* column_name() const { return tl.table->field[4]; }
- Field* timestamp() const { return tl.table->field[5]; }
- Field* column_priv() const { return tl.table->field[6]; }
+ Field* host() const { return m_table->field[0]; }
+ Field* db() const { return m_table->field[1]; }
+ Field* user() const { return m_table->field[2]; }
+ Field* table_name() const { return m_table->field[3]; }
+ Field* column_name() const { return m_table->field[4]; }
+ Field* timestamp() const { return m_table->field[5]; }
+ Field* column_priv() const { return m_table->field[6]; }
private:
friend class Grant_tables;
- Columns_priv_table() {};
-
- void init(enum thr_lock_type lock_type)
- {
- /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
- LEX_CSTRING MYSQL_COLUMNS_PRIV_NAME={ STRING_WITH_LEN("columns_priv") };
- tl.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_COLUMNS_PRIV_NAME, NULL, lock_type);
- Grant_table_base::init(lock_type, false);
- }
+ Columns_priv_table() {}
};
class Host_table: public Grant_table_base
{
public:
- Field* host() const { return tl.table->field[0]; }
- Field* db() const { return tl.table->field[1]; }
+ Field* host() const { return m_table->field[0]; }
+ Field* db() const { return m_table->field[1]; }
private:
friend class Grant_tables;
Host_table() {}
-
- void init(enum thr_lock_type lock_type)
- {
- /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
- LEX_CSTRING MYSQL_HOST_NAME={STRING_WITH_LEN("host") };
- tl.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_HOST_NAME, NULL, lock_type);
- Grant_table_base::init(lock_type, true);
- }
};
class Procs_priv_table: public Grant_table_base
{
public:
- Field* host() const { return tl.table->field[0]; }
- Field* db() const { return tl.table->field[1]; }
- Field* user() const { return tl.table->field[2]; }
- Field* routine_name() const { return tl.table->field[3]; }
- Field* routine_type() const { return tl.table->field[4]; }
- Field* grantor() const { return tl.table->field[5]; }
- Field* proc_priv() const { return tl.table->field[6]; }
- Field* timestamp() const { return tl.table->field[7]; }
+ Field* host() const { return m_table->field[0]; }
+ Field* db() const { return m_table->field[1]; }
+ Field* user() const { return m_table->field[2]; }
+ Field* routine_name() const { return m_table->field[3]; }
+ Field* routine_type() const { return m_table->field[4]; }
+ Field* grantor() const { return m_table->field[5]; }
+ Field* proc_priv() const { return m_table->field[6]; }
+ Field* timestamp() const { return m_table->field[7]; }
private:
friend class Grant_tables;
Procs_priv_table() {}
-
- void init(enum thr_lock_type lock_type)
- {
- /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
- LEX_CSTRING MYSQL_PROCS_PRIV_NAME={STRING_WITH_LEN("procs_priv") };
- tl.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_PROCS_PRIV_NAME, NULL, lock_type);
- Grant_table_base::init(lock_type, true);
- }
};
class Proxies_priv_table: public Grant_table_base
{
public:
- Field* host() const { return tl.table->field[0]; }
- Field* user() const { return tl.table->field[1]; }
- Field* proxied_host() const { return tl.table->field[2]; }
- Field* proxied_user() const { return tl.table->field[3]; }
- Field* with_grant() const { return tl.table->field[4]; }
- Field* grantor() const { return tl.table->field[5]; }
- Field* timestamp() const { return tl.table->field[6]; }
+ Field* host() const { return m_table->field[0]; }
+ Field* user() const { return m_table->field[1]; }
+ Field* proxied_host() const { return m_table->field[2]; }
+ Field* proxied_user() const { return m_table->field[3]; }
+ Field* with_grant() const { return m_table->field[4]; }
+ Field* grantor() const { return m_table->field[5]; }
+ Field* timestamp() const { return m_table->field[6]; }
private:
friend class Grant_tables;
Proxies_priv_table() {}
-
- void init(enum thr_lock_type lock_type)
- {
- /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
- LEX_CSTRING MYSQL_PROXIES_PRIV_NAME={STRING_WITH_LEN("proxies_priv") };
- tl.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_PROXIES_PRIV_NAME, NULL, lock_type);
- Grant_table_base::init(lock_type, true);
- }
};
class Roles_mapping_table: public Grant_table_base
{
public:
- Field* host() const { return tl.table->field[0]; }
- Field* user() const { return tl.table->field[1]; }
- Field* role() const { return tl.table->field[2]; }
- Field* admin_option() const { return tl.table->field[3]; }
+ Field* host() const { return m_table->field[0]; }
+ Field* user() const { return m_table->field[1]; }
+ Field* role() const { return m_table->field[2]; }
+ Field* admin_option() const { return m_table->field[3]; }
private:
friend class Grant_tables;
Roles_mapping_table() {}
-
- void init(enum thr_lock_type lock_type)
- {
- /* We are relying on init_one_table zeroing out the TABLE_LIST structure. */
- LEX_CSTRING MYSQL_ROLES_MAPPING_NAME={STRING_WITH_LEN("roles_mapping") };
- tl.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_ROLES_MAPPING_NAME, NULL, lock_type);
- Grant_table_base::init(lock_type, true);
- }
};
/**
@@ -1116,170 +1790,130 @@ class Roles_mapping_table: public Grant_table_base
class Grant_tables
{
public:
- /* When constructing the Grant_tables object, we initialize only
- the tables which are going to be opened.
- @param which_tables Bitmap of which tables to open.
- @param lock_type Lock type to use when opening tables.
- */
- Grant_tables(int which_tables, enum thr_lock_type lock_type)
- {
- DBUG_ENTER("Grant_tables::Grant_tables");
- DBUG_PRINT("info", ("which_tables: %x, lock_type: %u",
- which_tables, lock_type));
- DBUG_ASSERT(which_tables); /* At least one table must be opened. */
- Grant_table_base* prev= NULL;
- /* We start from the last table, Table_roles_mapping, such that
- the first one in the linked list is Table_user. */
- if (which_tables & Table_roles_mapping)
- {
- m_roles_mapping_table.init(lock_type);
- prev= &m_roles_mapping_table;
- }
- if (which_tables & Table_proxies_priv)
- {
- m_proxies_priv_table.init(lock_type);
- link_tables(&m_proxies_priv_table, prev);
- prev= &m_proxies_priv_table;
- }
- if (which_tables & Table_procs_priv)
- {
- m_procs_priv_table.init(lock_type);
- link_tables(&m_procs_priv_table, prev);
- prev= &m_procs_priv_table;
- }
- if (which_tables & Table_host)
- {
- m_host_table.init(lock_type);
- link_tables(&m_host_table, prev);
- prev= &m_host_table;
- }
- if (which_tables & Table_columns_priv)
- {
- m_columns_priv_table.init(lock_type);
- link_tables(&m_columns_priv_table, prev);
- prev= &m_columns_priv_table;
- }
- if (which_tables & Table_tables_priv)
- {
- m_tables_priv_table.init(lock_type);
- link_tables(&m_tables_priv_table, prev);
- prev= &m_tables_priv_table;
- }
- if (which_tables & Table_db)
- {
- m_db_table.init(lock_type);
- link_tables(&m_db_table, prev);
- prev= &m_db_table;
- }
- if (which_tables & Table_user)
- {
- m_user_table.init(lock_type);
- link_tables(&m_user_table, prev);
- prev= &m_user_table;
- }
+ Grant_tables() : p_user_table(&m_user_table_json) { }
- first_table_in_list= prev;
- DBUG_VOID_RETURN;
- }
-
- /* Before any operation is possible on grant tables, they must be opened.
- This opens the tables according to the lock type specified during
- construction.
-
- @retval 1 replication filters matched. Abort the operation,
- but return OK (!)
- @retval 0 tables were opened successfully
- @retval -1 error, tables could not be opened
- */
- int open_and_lock(THD *thd)
+ int open_and_lock(THD *thd, int which_tables, enum thr_lock_type lock_type)
{
DBUG_ENTER("Grant_tables::open_and_lock");
- DBUG_ASSERT(first_table_in_list);
-#ifdef HAVE_REPLICATION
- if (first_table_in_list->tl.lock_type >= TL_WRITE_ALLOW_WRITE &&
- thd->slave_thread && !thd->spcont)
- {
- /*
- GRANT and REVOKE are applied the slave in/exclusion rules as they are
- some kind of updates to the mysql.% tables.
- */
- Rpl_filter *rpl_filter= thd->system_thread_info.rpl_sql_info->rpl_filter;
- if (rpl_filter->is_on() &&
- !rpl_filter->tables_ok(0, &first_table_in_list->tl))
- DBUG_RETURN(1);
- }
-#endif
- if (open_and_lock_tables(thd, &first_table_in_list->tl, FALSE,
- MYSQL_LOCK_IGNORE_TIMEOUT))
- DBUG_RETURN(-1);
+ TABLE_LIST tables[USER_TABLE+1], *first= NULL;
+ DBUG_ASSERT(which_tables); /* At least one table must be opened. */
/*
We can read privilege tables even when !initialized.
This can be acl_load() - server startup or FLUSH PRIVILEGES
*/
- if (first_table_in_list->tl.lock_type >= TL_WRITE_ALLOW_WRITE &&
- !initialized)
+ if (lock_type >= TL_WRITE_ALLOW_WRITE && !initialized)
{
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables");
DBUG_RETURN(-1);
}
- /* The privilge columns vary based on MariaDB version. Figure out
- how many we have after we've opened the table. */
- m_user_table.compute_num_privilege_cols();
- m_db_table.compute_num_privilege_cols();
- m_tables_priv_table.compute_num_privilege_cols();
- m_columns_priv_table.compute_num_privilege_cols();
- m_host_table.compute_num_privilege_cols();
- m_procs_priv_table.compute_num_privilege_cols();
- m_proxies_priv_table.compute_num_privilege_cols();
- m_roles_mapping_table.compute_num_privilege_cols();
+ for (int i=USER_TABLE; i >=0; i--)
+ {
+ TABLE_LIST *tl= tables + i;
+ if (which_tables & (1 << i))
+ {
+ tl->init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_TABLE_NAME[i],
+ NULL, lock_type);
+ tl->updating= lock_type >= TL_WRITE_ALLOW_WRITE;
+ if (i >= FIRST_OPTIONAL_TABLE)
+ tl->open_strategy= TABLE_LIST::OPEN_IF_EXISTS;
+ tl->next_global= tl->next_local= first;
+ first= tl;
+ }
+ else
+ tl->table= NULL;
+ }
+
+ uint counter;
+ int res= really_open(thd, first, &counter);
+
+ /* if User_table_json wasn't found, let's try User_table_tabular */
+ if (!res && (which_tables & Table_user) && !(tables[USER_TABLE].table))
+ {
+ uint unused;
+ TABLE_LIST *tl= tables + USER_TABLE;
+ tl->init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_TABLE_NAME_USER,
+ NULL, lock_type);
+ tl->updating= lock_type >= TL_WRITE_ALLOW_WRITE;
+ p_user_table= &m_user_table_tabular;
+ counter++;
+ res= really_open(thd, tl, &unused);
+ }
+ if (res)
+ DBUG_RETURN(res);
+
+ if (lock_tables(thd, first, counter, MYSQL_LOCK_IGNORE_TIMEOUT))
+ DBUG_RETURN(-1);
+
+ p_user_table->set_table(tables[USER_TABLE].table);
+ m_db_table.set_table(tables[DB_TABLE].table);
+ m_tables_priv_table.set_table(tables[TABLES_PRIV_TABLE].table);
+ m_columns_priv_table.set_table(tables[COLUMNS_PRIV_TABLE].table);
+ m_host_table.set_table(tables[HOST_TABLE].table);
+ m_procs_priv_table.set_table(tables[PROCS_PRIV_TABLE].table);
+ m_proxies_priv_table.set_table(tables[PROXIES_PRIV_TABLE].table);
+ m_roles_mapping_table.set_table(tables[ROLES_MAPPING_TABLE].table);
DBUG_RETURN(0);
}
inline const User_table& user_table() const
- {
- return m_user_table;
- }
+ { return *p_user_table; }
inline const Db_table& db_table() const
- {
- return m_db_table;
- }
-
+ { return m_db_table; }
inline const Tables_priv_table& tables_priv_table() const
- {
- return m_tables_priv_table;
- }
+ { return m_tables_priv_table; }
inline const Columns_priv_table& columns_priv_table() const
- {
- return m_columns_priv_table;
- }
+ { return m_columns_priv_table; }
inline const Host_table& host_table() const
- {
- return m_host_table;
- }
+ { return m_host_table; }
inline const Procs_priv_table& procs_priv_table() const
- {
- return m_procs_priv_table;
- }
+ { return m_procs_priv_table; }
inline const Proxies_priv_table& proxies_priv_table() const
- {
- return m_proxies_priv_table;
- }
+ { return m_proxies_priv_table; }
inline const Roles_mapping_table& roles_mapping_table() const
+ { return m_roles_mapping_table; }
+
+ private:
+
+ /* Before any operation is possible on grant tables, they must be opened.
+
+ @retval 1 replication filters matched. Abort the operation,
+ but return OK (!)
+ @retval 0 tables were opened successfully
+ @retval -1 error, tables could not be opened
+ */
+ int really_open(THD *thd, TABLE_LIST* tables, uint *counter)
{
- return m_roles_mapping_table;
+ DBUG_ENTER("Grant_tables::really_open:");
+#ifdef HAVE_REPLICATION
+ if (tables->lock_type >= TL_WRITE_ALLOW_WRITE &&
+ thd->slave_thread && !thd->spcont)
+ {
+ /*
+ GRANT and REVOKE are subject to the slave's replication in/exclusion rules,
+ as they are effectively updates to the mysql.% tables.
+ */
+ Rpl_filter *rpl_filter= thd->system_thread_info.rpl_sql_info->rpl_filter;
+ if (rpl_filter->is_on() && !rpl_filter->tables_ok(0, tables))
+ DBUG_RETURN(1);
+ }
+#endif
+ if (open_tables(thd, &tables, counter, MYSQL_LOCK_IGNORE_TIMEOUT))
+ DBUG_RETURN(-1);
+ DBUG_RETURN(0);
}
- private:
- User_table m_user_table;
+ User_table *p_user_table;
+ User_table_json m_user_table_json;
+ User_table_tabular m_user_table_tabular;
Db_table m_db_table;
Tables_priv_table m_tables_priv_table;
Columns_priv_table m_columns_priv_table;
@@ -1287,20 +1921,6 @@ class Grant_tables
Procs_priv_table m_procs_priv_table;
Proxies_priv_table m_proxies_priv_table;
Roles_mapping_table m_roles_mapping_table;
-
- /* The grant tables are set-up in a linked list. We keep the head of it. */
- Grant_table_base *first_table_in_list;
- /**
- Chain two grant tables' TABLE_LIST members.
- */
- static void link_tables(Grant_table_base *from, Grant_table_base *to)
- {
- DBUG_ASSERT(from);
- if (to)
- from->tl.next_local= from->tl.next_global= &to->tl;
- else
- from->tl.next_local= from->tl.next_global= NULL;
- }
};
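
The Grant_tables rewrite above keeps two User_table implementations and picks one at open time: p_user_table starts out pointing at the mysql.global_priv (JSON) handler and is switched to the legacy mysql.user handler when the new table is absent from the data directory. A minimal sketch of that selection behind a single interface (class names and the open() signature are invented for the sketch, not the server's TABLE_LIST machinery):

#include <cstdio>

// One abstract interface, two on-disk layouts; callers only ever talk to the
// base class, just as the ACL code above only talks to User_table.
struct UserTableSketch
{
  virtual bool open() = 0;                 // false: table not found
  virtual const char *layout() const = 0;
  virtual ~UserTableSketch() {}
};

struct JsonUserTable : UserTableSketch
{
  bool table_exists = false;               // simulate a pre-10.4 data directory
  bool open() override { return table_exists; }
  const char *layout() const override { return "global_priv (JSON blob)"; }
};

struct TabularUserTable : UserTableSketch
{
  bool open() override { return true; }    // legacy layout assumed present
  const char *layout() const override { return "user (one column per privilege)"; }
};

int main()
{
  JsonUserTable json_table;
  TabularUserTable tabular_table;

  UserTableSketch *user_table = &json_table;  // prefer the 10.4 layout
  if (!user_table->open())
    user_table = &tabular_table;              // fall back, as open_and_lock() does
  printf("using mysql.%s\n", user_table->layout());
  return 0;
}
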
@@ -1308,14 +1928,13 @@ void ACL_PROXY_USER::init(const Proxies_priv_table& proxies_priv_table,
MEM_ROOT *mem)
{
init(get_field(mem, proxies_priv_table.host()),
- get_field(mem, proxies_priv_table.user()),
+ safe_str(get_field(mem, proxies_priv_table.user())),
get_field(mem, proxies_priv_table.proxied_host()),
- get_field(mem, proxies_priv_table.proxied_user()),
+ safe_str(get_field(mem, proxies_priv_table.proxied_user())),
proxies_priv_table.with_grant()->val_int() != 0);
}
-
/*
Enumeration of various ACL's and Hashes used in handle_grant_struct()
*/
@@ -1335,13 +1954,10 @@ enum enum_acl_lists
ACL_ROLE::ACL_ROLE(ACL_USER *user, MEM_ROOT *root) : counter(0)
{
-
access= user->access;
/* set initial role access the same as the table row privileges */
initial_role_access= user->access;
- this->user.str= safe_strdup_root(root, user->user.str);
- this->user.length= user->user.length;
- bzero(&role_grants, sizeof(role_grants));
+ this->user= user->user;
bzero(&parent_grantee, sizeof(parent_grantee));
flags= IS_ROLE;
}
@@ -1352,7 +1968,6 @@ ACL_ROLE::ACL_ROLE(const char * rolename, ulong privileges, MEM_ROOT *root) :
this->access= initial_role_access;
this->user.str= safe_strdup_root(root, rolename);
this->user.length= strlen(rolename);
- bzero(&role_grants, sizeof(role_grants));
bzero(&parent_grantee, sizeof(parent_grantee));
flags= IS_ROLE;
}
@@ -1390,7 +2005,7 @@ static bool has_validation_plugins()
MariaDB_PASSWORD_VALIDATION_PLUGIN, NULL);
}
-struct validation_data { LEX_CSTRING *user, *password; };
+struct validation_data { const LEX_CSTRING *user, *password; };
static my_bool do_validate(THD *, plugin_ref plugin, void *arg)
{
@@ -1401,13 +2016,13 @@ static my_bool do_validate(THD *, plugin_ref plugin, void *arg)
}
-static bool validate_password(LEX_USER *user, THD *thd)
+static bool validate_password(THD *thd, const LEX_CSTRING &user,
+ const LEX_CSTRING &pwtext, bool has_hash)
{
- if (user->pwtext.length || !user->pwhash.length)
+ if (pwtext.length || !has_hash)
{
- struct validation_data data= { &user->user,
- user->pwtext.str ? &user->pwtext :
- const_cast<LEX_CSTRING *>(&empty_clex_str) };
+ struct validation_data data= { &user,
+ pwtext.str ? &pwtext : &empty_clex_str };
if (plugin_foreach(NULL, do_validate,
MariaDB_PASSWORD_VALIDATION_PLUGIN, &data))
{
@@ -1427,161 +2042,153 @@ static bool validate_password(LEX_USER *user, THD *thd)
return false;
}
-/**
- Convert scrambled password to binary form, according to scramble type,
- Binary form is stored in user.salt.
-
- @param acl_user The object where to store the salt
- @param password The password hash containing the salt
- @param password_len The length of the password hash
-
- Despite the name of the function it is used when loading ACLs from disk
- to store the password hash in the ACL_USER object.
-*/
-
-static void
-set_user_salt(ACL_USER *acl_user, const char *password, size_t password_len)
+static int set_user_salt(ACL_USER::AUTH *auth, plugin_ref plugin)
{
- if (password_len == SCRAMBLED_PASSWORD_CHAR_LENGTH)
- {
- get_salt_from_password(acl_user->salt, password);
- acl_user->salt_len= SCRAMBLE_LENGTH;
- }
- else if (password_len == SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
+ st_mysql_auth *info= (st_mysql_auth *) plugin_decl(plugin)->info;
+ if (info->interface_version >= 0x0202 && info->preprocess_hash &&
+ auth->auth_string.length)
{
- get_salt_from_password_323((ulong *) acl_user->salt, password);
- acl_user->salt_len= SCRAMBLE_LENGTH_323;
+ uchar buf[MAX_SCRAMBLE_LENGTH];
+ size_t len= sizeof(buf);
+ if (info->preprocess_hash(auth->auth_string.str,
+ auth->auth_string.length, buf, &len))
+ return 1;
+ auth->salt.str= (char*)memdup_root(&acl_memroot, buf, len);
+ auth->salt.length= len;
}
else
- acl_user->salt_len= 0;
-}
+ auth->salt= auth->auth_string;
-static const char *fix_plugin_ptr(const char *name)
-{
- if (my_strcasecmp(system_charset_info, name,
- native_password_plugin_name.str) == 0)
- return native_password_plugin_name.str;
- else
- if (my_strcasecmp(system_charset_info, name,
- old_password_plugin_name.str) == 0)
- return old_password_plugin_name.str;
- else
- return name;
+ return 0;
}
/**
- Fix ACL::plugin pointer to point to a hard-coded string, if appropriate
+ Fills in ACL_USER::auth_string and ACL_USER::salt fields, as needed
- Make sure that if ACL_USER's plugin is a built-in, then it points
- to a hard coded string, not to an allocated copy. Run-time, for
- authentication, we want to be able to detect built-ins by comparing
- pointers, not strings.
+ Hashes the plain-text password (if provided) into auth_string and
+ converts auth_string into the salt.
- Additionally - update the salt if the plugin is built-in.
-
- @retval 0 the pointers were fixed
- @retval 1 this ACL_USER uses a not built-in plugin
-*/
-static bool fix_user_plugin_ptr(ACL_USER *user)
-{
- if (lex_string_eq(&user->plugin, &native_password_plugin_name))
- user->plugin= native_password_plugin_name;
- else
- if (lex_string_eq(&user->plugin, &old_password_plugin_name))
- user->plugin= old_password_plugin_name;
- else
- return true;
-
- set_user_salt(user, user->auth_string.str, user->auth_string.length);
- return false;
-}
-
-
-/*
- Validates the password, calculates password hash, transforms
- equivalent LEX_USER representations.
-
- Upon entering this function:
-
- - if user->plugin is specified, user->auth is the plugin auth data.
- - if user->plugin is mysql_native_password or mysql_old_password,
- user->auth is the password hash, and LEX_USER is transformed
- to match the next case (that is, user->plugin is cleared).
- - if user->plugin is NOT specified, built-in auth is assumed, that is
- mysql_native_password or mysql_old_password. In that case,
- user->pwhash is the password hash. And user->pwtext is the original
- plain-text password. Either one can be set or both.
-
- Upon exiting this function:
-
- - user->pwtext is left untouched
- - user->pwhash is the password hash, as the mysql.user.password column
- - user->plugin is the plugin name, as the mysql.user.plugin column
- - user->auth is the plugin auth data, as the mysql.user.authentication_string column
+ Fails if the plain-text password fails validation, if the plugin is
+ not loaded, if the auth_string is invalid, or if the password is not applicable.
*/
-static bool fix_lex_user(THD *thd, LEX_USER *user)
+static int set_user_auth(THD *thd, const LEX_CSTRING &user,
+ ACL_USER::AUTH *auth, const LEX_CSTRING &pwtext)
{
- size_t check_length;
+ const char *plugin_name= auth->plugin.str;
+ bool unlock_plugin= false;
+ plugin_ref plugin= get_auth_plugin(thd, auth->plugin, &unlock_plugin);
+ int res= 1;
- DBUG_ASSERT(user->plugin.length || !user->auth.length);
- DBUG_ASSERT(!(user->plugin.length && (user->pwtext.length || user->pwhash.length)));
+ if (!plugin)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_PLUGIN_IS_NOT_LOADED,
+ ER_THD(thd, ER_PLUGIN_IS_NOT_LOADED), plugin_name);
+ return ER_PLUGIN_IS_NOT_LOADED;
+ }
- if (lex_string_eq(&user->plugin, &native_password_plugin_name))
- check_length= SCRAMBLED_PASSWORD_CHAR_LENGTH;
- else
- if (lex_string_eq(&user->plugin, &old_password_plugin_name))
- check_length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
- else
- if (user->plugin.length)
- return false; // nothing else to do
- else if (thd->variables.old_passwords == 1 ||
- user->pwhash.length == SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
- check_length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
- else
- check_length= SCRAMBLED_PASSWORD_CHAR_LENGTH;
+ auth->salt= auth->auth_string;
- if (user->plugin.length)
+ st_mysql_auth *info= (st_mysql_auth *) plugin_decl(plugin)->info;
+ if (info->interface_version < 0x0202)
{
- user->pwhash= user->auth;
- user->plugin= empty_clex_str;
- user->auth= empty_clex_str;
+ res= pwtext.length ? ER_SET_PASSWORD_AUTH_PLUGIN : 0;
+ goto end;
}
- if (user->pwhash.length && user->pwhash.length != check_length)
+ if (info->hash_password &&
+ validate_password(thd, user, pwtext, auth->auth_string.length))
{
- my_error(ER_PASSWD_LENGTH, MYF(0), (int) check_length);
- return true;
+ res= ER_NOT_VALID_PASSWORD;
+ goto end;
}
-
- if (user->pwtext.length && !user->pwhash.length)
+ if (pwtext.length)
{
- size_t scramble_length;
- void (*make_scramble)(char *, const char *, size_t);
-
- if (thd->variables.old_passwords == 1)
+ if (info->hash_password)
{
- scramble_length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
- make_scramble= my_make_scrambled_password_323;
+ char buf[MAX_SCRAMBLE_LENGTH];
+ size_t len= sizeof(buf) - 1;
+ if (info->hash_password(pwtext.str, pwtext.length, buf, &len))
+ {
+ res= ER_OUTOFMEMORY;
+ goto end;
+ }
+ buf[len] = 0;
+ auth->auth_string.str= (char*)memdup_root(&acl_memroot, buf, len+1);
+ auth->auth_string.length= len;
}
else
{
- scramble_length= SCRAMBLED_PASSWORD_CHAR_LENGTH;
- make_scramble= my_make_scrambled_password;
+ res= ER_SET_PASSWORD_AUTH_PLUGIN;
+ goto end;
}
+ }
+ if (set_user_salt(auth, plugin))
+ {
+ res= ER_PASSWD_LENGTH;
+ goto end;
+ }
- Query_arena *arena, backup;
- arena= thd->activate_stmt_arena_if_needed(&backup);
- char *buff= (char *) thd->alloc(scramble_length + 1);
- if (arena)
- thd->restore_active_arena(arena, &backup);
+ res= 0;
+end:
+ if (unlock_plugin)
+ plugin_unlock(thd, plugin);
+ return res;
+}
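
set_user_auth() above leans on two optional plugin hooks: hash_password(), which turns a plain-text password into the stored hash and is only available from auth-plugin interface version 0x0202 on, and preprocess_hash(), which set_user_salt() uses to turn the stored hash into the in-memory salt, falling back to salt = auth_string when the hook is absent. A rough standalone sketch of that flow (the struct, return codes and simplified error handling are assumptions of the sketch, not the server's st_mysql_auth ABI; password validation is omitted):

#include <string>

struct SketchAuthPlugin
{
  int interface_version;                                        // e.g. 0x0202
  bool (*hash_password)(const std::string &pw, std::string *hash);
  bool (*preprocess_hash)(const std::string &hash, std::string *salt);
};

enum SketchResult { SKETCH_OK, SKETCH_PLUGIN_TOO_OLD, SKETCH_HASHING_FAILED };

// Hash the plain-text password (if any) into the stored hash, then derive the
// runtime salt from it -- the two steps taken by set_user_auth() and
// set_user_salt(), minus validation and error reporting.
static SketchResult set_auth_sketch(const SketchAuthPlugin &p,
                                    const std::string &plaintext,
                                    std::string *stored_hash, std::string *salt)
{
  if (p.interface_version < 0x0202 || !p.hash_password)
  {
    if (!plaintext.empty())
      return SKETCH_PLUGIN_TOO_OLD;        // can't convert plain text for it
    *salt = *stored_hash;                  // a given hash is kept verbatim
    return SKETCH_OK;
  }
  if (!plaintext.empty() && p.hash_password(plaintext, stored_hash))
    return SKETCH_HASHING_FAILED;
  if (p.preprocess_hash && !stored_hash->empty())
  {
    if (p.preprocess_hash(*stored_hash, salt))
      return SKETCH_HASHING_FAILED;        // stored hash invalid for the plugin
  }
  else
    *salt = *stored_hash;                  // plugin keeps the hash as the salt
  return SKETCH_OK;
}

static bool fake_hash(const std::string &pw, std::string *hash)
{ *hash = "*" + pw; return false; }        // false = success in this sketch

int main()
{
  SketchAuthPlugin p = { 0x0202, fake_hash, nullptr };
  std::string hash, salt;
  return set_auth_sketch(p, "secret", &hash, &salt) != SKETCH_OK;
}
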
- if (buff == NULL)
- return true;
- make_scramble(buff, user->pwtext.str, user->pwtext.length);
- user->pwhash.str= buff;
- user->pwhash.length= scramble_length;
+
+/**
+ Lazily computes user's salt from the password hash
+*/
+static bool set_user_salt_if_needed(ACL_USER *user_copy, int curr_auth,
+ plugin_ref plugin)
+{
+ ACL_USER::AUTH *auth_copy= user_copy->auth + curr_auth;
+ DBUG_ASSERT(!strcasecmp(auth_copy->plugin.str, plugin_name(plugin)->str));
+
+ if (auth_copy->salt.str)
+ return 0; // already done
+
+ if (set_user_salt(auth_copy, plugin))
+ return 1;
+
+ mysql_mutex_lock(&acl_cache->lock);
+ ACL_USER *user= find_user_exact(user_copy->host.hostname, user_copy->user.str);
+ // make sure the user wasn't altered or dropped meanwhile
+ if (user)
+ {
+ ACL_USER::AUTH *auth= user->auth + curr_auth;
+ if (!auth->salt.str && auth->plugin.length == auth_copy->plugin.length &&
+ auth->auth_string.length == auth_copy->auth_string.length &&
+ !memcmp(auth->plugin.str, auth_copy->plugin.str, auth->plugin.length) &&
+ !memcmp(auth->auth_string.str, auth_copy->auth_string.str, auth->auth_string.length))
+ auth->salt= auth_copy->salt;
}
+ mysql_mutex_unlock(&acl_cache->lock);
+ return 0;
+}
+
+/**
+ Fix ACL::plugin pointer to point to a hard-coded string, if appropriate
+
+ Make sure that if ACL_USER's plugin is a built-in, then it points
+ to a hard coded string, not to an allocated copy. Run-time, for
+ authentication, we want to be able to detect built-ins by comparing
+ pointers, not strings.
+
+ @retval 0 the pointers were fixed
+ @retval 1 this ACL_USER uses a not built-in plugin
+*/
+static bool fix_user_plugin_ptr(ACL_USER::AUTH *auth)
+{
+ if (lex_string_eq(&auth->plugin, &native_password_plugin_name))
+ auth->plugin= native_password_plugin_name;
+ else
+ if (lex_string_eq(&auth->plugin, &old_password_plugin_name))
+ auth->plugin= old_password_plugin_name;
+ else
+ return true;
return false;
}
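
set_user_salt_if_needed() above computes the salt on the connection's private copy of the ACL_USER without holding the ACL lock, then re-finds the account under acl_cache->lock and stores the salt only if the plugin and authentication string are still identical. A standalone sketch of that publish-if-unchanged pattern (std::mutex and the Entry struct are stand-ins, not acl_cache->lock or ACL_USER):

#include <mutex>
#include <string>
#include <vector>

struct Entry
{
  std::string name;
  std::string auth_string;  // source material for the cached value
  std::string salt;         // lazily computed; empty means "not yet"
};

static std::mutex cache_lock;             // stand-in for acl_cache->lock
static std::vector<Entry> shared_entries;

static std::string compute_salt(const std::string &auth_string)
{
  return "salt-of-" + auth_string;        // pretend this is preprocess_hash()
}

static void fill_salt_if_needed(Entry *copy)
{
  if (!copy->salt.empty())
    return;                                        // already done
  copy->salt = compute_salt(copy->auth_string);    // no lock held here

  std::lock_guard<std::mutex> guard(cache_lock);
  for (Entry &shared : shared_entries)
  {
    // make sure the entry wasn't altered or dropped meanwhile
    if (shared.name == copy->name && shared.salt.empty() &&
        shared.auth_string == copy->auth_string)
    {
      shared.salt = copy->salt;
      break;
    }
  }
}

int main()
{
  shared_entries.push_back({"alice", "*1234", ""});
  Entry copy = shared_entries[0];
  fill_salt_if_needed(&copy);
  return shared_entries[0].salt.empty();   // 0: the salt was published
}
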
@@ -1658,27 +2265,12 @@ bool acl_init(bool dont_read_acl_tables)
DBUG_RETURN(return_val);
}
-/**
- Choose from either native or old password plugins when assigning a password
-*/
-
-static bool set_user_plugin (ACL_USER *user, size_t password_len)
+static void push_new_user(const ACL_USER &user)
{
- switch (password_len)
- {
- case 0: /* no password */
- case SCRAMBLED_PASSWORD_CHAR_LENGTH:
- user->plugin= native_password_plugin_name;
- return FALSE;
- case SCRAMBLED_PASSWORD_CHAR_LENGTH_323:
- user->plugin= old_password_plugin_name;
- return FALSE;
- default:
- sql_print_warning("Found invalid password for user: '%s@%s'; "
- "Ignoring user", safe_str(user->user.str),
- safe_str(user->host.hostname));
- return TRUE;
- }
+ push_dynamic(&acl_users, &user);
+ if (!user.host.hostname ||
+ (user.host.hostname[0] == wild_many && !user.host.hostname[1]))
+ allow_all_hosts=1; // Anyone can connect
}
@@ -1703,7 +2295,6 @@ static bool acl_load(THD *thd, const Grant_tables& tables)
READ_RECORD read_record_info;
bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE;
char tmp_name[SAFE_NAME_LEN+1];
- int password_length;
Sql_mode_save old_mode_save(thd);
DBUG_ENTER("acl_load");
@@ -1715,7 +2306,7 @@ static bool acl_load(THD *thd, const Grant_tables& tables)
init_sql_alloc(&acl_memroot, "ACL", ACL_ALLOC_BLOCK_SIZE, 0, MYF(0));
if (host_table.table_exists()) // "host" table may not exist (e.g. in MySQL 5.6.7+)
{
- if (host_table.init_read_record(&read_record_info, thd))
+ if (host_table.init_read_record(&read_record_info))
DBUG_RETURN(true);
while (!(read_record_info.read_record()))
{
@@ -1769,285 +2360,100 @@ static bool acl_load(THD *thd, const Grant_tables& tables)
freeze_size(&acl_hosts);
const User_table& user_table= tables.user_table();
- if (user_table.init_read_record(&read_record_info, thd))
+ if (user_table.init_read_record(&read_record_info))
DBUG_RETURN(true);
- username_char_length= MY_MIN(user_table.user()->char_length(),
- USERNAME_CHAR_LENGTH);
- if (user_table.password()) // Password column might be missing. (MySQL 5.7.6+)
- {
- password_length= user_table.password()->field_length /
- user_table.password()->charset()->mbmaxlen;
- if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
- {
- sql_print_error("Fatal error: mysql.user table is damaged or in "
- "unsupported 3.20 format.");
- DBUG_RETURN(TRUE);
- }
-
- DBUG_PRINT("info",("user table fields: %d, password length: %d",
- user_table.num_fields(), password_length));
-
- mysql_mutex_lock(&LOCK_global_system_variables);
- if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH)
- {
- if (opt_secure_auth)
- {
- mysql_mutex_unlock(&LOCK_global_system_variables);
- sql_print_error("Fatal error: mysql.user table is in old format, "
- "but server started with --secure-auth option.");
- DBUG_RETURN(TRUE);
- }
- mysql_user_table_is_in_short_password_format= true;
- if (global_system_variables.old_passwords)
- mysql_mutex_unlock(&LOCK_global_system_variables);
- else
- {
- extern sys_var *Sys_old_passwords_ptr;
- Sys_old_passwords_ptr->value_origin= sys_var::AUTO;
- global_system_variables.old_passwords= 1;
- mysql_mutex_unlock(&LOCK_global_system_variables);
- sql_print_warning("mysql.user table is not updated to new password format; "
- "Disabling new password usage until "
- "mysql_fix_privilege_tables is run");
- }
- thd->variables.old_passwords= 1;
- }
- else
- {
- mysql_user_table_is_in_short_password_format= false;
- mysql_mutex_unlock(&LOCK_global_system_variables);
- }
- }
-
allow_all_hosts=0;
while (!(read_record_info.read_record()))
{
ACL_USER user;
bool is_role= FALSE;
- bzero(&user, sizeof(user));
- update_hostname(&user.host, get_field(&acl_memroot, user_table.host()));
- char *username= get_field(&acl_memroot, user_table.user());
+ update_hostname(&user.host, user_table.get_host(&acl_memroot));
+ char *username= safe_str(user_table.get_user(&acl_memroot));
user.user.str= username;
- user.user.length= safe_strlen(username);
-
- /*
- If the user entry is a role, skip password and hostname checks
- A user can not log in with a role so some checks are not necessary
- */
- is_role= user_table.check_is_role();
-
- if (is_role && is_invalid_role_name(username))
- {
- thd->clear_error(); // the warning is still issued
- continue;
- }
-
- if (!is_role && check_no_resolve &&
- hostname_requires_resolving(user.host.hostname))
- {
- sql_print_warning("'user' entry '%s@%s' "
- "ignored in --skip-name-resolve mode.",
- safe_str(user.user.str),
- safe_str(user.host.hostname));
- continue;
- }
+ user.user.length= strlen(username);
- char *password= const_cast<char*>("");
- if (user_table.password())
- password= get_field(&acl_memroot, user_table.password());
- size_t password_len= safe_strlen(password);
- user.auth_string.str= safe_str(password);
- user.auth_string.length= password_len;
- set_user_salt(&user, password, password_len);
+ is_role= user_table.get_is_role();
- if (!is_role && set_user_plugin(&user, password_len))
- continue;
+ user.access= user_table.get_access();
- {
- user.access= user_table.get_access() & GLOBAL_ACLS;
- /*
- if it is pre 5.0.1 privilege table then map CREATE privilege on
- CREATE VIEW & SHOW VIEW privileges
- */
- if (user_table.num_fields() <= 31 && (user.access & CREATE_ACL))
- user.access|= (CREATE_VIEW_ACL | SHOW_VIEW_ACL);
+ user.sort= get_sort(2, user.host.hostname, user.user.str);
+ user.hostname_length= safe_strlen(user.host.hostname);
- /*
- if it is pre 5.0.2 privilege table then map CREATE/ALTER privilege on
- CREATE PROCEDURE & ALTER PROCEDURE privileges
- */
- if (user_table.num_fields() <= 33 && (user.access & CREATE_ACL))
- user.access|= CREATE_PROC_ACL;
- if (user_table.num_fields() <= 33 && (user.access & ALTER_ACL))
- user.access|= ALTER_PROC_ACL;
+ my_init_dynamic_array(&user.role_grants, sizeof(ACL_ROLE *), 0, 8, MYF(0));
- /*
- pre 5.0.3 did not have CREATE_USER_ACL
- */
- if (user_table.num_fields() <= 36 && (user.access & GRANT_ACL))
- user.access|= CREATE_USER_ACL;
+ user.account_locked= user_table.get_account_locked();
+ user.password_expired= user_table.get_password_expired();
+ user.password_last_changed= user_table.get_password_last_changed();
+ user.password_lifetime= user_table.get_password_lifetime();
- /*
- if it is pre 5.1.6 privilege table then map CREATE privilege on
- CREATE|ALTER|DROP|EXECUTE EVENT
- */
- if (user_table.num_fields() <= 37 && (user.access & SUPER_ACL))
- user.access|= EVENT_ACL;
-
- /*
- if it is pre 5.1.6 privilege then map TRIGGER privilege on CREATE.
- */
- if (user_table.num_fields() <= 38 && (user.access & SUPER_ACL))
- user.access|= TRIGGER_ACL;
-
- if (user_table.num_fields() <= 46 && (user.access & DELETE_ACL))
- user.access|= DELETE_HISTORY_ACL;
-
- user.sort= get_sort(2, user.host.hostname, user.user.str);
- user.hostname_length= safe_strlen(user.host.hostname);
- user.user_resource.user_conn= 0;
- user.user_resource.max_statement_time= 0.0;
-
- /* Starting from 4.0.2 we have more fields */
- if (user_table.ssl_type())
- {
- char *ssl_type=get_field(thd->mem_root, user_table.ssl_type());
- if (!ssl_type)
- user.ssl_type=SSL_TYPE_NONE;
- else if (!strcmp(ssl_type, "ANY"))
- user.ssl_type=SSL_TYPE_ANY;
- else if (!strcmp(ssl_type, "X509"))
- user.ssl_type=SSL_TYPE_X509;
- else /* !strcmp(ssl_type, "SPECIFIED") */
- user.ssl_type=SSL_TYPE_SPECIFIED;
-
- user.ssl_cipher= get_field(&acl_memroot, user_table.ssl_cipher());
- user.x509_issuer= get_field(&acl_memroot, user_table.x509_issuer());
- user.x509_subject= get_field(&acl_memroot, user_table.x509_subject());
-
- char *ptr = get_field(thd->mem_root, user_table.max_questions());
- user.user_resource.questions=ptr ? atoi(ptr) : 0;
- ptr = get_field(thd->mem_root, user_table.max_updates());
- user.user_resource.updates=ptr ? atoi(ptr) : 0;
- ptr = get_field(thd->mem_root, user_table.max_connections());
- user.user_resource.conn_per_hour= ptr ? atoi(ptr) : 0;
- if (user.user_resource.questions || user.user_resource.updates ||
- user.user_resource.conn_per_hour)
- mqh_used=1;
-
- if (user_table.max_user_connections())
- {
- /* Starting from 5.0.3 we have max_user_connections field */
- ptr= get_field(thd->mem_root, user_table.max_user_connections());
- user.user_resource.user_conn= ptr ? atoi(ptr) : 0;
- }
-
- if (!is_role && user_table.plugin())
- {
- /* We may have plugin & auth_String fields */
- char *tmpstr= get_field(&acl_memroot, user_table.plugin());
- if (tmpstr)
- {
- user.plugin.str= tmpstr;
- user.plugin.length= strlen(user.plugin.str);
- user.auth_string.str=
- safe_str(get_field(&acl_memroot,
- user_table.authentication_string()));
- user.auth_string.length= strlen(user.auth_string.str);
-
- if (user.auth_string.length && password_len &&
- (user.auth_string.length != password_len ||
- memcmp(user.auth_string.str, password, password_len)))
- {
- sql_print_warning("'user' entry '%s@%s' has both a password "
- "and an authentication plugin specified. The "
- "password will be ignored.",
- safe_str(user.user.str),
- safe_str(user.host.hostname));
- }
- else if (password_len)
- {
- user.auth_string.str= password;
- user.auth_string.length= password_len;
- }
-
- fix_user_plugin_ptr(&user);
- }
- }
-
- if (user_table.max_statement_time())
- {
- /* Starting from 10.1.1 we can have max_statement_time */
- ptr= get_field(thd->mem_root,
- user_table.max_statement_time());
- user.user_resource.max_statement_time= ptr ? atof(ptr) : 0.0;
- }
- }
- else
+ if (is_role)
+ {
+ if (is_invalid_role_name(username))
{
- user.ssl_type=SSL_TYPE_NONE;
-#ifndef TO_BE_REMOVED
- if (user_table.num_fields() <= 13)
- { // Without grant
- if (user.access & CREATE_ACL)
- user.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL;
- }
- /* Convert old privileges */
- user.access|= LOCK_TABLES_ACL | CREATE_TMP_ACL | SHOW_DB_ACL;
- if (user.access & FILE_ACL)
- user.access|= REPL_CLIENT_ACL | REPL_SLAVE_ACL;
- if (user.access & PROCESS_ACL)
- user.access|= SUPER_ACL | EXECUTE_ACL;
-#endif
+ thd->clear_error(); // the warning is still issued
+ continue;
}
- (void) my_init_dynamic_array(&user.role_grants,sizeof(ACL_ROLE *),
- 8, 8, MYF(0));
+ ACL_ROLE *entry= new (&acl_memroot) ACL_ROLE(&user, &acl_memroot);
+ entry->role_grants = user.role_grants;
+ my_init_dynamic_array(&entry->parent_grantee,
+ sizeof(ACL_USER_BASE *), 0, 8, MYF(0));
+ my_hash_insert(&acl_roles, (uchar *)entry);
- /* check default role, if any */
- if (!is_role && user_table.default_role())
+ continue;
+ }
+ else
+ {
+ if (check_no_resolve && hostname_requires_resolving(user.host.hostname))
{
- user.default_rolename.str=
- get_field(&acl_memroot, user_table.default_role());
- user.default_rolename.length= safe_strlen(user.default_rolename.str);
+ sql_print_warning("'user' entry '%s@%s' "
+ "ignored in --skip-name-resolve mode.", user.user.str,
+ safe_str(user.host.hostname));
+ continue;
}
- if (is_role)
- {
- DBUG_PRINT("info", ("Found role %s", user.user.str));
- ACL_ROLE *entry= new (&acl_memroot) ACL_ROLE(&user, &acl_memroot);
- entry->role_grants = user.role_grants;
- (void) my_init_dynamic_array(&entry->parent_grantee,
- sizeof(ACL_USER_BASE *), 8, 8, MYF(0));
- my_hash_insert(&acl_roles, (uchar *)entry);
-
+ if (user_table.get_auth(thd, &acl_memroot, &user))
continue;
- }
- else
+ for (uint i= 0; i < user.nauth; i++)
{
- DBUG_PRINT("info", ("Found user %s", user.user.str));
- (void) push_dynamic(&acl_users,(uchar*) &user);
+ ACL_USER::AUTH *auth= user.auth + i;
+ auth->salt= null_clex_str;
+ fix_user_plugin_ptr(auth);
}
- if (!user.host.hostname ||
- (user.host.hostname[0] == wild_many && !user.host.hostname[1]))
- allow_all_hosts=1; // Anyone can connect
+
+ user.ssl_type= user_table.get_ssl_type();
+ user.ssl_cipher= user_table.get_ssl_cipher(&acl_memroot);
+ user.x509_issuer= safe_str(user_table.get_x509_issuer(&acl_memroot));
+ user.x509_subject= safe_str(user_table.get_x509_subject(&acl_memroot));
+ user.user_resource.questions= (uint)user_table.get_max_questions();
+ user.user_resource.updates= (uint)user_table.get_max_updates();
+ user.user_resource.conn_per_hour= (uint)user_table.get_max_connections();
+ if (user.user_resource.questions || user.user_resource.updates ||
+ user.user_resource.conn_per_hour)
+ mqh_used=1;
+
+ user.user_resource.user_conn= (int)user_table.get_max_user_connections();
+ user.user_resource.max_statement_time= user_table.get_max_statement_time();
+
+ user.default_rolename.str= user_table.get_default_role(&acl_memroot);
+ user.default_rolename.length= safe_strlen(user.default_rolename.str);
}
+ push_new_user(user);
}
- my_qsort((uchar*) dynamic_element(&acl_users,0,ACL_USER*),acl_users.elements,
- sizeof(ACL_USER),(qsort_cmp) acl_compare);
+ rebuild_acl_users();
end_read_record(&read_record_info);
freeze_size(&acl_users);
const Db_table& db_table= tables.db_table();
- if (db_table.init_read_record(&read_record_info, thd))
+ if (db_table.init_read_record(&read_record_info))
DBUG_RETURN(TRUE);
while (!(read_record_info.read_record()))
{
ACL_DB db;
char *db_name;
- db.user=get_field(&acl_memroot, db_table.user());
+ db.user=safe_str(get_field(&acl_memroot, db_table.user()));
const char *hostname= get_field(&acl_memroot, db_table.host());
if (!hostname && find_acl_role(db.user))
hostname= "";
@@ -2062,7 +2468,7 @@ static bool acl_load(THD *thd, const Grant_tables& tables)
{
sql_print_warning("'db' entry '%s %s@%s' "
"ignored in --skip-name-resolve mode.",
- db.db, safe_str(db.user), safe_str(db.host.hostname));
+ db.db, db.user, safe_str(db.host.hostname));
continue;
}
db.access= db_table.get_access();
@@ -2087,7 +2493,7 @@ static bool acl_load(THD *thd, const Grant_tables& tables)
"case that has been forced to lowercase because "
"lower_case_table_names is set. It will not be "
"possible to remove this privilege using REVOKE.",
- db.db, safe_str(db.user), safe_str(db.host.hostname));
+ db.db, db.user, safe_str(db.host.hostname));
}
}
db.sort=get_sort(3,db.host.hostname,db.db,db.user);
@@ -2101,13 +2507,13 @@ static bool acl_load(THD *thd, const Grant_tables& tables)
acl_dbs.push(db);
}
end_read_record(&read_record_info);
- acl_dbs.sort((acl_dbs_cmp)acl_compare);
+ rebuild_acl_dbs();
acl_dbs.freeze();
const Proxies_priv_table& proxies_priv_table= tables.proxies_priv_table();
if (proxies_priv_table.table_exists())
{
- if (proxies_priv_table.init_read_record(&read_record_info, thd))
+ if (proxies_priv_table.init_read_record(&read_record_info))
DBUG_RETURN(TRUE);
while (!(read_record_info.read_record()))
{
@@ -2133,7 +2539,7 @@ static bool acl_load(THD *thd, const Grant_tables& tables)
const Roles_mapping_table& roles_mapping_table= tables.roles_mapping_table();
if (roles_mapping_table.table_exists())
{
- if (roles_mapping_table.init_read_record(&read_record_info, thd))
+ if (roles_mapping_table.init_read_record(&read_record_info))
DBUG_RETURN(TRUE);
MEM_ROOT temp_root;
@@ -2170,6 +2576,7 @@ static bool acl_load(THD *thd, const Grant_tables& tables)
init_check_host();
+ thd->bootstrap= !initialized; // keep FLUSH PRIVILEGES connection special
initialized=1;
DBUG_RETURN(FALSE);
}
@@ -2226,13 +2633,14 @@ bool acl_reload(THD *thd)
int result;
DBUG_ENTER("acl_reload");
- Grant_tables tables(Table_host | Table_user | Table_db | Table_proxies_priv |
- Table_roles_mapping, TL_READ);
+ Grant_tables tables;
/*
To avoid deadlocks we should obtain table locks before
obtaining acl_cache->lock mutex.
*/
- if (unlikely((result= tables.open_and_lock(thd))))
+ const uint tables_to_open= Table_host | Table_user | Table_db |
+ Table_proxies_priv | Table_roles_mapping;
+ if ((result= tables.open_and_lock(thd, tables_to_open, TL_READ)))
{
DBUG_ASSERT(result <= 0);
/*
@@ -2379,7 +2787,7 @@ static ulong get_sort(uint count,...)
}
-static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b)
+static int acl_compare(const ACL_ACCESS *a, const ACL_ACCESS *b)
{
if (a->sort > b->sort)
return -1;
@@ -2388,6 +2796,154 @@ static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b)
return 0;
}
+static int acl_user_compare(const ACL_USER *a, const ACL_USER *b)
+{
+ int res= strcmp(a->user.str, b->user.str);
+ if (res)
+ return res;
+
+ res= acl_compare(a, b);
+ if (res)
+ return res;
+
+ /*
+ For more deterministic results, resolve ambiguity between
+ "localhost" and "127.0.0.1"/"::1" by sorting "localhost" before
+ loopback addresses.
+ The test suite (on Windows) expects "root@localhost", even if
+ root@::1 would also match.
+ */
+ return -strcmp(a->host.hostname, b->host.hostname);
+}
+
+static int acl_db_compare(const ACL_DB *a, const ACL_DB *b)
+{
+ int res= strcmp(a->user, b->user);
+ if (res)
+ return res;
+
+ return acl_compare(a, b);
+}
+
+static void rebuild_acl_users()
+{
+ my_qsort((uchar*)dynamic_element(&acl_users, 0, ACL_USER*), acl_users.elements,
+ sizeof(ACL_USER), (qsort_cmp)acl_user_compare);
+}
+
+static void rebuild_acl_dbs()
+{
+ acl_dbs.sort(acl_db_compare);
+}
+
+
+/*
+ Return index of the first entry with given user in the array,
+ or SIZE_T_MAX if not found.
+
+ Assumes the array is sorted by get_username
+*/
+template<typename T> size_t find_first_user(T* arr, size_t len, const char *user)
+{
+ size_t low= 0;
+ size_t high= len;
+ size_t mid;
+
+ bool found= false;
+ if(!len)
+ return SIZE_T_MAX;
+
+#ifndef DBUG_OFF
+ for (uint i = 0; i < len - 1; i++)
+ DBUG_ASSERT(strcmp(arr[i].get_username(), arr[i + 1].get_username()) <= 0);
+#endif
+ while (low < high)
+ {
+ mid= low + (high - low) / 2;
+ int cmp= strcmp(arr[mid].get_username(),user);
+ if (cmp == 0)
+ found= true;
+
+ if (cmp >= 0 )
+ high= mid;
+ else
+ low= mid + 1;
+ }
+ return (!found || low == len || strcmp(arr[low].get_username(), user)!=0 )?SIZE_T_MAX:low;
+}
+
+static size_t acl_find_user_by_name(const char *user)
+{
+ return find_first_user<ACL_USER>((ACL_USER *)acl_users.buffer,acl_users.elements,user);
+}
+
+static size_t acl_find_db_by_username(const char *user)
+{
+ return find_first_user<ACL_DB>(acl_dbs.front(), acl_dbs.elements(), user);
+}
+
+static bool match_db(ACL_DB *acl_db, const char *db, my_bool db_is_pattern)
+{
+ return !acl_db->db || (db && !wild_compare(db, acl_db->db, db_is_pattern));
+}
+
+
+/*
+ Look up the best matching entry in acl_users or acl_dbs for the given
+ user, host and ip parameters (and also db, in the case of ACL_DB).
+
+ Historical note:
+
+ In the past, both arrays were sorted just by the ACL_ENTRY::sort field and
+ were searched linearly until the first matching (username, host) pair was found.
+
+ This function uses optimizations (binary search by username), yet preserves
+ the historical behavior, i.e. it returns the match with the highest ACL_ENTRY::sort.
+*/
+template <typename T> T* find_by_username_or_anon(T* arr, size_t len, const char *user,
+ const char *host, const char *ip,
+ const char *db, my_bool db_is_pattern, bool (*match_db_func)(T*,const char *,my_bool))
+{
+ size_t i;
+ T *ret = NULL;
+
+ // Check entries matching user name.
+ size_t start = find_first_user(arr, len, user);
+ for (i= start; i < len; i++)
+ {
+ T *entry= &arr[i];
+ if (i > start && strcmp(user, entry->get_username()))
+ break;
+
+ if (compare_hostname(&entry->host, host, ip) && (!match_db_func || match_db_func(entry, db, db_is_pattern)))
+ {
+ ret= entry;
+ break;
+ }
+ }
+
+ // Also check the anonymous user (empty user name).
+ // Because of the sort by name, anonymous-user entries are grouped at the
+ // beginning of the array.
+ for (i= 0; i < len; i++)
+ {
+ T *entry = &arr[i];
+ if (*entry->get_username() || (ret && acl_compare(entry, ret) >= 0))
+ break;
+ if (compare_hostname(&entry->host, host, ip) && (!match_db_func || match_db_func(entry, db, db_is_pattern)))
+ {
+ ret= entry;
+ break;
+ }
+ }
+ return ret;
+}
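The lookup is a two-pass scan: first over the block of entries carrying the requested user name (located with the binary search above), then over the anonymous entries grouped at the front of the array, keeping an anonymous match only if it outranks the named one. A toy model of that logic, with made-up data and a simplified host match standing in for compare_hostname() (illustration only, not the server's structures):

#include <cstddef>
#include <cstdio>
#include <cstring>

/* Toy entries: sorted by user name (anonymous "" first), then by
   descending specificity weight within each name. */
struct Toy { const char *user; const char *host; int sort; };

static bool host_matches(const Toy &e, const char *host)
{
  return !strcmp(e.host, "%") || !strcmp(e.host, host);
}

static const Toy *lookup(const Toy *arr, size_t len,
                         const char *user, const char *host)
{
  const Toy *best= NULL;
  /* Pass 1: first host match inside the block of entries for `user`
     (a linear filter here; the real code jumps there via binary search). */
  for (size_t i= 0; i < len; i++)
    if (!strcmp(arr[i].user, user) && host_matches(arr[i], host))
    {
      best= &arr[i];
      break;
    }
  /* Pass 2: an anonymous entry wins only if it outranks the pass-1 match. */
  for (size_t i= 0; i < len && !*arr[i].user; i++)
    if ((!best || arr[i].sort > best->sort) && host_matches(arr[i], host))
      return &arr[i];
  return best;
}

int main()
{
  Toy acl[]= { { "", "%", 1 }, { "bob", "db1", 90 }, { "bob", "%", 5 } };
  const Toy *m= lookup(acl, 3, "bob", "db1");
  printf("matched '%s'@'%s'\n", m->user, m->host);  /* 'bob'@'db1' */
  return 0;
}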
+
+static ACL_DB *acl_db_find(const char *db, const char *user, const char *host, const char *ip, my_bool db_is_pattern)
+{
+ return find_by_username_or_anon(acl_dbs.front(), acl_dbs.elements(),
+ user, host, ip, db, db_is_pattern, match_db);
+}
+
/*
Gets user credentials without authentication and resource limit checks.
@@ -2409,13 +2965,13 @@ bool acl_getroot(Security_context *sctx, const char *user, const char *host,
const char *ip, const char *db)
{
int res= 1;
- uint i;
ACL_USER *acl_user= 0;
DBUG_ENTER("acl_getroot");
DBUG_PRINT("enter", ("Host: '%s', Ip: '%s', User: '%s', db: '%s'",
host, ip, user, db));
- sctx->user= user;
+ sctx->init();
+ sctx->user= *user ? user : NULL;
sctx->host= host;
sctx->ip= ip;
sctx->host_or_ip= host ? host : (safe_str(ip));
@@ -2431,9 +2987,7 @@ bool acl_getroot(Security_context *sctx, const char *user, const char *host,
mysql_mutex_lock(&acl_cache->lock);
- sctx->master_access= 0;
sctx->db_access= 0;
- *sctx->priv_user= *sctx->priv_host= *sctx->priv_role= 0;
if (host[0]) // User, not Role
{
@@ -2442,26 +2996,12 @@ bool acl_getroot(Security_context *sctx, const char *user, const char *host,
if (acl_user)
{
res= 0;
- for (i=0 ; i < acl_dbs.elements() ; i++)
- {
- ACL_DB *acl_db= &acl_dbs.at(i);
- if (!acl_db->user ||
- (user && user[0] && !strcmp(user, acl_db->user)))
- {
- if (compare_hostname(&acl_db->host, host, ip))
- {
- if (!acl_db->db || (db && !wild_compare(db, acl_db->db, 0)))
- {
- sctx->db_access= acl_db->access;
- break;
- }
- }
- }
- }
+ if (ACL_DB *acl_db= acl_db_find(db, user, host, ip, FALSE))
+ sctx->db_access= acl_db->access;
+
sctx->master_access= acl_user->access;
- if (acl_user->user.str)
- strmake_buf(sctx->priv_user, user);
+ strmake_buf(sctx->priv_user, user);
if (acl_user->host.hostname)
strmake_buf(sctx->priv_host, acl_user->host.hostname);
@@ -2473,26 +3013,12 @@ bool acl_getroot(Security_context *sctx, const char *user, const char *host,
if (acl_role)
{
res= 0;
- for (i=0 ; i < acl_dbs.elements() ; i++)
- {
- ACL_DB *acl_db= &acl_dbs.at(i);
- if (!acl_db->user ||
- (user && user[0] && !strcmp(user, acl_db->user)))
- {
- if (compare_hostname(&acl_db->host, "", ""))
- {
- if (!acl_db->db || (db && !wild_compare(db, acl_db->db, 0)))
- {
- sctx->db_access= acl_db->access;
- break;
- }
- }
- }
- }
+ if (ACL_DB *acl_db= acl_db_find(db, user, "", "", FALSE))
+ sctx->db_access = acl_db->access;
+
sctx->master_access= acl_role->access;
- if (acl_role->user.str)
- strmake_buf(sctx->priv_role, user);
+ strmake_buf(sctx->priv_role, user);
}
}
@@ -2615,148 +3141,106 @@ static void acl_update_role(const char *rolename, ulong privileges)
}
-static void acl_update_user(const char *user, const char *host,
- const char *password, size_t password_len,
- enum SSL_type ssl_type,
- const char *ssl_cipher,
- const char *x509_issuer,
- const char *x509_subject,
- USER_RESOURCES *mqh,
- ulong privileges,
- const LEX_CSTRING *plugin,
- const LEX_CSTRING *auth)
+ACL_USER::ACL_USER(THD *thd, const LEX_USER &combo,
+ const Account_options &options,
+ const ulong privileges)
{
- mysql_mutex_assert_owner(&acl_cache->lock);
+ user= safe_lexcstrdup_root(&acl_memroot, combo.user);
+ update_hostname(&host, safe_strdup_root(&acl_memroot, combo.host.str));
+ hostname_length= combo.host.length;
+ sort= get_sort(2, host.hostname, user.str);
+ password_last_changed= thd->query_start();
+ password_lifetime= -1;
+ my_init_dynamic_array(&role_grants, sizeof(ACL_USER *), 0, 8, MYF(0));
+}
+
- for (uint i=0 ; i < acl_users.elements ; i++)
+static int acl_user_update(THD *thd, ACL_USER *acl_user, uint nauth,
+ const LEX_USER &combo,
+ const Account_options &options,
+ const ulong privileges)
+{
+ if (nauth)
{
- ACL_USER *acl_user=dynamic_element(&acl_users,i,ACL_USER*);
- if (acl_user->eq(user, host))
+ if (acl_user->nauth >= nauth)
+ acl_user->nauth= nauth;
+ else
+ acl_user->alloc_auth(&acl_memroot, nauth);
+
+ USER_AUTH *auth= combo.auth;
+ for (uint i= 0; i < nauth; i++, auth= auth->next)
{
- if (plugin->str[0])
- {
- acl_user->plugin= *plugin;
- acl_user->auth_string.str= auth->str ?
- strmake_root(&acl_memroot, auth->str, auth->length) : const_cast<char*>("");
- acl_user->auth_string.length= auth->length;
- if (fix_user_plugin_ptr(acl_user))
- acl_user->plugin.str= strmake_root(&acl_memroot, plugin->str, plugin->length);
- }
- else
- if (password[0])
- {
- acl_user->auth_string.str= strmake_root(&acl_memroot, password, password_len);
- acl_user->auth_string.length= password_len;
- set_user_salt(acl_user, password, password_len);
- set_user_plugin(acl_user, password_len);
- }
- acl_user->access=privileges;
- if (mqh->specified_limits & USER_RESOURCES::QUERIES_PER_HOUR)
- acl_user->user_resource.questions=mqh->questions;
- if (mqh->specified_limits & USER_RESOURCES::UPDATES_PER_HOUR)
- acl_user->user_resource.updates=mqh->updates;
- if (mqh->specified_limits & USER_RESOURCES::CONNECTIONS_PER_HOUR)
- acl_user->user_resource.conn_per_hour= mqh->conn_per_hour;
- if (mqh->specified_limits & USER_RESOURCES::USER_CONNECTIONS)
- acl_user->user_resource.user_conn= mqh->user_conn;
- if (mqh->specified_limits & USER_RESOURCES::MAX_STATEMENT_TIME)
- acl_user->user_resource.max_statement_time= mqh->max_statement_time;
- if (ssl_type != SSL_TYPE_NOT_SPECIFIED)
- {
- acl_user->ssl_type= ssl_type;
- acl_user->ssl_cipher= (ssl_cipher ? strdup_root(&acl_memroot,ssl_cipher) :
- 0);
- acl_user->x509_issuer= (x509_issuer ? strdup_root(&acl_memroot,x509_issuer) :
- 0);
- acl_user->x509_subject= (x509_subject ?
- strdup_root(&acl_memroot,x509_subject) : 0);
- }
- /* search complete: */
- break;
+ acl_user->auth[i].plugin= auth->plugin;
+ acl_user->auth[i].auth_string= safe_lexcstrdup_root(&acl_memroot, auth->auth_str);
+ if (fix_user_plugin_ptr(acl_user->auth + i))
+ acl_user->auth[i].plugin= safe_lexcstrdup_root(&acl_memroot, auth->plugin);
+ if (set_user_auth(thd, acl_user->user, acl_user->auth + i, auth->pwtext))
+ return 1;
}
}
-}
+ acl_user->access= privileges;
+ if (options.specified_limits & USER_RESOURCES::QUERIES_PER_HOUR)
+ acl_user->user_resource.questions= options.questions;
+ if (options.specified_limits & USER_RESOURCES::UPDATES_PER_HOUR)
+ acl_user->user_resource.updates= options.updates;
+ if (options.specified_limits & USER_RESOURCES::CONNECTIONS_PER_HOUR)
+ acl_user->user_resource.conn_per_hour= options.conn_per_hour;
+ if (options.specified_limits & USER_RESOURCES::USER_CONNECTIONS)
+ acl_user->user_resource.user_conn= options.user_conn;
+ if (options.specified_limits & USER_RESOURCES::MAX_STATEMENT_TIME)
+ acl_user->user_resource.max_statement_time= options.max_statement_time;
+ if (options.ssl_type != SSL_TYPE_NOT_SPECIFIED)
+ {
+ acl_user->ssl_type= options.ssl_type;
+ acl_user->ssl_cipher= safe_strdup_root(&acl_memroot, options.ssl_cipher.str);
+ acl_user->x509_issuer= safe_strdup_root(&acl_memroot,
+ safe_str(options.x509_issuer.str));
+ acl_user->x509_subject= safe_strdup_root(&acl_memroot,
+ safe_str(options.x509_subject.str));
+ }
+ if (options.account_locked != ACCOUNTLOCK_UNSPECIFIED)
+ acl_user->account_locked= options.account_locked == ACCOUNTLOCK_LOCKED;
-static void acl_insert_role(const char *rolename, ulong privileges)
-{
- ACL_ROLE *entry;
+ /* Unexpire the user password */
+ if (nauth)
+ {
+ acl_user->password_expired= false;
+ acl_user->password_last_changed= thd->query_start();
+ }
- mysql_mutex_assert_owner(&acl_cache->lock);
- entry= new (&acl_memroot) ACL_ROLE(rolename, privileges, &acl_memroot);
- (void) my_init_dynamic_array(&entry->parent_grantee,
- sizeof(ACL_USER_BASE *), 8, 8, MYF(0));
- (void) my_init_dynamic_array(&entry->role_grants,sizeof(ACL_ROLE *),
- 8, 8, MYF(0));
+ switch (options.password_expire) {
+ case PASSWORD_EXPIRE_UNSPECIFIED:
+ break;
+ case PASSWORD_EXPIRE_NOW:
+ acl_user->password_expired= true;
+ break;
+ case PASSWORD_EXPIRE_NEVER:
+ acl_user->password_lifetime= 0;
+ break;
+ case PASSWORD_EXPIRE_DEFAULT:
+ acl_user->password_lifetime= -1;
+ break;
+ case PASSWORD_EXPIRE_INTERVAL:
+ acl_user->password_lifetime= options.num_expiration_days;
+ break;
+ }
- my_hash_insert(&acl_roles, (uchar *)entry);
+ return 0;
}
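The switch above folds the PASSWORD EXPIRE clause into a single password_lifetime value: -1 means "follow the server default", 0 means "never expires", and a positive value is a day count, with EXPIRE NOW tracked separately in password_expired. A minimal sketch of how such an encoding could be consumed, using hypothetical helper and parameter names and assuming a server-wide default expressed in days:

#include <ctime>

/* Hypothetical staleness check over the encoding written above:
   lifetime == -1 -> use the server default, 0 -> never, N > 0 -> N days. */
bool password_is_stale(bool expired_now, int lifetime,
                       int server_default_days,
                       time_t last_changed, time_t now)
{
  if (expired_now)                            /* PASSWORD EXPIRE (NOW) */
    return true;
  int days= lifetime < 0 ? server_default_days : lifetime;
  if (days == 0)                              /* EXPIRE NEVER, or default off */
    return false;
  return now - last_changed > (time_t) days * 24 * 3600;
}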
-static void acl_insert_user(const char *user, const char *host,
- const char *password, size_t password_len,
- enum SSL_type ssl_type,
- const char *ssl_cipher,
- const char *x509_issuer,
- const char *x509_subject,
- USER_RESOURCES *mqh,
- ulong privileges,
- const LEX_CSTRING *plugin,
- const LEX_CSTRING *auth)
+static void acl_insert_role(const char *rolename, ulong privileges)
{
- ACL_USER acl_user;
+ ACL_ROLE *entry;
mysql_mutex_assert_owner(&acl_cache->lock);
+ entry= new (&acl_memroot) ACL_ROLE(rolename, privileges, &acl_memroot);
+ my_init_dynamic_array(&entry->parent_grantee,
+ sizeof(ACL_USER_BASE *), 0, 8, MYF(0));
+ my_init_dynamic_array(&entry->role_grants, sizeof(ACL_ROLE *), 0, 8, MYF(0));
- bzero(&acl_user, sizeof(acl_user));
- acl_user.user.str=*user ? strdup_root(&acl_memroot,user) : 0;
- acl_user.user.length= strlen(user);
- update_hostname(&acl_user.host, safe_strdup_root(&acl_memroot, host));
- if (plugin->str[0])
- {
- acl_user.plugin= *plugin;
- acl_user.auth_string.str= auth->str ?
- strmake_root(&acl_memroot, auth->str, auth->length) : const_cast<char*>("");
- acl_user.auth_string.length= auth->length;
- if (fix_user_plugin_ptr(&acl_user))
- acl_user.plugin.str= strmake_root(&acl_memroot, plugin->str, plugin->length);
- }
- else
- {
- acl_user.auth_string.str= strmake_root(&acl_memroot, password, password_len);
- acl_user.auth_string.length= password_len;
- set_user_salt(&acl_user, password, password_len);
- set_user_plugin(&acl_user, password_len);
- }
-
- acl_user.flags= 0;
- acl_user.access=privileges;
- acl_user.user_resource = *mqh;
- acl_user.sort=get_sort(2, acl_user.host.hostname, acl_user.user.str);
- acl_user.hostname_length=(uint) strlen(host);
- acl_user.ssl_type= (ssl_type != SSL_TYPE_NOT_SPECIFIED ?
- ssl_type : SSL_TYPE_NONE);
- acl_user.ssl_cipher= ssl_cipher ? strdup_root(&acl_memroot,ssl_cipher) : 0;
- acl_user.x509_issuer= x509_issuer ? strdup_root(&acl_memroot,x509_issuer) : 0;
- acl_user.x509_subject=x509_subject ? strdup_root(&acl_memroot,x509_subject) : 0;
- (void) my_init_dynamic_array(&acl_user.role_grants, sizeof(ACL_USER *),
- 8, 8, MYF(0));
-
- (void) push_dynamic(&acl_users,(uchar*) &acl_user);
- if (!acl_user.host.hostname ||
- (acl_user.host.hostname[0] == wild_many && !acl_user.host.hostname[1]))
- allow_all_hosts=1; // Anyone can connect /* purecov: tested */
- my_qsort((uchar*) dynamic_element(&acl_users,0,ACL_USER*),acl_users.elements,
- sizeof(ACL_USER),(qsort_cmp) acl_compare);
-
- /* Rebuild 'acl_check_hosts' since 'acl_users' has been modified */
- rebuild_check_host();
-
- /*
- Rebuild every user's role_grants since 'acl_users' has been sorted
- and old pointers to ACL_USER elements are no longer valid
- */
- rebuild_role_grants();
+ my_hash_insert(&acl_roles, (uchar *)entry);
}
@@ -2767,16 +3251,13 @@ static bool acl_update_db(const char *user, const char *host, const char *db,
bool updated= false;
- for (uint i=0 ; i < acl_dbs.elements() ; i++)
+ for (size_t i= acl_find_db_by_username(user); i < acl_dbs.elements(); i++)
{
ACL_DB *acl_db= &acl_dbs.at(i);
- if ((!acl_db->user && !user[0]) ||
- (acl_db->user &&
- !strcmp(user,acl_db->user)))
+ if (!strcmp(user,acl_db->user))
{
if ((!acl_db->host.hostname && !host[0]) ||
- (acl_db->host.hostname &&
- !strcmp(host, acl_db->host.hostname)))
+ (acl_db->host.hostname && !strcmp(host, acl_db->host.hostname)))
{
if ((!acl_db->db && !db[0]) ||
(acl_db->db && !strcmp(db,acl_db->db)))
@@ -2793,6 +3274,8 @@ static bool acl_update_db(const char *user, const char *host, const char *db,
}
}
}
+ else
+ break;
}
return updated;
@@ -2824,7 +3307,7 @@ static void acl_insert_db(const char *user, const char *host, const char *db,
acl_db.initial_access= acl_db.access= privileges;
acl_db.sort=get_sort(3,acl_db.host.hostname,acl_db.db,acl_db.user);
acl_dbs.push(acl_db);
- acl_dbs.sort((acl_dbs_cmp)acl_compare);
+ rebuild_acl_dbs();
}
@@ -2870,26 +3353,16 @@ ulong acl_get(const char *host, const char *ip,
/*
Check if there are some access rights for database and user
*/
- for (i=0 ; i < acl_dbs.elements() ; i++)
+ if (ACL_DB *acl_db= acl_db_find(db,user, host, ip, db_is_pattern))
{
- ACL_DB *acl_db= &acl_dbs.at(i);
- if (!acl_db->user || !strcmp(user,acl_db->user))
- {
- if (compare_hostname(&acl_db->host,host,ip))
- {
- if (!acl_db->db || !wild_compare(db,acl_db->db,db_is_pattern))
- {
- db_access=acl_db->access;
- if (acl_db->host.hostname)
- goto exit; // Fully specified. Take it
- /* the host table is not used for roles */
- if ((!host || !host[0]) && !acl_db->host.hostname && find_acl_role(user))
- goto exit;
- break; /* purecov: tested */
- }
- }
- }
+ db_access= acl_db->access;
+ if (acl_db->host.hostname)
+ goto exit; // Fully specified. Take it
+ /* the host table is not used for roles */
+ if ((!host || !host[0]) && !acl_db->host.hostname && find_acl_role(user))
+ goto exit;
}
+
if (!db_access)
goto exit; // Can't be better
@@ -3187,7 +3660,7 @@ static int check_alter_user(THD *thd, const char *host, const char *user)
if (IF_WSREP((!WSREP(thd) || !thd->wsrep_applier), 1) &&
!thd->slave_thread && !thd->security_ctx->priv_user[0] &&
- !in_bootstrap)
+ !thd->bootstrap)
{
my_message(ER_PASSWORD_ANONYMOUS_USER,
ER_THD(thd, ER_PASSWORD_ANONYMOUS_USER),
@@ -3202,10 +3675,13 @@ static int check_alter_user(THD *thd, const char *host, const char *user)
if (!thd->slave_thread &&
IF_WSREP((!WSREP(thd) || !thd->wsrep_applier),1) &&
- (strcmp(thd->security_ctx->priv_user, user) ||
- my_strcasecmp(system_charset_info, host,
- thd->security_ctx->priv_host)))
+ !thd->security_ctx->is_priv_user(user, host))
{
+ if (thd->security_ctx->password_expired)
+ {
+ my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
+ goto end;
+ }
if (check_access(thd, UPDATE_ACL, "mysql", NULL, NULL, 1, 0))
goto end;
}
@@ -3229,13 +3705,8 @@ end:
bool check_change_password(THD *thd, LEX_USER *user)
{
LEX_USER *real_user= get_current_user(thd, user);
-
- if (fix_and_copy_user(real_user, user, thd) ||
- validate_password(real_user, thd))
- return true;
-
- *user= *real_user;
-
+ user->user= real_user->user;
+ user->host= real_user->host;
return check_alter_user(thd, user->host.str, user->user.str);
}
@@ -3252,17 +3723,20 @@ bool check_change_password(THD *thd, LEX_USER *user)
*/
bool change_password(THD *thd, LEX_USER *user)
{
- Grant_tables tables(Table_user, TL_WRITE);
+ Grant_tables tables;
/* Buffer should be extended when password length is extended. */
char buff[512];
ulong query_length= 0;
enum_binlog_format save_binlog_format;
int result=0;
+ ACL_USER *acl_user;
+ ACL_USER::AUTH auth;
+ const char *password_plugin= 0;
const CSET_STRING query_save __attribute__((unused)) = thd->query_string;
DBUG_ENTER("change_password");
DBUG_PRINT("enter",("host: '%s' user: '%s' new_password: '%s'",
- user->host.str, user->user.str, user->pwhash.str));
- DBUG_ASSERT(user->host.str != 0); // Ensured by parent
+ user->host.str, user->user.str, user->auth->auth_str.str));
+ DBUG_ASSERT(user->host.str != 0); // Ensured by caller
/*
This statement will be replicated as a statement, even when using
@@ -3273,80 +3747,87 @@ bool change_password(THD *thd, LEX_USER *user)
*/
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
- if (mysql_bin_log.is_open() ||
- (WSREP(thd) && !IF_WSREP(thd->wsrep_applier, 0)))
- {
- query_length= sprintf(buff, "SET PASSWORD FOR '%-.120s'@'%-.120s'='%-.120s'",
- safe_str(user->user.str), safe_str(user->host.str),
- safe_str(user->pwhash.str));
- }
-
if (WSREP(thd) && !IF_WSREP(thd->wsrep_applier, 0))
- {
- thd->set_query(buff, query_length, system_charset_info);
- WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, (char*)"user", NULL);
- }
+ WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL);
- if ((result= tables.open_and_lock(thd)))
+ if ((result= tables.open_and_lock(thd, Table_user, TL_WRITE)))
DBUG_RETURN(result != 1);
result= 1;
-
mysql_mutex_lock(&acl_cache->lock);
- ACL_USER *acl_user;
+
if (!(acl_user= find_user_exact(user->host.str, user->user.str)))
{
- mysql_mutex_unlock(&acl_cache->lock);
- my_message(ER_PASSWORD_NO_MATCH,
- ER_THD(thd, ER_PASSWORD_NO_MATCH), MYF(0));
+ my_error(ER_PASSWORD_NO_MATCH, MYF(0));
goto end;
}
- /* update loaded acl entry: */
- if (acl_user->plugin.str == native_password_plugin_name.str ||
- acl_user->plugin.str == old_password_plugin_name.str)
+ if (acl_user->nauth == 1 &&
+ (acl_user->auth[0].plugin.str == native_password_plugin_name.str ||
+ acl_user->auth[0].plugin.str == old_password_plugin_name.str))
{
- acl_user->auth_string.str= strmake_root(&acl_memroot, user->pwhash.str, user->pwhash.length);
- acl_user->auth_string.length= user->pwhash.length;
- set_user_salt(acl_user, user->pwhash.str, user->pwhash.length);
-
- set_user_plugin(acl_user, user->pwhash.length);
+ /* historical hack of auto-changing the plugin */
+ acl_user->auth[0].plugin= guess_auth_plugin(thd, user->auth->auth_str.length);
}
- else
- push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
- ER_SET_PASSWORD_AUTH_PLUGIN,
- ER_THD(thd, ER_SET_PASSWORD_AUTH_PLUGIN));
- if (update_user_table(thd, tables.user_table(),
- safe_str(acl_user->host.hostname),
- safe_str(acl_user->user.str),
- user->pwhash.str, user->pwhash.length))
+ for (uint i=0; i < acl_user->nauth; i++)
{
- mysql_mutex_unlock(&acl_cache->lock); /* purecov: deadcode */
+ auth= acl_user->auth[i];
+ auth.auth_string= safe_lexcstrdup_root(&acl_memroot, user->auth->auth_str);
+ int r= set_user_auth(thd, user->user, &auth, user->auth->pwtext);
+ if (r == ER_SET_PASSWORD_AUTH_PLUGIN)
+ password_plugin= auth.plugin.str;
+ else if (r)
+ goto end;
+ else
+ {
+ acl_user->auth[i]= auth;
+ password_plugin= 0;
+ break;
+ }
+ }
+ if (password_plugin)
+ {
+ my_error(ER_SET_PASSWORD_AUTH_PLUGIN, MYF(0), password_plugin);
goto end;
}
- acl_cache->clear(1); // Clear locked hostname cache
+ /* Update the password-expired state of the user in the ACL cache */
+ acl_user->password_last_changed= thd->query_start();
+ acl_user->password_expired= false;
+
+ /* If user is the connected user, reset the password expired field on sctx
+ and allow the user to exit sandbox mode */
+ if (thd->security_ctx->is_priv_user(user->user.str, user->host.str))
+ thd->security_ctx->password_expired= false;
+
+ if (update_user_table_password(thd, tables.user_table(), *acl_user))
+ goto end;
+
+ acl_cache->clear(1); // Clear locked hostname cache
mysql_mutex_unlock(&acl_cache->lock);
result= 0;
if (mysql_bin_log.is_open())
{
+ query_length= sprintf(buff, "SET PASSWORD FOR '%-.120s'@'%-.120s'='%-.120s'",
+ user->user.str, safe_str(user->host.str), auth.auth_string.str);
DBUG_ASSERT(query_length);
thd->clear_error();
result= thd->binlog_query(THD::STMT_QUERY_TYPE, buff, query_length,
FALSE, FALSE, FALSE, 0);
}
end:
+ if (result)
+ mysql_mutex_unlock(&acl_cache->lock);
close_mysql_tables(thd);
#ifdef WITH_WSREP
-WSREP_ERROR_LABEL:
+wsrep_error_label:
if (WSREP(thd) && !thd->wsrep_applier)
{
WSREP_TO_ISOLATION_END;
thd->set_query(query_save);
- thd->wsrep_exec_mode = LOCAL_STATE;
}
#endif /* WITH_WSREP */
thd->restore_stmt_binlog_format(save_binlog_format);
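Within change_password() the new loop walks every authentication method configured for the account, applies the new password to the first plugin that accepts it, and only reports ER_SET_PASSWORD_AUTH_PLUGIN if none does. A stripped-down sketch of that "first acceptor wins" control flow, with a hypothetical try_set callback standing in for set_user_auth():

#include <cstddef>

/* Hypothetical result codes: 0 = accepted, 1 = hard error,
   2 = this plugin does not support SET PASSWORD. */
enum { ACCEPTED= 0, HARD_ERROR= 1, NOT_SUPPORTED= 2 };

/* Returns 0 when some plugin accepted the new password, 1 otherwise;
   *unsupported_only reports whether every failure was "plugin does not
   support SET PASSWORD" (the ER_SET_PASSWORD_AUTH_PLUGIN case). */
int set_first_acceptor(int (*try_set)(size_t idx), size_t nauth,
                       bool *unsupported_only)
{
  *unsupported_only= false;
  for (size_t i= 0; i < nauth; i++)
  {
    int r= try_set(i);
    if (r == ACCEPTED)
    {
      *unsupported_only= false;
      return 0;
    }
    if (r == HARD_ERROR)
      return 1;
    *unsupported_only= true;     /* NOT_SUPPORTED: try the next method */
  }
  return 1;
}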
@@ -3362,20 +3843,19 @@ int acl_check_set_default_role(THD *thd, const char *host, const char *user)
int acl_set_default_role(THD *thd, const char *host, const char *user,
const char *rolename)
{
- Grant_tables tables(Table_user, TL_WRITE);
+ Grant_tables tables;
char user_key[MAX_KEY_LENGTH];
int result= 1;
int error;
ulong query_length= 0;
bool clear_role= FALSE;
char buff[512];
- enum_binlog_format save_binlog_format=
- thd->get_current_stmt_binlog_format();
+ enum_binlog_format save_binlog_format= thd->get_current_stmt_binlog_format();
const CSET_STRING query_save __attribute__((unused)) = thd->query_string;
DBUG_ENTER("acl_set_default_role");
DBUG_PRINT("enter",("host: '%s' user: '%s' rolename: '%s'",
- safe_str(user), safe_str(host), safe_str(rolename)));
+ user, safe_str(host), safe_str(rolename)));
if (rolename == current_role.str) {
if (!thd->security_ctx->priv_role[0])
@@ -3395,7 +3875,7 @@ int acl_set_default_role(THD *thd, const char *host, const char *user,
{
query_length=
sprintf(buff,"SET DEFAULT ROLE '%-.120s' FOR '%-.120s'@'%-.120s'",
- safe_str(rolename), safe_str(user), safe_str(host));
+ safe_str(rolename), user, safe_str(host));
}
/*
@@ -3411,7 +3891,7 @@ int acl_set_default_role(THD *thd, const char *host, const char *user,
{
thd->set_query(buff, query_length, system_charset_info);
// Attention!!! here is implicit goto error;
- WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, (char*)"user", NULL);
+ WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL);
}
/*
@@ -3419,7 +3899,7 @@ int acl_set_default_role(THD *thd, const char *host, const char *user,
TODO(cvicentiu) Should move this block out in a new function.
*/
{
- if ((result= tables.open_and_lock(thd)))
+ if ((result= tables.open_and_lock(thd, Table_user, TL_WRITE)))
DBUG_RETURN(result != 1);
const User_table& user_table= tables.user_table();
@@ -3452,17 +3932,8 @@ int acl_set_default_role(THD *thd, const char *host, const char *user,
/* update the mysql.user table with the new default role */
tables.user_table().table()->use_all_columns();
- if (!tables.user_table().default_role())
- {
- my_error(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE, MYF(0),
- table->alias.c_ptr(), DEFAULT_ROLE_COLUMN_IDX + 1,
- tables.user_table().num_fields(),
- static_cast<int>(table->s->mysql_version), MYSQL_VERSION_ID);
- mysql_mutex_unlock(&acl_cache->lock);
- goto end;
- }
- user_table.host()->store(host,(uint) strlen(host), system_charset_info);
- user_table.user()->store(user,(uint) strlen(user), system_charset_info);
+ user_table.set_host(host, strlen(host));
+ user_table.set_user(user, strlen(user));
key_copy((uchar *) user_key, table->record[0], table->key_info,
table->key_info->key_length);
@@ -3476,9 +3947,8 @@ int acl_set_default_role(THD *thd, const char *host, const char *user,
goto end;
}
store_record(table, record[1]);
- user_table.default_role()->store(acl_user->default_rolename.str,
- acl_user->default_rolename.length,
- system_charset_info);
+ user_table.set_default_role(acl_user->default_rolename.str,
+ acl_user->default_rolename.length);
if (unlikely(error= table->file->ha_update_row(table->record[1],
table->record[0])) &&
error != HA_ERR_RECORD_IS_THE_SAME)
@@ -3503,13 +3973,12 @@ int acl_set_default_role(THD *thd, const char *host, const char *user,
}
#ifdef WITH_WSREP
-WSREP_ERROR_LABEL:
+wsrep_error_label:
if (WSREP(thd) && !thd->wsrep_applier)
{
WSREP_TO_ISOLATION_END;
thd->set_query(query_save);
- thd->wsrep_exec_mode = LOCAL_STATE;
}
#endif /* WITH_WSREP */
@@ -3559,34 +4028,27 @@ bool is_acl_user(const char *host, const char *user)
*/
static ACL_USER *find_user_or_anon(const char *host, const char *user, const char *ip)
{
- ACL_USER *result= NULL;
- mysql_mutex_assert_owner(&acl_cache->lock);
- for (uint i=0; i < acl_users.elements; i++)
- {
- ACL_USER *acl_user_tmp= dynamic_element(&acl_users, i, ACL_USER*);
- if ((!acl_user_tmp->user.str ||
- !strcmp(user, acl_user_tmp->user.str)) &&
- compare_hostname(&acl_user_tmp->host, host, ip))
- {
- result= acl_user_tmp;
- break;
- }
- }
- return result;
+ return find_by_username_or_anon<ACL_USER>
+ (reinterpret_cast<ACL_USER*>(acl_users.buffer), acl_users.elements,
+ user, host, ip, NULL, FALSE, NULL);
}
/*
Find first entry that matches the specified user@host pair
*/
-static ACL_USER * find_user_exact(const char *host, const char *user)
+static ACL_USER *find_user_exact(const char *host, const char *user)
{
mysql_mutex_assert_owner(&acl_cache->lock);
+ size_t start= acl_find_user_by_name(user);
- for (uint i=0 ; i < acl_users.elements ; i++)
+ for (size_t i= start; i < acl_users.elements; i++)
{
- ACL_USER *acl_user=dynamic_element(&acl_users,i,ACL_USER*);
- if (acl_user->eq(user, host))
+ ACL_USER *acl_user= dynamic_element(&acl_users, i, ACL_USER*);
+ if (i > start && strcmp(acl_user->user.str, user))
+ return 0;
+
+ if (!my_strcasecmp(system_charset_info, acl_user->host.hostname, host))
return acl_user;
}
return 0;
@@ -3599,10 +4061,14 @@ static ACL_USER * find_user_wild(const char *host, const char *user, const char
{
mysql_mutex_assert_owner(&acl_cache->lock);
- for (uint i=0 ; i < acl_users.elements ; i++)
+ size_t start = acl_find_user_by_name(user);
+
+ for (size_t i= start; i < acl_users.elements; i++)
{
ACL_USER *acl_user=dynamic_element(&acl_users,i,ACL_USER*);
- if (acl_user->wild_eq(user, host, ip))
+ if (i > start && strcmp(acl_user->user.str, user))
+ break;
+ if (compare_hostname(&acl_user->host, host, ip ? ip : host))
return acl_user;
}
return 0;
@@ -3620,7 +4086,7 @@ static ACL_ROLE *find_acl_role(const char *role)
mysql_mutex_assert_owner(&acl_cache->lock);
ACL_ROLE *r= (ACL_ROLE *)my_hash_search(&acl_roles, (uchar *)role,
- safe_strlen(role));
+ strlen(role));
DBUG_RETURN(r);
}
@@ -3772,53 +4238,23 @@ bool hostname_requires_resolving(const char *hostname)
}
-void set_authentication_plugin_from_password(const User_table& user_table,
- const char* password, size_t password_length)
-{
- if (password_length == SCRAMBLED_PASSWORD_CHAR_LENGTH ||
- password_length == 0)
- {
- user_table.plugin()->store(native_password_plugin_name.str,
- native_password_plugin_name.length,
- system_charset_info);
- }
- else
- {
- DBUG_ASSERT(password_length == SCRAMBLED_PASSWORD_CHAR_LENGTH_323);
- user_table.plugin()->store(old_password_plugin_name.str,
- old_password_plugin_name.length,
- system_charset_info);
- }
- user_table.authentication_string()->store(password,
- password_length,
- system_charset_info);
-}
/**
Update record for user in mysql.user privilege table with new password.
- @param thd THD
- @param table Pointer to TABLE object for open mysql.user table
- @param host Hostname
- @param user Username
- @param new_password New password hash
- @param new_password_len Length of new password hash
-
@see change_password
*/
-static bool update_user_table(THD *thd, const User_table& user_table,
- const char *host, const char *user,
- const char *new_password, size_t new_password_len)
+static bool update_user_table_password(THD *thd, const User_table& user_table,
+ const ACL_USER &user)
{
char user_key[MAX_KEY_LENGTH];
int error;
- DBUG_ENTER("update_user_table");
- DBUG_PRINT("enter",("user: %s host: %s",user,host));
+ DBUG_ENTER("update_user_table_password");
TABLE *table= user_table.table();
table->use_all_columns();
- user_table.host()->store(host,(uint) strlen(host), system_charset_info);
- user_table.user()->store(user,(uint) strlen(user), system_charset_info);
+ user_table.set_host(user.host.hostname, user.hostname_length);
+ user_table.set_user(user.user.str, user.user.length);
key_copy((uchar *) user_key, table->record[0], table->key_info,
table->key_info->key_length);
@@ -3827,28 +4263,30 @@ static bool update_user_table(THD *thd, const User_table& user_table,
HA_READ_KEY_EXACT))
{
my_message(ER_PASSWORD_NO_MATCH, ER_THD(thd, ER_PASSWORD_NO_MATCH),
- MYF(0)); /* purecov: deadcode */
- DBUG_RETURN(1); /* purecov: deadcode */
+ MYF(0));
+ DBUG_RETURN(1);
}
- store_record(table,record[1]);
+ store_record(table, record[1]);
- if (user_table.plugin())
+ if (user_table.set_auth(user))
{
- set_authentication_plugin_from_password(user_table, new_password,
- new_password_len);
+ my_error(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE, MYF(0),
+ user_table.name().str, 3, user_table.num_fields(),
+ static_cast<int>(table->s->mysql_version), MYSQL_VERSION_ID);
+ DBUG_RETURN(1);
}
- if (user_table.password())
- user_table.password()->store(new_password, new_password_len, system_charset_info);
-
+ user_table.set_password_expired(user.password_expired);
+ user_table.set_password_last_changed(user.password_last_changed);
if (unlikely(error= table->file->ha_update_row(table->record[1],
table->record[0])) &&
error != HA_ERR_RECORD_IS_THE_SAME)
{
- table->file->print_error(error,MYF(0)); /* purecov: deadcode */
+ table->file->print_error(error,MYF(0));
DBUG_RETURN(1);
}
+
DBUG_RETURN(0);
}
@@ -3870,7 +4308,8 @@ static bool test_if_create_new_users(THD *thd)
{
TABLE_LIST tl;
ulong db_access;
- tl.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_USER_NAME, NULL, TL_WRITE);
+ tl.init_one_table(&MYSQL_SCHEMA_NAME, &MYSQL_TABLE_NAME[USER_TABLE],
+ NULL, TL_WRITE);
create_new_users= 1;
db_access=acl_get(sctx->host, sctx->ip,
@@ -3890,61 +4329,37 @@ static bool test_if_create_new_users(THD *thd)
/****************************************************************************
Handle GRANT commands
****************************************************************************/
+static USER_AUTH auth_no_password;
static int replace_user_table(THD *thd, const User_table &user_table,
- LEX_USER &combo,
- ulong rights, bool revoke_grant,
- bool can_create_user, bool no_auto_create)
+ LEX_USER * const combo, ulong rights,
+ const bool revoke_grant, const bool can_create_user,
+ const bool no_auto_create)
{
int error = -1;
+ uint nauth= 0;
bool old_row_exists=0;
- char what= (revoke_grant) ? 'N' : 'Y';
uchar user_key[MAX_KEY_LENGTH];
- bool handle_as_role= combo.is_role();
+ bool handle_as_role= combo->is_role();
LEX *lex= thd->lex;
TABLE *table= user_table.table();
+ ACL_USER new_acl_user, *old_acl_user;
DBUG_ENTER("replace_user_table");
mysql_mutex_assert_owner(&acl_cache->lock);
- if (combo.pwhash.str && combo.pwhash.str[0])
- {
- if (combo.pwhash.length != SCRAMBLED_PASSWORD_CHAR_LENGTH &&
- combo.pwhash.length != SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
- {
- DBUG_ASSERT(0);
- my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH);
- DBUG_RETURN(-1);
- }
- }
- else
- combo.pwhash= empty_clex_str;
-
- /* if the user table is not up to date, we can't handle role updates */
- if (!user_table.is_role() && handle_as_role)
- {
- my_error(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE, MYF(0),
- "user", ROLE_ASSIGN_COLUMN_IDX + 1, user_table.num_fields(),
- static_cast<int>(table->s->mysql_version), MYSQL_VERSION_ID);
- DBUG_RETURN(-1);
- }
-
table->use_all_columns();
- user_table.host()->store(combo.host.str,combo.host.length,
- system_charset_info);
- user_table.user()->store(combo.user.str,combo.user.length,
- system_charset_info);
+ user_table.set_host(combo->host.str,combo->host.length);
+ user_table.set_user(combo->user.str,combo->user.length);
key_copy(user_key, table->record[0], table->key_info,
table->key_info->key_length);
if (table->file->ha_index_read_idx_map(table->record[0], 0, user_key,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ HA_WHOLE_KEY, HA_READ_KEY_EXACT))
{
- /* what == 'N' means revoke */
- if (what == 'N')
+ if (revoke_grant)
{
- my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str);
+ my_error(ER_NONEXISTING_GRANT, MYF(0), combo->user.str, combo->host.str);
goto end;
}
/*
@@ -3960,7 +4375,7 @@ static int replace_user_table(THD *thd, const User_table &user_table,
see also test_if_create_new_users()
*/
- else if (!combo.pwhash.length && !combo.plugin.length && no_auto_create)
+ else if (!combo->has_auth() && no_auto_create)
{
my_error(ER_PASSWORD_NO_MATCH, MYF(0));
goto end;
@@ -3970,21 +4385,14 @@ static int replace_user_table(THD *thd, const User_table &user_table,
my_error(ER_CANT_CREATE_USER_WITH_GRANT, MYF(0));
goto end;
}
- else if (combo.plugin.str[0])
- {
- if (!plugin_is_ready(&combo.plugin, MYSQL_AUTHENTICATION_PLUGIN))
- {
- my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), combo.plugin.str);
- goto end;
- }
- }
+
+ if (!combo->auth)
+ combo->auth= &auth_no_password;
old_row_exists = 0;
- restore_record(table,s->default_values);
- user_table.host()->store(combo.host.str,combo.host.length,
- system_charset_info);
- user_table.user()->store(combo.user.str,combo.user.length,
- system_charset_info);
+ restore_record(table, s->default_values);
+ user_table.set_host(combo->host.str, combo->host.length);
+ user_table.set_user(combo->user.str, combo->user.length);
}
else
{
@@ -3992,138 +4400,117 @@ static int replace_user_table(THD *thd, const User_table &user_table,
store_record(table,record[1]); // Save copy for update
}
- if (!old_row_exists || combo.pwtext.length || combo.pwhash.length)
- if (!handle_as_role && validate_password(&combo, thd))
- goto end;
+ for (USER_AUTH *auth= combo->auth; auth; auth= auth->next)
+ {
+ nauth++;
+ if (auth->plugin.length)
+ {
+ if (!plugin_is_ready(&auth->plugin, MYSQL_AUTHENTICATION_PLUGIN))
+ {
+ my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), auth->plugin.str);
+ goto end;
+ }
+ }
+ else
+ auth->plugin= guess_auth_plugin(thd, auth->auth_str.length);
+ }
/* Update table columns with new privileges */
+ user_table.set_access(rights, revoke_grant);
+ rights= user_table.get_access();
- ulong priv;
- priv = SELECT_ACL;
- for (uint i= 0; i < user_table.num_privileges(); i++, priv <<= 1)
+ if (handle_as_role)
{
- if (priv & rights)
- user_table.priv_field(i)->store(&what, 1, &my_charset_latin1);
+ if (old_row_exists && !user_table.get_is_role())
+ {
+ goto end;
+ }
+ if (user_table.set_is_role(true))
+ {
+ my_error(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE, MYF(0),
+ user_table.name().str,
+ ROLE_ASSIGN_COLUMN_IDX + 1, user_table.num_fields(),
+ static_cast<int>(table->s->mysql_version), MYSQL_VERSION_ID);
+ goto end;
+ }
}
+ else
+ {
+ old_acl_user= find_user_exact(combo->host.str, combo->user.str);
+ if ((old_acl_user != NULL) != old_row_exists)
+ {
+ my_error(ER_PASSWORD_NO_MATCH, MYF(0));
+ goto end;
+ }
+ new_acl_user= old_row_exists ? *old_acl_user :
+ ACL_USER(thd, *combo, lex->account_options, rights);
+ if (acl_user_update(thd, &new_acl_user, nauth,
+ *combo, lex->account_options, rights))
+ goto end;
- rights= user_table.get_access();
+ if (user_table.set_auth(new_acl_user))
+ {
+ my_error(ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE, MYF(0),
+ user_table.name().str, 3, user_table.num_fields(),
+ static_cast<int>(table->s->mysql_version), MYSQL_VERSION_ID);
+ DBUG_RETURN(1);
+ }
- DBUG_PRINT("info",("table fields: %d", user_table.num_fields()));
- /* If we don't have a password column, we'll use the authentication_string
- column later. */
- if (combo.pwhash.str[0] && user_table.password())
- user_table.password()->store(combo.pwhash.str, combo.pwhash.length,
- system_charset_info);
- /* We either have the password column, the plugin column, or both. Otherwise
- we have a corrupt user table. */
- DBUG_ASSERT(user_table.password() || user_table.plugin());
- if (user_table.ssl_type()) /* From 4.0.0 we have more fields */
- {
- /* We write down SSL related ACL stuff */
- switch (lex->ssl_type) {
- case SSL_TYPE_ANY:
- user_table.ssl_type()->store(STRING_WITH_LEN("ANY"),
- &my_charset_latin1);
- user_table.ssl_cipher()->store("", 0, &my_charset_latin1);
- user_table.x509_issuer()->store("", 0, &my_charset_latin1);
- user_table.x509_subject()->store("", 0, &my_charset_latin1);
+ switch (lex->account_options.ssl_type) {
+ case SSL_TYPE_NOT_SPECIFIED:
break;
+ case SSL_TYPE_NONE:
+ case SSL_TYPE_ANY:
case SSL_TYPE_X509:
- user_table.ssl_type()->store(STRING_WITH_LEN("X509"),
- &my_charset_latin1);
- user_table.ssl_cipher()->store("", 0, &my_charset_latin1);
- user_table.x509_issuer()->store("", 0, &my_charset_latin1);
- user_table.x509_subject()->store("", 0, &my_charset_latin1);
+ user_table.set_ssl_type(lex->account_options.ssl_type);
+ user_table.set_ssl_cipher("", 0);
+ user_table.set_x509_issuer("", 0);
+ user_table.set_x509_subject("", 0);
break;
case SSL_TYPE_SPECIFIED:
- user_table.ssl_type()->store(STRING_WITH_LEN("SPECIFIED"),
- &my_charset_latin1);
- user_table.ssl_cipher()->store("", 0, &my_charset_latin1);
- user_table.x509_issuer()->store("", 0, &my_charset_latin1);
- user_table.x509_subject()->store("", 0, &my_charset_latin1);
- if (lex->ssl_cipher)
- user_table.ssl_cipher()->store(lex->ssl_cipher,
- strlen(lex->ssl_cipher),
- system_charset_info);
- if (lex->x509_issuer)
- user_table.x509_issuer()->store(lex->x509_issuer,
- strlen(lex->x509_issuer),
- system_charset_info);
- if (lex->x509_subject)
- user_table.x509_subject()->store(lex->x509_subject,
- strlen(lex->x509_subject),
- system_charset_info);
- break;
- case SSL_TYPE_NOT_SPECIFIED:
- break;
- case SSL_TYPE_NONE:
- user_table.ssl_type()->store("", 0, &my_charset_latin1);
- user_table.ssl_cipher()->store("", 0, &my_charset_latin1);
- user_table.x509_issuer()->store("", 0, &my_charset_latin1);
- user_table.x509_subject()->store("", 0, &my_charset_latin1);
+ user_table.set_ssl_type(lex->account_options.ssl_type);
+ if (lex->account_options.ssl_cipher.str)
+ user_table.set_ssl_cipher(lex->account_options.ssl_cipher.str,
+ lex->account_options.ssl_cipher.length);
+ else
+ user_table.set_ssl_cipher("", 0);
+ if (lex->account_options.x509_issuer.str)
+ user_table.set_x509_issuer(lex->account_options.x509_issuer.str,
+ lex->account_options.x509_issuer.length);
+ else
+ user_table.set_x509_issuer("", 0);
+ if (lex->account_options.x509_subject.str)
+ user_table.set_x509_subject(lex->account_options.x509_subject.str,
+ lex->account_options.x509_subject.length);
+ else
+ user_table.set_x509_subject("", 0);
break;
}
- USER_RESOURCES mqh= lex->mqh;
- if (mqh.specified_limits & USER_RESOURCES::QUERIES_PER_HOUR)
- user_table.max_questions()->store((longlong) mqh.questions, TRUE);
- if (mqh.specified_limits & USER_RESOURCES::UPDATES_PER_HOUR)
- user_table.max_updates()->store((longlong) mqh.updates, TRUE);
- if (mqh.specified_limits & USER_RESOURCES::CONNECTIONS_PER_HOUR)
- user_table.max_connections()->store((longlong) mqh.conn_per_hour, TRUE);
- if (user_table.max_user_connections() &&
- (mqh.specified_limits & USER_RESOURCES::USER_CONNECTIONS))
- user_table.max_user_connections()->store((longlong) mqh.user_conn, FALSE);
- if (user_table.plugin())
- {
- user_table.plugin()->set_notnull();
- user_table.authentication_string()->set_notnull();
- if (combo.plugin.str[0])
- {
- DBUG_ASSERT(combo.pwhash.str[0] == 0);
- if (user_table.password())
- user_table.password()->reset();
- user_table.plugin()->store(combo.plugin.str, combo.plugin.length,
- system_charset_info);
- user_table.authentication_string()->store(combo.auth.str, combo.auth.length,
- system_charset_info);
- }
- if (combo.pwhash.str[0])
- {
- DBUG_ASSERT(combo.plugin.str[0] == 0);
- /* We have Password column. */
- if (user_table.password())
- {
- user_table.plugin()->reset();
- user_table.authentication_string()->reset();
- }
- else
- {
- /* We do not have Password column. Use PLUGIN && Authentication_string
- columns instead. */
- set_authentication_plugin_from_password(user_table,
- combo.pwhash.str,
- combo.pwhash.length);
- }
- }
+ if (lex->account_options.specified_limits & USER_RESOURCES::QUERIES_PER_HOUR)
+ user_table.set_max_questions(lex->account_options.questions);
+ if (lex->account_options.specified_limits & USER_RESOURCES::UPDATES_PER_HOUR)
+ user_table.set_max_updates(lex->account_options.updates);
+ if (lex->account_options.specified_limits & USER_RESOURCES::CONNECTIONS_PER_HOUR)
+ user_table.set_max_connections(lex->account_options.conn_per_hour);
+ if (lex->account_options.specified_limits & USER_RESOURCES::USER_CONNECTIONS)
+ user_table.set_max_user_connections(lex->account_options.user_conn);
+ if (lex->account_options.specified_limits & USER_RESOURCES::MAX_STATEMENT_TIME)
+ user_table.set_max_statement_time(lex->account_options.max_statement_time);
- if (user_table.max_statement_time())
- {
- if (mqh.specified_limits & USER_RESOURCES::MAX_STATEMENT_TIME)
- user_table.max_statement_time()->store(mqh.max_statement_time);
- }
- }
- mqh_used= (mqh_used || mqh.questions || mqh.updates || mqh.conn_per_hour ||
- mqh.user_conn || mqh.max_statement_time != 0.0);
+ mqh_used= (mqh_used || lex->account_options.questions || lex->account_options.updates ||
+ lex->account_options.conn_per_hour || lex->account_options.user_conn ||
+ lex->account_options.max_statement_time != 0.0);
- /* table format checked earlier */
- if (handle_as_role)
+ if (lex->account_options.account_locked != ACCOUNTLOCK_UNSPECIFIED)
+ user_table.set_account_locked(new_acl_user.account_locked);
+
+ if (nauth)
+ user_table.set_password_last_changed(new_acl_user.password_last_changed);
+ if (lex->account_options.password_expire != PASSWORD_EXPIRE_UNSPECIFIED)
{
- if (old_row_exists && !user_table.check_is_role())
- {
- goto end;
- }
- user_table.is_role()->store("Y", 1, system_charset_info);
+ user_table.set_password_lifetime(new_acl_user.password_lifetime);
+ user_table.set_password_expired(new_acl_user.password_expired);
}
}
@@ -4163,37 +4550,31 @@ end:
if (likely(!error))
{
acl_cache->clear(1); // Clear privilege cache
- if (old_row_exists)
+ if (handle_as_role)
{
- if (handle_as_role)
- acl_update_role(combo.user.str, rights);
+ if (old_row_exists)
+ acl_update_role(combo->user.str, rights);
else
- acl_update_user(combo.user.str, combo.host.str,
- combo.pwhash.str, combo.pwhash.length,
- lex->ssl_type,
- lex->ssl_cipher,
- lex->x509_issuer,
- lex->x509_subject,
- &lex->mqh,
- rights,
- &combo.plugin,
- &combo.auth);
+ acl_insert_role(combo->user.str, rights);
}
else
{
- if (handle_as_role)
- acl_insert_role(combo.user.str, rights);
+ if (old_acl_user)
+ *old_acl_user= new_acl_user;
else
- acl_insert_user(combo.user.str, combo.host.str,
- combo.pwhash.str, combo.pwhash.length,
- lex->ssl_type,
- lex->ssl_cipher,
- lex->x509_issuer,
- lex->x509_subject,
- &lex->mqh,
- rights,
- &combo.plugin,
- &combo.auth);
+ {
+ push_new_user(new_acl_user);
+ rebuild_acl_users();
+
+ /* Rebuild 'acl_check_hosts' since 'acl_users' has been modified */
+ rebuild_check_host();
+
+ /*
+ Rebuild every user's role_grants since 'acl_users' has been sorted
+ and old pointers to ACL_USER elements are no longer valid
+ */
+ rebuild_role_grants();
+ }
}
}
DBUG_RETURN(error);
@@ -4206,13 +4587,13 @@ end:
static int replace_db_table(TABLE *table, const char *db,
const LEX_USER &combo,
- ulong rights, bool revoke_grant)
+ ulong rights, const bool revoke_grant)
{
uint i;
ulong priv,store_rights;
bool old_row_exists=0;
int error;
- char what= (revoke_grant) ? 'N' : 'Y';
+ char what= revoke_grant ? 'N' : 'Y';
uchar user_key[MAX_KEY_LENGTH];
DBUG_ENTER("replace_db_table");
@@ -4241,7 +4622,7 @@ static int replace_db_table(TABLE *table, const char *db,
HA_WHOLE_KEY,
HA_READ_KEY_EXACT))
{
- if (what == 'N')
+ if (revoke_grant)
{ // no row, no revoke
my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str);
goto abort;
@@ -4501,6 +4882,13 @@ replace_proxies_priv_table(THD *thd, TABLE *table, const LEX_USER *user,
DBUG_ENTER("replace_proxies_priv_table");
+ if (!table)
+ {
+ my_error(ER_NO_SUCH_TABLE, MYF(0), MYSQL_SCHEMA_NAME.str,
+ MYSQL_TABLE_NAME[PROXIES_PRIV_TABLE].str);
+ DBUG_RETURN(-1);
+ }
+
/* Check if there is such a user in user table in memory? */
if (!find_user_wild(user->host.str,user->user.str))
{
@@ -5298,6 +5686,13 @@ static int replace_routine_table(THD *thd, GRANT_NAME *grant_name,
HASH *hash= sph->get_priv_hash();
DBUG_ENTER("replace_routine_table");
+ if (!table)
+ {
+ my_error(ER_NO_SUCH_TABLE, MYF(0), MYSQL_SCHEMA_NAME.str,
+ MYSQL_TABLE_NAME[PROCS_PRIV_TABLE].str);
+ DBUG_RETURN(-1);
+ }
+
if (revoke_grant && !grant_name->init_privs) // only inherited role privs
{
my_hash_delete(hash, (uchar*) grant_name);
@@ -5893,22 +6288,29 @@ static bool merge_role_db_privileges(ACL_ROLE *grantee, const char *dbname,
}
update_flags|= update_role_db(merged, first, access, grantee->user.str);
- /*
- to make this code a bit simpler, we sort on deletes, to move
- deleted elements to the end of the array. strictly speaking it's
- unnecessary, it'd be faster to remove them in one O(N) array scan.
-
- on the other hand, qsort on almost sorted array is pretty fast anyway...
- */
- if (update_flags & (2|4))
- { // inserted or deleted, need to sort
- acl_dbs.sort((acl_dbs_cmp)acl_compare);
- }
if (update_flags & 4)
- { // deleted, trim the end
- while (acl_dbs.elements() && acl_dbs.back()->sort == 0)
- acl_dbs.pop();
+ {
+ // Remove elements marked for deletion.
+ uint count= 0;
+ for (uint i= 0; i < acl_dbs.elements(); i++)
+ {
+ ACL_DB *acl_db= &acl_dbs.at(i);
+ if (acl_db->sort)
+ {
+ if (i > count)
+ acl_dbs.set(count, *acl_db);
+ count++;
+ }
+ }
+ acl_dbs.elements(count);
}
+
+
+ if (update_flags & 2)
+ { // inserted, need to sort
+ rebuild_acl_dbs();
+ }
+
return update_flags;
}
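The deletion path above no longer sorts deleted entries to the tail; it compacts acl_dbs in place, keeping only entries whose sort weight is non-zero and then shrinking the element count. This is the classic erase-remove pattern; a minimal sketch on a hypothetical vector of sort weights, with 0 marking a deleted slot:

#include <algorithm>
#include <vector>

void compact(std::vector<int> &sort_weights)
{
  /* Keep the relative order of survivors, drop every zero-weight slot. */
  sort_weights.erase(std::remove_if(sort_weights.begin(), sort_weights.end(),
                                    [](int s) { return s == 0; }),
                     sort_weights.end());
}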
@@ -6294,33 +6696,17 @@ static bool merge_one_role_privileges(ACL_ROLE *grantee)
static bool has_auth(LEX_USER *user, LEX *lex)
{
- return user->pwtext.length || user->pwhash.length || user->plugin.length || user->auth.length ||
- lex->ssl_type != SSL_TYPE_NOT_SPECIFIED || lex->ssl_cipher ||
- lex->x509_issuer || lex->x509_subject ||
- lex->mqh.specified_limits;
-}
-
-static bool fix_and_copy_user(LEX_USER *to, LEX_USER *from, THD *thd)
-{
- if (to != from)
- {
- /* preserve authentication information, if LEX_USER was reallocated */
- to->pwtext= from->pwtext;
- to->pwhash= from->pwhash;
- to->plugin= from->plugin;
- to->auth= from->auth;
- }
-
- if (fix_lex_user(thd, to))
- return true;
-
- return false;
+ return user->has_auth() ||
+ lex->account_options.ssl_type != SSL_TYPE_NOT_SPECIFIED ||
+ lex->account_options.ssl_cipher.str ||
+ lex->account_options.x509_issuer.str ||
+ lex->account_options.x509_subject.str ||
+ lex->account_options.specified_limits;
}
static bool copy_and_check_auth(LEX_USER *to, LEX_USER *from, THD *thd)
{
- if (fix_and_copy_user(to, from, thd))
- return true;
+ to->auth= from->auth;
// if changing auth for an existing user
if (has_auth(to, thd->lex) && find_user_exact(to->host.str, to->user.str))
@@ -6432,10 +6818,10 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
Open the mysql.user and mysql.tables_priv tables.
Don't open column table if we don't need it !
*/
- int maybe_columns_priv= 0;
+ int tables_to_open= Table_user | Table_tables_priv;
if (column_priv ||
(revoke_grant && ((rights & COL_ACLS) || columns.elements)))
- maybe_columns_priv= Table_columns_priv;
+ tables_to_open|= Table_columns_priv;
/*
The lock api is depending on the thd->lex variable which needs to be
@@ -6450,9 +6836,8 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
*/
thd->lex->sql_command= backup.sql_command;
- Grant_tables tables(Table_user | Table_tables_priv | maybe_columns_priv,
- TL_WRITE);
- if ((result= tables.open_and_lock(thd)))
+ Grant_tables tables;
+ if ((result= tables.open_and_lock(thd, tables_to_open, TL_WRITE)))
{
thd->lex->restore_backup_query_tables_list(&backup);
DBUG_RETURN(result != 1);
@@ -6477,7 +6862,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list,
}
/* Create user if needed */
error= copy_and_check_auth(Str, tmp_Str, thd) ||
- replace_user_table(thd, tables.user_table(), *Str,
+ replace_user_table(thd, tables.user_table(), Str,
0, revoke_grant, create_new_users,
MY_TEST(thd->variables.sql_mode &
MODE_NO_AUTO_CREATE_USER));
@@ -6632,8 +7017,8 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
}
- Grant_tables tables(Table_user | Table_procs_priv, TL_WRITE);
- if ((result= tables.open_and_lock(thd)))
+ Grant_tables tables;
+ if ((result= tables.open_and_lock(thd, Table_user | Table_procs_priv, TL_WRITE)))
DBUG_RETURN(result != 1);
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
@@ -6657,7 +7042,7 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list,
}
/* Create user if needed */
if (copy_and_check_auth(Str, tmp_Str, thd) ||
- replace_user_table(thd, tables.user_table(), *Str,
+ replace_user_table(thd, tables.user_table(), Str,
0, revoke_grant, create_new_users,
MY_TEST(thd->variables.sql_mode &
MODE_NO_AUTO_CREATE_USER)))
@@ -6690,12 +7075,8 @@ bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list,
}
}
- /* TODO(cvicentiu) refactor replace_routine_table to use Tables_procs_priv
- instead of TABLE directly. */
- if (tables.procs_priv_table().no_such_table() ||
- replace_routine_table(thd, grant_name, tables.procs_priv_table().table(),
- *Str, db_name, table_name, sph, rights,
- revoke_grant) != 0)
+ if (replace_routine_table(thd, grant_name, tables.procs_priv_table().table(),
+ *Str, db_name, table_name, sph, rights, revoke_grant) != 0)
{
result= TRUE;
continue;
@@ -6833,8 +7214,8 @@ bool mysql_grant_role(THD *thd, List <LEX_USER> &list, bool revoke)
no_auto_create_user= MY_TEST(thd->variables.sql_mode &
MODE_NO_AUTO_CREATE_USER);
- Grant_tables tables(Table_user | Table_roles_mapping, TL_WRITE);
- if ((result= tables.open_and_lock(thd)))
+ Grant_tables tables;
+ if ((result= tables.open_and_lock(thd, Table_user | Table_roles_mapping, TL_WRITE)))
DBUG_RETURN(result != 1);
mysql_rwlock_wrlock(&LOCK_grant);
@@ -6933,7 +7314,7 @@ bool mysql_grant_role(THD *thd, List <LEX_USER> &list, bool revoke)
user_combo.user = username;
if (copy_and_check_auth(&user_combo, &user_combo, thd) ||
- replace_user_table(thd, tables.user_table(), user_combo, 0,
+ replace_user_table(thd, tables.user_table(), &user_combo, 0,
false, create_new_user,
no_auto_create_user))
{
@@ -7073,9 +7454,9 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
proxied_user= str_list++;
}
- Grant_tables tables(Table_user | (is_proxy ? Table_proxies_priv : Table_db),
- TL_WRITE);
- if ((result= tables.open_and_lock(thd)))
+ const uint tables_to_open= Table_user | (is_proxy ? Table_proxies_priv : Table_db);
+ Grant_tables tables;
+ if ((result= tables.open_and_lock(thd, tables_to_open, TL_WRITE)))
DBUG_RETURN(result != 1);
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
@@ -7104,7 +7485,7 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
}
if (copy_and_check_auth(Str, tmp_Str, thd) ||
- replace_user_table(thd, tables.user_table(), *Str,
+ replace_user_table(thd, tables.user_table(), Str,
(!db ? rights : 0), revoke_grant, create_new_users,
MY_TEST(thd->variables.sql_mode &
MODE_NO_AUTO_CREATE_USER)))
@@ -7126,13 +7507,8 @@ bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list,
}
else if (is_proxy)
{
- /* TODO(cvicentiu) refactor replace_proxies_priv_table to use
- Proxies_priv_table instead of TABLE directly. */
- if (tables.proxies_priv_table().no_such_table() ||
- replace_proxies_priv_table (thd, tables.proxies_priv_table().table(),
- Str, proxied_user,
- rights & GRANT_ACL ? TRUE : FALSE,
- revoke_grant))
+ if (replace_proxies_priv_table(thd, tables.proxies_priv_table().table(),
+ Str, proxied_user, rights & GRANT_ACL ? TRUE : FALSE, revoke_grant))
result= true;
}
if (Str->is_role())
@@ -7268,8 +7644,7 @@ static bool grant_load(THD *thd,
{
sql_print_warning("'tables_priv' entry '%s %s@%s' "
"ignored in --skip-name-resolve mode.",
- mem_check->tname,
- safe_str(mem_check->user),
+ mem_check->tname, mem_check->user,
safe_str(mem_check->host.hostname));
continue;
}
@@ -7395,9 +7770,9 @@ bool grant_reload(THD *thd)
obtaining LOCK_grant rwlock.
*/
- Grant_tables tables(Table_tables_priv | Table_columns_priv| Table_procs_priv,
- TL_READ);
- if ((result= tables.open_and_lock(thd)))
+ Grant_tables tables;
+ const uint tables_to_open= Table_tables_priv | Table_columns_priv | Table_procs_priv;
+ if ((result= tables.open_and_lock(thd, tables_to_open, TL_READ)))
DBUG_RETURN(result != 1);
mysql_rwlock_wrlock(&LOCK_grant);
@@ -8349,26 +8724,31 @@ static void add_user_parameters(String *result, ACL_USER* acl_user,
system_charset_info);
result->append('\'');
- if (acl_user->plugin.str == native_password_plugin_name.str ||
- acl_user->plugin.str == old_password_plugin_name.str)
+ if (acl_user->nauth == 1 &&
+ (acl_user->auth->plugin.str == native_password_plugin_name.str ||
+ acl_user->auth->plugin.str == old_password_plugin_name.str))
{
- if (acl_user->auth_string.length)
+ if (acl_user->auth->auth_string.length)
{
- DBUG_ASSERT(acl_user->salt_len);
result->append(STRING_WITH_LEN(" IDENTIFIED BY PASSWORD '"));
- result->append(&acl_user->auth_string);
+ result->append(&acl_user->auth->auth_string);
result->append('\'');
}
}
else
{
result->append(STRING_WITH_LEN(" IDENTIFIED VIA "));
- result->append(&acl_user->plugin);
- if (acl_user->auth_string.length)
+ for (uint i=0; i < acl_user->nauth; i++)
{
- result->append(STRING_WITH_LEN(" USING '"));
- result->append(&acl_user->auth_string);
- result->append('\'');
+ if (i)
+ result->append(STRING_WITH_LEN(" OR "));
+ result->append(&acl_user->auth[i].plugin);
+ if (acl_user->auth[i].auth_string.length)
+ {
+ result->append(STRING_WITH_LEN(" USING '"));
+ result->append(&acl_user->auth[i].auth_string);
+ result->append('\'');
+ }
}
}
/* "show grants" SSL related stuff */
@@ -8380,14 +8760,14 @@ static void add_user_parameters(String *result, ACL_USER* acl_user,
{
int ssl_options = 0;
result->append(STRING_WITH_LEN(" REQUIRE "));
- if (acl_user->x509_issuer)
+ if (acl_user->x509_issuer[0])
{
ssl_options++;
result->append(STRING_WITH_LEN("ISSUER \'"));
result->append(acl_user->x509_issuer,strlen(acl_user->x509_issuer));
result->append('\'');
}
- if (acl_user->x509_subject)
+ if (acl_user->x509_subject[0])
{
if (ssl_options++)
result->append(' ');
@@ -8494,6 +8874,11 @@ bool mysql_show_create_user(THD *thd, LEX_USER *lex_user)
uint head_length;
DBUG_ENTER("mysql_show_create_user");
+ if (!initialized)
+ {
+ my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables");
+ DBUG_RETURN(TRUE);
+ }
if (get_show_user(thd, lex_user, &username, &hostname, NULL))
DBUG_RETURN(TRUE);
@@ -8534,6 +8919,20 @@ bool mysql_show_create_user(THD *thd, LEX_USER *lex_user)
add_user_parameters(&result, acl_user, false);
+ if (acl_user->password_expired)
+ result.append(STRING_WITH_LEN(" PASSWORD EXPIRE"));
+ else if (!acl_user->password_lifetime)
+ result.append(STRING_WITH_LEN(" PASSWORD EXPIRE NEVER"));
+ else if (acl_user->password_lifetime > 0)
+ {
+ result.append(STRING_WITH_LEN(" PASSWORD EXPIRE INTERVAL "));
+ result.append_longlong(acl_user->password_lifetime);
+ result.append(STRING_WITH_LEN(" DAY"));
+ }
+
+ if (acl_user->account_locked)
+ result.append(STRING_WITH_LEN(" ACCOUNT LOCK"));
+
protocol->prepare_for_resend();
protocol->store(result.ptr(), result.length(), result.charset());
if (protocol->write())
@@ -8870,7 +9269,7 @@ static bool show_database_privileges(THD *thd, const char *username,
const char *user, *host;
ACL_DB *acl_db= &acl_dbs.at(i);
- user= safe_str(acl_db->user);
+ user= acl_db->user;
host=acl_db->host.hostname;
/*
@@ -8956,7 +9355,7 @@ static bool show_table_and_column_privileges(THD *thd, const char *username,
GRANT_TABLE *grant_table= (GRANT_TABLE*)
my_hash_element(&column_priv_hash, index);
- user= safe_str(grant_table->user);
+ user= grant_table->user;
host= grant_table->host.hostname;
/*
@@ -9098,7 +9497,7 @@ static int show_routine_grants(THD* thd,
const char *user, *host;
GRANT_NAME *grant_proc= (GRANT_NAME*) my_hash_element(hash, index);
- user= safe_str(grant_proc->user);
+ user= grant_proc->user;
host= grant_proc->host.hostname;
/*
@@ -9446,7 +9845,7 @@ static int handle_grant_table(THD *thd, const Grant_table_base& grant_table,
if (!unlikely(error) && !*host_str)
{
// verify that we got a role or a user, as needed
- if (static_cast<const User_table&>(grant_table).check_is_role() !=
+ if (static_cast<const User_table&>(grant_table).get_is_role() !=
user_from->is_role())
error= HA_ERR_KEY_NOT_FOUND;
}
@@ -9610,8 +10009,7 @@ static int handle_grant_struct(enum enum_acl_lists struct_no, bool drop,
my_hash_delete(&acl_roles, (uchar*) acl_role);
DBUG_RETURN(1);
}
- acl_role->user.str= strdup_root(&acl_memroot, user_to->user.str);
- acl_role->user.length= user_to->user.length;
+ acl_role->user= safe_lexcstrdup_root(&acl_memroot, user_to->user);
my_hash_update(&acl_roles, (uchar*) acl_role, (uchar*) old_key,
old_key_length);
@@ -9707,8 +10105,6 @@ static int handle_grant_struct(enum enum_acl_lists struct_no, bool drop,
default:
DBUG_ASSERT(0);
}
- if (! user)
- user= "";
if (! host)
host= "";
@@ -9802,8 +10198,7 @@ static int handle_grant_struct(enum enum_acl_lists struct_no, bool drop,
{
switch ( struct_no ) {
case USER_ACL:
- acl_user->user.str= strdup_root(&acl_memroot, user_to->user.str);
- acl_user->user.length= user_to->user.length;
+ acl_user->user= safe_lexcstrdup_root(&acl_memroot, user_to->user);
update_hostname(&acl_user->host, strdup_root(&acl_memroot, user_to->host.str));
acl_user->hostname_length= strlen(acl_user->host.hostname);
break;
@@ -10151,11 +10546,11 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list, bool handle_as_role)
DBUG_RETURN(TRUE);
/* CREATE USER may be skipped on replication client. */
- Grant_tables tables(Table_user | Table_db |
- Table_tables_priv | Table_columns_priv |
- Table_procs_priv | Table_proxies_priv |
- Table_roles_mapping, TL_WRITE);
- if ((result= tables.open_and_lock(thd)))
+ Grant_tables tables;
+ const uint tables_to_open= Table_user | Table_db | Table_tables_priv |
+ Table_columns_priv | Table_procs_priv |
+ Table_proxies_priv | Table_roles_mapping;
+ if ((result= tables.open_and_lock(thd, tables_to_open, TL_WRITE)))
DBUG_RETURN(result != 1);
mysql_rwlock_wrlock(&LOCK_grant);
@@ -10187,13 +10582,6 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list, bool handle_as_role)
if (!user_name->host.str)
user_name->host= host_not_specified;
- if (fix_lex_user(thd, user_name))
- {
- append_user(thd, &wrong_users, user_name);
- result= TRUE;
- continue;
- }
-
/*
Search all in-memory structures and grant tables
for a mention of the new user/role name.
@@ -10238,7 +10626,7 @@ bool mysql_create_user(THD *thd, List <LEX_USER> &list, bool handle_as_role)
}
}
- if (replace_user_table(thd, tables.user_table(), *user_name, 0, 0, 1, 0))
+ if (replace_user_table(thd, tables.user_table(), user_name, 0, 0, 1, 0))
{
append_user(thd, &wrong_users, user_name);
result= TRUE;
@@ -10328,11 +10716,11 @@ bool mysql_drop_user(THD *thd, List <LEX_USER> &list, bool handle_as_role)
DBUG_PRINT("entry", ("Handle as %s", handle_as_role ? "role" : "user"));
/* DROP USER may be skipped on replication client. */
- Grant_tables tables(Table_user | Table_db |
- Table_tables_priv | Table_columns_priv |
- Table_procs_priv | Table_proxies_priv |
- Table_roles_mapping, TL_WRITE);
- if ((result= tables.open_and_lock(thd)))
+ Grant_tables tables;
+ const uint tables_to_open= Table_user | Table_db | Table_tables_priv |
+ Table_columns_priv | Table_procs_priv |
+ Table_proxies_priv | Table_roles_mapping;
+ if ((result= tables.open_and_lock(thd, tables_to_open, TL_WRITE)))
DBUG_RETURN(result != 1);
thd->variables.sql_mode&= ~MODE_PAD_CHAR_TO_FULL_LENGTH;
@@ -10438,11 +10826,11 @@ bool mysql_rename_user(THD *thd, List <LEX_USER> &list)
DBUG_ENTER("mysql_rename_user");
/* RENAME USER may be skipped on replication client. */
- Grant_tables tables(Table_user | Table_db |
- Table_tables_priv | Table_columns_priv |
- Table_procs_priv | Table_proxies_priv |
- Table_roles_mapping, TL_WRITE);
- if ((result= tables.open_and_lock(thd)))
+ Grant_tables tables;
+ const uint tables_to_open= Table_user | Table_db | Table_tables_priv |
+ Table_columns_priv | Table_procs_priv |
+ Table_proxies_priv | Table_roles_mapping;
+ if ((result= tables.open_and_lock(thd, tables_to_open, TL_WRITE)))
DBUG_RETURN(result != 1);
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
@@ -10481,8 +10869,12 @@ bool mysql_rename_user(THD *thd, List <LEX_USER> &list)
continue;
}
some_users_renamed= TRUE;
+ rebuild_acl_users();
}
+ /* Rebuild 'acl_dbs' since 'acl_users' has been modified */
+ rebuild_acl_dbs();
+
/* Rebuild 'acl_check_hosts' since 'acl_users' has been modified */
rebuild_check_host();
@@ -10524,8 +10916,8 @@ int mysql_alter_user(THD* thd, List<LEX_USER> &users_list)
bool some_users_altered= false;
/* The only table we're altering is the user table. */
- Grant_tables tables(Table_user, TL_WRITE);
- if ((result= tables.open_and_lock(thd)))
+ Grant_tables tables;
+ if ((result= tables.open_and_lock(thd, Table_user, TL_WRITE)))
DBUG_RETURN(result != 1);
/* Lock ACL data structures until we finish altering all users. */
@@ -10534,13 +10926,12 @@ int mysql_alter_user(THD* thd, List<LEX_USER> &users_list)
LEX_USER *tmp_lex_user;
List_iterator<LEX_USER> users_list_iterator(users_list);
+
while ((tmp_lex_user= users_list_iterator++))
{
LEX_USER* lex_user= get_current_user(thd, tmp_lex_user, false);
- if (!lex_user ||
- fix_lex_user(thd, lex_user) ||
- replace_user_table(thd, tables.user_table(), *lex_user, 0,
- false, false, true))
+ if (!lex_user || replace_user_table(thd, tables.user_table(), lex_user, 0,
+ false, false, true))
{
thd->clear_error();
append_user(thd, &wrong_users, tmp_lex_user);
@@ -10581,9 +10972,7 @@ int mysql_alter_user(THD* thd, List<LEX_USER> &users_list)
static bool
-mysql_revoke_sp_privs(THD *thd,
- Grant_tables *tables,
- const Sp_handler *sph,
+mysql_revoke_sp_privs(THD *thd, Grant_tables *tables, const Sp_handler *sph,
const LEX_USER *lex_user)
{
bool rc= false;
@@ -10594,7 +10983,7 @@ mysql_revoke_sp_privs(THD *thd,
{
const char *user,*host;
GRANT_NAME *grant_proc= (GRANT_NAME*) my_hash_element(hash, counter);
- user= safe_str(grant_proc->user);
+ user= grant_proc->user;
host= safe_str(grant_proc->host.hostname);
if (!strcmp(lex_user->user.str, user) &&
@@ -10639,11 +11028,11 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
ACL_DB *acl_db;
DBUG_ENTER("mysql_revoke_all");
- Grant_tables tables(Table_user | Table_db |
- Table_tables_priv | Table_columns_priv |
- Table_procs_priv | Table_proxies_priv |
- Table_roles_mapping, TL_WRITE);
- if ((result= tables.open_and_lock(thd)))
+ Grant_tables tables;
+ const uint tables_to_open= Table_user | Table_db | Table_tables_priv |
+ Table_columns_priv | Table_procs_priv |
+ Table_proxies_priv | Table_roles_mapping;
+ if ((result= tables.open_and_lock(thd, tables_to_open, TL_WRITE)))
DBUG_RETURN(result != 1);
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
@@ -10669,7 +11058,7 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
continue;
}
- if (replace_user_table(thd, tables.user_table(), *lex_user,
+ if (replace_user_table(thd, tables.user_table(), lex_user,
~(ulong)0, 1, 0, 0))
{
result= -1;
@@ -10686,11 +11075,11 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
{
for (counter= 0, revoked= 0 ; counter < acl_dbs.elements() ; )
{
- const char *user,*host;
+ const char *user, *host;
- acl_db=&acl_dbs.at(counter);
+ acl_db= &acl_dbs.at(counter);
- user= safe_str(acl_db->user);
+ user= acl_db->user;
host= safe_str(acl_db->host.hostname);
if (!strcmp(lex_user->user.str, user) &&
@@ -10722,7 +11111,7 @@ bool mysql_revoke_all(THD *thd, List <LEX_USER> &list)
const char *user,*host;
GRANT_TABLE *grant_table=
(GRANT_TABLE*) my_hash_element(&column_priv_hash, counter);
- user= safe_str(grant_table->user);
+ user= grant_table->user;
host= safe_str(grant_table->host.hostname);
if (!strcmp(lex_user->user.str,user) &&
@@ -10927,11 +11316,11 @@ bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name,
Silence_routine_definer_errors error_handler;
DBUG_ENTER("sp_revoke_privileges");
- Grant_tables tables(Table_user | Table_db |
- Table_tables_priv | Table_columns_priv |
- Table_procs_priv | Table_proxies_priv |
- Table_roles_mapping, TL_WRITE);
- if ((result= tables.open_and_lock(thd)))
+ Grant_tables tables;
+ const uint tables_to_open= Table_user | Table_db | Table_tables_priv |
+ Table_columns_priv | Table_procs_priv |
+ Table_proxies_priv | Table_roles_mapping;
+ if ((result= tables.open_and_lock(thd, tables_to_open, TL_WRITE)))
DBUG_RETURN(result != 1);
DBUG_ASSERT(!thd->is_current_stmt_binlog_format_row());
@@ -11030,20 +11419,12 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name,
thd->make_lex_string(&combo->user, combo->user.str, strlen(combo->user.str));
thd->make_lex_string(&combo->host, combo->host.str, strlen(combo->host.str));
- combo->reset_auth();
-
- if(au)
- {
- combo->plugin= au->plugin;
- combo->auth= au->auth_string;
- }
+ combo->auth= NULL;
if (user_list.push_back(combo, thd->mem_root))
DBUG_RETURN(TRUE);
- thd->lex->ssl_type= SSL_TYPE_NOT_SPECIFIED;
- thd->lex->ssl_cipher= thd->lex->x509_subject= thd->lex->x509_issuer= 0;
- bzero((char*) &thd->lex->mqh, sizeof(thd->lex->mqh));
+ thd->lex->account_options.reset();
/*
Only care about whether the operation failed or succeeded
@@ -11130,9 +11511,7 @@ acl_check_proxy_grant_access(THD *thd, const char *host, const char *user,
or revoking proxy privilege, user is expected to provide entries mentioned
in mysql.user table.
*/
- if (!strcmp(thd->security_ctx->priv_user, user) &&
- !my_strcasecmp(system_charset_info, host,
- thd->security_ctx->priv_host))
+ if (thd->security_ctx->is_priv_user(user, host))
{
DBUG_PRINT("info", ("strcmp (%s, %s) my_casestrcmp (%s, %s) equal",
thd->security_ctx->priv_user, user,
@@ -11301,10 +11680,10 @@ static int show_database_grants(THD *thd, SHOW_VAR *var, char *buff,
}
#else
+static bool set_user_salt_if_needed(ACL_USER *, int, plugin_ref)
+{ return 0; }
bool check_grant(THD *, ulong, TABLE_LIST *, bool, uint, bool)
-{
- return 0;
-}
+{ return 0; }
#endif /*NO_EMBEDDED_ACCESS_CHECKS */
SHOW_VAR acl_statistics[] = {
@@ -11340,7 +11719,7 @@ bool check_role_is_granted(const char *username,
ACL_USER_BASE *root;
mysql_mutex_lock(&acl_cache->lock);
if (hostname)
- root= find_user_exact(username, hostname);
+ root= find_user_exact(hostname, username);
else
root= find_acl_role(username);
@@ -11503,7 +11882,6 @@ int fill_schema_user_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
TABLE *table= tables->table;
bool no_global_access= check_access(thd, SELECT_ACL, "mysql",
NULL, NULL, 1, 1);
- char *curr_host= thd->security_ctx->priv_host_name();
DBUG_ENTER("fill_schema_user_privileges");
if (!initialized)
@@ -11514,12 +11892,11 @@ int fill_schema_user_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
{
const char *user,*host, *is_grantable="YES";
acl_user=dynamic_element(&acl_users,counter,ACL_USER*);
- user= safe_str(acl_user->user.str);
+ user= acl_user->user.str;
host= safe_str(acl_user->host.hostname);
if (no_global_access &&
- (strcmp(thd->security_ctx->priv_user, user) ||
- my_strcasecmp(system_charset_info, curr_host, host)))
+ !thd->security_ctx->is_priv_user(user, host))
continue;
want_access= acl_user->access;
@@ -11576,7 +11953,6 @@ int fill_schema_schema_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
TABLE *table= tables->table;
bool no_global_access= check_access(thd, SELECT_ACL, "mysql",
NULL, NULL, 1, 1);
- char *curr_host= thd->security_ctx->priv_host_name();
DBUG_ENTER("fill_schema_schema_privileges");
if (!initialized)
@@ -11588,12 +11964,11 @@ int fill_schema_schema_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
const char *user, *host, *is_grantable="YES";
acl_db=&acl_dbs.at(counter);
- user= safe_str(acl_db->user);
+ user= acl_db->user;
host= safe_str(acl_db->host.hostname);
if (no_global_access &&
- (strcmp(thd->security_ctx->priv_user, user) ||
- my_strcasecmp(system_charset_info, curr_host, host)))
+ !thd->security_ctx->is_priv_user(user, host))
continue;
want_access=acl_db->access;
@@ -11650,7 +12025,6 @@ int fill_schema_table_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
TABLE *table= tables->table;
bool no_global_access= check_access(thd, SELECT_ACL, "mysql",
NULL, NULL, 1, 1);
- char *curr_host= thd->security_ctx->priv_host_name();
DBUG_ENTER("fill_schema_table_privileges");
mysql_rwlock_rdlock(&LOCK_grant);
@@ -11660,12 +12034,11 @@ int fill_schema_table_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
const char *user, *host, *is_grantable= "YES";
GRANT_TABLE *grant_table= (GRANT_TABLE*) my_hash_element(&column_priv_hash,
index);
- user= safe_str(grant_table->user);
+ user= grant_table->user;
host= safe_str(grant_table->host.hostname);
if (no_global_access &&
- (strcmp(thd->security_ctx->priv_user, user) ||
- my_strcasecmp(system_charset_info, curr_host, host)))
+ !thd->security_ctx->is_priv_user(user, host))
continue;
ulong table_access= grant_table->privs;
@@ -11732,7 +12105,6 @@ int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
TABLE *table= tables->table;
bool no_global_access= check_access(thd, SELECT_ACL, "mysql",
NULL, NULL, 1, 1);
- char *curr_host= thd->security_ctx->priv_host_name();
DBUG_ENTER("fill_schema_table_privileges");
mysql_rwlock_rdlock(&LOCK_grant);
@@ -11742,12 +12114,11 @@ int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, COND *cond)
const char *user, *host, *is_grantable= "YES";
GRANT_TABLE *grant_table= (GRANT_TABLE*) my_hash_element(&column_priv_hash,
index);
- user= safe_str(grant_table->user);
+ user= grant_table->user;
host= safe_str(grant_table->host.hostname);
if (no_global_access &&
- (strcmp(thd->security_ctx->priv_user, user) ||
- my_strcasecmp(system_charset_info, curr_host, host)))
+ !thd->security_ctx->is_priv_user(user, host))
continue;
ulong table_access= grant_table->cols;
@@ -12085,6 +12456,7 @@ struct MPVIO_EXT :public MYSQL_PLUGIN_VIO
char *pkt;
uint pkt_len;
} cached_server_packet;
+ uint curr_auth; ///< an index in acl_user->auth[]
int packets_read, packets_written; ///< counters for send/received packets
bool make_it_fail;
/** when plugin returns a failure this tells us what really happened */
@@ -12148,7 +12520,7 @@ static void login_failed_error(THD *thd)
static bool send_server_handshake_packet(MPVIO_EXT *mpvio,
const char *data, uint data_len)
{
- DBUG_ASSERT(mpvio->status == MPVIO_EXT::FAILURE);
+ DBUG_ASSERT(mpvio->status == MPVIO_EXT::RESTART);
DBUG_ASSERT(data_len <= 255);
THD *thd= mpvio->auth_info.thd;
@@ -12302,14 +12674,10 @@ static bool secure_auth(THD *thd)
static bool send_plugin_request_packet(MPVIO_EXT *mpvio,
const uchar *data, uint data_len)
{
- DBUG_ASSERT(mpvio->packets_written == 1);
- DBUG_ASSERT(mpvio->packets_read == 1);
NET *net= &mpvio->auth_info.thd->net;
static uchar switch_plugin_request_buf[]= { 254 };
DBUG_ENTER("send_plugin_request_packet");
- mpvio->status= MPVIO_EXT::FAILURE; // the status is no longer RESTART
-
const char *client_auth_plugin=
((st_mysql_auth *) (plugin_decl(mpvio->plugin)->info))->client_auth_plugin;
@@ -12326,8 +12694,9 @@ static bool send_plugin_request_packet(MPVIO_EXT *mpvio,
user account, it's the plugin that the client needs to use to log in.
*/
bool switch_from_long_to_short_scramble=
- native_password_plugin_name.str == mpvio->cached_client_reply.plugin &&
- client_auth_plugin == old_password_plugin_name.str;
+ client_auth_plugin == old_password_plugin_name.str &&
+ my_strcasecmp(system_charset_info, mpvio->cached_client_reply.plugin,
+ native_password_plugin_name.str) == 0;
if (switch_from_long_to_short_scramble)
DBUG_RETURN (secure_auth(mpvio->auth_info.thd) ||
@@ -12340,8 +12709,9 @@ static bool send_plugin_request_packet(MPVIO_EXT *mpvio,
ask an old 4.0 client to use the new 4.1 authentication protocol.
*/
bool switch_from_short_to_long_scramble=
- old_password_plugin_name.str == mpvio->cached_client_reply.plugin &&
- client_auth_plugin == native_password_plugin_name.str;
+ client_auth_plugin == native_password_plugin_name.str &&
+ my_strcasecmp(system_charset_info, mpvio->cached_client_reply.plugin,
+ old_password_plugin_name.str) == 0;
if (switch_from_short_to_long_scramble)
{
@@ -12360,6 +12730,23 @@ static bool send_plugin_request_packet(MPVIO_EXT *mpvio,
}
#ifndef NO_EMBEDDED_ACCESS_CHECKS
+
+/**
+  Safeguard to avoid blocking root when the max_password_errors
+  limit is reached.
+
+  Currently, password errors are ignored for a superuser connecting
+  from localhost.
+
+  @return true if password errors should be ignored and the user should not be locked.
+*/
+static bool ignore_max_password_errors(const ACL_USER *acl_user)
+{
+ const char *host= acl_user->host.hostname;
+ return (acl_user->access & SUPER_ACL)
+ && (!strcasecmp(host, "localhost") ||
+ !strcmp(host, "127.0.0.1") ||
+ !strcmp(host, "::1"));
+}
/**
Finds acl entry in user database for authentication purposes.
@@ -12378,6 +12765,16 @@ static bool find_mpvio_user(MPVIO_EXT *mpvio)
mysql_mutex_lock(&acl_cache->lock);
ACL_USER *user= find_user_or_anon(sctx->host, sctx->user, sctx->ip);
+
+ if (user && user->password_errors >= max_password_errors && !ignore_max_password_errors(user))
+ {
+ mysql_mutex_unlock(&acl_cache->lock);
+ my_error(ER_USER_IS_BLOCKED, MYF(0));
+ general_log_print(mpvio->auth_info.thd, COM_CONNECT,
+ ER_THD(mpvio->auth_info.thd, ER_USER_IS_BLOCKED));
+ DBUG_RETURN(1);
+ }
+
if (user)
mpvio->acl_user= user->copy(mpvio->auth_info.thd->mem_root);
@@ -12415,32 +12812,19 @@ static bool find_mpvio_user(MPVIO_EXT *mpvio)
}
/* user account requires non-default plugin and the client is too old */
- if (mpvio->acl_user->plugin.str != native_password_plugin_name.str &&
- mpvio->acl_user->plugin.str != old_password_plugin_name.str &&
+ if (mpvio->acl_user->auth->plugin.str != native_password_plugin_name.str &&
+ mpvio->acl_user->auth->plugin.str != old_password_plugin_name.str &&
!(mpvio->auth_info.thd->client_capabilities & CLIENT_PLUGIN_AUTH))
{
- DBUG_ASSERT(my_strcasecmp(system_charset_info, mpvio->acl_user->plugin.str,
- native_password_plugin_name.str));
- DBUG_ASSERT(my_strcasecmp(system_charset_info, mpvio->acl_user->plugin.str,
- old_password_plugin_name.str));
+ DBUG_ASSERT(my_strcasecmp(system_charset_info,
+ mpvio->acl_user->auth->plugin.str, native_password_plugin_name.str));
+ DBUG_ASSERT(my_strcasecmp(system_charset_info,
+ mpvio->acl_user->auth->plugin.str, old_password_plugin_name.str));
my_error(ER_NOT_SUPPORTED_AUTH_MODE, MYF(0));
general_log_print(mpvio->auth_info.thd, COM_CONNECT,
ER_THD(mpvio->auth_info.thd, ER_NOT_SUPPORTED_AUTH_MODE));
DBUG_RETURN (1);
}
-
- mpvio->auth_info.user_name= sctx->user;
- mpvio->auth_info.user_name_length= (uint)strlen(sctx->user);
- mpvio->auth_info.auth_string= mpvio->acl_user->auth_string.str;
- mpvio->auth_info.auth_string_length= (unsigned long) mpvio->acl_user->auth_string.length;
- strmake_buf(mpvio->auth_info.authenticated_as, safe_str(mpvio->acl_user->user.str));
-
- DBUG_PRINT("info", ("exit: user=%s, auth_string=%s, authenticated as=%s"
- "plugin=%s",
- mpvio->auth_info.user_name,
- mpvio->auth_info.auth_string,
- mpvio->auth_info.authenticated_as,
- mpvio->acl_user->plugin.str));
DBUG_RETURN(0);
}
@@ -12584,7 +12968,7 @@ static bool parse_com_change_user_packet(MPVIO_EXT *mpvio, uint packet_length)
MYF(0));
DBUG_RETURN(1);
}
- client_plugin= fix_plugin_ptr(next_field);
+ client_plugin= next_field;
next_field+= strlen(next_field) + 1;
}
else
@@ -12593,21 +12977,18 @@ static bool parse_com_change_user_packet(MPVIO_EXT *mpvio, uint packet_length)
client_plugin= native_password_plugin_name.str;
else
{
- client_plugin= old_password_plugin_name.str;
/*
- For a passwordless accounts we use native_password_plugin.
- But when an old 4.0 client connects to it, we change it to
- old_password_plugin, otherwise MySQL will think that server
- and client plugins don't match.
+ Normally old clients use old_password_plugin, but for
+      passwordless accounts we use native_password_plugin.
+ See guess_auth_plugin().
*/
- if (mpvio->acl_user->auth_string.length == 0)
- mpvio->acl_user->plugin= old_password_plugin_name;
+ client_plugin= passwd_len ? old_password_plugin_name.str
+ : native_password_plugin_name.str;
}
}
if ((thd->client_capabilities & CLIENT_CONNECT_ATTRS) &&
- read_client_connect_attrs(&next_field, end,
- thd->charset()))
+ read_client_connect_attrs(&next_field, end, thd->charset()))
{
my_message(ER_UNKNOWN_COM_ERROR, ER_THD(thd, ER_UNKNOWN_COM_ERROR),
MYF(0));
@@ -12678,7 +13059,12 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
return packet_error;
DBUG_PRINT("info", ("IO layer change in progress..."));
- if (sslaccept(ssl_acceptor_fd, net->vio, net->read_timeout, &errptr))
+ mysql_rwlock_rdlock(&LOCK_ssl_refresh);
+ int ssl_ret = sslaccept(ssl_acceptor_fd, net->vio, net->read_timeout, &errptr);
+ mysql_rwlock_unlock(&LOCK_ssl_refresh);
+ ssl_acceptor_stats_update(ssl_ret);
+
+ if(ssl_ret)
{
DBUG_PRINT("error", ("Failed to accept new SSL connection"));
return packet_error;
@@ -12829,7 +13215,6 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
if ((thd->client_capabilities & CLIENT_PLUGIN_AUTH) &&
(client_plugin < (char *)net->read_pos + pkt_len))
{
- client_plugin= fix_plugin_ptr(client_plugin);
next_field+= strlen(next_field) + 1;
}
else
@@ -12841,15 +13226,13 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
client_plugin= native_password_plugin_name.str;
else
{
- client_plugin= old_password_plugin_name.str;
/*
- For a passwordless accounts we use native_password_plugin.
- But when an old 4.0 client connects to it, we change it to
- old_password_plugin, otherwise MySQL will think that server
- and client plugins don't match.
+ Normally old clients use old_password_plugin, but for
+      passwordless accounts we use native_password_plugin.
+ See guess_auth_plugin().
*/
- if (mpvio->acl_user->auth_string.length == 0)
- mpvio->acl_user->plugin= old_password_plugin_name;
+ client_plugin= passwd_len ? old_password_plugin_name.str
+ : native_password_plugin_name.str;
}
}
@@ -12867,7 +13250,7 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
restarted and a server auth plugin will read the data that the client
has just send. Cache them to return in the next server_mpvio_read_packet().
*/
- if (!lex_string_eq(&mpvio->acl_user->plugin, plugin_name(mpvio->plugin)))
+ if (!lex_string_eq(&mpvio->acl_user->auth->plugin, plugin_name(mpvio->plugin)))
{
mpvio->cached_client_reply.pkt= passwd;
mpvio->cached_client_reply.pkt_len= (uint)passwd_len;
@@ -12947,6 +13330,7 @@ static int server_mpvio_write_packet(MYSQL_PLUGIN_VIO *param,
res= my_net_write(&mpvio->auth_info.thd->net, packet, packet_len) ||
net_flush(&mpvio->auth_info.thd->net);
}
+ mpvio->status= MPVIO_EXT::FAILURE; // the status is no longer RESTART
mpvio->packets_written++;
DBUG_RETURN(res);
}
@@ -12963,56 +13347,53 @@ static int server_mpvio_write_packet(MYSQL_PLUGIN_VIO *param,
*/
static int server_mpvio_read_packet(MYSQL_PLUGIN_VIO *param, uchar **buf)
{
- MPVIO_EXT *mpvio= (MPVIO_EXT *) param;
+ MPVIO_EXT * const mpvio= (MPVIO_EXT *) param;
+ MYSQL_SERVER_AUTH_INFO * const ai= &mpvio->auth_info;
ulong pkt_len;
DBUG_ENTER("server_mpvio_read_packet");
- if (mpvio->packets_written == 0)
- {
- /*
- plugin wants to read the data without sending anything first.
- send an empty packet to force a server handshake packet to be sent
- */
- if (server_mpvio_write_packet(mpvio, 0, 0))
- pkt_len= packet_error;
- else
- pkt_len= my_net_read(&mpvio->auth_info.thd->net);
- }
- else if (mpvio->cached_client_reply.pkt)
+ if (mpvio->status == MPVIO_EXT::RESTART)
{
- DBUG_ASSERT(mpvio->status == MPVIO_EXT::RESTART);
- DBUG_ASSERT(mpvio->packets_read > 0);
- /*
- if the have the data cached from the last server_mpvio_read_packet
- (which can be the case if it's a restarted authentication)
- and a client has used the correct plugin, then we can return the
- cached data straight away and avoid one round trip.
- */
const char *client_auth_plugin=
((st_mysql_auth *) (plugin_decl(mpvio->plugin)->info))->client_auth_plugin;
- if (client_auth_plugin == 0 ||
- my_strcasecmp(system_charset_info, mpvio->cached_client_reply.plugin,
- client_auth_plugin) == 0)
+ if (client_auth_plugin == 0)
{
mpvio->status= MPVIO_EXT::FAILURE;
- *buf= (uchar*) mpvio->cached_client_reply.pkt;
- mpvio->cached_client_reply.pkt= 0;
- mpvio->packets_read++;
+ pkt_len= 0;
+ *buf= 0;
+ goto done;
+ }
- DBUG_RETURN ((int) mpvio->cached_client_reply.pkt_len);
+ if (mpvio->cached_client_reply.pkt)
+ {
+ DBUG_ASSERT(mpvio->packets_read > 0);
+ /*
+      if we have the data cached from the last server_mpvio_read_packet
+ (which can be the case if it's a restarted authentication)
+ and a client has used the correct plugin, then we can return the
+ cached data straight away and avoid one round trip.
+ */
+ if (my_strcasecmp(system_charset_info, mpvio->cached_client_reply.plugin,
+ client_auth_plugin) == 0)
+ {
+ mpvio->status= MPVIO_EXT::FAILURE;
+ pkt_len= mpvio->cached_client_reply.pkt_len;
+ *buf= (uchar*) mpvio->cached_client_reply.pkt;
+ mpvio->packets_read++;
+ goto done;
+ }
}
/*
- But if the client has used the wrong plugin, the cached data are
- useless. Furthermore, we have to send a "change plugin" request
- to the client.
+      The plugin wants to read the data without sending anything first.
+      Send an empty packet to force a server handshake packet to be sent.
*/
if (server_mpvio_write_packet(mpvio, 0, 0))
pkt_len= packet_error;
else
- pkt_len= my_net_read(&mpvio->auth_info.thd->net);
+ pkt_len= my_net_read(&ai->thd->net);
}
else
- pkt_len= my_net_read(&mpvio->auth_info.thd->net);
+ pkt_len= my_net_read(&ai->thd->net);
if (unlikely(pkt_len == packet_error))
goto err;
@@ -13030,14 +13411,24 @@ static int server_mpvio_read_packet(MYSQL_PLUGIN_VIO *param, uchar **buf)
goto err;
}
else
- *buf= mpvio->auth_info.thd->net.read_pos;
+ *buf= ai->thd->net.read_pos;
+
+done:
+ if (set_user_salt_if_needed(mpvio->acl_user, mpvio->curr_auth, mpvio->plugin))
+ goto err;
+
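+  /* Expose the current authentication round's user name and salt to the plugin */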
+ ai->user_name= ai->thd->security_ctx->user;
+ ai->user_name_length= (uint) strlen(ai->user_name);
+ ai->auth_string= mpvio->acl_user->auth[mpvio->curr_auth].salt.str;
+ ai->auth_string_length= (ulong) mpvio->acl_user->auth[mpvio->curr_auth].salt.length;
+ strmake_buf(ai->authenticated_as, mpvio->acl_user->user.str);
DBUG_RETURN((int)pkt_len);
err:
if (mpvio->status == MPVIO_EXT::FAILURE)
{
- if (!mpvio->auth_info.thd->is_error())
+ if (!ai->thd->is_error())
my_error(ER_HANDSHAKE_ERROR, MYF(0));
}
DBUG_RETURN(-1);
@@ -13098,24 +13489,25 @@ static bool acl_check_ssl(THD *thd, const ACL_USER *acl_user)
return 1;
if (acl_user->ssl_cipher)
{
+ const char *ssl_cipher= SSL_get_cipher(ssl);
DBUG_PRINT("info", ("comparing ciphers: '%s' and '%s'",
- acl_user->ssl_cipher, SSL_get_cipher(ssl)));
- if (strcmp(acl_user->ssl_cipher, SSL_get_cipher(ssl)))
+ acl_user->ssl_cipher, ssl_cipher));
+ if (strcmp(acl_user->ssl_cipher, ssl_cipher))
{
if (global_system_variables.log_warnings)
sql_print_information("X509 ciphers mismatch: should be '%s' but is '%s'",
- acl_user->ssl_cipher, SSL_get_cipher(ssl));
+ acl_user->ssl_cipher, ssl_cipher);
return 1;
}
}
- if (!acl_user->x509_issuer && !acl_user->x509_subject)
+ if (!acl_user->x509_issuer[0] && !acl_user->x509_subject[0])
return 0; // all done
/* Prepare certificate (if exists) */
if (!(cert= SSL_get_peer_certificate(ssl)))
return 1;
/* If X509 issuer is specified, we check it... */
- if (acl_user->x509_issuer)
+ if (acl_user->x509_issuer[0])
{
char *ptr= X509_NAME_oneline(X509_get_issuer_name(cert), 0, 0);
DBUG_PRINT("info", ("comparing issuers: '%s' and '%s'",
@@ -13132,7 +13524,7 @@ static bool acl_check_ssl(THD *thd, const ACL_USER *acl_user)
free(ptr);
}
/* X509 subject is specified, we check it .. */
- if (acl_user->x509_subject)
+ if (acl_user->x509_subject[0])
{
char *ptr= X509_NAME_oneline(X509_get_subject_name(cert), 0, 0);
DBUG_PRINT("info", ("comparing subjects: '%s' and '%s'",
@@ -13166,35 +13558,25 @@ static bool acl_check_ssl(THD *thd, const ACL_USER *acl_user)
static int do_auth_once(THD *thd, const LEX_CSTRING *auth_plugin_name,
MPVIO_EXT *mpvio)
{
- int res= CR_OK, old_status= MPVIO_EXT::FAILURE;
+ int res= CR_OK;
bool unlock_plugin= false;
- plugin_ref plugin= NULL;
-
- if (auth_plugin_name->str == native_password_plugin_name.str)
- plugin= native_password_plugin;
-#ifndef EMBEDDED_LIBRARY
- else if (auth_plugin_name->str == old_password_plugin_name.str)
- plugin= old_password_plugin;
- else if ((plugin= my_plugin_lock_by_name(thd, auth_plugin_name,
- MYSQL_AUTHENTICATION_PLUGIN)))
- unlock_plugin= true;
-#endif
+ plugin_ref plugin= get_auth_plugin(thd, *auth_plugin_name, &unlock_plugin);
mpvio->plugin= plugin;
- old_status= mpvio->status;
+ mpvio->auth_info.user_name= NULL;
if (plugin)
{
- st_mysql_auth *auth= (st_mysql_auth *) plugin_decl(plugin)->info;
- switch (auth->interface_version >> 8) {
+ st_mysql_auth *info= (st_mysql_auth *) plugin_decl(plugin)->info;
+ switch (info->interface_version >> 8) {
case 0x02:
- res= auth->authenticate_user(mpvio, &mpvio->auth_info);
+ res= info->authenticate_user(mpvio, &mpvio->auth_info);
break;
case 0x01:
{
MYSQL_SERVER_AUTH_INFO_0x0100 compat;
compat.downgrade(&mpvio->auth_info);
- res= auth->authenticate_user(mpvio, (MYSQL_SERVER_AUTH_INFO *)&compat);
+ res= info->authenticate_user(mpvio, (MYSQL_SERVER_AUTH_INFO *)&compat);
compat.upgrade(&mpvio->auth_info);
}
break;
@@ -13214,20 +13596,64 @@ static int do_auth_once(THD *thd, const LEX_CSTRING *auth_plugin_name,
res= CR_ERROR;
}
- /*
- If the status was MPVIO_EXT::RESTART before the authenticate_user() call
- it can never be MPVIO_EXT::RESTART after the call, because any call
- to write_packet() or read_packet() will reset the status.
+ return res;
+}
- But (!) if a plugin never called a read_packet() or write_packet(), the
- status will stay unchanged. We'll fix it, by resetting the status here.
- */
- if (old_status == MPVIO_EXT::RESTART && mpvio->status == MPVIO_EXT::RESTART)
- mpvio->status= MPVIO_EXT::FAILURE; // reset to the default
+enum PASSWD_ERROR_ACTION
+{
+ PASSWD_ERROR_CLEAR,
+ PASSWD_ERROR_INCREMENT
+};
- return res;
+/* Increment or clear the password error counter for a user. */
+static void handle_password_errors(const char *user, const char *hostname, PASSWD_ERROR_ACTION action)
+{
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+ mysql_mutex_assert_not_owner(&acl_cache->lock);
+ mysql_mutex_lock(&acl_cache->lock);
+ ACL_USER *u = find_user_exact(hostname, user);
+ if (u)
+ {
+ switch(action)
+ {
+ case PASSWD_ERROR_INCREMENT:
+ u->password_errors++;
+ break;
+ case PASSWD_ERROR_CLEAR:
+ u->password_errors= 0;
+ break;
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+ }
+ mysql_mutex_unlock(&acl_cache->lock);
+#endif
}
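+
+/*
+  Check whether the account's password has outlived its lifetime. Uses the
+  per-user password_lifetime if set, falling back to the global
+  default_password_lifetime when the per-user value is negative;
+  0 means the password never expires.
+*/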
+static bool check_password_lifetime(THD *thd, const ACL_USER &acl_user)
+{
+ /* the password should never expire */
+ if (!acl_user.password_lifetime)
+ return false;
+
+ longlong interval= acl_user.password_lifetime;
+ if (interval < 0)
+ {
+ interval= default_password_lifetime;
+
+ /* default global policy applies, and that is password never expires */
+ if (!interval)
+ return false;
+ }
+
+ thd->set_time();
+
+ if ((thd->query_start() - acl_user.password_last_changed)/3600/24 >= interval)
+ return true;
+
+ return false;
+}
/**
Perform the handshake, authorize the client and update thd sctx variables.
@@ -13245,7 +13671,6 @@ bool acl_authenticate(THD *thd, uint com_change_user_pkt_len)
{
int res= CR_OK;
MPVIO_EXT mpvio;
- const LEX_CSTRING *auth_plugin_name= default_auth_plugin_name;
enum enum_server_command command= com_change_user_pkt_len ? COM_CHANGE_USER
: COM_CONNECT;
DBUG_ENTER("acl_authenticate");
@@ -13253,9 +13678,9 @@ bool acl_authenticate(THD *thd, uint com_change_user_pkt_len)
bzero(&mpvio, sizeof(mpvio));
mpvio.read_packet= server_mpvio_read_packet;
mpvio.write_packet= server_mpvio_write_packet;
+ mpvio.cached_client_reply.plugin= "";
mpvio.info= server_mpvio_info;
- mpvio.status= MPVIO_EXT::FAILURE;
- mpvio.make_it_fail= false;
+ mpvio.status= MPVIO_EXT::RESTART;
mpvio.auth_info.thd= thd;
mpvio.auth_info.host_or_ip= thd->security_ctx->host_or_ip;
mpvio.auth_info.host_or_ip_length=
@@ -13271,6 +13696,8 @@ bool acl_authenticate(THD *thd, uint com_change_user_pkt_len)
if (parse_com_change_user_packet(&mpvio, com_change_user_pkt_len))
DBUG_RETURN(1);
+ res= mpvio.status == MPVIO_EXT::SUCCESS ? CR_OK : CR_ERROR;
+
DBUG_ASSERT(mpvio.status == MPVIO_EXT::RESTART ||
mpvio.status == MPVIO_EXT::SUCCESS);
}
@@ -13286,29 +13713,33 @@ bool acl_authenticate(THD *thd, uint com_change_user_pkt_len)
the correct plugin.
*/
- res= do_auth_once(thd, auth_plugin_name, &mpvio);
+ res= do_auth_once(thd, default_auth_plugin_name, &mpvio);
}
- /*
- retry the authentication, if - after receiving the user name -
- we found that we need to switch to a non-default plugin
- */
- if (mpvio.status == MPVIO_EXT::RESTART)
+ Security_context * const sctx= thd->security_ctx;
+ const ACL_USER * acl_user= mpvio.acl_user;
+
+ if (acl_user)
{
- DBUG_ASSERT(mpvio.acl_user);
- DBUG_ASSERT(command == COM_CHANGE_USER ||
- !lex_string_eq(auth_plugin_name, &mpvio.acl_user->plugin));
- auth_plugin_name= &mpvio.acl_user->plugin;
- res= do_auth_once(thd, auth_plugin_name, &mpvio);
+ /*
+ retry the authentication with curr_auth==0 if after receiving the user
+ name we found that we need to switch to a non-default plugin
+ */
+ for (mpvio.curr_auth= mpvio.status != MPVIO_EXT::RESTART;
+ res != CR_OK && mpvio.curr_auth < acl_user->nauth;
+ mpvio.curr_auth++)
+ {
+ thd->clear_error();
+ mpvio.status= MPVIO_EXT::RESTART;
+ res= do_auth_once(thd, &acl_user->auth[mpvio.curr_auth].plugin, &mpvio);
+ }
}
+
if (mpvio.make_it_fail && res == CR_OK)
{
mpvio.status= MPVIO_EXT::FAILURE;
res= CR_ERROR;
}
-
- Security_context *sctx= thd->security_ctx;
- const ACL_USER *acl_user= mpvio.acl_user;
thd->password= mpvio.auth_info.password_used; // remember for error messages
@@ -13336,7 +13767,6 @@ bool acl_authenticate(THD *thd, uint com_change_user_pkt_len)
if (res > CR_OK && mpvio.status != MPVIO_EXT::SUCCESS)
{
Host_errors errors;
- DBUG_ASSERT(mpvio.status == MPVIO_EXT::FAILURE);
switch (res)
{
case CR_AUTH_PLUGIN_ERROR:
@@ -13347,6 +13777,8 @@ bool acl_authenticate(THD *thd, uint com_change_user_pkt_len)
break;
case CR_AUTH_USER_CREDENTIALS:
errors.m_authentication= 1;
+ if (thd->password && !mpvio.make_it_fail)
+ handle_password_errors(acl_user->user.str, acl_user->host.hostname, PASSWD_ERROR_INCREMENT);
break;
case CR_ERROR:
default:
@@ -13361,12 +13793,17 @@ bool acl_authenticate(THD *thd, uint com_change_user_pkt_len)
}
sctx->proxy_user[0]= 0;
+ if (thd->password && acl_user->password_errors)
+ {
+ /* Login succeeded, clear password errors.*/
+ handle_password_errors(acl_user->user.str, acl_user->host.hostname, PASSWD_ERROR_CLEAR);
+ }
if (initialized) // if not --skip-grant-tables
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
bool is_proxy_user= FALSE;
- const char *auth_user = safe_str(acl_user->user.str);
+ const char *auth_user = acl_user->user.str;
ACL_PROXY_USER *proxy_user;
/* check if the user is allowed to proxy as another user */
proxy_user= acl_find_proxy_user(auth_user, sctx->host, sctx->ip,
@@ -13412,10 +13849,7 @@ bool acl_authenticate(THD *thd, uint com_change_user_pkt_len)
#endif
sctx->master_access= acl_user->access;
- if (acl_user->user.str)
- strmake_buf(sctx->priv_user, acl_user->user.str);
- else
- *sctx->priv_user= 0;
+ strmake_buf(sctx->priv_user, acl_user->user.str);
if (acl_user->host.hostname)
strmake_buf(sctx->priv_host, acl_user->host.hostname);
@@ -13436,6 +13870,28 @@ bool acl_authenticate(THD *thd, uint com_change_user_pkt_len)
DBUG_RETURN(1);
}
+ if (acl_user->account_locked) {
+ status_var_increment(denied_connections);
+ my_error(ER_ACCOUNT_HAS_BEEN_LOCKED, MYF(0));
+ DBUG_RETURN(1);
+ }
+
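+  /*
+    Reject the connection on an expired password when the client cannot
+    handle expired passwords and disconnect_on_expired_password is set.
+  */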
+ bool client_can_handle_exp_pass= thd->client_capabilities &
+ CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS;
+ bool password_expired= thd->password != PASSWORD_USED_NO_MENTION
+ && (acl_user->password_expired ||
+ check_password_lifetime(thd, *acl_user));
+
+ if (!client_can_handle_exp_pass && disconnect_on_expired_password &&
+ password_expired)
+ {
+ status_var_increment(denied_connections);
+ my_error(ER_MUST_CHANGE_PASSWORD_LOGIN, MYF(0));
+ DBUG_RETURN(1);
+ }
+
+ sctx->password_expired= password_expired;
+
/*
Don't allow the user to connect if he has done too many queries.
As we are testing max_user_connections == 0 here, it means that we
@@ -13571,12 +14027,11 @@ static int native_password_authenticate(MYSQL_PLUGIN_VIO *vio,
/* generate the scramble, or reuse the old one */
if (thd->scramble[SCRAMBLE_LENGTH])
- {
thd_create_random_password(thd, thd->scramble, SCRAMBLE_LENGTH);
- /* and send it to the client */
- if (mpvio->write_packet(mpvio, (uchar*)thd->scramble, SCRAMBLE_LENGTH + 1))
- DBUG_RETURN(CR_AUTH_HANDSHAKE);
- }
+
+ /* and send it to the client */
+ if (mpvio->write_packet(mpvio, (uchar*)thd->scramble, SCRAMBLE_LENGTH + 1))
+ DBUG_RETURN(CR_AUTH_HANDSHAKE);
/* reply and authenticate */
@@ -13627,15 +14082,16 @@ static int native_password_authenticate(MYSQL_PLUGIN_VIO *vio,
DBUG_EXECUTE_IF("native_password_bad_reply", { pkt_len= 12; });
if (pkt_len == 0) /* no password */
- DBUG_RETURN(mpvio->acl_user->salt_len != 0 ? CR_AUTH_USER_CREDENTIALS : CR_OK);
+ DBUG_RETURN(info->auth_string_length != 0
+ ? CR_AUTH_USER_CREDENTIALS : CR_OK);
info->password_used= PASSWORD_USED_YES;
if (pkt_len == SCRAMBLE_LENGTH)
{
- if (!mpvio->acl_user->salt_len)
+ if (!info->auth_string_length)
DBUG_RETURN(CR_AUTH_USER_CREDENTIALS);
- if (check_scramble(pkt, thd->scramble, mpvio->acl_user->salt))
+ if (check_scramble(pkt, thd->scramble, (uchar*)info->auth_string))
DBUG_RETURN(CR_AUTH_USER_CREDENTIALS);
else
DBUG_RETURN(CR_OK);
@@ -13645,6 +14101,41 @@ static int native_password_authenticate(MYSQL_PLUGIN_VIO *vio,
DBUG_RETURN(CR_AUTH_HANDSHAKE);
}
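+
+/*
+  Hash a plain-text password into the native password hash stored in the
+  user table (an empty password yields an empty hash).
+*/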
+static int native_password_make_scramble(const char *password,
+ size_t password_length, char *hash, size_t *hash_length)
+{
+ DBUG_ASSERT(*hash_length >= SCRAMBLED_PASSWORD_CHAR_LENGTH);
+ if (password_length == 0)
+ *hash_length= 0;
+ else
+ {
+ *hash_length= SCRAMBLED_PASSWORD_CHAR_LENGTH;
+ my_make_scrambled_password(hash, password, password_length);
+ }
+ return 0;
+}
+
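+/*
+  Convert a stored native password hash into the binary salt used by
+  check_scramble(); fails for hashes of unexpected length.
+*/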
+static int native_password_get_salt(const char *hash, size_t hash_length,
+ unsigned char *out, size_t *out_length)
+{
+ DBUG_ASSERT(*out_length >= SCRAMBLE_LENGTH);
+ if (hash_length == 0)
+ {
+ *out_length= 0;
+ return 0;
+ }
+
+ if (hash_length != SCRAMBLED_PASSWORD_CHAR_LENGTH)
+ {
+ my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH);
+ return 1;
+ }
+
+ *out_length= SCRAMBLE_LENGTH;
+ get_salt_from_password(out, hash);
+ return 0;
+}
+
static int old_password_authenticate(MYSQL_PLUGIN_VIO *vio,
MYSQL_SERVER_AUTH_INFO *info)
{
@@ -13655,12 +14146,10 @@ static int old_password_authenticate(MYSQL_PLUGIN_VIO *vio,
/* generate the scramble, or reuse the old one */
if (thd->scramble[SCRAMBLE_LENGTH])
- {
thd_create_random_password(thd, thd->scramble, SCRAMBLE_LENGTH);
- /* and send it to the client */
- if (mpvio->write_packet(mpvio, (uchar*)thd->scramble, SCRAMBLE_LENGTH + 1))
- return CR_AUTH_HANDSHAKE;
- }
+ /* and send it to the client */
+ if (mpvio->write_packet(mpvio, (uchar*)thd->scramble, SCRAMBLE_LENGTH + 1))
+ return CR_AUTH_HANDSHAKE;
/* read the reply and authenticate */
if ((pkt_len= mpvio->read_packet(mpvio, &pkt)) < 0)
@@ -13679,7 +14168,7 @@ static int old_password_authenticate(MYSQL_PLUGIN_VIO *vio,
pkt_len= (int)strnlen((char*)pkt, pkt_len);
if (pkt_len == 0) /* no password */
- return info->auth_string[0] ? CR_AUTH_USER_CREDENTIALS : CR_OK;
+ return info->auth_string_length ? CR_AUTH_USER_CREDENTIALS : CR_OK;
if (secure_auth(thd))
return CR_AUTH_HANDSHAKE;
@@ -13688,30 +14177,64 @@ static int old_password_authenticate(MYSQL_PLUGIN_VIO *vio,
if (pkt_len == SCRAMBLE_LENGTH_323)
{
- if (!mpvio->acl_user->salt_len)
+ if (!info->auth_string_length)
return CR_AUTH_USER_CREDENTIALS;
- return check_scramble_323(pkt, thd->scramble,
- (ulong *) mpvio->acl_user->salt) ?
- CR_AUTH_USER_CREDENTIALS : CR_OK;
+ return check_scramble_323(pkt, thd->scramble, (ulong *) info->auth_string)
+ ? CR_AUTH_USER_CREDENTIALS : CR_OK;
}
my_error(ER_HANDSHAKE_ERROR, MYF(0));
return CR_AUTH_HANDSHAKE;
}
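+
+/*
+  Hash a plain-text password with the pre-4.1 ("old") scheme
+  (an empty password yields an empty hash).
+*/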
+static int old_password_make_scramble(const char *password,
+ size_t password_length, char *hash, size_t *hash_length)
+{
+ DBUG_ASSERT(*hash_length >= SCRAMBLED_PASSWORD_CHAR_LENGTH_323);
+ if (password_length == 0)
+ *hash_length= 0;
+ else
+ {
+ *hash_length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
+ my_make_scrambled_password_323(hash, password, password_length);
+ }
+ return 0;
+}
+
+#define SALT_LENGTH_323 (sizeof(ulong)*2)
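+/*
+  Convert a stored pre-4.1 password hash into the binary salt used by
+  check_scramble_323(); fails for hashes of unexpected length.
+*/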
+static int old_password_get_salt(const char *hash, size_t hash_length,
+ unsigned char *out, size_t *out_length)
+{
+ DBUG_ASSERT(*out_length >= SALT_LENGTH_323);
+
+ if (hash_length != SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
+ {
+ my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH_323);
+ return 1;
+ }
+
+ *out_length= SALT_LENGTH_323;
+ get_salt_from_password_323((ulong*)out, hash);
+ return 0;
+}
+
static struct st_mysql_auth native_password_handler=
{
MYSQL_AUTHENTICATION_INTERFACE_VERSION,
native_password_plugin_name.str,
- native_password_authenticate
+ native_password_authenticate,
+ native_password_make_scramble,
+ native_password_get_salt
};
static struct st_mysql_auth old_password_handler=
{
MYSQL_AUTHENTICATION_INTERFACE_VERSION,
old_password_plugin_name.str,
- old_password_authenticate
+ old_password_authenticate,
+ old_password_make_scramble,
+ old_password_get_salt
};
maria_declare_plugin(mysql_password)
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index 853b34f8ad2..cf45f443a07 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -306,7 +306,7 @@ static bool open_only_one_table(THD* thd, TABLE_LIST* table,
bool is_view_operator_func)
{
LEX *lex= thd->lex;
- SELECT_LEX *select= &lex->select_lex;
+ SELECT_LEX *select= lex->first_select_lex();
TABLE_LIST *save_next_global, *save_next_local;
bool open_error;
save_next_global= table->next_global;
@@ -766,7 +766,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
}
collect_eis=
(table->table->s->table_category == TABLE_CATEGORY_USER &&
- (get_use_stat_tables_mode(thd) > NEVER ||
+ (check_eits_collection_allowed(thd) ||
lex->with_persistent_for_clause));
@@ -1297,7 +1297,7 @@ bool mysql_preload_keys(THD* thd, TABLE_LIST* tables)
bool Sql_cmd_analyze_table::execute(THD *thd)
{
LEX *m_lex= thd->lex;
- TABLE_LIST *first_table= m_lex->select_lex.table_list.first;
+ TABLE_LIST *first_table= m_lex->first_select_lex()->table_list.first;
bool res= TRUE;
thr_lock_type lock_type = TL_READ_NO_INSERT;
DBUG_ENTER("Sql_cmd_analyze_table::execute");
@@ -1305,6 +1305,9 @@ bool Sql_cmd_analyze_table::execute(THD *thd)
if (check_table_access(thd, SELECT_ACL | INSERT_ACL, first_table,
FALSE, UINT_MAX, FALSE))
goto error;
+ if (thd->has_read_only_protection())
+ goto error;
+
WSREP_TO_ISOLATION_BEGIN_WRTCHK(NULL, NULL, first_table);
res= mysql_admin_table(thd, first_table, &m_lex->check_opt,
"analyze", lock_type, 1, 0, 0, 0,
@@ -1317,11 +1320,13 @@ bool Sql_cmd_analyze_table::execute(THD *thd)
*/
res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
}
- m_lex->select_lex.table_list.first= first_table;
+ m_lex->first_select_lex()->table_list.first= first_table;
m_lex->query_tables= first_table;
error:
-WSREP_ERROR_LABEL:
+#ifdef WITH_WSREP
+wsrep_error_label:
+#endif
DBUG_RETURN(res);
}
@@ -1329,7 +1334,7 @@ WSREP_ERROR_LABEL:
bool Sql_cmd_check_table::execute(THD *thd)
{
LEX *m_lex= thd->lex;
- TABLE_LIST *first_table= m_lex->select_lex.table_list.first;
+ TABLE_LIST *first_table= m_lex->first_select_lex()->table_list.first;
thr_lock_type lock_type = TL_READ_NO_INSERT;
bool res= TRUE;
DBUG_ENTER("Sql_cmd_check_table::execute");
@@ -1342,7 +1347,7 @@ bool Sql_cmd_check_table::execute(THD *thd)
lock_type, 0, 0, HA_OPEN_FOR_REPAIR, 0,
&handler::ha_check, &view_check);
- m_lex->select_lex.table_list.first= first_table;
+ m_lex->first_select_lex()->table_list.first= first_table;
m_lex->query_tables= first_table;
error:
@@ -1353,7 +1358,7 @@ error:
bool Sql_cmd_optimize_table::execute(THD *thd)
{
LEX *m_lex= thd->lex;
- TABLE_LIST *first_table= m_lex->select_lex.table_list.first;
+ TABLE_LIST *first_table= m_lex->first_select_lex()->table_list.first;
bool res= TRUE;
DBUG_ENTER("Sql_cmd_optimize_table::execute");
@@ -1375,11 +1380,13 @@ bool Sql_cmd_optimize_table::execute(THD *thd)
*/
res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
}
- m_lex->select_lex.table_list.first= first_table;
+ m_lex->first_select_lex()->table_list.first= first_table;
m_lex->query_tables= first_table;
error:
-WSREP_ERROR_LABEL:
+#ifdef WITH_WSREP
+wsrep_error_label:
+#endif
DBUG_RETURN(res);
}
@@ -1387,7 +1394,7 @@ WSREP_ERROR_LABEL:
bool Sql_cmd_repair_table::execute(THD *thd)
{
LEX *m_lex= thd->lex;
- TABLE_LIST *first_table= m_lex->select_lex.table_list.first;
+ TABLE_LIST *first_table= m_lex->first_select_lex()->table_list.first;
bool res= TRUE;
DBUG_ENTER("Sql_cmd_repair_table::execute");
@@ -1409,10 +1416,12 @@ bool Sql_cmd_repair_table::execute(THD *thd)
*/
res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
}
- m_lex->select_lex.table_list.first= first_table;
+ m_lex->first_select_lex()->table_list.first= first_table;
m_lex->query_tables= first_table;
error:
-WSREP_ERROR_LABEL:
+#ifdef WITH_WSREP
+wsrep_error_label:
+#endif
DBUG_RETURN(res);
}
diff --git a/sql/sql_alloc.h b/sql/sql_alloc.h
index 153b0401e29..f475ecdff73 100644
--- a/sql/sql_alloc.h
+++ b/sql/sql_alloc.h
@@ -45,9 +45,6 @@ public:
#ifdef HAVE_valgrind
bool dummy_for_valgrind;
inline Sql_alloc() :dummy_for_valgrind(0) {}
-#else
- inline Sql_alloc() {}
#endif
- inline ~Sql_alloc() {}
};
#endif /* SQL_ALLOC_INCLUDED */
diff --git a/sql/sql_alter.cc b/sql/sql_alter.cc
index 0408808d800..1d2b5c3449b 100644
--- a/sql/sql_alter.cc
+++ b/sql/sql_alter.cc
@@ -356,7 +356,7 @@ bool Sql_cmd_alter_table::execute(THD *thd)
{
LEX *lex= thd->lex;
/* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */
- SELECT_LEX *select_lex= &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
/* first table of first SELECT_LEX */
TABLE_LIST *first_table= (TABLE_LIST*) select_lex->table_list.first;
/*
@@ -473,6 +473,7 @@ bool Sql_cmd_alter_table::execute(THD *thd)
thd->work_part_info= 0;
#endif
+#ifdef WITH_WSREP
if (WSREP(thd) &&
(!thd->is_current_stmt_binlog_format_row() ||
!thd->find_temporary_table(first_table)))
@@ -484,6 +485,7 @@ bool Sql_cmd_alter_table::execute(THD *thd)
thd->variables.auto_increment_offset = 1;
thd->variables.auto_increment_increment = 1;
}
+#endif
result= mysql_alter_table(thd, &select_lex->db, &lex->name,
&create_info,
@@ -494,16 +496,17 @@ bool Sql_cmd_alter_table::execute(THD *thd)
lex->ignore);
DBUG_RETURN(result);
-
-WSREP_ERROR_LABEL:
+#ifdef WITH_WSREP
+wsrep_error_label:
WSREP_WARN("ALTER TABLE isolation failure");
DBUG_RETURN(TRUE);
+#endif
}
bool Sql_cmd_discard_import_tablespace::execute(THD *thd)
{
/* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */
- SELECT_LEX *select_lex= &thd->lex->select_lex;
+ SELECT_LEX *select_lex= thd->lex->first_select_lex();
/* first table of first SELECT_LEX */
TABLE_LIST *table_list= (TABLE_LIST*) select_lex->table_list.first;
diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc
index 48e61177774..fa24ea142b1 100644
--- a/sql/sql_analyse.cc
+++ b/sql/sql_analyse.cc
@@ -68,6 +68,25 @@ int compare_decimal2(int* len, const char *s, const char *t)
}
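+
+/*
+  Validate one PROCEDURE ANALYSE() parameter: fix its fields if needed and
+  require a non-negative integer constant, otherwise raise
+  ER_WRONG_PARAMETERS_TO_PROCEDURE.
+*/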
+static bool
+prepare_param(THD *thd, Item **item, const char *proc_name, uint pos)
+{
+ if ((*item)->fix_fields_if_needed(thd, item))
+ {
+ DBUG_PRINT("info", ("fix_fields() for the parameter %u failed", pos));
+ return true;
+ }
+ if ((*item)->type_handler()->result_type() != INT_RESULT ||
+ !(*item)->basic_const_item() ||
+ (*item)->val_real() < 0)
+ {
+ my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name);
+ return true;
+ }
+ return false;
+}
+
+
Procedure *
proc_analyse_init(THD *thd, ORDER *param, select_result *result,
List<Item> &field_list)
@@ -88,17 +107,8 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result,
else if (param->next)
{
// first parameter
- if ((*param->item)->fix_fields_if_needed(thd, param->item))
- {
- DBUG_PRINT("info", ("fix_fields() for the first parameter failed"));
- goto err;
- }
- if ((*param->item)->type() != Item::INT_ITEM ||
- (*param->item)->val_real() < 0)
- {
- my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name);
+ if (prepare_param(thd, param->item, proc_name, 0))
goto err;
- }
pc->max_tree_elements = (uint) (*param->item)->val_int();
param = param->next;
if (param->next) // no third parameter possible
@@ -107,25 +117,12 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result,
goto err;
}
// second parameter
- if ((*param->item)->fix_fields_if_needed(thd, param->item))
- {
- DBUG_PRINT("info", ("fix_fields() for the second parameter failed"));
- goto err;
- }
- if ((*param->item)->type() != Item::INT_ITEM ||
- (*param->item)->val_real() < 0)
- {
- my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name);
+ if (prepare_param(thd, param->item, proc_name, 1))
goto err;
- }
pc->max_treemem = (uint) (*param->item)->val_int();
}
- else if ((*param->item)->type() != Item::INT_ITEM ||
- (*param->item)->val_real() < 0)
- {
- my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name);
+ else if (prepare_param(thd, param->item, proc_name, 0))
goto err;
- }
// if only one parameter was given, it will be the value of max_tree_elements
else
{
@@ -481,30 +478,28 @@ void field_real::add()
void field_decimal::add()
{
/*TODO - remove rounding stuff after decimal_div returns proper frac */
- my_decimal dec_buf, *dec= item->val_decimal(&dec_buf);
- my_decimal rounded;
+ VDec vdec(item);
uint length;
TREE_ELEMENT *element;
- if (item->null_value)
+ if (vdec.is_null())
{
nulls++;
return;
}
- my_decimal_round(E_DEC_FATAL_ERROR, dec, item->decimals, FALSE,&rounded);
- dec= &rounded;
+ my_decimal dec;
+ vdec.round_to(&dec, item->decimals, HALF_UP);
- length= my_decimal_string_length(dec);
+ length= my_decimal_string_length(&dec);
- if (decimal_is_zero(dec))
+ if (decimal_is_zero(&dec))
empty++;
if (room_in_tree)
{
uchar buf[DECIMAL_MAX_FIELD_SIZE];
- my_decimal2binary(E_DEC_FATAL_ERROR, dec, buf,
- item->max_length, item->decimals);
+ dec.to_binary(buf, item->max_length, item->decimals);
if (!(element = tree_insert(&tree, (void*)buf, 0, tree.custom_arg)))
{
room_in_tree = 0; // Remove tree, out of RAM ?
@@ -524,18 +519,18 @@ void field_decimal::add()
if (!found)
{
found = 1;
- min_arg = max_arg = sum[0] = *dec;
- my_decimal_mul(E_DEC_FATAL_ERROR, sum_sqr, dec, dec);
+ min_arg = max_arg = sum[0] = dec;
+ my_decimal_mul(E_DEC_FATAL_ERROR, sum_sqr, &dec, &dec);
cur_sum= 0;
min_length = max_length = length;
}
- else if (!decimal_is_zero(dec))
+ else if (!decimal_is_zero(&dec))
{
int next_cur_sum= cur_sum ^ 1;
my_decimal sqr_buf;
- my_decimal_add(E_DEC_FATAL_ERROR, sum+next_cur_sum, sum+cur_sum, dec);
- my_decimal_mul(E_DEC_FATAL_ERROR, &sqr_buf, dec, dec);
+ my_decimal_add(E_DEC_FATAL_ERROR, sum+next_cur_sum, sum+cur_sum, &dec);
+ my_decimal_mul(E_DEC_FATAL_ERROR, &sqr_buf, &dec, &dec);
my_decimal_add(E_DEC_FATAL_ERROR,
sum_sqr+next_cur_sum, sum_sqr+cur_sum, &sqr_buf);
cur_sum= next_cur_sum;
@@ -543,13 +538,13 @@ void field_decimal::add()
min_length = length;
if (length > max_length)
max_length = length;
- if (my_decimal_cmp(dec, &min_arg) < 0)
+ if (dec.cmp(&min_arg) < 0)
{
- min_arg= *dec;
+ min_arg= dec;
}
- if (my_decimal_cmp(dec, &max_arg) > 0)
+ if (dec.cmp(&max_arg) > 0)
{
- max_arg= *dec;
+ max_arg= dec;
}
}
}
@@ -1003,7 +998,7 @@ void field_decimal::get_opt_type(String *answer,
uint length;
my_decimal_set_zero(&zero);
- my_bool is_unsigned= (my_decimal_cmp(&zero, &min_arg) >= 0);
+ my_bool is_unsigned= (zero.cmp(&min_arg) >= 0);
length= sprintf(buff, "DECIMAL(%d, %d)",
(int) (max_length - (item->decimals ? 1 : 0)),
@@ -1016,14 +1011,14 @@ void field_decimal::get_opt_type(String *answer,
String *field_decimal::get_min_arg(String *str)
{
- my_decimal2string(E_DEC_FATAL_ERROR, &min_arg, 0, 0, '0', str);
+ min_arg.to_string_native(str, 0, 0, '0');
return str;
}
String *field_decimal::get_max_arg(String *str)
{
- my_decimal2string(E_DEC_FATAL_ERROR, &max_arg, 0, 0, '0', str);
+ max_arg.to_string_native(str, 0, 0, '0');
return str;
}
@@ -1041,10 +1036,10 @@ String *field_decimal::avg(String *s, ha_rows rows)
int2my_decimal(E_DEC_FATAL_ERROR, rows - nulls, FALSE, &num);
my_decimal_div(E_DEC_FATAL_ERROR, &avg_val, sum+cur_sum, &num, prec_increment);
/* TODO remove this after decimal_div returns proper frac */
- my_decimal_round(E_DEC_FATAL_ERROR, &avg_val,
+ avg_val.round_to(&rounded_avg,
MY_MIN(sum[cur_sum].frac + prec_increment, DECIMAL_MAX_SCALE),
- FALSE,&rounded_avg);
- my_decimal2string(E_DEC_FATAL_ERROR, &rounded_avg, 0, 0, '0', s);
+ HALF_UP);
+ rounded_avg.to_string_native(s, 0, 0, '0');
return s;
}
@@ -1057,7 +1052,6 @@ String *field_decimal::std(String *s, ha_rows rows)
return s;
}
my_decimal num, tmp, sum2, sum2d;
- double std_sqr;
int prec_increment= current_thd->variables.div_precincrement;
int2my_decimal(E_DEC_FATAL_ERROR, rows - nulls, FALSE, &num);
@@ -1065,7 +1059,7 @@ String *field_decimal::std(String *s, ha_rows rows)
my_decimal_div(E_DEC_FATAL_ERROR, &tmp, &sum2, &num, prec_increment);
my_decimal_sub(E_DEC_FATAL_ERROR, &sum2, sum_sqr+cur_sum, &tmp);
my_decimal_div(E_DEC_FATAL_ERROR, &tmp, &sum2, &num, prec_increment);
- my_decimal2double(E_DEC_FATAL_ERROR, &tmp, &std_sqr);
+ double std_sqr= tmp.to_double();
s->set_real(((double) std_sqr <= 0.0 ? 0.0 : sqrt(std_sqr)),
MY_MIN(item->decimals + prec_increment, NOT_FIXED_DEC), my_thd_charset);
@@ -1117,12 +1111,9 @@ int collect_decimal(uchar *element, element_count count,
info->str->append(',');
else
info->found = 1;
- my_decimal dec;
- binary2my_decimal(E_DEC_FATAL_ERROR, element, &dec,
- info->item->max_length, info->item->decimals);
-
+ my_decimal dec(element, info->item->max_length, info->item->decimals);
info->str->append('\'');
- my_decimal2string(E_DEC_FATAL_ERROR, &dec, 0, 0, '0', &s);
+ dec.to_string_native(&s, 0, 0, '0');
info->str->append(s);
info->str->append('\'');
return 0;
diff --git a/sql/sql_analyze_stmt.h b/sql/sql_analyze_stmt.h
index 27fd7fb6d6a..ceda8b4f416 100644
--- a/sql/sql_analyze_stmt.h
+++ b/sql/sql_analyze_stmt.h
@@ -284,3 +284,82 @@ private:
ulonglong sort_buffer_size;
};
+
+/**
+  A class to collect data about how a rowid filter is executed.
+
+  It stores information about how the rowid filter container is filled,
+  the container's size, and the observed selectivity.
+
+  The observed selectivity is calculated as follows: elements of a set
+  elem_set are checked for membership in the container, and the
+  selectivity is the number of elem_set elements found in the container
+  divided by the total number of elem_set elements checked.
+*/
+
+class Rowid_filter_tracker : public Sql_alloc
+{
+private:
+ /* A member to track the time to fill the rowid filter */
+ Time_and_counter_tracker time_tracker;
+
+ /* Size of the rowid filter container buffer */
+ size_t container_buff_size;
+
+ /* Count of elements that were used to fill the rowid filter container */
+ uint container_elements;
+
+ /* Elements counts used for observed selectivity calculation */
+ uint n_checks;
+ uint n_positive_checks;
+public:
+ Rowid_filter_tracker(bool do_timing) :
+ time_tracker(do_timing), container_buff_size(0),
+ container_elements(0), n_checks(0), n_positive_checks(0)
+ {}
+
+ inline void start_tracking()
+ {
+ ANALYZE_START_TRACKING(&time_tracker);
+ }
+
+ inline void stop_tracking()
+ {
+ ANALYZE_STOP_TRACKING(&time_tracker);
+ }
+
+  /* Save the container buffer size in bytes (elem_size is given in bits) */
+ inline void report_container_buff_size(uint elem_size)
+ {
+ container_buff_size= container_elements * elem_size / 8;
+ }
+
+ Time_and_counter_tracker *get_time_tracker()
+ {
+ return &time_tracker;
+ }
+
+ double get_time_fill_container_ms()
+ {
+ return time_tracker.get_time_ms();
+ }
+
+ void increment_checked_elements_count(bool was_checked)
+ {
+ n_checks++;
+ if (was_checked)
+ n_positive_checks++;
+ }
+
+ inline void increment_container_elements_count() { container_elements++; }
+
+ uint get_container_elements() { return container_elements; }
+
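+  /* Observed selectivity: share of the checked elements that were found in the container */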
+ double get_r_selectivity_pct()
+ {
+ return (double)n_positive_checks/(double)n_checks;
+ }
+
+ size_t get_container_buff_size() { return container_buff_size; }
+};
+
diff --git a/sql/sql_array.h b/sql/sql_array.h
index 0f18a89360a..30fbb140748 100644
--- a/sql/sql_array.h
+++ b/sql/sql_array.h
@@ -165,6 +165,19 @@ public:
return ((const Elem*)array.buffer) + array.elements - 1;
}
+ /// @returns pointer to n-th element
+ Elem *get_pos(size_t idx)
+ {
+ return ((Elem*)array.buffer) + idx;
+ }
+
+ /// @returns pointer to n-th element
+ const Elem *get_pos(size_t idx) const
+ {
+ return ((const Elem*)array.buffer) + idx;
+ }
+
+
/**
@retval false ok
@retval true OOM, @c my_error() has been called.
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index ddb8fee6f70..0e7059918c2 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -62,7 +62,11 @@
#include <io.h>
#endif
#include "wsrep_mysqld.h"
+#ifdef WITH_WSREP
#include "wsrep_thd.h"
+#include "wsrep_trans_observer.h"
+#endif /* WITH_WSREP */
+
bool
No_such_table_error_handler::handle_condition(THD *,
@@ -306,97 +310,65 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild)
}
-/*
- Close all tables which aren't in use by any thread
-
- @param thd Thread context
- @param tables List of tables to remove from the cache
- @param wait_for_refresh Wait for a impending flush
- @param timeout Timeout for waiting for flush to be completed.
-
- @note THD can be NULL, but then wait_for_refresh must be FALSE
- and tables must be NULL.
-
- @note When called as part of FLUSH TABLES WITH READ LOCK this function
- ignores metadata locks held by other threads. In order to avoid
- situation when FLUSH TABLES WITH READ LOCK sneaks in at the moment
- when some write-locked table is being reopened (by FLUSH TABLES or
- ALTER TABLE) we have to rely on additional global shared metadata
- lock taken by thread trying to obtain global read lock.
-*/
+/**
+  Close all tables that are not in use in the table definition cache.
+
+  @param purge_flag  Argument for tc_purge(): true if we should force all
+                     shares to be deleted; false if it is enough to just
+                     evict those that are not in use.
+*/
-struct close_cached_tables_arg
+void purge_tables(bool purge_flag)
{
- tdc_version_t refresh_version;
- TDC_element *element;
-};
-
+ /*
+ Force close of all open tables.
-static my_bool close_cached_tables_callback(TDC_element *element,
- close_cached_tables_arg *arg)
-{
- mysql_mutex_lock(&element->LOCK_table_share);
- if (element->share && element->flushed &&
- element->version < arg->refresh_version)
- {
- /* wait_for_old_version() will unlock mutex and free share */
- arg->element= element;
- return TRUE;
- }
- mysql_mutex_unlock(&element->LOCK_table_share);
- return FALSE;
+ Note that code in TABLE_SHARE::wait_for_old_version() assumes that
+ incrementing of refresh_version is followed by purge of unused table
+ shares.
+ */
+ kill_delayed_threads();
+ /*
+ Get rid of all unused TABLE and TABLE_SHARE instances. By doing
+ this we automatically close all tables which were marked as "old".
+ */
+ tc_purge(purge_flag);
+ /* Free table shares which were not freed implicitly by loop above. */
+ tdc_purge(true);
}
+/**
+ close_cached_tables
+
+ This function has two separate usages:
+ 1) Close unused tables in the table cache to free memory
+ 2) Close a list of tables and wait until they are not used anymore. This
+ is used mainly when preparing a table for export.
+
+ If there are locked tables, they are closed and reopened before the
+ function returns. This is done to ensure that table files will be closed
+ by all threads and thus externally copyable when FLUSH TABLES returns.
+*/
+
bool close_cached_tables(THD *thd, TABLE_LIST *tables,
bool wait_for_refresh, ulong timeout)
{
- bool result= FALSE;
- struct timespec abstime;
- tdc_version_t refresh_version;
DBUG_ENTER("close_cached_tables");
DBUG_ASSERT(thd || (!wait_for_refresh && !tables));
-
- refresh_version= tdc_increment_refresh_version();
+ DBUG_ASSERT(wait_for_refresh || !tables);
if (!tables)
{
- /*
- Force close of all open tables.
-
- Note that code in TABLE_SHARE::wait_for_old_version() assumes that
- incrementing of refresh_version is followed by purge of unused table
- shares.
- */
- kill_delayed_threads();
- /*
- Get rid of all unused TABLE and TABLE_SHARE instances. By doing
- this we automatically close all tables which were marked as "old".
- */
- tc_purge(true);
- /* Free table shares which were not freed implicitly by loop above. */
- tdc_purge(true);
- }
- else
- {
- bool found=0;
- for (TABLE_LIST *table= tables; table; table= table->next_local)
- {
- /* tdc_remove_table() also sets TABLE_SHARE::version to 0. */
- found|= tdc_remove_table(thd, TDC_RT_REMOVE_UNUSED, table->db.str,
- table->table_name.str, TRUE);
- }
- if (!found)
- wait_for_refresh=0; // Nothing to wait for
+ /* Free tables that are not used */
+ purge_tables(false);
+ if (!wait_for_refresh)
+ DBUG_RETURN(false);
}
DBUG_PRINT("info", ("open table definitions: %d",
(int) tdc_records()));
- if (!wait_for_refresh)
- DBUG_RETURN(result);
-
if (thd->locked_tables_mode)
{
/*
@@ -407,8 +379,9 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables,
*/
TABLE_LIST *tables_to_reopen= (tables ? tables :
thd->locked_tables_list.locked_tables());
+ bool result= false;
- /* Close open HANDLER instances to avoid self-deadlock. */
+ /* close open HANDLER for this thread to allow table to be closed */
mysql_ha_flush_tables(thd, tables_to_reopen);
for (TABLE_LIST *table_list= tables_to_reopen; table_list;
@@ -423,63 +396,15 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables,
if (! table)
continue;
- if (wait_while_table_is_used(thd, table,
- HA_EXTRA_PREPARE_FOR_FORCED_CLOSE))
- {
- result= TRUE;
- goto err_with_reopen;
- }
- close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL);
- }
- }
-
- /* Wait until all threads have closed all the tables we are flushing. */
- DBUG_PRINT("info", ("Waiting for other threads to close their open tables"));
-
- /*
- To a self-deadlock or deadlocks with other FLUSH threads
- waiting on our open HANDLERs, we have to flush them.
- */
- mysql_ha_flush(thd);
- DEBUG_SYNC(thd, "after_flush_unlock");
-
- if (!tables)
- {
- int r= 0;
- close_cached_tables_arg argument;
- argument.refresh_version= refresh_version;
- set_timespec(abstime, timeout);
-
- while (!thd->killed &&
- (r= tdc_iterate(thd,
- (my_hash_walk_action) close_cached_tables_callback,
- &argument)) == 1 &&
- !argument.element->share->wait_for_old_version(thd, &abstime,
- MDL_wait_for_subgraph::DEADLOCK_WEIGHT_DDL))
- /* no-op */;
-
- if (r)
- result= TRUE;
- }
- else
- {
- for (TABLE_LIST *table= tables; table; table= table->next_local)
- {
- if (thd->killed)
- break;
- if (tdc_wait_for_old_version(thd, table->db.str, table->table_name.str, timeout,
- MDL_wait_for_subgraph::DEADLOCK_WEIGHT_DDL,
- refresh_version))
+ if (thd->mdl_context.upgrade_shared_lock(table->mdl_ticket, MDL_EXCLUSIVE,
+ timeout))
{
- result= TRUE;
+ result= true;
break;
}
+ table->file->extra(HA_EXTRA_PREPARE_FOR_FORCED_CLOSE);
+ close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL);
}
- }
-
-err_with_reopen:
- if (thd->locked_tables_mode)
- {
/*
No other thread has the locked tables open; reopen them and get the
old locks. This should always succeed (unless some external process
@@ -487,6 +412,7 @@ err_with_reopen:
*/
if (thd->locked_tables_list.reopen_tables(thd, false))
result= true;
+
/*
Since downgrade_lock() won't do anything with shared
metadata lock it is much simpler to go through all open tables rather
@@ -494,7 +420,181 @@ err_with_reopen:
*/
for (TABLE *tab= thd->open_tables; tab; tab= tab->next)
tab->mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE);
+
+ DBUG_RETURN(result);
+ }
+ else if (tables)
+ {
+ /*
+ Get an explicit MDL lock for all requested tables to ensure they are
+ not used by any other thread
+ */
+ MDL_request_list mdl_requests;
+
+ DBUG_PRINT("info", ("Waiting for other threads to close their open tables"));
+ DEBUG_SYNC(thd, "after_flush_unlock");
+
+ /* close open HANDLER for this thread to allow table to be closed */
+ mysql_ha_flush_tables(thd, tables);
+
+ for (TABLE_LIST *table= tables; table; table= table->next_local)
+ {
+ MDL_request *mdl_request= new (thd->mem_root) MDL_request;
+ if (mdl_request == NULL)
+ DBUG_RETURN(true);
+ mdl_request->init(&table->mdl_request.key, MDL_EXCLUSIVE, MDL_STATEMENT);
+ mdl_requests.push_front(mdl_request);
+ }
+
+ if (thd->mdl_context.acquire_locks(&mdl_requests, timeout))
+ DBUG_RETURN(true);
+
+ for (TABLE_LIST *table= tables; table; table= table->next_local)
+ tdc_remove_table(thd, TDC_RT_REMOVE_ALL, table->db.str,
+ table->table_name.str, false);
+ }
+ DBUG_RETURN(false);
+}
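The two documented call patterns look roughly like this (sketch only, not part of the patch; thd and tables are assumed to be in scope):

    /* 1) FLUSH TABLES without a table list: evict what is unused, don't wait. */
    close_cached_tables(thd, NULL, /* wait_for_refresh */ false, LONG_TIMEOUT);

    /* 2) A list of tables, e.g. when preparing them for export: wait until no
          other thread uses them (wait_for_refresh must be true in this case). */
    close_cached_tables(thd, tables, /* wait_for_refresh */ true,
                        thd->variables.lock_wait_timeout);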
+
+
+/**
+ Collect all shares that have open tables
+*/
+
+struct tc_collect_arg
+{
+ DYNAMIC_ARRAY shares;
+ flush_tables_type flush_type;
+};
+
+static my_bool tc_collect_used_shares(TDC_element *element,
+ tc_collect_arg *arg)
+{
+ my_bool result= FALSE;
+
+ DYNAMIC_ARRAY *shares= &arg->shares;
+ mysql_mutex_lock(&element->LOCK_table_share);
+ if (element->ref_count > 0 && !element->share->is_view)
+ {
+ DBUG_ASSERT(element->share);
+ bool do_flush= 0;
+ switch (arg->flush_type) {
+ case FLUSH_ALL:
+ do_flush= 1;
+ break;
+ case FLUSH_NON_TRANS_TABLES:
+ if (!element->share->online_backup &&
+ element->share->table_category == TABLE_CATEGORY_USER)
+ do_flush= 1;
+ break;
+ case FLUSH_SYS_TABLES:
+ if (!element->share->online_backup &&
+ element->share->table_category != TABLE_CATEGORY_USER)
+ do_flush= 1;
+ }
+ if (do_flush)
+ {
+ element->ref_count++; // Protect against delete
+ if (push_dynamic(shares, (uchar*) &element->share))
+ result= TRUE;
+ }
+ }
+ mysql_mutex_unlock(&element->LOCK_table_share);
+ return result;
+}
+
+
+/**
+ Flush cached tables as part of global read lock
+
+ @param thd
+ @param flag What type of tables should be flushed
+
+ @return 0 ok
+ @return 1 error
+
+ After we get the list of table shares, we will call flush on all
+ possible tables, even if some flush fails.
+*/
+
+bool flush_tables(THD *thd, flush_tables_type flag)
+{
+ bool result= TRUE;
+ uint open_errors= 0;
+ tc_collect_arg collect_arg;
+ TABLE *tmp_table;
+ DBUG_ENTER("flush_tables");
+
+ purge_tables(false); /* Flush unused tables and shares */
+
+ /*
+ Loop over all shares and collect shares that have open tables
+ TODO:
+ Optimize this to only collect shares that have been used for
+ write since the last time all tables were closed.
+ */
+
+ if (!(tmp_table= (TABLE*) my_malloc(sizeof(*tmp_table),
+ MYF(MY_WME | MY_THREAD_SPECIFIC))))
+ DBUG_RETURN(1);
+
+ my_init_dynamic_array(&collect_arg.shares, sizeof(TABLE_SHARE*), 100, 100,
+ MYF(0));
+ collect_arg.flush_type= flag;
+ if (tdc_iterate(thd, (my_hash_walk_action) tc_collect_used_shares,
+ &collect_arg, true))
+ {
+ /* Release already collected shares */
+ for (uint i= 0 ; i < collect_arg.shares.elements ; i++)
+ {
+ TABLE_SHARE *share= *dynamic_element(&collect_arg.shares, i,
+ TABLE_SHARE**);
+ tdc_release_share(share);
+ }
+ goto err;
+ }
+
+ /* Call HA_EXTRA_FLUSH on all found shares */
+ for (uint i= 0 ; i < collect_arg.shares.elements ; i++)
+ {
+ TABLE_SHARE *share= *dynamic_element(&collect_arg.shares, i,
+ TABLE_SHARE**);
+ TABLE *table= tc_acquire_table(thd, share->tdc);
+ if (table)
+ {
+ (void) table->file->extra(HA_EXTRA_FLUSH);
+ tc_release_table(table);
+ }
+ else
+ {
+ /*
+ HA_OPEN_FOR_ALTER is used to allow us to open the table even if
+ TABLE_SHARE::incompatible_version is set.
+ */
+ if (!open_table_from_share(thd, share, &empty_clex_str,
+ HA_OPEN_KEYFILE, 0,
+ HA_OPEN_FOR_ALTER,
+ tmp_table, FALSE,
+ NULL))
+ {
+ (void) tmp_table->file->extra(HA_EXTRA_FLUSH);
+ /*
+ We don't put the table into the TDC as the table was not fully
+ opened (we didn't open triggers)
+ */
+ closefrm(tmp_table);
+ }
+ else
+ open_errors++;
+ }
+ tdc_release_share(share);
}
+
+ result= open_errors ? TRUE : FALSE;
+ DBUG_PRINT("note", ("open_errors: %u", open_errors));
+err:
+ my_free(tmp_table);
+ delete_dynamic(&collect_arg.shares);
DBUG_RETURN(result);
}
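A sketch of how a caller might invoke the new function with the flush_tables_type values declared in sql_base.h (illustrative only, not taken from the patch):

    /* Flush only non-transactional user tables, e.g. before a backup stage. */
    if (flush_tables(thd, FLUSH_NON_TRANS_TABLES))
    {
      /* Some share could not be opened/flushed (open_errors != 0). */
    }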
@@ -552,8 +652,17 @@ end:
}
+/**
+ Close cached connections
+
+ @return false ok
+ @return true If there was an error from close_cached_connection_tables or
+ if there were any open connections that we had to force closed
+*/
+
bool close_cached_connection_tables(THD *thd, LEX_CSTRING *connection)
{
+ bool res= false;
close_cached_connection_tables_arg argument;
DBUG_ENTER("close_cached_connections");
DBUG_ASSERT(thd);
@@ -567,9 +676,13 @@ bool close_cached_connection_tables(THD *thd, LEX_CSTRING *connection)
&argument))
DBUG_RETURN(true);
- DBUG_RETURN(argument.tables ?
- close_cached_tables(thd, argument.tables, FALSE, LONG_TIMEOUT) :
- false);
+ for (TABLE_LIST *table= argument.tables; table; table= table->next_local)
+ res|= tdc_remove_table(thd, TDC_RT_REMOVE_UNUSED,
+ table->db.str,
+ table->table_name.str, TRUE);
+
+ /* Return true if we found any open connections */
+ DBUG_RETURN(res);
}
@@ -598,6 +711,7 @@ bool close_cached_connection_tables(THD *thd, LEX_CSTRING *connection)
static void mark_used_tables_as_free_for_reuse(THD *thd, TABLE *table)
{
+ DBUG_ENTER("mark_used_tables_as_free_for_reuse");
for (; table ; table= table->next)
{
DBUG_ASSERT(table->pos_in_locked_tables == NULL ||
@@ -608,6 +722,7 @@ static void mark_used_tables_as_free_for_reuse(THD *thd, TABLE *table)
table->file->ha_reset();
}
}
+ DBUG_VOID_RETURN;
}
@@ -630,7 +745,7 @@ static void mark_used_tables_as_free_for_reuse(THD *thd, TABLE *table)
- The table is marked as closed in the
locked_table_list but kept there so one can call
locked_table_list->reopen_tables() to put it back.
-
+
In case of drop/rename the documented behavior is to
implicitly remove the table from LOCK TABLES
list.
@@ -735,12 +850,16 @@ void close_thread_tables(THD *thd)
DBUG_ASSERT(thd->transaction.stmt.is_empty() || thd->in_sub_stmt ||
(thd->state_flags & Open_tables_state::BACKUPS_AVAIL));
- /* Detach MERGE children after every statement. Even under LOCK TABLES. */
for (table= thd->open_tables; table; table= table->next)
{
+ if (table->update_handler)
+ table->delete_update_handler();
+
/* Table might be in use by some outer statement. */
DBUG_PRINT("tcache", ("table: '%s' query_id: %lu",
table->s->table_name.str, (ulong) table->query_id));
+
+ /* Detach MERGE children after every statement. Even under LOCK TABLES. */
if (thd->locked_tables_mode <= LTM_LOCK_TABLES ||
table->query_id == thd->query_id)
{
@@ -1738,59 +1857,6 @@ bool open_table(THD *thd, TABLE_LIST *table_list, Open_table_context *ot_ctx)
if (! (flags & MYSQL_OPEN_HAS_MDL_LOCK))
{
- /*
- We are not under LOCK TABLES and going to acquire write-lock/
- modify the base table. We need to acquire protection against
- global read lock until end of this statement in order to have
- this statement blocked by active FLUSH TABLES WITH READ LOCK.
-
- We don't need to acquire this protection under LOCK TABLES as
- such protection already acquired at LOCK TABLES time and
- not released until UNLOCK TABLES.
-
- We don't block statements which modify only temporary tables
- as these tables are not preserved by any form of
- backup which uses FLUSH TABLES WITH READ LOCK.
-
- TODO: The fact that we sometimes acquire protection against
- GRL only when we encounter table to be write-locked
- slightly increases probability of deadlock.
- This problem will be solved once Alik pushes his
- temporary table refactoring patch and we can start
- pre-acquiring metadata locks at the beggining of
- open_tables() call.
- */
- if (table_list->mdl_request.is_write_lock_request() &&
- ! (flags & (MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK |
- MYSQL_OPEN_FORCE_SHARED_MDL |
- MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL |
- MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK)) &&
- ! ot_ctx->has_protection_against_grl())
- {
- MDL_request protection_request;
- MDL_deadlock_handler mdl_deadlock_handler(ot_ctx);
-
- if (thd->global_read_lock.can_acquire_protection())
- DBUG_RETURN(TRUE);
-
- protection_request.init(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE,
- MDL_STATEMENT);
-
- /*
- Install error handler which if possible will convert deadlock error
- into request to back-off and restart process of opening tables.
- */
- thd->push_internal_handler(&mdl_deadlock_handler);
- bool result= thd->mdl_context.acquire_lock(&protection_request,
- ot_ctx->get_timeout());
- thd->pop_internal_handler();
-
- if (result)
- DBUG_RETURN(TRUE);
-
- ot_ctx->set_has_protection_against_grl();
- }
-
if (open_table_get_mdl_lock(thd, ot_ctx, &table_list->mdl_request,
flags, &mdl_ticket) ||
mdl_ticket == NULL)
@@ -1889,7 +1955,6 @@ retry_share:
if (mysql_make_view(thd, share, table_list, false))
goto err_lock;
-
/* TODO: Don't free this */
tdc_release_share(share);
@@ -1963,7 +2028,6 @@ retry_share:
else
{
enum open_frm_error error;
-
/* make a new table */
if (!(table=(TABLE*) my_malloc(sizeof(*table),MYF(MY_WME))))
goto err_lock;
@@ -2002,6 +2066,78 @@ retry_share:
tc_add_table(thd, table);
}
+ if (!(flags & MYSQL_OPEN_HAS_MDL_LOCK) &&
+ table->s->table_category < TABLE_CATEGORY_INFORMATION)
+ {
+ /*
+ We are not under LOCK TABLES and going to acquire write-lock/
+ modify the base table. We need to acquire protection against
+ global read lock until end of this statement in order to have
+ this statement blocked by active FLUSH TABLES WITH READ LOCK.
+
+ We don't need to acquire this protection under LOCK TABLES as
+ such protection already acquired at LOCK TABLES time and
+ not released until UNLOCK TABLES.
+
+ We don't block statements which modify only temporary tables
+ as these tables are not preserved by any form of
+ backup which uses FLUSH TABLES WITH READ LOCK.
+
+ TODO: The fact that we sometimes acquire protection against
+ GRL only when we encounter table to be write-locked
+ slightly increases probability of deadlock.
+ This problem will be solved once Alik pushes his
+ temporary table refactoring patch and we can start
+ pre-acquiring metadata locks at the beginning of
+ open_tables() call.
+ */
+ enum enum_mdl_type mdl_type= MDL_BACKUP_DML;
+
+ if (table->s->table_category != TABLE_CATEGORY_USER)
+ mdl_type= MDL_BACKUP_SYS_DML;
+ else if (table->s->online_backup)
+ mdl_type= MDL_BACKUP_TRANS_DML;
+
+ if (table_list->mdl_request.is_write_lock_request() &&
+ ! (flags & (MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK |
+ MYSQL_OPEN_FORCE_SHARED_MDL |
+ MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL |
+ MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK)) &&
+ ! ot_ctx->has_protection_against_grl(mdl_type))
+ {
+ MDL_request protection_request;
+ MDL_deadlock_handler mdl_deadlock_handler(ot_ctx);
+
+ if (thd->has_read_only_protection())
+ {
+ MYSQL_UNBIND_TABLE(table->file);
+ tc_release_table(table);
+ DBUG_RETURN(TRUE);
+ }
+
+ protection_request.init(MDL_key::BACKUP, "", "", mdl_type,
+ MDL_STATEMENT);
+
+ /*
+ Install error handler which if possible will convert deadlock error
+ into request to back-off and restart process of opening tables.
+ */
+ thd->push_internal_handler(&mdl_deadlock_handler);
+ bool result= thd->mdl_context.acquire_lock(&protection_request,
+ ot_ctx->get_timeout());
+ thd->pop_internal_handler();
+
+ if (result)
+ {
+ MYSQL_UNBIND_TABLE(table->file);
+ tc_release_table(table);
+ DBUG_RETURN(TRUE);
+ }
+
+ ot_ctx->set_has_protection_against_grl(mdl_type);
+ }
+ }
+
table->mdl_ticket= mdl_ticket;
table->next= thd->open_tables; /* Link into simple list */
@@ -2118,8 +2254,8 @@ TABLE *find_table_for_mdl_upgrade(THD *thd, const char *db,
cases don't take a global IX lock in order to be compatible with
global read lock.
*/
- if (unlikely(!thd->mdl_context.is_lock_owner(MDL_key::GLOBAL, "", "",
- MDL_INTENTION_EXCLUSIVE)))
+ if (unlikely(!thd->mdl_context.is_lock_owner(MDL_key::BACKUP, "", "",
+ MDL_BACKUP_DDL)))
{
error= ER_TABLE_NOT_LOCKED_FOR_WRITE;
goto err_exit;
@@ -2876,7 +3012,7 @@ Open_table_context::Open_table_context(THD *thd, uint flags)
m_flags(flags),
m_action(OT_NO_ACTION),
m_has_locks(thd->mdl_context.has_locks()),
- m_has_protection_against_grl(FALSE)
+ m_has_protection_against_grl(0)
{}
@@ -3092,7 +3228,7 @@ Open_table_context::recover_from_failed_open()
against GRL. It is no longer valid as the corresponding lock was
released by close_tables_for_reopen().
*/
- m_has_protection_against_grl= FALSE;
+ m_has_protection_against_grl= 0;
/* Prepare for possible another back-off. */
m_action= OT_NO_ACTION;
return result;
@@ -3778,6 +3914,40 @@ end:
}
+static bool upgrade_lock_if_not_exists(THD *thd,
+ const DDL_options_st &create_info,
+ TABLE_LIST *create_table,
+ ulong lock_wait_timeout)
+{
+ DBUG_ENTER("upgrade_lock_if_not_exists");
+
+ if (thd->lex->sql_command == SQLCOM_CREATE_TABLE ||
+ thd->lex->sql_command == SQLCOM_CREATE_SEQUENCE)
+ {
+ DEBUG_SYNC(thd,"create_table_before_check_if_exists");
+ if (!create_info.or_replace() &&
+ ha_table_exists(thd, &create_table->db, &create_table->table_name))
+ {
+ if (create_info.if_not_exists())
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_TABLE_EXISTS_ERROR,
+ ER_THD(thd, ER_TABLE_EXISTS_ERROR),
+ create_table->table_name.str);
+ }
+ else
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0), create_table->table_name.str);
+ DBUG_RETURN(true);
+ }
+ DBUG_RETURN(thd->mdl_context.upgrade_shared_lock(
+ create_table->mdl_request.ticket,
+ MDL_EXCLUSIVE,
+ lock_wait_timeout));
+ }
+ DBUG_RETURN(false);
+}
+
+
/**
Acquire upgradable (SNW, SNRW) metadata locks on tables used by
LOCK TABLES or by a DDL statement. Under LOCK TABLES, we can't take
@@ -3815,10 +3985,7 @@ lock_table_names(THD *thd, const DDL_options_st &options,
MDL_request_list mdl_requests;
TABLE_LIST *table;
MDL_request global_request;
- ulong org_lock_wait_timeout= lock_wait_timeout;
- /* Check if we are using CREATE TABLE ... IF NOT EXISTS */
- bool create_table;
- Dummy_error_handler error_handler;
+ MDL_savepoint mdl_savepoint;
DBUG_ENTER("lock_table_names");
DBUG_ASSERT(!thd->locked_tables_mode);
@@ -3826,6 +3993,8 @@ lock_table_names(THD *thd, const DDL_options_st &options,
for (table= tables_start; table && table != tables_end;
table= table->next_global)
{
+ DBUG_PRINT("info", ("mdl_request.type: %d open_type: %d",
+ table->mdl_request.type, table->open_type));
if (table->mdl_request.type < MDL_SHARED_UPGRADABLE ||
table->mdl_request.type == MDL_SHARED_READ_ONLY ||
table->open_type == OT_TEMPORARY_ONLY ||
@@ -3859,73 +4028,48 @@ lock_table_names(THD *thd, const DDL_options_st &options,
if (mdl_requests.is_empty())
DBUG_RETURN(FALSE);
- /* Check if CREATE TABLE without REPLACE was used */
- create_table= ((thd->lex->sql_command == SQLCOM_CREATE_TABLE ||
- thd->lex->sql_command == SQLCOM_CREATE_SEQUENCE) &&
- !options.or_replace());
-
- if (!(flags & MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK))
+ if (flags & MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK)
{
- /*
- Protect this statement against concurrent global read lock
- by acquiring global intention exclusive lock with statement
- duration.
- */
- if (thd->global_read_lock.can_acquire_protection())
- DBUG_RETURN(TRUE);
- global_request.init(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE,
- MDL_STATEMENT);
- mdl_requests.push_front(&global_request);
-
- if (create_table)
-#ifdef WITH_WSREP
- if (thd->lex->sql_command != SQLCOM_CREATE_TABLE &&
- thd->wsrep_exec_mode != REPL_RECV)
-#endif
- lock_wait_timeout= 0; // Don't wait for timeout
+ DBUG_RETURN(thd->mdl_context.acquire_locks(&mdl_requests,
+ lock_wait_timeout) ||
+ upgrade_lock_if_not_exists(thd, options, tables_start,
+ lock_wait_timeout));
}
- for (;;)
- {
- if (create_table)
- thd->push_internal_handler(&error_handler); // Avoid warnings & errors
- bool res= thd->mdl_context.acquire_locks(&mdl_requests, lock_wait_timeout);
- if (create_table)
- thd->pop_internal_handler();
- if (!res)
- DBUG_RETURN(FALSE); // Got locks
+ /* Protect this statement against concurrent BACKUP STAGE or FTWRL. */
+ if (thd->has_read_only_protection())
+ DBUG_RETURN(true);
- if (!create_table)
- DBUG_RETURN(TRUE); // Return original error
+ global_request.init(MDL_key::BACKUP, "", "", MDL_BACKUP_DDL, MDL_STATEMENT);
+ mdl_savepoint= thd->mdl_context.mdl_savepoint();
- /*
- We come here in the case of lock timeout when executing CREATE TABLE.
- Verify that table does exist (it usually does, as we got a lock conflict)
- */
- if (ha_table_exists(thd, &tables_start->db, &tables_start->table_name))
+ while (!thd->mdl_context.acquire_locks(&mdl_requests, lock_wait_timeout) &&
+ !upgrade_lock_if_not_exists(thd, options, tables_start,
+ lock_wait_timeout) &&
+ !thd->mdl_context.try_acquire_lock(&global_request))
+ {
+ if (global_request.ticket)
{
- if (options.if_not_exists())
- {
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
- ER_TABLE_EXISTS_ERROR,
- ER_THD(thd, ER_TABLE_EXISTS_ERROR),
- tables_start->table_name.str);
- }
- else
- my_error(ER_TABLE_EXISTS_ERROR, MYF(0), tables_start->table_name.str);
- DBUG_RETURN(TRUE);
+ thd->mdl_backup_ticket= global_request.ticket;
+ DBUG_RETURN(false);
}
+
/*
- We got error from acquire_locks, but the table didn't exists.
- This could happen if another connection runs a statement
- involving this non-existent table, and this statement took the mdl,
- but didn't error out with ER_NO_SUCH_TABLE yet (yes, a race condition).
- We play safe and restart the original acquire_locks with the
- original timeout.
+ There is an ongoing or pending BACKUP STAGE or FTWRL.
+ Wait until it finishes and retry.
*/
- create_table= 0;
- lock_wait_timeout= org_lock_wait_timeout;
+ thd->mdl_context.rollback_to_savepoint(mdl_savepoint);
+ if (thd->mdl_context.acquire_lock(&global_request, lock_wait_timeout))
+ break;
+ thd->mdl_context.rollback_to_savepoint(mdl_savepoint);
+
+ /* Reset tickets for all acquired locks */
+ global_request.ticket= 0;
+ MDL_request_list::Iterator it(mdl_requests);
+ while (auto mdl_request= it++)
+ mdl_request->ticket= 0;
}
+ DBUG_RETURN(true);
}
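For readability, the control flow of the new acquisition loop can be summarized as follows (annotation only, not part of the patch):

    /*
      1. Acquire the per-table upgradable/exclusive locks and, for CREATE TABLE
         [IF NOT EXISTS], possibly upgrade or bail out via upgrade_lock_if_not_exists().
      2. Probe MDL_BACKUP_DDL without waiting (try_acquire_lock). If the ticket is
         granted, remember it in thd->mdl_backup_ticket and return success.
      3. Otherwise a BACKUP STAGE or FTWRL is active: roll back to the savepoint,
         wait for the BACKUP_DDL lock with the normal timeout, roll back again,
         clear all tickets and retry from step 1.
    */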
@@ -4046,13 +4190,9 @@ bool open_tables(THD *thd, const DDL_options_st &options,
bool has_prelocking_list;
DBUG_ENTER("open_tables");
- /* Accessing data in XA_IDLE or XA_PREPARED is not allowed. */
- enum xa_states xa_state= thd->transaction.xid_state.xa_state;
- if (*start && (xa_state == XA_IDLE || xa_state == XA_PREPARED))
- {
- my_error(ER_XAER_RMFAIL, MYF(0), xa_state_names[xa_state]);
+ /* Data access in XA transaction is only allowed when it is active. */
+ if (*start && thd->transaction.xid_state.check_has_uncommitted_xa())
DBUG_RETURN(true);
- }
thd->current_tablenr= 0;
restart:
@@ -4297,13 +4437,14 @@ restart:
}
}
+#ifdef WITH_WSREP
if (WSREP_ON &&
wsrep_replicate_myisam &&
(*start) &&
(*start)->table &&
(*start)->table->file->ht == myisam_hton &&
- wsrep_thd_exec_mode(thd) == LOCAL_STATE &&
- !is_stat_table(&(*start)->db, &(*start)->alias) &&
+ wsrep_thd_is_local(thd) &&
+ !is_stat_table(&(*start)->db, &(*start)->alias) &&
thd->get_command() != COM_STMT_PREPARE &&
((thd->lex->sql_command == SQLCOM_INSERT ||
thd->lex->sql_command == SQLCOM_INSERT_SELECT ||
@@ -4314,11 +4455,17 @@ restart:
thd->lex->sql_command == SQLCOM_LOAD ||
thd->lex->sql_command == SQLCOM_DELETE)))
{
- WSREP_TO_ISOLATION_BEGIN(NULL, NULL, (*start));
+ wsrep_before_rollback(thd, true);
+ wsrep_after_rollback(thd, true);
+ wsrep_after_statement(thd);
+ WSREP_TO_ISOLATION_BEGIN(NULL, NULL, (*start));
}
+#endif /* WITH_WSREP */
error:
-WSREP_ERROR_LABEL:
+#ifdef WITH_WSREP
+wsrep_error_label:
+#endif
THD_STAGE_INFO(thd, stage_after_opening_tables);
thd_proc_info(thd, 0);
@@ -5240,8 +5387,7 @@ err:
@retval TRUE A lock wait timeout, deadlock or out of memory.
*/
-bool lock_tables(THD *thd, TABLE_LIST *tables, uint count,
- uint flags)
+bool lock_tables(THD *thd, TABLE_LIST *tables, uint count, uint flags)
{
TABLE_LIST *table;
DBUG_ENTER("lock_tables");
@@ -5494,43 +5640,27 @@ static void update_field_dependencies(THD *thd, Field *field, TABLE *table)
DBUG_ENTER("update_field_dependencies");
if (should_mark_column(thd->column_usage))
{
- MY_BITMAP *bitmap;
-
/*
We always want to register the used keys, as the column bitmap may have
been set for all fields (for example for view).
*/
-
table->covering_keys.intersect(field->part_of_key);
- if (field->vcol_info)
- table->mark_virtual_col(field);
-
if (thd->column_usage == MARK_COLUMNS_READ)
- bitmap= table->read_set;
+ {
+ if (table->mark_column_with_deps(field))
+ DBUG_VOID_RETURN; // Field was already marked
+ }
else
- bitmap= table->write_set;
-
- /*
- The test-and-set mechanism in the bitmap is not reliable during
- multi-UPDATE statements under MARK_COLUMNS_READ mode
- (thd->column_usage == MARK_COLUMNS_READ), as this bitmap contains
- only those columns that are used in the SET clause. I.e they are being
- set here. See multi_update::prepare()
- */
- if (bitmap_fast_test_and_set(bitmap, field->field_index))
{
- if (thd->column_usage == MARK_COLUMNS_WRITE)
+ if (bitmap_fast_test_and_set(table->write_set, field->field_index))
{
DBUG_PRINT("warning", ("Found duplicated field"));
thd->dup_field= field;
+ DBUG_VOID_RETURN;
}
- else
- {
- DBUG_PRINT("note", ("Field found before"));
- }
- DBUG_VOID_RETURN;
}
+
table->used_fields++;
}
if (table->get_fields_in_item_tree)
@@ -7459,7 +7589,7 @@ bool setup_fields(THD *thd, Ref_ptr_array ref_pointer_array,
Item_window_func::split_sum_func.
*/
if (sum_func_list &&
- ((item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) ||
+ ((item->with_sum_func() && item->type() != Item::SUM_FUNC_ITEM) ||
item->with_window_func))
{
item->split_sum_func(thd, ref_pointer_array, *sum_func_list,
@@ -7567,7 +7697,7 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
TABLE_LIST *first_select_table= (select_insert ?
tables->next_local:
0);
- SELECT_LEX *select_lex= select_insert ? &thd->lex->select_lex :
+ SELECT_LEX *select_lex= select_insert ? thd->lex->first_select_lex() :
thd->lex->current_select;
if (select_lex->first_cond_optimization)
{
@@ -7595,7 +7725,7 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
{
/* new counting for SELECT of INSERT ... SELECT command */
first_select_table= 0;
- thd->lex->select_lex.insert_tables= tablenr;
+ thd->lex->first_select_lex()->insert_tables= tablenr;
tablenr= 0;
}
if(table_list->jtbm_subselect)
@@ -7962,18 +8092,9 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
if ((field= field_iterator.field()))
{
- /* Mark fields as used to allow storage engine to optimze access */
- bitmap_set_bit(field->table->read_set, field->field_index);
- /*
- Mark virtual fields for write and others that the virtual fields
- depend on for read.
- */
- if (field->vcol_info)
- field->table->mark_virtual_col(field);
+ field->table->mark_column_with_deps(field);
if (table)
- {
table->covering_keys.intersect(field->part_of_key);
- }
if (tables->is_natural_join)
{
TABLE *field_table;
@@ -8151,7 +8272,7 @@ int setup_conds(THD *thd, TABLE_LIST *tables, List<TABLE_LIST> &leaves,
from subquery of VIEW, because tables of subquery belongs to VIEW
(see condition before prepare_check_option() call)
*/
- bool it_is_update= (select_lex == &thd->lex->select_lex) &&
+ bool it_is_update= (select_lex == thd->lex->first_select_lex()) &&
thd->lex->which_check_option_applicable();
bool save_is_item_list_lookup= select_lex->is_item_list_lookup;
TABLE_LIST *derived= select_lex->master_unit()->derived;
@@ -8167,7 +8288,7 @@ int setup_conds(THD *thd, TABLE_LIST *tables, List<TABLE_LIST> &leaves,
for (table= tables; table; table= table->next_local)
{
- if (select_lex == &thd->lex->select_lex &&
+ if (select_lex == thd->lex->first_select_lex() &&
select_lex->first_cond_optimization &&
table->merged_for_insert &&
table->prepare_where(thd, conds, FALSE))
@@ -8337,12 +8458,12 @@ fill_record(THD *thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
if (!update && table_arg->default_field &&
table_arg->update_default_fields(0, ignore_errors))
goto err;
+ if (table_arg->versioned() && !only_unvers_fields)
+ table_arg->vers_update_fields();
/* Update virtual fields */
if (table_arg->vfield &&
table_arg->update_virtual_fields(table_arg->file, VCOL_UPDATE_FOR_WRITE))
goto err;
- if (table_arg->versioned() && !only_unvers_fields)
- table_arg->vers_update_fields();
thd->abort_on_warning= save_abort_on_warning;
thd->no_errors= save_no_errors;
DBUG_RETURN(thd->is_error());
@@ -8596,11 +8717,11 @@ fill_record(THD *thd, TABLE *table, Field **ptr, List<Item> &values,
goto err;
/* Update virtual fields */
thd->abort_on_warning= FALSE;
+ if (table->versioned())
+ table->vers_update_fields();
if (table->vfield &&
table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_WRITE))
goto err;
- if (table->versioned())
- table->vers_update_fields();
thd->abort_on_warning= abort_on_warning_saved;
DBUG_RETURN(thd->is_error());
@@ -8774,7 +8895,7 @@ int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order)
Item_func_match *ifm;
while ((ifm=li++))
- if (unlikely(!ifm->fixed))
+ if (unlikely(!ifm->is_fixed()))
/*
it mean that clause where was FT function was removed, so we have
to remove the function from the list.
@@ -8828,7 +8949,6 @@ open_system_tables_for_read(THD *thd, TABLE_LIST *table_list,
{
Query_tables_list query_tables_list_backup;
LEX *lex= thd->lex;
-
DBUG_ENTER("open_system_tables_for_read");
/*
@@ -8842,9 +8962,15 @@ open_system_tables_for_read(THD *thd, TABLE_LIST *table_list,
thd->reset_n_backup_open_tables_state(backup);
thd->lex->sql_command= SQLCOM_SELECT;
+ /*
+ Only use MYSQL_LOCK_IGNORE_TIMEOUT for tables opened for read.
+ This is to ensure that lock_wait_timeout is honored when trying
+ to update stats tables.
+ */
if (open_and_lock_tables(thd, table_list, FALSE,
- MYSQL_OPEN_IGNORE_FLUSH |
- MYSQL_LOCK_IGNORE_TIMEOUT))
+ (MYSQL_OPEN_IGNORE_FLUSH |
+ (table_list->lock_type < TL_WRITE_ALLOW_WRITE ?
+ MYSQL_LOCK_IGNORE_TIMEOUT : 0))))
{
lex->restore_backup_query_tables_list(&query_tables_list_backup);
thd->restore_backup_open_tables_state(backup);
@@ -8876,6 +9002,13 @@ open_system_tables_for_read(THD *thd, TABLE_LIST *table_list,
void
close_system_tables(THD *thd, Open_tables_backup *backup)
{
+ /*
+ Inform the transaction handler that we are closing the
+ system tables and we don't need the read view anymore.
+ */
+ for (TABLE *table= thd->open_tables ; table ; table= table->next)
+ table->file->extra(HA_EXTRA_PREPARE_FOR_FORCED_CLOSE);
+
close_thread_tables(thd);
thd->restore_backup_open_tables_state(backup);
}
@@ -9009,7 +9142,7 @@ void unfix_fields(List<Item> &fields)
List_iterator<Item> li(fields);
Item *item;
while ((item= li++))
- item->fixed= 0;
+ item->unfix_fields();
}
diff --git a/sql/sql_base.h b/sql/sql_base.h
index 22247af07a8..7bcbc5d7a23 100644
--- a/sql/sql_base.h
+++ b/sql/sql_base.h
@@ -57,6 +57,13 @@ enum enum_resolution_type {
RESOLVED_AGAINST_ALIAS
};
+/* Argument to flush_tables() that specifies what to flush */
+enum flush_tables_type {
+ FLUSH_ALL,
+ FLUSH_NON_TRANS_TABLES,
+ FLUSH_SYS_TABLES
+};
+
enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND,
IGNORE_ERRORS, REPORT_EXCEPT_NON_UNIQUE,
IGNORE_EXCEPT_NON_UNIQUE};
@@ -288,12 +295,10 @@ TABLE *open_system_table_for_update(THD *thd, TABLE_LIST *one_table);
TABLE *open_log_table(THD *thd, TABLE_LIST *one_table, Open_tables_backup *backup);
void close_log_table(THD *thd, Open_tables_backup *backup);
-TABLE *open_performance_schema_table(THD *thd, TABLE_LIST *one_table,
- Open_tables_state *backup);
-void close_performance_schema_table(THD *thd, Open_tables_state *backup);
-
bool close_cached_tables(THD *thd, TABLE_LIST *tables,
bool wait_for_refresh, ulong timeout);
+void purge_tables(bool purge_flag);
+bool flush_tables(THD *thd, flush_tables_type flag);
bool close_cached_connection_tables(THD *thd, LEX_CSTRING *connect_string);
void close_all_tables_for_name(THD *thd, TABLE_SHARE *share,
ha_extra_function extra,
@@ -347,13 +352,6 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr)
table->force_index= table_list->force_index;
table->force_index_order= table->force_index_group= 0;
table->covering_keys= table->s->keys_for_keyread;
- TABLE_LIST *orig= table_list->select_lex ?
- table_list->select_lex->master_unit()->derived : 0;
- if (!orig || !orig->is_merged_derived())
- {
- /* Tables merged from derived were set up already.*/
- table->covering_keys= table->s->keys_for_keyread;
- }
}
inline TABLE_LIST *find_table_in_global_list(TABLE_LIST *table,
@@ -371,10 +369,12 @@ inline bool setup_fields_with_no_wrap(THD *thd, Ref_ptr_array ref_pointer_array,
bool allow_sum_func)
{
bool res;
- thd->lex->select_lex.no_wrap_view_item= TRUE;
+ SELECT_LEX *first= thd->lex->first_select_lex();
+ DBUG_ASSERT(thd->lex->current_select == first);
+ first->no_wrap_view_item= TRUE;
res= setup_fields(thd, ref_pointer_array, item, column_usage,
sum_func_list, NULL, allow_sum_func);
- thd->lex->select_lex.no_wrap_view_item= FALSE;
+ first->no_wrap_view_item= FALSE;
return res;
}
@@ -552,14 +552,14 @@ public:
Set flag indicating that we have already acquired metadata lock
protecting this statement against GRL while opening tables.
*/
- void set_has_protection_against_grl()
+ void set_has_protection_against_grl(enum_mdl_type mdl_type)
{
- m_has_protection_against_grl= TRUE;
+ m_has_protection_against_grl|= MDL_BIT(mdl_type);
}
- bool has_protection_against_grl() const
+ bool has_protection_against_grl(enum_mdl_type mdl_type) const
{
- return m_has_protection_against_grl;
+ return (bool) (m_has_protection_against_grl & MDL_BIT(mdl_type));
}
private:
@@ -591,7 +591,7 @@ private:
Indicates that in the process of opening tables we have acquired
protection against global read lock.
*/
- bool m_has_protection_against_grl;
+ mdl_bitmap_t m_has_protection_against_grl;
};
diff --git a/sql/sql_basic_types.h b/sql/sql_basic_types.h
index 1e97262cdf0..a790b68fc0c 100644
--- a/sql/sql_basic_types.h
+++ b/sql/sql_basic_types.h
@@ -22,4 +22,314 @@
typedef ulonglong sql_mode_t;
typedef int64 query_id_t;
+
+
+/*
+ "fuzzydate" with strict data type control.
+ Represents a mixture of *only* data type conversion flags, without rounding.
+ Please keep "explicit" in constructors and conversion methods.
+*/
+class date_conv_mode_t
+{
+public:
+ enum value_t
+ {
+ CONV_NONE= 0U,
+ /*
+ FUZZY_DATES is used when the result will only be used for comparison
+ purposes. Conversion is as relaxed as possible.
+ */
+ FUZZY_DATES= 1U,
+ TIME_ONLY= 4U,
+ INTERVAL_hhmmssff= 8U,
+ INTERVAL_DAY= 16U,
+ RANGE0_LAST= INTERVAL_DAY,
+ NO_ZERO_IN_DATE= (1UL << 23), // MODE_NO_ZERO_IN_DATE
+ NO_ZERO_DATE= (1UL << 24), // MODE_NO_ZERO_DATE
+ INVALID_DATES= (1UL << 25) // MODE_INVALID_DATES
+ };
+
+ /*
+ BIT-OR for all known values. Let's have a separate enum for it.
+ - We don't put this value into "value_t", to avoid handling it in switch().
+ - We don't put this value as a static const inside the class,
+ because "gdb" would display it every time we do "print"
+ for a time_round_mode_t value.
+ - We can't put it into a function returning this value, because
+ it's not allowed to use functions in static_assert.
+ */
+ enum known_values_t
+ {
+ KNOWN_MODES= FUZZY_DATES |
+ TIME_ONLY | INTERVAL_hhmmssff | INTERVAL_DAY |
+ NO_ZERO_IN_DATE | NO_ZERO_DATE | INVALID_DATES
+ };
+private:
+ value_t m_mode;
+public:
+
+ // Constructors
+ explicit date_conv_mode_t(ulonglong fuzzydate)
+ :m_mode((value_t) fuzzydate)
+ { }
+
+ // Conversion operators
+ explicit operator ulonglong() const
+ {
+ return m_mode;
+ }
+ explicit operator bool() const
+ {
+ return m_mode != 0;
+ }
+
+ // Unary operators
+ ulonglong operator~() const
+ {
+ return ~m_mode;
+ }
+
+ // Dyadic bitwise operators
+ date_conv_mode_t operator&(const date_conv_mode_t &other) const
+ {
+ return date_conv_mode_t(m_mode & other.m_mode);
+ }
+ date_conv_mode_t operator&(const ulonglong other) const
+ {
+ return date_conv_mode_t(m_mode & other);
+ }
+
+ date_conv_mode_t operator|(const date_conv_mode_t &other) const
+ {
+ return date_conv_mode_t(m_mode | other.m_mode);
+ }
+
+ // Dyadic bitwise assignment operators
+ date_conv_mode_t &operator&=(const date_conv_mode_t &other)
+ {
+ m_mode= value_t(m_mode & other.m_mode);
+ return *this;
+ }
+
+ date_conv_mode_t &operator|=(const date_conv_mode_t &other)
+ {
+ m_mode= value_t(m_mode | other.m_mode);
+ return *this;
+ }
+};
+
+
+/*
+ Fractional rounding mode for temporal data types.
+*/
+class time_round_mode_t
+{
+public:
+ enum value_t
+ {
+ /*
+ Use FRAC_NONE when the value needs neither rounding nor truncation,
+ because it is already known not to have any fractional digits outside
+ of the requested precision.
+ */
+ FRAC_NONE= 0,
+ FRAC_TRUNCATE= date_conv_mode_t::RANGE0_LAST << 1, // 32
+ FRAC_ROUND= date_conv_mode_t::RANGE0_LAST << 2 // 64
+ };
+ // BIT-OR for all known values. See comments in date_conv_mode_t.
+ enum known_values_t
+ {
+ KNOWN_MODES= FRAC_TRUNCATE | FRAC_ROUND
+ };
+private:
+ value_t m_mode;
+public:
+ // Constructors
+ explicit time_round_mode_t(ulonglong mode)
+ :m_mode((value_t) mode)
+ {
+ DBUG_ASSERT(mode == FRAC_NONE ||
+ mode == FRAC_TRUNCATE ||
+ mode == FRAC_ROUND);
+ }
+ // Conversion operators
+ explicit operator ulonglong() const
+ {
+ return m_mode;
+ }
+ value_t mode() const
+ {
+ return m_mode;
+ }
+ // Comparison operators
+ bool operator==(const time_round_mode_t &other)
+ {
+ return m_mode == other.m_mode;
+ }
+};
+
+
+/*
+ "fuzzydate" with strict data type control.
+ Used as a parameter to get_date() and represents a mixture of:
+ - data type conversion flags
+ - fractional second rounding flags
+ Please keep "explicit" in constructors and conversion methods.
+*/
+class date_mode_t
+{
+public:
+ enum value_t
+ {
+ CONV_NONE= date_conv_mode_t::CONV_NONE, // 0
+ FUZZY_DATES= date_conv_mode_t::FUZZY_DATES, // 1
+ TIME_ONLY= date_conv_mode_t::TIME_ONLY, // 4
+ INTERVAL_hhmmssff= date_conv_mode_t::INTERVAL_hhmmssff, // 8
+ INTERVAL_DAY= date_conv_mode_t::INTERVAL_DAY, // 16
+ FRAC_TRUNCATE= time_round_mode_t::FRAC_TRUNCATE, // 32
+ FRAC_ROUND= time_round_mode_t::FRAC_ROUND, // 64
+ NO_ZERO_IN_DATE= date_conv_mode_t::NO_ZERO_IN_DATE, // (1UL << 23)
+ NO_ZERO_DATE= date_conv_mode_t::NO_ZERO_DATE, // (1UL << 24)
+ INVALID_DATES= date_conv_mode_t::INVALID_DATES, // (1UL << 25)
+ };
+protected:
+ value_t m_mode;
+public:
+
+ // Constructors
+ explicit date_mode_t(ulonglong fuzzydate)
+ :m_mode((value_t) fuzzydate)
+ { }
+
+ // Conversion operators
+ explicit operator ulonglong() const
+ {
+ return m_mode;
+ }
+ explicit operator bool() const
+ {
+ return m_mode != 0;
+ }
+ explicit operator date_conv_mode_t() const
+ {
+ return date_conv_mode_t(ulonglong(m_mode) & date_conv_mode_t::KNOWN_MODES);
+ }
+ explicit operator time_round_mode_t() const
+ {
+ return time_round_mode_t(ulonglong(m_mode) & time_round_mode_t::KNOWN_MODES);
+ }
+ // Unary operators
+ ulonglong operator~() const
+ {
+ return ~m_mode;
+ }
+ bool operator!() const
+ {
+ return !m_mode;
+ }
+
+ // Dyadic bitwise operators
+ date_mode_t operator&(const date_mode_t &other) const
+ {
+ return date_mode_t(m_mode & other.m_mode);
+ }
+ date_mode_t operator&(ulonglong other) const
+ {
+ return date_mode_t(m_mode & other);
+ }
+
+ date_mode_t operator|(const date_mode_t &other) const
+ {
+ return date_mode_t(m_mode | other.m_mode);
+ }
+
+ // Dyadic bitwise assignment operators
+ date_mode_t &operator&=(const date_mode_t &other)
+ {
+ m_mode= value_t(m_mode & other.m_mode);
+ return *this;
+ }
+
+ date_mode_t &operator|=(const date_mode_t &other)
+ {
+ m_mode= value_t(m_mode | other.m_mode);
+ return *this;
+ }
+
+ date_mode_t &operator|=(const date_conv_mode_t &other)
+ {
+ m_mode= value_t(m_mode | ulonglong(other));
+ return *this;
+ }
+};
+
+
+// Bitwise OR out-of-class operators for data type mixtures
+static inline date_mode_t operator|(const date_mode_t &a,
+ const date_conv_mode_t &b)
+{
+ return date_mode_t(ulonglong(a) | ulonglong(b));
+}
+
+static inline date_mode_t operator|(const date_conv_mode_t &a,
+ const time_round_mode_t &b)
+{
+ return date_mode_t(ulonglong(a) | ulonglong(b));
+}
+
+
+static inline date_mode_t operator|(const date_conv_mode_t &a,
+ const date_mode_t &b)
+{
+ return date_mode_t(ulonglong(a) | ulonglong(b));
+}
+
+
+// Bitwise AND out-of-class operators for data type mixtures
+static inline date_conv_mode_t operator&(const date_mode_t &a,
+ const date_conv_mode_t &b)
+{
+ return date_conv_mode_t(ulonglong(a) & ulonglong(b));
+}
+
+static inline date_conv_mode_t operator&(const date_conv_mode_t &a,
+ const date_mode_t &b)
+{
+ return date_conv_mode_t(ulonglong(a) & ulonglong(b));
+}
+
+static inline date_conv_mode_t operator&(sql_mode_t &a,
+ const date_conv_mode_t &b)
+{
+ return date_conv_mode_t(a & ulonglong(b));
+}
+
+
+static const date_conv_mode_t
+ TIME_CONV_NONE (date_conv_mode_t::CONV_NONE),
+ TIME_FUZZY_DATES (date_conv_mode_t::FUZZY_DATES),
+ TIME_TIME_ONLY (date_conv_mode_t::TIME_ONLY),
+ TIME_INTERVAL_hhmmssff (date_conv_mode_t::INTERVAL_hhmmssff),
+ TIME_INTERVAL_DAY (date_conv_mode_t::INTERVAL_DAY),
+ TIME_NO_ZERO_IN_DATE (date_conv_mode_t::NO_ZERO_IN_DATE),
+ TIME_NO_ZERO_DATE (date_conv_mode_t::NO_ZERO_DATE),
+ TIME_INVALID_DATES (date_conv_mode_t::INVALID_DATES);
+
+// An often used combination
+static const date_conv_mode_t
+ TIME_NO_ZEROS (date_conv_mode_t::NO_ZERO_DATE|
+ date_conv_mode_t::NO_ZERO_IN_DATE);
+
+// Flags understood by str_to_xxx, number_to_xxx, check_date
+static const date_conv_mode_t
+ TIME_MODE_FOR_XXX_TO_DATE (date_mode_t::NO_ZERO_IN_DATE |
+ date_mode_t::NO_ZERO_DATE |
+ date_mode_t::INVALID_DATES);
+
+static const time_round_mode_t
+ TIME_FRAC_NONE (time_round_mode_t::FRAC_NONE),
+ TIME_FRAC_TRUNCATE (time_round_mode_t::FRAC_TRUNCATE),
+ TIME_FRAC_ROUND (time_round_mode_t::FRAC_ROUND);
+
+
#endif
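A short sketch of how these types compose (illustrative only, not part of the patch); the explicit conversion operators strip a mixed date_mode_t back to its conversion-only or rounding-only parts:

    date_mode_t mode= TIME_NO_ZEROS | TIME_FRAC_ROUND;   // conversion flags | rounding flag
    date_conv_mode_t conv= date_conv_mode_t(mode);        // keeps only the conversion bits
    time_round_mode_t round= time_round_mode_t(mode);     // keeps only the rounding bits
    // round == TIME_FRAC_ROUND and (mode & TIME_NO_ZERO_DATE) is non-zero here.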
diff --git a/sql/sql_binlog.cc b/sql/sql_binlog.cc
index 60de2923a8f..97b8e2e4f91 100644
--- a/sql/sql_binlog.cc
+++ b/sql/sql_binlog.cc
@@ -147,7 +147,7 @@ int binlog_defragment(THD *thd)
(char *) my_malloc(thd->lex->comment.length, MYF(MY_WME));
if (!thd->lex->comment.str)
{
- my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1);
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATAL), 1);
return -1;
}
@@ -225,7 +225,7 @@ void mysql_client_binlog_statement(THD* thd)
*/
if (!(rli))
{
- my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1); /* needed 1 bytes */
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATAL), 1); /* needed 1 byte */
goto end;
}
@@ -244,7 +244,7 @@ void mysql_client_binlog_statement(THD* thd)
decoded_len= my_base64_needed_decoded_length((int)coded_len);
if (!(buf= (char *) my_malloc(decoded_len, MYF(MY_WME))))
{
- my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1);
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATAL), 1);
goto end;
}
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index aa4c77d0939..73bb4d7b7f7 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -276,8 +276,8 @@ functions:
- Called before parsing and used to match a statement with the stored
queries hash.
If a match is found the cached result set is sent through repeated
- calls to net_real_write. (note: calling thread doesn't have a regis-
- tered result set writer: thd->net.query_cache_query=0)
+ calls to net_real_write. (note: calling thread does not have a
+ registered result set writer: thd->net.query_cache_query=0)
2. Query_cache::store_query
- Called just before handle_select() and is used to register a result
set writer to the statement currently being processed
@@ -480,8 +480,7 @@ static void make_base_query(String *new_query,
/* We do not support UCS2, UTF16, UTF32 as a client character set */
DBUG_ASSERT(current_thd->variables.character_set_client->mbminlen == 1);
- new_query->length(0); // Don't copy anything from old buffer
- if (new_query->realloc(query_length + additional_length))
+ if (new_query->alloc(query_length + additional_length))
{
/*
We could not allocate the query. Use original query for
@@ -4147,13 +4146,13 @@ Query_cache::is_cacheable(THD *thd, LEX *lex,
if (thd->lex->safe_to_cache_query &&
(thd->variables.query_cache_type == 1 ||
- (thd->variables.query_cache_type == 2 && (lex->select_lex.options &
- OPTION_TO_QUERY_CACHE))) &&
+ (thd->variables.query_cache_type == 2 &&
+ (lex->first_select_lex()->options & OPTION_TO_QUERY_CACHE))) &&
qc_is_able_to_intercept_result(thd))
{
DBUG_PRINT("qcache", ("options: %lx %lx type: %u",
(long) OPTION_TO_QUERY_CACHE,
- (long) lex->select_lex.options,
+ (long) lex->first_select_lex()->options,
(int) thd->variables.query_cache_type));
if (!(table_count= process_and_count_tables(thd, tables_used,
@@ -4174,7 +4173,7 @@ Query_cache::is_cacheable(THD *thd, LEX *lex,
("not interesting query: %d or not cacheable, options %lx %lx type: %u net->vio present: %u",
(int) lex->sql_command,
(long) OPTION_TO_QUERY_CACHE,
- (long) lex->select_lex.options,
+ (long) lex->first_select_lex()->options,
(int) thd->variables.query_cache_type,
(uint) MY_TEST(qc_is_able_to_intercept_result(thd))));
DBUG_RETURN(0);
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 2eebfbb6db0..b01edf6259b 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2008, 2018, MariaDB Corporation.
+ Copyright (c) 2008, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -66,9 +66,12 @@
#include "sql_callback.h"
#include "lock.h"
#include "wsrep_mysqld.h"
-#include "wsrep_thd.h"
#include "sql_connect.h"
-#include "my_atomic.h"
+#ifdef WITH_WSREP
+#include "wsrep_thd.h"
+#include "wsrep_trans_observer.h"
+#endif /* WITH_WSREP */
+#include "opt_trace.h"
#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
@@ -639,17 +642,50 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
tdc_hash_pins(0),
xid_hash_pins(0),
m_tmp_tables_locked(false)
+#ifdef HAVE_REPLICATION
+ ,
+ current_linfo(0),
+ slave_info(0)
+#endif
#ifdef WITH_WSREP
- ,
+ ,
wsrep_applier(is_wsrep_applier),
wsrep_applier_closing(false),
wsrep_client_thread(false),
- wsrep_apply_toi(false),
+ wsrep_retry_counter(0),
+ wsrep_PA_safe(true),
+ wsrep_retry_query(NULL),
+ wsrep_retry_query_len(0),
+ wsrep_retry_command(COM_CONNECT),
+ wsrep_consistency_check(NO_CONSISTENCY_CHECK),
+ wsrep_mysql_replicated(0),
+ wsrep_TOI_pre_query(NULL),
+ wsrep_TOI_pre_query_len(0),
wsrep_po_handle(WSREP_PO_INITIALIZER),
wsrep_po_cnt(0),
wsrep_apply_format(0),
- wsrep_ignore_table(false)
-#endif
+ wsrep_apply_toi(false),
+ wsrep_rbr_buf(NULL),
+ wsrep_sync_wait_gtid(WSREP_GTID_UNDEFINED),
+ wsrep_affected_rows(0),
+ wsrep_has_ignored_error(false),
+ wsrep_replicate_GTID(false),
+ wsrep_ignore_table(false),
+
+/* wsrep-lib */
+ m_wsrep_next_trx_id(WSREP_UNDEFINED_TRX_ID),
+ m_wsrep_mutex(LOCK_thd_data),
+ m_wsrep_cond(COND_wsrep_thd),
+ m_wsrep_client_service(this, m_wsrep_client_state),
+ m_wsrep_client_state(this,
+ m_wsrep_mutex,
+ m_wsrep_cond,
+ Wsrep_server_state::instance(),
+ m_wsrep_client_service,
+ wsrep::client_id(thread_id)),
+ wsrep_applier_service(NULL),
+ wsrep_wfc()
+#endif /*WITH_WSREP */
{
ulong tmp;
bzero(&variables, sizeof(variables));
@@ -668,6 +704,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
main_da.init();
mdl_context.init(this);
+ mdl_backup_lock= 0;
/*
Pass nominal parameters to init_alloc_root only to ensure that
@@ -717,7 +754,6 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
progress.arena= 0;
progress.report_to_client= 0;
progress.max_counter= 0;
- current_linfo = 0;
slave_thread = 0;
connection_name.str= 0;
connection_name.length= 0;
@@ -757,11 +793,6 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
mysql_mutex_init(key_LOCK_wakeup_ready, &LOCK_wakeup_ready, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_thd_kill, &LOCK_thd_kill, MY_MUTEX_INIT_FAST);
mysql_cond_init(key_COND_wakeup_ready, &COND_wakeup_ready, 0);
- /*
- LOCK_thread_count goes before LOCK_thd_data - the former is called around
- 'delete thd', the latter - in THD::~THD
- */
- mysql_mutex_record_order(&LOCK_thread_count, &LOCK_thd_data);
/* Variables with default values */
proc_info="login";
@@ -771,23 +802,8 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
*scramble= '\0';
#ifdef WITH_WSREP
- wsrep_ws_handle.trx_id = WSREP_UNDEFINED_TRX_ID;
- wsrep_ws_handle.opaque = NULL;
- wsrep_retry_counter = 0;
- wsrep_PA_safe = true;
- wsrep_retry_query = NULL;
- wsrep_retry_query_len = 0;
- wsrep_retry_command = COM_CONNECT;
- wsrep_consistency_check = NO_CONSISTENCY_CHECK;
- wsrep_mysql_replicated = 0;
- wsrep_TOI_pre_query = NULL;
- wsrep_TOI_pre_query_len = 0;
+ mysql_cond_init(key_COND_wsrep_thd, &COND_wsrep_thd, NULL);
wsrep_info[sizeof(wsrep_info) - 1] = '\0'; /* make sure it is 0-terminated */
- wsrep_sync_wait_gtid = WSREP_GTID_UNDEFINED;
- wsrep_affected_rows = 0;
- wsrep_replicate_GTID = false;
- wsrep_skip_wsrep_GTID = false;
- wsrep_split_flag = false;
#endif
/* Call to init() below requires fully initialized Open_tables_state. */
reset_open_tables_state(this);
@@ -852,7 +868,6 @@ THD::THD(my_thread_id id, bool is_wsrep_applier)
org_charset= 0;
/* Restore THR_THD */
set_current_thd(old_THR_THD);
- inc_thread_count();
}
@@ -1006,6 +1021,15 @@ Sql_condition* THD::raise_condition(uint sql_errno,
if (!(variables.option_bits & OPTION_SQL_NOTES) &&
(level == Sql_condition::WARN_LEVEL_NOTE))
DBUG_RETURN(NULL);
+#ifdef WITH_WSREP
+ /*
+ Suppress warnings/errors if the wsrep THD is going to replay. The
+ deadlock/interrupted errors may be transient and should not be
+ reported to the client.
+ */
+ if (wsrep_must_replay(this))
+ DBUG_RETURN(NULL);
+#endif /* WITH_WSREP */
da->opt_clear_warning_info(query_id);
@@ -1031,7 +1055,8 @@ Sql_condition* THD::raise_condition(uint sql_errno,
level= Sql_condition::WARN_LEVEL_ERROR;
}
- if (handle_condition(sql_errno, sqlstate, &level, msg, &cond))
+ if (!is_fatal_error &&
+ handle_condition(sql_errno, sqlstate, &level, msg, &cond))
DBUG_RETURN(cond);
switch (level) {
@@ -1052,10 +1077,25 @@ Sql_condition* THD::raise_condition(uint sql_errno,
is_slave_error= 1; // needed to catch query errors during replication
- if (!da->is_error())
+#ifdef WITH_WSREP
+ /*
+ With wsrep we allow converting BF abort error to warning if
+ errors are ignored.
+ */
+ if (!is_fatal_error &&
+ no_errors &&
+ (wsrep_trx().bf_aborted() || wsrep_retry_counter))
+ {
+ WSREP_DEBUG("BF abort error converted to warning");
+ }
+ else
+#endif /* WITH_WSREP */
{
- set_row_count_func(-1);
- da->set_error_status(sql_errno, msg, sqlstate, ucid, cond);
+ if (!da->is_error())
+ {
+ set_row_count_func(-1);
+ da->set_error_status(sql_errno, msg, sqlstate, ucid, cond);
+ }
}
}
@@ -1116,7 +1156,16 @@ void *thd_memdup(MYSQL_THD thd, const void* str, size_t size)
extern "C"
void thd_get_xid(const MYSQL_THD thd, MYSQL_XID *xid)
{
- *xid = *(MYSQL_XID *) &thd->transaction.xid_state.xid;
+#ifdef WITH_WSREP
+ if (!thd->wsrep_xid.is_null())
+ {
+ *xid = *(MYSQL_XID *) &thd->wsrep_xid;
+ return;
+ }
+#endif /* WITH_WSREP */
+ *xid= thd->transaction.xid_state.is_explicit_XA() ?
+ *(MYSQL_XID *) thd->transaction.xid_state.get_xid() :
+ *(MYSQL_XID *) &thd->transaction.implicit_xid;
}
@@ -1220,13 +1269,11 @@ void THD::init()
first_successful_insert_id_in_prev_stmt= 0;
first_successful_insert_id_in_prev_stmt_for_binlog= 0;
first_successful_insert_id_in_cur_stmt= 0;
+ current_backup_stage= BACKUP_FINISHED;
#ifdef WITH_WSREP
- wsrep_exec_mode= wsrep_applier ? REPL_RECV : LOCAL_STATE;
- wsrep_conflict_state= NO_CONFLICT;
- wsrep_query_state= QUERY_IDLE;
wsrep_last_query_id= 0;
- wsrep_trx_meta.gtid= WSREP_GTID_UNDEFINED;
- wsrep_trx_meta.depends_on= WSREP_SEQNO_UNDEFINED;
+ wsrep_xid.null();
+ wsrep_skip_locking= FALSE;
wsrep_converted_lock_session= false;
wsrep_retry_counter= 0;
wsrep_rgi= NULL;
@@ -1235,11 +1282,10 @@ void THD::init()
wsrep_mysql_replicated = 0;
wsrep_TOI_pre_query = NULL;
wsrep_TOI_pre_query_len = 0;
- wsrep_sync_wait_gtid = WSREP_GTID_UNDEFINED;
+ wsrep_rbr_buf = NULL;
wsrep_affected_rows = 0;
+ m_wsrep_next_trx_id = WSREP_UNDEFINED_TRX_ID;
wsrep_replicate_GTID = false;
- wsrep_skip_wsrep_GTID = false;
- wsrep_split_flag = false;
#endif /* WITH_WSREP */
if (variables.sql_log_bin)
@@ -1340,7 +1386,8 @@ void THD::init_for_queries()
reset_root_defaults(&transaction.mem_root,
variables.trans_alloc_block_size,
variables.trans_prealloc_size);
- transaction.xid_state.xid.null();
+ DBUG_ASSERT(!transaction.xid_state.is_explicit_XA());
+ DBUG_ASSERT(transaction.implicit_xid.is_null());
}
@@ -1382,6 +1429,7 @@ void THD::change_user(void)
sp_cache_clear(&sp_func_cache);
sp_cache_clear(&sp_package_spec_cache);
sp_cache_clear(&sp_package_body_cache);
+ opt_trace.delete_traces();
}
/**
@@ -1413,7 +1461,7 @@ bool THD::set_db(const LEX_CSTRING *new_db)
const char *tmp= NULL;
if (new_db->str)
{
- if (!(tmp= my_strndup(new_db->str, new_db->length, MYF(MY_WME | ME_FATALERROR))))
+ if (!(tmp= my_strndup(new_db->str, new_db->length, MYF(MY_WME | ME_FATAL))))
result= 1;
}
@@ -1462,12 +1510,13 @@ void THD::cleanup(void)
DBUG_ASSERT(cleanup_done == 0);
set_killed(KILL_CONNECTION);
-#ifdef ENABLE_WHEN_BINLOG_WILL_BE_ABLE_TO_PREPARE
- if (transaction.xid_state.xa_state == XA_PREPARED)
+#ifdef WITH_WSREP
+ if (wsrep_cs().state() != wsrep::client_state::s_none)
{
-#error xid_state in the cache should be replaced by the allocated value
+ wsrep_cs().cleanup();
}
-#endif
+ wsrep_client_thread= false;
+#endif /* WITH_WSREP */
mysql_ha_cleanup(this);
locked_tables_list.unlock_locked_tables(this);
@@ -1475,10 +1524,10 @@ void THD::cleanup(void)
delete_dynamic(&user_var_events);
close_temporary_tables();
- transaction.xid_state.xa_state= XA_NOTR;
- transaction.xid_state.rm_error= 0;
- trans_rollback(this);
- xid_cache_delete(this, &transaction.xid_state);
+ if (transaction.xid_state.is_explicit_XA())
+ trans_xa_detach(this);
+ else
+ trans_rollback(this);
DBUG_ASSERT(open_tables == NULL);
/*
@@ -1489,6 +1538,9 @@ void THD::cleanup(void)
*/
mdl_context.release_transactional_locks();
+ backup_end(this);
+ backup_unlock(this);
+
/* Release the global read lock, if acquired. */
if (global_read_lock.is_acquired())
global_read_lock.unlock_global_read_lock(this);
@@ -1520,6 +1572,9 @@ void THD::cleanup(void)
DBUG_ASSERT(!mdl_context.has_locks());
apc_target.destroy();
+#ifdef HAVE_REPLICATION
+ unregister_slave();
+#endif
cleanup_done=1;
DBUG_VOID_RETURN;
}
@@ -1585,6 +1640,9 @@ void THD::reset_for_reuse()
#ifdef SIGNAL_WITH_VIO_CLOSE
active_vio = 0;
#endif
+#ifdef WITH_WSREP
+ wsrep_free_status(this);
+#endif /* WITH_WSREP */
}
@@ -1593,10 +1651,8 @@ THD::~THD()
THD *orig_thd= current_thd;
THD_CHECK_SENTRY(this);
DBUG_ENTER("~THD()");
- /* Check that we have already called thd->unlink() */
- DBUG_ASSERT(prev == 0 && next == 0);
- /* This takes a long time so we should not do this under LOCK_thread_count */
- mysql_mutex_assert_not_owner(&LOCK_thread_count);
+ /* Make sure threads are not available via server_threads. */
+ assert_not_linked();
/*
In error cases, thd may not be current thd. We have to fix this so
@@ -1611,15 +1667,21 @@ THD::~THD()
THD is not deleted while they access it. The following mutex_lock
ensures that no one else is using this THD and it's now safe to delete
*/
+ if (WSREP_NNULL(this)) mysql_mutex_lock(&LOCK_thd_data);
mysql_mutex_lock(&LOCK_thd_kill);
mysql_mutex_unlock(&LOCK_thd_kill);
+ if (WSREP_NNULL(this)) mysql_mutex_unlock(&LOCK_thd_data);
-#ifdef WITH_WSREP
- delete wsrep_rgi;
-#endif
if (!free_connection_done)
free_connection();
+#ifdef WITH_WSREP
+ if (wsrep_rgi != NULL) {
+ delete wsrep_rgi;
+ wsrep_rgi = NULL;
+ }
+ mysql_cond_destroy(&COND_wsrep_thd);
+#endif
mdl_context.destroy();
free_root(&transaction.mem_root,MYF(0));
@@ -1671,7 +1733,6 @@ THD::~THD()
}
update_global_memory_status(status_var.global_memory_used);
set_current_thd(orig_thd == this ? 0 : orig_thd);
- dec_thread_count();
DBUG_VOID_RETURN;
}
@@ -1801,6 +1862,7 @@ void THD::awake_no_mutex(killed_state state_to_set)
DBUG_PRINT("enter", ("this: %p current_thd: %p state: %d",
this, current_thd, (int) state_to_set));
THD_CHECK_SENTRY(this);
+ if (WSREP_NNULL(this)) mysql_mutex_assert_owner(&LOCK_thd_data);
mysql_mutex_assert_owner(&LOCK_thd_kill);
print_aborted_warning(3, "KILLED");
@@ -1833,7 +1895,8 @@ void THD::awake_no_mutex(killed_state state_to_set)
}
/* Interrupt target waiting inside a storage engine. */
- if (state_to_set != NOT_KILLED)
+ if (IF_WSREP(state_to_set != NOT_KILLED && !wsrep_is_bf_aborted(this),
+ state_to_set != NOT_KILLED))
ha_kill_query(this, thd_kill_level(this));
/* Broadcast a condition to kick the target if it is waiting on it. */
@@ -1986,12 +2049,6 @@ bool THD::notify_shared_lock(MDL_context_owner *ctx_in_use,
if (!thd_table->needs_reopen())
{
signalled|= mysql_lock_abort_for_thread(this, thd_table);
- if (WSREP(this) && wsrep_thd_is_BF(this, FALSE))
- {
- WSREP_DEBUG("remove_table_from_cache: %llu",
- (unsigned long long) this->real_id);
- wsrep_abort_thd((void *)this, (void *)in_use, FALSE);
- }
}
}
}
@@ -2147,6 +2204,11 @@ void THD::reset_globals()
net.thd= 0;
}
+bool THD::trace_started()
+{
+ return opt_trace.is_started();
+}
+
/*
Cleanup after query.
@@ -2223,12 +2285,6 @@ void THD::cleanup_after_query()
/* reset table map for multi-table update */
table_map_for_update= 0;
m_binlog_invoker= INVOKER_NONE;
-#ifdef WITH_WSREP
- if (TOTAL_ORDER == wsrep_exec_mode)
- {
- wsrep_exec_mode = LOCAL_STATE;
- }
-#endif /* WITH_WSREP */
#ifndef EMBEDDED_LIBRARY
if (rgi_slave)
@@ -2236,7 +2292,6 @@ void THD::cleanup_after_query()
#endif
#ifdef WITH_WSREP
- wsrep_sync_wait_gtid= WSREP_GTID_UNDEFINED;
if (!in_active_multi_stmt_transaction())
wsrep_affected_rows= 0;
#endif /* WITH_WSREP */
@@ -2497,6 +2552,16 @@ void THD::update_charset()
&not_used);
}
+void THD::give_protection_error()
+{
+ if (current_backup_stage != BACKUP_FINISHED)
+ my_error(ER_BACKUP_LOCK_IS_ACTIVE, MYF(0));
+ else
+ {
+ DBUG_ASSERT(global_read_lock.is_acquired());
+ my_error(ER_CANT_UPDATE_WITH_READLOCK, MYF(0));
+ }
+}
/* routings to adding tables to list of changed in transaction tables */
@@ -2573,7 +2638,7 @@ CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, size_t key_length)
key_length + 1);
if (!new_table)
{
- my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_FATALERROR),
+ my_error(EE_OUTOFMEMORY, MYF(ME_FATAL),
ALIGN_SIZE(sizeof(TABLE_LIST)) + key_length + 1);
set_killed(KILL_CONNECTION);
return 0;
@@ -2677,13 +2742,13 @@ void THD::make_explain_field_list(List<Item> &field_list, uint8 explain_flags,
NAME_CHAR_LEN*MAX_REF_PARTS, cs),
mem_root);
item->maybe_null=1;
- field_list.push_back(item= new (mem_root)
- Item_return_int(this, "rows", 10, MYSQL_TYPE_LONGLONG),
+ field_list.push_back(item=new (mem_root)
+ Item_empty_string(this, "rows", NAME_CHAR_LEN, cs),
mem_root);
if (is_analyze)
{
field_list.push_back(item= new (mem_root)
- Item_float(this, "r_rows", 0.1234, 2, 4),
+ Item_empty_string(this, "r_rows", NAME_CHAR_LEN, cs),
mem_root);
item->maybe_null=1;
}
@@ -3225,9 +3290,9 @@ int select_export::send_data(List<Item> &items)
((uint64) res->length() / res->charset()->mbminlen + 1) *
write_cs->mbmaxlen + 1;
set_if_smaller(estimated_bytes, UINT_MAX32);
- if (cvt_str.realloc((uint32) estimated_bytes))
+ if (cvt_str.alloc((uint32) estimated_bytes))
{
- my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), (uint32) estimated_bytes);
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATAL), (uint32) estimated_bytes);
goto err;
}
@@ -3494,7 +3559,7 @@ int select_singlerow_subselect::send_data(List<Item> &items)
if (it->assigned())
{
my_message(ER_SUBQUERY_NO_1_ROW, ER_THD(thd, ER_SUBQUERY_NO_1_ROW),
- MYF(current_thd->lex->ignore ? ME_JUST_WARNING : 0));
+ MYF(current_thd->lex->ignore ? ME_WARNING : 0));
DBUG_RETURN(1);
}
if (unit->offset_limit_cnt)
@@ -3601,18 +3666,15 @@ bool select_max_min_finder_subselect::cmp_int()
bool select_max_min_finder_subselect::cmp_decimal()
{
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
- my_decimal cval, *cvalue= cache->val_decimal(&cval);
- my_decimal mval, *mvalue= maxmin->val_decimal(&mval);
+ VDec cvalue(cache), mvalue(maxmin);
/* Ignore NULLs for ANY and keep them for ALL subqueries */
- if (cache->null_value)
- return (is_all && !maxmin->null_value) || (!is_all && maxmin->null_value);
- if (maxmin->null_value)
+ if (cvalue.is_null())
+ return (is_all && !mvalue.is_null()) || (!is_all && mvalue.is_null());
+ if (mvalue.is_null())
return !is_all;
- if (fmax)
- return (my_decimal_cmp(cvalue, mvalue) > 0) ;
- return (my_decimal_cmp(cvalue,mvalue) < 0);
+ return fmax ? cvalue.cmp(mvalue) > 0 : cvalue.cmp(mvalue) < 0;
}
bool select_max_min_finder_subselect::cmp_str()
@@ -4246,6 +4308,7 @@ void Security_context::init()
host_or_ip= "connecting host";
priv_user[0]= priv_host[0]= proxy_user[0]= priv_role[0]= '\0';
master_access= 0;
+ password_expired= false;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
db_access= NO_ACCESS;
#endif
@@ -4284,6 +4347,7 @@ void Security_context::skip_grants()
host_or_ip= (char *)"";
master_access= ~NO_ACCESS;
*priv_user= *priv_host= '\0';
+ password_expired= false;
}
@@ -4294,6 +4358,13 @@ bool Security_context::set_user(char *user_arg)
return user == 0;
}
+bool Security_context::check_access(ulong want_access, bool match_any)
+{
+ DBUG_ENTER("Security_context::check_access");
+ DBUG_RETURN((match_any ? (master_access & want_access)
+ : ((master_access & want_access) == want_access)));
+}
+
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/**
Initialize this security context from the passed in credentials
@@ -4395,6 +4466,13 @@ bool Security_context::user_matches(Security_context *them)
!strcmp(user, them->user));
}
+bool Security_context::is_priv_user(const char *user, const char *host)
+{
+ return ((user != NULL) && (host != NULL) &&
+ !strcmp(user, priv_user) &&
+ !my_strcasecmp(system_charset_info, host,priv_host));
+}
+
/****************************************************************************
Handling of open and locked tables states.
@@ -4721,14 +4799,14 @@ MYSQL_THD create_thd()
thd->set_command(COM_DAEMON);
thd->system_thread= SYSTEM_THREAD_GENERIC;
thd->security_ctx->host_or_ip="";
- add_to_active_threads(thd);
+ server_threads.insert(thd);
return thd;
}
void destroy_thd(MYSQL_THD thd)
{
thd->add_status_to_global();
- unlink_not_visible_thd(thd);
+ server_threads.erase(thd);
delete thd;
}
@@ -4995,8 +5073,9 @@ extern "C" int thd_binlog_format(const MYSQL_THD thd)
if (WSREP(thd))
{
/* for wsrep binlog format is meaningful also when binlogging is off */
- return (int) thd->wsrep_binlog_format();
+ return (int) WSREP_BINLOG_FORMAT(thd->variables.binlog_format);
}
+
if (mysql_bin_log.is_open() && (thd->variables.option_bits & OPTION_BIN_LOG))
return (int) thd->variables.binlog_format;
return BINLOG_FORMAT_UNSPEC;
@@ -5479,6 +5558,10 @@ void THD::set_query_and_id(char *query_arg, uint32 query_length_arg,
set_query_inner(query_arg, query_length_arg, cs);
mysql_mutex_unlock(&LOCK_thd_data);
query_id= new_query_id;
+#ifdef WITH_WSREP
+ set_wsrep_next_trx_id(query_id);
+ WSREP_DEBUG("assigned new next query and trx id: %" PRIu64, wsrep_next_trx_id());
+#endif /* WITH_WSREP */
}
/** Assign a new value to thd->mysys_var. */
@@ -5530,7 +5613,7 @@ void THD::get_definer(LEX_USER *definer, bool role)
{
definer->user= invoker.user;
definer->host= invoker.host;
- definer->reset_auth();
+ definer->auth= NULL;
}
else
#endif
@@ -5554,263 +5637,6 @@ void THD::mark_transaction_to_rollback(bool all)
is_fatal_sub_stmt_error= true;
transaction_rollback_request= all;
}
-/***************************************************************************
- Handling of XA id cacheing
-***************************************************************************/
-class XID_cache_element
-{
- /*
- m_state is used to prevent elements from being deleted while XA RECOVER
- iterates xid cache and to prevent recovered elments from being acquired by
- multiple threads.
-
- bits 1..29 are reference counter
- bit 30 is RECOVERED flag
- bit 31 is ACQUIRED flag (thread owns this xid)
- bit 32 is unused
-
- Newly allocated and deleted elements have m_state set to 0.
-
- On lock() m_state is atomically incremented. It also creates load-ACQUIRE
- memory barrier to make sure m_state is actually updated before furhter
- memory accesses. Attempting to lock an element that has neither ACQUIRED
- nor RECOVERED flag set returns failure and further accesses to element
- memory are forbidden.
-
- On unlock() m_state is decremented. It also creates store-RELEASE memory
- barrier to make sure m_state is actually updated after preceding memory
- accesses.
-
- ACQUIRED flag is set when thread registers it's xid or when thread acquires
- recovered xid.
-
- RECOVERED flag is set for elements found during crash recovery.
-
- ACQUIRED and RECOVERED flags are cleared before element is deleted from
- hash in a spin loop, after last reference is released.
- */
- int32 m_state;
-public:
- static const int32 ACQUIRED= 1 << 30;
- static const int32 RECOVERED= 1 << 29;
- XID_STATE *m_xid_state;
- bool is_set(int32 flag)
- { return my_atomic_load32_explicit(&m_state, MY_MEMORY_ORDER_RELAXED) & flag; }
- void set(int32 flag)
- {
- DBUG_ASSERT(!is_set(ACQUIRED | RECOVERED));
- my_atomic_add32_explicit(&m_state, flag, MY_MEMORY_ORDER_RELAXED);
- }
- bool lock()
- {
- int32 old= my_atomic_add32_explicit(&m_state, 1, MY_MEMORY_ORDER_ACQUIRE);
- if (old & (ACQUIRED | RECOVERED))
- return true;
- unlock();
- return false;
- }
- void unlock()
- { my_atomic_add32_explicit(&m_state, -1, MY_MEMORY_ORDER_RELEASE); }
- void mark_uninitialized()
- {
- int32 old= ACQUIRED;
- while (!my_atomic_cas32_weak_explicit(&m_state, &old, 0,
- MY_MEMORY_ORDER_RELAXED,
- MY_MEMORY_ORDER_RELAXED))
- {
- old&= ACQUIRED | RECOVERED;
- (void) LF_BACKOFF();
- }
- }
- bool acquire_recovered()
- {
- int32 old= RECOVERED;
- while (!my_atomic_cas32_weak_explicit(&m_state, &old, ACQUIRED | RECOVERED,
- MY_MEMORY_ORDER_RELAXED,
- MY_MEMORY_ORDER_RELAXED))
- {
- if (!(old & RECOVERED) || (old & ACQUIRED))
- return false;
- old= RECOVERED;
- (void) LF_BACKOFF();
- }
- return true;
- }
- static void lf_hash_initializer(LF_HASH *hash __attribute__((unused)),
- XID_cache_element *element,
- XID_STATE *xid_state)
- {
- DBUG_ASSERT(!element->is_set(ACQUIRED | RECOVERED));
- element->m_xid_state= xid_state;
- xid_state->xid_cache_element= element;
- }
- static void lf_alloc_constructor(uchar *ptr)
- {
- XID_cache_element *element= (XID_cache_element*) (ptr + LF_HASH_OVERHEAD);
- element->m_state= 0;
- }
- static void lf_alloc_destructor(uchar *ptr)
- {
- XID_cache_element *element= (XID_cache_element*) (ptr + LF_HASH_OVERHEAD);
- DBUG_ASSERT(!element->is_set(ACQUIRED));
- if (element->is_set(RECOVERED))
- my_free(element->m_xid_state);
- }
- static uchar *key(const XID_cache_element *element, size_t *length,
- my_bool not_used __attribute__((unused)))
- {
- *length= element->m_xid_state->xid.key_length();
- return element->m_xid_state->xid.key();
- }
-};
-
-
-static LF_HASH xid_cache;
-static bool xid_cache_inited;
-
-
-bool THD::fix_xid_hash_pins()
-{
- if (!xid_hash_pins)
- xid_hash_pins= lf_hash_get_pins(&xid_cache);
- return !xid_hash_pins;
-}
-
-
-void xid_cache_init()
-{
- xid_cache_inited= true;
- lf_hash_init(&xid_cache, sizeof(XID_cache_element), LF_HASH_UNIQUE, 0, 0,
- (my_hash_get_key) XID_cache_element::key, &my_charset_bin);
- xid_cache.alloc.constructor= XID_cache_element::lf_alloc_constructor;
- xid_cache.alloc.destructor= XID_cache_element::lf_alloc_destructor;
- xid_cache.initializer=
- (lf_hash_initializer) XID_cache_element::lf_hash_initializer;
-}
-
-
-void xid_cache_free()
-{
- if (xid_cache_inited)
- {
- lf_hash_destroy(&xid_cache);
- xid_cache_inited= false;
- }
-}
-
-
-/**
- Find recovered XA transaction by XID.
-*/
-
-XID_STATE *xid_cache_search(THD *thd, XID *xid)
-{
- XID_STATE *xs= 0;
- DBUG_ASSERT(thd->xid_hash_pins);
- XID_cache_element *element=
- (XID_cache_element*) lf_hash_search(&xid_cache, thd->xid_hash_pins,
- xid->key(), xid->key_length());
- if (element)
- {
- if (element->acquire_recovered())
- xs= element->m_xid_state;
- lf_hash_search_unpin(thd->xid_hash_pins);
- DEBUG_SYNC(thd, "xa_after_search");
- }
- return xs;
-}
-
-
-bool xid_cache_insert(XID *xid, enum xa_states xa_state)
-{
- XID_STATE *xs;
- LF_PINS *pins;
- int res= 1;
-
- if (!(pins= lf_hash_get_pins(&xid_cache)))
- return true;
-
- if ((xs= (XID_STATE*) my_malloc(sizeof(*xs), MYF(MY_WME))))
- {
- xs->xa_state=xa_state;
- xs->xid.set(xid);
- xs->rm_error=0;
-
- if ((res= lf_hash_insert(&xid_cache, pins, xs)))
- my_free(xs);
- else
- xs->xid_cache_element->set(XID_cache_element::RECOVERED);
- if (res == 1)
- res= 0;
- }
- lf_hash_put_pins(pins);
- return res;
-}
-
-
-bool xid_cache_insert(THD *thd, XID_STATE *xid_state)
-{
- if (thd->fix_xid_hash_pins())
- return true;
-
- int res= lf_hash_insert(&xid_cache, thd->xid_hash_pins, xid_state);
- switch (res)
- {
- case 0:
- xid_state->xid_cache_element->set(XID_cache_element::ACQUIRED);
- break;
- case 1:
- my_error(ER_XAER_DUPID, MYF(0));
- /* fall through */
- default:
- xid_state->xid_cache_element= 0;
- }
- return res;
-}
-
-
-void xid_cache_delete(THD *thd, XID_STATE *xid_state)
-{
- if (xid_state->xid_cache_element)
- {
- bool recovered= xid_state->xid_cache_element->is_set(XID_cache_element::RECOVERED);
- DBUG_ASSERT(thd->xid_hash_pins);
- xid_state->xid_cache_element->mark_uninitialized();
- lf_hash_delete(&xid_cache, thd->xid_hash_pins,
- xid_state->xid.key(), xid_state->xid.key_length());
- xid_state->xid_cache_element= 0;
- if (recovered)
- my_free(xid_state);
- }
-}
-
-
-struct xid_cache_iterate_arg
-{
- my_hash_walk_action action;
- void *argument;
-};
-
-static my_bool xid_cache_iterate_callback(XID_cache_element *element,
- xid_cache_iterate_arg *arg)
-{
- my_bool res= FALSE;
- if (element->lock())
- {
- res= arg->action(element->m_xid_state, arg->argument);
- element->unlock();
- }
- return res;
-}
-
-int xid_cache_iterate(THD *thd, my_hash_walk_action action, void *arg)
-{
- xid_cache_iterate_arg argument= { action, arg };
- return thd->fix_xid_hash_pins() ? -1 :
- lf_hash_iterate(&xid_cache, thd->xid_hash_pins,
- (my_hash_walk_action) xid_cache_iterate_callback,
- &argument);
-}
/**
@@ -5924,9 +5750,28 @@ int THD::decide_logging_format(TABLE_LIST *tables)
binlogging is off, or if the statement is filtered out from the
binlog by filtering rules.
*/
+#ifdef WITH_WSREP
+ if (WSREP_CLIENT_NNULL(this) && wsrep_thd_is_local(this) &&
+ variables.wsrep_trx_fragment_size > 0)
+ {
+ if (!is_current_stmt_binlog_format_row())
+ {
+ my_message(ER_NOT_SUPPORTED_YET,
+ "Streaming replication not supported with "
+ "binlog_format=STATEMENT", MYF(0));
+ DBUG_RETURN(-1);
+ }
+ }
+
+ if ((WSREP_EMULATE_BINLOG_NNULL(this) ||
+ (mysql_bin_log.is_open() && (variables.option_bits & OPTION_BIN_LOG))) &&
+ !(wsrep_binlog_format() == BINLOG_FORMAT_STMT &&
+ !binlog_filter->db_ok(db.str)))
+#else
if (mysql_bin_log.is_open() && (variables.option_bits & OPTION_BIN_LOG) &&
!(wsrep_binlog_format() == BINLOG_FORMAT_STMT &&
!binlog_filter->db_ok(db.str)))
+#endif /* WITH_WSREP */
{
if (is_bulk_op())
@@ -6250,7 +6095,8 @@ int THD::decide_logging_format(TABLE_LIST *tables)
5. Error: Cannot modify table that uses a storage engine
limited to row-logging when binlog_format = STATEMENT
*/
- if (IF_WSREP((!WSREP(this) || wsrep_exec_mode == LOCAL_STATE),1))
+ if (IF_WSREP((!WSREP_NNULL(this) ||
+ wsrep_cs().mode() == wsrep::client_state::m_local),1))
{
my_error((error= ER_BINLOG_STMT_MODE_AND_ROW_ENGINE), MYF(0), "");
}
@@ -6601,8 +6447,9 @@ int THD::binlog_write_row(TABLE* table, bool is_trans,
uchar const *record)
{
- DBUG_ASSERT(is_current_stmt_binlog_format_row() &&
- ((WSREP(this) && wsrep_emulate_bin_log) || mysql_bin_log.is_open()));
+ DBUG_ASSERT(is_current_stmt_binlog_format_row());
+ DBUG_ASSERT((WSREP_NNULL(this) && wsrep_emulate_bin_log) ||
+ mysql_bin_log.is_open());
/*
Pack records into format for transfer. We are allocating more
memory than needed, but that doesn't matter.
@@ -6642,8 +6489,9 @@ int THD::binlog_update_row(TABLE* table, bool is_trans,
const uchar *before_record,
const uchar *after_record)
{
- DBUG_ASSERT(is_current_stmt_binlog_format_row() &&
- ((WSREP(this) && wsrep_emulate_bin_log) || mysql_bin_log.is_open()));
+ DBUG_ASSERT(is_current_stmt_binlog_format_row());
+ DBUG_ASSERT((WSREP_NNULL(this) && wsrep_emulate_bin_log) ||
+ mysql_bin_log.is_open());
/**
Save a reference to the original read bitmaps
@@ -6721,8 +6569,9 @@ int THD::binlog_update_row(TABLE* table, bool is_trans,
int THD::binlog_delete_row(TABLE* table, bool is_trans,
uchar const *record)
{
- DBUG_ASSERT(is_current_stmt_binlog_format_row() &&
- ((WSREP(this) && wsrep_emulate_bin_log) || mysql_bin_log.is_open()));
+ DBUG_ASSERT(is_current_stmt_binlog_format_row());
+ DBUG_ASSERT((WSREP_NNULL(this) && wsrep_emulate_bin_log) ||
+ mysql_bin_log.is_open());
/**
Save a reference to the original read bitmaps
We will need this to restore the bitmaps at the end as
@@ -6853,7 +6702,7 @@ int THD::binlog_remove_pending_rows_event(bool clear_maps,
{
DBUG_ENTER("THD::binlog_remove_pending_rows_event");
- if(!WSREP_EMULATE_BINLOG(this) && !mysql_bin_log.is_open())
+ if(!WSREP_EMULATE_BINLOG_NNULL(this) && !mysql_bin_log.is_open())
DBUG_RETURN(0);
/* Ensure that all events in a GTID group are in the same cache */
@@ -6876,7 +6725,7 @@ int THD::binlog_flush_pending_rows_event(bool stmt_end, bool is_transactional)
mode: it might be the case that we left row-based mode before
flushing anything (e.g., if we have explicitly locked tables).
*/
- if(!WSREP_EMULATE_BINLOG(this) && !mysql_bin_log.is_open())
+ if (!WSREP_EMULATE_BINLOG_NNULL(this) && !mysql_bin_log.is_open())
DBUG_RETURN(0);
/* Ensure that all events in a GTID group are in the same cache */
@@ -7149,7 +6998,7 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg,
show_query_type(qtype), (int) query_len, query_arg));
DBUG_ASSERT(query_arg);
- DBUG_ASSERT(WSREP_EMULATE_BINLOG(this) || mysql_bin_log.is_open());
+ DBUG_ASSERT(WSREP_EMULATE_BINLOG_NNULL(this) || mysql_bin_log.is_open());
/* If this is withing a BEGIN ... COMMIT group, don't log it */
if (variables.option_bits & OPTION_GTID_BEGIN)
@@ -7741,7 +7590,7 @@ Query_arena_stmt::~Query_arena_stmt()
bool THD::timestamp_to_TIME(MYSQL_TIME *ltime, my_time_t ts,
- ulong sec_part, ulonglong fuzzydate)
+ ulong sec_part, date_mode_t fuzzydate)
{
time_zone_used= 1;
if (ts == 0 && sec_part == 0)
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 221e453eab5..1266e1777ce 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2016, Oracle and/or its affiliates.
- Copyright (c) 2009, 2017, MariaDB Corporation.
+ Copyright (c) 2009, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -28,6 +28,7 @@
#include "rpl_tblmap.h"
#include "mdl.h"
#include "field.h" // Create_field
+#include "opt_trace_context.h"
#include "probes_mysql.h"
#include "sql_locale.h" /* my_locale_st */
#include "sql_profile.h" /* PROFILING */
@@ -38,15 +39,15 @@
#include "thr_timer.h"
#include "thr_malloc.h"
#include "log_slow.h" /* LOG_SLOW_DISABLE_... */
-
#include "sql_digest_stream.h" // sql_digest_state
-
#include <mysql/psi/mysql_stage.h>
#include <mysql/psi/mysql_statement.h>
#include <mysql/psi/mysql_idle.h>
#include <mysql/psi/mysql_table.h>
#include <mysql_com_server.h>
#include "session_tracker.h"
+#include "backup.h"
+#include "xa.h"
extern "C"
void set_thd_stage_info(void *thd,
@@ -61,8 +62,19 @@ void set_thd_stage_info(void *thd,
#include "my_apc.h"
#include "rpl_gtid.h"
+
#include "wsrep_mysqld.h"
+#ifdef WITH_WSREP
+#include <inttypes.h>
+/* wsrep-lib */
+#include "wsrep_client_service.h"
+#include "wsrep_client_state.h"
+#include "wsrep_mutex.h"
+#include "wsrep_condition_variable.h"
+
+class Wsrep_applier_service;
+#endif /* WITH_WSREP */
class Reprepare_observer;
class Relay_log_info;
struct rpl_group_info;
@@ -79,6 +91,9 @@ class user_var_entry;
struct Trans_binlog_info;
class rpl_io_thread_info;
class rpl_sql_thread_info;
+#ifdef HAVE_REPLICATION
+struct Slave_info;
+#endif
enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME };
enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };
@@ -156,8 +171,15 @@ enum enum_binlog_row_image {
#define MODE_HIGH_NOT_PRECEDENCE (1ULL << 29)
#define MODE_NO_ENGINE_SUBSTITUTION (1ULL << 30)
#define MODE_PAD_CHAR_TO_FULL_LENGTH (1ULL << 31)
+/* SQL mode bits defined above are common for MariaDB and MySQL */
+#define MODE_MASK_MYSQL_COMPATIBLE 0xFFFFFFFFULL
+/* The following modes are specific to MariaDB */
#define MODE_EMPTY_STRING_IS_NULL (1ULL << 32)
#define MODE_SIMULTANEOUS_ASSIGNMENT (1ULL << 33)
+#define MODE_TIME_ROUND_FRACTIONAL (1ULL << 34)
+/* The following modes are specific to MySQL */
+#define MODE_MYSQL80_TIME_TRUNCATE_FRACTIONAL (1ULL << 32)
+
/* Bits for different old style modes */
#define OLD_MODE_NO_DUP_KEY_WARNINGS_WITH_IGNORE (1 << 0)
@@ -168,8 +190,6 @@ extern char internal_table_name[2];
extern char empty_c_string[1];
extern MYSQL_PLUGIN_IMPORT const char **errmesg;
-extern bool volatile shutdown_in_progress;
-
extern "C" LEX_STRING * thd_query_string (MYSQL_THD thd);
extern "C" size_t thd_query_safe(MYSQL_THD thd, char *buf, size_t buflen);
@@ -273,7 +293,7 @@ public:
class Alter_drop :public Sql_alloc {
public:
- enum drop_type {KEY, COLUMN, FOREIGN_KEY, CHECK_CONSTRAINT };
+ enum drop_type { KEY, COLUMN, FOREIGN_KEY, CHECK_CONSTRAINT, PERIOD };
const char *name;
enum drop_type type;
bool drop_if_exists;
@@ -292,6 +312,7 @@ public:
{
return type == COLUMN ? "COLUMN" :
type == CHECK_CONSTRAINT ? "CONSTRAINT" :
+ type == PERIOD ? "PERIOD" :
type == KEY ? "INDEX" : "FOREIGN KEY";
}
};
@@ -555,6 +576,8 @@ typedef struct system_variables
ulonglong long_query_time;
ulonglong max_statement_time;
ulonglong optimizer_switch;
+ ulonglong optimizer_trace;
+ ulong optimizer_trace_max_mem_size;
sql_mode_t sql_mode; ///< which non-standard SQL behaviour should be enabled
sql_mode_t old_behavior; ///< which old SQL behaviour should be enabled
ulonglong option_bits; ///< OPTION_xxx constants, e.g. OPTION_PROFILING
@@ -615,6 +638,7 @@ typedef struct system_variables
ulong optimizer_selectivity_sampling_limit;
ulong optimizer_use_condition_selectivity;
ulong use_stat_tables;
+ double sample_percentage;
ulong histogram_size;
ulong histogram_type;
ulong preload_buff_size;
@@ -716,10 +740,12 @@ typedef struct system_variables
my_bool wsrep_on;
my_bool wsrep_causal_reads;
+ uint wsrep_sync_wait;
+ ulong wsrep_retry_autocommit;
+ ulonglong wsrep_trx_fragment_size;
+ ulong wsrep_trx_fragment_unit;
+ ulong wsrep_OSU_method;
my_bool wsrep_dirty_reads;
- uint wsrep_sync_wait;
- ulong wsrep_retry_autocommit;
- ulong wsrep_OSU_method;
double long_query_time_double, max_statement_time_double;
my_bool pseudo_slave_mode;
@@ -728,6 +754,7 @@ typedef struct system_variables
ulong session_track_transaction_info;
my_bool session_track_schema;
my_bool session_track_state_change;
+ my_bool tcp_nodelay;
ulong threadpool_priority;
@@ -737,6 +764,7 @@ typedef struct system_variables
uint column_compression_threshold;
uint column_compression_zlib_level;
uint in_subquery_conversion_threshold;
+ ulonglong max_rowid_filter_size;
vers_asof_timestamp_t vers_asof_timestamp;
ulong vers_alter_history;
@@ -835,6 +863,8 @@ typedef struct system_status_var
ulong feature_locale; /* +1 when LOCALE is set */
ulong feature_subquery; /* +1 when subqueries are used */
ulong feature_system_versioning; /* +1 opening a table WITH SYSTEM VERSIONING */
+ ulong feature_application_time_periods;
+ /* +1 opening a table with application-time period */
ulong feature_timezone; /* +1 when XPATH is used */
ulong feature_trigger; /* +1 opening a table with triggers */
ulong feature_xml; /* +1 when XPATH is used */
@@ -1201,7 +1231,7 @@ public:
int insert(THD *thd, Statement *statement);
- Statement *find_by_name(LEX_CSTRING *name)
+ Statement *find_by_name(const LEX_CSTRING *name)
{
Statement *stmt;
stmt= (Statement*)my_hash_search(&names_hash, (uchar*)name->str,
@@ -1247,50 +1277,6 @@ struct st_savepoint {
MDL_savepoint mdl_savepoint;
};
-enum xa_states {XA_NOTR=0, XA_ACTIVE, XA_IDLE, XA_PREPARED, XA_ROLLBACK_ONLY};
-extern const char *xa_state_names[];
-class XID_cache_element;
-
-typedef struct st_xid_state {
- /* For now, this is only used to catch duplicated external xids */
- XID xid; // transaction identifier
- enum xa_states xa_state; // used by external XA only
- /* Error reported by the Resource Manager (RM) to the Transaction Manager. */
- uint rm_error;
- XID_cache_element *xid_cache_element;
-
- /**
- Check that XA transaction has an uncommitted work. Report an error
- to the user in case when there is an uncommitted work for XA transaction.
-
- @return result of check
- @retval false XA transaction is NOT in state IDLE, PREPARED
- or ROLLBACK_ONLY.
- @retval true XA transaction is in state IDLE or PREPARED
- or ROLLBACK_ONLY.
- */
-
- bool check_has_uncommitted_xa() const
- {
- if (xa_state == XA_IDLE ||
- xa_state == XA_PREPARED ||
- xa_state == XA_ROLLBACK_ONLY)
- {
- my_error(ER_XAER_RMFAIL, MYF(0), xa_state_names[xa_state]);
- return true;
- }
- return false;
- }
-} XID_STATE;
-
-void xid_cache_init(void);
-void xid_cache_free(void);
-XID_STATE *xid_cache_search(THD *thd, XID *xid);
-bool xid_cache_insert(XID *xid, enum xa_states xa_state);
-bool xid_cache_insert(THD *thd, XID_STATE *xid_state);
-void xid_cache_delete(THD *thd, XID_STATE *xid_state);
-int xid_cache_iterate(THD *thd, my_hash_walk_action action, void *argument);
-
/**
@class Security_context
@brief A set of THD members describing the current authenticated user.
@@ -1321,6 +1307,8 @@ public:
ulong master_access; /* Global privileges from mysql.user */
ulong db_access; /* Privileges for current db */
+ bool password_expired;
+
void init();
void destroy();
void skip_grants();
@@ -1343,6 +1331,15 @@ public:
restore_security_context(THD *thd, Security_context *backup);
#endif
bool user_matches(Security_context *);
+ /**
+ Check global access
+ @param want_access The required privileges
+    @param match_any if the security context must match all or any of the
+                     requested privileges.
+ @return True if the security context fulfills the access requirements.
+ */
+ bool check_access(ulong want_access, bool match_any = false);
+ bool is_priv_user(const char *user, const char *host);
};
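
A note on the new helper: check_access() reduces to a single bitmask rule. With match_any set, any one of the requested privilege bits present in master_access is enough; otherwise every requested bit must be present. A minimal standalone sketch of that rule, using made-up PRIV_* constants instead of the server's real ACL bits:

#include <cstdio>

typedef unsigned long ulong;

/* Hypothetical privilege bits, for illustration only. */
static const ulong PRIV_SELECT= 1UL << 0;
static const ulong PRIV_INSERT= 1UL << 1;
static const ulong PRIV_SUPER=  1UL << 2;

/* Same decision rule as Security_context::check_access() above. */
static bool check_access(ulong master_access, ulong want_access,
                         bool match_any= false)
{
  return match_any ? (master_access & want_access) != 0
                   : (master_access & want_access) == want_access;
}

int main()
{
  ulong granted= PRIV_SELECT | PRIV_INSERT;
  /* All requested bits required: fails because PRIV_SUPER is missing. */
  printf("%d\n", (int) check_access(granted, PRIV_SELECT | PRIV_SUPER));
  /* Any requested bit suffices: succeeds via PRIV_SELECT. */
  printf("%d\n", (int) check_access(granted, PRIV_SELECT | PRIV_SUPER, true));
  return 0;
}
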
@@ -1955,42 +1952,22 @@ public:
Global_read_lock()
: m_state(GRL_NONE),
- m_mdl_global_shared_lock(NULL),
- m_mdl_blocks_commits_lock(NULL)
+ m_mdl_global_read_lock(NULL)
{}
bool lock_global_read_lock(THD *thd);
void unlock_global_read_lock(THD *thd);
- /**
- Check if this connection can acquire protection against GRL and
- emit error if otherwise.
- */
- bool can_acquire_protection() const
- {
- if (m_state)
- {
- my_error(ER_CANT_UPDATE_WITH_READLOCK, MYF(0));
- return TRUE;
- }
- return FALSE;
- }
bool make_global_read_lock_block_commit(THD *thd);
bool is_acquired() const { return m_state != GRL_NONE; }
void set_explicit_lock_duration(THD *thd);
private:
enum_grl_state m_state;
/**
- In order to acquire the global read lock, the connection must
- acquire shared metadata lock in GLOBAL namespace, to prohibit
- all DDL.
+ Global read lock is acquired in two steps:
+ 1. acquire MDL_BACKUP_FTWRL1 in BACKUP namespace to prohibit DDL and DML
+ 2. upgrade to MDL_BACKUP_FTWRL2 to prohibit commits
*/
- MDL_ticket *m_mdl_global_shared_lock;
- /**
- Also in order to acquire the global read lock, the connection
- must acquire a shared metadata lock in COMMIT namespace, to
- prohibit commits.
- */
- MDL_ticket *m_mdl_blocks_commits_lock;
+ MDL_ticket *m_mdl_global_read_lock;
};
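
As the comment above spells out, the single m_mdl_global_read_lock ticket replaces the old GLOBAL/COMMIT pair because FLUSH TABLES WITH READ LOCK now proceeds in two steps in the BACKUP namespace: first MDL_BACKUP_FTWRL1 (blocking DDL and DML), then an upgrade to MDL_BACKUP_FTWRL2 (blocking commits as well). A rough standalone sketch of that two-step state machine; the acquire/upgrade steps mentioned in the comments are placeholders, not the real MDL API:

#include <cstdio>

enum enum_grl_state_sketch
{
  GRL_NONE,                      /* no global read lock                 */
  GRL_ACQUIRED,                  /* step 1: DDL and DML are blocked     */
  GRL_ACQUIRED_AND_BLOCKS_COMMIT /* step 2: commits are blocked as well */
};

struct Global_read_lock_sketch
{
  enum_grl_state_sketch m_state= GRL_NONE;

  bool lock_global_read_lock()              /* step 1 */
  {
    if (m_state != GRL_NONE)
      return false;                         /* simplified: already held */
    /* acquire MDL_BACKUP_FTWRL1 here (placeholder) */
    m_state= GRL_ACQUIRED;
    return false;                           /* false == success */
  }
  bool make_global_read_lock_block_commit() /* step 2 */
  {
    if (m_state != GRL_ACQUIRED)
      return true;                          /* nothing to upgrade */
    /* upgrade the same ticket to MDL_BACKUP_FTWRL2 here (placeholder) */
    m_state= GRL_ACQUIRED_AND_BLOCKS_COMMIT;
    return false;
  }
  bool is_acquired() const { return m_state != GRL_NONE; }
};

int main()
{
  Global_read_lock_sketch grl;
  grl.lock_global_read_lock();
  grl.make_global_read_lock_block_commit();
  printf("acquired: %d\n", (int) grl.is_acquired());   /* 1 */
  return 0;
}
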
@@ -2145,13 +2122,31 @@ struct wait_for_commit
extern "C" void my_message_sql(uint error, const char *str, myf MyFlags);
+
+/**
+ A wrapper around thread_count.
+
+  It must be specified as the first base class of THD, so that the increment
+  happens before any other THD constructor and the decrement after any other
+  THD destructor.
+
+  The destructor unblocks close_connection() if there are no more THDs left.
+*/
+struct THD_count
+{
+ THD_count() { thread_count++; }
+ ~THD_count() { thread_count--; }
+};
+
+
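
THD_count ties thread_count to THD lifetime purely through base-class ordering: because it is listed first, its constructor runs before any other THD subobject is built and its destructor runs after everything else is torn down. A standalone sketch of the same RAII-counter idiom, with a plain int standing in for the server's thread_count:

#include <cassert>

static int thread_count= 0;        /* the real counter lives in the server */

struct THD_count_sketch
{
  THD_count_sketch()  { thread_count++; }
  ~THD_count_sketch() { thread_count--; }
};

/* The counting base must stay first so the counter brackets the whole
   lifetime of the derived object. */
struct THD_sketch : public THD_count_sketch
{
  int some_member= 0;   /* constructed after, destroyed before, the counter */
};

int main()
{
  assert(thread_count == 0);
  {
    THD_sketch thd;
    assert(thread_count == 1);     /* incremented before other members */
  }
  assert(thread_count == 0);       /* decremented after other members  */
  return 0;
}
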
/**
@class THD
For each client connection we create a separate thread with THD serving as
a thread/connection descriptor
*/
-class THD :public Statement,
+class THD: public THD_count, /* this must be first */
+ public Statement,
/*
This is to track items changed during execution of a prepared
statement/stored procedure. It's created by
@@ -2176,19 +2171,6 @@ private:
inline bool is_conventional() const
{ DBUG_ASSERT(0); return Statement::is_conventional(); }
- void dec_thread_count(void)
- {
- DBUG_ASSERT(thread_count > 0);
- thread_safe_decrement32(&thread_count);
- signal_thd_deleted();
- }
-
-
- void inc_thread_count(void)
- {
- thread_safe_increment32(&thread_count);
- }
-
public:
MDL_context mdl_context;
@@ -2202,6 +2184,7 @@ public:
rpl_io_thread_info *rpl_io_info;
rpl_sql_thread_info *rpl_sql_info;
} system_thread_info;
+ MDL_ticket *mdl_backup_ticket, *mdl_backup_lock;
void reset_for_next_command(bool do_clear_errors= 1);
/*
@@ -2257,7 +2240,7 @@ public:
- thd->db (used in SHOW PROCESSLIST)
Is locked when THD is deleted.
*/
- mysql_mutex_t LOCK_thd_data;
+ mutable mysql_mutex_t LOCK_thd_data;
/*
Protects:
- kill information
@@ -2301,6 +2284,8 @@ public:
Security_context main_security_ctx;
Security_context *security_ctx;
+ Security_context *security_context() const { return security_ctx; }
+ void set_security_context(Security_context *sctx) { security_ctx = sctx; }
/*
Points to info-string that we show in SHOW PROCESSLIST
@@ -2600,6 +2585,7 @@ public:
THD_TRANS stmt; // Trans for current statement
bool on; // see ha_enable_transaction()
XID_STATE xid_state;
+ XID implicit_xid;
WT_THD wt; ///< for deadlock detection
Rows_log_event *m_pending_rows_event;
@@ -2621,17 +2607,10 @@ public:
MEM_ROOT mem_root; // Transaction-life memory allocation pool
void cleanup()
{
- DBUG_ENTER("thd::cleanup");
+ DBUG_ENTER("THD::st_transactions::cleanup");
changed_tables= 0;
savepoints= 0;
- /*
- If rm_error is raised, it means that this piece of a distributed
- transaction has failed and must be rolled back. But the user must
- rollback it explicitly, so don't start a new distributed XA until
- then.
- */
- if (!xid_state.rm_error)
- xid_state.xid.null();
+ implicit_xid.null();
free_root(&mem_root,MYF(MY_KEEP_PREALLOC));
DBUG_VOID_RETURN;
}
@@ -2642,7 +2621,7 @@ public:
st_transactions()
{
bzero((char*)this, sizeof(*this));
- xid_state.xid.null();
+ implicit_xid.null();
init_sql_alloc(&mem_root, "THD::transactions",
ALLOC_ROOT_MIN_BLOCK_SIZE, 0,
MYF(MY_THREAD_SPECIFIC));
@@ -2987,12 +2966,14 @@ public:
ulonglong bytes_sent_old;
ulonglong affected_rows; /* Number of changed rows */
+ Opt_trace_context opt_trace;
pthread_t real_id; /* For debugging */
my_thread_id thread_id, thread_dbug_id;
uint32 os_thread_id;
uint tmp_table, global_disable_checkpoint;
uint server_status,open_options;
enum enum_thread_type system_thread;
+ enum backup_stages current_backup_stage;
/*
Current or next transaction isolation level.
When a connection is established, the value is taken from
@@ -3139,6 +3120,9 @@ public:
it returned an error on master, and this is OK on the slave.
*/
bool is_slave_error;
+ /* True if we have printed something to the error log for this statement */
+ bool error_printed_to_log;
+
/*
True when a transaction is queued up for binlog group commit.
Used so that if another transaction needs to wait for a row lock held by
@@ -3167,12 +3151,6 @@ public:
/** number of name_const() substitutions, see sp_head.cc:subst_spvars() */
uint query_name_consts;
- /*
- If we do a purge of binary logs, log index info of the threads
- that are currently reading it needs to be adjusted. To do that
- each thread that is using LOG_INFO needs to adjust the pointer to it
- */
- LOG_INFO* current_linfo;
NET* slave_net; // network connection from slave -> m.
/*
@@ -3220,7 +3198,6 @@ public:
mysql_bin_log.start_union_events() call.
*/
bool unioned_events_trans;
-
/*
'queries' (actually SP statements) that run under inside this binlog
union have thd->query_id >= first_query_id.
@@ -3228,7 +3205,6 @@ public:
query_id_t first_query_id;
} binlog_evt_union;
- mysql_cond_t COND_wsrep_thd;
/**
Internal parser state.
Note that since the parser is not re-entrant, we keep only one parser
@@ -3288,6 +3264,7 @@ public:
void reset_for_reuse();
bool store_globals();
void reset_globals();
+ bool trace_started();
#ifdef SIGNAL_WITH_VIO_CLOSE
inline void set_active_vio(Vio* vio)
{
@@ -3306,9 +3283,18 @@ public:
void awake_no_mutex(killed_state state_to_set);
void awake(killed_state state_to_set)
{
+ bool wsrep_on_local= WSREP_ON;
+ /*
+      the mutex locking order (LOCK_thd_data before LOCK_thd_kill) requires
+      LOCK_thd_data to be grabbed here
+ */
+ if (wsrep_on_local)
+ mysql_mutex_lock(&LOCK_thd_data);
mysql_mutex_lock(&LOCK_thd_kill);
awake_no_mutex(state_to_set);
mysql_mutex_unlock(&LOCK_thd_kill);
+ if (wsrep_on_local)
+ mysql_mutex_unlock(&LOCK_thd_data);
}
/** Disconnect the associated communication endpoint. */
@@ -3417,11 +3403,16 @@ public:
}
const Type_handler *type_handler_for_date() const;
bool timestamp_to_TIME(MYSQL_TIME *ltime, my_time_t ts,
- ulong sec_part, ulonglong fuzzydate);
+ ulong sec_part, date_mode_t fuzzydate);
inline my_time_t query_start() { return start_time; }
inline ulong query_start_sec_part()
{ query_start_sec_part_used=1; return start_time_sec_part; }
MYSQL_TIME query_start_TIME();
+ time_round_mode_t temporal_round_mode() const
+ {
+ return variables.sql_mode & MODE_TIME_ROUND_FRACTIONAL ?
+ TIME_FRAC_ROUND : TIME_FRAC_TRUNCATE;
+ }
private:
struct {
@@ -3613,6 +3604,15 @@ public:
{
return server_status & SERVER_STATUS_IN_TRANS;
}
+ void give_protection_error();
+ inline bool has_read_only_protection()
+ {
+ if (current_backup_stage == BACKUP_FINISHED &&
+ !global_read_lock.is_acquired())
+ return FALSE;
+ give_protection_error();
+ return TRUE;
+ }
inline bool fill_derived_tables()
{
return !stmt_arena->is_stmt_prepare() && !lex->only_view_structure();
@@ -4383,6 +4383,80 @@ private:
return raised;
}
+private:
+ void push_warning_truncated_priv(Sql_condition::enum_warning_level level,
+ uint sql_errno,
+ const char *type_str, const char *val)
+ {
+ DBUG_ASSERT(sql_errno == ER_TRUNCATED_WRONG_VALUE ||
+ sql_errno == ER_WRONG_VALUE);
+ char buff[MYSQL_ERRMSG_SIZE];
+ CHARSET_INFO *cs= &my_charset_latin1;
+ cs->cset->snprintf(cs, buff, sizeof(buff),
+ ER_THD(this, sql_errno), type_str, val);
+ /*
+ Note: the format string can vary between ER_TRUNCATED_WRONG_VALUE
+ and ER_WRONG_VALUE, but the code passed to push_warning() is
+ always ER_TRUNCATED_WRONG_VALUE. This is intentional.
+ */
+ push_warning(this, level, ER_TRUNCATED_WRONG_VALUE, buff);
+ }
+public:
+ void push_warning_truncated_wrong_value(Sql_condition::enum_warning_level level,
+ const char *type_str, const char *val)
+ {
+ return push_warning_truncated_priv(level, ER_TRUNCATED_WRONG_VALUE,
+ type_str, val);
+ }
+ void push_warning_wrong_value(Sql_condition::enum_warning_level level,
+ const char *type_str, const char *val)
+ {
+ return push_warning_truncated_priv(level, ER_WRONG_VALUE, type_str, val);
+ }
+ void push_warning_truncated_wrong_value(const char *type_str, const char *val)
+ {
+ return push_warning_truncated_wrong_value(Sql_condition::WARN_LEVEL_WARN,
+ type_str, val);
+ }
+ void push_warning_truncated_value_for_field(Sql_condition::enum_warning_level
+ level, const char *type_str,
+ const char *val,
+ const TABLE_SHARE *s,
+ const char *name)
+ {
+ DBUG_ASSERT(name);
+ char buff[MYSQL_ERRMSG_SIZE];
+ CHARSET_INFO *cs= &my_charset_latin1;
+ const char *db_name= s ? s->db.str : NULL;
+ const char *table_name= s ? s->table_name.str : NULL;
+
+ if (!db_name)
+ db_name= "";
+ if (!table_name)
+ table_name= "";
+ cs->cset->snprintf(cs, buff, sizeof(buff),
+ ER_THD(this, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
+ type_str, val, db_name, table_name, name,
+ (ulong) get_stmt_da()->current_row_for_warning());
+ push_warning(this, level, ER_TRUNCATED_WRONG_VALUE, buff);
+
+ }
+ void push_warning_wrong_or_truncated_value(Sql_condition::enum_warning_level level,
+ bool totally_useless_value,
+ const char *type_str,
+ const char *val,
+ const TABLE_SHARE *s,
+ const char *field_name)
+ {
+ if (field_name)
+ push_warning_truncated_value_for_field(level, type_str, val,
+ s, field_name);
+ else if (totally_useless_value)
+ push_warning_wrong_value(level, type_str, val);
+ else
+ push_warning_truncated_wrong_value(level, type_str, val);
+ }
+
public:
/** Overloaded to guard query/query_length fields */
virtual void set_statement(Statement *stmt);
@@ -4424,6 +4498,13 @@ public:
void set_query_id(query_id_t new_query_id)
{
query_id= new_query_id;
+#ifdef WITH_WSREP
+ if (WSREP_NNULL(this))
+ {
+ set_wsrep_next_trx_id(query_id);
+ WSREP_DEBUG("assigned new next trx id: %" PRIu64, wsrep_next_trx_id());
+ }
+#endif /* WITH_WSREP */
}
void set_open_tables(TABLE *open_tables_arg)
{
@@ -4606,12 +4687,10 @@ public:
};
bool has_thd_temporary_tables();
- TABLE *create_and_open_tmp_table(handlerton *hton,
- LEX_CUSTRING *frm,
+ TABLE *create_and_open_tmp_table(LEX_CUSTRING *frm,
const char *path,
const char *db,
const char *table_name,
- bool open_in_engine,
bool open_internal_tables);
TABLE *find_temporary_table(const char *db, const char *table_name,
@@ -4648,13 +4727,12 @@ private:
bool has_temporary_tables();
uint create_tmp_table_def_key(char *key, const char *db,
const char *table_name);
- TMP_TABLE_SHARE *create_temporary_table(handlerton *hton, LEX_CUSTRING *frm,
+ TMP_TABLE_SHARE *create_temporary_table(LEX_CUSTRING *frm,
const char *path, const char *db,
const char *table_name);
TABLE *find_temporary_table(const char *key, uint key_length,
Temporary_table_state state);
- TABLE *open_temporary_table(TMP_TABLE_SHARE *share, const char *alias,
- bool open_in_engine);
+ TABLE *open_temporary_table(TMP_TABLE_SHARE *share, const char *alias);
bool find_and_use_tmp_table(const TABLE_LIST *tl, TABLE **out_table);
bool use_temporary_table(TABLE *table, TABLE **out_table);
void close_temporary_table(TABLE *table);
@@ -4677,62 +4755,135 @@ private:
}
public:
+#ifdef HAVE_REPLICATION
+ /*
+ If we do a purge of binary logs, log index info of the threads
+ that are currently reading it needs to be adjusted. To do that
+ each thread that is using LOG_INFO needs to adjust the pointer to it
+ */
+ LOG_INFO *current_linfo;
+ Slave_info *slave_info;
+
+ void set_current_linfo(LOG_INFO *linfo);
+ void reset_current_linfo() { set_current_linfo(0); }
+
+ int register_slave(uchar *packet, size_t packet_length);
+ void unregister_slave();
+ bool is_binlog_dump_thread();
+#endif
+
inline ulong wsrep_binlog_format() const
{
- return WSREP_FORMAT(variables.binlog_format);
+ return WSREP_BINLOG_FORMAT(variables.binlog_format);
}
#ifdef WITH_WSREP
- const bool wsrep_applier; /* dedicated slave applier thread */
+ bool wsrep_applier; /* dedicated slave applier thread */
bool wsrep_applier_closing; /* applier marked to close */
bool wsrep_client_thread; /* to identify client threads*/
- bool wsrep_PA_safe;
- bool wsrep_converted_lock_session;
- bool wsrep_apply_toi; /* applier processing in TOI */
- enum wsrep_exec_mode wsrep_exec_mode;
query_id_t wsrep_last_query_id;
- enum wsrep_query_state wsrep_query_state;
- enum wsrep_conflict_state wsrep_conflict_state;
- wsrep_trx_meta_t wsrep_trx_meta;
+ XID wsrep_xid;
+
+ /** This flag denotes that record locking should be skipped during INSERT
+ and gap locking during SELECT. Only used by the streaming replication thread
+ that only modifies the wsrep_schema.SR table. */
+ my_bool wsrep_skip_locking;
+
+ mysql_cond_t COND_wsrep_thd;
+
+ // changed from wsrep_seqno_t to wsrep_trx_meta_t in wsrep API rev 75
uint32 wsrep_rand;
- Relay_log_info *wsrep_rli;
rpl_group_info *wsrep_rgi;
- wsrep_ws_handle_t wsrep_ws_handle;
+ bool wsrep_converted_lock_session;
+ char wsrep_info[128]; /* string for dynamic proc info */
ulong wsrep_retry_counter; // of autocommit
- char *wsrep_retry_query;
+ bool wsrep_PA_safe;
+ char* wsrep_retry_query;
size_t wsrep_retry_query_len;
enum enum_server_command wsrep_retry_command;
- enum wsrep_consistency_check_mode
+ enum wsrep_consistency_check_mode
wsrep_consistency_check;
+ std::vector<wsrep::provider::status_variable> wsrep_status_vars;
int wsrep_mysql_replicated;
- const char *wsrep_TOI_pre_query; /* a query to apply before
- the actual TOI query */
+ const char* wsrep_TOI_pre_query; /* a query to apply before
+ the actual TOI query */
size_t wsrep_TOI_pre_query_len;
wsrep_po_handle_t wsrep_po_handle;
size_t wsrep_po_cnt;
#ifdef GTID_SUPPORT
+ my_bool wsrep_po_in_trans;
rpl_sid wsrep_po_sid;
-#endif /* GTID_SUPPORT */
+#endif /* GTID_SUPPORT */
void *wsrep_apply_format;
- char wsrep_info[128]; /* string for dynamic proc info */
+ bool wsrep_apply_toi; /* applier processing in TOI */
+ uchar* wsrep_rbr_buf;
+ wsrep_gtid_t wsrep_sync_wait_gtid;
+ // wsrep_gtid_t wsrep_last_written_gtid;
+ ulong wsrep_affected_rows;
+ bool wsrep_has_ignored_error;
+ bool wsrep_replicate_GTID;
+
/*
When enabled, do not replicate/binlog updates from the current table that's
being processed. At the moment, it is used to keep mysql.gtid_slave_pos
table updates from being replicated to other nodes via galera replication.
*/
bool wsrep_ignore_table;
- wsrep_gtid_t wsrep_sync_wait_gtid;
- ulong wsrep_affected_rows;
- bool wsrep_replicate_GTID;
- bool wsrep_skip_wsrep_GTID;
- /* This flag is set when innodb do an intermediate commit to
- processing the LOAD DATA INFILE statement by splitting it into 10K
- rows chunks. If flag is set, then binlog rotation is not performed
- while intermediate transaction try to commit, because in this case
- rotation causes unregistration of innodb handler. Later innodb handler
- registered again, but replication of last chunk of rows is skipped
- by the innodb engine: */
- bool wsrep_split_flag;
+
+
+ /*
+ Transaction id:
+ * m_wsrep_next_trx_id is assigned on the first query after
+ wsrep_next_trx_id() return WSREP_UNDEFINED_TRX_ID
+ * Each storage engine must assign value of wsrep_next_trx_id()
+ when the transaction starts.
+ * Effective transaction id is returned via wsrep_trx_id()
+ */
+ /*
+ Return effective transaction id
+ */
+ wsrep_trx_id_t wsrep_trx_id() const
+ {
+ return m_wsrep_client_state.transaction().id().get();
+ }
+
+
+ /*
+ Set next trx id
+ */
+ void set_wsrep_next_trx_id(query_id_t query_id)
+ {
+ m_wsrep_next_trx_id = (wsrep_trx_id_t) query_id;
+ }
+ /*
+ Return next trx id
+ */
+ wsrep_trx_id_t wsrep_next_trx_id() const
+ {
+ return m_wsrep_next_trx_id;
+ }
+
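
The comment block above describes a lazy hand-off: set_query_id() stores the query id into m_wsrep_next_trx_id, and it only becomes the effective transaction id once a storage engine starts the transaction and consumes wsrep_next_trx_id(). A simplified standalone sketch of that hand-off; the plain members below stand in for the wsrep-lib client state, so the names are illustrative rather than the real API:

#include <cstdint>
#include <cstdio>

typedef uint64_t trx_id_t;
static const trx_id_t UNDEFINED_TRX_ID= UINT64_MAX;

struct Session_sketch
{
  trx_id_t next_trx_id=   UNDEFINED_TRX_ID;  /* cf. m_wsrep_next_trx_id   */
  trx_id_t active_trx_id= UNDEFINED_TRX_ID;  /* id held by the trx itself */

  void on_new_query(uint64_t query_id)       /* cf. set_query_id()        */
  { next_trx_id= (trx_id_t) query_id; }

  void on_engine_trx_start()                 /* engine consumes the id    */
  { active_trx_id= next_trx_id; }

  trx_id_t effective_trx_id() const          /* cf. wsrep_trx_id()        */
  { return active_trx_id; }
};

int main()
{
  Session_sketch s;
  s.on_new_query(42);        /* assigned lazily when the query id changes */
  s.on_engine_trx_start();   /* picked up when the transaction starts     */
  printf("%llu\n", (unsigned long long) s.effective_trx_id());   /* 42    */
  return 0;
}
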
+private:
+ wsrep_trx_id_t m_wsrep_next_trx_id; /* cast from query_id_t */
+ /* wsrep-lib */
+ Wsrep_mutex m_wsrep_mutex;
+ Wsrep_condition_variable m_wsrep_cond;
+ Wsrep_client_service m_wsrep_client_service;
+ Wsrep_client_state m_wsrep_client_state;
+
+public:
+ Wsrep_client_state& wsrep_cs() { return m_wsrep_client_state; }
+ const Wsrep_client_state& wsrep_cs() const { return m_wsrep_client_state; }
+ const wsrep::transaction& wsrep_trx() const
+ { return m_wsrep_client_state.transaction(); }
+ const wsrep::streaming_context& wsrep_sr() const
+ { return m_wsrep_client_state.transaction().streaming_context(); }
+ /* Pointer to applier service for streaming THDs. This is needed to
+ be able to delete applier service object in case of background
+ rollback. */
+ Wsrep_applier_service* wsrep_applier_service;
+ /* wait_for_commit struct for binlog group commit */
+ wait_for_commit wsrep_wfc;
#endif /* WITH_WSREP */
/* Handling of timeouts for commands */
@@ -4779,18 +4930,6 @@ public:
(THD_TRANS::DID_WAIT | THD_TRANS::CREATED_TEMP_TABLE |
THD_TRANS::DROPPED_TEMP_TABLE | THD_TRANS::DID_DDL));
}
- /*
- Reset current_linfo
- Setting current_linfo to 0 needs to be done with LOCK_thread_count to
- ensure that adjust_linfo_offsets doesn't use a structure that may
- be deleted.
- */
- inline void reset_current_linfo()
- {
- mysql_mutex_lock(&LOCK_thread_count);
- current_linfo= 0;
- mysql_mutex_unlock(&LOCK_thread_count);
- }
uint get_net_wait_timeout()
@@ -4844,27 +4983,6 @@ public:
bool sp_eval_expr(Field *result_field, Item **expr_item_ptr);
};
-inline void add_to_active_threads(THD *thd)
-{
- mysql_mutex_lock(&LOCK_thread_count);
- threads.append(thd);
- mysql_mutex_unlock(&LOCK_thread_count);
-}
-
-/*
- This should be called when you want to delete a thd that was not
- running any queries.
- This function will assert that the THD is linked.
-*/
-
-inline void unlink_not_visible_thd(THD *thd)
-{
- thd->assert_linked();
- mysql_mutex_lock(&LOCK_thread_count);
- thd->unlink();
- mysql_mutex_unlock(&LOCK_thread_count);
-}
-
/** A short cut for thd->get_stmt_da()->set_ok_status(). */
inline void
@@ -4898,10 +5016,18 @@ my_eof(THD *thd)
(A)->variables.sql_log_bin_off= 0;}
-inline sql_mode_t sql_mode_for_dates(THD *thd)
+inline date_conv_mode_t sql_mode_for_dates(THD *thd)
{
- return thd->variables.sql_mode &
- (MODE_NO_ZERO_DATE | MODE_NO_ZERO_IN_DATE | MODE_INVALID_DATES);
+ static_assert((date_conv_mode_t::KNOWN_MODES &
+ time_round_mode_t::KNOWN_MODES) == 0,
+ "date_conv_mode_t and time_round_mode_t must use different "
+ "bit values");
+ static_assert(MODE_NO_ZERO_DATE == date_mode_t::NO_ZERO_DATE &&
+ MODE_NO_ZERO_IN_DATE == date_mode_t::NO_ZERO_IN_DATE &&
+ MODE_INVALID_DATES == date_mode_t::INVALID_DATES,
+ "sql_mode_t and date_mode_t values must be equal");
+ return date_conv_mode_t(thd->variables.sql_mode &
+ (MODE_NO_ZERO_DATE | MODE_NO_ZERO_IN_DATE | MODE_INVALID_DATES));
}
/*
@@ -6041,6 +6167,10 @@ class multi_delete :public select_result_interceptor
bool error_handled;
public:
+ // Methods used by ColumnStore
+ uint get_num_of_tables() const { return num_of_tables; }
+ TABLE_LIST* get_tables() const { return delete_tables; }
+public:
multi_delete(THD *thd_arg, TABLE_LIST *dt, uint num_of_tables);
~multi_delete();
int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
@@ -6220,7 +6350,7 @@ public:
be rolled back or that do not expect any previously metadata
locked tables.
*/
-#define CF_IMPLICT_COMMIT_BEGIN (1U << 6)
+#define CF_IMPLICIT_COMMIT_BEGIN (1U << 6)
/**
Implicitly commit after the SQL statement.
@@ -6238,7 +6368,7 @@ public:
before and after every DDL statement and any statement that
modifies our currently non-transactional system tables.
*/
-#define CF_AUTO_COMMIT_TRANS (CF_IMPLICT_COMMIT_BEGIN | CF_IMPLICIT_COMMIT_END)
+#define CF_AUTO_COMMIT_TRANS (CF_IMPLICIT_COMMIT_BEGIN | CF_IMPLICIT_COMMIT_END)
/**
Diagnostic statement.
@@ -6319,6 +6449,14 @@ public:
*/
#define CF_DB_CHANGE (1U << 23)
+#ifdef WITH_WSREP
+/**
+ DDL statement that may be subject to error filtering.
+*/
+#define CF_WSREP_MAY_IGNORE_ERRORS (1U << 24)
+#endif /* WITH_WSREP */
+
+
/* Bits in server_command_flags */
/**
@@ -6353,7 +6491,8 @@ public:
inline bool add_item_to_list(THD *thd, Item *item)
{
- return thd->lex->current_select->add_item_to_list(thd, item);
+ bool res= thd->lex->current_select->add_item_to_list(thd, item);
+ return res;
}
inline bool add_value_to_list(THD *thd, Item *value)
@@ -6759,5 +6898,85 @@ private:
THD *thd;
};
+
+/** THD registry */
+class THD_list
+{
+ I_List<THD> threads;
+ mutable mysql_rwlock_t lock;
+
+public:
+ /**
+ Constructor replacement.
+
+    Unfortunately we can't use a fair constructor to initialize the mutex
+ for two reasons: PFS and embedded. The former can probably be fixed,
+ the latter can probably be dropped.
+ */
+ void init()
+ {
+ mysql_rwlock_init(key_rwlock_THD_list, &lock);
+ }
+
+ /** Destructor replacement. */
+ void destroy()
+ {
+ mysql_rwlock_destroy(&lock);
+ }
+
+ /**
+ Inserts thread to registry.
+
+ @param thd thread
+
+ Thread becomes accessible via server_threads.
+ */
+ void insert(THD *thd)
+ {
+ mysql_rwlock_wrlock(&lock);
+ threads.append(thd);
+ mysql_rwlock_unlock(&lock);
+ }
+
+ /**
+ Removes thread from registry.
+
+ @param thd thread
+
+    Thread is no longer accessible via server_threads.
+ */
+ void erase(THD *thd)
+ {
+ thd->assert_linked();
+ mysql_rwlock_wrlock(&lock);
+ thd->unlink();
+ mysql_rwlock_unlock(&lock);
+ }
+
+ /**
+ Iterates registered threads.
+
+ @param action called for every element
+    @param argument opaque argument passed to action
+
+ @return
+ @retval 0 iteration completed successfully
+ @retval 1 iteration was interrupted (action returned 1)
+ */
+ template <typename T> int iterate(my_bool (*action)(THD *thd, T *arg), T *arg= 0)
+ {
+ int res= 0;
+ mysql_rwlock_rdlock(&lock);
+ I_List_iterator<THD> it(threads);
+ while (auto tmp= it++)
+ if ((res= action(tmp, arg)))
+ break;
+ mysql_rwlock_unlock(&lock);
+ return res;
+ }
+};
+
+extern THD_list server_threads;
+
#endif /* MYSQL_SERVER */
#endif /* SQL_CLASS_INCLUDED */
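
THD_list above replaces the old LOCK_thread_count helpers with an rwlock-protected registry: insert() and erase() take the lock exclusively, while iterate() holds it shared and applies a callback to every registered thread, stopping early when the callback returns non-zero. A standalone sketch of the same pattern, using std::list and std::shared_mutex (C++17) in place of the server's I_List and mysql_rwlock:

#include <cstdio>
#include <list>
#include <mutex>
#include <shared_mutex>

struct Thd_stub { int id; };

class THD_list_sketch
{
  std::list<Thd_stub*> threads;
  mutable std::shared_mutex lock;

public:
  void insert(Thd_stub *thd)
  {
    std::unique_lock<std::shared_mutex> guard(lock);   /* exclusive */
    threads.push_back(thd);
  }
  void erase(Thd_stub *thd)
  {
    std::unique_lock<std::shared_mutex> guard(lock);
    threads.remove(thd);
  }
  /* The callback returns non-zero to stop iteration early. */
  template <typename T>
  int iterate(int (*action)(Thd_stub*, T*), T *arg= 0) const
  {
    int res= 0;
    std::shared_lock<std::shared_mutex> guard(lock);   /* shared */
    for (Thd_stub *thd : threads)
      if ((res= action(thd, arg)))
        break;
    return res;
  }
};

static int print_id(Thd_stub *thd, void*)
{ printf("thd %d\n", thd->id); return 0; }

int main()
{
  THD_list_sketch registry;
  Thd_stub a= {1}, b= {2};
  registry.insert(&a);
  registry.insert(&b);
  registry.iterate(print_id);    /* prints both ids */
  registry.erase(&a);
  registry.erase(&b);
  return 0;
}
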
diff --git a/sql/sql_cmd.h b/sql/sql_cmd.h
index 017dbca3e5e..8ff26b09015 100644
--- a/sql/sql_cmd.h
+++ b/sql/sql_cmd.h
@@ -108,6 +108,7 @@ enum enum_sql_command {
SQLCOM_SHOW_STATUS_PACKAGE,
SQLCOM_SHOW_STATUS_PACKAGE_BODY,
SQLCOM_SHOW_PACKAGE_BODY_CODE,
+ SQLCOM_BACKUP, SQLCOM_BACKUP_LOCK,
/*
When a command is added here, be sure it's also added in mysqld.cc
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index 21cd164e2a8..6ce2aeb095c 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -37,7 +37,11 @@
// reset_host_errors
#include "sql_acl.h" // acl_getroot, NO_ACCESS, SUPER_ACL
#include "sql_callback.h"
+
+#ifdef WITH_WSREP
+#include "wsrep_trans_observer.h" /* wsrep open/close */
#include "wsrep_mysqld.h"
+#endif /* WITH_WSREP */
#include "proxy_protocol.h"
HASH global_user_stats, global_client_stats, global_table_stats;
@@ -1178,17 +1182,6 @@ exit:
void end_connection(THD *thd)
{
NET *net= &thd->net;
-#ifdef WITH_WSREP
- if (WSREP(thd))
- {
- wsrep_status_t rcode= wsrep->free_connection(wsrep, thd->thread_id);
- if (rcode) {
- WSREP_WARN("wsrep failed to free connection context: %lld code: %d",
- (longlong) thd->thread_id, rcode);
- }
- }
- thd->wsrep_client_thread= 0;
-#endif
if (thd->user_connect)
{
@@ -1322,7 +1315,7 @@ bool thd_prepare_connection(THD *thd)
prepare_new_connection_state(thd);
#ifdef WITH_WSREP
- thd->wsrep_client_thread= 1;
+ thd->wsrep_client_thread= true;
#endif /* WITH_WSREP */
return FALSE;
}
@@ -1367,7 +1360,7 @@ void do_handle_one_connection(CONNECT *connect)
delete connect;
/* Make THD visible in show processlist */
- add_to_active_threads(thd);
+ server_threads.insert(thd);
thd->thr_create_utime= thr_create_utime;
/* We need to set this because of time_out_user_resource_limits */
@@ -1395,6 +1388,9 @@ void do_handle_one_connection(CONNECT *connect)
create_user= FALSE;
goto end_thread;
}
+#ifdef WITH_WSREP
+ wsrep_open(thd);
+#endif /* WITH_WSREP */
while (thd_is_connection_alive(thd))
{
@@ -1405,13 +1401,9 @@ void do_handle_one_connection(CONNECT *connect)
end_connection(thd);
#ifdef WITH_WSREP
- if (WSREP(thd))
- {
- mysql_mutex_lock(&thd->LOCK_thd_data);
- thd->wsrep_query_state= QUERY_EXITING;
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- }
-#endif
+ wsrep_close(thd);
+#endif /* WITH_WSREP */
+
end_thread:
close_connection(thd);
diff --git a/sql/sql_const.h b/sql/sql_const.h
index 82f3b9c21f2..d4e40cd551e 100644
--- a/sql/sql_const.h
+++ b/sql/sql_const.h
@@ -204,7 +204,11 @@
instead of reading with keys. The number says how many evaluation of the
WHERE clause is comparable to reading one extra row from a table.
*/
-#define TIME_FOR_COMPARE 5 // 5 compares == one read
+#define TIME_FOR_COMPARE 5 // 5 compares == one read
+#define TIME_FOR_COMPARE_IDX 20
+
+#define IDX_BLOCK_COPY_COST ((double) 1 / TIME_FOR_COMPARE)
+#define IDX_LOOKUP_COST ((double) 1 / 8)
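
For reference, with TIME_FOR_COMPARE fixed at 5 the derived constants come out to IDX_BLOCK_COPY_COST = 1.0/5 = 0.2 and IDX_LOOKUP_COST = 1.0/8 = 0.125. A trivial standalone check of that arithmetic:

#include <cstdio>

#define TIME_FOR_COMPARE     5
#define IDX_BLOCK_COPY_COST  ((double) 1 / TIME_FOR_COMPARE)
#define IDX_LOOKUP_COST      ((double) 1 / 8)

int main()
{
  printf("IDX_BLOCK_COPY_COST = %.3f\n", IDX_BLOCK_COPY_COST);  /* 0.200 */
  printf("IDX_LOOKUP_COST     = %.3f\n", IDX_LOOKUP_COST);      /* 0.125 */
  return 0;
}
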
/**
Number of comparisons of table rowids equivalent to reading one row from a
diff --git a/sql/sql_cte.cc b/sql/sql_cte.cc
index 247d7e5a3d1..6f5162b645b 100644
--- a/sql/sql_cte.cc
+++ b/sql/sql_cte.cc
@@ -55,6 +55,14 @@ bool With_clause::add_with_element(With_element *elem)
}
+void st_select_lex_unit::set_with_clause(With_clause *with_cl)
+{
+ with_clause= with_cl;
+ if (with_clause)
+ with_clause->set_owner(this);
+}
+
+
/**
@brief
Check dependencies between tables defined in a list of with clauses
@@ -682,7 +690,7 @@ void With_element::move_anchors_ahead()
{
st_select_lex *next_sl;
st_select_lex *new_pos= spec->first_select();
- new_pos->linkage= UNION_TYPE;
+ new_pos->set_linkage(UNION_TYPE);
for (st_select_lex *sl= new_pos; sl; sl= next_sl)
{
next_sl= sl->next_select();
@@ -691,9 +699,9 @@ void With_element::move_anchors_ahead()
sl->move_node(new_pos);
if (new_pos == spec->first_select())
{
- enum sub_select_type type= new_pos->linkage;
- new_pos->linkage= sl->linkage;
- sl->linkage= type;
+ enum sub_select_type type= new_pos->get_linkage();
+ new_pos->set_linkage(sl->get_linkage());
+ sl->set_linkage(type);
new_pos->with_all_modifier= sl->with_all_modifier;
sl->with_all_modifier= false;
}
@@ -706,7 +714,7 @@ void With_element::move_anchors_ahead()
}
}
first_recursive= new_pos;
- spec->first_select()->linkage= DERIVED_TABLE_TYPE;
+ spec->first_select()->set_linkage(DERIVED_TABLE_TYPE);
}
@@ -773,7 +781,7 @@ bool With_element::set_unparsed_spec(THD *thd, char *spec_start, char *spec_end,
if (!unparsed_spec.str)
{
- my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR),
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATAL),
static_cast<int>(unparsed_spec.length));
return true;
}
@@ -856,10 +864,9 @@ st_select_lex_unit *With_element::clone_parsed_spec(THD *thd,
lex->sp_chistics= old_lex->sp_chistics;
lex->stmt_lex= old_lex;
- with_select= &lex->select_lex;
- with_select->select_number= ++thd->lex->stmt_lex->current_select_number;
parse_status= parse_sql(thd, &parser_state, 0);
((char*) &unparsed_spec.str[unparsed_spec.length])[0]= save_end;
+ with_select= lex->first_select_lex();
if (parse_status)
goto err;
@@ -1012,7 +1019,7 @@ bool With_element::prepare_unreferenced(THD *thd)
rename_columns_of_derived_unit(thd, spec) ||
check_duplicate_names(thd, first_sl->item_list, 1)))
rc= true;
-
+
thd->lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_DERIVED;
return rc;
}
@@ -1124,7 +1131,8 @@ bool TABLE_LIST::set_as_with_table(THD *thd, With_element *with_elem)
if(!(derived= with_elem->clone_parsed_spec(thd, this)))
return true;
}
- derived->first_select()->linkage= DERIVED_TABLE_TYPE;
+ derived->first_select()->set_linkage(DERIVED_TABLE_TYPE);
+ select_lex->add_statistics(derived);
with_elem->inc_references();
return false;
}
diff --git a/sql/sql_cte.h b/sql/sql_cte.h
index bda62271649..03c697bf746 100644
--- a/sql/sql_cte.h
+++ b/sql/sql_cte.h
@@ -292,8 +292,7 @@ private:
*/
With_clause *next_with_clause;
/* Set to true if dependencies between with elements have been checked */
- bool dependencies_are_checked;
-
+ bool dependencies_are_checked;
/*
The bitmap of all recursive with elements whose specifications
are not complied with restrictions imposed by the SQL standards
@@ -317,9 +316,8 @@ public:
bool with_recursive;
With_clause(bool recursive_fl, With_clause *emb_with_clause)
- : owner(NULL),
- embedding_with_clause(emb_with_clause), next_with_clause(NULL),
- dependencies_are_checked(false), unrestricted(0),
+ : owner(NULL), embedding_with_clause(emb_with_clause),
+ next_with_clause(NULL), dependencies_are_checked(false), unrestricted(0),
with_prepared_anchor(0), cleaned(0), stabilized(0),
with_recursive(recursive_fl)
{ }
@@ -333,8 +331,12 @@ public:
last_next= &this->next_with_clause;
}
+ st_select_lex_unit *get_owner() { return owner; }
+
void set_owner(st_select_lex_unit *unit) { owner= unit; }
+ void attach_to(st_select_lex *select_lex);
+
With_clause *pop() { return embedding_with_clause; }
bool check_dependencies();
@@ -367,7 +369,6 @@ bool With_element::is_unrestricted()
}
inline
-
bool With_element::is_with_prepared_anchor()
{
return owner->with_prepared_anchor & get_elem_map();
@@ -449,11 +450,14 @@ void With_element::prepare_for_next_iteration()
inline
-void st_select_lex_unit::set_with_clause(With_clause *with_cl)
-{
- with_clause= with_cl;
- if (with_clause)
- with_clause->set_owner(this);
+void With_clause::attach_to(st_select_lex *select_lex)
+{
+ for (With_element *with_elem= with_list.first;
+ with_elem;
+ with_elem= with_elem->next)
+ {
+ select_lex->register_unit(with_elem->spec, NULL);
+ }
}
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index cce0bdadedb..61d8f12deaa 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -882,7 +882,7 @@ mysql_rm_db_internal(THD *thd, const LEX_CSTRING *db, bool if_exists, bool silen
lock_db_routines(thd, dbnorm))
goto exit;
- if (!in_bootstrap && !rm_mysql_schema)
+ if (!rm_mysql_schema)
{
for (table= tables; table; table= table->next_local)
{
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 7b0572dbc78..d2e511b1d86 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -245,6 +245,50 @@ static bool record_should_be_deleted(THD *thd, TABLE *table, SQL_SELECT *sel,
return false;
}
+static
+int update_portion_of_time(THD *thd, TABLE *table,
+ const vers_select_conds_t &period_conds,
+ bool *inside_period)
+{
+ bool lcond= period_conds.field_start->val_datetime_packed(thd)
+ < period_conds.start.item->val_datetime_packed(thd);
+ bool rcond= period_conds.field_end->val_datetime_packed(thd)
+ > period_conds.end.item->val_datetime_packed(thd);
+
+ *inside_period= !lcond && !rcond;
+ if (*inside_period)
+ return 0;
+
+ DBUG_ASSERT(!table->triggers
+ || !table->triggers->has_triggers(TRG_EVENT_INSERT,
+ TRG_ACTION_BEFORE));
+
+ int res= 0;
+ Item *src= lcond ? period_conds.start.item : period_conds.end.item;
+ uint dst_fieldno= lcond ? table->s->period.end_fieldno
+ : table->s->period.start_fieldno;
+
+ store_record(table, record[1]);
+ if (likely(!res))
+ res= src->save_in_field(table->field[dst_fieldno], true);
+
+ if (likely(!res))
+ res= table->update_generated_fields();
+
+ if (likely(!res))
+ res= table->file->ha_update_row(table->record[1], table->record[0]);
+
+ if (likely(!res) && table->triggers)
+ res= table->triggers->process_triggers(thd, TRG_EVENT_INSERT,
+ TRG_ACTION_AFTER, true);
+ restore_record(table, record[1]);
+
+ if (likely(!res) && lcond && rcond)
+ res= table->period_make_insert(period_conds.end.item,
+ table->field[table->s->period.start_fieldno]);
+
+ return res;
+}
inline
int TABLE::delete_row()
@@ -287,10 +331,10 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
bool return_error= 0;
ha_rows deleted= 0;
bool reverse= FALSE;
- bool has_triggers;
+ bool has_triggers= false;
ORDER *order= (ORDER *) ((order_list && order_list->elements) ?
order_list->first : NULL);
- SELECT_LEX *select_lex= &thd->lex->select_lex;
+ SELECT_LEX *select_lex= thd->lex->first_select_lex();
killed_state killed_status= NOT_KILLED;
THD::enum_binlog_query_type query_type= THD::ROW_QUERY_TYPE;
bool binlog_is_row;
@@ -298,7 +342,9 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
Explain_delete *explain;
Delete_plan query_plan(thd->mem_root);
Unique * deltempfile= NULL;
- bool delete_record, delete_while_scanning;
+ bool delete_record= false;
+ bool delete_while_scanning;
+ bool portion_of_time_through_update;
DBUG_ENTER("mysql_delete");
query_plan.index= MAX_KEY;
@@ -313,6 +359,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
bool truncate_history= table_list->vers_conditions.is_set();
if (truncate_history)
{
+ DBUG_ASSERT(!table_list->period_conditions.is_set());
+
if (table_list->is_view_or_derived())
{
my_error(ER_IT_IS_A_VIEW, MYF(0), table_list->table_name.str);
@@ -320,7 +368,6 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
}
DBUG_ASSERT(table_list->table);
-
DBUG_ASSERT(!conds || thd->stmt_arena->is_stmt_execute());
// conds could be cached from previous SP call
@@ -351,7 +398,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
DBUG_RETURN(TRUE);
}
table->map=1;
- query_plan.select_lex= &thd->lex->select_lex;
+ query_plan.select_lex= thd->lex->first_select_lex();
query_plan.table= table;
query_plan.updating_a_view= MY_TEST(table_list->view);
@@ -383,7 +430,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
setup_order(thd, select_lex->ref_pointer_array, &tables,
fields, all_fields, order))
{
- free_underlaid_joins(thd, &thd->lex->select_lex);
+ free_underlaid_joins(thd, thd->lex->first_select_lex());
DBUG_RETURN(TRUE);
}
}
@@ -424,12 +471,12 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
- there should be no delete triggers associated with the table.
*/
- has_triggers= (table->triggers &&
- table->triggers->has_delete_triggers());
+ has_triggers= table->triggers && table->triggers->has_delete_triggers();
+
if (!with_select && !using_limit && const_cond_result &&
(!thd->is_current_stmt_binlog_format_row() &&
!has_triggers)
- && !table->versioned(VERS_TIMESTAMP))
+ && !table->versioned(VERS_TIMESTAMP) && !table_list->has_period())
{
/* Update the table->file->stats.records number */
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
@@ -599,7 +646,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
*/
if ((table->file->ha_table_flags() & HA_CAN_DIRECT_UPDATE_AND_DELETE) &&
- !has_triggers && !binlog_is_row && !with_select)
+ !has_triggers && !binlog_is_row && !with_select &&
+ !table_list->has_period())
{
table->mark_columns_needed_for_delete();
if (!table->check_virtual_columns_marked_for_read())
@@ -669,7 +717,15 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
if (unlikely(init_ftfuncs(thd, select_lex, 1)))
goto got_error;
- table->mark_columns_needed_for_delete();
+ if (table_list->has_period())
+ {
+ table->use_all_columns();
+ table->rpl_write_set= table->write_set;
+ }
+ else
+ {
+ table->mark_columns_needed_for_delete();
+ }
if ((table->file->ha_table_flags() & HA_CAN_FORCE_BULK_DELETE) &&
!table->prepare_triggers_for_delete_stmt_or_event())
@@ -726,6 +782,24 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
delete_record= true;
}
+ /*
+ From SQL2016, Part 2, 15.7 <Effect of deleting rows from base table>,
+ General Rules, 8), we can conclude that DELETE FOR PORTION OF time performs
+ 0-2 INSERTs + a DELETE. We can substitute the INSERT+DELETE pair with a single
+ UPDATE, provided there are no side effects. A side effect is possible if there
+ is a BEFORE INSERT trigger, since it is the only thing that can observe the
+ split between the DELETE and INSERT operations.
+ Another possible side effect concerns tables of non-transactional engines,
+ since an UPDATE is atomic whereas DELETE+INSERT is not.
+
+ This optimization is not possible for system-versioned tables.
+ */
+ portion_of_time_through_update=
+ !(table->triggers && table->triggers->has_triggers(TRG_EVENT_INSERT,
+ TRG_ACTION_BEFORE))
+ && !table->versioned()
+ && table->file->has_transactions();
+
THD_STAGE_INFO(thd, stage_updating);
while (likely(!(error=info.read_record())) && likely(!thd->killed) &&
likely(!thd->is_error()))
@@ -749,7 +823,25 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
break;
}
- error= table->delete_row();
+ if (table_list->has_period() && portion_of_time_through_update)
+ {
+ bool need_delete= true;
+ error= update_portion_of_time(thd, table, table_list->period_conditions,
+ &need_delete);
+ if (likely(!error) && need_delete)
+ error= table->delete_row();
+ }
+ else
+ {
+ error= table->delete_row();
+
+ ha_rows rows_inserted;
+ if (likely(!error) && table_list->has_period()
+ && !portion_of_time_through_update)
+ error= table->insert_portion_of_time(thd, table_list->period_conditions,
+ &rows_inserted);
+ }
+
if (likely(!error))
{
deleted++;
@@ -769,7 +861,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
else
{
table->file->print_error(error,
- MYF(thd->lex->ignore ? ME_JUST_WARNING : 0));
+ MYF(thd->lex->ignore ? ME_WARNING : 0));
if (thd->is_error())
{
error= 1;
@@ -799,6 +891,8 @@ terminate_delete:
}
THD_STAGE_INFO(thd, stage_end);
end_read_record(&info);
+ if (table_list->has_period())
+ table->file->ha_release_auto_increment();
if (options & OPTION_QUICK)
(void) table->file->extra(HA_EXTRA_NORMAL);
ANALYZE_STOP_TRACKING(&explain->command_tracker);
@@ -930,14 +1024,16 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list,
bool *delete_while_scanning)
{
Item *fake_conds= 0;
- SELECT_LEX *select_lex= &thd->lex->select_lex;
+ SELECT_LEX *select_lex= thd->lex->first_select_lex();
DBUG_ENTER("mysql_prepare_delete");
List<Item> all_fields;
*delete_while_scanning= true;
thd->lex->allow_sum_func.clear_all();
- if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context,
- &thd->lex->select_lex.top_join_list,
+ if (setup_tables_and_check_access(thd,
+ &thd->lex->first_select_lex()->context,
+ &thd->lex->first_select_lex()->
+ top_join_list,
table_list,
select_lex->leaf_tables, FALSE,
DELETE_ACL, SELECT_ACL, TRUE))
@@ -952,6 +1048,20 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list,
if (select_lex->vers_setup_conds(thd, table_list))
DBUG_RETURN(true);
}
+
+ if (table_list->has_period())
+ {
+ if (table_list->is_view_or_derived())
+ {
+ my_error(ER_IT_IS_A_VIEW, MYF(0), table_list->table_name.str);
+ DBUG_RETURN(true);
+ }
+
+ *conds= select_lex->period_setup_conds(thd, table_list, *conds);
+ if (!*conds)
+ DBUG_RETURN(true);
+ }
+
if ((wild_num && setup_wild(thd, table_list, field_list, NULL, wild_num,
&select_lex->hidden_bit_fields)) ||
setup_fields(thd, Ref_ptr_array(),
@@ -966,7 +1076,13 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
}
- if (unique_table(thd, table_list, table_list->next_global, 0))
+ /*
+ Application-time periods: if the FOR PORTION OF ... syntax is used, the DELETE
+ statement can issue delete_row calls mixed with write_row calls. This causes
+ problems for MyISAM and corrupts the table if deleting while scanning.
+ */
+ if (table_list->has_period()
+ || unique_table(thd, table_list, table_list->next_global, 0))
*delete_while_scanning= false;
if (select_lex->inner_refs_list.elements &&
@@ -1020,21 +1136,23 @@ int mysql_multi_delete_prepare(THD *thd)
lex->query_tables also point on local list of DELETE SELECT_LEX
*/
- if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context,
- &thd->lex->select_lex.top_join_list,
+ if (setup_tables_and_check_access(thd,
+ &thd->lex->first_select_lex()->context,
+ &thd->lex->first_select_lex()->
+ top_join_list,
lex->query_tables,
- lex->select_lex.leaf_tables, FALSE,
- DELETE_ACL, SELECT_ACL, FALSE))
+ lex->first_select_lex()->leaf_tables,
+ FALSE, DELETE_ACL, SELECT_ACL, FALSE))
DBUG_RETURN(TRUE);
- if (lex->select_lex.handle_derived(thd->lex, DT_MERGE))
+ if (lex->first_select_lex()->handle_derived(thd->lex, DT_MERGE))
DBUG_RETURN(TRUE);
/*
Multi-delete can't be constructed over-union => we always have
single SELECT on top and have to check underlying SELECTs of it
*/
- lex->select_lex.exclude_from_table_unique_test= TRUE;
+ lex->first_select_lex()->exclude_from_table_unique_test= TRUE;
/* Fix tables-to-be-deleted-from list to point at opened tables */
for (target_tbl= (TABLE_LIST*) aux_tables;
target_tbl;
@@ -1076,8 +1194,8 @@ int mysql_multi_delete_prepare(THD *thd)
Reset the exclude flag to false so it doesn't interfere
with further calls to unique_table
*/
- lex->select_lex.exclude_from_table_unique_test= FALSE;
-
+ lex->first_select_lex()->exclude_from_table_unique_test= FALSE;
+
if (lex->save_prep_leaf_tables())
DBUG_RETURN(TRUE);
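
The FOR PORTION OF support added to sql_delete.cc above follows the SQL:2016 rule quoted in the comment: deleting a portion of a row's application-time period leaves zero, one or two surviving fragments. update_portion_of_time() keeps the left fragment by shrinking the row in place and adds the right fragment with period_make_insert(), while rows lying entirely inside the portion are simply deleted. A standalone sketch of that boundary decision, using illustrative Period/PortionResult types rather than the server's TABLE machinery:

// Standalone sketch of the overlap decision behind DELETE ... FOR PORTION OF.
// Period/PortionResult are illustrative types, not the server's TABLE or
// vers_select_conds_t machinery; periods are half-open [start, end) intervals.
#include <cstdio>
#include <optional>

struct Period { long long start, end; };

struct PortionResult
{
  bool delete_original;                  // row lies entirely inside the portion
  std::optional<Period> left_fragment;   // kept by updating the row's period end
  std::optional<Period> right_fragment;  // kept by inserting an extra row
};

// The caller guarantees the row overlaps the deleted portion (the WHERE
// condition built by period_setup_conds() already restricts the scan to
// overlapping rows).
static PortionResult split_for_portion(Period row, Period portion)
{
  const bool sticks_out_left=  row.start < portion.start;   // "lcond" above
  const bool sticks_out_right= row.end   > portion.end;     // "rcond" above

  PortionResult res{};
  res.delete_original= !sticks_out_left && !sticks_out_right;
  if (sticks_out_left)
    res.left_fragment= Period{row.start, portion.start};
  if (sticks_out_right)
    res.right_fragment= Period{portion.end, row.end};
  return res;
}

int main()
{
  // Row valid over [10, 50), DELETE ... FOR PORTION OF p FROM 20 TO 30.
  PortionResult r= split_for_portion({10, 50}, {20, 30});
  std::printf("delete row: %s\n", r.delete_original ? "yes" : "no");
  if (r.left_fragment)
    std::printf("left fragment  [%lld, %lld)\n",
                r.left_fragment->start, r.left_fragment->end);
  if (r.right_fragment)
    std::printf("right fragment [%lld, %lld)\n",
                r.right_fragment->start, r.right_fragment->end);
  return 0;
}

Here both fragments survive, which is the "one UPDATE plus one INSERT" case: update_portion_of_time() shrinks the row to the left fragment and period_make_insert() adds the right one.
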
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 960ee948cf7..102999c42d7 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -27,23 +27,26 @@
#include "unireg.h"
#include "sql_derived.h"
#include "sql_select.h"
+#include "derived_handler.h"
#include "sql_base.h"
#include "sql_view.h" // check_duplicate_names
#include "sql_acl.h" // SELECT_ACL
#include "sql_class.h"
#include "sql_cte.h"
+#include "my_json_writer.h"
+#include "opt_trace.h"
typedef bool (*dt_processor)(THD *thd, LEX *lex, TABLE_LIST *derived);
-bool mysql_derived_init(THD *thd, LEX *lex, TABLE_LIST *derived);
-bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived);
-bool mysql_derived_optimize(THD *thd, LEX *lex, TABLE_LIST *derived);
-bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived);
-bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived);
-bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived);
-bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived);
-bool mysql_derived_merge_for_insert(THD *thd, LEX *lex, TABLE_LIST *derived);
-
+static bool mysql_derived_init(THD *thd, LEX *lex, TABLE_LIST *derived);
+static bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived);
+static bool mysql_derived_optimize(THD *thd, LEX *lex, TABLE_LIST *derived);
+static bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived);
+static bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived);
+static bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived);
+static bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived);
+static bool mysql_derived_merge_for_insert(THD *thd, LEX *lex,
+ TABLE_LIST *derived);
dt_processor processors[]=
{
@@ -100,7 +103,8 @@ mysql_handle_derived(LEX *lex, uint phases)
processed normally.
*/
if (phases == DT_MERGE_FOR_INSERT &&
- cursor && cursor->top_table()->select_lex != &lex->select_lex)
+ cursor && (cursor->top_table()->select_lex !=
+ lex->first_select_lex()))
continue;
for (;
cursor && !res;
@@ -179,7 +183,10 @@ mysql_handle_single_derived(LEX *lex, TABLE_LIST *derived, uint phases)
if (!lex->derived_tables)
DBUG_RETURN(FALSE);
- derived->select_lex->changed_elements|= TOUCHED_SEL_DERIVED;
+ if (derived->select_lex)
+ derived->select_lex->changed_elements|= TOUCHED_SEL_DERIVED;
+ else
+ DBUG_ASSERT(derived->prelocking_placeholder);
lex->thd->derived_tables_processing= TRUE;
for (uint phase= 0; phase < DT_PHASES; phase++)
@@ -199,6 +206,7 @@ mysql_handle_single_derived(LEX *lex, TABLE_LIST *derived, uint phases)
if ((res= (*processors[phase])(lex->thd, lex, derived)))
break;
}
+
lex->thd->derived_tables_processing= FALSE;
DBUG_RETURN(res);
}
@@ -327,6 +335,7 @@ mysql_handle_single_derived(LEX *lex, TABLE_LIST *derived, uint phases)
@return TRUE if an error occur.
*/
+static
bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived)
{
bool res= FALSE;
@@ -339,6 +348,7 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived)
DBUG_PRINT("enter", ("Alias: '%s' Unit: %p",
(derived->alias.str ? derived->alias.str : "<NULL>"),
derived->get_unit()));
+ const char *cause= NULL;
if (derived->merged)
{
@@ -350,14 +360,31 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived)
if (dt_select->uncacheable & UNCACHEABLE_RAND)
{
/* There is random function => fall back to materialization. */
+ cause= "Random function in the select";
+ if (unlikely(thd->trace_started()))
+ {
+ OPT_TRACE_VIEWS_TRANSFORM(thd, trace_wrapper, trace_derived,
+ derived->is_derived() ? "derived" : "view",
+ derived->alias.str ? derived->alias.str : "<NULL>",
+ derived->get_unit()->first_select()->select_number,
+ "materialized");
+ trace_derived.add("cause", cause);
+ }
derived->change_refs_to_fields();
derived->set_materialized_derived();
DBUG_RETURN(FALSE);
}
- if (thd->lex->sql_command == SQLCOM_UPDATE_MULTI ||
- thd->lex->sql_command == SQLCOM_DELETE_MULTI)
- thd->save_prep_leaf_list= TRUE;
+ if (derived->dt_handler)
+ {
+ derived->change_refs_to_fields();
+ derived->set_materialized_derived();
+ DBUG_RETURN(FALSE);
+ }
+
+ if (thd->lex->sql_command == SQLCOM_UPDATE_MULTI ||
+ thd->lex->sql_command == SQLCOM_DELETE_MULTI)
+ thd->save_prep_leaf_list= TRUE;
arena= thd->activate_stmt_arena_if_needed(&backup); // For easier test
@@ -372,15 +399,11 @@ bool mysql_derived_merge(THD *thd, LEX *lex, TABLE_LIST *derived)
and small subqueries, and the bigger one can't be merged it wouldn't
block the smaller one.
*/
- if (parent_lex->get_free_table_map(&map, &tablenr))
- {
- /* There is no enough table bits, fall back to materialization. */
- goto unconditional_materialization;
- }
-
- if (dt_select->leaf_tables.elements + tablenr > MAX_TABLES)
+ if (parent_lex->get_free_table_map(&map, &tablenr) ||
+ dt_select->leaf_tables.elements + tablenr > MAX_TABLES)
{
/* There are not enough table bits, fall back to materialization. */
+ cause= "Not enough table bits to merge subquery";
goto unconditional_materialization;
}
@@ -457,6 +480,17 @@ exit_merge:
DBUG_RETURN(res);
unconditional_materialization:
+
+ if (unlikely(thd->trace_started()))
+ {
+ OPT_TRACE_VIEWS_TRANSFORM(thd,trace_wrapper, trace_derived,
+ derived->is_derived() ? "derived" : "view",
+ derived->alias.str ? derived->alias.str : "<NULL>",
+ derived->get_unit()->first_select()->select_number,
+ "materialized");
+ trace_derived.add("cause", cause);
+ }
+
derived->change_refs_to_fields();
derived->set_materialized_derived();
if (!derived->table || !derived->table->is_created())
@@ -484,6 +518,7 @@ unconditional_materialization:
@return TRUE if an error occur.
*/
+static
bool mysql_derived_merge_for_insert(THD *thd, LEX *lex, TABLE_LIST *derived)
{
DBUG_ENTER("mysql_derived_merge_for_insert");
@@ -540,7 +575,7 @@ bool mysql_derived_merge_for_insert(THD *thd, LEX *lex, TABLE_LIST *derived)
true Error
*/
-
+static
bool mysql_derived_init(THD *thd, LEX *lex, TABLE_LIST *derived)
{
SELECT_LEX_UNIT *unit= derived->get_unit();
@@ -617,7 +652,7 @@ bool mysql_derived_init(THD *thd, LEX *lex, TABLE_LIST *derived)
true Error
*/
-
+static
bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
{
SELECT_LEX_UNIT *unit= derived->get_unit();
@@ -625,7 +660,6 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
DBUG_ENTER("mysql_derived_prepare");
DBUG_PRINT("enter", ("unit: %p table_list: %p alias: '%s'",
unit, derived, derived->alias.str));
-
if (!unit)
DBUG_RETURN(FALSE);
@@ -718,6 +752,18 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
}
}
+ if (unlikely(thd->trace_started()))
+ {
+ /*
+ Add to optimizer trace whether a derived table/view
+ is merged into the parent select or not.
+ */
+ OPT_TRACE_VIEWS_TRANSFORM(thd, trace_wrapper, trace_derived,
+ derived->is_derived() ? "derived" : "view",
+ derived->alias.str ? derived->alias.str : "<NULL>",
+ derived->get_unit()->first_select()->select_number,
+ derived->is_merged_derived() ? "merged" : "materialized");
+ }
/*
Above cascade call of prepare is important for PS protocol, but after it
is called we can check if we really need prepare for this derived
@@ -783,6 +829,24 @@ bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *derived)
if (derived->is_derived() && derived->is_merged_derived())
first_select->mark_as_belong_to_derived(derived);
+ derived->dt_handler= derived->find_derived_handler(thd);
+ if (derived->dt_handler)
+ {
+ char query_buff[4096];
+ String derived_query(query_buff, sizeof(query_buff), thd->charset());
+ derived_query.length(0);
+ derived->derived->print(&derived_query,
+ enum_query_type(QT_VIEW_INTERNAL |
+ QT_ITEM_ORIGINAL_FUNC_NULLIF |
+ QT_PARSABLE));
+ if (!thd->make_lex_string(&derived->derived_spec,
+ derived_query.ptr(), derived_query.length()))
+ {
+ delete derived->dt_handler;
+ derived->dt_handler= NULL;
+ }
+ }
+
exit:
/* Hide "Unknown column" or "Unknown function" error */
if (derived->view)
@@ -859,6 +923,7 @@ exit:
@return TRUE if an error occur.
*/
+static
bool mysql_derived_optimize(THD *thd, LEX *lex, TABLE_LIST *derived)
{
SELECT_LEX_UNIT *unit= derived->get_unit();
@@ -875,6 +940,18 @@ bool mysql_derived_optimize(THD *thd, LEX *lex, TABLE_LIST *derived)
DBUG_RETURN(FALSE);
}
+ if (derived->is_materialized_derived() && derived->dt_handler)
+ {
+ /* Create an object for execution of the query specifying the table */
+ if (!(derived->pushdown_derived=
+ new (thd->mem_root) Pushdown_derived(derived, derived->dt_handler)))
+ {
+ delete derived->dt_handler;
+ derived->dt_handler= NULL;
+ DBUG_RETURN(TRUE);
+ }
+ }
+
lex->current_select= first_select;
if (unit->is_unit_op())
@@ -903,6 +980,15 @@ bool mysql_derived_optimize(THD *thd, LEX *lex, TABLE_LIST *derived)
if (unit->optimized)
DBUG_RETURN(FALSE);
unit->optimized= TRUE;
+ if (!join)
+ {
+ /*
+ This happens when the derived table is used in a SELECT for which
+ zero_result_cause != 0.
+ In this case the join has already been destroyed.
+ */
+ DBUG_RETURN(FALSE);
+ }
}
if ((res= join->optimize()))
goto err;
@@ -949,6 +1035,7 @@ err:
@return TRUE if an error occur.
*/
+static
bool mysql_derived_create(THD *thd, LEX *lex, TABLE_LIST *derived)
{
DBUG_ENTER("mysql_derived_create");
@@ -1059,7 +1146,7 @@ bool TABLE_LIST::fill_recursive(THD *thd)
@return TRUE Error
*/
-
+static
bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived)
{
Field_iterator_table field_iterator;
@@ -1080,6 +1167,18 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived)
SELECT_LEX *save_current_select= lex->current_select;
bool derived_recursive_is_filled= false;
+ if (derived->pushdown_derived)
+ {
+ int res;
+ if (unit->executed)
+ DBUG_RETURN(FALSE);
+ /* Execute the query that specifies the derived table by a foreign engine */
+ res= derived->pushdown_derived->execute();
+ unit->executed= true;
+ delete derived->pushdown_derived;
+ DBUG_RETURN(res);
+ }
+
if (unit->executed && !derived_is_recursive &&
(unit->uncacheable & UNCACHEABLE_DEPENDENT))
{
@@ -1189,6 +1288,7 @@ err:
@return TRUE Error
*/
+static
bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived)
{
DBUG_ENTER("mysql_derived_reinit");
@@ -1202,11 +1302,6 @@ bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived)
unit->types.empty();
/* for derived tables & PS (which can't be reset by Item_subselect) */
unit->reinit_exec_mechanism();
- for (st_select_lex *sl= unit->first_select(); sl; sl= sl->next_select())
- {
- sl->cond_pushed_into_where= NULL;
- sl->cond_pushed_into_having= NULL;
- }
unit->set_thd(thd);
DBUG_RETURN(FALSE);
}
@@ -1214,25 +1309,61 @@ bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived)
/**
@brief
- Extract the condition depended on derived table/view and pushed it there
+ Extract condition that can be pushed into a derived table/view
- @param thd The thread handle
- @param cond The condition from which to extract the pushed condition
- @param derived The reference to the derived table/view
+ @param thd the thread handle
+ @param cond current condition
+ @param derived the reference to the derived table/view
@details
- This functiom builds the most restrictive condition depending only on
- the derived table/view that can be extracted from the condition cond.
- The built condition is pushed into the having clauses of the
- selects contained in the query specifying the derived table/view.
- The function also checks for each select whether any condition depending
- only on grouping fields can be extracted from the pushed condition.
- If so, it pushes the condition over grouping fields into the where
- clause of the select.
-
- @retval
- true if an error is reported
- false otherwise
+ This function builds the most restrictive condition depending only on
+ the derived table/view (directly or indirectly through equality) that
+ can be extracted from the given condition cond and pushes it into the
+ derived table/view.
+
+ Example of the transformation:
+
+ SELECT *
+ FROM t1,
+ (
+ SELECT x,MAX(y) AS max_y
+ FROM t2
+ GROUP BY x
+ ) AS d_tab
+ WHERE d_tab.x>1 AND d_tab.max_y<30;
+
+ =>
+
+ SELECT *
+ FROM t1,
+ (
+ SELECT x,z,MAX(y) AS max_y
+ FROM t2
+ WHERE x>1
+ HAVING max_y<30
+ GROUP BY x
+ ) AS d_tab
+ WHERE d_tab.x>1 AND d_tab.max_y<30;
+
+ In detail:
+ 1. Check what pushable formula can be extracted from cond
+ 2. Build a clone PC of the formula that can be extracted
+ (the clone is built only if the extracted formula is an AND subformula
+ of cond or a conjunction of such subformulas)
+ Do the following for every select specifying the derived table/view:
+ 3. If there is no HAVING clause prepare PC to be conjuncted with
+ WHERE clause of the select. Otherwise do 4-7.
+ 4. Check what formula PC_where can be extracted from PC to be pushed
+ into the WHERE clause of the select
+ 5. Build PC_where and if PC_where is a conjunct(s) of PC remove it from PC
+ getting PC_having
+ 6. Prepare PC_where to be conjuncted with the WHERE clause of the select
+ 7. Prepare PC_having to be conjuncted with the HAVING clause of the select
+ @note
+ This method is similar to pushdown_cond_for_in_subquery()
+
+ @retval TRUE if an error occurs
+ @retval FALSE otherwise
*/
bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived)
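
Steps 4-7 above split the pushable condition per select: conjuncts that refer only to the select's grouping (or window partition) columns can be moved into its WHERE clause, and whatever remains is transformed with derived_field_transformer_for_having and attached to HAVING. A toy model of that split, treating the condition as a list of predicates tagged with the derived-table columns they use (Predicate and split_pushed_condition are illustrative names, not server types):

// Toy model of steps 4-7: split the pushable condition PC, viewed as a
// conjunction of predicates, into a part usable in the select's WHERE clause
// (only grouping columns) and a remainder destined for HAVING.
#include <cstdio>
#include <set>
#include <string>
#include <vector>

struct Predicate
{
  std::string text;               // e.g. "x > 1"
  std::set<std::string> columns;  // derived-table columns it refers to
};

struct SplitCond
{
  std::vector<Predicate> into_where;   // "PC_where"
  std::vector<Predicate> into_having;  // "PC_having"
};

static SplitCond split_pushed_condition(const std::vector<Predicate> &pc,
                                        const std::set<std::string> &grouping)
{
  SplitCond out;
  for (const Predicate &p : pc)
  {
    bool only_grouping= true;
    for (const std::string &c : p.columns)
      only_grouping= only_grouping && grouping.count(c) != 0;
    (only_grouping ? out.into_where : out.into_having).push_back(p);
  }
  return out;
}

int main()
{
  // The example from the comment: d_tab groups by x, the outer condition is
  // d_tab.x > 1 AND d_tab.max_y < 30.
  std::vector<Predicate> pc= { {"x > 1", {"x"}}, {"max_y < 30", {"max_y"}} };
  SplitCond s= split_pushed_condition(pc, {"x"});
  for (const Predicate &p : s.into_where)
    std::printf("WHERE:  %s\n", p.text.c_str());
  for (const Predicate &p : s.into_having)
    std::printf("HAVING: %s\n", p.text.c_str());
  return 0;
}

With x as the only grouping column, x > 1 goes to WHERE and max_y < 30 to HAVING, reproducing the transformed query shown in the comment.
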
@@ -1272,63 +1403,25 @@ bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived)
if (!some_select_allows_cond_pushdown)
DBUG_RETURN(false);
- /*
- Build the most restrictive condition extractable from 'cond'
- that can be pushed into the derived table 'derived'.
- All subexpressions of this condition are cloned from the
- subexpressions of 'cond'.
- This condition has to be fixed yet.
- */
+ /* 1. Check what pushable formula can be extracted from cond */
Item *extracted_cond;
- derived->check_pushable_cond_for_table(cond);
- extracted_cond= derived->build_pushable_cond_for_table(thd, cond);
+ cond->check_pushable_cond(&Item::pushable_cond_checker_for_derived,
+ (uchar *)(&derived->table->map));
+ /* 2. Build a clone PC of the formula that can be extracted */
+ extracted_cond=
+ cond->build_pushable_cond(thd,
+ &Item::pushable_equality_checker_for_derived,
+ ((uchar *)&derived->table->map));
if (!extracted_cond)
{
/* Nothing can be pushed into the derived table */
DBUG_RETURN(false);
}
- /* Push extracted_cond into every select of the unit specifying 'derived' */
+
st_select_lex *save_curr_select= thd->lex->current_select;
for (; sl; sl= sl->next_select())
{
Item *extracted_cond_copy;
- if (!sl->cond_pushdown_is_allowed())
- continue;
- thd->lex->current_select= sl;
- if (sl->have_window_funcs())
- {
- if (sl->join->group_list || sl->join->implicit_grouping)
- continue;
- ORDER *common_partition_fields=
- sl->find_common_window_func_partition_fields(thd);
- if (!common_partition_fields)
- continue;
- extracted_cond_copy= !sl->next_select() ?
- extracted_cond :
- extracted_cond->build_clone(thd);
- if (!extracted_cond_copy)
- continue;
-
- Item *cond_over_partition_fields;;
- sl->collect_grouping_fields(thd, common_partition_fields);
- sl->check_cond_extraction_for_grouping_fields(extracted_cond_copy,
- derived);
- cond_over_partition_fields=
- sl->build_cond_for_grouping_fields(thd, extracted_cond_copy, true);
- if (cond_over_partition_fields)
- cond_over_partition_fields= cond_over_partition_fields->transform(thd,
- &Item::derived_grouping_field_transformer_for_where,
- (uchar*) sl);
- if (cond_over_partition_fields)
- {
- cond_over_partition_fields->walk(
- &Item::cleanup_excluding_const_fields_processor, 0, 0);
- sl->cond_pushed_into_where= cond_over_partition_fields;
- }
-
- continue;
- }
-
/*
For each select of the unit except the last one
create a clone of extracted_cond
@@ -1339,72 +1432,111 @@ bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived)
if (!extracted_cond_copy)
continue;
- if (!sl->join->group_list && !sl->with_sum_func)
- {
- /* extracted_cond_copy is pushed into where of sl */
- extracted_cond_copy= extracted_cond_copy->transform(thd,
- &Item::derived_field_transformer_for_where,
- (uchar*) sl);
- if (extracted_cond_copy)
- {
- extracted_cond_copy->walk(
- &Item::cleanup_excluding_const_fields_processor, 0, 0);
- sl->cond_pushed_into_where= extracted_cond_copy;
- }
-
- continue;
- }
-
- /*
- Figure out what can be extracted from the pushed condition
- that could be pushed into the where clause of sl
- */
- Item *cond_over_grouping_fields;
- sl->collect_grouping_fields(thd, sl->join->group_list);
- sl->check_cond_extraction_for_grouping_fields(extracted_cond_copy,
- derived);
- cond_over_grouping_fields=
- sl->build_cond_for_grouping_fields(thd, extracted_cond_copy, true);
-
- /*
- Transform the references to the 'derived' columns from the condition
- pushed into the where clause of sl to make them usable in the new context
- */
- if (cond_over_grouping_fields)
- cond_over_grouping_fields= cond_over_grouping_fields->transform(thd,
- &Item::derived_grouping_field_transformer_for_where,
- (uchar*) sl);
-
- if (cond_over_grouping_fields)
+ /* Collect fields that are used in the GROUP BY of sl */
+ if (sl->have_window_funcs())
{
- /*
- In extracted_cond_copy remove top conjuncts that
- has been pushed into the where clause of sl
- */
- extracted_cond_copy= remove_pushed_top_conjuncts(thd, extracted_cond_copy);
-
- cond_over_grouping_fields->walk(
- &Item::cleanup_excluding_const_fields_processor, 0, 0);
- sl->cond_pushed_into_where= cond_over_grouping_fields;
-
- if (!extracted_cond_copy)
+ if (sl->group_list.first || sl->join->implicit_grouping)
+ continue;
+ ORDER *common_partition_fields=
+ sl->find_common_window_func_partition_fields(thd);
+ if (!common_partition_fields)
continue;
+ sl->collect_grouping_fields_for_derived(thd, common_partition_fields);
}
+ else
+ sl->collect_grouping_fields_for_derived(thd, sl->group_list.first);
+
+ Item *remaining_cond= NULL;
+ /* Do 4-6 */
+ sl->pushdown_cond_into_where_clause(thd, extracted_cond_copy,
+ &remaining_cond,
+ &Item::derived_field_transformer_for_where,
+ (uchar *) sl);
+ if (!remaining_cond)
+ continue;
/*
- Transform the references to the 'derived' columns from the condition
- pushed into the having clause of sl to make them usable in the new context
+ 7. Prepare PC_having to be conjuncted with the HAVING clause of
+ the select
*/
- extracted_cond_copy= extracted_cond_copy->transform(thd,
- &Item::derived_field_transformer_for_having,
- (uchar*) sl);
- if (!extracted_cond_copy)
+ remaining_cond=
+ remaining_cond->transform(thd,
+ &Item::derived_field_transformer_for_having,
+ (uchar *) sl);
+ if (!remaining_cond)
+ continue;
+
+ if (remaining_cond->walk(&Item::cleanup_excluding_const_fields_processor,
+ 0, 0))
continue;
- extracted_cond_copy->walk(&Item::cleanup_excluding_const_fields_processor,
- 0, 0);
- sl->cond_pushed_into_having= extracted_cond_copy;
+ mark_or_conds_to_avoid_pushdown(remaining_cond);
+
+ sl->cond_pushed_into_having= remaining_cond;
}
thd->lex->current_select= save_curr_select;
DBUG_RETURN(false);
}
+
+
+/**
+ @brief
+ Look for provision of the derived_handler interface by a foreign engine
+
+ @param thd The thread handler
+
+ @details
+ The function looks through the tables of the query that specifies this
+ derived table, searching for a table whose handlerton owns a
+ create_derived call-back function. If the call of this function returns
+ a derived_handler interface object then the server will push the query
+ specifying the derived table into this engine.
+ It is the responsibility of the create_derived call-back function to
+ check whether the engine can execute the query.
+
+ @retval the found derived_handler if the search is successful
+ 0 otherwise
+*/
+
+derived_handler *TABLE_LIST::find_derived_handler(THD *thd)
+{
+ if (!derived || is_recursive_with_table())
+ return 0;
+ for (SELECT_LEX *sl= derived->first_select(); sl; sl= sl->next_select())
+ {
+ if (!(sl->join))
+ continue;
+ for (TABLE_LIST *tbl= sl->join->tables_list; tbl; tbl= tbl->next_local)
+ {
+ if (!tbl->table)
+ continue;
+ handlerton *ht= tbl->table->file->partition_ht();
+ if (!ht->create_derived)
+ continue;
+ derived_handler *dh= ht->create_derived(thd, this);
+ if (dh)
+ {
+ dh->set_derived(this);
+ return dh;
+ }
+ }
+ }
+ return 0;
+}
+
+
+TABLE_LIST *TABLE_LIST::get_first_table()
+{
+ for (SELECT_LEX *sl= derived->first_select(); sl; sl= sl->next_select())
+ {
+ if (!(sl->join))
+ continue;
+ for (TABLE_LIST *tbl= sl->join->tables_list; tbl; tbl= tbl->next_local)
+ {
+ if (!tbl->table)
+ continue;
+ return tbl;
+ }
+ }
+ return 0;
+}
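
find_derived_handler() above walks the tables of the derived table's specification and asks each one's engine, through the handlerton's create_derived call-back, whether it will execute that query itself; the first engine returning a handler wins, and a null result everywhere means the derived table is handled locally as before. A rough model of that selection, with Engine/DerivedHandler as hypothetical stand-ins for handlerton and the derived_handler interface:

// Hypothetical model of the derived_handler lookup; Engine and DerivedHandler
// stand in for handlerton and the derived_handler interface of the patch.
#include <cstdio>
#include <memory>
#include <string>
#include <vector>

struct DerivedHandler
{
  std::string engine;
  void execute() const
  { std::printf("derived table executed by %s\n", engine.c_str()); }
};

struct Engine
{
  std::string name;
  bool offers_derived_handler;   // i.e. implements a create_derived call-back
  // Returning nullptr is how an engine signals it cannot run this query.
  std::unique_ptr<DerivedHandler> create_derived(const std::string &) const
  {
    if (!offers_derived_handler)
      return nullptr;
    return std::unique_ptr<DerivedHandler>(new DerivedHandler{name});
  }
};

struct TableRef { const Engine *engine; };

// First engine in the derived query's table list that accepts the query
// provides the handler; otherwise the derived table is materialized locally.
static std::unique_ptr<DerivedHandler>
find_derived_handler(const std::vector<TableRef> &tables,
                     const std::string &query)
{
  for (const TableRef &t : tables)
    if (t.engine)
      if (auto dh= t.engine->create_derived(query))
        return dh;
  return nullptr;
}

int main()
{
  Engine local{"InnoDB", false}, remote{"federated-like engine", true};
  std::vector<TableRef> from{ {&local}, {&remote} };
  if (auto dh= find_derived_handler(from, "SELECT x, MAX(y) FROM t2 GROUP BY x"))
    dh->execute();
  else
    std::puts("no engine offered a handler: materialize the derived table locally");
  return 0;
}

In the patch the chosen handler is stored in TABLE_LIST::dt_handler, forces the derived table to be materialized, and is later wrapped in a Pushdown_derived object that mysql_derived_fill() executes instead of running the join locally.
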
diff --git a/sql/sql_derived.h b/sql/sql_derived.h
index abfdb007072..2454d40ba79 100644
--- a/sql/sql_derived.h
+++ b/sql/sql_derived.h
@@ -22,7 +22,6 @@ struct LEX;
bool mysql_handle_derived(LEX *lex, uint phases);
bool mysql_handle_single_derived(LEX *lex, TABLE_LIST *derived, uint phases);
-bool mysql_derived_reinit(THD *thd, LEX *lex, TABLE_LIST *derived);
bool pushdown_cond_for_derived(THD *thd, Item *cond, TABLE_LIST *derived);
diff --git a/sql/sql_do.cc b/sql/sql_do.cc
index 2a4e43ab78a..1652b313909 100644
--- a/sql/sql_do.cc
+++ b/sql/sql_do.cc
@@ -33,7 +33,7 @@ bool mysql_do(THD *thd, List<Item> &values)
DBUG_RETURN(TRUE);
while ((value = li++))
(void) value->is_null();
- free_underlaid_joins(thd, &thd->lex->select_lex);
+ free_underlaid_joins(thd, thd->lex->first_select_lex());
if (unlikely(thd->is_error()))
{
diff --git a/sql/sql_error.cc b/sql/sql_error.cc
index d6f5b99eef6..8d639f9271d 100644
--- a/sql/sql_error.cc
+++ b/sql/sql_error.cc
@@ -781,7 +781,7 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show)
List<Item> field_list;
MEM_ROOT *mem_root= thd->mem_root;
const Sql_condition *err;
- SELECT_LEX *sel= &thd->lex->select_lex;
+ SELECT_LEX *sel= thd->lex->first_select_lex();
SELECT_LEX_UNIT *unit= &thd->lex->unit;
ulonglong idx= 0;
Protocol *protocol=thd->protocol;
diff --git a/sql/sql_error.h b/sql/sql_error.h
index 822503f89d3..6586c49a125 100644
--- a/sql/sql_error.h
+++ b/sql/sql_error.h
@@ -814,11 +814,48 @@ private:
extern char *err_conv(char *buff, uint to_length, const char *from,
uint from_length, CHARSET_INFO *from_cs);
-class ErrConv
+class ErrBuff
{
protected:
mutable char err_buffer[MYSQL_ERRMSG_SIZE];
public:
+ ErrBuff()
+ {
+ err_buffer[0]= '\0';
+ }
+ const char *ptr() const { return err_buffer; }
+ const char *set_longlong(const Longlong_hybrid &nr) const
+ {
+ return nr.is_unsigned() ? ullstr(nr.value(), err_buffer) :
+ llstr(nr.value(), err_buffer);
+ }
+ const char *set_double(double nr) const
+ {
+ my_gcvt(nr, MY_GCVT_ARG_DOUBLE, sizeof(err_buffer), err_buffer, 0);
+ return err_buffer;
+ }
+ const char *set_decimal(const decimal_t *d) const
+ {
+ int len= sizeof(err_buffer);
+ decimal2string(d, err_buffer, &len, 0, 0, ' ');
+ return err_buffer;
+ }
+ const char *set_str(const char *str, size_t len, CHARSET_INFO *cs) const
+ {
+ DBUG_ASSERT(len < UINT_MAX32);
+ return err_conv(err_buffer, (uint) sizeof(err_buffer), str, (uint) len, cs);
+ }
+ const char *set_mysql_time(const MYSQL_TIME *ltime) const
+ {
+ my_TIME_to_str(ltime, err_buffer, AUTO_SEC_PART_DIGITS);
+ return err_buffer;
+ }
+};
+
+
+class ErrConv: public ErrBuff
+{
+public:
ErrConv() {}
virtual ~ErrConv() {}
virtual const char *ptr() const = 0;
@@ -838,20 +875,18 @@ public:
: ErrConv(), str(s->ptr()), len(s->length()), cs(s->charset()) {}
const char *ptr() const
{
- DBUG_ASSERT(len < UINT_MAX32);
- return err_conv(err_buffer, (uint) sizeof(err_buffer), str, (uint) len, cs);
+ return set_str(str, len, cs);
}
};
class ErrConvInteger : public ErrConv, public Longlong_hybrid
{
public:
- ErrConvInteger(longlong num_arg, bool unsigned_flag= false) :
- ErrConv(), Longlong_hybrid(num_arg, unsigned_flag) {}
+ ErrConvInteger(const Longlong_hybrid &nr)
+ : ErrConv(), Longlong_hybrid(nr) { }
const char *ptr() const
{
- return m_unsigned ? ullstr(m_value, err_buffer) :
- llstr(m_value, err_buffer);
+ return set_longlong(static_cast<Longlong_hybrid>(*this));
}
};
@@ -862,8 +897,7 @@ public:
ErrConvDouble(double num_arg) : ErrConv(), num(num_arg) {}
const char *ptr() const
{
- my_gcvt(num, MY_GCVT_ARG_DOUBLE, sizeof(err_buffer), err_buffer, 0);
- return err_buffer;
+ return set_double(num);
}
};
@@ -874,8 +908,7 @@ public:
ErrConvTime(const MYSQL_TIME *ltime_arg) : ErrConv(), ltime(ltime_arg) {}
const char *ptr() const
{
- my_TIME_to_str(ltime, err_buffer, AUTO_SEC_PART_DIGITS);
- return err_buffer;
+ return set_mysql_time(ltime);
}
};
@@ -886,9 +919,7 @@ public:
ErrConvDecimal(const decimal_t *d_arg) : ErrConv(), d(d_arg) {}
const char *ptr() const
{
- int len= sizeof(err_buffer);
- decimal2string(d, err_buffer, &len, 0, 0, ' ');
- return err_buffer;
+ return set_decimal(d);
}
};
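
The sql_error.h hunks above factor the error buffer and its set_longlong()/set_double()/set_decimal()/set_str()/set_mysql_time() helpers out of ErrConv into a new non-virtual ErrBuff base, so each ErrConvXxx::ptr() shrinks to a one-line delegation and the helpers can also be used without constructing an ErrConv object. A minimal sketch of the same shape (simplified stand-in names, snprintf instead of the server's conversion functions):

// Minimal sketch of the ErrBuff/ErrConv split: the non-virtual base owns the
// buffer and the formatting helpers, the virtual wrappers only delegate.
#include <cstdio>

class ErrBuffDemo
{
protected:
  mutable char buf[64];
public:
  ErrBuffDemo() { buf[0]= '\0'; }
  const char *set_longlong(long long v) const
  { std::snprintf(buf, sizeof buf, "%lld", v); return buf; }
  const char *set_double(double v) const
  { std::snprintf(buf, sizeof buf, "%g", v); return buf; }
};

class ErrConvDemo : public ErrBuffDemo
{
public:
  virtual ~ErrConvDemo() {}
  virtual const char *ptr() const= 0;
};

class ErrConvDoubleDemo : public ErrConvDemo
{
  double num;
public:
  explicit ErrConvDoubleDemo(double n) : num(n) {}
  const char *ptr() const override { return set_double(num); } // one-line delegation
};

int main()
{
  ErrConvDoubleDemo d(3.25);
  std::puts(d.ptr());                   // through the virtual interface
  ErrBuffDemo plain;
  std::puts(plain.set_longlong(42));    // or the helpers directly, no vtable needed
  return 0;
}
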
diff --git a/sql/sql_explain.cc b/sql/sql_explain.cc
index 1c45b05ccc5..b14cffdc466 100644
--- a/sql/sql_explain.cc
+++ b/sql/sql_explain.cc
@@ -34,6 +34,9 @@ const char *unit_operation_text[4]=
"UNIT RESULT","UNION RESULT","INTERSECT RESULT","EXCEPT RESULT"
};
+const char *pushed_derived_text= "PUSHED DERIVED";
+const char *pushed_select_text= "PUSHED SELECT";
+
static void write_item(Json_writer *writer, Item *item);
static void append_item_to_str(String *out, Item *item);
@@ -233,7 +236,7 @@ void Explain_query::print_explain_json(select_result_sink *output,
CHARSET_INFO *cs= system_charset_info;
List<Item> item_list;
- String *buf= &writer.output;
+ const String *buf= writer.output.get_string();
item_list.push_back(new (thd->mem_root)
Item_string(thd, buf->ptr(), buf->length(), cs),
thd->mem_root);
@@ -334,6 +337,9 @@ int print_explain_row(select_result_sink *result,
List<Item> item_list;
Item *item;
+ if (!select_type[0])
+ return 0;
+
item_list.push_back(new (mem_root) Item_int(thd, (int32) select_number),
mem_root);
item_list.push_back(new (mem_root) Item_string_sys(thd, select_type),
@@ -380,21 +386,31 @@ int print_explain_row(select_result_sink *result,
item_list.push_back(item, mem_root);
/* 'rows' */
+ StringBuffer<64> rows_str;
if (rows)
{
+ rows_str.append_ulonglong((ulonglong)(*rows));
item_list.push_back(new (mem_root)
- Item_int(thd, *rows, MY_INT64_NUM_DECIMAL_DIGITS),
- mem_root);
+ Item_string_sys(thd, rows_str.ptr(),
+ rows_str.length()), mem_root);
}
else
item_list.push_back(item_null, mem_root);
/* 'r_rows' */
+ StringBuffer<64> r_rows_str;
if (is_analyze)
{
if (r_rows)
- item_list.push_back(new (mem_root) Item_float(thd, *r_rows, 2),
- mem_root);
+ {
+ Item_float *fl= new (mem_root) Item_float(thd, *r_rows, 2);
+ String tmp;
+ String *res= fl->val_str(&tmp);
+ r_rows_str.append(res->ptr());
+ item_list.push_back(new (mem_root)
+ Item_string_sys(thd, r_rows_str.ptr(),
+ r_rows_str.length()), mem_root);
+ }
else
item_list.push_back(item_null, mem_root);
}
@@ -527,10 +543,17 @@ int Explain_union::print_explain(Explain_query *query,
item_list.push_back(item_null, mem_root);
/* `r_rows` */
+ StringBuffer<64> r_rows_str;
if (is_analyze)
{
double avg_rows= fake_select_lex_tracker.get_avg_rows();
- item_list.push_back(new (mem_root) Item_float(thd, avg_rows, 2), mem_root);
+ Item_float *fl= new (mem_root) Item_float(thd, avg_rows, 2);
+ String tmp;
+ String *res= fl->val_str(&tmp);
+ r_rows_str.append(res->ptr());
+ item_list.push_back(new (mem_root)
+ Item_string_sys(thd, r_rows_str.ptr(),
+ r_rows_str.length()), mem_root);
}
/* `filtered` */
@@ -746,7 +769,15 @@ int Explain_select::print_explain(Explain_query *query,
THD *thd= output->thd;
MEM_ROOT *mem_root= thd->mem_root;
- if (message)
+ if (select_type == pushed_derived_text || select_type == pushed_select_text)
+ {
+ print_explain_message_line(output, explain_flags, is_analyze,
+ select_id /*select number*/,
+ select_type,
+ NULL, /* rows */
+ NULL);
+ }
+ else if (message)
{
List<Item> item_list;
Item *item_null= new (mem_root) Item_null(thd);
@@ -869,14 +900,20 @@ void Explain_select::print_explain_json(Explain_query *query,
bool started_cache= print_explain_json_cache(writer, is_analyze);
- if (message)
+ if (message ||
+ select_type == pushed_derived_text ||
+ select_type == pushed_select_text)
{
writer->add_member("query_block").start_object();
writer->add_member("select_id").add_ll(select_id);
add_linkage(writer);
writer->add_member("table").start_object();
- writer->add_member("message").add_str(message);
+ writer->add_member("message").add_str(select_type == pushed_derived_text ?
+ "Pushed derived" :
+ select_type == pushed_select_text ?
+ "Pushed select" :
+ message);
writer->end_object();
print_explain_json_for_children(query, writer, is_analyze);
@@ -1113,12 +1150,14 @@ void Explain_table_access::fill_key_str(String *key_str, bool is_json) const
- this is just used key length for ref/range
- for index_merge, it is a comma-separated list of lengths.
- for hash join, it is key_len:pseudo_key_len
+ - [tabular form only] rowid filter length is added after "|".
- The column looks identical in tabular and json forms. In JSON, we consider
- the column legacy, it is superceded by used_key_parts.
+ In JSON, we consider this column to be legacy; it is superseded by
+ used_key_parts.
*/
-void Explain_table_access::fill_key_len_str(String *key_len_str) const
+void Explain_table_access::fill_key_len_str(String *key_len_str,
+ bool is_json) const
{
bool is_hj= (type == JT_HASH || type == JT_HASH_NEXT ||
type == JT_HASH_RANGE || type == JT_HASH_INDEX_MERGE);
@@ -1146,6 +1185,14 @@ void Explain_table_access::fill_key_len_str(String *key_len_str) const
length= longlong10_to_str(hash_next_key.get_key_len(), buf, 10) - buf;
key_len_str->append(buf, length);
}
+
+ if (!is_json && rowid_filter)
+ {
+ key_len_str->append('|');
+ StringBuffer<64> filter_key_len;
+ rowid_filter->quick->print_key_len(&filter_key_len);
+ key_len_str->append(filter_key_len);
+ }
}
@@ -1231,7 +1278,18 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
}
/* `type` column */
- push_str(thd, &item_list, join_type_str[type]);
+ StringBuffer<64> join_type_buf;
+ if (rowid_filter == NULL)
+ push_str(thd, &item_list, join_type_str[type]);
+ else
+ {
+ join_type_buf.append(join_type_str[type]);
+ join_type_buf.append("|filter");
+ item_list.push_back(new (mem_root)
+ Item_string_sys(thd, join_type_buf.ptr(),
+ join_type_buf.length()),
+ mem_root);
+ }
/* `possible_keys` column */
StringBuffer<64> possible_keys_buf;
@@ -1243,6 +1301,14 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
/* `key` */
StringBuffer<64> key_str;
fill_key_str(&key_str, false);
+
+ if (rowid_filter)
+ {
+ key_str.append("|");
+ StringBuffer<64> rowid_key_str;
+ rowid_filter->quick->print_key(&rowid_key_str);
+ key_str.append(rowid_key_str);
+ }
if (key_str.length() > 0)
push_string(thd, &item_list, &key_str);
@@ -1251,7 +1317,7 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
/* `key_len` */
StringBuffer<64> key_len_str;
- fill_key_len_str(&key_len_str);
+ fill_key_len_str(&key_len_str, false);
if (key_len_str.length() > 0)
push_string(thd, &item_list, &key_len_str);
@@ -1274,17 +1340,27 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
push_string_list(thd, &item_list, ref_list, &ref_list_buf);
/* `rows` */
+ StringBuffer<64> rows_str;
if (rows_set)
{
+ rows_str.append_ulonglong((ulonglong)rows);
+
+ if (rowid_filter)
+ {
+ rows_str.append(" (");
+ rows_str.append_ulonglong((ulonglong) (round(rowid_filter->selectivity *
+ 100.0)));
+ rows_str.append("%)");
+ }
item_list.push_back(new (mem_root)
- Item_int(thd, (longlong) (ulonglong) rows,
- MY_INT64_NUM_DECIMAL_DIGITS),
- mem_root);
+ Item_string_sys(thd, rows_str.ptr(),
+ rows_str.length()), mem_root);
}
else
item_list.push_back(item_null, mem_root);
/* `r_rows` */
+ StringBuffer<64> r_rows_str;
if (is_analyze)
{
if (!tracker.has_scans())
@@ -1294,8 +1370,20 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
else
{
double avg_rows= tracker.get_avg_rows();
- item_list.push_back(new (mem_root) Item_float(thd, avg_rows, 2),
- mem_root);
+ Item_float *fl= new (mem_root) Item_float(thd, avg_rows, 2);
+ String tmp;
+ String *res= fl->val_str(&tmp);
+ r_rows_str.append(res->ptr());
+ if (rowid_filter)
+ {
+ r_rows_str.append(" (");
+ r_rows_str.append_ulonglong(
+ (ulonglong) (rowid_filter->tracker->get_r_selectivity_pct() * 100.0));
+ r_rows_str.append("%)");
+ }
+ item_list.push_back(new (mem_root)
+ Item_string_sys(thd, r_rows_str.ptr(),
+ r_rows_str.length()), mem_root);
}
}
@@ -1359,6 +1447,15 @@ int Explain_table_access::print_explain(select_result_sink *output, uint8 explai
extra_buf.append(STRING_WITH_LEN("Using filesort"));
}
+ if (rowid_filter)
+ {
+ if (first)
+ first= false;
+ else
+ extra_buf.append(STRING_WITH_LEN("; "));
+ extra_buf.append(STRING_WITH_LEN("Using rowid filter"));
+ }
+
item_list.push_back(new (mem_root)
Item_string_sys(thd, extra_buf.ptr(),
extra_buf.length()),
@@ -1547,6 +1644,29 @@ void add_json_keyset(Json_writer *writer, const char *elem_name,
}
+void Explain_rowid_filter::print_explain_json(Explain_query *query,
+ Json_writer *writer,
+ bool is_analyze)
+{
+ Json_writer_nesting_guard guard(writer);
+ writer->add_member("rowid_filter").start_object();
+ quick->print_json(writer);
+ writer->add_member("rows").add_ll(rows);
+ writer->add_member("selectivity_pct").add_double(selectivity * 100.0);
+ if (is_analyze)
+ {
+ writer->add_member("r_rows").add_double(tracker->get_container_elements());
+ writer->add_member("r_selectivity_pct").
+ add_double(tracker->get_r_selectivity_pct() * 100.0);
+ writer->add_member("r_buffer_size").
+ add_double((double) (tracker->get_container_buff_size()));
+ writer->add_member("r_filling_time_ms").
+ add_double(tracker->get_time_fill_container_ms());
+ }
+ writer->end_object(); // rowid_filter
+}
+
+
void Explain_table_access::print_explain_json(Explain_query *query,
Json_writer *writer,
bool is_analyze)
@@ -1619,7 +1739,7 @@ void Explain_table_access::print_explain_json(Explain_query *query,
/* `key_length` */
StringBuffer<64> key_len_str;
- fill_key_len_str(&key_len_str);
+ fill_key_len_str(&key_len_str, true);
if (key_len_str.length())
writer->add_member("key_length").add_str(key_len_str);
@@ -1644,6 +1764,11 @@ void Explain_table_access::print_explain_json(Explain_query *query,
if (!ref_list.is_empty())
print_json_array(writer, "ref", ref_list);
+ if (rowid_filter)
+ {
+ rowid_filter->print_explain_json(query, writer, is_analyze);
+ }
+
/* r_loops (not present in tabular output) */
if (is_analyze)
{
@@ -1676,8 +1801,10 @@ void Explain_table_access::print_explain_json(Explain_query *query,
if (op_tracker.get_loops())
{
- writer->add_member("r_total_time_ms").
- add_double(op_tracker.get_time_ms());
+ double total_time= op_tracker.get_time_ms();
+ if (rowid_filter)
+ total_time+= rowid_filter->tracker->get_time_fill_container_ms();
+ writer->add_member("r_total_time_ms").add_double(total_time);
}
}
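
When a rowid filter is attached to a table access, the tabular EXPLAIN changes above annotate several columns at once: type becomes "<type>|filter", key and key_len get the filter's index and key length appended after "|", rows gains the expected selectivity in percent, and Extra gains "Using rowid filter" (ANALYZE adds the observed percentage to r_rows the same way). A rough sketch of that string assembly with invented values, standing in for the Explain_rowid_filter / StringBuffer code:

// Rough illustration of the annotated tabular EXPLAIN columns; the values are
// invented and RowidFilterInfo is a stand-in for Explain_rowid_filter.
#include <cmath>
#include <cstdio>
#include <string>

struct RowidFilterInfo
{
  std::string key;     // index the filter container is built from
  unsigned key_len;    // its key length, appended after "|"
  double selectivity;  // expected fraction of rows passing the filter, 0..1
};

int main()
{
  RowidFilterInfo f= {"ix_date", 4, 0.073};

  std::string type=    std::string("range") + "|filter";
  std::string key=     std::string("ix_customer") + "|" + f.key;
  std::string key_len= std::to_string(8) + "|" + std::to_string(f.key_len);

  char rows[64];
  std::snprintf(rows, sizeof rows, "%llu (%ld%%)", 1024ULL,
                std::lround(f.selectivity * 100.0));

  std::printf("type=%s  key=%s  key_len=%s  rows=%s  Extra=Using rowid filter\n",
              type.c_str(), key.c_str(), key_len.c_str(), rows);
  return 0;
}

The JSON output does not get these suffixes: fill_key_len_str() now takes an is_json flag, and the filter details go into a separate rowid_filter object written by Explain_rowid_filter::print_explain_json().
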
diff --git a/sql/sql_explain.h b/sql/sql_explain.h
index 38250cc40ce..9478cd56a9b 100644
--- a/sql/sql_explain.h
+++ b/sql/sql_explain.h
@@ -328,6 +328,8 @@ public:
/////////////////////////////////////////////////////////////////////////////
extern const char *unit_operation_text[4];
+extern const char *pushed_derived_text;
+extern const char *pushed_select_text;
/*
Explain structure for a UNION.
@@ -583,6 +585,8 @@ class Explain_index_use : public Sql_alloc
{
char *key_name;
uint key_len;
+ char *filter_name;
+ uint filter_len;
public:
String_list key_parts_list;
@@ -595,12 +599,46 @@ public:
{
key_name= NULL;
key_len= (uint)-1;
+ filter_name= NULL;
+ filter_len= (uint)-1;
}
bool set(MEM_ROOT *root, KEY *key_name, uint key_len_arg);
bool set_pseudo_key(MEM_ROOT *root, const char *key_name);
inline const char *get_key_name() const { return key_name; }
inline uint get_key_len() const { return key_len; }
+ //inline const char *get_filter_name() const { return filter_name; }
+};
+
+
+/*
+ Query Plan data structure for Rowid filter.
+*/
+class Explain_rowid_filter : public Sql_alloc
+{
+public:
+ /* Quick select used to collect the rowids into filter */
+ Explain_quick_select *quick;
+
+ /* How many rows the above quick select is expected to return */
+ ha_rows rows;
+
+ /* Expected selectivity for the filter */
+ double selectivity;
+
+ /* Tracker with the information about how rowid filter is executed */
+ Rowid_filter_tracker *tracker;
+
+ void print_explain_json(Explain_query *query, Json_writer *writer,
+ bool is_analyze);
+
+ /*
+ TODO:
+ Here should be ANALYZE members:
+ - r_rows for the quick select
+ - An object that tracked the table access time
+ - real selectivity of the filter.
+ */
};
@@ -670,6 +708,7 @@ public:
void print_json(Json_writer *writer, bool is_analyze);
};
+
/*
EXPLAIN data structure for a single JOIN_TAB.
*/
@@ -689,7 +728,8 @@ public:
cache_cond(NULL),
pushed_index_cond(NULL),
sjm_nest(NULL),
- pre_join_sort(NULL)
+ pre_join_sort(NULL),
+ rowid_filter(NULL)
{}
~Explain_table_access() { delete sjm_nest; }
@@ -796,6 +836,8 @@ public:
Exec_time_tracker op_tracker;
Table_access_tracker jbuf_tracker;
+ Explain_rowid_filter *rowid_filter;
+
int print_explain(select_result_sink *output, uint8 explain_flags,
bool is_analyze,
uint select_id, const char *select_type,
@@ -806,7 +848,7 @@ public:
private:
void append_tag_name(String *str, enum explain_extra_tag tag);
void fill_key_str(String *key_str, bool is_json) const;
- void fill_key_len_str(String *key_len_str) const;
+ void fill_key_len_str(String *key_len_str, bool is_json) const;
double get_r_filtered();
void tag_to_json(Json_writer *writer, enum explain_extra_tag tag);
};
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index f0a395d63d5..9da1d314393 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -433,8 +433,6 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen)
/* Always read all columns */
table->read_set= &table->s->all_set;
- if (table->vcol_set)
- table->vcol_set= &table->s->all_set;
/* Restore the state. */
thd->set_open_tables(backup_open_tables);
@@ -1195,10 +1193,10 @@ void mysql_ha_flush(THD *thd)
@note Broadcasts refresh if it closed a table with old version.
*/
-void mysql_ha_cleanup(THD *thd)
+void mysql_ha_cleanup_no_free(THD *thd)
{
SQL_HANDLER *hash_tables;
- DBUG_ENTER("mysql_ha_cleanup");
+ DBUG_ENTER("mysql_ha_cleanup_no_free");
for (uint i= 0; i < thd->handler_tables_hash.records; i++)
{
@@ -1206,9 +1204,15 @@ void mysql_ha_cleanup(THD *thd)
if (hash_tables->table)
mysql_ha_close_table(hash_tables);
}
+ DBUG_VOID_RETURN;
+}
- my_hash_free(&thd->handler_tables_hash);
+void mysql_ha_cleanup(THD *thd)
+{
+ DBUG_ENTER("mysql_ha_cleanup");
+ mysql_ha_cleanup_no_free(thd);
+ my_hash_free(&thd->handler_tables_hash);
DBUG_VOID_RETURN;
}
diff --git a/sql/sql_handler.h b/sql/sql_handler.h
index 4c16f7e5c57..16063bb1f35 100644
--- a/sql/sql_handler.h
+++ b/sql/sql_handler.h
@@ -73,6 +73,7 @@ bool mysql_ha_read(THD *, TABLE_LIST *,enum enum_ha_read_modes, const char *,
void mysql_ha_flush(THD *thd);
void mysql_ha_flush_tables(THD *thd, TABLE_LIST *all_tables);
void mysql_ha_rm_tables(THD *thd, TABLE_LIST *tables);
+void mysql_ha_cleanup_no_free(THD *thd);
void mysql_ha_cleanup(THD *thd);
void mysql_ha_set_explicit_lock_duration(THD *thd);
void mysql_ha_rm_temporary_tables(THD *thd);
diff --git a/sql/sql_help.cc b/sql/sql_help.cc
index aa9f3fedd6d..95bc6ade366 100644
--- a/sql/sql_help.cc
+++ b/sql/sql_help.cc
@@ -87,7 +87,7 @@ enum enum_used_fields
static bool init_fields(THD *thd, TABLE_LIST *tables,
struct st_find_field *find_fields, uint count)
{
- Name_resolution_context *context= &thd->lex->select_lex.context;
+ Name_resolution_context *context= &thd->lex->first_select_lex()->context;
DBUG_ENTER("init_fields");
context->resolve_in_table_list_only(tables);
for (; count-- ; find_fields++)
@@ -719,10 +719,11 @@ static bool mysqld_help_internal(THD *thd, const char *mask)
Init tables and fields to be usable from items
tables do not contain VIEWs => we can pass 0 as conds
*/
- thd->lex->select_lex.context.table_list=
- thd->lex->select_lex.context.first_name_resolution_table= &tables[0];
- if (setup_tables(thd, &thd->lex->select_lex.context,
- &thd->lex->select_lex.top_join_list,
+ thd->lex->first_select_lex()->context.table_list=
+ thd->lex->first_select_lex()->context.first_name_resolution_table=
+ &tables[0];
+ if (setup_tables(thd, &thd->lex->first_select_lex()->context,
+ &thd->lex->first_select_lex()->top_join_list,
tables, leaves, FALSE, FALSE))
goto error;
memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields));
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index b1f68e049cf..e5d33b91804 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -82,6 +82,10 @@
#include "debug_sync.h"
+#ifdef WITH_WSREP
+#include "wsrep_trans_observer.h" /* wsrep_start_transction() */
+#endif /* WITH_WSREP */
+
#ifndef EMBEDDED_LIBRARY
static bool delayed_get_table(THD *thd, MDL_request *grl_protection_request,
TABLE_LIST *table_list);
@@ -241,7 +245,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
}
else
{ // Part field list
- SELECT_LEX *select_lex= &thd->lex->select_lex;
+ SELECT_LEX *select_lex= thd->lex->first_select_lex();
Name_resolution_context *context= &select_lex->context;
Name_resolution_context_state ctx_state;
int res;
@@ -273,7 +277,7 @@ static int check_insert_fields(THD *thd, TABLE_LIST *table_list,
/* Restore the current context. */
ctx_state.restore_state(context, table_list);
- thd->lex->select_lex.no_wrap_view_item= FALSE;
+ thd->lex->first_select_lex()->no_wrap_view_item= FALSE;
if (res)
DBUG_RETURN(-1);
@@ -547,10 +551,10 @@ bool open_and_lock_for_insert_delayed(THD *thd, TABLE_LIST *table_list)
If this goes ok, the tickets are cloned and added to the list of granted
locks held by the handler thread.
*/
- if (thd->global_read_lock.can_acquire_protection())
+ if (thd->has_read_only_protection())
DBUG_RETURN(TRUE);
- protection_request.init(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE,
+ protection_request.init(MDL_key::BACKUP, "", "", MDL_BACKUP_DML,
MDL_STATEMENT);
if (thd->mdl_context.acquire_lock(&protection_request,
@@ -640,7 +644,7 @@ create_insert_stmt_from_insert_delayed(THD *thd, String *buf)
if (buf->append(thd->query()) ||
buf->replace(thd->lex->keyword_delayed_begin_offset,
thd->lex->keyword_delayed_end_offset -
- thd->lex->keyword_delayed_begin_offset, 0))
+ thd->lex->keyword_delayed_begin_offset, NULL, 0))
return 1;
return 0;
}
@@ -657,7 +661,7 @@ static void save_insert_query_plan(THD* thd, TABLE_LIST *table_list)
bool skip= MY_TEST(table_list->view);
/* Save subquery children */
- for (SELECT_LEX_UNIT *unit= thd->lex->select_lex.first_inner_unit();
+ for (SELECT_LEX_UNIT *unit= thd->lex->first_select_lex()->first_inner_unit();
unit;
unit= unit->next_unit())
{
@@ -777,7 +781,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
/* mysql_prepare_insert sets table_list->table if it was not set */
table= table_list->table;
- context= &thd->lex->select_lex.context;
+ context= &thd->lex->first_select_lex()->context;
/*
These three asserts test the hypothesis that the resetting of the name
resolution context below is not necessary at all since the list of local
@@ -1078,7 +1082,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
} while (bulk_parameters_iterations(thd));
values_loop_end:
- free_underlaid_joins(thd, &thd->lex->select_lex);
+ free_underlaid_joins(thd, thd->lex->first_select_lex());
joins_freed= TRUE;
/*
@@ -1271,7 +1275,7 @@ abort:
table->file->ha_release_auto_increment();
if (!joins_freed)
- free_underlaid_joins(thd, &thd->lex->select_lex);
+ free_underlaid_joins(thd, thd->lex->first_select_lex());
thd->abort_on_warning= 0;
DBUG_RETURN(retval);
}
@@ -1301,7 +1305,7 @@ abort:
static bool check_view_insertability(THD * thd, TABLE_LIST *view)
{
- uint num= view->view->select_lex.item_list.elements;
+ uint num= view->view->first_select_lex()->item_list.elements;
TABLE *table= view->table;
Field_translator *trans_start= view->field_translation,
*trans_end= trans_start + num;
@@ -1401,10 +1405,12 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list,
than INSERT.
*/
- if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context,
- &thd->lex->select_lex.top_join_list,
+ if (setup_tables_and_check_access(thd,
+ &thd->lex->first_select_lex()->context,
+ &thd->lex->first_select_lex()->
+ top_join_list,
table_list,
- thd->lex->select_lex.leaf_tables,
+ thd->lex->first_select_lex()->leaf_tables,
select_insert, INSERT_ACL, SELECT_ACL,
TRUE))
DBUG_RETURN(TRUE);
@@ -1412,7 +1418,7 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list,
if (insert_into_view && !fields.elements)
{
thd->lex->empty_field_list_on_rset= 1;
- if (!thd->lex->select_lex.leaf_tables.head()->table ||
+ if (!thd->lex->first_select_lex()->leaf_tables.head()->table ||
table_list->is_multitable())
{
my_error(ER_VIEW_NO_INSERT_FIELD_LIST, MYF(0),
@@ -1486,7 +1492,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list,
enum_duplicates duplic, COND **where,
bool select_insert)
{
- SELECT_LEX *select_lex= &thd->lex->select_lex;
+ SELECT_LEX *select_lex= thd->lex->first_select_lex();
Name_resolution_context *context= &select_lex->context;
Name_resolution_context_state ctx_state;
bool insert_into_view= (table_list->view != 0);
@@ -1730,7 +1736,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
*/
if (info->ignore)
{
- table->file->print_error(error, MYF(ME_JUST_WARNING));
+ table->file->print_error(error, MYF(ME_WARNING));
goto ok_or_after_trg_err; /* Ignoring a not fatal error, return 0 */
}
goto err;
@@ -1749,10 +1755,8 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
was used. This ensures that we don't get a problem when the
whole range of the key has been used.
*/
- if (info->handle_duplicates == DUP_REPLACE &&
- table->next_number_field &&
- key_nr == table->s->next_number_index &&
- (insert_id_for_cur_row > 0))
+ if (info->handle_duplicates == DUP_REPLACE && table->next_number_field &&
+ key_nr == table->s->next_number_index && insert_id_for_cur_row > 0)
goto err;
if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
{
@@ -1856,7 +1860,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
{
if (!(thd->variables.old_behavior &
OLD_MODE_NO_DUP_KEY_WARNINGS_WITH_IGNORE))
- table->file->print_error(error, MYF(ME_JUST_WARNING));
+ table->file->print_error(error, MYF(ME_WARNING));
goto ok_or_after_trg_err;
}
goto err;
@@ -2035,7 +2039,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
goto err;
if (!(thd->variables.old_behavior &
OLD_MODE_NO_DUP_KEY_WARNINGS_WITH_IGNORE))
- table->file->print_error(error, MYF(ME_JUST_WARNING));
+ table->file->print_error(error, MYF(ME_WARNING));
table->file->restore_auto_increment(prev_insert_id);
goto ok_or_after_trg_err;
}
@@ -2192,11 +2196,11 @@ public:
mysql_mutex_init(key_delayed_insert_mutex, &mutex, MY_MUTEX_INIT_FAST);
mysql_cond_init(key_delayed_insert_cond, &cond, NULL);
mysql_cond_init(key_delayed_insert_cond_client, &cond_client, NULL);
- mysql_mutex_lock(&LOCK_thread_count);
+ mysql_mutex_lock(&LOCK_delayed_insert);
delayed_insert_threads++;
+ mysql_mutex_unlock(&LOCK_delayed_insert);
delayed_lock= global_system_variables.low_priority_updates ?
TL_WRITE_LOW_PRIORITY : TL_WRITE;
- mysql_mutex_unlock(&LOCK_thread_count);
DBUG_VOID_RETURN;
}
~Delayed_insert()
@@ -2214,15 +2218,9 @@ public:
mysql_cond_destroy(&cond);
mysql_cond_destroy(&cond_client);
- /*
- We could use unlink_not_visible_threads() here, but as
- delayed_insert_threads also needs to be protected by
- the LOCK_thread_count mutex, we open code this.
- */
- mysql_mutex_lock(&LOCK_thread_count);
- thd.unlink(); // Must be unlinked under lock
+ server_threads.erase(&thd);
+ mysql_mutex_assert_owner(&LOCK_delayed_insert);
delayed_insert_threads--;
- mysql_mutex_unlock(&LOCK_thread_count);
my_free(thd.query());
thd.security_ctx->user= 0;
@@ -2369,7 +2367,7 @@ bool delayed_get_table(THD *thd, MDL_request *grl_protection_request,
di->thd.set_db(&table_list->db);
di->thd.set_query(my_strndup(table_list->table_name.str,
table_list->table_name.length,
- MYF(MY_WME | ME_FATALERROR)),
+ MYF(MY_WME | ME_FATAL)),
table_list->table_name.length, system_charset_info);
if (di->thd.db.str == NULL || di->thd.query() == NULL)
{
@@ -2382,9 +2380,12 @@ bool delayed_get_table(THD *thd, MDL_request *grl_protection_request,
di->table_list.alias.str= di->table_list.table_name.str= di->thd.query();
di->table_list.alias.length= di->table_list.table_name.length= di->thd.query_length();
di->table_list.db= di->thd.db;
- /* We need the tickets so that they can be cloned in handle_delayed_insert */
- di->grl_protection.init(MDL_key::GLOBAL, "", "",
- MDL_INTENTION_EXCLUSIVE, MDL_STATEMENT);
+ /*
+ We need the tickets so that they can be cloned in
+ handle_delayed_insert
+ */
+ di->grl_protection.init(MDL_key::BACKUP, "", "",
+ MDL_BACKUP_DML, MDL_STATEMENT);
di->grl_protection.ticket= grl_protection_request->ticket;
init_mdl_requests(&di->table_list);
di->table_list.mdl_request.ticket= table_list->mdl_request.ticket;
@@ -2401,7 +2402,7 @@ bool delayed_get_table(THD *thd, MDL_request *grl_protection_request,
mysql_mutex_unlock(&di->mutex);
di->unlock();
delete di;
- my_error(ER_CANT_CREATE_THREAD, MYF(ME_FATALERROR), error);
+ my_error(ER_CANT_CREATE_THREAD, MYF(ME_FATAL), error);
goto end_create;
}
@@ -2459,10 +2460,12 @@ bool delayed_get_table(THD *thd, MDL_request *grl_protection_request,
}
/* Unlock the delayed insert object after its last access. */
di->unlock();
- DBUG_RETURN((table_list->table == NULL));
+ DBUG_PRINT("exit", ("table_list->table: %p", table_list->table));
+ DBUG_RETURN(thd->is_error());
end_create:
mysql_mutex_unlock(&LOCK_delayed_create);
+ DBUG_PRINT("exit", ("is_error: %d", thd->is_error()));
DBUG_RETURN(thd->is_error());
}
@@ -2517,24 +2520,27 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
if (thd.killed)
{
/*
- Copy the error message. Note that we don't treat fatal
- errors in the delayed thread as fatal errors in the
- main thread. If delayed thread was killed, we don't
- want to send "Server shutdown in progress" in the
- INSERT THREAD.
-
- The thread could be killed with an error message if
- di->handle_inserts() or di->open_and_lock_table() fails.
- The thread could be killed without an error message if
- killed using THD::notify_shared_lock() or
- kill_delayed_threads_for_table().
+ Check how the insert thread was killed. If it was killed
+ by FLUSH TABLES, which calls kill_delayed_threads_for_table(),
+ then is_error is not set.
+ In that case, return without setting an error, which means the
+ insert will be converted to a normal insert.
*/
- if (!thd.is_error())
- my_message(ER_QUERY_INTERRUPTED, ER_THD(&thd, ER_QUERY_INTERRUPTED),
- MYF(0));
- else
+ if (thd.is_error())
+ {
+ /*
+ Copy the error message. Note that we don't treat fatal
+ errors in the delayed thread as fatal errors in the
+ main thread. If delayed thread was killed, we don't
+ want to send "Server shutdown in progress" in the
+ INSERT THREAD.
+
+ The thread could be killed with an error message if
+ di->handle_inserts() or di->open_and_lock_table() fails.
+ */
my_message(thd.get_stmt_da()->sql_errno(),
thd.get_stmt_da()->message(), MYF(0));
+ }
goto error;
}
}
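-- A hedged illustration of the behaviour change above (not part of the
-- patch): the table name t1 and the session flow are assumptions, chosen
-- only to show when a delayed insert is silently converted to a normal one.
--
--   -- connection 1
--   CREATE TABLE t1 (a INT) ENGINE=MyISAM;
--   -- connection 2, concurrently: FLUSH TABLES calls
--   -- kill_delayed_threads_for_table(), killing the delayed-insert thread
--   -- without setting an error
--   FLUSH TABLES;
--   -- connection 1: with this change the statement no longer fails with
--   -- ER_QUERY_INTERRUPTED; it is converted to a normal INSERT instead
--   INSERT DELAYED INTO t1 VALUES (1);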
@@ -2615,10 +2621,6 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
share->default_fields)
{
bool error_reported= FALSE;
- if (unlikely(!(copy->def_vcol_set=
- (MY_BITMAP*) alloc_root(client_thd->mem_root,
- sizeof(MY_BITMAP)))))
- goto error;
if (unlikely(parse_vcol_defs(client_thd, client_thd->mem_root, copy,
&error_reported)))
goto error;
@@ -2637,15 +2639,6 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
copy->def_write_set.bitmap= ((my_bitmap_map*)
(bitmap + share->column_bitmap_size));
bitmaps_used= 2;
- if (share->virtual_fields)
- {
- my_bitmap_init(copy->def_vcol_set,
- (my_bitmap_map*) (bitmap +
- bitmaps_used*share->column_bitmap_size),
- share->fields, FALSE);
- bitmaps_used++;
- copy->vcol_set= copy->def_vcol_set;
- }
if (share->default_fields || share->default_expressions)
{
my_bitmap_init(&copy->has_value_set,
@@ -2949,7 +2942,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
pthread_detach_this_thread();
/* Add thread to THD list so that's it's visible in 'show processlist' */
thd->set_start_time();
- add_to_active_threads(thd);
+ server_threads.insert(thd);
if (abort_loop)
thd->set_killed(KILL_CONNECTION);
else
@@ -3107,11 +3100,30 @@ pthread_handler_t handle_delayed_insert(void *arg)
mysql_mutex_unlock(&di->thd.mysys_var->mutex);
mysql_mutex_lock(&di->mutex);
}
+
+ /*
+ The code depends on the following ASSERT always holding.
+ I don't want to accidentally introduce any bugs in the following code
+ in this commit, so I leave the small cleanup of the code to
+ a future commit.
+ */
+ DBUG_ASSERT(thd->lock || di->stacked_inserts == 0);
+
DBUG_PRINT("delayed",
- ("thd->killed: %d di->tables_in_use: %d thd->lock: %d",
- thd->killed, di->tables_in_use, thd->lock != 0));
+ ("thd->killed: %d di->status: %d di->stacked_insert: %d di->tables_in_use: %d thd->lock: %d",
+ thd->killed, di->status, di->stacked_inserts, di->tables_in_use, thd->lock != 0));
+
+ /*
+ This is used to test what happens if a kill is sent before
+ we have time to handle the insert requests.
+ */
+ DBUG_EXECUTE_IF("write_delay_wakeup",
+ if (!thd->killed && di->stacked_inserts)
+ my_sleep(500000);
+ );
- if (di->tables_in_use && ! thd->lock && !thd->killed)
+ if (di->tables_in_use && ! thd->lock &&
+ (!thd->killed || di->stacked_inserts))
{
/*
Request for new delayed insert.
@@ -3271,7 +3283,7 @@ bool Delayed_insert::handle_inserts(void)
or if another thread is removing the current table definition
from the table cache.
*/
- my_error(ER_DELAYED_CANT_CHANGE_LOCK, MYF(ME_FATALERROR | ME_NOREFRESH),
+ my_error(ER_DELAYED_CANT_CHANGE_LOCK, MYF(ME_FATAL | ME_ERROR_LOG),
table->s->table_name.str);
goto err;
}
@@ -3447,7 +3459,7 @@ bool Delayed_insert::handle_inserts(void)
{
/* This is not known to happen. */
my_error(ER_DELAYED_CANT_CHANGE_LOCK,
- MYF(ME_FATALERROR | ME_NOREFRESH),
+ MYF(ME_FATAL | ME_ERROR_LOG),
table->s->table_name.str);
goto err;
}
@@ -3545,7 +3557,7 @@ bool Delayed_insert::handle_inserts(void)
bool mysql_insert_select_prepare(THD *thd)
{
LEX *lex= thd->lex;
- SELECT_LEX *select_lex= &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
DBUG_ENTER("mysql_insert_select_prepare");
@@ -3634,7 +3646,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
select, LEX::current_select should point to the first select while
we are fixing fields from insert list.
*/
- lex->current_select= &lex->select_lex;
+ lex->current_select= lex->first_select_lex();
res= (setup_fields(thd, Ref_ptr_array(),
values, MARK_COLUMNS_READ, 0, NULL, 0) ||
@@ -3651,7 +3663,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
if (info.handle_duplicates == DUP_UPDATE && !res)
{
- Name_resolution_context *context= &lex->select_lex.context;
+ Name_resolution_context *context= &lex->first_select_lex()->context;
Name_resolution_context_state ctx_state;
/* Save the state of the current name resolution context. */
@@ -3661,7 +3673,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
table_list->next_local= 0;
context->resolve_in_table_list_only(table_list);
- lex->select_lex.no_wrap_view_item= TRUE;
+ lex->first_select_lex()->no_wrap_view_item= TRUE;
res= res ||
check_update_fields(thd, context->table_list,
*info.update_fields, *info.update_values,
@@ -3672,22 +3684,26 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
*/
true,
&map);
- lex->select_lex.no_wrap_view_item= FALSE;
+ lex->first_select_lex()->no_wrap_view_item= FALSE;
/*
- When we are not using GROUP BY and there are no ungrouped aggregate functions
- we can refer to other tables in the ON DUPLICATE KEY part.
- We use next_name_resolution_table descructively, so check it first (views?)
+ When we are not using GROUP BY and there are no ungrouped
+ aggregate functions, we can refer to other tables in the ON
+ DUPLICATE KEY part. We use next_name_resolution_table
+ destructively, so check it first (views?)
*/
DBUG_ASSERT (!table_list->next_name_resolution_table);
- if (lex->select_lex.group_list.elements == 0 &&
- !lex->select_lex.with_sum_func)
+ if (lex->first_select_lex()->group_list.elements == 0 &&
+ !lex->first_select_lex()->with_sum_func)
+ {
/*
- We must make a single context out of the two separate name resolution contexts :
- the INSERT table and the tables in the SELECT part of INSERT ... SELECT.
- To do that we must concatenate the two lists
+ We must make a single context out of the two separate name
+ resolution contexts: the INSERT table and the tables in the
+ SELECT part of INSERT ... SELECT. To do that we must
+ concatenate the two lists.
*/
table_list->next_name_resolution_table=
ctx_state.get_first_name_resolution_table();
+ }
res= res || setup_fields(thd, Ref_ptr_array(), *info.update_values,
MARK_COLUMNS_READ, 0, NULL, 0);
@@ -3793,9 +3809,9 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
void
DESCRIPTION
- If the result table is the same as one of the source tables (INSERT SELECT),
- the result table is not finally prepared at the join prepair phase.
- Do the final preparation now.
+ If the result table is the same as one of the source tables
+ (INSERT SELECT), the result table is not finally prepared at the
+ join prepare phase. Do the final preparation now.
RETURN
0 OK
@@ -3934,10 +3950,13 @@ bool select_insert::prepare_eof()
DBUG_PRINT("enter", ("trans_table=%d, table_type='%s'",
trans_table, table->file->table_type()));
- error= (IF_WSREP((thd->wsrep_conflict_state == MUST_ABORT ||
- thd->wsrep_conflict_state == CERT_FAILURE) ? -1 :, )
- (thd->locked_tables_mode <= LTM_LOCK_TABLES ?
- table->file->ha_end_bulk_insert() : 0));
+#ifdef WITH_WSREP
+ error= (thd->wsrep_cs().current_error()) ? -1 :
+ (thd->locked_tables_mode <= LTM_LOCK_TABLES) ?
+#else
+ error= (thd->locked_tables_mode <= LTM_LOCK_TABLES) ?
+#endif /* WITH_WSREP */
+ table->file->ha_end_bulk_insert() : 0;
if (likely(!error) && unlikely(thd->is_error()))
error= thd->get_stmt_da()->sql_errno();
@@ -4109,9 +4128,9 @@ void select_insert::abort_result_set() {
Field *Item::create_field_for_create_select(TABLE *table)
{
- Field *def_field, *tmp_field;
- return ::create_tmp_field(table->in_use, table, this, type(),
- (Item ***) 0, &tmp_field, &def_field, 0, 0, 0, 0);
+ static Tmp_field_param param(false, false, false, false);
+ Tmp_field_src src;
+ return create_tmp_field_ex(table, &src, &param);
}
@@ -4154,10 +4173,8 @@ Field *Item::create_field_for_create_select(TABLE *table)
@retval 0 Error
*/
-TABLE *select_create::create_table_from_items(THD *thd,
- List<Item> *items,
- MYSQL_LOCK **lock,
- TABLEOP_HOOKS *hooks)
+TABLE *select_create::create_table_from_items(THD *thd, List<Item> *items,
+ MYSQL_LOCK **lock, TABLEOP_HOOKS *hooks)
{
TABLE tmp_table; // Used during 'Create_field()'
TABLE_SHARE share;
@@ -4179,8 +4196,7 @@ TABLE *select_create::create_table_from_items(THD *thd,
if (!opt_explicit_defaults_for_timestamp)
promote_first_timestamp_column(&alter_info->create_list);
- if (create_info->vers_fix_system_fields(thd, alter_info, *create_table,
- true))
+ if (create_info->fix_create_fields(thd, alter_info, *create_table, true))
DBUG_RETURN(NULL);
while ((item=it++))
@@ -4219,7 +4235,7 @@ TABLE *select_create::create_table_from_items(THD *thd,
alter_info->create_list.push_back(cr_field, thd->mem_root);
}
- if (create_info->vers_check_system_fields(thd, alter_info, *create_table))
+ if (create_info->check_fields(thd, alter_info, *create_table))
DBUG_RETURN(NULL);
DEBUG_SYNC(thd,"create_table_select_before_create");
@@ -4428,8 +4444,6 @@ select_create::prepare(List<Item> &_values, SELECT_LEX_UNIT *u)
thd->binlog_start_trans_and_stmt();
}
- DEBUG_SYNC(thd,"create_table_select_before_check_if_exists");
-
if (!(table= create_table_from_items(thd, &values, &extra_lock, hook_ptr)))
/* abort() deletes table */
DBUG_RETURN(-1);
@@ -4547,9 +4561,16 @@ select_create::binlog_show_create_table(TABLE **tables, uint count)
/* suppress_use */ FALSE,
errcode);
}
-
- ha_fake_trx_id(thd);
-
+#ifdef WITH_WSREP
+ if (thd->wsrep_trx().active())
+ {
+ WSREP_DEBUG("transaction already started for CTAS");
+ }
+ else
+ {
+ wsrep_start_transaction(thd, thd->wsrep_next_trx_id());
+ }
+#endif
return result;
}
@@ -4607,10 +4628,18 @@ bool select_create::send_eof()
if (!table->s->tmp_table)
{
#ifdef WITH_WSREP
- if (WSREP_ON)
+ if (WSREP(thd))
{
+ if (thd->wsrep_trx_id() == WSREP_UNDEFINED_TRX_ID)
+ {
+ wsrep_start_transaction(thd, thd->wsrep_next_trx_id());
+ }
+ DBUG_ASSERT(thd->wsrep_trx_id() != WSREP_UNDEFINED_TRX_ID);
+ WSREP_DEBUG("CTAS key append for trx: %" PRIu64 " thd %llu query %lld ",
+ thd->wsrep_trx_id(), thd->thread_id, thd->query_id);
+
/*
- append table level exclusive key for CTAS
+ append table level exclusive key for CTAS
*/
wsrep_key_arr_t key_arr= {0, 0};
wsrep_prepare_keys_for_isolation(thd,
@@ -4618,38 +4647,34 @@ bool select_create::send_eof()
create_table->table_name.str,
table_list,
&key_arr);
- int rcode = wsrep->append_key(
- wsrep,
- &thd->wsrep_ws_handle,
- key_arr.keys, //&wkey,
- key_arr.keys_len,
- WSREP_KEY_EXCLUSIVE,
- false);
+ int rcode= wsrep_thd_append_key(thd, key_arr.keys, key_arr.keys_len,
+ WSREP_SERVICE_KEY_EXCLUSIVE);
wsrep_keys_free(&key_arr);
- if (rcode) {
+ if (rcode)
+ {
DBUG_PRINT("wsrep", ("row key failed: %d", rcode));
WSREP_ERROR("Appending table key for CTAS failed: %s, %d",
(wsrep_thd_query(thd)) ?
wsrep_thd_query(thd) : "void", rcode);
- return true;
+ DBUG_RETURN(true);
}
/* If commit fails, we should be able to reset the OK status. */
- thd->get_stmt_da()->set_overwrite_status(TRUE);
+ thd->get_stmt_da()->set_overwrite_status(true);
}
#endif /* WITH_WSREP */
trans_commit_stmt(thd);
if (!(thd->variables.option_bits & OPTION_GTID_BEGIN))
trans_commit_implicit(thd);
#ifdef WITH_WSREP
- if (WSREP_ON)
+ if (WSREP(thd))
{
thd->get_stmt_da()->set_overwrite_status(FALSE);
mysql_mutex_lock(&thd->LOCK_thd_data);
- if (thd->wsrep_conflict_state != NO_CONFLICT)
+ if (wsrep_current_error(thd))
{
- WSREP_DEBUG("select_create commit failed, thd: %lld err: %d %s",
- (longlong) thd->thread_id, thd->wsrep_conflict_state,
- thd->query());
+ WSREP_DEBUG("select_create commit failed, thd: %llu err: %s %s",
+ thd->thread_id,
+ wsrep_thd_transaction_state_str(thd), WSREP_QUERY(thd));
mysql_mutex_unlock(&thd->LOCK_thd_data);
abort_result_set();
DBUG_RETURN(true);
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 6ce778d03cf..b0544300f1d 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -31,8 +31,11 @@
#include "sql_select.h"
#include "sql_cte.h"
#include "sql_signal.h"
+#include "sql_truncate.h" // Sql_cmd_truncate_table
+#include "sql_admin.h" // Sql_cmd_analyze/Check..._table
#include "sql_partition.h"
-
+#include "sql_partition_admin.h" // Sql_cmd_alter_table_*_part
+#include "event_parse_data.h"
void LEX::parse_error(uint err_number)
{
@@ -174,7 +177,7 @@ init_lex_with_single_table(THD *thd, TABLE *table, LEX *lex)
{
TABLE_LIST *table_list;
Table_ident *table_ident;
- SELECT_LEX *select_lex= &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
Name_resolution_context *context= &select_lex->context;
/*
We will call the parser to create a part_info struct based on the
@@ -635,6 +638,30 @@ void Lex_input_stream::reduce_digest_token(uint token_left, uint token_right)
}
}
+/**
+ Lex start operations for the builtin select, collected together.
+*/
+
+void SELECT_LEX::lex_start(LEX *plex)
+{
+ SELECT_LEX_UNIT *unit= &plex->unit;
+ /* 'parent_lex' is used in init_query() so it must be before it. */
+ parent_lex= plex;
+ init_query();
+ master= unit;
+ prev= &unit->slave;
+ link_next= slave= next= 0;
+ link_prev= (st_select_lex_node**)&(plex->all_selects_list);
+ DBUG_ASSERT(!group_list_ptrs);
+ select_number= 1;
+ in_sum_expr=0;
+ ftfunc_list_alloc.empty();
+ ftfunc_list= &ftfunc_list_alloc;
+ group_list.empty();
+ order_list.empty();
+ gorder_list.empty();
+}
+
void lex_start(THD *thd)
{
DBUG_ENTER("lex_start");
@@ -659,18 +686,19 @@ void LEX::start(THD *thd_arg)
DBUG_ASSERT(!explain);
+ builtin_select.lex_start(this);
+ lex_options= 0;
context_stack.empty();
+ //empty select_stack
+ select_stack_top= 0;
unit.init_query();
- current_select_number= 1;
- select_lex.linkage= UNSPECIFIED_TYPE;
- /* 'parent_lex' is used in init_query() so it must be before it. */
- select_lex.parent_lex= this;
- select_lex.init_query();
+ current_select_number= 0;
curr_with_clause= 0;
with_clauses_list= 0;
with_clauses_list_last_next= &with_clauses_list;
clone_spec_offset= 0;
create_view= NULL;
+ field_list.empty();
value_list.empty();
update_list.empty();
set_var_list.empty();
@@ -680,21 +708,12 @@ void LEX::start(THD *thd_arg)
with_persistent_for_clause= FALSE;
column_list= NULL;
index_list= NULL;
- prepared_stmt_params.empty();
+ prepared_stmt.lex_start();
auxiliary_table_list.empty();
unit.next= unit.master= unit.link_next= unit.return_to= 0;
unit.prev= unit.link_prev= 0;
- unit.slave= current_select= all_selects_list= &select_lex;
- select_lex.master= &unit;
- select_lex.prev= &unit.slave;
- select_lex.link_next= select_lex.slave= select_lex.next= 0;
- select_lex.link_prev= (st_select_lex_node**)&(all_selects_list);
- select_lex.options= 0;
- select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED;
- select_lex.init_order();
- select_lex.group_list.empty();
- if (select_lex.group_list_ptrs)
- select_lex.group_list_ptrs->clear();
+ unit.slave= current_select= all_selects_list= &builtin_select;
+ sql_cache= LEX::SQL_CACHE_UNSPECIFIED;
describe= 0;
analyze_stmt= 0;
explain_json= false;
@@ -703,14 +722,7 @@ void LEX::start(THD *thd_arg)
safe_to_cache_query= 1;
parsing_options.reset();
empty_field_list_on_rset= 0;
- select_lex.select_number= 1;
part_info= 0;
- select_lex.in_sum_expr=0;
- select_lex.ftfunc_list_alloc.empty();
- select_lex.ftfunc_list= &select_lex.ftfunc_list_alloc;
- select_lex.group_list.empty();
- select_lex.order_list.empty();
- select_lex.gorder_list.empty();
m_sql_cmd= NULL;
duplicates= DUP_ERROR;
ignore= 0;
@@ -722,6 +734,8 @@ void LEX::start(THD *thd_arg)
query_tables= 0;
reset_query_tables_list(FALSE);
expr_allows_subselect= TRUE;
+ selects_allow_into= FALSE;
+ selects_allow_procedure= FALSE;
use_only_table_context= FALSE;
parse_vcol_expr= FALSE;
check_exists= FALSE;
@@ -731,8 +745,8 @@ void LEX::start(THD *thd_arg)
name= null_clex_str;
event_parse_data= NULL;
profile_options= PROFILE_NONE;
- nest_level=0 ;
- select_lex.nest_level_base= &unit;
+ nest_level= 0;
+ builtin_select.nest_level_base= &unit;
allow_sum_func.clear_all();
in_sum_func= NULL;
@@ -754,8 +768,16 @@ void LEX::start(THD *thd_arg)
win_spec= NULL;
vers_conditions.empty();
+ period_conditions.empty();
is_lex_started= TRUE;
+
+ next_is_main= FALSE;
+ next_is_down= FALSE;
+
+ wild= 0;
+ exchange= 0;
+
DBUG_VOID_RETURN;
}
@@ -1298,7 +1320,8 @@ int ORAlex(YYSTYPE *yylval, THD *thd)
int Lex_input_stream::lex_token(YYSTYPE *yylval, THD *thd)
{
int token;
-
+ const int left_paren= (int) '(';
+
if (lookahead_token >= 0)
{
/*
@@ -1315,6 +1338,8 @@ int Lex_input_stream::lex_token(YYSTYPE *yylval, THD *thd)
token= lex_one_token(yylval, thd);
add_digest_token(token, yylval);
+ SELECT_LEX *curr_sel= thd->lex->current_select;
+
switch(token) {
case WITH:
/*
@@ -1363,8 +1388,16 @@ int Lex_input_stream::lex_token(YYSTYPE *yylval, THD *thd)
}
break;
case VALUES:
- if (thd->lex->current_select->parsing_place == IN_UPDATE_ON_DUP_KEY ||
- thd->lex->current_select->parsing_place == IN_PART_FUNC)
+ if (curr_sel &&
+ (curr_sel->parsing_place == BEFORE_OPT_LIST ||
+ curr_sel->parsing_place == AFTER_LIST))
+ {
+ curr_sel->parsing_place= NO_MATTER;
+ break;
+ }
+ if (curr_sel &&
+ (curr_sel->parsing_place == IN_UPDATE_ON_DUP_KEY ||
+ curr_sel->parsing_place == IN_PART_FUNC))
return VALUE_SYM;
token= lex_one_token(yylval, thd);
add_digest_token(token, yylval);
@@ -1378,6 +1411,43 @@ int Lex_input_stream::lex_token(YYSTYPE *yylval, THD *thd)
lookahead_token= token;
return VALUES;
}
+ case VALUE_SYM:
+ if (curr_sel &&
+ (curr_sel->parsing_place == BEFORE_OPT_LIST ||
+ curr_sel->parsing_place == AFTER_LIST))
+ {
+ curr_sel->parsing_place= NO_MATTER;
+ return VALUES;
+ }
+ break;
+ case PARTITION_SYM:
+ case SELECT_SYM:
+ case UNION_SYM:
+ if (curr_sel &&
+ (curr_sel->parsing_place == BEFORE_OPT_LIST ||
+ curr_sel->parsing_place == AFTER_LIST))
+ {
+ curr_sel->parsing_place= NO_MATTER;
+ }
+ break;
+ case left_paren:
+ if (!curr_sel ||
+ curr_sel->parsing_place != BEFORE_OPT_LIST)
+ return token;
+ token= lex_one_token(yylval, thd);
+ add_digest_token(token, yylval);
+ lookahead_yylval= yylval;
+ yylval= NULL;
+ lookahead_token= token;
+ curr_sel->parsing_place= NO_MATTER;
+ if (token == LIKE)
+ return LEFT_PAREN_LIKE;
+ if (token == WITH)
+ return LEFT_PAREN_WITH;
+ if (token != left_paren && token != SELECT_SYM)
+ return LEFT_PAREN_ALT;
+ else
+ return left_paren;
break;
default:
break;
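-- A sketch of the statement shapes the new VALUES/VALUE_SYM and
-- left-parenthesis lookahead cases appear intended to accept; these exact
-- queries are assumptions, not taken from the patch or its test suite.
--
--   -- stand-alone table value constructor
--   VALUES (1, 2), (3, 4);
--   -- parenthesized query expressions combined with UNION
--   (SELECT 1) UNION (SELECT 2) ORDER BY 1;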
@@ -1434,6 +1504,8 @@ int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd)
next_state= MY_LEX_START;
return PERCENT_ORACLE_SYM;
}
+ if (c == '[' && (m_thd->variables.sql_mode & MODE_MSSQL))
+ return scan_ident_delimited(thd, &yylval->ident_cli, ']');
/* Fall through */
case MY_LEX_SKIP: // This should not happen
if (c != ')')
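-- A minimal sketch of what the new '[' branch enables, assuming the MSSQL
-- sql_mode flag checked in the hunk; the table and column names are made up.
--
--   SET SESSION sql_mode = CONCAT(@@sql_mode, ',MSSQL');
--   CREATE TABLE [my table] ([some column] INT);
--   SELECT [some column] FROM [my table];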
@@ -1612,7 +1684,7 @@ int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd)
return scan_ident_start(thd, &yylval->ident_cli);
case MY_LEX_USER_VARIABLE_DELIMITER: // Found quote char
- return scan_ident_delimited(thd, &yylval->ident_cli);
+ return scan_ident_delimited(thd, &yylval->ident_cli, m_tok_start[0]);
case MY_LEX_INT_OR_REAL: // Complete int or incomplete real
if (c != '.' || yyPeek() == '.')
@@ -1738,7 +1810,7 @@ int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd)
return(TEXT_STRING);
}
case MY_LEX_COMMENT: // Comment
- lex->select_lex.options|= OPTION_FOUND_COMMENT;
+ lex->lex_options|= OPTION_LEX_FOUND_COMMENT;
while ((c= yyGet()) != '\n' && c) ;
yyUnget(); // Safety against eof
state= MY_LEX_START; // Try again
@@ -1749,7 +1821,7 @@ int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd)
state= MY_LEX_CHAR; // Probable division
break;
}
- lex->select_lex.options|= OPTION_FOUND_COMMENT;
+ lex->lex_options|= OPTION_LEX_FOUND_COMMENT;
/* Reject '/' '*', since we might need to turn off the echo */
yyUnget();
@@ -1811,7 +1883,7 @@ int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd)
else
{
#ifdef WITH_WSREP
- if (WSREP(thd) && version == 99997 && thd->wsrep_exec_mode == LOCAL_STATE)
+ if (WSREP(thd) && version == 99997 && wsrep_thd_is_local(thd))
{
WSREP_DEBUG("consistency check: %s", thd->query());
thd->wsrep_consistency_check= CONSISTENCY_CHECK_DECLARED;
@@ -2184,11 +2256,12 @@ int Lex_input_stream::scan_ident_middle(THD *thd, Lex_ident_cli_st *str,
int Lex_input_stream::scan_ident_delimited(THD *thd,
- Lex_ident_cli_st *str)
+ Lex_ident_cli_st *str,
+ uchar quote_char)
{
CHARSET_INFO *const cs= thd->charset();
uint double_quotes= 0;
- uchar c, quote_char= m_tok_start[0];
+ uchar c;
DBUG_ASSERT(m_ptr == m_tok_start + 1);
while ((c= yyGet()))
@@ -2262,8 +2335,8 @@ void trim_whitespace(CHARSET_INFO *cs, LEX_CSTRING *str, size_t * prefix_length)
void st_select_lex_node::init_query_common()
{
options= 0;
- sql_cache= SQL_CACHE_UNSPECIFIED;
- linkage= UNSPECIFIED_TYPE;
+ set_linkage(UNSPECIFIED_TYPE);
+ distinct= TRUE;
no_table_names_allowed= 0;
uncacheable= 0;
}
@@ -2271,7 +2344,7 @@ void st_select_lex_node::init_query_common()
void st_select_lex_unit::init_query()
{
init_query_common();
- linkage= GLOBAL_OPTIONS_TYPE;
+ set_linkage(GLOBAL_OPTIONS_TYPE);
select_limit_cnt= HA_POS_ERROR;
offset_limit_cnt= 0;
union_distinct= 0;
@@ -2308,21 +2381,12 @@ void st_select_lex::init_query()
join= 0;
having= prep_having= where= prep_where= 0;
cond_pushed_into_where= cond_pushed_into_having= 0;
+ attach_to_conds.empty();
olap= UNSPECIFIED_OLAP_TYPE;
having_fix_field= 0;
having_fix_field_for_pushed_cond= 0;
context.select_lex= this;
context.init();
- /*
- Add the name resolution context of the current (sub)query to the
- stack of contexts for the whole query.
- TODO:
- push_context may return an error if there is no memory for a new
- element in the stack, however this method has no return value,
- thus push_context should be moved to a place where query
- initialization is checked for failure.
- */
- parent_lex->push_context(&context, parent_lex->thd->mem_root);
cond_count= between_count= with_wild= 0;
max_equal_elems= 0;
ref_pointer_array.reset();
@@ -2338,6 +2402,7 @@ void st_select_lex::init_query()
first_natural_join_processing= 1;
first_cond_optimization= 1;
parsing_place= NO_MATTER;
+ save_parsing_place= NO_MATTER;
exclude_from_table_unique_test= no_wrap_view_item= FALSE;
nest_level= 0;
link_next= 0;
@@ -2353,6 +2418,7 @@ void st_select_lex::init_query()
tvc= 0;
in_tvc= false;
versioned_tables= 0;
+ pushdown_select= 0;
}
void st_select_lex::init_select()
@@ -2368,16 +2434,14 @@ void st_select_lex::init_select()
table_join_options= 0;
in_sum_expr= with_wild= 0;
options= 0;
- sql_cache= SQL_CACHE_UNSPECIFIED;
ftfunc_list_alloc.empty();
inner_sum_func_list= 0;
ftfunc_list= &ftfunc_list_alloc;
- order_list.elements= 0;
- order_list.first= 0;
- order_list.next= &order_list.first;
+ order_list.empty();
/* Set limit and offset to default values */
select_limit= 0; /* denotes the default limit = HA_POS_ERROR */
offset_limit= 0; /* denotes the default offset = 0 */
+ is_set_query_expr_tail= false;
with_sum_func= 0;
with_all_modifier= 0;
is_correlated= 0;
@@ -2439,6 +2503,23 @@ void st_select_lex_node::add_slave(st_select_lex_node *slave_arg)
}
}
+void st_select_lex_node::link_chain_down(st_select_lex_node *first)
+{
+ st_select_lex_node *last_node;
+ st_select_lex_node *node= first;
+ do
+ {
+ last_node= node;
+ node->master= this;
+ node= node->next;
+ } while (node);
+ if ((last_node->next= slave))
+ {
+ slave->prev= &last_node->next;
+ }
+ first->prev= &slave;
+ slave= first;
+}
/*
include on level down (but do not link)
@@ -2488,7 +2569,7 @@ void st_select_lex_node::fast_exclude()
// Remove slave structure
for (; slave; slave= slave->next)
slave->fast_exclude();
-
+
}
@@ -2962,8 +3043,7 @@ void st_select_lex::print_order(String *str,
else
{
/* replace numeric reference with equivalent for ORDER constant */
- if (order->item[0]->type() == Item::INT_ITEM &&
- order->item[0]->basic_const_item())
+ if (order->item[0]->is_order_clause_position())
{
/* make it expression instead of integer constant */
str->append(STRING_WITH_LEN("''"));
@@ -3158,6 +3238,7 @@ LEX::LEX()
gtid_domain_static_buffer,
initial_gtid_domain_buffer_size,
initial_gtid_domain_buffer_size, 0);
+ unit.slave= &builtin_select;
}
@@ -3184,12 +3265,12 @@ bool LEX::can_be_merged()
// TODO: do not forget implement case when select_lex.table_list.elements==0
/* find non VIEW subqueries/unions */
- bool selects_allow_merge= (select_lex.next_select() == 0 &&
- !(select_lex.uncacheable &
+ bool selects_allow_merge= (first_select_lex()->next_select() == 0 &&
+ !(first_select_lex()->uncacheable &
UNCACHEABLE_RAND));
if (selects_allow_merge)
{
- for (SELECT_LEX_UNIT *tmp_unit= select_lex.first_inner_unit();
+ for (SELECT_LEX_UNIT *tmp_unit= first_select_lex()->first_inner_unit();
tmp_unit;
tmp_unit= tmp_unit->next_unit())
{
@@ -3206,12 +3287,12 @@ bool LEX::can_be_merged()
}
return (selects_allow_merge &&
- select_lex.group_list.elements == 0 &&
- select_lex.having == 0 &&
- select_lex.with_sum_func == 0 &&
- select_lex.table_list.elements >= 1 &&
- !(select_lex.options & SELECT_DISTINCT) &&
- select_lex.select_limit == 0);
+ first_select_lex()->group_list.elements == 0 &&
+ first_select_lex()->having == 0 &&
+ first_select_lex()->with_sum_func == 0 &&
+ first_select_lex()->table_list.elements >= 1 &&
+ !(first_select_lex()->options & SELECT_DISTINCT) &&
+ first_select_lex()->select_limit == 0);
}
@@ -3550,12 +3631,25 @@ void LEX::set_trg_event_type_for_tables()
break;
}
+ if (period_conditions.is_set())
+ {
+ switch (sql_command)
+ {
+ case SQLCOM_DELETE:
+ case SQLCOM_UPDATE:
+ case SQLCOM_REPLACE:
+ new_trg_event_map |= trg2bit(TRG_EVENT_INSERT);
+ default:
+ break;
+ }
+ }
+
/*
Do not iterate over sub-selects, only the tables in the outermost
SELECT_LEX can be modified, if any.
*/
- TABLE_LIST *tables= select_lex.get_table_list();
+ TABLE_LIST *tables= first_select_lex()->get_table_list();
while (tables)
{
@@ -3611,12 +3705,13 @@ TABLE_LIST *LEX::unlink_first_table(bool *link_to_local)
/*
and from local list if it is not empty
*/
- if ((*link_to_local= MY_TEST(select_lex.table_list.first)))
+ if ((*link_to_local= MY_TEST(first_select_lex()->table_list.first)))
{
- select_lex.context.table_list=
- select_lex.context.first_name_resolution_table= first->next_local;
- select_lex.table_list.first= first->next_local;
- select_lex.table_list.elements--; //safety
+ first_select_lex()->context.table_list=
+ first_select_lex()->context.first_name_resolution_table=
+ first->next_local;
+ first_select_lex()->table_list.first= first->next_local;
+ first_select_lex()->table_list.elements--; //safety
first->next_local= 0;
/*
Ensure that the global list has the same first table as the local
@@ -3647,7 +3742,7 @@ TABLE_LIST *LEX::unlink_first_table(bool *link_to_local)
void LEX::first_lists_tables_same()
{
- TABLE_LIST *first_table= select_lex.table_list.first;
+ TABLE_LIST *first_table= first_select_lex()->table_list.first;
if (query_tables != first_table && first_table != 0)
{
TABLE_LIST *next;
@@ -3672,6 +3767,23 @@ void LEX::first_lists_tables_same()
}
}
+void LEX::fix_first_select_number()
+{
+ SELECT_LEX *first= first_select_lex();
+ if (first && first->select_number != 1)
+ {
+ uint num= first->select_number;
+ for (SELECT_LEX *sel= all_selects_list;
+ sel;
+ sel= sel->next_select_in_list())
+ {
+ if (sel->select_number < num)
+ sel->select_number++;
+ }
+ first->select_number= 1;
+ }
+}
+
/*
Link table back that was unlinked with unlink_first_table()
@@ -3697,10 +3809,10 @@ void LEX::link_first_table_back(TABLE_LIST *first,
if (link_to_local)
{
- first->next_local= select_lex.table_list.first;
- select_lex.context.table_list= first;
- select_lex.table_list.first= first;
- select_lex.table_list.elements++; //safety
+ first->next_local= first_select_lex()->table_list.first;
+ first_select_lex()->context.table_list= first;
+ first_select_lex()->table_list.first= first;
+ first_select_lex()->table_list.elements++; //safety
}
}
}
@@ -3729,19 +3841,19 @@ void LEX::cleanup_after_one_table_open()
NOTE: all units will be connected to thd->lex->select_lex, because we
have not UNION on most upper level.
*/
- if (all_selects_list != &select_lex)
+ if (all_selects_list != first_select_lex())
{
derived_tables= 0;
- select_lex.exclude_from_table_unique_test= false;
+ first_select_lex()->exclude_from_table_unique_test= false;
/* cleunup underlying units (units of VIEW) */
- for (SELECT_LEX_UNIT *un= select_lex.first_inner_unit();
+ for (SELECT_LEX_UNIT *un= first_select_lex()->first_inner_unit();
un;
un= un->next_unit())
un->cleanup();
/* reduce all selects list to default state */
- all_selects_list= &select_lex;
+ all_selects_list= first_select_lex();
/* remove underlying units (units of VIEW) subtree */
- select_lex.cut_subtree();
+ first_select_lex()->cut_subtree();
}
}
@@ -4392,7 +4504,7 @@ void SELECT_LEX::update_used_tables()
tab->covering_keys= tab->s->keys_for_keyread;
tab->covering_keys.intersect(tab->keys_in_use_for_query);
/*
- View/derived was merged. Need to recalculate read_set/vcol_set
+ View/derived was merged. Need to recalculate read_set
bitmaps here. For example:
CREATE VIEW v1 AS SELECT f1,f2,f3 FROM t1;
SELECT f1 FROM v1;
@@ -4401,8 +4513,6 @@ void SELECT_LEX::update_used_tables()
be in the read_set.
*/
bitmap_clear_all(tab->read_set);
- if (tab->vcol_set)
- bitmap_clear_all(tab->vcol_set);
break;
}
}
@@ -4603,9 +4713,12 @@ void st_select_lex::set_explain_type(bool on_the_fly)
using_materialization= TRUE;
}
- if (&master_unit()->thd->lex->select_lex == this)
+ if (master_unit()->thd->lex->first_select_lex() == this)
{
- type= is_primary ? "PRIMARY" : "SIMPLE";
+ if (pushdown_select)
+ type= pushed_select_text;
+ else
+ type= is_primary ? "PRIMARY" : "SIMPLE";
}
else
{
@@ -4614,7 +4727,11 @@ void st_select_lex::set_explain_type(bool on_the_fly)
/* If we're a direct child of a UNION, we're the first sibling there */
if (linkage == DERIVED_TABLE_TYPE)
{
- if (is_uncacheable & UNCACHEABLE_DEPENDENT)
+ bool is_pushed_master_unit= master_unit()->derived &&
+ master_unit()->derived->pushdown_derived;
+ if (is_pushed_master_unit)
+ type= pushed_derived_text;
+ else if (is_uncacheable & UNCACHEABLE_DEPENDENT)
type= "LATERAL DERIVED";
else
type= "DERIVED";
@@ -4798,8 +4915,8 @@ bool LEX::save_prep_leaf_tables()
Query_arena *arena= thd->stmt_arena, backup;
arena= thd->activate_stmt_arena_if_needed(&backup);
//It is used for DETETE/UPDATE so top level has only one SELECT
- DBUG_ASSERT(select_lex.next_select() == NULL);
- bool res= select_lex.save_prep_leaf_tables(thd);
+ DBUG_ASSERT(first_select_lex()->next_select() == NULL);
+ bool res= first_select_lex()->save_prep_leaf_tables(thd);
if (arena)
thd->restore_active_arena(arena, &backup);
@@ -5130,8 +5247,13 @@ bool LEX::is_partition_management() const
SELECT_LEX *LEX::exclude_last_select()
{
- DBUG_ENTER("SELECT_LEX::exclude_last_select");
- SELECT_LEX *exclude= current_select;
+ return exclude_not_first_select(current_select);
+}
+
+SELECT_LEX *LEX::exclude_not_first_select(SELECT_LEX *exclude)
+{
+ DBUG_ENTER("LEX::exclude_not_first_select");
+ DBUG_PRINT("enter", ("exclude %p #%u", exclude, exclude->select_number));
SELECT_LEX_UNIT *unit= exclude->master_unit();
SELECT_LEX *sl;
DBUG_ASSERT(unit->first_select() != exclude);
@@ -5142,89 +5264,259 @@ SELECT_LEX *LEX::exclude_last_select()
DBUG_PRINT("info", ("excl: %p unit: %p prev: %p", exclude, unit, sl));
if (!sl)
DBUG_RETURN(NULL);
- DBUG_ASSERT(exclude->next_select() == NULL);
- exclude->exclude_from_tree();
+ DBUG_ASSERT(&sl->next == exclude->prev);
+
+ exclude->prev= NULL;
+
current_select= sl;
DBUG_RETURN(exclude);
}
-/**
- Put given (new) SELECT_LEX level below after currect (last) SELECT
+SELECT_LEX_UNIT *LEX::alloc_unit()
+{
+ SELECT_LEX_UNIT *unit;
+ DBUG_ENTER("LEX::alloc_unit");
+ if (!(unit= new (thd->mem_root) SELECT_LEX_UNIT()))
+ DBUG_RETURN(NULL);
+
+ unit->init_query();
+ /* TODO: reentrant problem */
+ unit->thd= thd;
+ unit->link_next= 0;
+ unit->link_prev= 0;
+ /* TODO: remove return_to */
+ unit->return_to= NULL;
+ DBUG_RETURN(unit);
+}
+
- LAST SELECT -> DUMMY SELECT
- |
- V
- NEW UNIT
- |
- V
- NEW SELECT
+SELECT_LEX *LEX::alloc_select(bool select)
+{
+ SELECT_LEX *select_lex;
+ DBUG_ENTER("LEX::alloc_select");
+ if (!(select_lex= new (thd->mem_root) SELECT_LEX()))
+ DBUG_RETURN(NULL);
+ DBUG_PRINT("info", ("Allocate select: %p #%u statement lex: %p",
+ select_lex, thd->lex->stmt_lex->current_select_number,
+ thd->lex->stmt_lex));
+ /*
+ TODO: move following init to constructor when we get rid of builtin
+ select
+ */
+ select_lex->select_number= ++thd->lex->stmt_lex->current_select_number;
+ select_lex->parent_lex= this; /* Used in init_query. */
+ select_lex->init_query();
+ if (select)
+ select_lex->init_select();
+ select_lex->nest_level_base= &this->unit;
+ select_lex->include_global((st_select_lex_node**)&all_selects_list);
+ select_lex->context.resolve_in_select_list= TRUE;
+ DBUG_RETURN(select_lex);
+}
- SELECT (*LAST*) ... FROM (SELECT (*NEW*) ... )
+SELECT_LEX_UNIT *
+LEX::create_unit(SELECT_LEX *first_sel)
+{
+ SELECT_LEX_UNIT *unit;
+ DBUG_ENTER("LEX::create_unit");
- @param nselect Select to put one level below
+ if (first_sel->master_unit())
+ DBUG_RETURN(first_sel->master_unit());
- @retval TRUE Error
- @retval FALSE OK
-*/
+ if (!(unit= alloc_unit()))
+ DBUG_RETURN(NULL);
-bool LEX::add_unit_in_brackets(SELECT_LEX *nselect)
+ unit->register_select_chain(first_sel);
+ if (first_sel->next_select())
+ {
+ unit->reset_distinct();
+ DBUG_ASSERT(!unit->fake_select_lex);
+ if (unit->add_fake_select_lex(thd))
+ DBUG_RETURN(NULL);
+ }
+ DBUG_RETURN(unit);
+}
+
+SELECT_LEX_UNIT *
+SELECT_LEX::attach_selects_chain(SELECT_LEX *first_sel,
+ Name_resolution_context *context)
{
- DBUG_ENTER("LEX::add_unit_in_brackets");
- bool distinct= nselect->master_unit()->union_distinct == nselect;
- bool rc= add_select_to_union_list(distinct, nselect->linkage, 0);
- if (rc)
- DBUG_RETURN(TRUE);
- SELECT_LEX* dummy_select= current_select;
- dummy_select->automatic_brackets= TRUE;
- dummy_select->linkage= nselect->linkage;
+ SELECT_LEX_UNIT *unit;
+ DBUG_ENTER("SELECT_LEX::attach_select_chain");
+
+ if (!(unit= parent_lex->alloc_unit()))
+ DBUG_RETURN(NULL);
+
+ unit->register_select_chain(first_sel);
+ register_unit(unit, context);
+ if (first_sel->next_select())
+ {
+ unit->reset_distinct();
+ DBUG_ASSERT(!unit->fake_select_lex);
+ if (unit->add_fake_select_lex(parent_lex->thd))
+ DBUG_RETURN(NULL);
+ }
+
+ DBUG_RETURN(unit);
+}
+
+SELECT_LEX *
+LEX::wrap_unit_into_derived(SELECT_LEX_UNIT *unit)
+{
+ SELECT_LEX *wrapping_sel;
+ Table_ident *ti;
+ DBUG_ENTER("LEX::wrap_unit_into_derived");
+
+ if (!(wrapping_sel= alloc_select(TRUE)))
+ DBUG_RETURN(NULL);
+ Name_resolution_context *context= &wrapping_sel->context;
+ context->init();
+ wrapping_sel->automatic_brackets= FALSE;
+
+ wrapping_sel->register_unit(unit, context);
/* stuff dummy SELECT * FROM (...) */
+
+ if (push_select(wrapping_sel)) // for Items & TABLE_LIST
+ DBUG_RETURN(NULL);
+
+ /* add SELECT list*/
+ {
+ Item *item= new (thd->mem_root)
+ Item_field(thd, context, NULL, NULL, &star_clex_str);
+ if (item == NULL)
+ goto err;
+ if (add_item_to_list(thd, item))
+ goto err;
+ (wrapping_sel->with_wild)++;
+ }
+
+ unit->first_select()->set_linkage(DERIVED_TABLE_TYPE);
+
+ ti= new (thd->mem_root) Table_ident(unit);
+ if (ti == NULL)
+ goto err;
+ {
+ TABLE_LIST *table_list;
+ LEX_CSTRING alias;
+ if (wrapping_sel->make_unique_derived_name(thd, &alias))
+ goto err;
+
+ if (!(table_list= wrapping_sel->add_table_to_list(thd, ti, &alias,
+ 0, TL_READ,
+ MDL_SHARED_READ)))
+ goto err;
+
+ context->resolve_in_table_list_only(table_list);
+ wrapping_sel->add_joined_table(table_list);
+ }
+
+ pop_select();
+
+ derived_tables|= DERIVED_SUBQUERY;
+
+ DBUG_RETURN(wrapping_sel);
+
+err:
+ pop_select();
+ DBUG_RETURN(NULL);
+}
+
+SELECT_LEX *LEX::wrap_select_chain_into_derived(SELECT_LEX *sel)
+{
+ SELECT_LEX *dummy_select;
+ SELECT_LEX_UNIT *unit;
+ Table_ident *ti;
+ DBUG_ENTER("LEX::wrap_select_chain_into_derived");
+
+ if (!(dummy_select= alloc_select(TRUE)))
+ DBUG_RETURN(NULL);
Name_resolution_context *context= &dummy_select->context;
- context->init();
+ dummy_select->automatic_brackets= FALSE;
+ sel->distinct= TRUE; // The first select does not have this attribute (safety)
+
+ if (!(unit= dummy_select->attach_selects_chain(sel, context)))
+ DBUG_RETURN(NULL);
+
+ /* stuff dummy SELECT * FROM (...) */
+
+ if (push_select(dummy_select)) // for Items & TABLE_LIST
+ DBUG_RETURN(NULL);
/* add SELECT list*/
- Item *item= new (thd->mem_root)
- Item_field(thd, context, NULL, NULL, &star_clex_str);
- if (unlikely(item == NULL))
- DBUG_RETURN(TRUE);
- if (unlikely(add_item_to_list(thd, item)))
- DBUG_RETURN(TRUE);
- (dummy_select->with_wild)++;
+ {
+ Item *item= new (thd->mem_root)
+ Item_field(thd, context, NULL, NULL, &star_clex_str);
+ if (item == NULL)
+ goto err;
+ if (add_item_to_list(thd, item))
+ goto err;
+ (dummy_select->with_wild)++;
+ }
- rc= mysql_new_select(this, 1, nselect);
- nselect->linkage= DERIVED_TABLE_TYPE;
- DBUG_ASSERT(nselect->outer_select() == dummy_select);
+ sel->set_linkage(DERIVED_TABLE_TYPE);
- current_select= dummy_select;
- current_select->nest_level--;
+ ti= new (thd->mem_root) Table_ident(unit);
+ if (ti == NULL)
+ goto err;
+ {
+ TABLE_LIST *table_list;
+ LEX_CSTRING alias;
+ if (dummy_select->make_unique_derived_name(thd, &alias))
+ goto err;
- SELECT_LEX_UNIT *unit= nselect->master_unit();
- Table_ident *ti= new (thd->mem_root) Table_ident(unit);
- if (unlikely(ti == NULL))
- DBUG_RETURN(TRUE);
- char buff[10];
- LEX_CSTRING alias;
- alias.length= my_snprintf(buff, sizeof(buff),
- "__%u", dummy_select->select_number);
- alias.str= thd->strmake(buff, alias.length);
- if (unlikely(!alias.str))
- DBUG_RETURN(TRUE);
+ if (!(table_list= dummy_select->add_table_to_list(thd, ti, &alias,
+ 0, TL_READ,
+ MDL_SHARED_READ)))
+ goto err;
- TABLE_LIST *table_list;
- if (unlikely(!(table_list=
- dummy_select->add_table_to_list(thd, ti, &alias,
- 0, TL_READ,
- MDL_SHARED_READ))))
- DBUG_RETURN(TRUE);
- context->resolve_in_table_list_only(table_list);
- dummy_select->add_joined_table(table_list);
+ context->resolve_in_table_list_only(table_list);
+ dummy_select->add_joined_table(table_list);
+ }
+
+ pop_select();
derived_tables|= DERIVED_SUBQUERY;
- current_select= nselect;
- current_select->nest_level++;
- DBUG_RETURN(rc);
+ DBUG_RETURN(dummy_select);
+
+err:
+ pop_select();
+ DBUG_RETURN(NULL);
+}
+
+bool LEX::push_context(Name_resolution_context *context)
+{
+ DBUG_ENTER("LEX::push_context");
+ DBUG_PRINT("info", ("Context: %p Select: %p (%d)",
+ context, context->select_lex,
+ (context->select_lex ?
+ context->select_lex->select_number:
+ 0)));
+ bool res= context_stack.push_front(context, thd->mem_root);
+ DBUG_RETURN(res);
+}
+
+
+SELECT_LEX *LEX::create_priority_nest(SELECT_LEX *first_in_nest)
+{
+ DBUG_ENTER("LEX::create_priority_nest");
+ DBUG_ASSERT(first_in_nest->first_nested);
+ enum sub_select_type wr_unit_type= first_in_nest->get_linkage();
+ bool wr_distinct= first_in_nest->distinct;
+ SELECT_LEX *attach_to= first_in_nest->first_nested;
+ attach_to->cut_next();
+ SELECT_LEX *wrapper= wrap_select_chain_into_derived(first_in_nest);
+ if (wrapper)
+ {
+ first_in_nest->first_nested= NULL;
+ wrapper->set_linkage_and_distinct(wr_unit_type, wr_distinct);
+ wrapper->first_nested= attach_to->first_nested;
+ wrapper->set_master_unit(attach_to->master_unit());
+ attach_to->link_neighbour(wrapper);
+ }
+ DBUG_RETURN(wrapper);
}
@@ -5239,7 +5531,7 @@ bool LEX::add_unit_in_brackets(SELECT_LEX *nselect)
void LEX::check_automatic_up(enum sub_select_type type)
{
if (type != INTERSECT_TYPE &&
- current_select->linkage == INTERSECT_TYPE &&
+ current_select->get_linkage() == INTERSECT_TYPE &&
current_select->outer_select() &&
current_select->outer_select()->automatic_brackets)
{
@@ -5689,10 +5981,17 @@ bool LEX::sp_for_loop_implicit_cursor_statement(THD *thd,
bounds->m_index->sp_lex_in_use= true;
sphead->reset_lex(thd, bounds->m_index);
DBUG_ASSERT(thd->lex != this);
- if (unlikely(!(item=
- new (thd->mem_root) Item_field(thd,
- thd->lex->current_context(),
- NullS, NullS, &name))))
+ /*
+ We pass NULL as Name_resolution_context here.
+ It's OK, fix_fields() will not be called for the Item_field created here.
+ The Item_field is only needed by LEX::sp_for_loop_cursor_declarations()
+ and is used to carry the loop index variable name, "rec" in this example:
+ FOR rec IN (SELECT * FROM t1)
+ DO
+ SELECT rec.a, rec.b;
+ END FOR;
+ */
+ if (!(item= new (thd->mem_root) Item_field(thd, NULL, NullS, NullS, &name)))
return true;
bounds->m_index->set_item_and_free_list(item, NULL);
if (thd->lex->sphead->restore_lex(thd))
@@ -5799,10 +6098,22 @@ bool LEX::sp_for_loop_intrange_declarations(THD *thd, Lex_for_loop_st *loop,
const LEX_CSTRING *index,
const Lex_for_loop_bounds_st &bounds)
{
- if (unlikely(!(loop->m_index=
- bounds.m_index->
- sp_add_for_loop_variable(thd, index,
- bounds.m_index->get_item()))))
+ Item *item;
+ if ((item= bounds.m_index->get_item())->type() == Item::FIELD_ITEM)
+ {
+ // We get here if the lower bound is an unknown identifier
+ my_error(ER_SP_UNDECLARED_VAR, MYF(0), item->full_name());
+ return true;
+ }
+ if ((item= bounds.m_target_bound->get_item())->type() == Item::FIELD_ITEM)
+ {
+ // We get here if the upper bound is an unknown identifier
+ my_error(ER_SP_UNDECLARED_VAR, MYF(0), item->full_name());
+ return true;
+ }
+ if (!(loop->m_index=
+ bounds.m_index->sp_add_for_loop_variable(thd, index,
+ bounds.m_index->get_item())))
return true;
if (unlikely(!(loop->m_target_bound=
bounds.m_target_bound->
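-- A hedged example of the condition the two new checks reject, assuming
-- default-sql_mode FOR loops; the identifier names are invented and client
-- delimiter handling is omitted for brevity.
--
--   BEGIN NOT ATOMIC
--     -- lower bound is an unresolved identifier:
--     -- now raises ER_SP_UNDECLARED_VAR
--     FOR i IN unknown_lower .. 10
--     DO
--       SELECT i;
--     END FOR;
--   END;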
@@ -6167,13 +6478,14 @@ sp_name *LEX::make_sp_name(THD *thd, const LEX_CSTRING *name1,
sp_head *LEX::make_sp_head(THD *thd, const sp_name *name,
- const Sp_handler *sph)
+ const Sp_handler *sph,
+ enum_sp_aggregate_type agg_type)
{
sp_package *package= get_sp_package();
sp_head *sp;
/* Order is important here: new - reset - init */
- if (likely((sp= new sp_head(package, sph))))
+ if (likely((sp= new sp_head(package, sph, agg_type))))
{
sp->reset_thd_mem_root(thd);
sp->init(this);
@@ -6196,7 +6508,8 @@ sp_head *LEX::make_sp_head(THD *thd, const sp_name *name,
sp_head *LEX::make_sp_head_no_recursive(THD *thd, const sp_name *name,
- const Sp_handler *sph)
+ const Sp_handler *sph,
+ enum_sp_aggregate_type agg_type)
{
sp_package *package= thd->lex->get_sp_package();
/*
@@ -6214,13 +6527,13 @@ sp_head *LEX::make_sp_head_no_recursive(THD *thd, const sp_name *name,
(package &&
(sph == &sp_handler_package_procedure ||
sph == &sp_handler_package_function)))
- return make_sp_head(thd, name, sph);
+ return make_sp_head(thd, name, sph, agg_type);
my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), sph->type_str());
return NULL;
}
-bool LEX::sp_body_finalize_procedure(THD *thd)
+bool LEX::sp_body_finalize_routine(THD *thd)
{
if (sphead->check_unresolved_goto())
return true;
@@ -6230,22 +6543,61 @@ bool LEX::sp_body_finalize_procedure(THD *thd)
}
+bool LEX::sp_body_finalize_procedure(THD *thd)
+{
+ return sphead->check_group_aggregate_instructions_forbid() ||
+ sp_body_finalize_routine(thd);
+}
+
+
+bool LEX::sp_body_finalize_procedure_standalone(THD *thd,
+ const sp_name *end_name)
+{
+ return sp_body_finalize_procedure(thd) ||
+ sphead->check_standalone_routine_end_name(end_name);
+}
+
+
bool LEX::sp_body_finalize_function(THD *thd)
{
- if (sphead->is_not_allowed_in_function("function"))
+ if (sphead->is_not_allowed_in_function("function") ||
+ sphead->check_group_aggregate_instructions_function())
return true;
if (!(sphead->m_flags & sp_head::HAS_RETURN))
{
my_error(ER_SP_NORETURN, MYF(0), ErrConvDQName(sphead).ptr());
return true;
}
- if (sp_body_finalize_procedure(thd))
+ if (sp_body_finalize_routine(thd))
return true;
(void) is_native_function_with_warn(thd, &sphead->m_name);
return false;
}
+bool LEX::sp_body_finalize_trigger(THD *thd)
+{
+ return sphead->is_not_allowed_in_function("trigger") ||
+ sp_body_finalize_procedure(thd);
+}
+
+
+bool LEX::sp_body_finalize_event(THD *thd)
+{
+ event_parse_data->body_changed= true;
+ return sp_body_finalize_procedure(thd);
+}
+
+
+bool LEX::stmt_create_stored_function_finalize_standalone(const sp_name *end_name)
+{
+ if (sphead->check_standalone_routine_end_name(end_name))
+ return true;
+ stmt_create_routine_finalize();
+ return false;
+}
+
+
bool LEX::sp_block_with_exceptions_finalize_declarations(THD *thd)
{
/*
@@ -6537,7 +6889,7 @@ bool LEX::maybe_start_compound_statement(THD *thd)
{
if (!sphead)
{
- if (!make_sp_head(thd, NULL, &sp_handler_procedure))
+ if (!make_sp_head(thd, NULL, &sp_handler_procedure, DEFAULT_AGGREGATE))
return true;
sphead->set_suid(SP_IS_NOT_SUID);
sphead->set_body_start(thd, thd->m_parser_state->m_lip.get_cpp_ptr());
@@ -6747,7 +7099,6 @@ Item_param *LEX::add_placeholder(THD *thd, const LEX_CSTRING *name,
my_error(ER_VIEW_SELECT_VARIABLE, MYF(0));
return NULL;
}
-
Query_fragment pos(thd, sphead, start, end);
Item_param *item= new (thd->mem_root) Item_param(thd, name,
pos.pos(), pos.length());
@@ -6778,6 +7129,38 @@ bool LEX::add_resignal_statement(THD *thd, const sp_condition_value *v)
}
+/*
+ Make an Item when an identifier is found in the FOR loop bounds:
+ FOR rec IN cursor
+ FOR var IN var1 .. xxx
+ FOR var IN row1.field1 .. xxx
+ When we parse the first expression after the "IN" keyword,
+ we don't know yet if it's a cursor name, or a scalar SP variable name,
+ or a field of a ROW SP variable. Here we create an Item_field to remember
+ the fully qualified name. Later, sp_for_loop_cursor_declarations()
+ decides how to treat this name properly.
+*/
+Item *LEX::create_item_for_loop_bound(THD *thd,
+ const LEX_CSTRING *a,
+ const LEX_CSTRING *b,
+ const LEX_CSTRING *c)
+{
+ /*
+ Pass NULL as the name resolution context.
+ This is OK, fix_fields() won't be called for this Item_field.
+ */
+ return new (thd->mem_root) Item_field(thd, NULL, a->str, b->str, c);
+}
+
+
+bool LEX::check_expr_allows_fields_or_error(THD *thd, const char *name) const
+{
+ if (select_stack_top > 0)
+ return false; // OK, fields are allowed
+ my_error(ER_BAD_FIELD_ERROR, MYF(0), name, thd->where);
+ return true; // Error, fields are not allowed
+}
+
Item *LEX::create_item_ident_nospvar(THD *thd,
const Lex_ident_sys_st *a,
const Lex_ident_sys_st *b)
@@ -6800,12 +7183,11 @@ Item *LEX::create_item_ident_nospvar(THD *thd,
my_error(ER_TABLENAME_NOT_ALLOWED_HERE, MYF(0), a->str, thd->where);
return NULL;
}
- if ((current_select->parsing_place != IN_HAVING) ||
- (current_select->get_in_sum_expr() > 0))
- return new (thd->mem_root) Item_field(thd, current_context(),
- NullS, a->str, b);
- return new (thd->mem_root) Item_ref(thd, current_context(),
- NullS, a->str, b);
+
+ if (current_select->parsing_place == FOR_LOOP_BOUND)
+ return create_item_for_loop_bound(thd, &null_clex_str, a, b);
+
+ return create_item_ident_field(thd, NullS, a->str, b);
}
@@ -7017,12 +7399,11 @@ Item *LEX::create_item_ident(THD *thd,
my_error(ER_TABLENAME_NOT_ALLOWED_HERE, MYF(0), b->str, thd->where);
return NULL;
}
- if (current_select->parsing_place != IN_HAVING ||
- current_select->get_in_sum_expr() > 0)
- return new (thd->mem_root) Item_field(thd, current_context(),
- schema, b->str, c);
- return new (thd->mem_root) Item_ref(thd, current_context(),
- schema, b->str, c);
+
+ if (current_select->parsing_place == FOR_LOOP_BOUND)
+ return create_item_for_loop_bound(thd, &null_clex_str, b, c);
+
+ return create_item_ident_field(thd, schema, b->str, c);
}
@@ -7055,11 +7436,9 @@ Item *LEX::create_item_limit(THD *thd, const Lex_ident_cli_st *ca)
#endif
safe_to_cache_query= 0;
- if (unlikely(item->type() != Item::INT_ITEM))
- {
- my_error(ER_WRONG_SPVAR_TYPE_IN_LIMIT, MYF(0));
+ if (!item->is_valid_limit_clause_variable_with_error())
return NULL;
- }
+
item->limit_clause_param= true;
return item;
}
@@ -7089,11 +7468,8 @@ Item *LEX::create_item_limit(THD *thd,
if (unlikely(!(item= create_item_spvar_row_field(thd, rh, &sa, &sb, spv,
ca->pos(), cb->end()))))
return NULL;
- if (unlikely(item->type() != Item::INT_ITEM))
- {
- my_error(ER_WRONG_SPVAR_TYPE_IN_LIMIT, MYF(0));
+ if (!item->is_valid_limit_clause_variable_with_error())
return NULL;
- }
item->limit_clause_param= true;
return item;
}
@@ -7113,15 +7489,20 @@ bool LEX::set_user_variable(THD *thd, const LEX_CSTRING *name, Item *val)
}
-Item *LEX::create_item_ident_nosp(THD *thd, Lex_ident_sys_st *name)
+Item *LEX::create_item_ident_field(THD *thd, const char *db,
+ const char *table,
+ const Lex_ident_sys_st *name)
{
+ if (check_expr_allows_fields_or_error(thd, name->str))
+ return NULL;
+
if (current_select->parsing_place != IN_HAVING ||
current_select->get_in_sum_expr() > 0)
return new (thd->mem_root) Item_field(thd, current_context(),
- NullS, NullS, name);
+ db, table, name);
return new (thd->mem_root) Item_ref(thd, current_context(),
- NullS, NullS, name);
+ db, table, name);
}
@@ -7171,6 +7552,11 @@ Item *LEX::create_item_ident_sp(THD *thd, Lex_ident_sys_st *name,
if (lex_string_eq(name, STRING_WITH_LEN("SQLERRM")))
return new (thd->mem_root) Item_func_sqlerrm(thd);
}
+
+ if (current_select->parsing_place == FOR_LOOP_BOUND)
+ return create_item_for_loop_bound(thd, &null_clex_str, &null_clex_str,
+ name);
+
return create_item_ident_nosp(thd, name);
}
@@ -7442,46 +7828,79 @@ void binlog_unsafe_map_init()
/**
@brief
- Finding fiels that are used in the GROUP BY of this st_select_lex
+ Collect fields that are used in the GROUP BY of this st_select_lex
@param thd The thread handle
@details
- This method looks through the fields which are used in the GROUP BY of this
- st_select_lex and saves this fields.
+ This method looks through the fields that are used in the GROUP BY of this
+ st_select_lex and saves info on these fields.
*/
-void st_select_lex::collect_grouping_fields(THD *thd,
- ORDER *grouping_list)
+void st_select_lex::collect_grouping_fields_for_derived(THD *thd,
+ ORDER *grouping_list)
{
grouping_tmp_fields.empty();
List_iterator<Item> li(join->fields_list);
Item *item= li++;
- for (uint i= 0; i < master_unit()->derived->table->s->fields; i++, (item=li++))
+ for (uint i= 0; i < master_unit()->derived->table->s->fields;
+ i++, (item=li++))
{
for (ORDER *ord= grouping_list; ord; ord= ord->next)
{
if ((*ord->item)->eq((Item*)item, 0))
{
- Grouping_tmp_field *grouping_tmp_field=
- new Grouping_tmp_field(master_unit()->derived->table->field[i], item);
+ Field_pair *grouping_tmp_field=
+ new Field_pair(master_unit()->derived->table->field[i], item);
grouping_tmp_fields.push_back(grouping_tmp_field);
}
}
}
}
+
+/**
+ Collect fields that are used in the GROUP BY of this SELECT
+*/
+
+bool st_select_lex::collect_grouping_fields(THD *thd)
+{
+ grouping_tmp_fields.empty();
+
+ for (ORDER *ord= group_list.first; ord; ord= ord->next)
+ {
+ Item *item= *ord->item;
+ if (item->type() != Item::FIELD_ITEM &&
+ !(item->type() == Item::REF_ITEM &&
+ ((((Item_ref *) item)->ref_type() == Item_ref::VIEW_REF) ||
+ (((Item_ref *) item)->ref_type() == Item_ref::REF))))
+ continue;
+
+ Field_pair *grouping_tmp_field=
+ new Field_pair(((Item_field *)item->real_item())->field, item);
+ if (grouping_tmp_fields.push_back(grouping_tmp_field, thd->mem_root))
+ return false;
+ }
+ if (grouping_tmp_fields.elements)
+ return false;
+ return true;
+}
+
+
/**
@brief
For a condition check possibility of exraction a formula over grouping fields
-
- @param cond The condition whose subformulas are to be analyzed
+
+ @param thd The thread handle
+ @param cond The condition whose subformulas are to be analyzed
+ @param checker The checker callback function to be applied to the nodes
+ of the condition tree
@details
This method traverses the AND-OR condition cond and for each subformula of
the condition it checks whether it can be usable for the extraction of a
condition over the grouping fields of this select. The method uses
- the call-back parameter check_processor to ckeck whether a primary formula
+ the call-back parameter checker to check whether a primary formula
depends only on grouping fields.
The subformulas that are not usable are marked with the flag NO_EXTRACTION_FL.
  The subformulas that can be entirely extracted are marked with the flag
@@ -7495,13 +7914,17 @@ void st_select_lex::collect_grouping_fields(THD *thd,
*/
void
-st_select_lex::check_cond_extraction_for_grouping_fields(Item *cond,
- TABLE_LIST *derived)
+st_select_lex::check_cond_extraction_for_grouping_fields(THD *thd, Item *cond)
{
+ if (cond->get_extraction_flag() == NO_EXTRACTION_FL)
+ return;
cond->clear_extraction_flag();
if (cond->type() == Item::COND_ITEM)
{
- bool and_cond= ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC;
+ Item_cond_and *and_cond=
+ (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) ?
+ ((Item_cond_and*) cond) : 0;
+
List<Item> *arg_list= ((Item_cond*) cond)->argument_list();
List_iterator<Item> li(*arg_list);
uint count= 0; // to count items not containing NO_EXTRACTION_FL
@@ -7509,7 +7932,7 @@ st_select_lex::check_cond_extraction_for_grouping_fields(Item *cond,
Item *item;
while ((item=li++))
{
- check_cond_extraction_for_grouping_fields(item, derived);
+ check_cond_extraction_for_grouping_fields(thd, item);
if (item->get_extraction_flag() != NO_EXTRACTION_FL)
{
count++;
@@ -7522,7 +7945,9 @@ st_select_lex::check_cond_extraction_for_grouping_fields(Item *cond,
if ((and_cond && count == 0) || item)
cond->set_extraction_flag(NO_EXTRACTION_FL);
if (count_full == arg_list->elements)
+ {
cond->set_extraction_flag(FULL_EXTRACTION_FL);
+ }
if (cond->get_extraction_flag() != 0)
{
li.rewind();
@@ -7558,7 +7983,7 @@ st_select_lex::check_cond_extraction_for_grouping_fields(Item *cond,
to figure out whether a subformula depends only on these fields or not.
@note
The built condition C is always implied by the condition cond
- (cond => C). The method tries to build the most restictive such
+    (cond => C). The method tries to build the least restrictive such
condition (i.e. for any other condition C' such that cond => C'
we have C => C').
@note
@@ -7634,6 +8059,140 @@ Item *st_select_lex::build_cond_for_grouping_fields(THD *thd, Item *cond,
}
+bool st_select_lex::set_nest_level(int new_nest_level)
+{
+ DBUG_ENTER("st_select_lex::set_nest_level");
+ DBUG_PRINT("enter", ("select #%d %p nest level: %d",
+ select_number, this, new_nest_level));
+ if (new_nest_level > (int) MAX_SELECT_NESTING)
+ {
+ my_error(ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ nest_level= new_nest_level;
+ new_nest_level++;
+ for (SELECT_LEX_UNIT *u= first_inner_unit(); u; u= u->next_unit())
+ {
+ if (u->set_nest_level(new_nest_level))
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
+
+bool st_select_lex_unit::set_nest_level(int new_nest_level)
+{
+ DBUG_ENTER("st_select_lex_unit::set_nest_level");
+ for(SELECT_LEX *sl= first_select(); sl; sl= sl->next_select())
+ {
+ if (sl->set_nest_level(new_nest_level))
+ DBUG_RETURN(TRUE);
+ }
+ if (fake_select_lex &&
+ fake_select_lex->set_nest_level(new_nest_level))
+ DBUG_RETURN(TRUE);
+ DBUG_RETURN(FALSE);
+}
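+
+/*
+  A sketch of the recursion above, assuming MAX_SELECT_NESTING is the parser's
+  nesting cap: the two set_nest_level() methods walk the tree in tandem, a
+  select passing new_nest_level+1 to its inner units and a unit passing the
+  same level to each of its selects, so a query nested deeper than
+  MAX_SELECT_NESTING fails with ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT.
+*/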
+
+
+bool st_select_lex::check_parameters(SELECT_LEX *main_select)
+{
+ DBUG_ENTER("st_select_lex::check_parameters");
+ DBUG_PRINT("enter", ("select #%d %p nest level: %d",
+ select_number, this, nest_level));
+
+
+ if ((options & OPTION_PROCEDURE_CLAUSE) &&
+ (!parent_lex->selects_allow_procedure ||
+ next_select() != NULL ||
+ this != master_unit()->first_select() ||
+ nest_level != 0))
+ {
+ my_error(ER_CANT_USE_OPTION_HERE, MYF(0), "PROCEDURE");
+ DBUG_RETURN(TRUE);
+ }
+
+ if ((options & SELECT_HIGH_PRIORITY) && this != main_select)
+ {
+ my_error(ER_CANT_USE_OPTION_HERE, MYF(0), "HIGH_PRIORITY");
+ DBUG_RETURN(TRUE);
+ }
+ if ((options & OPTION_BUFFER_RESULT) && this != main_select)
+ {
+ my_error(ER_CANT_USE_OPTION_HERE, MYF(0), "SQL_BUFFER_RESULT");
+ DBUG_RETURN(TRUE);
+ }
+ if ((options & OPTION_FOUND_ROWS) && this != main_select)
+ {
+ my_error(ER_CANT_USE_OPTION_HERE, MYF(0), "SQL_CALC_FOUND_ROWS");
+ DBUG_RETURN(TRUE);
+ }
+ if (options & OPTION_NO_QUERY_CACHE)
+ {
+ /*
+ Allow this flag only on the first top-level SELECT statement, if
+ SQL_CACHE wasn't specified.
+ */
+ if (this != main_select)
+ {
+ my_error(ER_CANT_USE_OPTION_HERE, MYF(0), "SQL_NO_CACHE");
+ DBUG_RETURN(TRUE);
+ }
+ if (parent_lex->sql_cache == LEX::SQL_CACHE)
+ {
+ my_error(ER_WRONG_USAGE, MYF(0), "SQL_CACHE", "SQL_NO_CACHE");
+ DBUG_RETURN(TRUE);
+ }
+ parent_lex->safe_to_cache_query=0;
+ parent_lex->sql_cache= LEX::SQL_NO_CACHE;
+ }
+ if (options & OPTION_TO_QUERY_CACHE)
+ {
+ /*
+ Allow this flag only on the first top-level SELECT statement, if
+ SQL_NO_CACHE wasn't specified.
+ */
+ if (this != main_select)
+ {
+ my_error(ER_CANT_USE_OPTION_HERE, MYF(0), "SQL_CACHE");
+ DBUG_RETURN(TRUE);
+ }
+ if (parent_lex->sql_cache == LEX::SQL_NO_CACHE)
+ {
+ my_error(ER_WRONG_USAGE, MYF(0), "SQL_NO_CACHE", "SQL_CACHE");
+ DBUG_RETURN(TRUE);
+ }
+ parent_lex->safe_to_cache_query=1;
+ parent_lex->sql_cache= LEX::SQL_CACHE;
+ }
+
+ for (SELECT_LEX_UNIT *u= first_inner_unit(); u; u= u->next_unit())
+ {
+ if (u->check_parameters(main_select))
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+bool st_select_lex_unit::check_parameters(SELECT_LEX *main_select)
+{
+ for(SELECT_LEX *sl= first_select(); sl; sl= sl->next_select())
+ {
+ if (sl->check_parameters(main_select))
+ return TRUE;
+ }
+ return fake_select_lex && fake_select_lex->check_parameters(main_select);
+}
+
+
+bool LEX::check_main_unit_semantics()
+{
+ if (unit.set_nest_level(0) ||
+ unit.check_parameters(first_select_lex()))
+ return TRUE;
+ return FALSE;
+}
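+
+/*
+  An illustration of what check_parameters() enforces, assuming standard UNION
+  syntax (the statements are examples only):
+
+    SELECT 1 UNION SELECT SQL_CALC_FOUND_ROWS 2;  -- rejected with
+                                                  -- ER_CANT_USE_OPTION_HERE
+    SELECT SQL_CALC_FOUND_ROWS 1 UNION SELECT 2;  -- accepted: the option is
+                                                  -- on the main select
+*/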
+
int set_statement_var_if_exists(THD *thd, const char *var_name,
size_t var_name_length, ulonglong value)
{
@@ -7684,14 +8243,23 @@ bool LEX::sp_add_cfetch(THD *thd, const LEX_CSTRING *name)
}
+bool LEX::sp_add_agg_cfetch()
+{
+ sphead->m_flags|= sp_head::HAS_AGGREGATE_INSTR;
+ sp_instr_agg_cfetch *i=
+ new (thd->mem_root) sp_instr_agg_cfetch(sphead->instructions(), spcont);
+ return i == NULL || sphead->add_instr(i);
+}
+
+
bool LEX::create_or_alter_view_finalize(THD *thd, Table_ident *table_ident)
{
sql_command= SQLCOM_CREATE_VIEW;
/* first table in list is target VIEW name */
- if (unlikely(!select_lex.add_table_to_list(thd, table_ident, NULL,
+ if (!first_select_lex()->add_table_to_list(thd, table_ident, NULL,
TL_OPTION_UPDATING,
TL_IGNORE,
- MDL_EXCLUSIVE)))
+ MDL_EXCLUSIVE))
return true;
query_tables->open_strategy= TABLE_LIST::OPEN_STUB;
return false;
@@ -7839,6 +8407,7 @@ bool LEX::create_package_finalize(THD *thd,
exp ? ErrConvDQName(name).ptr() : name->m_name.str);
return true;
}
+ // TODO: reuse code in LEX::create_package_finalize and sp_head::set_stmt_end
sphead->m_body.length= body_end - body_start;
if (unlikely(!(sphead->m_body.str= thd->strmake(body_start,
sphead->m_body.length))))
@@ -7853,7 +8422,8 @@ bool LEX::create_package_finalize(THD *thd,
sphead->restore_thd_mem_root(thd);
sp_package *pkg= sphead->get_package();
DBUG_ASSERT(pkg);
- return pkg->validate_after_parser(thd);
+ return sphead->check_group_aggregate_instructions_forbid() ||
+ pkg->validate_after_parser(thd);
}
@@ -8245,14 +8815,1610 @@ bool LEX::tvc_finalize()
bool LEX::tvc_finalize_derived()
{
derived_tables|= DERIVED_SUBQUERY;
- if (unlikely(!expr_allows_subselect || sql_command == (int)SQLCOM_PURGE))
+ if (unlikely(!expr_allows_subselect))
{
thd->parse_error();
return true;
}
- if (current_select->linkage == GLOBAL_OPTIONS_TYPE ||
+ if (current_select->get_linkage() == GLOBAL_OPTIONS_TYPE ||
unlikely(mysql_new_select(this, 1, NULL)))
return true;
- current_select->linkage= DERIVED_TABLE_TYPE;
+ current_select->set_linkage(DERIVED_TABLE_TYPE);
return tvc_finalize();
}
+
+
+void st_select_lex_unit::reset_distinct()
+{
+ union_distinct= NULL;
+ for(SELECT_LEX *sl= first_select()->next_select();
+ sl;
+ sl= sl->next_select())
+ {
+ if (sl->distinct)
+ {
+ union_distinct= sl;
+ }
+ }
+}
+
+
+void st_select_lex_unit::fix_distinct()
+{
+ if (union_distinct && this != union_distinct->master_unit())
+ reset_distinct();
+}
+
+
+void st_select_lex_unit::register_select_chain(SELECT_LEX *first_sel)
+{
+ DBUG_ASSERT(first_sel != 0);
+ slave= first_sel;
+ first_sel->prev= &slave;
+ for(SELECT_LEX *sel=first_sel; sel; sel= sel->next_select())
+ {
+ sel->master= (st_select_lex_node *)this;
+ uncacheable|= sel->uncacheable;
+ }
+}
+
+
+void st_select_lex::register_unit(SELECT_LEX_UNIT *unit,
+ Name_resolution_context *outer_context)
+{
+ if ((unit->next= slave))
+ slave->prev= &unit->next;
+ unit->prev= &slave;
+ slave= unit;
+ unit->master= this;
+ uncacheable|= unit->uncacheable;
+
+ for(SELECT_LEX *sel= unit->first_select();sel; sel= sel->next_select())
+ {
+ sel->context.outer_context= outer_context;
+ }
+}
+
+
+void st_select_lex::add_statistics(SELECT_LEX_UNIT *unit)
+{
+ for (;
+ unit;
+ unit= unit->next_unit())
+ for(SELECT_LEX *child= unit->first_select();
+ child;
+ child= child->next_select())
+ {
+ /*
+ A subselect can add fields to an outer select.
+ Reserve space for them.
+ */
+ select_n_where_fields+= child->select_n_where_fields;
+ /*
+ Aggregate functions in having clause may add fields
+ to an outer select. Count them also.
+ */
+ select_n_having_items+= child->select_n_having_items;
+ }
+}
+
+
+bool LEX::main_select_push()
+{
+ DBUG_ENTER("LEX::main_select_push");
+ current_select_number= 1;
+ builtin_select.select_number= 1;
+ if (push_select(&builtin_select))
+ DBUG_RETURN(TRUE);
+ DBUG_RETURN(FALSE);
+}
+
+void Lex_select_lock::set_to(SELECT_LEX *sel)
+{
+ if (defined_lock)
+ {
+ if (sel->master_unit() &&
+ sel == sel->master_unit()->fake_select_lex)
+ sel->master_unit()->set_lock_to_the_last_select(*this);
+ else
+ {
+ sel->parent_lex->safe_to_cache_query= 0;
+ if (update_lock)
+ {
+ sel->lock_type= TL_WRITE;
+ sel->set_lock_for_tables(TL_WRITE);
+ }
+ else
+ {
+ sel->lock_type= TL_READ_WITH_SHARED_LOCKS;
+ sel->set_lock_for_tables(TL_READ_WITH_SHARED_LOCKS);
+ }
+ }
+ }
+}
+
+bool Lex_order_limit_lock::set_to(SELECT_LEX *sel)
+{
+ /*TODO: lock */
+ //if (lock.defined_lock && sel == sel->master_unit()->fake_select_lex)
+ // return TRUE;
+ if (lock.defined_timeout)
+ {
+ THD *thd= sel->parent_lex->thd;
+ if (set_statement_var_if_exists(thd,
+ C_STRING_WITH_LEN("lock_wait_timeout"),
+ lock.timeout) ||
+ set_statement_var_if_exists(thd,
+ C_STRING_WITH_LEN("innodb_lock_wait_timeout"),
+ lock.timeout))
+ return TRUE;
+ }
+ lock.set_to(sel);
+ sel->explicit_limit= limit.explicit_limit;
+ sel->select_limit= limit.select_limit;
+ sel->offset_limit= limit.offset_limit;
+ if (order_list)
+ {
+ if (sel->get_linkage() != GLOBAL_OPTIONS_TYPE &&
+ sel->olap != UNSPECIFIED_OLAP_TYPE &&
+ (sel->get_linkage() != UNION_TYPE || sel->braces))
+ {
+ my_error(ER_WRONG_USAGE, MYF(0),
+ "CUBE/ROLLUP", "ORDER BY");
+ return TRUE;
+ }
+ sel->order_list= *(order_list);
+ }
+ sel->is_set_query_expr_tail= true;
+ return FALSE;
+}
+
+
+static void change_item_list_context(List<Item> *list,
+ Name_resolution_context *context)
+{
+ List_iterator_fast<Item> it (*list);
+ Item *item;
+ while((item= it++))
+ {
+ item->walk(&Item::change_context_processor, FALSE, (void *)context);
+ }
+}
+
+
+bool LEX::insert_select_hack(SELECT_LEX *sel)
+{
+ DBUG_ENTER("LEX::insert_select_hack");
+
+ DBUG_ASSERT(first_select_lex() == &builtin_select);
+ DBUG_ASSERT(sel != NULL);
+
+ DBUG_ASSERT(builtin_select.first_inner_unit() == NULL);
+
+ if (builtin_select.link_prev)
+ {
+ if ((*builtin_select.link_prev= builtin_select.link_next))
+ ((st_select_lex *)builtin_select.link_next)->link_prev=
+ builtin_select.link_prev;
+ builtin_select.link_prev= NULL; // indicator of removal
+ }
+
+ set_main_unit(sel->master_unit());
+
+ DBUG_ASSERT(builtin_select.table_list.elements == 1);
+ TABLE_LIST *insert_table= builtin_select.table_list.first;
+
+ if (!(insert_table->next_local= sel->table_list.first))
+ {
+ sel->table_list.next= &insert_table->next_local;
+ }
+ sel->table_list.first= insert_table;
+ sel->table_list.elements++;
+ insert_table->select_lex= sel;
+
+ sel->context.first_name_resolution_table= insert_table;
+ builtin_select.context= sel->context;
+ change_item_list_context(&field_list, &sel->context);
+
+ if (sel->tvc && !sel->next_select() &&
+ (sql_command == SQLCOM_INSERT_SELECT ||
+ sql_command == SQLCOM_REPLACE_SELECT))
+ {
+ DBUG_PRINT("info", ("'Usual' INSERT detected"));
+ many_values= sel->tvc->lists_of_values;
+ sel->options= sel->tvc->select_options;
+ sel->tvc= NULL;
+ if (sql_command == SQLCOM_INSERT_SELECT)
+ sql_command= SQLCOM_INSERT;
+ else
+ sql_command= SQLCOM_REPLACE;
+ }
+
+
+ for (SELECT_LEX *sel= all_selects_list;
+ sel;
+ sel= sel->next_select_in_list())
+ {
+ if (sel->select_number != 1)
+ sel->select_number--;
+ };
+
+ DBUG_RETURN(FALSE);
+}
+
+
+/*
+ Create an Item_singlerow_subselect for a query expression.
+*/
+Item *LEX::create_item_query_expression(THD *thd,
+ const char *tok_start,
+ st_select_lex_unit *unit)
+{
+ if (!expr_allows_subselect)
+ {
+ thd->parse_error(ER_SYNTAX_ERROR, tok_start);
+ return NULL;
+ }
+
+ // Add the subtree of subquery to the current SELECT_LEX
+ SELECT_LEX *curr_sel= select_stack_head();
+ DBUG_ASSERT(current_select == curr_sel);
+ if (!curr_sel)
+ curr_sel= &builtin_select;
+ curr_sel->register_unit(unit, &curr_sel->context);
+ curr_sel->add_statistics(unit);
+
+ return new (thd->mem_root)
+ Item_singlerow_subselect(thd, unit->first_select());
+}
+
+
+/**
+ Process unit parsed in brackets
+*/
+
+bool LEX::parsed_unit_in_brackets(SELECT_LEX_UNIT *unit)
+{
+ SELECT_LEX *first_in_nest= unit->pre_last_parse->next_select()->first_nested;
+ if (first_in_nest->first_nested != first_in_nest)
+ {
+ /* There is a priority jump starting from first_in_nest */
+ if (create_priority_nest(first_in_nest) == NULL)
+ return true;
+ unit->fix_distinct();
+ }
+ push_select(unit->fake_select_lex);
+ return false;
+}
+
+
+
+/**
+ Process tail of unit parsed in brackets
+*/
+SELECT_LEX *LEX::parsed_unit_in_brackets_tail(SELECT_LEX_UNIT *unit,
+ Lex_order_limit_lock * l)
+{
+ pop_select();
+ if (l)
+ {
+ (l)->set_to(unit->fake_select_lex);
+ }
+ return unit->first_select();
+}
+
+
+/**
+ Process select parsed in brackets
+*/
+
+SELECT_LEX *LEX::parsed_select(SELECT_LEX *sel, Lex_order_limit_lock * l)
+{
+ pop_select();
+ if (l)
+ {
+ if (sel->next_select())
+ {
+ SELECT_LEX_UNIT *unit= sel->master_unit();
+ if (!unit)
+ unit= create_unit(sel);
+ if (!unit)
+ return NULL;
+ if (!unit->fake_select_lex->is_set_query_expr_tail)
+ l->set_to(unit->fake_select_lex);
+ else
+ {
+ if (!l->order_list && !unit->fake_select_lex->explicit_limit)
+ {
+ sel= unit->fake_select_lex;
+ l->order_list= &sel->order_list;
+ }
+ else
+ sel= wrap_unit_into_derived(unit);
+ if (!sel)
+ return NULL;
+ l->set_to(sel);
+ }
+ }
+ else if (!sel->is_set_query_expr_tail)
+ {
+ l->set_to(sel);
+ }
+ else
+ {
+ if (!l->order_list && !sel->explicit_limit)
+ l->order_list= &sel->order_list;
+ else
+ {
+ SELECT_LEX_UNIT *unit= create_unit(sel);
+ if (!unit)
+ return NULL;
+ sel= wrap_unit_into_derived(unit);
+ }
+ if (!sel)
+ return NULL;
+ l->set_to(sel);
+ }
+ }
+ return sel;
+}
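+
+/*
+  A sketch of the tail handling above, assuming the usual bracket syntax: for
+  "(SELECT ... ORDER BY a LIMIT 1) ORDER BY b" the inner select has already
+  consumed its own ORDER BY/LIMIT tail (is_set_query_expr_tail), so the unit
+  is wrapped into a derived table and the outer ORDER BY b is applied to the
+  wrapping select instead.
+*/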
+
+
+/**
+ Process select parsed in brackets
+*/
+
+SELECT_LEX *LEX::parsed_select_in_brackets(SELECT_LEX *sel,
+ Lex_order_limit_lock * l)
+{
+ sel->braces= TRUE;
+ return parsed_select(sel, l);
+}
+
+
+SELECT_LEX_UNIT *LEX::parsed_select_expr_start(SELECT_LEX *s1, SELECT_LEX *s2,
+ enum sub_select_type unit_type,
+ bool distinct)
+{
+ SELECT_LEX_UNIT *res;
+ SELECT_LEX *sel1;
+ SELECT_LEX *sel2;
+ if (!s1->next_select())
+ sel1= s1;
+ else
+ {
+ sel1= wrap_unit_into_derived(s1->master_unit());
+ if (!sel1)
+ return NULL;
+ }
+ if (!s2->next_select())
+ sel2= s2;
+ else
+ {
+ sel2= wrap_unit_into_derived(s2->master_unit());
+ if (!sel2)
+ return NULL;
+ }
+ sel1->link_neighbour(sel2);
+ sel2->set_linkage_and_distinct(unit_type, distinct);
+ sel2->first_nested= sel1->first_nested= sel1;
+ res= create_unit(sel1);
+ if (res == NULL)
+ return NULL;
+ res->pre_last_parse= sel1;
+ return res;
+}
+
+
+SELECT_LEX_UNIT *LEX::parsed_select_expr_cont(SELECT_LEX_UNIT *unit,
+ SELECT_LEX *s2,
+ enum sub_select_type unit_type,
+ bool distinct, bool oracle)
+{
+ SELECT_LEX *sel1;
+ if (!s2->next_select())
+ sel1= s2;
+ else
+ {
+ sel1= wrap_unit_into_derived(s2->master_unit());
+ if (!sel1)
+ return NULL;
+ }
+ SELECT_LEX *last= unit->pre_last_parse->next_select();
+
+ int cmp= oracle? 0 : cmp_unit_op(unit_type, last->get_linkage());
+ if (cmp == 0)
+ {
+ sel1->first_nested= last->first_nested;
+ }
+ else if (cmp > 0)
+ {
+ last->first_nested= unit->pre_last_parse;
+ sel1->first_nested= last;
+ }
+ else /* cmp < 0 */
+ {
+ SELECT_LEX *first_in_nest= last->first_nested;
+ if (first_in_nest->first_nested != first_in_nest)
+ {
+ /* There is a priority jump starting from first_in_nest */
+ if ((last= create_priority_nest(first_in_nest)) == NULL)
+ return NULL;
+ unit->fix_distinct();
+ }
+ sel1->first_nested= last->first_nested;
+ }
+ last->link_neighbour(sel1);
+ sel1->set_master_unit(unit);
+ sel1->set_linkage_and_distinct(unit_type, distinct);
+ unit->pre_last_parse= last;
+ return unit;
+}
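+
+/*
+  A short example of the precedence handling above, assuming the default
+  (non-Oracle) comparison where INTERSECT binds tighter than UNION/EXCEPT:
+  in "s1 UNION s2 INTERSECT s3" cmp_unit_op(INTERSECT_TYPE, UNION_TYPE) > 0,
+  so s3->first_nested is set to s2, recording a priority jump that is later
+  resolved by create_priority_nest().
+*/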
+
+/**
+ Process parsed select in body
+*/
+
+SELECT_LEX_UNIT *LEX::parsed_body_select(SELECT_LEX *sel,
+ Lex_order_limit_lock * l)
+{
+ if (!(sel= parsed_select(sel, l)))
+ return NULL;
+
+ SELECT_LEX_UNIT *res= create_unit(sel);
+ return res;
+}
+
+/**
+ Process parsed unit in body
+*/
+
+bool LEX::parsed_body_unit(SELECT_LEX_UNIT *unit)
+{
+ SELECT_LEX *first_in_nest=
+ unit->pre_last_parse->next_select()->first_nested;
+ if (first_in_nest->first_nested != first_in_nest)
+ {
+ /* There is a priority jump starting from first_in_nest */
+ if (create_priority_nest(first_in_nest) == NULL)
+ return true;
+ unit->fix_distinct();
+ }
+ push_select(unit->fake_select_lex);
+ return false;
+}
+
+/**
+ Process parsed tail of unit in body
+
+ TODO: make processing for double tail case
+*/
+
+SELECT_LEX_UNIT *LEX::parsed_body_unit_tail(SELECT_LEX_UNIT *unit,
+ Lex_order_limit_lock * l)
+{
+ pop_select();
+ if (l)
+ {
+ (l)->set_to(unit->fake_select_lex);
+ }
+ return unit;
+}
+
+/**
+ Process subselect parsing
+*/
+
+SELECT_LEX *LEX::parsed_subselect(SELECT_LEX_UNIT *unit, char *place)
+{
+ if (!expr_allows_subselect)
+ {
+ thd->parse_error(ER_SYNTAX_ERROR, place);
+ return NULL;
+ }
+
+ // Add the subtree of subquery to the current SELECT_LEX
+ SELECT_LEX *curr_sel= select_stack_head();
+ DBUG_ASSERT(current_select == curr_sel);
+ if (curr_sel)
+ {
+ curr_sel->register_unit(unit, &curr_sel->context);
+ curr_sel->add_statistics(unit);
+ }
+
+ return unit->first_select();
+}
+
+
+
+/**
+ Process INSERT-like select
+*/
+
+bool LEX::parsed_insert_select(SELECT_LEX *first_select)
+{
+ if (sql_command == SQLCOM_INSERT ||
+ sql_command == SQLCOM_REPLACE)
+ {
+ if (sql_command == SQLCOM_INSERT)
+ sql_command= SQLCOM_INSERT_SELECT;
+ else
+ sql_command= SQLCOM_REPLACE_SELECT;
+ }
+ insert_select_hack(first_select);
+ if (check_main_unit_semantics())
+ return true;
+
+ // fix "main" select
+ SELECT_LEX *blt __attribute__((unused))= pop_select();
+ DBUG_ASSERT(blt == &builtin_select);
+ push_select(first_select);
+ return false;
+}
+
+
+bool LEX::parsed_TVC_start()
+{
+ SELECT_LEX *sel;
+ many_values.empty();
+ insert_list= 0;
+ if (!(sel= alloc_select(TRUE)) ||
+ push_select(sel))
+ return true;
+ sel->init_select();
+ sel->braces= FALSE; // just initialisation
+ return false;
+}
+
+
+SELECT_LEX *LEX::parsed_TVC_end()
+{
+
+ SELECT_LEX *res= pop_select(); // above TVC select
+ if (!(res->tvc=
+ new (thd->mem_root) table_value_constr(many_values,
+ res,
+ res->options)))
+ return NULL;
+ many_values.empty();
+ return res;
+}
+
+
+TABLE_LIST *LEX::parsed_derived_select(SELECT_LEX *sel, int for_system_time,
+ LEX_CSTRING *alias)
+{
+ TABLE_LIST *res;
+ derived_tables|= DERIVED_SUBQUERY;
+ sel->set_linkage(DERIVED_TABLE_TYPE);
+ sel->braces= FALSE;
+ // Add the subtree of subquery to the current SELECT_LEX
+ SELECT_LEX *curr_sel= select_stack_head();
+ DBUG_ASSERT(current_select == curr_sel);
+ SELECT_LEX_UNIT *unit= sel->master_unit();
+ if (!unit)
+ {
+ unit= create_unit(sel);
+ if (!unit)
+ return NULL;
+ }
+ curr_sel->register_unit(unit, &curr_sel->context);
+ curr_sel->add_statistics(unit);
+
+ Table_ident *ti= new (thd->mem_root) Table_ident(unit);
+ if (ti == NULL)
+ return NULL;
+ if (!(res= curr_sel->add_table_to_list(thd, ti, alias, 0,
+ TL_READ, MDL_SHARED_READ)))
+ return NULL;
+ if (for_system_time)
+ {
+ res->vers_conditions= vers_conditions;
+ }
+ return res;
+}
+
+TABLE_LIST *LEX::parsed_derived_unit(SELECT_LEX_UNIT *unit,
+ int for_system_time,
+ LEX_CSTRING *alias)
+{
+ TABLE_LIST *res;
+ derived_tables|= DERIVED_SUBQUERY;
+ unit->first_select()->set_linkage(DERIVED_TABLE_TYPE);
+
+ // Add the subtree of subquery to the current SELECT_LEX
+ SELECT_LEX *curr_sel= select_stack_head();
+ DBUG_ASSERT(current_select == curr_sel);
+ curr_sel->register_unit(unit, &curr_sel->context);
+ curr_sel->add_statistics(unit);
+
+ Table_ident *ti= new (thd->mem_root) Table_ident(unit);
+ if (ti == NULL)
+ return NULL;
+ if (!(res= curr_sel->add_table_to_list(thd, ti, alias, 0,
+ TL_READ, MDL_SHARED_READ)))
+ return NULL;
+ if (for_system_time)
+ {
+ res->vers_conditions= vers_conditions;
+ }
+ return res;
+}
+
+bool LEX::parsed_create_view(SELECT_LEX_UNIT *unit, int check)
+{
+ SQL_I_List<TABLE_LIST> *save= &first_select_lex()->table_list;
+ set_main_unit(unit);
+ if (check_main_unit_semantics())
+ return true;
+ first_select_lex()->table_list.push_front(save);
+ current_select= first_select_lex();
+ size_t len= thd->m_parser_state->m_lip.get_cpp_ptr() -
+ create_view->select.str;
+ void *create_view_select= thd->memdup(create_view->select.str, len);
+ create_view->select.length= len;
+ create_view->select.str= (char *) create_view_select;
+ size_t not_used;
+ trim_whitespace(thd->charset(),
+ &create_view->select, &not_used);
+ create_view->check= check;
+ parsing_options.allows_variable= TRUE;
+ return false;
+}
+
+bool LEX::select_finalize(st_select_lex_unit *expr)
+{
+ sql_command= SQLCOM_SELECT;
+ selects_allow_into= TRUE;
+ selects_allow_procedure= TRUE;
+ set_main_unit(expr);
+ return check_main_unit_semantics();
+}
+
+
+/*
+ "IN" and "EXISTS" subselect can appear in two statement types:
+
+ 1. Statements that can have table columns, such as SELECT, DELETE, UPDATE
+ 2. Statements that cannot have table columns, e.g:
+ RETURN ((1) IN (SELECT * FROM t1))
+ IF ((1) IN (SELECT * FROM t1))
+
+  Statements of the first type call main_select_push() in the beginning.
+  In that case everything is properly linked.
+
+  Statements of the second type do not call main_select_push().
+ Here we catch the second case and relink thd->lex->builtin_select and
+ select_lex to properly point to each other.
+
+ QQ: Shouldn't subselects of other type also call relink_hack()?
+ QQ: Can we do it at constructor time instead?
+*/
+
+void LEX::relink_hack(st_select_lex *select_lex)
+{
+ if (!select_stack_top) // Statements of the second type
+ {
+ if (!select_lex->get_master()->get_master())
+ ((st_select_lex *) select_lex->get_master())->
+ set_master(&builtin_select);
+ if (!builtin_select.get_slave())
+ builtin_select.set_slave(select_lex->get_master());
+ }
+}
+
+
+
+bool SELECT_LEX_UNIT::set_lock_to_the_last_select(Lex_select_lock l)
+{
+ if (l.defined_lock)
+ {
+ SELECT_LEX *sel= first_select();
+ while (sel->next_select())
+ sel= sel->next_select();
+ if (sel->braces)
+ {
+ my_error(ER_WRONG_USAGE, MYF(0), "lock options",
+ "End SELECT expression");
+ return TRUE;
+ }
+ l.set_to(sel);
+ }
+ return FALSE;
+}
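+
+/*
+  Illustration, assuming FOR UPDATE is parsed as a trailing lock clause of the
+  whole unit: in "SELECT ... UNION SELECT ... FOR UPDATE" the lock is applied
+  to the last select of the unit; if that last select is wrapped in
+  parentheses, ER_WRONG_USAGE is raised instead.
+*/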
+
+/**
+ Generate unique name for generated derived table for this SELECT
+*/
+
+bool SELECT_LEX::make_unique_derived_name(THD *thd, LEX_CSTRING *alias)
+{
+ // uint32 digits + two underscores + trailing '\0'
+ char buff[MAX_INT_WIDTH + 2 + 1];
+ alias->length= my_snprintf(buff, sizeof(buff), "__%u", select_number);
+ alias->str= thd->strmake(buff, alias->length);
+ return !alias->str;
+}
+
+
+/*
+ Make a new sp_instr_stmt and set its m_query to a concatenation
+ of two strings.
+*/
+bool LEX::new_sp_instr_stmt(THD *thd,
+ const LEX_CSTRING &prefix,
+ const LEX_CSTRING &suffix)
+{
+ LEX_STRING qbuff;
+ sp_instr_stmt *i;
+
+ if (!(i= new (thd->mem_root) sp_instr_stmt(sphead->instructions(),
+ spcont, this)))
+ return true;
+
+ qbuff.length= prefix.length + suffix.length;
+ if (!(qbuff.str= (char*) alloc_root(thd->mem_root, qbuff.length + 1)))
+ return true;
+ memcpy(qbuff.str, prefix.str, prefix.length);
+ strmake(qbuff.str + prefix.length, suffix.str, suffix.length);
+ i->m_query= qbuff;
+ return sphead->add_instr(i);
+}
+
+
+bool LEX::sp_proc_stmt_statement_finalize_buf(THD *thd, const LEX_CSTRING &qbuf)
+{
+ sphead->m_flags|= sp_get_flags_for_command(this);
+ /* "USE db" doesn't work in a procedure */
+ if (unlikely(sql_command == SQLCOM_CHANGE_DB))
+ {
+ my_error(ER_SP_BADSTATEMENT, MYF(0), "USE");
+ return true;
+ }
+ /*
+ Don't add an instruction for SET statements, since all
+ instructions for them were already added during processing
+ of "set" rule.
+ */
+ DBUG_ASSERT(sql_command != SQLCOM_SET_OPTION || var_list.is_empty());
+ if (sql_command != SQLCOM_SET_OPTION)
+ return new_sp_instr_stmt(thd, empty_clex_str, qbuf);
+ return false;
+}
+
+
+bool LEX::sp_proc_stmt_statement_finalize(THD *thd, bool no_lookahead)
+{
+ // Extract the query statement from the tokenizer
+ Lex_input_stream *lip= &thd->m_parser_state->m_lip;
+ Lex_cstring qbuf(sphead->m_tmp_query, no_lookahead ? lip->get_ptr() :
+ lip->get_tok_start());
+ return LEX::sp_proc_stmt_statement_finalize_buf(thd, qbuf);
+}
+
+
+/**
+ @brief
+ Extract the condition that can be pushed into WHERE clause
+
+ @param thd the thread handle
+ @param cond the condition from which to extract a pushed condition
+ @param remaining_cond IN/OUT the condition that will remain of cond after
+ the extraction
+  @param transformer     the transformer callback function to be
+                         applied to the fields of the condition so it
+                         can be pushed
+ @param arg parameter to be passed to the transformer
+
+ @details
+ This function builds the most restrictive condition depending only on
+ the fields used in the GROUP BY of this SELECT. These fields were
+ collected before in grouping_tmp_fields list of this SELECT.
+
+ First this method checks if this SELECT doesn't have any aggregation
+ functions and has no GROUP BY clause. If so cond can be entirely pushed
+ into WHERE.
+
+ Otherwise the method checks if there is a condition depending only on
+ grouping fields that can be extracted from cond.
+
+ The condition that can be pushed into WHERE should be transformed.
+ It is done by transformer.
+
+ The extracted condition is saved in cond_pushed_into_where of this select.
+    cond can remain non-empty after the extraction of the condition that can be
+    pushed into WHERE; the remainder is saved in remaining_cond.
+
+ @note
+    This method is called by the optimization that pushes conditions into
+    materialized derived tables/views.
+ Item::derived_field_transformer_for_where is passed as the actual
+ callback function.
+ Also it is called for pushdown into materialized IN subqueries.
+ Item::in_subq_field_transformer_for_where is passed as the actual
+ callback function.
+*/
+
+void st_select_lex::pushdown_cond_into_where_clause(THD *thd, Item *cond,
+ Item **remaining_cond,
+ Item_transformer transformer,
+ uchar *arg)
+{
+ if (!cond_pushdown_is_allowed())
+ return;
+ thd->lex->current_select= this;
+ if (have_window_funcs())
+ {
+ Item *cond_over_partition_fields;
+ check_cond_extraction_for_grouping_fields(thd, cond);
+ cond_over_partition_fields=
+ build_cond_for_grouping_fields(thd, cond, true);
+ if (cond_over_partition_fields)
+ cond_over_partition_fields= cond_over_partition_fields->transform(thd,
+ &Item::grouping_field_transformer_for_where,
+ (uchar*) this);
+ if (cond_over_partition_fields)
+ {
+ cond_over_partition_fields->walk(
+ &Item::cleanup_excluding_const_fields_processor, 0, 0);
+ cond_pushed_into_where= cond_over_partition_fields;
+ }
+
+ return;
+ }
+
+ if (!join->group_list && !with_sum_func)
+ {
+ cond=
+ cond->transform(thd, transformer, arg);
+ if (cond)
+ {
+ cond->walk(
+ &Item::cleanup_excluding_const_fields_processor, 0, 0);
+ cond_pushed_into_where= cond;
+ }
+
+ return;
+ }
+
+ /*
+ Figure out what can be extracted from cond and pushed into
+ the WHERE clause of this select.
+ */
+ Item *cond_over_grouping_fields;
+ check_cond_extraction_for_grouping_fields(thd, cond);
+ cond_over_grouping_fields=
+ build_cond_for_grouping_fields(thd, cond, true);
+
+ /*
+ Transform references to the columns of condition that can be pushed
+ into WHERE so it can be pushed.
+ */
+ if (cond_over_grouping_fields)
+ cond_over_grouping_fields= cond_over_grouping_fields->transform(thd,
+ &Item::grouping_field_transformer_for_where,
+ (uchar*) this);
+
+ if (cond_over_grouping_fields)
+ {
+
+ /*
+ Remove top conjuncts in cond that has been pushed into the WHERE
+ clause of this select
+ */
+ cond= remove_pushed_top_conjuncts(thd, cond);
+
+ cond_over_grouping_fields->walk(
+ &Item::cleanup_excluding_const_fields_processor, 0, 0);
+ cond_pushed_into_where= cond_over_grouping_fields;
+ }
+
+ *remaining_cond= cond;
+}
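+
+/*
+  A small example of the pushdown performed above, assuming a table t1(a,b)
+  (table and column names are illustrative only):
+
+    SELECT * FROM (SELECT a, MAX(b) AS mb FROM t1 GROUP BY a) AS dt
+    WHERE dt.a > 10 AND dt.mb < 20;
+
+  Only dt.a > 10 depends solely on the grouping field a, so only this conjunct
+  is extracted, transformed and stored in cond_pushed_into_where of the
+  derived table's select; dt.mb < 20 is left to other means, e.g. pushdown
+  into HAVING.
+*/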
+
+
+/**
+ @brief
+    Mark OR-conditions as non-pushable to avoid repeated pushdown
+
+ @param cond the processed condition
+
+ @details
+    Consider pushdown into a materialized derived table/view.
+    Consider an OR condition that can be pushed into HAVING while some
+    parts of this OR condition can be pushed into WHERE.
+
+    For example:
+
+ SELECT *
+ FROM t1,
+ (
+      SELECT a,MAX(c) AS m_c
+      FROM t1
+ GROUP BY a
+ ) AS dt
+    WHERE (((dt.m_c>10) AND (dt.a>2)) OR ((dt.m_c<7) AND (dt.a<3))) AND
+          (t1.a=dt.a);
+
+
+  Here the condition
+    or1 = ((dt.m_c>10) AND (dt.a>2)) OR ((dt.m_c<7) AND (dt.a<3))
+  can be pushed down into the HAVING of the materialized
+  derived table dt.
+
+  The (dt.a>2) OR (dt.a<3) part of or1 depends only on grouping fields
+  of dt and can be pushed into WHERE.
+
+ As a result:
+
+ SELECT *
+ FROM t1,
+ (
+      SELECT a,MAX(c) AS m_c
+      FROM t1
+ WHERE (dt.a>2) OR (dt.a<3)
+ GROUP BY a
+ HAVING ((dt.m_c>10) AND (dt.a>2)) OR ((dt.m_c<7) and (dt.a<3))
+ ) AS dt
+    WHERE (((dt.m_c>10) AND (dt.a>2)) OR ((dt.m_c<7) AND (dt.a<3))) AND
+          (t1.a=dt.a);
+
+
+  Here (dt.a>2) OR (dt.a<3) also remains in the HAVING of dt.
+  When the SELECT that defines dt is processed, the HAVING pushdown
+  optimization is applied. It would extract the
+  (dt.a>2) OR (dt.a<3) condition from or1 again and push it into WHERE.
+  This would cause duplicate conditions in the WHERE of dt.
+
+  To avoid such repeated pushdown, OR conditions like or1 described
+  above are marked with NO_EXTRACTION_FL.
+
+ @note
+ This method is called for pushdown into materialized
+ derived tables/views/IN subqueries optimization.
+*/
+
+void mark_or_conds_to_avoid_pushdown(Item *cond)
+{
+ if (cond->type() == Item::COND_ITEM &&
+ ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
+ {
+ List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
+ Item *item;
+ while ((item=li++))
+ {
+ if (item->type() == Item::COND_ITEM &&
+ ((Item_cond*) item)->functype() == Item_func::COND_OR_FUNC)
+ item->set_extraction_flag(NO_EXTRACTION_FL);
+ }
+ }
+ else if (cond->type() == Item::COND_ITEM &&
+ ((Item_cond*) cond)->functype() == Item_func::COND_OR_FUNC)
+ cond->set_extraction_flag(NO_EXTRACTION_FL);
+}
+
+/**
+ @brief
+ Get condition that can be pushed from HAVING into WHERE
+
+ @param thd the thread handle
+ @param cond the condition from which to extract the condition
+
+ @details
+    The method collects in the attach_to_conds list the conditions from cond
+    that can be pushed from HAVING into WHERE.
+
+    Conditions that can be pushed were marked with FULL_EXTRACTION_FL by the
+    check_cond_extraction_for_grouping_fields() method.
+    Conditions that can't be pushed were marked with NO_EXTRACTION_FL.
+    Conditions whose parts can be pushed were not marked.
+
+ There are two types of conditions that can be pushed:
+    1. A condition that can simply be moved from HAVING
+       (if cond is marked with FULL_EXTRACTION_FL, or
+        cond is an AND condition and some of its parts are marked with
+        FULL_EXTRACTION_FL).
+       In this case the condition is transformed and pushed into the
+       attach_to_conds list.
+    2. A part of some other condition c1 that can't be entirely pushed
+       (if c1 isn't marked with any flag).
+
+ For example:
+
+ SELECT t1.a,MAX(t1.b),t1.c
+ FROM t1
+ GROUP BY t1.a
+ HAVING ((t1.a > 5) AND (t1.c < 3)) OR (t1.a = 3);
+
+ Here (t1.a > 5) OR (t1.a = 3) from HAVING can be pushed into WHERE.
+
+ In this case build_pushable_cond() is called for c1.
+ This method builds a clone of the c1 part that can be pushed.
+
+  The transformation mentioned above is done with the
+  multiple_equality_transformer transformer. It transforms every multiple
+  equality in the extracted condition into a set of simple equalities.
+
+ @note
+ Conditions that can be pushed are collected in attach_to_conds in this way:
+ 1. if cond is an AND condition its parts that can be pushed into WHERE
+ are added to attach_to_conds list separately.
+ 2. in all other cases conditions are pushed into the list entirely.
+
+ @retval
+ true - if an error occurs
+ false - otherwise
+*/
+
+bool
+st_select_lex::build_pushable_cond_for_having_pushdown(THD *thd, Item *cond)
+{
+ List<Item> equalities;
+
+ /* Condition can't be pushed */
+ if (cond->get_extraction_flag() == NO_EXTRACTION_FL)
+ return false;
+
+ /**
+ Condition can be pushed entirely.
+ Transform its multiple equalities and add to attach_to_conds list.
+ */
+ if (cond->get_extraction_flag() == FULL_EXTRACTION_FL)
+ {
+ Item *result= cond->transform(thd,
+ &Item::multiple_equality_transformer,
+ (uchar *)this);
+ if (!result)
+ return true;
+ if (result->type() == Item::COND_ITEM &&
+ ((Item_cond*) result)->functype() == Item_func::COND_AND_FUNC)
+ {
+ List_iterator<Item> li(*((Item_cond*) result)->argument_list());
+ Item *item;
+ while ((item=li++))
+ {
+ if (attach_to_conds.push_back(item, thd->mem_root))
+ return true;
+ }
+ }
+ else
+ {
+ if (attach_to_conds.push_back(result, thd->mem_root))
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ There is no flag set for this condition. It means that some
+ part of this condition can be pushed.
+ */
+ if (cond->type() != Item::COND_ITEM)
+ return false;
+ if (((Item_cond *)cond)->functype() != Item_cond::COND_AND_FUNC)
+ {
+ Item *fix= cond->build_pushable_cond(thd, 0, 0);
+ if (!fix)
+ return false;
+ if (attach_to_conds.push_back(fix, thd->mem_root))
+ return true;
+ }
+ else
+ {
+ List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
+ Item *item;
+ while ((item=li++))
+ {
+ if (item->get_extraction_flag() == NO_EXTRACTION_FL)
+ continue;
+ else if (item->get_extraction_flag() == FULL_EXTRACTION_FL)
+ {
+ Item *result= item->transform(thd,
+ &Item::multiple_equality_transformer,
+ (uchar *)item);
+
+ if (!result)
+ return true;
+ if (result->type() == Item::COND_ITEM &&
+ ((Item_cond*) result)->functype() == Item_func::COND_AND_FUNC)
+ {
+ List_iterator<Item> li(*((Item_cond*) result)->argument_list());
+ Item *item;
+ while ((item=li++))
+ {
+ if (attach_to_conds.push_back(item, thd->mem_root))
+ return true;
+ }
+ }
+ else
+ {
+ if (attach_to_conds.push_back(result, thd->mem_root))
+ return true;
+ }
+ }
+ else
+ {
+ Item *fix= item->build_pushable_cond(thd, 0, 0);
+ if (!fix)
+ continue;
+ if (attach_to_conds.push_back(fix, thd->mem_root))
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+
+/**
+  Check if item is equal to some field of a Field_pair from 'pair_list'
+  and return the matching Field_pair if it exists.
+*/
+
+Field_pair *get_corresponding_field_pair(Item *item,
+ List<Field_pair> pair_list)
+{
+ DBUG_ASSERT(item->type() == Item::FIELD_ITEM ||
+ (item->type() == Item::REF_ITEM &&
+ ((((Item_ref *) item)->ref_type() == Item_ref::VIEW_REF) ||
+ (((Item_ref *) item)->ref_type() == Item_ref::REF))));
+
+ List_iterator<Field_pair> it(pair_list);
+ Field_pair *field_pair;
+ Item_field *field_item= (Item_field *) (item->real_item());
+ while ((field_pair= it++))
+ {
+ if (field_item->field == field_pair->field)
+ return field_pair;
+ }
+ return NULL;
+}
+
+
+/**
+ @brief
+    Collect fields from multiple equalities which are equal to grouping fields
+
+ @param thd the thread handle
+
+ @details
+    This method checks if the multiple equalities of the WHERE clause contain
+    fields from the GROUP BY of this SELECT. If so, all fields of such
+    multiple equalities are collected in the grouping_tmp_fields list without
+    repetitions.
+
+ @retval
+ true - if an error occurs
+ false - otherwise
+*/
+
+bool st_select_lex::collect_fields_equal_to_grouping(THD *thd)
+{
+ if (!join->cond_equal || join->cond_equal->is_empty())
+ return false;
+
+ List_iterator_fast<Item_equal> li(join->cond_equal->current_level);
+ Item_equal *item_equal;
+
+ while ((item_equal= li++))
+ {
+ Item_equal_fields_iterator it(*item_equal);
+ Item *item;
+ while ((item= it++))
+ {
+ if (get_corresponding_field_pair(item, grouping_tmp_fields))
+ break;
+ }
+ if (!item)
+ break;
+
+ it.rewind();
+ while ((item= it++))
+ {
+ if (get_corresponding_field_pair(item, grouping_tmp_fields))
+ continue;
+ Field_pair *grouping_tmp_field=
+ new Field_pair(((Item_field *)item->real_item())->field, item);
+ if (grouping_tmp_fields.push_back(grouping_tmp_field, thd->mem_root))
+ return true;
+ }
+ }
+ return false;
+}
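+
+/*
+  An illustration of the equality expansion above, assuming a table t1(a,b,c)
+  (names are examples only):
+
+    SELECT a, MAX(c) FROM t1 WHERE a = b GROUP BY a HAVING b > 10;
+
+  The multiple equality built for WHERE contains the grouping field a, so b is
+  added to grouping_tmp_fields as well, which later allows b > 10 to be
+  treated as a condition over grouping fields.
+*/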
+
+
+/**
+ @brief
+ Remove marked top conjuncts of HAVING for having pushdown
+
+  @param thd   the thread handle
+  @param cond  the condition whose subformulas are to be removed
+
+ @details
+ This method removes from cond all subformulas that can be moved from HAVING
+ into WHERE.
+
+ @retval
+ condition without removed subformulas
+ 0 if the whole 'cond' is removed
+*/
+
+Item *remove_pushed_top_conjuncts_for_having(THD *thd, Item *cond)
+{
+ /* Nothing to extract */
+ if (cond->get_extraction_flag() == NO_EXTRACTION_FL)
+ {
+ cond->clear_extraction_flag();
+ return cond;
+ }
+ /* cond can be pushed in WHERE entirely */
+ if (cond->get_extraction_flag() == FULL_EXTRACTION_FL)
+ {
+ cond->clear_extraction_flag();
+ return 0;
+ }
+
+ /* Some parts of cond can be pushed */
+ if (cond->type() == Item::COND_ITEM &&
+ ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
+ {
+ List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
+ Item *item;
+ while ((item=li++))
+ {
+ if (item->get_extraction_flag() == NO_EXTRACTION_FL)
+ item->clear_extraction_flag();
+ else if (item->get_extraction_flag() == FULL_EXTRACTION_FL)
+ {
+ if (item->type() == Item::FUNC_ITEM &&
+ ((Item_func*) item)->functype() == Item_func::MULT_EQUAL_FUNC)
+ item->set_extraction_flag(DELETION_FL);
+ else
+ {
+ item->clear_extraction_flag();
+ li.remove();
+ }
+ }
+ }
+ switch (((Item_cond*) cond)->argument_list()->elements)
+ {
+ case 0:
+ return 0;
+ case 1:
+ return (((Item_cond*) cond)->argument_list()->head());
+ default:
+ return cond;
+ }
+ }
+ return cond;
+}
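+
+/*
+  Example of the removal above, assuming FULL_EXTRACTION_FL was set on the
+  first conjunct only: for HAVING (t1.a > 2) AND (MAX(t1.b) > 12) the pushed
+  conjunct (t1.a > 2) is removed and (MAX(t1.b) > 12) is returned; a pushed
+  multiple equality would instead be kept and only marked with DELETION_FL.
+*/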
+
+
+/**
+ @brief
+ Extract condition that can be pushed from HAVING into WHERE
+
+ @param thd the thread handle
+ @param having the HAVING clause of this select
+ @param having_equal multiple equalities of HAVING
+
+ @details
+ This method builds a set of conditions dependent only on
+ fields used in the GROUP BY of this select (directly or indirectly
+ through equalities). These conditions are extracted from the HAVING
+ clause of this select.
+ The method saves these conditions into attach_to_conds list and removes
+ from HAVING conditions that can be entirely pushed into WHERE.
+
+ Example of the HAVING pushdown transformation:
+
+ SELECT t1.a,MAX(t1.b)
+ FROM t1
+ GROUP BY t1.a
+ HAVING (t1.a>2) AND (MAX(c)>12);
+
+ =>
+
+ SELECT t1.a,MAX(t1.b)
+ FROM t1
+ WHERE (t1.a>2)
+ GROUP BY t1.a
+ HAVING (MAX(c)>12);
+
+ In this method (t1.a>2) is not attached to the WHERE clause.
+ It is pushed into the attach_to_conds list to be attached to
+ the WHERE clause later.
+
+ In details:
+ 1. Collect fields used in the GROUP BY grouping_fields of this SELECT
+ 2. Collect fields equal to grouping_fields from the WHERE clause
+ of this SELECT and add them to the grouping_fields list.
+  3. Extract the most restrictive condition from the HAVING clause of this
+     select that depends only on the grouping fields (directly or indirectly
+     through equality).
+     If the extracted condition is an AND condition it is transformed into a
+     list of all its conjuncts saved in attach_to_conds. Otherwise,
+     the condition is put into attach_to_conds as its only element.
+  4. Remove conditions from the HAVING clause that can be entirely pushed
+     into WHERE.
+     Multiple equalities are not removed but marked with the DELETION_FL flag.
+     They will be deleted later in substitute_for_best_equal_field() called
+     for the HAVING condition.
+ 5. Unwrap fields wrapped in Item_ref wrappers contained in the condition
+ of attach_to_conds so the condition could be pushed into WHERE.
+
+ @note
+ This method is similar to st_select_lex::pushdown_cond_into_where_clause().
+
+  @retval the remaining HAVING condition
+  @retval NULL if the whole HAVING condition was pushed into WHERE
+*/
+
+Item *st_select_lex::pushdown_from_having_into_where(THD *thd, Item *having)
+{
+ if (!having || !group_list.first)
+ return having;
+ if (!cond_pushdown_is_allowed())
+ return having;
+
+ st_select_lex *save_curr_select= thd->lex->current_select;
+ thd->lex->current_select= this;
+
+ /*
+ 1. Collect fields used in the GROUP BY grouping fields of this SELECT
+ 2. Collect fields equal to grouping_fields from the WHERE clause
+ of this SELECT and add them to the grouping fields list.
+ */
+ if (collect_grouping_fields(thd) ||
+ collect_fields_equal_to_grouping(thd))
+ return having;
+
+ /*
+    3. Extract the most restrictive condition from the HAVING clause of this
+       select that depends only on the grouping fields (directly or indirectly
+       through equality).
+       If the extracted condition is an AND condition it is transformed into a
+       list of all its conjuncts saved in attach_to_conds. Otherwise,
+       the condition is put into attach_to_conds as its only element.
+ */
+ List_iterator_fast<Item> it(attach_to_conds);
+ Item *item;
+ check_cond_extraction_for_grouping_fields(thd, having);
+ if (build_pushable_cond_for_having_pushdown(thd, having))
+ {
+ attach_to_conds.empty();
+ goto exit;
+ }
+ if (!attach_to_conds.elements)
+ goto exit;
+
+ /*
+    4. Remove conditions from the HAVING clause that can be entirely pushed
+       into WHERE.
+       Multiple equalities are not removed but marked with the DELETION_FL flag.
+       They will be deleted later in substitute_for_best_equal_field() called
+       for the HAVING condition.
+ */
+ having= remove_pushed_top_conjuncts_for_having(thd, having);
+
+ /*
+ Change join->cond_equal which points to the multiple equalities of
+ the top level of HAVING.
+ Removal of AND conditions may leave only one conjunct in HAVING.
+
+ Example 1:
+ SELECT *
+ FROM t1
+ GROUP BY t1.a
+      HAVING (t1.a < 2) AND (t1.b = 2)
+
+ (t1.a < 2) is pushed into WHERE.
+    join->cond_equal should point to the (t1.b = 2) multiple equality now.
+
+ Example 2:
+ SELECT *
+ FROM t1
+ GROUP BY t1.a
+      HAVING (t1.a = 2) AND (t1.b < 2)
+
+ (t1.a = 2) is pushed into WHERE.
+ join->cond_equal should be NULL now.
+ */
+ if (having &&
+ having->type() == Item::FUNC_ITEM &&
+ ((Item_func*) having)->functype() == Item_func::MULT_EQUAL_FUNC)
+ join->having_equal= new (thd->mem_root) COND_EQUAL((Item_equal *)having,
+ thd->mem_root);
+ else if (!having ||
+ having->type() != Item::COND_ITEM ||
+ ((Item_cond *)having)->functype() != Item_cond::COND_AND_FUNC)
+ join->having_equal= 0;
+
+ /*
+ 5. Unwrap fields wrapped in Item_ref wrappers contained in the condition
+ of attach_to_conds so the condition could be pushed into WHERE.
+ */
+ it.rewind();
+ while ((item=it++))
+ {
+ item= item->transform(thd,
+ &Item::field_transformer_for_having_pushdown,
+ (uchar *)this);
+
+    if (item->walk(&Item::cleanup_processor, 0, STOP_PTR) ||
+ item->fix_fields(thd, NULL))
+ {
+ attach_to_conds.empty();
+ goto exit;
+ }
+ }
+exit:
+ thd->lex->current_select= save_curr_select;
+ return having;
+}
+
+
+bool LEX::stmt_install_plugin(const DDL_options_st &opt,
+ const Lex_ident_sys_st &name,
+ const LEX_CSTRING &soname)
+{
+ create_info.init();
+ if (add_create_options_with_check(opt))
+ return true;
+ sql_command= SQLCOM_INSTALL_PLUGIN;
+ comment= name;
+ ident= soname;
+ return false;
+}
+
+
+void LEX::stmt_install_plugin(const LEX_CSTRING &soname)
+{
+ sql_command= SQLCOM_INSTALL_PLUGIN;
+ comment= null_clex_str;
+ ident= soname;
+}
+
+
+bool LEX::stmt_uninstall_plugin_by_name(const DDL_options_st &opt,
+ const Lex_ident_sys_st &name)
+{
+ check_opt.init();
+ if (add_create_options_with_check(opt))
+ return true;
+ sql_command= SQLCOM_UNINSTALL_PLUGIN;
+ comment= name;
+ ident= null_clex_str;
+ return false;
+}
+
+
+bool LEX::stmt_uninstall_plugin_by_soname(const DDL_options_st &opt,
+ const LEX_CSTRING &soname)
+{
+ check_opt.init();
+ if (add_create_options_with_check(opt))
+ return true;
+ sql_command= SQLCOM_UNINSTALL_PLUGIN;
+ comment= null_clex_str;
+ ident= soname;
+ return false;
+}
+
+
+bool LEX::stmt_prepare_validate(const char *stmt_type)
+{
+ if (unlikely(table_or_sp_used()))
+ {
+ my_error(ER_SUBQUERIES_NOT_SUPPORTED, MYF(0), stmt_type);
+ return true;
+ }
+ return check_main_unit_semantics();
+}
+
+
+bool LEX::stmt_prepare(const Lex_ident_sys_st &ident, Item *code)
+{
+ sql_command= SQLCOM_PREPARE;
+ if (stmt_prepare_validate("PREPARE..FROM"))
+ return true;
+ prepared_stmt.set(ident, code, NULL);
+ return false;
+}
+
+
+bool LEX::stmt_execute_immediate(Item *code, List<Item> *params)
+{
+ sql_command= SQLCOM_EXECUTE_IMMEDIATE;
+ if (stmt_prepare_validate("EXECUTE IMMEDIATE"))
+ return true;
+ static const Lex_ident_sys immediate(STRING_WITH_LEN("IMMEDIATE"));
+ prepared_stmt.set(immediate, code, params);
+ return false;
+}
+
+
+bool LEX::stmt_execute(const Lex_ident_sys_st &ident, List<Item> *params)
+{
+ sql_command= SQLCOM_EXECUTE;
+ prepared_stmt.set(ident, NULL, params);
+ return stmt_prepare_validate("EXECUTE..USING");
+}
+
+
+void LEX::stmt_deallocate_prepare(const Lex_ident_sys_st &ident)
+{
+ sql_command= SQLCOM_DEALLOCATE_PREPARE;
+ prepared_stmt.set(ident, NULL, NULL);
+}
+
+
+bool LEX::stmt_alter_table_exchange_partition(Table_ident *table)
+{
+ DBUG_ASSERT(sql_command == SQLCOM_ALTER_TABLE);
+ first_select_lex()->db= table->db;
+ if (first_select_lex()->db.str == NULL &&
+ copy_db_to(&first_select_lex()->db))
+ return true;
+ name= table->table;
+ alter_info.partition_flags|= ALTER_PARTITION_EXCHANGE;
+ if (!first_select_lex()->add_table_to_list(thd, table, NULL,
+ TL_OPTION_UPDATING,
+ TL_READ_NO_INSERT,
+ MDL_SHARED_NO_WRITE))
+ return true;
+ DBUG_ASSERT(!m_sql_cmd);
+ m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_table_exchange_partition();
+ return m_sql_cmd == NULL;
+}
+
+
+void LEX::stmt_purge_to(const LEX_CSTRING &to)
+{
+ type= 0;
+ sql_command= SQLCOM_PURGE;
+ to_log= to.str;
+}
+
+
+bool LEX::stmt_purge_before(Item *item)
+{
+ type= 0;
+ sql_command= SQLCOM_PURGE_BEFORE;
+ value_list.empty();
+ value_list.push_front(item, thd->mem_root);
+ return check_main_unit_semantics();
+}
+
+
+bool LEX::stmt_create_udf_function(const DDL_options_st &options,
+ enum_sp_aggregate_type agg_type,
+ const Lex_ident_sys_st &name,
+ Item_result return_type,
+ const LEX_CSTRING &soname)
+{
+ if (stmt_create_function_start(options))
+ return true;
+
+ if (unlikely(is_native_function(thd, &name)))
+ {
+ my_error(ER_NATIVE_FCT_NAME_COLLISION, MYF(0), name.str);
+ return true;
+ }
+ sql_command= SQLCOM_CREATE_FUNCTION;
+ udf.name= name;
+ udf.returns= return_type;
+ udf.dl= soname.str;
+ udf.type= agg_type == GROUP_AGGREGATE ? UDFTYPE_AGGREGATE :
+ UDFTYPE_FUNCTION;
+ stmt_create_routine_finalize();
+ return false;
+}
+
+
+bool LEX::stmt_create_stored_function_start(const DDL_options_st &options,
+ enum_sp_aggregate_type agg_type,
+ const sp_name *spname)
+{
+ if (stmt_create_function_start(options) ||
+ unlikely(!make_sp_head_no_recursive(thd, spname,
+ &sp_handler_function, agg_type)))
+ return true;
+ return false;
+}
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 4eaec7d062b..58c1dd3dfae 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2010, 2018, MariaDB Corporation
+ Copyright (c) 2010, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -148,6 +148,12 @@ public:
bool copy_or_convert(THD *thd, const Lex_ident_cli_st *str, CHARSET_INFO *cs);
bool is_null() const { return str == NULL; }
bool to_size_number(ulonglong *to) const;
+ void set_valid_utf8(const LEX_CSTRING *name)
+ {
+ DBUG_ASSERT(Well_formed_prefix(system_charset_info, name->str,
+ name->length).length() == name->length);
+ str= name->str ; length= name->length;
+ }
};
@@ -163,6 +169,33 @@ public:
{
((LEX_CSTRING &) *this)= null_clex_str;
}
+ Lex_ident_sys(const char *name, size_t length)
+ {
+ LEX_CSTRING tmp= {name, length};
+ set_valid_utf8(&tmp);
+ }
+ Lex_ident_sys & operator=(const Lex_ident_sys_st &name)
+ {
+ Lex_ident_sys_st::operator=(name);
+ return *this;
+ }
+};
+
+
+/**
+ ORDER BY ... LIMIT parameters;
+*/
+class Lex_order_limit_lock: public Sql_alloc
+{
+public:
+ SQL_I_List<st_order> *order_list; /* ORDER clause */
+ Lex_select_lock lock;
+ Lex_select_limit limit;
+
+ Lex_order_limit_lock() :order_list(NULL)
+ {}
+
+ bool set_to(st_select_lex *sel);
};
@@ -173,6 +206,14 @@ enum sub_select_type
UNION_TYPE, INTERSECT_TYPE, EXCEPT_TYPE,
GLOBAL_OPTIONS_TYPE, DERIVED_TABLE_TYPE, OLAP_TYPE
};
+
+inline int cmp_unit_op(enum sub_select_type op1, enum sub_select_type op2)
+{
+ DBUG_ASSERT(op1 >= UNION_TYPE && op1 <= EXCEPT_TYPE);
+ DBUG_ASSERT(op2 >= UNION_TYPE && op2 <= EXCEPT_TYPE);
+ return (op1 == INTERSECT_TYPE ? 1 : 0) - (op2 == INTERSECT_TYPE ? 1 : 0);
+}
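+
+/*
+  Assuming only UNION/INTERSECT/EXCEPT ever reach cmp_unit_op(), the result is
+  positive when op1 is INTERSECT and op2 is not (INTERSECT has the higher
+  priority), negative in the opposite case, and 0 when both operators have the
+  same priority; e.g. cmp_unit_op(INTERSECT_TYPE, UNION_TYPE) == 1.
+*/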
+
enum unit_common_op {OP_MIX, OP_UNION, OP_INTERSECT, OP_EXCEPT};
enum enum_view_suid
@@ -192,6 +233,22 @@ enum plsql_cursor_attr_t
};
+enum enum_sp_suid_behaviour
+{
+ SP_IS_DEFAULT_SUID= 0,
+ SP_IS_NOT_SUID,
+ SP_IS_SUID
+};
+
+
+enum enum_sp_aggregate_type
+{
+ DEFAULT_AGGREGATE= 0,
+ NOT_AGGREGATE,
+ GROUP_AGGREGATE
+};
+
+
/* These may not be declared yet */
class Table_ident;
class sql_exchange;
@@ -214,6 +271,8 @@ class Item_window_func;
struct sql_digest_state;
class With_clause;
class my_var;
+class select_handler;
+class Pushdown_select;
#define ALLOC_ROOT_SET 1024
@@ -264,15 +323,6 @@ extern uint binlog_unsafe_map[256];
void binlog_unsafe_map_init();
#endif
-struct LEX_TYPE
-{
- enum enum_field_types type;
- char *length, *dec;
- CHARSET_INFO *charset;
- void set(int t, char *l, char *d, CHARSET_INFO *cs)
- { type= (enum_field_types)t; length= l; dec= d; charset= cs; }
-};
-
#ifdef MYSQL_SERVER
/*
The following hack is needed because mysql_yacc.cc does not define
@@ -301,7 +351,8 @@ struct LEX_TYPE
This is not within #ifdef because we want "EXPLAIN PARTITIONS ..." to produce
additional "partitions" column even if partitioning is not compiled in.
*/
-#define DESCRIBE_PARTITIONS 4
+#define DESCRIBE_PARTITIONS 4
+#define DESCRIBE_EXTENDED2 8
#ifdef MYSQL_SERVER
@@ -310,13 +361,6 @@ extern MYSQL_PLUGIN_IMPORT const LEX_CSTRING empty_clex_str;
extern const LEX_CSTRING star_clex_str;
extern const LEX_CSTRING param_clex_str;
-enum enum_sp_suid_behaviour
-{
- SP_IS_DEFAULT_SUID= 0,
- SP_IS_NOT_SUID,
- SP_IS_SUID
-};
-
enum enum_sp_data_access
{
SP_DEFAULT_ACCESS= 0,
@@ -326,13 +370,6 @@ enum enum_sp_data_access
SP_MODIFIES_SQL_DATA
};
-enum enum_sp_aggregate_type
-{
- DEFAULT_AGGREGATE= 0,
- NOT_AGGREGATE,
- GROUP_AGGREGATE
-};
-
const LEX_CSTRING sp_data_access_name[]=
{
{ STRING_WITH_LEN("") },
@@ -540,7 +577,7 @@ public:
unit is container of either
- One SELECT
- UNION of selects
- select_lex and unit are both inherited form select_lex_node
+  select_lex and unit are both inherited from st_select_lex_node
neighbors are two select_lex or units on the same level
All select describing structures linked with following pointers:
@@ -665,13 +702,6 @@ public:
ulonglong options;
/*
- In sql_cache we store SQL_CACHE flag as specified by user to be
- able to restore SELECT statement from internal structures.
- */
- enum e_sql_cache { SQL_CACHE_UNSPECIFIED, SQL_NO_CACHE, SQL_CACHE };
- e_sql_cache sql_cache;
-
- /*
result of this query can't be cached, bit field, can be :
UNCACHEABLE_DEPENDENT_GENERATED
UNCACHEABLE_DEPENDENT_INJECTED
@@ -681,11 +711,15 @@ public:
UNCACHEABLE_PREPARE
*/
uint8 uncacheable;
+private:
enum sub_select_type linkage;
+public:
bool is_linkage_set() const
{
return linkage == UNION_TYPE || linkage == INTERSECT_TYPE || linkage == EXCEPT_TYPE;
}
+ enum sub_select_type get_linkage() { return linkage; }
+ bool distinct;
bool no_table_names_allowed; /* used for global order by */
static void *operator new(size_t size, MEM_ROOT *mem_root) throw ()
@@ -703,13 +737,33 @@ public:
}
inline st_select_lex_node* get_master() { return master; }
+ inline st_select_lex_node* get_slave() { return slave; }
void include_down(st_select_lex_node *upper);
void add_slave(st_select_lex_node *slave_arg);
void include_neighbour(st_select_lex_node *before);
+ void link_chain_down(st_select_lex_node *first);
+ void link_neighbour(st_select_lex_node *neighbour)
+ {
+ DBUG_ASSERT(next == NULL);
+ DBUG_ASSERT(neighbour != NULL);
+ next= neighbour;
+ neighbour->prev= &next;
+ }
+ void cut_next() { next= NULL; }
void include_standalone(st_select_lex_node *sel, st_select_lex_node **ref);
void include_global(st_select_lex_node **plink);
void exclude();
void exclude_from_tree();
+ void exclude_from_global()
+ {
+ if (!link_prev)
+ return;
+ if (((*link_prev)= link_next))
+ link_next->link_prev= link_prev;
+ link_next= NULL;
+ link_prev= NULL;
+ }
+
void set_slave(st_select_lex_node *slave_arg) { slave= slave_arg; }
void move_node(st_select_lex_node *where_to_move)
@@ -725,10 +779,27 @@ public:
st_select_lex_node *insert_chain_before(st_select_lex_node **ptr_pos_to_insert,
st_select_lex_node *end_chain_node);
void move_as_slave(st_select_lex_node *new_master);
+ void set_linkage(enum sub_select_type l)
+ {
+ DBUG_ENTER("st_select_lex_node::set_linkage");
+ DBUG_PRINT("info", ("node: %p linkage: %d->%d", this, linkage, l));
+ linkage= l;
+ DBUG_VOID_RETURN;
+ }
+ /*
+    This method was created for reinitializing the LEX in mysql_admin_table()
+    and can be used only if you are going to remove all SELECT_LEXes and units
+    except the ones belonging to the LEX (LEX::unit & LEX::select); for other
+    purposes there are SELECT_LEX_UNIT::exclude_level &
+    SELECT_LEX_UNIT::exclude_tree.
+
+ It is also used in parsing to detach builtin select.
+ */
+ void cut_subtree() { slave= 0; }
friend class st_select_lex_unit;
friend bool mysql_new_select(LEX *lex, bool move_down, SELECT_LEX *sel);
friend bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
bool open_view_no_parse);
+ friend class st_select_lex;
private:
void fast_exclude();
};
@@ -760,12 +831,13 @@ protected:
bool prepare_join(THD *thd, SELECT_LEX *sl, select_result *result,
ulong additional_options,
bool is_union_select);
- bool join_union_item_types(THD *thd, List<Item> &types, uint count);
bool join_union_type_handlers(THD *thd,
class Type_holder *holders, uint count);
bool join_union_type_attributes(THD *thd,
class Type_holder *holders, uint count);
public:
+ bool join_union_item_types(THD *thd, List<Item> &types, uint count);
+public:
// Ensures that at least all members used during cleanup() are initialized.
st_select_lex_unit()
: union_result(NULL), table(NULL), result(NULL),
@@ -774,9 +846,9 @@ public:
{
}
-
TABLE *table; /* temporary table using for appending UNION results */
select_result *result;
+ st_select_lex *pre_last_parse;
bool prepared, // prepare phase already performed for UNION (unit)
optimized, // optimize phase already performed for UNION (unit)
optimized_2,
@@ -863,7 +935,7 @@ public:
{
return reinterpret_cast<st_select_lex*>(slave);
}
- inline void set_with_clause(With_clause *with_cl);
+ void set_with_clause(With_clause *with_cl);
st_select_lex_unit* next_unit()
{
return reinterpret_cast<st_select_lex_unit*>(next);
@@ -907,36 +979,70 @@ public:
int save_union_explain(Explain_query *output);
int save_union_explain_part2(Explain_query *output);
unit_common_op common_op();
+
+ void reset_distinct();
+ void fix_distinct();
+
+ void register_select_chain(SELECT_LEX *first_sel);
+
+ bool set_nest_level(int new_nest_level);
+ bool check_parameters(SELECT_LEX *main_select);
+
+ bool set_lock_to_the_last_select(Lex_select_lock l);
+
+ friend class st_select_lex;
};
typedef class st_select_lex_unit SELECT_LEX_UNIT;
typedef Bounds_checked_array<Item*> Ref_ptr_array;
-/*
- Structure which consists of the field and the item which
- produces this field.
+/**
+ Structure which consists of the field and the item that
+ corresponds to this field.
*/
-class Grouping_tmp_field :public Sql_alloc
+class Field_pair :public Sql_alloc
{
public:
- Field *tmp_field;
- Item *producing_item;
- Grouping_tmp_field(Field *fld, Item *item)
- :tmp_field(fld), producing_item(item) {}
+ Field *field;
+ Item *corresponding_item;
+ Field_pair(Field *fld, Item *item)
+ :field(fld), corresponding_item(item) {}
};
+Field_pair *get_corresponding_field_pair(Item *item,
+ List<Field_pair> pair_list);
+Field_pair *find_matching_field_pair(Item *item, List<Field_pair> pair_list);
+
#define TOUCHED_SEL_COND 1/* WHERE/HAVING/ON should be reinited before use */
#define TOUCHED_SEL_DERIVED (1<<1)/* derived should be reinited before use */
+
/*
SELECT_LEX - stores information about a parsed SELECT statement
*/
class st_select_lex: public st_select_lex_node
{
public:
+ /*
+ Currently the field first_nested is used only by the parser.
+ It contains either a reference to the first select
+ of the nest of selects to which 'this' belongs, or,
+ in the case of a priority jump, a reference to
+ the select to which the priority nest has to be attached.
+ If there is no priority jump then the first select of the
+ nest contains a reference to itself in first_nested.
+ Example:
+ select1 union select2 intersect select3
+ Here we have a priority jump at select2.
+ So select2->first_nested points to select1,
+ while select3->first_nested points to select2 and
+ select1->first_nested points to select1.
+ */
+ st_select_lex *first_nested;
+
Name_resolution_context context;
LEX_CSTRING db;
Item *where, *having; /* WHERE & HAVING clauses */
@@ -944,6 +1050,7 @@ public:
Item *prep_having;/* saved HAVING clause for prepared statement processing */
Item *cond_pushed_into_where; /* condition pushed into the select's WHERE */
Item *cond_pushed_into_having; /* condition pushed into the select's HAVING */
+ List<Item> attach_to_conds;
/* Saved values of the WHERE and HAVING clauses*/
Item::cond_result cond_value, having_value;
/*
@@ -1032,6 +1139,7 @@ public:
SQL_I_List<ORDER> order_list; /* ORDER clause */
SQL_I_List<ORDER> gorder_list;
Item *select_limit, *offset_limit; /* LIMIT clause parameters */
+ bool is_set_query_expr_tail;
/// Array of pointers to top elements of all_fields list
Ref_ptr_array ref_pointer_array;
@@ -1058,6 +1166,7 @@ public:
*/
uint hidden_bit_fields;
enum_parsing_place parsing_place; /* where we are parsing expression */
+ enum_parsing_place save_parsing_place;
enum_parsing_place context_analysis_place; /* where we are in prepare */
bool with_sum_func; /* sum function indicator */
@@ -1155,7 +1264,8 @@ public:
nesting_map name_visibility_map;
table_map with_dep;
- List<Grouping_tmp_field> grouping_tmp_fields;
+ /* the structure to store fields that are used in the GROUP BY of this select */
+ List<Field_pair> grouping_tmp_fields;
/* it is for correct printing SELECT options */
thr_lock_type lock_type;
@@ -1163,6 +1273,11 @@ public:
table_value_constr *tvc;
bool in_tvc;
+ /* The interface employed to execute the select query by a foreign engine */
+ select_handler *select_h;
+ /* The object used to organize execution of the query by a foreign engine */
+ Pushdown_select *pushdown_select;
+
/** System Versioning */
public:
uint versioned_tables;
@@ -1170,9 +1285,18 @@ public:
/* push new Item_field into item_list */
bool vers_push_field(THD *thd, TABLE_LIST *table, const LEX_CSTRING field_name);
+ Item* period_setup_conds(THD *thd, TABLE_LIST *table, Item *where);
void init_query();
void init_select();
st_select_lex_unit* master_unit() { return (st_select_lex_unit*) master; }
+ inline void set_master_unit(st_select_lex_unit *master_unit)
+ {
+ master= (st_select_lex_node *)master_unit;
+ }
+ void set_master(st_select_lex *master_arg)
+ {
+ master= master_arg;
+ }
st_select_lex_unit* first_inner_unit()
{
return (st_select_lex_unit*) slave;
@@ -1224,12 +1348,6 @@ public:
List<Item>* get_item_list();
ulong get_table_join_options();
void set_lock_for_tables(thr_lock_type lock_type);
- inline void init_order()
- {
- order_list.elements= 0;
- order_list.first= 0;
- order_list.next= &order_list.first;
- }
/*
This method was created for reinitializing LEX in mysql_admin_table() and can be
used only if you are going to remove all SELECT_LEX & units except those belonging
@@ -1360,9 +1478,10 @@ public:
With_element *find_table_def_in_with_clauses(TABLE_LIST *table);
bool check_unrestricted_recursive(bool only_standard_compliant);
bool check_subqueries_with_recursive_references();
- void collect_grouping_fields(THD *thd, ORDER *grouping_list);
- void check_cond_extraction_for_grouping_fields(Item *cond,
- TABLE_LIST *derived);
+ void collect_grouping_fields_for_derived(THD *thd, ORDER *grouping_list);
+ bool collect_grouping_fields(THD *thd);
+ bool collect_fields_equal_to_grouping(THD *thd);
+ void check_cond_extraction_for_grouping_fields(THD *thd, Item *cond);
Item *build_cond_for_grouping_fields(THD *thd, Item *cond,
bool no_to_clones);
@@ -1388,6 +1507,15 @@ public:
bool cond_pushdown_is_allowed() const
{ return !olap && !explicit_limit && !tvc; }
+ bool build_pushable_cond_for_having_pushdown(THD *thd, Item *cond);
+ void pushdown_cond_into_where_clause(THD *thd, Item *extracted_cond,
+ Item **remaining_cond,
+ Item_transformer transformer,
+ uchar *arg);
+ Item *pushdown_from_having_into_where(THD *thd, Item *having);
+
+ select_handler *find_select_handler(THD *thd);
+
private:
bool m_non_agg_field_used;
bool m_agg_func_used;
@@ -1405,6 +1533,35 @@ public:
DBUG_ASSERT(this != sel);
select_n_where_fields+= sel->select_n_where_fields;
}
+ inline void set_linkage_and_distinct(enum sub_select_type l, bool d)
+ {
+ DBUG_ENTER("SELECT_LEX::set_linkage_and_distinct");
+ DBUG_PRINT("info", ("select: %p distinct %d", this, d));
+ set_linkage(l);
+ DBUG_ASSERT(l == UNION_TYPE ||
+ l == INTERSECT_TYPE ||
+ l == EXCEPT_TYPE);
+ if (d && master_unit() && master_unit()->union_distinct != this)
+ master_unit()->union_distinct= this;
+ distinct= d;
+ with_all_modifier= !distinct;
+ DBUG_VOID_RETURN;
+ }
+ bool set_nest_level(int new_nest_level);
+ bool check_parameters(SELECT_LEX *main_select);
+ void mark_select()
+ {
+ DBUG_ENTER("st_select_lex::mark_select()");
+ DBUG_PRINT("info", ("Select #%d", select_number));
+ DBUG_VOID_RETURN;
+ }
+ void register_unit(SELECT_LEX_UNIT *unit,
+ Name_resolution_context *outer_context);
+ SELECT_LEX_UNIT *attach_selects_chain(SELECT_LEX *sel,
+ Name_resolution_context *context);
+ void add_statistics(SELECT_LEX_UNIT *unit);
+ bool make_unique_derived_name(THD *thd, LEX_CSTRING *alias);
+ void lex_start(LEX *plex);
};
typedef class st_select_lex SELECT_LEX;
@@ -2522,7 +2679,7 @@ private:
int scan_ident_start(THD *thd, Lex_ident_cli_st *str);
int scan_ident_middle(THD *thd, Lex_ident_cli_st *str,
CHARSET_INFO **cs, my_lex_states *);
- int scan_ident_delimited(THD *thd, Lex_ident_cli_st *str);
+ int scan_ident_delimited(THD *thd, Lex_ident_cli_st *str, uchar quote_char);
bool get_7bit_or_8bit_ident(THD *thd, uchar *last_char);
/** Current thread. */
@@ -2797,15 +2954,102 @@ public:
Explain_delete* save_explain_delete_data(MEM_ROOT *mem_root, THD *thd);
};
+enum account_lock_type
+{
+ ACCOUNTLOCK_UNSPECIFIED= 0,
+ ACCOUNTLOCK_LOCKED,
+ ACCOUNTLOCK_UNLOCKED
+};
+
+enum password_exp_type
+{
+ PASSWORD_EXPIRE_UNSPECIFIED= 0,
+ PASSWORD_EXPIRE_NOW,
+ PASSWORD_EXPIRE_NEVER,
+ PASSWORD_EXPIRE_DEFAULT,
+ PASSWORD_EXPIRE_INTERVAL
+};
+
+struct Account_options: public USER_RESOURCES
+{
+ Account_options() { }
+
+ void reset()
+ {
+ bzero(this, sizeof(*this));
+ ssl_type= SSL_TYPE_NOT_SPECIFIED;
+ }
+
+ enum SSL_type ssl_type; // defined in violite.h
+ LEX_CSTRING x509_subject, x509_issuer, ssl_cipher;
+ account_lock_type account_locked;
+ password_exp_type password_expire;
+ longlong num_expiration_days;
+};
class Query_arena_memroot;
/* The state of the lex parsing. This is saved in the THD struct */
+
+class Lex_prepared_stmt
+{
+ Lex_ident_sys m_name; // Statement name (in all queries)
+ Item *m_code; // PREPARE or EXECUTE IMMEDIATE source expression
+ List<Item> m_params; // List of parameters for EXECUTE [IMMEDIATE]
+public:
+
+ Lex_prepared_stmt()
+ :m_code(NULL)
+ { }
+ const Lex_ident_sys &name() const
+ {
+ return m_name;
+ }
+ uint param_count() const
+ {
+ return m_params.elements;
+ }
+ List<Item> &params()
+ {
+ return m_params;
+ }
+ void set(const Lex_ident_sys_st &ident, Item *code, List<Item> *params)
+ {
+ DBUG_ASSERT(m_params.elements == 0);
+ m_name= ident;
+ m_code= code;
+ if (params)
+ m_params= *params;
+ }
+ bool params_fix_fields(THD *thd)
+ {
+ // Fix Items in the EXECUTE..USING list
+ List_iterator_fast<Item> param_it(m_params);
+ while (Item *param= param_it++)
+ {
+ if (param->fix_fields_if_needed_for_scalar(thd, 0))
+ return true;
+ }
+ return false;
+ }
+ bool get_dynamic_sql_string(THD *thd, LEX_CSTRING *dst, String *buffer);
+ void lex_start()
+ {
+ m_params.empty();
+ }
+};
+
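// Editorial aside, not part of the patch: Lex_prepared_stmt above folds three
// members that previously lived directly in LEX (statement name, source
// expression, parameter list) into one object with set()/param_count()/
// lex_start(). A rough, self-contained sketch of the same grouping, with
// std::string and std::vector standing in for Lex_ident_sys, Item* and
// List<Item>; all names here are illustrative only.
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

class Prepared_stmt_state
{
  std::string m_name;                 // statement name
  std::string m_code;                 // PREPARE / EXECUTE IMMEDIATE source
  std::vector<std::string> m_params;  // EXECUTE .. USING arguments
public:
  void set(std::string name, std::string code, std::vector<std::string> params)
  {
    m_name= std::move(name);
    m_code= std::move(code);
    m_params= std::move(params);
  }
  std::size_t param_count() const { return m_params.size(); }
  void lex_start() { m_params.clear(); }   // reset between statements
};

int main()
{
  Prepared_stmt_state stmt;
  stmt.set("stmt1", "SELECT ?", {"@a"});
  return stmt.param_count() == 1 ? 0 : 1;
}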
+
struct LEX: public Query_tables_list
{
SELECT_LEX_UNIT unit; /* most upper unit */
- SELECT_LEX select_lex; /* first SELECT_LEX */
+ inline SELECT_LEX *first_select_lex() {return unit.first_select();}
+
+private:
+ SELECT_LEX builtin_select;
/* current SELECT_LEX in parsing */
+
+public:
SELECT_LEX *current_select;
/* list of all SELECT_LEX */
SELECT_LEX *all_selects_list;
@@ -2851,7 +3095,6 @@ struct LEX: public Query_tables_list
const char *help_arg;
const char *backup_dir; /* For RESTORE/BACKUP */
const char* to_log; /* For PURGE MASTER LOGS TO */
- const char* x509_subject,*x509_issuer,*ssl_cipher;
String *wild; /* Wildcard in SHOW {something} LIKE 'wild'*/
sql_exchange *exchange;
select_result *result;
@@ -2887,6 +3130,9 @@ struct LEX: public Query_tables_list
*/
LEX_USER *definer;
+ /* Used in ALTER/CREATE user to store account locking options */
+ Account_options account_options;
+
Table_type table_type; /* Used for SHOW CREATE */
List<Key_part_spec> ref_list;
List<LEX_USER> users_list;
@@ -2911,6 +3157,12 @@ private:
bool sp_for_loop_condition(THD *thd, const Lex_for_loop_st &loop);
bool sp_for_loop_increment(THD *thd, const Lex_for_loop_st &loop);
+ /*
+ Check if Item_field and Item_ref are allowed in the current statement.
+ @retval false OK (fields are allowed)
+ @retval true ERROR (fields are not allowed). Error is raised.
+ */
+ bool check_expr_allows_fields_or_error(THD *thd, const char *name) const;
public:
void parse_error(uint err_number= ER_SYNTAX_ERROR);
inline bool is_arena_for_set_stmt() {return arena_for_set_stmt != 0;}
@@ -2938,6 +3190,8 @@ public:
required a local context, the parser pops the top-most context.
*/
List<Name_resolution_context> context_stack;
+ SELECT_LEX *select_stack[MAX_SELECT_NESTING + 1];
+ uint select_stack_top;
SQL_I_List<ORDER> proc_list;
SQL_I_List<TABLE_LIST> auxiliary_table_list, save_list;
@@ -2950,13 +3204,13 @@ public:
LEX_MASTER_INFO mi; // used by CHANGE MASTER
LEX_SERVER_OPTIONS server_options;
LEX_CSTRING relay_log_connection_name;
- USER_RESOURCES mqh;
LEX_RESET_SLAVE reset_slave_info;
ulonglong type;
ulong next_binlog_file_number;
/* The following is used by KILL */
killed_state kill_signal;
killed_type kill_type;
+ bool is_shutdown_wait_for_slaves;
/*
This variable is used in post-parse stage to declare that sum-functions,
or functions which have sense only if GROUP BY is present, are allowed.
@@ -2977,6 +3231,8 @@ public:
syntax error back.
*/
bool expr_allows_subselect;
+ bool selects_allow_into;
+ bool selects_allow_procedure;
/*
A special command "PARSE_VCOL_EXPR" is defined for the parser
to translate a defining expression of a virtual column into an
@@ -2986,7 +3242,6 @@ public:
*/
bool parse_vcol_expr;
- enum SSL_type ssl_type; // defined in violite.h
enum enum_duplicates duplicates;
enum enum_tx_isolation tx_isolation;
enum enum_ha_read_modes ha_read_mode;
@@ -3002,6 +3257,7 @@ public:
uint profile_query_id;
uint profile_options;
uint grant, grant_tot_col, which_columns;
+ enum backup_stages backup_stage;
enum Foreign_key::fk_match_opt fk_match_option;
enum_fk_option fk_update_opt;
enum_fk_option fk_delete_opt;
@@ -3031,7 +3287,17 @@ public:
enum enum_yes_no_unknown tx_chain, tx_release;
bool safe_to_cache_query;
bool ignore;
+ bool next_is_main; // use "main" SELECT_LEX for next allocation
+ bool next_is_down; // use "main" SELECT_LEX for next allocation
st_parsing_options parsing_options;
+ uint8 lex_options; // see OPTION_LEX_*
+ /*
+ In sql_cache we store the SQL_CACHE flag as specified by the user to be
+ able to restore the SELECT statement from internal structures.
+ */
+ enum e_sql_cache { SQL_CACHE_UNSPECIFIED, SQL_NO_CACHE, SQL_CACHE };
+ e_sql_cache sql_cache;
+
Alter_info alter_info;
/*
For CREATE TABLE statement last element of table list which is not
@@ -3039,12 +3305,7 @@ public:
creating or last of tables referenced by foreign keys).
*/
TABLE_LIST *create_last_non_select_table;
- /* Prepared statements SQL syntax:*/
- LEX_CSTRING prepared_stmt_name; /* Statement name (in all queries) */
- /* PREPARE or EXECUTE IMMEDIATE source expression */
- Item *prepared_stmt_code;
- /* Names of user variables holding parameters (in EXECUTE) */
- List<Item> prepared_stmt_params;
+ Lex_prepared_stmt prepared_stmt;
sp_head *sphead;
sp_name *spname;
bool sp_lex_in_use; // Keep track on lex usage in SPs for error handling
@@ -3172,6 +3433,7 @@ public:
/* System Versioning */
vers_select_conds_t vers_conditions;
+ vers_select_conds_t period_conditions;
inline void free_set_stmt_mem_root()
{
@@ -3229,20 +3491,24 @@ public:
SELECT_LEX *sl;
SELECT_LEX_UNIT *un;
for (sl= current_select, un= sl->master_unit();
- un != &unit;
- sl= sl->outer_select(), un= sl->master_unit())
+ un && un != &unit;
+ sl= sl->outer_select(), un= (sl ? sl->master_unit() : NULL))
{
- sl->uncacheable|= cause;
- un->uncacheable|= cause;
+ sl->uncacheable|= cause;
+ un->uncacheable|= cause;
}
- select_lex.uncacheable|= cause;
+ if (sl)
+ sl->uncacheable|= cause;
}
+ if (first_select_lex())
+ first_select_lex()->uncacheable|= cause;
}
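// Editorial aside, not part of the patch: the hunk above makes the walk that
// propagates an "uncacheable" cause to all enclosing selects null-safe.
// A simplified, self-contained analog that collapses SELECT_LEX and its unit
// into a single Node; propagate_cause() is an illustrative name only.
#include <cstdint>

struct Node
{
  Node *outer= nullptr;          // enclosing select, NULL at a gap
  std::uint8_t uncacheable= 0;
};

static void propagate_cause(Node *current, Node *top, std::uint8_t cause)
{
  for (Node *n= current; n && n != top; n= n->outer)
    n->uncacheable|= cause;      // stop at the top level or at a missing link
  if (top)
    top->uncacheable|= cause;    // the top level is always marked
}

int main()
{
  Node top, middle, leaf;
  leaf.outer= &middle;
  middle.outer= &top;
  propagate_cause(&leaf, &top, 0x2);
  return (leaf.uncacheable && middle.uncacheable && top.uncacheable) ? 0 : 1;
}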
void set_trg_event_type_for_tables();
TABLE_LIST *unlink_first_table(bool *link_to_local);
void link_first_table_back(TABLE_LIST *first, bool link_to_local);
void first_lists_tables_same();
+ void fix_first_select_number();
bool can_be_merged();
bool can_use_merged();
@@ -3280,14 +3546,88 @@ public:
void cleanup_after_one_table_open();
- bool push_context(Name_resolution_context *context, MEM_ROOT *mem_root)
- {
- return context_stack.push_front(context, mem_root);
- }
+ bool push_context(Name_resolution_context *context);
void pop_context()
{
+ DBUG_ENTER("LEX::pop_context");
+#ifndef DBUG_OFF
+ Name_resolution_context *context=
+#endif
context_stack.pop();
+
+ DBUG_PRINT("info", ("Pop context %p Select: %p (%d)",
+ context, context->select_lex,
+ (context->select_lex ?
+ context->select_lex->select_number:
+ 0)));
+
+ DBUG_VOID_RETURN;
+ }
+
+ SELECT_LEX *select_stack_head()
+ {
+ if (likely(select_stack_top))
+ return select_stack[select_stack_top - 1];
+ return NULL;
+ }
+
+ bool push_select(SELECT_LEX *select_lex)
+ {
+ DBUG_ENTER("LEX::push_select");
+ DBUG_PRINT("info", ("Top Select was %p (%d) depth: %u pushed: %p (%d)",
+ select_stack_head(),
+ select_stack_top,
+ (select_stack_top ?
+ select_stack_head()->select_number :
+ 0),
+ select_lex, select_lex->select_number));
+ if (unlikely(select_stack_top > MAX_SELECT_NESTING))
+ {
+ my_error(ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (push_context(&select_lex->context))
+ DBUG_RETURN(TRUE);
+ select_stack[select_stack_top++]= select_lex;
+ current_select= select_lex;
+ DBUG_RETURN(FALSE);
+ }
+
+ SELECT_LEX *pop_select()
+ {
+ DBUG_ENTER("LEX::pop_select");
+ SELECT_LEX *select_lex;
+ if (likely(select_stack_top))
+ select_lex= select_stack[--select_stack_top];
+ else
+ select_lex= 0;
+ DBUG_PRINT("info", ("Top Select is %p (%d) depth: %u poped: %p (%d)",
+ select_stack_head(),
+ select_stack_top,
+ (select_stack_top ?
+ select_stack_head()->select_number :
+ 0),
+ select_lex,
+ (select_lex ? select_lex->select_number : 0)));
+ DBUG_ASSERT(select_lex);
+
+ pop_context();
+
+ if (unlikely(!select_stack_top))
+ {
+ current_select= NULL;
+ DBUG_PRINT("info", ("Top Select is empty"));
+ }
+ else
+ current_select= select_stack[select_stack_top - 1];
+
+ DBUG_RETURN(select_lex);
+ }
+
+ SELECT_LEX *current_select_or_default()
+ {
+ return current_select ? current_select : &builtin_select;
}
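// Editorial aside, not part of the patch: push_select()/pop_select() above
// maintain a bounded parse-time stack of selects and keep current_select in
// sync with its top. A self-contained sketch of that shape; Select_stack, Sel
// and MAX_DEPTH are illustrative stand-ins for LEX, SELECT_LEX and
// MAX_SELECT_NESTING.
#include <cstdio>

struct Sel { int number; };

class Select_stack
{
  static const unsigned MAX_DEPTH= 63;
  Sel *stack[MAX_DEPTH + 1];
  unsigned top= 0;
public:
  Sel *head() const { return top ? stack[top - 1] : nullptr; }
  bool push(Sel *s)              // returns true on error, like push_select()
  {
    if (top > MAX_DEPTH)
      return true;               // "too high level of nesting"
    stack[top++]= s;
    return false;
  }
  Sel *pop()                     // returns the popped select, like pop_select()
  {
    return top ? stack[--top] : nullptr;
  }
};

int main()
{
  Select_stack st;
  Sel outer{1}, inner{2};
  st.push(&outer);
  st.push(&inner);                                   // entering a subquery
  std::printf("current=%d\n", st.head()->number);    // 2
  st.pop();                                          // leaving the subquery
  std::printf("current=%d\n", st.head()->number);    // 1
  return 0;
}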
bool copy_db_to(LEX_CSTRING *to);
@@ -3296,6 +3636,7 @@ public:
{
return context_stack.head();
}
+
/*
Restore the LEX and THD in case of a parse error.
*/
@@ -3324,9 +3665,8 @@ public:
on its top. So select_lex (as the first added) will be at the tail
of the list.
*/
- if (&select_lex == all_selects_list && !sroutines.records)
+ if (first_select_lex() == all_selects_list && !sroutines.records)
{
- DBUG_ASSERT(!all_selects_list->next_select_in_list());
return TRUE;
}
return FALSE;
@@ -3347,27 +3687,17 @@ public:
bool last_field_generated_always_as_row_end();
bool set_bincmp(CHARSET_INFO *cs, bool bin);
- bool get_dynamic_sql_string(LEX_CSTRING *dst, String *buffer);
- bool prepared_stmt_params_fix_fields(THD *thd)
- {
- // Fix Items in the EXECUTE..USING list
- List_iterator_fast<Item> param_it(prepared_stmt_params);
- while (Item *param= param_it++)
- {
- if (param->fix_fields_if_needed_for_scalar(thd, 0))
- return true;
- }
- return false;
- }
+ bool new_sp_instr_stmt(THD *, const LEX_CSTRING &prefix,
+ const LEX_CSTRING &suffix);
+ bool sp_proc_stmt_statement_finalize_buf(THD *, const LEX_CSTRING &qbuf);
+ bool sp_proc_stmt_statement_finalize(THD *, bool no_lookahead);
+
sp_variable *sp_param_init(LEX_CSTRING *name);
bool sp_param_fill_definition(sp_variable *spvar);
int case_stmt_action_expr(Item* expr);
int case_stmt_action_when(Item *when, bool simple);
int case_stmt_action_then();
- bool add_select_to_union_list(bool is_union_distinct,
- enum sub_select_type type,
- bool is_top_level);
bool setup_select_in_parentheses();
bool set_trigger_new_row(const LEX_CSTRING *name, Item *val);
bool set_trigger_field(const LEX_CSTRING *name1, const LEX_CSTRING *name2,
@@ -3389,19 +3719,17 @@ public:
sp_name *make_sp_name(THD *thd, const LEX_CSTRING *name1,
const LEX_CSTRING *name2);
sp_name *make_sp_name_package_routine(THD *thd, const LEX_CSTRING *name);
- sp_head *make_sp_head(THD *thd, const sp_name *name, const Sp_handler *sph);
+ sp_head *make_sp_head(THD *thd, const sp_name *name, const Sp_handler *sph,
+ enum_sp_aggregate_type agg_type);
sp_head *make_sp_head_no_recursive(THD *thd, const sp_name *name,
- const Sp_handler *sph);
- sp_head *make_sp_head_no_recursive(THD *thd,
- DDL_options_st options, sp_name *name,
- const Sp_handler *sph)
- {
- if (add_create_options_with_check(options))
- return NULL;
- return make_sp_head_no_recursive(thd, name, sph);
- }
+ const Sp_handler *sph,
+ enum_sp_aggregate_type agg_type);
+ bool sp_body_finalize_routine(THD *);
+ bool sp_body_finalize_trigger(THD *);
+ bool sp_body_finalize_event(THD *);
bool sp_body_finalize_function(THD *);
bool sp_body_finalize_procedure(THD *);
+ bool sp_body_finalize_procedure_standalone(THD *, const sp_name *end_name);
sp_package *create_package_start(THD *thd,
enum_sql_command command,
const Sp_handler *sph,
@@ -3492,7 +3820,12 @@ public:
return create_item_qualified_asterisk(thd, &a, &b);
}
- Item *create_item_ident_nosp(THD *thd, Lex_ident_sys_st *name);
+ Item *create_item_ident_field(THD *thd, const char *db, const char *table,
+ const Lex_ident_sys_st *name);
+ Item *create_item_ident_nosp(THD *thd, Lex_ident_sys_st *name)
+ {
+ return create_item_ident_field(thd, NullS, NullS, name);
+ }
Item *create_item_ident_sp(THD *thd, Lex_ident_sys_st *name,
const char *start, const char *end);
Item *create_item_ident(THD *thd, Lex_ident_cli_st *cname)
@@ -3630,6 +3963,10 @@ public:
const Lex_ident_cli_st *var_name,
const Lex_ident_cli_st *field_name);
+ Item *create_item_query_expression(THD *thd,
+ const char *tok_start,
+ st_select_lex_unit *unit);
+
Item *make_item_func_replace(THD *thd, Item *org, Item *find, Item *replace);
Item *make_item_func_substr(THD *thd, Item *a, Item *b, Item *c);
Item *make_item_func_substr(THD *thd, Item *a, Item *b);
@@ -3828,6 +4165,17 @@ public:
sp_for_loop_intrange_finalize(thd, loop);
}
bool sp_for_loop_outer_block_finalize(THD *thd, const Lex_for_loop_st &loop);
+
+ /*
+ Make an Item when an identifier is found in the FOR loop bounds:
+ FOR rec IN cursor
+ FOR rec IN var1 .. var2
+ FOR rec IN row1.field1 .. xxx
+ */
+ Item *create_item_for_loop_bound(THD *thd,
+ const LEX_CSTRING *a,
+ const LEX_CSTRING *b,
+ const LEX_CSTRING *c);
/* End of FOR LOOP methods */
bool add_signal_statement(THD *thd, const class sp_condition_value *value);
@@ -3883,10 +4231,10 @@ public:
void add_key_to_list(LEX_CSTRING *field_name,
enum Key::Keytype type, bool check_exists);
// Add a constraint as a part of CREATE TABLE or ALTER TABLE
- bool add_constraint(LEX_CSTRING *name, Virtual_column_info *constr,
+ bool add_constraint(const LEX_CSTRING &name, Virtual_column_info *constr,
bool if_not_exists)
{
- constr->name= *name;
+ constr->name= name;
constr->flags= if_not_exists ?
Alter_info::CHECK_CONSTRAINT_IF_NOT_EXISTS : 0;
alter_info.check_constraint_list.push_back(constr);
@@ -3927,6 +4275,7 @@ public:
return check_create_options(create_info);
}
bool sp_add_cfetch(THD *thd, const LEX_CSTRING *name);
+ bool sp_add_agg_cfetch();
bool set_command_with_check(enum_sql_command command,
uint scope,
@@ -3975,7 +4324,7 @@ public:
}
SELECT_LEX *exclude_last_select();
- bool add_unit_in_brackets(SELECT_LEX *nselect);
+ SELECT_LEX *exclude_not_first_select(SELECT_LEX *exclude);
void check_automatic_up(enum sub_select_type type);
bool create_or_alter_view_finalize(THD *thd, Table_ident *table_ident);
bool add_alter_view(THD *thd, uint16 algorithm, enum_view_suid suid,
@@ -3983,7 +4332,6 @@ public:
bool add_create_view(THD *thd, DDL_options_st ddl,
uint16 algorithm, enum_view_suid suid,
Table_ident *table_ident);
-
bool add_grant_command(THD *thd, enum_sql_command sql_command_arg,
stored_procedure_type type_arg);
@@ -3991,6 +4339,30 @@ public:
{
return create_info.vers_info;
}
+
+ int add_period(Lex_ident name, Lex_ident_sys_st start, Lex_ident_sys_st end)
+ {
+ Table_period_info &info= create_info.period_info;
+
+ if (check_exists && info.name.streq(name))
+ return 0;
+
+ if (info.is_set())
+ {
+ my_error(ER_MORE_THAN_ONE_PERIOD, MYF(0));
+ return 1;
+ }
+ info.set_period(start, end);
+ info.name= name;
+
+ info.constr= new Virtual_column_info();
+ info.constr->expr= lt_creator.create(thd,
+ create_item_ident_nosp(thd, &start),
+ create_item_ident_nosp(thd, &end));
+ add_constraint(null_clex_str, info.constr, false);
+ return 0;
+ }
+
sp_package *get_sp_package() const;
/**
@@ -4002,7 +4374,7 @@ public:
*/
bool check_simple_select(const LEX_CSTRING *option)
{
- if (current_select != &select_lex)
+ if (current_select != &builtin_select)
{
char command[80];
strmake(command, option->str, MY_MIN(option->length, sizeof(command)-1));
@@ -4020,6 +4392,116 @@ public:
}
bool tvc_finalize();
bool tvc_finalize_derived();
+
+ bool make_select_in_brackets(SELECT_LEX* dummy_select,
+ SELECT_LEX *nselect, bool automatic);
+
+ SELECT_LEX_UNIT *alloc_unit();
+ SELECT_LEX *alloc_select(bool is_select);
+ SELECT_LEX_UNIT *create_unit(SELECT_LEX*);
+ SELECT_LEX *wrap_unit_into_derived(SELECT_LEX_UNIT *unit);
+ SELECT_LEX *wrap_select_chain_into_derived(SELECT_LEX *sel);
+ bool main_select_push();
+ bool insert_select_hack(SELECT_LEX *sel);
+ SELECT_LEX *create_priority_nest(SELECT_LEX *first_in_nest);
+
+ void set_main_unit(st_select_lex_unit *u)
+ {
+ unit.options= u->options;
+ unit.uncacheable= u->uncacheable;
+ unit.register_select_chain(u->first_select());
+ unit.first_select()->options|= builtin_select.options;
+ unit.fake_select_lex= u->fake_select_lex;
+ unit.union_distinct= u->union_distinct;
+ unit.set_with_clause(u->with_clause);
+ builtin_select.exclude_from_global();
+ }
+ bool check_main_unit_semantics();
+
+ // reactions to different parsed parts (bodies are in sql_yacc.yy)
+ bool parsed_unit_in_brackets(SELECT_LEX_UNIT *unit);
+ SELECT_LEX *parsed_select(SELECT_LEX *sel, Lex_order_limit_lock * l);
+ SELECT_LEX *parsed_unit_in_brackets_tail(SELECT_LEX_UNIT *unit,
+ Lex_order_limit_lock * l);
+ SELECT_LEX *parsed_select_in_brackets(SELECT_LEX *sel,
+ Lex_order_limit_lock * l);
+ SELECT_LEX_UNIT *parsed_select_expr_start(SELECT_LEX *s1, SELECT_LEX *s2,
+ enum sub_select_type unit_type,
+ bool distinct);
+ SELECT_LEX_UNIT *parsed_select_expr_cont(SELECT_LEX_UNIT *unit,
+ SELECT_LEX *s2,
+ enum sub_select_type unit_type,
+ bool distinct, bool oracle);
+ SELECT_LEX_UNIT *parsed_body_select(SELECT_LEX *sel,
+ Lex_order_limit_lock * l);
+ bool parsed_body_unit(SELECT_LEX_UNIT *unit);
+ SELECT_LEX_UNIT *parsed_body_unit_tail(SELECT_LEX_UNIT *unit,
+ Lex_order_limit_lock * l);
+ SELECT_LEX *parsed_subselect(SELECT_LEX_UNIT *unit, char *place);
+ bool parsed_insert_select(SELECT_LEX *first_select);
+ bool parsed_TVC_start();
+ SELECT_LEX *parsed_TVC_end();
+ TABLE_LIST *parsed_derived_select(SELECT_LEX *sel, int for_system_time,
+ LEX_CSTRING *alias);
+ TABLE_LIST *parsed_derived_unit(SELECT_LEX_UNIT *unit,
+ int for_system_time,
+ LEX_CSTRING *alias);
+ bool parsed_create_view(SELECT_LEX_UNIT *unit, int check);
+ bool select_finalize(st_select_lex_unit *expr);
+ void relink_hack(st_select_lex *select_lex);
+
+ bool stmt_install_plugin(const DDL_options_st &opt,
+ const Lex_ident_sys_st &name,
+ const LEX_CSTRING &soname);
+ void stmt_install_plugin(const LEX_CSTRING &soname);
+
+ bool stmt_uninstall_plugin_by_name(const DDL_options_st &opt,
+ const Lex_ident_sys_st &name);
+ bool stmt_uninstall_plugin_by_soname(const DDL_options_st &opt,
+ const LEX_CSTRING &soname);
+ bool stmt_prepare_validate(const char *stmt_type);
+ bool stmt_prepare(const Lex_ident_sys_st &ident, Item *code);
+ bool stmt_execute(const Lex_ident_sys_st &ident, List<Item> *params);
+ bool stmt_execute_immediate(Item *code, List<Item> *params);
+ void stmt_deallocate_prepare(const Lex_ident_sys_st &ident);
+
+ bool stmt_alter_table_exchange_partition(Table_ident *table);
+
+ void stmt_purge_to(const LEX_CSTRING &to);
+ bool stmt_purge_before(Item *item);
+
+private:
+ bool stmt_create_routine_start(const DDL_options_st &options)
+ {
+ create_info.set(options);
+ return main_select_push() || check_create_options(options);
+ }
+public:
+ bool stmt_create_function_start(const DDL_options_st &options)
+ {
+ sql_command= SQLCOM_CREATE_SPFUNCTION;
+ return stmt_create_routine_start(options);
+ }
+ bool stmt_create_procedure_start(const DDL_options_st &options)
+ {
+ sql_command= SQLCOM_CREATE_PROCEDURE;
+ return stmt_create_routine_start(options);
+ }
+ void stmt_create_routine_finalize()
+ {
+ pop_select(); // main select
+ }
+
+ bool stmt_create_stored_function_start(const DDL_options_st &options,
+ enum_sp_aggregate_type,
+ const sp_name *name);
+ bool stmt_create_stored_function_finalize_standalone(const sp_name *end_name);
+
+ bool stmt_create_udf_function(const DDL_options_st &options,
+ enum_sp_aggregate_type agg_type,
+ const Lex_ident_sys_st &name,
+ Item_result return_type,
+ const LEX_CSTRING &soname);
};
@@ -4307,5 +4789,7 @@ Item* handle_sql2003_note184_exception(THD *thd, Item* left, bool equal,
void sp_create_assignment_lex(THD *thd, bool no_lookahead);
bool sp_create_assignment_instr(THD *thd, bool no_lookahead);
+void mark_or_conds_to_avoid_pushdown(Item *cond);
+
#endif /* MYSQL_SERVER */
#endif /* SQL_LEX_INCLUDED */
diff --git a/sql/sql_list.h b/sql/sql_list.h
index 39a1c3375e0..60ec8ab4177 100644
--- a/sql/sql_list.h
+++ b/sql/sql_list.h
@@ -518,6 +518,12 @@ public:
empty();
}
T *elem(uint n) { return (T*) base_list::elem(n); }
+ // Create a new list with one element
+ static List<T> *make(MEM_ROOT *mem_root, T *first)
+ {
+ List<T> *res= new (mem_root) List<T>;
+ return res == NULL || res->push_back(first, mem_root) ? NULL : res;
+ }
};
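// Editorial aside, not part of the patch: List<T>::make() above is a
// "construct-or-NULL" factory; it returns NULL both when allocating the list
// itself fails and when appending the first element fails. A rough standalone
// sketch of the same shape; Single_list and Item are illustrative types only.
#include <new>

struct Item { int value; };

struct Single_list
{
  Item *head= nullptr;
  bool push_back(Item *it) { head= it; return false; }  // false means success

  // Mirrors List<T>::make(): NULL if either the allocation or the push fails.
  static Single_list *make(Item *first)
  {
    Single_list *res= new (std::nothrow) Single_list;
    return res == nullptr || res->push_back(first) ? nullptr : res;
  }
};

int main()
{
  Item i{42};
  Single_list *lst= Single_list::make(&i);
  int rc= (lst && lst->head == &i) ? 0 : 1;
  delete lst;
  return rc;
}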
@@ -611,7 +617,7 @@ struct ilink
struct ilink **prev,*next;
static void *operator new(size_t size) throw ()
{
- return (void*)my_malloc((uint)size, MYF(MY_WME | MY_FAE | ME_FATALERROR));
+ return (void*)my_malloc((uint)size, MYF(MY_WME | MY_FAE | ME_FATAL));
}
static void operator delete(void* ptr_arg, size_t)
{
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 8631569a30b..ba1403837b8 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -41,7 +41,8 @@
#include "sql_trigger.h"
#include "sql_derived.h"
#include "sql_show.h"
-#include "debug_sync.h"
+
+#include "wsrep_mysqld.h"
extern "C" int _my_b_net_read(IO_CACHE *info, uchar *Buffer, size_t Count);
@@ -99,64 +100,41 @@ public:
#define PUSH(A) *(stack_pos++)=(A)
#ifdef WITH_WSREP
-/** If requested by wsrep_load_data_splitting, commit and restart
-the transaction after every 10,000 inserted rows. */
-
-static bool wsrep_load_data_split(THD *thd, const TABLE *table,
- const COPY_INFO &info)
+/** If requested by wsrep_load_data_splitting and streaming replication is
+ not enabled, replicate a streaming fragment every 10,000 rows.*/
+class Wsrep_load_data_split
{
- DBUG_ENTER("wsrep_load_data_split");
-
- if (!wsrep_load_data_splitting || !wsrep_on(thd)
- || !info.records || (info.records % 10000)
- || !thd->transaction.stmt.ha_list
- || thd->transaction.stmt.ha_list->ht() != binlog_hton
- || !thd->transaction.stmt.ha_list->next()
- || thd->transaction.stmt.ha_list->next()->next())
- DBUG_RETURN(false);
-
- if (handlerton* hton= thd->transaction.stmt.ha_list->next()->ht())
+public:
+ Wsrep_load_data_split(THD *thd)
+ : m_thd(thd)
+ , m_load_data_splitting(wsrep_load_data_splitting)
+ , m_fragment_unit(thd->wsrep_trx().streaming_context().fragment_unit())
+ , m_fragment_size(thd->wsrep_trx().streaming_context().fragment_size())
{
- if (hton->db_type != DB_TYPE_INNODB)
- DBUG_RETURN(false);
- WSREP_DEBUG("intermediate transaction commit in LOAD DATA");
- wsrep_set_load_multi_commit(thd, true);
- if (wsrep_run_wsrep_commit(thd, true) != WSREP_TRX_OK) DBUG_RETURN(true);
- if (binlog_hton->commit(binlog_hton, thd, true)) DBUG_RETURN(true);
- wsrep_post_commit(thd, true);
- hton->commit(hton, thd, true);
- wsrep_set_load_multi_commit(thd, false);
- DEBUG_SYNC(thd, "intermediate_transaction_commit");
- table->file->extra(HA_EXTRA_FAKE_START_STMT);
+ if (WSREP(m_thd) && m_load_data_splitting)
+ {
+ /* Override streaming settings with backward compatible values for
+ load data splitting */
+ m_thd->wsrep_cs().streaming_params(wsrep::streaming_context::row, 10000);
+ }
}
- DBUG_RETURN(false);
-}
-/*
- If the commit fails, then an early return from
- the function occurs there and therefore we need
- to reset the table->auto_increment_field_not_null
- flag, which is usually reset after calling
- the write_record():
-*/
-#define WSREP_LOAD_DATA_SPLIT(thd,table,info) \
- if (wsrep_load_data_split(thd,table,info)) \
- { \
- table->auto_increment_field_not_null= FALSE; \
- DBUG_RETURN(1); \
+ ~Wsrep_load_data_split()
+ {
+ if (WSREP(m_thd) && m_load_data_splitting)
+ {
+ /* Restore original settings */
+ m_thd->wsrep_cs().streaming_params(m_fragment_unit, m_fragment_size);
+ }
}
-#else /* WITH_WSREP */
-#define WSREP_LOAD_DATA_SPLIT(thd,table,info) /* empty */
+private:
+ THD *m_thd;
+ my_bool m_load_data_splitting;
+ enum wsrep::streaming_context::fragment_unit m_fragment_unit;
+ size_t m_fragment_size;
+};
#endif /* WITH_WSREP */
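// Editorial aside, not part of the patch: Wsrep_load_data_split above is a
// scope guard; its constructor overrides the streaming settings and its
// destructor restores the saved originals on every exit path. A self-contained
// sketch of the save/override/restore pattern; Settings, Load_data_split_guard
// and load_rows() are illustrative names only.
#include <cstddef>

struct Settings
{
  const char *fragment_unit= "bytes";
  std::size_t fragment_size= 0;
};

class Load_data_split_guard
{
  Settings &m_settings;
  const char *m_saved_unit;
  std::size_t m_saved_size;
public:
  explicit Load_data_split_guard(Settings &s)
    : m_settings(s), m_saved_unit(s.fragment_unit), m_saved_size(s.fragment_size)
  {
    m_settings.fragment_unit= "rows";   // backward compatible override
    m_settings.fragment_size= 10000;    // one fragment every 10,000 rows
  }
  ~Load_data_split_guard()
  {
    m_settings.fragment_unit= m_saved_unit;   // restore on any return
    m_settings.fragment_size= m_saved_size;
  }
};

// Usage mirrors mysql_load(): construct the guard at the top of the function
// and the restore happens automatically at every early return.
static int load_rows(Settings &s)
{
  Load_data_split_guard guard(s);
  return 0;   // ... read and write rows ...
}

int main()
{
  Settings s;
  load_rows(s);
  return s.fragment_size == 0 ? 0 : 1;   // settings restored after the call
}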
-#define WRITE_RECORD(thd,table,info) \
- do { \
- int err_= write_record(thd, table, &info); \
- table->auto_increment_field_not_null= FALSE; \
- if (err_) \
- DBUG_RETURN(1); \
- } while (0)
-
class READ_INFO: public Load_data_param
{
File file;
@@ -374,6 +352,9 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
bool transactional_table __attribute__((unused));
DBUG_ENTER("mysql_load");
+#ifdef WITH_WSREP
+ Wsrep_load_data_split wsrep_load_data_split(thd);
+#endif /* WITH_WSREP */
/*
Bug #34283
mysqlbinlog leaves tmpfile after termination if binlog contains
@@ -410,10 +391,13 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
if (thd->lex->handle_list_of_derived(table_list, DT_PREPARE))
DBUG_RETURN(TRUE);
- if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context,
- &thd->lex->select_lex.top_join_list,
+ if (setup_tables_and_check_access(thd,
+ &thd->lex->first_select_lex()->context,
+ &thd->lex->first_select_lex()->
+ top_join_list,
table_list,
- thd->lex->select_lex.leaf_tables, FALSE,
+ thd->lex->first_select_lex()->leaf_tables,
+ FALSE,
INSERT_ACL | UPDATE_ACL,
INSERT_ACL | UPDATE_ACL, FALSE))
DBUG_RETURN(-1);
@@ -944,7 +928,7 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
List_iterator_fast<Item> it(fields_vars);
Item *item;
TABLE *table= table_list->table;
- bool progress_reports;
+ bool err, progress_reports;
ulonglong counter, time_to_report_progress;
DBUG_ENTER("read_fixed_length");
@@ -1035,9 +1019,11 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
DBUG_RETURN(-1);
}
- WSREP_LOAD_DATA_SPLIT(thd, table, info);
- WRITE_RECORD(thd, table, info);
-
+ err= write_record(thd, table, &info);
+ table->auto_increment_field_not_null= FALSE;
+ if (err)
+ DBUG_RETURN(1);
+
/*
We don't need to reset auto-increment field since we are restoring
its default value at the beginning of each loop iteration.
@@ -1070,7 +1056,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
Item *item;
TABLE *table= table_list->table;
uint enclosed_length;
- bool progress_reports;
+ bool err, progress_reports;
ulonglong counter, time_to_report_progress;
DBUG_ENTER("read_sep_field");
@@ -1175,9 +1161,10 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
DBUG_RETURN(-1);
}
- WSREP_LOAD_DATA_SPLIT(thd, table, info);
- WRITE_RECORD(thd, table, info);
-
+ err= write_record(thd, table, &info);
+ table->auto_increment_field_not_null= FALSE;
+ if (err)
+ DBUG_RETURN(1);
/*
We don't need to reset auto-increment field since we are restoring
its default value at the beginning of each loop iteration.
@@ -1221,6 +1208,7 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
for ( ; ; it.rewind())
{
+ bool err;
if (thd->killed)
{
thd->send_kill_message();
@@ -1294,10 +1282,12 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
case VIEW_CHECK_ERROR:
DBUG_RETURN(-1);
}
-
- WSREP_LOAD_DATA_SPLIT(thd, table, info);
- WRITE_RECORD(thd, table, info);
-
+
+ err= write_record(thd, table, &info);
+ table->auto_increment_field_not_null= false;
+ if (err)
+ DBUG_RETURN(1);
+
/*
We don't need to reset auto-increment field since we are restoring
its default value at the beginning of each loop iteration.
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index ab649f23160..e61a9675e65 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -100,6 +100,7 @@
#include "set_var.h"
#include "sql_bootstrap.h"
#include "sql_sequence.h"
+#include "opt_trace.h"
#include "my_json_writer.h"
@@ -109,14 +110,18 @@
#include "../storage/maria/ha_maria.h"
#endif
+#include "wsrep.h"
#include "wsrep_mysqld.h"
+#ifdef WITH_WSREP
#include "wsrep_thd.h"
+#include "wsrep_trans_observer.h" /* wsrep transaction hooks */
-static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
+static bool wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
Parser_state *parser_state,
bool is_com_multi,
bool is_next_command);
+#endif /* WITH_WSREP */
/**
@defgroup Runtime_Environment Runtime Environment
@{
@@ -390,10 +395,6 @@ const LEX_CSTRING command_name[257]={
{ STRING_WITH_LEN("Error") } // Last command number 255
};
-const char *xa_state_names[]={
- "NON-EXISTING", "ACTIVE", "IDLE", "PREPARED", "ROLLBACK ONLY"
-};
-
#ifdef HAVE_REPLICATION
/**
Returns true if all tables should be ignored.
@@ -778,6 +779,8 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_CREATE_SERVER]= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_ALTER_SERVER]= CF_AUTO_COMMIT_TRANS;
sql_command_flags[SQLCOM_DROP_SERVER]= CF_AUTO_COMMIT_TRANS;
+ sql_command_flags[SQLCOM_BACKUP]= CF_AUTO_COMMIT_TRANS;
+ sql_command_flags[SQLCOM_BACKUP_LOCK]= 0;
/*
The following statements can deal with temporary tables,
@@ -889,6 +892,16 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_REVOKE_ALL]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_INSTALL_PLUGIN]|= CF_DISALLOW_IN_RO_TRANS;
sql_command_flags[SQLCOM_UNINSTALL_PLUGIN]|= CF_DISALLOW_IN_RO_TRANS;
+#ifdef WITH_WSREP
+ /*
+ Statements for which some errors are ignored when
+ wsrep_ignore_apply_errors = WSREP_IGNORE_ERRORS_ON_RECONCILING_DDL
+ */
+ sql_command_flags[SQLCOM_DROP_DB]|= CF_WSREP_MAY_IGNORE_ERRORS;
+ sql_command_flags[SQLCOM_DROP_TABLE]|= CF_WSREP_MAY_IGNORE_ERRORS;
+ sql_command_flags[SQLCOM_DROP_INDEX]|= CF_WSREP_MAY_IGNORE_ERRORS;
+ sql_command_flags[SQLCOM_ALTER_TABLE]|= CF_WSREP_MAY_IGNORE_ERRORS;
+#endif /* WITH_WSREP */
}
bool sqlcom_can_generate_row_events(const THD *thd)
@@ -971,15 +984,29 @@ static char *fgets_fn(char *buffer, size_t size, fgets_input_t input, int *error
}
-static void handle_bootstrap_impl(THD *thd)
+int bootstrap(MYSQL_FILE *file)
{
- MYSQL_FILE *file= bootstrap_file;
- DBUG_ENTER("handle_bootstrap_impl");
+ int bootstrap_error= 0;
+ DBUG_ENTER("handle_bootstrap");
+
+ THD *thd= new THD(next_thread_id());
+#ifdef WITH_WSREP
+ thd->variables.wsrep_on= 0;
+#endif
+ thd->bootstrap=1;
+ my_net_init(&thd->net,(st_vio*) 0, thd, MYF(0));
+ thd->max_client_packet_length= thd->net.max_packet;
+ thd->security_ctx->master_access= ~(ulong)0;
#ifndef EMBEDDED_LIBRARY
- pthread_detach_this_thread();
+ mysql_thread_set_psi_id(thd->thread_id);
+#else
+ thd->mysql= 0;
+#endif
+
+ /* The following must be called before DBUG_ENTER */
thd->thread_stack= (char*) &thd;
-#endif /* EMBEDDED_LIBRARY */
+ thd->store_globals();
thd->security_ctx->user= (char*) my_strdup("boot", MYF(MY_WME));
thd->security_ctx->priv_user[0]= thd->security_ctx->priv_host[0]=
@@ -1056,10 +1083,6 @@ static void handle_bootstrap_impl(THD *thd)
thd->profiling.set_query_source(thd->query(), length);
#endif
- /*
- We don't need to obtain LOCK_thread_count here because in bootstrap
- mode we have only one thread.
- */
thd->set_time();
Parser_state parser_state;
if (parser_state.init(thd, thd->query(), length))
@@ -1087,56 +1110,8 @@ static void handle_bootstrap_impl(THD *thd)
free_root(&thd->transaction.mem_root,MYF(MY_KEEP_PREALLOC));
thd->lex->restore_set_statement_var();
}
-
- DBUG_VOID_RETURN;
-}
-
-
-/**
- Execute commands from bootstrap_file.
-
- Used when creating the initial grant tables.
-*/
-
-pthread_handler_t handle_bootstrap(void *arg)
-{
- THD *thd=(THD*) arg;
-
- mysql_thread_set_psi_id(thd->thread_id);
-
- do_handle_bootstrap(thd);
- return 0;
-}
-
-void do_handle_bootstrap(THD *thd)
-{
- /* The following must be called before DBUG_ENTER */
- thd->thread_stack= (char*) &thd;
- if (my_thread_init() || thd->store_globals())
- {
-#ifndef EMBEDDED_LIBRARY
- close_connection(thd, ER_OUT_OF_RESOURCES);
-#endif
- thd->fatal_error();
- goto end;
- }
-
- handle_bootstrap_impl(thd);
-
-end:
delete thd;
-
- mysql_mutex_lock(&LOCK_thread_count);
- in_bootstrap = FALSE;
- mysql_cond_broadcast(&COND_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
-
-#ifndef EMBEDDED_LIBRARY
- my_thread_end();
- pthread_exit(0);
-#endif
-
- return;
+ DBUG_RETURN(bootstrap_error);
}
@@ -1188,10 +1163,8 @@ static bool wsrep_tables_accessible_when_detached(const TABLE_LIST *tables)
{
for (const TABLE_LIST *table= tables; table; table= table->next_global)
{
- TABLE_CATEGORY c;
LEX_CSTRING db= table->db, tn= table->table_name;
- c= get_table_category(&db, &tn);
- if (c != TABLE_CATEGORY_INFORMATION && c != TABLE_CATEGORY_PERFORMANCE)
+ if (get_table_category(&db, &tn) < TABLE_CATEGORY_INFORMATION)
return false;
}
return true;
@@ -1215,28 +1188,11 @@ bool do_command(THD *thd)
{
bool return_value;
char *packet= 0;
-#ifdef WITH_WSREP
- ulong packet_length= 0; // just to avoid (false positive) compiler warning
-#else
ulong packet_length;
-#endif /* WITH_WSREP */
NET *net= &thd->net;
enum enum_server_command command;
DBUG_ENTER("do_command");
-#ifdef WITH_WSREP
- if (WSREP(thd))
- {
- mysql_mutex_lock(&thd->LOCK_thd_data);
- thd->wsrep_query_state= QUERY_IDLE;
- if (thd->wsrep_conflict_state==MUST_ABORT)
- {
- wsrep_client_rollback(thd);
- }
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- }
-#endif /* WITH_WSREP */
-
/*
indicator of uninitialized lex => normal flow of errors handling
(see my_message_sql)
@@ -1277,29 +1233,6 @@ bool do_command(THD *thd)
DEBUG_SYNC(thd, "before_do_command_net_read");
packet_length= my_net_read_packet(net, 1);
-#ifdef WITH_WSREP
- if (WSREP(thd)) {
- mysql_mutex_lock(&thd->LOCK_thd_data);
-
- /* these THD's are aborted or are aborting during being idle */
- if (thd->wsrep_conflict_state == ABORTING)
- {
- while (thd->wsrep_conflict_state == ABORTING) {
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- my_sleep(1000);
- mysql_mutex_lock(&thd->LOCK_thd_data);
- }
- thd->store_globals();
- }
- else if (thd->wsrep_conflict_state == ABORTED)
- {
- thd->store_globals();
- }
-
- thd->wsrep_query_state= QUERY_EXEC;
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- }
-#endif /* WITH_WSREP */
if (unlikely(packet_length == packet_error))
{
@@ -1307,20 +1240,6 @@ bool do_command(THD *thd)
net->error,
vio_description(net->vio)));
-#ifdef WITH_WSREP
- if (WSREP(thd))
- {
- mysql_mutex_lock(&thd->LOCK_thd_data);
- if (thd->wsrep_conflict_state == MUST_ABORT)
- {
- DBUG_PRINT("wsrep",("aborted for wsrep rollback: %lu",
- (ulong) thd->real_id));
- wsrep_client_rollback(thd);
- }
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- }
-#endif /* WITH_WSREP */
-
/* Instrument this broken statement as "statement/com/error" */
thd->m_statement_psi= MYSQL_REFINE_STATEMENT(thd->m_statement_psi,
com_statement_info[COM_END].
@@ -1371,13 +1290,52 @@ bool do_command(THD *thd)
command= fetch_command(thd, packet);
#ifdef WITH_WSREP
+ /*
+ Aborted by background rollbacker thread.
+ Handle error here and jump straight to out
+ */
+ if (wsrep_before_command(thd))
+ {
+ thd->store_globals();
+ WSREP_LOG_THD(thd, "enter found BF aborted");
+ DBUG_ASSERT(!thd->mdl_context.has_locks());
+ DBUG_ASSERT(!thd->get_stmt_da()->is_set());
+ /* We let COM_QUIT and COM_STMT_CLOSE to execute even if wsrep aborted. */
+ if (command != COM_STMT_CLOSE &&
+ command != COM_QUIT)
+ {
+ my_error(ER_LOCK_DEADLOCK, MYF(0));
+ WSREP_DEBUG("Deadlock error for: %s", thd->query());
+ thd->reset_killed();
+ thd->mysys_var->abort = 0;
+ thd->wsrep_retry_counter = 0;
+
+ /* Instrument this broken statement as "statement/com/error" */
+ thd->m_statement_psi= MYSQL_REFINE_STATEMENT(thd->m_statement_psi,
+ com_statement_info[COM_END].
+ m_key);
+
+ thd->protocol->end_statement();
+
+ /* Mark the statement completed. */
+ MYSQL_END_STATEMENT(thd->m_statement_psi, thd->get_stmt_da());
+ thd->m_statement_psi= NULL;
+ thd->m_digest= NULL;
+ return_value= FALSE;
+
+ wsrep_after_command_before_result(thd);
+ goto out;
+ }
+ }
+
if (WSREP(thd))
{
/*
- Bail out if DB snapshot has not been installed.
- */
- if (!thd->wsrep_applier &&
- (!wsrep_ready || wsrep_reject_queries != WSREP_REJECT_NONE) &&
+ * Bail out if DB snapshot has not been installed. We do, however,
+ * allow "SET" and "SHOW" queries; they are trapped later in execute_command.
+ */
+ if (!(thd->wsrep_applier) &&
+ (!wsrep_ready_get() || wsrep_reject_queries != WSREP_REJECT_NONE) &&
(server_command_flags[command] & CF_SKIP_WSREP_CHECK) == 0)
{
my_message(ER_UNKNOWN_COM_ERROR,
@@ -1390,11 +1348,11 @@ bool do_command(THD *thd)
thd->m_digest= NULL;
return_value= FALSE;
+ wsrep_after_command_before_result(thd);
goto out;
}
}
-#endif
-
+#endif /* WITH_WSREP */
/* Restore read timeout value */
my_net_set_read_timeout(net, thd->variables.net_read_timeout);
@@ -1402,37 +1360,6 @@ bool do_command(THD *thd)
DBUG_ASSERT(!thd->apc_target.is_enabled());
return_value= dispatch_command(command, thd, packet+1,
(uint) (packet_length-1), FALSE, FALSE);
-#ifdef WITH_WSREP
- if (WSREP(thd))
- {
- while (thd->wsrep_conflict_state== RETRY_AUTOCOMMIT)
- {
- WSREP_DEBUG("Retry autocommit for: %s\n", thd->wsrep_retry_query);
- CHARSET_INFO *current_charset = thd->variables.character_set_client;
- if (!is_supported_parser_charset(current_charset))
- {
- /* Do not use non-supported parser character sets */
- WSREP_WARN("Current client character set is non-supported parser "
- "character set: %s", current_charset->csname);
- thd->variables.character_set_client = &my_charset_latin1;
- WSREP_WARN("For retry temporally setting character set to : %s",
- my_charset_latin1.csname);
- }
- thd->clear_error();
- return_value= dispatch_command(command, thd, thd->wsrep_retry_query,
- thd->wsrep_retry_query_len, FALSE, FALSE);
- thd->variables.character_set_client = current_charset;
- }
-
- if (thd->wsrep_retry_query && thd->wsrep_conflict_state != REPLAYING)
- {
- my_free(thd->wsrep_retry_query);
- thd->wsrep_retry_query = NULL;
- thd->wsrep_retry_query_len = 0;
- thd->wsrep_retry_command = COM_CONNECT;
- }
- }
-#endif /* WITH_WSREP */
DBUG_ASSERT(!thd->apc_target.is_enabled());
out:
@@ -1440,6 +1367,13 @@ out:
/* The statement instrumentation must be closed in all cases. */
DBUG_ASSERT(thd->m_digest == NULL);
DBUG_ASSERT(thd->m_statement_psi == NULL);
+#ifdef WITH_WSREP
+ if (packet_length != packet_error)
+ {
+ /* there was a command to process, and before_command() has been called */
+ wsrep_after_command_after_result(thd);
+ }
+#endif /* WITH_WSREP */
DBUG_RETURN(return_value);
}
#endif /* EMBEDDED_LIBRARY */
@@ -1505,6 +1439,36 @@ static bool deny_updates_if_read_only_option(THD *thd, TABLE_LIST *all_tables)
DBUG_RETURN(FALSE);
}
+#ifdef WITH_WSREP
+static my_bool wsrep_read_only_option(THD *thd, TABLE_LIST *all_tables)
+{
+ int opt_readonly_saved = opt_readonly;
+ ulong flag_saved = (ulong)(thd->security_ctx->master_access & SUPER_ACL);
+
+ opt_readonly = 0;
+ thd->security_ctx->master_access &= ~SUPER_ACL;
+
+ my_bool ret = !deny_updates_if_read_only_option(thd, all_tables);
+
+ opt_readonly = opt_readonly_saved;
+ thd->security_ctx->master_access |= flag_saved;
+
+ return ret;
+}
+
+static void wsrep_copy_query(THD *thd)
+{
+ thd->wsrep_retry_command = thd->get_command();
+ thd->wsrep_retry_query_len = thd->query_length();
+ if (thd->wsrep_retry_query) {
+ my_free(thd->wsrep_retry_query);
+ }
+ thd->wsrep_retry_query = (char *)my_malloc(
+ thd->wsrep_retry_query_len + 1, MYF(0));
+ strncpy(thd->wsrep_retry_query, thd->query(), thd->wsrep_retry_query_len);
+ thd->wsrep_retry_query[thd->wsrep_retry_query_len] = '\0';
+}
+#endif /* WITH_WSREP */
/**
check COM_MULTI packet
@@ -1587,41 +1551,6 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
/* keep it within 1 byte */
compile_time_assert(COM_END == 255);
-#ifdef WITH_WSREP
- if (WSREP(thd))
- {
- if (!thd->in_multi_stmt_transaction_mode())
- {
- thd->wsrep_PA_safe= true;
- }
-
- mysql_mutex_lock(&thd->LOCK_thd_data);
- thd->wsrep_query_state= QUERY_EXEC;
- if (thd->wsrep_conflict_state== RETRY_AUTOCOMMIT)
- {
- thd->wsrep_conflict_state= NO_CONFLICT;
- }
- if (thd->wsrep_conflict_state== MUST_ABORT)
- {
- wsrep_client_rollback(thd);
- }
- /* We let COM_QUIT and COM_STMT_CLOSE to execute even if wsrep aborted. */
- if (thd->wsrep_conflict_state == ABORTED &&
- command != COM_STMT_CLOSE && command != COM_QUIT)
- {
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- my_message(ER_LOCK_DEADLOCK, "Deadlock: wsrep aborted transaction",
- MYF(0));
- WSREP_DEBUG("Deadlock error for: %s", thd->query());
- thd->reset_killed();
- thd->mysys_var->abort = 0;
- thd->wsrep_conflict_state = NO_CONFLICT;
- thd->wsrep_retry_counter = 0;
- goto dispatch_end;
- }
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- }
-#endif /* WITH_WSREP */
#if defined(ENABLED_PROFILING)
thd->profiling.start_new_query();
#endif
@@ -1664,6 +1593,13 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
*/
thd->set_query_id(get_query_id());
}
+#ifdef WITH_WSREP
+ if (WSREP(thd) && thd->wsrep_next_trx_id() == WSREP_UNDEFINED_TRX_ID)
+ {
+ thd->set_wsrep_next_trx_id(thd->query_id);
+ WSREP_DEBUG("assigned new next trx id: %" PRIu64, thd->wsrep_next_trx_id());
+ }
+#endif /* WITH_WSREP */
if (!(server_command_flags[command] & CF_SKIP_QUESTIONS))
statistic_increment(thd->status_var.questions, &LOCK_status);
@@ -1690,6 +1626,15 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->get_stmt_da()->set_skip_flush();
}
+ if (unlikely(thd->security_ctx->password_expired &&
+ command != COM_QUERY &&
+ command != COM_PING &&
+ command != COM_QUIT))
+ {
+ my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
+ goto dispatch_end;
+ }
+
switch (command) {
case COM_INIT_DB:
{
@@ -1709,7 +1654,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
case COM_REGISTER_SLAVE:
{
status_var_increment(thd->status_var.com_register_slave);
- if (!register_slave(thd, (uchar*)packet, packet_length))
+ if (!thd->register_slave((uchar*) packet, packet_length))
my_ok(thd);
break;
}
@@ -1793,11 +1738,23 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
case COM_STMT_BULK_EXECUTE:
{
mysqld_stmt_bulk_execute(thd, packet, packet_length);
+#ifdef WITH_WSREP
+ if (WSREP_ON)
+ {
+ (void)wsrep_after_statement(thd);
+ }
+#endif /* WITH_WSREP */
break;
}
case COM_STMT_EXECUTE:
{
mysqld_stmt_execute(thd, packet, packet_length);
+#ifdef WITH_WSREP
+ if (WSREP_ON)
+ {
+ (void)wsrep_after_statement(thd);
+ }
+#endif /* WITH_WSREP */
break;
}
case COM_STMT_FETCH:
@@ -1850,10 +1807,24 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
if (unlikely(parser_state.init(thd, thd->query(), thd->query_length())))
break;
+#ifdef WITH_WSREP
if (WSREP_ON)
- wsrep_mysql_parse(thd, thd->query(), thd->query_length(), &parser_state,
- is_com_multi, is_next_command);
+ {
+ if (wsrep_mysql_parse(thd, thd->query(), thd->query_length(),
+ &parser_state,
+ is_com_multi, is_next_command))
+ {
+ WSREP_DEBUG("Deadlock error for: %s", thd->query());
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ thd->killed = NOT_KILLED;
+ thd->mysys_var->abort = 0;
+ thd->wsrep_retry_counter = 0;
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ goto dispatch_end;
+ }
+ }
else
+#endif /* WITH_WSREP */
mysql_parse(thd, thd->query(), thd->query_length(), &parser_state,
is_com_multi, is_next_command);
@@ -1935,17 +1906,32 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
*/
statistic_increment(thd->status_var.questions, &LOCK_status);
- if(!WSREP(thd))
- thd->set_time(); /* Reset the query start time. */
+ if (!WSREP(thd))
+ thd->set_time(); /* Reset the query start time. */
parser_state.reset(beginning_of_next_stmt, length);
+#ifdef WITH_WSREP
if (WSREP_ON)
- wsrep_mysql_parse(thd, beginning_of_next_stmt, length, &parser_state,
- is_com_multi, is_next_command);
+ {
+ if (wsrep_mysql_parse(thd, beginning_of_next_stmt,
+ length, &parser_state,
+ is_com_multi, is_next_command))
+ {
+ WSREP_DEBUG("Deadlock error for: %s", thd->query());
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ thd->killed = NOT_KILLED;
+ thd->mysys_var->abort = 0;
+ thd->wsrep_retry_counter = 0;
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+
+ goto dispatch_end;
+ }
+ }
else
- mysql_parse(thd, beginning_of_next_stmt, length, &parser_state,
- is_com_multi, is_next_command);
+#endif /* WITH_WSREP */
+ mysql_parse(thd, beginning_of_next_stmt, length, &parser_state,
+ is_com_multi, is_next_command);
}
@@ -2016,10 +2002,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
Init TABLE_LIST members necessary when the underlying
table is a view.
*/
- table_list.select_lex= &(thd->lex->select_lex);
+ table_list.select_lex= thd->lex->first_select_lex();
thd->lex->
- select_lex.table_list.link_in_list(&table_list,
- &table_list.next_local);
+ first_select_lex()->table_list.link_in_list(&table_list,
+ &table_list.next_local);
thd->lex->add_to_query_tables(&table_list);
if (is_infoschema_db(&table_list.db))
@@ -2108,7 +2094,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
general_log_print(thd, command, "Log: '%s' Pos: %lu", name, pos);
if (nlen < FN_REFLEN)
mysql_binlog_send(thd, thd->strmake(name, nlen), (my_off_t)pos, flags);
- unregister_slave(thd,1,1);
+ thd->unregister_slave();
/* fake COM_QUIT -- if we get here, the thread needs to terminate */
error = TRUE;
break;
@@ -2138,6 +2124,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
DBUG_EXECUTE_IF("simulate_detached_thread_refresh", debug_simulate= TRUE;);
if (debug_simulate)
{
+ /* This code doesn't work under FTWRL */
+ DBUG_ASSERT(! (options & REFRESH_READ_LOCK));
/*
Simulate a reload without an attached thread session.
Provides an environment similar to that of when the
@@ -2180,6 +2168,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
*/
enum mysql_enum_shutdown_level level;
level= (enum mysql_enum_shutdown_level) (uchar) packet[0];
+ thd->lex->is_shutdown_wait_for_slaves= false; // "deferred" cleanup
if (level == SHUTDOWN_DEFAULT)
level= SHUTDOWN_WAIT_ALL_BUFFERS; // soon default will be configurable
else if (level != SHUTDOWN_WAIT_ALL_BUFFERS)
@@ -2378,9 +2367,28 @@ com_multi_end:
break;
}
+dispatch_end:
#ifdef WITH_WSREP
- dispatch_end:
-
+ /*
+ BF aborted before sending response back to client
+ */
+ if (thd->killed == KILL_QUERY)
+ {
+ WSREP_DEBUG("THD is killed at dispatch_end");
+ }
+ wsrep_after_command_before_result(thd);
+ if (wsrep_current_error(thd) &&
+ !(command == COM_STMT_PREPARE ||
+ command == COM_STMT_FETCH ||
+ command == COM_STMT_SEND_LONG_DATA ||
+ command == COM_STMT_CLOSE
+ ))
+ {
+ /* todo: Pass wsrep client state current error to override */
+ wsrep_override_error(thd, wsrep_current_error(thd),
+ wsrep_current_error_status(thd));
+ WSREP_LOG_THD(thd, "leave");
+ }
if (WSREP(thd))
{
/*
@@ -2391,9 +2399,10 @@ com_multi_end:
|| thd->get_stmt_da()->is_disabled());
/* wsrep BF abort in query exec phase */
mysql_mutex_lock(&thd->LOCK_thd_data);
- do_end_of_statement= thd->wsrep_conflict_state != REPLAYING &&
- thd->wsrep_conflict_state != RETRY_AUTOCOMMIT &&
- !thd->killed;
+ do_end_of_statement=
+ thd->wsrep_trx().state() != wsrep::transaction::s_replaying
+ && !thd->killed;
+
mysql_mutex_unlock(&thd->LOCK_thd_data);
}
else
@@ -2621,23 +2630,24 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
DBUG_RETURN(1);
#else
{
- if (lex->select_lex.db.str == NULL &&
- lex->copy_db_to(&lex->select_lex.db))
+ if (lex->first_select_lex()->db.str == NULL &&
+ lex->copy_db_to(&lex->first_select_lex()->db))
{
DBUG_RETURN(1);
}
schema_select_lex= new (thd->mem_root) SELECT_LEX();
schema_select_lex->table_list.first= NULL;
if (lower_case_table_names == 1)
- lex->select_lex.db.str= thd->strdup(lex->select_lex.db.str);
- schema_select_lex->db= lex->select_lex.db;
+ lex->first_select_lex()->db.str=
+ thd->strdup(lex->first_select_lex()->db.str);
+ schema_select_lex->db= lex->first_select_lex()->db;
/*
check_db_name() may change db.str if lower_case_table_names == 1,
but that's ok as the db is allocated above in this case.
*/
- if (check_db_name((LEX_STRING*) &lex->select_lex.db))
+ if (check_db_name((LEX_STRING*) &lex->first_select_lex()->db))
{
- my_error(ER_WRONG_DB_NAME, MYF(0), lex->select_lex.db.str);
+ my_error(ER_WRONG_DB_NAME, MYF(0), lex->first_select_lex()->db.str);
DBUG_RETURN(1);
}
break;
@@ -2676,7 +2686,8 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
default:
break;
}
-
+ if (schema_select_lex)
+ schema_select_lex->set_master_unit(&lex->unit);
SELECT_LEX *select_lex= lex->current_select;
if (make_schema_select(thd, select_lex, get_schema_table(schema_table_idx)))
DBUG_RETURN(1);
@@ -3053,7 +3064,7 @@ static int mysql_create_routine(THD *thd, LEX *lex)
if (sp_process_definer(thd))
return true;
- WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL)
+ WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL);
if (!lex->sphead->m_handler->sp_create_routine(thd, lex->sphead))
{
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -3122,7 +3133,9 @@ static int mysql_create_routine(THD *thd, LEX *lex)
#endif
return false;
}
-WSREP_ERROR_LABEL:
+#ifdef WITH_WSREP
+wsrep_error_label:
+#endif
return true;
}
@@ -3270,7 +3283,7 @@ mysql_execute_command(THD *thd)
int up_result= 0;
LEX *lex= thd->lex;
/* first SELECT_LEX (has special meaning for many non-SELECT commands) */
- SELECT_LEX *select_lex= &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
/* first table of first SELECT_LEX */
TABLE_LIST *first_table= select_lex->table_list.first;
/* list of all tables in query */
@@ -3285,6 +3298,13 @@ mysql_execute_command(THD *thd)
#endif
DBUG_ENTER("mysql_execute_command");
+ if (thd->security_ctx->password_expired &&
+ lex->sql_command != SQLCOM_SET_OPTION)
+ {
+ my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
+ DBUG_RETURN(1);
+ }
+
DBUG_ASSERT(thd->transaction.stmt.is_empty() || thd->in_sub_stmt);
/*
Each statement or replication event which might produce deadlock
@@ -3309,6 +3329,7 @@ mysql_execute_command(THD *thd)
DBUG_ASSERT(first_table == all_tables && first_table != 0);
*/
lex->first_lists_tables_same();
+ lex->fix_first_select_number();
/* should be assigned after making first tables same */
all_tables= lex->query_tables;
/* set context for commands which do not use setup_tables */
@@ -3457,8 +3478,15 @@ mysql_execute_command(THD *thd)
#ifdef HAVE_REPLICATION
} /* endif unlikely slave */
#endif
+ Opt_trace_start ots(thd, all_tables, lex->sql_command, &lex->var_list,
+ thd->query(), thd->query_length(),
+ thd->variables.character_set_client);
+
+ Json_writer_object trace_command(thd);
+ Json_writer_array trace_command_steps(thd, "steps");
+
#ifdef WITH_WSREP
- if (wsrep && WSREP(thd))
+ if (WSREP(thd))
{
/*
change LOCK TABLE WRITE to transaction
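The optimizer-trace objects introduced in the hunk above (Opt_trace_start, Json_writer_object, Json_writer_array) are plain stack objects, so every exit path out of mysql_execute_command() closes the trace scopes it opened. A minimal self-contained sketch of that RAII shape follows; TraceWriter and TraceScope are illustrative stand-ins, not the server's classes.

#include <iostream>
#include <string>

// Hypothetical stand-in for a JSON trace writer: each scope object opens a
// named section on construction and closes it on destruction, so an early
// return cannot leave the trace unbalanced.
struct TraceWriter {
  int depth= 0;
  void indent() const { for (int i= 0; i < depth; i++) std::cout << "  "; }
  void open(const std::string &name) { indent(); std::cout << name << ": {\n"; ++depth; }
  void close() { --depth; indent(); std::cout << "}\n"; }
};

struct TraceScope {
  TraceWriter &w;
  TraceScope(TraceWriter &w_arg, const std::string &name) : w(w_arg) { w.open(name); }
  ~TraceScope() { w.close(); }
};

int main()
{
  TraceWriter w;
  TraceScope command(w, "\"join_preparation\"");  // closed automatically
  TraceScope steps(w, "\"steps\"");               // even if we returned early
  return 0;
}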
@@ -3488,8 +3516,8 @@ mysql_execute_command(THD *thd)
* allow SET and SHOW queries and reads from information schema
* and dirty reads (if configured)
*/
- if (!thd->wsrep_applier &&
- !(wsrep_ready && wsrep_reject_queries == WSREP_REJECT_NONE) &&
+ if (!(thd->wsrep_applier) &&
+ !(wsrep_ready_get() && wsrep_reject_queries == WSREP_REJECT_NONE) &&
!(thd->variables.wsrep_dirty_reads &&
(sql_command_flags[lex->sql_command] & CF_CHANGES_DATA) == 0) &&
!wsrep_tables_accessible_when_detached(all_tables) &&
@@ -3654,7 +3682,7 @@ mysql_execute_command(THD *thd)
    not run in its own transaction it may simply never appear on
the slave in case the outside transaction rolls back.
*/
- if (stmt_causes_implicit_commit(thd, CF_IMPLICT_COMMIT_BEGIN))
+ if (stmt_causes_implicit_commit(thd, CF_IMPLICIT_COMMIT_BEGIN))
{
/*
Note that this should never happen inside of stored functions
@@ -3677,6 +3705,13 @@ mysql_execute_command(THD *thd)
}
}
thd->transaction.stmt.mark_trans_did_ddl();
+#ifdef WITH_WSREP
+ /* Clean up the previous transaction on implicit commit */
+ if (wsrep_thd_is_local(thd) && wsrep_after_statement(thd))
+ {
+ goto error;
+ }
+#endif /* WITH_WSREP */
}
#ifndef DBUG_OFF
@@ -3730,6 +3765,33 @@ mysql_execute_command(THD *thd)
/* Start timeouts */
thd->set_query_timer();
+#ifdef WITH_WSREP
+ /*
+ Always start a new transaction for a wsrep THD unless the
+ current command is DDL or explicit BEGIN. This will guarantee that
+ the THD is BF abortable even if it does not generate any
+ changes and takes only read locks. If the statement does not
+ start a multi STMT transaction, the wsrep_transaction is
+ committed as empty at the end of this function.
+
+ Transaction is started for BEGIN in trans_begin(), for DDL the
+ implicit commit took care of committing previous transaction
+ above and a new transaction should not be started.
+
+ Do not start transaction for stored procedures, it will be handled
+ internally in SP processing.
+ */
+ if (WSREP(thd) &&
+ wsrep_thd_is_local(thd) &&
+ lex->sql_command != SQLCOM_BEGIN &&
+ lex->sql_command != SQLCOM_CALL &&
+ lex->sql_command != SQLCOM_EXECUTE &&
+ !(sql_command_flags[lex->sql_command] & CF_AUTO_COMMIT_TRANS))
+ {
+ wsrep_start_trx_if_not_started(thd);
+ }
+#endif /* WITH_WSREP */
+
switch (lex->sql_command) {
case SQLCOM_SHOW_EVENTS:
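The comment added above spells out when a wsrep transaction is started eagerly for a statement. The decision can be restated compactly; the sketch below only illustrates that condition, with flag names of my own rather than the server's.

#include <cassert>

// Illustrative restatement of the condition guarding
// wsrep_start_trx_if_not_started() in the hunk above: start a transaction for
// local wsrep sessions unless the statement is BEGIN, CALL/EXECUTE, or a
// command that auto-commits (DDL).
struct StmtTraits {
  bool wsrep_on;        // WSREP(thd)
  bool is_local;        // wsrep_thd_is_local(thd)
  bool is_begin;        // SQLCOM_BEGIN
  bool is_sp_call;      // SQLCOM_CALL or SQLCOM_EXECUTE
  bool auto_commit_cmd; // CF_AUTO_COMMIT_TRANS set for the command
};

static bool should_start_wsrep_trx(const StmtTraits &s)
{
  return s.wsrep_on && s.is_local &&
         !s.is_begin && !s.is_sp_call && !s.auto_commit_cmd;
}

int main()
{
  // A plain autocommit SELECT on a local session starts a transaction...
  assert(should_start_wsrep_trx({true, true, false, false, false}));
  // ...while DDL (implicit commit) and explicit BEGIN do not.
  assert(!should_start_wsrep_trx({true, true, false, false, true}));
  assert(!should_start_wsrep_trx({true, true, true, false, false}));
  return 0;
}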
@@ -3789,17 +3851,21 @@ mysql_execute_command(THD *thd)
case SQLCOM_SHOW_STORAGE_ENGINES:
case SQLCOM_SHOW_PROFILE:
case SQLCOM_SELECT:
- {
- if (lex->sql_command == SQLCOM_SELECT)
- WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_READ);
- else
- {
- WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_SHOW);
-#ifdef ENABLED_PROFILING
- if (lex->sql_command == SQLCOM_SHOW_PROFILE)
- thd->profiling.discard_current_query();
-#endif
- }
+ {
+#ifdef WITH_WSREP
+ if (lex->sql_command == SQLCOM_SELECT)
+ {
+ WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_READ);
+ }
+ else
+ {
+ WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_SHOW);
+# ifdef ENABLED_PROFILING
+ if (lex->sql_command == SQLCOM_SHOW_PROFILE)
+ thd->profiling.discard_current_query();
+# endif
+ }
+#endif /* WITH_WSREP */
thd->status_var.last_query_cost= 0.0;
@@ -4233,11 +4299,7 @@ mysql_execute_command(THD *thd)
goto end_with_restore_list;
}
- /* Copy temporarily the statement flags to thd for lock_table_names() */
- uint save_thd_create_info_options= thd->lex->create_info.options;
- thd->lex->create_info.options|= create_info.options;
res= open_and_lock_tables(thd, create_info, lex->query_tables, TRUE, 0);
- thd->lex->create_info.options= save_thd_create_info_options;
if (unlikely(res))
{
/* Got error or warning. Set res to 1 if error */
@@ -4311,8 +4373,8 @@ mysql_execute_command(THD *thd)
}
else
{
- if (create_info.vers_fix_system_fields(thd, &alter_info, *create_table) ||
- create_info.vers_check_system_fields(thd, &alter_info, *create_table))
+ if (create_info.fix_create_fields(thd, &alter_info, *create_table) ||
+ create_info.check_fields(thd, &alter_info, *create_table))
goto end_with_restore_list;
/*
@@ -4555,9 +4617,7 @@ end_with_restore_list:
WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_UPDATE_DELETE);
ha_rows found= 0, updated= 0;
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (WSREP_CLIENT(thd) &&
- wsrep_sync_wait(thd, WSREP_SYNC_WAIT_BEFORE_UPDATE_DELETE))
- goto error;
+ WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_UPDATE_DELETE);
if (update_precheck(thd, all_tables))
break;
@@ -4705,9 +4765,7 @@ end_with_restore_list:
WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_INSERT_REPLACE);
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (WSREP_CLIENT(thd) &&
- wsrep_sync_wait(thd, WSREP_SYNC_WAIT_BEFORE_INSERT_REPLACE))
- goto error;
+ WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_INSERT_REPLACE);
/*
Since INSERT DELAYED doesn't support temporary tables, we could
@@ -4765,9 +4823,7 @@ end_with_restore_list:
select_insert *sel_result;
bool explain= MY_TEST(lex->describe);
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (WSREP_CLIENT(thd) &&
- wsrep_sync_wait(thd, WSREP_SYNC_WAIT_BEFORE_INSERT_REPLACE))
- goto error;
+ WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_UPDATE_DELETE);
if ((res= insert_precheck(thd, all_tables)))
break;
@@ -4887,9 +4943,7 @@ end_with_restore_list:
WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_UPDATE_DELETE);
select_result *sel_result= NULL;
DBUG_ASSERT(first_table == all_tables && first_table != 0);
- if (WSREP_CLIENT(thd) &&
- wsrep_sync_wait(thd, WSREP_SYNC_WAIT_BEFORE_UPDATE_DELETE))
- goto error;
+ WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_UPDATE_DELETE);
if ((res= delete_precheck(thd, all_tables)))
break;
@@ -4948,9 +5002,7 @@ end_with_restore_list:
DBUG_ASSERT(first_table == all_tables && first_table != 0);
TABLE_LIST *aux_tables= thd->lex->auxiliary_table_list.first;
multi_delete *result;
- if (WSREP_CLIENT(thd) &&
- wsrep_sync_wait(thd, WSREP_SYNC_WAIT_BEFORE_UPDATE_DELETE))
- goto error;
+ WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_UPDATE_DELETE);
if ((res= multi_delete_precheck(thd, all_tables)))
break;
@@ -5171,7 +5223,8 @@ end_with_restore_list:
thd->mdl_context.release_transactional_locks();
thd->variables.option_bits&= ~(OPTION_TABLE_LOCK);
}
- if (thd->global_read_lock.is_acquired())
+ if (thd->global_read_lock.is_acquired() &&
+ thd->current_backup_stage == BACKUP_FINISHED)
thd->global_read_lock.unlock_global_read_lock(thd);
if (res)
goto error;
@@ -5186,6 +5239,13 @@ end_with_restore_list:
if (res)
goto error;
+ /* We can't have any kind of table locks while backup is active */
+ if (thd->current_backup_stage != BACKUP_FINISHED)
+ {
+ my_error(ER_BACKUP_LOCK_IS_ACTIVE, MYF(0));
+ goto error;
+ }
+
/*
Here we have to pre-open temporary tables for LOCK TABLES.
@@ -5218,6 +5278,23 @@ end_with_restore_list:
my_ok(thd);
}
break;
+ case SQLCOM_BACKUP:
+ if (check_global_access(thd, RELOAD_ACL))
+ goto error;
+ if (!(res= run_backup_stage(thd, lex->backup_stage)))
+ my_ok(thd);
+ break;
+ case SQLCOM_BACKUP_LOCK:
+ if (check_global_access(thd, RELOAD_ACL))
+ goto error;
+ /* first table is set for lock. For unlock the list is empty */
+ if (first_table)
+ res= backup_lock(thd, first_table);
+ else
+ backup_unlock(thd);
+ if (!res)
+ my_ok(thd);
+ break;
case SQLCOM_CREATE_DB:
{
if (prepare_db_action(thd, lex->create_info.or_replace() ?
@@ -5766,6 +5843,7 @@ end_with_restore_list:
thd->mdl_context.release_transactional_locks();
WSREP_DEBUG("BEGIN failed, MDL released: %lld",
(longlong) thd->thread_id);
+ WSREP_DEBUG("stmt_da, sql_errno: %d", (thd->get_stmt_da()->is_error()) ? thd->get_stmt_da()->sql_errno() : 0);
goto error;
}
my_ok(thd);
@@ -5805,20 +5883,7 @@ end_with_restore_list:
thd->set_killed(KILL_CONNECTION);
thd->print_aborted_warning(3, "RELEASE");
}
-#ifdef WITH_WSREP
- if (WSREP(thd)) {
-
- if (thd->wsrep_conflict_state == NO_CONFLICT ||
- thd->wsrep_conflict_state == REPLAYING)
- {
- my_ok(thd);
- }
- } else {
-#endif /* WITH_WSREP */
- my_ok(thd);
-#ifdef WITH_WSREP
- }
-#endif /* WITH_WSREP */
+ my_ok(thd);
break;
}
case SQLCOM_ROLLBACK:
@@ -5854,17 +5919,7 @@ end_with_restore_list:
/* Disconnect the current client connection. */
if (tx_release)
thd->set_killed(KILL_CONNECTION);
-#ifdef WITH_WSREP
- if (WSREP(thd)) {
- if (thd->wsrep_conflict_state == NO_CONFLICT) {
- my_ok(thd);
- }
- } else {
-#endif /* WITH_WSREP */
- my_ok(thd);
-#ifdef WITH_WSREP
- }
-#endif /* WITH_WSREP */
+ my_ok(thd);
break;
}
case SQLCOM_RELEASE_SAVEPOINT:
@@ -6298,8 +6353,10 @@ end_with_restore_list:
goto finish;
error:
-WSREP_ERROR_LABEL:
- res= TRUE;
+#ifdef WITH_WSREP
+wsrep_error_label:
+#endif
+ res= true;
finish:
@@ -6307,7 +6364,6 @@ finish:
DBUG_ASSERT(!thd->in_active_multi_stmt_transaction() ||
thd->in_multi_stmt_transaction_mode());
-
lex->unit.cleanup();
/* close/reopen tables that were marked to need reopen under LOCK TABLES */
@@ -6333,25 +6389,6 @@ finish:
THD_STAGE_INFO(thd, stage_rollback);
trans_rollback_stmt(thd);
}
-#ifdef WITH_WSREP
- if (thd->spcont &&
- (thd->wsrep_conflict_state == MUST_ABORT ||
- thd->wsrep_conflict_state == ABORTED ||
- thd->wsrep_conflict_state == CERT_FAILURE))
- {
- /*
- The error was cleared, but THD was aborted by wsrep and
- wsrep_conflict_state is still set accordingly. This
- situation is expected if we are running a stored procedure
- that declares a handler that catches ER_LOCK_DEADLOCK error.
- In which case the error may have been cleared in method
- sp_rcontext::handle_sql_condition().
- */
- trans_rollback_stmt(thd);
- thd->wsrep_conflict_state= NO_CONFLICT;
- thd->killed= NOT_KILLED;
- }
-#endif /* WITH_WSREP */
else
{
/* If commit fails, we should be able to reset the OK status. */
@@ -6367,9 +6404,6 @@ finish:
/* Free tables. Set stage 'closing tables' */
close_thread_tables(thd);
-#ifdef WITH_WSREP
- thd->wsrep_consistency_check= NO_CONSISTENCY_CHECK;
-#endif /* WITH_WSREP */
#ifndef DBUG_OFF
@@ -6431,9 +6465,10 @@ finish:
TRANSACT_TRACKER(add_trx_state_from_thd(thd));
- WSREP_TO_ISOLATION_END;
-
#ifdef WITH_WSREP
+ thd->wsrep_consistency_check= NO_CONSISTENCY_CHECK;
+
+ WSREP_TO_ISOLATION_END;
/*
Force release of transactional locks if not in active MST and wsrep is on.
*/
@@ -6446,11 +6481,26 @@ finish:
(longlong) thd->thread_id);
thd->mdl_context.release_transactional_locks();
}
+
+ /*
+ Current command did not start multi STMT transaction and the command
+ did not cause commit to happen (e.g. read only). Commit the wsrep
+ transaction as empty.
+ */
+ if (!thd->in_active_multi_stmt_transaction() &&
+ !thd->in_sub_stmt &&
+ thd->wsrep_trx().active() &&
+ thd->wsrep_trx().state() == wsrep::transaction::s_executing)
+ {
+ wsrep_commit_empty(thd, true);
+ }
+
+ /* assume PA safety for next transaction */
+ thd->wsrep_PA_safe= true;
#endif /* WITH_WSREP */
DBUG_RETURN(res || thd->is_error());
-}
-
+ }
static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
{
@@ -6576,6 +6626,7 @@ static bool execute_show_status(THD *thd, TABLE_LIST *all_tables)
bool res;
system_status_var old_status_var= thd->status_var;
thd->initial_status_var= &old_status_var;
+ WSREP_SYNC_WAIT(thd, WSREP_SYNC_WAIT_BEFORE_SHOW);
if (!(res= check_table_access(thd, SELECT_ACL, all_tables, FALSE,
UINT_MAX, FALSE)))
res= execute_sqlcom_select(thd, all_tables);
@@ -6594,6 +6645,10 @@ static bool execute_show_status(THD *thd, TABLE_LIST *all_tables)
offsetof(STATUS_VAR, last_cleared_system_status_var));
mysql_mutex_unlock(&LOCK_status);
return res;
+#ifdef WITH_WSREP
+wsrep_error_label: /* see WSREP_SYNC_WAIT() macro above */
+ return true;
+#endif /* WITH_WSREP */
}
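execute_show_status() now carries a wsrep_error_label after its normal return because WSREP_SYNC_WAIT() is assumed to expand to a conditional goto to that label when the sync wait fails. A simplified, self-contained imitation of that macro shape (my own macro, not the real one):

#include <cstdio>

// Simplified imitation of the WSREP_SYNC_WAIT() pattern: the macro expands to
// a check that jumps to a local wsrep_error_label on failure, which is why
// callers must provide that label even when it sits after the normal return.
static bool sync_wait_failed= false;            // stand-in for the real wait

#define SYNC_WAIT_OR_FAIL()                     \
  do {                                          \
    if (sync_wait_failed)                       \
      goto wsrep_error_label;                   \
  } while (0)

static bool show_status_like()
{
  SYNC_WAIT_OR_FAIL();
  std::printf("status rows...\n");
  return false;                                 // success
wsrep_error_label:
  return true;                                  // reached only via the macro
}

int main() { return show_status_like() ? 1 : 0; }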
@@ -7522,7 +7577,7 @@ bool check_stack_overrun(THD *thd, long margin,
if (ebuff) {
my_snprintf(ebuff, MYSQL_ERRMSG_SIZE, ER_THD(thd, ER_STACK_OVERRUN_NEED_MORE),
stack_used, my_thread_stack_size, margin);
- my_message(ER_STACK_OVERRUN_NEED_MORE, ebuff, MYF(ME_FATALERROR));
+ my_message(ER_STACK_OVERRUN_NEED_MORE, ebuff, MYF(ME_FATAL));
delete [] ebuff;
}
return 1;
@@ -7592,16 +7647,21 @@ void THD::reset_for_next_command(bool do_clear_error)
DBUG_ASSERT(!in_sub_stmt);
if (likely(do_clear_error))
+ {
clear_error(1);
-
+ /*
+ The following variable can't be reset in clear_error() as
+ clear_error() is called during auto_repair of table
+ */
+ error_printed_to_log= 0;
+ }
free_list= 0;
/*
We also assign stmt_lex in lex_start(), but during bootstrap this
code is executed first.
*/
DBUG_ASSERT(lex == &main_lex);
- main_lex.stmt_lex= &main_lex; main_lex.current_select_number= 1;
- DBUG_PRINT("info", ("Lex and stmt_lex: %p", &main_lex));
+ main_lex.stmt_lex= &main_lex; main_lex.current_select_number= 0;
/*
Those two lines below are theoretically unneeded as
THD::cleanup_after_query() should take care of this already.
@@ -7618,7 +7678,7 @@ void THD::reset_for_next_command(bool do_clear_error)
use autoinc values passed in binlog events, not the values forced by
the cluster.
*/
- if (WSREP(this) && wsrep_exec_mode == LOCAL_STATE &&
+ if (WSREP_NNULL(this) && wsrep_thd_is_local(this) &&
!slave_thread && wsrep_auto_increment_control)
{
variables.auto_increment_offset=
@@ -7687,11 +7747,7 @@ mysql_init_select(LEX *lex)
SELECT_LEX *select_lex= lex->current_select;
select_lex->init_select();
lex->wild= 0;
- if (select_lex == &lex->select_lex)
- {
- DBUG_ASSERT(lex->result == 0);
- lex->exchange= 0;
- }
+ lex->exchange= 0;
}
@@ -7712,6 +7768,7 @@ mysql_new_select(LEX *lex, bool move_down, SELECT_LEX *select_lex)
{
THD *thd= lex->thd;
bool new_select= select_lex == NULL;
+ int old_nest_level= lex->current_select->nest_level;
DBUG_ENTER("mysql_new_select");
if (new_select)
@@ -7723,26 +7780,18 @@ mysql_new_select(LEX *lex, bool move_down, SELECT_LEX *select_lex)
select_lex->init_query();
select_lex->init_select();
}
- lex->nest_level++;
- if (lex->nest_level > (int) MAX_SELECT_NESTING)
- {
- my_error(ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT, MYF(0));
- DBUG_RETURN(1);
- }
- select_lex->nest_level= lex->nest_level;
select_lex->nest_level_base= &thd->lex->unit;
if (move_down)
{
+ lex->nest_level++;
+ if (select_lex->set_nest_level(old_nest_level + 1))
+ DBUG_RETURN(1);
SELECT_LEX_UNIT *unit;
/* first select_lex of subselect or derived table */
- if (!(unit= new (thd->mem_root) SELECT_LEX_UNIT()))
+ if (!(unit= lex->alloc_unit()))
DBUG_RETURN(1);
- unit->init_query();
- unit->thd= thd;
unit->include_down(lex->current_select);
- unit->link_next= 0;
- unit->link_prev= 0;
unit->return_to= lex->current_select;
select_lex->include_down(unit);
/*
@@ -7776,15 +7825,13 @@ mysql_new_select(LEX *lex, bool move_down, SELECT_LEX *select_lex)
"SELECT ... PROCEDURE ANALYSE()");
DBUG_RETURN(TRUE);
}
- // SELECT 1 FROM t1 ORDER BY 1 UNION SELECT 1 FROM t1 -- not possible
- DBUG_ASSERT(!lex->current_select->order_list.first ||
- lex->current_select->braces);
- // SELECT 1 FROM t1 LIMIT 1 UNION SELECT 1 FROM t1; -- not possible
- DBUG_ASSERT(!lex->current_select->explicit_limit ||
- lex->current_select->braces);
+ SELECT_LEX_NODE *save_slave= select_lex->slave;
select_lex->include_neighbour(lex->current_select);
- SELECT_LEX_UNIT *unit= select_lex->master_unit();
+ select_lex->slave= save_slave;
+ SELECT_LEX_UNIT *unit= select_lex->master_unit();
+ if (select_lex->set_nest_level(old_nest_level))
+ DBUG_RETURN(1);
if (!unit->fake_select_lex && unit->add_fake_select_lex(lex->thd))
DBUG_RETURN(1);
select_lex->context.outer_context=
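The hunks above drop the inline MAX_SELECT_NESTING check from mysql_new_select() and route nesting through st_select_lex::set_nest_level(), whose body is not part of this patch. Assuming it keeps an equivalent bound check, a hypothetical sketch of such a guard could look like this (constant, structure, and error handling are illustrative only):

#include <cstdio>

// Hypothetical sketch only: presumably set_nest_level() performs a bound
// check equivalent to the removed inline code. Not taken from the sources.
static const int MAX_SELECT_NESTING_SKETCH= 63;

struct select_sketch {
  int nest_level= 0;
  bool set_nest_level(int level)        // true on error, as the caller expects
  {
    if (level > MAX_SELECT_NESTING_SKETCH)
    {
      std::printf("too high level of nesting for select\n");
      return true;                      // caller does DBUG_RETURN(1)
    }
    nest_level= level;
    return false;
  }
};

int main()
{
  select_sketch s;
  return s.set_nest_level(1) ? 1 : 0;
}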
@@ -7840,151 +7887,163 @@ void mysql_init_multi_delete(LEX *lex)
{
lex->sql_command= SQLCOM_DELETE_MULTI;
mysql_init_select(lex);
- lex->select_lex.select_limit= 0;
+ lex->first_select_lex()->select_limit= 0;
lex->unit.select_limit_cnt= HA_POS_ERROR;
- lex->select_lex.table_list.save_and_clear(&lex->auxiliary_table_list);
+ lex->first_select_lex()->table_list.
+ save_and_clear(&lex->auxiliary_table_list);
lex->query_tables= 0;
lex->query_tables_last= &lex->query_tables;
}
-static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
+#ifdef WITH_WSREP
+static void wsrep_prepare_for_autocommit_retry(THD* thd,
+ char* rawbuf,
+ uint length,
+ Parser_state* parser_state)
+{
+ thd->clear_error();
+ close_thread_tables(thd);
+ thd->wsrep_retry_counter++; // grow
+ wsrep_copy_query(thd);
+ thd->set_time();
+ parser_state->reset(rawbuf, length);
+
+ /* PSI end */
+ MYSQL_END_STATEMENT(thd->m_statement_psi, thd->get_stmt_da());
+ thd->m_statement_psi= NULL;
+ thd->m_digest= NULL;
+
+ /* DTRACE end */
+ if (MYSQL_QUERY_DONE_ENABLED())
+ {
+ MYSQL_QUERY_DONE(thd->is_error());
+ }
+
+ /* SHOW PROFILE end */
+#if defined(ENABLED_PROFILING)
+ thd->profiling.finish_current_query();
+#endif
+
+ /* SHOW PROFILE begin */
+#if defined(ENABLED_PROFILING)
+ thd->profiling.start_new_query("continuing");
+ thd->profiling.set_query_source(rawbuf, length);
+#endif
+
+ /* DTRACE begin */
+ MYSQL_QUERY_START(rawbuf, thd->thread_id,
+ thd->get_db(),
+ &thd->security_ctx->priv_user[0],
+ (char *) thd->security_ctx->host_or_ip);
+
+ /* Performance Schema Interface instrumentation, begin */
+ thd->m_statement_psi= MYSQL_REFINE_STATEMENT(thd->m_statement_psi,
+ com_statement_info[thd->get_command()].m_key);
+ MYSQL_SET_STATEMENT_TEXT(thd->m_statement_psi, thd->query(),
+ thd->query_length());
+
+ DBUG_ASSERT(thd->wsrep_trx().active() == false);
+ thd->wsrep_cs().reset_error();
+ thd->set_query_id(next_query_id());
+}
+
+static bool wsrep_mysql_parse(THD *thd, char *rawbuf, uint length,
Parser_state *parser_state,
bool is_com_multi,
bool is_next_command)
{
-#ifdef WITH_WSREP
bool is_autocommit=
!thd->in_multi_stmt_transaction_mode() &&
- thd->wsrep_conflict_state == NO_CONFLICT &&
- !thd->wsrep_applier;
-
+ wsrep_read_only_option(thd, thd->lex->query_tables);
+ bool retry_autocommit;
do
{
- if (thd->wsrep_conflict_state== RETRY_AUTOCOMMIT)
- {
- thd->wsrep_conflict_state= NO_CONFLICT;
- /* Performance Schema Interface instrumentation, begin */
- thd->m_statement_psi= MYSQL_REFINE_STATEMENT(thd->m_statement_psi,
- com_statement_info[thd->get_command()].m_key);
- MYSQL_SET_STATEMENT_TEXT(thd->m_statement_psi, thd->query(),
- thd->query_length());
-
- DBUG_EXECUTE_IF("sync.wsrep_retry_autocommit",
- {
- const char act[]=
- "now "
- "SIGNAL wsrep_retry_autocommit_reached "
- "WAIT_FOR wsrep_retry_autocommit_continue";
- DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN(act)));
- });
- WSREP_DEBUG("Retry autocommit query: %s", thd->query());
- }
-
- mysql_parse(thd, rawbuf, length, parser_state, is_com_multi,
- is_next_command);
-
- if (WSREP(thd)) {
- /* wsrep BF abort in query exec phase */
- mysql_mutex_lock(&thd->LOCK_thd_data);
- if (thd->wsrep_conflict_state == MUST_ABORT) {
- wsrep_client_rollback(thd);
-
- WSREP_DEBUG("abort in exec query state, avoiding autocommit");
- }
+ retry_autocommit= false;
+ mysql_parse(thd, rawbuf, length, parser_state, is_com_multi, is_next_command);
- if (thd->wsrep_conflict_state == MUST_REPLAY)
- {
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- if (thd->lex->explain)
- delete_explain_query(thd->lex);
- mysql_mutex_lock(&thd->LOCK_thd_data);
+ /*
+ Convert all ER_QUERY_INTERRUPTED errors to ER_LOCK_DEADLOCK
+ if the transaction was BF aborted. This can happen when the
+ transaction is being BF aborted via thd->awake() while it is
+ still executing.
- wsrep_replay_transaction(thd);
- }
+ Note that this must be done before wsrep_after_statement() call
+ since it clears the transaction for autocommit queries.
+ */
+ if (((thd->get_stmt_da()->is_error() &&
+ thd->get_stmt_da()->sql_errno() == ER_QUERY_INTERRUPTED) ||
+ !thd->get_stmt_da()->is_set()) &&
+ thd->wsrep_trx().bf_aborted())
+ {
+ WSREP_DEBUG("overriding error: %d with DEADLOCK",
+ (thd->get_stmt_da()->is_error()) ?
+ thd->get_stmt_da()->sql_errno() : 0);
- /* setting error code for BF aborted trxs */
- if (thd->wsrep_conflict_state == ABORTED ||
- thd->wsrep_conflict_state == CERT_FAILURE)
- {
- thd->reset_for_next_command();
- if (is_autocommit &&
- thd->lex->sql_command != SQLCOM_SELECT &&
- (thd->wsrep_retry_counter < thd->variables.wsrep_retry_autocommit))
- {
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- WSREP_DEBUG("wsrep retrying AC query: %s",
- (thd->query()) ? thd->query() : "void");
-
- /* Performance Schema Interface instrumentation, end */
- MYSQL_END_STATEMENT(thd->m_statement_psi, thd->get_stmt_da());
- thd->m_statement_psi= NULL;
- thd->m_digest= NULL;
- // Released thd->LOCK_thd_data above as below could end up
- // close_thread_tables()/close_open_tables()/close_thread_table()/mysql_mutex_lock(&thd->LOCK_thd_data)
- close_thread_tables(thd);
-
- mysql_mutex_lock(&thd->LOCK_thd_data);
- thd->wsrep_conflict_state= RETRY_AUTOCOMMIT;
- thd->wsrep_retry_counter++; // grow
- wsrep_copy_query(thd);
- thd->set_time();
- parser_state->reset(rawbuf, length);
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- }
- else
- {
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- // This does dirty read to wsrep variables but it is only a debug code
- WSREP_DEBUG("%s, thd: %lld is_AC: %d, retry: %lu - %lu SQL: %s",
- (thd->wsrep_conflict_state == ABORTED) ?
- "BF Aborted" : "cert failure",
- (longlong) thd->thread_id, is_autocommit,
- thd->wsrep_retry_counter,
- thd->variables.wsrep_retry_autocommit, thd->query());
- my_message(ER_LOCK_DEADLOCK, "Deadlock: wsrep aborted transaction",
- MYF(0));
-
- mysql_mutex_lock(&thd->LOCK_thd_data);
- thd->wsrep_conflict_state= NO_CONFLICT;
- if (thd->wsrep_conflict_state != REPLAYING)
- thd->wsrep_retry_counter= 0; // reset
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- }
+ thd->killed = NOT_KILLED;
+ wsrep_override_error(thd, ER_LOCK_DEADLOCK);
+ }
- thd->reset_killed();
+ if (wsrep_after_statement(thd) && is_autocommit)
+ {
+ thd->reset_for_next_command();
+ thd->killed= NOT_KILLED;
+ if (is_autocommit &&
+ thd->lex->sql_command != SQLCOM_SELECT &&
+ thd->wsrep_retry_counter < thd->variables.wsrep_retry_autocommit)
+ {
+ DBUG_EXECUTE_IF("sync.wsrep_retry_autocommit",
+ {
+ const char act[]=
+ "now "
+ "SIGNAL wsrep_retry_autocommit_reached "
+ "WAIT_FOR wsrep_retry_autocommit_continue";
+ DBUG_ASSERT(!debug_sync_set_action(thd, STRING_WITH_LEN(act)));
+ });
+ WSREP_DEBUG("wsrep retrying AC query: %lu %s",
+ thd->wsrep_retry_counter, WSREP_QUERY(thd));
+ wsrep_prepare_for_autocommit_retry(thd, rawbuf, length, parser_state);
+ if (thd->lex->explain)
+ delete_explain_query(thd->lex);
+ retry_autocommit= true;
}
else
{
- set_if_smaller(thd->wsrep_retry_counter, 0); // reset; eventually ok
- mysql_mutex_unlock(&thd->LOCK_thd_data);
+ WSREP_DEBUG("%s, thd: %llu is_AC: %d, retry: %lu - %lu SQL: %s",
+ wsrep_thd_transaction_state_str(thd),
+ thd->thread_id,
+ is_autocommit,
+ thd->wsrep_retry_counter,
+ thd->variables.wsrep_retry_autocommit,
+ WSREP_QUERY(thd));
+ my_error(ER_LOCK_DEADLOCK, MYF(0));
+ thd->killed= NOT_KILLED;
+ thd->wsrep_retry_counter= 0; // reset
}
}
-
- /* If retry is requested clean up explain structure */
- if ((thd->wsrep_conflict_state == RETRY_AUTOCOMMIT ||
- thd->wsrep_conflict_state == MUST_REPLAY )
- && thd->lex->explain)
+ else
{
- delete_explain_query(thd->lex);
+ set_if_smaller(thd->wsrep_retry_counter, 0); // reset; eventually ok
}
-
- } while (thd->wsrep_conflict_state== RETRY_AUTOCOMMIT);
+ } while (retry_autocommit);
if (thd->wsrep_retry_query)
{
- WSREP_DEBUG("releasing retry_query: conf %d sent %d kill %d errno %d SQL %s",
- thd->wsrep_conflict_state,
- thd->get_stmt_da()->is_sent(),
+ WSREP_DEBUG("releasing retry_query: "
+ "conf %s sent %d kill %d errno %d SQL %s",
+ wsrep_thd_transaction_state_str(thd),
+ thd->get_stmt_da()->is_sent(),
thd->killed,
- thd->get_stmt_da()->is_error() ? thd->get_stmt_da()->sql_errno() : 0,
+ thd->get_stmt_da()->is_error() ?
+ thd->get_stmt_da()->sql_errno() : 0,
thd->wsrep_retry_query);
my_free(thd->wsrep_retry_query);
thd->wsrep_retry_query = NULL;
thd->wsrep_retry_query_len = 0;
thd->wsrep_retry_command = COM_CONNECT;
}
-#endif /* WITH_WSREP */
+ return false;
}
+#endif /* WITH_WSREP */
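The rewritten wsrep_mysql_parse() above reduces to a bounded retry loop: execute the statement, and if it was BF-aborted and is a retryable autocommit statement, reset and re-run it up to wsrep_retry_autocommit times; otherwise report a deadlock to the client. A self-contained control-flow sketch of that loop, with stand-in names rather than the server API:

#include <cstdio>

// Control-flow sketch of the retry loop in wsrep_mysql_parse(): bounded
// retries for BF-aborted autocommit statements, deadlock error once the
// budget is exhausted. All names below are stand-ins.
struct ExecResult { bool bf_aborted; };

static ExecResult execute_once(unsigned attempt)
{
  // Pretend the first two attempts are BF-aborted by a replicated trx.
  return ExecResult{attempt < 2};
}

static bool run_with_autocommit_retry(bool is_autocommit, unsigned retry_limit)
{
  unsigned retries= 0;
  bool retry;
  do
  {
    retry= false;
    ExecResult res= execute_once(retries);
    if (res.bf_aborted)
    {
      if (is_autocommit && retries < retry_limit)
      {
        ++retries;                 // corresponds to wsrep_retry_counter++
        retry= true;               // re-parse and re-execute the same query
      }
      else
      {
        std::printf("deadlock reported to client\n");
        return true;               // ER_LOCK_DEADLOCK
      }
    }
  } while (retry);
  return false;                    // succeeded, possibly after retries
}

int main() { return run_with_autocommit_retry(true, 4) ? 1 : 0; }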
/*
@@ -8158,7 +8217,7 @@ bool mysql_test_parse_for_slave(THD *thd, char *rawbuf, uint length)
thd->reset_for_next_command();
if (!parse_sql(thd, & parser_state, NULL, true) &&
- all_tables_not_ok(thd, lex->select_lex.table_list.first))
+ all_tables_not_ok(thd, lex->first_select_lex()->table_list.first))
error= 1; /* Ignore question */
thd->end_statement();
}
@@ -8240,6 +8299,10 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
LEX_CSTRING alias_str;
LEX *lex= thd->lex;
DBUG_ENTER("add_table_to_list");
+ DBUG_PRINT("enter", ("Table '%s' (%p) Select %p (%u)",
+ (alias ? alias->str : table->table.str),
+ table,
+ this, select_number));
if (unlikely(!table))
DBUG_RETURN(0); // End of memory
@@ -8320,7 +8383,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
ptr->schema_table_name= ptr->table_name;
ptr->schema_table= schema_table;
}
- ptr->select_lex= lex->current_select;
+ ptr->select_lex= this;
/*
We can't cache internal temporary tables between prepares as the
    table may be deleted before next execution.
@@ -8427,8 +8490,6 @@ bool st_select_lex::init_nested_join(THD *thd)
nested_join= ptr->nested_join=
((NESTED_JOIN*) ((uchar*) ptr + ALIGN_SIZE(sizeof(TABLE_LIST))));
- if (unlikely(join_list->push_front(ptr, thd->mem_root)))
- DBUG_RETURN(1);
ptr->embedding= embedding;
ptr->join_list= join_list;
ptr->alias.str="(nested_join)";
@@ -8536,7 +8597,6 @@ TABLE_LIST *st_select_lex::nest_last_join(THD *thd)
ptr->join_using_fields= prev_join_using;
}
}
- join_list->push_front(ptr, thd->mem_root);
nested_join->used_tables= nested_join->not_null_tables= (table_map) 0;
DBUG_RETURN(ptr);
}
@@ -8728,7 +8788,7 @@ void st_select_lex::set_lock_for_tables(thr_lock_type lock_type)
bool st_select_lex_unit::add_fake_select_lex(THD *thd_arg)
{
SELECT_LEX *first_sl= first_select();
- DBUG_ENTER("add_fake_select_lex");
+ DBUG_ENTER("st_select_lex_unit::add_fake_select_lex");
DBUG_ASSERT(!fake_select_lex);
if (!(fake_select_lex= new (thd_arg->mem_root) SELECT_LEX()))
@@ -8738,16 +8798,19 @@ bool st_select_lex_unit::add_fake_select_lex(THD *thd_arg)
fake_select_lex->select_number= INT_MAX;
fake_select_lex->parent_lex= thd_arg->lex; /* Used in init_query. */
fake_select_lex->make_empty_select();
- fake_select_lex->linkage= GLOBAL_OPTIONS_TYPE;
+ fake_select_lex->set_linkage(GLOBAL_OPTIONS_TYPE);
fake_select_lex->select_limit= 0;
+ fake_select_lex->no_table_names_allowed= 1;
+
fake_select_lex->context.outer_context=first_sl->context.outer_context;
/* allow item list resolving in fake select for ORDER BY */
fake_select_lex->context.resolve_in_select_list= TRUE;
fake_select_lex->context.select_lex= fake_select_lex;
fake_select_lex->nest_level_base= first_select()->nest_level_base;
- fake_select_lex->nest_level=first_select()->nest_level;
+ if (fake_select_lex->set_nest_level(first_select()->nest_level))
+ DBUG_RETURN(1);
if (!is_unit_op())
{
@@ -8760,7 +8823,7 @@ bool st_select_lex_unit::add_fake_select_lex(THD *thd_arg)
fake_select_lex->no_table_names_allowed= 1;
thd_arg->lex->current_select= fake_select_lex;
}
- thd_arg->lex->pop_context();
+ //thd_arg->lex->pop_context("add fake");
DBUG_RETURN(0);
}
@@ -8796,7 +8859,7 @@ push_new_name_resolution_context(THD *thd,
left_op->first_leaf_for_name_resolution();
on_context->last_name_resolution_table=
right_op->last_leaf_for_name_resolution();
- return thd->lex->push_context(on_context, thd->mem_root);
+ return thd->lex->push_context(on_context);
}
@@ -8908,23 +8971,35 @@ void add_join_natural(TABLE_LIST *a, TABLE_LIST *b, List<String> *using_fields,
pointer - thread found, and its LOCK_thd_kill is locked.
*/
-THD *find_thread_by_id(longlong id, bool query_id)
+struct find_thread_callback_arg
{
- THD *tmp;
- mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
- I_List_iterator<THD> it(threads);
- while ((tmp=it++))
+ find_thread_callback_arg(longlong id_arg, bool query_id_arg):
+ thd(0), id(id_arg), query_id(query_id_arg) {}
+ THD *thd;
+ longlong id;
+ bool query_id;
+};
+
+
+my_bool find_thread_callback(THD *thd, find_thread_callback_arg *arg)
+{
+ if (thd->get_command() != COM_DAEMON &&
+ arg->id == (arg->query_id ? thd->query_id : (longlong) thd->thread_id))
{
- if (tmp->get_command() == COM_DAEMON)
- continue;
- if (id == (query_id ? tmp->query_id : (longlong) tmp->thread_id))
- {
- mysql_mutex_lock(&tmp->LOCK_thd_kill); // Lock from delete
- break;
- }
+ if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data);
+ mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete
+ arg->thd= thd;
+ return 1;
}
- mysql_mutex_unlock(&LOCK_thread_count);
- return tmp;
+ return 0;
+}
+
+
+THD *find_thread_by_id(longlong id, bool query_id)
+{
+ find_thread_callback_arg arg(id, query_id);
+ server_threads.iterate(find_thread_callback, &arg);
+ return arg.thd;
}
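find_thread_by_id() above no longer walks the global thread list by hand; it hands a callback plus an argument struct to server_threads.iterate(), which stops as soon as the callback returns non-zero. A minimal sketch of that find-by-callback pattern, using std::list as a stand-in for the server's thread registry:

#include <list>

// Sketch of the callback-iteration pattern used by the new
// find_thread_by_id(): the container walks its elements (under its own
// locking, in the real server) and stops when the callback returns true.
struct FakeTHD { long long id; };

struct FindArg {
  FakeTHD *found= nullptr;
  long long id;
};

template <class Container, class Callback, class Arg>
static void iterate(Container &c, Callback cb, Arg *arg)
{
  for (auto &elem : c)
    if (cb(&elem, arg))       // non-zero return stops the scan
      break;
}

static bool find_cb(FakeTHD *thd, FindArg *arg)
{
  if (thd->id != arg->id)
    return false;
  arg->found= thd;            // the real code also locks LOCK_thd_kill here
  return true;
}

int main()
{
  std::list<FakeTHD> threads{{1}, {2}, {3}};
  FindArg arg;
  arg.id= 2;
  iterate(threads, find_cb, &arg);
  return arg.found ? 0 : 1;
}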
@@ -8935,9 +9010,6 @@ THD *find_thread_by_id(longlong id, bool query_id)
@param id Thread id or query id
@param kill_signal Should it kill the query or the connection
@param type Type of id: thread id or query id
-
- @note
- This is written such that we have a short lock on LOCK_thread_count
*/
uint
@@ -8947,7 +9019,7 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ
uint error= (type == KILL_TYPE_QUERY ? ER_NO_SUCH_QUERY : ER_NO_SUCH_THREAD);
DBUG_ENTER("kill_one_thread");
DBUG_PRINT("enter", ("id: %lld signal: %u", id, (uint) kill_signal));
-
+ WSREP_DEBUG("kill_one_thread %llu", thd->thread_id);
if (id && (tmp= find_thread_by_id(id, type == KILL_TYPE_QUERY)))
{
/*
@@ -8971,9 +9043,14 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ
faster and do a harder kill than KILL_SYSTEM_THREAD;
*/
+#ifdef WITH_WSREP
if (((thd->security_ctx->master_access & SUPER_ACL) ||
thd->security_ctx->user_matches(tmp->security_ctx)) &&
- !wsrep_thd_is_BF(tmp, false))
+ !wsrep_thd_is_BF(tmp, false) && !tmp->wsrep_applier)
+#else
+ if ((thd->security_ctx->master_access & SUPER_ACL) ||
+ thd->security_ctx->user_matches(tmp->security_ctx))
+#endif /* WITH_WSREP */
{
tmp->awake_no_mutex(kill_signal);
error=0;
@@ -8982,6 +9059,7 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ
error= (type == KILL_TYPE_QUERY ? ER_KILL_QUERY_DENIED_ERROR :
ER_KILL_DENIED_ERROR);
mysql_mutex_unlock(&tmp->LOCK_thd_kill);
+ if (WSREP(tmp)) mysql_mutex_unlock(&tmp->LOCK_thd_data);
}
DBUG_PRINT("exit", ("%d", error));
DBUG_RETURN(error);
@@ -8996,56 +9074,67 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ
@param only_kill_query Should it kill the query or the connection
@note
- This is written such that we have a short lock on LOCK_thread_count
-
If we can't kill all threads because of security issues, no threads
are killed.
*/
-static uint kill_threads_for_user(THD *thd, LEX_USER *user,
- killed_state kill_signal, ha_rows *rows)
+struct kill_threads_callback_arg
{
- THD *tmp;
+ kill_threads_callback_arg(THD *thd_arg, LEX_USER *user_arg):
+ thd(thd_arg), user(user_arg) {}
+ THD *thd;
+ LEX_USER *user;
List<THD> threads_to_kill;
- DBUG_ENTER("kill_threads_for_user");
-
- *rows= 0;
+};
- if (unlikely(thd->is_fatal_error)) // If we run out of memory
- DBUG_RETURN(ER_OUT_OF_RESOURCES);
- DBUG_PRINT("enter", ("user: %s signal: %u", user->user.str,
- (uint) kill_signal));
-
- mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
- I_List_iterator<THD> it(threads);
- while ((tmp=it++))
+static my_bool kill_threads_callback(THD *thd, kill_threads_callback_arg *arg)
+{
+ if (thd->security_ctx->user)
{
- if (!tmp->security_ctx->user)
- continue;
/*
      Check that hostname (if given) and user name match.
host.str[0] == '%' means that host name was not given. See sql_yacc.yy
*/
- if (((user->host.str[0] == '%' && !user->host.str[1]) ||
- !strcmp(tmp->security_ctx->host_or_ip, user->host.str)) &&
- !strcmp(tmp->security_ctx->user, user->user.str))
+ if (((arg->user->host.str[0] == '%' && !arg->user->host.str[1]) ||
+ !strcmp(thd->security_ctx->host_or_ip, arg->user->host.str)) &&
+ !strcmp(thd->security_ctx->user, arg->user->user.str))
{
- if (!(thd->security_ctx->master_access & SUPER_ACL) &&
- !thd->security_ctx->user_matches(tmp->security_ctx))
+ if (!(arg->thd->security_ctx->master_access & SUPER_ACL) &&
+ !arg->thd->security_ctx->user_matches(thd->security_ctx))
+ return 1;
+ if (!arg->threads_to_kill.push_back(thd, arg->thd->mem_root))
{
- mysql_mutex_unlock(&LOCK_thread_count);
- DBUG_RETURN(ER_KILL_DENIED_ERROR);
+ if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data);
+ mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete
}
- if (!threads_to_kill.push_back(tmp, thd->mem_root))
- mysql_mutex_lock(&tmp->LOCK_thd_kill); // Lock from delete
}
}
- mysql_mutex_unlock(&LOCK_thread_count);
- if (!threads_to_kill.is_empty())
+ return 0;
+}
+
+
+static uint kill_threads_for_user(THD *thd, LEX_USER *user,
+ killed_state kill_signal, ha_rows *rows)
+{
+ kill_threads_callback_arg arg(thd, user);
+ DBUG_ENTER("kill_threads_for_user");
+
+ *rows= 0;
+
+ if (unlikely(thd->is_fatal_error)) // If we run out of memory
+ DBUG_RETURN(ER_OUT_OF_RESOURCES);
+
+ DBUG_PRINT("enter", ("user: %s signal: %u", user->user.str,
+ (uint) kill_signal));
+
+ if (server_threads.iterate(kill_threads_callback, &arg))
+ DBUG_RETURN(ER_KILL_DENIED_ERROR);
+
+ if (!arg.threads_to_kill.is_empty())
{
- List_iterator_fast<THD> it2(threads_to_kill);
+ List_iterator_fast<THD> it2(arg.threads_to_kill);
THD *next_ptr;
THD *ptr= it2++;
do
@@ -9061,6 +9150,7 @@ static uint kill_threads_for_user(THD *thd, LEX_USER *user,
*/
next_ptr= it2++;
mysql_mutex_unlock(&ptr->LOCK_thd_kill);
+ if (WSREP(ptr)) mysql_mutex_unlock(&ptr->LOCK_thd_data);
(*rows)++;
} while ((ptr= next_ptr));
}
@@ -9225,7 +9315,7 @@ bool multi_update_precheck(THD *thd, TABLE_LIST *tables)
{
TABLE_LIST *table;
LEX *lex= thd->lex;
- SELECT_LEX *select_lex= &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
DBUG_ENTER("multi_update_precheck");
if (select_lex->item_list.elements != lex->value_list.elements)
@@ -9261,7 +9351,7 @@ bool multi_update_precheck(THD *thd, TABLE_LIST *tables)
/*
    Are there tables of subqueries?
*/
- if (&lex->select_lex != lex->all_selects_list)
+ if (lex->first_select_lex() != lex->all_selects_list)
{
DBUG_PRINT("info",("Checking sub query list"));
for (table= tables; table; table= table->next_global)
@@ -9295,7 +9385,7 @@ bool multi_update_precheck(THD *thd, TABLE_LIST *tables)
bool multi_delete_precheck(THD *thd, TABLE_LIST *tables)
{
- SELECT_LEX *select_lex= &thd->lex->select_lex;
+ SELECT_LEX *select_lex= thd->lex->first_select_lex();
TABLE_LIST *aux_tables= thd->lex->auxiliary_table_list.first;
TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last;
DBUG_ENTER("multi_delete_precheck");
@@ -9412,7 +9502,7 @@ static TABLE_LIST *multi_delete_table_match(LEX *lex, TABLE_LIST *tbl,
bool multi_delete_set_locks_and_link_aux_tables(LEX *lex)
{
- TABLE_LIST *tables= lex->select_lex.table_list.first;
+ TABLE_LIST *tables= lex->first_select_lex()->table_list.first;
TABLE_LIST *target_tbl;
DBUG_ENTER("multi_delete_set_locks_and_link_aux_tables");
@@ -9454,7 +9544,8 @@ bool multi_delete_set_locks_and_link_aux_tables(LEX *lex)
bool update_precheck(THD *thd, TABLE_LIST *tables)
{
DBUG_ENTER("update_precheck");
- if (thd->lex->select_lex.item_list.elements != thd->lex->value_list.elements)
+ if (thd->lex->first_select_lex()->item_list.elements !=
+ thd->lex->value_list.elements)
{
my_message(ER_WRONG_VALUE_COUNT, ER_THD(thd, ER_WRONG_VALUE_COUNT), MYF(0));
DBUG_RETURN(TRUE);
@@ -9545,7 +9636,7 @@ void create_table_set_open_action_and_adjust_tables(LEX *lex)
else
create_table->open_type= OT_BASE_ONLY;
- if (!lex->select_lex.item_list.elements)
+ if (!lex->first_select_lex()->item_list.elements)
{
/*
Avoid opening and locking target table for ordinary CREATE TABLE
@@ -9576,7 +9667,7 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables,
TABLE_LIST *create_table)
{
LEX *lex= thd->lex;
- SELECT_LEX *select_lex= &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
ulong want_priv;
bool error= TRUE; // Error message is given
DBUG_ENTER("create_table_precheck");
@@ -9717,8 +9808,9 @@ Item *negate_expression(THD *thd, Item *expr)
{
/* it is NOT(NOT( ... )) */
Item *arg= ((Item_func *) expr)->arguments()[0];
+ const Type_handler *fh= arg->fixed_type_handler();
enum_parsing_place place= thd->lex->current_select->parsing_place;
- if (arg->is_bool_type() || place == IN_WHERE || place == IN_HAVING)
+ if ((fh && fh->is_bool_type()) || place == IN_WHERE || place == IN_HAVING)
return arg;
/*
if it is not boolean function then we have to emulate value of
@@ -9756,8 +9848,7 @@ void get_default_definer(THD *thd, LEX_USER *definer, bool role)
definer->host.length= strlen(definer->host.str);
}
definer->user.length= strlen(definer->user.str);
-
- definer->reset_auth();
+ definer->auth= NULL;
}
@@ -9816,7 +9907,7 @@ LEX_USER *create_definer(THD *thd, LEX_CSTRING *user_name,
definer->user= *user_name;
definer->host= *host_name;
- definer->reset_auth();
+ definer->auth= NULL;
return definer;
}
@@ -10085,6 +10176,9 @@ bool parse_sql(THD *thd, Parser_state *parser_state,
((thd->variables.sql_mode & MODE_ORACLE) ?
ORAparse(thd) :
MYSQLparse(thd)) != 0;
+ DBUG_ASSERT(opt_bootstrap || mysql_parse_status ||
+ thd->lex->select_stack_top == 0);
+ thd->lex->current_select= thd->lex->first_select_lex();
/*
Check that if MYSQLparse() failed either thd->is_error() is set, or an
diff --git a/sql/sql_parse.h b/sql/sql_parse.h
index 1027872898a..7c8ba37f1de 100644
--- a/sql/sql_parse.h
+++ b/sql/sql_parse.h
@@ -99,10 +99,9 @@ void create_table_set_open_action_and_adjust_tables(LEX *lex);
void mysql_init_multi_delete(LEX *lex);
bool multi_delete_set_locks_and_link_aux_tables(LEX *lex);
void create_table_set_open_action_and_adjust_tables(LEX *lex);
-pthread_handler_t handle_bootstrap(void *arg);
+int bootstrap(MYSQL_FILE *file);
int mysql_execute_command(THD *thd);
bool do_command(THD *thd);
-void do_handle_bootstrap(THD *thd);
bool dispatch_command(enum enum_server_command command, THD *thd,
char* packet, uint packet_length,
bool is_com_multi, bool is_next_command);
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 80565eab208..746a030a7ab 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -533,15 +533,10 @@ static bool create_full_part_field_array(THD *thd, TABLE *table,
full_part_field_array may be NULL if storage engine supports native
partitioning.
*/
- table->vcol_set= table->read_set= &part_info->full_part_field_set;
+ table->read_set= &part_info->full_part_field_set;
if ((ptr= part_info->full_part_field_array))
for (; *ptr; ptr++)
- {
- if ((*ptr)->vcol_info)
- table->mark_virtual_col(*ptr);
- else
- bitmap_fast_test_and_set(table->read_set, (*ptr)->field_index);
- }
+ table->mark_column_with_deps(*ptr);
table->default_column_bitmaps();
end:
@@ -843,7 +838,8 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
goto end;
table->get_fields_in_item_tree= true;
- func_expr->walk(&Item::change_context_processor, 0, &lex.select_lex.context);
+ func_expr->walk(&Item::change_context_processor, 0,
+ &lex.first_select_lex()->context);
thd->where= "partition function";
/*
In execution we must avoid the use of thd->change_item_tree since
@@ -1571,7 +1567,7 @@ static bool check_vers_constants(THD *thd, partition_info *part_info)
my_tz_OFFSET0->gmt_sec_to_TIME(&ltime, vers_info->interval.start);
while ((el= it++)->id < hist_parts)
{
- if (date_add_interval(&ltime, vers_info->interval.type,
+ if (date_add_interval(thd, &ltime, vers_info->interval.type,
vers_info->interval.step))
goto err;
uint error= 0;
@@ -2664,7 +2660,7 @@ char *generate_partition_syntax(THD *thd, partition_info *part_info,
default:
DBUG_ASSERT(0);
/* We really shouldn't get here, no use in continuing from here */
- my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
+ my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATAL));
DBUG_RETURN(NULL);
}
if (part_info->part_type == VERSIONING_PARTITION)
@@ -6011,7 +6007,7 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
lpt->pack_frm_data,
lpt->pack_frm_len))))
{
- file->print_error(error, MYF(error != ER_OUTOFMEMORY ? 0 : ME_FATALERROR));
+ file->print_error(error, MYF(error != ER_OUTOFMEMORY ? 0 : ME_FATAL));
}
if (mysql_trans_commit_alter_copy_data(thd))
@@ -8229,7 +8225,8 @@ static int get_part_iter_for_interval_via_mapping(partition_info *part_info,
field->type() == MYSQL_TYPE_DATETIME))
{
/* Monotonic, but return NULL for dates with zeros in month/day. */
- zero_in_start_date= field->get_date(&start_date, 0);
+ DBUG_ASSERT(field->cmp_type() == TIME_RESULT); // No rounding/truncation
+ zero_in_start_date= field->get_date(&start_date, date_mode_t(0));
DBUG_PRINT("info", ("zero start %u %04d-%02d-%02d",
zero_in_start_date, start_date.year,
start_date.month, start_date.day));
@@ -8253,7 +8250,8 @@ static int get_part_iter_for_interval_via_mapping(partition_info *part_info,
!part_info->part_expr->null_value)
{
MYSQL_TIME end_date;
- bool zero_in_end_date= field->get_date(&end_date, 0);
+ DBUG_ASSERT(field->cmp_type() == TIME_RESULT); // No rounding/truncation
+ bool zero_in_end_date= field->get_date(&end_date, date_mode_t(0));
/*
This is an optimization for TO_DAYS()/TO_SECONDS() to avoid scanning
the NULL partition for ranges that cannot include a date with 0 as
diff --git a/sql/sql_partition_admin.cc b/sql/sql_partition_admin.cc
index a530f2886b4..11276aa5a64 100644
--- a/sql/sql_partition_admin.cc
+++ b/sql/sql_partition_admin.cc
@@ -51,7 +51,7 @@ bool Sql_cmd_alter_table_exchange_partition::execute(THD *thd)
/* Moved from mysql_execute_command */
LEX *lex= thd->lex;
  /* first SELECT_LEX (has special meaning for many non-SELECT commands) */
- SELECT_LEX *select_lex= &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
/* first table of first SELECT_LEX */
TABLE_LIST *first_table= (TABLE_LIST*) select_lex->table_list.first;
/*
@@ -742,7 +742,7 @@ bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd)
int error;
ha_partition *partition;
ulong timeout= thd->variables.lock_wait_timeout;
- TABLE_LIST *first_table= thd->lex->select_lex.table_list.first;
+ TABLE_LIST *first_table= thd->lex->first_select_lex()->table_list.first;
Alter_info *alter_info= &thd->lex->alter_info;
uint table_counter, i;
List<String> partition_names_list;
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index b8aff064aca..82b4e85e6b3 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -280,6 +280,7 @@ struct st_mysql_sys_var
MYSQL_PLUGIN_VAR_HEADER;
};
+enum install_status { INSTALL_GOOD, INSTALL_FAIL_WARN_OK, INSTALL_FAIL_NOT_OK };
/*
sys_var class for access to all plugin variables visible to the user
*/
@@ -1077,7 +1078,7 @@ static st_plugin_int *plugin_insert_or_reuse(struct st_plugin_int *plugin)
NOTE
Requires that a write-lock is held on LOCK_system_variables_hash
*/
-static bool plugin_add(MEM_ROOT *tmp_root,
+static enum install_status plugin_add(MEM_ROOT *tmp_root, bool if_not_exists,
const LEX_CSTRING *name, LEX_CSTRING *dl, myf MyFlags)
{
struct st_plugin_int tmp, *maybe_dupe;
@@ -1088,14 +1089,16 @@ static bool plugin_add(MEM_ROOT *tmp_root,
if (name->str && plugin_find_internal(name, MYSQL_ANY_PLUGIN))
{
+ if (if_not_exists)
+ MyFlags|= ME_NOTE;
my_error(ER_PLUGIN_INSTALLED, MyFlags, name->str);
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(if_not_exists ? INSTALL_FAIL_WARN_OK : INSTALL_FAIL_NOT_OK);
}
/* Clear the whole struct to catch future extensions. */
bzero((char*) &tmp, sizeof(tmp));
fix_dl_name(tmp_root, dl);
if (! (tmp.plugin_dl= plugin_dl_add(dl, MyFlags)))
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(INSTALL_FAIL_NOT_OK);
/* Find plugin by name */
for (plugin= tmp.plugin_dl->plugins; plugin->info; plugin++)
{
@@ -1121,7 +1124,7 @@ static bool plugin_add(MEM_ROOT *tmp_root,
if (plugin->name != maybe_dupe->plugin->name)
{
my_error(ER_UDF_EXISTS, MyFlags, plugin->name);
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(INSTALL_FAIL_NOT_OK);
}
dupes++;
continue; // already installed
@@ -1173,7 +1176,7 @@ static bool plugin_add(MEM_ROOT *tmp_root,
init_alloc_root(&tmp_plugin_ptr->mem_root, "plugin", 4096, 4096, MYF(0));
if (name->str)
- DBUG_RETURN(FALSE); // all done
+ DBUG_RETURN(INSTALL_GOOD); // all done
oks++;
tmp.plugin_dl->ref_count++;
@@ -1191,7 +1194,9 @@ err:
my_error(ER_CANT_FIND_DL_ENTRY, MyFlags, name->str);
plugin_dl_del(tmp.plugin_dl);
- DBUG_RETURN(errs > 0 || oks + dupes == 0);
+ if (errs > 0 || oks + dupes == 0)
+ DBUG_RETURN(INSTALL_FAIL_NOT_OK);
+ DBUG_RETURN(INSTALL_GOOD);
}
static void plugin_variables_deinit(struct st_plugin_int *plugin)
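plugin_add() now returns a three-way install_status so that INSTALL PLUGIN ... IF NOT EXISTS can downgrade an "already installed" duplicate to a note while genuine failures still fail the statement. A small illustrative sketch of how such a tri-state collapses back into the boolean error the caller reports (names are mine, not the server's):

#include <cassert>

// Illustrative mapping of a tri-state add result to the boolean error
// returned by the INSTALL statement: only a hard failure fails the
// statement; IF NOT EXISTS turns a duplicate into a note-level success.
enum install_status_sketch { GOOD, FAIL_WARN_OK, FAIL_NOT_OK };

static install_status_sketch add_plugin(bool already_installed, bool if_not_exists)
{
  if (already_installed)
    return if_not_exists ? FAIL_WARN_OK : FAIL_NOT_OK;
  return GOOD;
}

int main()
{
  // Duplicate + IF NOT EXISTS: statement succeeds (with a note).
  assert(add_plugin(true, true) != FAIL_NOT_OK);
  // Duplicate without IF NOT EXISTS: statement fails.
  assert(add_plugin(true, false) == FAIL_NOT_OK);
  return 0;
}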
@@ -1847,7 +1852,7 @@ static void plugin_load(MEM_ROOT *tmp_root)
the mutex here to satisfy the assert
*/
mysql_mutex_lock(&LOCK_plugin);
- plugin_add(tmp_root, &name, &dl, MYF(ME_ERROR_LOG));
+ plugin_add(tmp_root, false, &name, &dl, MYF(ME_ERROR_LOG));
free_root(tmp_root, MYF(MY_MARK_BLOCKS_FREE));
mysql_mutex_unlock(&LOCK_plugin);
}
@@ -1870,7 +1875,7 @@ end:
static bool plugin_load_list(MEM_ROOT *tmp_root, const char *list)
{
char buffer[FN_REFLEN];
- LEX_STRING name= {buffer, 0}, dl= {NULL, 0}, *str= &name;
+ LEX_CSTRING name= {buffer, 0}, dl= {NULL, 0}, *str= &name;
char *p= buffer;
DBUG_ENTER("plugin_load_list");
while (list)
@@ -1889,7 +1894,7 @@ static bool plugin_load_list(MEM_ROOT *tmp_root, const char *list)
#ifndef __WIN__
case ':': /* can't use this as delimiter as it may be drive letter */
#endif
- str->str[str->length]= '\0';
+ p[-1]= 0;
if (str == &name) // load all plugins in named module
{
if (!name.length)
@@ -1902,16 +1907,16 @@ static bool plugin_load_list(MEM_ROOT *tmp_root, const char *list)
mysql_mutex_lock(&LOCK_plugin);
free_root(tmp_root, MYF(MY_MARK_BLOCKS_FREE));
name.str= 0; // load everything
- if (plugin_add(tmp_root, (LEX_CSTRING*) &name, (LEX_CSTRING*) &dl,
- MYF(ME_ERROR_LOG)))
+ if (plugin_add(tmp_root, false, &name, &dl,
+ MYF(ME_ERROR_LOG)) != INSTALL_GOOD)
goto error;
}
else
{
free_root(tmp_root, MYF(MY_MARK_BLOCKS_FREE));
mysql_mutex_lock(&LOCK_plugin);
- if (plugin_add(tmp_root, (LEX_CSTRING*) &name, (LEX_CSTRING*) &dl,
- MYF(ME_ERROR_LOG)))
+ if (plugin_add(tmp_root, false, &name, &dl,
+ MYF(ME_ERROR_LOG)) != INSTALL_GOOD)
goto error;
}
mysql_mutex_unlock(&LOCK_plugin);
@@ -1923,7 +1928,7 @@ static bool plugin_load_list(MEM_ROOT *tmp_root, const char *list)
case '#':
if (str == &name)
{
- name.str[name.length]= '\0';
+ p[-1]= 0;
str= &dl;
str->str= p;
continue;
@@ -2146,7 +2151,7 @@ bool mysql_install_plugin(THD *thd, const LEX_CSTRING *name,
TABLE_LIST tables;
TABLE *table;
LEX_CSTRING dl= *dl_arg;
- bool error;
+ enum install_status error;
int argc=orig_argc;
char **argv=orig_argv;
unsigned long event_class_mask[MYSQL_AUDIT_CLASS_MASK_SIZE] =
@@ -2194,12 +2199,14 @@ bool mysql_install_plugin(THD *thd, const LEX_CSTRING *name,
mysql_audit_acquire_plugins(thd, event_class_mask);
mysql_mutex_lock(&LOCK_plugin);
- error= plugin_add(thd->mem_root, name, &dl, MYF(0));
- if (unlikely(error))
+ error= plugin_add(thd->mem_root, thd->lex->create_info.if_not_exists(),
+ name, &dl, MYF(0));
+ if (unlikely(error != INSTALL_GOOD))
goto err;
if (name->str)
- error= finalize_install(thd, table, name, &argc, argv);
+ error= finalize_install(thd, table, name, &argc, argv)
+ ? INSTALL_FAIL_NOT_OK : INSTALL_GOOD;
else
{
st_plugin_dl *plugin_dl= plugin_dl_find(&dl);
@@ -2207,11 +2214,12 @@ bool mysql_install_plugin(THD *thd, const LEX_CSTRING *name,
for (plugin= plugin_dl->plugins; plugin->info; plugin++)
{
LEX_CSTRING str= { plugin->name, strlen(plugin->name) };
- error|= finalize_install(thd, table, &str, &argc, argv);
+ if (finalize_install(thd, table, &str, &argc, argv))
+ error= INSTALL_FAIL_NOT_OK;
}
}
- if (unlikely(error))
+ if (unlikely(error != INSTALL_GOOD))
{
reap_needed= true;
reap_plugins();
@@ -2220,10 +2228,11 @@ err:
mysql_mutex_unlock(&LOCK_plugin);
if (argv)
free_defaults(argv);
- DBUG_RETURN(error);
-
-WSREP_ERROR_LABEL:
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(error == INSTALL_FAIL_NOT_OK);
+#ifdef WITH_WSREP
+wsrep_error_label:
+ DBUG_RETURN(true);
+#endif
}
@@ -2235,8 +2244,9 @@ static bool do_uninstall(THD *thd, TABLE *table, const LEX_CSTRING *name)
if (!(plugin= plugin_find_internal(name, MYSQL_ANY_PLUGIN)) ||
plugin->state & (PLUGIN_IS_UNINITIALIZED | PLUGIN_IS_DYING))
{
- my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "PLUGIN", name->str);
- return 1;
+ myf MyFlags= thd->lex->if_exists() ? ME_NOTE : 0;
+ my_error(ER_SP_DOES_NOT_EXIST, MyFlags, "PLUGIN", name->str);
+ return !MyFlags;
}
if (!plugin->plugin_dl)
{
@@ -2299,7 +2309,7 @@ bool mysql_uninstall_plugin(THD *thd, const LEX_CSTRING *name,
if (!opt_noacl && check_table_access(thd, DELETE_ACL, &tables, FALSE, 1, FALSE))
DBUG_RETURN(TRUE);
- WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL)
+ WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL);
/* need to open before acquiring LOCK_plugin or it will deadlock */
if (! (table= open_ltable(thd, &tables, TL_WRITE, MYSQL_LOCK_IGNORE_TIMEOUT)))
@@ -2358,17 +2368,19 @@ bool mysql_uninstall_plugin(THD *thd, const LEX_CSTRING *name,
}
else
{
- my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "SONAME", dl.str);
- error= true;
+ myf MyFlags= thd->lex->if_exists() ? ME_NOTE : 0;
+ my_error(ER_SP_DOES_NOT_EXIST, MyFlags, "SONAME", dl.str);
+ error|= !MyFlags;
}
}
reap_plugins();
mysql_mutex_unlock(&LOCK_plugin);
DBUG_RETURN(error);
-
-WSREP_ERROR_LABEL:
- DBUG_RETURN(TRUE);
+#ifdef WITH_WSREP
+wsrep_error_label:
+ DBUG_RETURN(true);
+#endif
}
@@ -4337,25 +4349,23 @@ void wsrep_plugins_pre_init()
  members of wsrep startup threads with correct values, as these values
were not available at the time these threads were created.
*/
-void wsrep_plugins_post_init()
-{
- THD *thd;
- I_List_iterator<THD> it(threads);
- while ((thd= it++))
+my_bool post_init_callback(THD *thd, void *)
+{
+ if (thd->wsrep_applier)
{
- if (IF_WSREP(thd->wsrep_applier,1))
- {
- // Save options_bits as it will get overwritten in plugin_thdvar_init()
- ulonglong option_bits_saved= thd->variables.option_bits;
-
- plugin_thdvar_init(thd);
-
- // Restore option_bits
- thd->variables.option_bits= option_bits_saved;
- }
+ // Save options_bits as it will get overwritten in plugin_thdvar_init()
+ ulonglong option_bits_saved= thd->variables.option_bits;
+ plugin_thdvar_init(thd);
+ // Restore option_bits
+ thd->variables.option_bits= option_bits_saved;
}
+ return 0;
+}
- return;
+
+void wsrep_plugins_post_init()
+{
+ server_threads.iterate(post_init_callback);
}
#endif /* WITH_WSREP */
diff --git a/sql/sql_plugin_services.ic b/sql/sql_plugin_services.ic
index 57600e1b4eb..955b9a0ce3a 100644
--- a/sql/sql_plugin_services.ic
+++ b/sql/sql_plugin_services.ic
@@ -142,51 +142,38 @@ static struct thd_error_context_service_st thd_error_context_handler= {
};
static struct wsrep_service_st wsrep_handler = {
- get_wsrep,
- get_wsrep_certify_nonPK,
- get_wsrep_debug,
- get_wsrep_drupal_282555_workaround,
get_wsrep_recovery,
- get_wsrep_load_data_splitting,
- get_wsrep_log_conflicts,
- get_wsrep_protocol_version,
- wsrep_aborting_thd_contains,
- wsrep_aborting_thd_enqueue,
wsrep_consistency_check,
wsrep_is_wsrep_xid,
wsrep_xid_seqno,
wsrep_xid_uuid,
- wsrep_lock_rollback,
wsrep_on,
- wsrep_post_commit,
- wsrep_prepare_key,
- wsrep_run_wsrep_commit,
+ wsrep_prepare_key_for_innodb,
wsrep_thd_LOCK,
wsrep_thd_UNLOCK,
- wsrep_thd_awake,
- wsrep_thd_conflict_state,
- wsrep_thd_conflict_state_str,
- wsrep_thd_exec_mode,
- wsrep_thd_exec_mode_str,
- wsrep_thd_get_conflict_state,
- wsrep_thd_is_BF,
- wsrep_thd_is_wsrep,
wsrep_thd_query,
- wsrep_thd_query_state,
- wsrep_thd_query_state_str,
wsrep_thd_retry_counter,
- wsrep_thd_set_conflict_state,
wsrep_thd_ignore_table,
wsrep_thd_trx_seqno,
- wsrep_thd_ws_handle,
wsrep_thd_auto_increment_variables,
- wsrep_set_load_multi_commit,
- wsrep_is_load_multi_commit,
- wsrep_trx_is_aborting,
- wsrep_trx_order_before,
- wsrep_unlock_rollback,
+ wsrep_thd_is_aborting,
wsrep_set_data_home_dir,
- wsrep_thd_is_applier
+ wsrep_thd_is_BF,
+ wsrep_thd_is_local,
+ wsrep_thd_self_abort,
+ wsrep_thd_append_key,
+ wsrep_thd_client_state_str,
+ wsrep_thd_client_mode_str,
+ wsrep_thd_transaction_state_str,
+ wsrep_thd_transaction_id,
+ wsrep_thd_bf_abort,
+ wsrep_thd_order_before,
+ wsrep_handle_SR_rollback,
+ wsrep_thd_skip_locking,
+ wsrep_get_sr_table_name,
+ wsrep_get_debug,
+ wsrep_commit_ordered,
+ wsrep_thd_is_applying
};
static struct thd_specifics_service_st thd_specifics_handler=
@@ -221,6 +208,16 @@ static struct my_print_error_service_st my_print_error_handler=
my_printv_error
};
+struct json_service_st json_handler=
+{
+ json_type,
+ json_get_array_item,
+ json_get_object_key,
+ json_get_object_nkey,
+ json_escape_string,
+ json_unescape_json
+};
+
static struct st_service_ref list_of_services[]=
{
{ "base64_service", VERSION_base64, &base64_handler },
@@ -243,6 +240,7 @@ static struct st_service_ref list_of_services[]=
{ "thd_specifics_service", VERSION_thd_specifics, &thd_specifics_handler },
{ "thd_timezone_service", VERSION_thd_timezone, &thd_timezone_handler },
{ "thd_wait_service", VERSION_thd_wait, &thd_wait_handler },
- { "wsrep_service", VERSION_wsrep, &wsrep_handler }
+ { "wsrep_service", VERSION_wsrep, &wsrep_handler },
+ { "json_service", VERSION_json, &json_handler }
};
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 63fbc9e29f6..fc4aaa82f33 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -112,6 +112,7 @@ When one supplies long data for a placeholder:
#include "sp_cache.h"
#include "sql_handler.h" // mysql_ha_rm_tables
#include "probes_mysql.h"
+#include "opt_trace.h"
#ifdef EMBEDDED_LIBRARY
/* include MYSQL_BIND headers */
#include <mysql.h>
@@ -189,7 +190,7 @@ public:
void setup_set_params();
virtual Query_arena::Type type() const;
virtual void cleanup_stmt();
- bool set_name(LEX_CSTRING *name);
+ bool set_name(const LEX_CSTRING *name);
inline void close_cursor() { delete cursor; cursor= 0; }
inline bool is_in_use() { return flags & (uint) IS_IN_USE; }
inline bool is_sql_prepare() const { return flags & (uint) IS_SQL_PREPARE; }
@@ -1354,7 +1355,7 @@ static int mysql_test_update(Prepared_statement *stmt,
THD *thd= stmt->thd;
uint table_count= 0;
TABLE_LIST *update_source_table;
- SELECT_LEX *select= &stmt->lex->select_lex;
+ SELECT_LEX *select= stmt->lex->first_select_lex();
#ifndef NO_EMBEDDED_ACCESS_CHECKS
uint want_privilege;
#endif
@@ -1410,10 +1411,10 @@ static int mysql_test_update(Prepared_statement *stmt,
table_list->table->grant.want_privilege= want_privilege;
table_list->register_want_access(want_privilege);
#endif
- thd->lex->select_lex.no_wrap_view_item= TRUE;
+ thd->lex->first_select_lex()->no_wrap_view_item= TRUE;
res= setup_fields(thd, Ref_ptr_array(),
select->item_list, MARK_COLUMNS_READ, 0, NULL, 0);
- thd->lex->select_lex.no_wrap_view_item= FALSE;
+ thd->lex->first_select_lex()->no_wrap_view_item= FALSE;
if (res)
goto error;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -1478,10 +1479,10 @@ static bool mysql_test_delete(Prepared_statement *stmt,
goto error;
}
- DBUG_RETURN(mysql_prepare_delete(thd, table_list,
- lex->select_lex.with_wild,
- lex->select_lex.item_list,
- &lex->select_lex.where,
+ DBUG_RETURN(mysql_prepare_delete(thd, table_list,
+ lex->first_select_lex()->with_wild,
+ lex->first_select_lex()->item_list,
+ &lex->first_select_lex()->where,
&delete_while_scanning));
error:
DBUG_RETURN(TRUE);
@@ -1513,7 +1514,7 @@ static int mysql_test_select(Prepared_statement *stmt,
SELECT_LEX_UNIT *unit= &lex->unit;
DBUG_ENTER("mysql_test_select");
- lex->select_lex.context.resolve_in_select_list= TRUE;
+ lex->first_select_lex()->context.resolve_in_select_list= TRUE;
ulong privilege= lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL;
if (tables)
@@ -1526,7 +1527,7 @@ static int mysql_test_select(Prepared_statement *stmt,
if (!lex->result && !(lex->result= new (stmt->mem_root) select_send(thd)))
{
- my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR),
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATAL),
static_cast<int>(sizeof(select_send)));
goto error;
}
@@ -1547,7 +1548,7 @@ static int mysql_test_select(Prepared_statement *stmt,
if (!lex->describe && !thd->lex->analyze_stmt && !stmt->is_sql_prepare())
{
/* Make copy of item list, as change_columns may change it */
- List<Item> fields(lex->select_lex.item_list);
+ List<Item> fields(lex->first_select_lex()->item_list);
/* Change columns if a procedure like analyse() */
if (unit->last_procedure && unit->last_procedure->change_columns(thd, fields))
@@ -1705,7 +1706,7 @@ static bool select_like_stmt_test(Prepared_statement *stmt,
THD *thd= stmt->thd;
LEX *lex= stmt->lex;
- lex->select_lex.context.resolve_in_select_list= TRUE;
+ lex->first_select_lex()->context.resolve_in_select_list= TRUE;
if (specific_prepare && (*specific_prepare)(thd))
DBUG_RETURN(TRUE);
@@ -1773,7 +1774,7 @@ static bool mysql_test_create_table(Prepared_statement *stmt)
DBUG_ENTER("mysql_test_create_table");
THD *thd= stmt->thd;
LEX *lex= stmt->lex;
- SELECT_LEX *select_lex= &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
bool res= FALSE;
bool link_to_local;
TABLE_LIST *create_table= lex->query_tables;
@@ -2105,11 +2106,11 @@ static bool mysql_test_multidelete(Prepared_statement *stmt,
{
THD *thd= stmt->thd;
- thd->lex->current_select= &thd->lex->select_lex;
+ thd->lex->current_select= thd->lex->first_select_lex();
if (add_item_to_list(thd, new (thd->mem_root)
Item_null(thd)))
{
- my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 0);
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATAL), 0);
goto error;
}
@@ -2144,13 +2145,14 @@ error:
static int mysql_insert_select_prepare_tester(THD *thd)
{
- SELECT_LEX *first_select= &thd->lex->select_lex;
+ SELECT_LEX *first_select= thd->lex->first_select_lex();
TABLE_LIST *second_table= first_select->table_list.first->next_local;
/* Skip first table, which is the table we are inserting in */
first_select->table_list.first= second_table;
- thd->lex->select_lex.context.table_list=
- thd->lex->select_lex.context.first_name_resolution_table= second_table;
+ thd->lex->first_select_lex()->context.table_list=
+ thd->lex->first_select_lex()->context.first_name_resolution_table=
+ second_table;
return mysql_insert_select_prepare(thd);
}
@@ -2185,7 +2187,7 @@ static bool mysql_test_insert_select(Prepared_statement *stmt,
return 1;
/* store it, because mysql_insert_select_prepare_tester change it */
- first_local_table= lex->select_lex.table_list.first;
+ first_local_table= lex->first_select_lex()->table_list.first;
DBUG_ASSERT(first_local_table != 0);
res=
@@ -2193,7 +2195,7 @@ static bool mysql_test_insert_select(Prepared_statement *stmt,
&mysql_insert_select_prepare_tester,
OPTION_SETUP_TABLES_DONE);
/* revert changes made by mysql_insert_select_prepare_tester */
- lex->select_lex.table_list.first= first_local_table;
+ lex->first_select_lex()->table_list.first= first_local_table;
return res;
}
@@ -2219,7 +2221,7 @@ static int mysql_test_handler_read(Prepared_statement *stmt,
SQL_HANDLER *ha_table;
DBUG_ENTER("mysql_test_handler_read");
- lex->select_lex.context.resolve_in_select_list= TRUE;
+ lex->first_select_lex()->context.resolve_in_select_list= TRUE;
/*
We don't have to test for permissions as this is already done during
@@ -2229,7 +2231,7 @@ static int mysql_test_handler_read(Prepared_statement *stmt,
lex->ident.str,
lex->insert_list,
lex->ha_rkey_mode,
- lex->select_lex.where)))
+ lex->first_select_lex()->where)))
DBUG_RETURN(1);
if (!stmt->is_sql_prepare())
@@ -2268,7 +2270,7 @@ static bool check_prepared_statement(Prepared_statement *stmt)
{
THD *thd= stmt->thd;
LEX *lex= stmt->lex;
- SELECT_LEX *select_lex= &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
TABLE_LIST *tables;
enum enum_sql_command sql_command= lex->sql_command;
int res= 0;
@@ -2277,12 +2279,24 @@ static bool check_prepared_statement(Prepared_statement *stmt)
sql_command, stmt->param_count));
lex->first_lists_tables_same();
+ lex->fix_first_select_number();
tables= lex->query_tables;
/* set context for commands which do not use setup_tables */
- lex->select_lex.context.resolve_in_table_list_only(select_lex->
+ lex->first_select_lex()->context.resolve_in_table_list_only(select_lex->
get_table_list());
+ /*
+ For the optimizer trace, this is, at statement preparation, the counterpart
+ of what is done at statement execution (in mysql_execute_command()).
+ */
+ Opt_trace_start ots(thd, tables, lex->sql_command, &lex->var_list,
+ thd->query(), thd->query_length(),
+ thd->variables.character_set_client);
+
+ Json_writer_object trace_command(thd);
+ Json_writer_array trace_command_steps(thd, "steps");
+
/* Reset warning count for each query that uses tables */
if (tables)
thd->get_stmt_da()->opt_clear_warning_info(thd->query_id);
@@ -2663,7 +2677,7 @@ end:
}
/**
- Get an SQL statement from an item in lex->prepared_stmt_code.
+ Get an SQL statement from an item in m_code.
This function can return pointers to very different memory classes:
- a static string "NULL", if the item returned NULL
@@ -2688,13 +2702,15 @@ end:
@retval true on error (out of memory)
*/
-bool LEX::get_dynamic_sql_string(LEX_CSTRING *dst, String *buffer)
+bool Lex_prepared_stmt::get_dynamic_sql_string(THD *thd,
+ LEX_CSTRING *dst,
+ String *buffer)
{
- if (prepared_stmt_code->fix_fields_if_needed_for_scalar(thd, NULL))
+ if (m_code->fix_fields_if_needed_for_scalar(thd, NULL))
return true;
- const String *str= prepared_stmt_code->val_str(buffer);
- if (prepared_stmt_code->null_value)
+ const String *str= m_code->val_str(buffer);
+ if (m_code->null_value)
{
/*
Prepare source was NULL, so we need to set "str" to
@@ -2775,7 +2791,7 @@ bool LEX::get_dynamic_sql_string(LEX_CSTRING *dst, String *buffer)
void mysql_sql_stmt_prepare(THD *thd)
{
LEX *lex= thd->lex;
- LEX_CSTRING *name= &lex->prepared_stmt_name;
+ const LEX_CSTRING *name= &lex->prepared_stmt.name();
Prepared_statement *stmt;
LEX_CSTRING query;
DBUG_ENTER("mysql_sql_stmt_prepare");
@@ -2800,7 +2816,7 @@ void mysql_sql_stmt_prepare(THD *thd)
See comments in get_dynamic_sql_string().
*/
StringBuffer<256> buffer;
- if (lex->get_dynamic_sql_string(&query, &buffer) ||
+ if (lex->prepared_stmt.get_dynamic_sql_string(thd, &query, &buffer) ||
! (stmt= new Prepared_statement(thd)))
{
DBUG_VOID_RETURN; /* out of memory */
@@ -2863,7 +2879,7 @@ void mysql_sql_stmt_execute_immediate(THD *thd)
LEX_CSTRING query;
DBUG_ENTER("mysql_sql_stmt_execute_immediate");
- if (lex->prepared_stmt_params_fix_fields(thd))
+ if (lex->prepared_stmt.params_fix_fields(thd))
DBUG_VOID_RETURN;
/*
@@ -2875,7 +2891,7 @@ void mysql_sql_stmt_execute_immediate(THD *thd)
See comments in get_dynamic_sql_string().
*/
StringBuffer<256> buffer;
- if (lex->get_dynamic_sql_string(&query, &buffer) ||
+ if (lex->prepared_stmt.get_dynamic_sql_string(thd, &query, &buffer) ||
!(stmt= new Prepared_statement(thd)))
DBUG_VOID_RETURN; // out of memory
@@ -3010,6 +3026,10 @@ void reinit_stmt_before_use(THD *thd, LEX *lex)
for (order= win_spec->order_list->first; order; order= order->next)
order->item= &order->item_ptr;
}
+
+ // Reinit pushed-down conditions
+ sl->cond_pushed_into_where= NULL;
+ sl->cond_pushed_into_having= NULL;
}
if (sl->changed_elements & TOUCHED_SEL_DERIVED)
{
@@ -3064,7 +3084,7 @@ void reinit_stmt_before_use(THD *thd, LEX *lex)
{
tables->reinit_before_use(thd);
}
- lex->current_select= &lex->select_lex;
+ lex->current_select= lex->first_select_lex();
if (lex->result)
@@ -3270,7 +3290,7 @@ void mysql_sql_stmt_execute(THD *thd)
{
LEX *lex= thd->lex;
Prepared_statement *stmt;
- LEX_CSTRING *name= &lex->prepared_stmt_name;
+ const LEX_CSTRING *name= &lex->prepared_stmt.name();
/* Query text for binary, general or slow log, if any of them is open */
String expanded_query;
DBUG_ENTER("mysql_sql_stmt_execute");
@@ -3283,7 +3303,7 @@ void mysql_sql_stmt_execute(THD *thd)
DBUG_VOID_RETURN;
}
- if (stmt->param_count != lex->prepared_stmt_params.elements)
+ if (stmt->param_count != lex->prepared_stmt.param_count())
{
my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE");
DBUG_VOID_RETURN;
@@ -3291,7 +3311,7 @@ void mysql_sql_stmt_execute(THD *thd)
DBUG_PRINT("info",("stmt: %p", stmt));
- if (lex->prepared_stmt_params_fix_fields(thd))
+ if (lex->prepared_stmt.params_fix_fields(thd))
DBUG_VOID_RETURN;
/*
@@ -3511,7 +3531,7 @@ void mysqld_stmt_close(THD *thd, char *packet)
void mysql_sql_stmt_close(THD *thd)
{
Prepared_statement* stmt;
- LEX_CSTRING *name= &thd->lex->prepared_stmt_name;
+ const LEX_CSTRING *name= &thd->lex->prepared_stmt.name();
DBUG_PRINT("info", ("DEALLOCATE PREPARE: %.*s\n", (int) name->length,
name->str));
@@ -3876,7 +3896,7 @@ void Prepared_statement::cleanup_stmt()
}
-bool Prepared_statement::set_name(LEX_CSTRING *name_arg)
+bool Prepared_statement::set_name(const LEX_CSTRING *name_arg)
{
name.length= name_arg->length;
name.str= (char*) memdup_root(mem_root, name_arg->str, name_arg->length);
@@ -4125,7 +4145,7 @@ Prepared_statement::set_parameters(String *expanded_query,
if (is_sql_ps)
{
/* SQL prepared statement */
- res= set_params_from_actual_params(this, thd->lex->prepared_stmt_params,
+ res= set_params_from_actual_params(this, thd->lex->prepared_stmt.params(),
expanded_query);
}
else if (param_count)
@@ -4205,15 +4225,6 @@ Prepared_statement::execute_loop(String *expanded_query,
if (set_parameters(expanded_query, packet, packet_end))
return TRUE;
-#ifdef NOT_YET_FROM_MYSQL_5_6
- if (unlikely(thd->security_ctx->password_expired &&
- !lex->is_change_password))
- {
- my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
- return true;
- }
-#endif
-
reexecute:
// Make sure that reprepare() did not create any new Items.
DBUG_ASSERT(thd->free_list == NULL);
@@ -4235,30 +4246,6 @@ reexecute:
error= execute(expanded_query, open_cursor) || thd->is_error();
thd->m_reprepare_observer= NULL;
-#ifdef WITH_WSREP
-
- if (WSREP_ON)
- {
- mysql_mutex_lock(&thd->LOCK_thd_data);
- switch (thd->wsrep_conflict_state)
- {
- case CERT_FAILURE:
- WSREP_DEBUG("PS execute fail for CERT_FAILURE: thd: %lld err: %d",
- (longlong) thd->thread_id,
- thd->get_stmt_da()->sql_errno() );
- thd->wsrep_conflict_state = NO_CONFLICT;
- break;
-
- case MUST_REPLAY:
- (void) wsrep_replay_transaction(thd);
- break;
-
- default:
- break;
- }
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- }
-#endif /* WITH_WSREP */
if (unlikely(error) &&
(sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) &&
@@ -4378,16 +4365,6 @@ Prepared_statement::execute_bulk_loop(String *expanded_query,
}
read_types= FALSE;
-#ifdef NOT_YET_FROM_MYSQL_5_6
- if (unlikely(thd->security_ctx->password_expired &&
- !lex->is_change_password))
- {
- my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
- thd->set_bulk_execution(0);
- return true;
- }
-#endif
-
// iterations changed by set_bulk_parameters
while ((iterations || start_param) && !error && !thd->is_error())
{
@@ -4431,30 +4408,6 @@ reexecute:
error= execute(expanded_query, open_cursor) || thd->is_error();
thd->m_reprepare_observer= NULL;
-#ifdef WITH_WSREP
-
- if (WSREP_ON)
- {
- mysql_mutex_lock(&thd->LOCK_thd_data);
- switch (thd->wsrep_conflict_state)
- {
- case CERT_FAILURE:
- WSREP_DEBUG("PS execute fail for CERT_FAILURE: thd: %lld err: %d",
- (longlong) thd->thread_id,
- thd->get_stmt_da()->sql_errno() );
- thd->wsrep_conflict_state = NO_CONFLICT;
- break;
-
- case MUST_REPLAY:
- (void) wsrep_replay_transaction(thd);
- break;
-
- default:
- break;
- }
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- }
-#endif /* WITH_WSREP */
if (unlikely(error) &&
(sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) &&
@@ -4602,8 +4555,8 @@ bool Prepared_statement::validate_metadata(Prepared_statement *copy)
if (is_sql_prepare() || lex->describe)
return FALSE;
- if (lex->select_lex.item_list.elements !=
- copy->lex->select_lex.item_list.elements)
+ if (lex->first_select_lex()->item_list.elements !=
+ copy->lex->first_select_lex()->item_list.elements)
{
/** Column counts mismatch, update the client */
thd->server_status|= SERVER_STATUS_METADATA_CHANGED;
@@ -4760,7 +4713,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor)
alloc_query(thd, (char*) expanded_query->ptr(),
expanded_query->length()))
{
- my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), expanded_query->length());
+ my_error(ER_OUTOFMEMORY, MYF(ME_FATAL), expanded_query->length());
goto error;
}
/*
@@ -4921,7 +4874,7 @@ bool Prepared_statement::execute_immediate(const char *query, uint query_len)
if (unlikely(prepare(query, query_len)))
DBUG_RETURN(true);
- if (param_count != thd->lex->prepared_stmt_params.elements)
+ if (param_count != thd->lex->prepared_stmt.param_count())
{
my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE");
deallocate_immediate();
@@ -5340,16 +5293,9 @@ bool Protocol_local::store_longlong(longlong value, bool unsigned_flag)
bool Protocol_local::store_decimal(const my_decimal *value)
{
- char buf[DECIMAL_MAX_STR_LENGTH];
- String str(buf, sizeof (buf), &my_charset_bin);
- int rc;
-
- rc= my_decimal2string(E_DEC_FATAL_ERROR, value, 0, 0, 0, &str);
-
- if (rc)
- return TRUE;
-
- return store_column(str.ptr(), str.length());
+ DBUG_ASSERT(0); // This method is not used yet
+ StringBuffer<DECIMAL_MAX_STR_LENGTH> str;
+ return value->to_string(&str) ? store_column(str.ptr(), str.length()) : true;
}
diff --git a/sql/sql_priv.h b/sql/sql_priv.h
index ae6c691ae56..4332a6961d9 100644
--- a/sql/sql_priv.h
+++ b/sql/sql_priv.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2018, Oracle and/or its affiliates.
- Copyright (c) 2010, 2018, Monty Program Ab.
+ Copyright (c) 2010, 2019, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -177,7 +177,11 @@
#define OPTION_SKIP_REPLICATION (1ULL << 37) // THD, user
#define OPTION_RPL_SKIP_PARALLEL (1ULL << 38)
-#define OPTION_FOUND_COMMENT (1ULL << 39) // SELECT, intern, parser
+#define OPTION_NO_QUERY_CACHE (1ULL << 39) // SELECT, user
+#define OPTION_PROCEDURE_CLAUSE (1ULL << 40) // Internal usage
+
+
+#define OPTION_LEX_FOUND_COMMENT (1ULL << 0) // intern, parser
/* The rest of the file is included in the server only */
#ifndef MYSQL_CLIENT
@@ -222,6 +226,9 @@
#define OPTIMIZER_SWITCH_ORDERBY_EQ_PROP (1ULL << 29)
#define OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED (1ULL << 30)
#define OPTIMIZER_SWITCH_SPLIT_MATERIALIZED (1ULL << 31)
+#define OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_SUBQUERY (1ULL << 32)
+#define OPTIMIZER_SWITCH_USE_ROWID_FILTER (1ULL << 33)
+#define OPTIMIZER_SWITCH_COND_PUSHDOWN_FROM_HAVING (1ULL << 34)
#define OPTIMIZER_SWITCH_DEFAULT (OPTIMIZER_SWITCH_INDEX_MERGE | \
OPTIMIZER_SWITCH_INDEX_MERGE_UNION | \
@@ -248,7 +255,11 @@
OPTIMIZER_SWITCH_EXISTS_TO_IN | \
OPTIMIZER_SWITCH_ORDERBY_EQ_PROP | \
OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED | \
- OPTIMIZER_SWITCH_SPLIT_MATERIALIZED)
+ OPTIMIZER_SWITCH_SPLIT_MATERIALIZED | \
+ OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_SUBQUERY | \
+ OPTIMIZER_SWITCH_USE_ROWID_FILTER | \
+ OPTIMIZER_SWITCH_COND_PUSHDOWN_FROM_HAVING | \
+ OPTIMIZER_SWITCH_OPTIMIZE_JOIN_BUFFER_SIZE)
/*
Replication uses 8 bytes to store SQL_MODE in the binary log. The day you
@@ -333,10 +344,15 @@
#ifndef MYSQL_CLIENT
/*
- Some defines for exit codes for ::is_equal class functions.
+ Field::is_equal() return codes.
*/
#define IS_EQUAL_NO 0
#define IS_EQUAL_YES 1
+/**
+ new_field has a packed representation compatible with the old type,
+ so it is theoretically possible to perform the change by only updating
+ the data dictionary, without changing table rows
+*/
#define IS_EQUAL_PACK_LENGTH 2
enum enum_parsing_place
@@ -350,6 +366,9 @@ enum enum_parsing_place
IN_ORDER_BY,
IN_UPDATE_ON_DUP_KEY,
IN_PART_FUNC,
+ BEFORE_OPT_LIST,
+ AFTER_LIST,
+ FOR_LOOP_BOUND,
PARSING_PLACE_SIZE /* always should be the last */
};
diff --git a/sql/sql_profile.cc b/sql/sql_profile.cc
index 13f03fed5f3..6ca21aebb37 100644
--- a/sql/sql_profile.cc
+++ b/sql/sql_profile.cc
@@ -110,7 +110,7 @@ int make_profile_table_for_show(THD *thd, ST_SCHEMA_TABLE *schema_table)
};
ST_FIELD_INFO *field_info;
- Name_resolution_context *context= &thd->lex->select_lex.context;
+ Name_resolution_context *context= &thd->lex->first_select_lex()->context;
int i;
for (i= 0; schema_table->fields_info[i].field_name != NULL; i++)
@@ -402,7 +402,7 @@ bool PROFILING::show_profiles()
QUERY_PROFILE *prof;
List<Item> field_list;
MEM_ROOT *mem_root= thd->mem_root;
- SELECT_LEX *sel= &thd->lex->select_lex;
+ SELECT_LEX *sel= thd->lex->first_select_lex();
SELECT_LEX_UNIT *unit= &thd->lex->unit;
ha_rows idx= 0;
Protocol *protocol= thd->protocol;
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index abdf9d76d15..7a5cabc8880 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -221,7 +221,9 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
!thd->mdl_context.has_locks() ||
thd->handler_tables_hash.records ||
thd->ull_hash.records ||
- thd->global_read_lock.is_acquired());
+ thd->global_read_lock.is_acquired() ||
+ thd->current_backup_stage != BACKUP_FINISHED
+ );
/*
Note that if REFRESH_READ_LOCK bit is set then REFRESH_TABLES is set too
@@ -231,6 +233,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
{
if ((options & REFRESH_READ_LOCK) && thd)
{
+ DBUG_ASSERT(!(options & REFRESH_FAST) && !tables);
/*
On the one hand we need a write lock on the tables to be flushed,
on the other hand we must not try to acquire a global read lock
@@ -242,6 +245,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0));
return 1;
}
+
/*
Writing to the binlog could cause deadlocks, as we don't log
UNLOCK TABLES
@@ -249,9 +253,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
tmp_write_to_binlog= 0;
if (thd->global_read_lock.lock_global_read_lock(thd))
return 1; // Killed
- if (close_cached_tables(thd, tables,
- ((options & REFRESH_FAST) ? FALSE : TRUE),
- thd->variables.lock_wait_timeout))
+ if (flush_tables(thd, FLUSH_ALL))
{
/*
NOTE: my_error() has been already called by reopen_tables() within
@@ -274,11 +276,9 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
make_global_read_lock_block_commit(thd) above since they could have
modified the tables too.
*/
- if (WSREP(thd) &&
- close_cached_tables(thd, tables, (options & REFRESH_FAST) ?
- FALSE : TRUE, TRUE))
- result= 1;
- }
+ if (WSREP(thd) && flush_tables(thd, FLUSH_ALL))
+ result= 1;
+ }
else
{
if (thd && thd->locked_tables_mode)
@@ -311,8 +311,8 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
with global read lock.
*/
if (thd->open_tables &&
- !thd->mdl_context.is_lock_owner(MDL_key::GLOBAL, "", "",
- MDL_INTENTION_EXCLUSIVE))
+ !thd->mdl_context.is_lock_owner(MDL_key::BACKUP, "", "",
+ MDL_BACKUP_DDL))
{
my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0),
thd->open_tables->s->table_name.str);
@@ -332,25 +332,21 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
}
#ifdef WITH_WSREP
- if (thd && thd->wsrep_applier)
- {
- /*
- In case of applier thread, do not wait for table share(s) to be
- removed from table definition cache.
- */
- options|= REFRESH_FAST;
- }
-#endif
- if (close_cached_tables(thd, tables,
- ((options & REFRESH_FAST) ? FALSE : TRUE),
- (thd ? thd->variables.lock_wait_timeout :
- LONG_TIMEOUT)))
+ /* In case of applier thread, do not call flush tables */
+ if (!thd || !thd->wsrep_applier)
+#endif /* WITH_WSREP */
{
- /*
- NOTE: my_error() has been already called by reopen_tables() within
- close_cached_tables().
- */
- result= 1;
+ if (close_cached_tables(thd, tables,
+ ((options & REFRESH_FAST) ? FALSE : TRUE),
+ (thd ? thd->variables.lock_wait_timeout :
+ LONG_TIMEOUT)))
+ {
+ /*
+ NOTE: my_error() has been already called by reopen_tables() within
+ close_cached_tables().
+ */
+ result= 1;
+ }
}
}
my_dbopt_cleanup();
@@ -420,6 +416,11 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
#endif
if (options & REFRESH_USER_RESOURCES)
reset_mqh((LEX_USER *) NULL, 0); /* purecov: inspected */
+ if (options & REFRESH_SSL)
+ {
+ if (reinit_ssl())
+ result= 1;
+ }
if (options & REFRESH_GENERIC)
{
List_iterator_fast<LEX_CSTRING> li(thd->lex->view_list);
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index fdca609f5af..a058c366f60 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -31,6 +31,7 @@
#include "debug_sync.h"
#include "semisync_master.h"
#include "semisync_slave.h"
+#include "mysys_err.h"
enum enum_gtid_until_state {
GTID_UNTIL_NOT_DONE,
@@ -506,6 +507,22 @@ static enum enum_binlog_checksum_alg get_binlog_checksum_value_at_connect(THD *
DBUG_RETURN(ret);
}
+
+/**
+ Set current_linfo
+
+ Setting current_linfo needs to be done with LOCK_thd_data held to ensure that
+ adjust_linfo_offsets doesn't use a structure that may be deleted.
+*/
+
+void THD::set_current_linfo(LOG_INFO *linfo)
+{
+ mysql_mutex_lock(&LOCK_thd_data);
+ current_linfo= linfo;
+ mysql_mutex_unlock(&LOCK_thd_data);
+}
+
+
/*
Adjust the position pointer in the binary log file for all running slaves
@@ -527,61 +544,48 @@ static enum enum_binlog_checksum_alg get_binlog_checksum_value_at_connect(THD *
Now they sync is done for next read.
*/
-void adjust_linfo_offsets(my_off_t purge_offset)
+static my_bool adjust_callback(THD *thd, my_off_t *purge_offset)
{
- THD *tmp;
-
- mysql_mutex_lock(&LOCK_thread_count);
- I_List_iterator<THD> it(threads);
-
- while ((tmp=it++))
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ if (auto linfo= thd->current_linfo)
{
- LOG_INFO* linfo;
- if ((linfo = tmp->current_linfo))
- {
- mysql_mutex_lock(&linfo->lock);
- /*
- Index file offset can be less that purge offset only if
- we just started reading the index file. In that case
- we have nothing to adjust
- */
- if (linfo->index_file_offset < purge_offset)
- linfo->fatal = (linfo->index_file_offset != 0);
- else
- linfo->index_file_offset -= purge_offset;
- mysql_mutex_unlock(&linfo->lock);
- }
+ /*
+ Index file offset can be less than purge offset only if
+ we just started reading the index file. In that case
+ we have nothing to adjust
+ */
+ if (linfo->index_file_offset < *purge_offset)
+ linfo->fatal= (linfo->index_file_offset != 0);
+ else
+ linfo->index_file_offset-= *purge_offset;
}
- mysql_mutex_unlock(&LOCK_thread_count);
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ return 0;
}
-bool log_in_use(const char* log_name)
+void adjust_linfo_offsets(my_off_t purge_offset)
{
- size_t log_name_len = strlen(log_name) + 1;
- THD *tmp;
- bool result = 0;
-
- mysql_mutex_lock(&LOCK_thread_count);
- I_List_iterator<THD> it(threads);
+ server_threads.iterate(adjust_callback, &purge_offset);
+}
- while ((tmp=it++))
- {
- LOG_INFO* linfo;
- if ((linfo = tmp->current_linfo))
- {
- mysql_mutex_lock(&linfo->lock);
- result = !strncmp(log_name, linfo->log_file_name, log_name_len);
- mysql_mutex_unlock(&linfo->lock);
- if (result)
- break;
- }
- }
- mysql_mutex_unlock(&LOCK_thread_count);
+static my_bool log_in_use_callback(THD *thd, const char *log_name)
+{
+ my_bool result= 0;
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ if (auto linfo= thd->current_linfo)
+ result= !strcmp(log_name, linfo->log_file_name);
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
return result;
}
+
+bool log_in_use(const char* log_name)
+{
+ return server_threads.iterate(log_in_use_callback, log_name);
+}
+
bool purge_error_message(THD* thd, int res)
{
uint errcode;
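The two hunks above replace manual walks over the global thread list (taken under LOCK_thread_count) with server_threads.iterate() plus a small callback that only takes each connection's own LOCK_thd_data. A self-contained sketch of that shape, with invented Thd/ThreadRegistry stand-ins rather than the server's real THD and thread-registry classes:

  #include <cstdio>
  #include <cstring>
  #include <mutex>
  #include <vector>

  struct LogInfo { const char *log_file_name; };

  struct Thd {
    std::mutex lock_thd_data;        // per-connection lock, like THD::LOCK_thd_data
    LogInfo *current_linfo = nullptr;
  };

  struct ThreadRegistry {
    std::vector<Thd *> threads;
    // Visit every registered thread; stop early if the callback returns true.
    template <class F> bool iterate(F func) {
      for (Thd *thd : threads)
        if (func(thd)) return true;
      return false;
    }
  };

  static ThreadRegistry server_threads;

  // Same shape as log_in_use(): per-THD locking instead of one global mutex.
  bool log_in_use(const char *log_name) {
    return server_threads.iterate([&](Thd *thd) {
      std::lock_guard<std::mutex> g(thd->lock_thd_data);
      return thd->current_linfo &&
             std::strcmp(log_name, thd->current_linfo->log_file_name) == 0;
    });
  }

  int main() {
    Thd t;
    LogInfo li{"binlog.000042"};
    t.current_linfo = &li;
    server_threads.threads.push_back(&t);
    std::printf("%d\n", log_in_use("binlog.000042"));   // prints 1
  }

Moving the locking to the per-THD mutex keeps the global registry lock short and avoids serializing unrelated connections.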
@@ -863,44 +867,88 @@ static int send_heartbeat_event(binlog_send_info *info,
struct binlog_file_entry
{
binlog_file_entry *next;
- char *name;
+ LEX_CSTRING name;
+ my_off_t size;
};
+/**
+ Read all binary log file names from the index and return them as a list
+
+ @param memroot Use this for mem_root calls
+ @param reverse If set, filenames are returned latest first (the reverse
+ of the order in the index file)
+ @param already_locked If set, index file is already locked.
+
+ @return 0 error
+ # pointer to list
+
+ @notes
+ index_file is always unlocked at return
+*/
+
static binlog_file_entry *
-get_binlog_list(MEM_ROOT *memroot)
+get_binlog_list(MEM_ROOT *memroot, bool reverse= true,
+ bool already_locked= false)
{
IO_CACHE *index_file;
- char fname[FN_REFLEN];
- size_t length;
- binlog_file_entry *current_list= NULL, *e;
+ char *fname, *buff, *end_pos;
+ binlog_file_entry *current_list= NULL, *current_link= NULL, *e;
DBUG_ENTER("get_binlog_list");
if (!mysql_bin_log.is_open())
{
+ if (already_locked)
+ mysql_bin_log.unlock_index();
my_error(ER_NO_BINARY_LOGGING, MYF(0));
DBUG_RETURN(NULL);
}
-
- mysql_bin_log.lock_index();
+ if (!already_locked)
+ mysql_bin_log.lock_index();
index_file=mysql_bin_log.get_index_file();
reinit_io_cache(index_file, READ_CACHE, (my_off_t) 0, 0, 0);
+ if (!(buff= (char*) alloc_root(memroot,
+ (size_t) (index_file->end_of_file+1))))
+ goto err;
+ if (my_b_read(index_file, (uchar*) buff, (size_t) index_file->end_of_file))
+ {
+ my_error(EE_READ, MYF(ME_ERROR_LOG), my_filename(index_file->file),
+ my_errno);
+ goto err;
+ }
+ buff[index_file->end_of_file]= 0; // For strchr
+ mysql_bin_log.unlock_index();
+
/* The file ends with EOF or empty line */
- while ((length=my_b_gets(index_file, fname, sizeof(fname))) > 1)
+ for (fname= buff;
+ (end_pos= strchr(fname, '\n')) && (end_pos - fname) > 1;
+ fname= end_pos+1)
{
- --length; /* Remove the newline */
- if (!(e= (binlog_file_entry *)alloc_root(memroot, sizeof(*e))) ||
- !(e->name= strmake_root(memroot, fname, length)))
- {
- mysql_bin_log.unlock_index();
+ end_pos[0]= '\0'; // remove the newline
+ if (!(e= (binlog_file_entry *) alloc_root(memroot, sizeof(*e))))
DBUG_RETURN(NULL);
+ if (reverse)
+ {
+ e->next= current_list;
+ current_list= e;
+ }
+ else
+ {
+ e->next= NULL;
+ if (!current_link)
+ current_list= e;
+ else
+ current_link->next= e;
+ current_link= e;
}
- e->next= current_list;
- current_list= e;
+ e->name.str= fname;
+ e->name.length= (size_t) (end_pos - fname);
}
- mysql_bin_log.unlock_index();
-
DBUG_RETURN(current_list);
+
+err:
+ mysql_bin_log.unlock_index();
+ DBUG_RETURN(0);
}
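The rewritten get_binlog_list() above slurps the whole index file into a single memroot buffer, splits it on newlines, and links the entries either in index order or newest-first, instead of issuing one my_b_gets() per line under the index lock. The splitting logic, sketched with std::string/std::vector in place of MEM_ROOT and IO_CACHE (the helper name is invented):

  #include <cstdio>
  #include <string>
  #include <vector>

  // Split a newline-terminated index buffer into file names.
  // If reverse is true, return them newest first (last index entry first),
  // mirroring the reverse flag of get_binlog_list().
  std::vector<std::string> parse_index(const std::string &buff, bool reverse) {
    std::vector<std::string> names;
    std::size_t pos = 0;
    while (pos < buff.size()) {
      std::size_t nl = buff.find('\n', pos);
      if (nl == std::string::npos) break;      // buffer ends with EOF
      if (nl - pos > 1)                        // ignore empty / one-char lines
        names.emplace_back(buff.substr(pos, nl - pos));
      pos = nl + 1;
    }
    if (reverse)
      return {names.rbegin(), names.rend()};
    return names;
  }

  int main() {
    std::string index = "binlog.000001\nbinlog.000002\nbinlog.000003\n";
    for (const auto &n : parse_index(index, /*reverse=*/true))
      std::printf("%s\n", n.c_str());          // 000003, 000002, 000001
  }

Reading the index in one go is what lets the new code release lock_index before any per-file work starts.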
@@ -1225,8 +1273,7 @@ gtid_find_binlog_file(slave_connection_state *state, char *out_name,
char buf[FN_REFLEN];
init_alloc_root(&memroot, "gtid_find_binlog_file",
- 10*(FN_REFLEN+sizeof(binlog_file_entry)),
- 0, MYF(MY_THREAD_SPECIFIC));
+ 8192, 0, MYF(MY_THREAD_SPECIFIC));
if (!(list= get_binlog_list(&memroot)))
{
errormsg= "Out of memory while looking for GTID position in binlog";
@@ -1252,7 +1299,7 @@ gtid_find_binlog_file(slave_connection_state *state, char *out_name,
Read the Gtid_list_log_event at the start of the binlog file to
get the binlog state.
*/
- if (normalize_binlog_name(buf, list->name, false))
+ if (normalize_binlog_name(buf, list->name.str, false))
{
errormsg= "Failed to determine binlog file name while looking for "
"GTID position in binlog";
@@ -1947,7 +1994,7 @@ send_event_to_slave(binlog_send_info *info, Log_event_type event_type,
pos= my_b_tell(log);
if (repl_semisync_master.update_sync_header(info->thd,
- (uchar*) packet->c_ptr(),
+ (uchar*) packet->ptr(),
info->log_file_name + info->dirlen,
pos, &need_sync))
{
@@ -2138,9 +2185,8 @@ static int init_binlog_sender(binlog_send_info *info,
// set current pos too
linfo->pos= *pos;
-
// note: publish that we use file, before we open it
- thd->current_linfo= linfo;
+ thd->set_current_linfo(linfo);
if (check_start_offset(info, linfo->log_file_name, *pos))
return 1;
@@ -2378,14 +2424,15 @@ static int send_format_descriptor_event(binlog_send_info *info, IO_CACHE *log,
DBUG_RETURN(0);
}
-static bool should_stop(binlog_send_info *info)
+static bool should_stop(binlog_send_info *info, bool kill_server_check= false)
{
return
- info->net->error ||
- info->net->vio == NULL ||
- info->thd->killed ||
- info->error != 0 ||
- info->should_stop;
+ info->net->error ||
+ info->net->vio == NULL ||
+ (info->thd->killed &&
+ (info->thd->killed != KILL_SERVER || kill_server_check)) ||
+ info->error != 0 ||
+ info->should_stop;
}
/**
@@ -2406,7 +2453,7 @@ static int wait_new_events(binlog_send_info *info, /* in */
&stage_master_has_sent_all_binlog_to_slave,
&old_stage);
- while (!should_stop(info))
+ while (!should_stop(info, true))
{
*end_pos_ptr= mysql_bin_log.get_binlog_end_pos(binlog_end_pos_filename);
if (strcmp(linfo->log_file_name, binlog_end_pos_filename) != 0)
@@ -2758,6 +2805,14 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos,
info->error= ER_UNKNOWN_ERROR;
goto err;
}
+ DBUG_EXECUTE_IF("simulate_delay_at_shutdown",
+ {
+ const char act[]=
+ "now "
+ "WAIT_FOR greetings_from_kill_mysql";
+ DBUG_ASSERT(!debug_sync_set_action(thd,
+ STRING_WITH_LEN(act)));
+ };);
/*
heartbeat_period from @master_heartbeat_period user variable
@@ -3369,31 +3424,42 @@ err:
slave_server_id the slave's server id
*/
-void kill_zombie_dump_threads(uint32 slave_server_id)
+struct kill_callback_arg
{
- mysql_mutex_lock(&LOCK_thread_count);
- I_List_iterator<THD> it(threads);
- THD *tmp;
+ kill_callback_arg(uint32 id): slave_server_id(id), thd(0) {}
+ uint32 slave_server_id;
+ THD *thd;
+};
- while ((tmp=it++))
+static my_bool kill_callback(THD *thd, kill_callback_arg *arg)
+{
+ if (thd->get_command() == COM_BINLOG_DUMP &&
+ thd->variables.server_id == arg->slave_server_id)
{
- if (tmp->get_command() == COM_BINLOG_DUMP &&
- tmp->variables.server_id == slave_server_id)
- {
- mysql_mutex_lock(&tmp->LOCK_thd_kill); // Lock from delete
- break;
- }
+ arg->thd= thd;
+ if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data);
+ mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete
+ return 1;
}
- mysql_mutex_unlock(&LOCK_thread_count);
- if (tmp)
+ return 0;
+}
+
+
+void kill_zombie_dump_threads(uint32 slave_server_id)
+{
+ kill_callback_arg arg(slave_server_id);
+ server_threads.iterate(kill_callback, &arg);
+
+ if (arg.thd)
{
/*
Here we do not call kill_one_thread() as
it will be slow because it will iterate through the list
again. We just kill the thread ourselves.
*/
- tmp->awake_no_mutex(KILL_SLAVE_SAME_ID);
- mysql_mutex_unlock(&tmp->LOCK_thd_kill);
+ arg.thd->awake_no_mutex(KILL_SLAVE_SAME_ID);
+ mysql_mutex_unlock(&arg.thd->LOCK_thd_kill);
+ if (WSREP(arg.thd)) mysql_mutex_unlock(&arg.thd->LOCK_thd_data);
}
}
@@ -3841,11 +3907,21 @@ int reset_master(THD* thd, rpl_gtid *init_state, uint32 init_state_len,
if (!mysql_bin_log.is_open())
{
my_message(ER_FLUSH_MASTER_BINLOG_CLOSED,
- ER_THD(thd, ER_FLUSH_MASTER_BINLOG_CLOSED),
- MYF(ME_BELL+ME_WAITTANG));
+ ER_THD(thd, ER_FLUSH_MASTER_BINLOG_CLOSED), MYF(0));
return 1;
}
+#ifdef WITH_WSREP
+ if (WSREP_ON)
+ {
+ /* RESET MASTER would initialize the GTID sequence, and that would happen
+ locally on this node only, so better reject it
+ */
+ my_message(ER_NOT_ALLOWED_COMMAND,
+ "RESET MASTER not allowed when node is in cluster", MYF(0));
+ return 1;
+ }
+#endif /* WITH_WSREP */
bool ret= 0;
/* Temporarily disable master semisync before resetting master. */
repl_semisync_master.before_reset_master();
@@ -3944,7 +4020,7 @@ bool mysql_show_binlog_events(THD* thd)
goto err;
}
- thd->current_linfo= &linfo;
+ thd->set_current_linfo(&linfo);
if ((file=open_binlog(&log, linfo.log_file_name, &errmsg)) < 0)
goto err;
@@ -4161,17 +4237,25 @@ void show_binlogs_get_fields(THD *thd, List<Item> *field_list)
@retval FALSE success
@retval TRUE failure
+
+ @notes
+ We only keep the index locked while reading the file names. If there are
+ 1000+ binary logs, getting the file sizes can take a noticeable time
+ (up to 20 seconds has been reported) and we don't want to block log
+ rotations for that long.
*/
+
+#define BINLOG_INDEX_RETRY_COUNT 5
+
bool show_binlogs(THD* thd)
{
- IO_CACHE *index_file;
LOG_INFO cur;
- File file;
- char fname[FN_REFLEN];
+ MEM_ROOT mem_root;
+ binlog_file_entry *list;
List<Item> field_list;
- size_t length;
- size_t cur_dir_len;
Protocol *protocol= thd->protocol;
+ uint retry_count= 0;
+ size_t cur_dir_len;
DBUG_ENTER("show_binlogs");
if (!mysql_bin_log.is_open())
@@ -4185,55 +4269,71 @@ bool show_binlogs(THD* thd)
if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
-
+
+ init_alloc_root(&mem_root, "binlog_file_list", 8192, 0,
+ MYF(MY_THREAD_SPECIFIC));
+retry:
+ /*
+ The mutex handling here ensures we get the current log position and the
+ list of log files from the index consistently, without an index rotation
+ in between.
+ */
mysql_mutex_lock(mysql_bin_log.get_log_lock());
mysql_bin_log.lock_index();
- index_file=mysql_bin_log.get_index_file();
+ mysql_bin_log.raw_get_current_log(&cur);
+ mysql_mutex_unlock(mysql_bin_log.get_log_lock());
- mysql_bin_log.raw_get_current_log(&cur); // dont take mutex
- mysql_mutex_unlock(mysql_bin_log.get_log_lock()); // lockdep, OK
-
- cur_dir_len= dirname_length(cur.log_file_name);
+ /* The following call unlocks lock_index */
+ if ((!(list= get_binlog_list(&mem_root, false, true))))
+ goto err;
- reinit_io_cache(index_file, READ_CACHE, (my_off_t) 0, 0, 0);
+ DEBUG_SYNC(thd, "at_after_lock_index");
- /* The file ends with EOF or empty line */
- while ((length=my_b_gets(index_file, fname, sizeof(fname))) > 1)
+ // the first loop computes the sizes; if stat() fails, then retry
+ cur_dir_len= dirname_length(cur.log_file_name);
+ for (binlog_file_entry *cur_link= list; cur_link; cur_link= cur_link->next)
{
- size_t dir_len;
- ulonglong file_length= 0; // Length if open fails
- fname[--length] = '\0'; // remove the newline
+ const char *fname= cur_link->name.str;
+ size_t dir_len= dirname_length(fname);
+ size_t length= cur_link->name.length- dir_len;
- protocol->prepare_for_resend();
- dir_len= dirname_length(fname);
- length-= dir_len;
- protocol->store(fname + dir_len, length, &my_charset_bin);
+ /* Skip directory name as we shouldn't include this in the result */
+ cur_link->name.str+= dir_len;
+ cur_link->name.length-= dir_len;
if (!(strncmp(fname+dir_len, cur.log_file_name+cur_dir_len, length)))
- file_length= cur.pos; /* The active log, use the active position */
+ cur_link->size= cur.pos; /* The active log, use the active position */
else
{
- /* this is an old log, open it and find the size */
- if ((file= mysql_file_open(key_file_binlog,
- fname, O_RDONLY | O_SHARE | O_BINARY,
- MYF(0))) >= 0)
+ MY_STAT stat_info;
+ if (mysql_file_stat(key_file_binlog, fname, &stat_info, MYF(0)))
+ cur_link->size= stat_info.st_size;
+ else
{
- file_length= (ulonglong) mysql_file_seek(file, 0L, MY_SEEK_END, MYF(0));
- mysql_file_close(file, MYF(0));
+ if (retry_count++ < BINLOG_INDEX_RETRY_COUNT)
+ {
+ free_root(&mem_root, MYF(MY_MARK_BLOCKS_FREE));
+ goto retry;
+ }
+ cur_link->size= 0;
}
}
- protocol->store(file_length);
+ }
+
+ for (binlog_file_entry *cur_link= list; cur_link; cur_link= cur_link->next)
+ {
+ protocol->prepare_for_resend();
+ protocol->store(cur_link->name.str, cur_link->name.length, &my_charset_bin);
+ protocol->store((ulonglong) cur_link->size);
if (protocol->write())
goto err;
}
- if (unlikely(index_file->error == -1))
- goto err;
- mysql_bin_log.unlock_index();
+ free_root(&mem_root, MYF(0));
my_eof(thd);
DBUG_RETURN(FALSE);
err:
- mysql_bin_log.unlock_index();
+ free_root(&mem_root, MYF(0));
DBUG_RETURN(TRUE);
}
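show_binlogs() above now takes the log and index locks only long enough to snapshot the current position and the file list, then stats each file outside the locks; if a file vanishes because of a concurrent purge or rotation it re-snapshots, up to BINLOG_INDEX_RETRY_COUNT times, before settling for a zero size. A minimal sketch of that retry loop using POSIX stat() (the snapshot callback and the list_binlogs() name are invented):

  #include <cstdio>
  #include <functional>
  #include <string>
  #include <sys/stat.h>
  #include <utility>
  #include <vector>

  constexpr int BINLOG_INDEX_RETRY_COUNT = 5;    // same bound as in the patch

  // snapshot() plays the role of "read all names from the index under its
  // lock"; it is a parameter here so the sketch stays self-contained.
  std::vector<std::pair<std::string, off_t>>
  list_binlogs(const std::function<std::vector<std::string>()> &snapshot) {
    for (int attempt = 0; ; ++attempt) {
      std::vector<std::pair<std::string, off_t>> result;
      bool stale = false;
      for (const std::string &name : snapshot()) {
        struct stat st;
        if (stat(name.c_str(), &st) == 0)
          result.emplace_back(name, st.st_size);
        else if (attempt < BINLOG_INDEX_RETRY_COUNT) {
          stale = true;                          // purged/rotated meanwhile: retry
          break;
        } else {
          result.emplace_back(name, 0);          // out of retries: report size 0
        }
      }
      if (!stale)
        return result;
    }
  }

  int main() {
    auto files = list_binlogs([] {
      return std::vector<std::string>{"/tmp/binlog.000001"};
    });
    for (auto &f : files)
      std::printf("%s %lld\n", f.first.c_str(), (long long) f.second);
  }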
diff --git a/sql/sql_repl.h b/sql/sql_repl.h
index 8d9a127bca7..18aba0c9623 100644
--- a/sql/sql_repl.h
+++ b/sql/sql_repl.h
@@ -21,17 +21,6 @@
#ifdef HAVE_REPLICATION
#include "slave.h"
-typedef struct st_slave_info
-{
- uint32 server_id;
- uint32 rpl_recovery_rank, master_id;
- char host[HOSTNAME_LENGTH*SYSTEM_CHARSET_MBMAXLEN+1];
- char user[USERNAME_LENGTH+1];
- char password[MAX_PASSWORD_LENGTH*SYSTEM_CHARSET_MBMAXLEN+1];
- uint16 port;
- THD* thd;
-} SLAVE_INFO;
-
struct slave_connection_state;
extern my_bool opt_show_slave_auth_info;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 7b98a235ab0..9e5e4bf4ee5 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -64,6 +64,10 @@
#include "sys_vars_shared.h"
#include "sp_head.h"
#include "sp_rcontext.h"
+#include "rowid_filter.h"
+#include "select_handler.h"
+#include "my_json_writer.h"
+#include "opt_trace.h"
/*
A key part number that means we're using a fulltext scan.
@@ -100,6 +104,9 @@ static int sort_keyuse(KEYUSE *a,KEYUSE *b);
static bool are_tables_local(JOIN_TAB *jtab, table_map used_tables);
static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
bool allow_full_scan, table_map used_tables);
+static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select,
+ TABLE *table,
+ const key_map *keys,ha_rows limit);
void best_access_path(JOIN *join, JOIN_TAB *s,
table_map remaining_tables, uint idx,
bool disable_jbuf, double record_count,
@@ -114,6 +121,7 @@ static bool best_extension_by_limited_search(JOIN *join,
double read_time, uint depth,
uint prune_level,
uint use_cond_selectivity);
+void trace_plan_prefix(JOIN *join, uint idx, table_map remaining_tables);
static uint determine_search_depth(JOIN* join);
C_MODE_START
static int join_tab_cmp(const void *dummy, const void* ptr1, const void* ptr2);
@@ -151,7 +159,8 @@ static COND *build_equal_items(JOIN *join, COND *cond,
static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
COND *cond,
COND_EQUAL *cond_equal,
- void *table_join_idx);
+ void *table_join_idx,
+ bool do_substitution);
static COND *simplify_joins(JOIN *join, List<TABLE_LIST> *join_list,
COND *conds, bool top, bool in_sj);
static bool check_interleaving_with_nj(JOIN_TAB *next);
@@ -345,6 +354,36 @@ bool dbug_user_var_equals_int(THD *thd, const char *name, int value)
}
#endif
+static void trace_table_dependencies(THD *thd,
+ JOIN_TAB *join_tabs, uint table_count)
+{
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_array trace_dep(thd, "table_dependencies");
+ for (uint i= 0; i < table_count; i++)
+ {
+ TABLE_LIST *table_ref= join_tabs[i].tab_list;
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(&join_tabs[i]);
+ trace_one_table.add("row_may_be_null",
+ (bool)table_ref->table->maybe_null);
+ const table_map map= table_ref->get_map();
+ DBUG_ASSERT(map < (1ULL << table_count));
+ for (uint j= 0; j < table_count; j++)
+ {
+ if (map & (1ULL << j))
+ {
+ trace_one_table.add("map_bit", static_cast<longlong>(j));
+ break;
+ }
+ }
+ Json_writer_array depends_on(thd, "depends_on_map_bits");
+ Table_map_iterator it(join_tabs[i].dependent);
+ uint dep_bit;
+ while ((dep_bit= it++) != Table_map_iterator::BITMAP_END)
+ depends_on.add(static_cast<longlong>(dep_bit));
+ }
+}
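trace_table_dependencies() above leans on the Json_writer_object / Json_writer_array wrappers, which as used here open a JSON scope when constructed and close it again when they go out of scope, so the trace nesting follows the C++ block nesting. A tiny standalone RAII sketch of that idea (deliberately naive formatting, and not the server's Json_writer classes):

  #include <cstdio>

  // Minimal RAII JSON scopes: ctor prints the opener, dtor prints the closer.
  // (The output is not valid JSON; only the scoping behaviour matters.)
  struct JsonObject {
    explicit JsonObject(const char *key = nullptr) {
      if (key) std::printf("\"%s\": ", key);
      std::printf("{\n");
    }
    ~JsonObject() { std::printf("}\n"); }
    void add(const char *key, long value) {
      std::printf("  \"%s\": %ld,\n", key, value);
    }
  };

  struct JsonArray {
    explicit JsonArray(const char *key) { std::printf("\"%s\": [\n", key); }
    ~JsonArray() { std::printf("]\n"); }
  };

  int main() {
    JsonObject trace_wrapper;               // like Json_writer_object(thd)
    JsonArray deps("table_dependencies");   // like Json_writer_array(thd, ...)
    long bits[] = {0, 2};
    for (long bit : bits) {
      JsonObject one_table;                 // one scope per table
      one_table.add("map_bit", bit);
    }                                       // scope closed automatically here
  }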
+
/**
This handles SELECT with and without UNION.
@@ -354,7 +393,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
ulong setup_tables_done_option)
{
bool res;
- SELECT_LEX *select_lex = &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
DBUG_ENTER("handle_select");
MYSQL_SELECT_START(thd->query());
@@ -720,26 +759,173 @@ void vers_select_conds_t::print(String *str, enum_query_type query_type) const
}
}
-int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
+static
+Item* period_get_condition(THD *thd, TABLE_LIST *table, SELECT_LEX *select,
+ vers_select_conds_t *conds, bool timestamp)
{
- DBUG_ENTER("SELECT_LEX::vers_setup_cond");
+ DBUG_ASSERT(table);
+ DBUG_ASSERT(table->table);
#define newx new (thd->mem_root)
+ TABLE_SHARE *share= table->table->s;
+ const TABLE_SHARE::period_info_t *period= conds->period;
+
+ const LEX_CSTRING &fstart= period->start_field(share)->field_name;
+ const LEX_CSTRING &fend= period->end_field(share)->field_name;
+
+ conds->field_start= newx Item_field(thd, &select->context,
+ table->db.str, table->alias.str,
+ thd->make_clex_string(fstart));
+ conds->field_end= newx Item_field(thd, &select->context,
+ table->db.str, table->alias.str,
+ thd->make_clex_string(fend));
+
+ Item *cond1= NULL, *cond2= NULL, *cond3= NULL, *curr= NULL;
+ if (timestamp)
+ {
+ MYSQL_TIME max_time;
+ switch (conds->type)
+ {
+ case SYSTEM_TIME_UNSPECIFIED:
+ thd->variables.time_zone->gmt_sec_to_TIME(&max_time, TIMESTAMP_MAX_VALUE);
+ max_time.second_part= TIME_MAX_SECOND_PART;
+ curr= newx Item_datetime_literal(thd, &max_time, TIME_SECOND_PART_DIGITS);
+ cond1= newx Item_func_eq(thd, conds->field_end, curr);
+ break;
+ case SYSTEM_TIME_AS_OF:
+ cond1= newx Item_func_le(thd, conds->field_start, conds->start.item);
+ cond2= newx Item_func_gt(thd, conds->field_end, conds->start.item);
+ break;
+ case SYSTEM_TIME_FROM_TO:
+ cond1= newx Item_func_lt(thd, conds->field_start, conds->end.item);
+ cond2= newx Item_func_gt(thd, conds->field_end, conds->start.item);
+ cond3= newx Item_func_lt(thd, conds->start.item, conds->end.item);
+ break;
+ case SYSTEM_TIME_BETWEEN:
+ cond1= newx Item_func_le(thd, conds->field_start, conds->end.item);
+ cond2= newx Item_func_gt(thd, conds->field_end, conds->start.item);
+ cond3= newx Item_func_le(thd, conds->start.item, conds->end.item);
+ break;
+ case SYSTEM_TIME_BEFORE:
+ cond1= newx Item_func_lt(thd, conds->field_end, conds->start.item);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+ }
+ else
+ {
+ DBUG_ASSERT(table->table->s && table->table->s->db_plugin);
- TABLE_LIST *table;
+ Item *trx_id0= conds->start.item;
+ Item *trx_id1= conds->end.item;
+ if (conds->start.item && conds->start.unit == VERS_TIMESTAMP)
+ {
+ bool backwards= conds->type != SYSTEM_TIME_AS_OF;
+ trx_id0= newx Item_func_trt_id(thd, conds->start.item,
+ TR_table::FLD_TRX_ID, backwards);
+ }
+ if (conds->end.item && conds->end.unit == VERS_TIMESTAMP)
+ {
+ trx_id1= newx Item_func_trt_id(thd, conds->end.item,
+ TR_table::FLD_TRX_ID, false);
+ }
- if (!thd->stmt_arena->is_conventional() &&
- !thd->stmt_arena->is_stmt_prepare_or_first_sp_execute())
+ switch (conds->type)
+ {
+ case SYSTEM_TIME_UNSPECIFIED:
+ curr= newx Item_int(thd, ULONGLONG_MAX);
+ cond1= newx Item_func_eq(thd, conds->field_end, curr);
+ DBUG_ASSERT(!conds->start.item);
+ DBUG_ASSERT(!conds->end.item);
+ break;
+ case SYSTEM_TIME_AS_OF:
+ cond1= newx Item_func_trt_trx_sees_eq(thd, trx_id0, conds->field_start);
+ cond2= newx Item_func_trt_trx_sees(thd, conds->field_end, trx_id0);
+ DBUG_ASSERT(!conds->end.item);
+ break;
+ case SYSTEM_TIME_FROM_TO:
+ cond1= newx Item_func_trt_trx_sees(thd, trx_id1, conds->field_start);
+ cond2= newx Item_func_trt_trx_sees_eq(thd, conds->field_end, trx_id0);
+ cond3= newx Item_func_lt(thd, conds->start.item, conds->end.item);
+ break;
+ case SYSTEM_TIME_BETWEEN:
+ cond1= newx Item_func_trt_trx_sees_eq(thd, trx_id1, conds->field_start);
+ cond2= newx Item_func_trt_trx_sees_eq(thd, conds->field_end, trx_id0);
+ cond3= newx Item_func_le(thd, conds->start.item, conds->end.item);
+ break;
+ case SYSTEM_TIME_BEFORE:
+ cond1= newx Item_func_trt_trx_sees(thd, trx_id0, conds->field_end);
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
+ }
+
+ if (cond1)
{
- // statement is already prepared
- DBUG_RETURN(0);
+ cond1= and_items(thd, cond2, cond1);
+ cond1= and_items(thd, cond3, cond1);
+ }
+ return cond1;
+#undef newx
+}
+
+static
+bool skip_setup_conds(THD *thd)
+{
+ return (!thd->stmt_arena->is_conventional()
+ && !thd->stmt_arena->is_stmt_prepare_or_first_sp_execute())
+ || thd->lex->is_view_context_analysis();
+}
+
+Item* SELECT_LEX::period_setup_conds(THD *thd, TABLE_LIST *tables, Item *where)
+{
+ DBUG_ENTER("SELECT_LEX::period_setup_conds");
+
+ if (skip_setup_conds(thd))
+ DBUG_RETURN(where);
+
+ Query_arena backup;
+ Query_arena *arena= thd->activate_stmt_arena_if_needed(&backup);
+
+ DBUG_ASSERT(!tables->next_local && tables->table);
+
+ Item *result= NULL;
+ for (TABLE_LIST *table= tables; table; table= table->next_local)
+ {
+ if (!table->table)
+ continue;
+ vers_select_conds_t &conds= table->period_conditions;
+ if (!table->table->s->period.name.streq(conds.name))
+ {
+ my_error(ER_PERIOD_NOT_FOUND, MYF(0), conds.name.str);
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+ DBUG_RETURN(NULL);
+ }
+
+ conds.period= &table->table->s->period;
+ result= and_items(thd, result,
+ period_get_condition(thd, table, this, &conds, true));
}
+ result= and_items(thd, where, result);
+
+ if (arena)
+ thd->restore_active_arena(arena, &backup);
+
+ DBUG_RETURN(result);
+}
+
+int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
+{
+ DBUG_ENTER("SELECT_LEX::vers_setup_conds");
- if (thd->lex->is_view_context_analysis())
+ if (skip_setup_conds(thd))
DBUG_RETURN(0);
if (!versioned_tables)
{
- for (table= tables; table; table= table->next_local)
+ for (TABLE_LIST *table= tables; table; table= table->next_local)
{
if (table->table && table->table->versioned())
versioned_tables++;
@@ -779,7 +965,7 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
}
}
- for (table= tables; table; table= table->next_local)
+ for (TABLE_LIST *table= tables; table; table= table->next_local)
{
if (!table->table || !table->table->versioned())
continue;
@@ -825,16 +1011,6 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
lock_type= TL_READ; // ignore TL_WRITE, history is immutable anyway
}
- const LEX_CSTRING *fstart=
- thd->make_clex_string(table->table->vers_start_field()->field_name);
- const LEX_CSTRING *fend=
- thd->make_clex_string(table->table->vers_end_field()->field_name);
-
- Item *row_start=
- newx Item_field(thd, &this->context, table->db.str, table->alias.str, fstart);
- Item *row_end=
- newx Item_field(thd, &this->context, table->db.str, table->alias.str, fend);
-
bool timestamps_only= table->table->versioned(VERS_TIMESTAMP);
if (vers_conditions.is_set())
@@ -854,101 +1030,16 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables)
}
}
- Item *cond1= NULL, *cond2= NULL, *cond3= NULL, *curr= NULL;
- Item *point_in_time1= vers_conditions.start.item;
- Item *point_in_time2= vers_conditions.end.item;
- TABLE *t= table->table;
- if (t->versioned(VERS_TIMESTAMP))
- {
- MYSQL_TIME max_time;
- switch (vers_conditions.type)
- {
- case SYSTEM_TIME_UNSPECIFIED:
- thd->variables.time_zone->gmt_sec_to_TIME(&max_time, TIMESTAMP_MAX_VALUE);
- max_time.second_part= TIME_MAX_SECOND_PART;
- curr= newx Item_datetime_literal(thd, &max_time, TIME_SECOND_PART_DIGITS);
- cond1= newx Item_func_eq(thd, row_end, curr);
- break;
- case SYSTEM_TIME_AS_OF:
- cond1= newx Item_func_le(thd, row_start, point_in_time1);
- cond2= newx Item_func_gt(thd, row_end, point_in_time1);
- break;
- case SYSTEM_TIME_FROM_TO:
- cond1= newx Item_func_lt(thd, row_start, point_in_time2);
- cond2= newx Item_func_gt(thd, row_end, point_in_time1);
- cond3= newx Item_func_lt(thd, point_in_time1, point_in_time2);
- break;
- case SYSTEM_TIME_BETWEEN:
- cond1= newx Item_func_le(thd, row_start, point_in_time2);
- cond2= newx Item_func_gt(thd, row_end, point_in_time1);
- cond3= newx Item_func_le(thd, point_in_time1, point_in_time2);
- break;
- case SYSTEM_TIME_BEFORE:
- cond1= newx Item_func_lt(thd, row_end, point_in_time1);
- break;
- default:
- DBUG_ASSERT(0);
- }
- }
- else
- {
- DBUG_ASSERT(table->table->s && table->table->s->db_plugin);
-
- Item *trx_id0, *trx_id1;
-
- switch (vers_conditions.type)
- {
- case SYSTEM_TIME_UNSPECIFIED:
- curr= newx Item_int(thd, ULONGLONG_MAX);
- cond1= newx Item_func_eq(thd, row_end, curr);
- break;
- case SYSTEM_TIME_AS_OF:
- trx_id0= vers_conditions.start.unit == VERS_TIMESTAMP
- ? newx Item_func_trt_id(thd, point_in_time1, TR_table::FLD_TRX_ID)
- : point_in_time1;
- cond1= newx Item_func_trt_trx_sees_eq(thd, trx_id0, row_start);
- cond2= newx Item_func_trt_trx_sees(thd, row_end, trx_id0);
- break;
- case SYSTEM_TIME_FROM_TO:
- cond3= newx Item_func_lt(thd, point_in_time1, point_in_time2);
- /* fall through */
- case SYSTEM_TIME_BETWEEN:
- trx_id0= vers_conditions.start.unit == VERS_TIMESTAMP
- ? newx Item_func_trt_id(thd, point_in_time1, TR_table::FLD_TRX_ID, true)
- : point_in_time1;
- trx_id1= vers_conditions.end.unit == VERS_TIMESTAMP
- ? newx Item_func_trt_id(thd, point_in_time2, TR_table::FLD_TRX_ID, false)
- : point_in_time2;
- cond1= vers_conditions.type == SYSTEM_TIME_FROM_TO
- ? newx Item_func_trt_trx_sees(thd, trx_id1, row_start)
- : newx Item_func_trt_trx_sees_eq(thd, trx_id1, row_start);
- cond2= newx Item_func_trt_trx_sees_eq(thd, row_end, trx_id0);
- if (!cond3)
- cond3= newx Item_func_le(thd, point_in_time1, point_in_time2);
- break;
- case SYSTEM_TIME_BEFORE:
- trx_id0= vers_conditions.start.unit == VERS_TIMESTAMP
- ? newx Item_func_trt_id(thd, point_in_time1, TR_table::FLD_TRX_ID, true)
- : point_in_time1;
- cond1= newx Item_func_trt_trx_sees(thd, trx_id0, row_end);
- break;
- default:
- DBUG_ASSERT(0);
- }
- }
-
- if (cond1)
- {
- cond1= and_items(thd, cond2, cond1);
- cond1= and_items(thd, cond3, cond1);
- table->on_expr= and_items(thd, table->on_expr, cond1);
- }
-
+ vers_conditions.period = &table->table->s->vers;
+ Item *cond= period_get_condition(thd, table, this, &vers_conditions,
+ timestamps_only);
+ if (cond)
+ table->on_expr= and_items(thd, table->on_expr, cond);
table->vers_conditions.type= SYSTEM_TIME_ALL;
+
} // for (table= tables; ...)
DBUG_RETURN(0);
-#undef newx
}
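period_get_condition() above turns each SYSTEM_TIME / period clause into a predicate over the row_start/row_end columns, e.g. AS OF T becomes row_start <= T AND row_end > T. The same mapping for the timestamp case, written as a plain boolean function instead of an Item tree (the types and constants below are invented for the sketch):

  #include <cstdio>

  enum class SystemTime { UNSPECIFIED, AS_OF, FROM_TO, BETWEEN, BEFORE };

  // One history row with its validity interval [row_start, row_end).
  struct Row { long long row_start, row_end; };

  constexpr long long TIMESTAMP_MAX = 253402300799LL;  // stand-in for "current row" marker

  // Decide whether a row is visible for the given SYSTEM_TIME clause,
  // mirroring the cond1/cond2/cond3 combinations in period_get_condition().
  bool row_matches(const Row &r, SystemTime type, long long t0, long long t1) {
    switch (type) {
    case SystemTime::UNSPECIFIED: return r.row_end == TIMESTAMP_MAX;   // current rows only
    case SystemTime::AS_OF:       return r.row_start <= t0 && r.row_end > t0;
    case SystemTime::FROM_TO:     return r.row_start < t1 && r.row_end > t0 && t0 < t1;
    case SystemTime::BETWEEN:     return r.row_start <= t1 && r.row_end > t0 && t0 <= t1;
    case SystemTime::BEFORE:      return r.row_end < t0;
    }
    return false;
  }

  int main() {
    Row old_version{100, 200}, current{200, TIMESTAMP_MAX};
    std::printf("AS OF 150 sees old: %d, current: %d\n",
                row_matches(old_version, SystemTime::AS_OF, 150, 0),
                row_matches(current, SystemTime::AS_OF, 150, 0));
  }

Factoring this mapping out of vers_setup_conds() is what lets the same code serve both system-versioned tables and application-time periods.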
/*****************************************************************************
@@ -994,6 +1085,11 @@ JOIN::prepare(TABLE_LIST *tables_init,
join_list= &select_lex->top_join_list;
union_part= unit_arg->is_unit_op();
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_prepare(thd, "join_preparation");
+ trace_prepare.add_select_number(select_lex->select_number);
+ Json_writer_array trace_steps(thd, "steps");
+
// simple check that we got usable conds
dbug_print_item(conds);
@@ -1051,7 +1147,7 @@ JOIN::prepare(TABLE_LIST *tables_init,
while ((select_el= select_it++))
{
- if (select_el->with_sum_func)
+ if (select_el->with_sum_func())
found_sum_func_elem= true;
if (select_el->with_field)
found_field_elem= true;
@@ -1219,14 +1315,14 @@ JOIN::prepare(TABLE_LIST *tables_init,
item->max_length)))
real_order= TRUE;
- if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM)
+ if (item->with_sum_func() && item->type() != Item::SUM_FUNC_ITEM)
item->split_sum_func(thd, ref_ptrs, all_fields, 0);
}
if (!real_order)
order= NULL;
}
- if (having && having->with_sum_func)
+ if (having && having->with_sum_func())
having->split_sum_func2(thd, ref_ptrs, all_fields,
&having, SPLIT_SUM_SKIP_REGISTERED);
if (select_lex->inner_sum_func_list)
@@ -1336,6 +1432,11 @@ JOIN::prepare(TABLE_LIST *tables_init,
}
}
+ {
+ Json_writer_object trace_wrapper(thd);
+ opt_trace_print_expanded_query(thd, select_lex, &trace_wrapper);
+ }
+
if (!procedure && result && result->prepare(fields_list, unit_arg))
goto err; /* purecov: inspected */
@@ -1396,6 +1497,7 @@ err:
bool JOIN::build_explain()
{
+ DBUG_ENTER("JOIN::build_explain");
create_explain_query_if_not_exists(thd->lex, thd->mem_root);
have_query_plan= QEP_AVAILABLE;
@@ -1413,8 +1515,7 @@ bool JOIN::build_explain()
thd->mem_root= old_mem_root;
DBUG_ASSERT(thd->free_list == old_free_list); // no Items were created
if (res)
- return 1;
-
+ DBUG_RETURN(1);
uint select_nr= select_lex->select_number;
JOIN_TAB *curr_tab= join_tab + exec_join_tab_cnt();
for (uint i= 0; i < aggr_tables; i++, curr_tab++)
@@ -1432,7 +1533,7 @@ bool JOIN::build_explain()
get_using_temporary_read_tracker();
}
}
- return 0;
+ DBUG_RETURN(0);
}
@@ -1440,7 +1541,16 @@ int JOIN::optimize()
{
int res= 0;
join_optimization_state init_state= optimization_state;
- if (optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
+ if (select_lex->pushdown_select)
+ {
+ if (!(select_options & SELECT_DESCRIBE))
+ {
+ /* Prepare to execute the query pushed into a foreign engine */
+ res= select_lex->pushdown_select->init();
+ }
+ with_two_phase_optimization= false;
+ }
+ else if (optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE)
res= optimize_stage2();
else
{
@@ -1461,6 +1571,133 @@ int JOIN::optimize()
}
+/**
+ @brief
+ Create the range filter objects needed at execution time for all join tables
+
+ @details
+ For each join table in the chosen execution plan that is joined using a
+ range filter, the function creates a Rowid_filter object for that filter.
+ To do this it first constructs a quick select to scan the filter's range,
+ then creates a container for the filter, and finally constructs a
+ Range_rowid_filter object, a pointer to which is set in the field
+ JOIN_TAB::rowid_filter of the joined table.
+
+ @retval false always
+*/
+
+bool JOIN::make_range_rowid_filters()
+{
+ DBUG_ENTER("make_range_rowid_filters");
+
+ /*
+ Do not build range filters with detected impossible WHERE.
+ Anyway conditions cannot be used anymore to extract ranges for filters.
+ */
+ if (const_table_map != found_const_table_map)
+ DBUG_RETURN(0);
+
+ JOIN_TAB *tab;
+
+ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
+ tab;
+ tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
+ {
+ if (!tab->range_rowid_filter_info)
+ continue;
+ int err;
+ SQL_SELECT *sel= NULL;
+ Rowid_filter_container *filter_container= NULL;
+ Item **sargable_cond= get_sargable_cond(this, tab->table);
+ sel= make_select(tab->table, const_table_map, const_table_map,
+ *sargable_cond, (SORT_INFO*) 0, 1, &err);
+ if (!sel)
+ continue;
+
+ key_map filter_map;
+ filter_map.clear_all();
+ filter_map.set_bit(tab->range_rowid_filter_info->key_no);
+ filter_map.merge(tab->table->with_impossible_ranges);
+ bool force_index_save= tab->table->force_index;
+ tab->table->force_index= true;
+ int rc= sel->test_quick_select(thd, filter_map, (table_map) 0,
+ (ha_rows) HA_POS_ERROR,
+ true, false, true, true);
+ tab->table->force_index= force_index_save;
+ if (thd->is_error())
+ goto no_filter;
+ /*
+ If SUBS_IN_TO_EXISTS strtrategy is chosen for the subquery then
+ additional conditions are injected into WHERE/ON/HAVING and it may
+ happen that the call of test_quick_select() discovers impossible range.
+ */
+ if (rc == -1)
+ {
+ const_table_map|= tab->table->map;
+ goto no_filter;
+ }
+ DBUG_ASSERT(sel->quick);
+ filter_container=
+ tab->range_rowid_filter_info->create_container();
+ if (filter_container)
+ {
+ tab->rowid_filter=
+ new (thd->mem_root) Range_rowid_filter(tab->table,
+ tab->range_rowid_filter_info,
+ filter_container, sel);
+ if (tab->rowid_filter)
+ continue;
+ }
+ no_filter:
+ if (sel->quick)
+ delete sel->quick;
+ delete sel;
+ }
+
+ DBUG_RETURN(0);
+}
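make_range_rowid_filters() above prepares, for each chosen table, a container of row ids produced by a cheap range scan over one index; during the join the engine probes that container to discard rows early. The patch allows different container types; a sorted array is one possibility, sketched here with invented names rather than the server's Rowid_filter API:

  #include <algorithm>
  #include <cstdio>
  #include <vector>

  using rowid_t = unsigned long long;

  // Container filled from a range scan over a secondary index.
  struct RangeRowidFilter {
    std::vector<rowid_t> rowids;

    void build(std::vector<rowid_t> from_range_scan) {
      rowids = std::move(from_range_scan);
      std::sort(rowids.begin(), rowids.end());  // sorted container -> binary search
    }
    // check() is what the engine would call for each candidate row.
    bool check(rowid_t id) const {
      return std::binary_search(rowids.begin(), rowids.end(), id);
    }
  };

  int main() {
    RangeRowidFilter filter;
    filter.build({42, 7, 19});                  // rows matching the range condition
    const rowid_t candidates[] = {7, 8, 42};
    for (rowid_t candidate : candidates)
      std::printf("row %llu -> %s\n", candidate,
                  filter.check(candidate) ? "join it" : "skip");
  }

Whether the filter pays off depends on its selectivity versus the cost of building and probing it, which is why the plan only attaches one where range_rowid_filter_info was set during optimization.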
+
+
+/**
+ @brief
+ Allocate memory for the rowid containers of the used range filters
+
+ @details
+ For each join table in the chosen execution plan that is joined using a
+ range filter, the function allocates memory for the rowid container
+ employed by the filter. On success it lets the table engine know which
+ rowid filter will be used when accessing the table rows.
+
+ @retval false always
+*/
+
+bool
+JOIN::init_range_rowid_filters()
+{
+ DBUG_ENTER("init_range_rowid_filters");
+
+ JOIN_TAB *tab;
+
+ for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
+ tab;
+ tab= next_linear_tab(this, tab, WITH_BUSH_ROOTS))
+ {
+ if (!tab->rowid_filter)
+ continue;
+ if (tab->rowid_filter->get_container()->alloc())
+ {
+ delete tab->rowid_filter;
+ tab->rowid_filter= 0;
+ continue;
+ }
+ tab->table->file->rowid_filter_push(tab->rowid_filter);
+ tab->is_rowid_filter_built= false;
+ }
+ DBUG_RETURN(0);
+}
+
+
int JOIN::init_join_caches()
{
JOIN_TAB *tab;
@@ -1517,6 +1754,11 @@ JOIN::optimize_inner()
set_allowed_join_cache_types();
need_distinct= TRUE;
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_prepare(thd, "join_optimization");
+ trace_prepare.add_select_number(select_lex->select_number);
+ Json_writer_array trace_steps(thd, "steps");
+
/*
Needed in case optimizer short-cuts,
set properly in make_aggr_tables_info()
@@ -1597,7 +1839,7 @@ JOIN::optimize_inner()
{
/*
Item_cond_and can't be fixed after creation, so we do not check
- conds->fixed
+ conds->is_fixed()
*/
conds->fix_fields(thd, &conds);
conds->change_ref_to_fields(thd, tables_list);
@@ -1641,7 +1883,7 @@ JOIN::optimize_inner()
if (arena)
thd->restore_active_arena(arena, &backup);
}
-
+
if (optimize_constant_subqueries())
DBUG_RETURN(1);
@@ -1652,9 +1894,28 @@ JOIN::optimize_inner()
(void) having->walk(&Item::cleanup_is_expensive_cache_processor,
0, (void *) 0);
- if (setup_jtbm_semi_joins(this, join_list, &conds))
+ List<Item> eq_list;
+
+ if (setup_degenerate_jtbm_semi_joins(this, join_list, eq_list))
DBUG_RETURN(1);
+ if (eq_list.elements != 0)
+ {
+ Item *new_cond;
+
+ if (eq_list.elements == 1)
+ new_cond= eq_list.pop();
+ else
+ new_cond= new (thd->mem_root) Item_cond_and(thd, eq_list);
+
+ if (new_cond &&
+ ((new_cond->fix_fields(thd, &new_cond) ||
+ !(conds= and_items(thd, conds, new_cond)) ||
+ conds->fix_fields(thd, &conds))))
+ DBUG_RETURN(TRUE);
+ }
+ eq_list.empty();
+
if (select_lex->cond_pushed_into_where)
{
conds= and_conds(thd, conds, select_lex->cond_pushed_into_where);
@@ -1674,10 +1935,10 @@ JOIN::optimize_inner()
select_lex->having_fix_field_for_pushed_cond= 0;
}
}
-
+
conds= optimize_cond(this, conds, join_list, FALSE,
&cond_value, &cond_equal, OPT_LINK_EQUAL_FIELDS);
-
+
if (thd->is_error())
{
error= 1;
@@ -1685,6 +1946,58 @@ JOIN::optimize_inner()
DBUG_RETURN(1);
}
+ having= optimize_cond(this, having, join_list, TRUE,
+ &having_value, &having_equal);
+
+ if (thd->is_error())
+ {
+ error= 1;
+ DBUG_PRINT("error",("Error from optimize_cond"));
+ DBUG_RETURN(1);
+ }
+
+ /* Do not push into WHERE from HAVING if cond_value == Item::COND_FALSE */
+
+ if (thd->lex->sql_command == SQLCOM_SELECT &&
+ optimizer_flag(thd, OPTIMIZER_SWITCH_COND_PUSHDOWN_FROM_HAVING) &&
+ cond_value != Item::COND_FALSE)
+ {
+ having=
+ select_lex->pushdown_from_having_into_where(thd, having);
+ if (select_lex->attach_to_conds.elements != 0)
+ {
+ conds= and_new_conditions_to_optimized_cond(thd, conds, &cond_equal,
+ select_lex->attach_to_conds,
+ &cond_value);
+ sel->attach_to_conds.empty();
+ }
+ }
+
+ if (optimizer_flag(thd, OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_SUBQUERY))
+ {
+ TABLE_LIST *tbl;
+ List_iterator_fast<TABLE_LIST> li(select_lex->leaf_tables);
+ while ((tbl= li++))
+ if (tbl->jtbm_subselect)
+ {
+ if (tbl->jtbm_subselect->pushdown_cond_for_in_subquery(thd, conds))
+ DBUG_RETURN(1);
+ }
+ }
+
+ if (setup_jtbm_semi_joins(this, join_list, eq_list))
+ DBUG_RETURN(1);
+
+ if (eq_list.elements != 0)
+ {
+ conds= and_new_conditions_to_optimized_cond(thd, conds, &cond_equal,
+ eq_list, &cond_value);
+
+ if (!conds &&
+ cond_value != Item::COND_FALSE && cond_value != Item::COND_TRUE)
+ DBUG_RETURN(TRUE);
+ }
+
if (optimizer_flag(thd, OPTIMIZER_SWITCH_COND_PUSHDOWN_FOR_DERIVED))
{
TABLE_LIST *tbl;
@@ -1722,32 +2035,22 @@ JOIN::optimize_inner()
if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
DBUG_RETURN(1);
}
-
{
- having= optimize_cond(this, having, join_list, TRUE,
- &having_value, &having_equal);
-
- if (unlikely(thd->is_error()))
- {
- error= 1;
- DBUG_PRINT("error",("Error from optimize_cond"));
- DBUG_RETURN(1);
- }
if (select_lex->where)
{
select_lex->cond_value= cond_value;
if (sel->where != conds && cond_value == Item::COND_OK)
thd->change_item_tree(&sel->where, conds);
- }
+ }
if (select_lex->having)
{
select_lex->having_value= having_value;
if (sel->having != having && having_value == Item::COND_OK)
- thd->change_item_tree(&sel->having, having);
+ thd->change_item_tree(&sel->having, having);
}
- if (cond_value == Item::COND_FALSE || having_value == Item::COND_FALSE ||
+ if (cond_value == Item::COND_FALSE || having_value == Item::COND_FALSE ||
(!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS)))
- { /* Impossible cond */
+ { /* Impossible cond */
if (unit->select_limit_cnt)
{
DBUG_PRINT("info", (having_value == Item::COND_FALSE ?
@@ -1935,6 +2238,9 @@ int JOIN::optimize_stage2()
if (get_best_combination())
DBUG_RETURN(1);
+ if (make_range_rowid_filters())
+ DBUG_RETURN(1);
+
if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE))
DBUG_RETURN(1);
@@ -1991,7 +2297,7 @@ int JOIN::optimize_stage2()
if (!conds && outer_join)
{
/* Handle the case where we have an OUTER JOIN without a WHERE */
- conds= new (thd->mem_root) Item_int(thd, (longlong) 1,1); // Always true
+ conds= new (thd->mem_root) Item_bool(thd, true); // Always true
}
if (impossible_where)
@@ -2026,7 +2332,7 @@ int JOIN::optimize_stage2()
if (conds)
{
conds= substitute_for_best_equal_field(thd, NO_PARTICULAR_TAB, conds,
- cond_equal, map2table);
+ cond_equal, map2table, true);
if (unlikely(thd->is_error()))
{
error= 1;
@@ -2039,6 +2345,23 @@ int JOIN::optimize_stage2()
"after substitute_best_equal",
QT_ORDINARY););
}
+ if (having)
+ {
+ having= substitute_for_best_equal_field(thd, NO_PARTICULAR_TAB, having,
+ having_equal, map2table, false);
+ if (thd->is_error())
+ {
+ error= 1;
+ DBUG_PRINT("error",("Error from substitute_for_best_equal"));
+ DBUG_RETURN(1);
+ }
+ if (having)
+ having->update_used_tables();
+ DBUG_EXECUTE("having",
+ print_where(having,
+ "after substitute_best_equal",
+ QT_ORDINARY););
+ }
/*
Perform the optimization on fields evaluation mentioned above
@@ -2052,7 +2375,7 @@ int JOIN::optimize_stage2()
*tab->on_expr_ref= substitute_for_best_equal_field(thd, NO_PARTICULAR_TAB,
*tab->on_expr_ref,
tab->cond_equal,
- map2table);
+ map2table, true);
if (unlikely(thd->is_error()))
{
error= 1;
@@ -2082,7 +2405,7 @@ int JOIN::optimize_stage2()
while (equals)
{
ref_item= substitute_for_best_equal_field(thd, tab, ref_item,
- equals, map2table);
+ equals, map2table, true);
if (unlikely(thd->is_fatal_error))
DBUG_RETURN(1);
@@ -2130,7 +2453,7 @@ int JOIN::optimize_stage2()
if (conds && const_table_map != found_const_table_map &&
(select_options & SELECT_DESCRIBE))
{
- conds=new (thd->mem_root) Item_int(thd, (longlong) 0, 1); // Always false
+ conds=new (thd->mem_root) Item_bool(thd, false); // Always false
}
/* Cache constant expressions in WHERE, HAVING, ON clauses. */
@@ -2413,13 +2736,13 @@ int JOIN::optimize_stage2()
elements may be lost during further having
condition transformation in JOIN::exec.
*/
- if (having && const_table_map && !having->with_sum_func)
+ if (having && const_table_map && !having->with_sum_func())
{
having->update_used_tables();
having= having->remove_eq_conds(thd, &select_lex->having_value, true);
if (select_lex->having_value == Item::COND_FALSE)
{
- having= new (thd->mem_root) Item_int(thd, (longlong) 0,1);
+ having= new (thd->mem_root) Item_bool(thd, false);
zero_result_cause= "Impossible HAVING noticed after reading const tables";
error= 0;
select_lex->mark_const_derived(zero_result_cause);
@@ -2457,7 +2780,7 @@ int JOIN::optimize_stage2()
{
JOIN_TAB *tab= &join_tab[const_tables];
- if (order)
+ if (order && !need_tmp)
{
/*
Force using of tmp table if sorting by a SP or UDF function due to
@@ -2599,6 +2922,9 @@ int JOIN::optimize_stage2()
if (init_join_caches())
DBUG_RETURN(1);
+ if (init_range_rowid_filters())
+ DBUG_RETURN(1);
+
error= 0;
if (select_options & SELECT_DESCRIBE)
@@ -3162,7 +3488,7 @@ bool JOIN::make_aggr_tables_info()
or end_write_group()) if JOIN::group is set to false.
*/
// the temporary table was explicitly requested
- DBUG_ASSERT(MY_TEST(select_options & OPTION_BUFFER_RESULT));
+ DBUG_ASSERT(select_options & OPTION_BUFFER_RESULT);
// the temporary table does not have a grouping expression
DBUG_ASSERT(!curr_tab->table->group);
}
@@ -3766,6 +4092,15 @@ bool JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
bool need_tmp_table, bool need_order,
bool distinct)
{
+ DBUG_ENTER("JOIN::save_explain_data");
+ DBUG_PRINT("enter", ("Save explain Select_lex: %u (%p) parent lex: %p stmt_lex: %p present select: %u (%p)",
+ select_lex->select_number, select_lex,
+ select_lex->parent_lex, thd->lex->stmt_lex,
+ (output->get_select(select_lex->select_number) ?
+ select_lex->select_number : 0),
+ (output->get_select(select_lex->select_number) ?
+ output->get_select(select_lex->select_number)
+ ->select_lex : NULL)));
/*
If there is SELECT in this statement with the same number it must be the
same SELECT
@@ -3792,8 +4127,9 @@ bool JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
/* It's a degenerate join */
message= zero_result_cause ? zero_result_cause : "No tables used";
}
- return save_explain_data_intern(thd->lex->explain, need_tmp_table, need_order,
- distinct, message);
+ bool rc= save_explain_data_intern(thd->lex->explain, need_tmp_table,
+ need_order, distinct, message);
+ DBUG_RETURN(rc);
}
/*
@@ -3815,11 +4151,11 @@ bool JOIN::save_explain_data(Explain_query *output, bool can_overwrite,
{
if (!(join_tab[i].filesort->tracker=
new Filesort_tracker(thd->lex->analyze_stmt)))
- return 1;
+ DBUG_RETURN(1);
}
}
}
- return 0;
+ DBUG_RETURN(0);
}
@@ -3861,6 +4197,12 @@ void JOIN::exec_inner()
limit in order to produce the partial query result stored in the
UNION temp table.
*/
+
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_exec(thd, "join_execution");
+ trace_exec.add_select_number(select_lex->select_number);
+ Json_writer_array trace_steps(thd, "steps");
+
if (!select_lex->outer_select() && // (1)
select_lex != select_lex->master_unit()->fake_select_lex) // (2)
thd->lex->set_limit_rows_examined();
@@ -3886,7 +4228,6 @@ void JOIN::exec_inner()
if (select_options & SELECT_DESCRIBE)
select_describe(this, FALSE, FALSE, FALSE,
(zero_result_cause?zero_result_cause:"No tables used"));
-
else
{
if (result->send_result_set_metadata(*columns_list,
@@ -3985,7 +4326,8 @@ void JOIN::exec_inner()
not the case.
*/
if (exec_const_order_group_cond.elements &&
- !(select_options & SELECT_DESCRIBE))
+ !(select_options & SELECT_DESCRIBE) &&
+ !select_lex->pushdown_select)
{
List_iterator_fast<Item> const_item_it(exec_const_order_group_cond);
Item *cur_const_item;
@@ -4012,6 +4354,12 @@ void JOIN::exec_inner()
!table_count ? "No tables used" : NullS);
DBUG_VOID_RETURN;
}
+ else if (select_lex->pushdown_select)
+ {
+ /* Execute the query pushed into a foreign engine */
+ error= select_lex->pushdown_select->execute();
+ DBUG_VOID_RETURN;
+ }
else
{
/* it's a const select, materialize it. */
@@ -4176,10 +4524,10 @@ mysql_select(THD *thd,
is it single SELECT in derived table, called in derived table
creation
*/
- if (select_lex->linkage != DERIVED_TABLE_TYPE ||
+ if (select_lex->get_linkage() != DERIVED_TABLE_TYPE ||
(select_options & SELECT_DESCRIBE))
{
- if (select_lex->linkage != GLOBAL_OPTIONS_TYPE)
+ if (select_lex->get_linkage() != GLOBAL_OPTIONS_TYPE)
{
/*
Original join tabs might be overwritten at first
@@ -4223,6 +4571,21 @@ mysql_select(THD *thd,
}
}
+ /* Look for a table owned by an engine with the select_handler interface */
+ select_lex->select_h= select_lex->find_select_handler(thd);
+ if (select_lex->select_h)
+ {
+ /* Create a Pushdown_select object for later execution of the query */
+ if (!(select_lex->pushdown_select=
+ new (thd->mem_root) Pushdown_select(select_lex,
+ select_lex->select_h)))
+ {
+ delete select_lex->select_h;
+ select_lex->select_h= NULL;
+ DBUG_RETURN(TRUE);
+ }
+ }
+
if ((err= join->optimize()))
{
goto err; // 1
@@ -4246,6 +4609,13 @@ mysql_select(THD *thd,
}
err:
+
+ if (select_lex->pushdown_select)
+ {
+ delete select_lex->pushdown_select;
+ select_lex->pushdown_select= NULL;
+ }
+
if (free_join)
{
THD_STAGE_INFO(thd, stage_end);
@@ -4278,7 +4648,8 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select,
select->test_quick_select(thd, *(key_map *)keys,
(table_map) 0,
limit, 0, FALSE,
- TRUE /* remove_where_parts*/)) ==
+ TRUE, /* remove_where_parts*/
+ FALSE)) ==
1))
DBUG_RETURN(select->quick->records);
if (unlikely(error == -1))
@@ -4411,6 +4782,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
SARGABLE_PARAM *sargables= 0;
List_iterator<TABLE_LIST> ti(tables_list);
TABLE_LIST *tables;
+ THD *thd= join->thd;
DBUG_ENTER("make_join_statistics");
table_count=join->table_count;
@@ -4606,9 +4978,12 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
}
}
+ if (thd->trace_started())
+ trace_table_dependencies(thd, stat, join->table_count);
+
if (join->conds || outer_join)
{
- if (update_ref_and_keys(join->thd, keyuse_array, stat, join->table_count,
+ if (update_ref_and_keys(thd, keyuse_array, stat, join->table_count,
join->conds, ~outer_join, join->select_lex, &sargables))
goto error;
/*
@@ -4620,10 +4995,12 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
((Item_in_subselect*)join->unit->item)->test_strategy(SUBS_IN_TO_EXISTS));
if (keyuse_array->elements &&
- sort_and_filter_keyuse(join->thd, keyuse_array,
+ sort_and_filter_keyuse(thd, keyuse_array,
skip_unprefixed_keyparts))
goto error;
DBUG_EXECUTE("opt", print_keyuse_array(keyuse_array););
+ if (thd->trace_started())
+ print_keyuse_array_for_trace(thd, keyuse_array);
}
join->const_table_map= no_rows_const_tables;
@@ -4871,7 +5248,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
if (join->cond_value == Item::COND_FALSE)
{
join->impossible_where= true;
- conds= new (join->thd->mem_root) Item_int(join->thd, (longlong) 0, 1);
+ conds= new (join->thd->mem_root) Item_bool(join->thd, false);
}
join->cond_equal= NULL;
@@ -4908,143 +5285,163 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
/* Calc how many (possible) matched records in each table */
- for (s=stat ; s < stat_end ; s++)
+ /*
+ Todo: add a function so that we can add these Json_writer_objects
+ easily.
+ Another way would be to enclose them in a scope {};
+ */
{
- s->startup_cost= 0;
- if (s->type == JT_SYSTEM || s->type == JT_CONST)
- {
- /* Only one matching row */
- s->found_records= s->records= 1;
- s->read_time=1.0;
- s->worst_seeks=1.0;
- continue;
- }
- /* Approximate found rows and time to read them */
- if (s->table->is_filled_at_execution())
- {
- get_delayed_table_estimates(s->table, &s->records, &s->read_time,
- &s->startup_cost);
- s->found_records= s->records;
- table->quick_condition_rows=s->records;
- }
- else
- {
- s->scan_time();
- }
+ Json_writer_object rows_estimation_wrapper(thd);
+ Json_writer_array rows_estimation(thd, "rows_estimation");
+ for (s=stat ; s < stat_end ; s++)
+ {
+ s->startup_cost= 0;
+ if (s->type == JT_SYSTEM || s->type == JT_CONST)
+ {
+
+ Json_writer_object table_records(thd);
+ /* Only one matching row */
+ s->found_records= s->records= 1;
+ s->read_time=1.0;
+ s->worst_seeks=1.0;
+ table_records.add_table_name(s)
+ .add("rows", s->found_records)
+ .add("cost", s->read_time)
+ .add("table_type", s->type == JT_CONST ?
+ "const" :
+ "system");
+ continue;
+ }
+ /* Approximate found rows and time to read them */
+ if (s->table->is_filled_at_execution())
+ {
+ get_delayed_table_estimates(s->table, &s->records, &s->read_time,
+ &s->startup_cost);
+ s->found_records= s->records;
+ table->quick_condition_rows=s->records;
+ }
+ else
+ s->scan_time();
- if (s->table->is_splittable())
- s->add_keyuses_for_splitting();
+ if (s->table->is_splittable())
+ s->add_keyuses_for_splitting();
- /*
- Set a max range of how many seeks we can expect when using keys
- This is can't be to high as otherwise we are likely to use
- table scan.
- */
- s->worst_seeks= MY_MIN((double) s->found_records / 10,
- (double) s->read_time*3);
- if (s->worst_seeks < 2.0) // Fix for small tables
- s->worst_seeks=2.0;
-
- /*
- Add to stat->const_keys those indexes for which all group fields or
- all select distinct fields participate in one index.
- */
- add_group_and_distinct_keys(join, s);
+ /*
+ Set a max range of how many seeks we can expect when using keys
+ This can't be too high as otherwise we are likely to use a
+ table scan.
+ */
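+ /*
+ For example, with found_records= 1000 and read_time= 50 this gives
+ worst_seeks= MY_MIN(1000/10, 50*3)= 100.
+ */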
+ s->worst_seeks= MY_MIN((double) s->found_records / 10,
+ (double) s->read_time*3);
+ if (s->worst_seeks < 2.0) // Fix for small tables
+ s->worst_seeks=2.0;
- s->table->cond_selectivity= 1.0;
-
- /*
- Perform range analysis if there are keys it could use (1).
- Don't do range analysis for materialized subqueries (2).
- Don't do range analysis for materialized derived tables (3)
- */
- if ((!s->const_keys.is_clear_all() ||
- !bitmap_is_clear_all(&s->table->cond_set)) && // (1)
- !s->table->is_filled_at_execution() && // (2)
- !(s->table->pos_in_table_list->derived && // (3)
- s->table->pos_in_table_list->is_materialized_derived())) // (3)
- {
- bool impossible_range= FALSE;
- ha_rows records= HA_POS_ERROR;
- SQL_SELECT *select= 0;
- Item **sargable_cond= NULL;
- if (!s->const_keys.is_clear_all())
- {
- sargable_cond= get_sargable_cond(join, s->table);
-
- select= make_select(s->table, found_const_table_map,
- found_const_table_map,
- *sargable_cond,
- (SORT_INFO*) 0,
- 1, &error);
- if (!select)
- goto error;
- records= get_quick_record_count(join->thd, select, s->table,
- &s->const_keys, join->row_limit);
+ /*
+ Add to stat->const_keys those indexes for which all group fields or
+ all select distinct fields participate in one index.
+ */
+ add_group_and_distinct_keys(join, s);
- /*
- Range analyzer might have modified the condition. Put it the new
- condition to where we got it from.
- */
- *sargable_cond= select->cond;
+ s->table->cond_selectivity= 1.0;
- s->quick=select->quick;
- s->needed_reg=select->needed_reg;
- select->quick=0;
- impossible_range= records == 0 && s->table->reginfo.impossible_range;
- }
- if (!impossible_range)
- {
- if (!sargable_cond)
+ /*
+ Perform range analysis if there are keys it could use (1).
+ Don't do range analysis for materialized subqueries (2).
+ Don't do range analysis for materialized derived tables (3)
+ */
+ if ((!s->const_keys.is_clear_all() ||
+ !bitmap_is_clear_all(&s->table->cond_set)) && // (1)
+ !s->table->is_filled_at_execution() && // (2)
+ !(s->table->pos_in_table_list->derived && // (3)
+ s->table->pos_in_table_list->is_materialized_derived())) // (3)
+ {
+ bool impossible_range= FALSE;
+ ha_rows records= HA_POS_ERROR;
+ SQL_SELECT *select= 0;
+ Item **sargable_cond= NULL;
+ if (!s->const_keys.is_clear_all())
+ {
sargable_cond= get_sargable_cond(join, s->table);
- if (join->thd->variables.optimizer_use_condition_selectivity > 1)
- calculate_cond_selectivity_for_table(join->thd, s->table,
- sargable_cond);
- if (s->table->reginfo.impossible_range)
- {
- impossible_range= TRUE;
- records= 0;
+
+ select= make_select(s->table, found_const_table_map,
+ found_const_table_map,
+ *sargable_cond,
+ (SORT_INFO*) 0, 1, &error);
+ if (!select)
+ goto error;
+ records= get_quick_record_count(join->thd, select, s->table,
+ &s->const_keys, join->row_limit);
+
+ /*
+ The range analyzer might have modified the condition. Put the new
+ condition back to where we got it from.
+ */
+ *sargable_cond= select->cond;
+
+ s->quick=select->quick;
+ s->needed_reg=select->needed_reg;
+ select->quick=0;
+ impossible_range= records == 0 && s->table->reginfo.impossible_range;
+ if (join->thd->lex->sql_command == SQLCOM_SELECT &&
+ optimizer_flag(join->thd, OPTIMIZER_SWITCH_USE_ROWID_FILTER))
+ s->table->init_cost_info_for_usable_range_rowid_filters(join->thd);
}
- }
- if (impossible_range)
- {
- /*
- Impossible WHERE or ON expression
- In case of ON, we mark that the we match one empty NULL row.
- In case of WHERE, don't set found_const_table_map to get the
- caller to abort with a zero row result.
- */
- TABLE_LIST *emb= s->table->pos_in_table_list->embedding;
- if (emb && !emb->sj_on_expr)
+ if (!impossible_range)
{
- /* Mark all tables in a multi-table join nest as const */
- mark_join_nest_as_const(join, emb, &found_const_table_map,
- &const_count);
+ if (!sargable_cond)
+ sargable_cond= get_sargable_cond(join, s->table);
+ if (join->thd->variables.optimizer_use_condition_selectivity > 1)
+ calculate_cond_selectivity_for_table(join->thd, s->table,
+ sargable_cond);
+ if (s->table->reginfo.impossible_range)
+ {
+ impossible_range= TRUE;
+ records= 0;
+ }
}
- else
+ if (impossible_range)
{
- join->const_table_map|= s->table->map;
- set_position(join,const_count++,s,(KEYUSE*) 0);
- s->type= JT_CONST;
- s->table->const_table= 1;
- if (*s->on_expr_ref)
+ /*
+ Impossible WHERE or ON expression
+ In case of ON, we mark that we match one empty NULL row.
+ In case of WHERE, don't set found_const_table_map to get the
+ caller to abort with a zero row result.
+ */
+ TABLE_LIST *emb= s->table->pos_in_table_list->embedding;
+ if (emb && !emb->sj_on_expr)
{
- /* Generate empty row */
- s->info= ET_IMPOSSIBLE_ON_CONDITION;
- found_const_table_map|= s->table->map;
- mark_as_null_row(s->table); // All fields are NULL
+ /* Mark all tables in a multi-table join nest as const */
+ mark_join_nest_as_const(join, emb, &found_const_table_map,
+ &const_count);
+ }
+ else
+ {
+ join->const_table_map|= s->table->map;
+ set_position(join,const_count++,s,(KEYUSE*) 0);
+ s->type= JT_CONST;
+ s->table->const_table= 1;
+ if (*s->on_expr_ref)
+ {
+ /* Generate empty row */
+ s->info= ET_IMPOSSIBLE_ON_CONDITION;
+ found_const_table_map|= s->table->map;
+ mark_as_null_row(s->table); // All fields are NULL
+ }
}
}
+ if (records != HA_POS_ERROR)
+ {
+ s->found_records=records;
+ s->read_time= s->quick ? s->quick->read_time : 0.0;
+ }
+ if (select)
+ delete select;
+ else
+ add_table_scan_values_to_trace(thd, s);
}
- if (records != HA_POS_ERROR)
- {
- s->found_records=records;
- s->read_time= s->quick ? s->quick->read_time : 0.0;
- }
- if (select)
- delete select;
+ else
+ add_table_scan_values_to_trace(thd, s);
}
-
}
if (pull_out_semijoin_tables(join))
@@ -6098,6 +6495,7 @@ add_ft_keys(DYNAMIC_ARRAY *keyuse_array,
keyuse.keypart_map= 0;
keyuse.sj_pred_no= UINT_MAX;
keyuse.validity_ref= 0;
+ keyuse.null_rejecting= FALSE;
return insert_dynamic(keyuse_array,(uchar*) &keyuse);
}
@@ -6679,6 +7077,7 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
// join->positions[idx].loosescan_key= MAX_KEY; /* Not a LooseScan */
join->positions[idx].sj_strategy= SJ_OPT_NONE;
join->positions[idx].use_join_buffer= FALSE;
+ join->positions[idx].range_rowid_filter_info= 0;
/* Move the const table as down as possible in best_ref */
JOIN_TAB **pos=join->best_ref+idx+1;
@@ -6798,17 +7197,23 @@ best_access_path(JOIN *join,
double best_time= DBL_MAX;
double records= DBL_MAX;
table_map best_ref_depends_map= 0;
+ Range_rowid_filter_cost_info *best_filter= 0;
double tmp;
ha_rows rec;
bool best_uses_jbuf= FALSE;
MY_BITMAP *eq_join_set= &s->table->eq_join_set;
KEYUSE *hj_start_key= 0;
SplM_plan_info *spl_plan= 0;
+ Range_rowid_filter_cost_info *filter= 0;
+ const char* cause= NULL;
disable_jbuf= disable_jbuf || idx == join->const_tables;
Loose_scan_opt loose_scan_opt;
DBUG_ENTER("best_access_path");
+
+ Json_writer_object trace_wrapper(thd, "best_access_path");
+ Json_writer_array trace_paths(thd, "considered_access_paths");
bitmap_clear_all(eq_join_set);
@@ -6835,6 +7240,7 @@ best_access_path(JOIN *join,
key_part_map found_part= 0;
table_map found_ref= 0;
uint key= keyuse->key;
+ filter= 0;
bool ft_key= (keyuse->keypart == FT_KEYPART);
/* Bitmap of keyparts where the ref access is over 'keypart=const': */
key_part_map const_part= 0;
@@ -6923,6 +7329,7 @@ best_access_path(JOIN *join,
if (rec < MATCHING_ROWS_IN_OTHER_TABLE)
rec= MATCHING_ROWS_IN_OTHER_TABLE; // Fix for small tables
+ Json_writer_object trace_access_idx(thd);
/*
ft-keys require special treatment
*/
@@ -6934,6 +7341,8 @@ best_access_path(JOIN *join,
*/
tmp= prev_record_reads(join->positions, idx, found_ref);
records= 1.0;
+ trace_access_idx.add("access_type", "fulltext")
+ .add("index", keyinfo->name);
}
else
{
@@ -6948,11 +7357,15 @@ best_access_path(JOIN *join,
if ((key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME ||
MY_TEST(key_flags & HA_EXT_NOSAME))
{
+ trace_access_idx.add("access_type", "eq_ref")
+ .add("index", keyinfo->name);
tmp = prev_record_reads(join->positions, idx, found_ref);
records=1.0;
}
else
{
+ trace_access_idx.add("access_type", "ref")
+ .add("index", keyinfo->name);
if (!found_ref)
{ /* We found a const key */
/*
@@ -6973,11 +7386,16 @@ best_access_path(JOIN *join,
empty interval we wouldn't have got here).
*/
if (table->quick_keys.is_set(key))
+ {
records= (double) table->quick_rows[key];
+ trace_access_idx.add("used_range_estimates", true);
+ }
else
{
/* quick_range couldn't use key! */
records= (double) s->records/rec;
+ trace_access_idx.add("used_range_estimates", false)
+ .add("cause", "not available");
}
}
else
@@ -7009,7 +7427,23 @@ best_access_path(JOIN *join,
table->quick_n_ranges[key] == 1 &&
records > (double) table->quick_rows[key])
{
+
records= (double) table->quick_rows[key];
+ trace_access_idx.add("used_range_estimates", true);
+ }
+ else
+ {
+ if (table->quick_keys.is_set(key))
+ {
+ trace_access_idx.add("used_range_estimates",false)
+ .add("cause",
+ "not better than ref estimates");
+ }
+ else
+ {
+ trace_access_idx.add("used_range_estimates", false)
+ .add("cause", "not available");
+ }
}
}
/* Limit the number of matched rows */
@@ -7025,6 +7459,9 @@ best_access_path(JOIN *join,
}
else
{
+ trace_access_idx.add("access_type",
+ ref_or_null_part ? "ref_or_null" : "ref")
+ .add("index", keyinfo->name);
/*
Use as much key-parts as possible and a uniq key is better
than a not unique key
@@ -7079,6 +7516,7 @@ best_access_path(JOIN *join,
table->quick_n_ranges[key] == 1 + MY_TEST(ref_or_null_part)) //(C3)
{
tmp= records= (double) table->quick_rows[key];
+ trace_access_idx.add("used_range_estimates", true);
}
else
{
@@ -7104,7 +7542,19 @@ best_access_path(JOIN *join,
if (!found_ref && table->quick_keys.is_set(key) && // (1)
table->quick_key_parts[key] > max_key_part && // (2)
records < (double)table->quick_rows[key]) // (3)
+ {
+ trace_access_idx.add("used_range_estimates", true);
records= (double)table->quick_rows[key];
+ }
+ else
+ {
+ if (table->quick_keys.is_set(key) &&
+ table->quick_key_parts[key] < max_key_part)
+ {
+ trace_access_idx.add("chosen", false);
+ cause= "range uses more keyparts";
+ }
+ }
tmp= records;
}
@@ -7188,22 +7638,50 @@ best_access_path(JOIN *join,
tmp*= record_count;
}
else
+ {
+ if (!(found_part & 1))
+ cause= "no predicate for first keypart";
tmp= best_time; // Do nothing
+ }
}
tmp += s->startup_cost;
loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp);
} /* not ft_key */
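+ /*
+ Check whether a range rowid filter can cheapen this key access;
+ if such a filter is found, the access cost computed below is
+ reduced accordingly.
+ */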
+ if (records < DBL_MAX)
+ {
+ double rows= record_count * records;
+ double access_cost_factor= MY_MIN(tmp / rows, 1.0);
+ filter=
+ table->best_range_rowid_filter_for_partial_join(start_key->key, rows,
+ access_cost_factor);
+ if (filter)
+ {
+ filter->get_cmp_gain(rows);
+ tmp-= filter->get_adjusted_gain(rows) - filter->get_cmp_gain(rows);
+ DBUG_ASSERT(tmp >= 0);
+ }
+ }
+ trace_access_idx.add("rows", records).add("cost", tmp);
+
if (tmp + 0.0001 < best_time - records/(double) TIME_FOR_COMPARE)
{
+ trace_access_idx.add("chosen", true);
best_time= tmp + records/(double) TIME_FOR_COMPARE;
best= tmp;
best_records= records;
best_key= start_key;
best_max_key_part= max_key_part;
best_ref_depends_map= found_ref;
+ best_filter= filter;
+ }
+ else
+ {
+ trace_access_idx.add("chosen", false)
+ .add("cause", cause ? cause : "cost");
}
+ cause= NULL;
} /* for each key */
records= best_records;
}
@@ -7225,6 +7703,7 @@ best_access_path(JOIN *join,
(!(s->table->map & join->outer_join) ||
join->allowed_outer_join_with_cache)) // (2)
{
+ Json_writer_object trace_access_hash(thd);
double join_sel= 0.1;
/* Estimate the cost of the hash join access to the table */
double rnd_records= matching_candidates_in_table(s, found_constraint,
@@ -7244,7 +7723,13 @@ best_access_path(JOIN *join,
best_key= hj_start_key;
best_ref_depends_map= 0;
best_uses_jbuf= TRUE;
- }
+ best_filter= 0;
+ trace_access_hash.add("type", "hash");
+ trace_access_hash.add("index", "hj-key");
+ trace_access_hash.add("cost", rnd_records);
+ trace_access_hash.add("cost", best);
+ trace_access_hash.add("chosen", true);
+ }
/*
Don't test table scan if it can't be better.
@@ -7279,6 +7764,7 @@ best_access_path(JOIN *join,
can be [considered to be] more expensive, which causes lookups not to
be used for cases with small datasets, which is annoying.
*/
+ Json_writer_object trace_access_scan(thd);
if ((records >= s->found_records || best > s->read_time) && // (1)
!(s->quick && best_key && s->quick->index == best_key->key && // (2)
best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&// (2)
@@ -7296,8 +7782,13 @@ best_access_path(JOIN *join,
Here we estimate its cost.
*/
+ filter= 0;
if (s->quick)
{
+ trace_access_scan.add("access_type", "range");
+ /*
+ TODO: should have some info about all the different QUICK_SELECT types
+ */
/*
For each record we:
- read record range through 'quick'
@@ -7311,10 +7802,30 @@ best_access_path(JOIN *join,
(s->quick->read_time +
(s->found_records - rnd_records)/(double) TIME_FOR_COMPARE);
+ if ( s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE)
+ {
+ double rows= record_count * s->found_records;
+ double access_cost_factor= MY_MIN(tmp / rows, 1.0);
+ uint key_no= s->quick->index;
+ filter=
+ s->table->best_range_rowid_filter_for_partial_join(key_no, rows,
+ access_cost_factor);
+ if (filter)
+ {
+ tmp-= filter->get_adjusted_gain(rows);
+ DBUG_ASSERT(tmp >= 0);
+ }
+ }
+ else
+ {
+ best_filter= 0;
+ }
+
loose_scan_opt.check_range_access(join, idx, s->quick);
}
else
{
+ trace_access_scan.add("access_type", "scan");
/* Estimate cost of reading table. */
if (s->table->force_index && !best_key) // index scan
tmp= s->table->file->read_time(s->ref.key, 1, s->records);
@@ -7354,15 +7865,26 @@ best_access_path(JOIN *join,
tmp+= s->table->get_materialization_cost();
else
tmp+= s->startup_cost;
+
/*
We estimate the cost of evaluating WHERE clause for found records
as record_count * rnd_records / TIME_FOR_COMPARE. This cost plus
tmp give us total cost of using TABLE SCAN
*/
+
+ double best_filter_cmp_gain= 0;
+ if (best_filter)
+ {
+ best_filter_cmp_gain= best_filter->get_cmp_gain(record_count * records);
+ }
+ trace_access_scan.add("resulting_rows", rnd_records);
+ trace_access_scan.add("cost", tmp);
+
if (best == DBL_MAX ||
(tmp + record_count/(double) TIME_FOR_COMPARE*rnd_records <
(best_key->is_for_hash_join() ? best_time :
- best + record_count/(double) TIME_FOR_COMPARE*records)))
+ best + record_count/(double) TIME_FOR_COMPARE*records -
+ best_filter_cmp_gain)))
{
/*
If the table has a range (s->quick is set) make_join_select()
@@ -7371,12 +7893,22 @@ best_access_path(JOIN *join,
best= tmp;
records= rnd_records;
best_key= 0;
+ best_filter= 0;
+ if (s->quick && s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE)
+ best_filter= filter;
/* range/index_merge/ALL/index access method are "independent", so: */
best_ref_depends_map= 0;
best_uses_jbuf= MY_TEST(!disable_jbuf && !((s->table->map &
join->outer_join)));
spl_plan= 0;
}
+ trace_access_scan.add("chosen", best_key == NULL);
+ }
+ else
+ {
+ trace_access_scan.add("type", "scan");
+ trace_access_scan.add("chosen", false);
+ trace_access_scan.add("cause", "cost");
}
/* Update the cost information for the current partial plan */
@@ -7388,6 +7920,7 @@ best_access_path(JOIN *join,
pos->loosescan_picker.loosescan_key= MAX_KEY;
pos->use_join_buffer= best_uses_jbuf;
pos->spl_plan= spl_plan;
+ pos->range_rowid_filter_info= best_filter;
loose_scan_opt.save_to_position(s, loose_scan_pos);
@@ -7395,7 +7928,10 @@ best_access_path(JOIN *join,
idx == join->const_tables &&
s->table == join->sort_by_table &&
join->unit->select_limit_cnt >= records)
+ {
+ trace_access_scan.add("use_tmp_table", true);
join->sort_by_table= (TABLE*) 1; // Must use temporary table
+ }
DBUG_VOID_RETURN;
}
@@ -7538,6 +8074,7 @@ choose_plan(JOIN *join, table_map join_tables)
uint use_cond_selectivity=
join->thd->variables.optimizer_use_condition_selectivity;
bool straight_join= MY_TEST(join->select_options & SELECT_STRAIGHT_JOIN);
+ THD *thd= join->thd;
DBUG_ENTER("choose_plan");
join->cur_embedding_map= 0;
@@ -7574,6 +8111,9 @@ choose_plan(JOIN *join, table_map join_tables)
join->table_count - join->const_tables, sizeof(JOIN_TAB*),
jtab_sort_func, (void*)join->emb_sjm_nest);
+ Json_writer_object wrapper(thd);
+ Json_writer_array trace_plan(thd,"considered_execution_plans");
+
if (!join->emb_sjm_nest)
{
choose_initial_table_order(join);
@@ -7867,17 +8407,32 @@ optimize_straight_join(JOIN *join, table_map join_tables)
uint use_cond_selectivity=
join->thd->variables.optimizer_use_condition_selectivity;
POSITION loose_scan_pos;
+ THD *thd= join->thd;
for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++)
{
+ POSITION *position= join->positions + idx;
+ Json_writer_object trace_one_table(thd);
+ if (unlikely(thd->trace_started()))
+ {
+ trace_plan_prefix(join, idx, join_tables);
+ trace_one_table.add_table_name(s);
+ }
/* Find the best access method from 's' to the current partial plan */
best_access_path(join, s, join_tables, idx, disable_jbuf, record_count,
- join->positions + idx, &loose_scan_pos);
+ position, &loose_scan_pos);
/* compute the cost of the new plan extended with 's' */
- record_count*= join->positions[idx].records_read;
- read_time+= join->positions[idx].read_time +
- record_count / (double) TIME_FOR_COMPARE;
+ record_count*= position->records_read;
+ double filter_cmp_gain= 0;
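+ /*
+ If a range rowid filter was chosen for this position, its expected
+ comparison gain is subtracted from the cumulative plan cost below.
+ */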
+ if (position->range_rowid_filter_info)
+ {
+ filter_cmp_gain=
+ position->range_rowid_filter_info->get_cmp_gain(record_count);
+ }
+ read_time+= position->read_time +
+ record_count / (double) TIME_FOR_COMPARE -
+ filter_cmp_gain;
advance_sj_state(join, join_tables, idx, &record_count, &read_time,
&loose_scan_pos);
@@ -7886,7 +8441,7 @@ optimize_straight_join(JOIN *join, table_map join_tables)
if (use_cond_selectivity > 1)
pushdown_cond_selectivity= table_cond_selectivity(join, idx, s,
join_tables);
- join->positions[idx].cond_selectivity= pushdown_cond_selectivity;
+ position->cond_selectivity= pushdown_cond_selectivity;
++idx;
}
@@ -8598,6 +9153,18 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
}
+void trace_plan_prefix(JOIN *join, uint idx, table_map remaining_tables)
+{
+ THD *const thd= join->thd;
+ Json_writer_array plan_prefix(thd, "plan_prefix");
+ for (uint i= 0; i < idx; i++)
+ {
+ TABLE_LIST *const tr= join->positions[i].table->tab_list;
+ if (!(tr->map & remaining_tables))
+ plan_prefix.add_table_name(join->positions[i].table);
+ }
+}
+
/**
Find a good, possibly optimal, query execution plan (QEP) by a possibly
exhaustive search.
@@ -8775,6 +9342,13 @@ best_extension_by_limited_search(JOIN *join,
double current_record_count, current_read_time;
POSITION *position= join->positions + idx;
+ Json_writer_object trace_one_table(thd);
+ if (unlikely(thd->trace_started()))
+ {
+ trace_plan_prefix(join, idx, remaining_tables);
+ trace_one_table.add_table_name(s);
+ }
+
/* Find the best access method from 's' to the current partial plan */
POSITION loose_scan_pos;
best_access_path(join, s, remaining_tables, idx, disable_jbuf,
@@ -8785,8 +9359,15 @@ best_extension_by_limited_search(JOIN *join,
current_record_count= record_count * position->records_read;
else
current_record_count= DBL_MAX;
+ double filter_cmp_gain= 0;
+ if (position->range_rowid_filter_info)
+ {
+ filter_cmp_gain=
+ position->range_rowid_filter_info->get_cmp_gain(current_record_count);
+ }
current_read_time=read_time + position->read_time +
- current_record_count / (double) TIME_FOR_COMPARE;
+ current_record_count / (double) TIME_FOR_COMPARE -
+ filter_cmp_gain;
advance_sj_state(join, remaining_tables, idx, &current_record_count,
&current_read_time, &loose_scan_pos);
@@ -8799,6 +9380,7 @@ best_extension_by_limited_search(JOIN *join,
read_time,
current_read_time,
"prune_by_cost"););
+ trace_one_table.add("pruned_by_cost", true);
restore_prev_nj_state(s);
restore_prev_sj_state(remaining_tables, s, idx);
continue;
@@ -8832,6 +9414,7 @@ best_extension_by_limited_search(JOIN *join,
read_time,
current_read_time,
"pruned_by_heuristic"););
+ trace_one_table.add("pruned_by_heuristic", true);
restore_prev_nj_state(s);
restore_prev_sj_state(remaining_tables, s, idx);
continue;
@@ -8849,6 +9432,7 @@ best_extension_by_limited_search(JOIN *join,
if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) & allowed_tables )
{ /* Recursively expand the current partial plan */
swap_variables(JOIN_TAB*, join->best_ref[idx], *pos);
+ Json_writer_array trace_rest(thd, "rest_of_plan");
if (best_extension_by_limited_search(join,
remaining_tables & ~real_table_bit,
idx + 1,
@@ -9493,6 +10077,7 @@ bool JOIN::inject_cond_into_where(Item *injected_cond)
static Item * const null_ptr= NULL;
+
/*
Set up join struct according to the picked join order in
@@ -9652,6 +10237,8 @@ bool JOIN::get_best_combination()
is_hash_join_key_no(j->ref.key))
hash_join= TRUE;
+ j->range_rowid_filter_info= best_positions[tablenr].range_rowid_filter_info;
+
loop_end:
/*
Save records_read in JOIN_TAB so that select_describe()/etc don't have
@@ -10450,23 +11037,40 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
there inside the triggers.
*/
{ // Check const tables
- join->exec_const_cond=
- make_cond_for_table(thd, cond,
+ Item* const_cond= NULL;
+ const_cond= make_cond_for_table(thd, cond,
join->const_table_map,
(table_map) 0, -1, FALSE, FALSE);
/* Add conditions added by add_not_null_conds(). */
for (uint i= 0 ; i < join->const_tables ; i++)
- add_cond_and_fix(thd, &join->exec_const_cond,
+ add_cond_and_fix(thd, &const_cond,
join->join_tab[i].select_cond);
- DBUG_EXECUTE("where",print_where(join->exec_const_cond,"constants",
+ DBUG_EXECUTE("where",print_where(const_cond,"constants",
QT_ORDINARY););
- if (join->exec_const_cond && !join->exec_const_cond->is_expensive() &&
- !join->exec_const_cond->val_int())
+
+ if (const_cond)
{
- DBUG_PRINT("info",("Found impossible WHERE condition"));
- join->exec_const_cond= NULL;
- DBUG_RETURN(1); // Impossible const condition
+ Json_writer_object trace_const_cond(thd);
+ trace_const_cond.add("condition_on_constant_tables", const_cond);
+ if (const_cond->is_expensive())
+ {
+ trace_const_cond.add("evalualted", "false")
+ .add("cause", "expensive cond");
+ }
+ else
+ {
+ const bool const_cond_result = const_cond->val_int() != 0;
+ if (!const_cond_result)
+ {
+ DBUG_PRINT("info",("Found impossible WHERE condition"));
+ trace_const_cond.add("evalualted", "true")
+ .add("found", "impossible where");
+ join->exec_const_cond= NULL;
+ DBUG_RETURN(1);
+ }
+ }
+ join->exec_const_cond= const_cond;
}
if (join->table_count != join->const_tables)
@@ -10503,6 +11107,11 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
/*
Step #2: Extract WHERE/ON parts
*/
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_conditions(thd, "attaching_conditions_to_tables");
+ trace_conditions.add("original_condition", cond);
+ Json_writer_array trace_attached_comp(thd,
+ "attached_conditions_computation");
uint i;
for (i= join->top_join_tab_count - 1; i >= join->const_tables; i--)
{
@@ -10565,10 +11174,13 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
(!is_hash_join_key_no(tab->ref.key) &&
tab->table->intersect_keys.is_set(tab->ref.key))))
{
- /* Range uses longer key; Use this instead of ref on key */
- tab->type=JT_ALL;
- use_quick_range=1;
- tab->use_quick=1;
+ /* Range uses longer key; Use this instead of ref on key */
+ Json_writer_object ref_to_range(thd);
+ ref_to_range.add("ref_to_range", true);
+ ref_to_range.add("cause", "range uses longer key");
+ tab->type=JT_ALL;
+ use_quick_range=1;
+ tab->use_quick=1;
tab->ref.key= -1;
tab->ref.key_parts=0; // Don't use ref key.
join->best_positions[i].records_read= rows2double(tab->quick->records);
@@ -10634,7 +11246,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
below to check if we should use 'quick' instead.
*/
DBUG_PRINT("info", ("Item_int"));
- tmp= new (thd->mem_root) Item_int(thd, (longlong) 1, 1); // Always true
+ tmp= new (thd->mem_root) Item_bool(thd, true); // Always true
}
}
@@ -10713,6 +11325,11 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
sel->quick=tab->quick; // Use value from get_quick_...
sel->quick_keys.clear_all();
sel->needed_reg.clear_all();
+ if (is_hj && tab->rowid_filter)
+ {
+ delete tab->rowid_filter;
+ tab->rowid_filter= 0;
+ }
}
else
{
@@ -10762,7 +11379,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
Yet attributes of the just built condition are not needed.
Thus we call sel->cond->quick_fix_field for safety.
*/
- if (sel->cond && !sel->cond->fixed)
+ if (sel->cond && !sel->cond->is_fixed())
sel->cond->quick_fix_field();
if (sel->test_quick_select(thd, tab->keys,
@@ -10772,7 +11389,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
OPTION_FOUND_ROWS ?
HA_POS_ERROR :
join->unit->select_limit_cnt), 0,
- FALSE, FALSE) < 0)
+ FALSE, FALSE, FALSE) < 0)
{
/*
Before reporting "Impossible WHERE" for the whole query
@@ -10786,7 +11403,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
OPTION_FOUND_ROWS ?
HA_POS_ERROR :
join->unit->select_limit_cnt),0,
- FALSE, FALSE) < 0)
+ FALSE, FALSE, FALSE) < 0)
DBUG_RETURN(1); // Impossible WHERE
}
else
@@ -11006,6 +11623,20 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
if (!tab->bush_children)
i++;
}
+
+ trace_attached_comp.end();
+ Json_writer_array trace_attached_summary(thd,
+ "attached_conditions_summary");
+ for (tab= first_depth_first_tab(join); tab;
+ tab= next_depth_first_tab(join, tab))
+ {
+ if (!tab->table)
+ continue;
+ Item *const cond = tab->select_cond;
+ Json_writer_object trace_one_table(thd);
+ trace_one_table.add_table_name(tab);
+ trace_one_table.add("attached", cond);
+ }
}
DBUG_RETURN(0);
}
@@ -12386,6 +13017,37 @@ bool error_if_full_join(JOIN *join)
}
+void JOIN_TAB::build_range_rowid_filter_if_needed()
+{
+ if (rowid_filter && !is_rowid_filter_built)
+ {
+ /**
+ The same handler object (table->file) is used to build the filter
+ and to perform the primary table access (by the main query).
+
+ To estimate the time spent on building the filter, the tracker has to be
+ changed, and after the filter has been built it has to be switched back
+ to the previous tracker.
+ */
+ Exec_time_tracker *table_tracker= table->file->get_time_tracker();
+ Rowid_filter_tracker *rowid_tracker= rowid_filter->get_tracker();
+ table->file->set_time_tracker(rowid_tracker->get_time_tracker());
+ rowid_tracker->start_tracking();
+ if (!rowid_filter->build())
+ {
+ is_rowid_filter_built= true;
+ }
+ else
+ {
+ delete rowid_filter;
+ rowid_filter= 0;
+ }
+ rowid_tracker->stop_tracking();
+ table->file->set_time_tracker(table_tracker);
+ }
+}
+
+
/**
cleanup JOIN_TAB.
@@ -12409,6 +13071,11 @@ void JOIN_TAB::cleanup()
select= 0;
delete quick;
quick= 0;
+ if (rowid_filter)
+ {
+ delete rowid_filter;
+ rowid_filter= 0;
+ }
if (cache)
{
cache->free();
@@ -12771,7 +13438,8 @@ void JOIN::join_free()
!(select_options & SELECT_NO_UNLOCK) &&
!select_lex->subquery_in_having &&
(select_lex == (thd->lex->unit.fake_select_lex ?
- thd->lex->unit.fake_select_lex : &thd->lex->select_lex)))
+ thd->lex->unit.fake_select_lex :
+ thd->lex->first_select_lex())))
{
/*
TODO: unlock tables even if the join isn't top level select in the
@@ -12800,8 +13468,10 @@ void JOIN::join_free()
void JOIN::cleanup(bool full)
{
DBUG_ENTER("JOIN::cleanup");
- DBUG_PRINT("enter", ("full %u", (uint) full));
-
+ DBUG_PRINT("enter", ("select: %d (%p) join: %p full: %u",
+ select_lex->select_number, select_lex, this,
+ (uint) full));
+
if (full)
have_query_plan= QEP_DELETED;
@@ -13063,7 +13733,7 @@ static void update_depend_map_for_order(JOIN *join, ORDER *order)
order->used= 0;
// Not item_sum(), RAND() and no reference to table outside of sub select
if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT))
- && !order->item[0]->with_sum_func &&
+ && !order->item[0]->with_sum_func() &&
join->join_tab)
{
for (JOIN_TAB **tab=join->map2table;
@@ -13138,7 +13808,23 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
tab++)
tab->cached_eq_ref_table= FALSE;
- *simple_order= *join->join_tab[join->const_tables].on_expr_ref ? 0 : 1;
+ JOIN_TAB *head= join->join_tab + join->const_tables;
+ *simple_order= head->on_expr_ref[0] == NULL;
+ if (*simple_order && head->table->file->ha_table_flags() & HA_SLOW_RND_POS)
+ {
+ uint u1, u2, u3;
+ /*
+ normally the condition is (see filesort_use_addons())
+
+ length + sortlength <= max_length_for_sort_data
+
+ but for HA_SLOW_RND_POS tables we relax it a bit, as the alternative
+ is to use a temporary table, which is rather expensive.
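+ (presumably the relaxation here is the zero sortlength passed to
+ filesort_use_addons() below)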
+
+ TODO proper cost estimations
+ */
+ *simple_order= filesort_use_addons(head->table, 0, &u1, &u2, &u3);
+ }
}
else
{
@@ -13154,7 +13840,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
for (order=first_order; order ; order=order->next)
{
table_map order_tables=order->item[0]->used_tables();
- if (order->item[0]->with_sum_func ||
+ if (order->item[0]->with_sum_func() ||
/*
If the outer table of an outer join is const (either by itself or
after applying WHERE condition), grouping on a field from such a
@@ -13330,7 +14016,7 @@ ORDER *simple_remove_const(ORDER *order, COND *where)
ORDER *first= NULL, *prev= NULL;
for (; order; order= order->next)
{
- DBUG_ASSERT(!order->item[0]->with_sum_func); // should never happen
+ DBUG_ASSERT(!order->item[0]->with_sum_func()); // should never happen
if (!const_expression_in_where(where, order->item[0]))
{
if (!first)
@@ -13579,21 +14265,23 @@ finish:
FALSE otherwise
*/
-static bool check_simple_equality(THD *thd, const Item::Context &ctx,
- Item *left_item, Item *right_item,
- COND_EQUAL *cond_equal)
+bool check_simple_equality(THD *thd, const Item::Context &ctx,
+ Item *left_item, Item *right_item,
+ COND_EQUAL *cond_equal)
{
Item *orig_left_item= left_item;
Item *orig_right_item= right_item;
if (left_item->type() == Item::REF_ITEM &&
- ((Item_ref*)left_item)->ref_type() == Item_ref::VIEW_REF)
+ (((Item_ref*)left_item)->ref_type() == Item_ref::VIEW_REF ||
+ ((Item_ref*)left_item)->ref_type() == Item_ref::REF))
{
if (((Item_ref*)left_item)->get_depended_from())
return FALSE;
left_item= left_item->real_item();
}
if (right_item->type() == Item::REF_ITEM &&
- ((Item_ref*)right_item)->ref_type() == Item_ref::VIEW_REF)
+ (((Item_ref*)right_item)->ref_type() == Item_ref::VIEW_REF ||
+ ((Item_ref*)right_item)->ref_type() == Item_ref::REF))
{
if (((Item_ref*)right_item)->get_depended_from())
return FALSE;
@@ -14034,7 +14722,7 @@ COND *Item_cond_and::build_equal_items(THD *thd,
if (!cond_args->elements &&
!cond_equal.current_level.elements &&
!eq_list.elements)
- return new (thd->mem_root) Item_int(thd, (longlong) 1, 1);
+ return new (thd->mem_root) Item_bool(thd, true);
List_iterator_fast<Item_equal> it(cond_equal.current_level);
while ((item_equal= it++))
@@ -14141,7 +14829,7 @@ COND *Item_func_eq::build_equal_items(THD *thd,
Item_equal *item_equal;
int n= cond_equal.current_level.elements + eq_list.elements;
if (n == 0)
- return new (thd->mem_root) Item_int(thd, (longlong) 1, 1);
+ return new (thd->mem_root) Item_bool(thd, true);
else if (n == 1)
{
if ((item_equal= cond_equal.current_level.pop()))
@@ -14532,7 +15220,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
List<Item> eq_list;
Item_func_eq *eq_item= 0;
if (((Item *) item_equal)->const_item() && !item_equal->val_int())
- return new (thd->mem_root) Item_int(thd, (longlong) 0, 1);
+ return new (thd->mem_root) Item_bool(thd, false);
Item *item_const= item_equal->get_const();
Item_equal_fields_iterator it(*item_equal);
Item *head;
@@ -14540,12 +15228,12 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
Item *current_sjm_head= NULL;
DBUG_ASSERT(!cond ||
- cond->type() == Item::INT_ITEM ||
+ cond->is_bool_literal() ||
(cond->type() == Item::FUNC_ITEM &&
((Item_func *) cond)->functype() == Item_func::EQ_FUNC) ||
(cond->type() == Item::COND_ITEM &&
((Item_func *) cond)->functype() == Item_func::COND_AND_FUNC));
-
+
/*
Pick the "head" item: the constant one or the first in the join order
(if the first in the join order happens to be inside an SJM nest, that's
@@ -14642,11 +15330,9 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
*/
Item *head_item= (!item_const && current_sjm &&
current_sjm_head != field_item) ? current_sjm_head: head;
- Item *head_real_item= head_item->real_item();
- if (head_real_item->type() == Item::FIELD_ITEM)
- head_item= head_real_item;
-
- eq_item= new (thd->mem_root) Item_func_eq(thd, field_item->real_item(), head_item);
+ eq_item= new (thd->mem_root) Item_func_eq(thd,
+ field_item->remove_item_direct_ref(),
+ head_item->remove_item_direct_ref());
if (!eq_item || eq_item->set_cmp_func())
return 0;
@@ -14661,13 +15347,13 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
cond AND eq_1 AND eq_2 AND eq_3 AND ...
- 'cond' is a parameter for this function, which may be NULL, an Item_int(1),
+ 'cond' is a parameter for this function, which may be NULL, an Item_bool(1),
or an Item_func_eq or an Item_cond_and.
We want to return a well-formed condition: no nested Item_cond_and objects,
or Item_cond_and with a single child:
- if 'cond' is an Item_cond_and, we add eq_i as its tail
- - if 'cond' is Item_int(1), we return eq_i
+ - if 'cond' is Item_bool(1), we return eq_i
- otherwise, we create our own Item_cond_and and put 'cond' at the front of
it.
- if we have only one condition to return, we don't create an Item_cond_and
@@ -14679,10 +15365,10 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
switch (eq_list.elements)
{
case 0:
- res= cond ? cond : new (thd->mem_root) Item_int(thd, (longlong) 1, 1);
+ res= cond ? cond : new (thd->mem_root) Item_bool(thd, true);
break;
case 1:
- if (!cond || cond->type() == Item::INT_ITEM)
+ if (!cond || cond->is_bool_literal())
res= eq_item;
break;
default:
@@ -14733,6 +15419,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
@param cond condition to process
@param cond_equal multiple equalities to take into consideration
@param table_join_idx index to tables determining field preference
+ @param do_substitution if false: do not do any field substitution
@note
At the first glance full sort of fields in multiple equality
@@ -14772,7 +15459,8 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels,
static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
COND *cond,
COND_EQUAL *cond_equal,
- void *table_join_idx)
+ void *table_join_idx,
+ bool do_substitution)
{
Item_equal *item_equal;
COND *org_cond= cond; // Return this in case of fatal error
@@ -14801,7 +15489,8 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
{
Item *new_item= substitute_for_best_equal_field(thd, context_tab,
item, cond_equal,
- table_join_idx);
+ table_join_idx,
+ do_substitution);
/*
This works OK with PS/SP re-execution as changes are made to
the arguments of AND/OR items only
@@ -14815,8 +15504,12 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
COND *eq_cond= 0;
List_iterator_fast<Item_equal> it(cond_equal->current_level);
bool false_eq_cond= FALSE;
+ bool all_deleted= true;
while ((item_equal= it++))
{
+ if (item_equal->get_extraction_flag() == DELETION_FL)
+ continue;
+ all_deleted= false;
eq_cond= eliminate_item_equal(thd, eq_cond, cond_equal->upper_levels,
item_equal);
if (!eq_cond)
@@ -14824,7 +15517,7 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
eq_cond= 0;
break;
}
- else if (eq_cond->type() == Item::INT_ITEM && !eq_cond->val_bool())
+ else if (eq_cond->is_bool_literal() && !eq_cond->val_bool())
{
/*
This occurs when eliminate_item_equal() finds that cond is
@@ -14849,13 +15542,13 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
else
{
/* Do not add an equality condition if it's always true */
- if (eq_cond->type() != Item::INT_ITEM &&
+ if (!eq_cond->is_bool_literal() &&
cond_list->push_front(eq_cond, thd->mem_root))
eq_cond= 0;
}
}
}
- if (!eq_cond)
+ if (!eq_cond && !all_deleted)
{
/*
We are out of memory doing the transformation.
@@ -14874,10 +15567,12 @@ static COND* substitute_for_best_equal_field(THD *thd, JOIN_TAB *context_tab,
cond_equal= item_equal->upper_levels;
if (cond_equal && cond_equal->current_level.head() == item_equal)
cond_equal= cond_equal->upper_levels;
+ if (item_equal->get_extraction_flag() == DELETION_FL)
+ return 0;
cond= eliminate_item_equal(thd, 0, cond_equal, item_equal);
return cond ? cond : org_cond;
}
- else
+ else if (do_substitution)
{
while (cond_equal)
{
@@ -15363,7 +16058,7 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top,
conds= and_conds(join->thd, conds, table->on_expr);
conds->top_level_item();
/* conds is always a new item as both cond and on_expr existed */
- DBUG_ASSERT(!conds->fixed);
+ DBUG_ASSERT(!conds->is_fixed());
conds->fix_fields(join->thd, &conds);
}
else
@@ -15828,6 +16523,8 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
double cost, rec_count;
table_map reopt_remaining_tables= last_remaining_tables;
uint i;
+ THD *thd= join->thd;
+ Json_writer_temp_disable trace_wo_join_buffering(thd);
if (first_tab > join->const_tables)
{
@@ -15862,7 +16559,7 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
{
JOIN_TAB *rs= join->positions[i].table;
POSITION pos, loose_scan_pos;
-
+
if ((i == first_tab && first_alt) || join->positions[i].use_join_buffer)
{
/* Find the best access method that would not use join buffering */
@@ -15914,12 +16611,24 @@ optimize_cond(JOIN *join, COND *conds,
that occurs in a function set a pointer to the multiple equality
predicate. Substitute a constant instead of this field if the
multiple equality contains a constant.
- */
+ */
+
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_cond(thd, "condition_processing");
+ trace_cond.add("condition", join->conds == conds ? "WHERE" : "HAVING")
+ .add("original_condition", conds);
+
+ Json_writer_array trace_steps(thd, "steps");
DBUG_EXECUTE("where", print_where(conds, "original", QT_ORDINARY););
conds= build_equal_items(join, conds, NULL, join_list,
ignore_on_conds, cond_equal,
MY_TEST(flags & OPT_LINK_EQUAL_FIELDS));
DBUG_EXECUTE("where",print_where(conds,"after equal_items", QT_ORDINARY););
+ {
+ Json_writer_object equal_prop_wrapper(thd);
+ equal_prop_wrapper.add("transformation", "equality_propagation")
+ .add("resulting_condition", conds);
+ }
/* change field = field to field = const for each found field = const */
propagate_cond_constants(thd, (I_List<COND_CMP> *) 0, conds, conds);
@@ -15928,10 +16637,21 @@ optimize_cond(JOIN *join, COND *conds,
Remove all and-levels where CONST item != CONST item
*/
DBUG_EXECUTE("where",print_where(conds,"after const change", QT_ORDINARY););
+ {
+ Json_writer_object const_prop_wrapper(thd);
+ const_prop_wrapper.add("transformation", "constant_propagation")
+ .add("resulting_condition", conds);
+ }
conds= conds->remove_eq_conds(thd, cond_value, true);
if (conds && conds->type() == Item::COND_ITEM &&
((Item_cond*) conds)->functype() == Item_func::COND_AND_FUNC)
*cond_equal= &((Item_cond_and*) conds)->m_cond_equal;
+
+ {
+ Json_writer_object cond_removal_wrapper(thd);
+ cond_removal_wrapper.add("transformation", "trivial_condition_removal")
+ .add("resulting_condition", conds);
+ }
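+ /*
+ A sketch of the trace produced by the objects above (key names only):
+ "condition_processing": {
+ "condition": "WHERE" or "HAVING",
+ "original_condition": ...,
+ "steps": [
+ { "transformation": "equality_propagation", "resulting_condition": ... },
+ { "transformation": "constant_propagation", "resulting_condition": ... },
+ { "transformation": "trivial_condition_removal", "resulting_condition": ... }
+ ]
+ }
+ */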
DBUG_EXECUTE("info",print_where(conds,"after remove", QT_ORDINARY););
}
DBUG_RETURN(conds);
@@ -16431,9 +17151,8 @@ Item_func_isnull::remove_eq_conds(THD *thd, Item::cond_result *cond_value,
{
Field *field= ((Item_field*) real_item)->field;
- if (((field->type() == MYSQL_TYPE_DATE) ||
- (field->type() == MYSQL_TYPE_DATETIME)) &&
- (field->flags & NOT_NULL_FLAG))
+ if ((field->flags & NOT_NULL_FLAG) &&
+ field->type_handler()->cond_notnull_field_isnull_to_field_eq_zero())
{
/* fix to replace 'NULL' dates with '0' (shreeve@uci.edu) */
/*
@@ -16449,7 +17168,7 @@ Item_func_isnull::remove_eq_conds(THD *thd, Item::cond_result *cond_value,
*/
- Item *item0= new(thd->mem_root) Item_int(thd, (longlong) 0, 1);
+ Item *item0= new(thd->mem_root) Item_bool(thd, false);
Item *eq_cond= new(thd->mem_root) Item_func_eq(thd, args[0], item0);
if (!eq_cond)
return this;
@@ -16519,7 +17238,7 @@ Item_func_isnull::remove_eq_conds(THD *thd, Item::cond_result *cond_value,
cond= new_cond;
/*
Item_func_eq can't be fixed after creation so we do not check
- cond->fixed, also it do not need tables so we use 0 as second
+ cond->is_fixed(); also it does not need tables, so we use 0 as the second
argument.
*/
cond->fix_fields(thd, &cond);
@@ -16679,60 +17398,6 @@ const_expression_in_where(COND *cond, Item *comp_item, Field *comp_field,
Create internal temporary table
****************************************************************************/
-/**
- Create field for temporary table from given field.
-
- @param thd Thread handler
- @param org_field field from which new field will be created
- @param name New field name
- @param table Temporary table
- @param item !=NULL if item->result_field should point to new field.
- This is relevant for how fill_record() is going to work:
- If item != NULL then fill_record() will update
- the record in the original table.
- If item == NULL then fill_record() will update
- the temporary table
-
- @retval
- NULL on error
- @retval
- new_created field
-*/
-
-Field *create_tmp_field_from_field(THD *thd, Field *org_field,
- LEX_CSTRING *name, TABLE *table,
- Item_field *item)
-{
- Field *new_field;
-
- new_field= org_field->make_new_field(thd->mem_root, table,
- table == org_field->table);
- if (new_field)
- {
- new_field->init(table);
- new_field->orig_table= org_field->orig_table;
- if (item)
- item->result_field= new_field;
- else
- new_field->field_name= *name;
- new_field->flags|= org_field->flags & NO_DEFAULT_VALUE_FLAG;
- if (org_field->maybe_null() || (item && item->maybe_null))
- new_field->flags&= ~NOT_NULL_FLAG; // Because of outer join
- if (org_field->type() == MYSQL_TYPE_VAR_STRING ||
- org_field->type() == MYSQL_TYPE_VARCHAR)
- table->s->db_create_options|= HA_OPTION_PACK_RECORD;
- else if (org_field->type() == FIELD_TYPE_DOUBLE)
- ((Field_double *) new_field)->not_fixed= TRUE;
- new_field->vcol_info= 0;
- new_field->cond_selectivity= 1.0;
- new_field->next_equal_field= NULL;
- new_field->option_list= NULL;
- new_field->option_struct= NULL;
- }
- return new_field;
-}
-
-
Field *Item::create_tmp_field_int(TABLE *table, uint convert_int_length)
{
const Type_handler *h= &type_handler_long;
@@ -16742,6 +17407,22 @@ Field *Item::create_tmp_field_int(TABLE *table, uint convert_int_length)
*this, table);
}
+Field *Item::tmp_table_field_from_field_type_maybe_null(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param,
+ bool is_explicit_null)
+{
+ DBUG_ASSERT(!param->make_copy_field());
+ DBUG_ASSERT(!is_result_field());
+ Field *result;
+ if ((result= tmp_table_field_from_field_type(table)))
+ {
+ if (result && is_explicit_null)
+ result->is_created_from_null_item= true;
+ }
+ return result;
+}
+
Field *Item_sum::create_tmp_field(bool group, TABLE *table)
{
@@ -16773,57 +17454,6 @@ Field *Item_sum::create_tmp_field(bool group, TABLE *table)
}
-static void create_tmp_field_from_item_finalize(THD *thd,
- Field *new_field,
- Item *item,
- Item ***copy_func,
- bool modify_item)
-{
- if (copy_func &&
- (item->is_result_field() ||
- (item->real_item()->is_result_field())))
- *((*copy_func)++) = item; // Save for copy_funcs
- if (modify_item)
- item->set_result_field(new_field);
- if (item->type() == Item::NULL_ITEM)
- new_field->is_created_from_null_item= TRUE;
-}
-
-
-/**
- Create field for temporary table using type of given item.
-
- @param thd Thread handler
- @param item Item to create a field for
- @param table Temporary table
- @param copy_func If set and item is a function, store copy of
- item in this array
- @param modify_item 1 if item->result_field should point to new
- item. This is relevent for how fill_record()
- is going to work:
- If modify_item is 1 then fill_record() will
- update the record in the original table.
- If modify_item is 0 then fill_record() will
- update the temporary table
-
- @retval
- 0 on error
- @retval
- new_created field
-*/
-
-static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
- Item ***copy_func, bool modify_item)
-{
- Field *UNINIT_VAR(new_field);
- DBUG_ASSERT(thd == table->in_use);
- if ((new_field= item->create_tmp_field(false, table)))
- create_tmp_field_from_item_finalize(thd, new_field, item,
- copy_func, modify_item);
- return new_field;
-}
-
-
/**
Create field for information schema table.
@@ -16861,19 +17491,182 @@ Field *Item::create_field_for_schema(THD *thd, TABLE *table)
/**
+ Create a temporary field for Item_field (or its descendant),
+ either direct or referenced by an Item_ref.
+*/
+Field *
+Item_field::create_tmp_field_from_item_field(TABLE *new_table,
+ Item_ref *orig_item,
+ const Tmp_field_param *param)
+{
+ DBUG_ASSERT(!is_result_field());
+ Field *result;
+ /*
+ If the item has to be able to store NULLs but the underlying field can't do it,
+ create_tmp_field_from_field() can't be used for tmp field creation.
+ */
+ if (((maybe_null && in_rollup) ||
+ (new_table->in_use->create_tmp_table_for_derived && /* for mat. view/dt */
+ orig_item && orig_item->maybe_null)) &&
+ !field->maybe_null())
+ {
+ /*
+ The item the ref points to may have maybe_null flag set while
+ the ref doesn't have it. This may happen for outer fields
+ when the outer query decided at some point after name resolution phase
+ that this field might be null. Take this into account here.
+ */
+ Record_addr rec(orig_item ? orig_item->maybe_null : maybe_null);
+ const Type_handler *handler= type_handler()->
+ type_handler_for_tmp_table(this);
+ result= handler->make_and_init_table_field(orig_item ? &orig_item->name : &name,
+ rec, *this, new_table);
+ }
+ else if (param->table_cant_handle_bit_fields() &&
+ field->type() == MYSQL_TYPE_BIT)
+ {
+ const Type_handler *handler= type_handler_long_or_longlong();
+ result= handler->make_and_init_table_field(&name,
+ Record_addr(maybe_null),
+ *this, new_table);
+ }
+ else
+ {
+ LEX_CSTRING *tmp= orig_item ? &orig_item->name : &name;
+ bool tmp_maybe_null= param->modify_item() ? maybe_null :
+ field->maybe_null();
+ result= field->create_tmp_field(new_table->in_use->mem_root, new_table,
+ tmp_maybe_null);
+ if (result)
+ result->field_name= *tmp;
+ }
+ if (result && param->modify_item())
+ result_field= result;
+ return result;
+}
+
+
+Field *Item_field::create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ DBUG_ASSERT(!is_result_field());
+ Field *result;
+ src->set_field(field);
+ if (!(result= create_tmp_field_from_item_field(table, NULL, param)))
+ return NULL;
+ /*
+ Fields that are used as arguments to the DEFAULT() function already have
+ their data pointers set to the default value during name resolution. See
+ Item_default_value::fix_fields.
+ */
+ if (type() != Item::DEFAULT_VALUE_ITEM && field->eq_def(result))
+ src->set_default_field(field);
+ return result;
+}
+
+
+Field *Item_ref::create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ Item *item= real_item();
+ DBUG_ASSERT(is_result_field());
+ if (item->type() == Item::FIELD_ITEM)
+ {
+ Field *result;
+ Item_field *field= (Item_field*) item;
+ Tmp_field_param prm2(*param);
+ prm2.set_modify_item(false);
+ src->set_field(field->field);
+ if (!(result= field->create_tmp_field_from_item_field(table, this, &prm2)))
+ return NULL;
+ if (param->modify_item())
+ result_field= result;
+ return result;
+ }
+ return Item_result_field::create_tmp_field_ex(table, src, param);
+}
+
+
+void Item_result_field::get_tmp_field_src(Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ if (param->make_copy_field())
+ {
+ DBUG_ASSERT(result_field);
+ src->set_field(result_field);
+ }
+ else
+ {
+ src->set_item_result_field(this); // Save for copy_funcs
+ }
+}
+
+
+Field *Item_result_field::create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ /*
+ Possible Item types:
+ - Item_cache_wrapper (only for CREATE..SELECT ?)
+ - Item_func
+ - Item_subselect
+ */
+ DBUG_ASSERT(is_result_field());
+ DBUG_ASSERT(type() != NULL_ITEM);
+ get_tmp_field_src(src, param);
+ Field *result;
+ if ((result= tmp_table_field_from_field_type(table)) && param->modify_item())
+ result_field= result;
+ return result;
+}
+
+
+Field *Item_func_user_var::create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ DBUG_ASSERT(is_result_field());
+ DBUG_ASSERT(type() != NULL_ITEM);
+ get_tmp_field_src(src, param);
+ Field *result;
+ if ((result= create_table_field_from_handler(table)) && param->modify_item())
+ result_field= result;
+ return result;
+}
+
+
+Field *Item_func_sp::create_tmp_field_ex(TABLE *table,
+ Tmp_field_src *src,
+ const Tmp_field_param *param)
+{
+ Field *result;
+ get_tmp_field_src(src, param);
+ if ((result= sp_result_field->create_tmp_field(table->in_use->mem_root,
+ table)))
+ {
+ result->field_name= name;
+ if (param->modify_item())
+ result_field= result;
+ }
+ return result;
+}
+
+/**
Create field for temporary table.
- @param thd Thread handler
- @param table Temporary table
- @param item Item to create a field for
- @param type Type of item (normally item->type)
- @param copy_func If set and item is a function, store copy of item
+ @param table Temporary table
+ @param item Item to create a field for
+ @param type Type of item (normally item->type)
+ @param copy_func If set and item is a function, store copy of item
in this array
@param from_field if field will be created using other field as example,
pointer example field will be written here
- @param default_field If field has a default value field, store it here
- @param group 1 if we are going to do a relative group by on result
- @param modify_item 1 if item->result_field should point to new item.
+ @param default_field If field has a default value field, store it here
+ @param group 1 if we are going to do a relative group by on result
+ @param modify_item 1 if item->result_field should point to new item.
This is relevant for how fill_record() is going to
work:
If modify_item is 1 then fill_record() will update
@@ -16882,175 +17675,28 @@ Field *Item::create_field_for_schema(THD *thd, TABLE *table)
the temporary table
@retval
- 0 on error
+ 0 on error
@retval
new_created field
+ Create a temporary field for Item_field (or its descendant),
+ either direct or referenced by an Item_ref.
*/
-
-Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
+Field *create_tmp_field(TABLE *table, Item *item,
Item ***copy_func, Field **from_field,
Field **default_field,
bool group, bool modify_item,
bool table_cant_handle_bit_fields,
bool make_copy_field)
{
- Field *result;
- Item::Type orig_type= type;
- Item *orig_item= 0;
-
- DBUG_ASSERT(thd == table->in_use);
-
- if (type != Item::FIELD_ITEM &&
- item->real_item()->type() == Item::FIELD_ITEM)
- {
- orig_item= item;
- item= item->real_item();
- type= Item::FIELD_ITEM;
- }
-
- switch (type) {
- case Item::TYPE_HOLDER:
- case Item::SUM_FUNC_ITEM:
- {
- result= item->create_tmp_field(group, table);
- if (!result)
- my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
- return result;
- }
- case Item::FIELD_ITEM:
- case Item::DEFAULT_VALUE_ITEM:
- case Item::INSERT_VALUE_ITEM:
- case Item::TRIGGER_FIELD_ITEM:
- {
- Item_field *field= (Item_field*) item;
- bool orig_modify= modify_item;
- if (orig_type == Item::REF_ITEM)
- modify_item= 0;
- /*
- If item have to be able to store NULLs but underlaid field can't do it,
- create_tmp_field_from_field() can't be used for tmp field creation.
- */
- if (((field->maybe_null && field->in_rollup) ||
- (thd->create_tmp_table_for_derived && /* for mat. view/dt */
- orig_item && orig_item->maybe_null)) &&
- !field->field->maybe_null())
- {
- bool save_maybe_null= FALSE;
- /*
- The item the ref points to may have maybe_null flag set while
- the ref doesn't have it. This may happen for outer fields
- when the outer query decided at some point after name resolution phase
- that this field might be null. Take this into account here.
- */
- if (orig_item)
- {
- save_maybe_null= item->maybe_null;
- item->maybe_null= orig_item->maybe_null;
- }
- result= create_tmp_field_from_item(thd, item, table, NULL,
- modify_item);
- *from_field= field->field;
- if (result && modify_item)
- field->result_field= result;
- if (orig_item)
- {
- item->maybe_null= save_maybe_null;
- result->field_name= orig_item->name;
- }
- }
- else if (table_cant_handle_bit_fields && field->field->type() ==
- MYSQL_TYPE_BIT)
- {
- const Type_handler *handler= item->type_handler_long_or_longlong();
- *from_field= field->field;
- if ((result=
- handler->make_and_init_table_field(&item->name,
- Record_addr(item->maybe_null),
- *item, table)))
- create_tmp_field_from_item_finalize(thd, result, item,
- copy_func, modify_item);
- if (result && modify_item)
- field->result_field= result;
- }
- else
- {
- LEX_CSTRING *tmp= orig_item ? &orig_item->name : &item->name;
- result= create_tmp_field_from_field(thd, (*from_field= field->field),
- tmp, table,
- modify_item ? field :
- NULL);
- }
-
- if (orig_type == Item::REF_ITEM && orig_modify)
- ((Item_ref*)orig_item)->set_result_field(result);
- /*
- Fields that are used as arguments to the DEFAULT() function already have
- their data pointers set to the default value during name resolution. See
- Item_default_value::fix_fields.
- */
- if (orig_type != Item::DEFAULT_VALUE_ITEM && field->field->eq_def(result))
- *default_field= field->field;
- return result;
- }
- /* Fall through */
- case Item::FUNC_ITEM:
- if (((Item_func *) item)->functype() == Item_func::FUNC_SP)
- {
- Item_func_sp *item_func_sp= (Item_func_sp *) item;
- Field *sp_result_field= item_func_sp->get_sp_result_field();
-
- if (make_copy_field)
- {
- DBUG_ASSERT(item_func_sp->result_field);
- *from_field= item_func_sp->result_field;
- }
- else
- {
- *((*copy_func)++)= item;
- }
- Field *result_field=
- create_tmp_field_from_field(thd,
- sp_result_field,
- &item_func_sp->name,
- table,
- NULL);
-
- if (modify_item)
- item->set_result_field(result_field);
-
- return result_field;
- }
-
- /* Fall through */
- case Item::COND_ITEM:
- case Item::FIELD_AVG_ITEM:
- case Item::FIELD_STD_ITEM:
- case Item::SUBSELECT_ITEM:
- /* The following can only happen with 'CREATE TABLE ... SELECT' */
- case Item::PROC_ITEM:
- case Item::INT_ITEM:
- case Item::REAL_ITEM:
- case Item::DECIMAL_ITEM:
- case Item::STRING_ITEM:
- case Item::DATE_ITEM:
- case Item::REF_ITEM:
- case Item::NULL_ITEM:
- case Item::VARBIN_ITEM:
- case Item::CACHE_ITEM:
- case Item::WINDOW_FUNC_ITEM: // psergey-winfunc:
- case Item::EXPR_CACHE_ITEM:
- case Item::PARAM_ITEM:
- if (make_copy_field)
- {
- DBUG_ASSERT(((Item_result_field*)item)->result_field);
- *from_field= ((Item_result_field*)item)->result_field;
- }
- return create_tmp_field_from_item(thd, item, table,
- (make_copy_field ? 0 : copy_func),
- modify_item);
- default: // Dosen't have to be stored
- return 0;
- }
+ Tmp_field_src src;
+ Tmp_field_param prm(group, modify_item, table_cant_handle_bit_fields,
+ make_copy_field);
+ Field *result= item->create_tmp_field_ex(table, &src, &prm);
+ *from_field= src.field();
+ *default_field= src.default_field();
+ if (src.item_result_field())
+ *((*copy_func)++)= src.item_result_field();
+ return result;
}
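The hunk above replaces the long per-Item::Type switch in create_tmp_field() with a virtual create_tmp_field_ex() hook on each item class, plus a small Tmp_field_src carrier for the side outputs (from_field, default_field, copy_funcs entry). The sketch below is a condensed, hypothetical analogue of that refactoring; the class and member names are invented for illustration and are not the server classes.

    #include <iostream>

    struct Field_stub { const char *origin; };

    struct Src                    // plays the role of Tmp_field_src
    {
      Field_stub *field= nullptr;
      Field_stub *default_field= nullptr;
    };

    struct Param { bool modify_item; };   // plays the role of Tmp_field_param

    struct ItemBase
    {
      virtual ~ItemBase() = default;
      // Each item class decides how to create its tmp field and what to report back.
      virtual Field_stub *create_tmp_field_ex(Src *src, const Param *param)= 0;
    };

    struct ItemField : ItemBase
    {
      Field_stub source{"base table column"};
      Field_stub *create_tmp_field_ex(Src *src, const Param *) override
      {
        src->field= &source;                // what used to be *from_field= ...
        static Field_stub tmp{"tmp column"};
        return &tmp;
      }
    };

    static Field_stub *create_tmp_field(ItemBase *item, Field_stub **from_field)
    {
      Src src;
      Param prm{true};
      Field_stub *result= item->create_tmp_field_ex(&src, &prm);
      *from_field= src.field;               // caller still receives the side outputs
      return result;
    }

    int main()
    {
      ItemField item;
      Field_stub *from= nullptr;
      Field_stub *f= create_tmp_field(&item, &from);
      std::cout << f->origin << " created from " << from->origin << "\n";
      return 0;
    }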
/*
@@ -17066,7 +17712,7 @@ setup_tmp_table_column_bitmaps(TABLE *table, uchar *bitmaps, uint field_count)
{
uint bitmap_size= bitmap_buffer_size(field_count);
- DBUG_ASSERT(table->s->virtual_fields == 0 && table->def_vcol_set == 0);
+ DBUG_ASSERT(table->s->virtual_fields == 0);
my_bitmap_init(&table->def_read_set, (my_bitmap_map*) bitmaps, field_count,
FALSE);
@@ -17301,6 +17947,8 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
table->intersect_keys.init();
table->keys_in_use_for_query.init();
table->no_rows_with_nulls= param->force_not_null_cols;
+ table->update_handler= NULL;
+ table->check_unique_buf= NULL;
table->s= share;
init_tmp_table_share(thd, share, "", 0, "(temporary)", tmpname);
@@ -17331,7 +17979,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
}
if (not_all_columns)
{
- if (item->with_sum_func && type != Item::SUM_FUNC_ITEM)
+ if (item->with_sum_func() && type != Item::SUM_FUNC_ITEM)
{
if (item->used_tables() & OUTER_REF_TABLE_BIT)
item->update_used_tables();
@@ -17361,7 +18009,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
{
Item *tmp_item;
Field *new_field=
- create_tmp_field(thd, table, arg, arg->type(), &copy_func,
+ create_tmp_field(table, arg, &copy_func,
tmp_from_field, &default_field[fieldnr],
group != 0,not_all_columns,
distinct, false);
@@ -17411,7 +18059,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
else
{
/*
- The last parameter to create_tmp_field() is a bit tricky:
+ The last parameter to create_tmp_field_ex() is a bit tricky:
We need to set it to 0 in union, to get fill_record() to modify the
temporary table.
@@ -17425,7 +18073,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
*/
Field *new_field= (param->schema_table) ?
item->create_field_for_schema(thd, table) :
- create_tmp_field(thd, table, item, type, &copy_func,
+ create_tmp_field(table, item, &copy_func,
tmp_from_field, &default_field[fieldnr],
group != 0,
!force_copy_fields &&
@@ -17439,8 +18087,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
*/
item->marker == 4 || param->bit_fields_as_long,
force_copy_fields);
-
- if (unlikely(!new_field))
+ if (!new_field)
{
if (unlikely(thd->is_fatal_error))
goto err; // Got OOM
@@ -18021,12 +18668,10 @@ bool Virtual_tmp_table::add(List<Spvar_definition> &field_list)
while ((cdef= it++))
{
Field *tmp;
- if (!(tmp= cdef->make_field(s, in_use->mem_root, 0,
- (uchar*) (f_maybe_null(cdef->pack_flag) ? "" : 0),
- f_maybe_null(cdef->pack_flag) ? 1 : 0,
- &cdef->field_name)))
+ Record_addr addr(f_maybe_null(cdef->pack_flag));
+ if (!(tmp= cdef->make_field(s, in_use->mem_root, &addr, &cdef->field_name)))
DBUG_RETURN(true);
- add(tmp);
+ add(tmp);
}
DBUG_RETURN(false);
}
@@ -18146,7 +18791,7 @@ bool Virtual_tmp_table::sp_set_all_fields_from_item_list(THD *thd,
bool Virtual_tmp_table::sp_set_all_fields_from_item(THD *thd, Item *value)
{
- DBUG_ASSERT(value->fixed);
+ DBUG_ASSERT(value->is_fixed());
DBUG_ASSERT(value->cols() == s->fields);
for (uint i= 0; i < value->cols(); i++)
{
@@ -18568,7 +19213,7 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
We don't want this error to be converted to a warning, e.g. in case of
INSERT IGNORE ... SELECT.
*/
- table->file->print_error(error, MYF(ME_FATALERROR));
+ table->file->print_error(error, MYF(ME_FATAL));
DBUG_RETURN(1);
}
new_table= *table;
@@ -18591,7 +19236,7 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
new_table.no_rows= table->no_rows;
if (create_internal_tmp_table(&new_table, table->key_info, start_recinfo,
recinfo,
- thd->lex->select_lex.options |
+ thd->lex->first_select_lex()->options |
thd->variables.option_bits))
goto err2;
if (open_tmp_table(&new_table))
@@ -19365,6 +20010,8 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
if (!join_tab->preread_init_done && join_tab->preread_init())
DBUG_RETURN(NESTED_LOOP_ERROR);
+ join_tab->build_range_rowid_filter_if_needed();
+
join->return_tab= join_tab;
if (join_tab->last_inner)
@@ -20288,7 +20935,8 @@ test_if_quick_select(JOIN_TAB *tab)
int res= tab->select->test_quick_select(tab->join->thd, tab->keys,
(table_map) 0, HA_POS_ERROR, 0,
- FALSE, /*remove where parts*/FALSE);
+ FALSE, /*remove where parts*/FALSE,
+ FALSE);
if (tab->explain_plan && tab->explain_plan->range_checked_fer)
tab->explain_plan->range_checked_fer->collect_data(tab->select->quick);
@@ -20313,6 +20961,8 @@ int join_init_read_record(JOIN_TAB *tab)
if (tab->filesort && tab->sort_table()) // Sort table.
return 1;
+ tab->build_range_rowid_filter_if_needed();
+
DBUG_EXECUTE_IF("kill_join_init_read_record",
tab->join->thd->set_killed(KILL_QUERY););
if (tab->select && tab->select->quick && tab->select->quick->reset())
@@ -20327,6 +20977,8 @@ int join_init_read_record(JOIN_TAB *tab)
tab->join->thd->reset_killed(););
if (!tab->preread_init_done && tab->preread_init())
return 1;
+
+
if (init_read_record(&tab->read_record, tab->join->thd, tab->table,
tab->select, tab->filesort_result, 1,1, FALSE))
return 1;
@@ -22165,9 +22817,9 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
res= select->test_quick_select(tab->join->thd, new_ref_key_map, 0,
(tab->join->select_options &
OPTION_FOUND_ROWS) ?
- HA_POS_ERROR :
- tab->join->unit->select_limit_cnt,TRUE,
- TRUE, FALSE) <= 0;
+ HA_POS_ERROR :
+ tab->join->unit->select_limit_cnt,TRUE,
+ TRUE, FALSE, FALSE) <= 0;
if (res)
{
select->cond= save_cond;
@@ -22269,7 +22921,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
join->select_options & OPTION_FOUND_ROWS ?
HA_POS_ERROR :
join->unit->select_limit_cnt,
- TRUE, FALSE, FALSE);
+ TRUE, FALSE, FALSE, FALSE);
if (cond_saved)
select->cond= saved_cond;
@@ -22377,6 +23029,12 @@ check_reverse_order:
tab->use_quick=1;
tab->ref.key= -1;
tab->ref.key_parts=0; // Don't use ref key.
+ tab->range_rowid_filter_info= 0;
+ if (tab->rowid_filter)
+ {
+ delete tab->rowid_filter;
+ tab->rowid_filter= 0;
+ }
tab->read_first_record= join_init_read_record;
if (tab->is_using_loose_index_scan())
tab->join->tmp_table_param.precomputed_group_by= TRUE;
@@ -22763,7 +23421,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
if (unlikely(copy_blobs(first_field)))
{
my_message(ER_OUTOFMEMORY, ER_THD(thd,ER_OUTOFMEMORY),
- MYF(ME_FATALERROR));
+ MYF(ME_FATAL));
error=0;
goto err;
}
@@ -23040,12 +23698,7 @@ find_order_in_list(THD *thd, Ref_ptr_array ref_pointer_array,
uint counter;
enum_resolution_type resolution;
- /*
- Local SP variables may be int but are expressions, not positions.
- (And they can't be used before fix_fields is called for them).
- */
- if (order_item->type() == Item::INT_ITEM && order_item->basic_const_item() &&
- !from_window_spec)
+ if (order_item->is_order_clause_position() && !from_window_spec)
{ /* Order by position */
uint count;
if (order->counter_used)
@@ -23159,7 +23812,7 @@ find_order_in_list(THD *thd, Ref_ptr_array ref_pointer_array,
in spite of that fix_fields() calls find_item_in_list() one more
time.
- We check order_item->fixed because Item_func_group_concat can put
+ We check order_item->is_fixed() because Item_func_group_concat can put
arguments for which fix_fields already was called.
*/
if (order_item->fix_fields_if_needed_for_order_by(thd, order->item) ||
@@ -23269,7 +23922,7 @@ setup_group(THD *thd, Ref_ptr_array ref_pointer_array, TABLE_LIST *tables,
all_fields, true, true, from_window_spec))
return 1;
(*ord->item)->marker= UNDEF_POS; /* Mark found */
- if ((*ord->item)->with_sum_func && context_analysis_place == IN_GROUP_BY)
+ if ((*ord->item)->with_sum_func() && context_analysis_place == IN_GROUP_BY)
{
my_error(ER_WRONG_GROUP_FIELD, MYF(0), (*ord->item)->full_name());
return 1;
@@ -23428,7 +24081,7 @@ create_distinct_group(THD *thd, Ref_ptr_array ref_pointer_array,
li.rewind();
while ((item=li++))
{
- if (!item->const_item() && !item->with_sum_func && !item->marker)
+ if (!item->const_item() && !item->with_sum_func() && !item->marker)
{
/*
Don't put duplicate columns from the SELECT list into the
@@ -23525,9 +24178,11 @@ count_field_types(SELECT_LEX *select_lex, TMP_TABLE_PARAM *param,
}
else
{
+ With_sum_func_cache *cache= field->get_with_sum_func_cache();
param->func_count++;
- if (reset_with_sum_func)
- field->with_sum_func=0;
+ // "field" can point to Item_std_field, so "cache" can be NULL here.
+ if (reset_with_sum_func && cache)
+ cache->reset_with_sum_func();
}
}
}
@@ -23667,7 +24322,7 @@ void calc_group_buffer(TMP_TABLE_PARAM *param, ORDER *group)
{
/*
Group strings are taken as varstrings and require a length field.
- A field is not yet created by create_tmp_field()
+ A field is not yet created by create_tmp_field_ex()
and the sizes should match up.
*/
key_length+= group_item->max_length + HA_KEY_BLOB_LENGTH;
@@ -23677,7 +24332,7 @@ void calc_group_buffer(TMP_TABLE_PARAM *param, ORDER *group)
default:
/* This case should never be chosen */
DBUG_ASSERT(0);
- my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
+ my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATAL));
}
}
parts++;
@@ -23931,7 +24586,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
real_pos->real_type() == Item::SUBSELECT_ITEM ||
real_pos->type() == Item::CACHE_ITEM ||
real_pos->type() == Item::COND_ITEM) &&
- !real_pos->with_sum_func)
+ !real_pos->with_sum_func())
{ // Save for send fields
LEX_CSTRING real_name= pos->name;
pos= real_pos;
@@ -23942,7 +24597,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
on how the value is to be used: In some cases this may be an
argument in a group function, like: IF(ISNULL(col),0,COUNT(*))
*/
- if (!(pos=new (thd->mem_root) Item_copy_string(thd, pos)))
+ if (!(pos= pos->type_handler()->create_item_copy(thd, pos)))
goto err;
if (i < border) // HAVING, ORDER and GROUP BY
{
@@ -24140,7 +24795,7 @@ change_to_use_tmp_fields(THD *thd, Ref_ptr_array ref_pointer_array,
for (uint i= 0; (item= it++); i++)
{
Field *field;
- if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM)
+ if (item->with_sum_func() && item->type() != Item::SUM_FUNC_ITEM)
item_field= item;
else if (item->type() == Item::FIELD_ITEM)
{
@@ -24448,7 +25103,7 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab)
}
if (unlikely(thd->is_fatal_error))
DBUG_RETURN(TRUE);
- if (!cond->fixed)
+ if (!cond->is_fixed())
{
Item *tmp_item= (Item*) cond;
cond->fix_fields(thd, &tmp_item);
@@ -24667,7 +25322,7 @@ bool JOIN::rollup_init()
Marking the expression item as 'with_sum_func' will ensure this.
*/
if (changed)
- item->with_sum_func= 1;
+ item->get_with_sum_func_cache()->set_with_sum_func();
}
}
return 0;
@@ -25009,11 +25664,13 @@ int print_explain_message_line(select_result_sink *result,
item_list.push_back(item_null, mem_root);
/* `rows` */
+ StringBuffer<64> rows_str;
if (rows)
{
- item_list.push_back(new (mem_root) Item_int(thd, *rows,
- MY_INT64_NUM_DECIMAL_DIGITS),
- mem_root);
+ rows_str.append_ulonglong((ulonglong)(*rows));
+ item_list.push_back(new (mem_root)
+ Item_string_sys(thd, rows_str.ptr(),
+ rows_str.length()), mem_root);
}
else
item_list.push_back(item_null, mem_root);
@@ -25113,7 +25770,7 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
filesort)))
return 1;
}
-
+ // psergey-todo: data for filtering!
tracker= &eta->tracker;
jbuf_tracker= &eta->jbuf_tracker;
@@ -25155,7 +25812,8 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
*/
if (real_table->merged_for_insert)
{
- TABLE_LIST *view_child= real_table->view->select_lex.table_list.first;
+ TABLE_LIST *view_child=
+ real_table->view->first_select_lex()->table_list.first;
for (;view_child; view_child= view_child->next_local)
{
if (view_child->table == table)
@@ -25213,6 +25871,22 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
// psergey-todo: ^ check for error return code
/* Build "key", "key_len", and "ref" */
+
+ if (rowid_filter)
+ {
+ Range_rowid_filter *range_filter= (Range_rowid_filter *) rowid_filter;
+ QUICK_SELECT_I *quick= range_filter->get_select()->quick;
+
+ Explain_rowid_filter *erf= new (thd->mem_root) Explain_rowid_filter;
+ erf->quick= quick->get_explain(thd->mem_root);
+ erf->selectivity= range_rowid_filter_info->selectivity;
+ erf->rows= quick->records;
+ if (!(erf->tracker= new Rowid_filter_tracker(thd->lex->analyze_stmt)))
+ return 1;
+ rowid_filter->set_tracker(erf->tracker);
+ eta->rowid_filter= erf;
+ }
+
if (tab_type == JT_NEXT)
{
key_info= table->key_info+index;
@@ -25608,8 +26282,9 @@ int JOIN::save_explain_data_intern(Explain_query *output,
{
JOIN *join= this; /* Legacy: this code used to be a non-member function */
DBUG_ENTER("JOIN::save_explain_data_intern");
- DBUG_PRINT("info", ("Select %p, type %s, message %s",
- join->select_lex, join->select_lex->type,
+ DBUG_PRINT("info", ("Select %p (%u), type %s, message %s",
+ join->select_lex, join->select_lex->select_number,
+ join->select_lex->type,
message ? message : "NULL"));
DBUG_ASSERT(have_query_plan == QEP_AVAILABLE);
/* fake_select_lex is created/printed by Explain_union */
@@ -25635,7 +26310,7 @@ int JOIN::save_explain_data_intern(Explain_query *output,
explain->select_id= join->select_lex->select_number;
explain->select_type= join->select_lex->type;
- explain->linkage= select_lex->linkage;
+ explain->linkage= select_lex->get_linkage();
explain->using_temporary= need_tmp;
explain->using_filesort= need_order_arg;
/* Setting explain->message means that all other members are invalid */
@@ -25658,7 +26333,7 @@ int JOIN::save_explain_data_intern(Explain_query *output,
explain->select_id= select_lex->select_number;
explain->select_type= select_lex->type;
- explain->linkage= select_lex->linkage;
+ explain->linkage= select_lex->get_linkage();
explain->using_temporary= need_tmp;
explain->using_filesort= need_order_arg;
explain->message= "Storage engine handles GROUP BY";
@@ -25681,7 +26356,7 @@ int JOIN::save_explain_data_intern(Explain_query *output,
join->select_lex->set_explain_type(true);
xpl_sel->select_id= join->select_lex->select_number;
xpl_sel->select_type= join->select_lex->type;
- xpl_sel->linkage= select_lex->linkage;
+ xpl_sel->linkage= select_lex->get_linkage();
if (select_lex->master_unit()->derived)
xpl_sel->connection_type= Explain_node::EXPLAIN_NODE_DERIVED;
@@ -25825,7 +26500,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order,
for such queries, we'll get here before having called
subquery_expr->fix_fields(), which will cause failure to
*/
- if (unit->item && !unit->item->fixed)
+ if (unit->item && !unit->item->is_fixed())
{
Item *ref= unit->item;
if (unit->item->fix_fields(thd, &ref))
@@ -25857,6 +26532,7 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
DBUG_ENTER("mysql_explain_union");
bool res= 0;
SELECT_LEX *first= unit->first_select();
+ bool is_pushed_union= unit->derived && unit->derived->pushdown_derived;
for (SELECT_LEX *sl= first; sl; sl= sl->next_select())
{
@@ -25874,7 +26550,10 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
}
if (!(res= unit->prepare(unit->derived, result,
SELECT_NO_UNLOCK | SELECT_DESCRIBE)))
- res= unit->exec();
+ {
+ if (!is_pushed_union)
+ res= unit->exec();
+ }
}
else
{
@@ -25892,6 +26571,13 @@ bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result)
first->options | thd->variables.option_bits | SELECT_DESCRIBE,
result, unit, first);
}
+
+ if (unit->derived && unit->derived->pushdown_derived)
+ {
+ delete unit->derived->pushdown_derived;
+ unit->derived->pushdown_derived= NULL;
+ }
+
DBUG_RETURN(res || thd->is_error());
}
@@ -26149,7 +26835,8 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
// A view
if (!(belong_to_view &&
- belong_to_view->compact_view_format))
+ belong_to_view->compact_view_format) &&
+ !(query_type & QT_ITEM_IDENT_SKIP_DB_NAMES))
{
append_identifier(thd, str, &view_db);
str->append('.');
@@ -26178,7 +26865,8 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str,
// A normal table
if (!(belong_to_view &&
- belong_to_view->compact_view_format))
+ belong_to_view->compact_view_format) &&
+ !(query_type & QT_ITEM_IDENT_SKIP_DB_NAMES))
{
append_identifier(thd, str, &db);
str->append('.');
@@ -26265,6 +26953,18 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
{
str->append("/* select#");
str->append_ulonglong(select_number);
+ if (thd->lex->describe & DESCRIBE_EXTENDED2)
+ {
+ str->append("/");
+ str->append_ulonglong(nest_level);
+
+ if (master_unit()->fake_select_lex &&
+ master_unit()->first_select() == this)
+ {
+ str->append(" Filter Select: ");
+ master_unit()->fake_select_lex->print(thd, str, query_type);
+ }
+ }
str->append(" */ ");
}
@@ -26296,18 +26996,21 @@ void st_select_lex::print(THD *thd, String *str, enum_query_type query_type)
str->append(STRING_WITH_LEN("sql_buffer_result "));
if (options & OPTION_FOUND_ROWS)
str->append(STRING_WITH_LEN("sql_calc_found_rows "));
- switch (sql_cache)
+ if (this == parent_lex->first_select_lex())
{
- case SQL_NO_CACHE:
- str->append(STRING_WITH_LEN("sql_no_cache "));
- break;
- case SQL_CACHE:
- str->append(STRING_WITH_LEN("sql_cache "));
- break;
- case SQL_CACHE_UNSPECIFIED:
- break;
- default:
- DBUG_ASSERT(0);
+ switch (parent_lex->sql_cache)
+ {
+ case LEX::SQL_NO_CACHE:
+ str->append(STRING_WITH_LEN("sql_no_cache "));
+ break;
+ case LEX::SQL_CACHE:
+ str->append(STRING_WITH_LEN("sql_cache "));
+ break;
+ case LEX::SQL_CACHE_UNSPECIFIED:
+ break;
+ default:
+ DBUG_ASSERT(0);
+ }
}
//Item List
@@ -26697,16 +27400,22 @@ void JOIN::cache_const_exprs()
/*
- Get a cost of reading rows_limit rows through index keynr.
+ Get the cost of using index keynr to read #LIMIT matching rows
@detail
- If there is a quick select, we try to use it.
- if there is a ref(const) access, we try to use it, too.
- quick and ref(const) use different cost formulas, so if both are possible
we should make a cost-based choice.
-
+
+ rows_limit is the number of rows we would need to read when using a full
+ index scan. This is generally higher than the N from "LIMIT N" clause,
+ because there's a WHERE condition (a part of which is used to construct a
+ range access we are considering using here)
+
@param tab JOIN_TAB with table access (is NULL for single-table
UPDATE/DELETE)
+ @param rows_limit See explanation above
@param read_time OUT Cost of reading using quick or ref(const) access.
@@ -26719,6 +27428,7 @@ void JOIN::cache_const_exprs()
static bool get_range_limit_read_cost(const JOIN_TAB *tab,
const TABLE *table,
+ ha_rows table_records,
uint keynr,
ha_rows rows_limit,
double *read_time)
@@ -26785,8 +27495,32 @@ static bool get_range_limit_read_cost(const JOIN_TAB *tab,
}
}
}
+
+ /*
+ Consider an example:
+
+ SELECT *
+ FROM t1
+ WHERE key1 BETWEEN 10 AND 20 AND col2='foo'
+ ORDER BY key1 LIMIT 10
+
+ If we were using a full index scan on key1, we would need to read this
+ many rows to get 10 matches:
+
+ 10 / selectivity(key1 BETWEEN 10 AND 20 AND col2='foo')
+
+ This is the number we get in rows_limit.
+ But we intend to use range access on key1. The rows returned by quick
+ select will satisfy the range part of the condition,
+ "key1 BETWEEN 10 and 20". We will still need to filter them with
+ the remainder condition, (col2='foo').
+
+ The selectivity of the range access is (best_rows/table_records). We need
+ to scale rows_limit down by this selectivity:
+ */
+ double rows_limit_for_quick= rows_limit * (best_rows / table_records);
- if (best_rows > rows_limit)
+ if (best_rows > rows_limit_for_quick)
{
/*
LIMIT clause specifies that we will need to read fewer records than
@@ -26795,7 +27529,7 @@ static bool get_range_limit_read_cost(const JOIN_TAB *tab,
only need 1/3rd of records, it will cost us 1/3rd of quick select's
read time)
*/
- best_cost *= rows_limit / best_rows;
+ best_cost *= rows_limit_for_quick / best_rows;
}
*read_time= best_cost;
res= true;
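To make the new discounting concrete, here is a small worked example of the formula introduced above. All input numbers are made up purely for illustration; only the arithmetic mirrors the hunk (rows_limit_for_quick = rows_limit * best_rows/table_records, then best_cost scaled by rows_limit_for_quick/best_rows).

    #include <iostream>

    int main()
    {
      double table_records= 100000;  // rows in t1
      double best_rows=     5000;    // rows the range scan on key1 is expected to return
      double rows_limit=    400;     // rows a full index scan would read to find LIMIT matches
      double best_cost=     120.0;   // cost of reading the whole range

      // Selectivity of the range access, applied to the limit:
      double rows_limit_for_quick= rows_limit * (best_rows / table_records); // 400 * 0.05 = 20

      if (best_rows > rows_limit_for_quick)
        best_cost *= rows_limit_for_quick / best_rows;   // 120 * 20/5000 = 0.48

      std::cout << "rows_limit_for_quick=" << rows_limit_for_quick
                << " scaled cost=" << best_cost << "\n";
      return 0;
    }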
@@ -26867,6 +27601,12 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
bool group= join && join->group && order == join->group_list;
ha_rows refkey_rows_estimate= table->quick_condition_rows;
const bool has_limit= (select_limit_arg != HA_POS_ERROR);
+ THD* thd= join ? join->thd : table->in_use;
+
+ Json_writer_object trace_wrapper(thd);
+ Json_writer_object trace_cheaper_ordering(
+ thd, "reconsidering_access_paths_for_index_ordering");
+ trace_cheaper_ordering.add("clause", group ? "GROUP BY" : "ORDER BY");
/*
If not used with LIMIT, only use keys if the whole query can be
@@ -26896,16 +27636,21 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
uint tablenr= (uint)(tab - join->join_tab);
read_time= join->best_positions[tablenr].read_time;
for (uint i= tablenr+1; i < join->table_count; i++)
+ {
fanout*= join->best_positions[i].records_read; // fanout is always >= 1
+ // But selectivity is <= 1:
+ fanout*= join->best_positions[i].cond_selectivity;
+ }
}
else
read_time= table->file->scan_time();
+ trace_cheaper_ordering.add("fanout", fanout);
/*
TODO: add cost of sorting here.
*/
read_time += COST_EPS;
-
+ trace_cheaper_ordering.add("read_time", read_time);
/*
Calculate the selectivity of the ref_key for REF_ACCESS. For
RANGE_ACCESS we use table->quick_condition_rows.
@@ -26922,11 +27667,20 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
set_if_bigger(refkey_rows_estimate, 1);
}
+ if (tab)
+ trace_cheaper_ordering.add_table_name(tab);
+ else
+ trace_cheaper_ordering.add_table_name(table);
+ trace_cheaper_ordering.add("rows_estimation", refkey_rows_estimate);
+
+ Json_writer_array possible_keys(thd,"possible_keys");
for (nr=0; nr < table->s->keys ; nr++)
{
int direction;
ha_rows select_limit= select_limit_arg;
uint used_key_parts= 0;
+ Json_writer_object possible_key(thd);
+ possible_key.add("index", table->key_info[nr].name);
if (keys.is_set(nr) &&
(direction= test_if_order_by_key(join, order, table, nr,
@@ -26939,6 +27693,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
*/
DBUG_ASSERT (ref_key != (int) nr);
+ possible_key.add("can_resolve_order", true);
bool is_covering= (table->covering_keys.is_set(nr) ||
(table->file->index_flags(nr, 0, 1) &
HA_CLUSTERED_INDEX));
@@ -27034,6 +27789,24 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
*/
select_limit= (ha_rows) (select_limit < fanout ?
1 : select_limit/fanout);
+
+ /*
+ refkey_rows_estimate is E(#rows) produced by the table access
+ strategy that was picked without regard to ORDER BY ... LIMIT.
+
+ It will be used as the source of selectivity data.
+ Use table->cond_selectivity as a better estimate which includes
+ condition selectivity too.
+ */
+ {
+ // we use MIN(...), because "Using LooseScan" queries have
+ // cond_selectivity=1 while refkey_rows_estimate has a better
+ // estimate.
+ refkey_rows_estimate= MY_MIN(refkey_rows_estimate,
+ ha_rows(table_records *
+ table->cond_selectivity));
+ }
+
/*
We assume that each of the tested indexes is not correlated
with ref_key. Thus, to select first N records we have to scan
@@ -27044,12 +27817,14 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
N/(refkey_rows_estimate/table_records) > table_records
<=> N > refkey_rows_estimate.
*/
+
if (select_limit > refkey_rows_estimate)
select_limit= table_records;
else
select_limit= (ha_rows) (select_limit *
(double) table_records /
refkey_rows_estimate);
+ possible_key.add("updated_limit", select_limit);
rec_per_key= keyinfo->actual_rec_per_key(keyinfo->user_defined_key_parts-1);
set_if_bigger(rec_per_key, 1);
/*
@@ -27066,12 +27841,14 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
index_scan_time= select_limit/rec_per_key *
MY_MIN(rec_per_key, table->file->scan_time());
double range_scan_time;
- if (get_range_limit_read_cost(tab, table, nr, select_limit,
- &range_scan_time))
+ if (get_range_limit_read_cost(tab, table, table_records, nr,
+ select_limit, &range_scan_time))
{
+ possible_key.add("range_scan_time", range_scan_time);
if (range_scan_time < index_scan_time)
index_scan_time= range_scan_time;
}
+ possible_key.add("index_scan_time", index_scan_time);
if ((ref_key < 0 && (group || table->force_index || is_covering)) ||
index_scan_time < read_time)
@@ -27082,17 +27859,29 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
table->covering_keys.is_set(ref_key)) ?
refkey_rows_estimate :
HA_POS_ERROR;
- if ((is_best_covering && !is_covering) ||
- (is_covering && refkey_select_limit < select_limit))
+ if (is_best_covering && !is_covering)
+ {
+ possible_key.add("chosen", false);
+ possible_key.add("cause", "covering index already found");
continue;
+ }
+
+ if (is_covering && refkey_select_limit < select_limit)
+ {
+ possible_key.add("chosen", false);
+ possible_key.add("cause", "ref estimates better");
+ continue;
+ }
if (table->quick_keys.is_set(nr))
quick_records= table->quick_rows[nr];
+ possible_key.add("records", quick_records);
if (best_key < 0 ||
(select_limit <= MY_MIN(quick_records,best_records) ?
keyinfo->user_defined_key_parts < best_key_parts :
quick_records < best_records) ||
(!is_best_covering && is_covering))
{
+ possible_key.add("chosen", true);
best_key= nr;
best_key_parts= keyinfo->user_defined_key_parts;
if (saved_best_key_parts)
@@ -27102,8 +27891,47 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
best_key_direction= direction;
best_select_limit= select_limit;
}
+ else
+ {
+ char const *cause;
+ possible_key.add("chosen", false);
+ if (is_covering)
+ cause= "covering index already found";
+ else
+ {
+ if (select_limit <= MY_MIN(quick_records,best_records))
+ cause= "keyparts greater than the current best keyparts";
+ else
+ cause= "rows estimation greater";
+ }
+ possible_key.add("cause", cause);
+ }
+ }
+ else
+ {
+ possible_key.add("usable", false);
+ possible_key.add("cause", "cost");
}
- }
+ }
+ else
+ {
+ possible_key.add("usable", false);
+ if (!group && select_limit == HA_POS_ERROR)
+ possible_key.add("cause", "order by without limit");
+ }
+ }
+ else
+ {
+ if (keys.is_set(nr))
+ {
+ possible_key.add("can_resolve_order", false);
+ possible_key.add("cause", "order can not be resolved by key");
+ }
+ else
+ {
+ possible_key.add("can_resolve_order", false);
+ possible_key.add("cause", "not usable index for the query");
+ }
}
}
@@ -27535,6 +28363,46 @@ Item *remove_pushed_top_conjuncts(THD *thd, Item *cond)
return cond;
}
+
+/**
+ @brief
+ Look for provision of the select_handler interface by a foreign engine
+
+ @param thd The thread handler
+
+ @details
+ The function checks that this is an upper level select and if so looks
+ through its tables searching for one whose handlerton owns a
+ create_select call-back function. If the call of this function returns
+ a select_handler interface object then the server will push the select
+ query into this engine.
+ It is the responsibility of the create_select call-back function to
+ check whether the engine can execute the query.
+
+ @retval the found select_handler if the search is successful
+ 0 otherwise
+*/
+
+select_handler *SELECT_LEX::find_select_handler(THD *thd)
+{
+ if (next_select())
+ return 0;
+ if (master_unit()->outer_select())
+ return 0;
+ for (TABLE_LIST *tbl= join->tables_list; tbl; tbl= tbl->next_local)
+ {
+ if (!tbl->table)
+ continue;
+ handlerton *ht= tbl->table->file->partition_ht();
+ if (!ht->create_select)
+ continue;
+ select_handler *sh= ht->create_select(thd, this);
+ return sh;
+ }
+ return 0;
+}
+
+
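The added SELECT_LEX::find_select_handler() probes each table's handlerton for a create_select hook. As a rough illustration of that probing loop (the real call site and the select_handler interface are not part of this hunk, so everything below is an invented stand-in, not server code):

    #include <vector>
    #include <iostream>

    struct SelectHandlerStub { const char *engine; };
    struct HandlertonStub
    {
      // An engine that cannot handle the query returns nullptr from its hook.
      SelectHandlerStub *(*create_select)(const char *query);
    };
    struct TableStub { HandlertonStub *ht; };

    static SelectHandlerStub *accept_all(const char *)
    { static SelectHandlerStub sh{"demo_engine"}; return &sh; }

    static SelectHandlerStub *find_select_handler(const std::vector<TableStub> &tables,
                                                  const char *query)
    {
      for (const TableStub &tbl : tables)
      {
        if (!tbl.ht || !tbl.ht->create_select)
          continue;                            // engine offers no pushdown hook
        return tbl.ht->create_select(query);   // first engine with a hook decides
      }
      return nullptr;
    }

    int main()
    {
      HandlertonStub ht{accept_all};
      std::vector<TableStub> tables{ {nullptr}, {&ht} };
      SelectHandlerStub *sh= find_select_handler(tables, "SELECT 1");
      std::cout << (sh ? sh->engine : "no pushdown") << "\n";
      return 0;
    }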
/**
@} (end of group Query_Optimizer)
*/
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 88959f84fdb..482b4ff9608 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -225,6 +225,10 @@ Next_select_func setup_end_select_func(JOIN *join, JOIN_TAB *tab);
int rr_sequential(READ_RECORD *info);
int rr_sequential_and_unpack(READ_RECORD *info);
Item *remove_pushed_top_conjuncts(THD *thd, Item *cond);
+Item *and_new_conditions_to_optimized_cond(THD *thd, Item *cond,
+ COND_EQUAL **cond_eq,
+ List<Item> &new_conds,
+ Item::cond_result *cond_value);
#include "sql_explain.h"
@@ -510,6 +514,18 @@ typedef struct st_join_table {
bool preread_init_done;
+ /*
+ Cost info to the range filter used when joining this join table
+ (Defined when the best join order has been already chosen)
+ */
+ Range_rowid_filter_cost_info *range_rowid_filter_info;
+ /* Rowid filter to be used when joining this join table */
+ Rowid_filter *rowid_filter;
+ /* Becomes true just after the used range filter has been built / filled */
+ bool is_rowid_filter_built;
+
+ void build_range_rowid_filter_if_needed();
+
void cleanup();
inline bool is_using_loose_index_scan()
{
@@ -885,6 +901,10 @@ public:
};
+class Range_rowid_filter_cost_info;
+class Rowid_filter;
+
+
/**
Information about a position of table within a join order. Used in join
optimization.
@@ -967,6 +987,10 @@ typedef struct st_position
/* Info on splitting plan used at this position */
SplM_plan_info *spl_plan;
+
+ /* Cost info for the range filter used at this position */
+ Range_rowid_filter_cost_info *range_rowid_filter_info;
+
} POSITION;
typedef Bounds_checked_array<Item_null_result*> Item_null_array;
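The new JOIN_TAB members above (rowid_filter, is_rowid_filter_built, build_range_rowid_filter_if_needed()) together with the calls added in sub_select() and join_init_read_record() suggest a build-once-before-first-read pattern. The sketch below illustrates that guard pattern only; RowidFilterStub and its fill()/check() members are invented for the sketch and are not the server's Rowid_filter API.

    #include <set>
    #include <iostream>

    struct RowidFilterStub
    {
      std::set<long> rowids;
      void fill() { rowids= {1, 4, 9}; }               // imagine: run the range scan
      bool check(long rowid) const { return rowids.count(rowid) != 0; }
    };

    struct JoinTabStub
    {
      RowidFilterStub *rowid_filter= nullptr;
      bool is_rowid_filter_built= false;

      void build_range_rowid_filter_if_needed()
      {
        if (rowid_filter && !is_rowid_filter_built)
        {
          rowid_filter->fill();
          is_rowid_filter_built= true;                 // build exactly once
        }
      }
    };

    int main()
    {
      RowidFilterStub filter;
      JoinTabStub tab;
      tab.rowid_filter= &filter;
      tab.build_range_rowid_filter_if_needed();
      tab.build_range_rowid_filter_if_needed();        // second call is a no-op
      std::cout << "rowid 4 passes: " << filter.check(4) << "\n";
      return 0;
    }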
@@ -1477,6 +1501,11 @@ public:
Dynamic_array<KEYUSE_EXT> *ext_keyuses_for_splitting;
JOIN_TAB *sort_and_group_aggr_tab;
+ /*
+ Flag is set to true if select_lex was found to be degenerated before
+ the optimize_cond() call in JOIN::optimize_inner() method.
+ */
+ bool is_orig_degenerated;
JOIN(THD *thd_arg, List<Item> &fields_arg, ulonglong select_options_arg,
select_result *result_arg)
@@ -1572,6 +1601,7 @@ public:
emb_sjm_nest= NULL;
sjm_lookup_tables= 0;
sjm_scan_tables= 0;
+ is_orig_degenerated= false;
}
/* True if the plan guarantees that it will be returned zero or one row */
@@ -1610,6 +1640,8 @@ public:
bool optimize_unflattened_subqueries();
bool optimize_constant_subqueries();
int init_join_caches();
+ bool make_range_rowid_filters();
+ bool init_range_rowid_filters();
bool make_sum_func_list(List<Item> &all_fields, List<Item> &send_fields,
bool before_group_by, bool recompute= FALSE);
@@ -1815,10 +1847,6 @@ bool setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
void copy_fields(TMP_TABLE_PARAM *param);
bool copy_funcs(Item **func_ptr, const THD *thd);
uint find_shortest_key(TABLE *table, const key_map *usable_keys);
-Field* create_tmp_field_from_field(THD *thd, Field* org_field,
- LEX_CSTRING *name, TABLE *table,
- Item_field *item);
-
bool is_indexed_agg_distinct(JOIN *join, List<Item_field> *out_args);
/* functions from opt_sum.cc */
@@ -2070,12 +2098,6 @@ bool mysql_select(THD *thd,
void free_underlaid_joins(THD *thd, SELECT_LEX *select);
bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit,
select_result *result);
-Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type,
- Item ***copy_func, Field **from_field,
- Field **def_field,
- bool group, bool modify_item,
- bool table_cant_handle_bit_fields,
- bool make_copy_field);
/*
General routine to change field->ptr of a NULL-terminated array of Field
@@ -2343,7 +2365,7 @@ Item_equal *find_item_equal(COND_EQUAL *cond_equal, Field *field,
extern bool test_if_ref(Item *,
Item_field *left_item,Item *right_item);
-inline bool optimizer_flag(THD *thd, uint flag)
+inline bool optimizer_flag(THD *thd, ulonglong flag)
{
return (thd->variables.optimizer_switch & flag);
}
@@ -2449,9 +2471,53 @@ public:
~Pushdown_query() { delete handler; }
/* Function that calls the above scan functions */
- int execute(JOIN *join);
+ int execute(JOIN *);
+};
+
+class derived_handler;
+
+class Pushdown_derived: public Sql_alloc
+{
+private:
+ bool is_analyze;
+public:
+ TABLE_LIST *derived;
+ derived_handler *handler;
+
+ Pushdown_derived(TABLE_LIST *tbl, derived_handler *h);
+
+ ~Pushdown_derived();
+
+ int execute();
+};
+
+
+class select_handler;
+
+
+class Pushdown_select: public Sql_alloc
+{
+private:
+ bool is_analyze;
+ List<Item> result_columns;
+ bool send_result_set_metadata();
+ bool send_data();
+ bool send_eof();
+
+public:
+ SELECT_LEX *select;
+ select_handler *handler;
+
+ Pushdown_select(SELECT_LEX *sel, select_handler *h);
+
+ ~Pushdown_select();
+
+ bool init();
+
+ int execute();
};
+
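Based only on the interface declared above (constructor taking the select and its handler, init(), execute(), destructor), the driving sequence for a pushed-down select would presumably look like the sketch below. The actual call sites are not in this hunk, so this is an assumption; SelectStub and HandlerStub are invented so the sketch compiles on its own.

    #include <iostream>

    struct SelectStub {};
    struct HandlerStub {};

    class Pushdown_select_sketch
    {
      SelectStub *select;
      HandlerStub *handler;
    public:
      Pushdown_select_sketch(SelectStub *sel, HandlerStub *h) : select(sel), handler(h) {}
      bool init()    { std::cout << "send result set metadata\n"; return false; } // false = ok
      int  execute() { std::cout << "rows fetched from the engine\n"; return 0; }
    };

    int main()
    {
      SelectStub sel;
      HandlerStub h;
      Pushdown_select_sketch pushed(&sel, &h);
      if (!pushed.init())
        return pushed.execute();
      return 1;
    }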
bool test_if_order_compatible(SQL_I_List<ORDER> &a, SQL_I_List<ORDER> &b);
int test_if_group_changed(List<Cached_item> &list);
int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort);
@@ -2459,4 +2525,13 @@ int create_sort_index(THD *thd, JOIN *join, JOIN_TAB *tab, Filesort *fsort);
JOIN_TAB *first_explain_order_tab(JOIN* join);
JOIN_TAB *next_explain_order_tab(JOIN* join, JOIN_TAB* tab);
+bool check_simple_equality(THD *thd, const Item::Context &ctx,
+ Item *left_item, Item *right_item,
+ COND_EQUAL *cond_equal);
+
+void propagate_new_equalities(THD *thd, Item *cond,
+ List<Item_equal> *new_equalities,
+ COND_EQUAL *inherited,
+ bool *is_simplifiable_cond);
+
#endif /* SQL_SELECT_INCLUDED */
diff --git a/sql/sql_sequence.cc b/sql/sql_sequence.cc
index d672e7368d3..9f17590a315 100644
--- a/sql/sql_sequence.cc
+++ b/sql/sql_sequence.cc
@@ -220,8 +220,8 @@ bool check_sequence_fields(LEX *lex, List<Create_field> *fields)
err:
my_error(ER_SEQUENCE_INVALID_TABLE_STRUCTURE, MYF(0),
- lex->select_lex.table_list.first->db.str,
- lex->select_lex.table_list.first->table_name.str, reason);
+ lex->first_select_lex()->table_list.first->db.str,
+ lex->first_select_lex()->table_list.first->table_name.str, reason);
DBUG_RETURN(TRUE);
}
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index a0ac96a1c0d..a0cde02be65 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -63,6 +63,7 @@
#include "ha_partition.h"
#endif
#include "transaction.h"
+#include "opt_trace.h"
enum enum_i_s_events_fields
{
@@ -1034,9 +1035,9 @@ find_files(THD *thd, Dynamic_array<LEX_CSTRING*> *files, LEX_CSTRING *db,
if (!(dirp = my_dir(path, MY_THREAD_SPECIFIC | (db ? 0 : MY_WANT_STAT))))
{
if (my_errno == ENOENT)
- my_error(ER_BAD_DB_ERROR, MYF(ME_BELL | ME_WAITTANG), db->str);
+ my_error(ER_BAD_DB_ERROR, MYF(0), db->str);
else
- my_error(ER_CANT_READ_DIR, MYF(ME_BELL | ME_WAITTANG), path, my_errno);
+ my_error(ER_CANT_READ_DIR, MYF(0), path, my_errno);
DBUG_RETURN(FIND_FILES_DIR);
}
@@ -1563,7 +1564,6 @@ void
mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild)
{
TABLE *table;
- MEM_ROOT *mem_root= thd->mem_root;
DBUG_ENTER("mysqld_list_fields");
DBUG_PRINT("enter",("table: %s", table_list->table_name.str));
@@ -1573,28 +1573,18 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild)
DBUG_VOID_RETURN;
table= table_list->table;
- List<Item> field_list;
+ List<Field> field_list;
Field **ptr,*field;
for (ptr=table->field ; (field= *ptr); ptr++)
{
if (!wild || !wild[0] ||
!wild_case_compare(system_charset_info, field->field_name.str,wild))
- {
- if (table_list->view)
- field_list.push_back(new (mem_root)
- Item_ident_for_show(thd, field,
- table_list->view_db.str,
- table_list->view_name.str),
- mem_root);
- else
- field_list.push_back(new (mem_root) Item_field(thd, field), mem_root);
- }
+ field_list.push_back(field);
}
restore_record(table, s->default_values); // Get empty record
table->use_all_columns();
- if (thd->protocol->send_result_set_metadata(&field_list,
- Protocol::SEND_DEFAULTS))
+ if (thd->protocol->send_list_fields(&field_list, table_list))
DBUG_VOID_RETURN;
my_eof(thd);
DBUG_VOID_RETURN;
@@ -2089,6 +2079,22 @@ end_options:
append_directory(thd, packet, "INDEX", create_info.index_file_name);
}
+static void append_period(THD *thd, String *packet, const LEX_CSTRING &start,
+ const LEX_CSTRING &end, const LEX_CSTRING &period,
+ bool ident)
+{
+ packet->append(STRING_WITH_LEN(",\n PERIOD FOR "));
+ if (ident)
+ append_identifier(thd, packet, period.str, period.length);
+ else
+ packet->append(period);
+ packet->append(STRING_WITH_LEN(" ("));
+ append_identifier(thd, packet, start.str, start.length);
+ packet->append(STRING_WITH_LEN(", "));
+ append_identifier(thd, packet, end.str, end.length);
+ packet->append(STRING_WITH_LEN(")"));
+}
+
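To show the clause shape append_period() produces, here is a standalone rendering of the same formatting; identifier quoting via append_identifier() is reduced to plain backticks for brevity, so this is a simplified illustration rather than the server routine.

    #include <iostream>
    #include <string>

    static std::string append_period_sketch(const std::string &start,
                                            const std::string &end,
                                            const std::string &period,
                                            bool ident)
    {
      std::string packet= ",\n  PERIOD FOR ";
      packet+= ident ? "`" + period + "`" : period;    // period name quoted only when ident
      packet+= " (`" + start + "`, `" + end + "`)";    // start/end are always identifiers
      return packet;
    }

    int main()
    {
      // System-versioning period: keyword text, not quoted (ident == false)
      std::cout << append_period_sketch("row_start", "row_end", "SYSTEM_TIME", false) << "\n";
      // Application-time period: user-chosen name, quoted (ident == true)
      std::cout << append_period_sketch("date_from", "date_to", "validity", true) << "\n";
      return 0;
    }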
/*
Build a CREATE TABLE statement for a table.
@@ -2127,6 +2133,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
KEY *key_info;
TABLE *table= table_list->table;
TABLE_SHARE *share= table->s;
+ TABLE_SHARE::period_info_t &period= share->period;
sql_mode_t sql_mode= thd->variables.sql_mode;
bool explicit_fields= false;
bool foreign_db_mode= sql_mode & (MODE_POSTGRESQL | MODE_ORACLE |
@@ -2227,6 +2234,12 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
field->sql_type(type);
packet->append(type.ptr(), type.length(), system_charset_info);
+ DBUG_EXECUTE_IF("sql_type",
+ packet->append(" /* ");
+ packet->append(field->type_handler()->version().ptr());
+ packet->append(" */ ");
+ );
+
if (field->has_charset() && !(sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)))
{
if (field->charset() != share->table_charset)
@@ -2326,7 +2339,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
}
- key_info= table->key_info;
+ key_info= table->s->key_info;
primary_key= share->primary_key;
for (uint i=0 ; i < share->keys ; i++,key_info++)
@@ -2381,7 +2394,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
}
}
packet->append(')');
- store_key_options(thd, packet, table, key_info);
+ store_key_options(thd, packet, table, &table->key_info[i]);
if (key_info->parser)
{
LEX_CSTRING *parser_name= plugin_name(key_info->parser);
@@ -2403,11 +2416,8 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
DBUG_ASSERT(!explicit_fields || fe->invisible < INVISIBLE_SYSTEM);
if (explicit_fields)
{
- packet->append(STRING_WITH_LEN(",\n PERIOD FOR SYSTEM_TIME ("));
- append_identifier(thd,packet,fs->field_name.str, fs->field_name.length);
- packet->append(STRING_WITH_LEN(", "));
- append_identifier(thd,packet,fe->field_name.str, fe->field_name.length);
- packet->append(STRING_WITH_LEN(")"));
+ append_period(thd, packet, fs->field_name, fe->field_name,
+ table->s->vers.name, false);
}
else
{
@@ -2416,6 +2426,15 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
}
}
+ if (period.name)
+ {
+ append_period(thd, packet,
+ period.start_field(share)->field_name,
+ period.end_field(share)->field_name,
+ period.name, true);
+ }
+
+
/*
Get possible foreign key definitions stored in InnoDB and append them
to the CREATE TABLE statement
@@ -2433,8 +2452,12 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
for (uint i= share->field_check_constraints;
i < share->table_check_constraints ; i++)
{
- StringBuffer<MAX_FIELD_WIDTH> str(&my_charset_utf8mb4_general_ci);
Virtual_column_info *check= table->check_constraints[i];
+ // period constraint is implicit
+ if (share->period.constr_name.streq(check->name))
+ continue;
+
+ StringBuffer<MAX_FIELD_WIDTH> str(&my_charset_utf8mb4_general_ci);
check->print(&str);
packet->append(STRING_WITH_LEN(",\n "));
@@ -2506,7 +2529,8 @@ static void store_key_options(THD *thd, String *packet, TABLE *table,
if (key_info->algorithm == HA_KEY_ALG_BTREE)
packet->append(STRING_WITH_LEN(" USING BTREE"));
- if (key_info->algorithm == HA_KEY_ALG_HASH)
+ if (key_info->algorithm == HA_KEY_ALG_HASH ||
+ key_info->algorithm == HA_KEY_ALG_LONG_HASH)
packet->append(STRING_WITH_LEN(" USING HASH"));
/* send USING only in non-default case: non-spatial rtree */
@@ -2774,13 +2798,111 @@ static const char *thread_state_info(THD *tmp)
}
+struct list_callback_arg
+{
+ list_callback_arg(const char *u, THD *t, ulong m):
+ user(u), thd(t), max_query_length(m) {}
+ I_List<thread_info> thread_infos;
+ const char *user;
+ THD *thd;
+ ulong max_query_length;
+};
+
+
+static my_bool list_callback(THD *tmp, list_callback_arg *arg)
+{
+
+ Security_context *tmp_sctx= tmp->security_ctx;
+ bool got_thd_data;
+ if ((tmp->vio_ok() || tmp->system_thread) &&
+ (!arg->user || (!tmp->system_thread &&
+ tmp_sctx->user && !strcmp(tmp_sctx->user, arg->user))))
+ {
+ thread_info *thd_info= new (arg->thd->mem_root) thread_info;
+
+ thd_info->thread_id=tmp->thread_id;
+ thd_info->os_thread_id=tmp->os_thread_id;
+ thd_info->user= arg->thd->strdup(tmp_sctx->user ? tmp_sctx->user :
+ (tmp->system_thread ?
+ "system user" : "unauthenticated user"));
+ if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) &&
+ arg->thd->security_ctx->host_or_ip[0])
+ {
+ if ((thd_info->host= (char*) arg->thd->alloc(LIST_PROCESS_HOST_LEN+1)))
+ my_snprintf((char *) thd_info->host, LIST_PROCESS_HOST_LEN,
+ "%s:%u", tmp_sctx->host_or_ip, tmp->peer_port);
+ }
+ else
+ thd_info->host= arg->thd->strdup(tmp_sctx->host_or_ip[0] ?
+ tmp_sctx->host_or_ip :
+ tmp_sctx->host ? tmp_sctx->host : "");
+ thd_info->command=(int) tmp->get_command();
+
+ if ((got_thd_data= !trylock_short(&tmp->LOCK_thd_data)))
+ {
+ /* This is an approximation */
+ thd_info->proc_info= (char*) (tmp->killed >= KILL_QUERY ?
+ "Killed" : 0);
+
+ /* The following variables are only safe to access under a lock */
+ thd_info->db= 0;
+ if (tmp->db.str)
+ thd_info->db= arg->thd->strmake(tmp->db.str, tmp->db.length);
+
+ if (tmp->query())
+ {
+ uint length= MY_MIN(arg->max_query_length, tmp->query_length());
+ char *q= arg->thd->strmake(tmp->query(),length);
+ /* Safety: in case strmake failed, we set length to 0. */
+ thd_info->query_string=
+ CSET_STRING(q, q ? length : 0, tmp->query_charset());
+ }
+
+ /*
+ Progress report. We need to do this under a lock to ensure that all
+ is from the same stage.
+ */
+ if (tmp->progress.max_counter)
+ {
+ uint max_stage= MY_MAX(tmp->progress.max_stage, 1);
+ thd_info->progress= (((tmp->progress.stage / (double) max_stage) +
+ ((tmp->progress.counter /
+ (double) tmp->progress.max_counter) /
+ (double) max_stage)) *
+ 100.0);
+ set_if_smaller(thd_info->progress, 100);
+ }
+ else
+ thd_info->progress= 0.0;
+ }
+ else
+ {
+ thd_info->proc_info= "Busy";
+ thd_info->progress= 0.0;
+ thd_info->db= "";
+ }
+
+ thd_info->state_info= thread_state_info(tmp);
+ thd_info->start_time= tmp->start_utime;
+ ulonglong utime_after_query_snapshot= tmp->utime_after_query;
+ if (thd_info->start_time < utime_after_query_snapshot)
+ thd_info->start_time= utime_after_query_snapshot; // COM_SLEEP
+
+ if (got_thd_data)
+ mysql_mutex_unlock(&tmp->LOCK_thd_data);
+ arg->thread_infos.append(thd_info);
+ }
+ return 0;
+}
+
+
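The list_callback() function above replaces the open-coded walk of the global thread list (further down, the LOCK_thread_count loop is removed in favour of server_threads.iterate(list_callback, &arg)). The sketch below illustrates that generic pattern: the list owner exposes iterate(callback, arg) and does the locking itself. ThreadList and its members are toy stand-ins, not the server's server_threads object.

    #include <iostream>
    #include <mutex>
    #include <vector>

    struct ThreadStub { int id; };

    class ThreadList
    {
      std::mutex lock;
      std::vector<ThreadStub> threads{{1}, {2}, {3}};
    public:
      template <typename Arg>
      void iterate(bool (*callback)(ThreadStub &, Arg *), Arg *arg)
      {
        std::lock_guard<std::mutex> guard(lock);  // locking lives here, not in callers
        for (ThreadStub &t : threads)
          if (callback(t, arg))
            break;                                // a true return stops the scan
      }
    };

    struct ListArg { int seen= 0; };

    static bool list_callback(ThreadStub &t, ListArg *arg)
    {
      std::cout << "thread " << t.id << "\n";
      arg->seen++;
      return false;                               // keep iterating
    }

    int main()
    {
      ThreadList server_threads;
      ListArg arg;
      server_threads.iterate(list_callback, &arg);
      std::cout << arg.seen << " threads listed\n";
      return 0;
    }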
void mysqld_list_processes(THD *thd,const char *user, bool verbose)
{
Item *field;
List<Item> field_list;
- I_List<thread_info> thread_infos;
- ulong max_query_length= (verbose ? thd->variables.max_allowed_packet :
- PROCESS_LIST_WIDTH);
+ list_callback_arg arg(user, thd,
+ verbose ? thd->variables.max_allowed_packet :
+ PROCESS_LIST_WIDTH);
Protocol *protocol= thd->protocol;
MEM_ROOT *mem_root= thd->mem_root;
DBUG_ENTER("mysqld_list_processes");
@@ -2811,7 +2933,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
mem_root);
field->maybe_null=1;
field_list.push_back(field=new (mem_root)
- Item_empty_string(thd, "Info", max_query_length),
+ Item_empty_string(thd, "Info", arg.max_query_length),
mem_root);
field->maybe_null=1;
if (!thd->variables.old_mode &&
@@ -2830,102 +2952,13 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
if (thd->killed)
DBUG_VOID_RETURN;
- mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
- I_List_iterator<THD> it(threads);
- THD *tmp;
- while ((tmp=it++))
- {
- Security_context *tmp_sctx= tmp->security_ctx;
- bool got_thd_data;
- if ((tmp->vio_ok() || tmp->system_thread) &&
- (!user || (!tmp->system_thread &&
- tmp_sctx->user && !strcmp(tmp_sctx->user, user))))
- {
- thread_info *thd_info= new (thd->mem_root) thread_info;
-
- thd_info->thread_id=tmp->thread_id;
- thd_info->os_thread_id=tmp->os_thread_id;
- thd_info->user= thd->strdup(tmp_sctx->user ? tmp_sctx->user :
- (tmp->system_thread ?
- "system user" : "unauthenticated user"));
- if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) &&
- thd->security_ctx->host_or_ip[0])
- {
- if ((thd_info->host= (char*) thd->alloc(LIST_PROCESS_HOST_LEN+1)))
- my_snprintf((char *) thd_info->host, LIST_PROCESS_HOST_LEN,
- "%s:%u", tmp_sctx->host_or_ip, tmp->peer_port);
- }
- else
- thd_info->host= thd->strdup(tmp_sctx->host_or_ip[0] ?
- tmp_sctx->host_or_ip :
- tmp_sctx->host ? tmp_sctx->host : "");
- thd_info->command=(int) tmp->get_command();
-
- if ((got_thd_data= !trylock_short(&tmp->LOCK_thd_data)))
- {
- /* This is an approximation */
- thd_info->proc_info= (char*) (tmp->killed >= KILL_QUERY ?
- "Killed" : 0);
- /*
- The following variables are only safe to access under a lock
- */
-
- thd_info->db= 0;
- if (tmp->db.str)
- thd_info->db= thd->strmake(tmp->db.str, tmp->db.length);
-
- if (tmp->query())
- {
- uint length= MY_MIN(max_query_length, tmp->query_length());
- char *q= thd->strmake(tmp->query(),length);
- /* Safety: in case strmake failed, we set length to 0. */
- thd_info->query_string=
- CSET_STRING(q, q ? length : 0, tmp->query_charset());
- }
-
- /*
- Progress report. We need to do this under a lock to ensure that all
- is from the same stage.
- */
- if (tmp->progress.max_counter)
- {
- uint max_stage= MY_MAX(tmp->progress.max_stage, 1);
- thd_info->progress= (((tmp->progress.stage / (double) max_stage) +
- ((tmp->progress.counter /
- (double) tmp->progress.max_counter) /
- (double) max_stage)) *
- 100.0);
- set_if_smaller(thd_info->progress, 100);
- }
- else
- thd_info->progress= 0.0;
- }
- else
- {
- thd_info->proc_info= "Busy";
- thd_info->progress= 0.0;
- thd_info->db= "";
- }
+ server_threads.iterate(list_callback, &arg);
- thd_info->state_info= thread_state_info(tmp);
- thd_info->start_time= tmp->start_utime;
- ulonglong utime_after_query_snapshot= tmp->utime_after_query;
- if (thd_info->start_time < utime_after_query_snapshot)
- thd_info->start_time= utime_after_query_snapshot; // COM_SLEEP
-
- if (got_thd_data)
- mysql_mutex_unlock(&tmp->LOCK_thd_data);
- thread_infos.append(thd_info);
- }
- }
- mysql_mutex_unlock(&LOCK_thread_count);
-
- thread_info *thd_info;
ulonglong now= microsecond_interval_timer();
char buff[20]; // For progress
String store_buffer(buff, sizeof(buff), system_charset_info);
- while ((thd_info=thread_infos.get()))
+ while (auto thd_info= arg.thread_infos.get())
{
protocol->prepare_for_resend();
protocol->store(thd_info->thread_id);
@@ -3209,152 +3242,150 @@ int fill_show_explain(THD *thd, TABLE_LIST *table, COND *cond)
}
-int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
+struct processlist_callback_arg
{
- TABLE *table= tables->table;
- CHARSET_INFO *cs= system_charset_info;
- char *user;
- ulonglong unow= microsecond_interval_timer();
- DBUG_ENTER("fill_schema_processlist");
+ processlist_callback_arg(THD *thd_arg, TABLE *table_arg):
+ thd(thd_arg), table(table_arg), unow(microsecond_interval_timer()) {}
+ THD *thd;
+ TABLE *table;
+ ulonglong unow;
+};
- DEBUG_SYNC(thd,"fill_schema_processlist_after_unow");
- user= thd->security_ctx->master_access & PROCESS_ACL ?
- NullS : thd->security_ctx->priv_user;
+static my_bool processlist_callback(THD *tmp, processlist_callback_arg *arg)
+{
+ Security_context *tmp_sctx= tmp->security_ctx;
+ CHARSET_INFO *cs= system_charset_info;
+ const char *val;
+ ulonglong max_counter;
+ bool got_thd_data;
+ char *user= arg->thd->security_ctx->master_access & PROCESS_ACL ?
+ NullS : arg->thd->security_ctx->priv_user;
+
+ if ((!tmp->vio_ok() && !tmp->system_thread) ||
+ (user && (tmp->system_thread || !tmp_sctx->user ||
+ strcmp(tmp_sctx->user, user))))
+ return 0;
- mysql_mutex_lock(&LOCK_thread_count);
+ restore_record(arg->table, s->default_values);
+ /* ID */
+ arg->table->field[0]->store((longlong) tmp->thread_id, TRUE);
+ /* USER */
+ val= tmp_sctx->user ? tmp_sctx->user :
+ (tmp->system_thread ? "system user" : "unauthenticated user");
+ arg->table->field[1]->store(val, strlen(val), cs);
+ /* HOST */
+ if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) &&
+ arg->thd->security_ctx->host_or_ip[0])
+ {
+ char host[LIST_PROCESS_HOST_LEN + 1];
+ my_snprintf(host, LIST_PROCESS_HOST_LEN, "%s:%u",
+ tmp_sctx->host_or_ip, tmp->peer_port);
+ arg->table->field[2]->store(host, strlen(host), cs);
+ }
+ else
+ arg->table->field[2]->store(tmp_sctx->host_or_ip,
+ strlen(tmp_sctx->host_or_ip), cs);
- if (!thd->killed)
+ if ((got_thd_data= !trylock_short(&tmp->LOCK_thd_data)))
{
- I_List_iterator<THD> it(threads);
- THD* tmp;
-
- while ((tmp= it++))
+ /* DB */
+ if (tmp->db.str)
{
- Security_context *tmp_sctx= tmp->security_ctx;
- const char *val;
- ulonglong max_counter;
- bool got_thd_data;
-
- if ((!tmp->vio_ok() && !tmp->system_thread) ||
- (user && (tmp->system_thread || !tmp_sctx->user ||
- strcmp(tmp_sctx->user, user))))
- continue;
+ arg->table->field[3]->store(tmp->db.str, tmp->db.length, cs);
+ arg->table->field[3]->set_notnull();
+ }
+ }
- restore_record(table, s->default_values);
- /* ID */
- table->field[0]->store((longlong) tmp->thread_id, TRUE);
- /* USER */
- val= tmp_sctx->user ? tmp_sctx->user :
- (tmp->system_thread ? "system user" : "unauthenticated user");
- table->field[1]->store(val, strlen(val), cs);
- /* HOST */
- if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) &&
- thd->security_ctx->host_or_ip[0])
- {
- char host[LIST_PROCESS_HOST_LEN + 1];
- my_snprintf(host, LIST_PROCESS_HOST_LEN, "%s:%u",
- tmp_sctx->host_or_ip, tmp->peer_port);
- table->field[2]->store(host, strlen(host), cs);
- }
- else
- table->field[2]->store(tmp_sctx->host_or_ip,
- strlen(tmp_sctx->host_or_ip), cs);
+ /* COMMAND */
+ if ((val= (char *) (!got_thd_data ? "Busy" :
+ (tmp->killed >= KILL_QUERY ?
+ "Killed" : 0))))
+ arg->table->field[4]->store(val, strlen(val), cs);
+ else
+ arg->table->field[4]->store(command_name[tmp->get_command()].str,
+ command_name[tmp->get_command()].length, cs);
- if ((got_thd_data= !trylock_short(&tmp->LOCK_thd_data)))
- {
- /* DB */
- if (tmp->db.str)
- {
- table->field[3]->store(tmp->db.str, tmp->db.length, cs);
- table->field[3]->set_notnull();
- }
- }
+ /* MYSQL_TIME */
+ ulonglong utime= tmp->start_utime;
+ ulonglong utime_after_query_snapshot= tmp->utime_after_query;
+ if (utime < utime_after_query_snapshot)
+ utime= utime_after_query_snapshot; // COM_SLEEP
+ utime= utime && utime < arg->unow ? arg->unow - utime : 0;
- /* COMMAND */
- if ((val= (char *) (!got_thd_data ? "Busy" :
- (tmp->killed >= KILL_QUERY ?
- "Killed" : 0))))
- table->field[4]->store(val, strlen(val), cs);
- else
- table->field[4]->store(command_name[tmp->get_command()].str,
- command_name[tmp->get_command()].length, cs);
+ arg->table->field[5]->store(utime / HRTIME_RESOLUTION, TRUE);
- /* MYSQL_TIME */
- ulonglong utime= tmp->start_utime;
- ulonglong utime_after_query_snapshot= tmp->utime_after_query;
- if (utime < utime_after_query_snapshot)
- utime= utime_after_query_snapshot; // COM_SLEEP
- utime= utime && utime < unow ? unow - utime : 0;
+ if (got_thd_data)
+ {
+ if (tmp->query())
+ {
+ arg->table->field[7]->store(tmp->query(),
+ MY_MIN(PROCESS_LIST_INFO_WIDTH,
+ tmp->query_length()), cs);
+ arg->table->field[7]->set_notnull();
- table->field[5]->store(utime / HRTIME_RESOLUTION, TRUE);
+ /* INFO_BINARY */
+ arg->table->field[16]->store(tmp->query(),
+ MY_MIN(PROCESS_LIST_INFO_WIDTH,
+ tmp->query_length()),
+ &my_charset_bin);
+ arg->table->field[16]->set_notnull();
+ }
- if (got_thd_data)
- {
- if (tmp->query())
- {
- table->field[7]->store(tmp->query(),
- MY_MIN(PROCESS_LIST_INFO_WIDTH,
- tmp->query_length()), cs);
- table->field[7]->set_notnull();
+ /*
+ Progress report. We need to do this under a lock to ensure that all
+ values come from the same stage.
+ */
+ if ((max_counter= tmp->progress.max_counter))
+ {
+ arg->table->field[9]->store((longlong) tmp->progress.stage + 1, 1);
+ arg->table->field[10]->store((longlong) tmp->progress.max_stage, 1);
+ arg->table->field[11]->store((double) tmp->progress.counter /
+ (double) max_counter*100.0);
+ }
+ mysql_mutex_unlock(&tmp->LOCK_thd_data);
+ }
- /* INFO_BINARY */
- table->field[16]->store(tmp->query(),
- MY_MIN(PROCESS_LIST_INFO_WIDTH,
- tmp->query_length()),
- &my_charset_bin);
- table->field[16]->set_notnull();
- }
+ /* STATE */
+ if ((val= thread_state_info(tmp)))
+ {
+ arg->table->field[6]->store(val, strlen(val), cs);
+ arg->table->field[6]->set_notnull();
+ }
- /*
- Progress report. We need to do this under a lock to ensure that all
- is from the same stage.
- */
- if ((max_counter= tmp->progress.max_counter))
- {
- table->field[9]->store((longlong) tmp->progress.stage + 1, 1);
- table->field[10]->store((longlong) tmp->progress.max_stage, 1);
- table->field[11]->store((double) tmp->progress.counter /
- (double) max_counter*100.0);
- }
- mysql_mutex_unlock(&tmp->LOCK_thd_data);
- }
+ /* TIME_MS */
+ arg->table->field[8]->store((double)(utime / (HRTIME_RESOLUTION / 1000.0)));
- /* STATE */
- if ((val= thread_state_info(tmp)))
- {
- table->field[6]->store(val, strlen(val), cs);
- table->field[6]->set_notnull();
- }
+ /*
+ This may become negative if this thread frees memory that was
+ allocated by another thread. However, it is better to notice that
+ eventually than to hide it.
+ */
+ arg->table->field[12]->store((longlong) tmp->status_var.local_memory_used,
+ FALSE);
+ arg->table->field[13]->store((longlong) tmp->status_var.max_local_memory_used,
+ FALSE);
+ arg->table->field[14]->store((longlong) tmp->get_examined_row_count(), TRUE);
- /* TIME_MS */
- table->field[8]->store((double)(utime / (HRTIME_RESOLUTION / 1000.0)));
+ /* QUERY_ID */
+ arg->table->field[15]->store(tmp->query_id, TRUE);
- /*
- This may become negative if we free a memory allocated by another
- thread in this thread. However it's better that we notice it eventually
- than hide it.
- */
- table->field[12]->store((longlong) tmp->status_var.local_memory_used,
- FALSE);
- table->field[13]->store((longlong) tmp->status_var.max_local_memory_used,
- FALSE);
- table->field[14]->store((longlong) tmp->get_examined_row_count(), TRUE);
+ arg->table->field[17]->store(tmp->os_thread_id);
- /* QUERY_ID */
- table->field[15]->store(tmp->query_id, TRUE);
-
- table->field[17]->store(tmp->os_thread_id);
+ if (schema_table_store_record(arg->thd, arg->table))
+ return 1;
+ return 0;
+}
- if (schema_table_store_record(thd, table))
- {
- mysql_mutex_unlock(&LOCK_thread_count);
- DBUG_RETURN(1);
- }
- }
- }
- mysql_mutex_unlock(&LOCK_thread_count);
+int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
+{
+ processlist_callback_arg arg(thd, tables->table);
+ DBUG_ENTER("fill_schema_processlist");
+ DEBUG_SYNC(thd,"fill_schema_processlist_after_unow");
+ if (!thd->killed &&
+ server_threads.iterate(processlist_callback, &arg))
+ DBUG_RETURN(1);
DBUG_RETURN(0);
}
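The same split is applied here: fill_schema_processlist() now only drives server_threads.iterate() with processlist_callback(). Note the non-blocking trylock_short() on LOCK_thd_data above: if the owner is busy, the row is reported with COMMAND='Busy' instead of blocking the whole PROCESSLIST scan. A tiny standalone sketch of that try-lock-or-report pattern, with std::mutex and an illustrative Session type rather than the real THD layout:

// Sketch of the "try-lock or report Busy" idea with std::mutex standing in
// for LOCK_thd_data; the Session type and its fields are illustrative only.
#include <mutex>
#include <string>

struct Session
{
  std::mutex state_lock;
  std::string current_query;       // only safe to read under state_lock
};

// Returns a snapshot of the query, or "Busy" when the owner holds the lock,
// so one slow session cannot stall the whole scan.
std::string snapshot_query(Session &s)
{
  if (!s.state_lock.try_lock())
    return "Busy";
  std::string q= s.current_query;
  s.state_lock.unlock();
  return q;
}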
@@ -3416,7 +3447,7 @@ int add_status_vars(SHOW_VAR *list)
{
int res= 0;
if (status_vars_inited)
- mysql_mutex_lock(&LOCK_show_status);
+ mysql_rwlock_wrlock(&LOCK_all_status_vars);
if (!all_status_vars.buffer && // array is not allocated yet - do it now
my_init_dynamic_array(&all_status_vars, sizeof(SHOW_VAR), 250, 50, MYF(0)))
{
@@ -3431,7 +3462,7 @@ int add_status_vars(SHOW_VAR *list)
sort_dynamic(&all_status_vars, show_var_cmp);
err:
if (status_vars_inited)
- mysql_mutex_unlock(&LOCK_show_status);
+ mysql_rwlock_unlock(&LOCK_all_status_vars);
return res;
}
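Status-variable registration switches from the LOCK_show_status mutex to the LOCK_all_status_vars rwlock: SHOW STATUS readers take a shared lock, while add_status_vars()/remove_status_vars() take the exclusive lock. A standalone sketch of that reader/writer split, using std::shared_mutex as a stand-in for mysql_rwlock_t; the names below are illustrative, not server code.

// Reader/writer split sketched with std::shared_mutex as a stand-in for
// mysql_rwlock_t.
#include <mutex>
#include <shared_mutex>
#include <vector>

static std::shared_mutex all_status_lock;     // ~ LOCK_all_status_vars
static std::vector<long> all_status;          // ~ all_status_vars

void register_vars(const std::vector<long> &vars)   // writer side
{
  std::unique_lock<std::shared_mutex> guard(all_status_lock);
  all_status.insert(all_status.end(), vars.begin(), vars.end());
}

long sum_vars()                                      // reader side
{
  std::shared_lock<std::shared_mutex> guard(all_status_lock);
  long sum= 0;
  for (long v : all_status)
    sum+= v;
  return sum;
}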
@@ -3493,7 +3524,7 @@ void remove_status_vars(SHOW_VAR *list)
{
if (status_vars_inited)
{
- mysql_mutex_lock(&LOCK_show_status);
+ mysql_rwlock_wrlock(&LOCK_all_status_vars);
SHOW_VAR *all= dynamic_element(&all_status_vars, 0, SHOW_VAR *);
for (; list->name; list++)
@@ -3514,7 +3545,7 @@ void remove_status_vars(SHOW_VAR *list)
}
}
shrink_var_array(&all_status_vars);
- mysql_mutex_unlock(&LOCK_show_status);
+ mysql_rwlock_unlock(&LOCK_all_status_vars);
}
else
{
@@ -3810,36 +3841,38 @@ end:
Return number of threads used
*/
-uint calc_sum_of_all_status(STATUS_VAR *to)
+struct calc_sum_callback_arg
{
- uint count= 0;
- DBUG_ENTER("calc_sum_of_all_status");
+ calc_sum_callback_arg(STATUS_VAR *to_arg): to(to_arg), count(0) {}
+ STATUS_VAR *to;
+ uint count;
+};
- /* Ensure that thread id not killed during loop */
- mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
- I_List_iterator<THD> it(threads);
- THD *tmp;
+static my_bool calc_sum_callback(THD *thd, calc_sum_callback_arg *arg)
+{
+ arg->count++;
+ if (!thd->status_in_global)
+ {
+ add_to_status(arg->to, &thd->status_var);
+ arg->to->local_memory_used+= thd->status_var.local_memory_used;
+ }
+ if (thd->get_command() != COM_SLEEP)
+ arg->to->threads_running++;
+ return 0;
+}
+
+
+uint calc_sum_of_all_status(STATUS_VAR *to)
+{
+ calc_sum_callback_arg arg(to);
+ DBUG_ENTER("calc_sum_of_all_status");
- /* Get global values as base */
*to= global_status_var;
to->local_memory_used= 0;
-
/* Add to this status from existing threads */
- while ((tmp= it++))
- {
- count++;
- if (!tmp->status_in_global)
- {
- add_to_status(to, &tmp->status_var);
- to->local_memory_used+= tmp->status_var.local_memory_used;
- }
- if (tmp->get_command() != COM_SLEEP)
- to->threads_running++;
- }
-
- mysql_mutex_unlock(&LOCK_thread_count);
- DBUG_RETURN(count);
+ server_threads.iterate(calc_sum_callback, &arg);
+ DBUG_RETURN(arg.count);
}
@@ -4189,8 +4222,9 @@ bool get_lookup_field_values(THD *thd, COND *cond, TABLE_LIST *tables,
case SQLCOM_SHOW_TABLE_STATUS:
case SQLCOM_SHOW_TRIGGERS:
case SQLCOM_SHOW_EVENTS:
- thd->make_lex_string(&lookup_field_values->db_value,
- lex->select_lex.db.str, lex->select_lex.db.length);
+ thd->make_lex_string(&lookup_field_values->db_value,
+ lex->first_select_lex()->db.str,
+ lex->first_select_lex()->db.length);
if (wild)
{
thd->make_lex_string(&lookup_field_values->table_value,
@@ -4583,10 +4617,10 @@ fill_schema_table_by_open(THD *thd, MEM_ROOT *mem_root,
temporary LEX. The latter is required to correctly open views and
produce table describing their structure.
*/
- if (make_table_list(thd, &lex->select_lex, &db_name, &table_name))
+ if (make_table_list(thd, lex->first_select_lex(), &db_name, &table_name))
goto end;
- table_list= lex->select_lex.table_list.first;
+ table_list= lex->first_select_lex()->table_list.first;
if (is_show_fields_or_keys)
{
@@ -6375,7 +6409,6 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
const char *wild, bool full_access, const char *sp_user)
{
- MYSQL_TIME time;
LEX *lex= thd->lex;
CHARSET_INFO *cs= system_charset_info;
const Sp_handler *sph;
@@ -6463,14 +6496,11 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
copy_field_as_string(table->field[22],
proc_table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]);
- bzero((char *)&time, sizeof(time));
- ((Field_timestamp *) proc_table->field[MYSQL_PROC_FIELD_CREATED])->
- get_time(&time);
- table->field[23]->store_time(&time);
- bzero((char *)&time, sizeof(time));
- ((Field_timestamp *) proc_table->field[MYSQL_PROC_FIELD_MODIFIED])->
- get_time(&time);
- table->field[24]->store_time(&time);
+ proc_table->field[MYSQL_PROC_FIELD_CREATED]->
+ save_in_field(table->field[23]);
+ proc_table->field[MYSQL_PROC_FIELD_MODIFIED]->
+ save_in_field(table->field[24]);
+
copy_field_as_string(table->field[25],
proc_table->field[MYSQL_PROC_FIELD_SQL_MODE]);
copy_field_as_string(table->field[26],
@@ -6633,15 +6663,20 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
table->field[8]->set_notnull();
}
KEY *key=show_table->key_info+i;
- if (key->rec_per_key[j])
+ if (key->rec_per_key[j] && key->algorithm != HA_KEY_ALG_LONG_HASH)
{
ha_rows records= (ha_rows) ((double) show_table->stat_records() /
key->actual_rec_per_key(j));
table->field[9]->store((longlong) records, TRUE);
table->field[9]->set_notnull();
}
- const char *tmp= show_table->file->index_type(i);
- table->field[13]->store(tmp, strlen(tmp), cs);
+ if (key->algorithm == HA_KEY_ALG_LONG_HASH)
+ table->field[13]->store(STRING_WITH_LEN("HASH"), cs);
+ else
+ {
+ const char *tmp= show_table->file->index_type(i);
+ table->field[13]->store(tmp, strlen(tmp), cs);
+ }
}
if (!(key_info->flags & HA_FULLTEXT) &&
(key_part->field &&
@@ -6763,7 +6798,7 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables,
 & 'field_translation_end' are uninitialized in this
case.
*/
- List<Item> *fields= &tables->view->select_lex.item_list;
+ List<Item> *fields= &tables->view->first_select_lex()->item_list;
List_iterator<Item> it(*fields);
Item *item;
Item_field *field;
@@ -6892,7 +6927,7 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables,
{
List<FOREIGN_KEY_INFO> f_key_list;
TABLE *show_table= tables->table;
- KEY *key_info=show_table->key_info;
+ KEY *key_info=show_table->s->key_info;
uint primary_key= show_table->s->primary_key;
show_table->file->info(HA_STATUS_VARIABLE |
HA_STATUS_NO_LOCK |
@@ -7090,7 +7125,7 @@ static int get_schema_key_column_usage_record(THD *thd,
{
List<FOREIGN_KEY_INFO> f_key_list;
TABLE *show_table= tables->table;
- KEY *key_info=show_table->key_info;
+ KEY *key_info=show_table->s->key_info;
uint primary_key= show_table->s->primary_key;
show_table->file->info(HA_STATUS_VARIABLE |
HA_STATUS_NO_LOCK |
@@ -7412,7 +7447,7 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
break;
default:
DBUG_ASSERT(0);
- my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
+ my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATAL));
DBUG_RETURN(1);
}
table->field[7]->set_notnull();
@@ -7538,7 +7573,8 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
}
else if (part_info->vers_info->interval.is_set())
{
- table->field[11]->store_timestamp((my_time_t)part_elem->range_value, 0);
+ Timeval tv((my_time_t) part_elem->range_value, 0);
+ table->field[11]->store_timestamp_dec(tv, AUTO_SEC_PART_DIGITS);
table->field[11]->set_notnull();
}
}
@@ -7724,11 +7760,11 @@ copy_event_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table)
sch_table->field[ISE_ON_COMPLETION]->
store(STRING_WITH_LEN("PRESERVE"), scs);
- number_to_datetime(et.created, 0, &time, 0, &not_used);
+ number_to_datetime_or_date(et.created, 0, &time, 0, &not_used);
DBUG_ASSERT(not_used==0);
sch_table->field[ISE_CREATED]->store_time(&time);
- number_to_datetime(et.modified, 0, &time, 0, &not_used);
+ number_to_datetime_or_date(et.modified, 0, &time, 0, &not_used);
DBUG_ASSERT(not_used==0);
sch_table->field[ISE_LAST_ALTERED]->store_time(&time);
@@ -7774,9 +7810,9 @@ int fill_open_tables(THD *thd, TABLE_LIST *tables, COND *cond)
TABLE *table= tables->table;
CHARSET_INFO *cs= system_charset_info;
OPEN_TABLE_LIST *open_list;
- if (unlikely(!(open_list= list_open_tables(thd, thd->lex->select_lex.db.str,
- wild))) &&
- unlikely(thd->is_fatal_error))
+ if (!(open_list= list_open_tables(thd, thd->lex->first_select_lex()->db.str,
+ wild))
+ && thd->is_fatal_error)
DBUG_RETURN(1);
for (; open_list ; open_list=open_list->next)
@@ -7869,18 +7905,15 @@ int fill_status(THD *thd, TABLE_LIST *tables, COND *cond)
if (scope == OPT_GLOBAL)
{
- /* We only hold LOCK_status for summary status vars */
- mysql_mutex_lock(&LOCK_status);
calc_sum_of_all_status(&tmp);
- mysql_mutex_unlock(&LOCK_status);
}
- mysql_mutex_lock(&LOCK_show_status);
+ mysql_rwlock_rdlock(&LOCK_all_status_vars);
res= show_status_array(thd, wild,
(SHOW_VAR *)all_status_vars.buffer,
scope, tmp1, "", tables->table,
upper_case_names, partial_cond);
- mysql_mutex_unlock(&LOCK_show_status);
+ mysql_rwlock_unlock(&LOCK_all_status_vars);
DBUG_RETURN(res);
}
@@ -8270,7 +8303,7 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
tmp_table_param->table_charset= cs;
tmp_table_param->field_count= field_count;
tmp_table_param->schema_table= 1;
- SELECT_LEX *select_lex= thd->lex->current_select;
+ SELECT_LEX *select_lex= table_list->select_lex;
bool keep_row_order= is_show_command(thd);
if (!(table= create_tmp_table(thd, tmp_table_param,
field_list, (ORDER*) 0, 0, 0,
@@ -8307,7 +8340,7 @@ TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list)
static int make_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
{
ST_FIELD_INFO *field_info= schema_table->fields_info;
- Name_resolution_context *context= &thd->lex->select_lex.context;
+ Name_resolution_context *context= &thd->lex->first_select_lex()->context;
for (; field_info->field_name; field_info++)
{
if (field_info->old_name)
@@ -8367,14 +8400,14 @@ int make_table_names_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
char tmp[128];
String buffer(tmp,sizeof(tmp), thd->charset());
LEX *lex= thd->lex;
- Name_resolution_context *context= &lex->select_lex.context;
+ Name_resolution_context *context= &lex->first_select_lex()->context;
ST_FIELD_INFO *field_info= &schema_table->fields_info[2];
LEX_CSTRING field_name= {field_info->field_name,
strlen(field_info->field_name) };
buffer.length(0);
buffer.append(field_info->old_name);
- buffer.append(&lex->select_lex.db);
+ buffer.append(&lex->first_select_lex()->db);
if (lex->wild && lex->wild->ptr())
{
buffer.append(STRING_WITH_LEN(" ("));
@@ -8407,7 +8440,7 @@ int make_columns_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
int fields_arr[]= {3, 15, 14, 6, 16, 5, 17, 18, 19, -1};
int *field_num= fields_arr;
ST_FIELD_INFO *field_info;
- Name_resolution_context *context= &thd->lex->select_lex.context;
+ Name_resolution_context *context= &thd->lex->first_select_lex()->context;
for (; *field_num >= 0; field_num++)
{
@@ -8438,7 +8471,7 @@ int make_character_sets_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
int fields_arr[]= {0, 2, 1, 3, -1};
int *field_num= fields_arr;
ST_FIELD_INFO *field_info;
- Name_resolution_context *context= &thd->lex->select_lex.context;
+ Name_resolution_context *context= &thd->lex->first_select_lex()->context;
for (; *field_num >= 0; field_num++)
{
@@ -8465,7 +8498,7 @@ int make_proc_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table)
int fields_arr[]= {2, 3, 4, 27, 24, 23, 22, 26, 28, 29, 30, -1};
int *field_num= fields_arr;
ST_FIELD_INFO *field_info;
- Name_resolution_context *context= &thd->lex->select_lex.context;
+ Name_resolution_context *context= &thd->lex->first_select_lex()->context;
for (; *field_num >= 0; field_num++)
{
@@ -9800,6 +9833,10 @@ ST_FIELD_INFO check_constraints_fields_info[]=
OPEN_FULL_TABLE},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
};
+
+/** For creating fields of information_schema.OPTIMIZER_TRACE */
+extern ST_FIELD_INFO optimizer_trace_info[];
+
/*
Description of ST_FIELD_INFO in table.h
@@ -9852,6 +9889,8 @@ ST_SCHEMA_TABLE schema_tables[]=
OPTIMIZE_I_S_TABLE|OPEN_TABLE_ONLY},
{"OPEN_TABLES", open_tables_fields_info, 0,
fill_open_tables, make_old_format, 0, -1, -1, 1, 0},
+ {"OPTIMIZER_TRACE", optimizer_trace_info, 0,
+ fill_optimizer_trace_info, NULL, NULL, -1, -1, false, 0},
{"PARAMETERS", parameters_fields_info, 0,
fill_schema_proc, 0, 0, -1, -1, 0, 0},
{"PARTITIONS", partitions_fields_info, 0,
diff --git a/sql/sql_signal.cc b/sql/sql_signal.cc
index 1317308ceb9..359b5e45f01 100644
--- a/sql/sql_signal.cc
+++ b/sql/sql_signal.cc
@@ -323,7 +323,7 @@ end:
set= m_set_signal_information.m_item[i];
if (set)
{
- if (set->fixed)
+ if (set->is_fixed())
set->cleanup();
}
}
diff --git a/sql/sql_sort.h b/sql/sql_sort.h
index c29bf1440c9..231bc93ce75 100644
--- a/sql/sql_sort.h
+++ b/sql/sql_sort.h
@@ -92,7 +92,6 @@ public:
memset(this, 0, sizeof(*this));
}
void init_for_filesort(uint sortlen, TABLE *table,
- ulong max_length_for_sort_data,
ha_rows maxrows, bool sort_positions);
};
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index c751b079147..e4cc00f45ba 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -29,7 +29,6 @@
#include "sql_statistics.h"
#include "opt_range.h"
#include "uniques.h"
-#include "my_atomic.h"
#include "sql_show.h"
#include "sql_partition.h"
@@ -325,8 +324,8 @@ private:
public:
inline void init(THD *thd, Field * table_field);
- inline bool add(ha_rows rowno);
- inline void finish(ha_rows rows);
+ inline bool add();
+ inline void finish(ha_rows rows, double sample_fraction);
inline void cleanup();
};
@@ -1545,6 +1544,8 @@ class Histogram_builder
uint curr_bucket; /* number of the current bucket to be built */
ulonglong count; /* number of values retrieved */
ulonglong count_distinct; /* number of distinct values retrieved */
+ /* number of distinct values that occurred only once */
+ ulonglong count_distinct_single_occurence;
public:
Histogram_builder(Field *col, uint col_len, ha_rows rows)
@@ -1558,14 +1559,21 @@ public:
bucket_capacity= (double) records / (hist_width + 1);
curr_bucket= 0;
count= 0;
- count_distinct= 0;
+ count_distinct= 0;
+ count_distinct_single_occurence= 0;
}
- ulonglong get_count_distinct() { return count_distinct; }
+ ulonglong get_count_distinct() const { return count_distinct; }
+ ulonglong get_count_single_occurence() const
+ {
+ return count_distinct_single_occurence;
+ }
int next(void *elem, element_count elem_cnt)
{
count_distinct++;
+ if (elem_cnt == 1)
+ count_distinct_single_occurence++;
count+= elem_cnt;
if (curr_bucket == hist_width)
return 0;
@@ -1579,7 +1587,7 @@ public:
count > bucket_capacity * (curr_bucket + 1))
{
histogram->set_prev_value(curr_bucket);
- curr_bucket++;
+ curr_bucket++;
}
}
return 0;
@@ -1595,9 +1603,18 @@ int histogram_build_walk(void *elem, element_count elem_cnt, void *arg)
return hist_builder->next(elem, elem_cnt);
}
-C_MODE_END
+static int count_distinct_single_occurence_walk(void *elem,
+ element_count count, void *arg)
+{
+ ((ulonglong*)arg)[0]+= 1;
+ if (count == 1)
+ ((ulonglong*)arg)[1]+= 1;
+ return 0;
+}
+
+C_MODE_END
/*
The class Count_distinct_field is a helper class used to calculate
the number of distinct values for a column. The class employs the
@@ -1616,6 +1633,9 @@ protected:
Unique *tree; /* The helper object to contain distinct values */
 uint tree_key_length; /* The length of the keys for the elements of 'tree' */
+ ulonglong distincts;
+ ulonglong distincts_single_occurence;
+
public:
Count_distinct_field() {}
@@ -1667,30 +1687,40 @@ public:
{
return tree->unique_add(table_field->ptr);
}
-
+
/*
@brief
Calculate the number of elements accumulated in the container of 'tree'
*/
- ulonglong get_value()
- {
- ulonglong count;
- if (tree->elements == 0)
- return (ulonglong) tree->elements_in_tree();
- count= 0;
- tree->walk(table_field->table, count_distinct_walk, (void*) &count);
- return count;
+ void walk_tree()
+ {
+ ulonglong counts[2] = {0, 0};
+ tree->walk(table_field->table,
+ count_distinct_single_occurence_walk, counts);
+ distincts= counts[0];
+ distincts_single_occurence= counts[1];
}
/*
@brief
- Build the histogram for the elements accumulated in the container of 'tree'
+ Calculate a histogram of the tree
*/
- ulonglong get_value_with_histogram(ha_rows rows)
+ void walk_tree_with_histogram(ha_rows rows)
{
Histogram_builder hist_builder(table_field, tree_key_length, rows);
tree->walk(table_field->table, histogram_build_walk, (void *) &hist_builder);
- return hist_builder.get_count_distinct();
+ distincts= hist_builder.get_count_distinct();
+ distincts_single_occurence= hist_builder.get_count_single_occurence();
+ }
+
+ ulonglong get_count_distinct()
+ {
+ return distincts;
+ }
+
+ ulonglong get_count_distinct_single_occurence()
+ {
+ return distincts_single_occurence;
}
/*
@@ -2491,7 +2521,7 @@ void Column_statistics_collected::init(THD *thd, Field *table_field)
*/
inline
-bool Column_statistics_collected::add(ha_rows rowno)
+bool Column_statistics_collected::add()
{
bool err= 0;
@@ -2500,9 +2530,11 @@ bool Column_statistics_collected::add(ha_rows rowno)
else
{
column_total_length+= column->value_length();
- if (min_value && column->update_min(min_value, rowno == nulls))
+ if (min_value && column->update_min(min_value,
+ is_null(COLUMN_STAT_MIN_VALUE)))
set_not_null(COLUMN_STAT_MIN_VALUE);
- if (max_value && column->update_max(max_value, rowno == nulls))
+ if (max_value && column->update_max(max_value,
+ is_null(COLUMN_STAT_MAX_VALUE)))
set_not_null(COLUMN_STAT_MAX_VALUE);
if (count_distinct)
err= count_distinct->add();
@@ -2520,7 +2552,7 @@ bool Column_statistics_collected::add(ha_rows rowno)
*/
inline
-void Column_statistics_collected::finish(ha_rows rows)
+void Column_statistics_collected::finish(ha_rows rows, double sample_fraction)
{
double val;
@@ -2538,16 +2570,44 @@ void Column_statistics_collected::finish(ha_rows rows)
}
if (count_distinct)
{
- ulonglong distincts;
uint hist_size= count_distinct->get_hist_size();
+
+ /* Compute cardinality statistics and optionally histogram. */
if (hist_size == 0)
- distincts= count_distinct->get_value();
+ count_distinct->walk_tree();
else
- distincts= count_distinct->get_value_with_histogram(rows - nulls);
+ count_distinct->walk_tree_with_histogram(rows - nulls);
+
+ ulonglong distincts= count_distinct->get_count_distinct();
+ ulonglong distincts_single_occurence=
+ count_distinct->get_count_distinct_single_occurence();
+
if (distincts)
{
- val= (double) (rows - nulls) / distincts;
- set_avg_frequency(val);
+ /*
+ We use the unsmoothed first-order jackknife estimator to estimate
+ the number of distinct values.
+ With a sufficiently large percentage of rows sampled (> 80%), we revert
+ to computing the avg_frequency from the raw data.
+ */
+ if (sample_fraction > 0.8)
+ val= (double) (rows - nulls) / distincts;
+ else
+ {
+ if (nulls == 1)
+ distincts_single_occurence+= 1;
+ if (nulls)
+ distincts+= 1;
+ double fraction_single_occurence=
+ static_cast<double>(distincts_single_occurence) / rows;
+ double total_number_of_rows= rows / sample_fraction;
+ double estimate_total_distincts= total_number_of_rows /
+ (distincts /
+ (1.0 - (1.0 - sample_fraction) * fraction_single_occurence));
+ val = std::fmax(estimate_total_distincts * (rows - nulls) / rows, 1.0);
+ }
+
+ set_avg_frequency(val);
set_not_null(COLUMN_STAT_AVG_FREQUENCY);
}
else
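As a reference for the arithmetic in the hunk above (the null adjustment is elided), here is a standalone sketch that computes the same sampled avg_frequency expression, with a worked number in the closing comment:

// d1 = number of distinct values seen exactly once in the sample.
#include <cmath>

double sampled_avg_frequency(double rows, double nulls, double distincts,
                             double d1, double sample_fraction)
{
  if (sample_fraction > 0.8)                  // enough rows sampled: raw ratio
    return (rows - nulls) / distincts;
  double f1= d1 / rows;                       // fraction of singleton values
  // Unsmoothed first-order jackknife estimate of the total distinct count.
  double est_distincts= distincts / (1.0 - (1.0 - sample_fraction) * f1);
  double total_rows= rows / sample_fraction;
  return std::fmax((total_rows / est_distincts) * (rows - nulls) / rows, 1.0);
}
// Example: 10000 rows read at sample_fraction=0.1 with distincts=4000,
// d1=3000, nulls=0  ->  est_distincts ~ 5479, avg_frequency ~ 18.3.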
@@ -2735,12 +2795,28 @@ int collect_statistics_for_table(THD *thd, TABLE *table)
Field *table_field;
ha_rows rows= 0;
handler *file=table->file;
+ double sample_fraction= thd->variables.sample_percentage / 100;
+ const ha_rows MIN_THRESHOLD_FOR_SAMPLING= 50000;
DBUG_ENTER("collect_statistics_for_table");
table->collected_stats->cardinality_is_null= TRUE;
table->collected_stats->cardinality= 0;
+ if (thd->variables.sample_percentage == 0)
+ {
+ if (file->records() < MIN_THRESHOLD_FOR_SAMPLING)
+ {
+ sample_fraction= 1;
+ }
+ else
+ {
+ sample_fraction= std::fmin(
+ (MIN_THRESHOLD_FOR_SAMPLING + 4096 *
+ log(200 * file->records())) / file->records(), 1);
+ }
+ }
+
for (field_ptr= table->field; *field_ptr; field_ptr++)
{
table_field= *field_ptr;
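For the automatic sampling path above (sample_percentage == 0), this standalone sketch reproduces the chosen fraction with worked numbers; the 50000 threshold and the 4096 * log(200 * records) term are copied from the hunk.

#include <cmath>
#include <cstdio>

// log() here is the natural logarithm, as in the server code.
double auto_sample_fraction(double records)
{
  const double MIN_THRESHOLD_FOR_SAMPLING= 50000;
  if (records < MIN_THRESHOLD_FOR_SAMPLING)
    return 1.0;                               // small tables are fully scanned
  return std::fmin((MIN_THRESHOLD_FOR_SAMPLING +
                    4096 * std::log(200 * records)) / records, 1.0);
}

int main()
{
  // Prints roughly "0.128 1.000": about 12.8% of a 1M-row table is sampled,
  // while a table below the 50K-row threshold is read in full.
  std::printf("%.3f %.3f\n", auto_sample_fraction(1e6), auto_sample_fraction(4e4));
  return 0;
}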
@@ -2753,7 +2829,7 @@ int collect_statistics_for_table(THD *thd, TABLE *table)
/* Perform a full table scan to collect statistics on 'table's columns */
if (!(rc= file->ha_rnd_init(TRUE)))
- {
+ {
DEBUG_SYNC(table->in_use, "statistics_collection_start");
while ((rc= file->ha_rnd_next(table->record[0])) != HA_ERR_END_OF_FILE)
@@ -2764,17 +2840,20 @@ int collect_statistics_for_table(THD *thd, TABLE *table)
if (rc)
break;
- for (field_ptr= table->field; *field_ptr; field_ptr++)
+ if (thd_rnd(thd) <= sample_fraction)
{
- table_field= *field_ptr;
- if (!bitmap_is_set(table->read_set, table_field->field_index))
- continue;
- if ((rc= table_field->collected_stats->add(rows)))
+ for (field_ptr= table->field; *field_ptr; field_ptr++)
+ {
+ table_field= *field_ptr;
+ if (!bitmap_is_set(table->read_set, table_field->field_index))
+ continue;
+ if ((rc= table_field->collected_stats->add()))
+ break;
+ }
+ if (rc)
break;
+ rows++;
}
- if (rc)
- break;
- rows++;
}
file->ha_rnd_end();
}
@@ -2788,7 +2867,8 @@ int collect_statistics_for_table(THD *thd, TABLE *table)
if (!rc)
{
table->collected_stats->cardinality_is_null= FALSE;
- table->collected_stats->cardinality= rows;
+ table->collected_stats->cardinality=
+ static_cast<ha_rows>(rows / sample_fraction);
}
bitmap_clear_all(table->write_set);
@@ -2799,7 +2879,7 @@ int collect_statistics_for_table(THD *thd, TABLE *table)
continue;
bitmap_set_bit(table->write_set, table_field->field_index);
if (!rc)
- table_field->collected_stats->finish(rows);
+ table_field->collected_stats->finish(rows, sample_fraction);
else
table_field->collected_stats->cleanup();
}
@@ -3261,7 +3341,6 @@ int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables)
{
TABLE_LIST stat_tables[STATISTICS_TABLES];
Open_tables_backup open_tables_backup;
-
DBUG_ENTER("read_statistics_for_tables_if_needed");
DEBUG_SYNC(thd, "statistics_read_start");
@@ -3270,10 +3349,7 @@ int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables)
DBUG_RETURN(0);
if (open_stat_tables(thd, stat_tables, &open_tables_backup, FALSE))
- {
- thd->clear_error();
DBUG_RETURN(1);
- }
for (TABLE_LIST *tl= tables; tl; tl= tl->next_global)
{
@@ -3325,7 +3401,7 @@ int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables)
'db' from all statistical tables: table_stats, column_stats, index_stats.
@retval
- 0 If all deletions are successful
+ 0 If all deletions are successful or we couldn't open the statistics tables
@retval
1 Otherwise
@@ -3333,7 +3409,8 @@ int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables)
The function is called when executing the statement DROP TABLE 'tab'.
*/
-int delete_statistics_for_table(THD *thd, const LEX_CSTRING *db, const LEX_CSTRING *tab)
+int delete_statistics_for_table(THD *thd, const LEX_CSTRING *db,
+ const LEX_CSTRING *tab)
{
int err;
enum_binlog_format save_binlog_format;
@@ -3341,11 +3418,10 @@ int delete_statistics_for_table(THD *thd, const LEX_CSTRING *db, const LEX_CSTRI
TABLE_LIST tables[STATISTICS_TABLES];
Open_tables_backup open_tables_backup;
int rc= 0;
-
DBUG_ENTER("delete_statistics_for_table");
if (open_stat_tables(thd, tables, &open_tables_backup, TRUE))
- DBUG_RETURN(rc);
+ DBUG_RETURN(0);
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
@@ -3410,7 +3486,7 @@ int delete_statistics_for_table(THD *thd, const LEX_CSTRING *db, const LEX_CSTRI
'tab' from the statistical table column_stats.
@retval
- 0 If the deletion is successful
+ 0 If all deletions are successful or we couldn't open the statistics table
@retval
1 Otherwise
@@ -3427,15 +3503,11 @@ int delete_statistics_for_column(THD *thd, TABLE *tab, Field *col)
TABLE_LIST tables;
Open_tables_backup open_tables_backup;
int rc= 0;
-
DBUG_ENTER("delete_statistics_for_column");
if (open_single_stat_table(thd, &tables, &stat_table_name[1],
&open_tables_backup, TRUE))
- {
- thd->clear_error();
- DBUG_RETURN(rc);
- }
+ DBUG_RETURN(0);
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
@@ -3476,7 +3548,7 @@ int delete_statistics_for_column(THD *thd, TABLE *tab, Field *col)
defined on the table 'tab' from the statistical table index_stats.
@retval
- 0 If the deletion is successful
+ 0 If all deletions are successful or we couldn't open the statistics table
@retval
1 Otherwise
@@ -3494,15 +3566,11 @@ int delete_statistics_for_index(THD *thd, TABLE *tab, KEY *key_info,
TABLE_LIST tables;
Open_tables_backup open_tables_backup;
int rc= 0;
-
DBUG_ENTER("delete_statistics_for_index");
if (open_single_stat_table(thd, &tables, &stat_table_name[2],
&open_tables_backup, TRUE))
- {
- thd->clear_error();
- DBUG_RETURN(rc);
- }
+ DBUG_RETURN(0);
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
@@ -3571,8 +3639,10 @@ int delete_statistics_for_index(THD *thd, TABLE *tab, KEY *key_info,
The function is called when executing any statement that renames a table
*/
-int rename_table_in_stat_tables(THD *thd, const LEX_CSTRING *db, const LEX_CSTRING *tab,
- const LEX_CSTRING *new_db, const LEX_CSTRING *new_tab)
+int rename_table_in_stat_tables(THD *thd, const LEX_CSTRING *db,
+ const LEX_CSTRING *tab,
+ const LEX_CSTRING *new_db,
+ const LEX_CSTRING *new_tab)
{
int err;
enum_binlog_format save_binlog_format;
@@ -3583,7 +3653,9 @@ int rename_table_in_stat_tables(THD *thd, const LEX_CSTRING *db, const LEX_CSTRI
DBUG_ENTER("rename_table_in_stat_tables");
if (open_stat_tables(thd, tables, &open_tables_backup, TRUE))
+ {
DBUG_RETURN(0); // not an error
+ }
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
@@ -3675,7 +3747,6 @@ int rename_column_in_stat_tables(THD *thd, TABLE *tab, Field *col,
TABLE_LIST tables;
Open_tables_backup open_tables_backup;
int rc= 0;
-
DBUG_ENTER("rename_column_in_stat_tables");
if (tab->s->tmp_table != NO_TMP_TABLE)
@@ -3683,10 +3754,7 @@ int rename_column_in_stat_tables(THD *thd, TABLE *tab, Field *col,
if (open_single_stat_table(thd, &tables, &stat_table_name[1],
&open_tables_backup, TRUE))
- {
- thd->clear_error();
DBUG_RETURN(rc);
- }
save_binlog_format= thd->set_current_stmt_binlog_format_stmt();
@@ -3728,9 +3796,8 @@ void set_statistics_for_table(THD *thd, TABLE *table)
{
TABLE_STATISTICS_CB *stats_cb= &table->s->stats_cb;
Table_statistics *read_stats= stats_cb->table_stats;
- Use_stat_tables_mode use_stat_table_mode= get_use_stat_tables_mode(thd);
table->used_stat_records=
- (use_stat_table_mode <= COMPLEMENTARY ||
+ (!check_eits_preferred(thd) ||
!table->stats_is_read || read_stats->cardinality_is_null) ?
table->file->stats.records : read_stats->cardinality;
@@ -3754,7 +3821,7 @@ void set_statistics_for_table(THD *thd, TABLE *table)
key_info < key_info_end; key_info++)
{
key_info->is_statistics_from_stat_tables=
- (use_stat_table_mode > COMPLEMENTARY &&
+ (check_eits_preferred(thd) &&
table->stats_is_read &&
key_info->read_stats->avg_frequency_is_inited() &&
key_info->read_stats->get_avg_frequency(0) > 0.5);
@@ -4084,7 +4151,9 @@ bool is_eits_usable(Field *field)
 partition list of a table. We assume the selectivity for
such columns would be handled during partition pruning.
*/
+#if 0 /* Work around MDEV-19334 */
DBUG_ASSERT(field->table->stats_is_read);
+#endif
Column_statistics* col_stats= field->read_stats;
return col_stats && !col_stats->no_stat_values_provided() && //(1)
field->type() != MYSQL_TYPE_GEOMETRY && //(2)
diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h
index 89758f002ca..3ed4006efd3 100644
--- a/sql/sql_statistics.h
+++ b/sql/sql_statistics.h
@@ -16,12 +16,26 @@
#ifndef SQL_STATISTICS_H
#define SQL_STATISTICS_H
+/*
+ COMPLEMENTARY_FOR_QUERIES and PREFERABLY_FOR_QUERIES behave like
+ COMPLEMENTARY and PREFERABLY respectively, except that with these values
+ EITS are not collected for a plain
+ ANALYZE TABLE t1;
+ To collect EITS with these values, PERSISTENT FOR has to be used:
+ analyze table t1 persistent for
+ columns (col1,col2...) index (idx1, idx2...)
+ or
+ analyze table t1 persistent for all
+*/
+
typedef
enum enum_use_stat_tables_mode
{
NEVER,
COMPLEMENTARY,
PREFERABLY,
+ COMPLEMENTARY_FOR_QUERIES,
+ PREFERABLY_FOR_QUERIES
} Use_stat_tables_mode;
typedef
@@ -87,6 +101,19 @@ Use_stat_tables_mode get_use_stat_tables_mode(THD *thd)
{
return (Use_stat_tables_mode) (thd->variables.use_stat_tables);
}
+inline
+bool check_eits_collection_allowed(THD *thd)
+{
+ return (get_use_stat_tables_mode(thd) == COMPLEMENTARY ||
+ get_use_stat_tables_mode(thd) == PREFERABLY);
+}
+
+inline
+bool check_eits_preferred(THD *thd)
+{
+ return (get_use_stat_tables_mode(thd) == PREFERABLY ||
+ get_use_stat_tables_mode(thd) == PREFERABLY_FOR_QUERIES);
+}
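As a quick reference, the effect of the extended enum on the two helpers above can be summarized as follows (derived directly from their definitions; descriptive comments only, not server code):

// Illustrative mapping derived from the two helper definitions above:
//   mode                         collection allowed   EITS preferred
//   NEVER                        no                   no
//   COMPLEMENTARY                yes                  no
//   PREFERABLY                   yes                  yes
//   COMPLEMENTARY_FOR_QUERIES    no                   no
//   PREFERABLY_FOR_QUERIES       no                   yes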
int read_statistics_for_tables_if_needed(THD *thd, TABLE_LIST *tables);
int collect_statistics_for_table(THD *thd, TABLE *table);
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index cc77452ecd1..45af08f8966 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -31,7 +31,7 @@
** String functions
*****************************************************************************/
-bool String::real_alloc(size_t length)
+bool Binary_string::real_alloc(size_t length)
{
size_t arg_length= ALIGN_SIZE(length + 1);
DBUG_ASSERT(arg_length > length);
@@ -81,7 +81,7 @@ bool String::real_alloc(size_t length)
@retval true An error occurred when attempting to allocate memory.
*/
-bool String::realloc_raw(size_t alloc_length)
+bool Binary_string::realloc_raw(size_t alloc_length)
{
if (Alloced_length <= alloc_length)
{
@@ -103,8 +103,7 @@ bool String::realloc_raw(size_t alloc_length)
(thread_specific ?
MY_THREAD_SPECIFIC : 0)))))
{
- if (str_length > len - 1)
- str_length= 0;
+ DBUG_ASSERT(str_length < len);
if (str_length) // Avoid bugs in memcpy on AIX
memcpy(new_ptr,Ptr,str_length);
new_ptr[str_length]=0;
@@ -127,19 +126,18 @@ bool String::set_int(longlong num, bool unsigned_flag, CHARSET_INFO *cs)
if (alloc(l))
return TRUE;
str_length=(uint32) (cs->cset->longlong10_to_str)(cs,Ptr,l,base,num);
- str_charset=cs;
+ set_charset(cs);
return FALSE;
}
// Convert a number into its HEX representation
-bool String::set_hex(ulonglong num)
+bool Binary_string::set_hex(ulonglong num)
{
char *n_end;
if (alloc(65) || !(n_end= longlong2str(num, Ptr, 16)))
return true;
length((uint32) (n_end - Ptr));
- set_charset(&my_charset_latin1);
return false;
}
@@ -157,7 +155,7 @@ static inline void APPEND_HEX(char *&to, uchar value)
}
-void String::qs_append_hex(const char *str, uint32 len)
+void Static_binary_string::qs_append_hex(const char *str, uint32 len)
{
const char *str_end= str + len;
for (char *to= Ptr + str_length ; str < str_end; str++)
@@ -167,7 +165,7 @@ void String::qs_append_hex(const char *str, uint32 len)
// Convert a string to its HEX representation
-bool String::set_hex(const char *str, uint32 len)
+bool Binary_string::set_hex(const char *str, uint32 len)
{
/*
Safety: cut the source string if "len" is too large.
@@ -181,7 +179,6 @@ bool String::set_hex(const char *str, uint32 len)
return true;
length(0);
qs_append_hex(str, len);
- set_charset(&my_charset_latin1);
return false;
}
@@ -192,7 +189,7 @@ bool String::set_real(double num,uint decimals, CHARSET_INFO *cs)
uint dummy_errors;
size_t len;
- str_charset=cs;
+ set_charset(cs);
if (decimals >= FLOATING_POINT_DECIMALS)
{
len= my_gcvt(num, MY_GCVT_ARG_DOUBLE, sizeof(buff) - 1, buff, NULL);
@@ -204,7 +201,7 @@ bool String::set_real(double num,uint decimals, CHARSET_INFO *cs)
}
-bool String::copy()
+bool Binary_string::copy()
{
if (!alloced)
{
@@ -225,18 +222,17 @@ bool String::copy()
@retval false Success.
@retval true Memory allocation failed.
*/
-bool String::copy(const String &str)
+bool Binary_string::copy(const Binary_string &str)
{
if (alloc(str.str_length))
return TRUE;
str_length=str.str_length;
bmove(Ptr,str.Ptr,str_length); // May be overlapping
Ptr[str_length]=0;
- str_charset=str.str_charset;
return FALSE;
}
-bool String::copy(const char *str,size_t arg_length, CHARSET_INFO *cs)
+bool Binary_string::copy(const char *str, size_t arg_length)
{
DBUG_ASSERT(arg_length < UINT_MAX32);
if (alloc(arg_length))
@@ -253,7 +249,6 @@ bool String::copy(const char *str,size_t arg_length, CHARSET_INFO *cs)
else if ((str_length=uint32(arg_length)))
memcpy(Ptr,str,arg_length);
Ptr[arg_length]=0;
- str_charset=cs;
return FALSE;
}
@@ -263,7 +258,7 @@ bool String::copy(const char *str,size_t arg_length, CHARSET_INFO *cs)
from valgrind
*/
-bool String::copy_or_move(const char *str,size_t arg_length, CHARSET_INFO *cs)
+bool Binary_string::copy_or_move(const char *str, size_t arg_length)
{
DBUG_ASSERT(arg_length < UINT_MAX32);
if (alloc(arg_length))
@@ -271,7 +266,6 @@ bool String::copy_or_move(const char *str,size_t arg_length, CHARSET_INFO *cs)
if ((str_length=uint32(arg_length)))
memmove(Ptr,str,arg_length);
Ptr[arg_length]=0;
- str_charset=cs;
return FALSE;
}
@@ -397,7 +391,7 @@ bool String::copy_aligned(const char *str, size_t arg_length, size_t offset,
Ptr[aligned_length]=0;
/* str_length is always >= 0 as arg_length is != 0 */
str_length= (uint32)aligned_length;
- str_charset= cs;
+ set_charset(cs);
return FALSE;
}
@@ -450,7 +444,7 @@ bool String::copy(const char *str, size_t arg_length,
return TRUE;
str_length=copy_and_convert((char*) Ptr, new_length, to_cs,
str, arg_length, from_cs, errors);
- str_charset=to_cs;
+ set_charset(to_cs);
return FALSE;
}
@@ -476,19 +470,20 @@ bool String::copy(const char *str, size_t arg_length,
bool String::set_ascii(const char *str, size_t arg_length)
{
- if (str_charset->mbminlen == 1)
+ if (mbminlen() == 1)
{
- set(str, arg_length, str_charset);
+ set(str, arg_length, charset());
return 0;
}
uint dummy_errors;
- return copy(str, (uint32)arg_length, &my_charset_latin1, str_charset, &dummy_errors);
+ return copy(str, (uint32) arg_length, &my_charset_latin1,
+ charset(), &dummy_errors);
}
/* This is used by mysql.cc */
-bool String::fill(uint32 max_length,char fill_char)
+bool Binary_string::fill(uint32 max_length,char fill_char)
{
if (str_length > max_length)
Ptr[str_length=max_length]=0;
@@ -504,22 +499,10 @@ bool String::fill(uint32 max_length,char fill_char)
void String::strip_sp()
{
- while (str_length && my_isspace(str_charset,Ptr[str_length-1]))
+ while (str_length && my_isspace(charset(), Ptr[str_length-1]))
str_length--;
}
-bool String::append(const String &s)
-{
- if (s.length())
- {
- if (realloc_with_extra_if_needed(str_length+s.length()))
- return TRUE;
- memcpy(Ptr+str_length,s.ptr(),s.length());
- str_length+=s.length();
- }
- return FALSE;
-}
-
/*
Append an ASCII string to the a string of the current character set
@@ -535,13 +518,13 @@ bool String::append(const char *s,size_t size)
/*
For an ASCII incompatible string, e.g. UCS-2, we need to convert
*/
- if (str_charset->mbminlen > 1)
+ if (mbminlen() > 1)
{
- uint32 add_length=arg_length * str_charset->mbmaxlen;
+ uint32 add_length= arg_length * mbmaxlen();
uint dummy_errors;
if (realloc_with_extra_if_needed(str_length+ add_length))
return TRUE;
- str_length+= copy_and_convert(Ptr+str_length, add_length, str_charset,
+ str_length+= copy_and_convert(Ptr + str_length, add_length, charset(),
s, arg_length, &my_charset_latin1,
&dummy_errors);
return FALSE;
@@ -550,24 +533,11 @@ bool String::append(const char *s,size_t size)
/*
 For an ASCII compatible string we can just append.
*/
- if (realloc_with_extra_if_needed(str_length+arg_length))
- return TRUE;
- memcpy(Ptr+str_length,s,arg_length);
- str_length+=arg_length;
- return FALSE;
+ return Binary_string::append(s, arg_length);
}
-/*
- Append a 0-terminated ASCII string
-*/
-
-bool String::append(const char *s)
-{
- return append(s, (uint) strlen(s));
-}
-
-bool String::append_longlong(longlong val)
+bool Binary_string::append_longlong(longlong val)
{
if (realloc(str_length+MAX_BIGINT_WIDTH+2))
return TRUE;
@@ -577,7 +547,7 @@ bool String::append_longlong(longlong val)
}
-bool String::append_ulonglong(ulonglong val)
+bool Binary_string::append_ulonglong(ulonglong val)
{
if (realloc(str_length+MAX_BIGINT_WIDTH+2))
return TRUE;
@@ -595,13 +565,13 @@ bool String::append(const char *s, size_t arg_length, CHARSET_INFO *cs)
{
uint32 offset;
- if (needs_conversion((uint32)arg_length, cs, str_charset, &offset))
+ if (needs_conversion((uint32)arg_length, cs, charset(), &offset))
{
size_t add_length;
if ((cs == &my_charset_bin) && offset)
{
- DBUG_ASSERT(str_charset->mbminlen > offset);
- offset= str_charset->mbminlen - offset; // How many characters to pad
+ DBUG_ASSERT(mbminlen() > offset);
+ offset= mbminlen() - offset; // How many characters to pad
add_length= arg_length + offset;
if (realloc(str_length + add_length))
return TRUE;
@@ -611,24 +581,19 @@ bool String::append(const char *s, size_t arg_length, CHARSET_INFO *cs)
return FALSE;
}
- add_length= arg_length / cs->mbminlen * str_charset->mbmaxlen;
+ add_length= arg_length / cs->mbminlen * mbmaxlen();
uint dummy_errors;
if (realloc_with_extra_if_needed(str_length + add_length))
return TRUE;
- str_length+= copy_and_convert(Ptr+str_length, (uint32)add_length, str_charset,
- s, (uint32)arg_length, cs, &dummy_errors);
+ str_length+= copy_and_convert(Ptr + str_length, (uint32)add_length, charset(),
+ s, (uint32)arg_length, cs, &dummy_errors);
+ return false;
}
- else
- {
- if (realloc_with_extra_if_needed(str_length + arg_length))
- return TRUE;
- memcpy(Ptr + str_length, s, arg_length);
- str_length+= (uint32)arg_length;
- }
- return FALSE;
+ return Binary_string::append(s, arg_length);
}
-bool String::append(IO_CACHE* file, uint32 arg_length)
+
+bool Binary_string::append(IO_CACHE* file, uint32 arg_length)
{
if (realloc_with_extra_if_needed(str_length+arg_length))
return TRUE;
@@ -676,19 +641,8 @@ bool String::append_with_prefill(const char *s,uint32 arg_length,
return FALSE;
}
-uint32 String::numchars() const
-{
- return (uint32) str_charset->cset->numchars(str_charset, Ptr, Ptr+str_length);
-}
-
-int String::charpos(longlong i,uint32 offset)
-{
- if (i <= 0)
- return (int)i;
- return (int)str_charset->cset->charpos(str_charset,Ptr+offset,Ptr+str_length,(size_t)i);
-}
-int String::strstr(const String &s,uint32 offset)
+int Static_binary_string::strstr(const Static_binary_string &s, uint32 offset)
{
if (s.length()+offset <= str_length)
{
@@ -719,7 +673,7 @@ skip:
** Search string from end. Offset is offset to the end of string
*/
-int String::strrstr(const String &s,uint32 offset)
+int Static_binary_string::strrstr(const Static_binary_string &s, uint32 offset)
{
if (s.length() <= offset && offset <= str_length)
{
@@ -746,18 +700,9 @@ skip:
return -1;
}
-/*
- Replace substring with string
- If wrong parameter or not enough memory, do nothing
-*/
-
-bool String::replace(uint32 offset,uint32 arg_length,const String &to)
-{
- return replace(offset,arg_length,to.ptr(),to.length());
-}
-bool String::replace(uint32 offset,uint32 arg_length,
- const char *to, uint32 to_length)
+bool Binary_string::replace(uint32 offset, uint32 arg_length,
+ const char *to, uint32 to_length)
{
long diff = (long) to_length-(long) arg_length;
if (offset+arg_length <= str_length)
@@ -788,7 +733,7 @@ bool String::replace(uint32 offset,uint32 arg_length,
// added by Holyfoot for "geometry" needs
-int String::reserve(size_t space_needed, size_t grow_by)
+int Binary_string::reserve(size_t space_needed, size_t grow_by)
{
if (Alloced_length < str_length + space_needed)
{
@@ -798,34 +743,34 @@ int String::reserve(size_t space_needed, size_t grow_by)
return FALSE;
}
-void String::qs_append(const char *str, size_t len)
+void Static_binary_string::qs_append(const char *str, size_t len)
{
memcpy(Ptr + str_length, str, len + 1);
str_length += (uint32)len;
}
-void String::qs_append(double d)
+void Static_binary_string::qs_append(double d)
{
char *buff = Ptr + str_length;
str_length+= (uint32) my_gcvt(d, MY_GCVT_ARG_DOUBLE, FLOATING_POINT_BUFFER - 1, buff,
NULL);
}
-void String::qs_append(double *d)
+void Static_binary_string::qs_append(double *d)
{
double ld;
float8get(ld, (char*) d);
qs_append(ld);
}
-void String::qs_append(int i)
+void Static_binary_string::qs_append(int i)
{
char *buff= Ptr + str_length;
char *end= int10_to_str(i, buff, -10);
str_length+= (int) (end-buff);
}
-void String::qs_append(ulonglong i)
+void Static_binary_string::qs_append(ulonglong i)
{
char *buff= Ptr + str_length;
char *end= longlong10_to_str(i, buff, 10);
@@ -947,12 +892,12 @@ String *copy_if_not_alloced(String *to,String *from,uint32 from_length)
of a constant string.
Not safe to reuse.
*/
- if (from->Alloced_length > 0) // "from" is #c or #d (not a constant)
+ if (from->alloced_length() > 0) // "from" is #c or #d (not a constant)
{
- if (from->Alloced_length >= from_length)
+ if (from->alloced_length() >= from_length)
return from; // #c or #d (large enough to store from_length bytes)
- if (from->alloced)
+ if (from->is_alloced())
{
(void) from->realloc(from_length);
return from; // #d (reallocated to fit from_length bytes)
@@ -991,16 +936,16 @@ String *copy_if_not_alloced(String *to,String *from,uint32 from_length)
 Note: since we can't distinguish between #a and #b for sure,
 we can't assert "not #a", but we can at least assert "not #e".
*/
- DBUG_ASSERT(!from->alloced || from->Alloced_length > 0); // Not #e
+ DBUG_ASSERT(!from->is_alloced() || from->alloced_length() > 0); // Not #e
(void) from->realloc(from_length);
return from;
}
- if (to->realloc(from_length))
+ if (to->alloc(from_length))
return from; // Actually an error
if ((to->str_length=MY_MIN(from->str_length,from_length)))
memcpy(to->Ptr,from->Ptr,to->str_length);
- to->str_charset=from->str_charset;
+ to->set_charset(*from);
return to; // "from" was of types #a, #b, #e, or small #c.
}
@@ -1160,26 +1105,6 @@ void String::print_with_conversion(String *print, CHARSET_INFO *cs) const
}
-/*
- Exchange state of this object and argument.
-
- SYNOPSIS
- String::swap()
-
- RETURN
- Target string will contain state of this object and vice versa.
-*/
-
-void String::swap(String &s)
-{
- swap_variables(char *, Ptr, s.Ptr);
- swap_variables(uint32, str_length, s.str_length);
- swap_variables(uint32, Alloced_length, s.Alloced_length);
- swap_variables(bool, alloced, s.alloced);
- swap_variables(CHARSET_INFO*, str_charset, s.str_charset);
-}
-
-
/**
Convert string to printable ASCII string
diff --git a/sql/sql_string.h b/sql/sql_string.h
index 4302f458436..39555020696 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -127,57 +127,275 @@ uint convert_to_printable(char *to, size_t to_len,
const char *from, size_t from_len,
CHARSET_INFO *from_cs, size_t nbytes= 0);
-class String : public Sql_alloc
+
+class Charset
{
+ CHARSET_INFO *m_charset;
+public:
+ Charset() :m_charset(&my_charset_bin) { }
+ Charset(CHARSET_INFO *cs) :m_charset(cs) { }
+
+ CHARSET_INFO *charset() const { return m_charset; }
+ uint mbminlen() const { return m_charset->mbminlen; }
+ uint mbmaxlen() const { return m_charset->mbmaxlen; }
+
+ size_t numchars(const char *str, const char *end) const
+ {
+ return m_charset->cset->numchars(m_charset, str, end);
+ }
+ size_t charpos(const char *str, const char *end, size_t pos) const
+ {
+ return m_charset->cset->charpos(m_charset, str, end, pos);
+ }
+ void set_charset(CHARSET_INFO *charset_arg)
+ {
+ m_charset= charset_arg;
+ }
+ void set_charset(const Charset &other)
+ {
+ m_charset= other.m_charset;
+ }
+ void swap(Charset &other)
+ {
+ swap_variables(CHARSET_INFO*, m_charset, other.m_charset);
+ }
+};
+
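A brief usage sketch of the new Charset wrapper may help; this is illustrative only, relies on the server's charset headers (CHARSET_INFO, my_charset_utf8_general_ci), and is not compilable as a standalone program.

// Usage sketch only; compiles inside the server tree.
static size_t charset_wrapper_demo()
{
  Charset cs(&my_charset_utf8_general_ci);      // illustrative charset choice
  const char *s= "abc\xC3\xA9";                 // "abc" + e-acute: 4 chars, 5 bytes
  size_t n_chars= cs.numchars(s, s + 5);        // 4
  size_t first3= cs.charpos(s, s + 5, 3);       // byte length of first 3 chars: 3
  return n_chars + first3;
}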
+
+/*
+ A storage for String.
+ Should be eventually derived from LEX_STRING.
+*/
+class Static_binary_string : public Sql_alloc
+{
+protected:
char *Ptr;
- uint32 str_length,Alloced_length, extra_alloc;
- bool alloced,thread_specific;
- CHARSET_INFO *str_charset;
+ uint32 str_length;
public:
- String()
- {
- Ptr=0; str_length=Alloced_length=extra_alloc=0;
- alloced= thread_specific= 0;
- str_charset= &my_charset_bin;
+ Static_binary_string()
+ :Ptr(NULL),
+ str_length(0)
+ { }
+ Static_binary_string(char *str, size_t length_arg)
+ :Ptr(str),
+ str_length((uint32) length_arg)
+ {
+ DBUG_ASSERT(length_arg < UINT_MAX32);
}
- String(size_t length_arg)
- {
- alloced= thread_specific= 0;
- Alloced_length= extra_alloc= 0; (void) real_alloc(length_arg);
- str_charset= &my_charset_bin;
+ inline uint32 length() const { return str_length;}
+ inline char& operator [] (size_t i) const { return Ptr[i]; }
+ inline void length(size_t len) { str_length=(uint32)len ; }
+ inline bool is_empty() const { return (str_length == 0); }
+ inline const char *ptr() const { return Ptr; }
+ inline const char *end() const { return Ptr + str_length; }
+
+ LEX_STRING lex_string() const
+ {
+ LEX_STRING str = { (char*) ptr(), length() };
+ return str;
}
- String(const char *str, CHARSET_INFO *cs)
- {
- Ptr=(char*) str; str_length= (uint32) strlen(str);
+ LEX_CSTRING lex_cstring() const
+ {
+ LEX_CSTRING skr = { ptr(), length() };
+ return skr;
+ }
+
+ bool has_8bit_bytes() const
+ {
+ for (const char *c= ptr(), *c_end= end(); c < c_end; c++)
+ {
+ if (!my_isascii(*c))
+ return true;
+ }
+ return false;
+ }
+
+ bool bin_eq(const Static_binary_string *other) const
+ {
+ return length() == other->length() &&
+ !memcmp(ptr(), other->ptr(), length());
+ }
+
+ void set(char *str, size_t len)
+ {
+ Ptr= str;
+ str_length= (uint32) len;
+ }
+
+ void swap(Static_binary_string &s)
+ {
+ swap_variables(char *, Ptr, s.Ptr);
+ swap_variables(uint32, str_length, s.str_length);
+ }
+
+ /*
+ PMG 2004.11.12
+ This is a method that works the same as perl's "chop". It simply
+ drops the last character of a string. This is useful in the case
+ of the federated storage handler where I'm building a unknown
+ number, list of values and fields to be used in a sql insert
+ statement to be run on the remote server, and have a comma after each.
+ When the list is complete, I "chop" off the trailing comma
+
+ ex.
+ String stringobj;
+ stringobj.append("VALUES ('foo', 'fi', 'fo',");
+ stringobj.chop();
+ stringobj.append(")");
+
+ In this case, the value of string was:
+
+ VALUES ('foo', 'fi', 'fo',
+ VALUES ('foo', 'fi', 'fo'
+ VALUES ('foo', 'fi', 'fo')
+ */
+ inline void chop()
+ {
+ str_length--;
+ Ptr[str_length]= '\0';
+ DBUG_ASSERT(strlen(Ptr) == str_length);
+ }
+
+ // Returns offset to substring or -1
+ int strstr(const Static_binary_string &search, uint32 offset=0);
+ // Returns offset to substring or -1
+ int strrstr(const Static_binary_string &search, uint32 offset=0);
+
+ /*
+ The following append operations do NOT check allocated memory:
+ q_*** methods write the parameter values as-is
+ qs_*** methods write the string representation of the value
+ */
+ void q_append(const char c)
+ {
+ Ptr[str_length++] = c;
+ }
+ void q_append2b(const uint32 n)
+ {
+ int2store(Ptr + str_length, n);
+ str_length += 2;
+ }
+ void q_append(const uint32 n)
+ {
+ int4store(Ptr + str_length, n);
+ str_length += 4;
+ }
+ void q_append(double d)
+ {
+ float8store(Ptr + str_length, d);
+ str_length += 8;
+ }
+ void q_append(double *d)
+ {
+ float8store(Ptr + str_length, *d);
+ str_length += 8;
+ }
+ void q_append(const char *data, size_t data_len)
+ {
+ memcpy(Ptr + str_length, data, data_len);
+ DBUG_ASSERT(str_length <= UINT_MAX32 - data_len);
+ str_length += (uint)data_len;
+ }
+ void q_append(const LEX_CSTRING *ls)
+ {
+ DBUG_ASSERT(ls->length < UINT_MAX32 &&
+ ((ls->length == 0 && !ls->str) ||
+ ls->length == strlen(ls->str)));
+ q_append(ls->str, (uint32) ls->length);
+ }
+
+ void write_at_position(int position, uint32 value)
+ {
+ int4store(Ptr + position,value);
+ }
+
+ void qs_append(const char *str)
+ {
+ qs_append(str, (uint32)strlen(str));
+ }
+ void qs_append(const LEX_CSTRING *ls)
+ {
+ DBUG_ASSERT(ls->length < UINT_MAX32 &&
+ ((ls->length == 0 && !ls->str) ||
+ ls->length == strlen(ls->str)));
+ qs_append(ls->str, (uint32)ls->length);
+ }
+ void qs_append(const char *str, size_t len);
+ void qs_append_hex(const char *str, uint32 len);
+ void qs_append(double d);
+ void qs_append(double *d);
+ inline void qs_append(const char c)
+ {
+ Ptr[str_length]= c;
+ str_length++;
+ }
+ void qs_append(int i);
+ void qs_append(uint i)
+ {
+ qs_append((ulonglong)i);
+ }
+ void qs_append(ulong i)
+ {
+ qs_append((ulonglong)i);
+ }
+ void qs_append(ulonglong i);
+ void qs_append(longlong i, int radix)
+ {
+ char *buff= Ptr + str_length;
+ char *end= ll2str(i, buff, radix, 0);
+ str_length+= uint32(end-buff);
+ }
+};
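A minimal usage sketch of the unchecked q_*/qs_* contract above (illustrative only, not part of the patch); the buffer name and sizes are assumptions, and since the class never allocates, the caller must guarantee enough room.

// Static_binary_string never allocates; q_append() writes into the borrowed
// buffer and trusts the caller to have reserved enough space.
char room[16];
Static_binary_string s(room, 0);   // empty string over a caller-owned buffer
s.q_append('(');                   // one raw byte, no bounds check
s.q_append((uint32) 42);           // int4store at offset 1
s.q_append(')');
// s.length() is now 6; the contents are raw binary, not printable text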
+
+
+class Binary_string: public Static_binary_string
+{
+ uint32 Alloced_length, extra_alloc;
+ bool alloced, thread_specific;
+ void init_private_data()
+ {
Alloced_length= extra_alloc= 0;
- alloced= thread_specific= 0;
- str_charset=cs;
+ alloced= thread_specific= false;
+ }
+public:
+ Binary_string()
+ {
+ init_private_data();
}
+ explicit Binary_string(size_t length_arg)
+ {
+ init_private_data();
+ (void) real_alloc(length_arg);
+ }
+ explicit Binary_string(const char *str)
+ :Binary_string(str, strlen(str))
+ { }
/*
NOTE: If one intends to use the c_ptr() method, the following two
constructors need the size of memory for STR to be at least LEN+1 (to make
room for zero termination).
*/
- String(const char *str,size_t len, CHARSET_INFO *cs)
- {
- Ptr=(char*) str; str_length=(uint32)len; Alloced_length= extra_alloc=0;
- alloced= thread_specific= 0;
- str_charset=cs;
+ Binary_string(const char *str, size_t len)
+ :Static_binary_string((char *) str, len)
+ {
+ init_private_data();
}
- String(char *str,size_t len, CHARSET_INFO *cs)
- {
- Ptr=(char*) str; Alloced_length=str_length=(uint32)len; extra_alloc= 0;
+ Binary_string(char *str, size_t len)
+ :Static_binary_string(str, len)
+ {
+ Alloced_length= (uint32) len;
+ extra_alloc= 0;
alloced= thread_specific= 0;
- str_charset=cs;
}
- String(const String &str)
- {
- Ptr=str.Ptr ; str_length=str.str_length ;
- Alloced_length=str.Alloced_length; extra_alloc= 0;
+ explicit Binary_string(const Binary_string &str)
+ :Static_binary_string(str)
+ {
+ Alloced_length= str.Alloced_length;
+ extra_alloc= 0;
alloced= thread_specific= 0;
- str_charset=str.str_charset;
}
- ~String() { free(); }
+
+ ~Binary_string() { free(); }
/* Mark variable thread specific if it's not already allocated */
inline void set_thread_specific()
@@ -185,153 +403,180 @@ public:
if (!alloced)
thread_specific= 1;
}
- inline void set_charset(CHARSET_INFO *charset_arg)
- { str_charset= charset_arg; }
- inline CHARSET_INFO *charset() const { return str_charset; }
- inline uint32 length() const { return str_length;}
+ bool is_alloced() const { return alloced; }
inline uint32 alloced_length() const { return Alloced_length;}
inline uint32 extra_allocation() const { return extra_alloc;}
- inline char& operator [] (size_t i) const { return Ptr[i]; }
- inline void length(size_t len) { str_length=(uint32)len ; }
inline void extra_allocation(size_t len) { extra_alloc= (uint32)len; }
- inline bool is_empty() const { return (str_length == 0); }
inline void mark_as_const() { Alloced_length= 0;}
- inline const char *ptr() const { return Ptr; }
- inline const char *end() const { return Ptr + str_length; }
- inline char *c_ptr()
- {
- DBUG_ASSERT(!alloced || !Ptr || !Alloced_length ||
- (Alloced_length >= (str_length + 1)));
- if (!Ptr || Ptr[str_length]) /* Should be safe */
- (void) realloc(str_length);
- return Ptr;
+ inline bool uses_buffer_owned_by(const Binary_string *s) const
+ {
+ return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length);
}
- inline char *c_ptr_quick()
+
+ /* Swap two string objects. Efficient way to exchange data without memcpy. */
+ void swap(Binary_string &s)
{
- if (Ptr && str_length < Alloced_length)
- Ptr[str_length]=0;
- return Ptr;
+ Static_binary_string::swap(s);
+ swap_variables(uint32, Alloced_length, s.Alloced_length);
+ swap_variables(bool, alloced, s.alloced);
}
- inline char *c_ptr_safe()
+
+ /**
+ Points the internal buffer to the supplied one. The old buffer is freed.
+ @param str Pointer to the new buffer.
+ @param length_arg Length of the string in the new buffer, in bytes,
+ excluding any null terminator.
+ @param alloced_length_arg Total allocated size of the new buffer.
+ @note The new buffer will not be null terminated.
+ */
+ void set_alloced(char *str, size_t length_arg, size_t alloced_length_arg)
{
- if (Ptr && str_length < Alloced_length)
- Ptr[str_length]=0;
- else
- (void) realloc(str_length);
- return Ptr;
+ free();
+ Static_binary_string::set(str, length_arg);
+ DBUG_ASSERT(alloced_length_arg < UINT_MAX32);
+ Alloced_length= (uint32) alloced_length_arg;
}
- LEX_STRING lex_string() const
+ inline void set(char *str, size_t arg_length)
{
- LEX_STRING str = { (char*) ptr(), length() };
- return str;
+ set_alloced(str, arg_length, arg_length);
}
- LEX_CSTRING lex_cstring() const
+ inline void set(const char *str, size_t arg_length)
{
- LEX_CSTRING skr = { ptr(), length() };
- return skr;
+ free();
+ Static_binary_string::set((char *) str, arg_length);
}
- void set(String &str,size_t offset,size_t arg_length)
+ void set(Binary_string &str, size_t offset, size_t arg_length)
{
DBUG_ASSERT(&str != this);
free();
- Ptr=(char*) str.ptr()+offset; str_length=(uint32)arg_length;
+ Static_binary_string::set((char*) str.ptr() + offset, arg_length);
if (str.Alloced_length)
- Alloced_length=(uint32)(str.Alloced_length-offset);
- str_charset=str.str_charset;
+ Alloced_length= (uint32) (str.Alloced_length - offset);
}
-
- /**
- Points the internal buffer to the supplied one. The old buffer is freed.
- @param str Pointer to the new buffer.
- @param arg_length Length of the new buffer in characters, excluding any
- null character.
- @param cs Character set to use for interpreting string data.
- @note The new buffer will not be null terminated.
- */
- inline void set(char *str,size_t arg_length, CHARSET_INFO *cs)
+ /* Take over handling of buffer from some other object */
+ void reset(char *ptr_arg, size_t length_arg, size_t alloced_length_arg)
{
- free();
- Ptr=(char*) str; str_length=Alloced_length=(uint32)arg_length;
- str_charset=cs;
+ set_alloced(ptr_arg, length_arg, alloced_length_arg);
+ alloced= ptr_arg != 0;
}
- inline void set(const char *str,size_t arg_length, CHARSET_INFO *cs)
+
+ /* Forget about the buffer, let some other object handle it */
+ char *release()
{
- free();
- Ptr=(char*) str; str_length=(uint32)arg_length;
- str_charset=cs;
+ char *old= Ptr;
+ Static_binary_string::set(NULL, 0);
+ init_private_data();
+ return old;
}
- bool set_ascii(const char *str, size_t arg_length);
- inline void set_quick(char *str,size_t arg_length, CHARSET_INFO *cs)
+
+ inline void set_quick(char *str, size_t arg_length)
{
if (!alloced)
{
- Ptr=(char*) str; str_length=Alloced_length=(uint32)arg_length;
+ Static_binary_string::set(str, arg_length);
+ Alloced_length= (uint32) arg_length;
}
- str_charset=cs;
}
- bool set_int(longlong num, bool unsigned_flag, CHARSET_INFO *cs);
- bool set(int num, CHARSET_INFO *cs) { return set_int(num, false, cs); }
- bool set(uint num, CHARSET_INFO *cs) { return set_int(num, true, cs); }
- bool set(long num, CHARSET_INFO *cs) { return set_int(num, false, cs); }
- bool set(ulong num, CHARSET_INFO *cs) { return set_int(num, true, cs); }
- bool set(longlong num, CHARSET_INFO *cs) { return set_int(num, false, cs); }
- bool set(ulonglong num, CHARSET_INFO *cs) { return set_int((longlong)num, true, cs); }
- bool set_real(double num,uint decimals, CHARSET_INFO *cs);
+
+ inline Binary_string& operator=(const Binary_string &s)
+ {
+ if (&s != this)
+ {
+ /*
+ It is forbidden to do assignments like
+ some_string = substring_of_that_string
+ */
+ DBUG_ASSERT(!s.uses_buffer_owned_by(this));
+ set_alloced((char *) s.Ptr, s.str_length, s.Alloced_length);
+ }
+ return *this;
+ }
bool set_hex(ulonglong num);
bool set_hex(const char *str, uint32 len);
- /* Take over handling of buffer from some other object */
- void reset(char *ptr_arg, size_t length_arg, size_t alloced_length_arg,
- CHARSET_INFO *cs)
- {
- free();
- Ptr= ptr_arg;
- str_length= (uint32)length_arg;
- Alloced_length= (uint32)alloced_length_arg;
- str_charset= cs;
- alloced= ptr_arg != 0;
- }
+ bool copy(); // Alloc string if not alloced
+ bool copy(const Binary_string &s); // Allocate new string
+ bool copy(const char *s, size_t arg_length); // Allocate new string
+ bool copy_or_move(const char *s,size_t arg_length);
- /* Forget about the buffer, let some other object handle it */
- char *release()
+ bool append_ulonglong(ulonglong val);
+ bool append_longlong(longlong val);
+
+ bool append(const char *s, size_t size)
{
- char *old= Ptr;
- Ptr=0; str_length= Alloced_length= extra_alloc= 0;
- alloced= thread_specific= 0;
- return old;
+ if (!size)
+ return false;
+ if (realloc_with_extra_if_needed(str_length + size))
+ return true;
+ q_append(s, size);
+ return false;
+ }
+ bool append(const Binary_string &s)
+ {
+ return append(s.ptr(), s.length());
}
+ bool append(IO_CACHE* file, uint32 arg_length);
- /*
- PMG 2004.11.12
- This is a method that works the same as perl's "chop". It simply
- drops the last character of a string. This is useful in the case
- of the federated storage handler where I'm building a unknown
- number, list of values and fields to be used in a sql insert
- statement to be run on the remote server, and have a comma after each.
- When the list is complete, I "chop" off the trailing comma
+ inline bool append_char(char chr)
+ {
+ if (str_length < Alloced_length)
+ {
+ Ptr[str_length++]= chr;
+ }
+ else
+ {
+ if (unlikely(realloc_with_extra(str_length + 1)))
+ return true;
+ Ptr[str_length++]= chr;
+ }
+ return false;
+ }
+ bool append_hex(const char *src, uint32 srclen)
+ {
+ for (const char *src_end= src + srclen ; src != src_end ; src++)
+ {
+ if (unlikely(append_char(_dig_vec_lower[((uchar) *src) >> 4])) ||
+ unlikely(append_char(_dig_vec_lower[((uchar) *src) & 0x0F])))
+ return true;
+ }
+ return false;
+ }
- ex.
- String stringobj;
- stringobj.append("VALUES ('foo', 'fi', 'fo',");
- stringobj.chop();
- stringobj.append(")");
+ bool append_with_step(const char *s, uint32 arg_length, uint32 step_alloc)
+ {
+ uint32 new_length= arg_length + str_length;
+ if (new_length > Alloced_length &&
+ unlikely(realloc(new_length + step_alloc)))
+ return true;
+ q_append(s, arg_length);
+ return false;
+ }
- In this case, the value of string was:
+ inline char *c_ptr()
+ {
+ DBUG_ASSERT(!alloced || !Ptr || !Alloced_length ||
+ (Alloced_length >= (str_length + 1)));
- VALUES ('foo', 'fi', 'fo',
- VALUES ('foo', 'fi', 'fo'
- VALUES ('foo', 'fi', 'fo')
-
- */
- inline void chop()
+ if (!Ptr || Ptr[str_length]) // Should be safe
+ (void) realloc(str_length);
+ return Ptr;
+ }
+ inline char *c_ptr_quick()
{
- str_length--;
- Ptr[str_length]= '\0';
- DBUG_ASSERT(strlen(Ptr) == str_length);
+ if (Ptr && str_length < Alloced_length)
+ Ptr[str_length]=0;
+ return Ptr;
+ }
+ inline char *c_ptr_safe()
+ {
+ if (Ptr && str_length < Alloced_length)
+ Ptr[str_length]=0;
+ else
+ (void) realloc(str_length);
+ return Ptr;
}
inline void free()
@@ -342,8 +587,7 @@ public:
my_free(Ptr);
}
Alloced_length= extra_alloc= 0;
- Ptr=0;
- str_length=0; /* Safety */
+ Static_binary_string::set(NULL, 0); // Safety
}
inline bool alloc(size_t arg_length)
{
@@ -351,13 +595,13 @@ public:
return 0;
return real_alloc(arg_length);
}
- bool real_alloc(size_t arg_length); // Empties old string
+ bool real_alloc(size_t arg_length); // Empties old string
bool realloc_raw(size_t arg_length);
bool realloc(size_t arg_length)
{
if (realloc_raw(arg_length))
return TRUE;
- Ptr[arg_length]=0; // This make other funcs shorter
+ Ptr[arg_length]= 0; // This makes other functions shorter
return FALSE;
}
bool realloc_with_extra(size_t arg_length)
@@ -391,37 +635,179 @@ public:
arg_length,MYF((thread_specific ?
MY_THREAD_SPECIFIC : 0))))))
{
- Alloced_length = 0;
- real_alloc(arg_length);
+ Alloced_length= 0;
+ real_alloc(arg_length);
}
else
{
- Ptr=new_ptr;
- Alloced_length=(uint32)arg_length;
+ Ptr= new_ptr;
+ Alloced_length= (uint32) arg_length;
}
}
}
- bool is_alloced() const { return alloced; }
+ void move(Binary_string &s)
+ {
+ set_alloced(s.Ptr, s.str_length, s.Alloced_length);
+ extra_alloc= s.extra_alloc;
+ alloced= s.alloced;
+ thread_specific= s.thread_specific;
+ s.alloced= 0;
+ }
+ bool fill(uint32 max_length,char fill);
+ /*
+ Replace a substring with another string.
+ If the arguments are invalid or there is not enough memory, do nothing.
+ */
+ bool replace(uint32 offset,uint32 arg_length, const char *to, uint32 length);
+ bool replace(uint32 offset,uint32 arg_length, const Static_binary_string &to)
+ {
+ return replace(offset,arg_length,to.ptr(),to.length());
+ }
+
+ int reserve(size_t space_needed)
+ {
+ return realloc(str_length + space_needed);
+ }
+ int reserve(size_t space_needed, size_t grow_by);
+
+ inline char *prep_append(uint32 arg_length, uint32 step_alloc)
+ {
+ uint32 new_length= arg_length + str_length;
+ if (new_length > Alloced_length)
+ {
+ if (unlikely(realloc(new_length + step_alloc)))
+ return 0;
+ }
+ uint32 old_length= str_length;
+ str_length+= arg_length;
+ return Ptr + old_length; // Area to use
+ }
+
+
+ void q_net_store_length(ulonglong length)
+ {
+ DBUG_ASSERT(Alloced_length >= (str_length + net_length_size(length)));
+ char *pos= (char *) net_store_length((uchar *)(Ptr + str_length), length);
+ str_length= uint32(pos - Ptr);
+ }
+ void q_net_store_data(const uchar *from, size_t length)
+ {
+ DBUG_ASSERT(length < UINT_MAX32);
+ DBUG_ASSERT(Alloced_length >= (str_length + length +
+ net_length_size(length)));
+ q_net_store_length(length);
+ q_append((const char *)from, (uint32) length);
+ }
+};
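A short sketch of the ownership rules collected in Binary_string (illustrative only, not part of the patch): set() borrows a caller buffer, copy() allocates an owned one, and release()/reset() transfer ownership without copying; the literal lengths here are assumptions.

char stack_buf[32];
Binary_string a;
a.set(stack_buf, 0);          // borrowed: a.free() will not my_free() stack_buf
Binary_string b;
if (!b.copy("hello", 5))      // owned: allocates a new buffer and copies 5 bytes
{
  char *raw= b.release();     // b forgets its buffer; the caller owns it now
  Binary_string c;
  c.reset(raw, 5, 6);         // c adopts the heap buffer and will free it
}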
+
+
+class String: public Charset, public Binary_string
+{
+public:
+ String() { }
+ String(size_t length_arg)
+ :Binary_string(length_arg)
+ { }
+ String(const char *str, CHARSET_INFO *cs)
+ :Charset(cs),
+ Binary_string(str)
+ { }
+ /*
+ NOTE: If one intends to use the c_ptr() method, the following two
+ constructors need the size of memory for STR to be at least LEN+1 (to make
+ room for zero termination).
+ */
+ String(const char *str, size_t len, CHARSET_INFO *cs)
+ :Charset(cs),
+ Binary_string((char *) str, len)
+ { }
+ String(char *str, size_t len, CHARSET_INFO *cs)
+ :Charset(cs),
+ Binary_string(str, len)
+ { }
+ String(const String &str)
+ :Charset(str),
+ Binary_string(str)
+ { }
+
+ void set(String &str,size_t offset,size_t arg_length)
+ {
+ Binary_string::set(str, offset, arg_length);
+ set_charset(str);
+ }
+ inline void set(char *str,size_t arg_length, CHARSET_INFO *cs)
+ {
+ Binary_string::set(str, arg_length);
+ set_charset(cs);
+ }
+ inline void set(const char *str,size_t arg_length, CHARSET_INFO *cs)
+ {
+ Binary_string::set(str, arg_length);
+ set_charset(cs);
+ }
+ bool set_ascii(const char *str, size_t arg_length);
+ inline void set_quick(char *str,size_t arg_length, CHARSET_INFO *cs)
+ {
+ Binary_string::set_quick(str, arg_length);
+ set_charset(cs);
+ }
+ bool set_int(longlong num, bool unsigned_flag, CHARSET_INFO *cs);
+ bool set(int num, CHARSET_INFO *cs) { return set_int(num, false, cs); }
+ bool set(uint num, CHARSET_INFO *cs) { return set_int(num, true, cs); }
+ bool set(long num, CHARSET_INFO *cs) { return set_int(num, false, cs); }
+ bool set(ulong num, CHARSET_INFO *cs) { return set_int(num, true, cs); }
+ bool set(longlong num, CHARSET_INFO *cs) { return set_int(num, false, cs); }
+ bool set(ulonglong num, CHARSET_INFO *cs) { return set_int((longlong)num, true, cs); }
+ bool set_real(double num,uint decimals, CHARSET_INFO *cs);
+
+ bool set_hex(ulonglong num)
+ {
+ set_charset(&my_charset_latin1);
+ return Binary_string::set_hex(num);
+ }
+ bool set_hex(const char *str, uint32 len)
+ {
+ set_charset(&my_charset_latin1);
+ return Binary_string::set_hex(str, len);
+ }
+
+ /* Take over handling of buffer from some other object */
+ void reset(char *ptr_arg, size_t length_arg, size_t alloced_length_arg,
+ CHARSET_INFO *cs)
+ {
+ Binary_string::reset(ptr_arg, length_arg, alloced_length_arg);
+ set_charset(cs);
+ }
+
inline String& operator = (const String &s)
{
if (&s != this)
{
- /*
- It is forbidden to do assignments like
- some_string = substring_of_that_string
- */
- DBUG_ASSERT(!s.uses_buffer_owned_by(this));
- free();
- Ptr=s.Ptr ; str_length=s.str_length ; Alloced_length=s.Alloced_length;
- str_charset=s.str_charset;
+ set_charset(s);
+ Binary_string::operator=(s);
}
return *this;
}
- bool copy(); // Alloc string if not alloced
- bool copy(const String &s); // Allocate new string
- bool copy(const char *s,size_t arg_length, CHARSET_INFO *cs); // Allocate new string
- bool copy_or_move(const char *s,size_t arg_length, CHARSET_INFO *cs);
+ bool copy()
+ {
+ return Binary_string::copy();
+ }
+ bool copy(const String &s)
+ {
+ set_charset(s);
+ return Binary_string::copy(s);
+ }
+ bool copy(const char *s, size_t arg_length, CHARSET_INFO *cs)
+ {
+ set_charset(cs);
+ return Binary_string::copy(s, arg_length);
+ }
+ bool copy_or_move(const char *s, size_t arg_length, CHARSET_INFO *cs)
+ {
+ set_charset(cs);
+ return Binary_string::copy_or_move(s, arg_length);
+ }
static bool needs_conversion(size_t arg_length,
CHARSET_INFO *cs_from, CHARSET_INFO *cs_to,
uint32 *offset);
@@ -443,206 +829,84 @@ public:
{
if (unlikely(alloc(tocs->mbmaxlen * src_length)))
return true;
- str_length= copier->well_formed_copy(tocs, Ptr, Alloced_length,
+ str_length= copier->well_formed_copy(tocs, Ptr, alloced_length(),
fromcs, src, (uint)src_length, (uint)nchars);
- str_charset= tocs;
+ set_charset(tocs);
return false;
}
- void move(String &s)
- {
- free();
- Ptr=s.Ptr ; str_length=s.str_length ; Alloced_length=s.Alloced_length;
- extra_alloc= s.extra_alloc;
- alloced= s.alloced;
- thread_specific= s.thread_specific;
- s.alloced= 0;
- }
- bool append(const String &s);
- bool append(const char *s);
- bool append(const LEX_STRING *ls)
- {
- DBUG_ASSERT(ls->length < UINT_MAX32 &&
- ((ls->length == 0 && !ls->str) ||
- ls->length == strlen(ls->str)));
- return append(ls->str, (uint32) ls->length);
- }
- bool append(const LEX_CSTRING *ls)
- {
- DBUG_ASSERT(ls->length < UINT_MAX32 &&
- ((ls->length == 0 && !ls->str) ||
- ls->length == strlen(ls->str)));
- return append(ls->str, (uint32) ls->length);
- }
- bool append(const LEX_CSTRING &ls)
+ // Append without character set conversion
+ bool append(const String &s)
{
- return append(&ls);
+ return Binary_string::append(s);
}
- bool append(const char *s, size_t size);
- bool append(const char *s, size_t arg_length, CHARSET_INFO *cs);
- bool append_ulonglong(ulonglong val);
- bool append_longlong(longlong val);
- bool append(IO_CACHE* file, uint32 arg_length);
- bool append_with_prefill(const char *s, uint32 arg_length,
- uint32 full_length, char fill_char);
- bool append_parenthesized(long nr, int radix= 10);
- int strstr(const String &search,uint32 offset=0); // Returns offset to substring or -1
- int strrstr(const String &search,uint32 offset=0); // Returns offset to substring or -1
- bool replace(uint32 offset,uint32 arg_length,const char *to,uint32 length);
- bool replace(uint32 offset,uint32 arg_length,const String &to);
inline bool append(char chr)
{
- if (str_length < Alloced_length)
- {
- Ptr[str_length++]=chr;
- }
- else
- {
- if (unlikely(realloc_with_extra(str_length + 1)))
- return 1;
- Ptr[str_length++]=chr;
- }
- return 0;
+ return Binary_string::append_char(chr);
}
bool append_hex(const char *src, uint32 srclen)
{
- for (const char *src_end= src + srclen ; src != src_end ; src++)
- {
- if (unlikely(append(_dig_vec_lower[((uchar) *src) >> 4])) ||
- unlikely(append(_dig_vec_lower[((uchar) *src) & 0x0F])))
- return true;
- }
- return false;
+ return Binary_string::append_hex(src, srclen);
}
bool append_hex(const uchar *src, uint32 srclen)
{
- return append_hex((const char*)src, srclen);
- }
- bool fill(uint32 max_length,char fill);
- void strip_sp();
- friend int sortcmp(const String *a,const String *b, CHARSET_INFO *cs);
- friend int stringcmp(const String *a,const String *b);
- friend String *copy_if_not_alloced(String *a,String *b,uint32 arg_length);
- friend class Field;
- uint32 numchars() const;
- int charpos(longlong i,uint32 offset=0);
-
- int reserve(size_t space_needed)
- {
- return realloc(str_length + space_needed);
- }
- int reserve(size_t space_needed, size_t grow_by);
-
- /*
- The following append operations do NOT check alloced memory
- q_*** methods writes values of parameters itself
- qs_*** methods writes string representation of value
- */
- void q_append(const char c)
- {
- Ptr[str_length++] = c;
- }
- void q_append2b(const uint32 n)
- {
- int2store(Ptr + str_length, n);
- str_length += 2;
+ return Binary_string::append_hex((const char*)src, srclen);
}
- void q_append(const uint32 n)
+ bool append(IO_CACHE* file, uint32 arg_length)
{
- int4store(Ptr + str_length, n);
- str_length += 4;
+ return Binary_string::append(file, arg_length);
}
- void q_append(double d)
- {
- float8store(Ptr + str_length, d);
- str_length += 8;
- }
- void q_append(double *d)
+ inline bool append(const char *s, uint32 arg_length, uint32 step_alloc)
{
- float8store(Ptr + str_length, *d);
- str_length += 8;
+ return append_with_step(s, arg_length, step_alloc);
}
- void q_append(const char *data, size_t data_len)
+
+ // Append with optional character set conversion from ASCII (e.g. to UCS2)
+ bool append(const char *s)
{
- memcpy(Ptr + str_length, data, data_len);
- DBUG_ASSERT(str_length <= UINT_MAX32 - data_len);
- str_length += (uint)data_len;
+ return append(s, strlen(s));
}
- void q_append(const LEX_CSTRING *ls)
+ bool append(const LEX_STRING *ls)
{
DBUG_ASSERT(ls->length < UINT_MAX32 &&
((ls->length == 0 && !ls->str) ||
ls->length == strlen(ls->str)));
- q_append(ls->str, (uint32) ls->length);
- }
-
- void write_at_position(int position, uint32 value)
- {
- int4store(Ptr + position,value);
- }
-
- void qs_append(const char *str)
- {
- qs_append(str, (uint32)strlen(str));
+ return append(ls->str, (uint32) ls->length);
}
- void qs_append(const LEX_CSTRING *ls)
+ bool append(const LEX_CSTRING *ls)
{
DBUG_ASSERT(ls->length < UINT_MAX32 &&
((ls->length == 0 && !ls->str) ||
ls->length == strlen(ls->str)));
- qs_append(ls->str, (uint32)ls->length);
- }
- void qs_append(const char *str, size_t len);
- void qs_append_hex(const char *str, uint32 len);
- void qs_append(double d);
- void qs_append(double *d);
- inline void qs_append(const char c)
- {
- Ptr[str_length]= c;
- str_length++;
- }
- void qs_append(int i);
- void qs_append(uint i)
- {
- qs_append((ulonglong)i);
- }
- void qs_append(ulong i)
- {
- qs_append((ulonglong)i);
+ return append(ls->str, (uint32) ls->length);
}
- void qs_append(ulonglong i);
- void qs_append(longlong i, int radix)
+ bool append(const LEX_CSTRING &ls)
{
- char *buff= Ptr + str_length;
- char *end= ll2str(i, buff, radix, 0);
- str_length+= uint32(end-buff);
+ return append(&ls);
}
+ bool append(const char *s, size_t size);
+ bool append_with_prefill(const char *s, uint32 arg_length,
+ uint32 full_length, char fill_char);
+ bool append_parenthesized(long nr, int radix= 10);
- /* Inline (general) functions used by the protocol functions */
+ // Append with optional character set conversion from cs to charset()
+ bool append(const char *s, size_t arg_length, CHARSET_INFO *cs);
- inline char *prep_append(uint32 arg_length, uint32 step_alloc)
+ void strip_sp();
+ friend int sortcmp(const String *a,const String *b, CHARSET_INFO *cs);
+ friend int stringcmp(const String *a,const String *b);
+ friend String *copy_if_not_alloced(String *a,String *b,uint32 arg_length);
+ friend class Field;
+ uint32 numchars() const
{
- uint32 new_length= arg_length + str_length;
- if (new_length > Alloced_length)
- {
- if (unlikely(realloc(new_length + step_alloc)))
- return 0;
- }
- uint32 old_length= str_length;
- str_length+= arg_length;
- return Ptr+ old_length; /* Area to use */
+ return (uint32) Charset::numchars(ptr(), end());
}
-
-
- inline bool append(const char *s, uint32 arg_length, uint32 step_alloc)
+ int charpos(longlong i, uint32 offset=0)
{
- uint32 new_length= arg_length + str_length;
- if (new_length > Alloced_length &&
- unlikely(realloc(new_length + step_alloc)))
- return TRUE;
- memcpy(Ptr+str_length, s, arg_length);
- str_length+= arg_length;
- return FALSE;
+ if (i <= 0)
+ return (int) i;
+ return (int) Charset::charpos(ptr() + offset, end(), (size_t) i);
}
+
void print(String *to) const;
void print_with_conversion(String *to, CHARSET_INFO *cs) const;
void print(String *to, CHARSET_INFO *cs) const
@@ -665,13 +929,12 @@ public:
return append_for_single_quote(st, (uint32) len);
}
- /* Swap two string objects. Efficient way to exchange data without memcpy. */
- void swap(String &s);
-
- inline bool uses_buffer_owned_by(const String *s) const
+ void swap(String &s)
{
- return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length);
+ Charset::swap(s);
+ Binary_string::swap(s);
}
+
uint well_formed_length() const
{
return (uint) Well_formed_prefix(charset(), ptr(), length()).length();
@@ -682,36 +945,12 @@ public:
return TRUE;
if (charset()->mbminlen > 1)
return FALSE;
- for (const char *c= ptr(), *c_end= c + length(); c < c_end; c++)
- {
- if (!my_isascii(*c))
- return FALSE;
- }
- return TRUE;
- }
- bool bin_eq(const String *other) const
- {
- return length() == other->length() &&
- !memcmp(ptr(), other->ptr(), length());
+ return !has_8bit_bytes();
}
bool eq(const String *other, CHARSET_INFO *cs) const
{
return !sortcmp(this, other, cs);
}
- void q_net_store_length(ulonglong length)
- {
- DBUG_ASSERT(Alloced_length >= (str_length + net_length_size(length)));
- char *pos= (char *) net_store_length((uchar *)(Ptr + str_length), length);
- str_length= uint32(pos - Ptr);
- }
- void q_net_store_data(const uchar *from, size_t length)
- {
- DBUG_ASSERT(length < UINT_MAX32);
- DBUG_ASSERT(Alloced_length >= (str_length + length +
- net_length_size(length)));
- q_net_store_length(length);
- q_append((const char *)from, (uint32) length);
- }
};
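A hedged sketch of how the new layering reads for a caller (illustrative only, not part of the patch): byte-level appends come from Binary_string, while conversion and character counting come from Charset; the my_charset_* globals are the usual server collations and the values are assumptions.

String s(32);                              // starts as my_charset_bin
s.set_charset(&my_charset_utf8_general_ci);
s.append("abc", 3);                        // ASCII append (converted only for
                                           // charsets such as UCS2)
s.append("xyz", 3, &my_charset_latin1);    // converts latin1 -> utf8 if needed
uint32 n= s.numchars();                    // character count via Charset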
@@ -741,8 +980,19 @@ public:
};
+class String_space: public String
+{
+public:
+ String_space(uint n)
+ {
+ if (fill(n, ' '))
+ set("", 0, &my_charset_bin);
+ }
+};
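A one-line illustrative use of the new helper (the variable name is hypothetical):

String_space pad(4);   // a String holding four ' ' bytes ("    ")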
+
+
static inline bool check_if_only_end_space(CHARSET_INFO *cs,
- const char *str,
+ const char *str,
const char *end)
{
return str+ cs->cset->scan(cs, str, end, MY_SEQ_SPACES) == end;
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index c7ec0dd99ef..28c67f0e59a 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -33,7 +33,6 @@
// partition_info
// NOT_A_PARTITION_ID
#include "sql_db.h" // load_db_opt_by_name
-#include "sql_time.h" // make_truncated_value_warning
#include "records.h" // init_read_record, end_read_record
#include "filesort.h" // filesort_free_buffers
#include "sql_select.h" // setup_order
@@ -64,22 +63,16 @@
const char *primary_key_name="PRIMARY";
static int check_if_keyname_exists(const char *name,KEY *start, KEY *end);
-static char *make_unique_key_name(THD *thd, const char *field_name, KEY *start,
- KEY *end);
-static void make_unique_constraint_name(THD *thd, LEX_CSTRING *name,
- List<Virtual_column_info> *vcol,
- uint *nr);
-static const
-char * make_unique_invisible_field_name(THD *thd, const char *field_name,
- List<Create_field> *fields);
-
-static int copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
- List<Create_field> &create, bool ignore,
- uint order_num, ORDER *order,
- ha_rows *copied,ha_rows *deleted,
- Alter_info::enum_enable_or_disable keys_onoff,
- Alter_table_ctx *alter_ctx);
-
+static char *make_unique_key_name(THD *, const char *, KEY *, KEY *);
+static void make_unique_constraint_name(THD *, LEX_CSTRING *, const char *,
+ List<Virtual_column_info> *, uint *);
+static const char *make_unique_invisible_field_name(THD *, const char *,
+ List<Create_field> *);
+static int copy_data_between_tables(THD *, TABLE *,TABLE *,
+ List<Create_field> &, bool, uint, ORDER *,
+ ha_rows *, ha_rows *,
+ Alter_info::enum_enable_or_disable,
+ Alter_table_ctx *);
static int mysql_prepare_create_table(THD *, HA_CREATE_INFO *, Alter_info *,
uint *, handler *, KEY **, uint *, int);
static uint blob_length_by_type(enum_field_types type);
@@ -1849,7 +1842,7 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
#endif
/* Write shadow frm file */
lpt->create_info->table_options= lpt->db_options;
- LEX_CUSTRING frm= build_frm_image(lpt->thd, &lpt->table_name,
+ LEX_CUSTRING frm= build_frm_image(lpt->thd, lpt->table_name,
lpt->create_info,
lpt->alter_info->create_list,
lpt->key_count, lpt->key_info_buffer,
@@ -2041,18 +2034,6 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, bool if_exists,
if (!drop_temporary)
{
- if (!in_bootstrap)
- {
- for (table= tables; table; table= table->next_local)
- {
- LEX_CSTRING db_name= table->db;
- LEX_CSTRING table_name= table->table_name;
- if (table->open_type == OT_BASE_ONLY ||
- !thd->find_temporary_table(table))
- (void) delete_statistics_for_table(thd, &db_name, &table_name);
- }
- }
-
if (!thd->locked_tables_mode)
{
if (drop_sequence)
@@ -2116,6 +2097,15 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, bool if_exists,
}
}
}
+ /* We remove statistics for the table last, after we have the DDL lock */
+ for (table= tables; table; table= table->next_local)
+ {
+ LEX_CSTRING db_name= table->db;
+ LEX_CSTRING table_name= table->table_name;
+ if (table->open_type == OT_BASE_ONLY ||
+ !thd->find_temporary_table(table))
+ (void) delete_statistics_for_table(thd, &db_name, &table_name);
+ }
}
/* mark for close and remove all cached entries */
@@ -2128,7 +2118,6 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, bool if_exists,
DBUG_RETURN(TRUE);
my_ok(thd);
DBUG_RETURN(FALSE);
-
}
@@ -2613,9 +2602,6 @@ err:
/* Chop of the last comma */
built_non_trans_tmp_query.chop();
built_non_trans_tmp_query.append(" /* generated by server */");
-#ifdef WITH_WSREP
- thd->wsrep_skip_wsrep_GTID = true;
-#endif /* WITH_WSREP */
error |= thd->binlog_query(THD::STMT_QUERY_TYPE,
built_non_trans_tmp_query.ptr(),
built_non_trans_tmp_query.length(),
@@ -2628,9 +2614,6 @@ err:
/* Chop of the last comma */
built_trans_tmp_query.chop();
built_trans_tmp_query.append(" /* generated by server */");
-#ifdef WITH_WSREP
- thd->wsrep_skip_wsrep_GTID = true;
-#endif /* WITH_WSREP */
error |= thd->binlog_query(THD::STMT_QUERY_TYPE,
built_trans_tmp_query.ptr(),
built_trans_tmp_query.length(),
@@ -2645,9 +2628,6 @@ err:
built_query.append(" /* generated by server */");
int error_code = non_tmp_error ? thd->get_stmt_da()->sql_errno()
: 0;
-#ifdef WITH_WSREP
- thd->wsrep_skip_wsrep_GTID = false;
-#endif /* WITH_WSREP */
error |= thd->binlog_query(THD::STMT_QUERY_TYPE,
built_query.ptr(),
built_query.length(),
@@ -2696,9 +2676,6 @@ err:
}
end:
-#ifdef WITH_WSREP
- thd->wsrep_skip_wsrep_GTID = false;
-#endif /* WITH_WSREP */
DBUG_RETURN(error);
}
@@ -2807,6 +2784,14 @@ static int sort_keys(KEY *a, KEY *b)
{
ulong a_flags= a->flags, b_flags= b->flags;
+ /*
+ Do not reorder LONG_HASH indexes, because they must match the order
+ of their LONG_UNIQUE_HASH_FIELD columns.
+ */
+ if (a->algorithm == HA_KEY_ALG_LONG_HASH &&
+ b->algorithm == HA_KEY_ALG_LONG_HASH)
+ return a->usable_key_parts - b->usable_key_parts;
+
if (a_flags & HA_NOSAME)
{
if (!(b_flags & HA_NOSAME))
@@ -2835,9 +2820,7 @@ static int sort_keys(KEY *a, KEY *b)
Prefer original key order. usable_key_parts contains here
the original key position.
*/
- return ((a->usable_key_parts < b->usable_key_parts) ? -1 :
- (a->usable_key_parts > b->usable_key_parts) ? 1 :
- 0);
+ return a->usable_key_parts - b->usable_key_parts;
}
/*
@@ -3012,7 +2995,8 @@ CHARSET_INFO* get_sql_field_charset(Column_definition *sql_field,
by adding the features DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP.
If the first TIMESTAMP column appears to be nullable, or to have an
- explicit default, or to be a virtual column, then no promition is done.
+ explicit default, or to be a virtual column, or to be part of table period,
+ then no promotion is done.
@param column_definitions The list of column definitions, in the physical
order in which they appear in the table.
@@ -3033,6 +3017,7 @@ void promote_first_timestamp_column(List<Create_field> *column_definitions)
column_definition->default_value == NULL && // no constant default,
column_definition->unireg_check == Field::NONE && // no function default
column_definition->vcol_info == NULL &&
+ column_definition->period == NULL &&
!(column_definition->flags & VERS_SYSTEM_FIELD)) // column isn't generated
{
DBUG_PRINT("info", ("First TIMESTAMP column '%s' was promoted to "
@@ -3047,7 +3032,6 @@ void promote_first_timestamp_column(List<Create_field> *column_definitions)
}
}
-
/**
Check if there is a duplicate key. Report a warning for every duplicate key.
@@ -3313,6 +3297,55 @@ int mysql_add_invisible_field(THD *thd, List<Create_field> * field_list,
return 0;
}
+#define LONG_HASH_FIELD_NAME_LENGTH 30
+static inline void make_long_hash_field_name(LEX_CSTRING *buf, uint num)
+{
+ buf->length= my_snprintf((char *)buf->str,
+ LONG_HASH_FIELD_NAME_LENGTH, "DB_ROW_HASH_%u", num);
+}
+
+/**
+ Add a fully invisible hash field to the table for a long unique key,
+ i.e. a UNIQUE key over values too long to be indexed directly.
+ @param thd Thread context.
+ @param create_list List of table fields.
+ @param key_info Current long unique key info.
+*/
+static Create_field * add_hash_field(THD * thd, List<Create_field> *create_list,
+ KEY *key_info)
+{
+ List_iterator<Create_field> it(*create_list);
+ Create_field *dup_field, *cf= new (thd->mem_root) Create_field();
+ cf->flags|= UNSIGNED_FLAG | LONG_UNIQUE_HASH_FIELD;
+ cf->decimals= 0;
+ cf->length= cf->char_length= cf->pack_length= HA_HASH_FIELD_LENGTH;
+ cf->invisible= INVISIBLE_FULL;
+ cf->pack_flag|= FIELDFLAG_MAYBE_NULL;
+ cf->vcol_info= new (thd->mem_root) Virtual_column_info();
+ cf->vcol_info->stored_in_db= false;
+ uint num= 1;
+ LEX_CSTRING field_name;
+ field_name.str= (char *)thd->alloc(LONG_HASH_FIELD_NAME_LENGTH);
+ make_long_hash_field_name(&field_name, num);
+ /*
+ Check for collisions
+ */
+ while ((dup_field= it++))
+ {
+ if (!my_strcasecmp(system_charset_info, field_name.str, dup_field->field_name.str))
+ {
+ num++;
+ make_long_hash_field_name(&field_name, num);
+ it.rewind();
+ }
+ }
+ cf->field_name= field_name;
+ cf->set_handler(&type_handler_longlong);
+ key_info->algorithm= HA_KEY_ALG_LONG_HASH;
+ create_list->push_back(cf,thd->mem_root);
+ return cf;
+}
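An illustrative sketch of what the naming helper and collision loop above produce (the buffer here is hypothetical, not part of the patch):

char buf[LONG_HASH_FIELD_NAME_LENGTH];
LEX_CSTRING name= { buf, 0 };
make_long_hash_field_name(&name, 1);   // name.str is now "DB_ROW_HASH_1"
make_long_hash_field_name(&name, 2);   // the number is bumped on a clash with
                                       // an existing user column name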
+
Key *
mysql_add_invisible_index(THD *thd, List<Key> *key_list,
LEX_CSTRING* field_name, enum Key::Keytype type)
@@ -3370,6 +3403,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
uint total_uneven_bit_length= 0;
int select_field_count= C_CREATE_SELECT(create_table_mode);
bool tmp_table= create_table_mode == C_ALTER_TABLE;
+ bool is_hash_field_needed= false;
DBUG_ENTER("mysql_prepare_create_table");
DBUG_EXECUTE_IF("test_pseudo_invisible",{
@@ -3687,6 +3721,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
uint key_length=0;
Key_part_spec *column;
+ is_hash_field_needed= false;
if (key->name.str == ignore_key)
{
/* ignore redundant keys */
@@ -3896,22 +3931,29 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (f_is_blob(sql_field->pack_flag) ||
(f_is_geom(sql_field->pack_flag) && key->type != Key::SPATIAL))
- {
- if (!(file->ha_table_flags() & HA_CAN_INDEX_BLOBS))
- {
- my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name.str,
+ {
+ if (!(file->ha_table_flags() & HA_CAN_INDEX_BLOBS))
+ {
+ my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name.str,
file->table_type());
- DBUG_RETURN(TRUE);
- }
+ DBUG_RETURN(TRUE);
+ }
if (f_is_geom(sql_field->pack_flag) && sql_field->geom_type ==
Field::GEOM_POINT)
column->length= MAX_LEN_GEOM_POINT_FIELD;
- if (!column->length)
- {
- my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name.str);
- DBUG_RETURN(TRUE);
- }
- }
+ if (!column->length)
+ {
+ if (key->type == Key::UNIQUE)
+ is_hash_field_needed= true;
+ else if (key->type == Key::MULTIPLE)
+ column->length= file->max_key_length() + 1;
+ else
+ {
+ my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name.str);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
#ifdef HAVE_SPATIAL
if (key->type == Key::SPATIAL)
{
@@ -3980,34 +4022,31 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (column->length)
{
- if (f_is_blob(sql_field->pack_flag))
- {
- key_part_length= MY_MIN(column->length,
- blob_length_by_type(sql_field->real_field_type())
- * sql_field->charset->mbmaxlen);
- if (key_part_length > max_key_length ||
- key_part_length > file->max_key_part_length())
- {
- key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
- if (key->type == Key::MULTIPLE)
- {
- /* not a critical problem */
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ if (f_is_blob(sql_field->pack_flag))
+ {
+ key_part_length= MY_MIN(column->length,
+ blob_length_by_type(sql_field->real_field_type())
+ * sql_field->charset->mbmaxlen);
+ if (key_part_length > max_key_length ||
+ key_part_length > file->max_key_part_length())
+ {
+ if (key->type == Key::MULTIPLE)
+ {
+ key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
+ /* not a critical problem */
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_TOO_LONG_KEY,
ER_THD(thd, ER_TOO_LONG_KEY),
key_part_length);
/* Align key length to multibyte char boundary */
key_part_length-= key_part_length % sql_field->charset->mbmaxlen;
- }
- else
- {
- my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
- DBUG_RETURN(TRUE);
- }
- }
- }
+ }
+ else
+ is_hash_field_needed= true;
+ }
+ }
// Catch invalid use of partial keys
- else if (!f_is_geom(sql_field->pack_flag) &&
+ else if (!f_is_geom(sql_field->pack_flag) &&
// is the key partial?
column->length != key_part_length &&
// is prefix length bigger than field length?
@@ -4021,13 +4060,14 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
// and is this a 'unique' key?
(key_info->flags & HA_NOSAME))))
{
- my_message(ER_WRONG_SUB_KEY, ER_THD(thd, ER_WRONG_SUB_KEY), MYF(0));
- DBUG_RETURN(TRUE);
- }
- else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS))
- key_part_length= column->length;
+ my_message(ER_WRONG_SUB_KEY, ER_THD(thd, ER_WRONG_SUB_KEY), MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ else if (!(file->ha_table_flags() & HA_NO_PREFIX_CHAR_KEYS))
+ key_part_length= column->length;
}
- else if (key_part_length == 0 && (sql_field->flags & NOT_NULL_FLAG))
+ else if (key_part_length == 0 && (sql_field->flags & NOT_NULL_FLAG) &&
+ !is_hash_field_needed)
{
my_error(ER_WRONG_KEY_COLUMN, MYF(0), file->table_type(),
column->field_name.str);
@@ -4036,30 +4076,45 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (key_part_length > file->max_key_part_length() &&
key->type != Key::FULLTEXT)
{
- key_part_length= file->max_key_part_length();
- if (key->type == Key::MULTIPLE)
- {
- /* not a critical problem */
- push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ if (key->type == Key::MULTIPLE)
+ {
+ key_part_length= file->max_key_part_length();
+ /* not a critical problem */
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY),
key_part_length);
/* Align key length to multibyte char boundary */
key_part_length-= key_part_length % sql_field->charset->mbmaxlen;
- }
- else
- {
- my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
- DBUG_RETURN(TRUE);
- }
+ }
+ else
+ {
+ if (key->type == Key::UNIQUE)
+ {
+ is_hash_field_needed= true;
+ }
+ else
+ {
+ key_part_length= MY_MIN(max_key_length, file->max_key_part_length());
+ my_error(ER_TOO_LONG_KEY, MYF(0), key_part_length);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ /* We cannot store a key_part_length greater than 2^16 - 1 in the frm */
+ if (is_hash_field_needed && column->length > UINT_MAX16)
+ {
+ my_error(ER_TOO_LONG_KEYPART, MYF(0), UINT_MAX16);
+ DBUG_RETURN(TRUE);
}
- key_part_info->length= (uint16) key_part_length;
+ else
+ key_part_info->length= (uint16) key_part_length;
/* Use packed keys for long strings on the first column */
if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) &&
!((create_info->table_options & HA_OPTION_NO_PACK_KEYS)) &&
(key_part_length >= KEY_DEFAULT_PACK_LENGTH &&
(sql_field->real_field_type() == MYSQL_TYPE_STRING ||
sql_field->real_field_type() == MYSQL_TYPE_VARCHAR ||
- sql_field->pack_flag & FIELDFLAG_BLOB)))
+ sql_field->pack_flag & FIELDFLAG_BLOB))&& !is_hash_field_needed)
{
if ((column_nr == 0 && (sql_field->pack_flag & FIELDFLAG_BLOB)) ||
sql_field->real_field_type() == MYSQL_TYPE_VARCHAR)
@@ -4068,7 +4123,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
key_info->flags|= HA_PACK_KEY;
}
/* Check if the key segment is partial, set the key flag accordingly */
- if (key_part_length != sql_field->key_length)
+ if (key_part_length != sql_field->key_length &&
+ key_part_length != sql_field->type_handler()->max_octet_length())
key_info->flags|= HA_KEY_HAS_PART_KEY_SEG;
key_length+= key_part_length;
@@ -4108,12 +4164,41 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (key->type == Key::UNIQUE && !(key_info->flags & HA_NULL_PART_KEY))
unique_key=1;
key_info->key_length=(uint16) key_length;
- if (key_length > max_key_length && key->type != Key::FULLTEXT)
+ if (key_length > max_key_length && key->type != Key::FULLTEXT &&
+ !is_hash_field_needed)
{
- my_error(ER_TOO_LONG_KEY,MYF(0),max_key_length);
+ my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
DBUG_RETURN(TRUE);
}
+ if (is_hash_field_needed && key_info->algorithm != HA_KEY_ALG_UNDEF &&
+ key_info->algorithm != HA_KEY_ALG_HASH )
+ {
+ my_error(ER_TOO_LONG_KEY, MYF(0), max_key_length);
+ DBUG_RETURN(TRUE);
+ }
+ if (is_hash_field_needed ||
+ (key_info->algorithm == HA_KEY_ALG_HASH &&
+ key->type != Key::PRIMARY &&
+ key_info->flags & HA_NOSAME &&
+ !(file->ha_table_flags() & HA_CAN_HASH_KEYS ) &&
+ file->ha_table_flags() & HA_CAN_VIRTUAL_COLUMNS))
+ {
+ Create_field *hash_fld= add_hash_field(thd, &alter_info->create_list,
+ key_info);
+ if (!hash_fld)
+ DBUG_RETURN(TRUE);
+ hash_fld->offset= record_offset;
+ hash_fld->charset= create_info->default_table_charset;
+ record_offset+= hash_fld->pack_length;
+ if (key_info->flags & HA_NULL_PART_KEY)
+ null_fields++;
+ else
+ {
+ hash_fld->flags|= NOT_NULL_FLAG;
+ hash_fld->pack_flag&= ~FIELDFLAG_MAYBE_NULL;
+ }
+ }
if (validate_comment_length(thd, &key->key_create_info.comment,
INDEX_COMMENT_MAXLEN,
ER_TOO_LONG_INDEX_COMMENT,
@@ -4129,14 +4214,11 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
// Check if a duplicate index is defined.
check_duplicate_key(thd, key, key_info, &alter_info->key_list);
-
key_info++;
}
- if (!unique_key && !primary_key &&
- ((file->ha_table_flags() & HA_REQUIRE_PRIMARY_KEY) ||
- ((file->ha_table_flags() & HA_WANTS_PRIMARY_KEY) &&
- !create_info->sequence)))
+ if (!unique_key && !primary_key && !create_info->sequence &&
+ (file->ha_table_flags() & HA_REQUIRE_PRIMARY_KEY))
{
my_message(ER_REQUIRES_PRIMARY_KEY, ER_THD(thd, ER_REQUIRES_PRIMARY_KEY),
MYF(0));
@@ -4217,9 +4299,14 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
while ((check= c_it++))
{
if (!check->name.length)
- make_unique_constraint_name(thd, &check->name,
+ {
+ const char *own_name_base= create_info->period_info.constr == check
+ ? create_info->period_info.name.str : NULL;
+
+ make_unique_constraint_name(thd, &check->name, own_name_base,
&alter_info->check_constraint_list,
&nr);
+ }
{
/* Check that there's no repeating constraint names. */
List_iterator_fast<Virtual_column_info>
@@ -4248,11 +4335,9 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
/* Give warnings for not supported table options */
-#if defined(WITH_ARIA_STORAGE_ENGINE)
extern handlerton *maria_hton;
- if (file->partition_ht() != maria_hton)
-#endif
- if (create_info->transactional)
+ if (file->partition_ht() != maria_hton && create_info->transactional &&
+ !file->has_transaction_manager())
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER_THD(thd, ER_ILLEGAL_HA_CREATE_OPTION),
@@ -4320,9 +4405,8 @@ bool validate_comment_length(THD *thd, LEX_CSTRING *comment, size_t max_len,
apply it to the table.
*/
-static void set_table_default_charset(THD *thd,
- HA_CREATE_INFO *create_info,
- const LEX_CSTRING *db)
+static void set_table_default_charset(THD *thd, HA_CREATE_INFO *create_info,
+ const LEX_CSTRING &db)
{
/*
If the table character set was not given explicitly,
@@ -4333,7 +4417,7 @@ static void set_table_default_charset(THD *thd,
{
Schema_specification_st db_info;
- load_db_opt_by_name(thd, db->str, &db_info);
+ load_db_opt_by_name(thd, db.str, &db_info);
create_info->default_table_charset= db_info.default_table_charset;
}
@@ -4387,7 +4471,7 @@ bool Column_definition::prepare_blob_field(THD *thd)
set_handler(Type_handler::blob_type_handler((uint) length));
pack_length= type_handler()->calc_pack_length(0);
}
- length= 0;
+ length= key_length= 0;
}
DBUG_RETURN(0);
}
@@ -4456,12 +4540,11 @@ static bool vers_prepare_keys(THD *thd, HA_CREATE_INFO *create_info,
return false;
}
-handler *mysql_create_frm_image(THD *thd,
- const LEX_CSTRING *db, const LEX_CSTRING *table_name,
+handler *mysql_create_frm_image(THD *thd, const LEX_CSTRING &db,
+ const LEX_CSTRING &table_name,
HA_CREATE_INFO *create_info,
Alter_info *alter_info, int create_table_mode,
- KEY **key_info,
- uint *key_count,
+ KEY **key_info, uint *key_count,
LEX_CUSTRING *frm)
{
uint db_options;
@@ -4595,7 +4678,7 @@ handler *mysql_create_frm_image(THD *thd,
if (part_info->vers_info && !create_info->versioned())
{
- my_error(ER_VERS_NOT_VERSIONED, MYF(0), table_name->str);
+ my_error(ER_VERS_NOT_VERSIONED, MYF(0), table_name.str);
goto err;
}
@@ -4699,8 +4782,7 @@ handler *mysql_create_frm_image(THD *thd,
}
if (mysql_prepare_create_table(thd, create_info, alter_info, &db_options,
- file, key_info, key_count,
- create_table_mode))
+ file, key_info, key_count, create_table_mode))
goto err;
create_info->table_options=db_options;
@@ -4752,19 +4834,13 @@ err:
*/
static
-int create_table_impl(THD *thd,
- const LEX_CSTRING *orig_db,
- const LEX_CSTRING *orig_table_name,
- const LEX_CSTRING *db, const LEX_CSTRING *table_name,
- const char *path,
- const DDL_options_st options,
- HA_CREATE_INFO *create_info,
- Alter_info *alter_info,
- int create_table_mode,
- bool *is_trans,
- KEY **key_info,
- uint *key_count,
- LEX_CUSTRING *frm)
+int create_table_impl(THD *thd, const LEX_CSTRING &orig_db,
+ const LEX_CSTRING &orig_table_name,
+ const LEX_CSTRING &db, const LEX_CSTRING &table_name,
+ const char *path, const DDL_options_st options,
+ HA_CREATE_INFO *create_info, Alter_info *alter_info,
+ int create_table_mode, bool *is_trans, KEY **key_info,
+ uint *key_count, LEX_CUSTRING *frm)
{
LEX_CSTRING *alias;
handler *file= 0;
@@ -4773,7 +4849,7 @@ int create_table_impl(THD *thd,
bool internal_tmp_table= create_table_mode == C_ALTER_TABLE || frm_only;
DBUG_ENTER("mysql_create_table_no_lock");
DBUG_PRINT("enter", ("db: '%s' table: '%s' tmp: %d path: %s",
- db->str, table_name->str, internal_tmp_table, path));
+ db.str, table_name.str, internal_tmp_table, path));
if (thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE)
{
@@ -4799,7 +4875,7 @@ int create_table_impl(THD *thd,
goto err;
}
- alias= const_cast<LEX_CSTRING*>(table_case_name(create_info, table_name));
+ alias= const_cast<LEX_CSTRING*>(table_case_name(create_info, &table_name));
/* Check if table exists */
if (create_info->tmp_table())
@@ -4808,7 +4884,7 @@ int create_table_impl(THD *thd,
If a table exists, it must have been pre-opened. Try looking for one
in-use in THD::all_temp_tables list of TABLE_SHAREs.
*/
- TABLE *tmp_table= thd->find_temporary_table(db->str, table_name->str);
+ TABLE *tmp_table= thd->find_temporary_table(db.str, table_name.str);
if (tmp_table)
{
@@ -4843,14 +4919,14 @@ int create_table_impl(THD *thd,
}
else
{
- if (!internal_tmp_table && ha_table_exists(thd, db, table_name))
+ if (!internal_tmp_table && ha_table_exists(thd, &db, &table_name))
{
if (options.or_replace())
{
- (void) delete_statistics_for_table(thd, db, table_name);
+ (void) delete_statistics_for_table(thd, &db, &table_name);
TABLE_LIST table_list;
- table_list.init_one_table(db, table_name, 0, TL_WRITE_ALLOW_WRITE);
+ table_list.init_one_table(&db, &table_name, 0, TL_WRITE_ALLOW_WRITE);
table_list.table= create_info->table;
if (check_if_log_table(&table_list, TRUE, "CREATE OR REPLACE"))
@@ -4877,7 +4953,7 @@ int create_table_impl(THD *thd,
/*
Restart statement transactions for the case of CREATE ... SELECT.
*/
- if (thd->lex->select_lex.item_list.elements &&
+ if (thd->lex->first_select_lex()->item_list.elements &&
restart_trans_for_tables(thd, thd->lex->query_tables))
goto err;
}
@@ -4885,7 +4961,7 @@ int create_table_impl(THD *thd,
goto warn;
else
{
- my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name->str);
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name.str);
goto err;
}
}
@@ -4893,7 +4969,7 @@ int create_table_impl(THD *thd,
THD_STAGE_INFO(thd, stage_creating_table);
- if (check_engine(thd, orig_db->str, orig_table_name->str, create_info))
+ if (check_engine(thd, orig_db.str, orig_table_name.str, create_info))
goto err;
if (create_table_mode == C_ASSISTED_DISCOVERY)
@@ -4913,7 +4989,7 @@ int create_table_impl(THD *thd,
goto err;
}
- init_tmp_table_share(thd, &share, db->str, 0, table_name->str, path);
+ init_tmp_table_share(thd, &share, db.str, 0, table_name.str, path);
/* prepare everything for discovery */
share.field= &no_fields;
@@ -4955,17 +5031,29 @@ int create_table_impl(THD *thd,
*/
if (!file || thd->is_error())
goto err;
- if (rea_create_table(thd, frm, path, db->str, table_name->str, create_info,
- file, frm_only))
+
+ if (thd->variables.keep_files_on_create)
+ create_info->options|= HA_CREATE_KEEP_FILES;
+
+ if (file->ha_create_partitioning_metadata(path, NULL, CHF_CREATE_FLAG))
goto err;
+
+ if (!frm_only)
+ {
+ if (ha_create_table(thd, path, db.str, table_name.str, create_info, frm))
+ {
+ file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
+ deletefrm(path);
+ goto err;
+ }
+ }
}
create_info->table= 0;
if (!frm_only && create_info->tmp_table())
{
- TABLE *table= thd->create_and_open_tmp_table(create_info->db_type, frm,
- path, db->str,
- table_name->str, true,
+ TABLE *table= thd->create_and_open_tmp_table(frm, path, db.str,
+ table_name.str,
false);
if (!table)
@@ -4980,43 +5068,7 @@ int create_table_impl(THD *thd,
thd->thread_specific_used= TRUE;
create_info->table= table; // Store pointer to table
}
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- else if (thd->work_part_info && frm_only)
- {
- /*
- For partitioned tables we can't find some problems with table
- until table is opened. Therefore in order to disallow creation
- of corrupted tables we have to try to open table as the part
- of its creation process.
- In cases when both .FRM and SE part of table are created table
- is implicitly open in ha_create_table() call.
- In cases when we create .FRM without SE part we have to open
- table explicitly.
- */
- TABLE table;
- TABLE_SHARE share;
-
- init_tmp_table_share(thd, &share, db->str, 0, table_name->str, path);
-
- bool result= (open_table_def(thd, &share, GTS_TABLE) ||
- open_table_from_share(thd, &share, &empty_clex_str, 0,
- (uint) READ_ALL, 0, &table, true));
- if (!result)
- (void) closefrm(&table);
-
- free_table_share(&share);
- if (result)
- {
- char frm_name[FN_REFLEN];
- strxnmov(frm_name, sizeof(frm_name), path, reg_ext, NullS);
- (void) mysql_file_delete(key_file_frm, frm_name, MYF(0));
- (void) file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
- goto err;
- }
- }
-#endif
-
error= 0;
err:
THD_STAGE_INFO(thd, stage_after_create);
@@ -5044,13 +5096,11 @@ warn:
-1 Table was used with IF NOT EXISTS and table existed (warning, not error)
*/
-int mysql_create_table_no_lock(THD *thd,
- const LEX_CSTRING *db,
+int mysql_create_table_no_lock(THD *thd, const LEX_CSTRING *db,
const LEX_CSTRING *table_name,
Table_specification_st *create_info,
Alter_info *alter_info, bool *is_trans,
- int create_table_mode,
- TABLE_LIST *table_list)
+ int create_table_mode, TABLE_LIST *table_list)
{
KEY *not_used_1;
uint not_used_2;
@@ -5074,7 +5124,7 @@ int mysql_create_table_no_lock(THD *thd,
}
}
- res= create_table_impl(thd, db, table_name, db, table_name, path,
+ res= create_table_impl(thd, *db, *table_name, *db, *table_name, path,
*create_info, create_info,
alter_info, create_table_mode,
is_trans, &not_used_1, &not_used_2, &frm);
@@ -5300,17 +5350,22 @@ make_unique_key_name(THD *thd, const char *field_name,KEY *start,KEY *end)
*/
static void make_unique_constraint_name(THD *thd, LEX_CSTRING *name,
+ const char *own_name_base,
List<Virtual_column_info> *vcol,
uint *nr)
{
char buff[MAX_FIELD_NAME], *end;
List_iterator_fast<Virtual_column_info> it(*vcol);
-
- end=strmov(buff, "CONSTRAINT_");
- for (;;)
+ end=strmov(buff, own_name_base ? own_name_base : "CONSTRAINT_");
+ for (int round= 0;; round++)
{
Virtual_column_info *check;
- char *real_end= int10_to_str((*nr)++, end, 10);
+ char *real_end= end;
+ if (round == 1 && own_name_base)
+ *end++= '_';
+ // if own_name_base is provided, try it first
+ if (round != 0 || !own_name_base)
+ real_end= int10_to_str((*nr)++, end, 10);
it.rewind();
while ((check= it++))
{
@@ -5548,12 +5603,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
properly isolated from all concurrent operations which matter.
*/
- /* Copy temporarily the statement flags to thd for lock_table_names() */
- // QQ: is this really needed???
- uint save_thd_create_info_options= thd->lex->create_info.options;
- thd->lex->create_info.options|= create_info->options;
- res= open_tables(thd, &thd->lex->query_tables, &not_used, 0);
- thd->lex->create_info.options= save_thd_create_info_options;
+ res= open_tables(thd, *create_info, &thd->lex->query_tables, &not_used, 0);
if (res)
{
@@ -5945,6 +5995,7 @@ static bool is_candidate_key(KEY *key)
thd Thread object.
table The altered table.
alter_info List of columns and indexes to create
+ period_info Application-time period info
DESCRIPTION
Looks for the IF [NOT] EXISTS options, checks the states and remove items
@@ -5955,7 +6006,8 @@ static bool is_candidate_key(KEY *key)
*/
static void
-handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info)
+handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info,
+ Table_period_info *period_info)
{
Field **f_ptr;
DBUG_ENTER("handle_if_exists_option");
@@ -6141,6 +6193,11 @@ drop_create_field:
}
}
}
+ else if (drop->type == Alter_drop::PERIOD)
+ {
+ if (table->s->period.name.streq(drop->name))
+ remove_drop= FALSE;
+ }
else /* Alter_drop::KEY and Alter_drop::FOREIGN_KEY */
{
uint n_key;
@@ -6421,27 +6478,27 @@ remove_key:
}
}
- DBUG_VOID_RETURN;
-}
-
+ /* ADD PERIOD */
-/**
- Get Create_field object for newly created table by field index.
-
- @param alter_info Alter_info describing newly created table.
- @param idx Field index.
-*/
+ if (period_info->create_if_not_exists && table->s->period.name
+ && table->s->period.name.streq(period_info->name))
+ {
+ DBUG_ASSERT(period_info->is_set());
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_DUP_FIELDNAME, ER_THD(thd, ER_DUP_FIELDNAME),
+ period_info->name.str, table->s->table_name.str);
-static Create_field *get_field_by_index(Alter_info *alter_info, uint idx)
-{
- List_iterator_fast<Create_field> field_it(alter_info->create_list);
- uint field_idx= 0;
- Create_field *field;
+ List_iterator<Virtual_column_info> vit(alter_info->check_constraint_list);
+ while (vit++ != period_info->constr)
+ {
+ // do nothing
+ }
+ vit.remove();
- while ((field= field_it++) && field_idx < idx)
- { field_idx++; }
+ *period_info= {};
+ }
- return field;
+ DBUG_VOID_RETURN;
}
@@ -6450,6 +6507,96 @@ static int compare_uint(const uint *s, const uint *t)
return (*s < *t) ? -1 : ((*s > *t) ? 1 : 0);
}
+enum class Compare_keys
+{
+ Equal,
+ EqualButKeyPartLength,
+ NotEqual
+};
+
+Compare_keys compare_keys_but_name(const KEY *table_key, const KEY *new_key,
+ Alter_info *alter_info, const TABLE *table,
+ const KEY *const new_pk,
+ const KEY *const old_pk)
+{
+ Compare_keys result= Compare_keys::Equal;
+
+ if ((table_key->algorithm != new_key->algorithm) ||
+ ((table_key->flags & HA_KEYFLAG_MASK) !=
+ (new_key->flags & HA_KEYFLAG_MASK)) ||
+ (table_key->user_defined_key_parts != new_key->user_defined_key_parts))
+ return Compare_keys::NotEqual;
+
+ if (table_key->block_size != new_key->block_size)
+ return Compare_keys::NotEqual;
+
+ if (engine_options_differ(table_key->option_struct, new_key->option_struct,
+ table->file->ht->index_options))
+ return Compare_keys::NotEqual;
+
+ const KEY_PART_INFO *end=
+ table_key->key_part + table_key->user_defined_key_parts;
+ for (const KEY_PART_INFO *key_part= table_key->key_part,
+ *new_part= new_key->key_part;
+ key_part < end; key_part++, new_part++)
+ {
+ Create_field *new_field= alter_info->create_list.elem(new_part->fieldnr);
+ const Field *old_field= table->field[key_part->fieldnr - 1];
+ /*
+ If there is a change in index length due to column expansion
+ like varchar(X) changed to varchar(X + N) and has a compatible
+ packed data representation, we mark it for fast/INPLACE change
+ in index definition. InnoDB supports INPLACE for these cases.
+
+ Key definition has changed if we are using a different field or
+ if the user key part length is different.
+ */
+ auto old_field_len= old_field->pack_length();
+
+ if (old_field->type() == MYSQL_TYPE_VARCHAR)
+ {
+ old_field_len= (old_field->pack_length() -
+ ((Field_varstring *) old_field)->length_bytes);
+ }
+
+ if (key_part->length == old_field_len &&
+ key_part->length < new_part->length &&
+ (key_part->field->is_equal((Create_field *) new_field) ==
+ IS_EQUAL_PACK_LENGTH))
+ {
+ result= Compare_keys::EqualButKeyPartLength;
+ }
+ else if (key_part->length != new_part->length)
+ return Compare_keys::NotEqual;
+
+ /*
+ For prefix keys KEY_PART_INFO::field points to cloned Field
+ object with adjusted length. So below we have to check field
+ indexes instead of simply comparing pointers to Field objects.
+ */
+ if (!new_field->field ||
+ new_field->field->field_index != key_part->fieldnr - 1)
+ return Compare_keys::NotEqual;
+ }
+
+ /*
+ Rebuild the index if either of the following conditions is satisfied:
+
+ (i) Old table doesn't have primary key, new table has it and vice-versa
+ (ii) Primary key changed to another existing index
+ */
+ if ((new_key == new_pk) != (table_key == old_pk))
+ return Compare_keys::NotEqual;
+
+ /* Check that key comment is not changed. */
+ if (table_key->comment.length != new_key->comment.length ||
+ (table_key->comment.length &&
+ memcmp(table_key->comment.str, new_key->comment.str,
+ table_key->comment.length) != 0))
+ return Compare_keys::NotEqual;
+
+ return result;
+}
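A standalone sketch (not MariaDB code) of the pattern the new compare_keys_but_name() enables: returning a three-valued Compare_keys instead of a boolean lets the caller keep an index, keep it but flag a pack-length-compatible prefix growth, or drop and re-create it. The Key_def struct and classify_key_change() below are invented, single-key-part stand-ins for the real KEY/Create_field machinery.

    #include <cstdio>
    #include <string>

    enum class Compare_keys { Equal, EqualButKeyPartLength, NotEqual };

    struct Key_def
    {
      std::string field;     // column covered by the (single-part) key
      unsigned prefix_len;   // indexed prefix length in bytes
    };

    // Same field and prefix: Equal. Same field, longer prefix: treat as a
    // pack-length-compatible growth. Anything else: NotEqual.
    static Compare_keys classify_key_change(const Key_def &old_key,
                                            const Key_def &new_key)
    {
      if (old_key.field != new_key.field)
        return Compare_keys::NotEqual;
      if (old_key.prefix_len == new_key.prefix_len)
        return Compare_keys::Equal;
      return old_key.prefix_len < new_key.prefix_len
                 ? Compare_keys::EqualButKeyPartLength
                 : Compare_keys::NotEqual;
    }

    int main()
    {
      Key_def old_key{"c1", 10}, new_key{"c1", 20};
      switch (classify_key_change(old_key, new_key))
      {
      case Compare_keys::Equal:
        puts("keep the index as is");
        break;
      case Compare_keys::EqualButKeyPartLength:
        puts("keep the index, flag ALTER_COLUMN_INDEX_LENGTH");
        break;
      case Compare_keys::NotEqual:
        puts("drop and re-create the index");
        break;
      }
      return 0;
    }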
/**
Compare original and new versions of a table and fill Alter_inplace_info
@@ -6496,21 +6643,21 @@ static int compare_uint(const uint *s, const uint *t)
static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
Alter_inplace_info *ha_alter_info)
{
- Field **f_ptr, *field, *old_field;
+ Field **f_ptr, *field;
List_iterator_fast<Create_field> new_field_it;
Create_field *new_field;
- KEY_PART_INFO *key_part, *new_part;
- KEY_PART_INFO *end;
Alter_info *alter_info= ha_alter_info->alter_info;
DBUG_ENTER("fill_alter_inplace_info");
DBUG_PRINT("info", ("alter_info->flags: %llu", alter_info->flags));
/* Allocate result buffers. */
+ DBUG_ASSERT(ha_alter_info->rename_keys.mem_root() == thd->mem_root);
if (! (ha_alter_info->index_drop_buffer=
(KEY**) thd->alloc(sizeof(KEY*) * table->s->keys)) ||
! (ha_alter_info->index_add_buffer=
(uint*) thd->alloc(sizeof(uint) *
- alter_info->key_list.elements)))
+ alter_info->key_list.elements)) ||
+ ha_alter_info->rename_keys.reserve(ha_alter_info->index_add_count))
DBUG_RETURN(true);
/*
@@ -6794,7 +6941,6 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
Go through keys and check if the original ones are compatible
with new table.
*/
- uint old_field_len= 0;
KEY *table_key;
KEY *table_key_end= table->key_info + table->s->keys;
KEY *new_key;
@@ -6841,88 +6987,18 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
continue;
}
- /* Check that the key types are compatible between old and new tables. */
- if ((table_key->algorithm != new_key->algorithm) ||
- ((table_key->flags & HA_KEYFLAG_MASK) !=
- (new_key->flags & HA_KEYFLAG_MASK)) ||
- (table_key->user_defined_key_parts !=
- new_key->user_defined_key_parts))
- goto index_changed;
-
- if (table_key->block_size != new_key->block_size)
- goto index_changed;
-
- if (engine_options_differ(table_key->option_struct, new_key->option_struct,
- table->file->ht->index_options))
- goto index_changed;
-
- /*
- Check that the key parts remain compatible between the old and
- new tables.
- */
- end= table_key->key_part + table_key->user_defined_key_parts;
- for (key_part= table_key->key_part, new_part= new_key->key_part;
- key_part < end;
- key_part++, new_part++)
+ switch (compare_keys_but_name(table_key, new_key, alter_info, table, new_pk,
+ old_pk))
{
- new_field= get_field_by_index(alter_info, new_part->fieldnr);
- old_field= table->field[key_part->fieldnr - 1];
- /*
- If there is a change in index length due to column expansion
- like varchar(X) changed to varchar(X + N) and has a compatible
- packed data representation, we mark it for fast/INPLACE change
- in index definition. InnoDB supports INPLACE for this cases
-
- Key definition has changed if we are using a different field or
- if the user key part length is different.
- */
- old_field_len= old_field->pack_length();
-
- if (old_field->type() == MYSQL_TYPE_VARCHAR)
- {
- old_field_len= (old_field->pack_length()
- - ((Field_varstring*) old_field)->length_bytes);
- }
-
- if (key_part->length == old_field_len &&
- key_part->length < new_part->length &&
- (key_part->field->is_equal((Create_field*) new_field)
- == IS_EQUAL_PACK_LENGTH))
- {
- ha_alter_info->handler_flags |= ALTER_COLUMN_INDEX_LENGTH;
- }
- else if (key_part->length != new_part->length)
- goto index_changed;
-
- /*
- For prefix keys KEY_PART_INFO::field points to cloned Field
- object with adjusted length. So below we have to check field
- indexes instead of simply comparing pointers to Field objects.
- */
- if (! new_field->field ||
- new_field->field->field_index != key_part->fieldnr - 1)
- goto index_changed;
+ case Compare_keys::Equal:
+ continue;
+ case Compare_keys::EqualButKeyPartLength:
+ ha_alter_info->handler_flags|= ALTER_COLUMN_INDEX_LENGTH;
+ continue;
+ case Compare_keys::NotEqual:
+ break;
}
- /*
- Rebuild the index if following condition get satisfied:
-
- (i) Old table doesn't have primary key, new table has it and vice-versa
- (ii) Primary key changed to another existing index
- */
- if ((new_key == new_pk) != (table_key == old_pk))
- goto index_changed;
-
- /* Check that key comment is not changed. */
- if (table_key->comment.length != new_key->comment.length ||
- (table_key->comment.length &&
- memcmp(table_key->comment.str, new_key->comment.str,
- table_key->comment.length) != 0))
- goto index_changed;
-
- continue;
-
- index_changed:
/* Key modified. Add the key / key offset to both buffers. */
ha_alter_info->index_drop_buffer
[ha_alter_info->index_drop_count++]=
@@ -6962,6 +7038,40 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar,
new_key->option_struct;
}
+ for (uint i= 0; i < ha_alter_info->index_add_count; i++)
+ {
+ uint *add_buffer= ha_alter_info->index_add_buffer;
+ const KEY *new_key= ha_alter_info->key_info_buffer + add_buffer[i];
+
+ for (uint j= 0; j < ha_alter_info->index_drop_count; j++)
+ {
+ KEY **drop_buffer= ha_alter_info->index_drop_buffer;
+ const KEY *old_key= drop_buffer[j];
+
+ if (compare_keys_but_name(old_key, new_key, alter_info, table, new_pk,
+ old_pk) != Compare_keys::Equal)
+ {
+ continue;
+ }
+
+ DBUG_ASSERT(
+ lex_string_cmp(system_charset_info, &old_key->name, &new_key->name));
+
+ ha_alter_info->handler_flags|= ALTER_RENAME_INDEX;
+ ha_alter_info->rename_keys.push_back(
+ Alter_inplace_info::Rename_key_pair(old_key, new_key));
+
+ --ha_alter_info->index_add_count;
+ --ha_alter_info->index_drop_count;
+ memcpy(add_buffer + i, add_buffer + i + 1,
+ sizeof(add_buffer[0]) * (ha_alter_info->index_add_count - i));
+ memcpy(drop_buffer + j, drop_buffer + j + 1,
+ sizeof(drop_buffer[0]) * (ha_alter_info->index_drop_count - j));
+ --i; // re-examine this slot, the next added key has shifted into it
+ break;
+ }
+ }
+
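The loop just added pairs every newly added key with a dropped key that compares Equal, records the pair as a rename, and compacts both buffers in place. A standalone sketch (not MariaDB code; Key, the vectors and the string comparison stand in for the KEY pointers and compare_keys_but_name()):

    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    struct Key { std::string name; std::string definition; };

    int main()
    {
      std::vector<Key> dropped{{"idx_old", "(a,b)"}, {"idx_gone", "(c)"}};
      std::vector<Key> added{{"idx_new", "(a,b)"}};
      std::vector<std::pair<Key, Key>> renamed;   // (old, new) pairs

      for (size_t i= 0; i < added.size(); )
      {
        bool paired= false;
        for (size_t j= 0; j < dropped.size(); j++)
        {
          if (added[i].definition != dropped[j].definition)
            continue;                              // structurally different
          renamed.emplace_back(dropped[j], added[i]);
          dropped.erase(dropped.begin() + j);      // not really dropped
          added.erase(added.begin() + i);          // not really added; the next
          paired= true;                            // candidate slides into slot i
          break;
        }
        if (!paired)
          i++;
      }

      for (const auto &r : renamed)
        printf("rename %s -> %s\n", r.first.name.c_str(), r.second.name.c_str());
      printf("still dropped: %zu, still added: %zu\n", dropped.size(), added.size());
      return 0;
    }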
/*
Sort index_add_buffer according to how key_info_buffer is sorted.
I.e. with primary keys first - see sort_keys().
@@ -7391,13 +7501,15 @@ static bool mysql_inplace_alter_table(THD *thd,
Open_table_context ot_ctx(thd, MYSQL_OPEN_REOPEN | MYSQL_OPEN_IGNORE_KILLED);
handlerton *db_type= table->s->db_type();
MDL_ticket *mdl_ticket= table->mdl_ticket;
- HA_CREATE_INFO *create_info= ha_alter_info->create_info;
Alter_info *alter_info= ha_alter_info->alter_info;
bool reopen_tables= false;
bool res;
DBUG_ENTER("mysql_inplace_alter_table");
+ /* Downgrade DDL lock while we are waiting for exclusive lock below */
+ backup_set_alter_copy_lock(thd, table);
+
/*
Upgrade to EXCLUSIVE lock if:
- This is requested by the storage engine
@@ -7470,9 +7582,7 @@ static bool mysql_inplace_alter_table(THD *thd,
thd->mdl_context.upgrade_shared_lock(table->mdl_ticket,
MDL_SHARED_NO_WRITE,
thd->variables.lock_wait_timeout))
- {
goto cleanup;
- }
// It's now safe to take the table level lock.
if (lock_tables(thd, table_list, alter_ctx->tables_opened, 0))
@@ -7509,9 +7619,7 @@ static bool mysql_inplace_alter_table(THD *thd,
if (table->file->ha_prepare_inplace_alter_table(altered_table,
ha_alter_info))
- {
goto rollback;
- }
/*
Downgrade the lock if storage engine has told us that exclusive lock was
@@ -7553,6 +7661,10 @@ static bool mysql_inplace_alter_table(THD *thd,
if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME))
goto rollback;
+ /* Set MDL_BACKUP_DDL */
+ if (backup_reset_alter_copy_lock(thd))
+ goto rollback;
+
/*
If we are killed after this point, we should ignore and continue.
We have mostly completed the operation at this point, there should
@@ -7596,8 +7708,6 @@ static bool mysql_inplace_alter_table(THD *thd,
{
goto rollback;
}
-
- thd->drop_temporary_table(altered_table, NULL, false);
}
close_all_tables_for_name(thd, table->s,
@@ -7612,7 +7722,7 @@ static bool mysql_inplace_alter_table(THD *thd,
Rename to the new name (if needed) will be handled separately below.
TODO: remove this check of thd->is_error() (now it intercept
- errors in some val_*() methoids and bring some single place to
+ errors in some val_*() methods and bring some single place to
such error interception).
*/
if (mysql_rename_table(db_type, &alter_ctx->new_db, &alter_ctx->tmp_name,
@@ -7621,9 +7731,6 @@ static bool mysql_inplace_alter_table(THD *thd,
thd->is_error())
{
// Since changes were done in-place, we can't revert them.
- (void) quick_rm_table(thd, db_type,
- &alter_ctx->new_db, &alter_ctx->tmp_name,
- FN_IS_TMP | NO_HA_TABLE);
DBUG_RETURN(true);
}
@@ -7700,10 +7807,6 @@ static bool mysql_inplace_alter_table(THD *thd,
thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0);
/* QQ; do something about metadata locks ? */
}
- thd->drop_temporary_table(altered_table, NULL, false);
- // Delete temporary .frm/.par
- (void) quick_rm_table(thd, create_info->db_type, &alter_ctx->new_db,
- &alter_ctx->tmp_name, FN_IS_TMP | NO_HA_TABLE);
DBUG_RETURN(true);
}
@@ -7820,6 +7923,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
Create_field *def;
Field **f_ptr,*field;
MY_BITMAP *dropped_fields= NULL; // if it's NULL - no dropped fields
+ bool drop_period= false;
DBUG_ENTER("mysql_prepare_alter_table");
/*
@@ -8189,9 +8293,9 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
Collect all keys that aren't in the drop list. Add only those
for which some fields exist.
*/
-
for (uint i=0 ; i < table->s->keys ; i++,key_info++)
{
+ bool long_hash_key= false;
if (key_info->flags & HA_INVISIBLE_KEY)
continue;
const char *key_name= key_info->name.str;
@@ -8224,6 +8328,11 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
continue;
}
+ if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+ {
+ setup_keyinfo_hash(key_info);
+ long_hash_key= true;
+ }
const char *dropped_key_part= NULL;
KEY_PART_INFO *key_part= key_info->key_part;
key_parts.empty();
@@ -8317,6 +8426,8 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
enum Key::Keytype key_type;
LEX_CSTRING tmp_name;
bzero((char*) &key_create_info, sizeof(key_create_info));
+ if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+ key_info->algorithm= HA_KEY_ALG_UNDEF;
key_create_info.algorithm= key_info->algorithm;
/*
We copy block size directly as some engines, like Aria, set this
@@ -8346,6 +8457,11 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
if (dropped_key_part)
{
my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), dropped_key_part);
+ if (long_hash_key)
+ {
+ key_info->algorithm= HA_KEY_ALG_LONG_HASH;
+ re_setup_keyinfo_hash(key_info);
+ }
goto err;
}
}
@@ -8356,11 +8472,17 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
tmp_name.str= key_name;
tmp_name.length= strlen(key_name);
+ /* We don't need the LONG_UNIQUE_HASH_FIELD flag because it will be autogenerated */
key= new Key(key_type, &tmp_name, &key_create_info,
MY_TEST(key_info->flags & HA_GENERATED_KEY),
&key_parts, key_info->option_list, DDL_options());
new_key_list.push_back(key, thd->mem_root);
}
+ if (long_hash_key)
+ {
+ key_info->algorithm= HA_KEY_ALG_LONG_HASH;
+ re_setup_keyinfo_hash(key_info);
+ }
}
{
Key *key;
@@ -8379,6 +8501,35 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
}
}
+ if (table->s->period.name)
+ {
+ drop_it.rewind();
+ Alter_drop *drop;
+ for (bool found= false; !found && (drop= drop_it++); )
+ {
+ found= drop->type == Alter_drop::PERIOD &&
+ table->s->period.name.streq(drop->name);
+ }
+
+ if (drop)
+ {
+ drop_period= true;
+ drop_it.remove();
+ }
+ else if (create_info->period_info.is_set() && table->s->period.name)
+ {
+ my_error(ER_MORE_THAN_ONE_PERIOD, MYF(0));
+ goto err;
+ }
+ else
+ {
+ Field *s= table->s->period.start_field(table->s);
+ Field *e= table->s->period.end_field(table->s);
+ create_info->period_info.set_period(s->field_name, e->field_name);
+ create_info->period_info.name= table->s->period.name;
+ }
+ }
+
/* Add all table level constraints which are not in the drop list */
if (table->s->table_check_constraints)
{
@@ -8389,6 +8540,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
{
Virtual_column_info *check= table->check_constraints[i];
Alter_drop *drop;
+ bool keep= true;
drop_it.rewind();
while ((drop=drop_it++))
{
@@ -8396,17 +8548,39 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
!my_strcasecmp(system_charset_info, check->name.str, drop->name))
{
drop_it.remove();
+ keep= false;
break;
}
}
+
+ if (share->period.constr_name.streq(check->name.str))
+ {
+ if (!drop_period && !keep)
+ {
+ my_error(ER_PERIOD_CONSTRAINT_DROP, MYF(0), check->name.str,
+ share->period.name.str);
+ goto err;
+ }
+ keep= keep && !drop_period;
+
+ DBUG_ASSERT(create_info->period_info.constr == NULL || drop_period);
+
+ if (keep)
+ {
+ Item *expr_copy= check->expr->get_copy(thd);
+ check= new Virtual_column_info();
+ check->expr= expr_copy;
+ create_info->period_info.constr= check;
+ }
+ }
/* see if the constraint depends on *only* on dropped fields */
- if (!drop && dropped_fields)
+ if (keep && dropped_fields)
{
table->default_column_bitmaps();
bitmap_clear_all(table->read_set);
check->expr->walk(&Item::register_field_in_read_map, 1, 0);
if (bitmap_is_subset(table->read_set, dropped_fields))
- drop= (Alter_drop*)1;
+ keep= false;
else if (bitmap_is_overlapping(dropped_fields, table->read_set))
{
bitmap_intersect(table->read_set, dropped_fields);
@@ -8416,7 +8590,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
goto err;
}
}
- if (!drop)
+ if (keep)
{
if (alter_info->flags & ALTER_RENAME_COLUMN)
{
@@ -8440,8 +8614,9 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
case Alter_drop::KEY:
case Alter_drop::COLUMN:
case Alter_drop::CHECK_CONSTRAINT:
- my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), drop->type_name(),
- alter_info->drop_list.head()->name);
+ case Alter_drop::PERIOD:
+ my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), drop->type_name(),
+ alter_info->drop_list.head()->name);
goto err;
case Alter_drop::FOREIGN_KEY:
// Leave the DROP FOREIGN KEY names in the alter_info->drop_list.
@@ -8986,6 +9161,49 @@ simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list,
}
+static void cleanup_table_after_inplace_alter_keep_files(TABLE *table)
+{
+ TABLE_SHARE *share= table->s;
+ closefrm(table);
+ free_table_share(share);
+}
+
+
+static void cleanup_table_after_inplace_alter(TABLE *table)
+{
+ table->file->ha_create_partitioning_metadata(table->s->normalized_path.str, 0,
+ CHF_DELETE_FLAG);
+ deletefrm(table->s->normalized_path.str);
+ cleanup_table_after_inplace_alter_keep_files(table);
+}
+
+
+static int create_table_for_inplace_alter(THD *thd,
+ const Alter_table_ctx &alter_ctx,
+ LEX_CUSTRING *frm,
+ TABLE_SHARE *share,
+ TABLE *table)
+{
+ init_tmp_table_share(thd, share, alter_ctx.new_db.str, 0,
+ alter_ctx.new_name.str, alter_ctx.get_tmp_path());
+ if (share->init_from_binary_frm_image(thd, true, frm->str, frm->length) ||
+ open_table_from_share(thd, share, &alter_ctx.new_name, 0,
+ EXTRA_RECORD, thd->open_options,
+ table, false))
+ {
+ free_table_share(share);
+ deletefrm(alter_ctx.get_tmp_path());
+ return 1;
+ }
+ if (table->internal_tables && open_and_lock_internal_tables(table, false))
+ {
+ cleanup_table_after_inplace_alter(table);
+ return 1;
+ }
+ return 0;
+}
+
+
/**
Alter table
@@ -9084,6 +9302,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
uint tables_opened;
thd->open_options|= HA_OPEN_FOR_ALTER;
+ thd->mdl_backup_ticket= 0;
bool error= open_tables(thd, &table_list, &tables_opened, 0,
&alter_prelocking_strategy);
thd->open_options&= ~HA_OPEN_FOR_ALTER;
@@ -9191,12 +9410,12 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db,
}
/*
- Global intention exclusive lock must have been already acquired when
- table to be altered was open, so there is no need to do it here.
+ Protection against global read lock must have been acquired when table
+ to be altered was being opened.
*/
- DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::GLOBAL,
+ DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::BACKUP,
"", "",
- MDL_INTENTION_EXCLUSIVE));
+ MDL_BACKUP_DDL));
if (thd->mdl_context.acquire_locks(&mdl_requests,
thd->variables.lock_wait_timeout))
@@ -9363,7 +9582,7 @@ do_continue:;
}
}
- handle_if_exists_options(thd, table, alter_info);
+ handle_if_exists_options(thd, table, alter_info, &create_info->period_info);
/*
Look if we have to do anything at all.
@@ -9443,7 +9662,11 @@ do_continue:;
DBUG_RETURN(true);
}
- set_table_default_charset(thd, create_info, &alter_ctx.db);
+ set_table_default_charset(thd, create_info, alter_ctx.db);
+
+ if (create_info->check_period_fields(thd, alter_info)
+ || create_info->fix_period_fields(thd, alter_info))
+ DBUG_RETURN(true);
if (!opt_explicit_defaults_for_timestamp)
promote_first_timestamp_column(&alter_info->create_list);
@@ -9598,8 +9821,6 @@ do_continue:;
}
DEBUG_SYNC(thd, "alter_table_before_create_table_no_lock");
- /* We can abort alter table for any table type */
- thd->abort_on_warning= !ignore && thd->is_strict_mode();
/*
Create .FRM for new version of table with a temporary name.
@@ -9621,15 +9842,13 @@ do_continue:;
tmp_disable_binlog(thd);
create_info->options|=HA_CREATE_TMP_ALTER;
- error= create_table_impl(thd,
- &alter_ctx.db, &alter_ctx.table_name,
- &alter_ctx.new_db, &alter_ctx.tmp_name,
+ error= create_table_impl(thd, alter_ctx.db, alter_ctx.table_name,
+ alter_ctx.new_db, alter_ctx.tmp_name,
alter_ctx.get_tmp_path(),
thd->lex->create_info, create_info, alter_info,
C_ALTER_TABLE_FRM_ONLY, NULL,
&key_info, &key_count, &frm);
reenable_binlog(thd);
- thd->abort_on_warning= false;
if (unlikely(error))
{
my_free(const_cast<uchar*>(frm.str));
@@ -9645,7 +9864,8 @@ do_continue:;
key_info, key_count,
IF_PARTITIONING(thd->work_part_info, NULL),
ignore);
- TABLE *altered_table= NULL;
+ TABLE_SHARE altered_share;
+ TABLE altered_table;
bool use_inplace= true;
/* Fill the Alter_inplace_info structure. */
@@ -9674,11 +9894,10 @@ do_continue:;
Also note that we ignore the LOCK clause here.
- TODO don't create the frm in the first place
+ TODO don't create partitioning metadata in the first place
*/
- const char *path= alter_ctx.get_tmp_path();
- table->file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
- deletefrm(path);
+ table->file->ha_create_partitioning_metadata(alter_ctx.get_tmp_path(),
+ NULL, CHF_DELETE_FLAG);
my_free(const_cast<uchar*>(frm.str));
goto end_inplace;
}
@@ -9686,44 +9905,43 @@ do_continue:;
// We assume that the table is non-temporary.
DBUG_ASSERT(!table->s->tmp_table);
- if (!(altered_table=
- thd->create_and_open_tmp_table(new_db_type, &frm,
- alter_ctx.get_tmp_path(),
- alter_ctx.new_db.str,
- alter_ctx.new_name.str,
- false, true)))
+ if (create_table_for_inplace_alter(thd, alter_ctx, &frm, &altered_share,
+ &altered_table))
goto err_new_table_cleanup;
/* Set markers for fields in TABLE object for altered table. */
- update_altered_table(ha_alter_info, altered_table);
+ update_altered_table(ha_alter_info, &altered_table);
/*
Mark all columns in 'altered_table' as used to allow usage
of its record[0] buffer and Field objects during in-place
ALTER TABLE.
*/
- altered_table->column_bitmaps_set_no_signal(&altered_table->s->all_set,
- &altered_table->s->all_set);
- restore_record(altered_table, s->default_values); // Create empty record
+ altered_table.column_bitmaps_set_no_signal(&altered_table.s->all_set,
+ &altered_table.s->all_set);
+ restore_record(&altered_table, s->default_values); // Create empty record
/* Check that we can call default functions with default field values */
thd->count_cuted_fields= CHECK_FIELD_EXPRESSION;
- altered_table->reset_default_fields();
- if (altered_table->default_field &&
- altered_table->update_default_fields(0, 1))
+ altered_table.reset_default_fields();
+ if (altered_table.default_field &&
+ altered_table.update_default_fields(0, 1))
+ {
+ cleanup_table_after_inplace_alter(&altered_table);
goto err_new_table_cleanup;
+ }
thd->count_cuted_fields= CHECK_FIELD_IGNORE;
if (alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_NONE)
ha_alter_info.online= true;
// Ask storage engine whether to use copy or in-place
enum_alter_inplace_result inplace_supported=
- table->file->check_if_supported_inplace_alter(altered_table,
+ table->file->check_if_supported_inplace_alter(&altered_table,
&ha_alter_info);
if (alter_info->supports_algorithm(thd, inplace_supported, &ha_alter_info) ||
alter_info->supports_lock(thd, inplace_supported, &ha_alter_info))
{
- thd->drop_temporary_table(altered_table, NULL, false);
+ cleanup_table_after_inplace_alter(&altered_table);
goto err_new_table_cleanup;
}
@@ -9748,21 +9966,23 @@ do_continue:;
for alter table.
*/
thd->count_cuted_fields = CHECK_FIELD_WARN;
- int res= mysql_inplace_alter_table(thd, table_list, table, altered_table,
+ int res= mysql_inplace_alter_table(thd, table_list, table, &altered_table,
&ha_alter_info, inplace_supported,
&target_mdl_request, &alter_ctx);
thd->count_cuted_fields= save_count_cuted_fields;
my_free(const_cast<uchar*>(frm.str));
if (res)
+ {
+ cleanup_table_after_inplace_alter(&altered_table);
DBUG_RETURN(true);
+ }
+ cleanup_table_after_inplace_alter_keep_files(&altered_table);
goto end_inplace;
}
else
- {
- thd->drop_temporary_table(altered_table, NULL, false);
- }
+ cleanup_table_after_inplace_alter_keep_files(&altered_table);
}
/* ALTER TABLE using copy algorithm. */
@@ -9813,15 +10033,15 @@ do_continue:;
/* Mark that we have created table in storage engine. */
no_ha_table= false;
- new_table=
- thd->create_and_open_tmp_table(new_db_type, &frm, alter_ctx.get_tmp_path(),
- alter_ctx.new_db.str,
- alter_ctx.new_name.str,
- true, true);
+ /* Open the table since we need to copy the data. */
+ new_table= thd->create_and_open_tmp_table(&frm,
+ alter_ctx.get_tmp_path(),
+ alter_ctx.new_db.str,
+ alter_ctx.new_name.str,
+ true);
if (!new_table)
goto err_new_table_cleanup;
- /* Open the table since we need to copy the data. */
if (table->s->tmp_table != NO_TMP_TABLE)
{
/* in case of alter temp table send the tracker in OK packet */
@@ -10113,19 +10333,17 @@ err_new_table_cleanup:
if (unlikely(alter_ctx.error_if_not_empty &&
thd->get_stmt_da()->current_row_for_warning()))
{
- const char *f_val= 0;
- enum enum_mysql_timestamp_type t_type= MYSQL_TIMESTAMP_DATE;
+ const char *f_val= "0000-00-00";
+ const char *f_type= "date";
switch (alter_ctx.datetime_field->real_field_type())
{
case MYSQL_TYPE_DATE:
case MYSQL_TYPE_NEWDATE:
- f_val= "0000-00-00";
- t_type= MYSQL_TIMESTAMP_DATE;
break;
case MYSQL_TYPE_DATETIME:
case MYSQL_TYPE_DATETIME2:
f_val= "0000-00-00 00:00:00";
- t_type= MYSQL_TIMESTAMP_DATETIME;
+ f_type= "datetime";
break;
default:
/* Shouldn't get here. */
@@ -10133,10 +10351,11 @@ err_new_table_cleanup:
}
bool save_abort_on_warning= thd->abort_on_warning;
thd->abort_on_warning= true;
- make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN,
- f_val, strlength(f_val), t_type,
- new_table->s,
- alter_ctx.datetime_field->field_name.str);
+ thd->push_warning_truncated_value_for_field(Sql_condition::WARN_LEVEL_WARN,
+ f_type, f_val,
+ new_table->s,
+ alter_ctx.datetime_field->
+ field_name.str);
thd->abort_on_warning= save_abort_on_warning;
}
@@ -10273,12 +10492,11 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
DBUG_RETURN(-1);
}
+ backup_set_alter_copy_lock(thd, from);
+
alter_table_manage_keys(to, from->file->indexes_are_disabled(), keys_onoff);
- /* Set read map for all fields in from table */
from->default_column_bitmaps();
- bitmap_set_all(from->read_set);
- from->file->column_bitmaps_signal();
/* We can abort alter table for any table type */
thd->abort_on_warning= !ignore && thd->is_strict_mode();
@@ -10308,7 +10526,11 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
if (def->field == from->found_next_number_field)
thd->variables.sql_mode|= MODE_NO_AUTO_VALUE_ON_ZERO;
}
- (copy_end++)->set(*ptr,def->field,0);
+ if (!(*ptr)->vcol_info)
+ {
+ bitmap_set_bit(from->read_set, def->field->field_index);
+ (copy_end++)->set(*ptr,def->field,0);
+ }
}
else
{
@@ -10354,8 +10576,8 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
Filesort_tracker dummy_tracker(false);
Filesort fsort(order, HA_POS_ERROR, true, NULL);
- if (thd->lex->select_lex.setup_ref_array(thd, order_num) ||
- setup_order(thd, thd->lex->select_lex.ref_pointer_array,
+ if (thd->lex->first_select_lex()->setup_ref_array(thd, order_num) ||
+ setup_order(thd, thd->lex->first_select_lex()->ref_pointer_array,
&tables, fields, all_fields, order))
goto err;
@@ -10380,6 +10602,11 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
from_row_end= from->vers_end_field();
}
+ if (from_row_end)
+ bitmap_set_bit(from->read_set, from_row_end->field_index);
+
+ from->file->column_bitmaps_signal();
+
THD_STAGE_INFO(thd, stage_copy_to_tmp_table);
/* Tell handler that we have values for all columns in the to table */
to->use_all_columns();
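The two hunks above stop marking every source column as read: copy_data_between_tables() now sets read_set bits only for the base columns it actually copies, plus row_end for system-versioned tables, before signalling the bitmaps to the handler. A standalone sketch (not MariaDB code; Column and the bitset stand in for Field and MY_BITMAP) of building such a selective read set:

    #include <bitset>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct Column { std::string name; bool is_virtual; };

    int main()
    {
      std::vector<Column> from_cols{{"id", false}, {"payload", false},
                                    {"v_gen", true}, {"row_end", false}};
      std::bitset<64> read_set;                // one bit per source column

      for (size_t i= 0; i < from_cols.size(); i++)
        if (!from_cols[i].is_virtual)          // virtual columns are recomputed
          read_set.set(i);                     // in the new table, not read here

      read_set.set(3);                         // row_end is needed when copying
                                               // a system-versioned table
      for (size_t i= 0; i < from_cols.size(); i++)
        printf("%-8s read=%d\n", from_cols[i].name.c_str(),
               (int) read_set.test(i));
      return 0;
    }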
@@ -10551,6 +10778,9 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
cleanup_done= 1;
to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
+ if (backup_reset_alter_copy_lock(thd))
+ error= 1;
+
if (unlikely(mysql_trans_commit_alter_copy_data(thd)))
error= 1;
@@ -10569,7 +10799,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
if (!cleanup_done)
{
- /* This happens if we get an error during initialzation of data */
+ /* This happens if we get an error during initialization of data */
DBUG_ASSERT(error);
to->file->ha_end_bulk_insert();
ha_enable_transaction(thd, TRUE);
@@ -10730,7 +10960,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
(256 - (1 << t->s->last_null_bit_pos)):
0);
- t->use_all_columns();
+ t->use_all_stored_columns();
if (t->file->ha_rnd_init(1))
protocol->store_null();
@@ -10888,10 +11118,10 @@ bool check_engine(THD *thd, const char *db_name,
if (req_engine && req_engine != *new_engine)
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
- ER_WARN_USING_OTHER_HANDLER,
+ ER_WARN_USING_OTHER_HANDLER,
ER_THD(thd, ER_WARN_USING_OTHER_HANDLER),
- ha_resolve_storage_engine_name(*new_engine),
- table_name);
+ ha_resolve_storage_engine_name(*new_engine),
+ table_name);
}
if (create_info->tmp_table() &&
ha_check_storage_engine_flag(*new_engine, HTON_TEMPORARY_NOT_SUPPORTED))
diff --git a/sql/sql_table.h b/sql/sql_table.h
index 9958a56958e..6f53dd8e75d 100644
--- a/sql/sql_table.h
+++ b/sql/sql_table.h
@@ -198,8 +198,8 @@ int mysql_create_table_no_lock(THD *thd, const LEX_CSTRING *db,
int create_table_mode, TABLE_LIST *table);
handler *mysql_create_frm_image(THD *thd,
- const LEX_CSTRING *db,
- const LEX_CSTRING *table_name,
+ const LEX_CSTRING &db,
+ const LEX_CSTRING &table_name,
HA_CREATE_INFO *create_info,
Alter_info *alter_info,
int create_table_mode,
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index 3d43c35177d..93085251711 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -24,6 +24,7 @@
#include "sql_show.h" // calc_sum_of_all_status
#include "sql_select.h"
#include "keycaches.h"
+#include "my_json_writer.h"
#include <hash.h>
#include <thr_alarm.h>
#if defined(HAVE_MALLINFO) && defined(HAVE_MALLOC_H)
@@ -36,6 +37,8 @@
#include "events.h"
#endif
+#define FT_KEYPART (MAX_FIELDS+10)
+
static const char *lock_descriptions[] =
{
/* TL_UNLOCK */ "No lock",
@@ -225,8 +228,6 @@ TEST_join(JOIN *join)
}
-#define FT_KEYPART (MAX_FIELDS+10)
-
static void print_keyuse(KEYUSE *keyuse)
{
char buff[256];
@@ -263,7 +264,6 @@ void print_keyuse_array(DYNAMIC_ARRAY *keyuse_array)
DBUG_UNLOCK_FILE;
}
-
/*
Print the current state during query optimization.
@@ -655,3 +655,24 @@ Memory allocated by threads: %s\n",
puts("");
fflush(stdout);
}
+
+void print_keyuse_array_for_trace(THD *thd, DYNAMIC_ARRAY *keyuse_array)
+{
+ Json_writer_object wrapper(thd);
+ Json_writer_array trace_key_uses(thd, "ref_optimizer_key_uses");
+ for (uint i= 0; i < keyuse_array->elements; i++)
+ {
+ KEYUSE *keyuse= (KEYUSE*)dynamic_array_ptr(keyuse_array, i);
+ Json_writer_object keyuse_elem(thd);
+ keyuse_elem.add_table_name(keyuse->table->reginfo.join_tab);
+ keyuse_elem.add("field", (keyuse->keypart == FT_KEYPART) ? "<fulltext>":
+ (keyuse->is_for_hash_join() ?
+ keyuse->table->field[keyuse->keypart]
+ ->field_name.str :
+ keyuse->table->key_info[keyuse->key]
+ .key_part[keyuse->keypart]
+ .field->field_name.str));
+ keyuse_elem.add("equals", keyuse->val);
+ keyuse_elem.add("null_rejecting", keyuse->null_rejecting);
+ }
+}
diff --git a/sql/sql_test.h b/sql/sql_test.h
index 867582a9569..cbef581b784 100644
--- a/sql/sql_test.h
+++ b/sql/sql_test.h
@@ -17,6 +17,7 @@
#define SQL_TEST_INCLUDED
#include "mysqld.h"
+#include "opt_trace_context.h"
class JOIN;
struct TABLE_LIST;
@@ -34,6 +35,7 @@ void print_keyuse_array(DYNAMIC_ARRAY *keyuse_array);
void print_sjm(SJ_MATERIALIZATION_INFO *sjm);
void dump_TABLE_LIST_graph(SELECT_LEX *select_lex, TABLE_LIST* tl);
#endif
+void print_keyuse_array_for_trace(THD *thd, DYNAMIC_ARRAY *keyuse_array);
void mysql_print_status();
#endif /* SQL_TEST_INCLUDED */
diff --git a/sql/sql_time.cc b/sql/sql_time.cc
index 73cf14650a1..fea23020d7e 100644
--- a/sql/sql_time.cc
+++ b/sql/sql_time.cc
@@ -175,7 +175,7 @@ int calc_weekday(long daynr,bool sunday_first_day_of_week)
next week is week 1.
*/
-uint calc_week(MYSQL_TIME *l_time, uint week_behaviour, uint *year)
+uint calc_week(const MYSQL_TIME *l_time, uint week_behaviour, uint *year)
{
uint days;
ulong daynr=calc_daynr(l_time->year,l_time->month,l_time->day);
@@ -289,14 +289,14 @@ ulong convert_month_to_period(ulong month)
bool
-check_date_with_warn(const MYSQL_TIME *ltime, ulonglong fuzzy_date,
- timestamp_type ts_type)
+check_date_with_warn(THD *thd, const MYSQL_TIME *ltime,
+ date_conv_mode_t fuzzydate, timestamp_type ts_type)
{
int unused;
- if (check_date(ltime, fuzzy_date, &unused))
+ if (check_date(ltime, fuzzydate, &unused))
{
ErrConvTime str(ltime);
- make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
+ make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN,
&str, ts_type, 0, 0);
return true;
}
@@ -305,7 +305,7 @@ check_date_with_warn(const MYSQL_TIME *ltime, ulonglong fuzzy_date,
bool
-adjust_time_range_with_warn(MYSQL_TIME *ltime, uint dec)
+adjust_time_range_with_warn(THD *thd, MYSQL_TIME *ltime, uint dec)
{
MYSQL_TIME copy= *ltime;
ErrConvTime str(&copy);
@@ -313,8 +313,7 @@ adjust_time_range_with_warn(MYSQL_TIME *ltime, uint dec)
if (check_time_range(ltime, dec, &warnings))
return true;
if (warnings)
- make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- &str, MYSQL_TIMESTAMP_TIME, 0, NullS);
+ thd->push_warning_truncated_wrong_value("time", str.ptr());
return false;
}
@@ -352,33 +351,70 @@ to_ascii(CHARSET_INFO *cs,
}
-/* Character set-aware version of str_to_time() */
-bool
-str_to_time(CHARSET_INFO *cs, const char *str, size_t length,
- MYSQL_TIME *l_time, ulonglong fuzzydate, MYSQL_TIME_STATUS *status)
+class TemporalAsciiBuffer: public LEX_CSTRING
{
char cnv[32];
- if ((cs->state & MY_CS_NONASCII) != 0)
+public:
+ TemporalAsciiBuffer(const char *str, size_t length, CHARSET_INFO *cs)
{
- length= to_ascii(cs, str, length, cnv, sizeof(cnv));
- str= cnv;
+ if ((cs->state & MY_CS_NONASCII) != 0)
+ {
+ LEX_CSTRING::str= cnv;
+ LEX_CSTRING::length= to_ascii(cs, str, length, cnv, sizeof(cnv));
+ }
+ else
+ {
+ LEX_CSTRING::str= str;
+ LEX_CSTRING::length= length;
+ }
}
- return str_to_time(str, length, l_time, fuzzydate, status);
+};
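TemporalAsciiBuffer above replaces the copy-pasted to_ascii() preamble of the old str_to_time()/str_to_datetime() wrappers: it converts the input into a small stack buffer only when the character set is non-ASCII and otherwise just aliases the caller's bytes. A standalone sketch (not MariaDB code; AsciiView and its crude byte filter are invented) of that convert-only-if-needed pattern:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    struct AsciiView
    {
      const char *str;
      size_t length;
      AsciiView(const char *s, size_t len, bool needs_conversion)
      {
        if (!needs_conversion)
        {
          str= s;                              // borrow the caller's buffer
          length= len;
        }
        else
        {
          length= len < sizeof(cnv) - 1 ? len : sizeof(cnv) - 1;
          for (size_t i= 0; i < length; i++)   // crude stand-in for to_ascii()
            cnv[i]= (unsigned char) s[i] < 0x80 ? s[i] : '?';
          cnv[length]= '\0';
          str= cnv;                            // point at the converted copy
        }
      }
    private:
      char cnv[32];                            // small fixed buffer, as above
    };

    int main()
    {
      const char *ts= "2019-01-01 10:20:30";
      AsciiView plain(ts, strlen(ts), false);  // ASCII charset: zero copy
      AsciiView conv(ts, strlen(ts), true);    // non-ASCII charset: converted
      printf("%.*s\n%.*s\n", (int) plain.length, plain.str,
             (int) conv.length, conv.str);
      return 0;
    }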
+
+
+/* Character set-aware version of ascii_to_datetime_or_date_or_time() */
+bool Temporal::str_to_datetime_or_date_or_time(THD *thd, MYSQL_TIME_STATUS *st,
+ const char *str, size_t length,
+ CHARSET_INFO *cs,
+ date_mode_t fuzzydate)
+{
+ TemporalAsciiBuffer tmp(str, length, cs);
+ return ascii_to_datetime_or_date_or_time(st, tmp.str, tmp.length, fuzzydate)||
+ add_nanoseconds(thd, &st->warnings, fuzzydate, st->nanoseconds);
}
-/* Character set-aware version of str_to_datetime() */
-bool str_to_datetime(CHARSET_INFO *cs, const char *str, size_t length,
- MYSQL_TIME *l_time, ulonglong flags,
- MYSQL_TIME_STATUS *status)
+/* Character set-aware version of str_to_datetime_or_date() */
+bool Temporal::str_to_datetime_or_date(THD *thd, MYSQL_TIME_STATUS *status,
+ const char *str, size_t length,
+ CHARSET_INFO *cs,
+ date_mode_t flags)
{
- char cnv[32];
- if ((cs->state & MY_CS_NONASCII) != 0)
- {
- length= to_ascii(cs, str, length, cnv, sizeof(cnv));
- str= cnv;
- }
- return str_to_datetime(str, length, l_time, flags, status);
+ TemporalAsciiBuffer tmp(str, length, cs);
+ return ascii_to_datetime_or_date(status, tmp.str, tmp.length, flags) ||
+ add_nanoseconds(thd, &status->warnings, flags, status->nanoseconds);
+}
+
+
+/* Character set-aware version of ascii_to_temporal() */
+bool Temporal::str_to_temporal(THD *thd, MYSQL_TIME_STATUS *status,
+ const char *str, size_t length, CHARSET_INFO *cs,
+ date_mode_t flags)
+{
+ TemporalAsciiBuffer tmp(str, length, cs);
+ return ascii_to_temporal(status, tmp.str, tmp.length, flags) ||
+ add_nanoseconds(thd, &status->warnings, flags, status->nanoseconds);
+}
+
+
+/* Character set-aware version of str_to_DDhhmmssff() */
+bool Interval_DDhhmmssff::str_to_DDhhmmssff(MYSQL_TIME_STATUS *status,
+ const char *str, size_t length,
+ CHARSET_INFO *cs, ulong max_hour)
+{
+ TemporalAsciiBuffer tmp(str, length, cs);
+ bool rc= ::str_to_DDhhmmssff(tmp.str, tmp.length, this, UINT_MAX32, status);
+ DBUG_ASSERT(status->warnings || !rc);
+ return rc;
}
@@ -387,125 +423,53 @@ bool str_to_datetime(CHARSET_INFO *cs, const char *str, size_t length,
if string was truncated during conversion.
NOTE
- See description of str_to_datetime() for more information.
+ See description of str_to_datetime_xxx() for more information.
*/
bool
-str_to_datetime_with_warn(CHARSET_INFO *cs,
- const char *str, size_t length, MYSQL_TIME *l_time,
- ulonglong flags)
+str_to_datetime_with_warn(THD *thd, CHARSET_INFO *cs,
+ const char *str, size_t length, MYSQL_TIME *to,
+ date_mode_t mode)
{
- MYSQL_TIME_STATUS status;
- THD *thd= current_thd;
- bool ret_val= str_to_datetime(cs, str, length, l_time, flags, &status);
- if (ret_val || status.warnings)
- make_truncated_value_warning(thd,
- ret_val ? Sql_condition::WARN_LEVEL_WARN :
- Sql_condition::time_warn_level(status.warnings),
- str, length, flags & TIME_TIME_ONLY ?
- MYSQL_TIMESTAMP_TIME : l_time->time_type, 0, NullS);
- DBUG_EXECUTE_IF("str_to_datetime_warn",
- push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
- ER_YES, str););
- return ret_val;
+ Temporal::Warn_push warn(thd, NULL, NullS, to, mode);
+ Temporal_hybrid *t= new(to) Temporal_hybrid(thd, &warn, str, length, cs, mode);
+ return !t->is_valid_temporal();
}
-/**
- converts a pair of numbers (integer part, microseconds) to MYSQL_TIME
-
- @param neg sign of the time value
- @param nr integer part of the number to convert
- @param sec_part microsecond part of the number
- @param ltime converted value will be written here
- @param fuzzydate conversion flags (TIME_INVALID_DATE, etc)
- @param str original number, as an ErrConv. For the warning
- @param field_name field name or NULL if not a field. For the warning
-
- @returns 0 for success, 1 for a failure
-*/
-static bool number_to_time_with_warn(bool neg, ulonglong nr, ulong sec_part,
- MYSQL_TIME *ltime, ulonglong fuzzydate,
- const ErrConv *str,
- const TABLE_SHARE *s, const char *field_name)
-{
- int was_cut;
- longlong res;
- enum_mysql_timestamp_type ts_type;
- bool have_warnings;
-
- if (fuzzydate & TIME_TIME_ONLY)
- {
- fuzzydate= TIME_TIME_ONLY; // clear other flags
- ts_type= MYSQL_TIMESTAMP_TIME;
- res= number_to_time(neg, nr, sec_part, ltime, &was_cut);
- have_warnings= MYSQL_TIME_WARN_HAVE_WARNINGS(was_cut);
- }
- else
- {
- ts_type= MYSQL_TIMESTAMP_DATETIME;
- if (neg)
- {
- res= -1;
- }
- else
- {
- res= number_to_datetime(nr, sec_part, ltime, fuzzydate, &was_cut);
- have_warnings= was_cut && (fuzzydate & TIME_NO_ZERO_IN_DATE);
- }
- }
-
- if (res < 0 || have_warnings)
- {
- make_truncated_value_warning(current_thd,
- Sql_condition::WARN_LEVEL_WARN, str,
- res < 0 ? MYSQL_TIMESTAMP_ERROR : ts_type,
- s, field_name);
- }
- return res < 0;
-}
-
-
-bool double_to_datetime_with_warn(double value, MYSQL_TIME *ltime,
- ulonglong fuzzydate,
+bool double_to_datetime_with_warn(THD *thd, double value, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate,
const TABLE_SHARE *s, const char *field_name)
{
- const ErrConvDouble str(value);
- bool neg= value < 0;
-
- if (neg)
- value= -value;
-
- if (value > LONGLONG_MAX)
- value= static_cast<double>(LONGLONG_MAX);
-
- longlong nr= static_cast<ulonglong>(floor(value));
- uint sec_part= static_cast<ulong>((value - floor(value))*TIME_SECOND_PART_FACTOR);
- return number_to_time_with_warn(neg, nr, sec_part, ltime, fuzzydate, &str,
- s, field_name);
+ Temporal::Warn_push warn(thd, s, field_name, ltime, fuzzydate);
+ Temporal_hybrid *t= new (ltime) Temporal_hybrid(thd, &warn, value, fuzzydate);
+ return !t->is_valid_temporal();
}
-bool decimal_to_datetime_with_warn(const my_decimal *value, MYSQL_TIME *ltime,
- ulonglong fuzzydate,
+bool decimal_to_datetime_with_warn(THD *thd, const my_decimal *value,
+ MYSQL_TIME *ltime,
+ date_mode_t fuzzydate,
const TABLE_SHARE *s, const char *field_name)
{
- const ErrConvDecimal str(value);
- ulonglong nr;
- ulong sec_part;
- bool neg= my_decimal2seconds(value, &nr, &sec_part);
- return number_to_time_with_warn(neg, nr, sec_part, ltime, fuzzydate, &str,
- s, field_name);
+ Temporal::Warn_push warn(thd, s, field_name, ltime, fuzzydate);
+ Temporal_hybrid *t= new (ltime) Temporal_hybrid(thd, &warn, value, fuzzydate);
+ return !t->is_valid_temporal();
}
-bool int_to_datetime_with_warn(bool neg, ulonglong value, MYSQL_TIME *ltime,
- ulonglong fuzzydate,
+bool int_to_datetime_with_warn(THD *thd, const Longlong_hybrid &nr,
+ MYSQL_TIME *ltime,
+ date_mode_t fuzzydate,
const TABLE_SHARE *s, const char *field_name)
{
- const ErrConvInteger str(neg ? - (longlong) value : (longlong) value, !neg);
- return number_to_time_with_warn(neg, value, 0, ltime,
- fuzzydate, &str, s, field_name);
+ /*
+ Note: conversion from an integer to TIME can overflow to '838:59:59.999999',
+ so the conversion result can have fractional digits.
+ */
+ Temporal::Warn_push warn(thd, s, field_name, ltime, fuzzydate);
+ Temporal_hybrid *t= new (ltime) Temporal_hybrid(thd, &warn, nr, fuzzydate);
+ return !t->is_valid_temporal();
}
@@ -552,7 +516,7 @@ void localtime_to_TIME(MYSQL_TIME *to, struct tm *from)
}
-void calc_time_from_sec(MYSQL_TIME *to, long seconds, long microseconds)
+void calc_time_from_sec(MYSQL_TIME *to, ulong seconds, ulong microseconds)
{
long t_seconds;
// to->neg is not cleared, it may already be set to a useful value
@@ -935,50 +899,10 @@ void make_truncated_value_warning(THD *thd,
timestamp_type time_type,
const TABLE_SHARE *s, const char *field_name)
{
- char warn_buff[MYSQL_ERRMSG_SIZE];
- const char *type_str;
- CHARSET_INFO *cs= &my_charset_latin1;
-
- switch (time_type) {
- case MYSQL_TIMESTAMP_DATE:
- type_str= "date";
- break;
- case MYSQL_TIMESTAMP_TIME:
- type_str= "time";
- break;
- case MYSQL_TIMESTAMP_DATETIME: // FALLTHROUGH
- default:
- type_str= "datetime";
- break;
- }
- if (field_name)
- {
- const char *db_name= s->db.str;
- const char *table_name= s->table_name.str;
-
- if (!db_name)
- db_name= "";
- if (!table_name)
- table_name= "";
-
- cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff),
- ER_THD(thd, ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
- type_str, sval->ptr(),
- db_name, table_name, field_name,
- (ulong) thd->get_stmt_da()->current_row_for_warning());
- }
- else
- {
- if (time_type > MYSQL_TIMESTAMP_ERROR)
- cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff),
- ER_THD(thd, ER_TRUNCATED_WRONG_VALUE),
- type_str, sval->ptr());
- else
- cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff),
- ER_THD(thd, ER_WRONG_VALUE), type_str, sval->ptr());
- }
- push_warning(thd, level,
- ER_TRUNCATED_WRONG_VALUE, warn_buff);
+ const char *type_str= Temporal::type_name_by_timestamp_type(time_type);
+ return thd->push_warning_wrong_or_truncated_value
+ (level, time_type <= MYSQL_TIMESTAMP_ERROR, type_str, sval->ptr(),
+ s, field_name);
}
@@ -989,7 +913,7 @@ void make_truncated_value_warning(THD *thd,
(X)->second_part)
#define GET_PART(X, N) X % N ## LL; X/= N ## LL
-bool date_add_interval(MYSQL_TIME *ltime, interval_type int_type,
+bool date_add_interval(THD *thd, MYSQL_TIME *ltime, interval_type int_type,
const INTERVAL &interval)
{
long period, sign;
@@ -1104,7 +1028,6 @@ bool date_add_interval(MYSQL_TIME *ltime, interval_type int_type,
invalid_date:
{
- THD *thd= current_thd;
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_DATETIME_FUNCTION_OVERFLOW,
ER_THD(thd, ER_DATETIME_FUNCTION_OVERFLOW),
@@ -1144,7 +1067,7 @@ null_date:
bool
calc_time_diff(const MYSQL_TIME *l_time1, const MYSQL_TIME *l_time2,
- int l_sign, longlong *seconds_out, long *microseconds_out)
+ int l_sign, ulonglong *seconds_out, ulong *microseconds_out)
{
long days;
bool neg;
@@ -1171,10 +1094,10 @@ calc_time_diff(const MYSQL_TIME *l_time1, const MYSQL_TIME *l_time2,
}
microseconds= ((longlong)days * SECONDS_IN_24H +
- (longlong)(l_time1->hour*3600L +
+ (longlong)(l_time1->hour*3600LL +
l_time1->minute*60L +
l_time1->second) -
- l_sign*(longlong)(l_time2->hour*3600L +
+ l_sign*(longlong)(l_time2->hour*3600LL +
l_time2->minute*60L +
l_time2->second)) * 1000000LL +
(longlong)l_time1->second_part -
@@ -1186,17 +1109,17 @@ calc_time_diff(const MYSQL_TIME *l_time1, const MYSQL_TIME *l_time2,
microseconds= -microseconds;
neg= 1;
}
- *seconds_out= microseconds/1000000L;
- *microseconds_out= (long) (microseconds%1000000L);
+ *seconds_out= (ulonglong) microseconds/1000000L;
+ *microseconds_out= (ulong) (microseconds%1000000L);
return neg;
}
bool calc_time_diff(const MYSQL_TIME *l_time1, const MYSQL_TIME *l_time2,
- int l_sign, MYSQL_TIME *l_time3, ulonglong fuzzydate)
+ int l_sign, MYSQL_TIME *l_time3, date_mode_t fuzzydate)
{
- longlong seconds;
- long microseconds;
+ ulonglong seconds;
+ ulong microseconds;
bzero((char *) l_time3, sizeof(*l_time3));
l_time3->neg= calc_time_diff(l_time1, l_time2, l_sign,
&seconds, &microseconds);
@@ -1215,7 +1138,7 @@ bool calc_time_diff(const MYSQL_TIME *l_time1, const MYSQL_TIME *l_time2,
("invalid" means > TIME_MAX_SECOND)
*/
set_if_smaller(seconds, INT_MAX32);
- calc_time_from_sec(l_time3, (long) seconds, microseconds);
+ calc_time_from_sec(l_time3, (ulong) seconds, microseconds);
return ((fuzzydate & TIME_NO_ZERO_DATE) && (seconds == 0) &&
(microseconds == 0));
}
@@ -1272,56 +1195,6 @@ bool time_to_datetime(MYSQL_TIME *ltime)
}
-/**
- Return a valid DATE or DATETIME value from an arbitrary MYSQL_TIME.
- If ltime is TIME, it's first converted to DATETIME.
- If ts_type is DATE, hhmmss is set to zero.
- The date part of the result is checked against fuzzy_date.
-
- @param ltime The value to convert.
- @param fuzzy_date Flags to check date.
- @param ts_type The type to convert to.
- @return false on success, true of error (negative time).*/
-bool
-make_date_with_warn(MYSQL_TIME *ltime, ulonglong fuzzy_date,
- timestamp_type ts_type)
-{
- DBUG_ASSERT(ts_type == MYSQL_TIMESTAMP_DATE ||
- ts_type == MYSQL_TIMESTAMP_DATETIME);
- if (ltime->time_type == MYSQL_TIMESTAMP_TIME && time_to_datetime(ltime))
- {
- /* e.g. negative time */
- ErrConvTime str(ltime);
- make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
- &str, ts_type, 0, 0);
- return true;
- }
- if ((ltime->time_type= ts_type) == MYSQL_TIMESTAMP_DATE)
- ltime->hour= ltime->minute= ltime->second= ltime->second_part= 0;
- return check_date_with_warn(ltime, fuzzy_date, ts_type);
-}
-
-
-/*
- Convert a TIME value to DAY-TIME interval, e.g. for extraction:
- EXTRACT(DAY FROM x), EXTRACT(HOUR FROM x), etc.
- Moves full days from ltime->hour to ltime->day.
- Note, time_type is set to MYSQL_TIMESTAMP_NONE, to make sure that
- the structure is not used for anything else other than extraction:
- non-extraction TIME functions expect zero day value!
-*/
-void time_to_daytime_interval(MYSQL_TIME *ltime)
-{
- DBUG_ASSERT(ltime->time_type == MYSQL_TIMESTAMP_TIME);
- DBUG_ASSERT(ltime->year == 0);
- DBUG_ASSERT(ltime->month == 0);
- DBUG_ASSERT(ltime->day == 0);
- ltime->day= ltime->hour / 24;
- ltime->hour%= 24;
- ltime->time_type= MYSQL_TIMESTAMP_NONE;
-}
-
-
/*** Conversion from TIME to DATETIME ***/
/*
@@ -1349,8 +1222,8 @@ mix_date_and_time_complex(MYSQL_TIME *ldate, const MYSQL_TIME *ltime)
{
DBUG_ASSERT(ldate->time_type == MYSQL_TIMESTAMP_DATE ||
ldate->time_type == MYSQL_TIMESTAMP_DATETIME);
- longlong seconds;
- long days, useconds;
+ ulonglong seconds;
+ ulong days, useconds;
int sign= ltime->neg ? 1 : -1;
ldate->neg= calc_time_diff(ldate, ltime, sign, &seconds, &useconds);
@@ -1440,7 +1313,7 @@ time_to_datetime(THD *thd, const MYSQL_TIME *from, MYSQL_TIME *to)
bool
time_to_datetime_with_warn(THD *thd,
const MYSQL_TIME *from, MYSQL_TIME *to,
- ulonglong fuzzydate)
+ date_conv_mode_t fuzzydate)
{
int warn= 0;
DBUG_ASSERT(from->time_type == MYSQL_TIMESTAMP_TIME);
@@ -1456,34 +1329,13 @@ time_to_datetime_with_warn(THD *thd,
check_date(to, fuzzydate, &warn)))
{
ErrConvTime str(from);
- make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN,
- &str, MYSQL_TIMESTAMP_DATETIME, 0, 0);
+ thd->push_warning_truncated_wrong_value("datetime", str.ptr());
return true;
}
return false;
}
-bool datetime_to_time_with_warn(THD *thd, const MYSQL_TIME *dt,
- MYSQL_TIME *tm, uint dec)
-{
- if (thd->variables.old_behavior & OLD_MODE_ZERO_DATE_TIME_CAST)
- {
- *tm= *dt;
- datetime_to_time(tm);
- return false;
- }
- else /* new mode */
- {
- MYSQL_TIME current_date;
- set_current_date(thd, &current_date);
- calc_time_diff(dt, &current_date, 1, tm, 0);
- }
- int warnings= 0;
- return check_time_range(tm, dec, &warnings);
-}
-
-
longlong pack_time(const MYSQL_TIME *my_time)
{
return ((((((my_time->year * 13ULL +
diff --git a/sql/sql_time.h b/sql/sql_time.h
index ca9f79273ec..161d08c80b8 100644
--- a/sql/sql_time.h
+++ b/sql/sql_time.h
@@ -17,6 +17,7 @@
#ifndef SQL_TIME_INCLUDED
#define SQL_TIME_INCLUDED
+#include "sql_basic_types.h"
#include "my_time.h"
#include "mysql_time.h" /* timestamp_type */
#include "sql_error.h" /* Sql_condition */
@@ -35,69 +36,28 @@ ulong convert_period_to_month(ulong period);
ulong convert_month_to_period(ulong month);
void set_current_date(THD *thd, MYSQL_TIME *to);
bool time_to_datetime(MYSQL_TIME *ltime);
-void time_to_daytime_interval(MYSQL_TIME *l_time);
bool get_date_from_daynr(long daynr,uint *year, uint *month, uint *day);
my_time_t TIME_to_timestamp(THD *thd, const MYSQL_TIME *t, uint *error_code);
-bool str_to_datetime_with_warn(CHARSET_INFO *cs, const char *str, size_t length, MYSQL_TIME *l_time,
- ulonglong flags);
-bool double_to_datetime_with_warn(double value, MYSQL_TIME *ltime,
- ulonglong fuzzydate,
+bool str_to_datetime_with_warn(THD *thd,
+ CHARSET_INFO *cs, const char *str, size_t length,
+ MYSQL_TIME *l_time,
+ date_mode_t flags);
+bool double_to_datetime_with_warn(THD *thd, double value, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate,
const TABLE_SHARE *s, const char *name);
-bool decimal_to_datetime_with_warn(const my_decimal *value, MYSQL_TIME *ltime,
- ulonglong fuzzydate,
+bool decimal_to_datetime_with_warn(THD *thd,
+ const my_decimal *value, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate,
const TABLE_SHARE *s, const char *name);
-bool int_to_datetime_with_warn(bool neg, ulonglong value, MYSQL_TIME *ltime,
- ulonglong fuzzydate,
+bool int_to_datetime_with_warn(THD *thd, const Longlong_hybrid &nr,
+ MYSQL_TIME *ltime,
+ date_mode_t fuzzydate,
const TABLE_SHARE *s, const char *name);
bool time_to_datetime(THD *thd, const MYSQL_TIME *tm, MYSQL_TIME *dt);
bool time_to_datetime_with_warn(THD *thd,
const MYSQL_TIME *tm, MYSQL_TIME *dt,
- ulonglong fuzzydate);
-/*
- Simply truncate the YYYY-MM-DD part to 0000-00-00
- and change time_type to MYSQL_TIMESTAMP_TIME
-*/
-inline void datetime_to_time(MYSQL_TIME *ltime)
-{
- DBUG_ASSERT(ltime->time_type == MYSQL_TIMESTAMP_DATE ||
- ltime->time_type == MYSQL_TIMESTAMP_DATETIME);
- DBUG_ASSERT(ltime->neg == 0);
- ltime->year= ltime->month= ltime->day= 0;
- ltime->time_type= MYSQL_TIMESTAMP_TIME;
-}
-
-
-/**
- Convert DATE/DATETIME to TIME(dec)
- using CURRENT_DATE in a non-old mode,
- or using simple truncation in old mode (OLD_MODE_ZERO_DATE_TIME_CAST).
-
- @param thd - the thread to get the variables.old_behaviour value from
- @param dt - the DATE of DATETIME value to convert
- @param[out] tm - store result here
- @param dec - the desired scale. The fractional part of the result
- is checked according to this parameter before returning
- the conversion result. "dec" is important in the corner
- cases near the max/min limits.
- If the result is '838:59:59.999999' and the desired scale
- is less than 6, an error is returned.
- Note, dec is not important in the
- OLD_MODE_ZERO_DATE_TIME_CAST old mode.
-
- - in case of OLD_MODE_ZERO_DATE_TIME_CAST
- the TIME part is simply truncated and "false" is returned.
- - otherwise, the result is calculated effectively similar to:
- TIMEDIFF(dt, CAST(CURRENT_DATE AS DATETIME))
- If the difference fits into the supported TIME range, "false" is returned,
- otherwise a warning is issued and "true" is returned.
-
- @return false - on success
- @return true - on error
-*/
-bool datetime_to_time_with_warn(THD *, const MYSQL_TIME *dt,
- MYSQL_TIME *tm, uint dec);
-
+ date_conv_mode_t fuzzydate);
inline void datetime_to_date(MYSQL_TIME *ltime)
{
@@ -118,16 +78,8 @@ void make_truncated_value_warning(THD *thd,
Sql_condition::enum_warning_level level,
const ErrConv *str_val,
timestamp_type time_type,
- const TABLE_SHARE *s, const char *field_name);
-
-static inline void make_truncated_value_warning(THD *thd,
- Sql_condition::enum_warning_level level, const char *str_val,
- size_t str_length, timestamp_type time_type,
- const TABLE_SHARE *s, const char *field_name)
-{
- const ErrConvString str(str_val, str_length, &my_charset_bin);
- make_truncated_value_warning(thd, level, &str, time_type, s, field_name);
-}
+ const TABLE_SHARE *s,
+ const char *field_name);
extern DATE_TIME_FORMAT *date_time_format_make(timestamp_type format_type,
const char *format_str,
@@ -139,10 +91,10 @@ const char *get_date_time_format_str(KNOWN_DATE_TIME_FORMAT *format,
bool my_TIME_to_str(const MYSQL_TIME *ltime, String *str, uint dec);
/* MYSQL_TIME operations */
-bool date_add_interval(MYSQL_TIME *ltime, interval_type int_type,
+bool date_add_interval(THD *thd, MYSQL_TIME *ltime, interval_type int_type,
const INTERVAL &interval);
bool calc_time_diff(const MYSQL_TIME *l_time1, const MYSQL_TIME *l_time2,
- int l_sign, longlong *seconds_out, long *microseconds_out);
+ int l_sign, ulonglong *seconds_out, ulong *microseconds_out);
int append_interval(String *str, interval_type int_type,
const INTERVAL &interval);
/**
@@ -168,26 +120,17 @@ int append_interval(String *str, interval_type int_type,
@return false - otherwise
*/
bool calc_time_diff(const MYSQL_TIME *l_time1, const MYSQL_TIME *l_time2,
- int lsign, MYSQL_TIME *l_time3, ulonglong fuzzydate);
+ int lsign, MYSQL_TIME *l_time3, date_mode_t fuzzydate);
int my_time_compare(const MYSQL_TIME *a, const MYSQL_TIME *b);
void localtime_to_TIME(MYSQL_TIME *to, struct tm *from);
-void calc_time_from_sec(MYSQL_TIME *to, long seconds, long microseconds);
-uint calc_week(MYSQL_TIME *l_time, uint week_behaviour, uint *year);
+void calc_time_from_sec(MYSQL_TIME *to, ulong seconds, ulong microseconds);
+uint calc_week(const MYSQL_TIME *l_time, uint week_behaviour, uint *year);
int calc_weekday(long daynr,bool sunday_first_day_of_week);
bool parse_date_time_format(timestamp_type format_type,
const char *format, uint format_length,
DATE_TIME_FORMAT *date_time_format);
-/* Character set-aware version of str_to_time() */
-bool str_to_time(CHARSET_INFO *cs, const char *str,size_t length,
- MYSQL_TIME *l_time, ulonglong fuzzydate,
- MYSQL_TIME_STATUS *status);
-/* Character set-aware version of str_to_datetime() */
-bool str_to_datetime(CHARSET_INFO *cs,
- const char *str, size_t length,
- MYSQL_TIME *l_time, ulonglong flags,
- MYSQL_TIME_STATUS *status);
/* convenience wrapper */
inline bool parse_date_time_format(timestamp_type format_type,
@@ -224,15 +167,21 @@ non_zero_date(const MYSQL_TIME *ltime)
non_zero_hhmmssuu(ltime));
}
static inline bool
-check_date(const MYSQL_TIME *ltime, ulonglong flags, int *was_cut)
+check_date(const MYSQL_TIME *ltime, date_conv_mode_t flags, int *was_cut)
{
- return check_date(ltime, non_zero_date(ltime), flags, was_cut);
+ return check_date(ltime, non_zero_date(ltime),
+ ulonglong(flags & TIME_MODE_FOR_XXX_TO_DATE), was_cut);
}
-bool check_date_with_warn(const MYSQL_TIME *ltime, ulonglong fuzzy_date,
- timestamp_type ts_type);
-bool make_date_with_warn(MYSQL_TIME *ltime,
- ulonglong fuzzy_date, timestamp_type ts_type);
-bool adjust_time_range_with_warn(MYSQL_TIME *ltime, uint dec);
+bool check_date_with_warn(THD *thd, const MYSQL_TIME *ltime,
+ date_conv_mode_t fuzzy_date, timestamp_type ts_type);
+static inline bool
+check_date_with_warn(THD *thd, const MYSQL_TIME *ltime,
+ date_mode_t fuzzydate, timestamp_type ts_type)
+{
+ return check_date_with_warn(thd, ltime, date_conv_mode_t(fuzzydate), ts_type);
+}
+
+bool adjust_time_range_with_warn(THD *thd, MYSQL_TIME *ltime, uint dec);
longlong pack_time(const MYSQL_TIME *my_time);
void unpack_time(longlong packed, MYSQL_TIME *my_time,
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 632822a8e87..96f21bc47a7 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -35,16 +35,6 @@
#include "sp_cache.h" // sp_invalidate_cache
#include <mysys_err.h>
-LEX_CSTRING *make_lex_string(LEX_CSTRING *lex_str,
- const char* str, size_t length,
- MEM_ROOT *mem_root)
-{
- if (!(lex_str->str= strmake_root(mem_root, str, length)))
- return 0;
- lex_str->length= length;
- return lex_str;
-}
-
/*************************************************************************/
/**
@@ -517,8 +507,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
}
#ifdef WITH_WSREP
- if (thd->wsrep_exec_mode == LOCAL_STATE)
- WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL);
+ WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL);
#endif
/* We should have only one table in table list. */
@@ -622,9 +611,10 @@ end:
my_ok(thd);
DBUG_RETURN(result);
-
-WSREP_ERROR_LABEL:
+#ifdef WITH_WSREP
+wsrep_error_label:
DBUG_RETURN(true);
+#endif
}
@@ -1502,8 +1492,8 @@ bool Table_triggers_list::check_n_load(THD *thd, const LEX_CSTRING *db,
if (likely((name= error_handler.get_trigger_name())))
{
- if (unlikely(!(make_lex_string(&trigger->name, name->str,
- name->length, &table->mem_root))))
+ trigger->name= safe_lexcstrdup_root(&table->mem_root, *name);
+ if (unlikely(!trigger->name.str))
goto err_with_lex_cleanup;
}
trigger->definer= ((!trg_definer || !trg_definer->length) ?
@@ -2312,12 +2302,10 @@ void Table_triggers_list::mark_fields_used(trg_event_type event)
if (trg_field->field_idx != (uint)-1)
{
DBUG_PRINT("info", ("marking field: %d", trg_field->field_idx));
- bitmap_set_bit(trigger_table->read_set, trg_field->field_idx);
if (trg_field->get_settable_routine_parameter())
bitmap_set_bit(trigger_table->write_set, trg_field->field_idx);
- if (trigger_table->field[trg_field->field_idx]->vcol_info)
- trigger_table->mark_virtual_col(trigger_table->
- field[trg_field->field_idx]);
+ trigger_table->mark_column_with_deps(
+ trigger_table->field[trg_field->field_idx]);
}
}
}
diff --git a/sql/sql_truncate.cc b/sql/sql_truncate.cc
index bab9bb5e9ac..389276d0bcf 100644
--- a/sql/sql_truncate.cc
+++ b/sql/sql_truncate.cc
@@ -416,9 +416,11 @@ bool Sql_cmd_truncate_table::truncate_table(THD *thd, TABLE_LIST *table_ref)
{
bool hton_can_recreate;
+#ifdef WITH_WSREP
if (WSREP(thd) &&
wsrep_to_isolation_begin(thd, table_ref->db.str, table_ref->table_name.str, 0))
DBUG_RETURN(TRUE);
+#endif /* WITH_WSREP */
if (lock_table(thd, table_ref, &hton_can_recreate))
DBUG_RETURN(TRUE);
@@ -495,7 +497,7 @@ bool Sql_cmd_truncate_table::truncate_table(THD *thd, TABLE_LIST *table_ref)
bool Sql_cmd_truncate_table::execute(THD *thd)
{
bool res= TRUE;
- TABLE_LIST *table= thd->lex->select_lex.table_list.first;
+ TABLE_LIST *table= thd->lex->first_select_lex()->table_list.first;
DBUG_ENTER("Sql_cmd_truncate_table::execute");
if (check_one_table_access(thd, DROP_ACL, table))
diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc
index a05673f2a6c..f16d34e8041 100644
--- a/sql/sql_tvc.cc
+++ b/sql/sql_tvc.cc
@@ -173,7 +173,7 @@ bool get_type_attributes_for_tvc(THD *thd,
Item *item;
for (uint holder_pos= 0 ; (item= it++); holder_pos++)
{
- DBUG_ASSERT(item->fixed);
+ DBUG_ASSERT(item->is_fixed());
holders[holder_pos].add_argument(item);
}
}
@@ -249,7 +249,6 @@ bool table_value_constr::prepare(THD *thd, SELECT_LEX *sl,
Item_type_holder(thd, item, holders[pos].type_handler(),
&holders[pos]/*Type_all_attributes*/,
holders[pos].get_maybe_null());
- new_holder->fix_fields(thd, 0);
sl->item_list.push_back(new_holder);
}
if (arena)
@@ -296,7 +295,7 @@ int table_value_constr::save_explain_data_intern(THD *thd,
explain->select_id= select_lex->select_number;
explain->select_type= select_lex->type;
- explain->linkage= select_lex->linkage;
+ explain->linkage= select_lex->get_linkage();
explain->using_temporary= false;
explain->using_filesort= false;
/* Setting explain->message means that all other members are invalid */
@@ -564,7 +563,7 @@ bool Item_subselect::wrap_tvc_in_derived_table(THD *thd,
Item *item;
SELECT_LEX *sq_select; // select for IN subquery;
sq_select= lex->current_select;
- sq_select->linkage= tvc_sl->linkage;
+ sq_select->set_linkage(tvc_sl->get_linkage());
sq_select->parsing_place= SELECT_LIST;
item= new (thd->mem_root) Item_field(thd, &sq_select->context,
NULL, NULL, &star_clex_str);
@@ -583,7 +582,7 @@ bool Item_subselect::wrap_tvc_in_derived_table(THD *thd,
goto err;
tvc_select= lex->current_select;
derived_unit= tvc_select->master_unit();
- tvc_select->linkage= DERIVED_TABLE_TYPE;
+ tvc_select->set_linkage(DERIVED_TABLE_TYPE);
lex->current_select= sq_select;
@@ -710,7 +709,7 @@ Item *Item_func_in::in_predicate_to_in_subs_transformer(THD *thd,
mysql_init_select(lex);
tvc_select= lex->current_select;
derived_unit= tvc_select->master_unit();
- tvc_select->linkage= DERIVED_TABLE_TYPE;
+ tvc_select->set_linkage(DERIVED_TABLE_TYPE);
/* Create TVC used in the transformation */
if (create_value_list_for_tvc(thd, &values))
diff --git a/sql/sql_type.cc b/sql/sql_type.cc
index a40172967a7..94b0ab9a6c1 100644
--- a/sql/sql_type.cc
+++ b/sql/sql_type.cc
@@ -21,11 +21,13 @@
#include "sql_time.h"
#include "item.h"
#include "log.h"
+#include "tztime.h"
Type_handler_row type_handler_row;
Type_handler_null type_handler_null;
+Type_handler_bool type_handler_bool;
Type_handler_tiny type_handler_tiny;
Type_handler_short type_handler_short;
Type_handler_long type_handler_long;
@@ -41,6 +43,7 @@ Type_handler_olddecimal type_handler_olddecimal;
Type_handler_newdecimal type_handler_newdecimal;
Type_handler_year type_handler_year;
+Type_handler_year type_handler_year2;
Type_handler_time type_handler_time;
Type_handler_date type_handler_date;
Type_handler_timestamp type_handler_timestamp;
@@ -56,6 +59,7 @@ Type_handler_set type_handler_set;
Type_handler_string type_handler_string;
Type_handler_var_string type_handler_var_string;
Type_handler_varchar type_handler_varchar;
+Type_handler_hex_hybrid type_handler_hex_hybrid;
static Type_handler_varchar_compressed type_handler_varchar_compressed;
Type_handler_tiny_blob type_handler_tiny_blob;
@@ -64,6 +68,8 @@ Type_handler_long_blob type_handler_long_blob;
Type_handler_blob type_handler_blob;
static Type_handler_blob_compressed type_handler_blob_compressed;
+Type_handler_interval_DDhhmmssff type_handler_interval_DDhhmmssff;
+
#ifdef HAVE_SPATIAL
Type_handler_geometry type_handler_geometry;
#endif
@@ -91,6 +97,9 @@ bool Type_handler_data::init()
&type_handler_geometry,
&type_handler_geometry) ||
m_type_aggregator_for_result.add(&type_handler_geometry,
+ &type_handler_hex_hybrid,
+ &type_handler_long_blob) ||
+ m_type_aggregator_for_result.add(&type_handler_geometry,
&type_handler_tiny_blob,
&type_handler_long_blob) ||
m_type_aggregator_for_result.add(&type_handler_geometry,
@@ -125,18 +134,680 @@ bool Type_handler_data::init()
Type_handler_data *type_handler_data= NULL;
-void Time::make_from_item(Item *item, const Options opt)
+String_ptr::String_ptr(Item *item, String *buffer)
+ :m_string_ptr(item->val_str(buffer))
+{ }
+
+
+Ascii_ptr::Ascii_ptr(Item *item, String *buffer)
+ :String_ptr(item->val_str_ascii(buffer))
+{ }
+
+
+void VDec::set(Item *item)
+{
+ m_ptr= item->val_decimal(&m_buffer);
+ DBUG_ASSERT((m_ptr == NULL) == item->null_value);
+}
+
+
+VDec::VDec(Item *item)
+{
+ m_ptr= item->val_decimal(&m_buffer);
+ DBUG_ASSERT((m_ptr == NULL) == item->null_value);
+}
+
+
+VDec_op::VDec_op(Item_func_hybrid_field_type *item)
+{
+ m_ptr= item->decimal_op(&m_buffer);
+ DBUG_ASSERT((m_ptr == NULL) == item->null_value);
+}
+
+
+date_conv_mode_t Temporal::sql_mode_for_dates(THD *thd)
+{
+ return ::sql_mode_for_dates(thd);
+}
+
+
+time_round_mode_t Temporal::default_round_mode(THD *thd)
+{
+ return thd->temporal_round_mode();
+}
+
+
+time_round_mode_t Timestamp::default_round_mode(THD *thd)
+{
+ return thd->temporal_round_mode();
+}
+
+
+my_decimal *Temporal::to_decimal(my_decimal *to) const
+{
+ return date2my_decimal(this, to);
+}
+
+
+my_decimal *Temporal::bad_to_decimal(my_decimal *to) const
+{
+ my_decimal_set_zero(to);
+ return NULL;
+}
+
+
+void Temporal::make_from_str(THD *thd, Warn *warn,
+ const char *str, size_t length,
+ CHARSET_INFO *cs, date_mode_t fuzzydate)
+{
+ DBUG_EXECUTE_IF("str_to_datetime_warn",
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
 ER_YES, ErrConvString(str, length, cs).ptr()););
+
+ if (str_to_temporal(thd, warn, str, length, cs, fuzzydate))
+ make_fuzzy_date(&warn->warnings, date_conv_mode_t(fuzzydate));
+ if (warn->warnings)
+ warn->set_str(str, length, &my_charset_bin);
+}
+
+
+Temporal_hybrid::Temporal_hybrid(THD *thd, Item *item, date_mode_t fuzzydate)
+{
+ if (item->get_date(thd, this, fuzzydate))
+ time_type= MYSQL_TIMESTAMP_NONE;
+}
+
+
+uint Timestamp::binary_length_to_precision(uint length)
+{
+ switch (length) {
+ case 4: return 0;
+ case 5: return 2;
+ case 6: return 4;
+ case 7: return 6;
+ }
+ DBUG_ASSERT(0);
+ return 0;
+}
+
+
+Timestamp::Timestamp(const Native &native)
+{
+ DBUG_ASSERT(native.length() >= 4 && native.length() <= 7);
+ uint dec= binary_length_to_precision(native.length());
+ my_timestamp_from_binary(this, (const uchar *) native.ptr(), dec);
+}
+
+
+bool Timestamp::to_native(Native *to, uint decimals) const
+{
+ uint len= my_timestamp_binary_length(decimals);
+ if (to->reserve(len))
+ return true;
+ my_timestamp_to_binary(this, (uchar *) to->ptr(), decimals);
+ to->length(len);
+ return false;
+}
+
+
+bool Timestamp::to_TIME(THD *thd, MYSQL_TIME *to, date_mode_t fuzzydate) const
+{
+ return thd->timestamp_to_TIME(to, tv_sec, tv_usec, fuzzydate);
+}
+
+
+Timestamp::Timestamp(THD *thd, const MYSQL_TIME *ltime, uint *error_code)
+ :Timeval(TIME_to_timestamp(thd, ltime, error_code), ltime->second_part)
+{ }
+
+
+Timestamp_or_zero_datetime::Timestamp_or_zero_datetime(THD *thd,
+ const MYSQL_TIME *ltime,
+ uint *error_code)
+ :Timestamp(thd, ltime, error_code),
+ m_is_zero_datetime(*error_code == ER_WARN_DATA_OUT_OF_RANGE)
+{
+ if (m_is_zero_datetime)
+ {
+ if (!non_zero_date(ltime))
+ *error_code= 0; // ltime was '0000-00-00 00:00:00'
+ }
+ else if (*error_code == ER_WARN_INVALID_TIMESTAMP)
+ *error_code= 0; // ltime fell into a spring-forward (DST) gap and was adjusted.
+}
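+
+/*
+ A sketch of the resulting state for typical inputs: a true zero datetime
+ '0000-00-00 00:00:00' becomes the zero-datetime value and *error_code is
+ reset to 0; a datetime outside of the supported TIMESTAMP range also
+ becomes the zero-datetime value but keeps ER_WARN_DATA_OUT_OF_RANGE for
+ the caller; a value inside a DST gap keeps the adjusted timestamp and
+ clears ER_WARN_INVALID_TIMESTAMP.
+*/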
+
+
+bool Timestamp_or_zero_datetime::to_TIME(THD *thd, MYSQL_TIME *to,
+ date_mode_t fuzzydate) const
+{
+ if (m_is_zero_datetime)
+ {
+ set_zero_time(to, MYSQL_TIMESTAMP_DATETIME);
+ return false;
+ }
+ return Timestamp::to_TIME(thd, to, fuzzydate);
+}
+
+
+bool Timestamp_or_zero_datetime::to_native(Native *to, uint decimals) const
+{
+ if (m_is_zero_datetime)
+ {
+ to->length(0);
+ return false;
+ }
+ return Timestamp::to_native(to, decimals);
+}
+
+
+int Timestamp_or_zero_datetime_native::save_in_field(Field *field,
+ uint decimals) const
+{
+ field->set_notnull();
+ if (field->type_handler()->type_handler_for_native_format() ==
+ &type_handler_timestamp2)
+ return field->store_native(*this);
+ if (is_zero_datetime())
+ {
+ static Datetime zero(Datetime::zero());
+ return field->store_time_dec(zero.get_mysql_time(), decimals);
+ }
+ return field->store_timestamp_dec(Timestamp(*this).tv(), decimals);
+}
+
+
+void Sec6::make_from_decimal(const my_decimal *d, ulong *nanoseconds)
+{
+ m_neg= my_decimal2seconds(d, &m_sec, &m_usec, nanoseconds);
+ m_truncated= (m_sec >= LONGLONG_MAX);
+}
+
+
+void Sec6::make_from_double(double nr, ulong *nanoseconds)
+{
+ if ((m_neg= nr < 0))
+ nr= -nr;
+ if ((m_truncated= nr > (double) LONGLONG_MAX))
+ {
+ m_sec= LONGLONG_MAX;
+ m_usec= 0;
+ *nanoseconds= 0;
+ }
+ else
+ {
+ m_sec= (ulonglong) nr;
+ m_usec= (ulong) ((nr - floor(nr)) * 1000000000);
+ *nanoseconds= m_usec % 1000;
+ m_usec/= 1000;
+ }
+}
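+
+/*
+ A worked example of the split above (approximate, subject to floating
+ point rounding): for nr= 1.2345678 the integer part gives m_sec= 1, the
+ fraction scaled to nanoseconds gives about 234567800, so m_usec becomes
+ 234567 and the remaining ~800 ns are returned in *nanoseconds for later
+ rounding.
+*/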
+
+
+void Sec6::make_truncated_warning(THD *thd, const char *type_str) const
+{
+ char buff[1 + MAX_BIGINT_WIDTH + 1 + 6 + 1]; // '-' int '.' frac '\0'
+ to_string(buff, sizeof(buff));
+ thd->push_warning_truncated_wrong_value(type_str, buff);
+}
+
+
+bool Sec6::convert_to_mysql_time(THD *thd, int *warn, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate) const
+{
+ bool rc= fuzzydate & (TIME_INTERVAL_hhmmssff | TIME_INTERVAL_DAY) ?
+ to_datetime_or_to_interval_hhmmssff(ltime, warn) :
+ fuzzydate & TIME_TIME_ONLY ?
+ to_datetime_or_time(ltime, warn, date_conv_mode_t(fuzzydate)) :
+ to_datetime_or_date(ltime, warn, date_conv_mode_t(fuzzydate));
+ DBUG_ASSERT(*warn || !rc);
+ if (truncated())
+ *warn|= MYSQL_TIME_WARN_TRUNCATED;
+ return rc;
+}
+
+
+void Temporal::push_conversion_warnings(THD *thd, bool totally_useless_value,
+ int warn,
+ const char *typestr,
+ const TABLE_SHARE *s,
+ const char *field_name,
+ const char *value)
+{
+ if (MYSQL_TIME_WARN_HAVE_WARNINGS(warn))
+ thd->push_warning_wrong_or_truncated_value(Sql_condition::WARN_LEVEL_WARN,
+ totally_useless_value,
+ typestr, value, s, field_name);
+ else if (MYSQL_TIME_WARN_HAVE_NOTES(warn))
+ thd->push_warning_wrong_or_truncated_value(Sql_condition::WARN_LEVEL_NOTE,
+ false, typestr, value, s,
+ field_name);
+}
+
+
+VSec9::VSec9(THD *thd, Item *item, const char *type_str, ulonglong limit)
+{
+ if (item->decimals == 0)
+ { // optimize for an important special case
+ Longlong_hybrid nr(item->val_int(), item->unsigned_flag);
+ make_from_int(nr);
+ m_is_null= item->null_value;
+ if (!m_is_null && m_sec > limit)
+ {
+ m_sec= limit;
+ m_truncated= true;
+ ErrConvInteger err(nr);
+ thd->push_warning_truncated_wrong_value(type_str, err.ptr());
+ }
+ }
+ else if (item->cmp_type() == REAL_RESULT)
+ {
+ double nr= item->val_real();
+ make_from_double(nr, &m_nsec);
+ m_is_null= item->null_value;
+ if (!m_is_null && m_sec > limit)
+ {
+ m_sec= limit;
+ m_truncated= true;
+ }
+ if (m_truncated)
+ {
+ ErrConvDouble err(nr);
+ thd->push_warning_truncated_wrong_value(type_str, err.ptr());
+ }
+ }
+ else
+ {
+ VDec tmp(item);
+ (m_is_null= tmp.is_null()) ? reset() : make_from_decimal(tmp.ptr(), &m_nsec);
+ if (!m_is_null && m_sec > limit)
+ {
+ m_sec= limit;
+ m_truncated= true;
+ }
+ if (m_truncated)
+ {
+ ErrConvDecimal err(tmp.ptr());
+ thd->push_warning_truncated_wrong_value(type_str, err.ptr());
+ }
+ }
+}
+
+
+Year::Year(longlong value, bool unsigned_flag, uint length)
+{
+ if ((m_truncated= (value < 0))) // Negative or huge unsigned
+ m_year= unsigned_flag ? 9999 : 0;
+ else if (value > 9999)
+ {
+ m_truncated= true;
+ m_year= 9999;
+ }
+ else if (length == 2)
+ {
+ m_year= value < 70 ? (uint) value + 2000 :
+ value <= 1900 ? (uint) value + 1900 :
+ (uint) value;
+ }
+ else
+ m_year= (uint) value;
+ DBUG_ASSERT(m_year <= 9999);
+}
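+
+/*
+ Illustration of the 2-digit YEAR mapping above (a sketch):
+ Year(69, false, 2) gives 2069 and Year(70, false, 2) gives 1970,
+ i.e. 0..69 map to 2000..2069 and 70..99 map to 1970..1999,
+ while length == 4 keeps the value as is.
+*/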
+
+
+uint Year::year_precision(const Item *item) const
+{
+ return item->type_handler() == &type_handler_year2 ? 2 : 4;
+}
+
+
+VYear::VYear(Item *item)
+ :Year_null(item->to_longlong_null(), item->unsigned_flag, year_precision(item))
+{ }
+
+
+VYear_op::VYear_op(Item_func_hybrid_field_type *item)
+ :Year_null(item->to_longlong_null_op(), item->unsigned_flag,
+ year_precision(item))
+{ }
+
+
+const LEX_CSTRING Interval_DDhhmmssff::m_type_name=
+ {STRING_WITH_LEN("INTERVAL DAY TO SECOND")};
+
+
+Interval_DDhhmmssff::Interval_DDhhmmssff(THD *thd, Status *st,
+ bool push_warnings,
+ Item *item, ulong max_hour,
+ time_round_mode_t mode, uint dec)
{
- if (item->get_date(this, opt.get_date_flags()))
+ switch (item->cmp_type()) {
+ case ROW_RESULT:
+ DBUG_ASSERT(0);
+ time_type= MYSQL_TIMESTAMP_NONE;
+ break;
+ case TIME_RESULT:
+ {
+ // Rounding mode is not important here
+ if (item->get_date(thd, this, Options(TIME_TIME_ONLY, TIME_FRAC_NONE)))
+ time_type= MYSQL_TIMESTAMP_NONE;
+ else if (time_type != MYSQL_TIMESTAMP_TIME)
+ {
+ st->warnings|= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ push_warning_wrong_or_truncated_value(thd, ErrConvTime(this),
+ st->warnings);
+ time_type= MYSQL_TIMESTAMP_NONE;
+ }
+ break;
+ }
+ case INT_RESULT:
+ case REAL_RESULT:
+ case DECIMAL_RESULT:
+ case STRING_RESULT:
+ {
+ StringBuffer<STRING_BUFFER_USUAL_SIZE> tmp;
+ String *str= item->val_str(&tmp);
+ if (!str)
+ time_type= MYSQL_TIMESTAMP_NONE;
+ else if (str_to_DDhhmmssff(st, str->ptr(), str->length(), str->charset(),
+ UINT_MAX32))
+ {
+ if (push_warnings)
+ thd->push_warning_wrong_value(Sql_condition::WARN_LEVEL_WARN,
+ m_type_name.str,
+ ErrConvString(str).ptr());
+ time_type= MYSQL_TIMESTAMP_NONE;
+ }
+ else
+ {
+ if (mode == TIME_FRAC_ROUND)
+ time_round_or_set_max(dec, &st->warnings, max_hour, st->nanoseconds);
+ if (hour > max_hour)
+ {
+ st->warnings|= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ time_type= MYSQL_TIMESTAMP_NONE;
+ }
+ // Warn if hour or nanosecond truncation happened
+ if (push_warnings)
+ push_warning_wrong_or_truncated_value(thd, ErrConvString(str),
+ st->warnings);
+ }
+ }
+ break;
+ }
+ DBUG_ASSERT(is_valid_value_slow());
+}
+
+
+void
+Interval_DDhhmmssff::push_warning_wrong_or_truncated_value(THD *thd,
+ const ErrConv &str,
+ int warnings)
+{
+ if (warnings & MYSQL_TIME_WARN_OUT_OF_RANGE)
+ {
+ thd->push_warning_wrong_value(Sql_condition::WARN_LEVEL_WARN,
+ m_type_name.str, str.ptr());
+ }
+ else if (MYSQL_TIME_WARN_HAVE_WARNINGS(warnings))
+ {
+ thd->push_warning_truncated_wrong_value(Sql_condition::WARN_LEVEL_WARN,
+ m_type_name.str, str.ptr());
+ }
+ else if (MYSQL_TIME_WARN_HAVE_NOTES(warnings))
+ {
+ thd->push_warning_truncated_wrong_value(Sql_condition::WARN_LEVEL_NOTE,
+ m_type_name.str, str.ptr());
+ }
+}
+
+
+uint Interval_DDhhmmssff::fsp(THD *thd, Item *item)
+{
+ switch (item->cmp_type()) {
+ case INT_RESULT:
+ case TIME_RESULT:
+ return item->decimals;
+ case REAL_RESULT:
+ case DECIMAL_RESULT:
+ return MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS);
+ case ROW_RESULT:
+ DBUG_ASSERT(0);
+ return 0;
+ case STRING_RESULT:
+ break;
+ }
+ if (!item->const_item() || item->is_expensive())
+ return TIME_SECOND_PART_DIGITS;
+ Status st;
+ Interval_DDhhmmssff it(thd, &st, false/*no warnings*/, item, UINT_MAX32,
+ TIME_FRAC_TRUNCATE, TIME_SECOND_PART_DIGITS);
+ return it.is_valid_interval_DDhhmmssff() ? st.precision :
+ TIME_SECOND_PART_DIGITS;
+}
+
+
+void Time::make_from_item(THD *thd, int *warn, Item *item, const Options opt)
+{
+ *warn= 0;
+ if (item->get_date(thd, this, opt))
time_type= MYSQL_TIMESTAMP_NONE;
else
- valid_MYSQL_TIME_to_valid_value(opt);
+ valid_MYSQL_TIME_to_valid_value(thd, warn, opt);
+}
+
+
+static uint msec_round_add[7]=
+{
+ 500000000,
+ 50000000,
+ 5000000,
+ 500000,
+ 50000,
+ 5000,
+ 0
+};
+
+
+Sec9 & Sec9::round(uint dec)
+{
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ if (Sec6::add_nanoseconds(m_nsec + msec_round_add[dec]))
+ m_sec++;
+ m_nsec= 0;
+ Sec6::trunc(dec);
+ return *this;
+}
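+
+/*
+ The table above implements half-up rounding: msec_round_add[dec] is the
+ number of nanoseconds (half of the smallest kept unit) added before
+ truncation. For example, rounding 1.2345 seconds to dec= 3 adds
+ 500000 ns (0.0005 s) and then truncates, giving 1.235.
+*/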
+
+
+void Timestamp::round_or_set_max(uint dec, int *warn)
+{
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ if (add_nanoseconds_usec(msec_round_add[dec]) &&
+ tv_sec++ >= TIMESTAMP_MAX_VALUE)
+ {
+ tv_sec= TIMESTAMP_MAX_VALUE;
+ tv_usec= TIME_MAX_SECOND_PART;
+ *warn|= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ }
+ my_timeval_trunc(this, dec);
+}
+
+
+bool Temporal::add_nanoseconds_with_round(THD *thd, int *warn,
+ date_conv_mode_t mode,
+ ulong nsec)
+{
+ switch (time_type) {
+ case MYSQL_TIMESTAMP_TIME:
+ {
+ ulong max_hour= (mode & (TIME_INTERVAL_DAY | TIME_INTERVAL_hhmmssff)) ?
+ TIME_MAX_INTERVAL_HOUR : TIME_MAX_HOUR;
+ time_round_or_set_max(6, warn, max_hour, nsec);
+ return false;
+ }
+ case MYSQL_TIMESTAMP_DATETIME:
+ return datetime_round_or_invalidate(thd, 6, warn, nsec);
+ case MYSQL_TIMESTAMP_DATE:
+ return false;
+ case MYSQL_TIMESTAMP_NONE:
+ return false;
+ case MYSQL_TIMESTAMP_ERROR:
+ break;
+ }
+ DBUG_ASSERT(0);
+ return false;
}
-void Temporal_with_date::make_from_item(THD *thd, Item *item, sql_mode_t flags)
+void Temporal::time_round_or_set_max(uint dec, int *warn,
+ ulong max_hour, ulong nsec)
{
- flags&= ~TIME_TIME_ONLY;
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ if (add_nanoseconds_mmssff(nsec) && ++hour > max_hour)
+ {
+ time_hhmmssff_set_max(max_hour);
+ *warn|= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ }
+ my_time_trunc(this, dec);
+}
+
+
+void Time::round_or_set_max(uint dec, int *warn, ulong nsec)
+{
+ Temporal::time_round_or_set_max(dec, warn, TIME_MAX_HOUR, nsec);
+ DBUG_ASSERT(is_valid_time_slow());
+}
+
+
+void Time::round_or_set_max(uint dec, int *warn)
+{
+ round_or_set_max(dec, warn, msec_round_add[dec]);
+}
+
+/**
+ Create from a DATETIME by subtracting a given number of days,
+ implementing an optimized version of calc_time_diff().
+*/
+void Time::make_from_datetime_with_days_diff(int *warn, const MYSQL_TIME *from,
+ long days)
+{
+ *warn= 0;
+ DBUG_ASSERT(from->time_type == MYSQL_TIMESTAMP_DATETIME ||
+ from->time_type == MYSQL_TIMESTAMP_DATE);
+ long daynr= calc_daynr(from->year, from->month, from->day);
+ long daydiff= daynr - days;
+ if (!daynr) // Zero date
+ {
+ set_zero_time(this, MYSQL_TIMESTAMP_TIME);
+ neg= true;
+ hour= TIME_MAX_HOUR + 1; // to report "out of range" in "warn"
+ }
+ else if (daydiff >= 0)
+ {
+ neg= false;
+ year= month= day= 0;
+ hhmmssff_copy(from);
+ hour+= daydiff * 24;
+ time_type= MYSQL_TIMESTAMP_TIME;
+ }
+ else
+ {
+ longlong timediff= ((((daydiff * 24LL +
+ from->hour) * 60LL +
+ from->minute) * 60LL +
+ from->second) * 1000000LL +
+ from->second_part);
+ unpack_time(timediff, this, MYSQL_TIMESTAMP_TIME);
+ if (year || month)
+ {
+ *warn|= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ year= month= day= 0;
+ hour= TIME_MAX_HOUR + 1;
+ }
+ }
+ // The above code can generate TIME values outside of the valid TIME range.
+ adjust_time_range_or_invalidate(warn);
+}
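+
+/*
+ Example of the fast path above (a sketch): converting
+ '2001-01-02 03:04:05' with days= calc_daynr(2001, 1, 1) gives daydiff= 1,
+ so the day difference is folded into hours and the result is the TIME
+ value '27:04:05'.
+*/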
+
+
+void Time::make_from_datetime_move_day_to_hour(int *warn,
+ const MYSQL_TIME *from)
+{
+ *warn= 0;
+ DBUG_ASSERT(from->time_type == MYSQL_TIMESTAMP_DATE ||
+ from->time_type == MYSQL_TIMESTAMP_DATETIME);
+ time_type= MYSQL_TIMESTAMP_TIME;
+ neg= false;
+ year= month= day= 0;
+ hhmmssff_copy(from);
+ datetime_to_time_YYYYMMDD_000000DD_mix_to_hours(warn, from->year,
+ from->month, from->day);
+ adjust_time_range_or_invalidate(warn);
+}
+
+
+void Time::make_from_datetime(int *warn, const MYSQL_TIME *from, long curdays)
+{
+ if (!curdays)
+ make_from_datetime_move_day_to_hour(warn, from);
+ else
+ make_from_datetime_with_days_diff(warn, from, curdays);
+}
+
+
+void Time::make_from_time(int *warn, const MYSQL_TIME *from)
+{
+ DBUG_ASSERT(from->time_type == MYSQL_TIMESTAMP_TIME);
+ if (from->year || from->month)
+ make_from_out_of_range(warn);
+ else
+ {
+ *warn= 0;
+ DBUG_ASSERT(from->day == 0);
+ *(static_cast<MYSQL_TIME*>(this))= *from;
+ adjust_time_range_or_invalidate(warn);
+ }
+}
+
+
+Time::Time(int *warn, const MYSQL_TIME *from, long curdays)
+{
+ switch (from->time_type) {
+ case MYSQL_TIMESTAMP_NONE:
+ case MYSQL_TIMESTAMP_ERROR:
+ make_from_out_of_range(warn);
+ break;
+ case MYSQL_TIMESTAMP_DATE:
+ case MYSQL_TIMESTAMP_DATETIME:
+ make_from_datetime(warn, from, curdays);
+ break;
+ case MYSQL_TIMESTAMP_TIME:
+ make_from_time(warn, from);
+ break;
+ }
+ DBUG_ASSERT(is_valid_value_slow());
+}
+
+
+Time::Time(int *warn, bool neg, ulonglong hour, uint minute, const Sec6 &second)
+{
+ DBUG_ASSERT(second.sec() <= 59);
+ *warn= 0;
+ set_zero_time(this, MYSQL_TIMESTAMP_TIME);
+ MYSQL_TIME::neg= neg;
+ MYSQL_TIME::hour= hour > TIME_MAX_HOUR ? (uint) (TIME_MAX_HOUR + 1) :
+ (uint) hour;
+ MYSQL_TIME::minute= minute;
+ MYSQL_TIME::second= (uint) second.sec();
+ MYSQL_TIME::second_part= second.usec();
+ adjust_time_range_or_invalidate(warn);
+}
+
+
+void Temporal_with_date::make_from_item(THD *thd, Item *item,
+ date_mode_t fuzzydate)
+{
+ date_conv_mode_t flags= date_conv_mode_t(fuzzydate) & ~TIME_TIME_ONLY;
/*
 Some TIME type items return an error when trying to do get_date()
without TIME_TIME_ONLY set (e.g. Item_field for Field_time).
@@ -144,10 +815,11 @@ void Temporal_with_date::make_from_item(THD *thd, Item *item, sql_mode_t flags)
In the legacy time->datetime conversion mode we do not add TIME_TIME_ONLY
and leave it to get_date() to check date.
*/
- ulonglong time_flag= (item->field_type() == MYSQL_TYPE_TIME &&
- !(thd->variables.old_behavior & OLD_MODE_ZERO_DATE_TIME_CAST)) ?
- TIME_TIME_ONLY : 0;
- if (item->get_date(this, flags | time_flag))
+ date_conv_mode_t time_flag= (item->field_type() == MYSQL_TYPE_TIME &&
+ !(thd->variables.old_behavior & OLD_MODE_ZERO_DATE_TIME_CAST)) ?
+ TIME_TIME_ONLY : TIME_CONV_NONE;
+ Options opt(flags | time_flag, time_round_mode_t(fuzzydate));
+ if (item->get_date(thd, this, opt))
time_type= MYSQL_TIMESTAMP_NONE;
else if (time_type == MYSQL_TIMESTAMP_TIME)
{
@@ -160,6 +832,154 @@ void Temporal_with_date::make_from_item(THD *thd, Item *item, sql_mode_t flags)
}
+void Temporal_with_date::check_date_or_invalidate(int *warn,
+ date_conv_mode_t flags)
+{
+ if (::check_date(this, pack_time(this) != 0,
+ ulonglong(flags & TIME_MODE_FOR_XXX_TO_DATE), warn))
+ time_type= MYSQL_TIMESTAMP_NONE;
+}
+
+
+void Datetime::make_from_time(THD *thd, int *warn, const MYSQL_TIME *from,
+ date_conv_mode_t flags)
+{
+ DBUG_ASSERT(from->time_type == MYSQL_TIMESTAMP_TIME);
+ if (time_to_datetime(thd, from, this))
+ make_from_out_of_range(warn);
+ else
+ {
+ *warn= 0;
+ check_date_or_invalidate(warn, flags);
+ }
+}
+
+
+void Datetime::make_from_datetime(THD *thd, int *warn, const MYSQL_TIME *from,
+ date_conv_mode_t flags)
+{
+ DBUG_ASSERT(from->time_type == MYSQL_TIMESTAMP_DATE ||
+ from->time_type == MYSQL_TIMESTAMP_DATETIME);
+ if (from->neg || check_datetime_range(from))
+ make_from_out_of_range(warn);
+ else
+ {
+ *warn= 0;
+ *(static_cast<MYSQL_TIME*>(this))= *from;
+ date_to_datetime(this);
+ check_date_or_invalidate(warn, flags);
+ }
+}
+
+
+Datetime::Datetime(THD *thd, const timeval &tv)
+{
+ thd->variables.time_zone->gmt_sec_to_TIME(this, tv.tv_sec);
+ second_part= tv.tv_usec;
+ thd->time_zone_used= 1;
+ DBUG_ASSERT(is_valid_value_slow());
+}
+
+
+Datetime::Datetime(THD *thd, int *warn, const MYSQL_TIME *from,
+ date_conv_mode_t flags)
+{
+ DBUG_ASSERT(bool(flags & TIME_TIME_ONLY) == false);
+ switch (from->time_type) {
+ case MYSQL_TIMESTAMP_ERROR:
+ case MYSQL_TIMESTAMP_NONE:
+ make_from_out_of_range(warn);
+ break;
+ case MYSQL_TIMESTAMP_TIME:
+ make_from_time(thd, warn, from, flags);
+ break;
+ case MYSQL_TIMESTAMP_DATETIME:
+ case MYSQL_TIMESTAMP_DATE:
+ make_from_datetime(thd, warn, from, flags);
+ break;
+ }
+ DBUG_ASSERT(is_valid_value_slow());
+}
+
+
+bool Temporal::datetime_add_nanoseconds_or_invalidate(THD *thd, int *warn, ulong nsec)
+{
+ if (!add_nanoseconds_mmssff(nsec))
+ return false;
+ /*
+ Overflow happened on minutes. Now we need to add 1 hour to the value.
+ Catch a special case for the maximum possible date and hour==23, to
+ truncate '9999-12-31 23:59:59.9999999' (with 7 fractional digits)
+ to '9999-12-31 23:59:59.999999' (with 6 fractional digits),
+ with a warning, instead of returning an error, so this statement:
+ INSERT INTO t1 (datetime_column) VALUES ('9999-12-31 23:59:59.9999999');
+ inserts a value truncated to 6 fractional digits, instead of the zero
+ date '0000-00-00 00:00:00.000000'.
+ */
+ if (year == 9999 && month == 12 && day == 31 && hour == 23)
+ {
+ minute= 59;
+ second= 59;
+ second_part= 999999;
+ *warn= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ return false;
+ }
+ INTERVAL interval;
+ memset(&interval, 0, sizeof(interval));
+ interval.hour= 1;
+ /* date_add_interval cannot handle bad dates */
+ if (check_date(TIME_NO_ZERO_IN_DATE | TIME_NO_ZERO_DATE, warn) ||
+ date_add_interval(thd, this, INTERVAL_HOUR, interval))
+ {
+ make_from_out_of_range(warn);
+ return true;
+ }
+ return false;
+}
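+
+/*
+ A sketch of the carry path above: rounding '2001-12-31 23:59:59.9999999'
+ to 6 fractional digits overflows seconds and minutes, so one hour is
+ added through date_add_interval(), giving '2002-01-01 00:00:00.000000';
+ only the absolute maximum datetime is truncated instead, as described in
+ the comment above.
+*/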
+
+
+bool Temporal::datetime_round_or_invalidate(THD *thd, uint dec, int *warn, ulong nsec)
+{
+ DBUG_ASSERT(dec <= TIME_SECOND_PART_DIGITS);
+ if (datetime_add_nanoseconds_or_invalidate(thd, warn, nsec))
+ return true;
+ my_time_trunc(this, dec);
+ return false;
+}
+
+
+bool Datetime::round_or_invalidate(THD *thd, uint dec, int *warn)
+{
+ return round_or_invalidate(thd, dec, warn, msec_round_add[dec]);
+}
+
+
+Datetime_from_temporal::Datetime_from_temporal(THD *thd, Item *temporal,
+ date_conv_mode_t fuzzydate)
+ :Datetime(thd, temporal, Options(fuzzydate, TIME_FRAC_NONE))
+{
+ // Exact rounding mode does not matter
+ DBUG_ASSERT(temporal->cmp_type() == TIME_RESULT);
+}
+
+
+Datetime_truncation_not_needed::Datetime_truncation_not_needed(THD *thd, Item *item,
+ date_conv_mode_t mode)
+ :Datetime(thd, item, Options(mode, TIME_FRAC_NONE))
+{
+ /*
+ The Datetime() constructor called above would only truncate nanoseconds
+ if they existed (but we know there were none). Here we assert that there
+ are also no microsecond digits outside of the scale specified in "dec".
+ */
+ DBUG_ASSERT(!is_valid_datetime() ||
+ fraction_remainder(MY_MIN(item->decimals,
+ TIME_SECOND_PART_DIGITS)) == 0);
+}
+
+/********************************************************************/
+
uint Type_std_attributes::count_max_decimals(Item **item, uint nitems)
{
uint res= 0;
@@ -280,6 +1100,32 @@ bool Type_std_attributes::count_string_length(const char *func_name,
}
+/*
+ Find a handler by its ODBC literal data type.
+
+ @param type_str - data type name, not necessarily 0-terminated
+ @retval - a pointer to the data type handler if type_str points
+ to a known ODBC literal data type, or NULL otherwise
+*/
+const Type_handler *
+Type_handler::odbc_literal_type_handler(const LEX_CSTRING *type_str)
+{
+ if (type_str->length == 1)
+ {
+ if (type_str->str[0] == 'd') // {d'2001-01-01'}
+ return &type_handler_newdate;
+ else if (type_str->str[0] == 't') // {t'10:20:30'}
+ return &type_handler_time2;
+ }
+ else if (type_str->length == 2) // {ts'2001-01-01 10:20:30'}
+ {
+ if (type_str->str[0] == 't' && type_str->str[1] == 's')
+ return &type_handler_datetime2;
+ }
+ return NULL; // Not a known ODBC literal type
+}
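+
+/*
+ Usage sketch: ODBC-style literals such as {d'2001-01-01'}, {t'10:20:30'}
+ and {ts'2001-01-01 10:20:30'} (as in the comments above) are mapped by
+ this function to the DATE, TIME and DATETIME handlers respectively; any
+ other prefix falls through to the NULL return.
+*/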
+
+
/**
This method is used by:
- Item_user_var_as_out_param::field_type()
@@ -433,6 +1279,7 @@ const Name
Type_handler_string::m_name_char(STRING_WITH_LEN("char")),
Type_handler_var_string::m_name_var_string(STRING_WITH_LEN("varchar")),
Type_handler_varchar::m_name_varchar(STRING_WITH_LEN("varchar")),
+ Type_handler_hex_hybrid::m_name_hex_hybrid(STRING_WITH_LEN("hex_hybrid")),
Type_handler_tiny_blob::m_name_tinyblob(STRING_WITH_LEN("tinyblob")),
Type_handler_medium_blob::m_name_mediumblob(STRING_WITH_LEN("mediumblob")),
Type_handler_long_blob::m_name_longblob(STRING_WITH_LEN("longblob")),
@@ -443,6 +1290,7 @@ const Name
Type_handler_set::m_name_set(STRING_WITH_LEN("set"));
const Name
+ Type_handler_bool::m_name_bool(STRING_WITH_LEN("boolean")),
Type_handler_tiny::m_name_tiny(STRING_WITH_LEN("tinyint")),
Type_handler_short::m_name_short(STRING_WITH_LEN("smallint")),
Type_handler_long::m_name_int(STRING_WITH_LEN("int")),
@@ -465,6 +1313,11 @@ const Name
Type_handler_datetime_common::m_name_datetime(STRING_WITH_LEN("datetime")),
Type_handler_timestamp_common::m_name_timestamp(STRING_WITH_LEN("timestamp"));
+const Name
+ Type_handler::m_version_default(STRING_WITH_LEN("")),
+ Type_handler::m_version_mariadb53(STRING_WITH_LEN("mariadb-5.3")),
+ Type_handler::m_version_mysql56(STRING_WITH_LEN("mysql-5.6"));
+
const Type_limits_int
Type_handler_tiny::m_limits_sint8= Type_limits_sint8(),
@@ -530,7 +1383,7 @@ const Type_handler *Type_handler_datetime_common::type_handler_for_comparison()
const Type_handler *Type_handler_timestamp_common::type_handler_for_comparison() const
{
- return &type_handler_datetime;
+ return &type_handler_timestamp;
}
@@ -541,6 +1394,15 @@ const Type_handler *Type_handler_row::type_handler_for_comparison() const
/***************************************************************************/
+const Type_handler *
+Type_handler_timestamp_common::type_handler_for_native_format() const
+{
+ return &type_handler_timestamp2;
+}
+
+
+/***************************************************************************/
+
const Type_handler *Type_handler_typelib::type_handler_for_item_field() const
{
return &type_handler_string;
@@ -715,6 +1577,16 @@ Type_handler_hybrid_field_type::aggregate_for_comparison(const Type_handler *h)
*/
if (b == TIME_RESULT)
 m_type_handler= h; // Temporal types beat non-temporal types
+ /*
+ Compare TIMESTAMP to a non-temporal type as DATETIME.
+ This is needed to make queries with fuzzy dates work:
+ SELECT * FROM t1
+ WHERE
+ ts BETWEEN '0000-00-00' AND '2010-00-01 00:00:00';
+ */
+ if (m_type_handler->type_handler_for_native_format() ==
+ &type_handler_timestamp2)
+ m_type_handler= &type_handler_datetime;
}
else
{
@@ -798,7 +1670,19 @@ Type_handler_hybrid_field_type::aggregate_for_min_max(const Type_handler *h)
}
else if (a == TIME_RESULT || b == TIME_RESULT)
{
- if ((a == TIME_RESULT) + (b == TIME_RESULT) == 1)
+ if ((m_type_handler->type_handler_for_native_format() ==
+ &type_handler_timestamp2) +
+ (h->type_handler_for_native_format() ==
+ &type_handler_timestamp2) == 1)
+ {
+ /*
+ Handle LEAST(TIMESTAMP, non-TIMESTAMP) as DATETIME,
+ to make sure fuzzy dates work in this context:
+ LEAST('2001-00-00', timestamp_field)
+ */
+ m_type_handler= &type_handler_datetime2;
+ }
+ else if ((a == TIME_RESULT) + (b == TIME_RESULT) == 1)
{
/*
We're here if there's only one temporal data type:
@@ -1443,6 +2327,17 @@ Field *Type_handler_set::make_conversion_table_field(TABLE *table,
((const Field_enum*) target)->typelib, target->charset());
}
+
+/*************************************************************************/
+
+bool Type_handler::
+ Column_definition_validate_check_constraint(THD *thd,
+ Column_definition * c) const
+{
+ return c->validate_check_constraint(thd);
+}
+
+
/*************************************************************************/
bool Type_handler_null::
Column_definition_fix_attributes(Column_definition *def) const
@@ -1604,6 +2499,70 @@ bool Type_handler_bit::
/*************************************************************************/
+void Type_handler_blob_common::
+ Column_definition_reuse_fix_attributes(THD *thd,
+ Column_definition *def,
+ const Field *field) const
+{
+ DBUG_ASSERT(def->key_length == 0);
+}
+
+
+void Type_handler_typelib::
+ Column_definition_reuse_fix_attributes(THD *thd,
+ Column_definition *def,
+ const Field *field) const
+{
+ DBUG_ASSERT(def->flags & (ENUM_FLAG | SET_FLAG));
+ def->interval= field->get_typelib();
+}
+
+
+#ifdef HAVE_SPATIAL
+void Type_handler_geometry::
+ Column_definition_reuse_fix_attributes(THD *thd,
+ Column_definition *def,
+ const Field *field) const
+{
+ def->geom_type= ((Field_geom*) field)->geom_type;
+ def->srid= ((Field_geom*) field)->srid;
+}
+#endif
+
+
+void Type_handler_year::
+ Column_definition_reuse_fix_attributes(THD *thd,
+ Column_definition *def,
+ const Field *field) const
+{
+ if (def->length != 4)
+ {
+ char buff[sizeof("YEAR()") + MY_INT64_NUM_DECIMAL_DIGITS + 1];
+ my_snprintf(buff, sizeof(buff), "YEAR(%llu)", def->length);
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_WARN_DEPRECATED_SYNTAX,
+ ER_THD(thd, ER_WARN_DEPRECATED_SYNTAX),
+ buff, "YEAR(4)");
+ }
+}
+
+
+void Type_handler_real_result::
+ Column_definition_reuse_fix_attributes(THD *thd,
+ Column_definition *def,
+ const Field *field) const
+{
+ /*
+ Floating point values are stored with FLOATING_POINT_DECIMALS, but are
+ used internally in MariaDB with NOT_FIXED_DEC, which is >= FLOATING_POINT_DECIMALS.
+ */
+ if (def->decimals >= FLOATING_POINT_DECIMALS)
+ def->decimals= NOT_FIXED_DEC;
+}
+
+
+/*************************************************************************/
+
bool Type_handler::
Column_definition_prepare_stage1(THD *thd,
MEM_ROOT *mem_root,
@@ -2007,8 +2966,8 @@ Field *Type_handler_tiny::make_table_field(const LEX_CSTRING *name,
TABLE *table) const
{
return new (table->in_use->mem_root)
- Field_tiny(addr.ptr, attr.max_char_length(),
- addr.null_ptr, addr.null_bit,
+ Field_tiny(addr.ptr(), attr.max_char_length(),
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name, 0/*zerofill*/, attr.unsigned_flag);
}
@@ -2020,8 +2979,8 @@ Field *Type_handler_short::make_table_field(const LEX_CSTRING *name,
{
return new (table->in_use->mem_root)
- Field_short(addr.ptr, attr.max_char_length(),
- addr.null_ptr, addr.null_bit,
+ Field_short(addr.ptr(), attr.max_char_length(),
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name, 0/*zerofill*/, attr.unsigned_flag);
}
@@ -2032,8 +2991,8 @@ Field *Type_handler_int24::make_table_field(const LEX_CSTRING *name,
TABLE *table) const
{
return new (table->in_use->mem_root)
- Field_medium(addr.ptr, attr.max_char_length(),
- addr.null_ptr, addr.null_bit,
+ Field_medium(addr.ptr(), attr.max_char_length(),
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name,
0/*zerofill*/, attr.unsigned_flag);
}
@@ -2045,8 +3004,8 @@ Field *Type_handler_long::make_table_field(const LEX_CSTRING *name,
TABLE *table) const
{
return new (table->in_use->mem_root)
- Field_long(addr.ptr, attr.max_char_length(),
- addr.null_ptr, addr.null_bit,
+ Field_long(addr.ptr(), attr.max_char_length(),
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name, 0/*zerofill*/, attr.unsigned_flag);
}
@@ -2057,8 +3016,8 @@ Field *Type_handler_longlong::make_table_field(const LEX_CSTRING *name,
TABLE *table) const
{
return new (table->in_use->mem_root)
- Field_longlong(addr.ptr, attr.max_char_length(),
- addr.null_ptr, addr.null_bit,
+ Field_longlong(addr.ptr(), attr.max_char_length(),
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name,
0/*zerofill*/, attr.unsigned_flag);
}
@@ -2070,8 +3029,8 @@ Field *Type_handler_vers_trx_id::make_table_field(const LEX_CSTRING *name,
TABLE *table) const
{
return new (table->in_use->mem_root)
- Field_vers_trx_id(addr.ptr, attr.max_char_length(),
- addr.null_ptr, addr.null_bit,
+ Field_vers_trx_id(addr.ptr(), attr.max_char_length(),
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name,
0/*zerofill*/, attr.unsigned_flag);
}
@@ -2083,8 +3042,8 @@ Field *Type_handler_float::make_table_field(const LEX_CSTRING *name,
TABLE *table) const
{
return new (table->in_use->mem_root)
- Field_float(addr.ptr, attr.max_char_length(),
- addr.null_ptr, addr.null_bit,
+ Field_float(addr.ptr(), attr.max_char_length(),
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name,
(uint8) attr.decimals, 0/*zerofill*/, attr.unsigned_flag);
}
@@ -2096,8 +3055,8 @@ Field *Type_handler_double::make_table_field(const LEX_CSTRING *name,
TABLE *table) const
{
return new (table->in_use->mem_root)
- Field_double(addr.ptr, attr.max_char_length(),
- addr.null_ptr, addr.null_bit,
+ Field_double(addr.ptr(), attr.max_char_length(),
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name,
(uint8) attr.decimals, 0/*zerofill*/, attr.unsigned_flag);
}
@@ -2118,7 +3077,8 @@ Type_handler_olddecimal::make_table_field(const LEX_CSTRING *name,
*/
DBUG_ASSERT(0);
return new (table->in_use->mem_root)
- Field_decimal(addr.ptr, attr.max_length, addr.null_ptr, addr.null_bit,
+ Field_decimal(addr.ptr(), attr.max_length,
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name, (uint8) attr.decimals,
0/*zerofill*/,attr.unsigned_flag);
}
@@ -2165,7 +3125,7 @@ Type_handler_newdecimal::make_table_field(const LEX_CSTRING *name,
len= required_length;
}
return new (table->in_use->mem_root)
- Field_new_decimal(addr.ptr, len, addr.null_ptr, addr.null_bit,
+ Field_new_decimal(addr.ptr(), len, addr.null_ptr(), addr.null_bit(),
Field::NONE, name,
dec, 0/*zerofill*/, attr.unsigned_flag);
}
@@ -2177,7 +3137,8 @@ Field *Type_handler_year::make_table_field(const LEX_CSTRING *name,
TABLE *table) const
{
return new (table->in_use->mem_root)
- Field_year(addr.ptr, attr.max_length, addr.null_ptr, addr.null_bit,
+ Field_year(addr.ptr(), attr.max_length,
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name);
}
@@ -2189,7 +3150,7 @@ Field *Type_handler_null::make_table_field(const LEX_CSTRING *name,
{
return new (table->in_use->mem_root)
- Field_null(addr.ptr, attr.max_length,
+ Field_null(addr.ptr(), attr.max_length,
Field::NONE, name, attr.collation.collation);
}
@@ -2201,7 +3162,7 @@ Field *Type_handler_timestamp::make_table_field(const LEX_CSTRING *name,
{
return new_Field_timestamp(table->in_use->mem_root,
- addr.ptr, addr.null_ptr, addr.null_bit,
+ addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name, table->s, attr.decimals);
}
@@ -2217,7 +3178,7 @@ Field *Type_handler_timestamp2::make_table_field(const LEX_CSTRING *name,
make_table_field() for make_field() purposes in field.cc.
*/
return new_Field_timestamp(table->in_use->mem_root,
- addr.ptr, addr.null_ptr, addr.null_bit,
+ addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name, table->s, attr.decimals);
}
@@ -2229,7 +3190,7 @@ Field *Type_handler_newdate::make_table_field(const LEX_CSTRING *name,
{
return new (table->in_use->mem_root)
- Field_newdate(addr.ptr, addr.null_ptr, addr.null_bit,
+ Field_newdate(addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name);
}
@@ -2246,7 +3207,7 @@ Field *Type_handler_date::make_table_field(const LEX_CSTRING *name,
*/
DBUG_ASSERT(0);
return new (table->in_use->mem_root)
- Field_date(addr.ptr, addr.null_ptr, addr.null_bit,
+ Field_date(addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name);
}
@@ -2258,7 +3219,7 @@ Field *Type_handler_time::make_table_field(const LEX_CSTRING *name,
{
return new_Field_time(table->in_use->mem_root,
- addr.ptr, addr.null_ptr, addr.null_bit,
+ addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name, attr.decimals);
}
@@ -2275,7 +3236,7 @@ Field *Type_handler_time2::make_table_field(const LEX_CSTRING *name,
make_table_field() for make_field() purposes in field.cc.
*/
return new_Field_time(table->in_use->mem_root,
- addr.ptr, addr.null_ptr, addr.null_bit,
+ addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name, attr.decimals);
}
@@ -2287,7 +3248,7 @@ Field *Type_handler_datetime::make_table_field(const LEX_CSTRING *name,
{
return new_Field_datetime(table->in_use->mem_root,
- addr.ptr, addr.null_ptr, addr.null_bit,
+ addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name, attr.decimals);
}
@@ -2302,7 +3263,7 @@ Field *Type_handler_datetime2::make_table_field(const LEX_CSTRING *name,
make_table_field() for make_field() purposes in field.cc.
*/
return new_Field_datetime(table->in_use->mem_root,
- addr.ptr, addr.null_ptr, addr.null_bit,
+ addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name, attr.decimals);
}
@@ -2314,8 +3275,8 @@ Field *Type_handler_bit::make_table_field(const LEX_CSTRING *name,
{
return new (table->in_use->mem_root)
- Field_bit_as_char(addr.ptr, attr.max_length,
- addr.null_ptr, addr.null_bit,
+ Field_bit_as_char(addr.ptr(), attr.max_length,
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name);
}
@@ -2327,7 +3288,8 @@ Field *Type_handler_string::make_table_field(const LEX_CSTRING *name,
{
return new (table->in_use->mem_root)
- Field_string(addr.ptr, attr.max_length, addr.null_ptr, addr.null_bit,
+ Field_string(addr.ptr(), attr.max_length,
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name, attr.collation);
}
@@ -2339,9 +3301,9 @@ Field *Type_handler_varchar::make_table_field(const LEX_CSTRING *name,
{
return new (table->in_use->mem_root)
- Field_varstring(addr.ptr, attr.max_length,
+ Field_varstring(addr.ptr(), attr.max_length,
HA_VARCHAR_PACKLENGTH(attr.max_length),
- addr.null_ptr, addr.null_bit,
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name,
table->s, attr.collation);
}
@@ -2354,7 +3316,7 @@ Field *Type_handler_tiny_blob::make_table_field(const LEX_CSTRING *name,
{
return new (table->in_use->mem_root)
- Field_blob(addr.ptr, addr.null_ptr, addr.null_bit,
+ Field_blob(addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name, table->s,
1, attr.collation);
}
@@ -2367,7 +3329,7 @@ Field *Type_handler_blob::make_table_field(const LEX_CSTRING *name,
{
return new (table->in_use->mem_root)
- Field_blob(addr.ptr, addr.null_ptr, addr.null_bit,
+ Field_blob(addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name, table->s,
2, attr.collation);
}
@@ -2381,7 +3343,7 @@ Type_handler_medium_blob::make_table_field(const LEX_CSTRING *name,
{
return new (table->in_use->mem_root)
- Field_blob(addr.ptr, addr.null_ptr, addr.null_bit,
+ Field_blob(addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name, table->s,
3, attr.collation);
}
@@ -2394,7 +3356,7 @@ Field *Type_handler_long_blob::make_table_field(const LEX_CSTRING *name,
{
return new (table->in_use->mem_root)
- Field_blob(addr.ptr, addr.null_ptr, addr.null_bit,
+ Field_blob(addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name, table->s,
4, attr.collation);
}
@@ -2408,7 +3370,7 @@ Field *Type_handler_geometry::make_table_field(const LEX_CSTRING *name,
TABLE *table) const
{
return new (table->in_use->mem_root)
- Field_geom(addr.ptr, addr.null_ptr, addr.null_bit,
+ Field_geom(addr.ptr(), addr.null_ptr(), addr.null_bit(),
Field::NONE, name, table->s, 4,
(Field::geometry_type) attr.uint_geometry_type(),
0);
@@ -2424,7 +3386,8 @@ Field *Type_handler_enum::make_table_field(const LEX_CSTRING *name,
TYPELIB *typelib= attr.get_typelib();
DBUG_ASSERT(typelib);
return new (table->in_use->mem_root)
- Field_enum(addr.ptr, attr.max_length, addr.null_ptr, addr.null_bit,
+ Field_enum(addr.ptr(), attr.max_length,
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name,
get_enum_pack_length(typelib->count), typelib,
attr.collation);
@@ -2440,7 +3403,8 @@ Field *Type_handler_set::make_table_field(const LEX_CSTRING *name,
TYPELIB *typelib= attr.get_typelib();
DBUG_ASSERT(typelib);
return new (table->in_use->mem_root)
- Field_set(addr.ptr, attr.max_length, addr.null_ptr, addr.null_bit,
+ Field_set(addr.ptr(), attr.max_length,
+ addr.null_ptr(), addr.null_bit(),
Field::NONE, name,
get_enum_pack_length(typelib->count), typelib,
attr.collation);
@@ -2512,6 +3476,63 @@ uint32 Type_handler_general_purpose_int::max_display_length(const Item *item)
/*************************************************************************/
+void Type_handler_row::Item_update_null_value(Item *item) const
+{
+ DBUG_ASSERT(0);
+ item->null_value= true;
+}
+
+
+void Type_handler_time_common::Item_update_null_value(Item *item) const
+{
+ MYSQL_TIME ltime;
+ THD *thd= current_thd;
+ (void) item->get_date(thd, &ltime, Time::Options(TIME_TIME_ONLY, thd));
+}
+
+
+void Type_handler_temporal_with_date::Item_update_null_value(Item *item) const
+{
+ MYSQL_TIME ltime;
+ THD *thd= current_thd;
+ (void) item->get_date(thd, &ltime, Datetime::Options(thd));
+}
+
+
+void Type_handler_string_result::Item_update_null_value(Item *item) const
+{
+ StringBuffer<MAX_FIELD_WIDTH> tmp;
+ (void) item->val_str(&tmp);
+}
+
+
+void Type_handler_real_result::Item_update_null_value(Item *item) const
+{
+ (void) item->val_real();
+}
+
+
+void Type_handler_decimal_result::Item_update_null_value(Item *item) const
+{
+ my_decimal tmp;
+ (void) item->val_decimal(&tmp);
+}
+
+
+void Type_handler_int_result::Item_update_null_value(Item *item) const
+{
+ (void) item->val_int();
+}
+
+
+void Type_handler_bool::Item_update_null_value(Item *item) const
+{
+ (void) item->val_bool();
+}
+
+
+/*************************************************************************/
+
int Type_handler_time_common::Item_save_in_field(Item *item, Field *field,
bool no_conversions) const
{
@@ -2527,6 +3548,18 @@ int Type_handler_temporal_with_date::Item_save_in_field(Item *item,
}
+int Type_handler_timestamp_common::Item_save_in_field(Item *item,
+ Field *field,
+ bool no_conversions)
+ const
+{
+ Timestamp_or_zero_datetime_native_null tmp(field->table->in_use, item, true);
+ if (tmp.is_null())
+ return set_field_to_null_with_conversions(field, no_conversions);
+ return tmp.save_in_field(field, item->decimals);
+}
+
+
int Type_handler_string_result::Item_save_in_field(Item *item, Field *field,
bool no_conversions) const
{
@@ -2593,6 +3626,12 @@ Type_handler_temporal_with_date::set_comparator_func(Arg_comparator *cmp) const
return cmp->set_cmp_func_datetime();
}
+bool
+Type_handler_timestamp_common::set_comparator_func(Arg_comparator *cmp) const
+{
+ return cmp->set_cmp_func_native();
+}
+
/*************************************************************************/
@@ -2704,7 +3743,7 @@ Type_handler_int_result::Item_get_cache(THD *thd, const Item *item) const
Item_cache *
Type_handler_year::Item_get_cache(THD *thd, const Item *item) const
{
- return new (thd->mem_root) Item_cache_year(thd);
+ return new (thd->mem_root) Item_cache_year(thd, item->type_handler());
}
Item_cache *
@@ -2728,7 +3767,7 @@ Type_handler_string_result::Item_get_cache(THD *thd, const Item *item) const
Item_cache *
Type_handler_timestamp_common::Item_get_cache(THD *thd, const Item *item) const
{
- return new (thd->mem_root) Item_cache_datetime(thd);
+ return new (thd->mem_root) Item_cache_timestamp(thd);
}
Item_cache *
@@ -2749,6 +3788,22 @@ Type_handler_date_common::Item_get_cache(THD *thd, const Item *item) const
return new (thd->mem_root) Item_cache_date(thd);
}
+
+/*************************************************************************/
+
+Item_copy *
+Type_handler::create_item_copy(THD *thd, Item *item) const
+{
+ return new (thd->mem_root) Item_copy_string(thd, item);
+}
+
+
+Item_copy *
+Type_handler_timestamp_common::create_item_copy(THD *thd, Item *item) const
+{
+ return new (thd->mem_root) Item_copy_timestamp(thd, item);
+}
+
/*************************************************************************/
bool Type_handler_int_result::
@@ -2824,8 +3879,21 @@ bool Type_handler_typelib::
TYPELIB *typelib= NULL;
for (uint i= 0; i < nitems; i++)
{
- if ((typelib= items[i]->get_typelib()))
- break;
+ TYPELIB *typelib2;
+ if ((typelib2= items[i]->get_typelib()))
+ {
+ if (typelib)
+ {
+ /*
+ Two ENUM/SET columns found. We convert such combinations to VARCHAR.
+ This may change in the future to preserve ENUM/SET
+ if typelib definitions are equal.
+ */
+ handler->set_handler(&type_handler_varchar);
+ return func->aggregate_attributes_string(func_name, items, nitems);
+ }
+ typelib= typelib2;
+ }
}
DBUG_ASSERT(typelib); // There must be at least one typelib
func->set_typelib(typelib);
@@ -2939,6 +4007,15 @@ bool Type_handler_temporal_result::
{
bool rc= Type_handler::Item_func_min_max_fix_attributes(thd, func,
items, nitems);
+ bool is_time= func->field_type() == MYSQL_TYPE_TIME;
+ func->decimals= 0;
+ for (uint i= 0; i < nitems; i++)
+ {
+ uint deci= is_time ? items[i]->time_precision(thd) :
+ items[i]->datetime_precision(thd);
+ set_if_bigger(func->decimals, deci);
+ }
+
if (rc || func->maybe_null)
return rc;
/*
@@ -3074,6 +4151,13 @@ bool Type_handler_int_result::
}
+bool Type_handler_bool::
+ Item_sum_hybrid_fix_length_and_dec(Item_sum_hybrid *func) const
+{
+ return Item_sum_hybrid_fix_length_and_dec_numeric(func, &type_handler_bool);
+}
+
+
bool Type_handler_real_result::
Item_sum_hybrid_fix_length_and_dec(Item_sum_hybrid *func) const
{
@@ -3302,15 +4386,6 @@ bool Type_handler_int_result::Item_val_bool(Item *item) const
return item->val_int() != 0;
}
-bool Type_handler_decimal_result::Item_val_bool(Item *item) const
-{
- my_decimal decimal_value;
- my_decimal *val= item->val_decimal(&decimal_value);
- if (val)
- return !my_decimal_is_zero(val);
- return false;
-}
-
bool Type_handler_temporal_result::Item_val_bool(Item *item) const
{
return item->val_real() != 0.0;
@@ -3324,48 +4399,89 @@ bool Type_handler_string_result::Item_val_bool(Item *item) const
/*************************************************************************/
-bool Type_handler_int_result::Item_get_date(Item *item, MYSQL_TIME *ltime,
- ulonglong fuzzydate) const
+
+bool Type_handler::Item_get_date_with_warn(THD *thd, Item *item,
+ MYSQL_TIME *ltime,
+ date_mode_t fuzzydate) const
+{
+ Temporal::Warn_push warn(thd, item->field_table_or_null(),
+ item->field_name_or_null(), ltime, fuzzydate);
+ Item_get_date(thd, item, &warn, ltime, fuzzydate);
+ return ltime->time_type < 0;
+}
+
+
+bool Type_handler::Item_func_hybrid_field_type_get_date_with_warn(THD *thd,
+ Item_func_hybrid_field_type *item,
+ MYSQL_TIME *ltime,
+ date_mode_t mode) const
{
- return item->get_date_from_int(ltime, fuzzydate);
+ Temporal::Warn_push warn(thd, item->field_table_or_null(),
+ item->field_name_or_null(), ltime, mode);
+ Item_func_hybrid_field_type_get_date(thd, item, &warn, ltime, mode);
+ return ltime->time_type < 0;
}
-bool Type_handler_year::Item_get_date(Item *item, MYSQL_TIME *ltime,
- ulonglong fuzzydate) const
+/************************************************************************/
+void Type_handler_decimal_result::Item_get_date(THD *thd, Item *item,
+ Temporal::Warn *warn,
+ MYSQL_TIME *ltime,
+ date_mode_t fuzzydate) const
{
- return item->get_date_from_year(ltime, fuzzydate);
+ new(ltime) Temporal_hybrid(thd, warn, VDec(item).ptr(), fuzzydate);
}
-bool Type_handler_real_result::Item_get_date(Item *item, MYSQL_TIME *ltime,
- ulonglong fuzzydate) const
+void Type_handler_int_result::Item_get_date(THD *thd, Item *item,
+ Temporal::Warn *warn,
+ MYSQL_TIME *to,
+ date_mode_t mode) const
{
- return item->get_date_from_real(ltime, fuzzydate);
+ new(to) Temporal_hybrid(thd, warn, item->to_longlong_hybrid_null(), mode);
}
-bool Type_handler_decimal_result::Item_get_date(Item *item, MYSQL_TIME *ltime,
- ulonglong fuzzydate) const
+void Type_handler_year::Item_get_date(THD *thd, Item *item,
+ Temporal::Warn *warn,
+ MYSQL_TIME *ltime,
+ date_mode_t fuzzydate) const
{
- return item->get_date_from_decimal(ltime, fuzzydate);
+ VYear year(item);
+ DBUG_ASSERT(!year.truncated());
+ Longlong_hybrid_null nr(Longlong_null(year.to_YYYYMMDD(), year.is_null()),
+ item->unsigned_flag);
+ new(ltime) Temporal_hybrid(thd, warn, nr, fuzzydate);
}
-bool Type_handler_string_result::Item_get_date(Item *item, MYSQL_TIME *ltime,
- ulonglong fuzzydate) const
+void Type_handler_real_result::Item_get_date(THD *thd, Item *item,
+ Temporal::Warn *warn,
+ MYSQL_TIME *ltime,
+ date_mode_t fuzzydate) const
{
- return item->get_date_from_string(ltime, fuzzydate);
+ new(ltime) Temporal_hybrid(thd, warn, item->to_double_null(), fuzzydate);
}
-bool Type_handler_temporal_result::Item_get_date(Item *item, MYSQL_TIME *ltime,
- ulonglong fuzzydate) const
+void Type_handler_string_result::Item_get_date(THD *thd, Item *item,
+ Temporal::Warn *warn,
+ MYSQL_TIME *ltime,
+ date_mode_t mode) const
+{
+ StringBuffer<40> tmp;
+ new(ltime) Temporal_hybrid(thd, warn, item->val_str(&tmp), mode);
+}
+
+
+void Type_handler_temporal_result::Item_get_date(THD *thd, Item *item,
+ Temporal::Warn *warn,
+ MYSQL_TIME *ltime,
+ date_mode_t fuzzydate) const
{
DBUG_ASSERT(0); // Temporal type items must implement native get_date()
item->null_value= true;
- set_zero_time(ltime, mysql_timestamp_type());
- return true;
+ set_zero_time(ltime, MYSQL_TIMESTAMP_NONE);
}
@@ -3374,13 +4490,13 @@ bool Type_handler_temporal_result::Item_get_date(Item *item, MYSQL_TIME *ltime,
longlong Type_handler_real_result::
Item_val_int_signed_typecast(Item *item) const
{
- return item->val_int();
+ return item->val_int_signed_typecast_from_int();
}
longlong Type_handler_int_result::
Item_val_int_signed_typecast(Item *item) const
{
- return item->val_int_signed_typecast_from_int();
+ return item->val_int();
}
longlong Type_handler_decimal_result::
@@ -3415,12 +4531,6 @@ longlong Type_handler_int_result::
return item->val_int_unsigned_typecast_from_int();
}
-longlong Type_handler_decimal_result::
- Item_val_int_unsigned_typecast(Item *item) const
-{
- return item->val_int_unsigned_typecast_from_decimal();
-}
-
longlong Type_handler_temporal_result::
Item_val_int_unsigned_typecast(Item *item) const
{
@@ -3481,7 +4591,7 @@ Type_handler_decimal_result::Item_func_hybrid_field_type_val_str(
Item_func_hybrid_field_type *item,
String *str) const
{
- return item->val_str_from_decimal_op(str);
+ return VDec_op(item).to_string_round(str, item->decimals);
}
@@ -3490,7 +4600,7 @@ Type_handler_decimal_result::Item_func_hybrid_field_type_val_real(
Item_func_hybrid_field_type *item)
const
{
- return item->val_real_from_decimal_op();
+ return VDec_op(item).to_double();
}
@@ -3499,7 +4609,7 @@ Type_handler_decimal_result::Item_func_hybrid_field_type_val_int(
Item_func_hybrid_field_type *item)
const
{
- return item->val_int_from_decimal_op();
+ return VDec_op(item).to_longlong(item->unsigned_flag);
}
@@ -3508,17 +4618,35 @@ Type_handler_decimal_result::Item_func_hybrid_field_type_val_decimal(
Item_func_hybrid_field_type *item,
my_decimal *dec) const
{
- return item->val_decimal_from_decimal_op(dec);
+ return VDec_op(item).to_decimal(dec);
}
-bool
+void
Type_handler_decimal_result::Item_func_hybrid_field_type_get_date(
+ THD *thd,
Item_func_hybrid_field_type *item,
+ Temporal::Warn *warn,
MYSQL_TIME *ltime,
- ulonglong fuzzydate) const
+ date_mode_t fuzzydate) const
{
- return item->get_date_from_decimal_op(ltime, fuzzydate);
+ new (ltime) Temporal_hybrid(thd, warn, VDec_op(item).ptr(), fuzzydate);
+}
+
+
+void
+Type_handler_year::Item_func_hybrid_field_type_get_date(
+ THD *thd,
+ Item_func_hybrid_field_type *item,
+ Temporal::Warn *warn,
+ MYSQL_TIME *ltime,
+ date_mode_t fuzzydate) const
+{
+ VYear_op year(item);
+ DBUG_ASSERT(!year.truncated());
+ Longlong_hybrid_null nr(Longlong_null(year.to_YYYYMMDD(), year.is_null()),
+ item->unsigned_flag);
+ new(ltime) Temporal_hybrid(thd, warn, nr, fuzzydate);
}
@@ -3561,17 +4689,18 @@ Type_handler_int_result::Item_func_hybrid_field_type_val_decimal(
}
-bool
+void
Type_handler_int_result::Item_func_hybrid_field_type_get_date(
+ THD *thd,
Item_func_hybrid_field_type *item,
- MYSQL_TIME *ltime,
- ulonglong fuzzydate) const
+ Temporal::Warn *warn,
+ MYSQL_TIME *to,
+ date_mode_t mode) const
{
- return item->get_date_from_int_op(ltime, fuzzydate);
+ new(to) Temporal_hybrid(thd, warn, item->to_longlong_hybrid_null_op(), mode);
}
-
/***************************************************************************/
String *
@@ -3610,13 +4739,15 @@ Type_handler_real_result::Item_func_hybrid_field_type_val_decimal(
}
-bool
+void
Type_handler_real_result::Item_func_hybrid_field_type_get_date(
+ THD *thd,
Item_func_hybrid_field_type *item,
- MYSQL_TIME *ltime,
- ulonglong fuzzydate) const
+ Temporal::Warn *warn,
+ MYSQL_TIME *to,
+ date_mode_t mode) const
{
- return item->get_date_from_real_op(ltime, fuzzydate);
+ new(to) Temporal_hybrid(thd, warn, item->to_double_null_op(), mode);
}
@@ -3658,13 +4789,16 @@ Type_handler_temporal_result::Item_func_hybrid_field_type_val_decimal(
}
-bool
+void
Type_handler_temporal_result::Item_func_hybrid_field_type_get_date(
+ THD *thd,
Item_func_hybrid_field_type *item,
+ Temporal::Warn *warn,
MYSQL_TIME *ltime,
- ulonglong fuzzydate) const
+ date_mode_t fuzzydate) const
{
- return item->date_op(ltime, fuzzydate);
+ if (item->date_op(thd, ltime, fuzzydate))
+ set_zero_time(ltime, MYSQL_TIMESTAMP_NONE);
}
@@ -3706,13 +4840,16 @@ Type_handler_time_common::Item_func_hybrid_field_type_val_decimal(
}
-bool
+void
Type_handler_time_common::Item_func_hybrid_field_type_get_date(
+ THD *thd,
Item_func_hybrid_field_type *item,
+ Temporal::Warn *warn,
MYSQL_TIME *ltime,
- ulonglong fuzzydate) const
+ date_mode_t fuzzydate) const
{
- return item->time_op(ltime);
+ if (item->time_op(thd, ltime))
+ set_zero_time(ltime, MYSQL_TIMESTAMP_NONE);
}
@@ -3754,13 +4891,18 @@ Type_handler_string_result::Item_func_hybrid_field_type_val_decimal(
}
-bool
+void
Type_handler_string_result::Item_func_hybrid_field_type_get_date(
+ THD *thd,
Item_func_hybrid_field_type *item,
+ Temporal::Warn *warn,
MYSQL_TIME *ltime,
- ulonglong fuzzydate) const
+ date_mode_t mode) const
{
- return item->get_date_from_str_op(ltime, fuzzydate);
+ StringBuffer<40> tmp;
+ String *res= item->str_op(&tmp);
+ DBUG_ASSERT((res == NULL) == item->null_value);
+ new(ltime) Temporal_hybrid(thd, warn, res, mode);
}
/***************************************************************************/
@@ -3798,10 +4940,22 @@ longlong Type_handler_string_result::
return func->val_int_cmp_string();
}
-longlong Type_handler_temporal_result::
+longlong Type_handler_temporal_with_date::
+ Item_func_between_val_int(Item_func_between *func) const
+{
+ return func->val_int_cmp_datetime();
+}
+
+longlong Type_handler_time_common::
+ Item_func_between_val_int(Item_func_between *func) const
+{
+ return func->val_int_cmp_time();
+}
+
+longlong Type_handler_timestamp_common::
Item_func_between_val_int(Item_func_between *func) const
{
- return func->val_int_cmp_temporal();
+ return func->val_int_cmp_native();
}
longlong Type_handler_int_result::
@@ -3867,6 +5021,12 @@ cmp_item *Type_handler_temporal_with_date::make_cmp_item(THD *thd,
return new (thd->mem_root) cmp_item_datetime;
}
+cmp_item *Type_handler_timestamp_common::make_cmp_item(THD *thd,
+ CHARSET_INFO *cs) const
+{
+ return new (thd->mem_root) cmp_item_timestamp;
+}
+
/***************************************************************************/
static int srtcmp_in(CHARSET_INFO *cs, const String *x,const String *y)
@@ -3927,6 +5087,15 @@ Type_handler_temporal_with_date::make_in_vector(THD *thd,
}
+in_vector *
+Type_handler_timestamp_common::make_in_vector(THD *thd,
+ const Item_func_in *func,
+ uint nargs) const
+{
+ return new (thd->mem_root) in_timestamp(thd, nargs);
+}
+
+
in_vector *Type_handler_row::make_in_vector(THD *thd,
const Item_func_in *func,
uint nargs) const
@@ -4021,10 +5190,33 @@ String *Type_handler_string_result::
}
-String *Type_handler_temporal_result::
+String *Type_handler_time_common::
+ Item_func_min_max_val_str(Item_func_min_max *func, String *str) const
+{
+ return Time(func).to_string(str, func->decimals);
+}
+
+
+String *Type_handler_date_common::
+ Item_func_min_max_val_str(Item_func_min_max *func, String *str) const
+{
+ return Date(func).to_string(str);
+}
+
+
+String *Type_handler_datetime_common::
+ Item_func_min_max_val_str(Item_func_min_max *func, String *str) const
+{
+ return Datetime(func).to_string(str, func->decimals);
+}
+
+
+String *Type_handler_timestamp_common::
Item_func_min_max_val_str(Item_func_min_max *func, String *str) const
{
- return func->val_string_from_date(str);
+ THD *thd= current_thd;
+ return Timestamp_or_zero_datetime_native_null(thd, func).
+ to_datetime(thd).to_string(str, func->decimals);
}
@@ -4038,7 +5230,7 @@ String *Type_handler_int_result::
String *Type_handler_decimal_result::
Item_func_min_max_val_str(Item_func_min_max *func, String *str) const
{
- return func->val_string_from_decimal(str);
+ return VDec(func).to_string_round(str, func->decimals);
}
@@ -4056,13 +5248,33 @@ double Type_handler_string_result::
}
-double Type_handler_temporal_result::
+double Type_handler_time_common::
Item_func_min_max_val_real(Item_func_min_max *func) const
{
- MYSQL_TIME ltime;
- if (func->get_date(&ltime, 0))
- return 0;
- return TIME_to_double(&ltime);
+ return Time(current_thd, func).to_double();
+}
+
+
+double Type_handler_date_common::
+ Item_func_min_max_val_real(Item_func_min_max *func) const
+{
+ return Date(current_thd, func).to_double();
+}
+
+
+double Type_handler_datetime_common::
+ Item_func_min_max_val_real(Item_func_min_max *func) const
+{
+ return Datetime(current_thd, func).to_double();
+}
+
+
+double Type_handler_timestamp_common::
+ Item_func_min_max_val_real(Item_func_min_max *func) const
+{
+ THD *thd= current_thd;
+ return Timestamp_or_zero_datetime_native_null(thd, func).
+ to_datetime(thd).to_double();
}
@@ -4080,13 +5292,33 @@ longlong Type_handler_string_result::
}
-longlong Type_handler_temporal_result::
+longlong Type_handler_time_common::
Item_func_min_max_val_int(Item_func_min_max *func) const
{
- MYSQL_TIME ltime;
- if (func->get_date(&ltime, 0))
- return 0;
- return TIME_to_ulonglong(&ltime);
+ return Time(current_thd, func).to_longlong();
+}
+
+
+longlong Type_handler_date_common::
+ Item_func_min_max_val_int(Item_func_min_max *func) const
+{
+ return Date(current_thd, func).to_longlong();
+}
+
+
+longlong Type_handler_datetime_common::
+ Item_func_min_max_val_int(Item_func_min_max *func) const
+{
+ return Datetime(current_thd, func).to_longlong();
+}
+
+
+longlong Type_handler_timestamp_common::
+ Item_func_min_max_val_int(Item_func_min_max *func) const
+{
+ THD *thd= current_thd;
+ return Timestamp_or_zero_datetime_native_null(thd, func).
+ to_datetime(thd).to_longlong();
}
@@ -4113,20 +5345,43 @@ my_decimal *Type_handler_numeric::
}
-my_decimal *Type_handler_temporal_result::
+my_decimal *Type_handler_time_common::
Item_func_min_max_val_decimal(Item_func_min_max *func,
my_decimal *dec) const
{
- MYSQL_TIME ltime;
- if (func->get_date(&ltime, 0))
- return 0;
- return date2my_decimal(&ltime, dec);
+ return Time(current_thd, func).to_decimal(dec);
+}
+
+
+my_decimal *Type_handler_date_common::
+ Item_func_min_max_val_decimal(Item_func_min_max *func,
+ my_decimal *dec) const
+{
+ return Date(current_thd, func).to_decimal(dec);
+}
+
+
+my_decimal *Type_handler_datetime_common::
+ Item_func_min_max_val_decimal(Item_func_min_max *func,
+ my_decimal *dec) const
+{
+ return Datetime(current_thd, func).to_decimal(dec);
+}
+
+
+my_decimal *Type_handler_timestamp_common::
+ Item_func_min_max_val_decimal(Item_func_min_max *func,
+ my_decimal *dec) const
+{
+ THD *thd= current_thd;
+ return Timestamp_or_zero_datetime_native_null(thd, func).
+ to_datetime(thd).to_decimal(dec);
}
bool Type_handler_string_result::
- Item_func_min_max_get_date(Item_func_min_max *func,
- MYSQL_TIME *ltime, ulonglong fuzzydate) const
+ Item_func_min_max_get_date(THD *thd, Item_func_min_max *func,
+ MYSQL_TIME *ltime, date_mode_t fuzzydate) const
{
/*
just like the ::val_int() method of a string item can be called,
@@ -4134,30 +5389,51 @@ bool Type_handler_string_result::
::get_date() can be called for non-temporal values,
for example, SELECT MONTH(GREATEST("2011-11-21", "2010-10-09"))
*/
- return func->get_date_from_string(ltime, fuzzydate);
+ return func->get_date_from_string(thd, ltime, fuzzydate);
}
bool Type_handler_numeric::
- Item_func_min_max_get_date(Item_func_min_max *func,
- MYSQL_TIME *ltime, ulonglong fuzzydate) const
+ Item_func_min_max_get_date(THD *thd, Item_func_min_max *func,
+ MYSQL_TIME *ltime, date_mode_t fuzzydate) const
{
- return Item_get_date(func, ltime, fuzzydate);
+ return Item_get_date_with_warn(thd, func, ltime, fuzzydate);
}
bool Type_handler_temporal_result::
- Item_func_min_max_get_date(Item_func_min_max *func,
- MYSQL_TIME *ltime, ulonglong fuzzydate) const
+ Item_func_min_max_get_date(THD *thd, Item_func_min_max *func,
+ MYSQL_TIME *ltime, date_mode_t fuzzydate) const
{
- return func->get_date_native(ltime, fuzzydate);
+ /*
+ - If the caller specified TIME_TIME_ONLY, then it's going to convert
+ a DATETIME or DATE to TIME. So we pass the default flags for date. This is
+ exactly the same as what Item_func_min_max_val_{int|real|decimal|str} and
+ Item_send_datetime() do. We return the value in accordance with the
+ current session date flags and let the caller further convert it to TIME.
+ - If the caller did not specify TIME_TIME_ONLY, then return the value
+ according to the flags supplied by the caller.
+ */
+ return func->get_date_native(thd, ltime,
+ fuzzydate & TIME_TIME_ONLY ?
+ Datetime::Options(thd) :
+ fuzzydate);
}
bool Type_handler_time_common::
- Item_func_min_max_get_date(Item_func_min_max *func,
- MYSQL_TIME *ltime, ulonglong fuzzydate) const
+ Item_func_min_max_get_date(THD *thd, Item_func_min_max *func,
+ MYSQL_TIME *ltime, date_mode_t fuzzydate) const
+{
+ return func->get_time_native(thd, ltime);
+}
+
+
+bool Type_handler_timestamp_common::
+ Item_func_min_max_get_date(THD *thd, Item_func_min_max *func,
+ MYSQL_TIME *ltime, date_mode_t fuzzydate) const
{
- return func->get_time_native(ltime);
+ return Timestamp_or_zero_datetime_native_null(thd, func).
+ to_datetime(thd).copy_to_mysql_time(ltime);
}
/***************************************************************************/
@@ -4622,7 +5898,7 @@ bool Type_handler::
Item_time_typecast_fix_length_and_dec(Item_time_typecast *item) const
{
uint dec= item->decimals == NOT_FIXED_DEC ?
- item->arguments()[0]->time_precision() :
+ item->arguments()[0]->time_precision(current_thd) :
item->decimals;
item->fix_attributes_temporal(MIN_TIME_WIDTH, dec);
item->maybe_null= true;
@@ -4644,7 +5920,7 @@ bool Type_handler::
const
{
uint dec= item->decimals == NOT_FIXED_DEC ?
- item->arguments()[0]->datetime_precision() :
+ item->arguments()[0]->datetime_precision(current_thd) :
item->decimals;
item->fix_attributes_temporal(MAX_DATETIME_WIDTH, dec);
item->maybe_null= true;
@@ -4964,32 +6240,35 @@ bool Type_handler_string_result::
/***************************************************************************/
-uint Type_handler::Item_time_precision(Item *item) const
+uint Type_handler::Item_time_precision(THD *thd, Item *item) const
{
return MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS);
}
-uint Type_handler::Item_datetime_precision(Item *item) const
+uint Type_handler::Item_datetime_precision(THD *thd, Item *item) const
{
return MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS);
}
-uint Type_handler_string_result::Item_temporal_precision(Item *item,
+uint Type_handler_string_result::Item_temporal_precision(THD *thd, Item *item,
bool is_time) const
{
- MYSQL_TIME ltime;
StringBuffer<64> buf;
String *tmp;
MYSQL_TIME_STATUS status;
- DBUG_ASSERT(item->fixed);
+ DBUG_ASSERT(item->is_fixed());
+ // Nanosecond rounding is intentionally skipped here, for performance reasons
if ((tmp= item->val_str(&buf)) &&
- !(is_time ?
- str_to_time(tmp->charset(), tmp->ptr(), tmp->length(),
- &ltime, TIME_TIME_ONLY, &status) :
- str_to_datetime(tmp->charset(), tmp->ptr(), tmp->length(),
- &ltime, TIME_FUZZY_DATES, &status)))
+ (is_time ?
+ Time(thd, &status, tmp->ptr(), tmp->length(), tmp->charset(),
+ Time::Options(TIME_TIME_ONLY, TIME_FRAC_TRUNCATE,
+ Time::DATETIME_TO_TIME_YYYYMMDD_TRUNCATE)).
+ is_valid_time() :
+ Datetime(thd, &status, tmp->ptr(), tmp->length(), tmp->charset(),
+ Datetime::Options(TIME_FUZZY_DATES, TIME_FRAC_TRUNCATE)).
+ is_valid_datetime()))
return MY_MIN(status.precision, TIME_SECOND_PART_DIGITS);
return MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS);
}
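
For illustration (an assumption about typical inputs, not part of the patch): the precision reported above is taken from parsing the string value itself and only falls back to item->decimals when parsing fails, both capped at TIME_SECOND_PART_DIGITS.

    //   "10:20:30.1234"  -> 4   (MYSQL_TIME_STATUS::precision from the parse)
    //   "2001-01-01"     -> 0
    //   "not-a-temporal" -> MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS)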
@@ -5178,7 +6457,7 @@ bool Type_handler::check_null(const Item *item, st_value *value) const
bool Type_handler_null::
- Item_save_in_value(Item *item, st_value *value) const
+ Item_save_in_value(THD *thd, Item *item, st_value *value) const
{
value->m_type= DYN_COL_NULL;
return true;
@@ -5186,7 +6465,7 @@ bool Type_handler_null::
bool Type_handler_row::
- Item_save_in_value(Item *item, st_value *value) const
+ Item_save_in_value(THD *thd, Item *item, st_value *value) const
{
DBUG_ASSERT(0);
value->m_type= DYN_COL_NULL;
@@ -5195,7 +6474,7 @@ bool Type_handler_row::
bool Type_handler_int_result::
- Item_save_in_value(Item *item, st_value *value) const
+ Item_save_in_value(THD *thd, Item *item, st_value *value) const
{
value->m_type= item->unsigned_flag ? DYN_COL_UINT : DYN_COL_INT;
value->value.m_longlong= item->val_int();
@@ -5204,7 +6483,7 @@ bool Type_handler_int_result::
bool Type_handler_real_result::
- Item_save_in_value(Item *item, st_value *value) const
+ Item_save_in_value(THD *thd, Item *item, st_value *value) const
{
value->m_type= DYN_COL_DOUBLE;
value->value.m_double= item->val_real();
@@ -5213,7 +6492,7 @@ bool Type_handler_real_result::
bool Type_handler_decimal_result::
- Item_save_in_value(Item *item, st_value *value) const
+ Item_save_in_value(THD *thd, Item *item, st_value *value) const
{
value->m_type= DYN_COL_DECIMAL;
my_decimal *dec= item->val_decimal(&value->m_decimal);
@@ -5224,7 +6503,7 @@ bool Type_handler_decimal_result::
bool Type_handler_string_result::
- Item_save_in_value(Item *item, st_value *value) const
+ Item_save_in_value(THD *thd, Item *item, st_value *value) const
{
value->m_type= DYN_COL_STRING;
String *str= item->val_str(&value->m_string);
@@ -5235,19 +6514,20 @@ bool Type_handler_string_result::
bool Type_handler_temporal_with_date::
- Item_save_in_value(Item *item, st_value *value) const
+ Item_save_in_value(THD *thd, Item *item, st_value *value) const
{
value->m_type= DYN_COL_DATETIME;
- item->get_date(&value->value.m_time, sql_mode_for_dates(current_thd));
+ item->get_date(thd, &value->value.m_time,
+ Datetime::Options(thd, TIME_FRAC_NONE));
return check_null(item, value);
}
bool Type_handler_time_common::
- Item_save_in_value(Item *item, st_value *value) const
+ Item_save_in_value(THD *thd, Item *item, st_value *value) const
{
value->m_type= DYN_COL_DATETIME;
- item->get_time(&value->value.m_time);
+ item->get_time(thd, &value->value.m_time);
return check_null(item, value);
}
@@ -5428,10 +6708,23 @@ bool Type_handler::
}
+bool Type_handler::Item_send_timestamp(Item *item,
+ Protocol *protocol,
+ st_value *buf) const
+{
+ Timestamp_or_zero_datetime_native_null native(protocol->thd, item);
+ if (native.is_null())
+ return protocol->store_null();
+ native.to_TIME(protocol->thd, &buf->value.m_time);
+ return protocol->store(&buf->value.m_time, item->decimals);
+}
+
+
bool Type_handler::
Item_send_datetime(Item *item, Protocol *protocol, st_value *buf) const
{
- item->get_date(&buf->value.m_time, sql_mode_for_dates(current_thd));
+ item->get_date(protocol->thd, &buf->value.m_time,
+ Datetime::Options(protocol->thd));
if (!item->null_value)
return protocol->store(&buf->value.m_time, item->decimals);
return protocol->store_null();
@@ -5441,7 +6734,8 @@ bool Type_handler::
bool Type_handler::
Item_send_date(Item *item, Protocol *protocol, st_value *buf) const
{
- item->get_date(&buf->value.m_time, sql_mode_for_dates(current_thd));
+ item->get_date(protocol->thd, &buf->value.m_time,
+ Date::Options(protocol->thd));
if (!item->null_value)
return protocol->store_date(&buf->value.m_time);
return protocol->store_null();
@@ -5451,7 +6745,7 @@ bool Type_handler::
bool Type_handler::
Item_send_time(Item *item, Protocol *protocol, st_value *buf) const
{
- item->get_time(&buf->value.m_time);
+ item->get_time(protocol->thd, &buf->value.m_time);
if (!item->null_value)
return protocol->store_time(&buf->value.m_time, item->decimals);
return protocol->store_null();
@@ -5484,11 +6778,10 @@ Item *Type_handler_real_result::
Item *Type_handler_decimal_result::
make_const_item_for_comparison(THD *thd, Item *item, const Item *cmp) const
{
- my_decimal decimal_value;
- my_decimal *result= item->val_decimal(&decimal_value);
- if (item->null_value)
+ VDec result(item);
+ if (result.is_null())
return new (thd->mem_root) Item_null(thd, item->name.str);
- return new (thd->mem_root) Item_decimal(thd, item->name.str, result,
+ return new (thd->mem_root) Item_decimal(thd, item->name.str, result.ptr(),
item->max_length, item->decimals);
}
@@ -5511,7 +6804,7 @@ Item *Type_handler_time_common::
make_const_item_for_comparison(THD *thd, Item *item, const Item *cmp) const
{
Item_cache_temporal *cache;
- longlong value= item->val_time_packed();
+ longlong value= item->val_time_packed(thd);
if (item->null_value)
return new (thd->mem_root) Item_null(thd, item->name.str);
cache= new (thd->mem_root) Item_cache_time(thd);
@@ -5525,7 +6818,7 @@ Item *Type_handler_temporal_with_date::
make_const_item_for_comparison(THD *thd, Item *item, const Item *cmp) const
{
Item_cache_temporal *cache;
- longlong value= item->val_datetime_packed();
+ longlong value= item->val_datetime_packed(thd);
if (item->null_value)
return new (thd->mem_root) Item_null(thd, item->name.str);
cache= new (thd->mem_root) Item_cache_datetime(thd);
@@ -5727,6 +7020,21 @@ Item *Type_handler_long_blob::
return new (thd->mem_root) Item_char_typecast(thd, item, len, real_cs);
}
+Item *Type_handler_interval_DDhhmmssff::
+ create_typecast_item(THD *thd, Item *item,
+ const Type_cast_attributes &attr) const
+{
+ if (attr.decimals() > MAX_DATETIME_PRECISION)
+ {
+ wrong_precision_error(ER_TOO_BIG_PRECISION, item, attr.decimals(),
+ MAX_DATETIME_PRECISION);
+ return 0;
+ }
+ return new (thd->mem_root) Item_interval_DDhhmmssff_typecast(thd, item,
+ (uint)
+ attr.decimals());
+}
+
/***************************************************************************/
void Type_handler_string_result::Item_param_setup_conversion(THD *thd,
@@ -5878,6 +7186,490 @@ void Type_handler_geometry::Item_param_set_param_func(Item_param *param,
/***************************************************************************/
+Field *Type_handler_row::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ DBUG_ASSERT(attr->length == 0);
+ DBUG_ASSERT(f_maybe_null(attr->pack_flag));
+ return new (mem_root) Field_row(rec.ptr(), name);
+}
+
+
+Field *Type_handler_olddecimal::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_decimal(rec.ptr(), (uint32) attr->length,
+ rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name,
+ f_decimals(attr->pack_flag),
+ f_is_zerofill(attr->pack_flag) != 0,
+ f_is_dec(attr->pack_flag) == 0);
+}
+
+
+Field *Type_handler_newdecimal::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_new_decimal(rec.ptr(), (uint32) attr->length,
+ rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name,
+ f_decimals(attr->pack_flag),
+ f_is_zerofill(attr->pack_flag) != 0,
+ f_is_dec(attr->pack_flag) == 0);
+}
+
+
+Field *Type_handler_float::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ int decimals= f_decimals(attr->pack_flag);
+ if (decimals == FLOATING_POINT_DECIMALS)
+ decimals= NOT_FIXED_DEC;
+ return new (mem_root)
+ Field_float(rec.ptr(), (uint32) attr->length,
+ rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name, decimals,
+ f_is_zerofill(attr->pack_flag) != 0,
+ f_is_dec(attr->pack_flag)== 0);
+}
+
+
+Field *Type_handler_double::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ int decimals= f_decimals(attr->pack_flag);
+ if (decimals == FLOATING_POINT_DECIMALS)
+ decimals= NOT_FIXED_DEC;
+ return new (mem_root)
+ Field_double(rec.ptr(), (uint32) attr->length,
+ rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name, decimals,
+ f_is_zerofill(attr->pack_flag) != 0,
+ f_is_dec(attr->pack_flag)== 0);
+}
+
+
+Field *Type_handler_tiny::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_tiny(rec.ptr(), (uint32) attr->length, rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name,
+ f_is_zerofill(attr->pack_flag) != 0,
+ f_is_dec(attr->pack_flag) == 0);
+}
+
+
+Field *Type_handler_short::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_short(rec.ptr(), (uint32) attr->length,
+ rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name,
+ f_is_zerofill(attr->pack_flag) != 0,
+ f_is_dec(attr->pack_flag) == 0);
+}
+
+
+Field *Type_handler_int24::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_medium(rec.ptr(), (uint32) attr->length,
+ rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name,
+ f_is_zerofill(attr->pack_flag) != 0,
+ f_is_dec(attr->pack_flag) == 0);
+}
+
+
+Field *Type_handler_long::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_long(rec.ptr(), (uint32) attr->length, rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name,
+ f_is_zerofill(attr->pack_flag) != 0,
+ f_is_dec(attr->pack_flag) == 0);
+}
+
+
+Field *Type_handler_longlong::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ if (flags & (VERS_SYS_START_FLAG|VERS_SYS_END_FLAG))
+ return new (mem_root)
+ Field_vers_trx_id(rec.ptr(), (uint32) attr->length,
+ rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name,
+ f_is_zerofill(attr->pack_flag) != 0,
+ f_is_dec(attr->pack_flag) == 0);
+ return new (mem_root)
+ Field_longlong(rec.ptr(), (uint32) attr->length,
+ rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name,
+ f_is_zerofill(attr->pack_flag) != 0,
+ f_is_dec(attr->pack_flag) == 0);
+}
+
+
+Field *Type_handler_timestamp::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new_Field_timestamp(mem_root,
+ rec.ptr(), rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name, share,
+ attr->temporal_dec(MAX_DATETIME_WIDTH));
+}
+
+
+Field *Type_handler_timestamp2::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_timestampf(rec.ptr(), rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check,
+ name, share, attr->temporal_dec(MAX_DATETIME_WIDTH));
+}
+
+
+Field *Type_handler_year::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_year(rec.ptr(), (uint32) attr->length, rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name);
+}
+
+
+Field *Type_handler_date::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_date(rec.ptr(),rec.null_ptr(),rec.null_bit(),
+ attr->unireg_check, name);
+}
+
+
+Field *Type_handler_newdate::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_newdate(rec.ptr(), rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name);
+}
+
+
+Field *Type_handler_time::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new_Field_time(mem_root, rec.ptr(), rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name,
+ attr->temporal_dec(MIN_TIME_WIDTH));
+}
+
+
+Field *Type_handler_time2::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_timef(rec.ptr(), rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name,
+ attr->temporal_dec(MIN_TIME_WIDTH));
+}
+
+
+Field *Type_handler_datetime::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new_Field_datetime(mem_root, rec.ptr(), rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name,
+ attr->temporal_dec(MAX_DATETIME_WIDTH));
+}
+
+
+Field *Type_handler_datetime2::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_datetimef(rec.ptr(), rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name,
+ attr->temporal_dec(MAX_DATETIME_WIDTH));
+}
+
+
+Field *Type_handler_null::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_null(rec.ptr(), (uint32) attr->length, attr->unireg_check,
+ name, attr->charset);
+}
+
+
+Field *Type_handler_bit::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return f_bit_as_char(attr->pack_flag) ?
+ new (mem_root) Field_bit_as_char(rec.ptr(), (uint32) attr->length,
+ rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name) :
+ new (mem_root) Field_bit(rec.ptr(), (uint32) attr->length,
+ rec.null_ptr(), rec.null_bit(),
+ bit.ptr(), bit.offs(), attr->unireg_check, name);
+}
+
+
+#ifdef HAVE_SPATIAL
+Field *Type_handler_geometry::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ status_var_increment(current_thd->status_var.feature_gis);
+ return new (mem_root)
+ Field_geom(rec.ptr(), rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name, share,
+ attr->pack_flag_to_pack_length(), attr->geom_type, attr->srid);
+}
+#endif
+
+
+Field *Type_handler_string::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_string(rec.ptr(), (uint32) attr->length,
+ rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name, attr->charset);
+}
+
+
+Field *Type_handler_varchar::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ if (attr->unireg_check == Field::TMYSQL_COMPRESSED)
+ return new (mem_root)
+ Field_varstring_compressed(rec.ptr(), (uint32) attr->length,
+ HA_VARCHAR_PACKLENGTH((uint32) attr->length),
+ rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name, share, attr->charset,
+ zlib_compression_method);
+ return new (mem_root)
+ Field_varstring(rec.ptr(), (uint32) attr->length,
+ HA_VARCHAR_PACKLENGTH((uint32) attr->length),
+ rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name, share, attr->charset);
+}
+
+
+Field *Type_handler_blob_common::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ if (attr->unireg_check == Field::TMYSQL_COMPRESSED)
+ return new (mem_root)
+ Field_blob_compressed(rec.ptr(), rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name, share,
+ attr->pack_flag_to_pack_length(), attr->charset,
+ zlib_compression_method);
+ return new (mem_root)
+ Field_blob(rec.ptr(), rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name, share,
+ attr->pack_flag_to_pack_length(), attr->charset);
+}
+
+
+Field *Type_handler_enum::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_enum(rec.ptr(), (uint32) attr->length, rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name, attr->pack_flag_to_pack_length(),
+ attr->interval, attr->charset);
+}
+
+
+Field *Type_handler_set::
+ make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &rec, const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const
+{
+ return new (mem_root)
+ Field_set(rec.ptr(), (uint32) attr->length, rec.null_ptr(), rec.null_bit(),
+ attr->unireg_check, name, attr->pack_flag_to_pack_length(),
+ attr->interval, attr->charset);
+}
+
+
+/***************************************************************************/
+
+void Type_handler::
+ Column_definition_attributes_frm_pack(const Column_definition_attributes *def,
+ uchar *buff) const
+{
+ def->frm_pack_basic(buff);
+ def->frm_pack_charset(buff);
+}
+
+
+#ifdef HAVE_SPATIAL
+void Type_handler_geometry::
+ Column_definition_attributes_frm_pack(const Column_definition_attributes *def,
+ uchar *buff) const
+{
+ def->frm_pack_basic(buff);
+ buff[11]= 0;
+ buff[14]= (uchar) def->geom_type;
+}
+#endif
+
+
+/***************************************************************************/
+
+bool Type_handler::
+ Column_definition_attributes_frm_unpack(Column_definition_attributes *attr,
+ TABLE_SHARE *share,
+ const uchar *buffer,
+ LEX_CUSTRING *gis_options)
+ const
+{
+ attr->frm_unpack_basic(buffer);
+ return attr->frm_unpack_charset(share, buffer);
+}
+
+
+#ifdef HAVE_SPATIAL
+bool Type_handler_geometry::
+ Column_definition_attributes_frm_unpack(Column_definition_attributes *attr,
+ TABLE_SHARE *share,
+ const uchar *buffer,
+ LEX_CUSTRING *gis_options)
+ const
+{
+ uint gis_opt_read, gis_length, gis_decimals;
+ Field_geom::storage_type st_type;
+ attr->frm_unpack_basic(buffer);
+ // charset and geometry_type share the same byte in frm
+ attr->geom_type= (Field::geometry_type) buffer[14];
+ gis_opt_read= gis_field_options_read(gis_options->str,
+ gis_options->length,
+ &st_type, &gis_length,
+ &gis_decimals, &attr->srid);
+ gis_options->str+= gis_opt_read;
+ gis_options->length-= gis_opt_read;
+ return false;
+}
+#endif
+
+/***************************************************************************/
+
bool Type_handler::Vers_history_point_resolve_unit(THD *thd,
Vers_history_point *point)
const
@@ -5936,3 +7728,539 @@ bool Type_handler_general_purpose_string::
}
/***************************************************************************/
+
+bool Type_handler_null::Item_const_eq(const Item_const *a,
+ const Item_const *b,
+ bool binary_cmp) const
+{
+ return true;
+}
+
+
+bool Type_handler_real_result::Item_const_eq(const Item_const *a,
+ const Item_const *b,
+ bool binary_cmp) const
+{
+ const double *va= a->const_ptr_double();
+ const double *vb= b->const_ptr_double();
+ return va[0] == vb[0];
+}
+
+
+bool Type_handler_int_result::Item_const_eq(const Item_const *a,
+ const Item_const *b,
+ bool binary_cmp) const
+{
+ const longlong *va= a->const_ptr_longlong();
+ const longlong *vb= b->const_ptr_longlong();
+ bool res= va[0] == vb[0] &&
+ (va[0] >= 0 ||
+ (a->get_type_all_attributes_from_const()->unsigned_flag ==
+ b->get_type_all_attributes_from_const()->unsigned_flag));
+ return res;
+}
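
A short illustration of the unsigned_flag guard above (sketch only; the values follow from two's-complement representation):

    longlong  s= -1;               // signed reading:   -1
    ulonglong u= (ulonglong) s;    // unsigned reading: 18446744073709551615
    // Both constants carry the same 64-bit pattern, so va[0] == vb[0] holds;
    // they are only treated as equal when the value is non-negative or both
    // sides share the same unsigned_flag.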
+
+
+bool Type_handler_string_result::Item_const_eq(const Item_const *a,
+ const Item_const *b,
+ bool binary_cmp) const
+{
+ const String *sa= a->const_ptr_string();
+ const String *sb= b->const_ptr_string();
+ return binary_cmp ? sa->bin_eq(sb) :
+ a->get_type_all_attributes_from_const()->collation.collation ==
+ b->get_type_all_attributes_from_const()->collation.collation &&
+ sa->eq(sb, a->get_type_all_attributes_from_const()->collation.collation);
+}
+
+
+bool
+Type_handler_decimal_result::Item_const_eq(const Item_const *a,
+ const Item_const *b,
+ bool binary_cmp) const
+{
+ const my_decimal *da= a->const_ptr_my_decimal();
+ const my_decimal *db= b->const_ptr_my_decimal();
+ return !da->cmp(db) &&
+ (!binary_cmp ||
+ a->get_type_all_attributes_from_const()->decimals ==
+ b->get_type_all_attributes_from_const()->decimals);
+}
+
+
+bool
+Type_handler_temporal_result::Item_const_eq(const Item_const *a,
+ const Item_const *b,
+ bool binary_cmp) const
+{
+ const MYSQL_TIME *ta= a->const_ptr_mysql_time();
+ const MYSQL_TIME *tb= b->const_ptr_mysql_time();
+ return !my_time_compare(ta, tb) &&
+ (!binary_cmp ||
+ a->get_type_all_attributes_from_const()->decimals ==
+ b->get_type_all_attributes_from_const()->decimals);
+}
+
+/***************************************************************************/
+
+const Type_handler *
+Type_handler_hex_hybrid::cast_to_int_type_handler() const
+{
+ return &type_handler_longlong;
+}
+
+
+const Type_handler *
+Type_handler_hex_hybrid::type_handler_for_system_time() const
+{
+ return &type_handler_longlong;
+}
+
+
+/***************************************************************************/
+
+bool Type_handler_row::Item_eq_value(THD *thd, const Type_cmp_attributes *attr,
+ Item *a, Item *b) const
+{
+ DBUG_ASSERT(0);
+ return false;
+}
+
+
+bool Type_handler_int_result::Item_eq_value(THD *thd,
+ const Type_cmp_attributes *attr,
+ Item *a, Item *b) const
+{
+ longlong value0= a->val_int();
+ longlong value1= b->val_int();
+ return !a->null_value && !b->null_value && value0 == value1 &&
+ (value0 >= 0 || a->unsigned_flag == b->unsigned_flag);
+}
+
+
+bool Type_handler_real_result::Item_eq_value(THD *thd,
+ const Type_cmp_attributes *attr,
+ Item *a, Item *b) const
+{
+ double value0= a->val_real();
+ double value1= b->val_real();
+ return !a->null_value && !b->null_value && value0 == value1;
+}
+
+
+bool Type_handler_time_common::Item_eq_value(THD *thd,
+ const Type_cmp_attributes *attr,
+ Item *a, Item *b) const
+{
+ longlong value0= a->val_time_packed(thd);
+ longlong value1= b->val_time_packed(thd);
+ return !a->null_value && !b->null_value && value0 == value1;
+}
+
+
+bool Type_handler_temporal_with_date::Item_eq_value(THD *thd,
+ const Type_cmp_attributes *attr,
+ Item *a, Item *b) const
+{
+ longlong value0= a->val_datetime_packed(thd);
+ longlong value1= b->val_datetime_packed(thd);
+ return !a->null_value && !b->null_value && value0 == value1;
+}
+
+
+bool Type_handler_timestamp_common::Item_eq_value(THD *thd,
+ const Type_cmp_attributes *attr,
+ Item *a, Item *b) const
+{
+ Timestamp_or_zero_datetime_native_null na(thd, a, true);
+ Timestamp_or_zero_datetime_native_null nb(thd, b, true);
+ return !na.is_null() && !nb.is_null() && !cmp_native(na, nb);
+}
+
+
+bool Type_handler_string_result::Item_eq_value(THD *thd,
+ const Type_cmp_attributes *attr,
+ Item *a, Item *b) const
+{
+ String *va, *vb;
+ StringBuffer<128> cmp_value1, cmp_value2;
+ return (va= a->val_str(&cmp_value1)) &&
+ (vb= b->val_str(&cmp_value2)) &&
+ va->eq(vb, attr->compare_collation());
+}
+
+
+/***************************************************************************/
+
+bool Type_handler_string_result::union_element_finalize(const Item * item) const
+{
+ if (item->collation.derivation == DERIVATION_NONE)
+ {
+ my_error(ER_CANT_AGGREGATE_NCOLLATIONS, MYF(0), "UNION");
+ return true;
+ }
+ return false;
+}
+
+
+/***************************************************************************/
+
+void Type_handler_var_string::
+ Column_definition_implicit_upgrade(Column_definition *c) const
+{
+ // Change old VARCHAR to new VARCHAR
+ c->set_handler(&type_handler_varchar);
+}
+
+
+void Type_handler_time_common::
+ Column_definition_implicit_upgrade(Column_definition *c) const
+{
+ if (opt_mysql56_temporal_format)
+ c->set_handler(&type_handler_time2);
+ else
+ c->set_handler(&type_handler_time);
+}
+
+
+void Type_handler_datetime_common::
+ Column_definition_implicit_upgrade(Column_definition *c) const
+{
+ if (opt_mysql56_temporal_format)
+ c->set_handler(&type_handler_datetime2);
+ else
+ c->set_handler(&type_handler_datetime);
+}
+
+
+void Type_handler_timestamp_common::
+ Column_definition_implicit_upgrade(Column_definition *c) const
+{
+ if (opt_mysql56_temporal_format)
+ c->set_handler(&type_handler_timestamp2);
+ else
+ c->set_handler(&type_handler_timestamp);
+}
+
+
+/***************************************************************************/
+
+
+int Type_handler_temporal_with_date::stored_field_cmp_to_item(THD *thd,
+ Field *field,
+ Item *item) const
+{
+ MYSQL_TIME field_time, item_time, item_time2, *item_time_cmp= &item_time;
+ field->get_date(&field_time, Datetime::Options(TIME_INVALID_DATES, thd));
+ item->get_date(thd, &item_time, Datetime::Options(TIME_INVALID_DATES, thd));
+ if (item_time.time_type == MYSQL_TIMESTAMP_TIME &&
+ time_to_datetime(thd, &item_time, item_time_cmp= &item_time2))
+ return 1;
+ return my_time_compare(&field_time, item_time_cmp);
+}
+
+
+int Type_handler_time_common::stored_field_cmp_to_item(THD *thd,
+ Field *field,
+ Item *item) const
+{
+ MYSQL_TIME field_time, item_time;
+ field->get_date(&field_time, Time::Options(thd));
+ item->get_date(thd, &item_time, Time::Options(thd));
+ return my_time_compare(&field_time, &item_time);
+}
+
+
+int Type_handler_string_result::stored_field_cmp_to_item(THD *thd,
+ Field *field,
+ Item *item) const
+{
+ StringBuffer<MAX_FIELD_WIDTH> item_tmp;
+ StringBuffer<MAX_FIELD_WIDTH> field_tmp;
+ String *item_result= item->val_str(&item_tmp);
+ /*
+ Some implementations of Item::val_str(String*) actually modify
+ the field Item::null_value, hence we can't check it earlier.
+ */
+ if (item->null_value)
+ return 0;
+ String *field_result= field->val_str(&field_tmp);
+ return sortcmp(field_result, item_result, field->charset());
+}
+
+
+int Type_handler_int_result::stored_field_cmp_to_item(THD *thd,
+ Field *field,
+ Item *item) const
+{
+ DBUG_ASSERT(0); // Not used yet
+ return 0;
+}
+
+
+int Type_handler_real_result::stored_field_cmp_to_item(THD *thd,
+ Field *field,
+ Item *item) const
+{
+ /*
+ The patch for Bug#13463415 started using this function for comparing
+ BIGINTs. That uncovered a bug in Visual Studio 32bit optimized mode.
+ Prefixing the auto variables with volatile fixes the problem....
+ */
+ volatile double result= item->val_real();
+ if (item->null_value)
+ return 0;
+ volatile double field_result= field->val_real();
+ if (field_result < result)
+ return -1;
+ else if (field_result > result)
+ return 1;
+ return 0;
+}
+
+
+/***************************************************************************/
+
+
+static bool have_important_literal_warnings(const MYSQL_TIME_STATUS *status)
+{
+ return (status->warnings & ~MYSQL_TIME_NOTE_TRUNCATED) != 0;
+}
+
+
+static void literal_warn(THD *thd, const Item *item,
+ const char *str, size_t length, CHARSET_INFO *cs,
+ const MYSQL_TIME_STATUS *st,
+ const char *typestr, bool send_error)
+{
+ if (likely(item))
+ {
+ if (st->warnings) // e.g. a note on nanosecond truncation
+ {
+ ErrConvString err(str, length, cs);
+ thd->push_warning_wrong_or_truncated_value(
+ Sql_condition::time_warn_level(st->warnings),
+ false, typestr, err.ptr(), NULL, NullS);
+ }
+ }
+ else if (send_error)
+ {
+ ErrConvString err(str, length, cs);
+ my_error(ER_WRONG_VALUE, MYF(0), typestr, err.ptr());
+ }
+}
+
+
+Item_literal *
+Type_handler_date_common::create_literal_item(THD *thd,
+ const char *str,
+ size_t length,
+ CHARSET_INFO *cs,
+ bool send_error) const
+{
+ Temporal::Warn st;
+ Item_literal *item= NULL;
+ Temporal_hybrid tmp(thd, &st, str, length, cs, Temporal_hybrid::Options(thd));
+ if (tmp.is_valid_temporal() &&
+ tmp.get_mysql_time()->time_type == MYSQL_TIMESTAMP_DATE &&
+ !have_important_literal_warnings(&st))
+ item= new (thd->mem_root) Item_date_literal(thd, tmp.get_mysql_time());
+ literal_warn(thd, item, str, length, cs, &st, "DATE", send_error);
+ return item;
+}
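
Illustrative behaviour of the DATE literal handler above, derived from its checks (the SQL snippets are examples only):

    //   DATE'2001-01-01'          -> parses with time_type MYSQL_TIMESTAMP_DATE
    //                                and no important warnings, so an
    //                                Item_date_literal is created
    //   DATE'2001-01-01 10:20:30' -> parses as MYSQL_TIMESTAMP_DATETIME, so
    //                                NULL is returned (and ER_WRONG_VALUE is
    //                                raised when send_error is true)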
+
+
+Item_literal *
+Type_handler_temporal_with_date::create_literal_item(THD *thd,
+ const char *str,
+ size_t length,
+ CHARSET_INFO *cs,
+ bool send_error) const
+{
+ Temporal::Warn st;
+ Item_literal *item= NULL;
+ Temporal_hybrid tmp(thd, &st, str, length, cs, Temporal_hybrid::Options(thd));
+ if (tmp.is_valid_temporal() &&
+ tmp.get_mysql_time()->time_type == MYSQL_TIMESTAMP_DATETIME &&
+ !have_important_literal_warnings(&st))
+ item= new (thd->mem_root) Item_datetime_literal(thd, tmp.get_mysql_time(),
+ st.precision);
+ literal_warn(thd, item, str, length, cs, &st, "DATETIME", send_error);
+ return item;
+}
+
+
+Item_literal *
+Type_handler_time_common::create_literal_item(THD *thd,
+ const char *str,
+ size_t length,
+ CHARSET_INFO *cs,
+ bool send_error) const
+{
+ MYSQL_TIME_STATUS st;
+ Item_literal *item= NULL;
+ Time::Options opt(TIME_TIME_ONLY, thd, Time::DATETIME_TO_TIME_DISALLOW);
+ Time tmp(thd, &st, str, length, cs, opt);
+ if (tmp.is_valid_time() &&
+ !have_important_literal_warnings(&st))
+ item= new (thd->mem_root) Item_time_literal(thd, tmp.get_mysql_time(),
+ st.precision);
+ literal_warn(thd, item, str, length, cs, &st, "TIME", send_error);
+ return item;
+}
+
+
+bool Type_handler_timestamp_common::TIME_to_native(THD *thd,
+ const MYSQL_TIME *ltime,
+ Native *to,
+ uint decimals) const
+{
+ uint error_code;
+ Timestamp_or_zero_datetime tm(thd, ltime, &error_code);
+ if (error_code)
+ return true;
+ tm.trunc(decimals);
+ return tm.to_native(to, decimals);
+}
+
+
+bool
+Type_handler_timestamp_common::Item_val_native_with_conversion(THD *thd,
+ Item *item,
+ Native *to) const
+{
+ MYSQL_TIME ltime;
+ if (item->type_handler()->type_handler_for_native_format() ==
+ &type_handler_timestamp2)
+ return item->val_native(thd, to);
+ return
+ item->get_date(thd, &ltime, Datetime::Options(TIME_NO_ZERO_IN_DATE, thd)) ||
+ TIME_to_native(thd, &ltime, to, item->datetime_precision(thd));
+}
+
+
+bool
+Type_handler_timestamp_common::Item_val_native_with_conversion_result(THD *thd,
+ Item *item,
+ Native *to)
+ const
+{
+ MYSQL_TIME ltime;
+ if (item->type_handler()->type_handler_for_native_format() ==
+ &type_handler_timestamp2)
+ return item->val_native_result(thd, to);
+ return
+ item->get_date_result(thd, &ltime,
+ Datetime::Options(TIME_NO_ZERO_IN_DATE, thd)) ||
+ TIME_to_native(thd, &ltime, to, item->datetime_precision(thd));
+}
+
+
+int Type_handler_timestamp_common::cmp_native(const Native &a,
+ const Native &b) const
+{
+ /*
+ Optimize a simple case:
+ Either both timestamp values have the same fractional precision,
+ or both values are the zero datetime '0000-00-00 00:00:00.000000'.
+ */
+ if (a.length() == b.length())
+ return memcmp(a.ptr(), b.ptr(), a.length());
+ return Timestamp_or_zero_datetime(a).cmp(Timestamp_or_zero_datetime(b));
+}
+
+
+Timestamp_or_zero_datetime_native_null::
+ Timestamp_or_zero_datetime_native_null(THD *thd, Item *item, bool conv)
+ :Null_flag(false)
+{
+ DBUG_ASSERT(item->type_handler()->type_handler_for_native_format() ==
+ &type_handler_timestamp2 || conv);
+ if (conv ?
+ type_handler_timestamp2.Item_val_native_with_conversion(thd, item, this) :
+ item->val_native(thd, this))
+ Null_flag::operator=(true);
+ // If no conversion, then is_null() should be equal to item->null_value
+ DBUG_ASSERT(is_null() == item->null_value || conv);
+ /*
+ is_null() can be true together with item->null_value==false, which means
+ a non-NULL item was evaluated, but then the conversion to TIMESTAMP failed.
+ But is_null() can never be false if item->null_value==true.
+ */
+ DBUG_ASSERT(is_null() >= item->null_value);
+}
+
+
+bool
+Type_handler::Item_param_val_native(THD *thd,
+ Item_param *item,
+ Native *to) const
+{
+ DBUG_ASSERT(0); // TODO-TYPE: MDEV-14271
+ return item->null_value= true;
+}
+
+
+bool
+Type_handler_timestamp_common::Item_param_val_native(THD *thd,
+ Item_param *item,
+ Native *to) const
+{
+ /*
+ The code below may not work correctly in corner cases.
+ This will be fixed under terms of MDEV-14271.
+ Item_param should:
+ - either remember @@time_zone at bind time
+ - or store TIMESTAMP in my_time_t format, rather than in MYSQL_TIME format.
+ */
+ MYSQL_TIME ltime;
+ return
+ item->get_date(thd, &ltime, Datetime::Options(TIME_NO_ZERO_IN_DATE, thd)) ||
+ TIME_to_native(thd, &ltime, to, item->datetime_precision(thd));
+}
+
+static bool charsets_are_compatible(const char *old_cs_name,
+ const CHARSET_INFO *new_ci)
+{
+ const char *new_cs_name= new_ci->csname;
+
+ if (!strcmp(old_cs_name, new_cs_name))
+ return true;
+
+ if (!strcmp(old_cs_name, MY_UTF8MB3) && !strcmp(new_cs_name, MY_UTF8MB4))
+ return true;
+
+ if (!strcmp(old_cs_name, "ascii") && !(new_ci->state & MY_CS_NONASCII))
+ return true;
+
+ if (!strcmp(old_cs_name, "ucs2") && !strcmp(new_cs_name, "utf16"))
+ return true;
+
+ return false;
+}
+
+bool Type_handler::Charsets_are_compatible(const CHARSET_INFO *old_ci,
+ const CHARSET_INFO *new_ci,
+ bool part_of_a_key)
+{
+ const char *old_cs_name= old_ci->csname;
+ const char *new_cs_name= new_ci->csname;
+
+ if (!charsets_are_compatible(old_cs_name, new_ci))
+ {
+ return false;
+ }
+
+ if (!part_of_a_key)
+ {
+ return true;
+ }
+
+ if (strcmp(old_ci->name + strlen(old_cs_name),
+ new_ci->name + strlen(new_cs_name)))
+ {
+ return false;
+ }
+
+ return true;
+}
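
To make the key-compatibility rule above concrete (illustrative note, not part of the patch): after the charset-level check, the final strcmp() compares only the collation suffix, i.e. the part of the collation name that follows the character set name.

    //   old "ucs2_general_ci"  -> suffix "_general_ci"
    //   new "utf16_general_ci" -> suffix "_general_ci"   => compatible for keys
    //   new "utf16_unicode_ci" -> suffix "_unicode_ci"   => not compatible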
diff --git a/sql/sql_type.h b/sql/sql_type.h
index 21e0eb56338..569e8742d2f 100644
--- a/sql/sql_type.h
+++ b/sql/sql_type.h
@@ -25,12 +25,17 @@
#include "sql_array.h"
#include "sql_const.h"
#include "sql_time.h"
+#include "compat56.h"
class Field;
class Column_definition;
+class Column_definition_attributes;
class Item;
+class Item_const;
+class Item_literal;
class Item_param;
class Item_cache;
+class Item_copy;
class Item_func_or_sum;
class Item_sum_hybrid;
class Item_sum_sum;
@@ -73,6 +78,1174 @@ struct Schema_specification_st;
struct TABLE;
struct SORT_FIELD_ATTR;
class Vers_history_point;
+class Virtual_column_info;
+
+#define my_charset_numeric my_charset_latin1
+
+enum protocol_send_type_t
+{
+ PROTOCOL_SEND_STRING,
+ PROTOCOL_SEND_FLOAT,
+ PROTOCOL_SEND_DOUBLE,
+ PROTOCOL_SEND_TINY,
+ PROTOCOL_SEND_SHORT,
+ PROTOCOL_SEND_LONG,
+ PROTOCOL_SEND_LONGLONG,
+ PROTOCOL_SEND_DATETIME,
+ PROTOCOL_SEND_DATE,
+ PROTOCOL_SEND_TIME
+};
+
+
+enum scalar_comparison_op
+{
+ SCALAR_CMP_EQ,
+ SCALAR_CMP_EQUAL,
+ SCALAR_CMP_LT,
+ SCALAR_CMP_LE,
+ SCALAR_CMP_GE,
+ SCALAR_CMP_GT
+};
+
+
+class Native: public Binary_string
+{
+public:
+ Native(char *str, size_t len)
+ :Binary_string(str, len)
+ { }
+};
+
+
+template<size_t buff_sz>
+class NativeBuffer: public Native
+{
+ char buff[buff_sz];
+public:
+ NativeBuffer() : Native(buff, buff_sz) { length(0); }
+};
+
+
+class String_ptr
+{
+protected:
+ String *m_string_ptr;
+public:
+ String_ptr(String *str)
+ :m_string_ptr(str)
+ { }
+ String_ptr(Item *item, String *buffer);
+ const String *string() const
+ {
+ DBUG_ASSERT(m_string_ptr);
+ return m_string_ptr;
+ }
+ bool is_null() const { return m_string_ptr == NULL; }
+};
+
+
+class Ascii_ptr: public String_ptr
+{
+public:
+ Ascii_ptr(Item *item, String *buffer);
+};
+
+
+template<size_t buff_sz>
+class String_ptr_and_buffer: public StringBuffer<buff_sz>,
+ public String_ptr
+{
+public:
+ String_ptr_and_buffer(Item *item)
+ :String_ptr(item, this)
+ { }
+};
+
+
+template<size_t buff_sz>
+class Ascii_ptr_and_buffer: public StringBuffer<buff_sz>,
+ public Ascii_ptr
+{
+public:
+ Ascii_ptr_and_buffer(Item *item)
+ :Ascii_ptr(item, this)
+ { }
+};
+
+
+class Dec_ptr
+{
+protected:
+ my_decimal *m_ptr;
+ Dec_ptr() { }
+public:
+ Dec_ptr(my_decimal *ptr) :m_ptr(ptr) { }
+ bool is_null() const { return m_ptr == NULL; }
+ const my_decimal *ptr() const { return m_ptr; }
+ const my_decimal *ptr_or(const my_decimal *def) const
+ {
+ return m_ptr ? m_ptr : def;
+ }
+ my_decimal *to_decimal(my_decimal *to) const
+ {
+ if (!m_ptr)
+ return NULL;
+ *to= *m_ptr;
+ return to;
+ }
+ double to_double() const { return m_ptr ? m_ptr->to_double() : 0.0; }
+ longlong to_longlong(bool unsigned_flag)
+ { return m_ptr ? m_ptr->to_longlong(unsigned_flag) : 0; }
+ bool to_bool() const { return m_ptr ? m_ptr->to_bool() : false; }
+ String *to_string(String *to) const
+ {
+ return m_ptr ? m_ptr->to_string(to) : NULL;
+ }
+ String *to_string(String *to, uint prec, uint dec, char filler)
+ {
+ return m_ptr ? m_ptr->to_string(to, prec, dec, filler) : NULL;
+ }
+ int to_binary(uchar *bin, int prec, int scale) const
+ {
+ return (m_ptr ? m_ptr : &decimal_zero)->to_binary(bin, prec, scale);
+ }
+ int cmp(const my_decimal *dec) const
+ {
+ DBUG_ASSERT(m_ptr);
+ DBUG_ASSERT(dec);
+ return m_ptr->cmp(dec);
+ }
+ int cmp(const Dec_ptr &other) const
+ {
+ return cmp(other.m_ptr);
+ }
+};
+
+
+// A helper class to handle results of val_decimal(), date_op(), etc.
+class Dec_ptr_and_buffer: public Dec_ptr
+{
+protected:
+ my_decimal m_buffer;
+public:
+ int round_to(my_decimal *to, uint scale, decimal_round_mode mode)
+ {
+ DBUG_ASSERT(m_ptr);
+ return m_ptr->round_to(to, scale, mode);
+ }
+ int round_self(uint scale, decimal_round_mode mode)
+ {
+ return round_to(&m_buffer, scale, mode);
+ }
+ String *to_string_round(String *to, uint dec)
+ {
+ /*
+ decimal_round() allows from==to
+ So it's safe even if m_ptr points to m_buffer before this call:
+ */
+ return m_ptr ? m_ptr->to_string_round(to, dec, &m_buffer) : NULL;
+ }
+};
+
+
+// A helper class to handle val_decimal() results.
+class VDec: public Dec_ptr_and_buffer
+{
+public:
+ VDec(): Dec_ptr_and_buffer() { }
+ VDec(Item *item);
+ void set(Item *a);
+};
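
A usage sketch for VDec, mirroring how the sql_type.cc hunks above use it (illustrative only; item and str stand for whatever the caller already has):

    //   VDec val(item);                    // evaluates item->val_decimal()
    //   if (val.is_null())
    //     return NULL;                     // SQL NULL propagates
    //   return val.to_string_round(str, item->decimals);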
+
+
+// A helper class to handle decimal_op() results.
+class VDec_op: public Dec_ptr_and_buffer
+{
+public:
+ VDec_op(Item_func_hybrid_field_type *item);
+};
+
+
+/*
+ Get and cache val_decimal() values for two items.
+ If the first value appears to be NULL, the second value is not evaluated.
+*/
+class VDec2_lazy
+{
+public:
+ VDec m_a;
+ VDec m_b;
+ VDec2_lazy(Item *a, Item *b) :m_a(a)
+ {
+ if (!m_a.is_null())
+ m_b.set(b);
+ }
+ bool has_null() const
+ {
+ return m_a.is_null() || m_b.is_null();
+ }
+};
+
+
+/**
+ Class Sec6 represents a fixed-point value with 6 fractional digits.
+ Used e.g. to convert double and my_decimal values to TIME/DATETIME.
+*/
+
+class Sec6
+{
+protected:
+ ulonglong m_sec; // The integer part, between 0 and LONGLONG_MAX
+ ulong m_usec; // The fractional part, between 0 and 999999
+ bool m_neg; // false if positive, true if negative
+ bool m_truncated; // Indicates if the constructor truncated the value
+ void make_from_decimal(const my_decimal *d, ulong *nanoseconds);
+ void make_from_double(double d, ulong *nanoseconds);
+ void make_from_int(const Longlong_hybrid &nr)
+ {
+ m_neg= nr.neg();
+ m_sec= nr.abs();
+ m_usec= 0;
+ m_truncated= false;
+ }
+ void reset()
+ {
+ m_sec= m_usec= m_neg= m_truncated= 0;
+ }
+ Sec6() { }
+ bool add_nanoseconds(uint nanoseconds)
+ {
+ DBUG_ASSERT(nanoseconds <= 1000000000);
+ if (nanoseconds < 500)
+ return false;
+ m_usec+= (nanoseconds + 500) / 1000;
+ if (m_usec < 1000000)
+ return false;
+ m_usec%= 1000000;
+ return true;
+ }
+public:
+ explicit Sec6(double nr)
+ {
+ ulong nanoseconds;
+ make_from_double(nr, &nanoseconds);
+ }
+ explicit Sec6(const my_decimal *d)
+ {
+ ulong nanoseconds;
+ make_from_decimal(d, &nanoseconds);
+ }
+ explicit Sec6(const Longlong_hybrid &nr)
+ {
+ make_from_int(nr);
+ }
+ explicit Sec6(longlong nr, bool unsigned_val)
+ {
+ make_from_int(Longlong_hybrid(nr, unsigned_val));
+ }
+ bool neg() const { return m_neg; }
+ bool truncated() const { return m_truncated; }
+ ulonglong sec() const { return m_sec; }
+ long usec() const { return m_usec; }
+ /**
+ Converts Sec6 to MYSQL_TIME
+ @param thd current thd
+ @param [out] warn conversion warnings will be written here
+ @param [out] ltime converted value will be written here
+ @param fuzzydate conversion flags (TIME_INVALID_DATE, etc)
+ @returns false for success, true for a failure
+ */
+ bool convert_to_mysql_time(THD *thd,
+ int *warn,
+ MYSQL_TIME *ltime,
+ date_mode_t fuzzydate) const;
+
+protected:
+
+ bool to_interval_hhmmssff_only(MYSQL_TIME *to, int *warn) const
+ {
+ return number_to_time_only(m_neg, m_sec, m_usec,
+ TIME_MAX_INTERVAL_HOUR, to, warn);
+ }
+ bool to_datetime_or_to_interval_hhmmssff(MYSQL_TIME *to, int *warn) const
+ {
+ /*
+ Convert a number to a time interval.
+ The following formats are understood:
+ - 0 <= x <= 999999995959 - parse as hhhhmmss
+ - 999999995959 < x <= 99991231235959 - parse as YYYYMMDDhhmmss
+ (YYMMDDhhmmss) (YYYYMMDDhhmmss)
+
+ Note, these formats are NOT understood:
+ - YYMMDD - overlaps with INTERVAL range
+ - YYYYMMDD - overlaps with INTERVAL range
+ - YYMMDDhhmmss - overlaps with INTERVAL range, partially
+ (see TIME_MAX_INTERVAL_HOUR)
+
+ If we ever need wider intervals, this code switching between
+ full datetime and interval-only should be revised.
+ */
+ DBUG_ASSERT(TIME_MAX_INTERVAL_HOUR <= 999999995959);
+ /* (YYMMDDhhmmss) */
+ if (m_sec > 999999995959ULL &&
+ m_sec <= 99991231235959ULL && m_neg == 0)
+ return to_datetime_or_date(to, warn, TIME_INVALID_DATES);
+ if (m_sec / 10000 > TIME_MAX_INTERVAL_HOUR)
+ {
+ *warn= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ return true;
+ }
+ return to_interval_hhmmssff_only(to, warn);
+ }
+public:
+ // [-][DD]hhhmmss.ff, YYMMDDhhmmss.ff, YYYYMMDDhhmmss.ff
+ bool to_datetime_or_time(MYSQL_TIME *to, int *warn,
+ date_conv_mode_t mode) const
+ {
+ bool rc= m_sec > 9999999 && m_sec <= 99991231235959ULL && !m_neg ?
+ ::number_to_datetime_or_date(m_sec, m_usec, to,
+ ulonglong(mode & TIME_MODE_FOR_XXX_TO_DATE), warn) < 0 :
+ ::number_to_time_only(m_neg, m_sec, m_usec, TIME_MAX_HOUR, to, warn);
+ DBUG_ASSERT(*warn || !rc);
+ return rc;
+ }
+ /*
+ Convert a number in formats YYYYMMDDhhmmss.ff or YYMMDDhhmmss.ff to
+ TIMESTAMP'YYYY-MM-DD hh:mm:ss.ff'
+ */
+ bool to_datetime_or_date(MYSQL_TIME *to, int *warn,
+ date_conv_mode_t flags) const
+ {
+ if (m_neg)
+ {
+ *warn= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ return true;
+ }
+ bool rc= number_to_datetime_or_date(m_sec, m_usec, to,
+ ulonglong(flags & TIME_MODE_FOR_XXX_TO_DATE),
+ warn) == -1;
+ DBUG_ASSERT(*warn || !rc);
+ return rc;
+ }
+ // Convert elapsed seconds to TIME
+ bool sec_to_time(MYSQL_TIME *ltime, uint dec) const
+ {
+ set_zero_time(ltime, MYSQL_TIMESTAMP_TIME);
+ ltime->neg= m_neg;
+ if (m_sec > TIME_MAX_VALUE_SECONDS)
+ {
+ // use check_time_range() to set ltime to the max value depending on dec
+ int unused;
+ ltime->hour= TIME_MAX_HOUR + 1;
+ check_time_range(ltime, dec, &unused);
+ return true;
+ }
+ DBUG_ASSERT(usec() <= TIME_MAX_SECOND_PART);
+ ltime->hour= (uint) (m_sec / 3600);
+ ltime->minute= (uint) (m_sec % 3600) / 60;
+ ltime->second= (uint) m_sec % 60;
+ ltime->second_part= m_usec;
+ return false;
+ }
+ Sec6 &trunc(uint dec)
+ {
+ m_usec-= my_time_fraction_remainder(m_usec, dec);
+ return *this;
+ }
+ size_t to_string(char *to, size_t nbytes) const
+ {
+ return m_usec ?
+ my_snprintf(to, nbytes, "%s%llu.%06lu",
+ m_neg ? "-" : "", m_sec, (uint) m_usec) :
+ my_snprintf(to, nbytes, "%s%llu", m_neg ? "-" : "", m_sec);
+ }
+ void make_truncated_warning(THD *thd, const char *type_str) const;
+};
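For illustration, the dispatch in Sec6::to_datetime_or_to_interval_hhmmssff() above maps numeric inputs roughly as follows (hypothetical values; the TIME_MAX_INTERVAL_HOUR value of 87649415 is taken from the max_useful_hour() comment further down):

    //  m_sec= 1231235959       -> interval '123123:59:59'          (hhh..hmmss branch)
    //  m_sec= 20010203040506   -> datetime '2001-02-03 04:05:06'   (YYYYMMDDhhmmss branch)
    //  m_sec= 1000000000000000 -> *warn= MYSQL_TIME_WARN_OUT_OF_RANGE, returns true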
+
+
+class Sec9: public Sec6
+{
+protected:
+ ulong m_nsec; // Nanoseconds 0..999
+ void make_from_int(const Longlong_hybrid &nr)
+ {
+ Sec6::make_from_int(nr);
+ m_nsec= 0;
+ }
+ Sec9() { }
+public:
+ Sec9(const my_decimal *d)
+ {
+ Sec6::make_from_decimal(d, &m_nsec);
+ }
+ Sec9(double d)
+ {
+ Sec6::make_from_double(d, &m_nsec);
+ }
+ ulong nsec() const { return m_nsec; }
+ Sec9 &trunc(uint dec)
+ {
+ m_nsec= 0;
+ Sec6::trunc(dec);
+ return *this;
+ }
+ Sec9 &round(uint dec);
+ Sec9 &round(uint dec, time_round_mode_t mode)
+ {
+ return mode == TIME_FRAC_TRUNCATE ? trunc(dec) : round(dec);
+ }
+};
+
+
+class VSec9: public Sec9
+{
+ bool m_is_null;
+public:
+ VSec9(THD *thd, Item *item, const char *type_str, ulonglong limit);
+ bool is_null() const { return m_is_null; }
+};
+
+
+/*
+ A helper class to perform additive operations between
+ two MYSQL_TIME structures and return the result as a
+ combination of seconds, microseconds and sign.
+*/
+class Sec6_add
+{
+ ulonglong m_sec; // number of seconds
+ ulong m_usec; // number of microseconds
+ bool m_neg; // false if positive, true if negative
+ bool m_error; // false if the value is OK, true otherwise
+ void to_hh24mmssff(MYSQL_TIME *ltime, timestamp_type tstype) const
+ {
+ bzero(ltime, sizeof(*ltime));
+ ltime->neg= m_neg;
+ calc_time_from_sec(ltime, (ulong) (m_sec % SECONDS_IN_24H), m_usec);
+ ltime->time_type= tstype;
+ }
+public:
+ /*
+ @param ltime1 - the first value to add (must be a valid DATE,TIME,DATETIME)
+ @param ltime2 - the second value to add (must be a valid TIME)
+ @param sign - the sign of the operation
+ (+1 for addition, -1 for subtraction)
+ */
+ Sec6_add(const MYSQL_TIME *ltime1, const MYSQL_TIME *ltime2, int sign)
+ {
+ DBUG_ASSERT(sign == -1 || sign == 1);
+ DBUG_ASSERT(!ltime1->neg || ltime1->time_type == MYSQL_TIMESTAMP_TIME);
+ if (!(m_error= (ltime2->time_type != MYSQL_TIMESTAMP_TIME)))
+ {
+ if (ltime1->neg != ltime2->neg)
+ sign= -sign;
+ m_neg= calc_time_diff(ltime1, ltime2, -sign, &m_sec, &m_usec);
+ if (ltime1->neg && (m_sec || m_usec))
+ m_neg= !m_neg; // Swap sign
+ }
+ }
+ bool to_time(THD *thd, MYSQL_TIME *ltime, uint decimals) const
+ {
+ if (m_error)
+ return true;
+ to_hh24mmssff(ltime, MYSQL_TIMESTAMP_TIME);
+ ltime->hour+= to_days_abs() * 24;
+ return adjust_time_range_with_warn(thd, ltime, decimals);
+ }
+ bool to_datetime(MYSQL_TIME *ltime) const
+ {
+ if (m_error || m_neg)
+ return true;
+ to_hh24mmssff(ltime, MYSQL_TIMESTAMP_DATETIME);
+ return get_date_from_daynr(to_days_abs(),
+ &ltime->year, &ltime->month, &ltime->day) ||
+ !ltime->day;
+ }
+ long to_days_abs() const { return (long) (m_sec / SECONDS_IN_24H); }
+};
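A minimal usage sketch for Sec6_add, e.g. for an ADDTIME()-like operation (caller code assumed, not part of the patch; ltime1 must be a valid DATE/TIME/DATETIME and ltime2 a valid TIME, as documented above):

    MYSQL_TIME res;
    Sec6_add add(&ltime1, &ltime2, 1);       // +1 for addition, -1 for subtraction
    bool failed= add.to_time(thd, &res, 6);  // on success "res" holds a valid TIME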
+
+
+class Year
+{
+protected:
+ uint m_year;
+ bool m_truncated;
+ uint year_precision(const Item *item) const;
+public:
+ Year(): m_year(0), m_truncated(false) { }
+ Year(longlong value, bool unsigned_flag, uint length);
+ uint year() const { return m_year; }
+ uint to_YYYYMMDD() const { return m_year * 10000; }
+ bool truncated() const { return m_truncated; }
+};
+
+
+class Year_null: public Year, public Null_flag
+{
+public:
+ Year_null(const Longlong_null &nr, bool unsigned_flag, uint length)
+ :Year(nr.is_null() ? 0 : nr.value(), unsigned_flag, length),
+ Null_flag(nr.is_null())
+ { }
+};
+
+
+class VYear: public Year_null
+{
+public:
+ VYear(Item *item);
+};
+
+
+class VYear_op: public Year_null
+{
+public:
+ VYear_op(Item_func_hybrid_field_type *item);
+};
+
+
+class Double_null: public Null_flag
+{
+protected:
+ double m_value;
+public:
+ Double_null(double value, bool is_null)
+ :Null_flag(is_null), m_value(value)
+ { }
+ double value() const { return m_value; }
+};
+
+
+class Temporal: protected MYSQL_TIME
+{
+public:
+ class Status: public MYSQL_TIME_STATUS
+ {
+ public:
+ Status() { my_time_status_init(this); }
+ };
+
+ class Warn: public ErrBuff,
+ public Status
+ {
+ public:
+ void push_conversion_warnings(THD *thd, bool totally_useless_value,
+ date_mode_t mode, timestamp_type tstype,
+ const TABLE_SHARE* s, const char *name)
+ {
+ const char *typestr= tstype >= 0 ? type_name_by_timestamp_type(tstype) :
+ mode & (TIME_INTERVAL_hhmmssff | TIME_INTERVAL_DAY) ?
+ "interval" :
+ mode & TIME_TIME_ONLY ? "time" : "datetime";
+ Temporal::push_conversion_warnings(thd, totally_useless_value, warnings,
+ typestr, s, name, ptr());
+ }
+ };
+
+ class Warn_push: public Warn
+ {
+ THD *m_thd;
+ const TABLE_SHARE *m_s;
+ const char *m_name;
+ const MYSQL_TIME *m_ltime;
+ date_mode_t m_mode;
+ public:
+ Warn_push(THD *thd, const TABLE_SHARE *s, const char *name,
+ const MYSQL_TIME *ltime, date_mode_t mode)
+ :m_thd(thd), m_s(s), m_name(name), m_ltime(ltime), m_mode(mode)
+ { }
+ ~Warn_push()
+ {
+ if (warnings)
+ push_conversion_warnings(m_thd, m_ltime->time_type < 0,
+ m_mode, m_ltime->time_type, m_s, m_name);
+ }
+ };
+
+public:
+ static date_conv_mode_t sql_mode_for_dates(THD *thd);
+ static time_round_mode_t default_round_mode(THD *thd);
+ class Options: public date_mode_t
+ {
+ public:
+ explicit Options(date_mode_t flags)
+ :date_mode_t(flags)
+ { }
+ Options(date_conv_mode_t flags, time_round_mode_t round_mode)
+ :date_mode_t(flags | round_mode)
+ {
+ DBUG_ASSERT(ulonglong(flags) <= UINT_MAX32);
+ }
+ Options(date_conv_mode_t flags, THD *thd)
+ :Options(flags, default_round_mode(thd))
+ { }
+ };
+
+ bool is_valid_temporal() const
+ {
+ DBUG_ASSERT(time_type != MYSQL_TIMESTAMP_ERROR);
+ return time_type != MYSQL_TIMESTAMP_NONE;
+ }
+ static const char *type_name_by_timestamp_type(timestamp_type time_type)
+ {
+ switch (time_type) {
+ case MYSQL_TIMESTAMP_DATE: return "date";
+ case MYSQL_TIMESTAMP_TIME: return "time";
+ case MYSQL_TIMESTAMP_DATETIME: // FALLTHROUGH
+ default:
+ break;
+ }
+ return "datetime";
+ }
+ static void push_conversion_warnings(THD *thd, bool totally_useless_value, int warn,
+ const char *type_name,
+ const TABLE_SHARE *s,
+ const char *field_name,
+ const char *value);
+ /*
+ This method is used if the item was not null but conversion to
+ TIME/DATE/DATETIME failed. We return a zero date if allowed,
+ otherwise NULL.
+ */
+ void make_fuzzy_date(int *warn, date_conv_mode_t fuzzydate)
+ {
+ /*
+ In the following scenario:
+ - The caller expected to get a TIME value
+ - Item returned a not NULL string or numeric value
+ - But then conversion from string or number to TIME failed
+ we need to change the default time_type from MYSQL_TIMESTAMP_DATE
+ (which was set in bzero) to MYSQL_TIMESTAMP_TIME and therefore
+ return TIME'00:00:00' rather than DATE'0000-00-00'.
+ If we don't do this, methods like Item::get_time_with_conversion()
+ will erroneously subtract CURRENT_DATE from '0000-00-00 00:00:00'
+ and return TIME'-838:59:59' instead of TIME'00:00:00' as a result.
+ */
+ timestamp_type tstype= !(fuzzydate & TIME_FUZZY_DATES) ?
+ MYSQL_TIMESTAMP_NONE :
+ fuzzydate & TIME_TIME_ONLY ?
+ MYSQL_TIMESTAMP_TIME :
+ MYSQL_TIMESTAMP_DATETIME;
+ set_zero_time(this, tstype);
+ }
+
+protected:
+ my_decimal *bad_to_decimal(my_decimal *to) const;
+ my_decimal *to_decimal(my_decimal *to) const;
+ static double to_double(bool negate, ulonglong num, ulong frac)
+ {
+ double d= (double) num + frac / (double) TIME_SECOND_PART_FACTOR;
+ return negate ? -d : d;
+ }
+ longlong to_packed() const { return ::pack_time(this); }
+ void make_from_out_of_range(int *warn)
+ {
+ *warn= MYSQL_TIME_WARN_OUT_OF_RANGE;
+ time_type= MYSQL_TIMESTAMP_NONE;
+ }
+ void make_from_sec6(THD *thd, MYSQL_TIME_STATUS *st,
+ const Sec6 &nr, date_mode_t mode)
+ {
+ if (nr.convert_to_mysql_time(thd, &st->warnings, this, mode))
+ make_fuzzy_date(&st->warnings, date_conv_mode_t(mode));
+ }
+ void make_from_sec9(THD *thd, MYSQL_TIME_STATUS *st,
+ const Sec9 &nr, date_mode_t mode)
+ {
+ if (nr.convert_to_mysql_time(thd, &st->warnings, this, mode) ||
+ add_nanoseconds(thd, &st->warnings, mode, nr.nsec()))
+ make_fuzzy_date(&st->warnings, date_conv_mode_t(mode));
+ }
+ void make_from_str(THD *thd, Warn *warn,
+ const char *str, size_t length, CHARSET_INFO *cs,
+ date_mode_t fuzzydate);
+ void make_from_double(THD *thd, Warn *warn, double nr, date_mode_t mode)
+ {
+ make_from_sec9(thd, warn, Sec9(nr), mode);
+ if (warn->warnings)
+ warn->set_double(nr);
+ }
+ void make_from_longlong_hybrid(THD *thd, Warn *warn,
+ const Longlong_hybrid &nr, date_mode_t mode)
+ {
+ /*
+ Note: conversion from an integer to TIME can overflow to
+ '838:59:59.999999', so the conversion result can have fractional digits.
+ */
+ make_from_sec6(thd, warn, Sec6(nr), mode);
+ if (warn->warnings)
+ warn->set_longlong(nr);
+ }
+ void make_from_decimal(THD *thd, Warn *warn,
+ const my_decimal *nr, date_mode_t mode)
+ {
+ make_from_sec9(thd, warn, Sec9(nr), mode);
+ if (warn->warnings)
+ warn->set_decimal(nr);
+ }
+ bool ascii_to_temporal(MYSQL_TIME_STATUS *st,
+ const char *str, size_t length,
+ date_mode_t mode)
+ {
+ if (mode & (TIME_INTERVAL_hhmmssff | TIME_INTERVAL_DAY))
+ return ascii_to_datetime_or_date_or_interval_DDhhmmssff(st, str, length,
+ mode);
+ if (mode & TIME_TIME_ONLY)
+ return ascii_to_datetime_or_date_or_time(st, str, length, mode);
+ return ascii_to_datetime_or_date(st, str, length, mode);
+ }
+ bool ascii_to_datetime_or_date_or_interval_DDhhmmssff(MYSQL_TIME_STATUS *st,
+ const char *str,
+ size_t length,
+ date_mode_t mode)
+ {
+ longlong cflags= ulonglong(mode & TIME_MODE_FOR_XXX_TO_DATE);
+ bool rc= mode & TIME_INTERVAL_DAY ?
+ ::str_to_datetime_or_date_or_interval_day(str, length, this, cflags, st,
+ TIME_MAX_INTERVAL_HOUR,
+ TIME_MAX_INTERVAL_HOUR) :
+ ::str_to_datetime_or_date_or_interval_hhmmssff(str, length, this,
+ cflags, st,
+ TIME_MAX_INTERVAL_HOUR,
+ TIME_MAX_INTERVAL_HOUR);
+ DBUG_ASSERT(!rc || st->warnings);
+ return rc;
+ }
+ bool ascii_to_datetime_or_date_or_time(MYSQL_TIME_STATUS *status,
+ const char *str, size_t length,
+ date_mode_t fuzzydate)
+ {
+ ulonglong cflags= ulonglong(fuzzydate & TIME_MODE_FOR_XXX_TO_DATE);
+ bool rc= ::str_to_datetime_or_date_or_time(str, length, this,
+ cflags, status,
+ TIME_MAX_HOUR, UINT_MAX32);
+ DBUG_ASSERT(!rc || status->warnings);
+ return rc;
+ }
+ bool ascii_to_datetime_or_date(MYSQL_TIME_STATUS *status,
+ const char *str, size_t length,
+ date_mode_t fuzzydate)
+ {
+ DBUG_ASSERT(bool(fuzzydate & TIME_TIME_ONLY) == false);
+ bool rc= ::str_to_datetime_or_date(str, length, this,
+ ulonglong(fuzzydate & TIME_MODE_FOR_XXX_TO_DATE),
+ status);
+ DBUG_ASSERT(!rc || status->warnings);
+ return rc;
+ }
+ // Character set aware versions for string conversion routines
+ bool str_to_temporal(THD *thd, MYSQL_TIME_STATUS *st,
+ const char *str, size_t length,
+ CHARSET_INFO *cs, date_mode_t fuzzydate);
+ bool str_to_datetime_or_date_or_time(THD *thd, MYSQL_TIME_STATUS *st,
+ const char *str, size_t length,
+ CHARSET_INFO *cs, date_mode_t mode);
+ bool str_to_datetime_or_date(THD *thd, MYSQL_TIME_STATUS *st,
+ const char *str, size_t length,
+ CHARSET_INFO *cs, date_mode_t mode);
+
+ bool has_valid_mmssff() const
+ {
+ return minute <= TIME_MAX_MINUTE &&
+ second <= TIME_MAX_SECOND &&
+ second_part <= TIME_MAX_SECOND_PART;
+ }
+ bool has_zero_YYYYMM() const
+ {
+ return year == 0 && month == 0;
+ }
+ bool has_zero_YYYYMMDD() const
+ {
+ return year == 0 && month == 0 && day == 0;
+ }
+ bool check_date(date_conv_mode_t flags, int *warn) const
+ {
+ return ::check_date(this, flags, warn);
+ }
+ void time_hhmmssff_set_max(ulong max_hour)
+ {
+ hour= max_hour;
+ minute= TIME_MAX_MINUTE;
+ second= TIME_MAX_SECOND;
+ second_part= TIME_MAX_SECOND_PART;
+ }
+ /*
+ Add nanoseconds to ssff
+ retval true if seconds overflowed (the caller should increment minutes)
+ false if no overflow happened
+ */
+ bool add_nanoseconds_ssff(uint nanoseconds)
+ {
+ DBUG_ASSERT(nanoseconds <= 1000000000);
+ if (nanoseconds < 500)
+ return false;
+ second_part+= (nanoseconds + 500) / 1000;
+ if (second_part < 1000000)
+ return false;
+ second_part%= 1000000;
+ if (second < 59)
+ {
+ second++;
+ return false;
+ }
+ second= 0;
+ return true;
+ }
+ /*
+ Add nanoseconds to mmssff
+ retval true if minutes overflowed (the caller should increment hours)
+ false if no overflow happened
+ */
+ bool add_nanoseconds_mmssff(uint nanoseconds)
+ {
+ if (!add_nanoseconds_ssff(nanoseconds))
+ return false;
+ if (minute < 59)
+ {
+ minute++;
+ return false;
+ }
+ minute= 0;
+ return true;
+ }
+ void time_round_or_set_max(uint dec, int *warn, ulong max_hour, ulong nsec);
+ bool datetime_add_nanoseconds_or_invalidate(THD *thd, int *warn, ulong nsec);
+ bool datetime_round_or_invalidate(THD *thd, uint dec, int *warn, ulong nsec);
+ bool add_nanoseconds_with_round(THD *thd, int *warn,
+ date_conv_mode_t mode, ulong nsec);
+ bool add_nanoseconds(THD *thd, int *warn, date_mode_t mode, ulong nsec)
+ {
+ date_conv_mode_t cmode= date_conv_mode_t(mode);
+ return time_round_mode_t(mode) == TIME_FRAC_ROUND ?
+ add_nanoseconds_with_round(thd, warn, cmode, nsec) : false;
+ }
+public:
+ static void *operator new(size_t size, MYSQL_TIME *ltime) throw()
+ {
+ DBUG_ASSERT(size == sizeof(MYSQL_TIME));
+ return ltime;
+ }
+ static void operator delete(void *ptr, MYSQL_TIME *ltime) { }
+
+ long fraction_remainder(uint dec) const
+ {
+ return my_time_fraction_remainder(second_part, dec);
+ }
+};
+
+
+/*
+ Use this class when you need to get a MYSQL_TIME from an Item
+ using Item's native timestamp type, without automatic timestamp
+ type conversion.
+*/
+class Temporal_hybrid: public Temporal
+{
+public:
+ class Options: public Temporal::Options
+ {
+ public:
+ Options(THD *thd)
+ :Temporal::Options(sql_mode_for_dates(thd), default_round_mode(thd))
+ { }
+ Options(date_conv_mode_t flags, time_round_mode_t round_mode)
+ :Temporal::Options(flags, round_mode)
+ { }
+ explicit Options(const Temporal::Options &opt)
+ :Temporal::Options(opt)
+ { }
+ explicit Options(date_mode_t fuzzydate)
+ :Temporal::Options(fuzzydate)
+ { }
+ };
+
+public:
+ // Constructors for Item
+ Temporal_hybrid(THD *thd, Item *item, date_mode_t fuzzydate);
+ Temporal_hybrid(THD *thd, Item *item)
+ :Temporal_hybrid(thd, item, Options(thd))
+ { }
+ Temporal_hybrid(Item *item)
+ :Temporal_hybrid(current_thd, item)
+ { }
+
+ // Constructors for non-NULL values
+ Temporal_hybrid(THD *thd, Warn *warn,
+ const char *str, size_t length, CHARSET_INFO *cs,
+ date_mode_t fuzzydate)
+ {
+ make_from_str(thd, warn, str, length, cs, fuzzydate);
+ }
+ Temporal_hybrid(THD *thd, Warn *warn,
+ const Longlong_hybrid &nr, date_mode_t fuzzydate)
+ {
+ make_from_longlong_hybrid(thd, warn, nr, fuzzydate);
+ }
+ Temporal_hybrid(THD *thd, Warn *warn, double nr, date_mode_t fuzzydate)
+ {
+ make_from_double(thd, warn, nr, fuzzydate);
+ }
+
+ // Constructors for nullable values
+ Temporal_hybrid(THD *thd, Warn *warn, const String *str, date_mode_t mode)
+ {
+ if (!str)
+ time_type= MYSQL_TIMESTAMP_NONE;
+ else
+ make_from_str(thd, warn, str->ptr(), str->length(), str->charset(), mode);
+ }
+ Temporal_hybrid(THD *thd, Warn *warn,
+ const Longlong_hybrid_null &nr, date_mode_t fuzzydate)
+ {
+ if (nr.is_null())
+ time_type= MYSQL_TIMESTAMP_NONE;
+ else
+ make_from_longlong_hybrid(thd, warn, nr, fuzzydate);
+ }
+ Temporal_hybrid(THD *thd, Warn *warn, const Double_null &nr, date_mode_t mode)
+ {
+ if (nr.is_null())
+ time_type= MYSQL_TIMESTAMP_NONE;
+ else
+ make_from_double(thd, warn, nr.value(), mode);
+ }
+ Temporal_hybrid(THD *thd, Warn *warn, const my_decimal *nr, date_mode_t mode)
+ {
+ if (!nr)
+ time_type= MYSQL_TIMESTAMP_NONE;
+ else
+ make_from_decimal(thd, warn, nr, mode);
+ }
+ // End of constructors
+
+ longlong to_longlong() const
+ {
+ if (!is_valid_temporal())
+ return 0;
+ ulonglong v= TIME_to_ulonglong(this);
+ return neg ? -(longlong) v : (longlong) v;
+ }
+ double to_double() const
+ {
+ return is_valid_temporal() ? TIME_to_double(this) : 0;
+ }
+ my_decimal *to_decimal(my_decimal *to)
+ {
+ return is_valid_temporal() ? Temporal::to_decimal(to) : bad_to_decimal(to);
+ }
+ String *to_string(String *str, uint dec) const
+ {
+ if (!is_valid_temporal())
+ return NULL;
+ str->set_charset(&my_charset_numeric);
+ if (!str->alloc(MAX_DATE_STRING_REP_LENGTH))
+ str->length(my_TIME_to_str(this, const_cast<char*>(str->ptr()), dec));
+ return str;
+ }
+ const MYSQL_TIME *get_mysql_time() const
+ {
+ DBUG_ASSERT(is_valid_temporal());
+ return this;
+ }
+};
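As a sketch of the intended use described in the comment above (the surrounding thd/item variables and the buffer size are assumptions):

    StringBuffer<MAX_DATE_STRING_REP_LENGTH> buf;
    Temporal_hybrid tmp(thd, item);          // keeps the Item's native temporal type
    if (tmp.is_valid_temporal())
      tmp.to_string(&buf, item->decimals);   // prints TIME, DATE or DATETIME accordingly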
+
+
+/*
+ This class resembles the SQL standard <extract source>,
+ used in extract expressions, e.g. EXTRACT(DAY FROM dt)
+ <extract expression> ::=
+ EXTRACT <left paren> <extract field> FROM <extract source> <right paren>
+ <extract source> ::= <datetime value expression> | <interval value expression>
+*/
+class Extract_source: public Temporal_hybrid
+{
+ /*
+ Convert a TIME value to DAY-TIME interval, e.g. for extraction:
+ EXTRACT(DAY FROM x), EXTRACT(HOUR FROM x), etc.
+ Moves full days from ltime->hour to ltime->day.
+ */
+ void time_to_daytime_interval()
+ {
+ DBUG_ASSERT(time_type == MYSQL_TIMESTAMP_TIME);
+ DBUG_ASSERT(has_zero_YYYYMMDD());
+ MYSQL_TIME::day= MYSQL_TIME::hour / 24;
+ MYSQL_TIME::hour%= 24;
+ }
+ bool is_valid_extract_source_slow() const
+ {
+ return is_valid_temporal() && MYSQL_TIME::hour < 24 &&
+ (has_zero_YYYYMM() || time_type != MYSQL_TIMESTAMP_TIME);
+ }
+ bool is_valid_value_slow() const
+ {
+ return time_type == MYSQL_TIMESTAMP_NONE || is_valid_extract_source_slow();
+ }
+public:
+ Extract_source(THD *thd, Item *item, date_mode_t mode)
+ :Temporal_hybrid(thd, item, mode)
+ {
+ if (MYSQL_TIME::time_type == MYSQL_TIMESTAMP_TIME)
+ time_to_daytime_interval();
+ DBUG_ASSERT(is_valid_value_slow());
+ }
+ inline const MYSQL_TIME *get_mysql_time() const
+ {
+ DBUG_ASSERT(is_valid_extract_source_slow());
+ return this;
+ }
+ bool is_valid_extract_source() const { return is_valid_temporal(); }
+ int sign() const { return get_mysql_time()->neg ? -1 : 1; }
+ uint year() const { return get_mysql_time()->year; }
+ uint month() const { return get_mysql_time()->month; }
+ int day() const { return (int) get_mysql_time()->day * sign(); }
+ int hour() const { return (int) get_mysql_time()->hour * sign(); }
+ int minute() const { return (int) get_mysql_time()->minute * sign(); }
+ int second() const { return (int) get_mysql_time()->second * sign(); }
+ int microsecond() const { return (int) get_mysql_time()->second_part * sign(); }
+
+ uint year_month() const { return year() * 100 + month(); }
+ uint quarter() const { return (month() + 2)/3; }
+ uint week(THD *thd) const;
+
+ longlong second_microsecond() const
+ {
+ return (second() * 1000000LL + microsecond());
+ }
+
+ // DAY TO XXX
+ longlong day_hour() const
+ {
+ return (longlong) day() * 100LL + hour();
+ }
+ longlong day_minute() const
+ {
+ return day_hour() * 100LL + minute();
+ }
+ longlong day_second() const
+ {
+ return day_minute() * 100LL + second();
+ }
+ longlong day_microsecond() const
+ {
+ return day_second() * 1000000LL + microsecond();
+ }
+
+ // HOUR TO XXX
+ int hour_minute() const
+ {
+ return hour() * 100 + minute();
+ }
+ int hour_second() const
+ {
+ return hour_minute() * 100 + second();
+ }
+ longlong hour_microsecond() const
+ {
+ return hour_second() * 1000000LL + microsecond();
+ }
+
+ // MINUTE TO XXX
+ int minute_second() const
+ {
+ return minute() * 100 + second();
+ }
+ longlong minute_microsecond() const
+ {
+ return minute_second() * 1000000LL + microsecond();
+ }
+};
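A worked example of the packing above: for a source value TIME'49:30:10', time_to_daytime_interval() splits it into day=2, hour=1, so:

    //  day_hour()    =     2*100 + 1  = 201
    //  day_minute()  =   201*100 + 30 = 20130
    //  day_second()  = 20130*100 + 10 = 2013010
    //  hour_minute() =     1*100 + 30 = 130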
+
+
+/*
+ This class is used for the "time_interval" argument of these SQL functions:
+ TIMESTAMP(tm,time_interval)
+ ADDTIME(tm,time_interval)
+ Features:
+ - DATE and DATETIME formats are treated as errors
+ - Preserves hours for TIME format as is, without limiting to TIME_MAX_HOUR
+*/
+class Interval_DDhhmmssff: public Temporal
+{
+ static const LEX_CSTRING m_type_name;
+ bool str_to_DDhhmmssff(MYSQL_TIME_STATUS *status,
+ const char *str, size_t length, CHARSET_INFO *cs,
+ ulong max_hour);
+ void push_warning_wrong_or_truncated_value(THD *thd,
+ const ErrConv &str,
+ int warnings);
+ bool is_valid_interval_DDhhmmssff_slow() const
+ {
+ return time_type == MYSQL_TIMESTAMP_TIME &&
+ has_zero_YYYYMMDD() && has_valid_mmssff();
+ }
+ bool is_valid_value_slow() const
+ {
+ return time_type == MYSQL_TIMESTAMP_NONE ||
+ is_valid_interval_DDhhmmssff_slow();
+ }
+public:
+ // Get fractional second precision from an Item
+ static uint fsp(THD *thd, Item *item);
+ /*
+ Maximum useful HOUR value:
+ TIMESTAMP'0001-01-01 00:00:00' + '87649415:59:59' = '9999-12-31 23:59:59'
+ This gives maximum possible interval values:
+ - '87649415:59:59.999999' (in 'hh:mm:ss.ff' format)
+ - '3652058 23:59:59.999999' (in 'DD hh:mm:ss.ff' format)
+ */
+ static uint max_useful_hour()
+ {
+ return TIME_MAX_INTERVAL_HOUR;
+ }
+ static uint max_int_part_char_length()
+ {
+ // e.g. '+3652058 23:59:59'
+ return 1/*sign*/ + TIME_MAX_INTERVAL_DAY_CHAR_LENGTH + 1 + 8/*hh:mm:ss*/;
+ }
+ static uint max_char_length(uint fsp)
+ {
+ DBUG_ASSERT(fsp <= TIME_SECOND_PART_DIGITS);
+ return max_int_part_char_length() + (fsp ? 1 : 0) + fsp;
+ }
+
+public:
+ Interval_DDhhmmssff(THD *thd, Status *st, bool push_warnings,
+ Item *item, ulong max_hour,
+ time_round_mode_t mode, uint dec);
+ Interval_DDhhmmssff(THD *thd, Item *item, uint dec)
+ {
+ Status st;
+ new(this) Interval_DDhhmmssff(thd, &st, true, item, max_useful_hour(),
+ default_round_mode(thd), dec);
+ }
+ Interval_DDhhmmssff(THD *thd, Item *item)
+ :Interval_DDhhmmssff(thd, item, TIME_SECOND_PART_DIGITS)
+ { }
+ const MYSQL_TIME *get_mysql_time() const
+ {
+ DBUG_ASSERT(is_valid_interval_DDhhmmssff_slow());
+ return this;
+ }
+ bool is_valid_interval_DDhhmmssff() const
+ {
+ return time_type == MYSQL_TIMESTAMP_TIME;
+ }
+ bool is_valid_value() const
+ {
+ return time_type == MYSQL_TIMESTAMP_NONE || is_valid_interval_DDhhmmssff();
+ }
+ String *to_string(String *str, uint dec) const
+ {
+ if (!is_valid_interval_DDhhmmssff())
+ return NULL;
+ str->set_charset(&my_charset_numeric);
+ if (!str->alloc(MAX_DATE_STRING_REP_LENGTH))
+ str->length(my_interval_DDhhmmssff_to_str(this,
+ const_cast<char*>(str->ptr()),
+ dec));
+ return str;
+ }
+};
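To make the character-length arithmetic above concrete (assuming TIME_MAX_INTERVAL_DAY_CHAR_LENGTH is 7, i.e. the length of '3652058'):

    //  max_int_part_char_length() = 1 + 7 + 1 + 8 = 17   ("+3652058 23:59:59")
    //  max_char_length(6)         = 17 + 1 + 6    = 24   ("+3652058 23:59:59.999999")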
/**
@@ -93,35 +1266,46 @@ class Vers_history_point;
Time derives from MYSQL_TIME privately to make sure it is accessed
externally only in the valid state.
*/
-class Time: private MYSQL_TIME
+class Time: public Temporal
{
public:
enum datetime_to_time_mode_t
{
+ DATETIME_TO_TIME_DISALLOW,
DATETIME_TO_TIME_YYYYMMDD_000000DD_MIX_TO_HOURS,
- DATETIME_TO_TIME_YYYYMMDD_TRUNCATE
+ DATETIME_TO_TIME_YYYYMMDD_TRUNCATE,
+ DATETIME_TO_TIME_YYYYMMDD_00000000_ONLY,
+ DATETIME_TO_TIME_MINUS_CURRENT_DATE
};
- class Options
+ class Options: public Temporal::Options
{
- sql_mode_t m_get_date_flags;
datetime_to_time_mode_t m_datetime_to_time_mode;
public:
- Options()
- :m_get_date_flags(flags_for_get_date()),
- m_datetime_to_time_mode(DATETIME_TO_TIME_YYYYMMDD_000000DD_MIX_TO_HOURS)
+ Options(THD *thd)
+ :Temporal::Options(default_flags_for_get_date(), default_round_mode(thd)),
+ m_datetime_to_time_mode(default_datetime_to_time_mode())
{ }
- Options(sql_mode_t flags)
- :m_get_date_flags(flags),
- m_datetime_to_time_mode(DATETIME_TO_TIME_YYYYMMDD_000000DD_MIX_TO_HOURS)
+ Options(date_conv_mode_t flags, THD *thd)
+ :Temporal::Options(flags, default_round_mode(thd)),
+ m_datetime_to_time_mode(default_datetime_to_time_mode())
{ }
- Options(sql_mode_t flags, datetime_to_time_mode_t dtmode)
- :m_get_date_flags(flags),
+ Options(date_conv_mode_t flags, THD *thd, datetime_to_time_mode_t dtmode)
+ :Temporal::Options(flags, default_round_mode(thd)),
m_datetime_to_time_mode(dtmode)
{ }
- sql_mode_t get_date_flags() const
- { return m_get_date_flags; }
+ Options(date_conv_mode_t fuzzydate, time_round_mode_t round_mode,
+ datetime_to_time_mode_t datetime_to_time_mode)
+ :Temporal::Options(fuzzydate, round_mode),
+ m_datetime_to_time_mode(datetime_to_time_mode)
+ { }
+
datetime_to_time_mode_t datetime_to_time_mode() const
{ return m_datetime_to_time_mode; }
+
+ static datetime_to_time_mode_t default_datetime_to_time_mode()
+ {
+ return DATETIME_TO_TIME_YYYYMMDD_000000DD_MIX_TO_HOURS;
+ }
};
/*
CAST(AS TIME) historically does not mix days to hours.
@@ -131,8 +1315,26 @@ public:
class Options_for_cast: public Options
{
public:
- Options_for_cast()
- :Options(flags_for_get_date(), DATETIME_TO_TIME_YYYYMMDD_TRUNCATE)
+ Options_for_cast(THD *thd)
+ :Options(default_flags_for_get_date(), default_round_mode(thd),
+ DATETIME_TO_TIME_YYYYMMDD_TRUNCATE)
+ { }
+ Options_for_cast(date_mode_t mode, THD *thd)
+ :Options(default_flags_for_get_date() | (mode & TIME_FUZZY_DATES),
+ default_round_mode(thd),
+ DATETIME_TO_TIME_YYYYMMDD_TRUNCATE)
+ { }
+ };
+
+ class Options_cmp: public Options
+ {
+ public:
+ Options_cmp(THD *thd)
+ :Options(comparison_flags_for_get_date(), thd)
+ { }
+ Options_cmp(THD *thd, datetime_to_time_mode_t dtmode)
+ :Options(comparison_flags_for_get_date(),
+ default_round_mode(thd), dtmode)
{ }
};
private:
@@ -143,46 +1345,79 @@ private:
bool is_valid_time_slow() const
{
return time_type == MYSQL_TIMESTAMP_TIME &&
- year == 0 && month == 0 && day == 0 &&
- minute <= TIME_MAX_MINUTE &&
- second <= TIME_MAX_SECOND &&
- second_part <= TIME_MAX_SECOND_PART;
+ has_zero_YYYYMMDD() && has_valid_mmssff();
+ }
+ void hhmmssff_copy(const MYSQL_TIME *from)
+ {
+ hour= from->hour;
+ minute= from->minute;
+ second= from->second;
+ second_part= from->second_part;
+ }
+ void datetime_to_time_YYYYMMDD_000000DD_mix_to_hours(int *warn,
+ uint from_year,
+ uint from_month,
+ uint from_day)
+ {
+ if (from_year != 0 || from_month != 0)
+ *warn|= MYSQL_TIME_NOTE_TRUNCATED;
+ else
+ hour+= from_day * 24;
+ }
+ /*
+ The result is effectively calculated as:
+ TIMEDIFF(dt, CAST(CURRENT_DATE AS DATETIME))
+ If the difference does not fit into the supported TIME range, it is truncated.
+ */
+ void datetime_to_time_minus_current_date(THD *thd)
+ {
+ MYSQL_TIME current_date, tmp;
+ set_current_date(thd, &current_date);
+ calc_time_diff(this, &current_date, 1, &tmp, date_mode_t(0));
+ static_cast<MYSQL_TIME*>(this)[0]= tmp;
+ int warnings= 0;
+ (void) check_time_range(this, TIME_SECOND_PART_DIGITS, &warnings);
+ DBUG_ASSERT(is_valid_time());
}
-
/*
Convert a valid DATE or DATETIME to TIME.
Before this call, "this" must be a valid DATE or DATETIME value,
- e.g. returned from Item::get_date().
+ e.g. returned from Item::get_date(), str_to_xxx(), number_to_xxx().
After this call, "this" is a valid TIME value.
*/
- void valid_datetime_to_valid_time(const Options opt)
+ void valid_datetime_to_valid_time(THD *thd, int *warn, const Options opt)
{
DBUG_ASSERT(time_type == MYSQL_TIMESTAMP_DATE ||
time_type == MYSQL_TIMESTAMP_DATETIME);
/*
- Make sure that day and hour are valid, so the result hour value
+ We're dealing with a DATE or DATETIME returned from
+ str_to_xxx(), number_to_xxx() or unpack_time().
+ Do some asserts to make sure the result hour value
after mixing days to hours does not go out of the valid TIME range.
+ The maximum hour value after mixing days will be 31*24+23=767,
+ which is within the supported TIME range.
+ Thus no adjust_time_range_or_invalidate() is needed here.
*/
DBUG_ASSERT(day < 32);
DBUG_ASSERT(hour < 24);
- if (year == 0 && month == 0 &&
- opt.datetime_to_time_mode() ==
- DATETIME_TO_TIME_YYYYMMDD_000000DD_MIX_TO_HOURS)
+ if (opt.datetime_to_time_mode() == DATETIME_TO_TIME_MINUS_CURRENT_DATE)
+ {
+ datetime_to_time_minus_current_date(thd);
+ }
+ else
{
- /*
- The maximum hour value after mixing days will be 31*24+23=767,
- which is within the supported TIME range.
- Thus no adjust_time_range_or_invalidate() is needed here.
- */
- hour+= day * 24;
+ if (opt.datetime_to_time_mode() ==
+ DATETIME_TO_TIME_YYYYMMDD_000000DD_MIX_TO_HOURS)
+ datetime_to_time_YYYYMMDD_000000DD_mix_to_hours(warn, year, month, day);
+ year= month= day= 0;
+ time_type= MYSQL_TIMESTAMP_TIME;
}
- year= month= day= 0;
- time_type= MYSQL_TIMESTAMP_TIME;
DBUG_ASSERT(is_valid_time_slow());
}
/**
Convert valid DATE/DATETIME to valid TIME if needed.
This method is called after Item::get_date(),
+ str_to_xxx(), number_to_xxx().
which can return only valid TIME/DATE/DATETIME values.
Before this call, "this" is:
- either a valid TIME/DATE/DATETIME value
@@ -192,12 +1427,19 @@ private:
- either a valid TIME (within the supported TIME range),
- or MYSQL_TIMESTAMP_NONE
*/
- void valid_MYSQL_TIME_to_valid_value(const Options opt)
+ void valid_MYSQL_TIME_to_valid_value(THD *thd, int *warn, const Options opt)
{
switch (time_type) {
case MYSQL_TIMESTAMP_DATE:
case MYSQL_TIMESTAMP_DATETIME:
- valid_datetime_to_valid_time(opt);
+ if (opt.datetime_to_time_mode() ==
+ DATETIME_TO_TIME_YYYYMMDD_00000000_ONLY &&
+ (year || month || day))
+ make_from_out_of_range(warn);
+ else if (opt.datetime_to_time_mode() == DATETIME_TO_TIME_DISALLOW)
+ make_from_out_of_range(warn);
+ else
+ valid_datetime_to_valid_time(thd, warn, opt);
break;
case MYSQL_TIMESTAMP_NONE:
break;
@@ -209,14 +1451,147 @@ private:
break;
}
}
- void make_from_item(class Item *item, const Options opt);
+
+ /*
+ This method is called after number_to_xxx() and str_to_xxx(),
+ which can return DATE or DATETIME values. Convert to TIME if needed.
+ We trust that xxx_to_time() returns a valid TIME/DATE/DATETIME value,
+ so here we need to do only simple validation.
+ */
+ void xxx_to_time_result_to_valid_value(THD *thd, int *warn, const Options opt)
+ {
+ // str_to_xxx(), number_to_xxx() never return MYSQL_TIMESTAMP_ERROR
+ DBUG_ASSERT(time_type != MYSQL_TIMESTAMP_ERROR);
+ valid_MYSQL_TIME_to_valid_value(thd, warn, opt);
+ }
+ void adjust_time_range_or_invalidate(int *warn)
+ {
+ if (check_time_range(this, TIME_SECOND_PART_DIGITS, warn))
+ time_type= MYSQL_TIMESTAMP_NONE;
+ DBUG_ASSERT(is_valid_value_slow());
+ }
public:
+ void round_or_set_max(uint dec, int *warn, ulong nsec);
+private:
+ void round_or_set_max(uint dec, int *warn);
+
+ /*
+ All make_from_xxx() methods initialize *warn.
+ The old value gets lost.
+ */
+ void make_from_datetime_move_day_to_hour(int *warn, const MYSQL_TIME *from);
+ void make_from_datetime_with_days_diff(int *warn, const MYSQL_TIME *from,
+ long curdays);
+ void make_from_time(int *warn, const MYSQL_TIME *from);
+ void make_from_datetime(int *warn, const MYSQL_TIME *from, long curdays);
+ void make_from_item(THD *thd, int *warn, Item *item, const Options opt);
+public:
+ /*
+ All constructors that accept an "int *warn" parameter initialize *warn.
+ The old value gets lost.
+ */
+ Time(int *warn, bool neg, ulonglong hour, uint minute, const Sec6 &second);
Time() { time_type= MYSQL_TIMESTAMP_NONE; }
- Time(Item *item) { make_from_item(item, Options()); }
- Time(Item *item, const Options opt) { make_from_item(item, opt); }
- static sql_mode_t flags_for_get_date()
+ Time(Item *item)
+ :Time(current_thd, item)
+ { }
+ Time(THD *thd, Item *item, const Options opt)
+ {
+ int warn;
+ make_from_item(thd, &warn, item, opt);
+ }
+ Time(THD *thd, Item *item)
+ :Time(thd, item, Options(thd))
+ { }
+ Time(int *warn, const MYSQL_TIME *from, long curdays);
+ Time(THD *thd, MYSQL_TIME_STATUS *status,
+ const char *str, size_t len, CHARSET_INFO *cs,
+ const Options opt)
+ {
+ if (str_to_datetime_or_date_or_time(thd, status, str, len, cs, opt))
+ time_type= MYSQL_TIMESTAMP_NONE;
+ // The below call will optionally add notes to already collected warnings:
+ else
+ xxx_to_time_result_to_valid_value(thd, &status->warnings, opt);
+ }
+
+protected:
+ Time(THD *thd, int *warn, const Sec6 &nr, const Options opt)
+ {
+ if (nr.to_datetime_or_time(this, warn, TIME_INVALID_DATES))
+ time_type= MYSQL_TIMESTAMP_NONE;
+ xxx_to_time_result_to_valid_value(thd, warn, opt);
+ }
+ Time(THD *thd, int *warn, const Sec9 &nr, const Options &opt)
+ :Time(thd, warn, static_cast<Sec6>(nr), opt)
+ {
+ if (is_valid_time() && time_round_mode_t(opt) == TIME_FRAC_ROUND)
+ round_or_set_max(6, warn, nr.nsec());
+ }
+
+public:
+ Time(THD *thd, int *warn, const Longlong_hybrid &nr, const Options &opt)
+ :Time(thd, warn, Sec6(nr), opt)
+ { }
+ Time(THD *thd, int *warn, double nr, const Options &opt)
+ :Time(thd, warn, Sec9(nr), opt)
+ { }
+ Time(THD *thd, int *warn, const my_decimal *d, const Options &opt)
+ :Time(thd, warn, Sec9(d), opt)
+ { }
+
+ Time(THD *thd, Item *item, const Options opt, uint dec)
+ :Time(thd, item, opt)
+ {
+ round(dec, time_round_mode_t(opt));
+ }
+ Time(int *warn, const MYSQL_TIME *from, long curdays,
+ const Time::Options &opt, uint dec)
+ :Time(warn, from, curdays)
+ {
+ round(dec, time_round_mode_t(opt), warn);
+ }
+ Time(int *warn, bool neg, ulonglong hour, uint minute, const Sec9 &second,
+ time_round_mode_t mode, uint dec)
+ :Time(warn, neg, hour, minute, second)
+ {
+ DBUG_ASSERT(is_valid_time());
+ if ((ulonglong) mode == (ulonglong) TIME_FRAC_ROUND)
+ round_or_set_max(6, warn, second.nsec());
+ round(dec, mode, warn);
+ }
+ Time(THD *thd, MYSQL_TIME_STATUS *status,
+ const char *str, size_t len, CHARSET_INFO *cs,
+ const Options &opt, uint dec)
+ :Time(thd, status, str, len, cs, opt)
+ {
+ round(dec, time_round_mode_t(opt), &status->warnings);
+ }
+ Time(THD *thd, int *warn, const Longlong_hybrid &nr,
+ const Options &opt, uint dec)
+ :Time(thd, warn, nr, opt)
+ {
+ /*
+ Decimal digit truncation is needed here in case nr was out
+ of the supported TIME range and "this" was set to '838:59:59.999999'.
+ We always do truncation (not rounding) here, independently of "opt".
+ */
+ trunc(dec);
+ }
+ Time(THD *thd, int *warn, double nr, const Options &opt, uint dec)
+ :Time(thd, warn, nr, opt)
+ {
+ round(dec, time_round_mode_t(opt), warn);
+ }
+ Time(THD *thd, int *warn, const my_decimal *d, const Options &opt, uint dec)
+ :Time(thd, warn, d, opt)
+ {
+ round(dec, time_round_mode_t(opt), warn);
+ }
+
+ static date_conv_mode_t default_flags_for_get_date()
{ return TIME_TIME_ONLY | TIME_INVALID_DATES; }
- static sql_mode_t comparison_flags_for_get_date()
+ static date_conv_mode_t comparison_flags_for_get_date()
{ return TIME_TIME_ONLY | TIME_INVALID_DATES | TIME_FUZZY_DATES; }
bool is_valid_time() const
{
@@ -243,8 +1618,8 @@ public:
{
DBUG_ASSERT(is_valid_time_slow());
DBUG_ASSERT(other->is_valid_time_slow());
- longlong p0= pack_time(this);
- longlong p1= pack_time(other);
+ longlong p0= to_packed();
+ longlong p1= other->to_packed();
if (p0 < p1)
return -1;
if (p0 > p1)
@@ -260,6 +1635,74 @@ public:
{
return neg ? -to_seconds_abs() : to_seconds_abs();
}
+ longlong to_longlong() const
+ {
+ if (!is_valid_time())
+ return 0;
+ ulonglong v= TIME_to_ulonglong_time(this);
+ return neg ? -(longlong) v : (longlong) v;
+ }
+ double to_double() const
+ {
+ return !is_valid_time() ? 0 :
+ Temporal::to_double(neg, TIME_to_ulonglong_time(this), second_part);
+ }
+ String *to_string(String *str, uint dec) const
+ {
+ if (!is_valid_time())
+ return NULL;
+ str->set_charset(&my_charset_numeric);
+ if (!str->alloc(MAX_DATE_STRING_REP_LENGTH))
+ str->length(my_time_to_str(this, const_cast<char*>(str->ptr()), dec));
+ return str;
+ }
+ my_decimal *to_decimal(my_decimal *to)
+ {
+ return is_valid_time() ? Temporal::to_decimal(to) : bad_to_decimal(to);
+ }
+ longlong to_packed() const
+ {
+ return is_valid_time() ? Temporal::to_packed() : 0;
+ }
+ long fraction_remainder(uint dec) const
+ {
+ DBUG_ASSERT(is_valid_time());
+ return Temporal::fraction_remainder(dec);
+ }
+
+ Time &trunc(uint dec)
+ {
+ if (is_valid_time())
+ my_time_trunc(this, dec);
+ DBUG_ASSERT(is_valid_value_slow());
+ return *this;
+ }
+ Time &round(uint dec, int *warn)
+ {
+ if (is_valid_time())
+ round_or_set_max(dec, warn);
+ DBUG_ASSERT(is_valid_value_slow());
+ return *this;
+ }
+ Time &round(uint dec, time_round_mode_t mode, int *warn)
+ {
+ switch (mode.mode()) {
+ case time_round_mode_t::FRAC_NONE:
+ DBUG_ASSERT(fraction_remainder(dec) == 0);
+ return trunc(dec);
+ case time_round_mode_t::FRAC_TRUNCATE:
+ return trunc(dec);
+ case time_round_mode_t::FRAC_ROUND:
+ return round(dec, warn);
+ }
+ return *this;
+ }
+ Time &round(uint dec, time_round_mode_t mode)
+ {
+ int warn= 0;
+ return round(dec, mode, &warn);
+ }
+
};
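A minimal sketch of the typical Item-to-TIME conversion path using the constructors above (thd/item and the scale of 3 are assumptions):

    Time tm(thd, item, Time::Options(thd), 3);  // convert, then round/truncate to 3 digits
    if (tm.is_valid_time())
    {
      StringBuffer<MAX_DATE_STRING_REP_LENGTH> buf;
      tm.to_string(&buf, 3);                    // hh:mm:ss.fff representation
    }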
@@ -286,14 +1729,79 @@ public:
it is accessed externally only in the valid state.
*/
-class Temporal_with_date: protected MYSQL_TIME
+class Temporal_with_date: public Temporal
{
+public:
+ class Options: public Temporal::Options
+ {
+ public:
+ Options(date_conv_mode_t fuzzydate, time_round_mode_t mode):
+ Temporal::Options(fuzzydate, mode)
+ {}
+ explicit Options(const Temporal::Options &opt)
+ :Temporal::Options(opt)
+ { }
+ explicit Options(date_mode_t mode)
+ :Temporal::Options(mode)
+ { }
+ };
protected:
- void make_from_item(THD *thd, Item *item, sql_mode_t flags);
- Temporal_with_date(THD *thd, Item *item, sql_mode_t flags)
+ void check_date_or_invalidate(int *warn, date_conv_mode_t flags);
+ void make_from_item(THD *thd, Item *item, date_mode_t flags);
+
+ ulong daynr() const
+ {
+ return (ulong) ::calc_daynr((uint) year, (uint) month, (uint) day);
+ }
+ ulong dayofyear() const
+ {
+ return (ulong) (daynr() - ::calc_daynr(year, 1, 1) + 1);
+ }
+ uint quarter() const
+ {
+ return (month + 2) / 3;
+ }
+ uint week(uint week_behaviour) const
+ {
+ uint year;
+ return calc_week(this, week_behaviour, &year);
+ }
+ uint yearweek(uint week_behaviour) const
+ {
+ uint year;
+ uint week= calc_week(this, week_behaviour, &year);
+ return week + year * 100;
+ }
+public:
+ Temporal_with_date()
+ {
+ time_type= MYSQL_TIMESTAMP_NONE;
+ }
+ Temporal_with_date(THD *thd, Item *item, date_mode_t fuzzydate)
+ {
+ make_from_item(thd, item, fuzzydate);
+ }
+ Temporal_with_date(int *warn, const Sec6 &nr, date_mode_t flags)
+ {
+ DBUG_ASSERT(bool(flags & TIME_TIME_ONLY) == false);
+ if (nr.to_datetime_or_date(this, warn, date_conv_mode_t(flags)))
+ time_type= MYSQL_TIMESTAMP_NONE;
+ }
+ Temporal_with_date(THD *thd, MYSQL_TIME_STATUS *status,
+ const char *str, size_t len, CHARSET_INFO *cs,
+ date_mode_t flags)
+ {
+ DBUG_ASSERT(bool(flags & TIME_TIME_ONLY) == false);
+ if (str_to_datetime_or_date(thd, status, str, len, cs, flags))
+ time_type= MYSQL_TIMESTAMP_NONE;
+ }
+public:
+ bool check_date_with_warn(THD *thd, date_conv_mode_t flags)
{
- make_from_item(thd, item, flags);
+ return ::check_date_with_warn(thd, this, flags, MYSQL_TIMESTAMP_ERROR);
}
+ static date_conv_mode_t comparison_flags_for_get_date()
+ { return TIME_INVALID_DATES | TIME_FUZZY_DATES; }
};
@@ -318,13 +1826,42 @@ class Date: public Temporal_with_date
return !check_datetime_range(this);
}
public:
- Date(THD *thd, Item *item, sql_mode_t flags)
- :Temporal_with_date(thd, item, flags)
+ class Options: public Temporal_with_date::Options
+ {
+ public:
+ explicit Options(date_conv_mode_t fuzzydate)
+ :Temporal_with_date::Options(fuzzydate, TIME_FRAC_TRUNCATE)
+ { }
+ Options(THD *thd, time_round_mode_t mode)
+ :Temporal_with_date::Options(sql_mode_for_dates(thd), mode)
+ { }
+ explicit Options(THD *thd)
+ :Temporal_with_date::Options(sql_mode_for_dates(thd), TIME_FRAC_TRUNCATE)
+ { }
+ explicit Options(date_mode_t fuzzydate)
+ :Temporal_with_date::Options(fuzzydate)
+ { }
+ };
+public:
+ Date(Item *item, date_mode_t fuzzydate)
+ :Date(current_thd, item, fuzzydate)
+ { }
+ Date(THD *thd, Item *item, date_mode_t fuzzydate)
+ :Temporal_with_date(thd, item, fuzzydate)
{
if (time_type == MYSQL_TIMESTAMP_DATETIME)
datetime_to_date(this);
DBUG_ASSERT(is_valid_value_slow());
}
+ Date(THD *thd, Item *item, date_conv_mode_t fuzzydate)
+ :Date(thd, item, Options(fuzzydate))
+ { }
+ Date(THD *thd, Item *item)
+ :Temporal_with_date(Date(thd, item, Options(thd, TIME_FRAC_TRUNCATE)))
+ { }
+ Date(Item *item)
+ :Temporal_with_date(Date(current_thd, item))
+ { }
Date(const Temporal_with_date *d)
:Temporal_with_date(*d)
{
@@ -341,6 +1878,64 @@ public:
DBUG_ASSERT(is_valid_date_slow());
return this;
}
+ bool copy_to_mysql_time(MYSQL_TIME *ltime) const
+ {
+ if (time_type == MYSQL_TIMESTAMP_NONE)
+ {
+ ltime->time_type= MYSQL_TIMESTAMP_NONE;
+ return true;
+ }
+ DBUG_ASSERT(is_valid_date_slow());
+ *ltime= *this;
+ return false;
+ }
+ ulong daynr() const
+ {
+ DBUG_ASSERT(is_valid_date_slow());
+ return Temporal_with_date::daynr();
+ }
+ ulong dayofyear() const
+ {
+ DBUG_ASSERT(is_valid_date_slow());
+ return Temporal_with_date::dayofyear();
+ }
+ uint quarter() const
+ {
+ DBUG_ASSERT(is_valid_date_slow());
+ return Temporal_with_date::quarter();
+ }
+ uint week(uint week_behaviour) const
+ {
+ DBUG_ASSERT(is_valid_date_slow());
+ return Temporal_with_date::week(week_behaviour);
+ }
+ uint yearweek(uint week_behaviour) const
+ {
+ DBUG_ASSERT(is_valid_date_slow());
+ return Temporal_with_date::yearweek(week_behaviour);
+ }
+
+ longlong to_longlong() const
+ {
+ return is_valid_date() ? (longlong) TIME_to_ulonglong_date(this) : 0LL;
+ }
+ double to_double() const
+ {
+ return (double) to_longlong();
+ }
+ String *to_string(String *str) const
+ {
+ if (!is_valid_date())
+ return NULL;
+ str->set_charset(&my_charset_numeric);
+ if (!str->alloc(MAX_DATE_STRING_REP_LENGTH))
+ str->length(my_date_to_str(this, const_cast<char*>(str->ptr())));
+ return str;
+ }
+ my_decimal *to_decimal(my_decimal *to)
+ {
+ return is_valid_date() ? Temporal::to_decimal(to) : bad_to_decimal(to);
+ }
};
@@ -365,14 +1960,148 @@ class Datetime: public Temporal_with_date
DBUG_ASSERT(time_type == MYSQL_TIMESTAMP_DATETIME);
return !check_datetime_range(this);
}
-public:
- Datetime(THD *thd, Item *item, sql_mode_t flags)
- :Temporal_with_date(thd, item, flags)
+ bool add_nanoseconds_or_invalidate(THD *thd, int *warn, ulong nsec)
+ {
+ DBUG_ASSERT(is_valid_datetime_slow());
+ bool rc= Temporal::datetime_add_nanoseconds_or_invalidate(thd, warn, nsec);
+ DBUG_ASSERT(is_valid_value_slow());
+ return rc;
+ }
+ void date_to_datetime_if_needed()
{
if (time_type == MYSQL_TIMESTAMP_DATE)
date_to_datetime(this);
+ }
+ void make_from_time(THD *thd, int *warn, const MYSQL_TIME *from,
+ date_conv_mode_t flags);
+ void make_from_datetime(THD *thd, int *warn, const MYSQL_TIME *from,
+ date_conv_mode_t flags);
+ bool round_or_invalidate(THD *thd, uint dec, int *warn);
+ bool round_or_invalidate(THD *thd, uint dec, int *warn, ulong nsec)
+ {
+ DBUG_ASSERT(is_valid_datetime_slow());
+ bool rc= Temporal::datetime_round_or_invalidate(thd, dec, warn, nsec);
DBUG_ASSERT(is_valid_value_slow());
+ return rc;
+ }
+public:
+
+ class Options: public Temporal_with_date::Options
+ {
+ public:
+ Options(date_conv_mode_t fuzzydate, time_round_mode_t nanosecond_rounding)
+ :Temporal_with_date::Options(fuzzydate, nanosecond_rounding)
+ { }
+ Options(THD *thd)
+ :Temporal_with_date::Options(sql_mode_for_dates(thd), default_round_mode(thd))
+ { }
+ Options(THD *thd, time_round_mode_t rounding_mode)
+ :Temporal_with_date::Options(sql_mode_for_dates(thd), rounding_mode)
+ { }
+ Options(date_conv_mode_t fuzzydate, THD *thd)
+ :Temporal_with_date::Options(fuzzydate, default_round_mode(thd))
+ { }
+ };
+
+ class Options_cmp: public Options
+ {
+ public:
+ Options_cmp(THD *thd)
+ :Options(comparison_flags_for_get_date(), thd)
+ { }
+ };
+
+ static Datetime zero()
+ {
+ int warn;
+ static Longlong_hybrid nr(0, false);
+ return Datetime(&warn, nr, date_mode_t(0));
+ }
+public:
+ Datetime() // NULL value
+ :Temporal_with_date()
+ { }
+ Datetime(THD *thd, Item *item, date_mode_t fuzzydate)
+ :Temporal_with_date(thd, item, fuzzydate)
+ {
+ date_to_datetime_if_needed();
+ DBUG_ASSERT(is_valid_value_slow());
+ }
+ Datetime(THD *thd, Item *item)
+ :Temporal_with_date(Datetime(thd, item, Options(thd)))
+ { }
+ Datetime(Item *item)
+ :Datetime(current_thd, item)
+ { }
+
+ Datetime(THD *thd, int *warn, const MYSQL_TIME *from, date_conv_mode_t flags);
+ Datetime(THD *thd, MYSQL_TIME_STATUS *status,
+ const char *str, size_t len, CHARSET_INFO *cs,
+ const date_mode_t fuzzydate)
+ :Temporal_with_date(thd, status, str, len, cs, fuzzydate)
+ {
+ date_to_datetime_if_needed();
+ DBUG_ASSERT(is_valid_value_slow());
+ }
+
+protected:
+ Datetime(int *warn, const Sec6 &nr, date_mode_t flags)
+ :Temporal_with_date(warn, nr, flags)
+ {
+ date_to_datetime_if_needed();
+ DBUG_ASSERT(is_valid_value_slow());
+ }
+ Datetime(THD *thd, int *warn, const Sec9 &nr, date_mode_t fuzzydate)
+ :Datetime(warn, static_cast<const Sec6>(nr), fuzzydate)
+ {
+ if (is_valid_datetime() &&
+ time_round_mode_t(fuzzydate) == TIME_FRAC_ROUND)
+ round_or_invalidate(thd, 6, warn, nr.nsec());
+ DBUG_ASSERT(is_valid_value_slow());
+ }
+
+public:
+ Datetime(int *warn, const Longlong_hybrid &nr, date_mode_t mode)
+ :Datetime(warn, Sec6(nr), mode)
+ { }
+ Datetime(THD *thd, int *warn, double nr, date_mode_t fuzzydate)
+ :Datetime(thd, warn, Sec9(nr), fuzzydate)
+ { }
+ Datetime(THD *thd, int *warn, const my_decimal *d, date_mode_t fuzzydate)
+ :Datetime(thd, warn, Sec9(d), fuzzydate)
+ { }
+ Datetime(THD *thd, const timeval &tv);
+
+ Datetime(THD *thd, Item *item, date_mode_t fuzzydate, uint dec)
+ :Datetime(thd, item, fuzzydate)
+ {
+ int warn= 0;
+ round(thd, dec, time_round_mode_t(fuzzydate), &warn);
+ }
+ Datetime(THD *thd, MYSQL_TIME_STATUS *status,
+ const char *str, size_t len, CHARSET_INFO *cs,
+ date_mode_t fuzzydate, uint dec)
+ :Datetime(thd, status, str, len, cs, fuzzydate)
+ {
+ round(thd, dec, time_round_mode_t(fuzzydate), &status->warnings);
+ }
+ Datetime(THD *thd, int *warn, double nr, date_mode_t fuzzydate, uint dec)
+ :Datetime(thd, warn, nr, fuzzydate)
+ {
+ round(thd, dec, time_round_mode_t(fuzzydate), warn);
+ }
+ Datetime(THD *thd, int *warn, const my_decimal *d, date_mode_t fuzzydate, uint dec)
+ :Datetime(thd, warn, d, fuzzydate)
+ {
+ round(thd, dec, time_round_mode_t(fuzzydate), warn);
}
+ Datetime(THD *thd, int *warn, const MYSQL_TIME *from,
+ date_mode_t fuzzydate, uint dec)
+ :Datetime(thd, warn, from, date_conv_mode_t(fuzzydate) & ~TIME_TIME_ONLY)
+ {
+ round(thd, dec, time_round_mode_t(fuzzydate), warn);
+ }
+
bool is_valid_datetime() const
{
/*
@@ -382,11 +2111,63 @@ public:
DBUG_ASSERT(is_valid_value_slow());
return time_type == MYSQL_TIMESTAMP_DATETIME;
}
+ bool check_date(date_conv_mode_t flags, int *warnings) const
+ {
+ DBUG_ASSERT(is_valid_datetime_slow());
+ return ::check_date(this, (year || month || day),
+ ulonglong(flags & TIME_MODE_FOR_XXX_TO_DATE),
+ warnings);
+ }
+ bool check_date(date_conv_mode_t flags) const
+ {
+ int dummy; /* unused */
+ return check_date(flags, &dummy);
+ }
bool hhmmssff_is_zero() const
{
DBUG_ASSERT(is_valid_datetime_slow());
return hour == 0 && minute == 0 && second == 0 && second_part == 0;
}
+ ulong daynr() const
+ {
+ DBUG_ASSERT(is_valid_datetime_slow());
+ return Temporal_with_date::daynr();
+ }
+ ulong dayofyear() const
+ {
+ DBUG_ASSERT(is_valid_datetime_slow());
+ return Temporal_with_date::dayofyear();
+ }
+ uint quarter() const
+ {
+ DBUG_ASSERT(is_valid_datetime_slow());
+ return Temporal_with_date::quarter();
+ }
+ uint week(uint week_behaviour) const
+ {
+ DBUG_ASSERT(is_valid_datetime_slow());
+ return Temporal_with_date::week(week_behaviour);
+ }
+ uint yearweek(uint week_behaviour) const
+ {
+ DBUG_ASSERT(is_valid_datetime_slow());
+ return Temporal_with_date::yearweek(week_behaviour);
+ }
+
+ longlong hhmmss_to_seconds_abs() const
+ {
+ DBUG_ASSERT(is_valid_datetime_slow());
+ return hour * 3600L + minute * 60 + second;
+ }
+ longlong hhmmss_to_seconds() const
+ {
+ return neg ? -hhmmss_to_seconds_abs() : hhmmss_to_seconds_abs();
+ }
+ longlong to_seconds() const
+ {
+ return hhmmss_to_seconds() + (longlong) daynr() * 24L * 3600L;
+ }
+
const MYSQL_TIME *get_mysql_time() const
{
DBUG_ASSERT(is_valid_datetime_slow());
@@ -418,8 +2199,304 @@ public:
ltime->time_type= type;
return false;
}
+ longlong to_longlong() const
+ {
+ return is_valid_datetime() ?
+ (longlong) TIME_to_ulonglong_datetime(this) : 0LL;
+ }
+ double to_double() const
+ {
+ return !is_valid_datetime() ? 0 :
+ Temporal::to_double(neg, TIME_to_ulonglong_datetime(this), second_part);
+ }
+ String *to_string(String *str, uint dec) const
+ {
+ if (!is_valid_datetime())
+ return NULL;
+ str->set_charset(&my_charset_numeric);
+ if (!str->alloc(MAX_DATE_STRING_REP_LENGTH))
+ str->length(my_datetime_to_str(this, const_cast<char*>(str->ptr()), dec));
+ return str;
+ }
+ my_decimal *to_decimal(my_decimal *to)
+ {
+ return is_valid_datetime() ? Temporal::to_decimal(to) : bad_to_decimal(to);
+ }
+ longlong to_packed() const
+ {
+ return is_valid_datetime() ? Temporal::to_packed() : 0;
+ }
+ long fraction_remainder(uint dec) const
+ {
+ DBUG_ASSERT(is_valid_datetime());
+ return Temporal::fraction_remainder(dec);
+ }
+
+ Datetime &trunc(uint dec)
+ {
+ if (is_valid_datetime())
+ my_time_trunc(this, dec);
+ DBUG_ASSERT(is_valid_value_slow());
+ return *this;
+ }
+ Datetime &round(THD *thd, uint dec, int *warn)
+ {
+ if (is_valid_datetime())
+ round_or_invalidate(thd, dec, warn);
+ DBUG_ASSERT(is_valid_value_slow());
+ return *this;
+ }
+ Datetime &round(THD *thd, uint dec, time_round_mode_t mode, int *warn)
+ {
+ switch (mode.mode()) {
+ case time_round_mode_t::FRAC_NONE:
+ DBUG_ASSERT(fraction_remainder(dec) == 0);
+ return trunc(dec);
+ case time_round_mode_t::FRAC_TRUNCATE:
+ return trunc(dec);
+ case time_round_mode_t::FRAC_ROUND:
+ return round(thd, dec, warn);
+ }
+ return *this;
+ }
+ Datetime &round(THD *thd, uint dec, time_round_mode_t mode)
+ {
+ int warn= 0;
+ return round(thd, dec, mode, &warn);
+ }
+
+};
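A sketch of the Item-to-DATETIME path with rounding, using the constructors declared above (thd/item assumed; the scale comes from the Item):

    Datetime dt(thd, item, Datetime::Options(thd), item->decimals);
    if (dt.is_valid_datetime())
    {
      longlong num= dt.to_longlong();   // YYYYMMDDhhmmss as a number
      ulong day= dt.daynr();            // day number as computed by calc_daynr()
    }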
+
+
+/*
+ Datetime to be created from an Item that is known to be of a temporal
+ data type. For temporal data types we don't need nanosecond rounding
+ or truncation, as their precision is limited.
+*/
+class Datetime_from_temporal: public Datetime
+{
+public:
+ // The constructor DBUG_ASSERTs on a proper Item data type.
+ Datetime_from_temporal(THD *thd, Item *temporal, date_conv_mode_t flags);
+};
+
+
+/*
+ Datetime to be created from an Item that is known not to have digits outside
+ of the specified scale, so it does not matter which rounding method is used.
+ TRUNCATE should work.
+ Typically, the Item is of a temporal data type, but this is not strictly required.
+*/
+class Datetime_truncation_not_needed: public Datetime
+{
+public:
+ Datetime_truncation_not_needed(THD *thd, Item *item, date_conv_mode_t mode);
+ Datetime_truncation_not_needed(THD *thd, Item *item, date_mode_t mode)
+ :Datetime_truncation_not_needed(thd, item, date_conv_mode_t(mode))
+ { }
+};
+
+
+class Timestamp: protected Timeval
+{
+ static uint binary_length_to_precision(uint length);
+protected:
+ void round_or_set_max(uint dec, int *warn);
+ bool add_nanoseconds_usec(uint nanoseconds)
+ {
+ DBUG_ASSERT(nanoseconds <= 1000000000);
+ if (nanoseconds < 500)
+ return false;
+ tv_usec+= (nanoseconds + 500) / 1000;
+ if (tv_usec < 1000000)
+ return false;
+ tv_usec%= 1000000;
+ return true;
+ }
+public:
+ static date_conv_mode_t sql_mode_for_timestamp(THD *thd);
+ static time_round_mode_t default_round_mode(THD *thd);
+ class DatetimeOptions: public date_mode_t
+ {
+ public:
+ DatetimeOptions(date_conv_mode_t fuzzydate, time_round_mode_t round_mode)
+ :date_mode_t(fuzzydate | round_mode)
+ { }
+ DatetimeOptions(THD *thd)
+ :DatetimeOptions(sql_mode_for_timestamp(thd), default_round_mode(thd))
+ { }
+ };
+public:
+ Timestamp(my_time_t timestamp, ulong sec_part)
+ :Timeval(timestamp, sec_part)
+ { }
+ explicit Timestamp(const timeval &tv)
+ :Timeval(tv)
+ { }
+ explicit Timestamp(const Native &native);
+ Timestamp(THD *thd, const MYSQL_TIME *ltime, uint *error_code);
+ const struct timeval &tv() const { return *this; }
+ int cmp(const Timestamp &other) const
+ {
+ return tv_sec < other.tv_sec ? -1 :
+ tv_sec > other.tv_sec ? +1 :
+ tv_usec < other.tv_usec ? -1 :
+ tv_usec > other.tv_usec ? +1 : 0;
+ }
+ bool to_TIME(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) const;
+ bool to_native(Native *to, uint decimals) const;
+ long fraction_remainder(uint dec) const
+ {
+ return my_time_fraction_remainder(tv_usec, dec);
+ }
+ Timestamp &trunc(uint dec)
+ {
+ my_timeval_trunc(this, dec);
+ return *this;
+ }
+ Timestamp &round(uint dec, int *warn)
+ {
+ round_or_set_max(dec, warn);
+ return *this;
+ }
+ Timestamp &round(uint dec, time_round_mode_t mode, int *warn)
+ {
+ switch (mode.mode()) {
+ case time_round_mode_t::FRAC_NONE:
+ DBUG_ASSERT(fraction_remainder(dec) == 0);
+ return trunc(dec);
+ case time_round_mode_t::FRAC_TRUNCATE:
+ return trunc(dec);
+ case time_round_mode_t::FRAC_ROUND:
+ return round(dec, warn);
+ }
+ return *this;
+ }
+ Timestamp &round(uint dec, time_round_mode_t mode)
+ {
+ int warn= 0;
+ return round(dec, mode, &warn);
+ }
+};
+
+
+/**
+ A helper class to store MariaDB TIMESTAMP values, which can be:
+ - real TIMESTAMP (seconds and microseconds since epoch), or
+ - zero datetime '0000-00-00 00:00:00.000000'
+*/
+class Timestamp_or_zero_datetime: public Timestamp
+{
+ bool m_is_zero_datetime;
+public:
+ Timestamp_or_zero_datetime()
+ :Timestamp(0,0), m_is_zero_datetime(true)
+ { }
+ Timestamp_or_zero_datetime(const Native &native)
+ :Timestamp(native.length() ? Timestamp(native) : Timestamp(0,0)),
+ m_is_zero_datetime(native.length() == 0)
+ { }
+ Timestamp_or_zero_datetime(const Timestamp &tm, bool is_zero_datetime)
+ :Timestamp(tm), m_is_zero_datetime(is_zero_datetime)
+ { }
+ Timestamp_or_zero_datetime(THD *thd, const MYSQL_TIME *ltime, uint *err_code);
+ Datetime to_datetime(THD *thd) const
+ {
+ return Datetime(thd, *this);
+ }
+ bool is_zero_datetime() const { return m_is_zero_datetime; }
+ const struct timeval &tv() const
+ {
+ DBUG_ASSERT(!is_zero_datetime());
+ return Timestamp::tv();
+ }
+ void trunc(uint decimals)
+ {
+ if (!is_zero_datetime())
+ Timestamp::trunc(decimals);
+ }
+ int cmp(const Timestamp_or_zero_datetime &other) const
+ {
+ if (is_zero_datetime())
+ return other.is_zero_datetime() ? 0 : -1;
+ if (other.is_zero_datetime())
+ return 1;
+ return Timestamp::cmp(other);
+ }
+ bool to_TIME(THD *thd, MYSQL_TIME *to, date_mode_t fuzzydate) const;
+ /*
+ Convert to native format:
+ - Real timestamps are encoded the same way as Field_timestamp2 stores
+ values (big endian seconds followed by big endian microseconds)
+ - Zero datetime '0000-00-00 00:00:00.000000' is encoded as empty string.
+ Two native values are binary comparable.
+ */
+ bool to_native(Native *to, uint decimals) const;
+};
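An illustration of the native encoding described above, assuming the usual Field_timestamp2 layout (4 bytes of big-endian seconds followed by 0..3 bytes of big-endian fraction, 3 bytes for a scale of 6):

    //  decimals=6, 1000000000 seconds + 1 microsecond since epoch:
    //    native= 3B 9A CA 00 00 00 01   (7 bytes)
    //  zero datetime '0000-00-00 00:00:00.000000':
    //    native= ''                     (empty string, 0 bytes)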
+
+
+/**
+ A helper class to store non-null MariaDB TIMESTAMP values in
+ the native binary encoded representation.
+*/
+class Timestamp_or_zero_datetime_native:
+ public NativeBuffer<STRING_BUFFER_TIMESTAMP_BINARY_SIZE>
+{
+public:
+ Timestamp_or_zero_datetime_native() { }
+ Timestamp_or_zero_datetime_native(const Timestamp_or_zero_datetime &ts,
+ uint decimals)
+ {
+ if (ts.to_native(this, decimals))
+ length(0); // safety
+ }
+ int save_in_field(Field *field, uint decimals) const;
+ Datetime to_datetime(THD *thd) const
+ {
+ return is_zero_datetime() ?
+ Datetime() :
+ Datetime(thd, Timestamp_or_zero_datetime(*this).tv());
+ }
+ bool is_zero_datetime() const
+ {
+ return length() == 0;
+ }
+};
+
+
+/**
+ A helper class to store nullable MariaDB TIMESTAMP values in
+ the native binary encoded representation.
+*/
+class Timestamp_or_zero_datetime_native_null: public Timestamp_or_zero_datetime_native,
+ public Null_flag
+{
+public:
+ // With optional data type conversion
+ Timestamp_or_zero_datetime_native_null(THD *thd, Item *item, bool conv);
+ // Without data type conversion: item is known to be of the TIMESTAMP type
+ Timestamp_or_zero_datetime_native_null(THD *thd, Item *item)
+ :Timestamp_or_zero_datetime_native_null(thd, item, false)
+ { }
+ Datetime to_datetime(THD *thd) const
+ {
+ return is_null() ? Datetime() :
+ Timestamp_or_zero_datetime_native::to_datetime(thd);
+ }
+ void to_TIME(THD *thd, MYSQL_TIME *to)
+ {
+ DBUG_ASSERT(!is_null());
+ Datetime::Options opt(TIME_CONV_NONE, TIME_FRAC_NONE);
+ Timestamp_or_zero_datetime(*this).to_TIME(thd, to, opt);
+ }
+ bool is_zero_datetime() const
+ {
+ DBUG_ASSERT(!is_null());
+ return Timestamp_or_zero_datetime_native::is_zero_datetime();
+ }
};
+
/*
Flags for collation aggregation modes, used in TDCollation::agg():
@@ -445,7 +2522,6 @@ public:
#define MY_COLL_CMP_CONV (MY_COLL_ALLOW_CONV | MY_COLL_DISALLOW_NONE)
-#define my_charset_numeric my_charset_latin1
#define MY_REPERTOIRE_NUMERIC MY_REPERTOIRE_ASCII
@@ -834,6 +2910,14 @@ public:
};
+class Type_cmp_attributes
+{
+public:
+ virtual ~Type_cmp_attributes() { }
+ virtual CHARSET_INFO *compare_collation() const= 0;
+};
+
+
class Type_cast_attributes
{
CHARSET_INFO *m_charset;
@@ -886,28 +2970,67 @@ public:
};
-class Record_addr
+class Bit_addr
{
-public:
- uchar *ptr; // Position to field in record
/**
- Byte where the @c NULL bit is stored inside a record. If this Field is a
- @c NOT @c NULL field, this member is @c NULL.
+ Byte where the bit is stored inside a record.
+ If the corresponding Field is a NOT NULL field, this member is NULL.
+ */
+ uchar *m_ptr;
+ /**
+ Offset of the bit inside m_ptr[0], in the range 0..7.
*/
- uchar *null_ptr;
- uchar null_bit; // Bit used to test null bit
+ uchar m_offs;
+public:
+ Bit_addr()
+ :m_ptr(NULL),
+ m_offs(0)
+ { }
+ Bit_addr(uchar *ptr, uchar offs)
+ :m_ptr(ptr), m_offs(offs)
+ {
+ DBUG_ASSERT(ptr || offs == 0);
+ DBUG_ASSERT(offs < 8);
+ }
+ Bit_addr(bool maybe_null)
+ :m_ptr(maybe_null ? (uchar *) "" : NULL),
+ m_offs(0)
+ { }
+ uchar *ptr() const { return m_ptr; }
+ uchar offs() const { return m_offs; }
+ uchar bit() const { return m_ptr ? ((uchar) 1) << m_offs : 0; }
+ void inc()
+ {
+ DBUG_ASSERT(m_ptr);
+ m_ptr+= (m_offs == 7);
+ m_offs= (m_offs + 1) & 7;
+ }
+};
+
+
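// Illustrative sketch (not part of the patch): the wrap-around advance that
// Bit_addr::inc() performs above, shown on plain variables. Consecutive bit
// positions walk bits 0..7 within one byte and then move on to the next byte;
// the variable names below are hypothetical.
#include <cstdio>

int main()
{
  unsigned byte_index= 0, bit_offset= 0;
  for (int field= 0; field < 10; field++)
  {
    std::printf("field %d -> byte %u, bit mask 0x%02X\n",
                field, byte_index, 1U << bit_offset);
    byte_index+= (bit_offset == 7);       // advance to the next byte...
    bit_offset= (bit_offset + 1) & 7;     // ...only after bit 7, then wrap
  }
  return 0;
}
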
+class Record_addr
+{
+ uchar *m_ptr; // Position of the field in the record
+ Bit_addr m_null; // Position and offset of the null bit
+public:
Record_addr(uchar *ptr_arg,
uchar *null_ptr_arg,
uchar null_bit_arg)
- :ptr(ptr_arg),
- null_ptr(null_ptr_arg),
- null_bit(null_bit_arg)
+ :m_ptr(ptr_arg),
+ m_null(null_ptr_arg, null_bit_arg)
+ { }
+ Record_addr(uchar *ptr, const Bit_addr &null)
+ :m_ptr(ptr),
+ m_null(null)
{ }
Record_addr(bool maybe_null)
- :ptr(NULL),
- null_ptr(maybe_null ? (uchar*) "" : 0),
- null_bit(0)
+ :m_ptr(NULL),
+ m_null(maybe_null)
{ }
+ uchar *ptr() const { return m_ptr; }
+ const Bit_addr &null() const { return m_null; }
+ uchar *null_ptr() const { return m_null.ptr(); }
+ uchar null_bit() const { return m_null.bit(); }
};
@@ -982,6 +3105,9 @@ public:
class Type_handler
{
protected:
+ static const Name m_version_default;
+ static const Name m_version_mysql56;
+ static const Name m_version_mariadb53;
String *print_item_value_csstr(THD *thd, Item *item, String *str) const;
String *print_item_value_temporal(THD *thd, Item *item, String *str,
const Name &type_name, String *buf) const;
@@ -1003,6 +3129,7 @@ protected:
bool Item_send_double(Item *item, Protocol *protocol, st_value *buf) const;
bool Item_send_time(Item *item, Protocol *protocol, st_value *buf) const;
bool Item_send_date(Item *item, Protocol *protocol, st_value *buf) const;
+ bool Item_send_timestamp(Item *item, Protocol *protocol, st_value *buf) const;
bool Item_send_datetime(Item *item, Protocol *protocol, st_value *buf) const;
bool Column_definition_prepare_stage2_legacy(Column_definition *c,
enum_field_types type)
@@ -1014,6 +3141,7 @@ protected:
enum_field_types type)
const;
public:
+ static const Type_handler *odbc_literal_type_handler(const LEX_CSTRING *str);
static const Type_handler *blob_type_handler(uint max_octet_length);
static const Type_handler *string_type_handler(uint max_octet_length);
static const Type_handler *bit_and_int_mixture_handler(uint max_char_len);
@@ -1047,8 +3175,36 @@ public:
const Type_handler *h2);
virtual const Name name() const= 0;
+ virtual const Name version() const { return m_version_default; }
virtual enum_field_types field_type() const= 0;
virtual enum_field_types real_field_type() const { return field_type(); }
+ /**
+ Type code which is used for merging of traditional data types for result
+ (for UNION and for hybrid functions such as COALESCE).
+ Mapping can be done both ways: old->new, new->old, depending
+ on the particular data type implementation:
+ - type_handler_var_string (MySQL-4.1 old VARCHAR) is converted to
+ new VARCHAR before merging.
+ field_type_merge_rules[][] returns new VARCHAR.
+ - type_handler_newdate is converted to old DATE before merging.
+ field_type_merge_rules[][] returns NEWDATE.
+ - Temporal type_handler_xxx2 (new MySQL-5.6 types) are converted to
+ corresponding old type codes before merging (e.g. TIME2->TIME).
+ field_type_merge_rules[][] returns old type codes (e.g. TIME).
+ Then the old type codes are supposed to convert to new type codes somehow,
+ but they do not, so UNION and COALESCE create columns of the old types.
+ This is a bug and should be fixed eventually.
+ */
+ virtual enum_field_types traditional_merge_field_type() const
+ {
+ DBUG_ASSERT(is_traditional_type());
+ return field_type();
+ }
+ virtual enum_field_types type_code_for_protocol() const
+ {
+ return field_type();
+ }
+ virtual protocol_send_type_t protocol_send_type() const= 0;
virtual Item_result result_type() const= 0;
virtual Item_result cmp_type() const= 0;
virtual enum_mysql_timestamp_type mysql_timestamp_type() const
@@ -1059,6 +3215,25 @@ public:
{
return false;
}
+ virtual bool is_order_clause_position_type() const
+ {
+ return false;
+ }
+ virtual bool is_limit_clause_valid_type() const
+ {
+ return false;
+ }
+ /*
+ Returns true if this data type supports the hack by which
+ WHERE notnull_column IS NULL
+ finds zero values, e.g.:
+ WHERE date_notnull_column IS NULL ->
+ WHERE date_notnull_column = '0000-00-00'
+ */
+ virtual bool cond_notnull_field_isnull_to_field_eq_zero() const
+ {
+ return false;
+ }
/**
Check whether a field type can be partially indexed by a key.
@param type field type
@@ -1073,6 +3248,7 @@ public:
{
return false;
}
+ virtual uint max_octet_length() const { return 0; }
/**
Prepared statement long data:
Check whether this parameter data type is compatible with long data.
@@ -1081,6 +3257,10 @@ public:
*/
virtual bool is_param_long_data_type() const { return false; }
virtual const Type_handler *type_handler_for_comparison() const= 0;
+ virtual const Type_handler *type_handler_for_native_format() const
+ {
+ return this;
+ }
virtual const Type_handler *type_handler_for_item_field() const
{
return this;
@@ -1097,6 +3277,12 @@ public:
{
return this;
}
+ virtual const Type_handler *type_handler_for_system_time() const
+ {
+ return this;
+ }
+ virtual int
+ stored_field_cmp_to_item(THD *thd, Field *field, Item *item) const= 0;
virtual CHARSET_INFO *charset_for_protocol(const Item *item) const;
virtual const Type_handler*
type_handler_adjusted_to_max_octet_length(uint max_octet_length,
@@ -1123,9 +3309,10 @@ public:
virtual bool can_return_text() const { return true; }
virtual bool can_return_date() const { return true; }
virtual bool can_return_time() const { return true; }
+ virtual bool is_bool_type() const { return false; }
virtual bool is_general_purpose_string_type() const { return false; }
- virtual uint Item_time_precision(Item *item) const;
- virtual uint Item_datetime_precision(Item *item) const;
+ virtual uint Item_time_precision(THD *thd, Item *item) const;
+ virtual uint Item_datetime_precision(THD *thd, Item *item) const;
virtual uint Item_decimal_scale(const Item *item) const;
virtual uint Item_decimal_precision(const Item *item) const= 0;
/*
@@ -1169,7 +3356,32 @@ public:
virtual Field *make_conversion_table_field(TABLE *TABLE,
uint metadata,
const Field *target) const= 0;
+ /*
+ Performs the final data type validation for a UNION element,
+ after the regular "aggregation for result" has been done.
+ */
+ virtual bool union_element_finalize(const Item * item) const
+ {
+ return false;
+ }
+ // Automatic upgrade, e.g. for ALTER TABLE t1 FORCE
+ virtual void Column_definition_implicit_upgrade(Column_definition *c) const
+ { }
+ // Validate CHECK constraint after the parser
+ virtual bool Column_definition_validate_check_constraint(THD *thd,
+ Column_definition *c)
+ const;
+ // Fix attributes after the parser
virtual bool Column_definition_fix_attributes(Column_definition *c) const= 0;
+ /*
+ Fix attributes from an existing field. Used for:
+ - ALTER TABLE (for columns that do not change)
+ - DECLARE var TYPE OF t1.col1; (anchored SP variables)
+ */
+ virtual void Column_definition_reuse_fix_attributes(THD *thd,
+ Column_definition *c,
+ const Field *field) const
+ { }
virtual bool Column_definition_prepare_stage1(THD *thd,
MEM_ROOT *mem_root,
Column_definition *c,
@@ -1208,6 +3420,23 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ virtual Field *
+ make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const= 0;
+ virtual void
+ Column_definition_attributes_frm_pack(const Column_definition_attributes *at,
+ uchar *buff) const;
+ virtual bool
+ Column_definition_attributes_frm_unpack(Column_definition_attributes *attr,
+ TABLE_SHARE *share,
+ const uchar *buffer,
+ LEX_CUSTRING *gis_options) const;
+
virtual void make_sort_key(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const= 0;
@@ -1217,7 +3446,8 @@ public:
virtual uint32 max_display_length(const Item *item) const= 0;
virtual uint32 calc_pack_length(uint32 length) const= 0;
- virtual bool Item_save_in_value(Item *item, st_value *value) const= 0;
+ virtual void Item_update_null_value(Item *item) const= 0;
+ virtual bool Item_save_in_value(THD *thd, Item *item, st_value *value) const= 0;
virtual void Item_param_setup_conversion(THD *thd, Item_param *) const {}
virtual void Item_param_set_param_func(Item_param *param,
uchar **pos, ulong len) const;
@@ -1225,6 +3455,9 @@ public:
Item_param *param,
const Type_all_attributes *attr,
const st_value *value) const= 0;
+ virtual bool Item_param_val_native(THD *thd,
+ Item_param *item,
+ Native *to) const;
virtual bool Item_send(Item *item, Protocol *p, st_value *buf) const= 0;
virtual int Item_save_in_field(Item *item, Field *field,
bool no_conversions) const= 0;
@@ -1295,13 +3528,52 @@ public:
Item *src,
const Item *cmp) const= 0;
virtual Item_cache *Item_get_cache(THD *thd, const Item *item) const= 0;
+ /**
+ A builder for literals with a data type name prefix, e.g.:
+ TIME'00:00:00', DATE'2001-01-01', TIMESTAMP'2001-01-01 00:00:00'.
+ @param thd The current thread
+ @param str Character literal
+ @param length Length of str
+ @param cs Character set of the string
+ @param send_error Whether to generate an error on failure
+
+ @retval A pointer to a new Item on success
+ NULL on error (wrong literal value, EOM)
+ */
+ virtual Item_literal *create_literal_item(THD *thd,
+ const char *str, size_t length,
+ CHARSET_INFO *cs,
+ bool send_error) const
+ {
+ DBUG_ASSERT(0);
+ return NULL;
+ }
+ Item_literal *create_literal_item(THD *thd, const String *str,
+ bool send_error) const
+ {
+ return create_literal_item(thd, str->ptr(), str->length(), str->charset(),
+ send_error);
+ }
virtual Item *create_typecast_item(THD *thd, Item *item,
const Type_cast_attributes &attr) const
{
DBUG_ASSERT(0);
return NULL;
}
+ virtual Item_copy *create_item_copy(THD *thd, Item *item) const;
+ virtual int cmp_native(const Native &a, const Native &b) const
+ {
+ DBUG_ASSERT(0);
+ return 0;
+ }
virtual bool set_comparator_func(Arg_comparator *cmp) const= 0;
+ virtual bool Item_const_eq(const Item_const *a, const Item_const *b,
+ bool binary_cmp) const
+ {
+ return false;
+ }
+ virtual bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr,
+ Item *a, Item *b) const= 0;
virtual bool Item_hybrid_func_fix_attributes(THD *thd,
const char *name,
Type_handler_hybrid_field_type *,
@@ -1318,9 +3590,23 @@ public:
virtual
bool Item_sum_variance_fix_length_and_dec(Item_sum_variance *) const= 0;
+ virtual bool Item_val_native_with_conversion(THD *thd, Item *item,
+ Native *to) const
+ {
+ return true;
+ }
+ virtual bool Item_val_native_with_conversion_result(THD *thd, Item *item,
+ Native *to) const
+ {
+ return true;
+ }
+
virtual bool Item_val_bool(Item *item) const= 0;
- virtual bool Item_get_date(Item *item, MYSQL_TIME *ltime,
- ulonglong fuzzydate) const= 0;
+ virtual void Item_get_date(THD *thd, Item *item,
+ Temporal::Warn *buff, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate) const= 0;
+ bool Item_get_date_with_warn(THD *thd, Item *item, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate) const;
virtual longlong Item_val_int_signed_typecast(Item *item) const= 0;
virtual longlong Item_val_int_unsigned_typecast(Item *item) const= 0;
@@ -1341,9 +3627,15 @@ public:
Item_func_hybrid_field_type *,
my_decimal *) const= 0;
virtual
- bool Item_func_hybrid_field_type_get_date(Item_func_hybrid_field_type *,
+ void Item_func_hybrid_field_type_get_date(THD *,
+ Item_func_hybrid_field_type *,
+ Temporal::Warn *,
MYSQL_TIME *,
- ulonglong fuzzydate) const= 0;
+ date_mode_t fuzzydate) const= 0;
+ bool Item_func_hybrid_field_type_get_date_with_warn(THD *thd,
+ Item_func_hybrid_field_type *,
+ MYSQL_TIME *,
+ date_mode_t) const;
virtual
String *Item_func_min_max_val_str(Item_func_min_max *, String *) const= 0;
virtual
@@ -1354,8 +3646,8 @@ public:
my_decimal *Item_func_min_max_val_decimal(Item_func_min_max *,
my_decimal *) const= 0;
virtual
- bool Item_func_min_max_get_date(Item_func_min_max*,
- MYSQL_TIME *, ulonglong fuzzydate) const= 0;
+ bool Item_func_min_max_get_date(THD *thd, Item_func_min_max*,
+ MYSQL_TIME *, date_mode_t fuzzydate) const= 0;
virtual bool
Item_func_between_fix_length_and_dec(Item_func_between *func) const= 0;
virtual longlong
@@ -1413,6 +3705,10 @@ public:
virtual bool
Vers_history_point_resolve_unit(THD *thd, Vers_history_point *point) const;
+
+ static bool Charsets_are_compatible(const CHARSET_INFO *old_ci,
+ const CHARSET_INFO *new_ci,
+ bool part_of_a_key);
};
@@ -1438,6 +3734,11 @@ public:
DBUG_ASSERT(0);
return MYSQL_TYPE_NULL;
};
+ protocol_send_type_t protocol_send_type() const
+ {
+ DBUG_ASSERT(0);
+ return PROTOCOL_SEND_STRING;
+ }
Item_result result_type() const
{
return ROW_RESULT;
@@ -1447,6 +3748,11 @@ public:
return ROW_RESULT;
}
const Type_handler *type_handler_for_comparison() const;
+ int stored_field_cmp_to_item(THD *thd, Field *field, Item *item) const
+ {
+ DBUG_ASSERT(0);
+ return 0;
+ }
bool subquery_type_allows_materialization(const Item *inner,
const Item *outer) const
{
@@ -1469,6 +3775,12 @@ public:
{
return false;
}
+ void Column_definition_reuse_fix_attributes(THD *thd,
+ Column_definition *c,
+ const Field *field) const
+ {
+ DBUG_ASSERT(0);
+ }
bool Column_definition_prepare_stage1(THD *thd,
MEM_ROOT *mem_root,
Column_definition *c,
@@ -1497,6 +3809,13 @@ public:
DBUG_ASSERT(0);
return NULL;
}
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
void make_sort_key(uchar *to, Item *item,
const SORT_FIELD_ATTR *sort_field,
Sort_param *param) const
@@ -1518,12 +3837,14 @@ public:
DBUG_ASSERT(0);
return 0;
}
+ bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr,
+ Item *a, Item *b) const;
uint Item_decimal_precision(const Item *item) const
{
DBUG_ASSERT(0);
return DECIMAL_MAX_PRECISION;
}
- bool Item_save_in_value(Item *item, st_value *value) const;
+ bool Item_save_in_value(THD *thd, Item *item, st_value *value) const;
bool Item_param_set_from_value(THD *thd,
Item_param *param,
const Type_all_attributes *attr,
@@ -1533,6 +3854,7 @@ public:
DBUG_ASSERT(0);
return true;
}
+ void Item_update_null_value(Item *item) const;
int Item_save_in_field(Item *item, Field *field, bool no_conversions) const
{
DBUG_ASSERT(0);
@@ -1549,6 +3871,11 @@ public:
}
Item *make_const_item_for_comparison(THD *, Item *src, const Item *cmp) const;
Item_cache *Item_get_cache(THD *thd, const Item *item) const;
+ Item_copy *create_item_copy(THD *thd, Item *item) const
+ {
+ DBUG_ASSERT(0);
+ return NULL;
+ }
bool set_comparator_func(Arg_comparator *cmp) const;
bool Item_hybrid_func_fix_attributes(THD *thd,
const char *name,
@@ -1584,10 +3911,12 @@ public:
DBUG_ASSERT(0);
return false;
}
- bool Item_get_date(Item *item, MYSQL_TIME *ltime, ulonglong fuzzydate) const
+ void Item_get_date(THD *thd, Item *item,
+ Temporal::Warn *warn, MYSQL_TIME *ltime,
+ date_mode_t fuzzydate) const
{
DBUG_ASSERT(0);
- return true;
+ set_zero_time(ltime, MYSQL_TIMESTAMP_NONE);
}
longlong Item_val_int_signed_typecast(Item *item) const
{
@@ -1629,12 +3958,14 @@ public:
DBUG_ASSERT(0);
return NULL;
}
- bool Item_func_hybrid_field_type_get_date(Item_func_hybrid_field_type *,
- MYSQL_TIME *,
- ulonglong fuzzydate) const
+ void Item_func_hybrid_field_type_get_date(THD *,
+ Item_func_hybrid_field_type *,
+ Temporal::Warn *,
+ MYSQL_TIME *ltime,
+ date_mode_t fuzzydate) const
{
DBUG_ASSERT(0);
- return true;
+ set_zero_time(ltime, MYSQL_TIMESTAMP_NONE);
}
String *Item_func_min_max_val_str(Item_func_min_max *, String *) const
@@ -1658,8 +3989,8 @@ public:
DBUG_ASSERT(0);
return NULL;
}
- bool Item_func_min_max_get_date(Item_func_min_max*,
- MYSQL_TIME *, ulonglong fuzzydate) const
+ bool Item_func_min_max_get_date(THD *thd, Item_func_min_max*,
+ MYSQL_TIME *, date_mode_t fuzzydate) const
{
DBUG_ASSERT(0);
return true;
@@ -1743,8 +4074,8 @@ public:
longlong Item_func_min_max_val_int(Item_func_min_max *) const;
my_decimal *Item_func_min_max_val_decimal(Item_func_min_max *,
my_decimal *) const;
- bool Item_func_min_max_get_date(Item_func_min_max*,
- MYSQL_TIME *, ulonglong fuzzydate) const;
+ bool Item_func_min_max_get_date(THD *thd, Item_func_min_max*,
+ MYSQL_TIME *, date_mode_t fuzzydate) const;
virtual ~Type_handler_numeric() { }
bool can_change_cond_ref_to_const(Item_bool_func2 *target,
Item *target_expr, Item *target_value,
@@ -1764,6 +4095,10 @@ public:
Item_result cmp_type() const { return REAL_RESULT; }
virtual ~Type_handler_real_result() {}
const Type_handler *type_handler_for_comparison() const;
+ void Column_definition_reuse_fix_attributes(THD *thd,
+ Column_definition *c,
+ const Field *field) const;
+ int stored_field_cmp_to_item(THD *thd, Field *field, Item *item) const;
bool subquery_type_allows_materialization(const Item *inner,
const Item *outer) const;
void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
@@ -1771,12 +4106,17 @@ public:
void sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const;
+ bool Item_const_eq(const Item_const *a, const Item_const *b,
+ bool binary_cmp) const;
+ bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr,
+ Item *a, Item *b) const;
uint Item_decimal_precision(const Item *item) const;
- bool Item_save_in_value(Item *item, st_value *value) const;
+ bool Item_save_in_value(THD *thd, Item *item, st_value *value) const;
bool Item_param_set_from_value(THD *thd,
Item_param *param,
const Type_all_attributes *attr,
const st_value *value) const;
+ void Item_update_null_value(Item *item) const;
int Item_save_in_field(Item *item, Field *field, bool no_conversions) const;
Item *make_const_item_for_comparison(THD *, Item *src, const Item *cmp) const;
Item_cache *Item_get_cache(THD *thd, const Item *item) const;
@@ -1795,7 +4135,8 @@ public:
bool Item_func_signed_fix_length_and_dec(Item_func_signed *item) const;
bool Item_func_unsigned_fix_length_and_dec(Item_func_unsigned *item) const;
bool Item_val_bool(Item *item) const;
- bool Item_get_date(Item *item, MYSQL_TIME *ltime, ulonglong fuzzydate) const;
+ void Item_get_date(THD *thd, Item *item, Temporal::Warn *warn,
+ MYSQL_TIME *ltime, date_mode_t fuzzydate) const;
longlong Item_val_int_signed_typecast(Item *item) const;
longlong Item_val_int_unsigned_typecast(Item *item) const;
String *Item_func_hex_val_str_ascii(Item_func_hex *item, String *str) const;
@@ -1808,9 +4149,11 @@ public:
my_decimal *Item_func_hybrid_field_type_val_decimal(
Item_func_hybrid_field_type *,
my_decimal *) const;
- bool Item_func_hybrid_field_type_get_date(Item_func_hybrid_field_type *,
+ void Item_func_hybrid_field_type_get_date(THD *,
+ Item_func_hybrid_field_type *,
+ Temporal::Warn *,
MYSQL_TIME *,
- ulonglong fuzzydate) const;
+ date_mode_t fuzzydate) const;
String *Item_func_min_max_val_str(Item_func_min_max *, String *) const;
longlong Item_func_between_val_int(Item_func_between *func) const;
cmp_item *make_cmp_item(THD *thd, CHARSET_INFO *cs) const;
@@ -1833,10 +4176,19 @@ public:
class Type_handler_decimal_result: public Type_handler_numeric
{
public:
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_STRING;
+ }
Item_result result_type() const { return DECIMAL_RESULT; }
Item_result cmp_type() const { return DECIMAL_RESULT; }
virtual ~Type_handler_decimal_result() {};
const Type_handler *type_handler_for_comparison() const;
+ int stored_field_cmp_to_item(THD *thd, Field *field, Item *item) const
+ {
+ VDec item_val(item);
+ return item_val.is_null() ? 0 : my_decimal(field).cmp(item_val.ptr());
+ }
bool subquery_type_allows_materialization(const Item *inner,
const Item *outer) const;
Field *make_num_distinct_aggregator_field(MEM_ROOT *, const Item *) const;
@@ -1848,8 +4200,16 @@ public:
uint32 max_display_length(const Item *item) const;
Item *create_typecast_item(THD *thd, Item *item,
const Type_cast_attributes &attr) const;
+ bool Item_const_eq(const Item_const *a, const Item_const *b,
+ bool binary_cmp) const;
+ bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr,
+ Item *a, Item *b) const
+ {
+ VDec va(a), vb(b);
+ return va.ptr() && vb.ptr() && !va.cmp(vb);
+ }
uint Item_decimal_precision(const Item *item) const;
- bool Item_save_in_value(Item *item, st_value *value) const;
+ bool Item_save_in_value(THD *thd, Item *item, st_value *value) const;
void Item_param_set_param_func(Item_param *param,
uchar **pos, ulong len) const;
bool Item_param_set_from_value(THD *thd,
@@ -1860,6 +4220,7 @@ public:
{
return Item_send_str(item, protocol, buf);
}
+ void Item_update_null_value(Item *item) const;
int Item_save_in_field(Item *item, Field *field, bool no_conversions) const;
Item *make_const_item_for_comparison(THD *, Item *src, const Item *cmp) const;
Item_cache *Item_get_cache(THD *thd, const Item *item) const;
@@ -1873,10 +4234,17 @@ public:
bool Item_sum_sum_fix_length_and_dec(Item_sum_sum *) const;
bool Item_sum_avg_fix_length_and_dec(Item_sum_avg *) const;
bool Item_sum_variance_fix_length_and_dec(Item_sum_variance *) const;
- bool Item_val_bool(Item *item) const;
- bool Item_get_date(Item *item, MYSQL_TIME *ltime, ulonglong fuzzydate) const;
+ bool Item_val_bool(Item *item) const
+ {
+ return VDec(item).to_bool();
+ }
+ void Item_get_date(THD *thd, Item *item, Temporal::Warn *warn,
+ MYSQL_TIME *ltime, date_mode_t fuzzydate) const;
longlong Item_val_int_signed_typecast(Item *item) const;
- longlong Item_val_int_unsigned_typecast(Item *item) const;
+ longlong Item_val_int_unsigned_typecast(Item *item) const
+ {
+ return VDec(item).to_longlong(true);
+ }
String *Item_func_hex_val_str_ascii(Item_func_hex *item, String *str) const;
String *Item_func_hybrid_field_type_val_str(Item_func_hybrid_field_type *,
String *) const;
@@ -1887,9 +4255,11 @@ public:
my_decimal *Item_func_hybrid_field_type_val_decimal(
Item_func_hybrid_field_type *,
my_decimal *) const;
- bool Item_func_hybrid_field_type_get_date(Item_func_hybrid_field_type *,
+ void Item_func_hybrid_field_type_get_date(THD *,
+ Item_func_hybrid_field_type *,
+ Temporal::Warn *,
MYSQL_TIME *,
- ulonglong fuzzydate) const;
+ date_mode_t fuzzydate) const;
String *Item_func_min_max_val_str(Item_func_min_max *, String *) const;
longlong Item_func_between_val_int(Item_func_between *func) const;
cmp_item *make_cmp_item(THD *thd, CHARSET_INFO *cs) const;
@@ -2038,8 +4408,11 @@ class Type_handler_int_result: public Type_handler_numeric
public:
Item_result result_type() const { return INT_RESULT; }
Item_result cmp_type() const { return INT_RESULT; }
+ bool is_order_clause_position_type() const { return true; }
+ bool is_limit_clause_valid_type() const { return true; }
virtual ~Type_handler_int_result() {}
const Type_handler *type_handler_for_comparison() const;
+ int stored_field_cmp_to_item(THD *thd, Field *field, Item *item) const;
bool subquery_type_allows_materialization(const Item *inner,
const Item *outer) const;
Field *make_num_distinct_aggregator_field(MEM_ROOT *, const Item *) const;
@@ -2048,12 +4421,17 @@ public:
void sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const;
+ bool Item_const_eq(const Item_const *a, const Item_const *b,
+ bool binary_cmp) const;
+ bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr,
+ Item *a, Item *b) const;
uint Item_decimal_precision(const Item *item) const;
- bool Item_save_in_value(Item *item, st_value *value) const;
+ bool Item_save_in_value(THD *thd, Item *item, st_value *value) const;
bool Item_param_set_from_value(THD *thd,
Item_param *param,
const Type_all_attributes *attr,
const st_value *value) const;
+ void Item_update_null_value(Item *item) const;
int Item_save_in_field(Item *item, Field *field, bool no_conversions) const;
Item *make_const_item_for_comparison(THD *, Item *src, const Item *cmp) const;
Item_cache *Item_get_cache(THD *thd, const Item *item) const;
@@ -2068,7 +4446,8 @@ public:
bool Item_sum_avg_fix_length_and_dec(Item_sum_avg *) const;
bool Item_sum_variance_fix_length_and_dec(Item_sum_variance *) const;
bool Item_val_bool(Item *item) const;
- bool Item_get_date(Item *item, MYSQL_TIME *ltime, ulonglong fuzzydate) const;
+ void Item_get_date(THD *thd, Item *item, Temporal::Warn *warn,
+ MYSQL_TIME *ltime, date_mode_t fuzzydate) const;
longlong Item_val_int_signed_typecast(Item *item) const;
longlong Item_val_int_unsigned_typecast(Item *item) const;
String *Item_func_hex_val_str_ascii(Item_func_hex *item, String *str) const;
@@ -2081,9 +4460,11 @@ public:
my_decimal *Item_func_hybrid_field_type_val_decimal(
Item_func_hybrid_field_type *,
my_decimal *) const;
- bool Item_func_hybrid_field_type_get_date(Item_func_hybrid_field_type *,
+ void Item_func_hybrid_field_type_get_date(THD *,
+ Item_func_hybrid_field_type *,
+ Temporal::Warn *,
MYSQL_TIME *,
- ulonglong fuzzydate) const;
+ date_mode_t fuzzydate) const;
String *Item_func_min_max_val_str(Item_func_min_max *, String *) const;
longlong Item_func_between_val_int(Item_func_between *func) const;
cmp_item *make_cmp_item(THD *thd, CHARSET_INFO *cs) const;
@@ -2099,6 +4480,7 @@ public:
bool Item_func_mul_fix_length_and_dec(Item_func_mul *) const;
bool Item_func_div_fix_length_and_dec(Item_func_div *) const;
bool Item_func_mod_fix_length_and_dec(Item_func_mod *) const;
+
};
@@ -2127,6 +4509,8 @@ public:
void sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const;
+ bool Item_const_eq(const Item_const *a, const Item_const *b,
+ bool binary_cmp) const;
bool Item_param_set_from_value(THD *thd,
Item_param *param,
const Type_all_attributes *attr,
@@ -2145,7 +4529,8 @@ public:
bool Item_sum_avg_fix_length_and_dec(Item_sum_avg *) const;
bool Item_sum_variance_fix_length_and_dec(Item_sum_variance *) const;
bool Item_val_bool(Item *item) const;
- bool Item_get_date(Item *item, MYSQL_TIME *ltime, ulonglong fuzzydate) const;
+ void Item_get_date(THD *thd, Item *item, Temporal::Warn *warn,
+ MYSQL_TIME *ltime, date_mode_t fuzzydate) const;
longlong Item_val_int_signed_typecast(Item *item) const;
longlong Item_val_int_unsigned_typecast(Item *item) const;
String *Item_func_hex_val_str_ascii(Item_func_hex *item, String *str) const;
@@ -2158,18 +4543,14 @@ public:
my_decimal *Item_func_hybrid_field_type_val_decimal(
Item_func_hybrid_field_type *,
my_decimal *) const;
- bool Item_func_hybrid_field_type_get_date(Item_func_hybrid_field_type *,
+ void Item_func_hybrid_field_type_get_date(THD *,
+ Item_func_hybrid_field_type *,
+ Temporal::Warn *,
MYSQL_TIME *,
- ulonglong fuzzydate) const;
- String *Item_func_min_max_val_str(Item_func_min_max *, String *) const;
- double Item_func_min_max_val_real(Item_func_min_max *) const;
- longlong Item_func_min_max_val_int(Item_func_min_max *) const;
- my_decimal *Item_func_min_max_val_decimal(Item_func_min_max *,
- my_decimal *) const;
- bool Item_func_min_max_get_date(Item_func_min_max*,
- MYSQL_TIME *, ulonglong fuzzydate) const;
+ date_mode_t fuzzydate) const;
+ bool Item_func_min_max_get_date(THD *thd, Item_func_min_max*,
+ MYSQL_TIME *, date_mode_t fuzzydate) const;
bool Item_func_between_fix_length_and_dec(Item_func_between *func) const;
- longlong Item_func_between_val_int(Item_func_between *func) const;
bool Item_func_in_fix_comparator_compatible_types(THD *thd,
Item_func_in *) const;
bool Item_func_round_fix_length_and_dec(Item_func_round *) const;
@@ -2187,13 +4568,18 @@ public:
class Type_handler_string_result: public Type_handler
{
- uint Item_temporal_precision(Item *item, bool is_time) const;
+ uint Item_temporal_precision(THD *thd, Item *item, bool is_time) const;
public:
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_STRING;
+ }
Item_result result_type() const { return STRING_RESULT; }
Item_result cmp_type() const { return STRING_RESULT; }
CHARSET_INFO *charset_for_protocol(const Item *item) const;
virtual ~Type_handler_string_result() {}
const Type_handler *type_handler_for_comparison() const;
+ int stored_field_cmp_to_item(THD *thd, Field *field, Item *item) const;
const Type_handler *
type_handler_adjusted_to_max_octet_length(uint max_octet_length,
CHARSET_INFO *cs) const;
@@ -2202,6 +4588,7 @@ public:
void sortlength(THD *thd,
const Type_std_attributes *item,
SORT_FIELD_ATTR *attr) const;
+ bool union_element_finalize(const Item * item) const;
bool Column_definition_prepare_stage1(THD *thd,
MEM_ROOT *mem_root,
Column_definition *c,
@@ -2213,16 +4600,21 @@ public:
const Schema_specification_st *schema)
const;
uint32 max_display_length(const Item *item) const;
- uint Item_time_precision(Item *item) const
+ bool Item_const_eq(const Item_const *a, const Item_const *b,
+ bool binary_cmp) const;
+ bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr,
+ Item *a, Item *b) const;
+ uint Item_time_precision(THD *thd, Item *item) const
{
- return Item_temporal_precision(item, true);
+ return Item_temporal_precision(thd, item, true);
}
- uint Item_datetime_precision(Item *item) const
+ uint Item_datetime_precision(THD *thd, Item *item) const
{
- return Item_temporal_precision(item, false);
+ return Item_temporal_precision(thd, item, false);
}
uint Item_decimal_precision(const Item *item) const;
- bool Item_save_in_value(Item *item, st_value *value) const;
+ void Item_update_null_value(Item *item) const;
+ bool Item_save_in_value(THD *thd, Item *item, st_value *value) const;
void Item_param_setup_conversion(THD *thd, Item_param *) const;
void Item_param_set_param_func(Item_param *param,
uchar **pos, ulong len) const;
@@ -2260,7 +4652,8 @@ public:
bool Item_func_signed_fix_length_and_dec(Item_func_signed *item) const;
bool Item_func_unsigned_fix_length_and_dec(Item_func_unsigned *item) const;
bool Item_val_bool(Item *item) const;
- bool Item_get_date(Item *item, MYSQL_TIME *ltime, ulonglong fuzzydate) const;
+ void Item_get_date(THD *thd, Item *item, Temporal::Warn *warn,
+ MYSQL_TIME *ltime, date_mode_t fuzzydate) const;
longlong Item_val_int_signed_typecast(Item *item) const;
longlong Item_val_int_unsigned_typecast(Item *item) const;
String *Item_func_hex_val_str_ascii(Item_func_hex *item, String *str) const;
@@ -2273,16 +4666,18 @@ public:
my_decimal *Item_func_hybrid_field_type_val_decimal(
Item_func_hybrid_field_type *,
my_decimal *) const;
- bool Item_func_hybrid_field_type_get_date(Item_func_hybrid_field_type *,
+ void Item_func_hybrid_field_type_get_date(THD *,
+ Item_func_hybrid_field_type *,
+ Temporal::Warn *,
MYSQL_TIME *,
- ulonglong fuzzydate) const;
+ date_mode_t fuzzydate) const;
String *Item_func_min_max_val_str(Item_func_min_max *, String *) const;
double Item_func_min_max_val_real(Item_func_min_max *) const;
longlong Item_func_min_max_val_int(Item_func_min_max *) const;
my_decimal *Item_func_min_max_val_decimal(Item_func_min_max *,
my_decimal *) const;
- bool Item_func_min_max_get_date(Item_func_min_max*,
- MYSQL_TIME *, ulonglong fuzzydate) const;
+ bool Item_func_min_max_get_date(THD *thd, Item_func_min_max*,
+ MYSQL_TIME *, date_mode_t fuzzydate) const;
bool Item_func_between_fix_length_and_dec(Item_func_between *func) const;
longlong Item_func_between_val_int(Item_func_between *func) const;
bool Item_char_typecast_fix_length_and_dec(Item_char_typecast *) const;
@@ -2339,6 +4734,10 @@ public:
virtual ~Type_handler_tiny() {}
const Name name() const { return m_name_tiny; }
enum_field_types field_type() const { return MYSQL_TYPE_TINY; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_TINY;
+ }
const Type_limits_int *type_limits_int_by_unsigned_flag(bool unsigned_fl) const
{
return unsigned_fl ? &m_limits_uint8 : &m_limits_sint8;
@@ -2359,6 +4758,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
void Item_param_set_param_func(Item_param *param,
uchar **pos, ulong len) const;
};
@@ -2373,6 +4779,10 @@ public:
virtual ~Type_handler_short() {}
const Name name() const { return m_name_short; }
enum_field_types field_type() const { return MYSQL_TYPE_SHORT; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_SHORT;
+ }
bool Item_send(Item *item, Protocol *protocol, st_value *buf) const
{
return Item_send_short(item, protocol, buf);
@@ -2393,6 +4803,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
void Item_param_set_param_func(Item_param *param,
uchar **pos, ulong len) const;
};
@@ -2407,6 +4824,10 @@ public:
virtual ~Type_handler_long() {}
const Name name() const { return m_name_int; }
enum_field_types field_type() const { return MYSQL_TYPE_LONG; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_LONG;
+ }
const Type_limits_int *type_limits_int_by_unsigned_flag(bool unsigned_fl) const
{
return unsigned_fl ? &m_limits_uint32 : &m_limits_sint32;
@@ -2427,11 +4848,29 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
void Item_param_set_param_func(Item_param *param,
uchar **pos, ulong len) const;
};
+class Type_handler_bool: public Type_handler_long
+{
+ static const Name m_name_bool;
+public:
+ const Name name() const { return m_name_bool; }
+ bool is_bool_type() const { return true; }
+ void Item_update_null_value(Item *item) const;
+ bool Item_sum_hybrid_fix_length_and_dec(Item_sum_hybrid *) const;
+};
+
+
class Type_handler_longlong: public Type_handler_general_purpose_int
{
static const Name m_name_longlong;
@@ -2441,6 +4880,10 @@ public:
virtual ~Type_handler_longlong() {}
const Name name() const { return m_name_longlong; }
enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_LONGLONG;
+ }
const Type_limits_int *type_limits_int_by_unsigned_flag(bool unsigned_fl) const
{
return unsigned_fl ? &m_limits_uint64 : &m_limits_sint64;
@@ -2465,6 +4908,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
void Item_param_set_param_func(Item_param *param,
uchar **pos, ulong len) const;
};
@@ -2490,6 +4940,10 @@ public:
virtual ~Type_handler_int24() {}
const Name name() const { return m_name_mediumint; }
enum_field_types field_type() const { return MYSQL_TYPE_INT24; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_LONG;
+ }
bool Item_send(Item *item, Protocol *protocol, st_value *buf) const
{
return Item_send_long(item, protocol, buf);
@@ -2510,6 +4964,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -2520,6 +4981,10 @@ public:
virtual ~Type_handler_year() {}
const Name name() const { return m_name_year; }
enum_field_types field_type() const { return MYSQL_TYPE_YEAR; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_SHORT;
+ }
uint32 max_display_length(const Item *item) const;
uint32 calc_pack_length(uint32 length) const { return 1; }
bool Item_send(Item *item, Protocol *protocol, st_value *buf) const
@@ -2529,6 +4994,9 @@ public:
Field *make_conversion_table_field(TABLE *, uint metadata,
const Field *target) const;
bool Column_definition_fix_attributes(Column_definition *c) const;
+ void Column_definition_reuse_fix_attributes(THD *thd,
+ Column_definition *c,
+ const Field *field) const;
bool Column_definition_prepare_stage2(Column_definition *c,
handler *file,
ulonglong table_flags) const
@@ -2537,8 +5005,21 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
Item_cache *Item_get_cache(THD *thd, const Item *item) const;
- bool Item_get_date(Item *item, MYSQL_TIME *ltime, ulonglong fuzzydate) const;
+ void Item_get_date(THD *thd, Item *item, Temporal::Warn *warn,
+ MYSQL_TIME *ltime, date_mode_t fuzzydate) const;
+ void Item_func_hybrid_field_type_get_date(THD *,
+ Item_func_hybrid_field_type *item,
+ Temporal::Warn *,
+ MYSQL_TIME *to,
+ date_mode_t fuzzydate) const;
};
@@ -2549,6 +5030,10 @@ public:
virtual ~Type_handler_bit() {}
const Name name() const { return m_name_bit; }
enum_field_types field_type() const { return MYSQL_TYPE_BIT; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_STRING;
+ }
uint32 max_display_length(const Item *item) const;
uint32 calc_pack_length(uint32 length) const { return length / 8; }
bool Item_send(Item *item, Protocol *protocol, st_value *buf) const
@@ -2579,6 +5064,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
bool Vers_history_point_resolve_unit(THD *thd, Vers_history_point *p) const;
};
@@ -2590,6 +5082,10 @@ public:
virtual ~Type_handler_float() {}
const Name name() const { return m_name_float; }
enum_field_types field_type() const { return MYSQL_TYPE_FLOAT; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_FLOAT;
+ }
bool type_can_have_auto_increment_attribute() const { return true; }
uint32 max_display_length(const Item *item) const { return 25; }
uint32 calc_pack_length(uint32 length) const { return sizeof(float); }
@@ -2609,6 +5105,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
void Item_param_set_param_func(Item_param *param,
uchar **pos, ulong len) const;
};
@@ -2621,6 +5124,10 @@ public:
virtual ~Type_handler_double() {}
const Name name() const { return m_name_double; }
enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_DOUBLE;
+ }
bool type_can_have_auto_increment_attribute() const { return true; }
uint32 max_display_length(const Item *item) const { return 53; }
uint32 calc_pack_length(uint32 length) const { return sizeof(double); }
@@ -2641,6 +5148,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
void Item_param_set_param_func(Item_param *param,
uchar **pos, ulong len) const;
};
@@ -2653,12 +5167,20 @@ public:
virtual ~Type_handler_time_common() { }
const Name name() const { return m_name_time; }
enum_field_types field_type() const { return MYSQL_TYPE_TIME; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_TIME;
+ }
enum_mysql_timestamp_type mysql_timestamp_type() const
{
return MYSQL_TIMESTAMP_TIME;
}
+ Item_literal *create_literal_item(THD *thd, const char *str, size_t length,
+ CHARSET_INFO *cs, bool send_error) const;
Item *create_typecast_item(THD *thd, Item *item,
const Type_cast_attributes &attr) const;
+ bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr,
+ Item *a, Item *b) const;
uint Item_decimal_scale(const Item *item) const
{
return Item_decimal_scale_with_seconds(item);
@@ -2669,12 +5191,15 @@ public:
return Item_divisor_precision_increment_with_seconds(item);
}
const Type_handler *type_handler_for_comparison() const;
+ int stored_field_cmp_to_item(THD *thd, Field *field, Item *item) const;
+ void Column_definition_implicit_upgrade(Column_definition *c) const;
bool Column_definition_fix_attributes(Column_definition *c) const;
- bool Item_save_in_value(Item *item, st_value *value) const;
+ bool Item_save_in_value(THD *thd, Item *item, st_value *value) const;
bool Item_send(Item *item, Protocol *protocol, st_value *buf) const
{
return Item_send_time(item, protocol, buf);
}
+ void Item_update_null_value(Item *item) const;
int Item_save_in_field(Item *item, Field *field, bool no_conversions) const;
String *print_item_value(THD *thd, Item *item, String *str) const;
Item_cache *Item_get_cache(THD *thd, const Item *item) const;
@@ -2692,11 +5217,19 @@ public:
my_decimal *Item_func_hybrid_field_type_val_decimal(
Item_func_hybrid_field_type *,
my_decimal *) const;
- bool Item_func_hybrid_field_type_get_date(Item_func_hybrid_field_type *,
+ void Item_func_hybrid_field_type_get_date(THD *,
+ Item_func_hybrid_field_type *,
+ Temporal::Warn *,
MYSQL_TIME *,
- ulonglong fuzzydate) const;
- bool Item_func_min_max_get_date(Item_func_min_max*,
- MYSQL_TIME *, ulonglong fuzzydate) const;
+ date_mode_t fuzzydate) const;
+ String *Item_func_min_max_val_str(Item_func_min_max *, String *) const;
+ double Item_func_min_max_val_real(Item_func_min_max *) const;
+ longlong Item_func_min_max_val_int(Item_func_min_max *) const;
+ my_decimal *Item_func_min_max_val_decimal(Item_func_min_max *,
+ my_decimal *) const;
+ bool Item_func_min_max_get_date(THD *thd, Item_func_min_max*,
+ MYSQL_TIME *, date_mode_t fuzzydate) const;
+ longlong Item_func_between_val_int(Item_func_between *func) const;
Item *make_const_item_for_comparison(THD *, Item *src, const Item *cmp) const;
bool set_comparator_func(Arg_comparator *cmp) const;
cmp_item *make_cmp_item(THD *thd, CHARSET_INFO *cs) const;
@@ -2713,6 +5246,7 @@ class Type_handler_time: public Type_handler_time_common
public:
static uint hires_bytes(uint dec) { return m_hires_bytes[dec]; }
virtual ~Type_handler_time() {}
+ const Name version() const { return m_version_mariadb53; }
uint32 calc_pack_length(uint32 length) const;
Field *make_conversion_table_field(TABLE *, uint metadata,
const Field *target) const;
@@ -2724,6 +5258,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -2731,6 +5272,7 @@ class Type_handler_time2: public Type_handler_time_common
{
public:
virtual ~Type_handler_time2() {}
+ const Name version() const { return m_version_mysql56; }
enum_field_types real_field_type() const { return MYSQL_TYPE_TIME2; }
uint32 calc_pack_length(uint32 length) const;
Field *make_conversion_table_field(TABLE *, uint metadata,
@@ -2743,6 +5285,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -2750,16 +5299,23 @@ class Type_handler_temporal_with_date: public Type_handler_temporal_result
{
public:
virtual ~Type_handler_temporal_with_date() {}
- bool Item_save_in_value(Item *item, st_value *value) const;
+ Item_literal *create_literal_item(THD *thd, const char *str, size_t length,
+ CHARSET_INFO *cs, bool send_error) const;
+ bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr,
+ Item *a, Item *b) const;
+ int stored_field_cmp_to_item(THD *thd, Field *field, Item *item) const;
+ bool Item_save_in_value(THD *thd, Item *item, st_value *value) const;
bool Item_send(Item *item, Protocol *protocol, st_value *buf) const
{
return Item_send_date(item, protocol, buf);
}
+ void Item_update_null_value(Item *item) const;
int Item_save_in_field(Item *item, Field *field, bool no_conversions) const;
Item *make_const_item_for_comparison(THD *, Item *src, const Item *cmp) const;
bool set_comparator_func(Arg_comparator *cmp) const;
cmp_item *make_cmp_item(THD *thd, CHARSET_INFO *cs) const;
in_vector *make_in_vector(THD *, const Item_func_in *, uint nargs) const;
+ longlong Item_func_between_val_int(Item_func_between *func) const;
};
@@ -2771,16 +5327,31 @@ public:
const Name name() const { return m_name_date; }
const Type_handler *type_handler_for_comparison() const;
enum_field_types field_type() const { return MYSQL_TYPE_DATE; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_DATE;
+ }
enum_mysql_timestamp_type mysql_timestamp_type() const
{
return MYSQL_TIMESTAMP_DATE;
}
+ bool cond_notnull_field_isnull_to_field_eq_zero() const
+ {
+ return true;
+ }
+ Item_literal *create_literal_item(THD *thd, const char *str, size_t length,
+ CHARSET_INFO *cs, bool send_error) const;
Item *create_typecast_item(THD *thd, Item *item,
const Type_cast_attributes &attr) const;
bool Column_definition_fix_attributes(Column_definition *c) const;
uint Item_decimal_precision(const Item *item) const;
String *print_item_value(THD *thd, Item *item, String *str) const;
Item_cache *Item_get_cache(THD *thd, const Item *item) const;
+ String *Item_func_min_max_val_str(Item_func_min_max *, String *) const;
+ double Item_func_min_max_val_real(Item_func_min_max *) const;
+ longlong Item_func_min_max_val_int(Item_func_min_max *) const;
+ my_decimal *Item_func_min_max_val_decimal(Item_func_min_max *,
+ my_decimal *) const;
bool Item_hybrid_func_fix_attributes(THD *thd,
const char *name,
Type_handler_hybrid_field_type *,
@@ -2805,6 +5376,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -2824,6 +5402,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -2835,12 +5420,21 @@ public:
const Name name() const { return m_name_datetime; }
const Type_handler *type_handler_for_comparison() const;
enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_DATETIME;
+ }
enum_mysql_timestamp_type mysql_timestamp_type() const
{
return MYSQL_TIMESTAMP_DATETIME;
}
+ bool cond_notnull_field_isnull_to_field_eq_zero() const
+ {
+ return true;
+ }
Item *create_typecast_item(THD *thd, Item *item,
const Type_cast_attributes &attr) const;
+ void Column_definition_implicit_upgrade(Column_definition *c) const;
bool Column_definition_fix_attributes(Column_definition *c) const;
uint Item_decimal_scale(const Item *item) const
{
@@ -2857,6 +5451,11 @@ public:
}
String *print_item_value(THD *thd, Item *item, String *str) const;
Item_cache *Item_get_cache(THD *thd, const Item *item) const;
+ String *Item_func_min_max_val_str(Item_func_min_max *, String *) const;
+ double Item_func_min_max_val_real(Item_func_min_max *) const;
+ longlong Item_func_min_max_val_int(Item_func_min_max *) const;
+ my_decimal *Item_func_min_max_val_decimal(Item_func_min_max *,
+ my_decimal *) const;
bool Item_hybrid_func_fix_attributes(THD *thd,
const char *name,
Type_handler_hybrid_field_type *,
@@ -2874,6 +5473,7 @@ class Type_handler_datetime: public Type_handler_datetime_common
public:
static uint hires_bytes(uint dec) { return m_hires_bytes[dec]; }
virtual ~Type_handler_datetime() {}
+ const Name version() const { return m_version_mariadb53; }
uint32 calc_pack_length(uint32 length) const;
Field *make_conversion_table_field(TABLE *, uint metadata,
const Field *target) const;
@@ -2885,6 +5485,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -2892,6 +5499,7 @@ class Type_handler_datetime2: public Type_handler_datetime_common
{
public:
virtual ~Type_handler_datetime2() {}
+ const Name version() const { return m_version_mysql56; }
enum_field_types real_field_type() const { return MYSQL_TYPE_DATETIME2; }
uint32 calc_pack_length(uint32 length) const;
Field *make_conversion_table_field(TABLE *, uint metadata,
@@ -2904,17 +5512,31 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
class Type_handler_timestamp_common: public Type_handler_temporal_with_date
{
static const Name m_name_timestamp;
+protected:
+ bool TIME_to_native(THD *, const MYSQL_TIME *from, Native *to, uint dec) const;
public:
virtual ~Type_handler_timestamp_common() {}
const Name name() const { return m_name_timestamp; }
const Type_handler *type_handler_for_comparison() const;
+ const Type_handler *type_handler_for_native_format() const;
enum_field_types field_type() const { return MYSQL_TYPE_TIMESTAMP; }
+ protocol_send_type_t protocol_send_type() const
+ {
+ return PROTOCOL_SEND_DATETIME;
+ }
enum_mysql_timestamp_type mysql_timestamp_type() const
{
return MYSQL_TIMESTAMP_DATETIME;
@@ -2923,6 +5545,21 @@ public:
{
return true;
}
+ void Column_definition_implicit_upgrade(Column_definition *c) const;
+ bool Item_eq_value(THD *thd, const Type_cmp_attributes *attr,
+ Item *a, Item *b) const;
+ bool Item_val_native_with_conversion(THD *thd, Item *, Native *to) const;
+ bool Item_val_native_with_conversion_result(THD *thd, Item *, Native *to) const;
+ bool Item_param_val_native(THD *thd, Item_param *item, Native *to) const;
+ int cmp_native(const Native &a, const Native &b) const;
+ longlong Item_func_between_val_int(Item_func_between *func) const;
+ cmp_item *make_cmp_item(THD *thd, CHARSET_INFO *cs) const;
+ in_vector *make_in_vector(THD *thd, const Item_func_in *f, uint nargs) const;
+ void make_sort_key(uchar *to, Item *item, const SORT_FIELD_ATTR *sort_field,
+ Sort_param *param) const;
+ void sortlength(THD *thd,
+ const Type_std_attributes *item,
+ SORT_FIELD_ATTR *attr) const;
bool Column_definition_fix_attributes(Column_definition *c) const;
uint Item_decimal_scale(const Item *item) const
{
@@ -2935,10 +5572,18 @@ public:
}
bool Item_send(Item *item, Protocol *protocol, st_value *buf) const
{
- return Item_send_datetime(item, protocol, buf);
+ return Item_send_timestamp(item, protocol, buf);
}
+ int Item_save_in_field(Item *item, Field *field, bool no_conversions) const;
String *print_item_value(THD *thd, Item *item, String *str) const;
Item_cache *Item_get_cache(THD *thd, const Item *item) const;
+ Item_copy *create_item_copy(THD *thd, Item *item) const;
+ String *Item_func_min_max_val_str(Item_func_min_max *, String *) const;
+ double Item_func_min_max_val_real(Item_func_min_max *) const;
+ longlong Item_func_min_max_val_int(Item_func_min_max *) const;
+ my_decimal *Item_func_min_max_val_decimal(Item_func_min_max *,
+ my_decimal *) const;
+ bool set_comparator_func(Arg_comparator *cmp) const;
bool Item_hybrid_func_fix_attributes(THD *thd,
const char *name,
Type_handler_hybrid_field_type *,
@@ -2946,6 +5591,8 @@ public:
Item **items, uint nitems) const;
void Item_param_set_param_func(Item_param *param,
uchar **pos, ulong len) const;
+ bool Item_func_min_max_get_date(THD *thd, Item_func_min_max*,
+ MYSQL_TIME *, date_mode_t fuzzydate) const;
};
@@ -2956,6 +5603,7 @@ class Type_handler_timestamp: public Type_handler_timestamp_common
public:
static uint sec_part_bytes(uint dec) { return m_sec_part_bytes[dec]; }
virtual ~Type_handler_timestamp() {}
+ const Name version() const { return m_version_mariadb53; }
uint32 calc_pack_length(uint32 length) const;
Field *make_conversion_table_field(TABLE *, uint metadata,
const Field *target) const;
@@ -2967,6 +5615,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -2974,6 +5629,7 @@ class Type_handler_timestamp2: public Type_handler_timestamp_common
{
public:
virtual ~Type_handler_timestamp2() {}
+ const Name version() const { return m_version_mysql56; }
enum_field_types real_field_type() const { return MYSQL_TYPE_TIMESTAMP2; }
uint32 calc_pack_length(uint32 length) const;
Field *make_conversion_table_field(TABLE *, uint metadata,
@@ -2988,6 +5644,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -3012,6 +5675,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -3043,6 +5713,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -3058,7 +5735,9 @@ public:
const Type_handler *type_handler_for_union(const Item *) const;
uint32 max_display_length(const Item *item) const { return 0; }
uint32 calc_pack_length(uint32 length) const { return 0; }
- bool Item_save_in_value(Item *item, st_value *value) const;
+ bool Item_const_eq(const Item_const *a, const Item_const *b,
+ bool binary_cmp) const;
+ bool Item_save_in_value(THD *thd, Item *item, st_value *value) const;
bool Item_send(Item *item, Protocol *protocol, st_value *buf) const;
Field *make_conversion_table_field(TABLE *, uint metadata,
const Field *target) const;
@@ -3081,6 +5760,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -3117,6 +5803,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -3129,10 +5822,15 @@ public:
const Name name() const { return m_name_var_string; }
enum_field_types field_type() const { return MYSQL_TYPE_VAR_STRING; }
enum_field_types real_field_type() const { return MYSQL_TYPE_STRING; }
+ enum_field_types traditional_merge_field_type() const
+ {
+ return MYSQL_TYPE_VARCHAR;
+ }
const Type_handler *type_handler_for_tmp_table(const Item *item) const
{
return varstring_type_handler(item);
}
+ void Column_definition_implicit_upgrade(Column_definition *c) const;
bool Column_definition_fix_attributes(Column_definition *c) const;
bool Column_definition_prepare_stage2(Column_definition *c,
handler *file,
@@ -3152,6 +5850,10 @@ public:
virtual ~Type_handler_varchar() {}
const Name name() const { return m_name_varchar; }
enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; }
+ enum_field_types type_code_for_protocol() const
+ {
+    return MYSQL_TYPE_VAR_STRING; // Keep things compatible with old clients
+ }
uint32 calc_pack_length(uint32 length) const
{
return (length + (length < 256 ? 1: 2));
@@ -3175,10 +5877,28 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
bool adjust_spparam_type(Spvar_definition *def, Item *from) const;
};
+class Type_handler_hex_hybrid: public Type_handler_varchar
+{
+ static const Name m_name_hex_hybrid;
+public:
+ virtual ~Type_handler_hex_hybrid() {}
+ const Name name() const { return m_name_hex_hybrid; }
+ const Type_handler *cast_to_int_type_handler() const;
+ const Type_handler *type_handler_for_system_time() const;
+};
+
+
class Type_handler_varchar_compressed: public Type_handler_varchar
{
public:
@@ -3208,6 +5928,9 @@ public:
}
bool is_param_long_data_type() const { return true; }
bool Column_definition_fix_attributes(Column_definition *c) const;
+ void Column_definition_reuse_fix_attributes(THD *thd,
+ Column_definition *c,
+ const Field *field) const;
bool Column_definition_prepare_stage2(Column_definition *c,
handler *file,
ulonglong table_flags) const;
@@ -3218,6 +5941,13 @@ public:
Item **items, uint nitems) const;
void Item_param_setup_conversion(THD *thd, Item_param *) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -3233,6 +5963,7 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ uint max_octet_length() const { return UINT_MAX8; }
};
@@ -3248,6 +5979,7 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ uint max_octet_length() const { return UINT_MAX24; }
};
@@ -3265,6 +5997,7 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ uint max_octet_length() const { return UINT_MAX32; }
};
@@ -3280,6 +6013,7 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ uint max_octet_length() const { return UINT_MAX16; }
};
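
A side note on the max_octet_length() overrides added just above: they return UINT_MAX8, UINT_MAX24, UINT_MAX32 and UINT_MAX16. Assuming those macros have their usual values (255, 65535, 16777215 and 4294967295, i.e. the classic TINYBLOB/MEDIUMBLOB/LONGBLOB/BLOB limits), the standalone sketch below shows why each limit implies 1 to 4 length-prefix bytes. This is an illustration only, not code from this header.

    #include <cstdint>
    #include <cstdio>

    // Bytes needed to store a length up to max_octets (1 for 255, 2 for 65535, ...).
    static unsigned length_prefix_bytes(uint64_t max_octets)
    {
      unsigned bytes= 0;
      while (max_octets)
      {
        bytes++;
        max_octets>>= 8;
      }
      return bytes;
    }

    int main()
    {
      const uint64_t limits[]= { 255u, 65535u, 16777215u, 4294967295u };
      for (uint64_t m : limits)
        std::printf("max %llu octets -> %u length byte(s)\n",
                    (unsigned long long) m, length_prefix_bytes(m));
      return 0;   // prints 1, 2, 3 and 4
    }
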
@@ -3319,7 +6053,18 @@ public:
const st_value *value) const;
Field *make_conversion_table_field(TABLE *, uint metadata,
const Field *target) const;
+ void
+ Column_definition_attributes_frm_pack(const Column_definition_attributes *at,
+ uchar *buff) const;
+ bool
+ Column_definition_attributes_frm_unpack(Column_definition_attributes *attr,
+ TABLE_SHARE *share,
+ const uchar *buffer,
+ LEX_CUSTRING *gis_options) const;
bool Column_definition_fix_attributes(Column_definition *c) const;
+ void Column_definition_reuse_fix_attributes(THD *thd,
+ Column_definition *c,
+ const Field *field) const;
bool Column_definition_prepare_stage1(THD *thd,
MEM_ROOT *mem_root,
Column_definition *c,
@@ -3333,6 +6078,14 @@ public:
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
+
bool can_return_int() const { return false; }
bool can_return_decimal() const { return false; }
bool can_return_real() const { return false; }
@@ -3382,6 +6135,9 @@ public:
Type_handler_hybrid_field_type *,
Type_all_attributes *atrr,
Item **items, uint nitems) const;
+ void Column_definition_reuse_fix_attributes(THD *thd,
+ Column_definition *c,
+ const Field *field) const;
bool Column_definition_prepare_stage1(THD *thd,
MEM_ROOT *mem_root,
Column_definition *c,
@@ -3404,7 +6160,11 @@ class Type_handler_enum: public Type_handler_typelib
public:
virtual ~Type_handler_enum() {}
const Name name() const { return m_name_enum; }
- virtual enum_field_types real_field_type() const { return MYSQL_TYPE_ENUM; }
+ enum_field_types real_field_type() const { return MYSQL_TYPE_ENUM; }
+ enum_field_types traditional_merge_field_type() const
+ {
+ return MYSQL_TYPE_ENUM;
+ }
uint32 calc_pack_length(uint32 length) const;
Field *make_conversion_table_field(TABLE *, uint metadata,
const Field *target) const;
@@ -3416,6 +6176,13 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
};
@@ -3425,7 +6192,11 @@ class Type_handler_set: public Type_handler_typelib
public:
virtual ~Type_handler_set() {}
const Name name() const { return m_name_set; }
- virtual enum_field_types real_field_type() const { return MYSQL_TYPE_SET; }
+ enum_field_types real_field_type() const { return MYSQL_TYPE_SET; }
+ enum_field_types traditional_merge_field_type() const
+ {
+ return MYSQL_TYPE_SET;
+ }
uint32 calc_pack_length(uint32 length) const;
Field *make_conversion_table_field(TABLE *, uint metadata,
const Field *target) const;
@@ -3437,9 +6208,26 @@ public:
const Record_addr &addr,
const Type_all_attributes &attr,
TABLE *table) const;
+ Field *make_table_field_from_def(TABLE_SHARE *share,
+ MEM_ROOT *mem_root,
+ const LEX_CSTRING *name,
+ const Record_addr &addr,
+ const Bit_addr &bit,
+ const Column_definition_attributes *attr,
+ uint32 flags) const;
+};
+
+
+// A pseudo type handler, mostly for test purposes for now
+class Type_handler_interval_DDhhmmssff: public Type_handler_long_blob
+{
+public:
+ Item *create_typecast_item(THD *thd, Item *item,
+ const Type_cast_attributes &attr) const;
};
+
/**
A handler for hybrid type functions, e.g.
COALESCE(), IF(), IFNULL(), NULLIF(), CASE,
@@ -3537,12 +6325,14 @@ extern MYSQL_PLUGIN_IMPORT Type_handler_set type_handler_set;
extern MYSQL_PLUGIN_IMPORT Type_handler_string type_handler_string;
extern MYSQL_PLUGIN_IMPORT Type_handler_var_string type_handler_var_string;
extern MYSQL_PLUGIN_IMPORT Type_handler_varchar type_handler_varchar;
+extern MYSQL_PLUGIN_IMPORT Type_handler_hex_hybrid type_handler_hex_hybrid;
extern MYSQL_PLUGIN_IMPORT Type_handler_tiny_blob type_handler_tiny_blob;
extern MYSQL_PLUGIN_IMPORT Type_handler_medium_blob type_handler_medium_blob;
extern MYSQL_PLUGIN_IMPORT Type_handler_long_blob type_handler_long_blob;
extern MYSQL_PLUGIN_IMPORT Type_handler_blob type_handler_blob;
+extern MYSQL_PLUGIN_IMPORT Type_handler_bool type_handler_bool;
extern MYSQL_PLUGIN_IMPORT Type_handler_tiny type_handler_tiny;
extern MYSQL_PLUGIN_IMPORT Type_handler_short type_handler_short;
extern MYSQL_PLUGIN_IMPORT Type_handler_int24 type_handler_int24;
@@ -3555,6 +6345,7 @@ extern MYSQL_PLUGIN_IMPORT Type_handler_newdecimal type_handler_newdecimal;
extern MYSQL_PLUGIN_IMPORT Type_handler_olddecimal type_handler_olddecimal;
extern MYSQL_PLUGIN_IMPORT Type_handler_year type_handler_year;
+extern MYSQL_PLUGIN_IMPORT Type_handler_year type_handler_year2;
extern MYSQL_PLUGIN_IMPORT Type_handler_newdate type_handler_newdate;
extern MYSQL_PLUGIN_IMPORT Type_handler_date type_handler_date;
extern MYSQL_PLUGIN_IMPORT Type_handler_time type_handler_time;
@@ -3564,10 +6355,8 @@ extern MYSQL_PLUGIN_IMPORT Type_handler_datetime2 type_handler_datetime2;
extern MYSQL_PLUGIN_IMPORT Type_handler_timestamp type_handler_timestamp;
extern MYSQL_PLUGIN_IMPORT Type_handler_timestamp2 type_handler_timestamp2;
-extern MYSQL_PLUGIN_IMPORT Type_handler_tiny_blob type_handler_tiny_blob;
-extern MYSQL_PLUGIN_IMPORT Type_handler_blob type_handler_blob;
-extern MYSQL_PLUGIN_IMPORT Type_handler_medium_blob type_handler_medium_blob;
-extern MYSQL_PLUGIN_IMPORT Type_handler_long_blob type_handler_long_blob;
+extern MYSQL_PLUGIN_IMPORT Type_handler_interval_DDhhmmssff
+ type_handler_interval_DDhhmmssff;
class Type_aggregator
{
diff --git a/sql/sql_type_int.h b/sql/sql_type_int.h
index dc76e62de36..c7fcd3f793b 100644
--- a/sql/sql_type_int.h
+++ b/sql/sql_type_int.h
@@ -1,5 +1,4 @@
-/* Copyright (c) 2006, 2010, Oracle and/or its affiliates.
- Copyright (c) 2011, 2016, MariaDB
+/* Copyright (c) 2018, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -18,11 +17,39 @@
#define SQL_TYPE_INT_INCLUDED
-// A longlong/ulonglong hybrid. Good to store results of val_int().
-class Longlong_hybrid
+class Null_flag
+{
+protected:
+ bool m_is_null;
+public:
+ bool is_null() const { return m_is_null; }
+ Null_flag(bool is_null) :m_is_null(is_null) { }
+};
+
+
+class Longlong
{
protected:
longlong m_value;
+public:
+ longlong value() const { return m_value; }
+ Longlong(longlong nr) :m_value(nr) { }
+};
+
+
+class Longlong_null: public Longlong, public Null_flag
+{
+public:
+ Longlong_null(longlong nr, bool is_null)
+ :Longlong(nr), Null_flag(is_null)
+ { }
+};
+
+
+// A longlong/ulonglong hybrid. Good to store results of val_int().
+class Longlong_hybrid: public Longlong
+{
+protected:
bool m_unsigned;
int cmp_signed(const Longlong_hybrid& other) const
{
@@ -35,9 +62,8 @@ protected:
}
public:
Longlong_hybrid(longlong nr, bool unsigned_flag)
- :m_value(nr), m_unsigned(unsigned_flag)
+ :Longlong(nr), m_unsigned(unsigned_flag)
{ }
- longlong value() const { return m_value; }
bool is_unsigned() const { return m_unsigned; }
bool is_unsigned_outside_of_signed_range() const
{
@@ -85,4 +111,16 @@ public:
}
};
+
+class Longlong_hybrid_null: public Longlong_hybrid,
+ public Null_flag
+{
+public:
+ Longlong_hybrid_null(const Longlong_null &nr, bool unsigned_flag)
+ :Longlong_hybrid(nr.value(), unsigned_flag),
+ Null_flag(nr.is_null())
+ { }
+};
+
+
#endif // SQL_TYPE_INT_INCLUDED
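
For illustration, the pieces introduced in this hunk assemble as follows. The block below is a self-contained restatement of the hierarchy shown above (value, NULL flag and signedness combined by multiple inheritance), not additional MariaDB code; only the small main() is new.

    #include <cstdio>
    typedef long long longlong;

    class Null_flag
    {
    protected:
      bool m_is_null;
    public:
      bool is_null() const { return m_is_null; }
      Null_flag(bool is_null) :m_is_null(is_null) { }
    };

    class Longlong
    {
    protected:
      longlong m_value;
    public:
      longlong value() const { return m_value; }
      Longlong(longlong nr) :m_value(nr) { }
    };

    // Value plus NULL flag, e.g. the result of a nullable integer expression.
    class Longlong_null: public Longlong, public Null_flag
    {
    public:
      Longlong_null(longlong nr, bool is_null)
        :Longlong(nr), Null_flag(is_null) { }
    };

    // Value plus signedness, as val_int() returns together with unsigned_flag.
    class Longlong_hybrid: public Longlong
    {
    protected:
      bool m_unsigned;
    public:
      Longlong_hybrid(longlong nr, bool unsigned_flag)
        :Longlong(nr), m_unsigned(unsigned_flag) { }
      bool is_unsigned() const { return m_unsigned; }
    };

    // All three aspects combined, mirroring Longlong_hybrid_null above.
    class Longlong_hybrid_null: public Longlong_hybrid, public Null_flag
    {
    public:
      Longlong_hybrid_null(const Longlong_null &nr, bool unsigned_flag)
        :Longlong_hybrid(nr.value(), unsigned_flag), Null_flag(nr.is_null()) { }
    };

    int main()
    {
      Longlong_hybrid_null r(Longlong_null(-1, false), /*unsigned_flag=*/ true);
      std::printf("value=%lld null=%d unsigned=%d\n",
                  r.value(), (int) r.is_null(), (int) r.is_unsigned());
      return 0;
    }
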
diff --git a/sql/sql_type_json.cc b/sql/sql_type_json.cc
new file mode 100644
index 00000000000..f53a247d816
--- /dev/null
+++ b/sql/sql_type_json.cc
@@ -0,0 +1,55 @@
+/*
+ Copyright (c) 2019, MariaDB
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; version 2 of
+ the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include "sql_type_json.h"
+#include "sql_class.h"
+
+
+Type_handler_json_longtext type_handler_json_longtext;
+
+
+/**
+ Create JSON_VALID(field_name) expression
+*/
+
+Virtual_column_info *
+Type_handler_json_longtext::make_json_valid_expr(THD *thd,
+ const LEX_CSTRING *field_name)
+ const
+{
+ Lex_ident_sys_st str;
+ Item *field, *expr;
+ str.set_valid_utf8(field_name);
+ if (unlikely(!(field= thd->lex->create_item_ident_field(thd, NullS, NullS,
+ &str))))
+ return 0;
+ if (unlikely(!(expr= new (thd->mem_root) Item_func_json_valid(thd, field))))
+ return 0;
+ return add_virtual_expression(thd, expr);
+}
+
+
+bool Type_handler_json_longtext::
+ Column_definition_validate_check_constraint(THD *thd,
+ Column_definition * c) const
+{
+ if (!c->check_constraint &&
+ !(c->check_constraint= make_json_valid_expr(thd, &c->field_name)))
+ return true;
+ return Type_handler::Column_definition_validate_check_constraint(thd, c);
+}
diff --git a/sql/sql_type_json.h b/sql/sql_type_json.h
new file mode 100644
index 00000000000..6c4ee8cb2eb
--- /dev/null
+++ b/sql/sql_type_json.h
@@ -0,0 +1,38 @@
+#ifndef SQL_TYPE_JSON_INCLUDED
+#define SQL_TYPE_JSON_INCLUDED
+/*
+ Copyright (c) 2019, MariaDB
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; version 2 of
+ the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include "mariadb.h"
+#include "sql_type.h"
+
+class Type_handler_json_longtext: public Type_handler_long_blob
+{
+ Virtual_column_info *make_json_valid_expr(THD *thd,
+ const LEX_CSTRING *field_name)
+ const;
+public:
+ virtual ~Type_handler_json_longtext() {}
+ bool Column_definition_validate_check_constraint(THD *thd,
+ Column_definition *c) const;
+};
+
+extern MYSQL_PLUGIN_IMPORT
+ Type_handler_json_longtext type_handler_json_longtext;
+
+#endif // SQL_TYPE_JSON_INCLUDED
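
For context: the handler declared above injects a JSON_VALID(column) CHECK constraint whenever a column of this type is defined without an explicit one (see Column_definition_validate_check_constraint in sql_type_json.cc earlier in this patch). The standalone sketch below illustrates that pattern only; ColumnDef, LongTextType and JsonTextType are deliberately hypothetical names, not MariaDB classes, and the validity test is a toy stand-in for JSON_VALID.

    #include <cstdio>
    #include <functional>
    #include <string>

    struct ColumnDef                      // hypothetical stand-in for Column_definition
    {
      std::string name;
      std::function<bool(const std::string&)> check;  // empty == no CHECK given
    };

    struct LongTextType                   // hypothetical stand-in for the base handler
    {
      virtual bool validate(ColumnDef &) const { return false; }
      virtual ~LongTextType() {}
    };

    struct JsonTextType: LongTextType     // mirrors the idea of Type_handler_json_longtext
    {
      bool validate(ColumnDef &c) const override
      {
        if (!c.check)                     // no user-supplied CHECK: inject our own
          c.check= [](const std::string &v) { return !v.empty() && v.front() == '{'; };
        return LongTextType::validate(c);
      }
    };

    int main()
    {
      ColumnDef col{ "doc", nullptr };
      JsonTextType().validate(col);
      std::printf("doc passes check: %d\n", (int) col.check("{\"a\":1}"));
      return 0;
    }
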
diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc
index 9a036156de6..aee4869bd40 100644
--- a/sql/sql_udf.cc
+++ b/sql/sql_udf.cc
@@ -76,6 +76,8 @@ static const char *init_syms(udf_func *tmp, char *nm)
(void)strmov(end, "_add");
if (!((tmp->func_add= (Udf_func_add) dlsym(tmp->dlhandle, nm))))
return nm;
+ (void)strmov(end, "_remove");
+ tmp->func_remove= (Udf_func_add) dlsym(tmp->dlhandle, nm);
}
(void) strmov(end,"_deinit");
@@ -565,6 +567,7 @@ int mysql_create_function(THD *thd,udf_func *udf)
u_d->func_deinit= udf->func_deinit;
u_d->func_clear= udf->func_clear;
u_d->func_add= udf->func_add;
+ u_d->func_remove= udf->func_remove;
/* create entry in mysql.func table */
diff --git a/sql/sql_udf.h b/sql/sql_udf.h
index 6e6fed2a81a..4fa75759269 100644
--- a/sql/sql_udf.h
+++ b/sql/sql_udf.h
@@ -47,6 +47,7 @@ typedef struct st_udf_func
Udf_func_deinit func_deinit;
Udf_func_clear func_clear;
Udf_func_add func_add;
+ Udf_func_add func_remove;
ulong usage_count;
} udf_func;
@@ -131,6 +132,20 @@ class udf_handler :public Sql_alloc
func(&initid, &f_args, &is_null, &error);
*null_value= (my_bool) (is_null || error);
}
+ bool supports_removal() const
+ { return MY_TEST(u_d->func_remove); }
+ void remove(my_bool *null_value)
+ {
+ DBUG_ASSERT(u_d->func_remove);
+ if (get_arguments())
+ {
+ *null_value=1;
+ return;
+ }
+ Udf_func_add func= u_d->func_remove;
+ func(&initid, &f_args, &is_null, &error);
+ *null_value= (my_bool) (is_null || error);
+ }
String *val_str(String *str,String *save_str);
};
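
The new func_remove slot is resolved from an optional "<name>_remove" symbol with the same signature as "<name>_add" (see the init_syms() hunk above), and udf_handler::remove() calls it to take a single row back out of the aggregate, presumably so a window frame can shrink without being recomputed. Below is a hedged sketch of an aggregate UDF (a running sum) that provides the symbol, assuming the conventional UDF_INIT/UDF_ARGS interface from <mysql.h>; the sum_example name is illustrative and not part of this patch.

    #include <cstring>
    #include <mysql.h>

    extern "C" {

    my_bool sum_example_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
    {
      if (args->arg_count != 1)
      {
        std::strcpy(message, "sum_example() requires exactly one argument");
        return 1;
      }
      args->arg_type[0]= REAL_RESULT;           // coerce the argument to a double
      initid->ptr= (char*) new double(0.0);     // per-group accumulator
      return 0;
    }

    void sum_example_deinit(UDF_INIT *initid)
    {
      delete (double*) initid->ptr;
    }

    void sum_example_clear(UDF_INIT *initid, char *, char *)
    {
      *(double*) initid->ptr= 0.0;
    }

    void sum_example_add(UDF_INIT *initid, UDF_ARGS *args, char *, char *)
    {
      if (args->args[0])                        // skip SQL NULL
        *(double*) initid->ptr+= *(double*) args->args[0];
    }

    /* Optional: looked up as "<name>_remove" by the patched init_syms(); when it
       is absent, supports_removal() above simply returns false. */
    void sum_example_remove(UDF_INIT *initid, UDF_ARGS *args, char *, char *)
    {
      if (args->args[0])
        *(double*) initid->ptr-= *(double*) args->args[0];
    }

    double sum_example(UDF_INIT *initid, UDF_ARGS *, char *, char *)
    {
      return *(double*) initid->ptr;
    }

    } // extern "C"

Such a function would be registered in the usual way, e.g. CREATE AGGREGATE FUNCTION sum_example RETURNS REAL SONAME '<library>'.
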
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 3fb5552c77a..c32a6ee852f 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -68,7 +68,7 @@ void select_unit::change_select()
curr_sel= current_select_number;
/* New SELECT processing starts */
DBUG_ASSERT(table->file->inited == 0);
- step= thd->lex->current_select->linkage;
+ step= thd->lex->current_select->get_linkage();
switch (step)
{
case INTERSECT_TYPE:
@@ -248,7 +248,7 @@ bool select_unit::send_eof()
{
if (step != INTERSECT_TYPE ||
(thd->lex->current_select->next_select() &&
- thd->lex->current_select->next_select()->linkage == INTERSECT_TYPE))
+ thd->lex->current_select->next_select()->get_linkage() == INTERSECT_TYPE))
{
/*
       it is not INTERSECT or next SELECT in the sequence is INTERSECT so no
@@ -753,11 +753,11 @@ bool st_select_lex_unit::join_union_type_attributes(THD *thd_arg,
been fixed yet. An Item_type_holder must be created based on a fixed
Item, so use the inner Item instead.
*/
- DBUG_ASSERT(item_tmp->fixed ||
+ DBUG_ASSERT(item_tmp->is_fixed() ||
(item_tmp->type() == Item::REF_ITEM &&
((Item_ref *)(item_tmp))->ref_type() ==
Item_ref::OUTER_REF));
- if (!item_tmp->fixed)
+ if (!item_tmp->is_fixed())
item_tmp= item_tmp->real_item();
holders[holder_pos].add_argument(item_tmp);
}
@@ -1085,12 +1085,12 @@ cont:
while ((type= tp++))
{
- if (type->cmp_type() == STRING_RESULT &&
- type->collation.derivation == DERIVATION_NONE)
- {
- my_error(ER_CANT_AGGREGATE_NCOLLATIONS, MYF(0), "UNION");
+ /*
+ Test if the aggregated data type is OK for a UNION element.
+ E.g. in case of string data, DERIVATION_NONE is not allowed.
+ */
+ if (type->type_handler()->union_element_finalize(type))
goto err;
- }
}
/*
@@ -1432,7 +1432,7 @@ bool st_select_lex_unit::exec()
union_result->change_select();
if (fake_select_lex)
{
- if (sl != &thd->lex->select_lex)
+ if (sl != thd->lex->first_select_lex())
fake_select_lex->uncacheable|= sl->uncacheable;
else
fake_select_lex->uncacheable= 0;
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 497f419c1db..723a3f26dc9 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -132,7 +132,8 @@ bool compare_record(const TABLE *table)
FALSE Items are OK
*/
-static bool check_fields(THD *thd, List<Item> &items, bool update_view)
+static bool check_fields(THD *thd, TABLE_LIST *table, List<Item> &items,
+ bool update_view)
{
Item *item;
if (update_view)
@@ -177,6 +178,22 @@ static bool check_fields(THD *thd, List<Item> &items, bool update_view)
f->set_has_explicit_value();
}
}
+
+ if (table->has_period())
+ {
+ DBUG_ASSERT(thd->lex->sql_command == SQLCOM_UPDATE);
+ for (List_iterator_fast<Item> it(items); (item=it++);)
+ {
+ Field *f= item->field_for_view_update()->field;
+ vers_select_conds_t &period= table->period_conditions;
+ if (period.field_start->field == f || period.field_end->field == f)
+ {
+ my_error(ER_PERIOD_COLUMNS_UPDATED, MYF(0),
+ item->name.str, period.name.str);
+ return true;
+ }
+ }
+ }
return FALSE;
}
@@ -275,6 +292,31 @@ static void prepare_record_for_error_message(int error, TABLE *table)
}
+static
+int cut_fields_for_portion_of_time(THD *thd, TABLE *table,
+ const vers_select_conds_t &period_conds)
+{
+ bool lcond= period_conds.field_start->val_datetime_packed(thd)
+ < period_conds.start.item->val_datetime_packed(thd);
+ bool rcond= period_conds.field_end->val_datetime_packed(thd)
+ > period_conds.end.item->val_datetime_packed(thd);
+
+ Field *start_field= table->field[table->s->period.start_fieldno];
+ Field *end_field= table->field[table->s->period.end_fieldno];
+
+ DBUG_ASSERT(!start_field->has_explicit_value()
+ && !end_field->has_explicit_value());
+
+ int res= 0;
+ if (lcond)
+ res= period_conds.start.item->save_in_field(start_field, true);
+
+ if (likely(!res) && rcond)
+ res= period_conds.end.item->save_in_field(end_field, true);
+
+ return res;
+}
+
/*
Process usual UPDATE
@@ -325,7 +367,7 @@ int mysql_update(THD *thd,
SQL_SELECT *select= NULL;
SORT_INFO *file_sort= 0;
READ_RECORD info;
- SELECT_LEX *select_lex= &thd->lex->select_lex;
+ SELECT_LEX *select_lex= thd->lex->first_select_lex();
ulonglong id;
List<Item> all_fields;
killed_state killed_status= NOT_KILLED;
@@ -337,7 +379,7 @@ int mysql_update(THD *thd,
query_plan.using_filesort= FALSE;
// For System Versioning (may need to insert new fields to a table).
- ha_rows updated_sys_ver= 0;
+ ha_rows rows_inserted= 0;
DBUG_ENTER("mysql_update");
@@ -349,6 +391,12 @@ int mysql_update(THD *thd,
if (mysql_handle_derived(thd->lex, DT_INIT))
DBUG_RETURN(1);
+ if (table_list->has_period() && table_list->is_view_or_derived())
+ {
+ my_error(ER_IT_IS_A_VIEW, MYF(0), table_list->table_name.str);
+ DBUG_RETURN(TRUE);
+ }
+
if (((update_source_table=unique_table(thd, table_list,
table_list->next_global, 0)) ||
table_list->is_multitable()))
@@ -384,7 +432,7 @@ int mysql_update(THD *thd,
table->covering_keys= table->s->keys_in_use;
table->quick_keys.clear_all();
- query_plan.select_lex= &thd->lex->select_lex;
+ query_plan.select_lex= thd->lex->first_select_lex();
query_plan.table= table;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
/* Force privilege re-checking for views after they have been opened. */
@@ -394,6 +442,16 @@ int mysql_update(THD *thd,
if (mysql_prepare_update(thd, table_list, &conds, order_num, order))
DBUG_RETURN(1);
+ if (table_list->has_period())
+ {
+ if (!table_list->period_conditions.start.item->const_item()
+ || !table_list->period_conditions.end.item->const_item())
+ {
+ my_error(ER_NOT_CONSTANT_EXPRESSION, MYF(0), "FOR PORTION OF");
+ DBUG_RETURN(true);
+ }
+ }
+
old_covering_keys= table->covering_keys; // Keys used in WHERE
/* Check the fields we are going to modify */
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -407,7 +465,7 @@ int mysql_update(THD *thd,
if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
fields, MARK_COLUMNS_WRITE, 0, 0))
DBUG_RETURN(1); /* purecov: inspected */
- if (check_fields(thd, fields, table_list->view))
+ if (check_fields(thd, table_list, fields, table_list->view))
{
DBUG_RETURN(1);
}
@@ -518,7 +576,15 @@ int mysql_update(THD *thd,
if (unlikely(init_ftfuncs(thd, select_lex, 1)))
goto err;
- table->mark_columns_needed_for_update();
+ if (table_list->has_period())
+ {
+ table->use_all_columns();
+ table->rpl_write_set= table->write_set;
+ }
+ else
+ {
+ table->mark_columns_needed_for_update();
+ }
table->update_const_key_parts(conds);
order= simple_remove_const(order, conds);
@@ -599,6 +665,14 @@ int mysql_update(THD *thd,
TRG_ACTION_BEFORE) ||
table->triggers->has_triggers(TRG_EVENT_UPDATE,
TRG_ACTION_AFTER)));
+
+ if (table_list->has_period())
+ has_triggers= table->triggers &&
+ (table->triggers->has_triggers(TRG_EVENT_INSERT,
+ TRG_ACTION_BEFORE)
+ || table->triggers->has_triggers(TRG_EVENT_INSERT,
+ TRG_ACTION_AFTER)
+ || has_triggers);
DBUG_PRINT("info", ("has_triggers: %s", has_triggers ? "TRUE" : "FALSE"));
binlog_is_row= thd->is_current_stmt_binlog_format_row();
DBUG_PRINT("info", ("binlog_is_row: %s", binlog_is_row ? "TRUE" : "FALSE"));
@@ -889,14 +963,25 @@ update_begin:
explain->tracker.on_record_after_where();
store_record(table,record[1]);
+ if (table_list->has_period())
+ cut_fields_for_portion_of_time(thd, table,
+ table_list->period_conditions);
+
if (fill_record_n_invoke_before_triggers(thd, table, fields, values, 0,
TRG_EVENT_UPDATE))
break; /* purecov: inspected */
found++;
- if (!can_compare_record || compare_record(table))
+ bool record_was_same= false;
+ bool need_update= !can_compare_record || compare_record(table);
+
+ if (need_update)
{
+ if (table->versioned(VERS_TIMESTAMP) &&
+ thd->lex->sql_command == SQLCOM_DELETE)
+ table->vers_update_end();
+
if (table->default_field && table->update_default_fields(1, ignore))
{
error= 1;
@@ -955,7 +1040,9 @@ update_begin:
error= table->file->ha_update_row(table->record[1],
table->record[0]);
}
- if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
+
+ record_was_same= error == HA_ERR_RECORD_IS_THE_SAME;
+ if (unlikely(record_was_same))
{
error= 0;
}
@@ -971,12 +1058,22 @@ update_begin:
restore_record(table, record[2]);
}
if (likely(!error))
- updated_sys_ver++;
+ rows_inserted++;
}
if (likely(!error))
updated++;
}
+ if (likely(!error) && !record_was_same && table_list->has_period())
+ {
+ store_record(table, record[2]);
+ restore_record(table, record[1]);
+ error= table->insert_portion_of_time(thd,
+ table_list->period_conditions,
+ &rows_inserted);
+ restore_record(table, record[2]);
+ }
+
if (unlikely(error) &&
(!ignore || table->file->is_fatal_error(error, HA_CHECK_ALL)))
{
@@ -987,7 +1084,7 @@ update_begin:
myf flags= 0;
if (table->file->is_fatal_error(error, HA_CHECK_ALL))
- flags|= ME_FATALERROR; /* Other handler errors are fatal */
+ flags|= ME_FATAL; /* Other handler errors are fatal */
prepare_record_for_error_message(error, table);
table->file->print_error(error,MYF(flags));
@@ -1098,7 +1195,7 @@ update_begin:
{
/* purecov: begin inspected */
prepare_record_for_error_message(loc_error, table);
- table->file->print_error(loc_error,MYF(ME_FATALERROR));
+ table->file->print_error(loc_error,MYF(ME_FATAL));
error= 1;
/* purecov: end */
}
@@ -1117,6 +1214,8 @@ update_end:
delete select;
select= NULL;
THD_STAGE_INFO(thd, stage_end);
+ if (table_list->has_period())
+ table->file->ha_release_auto_increment();
(void) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
/*
@@ -1179,14 +1278,14 @@ update_end:
if (likely(error < 0) && likely(!thd->lex->analyze_stmt))
{
char buff[MYSQL_ERRMSG_SIZE];
- if (!table->versioned(VERS_TIMESTAMP))
+ if (!table->versioned(VERS_TIMESTAMP) && !table_list->has_period())
my_snprintf(buff, sizeof(buff), ER_THD(thd, ER_UPDATE_INFO), (ulong) found,
(ulong) updated,
(ulong) thd->get_stmt_da()->current_statement_warn_count());
else
my_snprintf(buff, sizeof(buff),
ER_THD(thd, ER_UPDATE_INFO_WITH_SYSTEM_VERSIONING),
- (ulong) found, (ulong) updated, (ulong) updated_sys_ver,
+ (ulong) found, (ulong) updated, (ulong) rows_inserted,
(ulong) thd->get_stmt_da()->current_statement_warn_count());
my_ok(thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
id, buff);
@@ -1256,7 +1355,7 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
TABLE *table= table_list->table;
#endif
List<Item> all_fields;
- SELECT_LEX *select_lex= &thd->lex->select_lex;
+ SELECT_LEX *select_lex= thd->lex->first_select_lex();
DBUG_ENTER("mysql_prepare_update");
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -1267,6 +1366,13 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
thd->lex->allow_sum_func.clear_all();
+ if (table_list->has_period())
+ {
+ *conds= select_lex->period_setup_conds(thd, table_list, *conds);
+ if (!*conds)
+ DBUG_RETURN(true);
+ }
+
/*
We do not call DT_MERGE_FOR_INSERT because it has no sense for simple
(not multi-) update
@@ -1275,8 +1381,7 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
if (setup_tables_and_check_access(thd, &select_lex->context,
- &select_lex->top_join_list,
- table_list,
+ &select_lex->top_join_list, table_list,
select_lex->leaf_tables,
FALSE, UPDATE_ACL, SELECT_ACL, TRUE) ||
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
@@ -1286,6 +1391,7 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
setup_ftfuncs(select_lex))
DBUG_RETURN(TRUE);
+
select_lex->fix_prepare_information(thd, conds, &fake_conds);
DBUG_RETURN(FALSE);
}
@@ -1533,7 +1639,7 @@ int mysql_multi_update_prepare(THD *thd)
LEX *lex= thd->lex;
TABLE_LIST *table_list= lex->query_tables;
TABLE_LIST *tl;
- List<Item> *fields= &lex->select_lex.item_list;
+ List<Item> *fields= &lex->first_select_lex()->item_list;
table_map tables_for_update;
bool update_view= 0;
/*
@@ -1575,14 +1681,26 @@ int mysql_multi_update_prepare(THD *thd)
if (mysql_handle_derived(lex, DT_PREPARE))
DBUG_RETURN(TRUE);
- if (setup_tables_and_check_access(thd, &lex->select_lex.context,
- &lex->select_lex.top_join_list,
+ if (table_list->has_period())
+ {
+ /*
+      Multi-table update is not supported at the syntax level. However, it is
+      possible to get here through PREPARE with an update of a multi-table view.
+ */
+ DBUG_ASSERT(table_list->is_view_or_derived());
+ my_error(ER_IT_IS_A_VIEW, MYF(0), table_list->table_name.str);
+ DBUG_RETURN(TRUE);
+ }
+
+ if (setup_tables_and_check_access(thd,
+ &lex->first_select_lex()->context,
+ &lex->first_select_lex()->top_join_list,
table_list,
- lex->select_lex.leaf_tables, FALSE,
- UPDATE_ACL, SELECT_ACL, FALSE))
+ lex->first_select_lex()->leaf_tables,
+ FALSE, UPDATE_ACL, SELECT_ACL, FALSE))
DBUG_RETURN(TRUE);
- if (lex->select_lex.handle_derived(thd->lex, DT_MERGE))
+ if (lex->first_select_lex()->handle_derived(thd->lex, DT_MERGE))
DBUG_RETURN(TRUE);
if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
@@ -1598,20 +1716,21 @@ int mysql_multi_update_prepare(THD *thd)
}
}
- if (check_fields(thd, *fields, update_view))
+ if (check_fields(thd, table_list, *fields, update_view))
{
DBUG_RETURN(TRUE);
}
thd->table_map_for_update= tables_for_update= get_table_map(fields);
- if (unsafe_key_update(lex->select_lex.leaf_tables, tables_for_update))
+ if (unsafe_key_update(lex->first_select_lex()->leaf_tables,
+ tables_for_update))
DBUG_RETURN(true);
/*
Setup timestamp handling and locking mode
*/
- List_iterator<TABLE_LIST> ti(lex->select_lex.leaf_tables);
+ List_iterator<TABLE_LIST> ti(lex->first_select_lex()->leaf_tables);
while ((tl= ti++))
{
TABLE *table= tl->table;
@@ -1705,7 +1824,7 @@ int mysql_multi_update_prepare(THD *thd)
Check that we are not using table that we are updating, but we should
skip all tables of UPDATE SELECT itself
*/
- lex->select_lex.exclude_from_table_unique_test= TRUE;
+ lex->first_select_lex()->exclude_from_table_unique_test= TRUE;
/* We only need SELECT privilege for columns in the values list */
ti.rewind();
while ((tl= ti++))
@@ -1727,7 +1846,7 @@ int mysql_multi_update_prepare(THD *thd)
Set exclude_from_table_unique_test value back to FALSE. It is needed for
further check in multi_update::prepare whether to use record cache.
*/
- lex->select_lex.exclude_from_table_unique_test= FALSE;
+ lex->first_select_lex()->exclude_from_table_unique_test= FALSE;
if (lex->save_prep_leaf_tables())
DBUG_RETURN(TRUE);
@@ -1756,7 +1875,7 @@ bool mysql_multi_update(THD *thd,
DBUG_ENTER("mysql_multi_update");
if (!(*result= new (thd->mem_root) multi_update(thd, table_list,
- &thd->lex->select_lex.leaf_tables,
+ &thd->lex->first_select_lex()->leaf_tables,
fields, values,
handle_duplicates, ignore)))
{
@@ -2259,11 +2378,11 @@ int multi_update::prepare2(JOIN *join)
{
if (item_rowid_table(*it2) != tbl)
continue;
- Item *fld= new (thd->mem_root)
- Item_field(thd, (*it)->get_tmp_table_field());
+ Item_field *fld= new (thd->mem_root)
+ Item_field(thd, (*it)->get_tmp_table_field());
if (!fld)
return 1;
- fld->set_result_field((*it2)->get_tmp_table_field());
+ fld->result_field= (*it2)->get_tmp_table_field();
*it2= fld;
}
}
@@ -2395,7 +2514,7 @@ int multi_update::send_data(List<Item> &not_used_values)
myf flags= 0;
if (table->file->is_fatal_error(error, HA_CHECK_ALL))
- flags|= ME_FATALERROR; /* Other handler errors are fatal */
+ flags|= ME_FATAL; /* Other handler errors are fatal */
prepare_record_for_error_message(error, table);
table->file->print_error(error,MYF(flags));
@@ -2550,17 +2669,10 @@ int multi_update::do_updates()
not its dependencies
*/
while(TABLE *tbl= check_opt_it++)
- {
- if (tbl->vcol_set)
- {
- bitmap_clear_all(tbl->vcol_set);
- for (Field **vf= tbl->vfield; *vf; vf++)
- {
+ if (Field **vf= tbl->vfield)
+ for (; *vf; vf++)
if (bitmap_is_set(tbl->read_set, (*vf)->field_index))
- tbl->mark_virtual_col(*vf);
- }
- }
- }
+ (*vf)->vcol_info->expr->walk(&Item::register_field_in_read_map, 1, 0);
for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
{
@@ -2650,10 +2762,10 @@ int multi_update::do_updates()
uint field_num= 0;
do
{
- if (unlikely((local_error=
- tbl->file->ha_rnd_pos(tbl->record[0],
- (uchar *) tmp_table->
- field[field_num]->ptr))))
+ String rowid;
+ tmp_table->field[field_num]->val_str(&rowid);
+ if (unlikely((local_error= tbl->file->ha_rnd_pos(tbl->record[0],
+ (uchar*)rowid.ptr()))))
{
err_table= tbl;
goto err;
@@ -2770,7 +2882,7 @@ int multi_update::do_updates()
err:
{
prepare_record_for_error_message(local_error, err_table);
- err_table->file->print_error(local_error,MYF(ME_FATALERROR));
+ err_table->file->print_error(local_error,MYF(ME_FATAL));
}
err2:
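
One more note on the FOR PORTION OF path added to sql_update.cc above: cut_fields_for_portion_of_time() clips the updated row's period to the requested portion (the lcond/rcond comparisons), and insert_portion_of_time() is then called so the clipped-off ranges are preserved, counted in rows_inserted. The standalone sketch below restates only the clipping rule, with plain integers standing in for packed datetimes; the names are illustrative.

    #include <cstdio>

    struct Row { long long start, end; };   // stand-in for the period start/end fields

    // Clip the stored period [r.start, r.end) to the portion [p_start, p_end).
    static void clip_to_portion(Row &r, long long p_start, long long p_end)
    {
      if (r.start < p_start) r.start= p_start;   // "lcond" in the patch
      if (r.end   > p_end)   r.end=   p_end;     // "rcond" in the patch
    }

    int main()
    {
      Row r= { 10, 50 };
      clip_to_portion(r, 20, 40);
      std::printf("updated row now covers [%lld, %lld)\n", r.start, r.end); // [20, 40)
      return 0;
    }
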
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 50416940960..13b5caba539 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -36,6 +36,7 @@
#include "datadict.h" // dd_frm_is_view()
#include "sql_derived.h"
#include "sql_cte.h" // check_dependencies_in_with_clauses()
+#include "opt_trace.h"
#define MD5_BUFF_LENGTH 33
@@ -255,7 +256,7 @@ bool create_view_precheck(THD *thd, TABLE_LIST *tables, TABLE_LIST *view,
LEX *lex= thd->lex;
/* first table in list is target VIEW name => cut off it */
TABLE_LIST *tbl;
- SELECT_LEX *select_lex= &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
SELECT_LEX *sl;
bool res= TRUE;
DBUG_ENTER("create_view_precheck");
@@ -324,7 +325,6 @@ bool create_view_precheck(THD *thd, TABLE_LIST *tables, TABLE_LIST *view,
}
}
- if (&lex->select_lex != lex->all_selects_list)
{
/* check tables of subqueries */
for (tbl= tables; tbl; tbl= tbl->next_global)
@@ -399,7 +399,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
TABLE_LIST *view= lex->unlink_first_table(&link_to_local);
TABLE_LIST *tables= lex->query_tables;
TABLE_LIST *tbl;
- SELECT_LEX *select_lex= &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
SELECT_LEX *sl;
SELECT_LEX_UNIT *unit= &lex->unit;
bool res= FALSE;
@@ -711,9 +711,10 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
lex->link_first_table_back(view, link_to_local);
DBUG_RETURN(0);
-
-WSREP_ERROR_LABEL:
- res= TRUE;
+#ifdef WITH_WSREP
+wsrep_error_label:
+ res= true;
+#endif
err:
lex->link_first_table_back(view, link_to_local);
@@ -988,7 +989,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
view->algorithm != VIEW_ALGORITHM_TMPTABLE)))
{
/* TODO: change here when we will support UNIONs */
- for (TABLE_LIST *tbl= lex->select_lex.table_list.first;
+ for (TABLE_LIST *tbl= lex->first_select_lex()->table_list.first;
tbl;
tbl= tbl->next_local)
{
@@ -1107,8 +1108,8 @@ loop_out:
UNION
*/
if (view->updatable_view &&
- !lex->select_lex.master_unit()->is_unit_op() &&
- !(lex->select_lex.table_list.first)->next_local &&
+ !lex->first_select_lex()->master_unit()->is_unit_op() &&
+ !(lex->first_select_lex()->table_list.first)->next_local &&
find_table_in_global_list(lex->query_tables->next_global,
&lex->query_tables->db,
&lex->query_tables->table_name))
@@ -1155,7 +1156,8 @@ err:
bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
bool open_view_no_parse)
{
- SELECT_LEX *end, *UNINIT_VAR(view_select);
+ SELECT_LEX_NODE *end;
+ SELECT_LEX *UNINIT_VAR(view_select);
LEX *old_lex, *lex;
Query_arena *arena, backup;
TABLE_LIST *top_view= table->top_table();
@@ -1198,7 +1200,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
in which case the reinit call wasn't done.
See MDEV-6668 for details.
*/
- mysql_derived_reinit(thd, NULL, table);
+ mysql_handle_single_derived(thd->lex, table, DT_REINIT);
DEBUG_SYNC(thd, "after_cached_view_opened");
DBUG_RETURN(0);
@@ -1354,8 +1356,6 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
lex_start(thd);
lex->stmt_lex= old_lex;
- view_select= &lex->select_lex;
- view_select->select_number= ++thd->lex->stmt_lex->current_select_number;
sql_mode_t saved_mode= thd->variables.sql_mode;
/* switch off modes which can prevent normal parsing of VIEW
@@ -1390,6 +1390,8 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
parse_status= parse_sql(thd, & parser_state, table->view_creation_ctx);
+ view_select= lex->first_select_lex();
+
/* Restore environment. */
if ((old_lex->sql_command == SQLCOM_SHOW_FIELDS) ||
@@ -1412,6 +1414,15 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
goto err;
/*
+ Check rights to run commands which show underlying tables.
+    We do not want to show the optimizer trace when the current user
+    does not have rights to the underlying tables.
+ */
+ if (!table->prelocking_placeholder)
+ opt_trace_disable_if_no_view_access(thd, table, view_tables);
+
+ /*
Check rights to run commands (ANALYZE SELECT, EXPLAIN SELECT &
SHOW CREATE) which show underlying tables.
Skip this step if we are opening view for prelocking only.
@@ -1539,7 +1550,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
This may change in future, for example if we enable merging of
views with subqueries in select list.
*/
- view_main_select_tables= lex->select_lex.table_list.first;
+ view_main_select_tables= lex->first_select_lex()->table_list.first;
/*
Let us set proper lock type for tables of the view's main
@@ -1566,7 +1577,7 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
/* Fields in this view can be used in upper select in case of merge. */
if (table->select_lex)
- table->select_lex->add_where_field(&lex->select_lex);
+ table->select_lex->add_where_field(lex->first_select_lex());
}
/*
This method has a dependency on the proper lock type being set,
@@ -1588,8 +1599,8 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
old_lex->safe_to_cache_query= (old_lex->safe_to_cache_query &&
lex->safe_to_cache_query);
/* move SQL_CACHE to whole query */
- if (view_select->options & OPTION_TO_QUERY_CACHE)
- old_lex->select_lex.options|= OPTION_TO_QUERY_CACHE;
+ if (lex->first_select_lex()->options & OPTION_TO_QUERY_CACHE)
+ old_lex->first_select_lex()->options|= OPTION_TO_QUERY_CACHE;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (table->view_suid)
@@ -1671,9 +1682,10 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
tbl->grant.want_privilege= top_view->grant.orig_want_privilege;
/* prepare view context */
- lex->select_lex.context.resolve_in_table_list_only(view_main_select_tables);
- lex->select_lex.context.outer_context= 0;
- lex->select_lex.select_n_having_items+=
+ lex->first_select_lex()->
+ context.resolve_in_table_list_only(view_main_select_tables);
+ lex->first_select_lex()->context.outer_context= 0;
+ lex->first_select_lex()->select_n_having_items+=
table->select_lex->select_n_having_items;
table->where= view_select->where;
@@ -1684,12 +1696,13 @@ bool mysql_make_view(THD *thd, TABLE_SHARE *share, TABLE_LIST *table,
*/
if (!table->select_lex->master_unit()->is_unit_op() &&
table->select_lex->order_list.elements == 0)
- table->select_lex->order_list.push_back(&lex->select_lex.order_list);
+ table->select_lex->order_list.
+ push_back(&lex->first_select_lex()->order_list);
else
{
if (old_lex->sql_command == SQLCOM_SELECT &&
(old_lex->describe & DESCRIBE_EXTENDED) &&
- lex->select_lex.order_list.elements &&
+ lex->first_select_lex()->order_list.elements &&
!table->select_lex->master_unit()->is_unit_op())
{
push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
@@ -1723,7 +1736,11 @@ ok:
lex->unit.include_down(table->select_lex);
lex->unit.slave= view_select; // fix include_down initialisation
/* global SELECT list linking */
- end= view_select; // primary SELECT_LEX is always last
+ /*
+    The primary SELECT_LEX is always last (because it is parsed first) when
+    WITH is not used; otherwise it is a good starting point for finding the
+    last element.
+ */
+ for (end= view_select; end->link_next; end= end->link_next);
end->link_next= old_lex->all_selects_list;
old_lex->all_selects_list->link_prev= &end->link_next;
old_lex->all_selects_list= lex->all_selects_list;
@@ -1908,7 +1925,7 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view)
*/
if ((!view->view && !view->belong_to_view) ||
thd->lex->sql_command == SQLCOM_INSERT ||
- thd->lex->select_lex.select_limit == 0)
+ thd->lex->first_select_lex()->select_limit == 0)
DBUG_RETURN(FALSE); /* it is normal table or query without LIMIT */
table= view->table;
view= view->top_table();
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index b9f6a64b378..d09aa85d402 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2010, 2016, MariaDB
+ Copyright (c) 2010, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -68,6 +68,7 @@
#include "sql_lex.h"
#include "sql_sequence.h"
#include "my_base.h"
+#include "sql_type_json.h"
/* this is to get the bison compilation windows warnings out */
#ifdef _MSC_VER
@@ -491,96 +492,6 @@ Item* handle_sql2003_note184_exception(THD *thd, Item* left, bool equal,
}
/**
- @brief Creates a new SELECT_LEX for a UNION branch.
-
- Sets up and initializes a SELECT_LEX structure for a query once the parser
- discovers a UNION token. The current SELECT_LEX is pushed on the stack and
- the new SELECT_LEX becomes the current one.
-
- @param lex The parser state.
-
- @param is_union_distinct True if the union preceding the new select
- statement uses UNION DISTINCT.
-
- @param is_top_level This should be @c TRUE if the newly created SELECT_LEX
- is a non-nested statement.
-
- @return <code>false</code> if successful, <code>true</code> if an error was
- reported. In the latter case parsing should stop.
- */
-bool LEX::add_select_to_union_list(bool is_union_distinct,
- enum sub_select_type type,
- bool is_top_level)
-{
- const char *type_name= (type == INTERSECT_TYPE ? "INTERSECT" :
- (type == EXCEPT_TYPE ? "EXCEPT" : "UNION"));
- /*
- Only the last SELECT can have INTO. Since the grammar won't allow INTO in
- a nested SELECT, we make this check only when creating a top-level SELECT.
- */
- if (is_top_level && result)
- {
- my_error(ER_WRONG_USAGE, MYF(0), type_name, "INTO");
- return TRUE;
- }
- if (current_select->order_list.first && !current_select->braces)
- {
- my_error(ER_WRONG_USAGE, MYF(0), type_name, "ORDER BY");
- return TRUE;
- }
-
- if (current_select->explicit_limit && !current_select->braces)
- {
- my_error(ER_WRONG_USAGE, MYF(0), type_name, "LIMIT");
- return TRUE;
- }
- if (current_select->linkage == GLOBAL_OPTIONS_TYPE)
- {
- thd->parse_error();
- return TRUE;
- }
- if (!is_union_distinct && (type == INTERSECT_TYPE || type == EXCEPT_TYPE))
- {
- my_error(ER_WRONG_USAGE, MYF(0), type_name, "ALL");
- return TRUE;
- }
- /*
- Priority implementation, but also trying to keep things as flat
- as possible */
- if (type == INTERSECT_TYPE &&
- (current_select->linkage != INTERSECT_TYPE &&
- current_select != current_select->master_unit()->first_select())
- && !(thd->variables.sql_mode & MODE_ORACLE))
- {
- /*
- This and previous SELECTs should go one level down because of
- priority
- */
- SELECT_LEX *prev= exclude_last_select();
- if (add_unit_in_brackets(prev))
- return TRUE;
- return add_select_to_union_list(is_union_distinct, type, 0);
- }
- else
- {
- check_automatic_up(type);
- }
- /* This counter shouldn't be incremented for UNION parts */
- nest_level--;
- if (mysql_new_select(this, 0, NULL))
- return TRUE;
- mysql_init_select(this);
- current_select->linkage= type;
- current_select->with_all_modifier= !is_union_distinct;
- if (is_union_distinct) /* UNION DISTINCT - remember position */
- current_select->master_unit()->union_distinct= current_select;
- else
- DBUG_ASSERT(type == UNION_TYPE);
- return FALSE;
-}
-
-
-/**
Create a separate LEX for each assignment if in SP.
If we are in SP we want have own LEX for each assignment.
@@ -621,6 +532,7 @@ void sp_create_assignment_lex(THD *thd, bool no_lookahead)
lex->sphead->m_tmp_query= lip->get_tok_end();
/* Inherit from outer lex. */
lex->option_type= old_lex->option_type;
+ lex->main_select_push();
}
}
@@ -642,8 +554,6 @@ bool sp_create_assignment_instr(THD *thd, bool no_lookahead)
if (lex->sphead)
{
- sp_head *sp= lex->sphead;
-
if (!lex->var_list.is_empty())
{
/*
@@ -651,35 +561,22 @@ bool sp_create_assignment_instr(THD *thd, bool no_lookahead)
option setting, so we should construct sp_instr_stmt
for it.
*/
- LEX_STRING qbuff;
- sp_instr_stmt *i;
Lex_input_stream *lip= &thd->m_parser_state->m_lip;
- if (!(i= new (thd->mem_root)
- sp_instr_stmt(sp->instructions(), lex->spcont, lex)))
- return true;
-
/*
Extract the query statement from the tokenizer. The
end is either lip->ptr, if there was no lookahead,
lip->tok_end otherwise.
*/
- if (no_lookahead)
- qbuff.length= lip->get_ptr() - sp->m_tmp_query;
- else
- qbuff.length= lip->get_tok_end() - sp->m_tmp_query;
-
- if (!(qbuff.str= (char*) alloc_root(thd->mem_root,
- qbuff.length + 5)))
- return true;
-
- strmake(strmake(qbuff.str, "SET ", 4), sp->m_tmp_query,
- qbuff.length);
- qbuff.length+= 4;
- i->m_query= qbuff;
- if (sp->add_instr(i))
+ static const LEX_CSTRING setsp= { STRING_WITH_LEN("SET ") };
+ const char *qend= no_lookahead ? lip->get_ptr() : lip->get_tok_end();
+ Lex_cstring qbuf(lex->sphead->m_tmp_query, qend);
+ if (lex->new_sp_instr_stmt(thd, setsp, qbuf))
return true;
}
+ lex->pop_select();
+ if (Lex->check_main_unit_semantics())
+ return true;
enum_var_type inner_option_type= lex->option_type;
if (lex->sphead->restore_lex(thd))
return true;
@@ -767,6 +664,7 @@ Virtual_column_info *add_virtual_expression(THD *thd, Item *expr)
return v;
}
+
%}
%union {
int num;
@@ -791,6 +689,20 @@ Virtual_column_info *add_virtual_expression(THD *thd, Item *expr)
Lex_for_loop_bounds_st for_loop_bounds;
Lex_trim_st trim;
vers_history_point_t vers_history_point;
+ struct
+ {
+ enum sub_select_type unit_type;
+ bool distinct;
+ } unit_operation;
+ struct
+ {
+ SELECT_LEX *first;
+ SELECT_LEX *prev_last;
+ } select_list;
+ SQL_I_List<ORDER> *select_order;
+ Lex_select_lock select_lock;
+ Lex_select_limit select_limit;
+ Lex_order_limit_lock *order_limit_lock;
/* pointers */
Create_field *create_field;
@@ -811,6 +723,7 @@ Virtual_column_info *add_virtual_expression(THD *thd, Item *expr)
class sp_lex_cursor *sp_cursor_stmt;
LEX_CSTRING *lex_str_ptr;
LEX_USER *lex_user;
+ USER_AUTH *user_auth;
List<Condition_information_item> *cond_info_list;
List<DYNCALL_CREATE_DEF> *dyncol_def_list;
List<Item> *item_list;
@@ -836,6 +749,7 @@ Virtual_column_info *add_virtual_expression(THD *thd, Item *expr)
handlerton *db_type;
st_select_lex *select_lex;
+ st_select_lex_unit *select_lex_unit;
struct p_elem_val *p_elem_value;
class Window_frame *window_frame;
class Window_frame_bound *window_frame_bound;
@@ -843,8 +757,9 @@ Virtual_column_info *add_virtual_expression(THD *thd, Item *expr)
st_trg_execution_order trg_execution_order;
/* enums */
+ enum enum_sp_suid_behaviour sp_suid;
+ enum enum_sp_aggregate_type sp_aggregate_type;
enum enum_view_suid view_suid;
- enum sub_select_type unit_type;
enum Condition_information_item::Name cond_info_item_name;
enum enum_diag_condition_item_name diag_condition_item_name;
enum Diagnostics_information::Which_area diag_area;
@@ -877,6 +792,8 @@ Virtual_column_info *add_virtual_expression(THD *thd, Item *expr)
}
%{
+/* Avoid unintentional %union size increases; it's what the parser stack is made of */
+static_assert(sizeof(YYSTYPE) == sizeof(void*)*2+8, "%union size check");
bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%}
@@ -884,10 +801,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%parse-param { THD *thd }
%lex-param { THD *thd }
/*
- Currently there are 52 shift/reduce conflicts.
+ Currently there are 48 shift/reduce conflicts.
We should not introduce new conflicts any more.
*/
-%expect 52
+%expect 48
/*
Comments for TOKENS.
@@ -1044,6 +961,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token LEADING /* SQL-2003-R */
%token LEAVE_SYM
%token LEFT /* SQL-2003-R */
+%token LEFT_PAREN_ALT /* INTERNAL */
+%token LEFT_PAREN_WITH /* INTERNAL */
+%token LEFT_PAREN_LIKE /* INTERNAL */
%token LEX_HOSTNAME
%token LIKE /* SQL-2003-R */
%token LIMIT
@@ -1106,6 +1026,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token PERCENT_RANK_SYM
%token PERCENTILE_CONT_SYM
%token PERCENTILE_DISC_SYM
+%token PORTION_SYM /* SQL-2016-R */
%token POSITION_SYM /* SQL-2003-N */
%token PRECISION /* SQL-2003-R */
%token PRIMARY_SYM /* SQL-2003-R */
@@ -1234,6 +1155,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
Non-reserved keywords
*/
+%token <kwd> ACCOUNT_SYM /* MYSQL */
%token <kwd> ACTION /* SQL-2003-N */
%token <kwd> ADMIN_SYM /* SQL-2003-N */
%token <kwd> ADDDATE_SYM /* MYSQL-FUNC */
@@ -1351,6 +1273,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> EXIT_MARIADB_SYM /* PLSQL-R */
%token <kwd> EXIT_ORACLE_SYM /* PLSQL-R */
%token <kwd> EXPANSION_SYM
+%token <kwd> EXPIRE_SYM /* MySQL */
%token <kwd> EXPORT_SYM
%token <kwd> EXTENDED_SYM
%token <kwd> EXTENT_SIZE_SYM
@@ -1466,6 +1389,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> NAME_SYM /* SQL-2003-N */
%token <kwd> NATIONAL_SYM /* SQL-2003-R */
%token <kwd> NCHAR_SYM /* SQL-2003-R */
+%token <kwd> NEVER_SYM /* MySQL */
%token <kwd> NEW_SYM /* SQL-2003-R */
%token <kwd> NEXT_SYM /* SQL-2003-N */
%token <kwd> NEXTVAL_SYM /* PostgreSQL sequence function */
@@ -1589,6 +1513,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> SQL_CALC_FOUND_ROWS
%token <kwd> SQL_NO_CACHE_SYM
%token <kwd> SQL_THREAD
+%token <kwd> STAGE_SYM
%token <kwd> STARTS_SYM
%token <kwd> START_SYM /* SQL-2003-R */
%token <kwd> STATEMENT_SYM
@@ -1817,7 +1742,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
NCHAR_STRING
%type <lex_str_ptr>
- opt_table_alias
+ opt_table_alias_clause
+ table_alias_clause
%type <ident_cli>
IDENT
@@ -1882,7 +1808,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
opt_temporary all_or_any opt_distinct opt_glimit_clause
opt_ignore_leaves fulltext_options union_option
opt_not
- select_derived_init transaction_access_mode_types
+ transaction_access_mode_types
opt_natural_language_mode opt_query_expansion
opt_ev_status opt_ev_on_completion ev_on_completion opt_ev_comment
ev_alter_on_schedule_completion opt_ev_rename_to opt_ev_sql_stmt
@@ -1891,7 +1817,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
opt_default_time_precision
case_stmt_body opt_bin_mod opt_for_system_time_clause
opt_if_exists_table_element opt_if_not_exists_table_element
- opt_recursive opt_format_xid
+ opt_recursive opt_format_xid opt_for_portion_of_time_clause
%type <object_ddl_options>
create_or_replace
@@ -1931,7 +1857,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <item>
literal insert_ident order_ident temporal_literal
- simple_ident expr sum_expr in_sum_expr
+ simple_ident expr expr_no_subselect sum_expr in_sum_expr
variable variable_aux bool_pri
predicate bit_expr parenthesized_expr
table_wild simple_expr column_default_non_parenthesized_expr udf_expr
@@ -1972,6 +1898,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
expr_list opt_udf_expr_list udf_expr_list when_list when_list_opt_else
ident_list ident_list_arg opt_expr_list
decode_when_list_oracle
+ execute_using
+ execute_params
%type <sp_cursor_stmt>
sp_cursor_stmt_lex
@@ -2005,11 +1933,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
join_table_list join_table
table_factor table_ref esc_table_ref
table_primary_ident table_primary_derived
- select_derived derived_table_list
- select_derived_union
- derived_simple_table
- derived_query_specification
- derived_table_value_constructor
+ derived_table_list table_reference_list_parens
+ nested_table_reference_list join_table_parens
+ update_table_list
%type <date_time_type> date_time_type;
%type <interval> interval
@@ -2030,6 +1956,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <lex_user> user grant_user grant_role user_or_role current_role
admin_option_for_role user_maybe_role
+%type <user_auth> opt_auth_str auth_expression auth_token
+
%type <charset>
opt_collate
charset_name
@@ -2043,14 +1971,19 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
UNDERSCORE_CHARSET
%type <select_lex> subselect
- get_select_lex get_select_lex_derived
- simple_table
query_specification
- query_term_union_not_ready
- query_term_union_ready
- query_expression_body
- select_paren_derived
table_value_constructor
+ simple_table
+ query_primary
+ query_primary_parens
+ select_into_query_specification
+
+
+%type <select_lex_unit>
+ query_specification_start
+ query_expression_body
+ query_expression
+ query_expression_unit
%type <boolfunc2creator> comp_op
@@ -2062,11 +1995,28 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <virtual_column> opt_check_constraint check_constraint virtual_column_func
column_default_expr
-%type <unit_type> unit_type_decl
+
+%type <unit_operation> unit_type_decl
+
+%type <select_lock>
+ opt_procedure_or_into
+ opt_select_lock_type
+ select_lock_type
+ opt_lock_wait_timeout_new
+
+%type <select_limit> opt_limit_clause limit_clause limit_options
+
+%type <order_limit_lock>
+ query_expression_tail
+ order_or_limit
+ opt_order_limit_lock
+
+%type <select_order> opt_order_clause order_clause order_list
%type <NONE>
- analyze_stmt_command
- query verb_clause create change select do drop insert replace insert2
+ analyze_stmt_command backup backup_statements
+ query verb_clause create change select select_into
+ do drop insert replace insert2
insert_values update delete truncate rename compound_statement
show describe load alter optimize keycache preload flush
reset purge begin_stmt_mariadb commit rollback savepoint release
@@ -2082,7 +2032,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
assign_to_keycache_parts
preload_list preload_list_or_parts preload_keys preload_keys_parts
select_item_list select_item values_list no_braces
- opt_limit_clause delete_limit_clause fields opt_values values
+ delete_limit_clause fields opt_values values
no_braces_with_names opt_values_with_names values_with_names
procedure_list procedure_list2 procedure_item
field_def handler opt_generated_always
@@ -2103,18 +2053,16 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
table_to_table_list table_to_table opt_table_list opt_as
handler_rkey_function handler_read_or_scan
single_multi table_wild_list table_wild_one opt_wild
- union_clause union_list
- subselect_start opt_and charset
- subselect_end select_var_list select_var_list_init help
+ opt_and charset
+ select_var_list select_var_list_init help
opt_extended_describe shutdown
opt_format_json
- prepare prepare_src execute deallocate
- statement sp_suid
+ prepare execute deallocate
+ statement
sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa
opt_field_or_var_spec fields_or_vars opt_load_data_set_spec
view_list_opt view_list view_select
- trigger_tail sp_tail sf_tail event_tail
- udf_tail create_function_tail create_aggregate_function_tail
+ trigger_tail sp_tail event_tail
install uninstall partition_entry binlog_base64_event
normal_key_options normal_key_opts all_key_opt
spatial_key_options fulltext_key_options normal_key_opt
@@ -2123,6 +2071,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
key_using_alg
part_column_list
period_for_system_time
+ period_for_application_time
server_def server_options_list server_option
definer_opt no_definer definer get_diagnostics
parse_vcol_expr vcol_opt_specifier vcol_opt_attribute
@@ -2155,6 +2104,8 @@ END_OF_INPUT
%type <view_suid> view_suid opt_view_suid
%type <plsql_cursor_attr> plsql_cursor_attr
+%type <sp_suid> sp_suid
+%type <sp_aggregate_type> opt_aggregate
%type <num> sp_decl_idents sp_decl_idents_init_vars
%type <num> sp_handler_type sp_hcond_list
@@ -2240,8 +2191,8 @@ rule: <-- starts at col 1
query:
END_OF_INPUT
{
- if (likely(!thd->bootstrap) &&
- unlikely(!(thd->lex->select_lex.options & OPTION_FOUND_COMMENT)))
+ if (!thd->bootstrap &&
+ (!(thd->lex->lex_options & OPTION_LEX_FOUND_COMMENT)))
my_yyabort_error((ER_EMPTY_QUERY, MYF(0)));
thd->lex->sql_command= SQLCOM_EMPTY_QUERY;
@@ -2295,6 +2246,7 @@ statement:
alter
| analyze
| analyze_stmt_command
+ | backup
| binlog_base64_event
| call
| change
@@ -2337,6 +2289,7 @@ statement:
| rollback
| savepoint
| select
+ | select_into
| set
| signal_stmt
| show
@@ -2354,9 +2307,7 @@ statement:
deallocate:
deallocate_or_drop PREPARE_SYM ident
{
- LEX *lex= thd->lex;
- lex->sql_command= SQLCOM_DEALLOCATE_PREPARE;
- lex->prepared_stmt_name= $3;
+ Lex->stmt_deallocate_prepare($3);
}
;
@@ -2366,72 +2317,59 @@ deallocate_or_drop:
;
prepare:
- PREPARE_SYM ident FROM prepare_src
+ PREPARE_SYM ident FROM expr_no_subselect
{
- LEX *lex= thd->lex;
- if (unlikely(lex->table_or_sp_used()))
- my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0),
- "PREPARE..FROM"));
- lex->sql_command= SQLCOM_PREPARE;
- lex->prepared_stmt_name= $2;
+ if (Lex->stmt_prepare($2, $4))
+ MYSQL_YYABORT;
}
;
-prepare_src:
+expr_no_subselect:
{ Lex->expr_allows_subselect= false; }
expr
{
- Lex->prepared_stmt_code= $2;
Lex->expr_allows_subselect= true;
+ $$= $2;
}
;
execute:
- EXECUTE_SYM ident
+ EXECUTE_SYM ident execute_using
{
- LEX *lex= thd->lex;
- lex->sql_command= SQLCOM_EXECUTE;
- lex->prepared_stmt_name= $2;
+ if (Lex->stmt_execute($2, $3))
+ MYSQL_YYABORT;
}
- execute_using
- {}
- | EXECUTE_SYM IMMEDIATE_SYM prepare_src
+ | EXECUTE_SYM IMMEDIATE_SYM expr_no_subselect execute_using
{
- if (unlikely(Lex->table_or_sp_used()))
- my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0),
- "EXECUTE IMMEDIATE"));
- Lex->sql_command= SQLCOM_EXECUTE_IMMEDIATE;
+ if (Lex->stmt_execute_immediate($3, $4))
+ MYSQL_YYABORT;
}
- execute_using
- {}
;
execute_using:
- /* nothing */
+ /* nothing */ { $$= NULL; }
| USING { Lex->expr_allows_subselect= false; }
- execute_var_list
+ execute_params
{
- if (unlikely(Lex->table_or_sp_used()))
- my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0),
- "EXECUTE..USING"));
+ $$= $3;
Lex->expr_allows_subselect= true;
}
;
-execute_var_list:
- execute_var_list ',' execute_var_ident
- | execute_var_ident
- ;
-
-execute_var_ident:
+execute_params:
expr_or_default
{
- if (unlikely(Lex->prepared_stmt_params.push_back($1,
- thd->mem_root)))
+ if (unlikely(!($$= List<Item>::make(thd->mem_root, $1))))
+ MYSQL_YYABORT;
+ }
+ | execute_params ',' expr_or_default
+ {
+ if (($$= $1)->push_back($3, thd->mem_root))
MYSQL_YYABORT;
}
;
+
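The rewritten PREPARE/EXECUTE rules above delegate to the new LEX helpers (stmt_prepare, stmt_execute, stmt_execute_immediate, stmt_deallocate_prepare) and share a single expr_no_subselect source plus an execute_params item list. For illustration only (table and variable names are invented), the statement shapes handled here are:

    PREPARE stmt FROM 'SELECT * FROM t1 WHERE id = ?';
    EXECUTE stmt USING @id;
    EXECUTE IMMEDIATE 'SELECT ? + ?' USING 10, 20;
    DEALLOCATE PREPARE stmt;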
/* help */
help:
@@ -2690,17 +2628,22 @@ connection_name:
/* create a table */
create:
- create_or_replace opt_temporary TABLE_SYM opt_if_not_exists table_ident
+ create_or_replace opt_temporary TABLE_SYM opt_if_not_exists
{
LEX *lex= thd->lex;
lex->create_info.init();
- if (unlikely(lex->set_command_with_check(SQLCOM_CREATE_TABLE, $2,
- $1 | $4)))
+ if (lex->main_select_push())
+ MYSQL_YYABORT;
+ lex->current_select->parsing_place= BEFORE_OPT_LIST;
+ if (lex->set_command_with_check(SQLCOM_CREATE_TABLE, $2, $1 | $4))
MYSQL_YYABORT;
- if (unlikely(!lex->select_lex.add_table_to_list(thd, $5, NULL,
- TL_OPTION_UPDATING,
- TL_WRITE,
- MDL_EXCLUSIVE)))
+ }
+ table_ident
+ {
+ LEX *lex= thd->lex;
+ if (!lex->first_select_lex()->
+ add_table_to_list(thd, $6, NULL, TL_OPTION_UPDATING,
+ TL_WRITE, MDL_SHARED_UPGRADABLE))
MYSQL_YYABORT;
lex->alter_info.reset();
/*
@@ -2715,7 +2658,6 @@ create:
create_body
{
LEX *lex= thd->lex;
- lex->current_select= &lex->select_lex;
if ((lex->create_info.used_fields & HA_CREATE_USED_ENGINE) &&
!lex->create_info.db_type)
{
@@ -2724,22 +2666,24 @@ create:
ER_WARN_USING_OTHER_HANDLER,
ER_THD(thd, ER_WARN_USING_OTHER_HANDLER),
hton_name(lex->create_info.db_type)->str,
- $5->table.str);
+ $6->table.str);
}
create_table_set_open_action_and_adjust_tables(lex);
+ Lex->pop_select(); //main select
}
| create_or_replace opt_temporary SEQUENCE_SYM opt_if_not_exists table_ident
{
LEX *lex= thd->lex;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
lex->create_info.init();
if (unlikely(lex->set_command_with_check(SQLCOM_CREATE_SEQUENCE, $2,
$1 | $4)))
MYSQL_YYABORT;
- if (unlikely(!lex->select_lex.add_table_to_list(thd, $5, NULL,
- TL_OPTION_UPDATING,
- TL_WRITE,
- MDL_EXCLUSIVE)))
+ if (!lex->first_select_lex()->
+ add_table_to_list(thd, $5, NULL, TL_OPTION_UPDATING,
+ TL_WRITE, MDL_EXCLUSIVE))
MYSQL_YYABORT;
/*
@@ -2762,8 +2706,9 @@ create:
if (unlikely(lex->create_info.seq_create_info->check_and_adjust(1)))
{
my_error(ER_SEQUENCE_INVALID_DATA, MYF(0),
- lex->select_lex.table_list.first->db.str,
- lex->select_lex.table_list.first->table_name.str);
+ lex->first_select_lex()->table_list.first->db.str,
+ lex->first_select_lex()->table_list.first->
+ table_name.str);
MYSQL_YYABORT;
}
@@ -2776,10 +2721,8 @@ create:
Lex->create_info.used_fields|= HA_CREATE_USED_SEQUENCE;
Lex->create_info.sequence= 1;
- lex->current_select= &lex->select_lex;
- if (unlikely((lex->create_info.used_fields &
- HA_CREATE_USED_ENGINE) &&
- !lex->create_info.db_type))
+ if ((lex->create_info.used_fields & HA_CREATE_USED_ENGINE) &&
+ !lex->create_info.db_type)
{
lex->create_info.use_default_db_type(thd);
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
@@ -2789,44 +2732,69 @@ create:
$5->table.str);
}
create_table_set_open_action_and_adjust_tables(lex);
+ Lex->pop_select(); //main select
}
- | create_or_replace opt_unique INDEX_SYM opt_if_not_exists ident
+ | create_or_replace opt_unique INDEX_SYM opt_if_not_exists
+ {
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ }
+ ident
opt_key_algorithm_clause
ON table_ident
{
- if (unlikely(Lex->add_create_index_prepare($8)))
+ if (Lex->add_create_index_prepare($9))
MYSQL_YYABORT;
- if (unlikely(Lex->add_create_index($2, &$5, $6, $1 | $4)))
+ if (Lex->add_create_index($2, &$6, $7, $1 | $4))
MYSQL_YYABORT;
}
'(' key_list ')' opt_lock_wait_timeout normal_key_options
- opt_index_lock_algorithm { }
- | create_or_replace fulltext INDEX_SYM opt_if_not_exists ident
+ opt_index_lock_algorithm
+ {
+ Lex->pop_select(); //main select
+ }
+ | create_or_replace fulltext INDEX_SYM
+ {
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ }
+ opt_if_not_exists ident
ON table_ident
{
- if (unlikely(Lex->add_create_index_prepare($7)))
+ if (Lex->add_create_index_prepare($8))
MYSQL_YYABORT;
- if (unlikely(Lex->add_create_index($2, &$5, HA_KEY_ALG_UNDEF,
- $1 | $4)))
+ if (Lex->add_create_index($2, &$6, HA_KEY_ALG_UNDEF, $1 | $5))
MYSQL_YYABORT;
}
'(' key_list ')' opt_lock_wait_timeout fulltext_key_options
- opt_index_lock_algorithm { }
- | create_or_replace spatial INDEX_SYM opt_if_not_exists ident
+ opt_index_lock_algorithm
+ {
+ Lex->pop_select(); //main select
+ }
+ | create_or_replace spatial INDEX_SYM
+ {
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ }
+ opt_if_not_exists ident
ON table_ident
{
- if (unlikely(Lex->add_create_index_prepare($7)))
+ if (Lex->add_create_index_prepare($8))
MYSQL_YYABORT;
- if (unlikely(Lex->add_create_index($2, &$5, HA_KEY_ALG_UNDEF,
- $1 | $4)))
+ if (Lex->add_create_index($2, &$6, HA_KEY_ALG_UNDEF, $1 | $5))
MYSQL_YYABORT;
}
'(' key_list ')' opt_lock_wait_timeout spatial_key_options
- opt_index_lock_algorithm { }
+ opt_index_lock_algorithm
+ {
+ Lex->pop_select(); //main select
+ }
| create_or_replace DATABASE opt_if_not_exists ident
{
Lex->create_info.default_table_charset= NULL;
Lex->create_info.used_fields= 0;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
}
opt_create_database_options
{
@@ -2835,61 +2803,95 @@ create:
$1 | $3)))
MYSQL_YYABORT;
lex->name= $4;
+ Lex->pop_select(); //main select
}
| create_or_replace definer_opt opt_view_suid VIEW_SYM
opt_if_not_exists table_ident
{
- if (unlikely(Lex->add_create_view(thd, $1 | $5,
- DTYPE_ALGORITHM_UNDEFINED, $3,
- $6)))
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ if (Lex->add_create_view(thd, $1 | $5,
+ DTYPE_ALGORITHM_UNDEFINED, $3, $6))
MYSQL_YYABORT;
}
view_list_opt AS view_select
- { }
+ {
+ Lex->pop_select(); //main select
+ }
| create_or_replace view_algorithm definer_opt opt_view_suid VIEW_SYM
opt_if_not_exists table_ident
{
if (unlikely(Lex->add_create_view(thd, $1 | $6, $2, $4, $7)))
MYSQL_YYABORT;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
}
view_list_opt AS view_select
- { }
+ {
+ Lex->pop_select(); //main select
+ }
| create_or_replace definer_opt TRIGGER_SYM
- { Lex->create_info.set($1); }
+ {
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ Lex->create_info.set($1);
+ }
trigger_tail
- { }
- | create_or_replace definer_opt PROCEDURE_SYM
- { Lex->create_info.set($1); }
+ {
+ Lex->pop_select(); //main select
+ }
+ | create_or_replace definer_opt PROCEDURE_SYM opt_if_not_exists
+ {
+ if (Lex->stmt_create_procedure_start($1 | $4))
+ MYSQL_YYABORT;
+ }
sp_tail
- { }
+ {
+ Lex->stmt_create_routine_finalize();
+ }
| create_or_replace definer_opt EVENT_SYM
- { Lex->create_info.set($1); }
- event_tail
- { }
- | create_or_replace definer FUNCTION_SYM
{
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
Lex->create_info.set($1);
}
- sf_tail_not_aggregate
- { }
- | create_or_replace definer AGGREGATE_SYM FUNCTION_SYM
+ event_tail
{
- Lex->create_info.set($1);
+ Lex->pop_select(); //main select
}
- sf_tail_aggregate
- { }
- | create_or_replace no_definer FUNCTION_SYM
- { Lex->create_info.set($1); }
- create_function_tail
- { }
- | create_or_replace no_definer AGGREGATE_SYM FUNCTION_SYM
+ | create_or_replace definer opt_aggregate FUNCTION_SYM opt_if_not_exists
+ sp_name '('
{
- Lex->create_info.set($1);
+ if (Lex->stmt_create_stored_function_start($1 | $5, $3, $6))
+ MYSQL_YYABORT;
}
- create_aggregate_function_tail
- { }
- | create_or_replace USER_SYM opt_if_not_exists clear_privileges grant_list
- opt_require_clause opt_resource_options
+ sp_fdparam_list ')'
+ sf_return_type
+ sf_c_chistics_and_body
+ {
+ Lex->stmt_create_routine_finalize();
+ }
+ | create_or_replace no_definer opt_aggregate FUNCTION_SYM opt_if_not_exists
+ sp_name '('
+ {
+ if (Lex->stmt_create_stored_function_start($1 | $5, $3, $6))
+ MYSQL_YYABORT;
+ }
+ sp_fdparam_list ')'
+ sf_return_type
+ sf_c_chistics_and_body
+ {
+ Lex->stmt_create_routine_finalize();
+ }
+ | create_or_replace no_definer opt_aggregate FUNCTION_SYM opt_if_not_exists
+ ident RETURNS_SYM udf_type SONAME_SYM TEXT_STRING_sys
+ {
+ if (Lex->stmt_create_udf_function($1 | $5, $3, $6,
+ (Item_result) $8, $10))
+ MYSQL_YYABORT;
+ }
+ | create_or_replace USER_SYM opt_if_not_exists clear_privileges
+ grant_list opt_require_clause opt_resource_options opt_account_locking opt_password_expiration
{
if (unlikely(Lex->set_command_with_check(SQLCOM_CREATE_USER,
$1 | $3)))
@@ -2915,36 +2917,6 @@ create:
{ }
;
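The reorganized create rule folds the old sf_tail/udf_tail split into explicit branches driven by opt_aggregate and opt_if_not_exists, finalized through stmt_create_routine_finalize and stmt_create_udf_function. Assuming IF NOT EXISTS is honoured the way the new branches suggest, statements of these shapes are parsed here (routine names and the SONAME file are illustrative):

    CREATE PROCEDURE IF NOT EXISTS p1() SELECT 1;
    CREATE FUNCTION IF NOT EXISTS f1(a INT) RETURNS INT RETURN a + 1;
    CREATE AGGREGATE FUNCTION udf_median RETURNS REAL SONAME 'udf_median.so';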
-sf_tail_not_aggregate:
- sf_tail
- {
- if (unlikely(Lex->sphead->m_flags & sp_head::HAS_AGGREGATE_INSTR))
- {
- my_yyabort_error((ER_NOT_AGGREGATE_FUNCTION, MYF(0)));
- }
- Lex->sphead->set_chistics_agg_type(NOT_AGGREGATE);
- }
-
-sf_tail_aggregate:
- sf_tail
- {
- if (unlikely(!(Lex->sphead->m_flags & sp_head::HAS_AGGREGATE_INSTR)))
- {
- my_yyabort_error((ER_INVALID_AGGREGATE_FUNCTION, MYF(0)));
- }
- Lex->sphead->set_chistics_agg_type(GROUP_AGGREGATE);
- }
-
-create_function_tail:
- sf_tail_not_aggregate { }
- | udf_tail { Lex->udf.type= UDFTYPE_FUNCTION; }
- ;
-
-create_aggregate_function_tail:
- sf_tail_aggregate
- { }
- | udf_tail { Lex->udf.type= UDFTYPE_AGGREGATE; }
- ;
opt_sequence:
/* empty */ { }
| sequence_defs
@@ -3265,20 +3237,17 @@ ev_sql_stmt:
if (unlikely(!lex->make_sp_head(thd,
lex->event_parse_data->identifier,
- &sp_handler_procedure)))
+ &sp_handler_procedure,
+ DEFAULT_AGGREGATE)))
MYSQL_YYABORT;
lex->sphead->set_body_start(thd, lip->get_cpp_ptr());
}
sp_proc_stmt
{
- LEX *lex= thd->lex;
-
/* return back to the original memory root ASAP */
- lex->sphead->set_stmt_end(thd);
- lex->sphead->restore_thd_mem_root(thd);
-
- lex->event_parse_data->body_changed= TRUE;
+ if (Lex->sp_body_finalize_event(thd))
+ MYSQL_YYABORT;
}
;
@@ -3290,13 +3259,16 @@ clear_privileges:
lex->columns.empty();
lex->grant= lex->grant_tot_col= 0;
lex->all_privileges= 0;
- lex->select_lex.db= null_clex_str;
- lex->ssl_type= SSL_TYPE_NOT_SPECIFIED;
- lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0;
- bzero((char *)&(lex->mqh),sizeof(lex->mqh));
+ lex->first_select_lex()->db= null_clex_str;
+ lex->account_options.reset();
}
;
+opt_aggregate:
+ /* Empty */ { $$= NOT_AGGREGATE; }
+ | AGGREGATE_SYM { $$= GROUP_AGGREGATE; }
+ ;
+
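With opt_aggregate reduced to a plain %type value (NOT_AGGREGATE / GROUP_AGGREGATE), the aggregate-vs-plain decision is made up front instead of by the removed sf_tail_* wrappers; FETCH GROUP NEXT ROW (handled further down via sp_add_agg_cfetch) still marks a body as aggregate. A sketch of the stored aggregate function shape this grammar targets, assuming a client delimiter change around the body (identifiers are illustrative):

    CREATE AGGREGATE FUNCTION agg_sum(x INT) RETURNS INT
    BEGIN
      DECLARE total INT DEFAULT 0;
      DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN total;
      LOOP
        FETCH GROUP NEXT ROW;
        SET total = total + x;
      END LOOP;
    END;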
sp_name:
ident '.' ident
{
@@ -3335,7 +3307,7 @@ sp_chistic:
| MODIFIES_SYM SQL_SYM DATA_SYM
{ Lex->sp_chistics.daccess= SP_MODIFIES_SQL_DATA; }
| sp_suid
- {}
+ { Lex->sp_chistics.suid= $1; }
;
/* Create characteristics */
@@ -3345,14 +3317,8 @@ sp_c_chistic:
;
sp_suid:
- SQL_SYM SECURITY_SYM DEFINER_SYM
- {
- Lex->sp_chistics.suid= SP_IS_SUID;
- }
- | SQL_SYM SECURITY_SYM INVOKER_SYM
- {
- Lex->sp_chistics.suid= SP_IS_NOT_SUID;
- }
+ SQL_SYM SECURITY_SYM DEFINER_SYM { $$= SP_IS_SUID; }
+ | SQL_SYM SECURITY_SYM INVOKER_SYM { $$= SP_IS_NOT_SUID; }
;
call:
@@ -3389,7 +3355,18 @@ sp_cparams:
/* Stored FUNCTION parameter declaration list */
sp_fdparam_list:
/* Empty */
- | sp_fdparams
+ {
+ Lex->sphead->m_param_begin= YYLIP->get_cpp_tok_start();
+ Lex->sphead->m_param_end= Lex->sphead->m_param_begin;
+ }
+ |
+ {
+ Lex->sphead->m_param_begin= YYLIP->get_cpp_tok_start();
+ }
+ sp_fdparams
+ {
+ Lex->sphead->m_param_end= YYLIP->get_cpp_tok_start();
+ }
;
sp_fdparams:
@@ -3463,18 +3440,6 @@ sp_opt_inout:
| INOUT_SYM { $$= sp_variable::MODE_INOUT; }
;
-sp_parenthesized_fdparam_list:
- '('
- {
- Lex->sphead->m_param_begin= YYLIP->get_cpp_tok_start() + 1;
- }
- sp_fdparam_list
- ')'
- {
- Lex->sphead->m_param_end= YYLIP->get_cpp_tok_start();
- }
- ;
-
sp_parenthesized_pdparam_list:
'('
{
@@ -3815,7 +3780,7 @@ raise_stmt_oracle:
signal_stmt:
SIGNAL_SYM signal_value opt_set_signal_information
{
- if (unlikely(Lex->add_signal_statement(thd, $2)))
+ if (Lex->add_signal_statement(thd, $2))
MYSQL_YYABORT;
}
;
@@ -4001,6 +3966,7 @@ statement_information_item:
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
+ ;
simple_target_specification:
ident_cli
@@ -4057,6 +4023,7 @@ condition_information_item:
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
+ ;
condition_information_item_name:
CLASS_ORIGIN_SYM
@@ -4186,44 +4153,8 @@ sp_proc_stmt_statement:
}
statement
{
- LEX *lex= thd->lex;
- Lex_input_stream *lip= YYLIP;
- sp_head *sp= lex->sphead;
-
- sp->m_flags|= sp_get_flags_for_command(lex);
- /* "USE db" doesn't work in a procedure */
- if (unlikely(lex->sql_command == SQLCOM_CHANGE_DB))
- my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "USE"));
- /*
- Don't add an instruction for SET statements, since all
- instructions for them were already added during processing
- of "set" rule.
- */
- DBUG_ASSERT(lex->sql_command != SQLCOM_SET_OPTION ||
- lex->var_list.is_empty());
- if (lex->sql_command != SQLCOM_SET_OPTION)
- {
- sp_instr_stmt *i=new (thd->mem_root)
- sp_instr_stmt(sp->instructions(), lex->spcont, lex);
- if (unlikely(i == NULL))
- MYSQL_YYABORT;
-
- /*
- Extract the query statement from the tokenizer. The
- end is either lex->ptr, if there was no lookahead,
- lex->tok_end otherwise.
- */
- if (yychar == YYEMPTY)
- i->m_query.length= lip->get_ptr() - sp->m_tmp_query;
- else
- i->m_query.length= lip->get_tok_start() - sp->m_tmp_query;;
- if (unlikely(!(i->m_query.str= strmake_root(thd->mem_root,
- sp->m_tmp_query,
- i->m_query.length))) ||
- unlikely(sp->add_instr(i)))
- MYSQL_YYABORT;
- }
- if (unlikely(sp->restore_lex(thd)))
+ if (Lex->sp_proc_stmt_statement_finalize(thd, yychar == YYEMPTY) ||
+ Lex->sphead->restore_lex(thd))
MYSQL_YYABORT;
}
;
@@ -4357,7 +4288,7 @@ assignment_source_expr:
$$->sp_lex_in_use= true;
$$->set_item_and_free_list($3, thd->free_list);
thd->free_list= NULL;
- if (unlikely($$->sphead->restore_lex(thd)))
+ if ($$->sphead->restore_lex(thd))
MYSQL_YYABORT;
}
;
@@ -4366,6 +4297,7 @@ for_loop_bound_expr:
assignment_source_lex
{
Lex->sphead->reset_lex(thd, $1);
+ Lex->current_select->parsing_place= FOR_LOOP_BOUND;
}
expr
{
@@ -4375,6 +4307,7 @@ for_loop_bound_expr:
$$->set_item_and_free_list($3, NULL);
if (unlikely($$->sphead->restore_lex(thd)))
MYSQL_YYABORT;
+ Lex->current_select->parsing_place= NO_MATTER;
}
;
@@ -4427,15 +4360,8 @@ sp_proc_stmt_fetch:
sp_proc_stmt_fetch_head sp_fetch_list { }
| FETCH_SYM GROUP_SYM NEXT_SYM ROW_SYM
{
- LEX *lex= Lex;
- sp_head *sp= lex->sphead;
- lex->sphead->m_flags|= sp_head::HAS_AGGREGATE_INSTR;
- sp_instr_agg_cfetch *i=
- new (thd->mem_root) sp_instr_agg_cfetch(sp->instructions(),
- lex->spcont);
- if (unlikely(i == NULL) ||
- unlikely(sp->add_instr(i)))
- MYSQL_YYABORT;
+ if (unlikely(Lex->sp_add_agg_cfetch()))
+ MYSQL_YYABORT;
}
;
@@ -4608,7 +4534,8 @@ case_stmt_body:
{
if (unlikely(Lex->case_stmt_action_expr($2)))
MYSQL_YYABORT;
- if (unlikely(Lex->sphead->restore_lex(thd)))
+
+ if (Lex->sphead->restore_lex(thd))
MYSQL_YYABORT;
}
simple_when_clause_list
@@ -4807,7 +4734,7 @@ while_body:
LEX *lex= Lex;
if (unlikely(lex->sp_while_loop_expression(thd, $1)))
MYSQL_YYABORT;
- if (unlikely(lex->sphead->restore_lex(thd)))
+ if (lex->sphead->restore_lex(thd))
MYSQL_YYABORT;
}
sp_proc_stmts1 END WHILE_SYM
@@ -4830,7 +4757,7 @@ repeat_body:
if (unlikely(i == NULL) ||
unlikely(lex->sphead->add_instr(i)))
MYSQL_YYABORT;
- if (unlikely(lex->sphead->restore_lex(thd)))
+ if (lex->sphead->restore_lex(thd))
MYSQL_YYABORT;
/* We can shortcut the cont_backpatch here */
i->m_cont_dest= ip+1;
@@ -5310,26 +5237,16 @@ size_number:
*/
create_body:
- '(' create_field_list ')'
+ create_field_list_parens
{ Lex->create_info.option_list= NULL; }
opt_create_table_options opt_create_partitioning opt_create_select {}
| opt_create_table_options opt_create_partitioning opt_create_select {}
- /*
- the following rule is redundant, but there's a shift/reduce
- conflict that prevents the rule above from parsing a syntax like
- CREATE TABLE t1 (SELECT 1);
- */
- | '(' create_select_query_specification ')'
- | '(' create_select_query_specification ')'
- { Select->set_braces(1);} union_list {}
- | '(' create_select_query_specification ')'
- { Select->set_braces(1);} union_order_or_limit {}
| create_like
{
Lex->create_info.add(DDL_options_st::OPT_LIKE);
- TABLE_LIST *src_table= Lex->select_lex.add_table_to_list(thd,
- $1, NULL, 0, TL_READ, MDL_SHARED_READ);
+ TABLE_LIST *src_table= Lex->first_select_lex()->
+ add_table_to_list(thd, $1, NULL, 0, TL_READ, MDL_SHARED_READ);
if (unlikely(! src_table))
MYSQL_YYABORT;
/* CREATE TABLE ... LIKE is not allowed for views. */
@@ -5339,7 +5256,7 @@ create_body:
create_like:
LIKE table_ident { $$= $2; }
- | '(' LIKE table_ident ')' { $$= $3; }
+ | LEFT_PAREN_LIKE LIKE table_ident ')' { $$= $3; }
;
opt_create_select:
@@ -5348,23 +5265,19 @@ opt_create_select:
;
create_select_query_expression:
- opt_with_clause SELECT_SYM create_select_part2 opt_table_expression
- create_select_part4
- {
- Select->set_braces(0);
- Select->set_with_clause($1);
+ query_expression
+ {
+ if (Lex->parsed_insert_select($1->first_select()))
+ MYSQL_YYABORT;
}
- union_clause
- | opt_with_clause SELECT_SYM create_select_part2
- create_select_part3_union_not_ready create_select_part4
+ | LEFT_PAREN_WITH with_clause query_expression_body ')'
{
- Select->set_with_clause($1);
+ SELECT_LEX *first_select= $3->first_select();
+ $3->set_with_clause($2);
+ $2->attach_to(first_select);
+ if (Lex->parsed_insert_select(first_select))
+ MYSQL_YYABORT;
}
- | '(' create_select_query_specification ')'
- | '(' create_select_query_specification ')'
- { Select->set_braces(1);} union_list {}
- | '(' create_select_query_specification ')'
- { Select->set_braces(1);} union_order_or_limit {}
;
opt_create_partitioning:
@@ -5447,13 +5360,17 @@ partition_entry:
thd->parse_error(ER_PARTITION_ENTRY_ERROR);
MYSQL_YYABORT;
}
- DBUG_ASSERT(Lex->part_info->table);
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
/*
We enter here when opening the frm file to translate
partition info string into part_info data structure.
*/
}
- partition {}
+ partition
+ {
+ Lex->pop_select(); //main select
+ }
;
partition:
@@ -6064,7 +5981,7 @@ opt_versioning_rotation:
| INTERVAL_SYM expr interval opt_versioning_interval_start
{
partition_info *part_info= Lex->part_info;
- if (unlikely(part_info->vers_set_interval($2, $3, $4)))
+ if (unlikely(part_info->vers_set_interval(thd, $2, $3, $4)))
{
my_error(ER_PART_WRONG_VALUE, MYF(0),
Lex->create_last_non_select_table->table_name.str,
@@ -6107,56 +6024,6 @@ opt_versioning_interval_start:
End of partition parser part
*/
-create_select_query_specification:
- opt_with_clause SELECT_SYM create_select_part2 create_select_part3
- create_select_part4
- {
- Select->set_with_clause($1);
- }
- ;
-
-create_select_part2:
- {
- LEX *lex=Lex;
- if (lex->sql_command == SQLCOM_INSERT)
- lex->sql_command= SQLCOM_INSERT_SELECT;
- else if (lex->sql_command == SQLCOM_REPLACE)
- lex->sql_command= SQLCOM_REPLACE_SELECT;
- /*
- The following work only with the local list, the global list
- is created correctly in this case
- */
- lex->current_select->table_list.save_and_clear(&lex->save_list);
- mysql_init_select(lex);
- lex->current_select->parsing_place= SELECT_LIST;
- }
- select_options select_item_list
- {
- Select->parsing_place= NO_MATTER;
- }
- ;
-
-create_select_part3:
- opt_table_expression
- | create_select_part3_union_not_ready
- ;
-
-create_select_part3_union_not_ready:
- table_expression order_or_limit
- | order_or_limit
- ;
-
-create_select_part4:
- opt_select_lock_type
- {
- /*
- The following work only with the local list, the global list
- is created correctly in this case
- */
- Lex->current_select->table_list.push_front(&Lex->save_list);
- }
- ;
-
opt_as:
/* empty */ {}
| AS {}
@@ -6374,7 +6241,7 @@ create_table_option:
}
| UNION_SYM opt_equal
{
- Lex->select_lex.table_list.save_and_clear(&Lex->save_list);
+ Lex->first_select_lex()->table_list.save_and_clear(&Lex->save_list);
}
'(' opt_table_list ')'
{
@@ -6383,8 +6250,8 @@ create_table_option:
from the global list.
*/
LEX *lex=Lex;
- lex->create_info.merge_list= lex->select_lex.table_list.first;
- lex->select_lex.table_list= lex->save_list;
+ lex->create_info.merge_list= lex->first_select_lex()->table_list.first;
+ lex->first_select_lex()->table_list= lex->save_list;
/*
When excluding union list from the global list we assume that
elements of the former immediately follow elements which represent
@@ -6585,6 +6452,13 @@ create_field_list:
}
;
+create_field_list_parens:
+ LEFT_PAREN_ALT field_list ')'
+ {
+ Lex->create_last_non_select_table= Lex->last_table();
+ }
+ ;
+
field_list:
field_list_item
| field_list ',' field_list_item
@@ -6595,6 +6469,7 @@ field_list_item:
| key_def
| constraint_def
| period_for_system_time
+ | PERIOD_SYM period_for_application_time { }
;
column_def:
@@ -6691,7 +6566,7 @@ key_def:
constraint_def:
opt_constraint check_constraint
{
- Lex->add_constraint(&$1, $2, FALSE);
+ Lex->add_constraint($1, $2, FALSE);
}
;
@@ -6700,7 +6575,15 @@ period_for_system_time:
PERIOD_SYM FOR_SYSTEM_TIME_SYM '(' ident ',' ident ')'
{
Vers_parse_info &info= Lex->vers_get_info();
- info.set_system_time($4, $6);
+ info.set_period($4, $6);
+ }
+ ;
+
+period_for_application_time:
+ FOR_SYM ident '(' ident ',' ident ')'
+ {
+ if (Lex->add_period($2, $4, $6))
+ MYSQL_YYABORT;
}
;
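period_for_application_time is the new counterpart of period_for_system_time: FOR <period name> over two columns, registered through Lex->add_period. Together with the PERIOD alternative added to field_list_item above, a table definition of this shape is accepted (identifiers are illustrative):

    CREATE TABLE coverage (
      id INT,
      date_start DATE,
      date_end DATE,
      PERIOD FOR app_time(date_start, date_end)
    );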
@@ -6884,6 +6767,8 @@ parse_vcol_expr:
Prevent the end user from invoking this command.
*/
MYSQL_YYABORT_UNLESS(Lex->parse_vcol_expr);
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
}
expr
{
@@ -6891,14 +6776,15 @@ parse_vcol_expr:
if (unlikely(!v))
MYSQL_YYABORT;
Lex->last_field->vcol_info= v;
+ Lex->pop_select(); //main select
}
;
parenthesized_expr:
- subselect
+ remember_tok_start
+ query_expression
{
- $$= new (thd->mem_root) Item_singlerow_subselect(thd, $1);
- if (unlikely($$ == NULL))
+ if (!($$= Lex->create_item_query_expression(thd, $1, $2)))
MYSQL_YYABORT;
}
| expr
@@ -7145,7 +7031,7 @@ field_type_lob:
| JSON_SYM
{
Lex->charset= &my_charset_utf8mb4_bin;
- $$.set(&type_handler_long_blob);
+ $$.set(&type_handler_json_longtext);
}
;
@@ -7246,10 +7132,12 @@ field_length:
opt_field_length:
/* empty */ { $$= (char*) 0; /* use default length */ }
| field_length { $$= $1; }
+ ;
opt_field_length_default_1:
/* empty */ { $$= (char*) "1"; }
| field_length { $$= $1; }
+ ;
opt_precision:
/* empty */ { $$.set(0, 0); }
@@ -7710,12 +7598,14 @@ fulltext_key_opts:
opt_USING_key_algorithm:
/* Empty*/ { $$= HA_KEY_ALG_UNDEF; }
| USING btree_or_rtree { $$= $2; }
+ ;
/* TYPE is a valid identifier, so it's handled differently than USING */
opt_key_algorithm_clause:
/* Empty*/ { $$= HA_KEY_ALG_UNDEF; }
| USING btree_or_rtree { $$= $2; }
| TYPE_SYM btree_or_rtree { $$= $2; }
+ ;
key_using_alg:
USING btree_or_rtree
@@ -7838,23 +7728,25 @@ alter:
Lex->name= null_clex_str;
Lex->table_type= TABLE_TYPE_UNKNOWN;
Lex->sql_command= SQLCOM_ALTER_TABLE;
- Lex->duplicates= DUP_ERROR;
- Lex->select_lex.init_order();
+ Lex->duplicates= DUP_ERROR;
+ Lex->first_select_lex()->order_list.empty();
Lex->create_info.init();
Lex->create_info.row_type= ROW_TYPE_NOT_USED;
Lex->alter_info.reset();
Lex->no_write_to_binlog= 0;
Lex->create_info.storage_media= HA_SM_DEFAULT;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
DBUG_ASSERT(!Lex->m_sql_cmd);
}
alter_options TABLE_SYM table_ident opt_lock_wait_timeout
{
- if (unlikely(!Lex->select_lex.add_table_to_list(thd, $5, NULL,
- TL_OPTION_UPDATING,
- TL_READ_NO_INSERT,
- MDL_SHARED_UPGRADABLE)))
+ if (!Lex->first_select_lex()->
+ add_table_to_list(thd, $5, NULL, TL_OPTION_UPDATING,
+ TL_READ_NO_INSERT, MDL_SHARED_UPGRADABLE))
MYSQL_YYABORT;
- Lex->select_lex.db= (Lex->select_lex.table_list.first)->db;
+ Lex->first_select_lex()->db=
+ (Lex->first_select_lex()->table_list.first)->db;
Lex->create_last_non_select_table= Lex->last_table();
}
alter_commands
@@ -7866,11 +7758,14 @@ alter:
if (unlikely(Lex->m_sql_cmd == NULL))
MYSQL_YYABORT;
}
+ Lex->pop_select(); //main select
}
| ALTER DATABASE ident_or_empty
{
Lex->create_info.default_table_charset= NULL;
Lex->create_info.used_fields= 0;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
}
create_database_options
{
@@ -7880,6 +7775,7 @@ alter:
if (lex->name.str == NULL &&
unlikely(lex->copy_db_to(&lex->name)))
MYSQL_YYABORT;
+ Lex->pop_select(); //main select
}
| ALTER DATABASE ident UPGRADE_SYM DATA_SYM DIRECTORY_SYM NAME_SYM
{
@@ -7895,6 +7791,8 @@ alter:
if (unlikely(lex->sphead))
my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE"));
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
lex->sp_chistics.init();
}
sp_a_chistics
@@ -7903,6 +7801,9 @@ alter:
lex->sql_command= SQLCOM_ALTER_PROCEDURE;
lex->spname= $3;
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
}
| ALTER FUNCTION_SYM sp_name
{
@@ -7910,6 +7811,8 @@ alter:
if (unlikely(lex->sphead))
my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "FUNCTION"));
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
lex->sp_chistics.init();
}
sp_a_chistics
@@ -7918,14 +7821,23 @@ alter:
lex->sql_command= SQLCOM_ALTER_FUNCTION;
lex->spname= $3;
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
}
| ALTER view_algorithm definer_opt opt_view_suid VIEW_SYM table_ident
{
- if (unlikely(Lex->add_alter_view(thd, $2, $4, $6)))
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ if (Lex->add_alter_view(thd, $2, $4, $6))
MYSQL_YYABORT;
}
view_list_opt AS view_select
- {}
+ {
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
| ALTER definer_opt opt_view_suid VIEW_SYM table_ident
/*
We have two separate rules for ALTER VIEW rather that
@@ -7933,14 +7845,22 @@ alter:
with the ALTER EVENT below.
*/
{
- if (unlikely(Lex->add_alter_view(thd, VIEW_ALGORITHM_INHERIT, $3, $5)))
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ if (Lex->add_alter_view(thd, VIEW_ALGORITHM_INHERIT, $3, $5))
MYSQL_YYABORT;
}
view_list_opt AS view_select
- {}
+ {
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
| ALTER definer_opt remember_name EVENT_SYM sp_name
{
- /*
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ /*
It is safe to use Lex->spname because
              ALTER EVENT xxx RENAME TO yyy DO ALTER EVENT RENAME TO
is not allowed. Lex->spname is used in the case of RENAME TO
@@ -7972,6 +7892,8 @@ alter:
*/
Lex->sql_command= SQLCOM_ALTER_EVENT;
Lex->stmt_definition_end= (char*)YYLIP->get_cpp_ptr();
+
+ Lex->pop_select(); //main select
}
| ALTER TABLESPACE alter_tablespace_info
{
@@ -8001,7 +7923,7 @@ alter:
} OPTIONS_SYM '(' server_options_list ')' { }
/* ALTER USER foo is allowed for MySQL compatibility. */
| ALTER opt_if_exists USER_SYM clear_privileges grant_list
- opt_require_clause opt_resource_options
+ opt_require_clause opt_resource_options opt_account_locking opt_password_expiration
{
Lex->create_info.set($2);
Lex->sql_command= SQLCOM_ALTER_USER;
@@ -8015,16 +7937,17 @@ alter:
lex->create_info.init();
lex->no_write_to_binlog= 0;
DBUG_ASSERT(!lex->m_sql_cmd);
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
}
table_ident
{
LEX *lex= Lex;
- if (unlikely(!(lex->create_info.seq_create_info=
- new (thd->mem_root) sequence_definition())) ||
- unlikely(!lex->select_lex.add_table_to_list(thd, $5, NULL,
- TL_OPTION_SEQUENCE,
- TL_WRITE,
- MDL_EXCLUSIVE)))
+ if (!(lex->create_info.seq_create_info= new (thd->mem_root)
+ sequence_definition()) ||
+ !lex->first_select_lex()->
+ add_table_to_list(thd, $5, NULL, TL_OPTION_SEQUENCE,
+ TL_WRITE, MDL_EXCLUSIVE))
MYSQL_YYABORT;
}
sequence_defs
@@ -8033,6 +7956,42 @@ alter:
Lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_sequence($3);
if (unlikely(Lex->m_sql_cmd == NULL))
MYSQL_YYABORT;
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
+ ;
+
+opt_account_locking:
+ /* Nothing */ {}
+ | ACCOUNT_SYM LOCK_SYM
+ {
+ Lex->account_options.account_locked= ACCOUNTLOCK_LOCKED;
+ }
+ | ACCOUNT_SYM UNLOCK_SYM
+ {
+ Lex->account_options.account_locked= ACCOUNTLOCK_UNLOCKED;
+ }
+ ;
+opt_password_expiration:
+ /* Nothing */ {}
+ | PASSWORD_SYM EXPIRE_SYM
+ {
+ Lex->account_options.password_expire= PASSWORD_EXPIRE_NOW;
+ }
+ | PASSWORD_SYM EXPIRE_SYM NEVER_SYM
+ {
+ Lex->account_options.password_expire= PASSWORD_EXPIRE_NEVER;
+ }
+ | PASSWORD_SYM EXPIRE_SYM DEFAULT
+ {
+ Lex->account_options.password_expire= PASSWORD_EXPIRE_DEFAULT;
+ }
+ | PASSWORD_SYM EXPIRE_SYM INTERVAL_SYM NUM DAY_SYM
+ {
+ Lex->account_options.password_expire= PASSWORD_EXPIRE_INTERVAL;
+ if (!(Lex->account_options.num_expiration_days= atoi($4.str)))
+ my_yyabort_error((ER_WRONG_VALUE, MYF(0), "DAY", $4.str));
}
;
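opt_account_locking and opt_password_expiration are appended to both CREATE USER and ALTER USER, with the old per-field ssl/x509/mqh members replaced by Lex->account_options (see the clear_privileges hunk above). The clauses follow the resource options in that order, e.g. (account names are illustrative):

    CREATE USER 'app'@'localhost' ACCOUNT LOCK PASSWORD EXPIRE NEVER;
    ALTER USER 'app'@'localhost' PASSWORD EXPIRE INTERVAL 30 DAY;
    ALTER USER 'app'@'localhost' ACCOUNT UNLOCK;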
@@ -8181,22 +8140,7 @@ alter_commands:
| EXCHANGE_SYM PARTITION_SYM alt_part_name_item
WITH TABLE_SYM table_ident have_partitioning
{
- LEX *lex= thd->lex;
- lex->select_lex.db= $6->db;
- if (lex->select_lex.db.str == NULL &&
- unlikely(lex->copy_db_to(&lex->select_lex.db)))
- MYSQL_YYABORT;
- lex->name= $6->table;
- lex->alter_info.partition_flags|= ALTER_PARTITION_EXCHANGE;
- if (unlikely(!lex->select_lex.add_table_to_list(thd, $6, NULL,
- TL_OPTION_UPDATING,
- TL_READ_NO_INSERT,
- MDL_SHARED_NO_WRITE)))
- MYSQL_YYABORT;
- DBUG_ASSERT(!lex->m_sql_cmd);
- lex->m_sql_cmd= new (thd->mem_root)
- Sql_cmd_alter_table_exchange_partition();
- if (unlikely(lex->m_sql_cmd == NULL))
+ if (Lex->stmt_alter_table_exchange_partition($6))
MYSQL_YYABORT;
}
;
@@ -8320,6 +8264,13 @@ alter_list_item:
{
Lex->alter_info.flags|= ALTER_ADD_PERIOD;
}
+ | ADD
+ PERIOD_SYM opt_if_not_exists_table_element period_for_application_time
+ {
+ Table_period_info &period= Lex->create_info.period_info;
+ period.create_if_not_exists= Lex->check_exists;
+ Lex->alter_info.flags|= ALTER_ADD_CHECK_CONSTRAINT;
+ }
| add_column '(' create_field_list ')'
{
LEX *lex=Lex;
@@ -8334,7 +8285,7 @@ alter_list_item:
| ADD CONSTRAINT IF_SYM not EXISTS field_ident check_constraint
{
Lex->alter_info.flags|= ALTER_ADD_CHECK_CONSTRAINT;
- Lex->add_constraint(&$6, $7, TRUE);
+ Lex->add_constraint($6, $7, TRUE);
}
| CHANGE opt_column opt_if_exists_table_element field_ident
field_spec opt_place
@@ -8430,9 +8381,9 @@ alter_list_item:
| RENAME opt_to table_ident
{
LEX *lex=Lex;
- lex->select_lex.db= $3->db;
- if (lex->select_lex.db.str == NULL &&
- unlikely(lex->copy_db_to(&lex->select_lex.db)))
+ lex->first_select_lex()->db= $3->db;
+ if (lex->first_select_lex()->db.str == NULL &&
+ lex->copy_db_to(&lex->first_select_lex()->db))
MYSQL_YYABORT;
if (unlikely(check_table_name($3->table.str,$3->table.length,
FALSE)) ||
@@ -8453,7 +8404,7 @@ alter_list_item:
$5->name, $4->csname));
if (unlikely(Lex->create_info.add_alter_list_item_convert_to_charset($5)))
MYSQL_YYABORT;
- Lex->alter_info.flags|= ALTER_OPTIONS;
+ Lex->alter_info.flags|= ALTER_CONVERT_TO;
}
| create_table_options_space_separated
{
@@ -8489,6 +8440,14 @@ alter_list_item:
{
Lex->alter_info.flags|= ALTER_DROP_PERIOD;
}
+ | DROP PERIOD_SYM opt_if_exists_table_element FOR_SYM ident
+ {
+ Alter_drop *ad= new Alter_drop(Alter_drop::PERIOD, $5.str, $3);
+ if (unlikely(ad == NULL))
+ MYSQL_YYABORT;
+ Lex->alter_info.drop_list.push_back(ad, thd->mem_root);
+ Lex->alter_info.flags|= ALTER_DROP_CHECK_CONSTRAINT;
+ }
;
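The two new alter_list_item branches let ALTER TABLE add or drop an application-time period by name, reusing the IF [NOT] EXISTS table-element options. Assuming the rest of the patch wires these flags through, the shapes accepted by the grammar are (identifiers are illustrative and continue the earlier example):

    ALTER TABLE coverage ADD PERIOD IF NOT EXISTS FOR app_time(date_start, date_end);
    ALTER TABLE coverage DROP PERIOD IF EXISTS FOR app_time;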
opt_index_lock_algorithm:
@@ -8497,6 +8456,7 @@ opt_index_lock_algorithm:
| alter_algorithm_option
| alter_lock_option alter_algorithm_option
| alter_algorithm_option alter_lock_option
+ ;
alter_algorithm_option:
ALGORITHM_SYM opt_equal DEFAULT
@@ -8555,7 +8515,7 @@ alter_option:
Lex->alter_info.requested_lock=
Alter_info::ALTER_TABLE_LOCK_NONE;
}
-
+ ;
opt_restrict:
/* empty */ { Lex->drop_mode= DROP_DEFAULT; }
@@ -8822,6 +8782,7 @@ persistent_stat_spec:
{}
| COLUMNS persistent_column_stat_spec INDEXES persistent_index_stat_spec
{}
+ ;
persistent_column_stat_spec:
ALL {}
@@ -9136,8 +9097,8 @@ adm_partition:
cache_keys_spec:
{
- Lex->select_lex.alloc_index_hints(thd);
- Select->set_index_hint_type(INDEX_HINT_USE,
+ Lex->first_select_lex()->alloc_index_hints(thd);
+ Select->set_index_hint_type(INDEX_HINT_USE,
INDEX_HINT_MASK_ALL);
}
cache_key_list_or_empty
@@ -9158,217 +9119,217 @@ opt_ignore_leaves:
Select : retrieve data from table
*/
-
select:
- opt_with_clause select_init
+ query_expression_body
{
- LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->current_select->set_with_clause($1);
- }
- ;
-
-select_init:
- SELECT_SYM select_options_and_item_list select_init3
- | table_value_constructor
- | table_value_constructor union_list
- | table_value_constructor union_order_or_limit
- | '(' select_paren ')'
- | '(' select_paren ')' union_list
- | '(' select_paren ')' union_order_or_limit
- ;
-
-union_list_part2:
- SELECT_SYM select_options_and_item_list select_init3_union_query_term
- | table_value_constructor
- | table_value_constructor union_list
- | table_value_constructor union_order_or_limit
- | '(' select_paren_union_query_term ')'
- | '(' select_paren_union_query_term ')' union_list
- | '(' select_paren_union_query_term ')' union_order_or_limit
- ;
-
-select_paren:
- {
- Lex->current_select->set_braces(true);
+ if (Lex->push_select($1->fake_select_lex ?
+ $1->fake_select_lex :
+ $1->first_select()))
+ MYSQL_YYABORT;
}
- table_value_constructor
+ opt_procedure_or_into
{
- DBUG_ASSERT(Lex->current_select->braces);
+ Lex->pop_select();
+ if ($1->set_lock_to_the_last_select($3))
+ MYSQL_YYABORT;
+ if (Lex->select_finalize($1))
+ MYSQL_YYABORT;
}
- |
+ | with_clause query_expression_body
{
- /*
- In order to correctly parse UNION's global ORDER BY we need to
- set braces before parsing the clause.
- */
- Lex->current_select->set_braces(true);
+ if (Lex->push_select($2->fake_select_lex ?
+ $2->fake_select_lex :
+ $2->first_select()))
+ MYSQL_YYABORT;
}
- SELECT_SYM select_options_and_item_list select_part3
- opt_select_lock_type
+ opt_procedure_or_into
{
- DBUG_ASSERT(Lex->current_select->braces);
+ Lex->pop_select();
+ $2->set_with_clause($1);
+ $1->attach_to($2->first_select());
+ if ($2->set_lock_to_the_last_select($4))
+ MYSQL_YYABORT;
+ if (Lex->select_finalize($2))
+ MYSQL_YYABORT;
}
- | '(' select_paren ')'
;
-select_paren_union_query_term:
+
+select_into:
+ select_into_query_specification
{
- /*
- In order to correctly parse UNION's global ORDER BY we need to
- set braces before parsing the clause.
- */
- Lex->current_select->set_braces(true);
+ if (Lex->push_select($1))
+ MYSQL_YYABORT;
}
- SELECT_SYM select_options_and_item_list select_part3_union_query_term
- opt_select_lock_type
+ opt_order_limit_lock
{
- DBUG_ASSERT(Lex->current_select->braces);
- }
- | '(' select_paren_union_query_term ')'
+ st_select_lex_unit *unit;
+ if (!(unit= Lex->parsed_body_select($1, $3)))
+ MYSQL_YYABORT;
+ if (Lex->select_finalize(unit))
+ MYSQL_YYABORT;
+ }
+ ;
+
+
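The top-level select rule now parses a complete query_expression_body, pushes its first (or fake) SELECT_LEX, applies any trailing PROCEDURE/INTO and lock clause via set_lock_to_the_last_select, and finalizes the unit; SELECT ... INTO is split out into its own select_into statement built from select_into_query_specification. For example (tables and variables are illustrative):

    (SELECT a FROM t1 WHERE a > 0) UNION (SELECT a FROM t2) ORDER BY a LIMIT 10;
    SELECT COUNT(*) INTO @cnt FROM t1;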
+simple_table:
+ query_specification { $$= $1; }
+ | table_value_constructor { $$= $1; }
;
-select_paren_view:
+table_value_constructor:
+ VALUES
+ {
+ if (Lex->parsed_TVC_start())
+ MYSQL_YYABORT;
+ }
+ values_list
+ {
+ if (!($$= Lex->parsed_TVC_end()))
+ MYSQL_YYABORT;
+ }
+ ;
+
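simple_table and table_value_constructor make VALUES a first-class query primary (parsed_TVC_start / parsed_TVC_end) rather than a special case bolted onto the old select_init. Statement forms covered by this rule include (names are illustrative):

    VALUES (1, 'one'), (2, 'two');
    SELECT * FROM (VALUES (1), (2), (3)) AS tvc;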
+query_specification_start:
+ SELECT_SYM
{
- /*
- In order to correctly parse UNION's global ORDER BY we need to
- set braces before parsing the clause.
- */
- Lex->current_select->set_braces(true);
+ SELECT_LEX *sel;
+ LEX *lex= Lex;
+ if (!(sel= lex->alloc_select(TRUE)) ||
+ lex->push_select(sel))
+ MYSQL_YYABORT;
+ sel->init_select();
+ sel->braces= FALSE;
}
- SELECT_SYM select_options_and_item_list select_part3_view
- opt_select_lock_type
+ select_options
{
- DBUG_ASSERT(Lex->current_select->braces);
+ Select->parsing_place= SELECT_LIST;
}
- | '(' select_paren_view ')'
- ;
+ select_item_list
+ {
+ Select->parsing_place= NO_MATTER;
+ }
+ ;
-/* The equivalent of select_paren for nested queries. */
-select_paren_derived:
+query_specification:
+ query_specification_start
+ opt_from_clause
+ opt_where_clause
+ opt_group_clause
+ opt_having_clause
+ opt_window_clause
{
- Lex->current_select->set_braces(true);
+ $$= Lex->pop_select();
}
- table_value_constructor
+ ;
+
+select_into_query_specification:
+ query_specification_start
+ into
+ opt_from_clause
+ opt_where_clause
+ opt_group_clause
+ opt_having_clause
+ opt_window_clause
{
- DBUG_ASSERT(Lex->current_select->braces);
- $$= Lex->current_select->master_unit()->first_select();
+ $$= Lex->pop_select();
}
- |
+ ;
+
+opt_from_clause:
+ /* Empty */
+ | from_clause
+ ;
+
+
+query_primary:
+ simple_table
+ { $$= $1; }
+ | query_primary_parens
+ { $$= $1; }
+ ;
+
+query_primary_parens:
+ '(' query_expression_unit
{
- Lex->current_select->set_braces(true);
+ if (Lex->parsed_unit_in_brackets($2))
+ MYSQL_YYABORT;
}
- SELECT_SYM select_part2_derived
- opt_table_expression
- opt_order_clause
- opt_limit_clause
- opt_select_lock_type
+ query_expression_tail ')'
{
- DBUG_ASSERT(Lex->current_select->braces);
- $$= Lex->current_select->master_unit()->first_select();
+ $$= Lex->parsed_unit_in_brackets_tail($2, $4);
}
- | '(' select_paren_derived ')' { $$= $2; }
- ;
-
-select_init3:
- opt_table_expression
- opt_select_lock_type
+ | '(' query_primary
{
- /* Parentheses carry no meaning here */
- Lex->current_select->set_braces(false);
+ Lex->push_select($2);
}
- union_clause
- | select_part3_union_not_ready
- opt_select_lock_type
+ query_expression_tail ')'
{
- /* Parentheses carry no meaning here */
- Lex->current_select->set_braces(false);
+ if (!($$= Lex->parsed_select_in_brackets($2, $4)))
+ YYABORT;
}
;
-
-select_init3_union_query_term:
- opt_table_expression
- opt_select_lock_type
+query_expression_unit:
+ query_primary
+ unit_type_decl
+ query_primary
{
- /* Parentheses carry no meaning here */
- Lex->current_select->set_braces(false);
+ if (!($$= Lex->parsed_select_expr_start($1, $3, $2.unit_type,
+ $2.distinct)))
+ YYABORT;
}
- union_clause
- | select_part3_union_not_ready_noproc
- opt_select_lock_type
+ | query_expression_unit
+ unit_type_decl
+ query_primary
{
- /* Parentheses carry no meaning here */
- Lex->current_select->set_braces(false);
+ if (!($$= Lex->parsed_select_expr_cont($1, $3, $2.unit_type,
+ $2.distinct, FALSE)))
+ YYABORT;
}
;
-
-select_init3_view:
- opt_table_expression opt_select_lock_type
+query_expression_body:
+ query_primary
{
- Lex->current_select->set_braces(false);
+ Lex->push_select($1);
}
- | opt_table_expression opt_select_lock_type
+ query_expression_tail
{
- Lex->current_select->set_braces(false);
+ if (!($$= Lex->parsed_body_select($1, $3)))
+ MYSQL_YYABORT;
}
- union_list_view
- | order_or_limit opt_select_lock_type
+ | query_expression_unit
{
- Lex->current_select->set_braces(false);
+ if (Lex->parsed_body_unit($1))
+ MYSQL_YYABORT;
}
- | table_expression order_or_limit opt_select_lock_type
+ query_expression_tail
{
- Lex->current_select->set_braces(false);
+ if (!($$= Lex->parsed_body_unit_tail($1, $3)))
+ MYSQL_YYABORT;
}
;
-/*
- The SELECT parts after select_item_list that cannot be followed by UNION.
-*/
-
-select_part3:
- opt_table_expression
- | select_part3_union_not_ready
- ;
-
-select_part3_union_query_term:
- opt_table_expression
- | select_part3_union_not_ready_noproc
- ;
-
-select_part3_view:
- opt_table_expression
- | order_or_limit
- | table_expression order_or_limit
- ;
-
-select_part3_union_not_ready:
- select_part3_union_not_ready_noproc
- | table_expression procedure_clause
- | table_expression order_or_limit procedure_clause
- ;
-
-select_part3_union_not_ready_noproc:
- order_or_limit
- | into opt_table_expression opt_order_clause opt_limit_clause
- | table_expression into
- | table_expression order_or_limit
- | table_expression order_or_limit into
- ;
-
-select_options_and_item_list:
+query_expression:
+ opt_with_clause
+ query_expression_body
{
- LEX *lex= Lex;
- SELECT_LEX *sel= lex->current_select;
- if (sel->linkage != UNION_TYPE)
- mysql_init_select(lex);
- lex->current_select->parsing_place= SELECT_LIST;
+ if ($1)
+ {
+ $2->set_with_clause($1);
+ $1->attach_to($2->first_select());
+ }
+ $$= $2;
}
- select_options select_item_list
+ ;
+
+subselect:
+ remember_tok_start
+ query_expression
{
- Select->parsing_place= NO_MATTER;
+ if (!($$= Lex->parsed_subselect($2, $1)))
+ YYABORT;
}
;
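query_primary, query_primary_parens, query_expression_unit and query_expression_body replace the old select_paren/select_init machinery: set operations are built bottom-up from query primaries, and subselect becomes a thin wrapper that turns a query_expression into a unit via parsed_subselect. Queries of these shapes all flow through the new rules (names are illustrative):

    (SELECT a FROM t1 ORDER BY a LIMIT 10) UNION (SELECT a FROM t2 ORDER BY a LIMIT 10);
    SELECT (SELECT MAX(a) FROM t1) AS max_a;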
@@ -9376,18 +9337,6 @@ select_options_and_item_list:
/**
<table expression>, as in the SQL standard.
*/
-table_expression:
- from_clause
- opt_where_clause
- opt_group_clause
- opt_having_clause
- opt_window_clause
- ;
-
-opt_table_expression:
- /* Empty */
- | table_expression
- ;
from_clause:
FROM table_reference_list
@@ -9436,8 +9385,9 @@ history_point:
TIMESTAMP TEXT_STRING
{
Item *item;
- if (!(item= create_temporal_literal(thd, $2.str, $2.length, YYCSCL,
- MYSQL_TYPE_DATETIME, true)))
+ if (!(item= type_handler_datetime.create_literal_item(thd,
+ $2.str, $2.length,
+ YYCSCL, true)))
MYSQL_YYABORT;
$$= Vers_history_point(VERS_TIMESTAMP, item);
}
@@ -9451,6 +9401,33 @@ history_point:
}
;
+for_portion_of_time_clause:
+ FOR_SYM PORTION_SYM OF_SYM remember_tok_start ident FROM
+ bit_expr TO_SYM bit_expr
+ {
+ if (unlikely(0 == strcasecmp($5.str, "SYSTEM_TIME")))
+ {
+ thd->parse_error(ER_SYNTAX_ERROR, $4);
+ MYSQL_YYABORT;
+ }
+ Lex->period_conditions.init(SYSTEM_TIME_FROM_TO,
+ Vers_history_point(VERS_TIMESTAMP, $7),
+ Vers_history_point(VERS_TIMESTAMP, $9),
+ $5);
+ }
+ ;
+
+opt_for_portion_of_time_clause:
+ /* empty */
+ {
+ $$= false;
+ }
+ | for_portion_of_time_clause
+ {
+ $$= true;
+ }
+ ;
+
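for_portion_of_time_clause reuses the SYSTEM_TIME_FROM_TO machinery for application-time periods and explicitly rejects the reserved SYSTEM_TIME name. The UPDATE/DELETE integration sits outside this hunk, but the clause itself corresponds to statements like the following (continuing the illustrative coverage/app_time names):

    UPDATE coverage FOR PORTION OF app_time
           FROM '2019-01-01' TO '2019-02-01'
       SET id = 2;

    DELETE FROM coverage FOR PORTION OF app_time
           FROM '2019-01-01' TO '2019-02-01';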
opt_for_system_time_clause:
/* empty */
{
@@ -9490,59 +9467,68 @@ select_option:
query_expression_option
| SQL_NO_CACHE_SYM
{
- /*
- Allow this flag only on the first top-level SELECT statement, if
- SQL_CACHE wasn't specified, and only once per query.
- */
- if (unlikely(Lex->current_select != &Lex->select_lex))
- my_yyabort_error((ER_CANT_USE_OPTION_HERE, MYF(0), "SQL_NO_CACHE"));
- if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_CACHE))
- my_yyabort_error((ER_WRONG_USAGE, MYF(0), "SQL_CACHE", "SQL_NO_CACHE"));
- if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_NO_CACHE))
+ /*
+ Allow this flag once per query.
+ */
+ if (Select->options & OPTION_NO_QUERY_CACHE)
my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "SQL_NO_CACHE"));
-
- Lex->safe_to_cache_query=0;
- Lex->select_lex.options&= ~OPTION_TO_QUERY_CACHE;
- Lex->select_lex.sql_cache= SELECT_LEX::SQL_NO_CACHE;
+ Select->options|= OPTION_NO_QUERY_CACHE;
}
| SQL_CACHE_SYM
{
- /*
- Allow this flag only on the first top-level SELECT statement, if
- SQL_NO_CACHE wasn't specified, and only once per query.
- */
- if (unlikely(Lex->current_select != &Lex->select_lex))
- my_yyabort_error((ER_CANT_USE_OPTION_HERE, MYF(0), "SQL_CACHE"));
- if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_NO_CACHE))
- my_yyabort_error((ER_WRONG_USAGE, MYF(0), "SQL_NO_CACHE", "SQL_CACHE"));
- if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_CACHE))
+ /*
+ Allow this flag once per query.
+ */
+ if (Select->options & OPTION_TO_QUERY_CACHE)
my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "SQL_CACHE"));
-
- Lex->safe_to_cache_query=1;
- Lex->select_lex.options|= OPTION_TO_QUERY_CACHE;
- Lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE;
+ Select->options|= OPTION_TO_QUERY_CACHE;
}
;
-opt_select_lock_type:
- /* empty */
- | FOR_SYM UPDATE_SYM opt_lock_wait_timeout
+
+select_lock_type:
+ FOR_SYM UPDATE_SYM opt_lock_wait_timeout_new
{
- LEX *lex=Lex;
- lex->current_select->lock_type= TL_WRITE;
- lex->current_select->set_lock_for_tables(TL_WRITE);
- lex->safe_to_cache_query=0;
+ $$= $3;
+ $$.defined_lock= TRUE;
+ $$.update_lock= TRUE;
}
- | LOCK_SYM IN_SYM SHARE_SYM MODE_SYM opt_lock_wait_timeout
+ | LOCK_SYM IN_SYM SHARE_SYM MODE_SYM opt_lock_wait_timeout_new
{
- LEX *lex=Lex;
- lex->current_select->lock_type= TL_READ_WITH_SHARED_LOCKS;
- lex->current_select->
- set_lock_for_tables(TL_READ_WITH_SHARED_LOCKS);
- lex->safe_to_cache_query=0;
+ $$= $5;
+ $$.defined_lock= TRUE;
+ $$.update_lock= FALSE;
}
;
+opt_select_lock_type:
+ /* empty */
+ {
+ $$.empty();
+ }
+ | select_lock_type
+ {
+ $$= $1;
+ }
+ ;
+
+opt_lock_wait_timeout_new:
+ /* empty */
+ {
+ $$.empty();
+ }
+ | WAIT_SYM ulong_num
+ {
+ $$.defined_timeout= TRUE;
+ $$.timeout= $2;
+ }
+ | NOWAIT_SYM
+ {
+ $$.defined_timeout= TRUE;
+ $$.timeout= 0;
+ }
+ ;
+
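select_lock_type now returns an explicit lock descriptor with an optional WAIT/NOWAIT timeout (opt_lock_wait_timeout_new) instead of mutating the current SELECT_LEX directly, so the lock can later be attached to the last select of the expression. The surface syntax it covers looks like (table name illustrative):

    SELECT * FROM t1 WHERE id = 1 FOR UPDATE NOWAIT;
    SELECT * FROM t1 WHERE id = 1 LOCK IN SHARE MODE WAIT 10;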
select_item_list:
select_item_list ',' select_item
| select_item
@@ -9621,13 +9607,13 @@ select_alias:
opt_default_time_precision:
/* empty */ { $$= NOT_FIXED_DEC; }
| '(' ')' { $$= NOT_FIXED_DEC; }
- | '(' real_ulong_num ')' { $$= $2; };
+ | '(' real_ulong_num ')' { $$= $2; }
;
opt_time_precision:
/* empty */ { $$= 0; }
| '(' ')' { $$= 0; }
- | '(' real_ulong_num ')' { $$= $2; };
+ | '(' real_ulong_num ')' { $$= $2; }
;
optional_braces:
@@ -10132,6 +10118,7 @@ dyncall_create_element:
else
$$->len= 0;
}
+ ;
dyncall_create_list:
dyncall_create_element
@@ -10214,7 +10201,21 @@ column_default_non_parenthesized_expr:
| param_marker { $$= $1; }
| variable
| sum_expr
+ {
+ if (!Lex->select_stack_top)
+ {
+ my_error(ER_INVALID_GROUP_FUNC_USE, MYF(0));
+ MYSQL_YYABORT;
+ }
+ }
| window_func_expr
+ {
+ if (!Lex->select_stack_top)
+ {
+ my_error(ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION, MYF(0));
+ MYSQL_YYABORT;
+ }
+ }
| inverse_distribution_function
| ROW_SYM '(' expr ',' expr_list ')'
{
@@ -10393,7 +10394,7 @@ function_call_keyword_timestamp:
}
| TIMESTAMP '(' expr ',' expr ')'
{
- $$= new (thd->mem_root) Item_func_add_time(thd, $3, $5, 1, 0);
+ $$= new (thd->mem_root) Item_func_timestamp(thd, $3, $5);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
@@ -11372,6 +11373,21 @@ window_func:
{
((Item_sum *) $1)->mark_as_window_func_sum_expr();
}
+ |
+ function_call_generic
+ {
+ Item* item = (Item*)$1;
+ /* Only UDF aggregate here possible */
+ if ((item == NULL) ||
+ (item->type() != Item::SUM_FUNC_ITEM)
+ || (((Item_sum *)item)->sum_func() != Item_sum::UDF_SUM_FUNC))
+ {
+ thd->parse_error();
+ MYSQL_YYABORT;
+ }
+
+ ((Item_sum *) $1)->mark_as_window_func_sum_expr();
+ }
;
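The extra window_func alternative admits UDF aggregates (Item_sum::UDF_SUM_FUNC) as window functions while rejecting any other generic function call. Assuming udf_median is an installed aggregate UDF (the name is invented for illustration):

    SELECT grp, udf_median(val) OVER (PARTITION BY grp) AS med FROM t1;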
simple_window_func:
@@ -11612,7 +11628,7 @@ opt_gconcat_separator:
opt_gorder_clause:
/* empty */
- | ORDER_SYM BY gorder_list;
+ | ORDER_SYM BY gorder_list
;
gorder_list:
@@ -11725,6 +11741,10 @@ cast_type_temporal:
DATE_SYM { $$.set(&type_handler_newdate); }
| TIME_SYM opt_field_length { $$.set(&type_handler_time2, 0, $2); }
| DATETIME opt_field_length { $$.set(&type_handler_datetime2, 0, $2); }
+ | INTERVAL_SYM DAY_SECOND_SYM field_length
+ {
+ $$.set(&type_handler_interval_DDhhmmssff, 0, $3);
+ }
;
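cast_type_temporal gains a day-second interval target backed by type_handler_interval_DDhhmmssff; the precision is mandatory here, since the rule uses field_length rather than opt_field_length. A sketch, assuming the usual 'D HH:MM:SS.f' literal format:

    SELECT CAST('10 09:08:07.5' AS INTERVAL DAY_SECOND(1));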
opt_expr_list:
@@ -11735,9 +11755,7 @@ opt_expr_list:
expr_list:
expr
{
- $$= new (thd->mem_root) List<Item>;
- if (unlikely($$ == NULL) ||
- unlikely($$->push_back($1, thd->mem_root)))
+ if (unlikely(!($$= List<Item>::make(thd->mem_root, $1))))
MYSQL_YYABORT;
}
| expr_list ',' expr
@@ -11849,10 +11867,15 @@ esc_table_ref:
/* Equivalent to <table reference list> in the SQL:2003 standard. */
/* Warning - may return NULL in case of incomplete SELECT */
derived_table_list:
- esc_table_ref { $$=$1; }
+ esc_table_ref
+ {
+ $$=$1;
+ Select->add_joined_table($1);
+ }
| derived_table_list ',' esc_table_ref
{
MYSQL_YYABORT_UNLESS($1 && ($$=$3));
+ Select->add_joined_table($3);
}
;
@@ -11871,11 +11894,18 @@ join_table:
left-associative joins.
*/
table_ref normal_join table_ref %prec TABLE_REF_PRIORITY
- { MYSQL_YYABORT_UNLESS($1 && ($$=$3)); $3->straight=$2; }
+ {
+ MYSQL_YYABORT_UNLESS($1 && ($$=$3));
+ Select->add_joined_table($1);
+ Select->add_joined_table($3);
+ $3->straight=$2;
+ }
| table_ref normal_join table_ref
ON
{
MYSQL_YYABORT_UNLESS($1 && $3);
+ Select->add_joined_table($1);
+ Select->add_joined_table($3);
/* Change the current name resolution context to a local context. */
if (unlikely(push_new_name_resolution_context(thd, $1, $3)))
MYSQL_YYABORT;
@@ -11892,6 +11922,8 @@ join_table:
USING
{
MYSQL_YYABORT_UNLESS($1 && $3);
+ Select->add_joined_table($1);
+ Select->add_joined_table($3);
}
'(' using_list ')'
{
@@ -11902,6 +11934,8 @@ join_table:
| table_ref NATURAL inner_join table_factor
{
MYSQL_YYABORT_UNLESS($1 && ($$=$4));
+ Select->add_joined_table($1);
+ Select->add_joined_table($4);
$4->straight=$3;
add_join_natural($1,$4,NULL,Select);
}
@@ -11911,6 +11945,8 @@ join_table:
ON
{
MYSQL_YYABORT_UNLESS($1 && $5);
+ Select->add_joined_table($1);
+ Select->add_joined_table($5);
/* Change the current name resolution context to a local context. */
if (unlikely(push_new_name_resolution_context(thd, $1, $5)))
MYSQL_YYABORT;
@@ -11927,6 +11963,8 @@ join_table:
| table_ref LEFT opt_outer JOIN_SYM table_factor
{
MYSQL_YYABORT_UNLESS($1 && $5);
+ Select->add_joined_table($1);
+ Select->add_joined_table($5);
}
USING '(' using_list ')'
{
@@ -11937,6 +11975,8 @@ join_table:
| table_ref NATURAL LEFT opt_outer JOIN_SYM table_factor
{
MYSQL_YYABORT_UNLESS($1 && $6);
+ Select->add_joined_table($1);
+ Select->add_joined_table($6);
add_join_natural($1,$6,NULL,Select);
$6->outer_join|=JOIN_TYPE_LEFT;
$$=$6;
@@ -11947,6 +11987,8 @@ join_table:
ON
{
MYSQL_YYABORT_UNLESS($1 && $5);
+ Select->add_joined_table($1);
+ Select->add_joined_table($5);
/* Change the current name resolution context to a local context. */
if (unlikely(push_new_name_resolution_context(thd, $1, $5)))
MYSQL_YYABORT;
@@ -11964,6 +12006,8 @@ join_table:
| table_ref RIGHT opt_outer JOIN_SYM table_factor
{
MYSQL_YYABORT_UNLESS($1 && $5);
+ Select->add_joined_table($1);
+ Select->add_joined_table($5);
}
USING '(' using_list ')'
{
@@ -11975,6 +12019,8 @@ join_table:
| table_ref NATURAL RIGHT opt_outer JOIN_SYM table_factor
{
MYSQL_YYABORT_UNLESS($1 && $6);
+ Select->add_joined_table($1);
+ Select->add_joined_table($6);
add_join_natural($6,$1,NULL,Select);
LEX *lex= Lex;
if (unlikely(!($$= lex->current_select->convert_right_join())))
@@ -12007,241 +12053,49 @@ use_partition:
PARTITION_SYM '(' using_list ')' have_partitioning
{
$$= $3;
+ Select->parsing_place= Select->save_parsing_place;
+ Select->save_parsing_place= NO_MATTER;
}
;
-
-/*
- This is a flattening of the rules <table factor> and <table primary>
- in the SQL:2003 standard, since we don't have <sample clause>
- I.e.
- <table factor> ::= <table primary> [ <sample clause> ]
-*/
-/* Warning - may return NULL in case of incomplete SELECT */
table_factor:
- table_primary_ident
- | table_primary_derived
+ table_primary_ident { $$= $1; }
+ | table_primary_derived { $$= $1; }
+ | join_table_parens { $$= $1; }
+ | table_reference_list_parens { $$= $1; }
;
-table_primary_ident:
+table_reference_list_parens:
+ '(' table_reference_list_parens ')' { $$= $2; }
+ | '(' nested_table_reference_list ')'
{
- DBUG_ASSERT(Select);
- SELECT_LEX *sel= Select;
- sel->table_join_options= 0;
- }
- table_ident opt_use_partition opt_for_system_time_clause opt_table_alias opt_key_definition
- {
- if (unlikely(!($$= Select->add_table_to_list(thd, $2, $5,
- Select->get_table_join_options(),
- YYPS->m_lock_type,
- YYPS->m_mdl_type,
- Select->
- pop_index_hints(),
- $3))))
+ if (!($$= Select->end_nested_join(thd)))
MYSQL_YYABORT;
- TABLE_LIST *tl= $$;
- Select->add_joined_table(tl);
- if ($4)
- tl->vers_conditions= Lex->vers_conditions;
}
;
-
-
-/*
- Represents a flattening of the following rules from the SQL:2003
- standard. This sub-rule corresponds to the sub-rule
- <table primary> ::= ... | <derived table> [ AS ] <correlation name>
-
- <derived table> ::= <table subquery>
- <table subquery> ::= <subquery>
- <subquery> ::= <left paren> <query expression> <right paren>
- <query expression> ::= [ <with clause> ] <query expression body>
-
- For the time being we use the non-standard rule
- select_derived_union which is a compromise between the standard
- and our parser. Possibly this rule could be replaced by our
- query_expression_body.
-*/
-
-table_primary_derived:
- '(' get_select_lex select_derived_union ')' opt_for_system_time_clause opt_table_alias
+nested_table_reference_list:
+ table_ref ',' table_ref
{
- /* Use $2 instead of Lex->current_select as derived table will
- alter value of Lex->current_select. */
- if (!($3 || $6) && $2->embedding &&
- !$2->embedding->nested_join->join_list.elements)
- {
- /* we have a derived table ($3 == NULL) but no alias,
- Since we are nested in further parentheses so we
- can pass NULL to the outer level parentheses
- Permits parsing of "((((select ...))) as xyz)" */
- $$= 0;
- }
- else if (!$3)
- {
- /* Handle case of derived table, alias may be NULL if there
- are no outer parentheses, add_table_to_list() will throw
- error in this case */
- LEX *lex=Lex;
- lex->check_automatic_up(UNSPECIFIED_TYPE);
- SELECT_LEX *sel= lex->current_select;
- SELECT_LEX_UNIT *unit= sel->master_unit();
- lex->current_select= sel= unit->outer_select();
- Table_ident *ti= new (thd->mem_root) Table_ident(unit);
- if (unlikely(ti == NULL))
- MYSQL_YYABORT;
- if (unlikely(!($$= sel->add_table_to_list(thd,
- ti, $6, 0,
- TL_READ,
- MDL_SHARED_READ))))
- MYSQL_YYABORT;
- sel->add_joined_table($$);
- lex->pop_context();
- lex->nest_level--;
- }
- else if (unlikely($6 != NULL))
- {
- /*
- Tables with or without joins within parentheses cannot
- have aliases, and we ruled out derived tables above.
- */
- thd->parse_error();
+ if (Select->init_nested_join(thd))
MYSQL_YYABORT;
- }
- else
- {
- /* nested join: FROM (t1 JOIN t2 ...),
- nest_level is the same as in the outer query */
- $$= $3;
- }
- /*
- Fields in derived table can be used in upper select in
- case of merge. We do not add HAVING fields because we do
- not merge such derived. We do not add union because
- also do not merge them
- */
- if ($$ && $$->derived &&
- !$$->derived->first_select()->next_select())
- $$->select_lex->add_where_field($$->derived->first_select());
- if ($5)
- {
- MYSQL_YYABORT_UNLESS(!$3);
- $$->vers_conditions= Lex->vers_conditions;
- }
+ Select->add_joined_table($1);
+ Select->add_joined_table($3);
+ $$= $1->embedding;
}
- /* Represents derived table with WITH clause */
- | '(' get_select_lex subselect_start
- with_clause query_expression_body
- subselect_end ')' opt_for_system_time_clause opt_table_alias
+ | nested_table_reference_list ',' table_ref
{
- LEX *lex=Lex;
- SELECT_LEX *sel= $2;
- SELECT_LEX_UNIT *unit= $5->master_unit();
- Table_ident *ti= new (thd->mem_root) Table_ident(unit);
- if (unlikely(ti == NULL))
- MYSQL_YYABORT;
- $5->set_with_clause($4);
- lex->current_select= sel;
- if (unlikely(!($$= sel->add_table_to_list(lex->thd,
- ti, $9, 0,
- TL_READ,
- MDL_SHARED_READ))))
- MYSQL_YYABORT;
- sel->add_joined_table($$);
- if ($8)
- $$->vers_conditions= Lex->vers_conditions;
- }
- ;
-
-/*
- This rule accepts just about anything. The reason is that we have
- empty-producing rules in the beginning of rules, in this case
- subselect_start. This forces bison to take a decision which rules to
- reduce by long before it has seen any tokens. This approach ties us
- to a very limited class of parseable languages, and unfortunately
- SQL is not one of them. The chosen 'solution' was this rule, which
- produces just about anything, even complete bogus statements, for
- instance ( table UNION SELECT 1 ).
- Fortunately, we know that the semantic value returned by
- select_derived is NULL if it contained a derived table, and a pointer to
- the base table's TABLE_LIST if it was a base table. So in the rule
- regarding union's, we throw a parse error manually and pretend it
- was bison that did it.
-
- Also worth noting is that this rule concerns query expressions in
- the from clause only. Top level select statements and other types of
- subqueries have their own union rules.
-*/
-select_derived_union:
- select_derived
- | select_derived union_order_or_limit
- {
- if (unlikely($1))
- {
- thd->parse_error();
- MYSQL_YYABORT;
- }
- }
- | select_derived union_head_non_top
- {
- if (unlikely($1))
- {
- thd->parse_error();
- MYSQL_YYABORT;
- }
- }
- union_list_derived_part2
- | derived_simple_table opt_select_lock_type
- | derived_simple_table order_or_limit opt_select_lock_type
- | derived_simple_table opt_select_lock_type union_list_derived
- ;
-
-union_list_derived_part2:
- query_term_union_not_ready { Lex->pop_context(); }
- | query_term_union_ready { Lex->pop_context(); }
- | query_term_union_ready { Lex->pop_context(); } union_list_derived
- ;
-
-union_list_derived:
- union_head_non_top union_list_derived_part2
- ;
-
-
-/* The equivalent of select_init2 for nested queries. */
-select_init2_derived:
- select_part2_derived
- {
- Select->set_braces(0);
- }
- ;
-
-/* The equivalent of select_part2 for nested queries. */
-select_part2_derived:
- {
- LEX *lex= Lex;
- SELECT_LEX *sel= lex->current_select;
- if (sel->linkage != UNION_TYPE)
- mysql_init_select(lex);
- lex->current_select->parsing_place= SELECT_LIST;
- }
- opt_query_expression_options select_item_list
- {
- Select->parsing_place= NO_MATTER;
+ Select->add_joined_table($3);
+ $$= $1;
}
;
-/* handle contents of parentheses in join expression */
-select_derived:
- get_select_lex_derived derived_table_list
+join_table_parens:
+ '(' join_table_parens ')' { $$= $2; }
+ | '(' join_table ')'
{
LEX *lex= Lex;
- /* for normal joins, $2 != NULL and end_nested_join() != NULL,
- for derived tables, both must equal NULL */
-
- if (unlikely(!($$= $1->end_nested_join(lex->thd)) && $2))
- MYSQL_YYABORT;
- if (unlikely(!$2 && $$))
+ if (!($$= lex->current_select->nest_last_join(thd)))
{
thd->parse_error();
MYSQL_YYABORT;
@@ -12249,83 +12103,54 @@ select_derived:
}
;
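
As an illustration (not taken from the patch), the reworked table-reference rules above are meant to cover FROM clauses such as the following; table and column names are hypothetical:

    -- parenthesized join handled by join_table_parens
    SELECT *
    FROM (t1 JOIN t2 ON t1.a = t2.a)
    LEFT JOIN t3 ON t3.b = t2.b;

    -- comma-separated list in parentheses, per the nested_table_reference_list alternative
    SELECT * FROM (t1, t2) JOIN t3 ON t3.a = t1.a AND t3.a = t2.a;
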
-derived_simple_table:
- derived_query_specification { $$= $1; }
- | derived_table_value_constructor { $$= $1; }
- ;
-/*
- Similar to query_specification, but for derived tables.
- Example: the inner parenthesized SELECT in this query:
- SELECT * FROM (SELECT * FROM t1);
-*/
-derived_query_specification:
- SELECT_SYM select_derived_init select_derived2
- {
- if ($2)
- Select->set_braces(1);
- $$= NULL;
- }
- ;
-derived_table_value_constructor:
- VALUES
- {
- Lex->tvc_start();
- }
- values_list
+table_primary_ident:
+ table_ident opt_use_partition opt_for_system_time_clause
+ opt_table_alias_clause opt_key_definition
{
- if (Lex->tvc_finalize_derived())
+ SELECT_LEX *sel= Select;
+ sel->table_join_options= 0;
+ if (!($$= Select->add_table_to_list(thd, $1, $4,
+ Select->get_table_join_options(),
+ YYPS->m_lock_type,
+ YYPS->m_mdl_type,
+ Select->pop_index_hints(),
+ $2)))
MYSQL_YYABORT;
- $$= NULL;
+ if ($3)
+ $$->vers_conditions= Lex->vers_conditions;
}
;
-select_derived2:
- {
- LEX *lex= Lex;
- lex->derived_tables|= DERIVED_SUBQUERY;
- if (unlikely(!lex->expr_allows_subselect ||
- lex->sql_command == (int)SQLCOM_PURGE))
- {
- thd->parse_error();
- MYSQL_YYABORT;
- }
- if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE ||
- unlikely(mysql_new_select(lex, 1, NULL)))
- MYSQL_YYABORT;
- mysql_init_select(lex);
- lex->current_select->linkage= DERIVED_TABLE_TYPE;
- lex->current_select->parsing_place= SELECT_LIST;
- }
- select_options select_item_list
- {
- Select->parsing_place= NO_MATTER;
- }
- opt_table_expression
- ;
+/*
+ Represents a flattening of the following rules from the SQL:2003
+ standard. This sub-rule corresponds to the sub-rule
+ <table primary> ::= ... | <derived table> [ AS ] <correlation name>
-get_select_lex:
- /* Empty */ { $$= Select; }
- ;
+ <derived table> ::= <table subquery>
+ <table subquery> ::= <subquery>
+ <subquery> ::= <left paren> <query expression> <right paren>
+ <query expression> ::= [ <with clause> ] <query expression body>
-get_select_lex_derived:
- get_select_lex
+ For the time being we use the non-standard rule
+ select_derived_union which is a compromise between the standard
+ and our parser. Possibly this rule could be replaced by our
+ query_expression_body.
+*/
+
+table_primary_derived:
+ query_primary_parens opt_for_system_time_clause table_alias_clause
{
- LEX *lex= Lex;
- if (unlikely($1->init_nested_join(lex->thd)))
- MYSQL_YYABORT;
+ if (!($$= Lex->parsed_derived_select($1, $2, $3)))
+ YYABORT;
}
- ;
-
-select_derived_init:
+ | '('
+ query_expression
+ ')' opt_for_system_time_clause table_alias_clause
{
- LEX *lex= Lex;
-
- TABLE_LIST *embedding= lex->current_select->embedding;
- $$= embedding &&
- !embedding->nested_join->join_list.elements;
- /* return true if we are deeply nested */
+ if (!($$= Lex->parsed_derived_unit($2, $4, $5)))
+ YYABORT;
}
;
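
A sketch of what table_primary_derived accepts (hypothetical tables); note that table_alias_clause is not optional here, so the derived table must be named:

    SELECT dt.a
    FROM (SELECT a FROM t1 UNION ALL SELECT a FROM t2) AS dt;
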
@@ -12459,9 +12284,14 @@ table_alias:
| '='
;
-opt_table_alias:
+opt_table_alias_clause:
/* empty */ { $$=0; }
- | table_alias ident_table_alias
+
+ | table_alias_clause { $$= $1; }
+ ;
+
+table_alias_clause:
+ table_alias ident_table_alias
{
$$= (LEX_CSTRING*) thd->memdup(&$2,sizeof(LEX_STRING));
if (unlikely($$ == NULL))
@@ -12557,7 +12387,7 @@ olap_opt:
SQL-2003: GROUP BY ... CUBE(col1, col2, col3)
*/
LEX *lex=Lex;
- if (unlikely(lex->current_select->linkage == GLOBAL_OPTIONS_TYPE))
+ if (unlikely(lex->current_select->get_linkage() == GLOBAL_OPTIONS_TYPE))
my_yyabort_error((ER_WRONG_USAGE, MYF(0), "WITH CUBE",
"global union parameters"));
lex->current_select->olap= CUBE_TYPE;
@@ -12574,7 +12404,7 @@ olap_opt:
SQL-2003: GROUP BY ... ROLLUP(col1, col2, col3)
*/
LEX *lex= Lex;
- if (unlikely(lex->current_select->linkage == GLOBAL_OPTIONS_TYPE))
+ if (unlikely(lex->current_select->get_linkage() == GLOBAL_OPTIONS_TYPE))
my_yyabort_error((ER_WRONG_USAGE, MYF(0), "WITH ROLLUP",
"global union parameters"));
lex->current_select->olap= ROLLUP_TYPE;
@@ -12626,6 +12456,7 @@ opt_window_ref:
if (unlikely(thd->lex->win_ref == NULL))
MYSQL_YYABORT;
}
+ ;
opt_window_partition_clause:
/* empty */ { }
@@ -12634,7 +12465,7 @@ opt_window_partition_clause:
opt_window_order_clause:
/* empty */ { }
- | ORDER_SYM BY order_list
+ | ORDER_SYM BY order_list { Select->order_list= *($3); }
;
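
For illustration only: a window specification that exercises opt_window_partition_clause and the opt_window_order_clause action above, which now stores the list in Select->order_list (hypothetical table):

    SELECT a, SUM(b) OVER (PARTITION BY c ORDER BY a) AS running_total
    FROM t1;
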
opt_window_frame_clause:
@@ -12760,70 +12591,35 @@ alter_order_item:
opt_order_clause:
/* empty */
+ { $$= NULL; }
| order_clause
+ { $$= $1; }
;
order_clause:
ORDER_SYM BY
{
- LEX *lex=Lex;
- SELECT_LEX *sel= lex->current_select;
- SELECT_LEX_UNIT *unit= sel-> master_unit();
- if (unlikely(sel->linkage != GLOBAL_OPTIONS_TYPE &&
- sel->olap != UNSPECIFIED_OLAP_TYPE &&
- (sel->linkage != UNION_TYPE || sel->braces)))
- {
- my_error(ER_WRONG_USAGE, MYF(0),
- "CUBE/ROLLUP", "ORDER BY");
- MYSQL_YYABORT;
- }
- if (lex->sql_command != SQLCOM_ALTER_TABLE &&
- !unit->fake_select_lex)
- {
- /*
- A query of the of the form (SELECT ...) ORDER BY order_list is
- executed in the same way as the query
- SELECT ... ORDER BY order_list
- unless the SELECT construct contains ORDER BY or LIMIT clauses.
- Otherwise we create a fake SELECT_LEX if it has not been
- created yet.
- */
- SELECT_LEX *first_sl= unit->first_select();
- if (unlikely(!unit->is_unit_op() &&
- (first_sl->order_list.elements ||
- first_sl->select_limit) &&
- unit->add_fake_select_lex(thd)))
- MYSQL_YYABORT;
- }
- if (sel->master_unit()->is_unit_op() && !sel->braces)
- {
- /*
- At this point we don't know yet whether this is the last
- select in union or not, but we move ORDER BY to
- fake_select_lex anyway. If there would be one more select
- in union mysql_new_select will correctly throw error.
- */
- DBUG_ASSERT(sel->master_unit()->fake_select_lex);
- lex->current_select= sel->master_unit()->fake_select_lex;
- }
+ thd->where= "ORDER clause";
}
order_list
{
-
+ $$= $4;
}
;
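
The removed action used to move a trailing ORDER BY of a parenthesized or UNION query into fake_select_lex; the rule now only returns the parsed list and leaves placement to the caller. The motivating case from the removed comment, as an example (hypothetical table):

    -- the outer ORDER BY applies to the result of the whole parenthesized query
    (SELECT a FROM t1 ORDER BY b LIMIT 10) ORDER BY a;
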
order_list:
order_list ',' order_ident order_dir
{
- if (unlikely(add_order_to_list(thd, $3,(bool) $4)))
- MYSQL_YYABORT;
- }
+ $$= $1;
+ if (add_to_list(thd, *$$, $3,(bool) $4))
+ MYSQL_YYABORT;
+ }
| order_ident order_dir
{
- if (unlikely(add_order_to_list(thd, $1,(bool) $2)))
+ $$= new (thd->mem_root) SQL_I_List<ORDER>();
+ if (add_to_list(thd, *$$, $1, (bool) $2))
MYSQL_YYABORT;
- }
+ }
;
order_dir:
@@ -12833,63 +12629,62 @@ order_dir:
;
opt_limit_clause:
- /* empty */ {}
- | limit_clause {}
+ /* empty */
+ { $$.empty(); }
+ | limit_clause
+ { $$= $1; }
;
-limit_clause_init:
- LIMIT
- {
- SELECT_LEX *sel= Select;
- if (sel->master_unit()->is_unit_op() && !sel->braces)
- {
- /* Move LIMIT that belongs to UNION to fake_select_lex */
- Lex->current_select= sel->master_unit()->fake_select_lex;
- DBUG_ASSERT(Select);
- }
- }
- ;
-
limit_clause:
- limit_clause_init limit_options
+ LIMIT limit_options
{
- SELECT_LEX *sel= Select;
- if (!sel->select_limit->basic_const_item() ||
- sel->select_limit->val_int() > 0)
+ $$= $2;
+ if (!$$.select_limit->basic_const_item() ||
+ $$.select_limit->val_int() > 0)
Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_LIMIT);
}
- | limit_clause_init limit_options
+ | LIMIT limit_options
ROWS_SYM EXAMINED_SYM limit_rows_option
{
+ $$= $2;
Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_LIMIT);
}
- | limit_clause_init ROWS_SYM EXAMINED_SYM limit_rows_option
+ | LIMIT ROWS_SYM EXAMINED_SYM limit_rows_option
{
+ $$.select_limit= 0;
+ $$.offset_limit= 0;
+ $$.explicit_limit= 0;
Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_LIMIT);
}
;
+opt_global_limit_clause:
+ opt_limit_clause
+ {
+ Select->explicit_limit= $1.explicit_limit;
+ Select->select_limit= $1.select_limit;
+ Select->offset_limit= $1.offset_limit;
+ }
+ ;
+
limit_options:
limit_option
{
- SELECT_LEX *sel= Select;
- sel->select_limit= $1;
- sel->offset_limit= 0;
- sel->explicit_limit= 1;
+ $$.select_limit= $1;
+ $$.offset_limit= 0;
+ $$.explicit_limit= 1;
}
| limit_option ',' limit_option
{
- SELECT_LEX *sel= Select;
- sel->select_limit= $3;
- sel->offset_limit= $1;
- sel->explicit_limit= 1;
+ $$.select_limit= $3;
+ $$.offset_limit= $1;
+ $$.explicit_limit= 1;
}
| limit_option OFFSET_SYM limit_option
{
- SELECT_LEX *sel= Select;
- sel->select_limit= $1;
- sel->offset_limit= $3;
- sel->explicit_limit= 1;
+ $$.select_limit= $1;
+ $$.offset_limit= $3;
+ $$.explicit_limit= 1;
}
;
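
limit_options now fills a Lex_select_limit value instead of writing directly into the current SELECT_LEX; the three alternatives correspond to (hypothetical table):

    SELECT * FROM t1 LIMIT 10;           -- select_limit=10, offset_limit=0
    SELECT * FROM t1 LIMIT 5, 10;        -- offset_limit=5, select_limit=10
    SELECT * FROM t1 LIMIT 10 OFFSET 5;  -- same limits as the previous statement
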
@@ -12934,6 +12729,7 @@ limit_rows_option:
LEX *lex=Lex;
lex->limit_rows_examined= $1;
}
+ ;
delete_limit_clause:
/* empty */
@@ -12952,6 +12748,77 @@ delete_limit_clause:
| LIMIT limit_option ROWS_SYM EXAMINED_SYM { thd->parse_error(); MYSQL_YYABORT; }
;
+opt_order_limit_lock:
+ /* empty */
+ { $$= NULL; }
+ | order_or_limit
+ {
+ $$= $1;
+ $$->lock.empty();
+ }
+ | order_or_limit select_lock_type
+ {
+ $$= $1;
+ $$->lock= $2;
+ }
+ | select_lock_type
+ {
+ $$= new(thd->mem_root) Lex_order_limit_lock;
+ if (!$$)
+ YYABORT;
+ $$->order_list= NULL;
+ $$->limit.empty();
+ $$->lock= $1;
+ }
+ ;
+query_expression_tail:
+ opt_order_limit_lock
+ ;
+
+opt_procedure_or_into:
+ /* empty */
+ {
+ $$.empty();
+ }
+ | procedure_clause opt_select_lock_type
+ {
+ $$= $2;
+ }
+ | into opt_select_lock_type
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WARN_DEPRECATED_SYNTAX,
+ ER_THD(thd, ER_WARN_DEPRECATED_SYNTAX),
+ "<select expression> INTO <destination>;",
+ "'SELECT <select list> INTO <destination>"
+ " FROM...'");
+ $$= $2;
+ }
+ ;
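
Per the warning text added above, an INTO placed after the table expression is reported as deprecated in favour of putting it right after the select list; illustrative statements (table and file names hypothetical):

    SELECT a, b FROM t1 INTO OUTFILE '/tmp/t1.csv';   -- parsed here, raises the deprecation warning
    SELECT a, b INTO OUTFILE '/tmp/t1.csv' FROM t1;   -- form suggested by the warning
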
+
+
+order_or_limit:
+ order_clause opt_limit_clause
+ {
+ $$= new(thd->mem_root) Lex_order_limit_lock;
+ if (!$$)
+ YYABORT;
+ $$->order_list= $1;
+ $$->limit= $2;
+ }
+ | limit_clause
+ {
+ Lex_order_limit_lock *op= $$= new(thd->mem_root) Lex_order_limit_lock;
+ if (!$$)
+ YYABORT;
+ op->order_list= NULL;
+ op->limit= $1;
+ $$->order_list= NULL;
+ $$->limit= $1;
+ }
+ ;
+
+
opt_plus:
/* empty */
| '+'
@@ -12960,6 +12827,7 @@ opt_plus:
int_num:
opt_plus NUM { int error; $$= (int) my_strtoll10($2.str, (char**) 0, &error); }
| '-' NUM { int error; $$= -(int) my_strtoll10($2.str, (char**) 0, &error); }
+ ;
ulong_num:
opt_plus NUM { int error; $$= (ulong) my_strtoll10($2.str, (char**) 0, &error); }
@@ -12983,7 +12851,7 @@ longlong_num:
| LONG_NUM { int error; $$= (longlong) my_strtoll10($1.str, (char**) 0, &error); }
| '-' NUM { int error; $$= -(longlong) my_strtoll10($2.str, (char**) 0, &error); }
| '-' LONG_NUM { int error; $$= -(longlong) my_strtoll10($2.str, (char**) 0, &error); }
-
+ ;
ulonglong_num:
opt_plus NUM { int error; $$= (ulonglong) my_strtoll10($2.str, (char**) 0, &error); }
@@ -13020,15 +12888,13 @@ bool:
ulong_num { $$= $1 != 0; }
| TRUE_SYM { $$= 1; }
| FALSE_SYM { $$= 0; }
-
+ ;
procedure_clause:
PROCEDURE_SYM ident /* Procedure name */
{
LEX *lex=Lex;
- DBUG_ASSERT(&lex->select_lex == lex->current_select);
-
lex->proc_list.elements=0;
lex->proc_list.first=0;
lex->proc_list.next= &lex->proc_list.first;
@@ -13048,6 +12914,7 @@ procedure_clause:
parameters are reduced.
*/
Lex->expr_allows_subselect= false;
+ Select->options|= OPTION_PROCEDURE_CLAUSE;
}
'(' procedure_list ')'
{
@@ -13131,6 +12998,7 @@ select_outvar:
into:
INTO into_destination
+ {}
;
into_destination:
@@ -13324,10 +13192,11 @@ table_list:
table_name:
table_ident
{
- if (unlikely(!Select->add_table_to_list(thd, $1, NULL,
- TL_OPTION_UPDATING,
- YYPS->m_lock_type,
- YYPS->m_mdl_type)))
+ if (!thd->lex->current_select_or_default()->
+ add_table_to_list(thd, $1, NULL,
+ TL_OPTION_UPDATING,
+ YYPS->m_lock_type,
+ YYPS->m_mdl_type))
MYSQL_YYABORT;
}
;
@@ -13400,17 +13269,24 @@ insert:
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_INSERT;
- lex->duplicates= DUP_ERROR;
+ lex->duplicates= DUP_ERROR;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
mysql_init_select(lex);
+ lex->current_select->parsing_place= BEFORE_OPT_LIST;
}
insert_lock_option
opt_ignore insert2
{
Select->set_lock_for_tables($3);
- Lex->current_select= &Lex->select_lex;
+ Lex->current_select= Lex->first_select_lex();
}
insert_field_spec opt_insert_update
- {}
+ {
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
;
replace:
@@ -13419,15 +13295,22 @@ replace:
LEX *lex=Lex;
lex->sql_command = SQLCOM_REPLACE;
lex->duplicates= DUP_REPLACE;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
mysql_init_select(lex);
+ lex->current_select->parsing_place= BEFORE_OPT_LIST;
}
replace_lock_option insert2
{
Select->set_lock_for_tables($3);
- Lex->current_select= &Lex->select_lex;
+ Lex->current_select= Lex->first_select_lex();
}
insert_field_spec
- {}
+ {
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
;
insert_lock_option:
@@ -13467,18 +13350,21 @@ insert2:
;
insert_table:
+ {
+ Select->save_parsing_place= Select->parsing_place;
+ }
table_name_with_opt_use_partition
{
LEX *lex=Lex;
- lex->field_list.empty();
+ //lex->field_list.empty();
lex->many_values.empty();
lex->insert_list=0;
- };
+ }
+ ;
insert_field_spec:
insert_values {}
- | '(' ')' insert_values {}
- | '(' fields ')' insert_values {}
+ | insert_field_list insert_values {}
| SET
{
LEX *lex=Lex;
@@ -13486,20 +13372,33 @@ insert_field_spec:
unlikely(lex->many_values.push_back(lex->insert_list,
thd->mem_root)))
MYSQL_YYABORT;
+ lex->current_select->parsing_place= NO_MATTER;
}
ident_eq_list
;
+insert_field_list:
+ LEFT_PAREN_ALT opt_fields ')'
+ {
+ Lex->current_select->parsing_place= AFTER_LIST;
+ }
+ ;
+
+opt_fields:
+ /* empty */
+ | fields
+ ;
+
fields:
fields ',' insert_ident
{ Lex->field_list.push_back($3, thd->mem_root); }
| insert_ident { Lex->field_list.push_back($1, thd->mem_root); }
;
+
+
insert_values:
- VALUES values_list {}
- | VALUE_SYM values_list {}
- | create_select_query_expression {}
+ create_select_query_expression {}
;
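
The column list now goes through insert_field_list/opt_fields and the row source is always a create_select_query_expression, so the accepted INSERT forms are unchanged; for example (hypothetical tables):

    INSERT INTO t1 (a, b) VALUES (1, 2), (3, 4);
    INSERT INTO t1 () VALUES ();              -- empty column list via opt_fields
    INSERT INTO t1 SET a = 1, b = 2;          -- SET form handled by ident_eq_list
    INSERT INTO t1 (a) SELECT a FROM t2;      -- query expression as source
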
values_list:
@@ -13643,27 +13542,48 @@ opt_insert_update:
}
;
+update_table_list:
+ table_ident opt_use_partition for_portion_of_time_clause
+ opt_table_alias_clause opt_key_definition
+ {
+ SELECT_LEX *sel= Select;
+ sel->table_join_options= 0;
+ if (!($$= Select->add_table_to_list(thd, $1, $4,
+ Select->get_table_join_options(),
+ YYPS->m_lock_type,
+ YYPS->m_mdl_type,
+ Select->pop_index_hints(),
+ $2)))
+ MYSQL_YYABORT;
+ $$->period_conditions= Lex->period_conditions;
+ }
+ | join_table_list { $$= $1; }
+ ;
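
update_table_list lets a single-table UPDATE carry a FOR PORTION OF clause; a sketch, assuming the FROM ... TO form of for_portion_of_time_clause (which is defined outside this hunk) and a hypothetical period name:

    UPDATE t1 FOR PORTION OF app_period
           FROM '2019-01-01' TO '2019-06-01'
    SET dept = 'sales'
    WHERE id = 7;
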
+
/* Update rows in a table */
update:
UPDATE_SYM
{
LEX *lex= Lex;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
mysql_init_select(lex);
lex->sql_command= SQLCOM_UPDATE;
lex->duplicates= DUP_ERROR;
}
- opt_low_priority opt_ignore join_table_list
+ opt_low_priority opt_ignore update_table_list
SET update_list
{
LEX *lex= Lex;
- if (lex->select_lex.table_list.elements > 1)
+ if (lex->first_select_lex()->table_list.elements > 1)
lex->sql_command= SQLCOM_UPDATE_MULTI;
- else if (unlikely(lex->select_lex.get_table_list()->derived))
+ else if (lex->first_select_lex()->get_table_list()->derived)
{
/* it is single table update and it is update of derived table */
my_error(ER_NON_UPDATABLE_TABLE, MYF(0),
- lex->select_lex.get_table_list()->alias.str, "UPDATE");
+ lex->first_select_lex()->get_table_list()->alias.str,
+ "UPDATE");
MYSQL_YYABORT;
}
/*
@@ -13673,7 +13593,14 @@ update:
*/
Select->set_lock_for_tables($3);
}
- opt_where_clause opt_order_clause delete_limit_clause {}
+ opt_where_clause opt_order_clause delete_limit_clause
+ {
+ if ($10)
+ Select->order_list= *($10);
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
;
update_list:
@@ -13717,12 +13644,13 @@ delete:
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_DELETE;
- mysql_init_select(lex);
YYPS->m_lock_type= TL_WRITE_DEFAULT;
YYPS->m_mdl_type= MDL_SHARED_WRITE;
-
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ mysql_init_select(lex);
lex->ignore= 0;
- lex->select_lex.init_order();
+ lex->first_select_lex()->order_list.empty();
}
delete_part2
;
@@ -13743,6 +13671,7 @@ delete_part2:
| HISTORY_SYM delete_single_table opt_delete_system_time
{
Lex->last_table()->vers_conditions= Lex->vers_conditions;
+ Lex->pop_select(); //main select
}
;
@@ -13761,12 +13690,25 @@ delete_single_table:
}
;
+delete_single_table_for_period:
+ delete_single_table opt_for_portion_of_time_clause
+ {
+ if ($2)
+ Lex->last_table()->period_conditions= Lex->period_conditions;
+ }
+ ;
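
The same applies to single-table DELETE via delete_single_table_for_period (same hypothetical period as in the UPDATE sketch above):

    DELETE FROM t1
    FOR PORTION OF app_period FROM '2019-01-01' TO '2019-06-01'
    WHERE id = 7;
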
+
single_multi:
- delete_single_table
+ delete_single_table_for_period
opt_where_clause
opt_order_clause
delete_limit_clause
- opt_select_expressions {}
+ opt_select_expressions
+ {
+ if ($3)
+ Select->order_list= *($3);
+ Lex->pop_select(); //main select
+ }
| table_wild_list
{
mysql_init_multi_delete(Lex);
@@ -13777,6 +13719,9 @@ single_multi:
{
if (unlikely(multi_delete_set_locks_and_link_aux_tables(Lex)))
MYSQL_YYABORT;
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
}
| FROM table_alias_ref_list
{
@@ -13788,6 +13733,9 @@ single_multi:
{
if (unlikely(multi_delete_set_locks_and_link_aux_tables(Lex)))
MYSQL_YYABORT;
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
}
;
@@ -13856,9 +13804,9 @@ truncate:
LEX* lex= Lex;
lex->sql_command= SQLCOM_TRUNCATE;
lex->alter_info.reset();
- lex->select_lex.options= 0;
- lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED;
- lex->select_lex.init_order();
+ lex->first_select_lex()->options= 0;
+ lex->sql_cache= LEX::SQL_CACHE_UNSPECIFIED;
+ lex->first_select_lex()->order_list.empty();
YYPS->m_lock_type= TL_WRITE;
YYPS->m_mdl_type= MDL_EXCLUSIVE;
}
@@ -13943,6 +13891,8 @@ show:
LEX *lex=Lex;
lex->wild=0;
lex->ident= null_clex_str;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
mysql_init_select(lex);
lex->current_select->parsing_place= SELECT_LIST;
lex->create_info.init();
@@ -13950,6 +13900,7 @@ show:
show_param
{
Select->parsing_place= NO_MATTER;
+ Lex->pop_select(); //main select
}
;
@@ -13965,40 +13916,40 @@ show_param:
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_TABLES;
- lex->select_lex.db= $3;
- if (unlikely(prepare_schema_table(thd, lex, 0, SCH_TABLE_NAMES)))
+ lex->first_select_lex()->db= $3;
+ if (prepare_schema_table(thd, lex, 0, SCH_TABLE_NAMES))
MYSQL_YYABORT;
}
| opt_full TRIGGERS_SYM opt_db wild_and_where
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_TRIGGERS;
- lex->select_lex.db= $3;
- if (unlikely(prepare_schema_table(thd, lex, 0, SCH_TRIGGERS)))
+ lex->first_select_lex()->db= $3;
+ if (prepare_schema_table(thd, lex, 0, SCH_TRIGGERS))
MYSQL_YYABORT;
}
| EVENTS_SYM opt_db wild_and_where
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_EVENTS;
- lex->select_lex.db= $2;
- if (unlikely(prepare_schema_table(thd, lex, 0, SCH_EVENTS)))
+ lex->first_select_lex()->db= $2;
+ if (prepare_schema_table(thd, lex, 0, SCH_EVENTS))
MYSQL_YYABORT;
}
| TABLE_SYM STATUS_SYM opt_db wild_and_where
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_TABLE_STATUS;
- lex->select_lex.db= $3;
- if (unlikely(prepare_schema_table(thd, lex, 0, SCH_TABLES)))
+ lex->first_select_lex()->db= $3;
+ if (prepare_schema_table(thd, lex, 0, SCH_TABLES))
MYSQL_YYABORT;
}
| OPEN_SYM TABLES opt_db wild_and_where
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_OPEN_TABLES;
- lex->select_lex.db= $3;
- if (unlikely(prepare_schema_table(thd, lex, 0, SCH_OPEN_TABLES)))
+ lex->first_select_lex()->db= $3;
+ if (prepare_schema_table(thd, lex, 0, SCH_OPEN_TABLES))
MYSQL_YYABORT;
}
| PLUGINS_SYM
@@ -14047,12 +13998,13 @@ show_param:
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_BINLOG_EVENTS;
}
- opt_limit_clause
+ opt_global_limit_clause
| RELAYLOG_SYM optional_connection_name EVENTS_SYM binlog_in binlog_from
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_RELAYLOG_EVENTS;
- } opt_limit_clause
+ }
+ opt_global_limit_clause
| keys_or_index from_or_in table_ident opt_db opt_where_clause
{
LEX *lex= Lex;
@@ -14094,13 +14046,13 @@ show_param:
LEX_CSTRING var= {STRING_WITH_LEN("error_count")};
(void) create_select_for_variable(thd, &var);
}
- | WARNINGS opt_limit_clause
+ | WARNINGS opt_global_limit_clause
{ Lex->sql_command = SQLCOM_SHOW_WARNS;}
- | ERRORS opt_limit_clause
+ | ERRORS opt_global_limit_clause
{ Lex->sql_command = SQLCOM_SHOW_ERRORS;}
| PROFILES_SYM
{ Lex->sql_command = SQLCOM_SHOW_PROFILES; }
- | PROFILE_SYM opt_profile_defs opt_profile_args opt_limit_clause
+ | PROFILE_SYM opt_profile_defs opt_profile_args opt_global_limit_clause
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_PROFILE;
@@ -14162,7 +14114,7 @@ show_param:
{
LEX *lex= Lex;
lex->sql_command = SQLCOM_SHOW_CREATE;
- if (unlikely(!lex->select_lex.add_table_to_list(thd, $3, NULL,0)))
+ if (!lex->first_select_lex()->add_table_to_list(thd, $3, NULL,0))
MYSQL_YYABORT;
lex->create_info.storage_media= HA_SM_DEFAULT;
}
@@ -14170,7 +14122,7 @@ show_param:
{
LEX *lex= Lex;
lex->sql_command = SQLCOM_SHOW_CREATE;
- if (unlikely(!lex->select_lex.add_table_to_list(thd, $3, NULL, 0)))
+ if (!lex->first_select_lex()->add_table_to_list(thd, $3, NULL, 0))
MYSQL_YYABORT;
lex->table_type= TABLE_TYPE_VIEW;
}
@@ -14178,7 +14130,7 @@ show_param:
{
LEX *lex= Lex;
lex->sql_command = SQLCOM_SHOW_CREATE;
- if (unlikely(!lex->select_lex.add_table_to_list(thd, $3, NULL, 0)))
+ if (!lex->first_select_lex()->add_table_to_list(thd, $3, NULL, 0))
MYSQL_YYABORT;
lex->table_type= TABLE_TYPE_SEQUENCE;
}
@@ -14395,7 +14347,7 @@ describe:
mysql_init_select(lex);
lex->current_select->parsing_place= SELECT_LIST;
lex->sql_command= SQLCOM_SHOW_FIELDS;
- lex->select_lex.db= null_clex_str;
+ lex->first_select_lex()->db= null_clex_str;
lex->verbose= 0;
if (unlikely(prepare_schema_table(thd, lex, $2, SCH_COLUMNS)))
MYSQL_YYABORT;
@@ -14409,12 +14361,13 @@ describe:
explainable_command
{
LEX *lex=Lex;
- lex->select_lex.options|= SELECT_DESCRIBE;
+ lex->first_select_lex()->options|= SELECT_DESCRIBE;
}
;
explainable_command:
select
+ | select_into
| insert
| replace
| update
@@ -14435,6 +14388,8 @@ analyze_stmt_command:
opt_extended_describe:
EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; }
+ | EXTENDED_SYM ALL
+ { Lex->describe|= DESCRIBE_EXTENDED | DESCRIBE_EXTENDED2; }
| PARTITIONS_SYM { Lex->describe|= DESCRIBE_PARTITIONS; }
| opt_format_json {}
;
@@ -14477,8 +14432,7 @@ flush:
lex->type= 0;
lex->no_write_to_binlog= $2;
}
- flush_options
- {}
+ flush_options {}
;
flush_options:
@@ -14495,6 +14449,7 @@ flush_options:
opt_table_list opt_flush_lock
{}
| flush_options_list
+ {}
;
opt_flush_lock:
@@ -14580,6 +14535,8 @@ flush_option:
{ Lex->type|= REFRESH_DES_KEY_FILE; }
| RESOURCES
{ Lex->type|= REFRESH_USER_RESOURCES; }
+ | SSL_SYM
+ { Lex->type|= REFRESH_SSL;}
| IDENT_sys remember_tok_start
{
Lex->type|= REFRESH_GENERIC;
@@ -14601,6 +14558,37 @@ opt_table_list:
| table_list {}
;
+backup:
+ BACKUP_SYM backup_statements {}
+ ;
+
+backup_statements:
+ STAGE_SYM ident
+ {
+ int type;
+ if (unlikely(Lex->sphead))
+ my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "BACKUP STAGE"));
+ if ((type= find_type($2.str, &backup_stage_names,
+ FIND_TYPE_NO_PREFIX)) <= 0)
+ my_yyabort_error((ER_BACKUP_UNKNOWN_STAGE, MYF(0), $2.str));
+ Lex->sql_command= SQLCOM_BACKUP;
+ Lex->backup_stage= (backup_stages) (type-1);
+ break;
+ }
+ | LOCK_SYM table_ident
+ {
+ if (unlikely(!Select->add_table_to_list(thd, $2, NULL, 0,
+ TL_READ, MDL_SHARED_HIGH_PRIO)))
+ MYSQL_YYABORT;
+ Lex->sql_command= SQLCOM_BACKUP_LOCK;
+ }
+ | UNLOCK_SYM
+ {
+ /* Table list is empty for unlock */
+ Lex->sql_command= SQLCOM_BACKUP_LOCK;
+ }
+ ;
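
Statements accepted by the new backup rules, for illustration; stage names are looked up in backup_stage_names, which is not shown in this hunk, so the names below are assumptions, while LOCK/UNLOCK are literal:

    BACKUP STAGE START;
    BACKUP STAGE BLOCK_COMMIT;
    BACKUP STAGE END;
    BACKUP LOCK t1;     -- adds t1 to the table list with an MDL_SHARED_HIGH_PRIO lock
    BACKUP UNLOCK;      -- table list stays empty
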
+
opt_delete_gtid_domain:
/* empty */ {}
| DELETE_DOMAIN_ID_SYM '=' '(' delete_domain_id_list ')'
@@ -14631,6 +14619,7 @@ delete_domain_id:
optional_flush_tables_arguments:
/* empty */ {$$= 0;}
| AND_SYM DISABLE_SYM CHECKPOINT_SYM {$$= REFRESH_CHECKPOINT; }
+ ;
reset:
RESET_SYM
@@ -14674,34 +14663,18 @@ master_reset_options:
;
purge:
- PURGE
- {
- LEX *lex=Lex;
- lex->type=0;
- lex->sql_command = SQLCOM_PURGE;
- }
- purge_options
- {}
- ;
-
-purge_options:
- master_or_binary LOGS_SYM purge_option
- ;
-
-purge_option:
- TO_SYM TEXT_STRING_sys
+ PURGE master_or_binary LOGS_SYM TO_SYM TEXT_STRING_sys
{
- Lex->to_log = $2.str;
+ Lex->stmt_purge_to($5);
}
- | BEFORE_SYM expr
+ | PURGE master_or_binary LOGS_SYM BEFORE_SYM expr_no_subselect
{
- LEX *lex= Lex;
- lex->value_list.empty();
- lex->value_list.push_front($2, thd->mem_root);
- lex->sql_command= SQLCOM_PURGE_BEFORE;
+ if (Lex->stmt_purge_before($5))
+ MYSQL_YYABORT;
}
;
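
The flattened purge rule accepts the usual forms, with the BEFORE expression now restricted to expr_no_subselect (log file name hypothetical):

    PURGE BINARY LOGS TO 'mysql-bin.000123';
    PURGE MASTER LOGS BEFORE NOW() - INTERVAL 3 DAY;
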
+
/* kill threads */
kill:
@@ -14723,6 +14696,7 @@ kill_type:
/* Empty */ { $$= (int) KILL_HARD_BIT; }
| HARD_SYM { $$= (int) KILL_HARD_BIT; }
| SOFT_SYM { $$= 0; }
+ ;
kill_option:
/* empty */ { $$= (int) KILL_CONNECTION; }
@@ -14750,6 +14724,15 @@ kill_expr:
shutdown:
SHUTDOWN { Lex->sql_command= SQLCOM_SHUTDOWN; }
+ shutdown_option {}
+ ;
+
+shutdown_option:
+ /* Empty */ { Lex->is_shutdown_wait_for_slaves= false; }
+ | WAIT_SYM FOR_SYM ALL SLAVES
+ {
+ Lex->is_shutdown_wait_for_slaves= true;
+ }
;
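
With the new shutdown_option, both of these are parsed; the second sets is_shutdown_wait_for_slaves:

    SHUTDOWN;
    SHUTDOWN WAIT FOR ALL SLAVES;
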
/* change database */
@@ -14759,7 +14742,7 @@ use:
{
LEX *lex=Lex;
lex->sql_command=SQLCOM_CHANGE_DB;
- lex->select_lex.db= $2;
+ lex->first_select_lex()->db= $2;
}
;
@@ -14769,7 +14752,6 @@ load:
LOAD data_or_xml
{
LEX *lex= thd->lex;
- mysql_init_select(lex);
if (unlikely(lex->sphead))
{
@@ -14777,6 +14759,9 @@ load:
$2 == FILETYPE_CSV ? "LOAD DATA" : "LOAD XML");
MYSQL_YYABORT;
}
+ if (lex->main_select_push())
+ MYSQL_YYABORT;
+ mysql_init_select(lex);
}
load_data_lock opt_local INFILE TEXT_STRING_filesystem
{
@@ -14807,7 +14792,11 @@ load:
opt_xml_rows_identified_by
opt_field_term opt_line_term opt_ignore_lines opt_field_or_var_spec
opt_load_data_set_spec
- {}
+ {
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
;
data_or_xml:
@@ -15005,11 +14994,6 @@ hex_or_bin_String:
$1.length);
if (unlikely(tmp == NULL))
MYSQL_YYABORT;
- /*
- it is OK only emulate fix_fields, because we need only
- value of constant
- */
- tmp->quick_fix_field();
$$= tmp->val_str((String*) 0);
}
| HEX_STRING
@@ -15018,7 +15002,6 @@ hex_or_bin_String:
$1.length);
if (unlikely(tmp == NULL))
MYSQL_YYABORT;
- tmp->quick_fix_field();
$$= tmp->val_str((String*) 0);
}
| BIN_NUM
@@ -15031,7 +15014,6 @@ hex_or_bin_String:
it is OK only emulate fix_fields, because we need only
value of constant
*/
- tmp->quick_fix_field();
$$= tmp->val_str((String*) 0);
}
;
@@ -15180,26 +15162,23 @@ NUM_literal:
temporal_literal:
DATE_SYM TEXT_STRING
{
- if (unlikely(!($$= create_temporal_literal(thd, $2.str, $2.length,
- YYCSCL,
- MYSQL_TYPE_DATE,
- true))))
+ if (unlikely(!($$= type_handler_newdate.create_literal_item(thd,
+ $2.str, $2.length,
+ YYCSCL, true))))
MYSQL_YYABORT;
}
| TIME_SYM TEXT_STRING
{
- if (unlikely(!($$= create_temporal_literal(thd, $2.str, $2.length,
- YYCSCL,
- MYSQL_TYPE_TIME,
- true))))
+ if (unlikely(!($$= type_handler_time2.create_literal_item(thd,
+ $2.str, $2.length,
+ YYCSCL, true))))
MYSQL_YYABORT;
}
| TIMESTAMP TEXT_STRING
{
- if (unlikely(!($$= create_temporal_literal(thd, $2.str, $2.length,
- YYCSCL,
- MYSQL_TYPE_DATETIME,
- true))))
+ if (unlikely(!($$= type_handler_datetime.create_literal_item(thd,
+ $2.str, $2.length,
+ YYCSCL, true))))
MYSQL_YYABORT;
}
;
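
The temporal_literal alternatives now build the item through the corresponding type handler; the literal forms themselves are unchanged, e.g.:

    SELECT DATE '2001-02-03',
           TIME '10:11:12',
           TIMESTAMP '2001-02-03 10:11:12';
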
@@ -15215,17 +15194,21 @@ opt_with_clause:
with_clause:
- WITH opt_recursive
+ WITH opt_recursive
{
+ LEX *lex= Lex;
With_clause *with_clause=
new With_clause($2, Lex->curr_with_clause);
if (unlikely(with_clause == NULL))
MYSQL_YYABORT;
- Lex->derived_tables|= DERIVED_WITH;
- Lex->curr_with_clause= with_clause;
+ lex->derived_tables|= DERIVED_WITH;
+ lex->curr_with_clause= with_clause;
with_clause->add_to_list(Lex->with_clauses_list_last_next);
+ if (lex->current_select &&
+ lex->current_select->parsing_place == BEFORE_OPT_LIST)
+ lex->current_select->parsing_place= NO_MATTER;
}
- with_list
+ with_list
{
$$= Lex->curr_with_clause;
Lex->curr_with_clause= Lex->curr_with_clause->pop();
@@ -15254,15 +15237,14 @@ with_list_element:
MYSQL_YYABORT;
Lex->with_column_list.empty();
}
- AS '(' remember_tok_start subselect remember_tok_end ')'
+ AS '(' remember_tok_start query_expression remember_tok_end ')'
{
LEX *lex= thd->lex;
const char *query_start= lex->sphead ? lex->sphead->m_tmp_query
: thd->query();
char *spec_start= $6 + 1;
- With_element *elem= new With_element($1, *$2, $7->master_unit());
- if (unlikely(elem == NULL) ||
- unlikely(Lex->curr_with_clause->add_with_element(elem)))
+ With_element *elem= new With_element($1, *$2, $7);
+ if (elem == NULL || Lex->curr_with_clause->add_with_element(elem))
MYSQL_YYABORT;
if (elem->set_unparsed_spec(thd, spec_start, $8,
spec_start - query_start))
@@ -15576,11 +15558,9 @@ ident_or_text:
user_maybe_role:
ident_or_text
{
- if (unlikely(!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))))
+ if (unlikely(!($$=(LEX_USER*) thd->calloc(sizeof(LEX_USER)))))
MYSQL_YYABORT;
$$->user = $1;
- $$->host= null_clex_str; // User or Role, see get_current_user()
- $$->reset_auth();
if (unlikely(check_string_char_length(&$$->user, ER_USERNAME,
username_char_length,
@@ -15589,10 +15569,9 @@ user_maybe_role:
}
| ident_or_text '@' ident_or_text
{
- if (unlikely(!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))))
+ if (unlikely(!($$=(LEX_USER*) thd->calloc(sizeof(LEX_USER)))))
MYSQL_YYABORT;
$$->user = $1; $$->host=$3;
- $$->reset_auth();
if (unlikely(check_string_char_length(&$$->user, ER_USERNAME,
username_char_length,
@@ -15622,8 +15601,7 @@ user_maybe_role:
if (unlikely(!($$=(LEX_USER*)thd->calloc(sizeof(LEX_USER)))))
MYSQL_YYABORT;
$$->user= current_user;
- $$->plugin= empty_clex_str;
- $$->auth= empty_clex_str;
+ $$->auth= new (thd->mem_root) USER_AUTH();
}
;
@@ -15908,6 +15886,7 @@ keyword_data_type:
*/
keyword_sp_var_and_label:
ACTION
+ | ACCOUNT_SYM
| ADDDATE_SYM
| ADMIN_SYM
| AFTER_SYM
@@ -15992,6 +15971,7 @@ keyword_sp_var_and_label:
| EXCEPTION_MARIADB_SYM
| EXCHANGE_SYM
| EXPANSION_SYM
+ | EXPIRE_SYM
| EXPORT_SYM
| EXTENDED_SYM
| EXTENT_SIZE_SYM
@@ -16082,6 +16062,7 @@ keyword_sp_var_and_label:
| MYSQL_SYM
| MYSQL_ERRNO_SYM
| NAME_SYM
+ | NEVER_SYM
| NEXT_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2
| NEXTVAL_SYM
| NEW_SYM
@@ -16170,6 +16151,7 @@ keyword_sp_var_and_label:
| SQL_BUFFER_RESULT
| SQL_NO_CACHE_SYM
| SQL_THREAD
+ | STAGE_SYM
| STARTS_SYM
| STATEMENT_SYM
| STATUS_SYM
@@ -16238,14 +16220,22 @@ set:
SET
{
LEX *lex=Lex;
+ if (lex->main_select_push())
+ MYSQL_YYABORT;
lex->set_stmt_init();
lex->var_list.empty();
sp_create_assignment_lex(thd, yychar == YYEMPTY);
}
start_option_value_list
- {}
+ {
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
| SET STATEMENT_SYM
{
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
Lex->set_stmt_init();
}
set_stmt_option_value_following_option_type_list
@@ -16255,6 +16245,9 @@ set:
my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0), "SET STATEMENT"));
lex->stmt_var_list= lex->var_list;
lex->var_list.empty();
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
}
FOR_SYM verb_clause
{}
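
A typical SET STATEMENT, for illustration; the variable name is just an example of a session variable, and note that subqueries in the option list are still rejected per the check above:

    SET STATEMENT max_statement_time=1 FOR
      SELECT COUNT(*) FROM t1;
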
@@ -16594,21 +16587,29 @@ opt_for_user:
thd->calloc(sizeof(LEX_USER)))))
MYSQL_YYABORT;
lex->definer->user= current_user;
- lex->definer->plugin= empty_clex_str;
- lex->definer->auth= empty_clex_str;
+ lex->definer->auth= new (thd->mem_root) USER_AUTH();
}
| FOR_SYM user equal { Lex->definer= $2; }
;
text_or_password:
- TEXT_STRING { Lex->definer->pwhash= $1;}
- | PASSWORD_SYM '(' TEXT_STRING ')' { Lex->definer->pwtext= $3; }
+ TEXT_STRING
+ {
+ Lex->definer->auth= new (thd->mem_root) USER_AUTH();
+ Lex->definer->auth->auth_str= $1;
+ }
+ | PASSWORD_SYM '(' TEXT_STRING ')'
+ {
+ Lex->definer->auth= new (thd->mem_root) USER_AUTH();
+ Lex->definer->auth->pwtext= $3;
+ }
| OLD_PASSWORD_SYM '(' TEXT_STRING ')'
{
- Lex->definer->pwtext= $3;
- Lex->definer->pwhash.str= Item_func_password::alloc(thd,
+ Lex->definer->auth= new (thd->mem_root) USER_AUTH();
+ Lex->definer->auth->pwtext= $3;
+ Lex->definer->auth->auth_str.str= Item_func_password::alloc(thd,
$3.str, $3.length, Item_func_password::OLD);
- Lex->definer->pwhash.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
+ Lex->definer->auth->auth_str.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
}
;
@@ -16678,7 +16679,7 @@ table_lock_list:
;
table_lock:
- table_ident opt_table_alias lock_option
+ table_ident opt_table_alias_clause lock_option
{
thr_lock_type lock_type= (thr_lock_type) $3;
bool lock_for_write= (lock_type >= TL_WRITE_ALLOW_WRITE);
@@ -16723,27 +16724,37 @@ unlock:
*/
handler:
- HANDLER_SYM table_ident OPEN_SYM opt_table_alias
+ HANDLER_SYM
+ {
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ }
+ handler_tail
+ {
+ Lex->pop_select(); //main select
+ }
+ ;
+
+handler_tail:
+ table_ident OPEN_SYM opt_table_alias_clause
{
LEX *lex= Lex;
if (unlikely(lex->sphead))
my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HANDLER"));
lex->sql_command = SQLCOM_HA_OPEN;
- if (unlikely(!lex->current_select->add_table_to_list(thd, $2, $4,
- 0)))
+ if (!lex->current_select->add_table_to_list(thd, $1, $3, 0))
MYSQL_YYABORT;
}
- | HANDLER_SYM table_ident_nodb CLOSE_SYM
+ | table_ident_nodb CLOSE_SYM
{
LEX *lex= Lex;
if (unlikely(lex->sphead))
my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HANDLER"));
lex->sql_command = SQLCOM_HA_CLOSE;
- if (unlikely(!lex->current_select->add_table_to_list(thd, $2, 0,
- 0)))
+ if (!lex->current_select->add_table_to_list(thd, $1, 0, 0))
MYSQL_YYABORT;
}
- | HANDLER_SYM table_ident_nodb READ_SYM
+ | table_ident_nodb READ_SYM
{
LEX *lex=Lex;
if (unlikely(lex->sphead))
@@ -16757,15 +16768,24 @@ handler:
lex->current_select->select_limit= one;
lex->current_select->offset_limit= 0;
lex->limit_rows_examined= 0;
- if (unlikely(!lex->current_select->add_table_to_list(thd, $2, 0,
- 0)))
+ if (!lex->current_select->add_table_to_list(thd, $1, 0, 0))
MYSQL_YYABORT;
}
- handler_read_or_scan opt_where_clause opt_limit_clause
+ handler_read_or_scan opt_where_clause opt_global_limit_clause
{
- Lex->expr_allows_subselect= TRUE;
+ LEX *lex=Lex;
+ lex->expr_allows_subselect= TRUE;
+ if (!lex->current_select->explicit_limit)
+ {
+ Item *one= new (thd->mem_root) Item_int(thd, (int32) 1);
+ if (one == NULL)
+ MYSQL_YYABORT;
+ lex->current_select->select_limit= one;
+ lex->current_select->offset_limit= 0;
+ lex->limit_rows_examined= 0;
+ }
/* Stored functions are not supported for HANDLER READ. */
- if (unlikely(Lex->uses_stored_routines()))
+ if (lex->uses_stored_routines())
{
my_error(ER_NOT_SUPPORTED_YET, MYF(0),
"stored functions in HANDLER ... READ");
@@ -16935,12 +16955,14 @@ grant_command:
;
opt_with_admin:
- /* nothing */ { Lex->definer = 0; }
- | WITH ADMIN_SYM user_or_role { Lex->definer = $3; }
+ /* nothing */ { Lex->definer = 0; }
+ | WITH ADMIN_SYM user_or_role { Lex->definer = $3; }
+ ;
opt_with_admin_option:
- /* nothing */ { Lex->with_admin_option= false; }
- | WITH ADMIN_SYM OPTION { Lex->with_admin_option= true; }
+ /* nothing */ { Lex->with_admin_option= false; }
+ | WITH ADMIN_SYM OPTION { Lex->with_admin_option= true; }
+ ;
role_list:
grant_role
@@ -16961,7 +16983,7 @@ current_role:
if (unlikely(!($$=(LEX_USER*) thd->calloc(sizeof(LEX_USER)))))
MYSQL_YYABORT;
$$->user= current_role;
- $$->reset_auth();
+ $$->auth= NULL;
}
;
@@ -16978,7 +17000,7 @@ grant_role:
MYSQL_YYABORT;
$$->user= $1;
$$->host= empty_clex_str;
- $$->reset_auth();
+ $$->auth= NULL;
if (unlikely(check_string_char_length(&$$->user, ER_USERNAME,
username_char_length,
@@ -17068,23 +17090,23 @@ require_list_element:
SUBJECT_SYM TEXT_STRING
{
LEX *lex=Lex;
- if (unlikely(lex->x509_subject))
+ if (lex->account_options.x509_subject.str)
my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "SUBJECT"));
- lex->x509_subject=$2.str;
+ lex->account_options.x509_subject= $2;
}
| ISSUER_SYM TEXT_STRING
{
LEX *lex=Lex;
- if (unlikely(lex->x509_issuer))
+ if (lex->account_options.x509_issuer.str)
my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "ISSUER"));
- lex->x509_issuer=$2.str;
+ lex->account_options.x509_issuer= $2;
}
| CIPHER_SYM TEXT_STRING
{
LEX *lex=Lex;
- if (unlikely(lex->ssl_cipher))
+ if (lex->account_options.ssl_cipher.str)
my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "CIPHER"));
- lex->ssl_cipher=$2.str;
+ lex->account_options.ssl_cipher= $2;
}
;
@@ -17175,31 +17197,67 @@ grant_user:
user IDENTIFIED_SYM BY TEXT_STRING
{
$$= $1;
- $1->pwtext= $4;
- if (unlikely(Lex->sql_command == SQLCOM_REVOKE))
- MYSQL_YYABORT;
+ $1->auth= new (thd->mem_root) USER_AUTH();
+ $1->auth->pwtext= $4;
}
| user IDENTIFIED_SYM BY PASSWORD_SYM TEXT_STRING
{
$$= $1;
- $1->pwhash= $5;
+ $1->auth= new (thd->mem_root) USER_AUTH();
+ $1->auth->auth_str= $5;
}
- | user IDENTIFIED_SYM via_or_with ident_or_text
+ | user IDENTIFIED_SYM via_or_with auth_expression
{
$$= $1;
- $1->plugin= $4;
- $1->auth= empty_clex_str;
+ $1->auth= $4;
}
- | user IDENTIFIED_SYM via_or_with ident_or_text using_or_as TEXT_STRING_sys
+ | user_or_role
{
$$= $1;
- $1->plugin= $4;
- $1->auth= $6;
}
- | user_or_role
- { $$= $1; }
;
+auth_expression:
+ auth_token OR_SYM auth_expression
+ {
+ $$= $1;
+ DBUG_ASSERT($$->next == NULL);
+ $$->next= $3;
+ }
+ | auth_token
+ {
+ $$= $1;
+ }
+ ;
+
+auth_token:
+ ident_or_text opt_auth_str
+ {
+ $$= $2;
+ $$->plugin= $1;
+ }
+ ;
+
+opt_auth_str:
+ /* empty */
+ {
+ if (!($$=(USER_AUTH*) thd->calloc(sizeof(USER_AUTH))))
+ MYSQL_YYABORT;
+ }
+ | using_or_as TEXT_STRING_sys
+ {
+ if (!($$=(USER_AUTH*) thd->calloc(sizeof(USER_AUTH))))
+ MYSQL_YYABORT;
+ $$->auth_str= $2;
+ }
+ | using_or_as PASSWORD_SYM '(' TEXT_STRING ')'
+ {
+ if (!($$=(USER_AUTH*) thd->calloc(sizeof(USER_AUTH))))
+ MYSQL_YYABORT;
+ $$->pwtext= $4;
+ }
+ ;
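
auth_expression allows chaining several authentication methods with OR, each auth_token taking an optional USING/AS string or PASSWORD() via opt_auth_str; the plugin names below (ed25519, unix_socket) are examples of installed plugins, not part of the grammar:

    CREATE USER bob@'%'
      IDENTIFIED VIA ed25519 USING PASSWORD('secret')
      OR unix_socket;
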
+
opt_column_list:
/* empty */
{
@@ -17247,52 +17305,47 @@ opt_require_clause:
/* empty */
| REQUIRE_SYM require_list
{
- Lex->ssl_type=SSL_TYPE_SPECIFIED;
+ Lex->account_options.ssl_type= SSL_TYPE_SPECIFIED;
}
| REQUIRE_SYM SSL_SYM
{
- Lex->ssl_type=SSL_TYPE_ANY;
+ Lex->account_options.ssl_type= SSL_TYPE_ANY;
}
| REQUIRE_SYM X509_SYM
{
- Lex->ssl_type=SSL_TYPE_X509;
+ Lex->account_options.ssl_type= SSL_TYPE_X509;
}
| REQUIRE_SYM NONE_SYM
{
- Lex->ssl_type=SSL_TYPE_NONE;
+ Lex->account_options.ssl_type= SSL_TYPE_NONE;
}
;
resource_option:
MAX_QUERIES_PER_HOUR ulong_num
{
- LEX *lex=Lex;
- lex->mqh.questions=$2;
- lex->mqh.specified_limits|= USER_RESOURCES::QUERIES_PER_HOUR;
+ Lex->account_options.questions=$2;
+ Lex->account_options.specified_limits|= USER_RESOURCES::QUERIES_PER_HOUR;
}
| MAX_UPDATES_PER_HOUR ulong_num
{
- LEX *lex=Lex;
- lex->mqh.updates=$2;
- lex->mqh.specified_limits|= USER_RESOURCES::UPDATES_PER_HOUR;
+ Lex->account_options.updates=$2;
+ Lex->account_options.specified_limits|= USER_RESOURCES::UPDATES_PER_HOUR;
}
| MAX_CONNECTIONS_PER_HOUR ulong_num
{
- LEX *lex=Lex;
- lex->mqh.conn_per_hour= $2;
- lex->mqh.specified_limits|= USER_RESOURCES::CONNECTIONS_PER_HOUR;
+ Lex->account_options.conn_per_hour= $2;
+ Lex->account_options.specified_limits|= USER_RESOURCES::CONNECTIONS_PER_HOUR;
}
| MAX_USER_CONNECTIONS_SYM int_num
{
- LEX *lex=Lex;
- lex->mqh.user_conn= $2;
- lex->mqh.specified_limits|= USER_RESOURCES::USER_CONNECTIONS;
+ Lex->account_options.user_conn= $2;
+ Lex->account_options.specified_limits|= USER_RESOURCES::USER_CONNECTIONS;
}
| MAX_STATEMENT_TIME_SYM NUM_literal
{
- LEX *lex=Lex;
- lex->mqh.max_statement_time= $2->val_real();
- lex->mqh.specified_limits|= USER_RESOURCES::MAX_STATEMENT_TIME;
+ Lex->account_options.max_statement_time= $2->val_real();
+ Lex->account_options.specified_limits|= USER_RESOURCES::MAX_STATEMENT_TIME;
}
;
@@ -17341,8 +17394,8 @@ compound_statement:
sp_proc_stmt_compound_ok
{
Lex->sql_command= SQLCOM_COMPOUND;
- Lex->sphead->set_stmt_end(thd);
- Lex->sphead->restore_thd_mem_root(thd);
+ if (Lex->sp_body_finalize_procedure(thd))
+ MYSQL_YYABORT;
}
;
@@ -17429,214 +17482,27 @@ release:
*/
unit_type_decl:
- UNION_SYM
- { $$= UNION_TYPE; }
+ UNION_SYM union_option
+ { $$.unit_type= UNION_TYPE; $$.distinct= $2; }
| INTERSECT_SYM
- { $$= INTERSECT_TYPE; }
+ { $$.unit_type= INTERSECT_TYPE; $$.distinct= 1; }
| EXCEPT_SYM
- { $$= EXCEPT_TYPE; }
-
-
-union_clause:
- /* empty */ {}
- | union_list
- ;
-
-union_list:
- unit_type_decl union_option
- {
- if (unlikely(Lex->add_select_to_union_list((bool)$2, $1, TRUE)))
- MYSQL_YYABORT;
- }
- union_list_part2
- {
- /*
- Remove from the name resolution context stack the context of the
- last select in the union.
- */
- Lex->pop_context();
- }
- ;
-
-union_list_view:
- unit_type_decl union_option
- {
- if (unlikely(Lex->add_select_to_union_list((bool)$2, $1, TRUE)))
- MYSQL_YYABORT;
- }
- query_expression_body_view
- {
- Lex->pop_context();
- }
- ;
-
-union_order_or_limit:
- {
- LEX *lex= thd->lex;
- DBUG_ASSERT(lex->current_select->linkage != GLOBAL_OPTIONS_TYPE);
- SELECT_LEX *sel= lex->current_select;
- SELECT_LEX_UNIT *unit= sel->master_unit();
- SELECT_LEX *fake= unit->fake_select_lex;
- if (fake)
- {
- fake->no_table_names_allowed= 1;
- lex->current_select= fake;
- }
- thd->where= "global ORDER clause";
- }
- order_or_limit
- {
- thd->lex->current_select->no_table_names_allowed= 0;
- thd->where= "";
- }
- ;
-
-order_or_limit:
- order_clause opt_limit_clause
- | limit_clause
+ { $$.unit_type= EXCEPT_TYPE; $$.distinct= 1; }
;
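
unit_type_decl now carries both the set operation and its distinct flag: UNION takes union_option, while per this rule INTERSECT and EXCEPT are always distinct. For example (hypothetical tables):

    SELECT a FROM t1 UNION ALL SELECT a FROM t2;
    SELECT a FROM t1 INTERSECT SELECT a FROM t2;
    SELECT a FROM t1 EXCEPT SELECT a FROM t3;
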
/*
Start a UNION, for non-top level query expressions.
*/
-union_head_non_top:
- unit_type_decl union_option
- {
- if (unlikely(Lex->add_select_to_union_list((bool)$2, $1, FALSE)))
- MYSQL_YYABORT;
- }
- ;
-
union_option:
/* empty */ { $$=1; }
| DISTINCT { $$=1; }
| ALL { $$=0; }
;
-simple_table:
- query_specification { $$= $1; }
- | table_value_constructor { $$= $1; }
- ;
-
-table_value_constructor:
- VALUES
- {
- Lex->tvc_start();
- }
- values_list
- {
- $$= Lex->current_select;
- if (Lex->tvc_finalize())
- MYSQL_YYABORT;
- }
- ;
-
-/*
- Corresponds to the SQL Standard
- <query specification> ::=
- SELECT [ <set quantifier> ] <select list> <table expression>
-
- Notes:
- - We allow more options in addition to <set quantifier>
- - <table expression> is optional in MariaDB
-*/
-query_specification:
- SELECT_SYM select_init2_derived opt_table_expression
- {
- $$= Lex->current_select->master_unit()->first_select();
- }
- ;
-
-query_term_union_not_ready:
- simple_table order_or_limit opt_select_lock_type { $$= $1; }
- | '(' select_paren_derived ')' union_order_or_limit { $$= $2; }
- ;
-
-query_term_union_ready:
- simple_table opt_select_lock_type { $$= $1; }
- | '(' select_paren_derived ')' { $$= $2; }
- ;
-
-query_expression_body:
- query_term_union_not_ready { $$= $1; }
- | query_term_union_ready { $$= $1; }
- | query_term_union_ready union_list_derived { $$= $1; }
- ;
-
-/* Corresponds to <query expression> in the SQL:2003 standard. */
-subselect:
- subselect_start opt_with_clause query_expression_body subselect_end
- {
- $3->set_with_clause($2);
- $$= $3;
- }
- ;
-
-subselect_start:
- {
- LEX *lex=Lex;
- if (unlikely(!lex->expr_allows_subselect ||
- lex->sql_command == (int)SQLCOM_PURGE))
- {
- thd->parse_error();
- MYSQL_YYABORT;
- }
- /*
- we are making a "derived table" for the parenthesis
- as we need to have a lex level to fit the union
- after the parenthesis, e.g.
- (SELECT .. ) UNION ... becomes
- SELECT * FROM ((SELECT ...) UNION ...)
- */
- if (unlikely(mysql_new_select(Lex, 1, NULL)))
- MYSQL_YYABORT;
- }
- ;
-
-subselect_end:
- {
- LEX *lex=Lex;
-
- lex->check_automatic_up(UNSPECIFIED_TYPE);
- lex->pop_context();
- SELECT_LEX *child= lex->current_select;
- lex->current_select = lex->current_select->return_after_parsing();
- lex->nest_level--;
- lex->current_select->n_child_sum_items += child->n_sum_items;
-
- /*
- A subquery (and all the subsequent query blocks in a UNION) can
- add columns to an outer query block. Reserve space for them.
- Aggregate functions in having clause can also add fields to an
- outer select.
- */
- for (SELECT_LEX *temp= child->master_unit()->first_select();
- temp != NULL; temp= temp->next_select())
- {
- lex->current_select->select_n_where_fields+=
- temp->select_n_where_fields;
- lex->current_select->select_n_having_items+=
- temp->select_n_having_items;
- }
- }
- ;
-
-opt_query_expression_options:
- /* empty */
- | query_expression_option_list
- ;
-
-query_expression_option_list:
- query_expression_option_list query_expression_option
- | query_expression_option
- ;
-
query_expression_option:
STRAIGHT_JOIN { Select->options|= SELECT_STRAIGHT_JOIN; }
| HIGH_PRIORITY
{
- if (unlikely(Lex->check_simple_select(&$1)))
- MYSQL_YYABORT;
YYPS->m_lock_type= TL_READ_HIGH_PRIORITY;
YYPS->m_mdl_type= MDL_SHARED_READ;
Select->options|= SELECT_HIGH_PRIORITY;
@@ -17644,18 +17510,8 @@ query_expression_option:
| DISTINCT { Select->options|= SELECT_DISTINCT; }
| SQL_SMALL_RESULT { Select->options|= SELECT_SMALL_RESULT; }
| SQL_BIG_RESULT { Select->options|= SELECT_BIG_RESULT; }
- | SQL_BUFFER_RESULT
- {
- if (unlikely(Lex->check_simple_select(&$1)))
- MYSQL_YYABORT;
- Select->options|= OPTION_BUFFER_RESULT;
- }
- | SQL_CALC_FOUND_ROWS
- {
- if (unlikely(Lex->check_simple_select(&$1)))
- MYSQL_YYABORT;
- Select->options|= OPTION_FOUND_ROWS;
- }
+ | SQL_BUFFER_RESULT { Select->options|= OPTION_BUFFER_RESULT; }
+ | SQL_CALC_FOUND_ROWS { Select->options|= OPTION_FOUND_ROWS; }
| ALL { Select->options|= SELECT_ALL; }
;
@@ -17688,9 +17544,7 @@ definer:
DEFINER_SYM '=' user_or_role
{
Lex->definer= $3;
- Lex->ssl_type= SSL_TYPE_NOT_SPECIFIED;
- Lex->ssl_cipher= Lex->x509_subject= Lex->x509_issuer= 0;
- bzero(&(Lex->mqh), sizeof(Lex->mqh));
+ Lex->account_options.reset();
}
;
@@ -17743,35 +17597,14 @@ view_select:
lex->parsing_options.allows_variable= FALSE;
lex->create_view->select.str= (char *) YYLIP->get_cpp_ptr();
}
- opt_with_clause query_expression_body_view view_check_option
+ query_expression
+ view_check_option
{
- LEX *lex= Lex;
- size_t len= YYLIP->get_cpp_ptr() - lex->create_view->select.str;
- void *create_view_select= thd->memdup(lex->create_view->select.str, len);
- lex->create_view->select.length= len;
- lex->create_view->select.str= (char *) create_view_select;
- trim_whitespace(thd->charset(),
- &lex->create_view->select);
- lex->create_view->check= $4;
- lex->parsing_options.allows_variable= TRUE;
- lex->current_select->set_with_clause($2);
+ if (Lex->parsed_create_view($2, $3))
+ MYSQL_YYABORT;
}
;
-/*
- SQL Standard <query expression body> for VIEWs.
- Does not include INTO and PROCEDURE clauses.
-*/
-query_expression_body_view:
- SELECT_SYM select_options_and_item_list select_init3_view
- | table_value_constructor
- | table_value_constructor union_order_or_limit
- | table_value_constructor union_list_view
- | '(' select_paren_view ')'
- | '(' select_paren_view ')' union_order_or_limit
- | '(' select_paren_view ')' union_list_view
- ;
-
view_check_option:
/* empty */ { $$= VIEW_CHECK_NONE; }
| WITH CHECK_SYM OPTION { $$= VIEW_CHECK_CASCADED; }
@@ -17848,7 +17681,8 @@ trigger_tail:
(*static_cast<st_trg_execution_order*>(&lex->trg_chistics))= ($17);
lex->trg_chistics.ordering_clause_end= lip->get_cpp_ptr();
- if (unlikely(!lex->make_sp_head(thd, $4, &sp_handler_trigger)))
+ if (unlikely(!lex->make_sp_head(thd, $4, &sp_handler_trigger,
+ DEFAULT_AGGREGATE)))
MYSQL_YYABORT;
lex->sphead->set_body_start(thd, lip->get_cpp_tok_start());
@@ -17856,13 +17690,9 @@ trigger_tail:
sp_proc_stmt /* $19 */
{ /* $20 */
LEX *lex= Lex;
- sp_head *sp= lex->sphead;
lex->sql_command= SQLCOM_CREATE_TRIGGER;
- sp->set_stmt_end(thd);
- sp->restore_thd_mem_root(thd);
-
- if (unlikely(sp->is_not_allowed_in_function("trigger")))
+ if (lex->sp_body_finalize_trigger(thd))
MYSQL_YYABORT;
/*
@@ -17870,11 +17700,10 @@ trigger_tail:
sp_proc_stmt alternatives are not saving/restoring LEX, so
lex->query_tables can be wiped out.
*/
- if (unlikely(!lex->select_lex.
- add_table_to_list(thd, $10, (LEX_CSTRING*) 0,
- TL_OPTION_UPDATING,
- TL_READ_NO_INSERT,
- MDL_SHARED_NO_WRITE)))
+ if (!lex->first_select_lex()->
+ add_table_to_list(thd, $10, (LEX_CSTRING*) 0,
+ TL_OPTION_UPDATING, TL_READ_NO_INSERT,
+ MDL_SHARED_NO_WRITE))
MYSQL_YYABORT;
}
;
@@ -17885,22 +17714,6 @@ trigger_tail:
**************************************************************************/
-udf_tail:
- opt_if_not_exists ident
- RETURNS_SYM udf_type SONAME_SYM TEXT_STRING_sys
- {
- LEX *lex= thd->lex;
- if (unlikely(lex->add_create_options_with_check($1)))
- MYSQL_YYABORT;
- if (unlikely(is_native_function(thd, & $2)))
- my_yyabort_error((ER_NATIVE_FCT_NAME_COLLISION, MYF(0), $2.str));
- lex->sql_command= SQLCOM_CREATE_FUNCTION;
- lex->udf.name= $2;
- lex->udf.returns= (Item_result) $4;
- lex->udf.dl= $6.str;
- }
- ;
-
sf_return_type:
RETURNS_SYM
@@ -17918,24 +17731,12 @@ sf_return_type:
}
;
-sf_tail:
- opt_if_not_exists
- sp_name
- {
- Lex->sql_command= SQLCOM_CREATE_SPFUNCTION;
- if (unlikely(!Lex->make_sp_head_no_recursive(thd, $1, $2,
- &sp_handler_function)))
- MYSQL_YYABORT;
- }
- sp_parenthesized_fdparam_list
- sf_return_type
+sf_c_chistics_and_body:
sp_c_chistics
{
LEX *lex= thd->lex;
- Lex_input_stream *lip= YYLIP;
-
- lex->sphead->set_chistics(lex->sp_chistics);
- lex->sphead->set_body_start(thd, lip->get_cpp_tok_start());
+ lex->sphead->set_c_chistics(lex->sp_chistics);
+ lex->sphead->set_body_start(thd, YYLIP->get_cpp_tok_start());
}
sp_proc_stmt_in_returns_clause
{
@@ -17944,18 +17745,19 @@ sf_tail:
}
;
+
sp_tail:
- opt_if_not_exists sp_name
+ sp_name
{
- Lex->sql_command= SQLCOM_CREATE_PROCEDURE;
- if (unlikely(!Lex->make_sp_head_no_recursive(thd, $1, $2,
- &sp_handler_procedure)))
+ if (unlikely(!Lex->make_sp_head_no_recursive(thd, $1,
+ &sp_handler_procedure,
+ DEFAULT_AGGREGATE)))
MYSQL_YYABORT;
}
sp_parenthesized_pdparam_list
sp_c_chistics
{
- Lex->sphead->set_chistics(Lex->sp_chistics);
+ Lex->sphead->set_c_chistics(Lex->sp_chistics);
Lex->sphead->set_body_start(thd, YYLIP->get_cpp_tok_start());
}
sp_proc_stmt
@@ -18067,44 +17869,37 @@ opt_migrate:
;
install:
- INSTALL_SYM PLUGIN_SYM ident SONAME_SYM TEXT_STRING_sys
+ INSTALL_SYM PLUGIN_SYM opt_if_not_exists ident SONAME_SYM TEXT_STRING_sys
{
- LEX *lex= Lex;
- lex->sql_command= SQLCOM_INSTALL_PLUGIN;
- lex->comment= $3;
- lex->ident= $5;
+ if (Lex->stmt_install_plugin($3, $4, $6))
+ MYSQL_YYABORT;
}
| INSTALL_SYM SONAME_SYM TEXT_STRING_sys
{
- LEX *lex= Lex;
- lex->sql_command= SQLCOM_INSTALL_PLUGIN;
- lex->comment= null_clex_str;
- lex->ident= $3;
+ Lex->stmt_install_plugin($3);
}
;
uninstall:
- UNINSTALL_SYM PLUGIN_SYM ident
+ UNINSTALL_SYM PLUGIN_SYM opt_if_exists ident
{
- LEX *lex= Lex;
- lex->sql_command= SQLCOM_UNINSTALL_PLUGIN;
- lex->comment= $3;
+ if (Lex->stmt_uninstall_plugin_by_name($3, $4))
+ MYSQL_YYABORT;
}
- | UNINSTALL_SYM SONAME_SYM TEXT_STRING_sys
+ | UNINSTALL_SYM SONAME_SYM opt_if_exists TEXT_STRING_sys
{
- LEX *lex= Lex;
- lex->sql_command= SQLCOM_UNINSTALL_PLUGIN;
- lex->comment= null_clex_str;
- lex->ident= $3;
+ if (Lex->stmt_uninstall_plugin_by_soname($3, $4))
+ MYSQL_YYABORT;
}
;
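
INSTALL and UNINSTALL now take IF [NOT] EXISTS as shown above; the plugin and library names below are placeholders:

    INSTALL PLUGIN IF NOT EXISTS server_audit SONAME 'server_audit.so';
    UNINSTALL PLUGIN IF EXISTS server_audit;
    UNINSTALL SONAME IF EXISTS 'server_audit.so';
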
/* Avoid compiler warning from sql_yacc.cc where yyerrlab1 is not used */
keep_gcc_happy:
- IMPOSSIBLE_ACTION
- {
- YYERROR;
- }
+ IMPOSSIBLE_ACTION
+ {
+ YYERROR;
+ }
+ ;
/**
@} (end of group Parser)
diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy
index 61f2426427e..333bca4af86 100644
--- a/sql/sql_yacc_ora.yy
+++ b/sql/sql_yacc_ora.yy
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2015, Oracle and/or its affiliates.
- Copyright (c) 2010, 2016, MariaDB
+ Copyright (c) 2010, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -68,6 +68,7 @@
#include "sql_lex.h"
#include "sql_sequence.h"
#include "my_base.h"
+#include "sql_type_json.h"
/* this is to get the bison compilation windows warnings out */
#ifdef _MSC_VER
@@ -184,6 +185,20 @@ void ORAerror(THD *thd, const char *s)
Lex_for_loop_bounds_st for_loop_bounds;
Lex_trim_st trim;
vers_history_point_t vers_history_point;
+ struct
+ {
+ enum sub_select_type unit_type;
+ bool distinct;
+ } unit_operation;
+ struct
+ {
+ SELECT_LEX *first;
+ SELECT_LEX *prev_last;
+ } select_list;
+ SQL_I_List<ORDER> *select_order;
+ Lex_select_lock select_lock;
+ Lex_select_limit select_limit;
+ Lex_order_limit_lock *order_limit_lock;
/* pointers */
Create_field *create_field;
@@ -204,6 +219,7 @@ void ORAerror(THD *thd, const char *s)
class sp_lex_cursor *sp_cursor_stmt;
LEX_CSTRING *lex_str_ptr;
LEX_USER *lex_user;
+ USER_AUTH *user_auth;
List<Condition_information_item> *cond_info_list;
List<DYNCALL_CREATE_DEF> *dyncol_def_list;
List<Item> *item_list;
@@ -229,6 +245,7 @@ void ORAerror(THD *thd, const char *s)
handlerton *db_type;
st_select_lex *select_lex;
+ st_select_lex_unit *select_lex_unit;
struct p_elem_val *p_elem_value;
class Window_frame *window_frame;
class Window_frame_bound *window_frame_bound;
@@ -237,8 +254,8 @@ void ORAerror(THD *thd, const char *s)
/* enums */
enum enum_sp_suid_behaviour sp_suid;
+ enum enum_sp_aggregate_type sp_aggregate_type;
enum enum_view_suid view_suid;
- enum sub_select_type unit_type;
enum Condition_information_item::Name cond_info_item_name;
enum enum_diag_condition_item_name diag_condition_item_name;
enum Diagnostics_information::Which_area diag_area;
@@ -278,10 +295,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%parse-param { THD *thd }
%lex-param { THD *thd }
/*
- Currently there are 53 shift/reduce conflicts.
+ Currently there are 49 shift/reduce conflicts.
We should not introduce new conflicts any more.
*/
-%expect 53
+%expect 49
/*
Comments for TOKENS.
@@ -438,6 +455,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token LEADING /* SQL-2003-R */
%token LEAVE_SYM
%token LEFT /* SQL-2003-R */
+%token LEFT_PAREN_ALT /* INTERNAL */
+%token LEFT_PAREN_WITH /* INTERNAL */
+%token LEFT_PAREN_LIKE /* INTERNAL */
%token LEX_HOSTNAME
%token LIKE /* SQL-2003-R */
%token LIMIT
@@ -500,6 +520,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token PERCENT_RANK_SYM
%token PERCENTILE_CONT_SYM
%token PERCENTILE_DISC_SYM
+%token PORTION_SYM /* SQL-2016-R */
%token POSITION_SYM /* SQL-2003-N */
%token PRECISION /* SQL-2003-R */
%token PRIMARY_SYM /* SQL-2003-R */
@@ -628,6 +649,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
Non-reserved keywords
*/
+%token <kwd> ACCOUNT_SYM /* MYSQL */
%token <kwd> ACTION /* SQL-2003-N */
%token <kwd> ADMIN_SYM /* SQL-2003-N */
%token <kwd> ADDDATE_SYM /* MYSQL-FUNC */
@@ -745,6 +767,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> EXIT_MARIADB_SYM /* PLSQL-R */
%token <kwd> EXIT_ORACLE_SYM /* PLSQL-R */
%token <kwd> EXPANSION_SYM
+%token <kwd> EXPIRE_SYM /* MySQL */
%token <kwd> EXPORT_SYM
%token <kwd> EXTENDED_SYM
%token <kwd> EXTENT_SIZE_SYM
@@ -860,6 +883,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> NAME_SYM /* SQL-2003-N */
%token <kwd> NATIONAL_SYM /* SQL-2003-R */
%token <kwd> NCHAR_SYM /* SQL-2003-R */
+%token <kwd> NEVER_SYM /* MySQL */
%token <kwd> NEW_SYM /* SQL-2003-R */
%token <kwd> NEXT_SYM /* SQL-2003-N */
%token <kwd> NEXTVAL_SYM /* PostgreSQL sequence function */
@@ -983,6 +1007,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%token <kwd> SQL_CALC_FOUND_ROWS
%token <kwd> SQL_NO_CACHE_SYM
%token <kwd> SQL_THREAD
+%token <kwd> STAGE_SYM
%token <kwd> STARTS_SYM
%token <kwd> START_SYM /* SQL-2003-R */
%token <kwd> STATEMENT_SYM
@@ -1214,7 +1239,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
NCHAR_STRING
%type <lex_str_ptr>
- opt_table_alias
+ opt_table_alias_clause
+ table_alias_clause
%type <ident_cli>
IDENT
@@ -1283,7 +1309,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
opt_temporary all_or_any opt_distinct opt_glimit_clause
opt_ignore_leaves fulltext_options union_option
opt_not
- select_derived_init transaction_access_mode_types
+ transaction_access_mode_types
opt_natural_language_mode opt_query_expansion
opt_ev_status opt_ev_on_completion ev_on_completion opt_ev_comment
ev_alter_on_schedule_completion opt_ev_rename_to opt_ev_sql_stmt
@@ -1292,7 +1318,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
opt_default_time_precision
case_stmt_body opt_bin_mod opt_for_system_time_clause
opt_if_exists_table_element opt_if_not_exists_table_element
- opt_recursive opt_format_xid
+ opt_recursive opt_format_xid opt_for_portion_of_time_clause
%type <object_ddl_options>
create_or_replace
@@ -1332,7 +1358,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <item>
literal insert_ident order_ident temporal_literal
- simple_ident expr sum_expr in_sum_expr
+ simple_ident expr expr_no_subselect sum_expr in_sum_expr
variable variable_aux bool_pri
predicate bit_expr parenthesized_expr
table_wild simple_expr column_default_non_parenthesized_expr udf_expr
@@ -1373,6 +1399,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
expr_list opt_udf_expr_list udf_expr_list when_list when_list_opt_else
ident_list ident_list_arg opt_expr_list
decode_when_list_oracle
+ execute_using
+ execute_params
%type <sp_cursor_stmt>
sp_cursor_stmt_lex
@@ -1406,11 +1434,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
join_table_list join_table
table_factor table_ref esc_table_ref
table_primary_ident table_primary_derived
- select_derived derived_table_list
- select_derived_union
- derived_simple_table
- derived_query_specification
- derived_table_value_constructor
+ derived_table_list table_reference_list_parens
+ nested_table_reference_list join_table_parens
+ update_table_list
%type <date_time_type> date_time_type;
%type <interval> interval
@@ -1433,6 +1459,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <lex_user> user grant_user grant_role user_or_role current_role
admin_option_for_role user_maybe_role
+%type <user_auth> opt_auth_str auth_expression auth_token
+
%type <charset>
opt_collate
charset_name
@@ -1446,14 +1474,19 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
UNDERSCORE_CHARSET
%type <select_lex> subselect
- get_select_lex get_select_lex_derived
- simple_table
query_specification
- query_term_union_not_ready
- query_term_union_ready
- query_expression_body
- select_paren_derived
table_value_constructor
+ simple_table
+ query_primary
+ query_primary_parens
+ select_into_query_specification
+
+
+%type <select_lex_unit>
+ query_specification_start
+ query_expression_body
+ query_expression
+ query_expression_unit
%type <boolfunc2creator> comp_op
@@ -1465,11 +1498,28 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
%type <virtual_column> opt_check_constraint check_constraint virtual_column_func
column_default_expr
-%type <unit_type> unit_type_decl
+
+%type <unit_operation> unit_type_decl
+
+%type <select_lock>
+ opt_procedure_or_into
+ opt_select_lock_type
+ select_lock_type
+ opt_lock_wait_timeout_new
+
+%type <select_limit> opt_limit_clause limit_clause limit_options
+
+%type <order_limit_lock>
+ query_expression_tail
+ order_or_limit
+ opt_order_limit_lock
+
+%type <select_order> opt_order_clause order_clause order_list
%type <NONE>
- analyze_stmt_command
- query verb_clause create change select do drop insert replace insert2
+ analyze_stmt_command backup backup_statements
+ query verb_clause create change select select_into
+ do drop insert replace insert2
insert_values update delete truncate rename compound_statement
show describe load alter optimize keycache preload flush
reset purge begin_stmt_mariadb commit rollback savepoint release
@@ -1485,7 +1535,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
assign_to_keycache_parts
preload_list preload_list_or_parts preload_keys preload_keys_parts
select_item_list select_item values_list no_braces
- opt_limit_clause delete_limit_clause fields opt_values values
+ delete_limit_clause fields opt_values values
no_braces_with_names opt_values_with_names values_with_names
procedure_list procedure_list2 procedure_item
field_def handler opt_generated_always
@@ -1506,18 +1556,16 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
table_to_table_list table_to_table opt_table_list opt_as
handler_rkey_function handler_read_or_scan
single_multi table_wild_list table_wild_one opt_wild
- union_clause union_list
- subselect_start opt_and charset
- subselect_end select_var_list select_var_list_init help
+ opt_and charset
+ select_var_list select_var_list_init help
opt_extended_describe shutdown
opt_format_json
- prepare prepare_src execute deallocate
+ prepare execute deallocate
statement
sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa
opt_field_or_var_spec fields_or_vars opt_load_data_set_spec
view_list_opt view_list view_select
- trigger_tail sp_tail sf_tail event_tail
- udf_tail create_function_tail
+ trigger_tail event_tail
install uninstall partition_entry binlog_base64_event
normal_key_options normal_key_opts all_key_opt
spatial_key_options fulltext_key_options normal_key_opt
@@ -1526,6 +1574,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
key_using_alg
part_column_list
period_for_system_time
+ period_for_application_time
server_def server_options_list server_option
definer_opt no_definer definer get_diagnostics
parse_vcol_expr vcol_opt_specifier vcol_opt_attribute
@@ -1536,7 +1585,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize);
opt_delete_gtid_domain
asrow_attribute
set_assign
- sf_tail_standalone
sp_tail_standalone
opt_constraint_no_id
END_OF_INPUT
@@ -1562,6 +1610,7 @@ END_OF_INPUT
%type <plsql_cursor_attr> plsql_cursor_attr
%type <sp_suid> sp_suid
+%type <sp_aggregate_type> opt_aggregate
%type <num> sp_decl_idents sp_decl_idents_init_vars
%type <num> sp_handler_type sp_hcond_list
@@ -1661,8 +1710,8 @@ rule: <-- starts at col 1
query:
END_OF_INPUT
{
- if (likely(!thd->bootstrap) &&
- unlikely(!(thd->lex->select_lex.options & OPTION_FOUND_COMMENT)))
+ if (!thd->bootstrap &&
+ (!(thd->lex->lex_options & OPTION_LEX_FOUND_COMMENT)))
my_yyabort_error((ER_EMPTY_QUERY, MYF(0)));
thd->lex->sql_command= SQLCOM_EMPTY_QUERY;
@@ -1716,6 +1765,7 @@ statement:
alter
| analyze
| analyze_stmt_command
+ | backup
| binlog_base64_event
| call
| change
@@ -1758,6 +1808,7 @@ statement:
| rollback
| savepoint
| select
+ | select_into
| set
| set_assign
| signal_stmt
@@ -1776,9 +1827,7 @@ statement:
deallocate:
deallocate_or_drop PREPARE_SYM ident
{
- LEX *lex= thd->lex;
- lex->sql_command= SQLCOM_DEALLOCATE_PREPARE;
- lex->prepared_stmt_name= $3;
+ Lex->stmt_deallocate_prepare($3);
}
;
@@ -1788,72 +1837,59 @@ deallocate_or_drop:
;
prepare:
- PREPARE_SYM ident FROM prepare_src
+ PREPARE_SYM ident FROM expr_no_subselect
{
- LEX *lex= thd->lex;
- if (unlikely(lex->table_or_sp_used()))
- my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0),
- "PREPARE..FROM"));
- lex->sql_command= SQLCOM_PREPARE;
- lex->prepared_stmt_name= $2;
+ if (Lex->stmt_prepare($2, $4))
+ MYSQL_YYABORT;
}
;
-prepare_src:
+expr_no_subselect:
{ Lex->expr_allows_subselect= false; }
expr
{
- Lex->prepared_stmt_code= $2;
Lex->expr_allows_subselect= true;
+ $$= $2;
}
;
execute:
- EXECUTE_SYM ident
+ EXECUTE_SYM ident execute_using
{
- LEX *lex= thd->lex;
- lex->sql_command= SQLCOM_EXECUTE;
- lex->prepared_stmt_name= $2;
+ if (Lex->stmt_execute($2, $3))
+ MYSQL_YYABORT;
}
- execute_using
- {}
- | EXECUTE_SYM IMMEDIATE_SYM prepare_src
+ | EXECUTE_SYM IMMEDIATE_SYM expr_no_subselect execute_using
{
- if (unlikely(Lex->table_or_sp_used()))
- my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0),
- "EXECUTE IMMEDIATE"));
- Lex->sql_command= SQLCOM_EXECUTE_IMMEDIATE;
+ if (Lex->stmt_execute_immediate($3, $4))
+ MYSQL_YYABORT;
}
- execute_using
- {}
;
execute_using:
- /* nothing */
+ /* nothing */ { $$= NULL; }
| USING { Lex->expr_allows_subselect= false; }
- execute_var_list
+ execute_params
{
- if (unlikely(Lex->table_or_sp_used()))
- my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0),
- "EXECUTE..USING"));
+ $$= $3;
Lex->expr_allows_subselect= true;
}
;
-execute_var_list:
- execute_var_list ',' execute_var_ident
- | execute_var_ident
- ;
-
-execute_var_ident:
+execute_params:
expr_or_default
{
- if (unlikely(Lex->prepared_stmt_params.push_back($1,
- thd->mem_root)))
+ if (unlikely(!($$= List<Item>::make(thd->mem_root, $1))))
+ MYSQL_YYABORT;
+ }
+ | execute_params ',' expr_or_default
+ {
+ if (($$= $1)->push_back($3, thd->mem_root))
MYSQL_YYABORT;
}
;
+
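The rewritten prepare/execute rules route the statement source through expr_no_subselect and collect USING arguments into a List<Item> built by execute_params, replacing the old execute_var_list push into Lex->prepared_stmt_params. A minimal sketch of the statements these rules accept (statement and variable names are illustrative, not taken from the patch):

    PREPARE stmt FROM 'SELECT ? + ?';
    EXECUTE stmt USING 1, 2;                 -- parameters are generic expressions
    EXECUTE IMMEDIATE 'SELECT ?' USING @val; -- source text and parameters in one statement
    DEALLOCATE PREPARE stmt;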
/* help */
help:
@@ -2112,17 +2148,22 @@ connection_name:
/* create a table */
create:
- create_or_replace opt_temporary TABLE_SYM opt_if_not_exists table_ident
+ create_or_replace opt_temporary TABLE_SYM opt_if_not_exists
{
LEX *lex= thd->lex;
lex->create_info.init();
- if (unlikely(lex->set_command_with_check(SQLCOM_CREATE_TABLE, $2,
- $1 | $4)))
+ if (lex->main_select_push())
+ MYSQL_YYABORT;
+ lex->current_select->parsing_place= BEFORE_OPT_LIST;
+ if (lex->set_command_with_check(SQLCOM_CREATE_TABLE, $2, $1 | $4))
MYSQL_YYABORT;
- if (unlikely(!lex->select_lex.add_table_to_list(thd, $5, NULL,
- TL_OPTION_UPDATING,
- TL_WRITE,
- MDL_EXCLUSIVE)))
+ }
+ table_ident
+ {
+ LEX *lex= thd->lex;
+ if (!lex->first_select_lex()->
+ add_table_to_list(thd, $6, NULL, TL_OPTION_UPDATING,
+ TL_WRITE, MDL_SHARED_UPGRADABLE))
MYSQL_YYABORT;
lex->alter_info.reset();
/*
@@ -2137,7 +2178,6 @@ create:
create_body
{
LEX *lex= thd->lex;
- lex->current_select= &lex->select_lex;
if ((lex->create_info.used_fields & HA_CREATE_USED_ENGINE) &&
!lex->create_info.db_type)
{
@@ -2146,22 +2186,24 @@ create:
ER_WARN_USING_OTHER_HANDLER,
ER_THD(thd, ER_WARN_USING_OTHER_HANDLER),
hton_name(lex->create_info.db_type)->str,
- $5->table.str);
+ $6->table.str);
}
create_table_set_open_action_and_adjust_tables(lex);
+ Lex->pop_select(); //main select
}
| create_or_replace opt_temporary SEQUENCE_SYM opt_if_not_exists table_ident
{
LEX *lex= thd->lex;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
lex->create_info.init();
if (unlikely(lex->set_command_with_check(SQLCOM_CREATE_SEQUENCE, $2,
$1 | $4)))
MYSQL_YYABORT;
- if (unlikely(!lex->select_lex.add_table_to_list(thd, $5, NULL,
- TL_OPTION_UPDATING,
- TL_WRITE,
- MDL_EXCLUSIVE)))
+ if (!lex->first_select_lex()->
+ add_table_to_list(thd, $5, NULL, TL_OPTION_UPDATING,
+ TL_WRITE, MDL_EXCLUSIVE))
MYSQL_YYABORT;
/*
@@ -2184,8 +2226,9 @@ create:
if (unlikely(lex->create_info.seq_create_info->check_and_adjust(1)))
{
my_error(ER_SEQUENCE_INVALID_DATA, MYF(0),
- lex->select_lex.table_list.first->db.str,
- lex->select_lex.table_list.first->table_name.str);
+ lex->first_select_lex()->table_list.first->db.str,
+ lex->first_select_lex()->table_list.first->
+ table_name.str);
MYSQL_YYABORT;
}
@@ -2198,10 +2241,8 @@ create:
Lex->create_info.used_fields|= HA_CREATE_USED_SEQUENCE;
Lex->create_info.sequence= 1;
- lex->current_select= &lex->select_lex;
- if (unlikely((lex->create_info.used_fields &
- HA_CREATE_USED_ENGINE) &&
- !lex->create_info.db_type))
+ if ((lex->create_info.used_fields & HA_CREATE_USED_ENGINE) &&
+ !lex->create_info.db_type)
{
lex->create_info.use_default_db_type(thd);
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
@@ -2211,44 +2252,69 @@ create:
$5->table.str);
}
create_table_set_open_action_and_adjust_tables(lex);
+ Lex->pop_select(); //main select
+ }
+ | create_or_replace opt_unique INDEX_SYM opt_if_not_exists
+ {
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
}
- | create_or_replace opt_unique INDEX_SYM opt_if_not_exists ident
+ ident
opt_key_algorithm_clause
ON table_ident
{
- if (unlikely(Lex->add_create_index_prepare($8)))
+ if (Lex->add_create_index_prepare($9))
MYSQL_YYABORT;
- if (unlikely(Lex->add_create_index($2, &$5, $6, $1 | $4)))
+ if (Lex->add_create_index($2, &$6, $7, $1 | $4))
MYSQL_YYABORT;
}
'(' key_list ')' opt_lock_wait_timeout normal_key_options
- opt_index_lock_algorithm { }
- | create_or_replace fulltext INDEX_SYM opt_if_not_exists ident
+ opt_index_lock_algorithm
+ {
+ Lex->pop_select(); //main select
+ }
+ | create_or_replace fulltext INDEX_SYM
+ {
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ }
+ opt_if_not_exists ident
ON table_ident
{
- if (unlikely(Lex->add_create_index_prepare($7)))
+ if (Lex->add_create_index_prepare($8))
MYSQL_YYABORT;
- if (unlikely(Lex->add_create_index($2, &$5, HA_KEY_ALG_UNDEF,
- $1 | $4)))
+ if (Lex->add_create_index($2, &$6, HA_KEY_ALG_UNDEF, $1 | $5))
MYSQL_YYABORT;
}
'(' key_list ')' opt_lock_wait_timeout fulltext_key_options
- opt_index_lock_algorithm { }
- | create_or_replace spatial INDEX_SYM opt_if_not_exists ident
+ opt_index_lock_algorithm
+ {
+ Lex->pop_select(); //main select
+ }
+ | create_or_replace spatial INDEX_SYM
+ {
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ }
+ opt_if_not_exists ident
ON table_ident
{
- if (unlikely(Lex->add_create_index_prepare($7)))
+ if (Lex->add_create_index_prepare($8))
MYSQL_YYABORT;
- if (unlikely(Lex->add_create_index($2, &$5, HA_KEY_ALG_UNDEF,
- $1 | $4)))
+ if (Lex->add_create_index($2, &$6, HA_KEY_ALG_UNDEF, $1 | $5))
MYSQL_YYABORT;
}
'(' key_list ')' opt_lock_wait_timeout spatial_key_options
- opt_index_lock_algorithm { }
+ opt_index_lock_algorithm
+ {
+ Lex->pop_select(); //main select
+ }
| create_or_replace DATABASE opt_if_not_exists ident
{
Lex->create_info.default_table_charset= NULL;
Lex->create_info.used_fields= 0;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
}
opt_create_database_options
{
@@ -2257,54 +2323,125 @@ create:
$1 | $3)))
MYSQL_YYABORT;
lex->name= $4;
+ Lex->pop_select(); //main select
}
| create_or_replace definer_opt opt_view_suid VIEW_SYM
opt_if_not_exists table_ident
{
- if (unlikely(Lex->add_create_view(thd, $1 | $5,
- DTYPE_ALGORITHM_UNDEFINED, $3,
- $6)))
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ if (Lex->add_create_view(thd, $1 | $5,
+ DTYPE_ALGORITHM_UNDEFINED, $3, $6))
MYSQL_YYABORT;
}
view_list_opt AS view_select
- { }
+ {
+ Lex->pop_select(); //main select
+ }
| create_or_replace view_algorithm definer_opt opt_view_suid VIEW_SYM
opt_if_not_exists table_ident
{
- if (unlikely(Lex->add_create_view(thd, $1 | $6, $2, $4, $7)))
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ if (Lex->add_create_view(thd, $1 | $6, $2, $4, $7))
MYSQL_YYABORT;
}
view_list_opt AS view_select
- { }
+ {
+ Lex->pop_select(); //main select
+ }
| create_or_replace definer_opt TRIGGER_SYM
- { Lex->create_info.set($1); }
+ {
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ Lex->create_info.set($1);
+ }
trigger_tail
- { }
- | create_or_replace definer_opt PROCEDURE_SYM
- { Lex->create_info.set($1); }
+ {
+ Lex->pop_select(); //main select
+ }
+ | create_or_replace definer_opt PROCEDURE_SYM opt_if_not_exists
+ {
+ if (Lex->stmt_create_procedure_start($1 | $4))
+ MYSQL_YYABORT;
+ }
sp_tail_standalone
- { }
+ {
+ Lex->stmt_create_routine_finalize();
+ }
| create_or_replace definer_opt EVENT_SYM
- { Lex->create_info.set($1); }
- event_tail
- { }
- | create_or_replace definer FUNCTION_SYM
- { Lex->create_info.set($1); }
- sf_tail_standalone
- { }
- | create_or_replace no_definer FUNCTION_SYM
- { Lex->create_info.set($1); }
- create_function_tail
- { }
- | create_or_replace no_definer AGGREGATE_SYM FUNCTION_SYM
{
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
Lex->create_info.set($1);
- Lex->udf.type= UDFTYPE_AGGREGATE;
}
- udf_tail
- { }
- | create_or_replace USER_SYM opt_if_not_exists clear_privileges grant_list
- opt_require_clause opt_resource_options
+ event_tail
+ {
+ Lex->pop_select(); //main select
+ }
+ | create_or_replace definer opt_aggregate FUNCTION_SYM opt_if_not_exists
+ sp_name RETURN_ORACLE_SYM
+ {
+ if (Lex->stmt_create_stored_function_start($1 | $5, $3, $6))
+ MYSQL_YYABORT;
+ }
+ sf_return_type
+ sf_c_chistics_and_body_standalone
+ opt_sp_name
+ {
+ if (Lex->stmt_create_stored_function_finalize_standalone($11))
+ MYSQL_YYABORT;
+ }
+ | create_or_replace definer opt_aggregate FUNCTION_SYM opt_if_not_exists
+ sp_name '('
+ {
+ if (Lex->stmt_create_stored_function_start($1 | $5, $3, $6))
+ MYSQL_YYABORT;
+ }
+ sp_fdparam_list ')'
+ RETURN_ORACLE_SYM sf_return_type
+ sf_c_chistics_and_body_standalone
+ opt_sp_name
+ {
+ if (Lex->stmt_create_stored_function_finalize_standalone($14))
+ MYSQL_YYABORT;
+ }
+ | create_or_replace no_definer opt_aggregate FUNCTION_SYM opt_if_not_exists
+ sp_name RETURN_ORACLE_SYM
+ {
+ if (Lex->stmt_create_stored_function_start($1 | $5, $3, $6))
+ MYSQL_YYABORT;
+ }
+ sf_return_type
+ sf_c_chistics_and_body_standalone
+ opt_sp_name
+ {
+ if (Lex->stmt_create_stored_function_finalize_standalone($11))
+ MYSQL_YYABORT;
+ }
+ | create_or_replace no_definer opt_aggregate FUNCTION_SYM opt_if_not_exists
+ sp_name '('
+ {
+ if (Lex->stmt_create_stored_function_start($1 | $5, $3, $6))
+ MYSQL_YYABORT;
+ }
+ sp_fdparam_list ')'
+ RETURN_ORACLE_SYM sf_return_type
+ sf_c_chistics_and_body_standalone
+ opt_sp_name
+ {
+ if (Lex->stmt_create_stored_function_finalize_standalone($14))
+ MYSQL_YYABORT;
+ }
+ | create_or_replace no_definer opt_aggregate FUNCTION_SYM opt_if_not_exists
+ ident RETURNS_SYM udf_type SONAME_SYM TEXT_STRING_sys
+ {
+ if (Lex->stmt_create_udf_function($1 | $5, $3, $6,
+ (Item_result) $8, $10))
+ MYSQL_YYABORT;
+ }
+ | create_or_replace USER_SYM opt_if_not_exists clear_privileges
+ grant_list opt_require_clause opt_resource_options opt_account_locking opt_password_expiration
{
if (unlikely(Lex->set_command_with_check(SQLCOM_CREATE_USER,
$1 | $3)))
@@ -2340,7 +2477,7 @@ create:
&sp_handler_package_spec,
$5, $1 | $4))))
MYSQL_YYABORT;
- pkg->set_chistics(Lex->sp_chistics);
+ pkg->set_c_chistics(Lex->sp_chistics);
}
opt_package_specification_element_list END
remember_end_opt opt_sp_name
@@ -2360,7 +2497,7 @@ create:
&sp_handler_package_body,
$6, $1 | $5))))
MYSQL_YYABORT;
- pkg->set_chistics(Lex->sp_chistics);
+ pkg->set_c_chistics(Lex->sp_chistics);
Lex->sp_block_init(thd);
}
package_implementation_declare_section
@@ -2437,13 +2574,14 @@ package_specification_function:
MYSQL_YYABORT;
thd->lex= $2;
if (unlikely(!$2->make_sp_head_no_recursive(thd, spname,
- &sp_handler_package_function)))
+ &sp_handler_package_function,
+ NOT_AGGREGATE)))
MYSQL_YYABORT;
$1->sphead->get_package()->m_current_routine= $2;
(void) is_native_function_with_warn(thd, &$3);
}
opt_sp_parenthesized_fdparam_list
- sf_return_type
+ RETURN_ORACLE_SYM sf_return_type
sp_c_chistics
{
sp_head *sp= thd->lex->sphead;
@@ -2463,7 +2601,8 @@ package_specification_procedure:
MYSQL_YYABORT;
thd->lex= $2;
if (unlikely(!$2->make_sp_head_no_recursive(thd, spname,
- &sp_handler_package_procedure)))
+ &sp_handler_package_procedure,
+ DEFAULT_AGGREGATE)))
MYSQL_YYABORT;
$1->sphead->get_package()->m_current_routine= $2;
}
@@ -2513,11 +2652,6 @@ package_implementation_function_body:
}
sp_body opt_package_routine_end_name
{
- if (unlikely(Lex->sphead->m_flags & sp_head::HAS_AGGREGATE_INSTR))
- {
- my_yyabort_error((ER_NOT_AGGREGATE_FUNCTION, MYF(0)));
- }
- Lex->sphead->set_chistics_agg_type(NOT_AGGREGATE);
if (unlikely(thd->lex->sp_body_finalize_function(thd) ||
thd->lex->sphead->check_package_routine_end_name($5)))
MYSQL_YYABORT;
@@ -2537,7 +2671,7 @@ package_implementation_procedure_body:
sp_body opt_package_routine_end_name
{
if (unlikely(thd->lex->sp_body_finalize_procedure(thd) ||
- thd->lex->sphead->check_package_routine_end_name($5)))
+ thd->lex->sphead->check_package_routine_end_name($5)))
MYSQL_YYABORT;
thd->lex= $2;
}
@@ -2575,10 +2709,6 @@ package_specification_element:
}
;
-create_function_tail:
- sf_tail_standalone { }
- | udf_tail { Lex->udf.type= UDFTYPE_FUNCTION; }
- ;
opt_sequence:
/* empty */ { }
@@ -2900,20 +3030,17 @@ ev_sql_stmt:
if (unlikely(!lex->make_sp_head(thd,
lex->event_parse_data->identifier,
- &sp_handler_procedure)))
+ &sp_handler_procedure,
+ DEFAULT_AGGREGATE)))
MYSQL_YYABORT;
lex->sphead->set_body_start(thd, lip->get_cpp_ptr());
}
sp_proc_stmt
{
- LEX *lex= thd->lex;
-
/* return back to the original memory root ASAP */
- lex->sphead->set_stmt_end(thd);
- lex->sphead->restore_thd_mem_root(thd);
-
- lex->event_parse_data->body_changed= TRUE;
+ if (Lex->sp_body_finalize_event(thd))
+ MYSQL_YYABORT;
}
;
@@ -2925,13 +3052,16 @@ clear_privileges:
lex->columns.empty();
lex->grant= lex->grant_tot_col= 0;
lex->all_privileges= 0;
- lex->select_lex.db= null_clex_str;
- lex->ssl_type= SSL_TYPE_NOT_SPECIFIED;
- lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0;
- bzero((char *)&(lex->mqh),sizeof(lex->mqh));
+ lex->first_select_lex()->db= null_clex_str;
+ lex->account_options.reset();
}
;
+opt_aggregate:
+ /* Empty */ { $$= NOT_AGGREGATE; }
+ | AGGREGATE_SYM { $$= GROUP_AGGREGATE; }
+ ;
+
sp_name:
ident '.' ident
{
@@ -3045,7 +3175,18 @@ sp_cparams:
/* Stored FUNCTION parameter declaration list */
sp_fdparam_list:
/* Empty */
- | sp_fdparams
+ {
+ Lex->sphead->m_param_begin= YYLIP->get_cpp_tok_start();
+ Lex->sphead->m_param_end= Lex->sphead->m_param_begin;
+ }
+ |
+ {
+ Lex->sphead->m_param_begin= YYLIP->get_cpp_tok_start();
+ }
+ sp_fdparams
+ {
+ Lex->sphead->m_param_end= YYLIP->get_cpp_tok_start();
+ }
;
sp_fdparams:
@@ -3152,18 +3293,6 @@ sp_opt_inout:
| IN_SYM OUT_SYM { $$= sp_variable::MODE_INOUT; }
;
-sp_parenthesized_fdparam_list:
- '('
- {
- Lex->sphead->m_param_begin= YYLIP->get_cpp_tok_start() + 1;
- }
- sp_fdparam_list
- ')'
- {
- Lex->sphead->m_param_end= YYLIP->get_cpp_tok_start();
- }
- ;
-
sp_parenthesized_pdparam_list:
'('
{
@@ -3186,7 +3315,7 @@ sp_no_param:
opt_sp_parenthesized_fdparam_list:
sp_no_param
- | sp_parenthesized_fdparam_list
+ | '(' sp_fdparam_list ')'
;
opt_sp_parenthesized_pdparam_list:
@@ -3563,7 +3692,7 @@ raise_stmt_oracle:
signal_stmt:
SIGNAL_SYM signal_value opt_set_signal_information
{
- if (unlikely(Lex->add_signal_statement(thd, $2)))
+ if (Lex->add_signal_statement(thd, $2))
MYSQL_YYABORT;
}
;
@@ -3749,6 +3878,7 @@ statement_information_item:
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
+ ;
simple_target_specification:
ident_cli
@@ -3805,6 +3935,7 @@ condition_information_item:
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
+ ;
condition_information_item_name:
CLASS_ORIGIN_SYM
@@ -3947,44 +4078,8 @@ sp_proc_stmt_statement:
}
sp_statement
{
- LEX *lex= thd->lex;
- Lex_input_stream *lip= YYLIP;
- sp_head *sp= lex->sphead;
-
- sp->m_flags|= sp_get_flags_for_command(lex);
- /* "USE db" doesn't work in a procedure */
- if (unlikely(lex->sql_command == SQLCOM_CHANGE_DB))
- my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "USE"));
- /*
- Don't add an instruction for SET statements, since all
- instructions for them were already added during processing
- of "set" rule.
- */
- DBUG_ASSERT(lex->sql_command != SQLCOM_SET_OPTION ||
- lex->var_list.is_empty());
- if (lex->sql_command != SQLCOM_SET_OPTION)
- {
- sp_instr_stmt *i=new (thd->mem_root)
- sp_instr_stmt(sp->instructions(), lex->spcont, lex);
- if (unlikely(i == NULL))
- MYSQL_YYABORT;
-
- /*
- Extract the query statement from the tokenizer. The
- end is either lex->ptr, if there was no lookahead,
- lex->tok_end otherwise.
- */
- if (yychar == YYEMPTY)
- i->m_query.length= lip->get_ptr() - sp->m_tmp_query;
- else
- i->m_query.length= lip->get_tok_start() - sp->m_tmp_query;;
- if (unlikely(!(i->m_query.str= strmake_root(thd->mem_root,
- sp->m_tmp_query,
- i->m_query.length))) ||
- unlikely(sp->add_instr(i)))
- MYSQL_YYABORT;
- }
- if (unlikely(sp->restore_lex(thd)))
+ if (Lex->sp_proc_stmt_statement_finalize(thd, yychar == YYEMPTY) ||
+ Lex->sphead->restore_lex(thd))
MYSQL_YYABORT;
}
;
@@ -4018,7 +4113,9 @@ sp_proc_stmt_return:
;
reset_lex_expr:
- { Lex->sphead->reset_lex(thd); } expr { $$= $2; }
+ { Lex->sphead->reset_lex(thd); }
+ expr
+ { $$= $2; }
;
sp_proc_stmt_exit_oracle:
@@ -4034,14 +4131,14 @@ sp_proc_stmt_exit_oracle:
}
| EXIT_ORACLE_SYM WHEN_SYM reset_lex_expr
{
- if (unlikely(Lex->sp_exit_statement(thd, $3)) ||
- unlikely(Lex->sphead->restore_lex(thd)))
+ if (Lex->sp_exit_statement(thd, $3) ||
+ Lex->sphead->restore_lex(thd))
MYSQL_YYABORT;
}
| EXIT_ORACLE_SYM label_ident WHEN_SYM reset_lex_expr
{
- if (unlikely(Lex->sp_exit_statement(thd, &$2, $4)) ||
- unlikely(Lex->sphead->restore_lex(thd)))
+ if (Lex->sp_exit_statement(thd, &$2, $4) ||
+ Lex->sphead->restore_lex(thd))
MYSQL_YYABORT;
}
;
@@ -4059,14 +4156,14 @@ sp_proc_stmt_continue_oracle:
}
| CONTINUE_ORACLE_SYM WHEN_SYM reset_lex_expr
{
- if (unlikely(Lex->sp_continue_statement(thd, $3)) ||
- unlikely(Lex->sphead->restore_lex(thd)))
+ if (Lex->sp_continue_statement(thd, $3) ||
+ Lex->sphead->restore_lex(thd))
MYSQL_YYABORT;
}
| CONTINUE_ORACLE_SYM label_ident WHEN_SYM reset_lex_expr
{
- if (unlikely(Lex->sp_continue_statement(thd, &$2, $4)) ||
- unlikely(Lex->sphead->restore_lex(thd)))
+ if (Lex->sp_continue_statement(thd, &$2, $4) ||
+ Lex->sphead->restore_lex(thd))
MYSQL_YYABORT;
}
;
@@ -4125,7 +4222,7 @@ assignment_source_expr:
$$->sp_lex_in_use= true;
$$->set_item_and_free_list($3, thd->free_list);
thd->free_list= NULL;
- if (unlikely($$->sphead->restore_lex(thd)))
+ if ($$->sphead->restore_lex(thd))
MYSQL_YYABORT;
}
;
@@ -4134,6 +4231,7 @@ for_loop_bound_expr:
assignment_source_lex
{
Lex->sphead->reset_lex(thd, $1);
+ Lex->current_select->parsing_place= FOR_LOOP_BOUND;
}
expr
{
@@ -4143,6 +4241,7 @@ for_loop_bound_expr:
$$->set_item_and_free_list($3, NULL);
if (unlikely($$->sphead->restore_lex(thd)))
MYSQL_YYABORT;
+ Lex->current_select->parsing_place= NO_MATTER;
}
;
@@ -4192,7 +4291,12 @@ sp_proc_stmt_fetch_head:
;
sp_proc_stmt_fetch:
- sp_proc_stmt_fetch_head sp_fetch_list { }
+ sp_proc_stmt_fetch_head sp_fetch_list { }
+ | FETCH_SYM GROUP_SYM NEXT_SYM ROW_SYM
+ {
+ if (unlikely(Lex->sp_add_agg_cfetch()))
+ MYSQL_YYABORT;
+ }
;
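The opt_aggregate flag and the FETCH GROUP NEXT ROW alternative serve stored aggregate functions: sp_add_agg_cfetch() emits the instruction that pulls the next row of the current group. A hedged sketch of the kind of routine this syntax targets (function and column names are illustrative; run it with a non-default client delimiter):

    CREATE AGGREGATE FUNCTION sum_positive(x INT) RETURNS INT
    BEGIN
      DECLARE total INT DEFAULT 0;
      -- the NOT FOUND handler fires once the group is exhausted
      DECLARE CONTINUE HANDLER FOR NOT FOUND RETURN total;
      LOOP
        FETCH GROUP NEXT ROW;  -- advance to the next row of the group
        IF x > 0 THEN
          SET total = total + x;
        END IF;
      END LOOP;
    END;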
sp_proc_stmt_close:
@@ -4364,7 +4468,8 @@ case_stmt_body:
{
if (unlikely(Lex->case_stmt_action_expr($2)))
MYSQL_YYABORT;
- if (unlikely(Lex->sphead->restore_lex(thd)))
+
+ if (Lex->sphead->restore_lex(thd))
MYSQL_YYABORT;
}
simple_when_clause_list
@@ -4653,7 +4758,7 @@ while_body:
LEX *lex= Lex;
if (unlikely(lex->sp_while_loop_expression(thd, $1)))
MYSQL_YYABORT;
- if (unlikely(lex->sphead->restore_lex(thd)))
+ if (lex->sphead->restore_lex(thd))
MYSQL_YYABORT;
}
sp_proc_stmts1 END LOOP_SYM
@@ -4676,7 +4781,7 @@ repeat_body:
if (unlikely(i == NULL) ||
unlikely(lex->sphead->add_instr(i)))
MYSQL_YYABORT;
- if (unlikely(lex->sphead->restore_lex(thd)))
+ if (lex->sphead->restore_lex(thd))
MYSQL_YYABORT;
/* We can shortcut the cont_backpatch here */
i->m_cont_dest= ip+1;
@@ -5156,26 +5261,16 @@ size_number:
*/
create_body:
- '(' create_field_list ')'
+ create_field_list_parens
{ Lex->create_info.option_list= NULL; }
opt_create_table_options opt_create_partitioning opt_create_select {}
| opt_create_table_options opt_create_partitioning opt_create_select {}
- /*
- the following rule is redundant, but there's a shift/reduce
- conflict that prevents the rule above from parsing a syntax like
- CREATE TABLE t1 (SELECT 1);
- */
- | '(' create_select_query_specification ')'
- | '(' create_select_query_specification ')'
- { Select->set_braces(1);} union_list {}
- | '(' create_select_query_specification ')'
- { Select->set_braces(1);} union_order_or_limit {}
| create_like
{
Lex->create_info.add(DDL_options_st::OPT_LIKE);
- TABLE_LIST *src_table= Lex->select_lex.add_table_to_list(thd,
- $1, NULL, 0, TL_READ, MDL_SHARED_READ);
+ TABLE_LIST *src_table= Lex->first_select_lex()->
+ add_table_to_list(thd, $1, NULL, 0, TL_READ, MDL_SHARED_READ);
if (unlikely(! src_table))
MYSQL_YYABORT;
/* CREATE TABLE ... LIKE is not allowed for views. */
@@ -5185,7 +5280,7 @@ create_body:
create_like:
LIKE table_ident { $$= $2; }
- | '(' LIKE table_ident ')' { $$= $3; }
+ | LEFT_PAREN_LIKE LIKE table_ident ')' { $$= $3; }
;
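create_like now reaches the parenthesized form through the internal LEFT_PAREN_LIKE token, letting the tokenizer disambiguate the opening parenthesis instead of the parser. Both spellings remain accepted, for example:

    CREATE TABLE t2 LIKE t1;
    CREATE TABLE t2 (LIKE t1);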
opt_create_select:
@@ -5194,23 +5289,19 @@ opt_create_select:
;
create_select_query_expression:
- opt_with_clause SELECT_SYM create_select_part2 opt_table_expression
- create_select_part4
- {
- Select->set_braces(0);
- Select->set_with_clause($1);
+ query_expression
+ {
+ if (Lex->parsed_insert_select($1->first_select()))
+ MYSQL_YYABORT;
}
- union_clause
- | opt_with_clause SELECT_SYM create_select_part2
- create_select_part3_union_not_ready create_select_part4
+ | LEFT_PAREN_WITH with_clause query_expression_body ')'
{
- Select->set_with_clause($1);
+ SELECT_LEX *first_select= $3->first_select();
+ $3->set_with_clause($2);
+ $2->attach_to(first_select);
+ if (Lex->parsed_insert_select(first_select))
+ MYSQL_YYABORT;
}
- | '(' create_select_query_specification ')'
- | '(' create_select_query_specification ')'
- { Select->set_braces(1);} union_list {}
- | '(' create_select_query_specification ')'
- { Select->set_braces(1);} union_order_or_limit {}
;
opt_create_partitioning:
@@ -5298,8 +5389,13 @@ partition_entry:
We enter here when opening the frm file to translate
partition info string into part_info data structure.
*/
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ }
+ partition
+ {
+ Lex->pop_select(); //main select
}
- partition {}
;
partition:
@@ -5910,7 +6006,7 @@ opt_versioning_rotation:
| INTERVAL_SYM expr interval opt_versioning_interval_start
{
partition_info *part_info= Lex->part_info;
- if (unlikely(part_info->vers_set_interval($2, $3, $4)))
+ if (unlikely(part_info->vers_set_interval(thd, $2, $3, $4)))
{
my_error(ER_PART_WRONG_VALUE, MYF(0),
Lex->create_last_non_select_table->table_name.str,
@@ -5953,56 +6049,6 @@ opt_versioning_interval_start:
End of partition parser part
*/
-create_select_query_specification:
- opt_with_clause SELECT_SYM create_select_part2 create_select_part3
- create_select_part4
- {
- Select->set_with_clause($1);
- }
- ;
-
-create_select_part2:
- {
- LEX *lex=Lex;
- if (lex->sql_command == SQLCOM_INSERT)
- lex->sql_command= SQLCOM_INSERT_SELECT;
- else if (lex->sql_command == SQLCOM_REPLACE)
- lex->sql_command= SQLCOM_REPLACE_SELECT;
- /*
- The following work only with the local list, the global list
- is created correctly in this case
- */
- lex->current_select->table_list.save_and_clear(&lex->save_list);
- mysql_init_select(lex);
- lex->current_select->parsing_place= SELECT_LIST;
- }
- select_options select_item_list
- {
- Select->parsing_place= NO_MATTER;
- }
- ;
-
-create_select_part3:
- opt_table_expression
- | create_select_part3_union_not_ready
- ;
-
-create_select_part3_union_not_ready:
- table_expression order_or_limit
- | order_or_limit
- ;
-
-create_select_part4:
- opt_select_lock_type
- {
- /*
- The following work only with the local list, the global list
- is created correctly in this case
- */
- Lex->current_select->table_list.push_front(&Lex->save_list);
- }
- ;
-
opt_as:
/* empty */ {}
| AS {}
@@ -6220,7 +6266,7 @@ create_table_option:
}
| UNION_SYM opt_equal
{
- Lex->select_lex.table_list.save_and_clear(&Lex->save_list);
+ Lex->first_select_lex()->table_list.save_and_clear(&Lex->save_list);
}
'(' opt_table_list ')'
{
@@ -6229,8 +6275,8 @@ create_table_option:
from the global list.
*/
LEX *lex=Lex;
- lex->create_info.merge_list= lex->select_lex.table_list.first;
- lex->select_lex.table_list= lex->save_list;
+ lex->create_info.merge_list= lex->first_select_lex()->table_list.first;
+ lex->first_select_lex()->table_list= lex->save_list;
/*
When excluding union list from the global list we assume that
elements of the former immediately follow elements which represent
@@ -6431,6 +6477,13 @@ create_field_list:
}
;
+create_field_list_parens:
+ LEFT_PAREN_ALT field_list ')'
+ {
+ Lex->create_last_non_select_table= Lex->last_table();
+ }
+ ;
+
field_list:
field_list_item
| field_list ',' field_list_item
@@ -6441,6 +6494,7 @@ field_list_item:
| key_def
| constraint_def
| period_for_system_time
+ | PERIOD_SYM period_for_application_time { }
;
column_def:
@@ -6537,7 +6591,7 @@ key_def:
constraint_def:
opt_constraint check_constraint
{
- Lex->add_constraint(&$1, $2, FALSE);
+ Lex->add_constraint($1, $2, FALSE);
}
;
@@ -6546,7 +6600,15 @@ period_for_system_time:
PERIOD_SYM FOR_SYSTEM_TIME_SYM '(' ident ',' ident ')'
{
Vers_parse_info &info= Lex->vers_get_info();
- info.set_system_time($4, $6);
+ info.set_period($4, $6);
+ }
+ ;
+
+period_for_application_time:
+ FOR_SYM ident '(' ident ',' ident ')'
+ {
+ if (Lex->add_period($2, $4, $6))
+ MYSQL_YYABORT;
}
;
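period_for_application_time introduces SQL:2016 application-time periods as a table element, handled by Lex->add_period(). A minimal sketch of a definition this rule parses (table, period and column names are illustrative):

    CREATE TABLE coupons (
      id INT,
      date_start DATE,
      date_end DATE,
      PERIOD FOR valid_period(date_start, date_end)
    );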
@@ -6730,6 +6792,8 @@ parse_vcol_expr:
Prevent the end user from invoking this command.
*/
MYSQL_YYABORT_UNLESS(Lex->parse_vcol_expr);
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
}
expr
{
@@ -6737,14 +6801,15 @@ parse_vcol_expr:
if (unlikely(!v))
MYSQL_YYABORT;
Lex->last_field->vcol_info= v;
+ Lex->pop_select(); //main select
}
;
parenthesized_expr:
- subselect
+ remember_tok_start
+ query_expression
{
- $$= new (thd->mem_root) Item_singlerow_subselect(thd, $1);
- if (unlikely($$ == NULL))
+ if (!($$= Lex->create_item_query_expression(thd, $1, $2)))
MYSQL_YYABORT;
}
| expr
@@ -7043,7 +7108,7 @@ field_type_lob:
| JSON_SYM
{
Lex->charset= &my_charset_utf8mb4_bin;
- $$.set(&type_handler_long_blob);
+ $$.set(&type_handler_json_longtext);
}
;
@@ -7144,11 +7209,12 @@ field_length:
opt_field_length:
/* empty */ { $$= (char*) 0; /* use default length */ }
| field_length { $$= $1; }
+ ;
opt_field_length_default_1:
/* empty */ { $$= (char*) "1"; }
| field_length { $$= $1; }
-
+ ;
/*
In sql_mode=ORACLE, real size of VARCHAR and CHAR with no length
@@ -7170,10 +7236,12 @@ opt_field_length_default_1:
opt_field_length_default_sp_param_varchar:
/* empty */ { $$.set("4000", "4000"); }
| field_length { $$.set($1, NULL); }
+ ;
opt_field_length_default_sp_param_char:
/* empty */ { $$.set("2000", "2000"); }
| field_length { $$.set($1, NULL); }
+ ;
opt_precision:
/* empty */ { $$.set(0, 0); }
@@ -7647,12 +7715,14 @@ fulltext_key_opts:
opt_USING_key_algorithm:
/* Empty*/ { $$= HA_KEY_ALG_UNDEF; }
| USING btree_or_rtree { $$= $2; }
+ ;
/* TYPE is a valid identifier, so it's handled differently than USING */
opt_key_algorithm_clause:
/* Empty*/ { $$= HA_KEY_ALG_UNDEF; }
| USING btree_or_rtree { $$= $2; }
| TYPE_SYM btree_or_rtree { $$= $2; }
+ ;
key_using_alg:
USING btree_or_rtree
@@ -7775,23 +7845,25 @@ alter:
Lex->name= null_clex_str;
Lex->table_type= TABLE_TYPE_UNKNOWN;
Lex->sql_command= SQLCOM_ALTER_TABLE;
- Lex->duplicates= DUP_ERROR;
- Lex->select_lex.init_order();
+ Lex->duplicates= DUP_ERROR;
+ Lex->first_select_lex()->order_list.empty();
Lex->create_info.init();
Lex->create_info.row_type= ROW_TYPE_NOT_USED;
Lex->alter_info.reset();
Lex->no_write_to_binlog= 0;
Lex->create_info.storage_media= HA_SM_DEFAULT;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
DBUG_ASSERT(!Lex->m_sql_cmd);
}
alter_options TABLE_SYM table_ident opt_lock_wait_timeout
{
- if (unlikely(!Lex->select_lex.add_table_to_list(thd, $5, NULL,
- TL_OPTION_UPDATING,
- TL_READ_NO_INSERT,
- MDL_SHARED_UPGRADABLE)))
+ if (!Lex->first_select_lex()->
+ add_table_to_list(thd, $5, NULL, TL_OPTION_UPDATING,
+ TL_READ_NO_INSERT, MDL_SHARED_UPGRADABLE))
MYSQL_YYABORT;
- Lex->select_lex.db= (Lex->select_lex.table_list.first)->db;
+ Lex->first_select_lex()->db=
+ (Lex->first_select_lex()->table_list.first)->db;
Lex->create_last_non_select_table= Lex->last_table();
}
alter_commands
@@ -7803,11 +7875,14 @@ alter:
if (unlikely(Lex->m_sql_cmd == NULL))
MYSQL_YYABORT;
}
+ Lex->pop_select(); //main select
}
| ALTER DATABASE ident_or_empty
{
Lex->create_info.default_table_charset= NULL;
Lex->create_info.used_fields= 0;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
}
create_database_options
{
@@ -7817,6 +7892,7 @@ alter:
if (lex->name.str == NULL &&
unlikely(lex->copy_db_to(&lex->name)))
MYSQL_YYABORT;
+ Lex->pop_select(); //main select
}
| ALTER DATABASE ident UPGRADE_SYM DATA_SYM DIRECTORY_SYM NAME_SYM
{
@@ -7832,6 +7908,8 @@ alter:
if (unlikely(lex->sphead))
my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE"));
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
lex->sp_chistics.init();
}
sp_a_chistics
@@ -7840,6 +7918,9 @@ alter:
lex->sql_command= SQLCOM_ALTER_PROCEDURE;
lex->spname= $3;
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
}
| ALTER FUNCTION_SYM sp_name
{
@@ -7847,6 +7928,8 @@ alter:
if (unlikely(lex->sphead))
my_yyabort_error((ER_SP_NO_DROP_SP, MYF(0), "FUNCTION"));
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
lex->sp_chistics.init();
}
sp_a_chistics
@@ -7855,14 +7938,23 @@ alter:
lex->sql_command= SQLCOM_ALTER_FUNCTION;
lex->spname= $3;
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
}
| ALTER view_algorithm definer_opt opt_view_suid VIEW_SYM table_ident
{
- if (unlikely(Lex->add_alter_view(thd, $2, $4, $6)))
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ if (Lex->add_alter_view(thd, $2, $4, $6))
MYSQL_YYABORT;
}
view_list_opt AS view_select
- {}
+ {
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
| ALTER definer_opt opt_view_suid VIEW_SYM table_ident
/*
          We have two separate rules for ALTER VIEW rather than
@@ -7870,14 +7962,22 @@ alter:
with the ALTER EVENT below.
*/
{
- if (unlikely(Lex->add_alter_view(thd, VIEW_ALGORITHM_INHERIT, $3, $5)))
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ if (Lex->add_alter_view(thd, VIEW_ALGORITHM_INHERIT, $3, $5))
MYSQL_YYABORT;
}
view_list_opt AS view_select
- {}
+ {
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
| ALTER definer_opt remember_name EVENT_SYM sp_name
{
- /*
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ /*
It is safe to use Lex->spname because
              ALTER EVENT xxx RENAME TO yyy DO ALTER EVENT RENAME TO
is not allowed. Lex->spname is used in the case of RENAME TO
@@ -7909,6 +8009,8 @@ alter:
*/
Lex->sql_command= SQLCOM_ALTER_EVENT;
Lex->stmt_definition_end= (char*)YYLIP->get_cpp_ptr();
+
+ Lex->pop_select(); //main select
}
| ALTER TABLESPACE alter_tablespace_info
{
@@ -7938,7 +8040,7 @@ alter:
} OPTIONS_SYM '(' server_options_list ')' { }
/* ALTER USER foo is allowed for MySQL compatibility. */
| ALTER opt_if_exists USER_SYM clear_privileges grant_list
- opt_require_clause opt_resource_options
+ opt_require_clause opt_resource_options opt_account_locking opt_password_expiration
{
Lex->create_info.set($2);
Lex->sql_command= SQLCOM_ALTER_USER;
@@ -7952,16 +8054,17 @@ alter:
lex->create_info.init();
lex->no_write_to_binlog= 0;
DBUG_ASSERT(!lex->m_sql_cmd);
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
}
table_ident
{
LEX *lex= Lex;
- if (unlikely(!(lex->create_info.seq_create_info=
- new (thd->mem_root) sequence_definition())) ||
- unlikely(!lex->select_lex.add_table_to_list(thd, $5, NULL,
- TL_OPTION_SEQUENCE,
- TL_WRITE,
- MDL_EXCLUSIVE)))
+ if (!(lex->create_info.seq_create_info= new (thd->mem_root)
+ sequence_definition()) ||
+ !lex->first_select_lex()->
+ add_table_to_list(thd, $5, NULL, TL_OPTION_SEQUENCE,
+ TL_WRITE, MDL_EXCLUSIVE))
MYSQL_YYABORT;
}
sequence_defs
@@ -7970,6 +8073,42 @@ alter:
Lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_alter_sequence($3);
if (unlikely(Lex->m_sql_cmd == NULL))
MYSQL_YYABORT;
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
+ ;
+
+opt_account_locking:
+ /* Nothing */ {}
+ | ACCOUNT_SYM LOCK_SYM
+ {
+ Lex->account_options.account_locked= ACCOUNTLOCK_LOCKED;
+ }
+ | ACCOUNT_SYM UNLOCK_SYM
+ {
+ Lex->account_options.account_locked= ACCOUNTLOCK_UNLOCKED;
+ }
+ ;
+opt_password_expiration:
+ /* Nothing */ {}
+ | PASSWORD_SYM EXPIRE_SYM
+ {
+ Lex->account_options.password_expire= PASSWORD_EXPIRE_NOW;
+ }
+ | PASSWORD_SYM EXPIRE_SYM NEVER_SYM
+ {
+ Lex->account_options.password_expire= PASSWORD_EXPIRE_NEVER;
+ }
+ | PASSWORD_SYM EXPIRE_SYM DEFAULT
+ {
+ Lex->account_options.password_expire= PASSWORD_EXPIRE_DEFAULT;
+ }
+ | PASSWORD_SYM EXPIRE_SYM INTERVAL_SYM NUM DAY_SYM
+ {
+ Lex->account_options.password_expire= PASSWORD_EXPIRE_INTERVAL;
+ if (!(Lex->account_options.num_expiration_days= atoi($4.str)))
+ my_yyabort_error((ER_WRONG_VALUE, MYF(0), "DAY", $4.str));
}
;
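opt_account_locking and opt_password_expiration add the MySQL-compatible account-state clauses to CREATE USER and ALTER USER, recorded in Lex->account_options rather than in the old ssl/x509 fields. Statements the new alternatives cover, with placeholder user names:

    CREATE USER 'app'@'%' ACCOUNT LOCK PASSWORD EXPIRE INTERVAL 30 DAY;
    ALTER USER 'app'@'%' ACCOUNT UNLOCK PASSWORD EXPIRE NEVER;
    ALTER USER 'app'@'%' PASSWORD EXPIRE;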
@@ -8118,22 +8257,7 @@ alter_commands:
| EXCHANGE_SYM PARTITION_SYM alt_part_name_item
WITH TABLE_SYM table_ident have_partitioning
{
- LEX *lex= thd->lex;
- lex->select_lex.db= $6->db;
- if (lex->select_lex.db.str == NULL &&
- unlikely(lex->copy_db_to(&lex->select_lex.db)))
- MYSQL_YYABORT;
- lex->name= $6->table;
- lex->alter_info.partition_flags|= ALTER_PARTITION_EXCHANGE;
- if (unlikely(!lex->select_lex.add_table_to_list(thd, $6, NULL,
- TL_OPTION_UPDATING,
- TL_READ_NO_INSERT,
- MDL_SHARED_NO_WRITE)))
- MYSQL_YYABORT;
- DBUG_ASSERT(!lex->m_sql_cmd);
- lex->m_sql_cmd= new (thd->mem_root)
- Sql_cmd_alter_table_exchange_partition();
- if (unlikely(lex->m_sql_cmd == NULL))
+ if (Lex->stmt_alter_table_exchange_partition($6))
MYSQL_YYABORT;
}
;
@@ -8257,6 +8381,13 @@ alter_list_item:
{
Lex->alter_info.flags|= ALTER_ADD_PERIOD;
}
+ | ADD
+ PERIOD_SYM opt_if_not_exists_table_element period_for_application_time
+ {
+ Table_period_info &period= Lex->create_info.period_info;
+ period.create_if_not_exists= Lex->check_exists;
+ Lex->alter_info.flags|= ALTER_ADD_CHECK_CONSTRAINT;
+ }
| add_column '(' create_field_list ')'
{
LEX *lex=Lex;
@@ -8271,7 +8402,7 @@ alter_list_item:
| ADD CONSTRAINT IF_SYM not EXISTS field_ident check_constraint
{
Lex->alter_info.flags|= ALTER_ADD_CHECK_CONSTRAINT;
- Lex->add_constraint(&$6, $7, TRUE);
+ Lex->add_constraint($6, $7, TRUE);
}
| CHANGE opt_column opt_if_exists_table_element field_ident
field_spec opt_place
@@ -8367,9 +8498,9 @@ alter_list_item:
| RENAME opt_to table_ident
{
LEX *lex=Lex;
- lex->select_lex.db= $3->db;
- if (lex->select_lex.db.str == NULL &&
- unlikely(lex->copy_db_to(&lex->select_lex.db)))
+ lex->first_select_lex()->db= $3->db;
+ if (lex->first_select_lex()->db.str == NULL &&
+ lex->copy_db_to(&lex->first_select_lex()->db))
MYSQL_YYABORT;
if (unlikely(check_table_name($3->table.str,$3->table.length,
FALSE)) ||
@@ -8390,7 +8521,7 @@ alter_list_item:
$5->name, $4->csname));
if (unlikely(Lex->create_info.add_alter_list_item_convert_to_charset($5)))
MYSQL_YYABORT;
- Lex->alter_info.flags|= ALTER_OPTIONS;
+ Lex->alter_info.flags|= ALTER_CONVERT_TO;
}
| create_table_options_space_separated
{
@@ -8426,6 +8557,14 @@ alter_list_item:
{
Lex->alter_info.flags|= ALTER_DROP_PERIOD;
}
+ | DROP PERIOD_SYM opt_if_exists_table_element FOR_SYM ident
+ {
+ Alter_drop *ad= new Alter_drop(Alter_drop::PERIOD, $5.str, $3);
+ if (unlikely(ad == NULL))
+ MYSQL_YYABORT;
+ Lex->alter_info.drop_list.push_back(ad, thd->mem_root);
+ Lex->alter_info.flags|= ALTER_DROP_CHECK_CONSTRAINT;
+ }
;
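The two new alter_list_item alternatives let ALTER TABLE add or drop an application-time period on an existing table, reusing period_for_application_time for the ADD form. A sketch of the syntax the grammar accepts (names are placeholders):

    ALTER TABLE coupons ADD PERIOD FOR valid_period(date_start, date_end);
    ALTER TABLE coupons DROP PERIOD FOR valid_period;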
opt_index_lock_algorithm:
@@ -8434,6 +8573,7 @@ opt_index_lock_algorithm:
| alter_algorithm_option
| alter_lock_option alter_algorithm_option
| alter_algorithm_option alter_lock_option
+ ;
alter_algorithm_option:
ALGORITHM_SYM opt_equal DEFAULT
@@ -8492,7 +8632,7 @@ alter_option:
Lex->alter_info.requested_lock=
Alter_info::ALTER_TABLE_LOCK_NONE;
}
-
+ ;
opt_restrict:
/* empty */ { Lex->drop_mode= DROP_DEFAULT; }
@@ -8759,6 +8899,7 @@ persistent_stat_spec:
{}
| COLUMNS persistent_column_stat_spec INDEXES persistent_index_stat_spec
{}
+ ;
persistent_column_stat_spec:
ALL {}
@@ -9073,8 +9214,8 @@ adm_partition:
cache_keys_spec:
{
- Lex->select_lex.alloc_index_hints(thd);
- Select->set_index_hint_type(INDEX_HINT_USE,
+ Lex->first_select_lex()->alloc_index_hints(thd);
+ Select->set_index_hint_type(INDEX_HINT_USE,
INDEX_HINT_MASK_ALL);
}
cache_key_list_or_empty
@@ -9095,217 +9236,213 @@ opt_ignore_leaves:
Select : retrieve data from table
*/
-
select:
- opt_with_clause select_init
+ query_expression_body
{
- LEX *lex= Lex;
- lex->sql_command= SQLCOM_SELECT;
- lex->current_select->set_with_clause($1);
- }
- ;
-
-select_init:
- SELECT_SYM select_options_and_item_list select_init3
- | table_value_constructor
- | table_value_constructor union_list
- | table_value_constructor union_order_or_limit
- | '(' select_paren ')'
- | '(' select_paren ')' union_list
- | '(' select_paren ')' union_order_or_limit
- ;
-
-union_list_part2:
- SELECT_SYM select_options_and_item_list select_init3_union_query_term
- | table_value_constructor
- | table_value_constructor union_list
- | table_value_constructor union_order_or_limit
- | '(' select_paren_union_query_term ')'
- | '(' select_paren_union_query_term ')' union_list
- | '(' select_paren_union_query_term ')' union_order_or_limit
- ;
-
-select_paren:
- {
- Lex->current_select->set_braces(true);
+ if (Lex->push_select($1->fake_select_lex ?
+ $1->fake_select_lex :
+ $1->first_select()))
+ MYSQL_YYABORT;
}
- table_value_constructor
+ opt_procedure_or_into
{
- DBUG_ASSERT(Lex->current_select->braces);
+ Lex->pop_select();
+ if (Lex->select_finalize($1))
+ MYSQL_YYABORT;
}
- |
+ | with_clause query_expression_body
{
- /*
- In order to correctly parse UNION's global ORDER BY we need to
- set braces before parsing the clause.
- */
- Lex->current_select->set_braces(true);
+ if (Lex->push_select($2->fake_select_lex ?
+ $2->fake_select_lex :
+ $2->first_select()))
+ MYSQL_YYABORT;
}
- SELECT_SYM select_options_and_item_list select_part3
- opt_select_lock_type
+ opt_procedure_or_into
{
- DBUG_ASSERT(Lex->current_select->braces);
+ Lex->pop_select();
+ $2->set_with_clause($1);
+ $1->attach_to($2->first_select());
+ if (Lex->select_finalize($2))
+ MYSQL_YYABORT;
}
- | '(' select_paren ')'
;
-select_paren_union_query_term:
+
+select_into:
+ select_into_query_specification
{
- /*
- In order to correctly parse UNION's global ORDER BY we need to
- set braces before parsing the clause.
- */
- Lex->current_select->set_braces(true);
+ if (Lex->push_select($1))
+ MYSQL_YYABORT;
}
- SELECT_SYM select_options_and_item_list select_part3_union_query_term
- opt_select_lock_type
+ opt_order_limit_lock
{
- DBUG_ASSERT(Lex->current_select->braces);
- }
- | '(' select_paren_union_query_term ')'
+ st_select_lex_unit *unit;
+ if (!(unit= Lex->parsed_body_select($1, $3)))
+ MYSQL_YYABORT;
+ if (Lex->select_finalize(unit))
+ MYSQL_YYABORT;
+ }
+ ;
+
+
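select_into splits the INTO form out of the plain select rule: the INTO target is parsed inside select_into_query_specification, and the trailing ORDER BY/LIMIT/locking goes through opt_order_limit_lock. For instance (table, column and file names are illustrative):

    SELECT id, name INTO @id, @name FROM users WHERE id = 1 LIMIT 1;
    SELECT * INTO OUTFILE '/tmp/t1.txt' FROM t1 ORDER BY id;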
+simple_table:
+ query_specification { $$= $1; }
+ | table_value_constructor { $$= $1; }
;
-select_paren_view:
+table_value_constructor:
+ VALUES
+ {
+ if (Lex->parsed_TVC_start())
+ MYSQL_YYABORT;
+ }
+ values_list
+ {
+ if (!($$= Lex->parsed_TVC_end()))
+ MYSQL_YYABORT;
+ }
+ ;
+
+query_specification_start:
+ SELECT_SYM
{
- /*
- In order to correctly parse UNION's global ORDER BY we need to
- set braces before parsing the clause.
- */
- Lex->current_select->set_braces(true);
+ SELECT_LEX *sel;
+ LEX *lex= Lex;
+ if (!(sel= lex->alloc_select(TRUE)) ||
+ lex->push_select(sel))
+ MYSQL_YYABORT;
+ sel->init_select();
+ sel->braces= FALSE;
}
- SELECT_SYM select_options_and_item_list select_part3_view
- opt_select_lock_type
+ select_options
{
- DBUG_ASSERT(Lex->current_select->braces);
+ Select->parsing_place= SELECT_LIST;
}
- | '(' select_paren_view ')'
- ;
+ select_item_list
+ {
+ Select->parsing_place= NO_MATTER;
+ }
+ ;
-/* The equivalent of select_paren for nested queries. */
-select_paren_derived:
+query_specification:
+ query_specification_start
+ opt_from_clause
+ opt_where_clause
+ opt_group_clause
+ opt_having_clause
+ opt_window_clause
{
- Lex->current_select->set_braces(true);
+ $$= Lex->pop_select();
}
- table_value_constructor
+ ;
+
+select_into_query_specification:
+ query_specification_start
+ into
+ opt_from_clause
+ opt_where_clause
+ opt_group_clause
+ opt_having_clause
+ opt_window_clause
{
- DBUG_ASSERT(Lex->current_select->braces);
- $$= Lex->current_select->master_unit()->first_select();
+ $$= Lex->pop_select();
}
- |
+ ;
+
+opt_from_clause:
+ /* Empty */
+ | from_clause
+ ;
+
+
+query_primary:
+ simple_table
+ { $$= $1; }
+ | query_primary_parens
+ { $$= $1; }
+ ;
+
+query_primary_parens:
+ '(' query_expression_unit
{
- Lex->current_select->set_braces(true);
+ if (Lex->parsed_unit_in_brackets($2))
+ MYSQL_YYABORT;
}
- SELECT_SYM select_part2_derived
- opt_table_expression
- opt_order_clause
- opt_limit_clause
- opt_select_lock_type
+ query_expression_tail ')'
{
- DBUG_ASSERT(Lex->current_select->braces);
- $$= Lex->current_select->master_unit()->first_select();
+ $$= Lex->parsed_unit_in_brackets_tail($2, $4);
}
- | '(' select_paren_derived ')' { $$= $2; }
- ;
-
-select_init3:
- opt_table_expression
- opt_select_lock_type
+ | '(' query_primary
{
- /* Parentheses carry no meaning here */
- Lex->current_select->set_braces(false);
+ Lex->push_select($2);
}
- union_clause
- | select_part3_union_not_ready
- opt_select_lock_type
+ query_expression_tail ')'
{
- /* Parentheses carry no meaning here */
- Lex->current_select->set_braces(false);
+ if (!($$= Lex->parsed_select_in_brackets($2, $4)))
+ YYABORT;
}
;
-
-select_init3_union_query_term:
- opt_table_expression
- opt_select_lock_type
+query_expression_unit:
+ query_primary
+ unit_type_decl
+ query_primary
{
- /* Parentheses carry no meaning here */
- Lex->current_select->set_braces(false);
+ if (!($$= Lex->parsed_select_expr_start($1, $3, $2.unit_type,
+ $2.distinct)))
+ YYABORT;
}
- union_clause
- | select_part3_union_not_ready_noproc
- opt_select_lock_type
+ | query_expression_unit
+ unit_type_decl
+ query_primary
{
- /* Parentheses carry no meaning here */
- Lex->current_select->set_braces(false);
+ if (!($$= Lex->parsed_select_expr_cont($1, $3, $2.unit_type,
+ $2.distinct, TRUE)))
+ YYABORT;
}
;
-
-select_init3_view:
- opt_table_expression opt_select_lock_type
+query_expression_body:
+ query_primary
{
- Lex->current_select->set_braces(false);
+ Lex->push_select($1);
}
- | opt_table_expression opt_select_lock_type
+ query_expression_tail
{
- Lex->current_select->set_braces(false);
+ if (!($$= Lex->parsed_body_select($1, $3)))
+ MYSQL_YYABORT;
}
- union_list_view
- | order_or_limit opt_select_lock_type
+ | query_expression_unit
{
- Lex->current_select->set_braces(false);
+ if (Lex->parsed_body_unit($1))
+ MYSQL_YYABORT;
}
- | table_expression order_or_limit opt_select_lock_type
+ query_expression_tail
{
- Lex->current_select->set_braces(false);
+ if (!($$= Lex->parsed_body_unit_tail($1, $3)))
+ MYSQL_YYABORT;
}
;
-/*
- The SELECT parts after select_item_list that cannot be followed by UNION.
-*/
-
-select_part3:
- opt_table_expression
- | select_part3_union_not_ready
- ;
-
-select_part3_union_query_term:
- opt_table_expression
- | select_part3_union_not_ready_noproc
- ;
-
-select_part3_view:
- opt_table_expression
- | order_or_limit
- | table_expression order_or_limit
- ;
-
-select_part3_union_not_ready:
- select_part3_union_not_ready_noproc
- | table_expression procedure_clause
- | table_expression order_or_limit procedure_clause
- ;
-
-select_part3_union_not_ready_noproc:
- order_or_limit
- | into opt_table_expression opt_order_clause opt_limit_clause
- | table_expression into
- | table_expression order_or_limit
- | table_expression order_or_limit into
- ;
-
-select_options_and_item_list:
+query_expression:
+ opt_with_clause
+ query_expression_body
{
- LEX *lex= Lex;
- SELECT_LEX *sel= lex->current_select;
- if (sel->linkage != UNION_TYPE)
- mysql_init_select(lex);
- lex->current_select->parsing_place= SELECT_LIST;
+ if ($1)
+ {
+ $2->set_with_clause($1);
+ $1->attach_to($2->first_select());
+ }
+ $$= $2;
}
- select_options select_item_list
+ ;
+
+subselect:
+ remember_tok_start
+ query_expression
{
- Select->parsing_place= NO_MATTER;
+ if (!($$= Lex->parsed_subselect($2, $1)))
+ YYABORT;
}
;
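select and subselect are rebuilt on top of query_expression, query_expression_body and query_primary, so a parenthesized query primary can carry its own ORDER BY and LIMIT before the set operation and the outer tail are applied. Both of the following now flow through the same rules (table and column names are illustrative):

    (SELECT a FROM t1 ORDER BY a LIMIT 10)
    UNION
    (SELECT a FROM t2 ORDER BY a LIMIT 10)
    ORDER BY a LIMIT 5;

    SELECT * FROM t3 WHERE a IN (SELECT a FROM t1 UNION SELECT a FROM t2);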
@@ -9313,18 +9450,6 @@ select_options_and_item_list:
/**
<table expression>, as in the SQL standard.
*/
-table_expression:
- from_clause
- opt_where_clause
- opt_group_clause
- opt_having_clause
- opt_window_clause
- ;
-
-opt_table_expression:
- /* Empty */
- | table_expression
- ;
from_clause:
FROM table_reference_list
@@ -9373,8 +9498,9 @@ history_point:
TIMESTAMP TEXT_STRING
{
Item *item;
- if (!(item= create_temporal_literal(thd, $2.str, $2.length, YYCSCL,
- MYSQL_TYPE_DATETIME, true)))
+ if (!(item= type_handler_datetime2.create_literal_item(thd,
+ $2.str, $2.length,
+ YYCSCL, true)))
MYSQL_YYABORT;
$$= Vers_history_point(VERS_TIMESTAMP, item);
}
@@ -9388,6 +9514,33 @@ history_point:
}
;
+for_portion_of_time_clause:
+ FOR_SYM PORTION_SYM OF_SYM remember_tok_start ident FROM
+ bit_expr TO_SYM bit_expr
+ {
+ if (unlikely(0 == strcasecmp($5.str, "SYSTEM_TIME")))
+ {
+ thd->parse_error(ER_SYNTAX_ERROR, $4);
+ MYSQL_YYABORT;
+ }
+ Lex->period_conditions.init(SYSTEM_TIME_FROM_TO,
+ Vers_history_point(VERS_TIMESTAMP, $7),
+ Vers_history_point(VERS_TIMESTAMP, $9),
+ $5);
+ }
+ ;
+
+opt_for_portion_of_time_clause:
+ /* empty */
+ {
+ $$= false;
+ }
+ | for_portion_of_time_clause
+ {
+ $$= true;
+ }
+ ;
+
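for_portion_of_time_clause implements the SQL:2016 FOR PORTION OF syntax over an application-time period (SYSTEM_TIME is explicitly rejected), and opt_for_portion_of_time_clause feeds it into the UPDATE and DELETE rules. A sketch reusing the period from the earlier CREATE TABLE example (the discount column is assumed for illustration):

    UPDATE coupons
      FOR PORTION OF valid_period FROM '2019-01-01' TO '2019-06-01'
      SET discount = 10;

    DELETE FROM coupons
      FOR PORTION OF valid_period FROM '2019-01-01' TO '2019-06-01';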
opt_for_system_time_clause:
/* empty */
{
@@ -9427,59 +9580,68 @@ select_option:
query_expression_option
| SQL_NO_CACHE_SYM
{
- /*
- Allow this flag only on the first top-level SELECT statement, if
- SQL_CACHE wasn't specified, and only once per query.
- */
- if (unlikely(Lex->current_select != &Lex->select_lex))
- my_yyabort_error((ER_CANT_USE_OPTION_HERE, MYF(0), "SQL_NO_CACHE"));
- if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_CACHE))
- my_yyabort_error((ER_WRONG_USAGE, MYF(0), "SQL_CACHE", "SQL_NO_CACHE"));
- if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_NO_CACHE))
+ /*
+ Allow this flag once per query.
+ */
+ if (Select->options & OPTION_NO_QUERY_CACHE)
my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "SQL_NO_CACHE"));
-
- Lex->safe_to_cache_query=0;
- Lex->select_lex.options&= ~OPTION_TO_QUERY_CACHE;
- Lex->select_lex.sql_cache= SELECT_LEX::SQL_NO_CACHE;
+ Select->options|= OPTION_NO_QUERY_CACHE;
}
| SQL_CACHE_SYM
{
- /*
- Allow this flag only on the first top-level SELECT statement, if
- SQL_NO_CACHE wasn't specified, and only once per query.
- */
- if (unlikely(Lex->current_select != &Lex->select_lex))
- my_yyabort_error((ER_CANT_USE_OPTION_HERE, MYF(0), "SQL_CACHE"));
- if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_NO_CACHE))
- my_yyabort_error((ER_WRONG_USAGE, MYF(0), "SQL_NO_CACHE", "SQL_CACHE"));
- if (unlikely(Lex->select_lex.sql_cache == SELECT_LEX::SQL_CACHE))
+ /*
+ Allow this flag once per query.
+ */
+ if (Select->options & OPTION_TO_QUERY_CACHE)
my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "SQL_CACHE"));
-
- Lex->safe_to_cache_query=1;
- Lex->select_lex.options|= OPTION_TO_QUERY_CACHE;
- Lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE;
+ Select->options|= OPTION_TO_QUERY_CACHE;
}
;
-opt_select_lock_type:
- /* empty */
- | FOR_SYM UPDATE_SYM opt_lock_wait_timeout
+
+select_lock_type:
+ FOR_SYM UPDATE_SYM opt_lock_wait_timeout_new
{
- LEX *lex=Lex;
- lex->current_select->lock_type= TL_WRITE;
- lex->current_select->set_lock_for_tables(TL_WRITE);
- lex->safe_to_cache_query=0;
+ $$= $3;
+ $$.defined_lock= TRUE;
+ $$.update_lock= TRUE;
}
- | LOCK_SYM IN_SYM SHARE_SYM MODE_SYM opt_lock_wait_timeout
+ | LOCK_SYM IN_SYM SHARE_SYM MODE_SYM opt_lock_wait_timeout_new
{
- LEX *lex=Lex;
- lex->current_select->lock_type= TL_READ_WITH_SHARED_LOCKS;
- lex->current_select->
- set_lock_for_tables(TL_READ_WITH_SHARED_LOCKS);
- lex->safe_to_cache_query=0;
+ $$= $5;
+ $$.defined_lock= TRUE;
+ $$.update_lock= FALSE;
}
;
+opt_select_lock_type:
+ /* empty */
+ {
+ $$.empty();
+ }
+ | select_lock_type
+ {
+ $$= $1;
+ }
+ ;
+
+opt_lock_wait_timeout_new:
+ /* empty */
+ {
+ $$.empty();
+ }
+ | WAIT_SYM ulong_num
+ {
+ $$.defined_timeout= TRUE;
+ $$.timeout= $2;
+ }
+ | NOWAIT_SYM
+ {
+ $$.defined_timeout= TRUE;
+ $$.timeout= 0;
+ }
+ ;
+
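select_lock_type and opt_lock_wait_timeout_new return the locking request as a value (the select_lock %type) instead of mutating the LEX in place, and they attach a per-statement WAIT/NOWAIT timeout. Examples of the locking tails this covers:

    SELECT * FROM t1 WHERE id = 1 FOR UPDATE WAIT 5;
    SELECT * FROM t1 WHERE id = 1 FOR UPDATE NOWAIT;
    SELECT * FROM t1 WHERE id = 1 LOCK IN SHARE MODE;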
select_item_list:
select_item_list ',' select_item
| select_item
@@ -9567,13 +9729,13 @@ select_alias:
opt_default_time_precision:
/* empty */ { $$= NOT_FIXED_DEC; }
| '(' ')' { $$= NOT_FIXED_DEC; }
- | '(' real_ulong_num ')' { $$= $2; };
+ | '(' real_ulong_num ')' { $$= $2; }
;
opt_time_precision:
/* empty */ { $$= 0; }
| '(' ')' { $$= 0; }
- | '(' real_ulong_num ')' { $$= $2; };
+ | '(' real_ulong_num ')' { $$= $2; }
;
optional_braces:
@@ -10078,6 +10240,7 @@ dyncall_create_element:
else
$$->len= 0;
}
+ ;
dyncall_create_list:
dyncall_create_element
@@ -10160,7 +10323,21 @@ column_default_non_parenthesized_expr:
| param_marker { $$= $1; }
| variable
| sum_expr
+ {
+ if (!Lex->select_stack_top)
+ {
+ my_error(ER_INVALID_GROUP_FUNC_USE, MYF(0));
+ MYSQL_YYABORT;
+ }
+ }
| window_func_expr
+ {
+ if (!Lex->select_stack_top)
+ {
+ my_error(ER_WRONG_PLACEMENT_OF_WINDOW_FUNCTION, MYF(0));
+ MYSQL_YYABORT;
+ }
+ }
| inverse_distribution_function
| ROW_SYM '(' expr ',' expr_list ')'
{
@@ -10339,7 +10516,7 @@ function_call_keyword_timestamp:
}
| TIMESTAMP '(' expr ',' expr ')'
{
- $$= new (thd->mem_root) Item_func_add_time(thd, $3, $5, 1, 0);
+ $$= new (thd->mem_root) Item_func_timestamp(thd, $3, $5);
if (unlikely($$ == NULL))
MYSQL_YYABORT;
}
@@ -11318,6 +11495,21 @@ window_func:
{
((Item_sum *) $1)->mark_as_window_func_sum_expr();
}
+ |
+ function_call_generic
+ {
+ Item* item = (Item*)$1;
+ /* Only UDF aggregate here possible */
+ if ((item == NULL) ||
+ (item->type() != Item::SUM_FUNC_ITEM)
+ || (((Item_sum *)item)->sum_func() != Item_sum::UDF_SUM_FUNC))
+ {
+ thd->parse_error();
+ MYSQL_YYABORT;
+ }
+
+ ((Item_sum *) $1)->mark_as_window_func_sum_expr();
+ }
;
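The added window_func alternative accepts a generic function call in the window-function position, but only when it resolves to a UDF aggregate; anything else raises a parse error. A hedged example, assuming a UDF aggregate named udf_sum is installed (the function and table are not part of this patch):

    SELECT udf_sum(amount) OVER (PARTITION BY customer_id) FROM orders;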
simple_window_func:
@@ -11558,7 +11750,7 @@ opt_gconcat_separator:
opt_gorder_clause:
/* empty */
- | ORDER_SYM BY gorder_list;
+ | ORDER_SYM BY gorder_list
;
gorder_list:
@@ -11671,6 +11863,10 @@ cast_type_temporal:
DATE_SYM { $$.set(&type_handler_newdate); }
| TIME_SYM opt_field_length { $$.set(&type_handler_time2, 0, $2); }
| DATETIME opt_field_length { $$.set(&type_handler_datetime2, 0, $2); }
+ | INTERVAL_SYM DAY_SECOND_SYM field_length
+ {
+ $$.set(&type_handler_interval_DDhhmmssff, 0, $3);
+ }
;
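cast_type_temporal gains an INTERVAL DAY_SECOND(N) target backed by type_handler_interval_DDhhmmssff; note that the length specification is mandatory in this alternative. A hedged example of the syntax the rule accepts (the literal is illustrative):

    SELECT CAST('1 10:20:30.123456' AS INTERVAL DAY_SECOND(6));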
opt_expr_list:
@@ -11681,9 +11877,7 @@ opt_expr_list:
expr_list:
expr
{
- $$= new (thd->mem_root) List<Item>;
- if (unlikely($$ == NULL) ||
- unlikely($$->push_back($1, thd->mem_root)))
+ if (unlikely(!($$= List<Item>::make(thd->mem_root, $1))))
MYSQL_YYABORT;
}
| expr_list ',' expr
@@ -11795,10 +11989,15 @@ esc_table_ref:
/* Equivalent to <table reference list> in the SQL:2003 standard. */
/* Warning - may return NULL in case of incomplete SELECT */
derived_table_list:
- esc_table_ref { $$=$1; }
+ esc_table_ref
+ {
+ $$=$1;
+ Select->add_joined_table($1);
+ }
| derived_table_list ',' esc_table_ref
{
MYSQL_YYABORT_UNLESS($1 && ($$=$3));
+ Select->add_joined_table($3);
}
;
@@ -11817,11 +12016,18 @@ join_table:
left-associative joins.
*/
table_ref normal_join table_ref %prec TABLE_REF_PRIORITY
- { MYSQL_YYABORT_UNLESS($1 && ($$=$3)); $3->straight=$2; }
+ {
+ MYSQL_YYABORT_UNLESS($1 && ($$=$3));
+ Select->add_joined_table($1);
+ Select->add_joined_table($3);
+ $3->straight=$2;
+ }
| table_ref normal_join table_ref
ON
{
MYSQL_YYABORT_UNLESS($1 && $3);
+ Select->add_joined_table($1);
+ Select->add_joined_table($3);
/* Change the current name resolution context to a local context. */
if (unlikely(push_new_name_resolution_context(thd, $1, $3)))
MYSQL_YYABORT;
@@ -11838,6 +12044,8 @@ join_table:
USING
{
MYSQL_YYABORT_UNLESS($1 && $3);
+ Select->add_joined_table($1);
+ Select->add_joined_table($3);
}
'(' using_list ')'
{
@@ -11848,6 +12056,8 @@ join_table:
| table_ref NATURAL inner_join table_factor
{
MYSQL_YYABORT_UNLESS($1 && ($$=$4));
+ Select->add_joined_table($1);
+ Select->add_joined_table($4);
$4->straight=$3;
add_join_natural($1,$4,NULL,Select);
}
@@ -11857,6 +12067,8 @@ join_table:
ON
{
MYSQL_YYABORT_UNLESS($1 && $5);
+ Select->add_joined_table($1);
+ Select->add_joined_table($5);
/* Change the current name resolution context to a local context. */
if (unlikely(push_new_name_resolution_context(thd, $1, $5)))
MYSQL_YYABORT;
@@ -11873,6 +12085,8 @@ join_table:
| table_ref LEFT opt_outer JOIN_SYM table_factor
{
MYSQL_YYABORT_UNLESS($1 && $5);
+ Select->add_joined_table($1);
+ Select->add_joined_table($5);
}
USING '(' using_list ')'
{
@@ -11883,6 +12097,8 @@ join_table:
| table_ref NATURAL LEFT opt_outer JOIN_SYM table_factor
{
MYSQL_YYABORT_UNLESS($1 && $6);
+ Select->add_joined_table($1);
+ Select->add_joined_table($6);
add_join_natural($1,$6,NULL,Select);
$6->outer_join|=JOIN_TYPE_LEFT;
$$=$6;
@@ -11893,6 +12109,8 @@ join_table:
ON
{
MYSQL_YYABORT_UNLESS($1 && $5);
+ Select->add_joined_table($1);
+ Select->add_joined_table($5);
/* Change the current name resolution context to a local context. */
if (unlikely(push_new_name_resolution_context(thd, $1, $5)))
MYSQL_YYABORT;
@@ -11910,6 +12128,8 @@ join_table:
| table_ref RIGHT opt_outer JOIN_SYM table_factor
{
MYSQL_YYABORT_UNLESS($1 && $5);
+ Select->add_joined_table($1);
+ Select->add_joined_table($5);
}
USING '(' using_list ')'
{
@@ -11921,6 +12141,8 @@ join_table:
| table_ref NATURAL RIGHT opt_outer JOIN_SYM table_factor
{
MYSQL_YYABORT_UNLESS($1 && $6);
+ Select->add_joined_table($1);
+ Select->add_joined_table($6);
add_join_natural($6,$1,NULL,Select);
LEX *lex= Lex;
if (unlikely(!($$= lex->current_select->convert_right_join())))
@@ -11953,240 +12175,49 @@ use_partition:
PARTITION_SYM '(' using_list ')' have_partitioning
{
$$= $3;
+ Select->parsing_place= Select->save_parsing_place;
+ Select->save_parsing_place= NO_MATTER;
}
;
-
-/*
- This is a flattening of the rules <table factor> and <table primary>
- in the SQL:2003 standard, since we don't have <sample clause>
- I.e.
- <table factor> ::= <table primary> [ <sample clause> ]
-*/
-/* Warning - may return NULL in case of incomplete SELECT */
table_factor:
- table_primary_ident
- | table_primary_derived
+ table_primary_ident { $$= $1; }
+ | table_primary_derived { $$= $1; }
+ | join_table_parens { $$= $1; }
+ | table_reference_list_parens { $$= $1; }
;
-table_primary_ident:
- {
- DBUG_ASSERT(Select);
- SELECT_LEX *sel= Select;
- sel->table_join_options= 0;
- }
- table_ident opt_use_partition opt_for_system_time_clause opt_table_alias opt_key_definition
+table_reference_list_parens:
+ '(' table_reference_list_parens ')' { $$= $2; }
+ | '(' nested_table_reference_list ')'
{
- if (unlikely(!($$= Select->add_table_to_list(thd, $2, $5,
- Select->get_table_join_options(),
- YYPS->m_lock_type,
- YYPS->m_mdl_type,
- Select->
- pop_index_hints(),
- $3))))
+ if (!($$= Select->end_nested_join(thd)))
MYSQL_YYABORT;
- Select->add_joined_table($$);
- if ($4)
- $$->vers_conditions= Lex->vers_conditions;
}
;
-
-
-/*
- Represents a flattening of the following rules from the SQL:2003
- standard. This sub-rule corresponds to the sub-rule
- <table primary> ::= ... | <derived table> [ AS ] <correlation name>
-
- <derived table> ::= <table subquery>
- <table subquery> ::= <subquery>
- <subquery> ::= <left paren> <query expression> <right paren>
- <query expression> ::= [ <with clause> ] <query expression body>
-
- For the time being we use the non-standard rule
- select_derived_union which is a compromise between the standard
- and our parser. Possibly this rule could be replaced by our
- query_expression_body.
-*/
-
-table_primary_derived:
- '(' get_select_lex select_derived_union ')' opt_for_system_time_clause opt_table_alias
+nested_table_reference_list:
+ table_ref ',' table_ref
{
- /* Use $2 instead of Lex->current_select as derived table will
- alter value of Lex->current_select. */
- if (!($3 || $6) && $2->embedding &&
- !$2->embedding->nested_join->join_list.elements)
- {
- /* we have a derived table ($3 == NULL) but no alias,
- Since we are nested in further parentheses so we
- can pass NULL to the outer level parentheses
- Permits parsing of "((((select ...))) as xyz)" */
- $$= 0;
- }
- else if (!$3)
- {
- /* Handle case of derived table, alias may be NULL if there
- are no outer parentheses, add_table_to_list() will throw
- error in this case */
- LEX *lex=Lex;
- lex->check_automatic_up(UNSPECIFIED_TYPE);
- SELECT_LEX *sel= lex->current_select;
- SELECT_LEX_UNIT *unit= sel->master_unit();
- lex->current_select= sel= unit->outer_select();
- Table_ident *ti= new (thd->mem_root) Table_ident(unit);
- if (unlikely(ti == NULL))
- MYSQL_YYABORT;
- if (unlikely(!($$= sel->add_table_to_list(thd,
- ti, $6, 0,
- TL_READ,
- MDL_SHARED_READ))))
- MYSQL_YYABORT;
- sel->add_joined_table($$);
- lex->pop_context();
- lex->nest_level--;
- }
- else if (unlikely($6 != NULL))
- {
- /*
- Tables with or without joins within parentheses cannot
- have aliases, and we ruled out derived tables above.
- */
- thd->parse_error();
+ if (Select->init_nested_join(thd))
MYSQL_YYABORT;
- }
- else
- {
- /* nested join: FROM (t1 JOIN t2 ...),
- nest_level is the same as in the outer query */
- $$= $3;
- }
- /*
- Fields in derived table can be used in upper select in
- case of merge. We do not add HAVING fields because we do
- not merge such derived. We do not add union because
- also do not merge them
- */
- if ($$ && $$->derived &&
- !$$->derived->first_select()->next_select())
- $$->select_lex->add_where_field($$->derived->first_select());
- if ($5)
- {
- MYSQL_YYABORT_UNLESS(!$3);
- $$->vers_conditions= Lex->vers_conditions;
- }
+ Select->add_joined_table($1);
+ Select->add_joined_table($3);
+ $$= $1->embedding;
}
- /* Represents derived table with WITH clause */
- | '(' get_select_lex subselect_start
- with_clause query_expression_body
- subselect_end ')' opt_for_system_time_clause opt_table_alias
+ | nested_table_reference_list ',' table_ref
{
- LEX *lex=Lex;
- SELECT_LEX *sel= $2;
- SELECT_LEX_UNIT *unit= $5->master_unit();
- Table_ident *ti= new (thd->mem_root) Table_ident(unit);
- if (unlikely(ti == NULL))
- MYSQL_YYABORT;
- $5->set_with_clause($4);
- lex->current_select= sel;
- if (unlikely(!($$= sel->add_table_to_list(lex->thd,
- ti, $9, 0,
- TL_READ,
- MDL_SHARED_READ))))
- MYSQL_YYABORT;
- sel->add_joined_table($$);
- if ($8)
- $$->vers_conditions= Lex->vers_conditions;
- }
- ;
-
-/*
- This rule accepts just about anything. The reason is that we have
- empty-producing rules in the beginning of rules, in this case
- subselect_start. This forces bison to take a decision which rules to
- reduce by long before it has seen any tokens. This approach ties us
- to a very limited class of parseable languages, and unfortunately
- SQL is not one of them. The chosen 'solution' was this rule, which
- produces just about anything, even complete bogus statements, for
- instance ( table UNION SELECT 1 ).
- Fortunately, we know that the semantic value returned by
- select_derived is NULL if it contained a derived table, and a pointer to
- the base table's TABLE_LIST if it was a base table. So in the rule
- regarding union's, we throw a parse error manually and pretend it
- was bison that did it.
-
- Also worth noting is that this rule concerns query expressions in
- the from clause only. Top level select statements and other types of
- subqueries have their own union rules.
-*/
-select_derived_union:
- select_derived
- | select_derived union_order_or_limit
- {
- if (unlikely($1))
- {
- thd->parse_error();
- MYSQL_YYABORT;
- }
- }
- | select_derived union_head_non_top
- {
- if (unlikely($1))
- {
- thd->parse_error();
- MYSQL_YYABORT;
- }
- }
- union_list_derived_part2
- | derived_simple_table opt_select_lock_type
- | derived_simple_table order_or_limit opt_select_lock_type
- | derived_simple_table opt_select_lock_type union_list_derived
- ;
-
-union_list_derived_part2:
- query_term_union_not_ready { Lex->pop_context(); }
- | query_term_union_ready { Lex->pop_context(); }
- | query_term_union_ready { Lex->pop_context(); } union_list_derived
- ;
-
-union_list_derived:
- union_head_non_top union_list_derived_part2
- ;
-
-
-/* The equivalent of select_init2 for nested queries. */
-select_init2_derived:
- select_part2_derived
- {
- Select->set_braces(0);
- }
- ;
-
-/* The equivalent of select_part2 for nested queries. */
-select_part2_derived:
- {
- LEX *lex= Lex;
- SELECT_LEX *sel= lex->current_select;
- if (sel->linkage != UNION_TYPE)
- mysql_init_select(lex);
- lex->current_select->parsing_place= SELECT_LIST;
- }
- opt_query_expression_options select_item_list
- {
- Select->parsing_place= NO_MATTER;
+ Select->add_joined_table($3);
+ $$= $1;
}
;
-/* handle contents of parentheses in join expression */
-select_derived:
- get_select_lex_derived derived_table_list
+join_table_parens:
+ '(' join_table_parens ')' { $$= $2; }
+ | '(' join_table ')'
{
LEX *lex= Lex;
- /* for normal joins, $2 != NULL and end_nested_join() != NULL,
- for derived tables, both must equal NULL */
-
- if (unlikely(!($$= $1->end_nested_join(lex->thd)) && $2))
- MYSQL_YYABORT;
- if (unlikely(!$2 && $$))
+ if (!($$= lex->current_select->nest_last_join(thd)))
{
thd->parse_error();
MYSQL_YYABORT;
@@ -12194,83 +12225,54 @@ select_derived:
}
;
-derived_simple_table:
- derived_query_specification { $$= $1; }
- | derived_table_value_constructor { $$= $1; }
- ;
-/*
- Similar to query_specification, but for derived tables.
- Example: the inner parenthesized SELECT in this query:
- SELECT * FROM (SELECT * FROM t1);
-*/
-derived_query_specification:
- SELECT_SYM select_derived_init select_derived2
- {
- if ($2)
- Select->set_braces(1);
- $$= NULL;
- }
- ;
-derived_table_value_constructor:
- VALUES
- {
- Lex->tvc_start();
- }
- values_list
+table_primary_ident:
+ table_ident opt_use_partition opt_for_system_time_clause
+ opt_table_alias_clause opt_key_definition
{
- if (Lex->tvc_finalize_derived())
+ SELECT_LEX *sel= Select;
+ sel->table_join_options= 0;
+ if (!($$= Select->add_table_to_list(thd, $1, $4,
+ Select->get_table_join_options(),
+ YYPS->m_lock_type,
+ YYPS->m_mdl_type,
+ Select->pop_index_hints(),
+ $2)))
MYSQL_YYABORT;
- $$= NULL;
+ if ($3)
+ $$->vers_conditions= Lex->vers_conditions;
}
;
-select_derived2:
- {
- LEX *lex= Lex;
- lex->derived_tables|= DERIVED_SUBQUERY;
- if (unlikely(!lex->expr_allows_subselect ||
- lex->sql_command == (int)SQLCOM_PURGE))
- {
- thd->parse_error();
- MYSQL_YYABORT;
- }
- if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE ||
- unlikely(mysql_new_select(lex, 1, NULL)))
- MYSQL_YYABORT;
- mysql_init_select(lex);
- lex->current_select->linkage= DERIVED_TABLE_TYPE;
- lex->current_select->parsing_place= SELECT_LIST;
- }
- select_options select_item_list
- {
- Select->parsing_place= NO_MATTER;
- }
- opt_table_expression
- ;
+/*
+ Represents a flattening of the following rules from the SQL:2003
+ standard. This sub-rule corresponds to the sub-rule
+ <table primary> ::= ... | <derived table> [ AS ] <correlation name>
-get_select_lex:
- /* Empty */ { $$= Select; }
- ;
+ <derived table> ::= <table subquery>
+ <table subquery> ::= <subquery>
+ <subquery> ::= <left paren> <query expression> <right paren>
+ <query expression> ::= [ <with clause> ] <query expression body>
-get_select_lex_derived:
- get_select_lex
+ For the time being we use the non-standard rule
+ select_derived_union which is a compromise between the standard
+ and our parser. Possibly this rule could be replaced by our
+ query_expression_body.
+*/
+
+table_primary_derived:
+ query_primary_parens opt_for_system_time_clause table_alias_clause
{
- LEX *lex= Lex;
- if (unlikely($1->init_nested_join(lex->thd)))
- MYSQL_YYABORT;
+ if (!($$= Lex->parsed_derived_select($1, $2, $3)))
+ YYABORT;
}
- ;
-
-select_derived_init:
+ | '('
+ query_expression
+ ')' opt_for_system_time_clause table_alias_clause
{
- LEX *lex= Lex;
-
- TABLE_LIST *embedding= lex->current_select->embedding;
- $$= embedding &&
- !embedding->nested_join->join_list.elements;
- /* return true if we are deeply nested */
+ if (!($$= Lex->parsed_derived_unit($2, $4, $5)))
+ YYABORT;
}
;
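Taken together, the reworked table_factor, join_table_parens, table_reference_list_parens, nested_table_reference_list and table_primary_derived rules are aimed at FROM clauses such as the following (all table and alias names are placeholders; query_primary_parens and query_expression are defined elsewhere in the grammar, and table_alias_clause makes the alias on a parenthesized query expression mandatory here):

    SELECT * FROM (t1 JOIN t2 ON t1.a = t2.a) JOIN t3 ON t3.a = t1.a;
    SELECT * FROM (t1, t2) WHERE t1.a = t2.a;
    SELECT * FROM (SELECT 1 AS x) AS dt;
    SELECT * FROM ((SELECT 1 AS x) UNION (SELECT 2)) AS dt;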
@@ -12404,9 +12406,14 @@ table_alias:
| '='
;
-opt_table_alias:
+opt_table_alias_clause:
/* empty */ { $$=0; }
- | table_alias ident_table_alias
+
+ | table_alias_clause { $$= $1; }
+ ;
+
+table_alias_clause:
+ table_alias ident_table_alias
{
$$= (LEX_CSTRING*) thd->memdup(&$2,sizeof(LEX_STRING));
if (unlikely($$ == NULL))
@@ -12502,7 +12509,7 @@ olap_opt:
SQL-2003: GROUP BY ... CUBE(col1, col2, col3)
*/
LEX *lex=Lex;
- if (unlikely(lex->current_select->linkage == GLOBAL_OPTIONS_TYPE))
+ if (unlikely(lex->current_select->get_linkage() == GLOBAL_OPTIONS_TYPE))
my_yyabort_error((ER_WRONG_USAGE, MYF(0), "WITH CUBE",
"global union parameters"));
lex->current_select->olap= CUBE_TYPE;
@@ -12519,7 +12526,7 @@ olap_opt:
SQL-2003: GROUP BY ... ROLLUP(col1, col2, col3)
*/
LEX *lex= Lex;
- if (unlikely(lex->current_select->linkage == GLOBAL_OPTIONS_TYPE))
+ if (unlikely(lex->current_select->get_linkage() == GLOBAL_OPTIONS_TYPE))
my_yyabort_error((ER_WRONG_USAGE, MYF(0), "WITH ROLLUP",
"global union parameters"));
lex->current_select->olap= ROLLUP_TYPE;
@@ -12571,6 +12578,7 @@ opt_window_ref:
if (unlikely(thd->lex->win_ref == NULL))
MYSQL_YYABORT;
}
+ ;
opt_window_partition_clause:
/* empty */ { }
@@ -12579,7 +12587,7 @@ opt_window_partition_clause:
opt_window_order_clause:
/* empty */ { }
- | ORDER_SYM BY order_list
+ | ORDER_SYM BY order_list { Select->order_list= *($3); }
;
opt_window_frame_clause:
@@ -12705,70 +12713,35 @@ alter_order_item:
opt_order_clause:
/* empty */
+ { $$= NULL; }
| order_clause
+ { $$= $1; }
;
order_clause:
ORDER_SYM BY
{
- LEX *lex=Lex;
- SELECT_LEX *sel= lex->current_select;
- SELECT_LEX_UNIT *unit= sel-> master_unit();
- if (unlikely(sel->linkage != GLOBAL_OPTIONS_TYPE &&
- sel->olap != UNSPECIFIED_OLAP_TYPE &&
- (sel->linkage != UNION_TYPE || sel->braces)))
- {
- my_error(ER_WRONG_USAGE, MYF(0),
- "CUBE/ROLLUP", "ORDER BY");
- MYSQL_YYABORT;
- }
- if (lex->sql_command != SQLCOM_ALTER_TABLE &&
- !unit->fake_select_lex)
- {
- /*
- A query of the of the form (SELECT ...) ORDER BY order_list is
- executed in the same way as the query
- SELECT ... ORDER BY order_list
- unless the SELECT construct contains ORDER BY or LIMIT clauses.
- Otherwise we create a fake SELECT_LEX if it has not been
- created yet.
- */
- SELECT_LEX *first_sl= unit->first_select();
- if (unlikely(!unit->is_unit_op() &&
- (first_sl->order_list.elements ||
- first_sl->select_limit) &&
- unit->add_fake_select_lex(thd)))
- MYSQL_YYABORT;
- }
- if (sel->master_unit()->is_unit_op() && !sel->braces)
- {
- /*
- At this point we don't know yet whether this is the last
- select in union or not, but we move ORDER BY to
- fake_select_lex anyway. If there would be one more select
- in union mysql_new_select will correctly throw error.
- */
- DBUG_ASSERT(sel->master_unit()->fake_select_lex);
- lex->current_select= sel->master_unit()->fake_select_lex;
- }
+ thd->where= "ORDER clause";
}
order_list
{
-
+ $$= $4;
}
;
order_list:
order_list ',' order_ident order_dir
{
- if (unlikely(add_order_to_list(thd, $3,(bool) $4)))
- MYSQL_YYABORT;
- }
+ $$= $1;
+ if (add_to_list(thd, *$$, $3,(bool) $4))
+ MYSQL_YYABORT;
+ }
| order_ident order_dir
{
- if (unlikely(add_order_to_list(thd, $1,(bool) $2)))
+ $$= new (thd->mem_root) SQL_I_List<ORDER>();
+ if (add_to_list(thd, *$$, $1, (bool) $2))
MYSQL_YYABORT;
- }
+ }
;
order_dir:
@@ -12778,63 +12751,62 @@ order_dir:
;
opt_limit_clause:
- /* empty */ {}
- | limit_clause {}
+ /* empty */
+ { $$.empty(); }
+ | limit_clause
+ { $$= $1; }
;
-limit_clause_init:
- LIMIT
- {
- SELECT_LEX *sel= Select;
- if (sel->master_unit()->is_unit_op() && !sel->braces)
- {
- /* Move LIMIT that belongs to UNION to fake_select_lex */
- Lex->current_select= sel->master_unit()->fake_select_lex;
- DBUG_ASSERT(Select);
- }
- }
- ;
-
limit_clause:
- limit_clause_init limit_options
+ LIMIT limit_options
{
- SELECT_LEX *sel= Select;
- if (!sel->select_limit->basic_const_item() ||
- sel->select_limit->val_int() > 0)
+ $$= $2;
+ if (!$$.select_limit->basic_const_item() ||
+ $$.select_limit->val_int() > 0)
Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_LIMIT);
}
- | limit_clause_init limit_options
+ | LIMIT limit_options
ROWS_SYM EXAMINED_SYM limit_rows_option
{
+ $$= $2;
Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_LIMIT);
}
- | limit_clause_init ROWS_SYM EXAMINED_SYM limit_rows_option
+ | LIMIT ROWS_SYM EXAMINED_SYM limit_rows_option
{
+ $$.select_limit= 0;
+ $$.offset_limit= 0;
+ $$.explicit_limit= 1;
Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_LIMIT);
}
;
+opt_global_limit_clause:
+ opt_limit_clause
+ {
+ Select->explicit_limit= $1.explicit_limit;
+ Select->select_limit= $1.select_limit;
+ Select->offset_limit= $1.offset_limit;
+ }
+ ;
+
limit_options:
limit_option
{
- SELECT_LEX *sel= Select;
- sel->select_limit= $1;
- sel->offset_limit= 0;
- sel->explicit_limit= 1;
+ $$.select_limit= $1;
+ $$.offset_limit= 0;
+ $$.explicit_limit= 1;
}
| limit_option ',' limit_option
{
- SELECT_LEX *sel= Select;
- sel->select_limit= $3;
- sel->offset_limit= $1;
- sel->explicit_limit= 1;
+ $$.select_limit= $3;
+ $$.offset_limit= $1;
+ $$.explicit_limit= 1;
}
| limit_option OFFSET_SYM limit_option
{
- SELECT_LEX *sel= Select;
- sel->select_limit= $1;
- sel->offset_limit= $3;
- sel->explicit_limit= 1;
+ $$.select_limit= $1;
+ $$.offset_limit= $3;
+ $$.explicit_limit= 1;
}
;
@@ -12879,6 +12851,7 @@ limit_rows_option:
LEX *lex=Lex;
lex->limit_rows_examined= $1;
}
+ ;
delete_limit_clause:
/* empty */
@@ -12897,6 +12870,77 @@ delete_limit_clause:
| LIMIT limit_option ROWS_SYM EXAMINED_SYM { thd->parse_error(); MYSQL_YYABORT; }
;
+opt_order_limit_lock:
+ /* empty */
+ { $$= NULL; }
+ | order_or_limit
+ {
+ $$= $1;
+ $$->lock.empty();
+ }
+ | order_or_limit select_lock_type
+ {
+ $$= $1;
+ $$->lock= $2;
+ }
+ | select_lock_type
+ {
+ $$= new(thd->mem_root) Lex_order_limit_lock;
+ if (!$$)
+ YYABORT;
+ $$->order_list= NULL;
+ $$->limit.empty();
+ $$->lock= $1;
+ }
+ ;
+query_expression_tail:
+ opt_order_limit_lock
+ ;
+
+opt_procedure_or_into:
+ /* empty */
+ {
+ $$.empty();
+ }
+ | procedure_clause opt_select_lock_type
+ {
+ $$= $2;
+ }
+ | into opt_select_lock_type
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WARN_DEPRECATED_SYNTAX,
+ ER_THD(thd, ER_WARN_DEPRECATED_SYNTAX),
+ "<select expression> INTO <destination>;",
+ "'SELECT <select list> INTO <destination>"
+ " FROM...'");
+ $$= $2;
+ }
+ ;
+
+
+order_or_limit:
+ order_clause opt_limit_clause
+ {
+ $$= new(thd->mem_root) Lex_order_limit_lock;
+ if (!$$)
+ YYABORT;
+ $$->order_list= $1;
+ $$->limit= $2;
+ }
+ | limit_clause
+ {
+ Lex_order_limit_lock *op= $$= new(thd->mem_root) Lex_order_limit_lock;
+ if (!$$)
+ YYABORT;
+ op->order_list= NULL;
+ op->limit= $1;
+ $$->order_list= NULL;
+ $$->limit= $1;
+ }
+ ;
+
+
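The Lex_order_limit_lock-based tail rules above collect ORDER BY, LIMIT/OFFSET and the lock clause into one structure; tails of these shapes are what they are written to cover (placeholder names):

    SELECT a FROM t1 ORDER BY a DESC LIMIT 10;
    SELECT a FROM t1 LIMIT 5, 10;
    SELECT a FROM t1 ORDER BY a LIMIT 10 OFFSET 5 FOR UPDATE;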
opt_plus:
/* empty */
| '+'
@@ -12905,6 +12949,7 @@ opt_plus:
int_num:
opt_plus NUM { int error; $$= (int) my_strtoll10($2.str, (char**) 0, &error); }
| '-' NUM { int error; $$= -(int) my_strtoll10($2.str, (char**) 0, &error); }
+ ;
ulong_num:
opt_plus NUM { int error; $$= (ulong) my_strtoll10($2.str, (char**) 0, &error); }
@@ -12928,7 +12973,7 @@ longlong_num:
| LONG_NUM { int error; $$= (longlong) my_strtoll10($1.str, (char**) 0, &error); }
| '-' NUM { int error; $$= -(longlong) my_strtoll10($2.str, (char**) 0, &error); }
| '-' LONG_NUM { int error; $$= -(longlong) my_strtoll10($2.str, (char**) 0, &error); }
-
+ ;
ulonglong_num:
opt_plus NUM { int error; $$= (ulonglong) my_strtoll10($2.str, (char**) 0, &error); }
@@ -12965,15 +13010,13 @@ bool:
ulong_num { $$= $1 != 0; }
| TRUE_SYM { $$= 1; }
| FALSE_SYM { $$= 0; }
-
+ ;
procedure_clause:
PROCEDURE_SYM ident /* Procedure name */
{
LEX *lex=Lex;
- DBUG_ASSERT(&lex->select_lex == lex->current_select);
-
lex->proc_list.elements=0;
lex->proc_list.first=0;
lex->proc_list.next= &lex->proc_list.first;
@@ -12993,6 +13036,7 @@ procedure_clause:
parameters are reduced.
*/
Lex->expr_allows_subselect= false;
+ Select->options|= OPTION_PROCEDURE_CLAUSE;
}
'(' procedure_list ')'
{
@@ -13076,6 +13120,7 @@ select_outvar:
into:
INTO into_destination
+ {}
;
into_destination:
@@ -13285,10 +13330,11 @@ table_list:
table_name:
table_ident
{
- if (unlikely(!Select->add_table_to_list(thd, $1, NULL,
- TL_OPTION_UPDATING,
- YYPS->m_lock_type,
- YYPS->m_mdl_type)))
+ if (!thd->lex->current_select_or_default()->
+ add_table_to_list(thd, $1, NULL,
+ TL_OPTION_UPDATING,
+ YYPS->m_lock_type,
+ YYPS->m_mdl_type))
MYSQL_YYABORT;
}
;
@@ -13361,17 +13407,24 @@ insert:
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_INSERT;
- lex->duplicates= DUP_ERROR;
+ lex->duplicates= DUP_ERROR;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
mysql_init_select(lex);
+ lex->current_select->parsing_place= BEFORE_OPT_LIST;
}
insert_lock_option
opt_ignore insert2
{
Select->set_lock_for_tables($3);
- Lex->current_select= &Lex->select_lex;
+ Lex->current_select= Lex->first_select_lex();
}
insert_field_spec opt_insert_update
- {}
+ {
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
;
replace:
@@ -13380,15 +13433,22 @@ replace:
LEX *lex=Lex;
lex->sql_command = SQLCOM_REPLACE;
lex->duplicates= DUP_REPLACE;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
mysql_init_select(lex);
+ lex->current_select->parsing_place= BEFORE_OPT_LIST;
}
replace_lock_option insert2
{
Select->set_lock_for_tables($3);
- Lex->current_select= &Lex->select_lex;
+ Lex->current_select= Lex->first_select_lex();
}
insert_field_spec
- {}
+ {
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
;
insert_lock_option:
@@ -13428,18 +13488,21 @@ insert2:
;
insert_table:
+ {
+ Select->save_parsing_place= Select->parsing_place;
+ }
table_name_with_opt_use_partition
{
LEX *lex=Lex;
- lex->field_list.empty();
+ //lex->field_list.empty();
lex->many_values.empty();
lex->insert_list=0;
- };
+ }
+ ;
insert_field_spec:
insert_values {}
- | '(' ')' insert_values {}
- | '(' fields ')' insert_values {}
+ | insert_field_list insert_values {}
| SET
{
LEX *lex=Lex;
@@ -13447,20 +13510,33 @@ insert_field_spec:
unlikely(lex->many_values.push_back(lex->insert_list,
thd->mem_root)))
MYSQL_YYABORT;
+ lex->current_select->parsing_place= NO_MATTER;
}
ident_eq_list
;
+insert_field_list:
+ LEFT_PAREN_ALT opt_fields ')'
+ {
+ Lex->current_select->parsing_place= AFTER_LIST;
+ }
+ ;
+
+opt_fields:
+ /* empty */
+ | fields
+ ;
+
fields:
fields ',' insert_ident
{ Lex->field_list.push_back($3, thd->mem_root); }
| insert_ident { Lex->field_list.push_back($1, thd->mem_root); }
;
+
+
insert_values:
- VALUES values_list {}
- | VALUE_SYM values_list {}
- | create_select_query_expression {}
+ create_select_query_expression {}
;
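With insert_field_spec now going through insert_field_list/opt_fields, and insert_values reduced to create_select_query_expression (defined elsewhere in the grammar), the intent is that the familiar INSERT shapes keep parsing, for example (placeholder table and column names):

    INSERT INTO t1 VALUES (1, 2);
    INSERT INTO t1 (a, b) VALUES (1, 2);
    INSERT INTO t1 () VALUES ();
    INSERT INTO t1 (a) SELECT a FROM t2;
    INSERT INTO t1 SET a = 1;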
values_list:
@@ -13604,27 +13680,48 @@ opt_insert_update:
}
;
+update_table_list:
+ table_ident opt_use_partition for_portion_of_time_clause
+ opt_table_alias_clause opt_key_definition
+ {
+ SELECT_LEX *sel= Select;
+ sel->table_join_options= 0;
+ if (!($$= Select->add_table_to_list(thd, $1, $4,
+ Select->get_table_join_options(),
+ YYPS->m_lock_type,
+ YYPS->m_mdl_type,
+ Select->pop_index_hints(),
+ $2)))
+ MYSQL_YYABORT;
+ $$->period_conditions= Lex->period_conditions;
+ }
+ | join_table_list { $$= $1; }
+ ;
+
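The new update_table_list branch wires a single-table UPDATE to for_portion_of_time_clause, which is defined elsewhere in the grammar; the statement shape it targets is roughly the following (period, table and column names are placeholders, and the exact FROM ... TO form comes from that other rule):

    UPDATE t1 FOR PORTION OF app_period
      FROM '2000-01-01' TO '2001-01-01'
      SET a = 1;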
/* Update rows in a table */
update:
UPDATE_SYM
{
LEX *lex= Lex;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
mysql_init_select(lex);
lex->sql_command= SQLCOM_UPDATE;
lex->duplicates= DUP_ERROR;
}
- opt_low_priority opt_ignore join_table_list
+ opt_low_priority opt_ignore update_table_list
SET update_list
{
LEX *lex= Lex;
- if (lex->select_lex.table_list.elements > 1)
+ if (lex->first_select_lex()->table_list.elements > 1)
lex->sql_command= SQLCOM_UPDATE_MULTI;
- else if (unlikely(lex->select_lex.get_table_list()->derived))
+ else if (lex->first_select_lex()->get_table_list()->derived)
{
/* it is single table update and it is update of derived table */
my_error(ER_NON_UPDATABLE_TABLE, MYF(0),
- lex->select_lex.get_table_list()->alias.str, "UPDATE");
+ lex->first_select_lex()->get_table_list()->alias.str,
+ "UPDATE");
MYSQL_YYABORT;
}
/*
@@ -13634,7 +13731,14 @@ update:
*/
Select->set_lock_for_tables($3);
}
- opt_where_clause opt_order_clause delete_limit_clause {}
+ opt_where_clause opt_order_clause delete_limit_clause
+ {
+ if ($10)
+ Select->order_list= *($10);
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
;
update_list:
@@ -13678,12 +13782,13 @@ delete:
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_DELETE;
- mysql_init_select(lex);
YYPS->m_lock_type= TL_WRITE_DEFAULT;
YYPS->m_mdl_type= MDL_SHARED_WRITE;
-
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ mysql_init_select(lex);
lex->ignore= 0;
- lex->select_lex.init_order();
+ lex->first_select_lex()->order_list.empty();
}
delete_part2
;
@@ -13704,6 +13809,7 @@ delete_part2:
| HISTORY_SYM delete_single_table opt_delete_system_time
{
Lex->last_table()->vers_conditions= Lex->vers_conditions;
+ Lex->pop_select(); //main select
}
;
@@ -13722,12 +13828,25 @@ delete_single_table:
}
;
+delete_single_table_for_period:
+ delete_single_table opt_for_portion_of_time_clause
+ {
+ if ($2)
+ Lex->last_table()->period_conditions= Lex->period_conditions;
+ }
+ ;
+
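delete_single_table_for_period plays the same role for DELETE, with the same caveats as the UPDATE example above:

    DELETE FROM t1 FOR PORTION OF app_period
      FROM '2000-01-01' TO '2001-01-01'
      WHERE id = 5;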
single_multi:
- delete_single_table
+ delete_single_table_for_period
opt_where_clause
opt_order_clause
delete_limit_clause
- opt_select_expressions {}
+ opt_select_expressions
+ {
+ if ($3)
+ Select->order_list= *($3);
+ Lex->pop_select(); //main select
+ }
| table_wild_list
{
mysql_init_multi_delete(Lex);
@@ -13738,6 +13857,9 @@ single_multi:
{
if (unlikely(multi_delete_set_locks_and_link_aux_tables(Lex)))
MYSQL_YYABORT;
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
}
| FROM table_alias_ref_list
{
@@ -13749,6 +13871,9 @@ single_multi:
{
if (unlikely(multi_delete_set_locks_and_link_aux_tables(Lex)))
MYSQL_YYABORT;
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
}
;
@@ -13817,9 +13942,9 @@ truncate:
LEX* lex= Lex;
lex->sql_command= SQLCOM_TRUNCATE;
lex->alter_info.reset();
- lex->select_lex.options= 0;
- lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED;
- lex->select_lex.init_order();
+ lex->first_select_lex()->options= 0;
+ lex->sql_cache= LEX::SQL_CACHE_UNSPECIFIED;
+ lex->first_select_lex()->order_list.empty();
YYPS->m_lock_type= TL_WRITE;
YYPS->m_mdl_type= MDL_EXCLUSIVE;
}
@@ -13911,6 +14036,8 @@ show:
LEX *lex=Lex;
lex->wild=0;
lex->ident= null_clex_str;
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
mysql_init_select(lex);
lex->current_select->parsing_place= SELECT_LIST;
lex->create_info.init();
@@ -13918,6 +14045,7 @@ show:
show_param
{
Select->parsing_place= NO_MATTER;
+ Lex->pop_select(); //main select
}
;
@@ -13933,40 +14061,40 @@ show_param:
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_TABLES;
- lex->select_lex.db= $3;
- if (unlikely(prepare_schema_table(thd, lex, 0, SCH_TABLE_NAMES)))
+ lex->first_select_lex()->db= $3;
+ if (prepare_schema_table(thd, lex, 0, SCH_TABLE_NAMES))
MYSQL_YYABORT;
}
| opt_full TRIGGERS_SYM opt_db wild_and_where
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_TRIGGERS;
- lex->select_lex.db= $3;
- if (unlikely(prepare_schema_table(thd, lex, 0, SCH_TRIGGERS)))
+ lex->first_select_lex()->db= $3;
+ if (prepare_schema_table(thd, lex, 0, SCH_TRIGGERS))
MYSQL_YYABORT;
}
| EVENTS_SYM opt_db wild_and_where
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_EVENTS;
- lex->select_lex.db= $2;
- if (unlikely(prepare_schema_table(thd, lex, 0, SCH_EVENTS)))
+ lex->first_select_lex()->db= $2;
+ if (prepare_schema_table(thd, lex, 0, SCH_EVENTS))
MYSQL_YYABORT;
}
| TABLE_SYM STATUS_SYM opt_db wild_and_where
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_TABLE_STATUS;
- lex->select_lex.db= $3;
- if (unlikely(prepare_schema_table(thd, lex, 0, SCH_TABLES)))
+ lex->first_select_lex()->db= $3;
+ if (prepare_schema_table(thd, lex, 0, SCH_TABLES))
MYSQL_YYABORT;
}
| OPEN_SYM TABLES opt_db wild_and_where
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_OPEN_TABLES;
- lex->select_lex.db= $3;
- if (unlikely(prepare_schema_table(thd, lex, 0, SCH_OPEN_TABLES)))
+ lex->first_select_lex()->db= $3;
+ if (prepare_schema_table(thd, lex, 0, SCH_OPEN_TABLES))
MYSQL_YYABORT;
}
| PLUGINS_SYM
@@ -14015,12 +14143,13 @@ show_param:
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_BINLOG_EVENTS;
}
- opt_limit_clause
+ opt_global_limit_clause
| RELAYLOG_SYM optional_connection_name EVENTS_SYM binlog_in binlog_from
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_RELAYLOG_EVENTS;
- } opt_limit_clause
+ }
+ opt_global_limit_clause
| keys_or_index from_or_in table_ident opt_db opt_where_clause
{
LEX *lex= Lex;
@@ -14062,13 +14191,13 @@ show_param:
LEX_CSTRING var= {STRING_WITH_LEN("error_count")};
(void) create_select_for_variable(thd, &var);
}
- | WARNINGS opt_limit_clause
+ | WARNINGS opt_global_limit_clause
{ Lex->sql_command = SQLCOM_SHOW_WARNS;}
- | ERRORS opt_limit_clause
+ | ERRORS opt_global_limit_clause
{ Lex->sql_command = SQLCOM_SHOW_ERRORS;}
| PROFILES_SYM
{ Lex->sql_command = SQLCOM_SHOW_PROFILES; }
- | PROFILE_SYM opt_profile_defs opt_profile_args opt_limit_clause
+ | PROFILE_SYM opt_profile_defs opt_profile_args opt_global_limit_clause
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_PROFILE;
@@ -14130,7 +14259,7 @@ show_param:
{
LEX *lex= Lex;
lex->sql_command = SQLCOM_SHOW_CREATE;
- if (unlikely(!lex->select_lex.add_table_to_list(thd, $3, NULL,0)))
+ if (!lex->first_select_lex()->add_table_to_list(thd, $3, NULL,0))
MYSQL_YYABORT;
lex->create_info.storage_media= HA_SM_DEFAULT;
}
@@ -14138,7 +14267,7 @@ show_param:
{
LEX *lex= Lex;
lex->sql_command = SQLCOM_SHOW_CREATE;
- if (unlikely(!lex->select_lex.add_table_to_list(thd, $3, NULL, 0)))
+ if (!lex->first_select_lex()->add_table_to_list(thd, $3, NULL, 0))
MYSQL_YYABORT;
lex->table_type= TABLE_TYPE_VIEW;
}
@@ -14146,7 +14275,7 @@ show_param:
{
LEX *lex= Lex;
lex->sql_command = SQLCOM_SHOW_CREATE;
- if (unlikely(!lex->select_lex.add_table_to_list(thd, $3, NULL, 0)))
+ if (!lex->first_select_lex()->add_table_to_list(thd, $3, NULL, 0))
MYSQL_YYABORT;
lex->table_type= TABLE_TYPE_SEQUENCE;
}
@@ -14363,7 +14492,7 @@ describe:
mysql_init_select(lex);
lex->current_select->parsing_place= SELECT_LIST;
lex->sql_command= SQLCOM_SHOW_FIELDS;
- lex->select_lex.db= null_clex_str;
+ lex->first_select_lex()->db= null_clex_str;
lex->verbose= 0;
if (unlikely(prepare_schema_table(thd, lex, $2, SCH_COLUMNS)))
MYSQL_YYABORT;
@@ -14377,12 +14506,13 @@ describe:
explainable_command
{
LEX *lex=Lex;
- lex->select_lex.options|= SELECT_DESCRIBE;
+ lex->first_select_lex()->options|= SELECT_DESCRIBE;
}
;
explainable_command:
select
+ | select_into
| insert
| replace
| update
@@ -14403,6 +14533,8 @@ analyze_stmt_command:
opt_extended_describe:
EXTENDED_SYM { Lex->describe|= DESCRIBE_EXTENDED; }
+ | EXTENDED_SYM ALL
+ { Lex->describe|= DESCRIBE_EXTENDED | DESCRIBE_EXTENDED2; }
| PARTITIONS_SYM { Lex->describe|= DESCRIBE_PARTITIONS; }
| opt_format_json {}
;
@@ -14445,8 +14577,7 @@ flush:
lex->type= 0;
lex->no_write_to_binlog= $2;
}
- flush_options
- {}
+ flush_options {}
;
flush_options:
@@ -14463,6 +14594,7 @@ flush_options:
opt_table_list opt_flush_lock
{}
| flush_options_list
+ {}
;
opt_flush_lock:
@@ -14548,6 +14680,8 @@ flush_option:
{ Lex->type|= REFRESH_DES_KEY_FILE; }
| RESOURCES
{ Lex->type|= REFRESH_USER_RESOURCES; }
+ | SSL_SYM
+ { Lex->type|= REFRESH_SSL;}
| IDENT_sys remember_tok_start
{
Lex->type|= REFRESH_GENERIC;
@@ -14569,6 +14703,37 @@ opt_table_list:
| table_list {}
;
+backup:
+ BACKUP_SYM backup_statements {}
+ ;
+
+backup_statements:
+ STAGE_SYM ident
+ {
+ int type;
+ if (unlikely(Lex->sphead))
+ my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "BACKUP STAGE"));
+ if ((type= find_type($2.str, &backup_stage_names,
+ FIND_TYPE_NO_PREFIX)) <= 0)
+ my_yyabort_error((ER_BACKUP_UNKNOWN_STAGE, MYF(0), $2.str));
+ Lex->sql_command= SQLCOM_BACKUP;
+ Lex->backup_stage= (backup_stages) (type-1);
+ break;
+ }
+ | LOCK_SYM table_ident
+ {
+ if (unlikely(!Select->add_table_to_list(thd, $2, NULL, 0,
+ TL_READ, MDL_SHARED_HIGH_PRIO)))
+ MYSQL_YYABORT;
+ Lex->sql_command= SQLCOM_BACKUP_LOCK;
+ }
+ | UNLOCK_SYM
+ {
+ /* Table list is empty for unlock */
+ Lex->sql_command= SQLCOM_BACKUP_LOCK;
+ }
+ ;
+
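The new backup rules accept statements along these lines (stage names are resolved against backup_stage_names, which is defined elsewhere; START and END are examples, and test.t1 is a placeholder):

    BACKUP STAGE START;
    BACKUP STAGE END;
    BACKUP LOCK test.t1;
    BACKUP UNLOCK;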
opt_delete_gtid_domain:
/* empty */ {}
| DELETE_DOMAIN_ID_SYM '=' '(' delete_domain_id_list ')'
@@ -14599,6 +14764,7 @@ delete_domain_id:
optional_flush_tables_arguments:
/* empty */ {$$= 0;}
| AND_SYM DISABLE_SYM CHECKPOINT_SYM {$$= REFRESH_CHECKPOINT; }
+ ;
reset:
RESET_SYM
@@ -14642,34 +14808,18 @@ master_reset_options:
;
purge:
- PURGE
+ PURGE master_or_binary LOGS_SYM TO_SYM TEXT_STRING_sys
{
- LEX *lex=Lex;
- lex->type=0;
- lex->sql_command = SQLCOM_PURGE;
+ Lex->stmt_purge_to($5);
}
- purge_options
- {}
- ;
-
-purge_options:
- master_or_binary LOGS_SYM purge_option
- ;
-
-purge_option:
- TO_SYM TEXT_STRING_sys
+ | PURGE master_or_binary LOGS_SYM BEFORE_SYM expr_no_subselect
{
- Lex->to_log = $2.str;
- }
- | BEFORE_SYM expr
- {
- LEX *lex= Lex;
- lex->value_list.empty();
- lex->value_list.push_front($2, thd->mem_root);
- lex->sql_command= SQLCOM_PURGE_BEFORE;
+ if (Lex->stmt_purge_before($5))
+ MYSQL_YYABORT;
}
;
+
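The flattened purge rule keeps both historical forms, now reduced directly into the LEX helpers, for example (the log file name is a placeholder):

    PURGE BINARY LOGS TO 'mysql-bin.000123';
    PURGE MASTER LOGS BEFORE NOW() - INTERVAL 3 DAY;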
/* kill threads */
kill:
@@ -14691,6 +14841,7 @@ kill_type:
/* Empty */ { $$= (int) KILL_HARD_BIT; }
| HARD_SYM { $$= (int) KILL_HARD_BIT; }
| SOFT_SYM { $$= 0; }
+ ;
kill_option:
/* empty */ { $$= (int) KILL_CONNECTION; }
@@ -14718,8 +14869,16 @@ kill_expr:
shutdown:
SHUTDOWN { Lex->sql_command= SQLCOM_SHUTDOWN; }
+ shutdown_option {}
;
+shutdown_option:
+ /* Empty */ { Lex->is_shutdown_wait_for_slaves= false; }
+ | WAIT_SYM FOR_SYM ALL SLAVES
+ {
+ Lex->is_shutdown_wait_for_slaves= true;
+ }
+ ;
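With the new shutdown_option, both of these parse:

    SHUTDOWN;
    SHUTDOWN WAIT FOR ALL SLAVES;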
/* change database */
use:
@@ -14727,7 +14886,7 @@ use:
{
LEX *lex=Lex;
lex->sql_command=SQLCOM_CHANGE_DB;
- lex->select_lex.db= $2;
+ lex->first_select_lex()->db= $2;
}
;
@@ -14744,6 +14903,9 @@ load:
$2 == FILETYPE_CSV ? "LOAD DATA" : "LOAD XML");
MYSQL_YYABORT;
}
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ mysql_init_select(lex);
}
load_data_lock opt_local INFILE TEXT_STRING_filesystem
{
@@ -14774,7 +14936,11 @@ load:
opt_xml_rows_identified_by
opt_field_term opt_line_term opt_ignore_lines opt_field_or_var_spec
opt_load_data_set_spec
- {}
+ {
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
;
data_or_xml:
@@ -14972,11 +15138,6 @@ hex_or_bin_String:
$1.length);
if (unlikely(tmp == NULL))
MYSQL_YYABORT;
- /*
- it is OK only emulate fix_fields, because we need only
- value of constant
- */
- tmp->quick_fix_field();
$$= tmp->val_str((String*) 0);
}
| HEX_STRING
@@ -14985,7 +15146,6 @@ hex_or_bin_String:
$1.length);
if (unlikely(tmp == NULL))
MYSQL_YYABORT;
- tmp->quick_fix_field();
$$= tmp->val_str((String*) 0);
}
| BIN_NUM
@@ -14998,7 +15158,6 @@ hex_or_bin_String:
it is OK only emulate fix_fields, because we need only
value of constant
*/
- tmp->quick_fix_field();
$$= tmp->val_str((String*) 0);
}
;
@@ -15147,26 +15306,23 @@ NUM_literal:
temporal_literal:
DATE_SYM TEXT_STRING
{
- if (unlikely(!($$= create_temporal_literal(thd, $2.str, $2.length,
- YYCSCL,
- MYSQL_TYPE_DATE,
- true))))
+ if (unlikely(!($$= type_handler_newdate.create_literal_item(thd,
+ $2.str, $2.length,
+ YYCSCL, true))))
MYSQL_YYABORT;
}
| TIME_SYM TEXT_STRING
{
- if (unlikely(!($$= create_temporal_literal(thd, $2.str, $2.length,
- YYCSCL,
- MYSQL_TYPE_TIME,
- true))))
+ if (unlikely(!($$= type_handler_time2.create_literal_item(thd,
+ $2.str, $2.length,
+ YYCSCL, true))))
MYSQL_YYABORT;
}
| TIMESTAMP TEXT_STRING
{
- if (unlikely(!($$= create_temporal_literal(thd, $2.str, $2.length,
- YYCSCL,
- MYSQL_TYPE_DATETIME,
- true))))
+ if (unlikely(!($$= type_handler_datetime2.create_literal_item(thd,
+ $2.str, $2.length,
+ YYCSCL, true))))
MYSQL_YYABORT;
}
;
@@ -15182,17 +15338,21 @@ opt_with_clause:
with_clause:
- WITH opt_recursive
+ WITH opt_recursive
{
+ LEX *lex= Lex;
With_clause *with_clause=
new With_clause($2, Lex->curr_with_clause);
if (unlikely(with_clause == NULL))
MYSQL_YYABORT;
- Lex->derived_tables|= DERIVED_WITH;
- Lex->curr_with_clause= with_clause;
+ lex->derived_tables|= DERIVED_WITH;
+ lex->curr_with_clause= with_clause;
with_clause->add_to_list(Lex->with_clauses_list_last_next);
+ if (lex->current_select &&
+ lex->current_select->parsing_place == BEFORE_OPT_LIST)
+ lex->current_select->parsing_place= NO_MATTER;
}
- with_list
+ with_list
{
$$= Lex->curr_with_clause;
Lex->curr_with_clause= Lex->curr_with_clause->pop();
@@ -15221,15 +15381,14 @@ with_list_element:
MYSQL_YYABORT;
Lex->with_column_list.empty();
}
- AS '(' remember_tok_start subselect remember_tok_end ')'
+ AS '(' remember_tok_start query_expression remember_tok_end ')'
{
LEX *lex= thd->lex;
const char *query_start= lex->sphead ? lex->sphead->m_tmp_query
: thd->query();
char *spec_start= $6 + 1;
- With_element *elem= new With_element($1, *$2, $7->master_unit());
- if (unlikely(elem == NULL) ||
- unlikely(Lex->curr_with_clause->add_with_element(elem)))
+ With_element *elem= new With_element($1, *$2, $7);
+ if (elem == NULL || Lex->curr_with_clause->add_with_element(elem))
MYSQL_YYABORT;
if (elem->set_unparsed_spec(thd, spec_start, $8,
spec_start - query_start))
@@ -15567,11 +15726,9 @@ ident_or_text:
user_maybe_role:
ident_or_text
{
- if (unlikely(!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))))
+ if (unlikely(!($$=(LEX_USER*) thd->calloc(sizeof(LEX_USER)))))
MYSQL_YYABORT;
$$->user = $1;
- $$->host= null_clex_str; // User or Role, see get_current_user()
- $$->reset_auth();
if (unlikely(check_string_char_length(&$$->user, ER_USERNAME,
username_char_length,
@@ -15580,10 +15737,9 @@ user_maybe_role:
}
| ident_or_text '@' ident_or_text
{
- if (unlikely(!($$=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))))
+ if (unlikely(!($$=(LEX_USER*) thd->calloc(sizeof(LEX_USER)))))
MYSQL_YYABORT;
$$->user = $1; $$->host=$3;
- $$->reset_auth();
if (unlikely(check_string_char_length(&$$->user, ER_USERNAME,
username_char_length,
@@ -15613,8 +15769,7 @@ user_maybe_role:
if (unlikely(!($$=(LEX_USER*)thd->calloc(sizeof(LEX_USER)))))
MYSQL_YYABORT;
$$->user= current_user;
- $$->plugin= empty_clex_str;
- $$->auth= empty_clex_str;
+ $$->auth= new (thd->mem_root) USER_AUTH();
}
;
@@ -15912,6 +16067,7 @@ keyword_data_type:
*/
keyword_sp_var_and_label:
ACTION
+ | ACCOUNT_SYM
| ADDDATE_SYM
| ADMIN_SYM
| AFTER_SYM
@@ -15996,6 +16152,7 @@ keyword_sp_var_and_label:
| EXCEPTION_MARIADB_SYM
| EXCHANGE_SYM
| EXPANSION_SYM
+ | EXPIRE_SYM
| EXPORT_SYM
| EXTENDED_SYM
| EXTENT_SIZE_SYM
@@ -16088,6 +16245,7 @@ keyword_sp_var_and_label:
| NAME_SYM
| NEXT_SYM %prec PREC_BELOW_CONTRACTION_TOKEN2
| NEXTVAL_SYM
+ | NEVER_SYM
| NEW_SYM
| NOCACHE_SYM
| NOCYCLE_SYM
@@ -16174,6 +16332,7 @@ keyword_sp_var_and_label:
| SQL_BUFFER_RESULT
| SQL_NO_CACHE_SYM
| SQL_THREAD
+ | STAGE_SYM
| STARTS_SYM
| STATEMENT_SYM
| STATUS_SYM
@@ -16241,14 +16400,22 @@ set:
SET
{
LEX *lex=Lex;
+ if (lex->main_select_push())
+ MYSQL_YYABORT;
lex->set_stmt_init();
lex->var_list.empty();
sp_create_assignment_lex(thd, yychar == YYEMPTY);
}
start_option_value_list
- {}
+ {
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
+ }
| SET STATEMENT_SYM
{
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
Lex->set_stmt_init();
}
set_stmt_option_value_following_option_type_list
@@ -16258,6 +16425,9 @@ set:
my_yyabort_error((ER_SUBQUERIES_NOT_SUPPORTED, MYF(0), "SET STATEMENT"));
lex->stmt_var_list= lex->var_list;
lex->var_list.empty();
+ Lex->pop_select(); //main select
+ if (Lex->check_main_unit_semantics())
+ MYSQL_YYABORT;
}
FOR_SYM verb_clause
{}
@@ -16647,21 +16817,29 @@ opt_for_user:
thd->calloc(sizeof(LEX_USER)))))
MYSQL_YYABORT;
lex->definer->user= current_user;
- lex->definer->plugin= empty_clex_str;
- lex->definer->auth= empty_clex_str;
+ lex->definer->auth= new (thd->mem_root) USER_AUTH();
}
| FOR_SYM user equal { Lex->definer= $2; }
;
text_or_password:
- TEXT_STRING { Lex->definer->pwhash= $1;}
- | PASSWORD_SYM '(' TEXT_STRING ')' { Lex->definer->pwtext= $3; }
+ TEXT_STRING
+ {
+ Lex->definer->auth= new (thd->mem_root) USER_AUTH();
+ Lex->definer->auth->auth_str= $1;
+ }
+ | PASSWORD_SYM '(' TEXT_STRING ')'
+ {
+ Lex->definer->auth= new (thd->mem_root) USER_AUTH();
+ Lex->definer->auth->pwtext= $3;
+ }
| OLD_PASSWORD_SYM '(' TEXT_STRING ')'
{
- Lex->definer->pwtext= $3;
- Lex->definer->pwhash.str= Item_func_password::alloc(thd,
+ Lex->definer->auth= new (thd->mem_root) USER_AUTH();
+ Lex->definer->auth->pwtext= $3;
+ Lex->definer->auth->auth_str.str= Item_func_password::alloc(thd,
$3.str, $3.length, Item_func_password::OLD);
- Lex->definer->pwhash.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
+ Lex->definer->auth->auth_str.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
}
;
@@ -16731,7 +16909,7 @@ table_lock_list:
;
table_lock:
- table_ident opt_table_alias lock_option
+ table_ident opt_table_alias_clause lock_option
{
thr_lock_type lock_type= (thr_lock_type) $3;
bool lock_for_write= (lock_type >= TL_WRITE_ALLOW_WRITE);
@@ -16776,27 +16954,37 @@ unlock:
*/
handler:
- HANDLER_SYM table_ident OPEN_SYM opt_table_alias
+ HANDLER_SYM
+ {
+ if (Lex->main_select_push())
+ MYSQL_YYABORT;
+ }
+ handler_tail
+ {
+ Lex->pop_select(); //main select
+ }
+ ;
+
+handler_tail:
+ table_ident OPEN_SYM opt_table_alias_clause
{
LEX *lex= Lex;
if (unlikely(lex->sphead))
my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HANDLER"));
lex->sql_command = SQLCOM_HA_OPEN;
- if (unlikely(!lex->current_select->add_table_to_list(thd, $2, $4,
- 0)))
+ if (!lex->current_select->add_table_to_list(thd, $1, $3, 0))
MYSQL_YYABORT;
}
- | HANDLER_SYM table_ident_nodb CLOSE_SYM
+ | table_ident_nodb CLOSE_SYM
{
LEX *lex= Lex;
if (unlikely(lex->sphead))
my_yyabort_error((ER_SP_BADSTATEMENT, MYF(0), "HANDLER"));
lex->sql_command = SQLCOM_HA_CLOSE;
- if (unlikely(!lex->current_select->add_table_to_list(thd, $2, 0,
- 0)))
+ if (!lex->current_select->add_table_to_list(thd, $1, 0, 0))
MYSQL_YYABORT;
}
- | HANDLER_SYM table_ident_nodb READ_SYM
+ | table_ident_nodb READ_SYM
{
LEX *lex=Lex;
if (unlikely(lex->sphead))
@@ -16810,15 +16998,24 @@ handler:
lex->current_select->select_limit= one;
lex->current_select->offset_limit= 0;
lex->limit_rows_examined= 0;
- if (unlikely(!lex->current_select->add_table_to_list(thd, $2, 0,
- 0)))
+ if (!lex->current_select->add_table_to_list(thd, $1, 0, 0))
MYSQL_YYABORT;
}
- handler_read_or_scan opt_where_clause opt_limit_clause
+ handler_read_or_scan opt_where_clause opt_global_limit_clause
{
- Lex->expr_allows_subselect= TRUE;
+ LEX *lex=Lex;
+ lex->expr_allows_subselect= TRUE;
+ if (!lex->current_select->explicit_limit)
+ {
+ Item *one= new (thd->mem_root) Item_int(thd, (int32) 1);
+ if (one == NULL)
+ MYSQL_YYABORT;
+ lex->current_select->select_limit= one;
+ lex->current_select->offset_limit= 0;
+ lex->limit_rows_examined= 0;
+ }
/* Stored functions are not supported for HANDLER READ. */
- if (unlikely(Lex->uses_stored_routines()))
+ if (lex->uses_stored_routines())
{
my_error(ER_NOT_SUPPORTED_YET, MYF(0),
"stored functions in HANDLER ... READ");
@@ -16988,12 +17185,14 @@ grant_command:
;
opt_with_admin:
- /* nothing */ { Lex->definer = 0; }
- | WITH ADMIN_SYM user_or_role { Lex->definer = $3; }
+ /* nothing */ { Lex->definer = 0; }
+ | WITH ADMIN_SYM user_or_role { Lex->definer = $3; }
+ ;
opt_with_admin_option:
- /* nothing */ { Lex->with_admin_option= false; }
- | WITH ADMIN_SYM OPTION { Lex->with_admin_option= true; }
+ /* nothing */ { Lex->with_admin_option= false; }
+ | WITH ADMIN_SYM OPTION { Lex->with_admin_option= true; }
+ ;
role_list:
grant_role
@@ -17014,7 +17213,7 @@ current_role:
if (unlikely(!($$=(LEX_USER*) thd->calloc(sizeof(LEX_USER)))))
MYSQL_YYABORT;
$$->user= current_role;
- $$->reset_auth();
+ $$->auth= NULL;
}
;
@@ -17031,7 +17230,7 @@ grant_role:
MYSQL_YYABORT;
$$->user= $1;
$$->host= empty_clex_str;
- $$->reset_auth();
+ $$->auth= NULL;
if (unlikely(check_string_char_length(&$$->user, ER_USERNAME,
username_char_length,
@@ -17121,23 +17320,23 @@ require_list_element:
SUBJECT_SYM TEXT_STRING
{
LEX *lex=Lex;
- if (unlikely(lex->x509_subject))
+ if (lex->account_options.x509_subject.str)
my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "SUBJECT"));
- lex->x509_subject=$2.str;
+ lex->account_options.x509_subject= $2;
}
| ISSUER_SYM TEXT_STRING
{
LEX *lex=Lex;
- if (unlikely(lex->x509_issuer))
+ if (lex->account_options.x509_issuer.str)
my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "ISSUER"));
- lex->x509_issuer=$2.str;
+ lex->account_options.x509_issuer= $2;
}
| CIPHER_SYM TEXT_STRING
{
LEX *lex=Lex;
- if (unlikely(lex->ssl_cipher))
+ if (lex->account_options.ssl_cipher.str)
my_yyabort_error((ER_DUP_ARGUMENT, MYF(0), "CIPHER"));
- lex->ssl_cipher=$2.str;
+ lex->account_options.ssl_cipher= $2;
}
;
@@ -17228,29 +17427,65 @@ grant_user:
user IDENTIFIED_SYM BY TEXT_STRING
{
$$= $1;
- $1->pwtext= $4;
- if (unlikely(Lex->sql_command == SQLCOM_REVOKE))
- MYSQL_YYABORT;
+ $1->auth= new (thd->mem_root) USER_AUTH();
+ $1->auth->pwtext= $4;
}
| user IDENTIFIED_SYM BY PASSWORD_SYM TEXT_STRING
{
$$= $1;
- $1->pwhash= $5;
+ $1->auth= new (thd->mem_root) USER_AUTH();
+ $1->auth->auth_str= $5;
}
- | user IDENTIFIED_SYM via_or_with ident_or_text
+ | user IDENTIFIED_SYM via_or_with auth_expression
{
$$= $1;
- $1->plugin= $4;
- $1->auth= empty_clex_str;
+ $1->auth= $4;
}
- | user IDENTIFIED_SYM via_or_with ident_or_text using_or_as TEXT_STRING_sys
+ | user_or_role
{
$$= $1;
- $1->plugin= $4;
- $1->auth= $6;
}
- | user_or_role
- { $$= $1; }
+ ;
+
+auth_expression:
+ auth_token OR_SYM auth_expression
+ {
+ $$= $1;
+ DBUG_ASSERT($$->next == NULL);
+ $$->next= $3;
+ }
+ | auth_token
+ {
+ $$= $1;
+ }
+ ;
+
+auth_token:
+ ident_or_text opt_auth_str
+ {
+ $$= $2;
+ $$->plugin= $1;
+ }
+ ;
+
+opt_auth_str:
+ /* empty */
+ {
+ if (!($$=(USER_AUTH*) thd->calloc(sizeof(USER_AUTH))))
+ MYSQL_YYABORT;
+ }
+ | using_or_as TEXT_STRING_sys
+ {
+ if (!($$=(USER_AUTH*) thd->calloc(sizeof(USER_AUTH))))
+ MYSQL_YYABORT;
+ $$->auth_str= $2;
+ }
+ | using_or_as PASSWORD_SYM '(' TEXT_STRING ')'
+ {
+ if (!($$=(USER_AUTH*) thd->calloc(sizeof(USER_AUTH))))
+ MYSQL_YYABORT;
+ $$->pwtext= $4;
+ }
;
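The auth_expression / auth_token / opt_auth_str chain is what allows a user specification to carry several authentication methods joined with OR; statements of this shape exercise it (ed25519, unix_socket and mysql_native_password are example plugin names, not something this hunk defines, and user names are placeholders):

    CREATE USER foo@localhost
      IDENTIFIED VIA ed25519 USING PASSWORD('secret') OR unix_socket;
    GRANT SELECT ON db.* TO bar@'%'
      IDENTIFIED VIA mysql_native_password USING 'hash';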
opt_column_list:
@@ -17300,52 +17535,47 @@ opt_require_clause:
/* empty */
| REQUIRE_SYM require_list
{
- Lex->ssl_type=SSL_TYPE_SPECIFIED;
+ Lex->account_options.ssl_type= SSL_TYPE_SPECIFIED;
}
| REQUIRE_SYM SSL_SYM
{
- Lex->ssl_type=SSL_TYPE_ANY;
+ Lex->account_options.ssl_type= SSL_TYPE_ANY;
}
| REQUIRE_SYM X509_SYM
{
- Lex->ssl_type=SSL_TYPE_X509;
+ Lex->account_options.ssl_type= SSL_TYPE_X509;
}
| REQUIRE_SYM NONE_SYM
{
- Lex->ssl_type=SSL_TYPE_NONE;
+ Lex->account_options.ssl_type= SSL_TYPE_NONE;
}
;
resource_option:
MAX_QUERIES_PER_HOUR ulong_num
{
- LEX *lex=Lex;
- lex->mqh.questions=$2;
- lex->mqh.specified_limits|= USER_RESOURCES::QUERIES_PER_HOUR;
+ Lex->account_options.questions=$2;
+ Lex->account_options.specified_limits|= USER_RESOURCES::QUERIES_PER_HOUR;
}
| MAX_UPDATES_PER_HOUR ulong_num
{
- LEX *lex=Lex;
- lex->mqh.updates=$2;
- lex->mqh.specified_limits|= USER_RESOURCES::UPDATES_PER_HOUR;
+ Lex->account_options.updates=$2;
+ Lex->account_options.specified_limits|= USER_RESOURCES::UPDATES_PER_HOUR;
}
| MAX_CONNECTIONS_PER_HOUR ulong_num
{
- LEX *lex=Lex;
- lex->mqh.conn_per_hour= $2;
- lex->mqh.specified_limits|= USER_RESOURCES::CONNECTIONS_PER_HOUR;
+ Lex->account_options.conn_per_hour= $2;
+ Lex->account_options.specified_limits|= USER_RESOURCES::CONNECTIONS_PER_HOUR;
}
| MAX_USER_CONNECTIONS_SYM int_num
{
- LEX *lex=Lex;
- lex->mqh.user_conn= $2;
- lex->mqh.specified_limits|= USER_RESOURCES::USER_CONNECTIONS;
+ Lex->account_options.user_conn= $2;
+ Lex->account_options.specified_limits|= USER_RESOURCES::USER_CONNECTIONS;
}
| MAX_STATEMENT_TIME_SYM NUM_literal
{
- LEX *lex=Lex;
- lex->mqh.max_statement_time= $2->val_real();
- lex->mqh.specified_limits|= USER_RESOURCES::MAX_STATEMENT_TIME;
+ Lex->account_options.max_statement_time= $2->val_real();
+ Lex->account_options.specified_limits|= USER_RESOURCES::MAX_STATEMENT_TIME;
}
;
@@ -17394,8 +17624,8 @@ compound_statement:
sp_proc_stmt_compound_ok
{
Lex->sql_command= SQLCOM_COMPOUND;
- Lex->sphead->set_stmt_end(thd);
- Lex->sphead->restore_thd_mem_root(thd);
+ if (Lex->sp_body_finalize_procedure(thd))
+ MYSQL_YYABORT;
}
;
@@ -17482,214 +17712,27 @@ release:
*/
unit_type_decl:
- UNION_SYM
- { $$= UNION_TYPE; }
+ UNION_SYM union_option
+ { $$.unit_type= UNION_TYPE; $$.distinct= $2; }
| INTERSECT_SYM
- { $$= INTERSECT_TYPE; }
+ { $$.unit_type= INTERSECT_TYPE; $$.distinct= 1; }
| EXCEPT_SYM
- { $$= EXCEPT_TYPE; }
-
-
-union_clause:
- /* empty */ {}
- | union_list
- ;
-
-union_list:
- unit_type_decl union_option
- {
- if (unlikely(Lex->add_select_to_union_list((bool)$2, $1, TRUE)))
- MYSQL_YYABORT;
- }
- union_list_part2
- {
- /*
- Remove from the name resolution context stack the context of the
- last select in the union.
- */
- Lex->pop_context();
- }
- ;
-
-union_list_view:
- unit_type_decl union_option
- {
- if (unlikely(Lex->add_select_to_union_list((bool)$2, $1, TRUE)))
- MYSQL_YYABORT;
- }
- query_expression_body_view
- {
- Lex->pop_context();
- }
- ;
-
-union_order_or_limit:
- {
- LEX *lex= thd->lex;
- DBUG_ASSERT(lex->current_select->linkage != GLOBAL_OPTIONS_TYPE);
- SELECT_LEX *sel= lex->current_select;
- SELECT_LEX_UNIT *unit= sel->master_unit();
- SELECT_LEX *fake= unit->fake_select_lex;
- if (fake)
- {
- fake->no_table_names_allowed= 1;
- lex->current_select= fake;
- }
- thd->where= "global ORDER clause";
- }
- order_or_limit
- {
- thd->lex->current_select->no_table_names_allowed= 0;
- thd->where= "";
- }
- ;
-
-order_or_limit:
- order_clause opt_limit_clause
- | limit_clause
+ { $$.unit_type= EXCEPT_TYPE; $$.distinct= 1; }
;
/*
Start a UNION, for non-top level query expressions.
*/
-union_head_non_top:
- unit_type_decl union_option
- {
- if (unlikely(Lex->add_select_to_union_list((bool)$2, $1, FALSE)))
- MYSQL_YYABORT;
- }
- ;
-
union_option:
/* empty */ { $$=1; }
| DISTINCT { $$=1; }
| ALL { $$=0; }
;
-simple_table:
- query_specification { $$= $1; }
- | table_value_constructor { $$= $1; }
- ;
-
-table_value_constructor:
- VALUES
- {
- Lex->tvc_start();
- }
- values_list
- {
- $$= Lex->current_select;
- if (Lex->tvc_finalize())
- MYSQL_YYABORT;
- }
- ;
-
-/*
- Corresponds to the SQL Standard
- <query specification> ::=
- SELECT [ <set quantifier> ] <select list> <table expression>
-
- Notes:
- - We allow more options in addition to <set quantifier>
- - <table expression> is optional in MariaDB
-*/
-query_specification:
- SELECT_SYM select_init2_derived opt_table_expression
- {
- $$= Lex->current_select->master_unit()->first_select();
- }
- ;
-
-query_term_union_not_ready:
- simple_table order_or_limit opt_select_lock_type { $$= $1; }
- | '(' select_paren_derived ')' union_order_or_limit { $$= $2; }
- ;
-
-query_term_union_ready:
- simple_table opt_select_lock_type { $$= $1; }
- | '(' select_paren_derived ')' { $$= $2; }
- ;
-
-query_expression_body:
- query_term_union_not_ready { $$= $1; }
- | query_term_union_ready { $$= $1; }
- | query_term_union_ready union_list_derived { $$= $1; }
- ;
-
-/* Corresponds to <query expression> in the SQL:2003 standard. */
-subselect:
- subselect_start opt_with_clause query_expression_body subselect_end
- {
- $3->set_with_clause($2);
- $$= $3;
- }
- ;
-
-subselect_start:
- {
- LEX *lex=Lex;
- if (unlikely(!lex->expr_allows_subselect ||
- lex->sql_command == (int)SQLCOM_PURGE))
- {
- thd->parse_error();
- MYSQL_YYABORT;
- }
- /*
- we are making a "derived table" for the parenthesis
- as we need to have a lex level to fit the union
- after the parenthesis, e.g.
- (SELECT .. ) UNION ... becomes
- SELECT * FROM ((SELECT ...) UNION ...)
- */
- if (unlikely(mysql_new_select(Lex, 1, NULL)))
- MYSQL_YYABORT;
- }
- ;
-
-subselect_end:
- {
- LEX *lex=Lex;
-
- lex->check_automatic_up(UNSPECIFIED_TYPE);
- lex->pop_context();
- SELECT_LEX *child= lex->current_select;
- lex->current_select = lex->current_select->return_after_parsing();
- lex->nest_level--;
- lex->current_select->n_child_sum_items += child->n_sum_items;
-
- /*
- A subquery (and all the subsequent query blocks in a UNION) can
- add columns to an outer query block. Reserve space for them.
- Aggregate functions in having clause can also add fields to an
- outer select.
- */
- for (SELECT_LEX *temp= child->master_unit()->first_select();
- temp != NULL; temp= temp->next_select())
- {
- lex->current_select->select_n_where_fields+=
- temp->select_n_where_fields;
- lex->current_select->select_n_having_items+=
- temp->select_n_having_items;
- }
- }
- ;
-
-opt_query_expression_options:
- /* empty */
- | query_expression_option_list
- ;
-
-query_expression_option_list:
- query_expression_option_list query_expression_option
- | query_expression_option
- ;
-
query_expression_option:
STRAIGHT_JOIN { Select->options|= SELECT_STRAIGHT_JOIN; }
| HIGH_PRIORITY
{
- if (unlikely(Lex->check_simple_select(&$1)))
- MYSQL_YYABORT;
YYPS->m_lock_type= TL_READ_HIGH_PRIORITY;
YYPS->m_mdl_type= MDL_SHARED_READ;
Select->options|= SELECT_HIGH_PRIORITY;
@@ -17698,18 +17741,8 @@ query_expression_option:
| UNIQUE_SYM { Select->options|= SELECT_DISTINCT; }
| SQL_SMALL_RESULT { Select->options|= SELECT_SMALL_RESULT; }
| SQL_BIG_RESULT { Select->options|= SELECT_BIG_RESULT; }
- | SQL_BUFFER_RESULT
- {
- if (unlikely(Lex->check_simple_select(&$1)))
- MYSQL_YYABORT;
- Select->options|= OPTION_BUFFER_RESULT;
- }
- | SQL_CALC_FOUND_ROWS
- {
- if (unlikely(Lex->check_simple_select(&$1)))
- MYSQL_YYABORT;
- Select->options|= OPTION_FOUND_ROWS;
- }
+ | SQL_BUFFER_RESULT { Select->options|= OPTION_BUFFER_RESULT; }
+ | SQL_CALC_FOUND_ROWS { Select->options|= OPTION_FOUND_ROWS; }
| ALL { Select->options|= SELECT_ALL; }
;
@@ -17742,9 +17775,7 @@ definer:
DEFINER_SYM '=' user_or_role
{
Lex->definer= $3;
- Lex->ssl_type= SSL_TYPE_NOT_SPECIFIED;
- Lex->ssl_cipher= Lex->x509_subject= Lex->x509_issuer= 0;
- bzero(&(Lex->mqh), sizeof(Lex->mqh));
+ Lex->account_options.reset();
}
;
@@ -17797,35 +17828,14 @@ view_select:
lex->parsing_options.allows_variable= FALSE;
lex->create_view->select.str= (char *) YYLIP->get_cpp_ptr();
}
- opt_with_clause query_expression_body_view view_check_option
+ query_expression
+ view_check_option
{
- LEX *lex= Lex;
- size_t len= YYLIP->get_cpp_ptr() - lex->create_view->select.str;
- void *create_view_select= thd->memdup(lex->create_view->select.str, len);
- lex->create_view->select.length= len;
- lex->create_view->select.str= (char *) create_view_select;
- trim_whitespace(thd->charset(),
- &lex->create_view->select);
- lex->create_view->check= $4;
- lex->parsing_options.allows_variable= TRUE;
- lex->current_select->set_with_clause($2);
+ if (Lex->parsed_create_view($2, $3))
+ MYSQL_YYABORT;
}
;
-/*
- SQL Standard <query expression body> for VIEWs.
- Does not include INTO and PROCEDURE clauses.
-*/
-query_expression_body_view:
- SELECT_SYM select_options_and_item_list select_init3_view
- | table_value_constructor
- | table_value_constructor union_order_or_limit
- | table_value_constructor union_list_view
- | '(' select_paren_view ')'
- | '(' select_paren_view ')' union_order_or_limit
- | '(' select_paren_view ')' union_list_view
- ;
-
view_check_option:
/* empty */ { $$= VIEW_CHECK_NONE; }
| WITH CHECK_SYM OPTION { $$= VIEW_CHECK_CASCADED; }
@@ -17902,7 +17912,8 @@ trigger_tail:
(*static_cast<st_trg_execution_order*>(&lex->trg_chistics))= ($17);
lex->trg_chistics.ordering_clause_end= lip->get_cpp_ptr();
- if (unlikely(!lex->make_sp_head(thd, $4, &sp_handler_trigger)))
+ if (unlikely(!lex->make_sp_head(thd, $4, &sp_handler_trigger,
+ DEFAULT_AGGREGATE)))
MYSQL_YYABORT;
lex->sphead->set_body_start(thd, lip->get_cpp_tok_start());
@@ -17910,15 +17921,9 @@ trigger_tail:
sp_proc_stmt /* $19 */
{ /* $20 */
LEX *lex= Lex;
- sp_head *sp= lex->sphead;
- if (unlikely(sp->check_unresolved_goto()))
- MYSQL_YYABORT;
lex->sql_command= SQLCOM_CREATE_TRIGGER;
- sp->set_stmt_end(thd);
- sp->restore_thd_mem_root(thd);
-
- if (unlikely(sp->is_not_allowed_in_function("trigger")))
+ if (lex->sp_body_finalize_trigger(thd))
MYSQL_YYABORT;
/*
@@ -17926,11 +17931,10 @@ trigger_tail:
sp_proc_stmt alternatives are not saving/restoring LEX, so
lex->query_tables can be wiped out.
*/
- if (unlikely(!lex->select_lex.
- add_table_to_list(thd, $10, (LEX_CSTRING*) 0,
- TL_OPTION_UPDATING,
- TL_READ_NO_INSERT,
- MDL_SHARED_NO_WRITE)))
+ if (!lex->first_select_lex()->
+ add_table_to_list(thd, $10, (LEX_CSTRING*) 0,
+ TL_OPTION_UPDATING, TL_READ_NO_INSERT,
+ MDL_SHARED_NO_WRITE))
MYSQL_YYABORT;
}
;
@@ -17941,25 +17945,7 @@ trigger_tail:
**************************************************************************/
-udf_tail:
- opt_if_not_exists ident
- RETURNS_SYM udf_type SONAME_SYM TEXT_STRING_sys
- {
- LEX *lex= thd->lex;
- if (unlikely(lex->add_create_options_with_check($1)))
- MYSQL_YYABORT;
- if (unlikely(is_native_function(thd, & $2)))
- my_yyabort_error((ER_NATIVE_FCT_NAME_COLLISION, MYF(0), $2.str));
- lex->sql_command= SQLCOM_CREATE_FUNCTION;
- lex->udf.name= $2;
- lex->udf.returns= (Item_result) $4;
- lex->udf.dl= $6.str;
- }
- ;
-
-
sf_return_type:
- RETURN_ORACLE_SYM
{
LEX *lex= Lex;
lex->init_last_field(&lex->sphead->m_return_field_def,
@@ -17974,79 +17960,44 @@ sf_return_type:
}
;
-sf_tail:
- opt_if_not_exists
- sp_name
- {
- Lex->sql_command= SQLCOM_CREATE_SPFUNCTION;
- if (unlikely(!Lex->make_sp_head_no_recursive(thd, $1, $2,
- &sp_handler_function)))
- MYSQL_YYABORT;
- }
- opt_sp_parenthesized_fdparam_list
- sf_return_type
+sf_c_chistics_and_body_standalone:
sp_c_chistics
{
LEX *lex= thd->lex;
- Lex_input_stream *lip= YYLIP;
-
- lex->sphead->set_chistics(lex->sp_chistics);
- lex->sphead->set_body_start(thd, lip->get_cpp_tok_start());
+ lex->sphead->set_c_chistics(lex->sp_chistics);
+ lex->sphead->set_body_start(thd, YYLIP->get_cpp_tok_start());
}
sp_tail_is
sp_body
{
if (unlikely(Lex->sp_body_finalize_function(thd)))
MYSQL_YYABORT;
- if (unlikely(Lex->sphead->m_flags & sp_head::HAS_AGGREGATE_INSTR))
- {
- my_yyabort_error((ER_NOT_AGGREGATE_FUNCTION, MYF(0)));
- }
- Lex->sphead->set_chistics_agg_type(NOT_AGGREGATE);
}
;
-sp_tail:
- opt_if_not_exists sp_name
+sp_tail_standalone:
+ sp_name
{
- Lex->sql_command= SQLCOM_CREATE_PROCEDURE;
- if (unlikely(!Lex->make_sp_head_no_recursive(thd, $1, $2,
- &sp_handler_procedure)))
+ if (unlikely(!Lex->make_sp_head_no_recursive(thd, $1,
+ &sp_handler_procedure,
+ DEFAULT_AGGREGATE)))
MYSQL_YYABORT;
}
opt_sp_parenthesized_pdparam_list
sp_c_chistics
{
- Lex->sphead->set_chistics(Lex->sp_chistics);
+ Lex->sphead->set_c_chistics(Lex->sp_chistics);
Lex->sphead->set_body_start(thd, YYLIP->get_cpp_tok_start());
}
sp_tail_is
sp_body
+ opt_sp_name
{
- if (unlikely(Lex->sp_body_finalize_procedure(thd)))
+ if (unlikely(Lex->sp_body_finalize_procedure_standalone(thd, $8)))
MYSQL_YYABORT;
}
;
-sf_tail_standalone:
- sf_tail opt_sp_name
- {
- if (unlikely($2 && !$2->eq(Lex->sphead)))
- my_yyabort_error((ER_END_IDENTIFIER_DOES_NOT_MATCH, MYF(0),
- ErrConvDQName($2).ptr(),
- ErrConvDQName(Lex->sphead).ptr()));
- }
- ;
-
-sp_tail_standalone:
- sp_tail opt_sp_name
- {
- if (unlikely($2 && !$2->eq(Lex->sphead)))
- my_yyabort_error((ER_END_IDENTIFIER_DOES_NOT_MATCH, MYF(0),
- ErrConvDQName($2).ptr(),
- ErrConvDQName(Lex->sphead).ptr()));
- }
- ;
opt_package_routine_end_name:
/* Empty */ { $$= null_clex_str; }
@@ -18160,44 +18111,37 @@ opt_migrate:
;
install:
- INSTALL_SYM PLUGIN_SYM ident SONAME_SYM TEXT_STRING_sys
+ INSTALL_SYM PLUGIN_SYM opt_if_not_exists ident SONAME_SYM TEXT_STRING_sys
{
- LEX *lex= Lex;
- lex->sql_command= SQLCOM_INSTALL_PLUGIN;
- lex->comment= $3;
- lex->ident= $5;
+ if (Lex->stmt_install_plugin($3, $4, $6))
+ MYSQL_YYABORT;
}
| INSTALL_SYM SONAME_SYM TEXT_STRING_sys
{
- LEX *lex= Lex;
- lex->sql_command= SQLCOM_INSTALL_PLUGIN;
- lex->comment= null_clex_str;
- lex->ident= $3;
+ Lex->stmt_install_plugin($3);
}
;
uninstall:
- UNINSTALL_SYM PLUGIN_SYM ident
+ UNINSTALL_SYM PLUGIN_SYM opt_if_exists ident
{
- LEX *lex= Lex;
- lex->sql_command= SQLCOM_UNINSTALL_PLUGIN;
- lex->comment= $3;
+ if (Lex->stmt_uninstall_plugin_by_name($3, $4))
+ MYSQL_YYABORT;
}
- | UNINSTALL_SYM SONAME_SYM TEXT_STRING_sys
+ | UNINSTALL_SYM SONAME_SYM opt_if_exists TEXT_STRING_sys
{
- LEX *lex= Lex;
- lex->sql_command= SQLCOM_UNINSTALL_PLUGIN;
- lex->comment= null_clex_str;
- lex->ident= $3;
+ if (Lex->stmt_uninstall_plugin_by_soname($3, $4))
+ MYSQL_YYABORT;
}
;
/* Avoid compiler warning from sql_yacc.cc where yyerrlab1 is not used */
keep_gcc_happy:
- IMPOSSIBLE_ACTION
- {
- YYERROR;
- }
+ IMPOSSIBLE_ACTION
+ {
+ YYERROR;
+ }
+ ;
/**
@} (end of group Parser)
diff --git a/sql/structs.h b/sql/structs.h
index 1aa26eec4eb..743dee66c57 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -1,8 +1,8 @@
#ifndef STRUCTS_INCLUDED
#define STRUCTS_INCLUDED
-/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
- Copyright (c) 2017, MariaDB Corporation.
+/* Copyright (c) 2000, 2010, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -27,6 +27,15 @@
#include "thr_lock.h" /* thr_lock_type */
#include "my_base.h" /* ha_rows, ha_key_alg */
#include <mysql_com.h> /* USERNAME_LENGTH */
+#include "sql_bitmap.h"
+
+#if MAX_INDEXES <= 64
+typedef Bitmap<64> key_map; /* Used for finding keys */
+#elif MAX_INDEXES > 128
+#error "MAX_INDEXES values greater than 128 is not supported."
+#else
+typedef Bitmap<((MAX_INDEXES+7)/8*8)> key_map; /* Used for finding keys */
+#endif
struct TABLE;
class Type_handler;
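The key_map typedef added above is nothing more than a fixed-width bitmap whose width is derived from MAX_INDEXES, rounded up to whole bytes and capped at 128 bits. A minimal standalone sketch of how such a per-table key set is used, with std::bitset standing in for MariaDB's Bitmap template (names are illustrative only):

#include <bitset>
#include <cstdio>

// Assumed cap, mirroring the #error guard above: at most 128 indexes per table.
static const unsigned MAX_INDEXES_SKETCH= 128;

// A set of key (index) numbers, one bit per key.
typedef std::bitset<MAX_INDEXES_SKETCH> key_map_sketch;

int main()
{
  key_map_sketch covering;               // keys that cover some query
  covering.set(0);                       // PRIMARY
  covering.set(3);                       // a secondary index

  key_map_sketch allowed;                // e.g. keys left after IGNORE INDEX
  allowed.set(3);
  allowed.set(5);

  key_map_sketch usable= covering & allowed;   // intersection of the two sets
  std::printf("key 3 usable: %d, key 0 usable: %d\n",
              (int) usable.test(3), (int) usable.test(0));
  return 0;
}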
@@ -110,6 +119,13 @@ typedef struct st_key {
ext_key_part_map.is_set(1) == false
*/
key_part_map ext_key_part_map;
+ /*
+ Bitmap of indexes having common parts with this index
+ (only key parts from key definitions are taken into account)
+ */
+ key_map overlapped;
+ /* Set of keys constraint correlated with this key */
+ key_map constraint_correlated;
LEX_CSTRING name;
uint block_size;
enum ha_key_alg algorithm;
@@ -200,6 +216,17 @@ extern const char *show_comp_option_name[];
typedef int *(*update_var)(THD *, struct st_mysql_show_var *);
+struct USER_AUTH : public Sql_alloc
+{
+ LEX_CSTRING plugin, auth_str, pwtext;
+ USER_AUTH *next;
+ USER_AUTH() : next(NULL)
+ {
+ plugin.str= auth_str.str= "";
+ pwtext.str= NULL;
+ plugin.length= auth_str.length= pwtext.length= 0;
+ }
+};
struct AUTHID
{
@@ -224,13 +251,10 @@ struct AUTHID
struct LEX_USER: public AUTHID
{
- LEX_CSTRING plugin, auth;
- LEX_CSTRING pwtext, pwhash;
- void reset_auth()
+ USER_AUTH *auth;
+ bool has_auth()
{
- pwtext.length= pwhash.length= plugin.length= auth.length= 0;
- pwtext.str= pwhash.str= 0;
- plugin.str= auth.str= "";
+ return auth && (auth->plugin.length || auth->auth_str.length || auth->pwtext.length);
}
};
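LEX_USER now keeps its authentication data as a pointer to a USER_AUTH record with a next link, instead of the flat plugin/auth/pwtext members it used to carry, so a single user specification can hold a chain of authentication methods. A rough standalone sketch of the kind of traversal this enables (simplified types, not the server's):

#include <cstdio>
#include <string>

// Simplified stand-in for the USER_AUTH chain introduced above.
struct AuthSketch
{
  std::string plugin;     // e.g. "mysql_native_password", "unix_socket"
  std::string auth_str;   // plugin-specific authentication string
  AuthSketch *next;
};

// Loosely modelled on LEX_USER::has_auth(), but walking the whole chain.
static bool has_auth(const AuthSketch *a)
{
  for (; a; a= a->next)
    if (!a->plugin.empty() || !a->auth_str.empty())
      return true;
  return false;
}

int main()
{
  AuthSketch second= { "unix_socket", "", nullptr };
  AuthSketch first=  { "mysql_native_password", "*<password hash>", &second };
  std::printf("has auth: %d\n", (int) has_auth(&first));
  return 0;
}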
@@ -770,6 +794,43 @@ public:
};
+class st_select_lex;
+
+class Lex_select_lock
+{
+public:
+ struct
+ {
+ uint defined_lock:1;
+ uint update_lock:1;
+ uint defined_timeout:1;
+ };
+ ulong timeout;
+
+
+ void empty()
+ {
+ defined_lock= update_lock= defined_timeout= FALSE;
+ timeout= 0;
+ }
+ void set_to(st_select_lex *sel);
+};
+
+class Lex_select_limit
+{
+public:
+ bool explicit_limit;
+ Item *select_limit, *offset_limit;
+
+ void empty()
+ {
+ explicit_limit= FALSE;
+ select_limit= offset_limit= NULL;
+ }
+};
+
+struct st_order;
+
class Load_data_param
{
protected:
@@ -806,4 +867,20 @@ public:
};
+class Timeval: public timeval
+{
+protected:
+ Timeval() { }
+public:
+ Timeval(my_time_t sec, ulong usec)
+ {
+ tv_sec= sec;
+ tv_usec= usec;
+ }
+ explicit Timeval(const timeval &tv)
+ :timeval(tv)
+ { }
+};
+
+
#endif /* STRUCTS_INCLUDED */
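The Timeval class added at the end of the header is only a thin convenience wrapper over struct timeval. A hypothetical caller, with standard POSIX types substituted for my_time_t and ulong:

#include <sys/time.h>
#include <cstdio>

// Same shape as the Timeval wrapper above, using plain POSIX types.
class TimevalSketch : public timeval
{
public:
  TimevalSketch(time_t sec, suseconds_t usec) { tv_sec= sec; tv_usec= usec; }
  explicit TimevalSketch(const timeval &tv) : timeval(tv) { }
};

int main()
{
  TimevalSketch t(1555000000, 250000);           // seconds + microseconds
  std::printf("%ld.%06ld\n", (long) t.tv_sec, (long) t.tv_usec);
  return 0;
}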
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 185078ff363..2e92838ad3a 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -53,6 +53,7 @@
#include <myisam.h>
#include "debug_sync.h" // DEBUG_SYNC
#include "sql_show.h"
+#include "opt_trace_context.h"
#include "log_event.h"
#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
@@ -368,6 +369,15 @@ static bool update_auto_increment_increment (sys_var *self, THD *thd, enum_var_t
#endif /* WITH_WSREP */
+static Sys_var_double Sys_analyze_sample_percentage(
+ "analyze_sample_percentage",
+ "Percentage of rows from the table ANALYZE TABLE will sample "
+ "to collect table statistics. Set to 0 to let MariaDB decide "
+ "what percentage of rows to sample.",
+ SESSION_VAR(sample_percentage),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 100),
+ DEFAULT(100));
+
static Sys_var_ulong Sys_auto_increment_increment(
"auto_increment_increment",
"Auto-increment columns are incremented by this",
@@ -521,7 +531,7 @@ bool check_has_super(sys_var *self, THD *thd, set_var *var)
static Sys_var_bit Sys_core_file("core_file", "write a core-file on crashes",
READ_ONLY GLOBAL_VAR(test_flags), NO_CMD_LINE,
- TEST_CORE_ON_SIGNAL, DEFAULT(FALSE), NO_MUTEX_GUARD, NOT_IN_BINLOG,
+ TEST_CORE_ON_SIGNAL, DEFAULT(IF_WIN(TRUE,FALSE)), NO_MUTEX_GUARD, NOT_IN_BINLOG,
0,0,0);
static bool binlog_format_check(sys_var *self, THD *thd, set_var *var)
@@ -646,7 +656,7 @@ static Sys_var_mybool Sys_explicit_defaults_for_timestamp(
"as NULL with DEFAULT NULL attribute, Without this option, "
"TIMESTAMP columns are NOT NULL and have implicit DEFAULT clauses.",
READ_ONLY GLOBAL_VAR(opt_explicit_defaults_for_timestamp),
- CMD_LINE(OPT_ARG), DEFAULT(FALSE), NO_MUTEX_GUARD, NOT_IN_BINLOG);
+ CMD_LINE(OPT_ARG), DEFAULT(FALSE));
static Sys_var_ulonglong Sys_bulk_insert_buff_size(
@@ -1555,6 +1565,24 @@ static Sys_var_ulong Sys_max_connections(
DEFAULT(MAX_CONNECTIONS_DEFAULT), BLOCK_SIZE(1), NO_MUTEX_GUARD,
NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(fix_max_connections));
+static Sys_var_uint Sys_default_password_lifetime(
+ "default_password_lifetime",
+ "This defines the global password expiration policy. 0 means "
+ "automatic password expiration is disabled. If the value is a "
+ "positive integer N, the passwords must be changed every N days. This "
+ "behavior can be overridden using the password expiration options in "
+ "ALTER USER.",
+ GLOBAL_VAR(default_password_lifetime), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, UINT_MAX), DEFAULT(0), BLOCK_SIZE(1));
+
+static Sys_var_mybool Sys_disconnect_on_expired_password(
+ "disconnect_on_expired_password",
+ "This variable controls how the server handles clients that are not "
+ "aware of the sandbox mode. If enabled, the server disconnects the "
+ "client, otherwise the server puts the client in a sandbox mode.",
+ GLOBAL_VAR(disconnect_on_expired_password), CMD_LINE(OPT_ARG),
+ DEFAULT(FALSE));
+
static Sys_var_ulong Sys_max_connect_errors(
"max_connect_errors",
"If there is more than this number of interrupted connections from "
@@ -1563,6 +1591,14 @@ static Sys_var_ulong Sys_max_connect_errors(
VALID_RANGE(1, UINT_MAX), DEFAULT(MAX_CONNECT_ERRORS),
BLOCK_SIZE(1));
+static Sys_var_uint Sys_max_password_errors(
+ "max_password_errors",
+ "If there is more than this number of failed connect attempts "
+ "due to invalid password, user will be blocked from further connections until FLUSH_PRIVILEGES.",
+ GLOBAL_VAR(max_password_errors), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(1, UINT_MAX), DEFAULT(UINT_MAX),
+ BLOCK_SIZE(1));
+
static Sys_var_uint Sys_max_digest_length(
"max_digest_length", "Maximum length considered for digest text.",
READ_ONLY GLOBAL_VAR(max_digest_length),
@@ -1990,6 +2026,19 @@ Sys_var_last_gtid::session_value_ptr(THD *thd, const LEX_CSTRING *base)
}
+static Sys_var_uint Sys_gtid_cleanup_batch_size(
+ "gtid_cleanup_batch_size",
+ "Normally does not need tuning. How many old rows must accumulate in "
+ "the mysql.gtid_slave_pos table before a background job will be run to "
+ "delete them. Can be increased to reduce number of commits if "
+ "using many different engines with --gtid_pos_auto_engines, or to "
+ "reduce CPU overhead if using a huge number of different "
+ "gtid_domain_ids. Can be decreased to reduce number of old rows in the "
+ "table.",
+ GLOBAL_VAR(opt_gtid_cleanup_batch_size), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0,2147483647), DEFAULT(64), BLOCK_SIZE(1));
+
+
static bool
check_slave_parallel_threads(sys_var *self, THD *thd, set_var *var)
{
@@ -2183,7 +2232,7 @@ static Sys_var_bit Sys_skip_parallel_replication(
"retry for transactions that are likely to cause a conflict if "
"replicated in parallel.",
SESSION_ONLY(option_bits), NO_CMD_LINE, OPTION_RPL_SKIP_PARALLEL,
- DEFAULT(FALSE), NO_MUTEX_GUARD, NOT_IN_BINLOG);
+ DEFAULT(FALSE));
static bool
@@ -2279,7 +2328,9 @@ static Sys_var_ulong Sys_max_long_data_size(
READ_ONLY GLOBAL_VAR(max_long_data_size),
CMD_LINE(REQUIRED_ARG, OPT_MAX_LONG_DATA_SIZE),
VALID_RANGE(1024, UINT_MAX32), DEFAULT(1024*1024),
- BLOCK_SIZE(1));
+ BLOCK_SIZE(1), NO_MUTEX_GUARD, NOT_IN_BINLOG,
+ ON_CHECK(0), ON_UPDATE(0),
+ DEPRECATED("'@@max_allowed_packet'"));
static PolyLock_mutex PLock_prepared_stmt_count(&LOCK_prepared_stmt_count);
static Sys_var_uint Sys_max_prepared_stmt_count(
@@ -2504,7 +2555,7 @@ static Sys_var_ulong Sys_optimizer_use_condition_selectivity(
"5 - additionally use selectivity of certain non-range predicates "
"calculated on record samples",
SESSION_VAR(optimizer_use_condition_selectivity), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(1, 5), DEFAULT(1), BLOCK_SIZE(1));
+ VALID_RANGE(1, 5), DEFAULT(4), BLOCK_SIZE(1));
static Sys_var_ulong Sys_optimizer_search_depth(
"optimizer_search_depth",
@@ -2544,6 +2595,9 @@ export const char *optimizer_switch_names[]=
"orderby_uses_equalities",
"condition_pushdown_for_derived",
"split_materialized",
+ "condition_pushdown_for_subquery",
+ "rowid_filter",
+ "condition_pushdown_from_having",
"default",
NullS
};
@@ -2563,9 +2617,26 @@ static Sys_var_flagset Sys_optimizer_switch(
"Fine-tune the optimizer behavior",
SESSION_VAR(optimizer_switch), CMD_LINE(REQUIRED_ARG),
optimizer_switch_names, DEFAULT(OPTIMIZER_SWITCH_DEFAULT),
- NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(fix_optimizer_switch));
+static Sys_var_flagset Sys_optimizer_trace(
+ "optimizer_trace",
+ "Controls tracing of the Optimizer:"
+ " optimizer_trace=option=val[,option=val...], where option is one of"
+ " {enabled}"
+ " and val is one of {on, off, default}",
+ SESSION_VAR(optimizer_trace), CMD_LINE(REQUIRED_ARG),
+ Opt_trace_context::flag_names, DEFAULT(Opt_trace_context::FLAG_DEFAULT));
+ // @see set_var::is_var_optimizer_trace()
+export sys_var *Sys_optimizer_trace_ptr = &Sys_optimizer_trace;
+
+static Sys_var_ulong Sys_optimizer_trace_max_mem_size(
+ "optimizer_trace_max_mem_size",
+ "Maximum allowed size of an optimizer trace",
+ SESSION_VAR(optimizer_trace_max_mem_size), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, ULONG_MAX), DEFAULT(1024 * 1024), BLOCK_SIZE(1));
+
static Sys_var_charptr Sys_pid_file(
"pid_file", "Pid file used by safe_mysqld",
READ_ONLY GLOBAL_VAR(pidfile_name_ptr), CMD_LINE(REQUIRED_ARG),
@@ -2619,13 +2690,15 @@ static Sys_var_ulong Sys_read_buff_size(
static bool check_read_only(sys_var *self, THD *thd, set_var *var)
{
/* Prevent self dead-lock */
- if (thd->locked_tables_mode || thd->in_active_multi_stmt_transaction())
+ if (thd->locked_tables_mode || thd->in_active_multi_stmt_transaction() ||
+ thd->current_backup_stage != BACKUP_FINISHED)
{
my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0));
return true;
}
return false;
}
+
static bool fix_read_only(sys_var *self, THD *thd, enum_var_type type)
{
bool result= true;
@@ -2726,7 +2799,7 @@ static Sys_var_uint Sys_eq_range_index_dive_limit(
"ranges for the index is larger than or equal to this number. "
"If set to 0, index dives are always used.",
SESSION_VAR(eq_range_index_dive_limit), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, UINT_MAX32), DEFAULT(0),
+ VALID_RANGE(0, UINT_MAX32), DEFAULT(200),
BLOCK_SIZE(1));
static Sys_var_ulong Sys_range_alloc_block_size(
@@ -2768,17 +2841,6 @@ static Sys_var_ulong Sys_query_prealloc_size(
BLOCK_SIZE(1024), NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(fix_thd_mem_root));
-#ifdef HAVE_SMEM
-static Sys_var_mybool Sys_shared_memory(
- "shared_memory", "Enable the shared memory",
- READ_ONLY GLOBAL_VAR(opt_enable_shared_memory), CMD_LINE(OPT_ARG),
- DEFAULT(FALSE));
-
-static Sys_var_charptr Sys_shared_memory_base_name(
- "shared_memory_base_name", "Base name of shared memory",
- READ_ONLY GLOBAL_VAR(shared_memory_base_name), CMD_LINE(REQUIRED_ARG),
- IN_FS_CHARSET, DEFAULT(0));
-#endif
// this has to be NO_CMD_LINE as the command-line option has a different name
static Sys_var_mybool Sys_skip_external_locking(
@@ -2926,7 +2988,7 @@ static Sys_var_ulong Sys_query_cache_limit(
"Don't cache results that are bigger than this",
GLOBAL_VAR(query_cache_limit), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, UINT_MAX), DEFAULT(1024*1024), BLOCK_SIZE(1),
- NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(fix_query_cache_limit));
static bool fix_qcache_min_res_unit(sys_var *self, THD *thd, enum_var_type type)
@@ -3413,6 +3475,7 @@ static const char *sql_mode_names[]=
"ALLOW_INVALID_DATES", "ERROR_FOR_DIVISION_BY_ZERO", "TRADITIONAL",
"NO_AUTO_CREATE_USER", "HIGH_NOT_PRECEDENCE", "NO_ENGINE_SUBSTITUTION",
"PAD_CHAR_TO_FULL_LENGTH", "EMPTY_STRING_IS_NULL", "SIMULTANEOUS_ASSIGNMENT",
+ "TIME_ROUND_FRACTIONAL",
0
};
@@ -3796,7 +3859,7 @@ static Sys_var_mybool Sys_timed_mutexes(
"timed_mutexes",
"Specify whether to time mutexes. Deprecated, has no effect.",
GLOBAL_VAR(timed_mutexes), CMD_LINE(OPT_ARG), DEFAULT(0),
- NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL), ON_UPDATE(NULL),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(0),
DEPRECATED(""));
static Sys_var_charptr Sys_version(
@@ -4104,6 +4167,16 @@ static bool fix_sql_log_bin_after_update(sys_var *self, THD *thd,
return FALSE;
}
+static bool check_session_only_variable(sys_var *self, THD *,set_var *var)
+{
+ if (unlikely(var->type == OPT_GLOBAL))
+ {
+ my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0), self->name.str, "SESSION");
+ return true;
+ }
+ return false;
+}
+
/**
This function checks if the sql_log_bin can be changed,
what is possible if:
@@ -4119,20 +4192,17 @@ static bool fix_sql_log_bin_after_update(sys_var *self, THD *thd,
static bool check_sql_log_bin(sys_var *self, THD *thd, set_var *var)
{
if (check_has_super(self, thd, var))
- return TRUE;
+ return true;
- if (unlikely(var->type == OPT_GLOBAL))
- {
- my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0), self->name.str, "SESSION");
- return TRUE;
- }
+ if (check_session_only_variable(self, thd, var))
+ return true;
if (unlikely(error_if_in_trans_or_substatement(thd,
ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN,
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN)))
- return TRUE;
+ return true;
- return FALSE;
+ return false;
}
static Sys_var_mybool Sys_log_binlog(
@@ -5153,8 +5223,10 @@ static Sys_var_ulonglong Sys_read_binlog_speed_limit(
static Sys_var_charptr Sys_slave_transaction_retry_errors(
"slave_transaction_retry_errors", "Tells the slave thread to retry "
"transaction for replication when a query event returns an error from "
- "the provided list. Deadlock and elapsed lock wait timeout errors are "
- "automatically added to this list",
+ "the provided list. Deadlock error, elapsed lock wait timeout, "
+ "net read error, net read timeout, net write error, net write timeout, "
+ "connect error and 2 types of lost connection error are automatically "
+ "added to this list",
READ_ONLY GLOBAL_VAR(opt_slave_transaction_retry_errors), CMD_LINE(REQUIRED_ARG),
IN_SYSTEM_CHARSET, DEFAULT(0));
@@ -5325,13 +5397,13 @@ static Sys_var_charptr Sys_wsrep_cluster_name(
ON_CHECK(wsrep_cluster_name_check),
ON_UPDATE(wsrep_cluster_name_update));
-static PolyLock_mutex PLock_wsrep_slave_threads(&LOCK_wsrep_slave_threads);
+static PolyLock_mutex PLock_wsrep_cluster_config(&LOCK_wsrep_cluster_config);
static Sys_var_charptr Sys_wsrep_cluster_address (
"wsrep_cluster_address", "Address to initially connect to cluster",
PREALLOCATED GLOBAL_VAR(wsrep_cluster_address),
CMD_LINE(REQUIRED_ARG),
IN_SYSTEM_CHARSET, DEFAULT(""),
- &PLock_wsrep_slave_threads, NOT_IN_BINLOG,
+ &PLock_wsrep_cluster_config, NOT_IN_BINLOG,
ON_CHECK(wsrep_cluster_address_check),
ON_UPDATE(wsrep_cluster_address_update));
@@ -5362,8 +5434,8 @@ static Sys_var_ulong Sys_wsrep_slave_threads(
"wsrep_slave_threads", "Number of slave appliers to launch",
GLOBAL_VAR(wsrep_slave_threads), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(1, 512), DEFAULT(1), BLOCK_SIZE(1),
- &PLock_wsrep_slave_threads, NOT_IN_BINLOG,
- ON_CHECK(NULL),
+ &PLock_wsrep_cluster_config, NOT_IN_BINLOG,
+ ON_CHECK(0),
ON_UPDATE(wsrep_slave_threads_update));
static Sys_var_charptr Sys_wsrep_dbug_option(
@@ -5371,9 +5443,14 @@ static Sys_var_charptr Sys_wsrep_dbug_option(
GLOBAL_VAR(wsrep_dbug_option),CMD_LINE(REQUIRED_ARG),
IN_SYSTEM_CHARSET, DEFAULT(""));
-static Sys_var_mybool Sys_wsrep_debug(
- "wsrep_debug", "To enable debug level logging",
- GLOBAL_VAR(wsrep_debug), CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+static const char *wsrep_debug_names[]=
+{ "NONE", "SERVER", "TRANSACTION", "STREAMING", "CLIENT", NullS };
+static Sys_var_enum Sys_wsrep_debug(
+ "wsrep_debug", "WSREP debug level logging",
+ GLOBAL_VAR(wsrep_debug), CMD_LINE(REQUIRED_ARG),
+ wsrep_debug_names, DEFAULT(0),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG,
+ ON_CHECK(0), ON_UPDATE(wsrep_debug_update));
static Sys_var_mybool Sys_wsrep_convert_LOCK_to_trx(
"wsrep_convert_LOCK_to_trx", "To convert locking sessions "
@@ -5601,9 +5678,10 @@ static Sys_var_ulong Sys_wsrep_mysql_replication_bundle(
static Sys_var_mybool Sys_wsrep_load_data_splitting(
"wsrep_load_data_splitting", "To commit LOAD DATA "
- "transaction after every 10K rows inserted",
+ "transaction after every 10K rows inserted (deprecated)",
GLOBAL_VAR(wsrep_load_data_splitting),
- CMD_LINE(OPT_ARG), DEFAULT(TRUE));
+ CMD_LINE(OPT_ARG), DEFAULT(0), NO_MUTEX_GUARD, NOT_IN_BINLOG,
+ ON_CHECK(0), ON_UPDATE(0), DEPRECATED(""));
static Sys_var_mybool Sys_wsrep_slave_FK_checks(
"wsrep_slave_FK_checks", "Should slave thread do "
@@ -5621,19 +5699,53 @@ static Sys_var_mybool Sys_wsrep_restart_slave(
"wsrep_restart_slave", "Should MariaDB slave be restarted automatically, when node joins back to cluster",
GLOBAL_VAR(wsrep_restart_slave), CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+static Sys_var_ulonglong Sys_wsrep_trx_fragment_size(
+ "wsrep_trx_fragment_size",
+ "Size of transaction fragments for streaming replication (measured in "
+ "units of 'wsrep_trx_fragment_unit')",
+ SESSION_VAR(wsrep_trx_fragment_size), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(0, WSREP_MAX_WS_SIZE), DEFAULT(0), BLOCK_SIZE(1),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG,
+ ON_CHECK(wsrep_trx_fragment_size_check),
+ ON_UPDATE(wsrep_trx_fragment_size_update));
+
+extern const char *wsrep_fragment_units[];
+
+static Sys_var_enum Sys_wsrep_trx_fragment_unit(
+ "wsrep_trx_fragment_unit",
+ "Unit for streaming replication transaction fragments' size: bytes, "
+ "rows, statements",
+ SESSION_VAR(wsrep_trx_fragment_unit), CMD_LINE(REQUIRED_ARG),
+ wsrep_fragment_units,
+ DEFAULT(WSREP_FRAG_BYTES),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG,
+ ON_CHECK(0),
+ ON_UPDATE(wsrep_trx_fragment_unit_update));
+
+extern const char *wsrep_SR_store_types[];
+static Sys_var_enum Sys_wsrep_SR_store(
+ "wsrep_SR_store", "Storage for streaming replication fragments",
+ READ_ONLY GLOBAL_VAR(wsrep_SR_store_type), CMD_LINE(REQUIRED_ARG),
+ wsrep_SR_store_types, DEFAULT(WSREP_SR_STORE_TABLE));
+
static Sys_var_mybool Sys_wsrep_dirty_reads(
"wsrep_dirty_reads",
"Allow reads even when the node is not in the primary component.",
SESSION_VAR(wsrep_dirty_reads), CMD_LINE(OPT_ARG),
- DEFAULT(FALSE), NO_MUTEX_GUARD, NOT_IN_BINLOG);
+ DEFAULT(FALSE));
+
+static Sys_var_uint Sys_wsrep_ignore_apply_errors (
+ "wsrep_ignore_apply_errors", "Ignore replication errors",
+ GLOBAL_VAR(wsrep_ignore_apply_errors), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(WSREP_IGNORE_ERRORS_NONE, WSREP_IGNORE_ERRORS_MAX),
+ DEFAULT(7), BLOCK_SIZE(1));
static Sys_var_uint Sys_wsrep_gtid_domain_id(
"wsrep_gtid_domain_id", "When wsrep_gtid_mode is set, this value is "
"used as gtid_domain_id for galera transactions and also copied to the "
"joiner nodes during state transfer. It is ignored, otherwise.",
GLOBAL_VAR(wsrep_gtid_domain_id), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, UINT_MAX32), DEFAULT(0), BLOCK_SIZE(1), NO_MUTEX_GUARD,
- NOT_IN_BINLOG);
+ VALID_RANGE(0, UINT_MAX32), DEFAULT(0), BLOCK_SIZE(1));
static Sys_var_mybool Sys_wsrep_gtid_mode(
"wsrep_gtid_mode", "Automatically update the (joiner) node's "
@@ -5664,7 +5776,7 @@ static Sys_var_ulong Sys_host_cache_size(
CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 65536),
DEFAULT(HOST_CACHE_SIZE),
BLOCK_SIZE(1),
- NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0),
ON_UPDATE(fix_host_cache_size));
vio_keepalive_opts opt_vio_keepalive;
@@ -5674,30 +5786,45 @@ static Sys_var_int Sys_keepalive_time(
"Timeout, in milliseconds, with no activity until the first TCP keep-alive packet is sent."
"If set to 0, system dependent default is used.",
AUTO_SET GLOBAL_VAR(opt_vio_keepalive.idle),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, INT_MAX32/1000),
- DEFAULT(0),
- BLOCK_SIZE(1),
- NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL));
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, INT_MAX32/1000), DEFAULT(0),
+ BLOCK_SIZE(1));
static Sys_var_int Sys_keepalive_interval(
"tcp_keepalive_interval",
"The interval, in seconds, between when successive keep-alive packets are sent if no acknowledgement is received."
"If set to 0, system dependent default is used.",
AUTO_SET GLOBAL_VAR(opt_vio_keepalive.interval),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, INT_MAX32/1000),
- DEFAULT(0),
- BLOCK_SIZE(1),
- NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL));
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, INT_MAX32/1000), DEFAULT(0),
+ BLOCK_SIZE(1));
static Sys_var_int Sys_keepalive_probes(
"tcp_keepalive_probes",
"The number of unacknowledged probes to send before considering the connection dead and notifying the application layer."
"If set to 0, system dependent default is used.",
AUTO_SET GLOBAL_VAR(opt_vio_keepalive.probes),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, INT_MAX32/1000),
- DEFAULT(0),
- BLOCK_SIZE(1),
- NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL));
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, INT_MAX32/1000), DEFAULT(0),
+ BLOCK_SIZE(1));
+
+
+static bool update_tcp_nodelay(sys_var *self, THD *thd,
+ enum_var_type type)
+{
+ DBUG_ASSERT(thd);
+
+ Vio *vio = thd->net.vio;
+ if (vio)
+ return (MY_TEST(vio_nodelay(vio, thd->variables.tcp_nodelay)));
+
+ return false;
+}
+
+static Sys_var_mybool Sys_tcp_nodelay(
+ "tcp_nodelay",
+ "Set option TCP_NODELAY (disable Nagle's algorithm) on socket",
+ SESSION_VAR(tcp_nodelay), CMD_LINE(OPT_ARG),
+ DEFAULT(TRUE),NO_MUTEX_GUARD, NOT_IN_BINLOG,
+ ON_CHECK(check_session_only_variable),
+ ON_UPDATE(update_tcp_nodelay));
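The new session-only tcp_nodelay variable is applied by update_tcp_nodelay(), which passes the session value to vio_nodelay() on the client connection; underneath, that amounts to toggling the standard TCP_NODELAY socket option, i.e. disabling or re-enabling Nagle's algorithm. A minimal POSIX sketch of that final step, on a hypothetical socket rather than server code:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>
#include <cstdio>

// Enable or disable Nagle's algorithm on a TCP socket. Returns true on failure.
static bool set_tcp_nodelay(int fd, bool on)
{
  int flag= on ? 1 : 0;
  return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(flag)) != 0;
}

int main()
{
  int fd= socket(AF_INET, SOCK_STREAM, 0);
  if (fd < 0)
    return 1;
  std::printf("TCP_NODELAY %s\n", set_tcp_nodelay(fd, true) ? "failed" : "set");
  close(fd);
  return 0;
}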
static Sys_var_charptr Sys_ignore_db_dirs(
"ignore_db_dirs",
@@ -5930,19 +6057,20 @@ static Sys_var_ulong Sys_progress_report_time(
VALID_RANGE(0, UINT_MAX), DEFAULT(5), BLOCK_SIZE(1));
const char *use_stat_tables_modes[] =
- {"NEVER", "COMPLEMENTARY", "PREFERABLY", 0};
+ {"NEVER", "COMPLEMENTARY", "PREFERABLY",
+ "COMPLEMENTARY_FOR_QUERIES", "PREFERABLY_FOR_QUERIES", 0};
static Sys_var_enum Sys_optimizer_use_stat_tables(
"use_stat_tables",
"Specifies how to use system statistics tables",
SESSION_VAR(use_stat_tables), CMD_LINE(REQUIRED_ARG),
- use_stat_tables_modes, DEFAULT(0));
+ use_stat_tables_modes, DEFAULT(4));
static Sys_var_ulong Sys_histogram_size(
"histogram_size",
"Number of bytes used for a histogram. "
"If set to 0, no histograms are created by ANALYZE.",
SESSION_VAR(histogram_size), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(0, 255), DEFAULT(0), BLOCK_SIZE(1));
+ VALID_RANGE(0, 255), DEFAULT(254), BLOCK_SIZE(1));
extern const char *histogram_types[];
static Sys_var_enum Sys_histogram_type(
@@ -5952,7 +6080,7 @@ static Sys_var_enum Sys_histogram_type(
"SINGLE_PREC_HB - single precision height-balanced, "
"DOUBLE_PREC_HB - double precision height-balanced.",
SESSION_VAR(histogram_type), CMD_LINE(REQUIRED_ARG),
- histogram_types, DEFAULT(0));
+ histogram_types, DEFAULT(1));
static Sys_var_mybool Sys_no_thread_alarm(
"debug_no_thread_alarm",
@@ -6087,14 +6215,14 @@ static Sys_var_mybool Sys_mysql56_temporal_format(
"mysql56_temporal_format",
"Use MySQL-5.6 (instead of MariaDB-5.3) format for TIME, DATETIME, TIMESTAMP columns.",
GLOBAL_VAR(opt_mysql56_temporal_format),
- CMD_LINE(OPT_ARG), DEFAULT(TRUE), NO_MUTEX_GUARD, NOT_IN_BINLOG);
+ CMD_LINE(OPT_ARG), DEFAULT(TRUE));
static Sys_var_mybool Sys_strict_password_validation(
"strict_password_validation",
"When password validation plugins are enabled, reject passwords "
"that cannot be validated (passwords specified as a hash)",
GLOBAL_VAR(strict_password_validation),
- CMD_LINE(OPT_ARG), DEFAULT(TRUE), NO_MUTEX_GUARD, NOT_IN_BINLOG);
+ CMD_LINE(OPT_ARG), DEFAULT(TRUE));
#ifdef HAVE_MMAP
static Sys_var_ulong Sys_log_tc_size(
@@ -6121,8 +6249,7 @@ static Sys_var_sesvartrack Sys_track_session_sys_vars(
"Track changes in registered system variables. ",
CMD_LINE(REQUIRED_ARG), IN_SYSTEM_CHARSET,
DEFAULT("autocommit,character_set_client,character_set_connection,"
- "character_set_results,time_zone"),
- NO_MUTEX_GUARD);
+ "character_set_results,time_zone"));
static bool update_session_track_schema(sys_var *self, THD *thd,
enum_var_type type)
@@ -6202,3 +6329,10 @@ static Sys_var_enum Sys_secure_timestamp(
"historical behavior, anyone can modify session timestamp",
READ_ONLY GLOBAL_VAR(opt_secure_timestamp), CMD_LINE(REQUIRED_ARG),
secure_timestamp_levels, DEFAULT(SECTIME_NO));
+
+static Sys_var_ulonglong Sys_max_rowid_filter_size(
+ "max_rowid_filter_size",
+ "The maximum size of the container of a rowid filter",
+ SESSION_VAR(max_rowid_filter_size), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(1024, (ulonglong)~(intptr)0), DEFAULT(128*1024),
+ BLOCK_SIZE(1));
diff --git a/sql/sys_vars.ic b/sql/sys_vars.ic
index d8d95046cc2..440890bccfd 100644
--- a/sql/sys_vars.ic
+++ b/sql/sys_vars.ic
@@ -596,7 +596,7 @@ public:
const char *comment,
CMD_LINE getopt,
enum charset_enum is_os_charset_arg,
- const char *def_val, PolyLock *lock) :
+ const char *def_val, PolyLock *lock= 0) :
Sys_var_charptr_base(name_arg, comment,
SESSION_VAR(session_track_system_variables),
getopt, is_os_charset_arg, def_val, lock,
@@ -2634,7 +2634,8 @@ public:
if (!Sys_var_enum::do_check(thd, var))
return false;
MYSQL_TIME ltime;
- bool res= var->value->get_date(&ltime, 0);
+ Datetime::Options opt(TIME_CONV_NONE, thd);
+ bool res= var->value->get_date(thd, &ltime, opt);
if (!res)
{
var->save_result.ulonglong_value= SYSTEM_TIME_AS_OF;
@@ -2651,7 +2652,9 @@ private:
{
if (var->value)
{
- res= var->value->get_date(&out.ltime, 0);
+ THD *thd= current_thd;
+ Datetime::Options opt(TIME_CONV_NONE, thd);
+ res= var->value->get_date(thd, &out.ltime, opt);
}
else // set DEFAULT from global var
{
diff --git a/sql/table.cc b/sql/table.cc
index 7042959215d..9567ed722f1 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2017, Oracle and/or its affiliates.
- Copyright (c) 2008, 2018, MariaDB
+ Copyright (c) 2008, 2019, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -44,11 +44,25 @@
#include "sql_cte.h"
#include "ha_sequence.h"
#include "sql_show.h"
+#include "opt_trace.h"
/* For MySQL 5.7 virtual fields */
#define MYSQL57_GENERATED_FIELD 128
#define MYSQL57_GCOL_HEADER_SIZE 4
+struct extra2_fields
+{
+ LEX_CUSTRING version;
+ LEX_CUSTRING options;
+ Lex_ident engine;
+ LEX_CUSTRING gis;
+ LEX_CUSTRING field_flags;
+ LEX_CUSTRING system_period;
+ LEX_CUSTRING application_period;
+ void reset()
+ { bzero((void*)this, sizeof(*this)); }
+};
+
static Virtual_column_info * unpack_vcol_info_from_frm(THD *, MEM_ROOT *,
TABLE *, String *, Virtual_column_info **, bool *);
static bool check_vcol_forward_refs(Field *, Virtual_column_info *,
@@ -70,8 +84,6 @@ LEX_CSTRING GENERAL_LOG_NAME= {STRING_WITH_LEN("general_log")};
LEX_CSTRING SLOW_LOG_NAME= {STRING_WITH_LEN("slow_log")};
LEX_CSTRING TRANSACTION_REG_NAME= {STRING_WITH_LEN("transaction_registry")};
-LEX_CSTRING MYSQL_USER_NAME= {STRING_WITH_LEN("user")};
-LEX_CSTRING MYSQL_DB_NAME= {STRING_WITH_LEN("db")};
LEX_CSTRING MYSQL_PROC_NAME= {STRING_WITH_LEN("proc")};
/*
@@ -80,7 +92,7 @@ LEX_CSTRING MYSQL_PROC_NAME= {STRING_WITH_LEN("proc")};
*/
static LEX_CSTRING parse_vcol_keyword= { STRING_WITH_LEN("PARSE_VCOL_EXPR ") };
-static int64 last_table_id;
+static std::atomic<ulong> last_table_id;
/* Functions defined in this file */
@@ -250,6 +262,13 @@ TABLE_CATEGORY get_table_category(const LEX_CSTRING *db,
DBUG_ASSERT(db != NULL);
DBUG_ASSERT(name != NULL);
+#ifdef WITH_WSREP
+ if (my_strcasecmp(system_charset_info, db->str, "mysql") == 0 &&
+ my_strcasecmp(system_charset_info, name->str, "wsrep_streaming_log") == 0)
+ {
+ return TABLE_CATEGORY_INFORMATION;
+ }
+#endif /* WITH_WSREP */
if (is_infoschema_db(db))
return TABLE_CATEGORY_INFORMATION;
@@ -348,8 +367,8 @@ TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
*/
do
{
- share->table_map_id=(ulong) my_atomic_add64_explicit(&last_table_id, 1,
- MY_MEMORY_ORDER_RELAXED);
+ share->table_map_id=
+ last_table_id.fetch_add(1, std::memory_order_relaxed);
} while (unlikely(share->table_map_id == ~0UL ||
share->table_map_id == 0));
}
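last_table_id switches from my_atomic_add64_explicit() to a std::atomic counter incremented with relaxed memory ordering; the surrounding do/while only retries when the result lands on one of the two reserved values (0 and ~0UL). The same pattern in isolation, as a compilable sketch rather than the server's allocation path:

#include <atomic>
#include <cstdio>

static std::atomic<unsigned long> last_id(0);

// Hand out the next id, skipping the reserved values 0 and ~0UL.
static unsigned long next_table_map_id()
{
  unsigned long id;
  do
  {
    // Relaxed ordering is enough: we need atomicity, not ordering with other data.
    id= last_id.fetch_add(1, std::memory_order_relaxed);
  } while (id == 0 || id == ~0UL);
  return id;
}

int main()
{
  std::printf("%lu %lu\n", next_table_map_id(), next_table_map_id());
  return 0;
}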
@@ -695,9 +714,9 @@ err_not_open:
static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
uint keys, KEY *keyinfo,
- uint new_frm_ver, uint &ext_key_parts,
+ uint new_frm_ver, uint *ext_key_parts,
TABLE_SHARE *share, uint len,
- KEY *first_keyinfo, char* &keynames)
+ KEY *first_keyinfo, char** keynames)
{
uint i, j, n_length;
KEY_PART_INFO *key_part= NULL;
@@ -753,8 +772,8 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
if (i == 0)
{
- ext_key_parts+= (share->use_ext_keys ? first_keyinfo->user_defined_key_parts*(keys-1) : 0);
- n_length=keys * sizeof(KEY) + ext_key_parts * sizeof(KEY_PART_INFO);
+ (*ext_key_parts)+= (share->use_ext_keys ? first_keyinfo->user_defined_key_parts*(keys-1) : 0);
+ n_length=keys * sizeof(KEY) + *ext_key_parts * sizeof(KEY_PART_INFO);
if (!(keyinfo= (KEY*) alloc_root(&share->mem_root,
n_length + len)))
return 1;
@@ -763,7 +782,7 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
key_part= reinterpret_cast<KEY_PART_INFO*> (keyinfo + keys);
if (!(rec_per_key= (ulong*) alloc_root(&share->mem_root,
- sizeof(ulong) * ext_key_parts)))
+ sizeof(ulong) * *ext_key_parts)))
return 1;
first_key_part= key_part;
first_key_parts= first_keyinfo->user_defined_key_parts;
@@ -805,6 +824,11 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
}
key_part->store_length=key_part->length;
}
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ {
+ keyinfo->key_length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
+ key_part++; // reserved for the hash value
+ }
/*
Add primary key to end of extended keys for non unique keys for
@@ -838,10 +862,12 @@ static bool create_key_infos(const uchar *strpos, const uchar *frm_image_end,
if (j == first_key_parts)
keyinfo->ext_key_flags= keyinfo->flags | HA_EXT_NOSAME;
}
- share->ext_key_parts+= keyinfo->ext_key_parts;
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ share->ext_key_parts++;
+ share->ext_key_parts+= keyinfo->ext_key_parts;
}
- keynames=(char*) key_part;
- strpos+= strnmov(keynames, (char *) strpos, frm_image_end - strpos) - keynames;
+ *keynames=(char*) key_part;
+ strpos+= strnmov(*keynames, (char *) strpos, frm_image_end - strpos) - *keynames;
if (*strpos++) // key names are \0-terminated
return 1;
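The extra key part and the fixed HA_HASH_KEY_LENGTH_WITHOUT_NULL key length reserved above for HA_KEY_ALG_LONG_HASH keys exist because long UNIQUE constraints are enforced through a hidden generated column that stores a hash of the (possibly prefix-truncated) key columns; only that hash is indexed, and the hash column itself is wired up later in parse_vcol_defs() further down in this diff. A purely conceptual sketch of the idea, not the server's hash function or collision handling:

#include <algorithm>
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

// Hash the concatenation of the key columns; prefix parts contribute only
// their first prefix_len[i] characters (the LEFT(col, n) trick used below).
static size_t long_unique_hash(const std::vector<std::string> &cols,
                               const std::vector<size_t> &prefix_len)
{
  std::string buf;
  for (size_t i= 0; i < cols.size(); i++)
  {
    size_t n= prefix_len[i] ? std::min(prefix_len[i], cols[i].size())
                            : cols[i].size();
    buf.append(cols[i], 0, n);
    buf.push_back('\0');                  // crude separator between key parts
  }
  return std::hash<std::string>()(buf);   // stand-in for the real hash
}

int main()
{
  // Two rows with an identical, very long value under a UNIQUE constraint.
  size_t h1= long_unique_hash({ std::string(10000, 'a') }, { 0 });
  size_t h2= long_unique_hash({ std::string(10000, 'a') }, { 0 });
  // Equal hashes send the engine to a full value comparison; different
  // hashes already prove the rows cannot collide on the constraint.
  std::printf("hashes equal: %d\n", (int) (h1 == h2));
  return 0;
}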
@@ -922,6 +948,54 @@ static uint upgrade_collation(ulong mysql_version, uint cs_number)
}
+void Column_definition_attributes::frm_pack_basic(uchar *buff) const
+{
+ int2store(buff + 3, length);
+ int2store(buff + 8, pack_flag);
+ buff[10]= (uchar) unireg_check;
+}
+
+
+void Column_definition_attributes::frm_unpack_basic(const uchar *buff)
+{
+ length= uint2korr(buff + 3);
+ pack_flag= uint2korr(buff + 8);
+ unireg_check= (Field::utype) MTYP_TYPENR((uint) buff[10]);
+}
+
+
+void Column_definition_attributes::frm_pack_charset(uchar *buff) const
+{
+ buff[11]= (uchar) (charset->number >> 8);
+ buff[14]= (uchar) charset->number;
+}
+
+
+bool Column_definition_attributes::frm_unpack_charset(TABLE_SHARE *share,
+ const uchar *buff)
+{
+ uint cs_org= buff[14] + (((uint) buff[11]) << 8);
+ uint cs_new= upgrade_collation(share->mysql_version, cs_org);
+ if (cs_org != cs_new)
+ share->incompatible_version|= HA_CREATE_USED_CHARSET;
+ if (cs_new && !(charset= get_charset(cs_new, MYF(0))))
+ {
+ const char *csname= get_charset_name((uint) cs_new);
+ char tmp[10];
+ if (!csname || csname[0] =='?')
+ {
+ my_snprintf(tmp, sizeof(tmp), "#%u", cs_new);
+ csname= tmp;
+ }
+ my_printf_error(ER_UNKNOWN_COLLATION,
+ "Unknown collation '%s' in table '%-.64s' definition",
+ MYF(0), csname, share->table_name.str);
+ return true;
+ }
+ return false;
+}
+
+
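frm_pack_basic(), frm_unpack_basic() and the charset pair above simply (de)serialize a few column attributes at fixed offsets inside the per-field record of the .frm image: length at offset 3 and pack_flag at offset 8 (2 bytes each, little-endian via int2store/uint2korr), unireg_check at byte 10, and the collation number split across bytes 11 (high) and 14 (low). A small round-trip sketch with hand-rolled little-endian helpers and an illustrative struct in place of Column_definition_attributes:

#include <cstdint>
#include <cstdio>
#include <cstring>

static void store2(unsigned char *p, uint16_t v)   // little-endian, like int2store
{ p[0]= (unsigned char) v; p[1]= (unsigned char) (v >> 8); }

static uint16_t read2(const unsigned char *p)      // like uint2korr
{ return (uint16_t) (p[0] | (p[1] << 8)); }

struct ColAttrSketch
{
  uint16_t length, pack_flag, charset_nr;
  uint8_t  unireg_check;
};

int main()
{
  unsigned char rec[17];                 // fixed-size per-field record
  std::memset(rec, 0, sizeof(rec));

  ColAttrSketch in= { 255, 0x1234, 224, 3 };       // 224: utf8mb4_unicode_ci
  store2(rec + 3, in.length);                      // bytes 3..4
  store2(rec + 8, in.pack_flag);                   // bytes 8..9
  rec[10]= in.unireg_check;                        // byte 10
  rec[11]= (unsigned char) (in.charset_nr >> 8);   // collation, high byte
  rec[14]= (unsigned char) in.charset_nr;          // collation, low byte

  ColAttrSketch out;
  out.length=       read2(rec + 3);
  out.pack_flag=    read2(rec + 8);
  out.unireg_check= rec[10];
  out.charset_nr=   (uint16_t) (rec[14] | (rec[11] << 8));

  std::printf("len=%u flag=0x%x check=%u cs=%u\n",
              (unsigned) out.length, (unsigned) out.pack_flag,
              (unsigned) out.unireg_check, (unsigned) out.charset_nr);
  return 0;
}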
/*
In MySQL 5.7 the null bits for not stored virtual fields are last.
Calculate the position for these bits
@@ -1102,10 +1176,57 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
pos+= expr_length;
}
- /* Now, initialize CURRENT_TIMESTAMP fields */
+ /* Now, initialize CURRENT_TIMESTAMP and UNIQUE_INDEX_HASH_FIELD fields */
for (field_ptr= table->field; *field_ptr; field_ptr++)
{
Field *field= *field_ptr;
+ if (field->flags & LONG_UNIQUE_HASH_FIELD)
+ {
+ List<Item> *field_list= new (mem_root) List<Item>();
+ Item *list_item;
+ KEY *key;
+ uint key_index, parts;
+ for (key_index= 0; key_index < table->s->keys; key_index++)
+ {
+ key=table->key_info + key_index;
+ parts= key->user_defined_key_parts;
+ if (key->key_part[parts].fieldnr == field->field_index + 1)
+ break;
+ }
+ if (key->algorithm != HA_KEY_ALG_LONG_HASH)
+ goto end;
+ KEY_PART_INFO *keypart;
+ for (uint i=0; i < parts; i++)
+ {
+ keypart= key->key_part + i;
+ if (keypart->key_part_flag & HA_PART_KEY_SEG)
+ {
+ int length= keypart->length/keypart->field->charset()->mbmaxlen;
+ list_item= new (mem_root) Item_func_left(thd,
+ new (mem_root) Item_field(thd, keypart->field),
+ new (mem_root) Item_int(thd, length));
+ list_item->fix_fields(thd, NULL);
+ keypart->field->vcol_info=
+ table->field[keypart->field->field_index]->vcol_info;
+ }
+ else
+ list_item= new (mem_root) Item_field(thd, keypart->field);
+ field_list->push_back(list_item, mem_root);
+ }
+ Item_func_hash *hash_item= new(mem_root)Item_func_hash(thd, *field_list);
+ Virtual_column_info *v= new (mem_root) Virtual_column_info();
+ field->vcol_info= v;
+ field->vcol_info->expr= hash_item;
+ key->user_defined_key_parts= key->ext_key_parts= key->usable_key_parts= 1;
+ key->key_part+= parts;
+
+ if (key->flags & HA_NULL_PART_KEY)
+ key->key_length= HA_HASH_KEY_LENGTH_WITH_NULL;
+ else
+ key->key_length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
+
+ *(vfield_ptr++)= *field_ptr;
+ }
if (field->has_default_now_unireg_check())
{
expr_str.length(parse_vcol_keyword.length);
@@ -1142,6 +1263,8 @@ bool parse_vcol_defs(THD *thd, MEM_ROOT *mem_root, TABLE *table,
goto end;
}
+ table->find_constraint_correlated_indexes();
+
res=0;
end:
thd->restore_active_arena(table->expr_arena, &backup_arena);
@@ -1152,6 +1275,255 @@ end:
DBUG_RETURN(res);
}
+
+static const Type_handler *old_frm_type_handler(uint pack_flag,
+ uint interval_nr)
+{
+ enum_field_types field_type= (enum_field_types) f_packtype(pack_flag);
+ DBUG_ASSERT(field_type < 16);
+
+ if (!f_is_alpha(pack_flag))
+ return Type_handler::get_handler_by_real_type(field_type);
+
+ if (!f_is_packed(pack_flag))
+ {
+ if (field_type == MYSQL_TYPE_DECIMAL) // 3.23 or 4.0 string
+ return &type_handler_string;
+ if (field_type == MYSQL_TYPE_VARCHAR) // Since mysql-5.0
+ return &type_handler_varchar;
+ return NULL; // Error (bad frm?)
+ }
+
+ if (f_is_blob(pack_flag))
+ return &type_handler_blob; // QQ: exact type??
+
+ if (interval_nr)
+ {
+ if (f_is_enum(pack_flag))
+ return &type_handler_enum;
+ return &type_handler_set;
+ }
+ return Type_handler::get_handler_by_real_type(field_type);
+}
+
+/* Set overlapped bitmaps for each index */
+
+void TABLE_SHARE::set_overlapped_keys()
+{
+ KEY *key1= key_info;
+ for (uint i= 0; i < keys; i++, key1++)
+ {
+ key1->overlapped.clear_all();
+ key1->overlapped.set_bit(i);
+ }
+ key1= key_info;
+ for (uint i= 0; i < keys; i++, key1++)
+ {
+ KEY *key2= key1 + 1;
+ for (uint j= i+1; j < keys; j++, key2++)
+ {
+ KEY_PART_INFO *key_part1= key1->key_part;
+ uint n1= key1->user_defined_key_parts;
+ uint n2= key2->user_defined_key_parts;
+ for (uint k= 0; k < n1; k++, key_part1++)
+ {
+ KEY_PART_INFO *key_part2= key2->key_part;
+ for (uint l= 0; l < n2; l++, key_part2++)
+ {
+ if (key_part1->fieldnr == key_part2->fieldnr)
+ {
+ key1->overlapped.set_bit(j);
+ key2->overlapped.set_bit(i);
+ goto end_checking_overlap;
+ }
+ }
+ }
+ end_checking_overlap:
+ ;
+ }
+ }
+}
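set_overlapped_keys() marks, for every pair of indexes, whether their user-defined key parts share at least one column; each key's overlapped map always includes the key itself. The same computation on plain data, with std::bitset in place of key_map and made-up index definitions:

#include <bitset>
#include <cstdio>
#include <vector>

typedef std::bitset<128> key_map_sketch;

int main()
{
  // Column numbers of each index's key parts, e.g. k0(a,b), k1(b,c), k2(d).
  std::vector<std::vector<int> > keys= { {1, 2}, {2, 3}, {4} };
  std::vector<key_map_sketch> overlapped(keys.size());

  for (size_t i= 0; i < keys.size(); i++)
    overlapped[i].set(i);                      // a key always overlaps itself

  for (size_t i= 0; i < keys.size(); i++)
    for (size_t j= i + 1; j < keys.size(); j++)
      for (int f1 : keys[i])
        for (int f2 : keys[j])
          if (f1 == f2)                        // common column => overlap
          {
            overlapped[i].set(j);
            overlapped[j].set(i);
          }

  std::printf("k0~k1:%d k0~k2:%d k1~k2:%d\n",  // prints 1 0 0
              (int) overlapped[0].test(1),
              (int) overlapped[0].test(2),
              (int) overlapped[1].test(2));
  return 0;
}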
+
+
+bool Item_field::check_index_dependence(void *arg)
+{
+ TABLE *table= (TABLE *)arg;
+
+ KEY *key= table->key_info;
+ for (uint j= 0; j < table->s->keys; j++, key++)
+ {
+ if (table->constraint_dependent_keys.is_set(j))
+ continue;
+
+ KEY_PART_INFO *key_part= key->key_part;
+ uint n= key->user_defined_key_parts;
+
+ for (uint k= 0; k < n; k++, key_part++)
+ {
+ if (this->field == key_part->field)
+ {
+ table->constraint_dependent_keys.set_bit(j);
+ break;
+ }
+ }
+ }
+ return false;
+}
+
+
+/**
+ @brief
+ Find keys that occur in the same constraint on this table
+
+ @details
+ Constraints on this table are checked only.
+
+ The method goes through constraints list trying to find at
+ least two keys which parts participate in some constraint.
+ These keys are called constraint correlated.
+
+ Each key has its own key map with the information about with
+ which keys it is constraint correlated. Bit in this map is set
+ only if keys are constraint correlated.
+ This method fills each keys constraint correlated key map.
+*/
+
+void TABLE::find_constraint_correlated_indexes()
+{
+ if (s->keys == 0)
+ return;
+
+ KEY *key= key_info;
+ for (uint i= 0; i < s->keys; i++, key++)
+ {
+ key->constraint_correlated.clear_all();
+ key->constraint_correlated.set_bit(i);
+ }
+
+ if (!check_constraints)
+ return;
+
+ for (Virtual_column_info **chk= check_constraints ; *chk ; chk++)
+ {
+ constraint_dependent_keys.clear_all();
+ (*chk)->expr->walk(&Item::check_index_dependence, 0, this);
+
+ if (constraint_dependent_keys.bits_set() <= 1)
+ continue;
+
+ uint key_no= 0;
+ key_map::Iterator ki(constraint_dependent_keys);
+ while ((key_no= ki++) != key_map::Iterator::BITMAP_END)
+ key_info[key_no].constraint_correlated.merge(constraint_dependent_keys);
+ }
+}
+
+
+bool TABLE_SHARE::init_period_from_extra2(period_info_t *period,
+ const uchar *data, const uchar *end)
+{
+ if (data + 2*frm_fieldno_size > end)
+ return 1;
+ period->start_fieldno= read_frm_fieldno(data);
+ period->end_fieldno= read_frm_fieldno(data + frm_fieldno_size);
+ return period->start_fieldno >= fields || period->end_fieldno >= fields;
+}
+
+
+static size_t extra2_read_len(const uchar **extra2, const uchar *extra2_end)
+{
+ size_t length= *(*extra2)++;
+ if (length)
+ return length;
+
+ if ((*extra2) + 2 >= extra2_end)
+ return 0;
+ length= uint2korr(*extra2);
+ (*extra2)+= 2;
+ if (length < 256 || *extra2 + length > extra2_end)
+ return 0;
+ return length;
+}
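extra2_read_len() implements the compact length encoding used by the extra2 records in the frm header: lengths 1..255 occupy a single byte, while a leading zero byte announces a 2-byte little-endian length that must be at least 256; a return value of 0 signals malformed input. A standalone decoder for the same scheme, with a slightly simplified bounds check and a made-up buffer:

#include <cstddef>
#include <cstdio>

// Decode one extra2-style length field, advancing *pos past the length bytes.
// Returns 0 on malformed input, like extra2_read_len() above.
static size_t read_len(const unsigned char **pos, const unsigned char *end)
{
  size_t length= *(*pos)++;
  if (length)
    return length;                                    // short form: 1..255
  if (*pos + 2 > end)
    return 0;
  length= (size_t) ((*pos)[0] | ((*pos)[1] << 8));    // little-endian, long form
  *pos+= 2;
  return length < 256 ? 0 : length;
}

int main()
{
  // 0x05           -> length 5 (short form)
  // 0x00 0x2C 0x01 -> length 0x012C = 300 (long form)
  const unsigned char buf[]= { 0x05, 0x00, 0x2C, 0x01 };
  const unsigned char *p= buf;
  size_t first= read_len(&p, buf + sizeof(buf));
  size_t second= read_len(&p, buf + sizeof(buf));
  std::printf("%zu %zu\n", first, second);            // prints 5 300
  return 0;
}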
+
+
+static
+bool read_extra2(const uchar *frm_image, size_t len, extra2_fields *fields)
+{
+ const uchar *extra2= frm_image + 64;
+
+ DBUG_ENTER("read_extra2");
+
+ fields->reset();
+
+ if (*extra2 != '/') // old frm had '/' there
+ {
+ const uchar *e2end= extra2 + len;
+ while (extra2 + 3 <= e2end)
+ {
+ extra2_frm_value_type type= (extra2_frm_value_type)*extra2++;
+ size_t length= extra2_read_len(&extra2, e2end);
+ if (!length)
+ DBUG_RETURN(true);
+ switch (type) {
+ case EXTRA2_TABLEDEF_VERSION:
+ if (fields->version.str) // see init_from_sql_statement_string()
+ {
+ if (length != fields->version.length)
+ DBUG_RETURN(true);
+ }
+ else
+ {
+ fields->version.str= extra2;
+ fields->version.length= length;
+ }
+ break;
+ case EXTRA2_ENGINE_TABLEOPTS:
+ if (fields->options.str)
+ DBUG_RETURN(true);
+ fields->options.str= extra2;
+ fields->options.length= length;
+ break;
+ case EXTRA2_DEFAULT_PART_ENGINE:
+ fields->engine.set((const char*)extra2, length);
+ break;
+ case EXTRA2_GIS:
+ if (fields->gis.str)
+ DBUG_RETURN(true);
+ fields->gis.str= extra2;
+ fields->gis.length= length;
+ break;
+ case EXTRA2_PERIOD_FOR_SYSTEM_TIME:
+ if (fields->system_period.str || length != 2 * frm_fieldno_size)
+ DBUG_RETURN(true);
+ fields->system_period.str = extra2;
+ fields->system_period.length= length;
+ break;
+ case EXTRA2_FIELD_FLAGS:
+ if (fields->field_flags.str)
+ DBUG_RETURN(true);
+ fields->field_flags.str= extra2;
+ fields->field_flags.length= length;
+ break;
+ case EXTRA2_APPLICATION_TIME_PERIOD:
+ if (fields->application_period.str)
+ DBUG_RETURN(true);
+ fields->application_period.str= extra2;
+ fields->application_period.length= length;
+ break;
+ default:
+ /* abort frm parsing if it's an unknown but important extra2 value */
+ if (type >= EXTRA2_ENGINE_IMPORTANT)
+ DBUG_RETURN(true);
+ }
+ extra2+= length;
+ }
+ if (extra2 != e2end)
+ DBUG_RETURN(true);
+ }
+ DBUG_RETURN(false);
+}
+
+
/**
Read data from a binary .frm file image into a TABLE_SHARE
@@ -1177,10 +1549,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
uint interval_count, interval_parts, read_length, int_length;
uint db_create_options, keys, key_parts, n_length;
uint com_length, null_bit_pos, UNINIT_VAR(mysql57_vcol_null_bit_pos), bitmap_count;
- uint i;
+ uint i, hash_fields= 0;
bool use_hash, mysql57_null_bits= 0;
char *keynames, *names, *comment_pos;
- const uchar *forminfo, *extra2;
+ const uchar *forminfo;
const uchar *frm_image_end = frm_image + frm_length;
uchar *record, *null_flags, *null_pos, *UNINIT_VAR(mysql57_vcol_null_pos);
const uchar *disk_buff, *strpos;
@@ -1195,26 +1567,21 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
my_bitmap_map *bitmaps;
bool null_bits_are_used;
uint vcol_screen_length;
- size_t UNINIT_VAR(options_len);
uchar *vcol_screen_pos;
- const uchar *options= 0;
- size_t UNINIT_VAR(gis_options_len);
- const uchar *gis_options= 0;
+ LEX_CUSTRING options;
KEY first_keyinfo;
uint len;
uint ext_key_parts= 0;
plugin_ref se_plugin= 0;
- const uchar *system_period= 0;
bool vers_can_native= false;
- const uchar *extra2_field_flags= 0;
- size_t extra2_field_flags_length= 0;
MEM_ROOT *old_root= thd->mem_root;
Virtual_column_info **table_check_constraints;
+ extra2_fields extra2;
+
DBUG_ENTER("TABLE_SHARE::init_from_binary_frm_image");
keyinfo= &first_keyinfo;
- share->ext_key_parts= 0;
thd->mem_root= &share->mem_root;
if (write && write_frm_image(frm_image, frm_length))
@@ -1239,90 +1606,27 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
/* Length of the MariaDB extra2 segment in the form file. */
len = uint2korr(frm_image+4);
- extra2= frm_image + 64;
- if (*extra2 != '/') // old frm had '/' there
- {
- const uchar *e2end= extra2 + len;
- while (extra2 + 3 <= e2end)
- {
- uchar type= *extra2++;
- size_t length= *extra2++;
- if (!length)
- {
- if (extra2 + 2 >= e2end)
- goto err;
- length= uint2korr(extra2);
- extra2+= 2;
- if (length < 256)
- goto err;
- }
- if (extra2 + length > e2end)
- goto err;
- switch (type) {
- case EXTRA2_TABLEDEF_VERSION:
- if (tabledef_version.str) // see init_from_sql_statement_string()
- {
- if (length != tabledef_version.length ||
- memcmp(extra2, tabledef_version.str, length))
- goto err;
- }
- else
- {
- tabledef_version.length= length;
- tabledef_version.str= (uchar*)memdup_root(&mem_root, extra2, length);
- if (!tabledef_version.str)
- goto err;
- }
- break;
- case EXTRA2_ENGINE_TABLEOPTS:
- if (options)
- goto err;
- /* remember but delay parsing until we have read fields and keys */
- options= extra2;
- options_len= length;
- break;
- case EXTRA2_DEFAULT_PART_ENGINE:
+ if (read_extra2(frm_image, len, &extra2))
+ goto err;
+
+ tabledef_version.length= extra2.version.length;
+ tabledef_version.str= (uchar*)memdup_root(&mem_root, extra2.version.str,
+ extra2.version.length);
+ if (!tabledef_version.str)
+ goto err;
+
+ /* remember but delay parsing until we have read fields and keys */
+ options= extra2.options;
+
#ifdef WITH_PARTITION_STORAGE_ENGINE
- {
- LEX_CSTRING name= { (char*)extra2, length };
- share->default_part_plugin= ha_resolve_by_name(NULL, &name, false);
- if (!share->default_part_plugin)
- goto err;
- }
-#endif
- break;
- case EXTRA2_GIS:
-#ifdef HAVE_SPATIAL
- {
- if (gis_options)
- goto err;
- gis_options= extra2;
- gis_options_len= length;
- }
-#endif /*HAVE_SPATIAL*/
- break;
- case EXTRA2_PERIOD_FOR_SYSTEM_TIME:
- if (system_period || length != 2 * sizeof(uint16))
- goto err;
- system_period = extra2;
- break;
- case EXTRA2_FIELD_FLAGS:
- if (extra2_field_flags)
- goto err;
- extra2_field_flags= extra2;
- extra2_field_flags_length= length;
- break;
- default:
- /* abort frm parsing if it's an unknown but important extra2 value */
- if (type >= EXTRA2_ENGINE_IMPORTANT)
- goto err;
- }
- extra2+= length;
- }
- if (extra2 != e2end)
+ if (extra2.engine)
+ {
+ share->default_part_plugin= ha_resolve_by_name(NULL, &extra2.engine, false);
+ if (!share->default_part_plugin)
goto err;
}
+#endif
if (frm_length < FRM_HEADER_SIZE + len ||
!(pos= uint4korr(frm_image + FRM_HEADER_SIZE + len)))
@@ -1517,8 +1821,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share->set_use_ext_keys_flag(plugin_hton(se_plugin)->flags & HTON_SUPPORTS_EXTENDED_KEYS);
if (create_key_infos(disk_buff + 6, frm_image_end, keys, keyinfo,
- new_frm_ver, ext_key_parts,
- share, len, &first_keyinfo, keynames))
+ new_frm_ver, &ext_key_parts,
+ share, len, &first_keyinfo, &keynames))
goto err;
if (next_chunk + 5 < buff_end)
@@ -1599,23 +1903,26 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (share->db_create_options & HA_OPTION_TEXT_CREATE_OPTIONS_legacy)
{
- if (options)
+ if (options.str)
goto err;
- options_len= uint4korr(next_chunk);
- options= next_chunk + 4;
- next_chunk+= options_len + 4;
+ options.length= uint4korr(next_chunk);
+ options.str= next_chunk + 4;
+ next_chunk+= options.length + 4;
}
DBUG_ASSERT(next_chunk <= buff_end);
}
else
{
if (create_key_infos(disk_buff + 6, frm_image_end, keys, keyinfo,
- new_frm_ver, ext_key_parts,
- share, len, &first_keyinfo, keynames))
+ new_frm_ver, &ext_key_parts,
+ share, len, &first_keyinfo, &keynames))
goto err;
}
-
share->key_block_size= uint2korr(frm_image+62);
+ keyinfo= share->key_info;
+ for (uint i= 0; i < share->keys; i++, keyinfo++)
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ hash_fields++;
if (share->db_plugin && !plugin_equals(share->db_plugin, se_plugin))
goto err; // wrong engine (someone changed the frm under our feet?)
@@ -1631,7 +1938,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
disk_buff= frm_image + pos + FRM_FORMINFO_SIZE;
share->fields= uint2korr(forminfo+258);
- if (extra2_field_flags && extra2_field_flags_length != share->fields)
+ if (extra2.field_flags.str && extra2.field_flags.length != share->fields)
goto err;
pos= uint2korr(forminfo+260); /* Length of all screens */
n_length= uint2korr(forminfo+268);
@@ -1766,106 +2073,59 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
/* Set system versioning information. */
- if (system_period == NULL)
+ vers.name= Lex_ident(STRING_WITH_LEN("SYSTEM_TIME"));
+ if (extra2.system_period.str == NULL)
{
versioned= VERS_UNDEFINED;
- row_start_field= 0;
- row_end_field= 0;
+ vers.start_fieldno= 0;
+ vers.end_fieldno= 0;
}
else
{
DBUG_PRINT("info", ("Setting system versioning informations"));
- uint16 row_start= uint2korr(system_period);
- uint16 row_end= uint2korr(system_period + sizeof(uint16));
- if (row_start >= share->fields || row_end >= share->fields)
+ if (init_period_from_extra2(&vers, extra2.system_period.str,
+ extra2.system_period.str + extra2.system_period.length))
goto err;
- DBUG_PRINT("info", ("Columns with system versioning: [%d, %d]", row_start, row_end));
+ DBUG_PRINT("info", ("Columns with system versioning: [%d, %d]",
+ vers.start_fieldno, vers.end_fieldno));
versioned= VERS_TIMESTAMP;
vers_can_native= handler_file->vers_can_native(thd);
- row_start_field= row_start;
- row_end_field= row_end;
status_var_increment(thd->status_var.feature_system_versioning);
} // if (system_period == NULL)
+ if (extra2.application_period.str)
+ {
+ const uchar *pos= extra2.application_period.str;
+ const uchar *end= pos + extra2.application_period.length;
+ period.name.length= extra2_read_len(&pos, end);
+ period.name.str= strmake_root(&mem_root, (char*)pos, period.name.length);
+ pos+= period.name.length;
+
+ period.constr_name.length= extra2_read_len(&pos, end);
+ period.constr_name.str= strmake_root(&mem_root, (char*)pos,
+ period.constr_name.length);
+ pos+= period.constr_name.length;
+
+ if (init_period_from_extra2(&period, pos, end))
+ goto err;
+ status_var_increment(thd->status_var.feature_application_time_periods);
+ }
+
for (i=0 ; i < share->fields; i++, strpos+=field_pack_length, field_ptr++)
{
- uint pack_flag, interval_nr, unireg_type, recpos, field_length;
- uint vcol_info_length=0;
- uint vcol_expr_length=0;
- enum_field_types field_type;
- CHARSET_INFO *charset=NULL;
- Field::geometry_type geom_type= Field::GEOM_GEOMETRY;
+ uint interval_nr= 0, recpos;
LEX_CSTRING comment;
LEX_CSTRING name;
Virtual_column_info *vcol_info= 0;
- uint gis_length, gis_decimals, srid= 0;
- Field::utype unireg_check;
const Type_handler *handler;
uint32 flags= 0;
+ Column_definition_attributes attr;
if (new_frm_ver >= 3)
{
/* new frm file in 4.1 */
- field_length= uint2korr(strpos+3);
recpos= uint3korr(strpos+5);
- pack_flag= uint2korr(strpos+8);
- unireg_type= (uint) strpos[10];
- interval_nr= (uint) strpos[12];
uint comment_length=uint2korr(strpos+15);
- field_type=(enum_field_types) (uint) strpos[13];
-
- /* charset and geometry_type share the same byte in frm */
- if (field_type == MYSQL_TYPE_GEOMETRY)
- {
-#ifdef HAVE_SPATIAL
- uint gis_opt_read;
- Field_geom::storage_type st_type;
- geom_type= (Field::geometry_type) strpos[14];
- charset= &my_charset_bin;
- gis_opt_read= gis_field_options_read(gis_options, gis_options_len,
- &st_type, &gis_length, &gis_decimals, &srid);
- gis_options+= gis_opt_read;
- gis_options_len-= gis_opt_read;
-#else
- goto err;
-#endif
- }
- else
- {
- uint cs_org= strpos[14] + (((uint) strpos[11]) << 8);
- uint cs_new= upgrade_collation(share->mysql_version, cs_org);
- if (cs_org != cs_new)
- share->incompatible_version|= HA_CREATE_USED_CHARSET;
- if (!cs_new)
- charset= &my_charset_bin;
- else if (!(charset= get_charset(cs_new, MYF(0))))
- {
- const char *csname= get_charset_name((uint) cs_new);
- char tmp[10];
- if (!csname || csname[0] =='?')
- {
- my_snprintf(tmp, sizeof(tmp), "#%u", cs_new);
- csname= tmp;
- }
- my_printf_error(ER_UNKNOWN_COLLATION,
- "Unknown collation '%s' in table '%-.64s' definition",
- MYF(0), csname, share->table_name.str);
- goto err;
- }
- }
-
- if ((uchar)field_type == (uchar)MYSQL_TYPE_VIRTUAL)
- {
- if (!interval_nr) // Expect non-null expression
- goto err;
- /*
- MariaDB version 10.0 version.
- The interval_id byte in the .frm file stores the length of the
- expression statement for a virtual column.
- */
- vcol_info_length= interval_nr;
- interval_nr= 0;
- }
if (!comment_length)
{
@@ -1879,33 +2139,21 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
comment_pos+= comment_length;
}
- if (unireg_type & MYSQL57_GENERATED_FIELD)
+ if ((uchar) strpos[13] == (uchar) MYSQL_TYPE_VIRTUAL)
{
- unireg_type&= MYSQL57_GENERATED_FIELD;
-
/*
- MySQL 5.7 generated fields
-
- byte 1 = 1
- byte 2,3 = expr length
- byte 4 = stored_in_db
- byte 5.. = expr
+ MariaDB version 10.0 version.
+ The interval_id byte in the .frm file stores the length of the
+ expression statement for a virtual column.
*/
- if ((uint)(vcol_screen_pos)[0] != 1)
- goto err;
- vcol_info= new (&share->mem_root) Virtual_column_info();
- vcol_info_length= uint2korr(vcol_screen_pos + 1);
- if (!vcol_info_length) // Expect non-empty expression
+ uint vcol_info_length= (uint) strpos[12];
+
+ if (!vcol_info_length) // Expect non-null expression
goto err;
- vcol_info->stored_in_db= vcol_screen_pos[3];
- vcol_info->utf8= 0;
- vcol_screen_pos+= vcol_info_length + MYSQL57_GCOL_HEADER_SIZE;;
- share->virtual_fields++;
- vcol_info_length= 0;
- }
- if (vcol_info_length)
- {
+ attr.frm_unpack_basic(strpos);
+ if (attr.frm_unpack_charset(share, strpos))
+ goto err;
/*
Old virtual field information before 10.2
@@ -1919,7 +2167,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
vcol_info= new (&share->mem_root) Virtual_column_info();
bool opt_interval_id= (uint)vcol_screen_pos[0] == 2;
- field_type= (enum_field_types) (uchar) vcol_screen_pos[1];
+ enum_field_types ftype= (enum_field_types) (uchar) vcol_screen_pos[1];
+ if (!(handler= Type_handler::get_handler_by_real_type(ftype)))
+ goto err;
if (opt_interval_id)
interval_nr= (uint)vcol_screen_pos[3];
else if ((uint)vcol_screen_pos[0] != 1)
@@ -1927,26 +2177,64 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
bool stored= vcol_screen_pos[2] & 1;
vcol_info->stored_in_db= stored;
vcol_info->set_vcol_type(stored ? VCOL_GENERATED_STORED : VCOL_GENERATED_VIRTUAL);
- vcol_expr_length= vcol_info_length -
- (uint)(FRM_VCOL_OLD_HEADER_SIZE(opt_interval_id));
+ uint vcol_expr_length= vcol_info_length -
+ (uint)(FRM_VCOL_OLD_HEADER_SIZE(opt_interval_id));
vcol_info->utf8= 0; // before 10.2.1 the charset was unknown
int2store(vcol_screen_pos+1, vcol_expr_length); // for parse_vcol_defs()
vcol_screen_pos+= vcol_info_length;
share->virtual_fields++;
}
+ else
+ {
+ interval_nr= (uint) strpos[12];
+ enum_field_types field_type= (enum_field_types) strpos[13];
+ if (!(handler= Type_handler::get_handler_by_real_type(field_type)))
+ goto err; // Not supported field type
+ if (handler->Column_definition_attributes_frm_unpack(&attr, share,
+ strpos,
+ &extra2.gis))
+ goto err;
+ }
+
+ if (((uint) strpos[10]) & MYSQL57_GENERATED_FIELD)
+ {
+ attr.unireg_check= Field::NONE;
+
+ /*
+ MySQL 5.7 generated fields
+
+ byte 1 = 1
+ byte 2,3 = expr length
+ byte 4 = stored_in_db
+ byte 5.. = expr
+ */
+ if ((uint)(vcol_screen_pos)[0] != 1)
+ goto err;
+ vcol_info= new (&share->mem_root) Virtual_column_info();
+ uint vcol_info_length= uint2korr(vcol_screen_pos + 1);
+ if (!vcol_info_length) // Expect non-empty expression
+ goto err;
+ vcol_info->stored_in_db= vcol_screen_pos[3];
+ vcol_info->utf8= 0;
+        vcol_screen_pos+= vcol_info_length + MYSQL57_GCOL_HEADER_SIZE;
+ share->virtual_fields++;
+ }
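For reference, the MySQL 5.7 generated-column header handled above can be decoded on its own; the following minimal sketch (Gcol57Header and read_gcol57_header are illustrative names, not server source) follows the byte layout documented in the comment: byte 1 must be 1, bytes 2-3 hold the little-endian expression length, byte 4 is the stored_in_db flag, and the expression text starts at byte 5.

#include <cstdint>

struct Gcol57Header
{
  bool valid;                   // byte 1 must be 1
  uint16_t expr_length;         // bytes 2,3: little-endian expression length
  bool stored_in_db;            // byte 4
  const unsigned char *expr;    // bytes 5..: expression text
};

static Gcol57Header read_gcol57_header(const unsigned char *pos)
{
  Gcol57Header h;
  h.valid= pos[0] == 1;
  h.expr_length= (uint16_t) (pos[1] | (pos[2] << 8));  // same result as uint2korr()
  h.stored_in_db= pos[3] != 0;
  h.expr= pos + 4;
  return h;
}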
}
else
{
- field_length= (uint) strpos[3];
+ attr.length= (uint) strpos[3];
recpos= uint2korr(strpos+4),
- pack_flag= uint2korr(strpos+6);
- pack_flag&= ~FIELDFLAG_NO_DEFAULT; // Safety for old files
- unireg_type= (uint) strpos[8];
+ attr.pack_flag= uint2korr(strpos+6);
+ attr.pack_flag&= ~FIELDFLAG_NO_DEFAULT; // Safety for old files
+ attr.unireg_check= (Field::utype) MTYP_TYPENR((uint) strpos[8]);
interval_nr= (uint) strpos[10];
/* old frm file */
- field_type= (enum_field_types) f_packtype(pack_flag);
- if (f_is_binary(pack_flag))
+ enum_field_types ftype= (enum_field_types) f_packtype(attr.pack_flag);
+ if (!(handler= Type_handler::get_handler_by_real_type(ftype)))
+ goto err; // Not supported field type
+
+ if (f_is_binary(attr.pack_flag))
{
/*
Try to choose the best 4.1 type:
@@ -1954,26 +2242,26 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
try to find a binary collation for character set.
- for other types (e.g. BLOB) just use my_charset_bin.
*/
- if (!f_is_blob(pack_flag))
+ if (!f_is_blob(attr.pack_flag))
{
// 3.23 or 4.0 string
- if (!(charset= get_charset_by_csname(share->table_charset->csname,
- MY_CS_BINSORT, MYF(0))))
- charset= &my_charset_bin;
+ if (!(attr.charset= get_charset_by_csname(share->table_charset->csname,
+ MY_CS_BINSORT, MYF(0))))
+ attr.charset= &my_charset_bin;
}
- else
- charset= &my_charset_bin;
}
else
- charset= share->table_charset;
+ attr.charset= share->table_charset;
bzero((char*) &comment, sizeof(comment));
+    if (!(handler= old_frm_type_handler(attr.pack_flag, interval_nr)))
+ goto err; // Not supported field type
}
/* Remove >32 decimals from old files */
if (share->mysql_version < 100200)
- pack_flag&= ~FIELDFLAG_LONG_DECIMAL;
+ attr.pack_flag&= ~FIELDFLAG_LONG_DECIMAL;
- if (interval_nr && charset->mbminlen > 1)
+ if (interval_nr && attr.charset->mbminlen > 1)
{
/* Unescape UCS2 intervals from HEX notation */
TYPELIB *interval= share->intervals + interval_nr - 1;
@@ -1981,17 +2269,18 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
#ifndef TO_BE_DELETED_ON_PRODUCTION
- if (field_type == MYSQL_TYPE_NEWDECIMAL && !share->mysql_version)
+ if (handler->real_field_type() == MYSQL_TYPE_NEWDECIMAL &&
+ !share->mysql_version)
{
/*
Fix pack length of old decimal values from 5.0.3 -> 5.0.4
The difference is that in the old version we stored precision
in the .frm table while we now store the display_length
*/
- uint decimals= f_decimals(pack_flag);
- field_length= my_decimal_precision_to_length(field_length,
- decimals,
- f_is_dec(pack_flag) == 0);
+ uint decimals= f_decimals(attr.pack_flag);
+ attr.length=
+ my_decimal_precision_to_length((uint) attr.length, decimals,
+ f_is_dec(attr.pack_flag) == 0);
sql_print_error("Found incompatible DECIMAL field '%s' in %s; "
"Please do \"ALTER TABLE '%s' FORCE\" to fix it!",
share->fieldnames.type_names[i], share->table_name.str,
@@ -2015,14 +2304,14 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (versioned)
{
- if (i == row_start_field)
+ if (i == vers.start_fieldno)
flags|= VERS_SYS_START_FLAG;
- else if (i == row_end_field)
+ else if (i == vers.end_fieldno)
flags|= VERS_SYS_END_FLAG;
if (flags & VERS_SYSTEM_FIELD)
{
- switch (field_type)
+ switch (handler->real_field_type())
{
case MYSQL_TYPE_TIMESTAMP2:
case MYSQL_TYPE_DATETIME2:
@@ -2044,22 +2333,17 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
/* Convert pre-10.2.2 timestamps to use Field::default_value */
- unireg_check= (Field::utype) MTYP_TYPENR(unireg_type);
name.str= fieldnames.type_names[i];
name.length= strlen(name.str);
- if (!(handler= Type_handler::get_handler_by_real_type(field_type)))
- goto err; // Not supported field type
+ attr.interval= interval_nr ? share->intervals + interval_nr - 1 : NULL;
+ Record_addr addr(record + recpos, null_pos, null_bit_pos);
*field_ptr= reg_field=
- make_field(share, &share->mem_root, record+recpos, (uint32) field_length,
- null_pos, null_bit_pos, pack_flag, handler, charset,
- geom_type, srid, unireg_check,
- (interval_nr ? share->intervals+interval_nr-1 : NULL),
- &name, flags);
+ attr.make_field(share, &share->mem_root, &addr, handler, &name, flags);
if (!reg_field) // Not supported field type
goto err;
- if (unireg_check == Field::TIMESTAMP_DNUN_FIELD ||
- unireg_check == Field::TIMESTAMP_DN_FIELD)
+ if (attr.unireg_check == Field::TIMESTAMP_DNUN_FIELD ||
+ attr.unireg_check == Field::TIMESTAMP_DN_FIELD)
{
reg_field->default_value= new (&share->mem_root) Virtual_column_info();
reg_field->default_value->set_vcol_type(VCOL_DEFAULT);
@@ -2071,9 +2355,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
reg_field->comment=comment;
reg_field->vcol_info= vcol_info;
reg_field->flags|= flags;
- if (extra2_field_flags)
+ if (extra2.field_flags.str)
{
- uchar flags= *extra2_field_flags++;
+ uchar flags= *extra2.field_flags.str++;
if (flags & VERS_OPTIMIZED_UPDATE)
reg_field->flags|= VERS_UPDATE_UNVERSIONED_FLAG;
@@ -2083,10 +2367,11 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
status_var_increment(thd->status_var.feature_invisible_columns);
if (!reg_field->invisible)
share->visible_fields++;
- if (field_type == MYSQL_TYPE_BIT && !f_bit_as_char(pack_flag))
+ if (handler->real_field_type() == MYSQL_TYPE_BIT &&
+ !f_bit_as_char(attr.pack_flag))
{
null_bits_are_used= 1;
- if ((null_bit_pos+= field_length & 7) > 7)
+ if ((null_bit_pos+= (uint) (attr.length & 7)) > 7)
{
null_pos++;
null_bit_pos-= 8;
@@ -2109,7 +2394,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
}
- if (f_no_default(pack_flag))
+ if (f_no_default(attr.pack_flag))
reg_field->flags|= NO_DEFAULT_VALUE_FLAG;
if (reg_field->unireg_check == Field::NEXT_NUMBER)
@@ -2146,6 +2431,37 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
/* Fix key->name and key_part->field */
if (key_parts)
{
+ keyinfo= share->key_info;
+ uint hash_field_used_no= share->fields - hash_fields;
+ KEY_PART_INFO *hash_keypart;
+ Field *hash_field;
+ uint offset= share->reclength - HA_HASH_FIELD_LENGTH * hash_fields;
+ for (uint i= 0; i < share->keys; i++, keyinfo++)
+ {
+      /* We need to set up the value in the hash key_part */
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ {
+ share->long_unique_table= 1;
+ hash_keypart= keyinfo->key_part + keyinfo->user_defined_key_parts;
+ hash_keypart->length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
+ hash_keypart->store_length= hash_keypart->length;
+ hash_keypart->type= HA_KEYTYPE_ULONGLONG;
+ hash_keypart->key_part_flag= 0;
+ hash_keypart->key_type= 32834;
+        /* The last n fields are unique_index_hash fields */
+ hash_keypart->offset= offset;
+ hash_keypart->fieldnr= hash_field_used_no + 1;
+ hash_field= share->field[hash_field_used_no];
+        hash_field->flags|= LONG_UNIQUE_HASH_FIELD; // Used in parse_vcol_defs
+ keyinfo->flags|= HA_NOSAME;
+ share->virtual_fields++;
+ share->stored_fields--;
+ if (record + share->stored_rec_length >= hash_field->ptr)
+ share->stored_rec_length= (ulong)(hash_field->ptr - record - 1);
+ hash_field_used_no++;
+ offset+= HA_HASH_FIELD_LENGTH;
+ }
+ }
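The loop above wires up the hidden hash key part behind each HA_KEY_ALG_LONG_HASH key. Conceptually, a long UNIQUE key indexes only a fixed-size hash of the column values and resolves hash collisions by comparing the full values; a minimal standalone sketch of that idea, with illustrative names only (LongUniqueIndex is not server code):

#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

struct LongUniqueIndex
{
  // hash value -> row ids sharing that hash (possible collisions)
  std::unordered_multimap<uint64_t, std::size_t> idx;

  // returns false if another row already holds the same full value
  bool insert_unique(const std::vector<std::string> &rows, std::size_t row_id)
  {
    uint64_t h= std::hash<std::string>{}(rows[row_id]);  // stand-in for the server's hash
    auto range= idx.equal_range(h);
    for (auto it= range.first; it != range.second; ++it)
      if (rows[it->second] == rows[row_id])
        return false;                                    // genuine duplicate
    idx.emplace(h, row_id);                              // new value or mere hash collision
    return true;
  }
};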
uint add_first_key_parts= 0;
longlong ha_option= handler_file->ha_table_flags();
keyinfo= share->key_info;
@@ -2153,7 +2469,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
primary_key_name) ? MAX_KEY : 0;
KEY* key_first_info= NULL;
- if (primary_key >= MAX_KEY && keyinfo->flags & HA_NOSAME)
+ if (primary_key >= MAX_KEY && keyinfo->flags & HA_NOSAME &&
+ keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
{
/*
If the UNIQUE key doesn't have NULL columns and is not a part key
@@ -2188,7 +2505,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
if (share->use_ext_keys)
- {
+ {
if (primary_key >= MAX_KEY)
{
add_first_key_parts= 0;
@@ -2240,6 +2557,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
uint length_bytes= 0, len_null_byte= 0, ext_key_length= 0;
Field *field;
+ if ((keyinfo-1)->algorithm == HA_KEY_ALG_LONG_HASH)
+ new_key_part++; // reserved for the hash value
+
/*
Do not extend the key that contains a component
defined over the beginning of a field.
@@ -2247,22 +2567,22 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
for (i= 0; i < keyinfo->user_defined_key_parts; i++)
{
uint fieldnr= keyinfo->key_part[i].fieldnr;
- field= share->field[keyinfo->key_part[i].fieldnr-1];
+ field= share->field[fieldnr-1];
if (field->null_ptr)
len_null_byte= HA_KEY_NULL_LENGTH;
- if (field->type() == MYSQL_TYPE_BLOB ||
+ if ((field->type() == MYSQL_TYPE_BLOB ||
field->real_type() == MYSQL_TYPE_VARCHAR ||
- field->type() == MYSQL_TYPE_GEOMETRY)
+ field->type() == MYSQL_TYPE_GEOMETRY) &&
+            keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
{
length_bytes= HA_KEY_BLOB_LENGTH;
}
ext_key_length+= keyinfo->key_part[i].length + len_null_byte
+ length_bytes;
- if (share->field[fieldnr-1]->key_length() !=
- keyinfo->key_part[i].length)
+ if (field->key_length() != keyinfo->key_part[i].length)
{
add_keyparts_for_this_key= 0;
break;
@@ -2319,6 +2639,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
key_part= keyinfo->key_part;
uint key_parts= share->use_ext_keys ? keyinfo->ext_key_parts :
keyinfo->user_defined_key_parts;
+ if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
+ key_parts++;
for (i=0; i < key_parts; key_part++, i++)
{
Field *field;
@@ -2332,8 +2654,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
field= key_part->field= share->field[key_part->fieldnr-1];
key_part->type= field->key_type();
+
if (field->invisible > INVISIBLE_USER && !field->vers_sys_field())
- keyinfo->flags |= HA_INVISIBLE_KEY;
+ if (keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
+ keyinfo->flags |= HA_INVISIBLE_KEY;
if (field->null_ptr)
{
key_part->null_offset=(uint) ((uchar*) field->null_ptr -
@@ -2359,13 +2683,15 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
key_part->key_part_flag|= HA_BIT_PART;
if (i == 0 && key != primary_key)
- field->flags |= (((keyinfo->flags & HA_NOSAME) &&
+ field->flags |= (((keyinfo->flags & HA_NOSAME ||
+ keyinfo->algorithm == HA_KEY_ALG_LONG_HASH) &&
(keyinfo->user_defined_key_parts == 1)) ?
UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG);
if (i == 0)
field->key_start.set_bit(key);
if (field->key_length() == key_part->length &&
- !(field->flags & BLOB_FLAG))
+ !(field->flags & BLOB_FLAG) &&
+ keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
{
if (handler_file->index_flags(key, i, 0) & HA_KEYREAD_ONLY)
{
@@ -2400,7 +2726,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (field->key_length() != key_part->length)
{
#ifndef TO_BE_DELETED_ON_PRODUCTION
- if (field->type() == MYSQL_TYPE_NEWDECIMAL)
+ if (field->type() == MYSQL_TYPE_NEWDECIMAL &&
+ keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
{
/*
Fix a fatal error in decimal key handling that causes crashes
@@ -2439,7 +2766,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (!(key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART |
HA_BIT_PART)) &&
key_part->type != HA_KEYTYPE_FLOAT &&
- key_part->type != HA_KEYTYPE_DOUBLE)
+          key_part->type != HA_KEYTYPE_DOUBLE &&
+ keyinfo->algorithm != HA_KEY_ALG_LONG_HASH)
key_part->key_part_flag|= HA_CAN_MEMCMP;
}
keyinfo->usable_key_parts= usable_parts; // Filesort
@@ -2487,6 +2815,8 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
null_length, 255);
}
+ set_overlapped_keys();
+
/* Handle virtual expressions */
if (vcol_screen_length && share->frm_version >= FRM_VER_EXPRESSSIONS)
{
@@ -2579,10 +2909,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
(uint) (share->table_check_constraints -
share->field_check_constraints));
- if (options)
+ if (options.str)
{
- DBUG_ASSERT(options_len);
- if (engine_table_options_frm_read(options, options_len, share))
+ DBUG_ASSERT(options.length);
+ if (engine_table_options_frm_read(options.str, options.length, share))
goto err;
}
if (parse_engine_table_options(thd, handler_file->partition_ht(), share))
@@ -2701,7 +3031,7 @@ static bool sql_unusable_for_discovery(THD *thd, handlerton *engine,
if (lex->create_info.like())
return 1;
// ... create select
- if (lex->select_lex.item_list.elements)
+ if (lex->first_select_lex()->item_list.elements)
return 1;
// ... temporary
if (create_info->tmp_table())
@@ -2789,7 +3119,7 @@ int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write,
thd->lex->create_info.tabledef_version= tabledef_version;
promote_first_timestamp_column(&thd->lex->alter_info.create_list);
- file= mysql_create_frm_image(thd, &db, &table_name,
+ file= mysql_create_frm_image(thd, db, table_name,
&thd->lex->create_info, &thd->lex->alter_info,
C_ORDINARY_CREATE, &unused1, &unused2, &frm);
error|= file == 0;
@@ -2890,7 +3220,7 @@ bool fix_session_vcol_expr(THD *thd, Virtual_column_info *vcol)
DBUG_RETURN(0);
vcol->expr->walk(&Item::cleanup_excluding_fields_processor, 0, 0);
- DBUG_ASSERT(!vcol->expr->fixed);
+ DBUG_ASSERT(!vcol->expr->is_fixed());
DBUG_RETURN(fix_vcol_expr(thd, vcol));
}
@@ -2945,7 +3275,7 @@ static bool fix_and_check_vcol_expr(THD *thd, TABLE *table,
DBUG_PRINT("info", ("vcol: %p", vcol));
DBUG_ASSERT(func_expr);
- if (func_expr->fixed)
+ if (func_expr->is_fixed())
DBUG_RETURN(0); // nothing to do
if (fix_vcol_expr(thd, vcol))
@@ -2989,7 +3319,7 @@ static bool fix_and_check_vcol_expr(THD *thd, TABLE *table,
of the statement because the field item does not have a field
pointer at that time
*/
- myf warn= table->s->frm_version < FRM_VER_EXPRESSSIONS ? ME_JUST_WARNING : 0;
+ myf warn= table->s->frm_version < FRM_VER_EXPRESSSIONS ? ME_WARNING : 0;
my_error(ER_VIRTUAL_COLUMN_FUNCTION_IS_NOT_ALLOWED, MYF(warn),
"AUTO_INCREMENT", vcol->get_vcol_type_name(), res.name);
if (!warn)
@@ -3105,6 +3435,86 @@ static bool check_vcol_forward_refs(Field *field, Virtual_column_info *vcol,
return res;
}
+#ifndef DBUG_OFF
+static void print_long_unique_table(TABLE *table)
+{
+ char buff[256];
+ String str;
+ KEY *key_info_table, *key_info_share;
+ KEY_PART_INFO *key_part;
+ Field *field;
+  my_snprintf(buff, sizeof(buff), "Printing table state: table fields,"
+              " field->offset, field->null_bit, field->null_pos and key_info ...\n"
+              "\nPrinting table keyinfo\n");
+ str.append(buff, strlen(buff));
+ my_snprintf(buff, sizeof(buff), "\ntable->s->reclength %d\n"
+ "table->s->fields %d\n",
+ table->s->reclength, table->s->fields);
+ str.append(buff, strlen(buff));
+ for (uint i= 0; i < table->s->keys; i++)
+ {
+ key_info_table= table->key_info + i;
+ key_info_share= table->s->key_info + i;
+ my_snprintf(buff, sizeof(buff), "\ntable->key_info[%d] user_defined_key_parts = %d\n"
+ "table->key_info[%d] algorithm == HA_KEY_ALG_LONG_HASH = %d\n"
+ "table->key_info[%d] flags & HA_NOSAME = %d\n",
+ i, key_info_table->user_defined_key_parts,
+ i, key_info_table->algorithm == HA_KEY_ALG_LONG_HASH,
+ i, key_info_table->flags & HA_NOSAME);
+ str.append(buff, strlen(buff));
+ my_snprintf(buff, sizeof(buff), "\ntable->s->key_info[%d] user_defined_key_parts = %d\n"
+ "table->s->key_info[%d] algorithm == HA_KEY_ALG_LONG_HASH = %d\n"
+ "table->s->key_info[%d] flags & HA_NOSAME = %d\n",
+ i, key_info_share->user_defined_key_parts,
+ i, key_info_share->algorithm == HA_KEY_ALG_LONG_HASH,
+ i, key_info_share->flags & HA_NOSAME);
+ str.append(buff, strlen(buff));
+ key_part = key_info_table->key_part;
+ my_snprintf(buff, sizeof(buff), "\nPrinting table->key_info[%d].key_part[0] info\n"
+ "key_part->offset = %d\n"
+ "key_part->field_name = %s\n"
+ "key_part->length = %d\n"
+ "key_part->null_bit = %d\n"
+ "key_part->null_offset = %d\n",
+ i, key_part->offset, key_part->field->field_name.str, key_part->length,
+ key_part->null_bit, key_part->null_offset);
+ str.append(buff, strlen(buff));
+
+ for (uint j= 0; j < key_info_share->user_defined_key_parts; j++)
+ {
+ key_part= key_info_share->key_part + j;
+ my_snprintf(buff, sizeof(buff), "\nPrinting share->key_info[%d].key_part[%d] info\n"
+ "key_part->offset = %d\n"
+ "key_part->field_name = %s\n"
+ "key_part->length = %d\n"
+ "key_part->null_bit = %d\n"
+ "key_part->null_offset = %d\n",
+ i,j,key_part->offset, key_part->field->field_name.str, key_part->length,
+ key_part->null_bit, key_part->null_offset);
+ str.append(buff, strlen(buff));
+ }
+ }
+ my_snprintf(buff, sizeof(buff), "\nPrinting table->fields\n");
+ str.append(buff, strlen(buff));
+ for(uint i= 0; i < table->s->fields; i++)
+ {
+ field= table->field[i];
+ my_snprintf(buff, sizeof(buff), "\ntable->field[%d]->field_name %s\n"
+ "table->field[%d]->offset = %d\n"
+ "table->field[%d]->field_length = %d\n"
+ "table->field[%d]->null_pos wrt to record 0 = %d\n"
+ "table->field[%d]->null_bit_pos = %d\n",
+ i, field->field_name.str,
+ i, field->ptr- table->record[0],
+ i, field->pack_length(),
+ i, field->null_bit ? field->null_ptr - table->record[0] : -1,
+ i, field->null_bit);
+ str.append(buff, strlen(buff));
+ }
+ (*error_handler_hook)(1, str.ptr(), ME_NOTE);
+}
+#endif
+
/*
Open a table based on a TABLE_SHARE
@@ -3215,7 +3625,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
if (prgflag & (READ_ALL + EXTRA_RECORD))
{
records++;
- if (share->versioned)
+ if (share->versioned || share->period.name)
records++;
}
@@ -3298,10 +3708,14 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
key_part_end= key_part + (share->use_ext_keys ? key_info->ext_key_parts :
key_info->user_defined_key_parts) ;
+ if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
+ {
+ key_part_end++;
+ key_info->flags&= ~HA_NOSAME;
+ }
for ( ; key_part < key_part_end; key_part++)
{
Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
-
if (field->key_length() != key_part->length &&
!(field->flags & BLOB_FLAG))
{
@@ -3454,17 +3868,6 @@ partititon_err:
(my_bitmap_map*) bitmaps, share->fields, FALSE);
bitmaps+= bitmap_size;
- /* Don't allocate vcol_bitmap if we don't need it */
- if (share->virtual_fields)
- {
- if (!(outparam->def_vcol_set= (MY_BITMAP*)
- alloc_root(&outparam->mem_root, sizeof(*outparam->def_vcol_set))))
- goto err;
- my_bitmap_init(outparam->def_vcol_set,
- (my_bitmap_map*) bitmaps, share->fields, FALSE);
- bitmaps+= bitmap_size;
- }
-
my_bitmap_init(&outparam->has_value_set,
(my_bitmap_map*) bitmaps, share->fields, FALSE);
bitmaps+= bitmap_size;
@@ -3545,6 +3948,8 @@ partititon_err:
share->no_replicate= TRUE;
if (outparam->file->table_cache_type() & HA_CACHE_TBL_NOCACHE)
share->not_usable_by_query_cache= TRUE;
+ if (outparam->file->ha_table_flags() & HA_CAN_ONLINE_BACKUPS)
+ share->online_backup= 1;
}
if (share->no_replicate || !binlog_filter->db_ok(share->db.str))
@@ -3555,6 +3960,8 @@ partititon_err:
thd->status_var.opened_tables++;
thd->lex->context_analysis_only= save_context_analysis_only;
+ DBUG_EXECUTE_IF("print_long_unique_internal_state",
+ print_long_unique_table(outparam););
DBUG_RETURN (OPEN_FRM_OK);
err:
@@ -3667,7 +4074,7 @@ void open_table_error(TABLE_SHARE *share, enum open_frm_error error,
int db_errno)
{
char buff[FN_REFLEN];
- const myf errortype= ME_ERROR+ME_WAITTANG; // Write fatals error to log
+  const myf errortype= ME_ERROR_LOG;  // Write fatal errors to the log
DBUG_ENTER("open_table_error");
DBUG_PRINT("info", ("error: %d db_errno: %d", error, db_errno));
@@ -3850,6 +4257,8 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
{
size_t key_comment_total_bytes= 0;
uint i;
+ uchar frm_format= create_info->expression_length ? FRM_VER_EXPRESSSIONS
+ : FRM_VER_TRUE_VARCHAR;
DBUG_ENTER("prepare_frm_header");
/* Fix this when we have new .frm files; Current limit is 4G rows (TODO) */
@@ -3858,17 +4267,6 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
if (create_info->min_rows > UINT_MAX32)
create_info->min_rows= UINT_MAX32;
- size_t key_length, tmp_key_length, tmp, csid;
- bzero((char*) fileinfo, FRM_HEADER_SIZE);
- /* header */
- fileinfo[0]=(uchar) 254;
- fileinfo[1]= 1;
- fileinfo[2]= (create_info->expression_length == 0 ? FRM_VER_TRUE_VARCHAR :
- FRM_VER_EXPRESSSIONS);
-
- DBUG_ASSERT(ha_storage_engine_is_enabled(create_info->db_type));
- fileinfo[3]= (uchar) ha_legacy_type(create_info->db_type);
-
/*
Keep in sync with pack_keys() in unireg.cc
For each key:
@@ -3887,8 +4285,20 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
(key_info[i].comment.length > 0));
if (key_info[i].flags & HA_USES_COMMENT)
key_comment_total_bytes += 2 + key_info[i].comment.length;
+ if (key_info[i].algorithm == HA_KEY_ALG_LONG_HASH)
+ frm_format= FRM_VER_EXPRESSSIONS;
}
+ size_t key_length, tmp_key_length, tmp, csid;
+ bzero((char*) fileinfo, FRM_HEADER_SIZE);
+ /* header */
+ fileinfo[0]=(uchar) 254;
+ fileinfo[1]= 1;
+ fileinfo[2]= frm_format;
+
+ DBUG_ASSERT(ha_storage_engine_is_enabled(create_info->db_type));
+ fileinfo[3]= (uchar) ha_legacy_type(create_info->db_type);
+
key_length= keys * (8 + MAX_REF_PARTS * 9 + NAME_LEN + 1) + 16
+ key_comment_total_bytes;
@@ -4034,7 +4444,7 @@ uint calculate_key_len(TABLE *table, uint key, const uchar *buf,
/* works only with key prefixes */
DBUG_ASSERT(((keypart_map + 1) & keypart_map) == 0);
- KEY *key_info= table->s->key_info+key;
+ KEY *key_info= table->key_info+key;
KEY_PART_INFO *key_part= key_info->key_part;
KEY_PART_INFO *end_key_part= key_part + table->actual_n_key_parts(key_info);
uint length= 0;
@@ -4641,6 +5051,11 @@ void TABLE::init(THD *thd, TABLE_LIST *tl)
created= TRUE;
cond_selectivity= 1.0;
cond_selectivity_sampling_explain= NULL;
+ range_rowid_filter_cost_info_elems= 0;
+ range_rowid_filter_cost_info_ptr= NULL;
+ range_rowid_filter_cost_info= NULL;
+ update_handler= NULL;
+ check_unique_buf= NULL;
#ifdef HAVE_REPLICATION
/* used in RBR Triggers */
master_had_triggers= 0;
@@ -4924,7 +5339,7 @@ bool TABLE_LIST::prep_where(THD *thd, Item **conds,
if (where)
{
- if (where->fixed)
+ if (where->is_fixed())
where->update_used_tables();
else if (where->fix_fields(thd, &where))
DBUG_RETURN(TRUE);
@@ -4984,13 +5399,13 @@ bool TABLE_LIST::single_table_updatable()
{
if (!updatable)
return false;
- if (view && view->select_lex.table_list.elements == 1)
+ if (view && view->first_select_lex()->table_list.elements == 1)
{
/*
We need to check deeply only single table views. Multi-table views
will be turned to multi-table updates and then checked by leaf tables
*/
- return (((TABLE_LIST *)view->select_lex.table_list.first)->
+ return (((TABLE_LIST *)view->first_select_lex()->table_list.first)->
single_table_updatable());
}
return true;
@@ -5027,7 +5442,8 @@ merge_on_conds(THD *thd, TABLE_LIST *table, bool is_cascaded)
cond= table->on_expr->copy_andor_structure(thd);
if (!table->view)
DBUG_RETURN(cond);
- for (TABLE_LIST *tbl= (TABLE_LIST*)table->view->select_lex.table_list.first;
+ for (TABLE_LIST *tbl=
+ (TABLE_LIST*)table->view->first_select_lex()->table_list.first;
tbl;
tbl= tbl->next_local)
{
@@ -5069,7 +5485,7 @@ bool TABLE_LIST::prep_check_option(THD *thd, uint8 check_opt_type)
{
DBUG_ENTER("TABLE_LIST::prep_check_option");
bool is_cascaded= check_opt_type == VIEW_CHECK_CASCADED;
- TABLE_LIST *merge_underlying_list= view->select_lex.get_table_list();
+ TABLE_LIST *merge_underlying_list= view->first_select_lex()->get_table_list();
for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local)
{
/* see comment of check_opt_type parameter */
@@ -5187,7 +5603,7 @@ TABLE_LIST *TABLE_LIST::find_underlying_table(TABLE *table_to_find)
if (!view)
return 0;
- for (TABLE_LIST *tbl= view->select_lex.get_table_list();
+ for (TABLE_LIST *tbl= view->first_select_lex()->get_table_list();
tbl;
tbl= tbl->next_local)
{
@@ -5249,7 +5665,7 @@ int TABLE_LIST::view_check_option(THD *thd, bool ignore_failure)
main_view->db.str);
const char *name_table= (main_view->view ? main_view->view_name.str :
main_view->table_name.str);
- my_error(ER_VIEW_CHECK_FAILED, MYF(ignore_failure ? ME_JUST_WARNING : 0),
+ my_error(ER_VIEW_CHECK_FAILED, MYF(ignore_failure ? ME_WARNING : 0),
name_db, name_table);
return ignore_failure ? VIEW_CHECK_SKIP : VIEW_CHECK_ERROR;
}
@@ -5293,7 +5709,7 @@ int TABLE::verify_constraints(bool ignore_failure)
}
field_error.append((*chk)->name.str);
my_error(ER_CONSTRAINT_FAILED,
- MYF(ignore_failure ? ME_JUST_WARNING : 0), field_error.c_ptr(),
+ MYF(ignore_failure ? ME_WARNING : 0), field_error.c_ptr(),
s->db.str, s->table_name.str);
return ignore_failure ? VIEW_CHECK_SKIP : VIEW_CHECK_ERROR;
}
@@ -5385,7 +5801,8 @@ bool TABLE_LIST::set_insert_values(MEM_ROOT *mem_root)
{
DBUG_PRINT("info", ("setting insert_value for view"));
DBUG_ASSERT(is_view_or_derived() && is_merged_derived());
- for (TABLE_LIST *tbl= (TABLE_LIST*)view->select_lex.table_list.first;
+ for (TABLE_LIST *tbl=
+ (TABLE_LIST*)view->first_select_lex()->table_list.first;
tbl;
tbl= tbl->next_local)
if (tbl->set_insert_values(mem_root))
@@ -5552,7 +5969,7 @@ void TABLE_LIST::register_want_access(ulong want_access)
}
if (!view)
return;
- for (TABLE_LIST *tbl= view->select_lex.get_table_list();
+ for (TABLE_LIST *tbl= view->first_select_lex()->get_table_list();
tbl;
tbl= tbl->next_local)
tbl->register_want_access(want_access);
@@ -5686,6 +6103,7 @@ bool TABLE_LIST::prepare_security(THD *thd)
if (prepare_view_security_context(thd))
DBUG_RETURN(TRUE);
thd->security_ctx= find_view_security_context(thd);
+ opt_trace_disable_if_no_security_context_access(thd);
while ((tbl= tb++))
{
DBUG_ASSERT(tbl->referencing_view);
@@ -5744,6 +6162,7 @@ void TABLE_LIST::set_check_materialized()
The subtree should be already excluded
*/
DBUG_ASSERT(!derived->first_select()->first_inner_unit() ||
+ derived->first_select()->first_inner_unit()->with_element ||
derived->first_select()->first_inner_unit()->first_select()->
exclude_from_table_unique_test);
}
@@ -5760,14 +6179,14 @@ TABLE *TABLE_LIST::get_real_join_table()
break;
/* we do not support merging of union yet */
DBUG_ASSERT(tbl->view == NULL ||
- tbl->view->select_lex.next_select() == NULL);
+ tbl->view->first_select_lex()->next_select() == NULL);
DBUG_ASSERT(tbl->derived == NULL ||
tbl->derived->first_select()->next_select() == NULL);
{
List_iterator_fast<TABLE_LIST>
ti(tbl->view != NULL ?
- tbl->view->select_lex.top_join_list :
+ tbl->view->first_select_lex()->top_join_list :
tbl->derived->first_select()->top_join_list);
for (;;)
{
@@ -5938,7 +6357,7 @@ Item *Field_iterator_view::create_item(THD *thd)
Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref,
LEX_CSTRING *name)
{
- bool save_wrapper= thd->lex->select_lex.no_wrap_view_item;
+ bool save_wrapper= thd->lex->first_select_lex()->no_wrap_view_item;
Item *field= *field_ref;
DBUG_ENTER("create_view_field");
@@ -5949,13 +6368,13 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref,
('mysql_schema_table' function). So we can return directly the
field. This case happens only for 'show & where' commands.
*/
- DBUG_ASSERT(field && field->fixed);
+ DBUG_ASSERT(field && field->is_fixed());
DBUG_RETURN(field);
}
DBUG_ASSERT(field);
thd->lex->current_select->no_wrap_view_item= TRUE;
- if (!field->fixed)
+ if (!field->is_fixed())
{
if (field->fix_fields(thd, field_ref))
{
@@ -5969,8 +6388,9 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref,
{
DBUG_RETURN(field);
}
- Name_resolution_context *context= view->view ? &view->view->select_lex.context :
- &thd->lex->select_lex.context;
+ Name_resolution_context *context= (view->view ?
+ &view->view->first_select_lex()->context:
+ &thd->lex->first_select_lex()->context);
Item *item= (new (thd->mem_root)
Item_direct_view_ref(thd, context, field_ref, view->alias.str,
name, view));
@@ -6293,13 +6713,12 @@ void TABLE::clear_column_bitmaps()
Reset column read/write usage. It's identical to:
bitmap_clear_all(&table->def_read_set);
bitmap_clear_all(&table->def_write_set);
- if (s->virtual_fields) bitmap_clear_all(table->def_vcol_set);
The code assumes that the bitmaps are allocated after each other, as
guaranteed by open_table_from_share()
*/
bzero((char*) def_read_set.bitmap,
s->column_bitmap_size * (s->virtual_fields ? 3 : 2));
- column_bitmaps_set(&def_read_set, &def_write_set, def_vcol_set);
+ column_bitmaps_set(&def_read_set, &def_write_set);
rpl_write_set= 0; // Safety
}
@@ -6446,13 +6865,8 @@ void TABLE::mark_columns_needed_for_delete()
Field **reg_field;
for (reg_field= field ; *reg_field ; reg_field++)
{
- Field *cur_field= *reg_field;
- if (cur_field->flags & (PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG))
- {
- bitmap_set_bit(read_set, cur_field->field_index);
- if (cur_field->vcol_info)
- bitmap_set_bit(vcol_set, cur_field->field_index);
- }
+ if ((*reg_field)->flags & (PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG))
+ mark_column_with_deps(*reg_field);
}
need_signal= true;
}
@@ -6477,9 +6891,9 @@ void TABLE::mark_columns_needed_for_delete()
if (s->versioned)
{
- bitmap_set_bit(read_set, s->vers_start_field()->field_index);
- bitmap_set_bit(read_set, s->vers_end_field()->field_index);
- bitmap_set_bit(write_set, s->vers_end_field()->field_index);
+ bitmap_set_bit(read_set, s->vers.start_fieldno);
+ bitmap_set_bit(read_set, s->vers.end_fieldno);
+ bitmap_set_bit(write_set, s->vers.end_fieldno);
}
}
@@ -6531,13 +6945,7 @@ void TABLE::mark_columns_needed_for_update()
if (any_written && !all_read)
{
for (KEY_PART_INFO *kp= k->key_part; kp < kpend; kp++)
- {
- int idx= kp->fieldnr - 1;
- if (bitmap_fast_test_and_set(read_set, idx))
- continue;
- if (field[idx]->vcol_info)
- mark_virtual_col(field[idx]);
- }
+ mark_column_with_deps(field[kp->fieldnr - 1]);
}
}
need_signal= true;
@@ -6734,49 +7142,12 @@ void TABLE::mark_columns_per_binlog_row_image()
DBUG_ASSERT(FALSE);
}
}
- /*
- We have to ensure that all virtual columns that are part of read set
- are calculated.
- */
- if (vcol_set)
- bitmap_union(vcol_set, read_set);
file->column_bitmaps_signal();
}
DBUG_VOID_RETURN;
}
-/*
- @brief Mark a column as virtual used by the query
-
- @param field the field for the column to be marked
-
- @details
- The function marks the column for 'field' as virtual (computed)
- in the bitmap vcol_set.
- If the column is marked for the first time the expression to compute
- the column is traversed and all columns that are occurred there are
- marked in the read_set of the table.
-
- @retval
- TRUE if column is marked for the first time
- @retval
- FALSE otherwise
-*/
-
-bool TABLE::mark_virtual_col(Field *field)
-{
- bool res;
- DBUG_ASSERT(field->vcol_info);
- if (!(res= bitmap_fast_test_and_set(vcol_set, field->field_index)))
- {
- Item *vcol_item= field->vcol_info->expr;
- DBUG_ASSERT(vcol_item);
- vcol_item->walk(&Item::register_field_in_read_map, 1, 0);
- }
- return res;
-}
-
/*
@brief Mark virtual columns for update/insert commands
@@ -6818,13 +7189,13 @@ bool TABLE::mark_virtual_columns_for_write(bool insert_fl
{
tmp_vfield= *vfield_ptr;
if (bitmap_is_set(write_set, tmp_vfield->field_index))
- bitmap_updated|= mark_virtual_col(tmp_vfield);
+ bitmap_updated|= mark_virtual_column_with_deps(tmp_vfield);
else if (tmp_vfield->vcol_info->stored_in_db ||
(tmp_vfield->flags & (PART_KEY_FLAG | FIELD_IN_PART_FUNC_FLAG |
PART_INDIRECT_KEY_FLAG)))
{
bitmap_set_bit(write_set, tmp_vfield->field_index);
- mark_virtual_col(tmp_vfield);
+ mark_virtual_column_with_deps(tmp_vfield);
bitmap_updated= true;
}
}
@@ -6955,8 +7326,6 @@ void TABLE::mark_columns_used_by_virtual_fields(void)
void TABLE::mark_check_constraint_columns_for_read(void)
{
bitmap_union(read_set, s->check_set);
- if (vcol_set)
- bitmap_union(vcol_set, s->check_set);
}
@@ -7730,6 +8099,20 @@ public:
}
};
+
+/*
+  To satisfy the assert in Field's marked_for_write_or_computed() we
+  temporarily mark the field for write before storing the generated value in it
+*/
+#ifdef DBUG_ASSERT_EXISTS
+#define DBUG_FIX_WRITE_SET(f) bool _write_set_fixed= !bitmap_fast_test_and_set(write_set, (f)->field_index)
+#define DBUG_RESTORE_WRITE_SET(f) if (_write_set_fixed) bitmap_clear_bit(write_set, (f)->field_index)
+#else
+#define DBUG_FIX_WRITE_SET(f)
+#define DBUG_RESTORE_WRITE_SET(f)
+#endif
+
+
/*
@brief Compute values for virtual columns used in query
@@ -7793,17 +8176,17 @@ int TABLE::update_virtual_fields(handler *h, enum_vcol_update_mode update_mode)
switch (update_mode) {
case VCOL_UPDATE_FOR_READ:
update= (!vcol_info->stored_in_db &&
- bitmap_is_set(vcol_set, vf->field_index));
+ bitmap_is_set(read_set, vf->field_index));
swap_values= 1;
break;
case VCOL_UPDATE_FOR_DELETE:
case VCOL_UPDATE_FOR_WRITE:
- update= bitmap_is_set(vcol_set, vf->field_index);
+ update= bitmap_is_set(read_set, vf->field_index);
break;
case VCOL_UPDATE_FOR_REPLACE:
update= ((!vcol_info->stored_in_db &&
(vf->flags & (PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG)) &&
- bitmap_is_set(vcol_set, vf->field_index)) ||
+ bitmap_is_set(read_set, vf->field_index)) ||
update_all_columns);
if (update && (vf->flags & BLOB_FLAG))
{
@@ -7823,7 +8206,7 @@ int TABLE::update_virtual_fields(handler *h, enum_vcol_update_mode update_mode)
/* Read indexed fields that was not updated in VCOL_UPDATE_FOR_READ */
update= (!vcol_info->stored_in_db &&
(vf->flags & (PART_KEY_FLAG | PART_INDIRECT_KEY_FLAG)) &&
- !bitmap_is_set(vcol_set, vf->field_index));
+ !bitmap_is_set(read_set, vf->field_index));
swap_values= 1;
break;
}
@@ -7832,8 +8215,10 @@ int TABLE::update_virtual_fields(handler *h, enum_vcol_update_mode update_mode)
{
int field_error __attribute__((unused)) = 0;
/* Compute the actual value of the virtual fields */
+ DBUG_FIX_WRITE_SET(vf);
if (vcol_info->expr->save_in_field(vf, 0))
field_error= error= 1;
+ DBUG_RESTORE_WRITE_SET(vf);
DBUG_PRINT("info", ("field '%s' - updated error: %d",
vf->field_name.str, field_error));
if (swap_values && (vf->flags & BLOB_FLAG))
@@ -7867,7 +8252,9 @@ int TABLE::update_virtual_field(Field *vf)
in_use->set_n_backup_active_arena(expr_arena, &backup_arena);
bitmap_clear_all(&tmp_set);
vf->vcol_info->expr->walk(&Item::update_vcol_processor, 0, &tmp_set);
+ DBUG_FIX_WRITE_SET(vf);
vf->vcol_info->expr->save_in_field(vf, 0);
+ DBUG_RESTORE_WRITE_SET(vf);
in_use->restore_active_arena(expr_arena, &backup_arena);
DBUG_RETURN(in_use->is_error());
}
@@ -7933,6 +8320,78 @@ int TABLE::update_default_fields(bool update_command, bool ignore_errors)
DBUG_RETURN(res);
}
+int TABLE::update_generated_fields()
+{
+ int res= 0;
+ if (found_next_number_field)
+ {
+ next_number_field= found_next_number_field;
+ res= found_next_number_field->set_default();
+ if (likely(!res))
+ res= file->update_auto_increment();
+ }
+
+ if (likely(!res) && vfield)
+ res= update_virtual_fields(file, VCOL_UPDATE_FOR_WRITE);
+ if (likely(!res) && versioned())
+ vers_update_fields();
+ if (likely(!res))
+ res= verify_constraints(false) == VIEW_CHECK_ERROR;
+ return res;
+}
+
+int TABLE::period_make_insert(Item *src, Field *dst)
+{
+ THD *thd= in_use;
+
+ store_record(this, record[1]);
+ int res= src->save_in_field(dst, true);
+
+ if (likely(!res))
+ res= update_generated_fields();
+
+ if (likely(!res) && triggers)
+ res= triggers->process_triggers(thd, TRG_EVENT_INSERT,
+ TRG_ACTION_BEFORE, true);
+
+ if (likely(!res))
+ res = file->ha_write_row(record[0]);
+
+ if (likely(!res) && triggers)
+ res= triggers->process_triggers(thd, TRG_EVENT_INSERT,
+ TRG_ACTION_AFTER, true);
+
+ restore_record(this, record[1]);
+ return res;
+}
+
+int TABLE::insert_portion_of_time(THD *thd,
+ const vers_select_conds_t &period_conds,
+ ha_rows *rows_inserted)
+{
+ bool lcond= period_conds.field_start->val_datetime_packed(thd)
+ < period_conds.start.item->val_datetime_packed(thd);
+ bool rcond= period_conds.field_end->val_datetime_packed(thd)
+ > period_conds.end.item->val_datetime_packed(thd);
+
+ int res= 0;
+ if (lcond)
+ {
+ res= period_make_insert(period_conds.start.item,
+ field[s->period.end_fieldno]);
+ if (likely(!res))
+ ++*rows_inserted;
+ }
+ if (likely(!res) && rcond)
+ {
+ res= period_make_insert(period_conds.end.item,
+ field[s->period.start_fieldno]);
+ if (likely(!res))
+ ++*rows_inserted;
+ }
+
+ return res;
+}
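insert_portion_of_time() above re-inserts the pieces of the row's period that fall outside the portion affected by UPDATE/DELETE ... FOR PORTION OF. A tiny standalone sketch of the splitting rule, with plain integers standing in for the period bounds (illustrative only):

#include <cstdio>

int main()
{
  long row_start= 2000, row_end= 2020;  // stored period of the row
  long p_start= 2005, p_end= 2010;      // ... FOR PORTION OF p FROM 2005 TO 2010

  if (row_start < p_start)              // left remainder survives unchanged
    std::printf("re-insert [%ld, %ld)\n", row_start, p_start);
  if (row_end > p_end)                  // right remainder survives unchanged
    std::printf("re-insert [%ld, %ld)\n", p_end, row_end);
  return 0;                             // prints [2000, 2005) and [2010, 2020)
}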
void TABLE::vers_update_fields()
{
@@ -8531,199 +8990,72 @@ double KEY::actual_rec_per_key(uint i)
read_stats->get_avg_frequency(i) : (double) rec_per_key[i]);
}
+/*
+  Find the total number of fields in the hash expression
+*/
+int fields_in_hash_keyinfo(KEY *keyinfo)
+{
+ Item_func_hash * temp= (Item_func_hash *)
+ keyinfo->key_part->field->vcol_info->expr;
+ return temp->argument_count();
+}
+/*
+ setup_keyinfo_hash changes the key_info->key_part
+  to be the same as defined by the user
+ */
+void setup_keyinfo_hash(KEY *key_info)
+{
+ DBUG_ASSERT(key_info->algorithm == HA_KEY_ALG_LONG_HASH);
+ DBUG_ASSERT(key_info->key_part->field->flags & LONG_UNIQUE_HASH_FIELD);
+ uint no_of_keyparts= fields_in_hash_keyinfo(key_info);
+ key_info->key_part-= no_of_keyparts;
+ key_info->user_defined_key_parts= key_info->usable_key_parts=
+ key_info->ext_key_parts= no_of_keyparts;
+ key_info->flags|= HA_NOSAME;
+}
+/*
+  re_setup_keyinfo_hash reverts the effect of setup_keyinfo_hash; this
+  arrangement is what the storage engine expects
+ */
+void re_setup_keyinfo_hash(KEY *key_info)
+{
+ DBUG_ASSERT(key_info->algorithm == HA_KEY_ALG_LONG_HASH);
+ DBUG_ASSERT(!(key_info->key_part->field->flags & LONG_UNIQUE_HASH_FIELD));
+ while(!(key_info->key_part->field->flags & LONG_UNIQUE_HASH_FIELD))
+ key_info->key_part++;
+ key_info->user_defined_key_parts= key_info->usable_key_parts=
+ key_info->ext_key_parts= 1;
+ key_info->flags&= ~HA_NOSAME;
+}
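setup_keyinfo_hash() and re_setup_keyinfo_hash() slide the key_part window between the user-defined columns and the single hidden hash part stored after them. A small sketch of that pointer arithmetic under the assumed layout (the KeyPart array is illustrative, not the server structure):

#include <cassert>

struct KeyPart { bool is_hash; };

int main()
{
  // long unique key layout: user-defined parts first, one hidden hash part last
  KeyPart parts[4]= { {false}, {false}, {false}, {true} };
  KeyPart *engine_view= parts + 3;                    // what the storage engine indexes
  int n_user_parts= 3;
  KeyPart *server_view= engine_view - n_user_parts;   // setup_keyinfo_hash()
  assert(server_view == parts);                       // user-defined parts exposed
  assert((server_view + n_user_parts)->is_hash);      // re_setup_keyinfo_hash() target
  return 0;
}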
/**
- @brief
- Mark subformulas of a condition unusable for the condition pushed into table
-
- @param cond The condition whose subformulas are to be marked
-
- @details
- This method recursively traverses the AND-OR condition cond and for each subformula
- of the codition it checks whether it can be usable for the extraction of a condition
- that can be pushed into this table. The subformulas that are not usable are
- marked with the flag NO_EXTRACTION_FL.
- @note
- This method is called before any call of TABLE_LIST::build_pushable_cond_for_table.
- The flag NO_EXTRACTION_FL set in a subformula allows to avoid building clone
- for the subformula when extracting the pushable condition.
-*/
-
-void TABLE_LIST::check_pushable_cond_for_table(Item *cond)
+  @brief Clone the current handler.
+  Creates a clone of the handler; it is used during UPDATE to check
+  unique hash keys.
+*/
+void TABLE::clone_handler_for_update()
{
- table_map tab_map= table->map;
- cond->clear_extraction_flag();
- if (cond->type() == Item::COND_ITEM)
- {
- bool and_cond= ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC;
- List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
- uint count= 0;
- Item *item;
- while ((item=li++))
- {
- check_pushable_cond_for_table(item);
- if (item->get_extraction_flag() != NO_EXTRACTION_FL)
- count++;
- else if (!and_cond)
- break;
- }
- if ((and_cond && count == 0) || item)
- {
- cond->set_extraction_flag(NO_EXTRACTION_FL);
- if (and_cond)
- li.rewind();
- while ((item= li++))
- item->clear_extraction_flag();
- }
- }
- else if (!cond->excl_dep_on_table(tab_map))
- cond->set_extraction_flag(NO_EXTRACTION_FL);
+ if (this->update_handler)
+ return;
+ handler *update_handler= NULL;
+ if (!s->long_unique_table)
+ return;
+ update_handler= file->clone(s->normalized_path.str,
+ in_use->mem_root);
+ update_handler->ha_external_lock(in_use, F_RDLCK);
+ this->update_handler= update_handler;
+ return;
}
-
/**
- @brief
- Build condition extractable from the given one depended only on this table
-
- @param thd The thread handle
- @param cond The condition from which the pushable one is to be extracted
-
- @details
- For the given condition cond this method finds out what condition depended
- only on this table can be extracted from cond. If such condition C exists
- the method builds the item for it.
- The method uses the flag NO_EXTRACTION_FL set by the preliminary call of
- the method TABLE_LIST::check_pushable_cond_for_table to figure out whether
- a subformula depends only on this table or not.
- @note
- The built condition C is always implied by the condition cond
- (cond => C). The method tries to build the most restictive such
- condition (i.e. for any other condition C' such that cond => C'
- we have C => C').
- @note
- The build item is not ready for usage: substitution for the field items
- has to be done and it has to be re-fixed.
-
- @retval
- the built condition pushable into this table if such a condition exists
- NULL if there is no such a condition
-*/
-
-Item* TABLE_LIST::build_pushable_cond_for_table(THD *thd, Item *cond)
+ @brief Deletes update handler object
+*/
+void TABLE::delete_update_handler()
{
- table_map tab_map= table->map;
- bool is_multiple_equality= cond->type() == Item::FUNC_ITEM &&
- ((Item_func*) cond)->functype() == Item_func::MULT_EQUAL_FUNC;
- if (cond->get_extraction_flag() == NO_EXTRACTION_FL)
- return 0;
- if (cond->type() == Item::COND_ITEM)
- {
- bool cond_and= false;
- Item_cond *new_cond;
- if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
- {
- cond_and= true;
- new_cond=new (thd->mem_root) Item_cond_and(thd);
- }
- else
- new_cond= new (thd->mem_root) Item_cond_or(thd);
- if (!new_cond)
- return 0;
- List_iterator<Item> li(*((Item_cond*) cond)->argument_list());
- Item *item;
- bool is_fix_needed= false;
- while ((item=li++))
- {
- if (item->get_extraction_flag() == NO_EXTRACTION_FL)
- {
- if (!cond_and)
- return 0;
- continue;
- }
- Item *fix= build_pushable_cond_for_table(thd, item);
- if (!fix && !cond_and)
- return 0;
- if (!fix)
- continue;
-
- if (fix->type() == Item::COND_ITEM &&
- ((Item_cond*) fix)->functype() == Item_func::COND_AND_FUNC)
- is_fix_needed= true;
-
- new_cond->argument_list()->push_back(fix, thd->mem_root);
- }
- if (is_fix_needed && new_cond->fix_fields(thd, 0))
- return 0;
-
- switch (new_cond->argument_list()->elements)
- {
- case 0:
- return 0;
- case 1:
- return new_cond->argument_list()->head();
- default:
- return new_cond;
- }
- }
- else if (is_multiple_equality)
- {
- if (!(cond->used_tables() & tab_map))
- return 0;
- Item *new_cond= NULL;
- int i= 0;
- Item_equal *item_equal= (Item_equal *) cond;
- Item *left_item = item_equal->get_const();
- Item_equal_fields_iterator it(*item_equal);
- Item *item;
- if (!left_item)
- {
- while ((item=it++))
- if (item->used_tables() == tab_map)
- {
- left_item= item;
- break;
- }
- }
- if (!left_item)
- return 0;
- while ((item=it++))
- {
- if (!(item->used_tables() == tab_map))
- continue;
- Item_func_eq *eq= 0;
- Item *left_item_clone= left_item->build_clone(thd);
- Item *right_item_clone= item->build_clone(thd);
- if (left_item_clone && right_item_clone)
- {
- left_item_clone->set_item_equal(NULL);
- right_item_clone->set_item_equal(NULL);
- eq= new (thd->mem_root) Item_func_eq(thd, right_item_clone,
- left_item_clone);
- }
- if (eq)
- {
- i++;
- switch (i)
- {
- case 1:
- new_cond= eq;
- break;
- case 2:
- new_cond= new (thd->mem_root) Item_cond_and(thd, new_cond, eq);
- break;
- default:
- ((Item_cond_and*)new_cond)->argument_list()->push_back(eq,
- thd->mem_root);
- }
- }
- }
- if (new_cond)
- new_cond->fix_fields(thd, &new_cond);
- return new_cond;
- }
- else if (cond->get_extraction_flag() != NO_EXTRACTION_FL)
- return cond->build_clone(thd);
- return 0;
+ update_handler->ha_external_lock(in_use, F_UNLCK);
+ update_handler->ha_close();
+ delete update_handler;
+ this->update_handler= NULL;
}
LEX_CSTRING *fk_option_name(enum_fk_option opt)
@@ -8813,7 +9145,7 @@ bool TR_table::update(ulonglong start_id, ulonglong end_id)
store(FLD_BEGIN_TS, thd->transaction_time());
thd->set_time();
- timeval end_time= {thd->query_start(), long(thd->query_start_sec_part())};
+ timeval end_time= {thd->query_start(), int(thd->query_start_sec_part())};
store(FLD_TRX_ID, start_id);
store(FLD_COMMIT_ID, end_id);
store(FLD_COMMIT_TS, end_time);
@@ -8834,7 +9166,7 @@ bool TR_table::query(ulonglong trx_id)
READ_RECORD info;
int error;
List<TABLE_LIST> dummy;
- SELECT_LEX &slex= thd->lex->select_lex;
+ SELECT_LEX &slex= *(thd->lex->first_select_lex());
Name_resolution_context_backup backup(slex.context, *this);
Item *field= newx Item_field(thd, &slex.context, (*this)[FLD_TRX_ID]);
Item *value= newx Item_int(thd, trx_id);
@@ -8868,7 +9200,7 @@ bool TR_table::query(MYSQL_TIME &commit_time, bool backwards)
READ_RECORD info;
int error;
List<TABLE_LIST> dummy;
- SELECT_LEX &slex= thd->lex->select_lex;
+ SELECT_LEX &slex= *(thd->lex->first_select_lex());
Name_resolution_context_backup backup(slex.context, *this);
Item *field= newx Item_field(thd, &slex.context, (*this)[FLD_COMMIT_TS]);
Item *value= newx Item_datetime_literal(thd, &commit_time, 6);
@@ -8898,7 +9230,7 @@ bool TR_table::query(MYSQL_TIME &commit_time, bool backwards)
if (res > 0)
{
MYSQL_TIME commit_ts;
- if ((*this)[FLD_COMMIT_TS]->get_date(&commit_ts, 0))
+ if ((*this)[FLD_COMMIT_TS]->get_date(&commit_ts, date_mode_t(0)))
{
found= false;
break;
@@ -9100,6 +9432,7 @@ bool vers_select_conds_t::eq(const vers_select_conds_t &conds) const
return true;
case SYSTEM_TIME_BEFORE:
DBUG_ASSERT(0);
+ return false;
case SYSTEM_TIME_AS_OF:
return start.eq(conds.start);
case SYSTEM_TIME_FROM_TO:
@@ -9117,7 +9450,8 @@ bool Vers_history_point::resolve_unit(THD *thd)
return false;
if (item->fix_fields_if_needed(thd, &item))
return true;
- return item->this_item()->type_handler_for_system_time()->
+ return item->this_item()->real_type_handler()->
+ type_handler_for_system_time()->
Vers_history_point_resolve_unit(thd, this);
}
diff --git a/sql/table.h b/sql/table.h
index b7c14e0a606..865824bf36f 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -55,6 +55,9 @@ class Virtual_column_info;
class Table_triggers_list;
class TMP_TABLE_PARAM;
class SEQUENCE;
+class Range_rowid_filter_cost_info;
+class derived_handler;
+class Pushdown_derived;
/*
Used to identify NESTED_JOIN structures within a join (applicable only to
@@ -346,9 +349,18 @@ enum field_visibility_t {
INVISIBLE_FULL
};
-#define INVISIBLE_MAX_BITS 3
+#define INVISIBLE_MAX_BITS 3
+#define HA_HASH_FIELD_LENGTH 8
+#define HA_HASH_KEY_LENGTH_WITHOUT_NULL 8
+#define HA_HASH_KEY_LENGTH_WITH_NULL 9
+int fields_in_hash_keyinfo(KEY *keyinfo);
+
+void setup_keyinfo_hash(KEY *key_info);
+
+void re_setup_keyinfo_hash(KEY *key_info);
+
/**
Category of table found in the table share.
*/
@@ -395,28 +407,6 @@ enum enum_table_category
TABLE_CATEGORY_SYSTEM=3,
/**
- Information schema tables.
- These tables are an interface provided by the system
- to inspect the system metadata.
- These tables do *not* honor:
- - LOCK TABLE t FOR READ/WRITE
- - FLUSH TABLES WITH READ LOCK
- - SET GLOBAL READ_ONLY = ON
- as there is no point in locking explicitly
- an INFORMATION_SCHEMA table.
- Nothing is directly written to information schema tables.
- Note that this value is not used currently,
- since information schema tables are not shared,
- but implemented as session specific temporary tables.
- */
- /*
- TODO: Fixing the performance issues of I_S will lead
- to I_S tables in the table cache, which should use
- this table type.
- */
- TABLE_CATEGORY_INFORMATION=4,
-
- /**
Log tables.
These tables are an interface provided by the system
to inspect the system logs.
@@ -436,7 +426,33 @@ enum enum_table_category
The server implementation perform writes.
Log tables are cached in the table cache.
*/
- TABLE_CATEGORY_LOG=5,
+ TABLE_CATEGORY_LOG=4,
+
+ /*
+ Types below are read only tables, not affected by FLUSH TABLES or
+    The categories below are read-only tables, not affected by FLUSH TABLES or
+ */
+ /**
+ Information schema tables.
+ These tables are an interface provided by the system
+ to inspect the system metadata.
+ These tables do *not* honor:
+ - LOCK TABLE t FOR READ/WRITE
+ - FLUSH TABLES WITH READ LOCK
+ - SET GLOBAL READ_ONLY = ON
+ as there is no point in locking explicitly
+ an INFORMATION_SCHEMA table.
+ Nothing is directly written to information schema tables.
+ Note that this value is not used currently,
+ since information schema tables are not shared,
+ but implemented as session specific temporary tables.
+ */
+ /*
+ TODO: Fixing the performance issues of I_S will lead
+ to I_S tables in the table cache, which should use
+ this table type.
+ */
+ TABLE_CATEGORY_INFORMATION=5,
/**
Performance schema tables.
@@ -460,6 +476,7 @@ enum enum_table_category
*/
TABLE_CATEGORY_PERFORMANCE=6
};
+
typedef enum enum_table_category TABLE_CATEGORY;
TABLE_CATEGORY get_table_category(const LEX_CSTRING *db,
@@ -717,6 +734,7 @@ struct TABLE_SHARE
bool null_field_first;
bool system; /* Set if system table (one record) */
bool not_usable_by_query_cache;
+ bool online_backup; /* Set if on-line backup supported */
bool no_replicate;
bool crashed;
bool is_view;
@@ -726,6 +744,7 @@ struct TABLE_SHARE
bool vcols_need_refixing;
bool has_update_default_function;
bool can_do_row_logging; /* 1 if table supports RBR */
+ bool long_unique_table;
ulong table_map_id; /* for row-based replication */
@@ -756,20 +775,38 @@ struct TABLE_SHARE
/**
System versioning support.
- */
+ */
+ struct period_info_t
+ {
+ uint16 start_fieldno;
+ uint16 end_fieldno;
+ Lex_ident name;
+ Lex_ident constr_name;
+ Field *start_field(TABLE_SHARE *s) const
+ {
+ return s->field[start_fieldno];
+ }
+ Field *end_field(TABLE_SHARE *s) const
+ {
+ return s->field[end_fieldno];
+ }
+ };
vers_sys_type_t versioned;
- uint16 row_start_field;
- uint16 row_end_field;
+ period_info_t vers;
+ period_info_t period;
+
+ bool init_period_from_extra2(period_info_t *period, const uchar *data,
+ const uchar *end);
Field *vers_start_field()
{
- return field[row_start_field];
+ return field[vers.start_fieldno];
}
Field *vers_end_field()
{
- return field[row_end_field];
+ return field[vers.end_fieldno];
}
/**
@@ -984,6 +1021,8 @@ struct TABLE_SHARE
/* frees the memory allocated in read_frm_image */
void free_frm_image(const uchar *frm);
+
+ void set_overlapped_keys();
};
@@ -1057,6 +1096,8 @@ typedef Bitmap<MAX_FIELDS> Field_map;
class SplM_opt_info;
+struct vers_select_conds_t;
+
struct TABLE
{
TABLE() {} /* Remove gcc warning */
@@ -1082,6 +1123,9 @@ public:
THD *in_use; /* Which thread uses this */
uchar *record[3]; /* Pointer to records */
+ /* record buf to resolve hash collisions for long UNIQUE constraints */
+ uchar *check_unique_buf;
+ handler *update_handler; /* Handler used in case of update */
uchar *write_row_record; /* Used as optimisation in
THD::write_row */
uchar *insert_values; /* used by INSERT ... UPDATE */
@@ -1107,6 +1151,8 @@ public:
key_map keys_in_use_for_group_by;
/* Map of keys that can be used to calculate ORDER BY without sorting */
key_map keys_in_use_for_order_by;
+ /* Map of keys dependent on some constraint */
+ key_map constraint_dependent_keys;
KEY *key_info; /* data of keys in database */
Field **field; /* Pointer to fields */
@@ -1138,8 +1184,6 @@ public:
MY_BITMAP cond_set; /* used to mark fields from sargable conditions*/
/* Active column sets */
MY_BITMAP *read_set, *write_set, *rpl_write_set;
- /* Set if using virtual fields */
- MY_BITMAP *vcol_set, *def_vcol_set;
/* On INSERT: fields that the user specified a value for */
MY_BITMAP has_value_set;
@@ -1177,7 +1221,14 @@ public:
and max #key parts that range access would use.
*/
ha_rows quick_rows[MAX_KEY];
+ uint quick_key_parts[MAX_KEY];
+
double quick_costs[MAX_KEY];
+ /*
+ If there is a range access by i-th index then the cost of
+ index only access for it is stored in quick_index_only_costs[i]
+ */
+ double quick_index_only_costs[MAX_KEY];
/*
Bitmaps of key parts that =const for the duration of join execution. If
@@ -1186,8 +1237,7 @@ public:
*/
key_part_map const_key_parts[MAX_KEY];
- uint quick_key_parts[MAX_KEY];
- uint quick_n_ranges[MAX_KEY];
+ uint quick_n_ranges[MAX_KEY];
/*
Estimate of number of records that satisfy SARGable part of the table
@@ -1362,7 +1412,9 @@ public:
void mark_columns_needed_for_delete(void);
void mark_columns_needed_for_insert(void);
void mark_columns_per_binlog_row_image(void);
- bool mark_virtual_col(Field *field);
+ inline bool mark_column_with_deps(Field *field);
+ inline bool mark_virtual_column_with_deps(Field *field);
+ inline void mark_virtual_column_deps(Field *field);
bool mark_virtual_columns_for_write(bool insert_fl);
bool check_virtual_columns_marked_for_read();
bool check_virtual_columns_marked_for_write();
@@ -1384,39 +1436,21 @@ public:
if (file)
file->column_bitmaps_signal();
}
- inline void column_bitmaps_set(MY_BITMAP *read_set_arg,
- MY_BITMAP *write_set_arg,
- MY_BITMAP *vcol_set_arg)
- {
- read_set= read_set_arg;
- write_set= write_set_arg;
- vcol_set= vcol_set_arg;
- if (file)
- file->column_bitmaps_signal();
- }
inline void column_bitmaps_set_no_signal(MY_BITMAP *read_set_arg,
MY_BITMAP *write_set_arg)
{
read_set= read_set_arg;
write_set= write_set_arg;
}
- inline void column_bitmaps_set_no_signal(MY_BITMAP *read_set_arg,
- MY_BITMAP *write_set_arg,
- MY_BITMAP *vcol_set_arg)
- {
- read_set= read_set_arg;
- write_set= write_set_arg;
- vcol_set= vcol_set_arg;
- }
inline void use_all_columns()
{
column_bitmaps_set(&s->all_set, &s->all_set);
}
+ inline void use_all_stored_columns();
inline void default_column_bitmaps()
{
read_set= &def_read_set;
write_set= &def_write_set;
- vcol_set= def_vcol_set; /* Note that this may be 0 */
rpl_write_set= 0;
}
/** Should this instance of the table be reopened? */
@@ -1497,6 +1531,22 @@ public:
double get_materialization_cost(); // Now used only if is_splittable()==true
void add_splitting_info_for_key_field(struct KEY_FIELD *key_field);
+ key_map with_impossible_ranges;
+
+ /* Number of cost info elements for possible range filters */
+ uint range_rowid_filter_cost_info_elems;
+ /* Pointer to the array of cost info elements for range filters */
+ Range_rowid_filter_cost_info *range_rowid_filter_cost_info;
+ /* The array of pointers to cost info elements for range filters */
+ Range_rowid_filter_cost_info **range_rowid_filter_cost_info_ptr;
+
+ void init_cost_info_for_usable_range_rowid_filters(THD *thd);
+ void prune_range_rowid_filters();
+ Range_rowid_filter_cost_info *
+ best_range_rowid_filter_for_partial_join(uint access_key_no,
+ double records,
+ double access_cost_factor);
+
/**
System Versioning support
*/
@@ -1524,21 +1574,28 @@ public:
Field *vers_start_field() const
{
DBUG_ASSERT(s && s->versioned);
- return field[s->row_start_field];
+ return field[s->vers.start_fieldno];
}
Field *vers_end_field() const
{
DBUG_ASSERT(s && s->versioned);
- return field[s->row_end_field];
+ return field[s->vers.end_fieldno];
}
ulonglong vers_start_id() const;
ulonglong vers_end_id() const;
+ int update_generated_fields();
+ int period_make_insert(Item *src, Field *dst);
+ int insert_portion_of_time(THD *thd, const vers_select_conds_t &period_conds,
+ ha_rows *rows_inserted);
int delete_row();
void vers_update_fields();
void vers_update_end();
+ void find_constraint_correlated_indexes();
+ void clone_handler_for_update();
+ void delete_update_handler();
/** Number of additional fields used in versioned tables */
#define VERSIONING_FIELDS 2
@@ -1735,6 +1792,13 @@ class IS_table_read_plan;
/** The threshold size a blob field buffer before it is freed */
#define MAX_TDC_BLOB_SIZE 65536
+/** number of bytes used by field positional indexes in frm */
+constexpr uint frm_fieldno_size= 2;
+static inline uint16 read_frm_fieldno(const uchar *data)
+{ return uint2korr(data); }
+static inline void store_frm_fieldno(const uchar *data, uint16 fieldno)
+{ int2store(data, fieldno); }
+
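The helpers above pin the on-disk width of a field index to two little-endian bytes. As a minimal, self-contained illustration of the same round-trip (plain shifts stand in for the server's uint2korr/int2store macros):

#include <cassert>
#include <cstdint>

// Little-endian 2-byte field number, mirroring read_frm_fieldno/store_frm_fieldno.
static inline uint16_t read_fieldno(const unsigned char *p)
{ return static_cast<uint16_t>(p[0] | (p[1] << 8)); }

static inline void store_fieldno(unsigned char *p, uint16_t fieldno)
{
  p[0]= static_cast<unsigned char>(fieldno & 0xFF);
  p[1]= static_cast<unsigned char>(fieldno >> 8);
}

int main()
{
  unsigned char buf[2];
  store_fieldno(buf, 513);              // 0x0201 is stored as bytes 01 02
  assert(read_fieldno(buf) == 513);
  return 0;
}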
class select_unit;
class TMP_TABLE_PARAM;
@@ -1844,6 +1908,12 @@ struct vers_select_conds_t
bool used:1;
Vers_history_point start;
Vers_history_point end;
+ Lex_ident name;
+
+ Item_field *field_start;
+ Item_field *field_end;
+
+ const TABLE_SHARE::period_info_t *period;
void empty()
{
@@ -1855,12 +1925,14 @@ struct vers_select_conds_t
void init(vers_system_time_t _type,
Vers_history_point _start= Vers_history_point(),
- Vers_history_point _end= Vers_history_point())
+ Vers_history_point _end= Vers_history_point(),
+ Lex_ident _name= "SYSTEM_TIME")
{
type= _type;
used= false;
start= _start;
end= _end;
+ name= _name;
}
void print(String *str, enum_query_type query_type) const;
@@ -1959,6 +2031,7 @@ struct TABLE_LIST
init_one_table(&table_arg->s->db, &table_arg->s->table_name,
NULL, lock_type);
table= table_arg;
+ vers_conditions.name= table->s->vers.name;
}
inline void init_one_table_for_prelocking(const LEX_CSTRING *db_arg,
@@ -2132,6 +2205,15 @@ struct TABLE_LIST
TABLE_LIST * next_with_rec_ref;
bool is_derived_with_recursive_reference;
bool block_handle_derived;
+ /* The interface employed to materialize the table by a foreign engine */
+ derived_handler *dt_handler;
+ /* The text of the query specifying the derived table */
+ LEX_CSTRING derived_spec;
+ /*
+ The object used to organize execution of the query that specifies
+ the derived table by a foreign engine
+ */
+ Pushdown_derived *pushdown_derived;
ST_SCHEMA_TABLE *schema_table; /* Information_schema table */
st_select_lex *schema_select_lex;
/*
@@ -2392,6 +2474,12 @@ struct TABLE_LIST
/* System Versioning */
vers_select_conds_t vers_conditions;
+ vers_select_conds_t period_conditions;
+
+ bool has_period() const
+ {
+ return period_conditions.is_set();
+ }
/**
@brief
@@ -2594,8 +2682,9 @@ struct TABLE_LIST
return false;
}
void set_lock_type(THD* thd, enum thr_lock_type lock);
- void check_pushable_cond_for_table(Item *cond);
- Item *build_pushable_cond_for_table(THD *thd, Item *cond);
+
+ derived_handler *find_derived_handler(THD *thd);
+ TABLE_LIST *get_first_table();
void remove_join_columns()
{
@@ -2949,7 +3038,7 @@ extern LEX_CSTRING INFORMATION_SCHEMA_NAME;
extern LEX_CSTRING MYSQL_SCHEMA_NAME;
/* table names */
-extern LEX_CSTRING MYSQL_USER_NAME, MYSQL_DB_NAME, MYSQL_PROC_NAME;
+extern LEX_CSTRING MYSQL_PROC_NAME;
inline bool is_infoschema_db(const LEX_CSTRING *name)
{
diff --git a/sql/table_cache.cc b/sql/table_cache.cc
index cb9583a2440..7a555d53558 100644
--- a/sql/table_cache.cc
+++ b/sql/table_cache.cc
@@ -57,7 +57,7 @@ ulong tdc_size; /**< Table definition cache threshold for LRU eviction. */
ulong tc_size; /**< Table cache threshold for LRU eviction. */
uint32 tc_instances;
uint32 tc_active_instances= 1;
-static uint32 tc_contention_warning_reported;
+static std::atomic<bool> tc_contention_warning_reported;
/** Data collections. */
static LF_HASH tdc_hash; /**< Collection of TABLE_SHARE objects. */
@@ -187,8 +187,8 @@ struct Table_cache_instance
n_instances + 1);
}
}
- else if (!my_atomic_fas32_explicit((int32*) &tc_contention_warning_reported,
- 1, MY_MEMORY_ORDER_RELAXED))
+ else if (!tc_contention_warning_reported.exchange(true,
+ std::memory_order_relaxed))
{
sql_print_warning("Detected table cache mutex contention at instance %d: "
"%d%% waits. Additional table cache instance "
@@ -232,7 +232,7 @@ static void intern_close_table(TABLE *table)
uint tc_records(void)
{
ulong total= 0;
- for (ulong i= 0; i < tc_instances; i++)
+ for (uint32 i= 0; i < tc_instances; i++)
{
mysql_mutex_lock(&tc[i].LOCK_table_cache);
total+= tc[i].records;
@@ -277,7 +277,7 @@ static void tc_remove_all_unused_tables(TDC_element *element,
*/
if (mark_flushed)
element->flushed= true;
- for (ulong i= 0; i < tc_instances; i++)
+ for (uint32 i= 0; i < tc_instances; i++)
{
mysql_mutex_lock(&tc[i].LOCK_table_cache);
while ((table= element->free_tables[i].list.pop_front()))
@@ -406,7 +406,7 @@ void tc_add_table(THD *thd, TABLE *table)
@return TABLE object, or NULL if no unused objects.
*/
-static TABLE *tc_acquire_table(THD *thd, TDC_element *element)
+TABLE *tc_acquire_table(THD *thd, TDC_element *element)
{
uint32 n_instances=
my_atomic_load32_explicit((int32*) &tc_active_instances,
@@ -491,7 +491,7 @@ static void tdc_assert_clean_share(TDC_element *element)
DBUG_ASSERT(element->m_flush_tickets.is_empty());
DBUG_ASSERT(element->all_tables.is_empty());
#ifndef DBUG_OFF
- for (ulong i= 0; i < tc_instances; i++)
+ for (uint32 i= 0; i < tc_instances; i++)
DBUG_ASSERT(element->free_tables[i].list.is_empty());
#endif
DBUG_ASSERT(element->all_tables_refs == 0);
@@ -564,7 +564,7 @@ static void lf_alloc_constructor(uchar *arg)
mysql_cond_init(key_TABLE_SHARE_COND_release, &element->COND_release, 0);
element->m_flush_tickets.empty();
element->all_tables.empty();
- for (ulong i= 0; i < tc_instances; i++)
+ for (uint32 i= 0; i < tc_instances; i++)
element->free_tables[i].list.empty();
element->all_tables_refs= 0;
element->share= 0;
@@ -645,7 +645,7 @@ bool tdc_init(void)
void tdc_start_shutdown(void)
{
- DBUG_ENTER("table_def_start_shutdown");
+ DBUG_ENTER("tdc_start_shutdown");
if (tdc_inited)
{
/*
@@ -657,7 +657,7 @@ void tdc_start_shutdown(void)
tdc_size= 0;
tc_size= 0;
/* Free all cached but unused TABLEs and TABLE_SHAREs. */
- close_cached_tables(NULL, NULL, FALSE, LONG_TIMEOUT);
+ purge_tables(true);
}
DBUG_VOID_RETURN;
}
@@ -689,7 +689,7 @@ void tdc_deinit(void)
ulong tdc_records(void)
{
- return my_atomic_load32_explicit(&tdc_hash.count, MY_MEMORY_ORDER_RELAXED);
+ return lf_hash_size(&tdc_hash);
}
@@ -1094,6 +1094,7 @@ bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
TABLE *table;
TDC_element *element;
uint my_refs= 1;
+ bool res= false;
DBUG_ENTER("tdc_remove_table");
DBUG_PRINT("enter",("name: %s remove_type: %d", table_name, remove_type));
@@ -1101,7 +1102,6 @@ bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, table_name,
MDL_EXCLUSIVE));
-
mysql_mutex_lock(&LOCK_unused_shares);
if (!(element= tdc_lock_share(thd, db, table_name)))
{
@@ -1123,7 +1123,7 @@ bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
mysql_mutex_unlock(&LOCK_unused_shares);
tdc_delete_share_from_hash(element);
- DBUG_RETURN(true);
+ DBUG_RETURN(false);
}
mysql_mutex_unlock(&LOCK_unused_shares);
@@ -1189,10 +1189,16 @@ bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
#endif
mysql_mutex_unlock(&element->LOCK_table_share);
}
+ else
+ {
+ mysql_mutex_lock(&element->LOCK_table_share);
+ res= element->ref_count > 1;
+ mysql_mutex_unlock(&element->LOCK_table_share);
+ }
tdc_release_share(element->share);
- DBUG_RETURN(true);
+ DBUG_RETURN(res);
}
diff --git a/sql/table_cache.h b/sql/table_cache.h
index b41665258c9..148edc84223 100644
--- a/sql/table_cache.h
+++ b/sql/table_cache.h
@@ -88,7 +88,6 @@ extern bool tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
const char *db, const char *table_name,
bool kill_delayed_threads);
-
extern int tdc_wait_for_old_version(THD *thd, const char *db,
const char *table_name,
ulong wait_timeout, uint deadlock_weight,
@@ -102,6 +101,7 @@ extern uint tc_records(void);
extern void tc_purge(bool mark_flushed= false);
extern void tc_add_table(THD *thd, TABLE *table);
extern void tc_release_table(TABLE *table);
+extern TABLE *tc_acquire_table(THD *thd, TDC_element *element);
/**
Create a table cache key for non-temporary table.
diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc
index 2f980993fa1..89f40b55f86 100644
--- a/sql/temporary_tables.cc
+++ b/sql/temporary_tables.cc
@@ -49,23 +49,18 @@ bool THD::has_thd_temporary_tables()
/**
Create a temporary table, open it and return the TABLE handle.
- @param hton [IN] Handlerton
@param frm [IN] Binary frm image
@param path [IN] File path (without extension)
@param db [IN] Schema name
@param table_name [IN] Table name
- @param open_in_engine [IN] Whether open table in SE
-
@return Success A pointer to table object
Failure NULL
*/
-TABLE *THD::create_and_open_tmp_table(handlerton *hton,
- LEX_CUSTRING *frm,
+TABLE *THD::create_and_open_tmp_table(LEX_CUSTRING *frm,
const char *path,
const char *db,
const char *table_name,
- bool open_in_engine,
bool open_internal_tables)
{
DBUG_ENTER("THD::create_and_open_tmp_table");
@@ -73,10 +68,10 @@ TABLE *THD::create_and_open_tmp_table(handlerton *hton,
TMP_TABLE_SHARE *share;
TABLE *table= NULL;
- if ((share= create_temporary_table(hton, frm, path, db, table_name)))
+ if ((share= create_temporary_table(frm, path, db, table_name)))
{
open_options|= HA_OPEN_FOR_CREATE;
- table= open_temporary_table(share, table_name, open_in_engine);
+ table= open_temporary_table(share, table_name);
open_options&= ~HA_OPEN_FOR_CREATE;
/*
@@ -96,7 +91,7 @@ TABLE *THD::create_and_open_tmp_table(handlerton *hton,
/* Open any related tables */
if (open_internal_tables && table->internal_tables &&
- open_and_lock_internal_tables(table, open_in_engine))
+ open_and_lock_internal_tables(table, true))
{
drop_temporary_table(table, NULL, false);
DBUG_RETURN(0);
@@ -381,7 +376,7 @@ bool THD::open_temporary_table(TABLE_LIST *tl)
*/
if (!table && (share= find_tmp_table_share(tl)))
{
- table= open_temporary_table(share, tl->get_table_name(), true);
+ table= open_temporary_table(share, tl->get_table_name());
}
if (!table)
@@ -731,6 +726,8 @@ void THD::mark_tmp_tables_as_free_for_reuse()
{
if ((table->query_id == query_id) && !table->open_by_handler)
{
+ if (table->update_handler)
+ table->delete_update_handler();
mark_tmp_table_as_free_for_reuse(table);
}
}
@@ -903,7 +900,6 @@ uint THD::create_tmp_table_def_key(char *key, const char *db,
/**
Create a temporary table.
- @param hton [IN] Handlerton
@param frm [IN] Binary frm image
@param path [IN] File path (without extension)
@param db [IN] Schema name
@@ -912,8 +908,7 @@ uint THD::create_tmp_table_def_key(char *key, const char *db,
@return Success A pointer to table share object
Failure NULL
*/
-TMP_TABLE_SHARE *THD::create_temporary_table(handlerton *hton,
- LEX_CUSTRING *frm,
+TMP_TABLE_SHARE *THD::create_temporary_table(LEX_CUSTRING *frm,
const char *path,
const char *db,
const char *table_name)
@@ -951,8 +946,6 @@ TMP_TABLE_SHARE *THD::create_temporary_table(handlerton *hton,
init_tmp_table_share(this, share, saved_key_cache, key_length,
strend(saved_key_cache) + 1, tmp_path);
- share->db_plugin= ha_lock_engine(this, hton);
-
/*
Prefer using frm image over file. The image might not be available in
ALTER TABLE, when the discovering engine took over the ownership (see
@@ -1079,14 +1072,12 @@ TABLE *THD::find_temporary_table(const char *key, uint key_length,
@param share [IN] Table share
@param alias [IN] Table alias
- @param open_in_engine [IN] Whether open table in SE
@return Success A pointer to table object
Failure NULL
*/
TABLE *THD::open_temporary_table(TMP_TABLE_SHARE *share,
- const char *alias_arg,
- bool open_in_engine)
+ const char *alias_arg)
{
TABLE *table;
LEX_CSTRING alias= {alias_arg, strlen(alias_arg) };
@@ -1099,11 +1090,11 @@ TABLE *THD::open_temporary_table(TMP_TABLE_SHARE *share,
}
if (open_table_from_share(this, share, &alias,
- open_in_engine ? (uint)HA_OPEN_KEYFILE : 0,
+ (uint) HA_OPEN_KEYFILE,
EXTRA_RECORD,
(ha_open_options |
(open_options & HA_OPEN_FOR_CREATE)),
- table, open_in_engine ? false : true))
+ table, false))
{
my_free(table);
DBUG_RETURN(NULL);
diff --git a/sql/threadpool.h b/sql/threadpool.h
index ba17dc042c2..57750b73e42 100644
--- a/sql/threadpool.h
+++ b/sql/threadpool.h
@@ -1,3 +1,7 @@
+#ifndef THREADPOOL_H_INCLUDED
+#define THREADPOOL_H_INCLUDED
+
+#ifdef HAVE_POOL_OF_THREADS
/* Copyright (C) 2012 Monty Program Ab
This program is free software; you can redistribute it and/or modify
@@ -154,3 +158,6 @@ struct TP_pool_generic :TP_pool
virtual int set_stall_limit(uint);
virtual int get_idle_thread_count();
};
+
+#endif /* HAVE_POOL_OF_THREADS */
+#endif /* THREADPOOL_H_INCLUDED */
diff --git a/sql/threadpool_common.cc b/sql/threadpool_common.cc
index 24ab972776c..695623cd4ea 100644
--- a/sql/threadpool_common.cc
+++ b/sql/threadpool_common.cc
@@ -243,7 +243,7 @@ static THD* threadpool_add_connection(CONNECT *connect, void *scheduler_data)
return NULL;
}
delete connect;
- add_to_active_threads(thd);
+ server_threads.insert(thd);
thd->set_mysys_var(mysys_var);
thd->event_scheduler.data= scheduler_data;
diff --git a/sql/threadpool_generic.cc b/sql/threadpool_generic.cc
index a306822b1f1..e37fd6f0cf4 100644
--- a/sql/threadpool_generic.cc
+++ b/sql/threadpool_generic.cc
@@ -578,43 +578,24 @@ static void queue_put(thread_group_t *thread_group, native_event *ev, int cnt)
Also, recalculate time when next timeout check should run.
*/
-static void timeout_check(pool_timer_t *timer)
+static my_bool timeout_check(THD *thd, pool_timer_t *timer)
{
DBUG_ENTER("timeout_check");
-
- mysql_mutex_lock(&LOCK_thread_count);
- I_List_iterator<THD> it(threads);
-
- /* Reset next timeout check, it will be recalculated in the loop below */
- my_atomic_fas64((volatile int64*)&timer->next_timeout_check, ULONGLONG_MAX);
-
- THD *thd;
- while ((thd=it++))
+ if (thd->net.reading_or_writing == 1)
{
- if (thd->net.reading_or_writing != 1)
- continue;
-
- TP_connection_generic *connection= (TP_connection_generic *)thd->event_scheduler.data;
- if (!connection)
- {
- /*
- Connection does not have scheduler data. This happens for example
- if THD belongs to a different scheduler, that is listening to extra_port.
- */
- continue;
- }
-
- if(connection->abs_wait_timeout < timer->current_microtime)
- {
- tp_timeout_handler(connection);
- }
- else
+ /*
+ The connection may have no scheduler data. This happens for example
+ if the THD belongs to a different scheduler, one listening on extra_port.
+ */
+ if (auto connection= (TP_connection_generic *) thd->event_scheduler.data)
{
- set_next_timeout_check(connection->abs_wait_timeout);
+ if (connection->abs_wait_timeout < timer->current_microtime)
+ tp_timeout_handler(connection);
+ else
+ set_next_timeout_check(connection->abs_wait_timeout);
}
}
- mysql_mutex_unlock(&LOCK_thread_count);
- DBUG_VOID_RETURN;
+ DBUG_RETURN(0);
}
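timeout_check() is now a per-THD callback driven by server_threads.iterate() rather than a loop over a global list under LOCK_thread_count. A generic sketch of that iterate-with-callback shape (the container and names below are illustrative, not the server's THD_list API):

#include <list>
#include <mutex>

// Illustrative only: a guarded list whose iterate() invokes a callback for
// each element under the list's own lock, stopping early if the callback
// returns true -- the same shape as server_threads.iterate(timeout_check, timer).
template <typename T>
class Guarded_list
{
  std::mutex lock;
  std::list<T*> elements;
public:
  void insert(T *t)
  {
    std::lock_guard<std::mutex> g(lock);
    elements.push_back(t);
  }
  template <typename Arg>
  bool iterate(bool (*func)(T*, Arg*), Arg *arg)
  {
    std::lock_guard<std::mutex> g(lock);
    for (T *t : elements)
      if (func(t, arg))
        return true;                    // callback requested early stop
    return false;
  }
};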
@@ -671,7 +652,12 @@ static void* timer_thread(void *param)
/* Check if any client exceeded wait_timeout */
if (timer->next_timeout_check <= timer->current_microtime)
- timeout_check(timer);
+ {
+ /* Reset next timeout check, it will be recalculated below */
+ my_atomic_fas64((volatile int64*) &timer->next_timeout_check,
+ ULONGLONG_MAX);
+ server_threads.iterate(timeout_check, timer);
+ }
}
mysql_mutex_unlock(&timer->mutex);
}
@@ -1694,7 +1680,7 @@ int TP_pool_generic::set_pool_size(uint size)
success= (group->pollfd != INVALID_HANDLE_VALUE);
if(!success)
{
- sql_print_error("io_poll_create() failed, errno=%d\n", errno);
+ sql_print_error("io_poll_create() failed, errno=%d", errno);
}
}
mysql_mutex_unlock(&group->mutex);
diff --git a/sql/threadpool_win.cc b/sql/threadpool_win.cc
index 0cc683c631d..67a8e783208 100644
--- a/sql/threadpool_win.cc
+++ b/sql/threadpool_win.cc
@@ -70,12 +70,16 @@ static DWORD fls;
static bool skip_completion_port_on_success = false;
+PTP_CALLBACK_ENVIRON get_threadpool_win_callback_environ()
+{
+ return pool? &callback_environ: 0;
+}
+
/*
Threadpool callbacks.
io_completion_callback - handle client request
timer_callback - handle wait timeout (kill connection)
- shm_read_callback, shm_close_callback - shared memory stuff
login_callback - user login (submitted as threadpool work)
*/
@@ -89,9 +93,6 @@ static void CALLBACK io_completion_callback(PTP_CALLBACK_INSTANCE instance,
static void CALLBACK work_callback(PTP_CALLBACK_INSTANCE instance, PVOID context, PTP_WORK work);
-static void CALLBACK shm_read_callback(PTP_CALLBACK_INSTANCE instance,
- PVOID Context, PTP_WAIT wait,TP_WAIT_RESULT wait_result);
-
static void pre_callback(PVOID context, PTP_CALLBACK_INSTANCE instance);
/* Get current time as Windows time */
@@ -120,7 +121,6 @@ public:
PTP_CALLBACK_INSTANCE callback_instance;
PTP_IO io;
PTP_TIMER timer;
- PTP_WAIT shm_read;
PTP_WORK work;
bool long_callback;
@@ -139,7 +139,15 @@ struct TP_connection *new_TP_connection(CONNECT *connect)
void TP_pool_win::add(TP_connection *c)
{
- SubmitThreadpoolWork(((TP_connection_win *)c)->work);
+ if(FlsGetValue(fls))
+ {
+ /* Already on a threadpool worker thread; execute the callback directly. */
+ tp_callback(c);
+ }
+ else
+ {
+ SubmitThreadpoolWork(((TP_connection_win *)c)->work);
+ }
}
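TP_pool_win::add() now short-circuits when it is already running on a pool thread (detected through the FLS slot) and runs the callback inline instead of re-submitting it. A portable sketch of that dispatch decision, with a thread_local flag standing in for FlsGetValue():

#include <functional>

thread_local bool on_pool_thread= false;   // set by the worker's prolog

void submit(const std::function<void()> &work,
            void (*enqueue)(const std::function<void()> &))
{
  if (on_pool_thread)
    work();          // already inside a pool callback: run it directly
  else
    enqueue(work);   // otherwise hand it over to the pool
}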
@@ -149,7 +157,6 @@ TP_connection_win::TP_connection_win(CONNECT *c) :
callback_instance(0),
io(0),
timer(0),
- shm_read(0),
work(0)
{
}
@@ -170,30 +177,20 @@ int TP_connection_win::init()
case VIO_TYPE_NAMEDPIPE:
handle= (HANDLE)vio->hPipe;
break;
- case VIO_TYPE_SHARED_MEMORY:
- handle= vio->event_server_wrote;
- break;
default:
abort();
}
- if (vio_type == VIO_TYPE_SHARED_MEMORY)
- {
- CHECK_ALLOC_ERROR(shm_read= CreateThreadpoolWait(shm_read_callback, this, &callback_environ));
- }
- else
+
+ /* Performance tweaks (see MSDN documentation) */
+ UCHAR flags= FILE_SKIP_SET_EVENT_ON_HANDLE;
+ if (skip_completion_port_on_success)
{
- /* Performance tweaks (s. MSDN documentation)*/
- UCHAR flags= FILE_SKIP_SET_EVENT_ON_HANDLE;
- if (skip_completion_port_on_success)
- {
- flags |= FILE_SKIP_COMPLETION_PORT_ON_SUCCESS;
- }
- (void)SetFileCompletionNotificationModes(handle, flags);
- /* Assign io completion callback */
- CHECK_ALLOC_ERROR(io= CreateThreadpoolIo(handle, io_completion_callback, this, &callback_environ));
+ flags |= FILE_SKIP_COMPLETION_PORT_ON_SUCCESS;
}
-
+ (void)SetFileCompletionNotificationModes(handle, flags);
+ /* Assign io completion callback */
+ CHECK_ALLOC_ERROR(io= CreateThreadpoolIo(handle, io_completion_callback, this, &callback_environ));
CHECK_ALLOC_ERROR(timer= CreateThreadpoolTimer(timer_callback, this, &callback_environ));
CHECK_ALLOC_ERROR(work= CreateThreadpoolWork(work_callback, this, &callback_environ));
return 0;
@@ -214,11 +211,6 @@ int TP_connection_win::start_io()
DWORD last_error= 0;
int retval;
- if (shm_read)
- {
- SetThreadpoolWait(shm_read, handle, NULL);
- return 0;
- }
StartThreadpoolIo(io);
if (vio_type == VIO_TYPE_TCPIP || vio_type == VIO_TYPE_SSL)
@@ -297,9 +289,6 @@ TP_connection_win::~TP_connection_win()
if (io)
CloseThreadpoolIo(io);
- if (shm_read)
- CloseThreadpoolWait(shm_read);
-
if (work)
CloseThreadpoolWork(work);
@@ -312,14 +301,13 @@ TP_connection_win::~TP_connection_win()
void TP_connection_win::wait_begin(int type)
{
-
/*
Signal to the threadpool whenever a callback can run long. Currently, binlog
waits are a good candidate; their waits can be really long.
*/
if (type == THD_WAIT_BINLOG)
{
- if (!long_callback)
+ if (!long_callback && callback_instance)
{
CallbackMayRunLong(callback_instance);
long_callback= true;
@@ -332,12 +320,11 @@ void TP_connection_win::wait_end()
/* Do we need to do anything ? */
}
-/*
- This function should be called first whenever a callback is invoked in the
+/*
+ This function should be called first whenever a callback is invoked in the
threadpool, does my_thread_init() if not yet done
*/
-extern ulong thread_created;
-static void pre_callback(PVOID context, PTP_CALLBACK_INSTANCE instance)
+void tp_win_callback_prolog()
{
if (FlsGetValue(fls) == NULL)
{
@@ -347,6 +334,12 @@ static void pre_callback(PVOID context, PTP_CALLBACK_INSTANCE instance)
InterlockedIncrement((volatile long *)&tp_stats.num_worker_threads);
my_thread_init();
}
+}
+
+extern ulong thread_created;
+static void pre_callback(PVOID context, PTP_CALLBACK_INSTANCE instance)
+{
+ tp_win_callback_prolog();
TP_connection_win *c = (TP_connection_win *)context;
c->callback_instance = instance;
c->long_callback = false;
@@ -420,29 +413,6 @@ static VOID CALLBACK timer_callback(PTP_CALLBACK_INSTANCE instance,
}
}
-
-/*
- Shared memory read callback.
- Invoked when read event is set on connection.
-*/
-
-static void CALLBACK shm_read_callback(PTP_CALLBACK_INSTANCE instance,
- PVOID context, PTP_WAIT wait,TP_WAIT_RESULT wait_result)
-{
- TP_connection_win *c= (TP_connection_win *)context;
- /* Disarm wait. */
- SetThreadpoolWait(wait, NULL, NULL);
-
- /*
- This is an autoreset event, and one wakeup is eaten already by threadpool,
- and the current state is "not set". Thus we need to reset the event again,
- or vio_read will hang.
- */
- SetEvent(c->handle);
- tp_callback(instance, context);
-}
-
-
static void CALLBACK work_callback(PTP_CALLBACK_INSTANCE instance, PVOID context, PTP_WORK work)
{
tp_callback(instance, context);
diff --git a/sql/transaction.cc b/sql/transaction.cc
index 74f4eda881b..2887ae763df 100644
--- a/sql/transaction.cc
+++ b/sql/transaction.cc
@@ -24,19 +24,20 @@
#include "debug_sync.h" // DEBUG_SYNC
#include "sql_acl.h"
#include "semisync_master.h"
+#ifdef WITH_WSREP
+#include "wsrep_trans_observer.h"
+#endif /* WITH_WSREP */
-#ifndef EMBEDDED_LIBRARY
/**
Helper: Tell tracker (if any) that transaction ended.
*/
-static void trans_track_end_trx(THD *thd)
+void trans_track_end_trx(THD *thd)
{
+#ifndef EMBEDDED_LIBRARY
if (thd->variables.session_track_transaction_info > TX_TRACK_NONE)
thd->session_tracker.transaction_info.end_trx(thd);
-}
-#else
-#define trans_track_end_trx(A) do{}while(0)
#endif //EMBEDDED_LIBRARY
+}
/**
@@ -59,7 +60,6 @@ void trans_reset_one_shot_chistics(THD *thd)
/* Conditions under which the transaction state must not change. */
static bool trans_check(THD *thd)
{
- enum xa_states xa_state= thd->transaction.xid_state.xa_state;
DBUG_ENTER("trans_check");
/*
@@ -70,8 +70,8 @@ static bool trans_check(THD *thd)
if (unlikely(thd->in_sub_stmt))
my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0));
- if (xa_state != XA_NOTR)
- my_error(ER_XAER_RMFAIL, MYF(0), xa_state_names[xa_state]);
+ if (thd->transaction.xid_state.is_explicit_XA())
+ thd->transaction.xid_state.er_xaer_rmfail();
else
DBUG_RETURN(FALSE);
@@ -80,67 +80,6 @@ static bool trans_check(THD *thd)
/**
- Mark a XA transaction as rollback-only if the RM unilaterally
- rolled back the transaction branch.
-
- @note If a rollback was requested by the RM, this function sets
- the appropriate rollback error code and transits the state
- to XA_ROLLBACK_ONLY.
-
- @return TRUE if transaction was rolled back or if the transaction
- state is XA_ROLLBACK_ONLY. FALSE otherwise.
-*/
-static bool xa_trans_rolled_back(XID_STATE *xid_state)
-{
- if (xid_state->rm_error)
- {
- switch (xid_state->rm_error) {
- case ER_LOCK_WAIT_TIMEOUT:
- my_error(ER_XA_RBTIMEOUT, MYF(0));
- break;
- case ER_LOCK_DEADLOCK:
- my_error(ER_XA_RBDEADLOCK, MYF(0));
- break;
- default:
- my_error(ER_XA_RBROLLBACK, MYF(0));
- }
- xid_state->xa_state= XA_ROLLBACK_ONLY;
- }
-
- return (xid_state->xa_state == XA_ROLLBACK_ONLY);
-}
-
-
-/**
- Rollback the active XA transaction.
-
- @note Resets rm_error before calling ha_rollback(), so
- the thd->transaction.xid structure gets reset
- by ha_rollback() / THD::transaction::cleanup().
-
- @return TRUE if the rollback failed, FALSE otherwise.
-*/
-
-static bool xa_trans_force_rollback(THD *thd)
-{
- /*
- We must reset rm_error before calling ha_rollback(),
- so thd->transaction.xid structure gets reset
- by ha_rollback()/THD::transaction::cleanup().
- */
- thd->transaction.xid_state.rm_error= 0;
- if (WSREP_ON)
- wsrep_register_hton(thd, TRUE);
- if (ha_rollback_trans(thd, true))
- {
- my_error(ER_XAER_RMERR, MYF(0));
- return true;
- }
- return false;
-}
-
-
-/**
Begin a new transaction.
@note Beginning a transaction implicitly commits any current
@@ -169,14 +108,16 @@ bool trans_begin(THD *thd, uint flags)
(thd->variables.option_bits & OPTION_TABLE_LOCK))
{
thd->variables.option_bits&= ~OPTION_TABLE_LOCK;
- if (WSREP_ON)
- wsrep_register_hton(thd, TRUE);
thd->server_status&=
~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY);
DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS"));
res= MY_TEST(ha_commit_trans(thd, TRUE));
- if (WSREP_ON)
- wsrep_post_commit(thd, TRUE);
+#ifdef WITH_WSREP
+ if (wsrep_thd_is_local(thd))
+ {
+ res= res || wsrep_after_statement(thd);
+ }
+#endif /* WITH_WSREP */
}
thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
@@ -237,9 +178,14 @@ bool trans_begin(THD *thd, uint flags)
}
#ifdef WITH_WSREP
- thd->wsrep_PA_safe= true;
- if (WSREP_CLIENT(thd) && wsrep_sync_wait(thd))
- DBUG_RETURN(TRUE);
+ if (wsrep_thd_is_local(thd))
+ {
+ if (wsrep_sync_wait(thd))
+ DBUG_RETURN(TRUE);
+ if (!thd->tx_read_only &&
+ wsrep_start_transaction(thd, thd->wsrep_next_trx_id()))
+ DBUG_RETURN(TRUE);
+ }
#endif /* WITH_WSREP */
thd->variables.option_bits|= OPTION_BEGIN;
@@ -284,8 +230,6 @@ bool trans_commit(THD *thd)
if (trans_check(thd))
DBUG_RETURN(TRUE);
- if (WSREP_ON)
- wsrep_register_hton(thd, TRUE);
thd->server_status&=
~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY);
DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS"));
@@ -296,8 +240,6 @@ bool trans_commit(THD *thd)
mysql_mutex_assert_not_owner(&LOCK_after_binlog_sync);
mysql_mutex_assert_not_owner(&LOCK_commit_ordered);
- if (WSREP_ON)
- wsrep_post_commit(thd, TRUE);
/*
if res is non-zero, then ha_commit_trans has rolled back the
transaction, so the hooks for rollback will be called.
@@ -353,14 +295,10 @@ bool trans_commit_implicit(THD *thd)
/* Safety if one did "drop table" on locked tables */
if (!thd->locked_tables_mode)
thd->variables.option_bits&= ~OPTION_TABLE_LOCK;
- if (WSREP_ON)
- wsrep_register_hton(thd, TRUE);
thd->server_status&=
~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY);
DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS"));
res= MY_TEST(ha_commit_trans(thd, TRUE));
- if (WSREP_ON)
- wsrep_post_commit(thd, TRUE);
}
thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
@@ -394,14 +332,9 @@ bool trans_rollback(THD *thd)
int res;
DBUG_ENTER("trans_rollback");
-#ifdef WITH_WSREP
- thd->wsrep_PA_safe= true;
-#endif /* WITH_WSREP */
if (trans_check(thd))
DBUG_RETURN(TRUE);
- if (WSREP_ON)
- wsrep_register_hton(thd, TRUE);
thd->server_status&=
~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY);
DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS"));
@@ -500,14 +433,10 @@ bool trans_commit_stmt(THD *thd)
if (thd->transaction.stmt.ha_list)
{
- if (WSREP_ON)
- wsrep_register_hton(thd, FALSE);
res= ha_commit_trans(thd, FALSE);
if (! thd->in_active_multi_stmt_transaction())
{
trans_reset_one_shot_chistics(thd);
- if (WSREP_ON)
- wsrep_post_commit(thd, FALSE);
}
}
@@ -563,8 +492,6 @@ bool trans_rollback_stmt(THD *thd)
if (thd->transaction.stmt.ha_list)
{
- if (WSREP_ON)
- wsrep_register_hton(thd, FALSE);
ha_rollback_trans(thd, FALSE);
if (! thd->in_active_multi_stmt_transaction())
trans_reset_one_shot_chistics(thd);
@@ -718,7 +645,8 @@ bool trans_rollback_to_savepoint(THD *thd, LEX_CSTRING name)
logging is off.
*/
bool mdl_can_safely_rollback_to_savepoint=
- (!(mysql_bin_log.is_open() && thd->variables.sql_log_bin) ||
+ (!((WSREP_EMULATE_BINLOG_NNULL(thd) || mysql_bin_log.is_open())
+ && thd->variables.sql_log_bin) ||
ha_rollback_to_savepoint_can_release_mdl(thd));
if (ha_rollback_to_savepoint(thd, sv))
@@ -772,274 +700,3 @@ bool trans_release_savepoint(THD *thd, LEX_CSTRING name)
DBUG_RETURN(MY_TEST(res));
}
-
-
-/**
- Starts an XA transaction with the given xid value.
-
- @param thd Current thread
-
- @retval FALSE Success
- @retval TRUE Failure
-*/
-
-bool trans_xa_start(THD *thd)
-{
- enum xa_states xa_state= thd->transaction.xid_state.xa_state;
- DBUG_ENTER("trans_xa_start");
-
- if (xa_state == XA_IDLE && thd->lex->xa_opt == XA_RESUME)
- {
- bool not_equal= !thd->transaction.xid_state.xid.eq(thd->lex->xid);
- if (not_equal)
- my_error(ER_XAER_NOTA, MYF(0));
- else
- thd->transaction.xid_state.xa_state= XA_ACTIVE;
- DBUG_RETURN(not_equal);
- }
-
- /* TODO: JOIN is not supported yet. */
- if (thd->lex->xa_opt != XA_NONE)
- my_error(ER_XAER_INVAL, MYF(0));
- else if (xa_state != XA_NOTR)
- my_error(ER_XAER_RMFAIL, MYF(0), xa_state_names[xa_state]);
- else if (thd->locked_tables_mode || thd->in_active_multi_stmt_transaction())
- my_error(ER_XAER_OUTSIDE, MYF(0));
- else if (!trans_begin(thd))
- {
- DBUG_ASSERT(thd->transaction.xid_state.xid.is_null());
- thd->transaction.xid_state.xa_state= XA_ACTIVE;
- thd->transaction.xid_state.rm_error= 0;
- thd->transaction.xid_state.xid.set(thd->lex->xid);
- if (xid_cache_insert(thd, &thd->transaction.xid_state))
- {
- thd->transaction.xid_state.xa_state= XA_NOTR;
- thd->transaction.xid_state.xid.null();
- trans_rollback(thd);
- DBUG_RETURN(true);
- }
- DBUG_RETURN(FALSE);
- }
-
- DBUG_RETURN(TRUE);
-}
-
-
-/**
- Put a XA transaction in the IDLE state.
-
- @param thd Current thread
-
- @retval FALSE Success
- @retval TRUE Failure
-*/
-
-bool trans_xa_end(THD *thd)
-{
- DBUG_ENTER("trans_xa_end");
-
- /* TODO: SUSPEND and FOR MIGRATE are not supported yet. */
- if (thd->lex->xa_opt != XA_NONE)
- my_error(ER_XAER_INVAL, MYF(0));
- else if (thd->transaction.xid_state.xa_state != XA_ACTIVE)
- my_error(ER_XAER_RMFAIL, MYF(0),
- xa_state_names[thd->transaction.xid_state.xa_state]);
- else if (!thd->transaction.xid_state.xid.eq(thd->lex->xid))
- my_error(ER_XAER_NOTA, MYF(0));
- else if (!xa_trans_rolled_back(&thd->transaction.xid_state))
- thd->transaction.xid_state.xa_state= XA_IDLE;
-
- DBUG_RETURN(thd->is_error() ||
- thd->transaction.xid_state.xa_state != XA_IDLE);
-}
-
-
-/**
- Put a XA transaction in the PREPARED state.
-
- @param thd Current thread
-
- @retval FALSE Success
- @retval TRUE Failure
-*/
-
-bool trans_xa_prepare(THD *thd)
-{
- DBUG_ENTER("trans_xa_prepare");
-
- if (thd->transaction.xid_state.xa_state != XA_IDLE)
- my_error(ER_XAER_RMFAIL, MYF(0),
- xa_state_names[thd->transaction.xid_state.xa_state]);
- else if (!thd->transaction.xid_state.xid.eq(thd->lex->xid))
- my_error(ER_XAER_NOTA, MYF(0));
- else if (ha_prepare(thd))
- {
- xid_cache_delete(thd, &thd->transaction.xid_state);
- thd->transaction.xid_state.xa_state= XA_NOTR;
- my_error(ER_XA_RBROLLBACK, MYF(0));
- }
- else
- thd->transaction.xid_state.xa_state= XA_PREPARED;
-
- DBUG_RETURN(thd->is_error() ||
- thd->transaction.xid_state.xa_state != XA_PREPARED);
-}
-
-
-/**
- Commit and terminate the a XA transaction.
-
- @param thd Current thread
-
- @retval FALSE Success
- @retval TRUE Failure
-*/
-
-bool trans_xa_commit(THD *thd)
-{
- bool res= TRUE;
- enum xa_states xa_state= thd->transaction.xid_state.xa_state;
- DBUG_ENTER("trans_xa_commit");
-
- if (!thd->transaction.xid_state.xid.eq(thd->lex->xid))
- {
- if (thd->fix_xid_hash_pins())
- {
- my_error(ER_OUT_OF_RESOURCES, MYF(0));
- DBUG_RETURN(TRUE);
- }
-
- XID_STATE *xs= xid_cache_search(thd, thd->lex->xid);
- res= !xs;
- if (res)
- my_error(ER_XAER_NOTA, MYF(0));
- else
- {
- res= xa_trans_rolled_back(xs);
- ha_commit_or_rollback_by_xid(thd->lex->xid, !res);
- xid_cache_delete(thd, xs);
- }
- DBUG_RETURN(res);
- }
-
- if (xa_trans_rolled_back(&thd->transaction.xid_state))
- {
- xa_trans_force_rollback(thd);
- res= thd->is_error();
- }
- else if (xa_state == XA_IDLE && thd->lex->xa_opt == XA_ONE_PHASE)
- {
- if (WSREP_ON)
- wsrep_register_hton(thd, TRUE);
- int r= ha_commit_trans(thd, TRUE);
- if ((res= MY_TEST(r)))
- my_error(r == 1 ? ER_XA_RBROLLBACK : ER_XAER_RMERR, MYF(0));
- if (WSREP_ON)
- wsrep_post_commit(thd, TRUE);
- }
- else if (xa_state == XA_PREPARED && thd->lex->xa_opt == XA_NONE)
- {
- MDL_request mdl_request;
-
- /*
- Acquire metadata lock which will ensure that COMMIT is blocked
- by active FLUSH TABLES WITH READ LOCK (and vice versa COMMIT in
- progress blocks FTWRL).
-
- We allow FLUSHer to COMMIT; we assume FLUSHer knows what it does.
- */
- mdl_request.init(MDL_key::COMMIT, "", "", MDL_INTENTION_EXCLUSIVE,
- MDL_TRANSACTION);
-
- if (thd->mdl_context.acquire_lock(&mdl_request,
- thd->variables.lock_wait_timeout))
- {
- if (WSREP_ON)
- wsrep_register_hton(thd, TRUE);
- ha_rollback_trans(thd, TRUE);
- my_error(ER_XAER_RMERR, MYF(0));
- }
- else
- {
- DEBUG_SYNC(thd, "trans_xa_commit_after_acquire_commit_lock");
-
- res= MY_TEST(ha_commit_one_phase(thd, 1));
- if (res)
- my_error(ER_XAER_RMERR, MYF(0));
- }
- }
- else
- {
- my_error(ER_XAER_RMFAIL, MYF(0), xa_state_names[xa_state]);
- DBUG_RETURN(TRUE);
- }
-
- thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
- thd->transaction.all.reset();
- thd->server_status&=
- ~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY);
- DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS"));
- xid_cache_delete(thd, &thd->transaction.xid_state);
- thd->transaction.xid_state.xa_state= XA_NOTR;
-
- trans_track_end_trx(thd);
-
- DBUG_RETURN(res);
-}
-
-
-/**
- Roll back and terminate a XA transaction.
-
- @param thd Current thread
-
- @retval FALSE Success
- @retval TRUE Failure
-*/
-
-bool trans_xa_rollback(THD *thd)
-{
- bool res= TRUE;
- enum xa_states xa_state= thd->transaction.xid_state.xa_state;
- DBUG_ENTER("trans_xa_rollback");
-
- if (!thd->transaction.xid_state.xid.eq(thd->lex->xid))
- {
- if (thd->fix_xid_hash_pins())
- {
- my_error(ER_OUT_OF_RESOURCES, MYF(0));
- DBUG_RETURN(TRUE);
- }
-
- XID_STATE *xs= xid_cache_search(thd, thd->lex->xid);
- if (!xs)
- my_error(ER_XAER_NOTA, MYF(0));
- else
- {
- xa_trans_rolled_back(xs);
- ha_commit_or_rollback_by_xid(thd->lex->xid, 0);
- xid_cache_delete(thd, xs);
- }
- DBUG_RETURN(thd->get_stmt_da()->is_error());
- }
-
- if (xa_state != XA_IDLE && xa_state != XA_PREPARED && xa_state != XA_ROLLBACK_ONLY)
- {
- my_error(ER_XAER_RMFAIL, MYF(0), xa_state_names[xa_state]);
- DBUG_RETURN(TRUE);
- }
-
- res= xa_trans_force_rollback(thd);
-
- thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
- thd->transaction.all.reset();
- thd->server_status&=
- ~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY);
- DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS"));
- xid_cache_delete(thd, &thd->transaction.xid_state);
- thd->transaction.xid_state.xa_state= XA_NOTR;
-
- trans_track_end_trx(thd);
-
- DBUG_RETURN(res);
-}
diff --git a/sql/transaction.h b/sql/transaction.h
index 7e34693a2eb..5eaa2b00027 100644
--- a/sql/transaction.h
+++ b/sql/transaction.h
@@ -24,6 +24,8 @@
class THD;
+void trans_track_end_trx(THD *thd);
+
bool trans_begin(THD *thd, uint flags= 0);
bool trans_commit(THD *thd);
bool trans_commit_implicit(THD *thd);
@@ -37,12 +39,6 @@ bool trans_savepoint(THD *thd, LEX_CSTRING name);
bool trans_rollback_to_savepoint(THD *thd, LEX_CSTRING name);
bool trans_release_savepoint(THD *thd, LEX_CSTRING name);
-bool trans_xa_start(THD *thd);
-bool trans_xa_end(THD *thd);
-bool trans_xa_prepare(THD *thd);
-bool trans_xa_commit(THD *thd);
-bool trans_xa_rollback(THD *thd);
-
void trans_reset_one_shot_chistics(THD *thd);
#endif /* TRANSACTION_H */
diff --git a/sql/udf_example.c b/sql/udf_example.c
index 6db2b5e737a..bdc995b51fc 100644
--- a/sql/udf_example.c
+++ b/sql/udf_example.c
@@ -173,6 +173,13 @@ void avgcost_reset( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error
void avgcost_clear( UDF_INIT* initid, char* is_null, char *error );
void avgcost_add( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error );
double avgcost( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error );
+my_bool avg2_init( UDF_INIT* initid, UDF_ARGS* args, char* message );
+void avg2_deinit( UDF_INIT* initid );
+void avg2_reset( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error );
+void avg2_clear( UDF_INIT* initid, char* is_null, char *error );
+void avg2_add( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error );
+void avg2_remove( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error );
+double avg2( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error );
my_bool is_const_init(UDF_INIT *initid, UDF_ARGS *args, char *message);
char *is_const(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long
*length, char *is_null, char *error);
@@ -1049,6 +1056,138 @@ avgcost( UDF_INIT* initid, UDF_ARGS* args __attribute__((unused)),
return data->totalprice/(double)data->totalquantity;
}
+
+/*
+** Average 2 (number, sum)
+*/
+struct avg2_data
+{
+ ulonglong count;
+ double sum;
+};
+
+
+my_bool
+avg2_init( UDF_INIT* initid, UDF_ARGS* args, char* message )
+{
+ struct avg2_data* data;
+
+ if (args->arg_count != 2)
+ {
+ strcpy(
+ message,
+ "wrong number of arguments: AVG2() requires two arguments"
+ );
+ return 1;
+ }
+
+ if ((args->arg_type[0] != INT_RESULT) || (args->arg_type[1] != REAL_RESULT) )
+ {
+ strcpy(
+ message,
+ "wrong argument type: AVG2() requires an INT and a REAL"
+ );
+ return 1;
+ }
+
+ /*
+ ** The INT and REAL argument types checked above are kept as declared;
+ ** uncomment the following to force both arguments to double instead:
+ ** args->arg_type[0] = REAL_RESULT;
+ ** args->arg_type[1] = REAL_RESULT;
+ */
+
+ initid->maybe_null = 1; /* The result may be null */
+ initid->decimals = 4; /* We want 4 decimals in the result */
+ initid->max_length = 20; /* 6 digits + . + 10 decimals */
+
+ if (!(data = (struct avg2_data*) malloc(sizeof(struct avg2_data))))
+ {
+ strmov(message,"Couldn't allocate memory");
+ return 1;
+ }
+ data->count = 0;
+ data->sum = 0.0;
+
+ initid->ptr = (char*)data;
+
+ return 0;
+}
+
+void
+avg2_deinit( UDF_INIT* initid )
+{
+ free(initid->ptr);
+}
+
+
+/* This is only for MySQL 4.0 compatibility */
+void
+avg2_reset(UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* message)
+{
+ avg2_clear(initid, is_null, message);
+ avg2_add(initid, args, is_null, message);
+}
+
+/* This is needed to get things to work in MySQL 4.1.1 and above */
+
+void
+avg2_clear(UDF_INIT* initid, char* is_null __attribute__((unused)),
+ char* message __attribute__((unused)))
+{
+ struct avg2_data* data = (struct avg2_data*)initid->ptr;
+ data->sum= 0.0;
+ data->count= 0;
+}
+
+
+void
+avg2_add(UDF_INIT* initid, UDF_ARGS* args,
+ char* is_null __attribute__((unused)),
+ char* message __attribute__((unused)))
+{
+ if (args->args[0] && args->args[1])
+ {
+ struct avg2_data* data = (struct avg2_data*)initid->ptr;
+ longlong quantity = *((longlong*)args->args[0]);
+ double sum = *((double*)args->args[1]);
+
+ data->count += quantity;
+ data->sum += sum;
+ }
+}
+
+
+void
+avg2_remove(UDF_INIT* initid, UDF_ARGS* args,
+ char* is_null __attribute__((unused)),
+ char* message __attribute__((unused)))
+{
+ if (args->args[0] && args->args[1])
+ {
+ struct avg2_data* data = (struct avg2_data*)initid->ptr;
+ longlong quantity = *((longlong*)args->args[0]);
+ double sum = *((double*)args->args[1]);
+
+ data->count -= quantity;
+ data->sum -= sum;
+ }
+}
+
+
+double
+avg2( UDF_INIT* initid, UDF_ARGS* args __attribute__((unused)),
+ char* is_null, char* error __attribute__((unused)))
+{
+ struct avg2_data* data = (struct avg2_data*)initid->ptr;
+ if (!data->count)
+ {
+ *is_null = 1;
+ return 0.0;
+ }
+
+ *is_null = 0;
+ return data->sum/(double)data->count;
+}
+
my_bool myfunc_argument_name_init(UDF_INIT *initid, UDF_ARGS *args,
char *message);
char *myfunc_argument_name(UDF_INIT *initid, UDF_ARGS *args, char *result,
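AVG2 keeps a running (count, sum) pair: the first argument is added to the count, the second to the sum, and the result is sum/count (NULL when the count is zero); avg2_remove() undoes an avg2_add(), so previously added rows can be subtracted out again. A compact sketch of the same accumulator in plain C++ (std::optional stands in for the is_null flag):

#include <cstdint>
#include <optional>

struct Avg2
{
  uint64_t count= 0;
  double   sum= 0.0;

  void add(long long quantity, double value)    { count+= quantity; sum+= value; }
  void remove(long long quantity, double value) { count-= quantity; sum-= value; }

  // NULL (nullopt) when nothing has been accumulated, like avg2() above.
  std::optional<double> result() const
  { return count ? std::optional<double>(sum / double(count)) : std::nullopt; }
};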
diff --git a/sql/udf_example.def b/sql/udf_example.def
index 74230b638bf..903c2b74893 100644
--- a/sql/udf_example.def
+++ b/sql/udf_example.def
@@ -23,6 +23,13 @@ EXPORTS
avgcost_add
avgcost_clear
avgcost
+ avg2_init
+ avg2_deinit
+ avg2_reset
+ avg2_add
+ avg2_remove
+ avg2_clear
+ avg2
is_const
is_const_init
check_const_len
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 6540e11578b..02d876e1455 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -39,7 +39,7 @@
/* threshold for safe_alloca */
#define ALLOCA_THRESHOLD 2048
-static uint pack_keys(uchar *,uint, KEY *, ulong);
+static uint pack_keys(uchar *,uint, KEY *, ulong, uint);
static bool pack_header(THD *, uchar *, List<Create_field> &, HA_CREATE_INFO *,
ulong, handler *);
static bool pack_vcols(String *, List<Create_field> &, List<Virtual_column_info> *);
@@ -72,19 +72,24 @@ static uchar *extra2_write_len(uchar *pos, size_t len)
return pos;
}
+static uchar* extra2_write_str(uchar *pos, const LEX_CSTRING &str)
+{
+ pos= extra2_write_len(pos, str.length);
+ memcpy(pos, str.str, str.length);
+ return pos + str.length;
+}
+
static uchar *extra2_write(uchar *pos, enum extra2_frm_value_type type,
- const LEX_CSTRING *str)
+ const LEX_CSTRING &str)
{
*pos++ = type;
- pos= extra2_write_len(pos, str->length);
- memcpy(pos, str->str, str->length);
- return pos + str->length;
+ return extra2_write_str(pos, str);
}
static uchar *extra2_write(uchar *pos, enum extra2_frm_value_type type,
- LEX_CUSTRING *str)
+ const LEX_CUSTRING &str)
{
- return extra2_write(pos, type, reinterpret_cast<LEX_CSTRING *>(str));
+ return extra2_write(pos, type, *reinterpret_cast<const LEX_CSTRING*>(&str));
}
static uchar *extra2_write_field_properties(uchar *pos,
@@ -106,25 +111,18 @@ static uchar *extra2_write_field_properties(uchar *pos,
return pos;
}
-static const bool ROW_START = true;
-static const bool ROW_END = false;
-
-static inline
-uint16
-vers_get_field(HA_CREATE_INFO *create_info, List<Create_field> &create_fields, bool row_start)
+static uint16
+get_fieldno_by_name(HA_CREATE_INFO *create_info, List<Create_field> &create_fields,
+ const Lex_ident &field_name)
{
- DBUG_ASSERT(create_info->versioned());
-
List_iterator<Create_field> it(create_fields);
Create_field *sql_field = NULL;
- const Lex_ident row_field= row_start ? create_info->vers_info.as_row.start
- : create_info->vers_info.as_row.end;
- DBUG_ASSERT(row_field);
+ DBUG_ASSERT(field_name);
for (unsigned field_no = 0; (sql_field = it++); ++field_no)
{
- if (row_field.streq(sql_field->field_name))
+ if (field_name.streq(sql_field->field_name))
{
DBUG_ASSERT(field_no <= uint16(~0U));
return uint16(field_no);
@@ -149,6 +147,11 @@ bool has_extra2_field_flags(List<Create_field> &create_fields)
return false;
}
+static size_t extra2_str_size(size_t len)
+{
+ return (len > 255 ? 3 : 1) + len;
+}
+
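extra2_str_size() matches extra2_write_len(): a payload of up to 255 bytes takes a single length byte, anything longer takes three, and the helper returns the total encoded size. A small self-checking sketch of that rule:

#include <cassert>
#include <cstddef>

// Encoded size of a length-prefixed extra2 string: 1 length byte for
// payloads <= 255 bytes, 3 length bytes otherwise, plus the payload itself.
static size_t extra2_str_size_demo(size_t len)
{
  return (len > 255 ? 3 : 1) + len;
}

int main()
{
  assert(extra2_str_size_demo(10)  == 11);    // short form
  assert(extra2_str_size_demo(300) == 303);   // long form
  return 0;
}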
/**
Create a frm (table definition) file
@@ -164,7 +167,7 @@ bool has_extra2_field_flags(List<Create_field> &create_fields)
or null LEX_CUSTRING (str==0) in case of an error.
*/
-LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table,
+LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING &table,
HA_CREATE_INFO *create_info,
List<Create_field> &create_fields,
uint keys, KEY *key_info, handler *db_file)
@@ -176,6 +179,12 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table,
ulong data_offset;
uint options_len;
uint gis_extra2_len= 0;
+ size_t period_info_len= create_info->period_info.name
+ ? extra2_str_size(create_info->period_info.name.length)
+ + extra2_str_size(create_info->period_info.constr->name.length)
+ + 2 * frm_fieldno_size
+ : 0;
+ uint e_unique_hash_extra_parts= 0;
uchar fileinfo[FRM_HEADER_SIZE],forminfo[FRM_FORMINFO_SIZE];
const partition_info *part_info= IF_PARTITIONING(thd->work_part_info, 0);
bool error;
@@ -238,7 +247,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table,
DBUG_PRINT("info", ("Options length: %u", options_len));
if (validate_comment_length(thd, &create_info->comment, TABLE_COMMENT_MAXLEN,
- ER_TOO_LONG_TABLE_COMMENT, table->str))
+ ER_TOO_LONG_TABLE_COMMENT, table.str))
DBUG_RETURN(frm);
/*
If table comment is longer than TABLE_COMMENT_INLINE_MAXLEN bytes,
@@ -272,28 +281,35 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table,
prepare_frm_header(thd, reclength, fileinfo, create_info, keys, key_info);
/* one byte for a type, one or three for a length */
- size_t extra2_size= 1 + 1 + create_info->tabledef_version.length;
+ size_t extra2_size= 1 + extra2_str_size(create_info->tabledef_version.length);
if (options_len)
- extra2_size+= 1 + (options_len > 255 ? 3 : 1) + options_len;
+ extra2_size+= 1 + extra2_str_size(options_len);
if (part_info)
- extra2_size+= 1 + 1 + hton_name(part_info->default_engine_type)->length;
+ extra2_size+= 1 + extra2_str_size(hton_name(part_info->default_engine_type)->length);
if (gis_extra2_len)
- extra2_size+= 1 + (gis_extra2_len > 255 ? 3 : 1) + gis_extra2_len;
+ extra2_size+= 1 + extra2_str_size(gis_extra2_len);
if (create_info->versioned())
{
- extra2_size+= 1 + 1 + 2 * sizeof(uint16);
+ extra2_size+= 1 + extra2_str_size(2 * frm_fieldno_size);
+ }
+
+ if (create_info->period_info.name)
+ {
+ extra2_size+= 1 + extra2_str_size(period_info_len);
}
bool has_extra2_field_flags_= has_extra2_field_flags(create_fields);
if (has_extra2_field_flags_)
{
- extra2_size+= 1 + (create_fields.elements > 255 ? 3 : 1) +
- create_fields.elements;
+ extra2_size+= 1 + extra2_str_size(create_fields.elements);
}
+ for (i= 0; i < keys; i++)
+ if (key_info[i].algorithm == HA_KEY_ALG_LONG_HASH)
+ e_unique_hash_extra_parts++;
key_buff_length= uint4korr(fileinfo+47);
frm.length= FRM_HEADER_SIZE; // fileinfo;
@@ -313,7 +329,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table,
if (frm.length > FRM_MAX_SIZE ||
create_info->expression_length > UINT_MAX32)
{
- my_error(ER_TABLE_DEFINITION_TOO_BIG, MYF(0), table->str);
+ my_error(ER_TABLE_DEFINITION_TOO_BIG, MYF(0), table.str);
DBUG_RETURN(frm);
}
@@ -326,11 +342,11 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table,
pos = frm_ptr + 64;
compile_time_assert(EXTRA2_TABLEDEF_VERSION != '/');
pos= extra2_write(pos, EXTRA2_TABLEDEF_VERSION,
- &create_info->tabledef_version);
+ create_info->tabledef_version);
if (part_info)
pos= extra2_write(pos, EXTRA2_DEFAULT_PART_ENGINE,
- hton_name(part_info->default_engine_type));
+ *hton_name(part_info->default_engine_type));
if (options_len)
{
@@ -349,14 +365,32 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table,
}
#endif /*HAVE_SPATIAL*/
+ // PERIOD
+ if (create_info->period_info.is_set())
+ {
+ *pos++= EXTRA2_APPLICATION_TIME_PERIOD;
+ pos= extra2_write_len(pos, period_info_len);
+ pos= extra2_write_str(pos, create_info->period_info.name);
+ pos= extra2_write_str(pos, create_info->period_info.constr->name);
+
+ store_frm_fieldno(pos, get_fieldno_by_name(create_info, create_fields,
+ create_info->period_info.period.start));
+ pos+= frm_fieldno_size;
+ store_frm_fieldno(pos, get_fieldno_by_name(create_info, create_fields,
+ create_info->period_info.period.end));
+ pos+= frm_fieldno_size;
+ }
+
if (create_info->versioned())
{
*pos++= EXTRA2_PERIOD_FOR_SYSTEM_TIME;
- *pos++= 2 * sizeof(uint16);
- int2store(pos, vers_get_field(create_info, create_fields, ROW_START));
- pos+= sizeof(uint16);
- int2store(pos, vers_get_field(create_info, create_fields, ROW_END));
- pos+= sizeof(uint16);
+ *pos++= 2 * frm_fieldno_size;
+ store_frm_fieldno(pos, get_fieldno_by_name(create_info, create_fields,
+ create_info->vers_info.as_row.start));
+ pos+= frm_fieldno_size;
+ store_frm_fieldno(pos, get_fieldno_by_name(create_info, create_fields,
+ create_info->vers_info.as_row.end));
+ pos+= frm_fieldno_size;
}
if (has_extra2_field_flags_)
@@ -366,13 +400,13 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table,
pos+= 4;
DBUG_ASSERT(pos == frm_ptr + uint2korr(fileinfo+6));
- key_info_length= pack_keys(pos, keys, key_info, data_offset);
+ key_info_length= pack_keys(pos, keys, key_info, data_offset, e_unique_hash_extra_parts);
if (key_info_length > UINT_MAX16)
{
my_printf_error(ER_CANT_CREATE_TABLE,
"Cannot create table %`s: index information is too long. "
"Decrease number of indexes or use shorter index names or shorter comments.",
- MYF(0), table->str);
+ MYF(0), table.str);
goto err;
}
@@ -475,60 +509,10 @@ err:
}
-/**
- Create a frm (table definition) file and the tables
-
- @param thd Thread handler
- @param frm Binary frm image of the table to create
- @param path Name of file (including database, without .frm)
- @param db Data base name
- @param table_name Table name
- @param create_info create info parameters
- @param file Handler to use or NULL if only frm needs to be created
-
- @retval 0 ok
- @retval 1 error
-*/
-
-int rea_create_table(THD *thd, LEX_CUSTRING *frm,
- const char *path, const char *db, const char *table_name,
- HA_CREATE_INFO *create_info, handler *file,
- bool no_ha_create_table)
-{
- DBUG_ENTER("rea_create_table");
-
- if (no_ha_create_table)
- {
- if (writefrm(path, db, table_name, true, frm->str, frm->length))
- goto err_frm;
- }
-
- if (thd->variables.keep_files_on_create)
- create_info->options|= HA_CREATE_KEEP_FILES;
-
- if (file->ha_create_partitioning_metadata(path, NULL, CHF_CREATE_FLAG))
- goto err_part;
-
- if (!no_ha_create_table)
- {
- if (ha_create_table(thd, path, db, table_name, create_info, frm))
- goto err_part;
- }
-
- DBUG_RETURN(0);
-
-err_part:
- file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
-err_frm:
- deletefrm(path);
- DBUG_RETURN(1);
-} /* rea_create_table */
-
-
/* Pack keyinfo and keynames to keybuff for save in form-file. */
static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
- ulong data_offset)
+ ulong data_offset, uint e_unique_hash_extra_parts)
{
uint key_parts,length;
uchar *pos, *keyname_pos;
@@ -590,6 +574,7 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
}
}
+ key_parts+= e_unique_hash_extra_parts;
if (key_count > 127 || key_parts > 127)
{
keybuff[0]= (key_count & 0x7f) | 0x80;
@@ -656,7 +641,7 @@ static bool pack_vcols(String *buf, List<Create_field> &create_fields,
for (uint field_nr=0; (field= it++); field_nr++)
{
- if (field->vcol_info)
+ if (field->vcol_info && field->vcol_info->expr)
if (pack_expression(buf, field->vcol_info, field_nr,
field->vcol_info->stored_in_db
? VCOL_GENERATED_STORED : VCOL_GENERATED_VIRTUAL))
@@ -897,32 +882,12 @@ static bool pack_fields(uchar **buff_arg, List<Create_field> &create_fields,
while ((field=it++))
{
uint recpos;
- int2store(buff+3, field->length);
/* The +1 is here because the col offset in the .frm file has offset 1 */
recpos= field->offset+1 + (uint) data_offset;
int3store(buff+5,recpos);
- int2store(buff+8,field->pack_flag);
- buff[10]= (uchar) field->unireg_check;
buff[12]= (uchar) field->interval_id;
- buff[13]= (uchar) field->real_field_type();
- if (field->real_field_type() == MYSQL_TYPE_GEOMETRY)
- {
- buff[11]= 0;
- buff[14]= (uchar) field->geom_type;
-#ifndef HAVE_SPATIAL
- DBUG_ASSERT(0); // Should newer happen
-#endif
- }
- else if (field->charset)
- {
- buff[11]= (uchar) (field->charset->number >> 8);
- buff[14]= (uchar) field->charset->number;
- }
- else
- {
- buff[11]= buff[14]= 0; // Numerical
- }
-
+ buff[13]= (uchar) field->type_handler()->real_field_type();
+ field->type_handler()->Column_definition_attributes_frm_pack(field, buff);
int2store(buff+15, field->comment.length);
comment_length+= field->comment.length;
set_if_bigger(int_count,field->interval_id);
@@ -1009,13 +974,36 @@ static bool pack_fields(uchar **buff_arg, List<Create_field> &create_fields,
DBUG_RETURN(0);
}
+
+static bool make_empty_rec_store_default(THD *thd, Field *regfield,
+ Virtual_column_info *default_value)
+{
+ if (default_value && !default_value->flags)
+ {
+ Item *expr= default_value->expr;
+ // may be already fixed if ALTER TABLE
+ if (expr->fix_fields_if_needed(thd, &expr))
+ return true;
+ DBUG_ASSERT(expr == default_value->expr); // Should not change
+ if (regfield->make_empty_rec_store_default_value(thd, expr))
+ {
+ my_error(ER_INVALID_DEFAULT, MYF(0), regfield->field_name.str);
+ return true;
+ }
+ return false;
+ }
+ regfield->make_empty_rec_reset(thd);
+ return false;
+}
+
+
/* save an empty record on start of formfile */
static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
List<Create_field> &create_fields,
uint reclength, ulong data_offset)
{
- int error= 0;
+ int error= false;
uint null_count;
uchar *null_pos;
TABLE table;
@@ -1043,24 +1031,19 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
thd->count_cuted_fields= CHECK_FIELD_WARN; // To find wrong default values
while ((field=it++))
{
+ Record_addr addr(buff + field->offset + data_offset,
+ null_pos + null_count / 8, null_count & 7);
+ Column_definition_attributes tmp(*field);
+ tmp.interval= field->save_interval ?
+ field->save_interval : field->interval;
/* regfield doesn't have to be deleted as it's allocated on THD::mem_root */
- Field *regfield= make_field(&share, thd->mem_root,
- buff+field->offset + data_offset,
- (uint32)field->length,
- null_pos + null_count / 8,
- null_count & 7,
- field->pack_flag,
- field->type_handler(),
- field->charset,
- field->geom_type, field->srid,
- field->unireg_check,
- field->save_interval ? field->save_interval
- : field->interval,
- &field->field_name,
- field->flags);
+ Field *regfield= tmp.make_field(&share, thd->mem_root, &addr,
+ field->type_handler(),
+ &field->field_name,
+ field->flags);
if (!regfield)
{
- error= 1;
+ error= true;
goto err; // End of memory
}
@@ -1077,36 +1060,10 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
!f_bit_as_char(field->pack_flag))
null_count+= field->length & 7;
- if (field->default_value && !field->default_value->flags &&
- (!(field->flags & BLOB_FLAG) ||
- field->real_field_type() == MYSQL_TYPE_GEOMETRY))
- {
- Item *expr= field->default_value->expr;
- // may be already fixed if ALTER TABLE
- int res= expr->fix_fields_if_needed(thd, &expr);
- if (!res)
- res= expr->save_in_field(regfield, 1);
- if (!res && (field->flags & BLOB_FLAG))
- regfield->reset();
-
- /* If not ok or warning of level 'note' */
- if (res != 0 && res != 3)
- {
- my_error(ER_INVALID_DEFAULT, MYF(0), regfield->field_name.str);
- error= 1;
- delete regfield; //To avoid memory leak
- goto err;
- }
- delete regfield; //To avoid memory leak
- }
- else if (regfield->real_type() == MYSQL_TYPE_ENUM &&
- (field->flags & NOT_NULL_FLAG))
- {
- regfield->set_notnull();
- regfield->store((longlong) 1, TRUE);
- }
- else
- regfield->reset();
+ error= make_empty_rec_store_default(thd, regfield, field->default_value);
+ delete regfield; // Avoid memory leaks
+ if (error)
+ goto err;
}
DBUG_ASSERT(data_offset == ((null_count + 7) / 8));
diff --git a/sql/unireg.h b/sql/unireg.h
index 8d07e5940a3..d038de7a88f 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -56,11 +56,6 @@
#endif
#define ER_THD_OR_DEFAULT(thd,X) ((thd) ? ER_THD(thd, (X)) : ER_DEFAULT(X))
-
-#define ME_INFO (ME_HOLDTANG | ME_NOREFRESH)
-#define ME_ERROR (ME_BELL | ME_NOREFRESH)
-#define MYF_RW MYF(MY_WME+MY_NABP) /* Vid my_read & my_write */
-
#define SPECIAL_USE_LOCKS 1 /* Lock used databases */
#define SPECIAL_NO_NEW_FUNC 2 /* Skip new functions */
#define SPECIAL_SKIP_SHOW_DB 4 /* Don't allow 'show db' */
@@ -175,6 +170,7 @@ enum extra2_frm_value_type {
EXTRA2_TABLEDEF_VERSION=0,
EXTRA2_DEFAULT_PART_ENGINE=1,
EXTRA2_GIS=2,
+ EXTRA2_APPLICATION_TIME_PERIOD=3,
EXTRA2_PERIOD_FOR_SYSTEM_TIME=4,
#define EXTRA2_ENGINE_IMPORTANT 128
@@ -184,14 +180,10 @@ enum extra2_frm_value_type {
};
enum extra2_field_flags {
- VERS_OPTIMIZED_UPDATE= 1 << INVISIBLE_MAX_BITS
+ VERS_OPTIMIZED_UPDATE= 1 << INVISIBLE_MAX_BITS,
};
-int rea_create_table(THD *thd, LEX_CUSTRING *frm,
- const char *path, const char *db, const char *table_name,
- HA_CREATE_INFO *create_info, handler *file,
- bool no_ha_create_table);
-LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING *table,
+LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING &table,
HA_CREATE_INFO *create_info,
List<Create_field> &create_fields,
uint keys, KEY *key_info, handler *db_file);
diff --git a/sql/vers_string.h b/sql/vers_string.h
index 3709cdbf786..2349cc0cac1 100644
--- a/sql/vers_string.h
+++ b/sql/vers_string.h
@@ -58,6 +58,12 @@ class Lex_cstring : public LEX_CSTRING
str= _str;
length= _len;
}
+ Lex_cstring(const char *start, const char *end)
+ {
+ DBUG_ASSERT(start <= end);
+ str= start;
+ length= end - start;
+ }
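The new pointer-pair constructor builds a non-owning (pointer, length) view, handy for slicing a token out of a larger buffer without copying. A minimal stand-alone analogue (Cstring_view is a stand-in name, not a server class):

#include <cassert>
#include <cstring>

struct Cstring_view                     // stand-in for the patched Lex_cstring
{
  const char *str;
  size_t length;
  Cstring_view(const char *start, const char *end)
  {
    assert(start <= end);
    str= start;
    length= static_cast<size_t>(end - start);
  }
};

int main()
{
  const char *buf= "db_name.table_name";
  const char *dot= std::strchr(buf, '.');
  Cstring_view db(buf, dot);            // view over "db_name" (length 7)
  assert(db.length == 7);
  return 0;
}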
void set(const char *_str, size_t _len)
{
str= _str;
diff --git a/sql/vers_utils.h b/sql/vers_utils.h
index e896f84135e..2bea191da9e 100644
--- a/sql/vers_utils.h
+++ b/sql/vers_utils.h
@@ -5,43 +5,4 @@
#include "sql_class.h"
#include "vers_string.h"
-class MDL_auto_lock
-{
- THD *thd;
- TABLE_LIST &table;
- bool error;
-
-public:
- MDL_auto_lock(THD *_thd, TABLE_LIST &_table) :
- thd(_thd), table(_table)
- {
- DBUG_ASSERT(thd);
- MDL_request protection_request;
- if (thd->global_read_lock.can_acquire_protection())
- {
- error= true;
- return;
- }
- protection_request.init(MDL_key::GLOBAL, "", "", MDL_INTENTION_EXCLUSIVE,
- MDL_EXPLICIT);
- error= thd->mdl_context.acquire_lock(&protection_request, thd->variables.lock_wait_timeout);
- if (error)
- return;
-
- table.mdl_request.init(MDL_key::TABLE, table.db.str, table.table_name.str, MDL_EXCLUSIVE, MDL_EXPLICIT);
- error= thd->mdl_context.acquire_lock(&table.mdl_request, thd->variables.lock_wait_timeout);
- thd->mdl_context.release_lock(protection_request.ticket);
- }
- ~MDL_auto_lock()
- {
- if (!error)
- {
- DBUG_ASSERT(table.mdl_request.ticket);
- thd->mdl_context.release_lock(table.mdl_request.ticket);
- table.mdl_request.ticket= NULL;
- }
- }
- bool acquire_error() const { return error; }
-};
-
#endif // VERS_UTILS_INCLUDED
diff --git a/sql/wsrep_applier.cc b/sql/wsrep_applier.cc
index 1f50ee55711..39cdef77be2 100644
--- a/sql/wsrep_applier.cc
+++ b/sql/wsrep_applier.cc
@@ -14,12 +14,17 @@
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */
#include "mariadb.h"
+#include "mysql/service_wsrep.h"
+#include "wsrep_applier.h"
+
#include "wsrep_priv.h"
#include "wsrep_binlog.h" // wsrep_dump_rbr_buf()
#include "wsrep_xid.h"
+#include "wsrep_thd.h"
+#include "wsrep_trans_observer.h"
+#include "slave.h" // opt_log_slave_updates
#include "log_event.h" // class THD, EVENT_LEN_OFFSET, etc.
-#include "wsrep_applier.h"
#include "debug_sync.h"
/*
@@ -27,7 +32,6 @@
At the end (*buf) is shifted to point to the following event or NULL and
(*buf_len) is reduced by the number of bytes just read for the 1st event.
*/
-
static Log_event* wsrep_read_log_event(
char **arg_buf, size_t *arg_buf_len,
const Format_description_log_event *description_event)
@@ -35,7 +39,7 @@ static Log_event* wsrep_read_log_event(
DBUG_ENTER("wsrep_read_log_event");
char *head= (*arg_buf);
- uint data_len = uint4korr(head + EVENT_LEN_OFFSET);
+ uint data_len= uint4korr(head + EVENT_LEN_OFFSET);
char *buf= (*arg_buf);
const char *error= 0;
Log_event *res= 0;
@@ -62,12 +66,13 @@ void wsrep_set_apply_format(THD* thd, Format_description_log_event* ev)
{
if (thd->wsrep_apply_format)
{
- delete (Format_description_log_event*)thd->wsrep_apply_format;
+ delete (Format_description_log_event*)thd->wsrep_apply_format;
}
thd->wsrep_apply_format= ev;
}
-Format_description_log_event* wsrep_get_apply_format(THD* thd)
+Format_description_log_event*
+wsrep_get_apply_format(THD* thd)
{
if (thd->wsrep_apply_format)
{
@@ -79,45 +84,77 @@ Format_description_log_event* wsrep_get_apply_format(THD* thd)
return thd->wsrep_rgi->rli->relay_log.description_event_for_exec;
}
-static wsrep_cb_status_t wsrep_apply_events(THD* thd,
- const void* events_buf,
- size_t buf_len)
+void wsrep_apply_error::store(const THD* const thd)
{
- char *buf= (char *)events_buf;
- int rcode= 0;
- int event= 1;
- Log_event_type typ;
+ Diagnostics_area::Sql_condition_iterator it=
+ thd->get_stmt_da()->sql_conditions();
+ const Sql_condition* cond;
- DBUG_ENTER("wsrep_apply_events");
+ static size_t const max_len= 2*MAX_SLAVE_ERRMSG; // 2x so that we have enough
+
+ if (NULL == str_)
+ {
+ // this must be freeable by standard free()
+ str_= static_cast<char*>(malloc(max_len));
+ if (NULL == str_)
+ {
+ WSREP_ERROR("Failed to allocate %zu bytes for error buffer.", max_len);
+ len_= 0;
+ return;
+ }
+ }
+ else
+ {
+ /* This is possible when we invoke rollback after failed applying.
+ * In this situation DA should not be reset yet and should contain
+ * all previous errors from applying and new ones from rolling back,
+ * so we just overwrite it from scratch */
+ }
- if (thd->killed == KILL_CONNECTION &&
- thd->wsrep_conflict_state != REPLAYING)
+ char* slider= str_;
+ const char* const buf_end= str_ + max_len - 1; // -1: leave space for \0
+
+ for (cond= it++; cond && slider < buf_end; cond= it++)
{
- WSREP_INFO("applier has been aborted, skipping apply_rbr: %lld",
- (long long) wsrep_thd_trx_seqno(thd));
- DBUG_RETURN(WSREP_CB_FAILURE);
+ uint const err_code= cond->get_sql_errno();
+ const char* const err_str= cond->get_message_text();
+
+ slider+= my_snprintf(slider, buf_end - slider, " %s, Error_code: %d;",
+ err_str, err_code);
}
- mysql_mutex_lock(&thd->LOCK_thd_data);
- thd->wsrep_query_state= QUERY_EXEC;
- if (thd->wsrep_conflict_state!= REPLAYING)
- thd->wsrep_conflict_state= NO_CONFLICT;
- mysql_mutex_unlock(&thd->LOCK_thd_data);
+ *slider= '\0';
+ len_= slider - str_ + 1; // +1: add \0
+
+ WSREP_DEBUG("Error buffer for thd %llu seqno %lld, %zu bytes: %s",
+ thd->thread_id, (long long)wsrep_thd_trx_seqno(thd),
+ len_, str_ ? str_ : "(null)");
+}
+
+int wsrep_apply_events(THD* thd,
+ Relay_log_info* rli,
+ const void* events_buf,
+ size_t buf_len)
+{
+ char *buf= (char *)events_buf;
+ int rcode= 0;
+ int event= 1;
+ Log_event_type typ;
+ DBUG_ENTER("wsrep_apply_events");
if (!buf_len) WSREP_DEBUG("empty rbr buffer to apply: %lld",
(long long) wsrep_thd_trx_seqno(thd));
- while(buf_len)
+ while (buf_len)
{
int exec_res;
Log_event* ev= wsrep_read_log_event(&buf, &buf_len,
- wsrep_get_apply_format(thd));
-
+ wsrep_get_apply_format(thd));
if (!ev)
{
WSREP_ERROR("applier could not read binlog event, seqno: %lld, len: %zu",
(long long)wsrep_thd_trx_seqno(thd), buf_len);
- rcode= 1;
+ rcode= WSREP_ERR_BAD_EVENT;
goto error;
}
@@ -147,9 +184,6 @@ static wsrep_cb_status_t wsrep_apply_events(THD* thd,
thd->set_server_id(ev->server_id);
thd->set_time(); // time the query
thd->transaction.start_time.reset(thd);
- wsrep_xid_init(&thd->transaction.xid_state.xid,
- thd->wsrep_trx_meta.gtid.uuid,
- thd->wsrep_trx_meta.gtid.seqno);
thd->lex->current_select= 0;
if (!ev->when)
{
@@ -162,13 +196,13 @@ static wsrep_cb_status_t wsrep_apply_events(THD* thd,
(thd->variables.option_bits & ~OPTION_SKIP_REPLICATION) |
(ev->flags & LOG_EVENT_SKIP_REPLICATION_F ? OPTION_SKIP_REPLICATION : 0);
- ev->thd = thd;
- exec_res = ev->apply_event(thd->wsrep_rgi);
+ ev->thd= thd;
+ exec_res= ev->apply_event(thd->wsrep_rgi);
DBUG_PRINT("info", ("exec_event result: %d", exec_res));
if (exec_res)
{
- WSREP_WARN("RBR event %d %s apply warning: %d, %lld",
+ WSREP_WARN("Event %d %s apply failed: %d, seqno %lld",
event, ev->get_type_str(), exec_res,
(long long) wsrep_thd_trx_seqno(thd));
rcode= exec_res;
@@ -178,230 +212,14 @@ static wsrep_cb_status_t wsrep_apply_events(THD* thd,
}
event++;
- if (thd->wsrep_conflict_state!= NO_CONFLICT &&
- thd->wsrep_conflict_state!= REPLAYING)
- WSREP_WARN("conflict state after RBR event applying: %d, %lld",
- thd->wsrep_query_state, (long long)wsrep_thd_trx_seqno(thd));
-
- if (thd->wsrep_conflict_state == MUST_ABORT) {
- WSREP_WARN("RBR event apply failed, rolling back: %lld",
- (long long) wsrep_thd_trx_seqno(thd));
- trans_rollback(thd);
- thd->locked_tables_list.unlock_locked_tables(thd);
- /* Release transactional metadata locks. */
- thd->mdl_context.release_transactional_locks();
- thd->wsrep_conflict_state= NO_CONFLICT;
- DBUG_RETURN(WSREP_CB_FAILURE);
- }
-
delete_or_keep_event_post_apply(thd->wsrep_rgi, typ, ev);
}
- error:
- mysql_mutex_lock(&thd->LOCK_thd_data);
- thd->wsrep_query_state= QUERY_IDLE;
- mysql_mutex_unlock(&thd->LOCK_thd_data);
-
- assert(thd->wsrep_exec_mode== REPL_RECV);
-
+error:
if (thd->killed == KILL_CONNECTION)
WSREP_INFO("applier aborted: %lld", (long long)wsrep_thd_trx_seqno(thd));
- if (rcode) DBUG_RETURN(WSREP_CB_FAILURE);
- DBUG_RETURN(WSREP_CB_SUCCESS);
-}
-
-wsrep_cb_status_t wsrep_apply_cb(void* const ctx,
- const void* const buf,
- size_t const buf_len,
- uint32_t const flags,
- const wsrep_trx_meta_t* meta)
-{
- THD* const thd((THD*)ctx);
-
- assert(thd->wsrep_apply_toi == false);
-
- // Allow tests to block the applier thread using the DBUG facilities.
- DBUG_EXECUTE_IF("sync.wsrep_apply_cb",
- {
- const char act[]=
- "now "
- "SIGNAL sync.wsrep_apply_cb_reached "
- "WAIT_FOR signal.wsrep_apply_cb";
- DBUG_ASSERT(!debug_sync_set_action(thd,
- STRING_WITH_LEN(act)));
- };);
-
- thd->wsrep_trx_meta = *meta;
-
-#ifdef WSREP_PROC_INFO
- snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1,
- "Applying write set %lld: %p, %zu",
- (long long)wsrep_thd_trx_seqno(thd), buf, buf_len);
- thd_proc_info(thd, thd->wsrep_info);
-#else
- thd_proc_info(thd, "Applying write set");
-#endif /* WSREP_PROC_INFO */
-
- /* tune FK and UK checking policy */
- if (wsrep_slave_UK_checks == FALSE)
- thd->variables.option_bits|= OPTION_RELAXED_UNIQUE_CHECKS;
- else
- thd->variables.option_bits&= ~OPTION_RELAXED_UNIQUE_CHECKS;
-
- if (wsrep_slave_FK_checks == FALSE)
- thd->variables.option_bits|= OPTION_NO_FOREIGN_KEY_CHECKS;
- else
- thd->variables.option_bits&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
-
- /* With galera we assume that the master has done the constraint checks */
- thd->variables.option_bits|= OPTION_NO_CHECK_CONSTRAINT_CHECKS;
-
- if (flags & WSREP_FLAG_ISOLATION)
- {
- thd->wsrep_apply_toi= true;
- /*
- Don't run in transaction mode with TOI actions.
- */
- thd->variables.option_bits&= ~OPTION_BEGIN;
- thd->server_status&= ~SERVER_STATUS_IN_TRANS;
- }
- wsrep_cb_status_t rcode(wsrep_apply_events(thd, buf, buf_len));
-
-#ifdef WSREP_PROC_INFO
- snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1,
- "Applied write set %lld", (long long)wsrep_thd_trx_seqno(thd));
- thd_proc_info(thd, thd->wsrep_info);
-#else
- thd_proc_info(thd, "Applied write set");
-#endif /* WSREP_PROC_INFO */
-
- if (WSREP_CB_SUCCESS != rcode)
- {
- wsrep_dump_rbr_buf_with_header(thd, buf, buf_len);
- }
-
- if (thd->has_thd_temporary_tables())
- {
- WSREP_DEBUG("Applier %lld has temporary tables. Closing them now..",
- thd->thread_id);
- thd->close_temporary_tables();
- }
-
- return rcode;
-}
-
-static wsrep_cb_status_t wsrep_commit(THD* const thd)
-{
-#ifdef WSREP_PROC_INFO
- snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1,
- "Committing %lld", (long long)wsrep_thd_trx_seqno(thd));
- thd_proc_info(thd, thd->wsrep_info);
-#else
- thd_proc_info(thd, "Committing");
-#endif /* WSREP_PROC_INFO */
-
- wsrep_cb_status_t const rcode(trans_commit(thd) ?
- WSREP_CB_FAILURE : WSREP_CB_SUCCESS);
-
- if (WSREP_CB_SUCCESS == rcode)
- {
- thd->wsrep_rgi->cleanup_context(thd, false);
-#ifdef GTID_SUPPORT
- thd->variables.gtid_next.set_automatic();
-#endif /* GTID_SUPPORT */
- if (thd->wsrep_apply_toi)
- {
- wsrep_set_SE_checkpoint(thd->wsrep_trx_meta.gtid.uuid,
- thd->wsrep_trx_meta.gtid.seqno);
- }
- }
-
-#ifdef WSREP_PROC_INFO
- snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1,
- "Committed %lld", (long long) wsrep_thd_trx_seqno(thd));
- thd_proc_info(thd, thd->wsrep_info);
-#else
- thd_proc_info(thd, "Committed");
-#endif /* WSREP_PROC_INFO */
-
- return rcode;
-}
-
-static wsrep_cb_status_t wsrep_rollback(THD* const thd)
-{
-#ifdef WSREP_PROC_INFO
- snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1,
- "Rolling back %lld", (long long)wsrep_thd_trx_seqno(thd));
- thd_proc_info(thd, thd->wsrep_info);
-#else
- thd_proc_info(thd, "Rolling back");
-#endif /* WSREP_PROC_INFO */
-
- wsrep_cb_status_t const rcode(trans_rollback(thd) ?
- WSREP_CB_FAILURE : WSREP_CB_SUCCESS);
-
-#ifdef WSREP_PROC_INFO
- snprintf(thd->wsrep_info, sizeof(thd->wsrep_info) - 1,
- "Rolled back %lld", (long long)wsrep_thd_trx_seqno(thd));
- thd_proc_info(thd, thd->wsrep_info);
-#else
- thd_proc_info(thd, "Rolled back");
-#endif /* WSREP_PROC_INFO */
-
- return rcode;
-}
-
-wsrep_cb_status_t wsrep_commit_cb(void* const ctx,
- uint32_t const flags,
- const wsrep_trx_meta_t* meta,
- wsrep_bool_t* const exit,
- bool const commit)
-{
- THD* const thd((THD*)ctx);
-
- assert(meta->gtid.seqno == wsrep_thd_trx_seqno(thd));
-
- wsrep_cb_status_t rcode;
-
- if (commit)
- rcode = wsrep_commit(thd);
- else
- rcode = wsrep_rollback(thd);
-
- /* Cleanup */
wsrep_set_apply_format(thd, NULL);
- thd->mdl_context.release_transactional_locks();
- thd->reset_query(); /* Mutex protected */
- free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC));
- thd->tx_isolation= (enum_tx_isolation) thd->variables.tx_isolation;
- if (wsrep_slave_count_change < 0 && commit && WSREP_CB_SUCCESS == rcode)
- {
- mysql_mutex_lock(&LOCK_wsrep_slave_threads);
- if (wsrep_slave_count_change < 0)
- {
- wsrep_slave_count_change++;
- *exit = true;
- }
- mysql_mutex_unlock(&LOCK_wsrep_slave_threads);
- }
-
- if (thd->wsrep_applier)
- {
- /* From trans_begin() */
- thd->variables.option_bits|= OPTION_BEGIN;
- thd->server_status|= SERVER_STATUS_IN_TRANS;
- thd->wsrep_apply_toi= false;
- }
-
- return rcode;
-}
-
-
-wsrep_cb_status_t wsrep_unordered_cb(void* const ctx,
- const void* const data,
- size_t const size)
-{
- return WSREP_CB_SUCCESS;
+ DBUG_RETURN(rcode);
}
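
Aside: wsrep_apply_error::store() above walks the THD diagnostics area and packs every condition into one bounded, NUL-terminated buffer using a sliding write pointer. A standalone sketch of that packing loop, assuming a hypothetical Condition type in place of Sql_condition:

#include <cstddef>
#include <cstdio>
#include <vector>

struct Condition { unsigned code; const char *message; };

static size_t pack_conditions(const std::vector<Condition> &conds,
                              char *str, size_t max_len)
{
  char *slider= str;
  const char *const buf_end= str + max_len - 1;    // leave room for '\0'

  for (size_t i= 0; i < conds.size() && slider < buf_end; ++i)
  {
    int n= std::snprintf(slider, buf_end - slider, " %s, Error_code: %u;",
                         conds[i].message, conds[i].code);
    if (n < 0) break;                              // encoding error: stop
    slider+= (n < buf_end - slider) ? n : (buf_end - slider);  // clamp on truncation
  }
  *slider= '\0';
  return (slider - str) + 1;                       // length including '\0'
}

int main()
{
  std::vector<Condition> conds= { { 1062, "Duplicate entry" },
                                  { 1213, "Deadlock found" } };
  char buf[128];
  size_t len= pack_conditions(conds, buf, sizeof(buf));
  std::printf("%zu bytes: %s\n", len, buf);
  return 0;
}
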
diff --git a/sql/wsrep_applier.h b/sql/wsrep_applier.h
index f19d2d46d0c..a8da2acbb9a 100644
--- a/sql/wsrep_applier.h
+++ b/sql/wsrep_applier.h
@@ -1,4 +1,4 @@
-/* Copyright 2013 Codership Oy <http://www.codership.com>
+/* Copyright 2013-2015 Codership Oy <http://www.codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -17,28 +17,57 @@
#define WSREP_APPLIER_H
#include <my_config.h>
-#include "../wsrep/wsrep_api.h"
-void wsrep_set_apply_format(THD* thd, Format_description_log_event* ev);
-Format_description_log_event* wsrep_get_apply_format(THD* thd);
+#include "sql_class.h" // THD class
+
+int wsrep_apply_events(THD* thd,
+ Relay_log_info* rli,
+ const void* events_buf,
+ size_t buf_len);
-/* wsrep callback prototypes */
-extern "C" {
-wsrep_cb_status_t wsrep_apply_cb(void *ctx,
- const void* buf, size_t buf_len,
- uint32_t flags,
- const wsrep_trx_meta_t* meta);
+/* Applier error codes, when nothing better is available. */
+#define WSREP_RET_SUCCESS 0 // Success
+#define WSREP_ERR_GENERIC 1 // When in doubt (MySQL default error code)
+#define WSREP_ERR_BAD_EVENT 2 // Can't parse event
+#define WSREP_ERR_NOT_FOUND 3 // Key, table, schema not found
+#define WSREP_ERR_EXISTS 4 // Key, table, schema already exists
+#define WSREP_ERR_WRONG_TYPE 5 // Incompatible data type
+#define WSREP_ERR_FAILED 6 // Operation failed for some internal reason
+#define WSREP_ERR_ABORTED 7 // Operation was aborted externally
-wsrep_cb_status_t wsrep_commit_cb(void *ctx,
- uint32_t flags,
- const wsrep_trx_meta_t* meta,
- wsrep_bool_t* exit,
- bool commit);
+class wsrep_apply_error
+{
+public:
+ wsrep_apply_error() : str_(NULL), len_(0) {};
+ ~wsrep_apply_error() { ::free(str_); }
+ /* stores the current THD error info from the diagnostic area. Works only
+ * once, subsequent invocations are ignored in order to preserve the original
+ * condition. */
+ void store(const THD* thd);
+ const char* c_str() const { return str_; }
+ size_t length() const { return len_; }
+ bool is_null() const { return (c_str() == NULL && length() == 0); }
+ wsrep_buf_t get_buf() const
+ {
+ wsrep_buf_t ret= { c_str(), length() };
+ return ret;
+ }
+private:
+ char* str_;
+ size_t len_;
+};
+
+class Format_description_log_event;
+void wsrep_set_apply_format(THD*, Format_description_log_event*);
+Format_description_log_event* wsrep_get_apply_format(THD* thd);
+int wsrep_apply(void* ctx,
+ uint32_t flags,
+ const wsrep_buf_t* buf,
+ const wsrep_trx_meta_t* meta,
+ wsrep_apply_error& err);
-wsrep_cb_status_t wsrep_unordered_cb(void* ctx,
- const void* data,
- size_t size);
+wsrep_cb_status_t wsrep_unordered_cb(void* ctx,
+ const wsrep_buf_t* data);
-} /* extern "C" */
#endif /* WSREP_APPLIER_H */
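
Aside: the wsrep_apply_error class declared above owns a malloc()ed message, frees it in the destructor, and hands the data out as a (pointer, length) pair via get_buf(). A simplified standalone sketch of that ownership pattern (Buf_view stands in for wsrep_buf_t; the replace-on-store policy is a simplification):

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <cstring>

struct Buf_view { const void *ptr; size_t len; };

class Apply_error
{
public:
  Apply_error() : str_(NULL), len_(0) {}
  ~Apply_error() { std::free(str_); }              // owner frees the buffer
  Apply_error(const Apply_error&) = delete;        // non-copyable: raw memory
  Apply_error& operator=(const Apply_error&) = delete;

  void store(const char *msg)
  {
    std::free(str_);                               // drop any previous message
    len_= std::strlen(msg) + 1;
    str_= static_cast<char*>(std::malloc(len_));
    if (str_) std::memcpy(str_, msg, len_); else len_= 0;
  }
  bool is_null() const { return str_ == NULL && len_ == 0; }
  Buf_view get_buf() const { Buf_view b= { str_, len_ }; return b; }

private:
  char *str_;
  size_t len_;
};

int main()
{
  Apply_error err;
  std::printf("empty: %d\n", err.is_null());
  err.store(" Duplicate entry, Error_code: 1062;");
  Buf_view b= err.get_buf();
  std::printf("%zu bytes: %s\n", b.len, static_cast<const char*>(b.ptr));
  return 0;                                        // destructor frees str_
}
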
diff --git a/sql/wsrep_binlog.cc b/sql/wsrep_binlog.cc
index b823f9e70d3..80790ca604c 100644
--- a/sql/wsrep_binlog.cc
+++ b/sql/wsrep_binlog.cc
@@ -14,12 +14,16 @@
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */
#include "mariadb.h"
+#include "mysql/service_wsrep.h"
#include "wsrep_binlog.h"
#include "wsrep_priv.h"
#include "log.h"
+#include "slave.h"
#include "log_event.h"
#include "wsrep_applier.h"
+#include "transaction.h"
+
extern handlerton *binlog_hton;
/*
Write the contents of a cache to a memory buffer.
@@ -40,10 +44,10 @@ int wsrep_write_cache_buf(IO_CACHE *cache, uchar **buf, size_t *buf_len)
DBUG_RETURN(ER_ERROR_ON_WRITE);
}
- uint length = my_b_bytes_in_cache(cache);
- if (unlikely(0 == length)) length = my_b_fill(cache);
+ uint length= my_b_bytes_in_cache(cache);
+ if (unlikely(0 == length)) length= my_b_fill(cache);
- size_t total_length = 0;
+ size_t total_length= 0;
if (likely(length > 0)) do
{
@@ -60,7 +64,7 @@ int wsrep_write_cache_buf(IO_CACHE *cache, uchar **buf, size_t *buf_len)
wsrep_max_ws_size, total_length);
goto error;
}
- uchar* tmp = (uchar *)my_realloc(*buf, total_length,
+ uchar* tmp= (uchar *)my_realloc(*buf, total_length,
MYF(MY_ALLOW_ZERO_PTR));
if (!tmp)
{
@@ -68,17 +72,17 @@ int wsrep_write_cache_buf(IO_CACHE *cache, uchar **buf, size_t *buf_len)
*buf_len, length);
goto error;
}
- *buf = tmp;
+ *buf= tmp;
memcpy(*buf + *buf_len, cache->read_pos, length);
- *buf_len = total_length;
+ *buf_len= total_length;
if (cache->file < 0)
{
cache->read_pos= cache->read_end;
break;
}
- } while ((length = my_b_fill(cache)));
+ } while ((length= my_b_fill(cache)));
if (reinit_io_cache(cache, WRITE_CACHE, saved_pos, 0, 0))
{
@@ -104,137 +108,6 @@ cleanup:
* many transactions would fit in there
* so there is no need to reach for the heap */
-/* Returns minimum multiple of HEAP_PAGE_SIZE that is >= length */
-static inline size_t
-heap_size(size_t length)
-{
- return (length + HEAP_PAGE_SIZE - 1)/HEAP_PAGE_SIZE*HEAP_PAGE_SIZE;
-}
-
-/* append data to writeset */
-static inline wsrep_status_t
-wsrep_append_data(wsrep_t* const wsrep,
- wsrep_ws_handle_t* const ws,
- const void* const data,
- size_t const len)
-{
- struct wsrep_buf const buff = { data, len };
- wsrep_status_t const rc(wsrep->append_data(wsrep, ws, &buff, 1,
- WSREP_DATA_ORDERED, true));
- DBUG_DUMP("buff", (uchar*) data, len);
- if (rc != WSREP_OK)
- {
- WSREP_WARN("append_data() returned %d", rc);
- }
-
- return rc;
-}
-
-/*
- Write the contents of a cache to wsrep provider.
-
- This function quite the same as MYSQL_BIN_LOG::write_cache(),
- with the exception that here we write in buffer instead of log file.
-
- This version reads all of cache into single buffer and then appends to a
- writeset at once.
- */
-static int wsrep_write_cache_once(wsrep_t* const wsrep,
- THD* const thd,
- IO_CACHE* const cache,
- size_t* const len)
-{
- my_off_t const saved_pos(my_b_tell(cache));
- DBUG_ENTER("wsrep_write_cache_once");
-
- if (reinit_io_cache(cache, READ_CACHE, 0, 0, 0))
- {
- WSREP_ERROR("failed to initialize io-cache");
- DBUG_RETURN(ER_ERROR_ON_WRITE);
- }
-
- int err(WSREP_OK);
-
- size_t total_length(0);
- uchar stack_buf[STACK_SIZE]; /* to avoid dynamic allocations for few data*/
- uchar* heap_buf(NULL);
- uchar* buf(stack_buf);
- size_t allocated(sizeof(stack_buf));
- size_t used(0);
-
- uint length(my_b_bytes_in_cache(cache));
- if (unlikely(0 == length)) length = my_b_fill(cache);
-
- if (likely(length > 0)) do
- {
- total_length += length;
- /*
- Bail out if buffer grows too large.
- A temporary fix to avoid allocating indefinitely large buffer,
- not a real limit on a writeset size which includes other things
- like header and keys.
- */
- if (unlikely(total_length > wsrep_max_ws_size))
- {
- WSREP_WARN("transaction size limit (%lu) exceeded: %zu",
- wsrep_max_ws_size, total_length);
- err = WSREP_TRX_SIZE_EXCEEDED;
- goto cleanup;
- }
-
- if (total_length > allocated)
- {
- size_t const new_size(heap_size(total_length));
- uchar* tmp = (uchar *)my_realloc(heap_buf, new_size,
- MYF(MY_ALLOW_ZERO_PTR));
- if (!tmp)
- {
- WSREP_ERROR("could not (re)allocate buffer: %zu + %u",
- allocated, length);
- err = WSREP_TRX_SIZE_EXCEEDED;
- goto cleanup;
- }
-
- heap_buf = tmp;
- buf = heap_buf;
- allocated = new_size;
-
- if (used <= STACK_SIZE && used > 0) // there's data in stack_buf
- {
- DBUG_ASSERT(buf == stack_buf);
- memcpy(heap_buf, stack_buf, used);
- }
- }
-
- memcpy(buf + used, cache->read_pos, length);
- used = total_length;
- if (cache->file < 0)
- {
- cache->read_pos= cache->read_end;
- break;
- }
- } while ((length = my_b_fill(cache)));
-
- if (used > 0)
- err = wsrep_append_data(wsrep, &thd->wsrep_ws_handle, buf, used);
-
- if (WSREP_OK == err) *len = total_length;
-
-cleanup:
- if (reinit_io_cache(cache, WRITE_CACHE, saved_pos, 0, 0))
- {
- WSREP_ERROR("failed to reinitialize io-cache");
- }
-
- if (unlikely(WSREP_OK != err))
- {
- wsrep_dump_rbr_buf_with_header(thd, buf, used);
- }
-
- my_free(heap_buf);
- DBUG_RETURN(err);
-}
-
/*
Write the contents of a cache to wsrep provider.
@@ -243,62 +116,58 @@ cleanup:
This version uses incremental data appending as it reads it from cache.
*/
-static int wsrep_write_cache_inc(wsrep_t* const wsrep,
- THD* const thd,
+static int wsrep_write_cache_inc(THD* const thd,
IO_CACHE* const cache,
size_t* const len)
{
- my_off_t const saved_pos(my_b_tell(cache));
- DBUG_ENTER("wsrep_write_cache_inc");
-
- if (reinit_io_cache(cache, READ_CACHE, 0, 0, 0))
- {
- WSREP_ERROR("failed to initialize io-cache");
- DBUG_RETURN(WSREP_TRX_ERROR);
- }
+ DBUG_ENTER("wsrep_write_cache_inc");
+ my_off_t const saved_pos(my_b_tell(cache));
- int err(WSREP_OK);
+ if (reinit_io_cache(cache, READ_CACHE, thd->wsrep_sr().bytes_certified(), 0, 0))
+ {
+ WSREP_ERROR("failed to initialize io-cache");
+ DBUG_RETURN(1);
+ }
- size_t total_length(0);
+ int ret= 0;
+ size_t total_length(0);
- uint length(my_b_bytes_in_cache(cache));
- if (unlikely(0 == length)) length = my_b_fill(cache);
+ uint length(my_b_bytes_in_cache(cache));
+ if (unlikely(0 == length)) length= my_b_fill(cache);
- if (likely(length > 0)) do
+ if (likely(length > 0))
+ {
+ do
{
- total_length += length;
- /* bail out if buffer grows too large
- not a real limit on a writeset size which includes other things
- like header and keys.
- */
- if (unlikely(total_length > wsrep_max_ws_size))
- {
- WSREP_WARN("transaction size limit (%lu) exceeded: %zu",
- wsrep_max_ws_size, total_length);
- err = WSREP_TRX_SIZE_EXCEEDED;
- goto cleanup;
- }
-
- if(WSREP_OK != (err=wsrep_append_data(wsrep, &thd->wsrep_ws_handle,
- cache->read_pos, length)))
- goto cleanup;
-
- if (cache->file < 0)
- {
- cache->read_pos= cache->read_end;
- break;
- }
- } while ((length = my_b_fill(cache)));
-
- if (WSREP_OK == err) *len = total_length;
+ total_length += length;
+ /* bail out if buffer grows too large
+ not a real limit on a writeset size which includes other things
+ like header and keys.
+ */
+ if (unlikely(total_length > wsrep_max_ws_size))
+ {
+ WSREP_WARN("transaction size limit (%lu) exceeded: %zu",
+ wsrep_max_ws_size, total_length);
+ ret= 1;
+ goto cleanup;
+ }
+ if (thd->wsrep_cs().append_data(wsrep::const_buffer(cache->read_pos, length)))
+ goto cleanup;
+ cache->read_pos= cache->read_end;
+ } while ((cache->file >= 0) && (length= my_b_fill(cache)));
+ }
+ if (ret == 0)
+ {
+ assert(total_length + thd->wsrep_sr().bytes_certified() == saved_pos);
+ }
cleanup:
- if (reinit_io_cache(cache, WRITE_CACHE, saved_pos, 0, 0))
- {
- WSREP_ERROR("failed to reinitialize io-cache");
- }
-
- DBUG_RETURN(err);
+ *len= total_length;
+ if (reinit_io_cache(cache, WRITE_CACHE, saved_pos, 0, 0))
+ {
+ WSREP_ERROR("failed to reinitialize io-cache");
+ }
+ DBUG_RETURN(ret);
}
/*
@@ -307,17 +176,11 @@ cleanup:
This function is quite the same as MYSQL_BIN_LOG::write_cache(),
with the exception that here we write into a buffer instead of the log file.
*/
-int wsrep_write_cache(wsrep_t* const wsrep,
- THD* const thd,
+int wsrep_write_cache(THD* const thd,
IO_CACHE* const cache,
size_t* const len)
{
- if (wsrep_incremental_data_collection) {
- return wsrep_write_cache_inc(wsrep, thd, cache, len);
- }
- else {
- return wsrep_write_cache_once(wsrep, thd, cache, len);
- }
+ return wsrep_write_cache_inc(thd, cache, len);
}
void wsrep_dump_rbr_buf(THD *thd, const void* rbr_buf, size_t buf_len)
@@ -383,80 +246,17 @@ int wsrep_binlog_close_connection(THD* thd)
int wsrep_binlog_savepoint_set(THD *thd, void *sv)
{
if (!wsrep_emulate_bin_log) return 0;
- int rcode = binlog_hton->savepoint_set(binlog_hton, thd, sv);
+ int rcode= binlog_hton->savepoint_set(binlog_hton, thd, sv);
return rcode;
}
int wsrep_binlog_savepoint_rollback(THD *thd, void *sv)
{
if (!wsrep_emulate_bin_log) return 0;
- int rcode = binlog_hton->savepoint_rollback(binlog_hton, thd, sv);
+ int rcode= binlog_hton->savepoint_rollback(binlog_hton, thd, sv);
return rcode;
}
-#if 0
-void wsrep_dump_rbr_direct(THD* thd, IO_CACHE* cache)
-{
- char filename[PATH_MAX]= {0};
- int len= snprintf(filename, PATH_MAX, "%s/GRA_%lld_%lld.log",
- wsrep_data_home_dir, (longlong) thd->thread_id,
- (longlong) wsrep_thd_trx_seqno(thd));
- size_t bytes_in_cache = 0;
- // check path
- if (len >= PATH_MAX)
- {
- WSREP_ERROR("RBR dump path too long: %d, skipping dump.", len);
- return ;
- }
- // init cache
- my_off_t const saved_pos(my_b_tell(cache));
- if (reinit_io_cache(cache, READ_CACHE, 0, 0, 0))
- {
- WSREP_ERROR("failed to initialize io-cache");
- return ;
- }
- // open file
- FILE* of = fopen(filename, "wb");
- if (!of)
- {
- WSREP_ERROR("Failed to open file '%s': %d (%s)",
- filename, errno, strerror(errno));
- goto cleanup;
- }
- // ready to write
- bytes_in_cache= my_b_bytes_in_cache(cache);
- if (unlikely(bytes_in_cache == 0)) bytes_in_cache = my_b_fill(cache);
- if (likely(bytes_in_cache > 0)) do
- {
- if (my_fwrite(of, cache->read_pos, bytes_in_cache,
- MYF(MY_WME | MY_NABP)) == (size_t) -1)
- {
- WSREP_ERROR("Failed to write file '%s'", filename);
- goto cleanup;
- }
-
- if (cache->file < 0)
- {
- cache->read_pos= cache->read_end;
- break;
- }
- } while ((bytes_in_cache= my_b_fill(cache)));
- if (cache->error == -1)
- {
- WSREP_ERROR("RBR inconsistent");
- goto cleanup;
- }
-cleanup:
- // init back
- if (reinit_io_cache(cache, WRITE_CACHE, saved_pos, 0, 0))
- {
- WSREP_ERROR("failed to reinitialize io-cache");
- }
- // close file
- if (of) fclose(of);
-}
-#endif
-
void thd_binlog_flush_pending_rows_event(THD *thd, bool stmt_end)
{
thd->binlog_flush_pending_rows_event(stmt_end);
@@ -543,3 +343,94 @@ cleanup1:
DBUG_VOID_RETURN;
}
+#include "log_event.h"
+
+int wsrep_write_skip_event(THD* thd)
+{
+ DBUG_ENTER("wsrep_write_skip_event");
+ Ignorable_log_event skip_event(thd);
+ int ret= mysql_bin_log.write_event(&skip_event);
+ if (ret)
+ {
+ WSREP_WARN("wsrep_write_skip_event: write to binlog failed: %d", ret);
+ }
+ if (!ret && (ret= trans_commit_stmt(thd)))
+ {
+ WSREP_WARN("wsrep_write_skip_event: statt commit failed");
+ }
+ DBUG_RETURN(ret);
+}
+
+int wsrep_write_dummy_event_low(THD *thd, const char *msg)
+{
+ ::abort();
+ return 0;
+}
+
+int wsrep_write_dummy_event(THD *orig_thd, const char *msg)
+{
+ return 0;
+}
+
+bool wsrep_commit_will_write_binlog(THD *thd)
+{
+ return (!wsrep_emulate_bin_log && /* binlog enabled*/
+ (wsrep_thd_is_local(thd) || /* local thd*/
+ (thd->wsrep_applier_service && /* applier and log-slave-updates */
+ opt_log_slave_updates)));
+}
+
+/*
+ The last THD/commit_for_wait registered for group commit.
+*/
+static wait_for_commit *commit_order_tail= NULL;
+
+void wsrep_register_for_group_commit(THD *thd)
+{
+ DBUG_ENTER("wsrep_register_for_group_commit");
+ if (wsrep_emulate_bin_log)
+ {
+ /* Binlog is off, no need to maintain group commit queue */
+ DBUG_VOID_RETURN;
+ }
+
+ DBUG_ASSERT(thd->wsrep_trx().state() == wsrep::transaction::s_committing);
+
+ wait_for_commit *wfc= thd->wait_for_commit_ptr= &thd->wsrep_wfc;
+
+ mysql_mutex_lock(&LOCK_wsrep_group_commit);
+ if (commit_order_tail)
+ {
+ wfc->register_wait_for_prior_commit(commit_order_tail);
+ }
+ commit_order_tail= thd->wait_for_commit_ptr;
+ mysql_mutex_unlock(&LOCK_wsrep_group_commit);
+
+ /*
+ Now we have queued for group commit. If the commit will go
+ through TC log_and_order(), the commit ordering is done
+ by TC group commit. Otherwise the wait for prior
+ commits to complete is done in ha_commit_one_phase().
+ */
+ DBUG_VOID_RETURN;
+}
+
+void wsrep_unregister_from_group_commit(THD *thd)
+{
+ DBUG_ASSERT(thd->wsrep_trx().state() == wsrep::transaction::s_ordered_commit);
+ wait_for_commit *wfc= thd->wait_for_commit_ptr;
+
+ if (wfc)
+ {
+ mysql_mutex_lock(&LOCK_wsrep_group_commit);
+ wfc->unregister_wait_for_prior_commit();
+ thd->wakeup_subsequent_commits(0);
+
+ /* The last one queued for group commit has completed commit, it is
+ safe to set tail to NULL. */
+ if (wfc == commit_order_tail)
+ commit_order_tail= NULL;
+ mysql_mutex_unlock(&LOCK_wsrep_group_commit);
+ thd->wait_for_commit_ptr= NULL;
+ }
+}
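
Aside: wsrep_register_for_group_commit()/wsrep_unregister_from_group_commit() above keep a commit_order_tail pointer so that each committer waits for the previously registered one. A standalone sketch of that ordering queue built from standard-library primitives (the Waiter type and function names are illustrative, not MariaDB's):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

struct Waiter
{
  std::mutex m;
  std::condition_variable cv;
  bool done= false;
  Waiter *wait_for= nullptr;                 // prior committer in the queue

  void wait_for_prior()
  {
    if (!wait_for) return;                   // first in the queue: no waiting
    std::unique_lock<std::mutex> lk(wait_for->m);
    wait_for->cv.wait(lk, [this]{ return wait_for->done; });
  }
  void wakeup_subsequent()
  {
    std::lock_guard<std::mutex> lk(m);
    done= true;
    cv.notify_all();
  }
};

static std::mutex queue_mutex;
static Waiter *commit_order_tail= nullptr;   // last registered committer

static void register_for_commit(Waiter *w)
{
  std::lock_guard<std::mutex> lk(queue_mutex);
  w->wait_for= commit_order_tail;            // wait for the previous tail
  commit_order_tail= w;
}

static void unregister_from_commit(Waiter *w)
{
  w->wakeup_subsequent();
  std::lock_guard<std::mutex> lk(queue_mutex);
  if (commit_order_tail == w)                // last one queued: reset the tail
    commit_order_tail= nullptr;
}

int main()
{
  std::vector<Waiter> waiters(3);
  for (auto &w : waiters) register_for_commit(&w);

  std::vector<std::thread> threads;
  for (size_t i= 0; i < waiters.size(); ++i)
    threads.emplace_back([&, i]{
      waiters[i].wait_for_prior();           // enforce registration order
      std::printf("commit %zu\n", i);        // always prints 0, 1, 2 in order
      unregister_from_commit(&waiters[i]);
    });
  for (auto &t : threads) t.join();
  return 0;
}
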
diff --git a/sql/wsrep_binlog.h b/sql/wsrep_binlog.h
index 864813d5c98..1e0e1e3cb2d 100644
--- a/sql/wsrep_binlog.h
+++ b/sql/wsrep_binlog.h
@@ -16,6 +16,7 @@
#ifndef WSREP_BINLOG_H
#define WSREP_BINLOG_H
+#include "my_global.h"
#include "sql_class.h" // THD, IO_CACHE
#define HEAP_PAGE_SIZE 65536 /* 64K */
@@ -38,23 +39,67 @@ int wsrep_write_cache_buf(IO_CACHE *cache, uchar **buf, size_t *buf_len);
@param len total amount of data written
@return wsrep error status
*/
-int wsrep_write_cache (wsrep_t* const wsrep,
- THD* const thd,
- IO_CACHE* const cache,
- size_t* const len);
+int wsrep_write_cache(THD* thd,
+ IO_CACHE* cache,
+ size_t* len);
/* Dump replication buffer to disk */
void wsrep_dump_rbr_buf(THD *thd, const void* rbr_buf, size_t buf_len);
-/* Dump replication buffer to disk without intermediate buffer */
-void wsrep_dump_rbr_direct(THD* thd, IO_CACHE* cache);
-
/* Dump replication buffer along with header to a file */
void wsrep_dump_rbr_buf_with_header(THD *thd, const void *rbr_buf,
size_t buf_len);
int wsrep_binlog_close_connection(THD* thd);
-int wsrep_binlog_savepoint_set(THD *thd, void *sv);
-int wsrep_binlog_savepoint_rollback(THD *thd, void *sv);
+
+/**
+ Write a skip event into binlog.
+
+ @param thd Thread object pointer
+ @return Zero in case of success, non-zero on failure.
+*/
+int wsrep_write_skip_event(THD* thd);
+
+/*
+ Write dummy event into binlog in place of unused GTID.
+ The binlog write is done in thd context.
+*/
+int wsrep_write_dummy_event_low(THD *thd, const char *msg);
+/*
+ Write dummy event to binlog in place of unused GTID and
+ commit. The binlog write and commit are done in a temporary
+ thd context; the original thd state is not altered.
+*/
+int wsrep_write_dummy_event(THD* thd, const char *msg);
+
+void wsrep_register_binlog_handler(THD *thd, bool trx);
+
+/**
+ Return true if committing THD will write to binlog during commit.
+ This is the case for:
+ - Local THD, binlog is open
+ - Replaying THD, binlog is open
+ - Applier THD, log-slave-updates is enabled
+*/
+bool wsrep_commit_will_write_binlog(THD *thd);
+
+/**
+ Register THD for group commit. The wsrep_trx must be in committing state,
+ i.e. the call must be done after wsrep_before_commit() but before
+ commit order is released.
+
+ This call will release commit order critical section if it is
+ determined that the commit will go through binlog group commit.
+ */
+void wsrep_register_for_group_commit(THD *thd);
+
+/**
+ Deregister THD from group commit. The wsrep_trx must be in committing state,
+ as for wsrep_register_for_group_commit() above.
+
+ This call must be used only for THDs which will not go through
+ binlog group commit.
+*/
+void wsrep_unregister_from_group_commit(THD *thd);
#endif /* WSREP_BINLOG_H */
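
Aside: wsrep_write_cache() declared above (implemented via wsrep_write_cache_inc() in the previous file's hunks) appends the transaction cache to the write set chunk by chunk and bails out once wsrep_max_ws_size is exceeded. A standalone sketch of that bounded incremental append, with an input stream and a byte vector standing in for the IO_CACHE and append_data():

#include <cstddef>
#include <cstdio>
#include <istream>
#include <sstream>
#include <string>
#include <vector>

static int append_stream(std::istream &cache, size_t max_ws_size,
                         std::vector<char> &writeset, size_t *len)
{
  char chunk[4096];
  size_t total_length= 0;

  for (;;)
  {
    cache.read(chunk, sizeof(chunk));
    size_t length= static_cast<size_t>(cache.gcount());
    if (length == 0) break;                          // cache drained
    total_length+= length;
    if (total_length > max_ws_size)                  // transaction size limit
    {
      std::fprintf(stderr, "transaction size limit (%zu) exceeded: %zu\n",
                   max_ws_size, total_length);
      return 1;
    }
    writeset.insert(writeset.end(), chunk, chunk + length);  // "append_data()"
  }
  *len= total_length;
  return 0;
}

int main()
{
  std::istringstream cache(std::string(10000, 'x'));
  std::vector<char> writeset;
  size_t len= 0;
  int rc= append_stream(cache, size_t(1) << 20, writeset, &len);
  std::printf("rc=%d, appended %zu bytes\n", rc, len);
  return rc;
}
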
diff --git a/sql/wsrep_check_opts.cc b/sql/wsrep_check_opts.cc
index 0b7a9ca6252..7b8067ef238 100644
--- a/sql/wsrep_check_opts.cc
+++ b/sql/wsrep_check_opts.cc
@@ -33,7 +33,7 @@ int wsrep_check_opts()
autoinc_lock_mode->val_int(&is_null, 0, OPT_GLOBAL, 0) != 2)
{
WSREP_ERROR("Parallel applying (wsrep_slave_threads > 1) requires"
- " innodb_autoinc_lock_mode = 2.");
+ " innodb_autoinc_lock_mode= 2.");
return 1;
}
}
@@ -88,7 +88,7 @@ int wsrep_check_opts()
{
if (global_system_variables.binlog_format != BINLOG_FORMAT_ROW)
{
- WSREP_ERROR("Only binlog_format = 'ROW' is currently supported. "
+ WSREP_ERROR("Only binlog_format= 'ROW' is currently supported. "
"Configured value: '%s'. Please adjust your "
"configuration.",
binlog_format_names[global_system_variables.binlog_format]);
diff --git a/sql/wsrep_client_service.cc b/sql/wsrep_client_service.cc
new file mode 100644
index 00000000000..b182691c593
--- /dev/null
+++ b/sql/wsrep_client_service.cc
@@ -0,0 +1,337 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "wsrep_client_service.h"
+#include "wsrep_high_priority_service.h"
+#include "wsrep_applier.h" /* wsrep_apply_events() */
+#include "wsrep_binlog.h" /* wsrep_dump_rbr_buf() */
+#include "wsrep_schema.h" /* remove_fragments() */
+#include "wsrep_thd.h"
+#include "wsrep_xid.h"
+#include "wsrep_trans_observer.h"
+
+#include "sql_base.h" /* close_temporary_table() */
+#include "sql_class.h" /* THD */
+#include "sql_parse.h" /* stmt_causes_implicit_commit() */
+#include "rpl_filter.h" /* binlog_filter */
+#include "rpl_rli.h" /* Relay_log_info */
+#include "slave.h" /* opt_log_slave_updates */
+#include "transaction.h" /* trans_commit()... */
+#include "log.h" /* stmt_has_updated_trans_table() */
+//#include "debug_sync.h"
+#include "mysql/service_debug_sync.h"
+#include "mysql/psi/mysql_thread.h" /* mysql_mutex_assert_owner() */
+namespace
+{
+
+void debug_sync_caller(THD* thd, const char* sync_point)
+{
+#ifdef ENABLED_DEBUG_SYNC_OUT
+ debug_sync_set_action(thd, sync_point, strlen(sync_point));
+#endif
+#ifdef ENABLED_DEBUG_SYNC
+ if (debug_sync_service) debug_sync_service(thd,sync_point,strlen(sync_point));
+#endif
+
+}
+}
+
+Wsrep_client_service::Wsrep_client_service(THD* thd,
+ Wsrep_client_state& client_state)
+ : wsrep::client_service()
+ , m_thd(thd)
+ , m_client_state(client_state)
+{ }
+
+void Wsrep_client_service::store_globals()
+{
+ DBUG_ENTER("Wsrep_client_service::store_globals");
+ m_thd->store_globals();
+ DBUG_VOID_RETURN;
+}
+
+void Wsrep_client_service::reset_globals()
+{
+ DBUG_ENTER("Wsrep_client_service::reset_globals");
+ m_thd->reset_globals();
+ DBUG_VOID_RETURN;
+}
+
+bool Wsrep_client_service::interrupted(
+ wsrep::unique_lock<wsrep::mutex>& lock WSREP_UNUSED) const
+{
+ DBUG_ASSERT(m_thd == current_thd);
+ /* Underlying mutex in lock object points to LOCK_thd_data, which
+ protects m_thd->wsrep_trx(), LOCK_thd_kill protects m_thd->killed.
+ Locking order is:
+ 1) LOCK_thd_data
+ 2) LOCK_thd_kill */
+ mysql_mutex_assert_owner(static_cast<mysql_mutex_t*>(lock.mutex().native()));
+ mysql_mutex_lock(&m_thd->LOCK_thd_kill);
+ bool ret= (m_thd->killed != NOT_KILLED);
+ if (ret)
+ {
+ WSREP_DEBUG("wsrep state is interrupted, THD::killed %d trx state %d",
+ m_thd->killed, m_thd->wsrep_trx().state());
+ }
+ mysql_mutex_unlock(&m_thd->LOCK_thd_kill);
+ return ret;
+}
+
+int Wsrep_client_service::prepare_data_for_replication()
+{
+ DBUG_ASSERT(m_thd == current_thd);
+ DBUG_ENTER("Wsrep_client_service::prepare_data_for_replication");
+ size_t data_len= 0;
+ IO_CACHE* cache= wsrep_get_trans_cache(m_thd);
+
+ if (cache)
+ {
+ m_thd->binlog_flush_pending_rows_event(true);
+ if (wsrep_write_cache(m_thd, cache, &data_len))
+ {
+ WSREP_ERROR("rbr write fail, data_len: %zu",
+ data_len);
+ // wsrep_override_error(m_thd, ER_ERROR_DURING_COMMIT);
+ DBUG_RETURN(1);
+ }
+ }
+
+ if (data_len == 0)
+ {
+ if (m_thd->get_stmt_da()->is_ok() &&
+ m_thd->get_stmt_da()->affected_rows() > 0 &&
+ !binlog_filter->is_on() &&
+ !m_thd->wsrep_trx().is_streaming())
+ {
+ WSREP_DEBUG("empty rbr buffer, query: %s, "
+ "affected rows: %llu, "
+ "changed tables: %d, "
+ "sql_log_bin: %d",
+ WSREP_QUERY(m_thd),
+ m_thd->get_stmt_da()->affected_rows(),
+ stmt_has_updated_trans_table(m_thd),
+ m_thd->variables.sql_log_bin);
+ }
+ else
+ {
+ WSREP_DEBUG("empty rbr buffer, query: %s", WSREP_QUERY(m_thd));
+ }
+ }
+ DBUG_RETURN(0);
+}
+
+
+void Wsrep_client_service::cleanup_transaction()
+{
+ DBUG_ASSERT(m_thd == current_thd);
+ if (WSREP_EMULATE_BINLOG(m_thd)) wsrep_thd_binlog_trx_reset(m_thd);
+ m_thd->wsrep_affected_rows= 0;
+}
+
+
+int Wsrep_client_service::prepare_fragment_for_replication(wsrep::mutable_buffer& buffer)
+{
+ DBUG_ASSERT(m_thd == current_thd);
+ THD* thd= m_thd;
+ DBUG_ENTER("Wsrep_client_service::prepare_fragment_for_replication");
+ IO_CACHE* cache= wsrep_get_trans_cache(thd);
+ thd->binlog_flush_pending_rows_event(true);
+
+ if (!cache)
+ {
+ DBUG_RETURN(0);
+ }
+
+ const my_off_t saved_pos(my_b_tell(cache));
+ if (reinit_io_cache(cache, READ_CACHE, thd->wsrep_sr().bytes_certified(), 0, 0))
+ {
+ DBUG_RETURN(1);
+ }
+
+ int ret= 0;
+ size_t total_length= 0;
+ size_t length= my_b_bytes_in_cache(cache);
+
+ if (!length)
+ {
+ length= my_b_fill(cache);
+ }
+
+ if (length > 0)
+ {
+ do
+ {
+ total_length+= length;
+ if (total_length > wsrep_max_ws_size)
+ {
+ WSREP_WARN("transaction size limit (%lu) exceeded: %zu",
+ wsrep_max_ws_size, total_length);
+ ret= 1;
+ goto cleanup;
+ }
+
+ buffer.push_back(reinterpret_cast<const char*>(cache->read_pos),
+ reinterpret_cast<const char*>(cache->read_pos + length));
+ cache->read_pos= cache->read_end;
+ }
+ while (cache->file >= 0 && (length= my_b_fill(cache)));
+ }
+ DBUG_ASSERT(total_length == buffer.size());
+cleanup:
+ if (reinit_io_cache(cache, WRITE_CACHE, saved_pos, 0, 0))
+ {
+ WSREP_WARN("Failed to reinitialize IO cache");
+ ret= 1;
+ }
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_client_service::remove_fragments()
+{
+ DBUG_ENTER("Wsrep_client_service::remove_fragments");
+ if (wsrep_schema->remove_fragments(m_thd,
+ Wsrep_server_state::instance().id(),
+ m_thd->wsrep_trx().id(),
+ m_thd->wsrep_sr().fragments()))
+ {
+ WSREP_DEBUG("Failed to remove fragments from SR storage for transaction "
+ "%llu, %llu",
+ m_thd->thread_id, m_thd->wsrep_trx().id().get());
+ DBUG_RETURN(1);
+ }
+ DBUG_RETURN(0);
+}
+
+bool Wsrep_client_service::statement_allowed_for_streaming() const
+{
+ /*
+ Todo: Decide if implicit commit is allowed with streaming
+ replication.
+ !stmt_causes_implicit_commit(m_thd, CF_IMPLICIT_COMMIT_BEGIN);
+ */
+ return true;
+}
+
+size_t Wsrep_client_service::bytes_generated() const
+{
+ IO_CACHE* cache= wsrep_get_trans_cache(m_thd);
+ if (cache)
+ {
+ size_t pending_rows_event_length= 0;
+ if (Rows_log_event* ev= m_thd->binlog_get_pending_rows_event(true))
+ {
+ pending_rows_event_length= ev->get_data_size();
+ }
+ return my_b_tell(cache) + pending_rows_event_length;
+ }
+ return 0;
+}
+
+void Wsrep_client_service::will_replay()
+{
+ DBUG_ASSERT(m_thd == current_thd);
+ mysql_mutex_lock(&LOCK_wsrep_replaying);
+ ++wsrep_replaying;
+ mysql_mutex_unlock(&LOCK_wsrep_replaying);
+}
+
+enum wsrep::provider::status Wsrep_client_service::replay()
+{
+
+ DBUG_ASSERT(m_thd == current_thd);
+ DBUG_ENTER("Wsrep_client_service::replay");
+
+ /*
+ Allocate a separate THD for replaying to avoid tampering with the
+ original THD state while applying replication events.
+ */
+ THD *replayer_thd= new THD(true, true);
+ replayer_thd->thread_stack= m_thd->thread_stack;
+ replayer_thd->real_id= pthread_self();
+ replayer_thd->prior_thr_create_utime=
+ replayer_thd->start_utime= microsecond_interval_timer();
+ replayer_thd->set_command(COM_SLEEP);
+ replayer_thd->reset_for_next_command(true);
+
+ enum wsrep::provider::status ret;
+ {
+ Wsrep_replayer_service replayer_service(replayer_thd, m_thd);
+ wsrep::provider& provider(replayer_thd->wsrep_cs().provider());
+ ret= provider.replay(replayer_thd->wsrep_trx().ws_handle(),
+ &replayer_service);
+ replayer_service.replay_status(ret);
+ }
+
+ delete replayer_thd;
+
+ mysql_mutex_lock(&LOCK_wsrep_replaying);
+ --wsrep_replaying;
+ mysql_cond_broadcast(&COND_wsrep_replaying);
+ mysql_mutex_unlock(&LOCK_wsrep_replaying);
+ DBUG_RETURN(ret);
+}
+
+void Wsrep_client_service::wait_for_replayers(wsrep::unique_lock<wsrep::mutex>& lock)
+{
+ DBUG_ASSERT(m_thd == current_thd);
+ lock.unlock();
+ mysql_mutex_lock(&LOCK_wsrep_replaying);
+ /* We need to check if the THD is BF aborted during the condition wait.
+ Because the aborter does not know which condition this thread is waiting on,
+ use a timed wait and check if the THD is BF aborted in the loop. */
+ while (wsrep_replaying > 0 && !wsrep_is_bf_aborted(m_thd))
+ {
+ struct timespec wait_time;
+ set_timespec_nsec(wait_time, 10000000L);
+ mysql_cond_timedwait(&COND_wsrep_replaying, &LOCK_wsrep_replaying,
+ &wait_time);
+ }
+ mysql_mutex_unlock(&LOCK_wsrep_replaying);
+ lock.lock();
+}
+
+void Wsrep_client_service::debug_sync(const char* sync_point)
+{
+ DBUG_ASSERT(m_thd == current_thd);
+ debug_sync_caller(m_thd, sync_point);
+}
+
+void Wsrep_client_service::debug_crash(const char* crash_point)
+{
+ // DBUG_ASSERT(m_thd == current_thd);
+ DBUG_EXECUTE_IF(crash_point, DBUG_SUICIDE(); );
+}
+
+int Wsrep_client_service::bf_rollback()
+{
+ DBUG_ASSERT(m_thd == current_thd);
+ DBUG_ENTER("Wsrep_client_service::rollback");
+
+ int ret= (trans_rollback_stmt(m_thd) || trans_rollback(m_thd));
+ if (m_thd->locked_tables_mode && m_thd->lock)
+ {
+ m_thd->locked_tables_list.unlock_locked_tables(m_thd);
+ m_thd->variables.option_bits&= ~OPTION_TABLE_LOCK;
+ }
+ if (m_thd->global_read_lock.is_acquired())
+ {
+ m_thd->global_read_lock.unlock_global_read_lock(m_thd);
+ }
+ m_thd->mdl_context.release_transactional_locks();
+ m_thd->mdl_context.release_explicit_locks();
+
+ DBUG_RETURN(ret);
+}
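
Aside: Wsrep_client_service::wait_for_replayers() above waits in 10 ms slices so that a brute-force abort is noticed even if no signal ever arrives. A standalone sketch of that timed-wait loop using standard-library primitives (all names here are illustrative):

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

static std::mutex replaying_mutex;
static std::condition_variable replaying_cond;
static int replaying_count= 1;                  // pretend one replayer is active
static std::atomic<bool> bf_aborted{false};     // set by a brute-force abort

static void wait_for_replayers()
{
  std::unique_lock<std::mutex> lk(replaying_mutex);
  while (replaying_count > 0 && !bf_aborted.load())
  {
    // 10 ms slice, mirroring set_timespec_nsec(wait_time, 10000000L) above
    replaying_cond.wait_for(lk, std::chrono::milliseconds(10));
  }
}

int main()
{
  std::thread replayer([]{
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    std::lock_guard<std::mutex> lk(replaying_mutex);
    replaying_count= 0;                         // replay finished
    replaying_cond.notify_all();
  });
  wait_for_replayers();
  std::puts("replayers done or aborted");
  replayer.join();
  return 0;
}
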
diff --git a/sql/wsrep_client_service.h b/sql/wsrep_client_service.h
new file mode 100644
index 00000000000..b1695b7aedf
--- /dev/null
+++ b/sql/wsrep_client_service.h
@@ -0,0 +1,63 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+/** @file wsrep_client_service.h
+
+ This file provides declarations for the client service implementation.
+ See wsrep/client_service.hpp for interface documentation.
+*/
+
+#ifndef WSREP_CLIENT_SERVICE_H
+#define WSREP_CLIENT_SERVICE_H
+
+/* wsrep-lib */
+#include "wsrep/client_service.hpp"
+#include "wsrep/client_state.hpp"
+#include "wsrep/exception.hpp" /* not_implemented_error, remove when finished */
+
+class THD;
+class Wsrep_client_state;
+class Wsrep_high_priority_context;
+
+class Wsrep_client_service : public wsrep::client_service
+{
+public:
+ Wsrep_client_service(THD*, Wsrep_client_state&);
+
+ bool interrupted(wsrep::unique_lock<wsrep::mutex>&) const;
+ void reset_globals();
+ void store_globals();
+ int prepare_data_for_replication();
+ void cleanup_transaction();
+ bool statement_allowed_for_streaming() const;
+ size_t bytes_generated() const;
+ int prepare_fragment_for_replication(wsrep::mutable_buffer&);
+ int remove_fragments();
+ void emergency_shutdown()
+ { throw wsrep::not_implemented_error(); }
+ void will_replay();
+ enum wsrep::provider::status replay();
+ void wait_for_replayers(wsrep::unique_lock<wsrep::mutex>&);
+ void debug_sync(const char*);
+ void debug_crash(const char*);
+ int bf_rollback();
+private:
+ friend class Wsrep_server_service;
+ THD* m_thd;
+ Wsrep_client_state& m_client_state;
+};
+
+
+#endif /* WSREP_CLIENT_SERVICE_H */
diff --git a/sql/wsrep_client_state.h b/sql/wsrep_client_state.h
new file mode 100644
index 00000000000..403bfa81365
--- /dev/null
+++ b/sql/wsrep_client_state.h
@@ -0,0 +1,47 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef WSREP_CLIENT_STATE_H
+#define WSREP_CLIENT_STATE_H
+
+/* wsrep-lib */
+#include "wsrep/client_state.hpp"
+#include "my_global.h"
+
+class THD;
+
+class Wsrep_client_state : public wsrep::client_state
+{
+public:
+ Wsrep_client_state(THD* thd,
+ wsrep::mutex& mutex,
+ wsrep::condition_variable& cond,
+ wsrep::server_state& server_state,
+ wsrep::client_service& client_service,
+ const wsrep::client_id& id)
+ : wsrep::client_state(mutex,
+ cond,
+ server_state,
+ client_service,
+ id,
+ wsrep::client_state::m_local)
+ , m_thd(thd)
+ { }
+ THD* thd() { return m_thd; }
+private:
+ THD* m_thd;
+};
+
+#endif /* WSREP_CLIENT_STATE_H */
diff --git a/sql/wsrep_condition_variable.h b/sql/wsrep_condition_variable.h
new file mode 100644
index 00000000000..4412154e67b
--- /dev/null
+++ b/sql/wsrep_condition_variable.h
@@ -0,0 +1,54 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef WSREP_CONDITION_VARIABLE_H
+#define WSREP_CONDITION_VARIABLE_H
+
+/* wsrep-lib */
+#include "wsrep/condition_variable.hpp"
+
+/* implementation */
+#include "my_pthread.h"
+
+class Wsrep_condition_variable : public wsrep::condition_variable
+{
+public:
+
+ Wsrep_condition_variable(mysql_cond_t& cond)
+ : m_cond(cond)
+ { }
+ ~Wsrep_condition_variable()
+ { }
+
+ void notify_one()
+ {
+ mysql_cond_signal(&m_cond);
+ }
+
+ void notify_all()
+ {
+ mysql_cond_broadcast(&m_cond);
+ }
+
+ void wait(wsrep::unique_lock<wsrep::mutex>& lock)
+ {
+ mysql_mutex_t* mutex= static_cast<mysql_mutex_t*>(lock.mutex().native());
+ mysql_cond_wait(&m_cond, mutex);
+ }
+private:
+ mysql_cond_t& m_cond;
+};
+
+#endif /* WSREP_CONDITION_VARIABLE_H */
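
Aside: Wsrep_condition_variable above adapts mysql_cond_t to the wsrep::condition_variable interface by forwarding notify_one()/notify_all()/wait(). A standalone sketch of the same adapter shape, with std::condition_variable in place of mysql_cond_t and a minimal stand-in interface:

#include <condition_variable>
#include <cstdio>
#include <mutex>

struct Condition_variable                // minimal stand-in for the interface
{
  virtual ~Condition_variable() {}
  virtual void notify_one()= 0;
  virtual void notify_all()= 0;
  virtual void wait(std::unique_lock<std::mutex> &lock)= 0;
};

class Std_condition_variable : public Condition_variable
{
public:
  explicit Std_condition_variable(std::condition_variable &cond)
    : m_cond(cond) { }
  void notify_one() override { m_cond.notify_one(); }
  void notify_all() override { m_cond.notify_all(); }
  void wait(std::unique_lock<std::mutex> &lock) override { m_cond.wait(lock); }
private:
  std::condition_variable &m_cond;       // wrapped primitive, not owned
};

int main()
{
  std::condition_variable raw;
  Std_condition_variable cv(raw);
  cv.notify_all();                       // forwards to the wrapped primitive
  std::puts("adapter forwards notify_*()/wait() to the underlying primitive");
  return 0;
}
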
diff --git a/sql/wsrep_dummy.cc b/sql/wsrep_dummy.cc
index 97ec85f7566..01f2ad88ed5 100644
--- a/sql/wsrep_dummy.cc
+++ b/sql/wsrep_dummy.cc
@@ -17,16 +17,10 @@
#include <sql_class.h>
#include <mysql/service_wsrep.h>
-my_bool wsrep_thd_is_BF(THD *, my_bool)
+my_bool wsrep_thd_is_BF(const THD *, my_bool)
{ return 0; }
-int wsrep_trx_order_before(THD *, THD *)
-{ return 0; }
-
-enum wsrep_conflict_state wsrep_thd_conflict_state(THD *, my_bool)
-{ return NO_CONFLICT; }
-
-int wsrep_is_wsrep_xid(const XID*)
+int wsrep_is_wsrep_xid(const void* xid)
{ return 0; }
long long wsrep_xid_seqno(const XID* x)
@@ -34,104 +28,77 @@ long long wsrep_xid_seqno(const XID* x)
const unsigned char* wsrep_xid_uuid(const XID*)
{
- static const unsigned char uuid[16] = {0};
+ static const unsigned char uuid[16]= {0};
return uuid;
}
+bool wsrep_prepare_key_for_innodb(THD* thd, const uchar*, size_t, const uchar*, size_t, struct wsrep_buf*, size_t*)
+{ return 1; }
+
bool wsrep_prepare_key(const uchar*, size_t, const uchar*, size_t, struct wsrep_buf*, size_t*)
{ return 0; }
struct wsrep *get_wsrep()
{ return 0; }
-my_bool get_wsrep_certify_nonPK()
-{ return 0; }
-
-my_bool get_wsrep_debug()
-{ return 0; }
-
-my_bool get_wsrep_drupal_282555_workaround()
-{ return 0; }
-
-my_bool get_wsrep_load_data_splitting()
-{ return 0; }
-
my_bool get_wsrep_recovery()
{ return 0; }
-my_bool get_wsrep_log_conflicts()
-{ return 0; }
-
-long get_wsrep_protocol_version()
-{ return 0; }
-
-my_bool wsrep_aborting_thd_contains(THD *)
-{ return 0; }
-
-void wsrep_aborting_thd_enqueue(THD *)
-{ }
-
bool wsrep_consistency_check(THD *)
{ return 0; }
void wsrep_lock_rollback()
{ }
-int wsrep_on(THD *thd)
+my_bool wsrep_on(const THD *)
{ return 0; }
-void wsrep_post_commit(THD*, bool)
-{ }
-
-enum wsrep_trx_status wsrep_run_wsrep_commit(THD *, bool)
-{ return WSREP_TRX_ERROR; }
-
-void wsrep_thd_LOCK(THD *)
-{ }
-
-void wsrep_thd_UNLOCK(THD *)
+void wsrep_thd_LOCK(const THD *)
{ }
-void wsrep_thd_awake(THD *, my_bool)
+void wsrep_thd_UNLOCK(const THD *)
{ }
const char *wsrep_thd_conflict_state_str(THD *)
{ return 0; }
-enum wsrep_exec_mode wsrep_thd_exec_mode(THD *)
-{ return LOCAL_STATE; }
-
const char *wsrep_thd_exec_mode_str(THD *)
{ return NULL; }
-enum wsrep_conflict_state wsrep_thd_get_conflict_state(THD *)
-{ return NO_CONFLICT; }
+const char *wsrep_thd_query(const THD *)
+{ return 0; }
-my_bool wsrep_thd_is_wsrep(THD *)
+const char *wsrep_thd_query_state_str(THD *)
{ return 0; }
-char *wsrep_thd_query(THD *)
+int wsrep_thd_retry_counter(const THD *)
{ return 0; }
-enum wsrep_query_state wsrep_thd_query_state(THD *)
-{ return QUERY_IDLE; }
+bool wsrep_thd_ignore_table(THD *)
+{ return 0; }
-const char *wsrep_thd_query_state_str(THD *)
+long long wsrep_thd_trx_seqno(const THD *)
+{ return -1; }
+
+my_bool wsrep_thd_is_aborting(const THD *)
{ return 0; }
-int wsrep_thd_retry_counter(THD *)
+void wsrep_set_data_home_dir(const char *)
+{ }
+
+my_bool wsrep_thd_is_local(const THD *)
{ return 0; }
-void wsrep_thd_set_conflict_state(THD *, enum wsrep_conflict_state)
+void wsrep_thd_self_abort(THD *)
{ }
-bool wsrep_thd_ignore_table(THD *)
+int wsrep_thd_append_key(THD *, const struct wsrep_key*, int, enum Wsrep_service_key_type)
{ return 0; }
-longlong wsrep_thd_trx_seqno(THD *)
-{ return -1; }
+const char* wsrep_thd_client_state_str(const THD*)
+{ return 0; }
-struct wsrep_ws_handle* wsrep_thd_ws_handle(THD *)
+const char* wsrep_thd_client_mode_str(const THD*)
{ return 0; }
void wsrep_thd_auto_increment_variables(THD *thd,
@@ -142,20 +109,32 @@ void wsrep_thd_auto_increment_variables(THD *thd,
*increment= thd->variables.auto_increment_increment;
}
-void wsrep_set_load_multi_commit(THD *thd, bool split)
-{ }
+const char* wsrep_thd_transaction_state_str(const THD*)
+{ return 0; }
+
+query_id_t wsrep_thd_transaction_id(const THD *)
+{ return 0; }
-bool wsrep_is_load_multi_commit(THD *thd)
-{ return false; }
+my_bool wsrep_thd_bf_abort(const THD *, THD *, my_bool)
+{ return 0; }
-int wsrep_trx_is_aborting(THD *)
+my_bool wsrep_thd_order_before(const THD*, const THD *)
{ return 0; }
-void wsrep_unlock_rollback()
+void wsrep_handle_SR_rollback(THD*, THD*)
{ }
-void wsrep_set_data_home_dir(const char *)
+my_bool wsrep_thd_skip_locking(const THD*)
+{ return 0;}
+
+const char* wsrep_get_sr_table_name()
+{ return 0; }
+
+my_bool wsrep_get_debug()
+{ return 0;}
+
+void wsrep_commit_ordered(THD* )
{ }
-my_bool wsrep_thd_is_applier(MYSQL_THD thd)
-{ return false; }
+my_bool wsrep_thd_is_applying(const THD*)
+{ return 0;}
diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc
new file mode 100644
index 00000000000..afb4ca3d3b7
--- /dev/null
+++ b/sql/wsrep_high_priority_service.cc
@@ -0,0 +1,644 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "wsrep_high_priority_service.h"
+#include "wsrep_applier.h"
+#include "wsrep_binlog.h"
+#include "wsrep_schema.h"
+#include "wsrep_xid.h"
+#include "wsrep_trans_observer.h"
+
+#include "sql_class.h" /* THD */
+#include "transaction.h"
+#include "debug_sync.h"
+/* RLI */
+#include "rpl_rli.h"
+#define NUMBER_OF_FIELDS_TO_IDENTIFY_COORDINATOR 1
+#define NUMBER_OF_FIELDS_TO_IDENTIFY_WORKER 2
+#include "slave.h"
+#include "rpl_mi.h"
+
+namespace
+{
+/*
+ Scoped mode for applying non-transactional write sets (TOI)
+ */
+class Wsrep_non_trans_mode
+{
+public:
+ Wsrep_non_trans_mode(THD* thd, const wsrep::ws_meta& ws_meta)
+ : m_thd(thd)
+ , m_option_bits(thd->variables.option_bits)
+ , m_server_status(thd->server_status)
+ {
+ m_thd->variables.option_bits&= ~OPTION_BEGIN;
+ m_thd->server_status&= ~SERVER_STATUS_IN_TRANS;
+ m_thd->wsrep_cs().enter_toi(ws_meta);
+ }
+ ~Wsrep_non_trans_mode()
+ {
+ m_thd->variables.option_bits= m_option_bits;
+ m_thd->server_status= m_server_status;
+ m_thd->wsrep_cs().leave_toi();
+ }
+private:
+ Wsrep_non_trans_mode(const Wsrep_non_trans_mode&);
+ Wsrep_non_trans_mode& operator=(const Wsrep_non_trans_mode&);
+ THD* m_thd;
+ ulonglong m_option_bits;
+ uint m_server_status;
+};
+}
+
+static rpl_group_info* wsrep_relay_group_init(THD* thd, const char* log_fname)
+{
+ Relay_log_info* rli= new Relay_log_info(false);
+
+ if (!rli->relay_log.description_event_for_exec)
+ {
+ rli->relay_log.description_event_for_exec=
+ new Format_description_log_event(4);
+ }
+
+ static LEX_CSTRING connection_name= { STRING_WITH_LEN("wsrep") };
+
+ /*
+ Master_info's constructor initializes rpl_filter either with an already
+ constructed Rpl_filter object from the global 'rpl_filters' list, if one
+ with the same connection name exists, or by constructing a new Rpl_filter
+ object and adding it to rpl_filters. This object is later destroyed by
+ Master_info's destructor, which looks it up by connection name in the
+ rpl_filters list.
+
+ However, since all Master_info objects created here share the same
+ connection name ("wsrep"), destruction of any of the existing Master_info
+ objects (in wsrep_return_from_bf_mode()) would free the rpl_filter
+ referenced by any/all existing Master_info objects.
+
+ To avoid that, a check has been added to Master_info's destructor so it
+ does not free the "wsrep" rpl_filter. It will eventually be freed by
+ free_all_rpl_filters() when the server terminates.
+ */
+ rli->mi= new Master_info(&connection_name, false);
+
+ struct rpl_group_info *rgi= new rpl_group_info(rli);
+ rgi->thd= rli->sql_driver_thd= thd;
+
+ if ((rgi->deferred_events_collecting= rli->mi->rpl_filter->is_on()))
+ {
+ rgi->deferred_events= new Deferred_log_events(rli);
+ }
+
+ return rgi;
+}
+
+static void wsrep_setup_uk_and_fk_checks(THD* thd)
+{
+ /* Tune FK and UK checking policy. These are reset back to original
+ in Wsrep_high_priority_service destructor. */
+ if (wsrep_slave_UK_checks == FALSE)
+ thd->variables.option_bits|= OPTION_RELAXED_UNIQUE_CHECKS;
+ else
+ thd->variables.option_bits&= ~OPTION_RELAXED_UNIQUE_CHECKS;
+
+ if (wsrep_slave_FK_checks == FALSE)
+ thd->variables.option_bits|= OPTION_NO_FOREIGN_KEY_CHECKS;
+ else
+ thd->variables.option_bits&= ~OPTION_NO_FOREIGN_KEY_CHECKS;
+}
+
+/****************************************************************************
+ High priority service
+*****************************************************************************/
+
+Wsrep_high_priority_service::Wsrep_high_priority_service(THD* thd)
+ : wsrep::high_priority_service(Wsrep_server_state::instance())
+ , wsrep::high_priority_context(thd->wsrep_cs())
+ , m_thd(thd)
+ , m_rli()
+{
+ LEX_CSTRING db_str= { NULL, 0 };
+ m_shadow.option_bits = thd->variables.option_bits;
+ m_shadow.server_status= thd->server_status;
+ m_shadow.vio = thd->net.vio;
+ m_shadow.tx_isolation = thd->variables.tx_isolation;
+ m_shadow.db = (char *)thd->db.str;
+ m_shadow.db_length = thd->db.length;
+ m_shadow.user_time = thd->user_time;
+ m_shadow.row_count_func= thd->get_row_count_func();
+ m_shadow.wsrep_applier= thd->wsrep_applier;
+
+ /* Disable general logging on applier threads */
+ thd->variables.option_bits |= OPTION_LOG_OFF;
+ /* Enable binlogging if opt_log_slave_updates is set */
+ if (opt_log_slave_updates)
+ thd->variables.option_bits|= OPTION_BIN_LOG;
+ else
+ thd->variables.option_bits&= ~(OPTION_BIN_LOG);
+
+ thd->net.vio= 0;
+ thd->reset_db(&db_str);
+ thd->clear_error();
+ thd->variables.tx_isolation= ISO_READ_COMMITTED;
+ thd->tx_isolation = ISO_READ_COMMITTED;
+
+ /* From trans_begin() */
+ thd->variables.option_bits|= OPTION_BEGIN;
+ thd->server_status|= SERVER_STATUS_IN_TRANS;
+
+ /* Make THD wsrep_applier so that it cannot be killed */
+ thd->wsrep_applier= true;
+
+ if (!thd->wsrep_rgi) thd->wsrep_rgi= wsrep_relay_group_init(thd, "wsrep_relay");
+
+ m_rgi= thd->wsrep_rgi;
+ m_rgi->thd= thd;
+ m_rli= m_rgi->rli;
+ thd_proc_info(thd, "wsrep applier idle");
+}
+
+Wsrep_high_priority_service::~Wsrep_high_priority_service()
+{
+ THD* thd= m_thd;
+ thd->variables.option_bits = m_shadow.option_bits;
+ thd->server_status = m_shadow.server_status;
+ thd->net.vio = m_shadow.vio;
+ thd->variables.tx_isolation= m_shadow.tx_isolation;
+ LEX_CSTRING db_str= { m_shadow.db, m_shadow.db_length };
+ thd->reset_db(&db_str);
+ thd->user_time = m_shadow.user_time;
+
+ if (thd->wsrep_rgi && thd->wsrep_rgi->rli)
+ delete thd->wsrep_rgi->rli->mi;
+ if (thd->wsrep_rgi)
+ delete thd->wsrep_rgi->rli;
+ delete thd->wsrep_rgi;
+ thd->wsrep_rgi= NULL;
+
+ thd->set_row_count_func(m_shadow.row_count_func);
+ thd->wsrep_applier = m_shadow.wsrep_applier;
+}
+
+int Wsrep_high_priority_service::start_transaction(
+ const wsrep::ws_handle& ws_handle, const wsrep::ws_meta& ws_meta)
+{
+ DBUG_ENTER("Wsrep_high_priority_service::start_transaction");
+ DBUG_RETURN(m_thd->wsrep_cs().start_transaction(ws_handle, ws_meta) ||
+ trans_begin(m_thd));
+}
+
+const wsrep::transaction& Wsrep_high_priority_service::transaction() const
+{
+ DBUG_ENTER("Wsrep_high_priority_service::transaction");
+ DBUG_RETURN(m_thd->wsrep_trx());
+}
+
+int Wsrep_high_priority_service::adopt_transaction(
+ const wsrep::transaction& transaction)
+{
+ DBUG_ENTER("Wsrep_high_priority_service::adopt_transaction");
+ /* Adopt transaction first to set up transaction meta data for
+ trans begin. If trans_begin() fails for some reason, roll back
+ the wsrep transaction before return. */
+ m_thd->wsrep_cs().adopt_transaction(transaction);
+ int ret= trans_begin(m_thd);
+ if (ret)
+ {
+ m_thd->wsrep_cs().before_rollback();
+ m_thd->wsrep_cs().after_rollback();
+ }
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_high_priority_service::append_fragment_and_commit(
+ const wsrep::ws_handle& ws_handle,
+ const wsrep::ws_meta& ws_meta,
+ const wsrep::const_buffer& data)
+{
+ DBUG_ENTER("Wsrep_high_priority_service::append_fragment_and_commit");
+ int ret= start_transaction(ws_handle, ws_meta);
+ /*
+ Start transaction explicitly to avoid early commit via
+ trans_commit_stmt() in append_fragment()
+ */
+ ret= ret || trans_begin(m_thd);
+ ret= ret || wsrep_schema->append_fragment(m_thd,
+ ws_meta.server_id(),
+ ws_meta.transaction_id(),
+ ws_meta.seqno(),
+ ws_meta.flags(),
+ data);
+
+ /*
+ Note: The commit code below seems to be identical to
+ Wsrep_storage_service::commit(). Consider implementing a
+ common utility function to deal with the commit.
+ */
+ const bool do_binlog_commit= (opt_log_slave_updates &&
+ wsrep_gtid_mode &&
+ m_thd->variables.gtid_seq_no);
+ /*
+ Write skip event into binlog if gtid_mode is on. This is to
+ maintain gtid continuity.
+ */
+ if (do_binlog_commit)
+ {
+ ret= wsrep_write_skip_event(m_thd);
+ }
+
+ if (!ret)
+ {
+ ret= m_thd->wsrep_cs().prepare_for_ordering(ws_handle,
+ ws_meta, true);
+ }
+
+ ret= ret || trans_commit(m_thd);
+
+ m_thd->wsrep_cs().after_applying();
+ m_thd->mdl_context.release_transactional_locks();
+
+ thd_proc_info(m_thd, "wsrep applier committed");
+
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_high_priority_service::remove_fragments(const wsrep::ws_meta& ws_meta)
+{
+ DBUG_ENTER("Wsrep_high_priority_service::remove_fragments");
+ int ret= wsrep_schema->remove_fragments(m_thd,
+ ws_meta.server_id(),
+ ws_meta.transaction_id(),
+ m_thd->wsrep_sr().fragments());
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_high_priority_service::commit(const wsrep::ws_handle& ws_handle,
+ const wsrep::ws_meta& ws_meta)
+{
+ DBUG_ENTER("Wsrep_high_priority_service::commit");
+ THD* thd= m_thd;
+ DBUG_ASSERT(thd->wsrep_trx().active());
+ thd->wsrep_cs().prepare_for_ordering(ws_handle, ws_meta, true);
+ thd_proc_info(thd, "committing");
+
+ const bool is_ordered= !ws_meta.seqno().is_undefined();
+ int ret= trans_commit(thd);
+
+ if (ret == 0)
+ {
+ m_rgi->cleanup_context(thd, 0);
+ }
+
+ m_thd->mdl_context.release_transactional_locks();
+
+ thd_proc_info(thd, "wsrep applier committed");
+
+ if (!is_ordered)
+ {
+ m_thd->wsrep_cs().before_rollback();
+ m_thd->wsrep_cs().after_rollback();
+ }
+ else if (m_thd->wsrep_trx().state() == wsrep::transaction::s_executing)
+ {
+ /*
+ Wsrep commit was ordered but it did not go through commit time
+ hooks and remains active. Cycle through commit hooks to release
+ commit order and to make cleanup happen in after_applying() call.
+
+ This is a workaround for CTAS with empty result set.
+ */
+ WSREP_DEBUG("Commit not finished for applier %llu", thd->thread_id);
+ ret= ret || m_thd->wsrep_cs().before_commit() ||
+ m_thd->wsrep_cs().ordered_commit() ||
+ m_thd->wsrep_cs().after_commit();
+ }
+
+ thd->lex->sql_command= SQLCOM_END;
+
+ must_exit_= check_exit_status();
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_high_priority_service::rollback(const wsrep::ws_handle& ws_handle,
+ const wsrep::ws_meta& ws_meta)
+{
+ DBUG_ENTER("Wsrep_high_priority_service::rollback");
+ m_thd->wsrep_cs().prepare_for_ordering(ws_handle, ws_meta, false);
+ int ret= (trans_rollback_stmt(m_thd) || trans_rollback(m_thd));
+ m_thd->mdl_context.release_transactional_locks();
+ m_thd->mdl_context.release_explicit_locks();
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_high_priority_service::apply_toi(const wsrep::ws_meta& ws_meta,
+ const wsrep::const_buffer& data)
+{
+ DBUG_ENTER("Wsrep_high_priority_service::apply_toi");
+ THD* thd= m_thd;
+ Wsrep_non_trans_mode non_trans_mode(thd, ws_meta);
+
+ wsrep::client_state& client_state(thd->wsrep_cs());
+ DBUG_ASSERT(client_state.in_toi());
+
+ thd_proc_info(thd, "wsrep applier toi");
+
+ WSREP_DEBUG("Wsrep_high_priority_service::apply_toi: %lld",
+ client_state.toi_meta().seqno().get());
+
+ int ret= wsrep_apply_events(thd, m_rli, data.data(), data.size());
+ if (ret != 0 || thd->wsrep_has_ignored_error)
+ {
+ wsrep_dump_rbr_buf_with_header(thd, data.data(), data.size());
+ thd->wsrep_has_ignored_error= false;
+ /* todo: error voting */
+ }
+ trans_commit(thd);
+
+ thd->close_temporary_tables();
+ thd->lex->sql_command= SQLCOM_END;
+
+ wsrep_set_SE_checkpoint(client_state.toi_meta().gtid());
+
+ must_exit_= check_exit_status();
+
+ DBUG_RETURN(ret);
+}
+
+void Wsrep_high_priority_service::store_globals()
+{
+ DBUG_ENTER("Wsrep_high_priority_service::store_globals");
+ /* In addition to calling THD::store_globals(), call
+ wsrep::client_state::store_globals() to gain ownership of
+ the client state */
+ m_thd->store_globals();
+ m_thd->wsrep_cs().store_globals();
+ DBUG_VOID_RETURN;
+}
+
+void Wsrep_high_priority_service::reset_globals()
+{
+ DBUG_ENTER("Wsrep_high_priority_service::reset_globals");
+ m_thd->reset_globals();
+ DBUG_VOID_RETURN;
+}
+
+void Wsrep_high_priority_service::switch_execution_context(wsrep::high_priority_service& orig_high_priority_service)
+{
+ DBUG_ENTER("Wsrep_high_priority_service::switch_execution_context");
+ Wsrep_high_priority_service&
+ orig_hps= static_cast<Wsrep_high_priority_service&>(orig_high_priority_service);
+ m_thd->thread_stack= orig_hps.m_thd->thread_stack;
+ DBUG_VOID_RETURN;
+}
+
+int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_handle,
+ const wsrep::ws_meta& ws_meta)
+{
+ DBUG_ENTER("Wsrep_high_priority_service::log_dummy_write_set");
+ int ret= 0;
+ DBUG_PRINT("info",
+ ("Wsrep_high_priority_service::log_dummy_write_set: seqno=%lld",
+ ws_meta.seqno().get()));
+ m_thd->wsrep_cs().start_transaction(ws_handle, ws_meta);
+ WSREP_DEBUG("Log dummy write set %lld", ws_meta.seqno().get());
+ if (!(opt_log_slave_updates && wsrep_gtid_mode && m_thd->variables.gtid_seq_no))
+ {
+ m_thd->wsrep_cs().before_rollback();
+ m_thd->wsrep_cs().after_rollback();
+ }
+ m_thd->wsrep_cs().after_applying();
+ DBUG_RETURN(ret);
+}
+
+void Wsrep_high_priority_service::debug_crash(const char* crash_point)
+{
+ DBUG_ASSERT(m_thd == current_thd);
+ DBUG_EXECUTE_IF(crash_point, DBUG_SUICIDE(););
+}
+
+/****************************************************************************
+ Applier service
+*****************************************************************************/
+
+Wsrep_applier_service::Wsrep_applier_service(THD* thd)
+ : Wsrep_high_priority_service(thd)
+{
+ thd->wsrep_applier_service= this;
+ thd->wsrep_cs().open(wsrep::client_id(thd->thread_id));
+ thd->wsrep_cs().before_command();
+ thd->wsrep_cs().debug_log_level(wsrep_debug);
+
+}
+
+Wsrep_applier_service::~Wsrep_applier_service()
+{
+ m_thd->wsrep_cs().after_command_before_result();
+ m_thd->wsrep_cs().after_command_after_result();
+ m_thd->wsrep_cs().close();
+ m_thd->wsrep_cs().cleanup();
+ m_thd->wsrep_applier_service= NULL;
+}
+
+int Wsrep_applier_service::apply_write_set(const wsrep::ws_meta& ws_meta,
+ const wsrep::const_buffer& data)
+{
+ DBUG_ENTER("Wsrep_applier_service::apply_write_set");
+ THD* thd= m_thd;
+
+ thd->variables.option_bits |= OPTION_BEGIN;
+ thd->variables.option_bits |= OPTION_NOT_AUTOCOMMIT;
+ DBUG_ASSERT(thd->wsrep_trx().active());
+ DBUG_ASSERT(thd->wsrep_trx().state() == wsrep::transaction::s_executing);
+
+ thd_proc_info(thd, "applying write set");
+ /* The dbug sync point was moved here, after the possible THD switch
+ for SR transactions has been done.
+ */
+ /* Allow tests to block the applier thread using the DBUG facilities */
+ DBUG_EXECUTE_IF("sync.wsrep_apply_cb",
+ {
+ const char act[]=
+ "now "
+ "SIGNAL sync.wsrep_apply_cb_reached "
+ "WAIT_FOR signal.wsrep_apply_cb";
+ DBUG_ASSERT(!debug_sync_set_action(thd,
+ STRING_WITH_LEN(act)));
+ };);
+
+ wsrep_setup_uk_and_fk_checks(thd);
+
+ int ret= wsrep_apply_events(thd, m_rli, data.data(), data.size());
+
+ if (ret || thd->wsrep_has_ignored_error)
+ {
+ wsrep_dump_rbr_buf_with_header(thd, data.data(), data.size());
+ }
+
+ thd->close_temporary_tables();
+ if (!ret && !(ws_meta.flags() & wsrep::provider::flag::commit))
+ {
+ thd->wsrep_cs().fragment_applied(ws_meta.seqno());
+ }
+ thd_proc_info(thd, "wsrep applied write set");
+ DBUG_RETURN(ret);
+}
+
+void Wsrep_applier_service::after_apply()
+{
+ DBUG_ENTER("Wsrep_applier_service::after_apply");
+ wsrep_after_apply(m_thd);
+ DBUG_VOID_RETURN;
+}
+
+bool Wsrep_applier_service::check_exit_status() const
+{
+ bool ret= false;
+ mysql_mutex_lock(&LOCK_wsrep_slave_threads);
+ if (wsrep_slave_count_change < 0)
+ {
+ ++wsrep_slave_count_change;
+ ret= true;
+ }
+ mysql_mutex_unlock(&LOCK_wsrep_slave_threads);
+ return ret;
+}
+
+/****************************************************************************
+ Replayer service
+*****************************************************************************/
+
+Wsrep_replayer_service::Wsrep_replayer_service(THD* replayer_thd, THD* orig_thd)
+ : Wsrep_high_priority_service(replayer_thd)
+ , m_orig_thd(orig_thd)
+ , m_da_shadow()
+ , m_replay_status()
+{
+ /* Response must not have been sent to client */
+ DBUG_ASSERT(!orig_thd->get_stmt_da()->is_sent());
+ /* The PS reprepare observer should have been removed already;
+ open_table() will fail if we have a dangling observer here */
+ DBUG_ASSERT(!orig_thd->m_reprepare_observer);
+ /* Replaying should always happen from the after_statement() hook,
+ after rollback, which should guarantee that there are no
+ transactional locks */
+ DBUG_ASSERT(!orig_thd->mdl_context.has_transactional_locks());
+
+ /* Make a shadow copy of diagnostics area and reset */
+ m_da_shadow.status= orig_thd->get_stmt_da()->status();
+ if (m_da_shadow.status == Diagnostics_area::DA_OK)
+ {
+ m_da_shadow.affected_rows= orig_thd->get_stmt_da()->affected_rows();
+ m_da_shadow.last_insert_id= orig_thd->get_stmt_da()->last_insert_id();
+ strmake(m_da_shadow.message, orig_thd->get_stmt_da()->message(),
+ sizeof(m_da_shadow.message) - 1);
+ }
+ orig_thd->get_stmt_da()->reset_diagnostics_area();
+
+ /* Release explicit locks */
+ if (orig_thd->locked_tables_mode && orig_thd->lock)
+ {
+ WSREP_WARN("releasing table lock for replaying (%llu)",
+ orig_thd->thread_id);
+ orig_thd->locked_tables_list.unlock_locked_tables(orig_thd);
+ orig_thd->variables.option_bits&= ~(OPTION_TABLE_LOCK);
+ }
+
+ thd_proc_info(orig_thd, "wsrep replaying trx");
+
+ /*
+ Switch execution context to replayer_thd and prepare it for
+ replay execution.
+ */
+ orig_thd->reset_globals();
+ replayer_thd->store_globals();
+ wsrep_open(replayer_thd);
+ wsrep_before_command(replayer_thd);
+ replayer_thd->wsrep_cs().clone_transaction_for_replay(orig_thd->wsrep_trx());
+}
+
+Wsrep_replayer_service::~Wsrep_replayer_service()
+{
+ THD* replayer_thd= m_thd;
+ THD* orig_thd= m_orig_thd;
+
+ /* Store the replay result/state in the original thread's wsrep client
+ state and switch execution context back to the original thread. */
+ orig_thd->wsrep_cs().after_replay(replayer_thd->wsrep_trx());
+ wsrep_after_apply(replayer_thd);
+ wsrep_after_command_ignore_result(replayer_thd);
+ wsrep_close(replayer_thd);
+ replayer_thd->reset_globals();
+ orig_thd->store_globals();
+
+ DBUG_ASSERT(!orig_thd->get_stmt_da()->is_sent());
+ DBUG_ASSERT(!orig_thd->get_stmt_da()->is_set());
+
+ if (m_replay_status == wsrep::provider::success)
+ {
+ DBUG_ASSERT(replayer_thd->wsrep_cs().current_error() == wsrep::e_success);
+ orig_thd->killed= NOT_KILLED;
+ my_ok(orig_thd, m_da_shadow.affected_rows, m_da_shadow.last_insert_id);
+ }
+ else if (m_replay_status == wsrep::provider::error_certification_failed)
+ {
+ wsrep_override_error(orig_thd, ER_LOCK_DEADLOCK);
+ }
+ else
+ {
+ DBUG_ASSERT(0);
+ WSREP_ERROR("trx_replay failed for: %d, schema: %s, query: %s",
+ m_replay_status,
+ orig_thd->db.str, WSREP_QUERY(orig_thd));
+ unireg_abort(1);
+ }
+}
+
+int Wsrep_replayer_service::apply_write_set(const wsrep::ws_meta& ws_meta,
+ const wsrep::const_buffer& data)
+{
+ DBUG_ENTER("Wsrep_replayer_service::apply_write_set");
+ THD* thd= m_thd;
+
+ DBUG_ASSERT(thd->wsrep_trx().active());
+ DBUG_ASSERT(thd->wsrep_trx().state() == wsrep::transaction::s_replaying);
+
+ wsrep_setup_uk_and_fk_checks(thd);
+
+ int ret= 0;
+ if (!wsrep::starts_transaction(ws_meta.flags()))
+ {
+ DBUG_ASSERT(thd->wsrep_trx().is_streaming());
+ ret= wsrep_schema->replay_transaction(thd,
+ m_rli,
+ ws_meta,
+ thd->wsrep_sr().fragments());
+ }
+
+ ret= ret || wsrep_apply_events(thd, m_rli, data.data(), data.size());
+
+ if (ret || thd->wsrep_has_ignored_error)
+ {
+ wsrep_dump_rbr_buf_with_header(thd, data.data(), data.size());
+ }
+
+ thd->close_temporary_tables();
+ if (!ret && !(ws_meta.flags() & wsrep::provider::flag::commit))
+ {
+ thd->wsrep_cs().fragment_applied(ws_meta.seqno());
+ }
+
+ thd_proc_info(thd, "wsrep replayed write set");
+ DBUG_RETURN(ret);
+}
diff --git a/sql/wsrep_high_priority_service.h b/sql/wsrep_high_priority_service.h
new file mode 100644
index 00000000000..34fa1669b71
--- /dev/null
+++ b/sql/wsrep_high_priority_service.h
@@ -0,0 +1,119 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef WSREP_HIGH_PRIORITY_SERVICE_H
+#define WSREP_HIGH_PRIORITY_SERVICE_H
+
+#include "wsrep/high_priority_service.hpp"
+#include "wsrep/client_state.hpp"
+#include "my_global.h"
+#include "sql_error.h" /* Diagnostics area */
+#include "sql_class.h" /* rpl_group_info */
+
+class THD;
+class Relay_log_info;
+class Wsrep_server_service;
+
+class Wsrep_high_priority_service :
+ public wsrep::high_priority_service,
+ public wsrep::high_priority_context
+{
+public:
+ Wsrep_high_priority_service(THD*);
+ ~Wsrep_high_priority_service();
+ int start_transaction(const wsrep::ws_handle&,
+ const wsrep::ws_meta&);
+ const wsrep::transaction& transaction() const;
+ int adopt_transaction(const wsrep::transaction&);
+ int apply_write_set(const wsrep::ws_meta&, const wsrep::const_buffer&) = 0;
+ int append_fragment_and_commit(const wsrep::ws_handle&,
+ const wsrep::ws_meta&,
+ const wsrep::const_buffer&);
+ int remove_fragments(const wsrep::ws_meta&);
+ int commit(const wsrep::ws_handle&, const wsrep::ws_meta&);
+ int rollback(const wsrep::ws_handle&, const wsrep::ws_meta&);
+ int apply_toi(const wsrep::ws_meta&, const wsrep::const_buffer&);
+ void store_globals();
+ void reset_globals();
+ void switch_execution_context(wsrep::high_priority_service&);
+ int log_dummy_write_set(const wsrep::ws_handle&,
+ const wsrep::ws_meta&);
+
+ virtual bool check_exit_status() const = 0;
+ void debug_crash(const char*);
+protected:
+ friend Wsrep_server_service;
+ THD* m_thd;
+ Relay_log_info* m_rli;
+ rpl_group_info* m_rgi;
+ struct shadow
+ {
+ ulonglong option_bits;
+ uint server_status;
+ struct st_vio* vio;
+ ulong tx_isolation;
+ char* db;
+ size_t db_length;
+ //struct timeval user_time;
+ my_hrtime_t user_time;
+ longlong row_count_func;
+ bool wsrep_applier;
+} m_shadow;
+};
+
+class Wsrep_applier_service : public Wsrep_high_priority_service
+{
+public:
+ Wsrep_applier_service(THD*);
+ ~Wsrep_applier_service();
+ int apply_write_set(const wsrep::ws_meta&, const wsrep::const_buffer&);
+ void after_apply();
+ bool is_replaying() const { return false; }
+ bool check_exit_status() const;
+};
+
+class Wsrep_replayer_service : public Wsrep_high_priority_service
+{
+public:
+ Wsrep_replayer_service(THD* replayer_thd, THD* orig_thd);
+ ~Wsrep_replayer_service();
+ int apply_write_set(const wsrep::ws_meta&, const wsrep::const_buffer&);
+ void after_apply() { }
+ bool is_replaying() const { return true; }
+ void replay_status(enum wsrep::provider::status status)
+ { m_replay_status = status; }
+ enum wsrep::provider::status replay_status() const
+ { return m_replay_status; }
+ /* Replayer should never be forced to exit */
+ bool check_exit_status() const { return false; }
+private:
+ THD* m_orig_thd;
+ struct da_shadow
+ {
+ enum Diagnostics_area::enum_diagnostics_status status;
+ ulonglong affected_rows;
+ ulonglong last_insert_id;
+ char message[MYSQL_ERRMSG_SIZE];
+ da_shadow()
+ : status()
+ , affected_rows()
+ , last_insert_id()
+ , message()
+ { }
+ } m_da_shadow;
+ enum wsrep::provider::status m_replay_status;
+};
+
+#endif /* WSREP_HIGH_PRIORITY_SERVICE_H */
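A rough usage sketch, not part of this patch, of how an applier thread is expected to drive the classes above. It assumes the wsrep-lib provider()/run_applier() entry points, which are not shown in this diff:

#include "wsrep_high_priority_service.h"
#include "wsrep_server_state.h"

/* Hypothetical applier thread body: wrap the THD in an applier service
   and let the provider invoke apply_write_set()/commit()/rollback()
   through the wsrep::high_priority_service interface. */
static void wsrep_applier_thread_body(THD* thd)
{
  Wsrep_applier_service applier(thd); /* opens the wsrep client state */
  enum wsrep::provider::status ret=
    Wsrep_server_state::instance().provider().run_applier(&applier);
  (void) ret; /* error handling omitted in this sketch */
}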
diff --git a/sql/wsrep_hton.cc b/sql/wsrep_hton.cc
deleted file mode 100644
index aa8dff8d188..00000000000
--- a/sql/wsrep_hton.cc
+++ /dev/null
@@ -1,659 +0,0 @@
-/* Copyright 2008-2015 Codership Oy <http://www.codership.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
-
-#include "mariadb.h"
-#include <mysqld.h>
-#include "sql_base.h"
-#include "rpl_filter.h"
-#include <sql_class.h>
-#include "wsrep_mysqld.h"
-#include "wsrep_binlog.h"
-#include "wsrep_xid.h"
-#include <cstdio>
-#include <cstdlib>
-#include "debug_sync.h"
-
-extern handlerton *binlog_hton;
-extern int binlog_close_connection(handlerton *hton, THD *thd);
-extern ulonglong thd_to_trx_id(THD *thd);
-
-extern "C" int thd_binlog_format(const MYSQL_THD thd);
-// todo: share interface with ha_innodb.c
-
-/*
- Cleanup after local transaction commit/rollback, replay or TOI.
-*/
-void wsrep_cleanup_transaction(THD *thd)
-{
- if (!WSREP(thd)) return;
-
- if (wsrep_emulate_bin_log) thd_binlog_trx_reset(thd);
- thd->wsrep_ws_handle.trx_id= WSREP_UNDEFINED_TRX_ID;
- thd->wsrep_trx_meta.gtid= WSREP_GTID_UNDEFINED;
- thd->wsrep_trx_meta.depends_on= WSREP_SEQNO_UNDEFINED;
- thd->wsrep_exec_mode= LOCAL_STATE;
- thd->wsrep_affected_rows= 0;
- thd->wsrep_skip_wsrep_GTID= false;
- thd->wsrep_split_flag= false;
- return;
-}
-
-/*
- wsrep hton
-*/
-handlerton *wsrep_hton;
-
-
-/*
- Registers wsrep hton at commit time if transaction has registered htons
- for supported engine types.
-
- Hton should not be registered for TOTAL_ORDER operations.
-
- Registration is needed for both LOCAL_MODE and REPL_RECV transactions to run
- commit in 2pc so that wsrep position gets properly recorded in storage
- engines.
-
- Note that all hton calls should immediately return for threads that are
- in REPL_RECV mode as their states are controlled by wsrep appliers or
- replaying code. Only threads in LOCAL_MODE should run wsrep callbacks
- from hton methods.
-*/
-void wsrep_register_hton(THD* thd, bool all)
-{
- if (WSREP(thd) && thd->wsrep_exec_mode != TOTAL_ORDER &&
- !thd->wsrep_apply_toi)
- {
- if (thd->wsrep_exec_mode == LOCAL_STATE &&
- (thd_sql_command(thd) == SQLCOM_OPTIMIZE ||
- thd_sql_command(thd) == SQLCOM_ANALYZE ||
- thd_sql_command(thd) == SQLCOM_REPAIR) &&
- thd->lex->no_write_to_binlog == 1)
- {
- WSREP_DEBUG("Skipping wsrep_register_hton for LOCAL sql admin command : %s",
- thd->query());
- return;
- }
-
- THD_TRANS *trans=all ? &thd->transaction.all : &thd->transaction.stmt;
- for (Ha_trx_info *i= trans->ha_list; i; i = i->next())
- {
- if ((i->ht()->db_type == DB_TYPE_INNODB) ||
- (i->ht()->db_type == DB_TYPE_TOKUDB))
- {
- trans_register_ha(thd, all, wsrep_hton);
-
- /* follow innodb read/write settting
- * but, as an exception: CTAS with empty result set will not be
- * replicated unless we declare wsrep hton as read/write here
- */
- if (i->is_trx_read_write() ||
- ((thd->lex->sql_command == SQLCOM_CREATE_TABLE ||
- thd->lex->sql_command == SQLCOM_CREATE_SEQUENCE) &&
- thd->wsrep_exec_mode == LOCAL_STATE))
- {
- thd->ha_data[wsrep_hton->slot].ha_info[all].set_trx_read_write();
- }
- break;
- }
- }
- }
-}
-
-/*
- Calls wsrep->post_commit() for locally executed transactions that have
- got seqno from provider (must commit) and don't require replaying.
- */
-void wsrep_post_commit(THD* thd, bool all)
-{
- if (!WSREP(thd)) return;
-
- switch (thd->wsrep_exec_mode)
- {
- case LOCAL_COMMIT:
- {
- DBUG_ASSERT(thd->wsrep_trx_meta.gtid.seqno != WSREP_SEQNO_UNDEFINED);
- if (wsrep && wsrep->post_commit(wsrep, &thd->wsrep_ws_handle))
- {
- DBUG_PRINT("wsrep", ("set committed fail"));
- WSREP_WARN("set committed fail: %llu %d",
- (long long)thd->real_id, thd->get_stmt_da()->status());
- }
- wsrep_cleanup_transaction(thd);
- break;
- }
- case LOCAL_STATE:
- {
- /* non-InnoDB statements may have populated events in stmt cache
- => cleanup
- */
- WSREP_DEBUG("cleanup transaction for LOCAL_STATE");
- /*
- Run post-rollback hook to clean up in the case if
- some keys were populated for the transaction in provider
- but during commit time there was no write set to replicate.
- This may happen when client sets the SAVEPOINT and immediately
- rolls back to savepoint after first operation.
- */
- if (all && thd->wsrep_conflict_state != MUST_REPLAY &&
- wsrep && wsrep->post_rollback(wsrep, &thd->wsrep_ws_handle))
- {
- WSREP_WARN("post_rollback fail: %llu %d",
- (long long)thd->thread_id, thd->get_stmt_da()->status());
- }
- wsrep_cleanup_transaction(thd);
- break;
- }
- default: break;
- }
-}
-
-/*
- wsrep exploits binlog's caches even if binlogging itself is not
- activated. In such case connection close needs calling
- actual binlog's method.
- Todo: split binlog hton from its caches to use ones by wsrep
- without referring to binlog's stuff.
-*/
-static int
-wsrep_close_connection(handlerton* hton, THD* thd)
-{
- DBUG_ENTER("wsrep_close_connection");
-
- if (thd->wsrep_exec_mode == REPL_RECV)
- {
- DBUG_RETURN(0);
- }
-
- if (wsrep_emulate_bin_log && thd_get_ha_data(thd, binlog_hton) != NULL)
- binlog_hton->close_connection (binlog_hton, thd);
- DBUG_RETURN(0);
-}
-
-/*
- prepare/wsrep_run_wsrep_commit can fail in two ways
- - certification test or an equivalent. As a result,
- the current transaction just rolls back
- Error codes:
- WSREP_TRX_CERT_FAIL, WSREP_TRX_SIZE_EXCEEDED, WSREP_TRX_ERROR
- - a post-certification failure makes this server unable to
- commit its own WS and therefore the server must abort
-*/
-static int wsrep_prepare(handlerton *hton, THD *thd, bool all)
-{
- DBUG_ENTER("wsrep_prepare");
-
- if (thd->wsrep_exec_mode == REPL_RECV)
- {
- DBUG_RETURN(0);
- }
-
- DBUG_ASSERT(thd->ha_data[wsrep_hton->slot].ha_info[all].is_trx_read_write());
- DBUG_ASSERT(thd->wsrep_exec_mode == LOCAL_STATE);
- DBUG_ASSERT(thd->wsrep_trx_meta.gtid.seqno == WSREP_SEQNO_UNDEFINED);
-
- if ((all ||
- !thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
- (thd->variables.wsrep_on && !wsrep_trans_cache_is_empty(thd)))
- {
- int res= wsrep_run_wsrep_commit(thd, all);
- if (res != 0)
- {
- if (res == WSREP_TRX_SIZE_EXCEEDED)
- res= EMSGSIZE;
- else
- res= EDEADLK; // for a better error message
- }
- DBUG_RETURN (res);
- }
- DBUG_RETURN(0);
-}
-
-static int wsrep_savepoint_set(handlerton *hton, THD *thd, void *sv)
-{
- DBUG_ENTER("wsrep_savepoint_set");
-
- if (thd->wsrep_exec_mode == REPL_RECV)
- {
- DBUG_RETURN(0);
- }
-
- if (!wsrep_emulate_bin_log) DBUG_RETURN(0);
- int rcode = wsrep_binlog_savepoint_set(thd, sv);
- DBUG_RETURN(rcode);
-}
-
-static int wsrep_savepoint_rollback(handlerton *hton, THD *thd, void *sv)
-{
- DBUG_ENTER("wsrep_savepoint_rollback");
-
- if (thd->wsrep_exec_mode == REPL_RECV)
- {
- DBUG_RETURN(0);
- }
-
- if (!wsrep_emulate_bin_log) DBUG_RETURN(0);
- int rcode = wsrep_binlog_savepoint_rollback(thd, sv);
- DBUG_RETURN(rcode);
-}
-
-static int wsrep_rollback(handlerton *hton, THD *thd, bool all)
-{
- DBUG_ENTER("wsrep_rollback");
-
- if (thd->wsrep_exec_mode == REPL_RECV)
- {
- DBUG_RETURN(0);
- }
-
- mysql_mutex_lock(&thd->LOCK_thd_data);
- switch (thd->wsrep_exec_mode)
- {
- case TOTAL_ORDER:
- case REPL_RECV:
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- WSREP_DEBUG("Avoiding wsrep rollback for failed DDL: %s", thd->query());
- DBUG_RETURN(0);
- default: break;
- }
-
- if ((all || !thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
- thd->variables.wsrep_on && thd->wsrep_conflict_state != MUST_REPLAY)
- {
- if (wsrep && wsrep->post_rollback(wsrep, &thd->wsrep_ws_handle))
- {
- DBUG_PRINT("wsrep", ("setting rollback fail"));
- WSREP_ERROR("settting rollback fail: thd: %llu, schema: %s, SQL: %s",
- (long long)thd->real_id, thd->get_db(), thd->query());
- }
- wsrep_cleanup_transaction(thd);
- }
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- DBUG_RETURN(0);
-}
-
-int wsrep_commit(handlerton *hton, THD *thd, bool all)
-{
- DBUG_ENTER("wsrep_commit");
-
- if (thd->wsrep_exec_mode == REPL_RECV)
- {
- DBUG_RETURN(0);
- }
-
- mysql_mutex_lock(&thd->LOCK_thd_data);
- if ((all || !thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
- (thd->variables.wsrep_on && thd->wsrep_conflict_state != MUST_REPLAY))
- {
- if (thd->wsrep_exec_mode == LOCAL_COMMIT)
- {
- DBUG_ASSERT(thd->ha_data[wsrep_hton->slot].ha_info[all].is_trx_read_write());
- /*
- Call to wsrep->post_commit() (moved to wsrep_post_commit()) must
- be done only after commit has done for all involved htons.
- */
- DBUG_PRINT("wsrep", ("commit"));
- }
- else
- {
- /*
- Transaction didn't go through wsrep->pre_commit() so just roll back
- possible changes to clean state.
- */
- if (WSREP_PROVIDER_EXISTS) {
- if (wsrep && wsrep->post_rollback(wsrep, &thd->wsrep_ws_handle))
- {
- DBUG_PRINT("wsrep", ("setting rollback fail"));
- WSREP_ERROR("settting rollback fail: thd: %llu, schema: %s, SQL: %s",
- (long long)thd->real_id, thd->get_db(),
- thd->query());
- }
- }
- wsrep_cleanup_transaction(thd);
- }
- }
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- DBUG_RETURN(0);
-}
-
-
-extern Rpl_filter* binlog_filter;
-extern my_bool opt_log_slave_updates;
-
-enum wsrep_trx_status
-wsrep_run_wsrep_commit(THD *thd, bool all)
-{
- int rcode= -1;
- size_t data_len= 0;
- IO_CACHE *cache;
- int replay_round= 0;
- DBUG_ENTER("wsrep_run_wsrep_commit");
-
- if (thd->get_stmt_da()->is_error()) {
- WSREP_DEBUG("commit issue, error: %d %s",
- thd->get_stmt_da()->sql_errno(), thd->get_stmt_da()->message());
- }
-
- DEBUG_SYNC(thd, "wsrep_before_replication");
-
- if (thd->slave_thread && !opt_log_slave_updates) DBUG_RETURN(WSREP_TRX_OK);
-
- if (thd->wsrep_exec_mode == REPL_RECV) {
-
- mysql_mutex_lock(&thd->LOCK_thd_data);
- if (thd->wsrep_conflict_state == MUST_ABORT) {
- if (wsrep_debug)
- WSREP_INFO("WSREP: must abort for BF");
- DBUG_PRINT("wsrep", ("BF apply commit fail"));
- thd->wsrep_conflict_state = NO_CONFLICT;
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- //
- // TODO: test all calls of the rollback.
- // rollback must happen automagically innobase_rollback(hton, thd, 1);
- //
- DBUG_RETURN(WSREP_TRX_ERROR);
- }
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- }
-
- if (thd->wsrep_exec_mode != LOCAL_STATE) DBUG_RETURN(WSREP_TRX_OK);
-
- if (thd->wsrep_consistency_check == CONSISTENCY_CHECK_RUNNING) {
- WSREP_DEBUG("commit for consistency check: %s", thd->query());
- DBUG_RETURN(WSREP_TRX_OK);
- }
-
- DBUG_PRINT("wsrep", ("replicating commit"));
-
- mysql_mutex_lock(&thd->LOCK_thd_data);
- if (thd->wsrep_conflict_state == MUST_ABORT) {
- DBUG_PRINT("wsrep", ("replicate commit fail"));
- thd->wsrep_conflict_state = ABORTED;
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- if (wsrep_debug) {
- WSREP_INFO("innobase_commit, abort %s",
- (thd->query()) ? thd->query() : "void");
- }
- DBUG_RETURN(WSREP_TRX_CERT_FAIL);
- }
-
- mysql_mutex_lock(&LOCK_wsrep_replaying);
-
- DBUG_PRINT("info", ("wsrep_replaying: %d wsrep_conflict_state: %d killed: %d shutdown_in_progress: %d",
- (int) wsrep_replaying, (int) thd->wsrep_conflict_state,
- (int) thd->killed,
- (int) shutdown_in_progress));
-
- while (wsrep_replaying > 0 &&
- thd->wsrep_conflict_state == NO_CONFLICT &&
- thd->killed == NOT_KILLED &&
- !shutdown_in_progress)
- {
-
- mysql_mutex_unlock(&LOCK_wsrep_replaying);
- mysql_mutex_unlock(&thd->LOCK_thd_data);
-
- mysql_mutex_lock(&thd->mysys_var->mutex);
- thd_proc_info(thd, "WSREP waiting on replaying");
- thd->mysys_var->current_mutex= &LOCK_wsrep_replaying;
- thd->mysys_var->current_cond= &COND_wsrep_replaying;
- mysql_mutex_unlock(&thd->mysys_var->mutex);
-
- mysql_mutex_lock(&LOCK_wsrep_replaying);
- // Using timedwait is a hack to avoid deadlock in case if BF victim
- // misses the signal.
- struct timespec wtime = {0, 1000000};
- mysql_cond_timedwait(&COND_wsrep_replaying, &LOCK_wsrep_replaying,
- &wtime);
-
- if (replay_round++ % 100000 == 0)
- WSREP_DEBUG("commit waiting for replaying: replayers %d, thd: %lld "
- "conflict: %d (round: %d)",
- wsrep_replaying, (longlong) thd->thread_id,
- thd->wsrep_conflict_state, replay_round);
-
- mysql_mutex_unlock(&LOCK_wsrep_replaying);
-
- mysql_mutex_lock(&thd->mysys_var->mutex);
- thd->mysys_var->current_mutex= 0;
- thd->mysys_var->current_cond= 0;
- mysql_mutex_unlock(&thd->mysys_var->mutex);
-
- mysql_mutex_lock(&thd->LOCK_thd_data);
- mysql_mutex_lock(&LOCK_wsrep_replaying);
- }
- mysql_mutex_unlock(&LOCK_wsrep_replaying);
-
- if (thd->wsrep_conflict_state == MUST_ABORT) {
- DBUG_PRINT("wsrep", ("replicate commit fail"));
- thd->wsrep_conflict_state = ABORTED;
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- WSREP_DEBUG("innobase_commit abort after replaying wait %s",
- (thd->query()) ? thd->query() : "void");
- DBUG_RETURN(WSREP_TRX_CERT_FAIL);
- }
-
- thd->wsrep_query_state = QUERY_COMMITTING;
- mysql_mutex_unlock(&thd->LOCK_thd_data);
-
- cache = get_trans_log(thd);
- rcode = 0;
- if (cache) {
- thd->binlog_flush_pending_rows_event(true);
- rcode = wsrep_write_cache(wsrep, thd, cache, &data_len);
- if (WSREP_OK != rcode) {
- WSREP_ERROR("rbr write fail, data_len: %zu, %d", data_len, rcode);
- DBUG_RETURN(WSREP_TRX_SIZE_EXCEEDED);
- }
- }
-
- DBUG_PRINT("info", ("rcode: %d wsrep_conflict_state: %d",
- rcode, thd->wsrep_conflict_state));
-
- if (data_len == 0)
- {
- if (thd->get_stmt_da()->is_ok() &&
- thd->get_stmt_da()->affected_rows() > 0 &&
- !binlog_filter->is_on())
- {
- WSREP_DEBUG("empty rbr buffer, query: %s, "
- "affected rows: %llu, "
- "changed tables: %d, "
- "sql_log_bin: %d, "
- "wsrep status (%d %d %d)",
- thd->query(), thd->get_stmt_da()->affected_rows(),
- stmt_has_updated_trans_table(thd), thd->variables.sql_log_bin,
- thd->wsrep_exec_mode, thd->wsrep_query_state,
- thd->wsrep_conflict_state);
- }
- else
- {
- WSREP_DEBUG("empty rbr buffer, query: %s", thd->query());
- }
- thd->wsrep_query_state= QUERY_EXEC;
- DBUG_RETURN(WSREP_TRX_OK);
- }
-
- if (WSREP_UNDEFINED_TRX_ID == thd->wsrep_ws_handle.trx_id)
- {
- WSREP_WARN("SQL statement was ineffective thd: %lld buf: %zu\n"
- "schema: %s \n"
- "QUERY: %s\n"
- " => Skipping replication",
- (longlong) thd->thread_id, data_len,
- thd->get_db(), thd->query());
- rcode = WSREP_TRX_FAIL;
- }
- else if (!rcode)
- {
- if (WSREP_OK == rcode && wsrep)
- rcode = wsrep->pre_commit(wsrep,
- (wsrep_conn_id_t)thd->thread_id,
- &thd->wsrep_ws_handle,
- WSREP_FLAG_COMMIT |
- ((thd->wsrep_PA_safe) ?
- 0ULL : WSREP_FLAG_PA_UNSAFE),
- &thd->wsrep_trx_meta);
-
- DBUG_PRINT("info", ("rcode after pre_commit: %d", rcode));
-
- if (rcode == WSREP_TRX_MISSING) {
- WSREP_WARN("Transaction missing in provider, thd: %lld schema: %s SQL: %s",
- (longlong) thd->thread_id,
- thd->get_db(), thd->query());
- rcode = WSREP_TRX_FAIL;
- } else if (rcode == WSREP_BF_ABORT) {
- WSREP_DEBUG("thd: %lld seqno: %lld BF aborted by provider, will replay",
- (longlong) thd->thread_id,
- (longlong) thd->wsrep_trx_meta.gtid.seqno);
- mysql_mutex_lock(&thd->LOCK_thd_data);
- thd->wsrep_conflict_state = MUST_REPLAY;
- DBUG_ASSERT(wsrep_thd_trx_seqno(thd) > 0);
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- mysql_mutex_lock(&LOCK_wsrep_replaying);
- wsrep_replaying++;
- WSREP_DEBUG("replaying increased: %d, thd: %lld",
- wsrep_replaying, (longlong) thd->thread_id);
- mysql_mutex_unlock(&LOCK_wsrep_replaying);
- }
- } else {
- WSREP_ERROR("I/O error reading from thd's binlog iocache: "
- "errno=%d, io cache code=%d", my_errno, cache->error);
- DBUG_ASSERT(0); // failure like this can not normally happen
- DBUG_RETURN(WSREP_TRX_ERROR);
- }
-
- mysql_mutex_lock(&thd->LOCK_thd_data);
-
- DEBUG_SYNC(thd, "wsrep_after_replication");
-
- DBUG_PRINT("info", ("rcode: %d wsrep_conflict_state: %d",
- rcode, thd->wsrep_conflict_state));
-
- switch(rcode) {
- case 0:
- /*
- About MUST_ABORT: We assume that even if thd conflict state was set
- to MUST_ABORT, underlying transaction was not rolled back or marked
- as deadlock victim in QUERY_COMMITTING state. Conflict state is
- set to NO_CONFLICT and commit proceeds as usual.
- */
- if (thd->wsrep_conflict_state == MUST_ABORT)
- thd->wsrep_conflict_state= NO_CONFLICT;
-
- if (thd->wsrep_conflict_state != NO_CONFLICT)
- {
- WSREP_WARN("thd: %llu seqno: %lld conflict state %d after post commit",
- (longlong) thd->thread_id,
- (longlong) thd->wsrep_trx_meta.gtid.seqno,
- thd->wsrep_conflict_state);
- }
- thd->wsrep_exec_mode= LOCAL_COMMIT;
- DBUG_ASSERT(thd->wsrep_trx_meta.gtid.seqno != WSREP_SEQNO_UNDEFINED);
- /* Override XID iff it was generated by mysql */
- if (thd->transaction.xid_state.xid.get_my_xid())
- {
- wsrep_xid_init(&thd->transaction.xid_state.xid,
- thd->wsrep_trx_meta.gtid.uuid,
- thd->wsrep_trx_meta.gtid.seqno);
- }
- DBUG_PRINT("wsrep", ("replicating commit success"));
- break;
- case WSREP_BF_ABORT:
- DBUG_ASSERT(thd->wsrep_trx_meta.gtid.seqno != WSREP_SEQNO_UNDEFINED);
- /* fall through */
- case WSREP_TRX_FAIL:
- WSREP_DEBUG("commit failed for reason: %d", rcode);
- DBUG_PRINT("wsrep", ("replicating commit fail"));
-
- thd->wsrep_query_state= QUERY_EXEC;
-
- if (thd->wsrep_conflict_state == MUST_ABORT) {
- thd->wsrep_conflict_state= ABORTED;
- }
- else
- {
- WSREP_DEBUG("conflict state: %d", thd->wsrep_conflict_state);
- if (thd->wsrep_conflict_state == NO_CONFLICT)
- {
- thd->wsrep_conflict_state = CERT_FAILURE;
- WSREP_LOG_CONFLICT(NULL, thd, FALSE);
- }
- }
- mysql_mutex_unlock(&thd->LOCK_thd_data);
-
- DBUG_RETURN(WSREP_TRX_CERT_FAIL);
-
- case WSREP_SIZE_EXCEEDED:
- WSREP_ERROR("transaction size exceeded");
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- DBUG_RETURN(WSREP_TRX_SIZE_EXCEEDED);
- case WSREP_CONN_FAIL:
- WSREP_ERROR("connection failure");
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- DBUG_RETURN(WSREP_TRX_ERROR);
- default:
- WSREP_ERROR("unknown connection failure");
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- DBUG_RETURN(WSREP_TRX_ERROR);
- }
-
- thd->wsrep_query_state= QUERY_EXEC;
- mysql_mutex_unlock(&thd->LOCK_thd_data);
-
- DBUG_RETURN(WSREP_TRX_OK);
-}
-
-
-static int wsrep_hton_init(void *p)
-{
- wsrep_hton= (handlerton *)p;
- //wsrep_hton->state=opt_bin_log ? SHOW_OPTION_YES : SHOW_OPTION_NO;
- wsrep_hton->state= SHOW_OPTION_YES;
- wsrep_hton->db_type=(legacy_db_type)0;
- wsrep_hton->savepoint_offset= sizeof(my_off_t);
- wsrep_hton->close_connection= wsrep_close_connection;
- wsrep_hton->savepoint_set= wsrep_savepoint_set;
- wsrep_hton->savepoint_rollback= wsrep_savepoint_rollback;
- wsrep_hton->commit= wsrep_commit;
- wsrep_hton->rollback= wsrep_rollback;
- wsrep_hton->prepare= wsrep_prepare;
- wsrep_hton->flags= HTON_NOT_USER_SELECTABLE | HTON_HIDDEN; // todo: fix flags
- return 0;
-}
-
-
-struct st_mysql_storage_engine wsrep_storage_engine=
-{ MYSQL_HANDLERTON_INTERFACE_VERSION };
-
-
-maria_declare_plugin(wsrep)
-{
- MYSQL_STORAGE_ENGINE_PLUGIN,
- &wsrep_storage_engine,
- "wsrep",
- "Codership Oy",
- "A pseudo storage engine to represent transactions in multi-master "
- "synchornous replication",
- PLUGIN_LICENSE_GPL,
- wsrep_hton_init, /* Plugin Init */
- NULL, /* Plugin Deinit */
- 0x0100 /* 1.0 */,
- NULL, /* status variables */
- NULL, /* system variables */
- "1.0", /* string version */
- MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
-}
-maria_declare_plugin_end;
diff --git a/sql/wsrep_mutex.h b/sql/wsrep_mutex.h
new file mode 100644
index 00000000000..3454b44e0ec
--- /dev/null
+++ b/sql/wsrep_mutex.h
@@ -0,0 +1,50 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef WSREP_MUTEX_H
+#define WSREP_MUTEX_H
+
+/* wsrep-lib */
+#include "wsrep/mutex.hpp"
+
+/* implementation */
+#include "my_pthread.h"
+
+class Wsrep_mutex : public wsrep::mutex
+{
+public:
+ Wsrep_mutex(mysql_mutex_t& mutex)
+ : m_mutex(mutex)
+ { }
+
+ void lock()
+ {
+ mysql_mutex_lock(&m_mutex);
+ }
+
+ void unlock()
+ {
+ mysql_mutex_unlock(&m_mutex);
+ }
+
+ void* native()
+ {
+ return &m_mutex;
+ }
+private:
+ mysql_mutex_t& m_mutex;
+};
+
+#endif /* WSREP_MUTEX_H */
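A minimal usage sketch, not part of this patch: Wsrep_mutex adapts a server mysql_mutex_t to the wsrep::mutex interface that wsrep-lib expects, so it can be locked through wsrep-lib's RAII helpers (assuming wsrep/lock.hpp provides wsrep::unique_lock):

#include "wsrep_mutex.h"
#include "wsrep/lock.hpp"

mysql_mutex_t LOCK_example;              /* assumed initialized elsewhere */
Wsrep_mutex example_mutex(LOCK_example); /* adapter handed to wsrep-lib */

void example_critical_section()
{
  /* Locks LOCK_example in the constructor, unlocks on scope exit. */
  wsrep::unique_lock<wsrep::mutex> lock(example_mutex);
  /* ... work protected by LOCK_example ... */
}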
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index 7e43fdfb16f..dba793aba55 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -14,7 +14,12 @@
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
#include "sql_plugin.h" /* wsrep_plugins_pre_init() */
+#include "my_global.h"
+#include "wsrep_server_state.h"
+
+#include "mariadb.h"
#include <mysqld.h>
+#include <transaction.h>
#include <sql_class.h>
#include <sql_parse.h>
#include <sql_base.h> /* find_temporary_table() */
@@ -33,26 +38,32 @@
#include "wsrep_var.h"
#include "wsrep_binlog.h"
#include "wsrep_applier.h"
+#include "wsrep_schema.h"
#include "wsrep_xid.h"
+#include "wsrep_trans_observer.h"
+#include "mysql/service_wsrep.h"
#include <cstdio>
#include <cstdlib>
+#include <string>
#include "log_event.h"
#include <slave.h>
-wsrep_t *wsrep = NULL;
-/*
- wsrep_emulate_bin_log is a flag to tell that binlog has not been configured.
- wsrep needs to get binlog events from transaction cache even when binlog is
- not enabled, wsrep_emulate_bin_log opens needed code paths to make this
- possible
-*/
-my_bool wsrep_emulate_bin_log = FALSE; // activating parts of binlog interface
+#include <sstream>
+
+/* wsrep-lib */
+Wsrep_server_state* Wsrep_server_state::m_instance;
+
+my_bool wsrep_emulate_bin_log = FALSE; // activating parts of binlog interface
#ifdef GTID_SUPPORT
/* Sidno in global_sid_map corresponding to group uuid */
rpl_sidno wsrep_sidno= -1;
#endif /* GTID_SUPPORT */
my_bool wsrep_preordered_opt= FALSE;
+/* Streaming Replication */
+const char *wsrep_fragment_units[]= { "bytes", "rows", "statements", NullS };
+const char *wsrep_SR_store_types[]= { "none", "table", NullS };
+
/*
* Begin configuration options
*/
@@ -74,7 +85,7 @@ const char *wsrep_data_home_dir;
const char *wsrep_dbug_option;
const char *wsrep_notify_cmd;
-my_bool wsrep_debug; // Enable debug level logging
+ulong wsrep_debug; // Debug level logging
my_bool wsrep_convert_LOCK_to_trx; // Convert locking sessions to trx
my_bool wsrep_auto_increment_control; // Control auto increment variables
my_bool wsrep_drupal_282555_workaround; // Retry autoinc insert after dupkey
@@ -83,7 +94,7 @@ ulong wsrep_certification_rules = WSREP_CERTIFICATION_RULES_STRICT;
my_bool wsrep_recovery; // Recovery
my_bool wsrep_replicate_myisam; // Enable MyISAM replication
my_bool wsrep_log_conflicts;
-my_bool wsrep_load_data_splitting; // Commit load data every 10K intervals
+my_bool wsrep_load_data_splitting= 0; // Commit load data every 10K intervals
my_bool wsrep_slave_UK_checks; // Slave thread does UK checks
my_bool wsrep_slave_FK_checks; // Slave thread does FK checks
my_bool wsrep_restart_slave; // Should mysql slave thread be
@@ -108,7 +119,13 @@ my_bool wsrep_restart_slave_activated= 0; // Node has dropped, and slave
bool wsrep_new_cluster= false; // Bootstrap the cluster?
int wsrep_slave_count_change= 0; // No. of appliers to stop/start
int wsrep_to_isolation= 0; // No. of active TO isolation threads
-long wsrep_max_protocol_version= 3; // Maximum protocol version to use
+long wsrep_max_protocol_version= 4; // Maximum protocol version to use
+long int wsrep_protocol_version= wsrep_max_protocol_version;
+ulong wsrep_trx_fragment_unit= WSREP_FRAG_BYTES;
+ // unit for fragment size
+ulong wsrep_SR_store_type= WSREP_SR_STORE_TABLE;
+uint wsrep_ignore_apply_errors= 0;
+
/*
* End configuration options
@@ -124,29 +141,37 @@ mysql_mutex_t LOCK_wsrep_sst;
mysql_cond_t COND_wsrep_sst;
mysql_mutex_t LOCK_wsrep_sst_init;
mysql_cond_t COND_wsrep_sst_init;
-mysql_mutex_t LOCK_wsrep_rollback;
-mysql_cond_t COND_wsrep_rollback;
-wsrep_aborting_thd_t wsrep_aborting_thd= NULL;
mysql_mutex_t LOCK_wsrep_replaying;
mysql_cond_t COND_wsrep_replaying;
mysql_mutex_t LOCK_wsrep_slave_threads;
+mysql_cond_t COND_wsrep_slave_threads;
+mysql_mutex_t LOCK_wsrep_cluster_config;
mysql_mutex_t LOCK_wsrep_desync;
mysql_mutex_t LOCK_wsrep_config_state;
+mysql_mutex_t LOCK_wsrep_group_commit;
+mysql_mutex_t LOCK_wsrep_SR_pool;
+mysql_mutex_t LOCK_wsrep_SR_store;
int wsrep_replaying= 0;
-ulong wsrep_running_threads = 0; // # of currently running wsrep threads
+ulong wsrep_running_threads= 0; // # of currently running wsrep threads
ulong my_bind_addr;
#ifdef HAVE_PSI_INTERFACE
-PSI_mutex_key key_LOCK_wsrep_rollback,
+PSI_mutex_key
key_LOCK_wsrep_replaying, key_LOCK_wsrep_ready, key_LOCK_wsrep_sst,
key_LOCK_wsrep_sst_thread, key_LOCK_wsrep_sst_init,
key_LOCK_wsrep_slave_threads, key_LOCK_wsrep_desync,
- key_LOCK_wsrep_config_state;
+ key_LOCK_wsrep_config_state, key_LOCK_wsrep_cluster_config,
+ key_LOCK_wsrep_group_commit,
+ key_LOCK_wsrep_SR_pool,
+ key_LOCK_wsrep_SR_store,
+ key_LOCK_wsrep_thd_queue;
-PSI_cond_key key_COND_wsrep_rollback,
+PSI_cond_key key_COND_wsrep_thd,
key_COND_wsrep_replaying, key_COND_wsrep_ready, key_COND_wsrep_sst,
- key_COND_wsrep_sst_init, key_COND_wsrep_sst_thread;
+ key_COND_wsrep_sst_init, key_COND_wsrep_sst_thread,
+ key_COND_wsrep_thd_queue, key_COND_wsrep_slave_threads;
+
PSI_file_key key_file_wsrep_gra_log;
@@ -157,11 +182,14 @@ static PSI_mutex_info wsrep_mutexes[]=
{ &key_LOCK_wsrep_sst_thread, "wsrep_sst_thread", 0},
{ &key_LOCK_wsrep_sst_init, "LOCK_wsrep_sst_init", PSI_FLAG_GLOBAL},
{ &key_LOCK_wsrep_sst, "LOCK_wsrep_sst", PSI_FLAG_GLOBAL},
- { &key_LOCK_wsrep_rollback, "LOCK_wsrep_rollback", PSI_FLAG_GLOBAL},
{ &key_LOCK_wsrep_replaying, "LOCK_wsrep_replaying", PSI_FLAG_GLOBAL},
{ &key_LOCK_wsrep_slave_threads, "LOCK_wsrep_slave_threads", PSI_FLAG_GLOBAL},
+ { &key_LOCK_wsrep_cluster_config, "LOCK_wsrep_cluster_config", PSI_FLAG_GLOBAL},
{ &key_LOCK_wsrep_desync, "LOCK_wsrep_desync", PSI_FLAG_GLOBAL},
- { &key_LOCK_wsrep_config_state, "LOCK_wsrep_config_state", PSI_FLAG_GLOBAL}
+ { &key_LOCK_wsrep_config_state, "LOCK_wsrep_config_state", PSI_FLAG_GLOBAL},
+ { &key_LOCK_wsrep_group_commit, "LOCK_wsrep_group_commit", PSI_FLAG_GLOBAL},
+ { &key_LOCK_wsrep_SR_pool, "LOCK_wsrep_SR_pool", PSI_FLAG_GLOBAL},
+ { &key_LOCK_wsrep_SR_store, "LOCK_wsrep_SR_store", PSI_FLAG_GLOBAL}
};
static PSI_cond_info wsrep_conds[]=
@@ -170,8 +198,9 @@ static PSI_cond_info wsrep_conds[]=
{ &key_COND_wsrep_sst, "COND_wsrep_sst", PSI_FLAG_GLOBAL},
{ &key_COND_wsrep_sst_init, "COND_wsrep_sst_init", PSI_FLAG_GLOBAL},
{ &key_COND_wsrep_sst_thread, "wsrep_sst_thread", 0},
- { &key_COND_wsrep_rollback, "COND_wsrep_rollback", PSI_FLAG_GLOBAL},
- { &key_COND_wsrep_replaying, "COND_wsrep_replaying", PSI_FLAG_GLOBAL}
+ { &key_COND_wsrep_thd, "THD::COND_wsrep_thd", 0},
+ { &key_COND_wsrep_replaying, "COND_wsrep_replaying", PSI_FLAG_GLOBAL},
+ { &key_COND_wsrep_slave_threads, "COND_wsrep_slave_threads", PSI_FLAG_GLOBAL}
};
static PSI_file_info wsrep_files[]=
@@ -180,310 +209,218 @@ static PSI_file_info wsrep_files[]=
};
#endif
-my_bool wsrep_inited = 0; // initialized ?
+my_bool wsrep_inited= 0; // initialized ?
-static wsrep_uuid_t cluster_uuid = WSREP_UUID_UNDEFINED;
+static wsrep_uuid_t node_uuid= WSREP_UUID_UNDEFINED;
static char cluster_uuid_str[40]= { 0, };
-static const char* cluster_status_str[WSREP_VIEW_MAX] =
-{
- "Primary",
- "non-Primary",
- "Disconnected"
-};
static char provider_name[256]= { 0, };
static char provider_version[256]= { 0, };
static char provider_vendor[256]= { 0, };
/*
- * wsrep status variables
+ * Wsrep status variables. LOCK_status must be locked when modifying
+ * these variables.
*/
-my_bool wsrep_connected = FALSE;
-my_bool wsrep_ready = FALSE; // node can accept queries
-const char* wsrep_cluster_state_uuid = cluster_uuid_str;
-long long wsrep_cluster_conf_id = WSREP_SEQNO_UNDEFINED;
-const char* wsrep_cluster_status = cluster_status_str[WSREP_VIEW_DISCONNECTED];
-long wsrep_cluster_size = 0;
-long wsrep_local_index = -1;
-long long wsrep_local_bf_aborts = 0;
-const char* wsrep_provider_name = provider_name;
-const char* wsrep_provider_version = provider_version;
-const char* wsrep_provider_vendor = provider_vendor;
+my_bool wsrep_connected = FALSE;
+my_bool wsrep_ready = FALSE;
+const char* wsrep_cluster_state_uuid= cluster_uuid_str;
+long long wsrep_cluster_conf_id = WSREP_SEQNO_UNDEFINED;
+const char* wsrep_cluster_status = "Disconnected";
+long wsrep_cluster_size = 0;
+long wsrep_local_index = -1;
+long long wsrep_local_bf_aborts = 0;
+const char* wsrep_provider_name = provider_name;
+const char* wsrep_provider_version = provider_version;
+const char* wsrep_provider_vendor = provider_vendor;
+char* wsrep_provider_capabilities = NULL;
+char* wsrep_cluster_capabilities = NULL;
/* End wsrep status variables */
-wsrep_uuid_t local_uuid = WSREP_UUID_UNDEFINED;
-wsrep_seqno_t local_seqno = WSREP_SEQNO_UNDEFINED;
-long wsrep_protocol_version = 3;
-
wsp::Config_state *wsrep_config_state;
-// Boolean denoting if server is in initial startup phase. This is needed
-// to make sure that main thread waiting in wsrep_sst_wait() is signaled
-// if there was no state gap on receiving first view event.
-static my_bool wsrep_startup = TRUE;
+wsrep_uuid_t local_uuid = WSREP_UUID_UNDEFINED;
+wsrep_seqno_t local_seqno = WSREP_SEQNO_UNDEFINED;
+wsp::node_status local_status;
-static void wsrep_log_cb(wsrep_log_level_t level, const char *msg) {
- switch (level) {
- case WSREP_LOG_INFO:
- sql_print_information("WSREP: %s", msg);
- break;
- case WSREP_LOG_WARN:
- sql_print_warning("WSREP: %s", msg);
- break;
- case WSREP_LOG_ERROR:
- case WSREP_LOG_FATAL:
+/* Global Wsrep_schema object, see wsrep_schema.h. */
+Wsrep_schema *wsrep_schema= 0;
+
+static void wsrep_log_cb(wsrep::log::level level, const char *msg)
+{
+ /*
+ Silence all wsrep related logging from lib and provider if
+ wsrep is not enabled.
+ */
+ if (WSREP_ON)
+ {
+ switch (level) {
+ case wsrep::log::info:
+ sql_print_information("WSREP: %s", msg);
+ break;
+ case wsrep::log::warning:
+ sql_print_warning("WSREP: %s", msg);
+ break;
+ case wsrep::log::error:
sql_print_error("WSREP: %s", msg);
break;
- case WSREP_LOG_DEBUG:
- if (wsrep_debug) sql_print_information ("[Debug] WSREP: %s", msg);
- default:
- break;
+ case wsrep::log::debug:
+ if (wsrep_debug) sql_print_information ("[Debug] WSREP: %s", msg);
+ default:
+ break;
+ }
}
}
-void wsrep_log(void (*fun)(const char *, ...), const char *format, ...)
-{
- va_list args;
- char msg[1024];
- va_start(args, format);
- vsnprintf(msg, sizeof(msg) - 1, format, args);
- va_end(args);
- (fun)("WSREP: %s", msg);
-}
-
-
-static void wsrep_log_states (wsrep_log_level_t const level,
- const wsrep_uuid_t* const group_uuid,
- wsrep_seqno_t const group_seqno,
- const wsrep_uuid_t* const node_uuid,
- wsrep_seqno_t const node_seqno)
-{
- char uuid_str[37];
- char msg[256];
-
- wsrep_uuid_print (group_uuid, uuid_str, sizeof(uuid_str));
- snprintf (msg, 255, "WSREP: Group state: %s:%lld",
- uuid_str, (long long)group_seqno);
- wsrep_log_cb (level, msg);
-
- wsrep_uuid_print (node_uuid, uuid_str, sizeof(uuid_str));
- snprintf (msg, 255, "WSREP: Local state: %s:%lld",
- uuid_str, (long long)node_seqno);
- wsrep_log_cb (level, msg);
-}
-
-#ifdef GTID_SUPPORT
-void wsrep_init_sidno(const wsrep_uuid_t& wsrep_uuid)
+void wsrep_init_sidno(const wsrep::id& uuid)
{
- /* generate new Sid map entry from inverted uuid */
- rpl_sid sid;
- wsrep_uuid_t ltid_uuid;
-
- for (size_t i= 0; i < sizeof(ltid_uuid.data); ++i)
+ /*
+ Protocol versions starting from 4 use the group gtid as is.
+ For older protocol versions, generate a new Sid map entry from the
+ inverted uuid.
+ */
+ rpl_gtid sid;
+ if (wsrep_protocol_version >= 4)
{
- ltid_uuid.data[i] = ~wsrep_uuid.data[i];
+ memcpy((void*)&sid, (const uchar*)uuid.data(),16);
}
-
- sid.copy_from(ltid_uuid.data);
+ else
+ {
+ wsrep_uuid_t ltid_uuid;
+ for (size_t i= 0; i < sizeof(ltid_uuid.data); ++i)
+ {
+ ltid_uuid.data[i]= ~((const uchar*)uuid.data())[i];
+ }
+ memcpy((void*)&sid, (const uchar*)ltid_uuid.data,16);
+ }
+#ifdef GTID_SUPPORT
global_sid_lock->wrlock();
wsrep_sidno= global_sid_map->add_sid(sid);
WSREP_INFO("Initialized wsrep sidno %d", wsrep_sidno);
global_sid_lock->unlock();
+#endif
}
-#endif /* GTID_SUPPORT */
-static wsrep_cb_status_t
-wsrep_view_handler_cb (void* app_ctx,
- void* recv_ctx,
- const wsrep_view_info_t* view,
- const char* state,
- size_t state_len,
- void** sst_req,
- size_t* sst_req_len)
+void wsrep_init_schema()
{
- *sst_req = NULL;
- *sst_req_len = 0;
+ DBUG_ASSERT(!wsrep_schema);
- wsrep_member_status_t memb_status= wsrep_config_state->get_status();
-
- if (memcmp(&cluster_uuid, &view->state_id.uuid, sizeof(wsrep_uuid_t)))
+ WSREP_INFO("wsrep_init_schema %p", wsrep_schema);
+ if (!wsrep_schema)
{
- memcpy(&cluster_uuid, &view->state_id.uuid, sizeof(cluster_uuid));
-
- wsrep_uuid_print (&cluster_uuid, cluster_uuid_str,
- sizeof(cluster_uuid_str));
- }
-
- wsrep_cluster_conf_id= view->view;
- wsrep_cluster_status= cluster_status_str[view->status];
- wsrep_cluster_size= view->memb_num;
- wsrep_local_index= view->my_idx;
-
- WSREP_INFO("New cluster view: global state: %s:%lld, view# %lld: %s, "
- "number of nodes: %ld, my index: %ld, protocol version %d",
- wsrep_cluster_state_uuid, (long long)view->state_id.seqno,
- (long long)wsrep_cluster_conf_id, wsrep_cluster_status,
- wsrep_cluster_size, wsrep_local_index, view->proto_ver);
-
- /* Proceed further only if view is PRIMARY */
- if (WSREP_VIEW_PRIMARY != view->status)
- {
-#ifdef HAVE_QUERY_CACHE
- // query cache must be initialised by now
- query_cache.flush();
-#endif /* HAVE_QUERY_CACHE */
-
- wsrep_ready_set(FALSE);
- memb_status= WSREP_MEMBER_UNDEFINED;
- /* Always record local_uuid and local_seqno in non-prim since this
- * may lead to re-initializing provider and start position is
- * determined according to these variables */
- // WRONG! local_uuid should be the last primary configuration uuid we were
- // a member of. local_seqno should be updated in commit calls.
- // local_uuid= cluster_uuid;
- // local_seqno= view->first - 1;
- goto out;
- }
-
- switch (view->proto_ver)
- {
- case 0:
- case 1:
- case 2:
- case 3:
- // version change
- if (view->proto_ver != wsrep_protocol_version)
- {
- my_bool wsrep_ready_saved= wsrep_ready_get();
- wsrep_ready_set(FALSE);
- WSREP_INFO("closing client connections for "
- "protocol change %ld -> %d",
- wsrep_protocol_version, view->proto_ver);
- wsrep_close_client_connections(TRUE);
- wsrep_protocol_version= view->proto_ver;
- wsrep_ready_set(wsrep_ready_saved);
- }
- break;
- default:
- WSREP_ERROR("Unsupported application protocol version: %d",
- view->proto_ver);
- unireg_abort(1);
- }
-
- if (view->state_gap)
- {
- WSREP_WARN("Gap in state sequence. Need state transfer.");
-
- /* After that wsrep will call wsrep_sst_prepare. */
- /* keep ready flag 0 until we receive the snapshot */
- wsrep_ready_set(FALSE);
-
- /* Close client connections to ensure that they don't interfere
- * with SST. Necessary only if storage engines are initialized
- * before SST.
- * TODO: Just killing all ongoing transactions should be enough
- * since wsrep_ready is OFF and no new transactions can start.
- */
- if (!wsrep_before_SE())
+ wsrep_schema= new Wsrep_schema();
+ if (wsrep_schema->init())
{
- WSREP_DEBUG("[debug]: closing client connections for PRIM");
- wsrep_close_client_connections(FALSE);
+ WSREP_ERROR("Failed to init wsrep schema");
+ unireg_abort(1);
}
+ }
+}
- ssize_t const req_len= wsrep_sst_prepare (sst_req);
+void wsrep_deinit_schema()
+{
+ delete wsrep_schema;
+ wsrep_schema= 0;
+}
- if (req_len < 0)
+void wsrep_recover_sr_from_storage(THD *orig_thd)
+{
+ switch (wsrep_SR_store_type)
+ {
+ case WSREP_SR_STORE_TABLE:
+ if (!wsrep_schema)
{
- WSREP_ERROR("SST preparation failed: %zd (%s)", -req_len,
- strerror(-req_len));
- memb_status= WSREP_MEMBER_UNDEFINED;
+ WSREP_ERROR("Wsrep schema not initialized when trying to recover "
+ "streaming transactions");
+ unireg_abort(1);
}
- else
+ if (wsrep_schema->recover_sr_transactions(orig_thd))
{
- assert(sst_req != NULL);
- *sst_req_len= req_len;
- memb_status= WSREP_MEMBER_JOINER;
+ WSREP_ERROR("Failed to recover SR transactions from schema");
+ unireg_abort(1);
}
+ break;
+ default:
+ /* */
+ WSREP_ERROR("Unsupported wsrep SR store type: %lu", wsrep_SR_store_type);
+ unireg_abort(1);
+ break;
}
- else
- {
- /*
- * NOTE: Initialize wsrep_group_uuid here only if it wasn't initialized
- * before - OR - it was reinitilized on startup (lp:992840)
- */
- if (wsrep_startup)
+}
+
+/** Export the WSREP provider's capabilities as a human readable string.
+ * The result is saved in a dynamically allocated string of the form:
+ * :cap1:cap2:cap3:
+ */
+static void wsrep_capabilities_export(wsrep_cap_t const cap, char** str)
+{
+ static const char* names[] =
+ {
+ /* Keep in sync with wsrep/wsrep_api.h WSREP_CAP_* macros. */
+ "MULTI_MASTER",
+ "CERTIFICATION",
+ "PARALLEL_APPLYING",
+ "TRX_REPLAY",
+ "ISOLATION",
+ "PAUSE",
+ "CAUSAL_READS",
+ "CAUSAL_TRX",
+ "INCREMENTAL_WRITESET",
+ "SESSION_LOCKS",
+ "DISTRIBUTED_LOCKS",
+ "CONSISTENCY_CHECK",
+ "UNORDERED",
+ "ANNOTATION",
+ "PREORDERED",
+ "STREAMING",
+ "SNAPSHOT",
+ "NBO",
+ };
+
+ std::string s;
+ for (size_t i= 0; i < sizeof(names) / sizeof(names[0]); ++i)
+ {
+ if (cap & (1ULL << i))
{
- if (wsrep_before_SE())
- {
- wsrep_SE_init_grab();
- // Signal mysqld init thread to continue
- wsrep_sst_complete (&cluster_uuid, view->state_id.seqno, false);
- // and wait for SE initialization
- wsrep_SE_init_wait();
- }
- else
+ if (s.empty())
{
- local_uuid= cluster_uuid;
- local_seqno= view->state_id.seqno;
+ s= ":";
}
- /* Init storage engine XIDs from first view */
- wsrep_set_SE_checkpoint(local_uuid, local_seqno);
-#ifdef GTID_SUPPORT
- wsrep_init_sidno(local_uuid);
-#endif /* GTID_SUPPORT */
- memb_status= WSREP_MEMBER_JOINED;
+ s += names[i];
+ s += ":";
}
-
- // just some sanity check
- if (memcmp (&local_uuid, &cluster_uuid, sizeof (wsrep_uuid_t)))
- {
- WSREP_ERROR("Undetected state gap. Can't continue.");
- wsrep_log_states(WSREP_LOG_FATAL, &cluster_uuid, view->state_id.seqno,
- &local_uuid, -1);
- unireg_abort(1);
- }
- }
-
- if (wsrep_auto_increment_control)
- {
- global_system_variables.auto_increment_offset= view->my_idx + 1;
- global_system_variables.auto_increment_increment= view->memb_num;
}
- { /* capabilities may be updated on new configuration */
- uint64_t const caps(wsrep->capabilities (wsrep));
+ /* A read from the string pointed to by *str may be started at any time,
+   * so it must never point to free(3)d memory or a non-'\0'-terminated string. */
- my_bool const idc((caps & WSREP_CAP_INCREMENTAL_WRITESET) != 0);
- if (TRUE == wsrep_incremental_data_collection && FALSE == idc)
- {
- WSREP_WARN("Unsupported protocol downgrade: "
- "incremental data collection disabled. Expect abort.");
- }
- wsrep_incremental_data_collection = idc;
- }
+ char* const previous= *str;
-out:
- if (view->status == WSREP_VIEW_PRIMARY) wsrep_startup= FALSE;
- wsrep_config_state->set(memb_status, view);
+ *str= strdup(s.c_str());
- return WSREP_CB_SUCCESS;
+ if (previous != NULL)
+ {
+ free(previous);
+ }
}
-my_bool wsrep_ready_set (my_bool x)
+/* Verifies that SE position is consistent with the group position
+ * and initializes other variables */
+void wsrep_verify_SE_checkpoint(const wsrep_uuid_t& uuid,
+ wsrep_seqno_t const seqno)
{
- WSREP_DEBUG("Setting wsrep_ready to %d", x);
- if (mysql_mutex_lock (&LOCK_wsrep_ready)) abort();
- my_bool ret= (wsrep_ready != x);
- if (ret)
- {
- wsrep_ready= x;
- mysql_cond_signal (&COND_wsrep_ready);
- }
- mysql_mutex_unlock (&LOCK_wsrep_ready);
- return ret;
}
+/*
+ Wsrep is considered ready if
+ 1) Provider is not loaded (native mode)
+ 2) Server has reached synced state
+ 3) Server is in joiner mode and mysqldump SST method has been
+ specified
+ See Wsrep_server_service::log_state_change() for further details.
+ */
my_bool wsrep_ready_get (void)
{
if (mysql_mutex_lock (&LOCK_wsrep_ready)) abort();
@@ -500,178 +437,67 @@ int wsrep_show_ready(THD *thd, SHOW_VAR *var, char *buff)
return 0;
}
-// Wait until wsrep has reached ready state
-void wsrep_ready_wait ()
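+/* Copy the given cluster state UUID into the cluster_uuid_str buffer. */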
+void wsrep_update_cluster_state_uuid(const char* uuid)
{
- if (mysql_mutex_lock (&LOCK_wsrep_ready)) abort();
- while (!wsrep_ready)
- {
- WSREP_INFO("Waiting to reach ready state");
- mysql_cond_wait (&COND_wsrep_ready, &LOCK_wsrep_ready);
- }
- WSREP_INFO("ready state reached");
- mysql_mutex_unlock (&LOCK_wsrep_ready);
+ strncpy(cluster_uuid_str, uuid, sizeof(cluster_uuid_str) - 1);
}
-static void wsrep_synced_cb(void* app_ctx)
+static void wsrep_init_position()
{
- WSREP_INFO("Synchronized with group, ready for connections");
- my_bool signal_main= wsrep_ready_set(TRUE);
- wsrep_config_state->set(WSREP_MEMBER_SYNCED);
-
- if (signal_main)
- {
- wsrep_SE_init_grab();
- // Signal mysqld init thread to continue
- wsrep_sst_complete (&local_uuid, local_seqno, false);
- // and wait for SE initialization
- wsrep_SE_init_wait();
- }
- if (wsrep_restart_slave_activated)
- {
- int rcode;
- WSREP_INFO("MariaDB slave restart");
- wsrep_restart_slave_activated= FALSE;
-
- mysql_mutex_lock(&LOCK_active_mi);
- if ((rcode = start_slave_threads(0,
- 1 /* need mutex */,
- 0 /* no wait for start*/,
- active_mi,
- master_info_file,
- relay_log_info_file,
- SLAVE_SQL)))
- {
- WSREP_WARN("Failed to create slave threads: %d", rcode);
- }
- mysql_mutex_unlock(&LOCK_active_mi);
-
- }
}
-static void wsrep_init_position()
+/****************************************************************************
+ Helpers for wsrep_init()
+ ****************************************************************************/
+static std::string wsrep_server_name()
{
- /* read XIDs from storage engines */
- wsrep_uuid_t uuid;
- wsrep_seqno_t seqno;
- wsrep_get_SE_checkpoint(uuid, seqno);
-
- if (!memcmp(&uuid, &WSREP_UUID_UNDEFINED, sizeof(wsrep_uuid_t)))
- {
- WSREP_INFO("Read nil XID from storage engines, skipping position init");
- return;
- }
-
- char uuid_str[40] = {0, };
- wsrep_uuid_print(&uuid, uuid_str, sizeof(uuid_str));
- WSREP_INFO("Initial position: %s:%lld", uuid_str, (long long)seqno);
-
- if (!memcmp(&local_uuid, &WSREP_UUID_UNDEFINED, sizeof(local_uuid)) &&
- local_seqno == WSREP_SEQNO_UNDEFINED)
- {
- // Initial state
- local_uuid= uuid;
- local_seqno= seqno;
- }
- else if (memcmp(&local_uuid, &uuid, sizeof(local_uuid)) ||
- local_seqno != seqno)
- {
- WSREP_WARN("Initial position was provided by configuration or SST, "
- "avoiding override");
- }
+ std::string ret(wsrep_node_name ? wsrep_node_name : "");
+ return ret;
}
-extern char* my_bind_addr_str;
-
-int wsrep_init()
+static std::string wsrep_server_id()
{
- int rcode= -1;
- DBUG_ASSERT(wsrep_inited == 0);
-
- if (strcmp(wsrep_start_position, WSREP_START_POSITION_ZERO) &&
- wsrep_start_position_init(wsrep_start_position))
- {
- return 1;
- }
-
- wsrep_sst_auth_init();
-
- wsrep_ready_set(FALSE);
- assert(wsrep_provider);
-
- wsrep_init_position();
-
- if ((rcode= wsrep_load(wsrep_provider, &wsrep, wsrep_log_cb)) != WSREP_OK)
- {
- if (strcasecmp(wsrep_provider, WSREP_NONE))
- {
- WSREP_ERROR("wsrep_load(%s) failed: %s (%d). Reverting to no provider.",
- wsrep_provider, strerror(rcode), rcode);
- strcpy((char*)wsrep_provider, WSREP_NONE); // damn it's a dirty hack
- return wsrep_init();
- }
- else /* this is for recursive call above */
- {
- WSREP_ERROR("Could not revert to no provider: %s (%d). Need to abort.",
- strerror(rcode), rcode);
- unireg_abort(1);
- }
- }
+  /* Use an empty server_id so that the view change handler can
+     set the final server_id later on.
+  */
+ std::string ret("");
+ return ret;
+}
- if (!WSREP_PROVIDER_EXISTS)
- {
- // enable normal operation in case no provider is specified
- wsrep_ready_set(TRUE);
- wsrep_inited= 1;
- global_system_variables.wsrep_on = 0;
- wsrep_init_args args;
- args.logger_cb = wsrep_log_cb;
- args.options = (wsrep_provider_options) ?
- wsrep_provider_options : "";
- rcode = wsrep->init(wsrep, &args);
- if (rcode)
- {
- DBUG_PRINT("wsrep",("wsrep::init() failed: %d", rcode));
- WSREP_ERROR("wsrep::init() failed: %d, must shutdown", rcode);
- wsrep->free(wsrep);
- free(wsrep);
- wsrep = NULL;
- }
- return rcode;
- }
- else
- {
- global_system_variables.wsrep_on = 1;
- strncpy(provider_name,
- wsrep->provider_name, sizeof(provider_name) - 1);
- strncpy(provider_version,
- wsrep->provider_version, sizeof(provider_version) - 1);
- strncpy(provider_vendor,
- wsrep->provider_vendor, sizeof(provider_vendor) - 1);
- }
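+/* Resolve the node address: use wsrep_node_address if set, otherwise try to
+   guess a suitable address for this node. */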
+static std::string wsrep_server_node_address()
+{
+ std::string ret;
if (!wsrep_data_home_dir || strlen(wsrep_data_home_dir) == 0)
- wsrep_data_home_dir = mysql_real_data_home;
+ wsrep_data_home_dir= mysql_real_data_home;
/* Initialize node address */
- char node_addr[512]= { 0, };
- size_t const node_addr_max= sizeof(node_addr) - 1;
if (!wsrep_node_address || !strcmp(wsrep_node_address, ""))
{
- size_t const ret= wsrep_guess_ip(node_addr, node_addr_max);
- if (!(ret > 0 && ret < node_addr_max))
+ char node_addr[512]= {0, };
+ const size_t node_addr_max= sizeof(node_addr) - 1;
+ size_t guess_ip_ret= wsrep_guess_ip(node_addr, node_addr_max);
+ if (!(guess_ip_ret > 0 && guess_ip_ret < node_addr_max))
{
WSREP_WARN("Failed to guess base node address. Set it explicitly via "
"wsrep_node_address.");
- node_addr[0]= '\0';
+ }
+ else
+ {
+ ret= node_addr;
}
}
else
{
- strncpy(node_addr, wsrep_node_address, node_addr_max);
+ ret= wsrep_node_address;
}
+ return ret;
+}
- /* Initialize node's incoming address */
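+/* Determine the address advertised for incoming client connections, deriving
+   it from the bind address, node address and mysqld_port when
+   wsrep_node_incoming_address does not specify it explicitly. */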
+static std::string wsrep_server_incoming_address()
+{
+ std::string ret;
+ const std::string node_addr(wsrep_server_node_address());
char inc_addr[512]= { 0, };
size_t const inc_addr_max= sizeof (inc_addr);
@@ -686,7 +512,8 @@ int wsrep_init()
bool is_ipv6= false;
unsigned int my_bind_ip= INADDR_ANY; // default if not set
- if (my_bind_addr_str && strlen(my_bind_addr_str))
+ if (my_bind_addr_str && strlen(my_bind_addr_str) &&
+ strcmp(my_bind_addr_str, "*") != 0)
{
my_bind_ip= wsrep_check_ip(my_bind_addr_str, &is_ipv6);
}
@@ -705,22 +532,28 @@ int wsrep_init()
}
else /* mysqld binds to 0.0.0.0, try taking IP from wsrep_node_address. */
{
- size_t const node_addr_len= strlen(node_addr);
- if (node_addr_len > 0)
+ if (node_addr.size())
{
- wsp::Address addr(node_addr);
-
- if (!addr.is_valid())
+ size_t const ip_len= wsrep_host_len(node_addr.c_str(), node_addr.size());
+ if (ip_len + 7 /* :55555\0 */ < inc_addr_max)
+ {
+ memcpy (inc_addr, node_addr.c_str(), ip_len);
+ snprintf(inc_addr + ip_len, inc_addr_max - ip_len, ":%u",
+ (int)mysqld_port);
+ }
+ else
{
- WSREP_DEBUG("Could not parse node address : %s", node_addr);
- WSREP_WARN("Guessing address for incoming client connections failed. "
- "Try setting wsrep_node_incoming_address explicitly.");
- goto done;
+ WSREP_WARN("Guessing address for incoming client connections: "
+ "address too long.");
+ inc_addr[0]= '\0';
}
+ }
- const char *fmt= (addr.is_ipv6()) ? "[%s]:%u" : "%s:%u";
- snprintf(inc_addr, inc_addr_max, fmt, addr.get_address(),
- (int) mysqld_port);
+ if (!strlen(inc_addr))
+ {
+ WSREP_WARN("Guessing address for incoming client connections failed. "
+ "Try setting wsrep_node_incoming_address explicitly.");
+ WSREP_INFO("Node addr: %s", node_addr.c_str());
}
}
}
@@ -744,52 +577,179 @@ int wsrep_init()
snprintf(inc_addr, inc_addr_max, fmt, addr.get_address(), port);
}
+
+ done:
+ ret= wsrep_node_incoming_address;
+ return ret;
+}
-done:
- struct wsrep_init_args wsrep_args;
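+/* Return the wsrep working directory: wsrep_data_home_dir if set, otherwise
+   the server data directory. */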
+static std::string wsrep_server_working_dir()
+{
+ std::string ret;
+ if (!wsrep_data_home_dir || strlen(wsrep_data_home_dir) == 0)
+ {
+ ret= mysql_real_data_home;
+ }
+ else
+ {
+ ret= wsrep_data_home_dir;
+ }
+ return ret;
+}
- struct wsrep_gtid const state_id = { local_uuid, local_seqno };
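+/* Parse wsrep_start_position into the server's initial wsrep GTID. */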
+static wsrep::gtid wsrep_server_initial_position()
+{
+ wsrep::gtid ret;
+ WSREP_DEBUG("Server initial position: %s", wsrep_start_position);
+ std::istringstream is(wsrep_start_position);
+ is >> ret;
+ return ret;
+}
- wsrep_args.data_dir = wsrep_data_home_dir;
- wsrep_args.node_name = (wsrep_node_name) ? wsrep_node_name : "";
- wsrep_args.node_address = node_addr;
- wsrep_args.node_incoming = inc_addr;
- wsrep_args.options = (wsrep_provider_options) ?
- wsrep_provider_options : "";
- wsrep_args.proto_ver = wsrep_max_protocol_version;
+/*
+  Initialize provider-specific status variables
+ */
+static void wsrep_init_provider_status_variables()
+{
+ const wsrep::provider& provider=
+ Wsrep_server_state::instance().provider();
+ strncpy(provider_name,
+ provider.name().c_str(), sizeof(provider_name) - 1);
+ strncpy(provider_version,
+ provider.version().c_str(), sizeof(provider_version) - 1);
+ strncpy(provider_vendor,
+ provider.vendor().c_str(), sizeof(provider_vendor) - 1);
+}
+
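+/* Construct the Wsrep_server_state singleton from server configuration. */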
+int wsrep_init_server()
+{
+ wsrep::log::logger_fn(wsrep_log_cb);
+ try
+ {
+ std::string server_name;
+ std::string server_id;
+ std::string node_address;
+ std::string incoming_address;
+ std::string working_dir;
+ wsrep::gtid initial_position;
+
+ server_name= wsrep_server_name();
+ server_id= wsrep_server_id();
+ node_address= wsrep_server_node_address();
+ incoming_address= wsrep_server_incoming_address();
+ working_dir= wsrep_server_working_dir();
+ initial_position= wsrep_server_initial_position();
+
+ Wsrep_server_state::init_once(server_name,
+ incoming_address,
+ node_address,
+ working_dir,
+ initial_position,
+ wsrep_max_protocol_version);
+ Wsrep_server_state::instance().debug_log_level(wsrep_debug);
+ }
+ catch (const wsrep::runtime_error& e)
+ {
+    WSREP_ERROR("Failed to init wsrep server: %s", e.what());
+ return 1;
+ }
+ catch (const std::exception& e)
+ {
+    WSREP_ERROR("Failed to init wsrep server: %s", e.what());
+ }
+ return 0;
+}
- wsrep_args.state_id = &state_id;
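+/* Initialize wsrep GTID sidno and the wsrep schema, then report the server
+   state as initialized when WSREP is enabled. */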
+void wsrep_init_globals()
+{
+ wsrep_init_sidno(Wsrep_server_state::instance().connected_gtid().id());
+ wsrep_init_schema();
+ if (WSREP_ON)
+ {
+ Wsrep_server_state::instance().initialized();
+ }
+}
+
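+/* Tear down the wsrep schema and destroy the Wsrep_server_state singleton. */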
+void wsrep_deinit_server()
+{
+ wsrep_deinit_schema();
+ Wsrep_server_state::destroy();
+}
+
+int wsrep_init()
+{
+ assert(wsrep_provider);
+
+ wsrep_init_position();
+ wsrep_sst_auth_init();
+
+  if (strlen(wsrep_provider) == 0 ||
+ !strcmp(wsrep_provider, WSREP_NONE))
+ {
+ // enable normal operation in case no provider is specified
+ global_system_variables.wsrep_on= 0;
+ int err= Wsrep_server_state::instance().load_provider(wsrep_provider, wsrep_provider_options ? wsrep_provider_options : "");
+ if (err)
+ {
+ DBUG_PRINT("wsrep",("wsrep::init() failed: %d", err));
+ WSREP_ERROR("wsrep::init() failed: %d, must shutdown", err);
+ }
+ else
+ {
+ wsrep_init_provider_status_variables();
+ }
+ return err;
+ }
- wsrep_args.logger_cb = wsrep_log_cb;
- wsrep_args.view_handler_cb = wsrep_view_handler_cb;
- wsrep_args.apply_cb = wsrep_apply_cb;
- wsrep_args.commit_cb = wsrep_commit_cb;
- wsrep_args.unordered_cb = wsrep_unordered_cb;
- wsrep_args.sst_donate_cb = wsrep_sst_donate_cb;
- wsrep_args.synced_cb = wsrep_synced_cb;
+ global_system_variables.wsrep_on= 1;
- rcode = wsrep->init(wsrep, &wsrep_args);
+ if (wsrep_gtid_mode && opt_bin_log && !opt_log_slave_updates)
+ {
+ WSREP_ERROR("Option --log-slave-updates is required if "
+ "binlog is enabled, GTID mode is on and wsrep provider "
+ "is specified");
+ return 1;
+ }
- if (rcode)
+ if (!wsrep_data_home_dir || strlen(wsrep_data_home_dir) == 0)
+ wsrep_data_home_dir= mysql_real_data_home;
+
+ if (Wsrep_server_state::instance().load_provider(wsrep_provider,
+ wsrep_provider_options))
{
- DBUG_PRINT("wsrep",("wsrep::init() failed: %d", rcode));
- WSREP_ERROR("wsrep::init() failed: %d, must shutdown", rcode);
- wsrep->free(wsrep);
- free(wsrep);
- wsrep = NULL;
- } else {
- wsrep_inited= 1;
+ WSREP_ERROR("Failed to load provider");
+ return 1;
}
- return rcode;
-}
+ if (!wsrep_provider_is_SR_capable() &&
+ global_system_variables.wsrep_trx_fragment_size > 0)
+ {
+ WSREP_ERROR("The WSREP provider (%s) does not support streaming "
+ "replication but wsrep_trx_fragment_size is set to a "
+ "value other than 0 (%llu). Cannot continue. Either set "
+                "wsrep_trx_fragment_size to 0 or use a wsrep_provider that "
+ "supports streaming replication.",
+ wsrep_provider, global_system_variables.wsrep_trx_fragment_size);
+ Wsrep_server_state::instance().unload_provider();
+ return 1;
+ }
+ wsrep_inited= 1;
+
+ wsrep_init_provider_status_variables();
+ wsrep_capabilities_export(Wsrep_server_state::instance().provider().capabilities(),
+ &wsrep_provider_capabilities);
+ WSREP_DEBUG("SR storage init for: %s",
+ (wsrep_SR_store_type == WSREP_SR_STORE_TABLE) ? "table" : "void");
+
+ return 0;
+}
/* Initialize wsrep thread LOCKs and CONDs */
void wsrep_thr_init()
{
DBUG_ENTER("wsrep_thr_init");
- wsrep_config_state = new wsp::Config_state;
+ wsrep_config_state= new wsp::Config_state;
#ifdef HAVE_PSI_INTERFACE
mysql_mutex_register("sql", wsrep_mutexes, array_elements(wsrep_mutexes));
mysql_cond_register("sql", wsrep_conds, array_elements(wsrep_conds));
@@ -802,25 +762,27 @@ void wsrep_thr_init()
mysql_cond_init(key_COND_wsrep_sst, &COND_wsrep_sst, NULL);
mysql_mutex_init(key_LOCK_wsrep_sst_init, &LOCK_wsrep_sst_init, MY_MUTEX_INIT_FAST);
mysql_cond_init(key_COND_wsrep_sst_init, &COND_wsrep_sst_init, NULL);
- mysql_mutex_init(key_LOCK_wsrep_rollback, &LOCK_wsrep_rollback, MY_MUTEX_INIT_FAST);
- mysql_cond_init(key_COND_wsrep_rollback, &COND_wsrep_rollback, NULL);
mysql_mutex_init(key_LOCK_wsrep_replaying, &LOCK_wsrep_replaying, MY_MUTEX_INIT_FAST);
mysql_cond_init(key_COND_wsrep_replaying, &COND_wsrep_replaying, NULL);
mysql_mutex_init(key_LOCK_wsrep_slave_threads, &LOCK_wsrep_slave_threads, MY_MUTEX_INIT_FAST);
+ mysql_cond_init(key_COND_wsrep_slave_threads, &COND_wsrep_slave_threads, NULL);
+ mysql_mutex_init(key_LOCK_wsrep_cluster_config, &LOCK_wsrep_cluster_config, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_wsrep_desync, &LOCK_wsrep_desync, MY_MUTEX_INIT_FAST);
mysql_mutex_init(key_LOCK_wsrep_config_state, &LOCK_wsrep_config_state, MY_MUTEX_INIT_FAST);
+ mysql_mutex_init(key_LOCK_wsrep_group_commit, &LOCK_wsrep_group_commit, MY_MUTEX_INIT_FAST);
+ mysql_mutex_init(key_LOCK_wsrep_SR_pool,
+ &LOCK_wsrep_SR_pool, MY_MUTEX_INIT_FAST);
+ mysql_mutex_init(key_LOCK_wsrep_SR_store,
+ &LOCK_wsrep_SR_store, MY_MUTEX_INIT_FAST);
DBUG_VOID_RETURN;
}
-void wsrep_init_startup (bool first)
+void wsrep_init_startup (bool sst_first)
{
if (wsrep_init()) unireg_abort(1);
- wsrep_thr_lock_init(
- (wsrep_thd_is_brute_force_fun)wsrep_thd_is_BF,
- (wsrep_abort_thd_fun)wsrep_abort_thd,
- wsrep_debug, wsrep_convert_LOCK_to_trx,
- (wsrep_on_fun)wsrep_on);
+ wsrep_thr_lock_init(wsrep_thd_is_BF, wsrep_thd_bf_abort,
+ wsrep_debug, wsrep_convert_LOCK_to_trx, wsrep_on);
/*
Pre-initialize global_system_variables.table_plugin with a dummy engine
@@ -839,28 +801,54 @@ void wsrep_init_startup (bool first)
/* Skip replication start if no cluster address */
if (!wsrep_cluster_address || wsrep_cluster_address[0] == 0) return;
- if (first) wsrep_sst_grab(); // do it so we can wait for SST below
-
+ /*
+    Read the value of wsrep_new_cluster before wsrep_start_replication();
+    it is reset to FALSE inside wsrep_start_replication().
+ */
if (!wsrep_start_replication()) unireg_abort(1);
wsrep_create_rollbacker();
wsrep_create_appliers(1);
- if (first && !wsrep_sst_wait()) unireg_abort(1);// wait until SST is completed
+ Wsrep_server_state& server_state= Wsrep_server_state::instance();
+ /*
+ If the SST happens before server initialization, wait until the server
+ state reaches initializing. This indicates that
+ either SST was not necessary or SST has been delivered.
+
+ With mysqldump SST (!sst_first) wait until the server reaches
+    joiner state and proceed to accepting connections.
+ */
+ if (sst_first)
+ {
+ server_state.wait_until_state(Wsrep_server_state::s_initializing);
+ }
+ else
+ {
+ server_state.wait_until_state(Wsrep_server_state::s_joiner);
+ }
}
void wsrep_deinit(bool free_options)
{
DBUG_ASSERT(wsrep_inited == 1);
- wsrep_unload(wsrep);
- wsrep= 0;
+ WSREP_DEBUG("wsrep_deinit");
+
+ Wsrep_server_state::instance().unload_provider();
provider_name[0]= '\0';
provider_version[0]= '\0';
provider_vendor[0]= '\0';
wsrep_inited= 0;
+ if (wsrep_provider_capabilities != NULL)
+ {
+ char* p= wsrep_provider_capabilities;
+ wsrep_provider_capabilities= NULL;
+ free(p);
+ }
+
if (free_options)
{
wsrep_sst_auth_free();
@@ -872,28 +860,40 @@ void wsrep_thr_deinit()
{
if (!wsrep_config_state)
return; // Never initialized
+ WSREP_DEBUG("wsrep_thr_deinit");
mysql_mutex_destroy(&LOCK_wsrep_ready);
mysql_cond_destroy(&COND_wsrep_ready);
mysql_mutex_destroy(&LOCK_wsrep_sst);
mysql_cond_destroy(&COND_wsrep_sst);
mysql_mutex_destroy(&LOCK_wsrep_sst_init);
mysql_cond_destroy(&COND_wsrep_sst_init);
- mysql_mutex_destroy(&LOCK_wsrep_rollback);
- mysql_cond_destroy(&COND_wsrep_rollback);
mysql_mutex_destroy(&LOCK_wsrep_replaying);
mysql_cond_destroy(&COND_wsrep_replaying);
mysql_mutex_destroy(&LOCK_wsrep_slave_threads);
+ mysql_cond_destroy(&COND_wsrep_slave_threads);
+ mysql_mutex_destroy(&LOCK_wsrep_cluster_config);
mysql_mutex_destroy(&LOCK_wsrep_desync);
mysql_mutex_destroy(&LOCK_wsrep_config_state);
+ mysql_mutex_destroy(&LOCK_wsrep_group_commit);
+ mysql_mutex_destroy(&LOCK_wsrep_SR_pool);
+ mysql_mutex_destroy(&LOCK_wsrep_SR_store);
+
delete wsrep_config_state;
wsrep_config_state= 0; // Safety
+
+ if (wsrep_cluster_capabilities != NULL)
+ {
+ char* p= wsrep_cluster_capabilities;
+ wsrep_cluster_capabilities= NULL;
+ free(p);
+ }
}
void wsrep_recover()
{
char uuid_str[40];
- if (!memcmp(&local_uuid, &WSREP_UUID_UNDEFINED, sizeof(wsrep_uuid_t)) &&
+ if (wsrep_uuid_compare(&local_uuid, &WSREP_UUID_UNDEFINED) == 0 &&
local_seqno == -2)
{
wsrep_uuid_print(&local_uuid, uuid_str, sizeof(uuid_str));
@@ -901,43 +901,60 @@ void wsrep_recover()
uuid_str, (long long)local_seqno);
return;
}
- wsrep_uuid_t uuid;
- wsrep_seqno_t seqno;
- wsrep_get_SE_checkpoint(uuid, seqno);
- wsrep_uuid_print(&uuid, uuid_str, sizeof(uuid_str));
- WSREP_INFO("Recovered position: %s:%lld", uuid_str, (long long)seqno);
+ wsrep::gtid gtid= wsrep_get_SE_checkpoint();
+ std::ostringstream oss;
+ oss << gtid;
+ WSREP_INFO("Recovered position: %s", oss.str().c_str());
}
void wsrep_stop_replication(THD *thd)
{
- WSREP_INFO("Stop replication");
- if (!wsrep)
+ WSREP_INFO("Stop replication by %llu", (thd) ? thd->thread_id : 0);
+ if (Wsrep_server_state::instance().state() !=
+ Wsrep_server_state::s_disconnected)
{
- WSREP_INFO("Provider was not loaded, in stop replication");
- return;
+ WSREP_DEBUG("Disconnect provider");
+ Wsrep_server_state::instance().disconnect();
+ Wsrep_server_state::instance().wait_until_state(Wsrep_server_state::s_disconnected);
}
- /* disconnect from group first to get wsrep_ready == FALSE */
- WSREP_DEBUG("Provider disconnect");
- wsrep->disconnect(wsrep);
+  /* The caller's own connection must not be terminated by
+     wsrep_close_client_connections(); roll back its transaction instead.
+  */
+ if (thd && !thd->wsrep_applier) trans_rollback(thd);
+ wsrep_close_client_connections(TRUE, thd);
+
+ /* wait until appliers have stopped */
+ wsrep_wait_appliers_close(thd);
+
+ node_uuid= WSREP_UUID_UNDEFINED;
+}
- wsrep_connected= FALSE;
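+/* Disconnect from the cluster and stop appliers as part of server shutdown. */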
+void wsrep_shutdown_replication()
+{
+ WSREP_INFO("Shutdown replication");
+ if (Wsrep_server_state::instance().state() != wsrep::server_state::s_disconnected)
+ {
+ WSREP_DEBUG("Disconnect provider");
+ Wsrep_server_state::instance().disconnect();
+ Wsrep_server_state::instance().wait_until_state(Wsrep_server_state::s_disconnected);
+ }
wsrep_close_client_connections(TRUE);
/* wait until appliers have stopped */
- wsrep_wait_appliers_close(thd);
+ wsrep_wait_appliers_close(NULL);
+ node_uuid= WSREP_UUID_UNDEFINED;
- return;
+ /* Undocking the thread specific data. */
+ my_pthread_setspecific_ptr(THR_THD, NULL);
}
bool wsrep_start_replication()
{
- wsrep_status_t rcode;
-
- /* wsrep provider must be loaded. */
- DBUG_ASSERT(wsrep);
+ int rcode;
+ WSREP_DEBUG("wsrep_start_replication");
/*
if provider is trivial, don't even try to connect,
@@ -946,34 +963,28 @@ bool wsrep_start_replication()
if (!WSREP_PROVIDER_EXISTS)
{
// enable normal operation in case no provider is specified
- wsrep_ready_set(TRUE);
return true;
}
if (!wsrep_cluster_address || wsrep_cluster_address[0]== 0)
{
// if provider is non-trivial, but no address is specified, wait for address
- wsrep_ready_set(FALSE);
+ WSREP_DEBUG("wsrep_start_replication exit due to empty address");
return true;
}
- bool const bootstrap= wsrep_new_cluster;
+ bool const bootstrap(TRUE == wsrep_new_cluster);
+ wsrep_new_cluster= FALSE;
WSREP_INFO("Start replication");
- if (wsrep_new_cluster)
- {
- WSREP_INFO("'wsrep-new-cluster' option used, bootstrapping the cluster");
- wsrep_new_cluster= false;
- }
-
- if ((rcode = wsrep->connect(wsrep,
- wsrep_cluster_name,
- wsrep_cluster_address,
- wsrep_sst_donor,
- bootstrap)))
+ if ((rcode= Wsrep_server_state::instance().connect(
+ wsrep_cluster_name,
+ wsrep_cluster_address,
+ wsrep_sst_donor,
+ bootstrap)))
{
- DBUG_PRINT("wsrep",("wsrep->connect(%s) failed: %d",
+ DBUG_PRINT("wsrep",("wsrep_ptr->connect(%s) failed: %d",
wsrep_cluster_address, rcode));
WSREP_ERROR("wsrep::connect(%s) failed: %d",
wsrep_cluster_address, rcode);
@@ -981,15 +992,12 @@ bool wsrep_start_replication()
}
else
{
- wsrep_connected= TRUE;
-
- char* opts= wsrep->options_get(wsrep);
- if (opts)
+ try
{
- wsrep_provider_options_init(opts);
- free(opts);
+ std::string opts= Wsrep_server_state::instance().provider().options();
+ wsrep_provider_options_init(opts.c_str());
}
- else
+ catch (const wsrep::runtime_error&)
{
WSREP_WARN("Failed to get wsrep options");
}
@@ -1000,40 +1008,50 @@ bool wsrep_start_replication()
bool wsrep_must_sync_wait (THD* thd, uint mask)
{
- return (thd->variables.wsrep_sync_wait & mask) &&
+ bool ret;
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ ret= (thd->variables.wsrep_sync_wait & mask) &&
+ thd->wsrep_client_thread &&
thd->variables.wsrep_on &&
!(thd->variables.wsrep_dirty_reads &&
!is_update_query(thd->lex->sql_command)) &&
!thd->in_active_multi_stmt_transaction() &&
- thd->wsrep_conflict_state != REPLAYING &&
- thd->wsrep_sync_wait_gtid.seqno == WSREP_SEQNO_UNDEFINED;
+ thd->wsrep_trx().state() !=
+ wsrep::transaction::s_replaying &&
+ thd->wsrep_cs().sync_wait_gtid().is_undefined();
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ return ret;
}
bool wsrep_sync_wait (THD* thd, uint mask)
{
if (wsrep_must_sync_wait(thd, mask))
{
- WSREP_DEBUG("wsrep_sync_wait: thd->variables.wsrep_sync_wait = %u, mask = %u",
- thd->variables.wsrep_sync_wait, mask);
- // This allows autocommit SELECTs and a first SELECT after SET AUTOCOMMIT=0
- // TODO: modify to check if thd has locked any rows.
- wsrep_status_t ret= wsrep->causal_read (wsrep, &thd->wsrep_sync_wait_gtid);
-
- if (unlikely(WSREP_OK != ret))
+ WSREP_DEBUG("wsrep_sync_wait: thd->variables.wsrep_sync_wait= %u, "
+ "mask= %u, thd->variables.wsrep_on= %d",
+ thd->variables.wsrep_sync_wait, mask,
+ thd->variables.wsrep_on);
+ /*
+ This allows autocommit SELECTs and a first SELECT after SET AUTOCOMMIT=0
+ TODO: modify to check if thd has locked any rows.
+ */
+ if (thd->wsrep_cs().sync_wait(-1))
{
const char* msg;
int err;
- // Possibly relevant error codes:
- // ER_CHECKREAD, ER_ERROR_ON_READ, ER_INVALID_DEFAULT, ER_EMPTY_QUERY,
- // ER_FUNCTION_NOT_DEFINED, ER_NOT_ALLOWED_COMMAND, ER_NOT_SUPPORTED_YET,
- // ER_FEATURE_DISABLED, ER_QUERY_INTERRUPTED
+ /*
+ Possibly relevant error codes:
+ ER_CHECKREAD, ER_ERROR_ON_READ, ER_INVALID_DEFAULT, ER_EMPTY_QUERY,
+ ER_FUNCTION_NOT_DEFINED, ER_NOT_ALLOWED_COMMAND, ER_NOT_SUPPORTED_YET,
+ ER_FEATURE_DISABLED, ER_QUERY_INTERRUPTED
+ */
- switch (ret)
+ switch (thd->wsrep_cs().current_error())
{
- case WSREP_NOT_IMPLEMENTED:
+ case wsrep::e_not_supported_error:
msg= "synchronous reads by wsrep backend. "
- "Please unset wsrep_causal_reads variable.";
+ "Please unset wsrep_causal_reads variable.";
err= ER_NOT_SUPPORTED_YET;
break;
default:
@@ -1051,6 +1069,27 @@ bool wsrep_sync_wait (THD* thd, uint mask)
return false;
}
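+/* Wait until the node has applied events up to the given GTID, or perform a
+   causal read wait when no GTID is given. Returns the provider status. */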
+enum wsrep::provider::status
+wsrep_sync_wait_upto (THD* thd,
+ wsrep_gtid_t* upto,
+ int timeout)
+{
+ DBUG_ASSERT(upto);
+ enum wsrep::provider::status ret;
+ if (upto)
+ {
+ wsrep::gtid upto_gtid(wsrep::id(upto->uuid.data, sizeof(upto->uuid.data)),
+ wsrep::seqno(upto->seqno));
+ ret= Wsrep_server_state::instance().wait_for_gtid(upto_gtid, timeout);
+ }
+ else
+ {
+ ret= Wsrep_server_state::instance().causal_read(timeout).second;
+ }
+ WSREP_DEBUG("wsrep_sync_wait_upto: %d", ret);
+ return ret;
+}
+
void wsrep_keys_free(wsrep_key_arr_t* key_arr)
{
for (size_t i= 0; i < key_arr->keys_len; ++i)
@@ -1062,7 +1101,6 @@ void wsrep_keys_free(wsrep_key_arr_t* key_arr)
key_arr->keys_len= 0;
}
-
/*!
* @param db Database string
* @param table Table string
@@ -1074,9 +1112,9 @@ void wsrep_keys_free(wsrep_key_arr_t* key_arr)
*/
static bool wsrep_prepare_key_for_isolation(const char* db,
- const char* table,
- wsrep_buf_t* key,
- size_t* key_len)
+ const char* table,
+ wsrep_buf_t* key,
+ size_t* key_len)
{
if (*key_len < 2) return false;
@@ -1088,11 +1126,11 @@ static bool wsrep_prepare_key_for_isolation(const char* db,
case 1:
case 2:
case 3:
+ case 4:
{
*key_len= 0;
if (db)
{
- // sql_print_information("%s.%s", db, table);
key[*key_len].ptr= db;
key[*key_len].len= strlen(db);
++(*key_len);
@@ -1106,26 +1144,23 @@ static bool wsrep_prepare_key_for_isolation(const char* db,
break;
}
default:
+ assert(0);
+ WSREP_ERROR("Unsupported protocol version: %ld", wsrep_protocol_version);
+ unireg_abort(1);
return false;
}
- return true;
-}
+ return true;
+}
static bool wsrep_prepare_key_for_isolation(const char* db,
const char* table,
wsrep_key_arr_t* ka)
{
wsrep_key_t* tmp;
-
- if (!ka->keys)
- tmp= (wsrep_key_t*)my_malloc((ka->keys_len + 1) * sizeof(wsrep_key_t),
- MYF(0));
- else
- tmp= (wsrep_key_t*)my_realloc(ka->keys,
- (ka->keys_len + 1) * sizeof(wsrep_key_t),
- MYF(0));
-
+ tmp= (wsrep_key_t*)my_realloc(ka->keys,
+ (ka->keys_len + 1) * sizeof(wsrep_key_t),
+ MYF(MY_ALLOW_ZERO_PTR));
if (!tmp)
{
WSREP_ERROR("Can't allocate memory for key_array");
@@ -1151,7 +1186,6 @@ static bool wsrep_prepare_key_for_isolation(const char* db,
return true;
}
-
static bool wsrep_prepare_keys_for_alter_add_fk(const char* child_table_db,
Alter_info* alter_info,
wsrep_key_arr_t* ka)
@@ -1178,7 +1212,6 @@ static bool wsrep_prepare_keys_for_alter_add_fk(const char* child_table_db,
return true;
}
-
static bool wsrep_prepare_keys_for_isolation(THD* thd,
const char* db,
const char* table,
@@ -1206,16 +1239,19 @@ static bool wsrep_prepare_keys_for_isolation(THD* thd,
if (!wsrep_prepare_keys_for_alter_add_fk(table_list->db.str, alter_info, ka))
goto err;
}
-
return false;
err:
- wsrep_keys_free(ka);
- return true;
+ wsrep_keys_free(ka);
+ return true;
}
+/*
+ * Prepare key list from db/table and table_list
+ *
+ * Return false in case of success, true in case of failure.
+ */
-/* Prepare key list from db/table and table_list */
bool wsrep_prepare_keys_for_isolation(THD* thd,
const char* db,
const char* table,
@@ -1225,7 +1261,6 @@ bool wsrep_prepare_keys_for_isolation(THD* thd,
return wsrep_prepare_keys_for_isolation(thd, db, table, table_list, NULL, ka);
}
-
bool wsrep_prepare_key(const uchar* cache_key, size_t cache_key_len,
const uchar* row_id, size_t row_id_len,
wsrep_buf_t* key, size_t* key_len)
@@ -1237,37 +1272,110 @@ bool wsrep_prepare_key(const uchar* cache_key, size_t cache_key_len,
{
case 0:
{
- key[0].ptr = cache_key;
- key[0].len = cache_key_len;
+ key[0].ptr= cache_key;
+ key[0].len= cache_key_len;
- *key_len = 1;
+ *key_len= 1;
break;
}
case 1:
case 2:
case 3:
+ case 4:
{
- key[0].ptr = cache_key;
- key[0].len = strlen( (char*)cache_key );
+ key[0].ptr= cache_key;
+ key[0].len= strlen( (char*)cache_key );
- key[1].ptr = cache_key + strlen( (char*)cache_key ) + 1;
- key[1].len = strlen( (char*)(key[1].ptr) );
+ key[1].ptr= cache_key + strlen( (char*)cache_key ) + 1;
+ key[1].len= strlen( (char*)(key[1].ptr) );
- *key_len = 2;
+ *key_len= 2;
break;
}
default:
return false;
}
- key[*key_len].ptr = row_id;
- key[*key_len].len = row_id_len;
+ key[*key_len].ptr= row_id;
+ key[*key_len].len= row_id_len;
++(*key_len);
return true;
}
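+/* Wrapper used by InnoDB to build a wsrep key from a cache key and row id. */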
+bool wsrep_prepare_key_for_innodb(THD* thd,
+ const uchar* cache_key,
+ size_t cache_key_len,
+ const uchar* row_id,
+ size_t row_id_len,
+ wsrep_buf_t* key,
+ size_t* key_len)
+{
+
+ return wsrep_prepare_key(cache_key, cache_key_len, row_id, row_id_len, key, key_len);
+}
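+/* Build a TOI key of the given type from the db name (required) and an
+   optional table name. */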
+wsrep::key wsrep_prepare_key_for_toi(const char* db, const char* table,
+ enum wsrep::key::type type)
+{
+ wsrep::key ret(type);
+ DBUG_ASSERT(db);
+ ret.append_key_part(db, strlen(db));
+ if (table) ret.append_key_part(table, strlen(table));
+ return ret;
+}
+
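+/* Collect exclusive keys for the parent tables referenced by foreign keys
+   added in an ALTER TABLE; a missing referenced db defaults to the child
+   table's db. */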
+wsrep::key_array
+wsrep_prepare_keys_for_alter_add_fk(const char* child_table_db,
+ Alter_info* alter_info)
+
+{
+ wsrep::key_array ret;
+ Key *key;
+ List_iterator<Key> key_iterator(alter_info->key_list);
+ while ((key= key_iterator++))
+ {
+ if (key->type == Key::FOREIGN_KEY)
+ {
+ Foreign_key *fk_key= (Foreign_key *)key;
+ const char *db_name= fk_key->ref_db.str;
+ const char *table_name= fk_key->ref_table.str;
+ if (!db_name)
+ {
+ db_name= child_table_db;
+ }
+ ret.push_back(wsrep_prepare_key_for_toi(db_name, table_name,
+ wsrep::key::exclusive));
+ }
+ }
+ return ret;
+}
+
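+/* Build the complete TOI key set from db/table, the statement's table list
+   and any foreign keys added by ALTER. */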
+wsrep::key_array wsrep_prepare_keys_for_toi(const char* db,
+ const char* table,
+ const TABLE_LIST* table_list,
+ Alter_info* alter_info)
+{
+ wsrep::key_array ret;
+ if (db || table)
+ {
+ ret.push_back(wsrep_prepare_key_for_toi(db, table, wsrep::key::exclusive));
+ }
+ for (const TABLE_LIST* table= table_list; table; table= table->next_global)
+ {
+ ret.push_back(wsrep_prepare_key_for_toi(table->db.str, table->table_name.str,
+ wsrep::key::exclusive));
+ }
+ if (alter_info && (alter_info->flags & ALTER_ADD_FOREIGN_KEY))
+ {
+ wsrep::key_array fk(wsrep_prepare_keys_for_alter_add_fk(table_list->db.str, alter_info));
+ if (!fk.empty())
+ {
+ ret.insert(ret.end(), fk.begin(), fk.end());
+ }
+ }
+ return ret;
+}
/*
* Construct Query_log_Event from thd query and serialize it
* into buffer.
@@ -1278,7 +1386,7 @@ int wsrep_to_buf_helper(
THD* thd, const char *query, uint query_len, uchar** buf, size_t* buf_len)
{
IO_CACHE tmp_io_cache;
- Log_event_writer writer(&tmp_io_cache,0);
+ Log_event_writer writer(&tmp_io_cache, 0);
if (open_cached_file(&tmp_io_cache, mysql_tmpdir, TEMP_PREFIX,
65536, MYF(MY_WME)))
return 1;
@@ -1364,9 +1472,9 @@ static int
create_view_query(THD *thd, uchar** buf, size_t* buf_len)
{
LEX *lex= thd->lex;
- SELECT_LEX *select_lex= &lex->select_lex;
+ SELECT_LEX *select_lex= lex->first_select_lex();
TABLE_LIST *first_table= select_lex->table_list.first;
- TABLE_LIST *views = first_table;
+ TABLE_LIST *views= first_table;
LEX_USER *definer;
String buff;
const LEX_CSTRING command[3]=
@@ -1391,16 +1499,16 @@ create_view_query(THD *thd, uchar** buf, size_t* buf_len)
if (definer)
{
- views->definer.user = definer->user;
- views->definer.host = definer->host;
+ views->definer.user= definer->user;
+ views->definer.host= definer->host;
} else {
WSREP_ERROR("Failed to get DEFINER for VIEW.");
return 1;
}
- views->algorithm = lex->create_view->algorithm;
- views->view_suid = lex->create_view->suid;
- views->with_check = lex->create_view->check;
+ views->algorithm = lex->create_view->algorithm;
+ views->view_suid = lex->create_view->suid;
+ views->with_check = lex->create_view->check;
view_store_options(thd, views, &buff);
buff.append(STRING_WITH_LEN("VIEW "));
@@ -1426,12 +1534,8 @@ create_view_query(THD *thd, uchar** buf, size_t* buf_len)
buff.append(')');
}
buff.append(STRING_WITH_LEN(" AS "));
- //buff.append(views->source.str, views->source.length);
buff.append(thd->lex->create_view->select.str,
thd->lex->create_view->select.length);
- //int errcode= query_error_code(thd, TRUE);
- //if (thd->binlog_query(THD::STMT_QUERY_TYPE,
- // buff.ptr(), buff.length(), FALSE, FALSE, FALSE, errcod
return wsrep_to_buf_helper(thd, buff.ptr(), buff.length(), buf, buf_len);
}
@@ -1446,7 +1550,7 @@ static int wsrep_drop_table_query(THD* thd, uchar** buf, size_t* buf_len)
{
LEX* lex= thd->lex;
- SELECT_LEX* select_lex= &lex->select_lex;
+ SELECT_LEX* select_lex= lex->first_select_lex();
TABLE_LIST* first_table= select_lex->table_list.first;
String buff;
@@ -1497,8 +1601,7 @@ static int wsrep_drop_table_query(THD* thd, uchar** buf, size_t* buf_len)
/* Forward declarations. */
-static int wsrep_create_sp(THD *thd, uchar** buf, size_t* buf_len);
-static int wsrep_create_trigger_query(THD *thd, uchar** buf, size_t* buf_len);
+int wsrep_create_trigger_query(THD *thd, uchar** buf, size_t* buf_len);
/*
Decide if statement should run in TOI.
@@ -1517,7 +1620,7 @@ static bool wsrep_can_run_in_toi(THD *thd, const char *db, const char *table,
DBUG_ASSERT(table_list || db);
LEX* lex= thd->lex;
- SELECT_LEX* select_lex= &lex->select_lex;
+ SELECT_LEX* select_lex= lex->first_select_lex();
TABLE_LIST* first_table= select_lex->table_list.first;
switch (lex->sql_command)
@@ -1578,6 +1681,7 @@ static bool wsrep_can_run_in_toi(THD *thd, const char *db, const char *table,
}
}
+#if UNUSED /* 323f269d4099 (Jan Lindström 2018-07-19) */
static const char* wsrep_get_query_or_msg(const THD* thd)
{
switch(thd->lex->sql_command)
@@ -1590,58 +1694,70 @@ static const char* wsrep_get_query_or_msg(const THD* thd)
return "REVOKE";
case SQLCOM_SET_OPTION:
if (thd->lex->definer)
- return "SET PASSWORD";
+ return "SET PASSWORD";
/* fallthrough */
default:
return thd->query();
}
}
+#endif //UNUSED
-/*
- returns:
- 0: statement was replicated as TOI
- 1: TOI replication was skipped
- -1: TOI replication failed
- */
-static int wsrep_TOI_begin(THD *thd, const char *db_, const char *table_,
- const TABLE_LIST* table_list,
- Alter_info* alter_info)
+static int wsrep_create_sp(THD *thd, uchar** buf, size_t* buf_len)
{
- wsrep_status_t ret(WSREP_WARNING);
- uchar* buf(0);
- size_t buf_len(0);
- int buf_err;
- int rc= 0;
+ String log_query;
+ sp_head *sp= thd->lex->sphead;
+ sql_mode_t saved_mode= thd->variables.sql_mode;
+ String retstr(64);
+ LEX_CSTRING returns= empty_clex_str;
+ retstr.set_charset(system_charset_info);
- if (wsrep_can_run_in_toi(thd, db_, table_, table_list) == false)
+ log_query.set_charset(system_charset_info);
+
+ if (sp->m_handler->type() == TYPE_ENUM_FUNCTION)
{
- WSREP_DEBUG("No TOI for %s", WSREP_QUERY(thd));
+ sp_returns_type(thd, retstr, sp);
+ returns= retstr.lex_cstring();
+ }
+ if (sp->m_handler->
+ show_create_sp(thd, &log_query,
+ sp->m_explicit_name ? sp->m_db : null_clex_str,
+ sp->m_name, sp->m_params, returns,
+ sp->m_body, sp->chistics(),
+ thd->lex->definer[0],
+ thd->lex->create_info,
+ saved_mode))
+ {
+ WSREP_WARN("SP create string failed: schema: %s, query: %s",
+ thd->get_db(), thd->query());
return 1;
}
- WSREP_DEBUG("TO BEGIN: %lld, %d : %s", (long long)wsrep_thd_trx_seqno(thd),
- thd->wsrep_exec_mode, wsrep_get_query_or_msg(thd));
+ return wsrep_to_buf_helper(thd, log_query.ptr(), log_query.length(), buf, buf_len);
+}
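+/* Serialize the current statement into a TOI event buffer. Some statements
+   (CREATE VIEW/PROCEDURE/TRIGGER/EVENT, ALTER EVENT, DROP TABLE) are
+   regenerated from the parse tree instead of using the raw query text. */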
+static int wsrep_TOI_event_buf(THD* thd, uchar** buf, size_t* buf_len)
+{
+ int err;
switch (thd->lex->sql_command)
{
case SQLCOM_CREATE_VIEW:
- buf_err= create_view_query(thd, &buf, &buf_len);
+ err= create_view_query(thd, buf, buf_len);
break;
case SQLCOM_CREATE_PROCEDURE:
case SQLCOM_CREATE_SPFUNCTION:
- buf_err= wsrep_create_sp(thd, &buf, &buf_len);
+ err= wsrep_create_sp(thd, buf, buf_len);
break;
case SQLCOM_CREATE_TRIGGER:
- buf_err= wsrep_create_trigger_query(thd, &buf, &buf_len);
+ err= wsrep_create_trigger_query(thd, buf, buf_len);
break;
case SQLCOM_CREATE_EVENT:
- buf_err= wsrep_create_event_query(thd, &buf, &buf_len);
+ err= wsrep_create_event_query(thd, buf, buf_len);
break;
case SQLCOM_ALTER_EVENT:
- buf_err= wsrep_alter_event_query(thd, &buf, &buf_len);
+ err= wsrep_alter_event_query(thd, buf, buf_len);
break;
case SQLCOM_DROP_TABLE:
- buf_err= wsrep_drop_table_query(thd, &buf, &buf_len);
+ err= wsrep_drop_table_query(thd, buf, buf_len);
break;
case SQLCOM_CREATE_ROLE:
if (sp_process_definer(thd))
@@ -1650,169 +1766,201 @@ static int wsrep_TOI_begin(THD *thd, const char *db_, const char *table_,
}
/* fallthrough */
default:
- buf_err= wsrep_to_buf_helper(thd, thd->query(), thd->query_length(),
- &buf, &buf_len);
+ err= wsrep_to_buf_helper(thd, thd->query(), thd->query_length(), buf,
+ buf_len);
break;
}
- wsrep_key_arr_t key_arr= {0, 0};
- struct wsrep_buf buff = { buf, buf_len };
- if (!buf_err &&
- !wsrep_prepare_keys_for_isolation(thd, db_, table_,
- table_list, alter_info, &key_arr) &&
- key_arr.keys_len > 0 &&
- WSREP_OK == (ret = wsrep->to_execute_start(wsrep, thd->thread_id,
- key_arr.keys, key_arr.keys_len,
- &buff, 1,
- &thd->wsrep_trx_meta)))
- {
- thd->wsrep_exec_mode= TOTAL_ORDER;
- wsrep_to_isolation++;
- wsrep_keys_free(&key_arr);
- WSREP_DEBUG("TO BEGIN: %lld, %d",(long long)wsrep_thd_trx_seqno(thd),
- thd->wsrep_exec_mode);
- }
- else if (key_arr.keys_len > 0) {
- /* jump to error handler in mysql_execute_command() */
- WSREP_WARN("TO isolation failed for: %d, schema: %s, sql: %s. Check wsrep "
- "connection state and retry the query.",
- ret,
- thd->get_db(),
- (thd->query()) ? thd->query() : "void");
- my_message(ER_LOCK_DEADLOCK, "WSREP replication failed. Check "
- "your wsrep connection state and retry the query.", MYF(0));
- wsrep_keys_free(&key_arr);
- rc= -1;
- }
- else {
- /* non replicated DDL, affecting temporary tables only */
- WSREP_DEBUG("TO isolation skipped for: %d, sql: %s."
- "Only temporary tables affected.",
- ret, (thd->query()) ? thd->query() : "void");
- rc= 1;
+ return err;
+}
+
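+/* Clean up after a failed TOI begin: if a GTID was already granted, write a
+   dummy event and leave the TOI critical section, aborting on failure. */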
+static void wsrep_TOI_begin_failed(THD* thd, const wsrep_buf_t* /* const err */)
+{
+ if (wsrep_thd_trx_seqno(thd) > 0)
+ {
+ /* GTID was granted and TO acquired - need to log event and release TO */
+ if (wsrep_emulate_bin_log) wsrep_thd_binlog_trx_reset(thd);
+ if (wsrep_write_dummy_event(thd, "TOI begin failed")) { goto fail; }
+ wsrep::client_state& cs(thd->wsrep_cs());
+ int const ret= cs.leave_toi();
+ if (ret)
+ {
+ WSREP_ERROR("Leaving critical section for failed TOI failed: thd: %lld, "
+ "schema: %s, SQL: %s, rcode: %d wsrep_error: %s",
+ (long long)thd->real_id, thd->db.str,
+ thd->query(), ret, wsrep::to_c_string(cs.current_error()));
+ goto fail;
+ }
}
- if (buf) my_free(buf);
- return rc;
+ return;
+fail:
+ WSREP_ERROR("Failed to release TOI resources. Need to abort.");
+ unireg_abort(1);
}
-static void wsrep_TOI_end(THD *thd) {
- wsrep_status_t ret;
- wsrep_to_isolation--;
- WSREP_DEBUG("TO END: %lld, %d: %s", (long long)wsrep_thd_trx_seqno(thd),
- thd->wsrep_exec_mode, wsrep_get_query_or_msg(thd));
+/*
+ returns:
+ 0: statement was replicated as TOI
+ 1: TOI replication was skipped
+ -1: TOI replication failed
+ */
+static int wsrep_TOI_begin(THD *thd, const char *db, const char *table,
+ const TABLE_LIST* table_list,
+ Alter_info* alter_info)
+{
+ DBUG_ASSERT(thd->variables.wsrep_OSU_method == WSREP_OSU_TOI);
- wsrep_set_SE_checkpoint(thd->wsrep_trx_meta.gtid.uuid,
- thd->wsrep_trx_meta.gtid.seqno);
- WSREP_DEBUG("TO END: %lld, update seqno",
- (long long)wsrep_thd_trx_seqno(thd));
-
- if (WSREP_OK == (ret = wsrep->to_execute_end(wsrep, thd->thread_id))) {
- WSREP_DEBUG("TO END: %lld", (long long)wsrep_thd_trx_seqno(thd));
+ WSREP_DEBUG("TOI Begin");
+ if (wsrep_can_run_in_toi(thd, db, table, table_list) == false)
+ {
+ WSREP_DEBUG("No TOI for %s", WSREP_QUERY(thd));
+ return 1;
}
- else {
- WSREP_WARN("TO isolation end failed for: %d, schema: %s, sql: %s",
- ret,
- thd->get_db(),
- (thd->query()) ? thd->query() : "void");
+
+ uchar* buf= 0;
+ size_t buf_len(0);
+ int buf_err;
+ int rc;
+
+ buf_err= wsrep_TOI_event_buf(thd, &buf, &buf_len);
+ if (buf_err) {
+ WSREP_ERROR("Failed to create TOI event buf: %d", buf_err);
+ my_message(ER_UNKNOWN_ERROR,
+ "WSREP replication failed to prepare TOI event buffer. "
+ "Check your query.",
+ MYF(0));
+ return -1;
}
-}
+ struct wsrep_buf buff= { buf, buf_len };
-static int wsrep_RSU_begin(THD *thd, const char *db_, const char *table_)
-{
- wsrep_status_t ret(WSREP_WARNING);
- WSREP_DEBUG("RSU BEGIN: %lld, %d : %s", (long long)wsrep_thd_trx_seqno(thd),
- thd->wsrep_exec_mode, thd->query() );
+ wsrep::key_array key_array=
+ wsrep_prepare_keys_for_toi(db, table, table_list, alter_info);
- ret = wsrep->desync(wsrep);
- if (ret != WSREP_OK)
+ if (thd->has_read_only_protection())
{
- WSREP_WARN("RSU desync failed %d for schema: %s, query: %s",
- ret, thd->get_db(), thd->query());
- my_error(ER_LOCK_DEADLOCK, MYF(0));
- return(ret);
+ /* non replicated DDL, affecting temporary tables only */
+    WSREP_DEBUG("TO isolation skipped, sql: %s. "
+ "Only temporary tables affected.",
+ WSREP_QUERY(thd));
+ if (buf) my_free(buf);
+ return -1;
}
- mysql_mutex_lock(&LOCK_wsrep_replaying);
- wsrep_replaying++;
- mysql_mutex_unlock(&LOCK_wsrep_replaying);
+ thd_proc_info(thd, "acquiring total order isolation");
- if (wsrep_wait_committing_connections_close(5000))
+ wsrep::client_state& cs(thd->wsrep_cs());
+ int ret= cs.enter_toi(key_array,
+ wsrep::const_buffer(buff.ptr, buff.len),
+ wsrep::provider::flag::start_transaction |
+ wsrep::provider::flag::commit);
+
+ if (ret)
{
- /* no can do, bail out from DDL */
- WSREP_WARN("RSU failed due to pending transactions, schema: %s, query %s",
- thd->get_db(), thd->query());
- mysql_mutex_lock(&LOCK_wsrep_replaying);
- wsrep_replaying--;
- mysql_mutex_unlock(&LOCK_wsrep_replaying);
+ DBUG_ASSERT(cs.current_error());
+ WSREP_DEBUG("to_execute_start() failed for %llu: %s, seqno: %lld",
+ thd->thread_id, WSREP_QUERY(thd),
+ (long long)wsrep_thd_trx_seqno(thd));
- ret = wsrep->resync(wsrep);
- if (ret != WSREP_OK)
+ /* jump to error handler in mysql_execute_command() */
+ switch (cs.current_error())
{
- WSREP_WARN("resync failed %d for schema: %s, query: %s",
- ret, thd->get_db(), thd->query());
+ case wsrep::e_size_exceeded_error:
+ WSREP_WARN("TO isolation failed for: %d, schema: %s, sql: %s. "
+ "Maximum size exceeded.",
+ ret,
+ (thd->db.str ? thd->db.str : "(null)"),
+ WSREP_QUERY(thd));
+ my_error(ER_ERROR_DURING_COMMIT, MYF(0), WSREP_SIZE_EXCEEDED);
+ break;
+ default:
+ WSREP_WARN("TO isolation failed for: %d, schema: %s, sql: %s. "
+ "Check wsrep connection state and retry the query.",
+ ret,
+ (thd->db.str ? thd->db.str : "(null)"),
+ WSREP_QUERY(thd));
+ if (!thd->is_error())
+ {
+ my_error(ER_LOCK_DEADLOCK, MYF(0), "WSREP replication failed. Check "
+ "your wsrep connection state and retry the query.");
+ }
}
-
- my_error(ER_LOCK_DEADLOCK, MYF(0));
- return(1);
+ rc= -1;
}
-
- wsrep_seqno_t seqno = wsrep->pause(wsrep);
- if (seqno == WSREP_SEQNO_UNDEFINED)
- {
- WSREP_WARN("pause failed %lld for schema: %s, query: %s", (long long)seqno,
- thd->get_db(), thd->query());
- return(1);
+ else {
+ ++wsrep_to_isolation;
+ rc= 0;
}
- WSREP_DEBUG("paused at %lld", (long long)seqno);
- thd->variables.wsrep_on = 0;
- return 0;
-}
-static void wsrep_RSU_end(THD *thd)
-{
- wsrep_status_t ret(WSREP_WARNING);
- WSREP_DEBUG("RSU END: %lld, %d : %s", (long long)wsrep_thd_trx_seqno(thd),
- thd->wsrep_exec_mode, thd->query() );
+ if (buf) my_free(buf);
+ if (rc) wsrep_TOI_begin_failed(thd, NULL);
- mysql_mutex_lock(&LOCK_wsrep_replaying);
- wsrep_replaying--;
- mysql_mutex_unlock(&LOCK_wsrep_replaying);
+ return rc;
+}
- ret = wsrep->resume(wsrep);
- if (ret != WSREP_OK)
+static void wsrep_TOI_end(THD *thd) {
+ wsrep_to_isolation--;
+ wsrep::client_state& client_state(thd->wsrep_cs());
+ DBUG_ASSERT(wsrep_thd_is_local_toi(thd));
+ WSREP_DEBUG("TO END: %lld: %s", client_state.toi_meta().seqno().get(),
+ WSREP_QUERY(thd));
+
+ if (wsrep_thd_is_local_toi(thd))
{
- WSREP_WARN("resume failed %d for schema: %s, query: %s", ret,
- thd->get_db(), thd->query());
+ wsrep_set_SE_checkpoint(client_state.toi_meta().gtid());
+ int ret= client_state.leave_toi();
+ if (!ret)
+ {
+ WSREP_DEBUG("TO END: %lld", client_state.toi_meta().seqno().get());
+ }
+ else
+ {
+ WSREP_WARN("TO isolation end failed for: %d, schema: %s, sql: %s",
+ ret, (thd->db.str ? thd->db.str : "(null)"), WSREP_QUERY(thd));
+ }
}
+}
- ret = wsrep->resync(wsrep);
- if (ret != WSREP_OK)
+static int wsrep_RSU_begin(THD *thd, const char *db_, const char *table_)
+{
+  WSREP_DEBUG("RSU BEGIN: %lld : %s", wsrep_thd_trx_seqno(thd),
+ WSREP_QUERY(thd));
+ if (thd->wsrep_cs().begin_rsu(5000))
{
- WSREP_WARN("resync failed %d for schema: %s, query: %s", ret,
- thd->get_db(), thd->query());
- return;
+ WSREP_WARN("RSU begin failed");
+ }
+ else
+ {
+ thd->variables.wsrep_on= 0;
}
+ return 0;
+}
- thd->variables.wsrep_on = 1;
+static void wsrep_RSU_end(THD *thd)
+{
+ WSREP_DEBUG("RSU END: %lld : %s", wsrep_thd_trx_seqno(thd),
+ WSREP_QUERY(thd));
+ if (thd->wsrep_cs().end_rsu())
+ {
+ WSREP_WARN("Failed to end RSU, server may need to be restarted");
+ }
+ thd->variables.wsrep_on= 1;
}
int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_,
const TABLE_LIST* table_list,
Alter_info* alter_info)
{
- int ret= 0;
-
/*
No isolation for applier or replaying threads.
*/
- if (thd->wsrep_exec_mode == REPL_RECV)
- return 0;
+ if (!wsrep_thd_is_local(thd)) return 0;
+ int ret= 0;
mysql_mutex_lock(&thd->LOCK_thd_data);
- if (thd->wsrep_conflict_state == MUST_ABORT)
+ if (thd->wsrep_trx().state() == wsrep::transaction::s_must_abort)
{
WSREP_INFO("thread: %lld schema: %s query: %s has been aborted due to multi-master conflict",
(longlong) thd->thread_id, thd->get_db(), thd->query());
@@ -1821,20 +1969,20 @@ int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_,
}
mysql_mutex_unlock(&thd->LOCK_thd_data);
- DBUG_ASSERT(thd->wsrep_exec_mode == LOCAL_STATE);
- DBUG_ASSERT(thd->wsrep_trx_meta.gtid.seqno == WSREP_SEQNO_UNDEFINED);
+ DBUG_ASSERT(wsrep_thd_is_local(thd));
+ DBUG_ASSERT(thd->wsrep_trx().ws_meta().seqno().is_undefined());
- if (thd->global_read_lock.can_acquire_protection())
+ if (thd->global_read_lock.is_acquired())
{
- WSREP_DEBUG("Aborting TOI: Global Read-Lock (FTWRL) in place: %s %lld",
- thd->query(), (longlong) thd->thread_id);
+ WSREP_DEBUG("Aborting TOI: Global Read-Lock (FTWRL) in place: %s %llu",
+ WSREP_QUERY(thd), thd->thread_id);
return -1;
}
if (wsrep_debug && thd->mdl_context.has_locks())
{
- WSREP_DEBUG("thread holds MDL locks at TI begin: %s %lld",
- thd->query(), (longlong) thd->thread_id);
+ WSREP_DEBUG("thread holds MDL locks at TI begin: %s %llu",
+ WSREP_QUERY(thd), thd->thread_id);
}
/*
@@ -1846,11 +1994,11 @@ int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_,
*/
if (wsrep_auto_increment_control)
{
- thd->variables.auto_increment_offset = 1;
- thd->variables.auto_increment_increment = 1;
+ thd->variables.auto_increment_offset= 1;
+ thd->variables.auto_increment_increment= 1;
}
- if (thd->variables.wsrep_on && thd->wsrep_exec_mode==LOCAL_STATE)
+ if (thd->variables.wsrep_on && wsrep_thd_is_local(thd))
{
switch (thd->variables.wsrep_OSU_method) {
case WSREP_OSU_TOI:
@@ -1866,48 +2014,53 @@ int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_,
break;
}
switch (ret) {
- case 0: thd->wsrep_exec_mode= TOTAL_ORDER; break;
+    case 0: /* wsrep_TOI_begin should set TOI mode */ break;
case 1:
/* TOI replication skipped, treat as success */
- ret = 0;
+ ret= 0;
break;
case -1:
/* TOI replication failed, treat as error */
break;
}
}
+
return ret;
}
void wsrep_to_isolation_end(THD *thd)
{
- if (thd->wsrep_exec_mode == TOTAL_ORDER)
+ DBUG_ASSERT(wsrep_thd_is_local_toi(thd) ||
+ wsrep_thd_is_in_rsu(thd));
+ if (wsrep_thd_is_local_toi(thd))
{
- switch(thd->variables.wsrep_OSU_method)
- {
- case WSREP_OSU_TOI: wsrep_TOI_end(thd); break;
- case WSREP_OSU_RSU: wsrep_RSU_end(thd); break;
- default:
- WSREP_WARN("Unsupported wsrep OSU method at isolation end: %lu",
- thd->variables.wsrep_OSU_method);
- break;
- }
- wsrep_cleanup_transaction(thd);
+ DBUG_ASSERT(thd->variables.wsrep_OSU_method == WSREP_OSU_TOI);
+ wsrep_TOI_end(thd);
}
+ else if (wsrep_thd_is_in_rsu(thd))
+ {
+ DBUG_ASSERT(thd->variables.wsrep_OSU_method == WSREP_OSU_RSU);
+ wsrep_RSU_end(thd);
+ }
+ else
+ {
+ DBUG_ASSERT(0);
+ }
+ if (wsrep_emulate_bin_log) wsrep_thd_binlog_trx_reset(thd);
}
#define WSREP_MDL_LOG(severity, msg, schema, schema_len, req, gra) \
WSREP_##severity( \
"%s\n" \
"schema: %.*s\n" \
- "request: (%lld \tseqno %lld \twsrep (%d, %d, %d) cmd %d %d \t%s)\n" \
- "granted: (%lld \tseqno %lld \twsrep (%d, %d, %d) cmd %d %d \t%s)", \
+ "request: (%llu \tseqno %lld \twsrep (%s, %s, %s) cmd %d %d \t%s)\n" \
+ "granted: (%llu \tseqno %lld \twsrep (%s, %s, %s) cmd %d %d \t%s)", \
msg, schema_len, schema, \
- (longlong) req->thread_id, (long long)wsrep_thd_trx_seqno(req), \
- req->wsrep_exec_mode, req->wsrep_query_state, req->wsrep_conflict_state, \
+ req->thread_id, (long long)wsrep_thd_trx_seqno(req), \
+ wsrep_thd_client_mode_str(req), wsrep_thd_client_state_str(req), wsrep_thd_transaction_state_str(req), \
req->get_command(), req->lex->sql_command, req->query(), \
- (longlong) gra->thread_id, (long long)wsrep_thd_trx_seqno(gra), \
- gra->wsrep_exec_mode, gra->wsrep_query_state, gra->wsrep_conflict_state, \
+ gra->thread_id, (long long)wsrep_thd_trx_seqno(gra), \
+ wsrep_thd_client_mode_str(gra), wsrep_thd_client_state_str(gra), wsrep_thd_transaction_state_str(gra), \
gra->get_command(), gra->lex->sql_command, gra->query());
/**
@@ -1920,58 +2073,47 @@ void wsrep_to_isolation_end(THD *thd)
@retval FALSE Lock request cannot be granted
*/
-bool wsrep_grant_mdl_exception(MDL_context *requestor_ctx,
+void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx,
MDL_ticket *ticket,
const MDL_key *key)
{
/* Fallback to the non-wsrep behaviour */
- if (!WSREP_ON) return FALSE;
+ if (!WSREP_ON) return;
THD *request_thd= requestor_ctx->get_thd();
THD *granted_thd= ticket->get_ctx()->get_thd();
- bool ret= false;
const char* schema= key->db_name();
int schema_len= key->db_name_length();
mysql_mutex_lock(&request_thd->LOCK_thd_data);
+ if (wsrep_thd_is_toi(request_thd) ||
+ wsrep_thd_is_applying(request_thd)) {
- /*
- We consider granting MDL exceptions only for appliers (BF THD) and ones
- executing under TOI mode.
-
- Rules:
- 1. If granted/owner THD is also an applier (BF THD) or one executing
- under TOI mode, then we grant the requested lock to the requester
- THD.
- @return true
-
- 2. If granted/owner THD is executing a FLUSH command or already has an
- explicit lock, then do not grant the requested lock to the requester
- THD and it has to wait.
- @return false
-
- 3. In all other cases the granted/owner THD is aborted and the requested
- lock is not granted to the requester THD, thus it has to wait.
- @return false
- */
- if (request_thd->wsrep_exec_mode == TOTAL_ORDER ||
- request_thd->wsrep_exec_mode == REPL_RECV)
- {
mysql_mutex_unlock(&request_thd->LOCK_thd_data);
WSREP_MDL_LOG(DEBUG, "MDL conflict ", schema, schema_len,
request_thd, granted_thd);
ticket->wsrep_report(wsrep_debug);
mysql_mutex_lock(&granted_thd->LOCK_thd_data);
- if (granted_thd->wsrep_exec_mode == TOTAL_ORDER ||
- granted_thd->wsrep_exec_mode == REPL_RECV)
+ if (wsrep_thd_is_toi(granted_thd) ||
+ wsrep_thd_is_applying(granted_thd))
{
- WSREP_MDL_LOG(INFO, "MDL BF-BF conflict", schema, schema_len,
- request_thd, granted_thd);
- ticket->wsrep_report(true);
- mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
- ret= true;
+ if (wsrep_thd_is_SR(granted_thd) && !wsrep_thd_is_SR(request_thd))
+ {
+ WSREP_MDL_LOG(INFO, "MDL conflict, DDL vs SR",
+ schema, schema_len, request_thd, granted_thd);
+ mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
+ wsrep_abort_thd((void*)request_thd, (void*)granted_thd, 1);
+ }
+ else
+ {
+ WSREP_MDL_LOG(INFO, "MDL BF-BF conflict", schema, schema_len,
+ request_thd, granted_thd);
+ ticket->wsrep_report(true);
+ mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
+ unireg_abort(1);
+ }
}
else if (granted_thd->lex->sql_command == SQLCOM_FLUSH ||
granted_thd->mdl_context.has_explicit_locks())
@@ -1979,173 +2121,57 @@ bool wsrep_grant_mdl_exception(MDL_context *requestor_ctx,
WSREP_DEBUG("BF thread waiting for FLUSH");
ticket->wsrep_report(wsrep_debug);
mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
- ret= false;
+ }
+ else if (request_thd->lex->sql_command == SQLCOM_DROP_TABLE)
+ {
+ WSREP_DEBUG("DROP caused BF abort, conf %s",
+ wsrep_thd_transaction_state_str(granted_thd));
+ ticket->wsrep_report(wsrep_debug);
+ mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
+ wsrep_abort_thd((void*)request_thd, (void*)granted_thd, 1);
}
else
{
- /* Print some debug information. */
- if (wsrep_debug)
+ WSREP_MDL_LOG(DEBUG, "MDL conflict-> BF abort", schema, schema_len,
+ request_thd, granted_thd);
+ ticket->wsrep_report(wsrep_debug);
+ if (granted_thd->wsrep_trx().active())
{
- if (request_thd->lex->sql_command == SQLCOM_DROP_TABLE ||
- request_thd->lex->sql_command == SQLCOM_DROP_SEQUENCE)
- {
- WSREP_DEBUG("DROP caused BF abort, conf %d", granted_thd->wsrep_conflict_state);
- }
- else if (granted_thd->wsrep_query_state == QUERY_COMMITTING)
+ mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
+ wsrep_abort_thd(request_thd, granted_thd, 1);
+ }
+ else
+ {
+ /*
+ Granted_thd is likely executing with wsrep_on=0. If the requesting
+ thd is BF, BF abort and wait.
+ */
+ mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
+ if (wsrep_thd_is_BF(request_thd, FALSE))
{
- WSREP_DEBUG("MDL granted, but committing thd abort scheduled");
+ ha_abort_transaction(request_thd, granted_thd, TRUE);
}
else
{
- WSREP_MDL_LOG(DEBUG, "MDL conflict-> BF abort", schema, schema_len,
- request_thd, granted_thd);
+ WSREP_MDL_LOG(INFO, "MDL unknown BF-BF conflict", schema, schema_len,
+ request_thd, granted_thd);
+ ticket->wsrep_report(true);
+ unireg_abort(1);
}
- ticket->wsrep_report(true);
}
-
- mysql_mutex_unlock(&granted_thd->LOCK_thd_data);
- wsrep_abort_thd((void *) request_thd, (void *) granted_thd, 1);
- ret= false;
}
}
else
{
mysql_mutex_unlock(&request_thd->LOCK_thd_data);
}
-
- return ret;
-}
-
-
-pthread_handler_t start_wsrep_THD(void *arg)
-{
- THD *thd;
- wsrep_thd_processor_fun processor= (wsrep_thd_processor_fun)arg;
-
- if (my_thread_init() || (!(thd= new THD(next_thread_id(), true))))
- {
- goto error;
- }
-
- mysql_mutex_lock(&LOCK_thread_count);
-
- if (wsrep_gtid_mode)
- {
- /* Adjust domain_id. */
- thd->variables.gtid_domain_id= wsrep_gtid_domain_id;
- }
-
- thd->real_id=pthread_self(); // Keep purify happy
- thread_created++;
- threads.append(thd);
-
- my_net_init(&thd->net,(st_vio*) 0, thd, MYF(0));
-
- DBUG_PRINT("wsrep",(("creating thread %lld"), (long long)thd->thread_id));
- thd->prior_thr_create_utime= thd->start_utime= microsecond_interval_timer();
- (void) mysql_mutex_unlock(&LOCK_thread_count);
-
- /* from bootstrap()... */
- thd->bootstrap=1;
- thd->max_client_packet_length= thd->net.max_packet;
- thd->security_ctx->master_access= ~(ulong)0;
-
- /* from handle_one_connection... */
- pthread_detach_this_thread();
-
- mysql_thread_set_psi_id(thd->thread_id);
- thd->thr_create_utime= microsecond_interval_timer();
- if (MYSQL_CALLBACK_ELSE(thread_scheduler, init_new_connection_thread, (), 0))
- {
- close_connection(thd, ER_OUT_OF_RESOURCES);
- statistic_increment(aborted_connects,&LOCK_status);
- MYSQL_CALLBACK(thread_scheduler, end_thread, (thd, 0));
- goto error;
- }
-
-// </5.1.17>
- /*
- handle_one_connection() is normally the only way a thread would
- start and would always be on the very high end of the stack ,
- therefore, the thread stack always starts at the address of the
- first local variable of handle_one_connection, which is thd. We
- need to know the start of the stack so that we could check for
- stack overruns.
- */
- DBUG_PRINT("wsrep", ("handle_one_connection called by thread %lld\n",
- (long long)thd->thread_id));
- /* now that we've called my_thread_init(), it is safe to call DBUG_* */
-
- thd->thread_stack= (char*) &thd;
- if (thd->store_globals())
- {
- close_connection(thd, ER_OUT_OF_RESOURCES);
- statistic_increment(aborted_connects,&LOCK_status);
- MYSQL_CALLBACK(thread_scheduler, end_thread, (thd, 0));
- goto error;
- }
-
- thd->system_thread= SYSTEM_THREAD_SLAVE_SQL;
- thd->security_ctx->skip_grants();
-
- /* handle_one_connection() again... */
- //thd->version= refresh_version;
- thd->proc_info= 0;
- thd->set_command(COM_SLEEP);
- thd->init_for_queries();
-
- mysql_mutex_lock(&LOCK_thread_count);
- wsrep_running_threads++;
- mysql_cond_broadcast(&COND_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
-
- processor(thd);
-
- close_connection(thd, 0);
-
- mysql_mutex_lock(&LOCK_thread_count);
- wsrep_running_threads--;
- WSREP_DEBUG("wsrep running threads now: %lu", wsrep_running_threads);
- mysql_cond_broadcast(&COND_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
-
- // Note: We can't call THD destructor without crashing
- // if plugins have not been initialized. However, in most of the
- // cases this means that pre SE initialization SST failed and
- // we are going to exit anyway.
- if (plugins_are_initialized)
- {
- net_end(&thd->net);
- MYSQL_CALLBACK(thread_scheduler, end_thread, (thd, 1));
- }
- else
- {
- // TODO: lightweight cleanup to get rid of:
- // 'Error in my_thread_global_end(): 2 threads didn't exit'
- // at server shutdown
- }
-
- unlink_not_visible_thd(thd);
- delete thd;
- my_thread_end();
- return(NULL);
-
-error:
- WSREP_ERROR("Failed to create/initialize system thread");
-
- /* Abort if its the first applier/rollbacker thread. */
- if (!mysqld_server_initialized)
- unireg_abort(1);
- else
- return NULL;
}
-
/**/
static bool abort_replicated(THD *thd)
{
bool ret_code= false;
- if (thd->wsrep_query_state== QUERY_COMMITTING)
+ if (thd->wsrep_trx().state() == wsrep::transaction::s_committing)
{
WSREP_DEBUG("aborting replicated trx: %llu", (ulonglong)(thd->real_id));
@@ -2155,54 +2181,44 @@ static bool abort_replicated(THD *thd)
return ret_code;
}
-
/**/
static inline bool is_client_connection(THD *thd)
{
return (thd->wsrep_client_thread && thd->variables.wsrep_on);
}
-
static inline bool is_replaying_connection(THD *thd)
{
bool ret;
mysql_mutex_lock(&thd->LOCK_thd_data);
- ret= (thd->wsrep_conflict_state == REPLAYING) ? true : false;
+ ret= (thd->wsrep_trx().state() == wsrep::transaction::s_replaying) ? true : false;
mysql_mutex_unlock(&thd->LOCK_thd_data);
return ret;
}
-
static inline bool is_committing_connection(THD *thd)
{
bool ret;
mysql_mutex_lock(&thd->LOCK_thd_data);
- ret= (thd->wsrep_query_state == QUERY_COMMITTING) ? true : false;
+ ret= (thd->wsrep_trx().state() == wsrep::transaction::s_committing) ? true : false;
mysql_mutex_unlock(&thd->LOCK_thd_data);
return ret;
}
-
-static bool have_client_connections()
+static my_bool have_client_connections(THD *thd, void*)
{
- THD *tmp;
-
- I_List_iterator<THD> it(threads);
- while ((tmp=it++))
+ DBUG_PRINT("quit",("Informing thread %lld that it's time to die",
+ (longlong) thd->thread_id));
+ if (is_client_connection(thd) && thd->killed == KILL_CONNECTION)
{
- DBUG_PRINT("quit",("Informing thread %lld that it's time to die",
- (longlong) tmp->thread_id));
- if (is_client_connection(tmp) && tmp->killed == KILL_CONNECTION)
- {
- (void)abort_replicated(tmp);
- return true;
- }
+ (void)abort_replicated(thd);
+ return 1;
}
- return false;
+ return 0;
}
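
The callbacks above follow the pattern this patch introduces throughout: the old explicit I_List_iterator loops over the global thread list are replaced by server_threads.iterate(callback, arg), where the callback runs once per THD and a non-zero return value is reflected in the result of iterate(), as the use of have_client_connections() further down suggests. A minimal sketch of such a callback, assuming iterate() forwards an optional typed argument the way the kill_all_threads()/kill_remaining_threads() calls below do; the helper name and its counting purpose are hypothetical, not part of the patch:

static my_bool count_wsrep_appliers(THD *thd, uint *count)
{
  /* assumed to run under whatever THD-list protection iterate() provides */
  if (thd->wsrep_applier)
    (*count)++;
  return 0;   /* 0 = keep scanning; non-zero would stop the scan early */
}

/* usage sketch: uint n= 0; server_threads.iterate(count_wsrep_appliers, &n); */
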
static void wsrep_close_thread(THD *thd)
@@ -2223,134 +2239,86 @@ static void wsrep_close_thread(THD *thd)
}
}
-
-static my_bool have_committing_connections()
+static my_bool have_committing_connections(THD *thd, void *)
{
- THD *tmp;
- mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
-
- I_List_iterator<THD> it(threads);
- while ((tmp=it++))
- {
- if (!is_client_connection(tmp))
- continue;
-
- if (is_committing_connection(tmp))
- {
- return TRUE;
- }
- }
- mysql_mutex_unlock(&LOCK_thread_count);
- return FALSE;
+ return is_client_connection(thd) && is_committing_connection(thd) ? 1 : 0;
}
-
int wsrep_wait_committing_connections_close(int wait_time)
{
int sleep_time= 100;
- while (have_committing_connections() && wait_time > 0)
+ while (server_threads.iterate(have_committing_connections) && wait_time > 0)
{
WSREP_DEBUG("wait for committing transaction to close: %d", wait_time);
my_sleep(sleep_time);
wait_time -= sleep_time;
}
- if (have_committing_connections())
+ return server_threads.iterate(have_committing_connections);
+}
+
+static my_bool kill_all_threads(THD *thd, THD *caller_thd)
+{
+ DBUG_PRINT("quit", ("Informing thread %lld that it's time to die",
+ (longlong) thd->thread_id));
+ /* We skip slave threads & scheduler on this first loop through. */
+ if (is_client_connection(thd) && thd != caller_thd)
{
- return 1;
+ if (is_replaying_connection(thd))
+ thd->set_killed(KILL_CONNECTION);
+ else if (!abort_replicated(thd))
+ {
+ /* replicated transactions must be skipped */
+ WSREP_DEBUG("closing connection %lld", (longlong) thd->thread_id);
+ /* instead of wsrep_close_thread() we now do a soft kill via THD::awake */
+ thd->awake(KILL_CONNECTION);
+ }
}
return 0;
}
+static my_bool kill_remaining_threads(THD *thd, THD *caller_thd)
+{
+#ifndef __bsdi__ // Bug in BSDI kernel
+ if (is_client_connection(thd) &&
+ !abort_replicated(thd) &&
+ !is_replaying_connection(thd) &&
+ thd != caller_thd)
+ {
+ WSREP_INFO("killing local connection: %lld", (longlong) thd->thread_id);
+ close_connection(thd, 0);
+ }
+#endif
+ return 0;
+}
-void wsrep_close_client_connections(my_bool wait_to_end, THD *except_caller_thd)
+void wsrep_close_client_connections(my_bool wait_to_end, THD* except_caller_thd)
{
+ /* Clear thread cache */
+ kill_cached_threads++;
+ flush_thread_cache();
+
/*
First signal all threads that it's time to die
*/
+ server_threads.iterate(kill_all_threads, except_caller_thd);
- THD *tmp;
- mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
-
- bool kill_cached_threads_saved= kill_cached_threads;
- kill_cached_threads= true; // prevent future threads caching
- mysql_cond_broadcast(&COND_thread_cache); // tell cached threads to die
-
- I_List_iterator<THD> it(threads);
- while ((tmp=it++))
- {
- DBUG_PRINT("quit",("Informing thread %lld that it's time to die",
- (longlong) tmp->thread_id));
- /* We skip slave threads & scheduler on this first loop through. */
- if (!is_client_connection(tmp))
- continue;
-
- if (tmp == except_caller_thd)
- {
- DBUG_ASSERT(is_client_connection(tmp));
- continue;
- }
-
- if (is_replaying_connection(tmp))
- {
- tmp->set_killed(KILL_CONNECTION);
- continue;
- }
-
- /* replicated transactions must be skipped */
- if (abort_replicated(tmp))
- continue;
-
- WSREP_DEBUG("closing connection %lld", (longlong) tmp->thread_id);
-
- /*
- instead of wsrep_close_thread() we do now soft kill by THD::awake
- */
- mysql_mutex_lock(&tmp->LOCK_thd_data);
-
- tmp->awake(KILL_CONNECTION);
-
- mysql_mutex_unlock(&tmp->LOCK_thd_data);
-
- }
- mysql_mutex_unlock(&LOCK_thread_count);
-
- if (thread_count)
- sleep(2); // Give threads time to die
-
- mysql_mutex_lock(&LOCK_thread_count);
/*
Force remaining threads to die by closing the connection to the client
*/
+ server_threads.iterate(kill_remaining_threads, except_caller_thd);
- I_List_iterator<THD> it2(threads);
- while ((tmp=it2++))
- {
-#ifndef __bsdi__ // Bug in BSDI kernel
- if (is_client_connection(tmp) &&
- !abort_replicated(tmp) &&
- !is_replaying_connection(tmp) &&
- tmp != except_caller_thd)
- {
- WSREP_INFO("killing local connection: %lld", (longlong) tmp->thread_id);
- close_connection(tmp,0);
- }
-#endif
- }
-
- DBUG_PRINT("quit",("Waiting for threads to die (count=%u)",thread_count));
- WSREP_DEBUG("waiting for client connections to close: %u", thread_count);
+ DBUG_PRINT("quit", ("Waiting for threads to die (count=%u)",
+ uint32_t(thread_count)));
+ WSREP_DEBUG("waiting for client connections to close: %u",
+ uint32_t(thread_count));
- while (wait_to_end && have_client_connections())
+ while (wait_to_end && server_threads.iterate(have_client_connections))
{
- mysql_cond_wait(&COND_thread_count, &LOCK_thread_count);
- DBUG_PRINT("quit",("One thread died (count=%u)", thread_count));
+ sleep(1);
+ DBUG_PRINT("quit",("One thread died (count=%u)", uint32_t(thread_count)));
}
- kill_cached_threads= kill_cached_threads_saved;
-
- mysql_mutex_unlock(&LOCK_thread_count);
-
/* All client connection threads have now been aborted */
}
@@ -2361,349 +2329,153 @@ void wsrep_close_applier(THD *thd)
wsrep_close_thread(thd);
}
-
-void wsrep_close_threads(THD *thd)
+static my_bool wsrep_close_threads_callback(THD *thd, THD *caller_thd)
{
- THD *tmp;
- mysql_mutex_lock(&LOCK_thread_count); // For unlink from list
-
- I_List_iterator<THD> it(threads);
- while ((tmp=it++))
+ DBUG_PRINT("quit",("Informing thread %lld that it's time to die",
+ (longlong) thd->thread_id));
+ /* We skip slave threads & scheduler on this first loop through. */
+ if (thd->wsrep_applier && thd != caller_thd)
{
- DBUG_PRINT("quit",("Informing thread %lld that it's time to die",
- (longlong) tmp->thread_id));
- /* We skip slave threads & scheduler on this first loop through. */
- if (tmp->wsrep_applier && tmp != thd)
- {
- WSREP_DEBUG("closing wsrep thread %lld", (longlong) tmp->thread_id);
- wsrep_close_thread (tmp);
- }
+ WSREP_DEBUG("closing wsrep thread %lld", (longlong) thd->thread_id);
+ wsrep_close_thread(thd);
}
+ return 0;
+}
- mysql_mutex_unlock(&LOCK_thread_count);
+void wsrep_close_threads(THD *thd)
+{
+ server_threads.iterate(wsrep_close_threads_callback, thd);
}
void wsrep_wait_appliers_close(THD *thd)
{
/* Wait for wsrep appliers to gracefully exit */
- mysql_mutex_lock(&LOCK_thread_count);
- while (wsrep_running_threads > 1)
- // 1 is for rollbacker thread which needs to be killed explicitly.
- // This gotta be fixed in a more elegant manner if we gonna have arbitrary
- // number of non-applier wsrep threads.
+ mysql_mutex_lock(&LOCK_wsrep_slave_threads);
+ while (wsrep_running_threads > 2)
+ /*
+ 2 is for the rollbacker thread, which needs to be killed explicitly.
+ This should be handled in a more elegant manner if we are going to have
+ an arbitrary number of non-applier wsrep threads.
+ */
{
- if (thread_handling > SCHEDULER_ONE_THREAD_PER_CONNECTION)
- {
- mysql_mutex_unlock(&LOCK_thread_count);
- my_sleep(100);
- mysql_mutex_lock(&LOCK_thread_count);
- }
- else
- mysql_cond_wait(&COND_thread_count,&LOCK_thread_count);
- DBUG_PRINT("quit",("One applier died (count=%u)",thread_count));
+ mysql_cond_wait(&COND_wsrep_slave_threads, &LOCK_wsrep_slave_threads);
}
- mysql_mutex_unlock(&LOCK_thread_count);
+ mysql_mutex_unlock(&LOCK_wsrep_slave_threads);
+ DBUG_PRINT("quit",("applier threads have died (count=%u)",
+ uint32_t(wsrep_running_threads)));
+
/* Now kill remaining wsrep threads: rollbacker */
wsrep_close_threads (thd);
/* and wait for them to die */
- mysql_mutex_lock(&LOCK_thread_count);
+ mysql_mutex_lock(&LOCK_wsrep_slave_threads);
while (wsrep_running_threads > 0)
{
- if (thread_handling > SCHEDULER_ONE_THREAD_PER_CONNECTION)
- {
- mysql_mutex_unlock(&LOCK_thread_count);
- my_sleep(100);
- mysql_mutex_lock(&LOCK_thread_count);
- }
- else
- mysql_cond_wait(&COND_thread_count,&LOCK_thread_count);
- DBUG_PRINT("quit",("One thread died (count=%u)",thread_count));
+ mysql_cond_wait(&COND_wsrep_slave_threads, &LOCK_wsrep_slave_threads);
}
- mysql_mutex_unlock(&LOCK_thread_count);
+ mysql_mutex_unlock(&LOCK_wsrep_slave_threads);
+ DBUG_PRINT("quit",("all wsrep system threads have died"));
/* All wsrep applier threads have now been aborted. However, if this thread
is also applier, we are still running...
*/
}
-
-void wsrep_kill_mysql(THD *thd)
+void
+wsrep_last_committed_id(wsrep_gtid_t* gtid)
{
- if (mysqld_server_started)
- {
- if (!shutdown_in_progress)
- {
- WSREP_INFO("starting shutdown");
- kill_mysql();
- }
- }
- else
- {
- unireg_abort(1);
- }
+ wsrep::gtid ret= Wsrep_server_state::instance().last_committed_gtid();
+ memcpy(gtid->uuid.data, ret.id().data(), sizeof(gtid->uuid.data));
+ gtid->seqno= ret.seqno().get();
}
-
-static int wsrep_create_sp(THD *thd, uchar** buf, size_t* buf_len)
+void
+wsrep_node_uuid(wsrep_uuid_t& uuid)
{
- String log_query;
- sp_head *sp = thd->lex->sphead;
- sql_mode_t saved_mode= thd->variables.sql_mode;
- String retstr(64);
- LEX_CSTRING returns= empty_clex_str;
- retstr.set_charset(system_charset_info);
-
- log_query.set_charset(system_charset_info);
-
- if (sp->m_handler->type() == TYPE_ENUM_FUNCTION)
- {
- sp_returns_type(thd, retstr, sp);
- returns= retstr.lex_cstring();
- }
- if (sp->m_handler->
- show_create_sp(thd, &log_query,
- sp->m_explicit_name ? sp->m_db : null_clex_str,
- sp->m_name, sp->m_params, returns,
- sp->m_body, sp->chistics(),
- thd->lex->definer[0],
- thd->lex->create_info,
- saved_mode))
- {
- WSREP_WARN("SP create string failed: schema: %s, query: %s",
- thd->get_db(), thd->query());
- return 1;
- }
-
- return wsrep_to_buf_helper(thd, log_query.ptr(), log_query.length(), buf, buf_len);
-}
-
-
-extern int wsrep_on(THD *thd)
-{
- return (int)(WSREP(thd));
-}
-
-
-extern "C" bool wsrep_thd_is_wsrep_on(THD *thd)
-{
- return thd->variables.wsrep_on;
-}
-
-
-bool wsrep_consistency_check(THD *thd)
-{
- return thd->wsrep_consistency_check == CONSISTENCY_CHECK_RUNNING;
-}
-
-
-extern "C" void wsrep_thd_set_exec_mode(THD *thd, enum wsrep_exec_mode mode)
-{
- thd->wsrep_exec_mode= mode;
-}
-
-
-extern "C" void wsrep_thd_set_query_state(
- THD *thd, enum wsrep_query_state state)
-{
- thd->wsrep_query_state= state;
-}
-
-
-void wsrep_thd_set_conflict_state(THD *thd, enum wsrep_conflict_state state)
-{
- if (WSREP(thd)) thd->wsrep_conflict_state= state;
-}
-
-
-enum wsrep_exec_mode wsrep_thd_exec_mode(THD *thd)
-{
- return thd->wsrep_exec_mode;
-}
-
-
-const char *wsrep_thd_exec_mode_str(THD *thd)
-{
- return
- (!thd) ? "void" :
- (thd->wsrep_exec_mode == LOCAL_STATE) ? "local" :
- (thd->wsrep_exec_mode == REPL_RECV) ? "applier" :
- (thd->wsrep_exec_mode == TOTAL_ORDER) ? "total order" :
- (thd->wsrep_exec_mode == LOCAL_COMMIT) ? "local commit" : "void";
-}
-
-
-enum wsrep_query_state wsrep_thd_query_state(THD *thd)
-{
- return thd->wsrep_query_state;
+ uuid= node_uuid;
}
-
-const char *wsrep_thd_query_state_str(THD *thd)
+int wsrep_must_ignore_error(THD* thd)
{
- return
- (!thd) ? "void" :
- (thd->wsrep_query_state == QUERY_IDLE) ? "idle" :
- (thd->wsrep_query_state == QUERY_EXEC) ? "executing" :
- (thd->wsrep_query_state == QUERY_COMMITTING) ? "committing" :
- (thd->wsrep_query_state == QUERY_EXITING) ? "exiting" :
- (thd->wsrep_query_state == QUERY_ROLLINGBACK) ? "rolling back" : "void";
-}
+ const int error= thd->get_stmt_da()->sql_errno();
+ const uint flags= sql_command_flags[thd->lex->sql_command];
+ DBUG_ASSERT(error);
+ DBUG_ASSERT((wsrep_thd_is_toi(thd)) ||
+ (wsrep_thd_is_applying(thd) && thd->wsrep_apply_toi));
-enum wsrep_conflict_state wsrep_thd_get_conflict_state(THD *thd)
-{
- return thd->wsrep_conflict_state;
-}
-
-
-const char *wsrep_thd_conflict_state_str(THD *thd)
-{
- return
- (!thd) ? "void" :
- (thd->wsrep_conflict_state == NO_CONFLICT) ? "no conflict" :
- (thd->wsrep_conflict_state == MUST_ABORT) ? "must abort" :
- (thd->wsrep_conflict_state == ABORTING) ? "aborting" :
- (thd->wsrep_conflict_state == MUST_REPLAY) ? "must replay" :
- (thd->wsrep_conflict_state == REPLAYING) ? "replaying" :
- (thd->wsrep_conflict_state == RETRY_AUTOCOMMIT) ? "retrying" :
- (thd->wsrep_conflict_state == CERT_FAILURE) ? "cert failure" : "void";
-}
-
-
-wsrep_ws_handle_t* wsrep_thd_ws_handle(THD *thd)
-{
- return &thd->wsrep_ws_handle;
-}
-
-
-void wsrep_thd_LOCK(THD *thd)
-{
- mysql_mutex_lock(&thd->LOCK_thd_data);
-}
+ if ((wsrep_ignore_apply_errors & WSREP_IGNORE_ERRORS_ON_DDL))
+ goto ignore_error;
+ if ((flags & CF_WSREP_MAY_IGNORE_ERRORS) &&
+ (wsrep_ignore_apply_errors & WSREP_IGNORE_ERRORS_ON_RECONCILING_DDL))
+ {
+ switch (error)
+ {
+ case ER_DB_DROP_EXISTS:
+ case ER_BAD_TABLE_ERROR:
+ case ER_CANT_DROP_FIELD_OR_KEY:
+ goto ignore_error;
+ }
+ }
-void wsrep_thd_UNLOCK(THD *thd)
-{
- mysql_mutex_unlock(&thd->LOCK_thd_data);
-}
-
-
-extern "C" time_t wsrep_thd_query_start(THD *thd)
-{
- return thd->query_start();
-}
-
-
-extern "C" uint32 wsrep_thd_wsrep_rand(THD *thd)
-{
- return thd->wsrep_rand;
-}
-
-longlong wsrep_thd_trx_seqno(THD *thd)
-{
- return (thd) ? thd->wsrep_trx_meta.gtid.seqno : WSREP_SEQNO_UNDEFINED;
-}
-
-
-extern "C" query_id_t wsrep_thd_query_id(THD *thd)
-{
- return thd->query_id;
-}
-
-
-char *wsrep_thd_query(THD *thd)
-{
- return (thd) ? thd->query() : NULL;
-}
-
+ return 0;
-extern "C" query_id_t wsrep_thd_wsrep_last_query_id(THD *thd)
-{
- return thd->wsrep_last_query_id;
+ignore_error:
+ WSREP_WARN("Ignoring error '%s' on query. "
+ "Default database: '%s'. Query: '%s', Error_code: %d",
+ thd->get_stmt_da()->message(),
+ print_slave_db_safe(thd->db.str),
+ thd->query(),
+ error);
+ return 1;
}
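
The decision above reduces to two bitmask tests against wsrep_ignore_apply_errors, using the WSREP_IGNORE_ERRORS_* values added to wsrep_mysqld.h later in this patch. A standalone paraphrase, for illustration only (the _sketch suffix marks it as hypothetical; it is not part of the patch):

static bool may_ignore_ddl_error_sketch(uint ignore_mask, uint cmd_flags,
                                        int error)
{
  if (ignore_mask & WSREP_IGNORE_ERRORS_ON_DDL)
    return true;                                /* any DDL apply error is ignored */
  if ((cmd_flags & CF_WSREP_MAY_IGNORE_ERRORS) &&
      (ignore_mask & WSREP_IGNORE_ERRORS_ON_RECONCILING_DDL))
    return error == ER_DB_DROP_EXISTS ||        /* "object already gone" class */
           error == ER_BAD_TABLE_ERROR ||
           error == ER_CANT_DROP_FIELD_OR_KEY;
  return false;
}
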
-
-extern "C" void wsrep_thd_set_wsrep_last_query_id(THD *thd, query_id_t id)
+int wsrep_ignored_error_code(Log_event* ev, int error)
{
- thd->wsrep_last_query_id= id;
-}
+ const THD* thd= ev->thd;
+ DBUG_ASSERT(error);
+ DBUG_ASSERT(wsrep_thd_is_applying(thd) &&
+ !wsrep_thd_is_local_toi(thd));
-extern "C" void wsrep_thd_awake(THD *thd, my_bool signal)
-{
- if (signal)
- {
- thd->awake(KILL_QUERY);
- }
- else
+ if ((wsrep_ignore_apply_errors & WSREP_IGNORE_ERRORS_ON_RECONCILING_DML))
{
- mysql_mutex_lock(&LOCK_wsrep_replaying);
- mysql_cond_broadcast(&COND_wsrep_replaying);
- mysql_mutex_unlock(&LOCK_wsrep_replaying);
+ const int ev_type= ev->get_type_code();
+ if ((ev_type == DELETE_ROWS_EVENT || ev_type == DELETE_ROWS_EVENT_V1)
+ && error == ER_KEY_NOT_FOUND)
+ goto ignore_error;
}
-}
-
-
-int wsrep_thd_retry_counter(THD *thd)
-{
- return(thd->wsrep_retry_counter);
-}
+ return 0;
-extern "C" bool wsrep_thd_ignore_table(THD *thd)
-{
- return thd->wsrep_ignore_table;
+ignore_error:
+ WSREP_WARN("Ignoring error '%s' on %s event. Error_code: %d",
+ thd->get_stmt_da()->message(),
+ ev->get_type_str(),
+ error);
+ return 1;
}
-
-extern int
-wsrep_trx_order_before(THD *thd1, THD *thd2)
+bool wsrep_provider_is_SR_capable()
{
- if (wsrep_thd_trx_seqno(thd1) < wsrep_thd_trx_seqno(thd2)) {
- WSREP_DEBUG("BF conflict, order: %lld %lld\n",
- (long long)wsrep_thd_trx_seqno(thd1),
- (long long)wsrep_thd_trx_seqno(thd2));
- return 1;
- }
- WSREP_DEBUG("waiting for BF, trx order: %lld %lld\n",
- (long long)wsrep_thd_trx_seqno(thd1),
- (long long)wsrep_thd_trx_seqno(thd2));
- return 0;
+ return Wsrep_server_state::has_capability(wsrep::provider::capability::streaming);
}
-
-int wsrep_trx_is_aborting(THD *thd_ptr)
+int wsrep_thd_retry_counter(const THD *thd)
{
- if (thd_ptr) {
- if ((((THD *)thd_ptr)->wsrep_conflict_state == MUST_ABORT) ||
- (((THD *)thd_ptr)->wsrep_conflict_state == ABORTING)) {
- return 1;
- }
- }
- return 0;
+ return thd->wsrep_retry_counter;
}
-
-void wsrep_copy_query(THD *thd)
+extern bool wsrep_thd_ignore_table(THD *thd)
{
- thd->wsrep_retry_command = thd->get_command();
- thd->wsrep_retry_query_len = thd->query_length();
- if (thd->wsrep_retry_query) {
- my_free(thd->wsrep_retry_query);
- }
- thd->wsrep_retry_query = (char *)my_malloc(
- thd->wsrep_retry_query_len + 1, MYF(0));
- strncpy(thd->wsrep_retry_query, thd->query(), thd->wsrep_retry_query_len);
- thd->wsrep_retry_query[thd->wsrep_retry_query_len] = '\0';
+ return thd->wsrep_ignore_table;
}
-
bool wsrep_is_show_query(enum enum_sql_command command)
{
DBUG_ASSERT(command >= 0 && command <= SQLCOM_END);
return (sql_command_flags[command] & CF_STATUS_COMMAND) != 0;
}
-
bool wsrep_create_like_table(THD* thd, TABLE_LIST* table,
TABLE_LIST* src_table,
HA_CREATE_INFO *create_info)
@@ -2747,14 +2519,14 @@ bool wsrep_create_like_table(THD* thd, TABLE_LIST* table,
}
return(false);
-
-WSREP_ERROR_LABEL:
+#ifdef WITH_WSREP
+wsrep_error_label:
thd->wsrep_TOI_pre_query= NULL;
return (true);
+#endif
}
-
-static int wsrep_create_trigger_query(THD *thd, uchar** buf, size_t* buf_len)
+int wsrep_create_trigger_query(THD *thd, uchar** buf, size_t* buf_len)
{
LEX *lex= thd->lex;
String stmt_query;
@@ -2809,88 +2581,164 @@ static int wsrep_create_trigger_query(THD *thd, uchar** buf, size_t* buf_len)
buf, buf_len);
}
-/***** callbacks for wsrep service ************/
-
-my_bool get_wsrep_debug()
+void* start_wsrep_THD(void *arg)
{
- return wsrep_debug;
-}
+ THD *thd;
-my_bool get_wsrep_load_data_splitting()
-{
- return wsrep_load_data_splitting;
-}
+ Wsrep_thd_args* thd_args= (Wsrep_thd_args*) arg;
-long get_wsrep_protocol_version()
-{
- return wsrep_protocol_version;
-}
+ if (my_thread_init() || (!(thd= new THD(next_thread_id(), true))))
+ {
+ goto error;
+ }
-my_bool get_wsrep_drupal_282555_workaround()
-{
- return wsrep_drupal_282555_workaround;
-}
+ statistic_increment(thread_created, &LOCK_status);
-my_bool get_wsrep_recovery()
-{
- return wsrep_recovery;
-}
+ if (wsrep_gtid_mode)
+ {
+ /* Adjust domain_id. */
+ thd->variables.gtid_domain_id= wsrep_gtid_domain_id;
+ }
-my_bool get_wsrep_log_conflicts()
-{
- return wsrep_log_conflicts;
-}
+ thd->real_id=pthread_self(); // Keep purify happy
-wsrep_t *get_wsrep()
-{
- return wsrep;
-}
+ my_net_init(&thd->net,(st_vio*) 0, thd, MYF(0));
-my_bool get_wsrep_certify_nonPK()
-{
- return wsrep_certify_nonPK;
-}
+ DBUG_PRINT("wsrep",(("creating thread %lld"), (long long)thd->thread_id));
+ thd->prior_thr_create_utime= thd->start_utime= microsecond_interval_timer();
-void wsrep_lock_rollback()
-{
- mysql_mutex_lock(&LOCK_wsrep_rollback);
-}
+ server_threads.insert(thd);
-void wsrep_unlock_rollback()
-{
- mysql_cond_signal(&COND_wsrep_rollback);
- mysql_mutex_unlock(&LOCK_wsrep_rollback);
-}
+ /* from bootstrap()... */
+ thd->bootstrap=1;
+ thd->max_client_packet_length= thd->net.max_packet;
+ thd->security_ctx->master_access= ~(ulong)0;
-my_bool wsrep_aborting_thd_contains(THD *thd)
-{
- mysql_mutex_assert_owner(&LOCK_wsrep_rollback);
- wsrep_aborting_thd_t abortees = wsrep_aborting_thd;
- while (abortees)
+ /* from handle_one_connection... */
+ pthread_detach_this_thread();
+
+ mysql_thread_set_psi_id(thd->thread_id);
+ thd->thr_create_utime= microsecond_interval_timer();
+ if (MYSQL_CALLBACK_ELSE(thread_scheduler, init_new_connection_thread, (), 0))
{
- if (abortees->aborting_thd == thd)
- return true;
- abortees = abortees->next;
+ close_connection(thd, ER_OUT_OF_RESOURCES);
+ statistic_increment(aborted_connects,&LOCK_status);
+ MYSQL_CALLBACK(thread_scheduler, end_thread, (thd, 0));
+ goto error;
}
- return false;
+
+// </5.1.17>
+ /*
+ handle_one_connection() is normally the only way a thread would
+ start and would always be on the very high end of the stack ,
+ start and would always be on the very high end of the stack,
+ first local variable of handle_one_connection, which is thd. We
+ need to know the start of the stack so that we could check for
+ stack overruns.
+ */
+ DBUG_PRINT("wsrep", ("handle_one_connection called by thread %lld\n",
+ (long long)thd->thread_id));
+ /* now that we've called my_thread_init(), it is safe to call DBUG_* */
+
+ thd->thread_stack= (char*) &thd;
+ if (thd->store_globals())
+ {
+ close_connection(thd, ER_OUT_OF_RESOURCES);
+ statistic_increment(aborted_connects,&LOCK_status);
+ MYSQL_CALLBACK(thread_scheduler, end_thread, (thd, 0));
+ delete thd;
+ delete thd_args;
+ goto error;
+ }
+
+ thd->system_thread= SYSTEM_THREAD_SLAVE_SQL;
+ thd->security_ctx->skip_grants();
+
+ /* handle_one_connection() again... */
+ thd->proc_info= 0;
+ thd->set_command(COM_SLEEP);
+ thd->init_for_queries();
+ mysql_mutex_lock(&LOCK_wsrep_slave_threads);
+ wsrep_running_threads++;
+ mysql_cond_broadcast(&COND_wsrep_slave_threads);
+ mysql_mutex_unlock(&LOCK_wsrep_slave_threads);
+
+ WSREP_DEBUG("wsrep system thread %llu, %p starting",
+ thd->thread_id, thd);
+ thd_args->fun()(thd, thd_args->args());
+
+ WSREP_DEBUG("wsrep system thread: %llu, %p closing",
+ thd->thread_id, thd);
+
+ /* Wsrep may reset globals during thread context switches, store globals
+ before cleanup. */
+ thd->store_globals();
+
+ close_connection(thd, 0);
+
+ delete thd_args;
+
+ mysql_mutex_lock(&LOCK_wsrep_slave_threads);
+ wsrep_running_threads--;
+ WSREP_DEBUG("wsrep running threads now: %lu", wsrep_running_threads);
+ mysql_cond_broadcast(&COND_wsrep_slave_threads);
+ mysql_mutex_unlock(&LOCK_wsrep_slave_threads);
+ /*
+ Note: We can't call THD destructor without crashing
+ if plugins have not been initialized. However, in most of the
+ cases this means that pre SE initialization SST failed and
+ we are going to exit anyway.
+ */
+ if (plugins_are_initialized)
+ {
+ net_end(&thd->net);
+ MYSQL_CALLBACK(thread_scheduler, end_thread, (thd, 1));
+ }
+ else
+ {
+ /*
+ TODO: lightweight cleanup to get rid of:
+ 'Error in my_thread_global_end(): 2 threads didn't exit'
+ at server shutdown
+ */
+ }
+
+ server_threads.erase(thd);
+ delete thd;
+ my_thread_end();
+ return(NULL);
+
+error:
+ WSREP_ERROR("Failed to create/initialize system thread");
+
+ /* Abort if it's the first applier/rollbacker thread. */
+ if (!mysqld_server_initialized)
+ unireg_abort(1);
+ else
+ return NULL;
}
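
start_wsrep_THD() now receives its processor function wrapped in the Wsrep_thd_args object declared in wsrep_mysqld.h below. A minimal sketch of how a caller would hand work to this entry point; the real callers live outside this diff and use the server's instrumented thread-creation wrapper rather than raw pthread_create, and the names ending in _sketch are hypothetical:

static void applier_loop_sketch(THD*, void*)
{
  /* the applier or rollbacker main loop would run here */
}

static void spawn_wsrep_thread_sketch()
{
  Wsrep_thd_args* args= new Wsrep_thd_args(applier_loop_sketch, NULL);
  pthread_t th;
  if (pthread_create(&th, NULL, start_wsrep_THD, args))
    delete args;  /* thread never started; otherwise start_wsrep_THD owns args */
}
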
-void wsrep_aborting_thd_enqueue(THD *thd)
+enum wsrep::streaming_context::fragment_unit wsrep_fragment_unit(ulong unit)
{
- mysql_mutex_assert_owner(&LOCK_wsrep_rollback);
- wsrep_aborting_thd_t aborting = (wsrep_aborting_thd_t)
- my_malloc(sizeof(struct wsrep_aborting_thd), MYF(0));
- aborting->aborting_thd = thd;
- aborting->next = wsrep_aborting_thd;
- wsrep_aborting_thd = aborting;
+ switch (unit)
+ {
+ case WSREP_FRAG_BYTES: return wsrep::streaming_context::bytes;
+ case WSREP_FRAG_ROWS: return wsrep::streaming_context::row;
+ case WSREP_FRAG_STATEMENTS: return wsrep::streaming_context::statement;
+ default:
+ DBUG_ASSERT(0);
+ return wsrep::streaming_context::bytes;
+ }
}
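
A one-line usage sketch of the mapping above, assuming the input is the wsrep_trx_fragment_unit system variable declared in wsrep_mysqld.h (the actual call site is outside this hunk):

  enum wsrep::streaming_context::fragment_unit unit=
      wsrep_fragment_unit(wsrep_trx_fragment_unit); /* e.g. WSREP_FRAG_ROWS -> row */
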
-bool wsrep_node_is_donor()
+/***** callbacks for wsrep service ************/
+
+my_bool get_wsrep_recovery()
{
- return (WSREP_ON) ? (wsrep_config_state->get_status() == 2) : false;
+ return wsrep_recovery;
}
-bool wsrep_node_is_synced()
+bool wsrep_consistency_check(THD *thd)
{
- return (WSREP_ON) ? (wsrep_config_state->get_status() == 4) : false;
+ return thd->wsrep_consistency_check == CONSISTENCY_CHECK_RUNNING;
}
diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h
index cca66922a24..f71d998ed4e 100644
--- a/sql/wsrep_mysqld.h
+++ b/sql/wsrep_mysqld.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2015 Codership Oy <http://www.codership.com>
+/* Copyright 2008-2017 Codership Oy <http://www.codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -13,25 +13,33 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
-#include <wsrep.h>
-
#ifndef WSREP_MYSQLD_H
#define WSREP_MYSQLD_H
-#include <mysql/plugin.h>
-#include <mysql/service_wsrep.h>
+#include <wsrep.h>
#ifdef WITH_WSREP
+#include <mysql/plugin.h>
+#include "mysql/service_wsrep.h"
+
+#include <my_global.h>
+#include <my_pthread.h>
+#include "log.h"
+#include "mysqld.h"
+
typedef struct st_mysql_show_var SHOW_VAR;
#include <sql_priv.h>
-//#include "rpl_gtid.h"
-#include "../wsrep/wsrep_api.h"
#include "mdl.h"
-#include "mysqld.h"
#include "sql_table.h"
#include "wsrep_mysqld_c.h"
+#include "wsrep/provider.hpp"
+#include "wsrep/streaming_context.hpp"
+#include "wsrep_api.h"
+#include <vector>
+#include "wsrep_server_state.h"
+
#define WSREP_UNDEFINED_TRX_ID ULONGLONG_MAX
class set_var;
@@ -43,20 +51,7 @@ enum wsrep_consistency_check_mode {
CONSISTENCY_CHECK_RUNNING,
};
-struct wsrep_thd_shadow {
- ulonglong options;
- uint server_status;
- enum wsrep_exec_mode wsrep_exec_mode;
- Vio *vio;
- ulong tx_isolation;
- const char *db;
- size_t db_length;
- my_hrtime_t user_time;
- longlong row_count_func;
-};
-
// Global wsrep parameters
-extern wsrep_t* wsrep;
// MySQL wsrep options
extern const char* wsrep_provider;
@@ -70,24 +65,33 @@ extern const char* wsrep_data_home_dir;
extern const char* wsrep_dbug_option;
extern long wsrep_slave_threads;
extern int wsrep_slave_count_change;
+extern ulong wsrep_debug;
extern my_bool wsrep_convert_LOCK_to_trx;
extern ulong wsrep_retry_autocommit;
extern my_bool wsrep_auto_increment_control;
+extern my_bool wsrep_drupal_282555_workaround;
extern my_bool wsrep_incremental_data_collection;
extern const char* wsrep_start_position;
extern ulong wsrep_max_ws_size;
extern ulong wsrep_max_ws_rows;
extern const char* wsrep_notify_cmd;
-extern long wsrep_max_protocol_version;
+extern my_bool wsrep_certify_nonPK;
+extern long int wsrep_protocol_version;
extern ulong wsrep_forced_binlog_format;
extern my_bool wsrep_desync;
extern ulong wsrep_reject_queries;
+extern my_bool wsrep_recovery;
extern my_bool wsrep_replicate_myisam;
+extern my_bool wsrep_log_conflicts;
extern ulong wsrep_mysql_replication_bundle;
+extern my_bool wsrep_load_data_splitting;
extern my_bool wsrep_restart_slave;
extern my_bool wsrep_restart_slave_activated;
extern my_bool wsrep_slave_FK_checks;
extern my_bool wsrep_slave_UK_checks;
+extern ulong wsrep_trx_fragment_unit;
+extern ulong wsrep_SR_store_type;
+extern uint wsrep_ignore_apply_errors;
extern ulong wsrep_running_threads;
extern bool wsrep_new_cluster;
extern bool wsrep_gtid_mode;
@@ -106,15 +110,34 @@ enum enum_wsrep_OSU_method {
};
enum enum_wsrep_sync_wait {
- WSREP_SYNC_WAIT_NONE = 0x0,
+ WSREP_SYNC_WAIT_NONE= 0x0,
// select, begin
- WSREP_SYNC_WAIT_BEFORE_READ = 0x1,
- WSREP_SYNC_WAIT_BEFORE_UPDATE_DELETE = 0x2,
- WSREP_SYNC_WAIT_BEFORE_INSERT_REPLACE = 0x4,
- WSREP_SYNC_WAIT_BEFORE_SHOW = 0x8,
- WSREP_SYNC_WAIT_MAX = 0xF
+ WSREP_SYNC_WAIT_BEFORE_READ= 0x1,
+ WSREP_SYNC_WAIT_BEFORE_UPDATE_DELETE= 0x2,
+ WSREP_SYNC_WAIT_BEFORE_INSERT_REPLACE= 0x4,
+ WSREP_SYNC_WAIT_BEFORE_SHOW= 0x8,
+ WSREP_SYNC_WAIT_MAX= 0xF
+};
+
+enum enum_wsrep_ignore_apply_error {
+ WSREP_IGNORE_ERRORS_NONE= 0x0,
+ WSREP_IGNORE_ERRORS_ON_RECONCILING_DDL= 0x1,
+ WSREP_IGNORE_ERRORS_ON_RECONCILING_DML= 0x2,
+ WSREP_IGNORE_ERRORS_ON_DDL= 0x4,
+ WSREP_IGNORE_ERRORS_MAX= 0x7
};
+// Streaming Replication
+#define WSREP_FRAG_BYTES 0
+#define WSREP_FRAG_ROWS 1
+#define WSREP_FRAG_STATEMENTS 2
+
+#define WSREP_SR_STORE_NONE 0
+#define WSREP_SR_STORE_TABLE 1
+
+extern const char *wsrep_fragment_units[];
+extern const char *wsrep_SR_store_types[];
+
// MySQL status variables
extern my_bool wsrep_connected;
extern my_bool wsrep_ready;
@@ -127,9 +150,18 @@ extern long long wsrep_local_bf_aborts;
extern const char* wsrep_provider_name;
extern const char* wsrep_provider_version;
extern const char* wsrep_provider_vendor;
+extern char* wsrep_provider_capabilities;
+extern char* wsrep_cluster_capabilities;
+
+int wsrep_show_status(THD *thd, SHOW_VAR *var, char *buff);
+int wsrep_show_ready(THD *thd, SHOW_VAR *var, char *buff);
+void wsrep_free_status(THD *thd);
+void wsrep_update_cluster_state_uuid(const char* str);
+
+/* Filters out the --wsrep-new-cluster option from argv[].
+ * Should be called at the very beginning of main(). */
+void wsrep_filter_new_cluster (int* argc, char* argv[]);
-int wsrep_show_status(THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope);
int wsrep_init();
void wsrep_deinit(bool free_options);
@@ -145,19 +177,17 @@ bool wsrep_before_SE(); // initialize wsrep before storage
* @param before wsrep_before_SE() value */
void wsrep_init_startup(bool before);
+/* Recover streaming transactions from fragment storage */
+void wsrep_recover_sr_from_storage(THD *);
+
// Other wsrep global variables
extern my_bool wsrep_inited; // whether wsrep is initialized ?
-
-extern "C" void wsrep_thd_set_exec_mode(THD *thd, enum wsrep_exec_mode mode);
-extern "C" void wsrep_thd_set_query_state(
- THD *thd, enum wsrep_query_state state);
-
-extern "C" void wsrep_thd_set_trx_to_replay(THD *thd, uint64 trx_id);
-
+extern "C" void wsrep_fire_rollbacker(THD *thd);
extern "C" uint32 wsrep_thd_wsrep_rand(THD *thd);
extern "C" time_t wsrep_thd_query_start(THD *thd);
-extern "C" query_id_t wsrep_thd_query_id(THD *thd);
+extern void wsrep_close_client_connections(my_bool wait_to_end,
+ THD *except_caller_thd= NULL);
extern "C" query_id_t wsrep_thd_wsrep_last_query_id(THD *thd);
extern "C" void wsrep_thd_set_wsrep_last_query_id(THD *thd, query_id_t id);
@@ -165,62 +195,88 @@ extern int wsrep_wait_committing_connections_close(int wait_time);
extern void wsrep_close_applier(THD *thd);
extern void wsrep_wait_appliers_close(THD *thd);
extern void wsrep_close_applier_threads(int count);
-extern void wsrep_kill_mysql(THD *thd);
+
/* new defines */
extern void wsrep_stop_replication(THD *thd);
extern bool wsrep_start_replication();
-extern bool wsrep_must_sync_wait(THD* thd, uint mask = WSREP_SYNC_WAIT_BEFORE_READ);
-extern bool wsrep_sync_wait(THD* thd, uint mask = WSREP_SYNC_WAIT_BEFORE_READ);
+extern void wsrep_shutdown_replication();
+extern bool wsrep_must_sync_wait (THD* thd, uint mask= WSREP_SYNC_WAIT_BEFORE_READ);
+extern bool wsrep_sync_wait (THD* thd, uint mask= WSREP_SYNC_WAIT_BEFORE_READ);
+extern enum wsrep::provider::status
+wsrep_sync_wait_upto (THD* thd, wsrep_gtid_t* upto, int timeout);
+extern void wsrep_last_committed_id (wsrep_gtid_t* gtid);
extern int wsrep_check_opts();
extern void wsrep_prepend_PATH (const char* path);
/* Other global variables */
extern wsrep_seqno_t wsrep_locked_seqno;
-
#define WSREP_ON \
- (global_system_variables.wsrep_on)
-
-#define WSREP_ON_NEW \
((global_system_variables.wsrep_on) && \
wsrep_provider && \
strcmp(wsrep_provider, WSREP_NONE))
-#define WSREP(thd) \
+/* use xxxxxx_NNULL macros when thd pointer is guaranteed to be non-null to
+ * avoid compiler warnings (GCC 6 and later) */
+#define WSREP_NNULL(thd) \
(WSREP_ON && thd->variables.wsrep_on)
+#define WSREP(thd) \
+ (thd && WSREP_NNULL(thd))
+
+#define WSREP_CLIENT_NNULL(thd) \
+ (WSREP_NNULL(thd) && thd->wsrep_client_thread)
+
#define WSREP_CLIENT(thd) \
(WSREP(thd) && thd->wsrep_client_thread)
+#define WSREP_EMULATE_BINLOG_NNULL(thd) \
+ (WSREP_NNULL(thd) && wsrep_emulate_bin_log)
+
#define WSREP_EMULATE_BINLOG(thd) \
(WSREP(thd) && wsrep_emulate_bin_log)
-#define WSREP_FORMAT(my_format) \
- ((wsrep_forced_binlog_format != BINLOG_FORMAT_UNSPEC) \
- ? wsrep_forced_binlog_format : (ulong)(my_format))
+#define WSREP_BINLOG_FORMAT(my_format) \
+ ((wsrep_forced_binlog_format != BINLOG_FORMAT_UNSPEC) ? \
+ wsrep_forced_binlog_format : my_format)
// prefix all messages with "WSREP"
-void wsrep_log(void (*fun)(const char *, ...), const char *format, ...);
-#define WSREP_LOG(fun, ...) wsrep_log(fun, ## __VA_ARGS__)
-#define WSREP_LOG_CONFLICT_THD(thd, role) \
- WSREP_LOG(sql_print_information, \
- "%s: \n " \
- " THD: %lu, mode: %s, state: %s, conflict: %s, seqno: %lld\n " \
- " SQL: %s", \
- role, thd_get_thread_id(thd), wsrep_thd_exec_mode_str(thd), \
- wsrep_thd_query_state_str(thd), \
- wsrep_thd_conflict_state_str(thd), (long long)wsrep_thd_trx_seqno(thd), \
- wsrep_thd_query(thd) \
- );
-
-#define WSREP_LOG_CONFLICT(bf_thd, victim_thd, bf_abort) \
- if (wsrep_debug || wsrep_log_conflicts) \
- { \
- WSREP_LOG(sql_print_information, "cluster conflict due to %s for threads:",\
- (bf_abort) ? "high priority abort" : "certification failure" \
- ); \
- if (bf_thd != NULL) WSREP_LOG_CONFLICT_THD(bf_thd, "Winning thread"); \
- if (victim_thd) WSREP_LOG_CONFLICT_THD(victim_thd, "Victim thread"); \
+#define WSREP_LOG(fun, ...) \
+ do { \
+ char msg[1024]= {'\0'}; \
+ snprintf(msg, sizeof(msg) - 1, ## __VA_ARGS__); \
+ fun("WSREP: %s", msg); \
+ } while(0)
+
+#define WSREP_DEBUG(...) \
+ if (wsrep_debug) WSREP_LOG(sql_print_information, ##__VA_ARGS__)
+#define WSREP_INFO(...) WSREP_LOG(sql_print_information, ##__VA_ARGS__)
+#define WSREP_WARN(...) WSREP_LOG(sql_print_warning, ##__VA_ARGS__)
+#define WSREP_ERROR(...) WSREP_LOG(sql_print_error, ##__VA_ARGS__)
+
+#define WSREP_LOG_CONFLICT_THD(thd, role) \
+ WSREP_LOG(sql_print_information, \
+ "%s: \n " \
+ " THD: %lu, mode: %s, state: %s, conflict: %s, seqno: %lld\n " \
+ " SQL: %s", \
+ role, \
+ thd_get_thread_id(thd), \
+ wsrep_thd_client_mode_str(thd), \
+ wsrep_thd_client_state_str(thd), \
+ wsrep_thd_transaction_state_str(thd), \
+ wsrep_thd_trx_seqno(thd), \
+ wsrep_thd_query(thd) \
+ );
+
+#define WSREP_LOG_CONFLICT(bf_thd, victim_thd, bf_abort) \
+ if (wsrep_debug || wsrep_log_conflicts) \
+ { \
+ WSREP_LOG(sql_print_information, "cluster conflict due to %s for threads:", \
+ (bf_abort) ? "high priority abort" : "certification failure" \
+ ); \
+ if (bf_thd) WSREP_LOG_CONFLICT_THD(bf_thd, "Winning thread"); \
+ if (victim_thd) WSREP_LOG_CONFLICT_THD(victim_thd, "Victim thread"); \
+ WSREP_LOG(sql_print_information, "context: %s:%d", __FILE__, __LINE__); \
}
#define WSREP_PROVIDER_EXISTS \
@@ -233,15 +289,6 @@ extern void wsrep_ready_wait();
class Ha_trx_info;
struct THD_TRANS;
-void wsrep_register_hton(THD* thd, bool all);
-void wsrep_brute_force_killer(THD *thd);
-int wsrep_hire_brute_force_killer(THD *thd, uint64_t trx_id);
-
-/* this is visible for client build so that innodb plugin gets this */
-typedef struct wsrep_aborting_thd {
- struct wsrep_aborting_thd *next;
- THD *aborting_thd;
-} *wsrep_aborting_thd_t;
extern mysql_mutex_t LOCK_wsrep_ready;
extern mysql_cond_t COND_wsrep_ready;
@@ -249,24 +296,28 @@ extern mysql_mutex_t LOCK_wsrep_sst;
extern mysql_cond_t COND_wsrep_sst;
extern mysql_mutex_t LOCK_wsrep_sst_init;
extern mysql_cond_t COND_wsrep_sst_init;
-extern mysql_mutex_t LOCK_wsrep_rollback;
-extern mysql_cond_t COND_wsrep_rollback;
extern int wsrep_replaying;
extern mysql_mutex_t LOCK_wsrep_replaying;
extern mysql_cond_t COND_wsrep_replaying;
extern mysql_mutex_t LOCK_wsrep_slave_threads;
+extern mysql_cond_t COND_wsrep_slave_threads;
+extern mysql_mutex_t LOCK_wsrep_cluster_config;
extern mysql_mutex_t LOCK_wsrep_desync;
+extern mysql_mutex_t LOCK_wsrep_SR_pool;
+extern mysql_mutex_t LOCK_wsrep_SR_store;
extern mysql_mutex_t LOCK_wsrep_config_state;
-extern wsrep_aborting_thd_t wsrep_aborting_thd;
+extern mysql_mutex_t LOCK_wsrep_group_commit;
extern my_bool wsrep_emulate_bin_log;
extern int wsrep_to_isolation;
#ifdef GTID_SUPPORT
extern rpl_sidno wsrep_sidno;
#endif /* GTID_SUPPORT */
extern my_bool wsrep_preordered_opt;
-extern handlerton *wsrep_hton;
#ifdef HAVE_PSI_INTERFACE
+
+extern PSI_cond_key key_COND_wsrep_thd;
+
extern PSI_mutex_key key_LOCK_wsrep_ready;
extern PSI_mutex_key key_COND_wsrep_ready;
extern PSI_mutex_key key_LOCK_wsrep_sst;
@@ -275,12 +326,17 @@ extern PSI_mutex_key key_LOCK_wsrep_sst_init;
extern PSI_cond_key key_COND_wsrep_sst_init;
extern PSI_mutex_key key_LOCK_wsrep_sst_thread;
extern PSI_cond_key key_COND_wsrep_sst_thread;
-extern PSI_mutex_key key_LOCK_wsrep_rollback;
-extern PSI_cond_key key_COND_wsrep_rollback;
extern PSI_mutex_key key_LOCK_wsrep_replaying;
extern PSI_cond_key key_COND_wsrep_replaying;
extern PSI_mutex_key key_LOCK_wsrep_slave_threads;
+extern PSI_cond_key key_COND_wsrep_slave_threads;
+extern PSI_mutex_key key_LOCK_wsrep_cluster_config;
extern PSI_mutex_key key_LOCK_wsrep_desync;
+extern PSI_mutex_key key_LOCK_wsrep_SR_pool;
+extern PSI_mutex_key key_LOCK_wsrep_SR_store;
+extern PSI_mutex_key key_LOCK_wsrep_global_seqno;
+extern PSI_mutex_key key_LOCK_wsrep_thd_queue;
+extern PSI_cond_key key_COND_wsrep_thd_queue;
extern PSI_file_key key_file_wsrep_gra_log;
#endif /* HAVE_PSI_INTERFACE */
@@ -288,42 +344,33 @@ struct TABLE_LIST;
class Alter_info;
int wsrep_to_isolation_begin(THD *thd, const char *db_, const char *table_,
const TABLE_LIST* table_list,
- Alter_info* alter_info = NULL);
+ Alter_info* alter_info= NULL);
+
void wsrep_to_isolation_end(THD *thd);
-void wsrep_cleanup_transaction(THD *thd);
+
+bool wsrep_append_SR_keys(THD *thd);
int wsrep_to_buf_helper(
THD* thd, const char *query, uint query_len, uchar** buf, size_t* buf_len);
+int wsrep_create_trigger_query(THD *thd, uchar** buf, size_t* buf_len);
int wsrep_create_event_query(THD *thd, uchar** buf, size_t* buf_len);
-extern bool
-wsrep_grant_mdl_exception(MDL_context *requestor_ctx,
- MDL_ticket *ticket,
- const MDL_key *key);
-IO_CACHE * get_trans_log(THD * thd);
-bool wsrep_trans_cache_is_empty(THD *thd);
-void thd_binlog_flush_pending_rows_event(THD *thd, bool stmt_end);
-void thd_binlog_rollback_stmt(THD * thd);
-void thd_binlog_trx_reset(THD * thd);
+bool wsrep_stmt_rollback_is_safe(THD* thd);
-typedef void (*wsrep_thd_processor_fun)(THD *);
-pthread_handler_t start_wsrep_THD(void *arg);
-int wsrep_wait_committing_connections_close(int wait_time);
-extern void wsrep_close_client_connections(my_bool wait_to_end,
- THD *except_caller_thd = NULL);
-void wsrep_close_applier(THD *thd);
-void wsrep_close_applier_threads(int count);
-void wsrep_wait_appliers_close(THD *thd);
-void wsrep_kill_mysql(THD *thd);
-void wsrep_close_threads(THD *thd);
-void wsrep_copy_query(THD *thd);
-bool wsrep_is_show_query(enum enum_sql_command command);
-void wsrep_replay_transaction(THD *thd);
-bool wsrep_create_like_table(THD* thd, TABLE_LIST* table,
- TABLE_LIST* src_table,
- HA_CREATE_INFO *create_info);
+void wsrep_init_sidno(const wsrep_uuid_t&);
bool wsrep_node_is_donor();
bool wsrep_node_is_synced();
+void wsrep_init_SR();
+void wsrep_verify_SE_checkpoint(const wsrep_uuid_t& uuid, wsrep_seqno_t seqno);
+int wsrep_replay_from_SR_store(THD*, const wsrep_trx_meta_t&);
+void wsrep_node_uuid(wsrep_uuid_t&);
+
+class Log_event;
+int wsrep_ignored_error_code(Log_event* ev, int error);
+int wsrep_must_ignore_error(THD* thd);
+
+bool wsrep_replicate_GTID(THD* thd);
+
typedef struct wsrep_key_arr
{
wsrep_key_t* keys;
@@ -336,38 +383,104 @@ bool wsrep_prepare_keys_for_isolation(THD* thd,
wsrep_key_arr_t* ka);
void wsrep_keys_free(wsrep_key_arr_t* key_arr);
-#define WSREP_BINLOG_FORMAT(my_format) \
- ((wsrep_forced_binlog_format != BINLOG_FORMAT_UNSPEC) ? \
- wsrep_forced_binlog_format : my_format)
+extern void
+wsrep_handle_mdl_conflict(MDL_context *requestor_ctx,
+ MDL_ticket *ticket,
+ const MDL_key *key);
+IO_CACHE * get_trans_log(THD * thd);
+bool wsrep_trans_cache_is_empty(THD *thd);
+void thd_binlog_flush_pending_rows_event(THD *thd, bool stmt_end);
+void thd_binlog_rollback_stmt(THD * thd);
+void thd_binlog_trx_reset(THD * thd);
+
+typedef void (*wsrep_thd_processor_fun)(THD*, void *);
+class Wsrep_thd_args
+{
+ public:
+ Wsrep_thd_args(wsrep_thd_processor_fun fun, void* args)
+ :
+ fun_ (fun),
+ args_(args)
+ { }
+
+ wsrep_thd_processor_fun fun() { return fun_; }
+
+ void* args() { return args_; }
-#else /* WITH_WSREP */
+ private:
+
+ Wsrep_thd_args(const Wsrep_thd_args&);
+ Wsrep_thd_args& operator=(const Wsrep_thd_args&);
+
+ wsrep_thd_processor_fun fun_;
+ void* args_;
+};
+
+void* start_wsrep_THD(void*);
+
+void wsrep_close_threads(THD *thd);
+bool wsrep_is_show_query(enum enum_sql_command command);
+void wsrep_replay_transaction(THD *thd);
+bool wsrep_create_like_table(THD* thd, TABLE_LIST* table,
+ TABLE_LIST* src_table,
+ HA_CREATE_INFO *create_info);
+bool wsrep_node_is_donor();
+bool wsrep_node_is_synced();
+
+/**
+ * Check if the wsrep provider (ie the Galera library) is capable of
+ * doing streaming replication.
+ * @return true if SR capable
+ */
+bool wsrep_provider_is_SR_capable();
+
+/**
+ * Initialize WSREP server instance.
+ *
+ * @return Zero on success, non-zero on error.
+ */
+int wsrep_init_server();
+
+/**
+ * Initialize WSREP globals. This should be done after server initialization
+ * is complete and the server has joined to the cluster.
+ *
+ */
+void wsrep_init_globals();
+
+/**
+ * Deinit and release WSREP resources.
+ */
+void wsrep_deinit_server();
+
+/**
+ * Convert streaming fragment unit (WSREP_FRAG_BYTES, WSREP_FRAG_ROWS...)
+ * to corresponding wsrep-lib fragment_unit
+ */
+enum wsrep::streaming_context::fragment_unit wsrep_fragment_unit(ulong unit);
+
+#else /* !WITH_WSREP */
+
+/* These macros are needed to compile MariaDB without WSREP support
+ * (e.g. embedded) */
#define WSREP(T) (0)
+#define WSREP_NNULL(T) (0)
#define WSREP_ON (0)
#define WSREP_EMULATE_BINLOG(thd) (0)
-#define WSREP_CLIENT(thd) (0)
-#define WSREP_FORMAT(my_format) ((ulong)my_format)
+#define WSREP_EMULATE_BINLOG_NNULL(thd) (0)
+#define WSREP_BINLOG_FORMAT(my_format) ((ulong)my_format)
#define WSREP_PROVIDER_EXISTS (0)
#define wsrep_emulate_bin_log (0)
#define wsrep_to_isolation (0)
-#define wsrep_init() (1)
-#define wsrep_prepend_PATH(X)
#define wsrep_before_SE() (0)
#define wsrep_init_startup(X)
-#define wsrep_must_sync_wait(...) (0)
-#define wsrep_sync_wait(...) (0)
-#define wsrep_to_isolation_begin(...) (0)
-#define wsrep_register_hton(...) do { } while(0)
#define wsrep_check_opts() (0)
-#define wsrep_stop_replication(X) do { } while(0)
-#define wsrep_inited (0)
-#define wsrep_deinit(X) do { } while(0)
-#define wsrep_recover() do { } while(0)
-#define wsrep_slave_threads (1)
-#define wsrep_replicate_myisam (0)
#define wsrep_thr_init() do {} while(0)
#define wsrep_thr_deinit() do {} while(0)
-#define wsrep_running_threads (0)
-#define WSREP_BINLOG_FORMAT(my_format) my_format
+#define wsrep_init_globals() do {} while(0)
+#define wsrep_create_appliers(X) do {} while(0)
+
#endif /* WITH_WSREP */
+
#endif /* WSREP_MYSQLD_H */
diff --git a/sql/wsrep_notify.cc b/sql/wsrep_notify.cc
index 92bcc8eda43..ef9dd872075 100644
--- a/sql/wsrep_notify.cc
+++ b/sql/wsrep_notify.cc
@@ -18,22 +18,8 @@
#include "wsrep_priv.h"
#include "wsrep_utils.h"
-
-static const char* _status_str(wsrep_member_status_t status)
-{
- switch (status)
- {
- case WSREP_MEMBER_UNDEFINED: return "Undefined";
- case WSREP_MEMBER_JOINER: return "Joiner";
- case WSREP_MEMBER_DONOR: return "Donor";
- case WSREP_MEMBER_JOINED: return "Joined";
- case WSREP_MEMBER_SYNCED: return "Synced";
- default: return "Error(?)";
- }
-}
-
-void wsrep_notify_status (wsrep_member_status_t status,
- const wsrep_view_info_t* view)
+void wsrep_notify_status(enum wsrep::server_state::state status,
+ const wsrep::view* view)
{
if (!wsrep_notify_cmd || 0 == strlen(wsrep_notify_cmd))
{
@@ -42,51 +28,44 @@ void wsrep_notify_status (wsrep_member_status_t status,
}
char cmd_buf[1 << 16]; // this can be long
- long cmd_len = sizeof(cmd_buf) - 1;
- char* cmd_ptr = cmd_buf;
- long cmd_off = 0;
+ long cmd_len= sizeof(cmd_buf) - 1;
+ char* cmd_ptr= cmd_buf;
+ long cmd_off= 0;
cmd_off += snprintf (cmd_ptr + cmd_off, cmd_len - cmd_off, "%s",
wsrep_notify_cmd);
- if (status >= WSREP_MEMBER_UNDEFINED && status < WSREP_MEMBER_ERROR)
- {
- cmd_off += snprintf (cmd_ptr + cmd_off, cmd_len - cmd_off, " --status %s",
- _status_str(status));
- }
- else
- {
- /* here we preserve provider error codes */
- cmd_off += snprintf (cmd_ptr + cmd_off, cmd_len - cmd_off,
- " --status 'Error(%d)'", status);
- }
+ cmd_off += snprintf (cmd_ptr + cmd_off, cmd_len - cmd_off, " --status %s",
+ to_c_string(status));
- if (0 != view)
+ if (view != NULL)
{
- char uuid_str[40];
-
- wsrep_uuid_print (&view->state_id.uuid, uuid_str, sizeof(uuid_str));
+ std::ostringstream uuid;
+ uuid << view->state_id().id();
cmd_off += snprintf (cmd_ptr + cmd_off, cmd_len - cmd_off,
- " --uuid %s", uuid_str);
+ " --uuid %s", uuid.str().c_str());
cmd_off += snprintf (cmd_ptr + cmd_off, cmd_len - cmd_off,
- " --primary %s", view->view >= 0 ? "yes" : "no");
+ " --primary %s", view->view_seqno().get() >= 0 ? "yes" : "no");
cmd_off += snprintf (cmd_ptr + cmd_off, cmd_len - cmd_off,
- " --index %d", view->my_idx);
+ " --index %zd", view->own_index());
- if (view->memb_num)
+ const std::vector<wsrep::view::member>& members(view->members());
+ if (members.size())
{
- cmd_off += snprintf (cmd_ptr + cmd_off, cmd_len - cmd_off, " --members");
-
- for (int i = 0; i < view->memb_num; i++)
- {
- wsrep_uuid_print (&view->members[i].id, uuid_str, sizeof(uuid_str));
- cmd_off += snprintf (cmd_ptr + cmd_off, cmd_len - cmd_off,
- "%c%s/%s/%s", i > 0 ? ',' : ' ',
- uuid_str, view->members[i].name,
- view->members[i].incoming);
- }
+ cmd_off += snprintf (cmd_ptr + cmd_off, cmd_len - cmd_off, " --members");
+
+ for (unsigned int i= 0; i < members.size(); i++)
+ {
+ std::ostringstream id;
+ id << members[i].id();
+ cmd_off += snprintf(cmd_ptr + cmd_off, cmd_len - cmd_off,
+ "%c%s/%s/%s", i > 0 ? ',' : ' ',
+ id.str().c_str(),
+ members[i].name().c_str(),
+ members[i].incoming().c_str());
+ }
}
}
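
Putting the snprintf() calls above together, the assembled notification command has roughly this shape; the --status token comes from wsrep-lib's to_c_string() and every member is printed as uuid/name/incoming-address (all values below are placeholders):

  <wsrep_notify_cmd> --status <state> --uuid <cluster-uuid> --primary yes \
      --index 0 --members <node-uuid>/<node-name>/<incoming>,<node-uuid>/<node-name>/<incoming>
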
@@ -100,7 +79,7 @@ void wsrep_notify_status (wsrep_member_status_t status,
wsp::process p(cmd_ptr, "r", NULL);
p.wait();
- int err = p.error();
+ int err= p.error();
if (err)
{
diff --git a/sql/wsrep_plugin.cc b/sql/wsrep_plugin.cc
new file mode 100644
index 00000000000..743b8a593b8
--- /dev/null
+++ b/sql/wsrep_plugin.cc
@@ -0,0 +1,53 @@
+/* Copyright 2016 Codership Oy <http://www.codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "wsrep_trans_observer.h"
+#include "wsrep_mysqld.h"
+
+#include <mysql/plugin.h>
+
+static int wsrep_plugin_init(void *p)
+{
+ WSREP_DEBUG("wsrep_plugin_init()");
+ return 0;
+}
+
+static int wsrep_plugin_deinit(void *p)
+{
+ WSREP_DEBUG("wsrep_plugin_deinit()");
+ return 0;
+}
+
+struct Mysql_replication wsrep_plugin= {
+ MYSQL_REPLICATION_INTERFACE_VERSION
+};
+
+maria_declare_plugin(wsrep)
+{
+ MYSQL_REPLICATION_PLUGIN,
+ &wsrep_plugin,
+ "wsrep",
+ "Codership Oy",
+ "Wsrep replication plugin",
+ PLUGIN_LICENSE_GPL,
+ wsrep_plugin_init,
+ wsrep_plugin_deinit,
+ 0x0100,
+ NULL, /* Status variables */
+ NULL, /* System variables */
+ "1.0", /* Version (string) */
+ MariaDB_PLUGIN_MATURITY_STABLE /* Maturity */
+}
+maria_declare_plugin_end;
diff --git a/sql/wsrep_priv.h b/sql/wsrep_priv.h
index 222a49cc2ab..68773d27948 100644
--- a/sql/wsrep_priv.h
+++ b/sql/wsrep_priv.h
@@ -19,8 +19,9 @@
#ifndef WSREP_PRIV_H
#define WSREP_PRIV_H
+#include <my_global.h>
#include "wsrep_mysqld.h"
-#include "../wsrep/wsrep_api.h"
+#include "wsrep_schema.h"
#include <log.h>
#include <pthread.h>
@@ -31,25 +32,20 @@ my_bool wsrep_ready_set (my_bool x);
ssize_t wsrep_sst_prepare (void** msg);
wsrep_cb_status wsrep_sst_donate_cb (void* app_ctx,
void* recv_ctx,
- const void* msg, size_t msg_len,
+ const wsrep_buf_t* msg,
const wsrep_gtid_t* state_id,
- const char* state, size_t state_len,
+ const wsrep_buf_t* state,
bool bypass);
extern wsrep_uuid_t local_uuid;
extern wsrep_seqno_t local_seqno;
+extern Wsrep_schema* wsrep_schema;
// a helper function
-bool wsrep_sst_received (wsrep_t* const wsrep,
- const wsrep_uuid_t& uuid,
- const wsrep_seqno_t seqno,
- const void* const state,
- const size_t state_len,
- const bool implicit);
-/*! SST thread signals init thread about sst completion */
-void wsrep_sst_complete(const wsrep_uuid_t*, wsrep_seqno_t, bool);
-
-void wsrep_notify_status (wsrep_member_status_t new_status,
- const wsrep_view_info_t* view = 0);
+void wsrep_sst_received(THD*, const wsrep_uuid_t&, wsrep_seqno_t,
+ const void*, size_t);
+
+void wsrep_notify_status(enum wsrep::server_state::state status,
+ const wsrep::view* view= 0);
#endif /* WSREP_PRIV_H */
diff --git a/sql/wsrep_schema.cc b/sql/wsrep_schema.cc
new file mode 100644
index 00000000000..82c085a61d2
--- /dev/null
+++ b/sql/wsrep_schema.cc
@@ -0,0 +1,1366 @@
+/* Copyright (C) 2015-2019 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */
+
+#include "mariadb.h"
+
+#include "table.h"
+#include "key.h"
+#include "sql_base.h"
+#include "sql_parse.h"
+#include "sql_update.h"
+#include "transaction.h"
+
+#include "mysql/service_wsrep.h"
+#include "wsrep_schema.h"
+#include "wsrep_applier.h"
+#include "wsrep_xid.h"
+#include "wsrep_binlog.h"
+#include "wsrep_high_priority_service.h"
+#include "wsrep_storage_service.h"
+
+#include <string>
+#include <sstream>
+
+#define WSREP_SCHEMA "mysql"
+#define WSREP_STREAMING_TABLE "wsrep_streaming_log"
+#define WSREP_CLUSTER_TABLE "wsrep_cluster"
+#define WSREP_MEMBERS_TABLE "wsrep_cluster_members"
+
+const char* wsrep_sr_table_name_full= WSREP_SCHEMA "/" WSREP_STREAMING_TABLE;
+
+static const std::string wsrep_schema_str= WSREP_SCHEMA;
+static const std::string sr_table_str= WSREP_STREAMING_TABLE;
+static const std::string cluster_table_str= WSREP_CLUSTER_TABLE;
+static const std::string members_table_str= WSREP_MEMBERS_TABLE;
+
+static const std::string create_cluster_table_str=
+ "CREATE TABLE IF NOT EXISTS " + wsrep_schema_str + "." + cluster_table_str +
+ "("
+ "cluster_uuid CHAR(36) PRIMARY KEY,"
+ "view_id BIGINT NOT NULL,"
+ "view_seqno BIGINT NOT NULL,"
+ "protocol_version INT NOT NULL,"
+ "capabilities INT NOT NULL"
+ ") ENGINE=InnoDB";
+
+static const std::string create_members_table_str=
+ "CREATE TABLE IF NOT EXISTS " + wsrep_schema_str + "." + members_table_str +
+ "("
+ "node_uuid CHAR(36) PRIMARY KEY,"
+ "cluster_uuid CHAR(36) NOT NULL,"
+ "node_name CHAR(32) NOT NULL,"
+ "node_incoming_address VARCHAR(256) NOT NULL"
+ ") ENGINE=InnoDB";
+
+#ifdef WSREP_SCHEMA_MEMBERS_HISTORY
+static const std::string cluster_member_history_table_str= "wsrep_cluster_member_history";
+static const std::string create_members_history_table_str=
+ "CREATE TABLE IF NOT EXISTS " + wsrep_schema_str + "." + cluster_member_history_table_str +
+ "("
+ "node_uuid CHAR(36) PRIMARY KEY,"
+ "cluster_uuid CHAR(36) NOT NULL,"
+ "last_view_id BIGINT NOT NULL,"
+ "last_view_seqno BIGINT NOT NULL,"
+ "node_name CHAR(32) NOT NULL,"
+ "node_incoming_address VARCHAR(256) NOT NULL"
+ ") ENGINE=InnoDB";
+#endif /* WSREP_SCHEMA_MEMBERS_HISTORY */
+
+static const std::string create_frag_table_str=
+ "CREATE TABLE IF NOT EXISTS " + wsrep_schema_str + "." + sr_table_str +
+ "("
+ "node_uuid CHAR(36), "
+ "trx_id BIGINT, "
+ "seqno BIGINT, "
+ "flags INT NOT NULL, "
+ "frag LONGBLOB NOT NULL, "
+ "PRIMARY KEY (node_uuid, trx_id, seqno)"
+ ") ENGINE=InnoDB";
+
+static const std::string delete_from_cluster_table=
+ "DELETE FROM " + wsrep_schema_str + "." + cluster_table_str;
+
+static const std::string delete_from_members_table=
+ "DELETE FROM " + wsrep_schema_str + "." + members_table_str;
+
+namespace Wsrep_schema_impl
+{
+
+class binlog_off
+{
+public:
+ binlog_off(THD* thd)
+ : m_thd(thd)
+ , m_option_bits(thd->variables.option_bits)
+ , m_sql_log_bin(thd->variables.sql_log_bin)
+ {
+ thd->variables.option_bits&= ~OPTION_BIN_LOG;
+ thd->variables.sql_log_bin= 0;
+ }
+ ~binlog_off()
+ {
+ m_thd->variables.option_bits= m_option_bits;
+ m_thd->variables.sql_log_bin= m_sql_log_bin;
+ }
+private:
+ THD* m_thd;
+ ulonglong m_option_bits;
+ my_bool m_sql_log_bin;
+};
+
+class wsrep_off
+{
+public:
+ wsrep_off(THD* thd)
+ : m_thd(thd)
+ , m_wsrep_on(thd->variables.wsrep_on)
+ {
+ thd->variables.wsrep_on= 0;
+ }
+ ~wsrep_off()
+ {
+ m_thd->variables.wsrep_on= m_wsrep_on;
+ }
+private:
+ THD* m_thd;
+ my_bool m_wsrep_on;
+};
+
+class thd_context_switch
+{
+public:
+ thd_context_switch(THD *orig_thd, THD *cur_thd)
+ : m_orig_thd(orig_thd)
+ , m_cur_thd(cur_thd)
+ {
+ m_orig_thd->reset_globals();
+ m_cur_thd->store_globals();
+ }
+ ~thd_context_switch()
+ {
+ m_cur_thd->reset_globals();
+ m_orig_thd->store_globals();
+ }
+private:
+ THD *m_orig_thd;
+ THD *m_cur_thd;
+};
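+
+/*
+  Usage sketch for the RAII helpers above (illustrative only, assuming a
+  THD* thd in scope): they are stacked for the duration of a wsrep schema
+  operation and restore the original settings when they go out of scope.
+
+    {
+      Wsrep_schema_impl::wsrep_off wsrep_off(thd);     // no replication
+      Wsrep_schema_impl::binlog_off binlog_off(thd);   // no binlogging
+      // ... read or modify wsrep schema tables ...
+    }   // original option_bits, sql_log_bin and wsrep_on restored here
+*/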
+
+static int execute_SQL(THD* thd, const char* sql, uint length) {
+ DBUG_ENTER("Wsrep_schema::execute_SQL()");
+ int err= 0;
+
+ PSI_statement_locker *parent_locker= thd->m_statement_psi;
+ Parser_state parser_state;
+
+ WSREP_DEBUG("SQL: %d %s thd: %lld", length, sql, (long long)thd->thread_id);
+
+ if (parser_state.init(thd, (char*)sql, length) == 0) {
+ thd->reset_for_next_command();
+ lex_start(thd);
+
+ thd->m_statement_psi= NULL;
+
+ thd->set_query((char*)sql, length);
+ thd->set_query_id(next_query_id());
+
+ mysql_parse(thd, (char*)sql, length, & parser_state, FALSE, FALSE);
+
+ if (thd->is_error()) {
+ WSREP_WARN("Wsrep_schema::execute_sql() failed, %d %s\nSQL: %s",
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message(),
+ sql);
+ err= 1;
+ }
+ thd->m_statement_psi= parent_locker;
+ thd->end_statement();
+ thd->reset_query();
+ close_thread_tables(thd);
+ delete_explain_query(thd->lex);
+ }
+ else {
+ WSREP_WARN("SR init failure");
+ }
+ thd->cleanup_after_query();
+ DBUG_RETURN(err);
+}
+
+/*
+ Initialize thd for next "statement"
+ */
+static void init_stmt(THD* thd) {
+ thd->reset_for_next_command();
+}
+
+static void finish_stmt(THD* thd) {
+ trans_commit_stmt(thd);
+ close_thread_tables(thd);
+}
+
+static int open_table(THD* thd,
+ const LEX_CSTRING *schema_name,
+ const LEX_CSTRING *table_name,
+ enum thr_lock_type const lock_type,
+ TABLE** table) {
+ assert(table);
+ *table= NULL;
+
+ DBUG_ENTER("Wsrep_schema::open_table()");
+
+ TABLE_LIST tables;
+ uint flags= (MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK |
+ MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY |
+ MYSQL_OPEN_IGNORE_FLUSH |
+ MYSQL_LOCK_IGNORE_TIMEOUT);
+
+ tables.init_one_table(schema_name,
+ table_name,
+ NULL, lock_type);
+
+ if (!open_n_lock_single_table(thd, &tables, tables.lock_type, flags)) {
+ close_thread_tables(thd);
+ my_error(ER_NO_SUCH_TABLE, MYF(0), schema_name->str, table_name->str);
+ DBUG_RETURN(1);
+ }
+
+ *table= tables.table;
+ (*table)->use_all_columns();
+
+ DBUG_RETURN(0);
+}
+
+
+static int open_for_write(THD* thd, const char* table_name, TABLE** table) {
+ LEX_CSTRING schema_str= { wsrep_schema_str.c_str(), wsrep_schema_str.length() };
+ LEX_CSTRING table_str= { table_name, strlen(table_name) };
+ if (Wsrep_schema_impl::open_table(thd, &schema_str, &table_str, TL_WRITE,
+ table)) {
+ WSREP_ERROR("Failed to open table %s.%s for writing",
+ schema_str.str, table_name);
+ return 1;
+ }
+ empty_record(*table);
+ (*table)->use_all_columns();
+ restore_record(*table, s->default_values);
+ return 0;
+}
+
+static void store(TABLE* table, uint field, const Wsrep_id& id) {
+ assert(field < table->s->fields);
+ std::ostringstream os;
+ os << id;
+ table->field[field]->store(os.str().c_str(),
+ os.str().size(),
+ &my_charset_bin);
+}
+
+
+template <typename INTTYPE>
+static void store(TABLE* table, uint field, const INTTYPE val) {
+ assert(field < table->s->fields);
+ table->field[field]->store(val);
+}
+
+template <typename CHARTYPE>
+static void store(TABLE* table, uint field, const CHARTYPE* str, size_t str_len) {
+ assert(field < table->s->fields);
+ table->field[field]->store((const char*)str,
+ str_len,
+ &my_charset_bin);
+}
+
+static void store(TABLE* table, uint field, const std::string& str)
+{
+ store(table, field, str.c_str(), str.size());
+}
+
+static int update_or_insert(TABLE* table) {
+ DBUG_ENTER("Wsrep_schema::update_or_insert()");
+ int ret= 0;
+ char* key;
+ int error;
+
+ /*
+ Verify that the table has primary key defined.
+ */
+ if (table->s->primary_key >= MAX_KEY ||
+ !table->s->keys_in_use.is_set(table->s->primary_key)) {
+ WSREP_ERROR("No primary key for %s.%s",
+ table->s->db.str, table->s->table_name.str);
+ DBUG_RETURN(1);
+ }
+
+ /*
+ Find the record and update or insert a new one if not found.
+ */
+ if (!(key= (char*) my_safe_alloca(table->s->max_unique_length))) {
+ WSREP_ERROR("Error allocating %ud bytes for key",
+ table->s->max_unique_length);
+ DBUG_RETURN(1);
+ }
+
+ key_copy((uchar*) key, table->record[0],
+ table->key_info + table->s->primary_key, 0);
+
+ if ((error= table->file->ha_index_read_idx_map(table->record[1],
+ table->s->primary_key,
+ (uchar*) key,
+ HA_WHOLE_KEY,
+ HA_READ_KEY_EXACT))) {
+ /*
+ Row not found, insert a new one.
+ */
+ if ((error= table->file->ha_write_row(table->record[0]))) {
+ WSREP_ERROR("Error writing into %s.%s: %d",
+ table->s->db.str,
+ table->s->table_name.str,
+ error);
+ ret= 1;
+ }
+ }
+ else if (!records_are_comparable(table) || compare_record(table)) {
+ /*
+ Record has changed
+ */
+ if ((error= table->file->ha_update_row(table->record[1],
+ table->record[0])) &&
+ error != HA_ERR_RECORD_IS_THE_SAME) {
+ WSREP_ERROR("Error updating record in %s.%s: %d",
+ table->s->db.str,
+ table->s->table_name.str,
+ error);
+ ret= 1;
+ }
+ }
+
+ my_safe_afree(key, table->s->max_unique_length);
+
+ DBUG_RETURN(ret);
+}
+
+static int insert(TABLE* table) {
+ DBUG_ENTER("Wsrep_schema::insert()");
+ int ret= 0;
+ int error;
+
+ /*
+ Verify that the table has primary key defined.
+ */
+ if (table->s->primary_key >= MAX_KEY ||
+ !table->s->keys_in_use.is_set(table->s->primary_key)) {
+ WSREP_ERROR("No primary key for %s.%s",
+ table->s->db.str, table->s->table_name.str);
+ DBUG_RETURN(1);
+ }
+
+ if ((error= table->file->ha_write_row(table->record[0]))) {
+ WSREP_ERROR("Error writing into %s.%s: %d",
+ table->s->db.str,
+ table->s->table_name.str,
+ error);
+ ret= 1;
+ }
+
+ DBUG_RETURN(ret);
+}
+
+static int delete_row(TABLE* table) {
+ int error;
+ int retry= 3;
+
+ do {
+ error= table->file->ha_delete_row(table->record[0]);
+ retry--;
+ } while (error && retry);
+
+ if (error) {
+ WSREP_ERROR("Error deleting row from %s.%s: %d",
+ table->s->db.str,
+ table->s->table_name.str,
+ error);
+ return 1;
+ }
+ return 0;
+}
+
+static int open_for_read(THD* thd, const char* table_name, TABLE** table) {
+
+ LEX_CSTRING schema_str= { wsrep_schema_str.c_str(), wsrep_schema_str.length() };
+ LEX_CSTRING table_str= { table_name, strlen(table_name) };
+ if (Wsrep_schema_impl::open_table(thd, &schema_str, &table_str, TL_READ,
+ table)) {
+ WSREP_ERROR("Failed to open table %s.%s for reading",
+ schema_str.str, table_name);
+ return 1;
+ }
+ empty_record(*table);
+ (*table)->use_all_columns();
+ restore_record(*table, s->default_values);
+ return 0;
+}
+
+/*
+ Init table for sequential scan.
+
+ @return 0 in case of success, 1 in case of error.
+ */
+static int init_for_scan(TABLE* table) {
+ int error;
+ if ((error= table->file->ha_rnd_init(TRUE))) {
+ WSREP_ERROR("Failed to init table for scan: %d", error);
+ return 1;
+ }
+ return 0;
+}
+/*
+ Scan next record. For return codes see handler::ha_rnd_next()
+
+ @return 0 in case of success, error code in case of error
+ */
+static int next_record(TABLE* table) {
+ int error;
+ if ((error= table->file->ha_rnd_next(table->record[0])) &&
+ error != HA_ERR_END_OF_FILE) {
+ WSREP_ERROR("Failed to read next record: %d", error);
+ }
+ return error;
+}
+
+/*
+ End scan.
+
+ @return 0 in case of success, 1 in case of error.
+ */
+static int end_scan(TABLE* table) {
+ int error;
+ if ((error= table->file->ha_rnd_end())) {
+ WSREP_ERROR("Failed to end scan: %d", error);
+ return 1;
+ }
+ return 0;
+}
+
+static int scan(TABLE* table, uint field, wsrep::id& id)
+{
+ assert(field < table->s->fields);
+ String uuid_str;
+ (void)table->field[field]->val_str(&uuid_str);
+ id= wsrep::id(std::string(uuid_str.c_ptr(), uuid_str.length()));
+ return 0;
+}
+
+template <typename INTTYPE>
+static int scan(TABLE* table, uint field, INTTYPE& val)
+{
+ assert(field < table->s->fields);
+ val= table->field[field]->val_int();
+ return 0;
+}
+
+static int scan(TABLE* table, uint field, char* strbuf, uint strbuf_len)
+{
+ String str;
+ (void)table->field[field]->val_str(&str);
+ strncpy(strbuf, str.c_ptr(), std::min(str.length(), strbuf_len));
+ strbuf[strbuf_len - 1]= '\0';
+ return 0;
+}
+
+/*
+ Scan member
+ TODO: filter members by cluster UUID
+ */
+static int scan_member(TABLE* table,
+ const Wsrep_id& cluster_uuid,
+ std::vector<Wsrep_view::member>& members)
+{
+ Wsrep_id member_id;
+ char member_name[128]= { 0, };
+ char member_incoming[128]= { 0, };
+
+ if (scan(table, 0, member_id) ||
+ scan(table, 2, member_name, sizeof(member_name)) ||
+ scan(table, 3, member_incoming, sizeof(member_incoming))) {
+ return 1;
+ }
+
+ if (members.empty() == false) {
+ assert(members.rbegin()->id() < member_id);
+ }
+
+ try {
+ members.push_back(Wsrep_view::member(member_id,
+ member_name,
+ member_incoming));
+ }
+ catch (...) {
+ WSREP_ERROR("Caught exception while scanning members table");
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ Init table for index scan and retrieve first record
+
+ @return 0 in case of success, error code in case of error.
+ */
+static int init_for_index_scan(TABLE* table, const uchar* key,
+ key_part_map map) {
+ int error;
+ if ((error= table->file->ha_index_init(table->s->primary_key, true))) {
+ WSREP_ERROR("Failed to init table for index scan: %d", error);
+ return error;
+ }
+
+ error= table->file->ha_index_read_map(table->record[0],
+ key, map, HA_READ_KEY_EXACT);
+ switch(error) {
+ case 0:
+ case HA_ERR_END_OF_FILE:
+ case HA_ERR_KEY_NOT_FOUND:
+ case HA_ERR_ABORTED_BY_USER:
+ break;
+ case -1:
+ WSREP_DEBUG("init_for_index_scan interrupted");
+ break;
+ default:
+ WSREP_ERROR("init_for_index_scan failed to read first record, error %d", error);
+ }
+ return error;
+}
+
+/*
+ End index scan.
+
+ @return 0 in case of success, 1 in case of error.
+ */
+static int end_index_scan(TABLE* table) {
+ int error;
+ if ((error= table->file->ha_index_end())) {
+ WSREP_ERROR("Failed to end scan: %d", error);
+ return 1;
+ }
+ return 0;
+}
+
+static void make_key(TABLE* table, uchar* key, key_part_map* map, int parts) {
+ uint prefix_length= 0;
+ KEY_PART_INFO* key_part= table->key_info->key_part;
+ for (int i=0; i < parts; i++)
+ prefix_length += key_part[i].store_length;
+ *map= make_prev_keypart_map(parts);
+ key_copy(key, table->record[0], table->key_info, prefix_length);
+}
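+
+/*
+  Illustrative sketch of how the index scan helpers above are typically
+  combined elsewhere in this file (server_id, transaction_id and seqno
+  stand for caller-provided values; table is an already opened TABLE*
+  whose primary key is built from its first three fields):
+
+    uchar key[MAX_KEY_LENGTH];
+    key_part_map key_map= 0;
+    store(table, 0, server_id);         // fill key columns in record[0]
+    store(table, 1, transaction_id);
+    store(table, 2, seqno);
+    make_key(table, key, &key_map, 3);  // build key image and keypart map
+    if (init_for_index_scan(table, key, key_map) == 0)
+    {
+      // matching row is positioned in table->record[0]
+    }
+    end_index_scan(table);
+*/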
+} /* namespace Wsrep_schema_impl */
+
+
+Wsrep_schema::Wsrep_schema()
+{
+}
+
+Wsrep_schema::~Wsrep_schema()
+{ }
+
+static void wsrep_init_thd_for_schema(THD *thd)
+{
+ thd->security_ctx->skip_grants();
+ thd->system_thread= SYSTEM_THREAD_GENERIC;
+
+ thd->real_id=pthread_self(); // Keep purify happy
+
+ thd->prior_thr_create_utime= thd->start_utime= thd->thr_create_utime;
+
+ /* No wsrep replication */
+ thd->variables.wsrep_on= 0;
+ /* No binlogging */
+ thd->variables.sql_log_bin= 0;
+ thd->variables.option_bits&= ~OPTION_BIN_LOG;
+ /* No general log */
+ thd->variables.option_bits|= OPTION_LOG_OFF;
+ /* Read committed isolation to avoid gap locking */
+ thd->variables.tx_isolation= ISO_READ_COMMITTED;
+ thd->store_globals();
+}
+
+int Wsrep_schema::init()
+{
+ DBUG_ENTER("Wsrep_schema::init()");
+ int ret;
+ THD* thd= new THD(next_thread_id());
+ if (!thd) {
+ WSREP_ERROR("Unable to get thd");
+ DBUG_RETURN(1);
+ }
+ thd->thread_stack= (char*)&thd;
+ wsrep_init_thd_for_schema(thd);
+
+ if (Wsrep_schema_impl::execute_SQL(thd, create_cluster_table_str.c_str(),
+ create_cluster_table_str.size()) ||
+ Wsrep_schema_impl::execute_SQL(thd, create_members_table_str.c_str(),
+ create_members_table_str.size()) ||
+#ifdef WSREP_SCHEMA_MEMBERS_HISTORY
+ Wsrep_schema_impl::execute_SQL(thd,
+ create_members_history_table_str.c_str(),
+ create_members_history_table_str.size()) ||
+#endif /* WSREP_SCHEMA_MEMBERS_HISTORY */
+ Wsrep_schema_impl::execute_SQL(thd,
+ create_frag_table_str.c_str(),
+ create_frag_table_str.size())) {
+ ret= 1;
+ }
+ else {
+ ret= 0;
+ }
+
+ delete thd;
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_schema::store_view(THD* thd, const Wsrep_view& view)
+{
+ DBUG_ENTER("Wsrep_schema::store_view()");
+ assert(view.status() == Wsrep_view::primary);
+ int ret= 1;
+ int error;
+ TABLE* cluster_table= 0;
+ TABLE* members_table= 0;
+#ifdef WSREP_SCHEMA_MEMBERS_HISTORY
+ TABLE* members_history_table= 0;
+#endif /* WSREP_SCHEMA_MEMBERS_HISTORY */
+
+ Wsrep_schema_impl::wsrep_off wsrep_off(thd);
+ Wsrep_schema_impl::binlog_off binlog_off(thd);
+
+ /*
+ Clean up cluster table and members table.
+ */
+ if (Wsrep_schema_impl::execute_SQL(thd,
+ delete_from_cluster_table.c_str(),
+ delete_from_cluster_table.size()) ||
+ Wsrep_schema_impl::execute_SQL(thd,
+ delete_from_members_table.c_str(),
+ delete_from_members_table.size())) {
+ goto out;
+ }
+
+ /*
+ Store cluster view info
+ */
+ Wsrep_schema_impl::init_stmt(thd);
+ if (Wsrep_schema_impl::open_for_write(thd, cluster_table_str.c_str(), &cluster_table))
+ {
+ goto out;
+ }
+
+ Wsrep_schema_impl::store(cluster_table, 0, view.state_id().id());
+ Wsrep_schema_impl::store(cluster_table, 1, view.view_seqno().get());
+ Wsrep_schema_impl::store(cluster_table, 2, view.state_id().seqno().get());
+ Wsrep_schema_impl::store(cluster_table, 3, view.protocol_version());
+ Wsrep_schema_impl::store(cluster_table, 4, view.capabilities());
+
+ if ((error= Wsrep_schema_impl::update_or_insert(cluster_table)))
+ {
+ WSREP_ERROR("failed to write to cluster table: %d", error);
+ goto out;
+ }
+
+ Wsrep_schema_impl::finish_stmt(thd);
+
+ /*
+ Store info about current members
+ */
+ Wsrep_schema_impl::init_stmt(thd);
+ if (Wsrep_schema_impl::open_for_write(thd, members_table_str.c_str(),
+ &members_table))
+ {
+ WSREP_ERROR("failed to open wsrep.members table");
+ goto out;
+ }
+
+ for (size_t i= 0; i < view.members().size(); ++i)
+ {
+ Wsrep_schema_impl::store(members_table, 0, view.members()[i].id());
+ Wsrep_schema_impl::store(members_table, 1, view.state_id().id());
+ Wsrep_schema_impl::store(members_table, 2, view.members()[i].name());
+ Wsrep_schema_impl::store(members_table, 3, view.members()[i].incoming());
+ if ((error= Wsrep_schema_impl::update_or_insert(members_table)))
+ {
+ WSREP_ERROR("failed to write wsrep.members table: %d", error);
+ goto out;
+ }
+ }
+ Wsrep_schema_impl::finish_stmt(thd);
+
+#ifdef WSREP_SCHEMA_MEMBERS_HISTORY
+ /*
+ Store members history
+ */
+ Wsrep_schema_impl::init_stmt(thd);
+ if (Wsrep_schema_impl::open_for_write(thd, cluster_member_history_table_str.c_str(),
+ &members_history_table)) {
+ WSREP_ERROR("failed to open wsrep_cluster_member_history table");
+ goto out;
+ }
+
+ for (size_t i= 0; i < view.members().size(); ++i) {
+ Wsrep_schema_impl::store(members_history_table, 0, view.members()[i].id());
+ Wsrep_schema_impl::store(members_history_table, 1, view.state_id().id());
+ Wsrep_schema_impl::store(members_history_table, 2, view.view_seqno().get());
+ Wsrep_schema_impl::store(members_history_table, 3, view.state_id().seqno().get());
+ Wsrep_schema_impl::store(members_history_table, 4,
+ view.members()[i].name());
+ Wsrep_schema_impl::store(members_history_table, 5,
+ view.members()[i].incoming());
+ if ((error= Wsrep_schema_impl::update_or_insert(members_history_table))) {
+ WSREP_ERROR("failed to write wsrep_cluster_member_history table: %d", error);
+ goto out;
+ }
+ }
+ Wsrep_schema_impl::finish_stmt(thd);
+#endif /* WSREP_SCHEMA_MEMBERS_HISTORY */
+ ret= 0;
+ out:
+
+ DBUG_RETURN(ret);
+}
+
+Wsrep_view Wsrep_schema::restore_view(THD* thd, const Wsrep_id& own_id) const {
+ DBUG_ENTER("Wsrep_schema::restore_view()");
+
+ int ret= 1;
+ int error;
+
+ TABLE* cluster_table= 0;
+ bool end_cluster_scan= false;
+ TABLE* members_table= 0;
+ bool end_members_scan= false;
+
+ /* The variables below need to be initialized in case the cluster table is empty */
+ Wsrep_id cluster_uuid;
+ wsrep_seqno_t view_id= -1;
+ wsrep_seqno_t view_seqno= -1;
+ int my_idx= -1;
+ int proto_ver= 0;
+ wsrep_cap_t capabilities= 0;
+ std::vector<Wsrep_view::member> members;
+
+ // we don't want causal waits for reading non-replicated private data
+ int const wsrep_sync_wait_saved= thd->variables.wsrep_sync_wait;
+ thd->variables.wsrep_sync_wait= 0;
+
+ if (trans_begin(thd, MYSQL_START_TRANS_OPT_READ_ONLY)) {
+ WSREP_ERROR("wsrep_schema::restore_view(): Failed to start transaction");
+ goto out;
+ }
+
+ /*
+ Read cluster info from cluster table
+ */
+ Wsrep_schema_impl::init_stmt(thd);
+ if (Wsrep_schema_impl::open_for_read(thd, cluster_table_str.c_str(), &cluster_table) ||
+ Wsrep_schema_impl::init_for_scan(cluster_table)) {
+ goto out;
+ }
+
+ if (((error= Wsrep_schema_impl::next_record(cluster_table)) != 0 ||
+ Wsrep_schema_impl::scan(cluster_table, 0, cluster_uuid) ||
+ Wsrep_schema_impl::scan(cluster_table, 1, view_id) ||
+ Wsrep_schema_impl::scan(cluster_table, 2, view_seqno) ||
+ Wsrep_schema_impl::scan(cluster_table, 3, proto_ver) ||
+ Wsrep_schema_impl::scan(cluster_table, 4, capabilities)) &&
+ error != HA_ERR_END_OF_FILE) {
+ end_cluster_scan= true;
+ goto out;
+ }
+
+ if (Wsrep_schema_impl::end_scan(cluster_table)) {
+ goto out;
+ }
+ Wsrep_schema_impl::finish_stmt(thd);
+
+ /*
+ Read members from members table
+ */
+ Wsrep_schema_impl::init_stmt(thd);
+ if (Wsrep_schema_impl::open_for_read(thd, members_table_str.c_str(), &members_table) ||
+ Wsrep_schema_impl::init_for_scan(members_table)) {
+ goto out;
+ }
+ end_members_scan= true;
+
+ while (true) {
+ if ((error= Wsrep_schema_impl::next_record(members_table)) == 0) {
+ if (Wsrep_schema_impl::scan_member(members_table,
+ cluster_uuid,
+ members)) {
+ goto out;
+ }
+ }
+ else if (error == HA_ERR_END_OF_FILE) {
+ break;
+ }
+ else {
+ goto out;
+ }
+ }
+
+ end_members_scan= false;
+ if (Wsrep_schema_impl::end_scan(members_table)) {
+ goto out;
+ }
+ Wsrep_schema_impl::finish_stmt(thd);
+
+ if (own_id.is_undefined() == false) {
+ for (uint i= 0; i < members.size(); ++i) {
+ if (members[i].id() == own_id) {
+ my_idx= i;
+ break;
+ }
+ }
+ }
+
+ (void)trans_commit(thd);
+ ret= 0; /* Success*/
+ out:
+
+ if (end_cluster_scan) Wsrep_schema_impl::end_scan(cluster_table);
+ if (end_members_scan) Wsrep_schema_impl::end_scan(members_table);
+
+ if (0 != ret) {
+ trans_rollback_stmt(thd);
+ if (!trans_rollback(thd)) {
+ close_thread_tables(thd);
+ }
+ }
+ thd->mdl_context.release_transactional_locks();
+
+ thd->variables.wsrep_sync_wait= wsrep_sync_wait_saved;
+
+ if (0 == ret) {
+ Wsrep_view ret_view(
+ wsrep::gtid(cluster_uuid, Wsrep_seqno(view_seqno)),
+ Wsrep_seqno(view_id),
+ wsrep::view::primary,
+ capabilities,
+ my_idx,
+ proto_ver,
+ members
+ );
+
+ if (wsrep_debug) {
+ std::ostringstream os;
+ os << "Restored cluster view:\n" << ret_view;
+ WSREP_INFO("%s", os.str().c_str());
+ }
+ DBUG_RETURN(ret_view);
+ }
+ else
+ {
+ WSREP_ERROR("wsrep_schema::restore_view() failed.");
+ Wsrep_view ret_view;
+ DBUG_RETURN(ret_view);
+ }
+}
+
+int Wsrep_schema::append_fragment(THD* thd,
+ const wsrep::id& server_id,
+ wsrep::transaction_id transaction_id,
+ wsrep::seqno seqno,
+ int flags,
+ const wsrep::const_buffer& data)
+{
+ DBUG_ENTER("Wsrep_schema::append_fragment");
+ std::ostringstream os;
+ os << server_id;
+ WSREP_DEBUG("Append fragment(%llu) %s, %llu",
+ thd->thread_id,
+ os.str().c_str(),
+ transaction_id.get());
+ Wsrep_schema_impl::binlog_off binlog_off(thd);
+ Wsrep_schema_impl::init_stmt(thd);
+
+ TABLE* frag_table= 0;
+ if (Wsrep_schema_impl::open_for_write(thd, sr_table_str.c_str(), &frag_table))
+ {
+ trans_rollback_stmt(thd);
+ DBUG_RETURN(1);
+ }
+
+ Wsrep_schema_impl::store(frag_table, 0, server_id);
+ Wsrep_schema_impl::store(frag_table, 1, transaction_id.get());
+ Wsrep_schema_impl::store(frag_table, 2, seqno.get());
+ Wsrep_schema_impl::store(frag_table, 3, flags);
+ Wsrep_schema_impl::store(frag_table, 4, data.data(), data.size());
+
+ int error;
+ if ((error= Wsrep_schema_impl::insert(frag_table))) {
+ WSREP_ERROR("Failed to write to frag table: %d", error);
+ trans_rollback_stmt(thd);
+ DBUG_RETURN(1);
+ }
+ Wsrep_schema_impl::finish_stmt(thd);
+ DBUG_RETURN(0);
+}
+
+int Wsrep_schema::update_fragment_meta(THD* thd,
+ const wsrep::ws_meta& ws_meta)
+{
+ DBUG_ENTER("Wsrep_schema::update_fragment_meta");
+ std::ostringstream os;
+ os << ws_meta.server_id();
+ WSREP_DEBUG("update_frag_seqno(%llu) %s, %llu, seqno %lld",
+ thd->thread_id,
+ os.str().c_str(),
+ ws_meta.transaction_id().get(),
+ ws_meta.seqno().get());
+ DBUG_ASSERT(ws_meta.seqno().is_undefined() == false);
+
+ Wsrep_schema_impl::binlog_off binlog_off(thd);
+ int error;
+ uchar key[MAX_KEY_LENGTH];
+ key_part_map key_map= 0;
+ TABLE* frag_table= 0;
+
+ Wsrep_schema_impl::init_stmt(thd);
+ if (Wsrep_schema_impl::open_for_write(thd, sr_table_str.c_str(), &frag_table))
+ {
+ DBUG_RETURN(1);
+ }
+
+ /* Find record with the given uuid, trx id, and seqno -1 */
+ Wsrep_schema_impl::store(frag_table, 0, ws_meta.server_id());
+ Wsrep_schema_impl::store(frag_table, 1, ws_meta.transaction_id().get());
+ Wsrep_schema_impl::store(frag_table, 2, -1);
+ Wsrep_schema_impl::make_key(frag_table, key, &key_map, 3);
+
+ if ((error= Wsrep_schema_impl::init_for_index_scan(frag_table,
+ key, key_map)))
+ {
+ if (error == HA_ERR_END_OF_FILE || error == HA_ERR_KEY_NOT_FOUND)
+ {
+ WSREP_WARN("Record not found in %s.%s: %d",
+ frag_table->s->db.str,
+ frag_table->s->table_name.str,
+ error);
+ }
+ Wsrep_schema_impl::finish_stmt(thd);
+ DBUG_RETURN(1);
+ }
+
+ /* Copy the original record to frag_table->record[1] */
+ store_record(frag_table, record[1]);
+
+ /* Store seqno in frag_table->record[0] and update the row */
+ Wsrep_schema_impl::store(frag_table, 2, ws_meta.seqno().get());
+ if ((error= frag_table->file->ha_update_row(frag_table->record[1],
+ frag_table->record[0]))) {
+ WSREP_ERROR("Error updating record in %s.%s: %d",
+ frag_table->s->db.str,
+ frag_table->s->table_name.str,
+ error);
+ Wsrep_schema_impl::finish_stmt(thd);
+ DBUG_RETURN(1);
+ }
+
+ int ret= Wsrep_schema_impl::end_index_scan(frag_table);
+ Wsrep_schema_impl::finish_stmt(thd);
+ DBUG_RETURN(ret);
+}
+
+static int remove_fragment(THD* thd,
+ TABLE* frag_table,
+ const wsrep::id& server_id,
+ wsrep::transaction_id transaction_id,
+ wsrep::seqno seqno)
+{
+ WSREP_DEBUG("remove_fragment(%llu) trx %llu, seqno %lld",
+ thd->thread_id,
+ transaction_id.get(),
+ seqno.get());
+ int ret= 0;
+ int error;
+ uchar key[MAX_KEY_LENGTH];
+ key_part_map key_map= 0;
+
+ DBUG_ASSERT(server_id.is_undefined() == false);
+ DBUG_ASSERT(transaction_id.is_undefined() == false);
+ DBUG_ASSERT(seqno.is_undefined() == false);
+
+ /*
+ Remove record with the given uuid, trx id, and seqno.
+ Using a complete key here avoids gap locks.
+ */
+ Wsrep_schema_impl::store(frag_table, 0, server_id);
+ Wsrep_schema_impl::store(frag_table, 1, transaction_id.get());
+ Wsrep_schema_impl::store(frag_table, 2, seqno.get());
+ Wsrep_schema_impl::make_key(frag_table, key, &key_map, 3);
+
+ if ((error= Wsrep_schema_impl::init_for_index_scan(frag_table,
+ key,
+ key_map)))
+ {
+ if (error == HA_ERR_END_OF_FILE || error == HA_ERR_KEY_NOT_FOUND)
+ {
+ WSREP_DEBUG("Record not found in %s.%s:trx %llu, seqno %lld, error %d",
+ frag_table->s->db.str,
+ frag_table->s->table_name.str,
+ transaction_id.get(),
+ seqno.get(),
+ error);
+ }
+ ret= error;
+ }
+ else if (Wsrep_schema_impl::delete_row(frag_table))
+ {
+ ret= 1;
+ }
+
+ Wsrep_schema_impl::end_index_scan(frag_table);
+ return ret;
+}
+
+int Wsrep_schema::remove_fragments(THD* thd,
+ const wsrep::id& server_id,
+ wsrep::transaction_id transaction_id,
+ const std::vector<wsrep::seqno>& fragments)
+{
+ DBUG_ENTER("Wsrep_schema::remove_fragments");
+ int ret= 0;
+
+ WSREP_DEBUG("Removing %zu fragments", fragments.size());
+ Wsrep_schema_impl::wsrep_off wsrep_off(thd);
+ Wsrep_schema_impl::binlog_off binlog_off(thd);
+
+ /*
+ Open SR table for write.
+ Adopted from Rpl_info_table_access::open_table()
+ */
+ uint flags= (MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK |
+ MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY |
+ MYSQL_OPEN_IGNORE_FLUSH |
+ MYSQL_LOCK_IGNORE_TIMEOUT);
+ Query_tables_list query_tables_list_backup;
+ Open_tables_backup open_tables_backup;
+ thd->lex->reset_n_backup_query_tables_list(&query_tables_list_backup);
+ thd->reset_n_backup_open_tables_state(&open_tables_backup);
+ TABLE_LIST tables;
+ LEX_CSTRING schema_str= { wsrep_schema_str.c_str(), wsrep_schema_str.length() };
+ LEX_CSTRING table_str= { sr_table_str.c_str(), sr_table_str.length() };
+ tables.init_one_table(&schema_str,
+ &table_str, 0, TL_WRITE);
+
+ if (!open_n_lock_single_table(thd, &tables, tables.lock_type, flags))
+ {
+ WSREP_DEBUG("Failed to open SR table for access");
+ ret= 1;
+ }
+ else
+ {
+ tables.table->use_all_columns();
+ for (std::vector<wsrep::seqno>::const_iterator i= fragments.begin();
+ i != fragments.end(); ++i)
+ {
+ if (remove_fragment(thd,
+ tables.table,
+ server_id,
+ transaction_id, *i))
+ {
+ ret= 1;
+ break;
+ }
+ }
+ }
+ close_thread_tables(thd);
+ thd->restore_backup_open_tables_state(&open_tables_backup);
+ thd->lex->restore_backup_query_tables_list(&query_tables_list_backup);
+
+ if (thd->wsrep_cs().mode() == wsrep::client_state::m_local &&
+ !thd->in_multi_stmt_transaction_mode())
+ {
+ /*
+ The ugly part: a locally executing autocommit statement is
+ committing, and it has already removed a fragment from stable
+ storage. Calling finish_stmt() here would call trans_commit_stmt(),
+ which would actually commit the transaction, which is not what we
+ want to do at this point.
+
+ Doing nothing here seems to work ok; this block is intentionally
+ a no-op and exists for documentation purposes only.
+ */
+ }
+ else
+ {
+ Wsrep_schema_impl::finish_stmt(thd);
+ }
+
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_schema::replay_transaction(THD* orig_thd,
+ Relay_log_info* rli,
+ const wsrep::ws_meta& ws_meta,
+ const std::vector<wsrep::seqno>& fragments)
+{
+ DBUG_ENTER("Wsrep_schema::replay_transaction");
+ DBUG_ASSERT(!fragments.empty());
+
+ THD thd(next_thread_id(), true);
+ thd.thread_stack= (orig_thd ? orig_thd->thread_stack :
+ (char*) &thd);
+
+ Wsrep_schema_impl::wsrep_off wsrep_off(&thd);
+ Wsrep_schema_impl::binlog_off binlog_off(&thd);
+ Wsrep_schema_impl::thd_context_switch thd_context_switch(orig_thd, &thd);
+
+ int ret= 1;
+ int error;
+ TABLE* frag_table= 0;
+ uchar key[MAX_KEY_LENGTH];
+ key_part_map key_map= 0;
+
+ for (std::vector<wsrep::seqno>::const_iterator i= fragments.begin();
+ i != fragments.end(); ++i)
+ {
+ Wsrep_schema_impl::init_stmt(&thd);
+ if ((error= Wsrep_schema_impl::open_for_read(&thd, sr_table_str.c_str(), &frag_table)))
+ {
+ WSREP_WARN("Could not open SR table for read: %d", error);
+ Wsrep_schema_impl::finish_stmt(&thd);
+ DBUG_RETURN(1);
+ }
+
+ Wsrep_schema_impl::store(frag_table, 0, ws_meta.server_id());
+ Wsrep_schema_impl::store(frag_table, 1, ws_meta.transaction_id().get());
+ Wsrep_schema_impl::store(frag_table, 2, i->get());
+ Wsrep_schema_impl::make_key(frag_table, key, &key_map, 3);
+
+ int error= Wsrep_schema_impl::init_for_index_scan(frag_table,
+ key,
+ key_map);
+ if (error)
+ {
+ WSREP_WARN("Failed to init streaming log table for index scan: %d",
+ error);
+ Wsrep_schema_impl::end_index_scan(frag_table);
+ ret= 1;
+ break;
+ }
+
+ int flags;
+ Wsrep_schema_impl::scan(frag_table, 3, flags);
+ WSREP_DEBUG("replay_fragment(%llu): seqno: %lld flags: %x",
+ ws_meta.transaction_id().get(),
+ i->get(),
+ flags);
+ String buf;
+ frag_table->field[4]->val_str(&buf);
+
+ {
+ Wsrep_schema_impl::thd_context_switch thd_context_switch(&thd, orig_thd);
+
+ ret= wsrep_apply_events(orig_thd, rli, buf.c_ptr_quick(), buf.length());
+ if (ret)
+ {
+ WSREP_WARN("Wsrep_schema::replay_transaction: failed to apply fragments");
+ break;
+ }
+ }
+
+ Wsrep_schema_impl::end_index_scan(frag_table);
+ Wsrep_schema_impl::finish_stmt(&thd);
+
+ Wsrep_schema_impl::init_stmt(&thd);
+
+ if ((error= Wsrep_schema_impl::open_for_write(&thd,
+ sr_table_str.c_str(),
+ &frag_table)))
+ {
+ WSREP_WARN("Could not open SR table for write: %d", error);
+ Wsrep_schema_impl::finish_stmt(&thd);
+ DBUG_RETURN(1);
+ }
+ error= Wsrep_schema_impl::init_for_index_scan(frag_table,
+ key,
+ key_map);
+ if (error)
+ {
+ WSREP_WARN("Failed to init streaming log table for index scan: %d",
+ error);
+ Wsrep_schema_impl::end_index_scan(frag_table);
+ ret= 1;
+ break;
+ }
+
+ error= Wsrep_schema_impl::delete_row(frag_table);
+ if (error)
+ {
+ WSREP_WARN("Could not delete row from streaming log table: %d", error);
+ Wsrep_schema_impl::end_index_scan(frag_table);
+ ret= 1;
+ break;
+ }
+ Wsrep_schema_impl::end_index_scan(frag_table);
+ Wsrep_schema_impl::finish_stmt(&thd);
+ }
+
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_schema::recover_sr_transactions(THD *orig_thd)
+{
+ DBUG_ENTER("Wsrep_schema::recover_sr_transactions");
+ THD storage_thd(next_thread_id(), true);
+ storage_thd.thread_stack= (orig_thd ? orig_thd->thread_stack :
+ (char*) &storage_thd);
+ TABLE* frag_table= 0;
+ TABLE* cluster_table= 0;
+ Wsrep_storage_service storage_service(&storage_thd);
+ Wsrep_schema_impl::binlog_off binlog_off(&storage_thd);
+ Wsrep_schema_impl::wsrep_off wsrep_off(&storage_thd);
+ Wsrep_schema_impl::thd_context_switch thd_context_switch(orig_thd,
+ &storage_thd);
+ Wsrep_server_state& server_state(Wsrep_server_state::instance());
+
+ int ret= 1;
+ int error;
+ wsrep::id cluster_id;
+
+ Wsrep_schema_impl::init_stmt(&storage_thd);
+ storage_thd.wsrep_skip_locking= FALSE;
+ if (Wsrep_schema_impl::open_for_read(&storage_thd,
+ cluster_table_str.c_str(),
+ &cluster_table) ||
+ Wsrep_schema_impl::init_for_scan(cluster_table))
+ {
+ Wsrep_schema_impl::finish_stmt(&storage_thd);
+ DBUG_RETURN(1);
+ }
+
+ if ((error= Wsrep_schema_impl::next_record(cluster_table)))
+ {
+ Wsrep_schema_impl::end_scan(cluster_table);
+ Wsrep_schema_impl::finish_stmt(&storage_thd);
+ trans_commit(&storage_thd);
+ if (error == HA_ERR_END_OF_FILE)
+ {
+ WSREP_INFO("Cluster table is empty, not recovering transactions");
+ DBUG_RETURN(0);
+ }
+ else
+ {
+ WSREP_ERROR("Failed to read cluster table: %d", error);
+ DBUG_RETURN(1);
+ }
+ }
+
+ Wsrep_schema_impl::scan(cluster_table, 0, cluster_id);
+ Wsrep_schema_impl::end_scan(cluster_table);
+ Wsrep_schema_impl::finish_stmt(&storage_thd);
+
+ std::ostringstream os;
+ os << cluster_id;
+ WSREP_INFO("Recovered cluster id %s", os.str().c_str());
+
+ storage_thd.wsrep_skip_locking= TRUE;
+ Wsrep_schema_impl::init_stmt(&storage_thd);
+
+ /*
+ Open the table for reading and writing so that fragments without
+ valid seqno can be deleted.
+ */
+ if (Wsrep_schema_impl::open_for_write(&storage_thd, sr_table_str.c_str(), &frag_table) ||
+ Wsrep_schema_impl::init_for_scan(frag_table))
+ {
+ WSREP_ERROR("Failed to open SR table for write");
+ goto out;
+ }
+
+ while (true)
+ {
+ if ((error= Wsrep_schema_impl::next_record(frag_table)) == 0)
+ {
+ wsrep::id server_id;
+ Wsrep_schema_impl::scan(frag_table, 0, server_id);
+ wsrep::client_id client_id;
+ unsigned long long transaction_id_ull;
+ Wsrep_schema_impl::scan(frag_table, 1, transaction_id_ull);
+ wsrep::transaction_id transaction_id(transaction_id_ull);
+ long long seqno_ll;
+ Wsrep_schema_impl::scan(frag_table, 2, seqno_ll);
+ wsrep::seqno seqno(seqno_ll);
+
+ /* This is possible if the server crashes between inserting the
+ fragment into the table and updating the fragment seqno after
+ certification. */
+ if (seqno.is_undefined())
+ {
+ Wsrep_schema_impl::delete_row(frag_table);
+ continue;
+ }
+
+ wsrep::gtid gtid(cluster_id, seqno);
+ int flags;
+ Wsrep_schema_impl::scan(frag_table, 3, flags);
+ String data_str;
+
+ (void)frag_table->field[4]->val_str(&data_str);
+ wsrep::const_buffer data(data_str.c_ptr_quick(), data_str.length());
+ wsrep::ws_meta ws_meta(gtid,
+ wsrep::stid(server_id,
+ transaction_id,
+ client_id),
+ wsrep::seqno::undefined(),
+ flags);
+
+ wsrep::high_priority_service* applier;
+ if (!(applier= server_state.find_streaming_applier(server_id,
+ transaction_id)))
+ {
+ DBUG_ASSERT(wsrep::starts_transaction(flags));
+ THD* thd= new THD(next_thread_id(), true);
+ thd->thread_stack= (char*)&storage_thd;
+
+ thd->real_id= pthread_self();
+
+ applier= new Wsrep_applier_service(thd);
+ server_state.start_streaming_applier(server_id, transaction_id,
+ applier);
+ applier->start_transaction(wsrep::ws_handle(transaction_id, 0),
+ ws_meta);
+ }
+ applier->store_globals();
+ applier->apply_write_set(ws_meta, data);
+ applier->after_apply();
+ storage_service.store_globals();
+ }
+ else if (error == HA_ERR_END_OF_FILE)
+ {
+ ret= 0;
+ break;
+ }
+ else
+ {
+ WSREP_ERROR("SR table scan returned error %d", error);
+ break;
+ }
+ }
+ Wsrep_schema_impl::end_scan(frag_table);
+ Wsrep_schema_impl::finish_stmt(&storage_thd);
+ trans_commit(&storage_thd);
+out:
+ DBUG_RETURN(ret);
+}
diff --git a/sql/wsrep_schema.h b/sql/wsrep_schema.h
new file mode 100644
index 00000000000..36e23998d19
--- /dev/null
+++ b/sql/wsrep_schema.h
@@ -0,0 +1,144 @@
+/* Copyright (C) 2015-2019 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */
+
+
+#ifndef WSREP_SCHEMA_H
+#define WSREP_SCHEMA_H
+
+/* wsrep-lib */
+#include "wsrep_types.h"
+
+#include "mysqld.h"
+#include "wsrep_mysqld.h"
+
+/*
+ Forward decls
+*/
+class THD;
+class Relay_log_info;
+struct TABLE;
+struct TABLE_LIST;
+struct st_mysql_lex_string;
+typedef struct st_mysql_lex_string LEX_STRING;
+
+/** Name of the table in `wsrep_schema_str` used for storing streaming
+replication data, in InnoDB full table name format, e.g. "database/tablename". */
+extern const char* wsrep_sr_table_name_full;
+
+class Wsrep_schema
+{
+ public:
+
+ Wsrep_schema();
+ ~Wsrep_schema();
+
+ /*
+ Initialize wsrep schema. Storage engines must be running before
+ calling this function.
+ */
+ int init();
+
+ /*
+ Store wsrep view info into wsrep schema.
+ */
+ int store_view(THD*, const Wsrep_view& view);
+
+ /*
+ Restore view info from stable storage.
+ */
+ Wsrep_view restore_view(THD* thd, const Wsrep_id& own_id) const;
+
+ /**
+ Append transaction fragment to fragment storage.
+ A transaction must have been started for the THD before this call.
+ In order to make the changes durable, the transaction must be
+ committed separately after this call.
+
+ @param thd THD object
+ @param server_id Wsrep server identifier
+ @param transaction_id Transaction identifier
+ @param seqno Sequence number of the fragment
+ @param flags Flags for the fragment
+ @param data Fragment data buffer
+
+ @return Zero in case of success, non-zero on failure.
+ */
+ int append_fragment(THD* thd,
+ const wsrep::id& server_id,
+ wsrep::transaction_id transaction_id,
+ wsrep::seqno seqno,
+ int flags,
+ const wsrep::const_buffer& data);
+ /**
+ Update existing fragment meta data. The fragment must have been
+ inserted earlier with append_fragment().
+
+ @param thd THD object
+ @param ws_meta Wsrep meta data
+
+ @return Zero in case of success, non-zero on failure.
+ */
+ int update_fragment_meta(THD* thd,
+ const wsrep::ws_meta& ws_meta);
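+
+  /*
+    Illustrative sketch for the two calls above (thd, server_id, trx_id,
+    seqno, flags, data and ws_meta stand for caller-provided values; this
+    is a sketch, not a prescribed sequence):
+
+      // inside an already started transaction
+      wsrep_schema->append_fragment(thd, server_id, trx_id, seqno,
+                                    flags, data);
+      // once the fragment has been certified and its meta data is known:
+      wsrep_schema->update_fragment_meta(thd, ws_meta);
+      // the surrounding transaction must be committed separately to make
+      // the changes durable
+  */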
+
+ /**
+ Remove fragments from storage. This method must be called
+ inside an active transaction. Fragment removal will be committed
+ once the transaction commits.
+
+ @param thd Pointer to THD object
+ @param server_id Identifier of the running server
+ @param transaction_id Identifier of the current transaction
+ @param fragments Vector of fragment seqnos to be removed
+ */
+ int remove_fragments(THD* thd,
+ const wsrep::id& server_id,
+ wsrep::transaction_id transaction_id,
+ const std::vector<wsrep::seqno>& fragments);
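+
+  /*
+    Illustrative sketch (thd, server_id, trx_id and seqnos stand for
+    caller-provided values): the removal piggybacks on the caller's
+    transaction, so it becomes durable when that transaction commits.
+
+      // inside an active transaction
+      wsrep_schema->remove_fragments(thd, server_id, trx_id, seqnos);
+      // ... committing the active transaction also commits the removal
+  */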
+
+ /**
+ Replay a transaction from stored fragments. The caller must have
+ started a transaction for the thd.
+
+ @param thd Pointer to THD object
+ @param ws_meta Write set meta data for commit fragment.
+ @param fragments Vector of fragments to be replayed
+
+ @return Zero on success, non-zero on failure.
+ */
+ int replay_transaction(THD* thd,
+ Relay_log_info* rli,
+ const wsrep::ws_meta& ws_meta,
+ const std::vector<wsrep::seqno>& fragments);
+
+ /**
+ Recover streaming transactions from the SR table.
+ This method should be called after storage engines are initialized.
+ It will scan the SR table and replay the streaming transactions found.
+
+ @param orig_thd The THD object of the calling thread.
+
+ @return Zero on success, non-zero on failure.
+ */
+ int recover_sr_transactions(THD* orig_thd);
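+
+  /*
+    Illustrative sketch (the surrounding startup code is assumed): called
+    once after the storage engines have been initialized, e.g.
+
+      wsrep_schema->recover_sr_transactions(thd);
+  */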
+
+ private:
+ /* Non-copyable */
+ Wsrep_schema(const Wsrep_schema&);
+ Wsrep_schema& operator=(const Wsrep_schema&);
+};
+
+extern Wsrep_schema* wsrep_schema;
+
+#endif /* !WSREP_SCHEMA_H */
diff --git a/sql/wsrep_server_service.cc b/sql/wsrep_server_service.cc
new file mode 100644
index 00000000000..42856862db3
--- /dev/null
+++ b/sql/wsrep_server_service.cc
@@ -0,0 +1,334 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "my_global.h"
+#include "wsrep_server_service.h"
+#include "wsrep_server_state.h"
+#include "wsrep_client_state.h"
+#include "wsrep_client_service.h"
+#include "wsrep_storage_service.h"
+#include "wsrep_high_priority_service.h"
+
+#include "wsrep_sst.h"
+#include "wsrep_xid.h"
+#include "wsrep_mysqld.h"
+#include "wsrep_schema.h"
+#include "wsrep_utils.h"
+
+#include "log.h" /* sql_print_xxx() */
+#include "sql_class.h" /* system variables */
+#include "transaction.h" /* trans_xxx */
+#include "sql_base.h" /* close_thread_tables */
+
+static void init_service_thd(THD* thd, char* thread_stack)
+{
+ thd->thread_stack= thread_stack;
+ thd->real_id= pthread_self();
+ thd->prior_thr_create_utime= thd->start_utime= microsecond_interval_timer();
+ thd->set_command(COM_SLEEP);
+ thd->reset_for_next_command(true);
+}
+
+wsrep::storage_service* Wsrep_server_service::storage_service(
+ wsrep::client_service& client_service)
+{
+ Wsrep_client_service& cs=
+ static_cast<Wsrep_client_service&>(client_service);
+ THD* thd= new THD(next_thread_id(), true);
+ init_service_thd(thd, cs.m_thd->thread_stack);
+ WSREP_DEBUG("Created storage service with thread id %llu",
+ thd->thread_id);
+ return new Wsrep_storage_service(thd);
+}
+
+wsrep::storage_service* Wsrep_server_service::storage_service(
+ wsrep::high_priority_service& high_priority_service)
+{
+ Wsrep_high_priority_service& hps=
+ static_cast<Wsrep_high_priority_service&>(high_priority_service);
+ THD* thd= new THD(next_thread_id(), true);
+ init_service_thd(thd, hps.m_thd->thread_stack);
+ WSREP_DEBUG("Created high priority storage service with thread id %llu",
+ thd->thread_id);
+ return new Wsrep_storage_service(thd);
+}
+
+void Wsrep_server_service::release_storage_service(
+ wsrep::storage_service* storage_service)
+{
+ Wsrep_storage_service* ss=
+ static_cast<Wsrep_storage_service*>(storage_service);
+ THD* thd= ss->m_thd;
+ delete ss;
+ delete thd;
+}
+
+wsrep::high_priority_service*
+Wsrep_server_service::streaming_applier_service(
+ wsrep::client_service& orig_client_service)
+{
+ Wsrep_client_service& orig_cs=
+ static_cast<Wsrep_client_service&>(orig_client_service);
+ THD* thd= new THD(next_thread_id(), true);
+ init_service_thd(thd, orig_cs.m_thd->thread_stack);
+ WSREP_DEBUG("Created streaming applier service in local context with "
+ "thread id %llu", thd->thread_id);
+ return new Wsrep_applier_service(thd);
+}
+
+wsrep::high_priority_service*
+Wsrep_server_service::streaming_applier_service(
+ wsrep::high_priority_service& orig_high_priority_service)
+{
+ Wsrep_high_priority_service&
+ orig_hps(static_cast<Wsrep_high_priority_service&>(orig_high_priority_service));
+ THD* thd= new THD(next_thread_id(), true);
+ init_service_thd(thd, orig_hps.m_thd->thread_stack);
+ WSREP_DEBUG("Created streaming applier service in high priority "
+ "context with thread id %llu", thd->thread_id);
+ return new Wsrep_applier_service(thd);
+}
+
+void Wsrep_server_service::release_high_priority_service(wsrep::high_priority_service* high_priority_service)
+{
+ Wsrep_high_priority_service* hps=
+ static_cast<Wsrep_high_priority_service*>(high_priority_service);
+ THD* thd= hps->m_thd;
+ delete hps;
+ delete thd;
+}
+
+void Wsrep_server_service::background_rollback(wsrep::client_state& client_state)
+{
+ Wsrep_client_state& cs= static_cast<Wsrep_client_state&>(client_state);
+ wsrep_fire_rollbacker(cs.thd());
+}
+
+void Wsrep_server_service::bootstrap()
+{
+ wsrep::log_info()
+ << "Bootstrapping a new cluster, setting initial position to "
+ << wsrep::gtid::undefined();
+ wsrep_set_SE_checkpoint(wsrep::gtid::undefined());
+}
+
+void Wsrep_server_service::log_message(enum wsrep::log::level level,
+ const char* message)
+{
+ switch (level)
+ {
+ case wsrep::log::debug:
+ sql_print_information("debug: %s", message);
+ break;
+ case wsrep::log::info:
+ sql_print_information("%s", message);
+ break;
+ case wsrep::log::warning:
+ sql_print_warning("%s", message);
+ break;
+ case wsrep::log::error:
+ sql_print_error("%s", message);
+ break;
+ }
+}
+
+void Wsrep_server_service::log_view(
+ wsrep::high_priority_service* high_priority_service,
+ const wsrep::view& view)
+{
+ Wsrep_high_priority_service* applier=
+ static_cast<Wsrep_high_priority_service*>(high_priority_service);
+ /* Update global system variables */
+ mysql_mutex_lock(&LOCK_global_system_variables);
+ if (wsrep_auto_increment_control && view.own_index() >= 0)
+ {
+ global_system_variables.auto_increment_offset= view.own_index() + 1;
+ global_system_variables.auto_increment_increment= view.members().size();
+ wsrep_protocol_version= view.protocol_version();
+ }
+ mysql_mutex_unlock(&LOCK_global_system_variables);
+
+ /* Update wsrep status variables */
+ mysql_mutex_lock(&LOCK_status);
+ wsrep_cluster_size= view.members().size();
+ wsrep_local_index= view.own_index();
+ std::ostringstream os;
+ os << view.state_id().id();
+ wsrep_update_cluster_state_uuid(os.str().c_str());
+ mysql_mutex_unlock(&LOCK_status);
+ wsrep_config_state->set(view);
+
+ if (view.status() == wsrep::view::primary)
+ {
+ if (applier)
+ {
+ Wsrep_id id;
+ Wsrep_view prev_view= wsrep_schema->restore_view(applier->m_thd, id);
+ bool checkpoint_was_reset= false;
+ if (prev_view.state_id().id() != view.state_id().id())
+ {
+ WSREP_DEBUG("New cluster UUID was generated, resetting position info");
+ wsrep_set_SE_checkpoint(wsrep::gtid::undefined());
+ checkpoint_was_reset= true;
+ }
+
+ if (wsrep_debug)
+ {
+ std::ostringstream os;
+ os << "Storing cluster view:\n" << view;
+ WSREP_INFO("%s", os.str().c_str());
+ DBUG_ASSERT(prev_view.state_id().id() != view.state_id().id() ||
+ view.state_id().seqno().get() >= prev_view.state_id().seqno().get());
+ }
+
+ if (trans_begin(applier->m_thd, MYSQL_START_TRANS_OPT_READ_WRITE))
+ {
+ WSREP_WARN("Failed to start transaction for store view");
+ }
+ else
+ {
+ if (wsrep_schema->store_view(applier->m_thd, view))
+ {
+ WSREP_WARN("Failed to store view");
+ trans_rollback_stmt(applier->m_thd);
+ if (!trans_rollback(applier->m_thd))
+ {
+ close_thread_tables(applier->m_thd);
+ }
+ }
+ else
+ {
+ if (trans_commit(applier->m_thd))
+ {
+ WSREP_WARN("Failed to commit transaction for store view");
+ }
+ }
+ applier->m_thd->mdl_context.release_transactional_locks();
+ }
+
+ /*
+ Backwards compatibility: when running in a mixed cluster with
+ Galera 3.x, the provider does not generate unique sequence numbers
+ for views. This condition can be detected by inspecting the last
+ committed seqno as returned by the provider. If last_committed
+ matches the view state_id seqno, the cluster runs in backwards
+ compatibility mode and we skip setting the checkpoint for the
+ view.
+ */
+ wsrep::seqno last_committed=
+ Wsrep_server_state::instance().provider().last_committed_gtid().seqno();
+ if (checkpoint_was_reset || last_committed != view.state_id().seqno())
+ {
+ wsrep_set_SE_checkpoint(view.state_id());
+ }
+ DBUG_ASSERT(wsrep_get_SE_checkpoint().id() == view.state_id().id());
+ }
+ else
+ {
+ WSREP_DEBUG("No applier in Wsrep_server_service::log_view(), "
+ "skipping write to wsrep_schema");
+ }
+ }
+}
+
+void Wsrep_server_service::recover_streaming_appliers(wsrep::client_service& cs)
+{
+ Wsrep_client_service& client_service= static_cast<Wsrep_client_service&>(cs);
+ wsrep_recover_sr_from_storage(client_service.m_thd);
+}
+
+void Wsrep_server_service::recover_streaming_appliers(
+ wsrep::high_priority_service& hs)
+{
+ Wsrep_high_priority_service& high_priority_service=
+ static_cast<Wsrep_high_priority_service&>(hs);
+ wsrep_recover_sr_from_storage(high_priority_service.m_thd);
+}
+
+wsrep::view Wsrep_server_service::get_view(wsrep::client_service& c,
+ const wsrep::id& own_id)
+{
+ Wsrep_client_service& cs(static_cast<Wsrep_client_service&>(c));
+ wsrep::view v(wsrep_schema->restore_view(cs.m_thd, own_id));
+ return v;
+}
+
+wsrep::gtid Wsrep_server_service::get_position(wsrep::client_service&)
+{
+ return wsrep_get_SE_checkpoint();
+}
+
+void Wsrep_server_service::log_state_change(
+ enum Wsrep_server_state::state prev_state,
+ enum Wsrep_server_state::state current_state)
+{
+ WSREP_INFO("Server status change %s -> %s",
+ wsrep::to_c_string(prev_state),
+ wsrep::to_c_string(current_state));
+ mysql_mutex_lock(&LOCK_status);
+ switch (current_state)
+ {
+ case Wsrep_server_state::s_synced:
+ wsrep_ready= TRUE;
+ WSREP_INFO("Synchronized with group, ready for connections");
+ /* fall through */
+ case Wsrep_server_state::s_joined:
+ case Wsrep_server_state::s_donor:
+ wsrep_cluster_status= "Primary";
+ break;
+ case Wsrep_server_state::s_connected:
+ wsrep_cluster_status= "non-Primary";
+ wsrep_ready= FALSE;
+ wsrep_connected= TRUE;
+ break;
+ case Wsrep_server_state::s_disconnected:
+ wsrep_ready= FALSE;
+ wsrep_connected= FALSE;
+ wsrep_cluster_status= "Disconnected";
+ break;
+ default:
+ wsrep_ready= FALSE;
+ wsrep_cluster_status= "non-Primary";
+ break;
+ }
+ mysql_mutex_unlock(&LOCK_status);
+ wsrep_config_state->set(current_state);
+}
+
+bool Wsrep_server_service::sst_before_init() const
+{
+ return wsrep_before_SE();
+}
+
+std::string Wsrep_server_service::sst_request()
+{
+ return wsrep_sst_prepare();
+}
+
+int Wsrep_server_service::start_sst(const std::string& sst_request,
+ const wsrep::gtid& gtid,
+ bool bypass)
+{
+ return wsrep_sst_donate(sst_request, gtid, bypass);
+}
+
+int Wsrep_server_service::wait_committing_transactions(int timeout)
+{
+ return wsrep_wait_committing_connections_close(timeout);
+}
+
+void Wsrep_server_service::debug_sync(const char*)
+{
+}
diff --git a/sql/wsrep_server_service.h b/sql/wsrep_server_service.h
new file mode 100644
index 00000000000..b8f1f009cde
--- /dev/null
+++ b/sql/wsrep_server_service.h
@@ -0,0 +1,81 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef WSREP_SERVER_SERVICE_H
+#define WSREP_SERVER_SERVICE_H
+
+/* wsrep-lib */
+#include "wsrep/server_service.hpp"
+#include "wsrep/exception.hpp" // not_impemented_error(), remove when finished
+#include "wsrep/storage_service.hpp"
+
+class Wsrep_server_state;
+
+
+/* wsrep::server_service interface implementation */
+class Wsrep_server_service : public wsrep::server_service
+{
+public:
+ Wsrep_server_service(Wsrep_server_state& server_state)
+ : m_server_state(server_state)
+ { }
+
+ wsrep::storage_service* storage_service(wsrep::client_service&);
+
+ wsrep::storage_service* storage_service(wsrep::high_priority_service&);
+
+ void release_storage_service(wsrep::storage_service*);
+
+ wsrep::high_priority_service*
+ streaming_applier_service(wsrep::client_service&);
+
+ wsrep::high_priority_service*
+ streaming_applier_service(wsrep::high_priority_service&);
+
+ void release_high_priority_service(wsrep::high_priority_service*);
+
+ void background_rollback(wsrep::client_state&);
+
+ void bootstrap();
+ void log_message(enum wsrep::log::level, const char*);
+
+ void log_dummy_write_set(wsrep::client_state&, const wsrep::ws_meta&)
+ { throw wsrep::not_implemented_error(); }
+
+ void log_view(wsrep::high_priority_service*, const wsrep::view&);
+
+ void recover_streaming_appliers(wsrep::client_service&);
+ void recover_streaming_appliers(wsrep::high_priority_service&);
+ wsrep::view get_view(wsrep::client_service&, const wsrep::id& own_id);
+
+ wsrep::gtid get_position(wsrep::client_service&);
+
+ void log_state_change(enum wsrep::server_state::state,
+ enum wsrep::server_state::state);
+
+ bool sst_before_init() const;
+
+ std::string sst_request();
+ int start_sst(const std::string&, const wsrep::gtid&, bool);
+
+ int wait_committing_transactions(int);
+
+ void debug_sync(const char*);
+private:
+ Wsrep_server_state& m_server_state;
+};
+
+
+#endif /* WSREP_SERVER_SERVICE_H */
diff --git a/sql/wsrep_server_state.cc b/sql/wsrep_server_state.cc
new file mode 100644
index 00000000000..ebc4efaabe5
--- /dev/null
+++ b/sql/wsrep_server_state.cc
@@ -0,0 +1,85 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "my_global.h"
+#include "wsrep_api.h"
+#include "wsrep_server_state.h"
+#include "wsrep_binlog.h" /* init/deinit group commit */
+
+mysql_mutex_t LOCK_wsrep_server_state;
+mysql_cond_t COND_wsrep_server_state;
+
+#ifdef HAVE_PSI_INTERFACE
+PSI_mutex_key key_LOCK_wsrep_server_state;
+PSI_cond_key key_COND_wsrep_server_state;
+#endif
+
+Wsrep_server_state::Wsrep_server_state(const std::string& name,
+ const std::string& incoming_address,
+ const std::string& address,
+ const std::string& working_dir,
+ const wsrep::gtid& initial_position,
+ int max_protocol_version)
+ : wsrep::server_state(m_mutex,
+ m_cond,
+ m_service,
+ NULL,
+ name,
+ incoming_address,
+ address,
+ working_dir,
+ initial_position,
+ max_protocol_version,
+ wsrep::server_state::rm_sync)
+ , m_mutex(LOCK_wsrep_server_state)
+ , m_cond(COND_wsrep_server_state)
+ , m_service(*this)
+{ }
+
+Wsrep_server_state::~Wsrep_server_state()
+{ }
+
+void Wsrep_server_state::init_once(const std::string& name,
+ const std::string& incoming_address,
+ const std::string& address,
+ const std::string& working_dir,
+ const wsrep::gtid& initial_position,
+ int max_protocol_version)
+{
+ if (m_instance == 0)
+ {
+ mysql_mutex_init(key_LOCK_wsrep_server_state, &LOCK_wsrep_server_state,
+ MY_MUTEX_INIT_FAST);
+ mysql_cond_init(key_COND_wsrep_server_state, &COND_wsrep_server_state, 0);
+ m_instance = new Wsrep_server_state(name,
+ incoming_address,
+ address,
+ working_dir,
+ initial_position,
+ max_protocol_version);
+ }
+}
+
+void Wsrep_server_state::destroy()
+{
+
+ if (m_instance)
+ {
+ delete m_instance;
+ m_instance= 0;
+ mysql_mutex_destroy(&LOCK_wsrep_server_state);
+ mysql_cond_destroy(&COND_wsrep_server_state);
+ }
+}
diff --git a/sql/wsrep_server_state.h b/sql/wsrep_server_state.h
new file mode 100644
index 00000000000..34ff4105180
--- /dev/null
+++ b/sql/wsrep_server_state.h
@@ -0,0 +1,68 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef WSREP_SERVER_STATE_H
+#define WSREP_SERVER_STATE_H
+
+/* wsrep-lib */
+#include "wsrep/server_state.hpp"
+#include "wsrep/provider.hpp"
+
+/* implementation */
+#include "wsrep_server_service.h"
+#include "wsrep_mutex.h"
+#include "wsrep_condition_variable.h"
+
+class Wsrep_server_state : public wsrep::server_state
+{
+public:
+ static void init_once(const std::string& name,
+ const std::string& incoming_address,
+ const std::string& address,
+ const std::string& working_dir,
+ const wsrep::gtid& initial_position,
+ int max_protocol_version);
+ static void destroy();
+ static Wsrep_server_state& instance()
+ {
+ return *m_instance;
+ }
+
+ static wsrep::provider& get_provider()
+ {
+ return instance().provider();
+ }
+
+ static bool has_capability(int capability)
+ {
+ return (get_provider().capabilities() & capability);
+ }
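+
+  /*
+    Illustrative sketch (the capability constant is an assumption about
+    wsrep-lib's provider interface, not something defined in this header):
+
+      if (Wsrep_server_state::has_capability(
+              wsrep::provider::capability::streaming))
+      {
+        // provider supports streaming replication
+      }
+  */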
+
+private:
+ Wsrep_server_state(const std::string& name,
+ const std::string& incoming_address,
+ const std::string& address,
+ const std::string& working_dir,
+ const wsrep::gtid& initial_position,
+ int max_protocol_version);
+ ~Wsrep_server_state();
+ Wsrep_mutex m_mutex;
+ Wsrep_condition_variable m_cond;
+ Wsrep_server_service m_service;
+ static Wsrep_server_state* m_instance;
+
+};
+
+#endif // WSREP_SERVER_STATE_H
diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc
index 92ca8fa748d..d79b7771571 100644
--- a/sql/wsrep_sst.cc
+++ b/sql/wsrep_sst.cc
@@ -1,4 +1,4 @@
-/* Copyright 2008-2015 Codership Oy <http://www.codership.com>
+/* Copyright 2008-2017 Codership Oy <http://www.codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -35,16 +35,16 @@
static char wsrep_defaults_file[FN_REFLEN * 2 + 10 + 30 +
sizeof(WSREP_SST_OPT_CONF) +
sizeof(WSREP_SST_OPT_CONF_SUFFIX) +
- sizeof(WSREP_SST_OPT_CONF_EXTRA)] = {0};
+ sizeof(WSREP_SST_OPT_CONF_EXTRA)]= {0};
-const char* wsrep_sst_method = WSREP_SST_DEFAULT;
-const char* wsrep_sst_receive_address = WSREP_SST_ADDRESS_AUTO;
-const char* wsrep_sst_donor = "";
-const char* wsrep_sst_auth = NULL;
+const char* wsrep_sst_method = WSREP_SST_DEFAULT;
+const char* wsrep_sst_receive_address= WSREP_SST_ADDRESS_AUTO;
+const char* wsrep_sst_donor = "";
+const char* wsrep_sst_auth = NULL;
// container for real auth string
-static const char* sst_auth_real = NULL;
-my_bool wsrep_sst_donor_rejects_queries = FALSE;
+static const char* sst_auth_real = NULL;
+my_bool wsrep_sst_donor_rejects_queries= FALSE;
bool wsrep_sst_method_check (sys_var *self, THD* thd, set_var* var)
{
@@ -60,7 +60,7 @@ bool wsrep_sst_method_check (sys_var *self, THD* thd, set_var* var)
return 0;
}
-static const char* data_home_dir = NULL;
+static const char* data_home_dir;
void wsrep_set_data_home_dir(const char *data_dir)
{
@@ -134,7 +134,7 @@ static bool sst_auth_real_set (const char* value)
{
// set sst_auth_real
if (sst_auth_real) { my_free((void *) sst_auth_real); }
- sst_auth_real = v;
+ sst_auth_real= v;
// mask wsrep_sst_auth
if (strlen(sst_auth_real))
@@ -175,6 +175,7 @@ bool wsrep_sst_donor_update (sys_var *self, THD* thd, enum_var_type type)
return 0;
}
+
bool wsrep_before_SE()
{
return (wsrep_provider != NULL
@@ -183,111 +184,29 @@ bool wsrep_before_SE()
&& strcmp (wsrep_sst_method, WSREP_SST_MYSQLDUMP));
}
-static bool sst_complete = false;
-static bool sst_needed = false;
-
-#define WSREP_EXTEND_TIMEOUT_INTERVAL 30
-#define WSREP_TIMEDWAIT_SECONDS 10
-
-void wsrep_sst_grab ()
-{
- WSREP_INFO("wsrep_sst_grab()");
- if (mysql_mutex_lock (&LOCK_wsrep_sst)) abort();
- sst_complete = false;
- mysql_mutex_unlock (&LOCK_wsrep_sst);
-}
-
-// Wait for end of SST
-bool wsrep_sst_wait ()
-{
- double total_wtime = 0;
-
- if (mysql_mutex_lock (&LOCK_wsrep_sst))
- abort();
-
- WSREP_INFO("Waiting for SST to complete.");
-
- while (!sst_complete)
- {
- struct timespec wtime;
- set_timespec(wtime, WSREP_TIMEDWAIT_SECONDS);
- time_t start_time = time(NULL);
- mysql_cond_timedwait (&COND_wsrep_sst, &LOCK_wsrep_sst, &wtime);
- time_t end_time = time(NULL);
-
- if (!sst_complete)
- {
- total_wtime += difftime(end_time, start_time);
- WSREP_DEBUG("Waiting for SST to complete. current seqno: %" PRId64 " waited %f secs.", local_seqno, total_wtime);
- service_manager_extend_timeout(WSREP_EXTEND_TIMEOUT_INTERVAL,
- "WSREP state transfer ongoing, current seqno: %" PRId64 " waited %f secs", local_seqno, total_wtime);
- }
- }
-
- if (local_seqno >= 0)
- {
- WSREP_INFO("SST complete, seqno: %lld", (long long) local_seqno);
- }
- else
- {
- WSREP_ERROR("SST failed: %d (%s)",
- int(-local_seqno), strerror(-local_seqno));
- }
-
- mysql_mutex_unlock (&LOCK_wsrep_sst);
-
- return (local_seqno >= 0);
-}
-
// Signal end of SST
-void wsrep_sst_complete (const wsrep_uuid_t* sst_uuid,
- wsrep_seqno_t sst_seqno,
- bool needed)
+static void wsrep_sst_complete (THD* thd,
+ int const rcode)
{
- if (mysql_mutex_lock (&LOCK_wsrep_sst)) abort();
- if (!sst_complete)
- {
- sst_complete = true;
- sst_needed = needed;
- local_uuid = *sst_uuid;
- local_seqno = sst_seqno;
- mysql_cond_signal (&COND_wsrep_sst);
- }
- else
- {
- /* This can happen when called from wsrep_synced_cb().
- At the moment there is no way to check there
- if main thread is still waiting for signal,
- so wsrep_sst_complete() is called from there
- each time wsrep_ready changes from FALSE -> TRUE.
- */
- WSREP_DEBUG("Nobody is waiting for SST.");
- }
- mysql_mutex_unlock (&LOCK_wsrep_sst);
+ Wsrep_client_service client_service(thd, thd->wsrep_cs());
+ Wsrep_server_state::instance().sst_received(client_service, rcode);
}
-/*
+ /*
If wsrep provider is loaded, inform that the new state snapshot
has been received. Also update the local checkpoint.
- @param wsrep [IN] wsrep handle
+ @param thd [IN]
@param uuid [IN] Initial state UUID
@param seqno [IN] Initial state sequence number
@param state [IN] Always NULL, also ignored by wsrep provider (?)
@param state_len [IN] Always 0, also ignored by wsrep provider (?)
- @param implicit [IN] Whether invoked implicitly due to SST
- (true) or explicitly because if change
- in wsrep_start_position by user (false).
- @return false Success
- true Error
-
*/
-bool wsrep_sst_received (wsrep_t* const wsrep,
- const wsrep_uuid_t& uuid,
- const wsrep_seqno_t seqno,
- const void* const state,
- const size_t state_len,
- const bool implicit)
+void wsrep_sst_received (THD* thd,
+ const wsrep_uuid_t& uuid,
+ wsrep_seqno_t const seqno,
+ const void* const state,
+ size_t const state_len)
{
/*
To keep track of whether the local uuid:seqno should be updated. Also, note
@@ -295,81 +214,40 @@ bool wsrep_sst_received (wsrep_t* const wsrep,
OK from wsrep provider. By doing so, the values remain consistent across
the server & wsrep provider.
*/
- bool do_update= false;
-
- // Get the locally stored uuid:seqno.
- if (wsrep_get_SE_checkpoint(local_uuid, local_seqno))
- {
- return true;
- }
-
- if (memcmp(&local_uuid, &uuid, sizeof(wsrep_uuid_t)) ||
- local_seqno < seqno || seqno < 0)
- {
- do_update= true;
- }
- else if (local_seqno > seqno)
- {
- WSREP_WARN("SST position can't be set in past. Requested: %lld, Current: "
- " %lld.", (long long)seqno, (long long)local_seqno);
/*
- If we are here because of SET command, simply return true (error) instead of
- aborting.
+ TODO: Handle backwards compatibility. WSREP API v25 does not have
+ wsrep schema.
*/
- if (implicit)
- {
- WSREP_WARN("Can't continue.");
- unireg_abort(1);
- }
- else
- {
- return true;
+ /*
+ Logical SST methods (mysqldump etc) don't update InnoDB sys header.
+ Reset the SE checkpoint before recovering view in order to avoid
+ sanity check failure.
+ */
+ wsrep::gtid const sst_gtid(wsrep::id(uuid.data, sizeof(uuid.data)),
+ wsrep::seqno(seqno));
+
+ if (!wsrep_before_SE()) {
+ wsrep_set_SE_checkpoint(wsrep::gtid::undefined());
+ wsrep_set_SE_checkpoint(sst_gtid);
}
- }
-
-#ifdef GTID_SUPPORT
- wsrep_init_sidno(uuid);
-#endif /* GTID_SUPPORT */
-
- if (wsrep)
- {
- int const rcode(seqno < 0 ? seqno : 0);
- wsrep_gtid_t const state_id= {uuid,
- (rcode ? WSREP_SEQNO_UNDEFINED : seqno)};
-
- wsrep_status_t ret= wsrep->sst_received(wsrep, &state_id, state,
- state_len, rcode);
+ wsrep_verify_SE_checkpoint(uuid, seqno);
- if (ret != WSREP_OK)
- {
- return true;
+ /*
+ Both wsrep_init_SR() and wsrep_recover_view() may use
+ wsrep thread pool. Restore original thd context before returning.
+ */
+ if (thd) {
+ thd->store_globals();
+ }
+ else {
+ my_pthread_setspecific_ptr(THR_THD, NULL);
}
- }
- // Now is the good time to update the local state and checkpoint.
- if (do_update)
- {
- if (wsrep_set_SE_checkpoint(uuid, seqno))
+ if (WSREP_ON)
{
- return true;
+ int const rcode(seqno < 0 ? seqno : 0);
+ wsrep_sst_complete(thd,rcode);
}
-
- local_uuid= uuid;
- local_seqno= seqno;
- }
-
- return false;
-}
-
-// Let applier threads to continue
-bool wsrep_sst_continue ()
-{
- if (sst_needed)
- {
- WSREP_INFO("Signalling provider to continue.");
- return wsrep_sst_received (wsrep, local_uuid, local_seqno, NULL, 0, true);
- }
- return false;
}
struct sst_thread_arg
@@ -399,11 +277,11 @@ struct sst_thread_arg
static int sst_scan_uuid_seqno (const char* str,
wsrep_uuid_t* uuid, wsrep_seqno_t* seqno)
{
- int offt = wsrep_uuid_scan (str, strlen(str), uuid);
+ int offt= wsrep_uuid_scan (str, strlen(str), uuid);
errno= 0; /* Reset the errno */
if (offt > 0 && strlen(str) > (unsigned int)offt && ':' == str[offt])
{
- *seqno = strtoll (str + offt + 1, NULL, 10);
+ *seqno= strtoll (str + offt + 1, NULL, 10);
if (*seqno != LLONG_MAX || errno != ERANGE)
{
return 0;
@@ -411,7 +289,7 @@ static int sst_scan_uuid_seqno (const char* str,
}
WSREP_ERROR("Failed to parse uuid:seqno pair: '%s'", str);
- return EINVAL;
+ return -EINVAL;
}
// get rid of trailing \n
@@ -421,8 +299,8 @@ static char* my_fgets (char* buf, size_t buf_len, FILE* stream)
if (ret)
{
- size_t len = strlen(ret);
- if (len > 0 && ret[len - 1] == '\n') ret[len - 1] = '\0';
+ size_t len= strlen(ret);
+ if (len > 0 && ret[len - 1] == '\n') ret[len - 1]= '\0';
}
return ret;
@@ -459,7 +337,8 @@ static int generate_binlog_index_opt_val(char** ret)
{
DBUG_ASSERT(ret);
*ret= NULL;
- if (opt_binlog_index_name) {
+ if (opt_binlog_index_name)
+ {
*ret= strcmp(opt_binlog_index_name, "0") ?
my_strdup(opt_binlog_index_name, MYF(0)) : my_strdup("", MYF(0));
}
@@ -477,9 +356,10 @@ static void* sst_joiner_thread (void* a)
int err= 1;
{
- const char magic[] = "ready";
- const size_t magic_len = sizeof(magic) - 1;
- const size_t out_len = 512;
+ THD* thd;
+ const char magic[]= "ready";
+ const size_t magic_len= sizeof(magic) - 1;
+ const size_t out_len= 512;
char out[out_len];
WSREP_INFO("Running: '%s'", arg->cmd);
@@ -496,29 +376,31 @@ static void* sst_joiner_thread (void* a)
WSREP_ERROR("Failed to read '%s <addr>' from: %s\n\tRead: '%s'",
magic, arg->cmd, tmp);
proc.wait();
- if (proc.error()) err = proc.error();
+ if (proc.error()) err= proc.error();
}
else
{
- err = 0;
+ err= 0;
}
}
else
{
- err = proc.error();
+ err= proc.error();
WSREP_ERROR("Failed to execute: %s : %d (%s)",
arg->cmd, err, strerror(err));
}
- // signal sst_prepare thread with ret code,
- // it will go on sending SST request
+ /*
+ signal sst_prepare thread with ret code,
+ it will go on sending SST request
+ */
mysql_mutex_lock (&arg->lock);
if (!err)
{
- arg->ret_str = strdup (out + magic_len + 1);
- if (!arg->ret_str) err = ENOMEM;
+ arg->ret_str= strdup (out + magic_len + 1);
+ if (!arg->ret_str) err= ENOMEM;
}
- arg->err = -err;
+ arg->err= -err;
mysql_cond_signal (&arg->cond);
mysql_mutex_unlock (&arg->lock); //! @note arg is unusable after that.
@@ -526,11 +408,11 @@ static void* sst_joiner_thread (void* a)
* initializer thread to ensure single thread of
* shutdown. */
- wsrep_uuid_t ret_uuid = WSREP_UUID_UNDEFINED;
- wsrep_seqno_t ret_seqno = WSREP_SEQNO_UNDEFINED;
+ wsrep_uuid_t ret_uuid = WSREP_UUID_UNDEFINED;
+ wsrep_seqno_t ret_seqno= WSREP_SEQNO_UNDEFINED;
// in case of successfull receiver start, wait for SST completion/end
- char* tmp = my_fgets (out, out_len, proc.pipe());
+ char* tmp= my_fgets (out, out_len, proc.pipe());
proc.wait();
err= EINVAL;
@@ -539,7 +421,7 @@ static void* sst_joiner_thread (void* a)
{
WSREP_ERROR("Failed to read uuid:seqno and wsrep_gtid_domain_id from "
"joiner script.");
- if (proc.error()) err = proc.error();
+ if (proc.error()) err= proc.error();
}
else
{
@@ -547,7 +429,14 @@ static void* sst_joiner_thread (void* a)
const char *pos= strchr(out, ' ');
if (!pos) {
- // There is no wsrep_gtid_domain_id (some older version SST script?).
+
+ if (wsrep_gtid_mode)
+ {
+ // There is no wsrep_gtid_domain_id (some older version SST script?).
+ WSREP_WARN("Did not find domain ID from SST script output '%s'. "
+ "Domain ID must be set manually to keep binlog consistent",
+ out);
+ }
err= sst_scan_uuid_seqno (out, &ret_uuid, &ret_seqno);
} else {
@@ -583,14 +472,59 @@ static void* sst_joiner_thread (void* a)
err:
+ wsrep::gtid ret_gtid;
+
if (err)
{
- ret_uuid= WSREP_UUID_UNDEFINED;
- ret_seqno= -err;
+ ret_gtid= wsrep::gtid::undefined();
+ }
+ else
+ {
+ ret_gtid= wsrep::gtid(wsrep::id(ret_uuid.data, sizeof(ret_uuid.data)),
+ wsrep::seqno(ret_seqno));
+ }
+
+ /*
+ Tell the initializer thread that SST is complete.
+ For that, a THD must be initialized first.
+ */
+ if (my_thread_init())
+ {
+ WSREP_ERROR("my_thread_init() failed, can't signal end of SST. "
+ "Aborting.");
+ unireg_abort(1);
+ }
+
+ thd= new THD(next_thread_id());
+
+ if (!thd)
+ {
+ WSREP_ERROR("Failed to allocate THD to restore view from local state, "
+ "can't signal end of SST. Aborting.");
+ unireg_abort(1);
}
- // Tell initializer thread that SST is complete
- wsrep_sst_complete (&ret_uuid, ret_seqno, true);
+ thd->thread_stack= (char*) &thd;
+ thd->security_ctx->skip_grants();
+ thd->system_thread= SYSTEM_THREAD_GENERIC;
+ thd->real_id= pthread_self();
+
+ thd->store_globals();
+
+ /* No wsrep replication for this internal THD */
+ thd->variables.wsrep_on = 0;
+ /* No binlogging */
+ thd->variables.sql_log_bin = 0;
+ thd->variables.option_bits &= ~OPTION_BIN_LOG;
+ /* No general log */
+ thd->variables.option_bits |= OPTION_LOG_OFF;
+ /* Read committed isolation to avoid gap locking */
+ thd->variables.tx_isolation= ISO_READ_COMMITTED;
+
+ wsrep_sst_complete (thd, -err);
+
+ delete thd;
+ my_thread_end();
}
return NULL;
@@ -689,7 +623,7 @@ static ssize_t sst_prepare_other (const char* method,
" %s "
WSREP_SST_OPT_PARENT " '%d'"
" %s '%s'"
- " %s '%s'",
+ " %s '%s'",
method, addr_in, mysql_real_data_home,
wsrep_defaults_file,
(int)getpid(), binlog_opt, binlog_opt_val,
@@ -729,7 +663,7 @@ static ssize_t sst_prepare_other (const char* method,
pthread_t tmp;
sst_thread_arg arg(cmd_str(), env());
mysql_mutex_lock (&arg.lock);
- ret = pthread_create (&tmp, NULL, sst_joiner_thread, &arg);
+ ret= pthread_create (&tmp, NULL, sst_joiner_thread, &arg);
if (ret)
{
WSREP_ERROR("sst_prepare_other(): pthread_create() failed: %d (%s)",
@@ -741,11 +675,11 @@ static ssize_t sst_prepare_other (const char* method,
*addr_out= arg.ret_str;
if (!arg.err)
- ret = strlen(*addr_out);
+ ret= strlen(*addr_out);
else
{
assert (arg.err < 0);
- ret = arg.err;
+ ret= arg.err;
}
pthread_detach (tmp);
@@ -759,12 +693,12 @@ extern uint mysqld_port;
static ssize_t sst_prepare_mysqldump (const char* addr_in,
const char** addr_out)
{
- ssize_t ret = strlen (addr_in);
+ ssize_t ret= strlen (addr_in);
if (!strrchr(addr_in, ':'))
{
- ssize_t s = ret + 7;
- char* tmp = (char*) malloc (s);
+ ssize_t s= ret + 7;
+ char* tmp= (char*) malloc (s);
if (tmp)
{
@@ -775,7 +709,7 @@ static ssize_t sst_prepare_mysqldump (const char* addr_in,
*addr_out= tmp;
return ret;
}
- if (ret > 0) /* buffer too short */ ret = -EMSGSIZE;
+ if (ret > 0) /* buffer too short */ ret= -EMSGSIZE;
free (tmp);
}
else {
@@ -792,32 +726,22 @@ static ssize_t sst_prepare_mysqldump (const char* addr_in,
return ret;
}
-static bool SE_initialized = false;
-
-ssize_t wsrep_sst_prepare (void** msg)
+std::string wsrep_sst_prepare()
{
+ const ssize_t ip_max= 256;
+ char ip_buf[ip_max];
const char* addr_in= NULL;
const char* addr_out= NULL;
const char* method;
if (!strcmp(wsrep_sst_method, WSREP_SST_SKIP))
{
- ssize_t ret = strlen(WSREP_STATE_TRANSFER_TRIVIAL) + 1;
- *msg = strdup(WSREP_STATE_TRANSFER_TRIVIAL);
- if (!msg)
- {
- WSREP_ERROR("Could not allocate %zd bytes for state request", ret);
- unireg_abort(1);
- }
- return ret;
+ return WSREP_STATE_TRANSFER_TRIVIAL;
}
/*
Figure out SST receive address. Common for all SST methods.
*/
- char ip_buf[256];
- const ssize_t ip_max= sizeof(ip_buf);
-
// Attempt 1: wsrep_sst_receive_address
if (wsrep_sst_receive_address &&
strcmp (wsrep_sst_receive_address, WSREP_SST_ADDRESS_AUTO))
@@ -834,7 +758,7 @@ ssize_t wsrep_sst_prepare (void** msg)
{
WSREP_ERROR("Could not parse wsrep_node_address : %s",
wsrep_node_address);
- unireg_abort(1);
+ throw wsrep::runtime_error("Failed to prepare for SST. Unrecoverable");
}
memcpy(ip_buf, addr.get_address(), addr.get_address_len());
addr_in= ip_buf;
@@ -852,7 +776,7 @@ ssize_t wsrep_sst_prepare (void** msg)
{
WSREP_ERROR("Failed to guess address to accept state transfer. "
"wsrep_sst_receive_address must be set manually.");
- unireg_abort(1);
+ throw wsrep::runtime_error("Could not prepare state transfer request");
}
}
@@ -861,12 +785,16 @@ ssize_t wsrep_sst_prepare (void** msg)
if (!strcmp(method, WSREP_SST_MYSQLDUMP))
{
addr_len= sst_prepare_mysqldump (addr_in, &addr_out);
- if (addr_len < 0) unireg_abort(1);
+ if (addr_len < 0)
+ {
+ throw wsrep::runtime_error("Could not prepare mysqldump address");
+ }
}
else
{
/*! A heuristic workaround until we learn how to stop and start engines */
- if (SE_initialized)
+ if (Wsrep_server_state::instance().is_initialized() &&
+ Wsrep_server_state::instance().state() == Wsrep_server_state::s_joiner)
{
if (!strcmp(method, WSREP_SST_XTRABACKUP) ||
!strcmp(method, WSREP_SST_XTRABACKUPV2))
@@ -885,8 +813,7 @@ ssize_t wsrep_sst_prepare (void** msg)
"if other means of state transfer are unavailable. "
"In that case you will need to restart the server.",
method);
- *msg = 0;
- return 0;
+ return "";
}
addr_len = sst_prepare_other (method, sst_auth_real,
@@ -895,37 +822,28 @@ ssize_t wsrep_sst_prepare (void** msg)
{
WSREP_ERROR("Failed to prepare for '%s' SST. Unrecoverable.",
method);
- unireg_abort(1);
+ throw wsrep::runtime_error("Failed to prepare for SST. Unrecoverable");
}
}
- size_t const method_len(strlen(method));
- size_t const msg_len (method_len + addr_len + 2 /* + auth_len + 1*/);
-
- *msg = malloc (msg_len);
- if (NULL != *msg) {
- char* const method_ptr(reinterpret_cast<char*>(*msg));
- strcpy (method_ptr, method);
- char* const addr_ptr(method_ptr + method_len + 1);
- strcpy (addr_ptr, addr_out);
+ std::string ret;
+ ret += method;
+ ret.push_back('\0');
+ ret += addr_out;
- WSREP_INFO ("Prepared SST request: %s|%s", method_ptr, addr_ptr);
- }
- else {
- WSREP_ERROR("Failed to allocate SST request of size %zu. Can't continue.",
- msg_len);
- unireg_abort(1);
- }
+ const char* method_ptr(ret.data());
+ const char* addr_ptr(ret.data() + strlen(method_ptr) + 1);
+ WSREP_INFO ("Prepared SST request: %s|%s", method_ptr, addr_ptr);
if (addr_out != addr_in) /* malloc'ed */ free ((char*)addr_out);
- return msg_len;
+ return ret;
}
// helper method for donors
static int sst_run_shell (const char* cmd_str, char** env, int max_tries)
{
- int ret = 0;
+ int ret= 0;
for (int tries=1; tries <= max_tries; tries++)
{
@@ -936,7 +854,7 @@ static int sst_run_shell (const char* cmd_str, char** env, int max_tries)
proc.wait();
}
- if ((ret = proc.error()))
+ if ((ret= proc.error()))
{
WSREP_ERROR("Try %d/%d: '%s' failed: %d (%s)",
tries, max_tries, proc.cmd(), ret, strerror(ret));
@@ -954,15 +872,12 @@ static int sst_run_shell (const char* cmd_str, char** env, int max_tries)
static void sst_reject_queries(my_bool close_conn)
{
- wsrep_ready_set (FALSE); // this will be resotred when donor becomes synced
- WSREP_INFO("Rejecting client queries for the duration of SST.");
- if (TRUE == close_conn) wsrep_close_client_connections(FALSE);
+ WSREP_INFO("Rejecting client queries for the duration of SST.");
+ if (TRUE == close_conn) wsrep_close_client_connections(FALSE);
}
static int sst_donate_mysqldump (const char* addr,
- const wsrep_uuid_t* uuid,
- const char* uuid_str,
- wsrep_seqno_t seqno,
+ const wsrep::gtid& gtid,
bool bypass,
char** env) // carries auth info
{
@@ -985,23 +900,31 @@ static int sst_donate_mysqldump (const char* addr,
return -ENOMEM;
}
+ /*
+ we enable new client connections so that the mysqldump donor can connect in,
+ but we reject local connections from modifying data during SST, to keep
+ the data intact
+ */
if (!bypass && wsrep_sst_donor_rejects_queries) sst_reject_queries(TRUE);
make_wsrep_defaults_file();
+ std::ostringstream uuid_oss;
+ uuid_oss << gtid.id();
int ret= snprintf (cmd_str(), cmd_len,
"wsrep_sst_mysqldump "
WSREP_SST_OPT_ADDR " '%s' "
- WSREP_SST_OPT_PORT " '%d' "
+ WSREP_SST_OPT_PORT " '%u' "
WSREP_SST_OPT_LPORT " '%u' "
WSREP_SST_OPT_SOCKET " '%s' "
" %s "
WSREP_SST_OPT_GTID " '%s:%lld' "
WSREP_SST_OPT_GTID_DOMAIN_ID " '%d'"
"%s",
- addr, port, mysqld_port, mysqld_unix_port,
- wsrep_defaults_file, uuid_str,
- (long long)seqno, wsrep_gtid_domain_id,
+ addr, port, mysqld_port, mysqld_unix_port,
+ wsrep_defaults_file,
+ uuid_oss.str().c_str(), gtid.seqno().get(),
+ wsrep_gtid_domain_id,
bypass ? " " WSREP_SST_OPT_BYPASS : "");
if (ret < 0 || ret >= cmd_len)
@@ -1014,16 +937,17 @@ static int sst_donate_mysqldump (const char* addr,
ret= sst_run_shell (cmd_str(), env, 3);
- wsrep_gtid_t const state_id = { *uuid, (ret ? WSREP_SEQNO_UNDEFINED : seqno)};
-
- wsrep->sst_sent (wsrep, &state_id, ret);
+ wsrep::gtid sst_sent_gtid(ret == 0 ?
+ gtid :
+ wsrep::gtid(gtid.id(),
+ wsrep::seqno::undefined()));
+ Wsrep_server_state::instance().sst_sent(sst_sent_gtid, ret);
return ret;
}
wsrep_seqno_t wsrep_locked_seqno= WSREP_SEQNO_UNDEFINED;
-
/*
Create a file under data directory.
*/
@@ -1072,7 +996,6 @@ static int sst_create_file(const char *name, const char *content)
return err;
}
-
static int run_sql_command(THD *thd, const char *query)
{
thd->set_query((char *)query, strlen(query));
@@ -1118,9 +1041,9 @@ static int sst_flush_tables(THD* thd)
{
/* Do not use non-supported parser character sets */
WSREP_WARN("Current client character set is non-supported parser character set: %s", current_charset->csname);
- thd->variables.character_set_client = &my_charset_latin1;
+ thd->variables.character_set_client= &my_charset_latin1;
WSREP_WARN("For SST temporally setting character set to : %s",
- my_charset_latin1.csname);
+ my_charset_latin1.csname);
}
if (run_sql_command(thd, "FLUSH TABLES WITH READ LOCK"))
@@ -1141,7 +1064,7 @@ static int sst_flush_tables(THD* thd)
}
}
- thd->variables.character_set_client = current_charset;
+ thd->variables.character_set_client= current_charset;
if (err)
{
@@ -1159,7 +1082,6 @@ static int sst_flush_tables(THD* thd)
else
{
WSREP_INFO("Tables flushed.");
-
/*
Tables have been flushed. Create a file with cluster state ID and
wsrep_gtid_domain_id.
@@ -1168,6 +1090,41 @@ static int sst_flush_tables(THD* thd)
snprintf(content, sizeof(content), "%s:%lld %d\n", wsrep_cluster_state_uuid,
(long long)wsrep_locked_seqno, wsrep_gtid_domain_id);
err= sst_create_file(flush_success, content);
+
+ const char base_name[]= "tables_flushed";
+ ssize_t const full_len= strlen(mysql_real_data_home) + strlen(base_name)+2;
+ char *real_name= (char*) malloc(full_len);
+ sprintf(real_name, "%s/%s", mysql_real_data_home, base_name);
+ char *tmp_name= (char*) malloc(full_len + 4);
+ sprintf(tmp_name, "%s.tmp", real_name);
+
+ FILE* file= fopen(tmp_name, "w+");
+ if (0 == file)
+ {
+ err= errno;
+ WSREP_ERROR("Failed to open '%s': %d (%s)", tmp_name, err, strerror(err));
+ }
+ else
+ {
+ Wsrep_server_state& server_state= Wsrep_server_state::instance();
+ std::ostringstream uuid_oss;
+
+ uuid_oss << server_state.current_view().state_id().id();
+
+ fprintf(file, "%s:%lld %u\n",
+ uuid_oss.str().c_str(), server_state.pause_seqno().get(),
+ wsrep_gtid_domain_id);
+ fsync(fileno(file));
+ fclose(file);
+ if (rename(tmp_name, real_name) == -1)
+ {
+ err= errno;
+ WSREP_ERROR("Failed to rename '%s' to '%s': %d (%s)",
+ tmp_name, real_name, err, strerror(err));
+ }
+ }
+ free(real_name);
+ free(tmp_name);
}
return err;
@@ -1176,19 +1133,19 @@ static int sst_flush_tables(THD* thd)
static void sst_disallow_writes (THD* thd, bool yes)
{
- char query_str[64] = { 0, };
- ssize_t const query_max = sizeof(query_str) - 1;
+ char query_str[64]= { 0, };
+ ssize_t const query_max= sizeof(query_str) - 1;
CHARSET_INFO *current_charset;
- current_charset = thd->variables.character_set_client;
+ current_charset= thd->variables.character_set_client;
if (!is_supported_parser_charset(current_charset))
{
/* Do not use non-supported parser character sets */
WSREP_WARN("Current client character set is non-supported parser character set: %s", current_charset->csname);
- thd->variables.character_set_client = &my_charset_latin1;
+ thd->variables.character_set_client= &my_charset_latin1;
WSREP_WARN("For SST temporally setting character set to : %s",
- my_charset_latin1.csname);
+ my_charset_latin1.csname);
}
snprintf (query_str, query_max, "SET GLOBAL innodb_disallow_writes=%d",
@@ -1198,7 +1155,7 @@ static void sst_disallow_writes (THD* thd, bool yes)
{
WSREP_ERROR("Failed to disallow InnoDB writes");
}
- thd->variables.character_set_client = current_charset;
+ thd->variables.character_set_client= current_charset;
}
static void* sst_donor_thread (void* a)
@@ -1221,11 +1178,11 @@ static void* sst_donor_thread (void* a)
// operate with wsrep_ready == OFF
wsp::process proc(arg->cmd, "r", arg->env);
- err= proc.error();
+ err= -proc.error();
/* Inform server about SST script startup and release TO isolation */
mysql_mutex_lock (&arg->lock);
- arg->err = -err;
+ arg->err= -err;
mysql_cond_signal (&arg->cond);
mysql_mutex_unlock (&arg->lock); //! @note arg is unusable after that.
@@ -1270,7 +1227,7 @@ wait_signal:
mysql_mutex_unlock(mysql_bin_log.get_log_lock());
}
sst_disallow_writes (thd.ptr, false);
- thd.ptr->global_read_lock.unlock_global_read_lock (thd.ptr);
+ thd.ptr->global_read_lock.unlock_global_read_lock(thd.ptr);
locked= false;
}
err= 0;
@@ -1284,6 +1241,7 @@ wait_signal:
else
{
WSREP_WARN("Received unknown signal: '%s'", out);
+ proc.wait();
}
}
else
@@ -1291,7 +1249,7 @@ wait_signal:
WSREP_ERROR("Failed to read from: %s", proc.cmd());
proc.wait();
}
- if (!err && proc.error()) err= proc.error();
+ if (!err && proc.error()) err= -proc.error();
}
else
{
@@ -1307,27 +1265,23 @@ wait_signal:
mysql_mutex_unlock(mysql_bin_log.get_log_lock());
}
sst_disallow_writes (thd.ptr, false);
- thd.ptr->global_read_lock.unlock_global_read_lock (thd.ptr);
+ thd.ptr->global_read_lock.unlock_global_read_lock(thd.ptr);
}
- // signal to donor that SST is over
- struct wsrep_gtid const state_id = {
- ret_uuid, err ? WSREP_SEQNO_UNDEFINED : ret_seqno
- };
- wsrep->sst_sent (wsrep, &state_id, -err);
+ wsrep::gtid gtid(wsrep::id(ret_uuid.data, sizeof(ret_uuid.data)),
+ wsrep::seqno(err ? wsrep::seqno::undefined() :
+ wsrep::seqno(ret_seqno)));
+ Wsrep_server_state::instance().sst_sent(gtid, err);
proc.wait();
return NULL;
}
-
-
-static int sst_donate_other (const char* method,
- const char* addr,
- const char* uuid,
- wsrep_seqno_t seqno,
- bool bypass,
- char** env) // carries auth info
+static int sst_donate_other (const char* method,
+ const char* addr,
+ const wsrep::gtid& gtid,
+ bool bypass,
+ char** env) // carries auth info
{
int const cmd_len= 4096;
wsp::string cmd_str(cmd_len);
@@ -1340,7 +1294,9 @@ static int sst_donate_other (const char* method,
}
const char* binlog_opt= "";
+ const char* binlog_index_opt= "";
char* binlog_opt_val= NULL;
+ char* binlog_index_opt_val= NULL;
int ret;
if ((ret= generate_binlog_opt_val(&binlog_opt_val)))
@@ -1348,10 +1304,20 @@ static int sst_donate_other (const char* method,
WSREP_ERROR("sst_donate_other(): generate_binlog_opt_val() failed: %d",ret);
return ret;
}
+
+ if ((ret= generate_binlog_index_opt_val(&binlog_index_opt_val)))
+ {
+ WSREP_ERROR("sst_donate_other(): generate_binlog_index_opt_val() failed %d",
+ ret);
+ }
+
if (strlen(binlog_opt_val)) binlog_opt= WSREP_SST_OPT_BINLOG;
+ if (strlen(binlog_index_opt_val)) binlog_index_opt= WSREP_SST_OPT_BINLOG_INDEX;
make_wsrep_defaults_file();
+ std::ostringstream uuid_oss;
+ uuid_oss << gtid.id();
ret= snprintf (cmd_str(), cmd_len,
"wsrep_sst_%s "
WSREP_SST_OPT_ROLE " 'donor' "
@@ -1360,15 +1326,18 @@ static int sst_donate_other (const char* method,
WSREP_SST_OPT_DATA " '%s' "
" %s "
" %s '%s' "
+ " %s '%s' "
WSREP_SST_OPT_GTID " '%s:%lld' "
WSREP_SST_OPT_GTID_DOMAIN_ID " '%d'"
"%s",
method, addr, mysqld_unix_port, mysql_real_data_home,
wsrep_defaults_file,
binlog_opt, binlog_opt_val,
- uuid, (long long) seqno, wsrep_gtid_domain_id,
+ binlog_index_opt, binlog_index_opt_val,
+ uuid_oss.str().c_str(), gtid.seqno().get(), wsrep_gtid_domain_id,
bypass ? " " WSREP_SST_OPT_BYPASS : "");
my_free(binlog_opt_val);
+ my_free(binlog_index_opt_val);
if (ret < 0 || ret >= cmd_len)
{
@@ -1381,7 +1350,7 @@ static int sst_donate_other (const char* method,
pthread_t tmp;
sst_thread_arg arg(cmd_str(), env);
mysql_mutex_lock (&arg.lock);
- ret = pthread_create (&tmp, NULL, sst_donor_thread, &arg);
+ ret= pthread_create (&tmp, NULL, sst_donor_thread, &arg);
if (ret)
{
WSREP_ERROR("sst_donate_other(): pthread_create() failed: %d (%s)",
@@ -1394,23 +1363,18 @@ static int sst_donate_other (const char* method,
return arg.err;
}
-wsrep_cb_status_t wsrep_sst_donate_cb (void* app_ctx, void* recv_ctx,
- const void* msg, size_t msg_len,
- const wsrep_gtid_t* current_gtid,
- const char* state, size_t state_len,
- bool bypass)
+int wsrep_sst_donate(const std::string& msg,
+ const wsrep::gtid& current_gtid,
+ const bool bypass)
{
/* This will be reset when sync callback is called.
* Should we set wsrep_ready to FALSE here too? */
- wsrep_config_state->set(WSREP_MEMBER_DONOR);
+ wsrep_config_state->set(wsrep::server_state::s_donor);
- const char* method = (char*)msg;
- size_t method_len = strlen (method);
- const char* data = method + method_len + 1;
-
- char uuid_str[37];
- wsrep_uuid_print (&current_gtid->uuid, uuid_str, sizeof(uuid_str));
+ const char* method= msg.data();
+ size_t method_len= strlen (method);
+ const char* data= method + method_len + 1;
wsp::env env(NULL);
if (env.error())
@@ -1438,54 +1402,12 @@ wsrep_cb_status_t wsrep_sst_donate_cb (void* app_ctx, void* recv_ctx,
if (!strcmp (WSREP_SST_MYSQLDUMP, method))
{
- ret = sst_donate_mysqldump(data, &current_gtid->uuid, uuid_str,
- current_gtid->seqno, bypass, env());
+ ret= sst_donate_mysqldump(data, current_gtid, bypass, env());
}
else
{
- ret = sst_donate_other(method, data, uuid_str,
- current_gtid->seqno, bypass, env());
- }
-
- return (ret >= 0 ? WSREP_CB_SUCCESS : WSREP_CB_FAILURE);
-}
-
-void wsrep_SE_init_grab()
-{
- if (mysql_mutex_lock (&LOCK_wsrep_sst_init)) abort();
-}
-
-void wsrep_SE_init_wait()
-{
- double total_wtime=0;
-
- while (SE_initialized == false)
- {
- struct timespec wtime;
- set_timespec(wtime, WSREP_TIMEDWAIT_SECONDS);
- time_t start_time = time(NULL);
- mysql_cond_timedwait (&COND_wsrep_sst_init, &LOCK_wsrep_sst_init, &wtime);
- time_t end_time = time(NULL);
-
- if (!SE_initialized)
- {
- total_wtime += difftime(end_time, start_time);
- WSREP_DEBUG("Waiting for SST to complete. current seqno: %" PRId64 " waited %f secs.", local_seqno, total_wtime);
- service_manager_extend_timeout(WSREP_EXTEND_TIMEOUT_INTERVAL,
- "WSREP state transfer ongoing, current seqno: %" PRId64 " waited %f secs", local_seqno, total_wtime);
- }
+ ret= sst_donate_other(method, data, current_gtid, bypass, env());
}
- mysql_mutex_unlock (&LOCK_wsrep_sst_init);
-}
-
-void wsrep_SE_init_done()
-{
- mysql_cond_signal (&COND_wsrep_sst_init);
- mysql_mutex_unlock (&LOCK_wsrep_sst_init);
-}
-
-void wsrep_SE_initialized()
-{
- SE_initialized = true;
+ return (ret >= 0 ? 0 : 1);
}
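
A conversion that recurs throughout the rewritten wsrep_sst.cc above is turning the legacy (wsrep_uuid_t, wsrep_seqno_t) pair into a wsrep-lib wsrep::gtid, with error cases mapping to the undefined gtid. A hypothetical helper, not part of the patch, that captures the pattern used in sst_joiner_thread(), sst_donor_thread() and wsrep_sst_received():

  static wsrep::gtid to_wsrep_gtid(const wsrep_uuid_t& uuid, wsrep_seqno_t seqno)
  {
    if (seqno < 0)                         /* error or no position */
      return wsrep::gtid::undefined();
    return wsrep::gtid(wsrep::id(uuid.data, sizeof(uuid.data)),
                       wsrep::seqno(seqno));
  }
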
diff --git a/sql/wsrep_sst.h b/sql/wsrep_sst.h
index 29724a00797..46059a7f436 100644
--- a/sql/wsrep_sst.h
+++ b/sql/wsrep_sst.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013 Codership Oy <info@codership.com>
+/* Copyright (C) 2013-2018 Codership Oy <info@codership.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -13,14 +13,14 @@
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */
-#include <my_config.h>
-
#ifndef WSREP_SST_H
#define WSREP_SST_H
-#ifdef WITH_WSREP
+#include <my_config.h>
-#include <mysql.h> // my_bool
+#include "wsrep/gtid.hpp"
+#include <my_global.h>
+#include <string>
#define WSREP_SST_OPT_ROLE "--role"
#define WSREP_SST_OPT_ADDR "--address"
@@ -77,11 +77,29 @@ extern void wsrep_SE_init_wait(); /*! wait for SE init to complete */
extern void wsrep_SE_init_done(); /*! signal that SE init is complte */
extern void wsrep_SE_initialized(); /*! mark SE initialization complete */
+/**
+ Return the state transfer request string.
+ Note that the string may contain a '\0' in the middle.
+*/
+std::string wsrep_sst_prepare();
+
+/**
+ Donate a SST.
+
+ @param request SST request string received from the joiner. Note that
+ the string may contain a '\0' in the middle.
+ @param gtid Current position of the donor
+ @param bypass If true, full SST is not needed. Joiner needs to be
+ notified that it can continue starting from gtid.
+ */
+int wsrep_sst_donate(const std::string& request,
+ const wsrep::gtid& gtid,
+ bool bypass);
+
#else
#define wsrep_SE_initialized() do { } while(0)
#define wsrep_SE_init_grab() do { } while(0)
#define wsrep_SE_init_done() do { } while(0)
#define wsrep_sst_continue() (0)
-#endif /* WITH_WSREP */
#endif /* WSREP_SST_H */
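
The request string returned by wsrep_sst_prepare() packs the method name and the receive address into one std::string separated by an embedded '\0', and wsrep_sst_donate() relies on that terminator to split them again. A condensed sketch of the round trip (the values are illustrative):

  /* Joiner side, as in wsrep_sst_prepare() above. */
  std::string request;
  request+= method;            /* e.g. "rsync"                      */
  request.push_back('\0');
  request+= addr_out;          /* e.g. "192.168.0.2:4444/rsync_sst" */

  /* Donor side, as in wsrep_sst_donate() above: strlen() stops at the
     embedded '\0', so the address starts right after it. */
  const char* m= request.data();
  const char* addr= m + strlen(m) + 1;
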
diff --git a/sql/wsrep_storage_service.cc b/sql/wsrep_storage_service.cc
new file mode 100644
index 00000000000..e164114b733
--- /dev/null
+++ b/sql/wsrep_storage_service.cc
@@ -0,0 +1,213 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "my_global.h"
+#include "wsrep_storage_service.h"
+#include "wsrep_trans_observer.h" /* wsrep_open() */
+#include "wsrep_schema.h"
+#include "wsrep_binlog.h"
+
+#include "sql_class.h"
+#include "mysqld.h" /* next_query_id() */
+#include "slave.h" /* opt_log_slave_updates() */
+#include "transaction.h" /* trans_commit(), trans_rollback() */
+
+/*
+ Temporarily enable wsrep on thd
+ */
+class Wsrep_on
+{
+public:
+ Wsrep_on(THD* thd)
+ : m_thd(thd)
+ , m_wsrep_on(thd->variables.wsrep_on)
+ {
+ thd->variables.wsrep_on= TRUE;
+ }
+ ~Wsrep_on()
+ {
+ m_thd->variables.wsrep_on= m_wsrep_on;
+ }
+private:
+ THD* m_thd;
+ my_bool m_wsrep_on;
+};
+
+Wsrep_storage_service::Wsrep_storage_service(THD* thd)
+ : wsrep::storage_service()
+ , wsrep::high_priority_context(thd->wsrep_cs())
+ , m_thd(thd)
+{
+ thd->security_ctx->skip_grants();
+ thd->system_thread= SYSTEM_THREAD_SLAVE_SQL;
+
+ /* No binlogging */
+
+ /* No general log */
+ thd->variables.option_bits |= OPTION_LOG_OFF;
+
+ /* Read committed isolation to avoid gap locking */
+ thd->variables.tx_isolation = ISO_READ_COMMITTED;
+
+ /* Keep wsrep on to enter commit ordering hooks */
+ thd->variables.wsrep_on= 1;
+ thd->wsrep_skip_locking= true;
+
+ wsrep_open(thd);
+ wsrep_before_command(thd);
+}
+
+Wsrep_storage_service::~Wsrep_storage_service()
+{
+ wsrep_after_command_ignore_result(m_thd);
+ wsrep_close(m_thd);
+ m_thd->wsrep_skip_locking= false;
+}
+
+int Wsrep_storage_service::start_transaction(const wsrep::ws_handle& ws_handle)
+{
+ DBUG_ENTER("Wsrep_storage_service::start_transaction");
+ DBUG_ASSERT(m_thd == current_thd);
+ DBUG_PRINT("info", ("Wsrep_storage_service::start_transaction(%llu, %p)",
+ m_thd->thread_id, m_thd));
+ m_thd->set_wsrep_next_trx_id(ws_handle.transaction_id().get());
+ DBUG_RETURN(m_thd->wsrep_cs().start_transaction(
+ wsrep::transaction_id(m_thd->wsrep_next_trx_id())) ||
+ trans_begin(m_thd, MYSQL_START_TRANS_OPT_READ_WRITE));
+}
+
+void Wsrep_storage_service::adopt_transaction(const wsrep::transaction& transaction)
+{
+ DBUG_ENTER("Wsrep_storage_service::adopt_transaction");
+ DBUG_ASSERT(m_thd == current_thd);
+ m_thd->wsrep_cs().adopt_transaction(transaction);
+ trans_begin(m_thd, MYSQL_START_TRANS_OPT_READ_WRITE);
+ DBUG_VOID_RETURN;
+}
+
+int Wsrep_storage_service::append_fragment(const wsrep::id& server_id,
+ wsrep::transaction_id transaction_id,
+ int flags,
+ const wsrep::const_buffer& data)
+{
+ DBUG_ENTER("Wsrep_storage_service::append_fragment");
+ DBUG_ASSERT(m_thd == current_thd);
+ DBUG_PRINT("info", ("Wsrep_storage_service::append_fragment(%llu, %p)",
+ m_thd->thread_id, m_thd));
+ int ret= wsrep_schema->append_fragment(m_thd,
+ server_id,
+ transaction_id,
+ wsrep::seqno(-1),
+ flags,
+ data);
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_storage_service::update_fragment_meta(const wsrep::ws_meta& ws_meta)
+{
+ DBUG_ENTER("Wsrep_storage_service::update_fragment_meta");
+ DBUG_ASSERT(m_thd == current_thd);
+ DBUG_PRINT("info", ("Wsrep_storage_service::update_fragment_meta(%llu, %p)",
+ m_thd->thread_id, m_thd));
+ int ret= wsrep_schema->update_fragment_meta(m_thd, ws_meta);
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_storage_service::remove_fragments()
+{
+ DBUG_ENTER("Wsrep_storage_service::remove_fragments");
+ DBUG_ASSERT(m_thd == current_thd);
+
+ int ret= wsrep_schema->remove_fragments(m_thd,
+ m_thd->wsrep_trx().server_id(),
+ m_thd->wsrep_trx().id(),
+ m_thd->wsrep_sr().fragments());
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_storage_service::commit(const wsrep::ws_handle& ws_handle,
+ const wsrep::ws_meta& ws_meta)
+{
+ DBUG_ENTER("Wsrep_storage_service::commit");
+ DBUG_ASSERT(m_thd == current_thd);
+ DBUG_PRINT("info", ("Wsrep_storage_service::commit(%llu, %p)",
+ m_thd->thread_id, m_thd));
+ WSREP_DEBUG("Storage service commit: %llu, %lld",
+ ws_meta.transaction_id().get(), ws_meta.seqno().get());
+ int ret= 0;
+ const bool is_ordered= !ws_meta.seqno().is_undefined();
+
+ if (is_ordered)
+ {
+ ret= m_thd->wsrep_cs().prepare_for_ordering(ws_handle, ws_meta, true);
+ }
+
+ ret= ret || trans_commit(m_thd);
+
+ if (!is_ordered)
+ {
+ /* Wsrep commit was not ordered so it does not go through commit time
+ hooks and remains active. Roll it back to make cleanup happen
+ in after_applying() call. */
+ m_thd->wsrep_cs().before_rollback();
+ m_thd->wsrep_cs().after_rollback();
+ }
+ else if (ret)
+ {
+ /* Commit failed, this probably means that the parent SR transaction
+ was BF aborted. Roll back out of order, the parent
+ transaction will release commit order after it has rolled back. */
+ m_thd->wsrep_cs().prepare_for_ordering(wsrep::ws_handle(),
+ wsrep::ws_meta(),
+ false);
+ trans_rollback(m_thd);
+ }
+ m_thd->wsrep_cs().after_applying();
+ m_thd->mdl_context.release_transactional_locks();
+ DBUG_RETURN(ret);
+}
+
+int Wsrep_storage_service::rollback(const wsrep::ws_handle& ws_handle,
+ const wsrep::ws_meta& ws_meta)
+{
+ DBUG_ENTER("Wsrep_storage_service::rollback");
+ DBUG_ASSERT(m_thd == current_thd);
+ DBUG_PRINT("info", ("Wsrep_storage_service::rollback(%llu, %p)",
+ m_thd->thread_id, m_thd));
+ int ret= (m_thd->wsrep_cs().prepare_for_ordering(
+ ws_handle, ws_meta, false) ||
+ trans_rollback(m_thd));
+ m_thd->wsrep_cs().after_applying();
+ m_thd->mdl_context.release_transactional_locks();
+ DBUG_RETURN(ret);
+}
+
+void Wsrep_storage_service::store_globals()
+{
+ DBUG_ENTER("Wsrep_storage_service::store_globals");
+ DBUG_PRINT("info", ("Wsrep_storage_service::store_globals(%llu, %p)",
+ m_thd->thread_id, m_thd));
+ m_thd->store_globals();
+ DBUG_VOID_RETURN;
+}
+
+void Wsrep_storage_service::reset_globals()
+{
+ DBUG_ENTER("Wsrep_storage_service::reset_globals");
+ DBUG_PRINT("info", ("Wsrep_storage_service::reset_globals(%llu, %p)",
+ m_thd->thread_id, m_thd));
+ m_thd->reset_globals();
+ DBUG_VOID_RETURN;
+}
diff --git a/sql/wsrep_storage_service.h b/sql/wsrep_storage_service.h
new file mode 100644
index 00000000000..6208300930f
--- /dev/null
+++ b/sql/wsrep_storage_service.h
@@ -0,0 +1,48 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#ifndef WSREP_STORAGE_SERVICE_H
+#define WSREP_STORAGE_SERVICE_H
+
+#include "wsrep/storage_service.hpp"
+#include "wsrep/client_state.hpp"
+
+class THD;
+class Wsrep_server_service;
+class Wsrep_storage_service :
+ public wsrep::storage_service,
+ public wsrep::high_priority_context
+{
+public:
+ Wsrep_storage_service(THD*);
+ ~Wsrep_storage_service();
+ int start_transaction(const wsrep::ws_handle&);
+ void adopt_transaction(const wsrep::transaction&);
+ int append_fragment(const wsrep::id&,
+ wsrep::transaction_id,
+ int flags,
+ const wsrep::const_buffer&);
+ int update_fragment_meta(const wsrep::ws_meta&);
+ int remove_fragments();
+ int commit(const wsrep::ws_handle&, const wsrep::ws_meta&);
+ int rollback(const wsrep::ws_handle&, const wsrep::ws_meta&);
+ void store_globals();
+ void reset_globals();
+private:
+ friend class Wsrep_server_service;
+ THD* m_thd;
+};
+
+#endif /* WSREP_STORAGE_SERVICE_H */
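
One small but representative cleanup in the wsrep_thd.cc diff below: the BF-abort counter drops the __LP64__-conditional my_atomic_* wrappers in favour of Atomic_counter<uint64_t>, which reads and increments with plain operators. Roughly as follows; the increment site is not shown in this hunk and is assumed:

  static Atomic_counter<uint64_t> wsrep_bf_aborts_counter;

  wsrep_bf_aborts_counter++;                        /* on a BF abort (assumed site) */
  wsrep_local_bf_aborts= wsrep_bf_aborts_counter;   /* in wsrep_show_bf_aborts()    */
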
diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc
index df09610b66b..b849bc256cb 100644
--- a/sql/wsrep_thd.cc
+++ b/sql/wsrep_thd.cc
@@ -15,666 +15,386 @@
#include "mariadb.h"
#include "wsrep_thd.h"
+#include "wsrep_trans_observer.h"
+#include "wsrep_high_priority_service.h"
+#include "wsrep_storage_service.h"
#include "transaction.h"
#include "rpl_rli.h"
#include "log_event.h"
#include "sql_parse.h"
-//#include "global_threads.h" // LOCK_thread_count, etc.
#include "sql_base.h" // close_thread_tables()
#include "mysqld.h" // start_wsrep_THD();
-
-#include "slave.h" // opt_log_slave_updates
-#include "rpl_filter.h"
+#include "wsrep_applier.h" // start_wsrep_THD();
+#include "mysql/service_wsrep.h"
+#include "debug_sync.h"
+#include "slave.h"
#include "rpl_rli.h"
#include "rpl_mi.h"
-#if (__LP64__)
-static volatile int64 wsrep_bf_aborts_counter(0);
-#define WSREP_ATOMIC_LOAD_LONG my_atomic_load64
-#define WSREP_ATOMIC_ADD_LONG my_atomic_add64
-#else
-static volatile int32 wsrep_bf_aborts_counter(0);
-#define WSREP_ATOMIC_LOAD_LONG my_atomic_load32
-#define WSREP_ATOMIC_ADD_LONG my_atomic_add32
-#endif
+static Wsrep_thd_queue* wsrep_rollback_queue= 0;
+static Wsrep_thd_queue* wsrep_post_rollback_queue= 0;
+static Atomic_counter<uint64_t> wsrep_bf_aborts_counter;
+
int wsrep_show_bf_aborts (THD *thd, SHOW_VAR *var, char *buff,
enum enum_var_type scope)
{
- wsrep_local_bf_aborts = WSREP_ATOMIC_LOAD_LONG(&wsrep_bf_aborts_counter);
- var->type = SHOW_LONGLONG;
- var->value = (char*)&wsrep_local_bf_aborts;
+ wsrep_local_bf_aborts= wsrep_bf_aborts_counter;
+ var->type= SHOW_LONGLONG;
+ var->value= (char*)&wsrep_local_bf_aborts;
return 0;
}
-/* must have (&thd->LOCK_thd_data) */
-void wsrep_client_rollback(THD *thd)
-{
- WSREP_DEBUG("client rollback due to BF abort for (%lld), query: %s",
- (longlong) thd->thread_id, thd->query());
-
- WSREP_ATOMIC_ADD_LONG(&wsrep_bf_aborts_counter, 1);
-
- thd->wsrep_conflict_state= ABORTING;
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- trans_rollback(thd);
-
- if (thd->locked_tables_mode && thd->lock)
- {
- WSREP_DEBUG("unlocking tables for BF abort (%lld)",
- (longlong) thd->thread_id);
- thd->locked_tables_list.unlock_locked_tables(thd);
- thd->variables.option_bits&= ~(OPTION_TABLE_LOCK);
- }
-
- if (thd->global_read_lock.is_acquired())
- {
- WSREP_DEBUG("unlocking GRL for BF abort (%lld)",
- (longlong) thd->thread_id);
- thd->global_read_lock.unlock_global_read_lock(thd);
- }
-
- /* Release transactional metadata locks. */
- thd->mdl_context.release_transactional_locks();
-
- /* release explicit MDL locks */
- thd->mdl_context.release_explicit_locks();
-
- if (thd->get_binlog_table_maps())
- {
- WSREP_DEBUG("clearing binlog table map for BF abort (%lld)",
- (longlong) thd->thread_id);
- thd->clear_binlog_table_maps();
- }
- mysql_mutex_lock(&thd->LOCK_thd_data);
- thd->wsrep_conflict_state= ABORTED;
-}
-
-#define NUMBER_OF_FIELDS_TO_IDENTIFY_COORDINATOR 1
-#define NUMBER_OF_FIELDS_TO_IDENTIFY_WORKER 2
-
-static rpl_group_info* wsrep_relay_group_init(const char* log_fname)
-{
- Relay_log_info* rli= new Relay_log_info(false);
-
- if (!rli->relay_log.description_event_for_exec)
- {
- rli->relay_log.description_event_for_exec=
- new Format_description_log_event(4);
- }
-
- static LEX_CSTRING connection_name= { STRING_WITH_LEN("wsrep") };
-
- /*
- Master_info's constructor initializes rpl_filter by either an already
- constructed Rpl_filter object from global 'rpl_filters' list if the
- specified connection name is same, or it constructs a new Rpl_filter
- object and adds it to rpl_filters. This object is later destructed by
- Mater_info's destructor by looking it up based on connection name in
- rpl_filters list.
-
- However, since all Master_info objects created here would share same
- connection name ("wsrep"), destruction of any of the existing Master_info
- objects (in wsrep_return_from_bf_mode()) would free rpl_filter referenced
- by any/all existing Master_info objects.
-
- In order to avoid that, we have added a check in Master_info's destructor
- to not free the "wsrep" rpl_filter. It will eventually be freed by
- free_all_rpl_filters() when server terminates.
- */
- rli->mi = new Master_info(&connection_name, false);
-
- struct rpl_group_info *rgi= new rpl_group_info(rli);
- rgi->thd= rli->sql_driver_thd= current_thd;
-
- if ((rgi->deferred_events_collecting= rli->mi->rpl_filter->is_on()))
- {
- rgi->deferred_events= new Deferred_log_events(rli);
- }
-
- return rgi;
-}
-
-static void wsrep_prepare_bf_thd(THD *thd, struct wsrep_thd_shadow* shadow)
+static void wsrep_replication_process(THD *thd,
+ void* arg __attribute__((unused)))
{
- shadow->options = thd->variables.option_bits;
- shadow->server_status = thd->server_status;
- shadow->wsrep_exec_mode = thd->wsrep_exec_mode;
- shadow->vio = thd->net.vio;
-
- // Disable general logging on applier threads
- thd->variables.option_bits |= OPTION_LOG_OFF;
- // Enable binlogging if opt_log_slave_updates is set
- if (opt_log_slave_updates)
- thd->variables.option_bits|= OPTION_BIN_LOG;
- else
- thd->variables.option_bits&= ~(OPTION_BIN_LOG);
+ DBUG_ENTER("wsrep_replication_process");
- if (!thd->wsrep_rgi) thd->wsrep_rgi= wsrep_relay_group_init("wsrep_relay");
+ Wsrep_applier_service applier_service(thd);
/* thd->system_thread_info.rpl_sql_info isn't initialized. */
thd->system_thread_info.rpl_sql_info=
new rpl_sql_thread_info(thd->wsrep_rgi->rli->mi->rpl_filter);
- thd->wsrep_exec_mode= REPL_RECV;
- thd->net.vio= 0;
- thd->clear_error();
-
- shadow->tx_isolation = thd->variables.tx_isolation;
- thd->variables.tx_isolation = ISO_READ_COMMITTED;
- thd->tx_isolation = ISO_READ_COMMITTED;
-
- shadow->db = thd->db.str;
- shadow->db_length = thd->db.length;
- shadow->user_time = thd->user_time;
- shadow->row_count_func= thd->get_row_count_func();
- thd->reset_db(&null_clex_str);
-}
+ WSREP_INFO("Starting applier thread %llu", thd->thread_id);
+ enum wsrep::provider::status
+ ret= Wsrep_server_state::get_provider().run_applier(&applier_service);
-static void wsrep_return_from_bf_mode(THD *thd, struct wsrep_thd_shadow* shadow)
-{
- LEX_CSTRING db= {shadow->db, shadow->db_length };
- thd->variables.option_bits = shadow->options;
- thd->server_status = shadow->server_status;
- thd->wsrep_exec_mode = shadow->wsrep_exec_mode;
- thd->net.vio = shadow->vio;
- thd->variables.tx_isolation = shadow->tx_isolation;
- thd->user_time = shadow->user_time;
- thd->reset_db(&db);
+ WSREP_INFO("Applier thread exiting ret: %d thd: %llu", ret, thd->thread_id);
+ mysql_mutex_lock(&LOCK_wsrep_slave_threads);
+ wsrep_close_applier(thd);
+ mysql_cond_broadcast(&COND_wsrep_slave_threads);
+ mysql_mutex_unlock(&LOCK_wsrep_slave_threads);
delete thd->system_thread_info.rpl_sql_info;
delete thd->wsrep_rgi->rli->mi;
delete thd->wsrep_rgi->rli;
-
+
thd->wsrep_rgi->cleanup_after_session();
delete thd->wsrep_rgi;
- thd->wsrep_rgi = NULL;
- thd->set_row_count_func(shadow->row_count_func);
-}
+ thd->wsrep_rgi= NULL;
-void wsrep_replay_transaction(THD *thd)
-{
- DBUG_ENTER("wsrep_replay_transaction");
- /* checking if BF trx must be replayed */
- if (thd->wsrep_conflict_state== MUST_REPLAY) {
- DBUG_ASSERT(wsrep_thd_trx_seqno(thd));
- if (thd->wsrep_exec_mode!= REPL_RECV) {
- if (thd->get_stmt_da()->is_sent())
- {
- WSREP_ERROR("replay issue, thd has reported status already");
- }
-
-
- /*
- PS reprepare observer should have been removed already.
- open_table() will fail if we have dangling observer here.
- */
- DBUG_ASSERT(thd->m_reprepare_observer == NULL);
-
- struct da_shadow
- {
- enum Diagnostics_area::enum_diagnostics_status status;
- ulonglong affected_rows;
- ulonglong last_insert_id;
- char message[MYSQL_ERRMSG_SIZE];
- };
- struct da_shadow da_status;
- da_status.status= thd->get_stmt_da()->status();
- if (da_status.status == Diagnostics_area::DA_OK)
- {
- da_status.affected_rows= thd->get_stmt_da()->affected_rows();
- da_status.last_insert_id= thd->get_stmt_da()->last_insert_id();
- strmake(da_status.message,
- thd->get_stmt_da()->message(),
- sizeof(da_status.message)-1);
- }
-
- thd->get_stmt_da()->reset_diagnostics_area();
-
- thd->wsrep_conflict_state= REPLAYING;
- mysql_mutex_unlock(&thd->LOCK_thd_data);
-
- thd->reset_for_next_command();
- thd->reset_killed();
- close_thread_tables(thd);
- if (thd->locked_tables_mode && thd->lock)
- {
- WSREP_DEBUG("releasing table lock for replaying (%lld)",
- (longlong) thd->thread_id);
- thd->locked_tables_list.unlock_locked_tables(thd);
- thd->variables.option_bits&= ~(OPTION_TABLE_LOCK);
- }
- thd->mdl_context.release_transactional_locks();
- /*
- Replaying will call MYSQL_START_STATEMENT when handling
- BEGIN Query_log_event so end statement must be called before
- replaying.
- */
- MYSQL_END_STATEMENT(thd->m_statement_psi, thd->get_stmt_da());
- thd->m_statement_psi= NULL;
- thd->m_digest= NULL;
- thd_proc_info(thd, "WSREP replaying trx");
- WSREP_DEBUG("replay trx: %s %lld",
- thd->query() ? thd->query() : "void",
- (long long)wsrep_thd_trx_seqno(thd));
- struct wsrep_thd_shadow shadow;
- wsrep_prepare_bf_thd(thd, &shadow);
-
- /* From trans_begin() */
- thd->variables.option_bits|= OPTION_BEGIN;
- thd->server_status|= SERVER_STATUS_IN_TRANS;
-
- int rcode = wsrep->replay_trx(wsrep,
- &thd->wsrep_ws_handle,
- (void *)thd);
-
- wsrep_return_from_bf_mode(thd, &shadow);
- if (thd->wsrep_conflict_state!= REPLAYING)
- WSREP_WARN("lost replaying mode: %d", thd->wsrep_conflict_state );
-
- mysql_mutex_lock(&thd->LOCK_thd_data);
-
- switch (rcode)
- {
- case WSREP_OK:
- thd->wsrep_conflict_state= NO_CONFLICT;
- wsrep->post_commit(wsrep, &thd->wsrep_ws_handle);
- WSREP_DEBUG("trx_replay successful for: %lld %lld",
- (longlong) thd->thread_id, (longlong) thd->real_id);
- if (thd->get_stmt_da()->is_sent())
- {
- WSREP_WARN("replay ok, thd has reported status");
- }
- else if (thd->get_stmt_da()->is_set())
- {
- if (thd->get_stmt_da()->status() != Diagnostics_area::DA_OK &&
- thd->get_stmt_da()->status() != Diagnostics_area::DA_OK_BULK)
- {
- WSREP_WARN("replay ok, thd has error status %d",
- thd->get_stmt_da()->status());
- }
- }
- else
- {
- if (da_status.status == Diagnostics_area::DA_OK)
- {
- my_ok(thd,
- da_status.affected_rows,
- da_status.last_insert_id,
- da_status.message);
- }
- else
- {
- my_ok(thd);
- }
- }
- break;
- case WSREP_TRX_FAIL:
- if (thd->get_stmt_da()->is_sent())
- {
- WSREP_ERROR("replay failed, thd has reported status");
- }
- else
- {
- WSREP_DEBUG("replay failed, rolling back");
- }
- thd->wsrep_conflict_state= ABORTED;
- wsrep->post_rollback(wsrep, &thd->wsrep_ws_handle);
- break;
- default:
- WSREP_ERROR("trx_replay failed for: %d, schema: %s, query: %s",
- rcode, thd->get_db(),
- thd->query() ? thd->query() : "void");
- /* we're now in inconsistent state, must abort */
-
- /* http://bazaar.launchpad.net/~codership/codership-mysql/5.6/revision/3962#sql/wsrep_thd.cc */
- mysql_mutex_unlock(&thd->LOCK_thd_data);
-
- unireg_abort(1);
- break;
- }
-
- wsrep_cleanup_transaction(thd);
-
- mysql_mutex_lock(&LOCK_wsrep_replaying);
- wsrep_replaying--;
- WSREP_DEBUG("replaying decreased: %d, thd: %lld",
- wsrep_replaying, (longlong) thd->thread_id);
- mysql_cond_broadcast(&COND_wsrep_replaying);
- mysql_mutex_unlock(&LOCK_wsrep_replaying);
- }
- }
- DBUG_VOID_RETURN;
-}
-
-static void wsrep_replication_process(THD *thd)
-{
- int rcode;
- DBUG_ENTER("wsrep_replication_process");
-
- struct wsrep_thd_shadow shadow;
- wsrep_prepare_bf_thd(thd, &shadow);
-
- /* From trans_begin() */
- thd->variables.option_bits|= OPTION_BEGIN;
- thd->server_status|= SERVER_STATUS_IN_TRANS;
-
- rcode = wsrep->recv(wsrep, (void *)thd);
- DBUG_PRINT("wsrep",("wsrep_repl returned: %d", rcode));
-
- WSREP_INFO("applier thread exiting (code:%d)", rcode);
-
- switch (rcode) {
- case WSREP_OK:
- case WSREP_NOT_IMPLEMENTED:
- case WSREP_CONN_FAIL:
- /* provider does not support slave operations / disconnected from group,
- * just close applier thread */
- break;
- case WSREP_NODE_FAIL:
- /* data inconsistency => SST is needed */
- /* Note: we cannot just blindly restart replication here,
- * SST might require server restart if storage engines must be
- * initialized after SST */
- WSREP_ERROR("node consistency compromised, aborting");
- wsrep_kill_mysql(thd);
- break;
- case WSREP_WARNING:
- case WSREP_TRX_FAIL:
- case WSREP_TRX_MISSING:
- /* these suggests a bug in provider code */
- WSREP_WARN("bad return from recv() call: %d", rcode);
- /* Shut down this node. */
- /* fall through */
- case WSREP_FATAL:
- /* Cluster connectivity is lost.
- *
- * If applier was killed on purpose (KILL_CONNECTION), we
- * avoid mysql shutdown. This is because the killer will then handle
- * shutdown processing (or replication restarting)
- */
- if (thd->killed != KILL_CONNECTION)
- {
- wsrep_kill_mysql(thd);
- }
- break;
- }
-
- mysql_mutex_lock(&LOCK_thread_count);
- wsrep_close_applier(thd);
- mysql_cond_broadcast(&COND_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
if(thd->has_thd_temporary_tables())
{
WSREP_WARN("Applier %lld has temporary tables at exit.",
thd->thread_id);
}
- wsrep_return_from_bf_mode(thd, &shadow);
DBUG_VOID_RETURN;
}
-static bool create_wsrep_THD(wsrep_thd_processor_fun processor)
+static bool create_wsrep_THD(Wsrep_thd_args* args)
{
ulong old_wsrep_running_threads= wsrep_running_threads;
pthread_t unused;
- mysql_mutex_lock(&LOCK_thread_count);
+
bool res= pthread_create(&unused, &connection_attrib, start_wsrep_THD,
- (void*)processor);
+ args);
/*
if starting a thread on server startup, wait until the this thread's THD
is fully initialized (otherwise a THD initialization code might
try to access a partially initialized server data structure - MDEV-8208).
*/
+ mysql_mutex_lock(&LOCK_wsrep_slave_threads);
if (!mysqld_server_initialized)
while (old_wsrep_running_threads == wsrep_running_threads)
- mysql_cond_wait(&COND_thread_count, &LOCK_thread_count);
- mysql_mutex_unlock(&LOCK_thread_count);
+ mysql_cond_wait(&COND_wsrep_slave_threads, &LOCK_wsrep_slave_threads);
+ mysql_mutex_unlock(&LOCK_wsrep_slave_threads);
return res;
}
void wsrep_create_appliers(long threads)
{
- if (!wsrep_connected)
+ /* Don't start slave threads if wsrep-provider or wsrep-cluster-address
+ is not set.
+ */
+ if (!WSREP_PROVIDER_EXISTS)
{
- /* see wsrep_replication_start() for the logic */
- if (wsrep_cluster_address && strlen(wsrep_cluster_address) &&
- wsrep_provider && strcasecmp(wsrep_provider, "none"))
- {
- WSREP_ERROR("Trying to launch slave threads before creating "
- "connection at '%s'", wsrep_cluster_address);
- assert(0);
- }
+ return;
+ }
+
+ if (!wsrep_cluster_address || wsrep_cluster_address[0]== 0)
+ {
+ WSREP_DEBUG("wsrep_create_appliers exit due to empty address");
return;
}
long wsrep_threads=0;
- while (wsrep_threads++ < threads) {
- if (create_wsrep_THD(wsrep_replication_process))
+
+ while (wsrep_threads++ < threads)
+ {
+ Wsrep_thd_args* args(new Wsrep_thd_args(wsrep_replication_process, 0));
+ if (create_wsrep_THD(args))
+ {
WSREP_WARN("Can't create thread to manage wsrep replication");
+ }
}
}
-static void wsrep_rollback_process(THD *thd)
+static void wsrep_rollback_process(THD *rollbacker,
+ void *arg __attribute__((unused)))
{
DBUG_ENTER("wsrep_rollback_process");
- mysql_mutex_lock(&LOCK_wsrep_rollback);
- wsrep_aborting_thd= NULL;
+ THD* thd= NULL;
+ DBUG_ASSERT(!wsrep_rollback_queue);
+ wsrep_rollback_queue= new Wsrep_thd_queue(rollbacker);
+ WSREP_INFO("Starting rollbacker thread %llu", rollbacker->thread_id);
- while (thd->killed == NOT_KILLED) {
- thd_proc_info(thd, "WSREP aborter idle");
- thd->mysys_var->current_mutex= &LOCK_wsrep_rollback;
- thd->mysys_var->current_cond= &COND_wsrep_rollback;
-
- mysql_cond_wait(&COND_wsrep_rollback,&LOCK_wsrep_rollback);
+ thd_proc_info(rollbacker, "wsrep aborter idle");
+ while ((thd= wsrep_rollback_queue->pop_front()) != NULL)
+ {
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ wsrep::client_state& cs(thd->wsrep_cs());
+ const wsrep::transaction& tx(cs.transaction());
+ if (tx.state() == wsrep::transaction::s_aborted)
+ {
+ WSREP_DEBUG("rollbacker thd already aborted: %llu state: %d",
+ (long long)thd->real_id,
+ tx.state());
- WSREP_DEBUG("WSREP rollback thread wakes for signal");
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ continue;
+ }
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
- mysql_mutex_lock(&thd->mysys_var->mutex);
- thd_proc_info(thd, "WSREP aborter active");
- thd->mysys_var->current_mutex= 0;
- thd->mysys_var->current_cond= 0;
- mysql_mutex_unlock(&thd->mysys_var->mutex);
+ thd_proc_info(rollbacker, "wsrep aborter active");
- /* check for false alarms */
- if (!wsrep_aborting_thd)
+ wsrep::transaction_id transaction_id(thd->wsrep_trx().id());
+ if (thd->wsrep_trx().is_streaming() &&
+ thd->wsrep_trx().bf_aborted_in_total_order())
{
- WSREP_DEBUG("WSREP rollback thread has empty abort queue");
- }
- /* process all entries in the queue */
- while (wsrep_aborting_thd) {
- THD *aborting;
- wsrep_aborting_thd_t next = wsrep_aborting_thd->next;
- aborting = wsrep_aborting_thd->aborting_thd;
- my_free(wsrep_aborting_thd);
- wsrep_aborting_thd= next;
- /*
- * must release mutex, appliers my want to add more
- * aborting thds in our work queue, while we rollback
- */
- mysql_mutex_unlock(&LOCK_wsrep_rollback);
-
- mysql_mutex_lock(&aborting->LOCK_thd_data);
- if (aborting->wsrep_conflict_state== ABORTED)
+ thd->store_globals();
+ thd->wsrep_cs().store_globals();
+ if (thd->wsrep_cs().mode() == wsrep::client_state::m_high_priority)
{
- WSREP_DEBUG("WSREP, thd already aborted: %llu state: %d",
- (long long)aborting->real_id,
- aborting->wsrep_conflict_state);
-
- mysql_mutex_unlock(&aborting->LOCK_thd_data);
- mysql_mutex_lock(&LOCK_wsrep_rollback);
- continue;
+ DBUG_ASSERT(thd->wsrep_applier_service);
+ thd->wsrep_applier_service->rollback(wsrep::ws_handle(),
+ wsrep::ws_meta());
+ thd->wsrep_applier_service->after_apply();
+ /* Will free THD */
+ Wsrep_server_state::instance().server_service().
+ release_high_priority_service(thd->wsrep_applier_service);
}
- aborting->wsrep_conflict_state= ABORTING;
-
- mysql_mutex_unlock(&aborting->LOCK_thd_data);
-
- set_current_thd(aborting);
- aborting->store_globals();
-
- mysql_mutex_lock(&aborting->LOCK_thd_data);
- wsrep_client_rollback(aborting);
- WSREP_DEBUG("WSREP rollbacker aborted thd: (%lld %lld)",
- (longlong) aborting->thread_id,
- (longlong) aborting->real_id);
- mysql_mutex_unlock(&aborting->LOCK_thd_data);
-
- set_current_thd(thd);
+ else
+ {
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ /* prepare THD for rollback processing */
+ thd->reset_for_next_command(true);
+ thd->lex->sql_command= SQLCOM_ROLLBACK;
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ /* Perform a client rollback, restore globals and signal
+ the victim only when all the resources have been
+ released */
+ thd->wsrep_cs().client_service().bf_rollback();
+ thd->reset_globals();
+ thd->wsrep_cs().sync_rollback_complete();
+ }
+ }
+ else if (wsrep_thd_is_applying(thd))
+ {
+ WSREP_DEBUG("rollbacker aborting SR thd: (%lld %llu)",
+ thd->thread_id, (long long)thd->real_id);
+ DBUG_ASSERT(thd->wsrep_cs().mode() == Wsrep_client_state::m_high_priority);
+ /* Must be streaming and must have been removed from the
+ server state streaming appliers map. */
+ DBUG_ASSERT(thd->wsrep_trx().is_streaming());
+ DBUG_ASSERT(!Wsrep_server_state::instance().find_streaming_applier(
+ thd->wsrep_trx().server_id(),
+ thd->wsrep_trx().id()));
+ DBUG_ASSERT(thd->wsrep_applier_service);
+
+ /* Fragment removal should happen before rollback to make
+ the transaction non-observable in SR table after the rollback
+ completes. For correctness the order does not matter here,
+ but currently it is mandated by checks in some MTR tests. */
+ Wsrep_storage_service* storage_service=
+ static_cast<Wsrep_storage_service*>(
+ Wsrep_server_state::instance().server_service().storage_service(
+ *thd->wsrep_applier_service));
+ storage_service->store_globals();
+ storage_service->adopt_transaction(thd->wsrep_trx());
+ storage_service->remove_fragments();
+ storage_service->commit(wsrep::ws_handle(transaction_id, 0),
+ wsrep::ws_meta());
+ Wsrep_server_state::instance().server_service().release_storage_service(storage_service);
thd->store_globals();
+ thd->wsrep_cs().store_globals();
+ thd->wsrep_applier_service->rollback(wsrep::ws_handle(),
+ wsrep::ws_meta());
+ thd->wsrep_applier_service->after_apply();
+ /* Will free THD */
+ Wsrep_server_state::instance().server_service()
+ .release_high_priority_service(thd->wsrep_applier_service);
- mysql_mutex_lock(&LOCK_wsrep_rollback);
}
+ else
+ {
+ if (thd->wsrep_trx().is_streaming())
+ {
+ Wsrep_storage_service* storage_service=
+ static_cast<Wsrep_storage_service*>(
+ Wsrep_server_state::instance().server_service().
+ storage_service(thd->wsrep_cs().client_service()));
+
+ storage_service->store_globals();
+ storage_service->adopt_transaction(thd->wsrep_trx());
+ storage_service->remove_fragments();
+ storage_service->commit(wsrep::ws_handle(transaction_id, 0),
+ wsrep::ws_meta());
+ Wsrep_server_state::instance().server_service().
+ release_storage_service(storage_service);
+ }
+ thd->store_globals();
+ thd->wsrep_cs().store_globals();
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ /* prepare THD for rollback processing */
+ thd->reset_for_next_command();
+ thd->lex->sql_command= SQLCOM_ROLLBACK;
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ /* Perform a client rollback, restore globals and signal
+ the victim only when all the resources have been
+ released */
+ thd->wsrep_cs().client_service().bf_rollback();
+ thd->reset_globals();
+ thd->wsrep_cs().sync_rollback_complete();
+ WSREP_DEBUG("rollbacker aborted thd: (%llu %llu)",
+ thd->thread_id, (long long)thd->real_id);
+ }
+
+ thd_proc_info(rollbacker, "wsrep aborter idle");
}
+
+ delete wsrep_rollback_queue;
+ wsrep_rollback_queue= NULL;
- mysql_mutex_unlock(&LOCK_wsrep_rollback);
- sql_print_information("WSREP: rollbacker thread exiting");
+ WSREP_INFO("rollbacker thread exiting %llu", rollbacker->thread_id);
+ DBUG_ASSERT(rollbacker->killed != NOT_KILLED);
DBUG_PRINT("wsrep",("wsrep rollbacker thread exiting"));
DBUG_VOID_RETURN;
}
-void wsrep_create_rollbacker()
+static void wsrep_post_rollback_process(THD *post_rollbacker,
+ void *arg __attribute__((unused)))
{
- if (wsrep_provider && strcasecmp(wsrep_provider, "none"))
- {
- /* create rollbacker */
- if (create_wsrep_THD(wsrep_rollback_process))
- WSREP_WARN("Can't create thread to manage wsrep rollback");
- }
-}
+ DBUG_ENTER("wsrep_post_rollback_process");
+ THD* thd= NULL;
-void wsrep_thd_set_PA_safe(void *thd_ptr, my_bool safe)
-{
- if (thd_ptr)
- {
- THD* thd = (THD*)thd_ptr;
- thd->wsrep_PA_safe = safe;
- }
-}
+ WSREP_INFO("Starting post rollbacker thread %llu", post_rollbacker->thread_id);
+ DBUG_ASSERT(!wsrep_post_rollback_queue);
+ wsrep_post_rollback_queue= new Wsrep_thd_queue(post_rollbacker);
-enum wsrep_conflict_state wsrep_thd_conflict_state(THD *thd, my_bool sync)
-{
- enum wsrep_conflict_state state = NO_CONFLICT;
- if (thd)
+ while ((thd= wsrep_post_rollback_queue->pop_front()) != NULL)
{
- if (sync) mysql_mutex_lock(&thd->LOCK_thd_data);
-
- state = thd->wsrep_conflict_state;
- if (sync) mysql_mutex_unlock(&thd->LOCK_thd_data);
+ thd->store_globals();
+ wsrep::client_state& cs(thd->wsrep_cs());
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ DBUG_ASSERT(thd->wsrep_trx().state() == wsrep::transaction::s_aborting);
+ WSREP_DEBUG("post rollbacker calling post rollback for thd %llu, conf %s",
+ thd->thread_id, wsrep_thd_transaction_state_str(thd));
+
+ cs.after_rollback();
+ DBUG_ASSERT(thd->wsrep_trx().state() == wsrep::transaction::s_aborted);
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
}
- return state;
-}
-my_bool wsrep_thd_is_wsrep(THD *thd)
-{
- my_bool status = FALSE;
- if (thd)
- {
- status = (WSREP(thd) && WSREP_PROVIDER_EXISTS);
- }
- return status;
+ delete wsrep_post_rollback_queue;
+ wsrep_post_rollback_queue= NULL;
+
+ DBUG_ASSERT(post_rollbacker->killed != NOT_KILLED);
+ DBUG_PRINT("wsrep",("wsrep post rollbacker thread exiting"));
+ WSREP_INFO("post rollbacker thread exiting %llu", post_rollbacker->thread_id);
+ DBUG_VOID_RETURN;
}
-my_bool wsrep_thd_is_BF(THD *thd, my_bool sync)
+void wsrep_create_rollbacker()
{
- my_bool status = FALSE;
- if (thd)
+ if (wsrep_cluster_address && wsrep_cluster_address[0] != 0)
{
- // THD can be BF only if provider exists
- if (wsrep_thd_is_wsrep(thd))
- {
- if (sync)
- mysql_mutex_lock(&thd->LOCK_thd_data);
+ Wsrep_thd_args* args= new Wsrep_thd_args(wsrep_rollback_process, 0);
- status = ((thd->wsrep_exec_mode == REPL_RECV) ||
- (thd->wsrep_exec_mode == TOTAL_ORDER));
- if (sync)
- mysql_mutex_unlock(&thd->LOCK_thd_data);
- }
- }
- return status;
-}
+ /* create rollbacker */
+ if (create_wsrep_THD(args))
+ WSREP_WARN("Can't create thread to manage wsrep rollback");
-extern "C"
-my_bool wsrep_thd_is_BF_or_commit(void *thd_ptr, my_bool sync)
-{
- bool status = FALSE;
- if (thd_ptr)
- {
- THD* thd = (THD*)thd_ptr;
- if (sync) mysql_mutex_lock(&thd->LOCK_thd_data);
-
- status = ((thd->wsrep_exec_mode == REPL_RECV) ||
- (thd->wsrep_exec_mode == TOTAL_ORDER) ||
- (thd->wsrep_exec_mode == LOCAL_COMMIT));
- if (sync) mysql_mutex_unlock(&thd->LOCK_thd_data);
- }
- return status;
+ /* create post_rollbacker */
+ args= new Wsrep_thd_args(wsrep_post_rollback_process, 0);
+ if (create_wsrep_THD(args))
+ WSREP_WARN("Can't create thread to manage wsrep post rollback");
+ }
}
-extern "C"
-my_bool wsrep_thd_is_local(void *thd_ptr, my_bool sync)
+/*
+ Start async rollback process
+
+ Asserts thd->LOCK_thd_data ownership
+ */
+void wsrep_fire_rollbacker(THD *thd)
{
- bool status = FALSE;
- if (thd_ptr)
+ DBUG_ASSERT(thd->wsrep_trx().state() == wsrep::transaction::s_aborting);
+ DBUG_PRINT("wsrep",("enqueuing trx abort for %llu", thd->thread_id));
+ WSREP_DEBUG("enqueuing trx abort for (%llu)", thd->thread_id);
+ if (wsrep_rollback_queue->push_back(thd))
{
- THD* thd = (THD*)thd_ptr;
- if (sync) mysql_mutex_lock(&thd->LOCK_thd_data);
-
- status = (thd->wsrep_exec_mode == LOCAL_STATE);
- if (sync) mysql_mutex_unlock(&thd->LOCK_thd_data);
+ WSREP_WARN("duplicate thd %llu for rollbacker",
+ thd->thread_id);
}
- return status;
}
+
int wsrep_abort_thd(void *bf_thd_ptr, void *victim_thd_ptr, my_bool signal)
{
- THD *victim_thd = (THD *) victim_thd_ptr;
- THD *bf_thd = (THD *) bf_thd_ptr;
DBUG_ENTER("wsrep_abort_thd");
-
+ THD *victim_thd= (THD *) victim_thd_ptr;
+ THD *bf_thd= (THD *) bf_thd_ptr;
+ mysql_mutex_lock(&victim_thd->LOCK_thd_data);
if ( (WSREP(bf_thd) ||
( (WSREP_ON || bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU) &&
- bf_thd->wsrep_exec_mode == TOTAL_ORDER) ) &&
- victim_thd)
+ wsrep_thd_is_toi(bf_thd)) ) &&
+ victim_thd &&
+ !wsrep_thd_is_aborting(victim_thd))
{
- if ((victim_thd->wsrep_conflict_state == MUST_ABORT) ||
- (victim_thd->wsrep_conflict_state == ABORTED) ||
- (victim_thd->wsrep_conflict_state == ABORTING))
- {
- WSREP_DEBUG("wsrep_abort_thd called by %llu with victim %llu already "
- "aborted. Ignoring.",
- (bf_thd) ? (long long)bf_thd->real_id : 0,
- (long long)victim_thd->real_id);
- DBUG_RETURN(1);
- }
-
- WSREP_DEBUG("wsrep_abort_thd, by: %llu, victim: %llu", (bf_thd) ?
- (long long)bf_thd->real_id : 0, (long long)victim_thd->real_id);
- ha_abort_transaction(bf_thd, victim_thd, signal);
+ WSREP_DEBUG("wsrep_abort_thd, by: %llu, victim: %llu", (bf_thd) ?
+ (long long)bf_thd->real_id : 0, (long long)victim_thd->real_id);
+ mysql_mutex_unlock(&victim_thd->LOCK_thd_data);
+ ha_abort_transaction(bf_thd, victim_thd, signal);
+ mysql_mutex_lock(&victim_thd->LOCK_thd_data);
}
else
{
WSREP_DEBUG("wsrep_abort_thd not effective: %p %p", bf_thd, victim_thd);
}
-
+ mysql_mutex_unlock(&victim_thd->LOCK_thd_data);
DBUG_RETURN(1);
}
-extern "C"
-int wsrep_thd_in_locking_session(void *thd_ptr)
+bool wsrep_bf_abort(const THD* bf_thd, THD* victim_thd)
{
- if (thd_ptr && ((THD *)thd_ptr)->in_lock_tables) {
- return 1;
+ WSREP_LOG_THD((THD*)bf_thd, "BF aborter before");
+ WSREP_LOG_THD(victim_thd, "victim before");
+ wsrep::seqno bf_seqno(bf_thd->wsrep_trx().ws_meta().seqno());
+
+ if (WSREP(victim_thd) && !victim_thd->wsrep_trx().active())
+ {
+ WSREP_DEBUG("wsrep_bf_abort, BF abort for non active transaction");
+ wsrep_start_transaction(victim_thd, victim_thd->wsrep_next_trx_id());
}
- return 0;
-}
-bool wsrep_thd_has_explicit_locks(THD *thd)
-{
- assert(thd);
- return thd->mdl_context.has_explicit_locks();
+ bool ret;
+ if (wsrep_thd_is_toi(bf_thd))
+ {
+ ret= victim_thd->wsrep_cs().total_order_bf_abort(bf_seqno);
+ }
+ else
+ {
+ ret= victim_thd->wsrep_cs().bf_abort(bf_seqno);
+ }
+ if (ret)
+ {
+ wsrep_bf_aborts_counter++;
+ }
+ return ret;
}
/*
@@ -685,35 +405,13 @@ void wsrep_thd_auto_increment_variables(THD* thd,
unsigned long long* offset,
unsigned long long* increment)
{
- if (thd->wsrep_exec_mode == REPL_RECV &&
- thd->wsrep_conflict_state != REPLAYING)
+ if (wsrep_thd_is_applying(thd) &&
+ thd->wsrep_trx().state() != wsrep::transaction::s_replaying)
{
*offset= global_system_variables.auto_increment_offset;
*increment= global_system_variables.auto_increment_increment;
+ return;
}
- else
- {
- *offset= thd->variables.auto_increment_offset;
- *increment= thd->variables.auto_increment_increment;
- }
-}
-
-my_bool wsrep_thd_is_applier(MYSQL_THD thd)
-{
- my_bool is_applier= false;
-
- if (thd && thd->wsrep_applier)
- is_applier= true;
-
- return (is_applier);
-}
-
-void wsrep_set_load_multi_commit(THD *thd, bool split)
-{
- thd->wsrep_split_flag= split;
-}
-
-bool wsrep_is_load_multi_commit(THD *thd)
-{
- return thd->wsrep_split_flag;
+ *offset= thd->variables.auto_increment_offset;
+ *increment= thd->variables.auto_increment_increment;
}
diff --git a/sql/wsrep_thd.h b/sql/wsrep_thd.h
index 5900668f3fb..3114e02e1b8 100644
--- a/sql/wsrep_thd.h
+++ b/sql/wsrep_thd.h
@@ -13,42 +13,220 @@
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */
-#include <my_config.h>
-
#ifndef WSREP_THD_H
#define WSREP_THD_H
-#ifdef WITH_WSREP
+#include <my_config.h>
+#include "mysql/service_wsrep.h"
+#include "wsrep/client_state.hpp"
#include "sql_class.h"
+#include "wsrep_utils.h"
+#include <deque>
+class Wsrep_thd_queue
+{
+public:
+ Wsrep_thd_queue(THD* t) : thd(t)
+ {
+ mysql_mutex_init(key_LOCK_wsrep_thd_queue,
+ &LOCK_wsrep_thd_queue,
+ MY_MUTEX_INIT_FAST);
+ mysql_cond_init(key_COND_wsrep_thd_queue, &COND_wsrep_thd_queue, NULL);
+ }
+ ~Wsrep_thd_queue()
+ {
+ mysql_mutex_destroy(&LOCK_wsrep_thd_queue);
+ mysql_cond_destroy(&COND_wsrep_thd_queue);
+ }
+ bool push_back(THD* thd)
+ {
+ DBUG_ASSERT(thd);
+ wsp::auto_lock lock(&LOCK_wsrep_thd_queue);
+ std::deque<THD*>::iterator it = queue.begin();
+ while (it != queue.end())
+ {
+ if (*it == thd)
+ {
+ return true;
+ }
+ it++;
+ }
+ queue.push_back(thd);
+ mysql_cond_signal(&COND_wsrep_thd_queue);
+ return false;
+ }
+ THD* pop_front()
+ {
+ wsp::auto_lock lock(&LOCK_wsrep_thd_queue);
+ while (queue.empty())
+ {
+ if (thd->killed != NOT_KILLED)
+ return NULL;
+
+ thd->mysys_var->current_mutex= &LOCK_wsrep_thd_queue;
+ thd->mysys_var->current_cond= &COND_wsrep_thd_queue;
+
+ mysql_cond_wait(&COND_wsrep_thd_queue, &LOCK_wsrep_thd_queue);
+
+ thd->mysys_var->current_mutex= 0;
+ thd->mysys_var->current_cond= 0;
+ }
+ THD* ret= queue.front();
+ queue.pop_front();
+ return ret;
+ }
+private:
+ THD* thd;
+ std::deque<THD*> queue;
+ mysql_mutex_t LOCK_wsrep_thd_queue;
+ mysql_cond_t COND_wsrep_thd_queue;
+};
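
A minimal usage sketch for the queue above (not part of the patch; 'rollbacker' and 'victim_thd' are illustrative names). The owning thread blocks in pop_front() until a THD is queued or the owner is killed, while other threads hand over victims with push_back(), which returns true for duplicates:

    // Consumer side, run by the thread that owns the queue:
    Wsrep_thd_queue queue(rollbacker);
    while (THD* victim= queue.pop_front())   // NULL once rollbacker->killed != NOT_KILLED
    {
      /* roll back 'victim' here */
    }

    // Producer side, from any other thread:
    if (queue.push_back(victim_thd))         // true => thd was already queued
      WSREP_WARN("duplicate thd %llu for rollbacker", victim_thd->thread_id);
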
+
+void wsrep_prepare_bf_thd(THD*, struct wsrep_thd_shadow*);
+void wsrep_return_from_bf_mode(THD*, struct wsrep_thd_shadow*);
int wsrep_show_bf_aborts (THD *thd, SHOW_VAR *var, char *buff,
enum enum_var_type scope);
-void wsrep_client_rollback(THD *thd);
+void wsrep_client_rollback(THD *thd, bool rollbacker = false);
void wsrep_replay_transaction(THD *thd);
void wsrep_create_appliers(long threads);
void wsrep_create_rollbacker();
+bool wsrep_bf_abort(const THD*, THD*);
int wsrep_abort_thd(void *bf_thd_ptr, void *victim_thd_ptr,
my_bool signal);
-
-/*
- PA = Parallel Applying (on the slave side)
-*/
extern void wsrep_thd_set_PA_safe(void *thd_ptr, my_bool safe);
-extern my_bool wsrep_thd_is_BF(THD *thd, my_bool sync);
-extern my_bool wsrep_thd_is_wsrep(void *thd_ptr);
+THD* wsrep_start_SR_THD(char *thread_stack);
+void wsrep_end_SR_THD(THD* thd);
+
+/**
+ Helper functions to override error status
+
+ In many contexts it is desirable to mask the original error status
+ set for THD or it is necessary to change OK status to error.
+ This function implements the common logic for most
+ of the cases.
+
+ Rules:
+ * If the diagnostics area has OK or EOF status, override it unconditionally
+ * If the error is either ER_ERROR_DURING_COMMIT or ER_LOCK_DEADLOCK
+ it is usually the correct error status to be returned to client,
+ so don't override those by default
+ */
+
+static inline void wsrep_override_error(THD *thd, uint error)
+{
+ DBUG_ASSERT(error != ER_ERROR_DURING_COMMIT);
+ Diagnostics_area *da= thd->get_stmt_da();
+ if (da->is_ok() ||
+ da->is_eof() ||
+ !da->is_set() ||
+ (da->is_error() &&
+ da->sql_errno() != error &&
+ da->sql_errno() != ER_ERROR_DURING_COMMIT &&
+ da->sql_errno() != ER_LOCK_DEADLOCK))
+ {
+ da->reset_diagnostics_area();
+ my_error(error, MYF(0));
+ }
+}
+
+/**
+ Override error with additional wsrep status.
+ */
+static inline void wsrep_override_error(THD *thd, uint error,
+ enum wsrep::provider::status status)
+{
+ Diagnostics_area *da= thd->get_stmt_da();
+ if (da->is_ok() ||
+ !da->is_set() ||
+ (da->is_error() &&
+ da->sql_errno() != error &&
+ da->sql_errno() != ER_ERROR_DURING_COMMIT &&
+ da->sql_errno() != ER_LOCK_DEADLOCK))
+ {
+ da->reset_diagnostics_area();
+ my_error(error, MYF(0), status);
+ }
+}
+
+static inline void wsrep_override_error(THD* thd,
+ wsrep::client_error ce,
+ enum wsrep::provider::status status)
+{
+ DBUG_ASSERT(ce != wsrep::e_success);
+ switch (ce)
+ {
+ case wsrep::e_error_during_commit:
+ wsrep_override_error(thd, ER_ERROR_DURING_COMMIT, status);
+ break;
+ case wsrep::e_deadlock_error:
+ wsrep_override_error(thd, ER_LOCK_DEADLOCK);
+ break;
+ case wsrep::e_interrupted_error:
+ wsrep_override_error(thd, ER_QUERY_INTERRUPTED);
+ break;
+ case wsrep::e_size_exceeded_error:
+ wsrep_override_error(thd, ER_ERROR_DURING_COMMIT, status);
+ break;
+ case wsrep::e_append_fragment_error:
+ /* TODO: Figure out better error number */
+ wsrep_override_error(thd, ER_ERROR_DURING_COMMIT, status);
+ break;
+ case wsrep::e_not_supported_error:
+ wsrep_override_error(thd, ER_NOT_SUPPORTED_YET);
+ break;
+ case wsrep::e_timeout_error:
+ wsrep_override_error(thd, ER_LOCK_WAIT_TIMEOUT);
+ break;
+ default:
+ wsrep_override_error(thd, ER_UNKNOWN_ERROR);
+ break;
+ }
+}
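
An illustrative call sequence (an assumption for this note, not code from the patch) showing how a wsrep client error is translated into a MariaDB error for the client; current_error()/current_error_status() are the accessors wrapped further below in wsrep_trans_observer.h:

    // After a failed wsrep hook, map the wsrep error onto a server error:
    if (thd->wsrep_cs().current_error())
    {
      wsrep_override_error(thd,
                           thd->wsrep_cs().current_error(),
                           thd->wsrep_cs().current_error_status());
      // e.g. wsrep::e_deadlock_error becomes ER_LOCK_DEADLOCK,
      //      wsrep::e_timeout_error becomes ER_LOCK_WAIT_TIMEOUT.
    }
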
-enum wsrep_conflict_state wsrep_thd_conflict_state(void *thd_ptr, my_bool sync);
-extern "C" my_bool wsrep_thd_is_BF_or_commit(void *thd_ptr, my_bool sync);
-extern "C" my_bool wsrep_thd_is_local(void *thd_ptr, my_bool sync);
-extern "C" int wsrep_thd_in_locking_session(void *thd_ptr);
+/**
+ Helper function to log THD wsrep context.
-#else /* WITH_WSREP */
+ @param thd Pointer to THD
+ @param message Optional message
+ @param function Function where the call was made from
+ */
+static inline void wsrep_log_thd(THD *thd,
+ const char *message,
+ const char *function)
+{
+ WSREP_DEBUG("%s %s\n"
+ " thd: %llu thd_ptr: %p client_mode: %s client_state: %s trx_state: %s\n"
+ " next_trx_id: %lld trx_id: %lld seqno: %lld\n"
+ " is_streaming: %d fragments: %zu\n"
+ " sql_errno: %u message: %s\n"
+#define WSREP_THD_LOG_QUERIES
+#ifdef WSREP_THD_LOG_QUERIES
+ " command: %d query: %.72s"
+#endif /* WSREP_THD_LOG_QUERIES */
+ ,
+ function,
+ message ? message : "",
+ thd->thread_id,
+ thd,
+ wsrep_thd_client_mode_str(thd),
+ wsrep_thd_client_state_str(thd),
+ wsrep_thd_transaction_state_str(thd),
+ (long long)thd->wsrep_next_trx_id(),
+ (long long)thd->wsrep_trx_id(),
+ (long long)wsrep_thd_trx_seqno(thd),
+ thd->wsrep_trx().is_streaming(),
+ thd->wsrep_sr().fragments().size(),
+ (thd->get_stmt_da()->is_error() ? thd->get_stmt_da()->sql_errno() : 0),
+ (thd->get_stmt_da()->is_error() ? thd->get_stmt_da()->message() : "")
+#ifdef WSREP_THD_LOG_QUERIES
+ , thd->lex->sql_command,
+ WSREP_QUERY(thd)
+#endif /* WSREP_THD_LOG_QUERIES */
+ );
+}
-#define wsrep_thd_is_BF(T, S) (0)
-#define wsrep_abort_thd(X,Y,Z) do { } while(0)
-#define wsrep_create_appliers(T) do { } while(0)
+#define WSREP_LOG_THD(thd_, message_) wsrep_log_thd(thd_, message_, __FUNCTION__)
-#endif
#endif /* WSREP_THD_H */
diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h
new file mode 100644
index 00000000000..e6901f15ca7
--- /dev/null
+++ b/sql/wsrep_trans_observer.h
@@ -0,0 +1,513 @@
+/* Copyright 2016-2019 Codership Oy <http://www.codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef WSREP_TRANS_OBSERVER_H
+#define WSREP_TRANS_OBSERVER_H
+
+#include "my_global.h"
+#include "mysql/service_wsrep.h"
+#include "wsrep_applier.h" /* wsrep_apply_error */
+#include "wsrep_xid.h"
+#include "wsrep_thd.h"
+#include "wsrep_binlog.h" /* register/deregister group commit */
+#include "my_dbug.h"
+
+class THD;
+
+/*
+ Return true if THD has active wsrep transaction.
+ */
+static inline bool wsrep_is_active(THD* thd)
+{
+ return (thd->wsrep_cs().state() != wsrep::client_state::s_none &&
+ thd->wsrep_cs().transaction().active());
+}
+
+/*
+ Return true if transaction is ordered.
+ */
+static inline bool wsrep_is_ordered(THD* thd)
+{
+ return thd->wsrep_trx().ordered();
+}
+
+/*
+ Return true if transaction has been BF aborted but has not been
+ rolled back yet.
+
+ It is required that the caller holds thd->LOCK_thd_data.
+*/
+static inline bool wsrep_must_abort(THD* thd)
+{
+ mysql_mutex_assert_owner(&thd->LOCK_thd_data);
+ return (thd->wsrep_trx().state() == wsrep::transaction::s_must_abort);
+}
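
A short usage sketch (illustrative only): the caller must hold LOCK_thd_data while asking whether the transaction has to be aborted, for example:

    mysql_mutex_lock(&thd->LOCK_thd_data);
    const bool must_abort= wsrep_must_abort(thd);  // asserts mutex ownership
    mysql_mutex_unlock(&thd->LOCK_thd_data);
    if (must_abort)
    {
      /* initiate rollback of thd's transaction */
    }
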
+
+/*
+ Return true if the transaction must be replayed.
+ */
+static inline bool wsrep_must_replay(THD* thd)
+{
+ return (thd->wsrep_trx().state() == wsrep::transaction::s_must_replay);
+}
+/*
+ Return true if transaction has not been committed.
+
+ Note that we don't require thd->LOCK_thd_data here. Calling this method
+ makes sense only from code paths which are past the ordered_commit state,
+ where the wsrep transaction is immune to BF aborts.
+*/
+static inline bool wsrep_not_committed(THD* thd)
+{
+ return (thd->wsrep_trx().state() != wsrep::transaction::s_committed);
+}
+
+/*
+ Return true if THD is either committing a transaction or statement
+ is autocommit.
+ */
+static inline bool wsrep_is_real(THD* thd, bool all)
+{
+ return (all || thd->transaction.all.ha_list == 0);
+}
+
+/*
+ Check if a transaction has generated changes.
+ */
+static inline bool wsrep_has_changes(THD* thd)
+{
+ return (thd->wsrep_trx().is_empty() == false);
+}
+
+/*
+ Check if an active transaction has been BF aborted.
+ */
+static inline bool wsrep_is_bf_aborted(THD* thd)
+{
+ return (thd->wsrep_trx().active() && thd->wsrep_trx().bf_aborted());
+}
+
+static inline int wsrep_check_pk(THD* thd)
+{
+ if (!wsrep_certify_nonPK)
+ {
+ for (TABLE* table= thd->open_tables; table != NULL; table= table->next)
+ {
+ if (table->key_info == NULL || table->s->primary_key == MAX_KEY)
+ {
+ WSREP_DEBUG("No primary key found for table %s.%s",
+ table->s->db.str, table->s->table_name.str);
+ wsrep_override_error(thd, ER_LOCK_DEADLOCK);
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static inline bool wsrep_streaming_enabled(THD* thd)
+{
+ return (thd->wsrep_sr().fragment_size() > 0);
+}
+
+/*
+ Return the number of fragments successfully certified for the
+ current statement.
+ */
+static inline size_t wsrep_fragments_certified_for_stmt(THD* thd)
+{
+ return thd->wsrep_trx().fragments_certified_for_statement();
+}
+
+static inline int wsrep_start_transaction(THD* thd, wsrep_trx_id_t trx_id)
+{
+ return (thd->wsrep_cs().state() != wsrep::client_state::s_none ?
+ thd->wsrep_cs().start_transaction(wsrep::transaction_id(trx_id)) :
+ 0);
+}
+
+/**/
+static inline int wsrep_start_trx_if_not_started(THD* thd)
+{
+ int ret= 0;
+ DBUG_ASSERT(thd->wsrep_next_trx_id() != WSREP_UNDEFINED_TRX_ID);
+ DBUG_ASSERT(thd->wsrep_cs().mode() == Wsrep_client_state::m_local);
+ if (thd->wsrep_trx().active() == false)
+ {
+ ret= wsrep_start_transaction(thd, thd->wsrep_next_trx_id());
+ }
+ return ret;
+}
+
+/*
+ Called after each row operation.
+
+ Return zero on success, non-zero on failure.
+ */
+static inline int wsrep_after_row(THD* thd, bool)
+{
+ if (thd->wsrep_cs().state() != wsrep::client_state::s_none &&
+ wsrep_thd_is_local(thd))
+ {
+ if (wsrep_check_pk(thd))
+ {
+ return 1;
+ }
+ else if (wsrep_streaming_enabled(thd))
+ {
+ return thd->wsrep_cs().after_row();
+ }
+ }
+ return 0;
+}
+
+/*
+ Helper method to determine whether commit time hooks
+ should be run for the transaction.
+
+ Commit hooks must be run in the following cases:
+ - The transaction is local, has generated a write set and is committing.
+ - The transaction has been BF aborted.
+ - The transaction is running in high priority mode and is ordered. This can
+ be the replayer, an applier or a storage access.
+ */
+static inline bool wsrep_run_commit_hook(THD* thd, bool all)
+{
+ DBUG_ENTER("wsrep_run_commit_hook");
+ DBUG_PRINT("wsrep", ("Is_active: %d is_real %d has_changes %d is_applying %d "
+ "is_ordered: %d",
+ wsrep_is_active(thd), wsrep_is_real(thd, all),
+ wsrep_has_changes(thd), wsrep_thd_is_applying(thd),
+ wsrep_is_ordered(thd)));
+ /* Is MST commit or autocommit? */
+ bool ret= wsrep_is_active(thd) && wsrep_is_real(thd, all);
+ if (ret && !(wsrep_has_changes(thd) || /* Has generated write set */
+ /* Is high priority (replay, applier, storage) and the
+ transaction is scheduled for commit ordering */
+ (wsrep_thd_is_applying(thd) && wsrep_is_ordered(thd))))
+ {
+ mysql_mutex_lock(&thd->LOCK_thd_data);
+ DBUG_PRINT("wsrep", ("state: %s",
+ wsrep::to_c_string(thd->wsrep_trx().state())));
+ /* The transaction is local but has no changes; the commit hooks will
+ be skipped and the wsrep transaction is terminated in
+ wsrep_commit_empty() */
+ if (thd->wsrep_trx().state() == wsrep::transaction::s_executing)
+ {
+ ret= false;
+ }
+ mysql_mutex_unlock(&thd->LOCK_thd_data);
+ }
+ DBUG_PRINT("wsrep", ("return: %d", ret));
+ DBUG_RETURN(ret);
+}
+
+/*
+ Called before the transaction is prepared.
+
+ Return zero on success, non-zero on failure.
+ */
+static inline int wsrep_before_prepare(THD* thd, bool all)
+{
+ DBUG_ENTER("wsrep_before_prepare");
+ WSREP_DEBUG("wsrep_before_prepare: %d", wsrep_is_real(thd, all));
+ int ret= 0;
+ DBUG_ASSERT(wsrep_run_commit_hook(thd, all));
+ if ((ret= thd->wsrep_cs().before_prepare()) == 0)
+ {
+ DBUG_ASSERT(!thd->wsrep_trx().ws_meta().gtid().is_undefined());
+ wsrep_xid_init(&thd->wsrep_xid,
+ thd->wsrep_trx().ws_meta().gtid());
+ }
+ DBUG_RETURN(ret);
+}
+
+/*
+ Called after the transaction has been prepared.
+
+ Return zero on success, non-zero on failure.
+ */
+static inline int wsrep_after_prepare(THD* thd, bool all)
+{
+ DBUG_ENTER("wsrep_after_prepare");
+ WSREP_DEBUG("wsrep_after_prepare: %d", wsrep_is_real(thd, all));
+ DBUG_ASSERT(wsrep_run_commit_hook(thd, all));
+ int ret= thd->wsrep_cs().after_prepare();
+ DBUG_ASSERT(ret == 0 || thd->wsrep_cs().current_error() ||
+ thd->wsrep_cs().transaction().state() == wsrep::transaction::s_must_replay);
+ DBUG_RETURN(ret);
+}
+
+/*
+ Called before the transaction is committed.
+
+ This function must be called from both client and
+ applier contexts before commit.
+
+ Return zero on success, non-zero on failure.
+ */
+static inline int wsrep_before_commit(THD* thd, bool all)
+{
+ DBUG_ENTER("wsrep_before_commit");
+ WSREP_DEBUG("wsrep_before_commit: %d, %lld",
+ wsrep_is_real(thd, all),
+ (long long)wsrep_thd_trx_seqno(thd));
+ int ret= 0;
+ DBUG_ASSERT(wsrep_run_commit_hook(thd, all));
+ if ((ret= thd->wsrep_cs().before_commit()) == 0)
+ {
+ DBUG_ASSERT(!thd->wsrep_trx().ws_meta().gtid().is_undefined());
+ wsrep_xid_init(&thd->wsrep_xid,
+ thd->wsrep_trx().ws_meta().gtid());
+ wsrep_register_for_group_commit(thd);
+ }
+ DBUG_RETURN(ret);
+}
+
+/*
+ Called after the transaction has been ordered for commit.
+
+ This function must be called from both client and
+ applier contexts after the commit has been ordered.
+
+ @param thd Pointer to THD
+ @param all
+ @param err Error buffer in case of applying error
+
+ Return zero on success, non-zero on failure.
+ */
+static inline int wsrep_ordered_commit(THD* thd,
+ bool all,
+ const wsrep_apply_error&)
+{
+ DBUG_ENTER("wsrep_ordered_commit");
+ WSREP_DEBUG("wsrep_ordered_commit: %d", wsrep_is_real(thd, all));
+ DBUG_ASSERT(wsrep_run_commit_hook(thd, all));
+ DBUG_RETURN(thd->wsrep_cs().ordered_commit());
+}
+
+/*
+ Called after the transaction has been committed.
+
+ Return zero on success, non-zero on failure.
+ */
+static inline int wsrep_after_commit(THD* thd, bool all)
+{
+ DBUG_ENTER("wsrep_after_commit");
+ WSREP_DEBUG("wsrep_after_commit: %d, %d, %lld, %d",
+ wsrep_is_real(thd, all),
+ wsrep_is_active(thd),
+ (long long)wsrep_thd_trx_seqno(thd),
+ wsrep_has_changes(thd));
+ DBUG_ASSERT(wsrep_run_commit_hook(thd, all));
+ int ret= 0;
+ if (thd->wsrep_trx().state() == wsrep::transaction::s_committing)
+ {
+ ret= thd->wsrep_cs().ordered_commit();
+ }
+ wsrep_unregister_from_group_commit(thd);
+ thd->wsrep_xid.null();
+ DBUG_RETURN(ret || thd->wsrep_cs().after_commit());
+}
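
For orientation, a simplified sketch (a hypothetical caller, not the server's actual commit path) of how the commit-time hooks above are intended to be chained, guarded by wsrep_run_commit_hook():

    static int wsrep_commit_sketch(THD* thd, bool all)   // illustrative only
    {
      if (!wsrep_run_commit_hook(thd, all))
        return 0;                           // nothing to replicate or order
      if (wsrep_before_commit(thd, all))    // certify/order, set XID, register
        return 1;                           // for group commit
      /* ... binlog and storage engine commit happen here ... */
      return wsrep_after_commit(thd, all);  // ordered_commit if still
                                            // committing, then after_commit
    }
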
+
+/*
+ Called before the transaction is rolled back.
+
+ Return zero on success, non-zero on failure.
+ */
+static inline int wsrep_before_rollback(THD* thd, bool all)
+{
+ DBUG_ENTER("wsrep_before_rollback");
+ int ret= 0;
+ if (wsrep_is_active(thd))
+ {
+ if (!all && thd->in_active_multi_stmt_transaction() &&
+ thd->wsrep_trx().is_streaming() &&
+ !wsrep_stmt_rollback_is_safe(thd))
+ {
+ /* Non-safe statement rollback during an SR multi-statement
+ transaction. Self-abort the transaction; the actual rollback
+ and error handling will be done in the after-statement phase. */
+ wsrep_thd_self_abort(thd);
+ ret= 0;
+ }
+ else if (wsrep_is_real(thd, all) &&
+ thd->wsrep_trx().state() != wsrep::transaction::s_aborted)
+ {
+ /* Real transaction rolling back and wsrep abort not completed
+ yet */
+ /* Reset XID so that it does not trigger writing serialization
+ history in InnoDB. This needs to be avoided because rollback
+ may happen out of order and replay may follow. */
+ thd->wsrep_xid.null();
+ ret= thd->wsrep_cs().before_rollback();
+ }
+ }
+ DBUG_RETURN(ret);
+}
+
+/*
+ Called after the transaction has been rolled back.
+
+ Return zero on success, non-zero on failure.
+ */
+static inline int wsrep_after_rollback(THD* thd, bool all)
+{
+ DBUG_ENTER("wsrep_after_rollback");
+ DBUG_RETURN((wsrep_is_real(thd, all) && wsrep_is_active(thd) &&
+ thd->wsrep_cs().transaction().state() !=
+ wsrep::transaction::s_aborted) ?
+ thd->wsrep_cs().after_rollback() : 0);
+}
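
Similarly, a hedged sketch (hypothetical caller, not the server's actual rollback path) of how the two rollback hooks are meant to bracket the engine rollback:

    static int wsrep_rollback_sketch(THD* thd, bool all)  // illustrative only
    {
      if (wsrep_before_rollback(thd, all))  // resets XID, enters rollback
        return 1;
      /* ... storage engines roll back here ... */
      return wsrep_after_rollback(thd, all);
    }
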
+
+static inline int wsrep_before_statement(THD* thd)
+{
+ return (thd->wsrep_cs().state() != wsrep::client_state::s_none ?
+ thd->wsrep_cs().before_statement() : 0);
+}
+
+static inline
+int wsrep_after_statement(THD* thd)
+{
+ DBUG_ENTER("wsrep_after_statement");
+ DBUG_RETURN(thd->wsrep_cs().state() != wsrep::client_state::s_none ?
+ thd->wsrep_cs().after_statement() : 0);
+}
+
+static inline void wsrep_after_apply(THD* thd)
+{
+ DBUG_ASSERT(wsrep_thd_is_applying(thd));
+ WSREP_DEBUG("wsrep_after_apply %lld", thd->thread_id);
+ thd->wsrep_cs().after_applying();
+}
+
+static inline void wsrep_open(THD* thd)
+{
+ DBUG_ENTER("wsrep_open");
+ if (wsrep_on(thd))
+ {
+ thd->wsrep_cs().open(wsrep::client_id(thd->thread_id));
+ thd->wsrep_cs().debug_log_level(wsrep_debug);
+ if (!thd->wsrep_applier && thd->variables.wsrep_trx_fragment_size)
+ {
+ thd->wsrep_cs().enable_streaming(
+ wsrep_fragment_unit(thd->variables.wsrep_trx_fragment_unit),
+ size_t(thd->variables.wsrep_trx_fragment_size));
+ }
+ }
+ DBUG_VOID_RETURN;
+}
+
+static inline void wsrep_close(THD* thd)
+{
+ DBUG_ENTER("wsrep_close");
+ if (thd->wsrep_cs().state() != wsrep::client_state::s_none)
+ {
+ thd->wsrep_cs().close();
+ }
+ DBUG_VOID_RETURN;
+}
+
+static inline int wsrep_before_command(THD* thd)
+{
+ return (thd->wsrep_cs().state() != wsrep::client_state::s_none ?
+ thd->wsrep_cs().before_command() : 0);
+}
+/*
+ Called after each command, before the result is sent to the client.
+*/
+static inline void wsrep_after_command_before_result(THD* thd)
+{
+ if (thd->wsrep_cs().state() != wsrep::client_state::s_none)
+ {
+ thd->wsrep_cs().after_command_before_result();
+ }
+}
+
+static inline void wsrep_after_command_after_result(THD* thd)
+{
+ if (thd->wsrep_cs().state() != wsrep::client_state::s_none)
+ {
+ thd->wsrep_cs().after_command_after_result();
+ }
+}
+
+static inline void wsrep_after_command_ignore_result(THD* thd)
+{
+ wsrep_after_command_before_result(thd);
+ DBUG_ASSERT(!thd->wsrep_cs().current_error());
+ wsrep_after_command_after_result(thd);
+}
+
+static inline enum wsrep::client_error wsrep_current_error(THD* thd)
+{
+ return thd->wsrep_cs().current_error();
+}
+
+static inline enum wsrep::provider::status
+wsrep_current_error_status(THD* thd)
+{
+ return thd->wsrep_cs().current_error_status();
+}
+
+
+/*
+ Commit an empty transaction.
+
+ If the transaction is real and the wsrep transaction is still active,
+ the transaction did not generate any rows or keys and is committed
+ as empty. Here the wsrep transaction is rolled back and the after-statement
+ step is performed to leave the wsrep transaction in a state as if it had
+ never existed.
+*/
+static inline void wsrep_commit_empty(THD* thd, bool all)
+{
+ DBUG_ENTER("wsrep_commit_empty");
+ WSREP_DEBUG("wsrep_commit_empty(%llu)", thd->thread_id);
+ if (wsrep_is_real(thd, all) &&
+ wsrep_thd_is_local(thd) &&
+ thd->wsrep_trx().active() &&
+ thd->wsrep_trx().state() != wsrep::transaction::s_committed)
+ {
+ /* @todo CTAS with STATEMENT binlog format and empty result set
+ seems to be committing empty. Figure out why and try to fix
+ elsewhere. */
+ DBUG_ASSERT(!wsrep_has_changes(thd) ||
+ (thd->lex->sql_command == SQLCOM_CREATE_TABLE &&
+ !thd->is_current_stmt_binlog_format_row()));
+ bool have_error= wsrep_current_error(thd);
+ int ret= wsrep_before_rollback(thd, all) ||
+ wsrep_after_rollback(thd, all) ||
+ wsrep_after_statement(thd);
+ /* The committing transaction was empty but it held some locks and
+ got BF aborted. As there were no certified changes in the
+ data, we ignore the deadlock error and rely on error reporting
+ by storage engine/server. */
+ if (!ret && !have_error && wsrep_current_error(thd))
+ {
+ DBUG_ASSERT(wsrep_current_error(thd) == wsrep::e_deadlock_error);
+ thd->wsrep_cs().reset_error();
+ }
+ if (ret)
+ {
+ WSREP_DEBUG("wsrep_commit_empty failed: %d", wsrep_current_error(thd));
+ }
+ }
+ DBUG_VOID_RETURN;
+}
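
A hedged sketch of where this helper fits (the call site shown is an assumption for illustration): when the commit hooks were skipped because the transaction produced no changes, the still-active wsrep transaction is terminated here.

    // hypothetical end-of-commit path
    if (!wsrep_run_commit_hook(thd, all))
      wsrep_commit_empty(thd, all);  // rolls back the empty wsrep transaction
                                     // and clears a spurious deadlock error
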
+
+#endif /* WSREP_TRANS_OBSERVER_H */
diff --git a/sql/wsrep_types.h b/sql/wsrep_types.h
new file mode 100644
index 00000000000..9da00e305a7
--- /dev/null
+++ b/sql/wsrep_types.h
@@ -0,0 +1,29 @@
+/* Copyright 2018 Codership Oy <info@codership.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+/*
+ Wsrep typedefs to better conform to coding style.
+ */
+#ifndef WSREP_TYPES_H
+#define WSREP_TYPES_H
+
+#include "wsrep/seqno.hpp"
+#include "wsrep/view.hpp"
+
+typedef wsrep::id Wsrep_id;
+typedef wsrep::seqno Wsrep_seqno;
+typedef wsrep::view Wsrep_view;
+
+#endif /* WSREP_TYPES_H */
diff --git a/sql/wsrep_utils.cc b/sql/wsrep_utils.cc
index 3c341e222b3..8db0f7be99a 100644
--- a/sql/wsrep_utils.cc
+++ b/sql/wsrep_utils.cc
@@ -21,6 +21,8 @@
#endif
#include "mariadb.h"
+#include "my_global.h"
+#include "wsrep_api.h"
#include "wsrep_utils.h"
#include "wsrep_mysqld.h"
@@ -47,7 +49,7 @@ static wsp::string wsrep_PATH;
void
wsrep_prepend_PATH (const char* path)
{
- int count = 0;
+ int count= 0;
while (environ[count])
{
@@ -72,7 +74,7 @@ wsrep_prepend_PATH (const char* path)
old_path + strlen("PATH="));
wsrep_PATH.set (new_path);
- environ[count] = new_path;
+ environ[count]= new_path;
}
else
{
@@ -93,28 +95,28 @@ namespace wsp
bool
env::ctor_common(char** e)
{
- env_ = static_cast<char**>(malloc((len_ + 1) * sizeof(char*)));
+ env_= static_cast<char**>(malloc((len_ + 1) * sizeof(char*)));
if (env_)
{
for (size_t i(0); i < len_; ++i)
{
assert(e[i]); // caller should make sure about len_
- env_[i] = strdup(e[i]);
+ env_[i]= strdup(e[i]);
if (!env_[i])
{
- errno_ = errno;
+ errno_= errno;
WSREP_ERROR("Failed to allocate env. var: %s", e[i]);
return true;
}
}
- env_[len_] = NULL;
+ env_[len_]= NULL;
return false;
}
else
{
- errno_ = errno;
+ errno_= errno;
WSREP_ERROR("Failed to allocate env. var vector of length: %zu", len_);
return true;
}
@@ -128,15 +130,15 @@ env::dtor()
/* don't need to go beyond the first NULL */
for (size_t i(0); env_[i] != NULL; ++i) { free(env_[i]); }
free(env_);
- env_ = NULL;
+ env_= NULL;
}
- len_ = 0;
+ len_= 0;
}
env::env(char** e)
: len_(0), env_(NULL), errno_(0)
{
- if (!e) { e = environ; }
+ if (!e) { e= environ; }
/* count the size of the vector */
while (e[len_]) { ++len_; }
@@ -154,21 +156,21 @@ env::~env() { dtor(); }
int
env::append(const char* val)
{
- char** tmp = static_cast<char**>(realloc(env_, (len_ + 2)*sizeof(char*)));
+ char** tmp= static_cast<char**>(realloc(env_, (len_ + 2)*sizeof(char*)));
if (tmp)
{
- env_ = tmp;
- env_[len_] = strdup(val);
+ env_= tmp;
+ env_[len_]= strdup(val);
if (env_[len_])
{
++len_;
- env_[len_] = NULL;
+ env_[len_]= NULL;
}
- else errno_ = errno;
+ else errno_= errno;
}
- else errno_ = errno;
+ else errno_= errno;
return errno_;
}
@@ -189,7 +191,7 @@ process::process (const char* cmd, const char* type, char** env)
if (0 == str_)
{
WSREP_ERROR ("Can't allocate command line of size: %zu", strlen(cmd));
- err_ = ENOMEM;
+ err_= ENOMEM;
return;
}
@@ -205,12 +207,12 @@ process::process (const char* cmd, const char* type, char** env)
return;
}
- if (NULL == env) { env = environ; } // default to global environment
+ if (NULL == env) { env= environ; } // default to global environment
- int pipe_fds[2] = { -1, };
+ int pipe_fds[2]= { -1, };
if (::pipe(pipe_fds))
{
- err_ = errno;
+ err_= errno;
WSREP_ERROR ("pipe() failed: %d (%s)", err_, strerror(err_));
return;
}
@@ -220,16 +222,16 @@ process::process (const char* cmd, const char* type, char** env)
int const child_end (parent_end == PIPE_READ ? PIPE_WRITE : PIPE_READ);
int const close_fd (parent_end == PIPE_READ ? STDOUT_FD : STDIN_FD);
- char* const pargv[4] = { strdup("sh"), strdup("-c"), strdup(str_), NULL };
+ char* const pargv[4]= { strdup("sh"), strdup("-c"), strdup(str_), NULL };
if (!(pargv[0] && pargv[1] && pargv[2]))
{
- err_ = ENOMEM;
+ err_= ENOMEM;
WSREP_ERROR ("Failed to allocate pargv[] array.");
goto cleanup_pipe;
}
posix_spawnattr_t attr;
- err_ = posix_spawnattr_init (&attr);
+ err_= posix_spawnattr_init (&attr);
if (err_)
{
WSREP_ERROR ("posix_spawnattr_init() failed: %d (%s)",
@@ -239,7 +241,7 @@ process::process (const char* cmd, const char* type, char** env)
/* make sure that no signals are masked in child process */
sigset_t sigmask_empty; sigemptyset(&sigmask_empty);
- err_ = posix_spawnattr_setsigmask(&attr, &sigmask_empty);
+ err_= posix_spawnattr_setsigmask(&attr, &sigmask_empty);
if (err_)
{
WSREP_ERROR ("posix_spawnattr_setsigmask() failed: %d (%s)",
@@ -255,7 +257,7 @@ process::process (const char* cmd, const char* type, char** env)
sigaddset(&default_signals, SIGPIPE);
sigaddset(&default_signals, SIGTERM);
sigaddset(&default_signals, SIGCHLD);
- err_ = posix_spawnattr_setsigdefault(&attr, &default_signals);
+ err_= posix_spawnattr_setsigdefault(&attr, &default_signals);
if (err_)
{
WSREP_ERROR ("posix_spawnattr_setsigdefault() failed: %d (%s)",
@@ -263,7 +265,7 @@ process::process (const char* cmd, const char* type, char** env)
goto cleanup_attr;
}
- err_ = posix_spawnattr_setflags (&attr, POSIX_SPAWN_SETSIGDEF |
+ err_= posix_spawnattr_setflags (&attr, POSIX_SPAWN_SETSIGDEF |
POSIX_SPAWN_SETSIGMASK |
POSIX_SPAWN_USEVFORK);
if (err_)
@@ -274,7 +276,7 @@ process::process (const char* cmd, const char* type, char** env)
}
posix_spawn_file_actions_t fact;
- err_ = posix_spawn_file_actions_init (&fact);
+ err_= posix_spawn_file_actions_init (&fact);
if (err_)
{
WSREP_ERROR ("posix_spawn_file_actions_init() failed: %d (%s)",
@@ -283,7 +285,7 @@ process::process (const char* cmd, const char* type, char** env)
}
// close child's stdout|stdin depending on what we returning
- err_ = posix_spawn_file_actions_addclose (&fact, close_fd);
+ err_= posix_spawn_file_actions_addclose (&fact, close_fd);
if (err_)
{
WSREP_ERROR ("posix_spawn_file_actions_addclose() failed: %d (%s)",
@@ -292,7 +294,7 @@ process::process (const char* cmd, const char* type, char** env)
}
// substitute our pipe descriptor in place of the closed one
- err_ = posix_spawn_file_actions_adddup2 (&fact,
+ err_= posix_spawn_file_actions_adddup2 (&fact,
pipe_fds[child_end], close_fd);
if (err_)
{
@@ -301,30 +303,30 @@ process::process (const char* cmd, const char* type, char** env)
goto cleanup_fact;
}
- err_ = posix_spawnp (&pid_, pargv[0], &fact, &attr, pargv, env);
+ err_= posix_spawnp (&pid_, pargv[0], &fact, &attr, pargv, env);
if (err_)
{
WSREP_ERROR ("posix_spawnp(%s) failed: %d (%s)",
pargv[2], err_, strerror(err_));
- pid_ = 0; // just to make sure it was not messed up in the call
+ pid_= 0; // just to make sure it was not messed up in the call
goto cleanup_fact;
}
- io_ = fdopen (pipe_fds[parent_end], type);
+ io_= fdopen (pipe_fds[parent_end], type);
if (io_)
{
- pipe_fds[parent_end] = -1; // skip close on cleanup
+ pipe_fds[parent_end]= -1; // skip close on cleanup
}
else
{
- err_ = errno;
+ err_= errno;
WSREP_ERROR ("fdopen() failed: %d (%s)", err_, strerror(err_));
}
cleanup_fact:
int err; // to preserve err_ code
- err = posix_spawn_file_actions_destroy (&fact);
+ err= posix_spawn_file_actions_destroy (&fact);
if (err)
{
WSREP_ERROR ("posix_spawn_file_actions_destroy() failed: %d (%s)\n",
@@ -332,7 +334,7 @@ cleanup_fact:
}
cleanup_attr:
- err = posix_spawnattr_destroy (&attr);
+ err= posix_spawnattr_destroy (&attr);
if (err)
{
WSREP_ERROR ("posix_spawnattr_destroy() failed: %d (%s)",
@@ -360,7 +362,7 @@ process::~process ()
if (fclose (io_) == -1)
{
- err_ = errno;
+ err_= errno;
WSREP_ERROR("fclose() failed: %d (%s)", err_, strerror(err_));
}
}
@@ -376,34 +378,34 @@ process::wait ()
int status;
if (-1 == waitpid(pid_, &status, 0))
{
- err_ = errno; assert (err_);
+ err_= errno; assert (err_);
WSREP_ERROR("Waiting for process failed: %s, PID(%ld): %d (%s)",
str_, (long)pid_, err_, strerror (err_));
}
else
{ // command completed, check exit status
if (WIFEXITED (status)) {
- err_ = WEXITSTATUS (status);
+ err_= WEXITSTATUS (status);
}
else { // command didn't complete with exit()
WSREP_ERROR("Process was aborted.");
- err_ = errno ? errno : ECHILD;
+ err_= errno ? errno : ECHILD;
}
if (err_) {
switch (err_) /* Translate error codes to more meaningful */
{
- case 126: err_ = EACCES; break; /* Permission denied */
- case 127: err_ = ENOENT; break; /* No such file or directory */
- case 143: err_ = EINTR; break; /* Subprocess killed */
+ case 126: err_= EACCES; break; /* Permission denied */
+ case 127: err_= ENOENT; break; /* No such file or directory */
+ case 143: err_= EINTR; break; /* Subprocess killed */
}
WSREP_ERROR("Process completed with error: %s: %d (%s)",
str_, err_, strerror(err_));
}
- pid_ = 0;
+ pid_= 0;
if (io_) fclose (io_);
- io_ = NULL;
+ io_= NULL;
}
}
else {
@@ -421,7 +423,7 @@ thd::thd (my_bool won) : init(), ptr(new THD(0))
ptr->thread_stack= (char*) &ptr;
ptr->store_globals();
ptr->variables.option_bits&= ~OPTION_BIN_LOG; // disable binlog
- ptr->variables.wsrep_on = won;
+ ptr->variables.wsrep_on= won;
ptr->security_ctx->master_access= ~(ulong)0;
lex_start(ptr);
}
@@ -441,7 +443,7 @@ thd::~thd ()
/* Returns INADDR_NONE, INADDR_ANY, INADDR_LOOPBACK or something else */
unsigned int wsrep_check_ip (const char* const addr, bool *is_ipv6)
{
- unsigned int ret = INADDR_NONE;
+ unsigned int ret= INADDR_NONE;
struct addrinfo *res, hints;
memset (&hints, 0, sizeof(hints));
@@ -451,7 +453,7 @@ unsigned int wsrep_check_ip (const char* const addr, bool *is_ipv6)
*is_ipv6= false;
- int gai_ret = getaddrinfo(addr, NULL, &hints, &res);
+ int gai_ret= getaddrinfo(addr, NULL, &hints, &res);
if (0 == gai_ret)
{
if (AF_INET == res->ai_family) /* IPv4 */
@@ -488,7 +490,9 @@ size_t wsrep_guess_ip (char* buf, size_t buf_len)
size_t ret= 0;
// Attempt 1: Try to get the IP from bind-address.
- if (my_bind_addr_str && my_bind_addr_str[0] != '\0')
+ // Skip if empty or bind-address=*
+ if (my_bind_addr_str && my_bind_addr_str[0] != '\0' &&
+ strcmp(my_bind_addr_str, "*") != 0)
{
bool unused;
unsigned int const ip_type= wsrep_check_ip(my_bind_addr_str, &unused);
@@ -539,7 +543,7 @@ size_t wsrep_guess_ip (char* buf, size_t buf_len)
if (getifaddrs(&ifaddr) == 0)
{
- for (ifa= ifaddr; ifa != NULL; ifa = ifa->ifa_next)
+ for (ifa= ifaddr; ifa != NULL; ifa= ifa->ifa_next)
{
if (!ifa->ifa_addr)
continue;
diff --git a/sql/wsrep_utils.h b/sql/wsrep_utils.h
index 0afca96ea41..488b455938b 100644
--- a/sql/wsrep_utils.h
+++ b/sql/wsrep_utils.h
@@ -21,6 +21,27 @@
unsigned int wsrep_check_ip (const char* const addr, bool *is_ipv6);
size_t wsrep_guess_ip (char* buf, size_t buf_len);
+namespace wsp {
+class node_status
+{
+public:
+ node_status() : status(wsrep::server_state::s_disconnected) {}
+ void set(enum wsrep::server_state::state new_status,
+ const wsrep::view* view= 0)
+ {
+ if (status != new_status || 0 != view)
+ {
+ wsrep_notify_status(new_status, view);
+ status= new_status;
+ }
+ }
+ enum wsrep::server_state::state get() const { return status; }
+private:
+ enum wsrep::server_state::state status;
+};
+} /* namespace wsp */
+
+extern wsp::node_status local_status;
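
A brief usage note (illustrative): node_status::set() only calls wsrep_notify_status() when the status actually changes or a view is supplied, so repeated calls with the same state are no-ops:

    local_status.set(wsrep::server_state::s_synced);  // status changes: notification sent
    local_status.set(wsrep::server_state::s_synced);  // same status, no view: no notification
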
/* returns the length of the host part of the address string */
size_t wsrep_host_len(const char* addr, size_t addr_len);
@@ -174,52 +195,37 @@ private:
class Config_state
{
public:
- Config_state() : view_(), status_(WSREP_MEMBER_UNDEFINED)
+ Config_state() : view_(), status_(wsrep::server_state::s_disconnected)
{}
- void set(wsrep_member_status_t status, const wsrep_view_info_t* view)
+ void set(const wsrep::view& view)
{
- wsrep_notify_status(status, view);
+ wsrep_notify_status(status_, &view);
lock();
-
- status_= status;
- view_= *view;
- member_info_.clear();
-
- wsrep_member_info_t memb;
- for(int i= 0; i < view->memb_num; i ++)
- {
- memb= view->members[i];
- member_info_.append_val(memb);
- }
-
+ view_= view;
unlock();
}
- void set(wsrep_member_status_t status)
+ void set(enum wsrep::server_state::state status)
{
- wsrep_notify_status(status, 0);
+ wsrep_notify_status(status);
+
lock();
status_= status;
unlock();
}
- wsrep_view_info_t get_view_info() const
+ const wsrep::view& get_view_info() const
{
return view_;
}
- wsrep_member_status_t get_status() const
+ enum wsrep::server_state::state get_status() const
{
return status_;
}
- Dynamic_array<wsrep_member_info_t> * get_member_info()
- {
- return &member_info_;
- }
-
int lock()
{
return mysql_mutex_lock(&LOCK_wsrep_config_state);
@@ -231,9 +237,8 @@ public:
}
private:
- wsrep_view_info_t view_;
- wsrep_member_status_t status_;
- Dynamic_array<wsrep_member_info_t> member_info_;
+ wsrep::view view_;
+ enum wsrep::server_state::state status_;
};
} /* namespace wsp */
@@ -309,12 +314,23 @@ public:
string() : string_(0) {}
explicit string(size_t s) : string_(static_cast<char*>(malloc(s))) {}
char* operator()() { return string_; }
- void set(char* str) { if (string_) free (string_); string_ = str; }
+ void set(char* str) { if (string_) free (string_); string_= str; }
~string() { set (0); }
private:
char* string_;
};
+/* scope level lock */
+class auto_lock
+{
+public:
+ auto_lock(mysql_mutex_t* m) : m_(m) { mysql_mutex_lock(m_); }
+ ~auto_lock() { mysql_mutex_unlock(m_); }
+private:
+ mysql_mutex_t& operator =(mysql_mutex_t&);
+ mysql_mutex_t* const m_;
+};
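
A small usage sketch for the RAII helper above (LOCK_example is a placeholder mutex, not one from the patch): the constructor locks and the destructor unlocks when the scope exits, including on early return.

    {
      wsp::auto_lock lock(&LOCK_example);  // mutex acquired here
      /* ... critical section ... */
    }                                      // mutex released when 'lock' leaves scope
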
+
#ifdef REMOVED
class lock
{
@@ -324,7 +340,7 @@ public:
lock (pthread_mutex_t* mtx) : mtx_(mtx)
{
- int err = pthread_mutex_lock (mtx_);
+ int err= pthread_mutex_lock (mtx_);
if (err)
{
@@ -335,7 +351,7 @@ public:
virtual ~lock ()
{
- int err = pthread_mutex_unlock (mtx_);
+ int err= pthread_mutex_unlock (mtx_);
if (err)
{
diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc
index 1471ad91a96..288d4cdf134 100644
--- a/sql/wsrep_var.cc
+++ b/sql/wsrep_var.cc
@@ -28,8 +28,6 @@
ulong wsrep_reject_queries;
-static long wsrep_prev_slave_threads = wsrep_slave_threads;
-
int wsrep_init_vars()
{
wsrep_provider = my_strdup(WSREP_NONE, MYF(MY_WME));
@@ -53,7 +51,7 @@ bool wsrep_on_update (sys_var *self, THD* thd, enum_var_type var_type)
{
if (var_type == OPT_GLOBAL) {
// FIXME: this variable probably should be changed only per session
- thd->variables.wsrep_on = global_system_variables.wsrep_on;
+ thd->variables.wsrep_on= global_system_variables.wsrep_on;
}
return false;
@@ -68,8 +66,8 @@ bool wsrep_on_check(sys_var *self, THD* thd, set_var* var)
if (new_wsrep_on && innodb_hton_ptr && innodb_lock_schedule_algorithm != 0) {
my_message(ER_WRONG_ARGUMENTS, " WSREP (galera) can't be enabled "
- "if innodb_lock_schedule_algorithm=VATS. Please configure"
- " innodb_lock_schedule_algorithm=FCFS and restart.", MYF(0));
+ "if innodb_lock_schedule_algorithm=VATS. Please configure"
+ " innodb_lock_schedule_algorithm=FCFS and restart.", MYF(0));
return true;
}
return false;
@@ -77,10 +75,6 @@ bool wsrep_on_check(sys_var *self, THD* thd, set_var* var)
bool wsrep_causal_reads_update (sys_var *self, THD* thd, enum_var_type var_type)
{
- // global setting should not affect session setting.
- // if (var_type == OPT_GLOBAL) {
- // thd->variables.wsrep_causal_reads = global_system_variables.wsrep_causal_reads;
- // }
if (thd->variables.wsrep_causal_reads) {
thd->variables.wsrep_sync_wait |= WSREP_SYNC_WAIT_BEFORE_READ;
} else {
@@ -99,15 +93,11 @@ bool wsrep_causal_reads_update (sys_var *self, THD* thd, enum_var_type var_type)
bool wsrep_sync_wait_update (sys_var* self, THD* thd, enum_var_type var_type)
{
- // global setting should not affect session setting.
- // if (var_type == OPT_GLOBAL) {
- // thd->variables.wsrep_sync_wait = global_system_variables.wsrep_sync_wait;
- // }
- thd->variables.wsrep_causal_reads = thd->variables.wsrep_sync_wait &
+ thd->variables.wsrep_causal_reads= thd->variables.wsrep_sync_wait &
WSREP_SYNC_WAIT_BEFORE_READ;
// update global settings too
- global_system_variables.wsrep_causal_reads = global_system_variables.wsrep_sync_wait &
+ global_system_variables.wsrep_causal_reads= global_system_variables.wsrep_sync_wait &
WSREP_SYNC_WAIT_BEFORE_READ;
return false;
@@ -129,7 +119,7 @@ bool wsrep_start_position_verify (const char* start_str)
ssize_t uuid_len;
// Check whether it has minimum acceptable length.
- start_len = strlen (start_str);
+ start_len= strlen (start_str);
if (start_len < 34)
return true;
@@ -137,7 +127,7 @@ bool wsrep_start_position_verify (const char* start_str)
Parse the input to check whether UUID length is acceptable
and seqno has been provided.
*/
- uuid_len = wsrep_uuid_scan (start_str, start_len, &uuid);
+ uuid_len= wsrep_uuid_scan (start_str, start_len, &uuid);
if (uuid_len < 0 || (start_len - uuid_len) < 2)
return true;
@@ -157,19 +147,18 @@ bool wsrep_start_position_verify (const char* start_str)
static
-bool wsrep_set_local_position(const char* const value, size_t length,
- bool const sst)
+bool wsrep_set_local_position(THD* thd, const char* const value,
+ size_t length, bool const sst)
{
wsrep_uuid_t uuid;
- size_t const uuid_len = wsrep_uuid_scan(value, length, &uuid);
- wsrep_seqno_t const seqno = strtoll(value + uuid_len + 1, NULL, 10);
+ size_t const uuid_len= wsrep_uuid_scan(value, length, &uuid);
+ wsrep_seqno_t const seqno= strtoll(value + uuid_len + 1, NULL, 10);
if (sst) {
- return wsrep_sst_received (wsrep, uuid, seqno, NULL, 0, false);
+ wsrep_sst_received (thd, uuid, seqno, NULL, 0);
} else {
- // initialization
- local_uuid = uuid;
- local_seqno = seqno;
+ local_uuid= uuid;
+ local_seqno= seqno;
}
return false;
}
@@ -194,7 +183,7 @@ bool wsrep_start_position_check (sys_var *self, THD* thd, set_var* var)
As part of further verification, we try to update the value and catch
errors (if any).
*/
- if (wsrep_set_local_position(var->save_result.string_value.str,
+ if (wsrep_set_local_position(thd, var->save_result.string_value.str,
var->save_result.string_value.length,
true))
{
@@ -226,7 +215,7 @@ bool wsrep_start_position_init (const char* val)
return true;
}
- if (wsrep_set_local_position (val, strlen(val), false))
+ if (wsrep_set_local_position (NULL, val, strlen(val), false))
{
WSREP_ERROR("Failed to set initial wsrep_start_position: %s", val);
return true;
@@ -263,25 +252,23 @@ end:
static bool refresh_provider_options()
{
- DBUG_ASSERT(wsrep);
-
WSREP_DEBUG("refresh_provider_options: %s",
(wsrep_provider_options) ? wsrep_provider_options : "null");
- char* opts= wsrep->options_get(wsrep);
- if (opts)
+
+ try
{
- wsrep_provider_options_init(opts);
+ std::string opts= Wsrep_server_state::instance().provider().options();
+ wsrep_provider_options_init(opts.c_str());
get_provider_option_value(wsrep_provider_options,
(char*)"repl.max_ws_size",
&wsrep_max_ws_size);
- free(opts);
+ return false;
}
- else
+ catch (...)
{
WSREP_ERROR("Failed to get provider options");
return true;
}
- return false;
}
static int wsrep_provider_verify (const char* provider_str)
@@ -332,8 +319,6 @@ bool wsrep_provider_update (sys_var *self, THD* thd, enum_var_type type)
{
bool rcode= false;
- bool wsrep_on_saved= thd->variables.wsrep_on;
- thd->variables.wsrep_on= false;
WSREP_DEBUG("wsrep_provider_update: %s", wsrep_provider);
@@ -346,7 +331,12 @@ bool wsrep_provider_update (sys_var *self, THD* thd, enum_var_type type)
*/
mysql_mutex_unlock(&LOCK_global_system_variables);
wsrep_stop_replication(thd);
- mysql_mutex_lock(&LOCK_global_system_variables);
+
+ /* provider status variables are allocated in the provider library
+ and need to be freed here, otherwise a dangling reference to
+ wsrep_status_vars would remain in THD
+ */
+ wsrep_free_status(thd);
if (wsrep_inited == 1)
wsrep_deinit(false);
@@ -357,17 +347,17 @@ bool wsrep_provider_update (sys_var *self, THD* thd, enum_var_type type)
if (wsrep_init())
{
my_error(ER_CANT_OPEN_LIBRARY, MYF(0), tmp, my_error, "wsrep_init failed");
- rcode = true;
+ rcode= true;
}
free(tmp);
// we sure don't want to use old address with new provider
wsrep_cluster_address_init(NULL);
wsrep_provider_options_init(NULL);
+ if (!rcode)
+ refresh_provider_options();
- thd->variables.wsrep_on= wsrep_on_saved;
-
- refresh_provider_options();
+ mysql_mutex_lock(&LOCK_global_system_variables);
return rcode;
}
@@ -385,12 +375,12 @@ void wsrep_provider_init (const char* value)
}
if (wsrep_provider) my_free((void *)wsrep_provider);
- wsrep_provider = my_strdup(value, MYF(0));
+ wsrep_provider= my_strdup(value, MYF(0));
}
bool wsrep_provider_options_check(sys_var *self, THD* thd, set_var* var)
{
- if (wsrep == NULL)
+ if (!WSREP_ON)
{
my_message(ER_WRONG_ARGUMENTS, "WSREP (galera) not started", MYF(0));
return true;
@@ -400,9 +390,9 @@ bool wsrep_provider_options_check(sys_var *self, THD* thd, set_var* var)
bool wsrep_provider_options_update(sys_var *self, THD* thd, enum_var_type type)
{
- DBUG_ASSERT(wsrep);
- wsrep_status_t ret= wsrep->options_set(wsrep, wsrep_provider_options);
- if (ret != WSREP_OK)
+ enum wsrep::provider::status ret=
+ Wsrep_server_state::instance().provider().options(wsrep_provider_options);
+ if (ret)
{
WSREP_ERROR("Set options returned %d", ret);
refresh_provider_options();
@@ -415,7 +405,7 @@ void wsrep_provider_options_init(const char* value)
{
if (wsrep_provider_options && wsrep_provider_options != value)
my_free((void *)wsrep_provider_options);
- wsrep_provider_options = (value) ? my_strdup(value, MYF(0)) : NULL;
+ wsrep_provider_options= (value) ? my_strdup(value, MYF(0)) : NULL;
}
bool wsrep_reject_queries_update(sys_var *self, THD* thd, enum_var_type type)
@@ -440,6 +430,12 @@ bool wsrep_reject_queries_update(sys_var *self, THD* thd, enum_var_type type)
return false;
}
+bool wsrep_debug_update(sys_var *self, THD* thd, enum_var_type type)
+{
+ Wsrep_server_state::instance().debug_log_level(wsrep_debug);
+ return false;
+}
+
static int wsrep_cluster_address_verify (const char* cluster_address_str)
{
/* There is no predefined address format, it depends on provider. */
@@ -469,18 +465,12 @@ bool wsrep_cluster_address_check (sys_var *self, THD* thd, set_var* var)
bool wsrep_cluster_address_update (sys_var *self, THD* thd, enum_var_type type)
{
- bool wsrep_on_saved;
-
- /* Do not proceed if wsrep provider is not loaded. */
- if (!wsrep)
+ if (!Wsrep_server_state::instance().is_provider_loaded())
{
- WSREP_INFO("wsrep provider is not loaded, can't re(start) replication.");
+ WSREP_INFO("WSREP (galera) provider is not loaded, can't re(start) replication.");
return false;
}
- wsrep_on_saved= thd->variables.wsrep_on;
- thd->variables.wsrep_on= false;
-
/* stop replication is heavy operation, and includes closing all client
connections. Closing clients may need to get LOCK_global_system_variables
at least in MariaDB.
@@ -488,24 +478,24 @@ bool wsrep_cluster_address_update (sys_var *self, THD* thd, enum_var_type type)
Note: releasing LOCK_global_system_variables may cause race condition, if
there can be several concurrent clients changing wsrep_provider
*/
+ WSREP_DEBUG("wsrep_cluster_address_update: %s", wsrep_cluster_address);
mysql_mutex_unlock(&LOCK_global_system_variables);
wsrep_stop_replication(thd);
- /*
- Unlock and lock LOCK_wsrep_slave_threads to maintain lock order & avoid
- any potential deadlock.
- */
- mysql_mutex_unlock(&LOCK_wsrep_slave_threads);
- mysql_mutex_lock(&LOCK_global_system_variables);
- mysql_mutex_lock(&LOCK_wsrep_slave_threads);
-
if (wsrep_start_replication())
{
wsrep_create_rollbacker();
wsrep_create_appliers(wsrep_slave_threads);
}
+ /* locking order to be enforced is:
+ 1. LOCK_global_system_variables
+ 2. LOCK_wsrep_cluster_config
+ => have to juggle mutexes to comply with this
+ */
- thd->variables.wsrep_on= wsrep_on_saved;
+ mysql_mutex_unlock(&LOCK_wsrep_cluster_config);
+ mysql_mutex_lock(&LOCK_global_system_variables);
+ mysql_mutex_lock(&LOCK_wsrep_cluster_config);
return false;
}
@@ -590,15 +580,14 @@ void wsrep_node_address_init (const char* value)
if (wsrep_node_address && strcmp(wsrep_node_address, value))
my_free ((void*)wsrep_node_address);
- wsrep_node_address = (value) ? my_strdup(value, MYF(0)) : NULL;
+ wsrep_node_address= (value) ? my_strdup(value, MYF(0)) : NULL;
}
static void wsrep_slave_count_change_update ()
{
- wsrep_slave_count_change = (wsrep_slave_threads - wsrep_prev_slave_threads);
+ wsrep_slave_count_change= (wsrep_slave_threads - wsrep_running_threads + 2);
WSREP_DEBUG("Change on slave threads: New %lu old %lu difference %d",
- wsrep_slave_threads, wsrep_prev_slave_threads, wsrep_slave_count_change);
- wsrep_prev_slave_threads = wsrep_slave_threads;
+ wsrep_slave_threads, wsrep_running_threads, wsrep_slave_count_change);
}
bool wsrep_slave_threads_update (sys_var *self, THD* thd, enum_var_type type)
@@ -607,14 +596,14 @@ bool wsrep_slave_threads_update (sys_var *self, THD* thd, enum_var_type type)
if (wsrep_slave_count_change > 0)
{
wsrep_create_appliers(wsrep_slave_count_change);
- wsrep_slave_count_change = 0;
+ wsrep_slave_count_change= 0;
}
return false;
}
bool wsrep_desync_check (sys_var *self, THD* thd, set_var* var)
{
- if (wsrep == NULL)
+ if (!WSREP_ON)
{
my_message(ER_WRONG_ARGUMENTS, "WSREP (galera) not started", MYF(0));
return true;
@@ -639,17 +628,17 @@ bool wsrep_desync_check (sys_var *self, THD* thd, set_var* var)
}
return false;
}
- wsrep_status_t ret(WSREP_WARNING);
+ int ret= 1;
if (new_wsrep_desync) {
- ret = wsrep->desync (wsrep);
- if (ret != WSREP_OK) {
- WSREP_WARN ("SET desync failed %d for schema: %s, query: %s",
- ret, thd->get_db(), thd->query());
+ ret= Wsrep_server_state::instance().provider().desync();
+ if (ret) {
+ WSREP_WARN ("SET desync failed %d for schema: %s, query: %s", ret,
+ thd->db.str, WSREP_QUERY(thd));
my_error (ER_CANNOT_USER, MYF(0), "'desync'", thd->query());
return true;
}
} else {
- ret = wsrep->resync (wsrep);
+ ret= Wsrep_server_state::instance().provider().resync();
if (ret != WSREP_OK) {
WSREP_WARN ("SET resync failed %d for schema: %s, query: %s", ret,
thd->get_db(), thd->query());
@@ -662,13 +651,78 @@ bool wsrep_desync_check (sys_var *self, THD* thd, set_var* var)
bool wsrep_desync_update (sys_var *self, THD* thd, enum_var_type type)
{
- DBUG_ASSERT(wsrep);
+ return false;
+}
+
+bool wsrep_trx_fragment_size_check (sys_var *self, THD* thd, set_var* var)
+{
+ if (var->value == NULL) {
+ return false;
+ }
+
+ const ulong new_trx_fragment_size= var->value->val_uint();
+
+ if (!WSREP(thd) && new_trx_fragment_size > 0) {
+ push_warning (thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_VALUE_FOR_VAR,
+ "Cannot set 'wsrep_trx_fragment_size' to a value other than "
+ "0 because wsrep is switched off.");
+ return true;
+ }
+
+ if (new_trx_fragment_size > 0 && !wsrep_provider_is_SR_capable()) {
+ push_warning (thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_VALUE_FOR_VAR,
+ "Cannot set 'wsrep_trx_fragment_size' to a value other than "
+ "0 because the wsrep_provider does not support streaming "
+ "replication.");
+ return true;
+ }
+
+ if (wsrep_protocol_version < 4 && new_trx_fragment_size > 0) {
+ push_warning (thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_VALUE_FOR_VAR,
+ "Cannot set 'wsrep_trx_fragment_size' to a value other than "
+ "0 because cluster is not yet operating in Galera 4 mode.");
+ return true;
+ }
+
+ return false;
+}
+
+bool wsrep_trx_fragment_size_update(sys_var* self, THD *thd, enum_var_type)
+{
+ WSREP_DEBUG("wsrep_trx_fragment_size_update: %llu",
+ thd->variables.wsrep_trx_fragment_size);
+ if (thd->variables.wsrep_trx_fragment_size)
+ {
+ return thd->wsrep_cs().enable_streaming(
+ wsrep_fragment_unit(thd->variables.wsrep_trx_fragment_unit),
+ size_t(thd->variables.wsrep_trx_fragment_size));
+ }
+ else
+ {
+ thd->wsrep_cs().disable_streaming();
+ return false;
+ }
+}
+
+bool wsrep_trx_fragment_unit_update(sys_var* self, THD *thd, enum_var_type)
+{
+ WSREP_DEBUG("wsrep_trx_fragment_unit_update: %lu",
+ thd->variables.wsrep_trx_fragment_unit);
+ if (thd->variables.wsrep_trx_fragment_size)
+ {
+ return thd->wsrep_cs().enable_streaming(
+ wsrep_fragment_unit(thd->variables.wsrep_trx_fragment_unit),
+ size_t(thd->variables.wsrep_trx_fragment_size));
+ }
return false;
}
bool wsrep_max_ws_size_check(sys_var *self, THD* thd, set_var* var)
{
- if (wsrep == NULL)
+ if (!WSREP_ON)
{
my_message(ER_WRONG_ARGUMENTS, "WSREP (galera) not started", MYF(0));
return true;
@@ -676,36 +730,35 @@ bool wsrep_max_ws_size_check(sys_var *self, THD* thd, set_var* var)
return false;
}
-bool wsrep_max_ws_size_update (sys_var *self, THD *thd, enum_var_type)
+bool wsrep_max_ws_size_update(sys_var *self, THD *thd, enum_var_type)
{
- DBUG_ASSERT(wsrep);
-
char max_ws_size_opt[128];
my_snprintf(max_ws_size_opt, sizeof(max_ws_size_opt),
- "repl.max_ws_size=%lu", wsrep_max_ws_size);
- wsrep_status_t ret= wsrep->options_set(wsrep, max_ws_size_opt);
- if (ret != WSREP_OK)
+ "repl.max_ws_size=%d", wsrep_max_ws_size);
+ enum wsrep::provider::status ret= Wsrep_server_state::instance().provider().options(max_ws_size_opt);
+ if (ret)
{
WSREP_ERROR("Set options returned %d", ret);
- refresh_provider_options();
return true;
}
return refresh_provider_options();
}
+#if UNUSED /* eaec266eb16c (Sergei Golubchik 2014-09-28) */
static SHOW_VAR wsrep_status_vars[]=
{
{"connected", (char*) &wsrep_connected, SHOW_BOOL},
- {"ready", (char*) &wsrep_ready, SHOW_BOOL},
+ {"ready", (char*) &wsrep_show_ready, SHOW_FUNC},
{"cluster_state_uuid",(char*) &wsrep_cluster_state_uuid,SHOW_CHAR_PTR},
{"cluster_conf_id", (char*) &wsrep_cluster_conf_id, SHOW_LONGLONG},
{"cluster_status", (char*) &wsrep_cluster_status, SHOW_CHAR_PTR},
{"cluster_size", (char*) &wsrep_cluster_size, SHOW_LONG_NOFLUSH},
{"local_index", (char*) &wsrep_local_index, SHOW_LONG_NOFLUSH},
- {"local_bf_aborts", (char*) &wsrep_show_bf_aborts, SHOW_SIMPLE_FUNC},
+ {"local_bf_aborts", (char*) &wsrep_show_bf_aborts, SHOW_FUNC},
{"provider_name", (char*) &wsrep_provider_name, SHOW_CHAR_PTR},
{"provider_version", (char*) &wsrep_provider_version, SHOW_CHAR_PTR},
{"provider_vendor", (char*) &wsrep_provider_vendor, SHOW_CHAR_PTR},
+ {"wsrep_provider_capabilities", (char*) &wsrep_provider_capabilities, SHOW_CHAR_PTR},
{"thread_count", (char*) &wsrep_running_threads, SHOW_LONG_NOFLUSH}
};
@@ -714,48 +767,90 @@ static int show_var_cmp(const void *var1, const void *var2)
return strcasecmp(((SHOW_VAR*)var1)->name, ((SHOW_VAR*)var2)->name);
}
-int wsrep_show_status (THD *thd, SHOW_VAR *var, char *buff,
- enum enum_var_type scope)
+/*
+ * Status variable handling below
+ */
+static inline void
+wsrep_assign_to_mysql (SHOW_VAR* mysql, wsrep_stats_var* wsrep_var)
+{
+ mysql->name= wsrep_var->name;
+ switch (wsrep_var->type) {
+ case WSREP_VAR_INT64:
+ mysql->value= (char*) &wsrep_var->value._int64;
+ mysql->type= SHOW_LONGLONG;
+ break;
+ case WSREP_VAR_STRING:
+ mysql->value= (char*) &wsrep_var->value._string;
+ mysql->type= SHOW_CHAR_PTR;
+ break;
+ case WSREP_VAR_DOUBLE:
+ mysql->value= (char*) &wsrep_var->value._double;
+ mysql->type= SHOW_DOUBLE;
+ break;
+ }
+}
+#endif /* UNUSED */
+
+#if DYNAMIC
+// The MySQL status mechanism appears to work only with statically allocated arrays.
+static SHOW_VAR* mysql_status_vars= NULL;
+static int mysql_status_len= -1;
+#else
+static SHOW_VAR mysql_status_vars[512 + 1];
+static const int mysql_status_len= 512;
+#endif
+
+static void export_wsrep_status_to_mysql(THD* thd)
{
- uint i, maxi= SHOW_VAR_FUNC_BUFF_SIZE / sizeof(*var) - 1;
- SHOW_VAR *v= (SHOW_VAR *)buff;
+ int wsrep_status_len, i;
- var->type= SHOW_ARRAY;
- var->value= buff;
+ thd->wsrep_status_vars= Wsrep_server_state::instance().status();
- for (i=0; i < array_elements(wsrep_status_vars); i++)
- *v++= wsrep_status_vars[i];
+ wsrep_status_len= thd->wsrep_status_vars.size();
- DBUG_ASSERT(i < maxi);
+#if DYNAMIC
+ if (wsrep_status_len != mysql_status_len) {
+ void* tmp= realloc (mysql_status_vars,
+ (wsrep_status_len + 1) * sizeof(SHOW_VAR));
+ if (!tmp) {
- if (wsrep != NULL)
- {
- wsrep_stats_var* stats= wsrep->stats_get(wsrep);
- for (wsrep_stats_var *sv= stats;
- i < maxi && sv && sv->name; i++,
- sv++, v++)
- {
- v->name = thd->strdup(sv->name);
- switch (sv->type) {
- case WSREP_VAR_INT64:
- v->value = (char*)thd->memdup(&sv->value._integer64, sizeof(longlong));
- v->type = SHOW_LONGLONG;
- break;
- case WSREP_VAR_STRING:
- v->value = thd->strdup(sv->value._string);
- v->type = SHOW_CHAR;
- break;
- case WSREP_VAR_DOUBLE:
- v->value = (char*)thd->memdup(&sv->value._double, sizeof(double));
- v->type = SHOW_DOUBLE;
- break;
- }
+      sql_print_error ("Out of memory for wsrep status variables. "
+                       "Number of variables: %d", wsrep_status_len);
+ return;
}
- wsrep->stats_free(wsrep, stats);
+
+ mysql_status_len= wsrep_status_len;
+ mysql_status_vars= (SHOW_VAR*)tmp;
}
+ /* @TODO: fix this: */
+#else
+ if (mysql_status_len < wsrep_status_len) wsrep_status_len= mysql_status_len;
+#endif
- my_qsort(buff, i, sizeof(*v), show_var_cmp);
+ for (i= 0; i < wsrep_status_len; i++)
+ {
+ mysql_status_vars[i].name= (char*)thd->wsrep_status_vars[i].name().c_str();
+ mysql_status_vars[i].value= (char*)thd->wsrep_status_vars[i].value().c_str();
+ mysql_status_vars[i].type= SHOW_CHAR;
+ }
+
+ mysql_status_vars[wsrep_status_len].name = NullS;
+ mysql_status_vars[wsrep_status_len].value = NullS;
+ mysql_status_vars[wsrep_status_len].type = SHOW_LONG;
+}
- v->name= 0; // terminator
+int wsrep_show_status (THD *thd, SHOW_VAR *var, char *buff)
+{
+ if (WSREP_ON)
+ {
+ export_wsrep_status_to_mysql(thd);
+ var->type= SHOW_ARRAY;
+ var->value= (char *) &mysql_status_vars;
+ }
return 0;
}
+
+void wsrep_free_status (THD* thd)
+{
+ thd->wsrep_status_vars.clear();
+}
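
As a rough illustration (not part of this patch): export_wsrep_status_to_mysql() above publishes the provider's status as a NullS-terminated SHOW_VAR array whose entries are all typed SHOW_CHAR. A minimal sketch of how such an array could be walked, assuming only the server's SHOW_VAR and NullS definitions; the helper name dump_wsrep_status_vars is hypothetical:

  static void dump_wsrep_status_vars(const SHOW_VAR *vars)
  {
    /* the exported array is terminated by an entry whose name is NullS */
    for (const SHOW_VAR *v= vars; v->name != NullS; v++)
    {
      if (v->type == SHOW_CHAR)      /* every wsrep entry exported above is SHOW_CHAR */
        fprintf(stderr, "%s: %s\n", v->name, v->value);
    }
  }
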
diff --git a/sql/wsrep_var.h b/sql/wsrep_var.h
index 7d3ff50f1d2..0acb61432f0 100644
--- a/sql/wsrep_var.h
+++ b/sql/wsrep_var.h
@@ -13,11 +13,11 @@
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */
-#include <my_config.h>
-
#ifndef WSREP_VAR_H
#define WSREP_VAR_H
+#include <my_config.h>
+
#ifdef WITH_WSREP
#define WSREP_CLUSTER_NAME "my_wsrep_cluster"
@@ -90,13 +90,20 @@ extern bool wsrep_slave_threads_update UPDATE_ARGS;
extern bool wsrep_desync_check CHECK_ARGS;
extern bool wsrep_desync_update UPDATE_ARGS;
+extern bool wsrep_trx_fragment_size_check CHECK_ARGS;
+extern bool wsrep_trx_fragment_size_update UPDATE_ARGS;
+
+extern bool wsrep_trx_fragment_unit_update UPDATE_ARGS;
+
extern bool wsrep_max_ws_size_check CHECK_ARGS;
extern bool wsrep_max_ws_size_update UPDATE_ARGS;
+
extern bool wsrep_reject_queries_update UPDATE_ARGS;
+extern bool wsrep_debug_update UPDATE_ARGS;
+
#else /* WITH_WSREP */
-#define WSREP_NONE
#define wsrep_provider_init(X)
#define wsrep_init_vars() (0)
#define wsrep_start_position_init(X)
diff --git a/sql/wsrep_xid.cc b/sql/wsrep_xid.cc
index 2f9f69d56e6..a1c454d9d65 100644
--- a/sql/wsrep_xid.cc
+++ b/sql/wsrep_xid.cc
@@ -21,8 +21,9 @@
#include "sql_class.h"
#include "wsrep_mysqld.h" // for logging macros
-#include <algorithm> /* std::sort() */
+#include <mysql/service_wsrep.h>
+#include <algorithm> /* std::sort() */
/*
* WSREPXid
*/
@@ -36,20 +37,22 @@
#define WSREP_XID_SEQNO_OFFSET (WSREP_XID_UUID_OFFSET + sizeof(wsrep_uuid_t))
#define WSREP_XID_GTRID_LEN (WSREP_XID_SEQNO_OFFSET + sizeof(wsrep_seqno_t))
-void wsrep_xid_init(XID* xid, const wsrep_uuid_t& uuid, wsrep_seqno_t seqno)
+void wsrep_xid_init(XID* xid, const wsrep::gtid& wsgtid)
{
xid->formatID= 1;
xid->gtrid_length= WSREP_XID_GTRID_LEN;
xid->bqual_length= 0;
memset(xid->data, 0, sizeof(xid->data));
memcpy(xid->data, WSREP_XID_PREFIX, WSREP_XID_PREFIX_LEN);
- xid->data[WSREP_XID_VERSION_OFFSET] = WSREP_XID_VERSION_2;
- memcpy(xid->data + WSREP_XID_UUID_OFFSET, &uuid, sizeof(wsrep_uuid_t));
- int8store(xid->data + WSREP_XID_SEQNO_OFFSET,seqno);
+ xid->data[WSREP_XID_VERSION_OFFSET]= WSREP_XID_VERSION_2;
+ memcpy(xid->data + WSREP_XID_UUID_OFFSET, wsgtid.id().data(),sizeof(wsrep::id));
+ int8store(xid->data + WSREP_XID_SEQNO_OFFSET, wsgtid.seqno().get());
}
-int wsrep_is_wsrep_xid(const XID* xid)
+extern "C"
+int wsrep_is_wsrep_xid(const void* xid_ptr)
{
+ const XID* xid= static_cast<const XID*>(xid_ptr);
return (xid->formatID == 1 &&
xid->gtrid_length == WSREP_XID_GTRID_LEN &&
xid->bqual_length == 0 &&
@@ -58,33 +61,36 @@ int wsrep_is_wsrep_xid(const XID* xid)
xid->data[WSREP_XID_VERSION_OFFSET] == WSREP_XID_VERSION_2));
}
-const wsrep_uuid_t* wsrep_xid_uuid(const XID& xid)
+const unsigned char* wsrep_xid_uuid(const xid_t* xid)
{
- if (wsrep_is_wsrep_xid(&xid))
- return reinterpret_cast<const wsrep_uuid_t*>(xid.data
- + WSREP_XID_UUID_OFFSET);
+ DBUG_ASSERT(xid);
+ static wsrep::id const undefined;
+ if (wsrep_is_wsrep_xid(xid))
+ return reinterpret_cast<const unsigned char*>
+ (xid->data + WSREP_XID_UUID_OFFSET);
else
- return &WSREP_UUID_UNDEFINED;
+ return static_cast<const unsigned char*>(wsrep::id::undefined().data());
}
-const unsigned char* wsrep_xid_uuid(const xid_t* xid)
+const wsrep::id& wsrep_xid_uuid(const XID& xid)
{
- DBUG_ASSERT(xid);
- return wsrep_xid_uuid(*xid)->data;
+ compile_time_assert(sizeof(wsrep::id) == sizeof(wsrep_uuid_t));
+ return *reinterpret_cast<const wsrep::id*>(wsrep_xid_uuid(&xid));
}
-wsrep_seqno_t wsrep_xid_seqno(const XID& xid)
+long long wsrep_xid_seqno(const xid_t* xid)
{
- wsrep_seqno_t ret= WSREP_SEQNO_UNDEFINED;
- if (wsrep_is_wsrep_xid(&xid))
+ DBUG_ASSERT(xid);
+ long long ret= wsrep::seqno::undefined().get();
+ if (wsrep_is_wsrep_xid(xid))
{
- switch (xid.data[WSREP_XID_VERSION_OFFSET])
+ switch (xid->data[WSREP_XID_VERSION_OFFSET])
{
case WSREP_XID_VERSION_1:
- memcpy(&ret, xid.data + WSREP_XID_SEQNO_OFFSET, sizeof ret);
+ memcpy(&ret, xid->data + WSREP_XID_SEQNO_OFFSET, sizeof ret);
break;
case WSREP_XID_VERSION_2:
- ret= sint8korr(xid.data + WSREP_XID_SEQNO_OFFSET);
+ ret= sint8korr(xid->data + WSREP_XID_SEQNO_OFFSET);
break;
default:
break;
@@ -93,10 +99,9 @@ wsrep_seqno_t wsrep_xid_seqno(const XID& xid)
return ret;
}
-long long wsrep_xid_seqno(const xid_t* xid)
+wsrep::seqno wsrep_xid_seqno(const XID& xid)
{
- DBUG_ASSERT(xid);
- return wsrep_xid_seqno(*xid);
+ return wsrep::seqno(wsrep_xid_seqno(&xid));
}
static my_bool set_SE_checkpoint(THD* unused, plugin_ref plugin, void* arg)
@@ -106,11 +111,11 @@ static my_bool set_SE_checkpoint(THD* unused, plugin_ref plugin, void* arg)
if (hton->set_checkpoint)
{
- const wsrep_uuid_t* uuid(wsrep_xid_uuid(*xid));
- char uuid_str[40] = {0, };
- wsrep_uuid_print(uuid, uuid_str, sizeof(uuid_str));
+ const unsigned char* uuid= wsrep_xid_uuid(xid);
+ char uuid_str[40]= {0, };
+ wsrep_uuid_print((const wsrep_uuid_t*)uuid, uuid_str, sizeof(uuid_str));
WSREP_DEBUG("Set WSREPXid for InnoDB: %s:%lld",
- uuid_str, (long long)wsrep_xid_seqno(*xid));
+ uuid_str, (long long)wsrep_xid_seqno(xid));
hton->set_checkpoint(hton, xid);
}
return FALSE;
@@ -122,10 +127,10 @@ bool wsrep_set_SE_checkpoint(XID& xid)
&xid);
}
-bool wsrep_set_SE_checkpoint(const wsrep_uuid_t& uuid, wsrep_seqno_t seqno)
+bool wsrep_set_SE_checkpoint(const wsrep::gtid& wsgtid)
{
XID xid;
- wsrep_xid_init(&xid, uuid, seqno);
+ wsrep_xid_init(&xid, wsgtid);
return wsrep_set_SE_checkpoint(xid);
}
@@ -137,11 +142,12 @@ static my_bool get_SE_checkpoint(THD* unused, plugin_ref plugin, void* arg)
if (hton->get_checkpoint)
{
hton->get_checkpoint(hton, xid);
- const wsrep_uuid_t* uuid(wsrep_xid_uuid(*xid));
- char uuid_str[40] = {0, };
- wsrep_uuid_print(uuid, uuid_str, sizeof(uuid_str));
+ wsrep_uuid_t uuid;
+ memcpy(&uuid, wsrep_xid_uuid(xid), sizeof(uuid));
+ char uuid_str[40]= {0, };
+ wsrep_uuid_print(&uuid, uuid_str, sizeof(uuid_str));
WSREP_DEBUG("Read WSREPXid from InnoDB: %s:%lld",
- uuid_str, (long long)wsrep_xid_seqno(*xid));
+ uuid_str, (long long)wsrep_xid_seqno(xid));
}
return FALSE;
}
@@ -152,34 +158,28 @@ bool wsrep_get_SE_checkpoint(XID& xid)
&xid);
}
-bool wsrep_get_SE_checkpoint(wsrep_uuid_t& uuid, wsrep_seqno_t& seqno)
+wsrep::gtid wsrep_get_SE_checkpoint()
{
- uuid= WSREP_UUID_UNDEFINED;
- seqno= WSREP_SEQNO_UNDEFINED;
-
XID xid;
xid.null();
if (wsrep_get_SE_checkpoint(xid))
{
- return true;
+ return wsrep::gtid();
}
if (xid.is_null())
{
- return false;
+ return wsrep::gtid();
}
if (!wsrep_is_wsrep_xid(&xid))
{
WSREP_WARN("Read non-wsrep XID from storage engines.");
- return false;
+ return wsrep::gtid();
}
- uuid= *wsrep_xid_uuid(xid);
- seqno= wsrep_xid_seqno(xid);
-
- return false;
+ return wsrep::gtid(wsrep_xid_uuid(xid),wsrep_xid_seqno(xid));
}
/*
@@ -196,7 +196,7 @@ struct Wsrep_xid_cmp
const bool right_is_wsrep= wsrep_is_wsrep_xid(&right);
if (left_is_wsrep && right_is_wsrep)
{
- return (wsrep_xid_seqno(left) < wsrep_xid_seqno(right));
+ return (wsrep_xid_seqno(&left) < wsrep_xid_seqno(&right));
}
else if (left_is_wsrep)
{
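
As a rough illustration (not part of this patch) of the layout that wsrep_xid_init() writes into xid->data and wsrep_xid_seqno() reads back: magic prefix, version byte, 16-byte cluster id, then the seqno stored with int8store(). The helper name pack_wsrep_xid_data is hypothetical; the WSREP_XID_* macros are the ones defined earlier in this file:

  static void pack_wsrep_xid_data(unsigned char *data,     /* xid->data, pre-zeroed */
                                  const unsigned char *id, /* 16-byte cluster id */
                                  long long seqno)
  {
    memcpy(data, WSREP_XID_PREFIX, WSREP_XID_PREFIX_LEN);  /* magic prefix */
    data[WSREP_XID_VERSION_OFFSET]= WSREP_XID_VERSION_2;   /* version byte */
    memcpy(data + WSREP_XID_UUID_OFFSET, id, 16);          /* cluster id */
    int8store(data + WSREP_XID_SEQNO_OFFSET, seqno);       /* 8-byte seqno */
  }
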
diff --git a/sql/wsrep_xid.h b/sql/wsrep_xid.h
index 01b18506708..e41f6fba420 100644
--- a/sql/wsrep_xid.h
+++ b/sql/wsrep_xid.h
@@ -20,17 +20,17 @@
#ifdef WITH_WSREP
-#include "../wsrep/wsrep_api.h"
+#include "wsrep/gtid.hpp"
#include "handler.h" // XID typedef
-void wsrep_xid_init(xid_t*, const wsrep_uuid_t&, wsrep_seqno_t);
-const wsrep_uuid_t* wsrep_xid_uuid(const XID&);
-wsrep_seqno_t wsrep_xid_seqno(const XID&);
+void wsrep_xid_init(xid_t*, const wsrep::gtid&);
+const wsrep::id& wsrep_xid_uuid(const XID&);
+wsrep::seqno wsrep_xid_seqno(const XID&);
+wsrep::gtid wsrep_get_SE_checkpoint();
+bool wsrep_set_SE_checkpoint(const wsrep::gtid& gtid);
//void wsrep_get_SE_checkpoint(XID&); /* uncomment if needed */
-bool wsrep_get_SE_checkpoint(wsrep_uuid_t&, wsrep_seqno_t&);
//void wsrep_set_SE_checkpoint(XID&); /* uncomment if needed */
-bool wsrep_set_SE_checkpoint(const wsrep_uuid_t&, wsrep_seqno_t);
void wsrep_sort_xid_array(XID *array, int len);
diff --git a/sql/xa.cc b/sql/xa.cc
new file mode 100644
index 00000000000..c4b983aa4f5
--- /dev/null
+++ b/sql/xa.cc
@@ -0,0 +1,867 @@
+/*
+ Copyright (c) 2000, 2016, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2019, MariaDB Corporation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+*/
+
+#include "mariadb.h"
+#include "sql_class.h"
+#include "transaction.h"
+
+
+/***************************************************************************
+  Handling of XA id caching
+***************************************************************************/
+enum xa_states { XA_ACTIVE= 0, XA_IDLE, XA_PREPARED, XA_ROLLBACK_ONLY };
+
+
+struct XID_cache_insert_element
+{
+ enum xa_states xa_state;
+ XID *xid;
+ XID_cache_element *xid_cache_element;
+
+ XID_cache_insert_element(enum xa_states xa_state_arg, XID *xid_arg):
+ xa_state(xa_state_arg), xid(xid_arg) {}
+};
+
+
+class XID_cache_element
+{
+ /*
+ m_state is used to prevent elements from being deleted while XA RECOVER
+    iterates the xid cache and to prevent recovered elements from being acquired by
+ multiple threads.
+
+ bits 1..29 are reference counter
+ bit 30 is RECOVERED flag
+ bit 31 is ACQUIRED flag (thread owns this xid)
+ bit 32 is unused
+
+ Newly allocated and deleted elements have m_state set to 0.
+
+    On lock() m_state is atomically incremented. It also creates a load-ACQUIRE
+    memory barrier to make sure m_state is actually updated before further
+ memory accesses. Attempting to lock an element that has neither ACQUIRED
+ nor RECOVERED flag set returns failure and further accesses to element
+ memory are forbidden.
+
+    On unlock() m_state is decremented. It also creates a store-RELEASE memory
+ barrier to make sure m_state is actually updated after preceding memory
+ accesses.
+
+    ACQUIRED flag is set when a thread registers its xid or when a thread
+    acquires a recovered xid.
+
+ RECOVERED flag is set for elements found during crash recovery.
+
+    ACQUIRED and RECOVERED flags are cleared before the element is deleted from
+    the hash in a spin loop, after the last reference is released.
+ */
+ std::atomic<int32_t> m_state;
+public:
+ static const int32 ACQUIRED= 1 << 30;
+ static const int32 RECOVERED= 1 << 29;
+ /* Error reported by the Resource Manager (RM) to the Transaction Manager. */
+ uint rm_error;
+ enum xa_states xa_state;
+ XID xid;
+ bool is_set(int32_t flag)
+ { return m_state.load(std::memory_order_relaxed) & flag; }
+ void set(int32_t flag)
+ {
+ DBUG_ASSERT(!is_set(ACQUIRED | RECOVERED));
+ m_state.fetch_add(flag, std::memory_order_relaxed);
+ }
+ bool lock()
+ {
+ int32_t old= m_state.fetch_add(1, std::memory_order_acquire);
+ if (old & (ACQUIRED | RECOVERED))
+ return true;
+ unlock();
+ return false;
+ }
+ void unlock()
+ { m_state.fetch_sub(1, std::memory_order_release); }
+ void mark_uninitialized()
+ {
+ int32_t old= ACQUIRED;
+ while (!m_state.compare_exchange_weak(old, 0,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed))
+ {
+ old&= ACQUIRED | RECOVERED;
+ (void) LF_BACKOFF();
+ }
+ }
+ void acquired_to_recovered()
+ {
+ m_state.fetch_or(RECOVERED, std::memory_order_relaxed);
+ m_state.fetch_and(~ACQUIRED, std::memory_order_release);
+ }
+ bool acquire_recovered()
+ {
+ int32_t old= RECOVERED;
+ while (!m_state.compare_exchange_weak(old, ACQUIRED | RECOVERED,
+ std::memory_order_acquire,
+ std::memory_order_relaxed))
+ {
+ if (!(old & RECOVERED) || (old & ACQUIRED))
+ return false;
+ old= RECOVERED;
+ (void) LF_BACKOFF();
+ }
+ return true;
+ }
+ static void lf_hash_initializer(LF_HASH *hash __attribute__((unused)),
+ XID_cache_element *element,
+ XID_cache_insert_element *new_element)
+ {
+ DBUG_ASSERT(!element->is_set(ACQUIRED | RECOVERED));
+ element->rm_error= 0;
+ element->xa_state= new_element->xa_state;
+ element->xid.set(new_element->xid);
+ new_element->xid_cache_element= element;
+ }
+ static void lf_alloc_constructor(uchar *ptr)
+ {
+ XID_cache_element *element= (XID_cache_element*) (ptr + LF_HASH_OVERHEAD);
+ element->m_state= 0;
+ }
+ static void lf_alloc_destructor(uchar *ptr)
+ {
+ XID_cache_element *element= (XID_cache_element*) (ptr + LF_HASH_OVERHEAD);
+ DBUG_ASSERT(!element->is_set(ACQUIRED));
+ }
+ static uchar *key(const XID_cache_element *element, size_t *length,
+ my_bool not_used __attribute__((unused)))
+ {
+ *length= element->xid.key_length();
+ return element->xid.key();
+ }
+};
+
+
+static LF_HASH xid_cache;
+static bool xid_cache_inited;
+
+
+bool THD::fix_xid_hash_pins()
+{
+ if (!xid_hash_pins)
+ xid_hash_pins= lf_hash_get_pins(&xid_cache);
+ return !xid_hash_pins;
+}
+
+
+void XID_STATE::set_error(uint error)
+{
+ if (is_explicit_XA())
+ xid_cache_element->rm_error= error;
+}
+
+
+void XID_STATE::er_xaer_rmfail() const
+{
+ static const char *xa_state_names[]=
+ { "ACTIVE", "IDLE", "PREPARED", "ROLLBACK ONLY" };
+ my_error(ER_XAER_RMFAIL, MYF(0), is_explicit_XA() ?
+ xa_state_names[xid_cache_element->xa_state] : "NON-EXISTING");
+}
+
+
+/**
+  Check whether the XA transaction has uncommitted work. Report an error
+  to the user when there is uncommitted work for the XA transaction.
+
+ @return result of check
+ @retval false XA transaction is NOT in state IDLE, PREPARED
+ or ROLLBACK_ONLY.
+ @retval true XA transaction is in state IDLE or PREPARED
+ or ROLLBACK_ONLY.
+*/
+
+bool XID_STATE::check_has_uncommitted_xa() const
+{
+ if (is_explicit_XA() && xid_cache_element->xa_state != XA_ACTIVE)
+ {
+ er_xaer_rmfail();
+ return true;
+ }
+ return false;
+}
+
+
+XID *XID_STATE::get_xid() const
+{
+ DBUG_ASSERT(is_explicit_XA());
+ return &xid_cache_element->xid;
+}
+
+
+void xid_cache_init()
+{
+ xid_cache_inited= true;
+ lf_hash_init(&xid_cache, sizeof(XID_cache_element), LF_HASH_UNIQUE, 0, 0,
+ (my_hash_get_key) XID_cache_element::key, &my_charset_bin);
+ xid_cache.alloc.constructor= XID_cache_element::lf_alloc_constructor;
+ xid_cache.alloc.destructor= XID_cache_element::lf_alloc_destructor;
+ xid_cache.initializer=
+ (lf_hash_initializer) XID_cache_element::lf_hash_initializer;
+}
+
+
+void xid_cache_free()
+{
+ if (xid_cache_inited)
+ {
+ lf_hash_destroy(&xid_cache);
+ xid_cache_inited= false;
+ }
+}
+
+
+/**
+ Find recovered XA transaction by XID.
+*/
+
+static XID_cache_element *xid_cache_search(THD *thd, XID *xid)
+{
+ DBUG_ASSERT(thd->xid_hash_pins);
+ XID_cache_element *element=
+ (XID_cache_element*) lf_hash_search(&xid_cache, thd->xid_hash_pins,
+ xid->key(), xid->key_length());
+ if (element)
+ {
+ if (!element->acquire_recovered())
+ element= 0;
+ lf_hash_search_unpin(thd->xid_hash_pins);
+ DEBUG_SYNC(thd, "xa_after_search");
+ }
+ return element;
+}
+
+
+bool xid_cache_insert(XID *xid)
+{
+ XID_cache_insert_element new_element(XA_PREPARED, xid);
+ LF_PINS *pins;
+
+ if (!(pins= lf_hash_get_pins(&xid_cache)))
+ return true;
+
+ int res= lf_hash_insert(&xid_cache, pins, &new_element);
+ switch (res)
+ {
+ case 0:
+ new_element.xid_cache_element->set(XID_cache_element::RECOVERED);
+ break;
+ case 1:
+ res= 0;
+ }
+ lf_hash_put_pins(pins);
+ return res;
+}
+
+
+bool xid_cache_insert(THD *thd, XID_STATE *xid_state, XID *xid)
+{
+ XID_cache_insert_element new_element(XA_ACTIVE, xid);
+
+ if (thd->fix_xid_hash_pins())
+ return true;
+
+ int res= lf_hash_insert(&xid_cache, thd->xid_hash_pins, &new_element);
+ switch (res)
+ {
+ case 0:
+ xid_state->xid_cache_element= new_element.xid_cache_element;
+ xid_state->xid_cache_element->set(XID_cache_element::ACQUIRED);
+ break;
+ case 1:
+ my_error(ER_XAER_DUPID, MYF(0));
+ }
+ return res;
+}
+
+
+static void xid_cache_delete(THD *thd, XID_cache_element *&element)
+{
+ DBUG_ASSERT(thd->xid_hash_pins);
+ element->mark_uninitialized();
+ lf_hash_delete(&xid_cache, thd->xid_hash_pins,
+ element->xid.key(), element->xid.key_length());
+}
+
+
+void xid_cache_delete(THD *thd, XID_STATE *xid_state)
+{
+ DBUG_ASSERT(xid_state->is_explicit_XA());
+ xid_cache_delete(thd, xid_state->xid_cache_element);
+ xid_state->xid_cache_element= 0;
+}
+
+
+struct xid_cache_iterate_arg
+{
+ my_hash_walk_action action;
+ void *argument;
+};
+
+static my_bool xid_cache_iterate_callback(XID_cache_element *element,
+ xid_cache_iterate_arg *arg)
+{
+ my_bool res= FALSE;
+ if (element->lock())
+ {
+ res= arg->action(element, arg->argument);
+ element->unlock();
+ }
+ return res;
+}
+
+static int xid_cache_iterate(THD *thd, my_hash_walk_action action, void *arg)
+{
+ xid_cache_iterate_arg argument= { action, arg };
+ return thd->fix_xid_hash_pins() ? -1 :
+ lf_hash_iterate(&xid_cache, thd->xid_hash_pins,
+ (my_hash_walk_action) xid_cache_iterate_callback,
+ &argument);
+}
+
+
+/**
+  Mark an XA transaction as rollback-only if the RM unilaterally
+ rolled back the transaction branch.
+
+ @note If a rollback was requested by the RM, this function sets
+        the appropriate rollback error code and transitions the state
+ to XA_ROLLBACK_ONLY.
+
+ @return TRUE if transaction was rolled back or if the transaction
+ state is XA_ROLLBACK_ONLY. FALSE otherwise.
+*/
+static bool xa_trans_rolled_back(XID_cache_element *element)
+{
+ if (element->rm_error)
+ {
+ switch (element->rm_error) {
+ case ER_LOCK_WAIT_TIMEOUT:
+ my_error(ER_XA_RBTIMEOUT, MYF(0));
+ break;
+ case ER_LOCK_DEADLOCK:
+ my_error(ER_XA_RBDEADLOCK, MYF(0));
+ break;
+ default:
+ my_error(ER_XA_RBROLLBACK, MYF(0));
+ }
+ element->xa_state= XA_ROLLBACK_ONLY;
+ }
+
+ return element->xa_state == XA_ROLLBACK_ONLY;
+}
+
+
+/**
+  Roll back the active XA transaction.
+
+ @return TRUE if the rollback failed, FALSE otherwise.
+*/
+
+static bool xa_trans_force_rollback(THD *thd)
+{
+ bool rc= false;
+
+ if (ha_rollback_trans(thd, true))
+ {
+ my_error(ER_XAER_RMERR, MYF(0));
+ rc= true;
+ }
+
+ thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
+ thd->transaction.all.reset();
+ thd->server_status&=
+ ~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY);
+ DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS"));
+ xid_cache_delete(thd, &thd->transaction.xid_state);
+
+ trans_track_end_trx(thd);
+
+ return rc;
+}
+
+
+/**
+ Starts an XA transaction with the given xid value.
+
+ @param thd Current thread
+
+ @retval FALSE Success
+ @retval TRUE Failure
+*/
+
+bool trans_xa_start(THD *thd)
+{
+ DBUG_ENTER("trans_xa_start");
+
+ if (thd->transaction.xid_state.is_explicit_XA() &&
+ thd->transaction.xid_state.xid_cache_element->xa_state == XA_IDLE &&
+ thd->lex->xa_opt == XA_RESUME)
+ {
+ bool not_equal=
+ !thd->transaction.xid_state.xid_cache_element->xid.eq(thd->lex->xid);
+ if (not_equal)
+ my_error(ER_XAER_NOTA, MYF(0));
+ else
+ thd->transaction.xid_state.xid_cache_element->xa_state= XA_ACTIVE;
+ DBUG_RETURN(not_equal);
+ }
+
+ /* TODO: JOIN is not supported yet. */
+ if (thd->lex->xa_opt != XA_NONE)
+ my_error(ER_XAER_INVAL, MYF(0));
+ else if (thd->transaction.xid_state.is_explicit_XA())
+ thd->transaction.xid_state.er_xaer_rmfail();
+ else if (thd->locked_tables_mode || thd->in_active_multi_stmt_transaction())
+ my_error(ER_XAER_OUTSIDE, MYF(0));
+ else if (!trans_begin(thd))
+ {
+ if (xid_cache_insert(thd, &thd->transaction.xid_state, thd->lex->xid))
+ {
+ trans_rollback(thd);
+ DBUG_RETURN(true);
+ }
+ DBUG_RETURN(FALSE);
+ }
+
+ DBUG_RETURN(TRUE);
+}
+
+
+/**
+  Put an XA transaction in the IDLE state.
+
+ @param thd Current thread
+
+ @retval FALSE Success
+ @retval TRUE Failure
+*/
+
+bool trans_xa_end(THD *thd)
+{
+ DBUG_ENTER("trans_xa_end");
+
+ /* TODO: SUSPEND and FOR MIGRATE are not supported yet. */
+ if (thd->lex->xa_opt != XA_NONE)
+ my_error(ER_XAER_INVAL, MYF(0));
+ else if (!thd->transaction.xid_state.is_explicit_XA() ||
+ thd->transaction.xid_state.xid_cache_element->xa_state != XA_ACTIVE)
+ thd->transaction.xid_state.er_xaer_rmfail();
+ else if (!thd->transaction.xid_state.xid_cache_element->xid.eq(thd->lex->xid))
+ my_error(ER_XAER_NOTA, MYF(0));
+ else if (!xa_trans_rolled_back(thd->transaction.xid_state.xid_cache_element))
+ thd->transaction.xid_state.xid_cache_element->xa_state= XA_IDLE;
+
+ DBUG_RETURN(thd->is_error() ||
+ thd->transaction.xid_state.xid_cache_element->xa_state != XA_IDLE);
+}
+
+
+/**
+  Put an XA transaction in the PREPARED state.
+
+ @param thd Current thread
+
+ @retval FALSE Success
+ @retval TRUE Failure
+*/
+
+bool trans_xa_prepare(THD *thd)
+{
+ DBUG_ENTER("trans_xa_prepare");
+
+ if (!thd->transaction.xid_state.is_explicit_XA() ||
+ thd->transaction.xid_state.xid_cache_element->xa_state != XA_IDLE)
+ thd->transaction.xid_state.er_xaer_rmfail();
+ else if (!thd->transaction.xid_state.xid_cache_element->xid.eq(thd->lex->xid))
+ my_error(ER_XAER_NOTA, MYF(0));
+ else if (ha_prepare(thd))
+ {
+ xid_cache_delete(thd, &thd->transaction.xid_state);
+ my_error(ER_XA_RBROLLBACK, MYF(0));
+ }
+ else
+ thd->transaction.xid_state.xid_cache_element->xa_state= XA_PREPARED;
+
+ DBUG_RETURN(thd->is_error() ||
+ thd->transaction.xid_state.xid_cache_element->xa_state != XA_PREPARED);
+}
+
+
+/**
+  Commit and terminate an XA transaction.
+
+ @param thd Current thread
+
+ @retval FALSE Success
+ @retval TRUE Failure
+*/
+
+bool trans_xa_commit(THD *thd)
+{
+ bool res= TRUE;
+ DBUG_ENTER("trans_xa_commit");
+
+ if (!thd->transaction.xid_state.is_explicit_XA() ||
+ !thd->transaction.xid_state.xid_cache_element->xid.eq(thd->lex->xid))
+ {
+ if (thd->fix_xid_hash_pins())
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ if (auto xs= xid_cache_search(thd, thd->lex->xid))
+ {
+ res= xa_trans_rolled_back(xs);
+ ha_commit_or_rollback_by_xid(thd->lex->xid, !res);
+ xid_cache_delete(thd, xs);
+ }
+ else
+ my_error(ER_XAER_NOTA, MYF(0));
+ DBUG_RETURN(res);
+ }
+
+ if (xa_trans_rolled_back(thd->transaction.xid_state.xid_cache_element))
+ {
+ xa_trans_force_rollback(thd);
+ DBUG_RETURN(thd->is_error());
+ }
+ else if (thd->transaction.xid_state.xid_cache_element->xa_state == XA_IDLE &&
+ thd->lex->xa_opt == XA_ONE_PHASE)
+ {
+ int r= ha_commit_trans(thd, TRUE);
+ if ((res= MY_TEST(r)))
+ my_error(r == 1 ? ER_XA_RBROLLBACK : ER_XAER_RMERR, MYF(0));
+ }
+ else if (thd->transaction.xid_state.xid_cache_element->xa_state == XA_PREPARED &&
+ thd->lex->xa_opt == XA_NONE)
+ {
+ MDL_request mdl_request;
+
+ /*
+ Acquire metadata lock which will ensure that COMMIT is blocked
+ by active FLUSH TABLES WITH READ LOCK (and vice versa COMMIT in
+ progress blocks FTWRL).
+
+ We allow FLUSHer to COMMIT; we assume FLUSHer knows what it does.
+ */
+ mdl_request.init(MDL_key::BACKUP, "", "", MDL_BACKUP_COMMIT,
+ MDL_TRANSACTION);
+
+ if (thd->mdl_context.acquire_lock(&mdl_request,
+ thd->variables.lock_wait_timeout))
+ {
+ ha_rollback_trans(thd, TRUE);
+ my_error(ER_XAER_RMERR, MYF(0));
+ }
+ else
+ {
+ DEBUG_SYNC(thd, "trans_xa_commit_after_acquire_commit_lock");
+
+ res= MY_TEST(ha_commit_one_phase(thd, 1));
+ if (res)
+ my_error(ER_XAER_RMERR, MYF(0));
+ }
+ }
+ else
+ {
+ thd->transaction.xid_state.er_xaer_rmfail();
+ DBUG_RETURN(TRUE);
+ }
+
+ thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_KEEP_LOG);
+ thd->transaction.all.reset();
+ thd->server_status&=
+ ~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY);
+ DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS"));
+ xid_cache_delete(thd, &thd->transaction.xid_state);
+
+ trans_track_end_trx(thd);
+
+ DBUG_RETURN(res);
+}
+
+
+/**
+  Roll back and terminate an XA transaction.
+
+ @param thd Current thread
+
+ @retval FALSE Success
+ @retval TRUE Failure
+*/
+
+bool trans_xa_rollback(THD *thd)
+{
+ DBUG_ENTER("trans_xa_rollback");
+
+ if (!thd->transaction.xid_state.is_explicit_XA() ||
+ !thd->transaction.xid_state.xid_cache_element->xid.eq(thd->lex->xid))
+ {
+ if (thd->fix_xid_hash_pins())
+ {
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ if (auto xs= xid_cache_search(thd, thd->lex->xid))
+ {
+ xa_trans_rolled_back(xs);
+ ha_commit_or_rollback_by_xid(thd->lex->xid, 0);
+ xid_cache_delete(thd, xs);
+ }
+ else
+ my_error(ER_XAER_NOTA, MYF(0));
+ DBUG_RETURN(thd->get_stmt_da()->is_error());
+ }
+
+ if (thd->transaction.xid_state.xid_cache_element->xa_state == XA_ACTIVE)
+ {
+ thd->transaction.xid_state.er_xaer_rmfail();
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(xa_trans_force_rollback(thd));
+}
+
+
+bool trans_xa_detach(THD *thd)
+{
+ DBUG_ASSERT(thd->transaction.xid_state.is_explicit_XA());
+#if 1
+ return xa_trans_force_rollback(thd);
+#else
+ if (thd->transaction.xid_state.xid_cache_element->xa_state != XA_PREPARED)
+ return xa_trans_force_rollback(thd);
+ thd->transaction.xid_state.xid_cache_element->acquired_to_recovered();
+ thd->transaction.xid_state.xid_cache_element= 0;
+ thd->transaction.cleanup();
+
+ Ha_trx_info *ha_info, *ha_info_next;
+ for (ha_info= thd->transaction.all.ha_list;
+ ha_info;
+ ha_info= ha_info_next)
+ {
+ ha_info_next= ha_info->next();
+ ha_info->reset(); /* keep it conveniently zero-filled */
+ }
+
+ thd->transaction.all.ha_list= 0;
+ thd->transaction.all.no_2pc= 0;
+ return false;
+#endif
+}
+
+
+/**
+  Return the XID as it appears in the SQL function's arguments,
+  so this string can be passed to XA START, XA PREPARE etc.
+
+ @note
+ the 'buf' has to have space for at least SQL_XIDSIZE bytes.
+*/
+
+
+/*
+ 'a'..'z' 'A'..'Z', '0'..'9'
+  The characters 'a'..'z', 'A'..'Z', '0'..'9',
+  '-', '_' and ' ' do not have to be
+  converted.
+
+static const char xid_needs_conv[128]=
+{
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,
+ 0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
+ 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,
+ 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1
+};
+
+/*
+ The size of XID string representation in the form
+ 'gtrid', 'bqual', formatID
+ see xid_t::get_sql_string() for details.
+*/
+#define SQL_XIDSIZE (XIDDATASIZE * 2 + 8 + MY_INT64_NUM_DECIMAL_DIGITS)
+
+/* The 'buf' has to have space for at least SQL_XIDSIZE bytes. */
+static uint get_sql_xid(XID *xid, char *buf)
+{
+ int tot_len= xid->gtrid_length + xid->bqual_length;
+ int i;
+ const char *orig_buf= buf;
+
+ for (i=0; i<tot_len; i++)
+ {
+ uchar c= ((uchar *) xid->data)[i];
+ if (c >= 128 || xid_needs_conv[c])
+ break;
+ }
+
+ if (i >= tot_len)
+ {
+ /* No need to convert characters to hexadecimals. */
+ *buf++= '\'';
+ memcpy(buf, xid->data, xid->gtrid_length);
+ buf+= xid->gtrid_length;
+ *buf++= '\'';
+ if (xid->bqual_length > 0 || xid->formatID != 1)
+ {
+ *buf++= ',';
+ *buf++= '\'';
+ memcpy(buf, xid->data+xid->gtrid_length, xid->bqual_length);
+ buf+= xid->bqual_length;
+ *buf++= '\'';
+ }
+ }
+ else
+ {
+ *buf++= 'X';
+ *buf++= '\'';
+ for (i= 0; i < xid->gtrid_length; i++)
+ {
+ *buf++=_dig_vec_lower[((uchar*) xid->data)[i] >> 4];
+ *buf++=_dig_vec_lower[((uchar*) xid->data)[i] & 0x0f];
+ }
+ *buf++= '\'';
+ if (xid->bqual_length > 0 || xid->formatID != 1)
+ {
+ *buf++= ',';
+ *buf++= 'X';
+ *buf++= '\'';
+ for (; i < tot_len; i++)
+ {
+ *buf++=_dig_vec_lower[((uchar*) xid->data)[i] >> 4];
+ *buf++=_dig_vec_lower[((uchar*) xid->data)[i] & 0x0f];
+ }
+ *buf++= '\'';
+ }
+ }
+
+ if (xid->formatID != 1)
+ {
+ *buf++= ',';
+ buf+= my_longlong10_to_str_8bit(&my_charset_bin, buf,
+ MY_INT64_NUM_DECIMAL_DIGITS, -10, xid->formatID);
+ }
+
+ return (uint)(buf - orig_buf);
+}
+
+
+/**
+  Return the list of XIDs to a client, the same way SHOW commands do.
+
+ @note
+  Nothing in the XA specs appears to forbid an RM from returning the same XID
+  twice, so mysql_xa_recover does not filter XIDs to ensure uniqueness.
+  This can easily be fixed later, if necessary.
+*/
+
+static my_bool xa_recover_callback(XID_cache_element *xs, Protocol *protocol,
+ char *data, uint data_len, CHARSET_INFO *data_cs)
+{
+ if (xs->xa_state == XA_PREPARED)
+ {
+ protocol->prepare_for_resend();
+ protocol->store_longlong((longlong) xs->xid.formatID, FALSE);
+ protocol->store_longlong((longlong) xs->xid.gtrid_length, FALSE);
+ protocol->store_longlong((longlong) xs->xid.bqual_length, FALSE);
+ protocol->store(data, data_len, data_cs);
+ if (protocol->write())
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+static my_bool xa_recover_callback_short(XID_cache_element *xs,
+ Protocol *protocol)
+{
+ return xa_recover_callback(xs, protocol, xs->xid.data,
+ xs->xid.gtrid_length + xs->xid.bqual_length, &my_charset_bin);
+}
+
+
+static my_bool xa_recover_callback_verbose(XID_cache_element *xs,
+ Protocol *protocol)
+{
+ char buf[SQL_XIDSIZE];
+ uint len= get_sql_xid(&xs->xid, buf);
+ return xa_recover_callback(xs, protocol, buf, len,
+ &my_charset_utf8_general_ci);
+}
+
+
+bool mysql_xa_recover(THD *thd)
+{
+ List<Item> field_list;
+ Protocol *protocol= thd->protocol;
+ MEM_ROOT *mem_root= thd->mem_root;
+ my_hash_walk_action action;
+ DBUG_ENTER("mysql_xa_recover");
+
+ field_list.push_back(new (mem_root)
+ Item_int(thd, "formatID", 0,
+ MY_INT32_NUM_DECIMAL_DIGITS), mem_root);
+ field_list.push_back(new (mem_root)
+ Item_int(thd, "gtrid_length", 0,
+ MY_INT32_NUM_DECIMAL_DIGITS), mem_root);
+ field_list.push_back(new (mem_root)
+ Item_int(thd, "bqual_length", 0,
+ MY_INT32_NUM_DECIMAL_DIGITS), mem_root);
+ {
+ uint len;
+ CHARSET_INFO *cs;
+
+ if (thd->lex->verbose)
+ {
+ len= SQL_XIDSIZE;
+ cs= &my_charset_utf8_general_ci;
+ action= (my_hash_walk_action) xa_recover_callback_verbose;
+ }
+ else
+ {
+ len= XIDDATASIZE;
+ cs= &my_charset_bin;
+ action= (my_hash_walk_action) xa_recover_callback_short;
+ }
+
+ field_list.push_back(new (mem_root)
+ Item_empty_string(thd, "data", len, cs), mem_root);
+ }
+
+ if (protocol->send_result_set_metadata(&field_list,
+ Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
+ DBUG_RETURN(1);
+
+ if (xid_cache_iterate(thd, action, protocol))
+ DBUG_RETURN(1);
+ my_eof(thd);
+ DBUG_RETURN(0);
+}
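
As a rough usage illustration (not part of this patch) of the m_state protocol documented in XID_cache_element above: XA RECOVER takes a short-lived reference with lock()/unlock() while iterating the cache, and XA COMMIT/ROLLBACK of a recovered transaction claims ownership with acquire_recovered(). The wrapper names visit_element and claim_recovered are hypothetical; only the member functions defined above are assumed:

  static void visit_element(XID_cache_element *element)
  {
    if (element->lock())   /* ref count++; fails unless ACQUIRED or RECOVERED is set */
    {
      /* element->xid and element->xa_state can be read safely here */
      element->unlock();   /* ref count-- with release semantics */
    }
  }

  static bool claim_recovered(XID_cache_element *element)
  {
    /* atomically sets ACQUIRED only if the element is RECOVERED and not yet owned */
    return element->acquire_recovered();
  }
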
diff --git a/sql/xa.h b/sql/xa.h
new file mode 100644
index 00000000000..7cf74efad35
--- /dev/null
+++ b/sql/xa.h
@@ -0,0 +1,44 @@
+/*
+ Copyright (c) 2000, 2016, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2019, MariaDB Corporation.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+*/
+
+
+class XID_cache_element;
+
+struct XID_STATE {
+ XID_cache_element *xid_cache_element;
+
+ bool check_has_uncommitted_xa() const;
+ bool is_explicit_XA() const { return xid_cache_element != 0; }
+ void set_error(uint error);
+ void er_xaer_rmfail() const;
+ XID *get_xid() const;
+};
+
+void xid_cache_init(void);
+void xid_cache_free(void);
+bool xid_cache_insert(XID *xid);
+bool xid_cache_insert(THD *thd, XID_STATE *xid_state, XID *xid);
+void xid_cache_delete(THD *thd, XID_STATE *xid_state);
+
+bool trans_xa_start(THD *thd);
+bool trans_xa_end(THD *thd);
+bool trans_xa_prepare(THD *thd);
+bool trans_xa_commit(THD *thd);
+bool trans_xa_rollback(THD *thd);
+bool trans_xa_detach(THD *thd);
+bool mysql_xa_recover(THD *thd);
diff --git a/storage/archive/azio.c b/storage/archive/azio.c
index 0f66b999c94..3529d875f72 100644
--- a/storage/archive/azio.c
+++ b/storage/archive/azio.c
@@ -866,7 +866,10 @@ int azclose (azio_stream *s)
if (s->mode == 'w')
{
if (do_flush(s, Z_FINISH) != Z_OK)
- return destroy(s);
+ {
+ destroy(s);
+ return Z_ERRNO;
+ }
putLong(s->file, s->crc);
putLong(s->file, (uLong)(s->in & 0xffffffff));
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index 9d7f7e66b28..5cc878388ca 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -802,7 +802,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
#endif /* HAVE_READLINK */
{
if (create_info->data_file_name)
- my_error(WARN_OPTION_IGNORED, MYF(ME_JUST_WARNING), "DATA DIRECTORY");
+ my_error(WARN_OPTION_IGNORED, MYF(ME_WARNING), "DATA DIRECTORY");
fn_format(name_buff, name, "", ARZ,
MY_REPLACE_EXT | MY_UNPACK_FILENAME);
@@ -811,7 +811,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
/* Archive engine never uses INDEX DIRECTORY. */
if (create_info->index_file_name)
- my_error(WARN_OPTION_IGNORED, MYF(ME_JUST_WARNING), "INDEX DIRECTORY");
+ my_error(WARN_OPTION_IGNORED, MYF(ME_WARNING), "INDEX DIRECTORY");
/*
There is a chance that the file was "discovered". In this case
@@ -980,7 +980,7 @@ int ha_archive::write_row(uchar *buf)
if (table->next_number_field && record == table->record[0])
{
- KEY *mkey= &table->s->key_info[0]; // We only support one key right now
+ KEY *mkey= &table->key_info[0]; // We only support one key right now
update_auto_increment();
temp_auto= table->next_number_field->val_int();
@@ -1098,7 +1098,7 @@ int ha_archive::index_read_idx(uchar *buf, uint index, const uchar *key,
{
int rc;
bool found= 0;
- KEY *mkey= &table->s->key_info[index];
+ KEY *mkey= &table->key_info[index];
current_k_offset= mkey->key_part->offset;
current_key= key;
current_key_len= key_len;
@@ -1753,6 +1753,20 @@ void ha_archive::flush_and_clear_pending_writes()
}
+int ha_archive::extra(enum ha_extra_function operation)
+{
+ switch (operation) {
+ case HA_EXTRA_FLUSH:
+ mysql_mutex_lock(&share->mutex);
+ share->close_archive_writer();
+ mysql_mutex_unlock(&share->mutex);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
/*
This method tells us that a bulk insert operation is about to occur. We set
a flag which will keep write_row from saying that its data is dirty. This in
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index a74374a340f..b61e6bc67e7 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -108,7 +108,7 @@ public:
return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_CAN_BIT_FIELD |
HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
HA_STATS_RECORDS_IS_EXACT | HA_CAN_EXPORT |
- HA_HAS_RECORDS | HA_CAN_REPAIR |
+ HA_HAS_RECORDS | HA_CAN_REPAIR | HA_SLOW_RND_POS |
HA_FILE_BASED | HA_CAN_INSERT_DELAYED | HA_CAN_GEOMETRY);
}
ulong index_flags(uint idx, uint part, bool all_parts) const
@@ -148,6 +148,7 @@ public:
int read_data_header(azio_stream *file_to_read);
void position(const uchar *record);
int info(uint);
+ int extra(enum ha_extra_function operation);
void update_create_info(HA_CREATE_INFO *create_info);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
int optimize(THD* thd, HA_CHECK_OPT* check_opt);
diff --git a/storage/blackhole/ha_blackhole.h b/storage/blackhole/ha_blackhole.h
index 9a4b34809f8..345d6683938 100644
--- a/storage/blackhole/ha_blackhole.h
+++ b/storage/blackhole/ha_blackhole.h
@@ -55,7 +55,7 @@ public:
{
return(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
HA_BINLOG_STMT_CAPABLE | HA_BINLOG_ROW_CAPABLE |
- HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
+ HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY | HA_CAN_ONLINE_BACKUPS |
HA_FILE_BASED | HA_CAN_GEOMETRY | HA_CAN_INSERT_DELAYED);
}
ulong index_flags(uint inx, uint part, bool all_parts) const
diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp
index cd1785b48ac..0bf31fdb5fa 100644
--- a/storage/connect/array.cpp
+++ b/storage/connect/array.cpp
@@ -618,10 +618,12 @@ int ARRAY::Convert(PGLOBAL g, int k, PVAL vp)
/* Converting STRING to DATE can be done according to date format. */
/*********************************************************************/
if (Type == TYPE_DATE && ovblp->GetType() == TYPE_STRING && vp)
+ {
if (((DTVAL*)Value)->SetFormat(g, vp))
return TYPE_ERROR;
else
b = true; // Sort the new array on date internal values
+ }
/*********************************************************************/
/* Do the actual conversion. */
diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc
index 08c99dfdebd..d1b5e728cef 100644
--- a/storage/connect/ha_connect.cc
+++ b/storage/connect/ha_connect.cc
@@ -1677,10 +1677,7 @@ bool ha_connect::GetIndexOption(KEY *kp, PCSZ opname)
/****************************************************************************/
bool ha_connect::IsUnique(uint n)
{
- TABLE_SHARE *s= (table) ? table->s : NULL;
- KEY kp= s->key_info[n];
-
- return (kp.flags & 1) != 0;
+ return (table->key_info[n].flags & HA_NOSAME) != 0;
} // end of IsUnique
/****************************************************************************/
@@ -1950,7 +1947,7 @@ int ha_connect::OpenTable(PGLOBAL g, bool del)
k1= k2= 0;
n1= n2= 1; // 1 is space for final null character
- for (field= table->field; fp= *field; field++) {
+ for (field= table->field; (fp= *field); field++) {
if (bitmap_is_set(map, fp->field_index)) {
n1+= (fp->field_name.length + 1);
k1++;
@@ -1966,7 +1963,7 @@ int ha_connect::OpenTable(PGLOBAL g, bool del)
if (k1) {
p= c1= (char*)PlugSubAlloc(g, NULL, n1);
- for (field= table->field; fp= *field; field++)
+ for (field= table->field; (fp= *field); field++)
if (bitmap_is_set(map, fp->field_index)) {
strcpy(p, fp->field_name.str);
p+= (fp->field_name.length + 1);
@@ -1978,7 +1975,7 @@ int ha_connect::OpenTable(PGLOBAL g, bool del)
if (k2) {
p= c2= (char*)PlugSubAlloc(g, NULL, n2);
- for (field= table->field; fp= *field; field++)
+ for (field= table->field; (fp= *field); field++)
if (bitmap_is_set(ump, fp->field_index)) {
strcpy(p, fp->field_name.str);
@@ -2005,11 +2002,13 @@ int ha_connect::OpenTable(PGLOBAL g, bool del)
istable= true;
// strmake(tname, table_name, sizeof(tname)-1);
+#ifdef NOT_USED_VARIABLE
// We may be in a create index query
if (xmod == MODE_ANY && *tdbp->GetName() != '#') {
// The current indexes
PIXDEF oldpix= GetIndexInfo();
} // endif xmod
+#endif
} else
htrc("OpenTable: %s\n", g->Message);
@@ -2036,7 +2035,7 @@ bool ha_connect::CheckColumnList(PGLOBAL g)
MY_BITMAP *map= table->read_set;
try {
- for (field= table->field; fp= *field; field++)
+ for (field= table->field; (fp= *field); field++)
if (bitmap_is_set(map, fp->field_index)) {
if (!(colp= tdbp->ColDB(g, (PSZ)fp->field_name.str, 0))) {
sprintf(g->Message, "Column %s not found in %s",
@@ -2109,9 +2108,8 @@ int ha_connect::MakeRecord(char *buf)
DBUG_ENTER("ha_connect::MakeRecord");
if (trace(2))
- htrc("Maps: read=%08X write=%08X vcol=%08X defr=%08X defw=%08X\n",
+ htrc("Maps: read=%08X write=%08X defr=%08X defw=%08X\n",
*table->read_set->bitmap, *table->write_set->bitmap,
- (table->vcol_set) ? *table->vcol_set->bitmap : 0,
*table->def_read_set.bitmap, *table->def_write_set.bitmap);
// Avoid asserts in field::store() for columns that are not updated
@@ -2751,37 +2749,40 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond)
if (!i && (ismul))
return NULL;
- switch (args[i]->real_type()) {
- case COND::STRING_ITEM:
- res= pval->val_str(&tmp);
- pp->Value= PlugSubAllocStr(g, NULL, res->ptr(), res->length());
- pp->Type= (pp->Value) ? TYPE_STRING : TYPE_ERROR;
- break;
- case COND::INT_ITEM:
- pp->Type= TYPE_INT;
- pp->Value= PlugSubAlloc(g, NULL, sizeof(int));
- *((int*)pp->Value)= (int)pval->val_int();
- break;
- case COND::DATE_ITEM:
- pp->Type= TYPE_DATE;
- pp->Value= PlugSubAlloc(g, NULL, sizeof(int));
- *((int*)pp->Value)= (int)pval->val_int_from_date();
- break;
- case COND::REAL_ITEM:
- pp->Type= TYPE_DOUBLE;
- pp->Value= PlugSubAlloc(g, NULL, sizeof(double));
- *((double*)pp->Value)= pval->val_real();
- break;
- case COND::DECIMAL_ITEM:
- pp->Type= TYPE_DOUBLE;
- pp->Value= PlugSubAlloc(g, NULL, sizeof(double));
- *((double*)pp->Value)= pval->val_real_from_decimal();
- break;
+ switch (args[i]->real_type()) {
+ case COND::CONST_ITEM:
+ switch (args[i]->cmp_type()) {
+ case STRING_RESULT:
+ res= pval->val_str(&tmp);
+ pp->Value= PlugSubAllocStr(g, NULL, res->ptr(), res->length());
+ pp->Type= (pp->Value) ? TYPE_STRING : TYPE_ERROR;
+ break;
+ case INT_RESULT:
+ pp->Type= TYPE_INT;
+ pp->Value= PlugSubAlloc(g, NULL, sizeof(int));
+ *((int*)pp->Value)= (int)pval->val_int();
+ break;
+ case TIME_RESULT:
+ pp->Type= TYPE_DATE;
+ pp->Value= PlugSubAlloc(g, NULL, sizeof(int));
+ *((int*)pp->Value)= (int) Temporal_hybrid(pval).to_longlong();
+ break;
+ case REAL_RESULT:
+ case DECIMAL_RESULT:
+ pp->Type= TYPE_DOUBLE;
+ pp->Value= PlugSubAlloc(g, NULL, sizeof(double));
+ *((double*)pp->Value)= pval->val_real();
+ break;
+ case ROW_RESULT:
+ DBUG_ASSERT(0);
+ return NULL;
+ }
+ break;
case COND::CACHE_ITEM: // Possible ???
case COND::NULL_ITEM: // TODO: handle this
default:
return NULL;
- } // endswitch type
+ } // endswitch type
if (trace(1))
htrc("Value type=%hd\n", pp->Type);
@@ -3033,12 +3034,8 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
Item::Type type= args[i]->real_type();
switch (type) {
- case COND::STRING_ITEM:
- case COND::INT_ITEM:
- case COND::REAL_ITEM:
+ case COND::CONST_ITEM:
case COND::NULL_ITEM:
- case COND::DECIMAL_ITEM:
- case COND::DATE_ITEM:
case COND::CACHE_ITEM:
break;
default:
@@ -3074,14 +3071,14 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
strcat(s, "'}");
break;
} // endif ODBC
-
- // fall through
+ // fall through
case MYSQL_TYPE_DATE:
if (tty == TYPE_AM_ODBC) {
strcat(s, "{d '");
strcat(strncat(s, res->ptr(), res->length()), "'}");
break;
} // endif ODBC
+ // fall through
case MYSQL_TYPE_TIME:
if (tty == TYPE_AM_ODBC) {
@@ -3089,6 +3086,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond)
strcat(strncat(s, res->ptr(), res->length()), "'}");
break;
} // endif ODBC
+ // fall through
case MYSQL_TYPE_VARCHAR:
if (tty == TYPE_AM_ODBC && i) {
@@ -4277,8 +4275,6 @@ int ha_connect::info(uint flag)
// tdbp must be available to get updated info
if (xp->CheckQuery(valid_query_id) || !tdbp) {
- PDBUSER dup= PlgGetUser(g);
- PCATLG cat= (dup) ? dup->Catalog : NULL;
if (xmod == MODE_ANY || xmod == MODE_ALTER) {
// Pure info, not a query
@@ -4581,12 +4577,14 @@ MODE ha_connect::CheckMode(PGLOBAL g, THD *thd,
// break;
case SQLCOM_DELETE_MULTI:
*cras = true;
+ // fall through
case SQLCOM_DELETE:
case SQLCOM_TRUNCATE:
newmode= MODE_DELETE;
break;
case SQLCOM_UPDATE_MULTI:
*cras = true;
+ // fall through
case SQLCOM_UPDATE:
newmode= MODE_UPDATE;
break;
@@ -4596,6 +4594,7 @@ MODE ha_connect::CheckMode(PGLOBAL g, THD *thd,
break;
case SQLCOM_FLUSH:
locked= 0;
+ // fall through
case SQLCOM_DROP_TABLE:
case SQLCOM_RENAME_TABLE:
newmode= MODE_ANY;
@@ -4692,7 +4691,6 @@ MODE ha_connect::CheckMode(PGLOBAL g, THD *thd,
int ha_connect::start_stmt(THD *thd, thr_lock_type lock_type)
{
- int rc= 0;
bool chk=false, cras= false;
MODE newmode;
PGLOBAL g= GetPlug(thd, xp);
@@ -5521,7 +5519,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
PCSZ nsp= NULL, cls= NULL;
#endif // __WIN__
//int hdr, mxe;
- int port = 0, mxr = 0, rc = 0, mul = 0, lrecl = 0;
+ int port = 0, mxr __attribute__((unused)) = 0, rc = 0, mul = 0;
//PCSZ tabtyp = NULL;
#if defined(ODBC_SUPPORT)
POPARM sop= NULL;
@@ -5545,8 +5543,6 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
if (!g)
return HA_ERR_INTERNAL_ERROR;
- PDBUSER dup= PlgGetUser(g);
- PCATLG cat= (dup) ? dup->Catalog : NULL;
PTOS topt= table_s->option_struct;
char buf[1024];
String sql(buf, sizeof(buf), system_charset_info);
@@ -5776,6 +5772,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
#endif // __WIN__
case TAB_PIVOT:
supfnc = FNC_NO;
+ // fall through
case TAB_PRX:
case TAB_TBL:
case TAB_XCL:
@@ -6000,7 +5997,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
} // endfor crp
} else {
- char *schem = NULL;
+ char *schem __attribute__((unused)) = NULL;
char *tn = NULL;
// Not a catalog table
diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h
index 789b4ba3ce6..5f853d843ed 100644
--- a/storage/connect/ha_connect.h
+++ b/storage/connect/ha_connect.h
@@ -548,3 +548,7 @@ public:
uint int_table_flags; // Inherited from MyISAM
bool enable_activate_all_index; // Inherited from MyISAM
}; // end of ha_connect class definition
+
+#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT)
+bool MongoEnabled(void);
+#endif // JAVA_SUPPORT || CMGO_SUPPORT
diff --git a/storage/connect/mysql-test/connect/r/mysql_index.result b/storage/connect/mysql-test/connect/r/mysql_index.result
index dd1864529ca..b0c88b16fef 100644
--- a/storage/connect/mysql-test/connect/r/mysql_index.result
+++ b/storage/connect/mysql-test/connect/r/mysql_index.result
@@ -45,10 +45,9 @@ id msg
SELECT * FROM t2 WHERE id IN (2,4) AND msg = 'Two';
id msg
2 Two
-SELECT * FROM t2 WHERE id > 3;
+SELECT * FROM t2 WHERE id > 4;
id msg
5 Cinq
-4 Four
6 Six
SELECT * FROM t2 WHERE id >= 3;
id msg
@@ -60,10 +59,9 @@ SELECT * FROM t2 WHERE id < 3;
id msg
1 Un
2 Two
-SELECT * FROM t2 WHERE id < 3 OR id > 4;
+SELECT * FROM t2 WHERE id < 2 OR id > 4;
id msg
1 Un
-2 Two
5 Cinq
6 Six
SELECT * FROM t2 WHERE id <= 3;
@@ -166,141 +164,141 @@ matricule nom prenom sexe aanais mmnais ddentree ddnom brut net service sitmat f
4974 LONES GERARD 1 1959 10 1979-01-01 1994-12-01 16081 12916.70 0 M SANS
SELECT matricule, nom, prenom FROM t2 WHERE nom IN ('FOCH','MOGADOR');
matricule nom prenom
+3368 MOGADOR ALAIN
1977 FOCH BERNADETTE
-5707 FOCH DENIS
+4080 FOCH SERGE
2552 FOCH FRANCK
+5707 FOCH DENIS
2634 FOCH JOCELYNE
5765 FOCH ROBERT
-4080 FOCH SERGE
-3368 MOGADOR ALAIN
SELECT matricule, nom, prenom FROM t2 WHERE nom = 'FOCH' OR nom = 'MOGADOR';
matricule nom prenom
+3368 MOGADOR ALAIN
1977 FOCH BERNADETTE
-5707 FOCH DENIS
+4080 FOCH SERGE
2552 FOCH FRANCK
+5707 FOCH DENIS
2634 FOCH JOCELYNE
5765 FOCH ROBERT
-4080 FOCH SERGE
-3368 MOGADOR ALAIN
SELECT matricule, nom, prenom FROM t2 WHERE nom < 'ADDAX';
matricule nom prenom
-4552 ABBADIE MONIQUE
-307 ABBAYE ANNICK
-6627 ABBAYE GERALD
-7961 ABBE KATIA
1340 ABBE MICHELE
-9270 ABBE SOPHIE
+2728 ABOUT CATHERINE MARIE
+895 ABORD CHANTAL
+4038 ADAM JANICK
+6627 ABBAYE GERALD
+6124 ABELIAS DELIA
+4552 ABBADIE MONIQUE
+8673 ABEL JEAN PIERRE
+3395 ADAM JEAN CLAUDE
2945 ABBEVILLE PASCAL
-8596 ABEBERRY PATRICK
+115 ACHILLE JACQUES
6399 ABEILLES RENE
-8673 ABEL JEAN PIERRE
-6124 ABELIAS DELIA
-6314 ABERDEN EVELYNE
-895 ABORD CHANTAL
-2728 ABOUT CATHERINE MARIE
+8596 ABEBERRY PATRICK
+9270 ABBE SOPHIE
398 ABREUVOIR JEAN LUC
-1122 ACACIAS SERGE
+7961 ABBE KATIA
+307 ABBAYE ANNICK
+6314 ABERDEN EVELYNE
1644 ACARDIE BEATE
-115 ACHILLE JACQUES
-4038 ADAM JANICK
-3395 ADAM JEAN CLAUDE
+1122 ACACIAS SERGE
SELECT matricule, nom, prenom FROM t2 WHERE nom <= 'ABEL';
matricule nom prenom
-4552 ABBADIE MONIQUE
-307 ABBAYE ANNICK
-6627 ABBAYE GERALD
-7961 ABBE KATIA
1340 ABBE MICHELE
-9270 ABBE SOPHIE
+6627 ABBAYE GERALD
+4552 ABBADIE MONIQUE
+8673 ABEL JEAN PIERRE
2945 ABBEVILLE PASCAL
-8596 ABEBERRY PATRICK
6399 ABEILLES RENE
-8673 ABEL JEAN PIERRE
+8596 ABEBERRY PATRICK
+9270 ABBE SOPHIE
+7961 ABBE KATIA
+307 ABBAYE ANNICK
SELECT matricule, nom, prenom FROM t2 WHERE nom > 'YVON';
matricule nom prenom
9742 YZENGREMER MICHEL
-8738 ZILINA JEAN LOUIS
5357 ZOLA BERNARD
5441 ZOLA BRIGITTE
-1325 ZOLA CHRISTINE
-4859 ZORI CATHERINE
4102 ZOUAVES ALAIN
+4859 ZORI CATHERINE
+1325 ZOLA CHRISTINE
+8738 ZILINA JEAN LOUIS
SELECT matricule, nom, prenom FROM t2 WHERE nom >= 'YVON';
matricule nom prenom
-5389 YVON CAROLE
9742 YZENGREMER MICHEL
-8738 ZILINA JEAN LOUIS
5357 ZOLA BERNARD
+5389 YVON CAROLE
5441 ZOLA BRIGITTE
-1325 ZOLA CHRISTINE
-4859 ZORI CATHERINE
4102 ZOUAVES ALAIN
+4859 ZORI CATHERINE
+1325 ZOLA CHRISTINE
+8738 ZILINA JEAN LOUIS
SELECT matricule, nom, prenom FROM t2 WHERE nom <= 'ABEL' OR nom > 'YVON';
matricule nom prenom
-4552 ABBADIE MONIQUE
-307 ABBAYE ANNICK
-6627 ABBAYE GERALD
-7961 ABBE KATIA
-1340 ABBE MICHELE
-9270 ABBE SOPHIE
-2945 ABBEVILLE PASCAL
-8596 ABEBERRY PATRICK
-6399 ABEILLES RENE
-8673 ABEL JEAN PIERRE
9742 YZENGREMER MICHEL
-8738 ZILINA JEAN LOUIS
+1340 ABBE MICHELE
5357 ZOLA BERNARD
+6627 ABBAYE GERALD
+4552 ABBADIE MONIQUE
5441 ZOLA BRIGITTE
-1325 ZOLA CHRISTINE
-4859 ZORI CATHERINE
4102 ZOUAVES ALAIN
+8673 ABEL JEAN PIERRE
+4859 ZORI CATHERINE
+2945 ABBEVILLE PASCAL
+1325 ZOLA CHRISTINE
+6399 ABEILLES RENE
+8596 ABEBERRY PATRICK
+9270 ABBE SOPHIE
+7961 ABBE KATIA
+307 ABBAYE ANNICK
+8738 ZILINA JEAN LOUIS
SELECT matricule, nom, prenom FROM t2 WHERE nom > 'HELEN' AND nom < 'HEROS';
matricule nom prenom
-9096 HELENA PHILIPPE
-3309 HELENE ISABELLE
-8365 HELIOTROPES LISE
-4666 HELLEN PIERRE
-5781 HELSINKI DANIELLE
+2085 HEOL GUY PAUL
+2673 HENNER LILIANE
+7093 HERAULTS DANIEL
7626 HENIN PHILIPPE
+403 HERMITTE PHILIPPE
4254 HENIN SERGE
-2673 HENNER LILIANE
+4666 HELLEN PIERRE
+3309 HELENE ISABELLE
+9749 HEROLD ISABELLE
9716 HENRI JACQUES
-2085 HEOL GUY PAUL
-2579 HERANDIERE PIERRE
-7093 HERAULTS DANIEL
+1291 HERMITAGE XAVIER
+8365 HELIOTROPES LISE
4050 HERBILLON FRANCOIS
9231 HERBILLON MADELEINE
-1291 HERMITAGE XAVIER
+9096 HELENA PHILIPPE
+5781 HELSINKI DANIELLE
+2579 HERANDIERE PIERRE
6185 HERMITTE FRANCOIS
-403 HERMITTE PHILIPPE
-9749 HEROLD ISABELLE
SELECT matricule, nom, prenom FROM t2 WHERE nom BETWEEN 'HELEN' AND 'HEROS';
matricule nom prenom
-6199 HELEN MARTIAL
-9096 HELENA PHILIPPE
-3309 HELENE ISABELLE
-8365 HELIOTROPES LISE
-4666 HELLEN PIERRE
-5781 HELSINKI DANIELLE
+2085 HEOL GUY PAUL
+2673 HENNER LILIANE
+7093 HERAULTS DANIEL
7626 HENIN PHILIPPE
+403 HERMITTE PHILIPPE
4254 HENIN SERGE
-2673 HENNER LILIANE
+4666 HELLEN PIERRE
+3309 HELENE ISABELLE
+9749 HEROLD ISABELLE
9716 HENRI JACQUES
-2085 HEOL GUY PAUL
-2579 HERANDIERE PIERRE
-7093 HERAULTS DANIEL
+1291 HERMITAGE XAVIER
+8365 HELIOTROPES LISE
4050 HERBILLON FRANCOIS
9231 HERBILLON MADELEINE
-1291 HERMITAGE XAVIER
-6185 HERMITTE FRANCOIS
-403 HERMITTE PHILIPPE
-9749 HEROLD ISABELLE
8445 HEROS SYLVIE
+9096 HELENA PHILIPPE
+5781 HELSINKI DANIELLE
+2579 HERANDIERE PIERRE
+6199 HELEN MARTIAL
+6185 HERMITTE FRANCOIS
SELECT matricule, nom, prenom FROM t2 WHERE nom BETWEEN 'HELEN' AND 'HEROS' AND prenom = 'PHILIPPE';
matricule nom prenom
-9096 HELENA PHILIPPE
7626 HENIN PHILIPPE
403 HERMITTE PHILIPPE
+9096 HELENA PHILIPPE
SELECT matricule, nom, prenom FROM t2 ORDER BY nom LIMIT 10;
matricule nom prenom
4552 ABBADIE MONIQUE
diff --git a/storage/connect/mysql-test/connect/r/part_file.result b/storage/connect/mysql-test/connect/r/part_file.result
index c679ed95062..3dabd946b50 100644
--- a/storage/connect/mysql-test/connect/r/part_file.result
+++ b/storage/connect/mysql-test/connect/r/part_file.result
@@ -145,14 +145,11 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
SELECT * FROM t1 WHERE id = 10;
rwid rnum prtn tbn fid id msg
1 1 2 t1 part2 10 ten
-EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id >= 10;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id >= 40;
id select_type table partitions type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 2,3 range PRIMARY PRIMARY 4 NULL 7 Using where
-SELECT * FROM t1 WHERE id >= 10;
+1 SIMPLE t1 2,3 range PRIMARY PRIMARY 4 NULL 4 Using where
+SELECT * FROM t1 WHERE id >= 40;
rwid rnum prtn tbn fid id msg
-1 1 2 t1 part2 10 ten
-3 3 2 t1 part2 20 twenty
-4 4 2 t1 part2 35 thirty five
2 2 2 t1 part2 40 forty
1 1 3 t1 part3 60 sixty
3 3 3 t1 part3 72 seventy two
diff --git a/storage/connect/mysql-test/connect/t/mysql_index.test b/storage/connect/mysql-test/connect/t/mysql_index.test
index 81fdcad9330..74dc48f42c8 100644
--- a/storage/connect/mysql-test/connect/t/mysql_index.test
+++ b/storage/connect/mysql-test/connect/t/mysql_index.test
@@ -49,10 +49,10 @@ SELECT * FROM t2;
SELECT * FROM t2 WHERE id = 3;
SELECT * FROM t2 WHERE id IN (2,4);
SELECT * FROM t2 WHERE id IN (2,4) AND msg = 'Two';
-SELECT * FROM t2 WHERE id > 3;
+SELECT * FROM t2 WHERE id > 4;
SELECT * FROM t2 WHERE id >= 3;
SELECT * FROM t2 WHERE id < 3;
-SELECT * FROM t2 WHERE id < 3 OR id > 4;
+SELECT * FROM t2 WHERE id < 2 OR id > 4;
SELECT * FROM t2 WHERE id <= 3;
SELECT * FROM t2 WHERE id BETWEEN 3 AND 5;
SELECT * FROM t2 WHERE id > 2 AND id < 6;
diff --git a/storage/connect/mysql-test/connect/t/part_file.test b/storage/connect/mysql-test/connect/t/part_file.test
index 8ee43a917ec..2e5127f03e5 100644
--- a/storage/connect/mysql-test/connect/t/part_file.test
+++ b/storage/connect/mysql-test/connect/t/part_file.test
@@ -82,8 +82,8 @@ SELECT * FROM t1;
SELECT * FROM t1 order by id;
EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id = 10;
SELECT * FROM t1 WHERE id = 10;
-EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id >= 10;
-SELECT * FROM t1 WHERE id >= 10;
+EXPLAIN PARTITIONS SELECT * FROM t1 WHERE id >= 40;
+SELECT * FROM t1 WHERE id >= 40;
SELECT count(*) FROM t1 WHERE id < 10;
SELECT case when id < 10 then 1 when id < 50 then 2 else 3 end as pn, count(*) FROM t1 group by pn;
SELECT prtn, count(*) FROM t1 group by prtn;
diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp
index e9c7b2490d8..aaf14f123c6 100644
--- a/storage/connect/tabext.cpp
+++ b/storage/connect/tabext.cpp
@@ -342,7 +342,6 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt)
char *catp = NULL, buf[NAM_LEN * 3];
int len;
bool first = true;
- PTABLE tablep = To_Table;
PCOL colp;
if (Srcdef)
@@ -455,6 +454,7 @@ void TDBEXT::RemoveConst(PGLOBAL g, char *stmt)
int n, nc;
while ((p = strstr(stmt, "NAME_CONST")))
+ {
if ((n = sscanf(p, "%*[^,],%1024[^)])%n", val, &nc))) {
if (trace(33))
htrc("p=%s\nn=%d val=%s nc=%d\n", p, n, val, nc);
@@ -478,8 +478,8 @@ void TDBEXT::RemoveConst(PGLOBAL g, char *stmt)
} else
break;
-
- return;
+ }
+ return;
} // end of RemoveConst
/***********************************************************************/
diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp
index ceffafac02c..83c20b26701 100644
--- a/storage/connect/tabmysql.cpp
+++ b/storage/connect/tabmysql.cpp
@@ -1259,7 +1259,7 @@ MYSQLCOL::MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am)
MYSQLCOL::MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am)
: COLBLK(NULL, tdbp, i)
{
- const char *chset = get_charset_name(fld->charsetnr);
+//const char *chset = get_charset_name(fld->charsetnr);
//char v = (!strcmp(chset, "binary")) ? 'B' : 0;
char v = 0;
@@ -1400,6 +1400,7 @@ void MYSQLCOL::ReadColumn(PGLOBAL g)
/* If physical fetching of the line was deferred, do it now. */
/*********************************************************************/
if (!tdbp->Fetched)
+ {
if ((rc = tdbp->Myc.Fetch(g, tdbp->N)) != RC_OK) {
if (rc == RC_EF)
sprintf(g->Message, MSG(INV_DEF_READ), rc);
@@ -1407,7 +1408,7 @@ void MYSQLCOL::ReadColumn(PGLOBAL g)
throw 11;
} else
tdbp->Fetched = true;
-
+ }
if ((buf = ((PTDBMY)To_Tdb)->Myc.GetCharField(Rank))) {
if (trace(2))
htrc("MySQL ReadColumn: name=%s buf=%s\n", Name, buf);
diff --git a/storage/connect/tabxcl.cpp b/storage/connect/tabxcl.cpp
index 93a24accc3c..4634f6a4ded 100644
--- a/storage/connect/tabxcl.cpp
+++ b/storage/connect/tabxcl.cpp
@@ -274,7 +274,8 @@ void XCLCOL::ReadColumn(PGLOBAL g)
PSZ p;
// Trim left
- for (p = Cp; *p == ' '; p++) ;
+ for (p = Cp; *p == ' '; p++)
+ ;
if ((Cp = strchr(Cp, Sep)))
// Separator is found
diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp
index e159efaa989..ac1a67e3ca7 100644
--- a/storage/connect/value.cpp
+++ b/storage/connect/value.cpp
@@ -2599,12 +2599,13 @@ bool DTVAL::MakeDate(PGLOBAL g, int *val, int nval)
// Pass g to have an error return or NULL to set invalid dates to 0
if (MakeTime(&datm))
+ {
if (g) {
strcpy(g->Message, MSG(BAD_DATETIME));
rc = true;
} else
Tval = 0;
-
+ }
return rc;
} // end of MakeDate
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index 0e092e2fd90..f69dd7989a2 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -1311,12 +1311,28 @@ int ha_tina::info(uint flag)
int ha_tina::extra(enum ha_extra_function operation)
{
DBUG_ENTER("ha_tina::extra");
- if (operation == HA_EXTRA_MARK_AS_LOG_TABLE)
- {
- mysql_mutex_lock(&share->mutex);
- share->is_log_table= TRUE;
- mysql_mutex_unlock(&share->mutex);
- }
+ switch (operation) {
+ case HA_EXTRA_MARK_AS_LOG_TABLE:
+ {
+ mysql_mutex_lock(&share->mutex);
+ share->is_log_table= TRUE;
+ mysql_mutex_unlock(&share->mutex);
+ }
+ break;
+ case HA_EXTRA_FLUSH:
+ mysql_mutex_lock(&share->mutex);
+ if (share->tina_write_opened)
+ {
+ (void)write_meta_file(share->meta_file, share->rows_recorded,
+ share->crashed ? TRUE :FALSE);
+ mysql_file_close(share->tina_write_filedes, MYF(0));
+ share->tina_write_opened= FALSE;
+ }
+ mysql_mutex_unlock(&share->mutex);
+ break;
+ default:
+ break;
+ }
DBUG_RETURN(0);
}
@@ -1385,7 +1401,7 @@ int ha_tina::rnd_end()
if (mysql_file_write(update_temp_file,
(uchar*) (file_buff->ptr() +
(write_begin - file_buff->start())),
- (size_t)write_length, MYF_RW))
+ (size_t)write_length, MYF(MY_WME+MY_NABP)))
goto error;
temp_file_length+= write_length;
}
@@ -1571,7 +1587,7 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
write_end= MY_MIN(file_buff->end(), current_position);
if ((write_end - write_begin) &&
(mysql_file_write(repair_file, (uchar*)file_buff->ptr(),
- (size_t) (write_end - write_begin), MYF_RW)))
+ (size_t) (write_end - write_begin), MYF(MY_WME+MY_NABP))))
DBUG_RETURN(-1);
write_begin= write_end;
diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h
index c75a64faa52..5b389d984d6 100644
--- a/storage/csv/ha_tina.h
+++ b/storage/csv/ha_tina.h
@@ -107,7 +107,7 @@ public:
{
return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_NO_AUTO_INCREMENT |
HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | HA_CAN_EXPORT |
- HA_CAN_REPAIR);
+ HA_CAN_REPAIR | HA_SLOW_RND_POS);
}
ulong index_flags(uint idx, uint part, bool all_parts) const
{
diff --git a/storage/federated/ha_federated.h b/storage/federated/ha_federated.h
index 8fe42bd2b08..45318f4c594 100644
--- a/storage/federated/ha_federated.h
+++ b/storage/federated/ha_federated.h
@@ -146,6 +146,7 @@ public:
HA_NO_PREFIX_CHAR_KEYS | HA_PRIMARY_KEY_REQUIRED_FOR_DELETE |
HA_NO_TRANSACTIONS /* until fixed by WL#2952 */ |
HA_PARTIAL_COLUMN_READ | HA_NULL_IN_KEY |
+ HA_CAN_ONLINE_BACKUPS |
HA_CAN_REPAIR);
}
/*
diff --git a/storage/federatedx/TODO b/storage/federatedx/TODO
deleted file mode 100644
index 71330742f4e..00000000000
--- a/storage/federatedx/TODO
+++ /dev/null
@@ -1,30 +0,0 @@
-Features
-
-* Add Pushdown conditions
-* Add other network driver interfaces
-* Handle large result sets
-* Auto-discovery of tables on foreign data sources
-
-Bugs (http://bugs.mysql.com)
-
-20026 2006-05-23 FEDERATED lacks support for auto_increment_increment and auto_increment_offset
-20724 2006-06-27 FEDERATED does not honour SET INSERT_ID
-28269 2007-05-06 Any FEDERATED engine fails to quote reserved words for field names
-25509 2007-01-10 Federated: Failure with non-ASCII characters
-26697 2007-02-27 Every query to a federated table results in a full scan of MyISAM table.
-21360 2006-07-31 Microsoft Windows (Windows/Linux) mysqldump error on federated tables
-34189 2008-01-31 Any ALTER TABLE t1 ENGINE=FEDERATED CONNECTION='connectionString' on MyISAM fails
-31757 2007-10-22 Any Federated tables break replication Antony Curtis
-33953 2008-01-21 Any mysqld dies on search federated table using nullable index with < or <= operator
-34015 2008-01-23 Linux Problems with float fields using federated tables
-21583 2006-08-11 Linux (Linux) Federated table returns broken strings.
-33702 2008-01-05 Accessing a federated table with a non existing server returns random error code
-25512 2007-01-10 Federated: CREATE failures
-32426 2007-11-16 Any FEDERATED query returns corrupt results for ORDER BY on a TEXT field
-25510 2007-01-10 Federated: double trigger activation
-33250 2007-12-14 SELECT * FROM really_big_federated_table eats lots of virtual memory (OOM)
-14874 2005-11-11 Error 2013: Lost connection to MySQL server with Federated table
-25508 2007-01-10 Federated: Failure to Remove Partitioning
-27180 2007-03-15 #1030 - Got error 1 from storage engine with big tables
-33947 2008-01-20 Any Join on Federated tables with Unique index and IS NOT NULL crashes server
-30051 (fixed) CREATE TABLE does not connect and check existence of remote table
diff --git a/storage/federatedx/federatedx_pushdown.cc b/storage/federatedx/federatedx_pushdown.cc
new file mode 100644
index 00000000000..2bcee943308
--- /dev/null
+++ b/storage/federatedx/federatedx_pushdown.cc
@@ -0,0 +1,293 @@
+/*
+ Copyright (c) 2019 MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+/* !!! For inclusion into ha_federatedx.cc */
+
+
+/*
+ This is a quick and dirty implementation of the derived_handler and
+ select_handler interfaces, used to push SELECT queries and queries that
+ specify derived tables down into the FEDERATEDX engine.
+ The functions
+ create_federatedx_derived_handler and
+ create_federatedx_select_handler
+ that return the corresponding interfaces for pushdown capabilities perform
+ only minimal checks. In particular, they do not check that the tables of
+ the pushed queries belong to the same foreign server.
+
+ The implementation is provided purely for testing purposes.
+ The pushdown capabilities are enabled by turning on the plugin system
+ variable federated_pushdown:
+ set global federated_pushdown=1;
+*/
+
+
+static derived_handler*
+create_federatedx_derived_handler(THD* thd, TABLE_LIST *derived)
+{
+ if (!use_pushdown)
+ return 0;
+
+ ha_federatedx_derived_handler* handler = NULL;
+ handlerton *ht= 0;
+
+ SELECT_LEX_UNIT *unit= derived->derived;
+
+ for (SELECT_LEX *sl= unit->first_select(); sl; sl= sl->next_select())
+ {
+ if (!(sl->join))
+ return 0;
+ for (TABLE_LIST *tbl= sl->join->tables_list; tbl; tbl= tbl->next_local)
+ {
+ if (!tbl->table)
+ return 0;
+ if (!ht)
+ ht= tbl->table->file->partition_ht();
+ else if (ht != tbl->table->file->partition_ht())
+ return 0;
+ }
+ }
+
+ handler= new ha_federatedx_derived_handler(thd, derived);
+
+ return handler;
+}
+
+
+/*
+ Implementation class of the derived_handler interface for FEDERATEDX:
+ class implementation
+*/
+
+ha_federatedx_derived_handler::ha_federatedx_derived_handler(THD *thd,
+ TABLE_LIST *dt)
+ : derived_handler(thd, federatedx_hton),
+ share(NULL), txn(NULL), iop(NULL), stored_result(NULL)
+{
+ derived= dt;
+}
+
+ha_federatedx_derived_handler::~ha_federatedx_derived_handler() {}
+
+int ha_federatedx_derived_handler::init_scan()
+{
+ THD *thd;
+ int rc= 0;
+
+ DBUG_ENTER("ha_federatedx_derived_handler::init_scan");
+
+ TABLE *table= derived->get_first_table()->table;
+ ha_federatedx *h= (ha_federatedx *) table->file;
+ iop= &h->io;
+ share= get_share(table->s->table_name.str, table);
+ thd= table->in_use;
+ txn= h->get_txn(thd);
+ if ((rc= txn->acquire(share, thd, TRUE, iop)))
+ DBUG_RETURN(rc);
+
+ if ((*iop)->query(derived->derived_spec.str, derived->derived_spec.length))
+ goto err;
+
+ stored_result= (*iop)->store_result();
+ if (!stored_result)
+ goto err;
+
+ DBUG_RETURN(0);
+
+err:
+ DBUG_RETURN(HA_FEDERATEDX_ERROR_WITH_REMOTE_SYSTEM);
+}
+
+int ha_federatedx_derived_handler::next_row()
+{
+ int rc;
+ FEDERATEDX_IO_ROW *row;
+ ulong *lengths;
+ Field **field;
+ int column= 0;
+ Time_zone *saved_time_zone= table->in_use->variables.time_zone;
+ DBUG_ENTER("ha_federatedx_derived_handler::next_row");
+
+ if ((rc= txn->acquire(share, table->in_use, TRUE, iop)))
+ DBUG_RETURN(rc);
+
+ if (!(row= (*iop)->fetch_row(stored_result)))
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+
+ /* Convert row to internal format */
+ table->in_use->variables.time_zone= UTC;
+ lengths= (*iop)->fetch_lengths(stored_result);
+
+ for (field= table->field; *field; field++, column++)
+ {
+ if ((*iop)->is_column_null(row, column))
+ (*field)->set_null();
+ else
+ {
+ (*field)->set_notnull();
+ (*field)->store((*iop)->get_column_data(row, column),
+ lengths[column], &my_charset_bin);
+ }
+ }
+ table->in_use->variables.time_zone= saved_time_zone;
+
+ DBUG_RETURN(rc);
+}
+
+int ha_federatedx_derived_handler::end_scan()
+{
+ DBUG_ENTER("ha_federatedx_derived_handler::end_scan");
+
+ (*iop)->free_result(stored_result);
+
+ free_share(txn, share);
+
+ DBUG_RETURN(0);
+}
+
+void ha_federatedx_derived_handler::print_error(int, unsigned long)
+{
+}
+
+
+static select_handler*
+create_federatedx_select_handler(THD* thd, SELECT_LEX *sel)
+{
+ if (!use_pushdown)
+ return 0;
+
+ ha_federatedx_select_handler* handler = NULL;
+ handlerton *ht= 0;
+
+ for (TABLE_LIST *tbl= thd->lex->query_tables; tbl; tbl= tbl->next_global)
+ {
+ if (!tbl->table)
+ return 0;
+ if (!ht)
+ ht= tbl->table->file->partition_ht();
+ else if (ht != tbl->table->file->partition_ht())
+ return 0;
+ }
+
+ handler= new ha_federatedx_select_handler(thd, sel);
+
+ return handler;
+}
+
+/*
+ Implementation class of the select_handler interface for FEDERATEDX:
+ class implementation
+*/
+
+ha_federatedx_select_handler::ha_federatedx_select_handler(THD *thd,
+ SELECT_LEX *sel)
+ : select_handler(thd, federatedx_hton),
+ share(NULL), txn(NULL), iop(NULL), stored_result(NULL)
+{
+ select= sel;
+}
+
+ha_federatedx_select_handler::~ha_federatedx_select_handler() {}
+
+int ha_federatedx_select_handler::init_scan()
+{
+ int rc= 0;
+
+ DBUG_ENTER("ha_federatedx_select_handler::init_scan");
+
+ TABLE *table= 0;
+ for (TABLE_LIST *tbl= thd->lex->query_tables; tbl; tbl= tbl->next_global)
+ {
+ if (!tbl->table)
+ continue;
+ table= tbl->table;
+ break;
+ }
+ ha_federatedx *h= (ha_federatedx *) table->file;
+ iop= &h->io;
+ share= get_share(table->s->table_name.str, table);
+ txn= h->get_txn(thd);
+ if ((rc= txn->acquire(share, thd, TRUE, iop)))
+ DBUG_RETURN(rc);
+
+ if ((*iop)->query(thd->query(), thd->query_length()))
+ goto err;
+
+ stored_result= (*iop)->store_result();
+ if (!stored_result)
+ goto err;
+
+ DBUG_RETURN(0);
+
+err:
+ DBUG_RETURN(HA_FEDERATEDX_ERROR_WITH_REMOTE_SYSTEM);
+}
+
+int ha_federatedx_select_handler::next_row()
+{
+ int rc= 0;
+ FEDERATEDX_IO_ROW *row;
+ ulong *lengths;
+ Field **field;
+ int column= 0;
+ Time_zone *saved_time_zone= table->in_use->variables.time_zone;
+ DBUG_ENTER("ha_federatedx_select_handler::next_row");
+
+ if ((rc= txn->acquire(share, table->in_use, TRUE, iop)))
+ DBUG_RETURN(rc);
+
+ if (!(row= (*iop)->fetch_row(stored_result)))
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+
+ /* Convert row to internal format */
+ table->in_use->variables.time_zone= UTC;
+ lengths= (*iop)->fetch_lengths(stored_result);
+
+ for (field= table->field; *field; field++, column++)
+ {
+ if ((*iop)->is_column_null(row, column))
+ (*field)->set_null();
+ else
+ {
+ (*field)->set_notnull();
+ (*field)->store((*iop)->get_column_data(row, column),
+ lengths[column], &my_charset_bin);
+ }
+ }
+ table->in_use->variables.time_zone= saved_time_zone;
+
+ DBUG_RETURN(rc);
+}
+
+int ha_federatedx_select_handler::end_scan()
+{
+ DBUG_ENTER("ha_federatedx_derived_handler::end_scan");
+
+ free_tmp_table(thd, table);
+ table= 0;
+
+ (*iop)->free_result(stored_result);
+
+ free_share(txn, share);
+
+ DBUG_RETURN(0);
+}
+
+void ha_federatedx_select_handler::print_error(int, unsigned long)
+{
+}
+
+
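
The comment at the top of federatedx_pushdown.cc above notes that pushdown is controlled by the plugin system variable federated_pushdown. As a minimal usage sketch only: the table name t1 below is hypothetical and not part of this patch; the variable name and the handler behaviour are taken from the code above.

  -- assumes t1 is an existing FederatedX table pointing at a remote server
  SET GLOBAL federated_pushdown=1;
  -- with pushdown enabled, a plain SELECT can be handed to the remote
  -- server through the select_handler ...
  SELECT id, msg FROM t1 WHERE id > 4;
  -- ... and a query over a derived table through the derived_handler
  SELECT count(*) FROM (SELECT id FROM t1 WHERE id < 10) AS dt;
  SET GLOBAL federated_pushdown=0;
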
diff --git a/storage/federatedx/federatedx_pushdown.h b/storage/federatedx/federatedx_pushdown.h
new file mode 100644
index 00000000000..673abcfc68d
--- /dev/null
+++ b/storage/federatedx/federatedx_pushdown.h
@@ -0,0 +1,63 @@
+/*
+ Copyright (c) 2019 MariaDB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include "derived_handler.h"
+#include "select_handler.h"
+
+/*
+ Implementation class of the derived_handler interface for FEDERATEDX:
+ class declaration
+*/
+
+class ha_federatedx_derived_handler: public derived_handler
+{
+private:
+ FEDERATEDX_SHARE *share;
+ federatedx_txn *txn;
+ federatedx_io **iop;
+ FEDERATEDX_IO_RESULT *stored_result;
+
+public:
+ ha_federatedx_derived_handler(THD* thd_arg, TABLE_LIST *tbl);
+ ~ha_federatedx_derived_handler();
+ int init_scan();
+ int next_row();
+ int end_scan();
+ void print_error(int, unsigned long);
+};
+
+
+/*
+ Implementation class of the select_handler interface for FEDERATEDX:
+ class declaration
+*/
+
+class ha_federatedx_select_handler: public select_handler
+{
+private:
+ FEDERATEDX_SHARE *share;
+ federatedx_txn *txn;
+ federatedx_io **iop;
+ FEDERATEDX_IO_RESULT *stored_result;
+
+public:
+ ha_federatedx_select_handler(THD* thd_arg, SELECT_LEX *sel);
+ ~ha_federatedx_select_handler();
+ int init_scan();
+ int next_row();
+ int end_scan();
+ void print_error(int, unsigned long);
+};
diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc
index 74d547cb674..b0a08a0d49a 100644
--- a/storage/federatedx/ha_federatedx.cc
+++ b/storage/federatedx/ha_federatedx.cc
@@ -319,6 +319,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "sql_analyse.h" // append_escaped()
#include "sql_show.h" // append_identifier()
#include "tztime.h" // my_tz_find()
+#include "sql_select.h"
#ifdef I_AM_PARANOID
#define MIN_PORT 1023
@@ -401,6 +402,12 @@ static void init_federated_psi_keys(void)
#define init_federated_psi_keys() /* no-op */
#endif /* HAVE_PSI_INTERFACE */
+handlerton* federatedx_hton;
+
+static derived_handler*
+create_federatedx_derived_handler(THD* thd, TABLE_LIST *derived);
+static select_handler*
+create_federatedx_select_handler(THD* thd, SELECT_LEX *sel);
/*
Initialize the federatedx handler.
@@ -418,7 +425,7 @@ int federatedx_db_init(void *p)
{
DBUG_ENTER("federatedx_db_init");
init_federated_psi_keys();
- handlerton *federatedx_hton= (handlerton *)p;
+ federatedx_hton= (handlerton *)p;
federatedx_hton->state= SHOW_OPTION_YES;
/* Needed to work with old .frm files */
federatedx_hton->db_type= DB_TYPE_FEDERATED_DB;
@@ -432,6 +439,8 @@ int federatedx_db_init(void *p)
federatedx_hton->discover_table_structure= ha_federatedx::discover_assisted;
federatedx_hton->create= federatedx_create_handler;
federatedx_hton->flags= HTON_ALTER_NOT_SUPPORTED;
+ federatedx_hton->create_derived= create_federatedx_derived_handler;
+ federatedx_hton->create_select= create_federatedx_select_handler;
if (mysql_mutex_init(fe_key_mutex_federatedx,
&federatedx_mutex, MY_MUTEX_INIT_FAST))
@@ -3672,6 +3681,13 @@ err1:
struct st_mysql_storage_engine federatedx_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
+my_bool use_pushdown;
+static MYSQL_SYSVAR_BOOL(pushdown, use_pushdown, 0,
+ "Use query fragments pushdown capabilities", NULL, NULL, FALSE);
+static struct st_mysql_sys_var* sysvars[]= { MYSQL_SYSVAR(pushdown), NULL };
+
+#include "federatedx_pushdown.cc"
+
maria_declare_plugin(federatedx)
{
MYSQL_STORAGE_ENGINE_PLUGIN,
@@ -3684,8 +3700,9 @@ maria_declare_plugin(federatedx)
federatedx_done, /* Plugin Deinit */
0x0201 /* 2.1 */,
NULL, /* status variables */
- NULL, /* system variables */
+ sysvars, /* system variables */
"2.1", /* string version */
MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
}
maria_declare_plugin_end;
+
diff --git a/storage/federatedx/ha_federatedx.h b/storage/federatedx/ha_federatedx.h
index ffd9274d7eb..67aa83f7b33 100644
--- a/storage/federatedx/ha_federatedx.h
+++ b/storage/federatedx/ha_federatedx.h
@@ -1,3 +1,5 @@
+#ifndef HA_FEDERATEDX_INCLUDED
+#define HA_FEDERATEDX_INCLUDED
/*
Copyright (c) 2008, Patrick Galbraith
All rights reserved.
@@ -332,7 +334,7 @@ public:
return (HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED
| HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS |
HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE | HA_CAN_REPAIR |
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE |
+ HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | HA_CAN_ONLINE_BACKUPS |
HA_PARTIAL_COLUMN_READ | HA_NULL_IN_KEY);
}
/*
@@ -444,6 +446,9 @@ public:
int external_lock(THD *thd, int lock_type);
int reset(void);
int free_result(void);
+
+ friend class ha_federatedx_derived_handler;
+ friend class ha_federatedx_select_handler;
};
extern const char ident_quote_char; // Character for quoting
@@ -459,3 +464,7 @@ extern federatedx_io *instantiate_io_mysql(MEM_ROOT *server_root,
FEDERATEDX_SERVER *server);
extern federatedx_io *instantiate_io_null(MEM_ROOT *server_root,
FEDERATEDX_SERVER *server);
+
+#include "federatedx_pushdown.h"
+
+#endif /* HA_FEDERATEDX_INCLUDED */
diff --git a/storage/heap/ha_heap.h b/storage/heap/ha_heap.h
index e17c18c8b14..c765e8e2f62 100644
--- a/storage/heap/ha_heap.h
+++ b/storage/heap/ha_heap.h
@@ -49,9 +49,9 @@ public:
{
return (HA_FAST_KEY_READ | HA_NO_BLOBS | HA_NULL_IN_KEY |
HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
- HA_CAN_SQL_HANDLER |
+ HA_CAN_SQL_HANDLER | HA_CAN_ONLINE_BACKUPS |
HA_REC_NOT_IN_SEQ | HA_CAN_INSERT_DELAYED | HA_NO_TRANSACTIONS |
- HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT);
+ HA_HAS_RECORDS | HA_STATS_RECORDS_IS_EXACT | HA_CAN_HASH_KEYS);
}
ulong index_flags(uint inx, uint part, bool all_parts) const
{
diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt
index 0c932407598..89e4fd1bce7 100644
--- a/storage/innobase/CMakeLists.txt
+++ b/storage/innobase/CMakeLists.txt
@@ -113,7 +113,6 @@ SET(INNOBASE_SOURCES
row/row0purge.cc
row/row0row.cc
row/row0sel.cc
- row/row0trunc.cc
row/row0uins.cc
row/row0umod.cc
row/row0undo.cc
@@ -176,7 +175,6 @@ IF(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64"
mtr/mtr0mtr.cc
row/row0merge.cc
row/row0mysql.cc
- row/row0trunc.cc
srv/srv0srv.cc
COMPILE_FLAGS "-O0"
)
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index a0a75fae1cf..c7ee80cca5f 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -44,6 +44,8 @@ Created 6/2/1994 Heikki Tuuri
#include "dict0boot.h"
#include "row0sel.h" /* row_search_max_autoinc() */
+Atomic_counter<uint32_t> btr_validate_index_running;
+
/**************************************************************//**
Checks if the page in the cursor can be merged with given page.
If necessary, re-organize the merge_page.
@@ -57,15 +59,12 @@ btr_can_merge_with_page(
buf_block_t** merge_block, /*!< out: the merge block */
mtr_t* mtr); /*!< in: mini-transaction */
-/**************************************************************//**
-Report that an index page is corrupted. */
-void
-btr_corruption_report(
-/*==================*/
- const buf_block_t* block, /*!< in: corrupted block */
- const dict_index_t* index) /*!< in: index tree */
+/** Report that an index page is corrupted.
+@param[in] buffer block
+@param[in] index tree */
+void btr_corruption_report(const buf_block_t* block, const dict_index_t* index)
{
- ib::error()
+ ib::fatal()
<< "Flag mismatch in page " << block->page.id
<< " index " << index->name
<< " of table " << index->table->name;
@@ -226,7 +225,7 @@ btr_root_block_get(
buf_block_t* block = btr_block_get(
page_id_t(index->table->space_id, index->page),
- page_size_t(index->table->space->flags), mode,
+ index->table->space->zip_size(), mode,
index, mtr);
if (!block) {
@@ -359,7 +358,7 @@ btr_root_adjust_on_import(
page_zip_des_t* page_zip;
dict_table_t* table = index->table;
const page_id_t page_id(table->space_id, index->page);
- const page_size_t page_size(table->space->flags);
+ const ulint zip_size = table->space->zip_size();
DBUG_EXECUTE_IF("ib_import_trigger_corruption_3",
return(DB_CORRUPTION););
@@ -368,7 +367,7 @@ btr_root_adjust_on_import(
mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
- block = btr_block_get(page_id, page_size, RW_X_LATCH, index, &mtr);
+ block = btr_block_get(page_id, zip_size, RW_X_LATCH, index, &mtr);
page = buf_block_get_frame(block);
page_zip = buf_block_get_page_zip(block);
@@ -387,9 +386,21 @@ btr_root_adjust_on_import(
} else {
/* Check that the table flags and the tablespace
flags match. */
- err = (dict_tf_to_fsp_flags(table->flags)
- == table->space->flags)
- ? DB_SUCCESS : DB_CORRUPTION;
+ ulint tf = dict_tf_to_fsp_flags(table->flags);
+ ulint sf = table->space->flags;
+ sf &= ~FSP_FLAGS_MEM_MASK;
+ tf &= ~FSP_FLAGS_MEM_MASK;
+ if (fil_space_t::is_flags_equal(tf, sf)
+ || fil_space_t::is_flags_equal(sf, tf)) {
+ mutex_enter(&fil_system.mutex);
+ table->space->flags = (table->space->flags
+ & ~FSP_FLAGS_MEM_MASK)
+ | (tf & FSP_FLAGS_MEM_MASK);
+ mutex_exit(&fil_system.mutex);
+ err = DB_SUCCESS;
+ } else {
+ err = DB_CORRUPTION;
+ }
}
} else {
err = DB_SUCCESS;
@@ -429,7 +440,7 @@ btr_page_create(
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
if (page_zip) {
- page_create_zip(block, index, level, 0, NULL, mtr);
+ page_create_zip(block, index, level, 0, mtr);
} else {
page_create(block, mtr, dict_table_is_comp(index->table),
dict_index_is_spatial(index));
@@ -469,7 +480,7 @@ btr_page_alloc_for_ibuf(
new_block = buf_page_get(
page_id_t(index->table->space_id, node_addr.page),
- page_size_t(index->table->space->flags),
+ index->table->space->zip_size(),
RW_X_LATCH, mtr);
new_page = buf_block_get_frame(new_block);
@@ -751,19 +762,17 @@ void btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
: PAGE_HEADER + PAGE_BTR_SEG_TOP];
fseg_free_page(seg_header,
index->table->space, block->page.id.page_no(),
- block->index != NULL, mtr);
+ block->index != NULL, !block->page.flush_observer, mtr);
/* The page was marked free in the allocation bitmap, but it
should remain exclusively latched until mtr_t::commit() or until it
is explicitly freed from the mini-transaction. */
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
- if (srv_immediate_scrub_data_uncompressed) {
- /* In MDEV-15528 this call must be removed, and we should
- zero out the page after the redo log for this mini-transaction
- has been durably written. */
- fsp_init_file_page(index->table->space, block, mtr);
- }
+ /* MDEV-15528 FIXME: Zero out the page after the redo log for
+ this mini-transaction has been durably written.
+ This must be done unconditionally if
+ srv_immediate_scrub_data_uncompressed is set. */
}
/**************************************************************//**
@@ -820,7 +829,7 @@ btr_node_ptr_get_child(
return btr_block_get(
page_id_t(index->table->space_id,
btr_node_ptr_get_child_page_no(node_ptr, offsets)),
- page_size_t(index->table->space->flags),
+ index->table->space->zip_size(),
RW_SX_LATCH, index, mtr);
}
@@ -1011,7 +1020,7 @@ static void btr_free_root(buf_block_t* block, mtr_t* mtr, bool invalidate)
/** Prepare to free a B-tree.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] index_id PAGE_INDEX_ID contents
@param[in,out] mtr mini-transaction
@return root block, to invoke btr_free_but_not_root() and btr_free_root()
@@ -1020,7 +1029,7 @@ static MY_ATTRIBUTE((warn_unused_result))
buf_block_t*
btr_free_root_check(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
index_id_t index_id,
mtr_t* mtr)
{
@@ -1028,7 +1037,7 @@ btr_free_root_check(
ut_ad(index_id != BTR_FREED_INDEX_ID);
buf_block_t* block = buf_page_get(
- page_id, page_size, RW_X_LATCH, mtr);
+ page_id, zip_size, RW_X_LATCH, mtr);
if (block) {
buf_block_dbg_add_level(block, SYNC_TREE_NODE);
@@ -1049,21 +1058,18 @@ btr_free_root_check(
/** Create the root node for a new index tree.
@param[in] type type of the index
-@param[in,out] space tablespace where created
@param[in] index_id index id
-@param[in] index index, or NULL when applying TRUNCATE
-log record during recovery
-@param[in] btr_redo_create_info used for applying TRUNCATE log
-@param[in] mtr mini-transaction handle
-record during recovery
-@return page number of the created root, FIL_NULL if did not succeed */
+@param[in,out] space tablespace where created
+@param[in] index index
+@param[in,out] mtr mini-transaction
+@return page number of the created root
+@retval FIL_NULL if did not succeed */
ulint
btr_create(
ulint type,
fil_space_t* space,
index_id_t index_id,
dict_index_t* index,
- const btr_create_t* btr_redo_create_info,
mtr_t* mtr)
{
buf_block_t* block;
@@ -1078,7 +1084,7 @@ btr_create(
(for an ibuf tree, not in the root, but on a separate ibuf header
page) */
- if (type & DICT_IBUF) {
+ if (UNIV_UNLIKELY(type & DICT_IBUF)) {
/* Allocate first the ibuf header page */
buf_block_t* ibuf_hdr_block = fseg_create(
space, 0,
@@ -1110,8 +1116,7 @@ btr_create(
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW);
- flst_init(block->frame + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
- mtr);
+ flst_init(block, PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr);
} else {
block = fseg_create(space, 0,
PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr);
@@ -1140,44 +1145,11 @@ btr_create(
page_zip = buf_block_get_page_zip(block);
if (page_zip) {
- if (index != NULL) {
- page = page_create_zip(block, index, 0, 0, NULL, mtr);
- } else {
- /* Create a compressed index page when applying
- TRUNCATE log record during recovery */
- ut_ad(btr_redo_create_info != NULL);
-
- redo_page_compress_t page_comp_info;
-
- page_comp_info.type = type;
-
- page_comp_info.index_id = index_id;
-
- page_comp_info.n_fields =
- btr_redo_create_info->n_fields;
-
- page_comp_info.field_len =
- btr_redo_create_info->field_len;
-
- page_comp_info.fields = btr_redo_create_info->fields;
-
- page_comp_info.trx_id_pos =
- btr_redo_create_info->trx_id_pos;
-
- page = page_create_zip(block, NULL, 0, 0,
- &page_comp_info, mtr);
- }
+ page = page_create_zip(block, index, 0, 0, mtr);
} else {
- if (index != NULL) {
- page = page_create(block, mtr,
- dict_table_is_comp(index->table),
- dict_index_is_spatial(index));
- } else {
- ut_ad(btr_redo_create_info != NULL);
- page = page_create(
- block, mtr, btr_redo_create_info->format_flags,
- type == DICT_SPATIAL);
- }
+ page = page_create(block, mtr,
+ dict_table_is_comp(index->table),
+ dict_index_is_spatial(index));
/* Set the level of the new index page */
btr_page_set_level(page, NULL, 0, mtr);
}
@@ -1189,18 +1161,14 @@ btr_create(
btr_page_set_next(page, page_zip, FIL_NULL, mtr);
btr_page_set_prev(page, page_zip, FIL_NULL, mtr);
- /* We reset the free bits for the page to allow creation of several
- trees in the same mtr, otherwise the latch on a bitmap page would
- prevent it because of the latching order.
-
- index will be NULL if we are recreating the table during recovery
- on behalf of TRUNCATE.
+ /* We reset the free bits for the page in a separate
+ mini-transaction to allow creation of several trees in the
+ same mtr, otherwise the latch on a bitmap page would prevent
+ it because of the latching order.
Note: Insert Buffering is disabled for temporary tables given that
most temporary tables are smaller in size and short-lived. */
- if (!(type & DICT_CLUSTERED)
- && (index == NULL || !index->table->is_temporary())) {
-
+ if (!(type & DICT_CLUSTERED) && !index->table->is_temporary()) {
ibuf_reset_free_bits(block);
}
@@ -1281,18 +1249,18 @@ top_loop:
/** Free a persistent index tree if it exists.
@param[in] page_id root page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] index_id PAGE_INDEX_ID contents
@param[in,out] mtr mini-transaction */
void
btr_free_if_exists(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
index_id_t index_id,
mtr_t* mtr)
{
buf_block_t* root = btr_free_root_check(
- page_id, page_size, index_id, mtr);
+ page_id, zip_size, index_id, mtr);
if (root == NULL) {
return;
@@ -1303,20 +1271,15 @@ btr_free_if_exists(
btr_free_root(root, mtr, true);
}
-/** Free an index tree in a temporary tablespace or during TRUNCATE TABLE.
-@param[in] page_id root page id
-@param[in] page_size page size */
-void
-btr_free(
- const page_id_t page_id,
- const page_size_t& page_size)
+/** Free an index tree in a temporary tablespace.
+@param[in] page_id root page id */
+void btr_free(const page_id_t page_id)
{
mtr_t mtr;
mtr.start();
mtr.set_log_mode(MTR_LOG_NO_REDO);
- buf_block_t* block = buf_page_get(
- page_id, page_size, RW_X_LATCH, &mtr);
+ buf_block_t* block = buf_page_get(page_id, 0, RW_X_LATCH, &mtr);
if (block) {
btr_free_but_not_root(block, MTR_LOG_NO_REDO);
@@ -1340,7 +1303,7 @@ btr_read_autoinc(dict_index_t* index)
ib_uint64_t autoinc;
if (buf_block_t* block = buf_page_get(
page_id_t(index->table->space_id, index->page),
- page_size_t(index->table->space->flags),
+ index->table->space->zip_size(),
RW_S_LATCH, &mtr)) {
autoinc = page_get_autoinc(block->frame);
} else {
@@ -1372,7 +1335,7 @@ btr_read_autoinc_with_fallback(const dict_table_t* table, unsigned col_no)
mtr.start();
buf_block_t* block = buf_page_get(
page_id_t(index->table->space_id, index->page),
- page_size_t(index->table->space->flags),
+ index->table->space->zip_size(),
RW_S_LATCH, &mtr);
ib_uint64_t autoinc = block ? page_get_autoinc(block->frame) : 0;
@@ -1417,7 +1380,7 @@ btr_write_autoinc(dict_index_t* index, ib_uint64_t autoinc, bool reset)
fil_space_t* space = index->table->space;
mtr.set_named_space(space);
page_set_autoinc(buf_page_get(page_id_t(space->id, index->page),
- page_size_t(space->flags),
+ space->zip_size(),
RW_SX_LATCH, &mtr),
index, autoinc, &mtr, reset);
mtr.commit();
@@ -1544,7 +1507,7 @@ btr_page_reorganize_low(
}
if (page_zip
- && !page_zip_compress(page_zip, page, index, z_level, NULL, mtr)) {
+ && !page_zip_compress(page_zip, page, index, z_level, mtr)) {
/* Restore the old page and exit. */
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
@@ -1570,11 +1533,6 @@ btr_page_reorganize_low(
goto func_exit;
}
- if (!recovery && !dict_table_is_locking_disabled(index->table)) {
- /* Update the record lock bitmaps */
- lock_move_reorganize_page(block, temp_block);
- }
-
data_size2 = page_get_data_size(page);
max_ins_size2 = page_get_max_insert_size_after_reorganize(page, 1);
@@ -1598,21 +1556,41 @@ btr_page_reorganize_low(
ut_ad(cursor->rec == page_get_infimum_rec(page));
}
-func_exit:
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
- if (!recovery && block->page.id.page_no() == index->page
- && fil_page_get_type(temp_page) == FIL_PAGE_TYPE_INSTANT) {
- /* Preserve the PAGE_INSTANT information. */
- ut_ad(!page_zip);
- ut_ad(index->is_instant());
- memcpy(FIL_PAGE_TYPE + page, FIL_PAGE_TYPE + temp_page, 2);
- memcpy(PAGE_HEADER + PAGE_INSTANT + page,
- PAGE_HEADER + PAGE_INSTANT + temp_page, 2);
+ if (!recovery) {
+ if (block->page.id.page_no() == index->page
+ && fil_page_get_type(temp_page) == FIL_PAGE_TYPE_INSTANT) {
+ /* Preserve the PAGE_INSTANT information. */
+ ut_ad(!page_zip);
+ ut_ad(index->is_instant());
+ memcpy(FIL_PAGE_TYPE + page,
+ FIL_PAGE_TYPE + temp_page, 2);
+ memcpy(PAGE_HEADER + PAGE_INSTANT + page,
+ PAGE_HEADER + PAGE_INSTANT + temp_page, 2);
+ if (!index->table->instant) {
+ } else if (page_is_comp(page)) {
+ memcpy(PAGE_NEW_INFIMUM + page,
+ PAGE_NEW_INFIMUM + temp_page, 8);
+ memcpy(PAGE_NEW_SUPREMUM + page,
+ PAGE_NEW_SUPREMUM + temp_page, 8);
+ } else {
+ memcpy(PAGE_OLD_INFIMUM + page,
+ PAGE_OLD_INFIMUM + temp_page, 8);
+ memcpy(PAGE_OLD_SUPREMUM + page,
+ PAGE_OLD_SUPREMUM + temp_page, 8);
+ }
+ }
+
+ if (!dict_table_is_locking_disabled(index->table)) {
+ /* Update the record lock bitmaps */
+ lock_move_reorganize_page(block, temp_block);
+ }
}
+func_exit:
buf_block_free(temp_block);
/* Restore logging mode */
@@ -1658,6 +1636,14 @@ func_exit:
mach_read_from_2(PAGE_HEADER + PAGE_INSTANT
+ page),
MLOG_2BYTES, mtr);
+ if (!index->table->instant) {
+ } else if (page_is_comp(page)) {
+ mlog_log_string(PAGE_NEW_INFIMUM + page, 8, mtr);
+ mlog_log_string(PAGE_NEW_SUPREMUM + page, 8, mtr);
+ } else {
+ mlog_log_string(PAGE_OLD_INFIMUM + page, 8, mtr);
+ mlog_log_string(PAGE_OLD_SUPREMUM + page, 8, mtr);
+ }
}
return(success);
@@ -1796,7 +1782,7 @@ btr_page_empty(
: 0;
if (page_zip) {
- page_create_zip(block, index, level, autoinc, NULL, mtr);
+ page_create_zip(block, index, level, autoinc, mtr);
} else {
page_create(block, mtr, dict_table_is_comp(index->table),
dict_index_is_spatial(index));
@@ -1808,6 +1794,65 @@ btr_page_empty(
}
}
+/** Write instant ALTER TABLE metadata to a root page.
+@param[in,out] root clustered index root page
+@param[in] index clustered index with instant ALTER TABLE
+@param[in,out] mtr mini-transaction */
+void btr_set_instant(buf_block_t* root, const dict_index_t& index, mtr_t* mtr)
+{
+ ut_ad(index.n_core_fields > 0);
+ ut_ad(index.n_core_fields < REC_MAX_N_FIELDS);
+ ut_ad(index.is_instant());
+ ut_ad(fil_page_get_type(root->frame) == FIL_PAGE_TYPE_INSTANT
+ || fil_page_get_type(root->frame) == FIL_PAGE_INDEX);
+ ut_ad(!page_has_siblings(root->frame));
+ ut_ad(root->page.id.page_no() == index.page);
+
+ rec_t* infimum = page_get_infimum_rec(root->frame);
+ rec_t* supremum = page_get_supremum_rec(root->frame);
+ byte* page_type = root->frame + FIL_PAGE_TYPE;
+ uint16_t i = page_header_get_field(root->frame, PAGE_INSTANT);
+
+ switch (mach_read_from_2(page_type)) {
+ case FIL_PAGE_TYPE_INSTANT:
+ ut_ad(page_get_instant(root->frame) == index.n_core_fields);
+ if (memcmp(infimum, "infimum", 8)
+ || memcmp(supremum, "supremum", 8)) {
+ ut_ad(index.table->instant);
+ ut_ad(!memcmp(infimum, field_ref_zero, 8));
+ ut_ad(!memcmp(supremum, field_ref_zero, 7));
+ /* The n_core_null_bytes only matters for
+ ROW_FORMAT=COMPACT and ROW_FORMAT=DYNAMIC tables. */
+ ut_ad(supremum[7] == index.n_core_null_bytes
+ || !index.table->not_redundant());
+ return;
+ }
+ break;
+ default:
+ ut_ad(!"wrong page type");
+ /* fall through */
+ case FIL_PAGE_INDEX:
+ ut_ad(!page_is_comp(root->frame)
+ || !page_get_instant(root->frame));
+ ut_ad(!memcmp(infimum, "infimum", 8));
+ ut_ad(!memcmp(supremum, "supremum", 8));
+ mlog_write_ulint(page_type, FIL_PAGE_TYPE_INSTANT,
+ MLOG_2BYTES, mtr);
+ ut_ad(i <= PAGE_NO_DIRECTION);
+ i |= index.n_core_fields << 3;
+ mlog_write_ulint(PAGE_HEADER + PAGE_INSTANT + root->frame, i,
+ MLOG_2BYTES, mtr);
+ break;
+ }
+
+ if (index.table->instant) {
+ mlog_memset(root, infimum - root->frame, 8, 0, mtr);
+ mlog_memset(root, supremum - root->frame, 7, 0, mtr);
+ mlog_write_ulint(&supremum[7], index.n_core_null_bytes,
+ MLOG_1BYTE, mtr);
+ }
+}
+
/*************************************************************//**
Makes tree one level higher by splitting the root, and inserts
the tuple. It is assumed that mtr contains an x-latch on the tree.
@@ -1993,11 +2038,7 @@ btr_root_raise_and_insert(
if (index->is_instant()) {
ut_ad(!root_page_zip);
- byte* page_type = root_block->frame + FIL_PAGE_TYPE;
- ut_ad(mach_read_from_2(page_type) == FIL_PAGE_INDEX);
- mlog_write_ulint(page_type, FIL_PAGE_TYPE_INSTANT,
- MLOG_2BYTES, mtr);
- page_set_instant(root_block->frame, index->n_core_fields, mtr);
+ btr_set_instant(root_block, *index, mtr);
}
/* Set the next node and previous node fields, although
@@ -2528,12 +2569,12 @@ btr_attach_half_pages(
/* for consistency, both blocks should be locked, before change */
if (prev_page_no != FIL_NULL && direction == FSP_DOWN) {
prev_block = btr_block_get(
- page_id_t(space, prev_page_no), block->page.size,
+ page_id_t(space, prev_page_no), block->zip_size(),
RW_X_LATCH, index, mtr);
}
if (next_page_no != FIL_NULL && direction != FSP_DOWN) {
next_block = btr_block_get(
- page_id_t(space, next_page_no), block->page.size,
+ page_id_t(space, next_page_no), block->zip_size(),
RW_X_LATCH, index, mtr);
}
@@ -2683,7 +2724,7 @@ btr_insert_into_right_sibling(
const ulint space = block->page.id.space();
next_block = btr_block_get(
- page_id_t(space, next_page_no), block->page.size,
+ page_id_t(space, next_page_no), block->zip_size(),
RW_X_LATCH, cursor->index, mtr);
next_page = buf_block_get_frame(next_block);
@@ -2709,7 +2750,7 @@ btr_insert_into_right_sibling(
if (rec == NULL) {
if (is_leaf
- && next_block->page.size.is_compressed()
+ && next_block->page.zip.ssize
&& !dict_index_is_clust(cursor->index)
&& !cursor->index->table->is_temporary()) {
/* Reset the IBUF_BITMAP_FREE bits, because
@@ -2757,7 +2798,7 @@ btr_insert_into_right_sibling(
/* Update the free bits of the B-tree page in the
insert buffer bitmap. */
- if (next_block->page.size.is_compressed()) {
+ if (next_block->page.zip.ssize) {
ibuf_update_free_bits_zip(next_block, mtr);
} else {
ibuf_update_free_bits_if_full(
@@ -3202,16 +3243,16 @@ func_exit:
return(rec);
}
-/** Removes a page from the level list of pages.
+/** Remove a page from the level list of pages.
@param[in] space space where removed
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in,out] page page to remove
@param[in] index index tree
@param[in,out] mtr mini-transaction */
void
btr_level_list_remove_func(
ulint space,
- const page_size_t& page_size,
+ ulint zip_size,
page_t* page,
dict_index_t* index,
mtr_t* mtr)
@@ -3230,7 +3271,7 @@ btr_level_list_remove_func(
if (prev_page_no != FIL_NULL) {
buf_block_t* prev_block
= btr_block_get(page_id_t(space, prev_page_no),
- page_size, RW_X_LATCH, index, mtr);
+ zip_size, RW_X_LATCH, index, mtr);
page_t* prev_page
= buf_block_get_frame(prev_block);
@@ -3248,7 +3289,7 @@ btr_level_list_remove_func(
if (next_page_no != FIL_NULL) {
buf_block_t* next_block
= btr_block_get(
- page_id_t(space, next_page_no), page_size,
+ page_id_t(space, next_page_no), zip_size,
RW_X_LATCH, index, mtr);
page_t* next_page
@@ -3455,12 +3496,7 @@ btr_lift_page_up(
if (page_level == 0 && index->is_instant()) {
ut_ad(!father_page_zip);
- byte* page_type = father_block->frame + FIL_PAGE_TYPE;
- ut_ad(mach_read_from_2(page_type) == FIL_PAGE_INDEX);
- mlog_write_ulint(page_type, FIL_PAGE_TYPE_INSTANT,
- MLOG_2BYTES, mtr);
- page_set_instant(father_block->frame,
- index->n_core_fields, mtr);
+ btr_set_instant(father_block, *index, mtr);
}
page_level++;
@@ -3597,7 +3633,7 @@ btr_compress(
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
- const page_size_t page_size(index->table->space->flags);
+ const ulint zip_size = index->table->space->zip_size();
MONITOR_INC(MONITOR_INDEX_MERGE_ATTEMPTS);
@@ -3755,7 +3791,7 @@ retry:
/* Remove the page from the level list */
btr_level_list_remove(index->table->space_id,
- page_size, page, index, mtr);
+ zip_size, page, index, mtr);
if (dict_index_is_spatial(index)) {
rec_t* my_rec = father_cursor.page_cur.rec;
@@ -3885,7 +3921,7 @@ retry:
/* Remove the page from the level list */
btr_level_list_remove(index->table->space_id,
- page_size, page, index, mtr);
+ zip_size, page, index, mtr);
ut_ad(btr_node_ptr_get_child_page_no(
btr_cur_get_rec(&father_cursor), offsets)
@@ -3993,7 +4029,7 @@ retry:
committed mini-transaction, because in crash recovery,
the free bits could momentarily be set too high. */
- if (page_size.is_compressed()) {
+ if (zip_size) {
/* Because the free bits may be incremented
and we cannot update the insert buffer bitmap
in the same mini-transaction, the only safe
@@ -4053,7 +4089,7 @@ func_exit:
err_exit:
/* We play it safe and reset the free bits. */
- if (page_size.is_compressed()
+ if (zip_size
&& merge_page
&& page_is_leaf(merge_page)
&& !dict_index_is_clust(index)) {
@@ -4136,15 +4172,42 @@ btr_discard_only_page_on_level(
}
#endif /* UNIV_BTR_DEBUG */
+ mem_heap_t* heap = NULL;
+ const rec_t* rec = NULL;
+ ulint* offsets = NULL;
+ if (index->table->instant) {
+ const rec_t* r = page_rec_get_next(page_get_infimum_rec(
+ block->frame));
+ ut_ad(rec_is_metadata(r, *index) == index->is_instant());
+ if (rec_is_alter_metadata(r, *index)) {
+ heap = mem_heap_create(srv_page_size);
+ offsets = rec_get_offsets(r, index, NULL, true,
+ ULINT_UNDEFINED, &heap);
+ rec = rec_copy(mem_heap_alloc(heap,
+ rec_offs_size(offsets)),
+ r, offsets);
+ rec_offs_make_valid(rec, index, true, offsets);
+ }
+ }
+
btr_page_empty(block, buf_block_get_page_zip(block), index, 0, mtr);
ut_ad(page_is_leaf(buf_block_get_frame(block)));
/* btr_page_empty() is supposed to zero-initialize the field. */
ut_ad(!page_get_instant(block->frame));
if (index->is_primary()) {
- /* Concurrent access is prevented by the root_block->lock
- X-latch, so this should be safe. */
- index->remove_instant();
+ if (rec) {
+ DBUG_ASSERT(index->table->instant);
+ DBUG_ASSERT(rec_is_alter_metadata(rec, *index));
+ btr_set_instant(block, *index, mtr);
+ rec = page_cur_insert_rec_low(
+ page_get_infimum_rec(block->frame),
+ index, rec, offsets, mtr);
+ ut_ad(rec);
+ mem_heap_free(heap);
+ } else if (index->is_instant()) {
+ index->clear_instant_add();
+ }
} else if (!index->table->is_temporary()) {
/* We play it safe and reset the free bits for the root */
ibuf_reset_free_bits(block);
@@ -4200,12 +4263,12 @@ btr_discard_page(
left_page_no = btr_page_get_prev(buf_block_get_frame(block), mtr);
right_page_no = btr_page_get_next(buf_block_get_frame(block), mtr);
- const page_size_t page_size(index->table->space->flags);
+ const ulint zip_size = index->table->space->zip_size();
ut_d(bool parent_is_different = false);
if (left_page_no != FIL_NULL) {
merge_block = btr_block_get(
page_id_t(index->table->space_id, left_page_no),
- page_size, RW_X_LATCH, index, mtr);
+ zip_size, RW_X_LATCH, index, mtr);
merge_page = buf_block_get_frame(merge_block);
#ifdef UNIV_BTR_DEBUG
@@ -4221,7 +4284,7 @@ btr_discard_page(
} else if (right_page_no != FIL_NULL) {
merge_block = btr_block_get(
page_id_t(index->table->space_id, right_page_no),
- page_size, RW_X_LATCH, index, mtr);
+ zip_size, RW_X_LATCH, index, mtr);
merge_page = buf_block_get_frame(merge_block);
#ifdef UNIV_BTR_DEBUG
@@ -4263,7 +4326,7 @@ btr_discard_page(
}
/* Remove the page from the level list */
- btr_level_list_remove(index->table->space_id, page_size,
+ btr_level_list_remove(index->table->space_id, zip_size,
page, index, mtr);
#ifdef UNIV_ZIP_DEBUG
@@ -4438,7 +4501,7 @@ btr_print_index(
mtr_commit(&mtr);
- ut_ad(btr_validate_index(index, 0, false));
+ ut_ad(btr_validate_index(index, 0));
}
#endif /* UNIV_BTR_PRINT */
@@ -4563,14 +4626,32 @@ btr_index_rec_validate(
return(FALSE);
}
+ const bool is_alter_metadata = page_is_leaf(page)
+ && !page_has_prev(page)
+ && index->is_primary() && index->table->instant
+ && rec == page_rec_get_next_const(page_get_infimum_rec(page));
+
+ if (is_alter_metadata
+ && !rec_is_alter_metadata(rec, page_is_comp(page))) {
+ btr_index_rec_validate_report(page, rec, index);
+
+ ib::error() << "First record is not ALTER TABLE metadata";
+ return FALSE;
+ }
+
if (!page_is_comp(page)) {
const ulint n_rec_fields = rec_get_n_fields_old(rec);
if (n_rec_fields == DICT_FLD__SYS_INDEXES__MERGE_THRESHOLD
&& index->id == DICT_INDEXES_ID) {
/* A record for older SYS_INDEXES table
(missing merge_threshold column) is acceptable. */
+ } else if (is_alter_metadata) {
+ if (n_rec_fields != ulint(index->n_fields) + 1) {
+ goto n_field_mismatch;
+ }
} else if (n_rec_fields < index->n_core_fields
|| n_rec_fields > index->n_fields) {
+n_field_mismatch:
btr_index_rec_validate_report(page, rec, index);
ib::error() << "Has " << rec_get_n_fields_old(rec)
@@ -4589,15 +4670,28 @@ btr_index_rec_validate(
offsets = rec_get_offsets(rec, index, offsets, page_is_leaf(page),
ULINT_UNDEFINED, &heap);
+ const dict_field_t* field = index->fields;
+ ut_ad(rec_offs_n_fields(offsets)
+ == ulint(index->n_fields) + is_alter_metadata);
- for (unsigned i = 0; i < index->n_fields; i++) {
- dict_field_t* field = dict_index_get_nth_field(index, i);
- ulint fixed_size = dict_col_get_fixed_size(
- dict_field_get_col(field),
- page_is_comp(page));
-
+ for (unsigned i = 0; i < rec_offs_n_fields(offsets); i++) {
rec_get_nth_field_offs(offsets, i, &len);
+ ulint fixed_size;
+
+ if (is_alter_metadata && i == index->first_user_field()) {
+ fixed_size = FIELD_REF_SIZE;
+ if (len != FIELD_REF_SIZE
+ || !rec_offs_nth_extern(offsets, i)) {
+ goto len_mismatch;
+ }
+
+ continue;
+ } else {
+ fixed_size = dict_col_get_fixed_size(
+ field->col, page_is_comp(page));
+ }
+
/* Note that if fixed_size != 0, it equals the
length of a fixed-size column in the clustered index.
We should adjust it here.
@@ -4609,8 +4703,8 @@ btr_index_rec_validate(
&& (field->prefix_len
? len > field->prefix_len
: (fixed_size && len != fixed_size))) {
+len_mismatch:
btr_index_rec_validate_report(page, rec, index);
-
ib::error error;
error << "Field " << i << " len is " << len
@@ -4628,6 +4722,8 @@ btr_index_rec_validate(
}
return(FALSE);
}
+
+ field++;
}
#ifdef VIRTUAL_INDEX_DEBUG
@@ -4793,19 +4889,7 @@ btr_validate_level(
page = buf_block_get_frame(block);
fil_space_t* space = index->table->space;
- const page_size_t table_page_size(
- dict_table_page_size(index->table));
- const page_size_t space_page_size(space->flags);
-
- if (!table_page_size.equals_to(space_page_size)) {
-
- ib::warn() << "Flags mismatch: table=" << index->table->flags
- << ", tablespace=" << space->flags;
-
- mtr_commit(&mtr);
-
- return(false);
- }
+ const ulint zip_size = space->zip_size();
while (level != btr_page_get_level(page)) {
const rec_t* node_ptr;
@@ -4858,7 +4942,7 @@ btr_validate_level(
block = btr_block_get(
page_id_t(index->table->space_id,
left_page_no),
- table_page_size,
+ zip_size,
RW_SX_LATCH, index, &mtr);
page = buf_block_get_frame(block);
left_page_no = btr_page_get_prev(page, &mtr);
@@ -4929,7 +5013,7 @@ loop:
right_block = btr_block_get(
page_id_t(index->table->space_id, right_page_no),
- table_page_size,
+ zip_size,
RW_SX_LATCH, index, &mtr);
right_page = buf_block_get_frame(right_block);
@@ -5107,13 +5191,13 @@ loop:
btr_block_get(
page_id_t(index->table->space_id,
parent_right_page_no),
- table_page_size,
+ zip_size,
RW_SX_LATCH, index, &mtr);
right_block = btr_block_get(
page_id_t(index->table->space_id,
right_page_no),
- table_page_size,
+ zip_size,
RW_SX_LATCH, index, &mtr);
}
@@ -5191,21 +5275,21 @@ node_ptr_fails:
page_id_t(
index->table->space_id,
parent_right_page_no),
- table_page_size,
+ zip_size,
RW_SX_LATCH, index, &mtr);
}
} else if (parent_page_no != FIL_NULL) {
btr_block_get(
page_id_t(index->table->space_id,
parent_page_no),
- table_page_size,
+ zip_size,
RW_SX_LATCH, index, &mtr);
}
}
block = btr_block_get(
page_id_t(index->table->space_id, right_page_no),
- table_page_size,
+ zip_size,
RW_SX_LATCH, index, &mtr);
page = buf_block_get_frame(block);
@@ -5219,57 +5303,16 @@ node_ptr_fails:
}
/**************************************************************//**
-Do an index level validation of spaital index tree.
-@return true if no error found */
-static
-bool
-btr_validate_spatial_index(
-/*=======================*/
- dict_index_t* index, /*!< in: index */
- const trx_t* trx) /*!< in: transaction or NULL */
-{
-
- mtr_t mtr;
- bool ok = true;
-
- mtr_start(&mtr);
-
- mtr_x_lock(dict_index_get_lock(index), &mtr);
-
- page_t* root = btr_root_get(index, &mtr);
- ulint n = btr_page_get_level(root);
-
-#ifdef UNIV_RTR_DEBUG
- fprintf(stderr, "R-tree level is %lu\n", n);
-#endif /* UNIV_RTR_DEBUG */
-
- for (ulint i = 0; i <= n; ++i) {
-#ifdef UNIV_RTR_DEBUG
- fprintf(stderr, "Level %lu:\n", n - i);
-#endif /* UNIV_RTR_DEBUG */
-
- if (!btr_validate_level(index, trx, n - i, true)) {
- ok = false;
- break;
- }
- }
-
- mtr_commit(&mtr);
-
- return(ok);
-}
-
-/**************************************************************//**
Checks the consistency of an index tree.
@return DB_SUCCESS if ok, error code if not */
dberr_t
btr_validate_index(
/*===============*/
dict_index_t* index, /*!< in: index */
- const trx_t* trx, /*!< in: transaction or NULL */
- bool lockout)/*!< in: true if X-latch index is intended */
+ const trx_t* trx) /*!< in: transaction or NULL */
{
dberr_t err = DB_SUCCESS;
+ bool lockout = dict_index_is_spatial(index);
/* Full Text index are implemented by auxiliary tables,
not the B-tree */
@@ -5277,13 +5320,6 @@ btr_validate_index(
return(err);
}
- if (dict_index_is_spatial(index)) {
- if(!btr_validate_spatial_index(index, trx)) {
- err = DB_ERROR;
- }
- return(err);
- }
-
mtr_t mtr;
mtr_start(&mtr);
@@ -5299,13 +5335,13 @@ btr_validate_index(
page_t* root = btr_root_get(index, &mtr);
if (!root) {
- err = DB_CORRUPTION;
mtr_commit(&mtr);
- return err;
+ return DB_CORRUPTION;
}
ulint n = btr_page_get_level(root);
+ btr_validate_index_running++;
for (ulint i = 0; i <= n; ++i) {
if (!btr_validate_level(index, trx, n - i, lockout)) {
@@ -5315,6 +5351,14 @@ btr_validate_index(
}
mtr_commit(&mtr);
+ /* In theory we need a release barrier here, so that the
+ btr_validate_index_running decrement is guaranteed to
+ happen after the latches are released.
+
+ The original code issued SEQ_CST on update and a non-atomic
+ access on load, which means its synchronisation was broken
+ as well. */
+ btr_validate_index_running--;
return(err);
}
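The comment above is about the ordering of the btr_validate_index_running decrement relative to the latch releases performed during validation. A stand-alone sketch of the acquire/release pairing being described, using std::atomic rather than the server's own counter type:

#include <atomic>

std::atomic<unsigned> validate_running{0};

// Sketch only: bracket the validation work so that the decrement is not
// reordered before the work (and its latch releases) has completed.
template <typename F>
void run_validation(F&& do_validate)
{
	validate_running.fetch_add(1, std::memory_order_acquire);
	do_validate();	// traverses the tree under index/page latches
	// Release ordering: writes performed inside do_validate() are
	// ordered before the decrement for any thread that observes the
	// counter with an acquire load.
	validate_running.fetch_sub(1, std::memory_order_release);
}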
@@ -5351,9 +5395,9 @@ btr_can_merge_with_page(
page = btr_cur_get_page(cursor);
const page_id_t page_id(index->table->space_id, page_no);
- const page_size_t page_size(index->table->space->flags);
+ const ulint zip_size = index->table->space->zip_size();
- mblock = btr_block_get(page_id, page_size, RW_X_LATCH, index, mtr);
+ mblock = btr_block_get(page_id, zip_size, RW_X_LATCH, index, mtr);
mpage = buf_block_get_frame(mblock);
n_recs = page_get_n_recs(page);
@@ -5369,7 +5413,7 @@ btr_can_merge_with_page(
/* If compression padding tells us that merging will result in
too packed up page i.e.: which is likely to cause compression
failure then don't merge the pages. */
- if (page_size.is_compressed() && page_is_leaf(mpage)
+ if (zip_size && page_is_leaf(mpage)
&& (page_get_data_size(mpage) + data_size
>= dict_index_zip_pad_optimal_page_size(index))) {
diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc
index edd26a672be..82fac3bce2d 100644
--- a/storage/innobase/btr/btr0bulk.cc
+++ b/storage/innobase/btr/btr0bulk.cc
@@ -94,7 +94,7 @@ PageBulk::init()
if (new_page_zip) {
page_create_zip(new_block, m_index, m_level, 0,
- NULL, &m_mtr);
+ &m_mtr);
memset(FIL_PAGE_PREV + new_page, 0xff, 8);
page_zip_write_header(new_page_zip,
FIL_PAGE_PREV + new_page,
@@ -121,7 +121,7 @@ PageBulk::init()
} else {
new_block = btr_block_get(
page_id_t(m_index->table->space_id, m_page_no),
- page_size_t(m_index->table->space->flags),
+ m_index->table->space->zip_size(),
RW_X_LATCH, m_index, &m_mtr);
new_page = buf_block_get_frame(new_block);
@@ -374,7 +374,7 @@ PageBulk::compress()
ut_ad(m_page_zip != NULL);
return(page_zip_compress(m_page_zip, m_page, m_index,
- page_zip_level, NULL, &m_mtr));
+ page_zip_level, &m_mtr));
}
/** Get node pointer
@@ -589,8 +589,9 @@ PageBulk::needExt(
const dtuple_t* tuple,
ulint rec_size)
{
- return(page_zip_rec_needs_ext(rec_size, m_is_comp,
- dtuple_get_n_fields(tuple), m_block->page.size));
+ return page_zip_rec_needs_ext(rec_size, m_is_comp,
+ dtuple_get_n_fields(tuple),
+ m_block->zip_size());
}
/** Store external record
@@ -664,7 +665,7 @@ PageBulk::latch()
__FILE__, __LINE__, &m_mtr)) {
m_block = buf_page_get_gen(page_id_t(m_index->table->space_id,
m_page_no),
- univ_page_size, RW_X_LATCH,
+ 0, RW_X_LATCH,
m_block, BUF_GET_IF_IN_POOL,
__FILE__, __LINE__, &m_mtr, &m_err);
@@ -1017,7 +1018,7 @@ BtrBulk::finish(dberr_t err)
ut_ad(last_page_no != FIL_NULL);
last_block = btr_block_get(
page_id_t(m_index->table->space_id, last_page_no),
- page_size_t(m_index->table->space->flags),
+ m_index->table->space->zip_size(),
RW_X_LATCH, m_index, &mtr);
first_rec = page_rec_get_next(
page_get_infimum_rec(last_block->frame));
@@ -1046,6 +1047,6 @@ BtrBulk::finish(dberr_t err)
ut_ad(!sync_check_iterate(dict_sync_check()));
ut_ad(err != DB_SUCCESS
- || btr_validate_index(m_index, NULL, false) == DB_SUCCESS);
+ || btr_validate_index(m_index, NULL) == DB_SUCCESS);
return(err);
}
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 25ed6fc1654..1ba0febd415 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -210,6 +210,7 @@ btr_rec_free_externally_stored_fields(
/** Latches the leaf page or pages requested.
@param[in] block leaf page where the search converged
@param[in] page_id page id of the leaf
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] latch_mode BTR_SEARCH_LEAF, ...
@param[in] cursor cursor
@param[in] mtr mini-transaction
@@ -218,7 +219,7 @@ btr_latch_leaves_t
btr_cur_latch_leaves(
buf_block_t* block,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
ulint latch_mode,
btr_cur_t* cursor,
mtr_t* mtr)
@@ -249,7 +250,7 @@ btr_cur_latch_leaves(
mode = latch_mode == BTR_MODIFY_LEAF ? RW_X_LATCH : RW_S_LATCH;
latch_leaves.savepoints[1] = mtr_set_savepoint(mtr);
- get_block = btr_block_get(page_id, page_size, mode,
+ get_block = btr_block_get(page_id, zip_size, mode,
cursor->index, mtr);
latch_leaves.blocks[1] = get_block;
#ifdef UNIV_BTR_DEBUG
@@ -282,7 +283,7 @@ btr_cur_latch_leaves(
latch_leaves.savepoints[0] = mtr_set_savepoint(mtr);
get_block = btr_block_get(
page_id_t(page_id.space(), left_page_no),
- page_size, RW_X_LATCH, cursor->index, mtr);
+ zip_size, RW_X_LATCH, cursor->index, mtr);
latch_leaves.blocks[0] = get_block;
if (spatial) {
@@ -298,7 +299,7 @@ btr_cur_latch_leaves(
latch_leaves.savepoints[1] = mtr_set_savepoint(mtr);
get_block = btr_block_get(
- page_id, page_size, RW_X_LATCH, cursor->index, mtr);
+ page_id, zip_size, RW_X_LATCH, cursor->index, mtr);
latch_leaves.blocks[1] = get_block;
#ifdef UNIV_BTR_DEBUG
@@ -329,7 +330,7 @@ btr_cur_latch_leaves(
latch_leaves.savepoints[2] = mtr_set_savepoint(mtr);
get_block = btr_block_get(
page_id_t(page_id.space(), right_page_no),
- page_size, RW_X_LATCH, cursor->index, mtr);
+ zip_size, RW_X_LATCH, cursor->index, mtr);
latch_leaves.blocks[2] = get_block;
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(get_block->frame)
@@ -357,7 +358,7 @@ btr_cur_latch_leaves(
latch_leaves.savepoints[0] = mtr_set_savepoint(mtr);
get_block = btr_block_get(
page_id_t(page_id.space(), left_page_no),
- page_size, mode, cursor->index, mtr);
+ zip_size, mode, cursor->index, mtr);
latch_leaves.blocks[0] = get_block;
cursor->left_block = get_block;
#ifdef UNIV_BTR_DEBUG
@@ -369,7 +370,7 @@ btr_cur_latch_leaves(
}
latch_leaves.savepoints[1] = mtr_set_savepoint(mtr);
- get_block = btr_block_get(page_id, page_size, mode,
+ get_block = btr_block_get(page_id, zip_size, mode,
cursor->index, mtr);
latch_leaves.blocks[1] = get_block;
#ifdef UNIV_BTR_DEBUG
@@ -422,8 +423,12 @@ unreadable:
}
btr_cur_t cur;
+ /* Relax the assertion in rec_init_offsets(). */
+ ut_ad(!index->in_instant_init);
+ ut_d(index->in_instant_init = true);
dberr_t err = btr_cur_open_at_index_side(true, index, BTR_SEARCH_LEAF,
&cur, 0, mtr);
+ ut_d(index->in_instant_init = false);
if (err != DB_SUCCESS) {
index->table->corrupted = true;
return err;
@@ -457,8 +462,8 @@ unreadable:
return DB_CORRUPTION;
}
- if (info_bits != REC_INFO_MIN_REC_FLAG
- || (comp && rec_get_status(rec) != REC_STATUS_COLUMNS_ADDED)) {
+ if ((info_bits & ~REC_INFO_DELETED_FLAG) != REC_INFO_MIN_REC_FLAG
+ || (comp && rec_get_status(rec) != REC_STATUS_INSTANT)) {
incompatible:
ib::error() << "Table " << index->table->name
<< " contains unrecognizable instant ALTER metadata";
@@ -476,6 +481,72 @@ incompatible:
concurrent operations on the table, including table eviction
from the cache. */
+ if (info_bits & REC_INFO_DELETED_FLAG) {
+ /* This metadata record includes a BLOB that identifies
+ any dropped or reordered columns. */
+ ulint trx_id_offset = index->trx_id_offset;
+ if (!trx_id_offset) {
+ /* The PRIMARY KEY contains variable-length columns.
+ For the metadata record, variable-length columns are
+ always written with zero length. The DB_TRX_ID will
+ start right after any fixed-length columns. */
+ for (uint i = index->n_uniq; i--; ) {
+ trx_id_offset += index->fields[i].fixed_len;
+ }
+ }
+
+ const byte* ptr = rec + trx_id_offset
+ + (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN);
+
+ if (mach_read_from_4(ptr + BTR_EXTERN_LEN)) {
+ goto incompatible;
+ }
+
+ uint len = mach_read_from_4(ptr + BTR_EXTERN_LEN + 4);
+ if (!len
+ || mach_read_from_4(ptr + BTR_EXTERN_OFFSET)
+ != FIL_PAGE_DATA
+ || mach_read_from_4(ptr + BTR_EXTERN_SPACE_ID)
+ != space->id) {
+ goto incompatible;
+ }
+
+ buf_block_t* block = buf_page_get(
+ page_id_t(space->id,
+ mach_read_from_4(ptr + BTR_EXTERN_PAGE_NO)),
+ 0, RW_S_LATCH, mtr);
+ buf_block_dbg_add_level(block, SYNC_EXTERN_STORAGE);
+ if (fil_page_get_type(block->frame) != FIL_PAGE_TYPE_BLOB
+ || mach_read_from_4(&block->frame[FIL_PAGE_DATA
+ + BTR_BLOB_HDR_NEXT_PAGE_NO])
+ != FIL_NULL
+ || mach_read_from_4(&block->frame[FIL_PAGE_DATA
+ + BTR_BLOB_HDR_PART_LEN])
+ != len) {
+ goto incompatible;
+ }
+
+ /* The unused part of the BLOB page should be zero-filled. */
+ for (const byte* b = block->frame
+ + (FIL_PAGE_DATA + BTR_BLOB_HDR_SIZE) + len,
+ * const end = block->frame + srv_page_size
+ - BTR_EXTERN_LEN;
+ b < end; ) {
+ if (*b++) {
+ goto incompatible;
+ }
+ }
+
+ if (index->table->deserialise_columns(
+ &block->frame[FIL_PAGE_DATA + BTR_BLOB_HDR_SIZE],
+ len)) {
+ goto incompatible;
+ }
+
+ /* Proceed to initialize the default values of
+ any instantly added columns. */
+ }
+
mem_heap_t* heap = NULL;
ulint* offsets = rec_get_offsets(rec, index, NULL, true,
ULINT_UNDEFINED, &heap);
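The block above validates the external reference stored in the delete-marked metadata record before dereferencing it: the upper half of the 8-byte length must be zero, the stored length non-zero, and the offset and space id must match expectations. All multi-byte fields are read big-endian via mach_read_from_4(). A self-contained sketch of that style of check; the offsets are passed in as parameters rather than using the real BTR_EXTERN_* constants:

#include <cstdint>

// Big-endian 4-byte read, the same byte order mach_read_from_4() uses.
inline uint32_t read_be32(const unsigned char* p)
{
	return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16)
	     | (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
}

// Sketch of the plausibility checks performed on the metadata BLOB
// pointer. 'len_off', 'offset_off' and 'space_off' are illustrative
// parameters, not the actual BTR_EXTERN_* offsets.
inline bool blob_ref_plausible(const unsigned char* ref,
			       unsigned len_off, unsigned offset_off,
			       unsigned space_off,
			       uint32_t expected_offset,
			       uint32_t expected_space_id)
{
	return read_be32(ref + len_off) == 0		// high length word
	    && read_be32(ref + len_off + 4) != 0	// stored length
	    && read_be32(ref + offset_off) == expected_offset
	    && read_be32(ref + space_off) == expected_space_id;
}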
@@ -489,7 +560,8 @@ inconsistent:
record, it is also OK to perform READ UNCOMMITTED and
then ignore any extra fields, provided that
trx_sys.is_registered(DB_TRX_ID). */
- if (rec_offs_n_fields(offsets) > index->n_fields
+ if (rec_offs_n_fields(offsets)
+ > ulint(index->n_fields) + !!index->table->instant
&& !trx_sys.is_registered(current_trx(),
row_get_rec_trx_id(rec, index,
offsets))) {
@@ -497,10 +569,11 @@ inconsistent:
}
for (unsigned i = index->n_core_fields; i < index->n_fields; i++) {
- ulint len;
- const byte* data = rec_get_nth_field(rec, offsets, i, &len);
dict_col_t* col = index->fields[i].col;
- ut_ad(!col->is_instant());
+ const unsigned o = i + !!index->table->instant;
+ ulint len;
+ const byte* data = rec_get_nth_field(rec, offsets, o, &len);
+ ut_ad(!col->is_added());
ut_ad(!col->def_val.data);
col->def_val.len = len;
switch (len) {
@@ -511,7 +584,7 @@ inconsistent:
continue;
}
ut_ad(len != UNIV_SQL_DEFAULT);
- if (!rec_offs_nth_extern(offsets, i)) {
+ if (!rec_offs_nth_extern(offsets, o)) {
col->def_val.data = mem_heap_dup(
index->table->heap, data, len);
} else if (len < BTR_EXTERN_FIELD_REF_SIZE
@@ -523,7 +596,7 @@ inconsistent:
} else {
col->def_val.data = btr_copy_externally_stored_field(
&col->def_val.len, data,
- dict_table_page_size(index->table),
+ cur.page_cur.block->zip_size(),
len, index->table->heap);
}
}
@@ -592,30 +665,49 @@ bool btr_cur_instant_root_init(dict_index_t* index, const page_t* page)
const uint16_t n = page_get_instant(page);
- if (n < index->n_uniq + DATA_ROLL_PTR || n > index->n_fields) {
+ if (n < index->n_uniq + DATA_ROLL_PTR) {
/* The PRIMARY KEY (or hidden DB_ROW_ID) and
DB_TRX_ID,DB_ROLL_PTR columns must always be present
- as 'core' fields. All fields, including those for
- instantly added columns, must be present in the data
- dictionary. */
+ as 'core' fields. */
return true;
}
- if (memcmp(page_get_infimum_rec(page), "infimum", 8)
- || memcmp(page_get_supremum_rec(page), "supremum", 8)) {
- /* In a later format, these fields in a FIL_PAGE_TYPE_INSTANT
- root page could be repurposed for something else. */
+ if (n > REC_MAX_N_FIELDS) {
return true;
}
index->n_core_fields = n;
- ut_ad(!index->is_dummy);
- ut_d(index->is_dummy = true);
- index->n_core_null_bytes = n == index->n_fields
- ? UT_BITS_IN_BYTES(unsigned(index->n_nullable))
- : UT_BITS_IN_BYTES(index->get_n_nullable(n));
- ut_d(index->is_dummy = false);
- return false;
+
+ const rec_t* infimum = page_get_infimum_rec(page);
+ const rec_t* supremum = page_get_supremum_rec(page);
+
+ if (!memcmp(infimum, "infimum", 8)
+ && !memcmp(supremum, "supremum", 8)) {
+ if (n > index->n_fields) {
+ /* All fields, including those for instantly
+ added columns, must be present in the
+ data dictionary. */
+ return true;
+ }
+
+ ut_ad(!index->is_dummy);
+ ut_d(index->is_dummy = true);
+ index->n_core_null_bytes = UT_BITS_IN_BYTES(
+ index->get_n_nullable(n));
+ ut_d(index->is_dummy = false);
+ return false;
+ }
+
+ if (memcmp(infimum, field_ref_zero, 8)
+ || memcmp(supremum, field_ref_zero, 7)) {
+ /* The infimum and supremum records must either contain
+ the original strings, or they must be filled with zero
+ bytes, except for the bytes that we have repurposed. */
+ return true;
+ }
+
+ index->n_core_null_bytes = supremum[7];
+ return index->n_core_null_bytes > 128;
}
/** Optimistically latches the leaf page or pages requested.
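btr_cur_instant_root_init() now recognises two on-page layouts: the classic one, where the infimum and supremum system records still spell out their names, and a newer layout where those bytes are zero-filled except for one repurposed supremum byte holding the number of core null bytes. A sketch of that two-way classification, with a local zero buffer in place of field_ref_zero and an enum result in place of the real error handling:

#include <cstring>

enum root_layout { ROOT_CLASSIC, ROOT_REPURPOSED, ROOT_CORRUPTED };

// 'infimum' and 'supremum' point at the 8-byte string areas of the two
// system records on the root page.
inline root_layout classify_instant_root(const unsigned char* infimum,
					 const unsigned char* supremum,
					 unsigned* n_core_null_bytes)
{
	static const unsigned char zero[8] = {0};

	if (!std::memcmp(infimum, "infimum", 8)
	    && !std::memcmp(supremum, "supremum", 8)) {
		// Classic layout: the caller derives the null-byte count
		// from the data dictionary, as the patch does.
		return ROOT_CLASSIC;
	}

	if (!std::memcmp(infimum, zero, 8)
	    && !std::memcmp(supremum, zero, 7)) {
		*n_core_null_bytes = supremum[7];
		// The patch treats values above 128 as corruption.
		return supremum[7] > 128 ? ROOT_CORRUPTED : ROOT_REPURPOSED;
	}

	return ROOT_CORRUPTED;
}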
@@ -673,8 +765,7 @@ btr_cur_optimistic_latch_leaves(
cursor->left_block = btr_block_get(
page_id_t(cursor->index->table->space_id,
left_page_no),
- page_size_t(cursor->index->table->space
- ->flags),
+ cursor->index->table->space->zip_size(),
mode, cursor->index, mtr);
} else {
cursor->left_block = NULL;
@@ -775,7 +866,7 @@ btr_cur_latch_for_root_leaf(
@param[in] lock_intention lock intention for the tree operation
@param[in] rec record (current node_ptr)
@param[in] rec_size size of the record or max size of node_ptr
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] mtr mtr
@return true if tree modification is needed */
static
@@ -786,7 +877,7 @@ btr_cur_will_modify_tree(
btr_intention_t lock_intention,
const rec_t* rec,
ulint rec_size,
- const page_size_t& page_size,
+ ulint zip_size,
mtr_t* mtr)
{
ut_ad(!page_is_leaf(page));
@@ -874,9 +965,8 @@ btr_cur_will_modify_tree(
This is based on the worst case, and we could invoke
page_zip_available() on the block->page.zip. */
/* needs 2 records' space also for worst compress rate. */
- if (page_size.is_compressed()
- && page_zip_empty_size(index->n_fields,
- page_size.physical())
+ if (zip_size
+ && page_zip_empty_size(index->n_fields, zip_size)
<= rec_size * 2 + page_get_data_size(page)
+ page_dir_calc_reserved_space(n_recs + 2)) {
return(true);
@@ -1315,7 +1405,7 @@ btr_cur_search_to_nth_level_func(
Free blocks and read IO bandwidth should be prior
for them, when the history list is glowing huge. */
if (lock_intention == BTR_INTENTION_DELETE
- && trx_sys.history_size() > BTR_CUR_FINE_HISTORY_LENGTH
+ && trx_sys.rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH
&& buf_get_n_pending_read_ios()) {
mtr_x_lock(dict_index_get_lock(index), mtr);
} else if (dict_index_is_spatial(index)
@@ -1372,7 +1462,7 @@ btr_cur_search_to_nth_level_func(
page_cursor = btr_cur_get_page_cur(cursor);
- const page_size_t page_size(index->table->space->flags);
+ const ulint zip_size = index->table->space->zip_size();
/* Start with the root page. */
page_id_t page_id(index->table->space_id, index->page);
@@ -1455,7 +1545,7 @@ search_loop:
retry_page_get:
ut_ad(n_blocks < BTR_MAX_LEVELS);
tree_savepoints[n_blocks] = mtr_set_savepoint(mtr);
- block = buf_page_get_gen(page_id, page_size, rw_latch, guess,
+ block = buf_page_get_gen(page_id, zip_size, rw_latch, guess,
buf_mode, file, line, mtr, &err);
tree_blocks[n_blocks] = block;
@@ -1491,7 +1581,7 @@ retry_page_get:
ut_ad(!dict_index_is_spatial(index));
if (ibuf_insert(IBUF_OP_INSERT, tuple, index,
- page_id, page_size, cursor->thr)) {
+ page_id, zip_size, cursor->thr)) {
cursor->flag = BTR_CUR_INSERT_TO_IBUF;
@@ -1504,7 +1594,7 @@ retry_page_get:
ut_ad(!dict_index_is_spatial(index));
if (ibuf_insert(IBUF_OP_DELETE_MARK, tuple,
- index, page_id, page_size,
+ index, page_id, zip_size,
cursor->thr)) {
cursor->flag = BTR_CUR_DEL_MARK_IBUF;
@@ -1524,7 +1614,7 @@ retry_page_get:
/* The record cannot be purged yet. */
cursor->flag = BTR_CUR_DELETE_REF;
} else if (ibuf_insert(IBUF_OP_DELETE, tuple,
- index, page_id, page_size,
+ index, page_id, zip_size,
cursor->thr)) {
/* The purge was buffered. */
@@ -1571,7 +1661,7 @@ retry_page_get:
= mtr_set_savepoint(mtr);
get_block = buf_page_get_gen(
page_id_t(page_id.space(), left_page_no),
- page_size, rw_latch, NULL, buf_mode,
+ zip_size, rw_latch, NULL, buf_mode,
file, line, mtr, &err);
prev_tree_blocks[prev_n_blocks] = get_block;
prev_n_blocks++;
@@ -1601,7 +1691,7 @@ retry_page_get:
tree_blocks[n_blocks]);
tree_savepoints[n_blocks] = mtr_set_savepoint(mtr);
- block = buf_page_get_gen(page_id, page_size, rw_latch, NULL,
+ block = buf_page_get_gen(page_id, zip_size, rw_latch, NULL,
buf_mode, file, line, mtr, &err);
tree_blocks[n_blocks] = block;
@@ -1699,7 +1789,7 @@ retry_page_get:
if (rw_latch == RW_NO_LATCH) {
latch_leaves = btr_cur_latch_leaves(
- block, page_id, page_size, latch_mode,
+ block, page_id, zip_size, latch_mode,
cursor, mtr);
}
@@ -2063,7 +2153,7 @@ need_opposite_intention:
&& latch_mode == BTR_MODIFY_TREE
&& !btr_cur_will_modify_tree(
index, page, lock_intention, node_ptr,
- node_ptr_max_size, page_size, mtr)
+ node_ptr_max_size, zip_size, mtr)
&& !rtree_parent_modified) {
ut_ad(upper_rw_latch == RW_X_LATCH);
ut_ad(n_releases <= n_blocks);
@@ -2261,12 +2351,12 @@ need_opposite_intention:
if (latch_mode == BTR_CONT_MODIFY_TREE) {
child_block = btr_block_get(
- page_id, page_size, RW_X_LATCH,
+ page_id, zip_size, RW_X_LATCH,
index, mtr);
} else {
ut_ad(latch_mode == BTR_CONT_SEARCH_TREE);
child_block = btr_block_get(
- page_id, page_size, RW_SX_LATCH,
+ page_id, zip_size, RW_SX_LATCH,
index, mtr);
}
@@ -2321,9 +2411,10 @@ need_opposite_intention:
ut_ad(index->is_instant());
/* This may be a search tuple for
btr_pcur_restore_position(). */
- ut_ad(tuple->info_bits == REC_INFO_METADATA
- || tuple->info_bits == REC_INFO_MIN_REC_FLAG);
- } else if (rec_is_metadata(btr_cur_get_rec(cursor), index)) {
+ ut_ad(tuple->is_metadata()
+ || (tuple->is_metadata(tuple->info_bits
+ ^ REC_STATUS_INSTANT)));
+ } else if (rec_is_metadata(btr_cur_get_rec(cursor), *index)) {
/* Only user records belong in the adaptive
hash index. */
} else {
@@ -2450,7 +2541,7 @@ btr_cur_open_at_index_side_func(
Free blocks and read IO bandwidth should be prior
for them, when the history list is glowing huge. */
if (lock_intention == BTR_INTENTION_DELETE
- && trx_sys.history_size() > BTR_CUR_FINE_HISTORY_LENGTH
+ && trx_sys.rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH
&& buf_get_n_pending_read_ios()) {
mtr_x_lock(dict_index_get_lock(index), mtr);
} else {
@@ -2483,7 +2574,7 @@ btr_cur_open_at_index_side_func(
cursor->index = index;
page_id_t page_id(index->table->space_id, index->page);
- const page_size_t page_size(index->table->space->flags);
+ const ulint zip_size = index->table->space->zip_size();
if (root_leaf_rw_latch == RW_X_LATCH) {
node_ptr_max_size = btr_node_ptr_max_size(index);
@@ -2506,7 +2597,7 @@ btr_cur_open_at_index_side_func(
}
tree_savepoints[n_blocks] = mtr_set_savepoint(mtr);
- block = buf_page_get_gen(page_id, page_size, rw_latch, NULL,
+ block = buf_page_get_gen(page_id, zip_size, rw_latch, NULL,
BUF_GET, file, line, mtr, &err);
ut_ad((block != NULL) == (err == DB_SUCCESS));
tree_blocks[n_blocks] = block;
@@ -2562,12 +2653,12 @@ btr_cur_open_at_index_side_func(
if (height == level) {
if (srv_read_only_mode) {
btr_cur_latch_leaves(
- block, page_id, page_size,
+ block, page_id, zip_size,
latch_mode, cursor, mtr);
} else if (height == 0) {
if (rw_latch == RW_NO_LATCH) {
btr_cur_latch_leaves(
- block, page_id, page_size,
+ block, page_id, zip_size,
latch_mode, cursor, mtr);
}
/* In versions <= 3.23.52 we had
@@ -2698,7 +2789,7 @@ btr_cur_open_at_index_side_func(
if (latch_mode == BTR_MODIFY_TREE
&& !btr_cur_will_modify_tree(
cursor->index, page, lock_intention, node_ptr,
- node_ptr_max_size, page_size, mtr)) {
+ node_ptr_max_size, zip_size, mtr)) {
ut_ad(upper_rw_latch == RW_X_LATCH);
ut_ad(n_releases <= n_blocks);
@@ -2795,7 +2886,7 @@ btr_cur_open_at_rnd_pos_func(
Free blocks and read IO bandwidth should be prior
for them, when the history list is glowing huge. */
if (lock_intention == BTR_INTENTION_DELETE
- && trx_sys.history_size() > BTR_CUR_FINE_HISTORY_LENGTH
+ && trx_sys.rseg_history_len > BTR_CUR_FINE_HISTORY_LENGTH
&& buf_get_n_pending_read_ios()) {
mtr_x_lock(dict_index_get_lock(index), mtr);
} else {
@@ -2840,7 +2931,7 @@ btr_cur_open_at_rnd_pos_func(
cursor->index = index;
page_id_t page_id(index->table->space_id, index->page);
- const page_size_t page_size(index->table->space->flags);
+ const ulint zip_size = index->table->space->zip_size();
dberr_t err = DB_SUCCESS;
if (root_leaf_rw_latch == RW_X_LATCH) {
@@ -2864,7 +2955,7 @@ btr_cur_open_at_rnd_pos_func(
}
tree_savepoints[n_blocks] = mtr_set_savepoint(mtr);
- block = buf_page_get_gen(page_id, page_size, rw_latch, NULL,
+ block = buf_page_get_gen(page_id, zip_size, rw_latch, NULL,
BUF_GET, file, line, mtr, &err);
tree_blocks[n_blocks] = block;
@@ -2917,7 +3008,7 @@ btr_cur_open_at_rnd_pos_func(
if (rw_latch == RW_NO_LATCH
|| srv_read_only_mode) {
btr_cur_latch_leaves(
- block, page_id, page_size,
+ block, page_id, zip_size,
latch_mode, cursor, mtr);
}
@@ -2993,7 +3084,7 @@ btr_cur_open_at_rnd_pos_func(
if (latch_mode == BTR_MODIFY_TREE
&& !btr_cur_will_modify_tree(
cursor->index, page, lock_intention, node_ptr,
- node_ptr_max_size, page_size, mtr)) {
+ node_ptr_max_size, zip_size, mtr)) {
ut_ad(upper_rw_latch == RW_X_LATCH);
ut_ad(n_releases <= n_blocks);
@@ -3170,8 +3261,11 @@ btr_cur_ins_lock_and_undo(
roll_ptr = roll_ptr_t(1) << ROLL_PTR_INSERT_FLAG_POS;
if (!(flags & BTR_KEEP_SYS_FLAG)) {
upd_sys:
- row_upd_index_entry_sys_field(entry, index,
- DATA_ROLL_PTR, roll_ptr);
+ dfield_t* r = dtuple_get_nth_field(
+ entry, index->db_roll_ptr());
+ ut_ad(r->len == DATA_ROLL_PTR_LEN);
+ trx_write_roll_ptr(static_cast<byte*>(r->data),
+ roll_ptr);
}
} else {
err = trx_undo_report_row_operation(thr, index, entry,
@@ -3203,12 +3297,12 @@ btr_cur_prefetch_siblings(
if (left_page_no != FIL_NULL) {
buf_read_page_background(
page_id_t(block->page.id.space(), left_page_no),
- block->page.size, false);
+ block->zip_size(), false);
}
if (right_page_no != FIL_NULL) {
buf_read_page_background(
page_id_t(block->page.id.space(), right_page_no),
- block->page.size, false);
+ block->zip_size(), false);
}
if (left_page_no != FIL_NULL
|| right_page_no != FIL_NULL) {
@@ -3275,23 +3369,28 @@ btr_cur_optimistic_insert(
|| (flags & BTR_CREATE_FLAG));
ut_ad(dtuple_check_typed(entry));
- const page_size_t& page_size = block->page.size;
-
#ifdef UNIV_DEBUG_VALGRIND
- if (page_size.is_compressed()) {
- UNIV_MEM_ASSERT_RW(page, page_size.logical());
- UNIV_MEM_ASSERT_RW(block->page.zip.data, page_size.physical());
+ if (block->page.zip.data) {
+ UNIV_MEM_ASSERT_RW(page, srv_page_size);
+ UNIV_MEM_ASSERT_RW(block->page.zip.data,
+ block->zip_size());
}
#endif /* UNIV_DEBUG_VALGRIND */
leaf = page_is_leaf(page);
+ if (UNIV_UNLIKELY(entry->is_alter_metadata())) {
+ ut_ad(leaf);
+ goto convert_big_rec;
+ }
+
/* Calculate the record size when entry is converted to a record */
rec_size = rec_get_converted_size(index, entry, n_ext);
if (page_zip_rec_needs_ext(rec_size, page_is_comp(page),
- dtuple_get_n_fields(entry), page_size)) {
-
+ dtuple_get_n_fields(entry),
+ block->zip_size())) {
+convert_big_rec:
/* The record is so big that we have to store some fields
externally on separate database pages */
big_rec_vec = dtuple_convert_big_rec(index, 0, entry, &n_ext);
@@ -3304,7 +3403,7 @@ btr_cur_optimistic_insert(
rec_size = rec_get_converted_size(index, entry, n_ext);
}
- if (page_size.is_compressed() && page_zip_is_too_big(index, entry)) {
+ if (block->page.zip.data && page_zip_is_too_big(index, entry)) {
if (big_rec_vec != NULL) {
dtuple_convert_back_big_rec(index, entry, big_rec_vec);
}
@@ -3315,7 +3414,7 @@ btr_cur_optimistic_insert(
LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page),
goto fail);
- if (leaf && page_size.is_compressed()
+ if (block->page.zip.data && leaf
&& (page_get_data_size(page) + rec_size
>= dict_index_zip_pad_optimal_page_size(index))) {
/* If compression padding tells us that insertion will
@@ -3358,7 +3457,7 @@ fail_err:
we have to split the page to reserve enough free space for
future updates of records. */
- if (leaf && !page_size.is_compressed() && dict_index_is_clust(index)
+ if (leaf && !block->page.zip.data && dict_index_is_clust(index)
&& page_get_n_recs(page) >= 2
&& dict_index_get_space_reserve() + rec_size > max_size
&& (btr_page_get_split_rec_to_right(cursor, &dummy)
@@ -3421,7 +3520,7 @@ fail_err:
}
if (*rec) {
- } else if (page_size.is_compressed()) {
+ } else if (block->page.zip.data) {
ut_ad(!index->table->is_temporary());
/* Reset the IBUF_BITMAP_FREE bits, because
page_cur_tuple_insert() will have attempted page
@@ -3462,7 +3561,7 @@ fail_err:
} else if (index->disable_ahi) {
# endif
} else if (entry->info_bits & REC_INFO_MIN_REC_FLAG) {
- ut_ad(entry->info_bits == REC_INFO_METADATA);
+ ut_ad(entry->is_metadata());
ut_ad(index->is_instant());
ut_ad(flags == BTR_NO_LOCKING_FLAG);
} else {
@@ -3497,7 +3596,7 @@ fail_err:
committed mini-transaction, because in crash recovery,
the free bits could momentarily be set too high. */
- if (page_size.is_compressed()) {
+ if (block->page.zip.data) {
/* Update the bits in the same mini-transaction. */
ibuf_update_free_bits_zip(block, mtr);
} else {
@@ -3595,9 +3694,14 @@ btr_cur_pessimistic_insert(
}
if (page_zip_rec_needs_ext(rec_get_converted_size(index, entry, n_ext),
- dict_table_is_comp(index->table),
+ index->table->not_redundant(),
dtuple_get_n_fields(entry),
- dict_table_page_size(index->table))) {
+ btr_cur_get_block(cursor)->zip_size())
+ || UNIV_UNLIKELY(entry->is_alter_metadata()
+ && !dfield_is_ext(
+ dtuple_get_nth_field(
+ entry,
+ index->first_user_field())))) {
/* The record is so big that we have to store some fields
externally on separate database pages */
@@ -3670,10 +3774,10 @@ btr_cur_pessimistic_insert(
if (index->disable_ahi); else
# endif
if (entry->info_bits & REC_INFO_MIN_REC_FLAG) {
- ut_ad(entry->info_bits == REC_INFO_METADATA);
+ ut_ad(entry->is_metadata());
ut_ad(index->is_instant());
- ut_ad((flags & ulint(~BTR_KEEP_IBUF_BITMAP))
- == BTR_NO_LOCKING_FLAG);
+ ut_ad(flags & BTR_NO_LOCKING_FLAG);
+ ut_ad(!(flags & BTR_CREATE_FLAG));
} else {
btr_search_update_hash_on_insert(
cursor, btr_get_search_latch(index));
@@ -3755,6 +3859,50 @@ btr_cur_upd_lock_and_undo(
cmpl_info, rec, offsets, roll_ptr));
}
+/** Copy DB_TRX_ID,DB_ROLL_PTR to the redo log.
+@param[in] index clustered index
+@param[in] trx_id DB_TRX_ID
+@param[in] roll_ptr DB_ROLL_PTR
+@param[in,out] log_ptr redo log buffer
+@return current end of the redo log buffer */
+static byte*
+btr_cur_log_sys(
+ const dict_index_t* index,
+ trx_id_t trx_id,
+ roll_ptr_t roll_ptr,
+ byte* log_ptr)
+{
+ log_ptr += mach_write_compressed(log_ptr, index->db_trx_id());
+ /* Yes, we are writing DB_ROLL_PTR,DB_TRX_ID in reverse order,
+ after emitting the position of DB_TRX_ID in the index.
+ This is how row_upd_write_sys_vals_to_log()
+ originally worked, and it is part of the redo log format. */
+ trx_write_roll_ptr(log_ptr, roll_ptr);
+ log_ptr += DATA_ROLL_PTR_LEN;
+ log_ptr += mach_u64_write_compressed(log_ptr, trx_id);
+
+ return log_ptr;
+}
+
+/** Write DB_TRX_ID,DB_ROLL_PTR to a clustered index entry.
+@param[in,out] entry clustered index entry
+@param[in] index clustered index
+@param[in] trx_id DB_TRX_ID
+@param[in] roll_ptr DB_ROLL_PTR */
+static void btr_cur_write_sys(
+ dtuple_t* entry,
+ const dict_index_t* index,
+ trx_id_t trx_id,
+ roll_ptr_t roll_ptr)
+{
+ dfield_t* t = dtuple_get_nth_field(entry, index->db_trx_id());
+ ut_ad(t->len == DATA_TRX_ID_LEN);
+ trx_write_trx_id(static_cast<byte*>(t->data), trx_id);
+ dfield_t* r = dtuple_get_nth_field(entry, index->db_roll_ptr());
+ ut_ad(r->len == DATA_ROLL_PTR_LEN);
+ trx_write_roll_ptr(static_cast<byte*>(r->data), roll_ptr);
+}
+
/***********************************************************//**
Writes a redo log record of updating a record in-place. */
void
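btr_cur_write_sys() above replaces the generic row_upd_index_entry_sys_field() calls with direct fixed-width writes of DB_TRX_ID and DB_ROLL_PTR into the tuple. Both are stored big-endian at the widths the patch uses (DATA_TRX_ID_LEN = 6 and DATA_ROLL_PTR_LEN = 7 bytes). A stand-alone sketch in the spirit of trx_write_trx_id()/trx_write_roll_ptr():

#include <cstdint>

// Write the low 'n' bytes of 'v' in big-endian order; a sketch of the
// fixed-width system column writes (n = 6 for DB_TRX_ID, n = 7 for
// DB_ROLL_PTR as used in the patch).
inline void write_be(unsigned char* p, uint64_t v, int n)
{
	for (int i = n - 1; i >= 0; i--) {
		p[i] = static_cast<unsigned char>(v);
		v >>= 8;
	}
}

inline void write_sys_cols(unsigned char* trx_id_field,
			   unsigned char* roll_ptr_field,
			   uint64_t trx_id, uint64_t roll_ptr)
{
	write_be(trx_id_field, trx_id, 6);	// DATA_TRX_ID_LEN
	write_be(roll_ptr_field, roll_ptr, 7);	// DATA_ROLL_PTR_LEN
}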
@@ -3794,8 +3942,7 @@ btr_cur_update_in_place_log(
log_ptr++;
if (dict_index_is_clust(index)) {
- log_ptr = row_upd_write_sys_vals_to_log(
- index, trx_id, roll_ptr, log_ptr, mtr);
+ log_ptr = btr_cur_log_sys(index, trx_id, roll_ptr, log_ptr);
} else {
/* Dummy system fields for a secondary index */
/* TRX_ID Position */
@@ -4148,6 +4295,72 @@ func_exit:
return(err);
}
+/** Trim a metadata record during the rollback of instant ALTER TABLE.
+@param[in] entry metadata tuple
+@param[in] index primary key
+@param[in] update update vector for the rollback */
+ATTRIBUTE_COLD
+static void btr_cur_trim_alter_metadata(dtuple_t* entry,
+ const dict_index_t* index,
+ const upd_t* update)
+{
+ ut_ad(index->is_instant());
+ ut_ad(update->is_alter_metadata());
+ ut_ad(entry->is_alter_metadata());
+
+ ut_ad(update->fields[0].field_no == index->first_user_field());
+ ut_ad(update->fields[0].new_val.ext);
+ ut_ad(update->fields[0].new_val.len == FIELD_REF_SIZE);
+ ut_ad(entry->n_fields - 1 == index->n_fields);
+
+ const byte* ptr = static_cast<const byte*>(
+ update->fields[0].new_val.data);
+ ut_ad(!mach_read_from_4(ptr + BTR_EXTERN_LEN));
+ ut_ad(mach_read_from_4(ptr + BTR_EXTERN_LEN + 4) > 4);
+ ut_ad(mach_read_from_4(ptr + BTR_EXTERN_OFFSET) == FIL_PAGE_DATA);
+ ut_ad(mach_read_from_4(ptr + BTR_EXTERN_SPACE_ID)
+ == index->table->space->id);
+
+ ulint n_fields = update->fields[1].field_no;
+ ut_ad(n_fields <= index->n_fields);
+ if (n_fields != index->n_uniq) {
+ ut_ad(n_fields
+ >= index->n_core_fields);
+ entry->n_fields = n_fields;
+ return;
+ }
+
+ /* This is based on dict_table_t::deserialise_columns()
+ and btr_cur_instant_init_low(). */
+ mtr_t mtr;
+ mtr.start();
+ buf_block_t* block = buf_page_get(
+ page_id_t(index->table->space->id,
+ mach_read_from_4(ptr + BTR_EXTERN_PAGE_NO)),
+ 0, RW_S_LATCH, &mtr);
+ buf_block_dbg_add_level(block, SYNC_EXTERN_STORAGE);
+ ut_ad(fil_page_get_type(block->frame) == FIL_PAGE_TYPE_BLOB);
+ ut_ad(mach_read_from_4(&block->frame[FIL_PAGE_DATA
+ + BTR_BLOB_HDR_NEXT_PAGE_NO])
+ == FIL_NULL);
+ ut_ad(mach_read_from_4(&block->frame[FIL_PAGE_DATA
+ + BTR_BLOB_HDR_PART_LEN])
+ == mach_read_from_4(ptr + BTR_EXTERN_LEN + 4));
+ n_fields = mach_read_from_4(
+ &block->frame[FIL_PAGE_DATA + BTR_BLOB_HDR_SIZE])
+ + index->first_user_field();
+ /* Rollback should not increase the number of fields. */
+ ut_ad(n_fields <= index->n_fields);
+ ut_ad(n_fields + 1 <= entry->n_fields);
+ /* dict_index_t::clear_instant_alter() cannot be invoked while
+ rollback of an instant ALTER TABLE transaction is in progress
+ for an is_alter_metadata() record. */
+ ut_ad(n_fields >= index->n_core_fields);
+
+ mtr.commit();
+ entry->n_fields = n_fields + 1;
+}
+
/** Trim an update tuple due to instant ADD COLUMN, if needed.
For normal records, the trailing instantly added fields that match
the initial default values are omitted.
@@ -4169,13 +4382,12 @@ btr_cur_trim(
const que_thr_t* thr)
{
if (!index->is_instant()) {
- } else if (UNIV_UNLIKELY(update->info_bits == REC_INFO_METADATA)) {
+ } else if (UNIV_UNLIKELY(update->is_metadata())) {
/* We are either updating a metadata record
- (instantly adding columns to a table where instant ADD was
+ (instant ALTER TABLE on a table where instant ALTER was
already executed) or rolling back such an operation. */
ut_ad(!upd_get_nth_field(update, 0)->orig_len);
- ut_ad(upd_get_nth_field(update, 0)->field_no
- > index->n_core_fields);
+ ut_ad(entry->is_metadata());
if (thr->graph->trx->in_rollback) {
/* This rollback can occur either as part of
@@ -4192,6 +4404,13 @@ btr_cur_trim(
first instantly added column logged by
innobase_add_instant_try(). */
ut_ad(update->n_fields > 2);
+ if (update->is_alter_metadata()) {
+ btr_cur_trim_alter_metadata(
+ entry, index, update);
+ return;
+ }
+ ut_ad(!entry->is_alter_metadata());
+
ulint n_fields = upd_get_nth_field(update, 0)
->field_no;
ut_ad(n_fields + 1 >= entry->n_fields);
@@ -4277,9 +4496,7 @@ btr_cur_optimistic_update(
|| trx_is_recv(thr_get_trx(thr)));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
- const bool is_metadata = update->info_bits == REC_INFO_METADATA;
-
- if (UNIV_LIKELY(!is_metadata)
+ if (UNIV_LIKELY(!update->is_metadata())
&& !row_upd_changes_field_size_or_external(index, *offsets,
update)) {
@@ -4305,6 +4522,10 @@ any_extern:
return(DB_OVERFLOW);
}
+ if (rec_is_metadata(rec, *index) && index->table->instant) {
+ goto any_extern;
+ }
+
for (i = 0; i < upd_get_n_fields(update); i++) {
if (dfield_is_ext(&upd_get_nth_field(update, i)->new_val)) {
@@ -4349,7 +4570,7 @@ any_extern:
if (page_zip_rec_needs_ext(new_rec_size, page_is_comp(page),
dict_index_get_n_fields(index),
- dict_table_page_size(index->table))) {
+ block->zip_size())) {
goto any_extern;
}
@@ -4363,10 +4584,10 @@ any_extern:
}
/* We limit max record size to 16k even for 64k page size. */
- if (new_rec_size >= COMPRESSED_REC_MAX_DATA_SIZE ||
- (!dict_table_is_comp(index->table)
- && new_rec_size >= REDUNDANT_REC_MAX_DATA_SIZE)) {
- err = DB_OVERFLOW;
+ if (new_rec_size >= COMPRESSED_REC_MAX_DATA_SIZE ||
+ (!dict_table_is_comp(index->table)
+ && new_rec_size >= REDUNDANT_REC_MAX_DATA_SIZE)) {
+ err = DB_OVERFLOW;
goto func_exit;
}
@@ -4439,8 +4660,8 @@ any_extern:
lock_rec_store_on_page_infimum(block, rec);
}
- if (UNIV_UNLIKELY(is_metadata)) {
- ut_ad(new_entry->info_bits == REC_INFO_METADATA);
+ if (UNIV_UNLIKELY(update->is_metadata())) {
+ ut_ad(new_entry->is_metadata());
ut_ad(index->is_instant());
/* This can be innobase_add_instant_try() performing a
subsequent instant ADD COLUMN, or its rollback by
@@ -4455,10 +4676,7 @@ any_extern:
page_cur_move_to_prev(page_cursor);
if (!(flags & BTR_KEEP_SYS_FLAG)) {
- row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR,
- roll_ptr);
- row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID,
- trx_id);
+ btr_cur_write_sys(new_entry, index, trx_id, roll_ptr);
}
/* There are no externally stored columns in new_entry */
@@ -4466,7 +4684,7 @@ any_extern:
cursor, new_entry, offsets, heap, 0/*n_ext*/, mtr);
ut_a(rec); /* <- We calculated above the insert would fit */
- if (UNIV_UNLIKELY(is_metadata)) {
+ if (UNIV_UNLIKELY(update->is_metadata())) {
/* We must empty the PAGE_FREE list, because if this
was a rollback, the shortened metadata record
would have too many fields, and we would be unable to
@@ -4531,7 +4749,8 @@ btr_cur_pess_upd_restore_supremum(
const page_id_t page_id(block->page.id.space(), prev_page_no);
ut_ad(prev_page_no != FIL_NULL);
- prev_block = buf_page_get_with_no_latch(page_id, block->page.size, mtr);
+ prev_block = buf_page_get_with_no_latch(page_id, block->zip_size(),
+ mtr);
#ifdef UNIV_BTR_DEBUG
ut_a(btr_page_get_next(prev_block->frame, mtr)
== page_get_page_no(page));
@@ -4657,8 +4876,25 @@ btr_cur_pessimistic_update(
rec = btr_cur_get_rec(cursor);
ut_ad(rec_offs_validate(rec, index, *offsets));
- dtuple_t* new_entry = row_rec_to_index_entry(
- rec, index, *offsets, &n_ext, entry_heap);
+ dtuple_t* new_entry;
+
+ const bool is_metadata = rec_is_metadata(rec, *index);
+
+ if (UNIV_UNLIKELY(is_metadata)) {
+ ut_ad(update->is_metadata());
+ ut_ad(flags & BTR_NO_LOCKING_FLAG);
+ ut_ad(index->is_instant());
+ new_entry = row_metadata_to_tuple(
+ rec, index, *offsets,
+ &n_ext, entry_heap,
+ update->info_bits, !thr_get_trx(thr)->in_rollback);
+ ut_ad(new_entry->n_fields
+ == ulint(index->n_fields)
+ + update->is_alter_metadata());
+ } else {
+ new_entry = row_rec_to_index_entry(rec, index, *offsets,
+ &n_ext, entry_heap);
+ }
/* The page containing the clustered index record
corresponding to new_entry is latched in mtr. If the
@@ -4670,9 +4906,6 @@ btr_cur_pessimistic_update(
entry_heap);
btr_cur_trim(new_entry, index, update, thr);
- const bool is_metadata = new_entry->info_bits
- & REC_INFO_MIN_REC_FLAG;
-
/* We have to set appropriate extern storage bits in the new
record to be inserted: we have to remember which fields were such */
@@ -4700,11 +4933,14 @@ btr_cur_pessimistic_update(
}
if (page_zip_rec_needs_ext(
- rec_get_converted_size(index, new_entry, n_ext),
- page_is_comp(page),
- dict_index_get_n_fields(index),
- block->page.size)) {
-
+ rec_get_converted_size(index, new_entry, n_ext),
+ page_is_comp(page),
+ dict_index_get_n_fields(index),
+ block->zip_size())
+ || (UNIV_UNLIKELY(update->is_alter_metadata())
+ && !dfield_is_ext(dtuple_get_nth_field(
+ new_entry,
+ index->first_user_field())))) {
big_rec_vec = dtuple_convert_big_rec(index, update, new_entry, &n_ext);
if (UNIV_UNLIKELY(big_rec_vec == NULL)) {
@@ -4753,10 +4989,7 @@ btr_cur_pessimistic_update(
}
if (!(flags & BTR_KEEP_SYS_FLAG)) {
- row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR,
- roll_ptr);
- row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID,
- trx_id);
+ btr_cur_write_sys(new_entry, index, trx_id, roll_ptr);
}
if (!page_zip) {
@@ -4765,10 +4998,10 @@ btr_cur_pessimistic_update(
}
if (UNIV_UNLIKELY(is_metadata)) {
- ut_ad(new_entry->info_bits == REC_INFO_METADATA);
+ ut_ad(new_entry->is_metadata());
ut_ad(index->is_instant());
/* This can be innobase_add_instant_try() performing a
- subsequent instant ADD COLUMN, or its rollback by
+ subsequent instant ALTER TABLE, or its rollback by
row_undo_mod_clust_low(). */
ut_ad(flags & BTR_NO_LOCKING_FLAG);
} else {
@@ -4817,7 +5050,8 @@ btr_cur_pessimistic_update(
btr_cur_get_block(cursor), rec, block);
}
- if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets))) {
+ if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets))
+ || rec_is_alter_metadata(rec, *index)) {
/* The new inserted record owns its possible externally
stored fields */
btr_cur_unmark_extern_fields(
@@ -5027,8 +5261,7 @@ btr_cur_del_mark_set_clust_rec_log(
*log_ptr++ = 0;
*log_ptr++ = 1;
- log_ptr = row_upd_write_sys_vals_to_log(
- index, trx_id, roll_ptr, log_ptr, mtr);
+ log_ptr = btr_cur_log_sys(index, trx_id, roll_ptr, log_ptr);
mach_write_to_2(log_ptr, page_offset(rec));
log_ptr += 2;
@@ -5460,42 +5693,41 @@ btr_cur_optimistic_delete_func(
if (UNIV_UNLIKELY(block->page.id.page_no() == cursor->index->page
&& page_get_n_recs(block->frame) == 1
+ (cursor->index->is_instant()
- && !rec_is_metadata(rec, cursor->index)))) {
+ && !rec_is_metadata(rec, *cursor->index)))) {
/* The whole index (and table) becomes logically empty.
Empty the whole page. That is, if we are deleting the
only user record, also delete the metadata record
- if one exists (it exists if and only if is_instant()).
+ if one exists for instant ADD COLUMN (not generic ALTER TABLE).
If we are deleting the metadata record and the
table becomes empty, clean up the whole page. */
dict_index_t* index = cursor->index;
+ const rec_t* first_rec = page_rec_get_next_const(
+ page_get_infimum_rec(block->frame));
ut_ad(!index->is_instant()
- || rec_is_metadata(
- page_rec_get_next_const(
- page_get_infimum_rec(block->frame)),
- index));
- if (UNIV_UNLIKELY(rec_get_info_bits(rec, page_rec_is_comp(rec))
- & REC_INFO_MIN_REC_FLAG)) {
- /* This should be rolling back instant ADD COLUMN.
- If this is a recovered transaction, then
- index->is_instant() will hold until the
- insert into SYS_COLUMNS is rolled back. */
- ut_ad(index->table->supports_instant());
- ut_ad(index->is_primary());
- } else {
- lock_update_delete(block, rec);
- }
- btr_page_empty(block, buf_block_get_page_zip(block),
- index, 0, mtr);
- page_cur_set_after_last(block, btr_cur_get_page_cur(cursor));
-
- if (index->is_primary()) {
- /* Concurrent access is prevented by
- root_block->lock X-latch, so this should be
- safe. */
- index->remove_instant();
+ || rec_is_metadata(first_rec, *index));
+ const bool is_metadata = rec_is_metadata(rec, *index);
+ /* We can remove the metadata when rolling back an
+ instant ALTER TABLE operation, or when deleting the
+ last user record on the page such that only metadata for
+ instant ADD COLUMN (not generic ALTER TABLE) remains. */
+ const bool empty_table = is_metadata
+ || !index->is_instant()
+ || (first_rec != rec
+ && rec_is_add_metadata(first_rec, *index));
+ if (UNIV_LIKELY(empty_table)) {
+ if (UNIV_LIKELY(!is_metadata)) {
+ lock_update_delete(block, rec);
+ }
+ btr_page_empty(block, buf_block_get_page_zip(block),
+ index, 0, mtr);
+ if (index->is_instant()) {
+ /* MDEV-17383: free metadata BLOBs! */
+ index->clear_instant_alter();
+ }
+ page_cur_set_after_last(block,
+ btr_cur_get_page_cur(cursor));
+ return true;
}
-
- return true;
}
offsets = rec_get_offsets(rec, cursor->index, offsets, true,
@@ -5677,10 +5909,10 @@ btr_cur_pessimistic_delete(
}
if (page_is_leaf(page)) {
- const bool is_metadata = rec_get_info_bits(
- rec, page_rec_is_comp(rec)) & REC_INFO_MIN_REC_FLAG;
+ const bool is_metadata = rec_is_metadata(
+ rec, page_rec_is_comp(rec));
if (UNIV_UNLIKELY(is_metadata)) {
- /* This should be rolling back instant ADD COLUMN.
+ /* This should be rolling back instant ALTER TABLE.
If this is a recovered transaction, then
index->is_instant() will hold until the
insert into SYS_COLUMNS is rolled back. */
@@ -5696,30 +5928,34 @@ btr_cur_pessimistic_delete(
goto discard_page;
}
} else if (page_get_n_recs(page) == 1
- + (index->is_instant()
- && !rec_is_metadata(rec, index))) {
+ + (index->is_instant() && !is_metadata)) {
/* The whole index (and table) becomes logically empty.
Empty the whole page. That is, if we are deleting the
only user record, also delete the metadata record
- if one exists (it exists if and only if is_instant()).
- If we are deleting the metadata record and the
+ if one exists for instant ADD COLUMN
+ (not generic ALTER TABLE).
+ If we are deleting the metadata record
+ (in the rollback of instant ALTER TABLE) and the
table becomes empty, clean up the whole page. */
+
+ const rec_t* first_rec = page_rec_get_next_const(
+ page_get_infimum_rec(page));
ut_ad(!index->is_instant()
- || rec_is_metadata(
- page_rec_get_next_const(
- page_get_infimum_rec(page)),
- index));
- btr_page_empty(block, page_zip, index, 0, mtr);
- page_cur_set_after_last(block,
- btr_cur_get_page_cur(cursor));
- if (index->is_primary()) {
- /* Concurrent access is prevented by
- index->lock and root_block->lock
- X-latch, so this should be safe. */
- index->remove_instant();
+ || rec_is_metadata(first_rec, *index));
+ if (is_metadata || !index->is_instant()
+ || (first_rec != rec
+ && rec_is_add_metadata(first_rec, *index))) {
+ btr_page_empty(block, page_zip, index, 0, mtr);
+ if (index->is_instant()) {
+ /* MDEV-17383: free metadata BLOBs! */
+ index->clear_instant_alter();
+ }
+ page_cur_set_after_last(
+ block,
+ btr_cur_get_page_cur(cursor));
+ ret = TRUE;
+ goto return_after_reservations;
}
- ret = TRUE;
- goto return_after_reservations;
}
if (UNIV_LIKELY(!is_metadata)) {
@@ -5827,7 +6063,7 @@ discard_page:
|| btr_cur_will_modify_tree(
index, page, BTR_INTENTION_DELETE, rec,
btr_node_ptr_max_size(index),
- block->page.size, mtr);
+ block->zip_size(), mtr);
page_cur_delete_rec(btr_cur_get_page_cur(cursor), index,
offsets, mtr);
#ifdef UNIV_ZIP_DEBUG
@@ -5994,7 +6230,7 @@ btr_estimate_n_rows_in_range_on_level(
const fil_space_t* space = index->table->space;
page_id_t page_id(space->id, slot1->page_no);
- const page_size_t page_size(space->flags);
+ const ulint zip_size = space->zip_size();
level = slot1->page_level;
@@ -6011,7 +6247,7 @@ btr_estimate_n_rows_in_range_on_level(
attempting to read a page that is no longer part of
the B-tree. We pass BUF_GET_POSSIBLY_FREED in order to
silence a debug assertion about this. */
- block = buf_page_get_gen(page_id, page_size, RW_S_LATCH,
+ block = buf_page_get_gen(page_id, zip_size, RW_S_LATCH,
NULL, BUF_GET_POSSIBLY_FREED,
__FILE__, __LINE__, &mtr, &err);
@@ -7232,7 +7468,7 @@ struct btr_blob_log_check_t {
mtr_x_lock(dict_index_get_lock(index), m_mtr);
m_pcur->btr_cur.page_cur.block = btr_block_get(
page_id_t(index->table->space_id, page_no),
- page_size_t(index->table->space->flags),
+ index->table->space->zip_size(),
RW_X_LATCH, index, m_mtr);
m_pcur->btr_cur.page_cur.rec
= m_pcur->btr_cur.page_cur.block->frame
@@ -7320,9 +7556,6 @@ btr_store_big_rec_extern_fields(
ut_ad(buf_block_get_frame(rec_block) == page_align(rec));
ut_a(dict_index_is_clust(index));
- ut_a(dict_table_page_size(index->table)
- .equals_to(rec_block->page.size));
-
btr_blob_log_check_t redo_log(pcur, btr_mtr, offsets, &rec_block,
&rec, op);
page_zip = buf_block_get_page_zip(rec_block);
@@ -7366,15 +7599,13 @@ btr_store_big_rec_extern_fields(
}
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
- const page_size_t page_size(dict_table_page_size(index->table));
-
/* Space available in compressed page to carry blob data */
- const ulint payload_size_zip = page_size.physical()
+ const ulint payload_size_zip = rec_block->physical_size()
- FIL_PAGE_DATA;
/* Space available in uncompressed page to carry blob data */
- const ulint payload_size = page_size.physical()
- - FIL_PAGE_DATA - BTR_BLOB_HDR_SIZE - FIL_PAGE_DATA_END;
+ const ulint payload_size = payload_size_zip
+ - (BTR_BLOB_HDR_SIZE + FIL_PAGE_DATA_END);
/* We have to create a file segment to the tablespace
for each field and put the pointer to the field in rec */
@@ -7430,7 +7661,7 @@ btr_store_big_rec_extern_fields(
mtr.set_flush_observer(btr_mtr->get_flush_observer());
buf_page_get(rec_block->page.id,
- rec_block->page.size, RW_X_LATCH, &mtr);
+ rec_block->zip_size(), RW_X_LATCH, &mtr);
if (prev_page_no == FIL_NULL) {
hint_page_no = 1 + rec_page_no;
@@ -7478,7 +7709,7 @@ btr_store_big_rec_extern_fields(
prev_block = buf_page_get(
page_id_t(space_id, prev_page_no),
- rec_block->page.size,
+ rec_block->zip_size(),
RW_X_LATCH, &mtr);
buf_block_dbg_add_level(prev_block,
@@ -7567,16 +7798,20 @@ btr_store_big_rec_extern_fields(
+ FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4,
rec_page_no,
MLOG_4BYTES, &mtr);
-
- /* Zero out the unused part of the page. */
- memset(page + page_zip_get_size(page_zip)
- - c_stream.avail_out,
- 0, c_stream.avail_out);
mlog_log_string(page
+ FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
page_zip_get_size(page_zip)
- - FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
+ - FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
+ - c_stream.avail_out,
&mtr);
+ /* Zero out the unused part of the page. */
+ if (c_stream.avail_out) {
+ mlog_memset(block,
+ page_zip_get_size(page_zip)
+ - c_stream.avail_out,
+ c_stream.avail_out,
+ 0, &mtr);
+ }
/* Copy the page to compressed storage,
because it will be flushed to disk
from there. */
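The reordered block above changes what gets redo-logged for a compressed BLOB page: only the bytes actually produced by the compressor are logged with mlog_log_string(), and the unused tail is zero-filled through mlog_memset(), a compact record, instead of being logged as literal zero bytes. A rough sketch of the bookkeeping, with placeholder logging callbacks rather than the InnoDB mtr API:

#include <cstring>
#include <cstddef>
#include <functional>

// Sketch only: split the compressed payload into a verbatim-logged part
// and a memset-logged tail. 'header' is the separately logged fixed
// prefix; 'avail_out' is what the compressor left unused.
void log_compressed_tail(unsigned char* page, std::size_t page_zip_size,
			 std::size_t header, std::size_t avail_out,
			 const std::function<void(const unsigned char*,
						  std::size_t)>& log_bytes,
			 const std::function<void(std::size_t,
						  std::size_t)>& log_memset)
{
	const std::size_t used = page_zip_size - header - avail_out;
	log_bytes(page + header, used);		// cf. mlog_log_string()
	if (avail_out) {
		// cf. mlog_memset(): offset, length and fill value are
		// logged compactly; the page itself is zero-filled here.
		std::memset(page + page_zip_size - avail_out, 0, avail_out);
		log_memset(page_zip_size - avail_out, avail_out);
	}
}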
@@ -7837,10 +8072,9 @@ btr_free_externally_stored_field(
ut_ad(space_id == index->table->space->id);
ut_ad(space_id == index->table->space_id);
- const page_size_t ext_page_size(dict_table_page_size(index->table));
- const page_size_t& rec_page_size(rec == NULL
- ? univ_page_size
- : ext_page_size);
+ const ulint ext_zip_size = index->table->space->zip_size();
+ const ulint rec_zip_size = rec ? ext_zip_size : 0;
+
if (rec == NULL) {
/* This is a call from row_purge_upd_exist_or_extern(). */
ut_ad(!page_zip);
@@ -7867,7 +8101,7 @@ btr_free_externally_stored_field(
#ifdef UNIV_DEBUG
rec_block =
#endif /* UNIV_DEBUG */
- buf_page_get(page_id, rec_page_size, RW_X_LATCH, &mtr);
+ buf_page_get(page_id, rec_zip_size, RW_X_LATCH, &mtr);
buf_block_dbg_add_level(rec_block, SYNC_NO_ORDER_CHECK);
page_no = mach_read_from_4(field_ref + BTR_EXTERN_PAGE_NO);
@@ -7893,13 +8127,13 @@ btr_free_externally_stored_field(
}
ext_block = buf_page_get(
- page_id_t(space_id, page_no), ext_page_size,
+ page_id_t(space_id, page_no), ext_zip_size,
RW_X_LATCH, &mtr);
buf_block_dbg_add_level(ext_block, SYNC_EXTERN_STORAGE);
page = buf_block_get_frame(ext_block);
- if (ext_page_size.is_compressed()) {
+ if (ext_zip_size) {
/* Note that page_zip will be NULL
in row_purge_upd_exist_or_extern(). */
switch (fil_page_get_type(page)) {
@@ -8068,7 +8302,7 @@ btr_copy_blob_prefix(
mtr_start(&mtr);
block = buf_page_get(page_id_t(space_id, page_no),
- univ_page_size, RW_S_LATCH, &mtr);
+ 0, RW_S_LATCH, &mtr);
buf_block_dbg_add_level(block, SYNC_EXTERN_STORAGE);
page = buf_block_get_frame(block);
@@ -8106,7 +8340,7 @@ by a lock or a page latch.
@param[out] buf the externally stored part of the field,
or a prefix of it
@param[in] len length of buf, in bytes
-@param[in] page_size compressed BLOB page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size
@param[in] space_id space id of the BLOB pages
@param[in] offset offset on the first BLOB page
@return number of bytes written to buf */
@@ -8115,7 +8349,7 @@ ulint
btr_copy_zblob_prefix(
byte* buf,
ulint len,
- const page_size_t& page_size,
+ ulint zip_size,
ulint space_id,
ulint page_no,
ulint offset)
@@ -8135,7 +8369,8 @@ btr_copy_zblob_prefix(
heap = mem_heap_create(40000);
page_zip_set_alloc(&d_stream, heap);
- ut_ad(page_size.is_compressed());
+ ut_ad(zip_size);
+ ut_ad(ut_is_2pow(zip_size));
ut_ad(space_id);
err = inflateInit(&d_stream);
@@ -8150,7 +8385,7 @@ btr_copy_zblob_prefix(
is being held on the clustered index record, or,
in row_merge_copy_blobs(), by an exclusive table lock. */
bpage = buf_page_get_zip(page_id_t(space_id, page_no),
- page_size);
+ zip_size);
if (UNIV_UNLIKELY(!bpage)) {
ib::error() << "Cannot load compressed BLOB "
@@ -8182,8 +8417,7 @@ btr_copy_zblob_prefix(
}
d_stream.next_in = bpage->zip.data + offset;
- d_stream.avail_in = static_cast<uInt>(page_size.physical()
- - offset);
+ d_stream.avail_in = uInt(zip_size - offset);
err = inflate(&d_stream, Z_NO_FLUSH);
switch (err) {
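btr_copy_zblob_prefix() feeds fixed-size compressed chunks (zip_size - offset bytes per BLOB page) into a single z_stream. A self-contained sketch of that streaming-inflate pattern over a plain memory buffer, using the standard zlib API rather than buffer-pool pages:

#include <zlib.h>
#include <cstring>
#include <cstddef>

// Inflate up to 'out_len' bytes of a compressed prefix held in memory.
// Returns the number of bytes produced, or 0 on error.
std::size_t inflate_prefix(const unsigned char* in, std::size_t in_len,
			   unsigned char* out, std::size_t out_len)
{
	z_stream s;
	std::memset(&s, 0, sizeof s);
	if (inflateInit(&s) != Z_OK) {
		return 0;
	}

	s.next_in = const_cast<Bytef*>(in);
	s.avail_in = static_cast<uInt>(in_len);
	s.next_out = out;
	s.avail_out = static_cast<uInt>(out_len);

	const int err = inflate(&s, Z_NO_FLUSH);
	const std::size_t produced = out_len - s.avail_out;
	inflateEnd(&s);

	return (err == Z_OK || err == Z_STREAM_END) ? produced : 0;
}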
@@ -8253,7 +8487,7 @@ by a lock or a page latch.
@param[out] buf the externally stored part of the
field, or a prefix of it
@param[in] len length of buf, in bytes
-@param[in] page_size BLOB page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] space_id space id of the first BLOB page
@param[in] page_no page number of the first BLOB page
@param[in] offset offset on the first BLOB page
@@ -8263,7 +8497,7 @@ ulint
btr_copy_externally_stored_field_prefix_low(
byte* buf,
ulint len,
- const page_size_t& page_size,
+ ulint zip_size,
ulint space_id,
ulint page_no,
ulint offset)
@@ -8272,11 +8506,10 @@ btr_copy_externally_stored_field_prefix_low(
return(0);
}
- if (page_size.is_compressed()) {
- return(btr_copy_zblob_prefix(buf, len, page_size,
+ if (zip_size) {
+ return(btr_copy_zblob_prefix(buf, len, zip_size,
space_id, page_no, offset));
} else {
- ut_ad(page_size.equals_to(univ_page_size));
return(btr_copy_blob_prefix(buf, len, space_id,
page_no, offset));
}
@@ -8286,7 +8519,7 @@ btr_copy_externally_stored_field_prefix_low(
The clustered index record must be protected by a lock or a page latch.
@param[out] buf the field, or a prefix of it
@param[in] len length of buf, in bytes
-@param[in] page_size BLOB page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] data 'internally' stored part of the field
containing also the reference to the external part; must be protected by
a lock or a page latch
@@ -8297,7 +8530,7 @@ ulint
btr_copy_externally_stored_field_prefix(
byte* buf,
ulint len,
- const page_size_t& page_size,
+ ulint zip_size,
const byte* data,
ulint local_len)
{
@@ -8336,7 +8569,7 @@ btr_copy_externally_stored_field_prefix(
return(local_len
+ btr_copy_externally_stored_field_prefix_low(buf + local_len,
len - local_len,
- page_size,
+ zip_size,
space_id, page_no,
offset));
}
@@ -8347,7 +8580,7 @@ The clustered index record must be protected by a lock or a page latch.
@param[in] data 'internally' stored part of the field
containing also the reference to the external part; must be protected by
a lock or a page latch
-@param[in] page_size BLOB page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] local_len length of data
@param[in,out] heap mem heap
@return the whole field copied to heap */
@@ -8355,7 +8588,7 @@ byte*
btr_copy_externally_stored_field(
ulint* len,
const byte* data,
- const page_size_t& page_size,
+ ulint zip_size,
ulint local_len,
mem_heap_t* heap)
{
@@ -8386,7 +8619,7 @@ btr_copy_externally_stored_field(
*len = local_len
+ btr_copy_externally_stored_field_prefix_low(buf + local_len,
extern_len,
- page_size,
+ zip_size,
space_id,
page_no, offset);
@@ -8397,7 +8630,7 @@ btr_copy_externally_stored_field(
@param[in] rec record in a clustered index; must be
protected by a lock or a page latch
@param[in] offset array returned by rec_get_offsets()
-@param[in] page_size BLOB page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] no field number
@param[out] len length of the field
@param[in,out] heap mem heap
@@ -8406,7 +8639,7 @@ byte*
btr_rec_copy_externally_stored_field(
const rec_t* rec,
const ulint* offsets,
- const page_size_t& page_size,
+ ulint zip_size,
ulint no,
ulint* len,
mem_heap_t* heap)
@@ -8440,5 +8673,5 @@ btr_rec_copy_externally_stored_field(
}
return(btr_copy_externally_stored_field(len, data,
- page_size, local_len, heap));
+ zip_size, local_len, heap));
}
diff --git a/storage/innobase/btr/btr0defragment.cc b/storage/innobase/btr/btr0defragment.cc
index aa142e15a50..0775dfe5e5f 100644
--- a/storage/innobase/btr/btr0defragment.cc
+++ b/storage/innobase/btr/btr0defragment.cc
@@ -63,14 +63,14 @@ UNIV_INTERN mysql_pfs_key_t btr_defragment_mutex_key;
/* Number of compression failures caused by defragmentation since server
start. */
-ulint btr_defragment_compression_failures = 0;
+Atomic_counter<ulint> btr_defragment_compression_failures;
/* Number of btr_defragment_n_pages calls that altered page but didn't
manage to release any page. */
-ulint btr_defragment_failures = 0;
+Atomic_counter<ulint> btr_defragment_failures;
/* Total number of btr_defragment_n_pages calls that altered page.
The difference between btr_defragment_count and btr_defragment_failures shows
the amount of effort wasted. */
-ulint btr_defragment_count = 0;
+Atomic_counter<ulint> btr_defragment_count;
/******************************************************************//**
Constructor for btr_defragment_item_t. */
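The three defragmentation statistics switch from plain ulint variables updated with my_atomic_addlint() to Atomic_counter<ulint>, so the updates later in this file become plain operator++. A minimal stand-in showing the shape of such a wrapper; this is an illustration only, not the server's own template, whose exact memory ordering may differ:

#include <atomic>
#include <cstddef>

// Minimal wrapper in the spirit of Atomic_counter<T>: statistics-style
// counters whose increments need atomicity but not strict ordering.
template <typename T>
class atomic_counter {
	std::atomic<T> value{0};
public:
	T operator++(int)
	{ return value.fetch_add(1, std::memory_order_relaxed); }
	T operator+=(T n)
	{ return value.fetch_add(n, std::memory_order_relaxed) + n; }
	operator T() const
	{ return value.load(std::memory_order_relaxed); }
};

// Usage mirroring this file, e.g. btr_defragment_count++:
atomic_counter<std::size_t> defragment_count;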
@@ -167,7 +167,7 @@ btr_defragment_add_index(
// Load index rood page.
buf_block_t* block = btr_block_get(
page_id_t(index->table->space_id, index->page),
- page_size_t(index->table->space->flags),
+ index->table->space->zip_size(),
RW_NO_LATCH, index, &mtr);
page_t* page = NULL;
@@ -377,7 +377,7 @@ btr_defragment_merge_pages(
dict_index_t* index, /*!< in: index tree */
buf_block_t* from_block, /*!< in: origin of merge */
buf_block_t* to_block, /*!< in: destination of merge */
- const page_size_t page_size, /*!< in: page size of the block */
+ ulint zip_size, /*!< in: ROW_FORMAT=COMPRESSED size */
ulint reserved_space, /*!< in: space reserved for future
insert to avoid immediate page split */
ulint* max_data_size, /*!< in/out: max data size to
@@ -405,7 +405,7 @@ btr_defragment_merge_pages(
// Estimate how many records can be moved from the from_page to
// the to_page.
- if (page_size.is_compressed()) {
+ if (zip_size) {
ulint page_diff = srv_page_size - *max_data_size;
max_ins_size_to_use = (max_ins_size_to_use > page_diff)
? max_ins_size_to_use - page_diff : 0;
@@ -449,8 +449,7 @@ btr_defragment_merge_pages(
// n_recs_to_move number of records to to_page. We try to reduce
// the targeted data size on the to_page by
// BTR_DEFRAGMENT_PAGE_REDUCTION_STEP_SIZE and try again.
- my_atomic_addlint(
- &btr_defragment_compression_failures, 1);
+ btr_defragment_compression_failures++;
max_ins_size_to_use =
move_size > BTR_DEFRAGMENT_PAGE_REDUCTION_STEP_SIZE
? move_size - BTR_DEFRAGMENT_PAGE_REDUCTION_STEP_SIZE
@@ -474,7 +473,7 @@ btr_defragment_merge_pages(
// Set ibuf free bits if necessary.
if (!dict_index_is_clust(index)
&& page_is_leaf(to_page)) {
- if (page_size.is_compressed()) {
+ if (zip_size) {
ibuf_reset_free_bits(to_block);
} else {
ibuf_update_free_bits_if_full(
@@ -492,7 +491,7 @@ btr_defragment_merge_pages(
btr_search_drop_page_hash_index(from_block);
btr_level_list_remove(
index->table->space_id,
- page_size, from_page, index, mtr);
+ zip_size, from_page, index, mtr);
btr_page_get_father(index, from_block, mtr, &parent);
btr_cur_node_ptr_delete(&parent, mtr);
/* btr_blob_dbg_remove(from_page, index,
@@ -579,7 +578,7 @@ btr_defragment_n_pages(
}
first_page = buf_block_get_frame(block);
- const page_size_t page_size(index->table->space->flags);
+ const ulint zip_size = index->table->space->zip_size();
/* 1. Load the pages and calculate the total data size. */
blocks[0] = block;
@@ -595,7 +594,7 @@ btr_defragment_n_pages(
}
blocks[i] = btr_block_get(page_id_t(index->table->space_id,
- page_no), page_size,
+ page_no), zip_size,
RW_X_LATCH, index, mtr);
}
@@ -621,7 +620,7 @@ btr_defragment_n_pages(
optimal_page_size = page_get_free_space_of_empty(
page_is_comp(first_page));
// For compressed pages, we take compression failures into account.
- if (page_size.is_compressed()) {
+ if (zip_size) {
ulint size = 0;
uint i = 0;
// We estimate the optimal data size of the index use samples of
@@ -664,7 +663,7 @@ btr_defragment_n_pages(
// Start from the second page.
for (uint i = 1; i < n_pages; i ++) {
buf_block_t* new_block = btr_defragment_merge_pages(
- index, blocks[i], current_block, page_size,
+ index, blocks[i], current_block, zip_size,
reserved_space, &max_data_size, heap, mtr);
if (new_block != current_block) {
n_defragmented ++;
@@ -673,11 +672,9 @@ btr_defragment_n_pages(
}
mem_heap_free(heap);
n_defragmented ++;
- my_atomic_addlint(
- &btr_defragment_count, 1);
+ btr_defragment_count++;
if (n_pages == n_defragmented) {
- my_atomic_addlint(
- &btr_defragment_failures, 1);
+ btr_defragment_failures++;
} else {
index->stat_defrag_n_pages_freed += (n_pages - n_defragmented);
}
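
The counters above switch from my_atomic_addlint() to Atomic_counter, MariaDB's own atomic wrapper with an overloaded ++. A rough standalone approximation using std::atomic; the relaxed memory order is an assumption suitable for statistics counters, not something stated by the patch.

#include <atomic>
#include <cstddef>

// Rough stand-in for Atomic_counter<ulint>: a statistics counter whose
// increments need atomicity but no ordering guarantees.
template <typename T>
class counter {
    std::atomic<T> v{0};
public:
    T operator++(int) { return v.fetch_add(1, std::memory_order_relaxed); }
    operator T() const { return v.load(std::memory_order_relaxed); }
};

counter<std::size_t> btr_defragment_count_sketch;

void on_defragment_pass() { btr_defragment_count_sketch++; }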
diff --git a/storage/innobase/btr/btr0pcur.cc b/storage/innobase/btr/btr0pcur.cc
index 2cbc9767cb4..e6b658b531e 100644
--- a/storage/innobase/btr/btr0pcur.cc
+++ b/storage/innobase/btr/btr0pcur.cc
@@ -150,17 +150,26 @@ before_first:
rec = page_rec_get_prev(rec);
ut_ad(!page_rec_is_infimum(rec));
- ut_ad(!rec_is_metadata(rec, index));
+ if (UNIV_UNLIKELY(rec_is_metadata(rec, *index))) {
+ ut_ad(index->table->instant);
+ ut_ad(page_get_n_recs(block->frame) == 1);
+ ut_ad(page_is_leaf(block->frame));
+ ut_ad(page_get_page_no(block->frame) == index->page);
+ cursor->rel_pos = BTR_PCUR_AFTER_LAST_IN_TREE;
+ return;
+ }
cursor->rel_pos = BTR_PCUR_AFTER;
} else if (page_rec_is_infimum_low(offs)) {
rec = page_rec_get_next(rec);
- if (rec_is_metadata(rec, index)) {
+ if (rec_is_metadata(rec, *index)) {
ut_ad(!page_has_prev(block->frame));
+ ut_d(const rec_t* p = rec);
rec = page_rec_get_next(rec);
if (page_rec_is_supremum(rec)) {
- ut_ad(page_has_next(block->frame));
+ ut_ad(page_has_next(block->frame)
+ || rec_is_alter_metadata(p, *index));
goto before_first;
}
}
@@ -170,10 +179,25 @@ before_first:
cursor->rel_pos = BTR_PCUR_ON;
}
- cursor->old_rec = dict_index_copy_rec_order_prefix(
- index, rec, &cursor->old_n_fields,
- &cursor->old_rec_buf, &cursor->buf_size);
+ if (index->is_ibuf()) {
+ ut_ad(!index->table->not_redundant());
+ cursor->old_n_fields = rec_get_n_fields_old(rec);
+ } else if (page_rec_is_leaf(rec)) {
+ cursor->old_n_fields = dict_index_get_n_unique_in_tree(index);
+ } else if (index->is_spatial()) {
+ ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index)
+ == DICT_INDEX_SPATIAL_NODEPTR_SIZE);
+ /* For R-tree, we have to compare
+ the child page numbers as well. */
+ cursor->old_n_fields = DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1;
+ } else {
+ cursor->old_n_fields = dict_index_get_n_unique_in_tree(index);
+ }
+ cursor->old_rec = rec_copy_prefix_to_buf(rec, index,
+ cursor->old_n_fields,
+ &cursor->old_rec_buf,
+ &cursor->buf_size);
cursor->block_when_stored = block;
/* Function try to check if block is S/X latch. */
@@ -450,7 +474,7 @@ btr_pcur_move_to_next_page(
next_block = btr_block_get(
page_id_t(block->page.id.space(), next_page_no),
- block->page.size, mode,
+ block->zip_size(), mode,
btr_pcur_get_btr_cur(cursor)->index, mtr);
if (UNIV_UNLIKELY(!next_block)) {
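
In btr_pcur_store_position() above, the cursor now decides how many leading fields to copy before calling rec_copy_prefix_to_buf(). The decision reduces to the sketch below; the function and parameter names are illustrative, only the field counts mirror the hunk.

// Prefix length stored for a persistent cursor, mirroring the branches above.
// All names here are illustrative stand-ins, not the InnoDB symbols.
unsigned stored_prefix_fields(bool is_ibuf, bool is_spatial, bool leaf_rec,
                              unsigned n_fields_in_rec,
                              unsigned n_unique_in_tree,
                              unsigned spatial_nodeptr_size)
{
    if (is_ibuf)               // change buffer: store every field of the record
        return n_fields_in_rec;
    if (leaf_rec)              // leaf record: the unique search key is enough
        return n_unique_in_tree;
    if (is_spatial)            // R-tree node pointer: compare the child page no. too
        return spatial_nodeptr_size + 1;
    return n_unique_in_tree;   // ordinary node pointer
}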
diff --git a/storage/innobase/btr/btr0scrub.cc b/storage/innobase/btr/btr0scrub.cc
index 7d8966d4109..a4ae24b8946 100644
--- a/storage/innobase/btr/btr0scrub.cc
+++ b/storage/innobase/btr/btr0scrub.cc
@@ -434,7 +434,7 @@ btr_pessimistic_scrub(
const ulint page_no = mach_read_from_4(page + FIL_PAGE_OFFSET);
const ulint left_page_no = mach_read_from_4(page + FIL_PAGE_PREV);
const ulint right_page_no = mach_read_from_4(page + FIL_PAGE_NEXT);
- const page_size_t page_size(index->table->space->flags);
+ const ulint zip_size = index->table->space->zip_size();
/**
* When splitting page, we need X-latches on left/right brothers
@@ -449,16 +449,16 @@ btr_pessimistic_scrub(
*/
mtr->release_block_at_savepoint(scrub_data->savepoint, block);
- buf_block_t* get_block __attribute__((unused)) = btr_block_get(
+ btr_block_get(
page_id_t(index->table->space_id, left_page_no),
- page_size, RW_X_LATCH, index, mtr);
+ zip_size, RW_X_LATCH, index, mtr);
/**
* Refetch block and re-initialize page
*/
block = btr_block_get(
page_id_t(index->table->space_id, page_no),
- page_size, RW_X_LATCH, index, mtr);
+ zip_size, RW_X_LATCH, index, mtr);
page = buf_block_get_frame(block);
@@ -470,9 +470,9 @@ btr_pessimistic_scrub(
}
if (right_page_no != FIL_NULL) {
- buf_block_t* get_block __attribute__((unused))= btr_block_get(
+ btr_block_get(
page_id_t(index->table->space_id, right_page_no),
- page_size, RW_X_LATCH, index, mtr);
+ zip_size, RW_X_LATCH, index, mtr);
}
/* arguments to btr_page_split_and_insert */
@@ -842,13 +842,15 @@ btr_scrub_start_space(
ulint space, /*!< in: space */
btr_scrub_t* scrub_data) /*!< in/out: scrub data */
{
- bool found;
scrub_data->space = space;
scrub_data->current_table = NULL;
scrub_data->current_index = NULL;
- const page_size_t page_size = fil_space_get_page_size(space, &found);
-
- scrub_data->compressed = page_size.is_compressed();
+ if (fil_space_t* s = fil_space_acquire_silent(space)) {
+ scrub_data->compressed = s->zip_size();
+ s->release();
+ } else {
+ scrub_data->compressed = 0;
+ }
scrub_data->scrubbing = check_scrub_setting(scrub_data);
return scrub_data->scrubbing;
}
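
btr_scrub_start_space() now pins the tablespace, copies zip_size(), and releases it instead of calling fil_space_get_page_size(). A compilable sketch of that pin/copy/release pattern; the registry, mutex, and reference counting here are illustrative stand-ins for the fil_space machinery.

#include <cstdint>
#include <map>
#include <mutex>

// Minimal stand-in for the fil_space registry; only the acquire/release
// shape is taken from the hunk above, everything else is illustrative.
struct space_t {
    uint32_t zip_size = 0;      // 0 = uncompressed
    int refs = 0;
    void release() { --refs; }  // the real code drops an atomic reference
};

static std::map<uint32_t, space_t> spaces;
static std::mutex spaces_mutex;

static space_t* acquire_space(uint32_t id)
{
    std::lock_guard<std::mutex> g(spaces_mutex);
    auto it = spaces.find(id);
    if (it == spaces.end()) return nullptr;   // tablespace was dropped
    ++it->second.refs;                        // pin it so it cannot go away
    return &it->second;
}

// btr_scrub_start_space() style lookup: pin, copy the property, unpin.
uint32_t scrub_space_zip_size(uint32_t id)
{
    if (space_t* s = acquire_space(id)) {
        uint32_t z = s->zip_size;
        s->release();
        return z;
    }
    return 0;   // tablespace no longer exists; treat as uncompressed
}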
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index 74f9816801d..ca350d02316 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -1190,7 +1190,7 @@ retry:
rec = page_get_infimum_rec(page);
rec = page_rec_get_next_low(rec, page_is_comp(page));
- if (rec_is_metadata(rec, index)) {
+ if (rec_is_metadata(rec, *index)) {
rec = page_rec_get_next_low(rec, page_is_comp(page));
}
@@ -1287,7 +1287,7 @@ void btr_search_drop_page_hash_when_freed(const page_id_t page_id)
are possibly holding, we cannot s-latch the page, but must
(recursively) x-latch it, even though we are only reading. */
- block = buf_page_get_gen(page_id, univ_page_size, RW_X_LATCH, NULL,
+ block = buf_page_get_gen(page_id, 0, RW_X_LATCH, NULL,
BUF_PEEK_IF_IN_POOL, __FILE__, __LINE__,
&mtr, &err);
@@ -1398,7 +1398,7 @@ btr_search_build_page_hash_index(
rec = page_rec_get_next_const(page_get_infimum_rec(page));
- if (rec_is_metadata(rec, index)) {
+ if (rec_is_metadata(rec, *index)) {
rec = page_rec_get_next_const(rec);
if (!--n_recs) return;
}
@@ -1862,7 +1862,7 @@ btr_search_update_hash_on_insert(btr_cur_t* cursor, rw_lock_t* ahi_latch)
n_bytes, index->id);
}
- if (!page_rec_is_infimum(rec) && !rec_is_metadata(rec, index)) {
+ if (!page_rec_is_infimum(rec) && !rec_is_metadata(rec, *index)) {
offsets = rec_get_offsets(
rec, index, offsets, true,
btr_search_get_n_fields(n_fields, n_bytes), &heap);
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 579eb62d82c..37dd04f0d8e 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -33,14 +33,11 @@ Created 11/5/1995 Heikki Tuuri
#include "mtr0types.h"
#include "mach0data.h"
-#include "page0size.h"
#include "buf0buf.h"
+#include "buf0checksum.h"
+#include "ut0crc32.h"
#include <string.h>
-#ifdef UNIV_NONINL
-#include "buf0buf.ic"
-#endif
-
#ifndef UNIV_INNOCHECKSUM
#include "mem0mem.h"
#include "btr0btr.h"
@@ -60,19 +57,14 @@ Created 11/5/1995 Heikki Tuuri
#include "dict0dict.h"
#include "log0recv.h"
#include "srv0mon.h"
+#include "fil0pagecompress.h"
+#include "fsp0pagecompress.h"
#endif /* !UNIV_INNOCHECKSUM */
#include "page0zip.h"
#include "sync0sync.h"
#include "buf0dump.h"
-#include <new>
#include <map>
#include <sstream>
-#ifndef UNIV_INNOCHECKSUM
-#include "fil0pagecompress.h"
-#include "fsp0pagecompress.h"
-#endif
-#include "ut0byte.h"
-#include <new>
#ifdef UNIV_LINUX
#include <stdlib.h>
@@ -129,6 +121,7 @@ struct set_numa_interleave_t
#include "snappy-c.h"
#endif
+#ifndef UNIV_INNOCHECKSUM
inline void* aligned_malloc(size_t size, size_t align) {
void *result;
#ifdef _MSC_VER
@@ -152,6 +145,16 @@ inline void aligned_free(void *ptr) {
#endif
}
+buf_pool_t::io_buf_t::~io_buf_t()
+{
+ for (buf_tmp_buffer_t* s = slots, *e = slots + n_slots; s != e; s++) {
+ aligned_free(s->crypt_buf);
+ aligned_free(s->comp_buf);
+ }
+ ut_free(slots);
+}
+#endif /* !UNIV_INNOCHECKSUM */
+
/*
IMPLEMENTATION OF THE BUFFER POOL
=================================
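
aligned_malloc()/aligned_free() shown above wrap the platform allocators, and the new buf_pool_t::io_buf_t destructor frees every slot's crypt and compression buffers in one place. A self-contained sketch of both ideas; the slot structure is simplified and only the _MSC_VER/posix_memalign split mirrors the code.

#include <cstddef>
#include <cstdlib>
#ifdef _MSC_VER
# include <malloc.h>
#endif

// Portable aligned allocation; align must be a power of two and a
// multiple of sizeof(void*) for posix_memalign.
static void* aligned_alloc_compat(std::size_t size, std::size_t align)
{
#ifdef _MSC_VER
    return _aligned_malloc(size, align);
#else
    void* p = nullptr;
    return posix_memalign(&p, align, size) ? nullptr : p;
#endif
}

static void aligned_free_compat(void* p)
{
#ifdef _MSC_VER
    _aligned_free(p);
#else
    free(p);
#endif
}

// Slot buffers are released by one destructor instead of ad-hoc loops.
struct tmp_slot { void* crypt_buf = nullptr; void* comp_buf = nullptr; };

struct io_buf_sketch {
    tmp_slot* slots;
    std::size_t n_slots;
    explicit io_buf_sketch(std::size_t n) : slots(new tmp_slot[n]), n_slots(n) {}
    ~io_buf_sketch() {
        for (std::size_t i = 0; i < n_slots; i++) {
            aligned_free_compat(slots[i].crypt_buf);
            aligned_free_compat(slots[i].comp_buf);
        }
        delete[] slots;
    }
};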
@@ -424,16 +427,9 @@ on the io_type */
@return reserved buffer slot */
static buf_tmp_buffer_t* buf_pool_reserve_tmp_slot(buf_pool_t* buf_pool)
{
- for (ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) {
- buf_tmp_buffer_t* slot = &buf_pool->tmp_arr->slots[i];
- if (slot->acquire()) {
- return slot;
- }
- }
-
- /* We assume that free slot is found */
- ut_error;
- return NULL;
+ buf_tmp_buffer_t* slot = buf_pool->io_buf.reserve();
+ ut_a(slot);
+ return slot;
}
/** Reserve a buffer for encryption, decryption or decompression.
@@ -487,7 +483,8 @@ static bool buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space)
byte* dst_frame = bpage->zip.data ? bpage->zip.data :
((buf_block_t*) bpage)->frame;
- bool page_compressed = fil_page_is_compressed(dst_frame);
+ bool page_compressed = space->is_compressed()
+ && buf_page_is_compressed(dst_frame, space->flags);
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
if (bpage->id.page_no() == 0) {
@@ -500,39 +497,45 @@ static bool buf_page_decrypt_after_read(buf_page_t* bpage, fil_space_t* space)
also for pages first compressed and then encrypted. */
buf_tmp_buffer_t* slot;
+ uint key_version = buf_page_get_key_version(dst_frame, space->flags);
- if (page_compressed) {
+ if (page_compressed && !key_version) {
/* the page we read is unencrypted */
/* Find free slot from temporary memory array */
decompress:
+ if (space->full_crc32()
+ && buf_page_is_corrupted(true, dst_frame, space->flags)) {
+ return false;
+ }
+
slot = buf_pool_reserve_tmp_slot(buf_pool);
/* For decompression, use crypt_buf. */
buf_tmp_reserve_crypt_buf(slot);
+
decompress_with_slot:
- ut_d(fil_page_type_validate(dst_frame));
+ ut_d(fil_page_type_validate(space, dst_frame));
- bpage->write_size = fil_page_decompress(slot->crypt_buf,
- dst_frame);
+ bpage->write_size = fil_page_decompress(
+ slot->crypt_buf, dst_frame, space->flags);
slot->release();
- ut_ad(!bpage->write_size || fil_page_type_validate(dst_frame));
+ ut_ad(!bpage->write_size
+ || fil_page_type_validate(space, dst_frame));
+
ut_ad(space->pending_io());
+
return bpage->write_size != 0;
}
- if (space->crypt_data
- && mach_read_from_4(FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
- + dst_frame)) {
+ if (key_version && space->crypt_data) {
/* Verify encryption checksum before we even try to
decrypt. */
- if (!fil_space_verify_crypt_checksum(dst_frame, bpage->size)) {
+ if (!buf_page_verify_crypt_checksum(dst_frame, space->flags)) {
decrypt_failed:
ib::error() << "Encrypted page " << bpage->id
<< " in file " << space->chain.start->name
<< " looks corrupted; key_version="
- << mach_read_from_4(
- FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
- + dst_frame);
+ << key_version;
/* Mark page encrypted in case it should be. */
if (space->crypt_data->type
!= CRYPT_SCHEME_UNENCRYPTED) {
@@ -546,7 +549,7 @@ decrypt_failed:
slot = buf_pool_reserve_tmp_slot(buf_pool);
buf_tmp_reserve_crypt_buf(slot);
- ut_d(fil_page_type_validate(dst_frame));
+ ut_d(fil_page_type_validate(space, dst_frame));
/* decrypt using crypt_buf to dst_frame */
if (!fil_space_decrypt(space, slot->crypt_buf,
@@ -555,9 +558,10 @@ decrypt_failed:
goto decrypt_failed;
}
- ut_d(fil_page_type_validate(dst_frame));
+ ut_d(fil_page_type_validate(space, dst_frame));
- if (fil_page_is_compressed_encrypted(dst_frame)) {
+ if ((space->full_crc32() && page_compressed)
+ || fil_page_is_compressed_encrypted(dst_frame)) {
goto decompress_with_slot;
}
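
After these hunks, buf_page_decrypt_after_read() roughly works as follows: a page_compressed page without a key version is only decompressed; a page carrying a key version is checksum-verified and decrypted first, then decompressed if it was also compressed. A condensed sketch of that decision, with every callee an illustrative placeholder rather than the InnoDB routine.

// Condensed decision flow; every callee is an illustrative placeholder.
bool decrypt_after_read_sketch(bool page_compressed, unsigned key_version,
                               bool have_crypt_data,
                               bool (*verify_crypt_checksum)(),
                               bool (*decrypt)(), bool (*decompress)())
{
    if (page_compressed && !key_version)      // compressed, never encrypted
        return decompress();

    if (key_version && have_crypt_data) {     // encrypted (and maybe compressed)
        if (!verify_crypt_checksum())
            return false;                     // corrupted before decryption
        if (!decrypt())
            return false;
        if (page_compressed)                  // compressed-then-encrypted
            return decompress();
    }
    return true;                              // plain page: nothing to do
}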
@@ -765,11 +769,7 @@ buf_page_is_checksum_valid_crc32(
return false;
}
- return checksum_field1 == crc32
-#ifdef INNODB_BUG_ENDIAN_CRC32
- || checksum_field1 == buf_calc_page_crc32(read_buf, true)
-#endif
- ;
+ return checksum_field1 == crc32;
}
/** Checks if the page is in innodb checksum format.
@@ -898,54 +898,127 @@ buf_page_is_checksum_valid_none(
&& checksum_field1 == BUF_NO_CHECKSUM_MAGIC);
}
-#ifdef INNODB_BUG_ENDIAN_CRC32
-/** Validate the CRC-32C checksum of a page.
-@param[in] page buffer page (srv_page_size bytes)
-@param[in] checksum CRC-32C checksum stored on page
-@return computed checksum */
-static uint32_t buf_page_check_crc32(const byte* page, uint32_t checksum)
+/** Check that the FIL_PAGE_LSN stored in the page is not newer than the
+current log sequence number obtained via log_peek_lsn(), and report an
+error if it is.
+@param[in]	check_lsn	whether to perform the check
+@param[in]	read_buf	database page */
+static void buf_page_check_lsn(bool check_lsn, const byte* read_buf)
{
- uint32_t crc32 = buf_calc_page_crc32(page);
+#ifndef UNIV_INNOCHECKSUM
+ if (check_lsn && recv_lsn_checks_on) {
+ lsn_t current_lsn;
+ const lsn_t page_lsn
+ = mach_read_from_8(read_buf + FIL_PAGE_LSN);
+
+ /* Since we are going to reset the page LSN during the import
+ phase it makes no sense to spam the log with error messages. */
+
+ if (log_peek_lsn(&current_lsn) && current_lsn < page_lsn) {
+
+ const ulint space_id = mach_read_from_4(
+ read_buf + FIL_PAGE_SPACE_ID);
+ const ulint page_no = mach_read_from_4(
+ read_buf + FIL_PAGE_OFFSET);
+
+ ib::error() << "Page " << page_id_t(space_id, page_no)
+ << " log sequence number " << page_lsn
+ << " is in the future! Current system"
+ << " log sequence number "
+ << current_lsn << ".";
- if (checksum != crc32) {
- crc32 = buf_calc_page_crc32(page, true);
+ ib::error() << "Your database may be corrupt or"
+ " you may have copied the InnoDB"
+ " tablespace but not the InnoDB"
+ " log files. "
+ << FORCE_RECOVERY_MSG;
+
+ }
}
+#endif /* !UNIV_INNOCHECKSUM */
+}
- return crc32;
+/** Check if a page is all zeroes.
+@param[in] read_buf database page
+@param[in] page_size page frame size
+@return whether the page is all zeroes */
+bool buf_page_is_zeroes(const void* read_buf, size_t page_size)
+{
+ const ulint* b = reinterpret_cast<const ulint*>(read_buf);
+ const ulint* const e = b + page_size / sizeof *b;
+ do {
+ if (*b++) {
+ return false;
+ }
+ } while (b != e);
+ return true;
}
-#else /* INNODB_BUG_ENDIAN_CRC32 */
-/** Validate the CRC-32C checksum of a page.
-@param[in] page buffer page (srv_page_size bytes)
-@param[in] checksum CRC-32C checksum stored on page
-@return computed checksum */
-# define buf_page_check_crc32(page, checksum) buf_calc_page_crc32(page)
-#endif /* INNODB_BUG_ENDIAN_CRC32 */
/** Check if a page is corrupt.
@param[in] check_lsn whether the LSN should be checked
@param[in] read_buf database page
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] space tablespace
@return whether the page is corrupted */
bool
buf_page_is_corrupted(
bool check_lsn,
const byte* read_buf,
- const page_size_t& page_size,
-#ifndef UNIV_INNOCHECKSUM
- const fil_space_t* space)
-#else
- const void* space)
-#endif
+ ulint fsp_flags)
{
- ut_ad(page_size.logical() == srv_page_size);
#ifndef UNIV_INNOCHECKSUM
DBUG_EXECUTE_IF("buf_page_import_corrupt_failure", return(true); );
#endif
+ if (fil_space_t::full_crc32(fsp_flags)) {
+ bool compressed = false, corrupted = false;
+ const uint size = buf_page_full_crc32_size(
+ read_buf, &compressed, &corrupted);
+ if (corrupted) {
+ return true;
+ }
+ const byte* end = read_buf + (size - FIL_PAGE_FCRC32_CHECKSUM);
+ uint crc32 = mach_read_from_4(end);
+
+ if (!crc32 && size == srv_page_size
+ && buf_page_is_zeroes(read_buf, size)) {
+ return false;
+ }
+
+ DBUG_EXECUTE_IF(
+ "page_intermittent_checksum_mismatch", {
+ static int page_counter;
+ if (page_counter++ == 2) {
+ crc32++;
+ }
+ });
+
+ if (crc32 != ut_crc32(read_buf,
+ size - FIL_PAGE_FCRC32_CHECKSUM)) {
+ return true;
+ }
+ if (!compressed
+ && !mach_read_from_4(FIL_PAGE_FCRC32_KEY_VERSION
+ + read_buf)
+ && memcmp(read_buf + (FIL_PAGE_LSN + 4),
+ end - (FIL_PAGE_FCRC32_END_LSN
+ - FIL_PAGE_FCRC32_CHECKSUM), 4)) {
+ return true;
+ }
+
+ buf_page_check_lsn(check_lsn, read_buf);
+ return false;
+ }
+
size_t checksum_field1 = 0;
size_t checksum_field2 = 0;
uint32_t crc32 = 0;
bool crc32_inited = false;
+ ulint zip_size = 0;
+ bool crc32_chksum = false;
+
+ zip_size = FSP_FLAGS_GET_ZIP_SSIZE(fsp_flags);
+ if (zip_size) {
+ zip_size = (UNIV_ZIP_SIZE_MIN >> 1) << zip_size;
+ }
ulint page_type = mach_read_from_2(read_buf + FIL_PAGE_TYPE);
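
The zip_size decoding above expands the stored shift into a byte count: zip_size = (UNIV_ZIP_SIZE_MIN >> 1) << ssize. Assuming UNIV_ZIP_SIZE_MIN is 1024, this yields 1024 bytes for ssize=1 up to 16384 for ssize=5, and 0 (uncompressed) when no ssize is set. A worked sketch:

#include <cassert>

// Decode the ROW_FORMAT=COMPRESSED page size from the stored shift value,
// as in the hunk above.  UNIV_ZIP_SIZE_MIN is assumed to be 1024 here.
static unsigned zip_size_from_ssize(unsigned ssize)
{
    const unsigned UNIV_ZIP_SIZE_MIN_SKETCH = 1024;
    return ssize ? (UNIV_ZIP_SIZE_MIN_SKETCH >> 1) << ssize : 0;
}

int main()
{
    assert(zip_size_from_ssize(0) == 0);        // not ROW_FORMAT=COMPRESSED
    assert(zip_size_from_ssize(1) == 1024);     // KEY_BLOCK_SIZE=1
    assert(zip_size_from_ssize(4) == 8192);     // KEY_BLOCK_SIZE=8
    assert(zip_size_from_ssize(5) == 16384);    // KEY_BLOCK_SIZE=16
    return 0;
}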
@@ -961,16 +1034,15 @@ buf_page_is_corrupted(
if ((page_type == FIL_PAGE_PAGE_COMPRESSED ||
page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED)
#ifndef UNIV_INNOCHECKSUM
- && space && FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags)
+ && FSP_FLAGS_HAS_PAGE_COMPRESSION(fsp_flags)
#endif
) {
return(false);
}
- if (!page_size.is_compressed()
- && memcmp(read_buf + FIL_PAGE_LSN + 4,
- read_buf + page_size.logical()
- - FIL_PAGE_END_LSN_OLD_CHKSUM + 4, 4)) {
+ if (!zip_size && memcmp(read_buf + FIL_PAGE_LSN + 4,
+ read_buf + srv_page_size
+ - FIL_PAGE_END_LSN_OLD_CHKSUM + 4, 4)) {
/* Stored log sequence numbers at the start and the end
of page do not match */
@@ -978,37 +1050,7 @@ buf_page_is_corrupted(
return(true);
}
-#ifndef UNIV_INNOCHECKSUM
- if (check_lsn && recv_lsn_checks_on) {
- lsn_t current_lsn;
- const lsn_t page_lsn
- = mach_read_from_8(read_buf + FIL_PAGE_LSN);
-
- /* Since we are going to reset the page LSN during the import
- phase it makes no sense to spam the log with error messages. */
-
- if (log_peek_lsn(&current_lsn) && current_lsn < page_lsn) {
-
- const ulint space_id = mach_read_from_4(
- read_buf + FIL_PAGE_SPACE_ID);
- const ulint page_no = mach_read_from_4(
- read_buf + FIL_PAGE_OFFSET);
-
- ib::error() << "Page " << page_id_t(space_id, page_no)
- << " log sequence number " << page_lsn
- << " is in the future! Current system"
- << " log sequence number "
- << current_lsn << ".";
-
- ib::error() << "Your database may be corrupt or"
- " you may have copied the InnoDB"
- " tablespace but not the InnoDB"
- " log files. "
- << FORCE_RECOVERY_MSG;
-
- }
- }
-#endif /* !UNIV_INNOCHECKSUM */
+ buf_page_check_lsn(check_lsn, read_buf);
/* Check whether the checksum fields have correct values */
@@ -1019,16 +1061,15 @@ buf_page_is_corrupted(
return(false);
}
- if (page_size.is_compressed()) {
- return(!page_zip_verify_checksum(read_buf,
- page_size.physical()));
+ if (zip_size) {
+ return !page_zip_verify_checksum(read_buf, zip_size);
}
checksum_field1 = mach_read_from_4(
read_buf + FIL_PAGE_SPACE_OR_CHKSUM);
checksum_field2 = mach_read_from_4(
- read_buf + page_size.logical() - FIL_PAGE_END_LSN_OLD_CHKSUM);
+ read_buf + srv_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM);
compile_time_assert(!(FIL_PAGE_LSN % 8));
@@ -1044,13 +1085,10 @@ buf_page_is_corrupted(
}
} while (++i < FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
-#ifndef UNIV_INNOCHECKSUM
- if (!space || !space->id) {
- /* Skip FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
- in the system tablespace. */
- i += 8;
- }
-#endif
+ /* Ignore FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION unless
+ innodb_checksum_algorithm=full_crc32. */
+ i += 8;
+
do {
if (read_buf[i]) {
return true;
@@ -1060,6 +1098,7 @@ buf_page_is_corrupted(
}
switch (curr_algo) {
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
return !buf_page_is_checksum_valid_crc32(
read_buf, checksum_field1, checksum_field2);
@@ -1069,6 +1108,7 @@ buf_page_is_corrupted(
case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
return !buf_page_is_checksum_valid_none(
read_buf, checksum_field1, checksum_field2);
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
case SRV_CHECKSUM_ALGORITHM_CRC32:
case SRV_CHECKSUM_ALGORITHM_INNODB:
if (buf_page_is_checksum_valid_none(read_buf,
@@ -1093,6 +1133,9 @@ buf_page_is_corrupted(
return false;
}
+ crc32_chksum = curr_algo == SRV_CHECKSUM_ALGORITHM_CRC32
+ || curr_algo == SRV_CHECKSUM_ALGORITHM_FULL_CRC32;
+
/* Very old versions of InnoDB only stored 8 byte lsn to the
start and the end of the page. */
@@ -1103,19 +1146,18 @@ buf_page_is_corrupted(
!= mach_read_from_4(read_buf + FIL_PAGE_LSN)
&& checksum_field2 != BUF_NO_CHECKSUM_MAGIC) {
- if (curr_algo == SRV_CHECKSUM_ALGORITHM_CRC32) {
+ if (crc32_chksum) {
+ crc32 = buf_calc_page_crc32(read_buf);
+ crc32_inited = true;
+
DBUG_EXECUTE_IF(
"page_intermittent_checksum_mismatch", {
static int page_counter;
if (page_counter++ == 2) {
- checksum_field2++;
+ crc32++;
}
});
- crc32 = buf_page_check_crc32(read_buf,
- checksum_field2);
- crc32_inited = true;
-
if (checksum_field2 != crc32
&& checksum_field2
!= buf_calc_page_old_checksum(read_buf)) {
@@ -1127,8 +1169,7 @@ buf_page_is_corrupted(
if (checksum_field2
!= buf_calc_page_old_checksum(read_buf)) {
- crc32 = buf_page_check_crc32(
- read_buf, checksum_field2);
+ crc32 = buf_calc_page_crc32(read_buf);
crc32_inited = true;
if (checksum_field2 != crc32) {
@@ -1140,10 +1181,10 @@ buf_page_is_corrupted(
if (checksum_field1 == 0
|| checksum_field1 == BUF_NO_CHECKSUM_MAGIC) {
- } else if (curr_algo == SRV_CHECKSUM_ALGORITHM_CRC32) {
+ } else if (crc32_chksum) {
+
if (!crc32_inited) {
- crc32 = buf_page_check_crc32(
- read_buf, checksum_field2);
+ crc32 = buf_calc_page_crc32(read_buf);
crc32_inited = true;
}
@@ -1159,8 +1200,7 @@ buf_page_is_corrupted(
!= buf_calc_page_new_checksum(read_buf)) {
if (!crc32_inited) {
- crc32 = buf_page_check_crc32(
- read_buf, checksum_field2);
+ crc32 = buf_calc_page_crc32(read_buf);
crc32_inited = true;
}
@@ -1240,20 +1280,19 @@ buf_madvise_do_dump()
/** Dump a page to stderr.
@param[in] read_buf database page
-@param[in] page_size page size */
-UNIV_INTERN
-void
-buf_page_print(const byte* read_buf, const page_size_t& page_size)
+@param[in] zip_size compressed page size, or 0 */
+void buf_page_print(const byte* read_buf, ulint zip_size)
{
+ const ulint size = zip_size ? zip_size : srv_page_size;
dict_index_t* index;
ib::info() << "Page dump in ascii and hex ("
- << page_size.physical() << " bytes):";
+ << size << " bytes):";
- ut_print_buf(stderr, read_buf, page_size.physical());
+ ut_print_buf(stderr, read_buf, size);
fputs("\nInnoDB: End of page dump\n", stderr);
- if (page_size.is_compressed()) {
+ if (zip_size) {
/* Print compressed page. */
ib::info() << "Compressed page type ("
<< fil_page_get_type(read_buf)
@@ -1265,27 +1304,21 @@ buf_page_print(const byte* read_buf, const page_size_t& page_size)
SRV_CHECKSUM_ALGORITHM_CRC32)
<< " "
<< page_zip_calc_checksum(
- read_buf, page_size.physical(),
+ read_buf, zip_size,
SRV_CHECKSUM_ALGORITHM_CRC32)
-#ifdef INNODB_BUG_ENDIAN_CRC32
- << "/"
- << page_zip_calc_checksum(
- read_buf, page_size.physical(),
- SRV_CHECKSUM_ALGORITHM_CRC32, true)
-#endif
<< ", "
<< buf_checksum_algorithm_name(
SRV_CHECKSUM_ALGORITHM_INNODB)
<< " "
<< page_zip_calc_checksum(
- read_buf, page_size.physical(),
+ read_buf, zip_size,
SRV_CHECKSUM_ALGORITHM_INNODB)
<< ", "
<< buf_checksum_algorithm_name(
SRV_CHECKSUM_ALGORITHM_NONE)
<< " "
<< page_zip_calc_checksum(
- read_buf, page_size.physical(),
+ read_buf, zip_size,
SRV_CHECKSUM_ALGORITHM_NONE)
<< "; page LSN "
<< mach_read_from_8(read_buf + FIL_PAGE_LSN)
@@ -1298,10 +1331,6 @@ buf_page_print(const byte* read_buf, const page_size_t& page_size)
} else {
const uint32_t crc32 = buf_calc_page_crc32(read_buf);
-#ifdef INNODB_BUG_ENDIAN_CRC32
- const uint32_t crc32_legacy = buf_calc_page_crc32(read_buf,
- true);
-#endif /* INNODB_BUG_ENDIAN_CRC32 */
ulint page_type = fil_page_get_type(read_buf);
ib::info() << "Uncompressed page, stored checksum in field1 "
@@ -1311,9 +1340,6 @@ buf_page_print(const byte* read_buf, const page_size_t& page_size)
<< buf_checksum_algorithm_name(
SRV_CHECKSUM_ALGORITHM_CRC32) << " "
<< crc32
-#ifdef INNODB_BUG_ENDIAN_CRC32
- << "/" << crc32_legacy
-#endif
<< ", "
<< buf_checksum_algorithm_name(
SRV_CHECKSUM_ALGORITHM_INNODB) << " "
@@ -1325,15 +1351,12 @@ buf_page_print(const byte* read_buf, const page_size_t& page_size)
SRV_CHECKSUM_ALGORITHM_NONE) << " "
<< BUF_NO_CHECKSUM_MAGIC
<< ", stored checksum in field2 "
- << mach_read_from_4(read_buf + page_size.logical()
+ << mach_read_from_4(read_buf + srv_page_size
- FIL_PAGE_END_LSN_OLD_CHKSUM)
<< ", calculated checksums for field2: "
<< buf_checksum_algorithm_name(
SRV_CHECKSUM_ALGORITHM_CRC32) << " "
<< crc32
-#ifdef INNODB_BUG_ENDIAN_CRC32
- << "/" << crc32_legacy
-#endif
<< ", "
<< buf_checksum_algorithm_name(
SRV_CHECKSUM_ALGORITHM_INNODB) << " "
@@ -1347,7 +1370,7 @@ buf_page_print(const byte* read_buf, const page_size_t& page_size)
<< " "
<< mach_read_from_4(read_buf + FIL_PAGE_LSN + 4)
<< ", low 4 bytes of LSN at page end "
- << mach_read_from_4(read_buf + page_size.logical()
+ << mach_read_from_4(read_buf + srv_page_size
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4)
<< ", page number (if stored to page already) "
<< mach_read_from_4(read_buf + FIL_PAGE_OFFSET)
@@ -1461,7 +1484,7 @@ pfs_register_buffer_block(
: NULL;
# ifdef UNIV_DEBUG
- rwlock = &block->debug_latch;
+ rwlock = block->debug_latch;
ut_a(!rwlock->pfs_psi);
rwlock->pfs_psi = (PSI_server)
? PSI_server->init_rwlock(buf_block_debug_latch_key,
@@ -1523,6 +1546,7 @@ buf_block_init(
page_zip_des_init(&block->page.zip);
mutex_create(LATCH_ID_BUF_BLOCK_MUTEX, &block->mutex);
+ ut_d(block->debug_latch = (rw_lock_t *) ut_malloc_nokey(sizeof(rw_lock_t)));
#if defined PFS_SKIP_BUFFER_MUTEX_RWLOCK || defined PFS_GROUP_BUFFER_SYNC
/* If PFS_SKIP_BUFFER_MUTEX_RWLOCK is defined, skip registration
@@ -1534,7 +1558,7 @@ buf_block_init(
rw_lock_create(PFS_NOT_INSTRUMENTED, &block->lock, SYNC_LEVEL_VARYING);
- ut_d(rw_lock_create(PFS_NOT_INSTRUMENTED, &block->debug_latch,
+ ut_d(rw_lock_create(PFS_NOT_INSTRUMENTED, block->debug_latch,
SYNC_LEVEL_VARYING));
#else /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */
@@ -1542,7 +1566,7 @@ buf_block_init(
rw_lock_create(buf_block_lock_key, &block->lock, SYNC_LEVEL_VARYING);
ut_d(rw_lock_create(buf_block_debug_latch_key,
- &block->debug_latch, SYNC_LEVEL_VARYING));
+ block->debug_latch, SYNC_LEVEL_VARYING));
#endif /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */
@@ -1569,11 +1593,6 @@ buf_chunk_init(
/* Round down to a multiple of page size,
although it already should be. */
mem_size = ut_2pow_round<ulint>(mem_size, srv_page_size);
- /* Reserve space for the block descriptors. */
- mem_size += ut_2pow_round<ulint>((mem_size >> srv_page_size_shift)
- * (sizeof *block)
- + (srv_page_size - 1),
- srv_page_size);
DBUG_EXECUTE_IF("ib_buf_chunk_init_fails", return(NULL););
@@ -1800,6 +1819,16 @@ buf_pool_set_sizes(void)
buf_pool_mutex_exit_all();
}
+/** Free the synchronization objects of a buffer pool block descriptor
+@param[in,out] block buffer pool block descriptor */
+static void buf_block_free_mutexes(buf_block_t* block)
+{
+ mutex_free(&block->mutex);
+ rw_lock_free(&block->lock);
+ ut_d(rw_lock_free(block->debug_latch));
+ ut_d(ut_free(block->debug_latch));
+}
+
/********************************************************************//**
Initialize a buffer pool instance.
@return DB_SUCCESS if all goes well. */
@@ -1863,11 +1892,7 @@ buf_pool_init_instance(
buf_block_t* block = chunk->blocks;
for (i = chunk->size; i--; block++) {
- mutex_free(&block->mutex);
- rw_lock_free(&block->lock);
-
- ut_d(rw_lock_free(
- &block->debug_latch));
+ buf_block_free_mutexes(block);
}
buf_pool->allocator.deallocate_large_dodump(
@@ -1887,8 +1912,7 @@ buf_pool_init_instance(
ut_min(BUF_READ_AHEAD_PAGES,
ut_2_power_up(buf_pool->curr_size /
BUF_READ_AHEAD_PORTION));
- buf_pool->curr_pool_size = buf_pool->curr_size
- << srv_page_size_shift;
+ buf_pool->curr_pool_size = buf_pool_size;
buf_pool->old_size = buf_pool->curr_size;
buf_pool->n_chunks_new = buf_pool->n_chunks;
@@ -1945,12 +1969,9 @@ buf_pool_init_instance(
new(&buf_pool->single_scan_itr) LRUItr(buf_pool, &buf_pool->mutex);
/* Initialize the temporal memory array and slots */
- buf_pool->tmp_arr = (buf_tmp_array_t *)ut_malloc_nokey(sizeof(buf_tmp_array_t));
- memset(buf_pool->tmp_arr, 0, sizeof(buf_tmp_array_t));
- ulint n_slots = (srv_n_read_io_threads + srv_n_write_io_threads) * (8 * OS_AIO_N_PENDING_IOS_PER_THREAD);
- buf_pool->tmp_arr->n_slots = n_slots;
- buf_pool->tmp_arr->slots = (buf_tmp_buffer_t*)ut_malloc_nokey(sizeof(buf_tmp_buffer_t) * n_slots);
- memset(buf_pool->tmp_arr->slots, 0, (sizeof(buf_tmp_buffer_t) * n_slots));
+ new(&buf_pool->io_buf) buf_pool_t::io_buf_t(
+ (srv_n_read_io_threads + srv_n_write_io_threads)
+ * (8 * OS_AIO_N_PENDING_IOS_PER_THREAD));
buf_pool_mutex_exit(buf_pool);
@@ -2012,10 +2033,7 @@ buf_pool_free_instance(
buf_block_t* block = chunk->blocks;
for (ulint i = chunk->size; i--; block++) {
- mutex_free(&block->mutex);
- rw_lock_free(&block->lock);
-
- ut_d(rw_lock_free(&block->debug_latch));
+ buf_block_free_mutexes(block);
}
buf_pool->allocator.deallocate_large_dodump(
@@ -2031,26 +2049,7 @@ buf_pool_free_instance(
hash_table_free(buf_pool->page_hash);
hash_table_free(buf_pool->zip_hash);
- /* Free all used temporary slots */
- if (buf_pool->tmp_arr) {
- for(ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) {
- buf_tmp_buffer_t* slot = &(buf_pool->tmp_arr->slots[i]);
- if (slot && slot->crypt_buf) {
- aligned_free(slot->crypt_buf);
- slot->crypt_buf = NULL;
- }
-
- if (slot && slot->comp_buf) {
- aligned_free(slot->comp_buf);
- slot->comp_buf = NULL;
- }
- }
-
- ut_free(buf_pool->tmp_arr->slots);
- ut_free(buf_pool->tmp_arr);
- buf_pool->tmp_arr = NULL;
- }
-
+ buf_pool->io_buf.~io_buf_t();
buf_pool->allocator.~ut_allocator();
}
@@ -2695,12 +2694,12 @@ buf_pool_resize()
ut_ad(UT_LIST_GET_LEN(buf_pool->withdraw) == 0);
ut_ad(buf_pool->flush_rbt == NULL);
- buf_pool->curr_size = new_instance_size;
-
buf_pool->n_chunks_new =
(new_instance_size << srv_page_size_shift)
/ srv_buf_pool_chunk_unit;
+ buf_pool->curr_size = buf_pool->n_chunks_new * buf_pool->chunks->size;
+
buf_pool_mutex_exit(buf_pool);
}
#ifdef BTR_CUR_HASH_ADAPT
@@ -2889,11 +2888,7 @@ withdraw_retry:
for (ulint j = chunk->size;
j--; block++) {
- mutex_free(&block->mutex);
- rw_lock_free(&block->lock);
-
- ut_d(rw_lock_free(
- &block->debug_latch));
+ buf_block_free_mutexes(block);
}
buf_pool->allocator.deallocate_large_dodump(
@@ -3034,7 +3029,7 @@ calc_buf_pool_size:
ut_2_power_up(buf_pool->curr_size /
BUF_READ_AHEAD_PORTION));
buf_pool->curr_pool_size
- = buf_pool->curr_size << srv_page_size_shift;
+ = buf_pool->n_chunks * srv_buf_pool_chunk_unit;
curr_size += buf_pool->curr_pool_size;
buf_pool->old_size = buf_pool->curr_size;
}
@@ -3467,7 +3462,7 @@ page_found:
}
/* Add to an existing watch. */
- buf_block_fix(bpage);
+ bpage->fix();
return(NULL);
}
@@ -3607,7 +3602,7 @@ void buf_pool_watch_unset(const page_id_t page_id)
increments buf_fix_count. */
bpage = buf_page_hash_get_low(buf_pool, page_id);
- if (buf_block_unfix(bpage) == 0
+ if (bpage->unfix() == 0
&& buf_pool_watch_is_sentinel(buf_pool, bpage)) {
buf_pool_watch_remove(buf_pool, bpage);
}
@@ -3774,12 +3769,9 @@ be implemented at a higher level. In other words, all possible
accesses to a given page through this function must be protected by
the same set of mutexes or latches.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size
@return pointer to the block */
-buf_page_t*
-buf_page_get_zip(
- const page_id_t page_id,
- const page_size_t& page_size)
+buf_page_t* buf_page_get_zip(const page_id_t page_id, ulint zip_size)
{
buf_page_t* bpage;
BPageMutex* block_mutex;
@@ -3788,6 +3780,8 @@ buf_page_get_zip(
ibool must_read;
buf_pool_t* buf_pool = buf_pool_get(page_id);
+ ut_ad(zip_size);
+ ut_ad(ut_is_2pow(zip_size));
buf_pool->stat.n_page_gets++;
for (;;) {
@@ -3805,7 +3799,7 @@ lookup:
/* Page not in buf_pool: needs to be read from file */
ut_ad(!hash_lock);
- dberr_t err = buf_read_page(page_id, page_size);
+ dberr_t err = buf_read_page(page_id, zip_size);
if (err != DB_SUCCESS) {
ib::error() << "Reading compressed page " << page_id
@@ -3833,7 +3827,7 @@ err_exit:
switch (buf_page_get_state(bpage)) {
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
- buf_block_fix(bpage);
+ bpage->fix();
block_mutex = &buf_pool->zip_mutex;
goto got_block;
case BUF_BLOCK_FILE_PAGE:
@@ -3944,7 +3938,7 @@ buf_zip_decompress(
&& (!crypt_data->is_default_encryption()
|| srv_encrypt_tables);
- ut_ad(block->page.size.is_compressed());
+ ut_ad(block->zip_size());
ut_a(block->page.id.space() != 0);
if (UNIV_UNLIKELY(check && !page_zip_verify_checksum(frame, size))) {
@@ -3956,19 +3950,12 @@ buf_zip_decompress(
<< ", crc32: "
<< page_zip_calc_checksum(
frame, size, SRV_CHECKSUM_ALGORITHM_CRC32)
-#ifdef INNODB_BUG_ENDIAN_CRC32
- << "/"
- << page_zip_calc_checksum(
- frame, size, SRV_CHECKSUM_ALGORITHM_CRC32,
- true)
-#endif
<< " innodb: "
<< page_zip_calc_checksum(
frame, size, SRV_CHECKSUM_ALGORITHM_INNODB)
<< ", none: "
<< page_zip_calc_checksum(
frame, size, SRV_CHECKSUM_ALGORITHM_NONE);
-
goto err_exit;
}
@@ -3995,7 +3982,7 @@ buf_zip_decompress(
case FIL_PAGE_TYPE_ZBLOB:
case FIL_PAGE_TYPE_ZBLOB2:
/* Copy to uncompressed storage. */
- memcpy(block->frame, frame, block->page.size.physical());
+ memcpy(block->frame, frame, block->zip_size());
if (space) {
space->release_for_io();
}
@@ -4212,6 +4199,7 @@ buf_wait_for_read(
/** This is the general function used to get access to a database page.
@param[in] page_id page id
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH
@param[in] guess guessed block or NULL
@param[in] mode BUF_GET, BUF_GET_IF_IN_POOL,
@@ -4219,11 +4207,12 @@ BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH, or BUF_GET_IF_IN_POOL_OR_WATCH
@param[in] file file name
@param[in] line line where called
@param[in] mtr mini-transaction
+@param[out] err DB_SUCCESS or error code
@return pointer to the block or NULL */
buf_block_t*
buf_page_get_gen(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
ulint rw_latch,
buf_block_t* guess,
ulint mode,
@@ -4271,16 +4260,15 @@ buf_page_get_gen(
case BUF_GET:
case BUF_GET_IF_IN_POOL_OR_WATCH:
case BUF_GET_POSSIBLY_FREED:
- bool found;
- const page_size_t& space_page_size
- = fil_space_get_page_size(page_id.space(), &found);
- ut_ad(found);
- ut_ad(page_size.equals_to(space_page_size));
+ fil_space_t* s = fil_space_acquire_for_io(page_id.space());
+ ut_ad(s);
+ ut_ad(s->zip_size() == zip_size);
+ s->release_for_io();
}
#endif /* UNIV_DEBUG */
ut_ad(!mtr || !ibuf_inside(mtr)
- || ibuf_page_low(page_id, page_size, FALSE, file, line, NULL));
+ || ibuf_page_low(page_id, zip_size, FALSE, file, line, NULL));
buf_pool->stat.n_page_gets++;
hash_lock = buf_page_hash_lock_get(buf_pool, page_id);
@@ -4353,10 +4341,10 @@ loop:
= buf_page_get_mutex(
&fix_block->page);
mutex_enter(fix_mutex);
- buf_block_fix(fix_block);
+ fix_block->fix();
mutex_exit(fix_mutex);
} else {
- buf_block_fix(fix_block);
+ fix_block->fix();
}
/* Now safe to release page_hash mutex */
@@ -4389,10 +4377,10 @@ loop:
corrupted, or if an encrypted page with a valid
checksum cannot be decypted. */
- dberr_t local_err = buf_read_page(page_id, page_size);
+ dberr_t local_err = buf_read_page(page_id, zip_size);
if (local_err == DB_SUCCESS) {
- buf_read_ahead_random(page_id, page_size,
+ buf_read_ahead_random(page_id, zip_size,
ibuf_inside(mtr));
retries = 0;
@@ -4463,18 +4451,20 @@ loop:
BPageMutex* fix_mutex = buf_page_get_mutex(
&fix_block->page);
mutex_enter(fix_mutex);
- buf_block_fix(fix_block);
+ fix_block->fix();
mutex_exit(fix_mutex);
} else {
- buf_block_fix(fix_block);
+ fix_block->fix();
}
/* Now safe to release page_hash mutex */
rw_lock_s_unlock(hash_lock);
got_block:
-
switch (mode) {
+ default:
+ ut_ad(block->zip_size() == zip_size);
+ break;
case BUF_GET_IF_IN_POOL:
case BUF_PEEK_IF_IN_POOL:
case BUF_EVICT_IF_IN_POOL:
@@ -4489,7 +4479,7 @@ got_block:
/* The page is being read to buffer pool,
but we cannot wait around for the read to
complete. */
- buf_block_unfix(fix_block);
+ fix_block->unfix();
return(NULL);
}
@@ -4505,7 +4495,7 @@ got_block:
/* This suggests that the page is being flushed.
Avoid returning reference to this page.
Instead wait for the flush action to complete. */
- buf_block_unfix(fix_block);
+ fix_block->unfix();
os_thread_sleep(WAIT_FOR_WRITE);
goto loop;
}
@@ -4514,7 +4504,7 @@ got_block:
evict_from_pool:
ut_ad(!fix_block->page.oldest_modification);
buf_pool_mutex_enter(buf_pool);
- buf_block_unfix(fix_block);
+ fix_block->unfix();
if (!buf_LRU_free_page(&fix_block->page, true)) {
ut_ad(0);
@@ -4532,7 +4522,7 @@ evict_from_pool:
adaptive hash index. There cannot be an
adaptive hash index for a compressed-only
page, so do not bother decompressing the page. */
- buf_block_unfix(fix_block);
+ fix_block->unfix();
return(NULL);
}
@@ -4546,7 +4536,7 @@ evict_from_pool:
/* This condition often occurs when the buffer
is not buffer-fixed, but I/O-fixed by
buf_page_init_for_read(). */
- buf_block_unfix(fix_block);
+ fix_block->unfix();
/* The block is buffer-fixed or I/O-fixed.
Try again later. */
@@ -4575,7 +4565,7 @@ evict_from_pool:
/* Buffer-fixing prevents the page_hash from changing. */
ut_ad(bpage == buf_page_hash_get_low(buf_pool, page_id));
- buf_block_unfix(fix_block);
+ fix_block->unfix();
buf_page_mutex_enter(block);
mutex_enter(&buf_pool->zip_mutex);
@@ -4667,7 +4657,7 @@ evict_from_pool:
buf_page_mutex_exit(fix_block);
--buf_pool->n_pend_unzip;
- buf_block_unfix(fix_block);
+ fix_block->unfix();
buf_pool_mutex_exit(buf_pool);
rw_lock_x_unlock(&fix_block->lock);
@@ -4680,7 +4670,7 @@ evict_from_pool:
if (!access_time && !recv_no_ibuf_operations) {
ibuf_merge_or_delete_for_page(
- block, page_id, &page_size, TRUE);
+ block, block->page.id, zip_size, true);
}
buf_pool_mutex_enter(buf_pool);
@@ -4726,7 +4716,7 @@ evict_from_pool:
buf_pool_mutex_enter(buf_pool);
- buf_block_unfix(fix_block);
+ fix_block->unfix();
/* Now we are only holding the buf_pool->mutex,
not block->mutex or hash_lock. Blocks cannot be
@@ -4785,7 +4775,7 @@ evict_from_pool:
buf_page_mutex_exit(fix_block);
- buf_block_fix(fix_block);
+ fix_block->fix();
/* Failed to evict the page; change it directly */
@@ -4803,7 +4793,7 @@ evict_from_pool:
if (!fsp_is_system_temporary(page_id.space())) {
ibool ret;
ret = rw_lock_s_lock_nowait(
- &fix_block->debug_latch, file, line);
+ fix_block->debug_latch, file, line);
ut_a(ret);
}
#endif /* UNIV_DEBUG */
@@ -4879,7 +4869,7 @@ evict_from_pool:
/* In the case of a first access, try to apply linear
read-ahead */
- buf_read_ahead_linear(page_id, page_size, ibuf_inside(mtr));
+ buf_read_ahead_linear(page_id, zip_size, ibuf_inside(mtr));
}
ut_ad(!rw_lock_own_flagged(hash_lock,
@@ -4931,7 +4921,7 @@ buf_page_optimistic_get(
buf_page_make_young_if_needed(&block->page);
ut_ad(!ibuf_inside(mtr)
- || ibuf_page(block->page.id, block->page.size, NULL));
+ || ibuf_page(block->page.id, block->zip_size(), NULL));
mtr_memo_type_t fix_type;
@@ -4985,7 +4975,7 @@ buf_page_optimistic_get(
if (!access_time) {
/* In the case of a first access, try to apply linear
read-ahead */
- buf_read_ahead_linear(block->page.id, block->page.size,
+ buf_read_ahead_linear(block->page.id, block->zip_size(),
ibuf_inside(mtr));
}
@@ -5208,13 +5198,14 @@ buf_page_init_low(
/** Inits a page to the buffer buf_pool.
@param[in,out] buf_pool buffer pool
@param[in] page_id page id
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in,out] block block to init */
static
void
buf_page_init(
buf_pool_t* buf_pool,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
buf_block_t* block)
{
buf_page_t* hash_page;
@@ -5259,7 +5250,7 @@ buf_page_init(
ut_a(buf_fix_count > 0);
- my_atomic_add32((int32*) &block->page.buf_fix_count, buf_fix_count);
+ block->page.buf_fix_count += buf_fix_count;
buf_pool_watch_remove(buf_pool, hash_page);
} else {
@@ -5282,14 +5273,11 @@ buf_page_init(
ut_d(block->page.in_page_hash = TRUE);
block->page.id = page_id;
- block->page.size.copy_from(page_size);
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash,
page_id.fold(), &block->page);
- if (page_size.is_compressed()) {
- page_zip_set_size(&block->page.zip, page_size.physical());
- }
+ page_zip_set_size(&block->page.zip, zip_size);
}
/** Initialize a page for read to the buffer buf_pool. If the page is
@@ -5303,6 +5291,7 @@ and the lock released later.
@param[out] err DB_SUCCESS or DB_TABLESPACE_DELETED
@param[in] mode BUF_READ_IBUF_PAGES_ONLY, ...
@param[in] page_id page id
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] unzip whether the uncompressed page is
requested (for ROW_FORMAT=COMPRESSED)
@return pointer to the block
@@ -5312,7 +5301,7 @@ buf_page_init_for_read(
dberr_t* err,
ulint mode,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
bool unzip)
{
buf_block_t* block;
@@ -5331,12 +5320,12 @@ buf_page_init_for_read(
if (mode == BUF_READ_IBUF_PAGES_ONLY) {
/* It is a read-ahead within an ibuf routine */
- ut_ad(!ibuf_bitmap_page(page_id, page_size));
+ ut_ad(!ibuf_bitmap_page(page_id, zip_size));
ibuf_mtr_start(&mtr);
- if (!recv_no_ibuf_operations &&
- !ibuf_page(page_id, page_size, &mtr)) {
+ if (!recv_no_ibuf_operations
+ && !ibuf_page(page_id, zip_size, &mtr)) {
ibuf_mtr_commit(&mtr);
@@ -5346,7 +5335,7 @@ buf_page_init_for_read(
ut_ad(mode == BUF_READ_ANY_PAGE);
}
- if (page_size.is_compressed() && !unzip && !recv_recovery_is_on()) {
+ if (zip_size && !unzip && !recv_recovery_is_on()) {
block = NULL;
} else {
block = buf_LRU_get_free_block(buf_pool);
@@ -5381,7 +5370,7 @@ buf_page_init_for_read(
ut_ad(buf_pool_from_bpage(bpage) == buf_pool);
- buf_page_init(buf_pool, page_id, page_size, block);
+ buf_page_init(buf_pool, page_id, zip_size, block);
/* Note: We are using the hash_lock for protection. This is
safe because no other thread can lookup the block from the
@@ -5405,7 +5394,7 @@ buf_page_init_for_read(
rw_lock_x_lock_gen(&block->lock, BUF_IO_READ);
- if (page_size.is_compressed()) {
+ if (zip_size) {
/* buf_pool->mutex may be released and
reacquired by buf_buddy_alloc(). Thus, we
must release block->mutex in order not to
@@ -5415,8 +5404,7 @@ buf_page_init_for_read(
been added to buf_pool->LRU and
buf_pool->page_hash. */
buf_page_mutex_exit(block);
- data = buf_buddy_alloc(buf_pool, page_size.physical(),
- &lru);
+ data = buf_buddy_alloc(buf_pool, zip_size, &lru);
buf_page_mutex_enter(block);
block->page.zip.data = (page_zip_t*) data;
@@ -5437,7 +5425,7 @@ buf_page_init_for_read(
control block (bpage), in order to avoid the
invocation of buf_buddy_relocate_block() on
uninitialized data. */
- data = buf_buddy_alloc(buf_pool, page_size.physical(), &lru);
+ data = buf_buddy_alloc(buf_pool, zip_size, &lru);
rw_lock_x_lock(hash_lock);
@@ -5455,8 +5443,7 @@ buf_page_init_for_read(
/* The block was added by some other thread. */
rw_lock_x_unlock(hash_lock);
watch_page = NULL;
- buf_buddy_free(buf_pool, data,
- page_size.physical());
+ buf_buddy_free(buf_pool, data, zip_size);
bpage = NULL;
goto func_exit;
@@ -5469,13 +5456,11 @@ buf_page_init_for_read(
bpage->buf_pool_index = buf_pool_index(buf_pool);
page_zip_des_init(&bpage->zip);
- page_zip_set_size(&bpage->zip, page_size.physical());
+ page_zip_set_size(&bpage->zip, zip_size);
bpage->zip.data = (page_zip_t*) data;
- bpage->size.copy_from(page_size);
-
mutex_enter(&buf_pool->zip_mutex);
- UNIV_MEM_DESC(bpage->zip.data, bpage->size.physical());
+ UNIV_MEM_DESC(bpage->zip.data, zip_size);
buf_page_init_low(bpage);
@@ -5500,7 +5485,7 @@ buf_page_init_for_read(
ut_a(buf_fix_count > 0);
- my_atomic_add32((int32*) &bpage->buf_fix_count, buf_fix_count);
+ bpage->buf_fix_count += buf_fix_count;
ut_ad(buf_pool_watch_is_sentinel(buf_pool, watch_page));
buf_pool_watch_remove(buf_pool, watch_page);
@@ -5539,18 +5524,18 @@ func_exit:
return(bpage);
}
-/** Initializes a page to the buffer buf_pool. The page is usually not read
+/** Initialize a page in the buffer pool. The page is usually not read
from a file even if it cannot be found in the buffer buf_pool. This is one
of the functions which perform to a block a state transition NOT_USED =>
FILE_PAGE (the other is buf_page_get_gen).
@param[in] page_id page id
-@param[in] page_size page size
-@param[in] mtr mini-transaction
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param[in,out] mtr mini-transaction
@return pointer to the block, page bufferfixed */
buf_block_t*
buf_page_create(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
mtr_t* mtr)
{
buf_frame_t* frame;
@@ -5560,7 +5545,7 @@ buf_page_create(
rw_lock_t* hash_lock;
ut_ad(mtr->is_active());
- ut_ad(page_id.space() != 0 || !page_size.is_compressed());
+ ut_ad(page_id.space() != 0 || !zip_size);
free_block = buf_LRU_get_free_block(buf_pool);
@@ -5583,12 +5568,12 @@ buf_page_create(
buf_block_free(free_block);
if (!recv_recovery_is_on()) {
- return buf_page_get_with_no_latch(page_id, page_size,
+ return buf_page_get_with_no_latch(page_id, zip_size,
mtr);
}
mutex_exit(&recv_sys->mutex);
- block = buf_page_get_with_no_latch(page_id, page_size, mtr);
+ block = buf_page_get_with_no_latch(page_id, zip_size, mtr);
mutex_enter(&recv_sys->mutex);
return block;
}
@@ -5602,7 +5587,7 @@ buf_page_create(
buf_page_mutex_enter(block);
- buf_page_init(buf_pool, page_id, page_size, block);
+ buf_page_init(buf_pool, page_id, zip_size, block);
rw_lock_x_unlock(hash_lock);
@@ -5612,7 +5597,7 @@ buf_page_create(
buf_block_buf_fix_inc(block, __FILE__, __LINE__);
buf_pool->stat.n_pages_created++;
- if (page_size.is_compressed()) {
+ if (zip_size) {
void* data;
bool lru;
@@ -5630,7 +5615,7 @@ buf_page_create(
the reacquisition of buf_pool->mutex. We also must
defer this operation until after the block descriptor
has been added to buf_pool->LRU and buf_pool->page_hash. */
- data = buf_buddy_alloc(buf_pool, page_size.physical(), &lru);
+ data = buf_buddy_alloc(buf_pool, zip_size, &lru);
buf_page_mutex_enter(block);
block->page.zip.data = (page_zip_t*) data;
@@ -5657,7 +5642,7 @@ buf_page_create(
/* Delete possible entries for the page from the insert buffer:
such can exist if the page belonged to an index which was dropped */
if (!recv_recovery_is_on()) {
- ibuf_merge_or_delete_for_page(NULL, page_id, &page_size, TRUE);
+ ibuf_merge_or_delete_for_page(NULL, page_id, zip_size, true);
}
frame = block->frame;
@@ -5835,6 +5820,29 @@ buf_mark_space_corrupt(buf_page_t* bpage, const fil_space_t* space)
buf_pool_mutex_exit(buf_pool);
}
+/** Check if an encrypted page is corrupted for the full_crc32 format.
+@param[in]	space_id	tablespace identifier the page should belong to
+@param[in]	dst_frame	page frame
+@param[in]	is_compressed	whether the page is page_compressed
+@return whether the page is corrupted */
+static bool buf_page_full_crc32_is_corrupted(
+ ulint space_id,
+ const byte* dst_frame,
+ bool is_compressed)
+{
+ if (!is_compressed
+ && memcmp(dst_frame + FIL_PAGE_LSN + 4,
+ dst_frame + srv_page_size - FIL_PAGE_FCRC32_END_LSN, 4)) {
+ return true;
+ }
+
+ if (space_id != mach_read_from_4(dst_frame + FIL_PAGE_SPACE_ID)) {
+ return true;
+ }
+
+ return false;
+}
+
/** Check if page is maybe compressed, encrypted or both when we encounter
corrupted page. Note that we can't be 100% sure if page is corrupted
or decrypt/decompress just failed.
@@ -5854,6 +5862,7 @@ static dberr_t buf_page_check_corrupt(buf_page_t* bpage, fil_space_t* space)
((buf_block_t*) bpage)->frame;
dberr_t err = DB_SUCCESS;
bool corrupted = false;
+ uint key_version = buf_page_get_key_version(dst_frame, space->flags);
/* In buf_decrypt_after_read we have either decrypted the page if
page post encryption checksum matches and used key_id is found
@@ -5861,18 +5870,26 @@ static dberr_t buf_page_check_corrupt(buf_page_t* bpage, fil_space_t* space)
not decrypted and it could be either encrypted and corrupted
or corrupted or good page. If we decrypted, there page could
still be corrupted if used key does not match. */
- const bool still_encrypted = mach_read_from_4(
- dst_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION)
+ const bool still_encrypted = key_version
&& space->crypt_data
&& space->crypt_data->type != CRYPT_SCHEME_UNENCRYPTED
&& !bpage->encrypted
- && fil_space_verify_crypt_checksum(dst_frame, bpage->size);
+ && fil_space_verify_crypt_checksum(dst_frame,
+ bpage->zip_size());
if (!still_encrypted) {
/* If traditional checksums match, we assume that page is
not anymore encrypted. */
- corrupted = buf_page_is_corrupted(
- true, dst_frame, bpage->size, space);
+ if (space->full_crc32()
+ && !buf_page_is_zeroes(dst_frame, space->physical_size())
+ && (key_version || space->is_compressed())) {
+ corrupted = buf_page_full_crc32_is_corrupted(
+ space->id, dst_frame,
+ space->is_compressed());
+ } else {
+ corrupted = buf_page_is_corrupted(
+ true, dst_frame, space->flags);
+ }
if (!corrupted) {
bpage->encrypted = false;
@@ -5897,8 +5914,7 @@ static dberr_t buf_page_check_corrupt(buf_page_t* bpage, fil_space_t* space)
ib::info()
<< "However key management plugin or used key_version "
- << mach_read_from_4(dst_frame
- + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION)
+ << key_version
<< " is not found or"
" used encryption algorithm or method does not match.";
@@ -5943,7 +5959,7 @@ buf_page_io_complete(buf_page_t* bpage, bool dblwr, bool evict)
io_type = buf_page_get_io_fix(bpage);
ut_ad(io_type == BUF_IO_READ || io_type == BUF_IO_WRITE);
- ut_ad(bpage->size.is_compressed() == (bpage->zip.data != NULL));
+ ut_ad(!!bpage->zip.ssize == (bpage->zip.data != NULL));
ut_ad(uncompressed || bpage->zip.data);
if (io_type == BUF_IO_READ) {
@@ -5968,10 +5984,10 @@ buf_page_io_complete(buf_page_t* bpage, bool dblwr, bool evict)
}
if (bpage->zip.data && uncompressed) {
- my_atomic_addlint(&buf_pool->n_pend_unzip, 1);
+ buf_pool->n_pend_unzip++;
ibool ok = buf_zip_decompress((buf_block_t*) bpage,
FALSE);
- my_atomic_addlint(&buf_pool->n_pend_unzip, ulint(-1));
+ buf_pool->n_pend_unzip--;
if (!ok) {
ib::info() << "Page "
@@ -5989,8 +6005,7 @@ buf_page_io_complete(buf_page_t* bpage, bool dblwr, bool evict)
read_page_no = mach_read_from_4(frame + FIL_PAGE_OFFSET);
read_space_id = mach_read_from_4(
frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
- key_version = mach_read_from_4(
- frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
+ key_version = buf_page_get_key_version(frame, space->flags);
if (bpage->id.space() == TRX_SYS_SPACE
&& buf_dblwr_page_inside(bpage->id.page_no())) {
@@ -6043,7 +6058,7 @@ database_corrupted:
<< ". You may have to recover from "
<< "a backup.";
- buf_page_print(frame, bpage->size);
+ buf_page_print(frame, bpage->zip_size());
ib::info()
<< "It is also possible that your"
@@ -6088,7 +6103,6 @@ database_corrupted:
&& !recv_no_ibuf_operations
&& (bpage->id.space() == 0
|| !is_predefined_tablespace(bpage->id.space()))
- && !srv_is_tablespace_truncated(bpage->id.space())
&& fil_page_get_type(frame) == FIL_PAGE_INDEX
&& page_is_leaf(frame)) {
@@ -6106,7 +6120,7 @@ database_corrupted:
ibuf_merge_or_delete_for_page(
(buf_block_t*) bpage, bpage->id,
- &bpage->size, TRUE);
+ bpage->zip_size(), true);
}
}
@@ -7202,6 +7216,21 @@ buf_all_freed(void)
return(TRUE);
}
+/** Verify that the post-encryption checksum matches the calculated checksum.
+This function should be called only if the tablespace contains crypt_data metadata.
+@param[in]	page		page frame
+@param[in]	fsp_flags	tablespace flags
+@return true if the page is encrypted and OK, false otherwise */
+bool buf_page_verify_crypt_checksum(const byte* page, ulint fsp_flags)
+{
+ if (!fil_space_t::full_crc32(fsp_flags)) {
+ return fil_space_verify_crypt_checksum(
+ page, fil_space_t::zip_size(fsp_flags));
+ }
+
+ return !buf_page_is_corrupted(true, page, fsp_flags);
+}
+
/*********************************************************************//**
Checks that there currently are no pending i/o-operations for the buffer
pool.
@@ -7283,7 +7312,7 @@ a page is written to disk.
(may be src_frame or an encrypted/compressed copy of it) */
UNIV_INTERN
byte*
-buf_page_encrypt_before_write(
+buf_page_encrypt(
fil_space_t* space,
buf_page_t* bpage,
byte* src_frame)
@@ -7291,7 +7320,7 @@ buf_page_encrypt_before_write(
ut_ad(space->id == bpage->id.space());
bpage->real_size = srv_page_size;
- fil_page_type_validate(src_frame);
+ ut_d(fil_page_type_validate(space, src_frame));
switch (bpage->id.page_no()) {
case 0:
@@ -7313,16 +7342,22 @@ buf_page_encrypt_before_write(
&& (!crypt_data->is_default_encryption()
|| srv_encrypt_tables);
- bool page_compressed = FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags);
+ bool page_compressed = space->is_compressed();
if (!encrypted && !page_compressed) {
/* No need to encrypt or page compress the page.
Clear key-version & crypt-checksum. */
- memset(src_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8);
+ if (space->full_crc32()) {
+ memset(src_frame + FIL_PAGE_FCRC32_KEY_VERSION, 0, 4);
+ } else {
+ memset(src_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
+ 0, 8);
+ }
+
return src_frame;
}
- ut_ad(!bpage->size.is_compressed() || !page_compressed);
+ ut_ad(!bpage->zip_size() || !page_compressed);
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
/* Find free slot from temporary memory array */
buf_tmp_buffer_t* slot = buf_pool_reserve_tmp_slot(buf_pool);
@@ -7331,6 +7366,19 @@ buf_page_encrypt_before_write(
buf_tmp_reserve_crypt_buf(slot);
byte *dst_frame = slot->crypt_buf;
+ const bool full_crc32 = space->full_crc32();
+
+ if (full_crc32) {
+		/* Write the LSN before encryption for the full_crc32
+		format, because the LSN is one of the inputs to the
+		encryption. */
+ mach_write_to_8(src_frame + FIL_PAGE_LSN,
+ bpage->newest_modification);
+ if (!page_compressed) {
+ mach_write_to_4(
+ src_frame + srv_page_size - FIL_PAGE_FCRC32_END_LSN,
+ (ulint) bpage->newest_modification);
+ }
+ }
if (!page_compressed) {
not_compressed:
@@ -7344,25 +7392,37 @@ not_compressed:
bpage->real_size = srv_page_size;
slot->out_buf = dst_frame = tmp;
- ut_d(fil_page_type_validate(tmp));
+ ut_d(fil_page_type_validate(space, tmp));
} else {
/* First we compress the page content */
buf_tmp_reserve_compression_buf(slot);
byte* tmp = slot->comp_buf;
ulint out_len = fil_page_compress(
- src_frame, tmp,
- fsp_flags_get_page_compression_level(space->flags),
+ src_frame, tmp, space->flags,
fil_space_get_block_size(space, bpage->id.page_no()),
encrypted);
+
if (!out_len) {
goto not_compressed;
}
bpage->real_size = out_len;
+ if (full_crc32) {
+ ut_d(bool compressed = false);
+ out_len = buf_page_full_crc32_size(tmp,
+#ifdef UNIV_DEBUG
+ &compressed,
+#else
+ NULL,
+#endif
+ NULL);
+ ut_ad(compressed);
+ }
+
/* Workaround for MDEV-15527. */
memset(tmp + out_len, 0 , srv_page_size - out_len);
- ut_d(fil_page_type_validate(tmp));
+ ut_d(fil_page_type_validate(space, tmp));
if (encrypted) {
/* And then we encrypt the page content */
@@ -7373,10 +7433,17 @@ not_compressed:
dst_frame);
}
+ if (full_crc32) {
+ compile_time_assert(FIL_PAGE_FCRC32_CHECKSUM == 4);
+ mach_write_to_4(tmp + out_len - 4,
+ ut_crc32(tmp, out_len - 4));
+ ut_ad(!buf_page_is_corrupted(true, tmp, space->flags));
+ }
+
slot->out_buf = dst_frame = tmp;
}
- ut_d(fil_page_type_validate(dst_frame));
+ ut_d(fil_page_type_validate(space, dst_frame));
// return dst_frame which will be written
return dst_frame;
@@ -7390,7 +7457,7 @@ bool
buf_page_should_punch_hole(
const buf_page_t* bpage)
{
- return (bpage->real_size != bpage->size.physical());
+ return bpage->real_size != bpage->physical_size();
}
/**
@@ -7403,6 +7470,6 @@ buf_page_get_trim_length(
const buf_page_t* bpage,
ulint write_length)
{
- return (bpage->size.physical() - write_length);
+ return bpage->physical_size() - write_length;
}
#endif /* !UNIV_INNOCHECKSUM */
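
buf_page_should_punch_hole() and buf_page_get_trim_length() now compare against physical_size(): a hole is punched only when the page-compressed payload (real_size) is smaller than the on-disk page, and the trim length is whatever the write did not cover. A worked sketch; the 4 KiB filesystem block size and the rounding of the write length are assumptions, not taken from the patch.

#include <cassert>
#include <cstddef>

// Trim length as in the hunk above: the part of the page the write skipped.
static std::size_t trim_length(std::size_t physical_size,
                               std::size_t write_length)
{
    return physical_size - write_length;
}

static void punch_hole_example()
{
    const std::size_t physical_size = 16384;  // uncompressed page size
    const std::size_t real_size     = 5000;   // page-compressed payload
    const std::size_t fs_block      = 4096;   // assumed filesystem block size
    const std::size_t write_length  =
        (real_size + fs_block - 1) / fs_block * fs_block;

    assert(real_size != physical_size);                        // hole punching applies
    assert(write_length == 8192);                              // two 4 KiB blocks written
    assert(trim_length(physical_size, write_length) == 8192);  // bytes punched
}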
diff --git a/storage/innobase/buf/buf0checksum.cc b/storage/innobase/buf/buf0checksum.cc
index 6c103c4f44b..c5ad0cfb657 100644
--- a/storage/innobase/buf/buf0checksum.cc
+++ b/storage/innobase/buf/buf0checksum.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,35 +39,6 @@ ha_innodb.cc:12251: error: cannot convert 'srv_checksum_algorithm_t*' to
'long unsigned int*' in initialization */
ulong srv_checksum_algorithm = SRV_CHECKSUM_ALGORITHM_INNODB;
-#ifdef INNODB_BUG_ENDIAN_CRC32
-/** Calculate the CRC32 checksum of a page. The value is stored to the page
-when it is written to a file and also checked for a match when reading from
-the file. Note that we must be careful to calculate the same value on all
-architectures.
-@param[in] page buffer page (srv_page_size bytes)
-@param[in] bug_endian whether to use big endian byteorder
-when converting byte strings to integers, for bug-compatibility with
-big-endian architecture running MySQL 5.6, MariaDB 10.0 or MariaDB 10.1
-@return CRC-32C */
-uint32_t buf_calc_page_crc32(const byte* page, bool bug_endian)
-{
- return bug_endian
- ? ut_crc32_legacy_big_endian(
- page + FIL_PAGE_OFFSET,
- FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
- - FIL_PAGE_OFFSET)
- ^ ut_crc32_legacy_big_endian(page + FIL_PAGE_DATA,
- srv_page_size
- - (FIL_PAGE_DATA
- + FIL_PAGE_END_LSN_OLD_CHKSUM))
- : ut_crc32(page + FIL_PAGE_OFFSET,
- FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
- - FIL_PAGE_OFFSET)
- ^ ut_crc32(page + FIL_PAGE_DATA,
- srv_page_size
- - (FIL_PAGE_DATA + FIL_PAGE_END_LSN_OLD_CHKSUM));
-}
-#else
/** Calculate the CRC32 checksum of a page. The value is stored to the page
when it is written to a file and also checked for a match when reading from
the file. Note that we must be careful to calculate the same value on all
@@ -88,7 +59,6 @@ uint32_t buf_calc_page_crc32(const byte* page)
srv_page_size
- (FIL_PAGE_DATA + FIL_PAGE_END_LSN_OLD_CHKSUM));
}
-#endif
/** Calculate a checksum which is stored to the page when it is written
to a file. Note that we must be careful to calculate the same value on
@@ -151,6 +121,10 @@ buf_checksum_algorithm_name(srv_checksum_algorithm_t algo)
return("none");
case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
return("strict_none");
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ return("full_crc32");
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
+ return("strict_full_crc32");
}
ut_error;
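
For context on the checksum coverage: the retained buf_calc_page_crc32()
XORs two CRCs over fixed byte ranges of the page, while the new
full_crc32/strict_full_crc32 algorithms cover everything up to the final
4 bytes. A sketch of the two coverage rules follows; the offsets are the
ones visible in the hunk above (FIL_PAGE_OFFSET = 4,
FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION = 26, FIL_PAGE_DATA = 38,
FIL_PAGE_END_LSN_OLD_CHKSUM = 8), and any CRC-32C routine can be passed
in, e.g. the one sketched after the buf0buf.cc hunks.

    #include <cstdint>
    #include <cstddef>

    using crc32_fn = uint32_t (*)(const unsigned char*, size_t);

    // Classic "crc32": XOR of CRCs over bytes [4, 26) and [38, page_size - 8),
    // mirroring buf_calc_page_crc32() above.
    uint32_t legacy_page_crc32(const unsigned char* page, size_t page_size,
                               crc32_fn crc)
    {
        const size_t FIL_PAGE_OFFSET = 4;
        const size_t FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION = 26;
        const size_t FIL_PAGE_DATA = 38;
        const size_t FIL_PAGE_END_LSN_OLD_CHKSUM = 8;

        return crc(page + FIL_PAGE_OFFSET,
                   FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION - FIL_PAGE_OFFSET)
             ^ crc(page + FIL_PAGE_DATA,
                   page_size - (FIL_PAGE_DATA + FIL_PAGE_END_LSN_OLD_CHKSUM));
    }

    // full_crc32: one CRC over everything except the trailing 4-byte checksum.
    uint32_t full_crc32_page_checksum(const unsigned char* page,
                                      size_t page_size, crc32_fn crc)
    {
        return crc(page, page_size - 4);
    }
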
diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc
index 74d7c6ab475..fb3d4d96003 100644
--- a/storage/innobase/buf/buf0dblwr.cc
+++ b/storage/innobase/buf/buf0dblwr.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2018, MariaDB Corporation.
+Copyright (c) 2013, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -85,7 +85,7 @@ buf_dblwr_get(
buf_block_t* block;
block = buf_page_get(page_id_t(TRX_SYS_SPACE, TRX_SYS_PAGE_NO),
- univ_page_size, RW_X_LATCH, mtr);
+ 0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
@@ -335,20 +335,6 @@ too_small:
goto start_again;
}
-/** Check if a page is all zeroes.
-@param[in] read_buf database page
-@param[in] page_size page frame size
-@return whether the page is all zeroes */
-static bool buf_page_is_zeroes(const byte* read_buf, size_t page_size)
-{
- for (ulint i = 0; i < page_size; i++) {
- if (read_buf[i] != 0) {
- return false;
- }
- }
- return true;
-}
-
/**
At database startup initializes the doublewrite buffer memory structure if
we already have a doublewrite buffer created in the data files. If we are
@@ -477,6 +463,7 @@ buf_dblwr_init_or_load_pages(
page = buf;
for (ulint i = 0; i < TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * 2; i++) {
+
if (reset_space_ids) {
ulint source_page_no;
@@ -569,12 +556,9 @@ buf_dblwr_process()
if (page_no >= space->size) {
- /* Do not report the warning if the tablespace
- is scheduled for truncation or was truncated
- and we have parsed an MLOG_TRUNCATE record. */
- if (!srv_is_tablespace_truncated(space_id)
- && !srv_was_tablespace_truncated(space)
- && !srv_is_undo_tablespace(space_id)) {
+ /* Do not report the warning for undo
+ tablespaces, because they can be truncated in place. */
+ if (!srv_is_undo_tablespace(space_id)) {
ib::warn() << "A copy of page " << page_id
<< " in the doublewrite buffer slot "
<< page_no_dblwr
@@ -583,12 +567,13 @@ buf_dblwr_process()
continue;
}
- const page_size_t page_size(space->flags);
- ut_ad(!buf_page_is_zeroes(page, page_size.physical()));
+ const ulint physical_size = space->physical_size();
+ const ulint zip_size = space->zip_size();
+ ut_ad(!buf_page_is_zeroes(page, physical_size));
/* We want to ensure that for partial reads the
unread portion of the page is NUL. */
- memset(read_buf, 0x0, page_size.physical());
+ memset(read_buf, 0x0, physical_size);
IORequest request;
@@ -597,8 +582,8 @@ buf_dblwr_process()
/* Read in the actual page from the file */
dberr_t err = fil_io(
request, true,
- page_id, page_size,
- 0, page_size.physical(), read_buf, NULL);
+ page_id, zip_size,
+ 0, physical_size, read_buf, NULL);
if (err != DB_SUCCESS) {
ib::warn()
@@ -608,9 +593,10 @@ buf_dblwr_process()
}
const bool is_all_zero = buf_page_is_zeroes(
- read_buf, page_size.physical());
+ read_buf, physical_size);
const bool expect_encrypted = space->crypt_data
&& space->crypt_data->type != CRYPT_SCHEME_UNENCRYPTED;
+ bool is_corrupted = false;
if (is_all_zero) {
/* We will check if the copy in the
@@ -620,19 +606,22 @@ buf_dblwr_process()
} else {
/* Decompress the page before
validating the checksum. */
- ulint decomp = fil_page_decompress(buf, read_buf);
- if (!decomp || (decomp != srv_page_size
- && page_size.is_compressed())) {
+ ulint decomp = fil_page_decompress(buf, read_buf,
+ space->flags);
+ if (!decomp || (zip_size && decomp != srv_page_size)) {
goto bad;
}
- if (expect_encrypted && mach_read_from_4(
- read_buf
- + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION)
- ? fil_space_verify_crypt_checksum(read_buf,
- page_size)
- : !buf_page_is_corrupted(true, read_buf,
- page_size, space)) {
+ if (expect_encrypted
+ && buf_page_get_key_version(read_buf, space->flags)) {
+ is_corrupted = !buf_page_verify_crypt_checksum(
+ read_buf, space->flags);
+ } else {
+ is_corrupted = buf_page_is_corrupted(
+ true, read_buf, space->flags);
+ }
+
+ if (!is_corrupted) {
/* The page is good; there is no need
to consult the doublewrite buffer. */
continue;
@@ -646,16 +635,21 @@ bad:
<< " from the doublewrite buffer.";
}
- ulint decomp = fil_page_decompress(buf, page);
- if (!decomp || (decomp != srv_page_size
- && page_size.is_compressed())) {
+ ulint decomp = fil_page_decompress(buf, page, space->flags);
+ if (!decomp || (zip_size && decomp != srv_page_size)) {
goto bad_doublewrite;
}
- if (expect_encrypted && mach_read_from_4(
- page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION)
- ? !fil_space_verify_crypt_checksum(page, page_size)
- : buf_page_is_corrupted(true, page, page_size, space)) {
+ if (expect_encrypted
+ && buf_page_get_key_version(read_buf, space->flags)) {
+ is_corrupted = !buf_page_verify_crypt_checksum(
+ page, space->flags);
+ } else {
+ is_corrupted = buf_page_is_corrupted(
+ true, page, space->flags);
+ }
+
+ if (is_corrupted) {
if (!is_all_zero) {
bad_doublewrite:
ib::warn() << "A doublewrite copy of page "
@@ -672,7 +666,7 @@ bad_doublewrite:
if (page_no == 0) {
/* Check the FSP_SPACE_FLAGS. */
ulint flags = fsp_header_get_flags(page);
- if (!fsp_flags_is_valid(flags, space_id)
+ if (!fil_space_t::is_valid_flags(flags, space_id)
&& fsp_flags_convert_from_101(flags)
== ULINT_UNDEFINED) {
ib::warn() << "Ignoring a doublewrite copy"
@@ -689,8 +683,8 @@ bad_doublewrite:
IORequest write_request(IORequest::WRITE);
- fil_io(write_request, true, page_id, page_size,
- 0, page_size.physical(),
+ fil_io(write_request, true, page_id, zip_size,
+ 0, physical_size,
const_cast<byte*>(page), NULL);
ib::info() << "Recovered page " << page_id
@@ -794,40 +788,42 @@ buf_dblwr_update(
}
}
-/********************************************************************//**
-Check the LSN values on the page. */
-static
-void
-buf_dblwr_check_page_lsn(
-/*=====================*/
- const page_t* page) /*!< in: page to check */
+#ifdef UNIV_DEBUG
+/** Check the LSN values on the page.
+@param[in] page page to check
+@param[in] s tablespace */
+static void buf_dblwr_check_page_lsn(const page_t* page, const fil_space_t& s)
{
- ibool page_compressed = (mach_read_from_2(page+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED);
- uint key_version = mach_read_from_4(page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
-
/* Ignore page compressed or encrypted pages */
- if (page_compressed || key_version) {
+ if (s.is_compressed()
+ || buf_page_get_key_version(page, s.flags)) {
return;
}
- if (memcmp(page + (FIL_PAGE_LSN + 4),
- page + (srv_page_size
- - FIL_PAGE_END_LSN_OLD_CHKSUM + 4),
- 4)) {
-
- const ulint lsn1 = mach_read_from_4(
- page + FIL_PAGE_LSN + 4);
- const ulint lsn2 = mach_read_from_4(
- page + srv_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM
- + 4);
-
- ib::error() << "The page to be written seems corrupt!"
+ const unsigned lsn1 = mach_read_from_4(page + FIL_PAGE_LSN + 4),
+ lsn2 = mach_read_from_4(page + srv_page_size
+ - (s.full_crc32()
+ ? FIL_PAGE_FCRC32_END_LSN
+ : FIL_PAGE_END_LSN_OLD_CHKSUM - 4));
+ if (UNIV_UNLIKELY(lsn1 != lsn2)) {
+ ib::error() << "The page to be written to "
+ << s.chain.start->name <<
+ " seems corrupt!"
" The low 4 bytes of LSN fields do not match"
" (" << lsn1 << " != " << lsn2 << ")!"
" Noticed in the buffer pool.";
}
}
+static void buf_dblwr_check_page_lsn(const buf_page_t& b, const byte* page)
+{
+ if (fil_space_t* space = fil_space_acquire_for_io(b.id.space())) {
+ buf_dblwr_check_page_lsn(page, *space);
+ space->release_for_io();
+ }
+}
+#endif /* UNIV_DEBUG */
+
/********************************************************************//**
Asserts when a corrupt block is found while writing out data to the
disk. */
@@ -837,7 +833,7 @@ buf_dblwr_assert_on_corrupt_block(
/*==============================*/
const buf_block_t* block) /*!< in: block to check */
{
- buf_page_print(block->frame, univ_page_size);
+ buf_page_print(block->frame);
ib::fatal() << "Apparent corruption of an index page "
<< block->page.id
@@ -927,14 +923,14 @@ buf_dblwr_write_block_to_datafile(
void * frame = buf_page_get_frame(bpage);
if (bpage->zip.data != NULL) {
- ut_ad(bpage->size.is_compressed());
+ ut_ad(bpage->zip_size());
- fil_io(request, sync, bpage->id, bpage->size, 0,
- bpage->size.physical(),
+ fil_io(request, sync, bpage->id, bpage->zip_size(), 0,
+ bpage->zip_size(),
(void*) frame,
(void*) bpage);
} else {
- ut_ad(!bpage->size.is_compressed());
+ ut_ad(!bpage->zip_size());
/* Our IO API is common for both reads and writes and is
therefore geared towards a non-const parameter. */
@@ -943,11 +939,10 @@ buf_dblwr_write_block_to_datafile(
const_cast<buf_page_t*>(bpage));
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
- buf_dblwr_check_page_lsn(block->frame);
-
+ ut_d(buf_dblwr_check_page_lsn(block->page, block->frame));
fil_io(request,
- sync, bpage->id, bpage->size, 0, bpage->real_size,
- frame, block);
+ sync, bpage->id, bpage->zip_size(), 0, bpage->real_size,
+ frame, block);
}
}
@@ -1037,10 +1032,7 @@ try_again:
/* Check that the actual page in the buffer pool is
not corrupt and the LSN values are sane. */
buf_dblwr_check_block(block);
-
- /* Check that the page as written to the doublewrite
- buffer has sane LSN values. */
- buf_dblwr_check_page_lsn(write_buf + len2);
+ ut_d(buf_dblwr_check_page_lsn(block->page, write_buf + len2));
}
/* Write out the first block of the doublewrite buffer */
@@ -1048,7 +1040,7 @@ try_again:
buf_dblwr->first_free) << srv_page_size_shift;
fil_io(IORequestWrite, true,
- page_id_t(TRX_SYS_SPACE, buf_dblwr->block1), univ_page_size,
+ page_id_t(TRX_SYS_SPACE, buf_dblwr->block1), 0,
0, len, (void*) write_buf, NULL);
if (buf_dblwr->first_free <= TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
@@ -1064,7 +1056,7 @@ try_again:
+ (TRX_SYS_DOUBLEWRITE_BLOCK_SIZE << srv_page_size_shift);
fil_io(IORequestWrite, true,
- page_id_t(TRX_SYS_SPACE, buf_dblwr->block2), univ_page_size,
+ page_id_t(TRX_SYS_SPACE, buf_dblwr->block2), 0,
0, len, (void*) write_buf, NULL);
flush:
@@ -1149,21 +1141,16 @@ try_again:
encryption and/or page compression */
void * frame = buf_page_get_frame(bpage);
- if (bpage->size.is_compressed()) {
- UNIV_MEM_ASSERT_RW(bpage->zip.data, bpage->size.physical());
+ if (auto zip_size = bpage->zip_size()) {
+ UNIV_MEM_ASSERT_RW(bpage->zip.data, zip_size);
/* Copy the compressed page and clear the rest. */
-
- memcpy(p, frame, bpage->size.physical());
-
- memset(p + bpage->size.physical(), 0x0,
- srv_page_size - bpage->size.physical());
+ memcpy(p, frame, zip_size);
+ memset(p + zip_size, 0x0, srv_page_size - zip_size);
} else {
ut_a(buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE);
- UNIV_MEM_ASSERT_RW(frame,
- bpage->size.logical());
-
- memcpy(p, frame, bpage->size.logical());
+ UNIV_MEM_ASSERT_RW(frame, srv_page_size);
+ memcpy(p, frame, srv_page_size);
}
buf_dblwr->buf_block_arr[buf_dblwr->first_free] = bpage;
@@ -1225,8 +1212,8 @@ buf_dblwr_write_single_page(
/* Check that the page as written to the doublewrite
buffer has sane LSN values. */
if (!bpage->zip.data) {
- buf_dblwr_check_page_lsn(
- ((buf_block_t*) bpage)->frame);
+ ut_d(buf_dblwr_check_page_lsn(
+ *bpage, ((buf_block_t*) bpage)->frame));
}
}
@@ -1285,18 +1272,18 @@ retry:
encryption and/or page compression */
void * frame = buf_page_get_frame(bpage);
- if (bpage->size.is_compressed()) {
+ if (auto zip_size = bpage->zip_size()) {
memcpy(buf_dblwr->write_buf + srv_page_size * i,
- frame, bpage->size.physical());
+ frame, zip_size);
memset(buf_dblwr->write_buf + srv_page_size * i
- + bpage->size.physical(), 0x0,
- srv_page_size - bpage->size.physical());
+ + zip_size, 0x0,
+ srv_page_size - zip_size);
fil_io(IORequestWrite,
true,
page_id_t(TRX_SYS_SPACE, offset),
- univ_page_size,
+ 0,
0,
srv_page_size,
(void *)(buf_dblwr->write_buf + srv_page_size * i),
@@ -1307,7 +1294,7 @@ retry:
fil_io(IORequestWrite,
true,
page_id_t(TRX_SYS_SPACE, offset),
- univ_page_size,
+ 0,
0,
srv_page_size,
(void*) frame,
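
The buf_dblwr_process() changes above boil down to a fixed decision order
per doublewrite slot: trust the data-file copy if it is not all-zero and
passes the appropriate check (crypt-checksum verification when the
tablespace is encrypted and the page carries a key version, the plain
corruption check otherwise); only then fall back to the doublewrite copy,
which must itself verify. A condensed sketch of that order, with the checks
reduced to booleans computed by the caller (hypothetical names, not the
InnoDB API):

    // Outcome of examining one doublewrite slot during recovery.
    enum class DblwrAction { KeepDataPage, RestoreFromDblwr, SkipSlot };

    // data_copy_ok / dblwr_copy_ok are the results of the appropriate check:
    // buf_page_verify_crypt_checksum() when the space is encrypted and the
    // page carries a key version, buf_page_is_corrupted() negated otherwise.
    DblwrAction recover_one_page(bool data_copy_all_zero,
                                 bool data_copy_ok,
                                 bool dblwr_copy_ok)
    {
        if (!data_copy_all_zero && data_copy_ok) {
            // The page is good; no need to consult the doublewrite buffer.
            return DblwrAction::KeepDataPage;
        }
        // The data-file copy is unusable; the doublewrite copy must verify.
        return dblwr_copy_ok ? DblwrAction::RestoreFromDblwr
                             : DblwrAction::SkipSlot;
    }
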
diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc
index 8b7ff88c792..f933bfb81b3 100644
--- a/storage/innobase/buf/buf0dump.cc
+++ b/storage/innobase/buf/buf0dump.cc
@@ -672,7 +672,7 @@ buf_load()
so all pages from a given tablespace are consecutive. */
ulint cur_space_id = BUF_DUMP_SPACE(dump[0]);
fil_space_t* space = fil_space_acquire_silent(cur_space_id);
- page_size_t page_size(space ? space->flags : 0);
+ ulint zip_size = space ? space->zip_size() : 0;
/* JAN: TODO: MySQL 5.7 PSI
#ifdef HAVE_PSI_STAGE_INTERFACE
@@ -703,9 +703,7 @@ buf_load()
space = fil_space_acquire_silent(cur_space_id);
if (space != NULL) {
- const page_size_t cur_page_size(
- space->flags);
- page_size.copy_from(cur_page_size);
+ zip_size = space->zip_size();
}
}
@@ -720,7 +718,7 @@ buf_load()
buf_read_page_background(
page_id_t(this_space_id, BUF_DUMP_PAGE(dump[i])),
- page_size, true);
+ zip_size, true);
if (i % 64 == 63) {
os_aio_simulated_wake_handler_threads();
@@ -822,7 +820,7 @@ DECLARE_THREAD(buf_dump_thread)(void*)
if (srv_buffer_pool_load_at_startup) {
#ifdef WITH_WSREP
- if (!wsrep_recovery) {
+ if (!get_wsrep_recovery()) {
#endif /* WITH_WSREP */
buf_load();
#ifdef WITH_WSREP
@@ -856,7 +854,7 @@ DECLARE_THREAD(buf_dump_thread)(void*)
"Dumping of buffer pool not started"
" as load was incomplete");
#ifdef WITH_WSREP
- } else if (wsrep_recovery) {
+ } else if (get_wsrep_recovery()) {
#endif /* WITH_WSREP */
} else {
buf_dump(FALSE/* do complete dump at shutdown */);
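
buf_load() above now tracks only the ROW_FORMAT=COMPRESSED page size
(zip_size) per tablespace and refreshes it whenever the space id changes in
the sorted dump. A small sketch of that per-space caching loop over
(space, page) entries; the std::map stands in for fil_space_acquire_silent()
and all numbers are made up.

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <utility>
    #include <vector>

    int main()
    {
        // zip_size per tablespace id; 0 means an uncompressed tablespace.
        const std::map<uint32_t, uint32_t> zip_size_of_space =
            {{5, 0}, {7, 8192}};

        // Dump entries (space id, page number), already sorted by space id.
        const std::vector<std::pair<uint32_t, uint32_t>> dump =
            {{5, 3}, {5, 4}, {7, 1}, {7, 2}};

        uint32_t cur_space = UINT32_MAX;
        uint32_t zip_size = 0;

        for (const auto& entry : dump) {
            if (entry.first != cur_space) {      // space id changed:
                cur_space = entry.first;         // look up zip_size once
                auto it = zip_size_of_space.find(cur_space);
                zip_size = it == zip_size_of_space.end() ? 0 : it->second;
            }
            std::printf("read page %u:%u with zip_size=%u\n",
                        entry.first, entry.second, zip_size);
        }
    }
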
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index e3c45863c8b..6e3d1fa7356 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -211,7 +211,7 @@ incr_flush_list_size_in_bytes(
{
ut_ad(buf_flush_list_mutex_own(buf_pool));
- buf_pool->stat.flush_list_bytes += block->page.size.physical();
+ buf_pool->stat.flush_list_bytes += block->physical_size();
ut_ad(buf_pool->stat.flush_list_bytes <= buf_pool->curr_pool_size);
}
@@ -427,137 +427,44 @@ buf_flush_insert_into_flush_list(
ut_ad(buf_page_mutex_own(block));
buf_flush_list_mutex_enter(buf_pool);
-
- ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
- || (UT_LIST_GET_FIRST(buf_pool->flush_list)->oldest_modification
- <= lsn));
-
- /* If we are in the recovery then we need to update the flush
- red-black tree as well. */
- if (buf_pool->flush_rbt != NULL) {
- buf_flush_list_mutex_exit(buf_pool);
- buf_flush_insert_sorted_into_flush_list(buf_pool, block, lsn);
- return;
- }
-
- ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(!block->page.in_flush_list);
-
ut_d(block->page.in_flush_list = TRUE);
+ ut_ad(!block->page.oldest_modification);
block->page.oldest_modification = lsn;
-
- UT_LIST_ADD_FIRST(buf_pool->flush_list, &block->page);
-
+ UNIV_MEM_ASSERT_RW(block->page.zip.data
+ ? block->page.zip.data : block->frame,
+ block->physical_size());
incr_flush_list_size_in_bytes(block, buf_pool);
-#ifdef UNIV_DEBUG_VALGRIND
- void* p;
-
- if (block->page.size.is_compressed()) {
- p = block->page.zip.data;
- } else {
- p = block->frame;
- }
-
- UNIV_MEM_ASSERT_RW(p, block->page.size.physical());
-#endif /* UNIV_DEBUG_VALGRIND */
-
-#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- ut_a(buf_flush_validate_skip(buf_pool));
-#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
-
- buf_flush_list_mutex_exit(buf_pool);
-}
-
-/********************************************************************//**
-Inserts a modified block into the flush list in the right sorted position.
-This function is used by recovery, because there the modifications do not
-necessarily come in the order of lsn's. */
-void
-buf_flush_insert_sorted_into_flush_list(
-/*====================================*/
- buf_pool_t* buf_pool, /*!< in: buffer pool instance */
- buf_block_t* block, /*!< in/out: block which is modified */
- lsn_t lsn) /*!< in: oldest modification */
-{
- buf_page_t* prev_b;
- buf_page_t* b;
-
- ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
- ut_ad(!buf_pool_mutex_own(buf_pool));
- ut_ad(log_flush_order_mutex_own());
- ut_ad(buf_page_mutex_own(block));
- ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
-
- buf_flush_list_mutex_enter(buf_pool);
-
- /* The field in_LRU_list is protected by buf_pool->mutex, which
- we are not holding. However, while a block is in the flush
- list, it is dirty and cannot be discarded, not from the
- page_hash or from the LRU list. At most, the uncompressed
- page frame of a compressed block may be discarded or created
- (copying the block->page to or from a buf_page_t that is
- dynamically allocated from buf_buddy_alloc()). Because those
- transitions hold block->mutex and the flush list mutex (via
- buf_flush_relocate_on_flush_list()), there is no possibility
- of a race condition in the assertions below. */
- ut_ad(block->page.in_LRU_list);
- ut_ad(block->page.in_page_hash);
- /* buf_buddy_block_register() will take a block in the
- BUF_BLOCK_MEMORY state, not a file page. */
- ut_ad(!block->page.in_zip_hash);
-
- ut_ad(!block->page.in_flush_list);
- ut_d(block->page.in_flush_list = TRUE);
- block->page.oldest_modification = lsn;
-
-#ifdef UNIV_DEBUG_VALGRIND
- void* p;
-
- if (block->page.size.is_compressed()) {
- p = block->page.zip.data;
- } else {
- p = block->frame;
- }
-
- UNIV_MEM_ASSERT_RW(p, block->page.size.physical());
-#endif /* UNIV_DEBUG_VALGRIND */
-
- prev_b = NULL;
-
- /* For the most part when this function is called the flush_rbt
- should not be NULL. In a very rare boundary case it is possible
- that the flush_rbt has already been freed by the recovery thread
- before the last page was hooked up in the flush_list by the
- io-handler thread. In that case we'll just do a simple
- linear search in the else block. */
- if (buf_pool->flush_rbt != NULL) {
-
- prev_b = buf_flush_insert_in_flush_rbt(&block->page);
-
- } else {
-
- b = UT_LIST_GET_FIRST(buf_pool->flush_list);
-
- while (b != NULL && b->oldest_modification
- > block->page.oldest_modification) {
-
- ut_ad(b->in_flush_list);
- prev_b = b;
- b = UT_LIST_GET_NEXT(list, b);
+ if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
+ ut_ad(srv_shutdown_state != SRV_SHUTDOWN_FLUSH_PHASE);
+ /* The field in_LRU_list is protected by buf_pool->mutex, which
+ we are not holding. However, while a block is in the flush
+ list, it is dirty and cannot be discarded, not from the
+ page_hash or from the LRU list. At most, the uncompressed
+ page frame of a compressed block may be discarded or created
+ (copying the block->page to or from a buf_page_t that is
+ dynamically allocated from buf_buddy_alloc()). Because those
+ transitions hold block->mutex and the flush list mutex (via
+ buf_flush_relocate_on_flush_list()), there is no possibility
+ of a race condition in the assertions below. */
+ ut_ad(block->page.in_LRU_list);
+ ut_ad(block->page.in_page_hash);
+ /* buf_buddy_block_register() will take a block in the
+ BUF_BLOCK_MEMORY state, not a file page. */
+ ut_ad(!block->page.in_zip_hash);
+
+ if (buf_page_t* prev_b =
+ buf_flush_insert_in_flush_rbt(&block->page)) {
+ UT_LIST_INSERT_AFTER(buf_pool->flush_list, prev_b, &block->page);
+ goto func_exit;
}
}
- if (prev_b == NULL) {
- UT_LIST_ADD_FIRST(buf_pool->flush_list, &block->page);
- } else {
- UT_LIST_INSERT_AFTER(buf_pool->flush_list, prev_b, &block->page);
- }
-
- incr_flush_list_size_in_bytes(block, buf_pool);
-
+ UT_LIST_ADD_FIRST(buf_pool->flush_list, &block->page);
+func_exit:
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
- ut_a(buf_flush_validate_low(buf_pool));
+ ut_a(buf_flush_validate_skip(buf_pool));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
buf_flush_list_mutex_exit(buf_pool);
@@ -686,7 +593,7 @@ buf_flush_remove(
}
/* If the flush_rbt is active then delete from there as well. */
- if (buf_pool->flush_rbt != NULL) {
+ if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
buf_flush_delete_from_flush_rbt(bpage);
}
@@ -694,7 +601,7 @@ buf_flush_remove(
because we assert on in_flush_list in comparison function. */
ut_d(bpage->in_flush_list = FALSE);
- buf_pool->stat.flush_list_bytes -= bpage->size.physical();
+ buf_pool->stat.flush_list_bytes -= bpage->physical_size();
bpage->oldest_modification = 0;
@@ -754,7 +661,7 @@ buf_flush_relocate_on_flush_list(
/* If recovery is active we must swap the control blocks in
the flush_rbt as well. */
- if (buf_pool->flush_rbt != NULL) {
+ if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
buf_flush_delete_from_flush_rbt(bpage);
prev_b = buf_flush_insert_in_flush_rbt(dpage);
}
@@ -821,9 +728,9 @@ void buf_flush_write_complete(buf_page_t* bpage, bool dblwr)
/** Calculate the checksum of a page from compressed table and update
the page.
-@param[in,out] page page to update
-@param[in] size compressed page size
-@param[in] lsn LSN to stamp on the page */
+@param[in,out] page page to update
+@param[in] size compressed page size
+@param[in] lsn LSN to stamp on the page */
void
buf_flush_update_zip_checksum(
buf_frame_t* page,
@@ -840,18 +747,44 @@ buf_flush_update_zip_checksum(
mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum);
}
+/** Assign the full crc32 checksum for non-compressed page.
+@param[in,out] page page to be updated */
+void buf_flush_assign_full_crc32_checksum(byte* page)
+{
+ ut_d(bool compressed = false);
+ ut_d(bool corrupted = false);
+ ut_d(const uint size = buf_page_full_crc32_size(page, &compressed,
+ &corrupted));
+ ut_ad(!compressed);
+ ut_ad(!corrupted);
+ ut_ad(size == uint(srv_page_size));
+ const ulint payload = srv_page_size - FIL_PAGE_FCRC32_CHECKSUM;
+ mach_write_to_4(page + payload, ut_crc32(page, payload));
+}
+
/** Initialize a page for writing to the tablespace.
-@param[in] block buffer block; NULL if bypassing the buffer pool
-@param[in,out] page page frame
-@param[in,out] page_zip_ compressed page, or NULL if uncompressed
-@param[in] newest_lsn newest modification LSN to the page */
+@param[in] block buffer block; NULL if bypassing
+ the buffer pool
+@param[in,out] page page frame
+@param[in,out] page_zip_ compressed page, or NULL if
+ uncompressed
+@param[in] newest_lsn newest modification LSN to the page
+@param[in] use_full_checksum whether the tablespace uses the full_crc32 checksum */
void
buf_flush_init_for_writing(
const buf_block_t* block,
byte* page,
void* page_zip_,
- lsn_t newest_lsn)
+ lsn_t newest_lsn,
+ bool use_full_checksum)
{
+ if (block != NULL && block->frame != page) {
+ /* If the page is encrypted in full_crc32 format, then the
+ checksum was already stored as part of fil_encrypt_buf(). */
+ ut_ad(use_full_checksum);
+ return;
+ }
+
ut_ad(block == NULL || block->frame == page);
ut_ad(block == NULL || page_zip_ == NULL
|| &block->page.zip == page_zip_);
@@ -900,8 +833,13 @@ buf_flush_init_for_writing(
/* Write the newest modification lsn to the page header and trailer */
mach_write_to_8(page + FIL_PAGE_LSN, newest_lsn);
- mach_write_to_8(page + srv_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM,
- newest_lsn);
+ if (use_full_checksum) {
+ mach_write_to_4(page + srv_page_size - FIL_PAGE_FCRC32_END_LSN,
+ (ulint) newest_lsn);
+ } else {
+ mach_write_to_8(page + srv_page_size - FIL_PAGE_END_LSN_OLD_CHKSUM,
+ newest_lsn);
+ }
if (block && srv_page_size == 16384) {
/* The page type could be garbage in old files
@@ -967,6 +905,10 @@ buf_flush_init_for_writing(
uint32_t checksum = BUF_NO_CHECKSUM_MAGIC;
+ if (use_full_checksum) {
+ return buf_flush_assign_full_crc32_checksum(page);
+ }
+
switch (srv_checksum_algorithm_t(srv_checksum_algorithm)) {
case SRV_CHECKSUM_ALGORITHM_INNODB:
case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
@@ -979,6 +921,8 @@ buf_flush_init_for_writing(
be calculated after storing the new formula checksum. */
checksum = buf_calc_page_old_checksum(page);
break;
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
case SRV_CHECKSUM_ALGORITHM_CRC32:
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
/* In other cases we write the same checksum to both fields. */
@@ -1021,7 +965,10 @@ buf_flush_write_block_low(
|| space->purpose == FIL_TYPE_TABLESPACE);
ut_ad((space->purpose == FIL_TYPE_TEMPORARY)
== (space == fil_system.temp_space));
+
page_t* frame = NULL;
+ const bool full_crc32 = space->full_crc32();
+
#ifdef UNIV_DEBUG
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
ut_ad(!buf_pool_mutex_own(buf_pool));
@@ -1065,7 +1012,7 @@ buf_flush_write_block_low(
mach_write_to_8(frame + FIL_PAGE_LSN,
bpage->newest_modification);
- ut_a(page_zip_verify_checksum(frame, bpage->size.physical()));
+ ut_a(page_zip_verify_checksum(frame, bpage->zip_size()));
break;
case BUF_BLOCK_FILE_PAGE:
frame = bpage->zip.data;
@@ -1073,15 +1020,23 @@ buf_flush_write_block_low(
frame = ((buf_block_t*) bpage)->frame;
}
+ byte* page = reinterpret_cast<const buf_block_t*>(bpage)->frame;
+
+ if (full_crc32) {
+ page = buf_page_encrypt(space, bpage, page);
+ frame = page;
+ }
+
buf_flush_init_for_writing(
- reinterpret_cast<const buf_block_t*>(bpage),
- reinterpret_cast<const buf_block_t*>(bpage)->frame,
+ reinterpret_cast<const buf_block_t*>(bpage), page,
bpage->zip.data ? &bpage->zip : NULL,
- bpage->newest_modification);
+ bpage->newest_modification, full_crc32);
break;
}
- frame = buf_page_encrypt_before_write(space, bpage, frame);
+ if (!full_crc32) {
+ frame = buf_page_encrypt(space, bpage, frame);
+ }
ut_ad(space->purpose == FIL_TYPE_TABLESPACE
|| space->atomic_write_supported);
@@ -1092,7 +1047,8 @@ buf_flush_write_block_low(
/* TODO: pass the tablespace to fil_io() */
fil_io(request,
- sync, bpage->id, bpage->size, 0, bpage->size.physical(),
+ sync, bpage->id, bpage->zip_size(), 0,
+ bpage->physical_size(),
frame, bpage);
} else {
ut_ad(!srv_read_only_mode);
@@ -1353,9 +1309,13 @@ buf_flush_try_neighbors(
buf_pool_t* buf_pool = buf_pool_get(page_id);
ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
+ fil_space_t* space = fil_space_acquire_for_io(page_id.space());
+ if (!space) {
+ return 0;
+ }
if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN
- || srv_flush_neighbors == 0) {
+ || !srv_flush_neighbors || !space->is_rotational()) {
/* If there is little space or neighbor flushing is
not enabled then just flush the victim. */
low = page_id.page_no();
@@ -1410,9 +1370,8 @@ buf_flush_try_neighbors(
}
}
- const ulint space_size = fil_space_get_size(page_id.space());
- if (high > space_size) {
- high = space_size;
+ if (high > space->size) {
+ high = space->size;
}
DBUG_PRINT("ib_buf", ("flush %u:%u..%u",
@@ -1489,6 +1448,8 @@ buf_flush_try_neighbors(
buf_pool_mutex_exit(buf_pool);
}
+ space->release_for_io();
+
if (count > 1) {
MONITOR_INC_VALUE_CUMULATIVE(
MONITOR_FLUSH_NEIGHBOR_TOTAL_PAGE,
@@ -3594,7 +3555,7 @@ buf_flush_validate_low(
/* If we are in recovery mode i.e.: flush_rbt != NULL
then each block in the flush_list must also be present
in the flush_rbt. */
- if (buf_pool->flush_rbt != NULL) {
+ if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
rnode = rbt_first(buf_pool->flush_rbt);
}
@@ -3615,7 +3576,7 @@ buf_flush_validate_low(
|| buf_page_get_state(bpage) == BUF_BLOCK_REMOVE_HASH);
ut_a(om > 0);
- if (buf_pool->flush_rbt != NULL) {
+ if (UNIV_LIKELY_NULL(buf_pool->flush_rbt)) {
buf_page_t** prpage;
ut_a(rnode != NULL);
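
The rewritten buf_flush_insert_into_flush_list() above keeps a single code
path: during normal operation the dirty block is simply prepended (its
oldest_modification is the highest LSN so far, so the list stays ordered by
descending LSN), and only during recovery, when flush_rbt exists, is a
sorted insertion position looked up. A sketch of that invariant with a
plain std::list standing in for the flush list (an illustration, not the
buffer-pool data structure):

    #include <cstdint>
    #include <cassert>
    #include <list>

    // The flush list keeps dirty pages in descending oldest_modification order.
    void flush_list_insert(std::list<uint64_t>& flush_list, uint64_t lsn,
                           bool in_recovery)
    {
        if (!in_recovery) {
            // Normal running: new modifications carry the highest LSN so far,
            // so prepending preserves the ordering invariant.
            assert(flush_list.empty() || flush_list.front() <= lsn);
            flush_list.push_front(lsn);
            return;
        }
        // Recovery: redo application dirties pages out of LSN order, so find
        // the first element that is not larger and insert before it (the role
        // played by the flush_rbt lookup in the real code).
        auto it = flush_list.begin();
        while (it != flush_list.end() && *it > lsn) {
            ++it;
        }
        flush_list.insert(it, lsn);
    }
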
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index 9b4a0809f25..397ff8efa42 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -166,7 +166,7 @@ incr_LRU_size_in_bytes(
{
ut_ad(buf_pool_mutex_own(buf_pool));
- buf_pool->stat.LRU_bytes += bpage->size.physical();
+ buf_pool->stat.LRU_bytes += bpage->physical_size();
ut_ad(buf_pool->stat.LRU_bytes <= buf_pool->curr_pool_size);
}
@@ -1389,7 +1389,7 @@ buf_LRU_remove_block(
UT_LIST_REMOVE(buf_pool->LRU, bpage);
ut_d(bpage->in_LRU_list = FALSE);
- buf_pool->stat.LRU_bytes -= bpage->size.physical();
+ buf_pool->stat.LRU_bytes -= bpage->physical_size();
buf_unzip_LRU_remove_block_if_needed(bpage);
@@ -1657,9 +1657,9 @@ func_exit:
? BUF_BLOCK_ZIP_DIRTY
: BUF_BLOCK_ZIP_PAGE;
- ut_ad(b->size.is_compressed());
+ ut_ad(b->zip_size());
- UNIV_MEM_DESC(b->zip.data, b->size.physical());
+ UNIV_MEM_DESC(b->zip.data, b->zip_size());
/* The fields in_page_hash and in_LRU_list of
the to-be-freed block descriptor should have
@@ -1738,10 +1738,6 @@ func_exit:
page_zip_set_size(&bpage->zip, 0);
- bpage->size.copy_from(page_size_t(bpage->size.logical(),
- bpage->size.logical(),
- false));
-
mutex_exit(block_mutex);
/* Prevent buf_page_get_gen() from
@@ -1781,11 +1777,11 @@ func_exit:
buf_pool->page_hash, thus inaccessible by any
other thread. */
- ut_ad(b->size.is_compressed());
+ ut_ad(b->zip_size());
const uint32_t checksum = page_zip_calc_checksum(
b->zip.data,
- b->size.physical(),
+ b->zip_size(),
static_cast<srv_checksum_algorithm_t>(
srv_checksum_algorithm));
@@ -1852,19 +1848,14 @@ buf_LRU_block_free_non_file_page(
buf_page_mutex_exit(block);
buf_pool_mutex_exit_forbid(buf_pool);
- ut_ad(block->page.size.is_compressed());
+ ut_ad(block->zip_size());
- buf_buddy_free(buf_pool, data, block->page.size.physical());
+ buf_buddy_free(buf_pool, data, block->zip_size());
buf_pool_mutex_exit_allow(buf_pool);
buf_page_mutex_enter(block);
page_zip_set_size(&block->page.zip, 0);
-
- block->page.size.copy_from(
- page_size_t(block->page.size.logical(),
- block->page.size.logical(),
- false));
}
if (buf_pool->curr_size < buf_pool->old_size
@@ -1935,7 +1926,7 @@ buf_LRU_block_remove_hashed(
const page_t* page = ((buf_block_t*) bpage)->frame;
ut_a(!zip || bpage->oldest_modification == 0);
- ut_ad(bpage->size.is_compressed());
+ ut_ad(bpage->zip_size());
switch (fil_page_get_type(page)) {
case FIL_PAGE_TYPE_ALLOCATED:
@@ -1950,7 +1941,7 @@ buf_LRU_block_remove_hashed(
to the compressed page, which will
be preserved. */
memcpy(bpage->zip.data, page,
- bpage->size.physical());
+ bpage->zip_size());
}
break;
case FIL_PAGE_TYPE_ZBLOB:
@@ -1967,14 +1958,13 @@ buf_LRU_block_remove_hashed(
default:
ib::error() << "The compressed page to be"
" evicted seems corrupt:";
- ut_print_buf(stderr, page,
- bpage->size.logical());
+ ut_print_buf(stderr, page, srv_page_size);
ib::error() << "Possibly older version of"
" the page:";
ut_print_buf(stderr, bpage->zip.data,
- bpage->size.physical());
+ bpage->zip_size());
putc('\n', stderr);
ut_error;
}
@@ -1984,10 +1974,7 @@ buf_LRU_block_remove_hashed(
/* fall through */
case BUF_BLOCK_ZIP_PAGE:
ut_a(bpage->oldest_modification == 0);
- if (bpage->size.is_compressed()) {
- UNIV_MEM_ASSERT_W(bpage->zip.data,
- bpage->size.physical());
- }
+ UNIV_MEM_ASSERT_W(bpage->zip.data, bpage->zip_size());
break;
case BUF_BLOCK_POOL_WATCH:
case BUF_BLOCK_ZIP_DIRTY:
@@ -2003,25 +1990,16 @@ buf_LRU_block_remove_hashed(
if (bpage != hashed_bpage) {
ib::error() << "Page " << bpage->id
<< " not found in the hash table";
-
-#ifdef UNIV_DEBUG
-
-
ib::error()
+#ifdef UNIV_DEBUG
<< "in_page_hash:" << bpage->in_page_hash
<< " in_zip_hash:" << bpage->in_zip_hash
- // << " in_free_list:"<< bpage->in_fee_list
<< " in_flush_list:" << bpage->in_flush_list
<< " in_LRU_list:" << bpage->in_LRU_list
+#endif
<< " zip.data:" << bpage->zip.data
- << " zip_size:" << bpage->size.logical()
- << " page_state:" << buf_page_get_state(bpage);
-#else
- ib::error()
- << " zip.data:" << bpage->zip.data
- << " zip_size:" << bpage->size.logical()
+ << " zip_size:" << bpage->zip_size()
<< " page_state:" << buf_page_get_state(bpage);
-#endif
if (hashed_bpage) {
@@ -2055,7 +2033,7 @@ buf_LRU_block_remove_hashed(
ut_ad(!bpage->in_flush_list);
ut_ad(!bpage->in_LRU_list);
ut_a(bpage->zip.data);
- ut_a(bpage->size.is_compressed());
+ ut_a(bpage->zip.ssize);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
UT_LIST_REMOVE(buf_pool->zip_clean, bpage);
@@ -2065,8 +2043,7 @@ buf_LRU_block_remove_hashed(
rw_lock_x_unlock(hash_lock);
buf_pool_mutex_exit_forbid(buf_pool);
- buf_buddy_free(buf_pool, bpage->zip.data,
- bpage->size.physical());
+ buf_buddy_free(buf_pool, bpage->zip.data, bpage->zip_size());
buf_pool_mutex_exit_allow(buf_pool);
buf_page_free_descriptor(bpage);
@@ -2113,16 +2090,11 @@ buf_LRU_block_remove_hashed(
ut_ad(!bpage->in_LRU_list);
buf_pool_mutex_exit_forbid(buf_pool);
- buf_buddy_free(buf_pool, data, bpage->size.physical());
+ buf_buddy_free(buf_pool, data, bpage->zip_size());
buf_pool_mutex_exit_allow(buf_pool);
page_zip_set_size(&bpage->zip, 0);
-
- bpage->size.copy_from(
- page_size_t(bpage->size.logical(),
- bpage->size.logical(),
- false));
}
return(true);
@@ -2455,7 +2427,7 @@ buf_LRU_print_instance(
if (bpage->buf_fix_count) {
fprintf(stderr, "buffix count %u ",
- bpage->buf_fix_count);
+ uint32_t(bpage->buf_fix_count));
}
if (buf_page_get_io_fix(bpage)) {
@@ -2480,7 +2452,7 @@ buf_LRU_print_instance(
fprintf(stderr, "\ntype %u size " ULINTPF
" index id " IB_ID_FMT "\n",
fil_page_get_type(frame),
- bpage->size.physical(),
+ bpage->zip_size(),
btr_page_get_index_id(frame));
break;
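
Throughout buf0lru.cc (and the files above), bpage->size.is_compressed()
and .physical() are replaced by the zip_size() and physical_size()
accessors. A sketch of what those accessors amount to, assuming the usual
InnoDB encoding in which zip.ssize == 0 means "not compressed" and each
increment doubles the compressed size starting at 1 KiB (an assumption of
this sketch, not spelled out in the diff):

    #include <cstdint>
    #include <cstdio>

    struct page_descr {
        uint32_t ssize;          // compressed-size shift; 0 = not compressed
        uint32_t srv_page_size;  // logical page size of the server, e.g. 16384

        // ROW_FORMAT=COMPRESSED page size in bytes, or 0 if uncompressed
        // (assumed encoding: 512 << ssize, so ssize 1 -> 1024, 5 -> 16384).
        uint32_t zip_size() const { return ssize ? 512u << ssize : 0; }

        // Bytes actually stored on disk for this page.
        uint32_t physical_size() const
        { return ssize ? zip_size() : srv_page_size; }
    };

    int main()
    {
        page_descr plain{0, 16384}, zipped{3, 16384};
        std::printf("plain:  zip=%u physical=%u\n",
                    (unsigned) plain.zip_size(),
                    (unsigned) plain.physical_size());
        std::printf("zipped: zip=%u physical=%u\n",
                    (unsigned) zipped.zip_size(),
                    (unsigned) zipped.physical_size());
    }
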
diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc
index 18006563f0e..d8ec476cb4b 100644
--- a/storage/innobase/buf/buf0rea.cc
+++ b/storage/innobase/buf/buf0rea.cc
@@ -95,15 +95,14 @@ buffer buf_pool if it is not already there, in which case does nothing.
Sets the io_fix flag and sets an exclusive lock on the buffer frame. The
flag is cleared and the x-lock released by an i/o-handler thread.
-@param[out] err DB_SUCCESS, DB_TABLESPACE_DELETED or
- DB_TABLESPACE_TRUNCATED if we are trying
- to read from a non-existent tablespace, a
- tablespace which is just now being dropped,
- or a tablespace which is truncated
+@param[out] err DB_SUCCESS or DB_TABLESPACE_DELETED
+ if we are trying to read from a non-existent tablespace
@param[in] sync true if synchronous aio is desired
@param[in] type IO type, SIMULATED, IGNORE_MISSING
@param[in] mode BUF_READ_IBUF_PAGES_ONLY, ...,
@param[in] page_id page id
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] unzip true=request uncompressed page
@param[in] ignore_missing_space true=ignore missing space when reading
@return 1 if a read request was queued, 0 if the page already resided
@@ -118,7 +117,7 @@ buf_read_page_low(
ulint type,
ulint mode,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
bool unzip,
bool ignore_missing_space = false)
{
@@ -134,7 +133,7 @@ buf_read_page_low(
return(0);
}
- if (ibuf_bitmap_page(page_id, page_size) || trx_sys_hdr_page(page_id)) {
+ if (ibuf_bitmap_page(page_id, zip_size) || trx_sys_hdr_page(page_id)) {
/* Trx sys header is so low in the latching order that we play
safe and do not leave the i/o-completion to an asynchronous
@@ -149,7 +148,7 @@ buf_read_page_low(
or is being dropped; if we succeed in initing the page in the buffer
pool for read, then DISCARD cannot proceed until the read has
completed */
- bpage = buf_page_init_for_read(err, mode, page_id, page_size, unzip);
+ bpage = buf_page_init_for_read(err, mode, page_id, zip_size, unzip);
if (bpage == NULL) {
@@ -157,7 +156,7 @@ buf_read_page_low(
}
DBUG_LOG("ib_buf",
- "read page " << page_id << " size=" << page_size.physical()
+ "read page " << page_id << " zip_size=" << zip_size
<< " unzip=" << unzip << ',' << (sync ? "sync" : "async"));
ut_ad(buf_page_in_file(bpage));
@@ -168,7 +167,7 @@ buf_read_page_low(
void* dst;
- if (page_size.is_compressed()) {
+ if (zip_size) {
dst = bpage->zip.data;
} else {
ut_a(buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE);
@@ -179,7 +178,8 @@ buf_read_page_low(
IORequest request(type | IORequest::READ);
*err = fil_io(
- request, sync, page_id, page_size, 0, page_size.physical(),
+ request, sync, page_id, zip_size, 0,
+ zip_size ? zip_size : srv_page_size,
dst, bpage, ignore_missing_space);
if (sync) {
@@ -187,20 +187,8 @@ buf_read_page_low(
}
if (UNIV_UNLIKELY(*err != DB_SUCCESS)) {
- if (*err == DB_TABLESPACE_TRUNCATED) {
- /* Remove the page which is outside the
- truncated tablespace bounds when recovering
- from a crash happened during a truncation */
- buf_read_page_handle_error(bpage);
- if (recv_recovery_is_on()) {
- mutex_enter(&recv_sys->mutex);
- ut_ad(recv_sys->n_addrs > 0);
- recv_sys->n_addrs--;
- mutex_exit(&recv_sys->mutex);
- }
- return(0);
- } else if (IORequest::ignore_missing(type)
- || *err == DB_TABLESPACE_DELETED) {
+ if (IORequest::ignore_missing(type)
+ || *err == DB_TABLESPACE_DELETED) {
buf_read_page_handle_error(bpage);
return(0);
}
@@ -232,16 +220,13 @@ performed by ibuf routines, a situation which could result in a deadlock if
the OS does not support asynchronous i/o.
@param[in] page_id page id of a page which the current thread
wants to access
-@param[in] page_size page size
-@param[in] inside_ibuf TRUE if we are inside ibuf routine
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param[in] ibuf whether we are inside the ibuf routine
@return number of page read requests issued; NOTE that if we read ibuf
pages, it may happen that the page at the given page number does not
get read even if we return a positive value! */
ulint
-buf_read_ahead_random(
- const page_id_t page_id,
- const page_size_t& page_size,
- ibool inside_ibuf)
+buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf)
{
buf_pool_t* buf_pool = buf_pool_get(page_id);
ulint recent_blocks = 0;
@@ -263,7 +248,7 @@ buf_read_ahead_random(
return(0);
}
- if (ibuf_bitmap_page(page_id, page_size) || trx_sys_hdr_page(page_id)) {
+ if (ibuf_bitmap_page(page_id, zip_size) || trx_sys_hdr_page(page_id)) {
/* If it is an ibuf bitmap page or trx sys hdr, we do
no read-ahead, as that could break the ibuf page access
@@ -278,14 +263,14 @@ buf_read_ahead_random(
high = (page_id.page_no() / buf_read_ahead_random_area + 1)
* buf_read_ahead_random_area;
- /* Remember the tablespace version before we ask the tablespace size
- below: if DISCARD + IMPORT changes the actual .ibd file meanwhile, we
+ /* If DISCARD + IMPORT changes the actual .ibd file meanwhile, we
do not try to read outside the bounds of the tablespace! */
if (fil_space_t* space = fil_space_acquire(page_id.space())) {
#ifdef UNIV_DEBUG
if (srv_file_per_table) {
ulint size = 0;
+ const ulint physical_size = space->physical_size();
for (const fil_node_t* node =
UT_LIST_GET_FIRST(space->chain);
@@ -293,7 +278,7 @@ buf_read_ahead_random(
node = UT_LIST_GET_NEXT(chain, node)) {
size += ulint(os_file_get_size(node->handle)
- / page_size.physical());
+ / physical_size);
}
ut_ad(size == space->size);
@@ -346,12 +331,7 @@ buf_read_ahead_random(
read_ahead:
/* Read all the suitable blocks within the area */
- if (inside_ibuf) {
- ibuf_mode = BUF_READ_IBUF_PAGES_ONLY;
- } else {
- ibuf_mode = BUF_READ_ANY_PAGE;
- }
-
+ ibuf_mode = ibuf ? BUF_READ_IBUF_PAGES_ONLY : BUF_READ_ANY_PAGE;
count = 0;
for (i = low; i < high; i++) {
@@ -360,16 +340,15 @@ read_ahead:
const page_id_t cur_page_id(page_id.space(), i);
- if (!ibuf_bitmap_page(cur_page_id, page_size)) {
+ if (!ibuf_bitmap_page(cur_page_id, zip_size)) {
count += buf_read_page_low(
&err, false,
IORequest::DO_NOT_WAKE,
ibuf_mode,
- cur_page_id, page_size, false);
+ cur_page_id, zip_size, false);
switch (err) {
case DB_SUCCESS:
- case DB_TABLESPACE_TRUNCATED:
case DB_ERROR:
break;
case DB_TABLESPACE_DELETED:
@@ -411,16 +390,13 @@ buffer buf_pool if it is not already there. Sets the io_fix flag and sets
an exclusive lock on the buffer frame. The flag is cleared and the x-lock
released by the i/o-handler thread.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@retval DB_SUCCESS if the page was read and is not corrupted,
@retval DB_PAGE_CORRUPTED if page based on checksum check is corrupted,
@retval DB_DECRYPTION_FAILED if page post encryption checksum matches but
after decryption normal page checksum does not match.
@retval DB_TABLESPACE_DELETED if tablespace .ibd file is missing */
-dberr_t
-buf_read_page(
- const page_id_t page_id,
- const page_size_t& page_size)
+dberr_t buf_read_page(const page_id_t page_id, ulint zip_size)
{
ulint count;
dberr_t err = DB_SUCCESS;
@@ -433,7 +409,7 @@ buf_read_page(
count = buf_read_page_low(
&err, true,
- 0, BUF_READ_ANY_PAGE, page_id, page_size, false);
+ 0, BUF_READ_ANY_PAGE, page_id, zip_size, false);
srv_stats.buf_pool_reads.add(count);
@@ -453,13 +429,10 @@ buffer buf_pool if it is not already there. Sets the io_fix flag and sets
an exclusive lock on the buffer frame. The flag is cleared and the x-lock
released by the i/o-handler thread.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] sync true if synchronous aio is desired */
void
-buf_read_page_background(
- const page_id_t page_id,
- const page_size_t& page_size,
- bool sync)
+buf_read_page_background(const page_id_t page_id, ulint zip_size, bool sync)
{
ulint count;
dberr_t err;
@@ -468,11 +441,10 @@ buf_read_page_background(
&err, sync,
IORequest::DO_NOT_WAKE | IORequest::IGNORE_MISSING,
BUF_READ_ANY_PAGE,
- page_id, page_size, false);
+ page_id, zip_size, false);
switch (err) {
case DB_SUCCESS:
- case DB_TABLESPACE_TRUNCATED:
case DB_ERROR:
break;
case DB_TABLESPACE_DELETED:
@@ -524,14 +496,11 @@ NOTE 3: the calling thread must want access to the page given: this rule is
set to prevent unintended read-aheads performed by ibuf routines, a situation
which could result in a deadlock if the OS does not support asynchronous io.
@param[in] page_id page id; see NOTE 3 above
-@param[in] page_size page size
-@param[in] inside_ibuf TRUE if we are inside ibuf routine
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param[in] ibuf whether we are inside the ibuf routine
@return number of page read requests issued */
ulint
-buf_read_ahead_linear(
- const page_id_t page_id,
- const page_size_t& page_size,
- ibool inside_ibuf)
+buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf)
{
buf_pool_t* buf_pool = buf_pool_get(page_id);
buf_page_t* bpage;
@@ -570,7 +539,7 @@ buf_read_ahead_linear(
return(0);
}
- if (ibuf_bitmap_page(page_id, page_size) || trx_sys_hdr_page(page_id)) {
+ if (ibuf_bitmap_page(page_id, zip_size) || trx_sys_hdr_page(page_id)) {
/* If it is an ibuf bitmap page or trx sys hdr, we do
no read-ahead, as that could break the ibuf page access
@@ -731,9 +700,7 @@ buf_read_ahead_linear(
/* If we got this far, read-ahead can be sensible: do it */
- ulint ibuf_mode;
-
- ibuf_mode = inside_ibuf ? BUF_READ_IBUF_PAGES_ONLY : BUF_READ_ANY_PAGE;
+ ulint ibuf_mode = ibuf ? BUF_READ_IBUF_PAGES_ONLY : BUF_READ_ANY_PAGE;
/* Since Windows XP seems to schedule the i/o handler thread
very eagerly, and consequently it does not wait for the
@@ -747,15 +714,14 @@ buf_read_ahead_linear(
const page_id_t cur_page_id(page_id.space(), i);
- if (!ibuf_bitmap_page(cur_page_id, page_size)) {
+ if (!ibuf_bitmap_page(cur_page_id, zip_size)) {
count += buf_read_page_low(
&err, false,
IORequest::DO_NOT_WAKE,
- ibuf_mode, cur_page_id, page_size, false);
+ ibuf_mode, cur_page_id, zip_size, false);
switch (err) {
case DB_SUCCESS:
- case DB_TABLESPACE_TRUNCATED:
case DB_TABLESPACE_DELETED:
case DB_ERROR:
break;
@@ -818,11 +784,8 @@ buf_read_ibuf_merge_pages(
#endif
for (ulint i = 0; i < n_stored; i++) {
- bool found;
- const page_size_t page_size(fil_space_get_page_size(
- space_ids[i], &found));
-
- if (!found) {
+ fil_space_t* s = fil_space_acquire_for_io(space_ids[i]);
+ if (!s) {
tablespace_deleted:
/* The tablespace was not found: remove all
entries for it */
@@ -834,6 +797,9 @@ tablespace_deleted:
continue;
}
+ const ulint zip_size = s->zip_size();
+ s->release_for_io();
+
const page_id_t page_id(space_ids[i], page_nos[i]);
buf_pool_t* buf_pool = buf_pool_get(page_id);
@@ -848,12 +814,11 @@ tablespace_deleted:
buf_read_page_low(&err,
sync && (i + 1 == n_stored),
0,
- BUF_READ_ANY_PAGE, page_id, page_size,
+ BUF_READ_ANY_PAGE, page_id, zip_size,
true, true /* ignore_missing_space */);
switch(err) {
case DB_SUCCESS:
- case DB_TABLESPACE_TRUNCATED:
case DB_ERROR:
break;
case DB_TABLESPACE_DELETED:
@@ -900,7 +865,7 @@ buf_read_recv_pages(
fil_space_open_if_needed(space);
- const page_size_t page_size(space->flags);
+ const ulint zip_size = space->zip_size();
for (ulint i = 0; i < n_stored; i++) {
buf_pool_t* buf_pool;
@@ -933,13 +898,13 @@ buf_read_recv_pages(
&err, true,
0,
BUF_READ_ANY_PAGE,
- cur_page_id, page_size, true);
+ cur_page_id, zip_size, true);
} else {
buf_read_page_low(
&err, false,
IORequest::DO_NOT_WAKE,
BUF_READ_ANY_PAGE,
- cur_page_id, page_size, true);
+ cur_page_id, zip_size, true);
}
if (err == DB_DECRYPTION_FAILED || err == DB_PAGE_CORRUPTED) {
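
The read-ahead changes in buf0rea.cc keep the windowing arithmetic and only
swap page_size_t for zip_size: the candidate window is the read-ahead area
containing the requested page, clamped to the tablespace size, and each
fil_io() transfers zip_size bytes for a compressed tablespace or
srv_page_size otherwise. A worked sketch of that arithmetic with made-up
numbers:

    #include <cstdint>
    #include <cstdio>
    #include <algorithm>

    int main()
    {
        const uint32_t read_ahead_area = 64;   // pages per read-ahead window
        const uint32_t space_size      = 1000; // tablespace size in pages
        const uint32_t page_no         = 130;  // page that triggered read-ahead

        const uint32_t low  = (page_no / read_ahead_area) * read_ahead_area;
        const uint32_t high = std::min(
            (page_no / read_ahead_area + 1) * read_ahead_area, space_size);

        const uint32_t zip_size      = 0;      // 0 = uncompressed tablespace
        const uint32_t srv_page_size = 16384;
        const uint32_t io_len        = zip_size ? zip_size : srv_page_size;

        // Prints: window [128, 192), 16384 bytes per page read
        std::printf("window [%u, %u), %u bytes per page read\n",
                    low, high, io_len);
    }
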
diff --git a/storage/innobase/data/data0data.cc b/storage/innobase/data/data0data.cc
index c7629bd1c1a..8b1966443c9 100644
--- a/storage/innobase/data/data0data.cc
+++ b/storage/innobase/data/data0data.cc
@@ -58,7 +58,12 @@ void dtuple_t::trim(const dict_index_t& index)
for (; i > index.n_core_fields; i--) {
const dfield_t* dfield = dtuple_get_nth_field(this, i - 1);
const dict_col_t* col = dict_index_get_nth_col(&index, i - 1);
- ut_ad(col->is_instant());
+
+ if (col->is_dropped()) {
+ continue;
+ }
+
+ ut_ad(col->is_added());
ulint len = dfield_get_len(dfield);
if (len != col->def_val.len) {
break;
@@ -594,7 +599,6 @@ dtuple_convert_big_rec(
mem_heap_t* heap;
big_rec_t* vector;
dfield_t* dfield;
- dict_field_t* ifield;
ulint size;
ulint n_fields;
ulint local_len;
@@ -604,15 +608,14 @@ dtuple_convert_big_rec(
return(NULL);
}
- if (!dict_table_has_atomic_blobs(index->table)) {
- /* up to MySQL 5.1: store a 768-byte prefix locally */
- local_len = BTR_EXTERN_FIELD_REF_SIZE
- + DICT_ANTELOPE_MAX_INDEX_COL_LEN;
- } else {
- /* new-format table: do not store any BLOB prefix locally */
- local_len = BTR_EXTERN_FIELD_REF_SIZE;
+ if (!index->table->space) {
+ return NULL;
}
+ const auto zip_size = index->table->space->zip_size();
+
+ ut_ad(index->n_uniq > 0);
+
ut_a(dtuple_check_typed_no_assert(entry));
size = rec_get_converted_size(index, entry, *n_ext);
@@ -634,24 +637,42 @@ dtuple_convert_big_rec(
stored externally */
n_fields = 0;
+ ulint longest_i;
+
+ const bool mblob = entry->is_alter_metadata();
+ ut_ad(entry->n_fields >= index->first_user_field() + mblob);
+ ut_ad(entry->n_fields - mblob <= index->n_fields);
+
+ if (mblob) {
+ longest_i = index->first_user_field();
+ dfield = dtuple_get_nth_field(entry, longest_i);
+ local_len = BTR_EXTERN_FIELD_REF_SIZE;
+ ut_ad(!dfield_is_ext(dfield));
+ goto ext_write;
+ }
+
+ if (!dict_table_has_atomic_blobs(index->table)) {
+ /* up to MySQL 5.1: store a 768-byte prefix locally */
+ local_len = BTR_EXTERN_FIELD_REF_SIZE
+ + DICT_ANTELOPE_MAX_INDEX_COL_LEN;
+ } else {
+ /* new-format table: do not store any BLOB prefix locally */
+ local_len = BTR_EXTERN_FIELD_REF_SIZE;
+ }
while (page_zip_rec_needs_ext(rec_get_converted_size(index, entry,
*n_ext),
- dict_table_is_comp(index->table),
+ index->table->not_redundant(),
dict_index_get_n_fields(index),
- dict_table_page_size(index->table))) {
-
- ulint i;
- ulint longest = 0;
- ulint longest_i = ULINT_MAX;
- byte* data;
-
- for (i = dict_index_get_n_unique_in_tree(index);
- i < dtuple_get_n_fields(entry); i++) {
+ zip_size)) {
+ longest_i = 0;
+ for (ulint i = index->first_user_field(), longest = 0;
+ i + mblob < entry->n_fields; i++) {
ulint savings;
+ dfield = dtuple_get_nth_field(entry, i + mblob);
- dfield = dtuple_get_nth_field(entry, i);
- ifield = dict_index_get_nth_field(index, i);
+ const dict_field_t* ifield = dict_index_get_nth_field(
+ index, i);
/* Skip fixed-length, NULL, externally stored,
or short columns */
@@ -693,7 +714,7 @@ skip_field:
continue;
}
- if (!longest) {
+ if (!longest_i) {
/* Cannot shorten more */
mem_heap_free(heap);
@@ -706,9 +727,8 @@ skip_field:
We store the first bytes locally to the record. Then
we can calculate all ordering fields in all indexes
from locally stored data. */
-
dfield = dtuple_get_nth_field(entry, longest_i);
- ifield = dict_index_get_nth_field(index, longest_i);
+ext_write:
local_prefix_len = local_len - BTR_EXTERN_FIELD_REF_SIZE;
vector->append(
@@ -719,7 +739,8 @@ skip_field:
+ local_prefix_len));
/* Allocate the locally stored part of the column. */
- data = static_cast<byte*>(mem_heap_alloc(heap, local_len));
+ byte* data = static_cast<byte*>(
+ mem_heap_alloc(heap, local_len));
/* Copy the local prefix. */
memcpy(data, dfield_get_data(dfield), local_prefix_len);
@@ -733,7 +754,6 @@ skip_field:
UNIV_MEM_ALLOC(data + local_prefix_len,
BTR_EXTERN_FIELD_REF_SIZE);
#endif
-
dfield_set_data(dfield, data, local_len);
dfield_set_ext(dfield);
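
dtuple_convert_big_rec() above now takes the page-size limit from
index->table->space->zip_size() and, as before, repeatedly moves the
longest eligible field to external (BLOB) storage until the converted
record fits; the new mblob branch additionally forces the ALTER TABLE
metadata BLOB out first. A sketch of just the greedy selection, with the
fits-on-page test and the per-field bookkeeping reduced to simple
stand-ins (a hypothetical helper, not the InnoDB function):

    #include <cstddef>
    #include <algorithm>
    #include <vector>

    // Move the longest still-inlined field out of the record until the
    // remaining inline size fits the page limit. Returns the indexes of
    // the fields chosen for external storage (greedy, as in
    // dtuple_convert_big_rec).
    std::vector<size_t> choose_external_fields(std::vector<size_t> field_len,
                                               size_t inline_bytes_per_ext,
                                               size_t page_limit)
    {
        std::vector<size_t> ext;
        auto inline_size = [&] {
            size_t s = 0;
            for (size_t len : field_len) s += len;
            return s;
        };
        while (inline_size() > page_limit) {
            auto longest = std::max_element(field_len.begin(), field_len.end());
            if (*longest <= inline_bytes_per_ext) {
                break;                    // cannot shorten any further
            }
            ext.push_back(size_t(longest - field_len.begin()));
            // only a short local prefix / reference stays inline
            *longest = inline_bytes_per_ext;
        }
        return ext;
    }
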
diff --git a/storage/innobase/data/data0type.cc b/storage/innobase/data/data0type.cc
index 84962d097aa..a154dc6b490 100644
--- a/storage/innobase/data/data0type.cc
+++ b/storage/innobase/data/data0type.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -24,7 +24,8 @@ Data types
Created 1/16/1996 Heikki Tuuri
*******************************************************/
-#include "data0type.h"
+#include "dict0mem.h"
+#include "my_sys.h"
/** The DB_TRX_ID,DB_ROLL_PTR values for "no history is available" */
const byte reset_trx_id[DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN] = {
@@ -79,67 +80,6 @@ dtype_get_at_most_n_mbchars(
}
/*********************************************************************//**
-Checks if a data main type is a string type. Also a BLOB is considered a
-string type.
-@return TRUE if string type */
-ibool
-dtype_is_string_type(
-/*=================*/
- ulint mtype) /*!< in: InnoDB main data type code: DATA_CHAR, ... */
-{
- if (mtype <= DATA_BLOB
- || mtype == DATA_MYSQL
- || mtype == DATA_VARMYSQL) {
-
- return(TRUE);
- }
-
- return(FALSE);
-}
-
-/*********************************************************************//**
-Checks if a type is a binary string type. Note that for tables created with
-< 4.0.14, we do not know if a DATA_BLOB column is a BLOB or a TEXT column. For
-those DATA_BLOB columns this function currently returns FALSE.
-@return TRUE if binary string type */
-ibool
-dtype_is_binary_string_type(
-/*========================*/
- ulint mtype, /*!< in: main data type */
- ulint prtype) /*!< in: precise type */
-{
- if ((mtype == DATA_FIXBINARY)
- || (mtype == DATA_BINARY)
- || (mtype == DATA_BLOB && (prtype & DATA_BINARY_TYPE))) {
-
- return(TRUE);
- }
-
- return(FALSE);
-}
-
-/*********************************************************************//**
-Checks if a type is a non-binary string type. That is, dtype_is_string_type is
-TRUE and dtype_is_binary_string_type is FALSE. Note that for tables created
-with < 4.0.14, we do not know if a DATA_BLOB column is a BLOB or a TEXT column.
-For those DATA_BLOB columns this function currently returns TRUE.
-@return TRUE if non-binary string type */
-ibool
-dtype_is_non_binary_string_type(
-/*============================*/
- ulint mtype, /*!< in: main data type */
- ulint prtype) /*!< in: precise type */
-{
- if (dtype_is_string_type(mtype) == TRUE
- && dtype_is_binary_string_type(mtype, prtype) == FALSE) {
-
- return(TRUE);
- }
-
- return(FALSE);
-}
-
-/*********************************************************************//**
Validates a data type structure.
@return TRUE if ok */
ibool
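
The three predicates removed from data0type.cc above encode a simple
classification over the main/precise type codes (they presumably continue
to exist as inline functions elsewhere). Restated as a standalone sketch;
the numeric DATA_* values and the DATA_BINARY_TYPE flag below are
illustrative, the real definitions live in data0type.h.

    #include <cstdint>

    // Illustrative type codes; the real values come from data0type.h.
    enum main_type : uint32_t { DATA_VARCHAR = 1, DATA_CHAR, DATA_FIXBINARY,
                                DATA_BINARY, DATA_BLOB, DATA_INT,
                                DATA_VARMYSQL = 12, DATA_MYSQL = 13 };
    const uint32_t DATA_BINARY_TYPE = 1024; // illustrative prtype flag

    // A BLOB also counts as a string type.
    bool is_string_type(uint32_t mtype)
    {
        return mtype <= DATA_BLOB
            || mtype == DATA_MYSQL || mtype == DATA_VARMYSQL;
    }

    bool is_binary_string_type(uint32_t mtype, uint32_t prtype)
    {
        return mtype == DATA_FIXBINARY || mtype == DATA_BINARY
            || (mtype == DATA_BLOB && (prtype & DATA_BINARY_TYPE));
    }

    bool is_non_binary_string_type(uint32_t mtype, uint32_t prtype)
    {
        return is_string_type(mtype) && !is_binary_string_type(mtype, prtype);
    }
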
diff --git a/storage/innobase/dict/dict0boot.cc b/storage/innobase/dict/dict0boot.cc
index e5bf33593ed..2a80def7335 100644
--- a/storage/innobase/dict/dict0boot.cc
+++ b/storage/innobase/dict/dict0boot.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, MariaDB Corporation.
+Copyright (c) 2016, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -47,7 +47,7 @@ dict_hdr_get(
dict_hdr_t* header;
block = buf_page_get(page_id_t(DICT_HDR_SPACE, DICT_HDR_PAGE_NO),
- univ_page_size, RW_X_LATCH, mtr);
+ 0, RW_X_LATCH, mtr);
header = DICT_HDR + buf_block_get_frame(block);
buf_block_dbg_add_level(block, SYNC_DICT_HEADER);
@@ -64,52 +64,14 @@ dict_hdr_get_new_id(
(not assigned if NULL) */
index_id_t* index_id, /*!< out: index id
(not assigned if NULL) */
- ulint* space_id, /*!< out: space id
+ ulint* space_id) /*!< out: space id
(not assigned if NULL) */
- const dict_table_t* table, /*!< in: table */
- bool disable_redo) /*!< in: if true and table
- object is NULL
- then disable-redo */
{
dict_hdr_t* dict_hdr;
ib_id_t id;
mtr_t mtr;
mtr_start(&mtr);
- if (table) {
- if (table->is_temporary()) {
- mtr.set_log_mode(MTR_LOG_NO_REDO);
- }
- } else if (disable_redo) {
- /* In non-read-only mode we need to ensure that space-id header
- page is written to disk else if page is removed from buffer
- cache and re-loaded it would assign temporary tablespace id
- to another tablespace.
- This is not a case with read-only mode as there is no new object
- that is created except temporary tablespace. */
- mtr.set_log_mode(srv_read_only_mode
- ? MTR_LOG_NONE : MTR_LOG_NO_REDO);
- }
-
- /* Server started and let's say space-id = x
- - table created with file-per-table
- - space-id = x + 1
- - crash
- Case 1: If it was redo logged then we know that it will be
- restored to x + 1
- Case 2: if not redo-logged
- Header will have the old space-id = x
- This is OK because on restart there is no object with
- space id = x + 1
- Case 3:
- space-id = x (on start)
- space-id = x+1 (temp-table allocation) - no redo logging
- space-id = x+2 (non-temp-table allocation), this get's
- redo logged.
- If there is a crash there will be only 2 entries
- x (original) and x+2 (new) and disk hdr will be updated
- to reflect x + 2 entry.
- We cannot allocate the same space id to different objects. */
dict_hdr = dict_hdr_get(&mtr);
if (table_id) {
@@ -212,7 +174,7 @@ dict_hdr_create(
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_TABLES_ID,
- dict_ind_redundant, NULL, mtr);
+ dict_ind_redundant, mtr);
if (root_page_no == FIL_NULL) {
return(FALSE);
@@ -223,7 +185,7 @@ dict_hdr_create(
/*--------------------------*/
root_page_no = btr_create(DICT_UNIQUE,
fil_system.sys_space, DICT_TABLE_IDS_ID,
- dict_ind_redundant, NULL, mtr);
+ dict_ind_redundant, mtr);
if (root_page_no == FIL_NULL) {
return(FALSE);
@@ -234,7 +196,7 @@ dict_hdr_create(
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_COLUMNS_ID,
- dict_ind_redundant, NULL, mtr);
+ dict_ind_redundant, mtr);
if (root_page_no == FIL_NULL) {
return(FALSE);
@@ -245,7 +207,7 @@ dict_hdr_create(
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_INDEXES_ID,
- dict_ind_redundant, NULL, mtr);
+ dict_ind_redundant, mtr);
if (root_page_no == FIL_NULL) {
return(FALSE);
@@ -256,7 +218,7 @@ dict_hdr_create(
/*--------------------------*/
root_page_no = btr_create(DICT_CLUSTERED | DICT_UNIQUE,
fil_system.sys_space, DICT_FIELDS_ID,
- dict_ind_redundant, NULL, mtr);
+ dict_ind_redundant, mtr);
if (root_page_no == FIL_NULL) {
return(FALSE);
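
The dict0boot.cc hunks above drop the table and disable_redo parameters from dict_hdr_get_new_id(), together with the temporary-tablespace redo-logging special cases, leaving a plain three-output allocator. A condensed, illustrative view of the calling convention, assembled from the call sites elsewhere in this diff (the wrapping function is hypothetical):

	static void example_allocate_ids(dict_table_t* table, dict_index_t* index)
	{
		ulint	space_id;

		/* Pass a pointer only for the ID that is wanted;
		NULL outputs are simply not assigned. */
		dict_hdr_get_new_id(&table->id, NULL, NULL);	/* table ID only */
		dict_hdr_get_new_id(NULL, &index->id, NULL);	/* index ID only */
		dict_hdr_get_new_id(NULL, NULL, &space_id);	/* tablespace ID only */
	}
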
diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc
index 25a90342f78..d8b9bcdaf39 100644
--- a/storage/innobase/dict/dict0crea.cc
+++ b/storage/innobase/dict/dict0crea.cc
@@ -352,10 +352,12 @@ dict_build_table_def_step(
{
ut_ad(mutex_own(&dict_sys->mutex));
dict_table_t* table = node->table;
+ trx_t* trx = thr_get_trx(thr);
ut_ad(!table->is_temporary());
ut_ad(!table->space);
ut_ad(table->space_id == ULINT_UNDEFINED);
- dict_table_assign_new_id(table, thr_get_trx(thr));
+ dict_hdr_get_new_id(&table->id, NULL, NULL);
+ trx->table_id = table->id;
/* Always set this bit for all new created tables */
DICT_TF2_FLAG_SET(table, DICT_TF2_FTS_AUX_HEX_NAME);
@@ -368,8 +370,6 @@ dict_build_table_def_step(
ut_ad(DICT_TF_GET_ZIP_SSIZE(table->flags) == 0
|| dict_table_has_atomic_blobs(table));
- trx_t* trx = thr_get_trx(thr);
- ut_ad(trx->table_id);
mtr_t mtr;
trx_undo_t* undo = trx->rsegs.m_redo.undo;
if (undo && !undo->table_id
@@ -397,7 +397,7 @@ dict_build_table_def_step(
}
/* Get a new tablespace ID */
ulint space_id;
- dict_hdr_get_new_id(NULL, NULL, &space_id, table, false);
+ dict_hdr_get_new_id(NULL, NULL, &space_id);
DBUG_EXECUTE_IF(
"ib_create_table_fail_out_of_space_ids",
@@ -745,7 +745,7 @@ dict_build_index_def_step(
ut_ad((UT_LIST_GET_LEN(table->indexes) > 0)
|| dict_index_is_clust(index));
- dict_hdr_get_new_id(NULL, &index->id, NULL, table, false);
+ dict_hdr_get_new_id(NULL, &index->id, NULL);
/* Inherit the space id from the table; we store all indexes of a
table in the same tablespace */
@@ -785,7 +785,7 @@ dict_build_index_def(
ut_ad((UT_LIST_GET_LEN(table->indexes) > 0)
|| dict_index_is_clust(index));
- dict_hdr_get_new_id(NULL, &index->id, NULL, table, false);
+ dict_hdr_get_new_id(NULL, &index->id, NULL);
/* Note that the index was created by this transaction. */
index->trx_id = trx->id;
@@ -859,7 +859,7 @@ dict_create_index_tree_step(
node->page_no = btr_create(
index->type, index->table->space,
- index->id, index, NULL, &mtr);
+ index->id, index, &mtr);
if (node->page_no == FIL_NULL) {
err = DB_OUT_OF_FILE_SPACE;
@@ -905,7 +905,7 @@ dict_create_index_tree_in_mem(
ut_ad(!(index->table->flags2 & DICT_TF2_DISCARDED));
index->page = btr_create(index->type, index->table->space,
- index->id, index, NULL, &mtr);
+ index->id, index, &mtr);
mtr_commit(&mtr);
index->trx_id = trx->id;
@@ -960,28 +960,19 @@ dict_drop_index_tree(
ut_ad(len == 8);
- bool found;
- const page_size_t page_size(fil_space_get_page_size(space,
- &found));
-
- if (!found) {
- /* It is a single table tablespace and the .ibd file is
- missing: do nothing */
-
- return(false);
- }
-
- /* If tablespace is scheduled for truncate, do not try to drop
- the indexes in that tablespace. There is a truncate fixup action
- which will take care of it. */
- if (srv_is_tablespace_truncated(space)) {
- return(false);
+ if (fil_space_t* s = fil_space_acquire_silent(space)) {
+ /* Ensure that the tablespace file exists
+ in order to avoid a crash in buf_page_get_gen(). */
+ if (s->size || fil_space_get_size(space)) {
+ btr_free_if_exists(page_id_t(space, root_page_no),
+ s->zip_size(),
+ mach_read_from_8(ptr), mtr);
+ }
+ s->release();
+ return true;
}
- btr_free_if_exists(page_id_t(space, root_page_no), page_size,
- mach_read_from_8(ptr), mtr);
-
- return(true);
+ return false;
}
/*******************************************************************//**
@@ -1053,7 +1044,7 @@ dict_recreate_index_tree(
ulint root_page_no = (index->type & DICT_FTS)
? FIL_NULL
: btr_create(type, table->space,
- index_id, index, NULL, mtr);
+ index_id, index, mtr);
index->page = unsigned(root_page_no);
return root_page_no;
}
@@ -2134,6 +2125,8 @@ dict_create_add_foreigns_to_dictionary(
return(DB_ERROR);
}
+ error = DB_SUCCESS;
+
for (dict_foreign_set::const_iterator it = local_fk_set.begin();
it != local_fk_set.end();
++it) {
@@ -2145,12 +2138,11 @@ dict_create_add_foreigns_to_dictionary(
table->name.m_name, foreign, trx);
if (error != DB_SUCCESS) {
-
- return(error);
+ break;
}
}
- return(DB_SUCCESS);
+ return error;
}
/****************************************************************//**
@@ -2382,15 +2374,3 @@ dict_delete_tablespace_and_datafiles(
return(err);
}
-
-/** Assign a new table ID and put it into the table cache and the transaction.
-@param[in,out] table Table that needs an ID
-@param[in,out] trx Transaction */
-void
-dict_table_assign_new_id(
- dict_table_t* table,
- trx_t* trx)
-{
- dict_hdr_get_new_id(&table->id, NULL, NULL, table, false);
- trx->table_id = table->id;
-}
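
The removed dict_table_assign_new_id() is not replaced by another helper; its two statements are inlined at the caller in dict_build_table_def_step(), as the earlier hunk in this file shows. Restated outside the diff for readability (the surrounding context is hypothetical):

	dict_hdr_get_new_id(&table->id, NULL, NULL);	/* allocate a new table ID */
	trx->table_id = table->id;			/* remember it in the transaction */
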
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index e67f860f3e2..9c5293d0f9d 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -406,6 +406,27 @@ dict_table_stats_unlock(
}
}
+
+/** Open a persistent table.
+@param[in] table_id persistent table identifier
+@param[in] ignore_err errors to ignore
+@param[in] cached_only whether to skip loading
+@return persistent table
+@retval NULL if not found */
+static dict_table_t* dict_table_open_on_id_low(
+ table_id_t table_id,
+ dict_err_ignore_t ignore_err,
+ bool cached_only)
+{
+ dict_table_t* table = dict_sys->get_table(table_id);
+
+ if (!table && !cached_only) {
+ table = dict_load_table_on_id(table_id, ignore_err);
+ }
+
+ return table;
+}
+
/**********************************************************************//**
Try to drop any indexes after an aborted index creation.
This can also be after a server kill during DROP INDEX. */
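
dict_table_open_on_id_low() becomes a file-local static helper in dict0dict.cc: it first consults the cache via dict_sys->get_table() and falls back to dict_load_table_on_id() only when cached_only is false. A hypothetical in-file caller, shown only to illustrate the parameters:

	dict_table_t*	table = dict_table_open_on_id_low(
		table_id, DICT_ERR_IGNORE_NONE, /*cached_only=*/false);

	if (table == NULL) {
		/* neither cached nor loadable from the dictionary tables */
	}
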
@@ -416,7 +437,7 @@ dict_table_try_drop_aborted(
dict_table_t* table, /*!< in: table, or NULL if it
needs to be looked up again */
table_id_t table_id, /*!< in: table identifier */
- int32 ref_count) /*!< in: expected table->n_ref_count */
+ uint32_t ref_count) /*!< in: expected table->n_ref_count */
{
trx_t* trx;
@@ -884,47 +905,29 @@ dict_index_get_nth_col_or_prefix_pos(
return(ULINT_UNDEFINED);
}
-/** Returns TRUE if the index contains a column or a prefix of that column.
-@param[in] index index
+/** Check if the index contains a column or a prefix of that column.
@param[in] n column number
@param[in] is_virtual whether it is a virtual col
-@return TRUE if contains the column or its prefix */
-bool
-dict_index_contains_col_or_prefix(
- const dict_index_t* index,
- ulint n,
- bool is_virtual)
+@return whether the index contains the column or its prefix */
+bool dict_index_t::contains_col_or_prefix(ulint n, bool is_virtual) const
{
- const dict_field_t* field;
- const dict_col_t* col;
- ulint pos;
- ulint n_fields;
-
- ut_ad(index);
- ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
+ ut_ad(magic_n == DICT_INDEX_MAGIC_N);
- if (dict_index_is_clust(index)) {
+ if (is_primary()) {
return(!is_virtual);
}
- if (is_virtual) {
- col = &dict_table_get_nth_v_col(index->table, n)->m_col;
- } else {
- col = dict_table_get_nth_col(index->table, n);
- }
+ const dict_col_t* col = is_virtual
+ ? &dict_table_get_nth_v_col(table, n)->m_col
+ : dict_table_get_nth_col(table, n);
- n_fields = dict_index_get_n_fields(index);
-
- for (pos = 0; pos < n_fields; pos++) {
- field = dict_index_get_nth_field(index, pos);
-
- if (col == field->col) {
-
- return(true);
+ for (ulint pos = 0; pos < n_fields; pos++) {
+ if (col == fields[pos].col) {
+ return true;
}
}
- return(false);
+ return false;
}
/********************************************************************//**
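
The free function dict_index_contains_col_or_prefix(index, n, is_virtual) turns into the member dict_index_t::contains_col_or_prefix(n, is_virtual). An illustrative before/after at a call site (the call site itself is hypothetical):

	/* before */
	if (dict_index_contains_col_or_prefix(index, n, is_virtual)) { /* ... */ }

	/* after */
	if (index->contains_col_or_prefix(n, is_virtual)) { /* ... */ }
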
@@ -1081,20 +1084,19 @@ dict_init(void)
dict_operation_lock = static_cast<rw_lock_t*>(
ut_zalloc_nokey(sizeof(*dict_operation_lock)));
- dict_sys = static_cast<dict_sys_t*>(ut_zalloc_nokey(sizeof(*dict_sys)));
+ dict_sys = new (ut_zalloc_nokey(sizeof(*dict_sys))) dict_sys_t();
UT_LIST_INIT(dict_sys->table_LRU, &dict_table_t::table_LRU);
UT_LIST_INIT(dict_sys->table_non_LRU, &dict_table_t::table_LRU);
mutex_create(LATCH_ID_DICT_SYS, &dict_sys->mutex);
- dict_sys->table_hash = hash_create(
- buf_pool_get_curr_size()
- / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE));
+ const ulint hash_size = buf_pool_get_curr_size()
+ / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE);
- dict_sys->table_id_hash = hash_create(
- buf_pool_get_curr_size()
- / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE));
+ dict_sys->table_hash = hash_create(hash_size);
+ dict_sys->table_id_hash = hash_create(hash_size);
+ dict_sys->temp_id_hash = hash_create(hash_size);
rw_lock_create(dict_operation_lock_key,
dict_operation_lock, SYNC_DICT_OPERATION);
@@ -1253,8 +1255,7 @@ dict_table_add_system_columns(
}
/** Add the table definition to the data dictionary cache */
-void
-dict_table_t::add_to_cache()
+void dict_table_t::add_to_cache()
{
ut_ad(dict_lru_validate());
ut_ad(mutex_own(&dict_sys->mutex));
@@ -1262,7 +1263,6 @@ dict_table_t::add_to_cache()
cached = TRUE;
ulint fold = ut_fold_string(name.m_name);
- ulint id_fold = ut_fold_ull(id);
/* Look for a table with the same name: error if such exists */
{
@@ -1280,31 +1280,30 @@ dict_table_t::add_to_cache()
ut_ad(table2 == NULL);
#endif /* UNIV_DEBUG */
}
+ HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash, fold,
+ this);
/* Look for a table with the same id: error if such exists */
+ hash_table_t* id_hash = is_temporary()
+ ? dict_sys->temp_id_hash : dict_sys->table_id_hash;
+ const ulint id_fold = ut_fold_ull(id);
{
dict_table_t* table2;
- HASH_SEARCH(id_hash, dict_sys->table_id_hash, id_fold,
+ HASH_SEARCH(id_hash, id_hash, id_fold,
dict_table_t*, table2, ut_ad(table2->cached),
table2->id == id);
ut_a(table2 == NULL);
#ifdef UNIV_DEBUG
/* Look for the same table pointer with a different id */
- HASH_SEARCH_ALL(id_hash, dict_sys->table_id_hash,
+ HASH_SEARCH_ALL(id_hash, id_hash,
dict_table_t*, table2, ut_ad(table2->cached),
table2 == this);
ut_ad(table2 == NULL);
#endif /* UNIV_DEBUG */
- }
- /* Add table to hash table of tables */
- HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash, fold,
- this);
-
- /* Add table to hash table of tables based on table id */
- HASH_INSERT(dict_table_t, id_hash, dict_sys->table_id_hash, id_fold,
- this);
+ HASH_INSERT(dict_table_t, id_hash, id_hash, id_fold, this);
+ }
if (can_be_evicted) {
UT_LIST_ADD_FIRST(dict_sys->table_LRU, this);
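
A new convention appears here and again in dict_table_remove_from_cache() and dict_resize() below: a table ID is hashed into dict_sys->table_id_hash, or into the new dict_sys->temp_id_hash when the table is temporary. Condensed from the hunks (illustrative only):

	hash_table_t*	id_hash = table->is_temporary()
		? dict_sys->temp_id_hash : dict_sys->table_id_hash;
	HASH_INSERT(dict_table_t, id_hash, id_hash,
		    ut_fold_ull(table->id), table);
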
@@ -1429,7 +1428,7 @@ dict_make_room_in_cache(
ut_ad(0);
}
};);
- dict_table_remove_from_cache_low(table, TRUE);
+ dict_table_remove_from_cache(table, true);
++n_evicted;
}
@@ -1950,6 +1949,7 @@ dict_table_change_id_in_cache(
{
ut_ad(mutex_own(&dict_sys->mutex));
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
+ ut_ad(!table->is_temporary());
/* Remove the table from the hash table of id's */
@@ -1962,14 +1962,11 @@ dict_table_change_id_in_cache(
ut_fold_ull(table->id), table);
}
-/**********************************************************************//**
-Removes a table object from the dictionary cache. */
-void
-dict_table_remove_from_cache_low(
-/*=============================*/
- dict_table_t* table, /*!< in, own: table */
- ibool lru_evict) /*!< in: TRUE if table being evicted
- to make room in the table LRU list */
+/** Evict a table definition from the InnoDB data dictionary cache.
+@param[in,out] table cached table definition to be evicted
+@param[in] lru whether this is part of least-recently-used eviction
+@param[in] keep whether to keep (not free) the object */
+void dict_table_remove_from_cache(dict_table_t* table, bool lru, bool keep)
{
dict_foreign_t* foreign;
dict_index_t* index;
@@ -2001,7 +1998,7 @@ dict_table_remove_from_cache_low(
index != NULL;
index = UT_LIST_GET_LAST(table->indexes)) {
- dict_index_remove_from_cache_low(table, index, lru_evict);
+ dict_index_remove_from_cache_low(table, index, lru);
}
/* Remove table from the hash tables of tables */
@@ -2009,8 +2006,10 @@ dict_table_remove_from_cache_low(
HASH_DELETE(dict_table_t, name_hash, dict_sys->table_hash,
ut_fold_string(table->name.m_name), table);
- HASH_DELETE(dict_table_t, id_hash, dict_sys->table_id_hash,
- ut_fold_ull(table->id), table);
+ hash_table_t* id_hash = table->is_temporary()
+ ? dict_sys->temp_id_hash : dict_sys->table_id_hash;
+ const ulint id_fold = ut_fold_ull(table->id);
+ HASH_DELETE(dict_table_t, id_hash, id_hash, id_fold, table);
/* Remove table from LRU or non-LRU list. */
if (table->can_be_evicted) {
@@ -2023,7 +2022,7 @@ dict_table_remove_from_cache_low(
ut_ad(dict_lru_validate());
- if (lru_evict && table->drop_aborted) {
+ if (lru && table->drop_aborted) {
/* When evicting the table definition,
drop the orphan indexes from the data dictionary
and free the index pages. */
@@ -2048,17 +2047,9 @@ dict_table_remove_from_cache_low(
UT_DELETE(table->vc_templ);
}
- dict_mem_table_free(table);
-}
-
-/**********************************************************************//**
-Removes a table object from the dictionary cache. */
-void
-dict_table_remove_from_cache(
-/*=========================*/
- dict_table_t* table) /*!< in, own: table */
-{
- dict_table_remove_from_cache_low(table, FALSE);
+ if (!keep) {
+ dict_mem_table_free(table);
+ }
}
/****************************************************************//**
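
dict_table_remove_from_cache_low() and dict_table_remove_from_cache() are merged into a single dict_table_remove_from_cache(table, lru, keep). The call sites visible in this diff pass two arguments (for example dict_make_room_in_cache() above), so the trailing parameters are presumably defaulted in the header, which is not part of this section. Illustrative calls:

	dict_table_remove_from_cache(table, true);		/* LRU eviction; frees the object */
	dict_table_remove_from_cache(table, lru, /*keep=*/true);	/* remove from cache, caller frees the object */
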
@@ -2121,10 +2112,9 @@ dict_index_too_big_for_tree(
comp = dict_table_is_comp(table);
- const page_size_t page_size(dict_tf_get_page_size(table->flags));
+ const ulint zip_size = dict_tf_get_zip_size(table->flags);
- if (page_size.is_compressed()
- && page_size.physical() < srv_page_size) {
+ if (zip_size && zip_size < srv_page_size) {
/* On a compressed page, two records must fit in the
uncompressed page modification log. On compressed pages
with size.physical() == srv_page_size,
@@ -2135,7 +2125,7 @@ dict_index_too_big_for_tree(
number in the page modification log. The maximum
allowed node pointer size is half that. */
page_rec_max = page_zip_empty_size(new_index->n_fields,
- page_size.physical());
+ zip_size);
if (page_rec_max) {
page_rec_max--;
}
@@ -5436,46 +5426,6 @@ dict_index_build_node_ptr(
return(tuple);
}
-/**********************************************************************//**
-Copies an initial segment of a physical record, long enough to specify an
-index entry uniquely.
-@return pointer to the prefix record */
-rec_t*
-dict_index_copy_rec_order_prefix(
-/*=============================*/
- const dict_index_t* index, /*!< in: index */
- const rec_t* rec, /*!< in: record for which to
- copy prefix */
- ulint* n_fields,/*!< out: number of fields copied */
- byte** buf, /*!< in/out: memory buffer for the
- copied prefix, or NULL */
- ulint* buf_size)/*!< in/out: buffer size */
-{
- ulint n;
-
- UNIV_PREFETCH_R(rec);
-
- if (dict_index_is_ibuf(index)) {
- ut_ad(!dict_table_is_comp(index->table));
- n = rec_get_n_fields_old(rec);
- } else {
- if (page_rec_is_leaf(rec)) {
- n = dict_index_get_n_unique_in_tree(index);
- } else if (dict_index_is_spatial(index)) {
- ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index)
- == DICT_INDEX_SPATIAL_NODEPTR_SIZE);
- /* For R-tree, we have to compare
- the child page numbers as well. */
- n = DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1;
- } else {
- n = dict_index_get_n_unique_in_tree(index);
- }
- }
-
- *n_fields = n;
- return(rec_copy_prefix_to_buf(rec, index, n, buf, buf_size));
-}
-
/** Convert a physical record into a search tuple.
@param[in] rec index record (not necessarily in an index page)
@param[in] index index
@@ -6522,17 +6472,17 @@ dict_resize()
/* all table entries are in table_LRU and table_non_LRU lists */
hash_table_free(dict_sys->table_hash);
hash_table_free(dict_sys->table_id_hash);
+ hash_table_free(dict_sys->temp_id_hash);
- dict_sys->table_hash = hash_create(
- buf_pool_get_curr_size()
- / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE));
-
- dict_sys->table_id_hash = hash_create(
- buf_pool_get_curr_size()
- / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE));
+ const ulint hash_size = buf_pool_get_curr_size()
+ / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE);
+ dict_sys->table_hash = hash_create(hash_size);
+ dict_sys->table_id_hash = hash_create(hash_size);
+ dict_sys->temp_id_hash = hash_create(hash_size);
for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table;
table = UT_LIST_GET_NEXT(table_LRU, table)) {
+ ut_ad(!table->is_temporary());
ulint fold = ut_fold_string(table->name.m_name);
ulint id_fold = ut_fold_ull(table->id);
@@ -6551,8 +6501,10 @@ dict_resize()
HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash,
fold, table);
- HASH_INSERT(dict_table_t, id_hash, dict_sys->table_id_hash,
- id_fold, table);
+ hash_table_t* id_hash = table->is_temporary()
+ ? dict_sys->temp_id_hash : dict_sys->table_id_hash;
+
+ HASH_INSERT(dict_table_t, id_hash, id_hash, id_fold, table);
}
mutex_exit(&dict_sys->mutex);
@@ -6575,7 +6527,7 @@ dict_close(void)
/* Free the hash elements. We don't remove them from the table
because we are going to destroy the table anyway. */
- for (ulint i = 0; i < hash_get_n_cells(dict_sys->table_id_hash); i++) {
+ for (ulint i = 0; i < hash_get_n_cells(dict_sys->table_hash); i++) {
dict_table_t* table;
table = static_cast<dict_table_t*>(
@@ -6596,6 +6548,7 @@ dict_close(void)
/* The elements are the same instance as in dict_sys->table_hash,
therefore we don't delete the individual elements. */
hash_table_free(dict_sys->table_id_hash);
+ hash_table_free(dict_sys->temp_id_hash);
mutex_exit(&dict_sys->mutex);
mutex_free(&dict_sys->mutex);
@@ -6825,6 +6778,7 @@ dict_index_zip_pad_update(
ulint fail_pct;
ut_ad(info);
+ ut_ad(info->pad % ZIP_PAD_INCR == 0);
total = info->success + info->failure;
@@ -6849,17 +6803,16 @@ dict_index_zip_pad_update(
if (fail_pct > zip_threshold) {
/* Compression failures are more then user defined
threshold. Increase the pad size to reduce chances of
- compression failures. */
- ut_ad(info->pad % ZIP_PAD_INCR == 0);
+ compression failures.
- /* Only do increment if it won't increase padding
+ Only do increment if it won't increase padding
beyond max pad size. */
if (info->pad + ZIP_PAD_INCR
< (srv_page_size * zip_pad_max) / 100) {
/* Use atomics even though we have the mutex.
This is to ensure that we are able to read
info->pad atomically. */
- my_atomic_addlint(&info->pad, ZIP_PAD_INCR);
+ info->pad += ZIP_PAD_INCR;
MONITOR_INC(MONITOR_PAD_INCREMENTS);
}
@@ -6877,11 +6830,10 @@ dict_index_zip_pad_update(
if (info->n_rounds >= ZIP_PAD_SUCCESSFUL_ROUND_LIMIT
&& info->pad > 0) {
- ut_ad(info->pad % ZIP_PAD_INCR == 0);
/* Use atomics even though we have the mutex.
This is to ensure that we are able to read
info->pad atomically. */
- my_atomic_addlint(&info->pad, ulint(-ZIP_PAD_INCR));
+ info->pad -= ZIP_PAD_INCR;
info->n_rounds = 0;
@@ -6948,7 +6900,7 @@ dict_index_zip_pad_optimal_page_size(
return(srv_page_size);
}
- pad = my_atomic_loadlint(&index->zip_pad.pad);
+ pad = index->zip_pad.pad;
ut_ad(pad < srv_page_size);
sz = srv_page_size - pad;
@@ -7090,52 +7042,3 @@ dict_space_get_id(
return(id);
}
-
-/** Determine the extent size (in pages) for the given table
-@param[in] table the table whose extent size is being
- calculated.
-@return extent size in pages (256, 128 or 64) */
-ulint
-dict_table_extent_size(
- const dict_table_t* table)
-{
- const ulint mb_1 = 1024 * 1024;
- const ulint mb_2 = 2 * mb_1;
- const ulint mb_4 = 4 * mb_1;
-
- page_size_t page_size = dict_table_page_size(table);
- ulint pages_in_extent = FSP_EXTENT_SIZE;
-
- if (page_size.is_compressed()) {
-
- ulint disk_page_size = page_size.physical();
-
- switch (disk_page_size) {
- case 1024:
- pages_in_extent = mb_1/1024;
- break;
- case 2048:
- pages_in_extent = mb_1/2048;
- break;
- case 4096:
- pages_in_extent = mb_1/4096;
- break;
- case 8192:
- pages_in_extent = mb_1/8192;
- break;
- case 16384:
- pages_in_extent = mb_1/16384;
- break;
- case 32768:
- pages_in_extent = mb_2/32768;
- break;
- case 65536:
- pages_in_extent = mb_4/65536;
- break;
- default:
- ut_ad(0);
- }
- }
-
- return(pages_in_extent);
-}
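
Throughout this diff the page_size_t object gives way to a plain ulint zip_size: zero means an uncompressed page of srv_page_size bytes, while a non-zero value is the ROW_FORMAT=COMPRESSED physical page size. The convention, as used for example in fil_encrypt_buf_for_non_full_checksum() later in this diff, boils down to the following (hypothetical helper):

	static ulint example_physical_size(ulint zip_size)
	{
		return zip_size ? zip_size : srv_page_size;
	}
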
diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc
index 6cb62f7e256..02da027f7a4 100644
--- a/storage/innobase/dict/dict0load.cc
+++ b/storage/innobase/dict/dict0load.cc
@@ -424,6 +424,8 @@ dict_process_sys_indexes_rec(
const char* err_msg;
byte* buf;
+ ut_d(index->is_dummy = true);
+ ut_d(index->in_instant_init = false);
buf = static_cast<byte*>(mem_heap_alloc(heap, 8));
/* Parse the record, and get "dict_index_t" struct filled */
@@ -3009,8 +3011,7 @@ corrupted:
dict_table_get_first_index(table)->page);
mtr.start();
buf_block_t* block = buf_page_get(
- page_id,
- dict_table_page_size(table),
+ page_id, table->space->zip_size(),
RW_S_LATCH, &mtr);
const bool corrupted = !block
|| page_get_space_id(block->frame)
diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc
index b55cb51aef6..e33a4819134 100644
--- a/storage/innobase/dict/dict0mem.cc
+++ b/storage/innobase/dict/dict0mem.cc
@@ -185,8 +185,6 @@ dict_mem_table_create(
|| DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_ADD_DOC_ID)) {
table->fts = fts_create(table);
table->fts->cache = fts_cache_create(table);
- } else {
- table->fts = NULL;
}
new(&table->foreign_set) dict_foreign_set();
@@ -526,6 +524,14 @@ dict_mem_table_col_rename_low(
= dict_index_get_nth_field(
index, i);
+ ut_ad(!field->name
+ == field->col->is_dropped());
+ if (!field->name) {
+ /* dropped columns lack a name */
+ ut_ad(index->is_instant());
+ continue;
+ }
+
/* if is_virtual and that in field->col does
not match, continue */
if ((!is_virtual) !=
@@ -710,6 +716,7 @@ dict_mem_fill_column_struct(
column->mbmaxlen = mbmaxlen;
column->def_val.data = NULL;
column->def_val.len = UNIV_SQL_DEFAULT;
+ ut_ad(!column->is_dropped());
}
/**********************************************************************//**
@@ -1159,293 +1166,122 @@ operator<< (std::ostream& out, const dict_foreign_set& fk_set)
return(out);
}
-/** Adjust clustered index metadata for instant ADD COLUMN.
-@param[in] clustered index definition after instant ADD COLUMN */
-inline void dict_index_t::instant_add_field(const dict_index_t& instant)
+/** Reconstruct the clustered index fields. */
+inline void dict_index_t::reconstruct_fields()
{
DBUG_ASSERT(is_primary());
- DBUG_ASSERT(instant.is_primary());
- DBUG_ASSERT(!instant.is_instant());
- DBUG_ASSERT(n_def == n_fields);
- DBUG_ASSERT(instant.n_def == instant.n_fields);
-
- DBUG_ASSERT(type == instant.type);
- DBUG_ASSERT(trx_id_offset == instant.trx_id_offset);
- DBUG_ASSERT(n_user_defined_cols == instant.n_user_defined_cols);
- DBUG_ASSERT(n_uniq == instant.n_uniq);
- DBUG_ASSERT(instant.n_fields > n_fields);
- DBUG_ASSERT(instant.n_def > n_def);
- DBUG_ASSERT(instant.n_nullable >= n_nullable);
- DBUG_ASSERT(instant.n_core_fields >= n_core_fields);
- DBUG_ASSERT(instant.n_core_null_bytes >= n_core_null_bytes);
-
- n_fields = instant.n_fields;
- n_def = instant.n_def;
- n_nullable = instant.n_nullable;
- fields = static_cast<dict_field_t*>(
- mem_heap_dup(heap, instant.fields, n_fields * sizeof *fields));
-
- ut_d(unsigned n_null = 0);
-
- for (unsigned i = 0; i < n_fields; i++) {
- DBUG_ASSERT(fields[i].same(instant.fields[i]));
- const dict_col_t* icol = instant.fields[i].col;
- DBUG_ASSERT(!icol->is_virtual());
- dict_col_t* col = fields[i].col = &table->cols[
- icol - instant.table->cols];
- fields[i].name = col->name(*table);
- ut_d(n_null += col->is_nullable());
- }
- ut_ad(n_null == n_nullable);
-}
+ n_fields += table->instant->n_dropped;
+ n_def += table->instant->n_dropped;
-/** Adjust metadata for instant ADD COLUMN.
-@param[in] table table definition after instant ADD COLUMN */
-void dict_table_t::instant_add_column(const dict_table_t& table)
-{
- DBUG_ASSERT(!table.cached);
- DBUG_ASSERT(table.n_def == table.n_cols);
- DBUG_ASSERT(table.n_t_def == table.n_t_cols);
- DBUG_ASSERT(n_def == n_cols);
- DBUG_ASSERT(n_t_def == n_t_cols);
- DBUG_ASSERT(table.n_cols > n_cols);
- ut_ad(mutex_own(&dict_sys->mutex));
-
- const char* end = table.col_names;
- for (unsigned i = table.n_cols; i--; ) end += strlen(end) + 1;
-
- col_names = static_cast<char*>(
- mem_heap_dup(heap, table.col_names,
- ulint(end - table.col_names)));
- const dict_col_t* const old_cols = cols;
- const dict_col_t* const old_cols_end = cols + n_cols;
- cols = static_cast<dict_col_t*>(mem_heap_dup(heap, table.cols,
- table.n_cols
- * sizeof *cols));
-
- /* Preserve the default values of previously instantly
- added columns. */
- for (unsigned i = unsigned(n_cols) - DATA_N_SYS_COLS; i--; ) {
- cols[i].def_val = old_cols[i].def_val;
- }
+ const unsigned n_first = first_user_field();
+
+ dict_field_t* tfields = static_cast<dict_field_t*>(
+ mem_heap_zalloc(heap, n_fields * sizeof *fields));
+
+ memcpy(tfields, fields, n_first * sizeof *fields);
- /* Copy the new default values to this->heap. */
- for (unsigned i = n_cols; i < table.n_cols; i++) {
- dict_col_t& c = cols[i - DATA_N_SYS_COLS];
- DBUG_ASSERT(c.is_instant());
- if (c.def_val.len == 0) {
- c.def_val.data = field_ref_zero;
- } else if (const void*& d = c.def_val.data) {
- d = mem_heap_dup(heap, d, c.def_val.len);
+ n_nullable = 0;
+ ulint n_core_null = 0;
+ const bool comp = dict_table_is_comp(table);
+ const auto* field_map_it = table->instant->field_map;
+ for (unsigned i = n_first, j = 0; i < n_fields; ) {
+ dict_field_t& f = tfields[i++];
+ auto c = *field_map_it++;
+ if (c.is_dropped()) {
+ f.col = &table->instant->dropped[j++];
+ DBUG_ASSERT(f.col->is_dropped());
+ f.fixed_len = dict_col_get_fixed_size(f.col, comp);
} else {
- DBUG_ASSERT(c.def_val.len == UNIV_SQL_NULL);
+ DBUG_ASSERT(!c.is_not_null());
+ const auto old = std::find_if(
+ fields + n_first, fields + n_fields,
+ [c](const dict_field_t& o)
+ { return o.col->ind == c.ind(); });
+ ut_ad(old >= &fields[n_first]);
+ ut_ad(old < &fields[n_fields]);
+ DBUG_ASSERT(!old->prefix_len);
+ DBUG_ASSERT(old->col == &table->cols[c.ind()]);
+ f = *old;
}
- }
- const unsigned old_n_cols = n_cols;
- const unsigned n_add = unsigned(table.n_cols - n_cols);
-
- n_t_def += n_add;
- n_t_cols += n_add;
- n_cols = table.n_cols;
- n_def = n_cols;
-
- for (unsigned i = n_v_def; i--; ) {
- const dict_v_col_t& v = v_cols[i];
- for (ulint n = v.num_base; n--; ) {
- dict_col_t*& base = v.base_col[n];
- if (!base->is_virtual()) {
- DBUG_ASSERT(base >= old_cols);
- size_t n = size_t(base - old_cols);
- DBUG_ASSERT(n + DATA_N_SYS_COLS < old_n_cols);
- base = &cols[n];
- }
+ f.col->clear_instant();
+ if (f.col->is_nullable()) {
+ n_nullable++;
+ n_core_null += i <= n_core_fields;
}
}
- dict_index_t* index = dict_table_get_first_index(this);
-
- index->instant_add_field(*dict_table_get_first_index(&table));
-
- while ((index = dict_table_get_next_index(index)) != NULL) {
- for (unsigned i = 0; i < index->n_fields; i++) {
- dict_field_t& field = index->fields[i];
- if (field.col < old_cols
- || field.col >= old_cols_end) {
- DBUG_ASSERT(field.col->is_virtual());
- } else {
- /* Secondary indexes may contain user
- columns and DB_ROW_ID (if there is
- GEN_CLUST_INDEX instead of PRIMARY KEY),
- but not DB_TRX_ID,DB_ROLL_PTR. */
- DBUG_ASSERT(field.col >= old_cols);
- size_t n = size_t(field.col - old_cols);
- DBUG_ASSERT(n + DATA_N_SYS_COLS <= old_n_cols);
- if (n + DATA_N_SYS_COLS >= old_n_cols) {
- /* Replace DB_ROW_ID */
- n += n_add;
- }
- field.col = &cols[n];
- DBUG_ASSERT(!field.col->is_virtual());
- field.name = field.col->name(*this);
- }
- }
- }
+ fields = tfields;
+ n_core_null_bytes = UT_BITS_IN_BYTES(n_core_null);
}
-/** Roll back instant_add_column().
-@param[in] old_n_cols original n_cols
-@param[in] old_cols original cols
-@param[in] old_col_names original col_names */
-void
-dict_table_t::rollback_instant(
- unsigned old_n_cols,
- dict_col_t* old_cols,
- const char* old_col_names)
+/** Reconstruct dropped or reordered columns.
+@param[in] metadata data from serialise_columns()
+@param[in] len length of the metadata, in bytes
+@return whether parsing the metadata failed */
+bool dict_table_t::deserialise_columns(const byte* metadata, ulint len)
{
- ut_ad(mutex_own(&dict_sys->mutex));
- dict_index_t* index = indexes.start;
- /* index->is_instant() does not necessarily hold here, because
- the table may have been emptied */
- DBUG_ASSERT(old_n_cols >= DATA_N_SYS_COLS);
- DBUG_ASSERT(n_cols >= old_n_cols);
- DBUG_ASSERT(n_cols == n_def);
- DBUG_ASSERT(index->n_def == index->n_fields);
-
- const unsigned n_remove = n_cols - old_n_cols;
-
- for (unsigned i = index->n_fields - n_remove; i < index->n_fields;
- i++) {
- if (index->fields[i].col->is_nullable()) {
- index->n_nullable--;
- }
- }
+ DBUG_ASSERT(!instant);
- index->n_fields -= n_remove;
- index->n_def = index->n_fields;
- if (index->n_core_fields > index->n_fields) {
- index->n_core_fields = index->n_fields;
- index->n_core_null_bytes
- = UT_BITS_IN_BYTES(unsigned(index->n_nullable));
- }
+ unsigned num_non_pk_fields = mach_read_from_4(metadata);
+ metadata += 4;
- const dict_col_t* const new_cols = cols;
- const dict_col_t* const new_cols_end = cols + n_cols;
-
- cols = old_cols;
- col_names = old_col_names;
- n_cols = old_n_cols;
- n_def = old_n_cols;
- n_t_def -= n_remove;
- n_t_cols -= n_remove;
-
- for (unsigned i = n_v_def; i--; ) {
- const dict_v_col_t& v = v_cols[i];
- for (ulint n = v.num_base; n--; ) {
- dict_col_t*& base = v.base_col[n];
- if (!base->is_virtual()) {
- base = &cols[base - new_cols];
- }
- }
+ if (num_non_pk_fields >= REC_MAX_N_FIELDS - 3) {
+ return true;
}
- do {
- for (unsigned i = 0; i < index->n_fields; i++) {
- dict_field_t& field = index->fields[i];
- if (field.col < new_cols
- || field.col >= new_cols_end) {
- DBUG_ASSERT(field.col->is_virtual());
- } else {
- DBUG_ASSERT(field.col >= new_cols);
- size_t n = size_t(field.col - new_cols);
- DBUG_ASSERT(n <= n_cols);
- if (n + DATA_N_SYS_COLS >= n_cols) {
- n -= n_remove;
- }
- field.col = &cols[n];
- DBUG_ASSERT(!field.col->is_virtual());
- field.name = field.col->name(*this);
- }
- }
- } while ((index = dict_table_get_next_index(index)) != NULL);
-}
+ dict_index_t* index = UT_LIST_GET_FIRST(indexes);
-/** Trim the instantly added columns when an insert into SYS_COLUMNS
-is rolled back during ALTER TABLE or recovery.
-@param[in] n number of surviving non-system columns */
-void dict_table_t::rollback_instant(unsigned n)
-{
- ut_ad(mutex_own(&dict_sys->mutex));
- dict_index_t* index = indexes.start;
- DBUG_ASSERT(index->is_instant());
- DBUG_ASSERT(index->n_def == index->n_fields);
- DBUG_ASSERT(n_cols == n_def);
- DBUG_ASSERT(n >= index->n_uniq);
- DBUG_ASSERT(n_cols > n + DATA_N_SYS_COLS);
- const unsigned n_remove = n_cols - n - DATA_N_SYS_COLS;
-
- char* names = const_cast<char*>(dict_table_get_col_name(this, n));
- const char* sys = names;
- for (unsigned i = n_remove; i--; ) {
- sys += strlen(sys) + 1;
+ if (num_non_pk_fields < unsigned(index->n_fields)
+ - index->first_user_field()) {
+ return true;
}
- static const char system[] = "DB_ROW_ID\0DB_TRX_ID\0DB_ROLL_PTR";
- DBUG_ASSERT(!memcmp(sys, system, sizeof system));
- for (unsigned i = index->n_fields - n_remove; i < index->n_fields;
- i++) {
- if (index->fields[i].col->is_nullable()) {
- index->n_nullable--;
+
+ field_map_element_t* field_map = static_cast<field_map_element_t*>(
+ mem_heap_alloc(heap,
+ num_non_pk_fields * sizeof *field_map));
+
+ unsigned n_dropped_cols = 0;
+
+ for (unsigned i = 0; i < num_non_pk_fields; i++) {
+ auto c = field_map[i] = mach_read_from_2(metadata);
+ metadata += 2;
+
+ if (field_map[i].is_dropped()) {
+ if (c.ind() > DICT_MAX_FIXED_COL_LEN + 1) {
+ return true;
+ }
+ n_dropped_cols++;
+ } else if (c >= n_cols) {
+ return true;
}
}
- index->n_fields -= n_remove;
- index->n_def = index->n_fields;
- memmove(names, sys, sizeof system);
- memmove(cols + n, cols + n_cols - DATA_N_SYS_COLS,
- DATA_N_SYS_COLS * sizeof *cols);
- n_cols -= n_remove;
- n_def = n_cols;
- n_t_cols -= n_remove;
- n_t_def -= n_remove;
-
- for (unsigned i = DATA_N_SYS_COLS; i--; ) {
- cols[n_cols - i].ind--;
- }
- if (dict_index_is_auto_gen_clust(index)) {
- DBUG_ASSERT(index->n_uniq == 1);
- dict_field_t* field = index->fields;
- field->name = sys;
- field->col = dict_table_get_sys_col(this, DATA_ROW_ID);
- field++;
- field->name = sys + sizeof "DB_ROW_ID";
- field->col = dict_table_get_sys_col(this, DATA_TRX_ID);
- field++;
- field->name = sys + sizeof "DB_ROW_ID\0DB_TRX_ID";
- field->col = dict_table_get_sys_col(this, DATA_ROLL_PTR);
-
- /* Replace the DB_ROW_ID column in secondary indexes. */
- while ((index = dict_table_get_next_index(index)) != NULL) {
- field = &index->fields[index->n_fields - 1];
- DBUG_ASSERT(field->col->mtype == DATA_SYS);
- DBUG_ASSERT(field->col->prtype
- == DATA_NOT_NULL + DATA_TRX_ID);
- field->col--;
- field->name = sys;
+ dict_col_t* dropped_cols = static_cast<dict_col_t*>(mem_heap_zalloc(
+ heap, n_dropped_cols * sizeof(dict_col_t)));
+ instant = new (mem_heap_alloc(heap, sizeof *instant)) dict_instant_t();
+ instant->n_dropped = n_dropped_cols;
+ instant->dropped = dropped_cols;
+ instant->field_map = field_map;
+
+ dict_col_t* col = dropped_cols;
+ for (unsigned i = 0; i < num_non_pk_fields; i++) {
+ if (field_map[i].is_dropped()) {
+ auto fixed_len = field_map[i].ind();
+ DBUG_ASSERT(fixed_len <= DICT_MAX_FIXED_COL_LEN + 1);
+ (col++)->set_dropped(field_map[i].is_not_null(),
+ fixed_len == 1,
+ fixed_len > 1 ? fixed_len - 1
+ : 0);
}
-
- return;
}
+ DBUG_ASSERT(col == &dropped_cols[n_dropped_cols]);
- dict_field_t* field = &index->fields[index->n_uniq];
- field->name = sys + sizeof "DB_ROW_ID";
- field->col = dict_table_get_sys_col(this, DATA_TRX_ID);
- field++;
- field->name = sys + sizeof "DB_ROW_ID\0DB_TRX_ID";
- field->col = dict_table_get_sys_col(this, DATA_ROLL_PTR);
+ UT_LIST_GET_FIRST(indexes)->reconstruct_fields();
+ return false;
}
-
/** Check if record in clustered index is historical row.
@param[in] rec clustered row
@param[in] offsets offsets
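
From the parsing code in dict_table_t::deserialise_columns() above, the instant-ALTER metadata blob appears to be a 4-byte count of non-PK index fields followed by one 2-byte field_map_element_t per field; a dropped column carries a NOT NULL flag and a fixed-length value, while a surviving column carries its column index. A minimal standalone reader sketch under that reading of the hunk (no validation, purely illustrative):

	static void example_read_metadata(const byte* metadata)
	{
		unsigned	n = mach_read_from_4(metadata);	/* non-PK fields */
		metadata += 4;

		for (unsigned i = 0; i < n; i++) {
			unsigned	e = mach_read_from_2(metadata);
			metadata += 2;
			/* e is a field_map_element_t: either a dropped-column
			descriptor or the index of a surviving column */
			(void) e;
		}
	}
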
diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc
index b9185b99aa4..9492d9ed711 100644
--- a/storage/innobase/dict/dict0stats.cc
+++ b/storage/innobase/dict/dict0stats.cc
@@ -1505,7 +1505,7 @@ dict_stats_analyze_index_below_cur(
page_id_t page_id(index->table->space_id,
btr_node_ptr_get_child_page_no(
rec, offsets_rec));
- const page_size_t page_size(index->table->space->flags);
+ const ulint zip_size = index->table->space->zip_size();
/* assume no external pages by default - in case we quit from this
function without analyzing any leaf pages */
@@ -1518,7 +1518,7 @@ dict_stats_analyze_index_below_cur(
dberr_t err = DB_SUCCESS;
- block = buf_page_get_gen(page_id, page_size, RW_S_LATCH,
+ block = buf_page_get_gen(page_id, zip_size, RW_S_LATCH,
NULL /* no guessed block */,
BUF_GET, __FILE__, __LINE__, &mtr, &err);
@@ -3840,6 +3840,63 @@ dict_stats_rename_table(
return(ret);
}
+/*********************************************************************//**
+Renames an index in InnoDB persistent stats storage.
+This function creates its own transaction and commits it.
+@return DB_SUCCESS or error code. DB_STATS_DO_NOT_EXIST will be returned
+if the persistent stats do not exist. */
+dberr_t
+dict_stats_rename_index(
+/*====================*/
+ const dict_table_t* table, /*!< in: table whose index
+ is renamed */
+ const char* old_index_name, /*!< in: old index name */
+ const char* new_index_name) /*!< in: new index name */
+{
+ rw_lock_x_lock(dict_operation_lock);
+ mutex_enter(&dict_sys->mutex);
+
+ if (!dict_stats_persistent_storage_check(true)) {
+ mutex_exit(&dict_sys->mutex);
+ rw_lock_x_unlock(dict_operation_lock);
+ return(DB_STATS_DO_NOT_EXIST);
+ }
+
+ char dbname_utf8[MAX_DB_UTF8_LEN];
+ char tablename_utf8[MAX_TABLE_UTF8_LEN];
+
+ dict_fs2utf8(table->name.m_name, dbname_utf8, sizeof(dbname_utf8),
+ tablename_utf8, sizeof(tablename_utf8));
+
+ pars_info_t* pinfo;
+
+ pinfo = pars_info_create();
+
+ pars_info_add_str_literal(pinfo, "dbname_utf8", dbname_utf8);
+ pars_info_add_str_literal(pinfo, "tablename_utf8", tablename_utf8);
+ pars_info_add_str_literal(pinfo, "new_index_name", new_index_name);
+ pars_info_add_str_literal(pinfo, "old_index_name", old_index_name);
+
+ dberr_t ret;
+
+ ret = dict_stats_exec_sql(
+ pinfo,
+ "PROCEDURE RENAME_INDEX_IN_INDEX_STATS () IS\n"
+ "BEGIN\n"
+ "UPDATE \"" INDEX_STATS_NAME "\" SET\n"
+ "index_name = :new_index_name\n"
+ "WHERE\n"
+ "database_name = :dbname_utf8 AND\n"
+ "table_name = :tablename_utf8 AND\n"
+ "index_name = :old_index_name;\n"
+ "END;\n", NULL);
+
+ mutex_exit(&dict_sys->mutex);
+ rw_lock_x_unlock(dict_operation_lock);
+
+ return(ret);
+}
+
/* tests @{ */
#ifdef UNIV_ENABLE_UNIT_TEST_DICT_STATS
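
The new dict_stats_rename_index() creates and commits its own transaction (per its comment) and reports DB_STATS_DO_NOT_EXIST when the persistent statistics tables are absent. A hypothetical caller:

	dberr_t	err = dict_stats_rename_index(table, "old_name", "new_name");

	if (err == DB_STATS_DO_NOT_EXIST) {
		/* no persistent stats storage; nothing to rename */
	}
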
diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc
index 7f4b12d302b..603122903ff 100644
--- a/storage/innobase/dict/dict0stats_bg.cc
+++ b/storage/innobase/dict/dict0stats_bg.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2012, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -180,7 +180,7 @@ void dict_stats_update_if_needed_func(dict_table_t* table)
lock waits to be enqueued at head of waiting
queue. */
if (thd
- && !wsrep_thd_is_applier(thd)
+ && !wsrep_thd_is_applying(thd)
&& wsrep_on(thd)
&& wsrep_thd_is_BF(thd, 0)) {
WSREP_DEBUG("Avoiding background statistics"
diff --git a/storage/innobase/fil/fil0crypt.cc b/storage/innobase/fil/fil0crypt.cc
index e1081ea2778..b242b57714f 100644
--- a/storage/innobase/fil/fil0crypt.cc
+++ b/storage/innobase/fil/fil0crypt.cc
@@ -23,16 +23,16 @@ Created Jonas Oreland Google
Modified Jan Lindström jan.lindstrom@mariadb.com
*******************************************************/
-#include "fil0fil.h"
+#include "fil0crypt.h"
#include "mtr0types.h"
#include "mach0data.h"
-#include "page0size.h"
#include "page0zip.h"
-#ifndef UNIV_INNOCHECKSUM
-#include "fil0crypt.h"
+#include "buf0checksum.h"
+#ifdef UNIV_INNOCHECKSUM
+# include "buf0buf.h"
+#else
#include "srv0srv.h"
#include "srv0start.h"
-#include "log0recv.h"
#include "mtr0mtr.h"
#include "mtr0log.h"
#include "ut0ut.h"
@@ -274,16 +274,14 @@ fil_space_merge_crypt_data(
}
/** Initialize encryption parameters from a tablespace header page.
-@param[in] page_size page size of the tablespace
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] page first page of the tablespace
@return crypt data from page 0
@retval NULL if not present or not valid */
-UNIV_INTERN
-fil_space_crypt_t*
-fil_space_read_crypt_data(const page_size_t& page_size, const byte* page)
+fil_space_crypt_t* fil_space_read_crypt_data(ulint zip_size, const byte* page)
{
const ulint offset = FSP_HEADER_OFFSET
- + fsp_header_get_encryption_offset(page_size);
+ + fsp_header_get_encryption_offset(zip_size);
if (memcmp(page + offset, CRYPT_MAGIC, MAGIC_SZ) != 0) {
/* Crypt data is not stored. */
@@ -371,7 +369,7 @@ fil_space_crypt_t::write_page0(
ut_ad(this == space->crypt_data);
const uint len = sizeof(iv);
const ulint offset = FSP_HEADER_OFFSET
- + fsp_header_get_encryption_offset(page_size_t(space->flags));
+ + fsp_header_get_encryption_offset(space->zip_size());
page0_offset = offset;
/*
@@ -510,29 +508,27 @@ fil_parse_write_crypt_data(
return ptr;
}
-/** Encrypt a buffer.
-@param[in,out] crypt_data Crypt data
-@param[in] space space_id
-@param[in] offset Page offset
-@param[in] lsn Log sequence number
-@param[in] src_frame Page to encrypt
-@param[in] page_size Page size
-@param[in,out] dst_frame Output buffer
+/** Encrypt a buffer for non full checksum.
+@param[in,out] crypt_data Crypt data
+@param[in] space space_id
+@param[in] offset Page offset
+@param[in] lsn Log sequence number
+@param[in] src_frame Page to encrypt
+@param[in] zip_size ROW_FORMAT=COMPRESSED
+ page size, or 0
+@param[in,out] dst_frame Output buffer
@return encrypted buffer or NULL */
-UNIV_INTERN
-byte*
-fil_encrypt_buf(
+static byte* fil_encrypt_buf_for_non_full_checksum(
fil_space_crypt_t* crypt_data,
ulint space,
ulint offset,
lsn_t lsn,
const byte* src_frame,
- const page_size_t& page_size,
+ ulint zip_size,
byte* dst_frame)
{
- uint size = uint(page_size.physical());
+ uint size = uint(zip_size ? zip_size : srv_page_size);
uint key_version = fil_crypt_get_latest_key_version(crypt_data);
-
ut_a(key_version != ENCRYPTION_KEY_VERSION_INVALID);
ulint orig_page_type = mach_read_from_2(src_frame+FIL_PAGE_TYPE);
@@ -540,21 +536,21 @@ fil_encrypt_buf(
uint header_len = FIL_PAGE_DATA;
if (page_compressed) {
- header_len += (FIL_PAGE_COMPRESSED_SIZE + FIL_PAGE_COMPRESSION_METHOD_SIZE);
+ header_len += FIL_PAGE_ENCRYPT_COMP_METADATA_LEN;
}
/* FIL page header is not encrypted */
memcpy(dst_frame, src_frame, header_len);
-
- /* Store key version */
- mach_write_to_4(dst_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, key_version);
+ mach_write_to_4(dst_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
+ key_version);
/* Calculate the start offset in a page */
- uint unencrypted_bytes = header_len + FIL_PAGE_DATA_END;
- uint srclen = size - unencrypted_bytes;
- const byte* src = src_frame + header_len;
- byte* dst = dst_frame + header_len;
- uint32 dstlen = 0;
+ uint unencrypted_bytes = header_len + FIL_PAGE_DATA_END;
+ uint srclen = size - unencrypted_bytes;
+ const byte* src = src_frame + header_len;
+ byte* dst = dst_frame + header_len;
+ uint32 dstlen = 0;
+ ib_uint32_t checksum = 0;
if (page_compressed) {
srclen = mach_read_from_2(src_frame + FIL_PAGE_DATA);
@@ -572,30 +568,137 @@ fil_encrypt_buf(
to sector boundary is written. */
if (!page_compressed) {
/* FIL page trailer is also not encrypted */
- memcpy(dst_frame + page_size.physical() - FIL_PAGE_DATA_END,
- src_frame + page_size.physical() - FIL_PAGE_DATA_END,
+ memcpy(dst_frame + size - FIL_PAGE_DATA_END,
+ src_frame + size - FIL_PAGE_DATA_END,
FIL_PAGE_DATA_END);
} else {
/* Clean up rest of buffer */
memset(dst_frame+header_len+srclen, 0,
- page_size.physical() - (header_len + srclen));
+ size - (header_len + srclen));
}
- /* handle post encryption checksum */
- ib_uint32_t checksum = 0;
+ checksum = fil_crypt_calculate_checksum(zip_size, dst_frame);
- checksum = fil_crypt_calculate_checksum(page_size, dst_frame);
+ /* store the post-encryption checksum after the key-version */
+ mach_write_to_4(dst_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4,
+ checksum);
- // store the post-encryption checksum after the key-version
- mach_write_to_4(dst_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + 4, checksum);
+ ut_ad(fil_space_verify_crypt_checksum(dst_frame, zip_size));
+
+ srv_stats.pages_encrypted.inc();
+
+ return dst_frame;
+}
- ut_ad(fil_space_verify_crypt_checksum(dst_frame, page_size));
+/** Encrypt a buffer for full checksum format.
+@param[in,out] crypt_data Crypt data
+@param[in] space space_id
+@param[in] offset Page offset
+@param[in] lsn Log sequence number
+@param[in] src_frame Page to encrypt
+@param[in,out] dst_frame Output buffer
+@return encrypted buffer or NULL */
+static byte* fil_encrypt_buf_for_full_crc32(
+ fil_space_crypt_t* crypt_data,
+ ulint space,
+ ulint offset,
+ lsn_t lsn,
+ const byte* src_frame,
+ byte* dst_frame)
+{
+ uint key_version = fil_crypt_get_latest_key_version(crypt_data);
+ ut_d(bool corrupted = false);
+ const uint size = buf_page_full_crc32_size(src_frame, NULL,
+#ifdef UNIV_DEBUG
+ &corrupted
+#else
+ NULL
+#endif
+ );
+ ut_ad(!corrupted);
+ uint srclen = size - (FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
+ + FIL_PAGE_FCRC32_CHECKSUM);
+ const byte* src = src_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION;
+ byte* dst = dst_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION;
+ uint dstlen = 0;
+
+ ut_a(key_version != ENCRYPTION_KEY_VERSION_INVALID);
+
+ /* Till FIL_PAGE_LSN, page is not encrypted */
+ memcpy(dst_frame, src_frame, FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
+
+ /* Write key version to the page. */
+ mach_write_to_4(dst_frame + FIL_PAGE_FCRC32_KEY_VERSION, key_version);
+
+ int rc = encryption_scheme_encrypt(src, srclen, dst, &dstlen,
+ crypt_data, key_version,
+ uint(space), uint(offset), lsn);
+ ut_a(rc == MY_AES_OK);
+ ut_a(dstlen == srclen);
+
+ const ulint payload = size - FIL_PAGE_FCRC32_CHECKSUM;
+ mach_write_to_4(dst_frame + payload, ut_crc32(dst_frame, payload));
+ /* Clean the rest of the buffer. FIXME: Punch holes when writing! */
+ memset(dst_frame + (payload + 4), 0, srv_page_size - (payload + 4));
srv_stats.pages_encrypted.inc();
return dst_frame;
}
+/** Encrypt a buffer.
+@param[in,out] crypt_data Crypt data
+@param[in] space space_id
+@param[in] offset Page offset
+@param[in] lsn Log sequence number
+@param[in] src_frame Page to encrypt
+@param[in] zip_size ROW_FORMAT=COMPRESSED
+ page size, or 0
+@param[in,out] dst_frame Output buffer
+@param[in] use_full_checksum full crc32 algo is used
+@return encrypted buffer or NULL */
+UNIV_INTERN
+byte*
+fil_encrypt_buf(
+ fil_space_crypt_t* crypt_data,
+ ulint space,
+ ulint offset,
+ lsn_t lsn,
+ const byte* src_frame,
+ ulint zip_size,
+ byte* dst_frame,
+ bool use_full_checksum)
+{
+ if (use_full_checksum) {
+ return fil_encrypt_buf_for_full_crc32(
+ crypt_data, space, offset,
+ lsn, src_frame, dst_frame);
+ }
+
+ return fil_encrypt_buf_for_non_full_checksum(
+ crypt_data, space, offset, lsn,
+ src_frame, zip_size, dst_frame);
+}
+
+/** Check whether these page types are allowed to encrypt.
+@param[in] space tablespace object
+@param[in] src_frame source page
+@return true if it is valid page type */
+static bool fil_space_encrypt_valid_page_type(
+ const fil_space_t* space,
+ byte* src_frame)
+{
+ switch (mach_read_from_2(src_frame+FIL_PAGE_TYPE)) {
+ case FIL_PAGE_RTREE:
+ return space->full_crc32();
+ case FIL_PAGE_TYPE_FSP_HDR:
+ case FIL_PAGE_TYPE_XDES:
+ return false;
+ }
+
+ return true;
+}
+
/******************************************************************
Encrypt a page
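
fil_encrypt_buf() is now a thin dispatcher: callers pass the zip_size and a use_full_checksum flag, and it forwards to fil_encrypt_buf_for_full_crc32() or fil_encrypt_buf_for_non_full_checksum(). The call in fil_space_encrypt() (next hunk) reduces to this shape (condensed, illustrative):

	byte*	tmp = fil_encrypt_buf(crypt_data, space->id, offset, lsn,
				      src_frame, space->zip_size(), dst_frame,
				      space->full_crc32());
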
@@ -614,12 +717,7 @@ fil_space_encrypt(
byte* src_frame,
byte* dst_frame)
{
- switch (mach_read_from_2(src_frame+FIL_PAGE_TYPE)) {
- case FIL_PAGE_TYPE_FSP_HDR:
- case FIL_PAGE_TYPE_XDES:
- case FIL_PAGE_RTREE:
- /* File space header, extent descriptor or spatial index
- are not encrypted. */
+ if (!fil_space_encrypt_valid_page_type(space, src_frame)) {
return src_frame;
}
@@ -628,73 +726,167 @@ fil_space_encrypt(
}
fil_space_crypt_t* crypt_data = space->crypt_data;
- const page_size_t page_size(space->flags);
+ const ulint zip_size = space->zip_size();
ut_ad(space->pending_io());
+
+ const bool full_crc32 = space->full_crc32();
+
byte* tmp = fil_encrypt_buf(crypt_data, space->id, offset, lsn,
- src_frame, page_size, dst_frame);
+ src_frame, zip_size, dst_frame,
+ full_crc32);
#ifdef UNIV_DEBUG
if (tmp) {
/* Verify that encrypted buffer is not corrupted */
dberr_t err = DB_SUCCESS;
byte* src = src_frame;
- bool page_compressed_encrypted = (mach_read_from_2(tmp+FIL_PAGE_TYPE) == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED);
- byte uncomp_mem[UNIV_PAGE_SIZE_MAX];
byte tmp_mem[UNIV_PAGE_SIZE_MAX];
- if (page_compressed_encrypted) {
- memcpy(uncomp_mem, src, srv_page_size);
- ulint unzipped1 = fil_page_decompress(
- tmp_mem, uncomp_mem);
- ut_ad(unzipped1);
- if (unzipped1 != srv_page_size) {
- src = uncomp_mem;
+ if (full_crc32) {
+ bool compressed = false, corrupted = false;
+ uint size = buf_page_full_crc32_size(
+ tmp, &compressed, &corrupted);
+ ut_ad(!corrupted);
+ ut_ad(!compressed == (size == srv_page_size));
+ ut_ad(fil_space_decrypt(space->id, crypt_data, tmp_mem,
+ size, space->flags, tmp,
+ &err));
+ ut_ad(err == DB_SUCCESS);
+ memcpy(tmp_mem, src, FIL_PAGE_OFFSET);
+ ut_ad(!memcmp(src, tmp_mem,
+ (size - FIL_PAGE_FCRC32_CHECKSUM)));
+ } else {
+ bool page_compressed_encrypted =
+ (mach_read_from_2(tmp+FIL_PAGE_TYPE)
+ == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED);
+ byte uncomp_mem[UNIV_PAGE_SIZE_MAX];
+
+ if (page_compressed_encrypted) {
+ memcpy(uncomp_mem, src, srv_page_size);
+ ulint unzipped1 = fil_page_decompress(
+ tmp_mem, uncomp_mem, space->flags);
+ ut_ad(unzipped1);
+ if (unzipped1 != srv_page_size) {
+ src = uncomp_mem;
+ }
}
- }
- ut_ad(!buf_page_is_corrupted(true, src, page_size, space));
- ut_ad(fil_space_decrypt(crypt_data, tmp_mem, page_size, tmp,
- &err));
- ut_ad(err == DB_SUCCESS);
-
- /* Need to decompress the page if it was also compressed */
- if (page_compressed_encrypted) {
- byte buf[UNIV_PAGE_SIZE_MAX];
- memcpy(buf, tmp_mem, srv_page_size);
- ulint unzipped2 = fil_page_decompress(tmp_mem, buf);
- ut_ad(unzipped2);
- }
+ ut_ad(!buf_page_is_corrupted(true, src, space->flags));
+
+ ut_ad(fil_space_decrypt(space->id, crypt_data, tmp_mem,
+ space->physical_size(),
+ space->flags, tmp, &err));
+ ut_ad(err == DB_SUCCESS);
+
+ if (page_compressed_encrypted) {
+ memcpy(tmp_mem, uncomp_mem, srv_page_size);
+ ulint unzipped2 = fil_page_decompress(
+ uncomp_mem, tmp_mem, space->flags);
+ ut_ad(unzipped2);
+ }
- memcpy(tmp_mem + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
- src + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 8);
- ut_ad(!memcmp(src, tmp_mem, page_size.physical()));
+ memcpy(tmp_mem + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION,
+ src + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 8);
+ ut_ad(!memcmp(src, tmp_mem, space->physical_size()));
+ }
}
#endif /* UNIV_DEBUG */
return tmp;
}
-/** Decrypt a page.
+/** Decrypt a page for full checksum format.
+@param[in] space space id
@param[in] crypt_data crypt_data
@param[in] tmp_frame Temporary buffer
-@param[in] page_size Page size
@param[in,out] src_frame Page to decrypt
@param[out] err DB_SUCCESS or DB_DECRYPTION_FAILED
@return true if page decrypted, false if not.*/
-UNIV_INTERN
-bool
-fil_space_decrypt(
+static bool fil_space_decrypt_full_crc32(
+ ulint space,
fil_space_crypt_t* crypt_data,
byte* tmp_frame,
- const page_size_t& page_size,
+ byte* src_frame,
+ dberr_t* err)
+{
+ uint key_version = mach_read_from_4(
+ src_frame + FIL_PAGE_FCRC32_KEY_VERSION);
+ lsn_t lsn = mach_read_from_8(src_frame + FIL_PAGE_LSN);
+ uint offset = mach_read_from_4(src_frame + FIL_PAGE_OFFSET);
+ *err = DB_SUCCESS;
+
+ if (key_version == ENCRYPTION_KEY_NOT_ENCRYPTED) {
+ return false;
+ }
+
+ ut_ad(crypt_data);
+ ut_ad(crypt_data->is_encrypted());
+
+ memcpy(tmp_frame, src_frame, FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
+
+ /* Calculate the offset where decryption starts */
+ const byte* src = src_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION;
+ byte* dst = tmp_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION;
+ uint dstlen = 0;
+ bool corrupted = false;
+ uint size = buf_page_full_crc32_size(src_frame, NULL, &corrupted);
+ if (UNIV_UNLIKELY(corrupted)) {
+fail:
+ *err = DB_DECRYPTION_FAILED;
+ return false;
+ }
+
+ uint srclen = size - (FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
+ + FIL_PAGE_FCRC32_CHECKSUM);
+
+ int rc = encryption_scheme_decrypt(src, srclen, dst, &dstlen,
+ crypt_data, key_version,
+ (uint) space, offset, lsn);
+
+ if (rc != MY_AES_OK || dstlen != srclen) {
+ if (rc == -1) {
+ goto fail;
+ }
+
+ ib::fatal() << "Unable to decrypt data-block "
+ << " src: " << src << "srclen: "
+ << srclen << " buf: " << dst << "buflen: "
+ << dstlen << " return-code: " << rc
+ << " Can't continue!";
+ }
+
+ /* Copy only checksum part in the trailer */
+ memcpy(tmp_frame + srv_page_size - FIL_PAGE_FCRC32_CHECKSUM,
+ src_frame + srv_page_size - FIL_PAGE_FCRC32_CHECKSUM,
+ FIL_PAGE_FCRC32_CHECKSUM);
+
+ srv_stats.pages_decrypted.inc();
+
+ return true; /* page was decrypted */
+}
+
+/** Decrypt a page for non full checksum format.
+@param[in] crypt_data crypt_data
+@param[in] tmp_frame Temporary buffer
+@param[in] physical_size page size
+@param[in,out] src_frame Page to decrypt
+@param[out] err DB_SUCCESS or DB_DECRYPTION_FAILED
+@return true if page decrypted, false if not.*/
+static bool fil_space_decrypt_for_non_full_checksum(
+ fil_space_crypt_t* crypt_data,
+ byte* tmp_frame,
+ ulint physical_size,
byte* src_frame,
dberr_t* err)
{
ulint page_type = mach_read_from_2(src_frame+FIL_PAGE_TYPE);
- uint key_version = mach_read_from_4(src_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
- bool page_compressed = (page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED);
+ uint key_version = mach_read_from_4(
+ src_frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
+ bool page_compressed = (page_type
+ == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED);
uint offset = mach_read_from_4(src_frame + FIL_PAGE_OFFSET);
- uint space = mach_read_from_4(src_frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
+ uint space = mach_read_from_4(
+ src_frame + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
ib_uint64_t lsn = mach_read_from_8(src_frame + FIL_PAGE_LSN);
*err = DB_SUCCESS;
@@ -709,7 +901,7 @@ fil_space_decrypt(
uint header_len = FIL_PAGE_DATA;
if (page_compressed) {
- header_len += (FIL_PAGE_COMPRESSED_SIZE + FIL_PAGE_COMPRESSION_METHOD_SIZE);
+ header_len += FIL_PAGE_ENCRYPT_COMP_METADATA_LEN;
}
/* Copy FIL page header, it is not encrypted */
@@ -719,8 +911,7 @@ fil_space_decrypt(
const byte* src = src_frame + header_len;
byte* dst = tmp_frame + header_len;
uint32 dstlen = 0;
- uint srclen = uint(page_size.physical())
- - header_len - FIL_PAGE_DATA_END;
+ uint srclen = uint(physical_size) - header_len - FIL_PAGE_DATA_END;
if (page_compressed) {
srclen = mach_read_from_2(src_frame + FIL_PAGE_DATA);
@@ -750,8 +941,8 @@ fil_space_decrypt(
to sector boundary is written. */
if (!page_compressed) {
/* Copy FIL trailer */
- memcpy(tmp_frame + page_size.physical() - FIL_PAGE_DATA_END,
- src_frame + page_size.physical() - FIL_PAGE_DATA_END,
+ memcpy(tmp_frame + physical_size - FIL_PAGE_DATA_END,
+ src_frame + physical_size - FIL_PAGE_DATA_END,
FIL_PAGE_DATA_END);
}
@@ -760,6 +951,36 @@ fil_space_decrypt(
return true; /* page was decrypted */
}
+/** Decrypt a page.
+@param[in] space_id tablespace id
+@param[in] crypt_data crypt_data
+@param[in] tmp_frame Temporary buffer
+@param[in] physical_size page size
+@param[in] fsp_flags Tablespace flags
+@param[in,out] src_frame Page to decrypt
+@param[out] err DB_SUCCESS or DB_DECRYPTION_FAILED
+@return true if page decrypted, false if not.*/
+UNIV_INTERN
+bool
+fil_space_decrypt(
+ ulint space_id,
+ fil_space_crypt_t* crypt_data,
+ byte* tmp_frame,
+ ulint physical_size,
+ ulint fsp_flags,
+ byte* src_frame,
+ dberr_t* err)
+{
+ if (fil_space_t::full_crc32(fsp_flags)) {
+ return fil_space_decrypt_full_crc32(
+ space_id, crypt_data, tmp_frame, src_frame, err);
+ }
+
+ return fil_space_decrypt_for_non_full_checksum(crypt_data, tmp_frame,
+ physical_size, src_frame,
+ err);
+}
+
/**
Decrypt a page.
@param[in] space Tablespace
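
fil_space_decrypt() likewise gains space_id, physical_size and fsp_flags parameters and dispatches on fil_space_t::full_crc32(fsp_flags). The updated caller in the next hunk reduces to the following (condensed, illustrative):

	dberr_t	err = DB_SUCCESS;
	bool	decrypted = fil_space_decrypt(space->id, space->crypt_data,
					      tmp_frame, space->physical_size(),
					      space->flags, src_frame, &err);
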
@@ -778,21 +999,23 @@ fil_space_decrypt(
{
dberr_t err = DB_SUCCESS;
byte* res = NULL;
- const page_size_t page_size(space->flags);
+ const ulint physical_size = space->physical_size();
*decrypted = false;
ut_ad(space->crypt_data != NULL && space->crypt_data->is_encrypted());
ut_ad(space->pending_io());
- bool encrypted = fil_space_decrypt(space->crypt_data, tmp_frame,
- page_size, src_frame, &err);
+ bool encrypted = fil_space_decrypt(space->id, space->crypt_data,
+ tmp_frame, physical_size,
+ space->flags,
+ src_frame, &err);
if (err == DB_SUCCESS) {
if (encrypted) {
*decrypted = true;
/* Copy the decrypted page back to page buffer, not
really any other options. */
- memcpy(src_frame, tmp_frame, page_size.physical());
+ memcpy(src_frame, tmp_frame, physical_size);
}
res = src_frame;
@@ -801,21 +1024,18 @@ fil_space_decrypt(
return res;
}
-/******************************************************************
+/**
Calculate post encryption checksum
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] dst_frame Block where checksum is calculated
@return page checksum
not needed. */
-UNIV_INTERN
uint32_t
-fil_crypt_calculate_checksum(
- const page_size_t& page_size,
- const byte* dst_frame)
+fil_crypt_calculate_checksum(ulint zip_size, const byte* dst_frame)
{
/* For encrypted tables we use only crc32 and strict_crc32 */
- return page_size.is_compressed()
- ? page_zip_calc_checksum(dst_frame, page_size.physical(),
+ return zip_size
+ ? page_zip_calc_checksum(dst_frame, zip_size,
SRV_CHECKSUM_ALGORITHM_CRC32)
: buf_calc_page_crc32(dst_frame);
}
@@ -927,15 +1147,15 @@ fil_crypt_read_crypt_data(fil_space_t* space)
return;
}
- const page_size_t page_size(space->flags);
+ const ulint zip_size = space->zip_size();
mtr_t mtr;
mtr.start();
if (buf_block_t* block = buf_page_get(page_id_t(space->id, 0),
- page_size, RW_S_LATCH, &mtr)) {
+ zip_size, RW_S_LATCH, &mtr)) {
mutex_enter(&fil_system.mutex);
if (!space->crypt_data) {
space->crypt_data = fil_space_read_crypt_data(
- page_size, block->frame);
+ zip_size, block->frame);
}
mutex_exit(&fil_system.mutex);
}
@@ -1010,7 +1230,7 @@ fil_crypt_start_encrypting_space(
/* 2 - get page 0 */
dberr_t err = DB_SUCCESS;
buf_block_t* block = buf_page_get_gen(
- page_id_t(space->id, 0), page_size_t(space->flags),
+ page_id_t(space->id, 0), space->zip_size(),
RW_X_LATCH, NULL, BUF_GET,
__FILE__, __LINE__,
&mtr, &err);
@@ -1599,7 +1819,7 @@ fil_crypt_get_page_throttle_func(
unsigned line)
{
fil_space_t* space = state->space;
- const page_size_t page_size = page_size_t(space->flags);
+ const ulint zip_size = space->zip_size();
const page_id_t page_id(space->id, offset);
ut_ad(space->referenced());
@@ -1610,7 +1830,7 @@ fil_crypt_get_page_throttle_func(
}
dberr_t err = DB_SUCCESS;
- buf_block_t* block = buf_page_get_gen(page_id, page_size, RW_X_LATCH,
+ buf_block_t* block = buf_page_get_gen(page_id, zip_size, RW_X_LATCH,
NULL,
BUF_PEEK_IF_IN_POOL, file, line,
mtr, &err);
@@ -1627,7 +1847,7 @@ fil_crypt_get_page_throttle_func(
state->crypt_stat.pages_read_from_disk++;
uintmax_t start = ut_time_us(NULL);
- block = buf_page_get_gen(page_id, page_size,
+ block = buf_page_get_gen(page_id, zip_size,
RW_X_LATCH,
NULL, BUF_GET_POSSIBLY_FREED,
file, line, mtr, &err);
@@ -1763,7 +1983,7 @@ fil_crypt_rotate_page(
int needs_scrubbing = BTR_SCRUB_SKIP_PAGE;
lsn_t block_lsn = block->page.newest_modification;
byte* frame = buf_block_get_frame(block);
- uint kv = mach_read_from_4(frame+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
+ uint kv = buf_page_get_key_version(frame, space->flags);
if (space->is_stopping()) {
/* The tablespace is closing (in DROP TABLE or
@@ -1996,7 +2216,7 @@ fil_crypt_flush_space(
dberr_t err;
if (buf_block_t* block = buf_page_get_gen(
- page_id_t(space->id, 0), page_size_t(space->flags),
+ page_id_t(space->id, 0), space->zip_size(),
RW_X_LATCH, NULL, BUF_GET,
__FILE__, __LINE__, &mtr, &err)) {
mtr.set_named_space(space);
@@ -2575,10 +2795,9 @@ calculated checksum as if it does page could be valid unencrypted,
encrypted, or corrupted.
@param[in,out] page page frame (checksum is temporarily modified)
-@param[in] page_size page size
-@return whether the encrypted page is OK */
-bool
-fil_space_verify_crypt_checksum(const byte* page, const page_size_t& page_size)
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@return true if page is encrypted AND OK, false otherwise */
+bool fil_space_verify_crypt_checksum(const byte* page, ulint zip_size)
{
ut_ad(mach_read_from_4(page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION));
@@ -2598,24 +2817,14 @@ fil_space_verify_crypt_checksum(const byte* page, const page_size_t& page_size)
page is not corrupted. */
switch (srv_checksum_algorithm_t(srv_checksum_algorithm)) {
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
- if (page_size.is_compressed()) {
+ if (zip_size) {
return checksum == page_zip_calc_checksum(
- page, page_size.physical(),
- SRV_CHECKSUM_ALGORITHM_CRC32)
-#ifdef INNODB_BUG_ENDIAN_CRC32
- || checksum == page_zip_calc_checksum(
- page, page_size.physical(),
- SRV_CHECKSUM_ALGORITHM_CRC32, true)
-#endif
- ;
+ page, zip_size, SRV_CHECKSUM_ALGORITHM_CRC32);
}
- return checksum == buf_calc_page_crc32(page)
-#ifdef INNODB_BUG_ENDIAN_CRC32
- || checksum == buf_calc_page_crc32(page, true)
-#endif
- ;
+ return checksum == buf_calc_page_crc32(page);
case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
/* Starting with MariaDB 10.1.25, 10.2.7, 10.3.1,
due to MDEV-12114, fil_crypt_calculate_checksum()
@@ -2630,27 +2839,20 @@ fil_space_verify_crypt_checksum(const byte* page, const page_size_t& page_size)
Due to this, we must treat "strict_innodb" as "innodb". */
case SRV_CHECKSUM_ALGORITHM_INNODB:
case SRV_CHECKSUM_ALGORITHM_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
if (checksum == BUF_NO_CHECKSUM_MAGIC) {
return true;
}
- if (page_size.is_compressed()) {
+ if (zip_size) {
return checksum == page_zip_calc_checksum(
- page, page_size.physical(),
+ page, zip_size,
SRV_CHECKSUM_ALGORITHM_CRC32)
-#ifdef INNODB_BUG_ENDIAN_CRC32
|| checksum == page_zip_calc_checksum(
- page, page_size.physical(),
- SRV_CHECKSUM_ALGORITHM_CRC32, true)
-#endif
- || checksum == page_zip_calc_checksum(
- page, page_size.physical(),
+ page, zip_size,
SRV_CHECKSUM_ALGORITHM_INNODB);
}
return checksum == buf_calc_page_crc32(page)
-#ifdef INNODB_BUG_ENDIAN_CRC32
- || checksum == buf_calc_page_crc32(page, true)
-#endif
|| checksum == buf_calc_page_new_checksum(page);
}
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index 30ff70b78e3..f02f304a858 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -42,7 +42,6 @@ Created 10/25/1995 Heikki Tuuri
#include "os0file.h"
#include "page0zip.h"
#include "row0mysql.h"
-#include "row0trunc.h"
#include "srv0start.h"
#include "trx0purge.h"
#include "buf0lru.h"
@@ -51,6 +50,11 @@ Created 10/25/1995 Heikki Tuuri
#include "sync0sync.h"
#include "buf0flu.h"
#include "os0api.h"
+#ifdef UNIV_LINUX
+# include <sys/types.h>
+# include <sys/sysmacros.h>
+# include <dirent.h>
+#endif
/** Tries to close a file in the LRU list. The caller must hold the fil_sys
mutex.
@@ -166,9 +170,6 @@ ulint fil_n_pending_log_flushes = 0;
/** Number of pending tablespace flushes */
ulint fil_n_pending_tablespace_flushes = 0;
-/** The null file address */
-const fil_addr_t fil_addr_null = {FIL_NULL, 0};
-
/** The tablespace memory cache. This variable is NULL before the module is
initialized. */
fil_system_t fil_system;
@@ -208,19 +209,11 @@ fil_validate_skip(void)
/*===================*/
{
/** The fil_validate() call skip counter. */
- static int fil_validate_count = FIL_VALIDATE_SKIP;
+ static Atomic_counter<uint32_t> fil_validate_count;
/* We want to reduce the call frequency of the costly fil_validate()
check in debug builds. */
- int count = my_atomic_add32_explicit(&fil_validate_count, -1,
- MY_MEMORY_ORDER_RELAXED);
- if (count > 0) {
- return(true);
- }
-
- my_atomic_store32_explicit(&fil_validate_count, FIL_VALIDATE_SKIP,
- MY_MEMORY_ORDER_RELAXED);
- return(fil_validate());
+ return (fil_validate_count++ % FIL_VALIDATE_SKIP) || fil_validate();
}
#endif /* UNIV_DEBUG */
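A self-contained sketch of the same skip-counter pattern using std::atomic (the skip interval and the validated function are assumed placeholders, not FIL_VALIDATE_SKIP/fil_validate()):

#include <atomic>
#include <cstdint>

constexpr uint32_t kValidateSkip = 17;            // assumed interval
static bool expensive_validate() { return true; } // placeholder check

static bool validate_skip()
{
    static std::atomic<uint32_t> count{0};
    // Only every kValidateSkip-th call pays for the full check; relaxed
    // ordering suffices because the counter merely rations work.
    return (count.fetch_add(1, std::memory_order_relaxed) % kValidateSkip)
        || expensive_validate();
}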
@@ -273,7 +266,7 @@ fil_node_complete_io(fil_node_t* node, const IORequest& type);
blocks at the end of file are ignored: they are not taken into account when
calculating the byte offset within a space.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] byte_offset remainder of offset in bytes; in aio this
must be divisible by the OS block size
@param[in] len how many bytes to read; this must not cross a
@@ -286,12 +279,12 @@ UNIV_INLINE
dberr_t
fil_read(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
ulint byte_offset,
ulint len,
void* buf)
{
- return(fil_io(IORequestRead, true, page_id, page_size,
+ return(fil_io(IORequestRead, true, page_id, zip_size,
byte_offset, len, buf, NULL));
}
@@ -299,7 +292,7 @@ fil_read(
blocks at the end of file are ignored: they are not taken into account when
calculating the byte offset within a space.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] byte_offset remainder of offset in bytes; in aio this
must be divisible by the OS block size
@param[in] len how many bytes to write; this must not cross
@@ -312,14 +305,14 @@ UNIV_INLINE
dberr_t
fil_write(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
ulint byte_offset,
ulint len,
void* buf)
{
ut_ad(!srv_read_only_mode);
- return(fil_io(IORequestWrite, true, page_id, page_size,
+ return(fil_io(IORequestWrite, true, page_id, zip_size,
byte_offset, len, buf, NULL));
}
@@ -393,20 +386,6 @@ fil_space_get_latch(
return(&(space->latch));
}
-/** Note that the tablespace has been imported.
-Initially, purpose=FIL_TYPE_IMPORT so that no redo log is
-written while the space ID is being updated in each page. */
-void fil_space_t::set_imported()
-{
- ut_ad(purpose == FIL_TYPE_IMPORT);
- const fil_node_t* node = UT_LIST_GET_FIRST(chain);
- atomic_write_supported = node->atomic_write
- && srv_use_atomic_writes
- && my_test_if_atomic_write(node->handle,
- int(page_size_t(flags).physical()));
- purpose = FIL_TYPE_TABLESPACE;
-}
-
/**********************************************************************//**
Checks if all the file nodes in a space are flushed.
@return true if all are flushed */
@@ -432,6 +411,42 @@ fil_space_is_flushed(
return(true);
}
+/** Validate the compression algorithm for full crc32 format.
+@param[in] space tablespace object
+@return whether the compression algorithm is supported */

+static bool fil_comp_algo_validate(const fil_space_t* space)
+{
+ if (!space->full_crc32()) {
+ return true;
+ }
+
+ DBUG_EXECUTE_IF("fil_comp_algo_validate_fail",
+ return false;);
+
+ ulint comp_algo = space->get_compression_algo();
+ switch (comp_algo) {
+ case PAGE_UNCOMPRESSED:
+ case PAGE_ZLIB_ALGORITHM:
+#ifdef HAVE_LZ4
+ case PAGE_LZ4_ALGORITHM:
+#endif /* HAVE_LZ4 */
+#ifdef HAVE_LZO
+ case PAGE_LZO_ALGORITHM:
+#endif /* HAVE_LZO */
+#ifdef HAVE_LZMA
+ case PAGE_LZMA_ALGORITHM:
+#endif /* HAVE_LZMA */
+#ifdef HAVE_BZIP2
+ case PAGE_BZIP2_ALGORITHM:
+#endif /* HAVE_BZIP2 */
+#ifdef HAVE_SNAPPY
+ case PAGE_SNAPPY_ALGORITHM:
+#endif /* HAVE_SNAPPY */
+ return true;
+ }
+
+ return false;
+}
/** Append a file to the chain of files of a space.
@param[in] name file name of a file that is not open
@@ -483,103 +498,6 @@ fil_node_t* fil_space_t::add(const char* name, pfs_os_file_t handle,
return node;
}
-/** Read the first page of a data file.
-@param[in] first whether this is the very first read
-@return whether the page was found valid */
-bool fil_node_t::read_page0(bool first)
-{
- ut_ad(mutex_own(&fil_system.mutex));
- ut_a(space->purpose != FIL_TYPE_LOG);
- const page_size_t page_size(space->flags);
- const ulint psize = page_size.physical();
-
- os_offset_t size_bytes = os_file_get_size(handle);
- ut_a(size_bytes != (os_offset_t) -1);
- const ulint min_size = FIL_IBD_FILE_INITIAL_SIZE * psize;
-
- if (size_bytes < min_size) {
- ib::error() << "The size of the file " << name
- << " is only " << size_bytes
- << " bytes, should be at least " << min_size;
- return false;
- }
-
- byte* buf2 = static_cast<byte*>(ut_malloc_nokey(2 * psize));
-
- /* Align the memory for file i/o if we might have O_DIRECT set */
- byte* page = static_cast<byte*>(ut_align(buf2, psize));
- IORequest request(IORequest::READ);
- if (os_file_read(request, handle, page, 0, psize) != DB_SUCCESS) {
- ib::error() << "Unable to read first page of file " << name;
- ut_free(buf2);
- return false;
- }
- srv_stats.page0_read.add(1);
- const ulint space_id = fsp_header_get_space_id(page);
- ulint flags = fsp_header_get_flags(page);
- const ulint size = fsp_header_get_field(page, FSP_SIZE);
- const ulint free_limit = fsp_header_get_field(page, FSP_FREE_LIMIT);
- const ulint free_len = flst_get_len(FSP_HEADER_OFFSET + FSP_FREE
- + page);
- /* Try to read crypt_data from page 0 if it is not yet read. */
- if (!space->crypt_data) {
- space->crypt_data = fil_space_read_crypt_data(page_size, page);
- }
- ut_free(buf2);
-
- if (!fsp_flags_is_valid(flags, space->id)) {
- ulint cflags = fsp_flags_convert_from_101(flags);
- if (cflags == ULINT_UNDEFINED
- || (cflags ^ space->flags) & ~FSP_FLAGS_MEM_MASK) {
- ib::error()
- << "Expected tablespace flags "
- << ib::hex(space->flags)
- << " but found " << ib::hex(flags)
- << " in the file " << name;
- return false;
- }
-
- flags = cflags;
- }
-
- if (UNIV_UNLIKELY(space_id != space->id)) {
- ib::error() << "Expected tablespace id " << space->id
- << " but found " << space_id
- << " in the file " << name;
- return false;
- }
-
- if (first) {
- ut_ad(space->id != TRX_SYS_SPACE);
-
- /* Truncate the size to a multiple of extent size. */
- ulint mask = psize * FSP_EXTENT_SIZE - 1;
-
- if (size_bytes <= mask) {
- /* .ibd files start smaller than an
- extent size. Do not truncate valid data. */
- } else {
- size_bytes &= ~os_offset_t(mask);
- }
-
- this->size = ulint(size_bytes / psize);
- space->size += this->size;
- } else if (space->id != TRX_SYS_SPACE || space->size_in_header) {
- /* If this is not the first-time open, do nothing.
- For the system tablespace, we always get invoked as
- first=false, so we detect the true first-time-open based
- on size_in_header and proceed to initiailze the data. */
- return true;
- }
-
- ut_ad(space->free_limit == 0 || space->free_limit == free_limit);
- ut_ad(space->free_len == 0 || space->free_len == free_len);
- space->size_in_header = size;
- space->free_limit = free_limit;
- space->free_len = free_len;
- return true;
-}
-
/** Open a file node of a tablespace.
@param[in,out] node File node
@return false if the file can't be opened, otherwise true */
@@ -601,8 +519,7 @@ static bool fil_node_open_file(fil_node_t* node)
if (first_time_open
|| (space->purpose == FIL_TYPE_TABLESPACE
&& node == UT_LIST_GET_FIRST(space->chain)
- && srv_startup_is_before_trx_rollback_phase
- && !undo::Truncate::was_tablespace_truncated(space->id))) {
+ && srv_startup_is_before_trx_rollback_phase)) {
/* We do not know the size of the file yet. First we
open the file in the normal mode, no async I/O here,
for simplicity. Then do some checks, and close the
@@ -633,10 +550,16 @@ retry:
}
if (!node->read_page0(first_time_open)) {
+fail:
os_file_close(node->handle);
node->handle = OS_FILE_CLOSED;
return false;
}
+
+ if (first_time_open && !fil_comp_algo_validate(space)) {
+ goto fail;
+ }
+
} else if (space->purpose == FIL_TYPE_LOG) {
node->handle = os_file_create(
innodb_log_file_key, node->name, OS_FILE_OPEN,
@@ -650,30 +573,6 @@ retry:
OS_FILE_AIO, OS_DATA_FILE, read_only_mode, &success);
}
- if (space->purpose != FIL_TYPE_LOG) {
- /*
- For the temporary tablespace and during the
- non-redo-logged adjustments in
- IMPORT TABLESPACE, we do not care about
- the atomicity of writes.
-
- Atomic writes is supported if the file can be used
- with atomic_writes (not log file), O_DIRECT is
- used (tested in ha_innodb.cc) and the file is
- device and file system that supports atomic writes
- for the given block size
- */
- space->atomic_write_supported
- = space->purpose == FIL_TYPE_TEMPORARY
- || space->purpose == FIL_TYPE_IMPORT
- || (node->atomic_write
- && srv_use_atomic_writes
- && my_test_if_atomic_write(
- node->handle,
- int(page_size_t(space->flags)
- .physical())));
- }
-
ut_a(success);
ut_a(node->is_open());
@@ -937,14 +836,7 @@ fil_space_extend_must_retry(
ulint last_page_no = space->size;
const ulint file_start_page_no = last_page_no - node->size;
- /* Determine correct file block size */
- if (node->block_size == 0) {
- node->block_size = os_file_get_block_size(
- node->handle, node->name);
- }
-
- const page_size_t pageSize(space->flags);
- const ulint page_size = pageSize.physical();
+ const ulint page_size = space->physical_size();
/* fil_read_first_page() expects srv_page_size bytes.
fil_node_open_file() expects at least 4 * srv_page_size bytes.*/
@@ -1004,7 +896,6 @@ fil_space_extend_must_retry(
srv_tmp_space.set_last_file_size(pages_in_MiB);
return(false);
}
-
}
/*******************************************************************//**
@@ -1322,7 +1213,7 @@ fil_space_create(
fil_space_t* space;
ut_ad(fil_system.is_initialised());
- ut_ad(fsp_flags_is_valid(flags & ~FSP_FLAGS_MEM_MASK, id));
+ ut_ad(fil_space_t::is_valid_flags(flags & ~FSP_FLAGS_MEM_MASK, id));
ut_ad(purpose == FIL_TYPE_LOG
|| srv_page_size == UNIV_PAGE_SIZE_ORIG || flags != 0);
@@ -1404,8 +1295,8 @@ fil_space_create(
to do */
if (purpose == FIL_TYPE_TABLESPACE
&& !srv_fil_crypt_rotate_key_age && fil_crypt_threads_event &&
- (mode == FIL_ENCRYPTION_ON || mode == FIL_ENCRYPTION_OFF ||
- srv_encrypt_tables)) {
+ (mode == FIL_ENCRYPTION_ON || mode == FIL_ENCRYPTION_OFF
+ || srv_encrypt_tables)) {
/* Key rotation is not enabled, need to inform background
encryption threads. */
UT_LIST_ADD_LAST(fil_system.rotation_list, space);
@@ -1658,28 +1549,6 @@ void fil_space_t::close()
mutex_exit(&fil_system.mutex);
}
-/** Returns the page size of the space and whether it is compressed or not.
-The tablespace must be cached in the memory cache.
-@param[in] id space id
-@param[out] found true if tablespace was found
-@return page size */
-const page_size_t
-fil_space_get_page_size(
- ulint id,
- bool* found)
-{
- const ulint flags = fil_space_get_flags(id);
-
- if (flags == ULINT_UNDEFINED) {
- *found = false;
- return(univ_page_size);
- }
-
- *found = true;
-
- return(page_size_t(flags));
-}
-
void fil_system_t::create(ulint hash_size)
{
ut_ad(this == &fil_system);
@@ -1700,6 +1569,66 @@ void fil_system_t::create(ulint hash_size)
spaces = hash_create(hash_size);
fil_space_crypt_init();
+#ifdef UNIV_LINUX
+ ssd.clear();
+ char fn[sizeof(dirent::d_name)
+ + sizeof "/sys/block/" "/queue/rotational"];
+ const size_t sizeof_fnp = (sizeof fn) - sizeof "/sys/block";
+ memcpy(fn, "/sys/block/", sizeof "/sys/block");
+ char* fnp = &fn[sizeof "/sys/block"];
+
+ std::set<std::string> ssd_devices;
+ if (DIR* d = opendir("/sys/block")) {
+ while (struct dirent* e = readdir(d)) {
+ if (e->d_name[0] == '.') {
+ continue;
+ }
+ snprintf(fnp, sizeof_fnp, "%s/queue/rotational",
+ e->d_name);
+ int f = open(fn, O_RDONLY);
+ if (f == -1) {
+ continue;
+ }
+ char b[sizeof "4294967295:4294967295\n"];
+ ssize_t l = read(f, b, sizeof b);
+ ::close(f);
+ if (l != 2 || memcmp("0\n", b, 2)) {
+ continue;
+ }
+ snprintf(fnp, sizeof_fnp, "%s/dev", e->d_name);
+ f = open(fn, O_RDONLY);
+ if (f == -1) {
+ continue;
+ }
+ l = read(f, b, sizeof b);
+ ::close(f);
+ if (l <= 0 || b[l - 1] != '\n') {
+ continue;
+ }
+ b[l - 1] = '\0';
+ char* end = b;
+ unsigned long dev_major = strtoul(b, &end, 10);
+ if (b == end || *end != ':'
+ || dev_major != unsigned(dev_major)) {
+ continue;
+ }
+ char* c = end + 1;
+ unsigned long dev_minor = strtoul(c, &end, 10);
+ if (c == end || *end
+ || dev_minor != unsigned(dev_minor)) {
+ continue;
+ }
+ ssd.push_back(makedev(unsigned(dev_major),
+ unsigned(dev_minor)));
+ }
+ closedir(d);
+ }
+ /* fil_system_t::is_ssd() assumes the following */
+ ut_ad(makedev(0, 8) == 8);
+ ut_ad(makedev(0, 4) == 4);
+ ut_ad(makedev(0, 2) == 2);
+ ut_ad(makedev(0, 1) == 1);
+#endif
}
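The Linux-only block above can be exercised in isolation; a condensed sketch of the same /sys/block scan (device-number handling omitted), keeping only devices whose queue/rotational reads "0":

#include <dirent.h>
#include <fstream>
#include <set>
#include <string>

static std::set<std::string> scan_non_rotational()
{
    std::set<std::string> ssd;
    if (DIR* d = opendir("/sys/block")) {
        while (dirent* e = readdir(d)) {
            if (e->d_name[0] == '.') continue;
            std::ifstream f(std::string("/sys/block/") + e->d_name
                            + "/queue/rotational");
            char c;
            // "0\n" marks a non-rotational (SSD/NVMe) queue.
            if (f >> c && c == '0') ssd.insert(e->d_name);
        }
        closedir(d);
    }
    return ssd;
}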
void fil_system_t::close()
@@ -1917,13 +1846,19 @@ fil_write_flushed_lsn(
const page_id_t page_id(TRX_SYS_SPACE, 0);
- err = fil_read(page_id, univ_page_size, 0, srv_page_size,
- buf);
+ err = fil_read(page_id, 0, 0, srv_page_size, buf);
if (err == DB_SUCCESS) {
mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, lsn);
- err = fil_write(page_id, univ_page_size, 0,
- srv_page_size, buf);
+
+ ulint fsp_flags = mach_read_from_4(
+ buf + FSP_HEADER_OFFSET + FSP_SPACE_FLAGS);
+
+ if (fil_space_t::full_crc32(fsp_flags)) {
+ buf_flush_assign_full_crc32_checksum(buf);
+ }
+
+ err = fil_write(page_id, 0, 0, srv_page_size, buf);
fil_flush_file_spaces(FIL_TYPE_TABLESPACE);
}
@@ -2040,7 +1975,7 @@ fil_op_write_log(
ulint len;
ut_ad(first_page_no == 0 || type == MLOG_FILE_CREATE2);
- ut_ad(fsp_flags_is_valid(flags, space_id));
+ ut_ad(fil_space_t::is_valid_flags(flags, space_id));
/* fil_name_parse() requires that there be at least one path
separator and that the file path end with ".ibd". */
@@ -2277,7 +2212,7 @@ fil_check_pending_ops(const fil_space_t* space, ulint count)
return 0;
}
- if (ulint n_pending_ops = my_atomic_loadlint(&space->n_pending_ops)) {
+ if (ulint n_pending_ops = space->n_pending_ops) {
if (count > 5000) {
ib::warn() << "Trying to close/delete/truncate"
@@ -2956,6 +2891,9 @@ fil_rename_tablespace(
return(success);
}
+/* FIXME: remove this! */
+IF_WIN(, bool os_is_sparse_file_supported(os_file_t fh));
+
/** Create a tablespace file.
@param[in] space_id Tablespace ID
@param[in] name Tablespace name in dbname/tablename format.
@@ -2991,7 +2929,7 @@ fil_ibd_create(
ut_ad(!srv_read_only_mode);
ut_a(space_id < SRV_LOG_SPACE_FIRST_ID);
ut_a(size >= FIL_IBD_FILE_INITIAL_SIZE);
- ut_a(fsp_flags_is_valid(flags & ~FSP_FLAGS_MEM_MASK, space_id));
+ ut_a(fil_space_t::is_valid_flags(flags & ~FSP_FLAGS_MEM_MASK, space_id));
/* Create the subdirectories in the path, if they are
not there already. */
@@ -3031,6 +2969,7 @@ fil_ibd_create(
}
const bool is_compressed = FSP_FLAGS_HAS_PAGE_COMPRESSION(flags);
+ bool punch_hole = is_compressed;
#ifdef _WIN32
if (is_compressed) {
@@ -3048,9 +2987,8 @@ err_exit:
return NULL;
}
- bool punch_hole = os_is_sparse_file_supported(file);
-
- ulint block_size = os_file_get_block_size(file, path);
+ /* FIXME: remove this */
+ IF_WIN(, punch_hole = punch_hole && os_is_sparse_file_supported(file));
/* We have to write the space id to the file immediately and flush the
file to disk. This is because in crash recovery we must be aware what
@@ -3067,22 +3005,18 @@ err_exit:
memset(page, '\0', srv_page_size);
- flags |= FSP_FLAGS_PAGE_SSIZE();
+ if (fil_space_t::full_crc32(flags)) {
+ flags |= FSP_FLAGS_FCRC32_PAGE_SSIZE();
+ } else {
+ flags |= FSP_FLAGS_PAGE_SSIZE();
+ }
+
fsp_header_init_fields(page, space_id, flags);
mach_write_to_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, space_id);
- const page_size_t page_size(flags);
- IORequest request(IORequest::WRITE);
-
- if (!page_size.is_compressed()) {
-
- buf_flush_init_for_writing(NULL, page, NULL, 0);
-
- *err = os_file_write(
- request, path, file, page, 0, page_size.physical());
- } else {
+ if (ulint zip_size = fil_space_t::zip_size(flags)) {
page_zip_des_t page_zip;
- page_zip_set_size(&page_zip, page_size.physical());
+ page_zip_set_size(&page_zip, zip_size);
page_zip.data = page + srv_page_size;
#ifdef UNIV_DEBUG
page_zip.m_start =
@@ -3090,11 +3024,16 @@ err_exit:
page_zip.m_end = page_zip.m_nonempty =
page_zip.n_blobs = 0;
- buf_flush_init_for_writing(NULL, page, &page_zip, 0);
+ buf_flush_init_for_writing(NULL, page, &page_zip, 0, false);
+
+ *err = os_file_write(
+ IORequestWrite, path, file, page_zip.data, 0, zip_size);
+ } else {
+ buf_flush_init_for_writing(NULL, page, NULL, 0,
+ fil_space_t::full_crc32(flags));
*err = os_file_write(
- request, path, file, page_zip.data, 0,
- page_size.physical());
+ IORequestWrite, path, file, page, 0, srv_page_size);
}
ut_free(buf2);
@@ -3125,7 +3064,7 @@ err_exit:
/* Create crypt data if the tablespace is either encrypted or user has
requested it to remain unencrypted. */
if (mode == FIL_ENCRYPTION_ON || mode == FIL_ENCRYPTION_OFF ||
- srv_encrypt_tables) {
+ srv_encrypt_tables) {
crypt_data = fil_space_create_crypt_data(mode, key_id);
}
@@ -3135,19 +3074,19 @@ err_exit:
free(crypt_data);
*err = DB_ERROR;
} else {
- fil_node_t* file = space->add(path, OS_FILE_CLOSED, size,
+ space->punch_hole = punch_hole;
+ /* FIXME: Keep the file open! */
+ fil_node_t* node = space->add(path, OS_FILE_CLOSED, size,
false, true);
mtr_t mtr;
mtr.start();
fil_op_write_log(
- MLOG_FILE_CREATE2, space_id, 0, file->name,
+ MLOG_FILE_CREATE2, space_id, 0, node->name,
NULL, space->flags & ~FSP_FLAGS_MEM_MASK, &mtr);
- fil_name_write(space, 0, file, &mtr);
+ fil_name_write(space, 0, node, &mtr);
mtr.commit();
- file->block_size = block_size;
- space->punch_hole = punch_hole;
-
+ node->find_metadata(file);
*err = DB_SUCCESS;
}
@@ -3254,7 +3193,7 @@ corrupted:
return NULL;
}
- ut_ad(fsp_flags_is_valid(flags & ~FSP_FLAGS_MEM_MASK, id));
+ ut_ad(fil_space_t::is_valid_flags(flags & ~FSP_FLAGS_MEM_MASK, id));
df_default.init(tablename.m_name, flags);
df_dict.init(tablename.m_name, flags);
df_remote.init(tablename.m_name, flags);
@@ -3508,7 +3447,8 @@ skip_validate:
df_remote.get_first_page();
fil_space_crypt_t* crypt_data = first_page
- ? fil_space_read_crypt_data(page_size_t(flags), first_page)
+ ? fil_space_read_crypt_data(fil_space_t::zip_size(flags),
+ first_page)
: NULL;
fil_space_t* space = fil_space_create(
@@ -3856,7 +3796,8 @@ fil_ibd_load(
const byte* first_page = file.get_first_page();
fil_space_crypt_t* crypt_data = first_page
- ? fil_space_read_crypt_data(page_size_t(flags), first_page)
+ ? fil_space_read_crypt_data(fil_space_t::zip_size(flags),
+ first_page)
: NULL;
space = fil_space_create(
file.name(), space_id, flags, FIL_TYPE_TABLESPACE, crypt_data);
@@ -3919,7 +3860,10 @@ fil_file_readdir_next_file(
void fsp_flags_try_adjust(fil_space_t* space, ulint flags)
{
ut_ad(!srv_read_only_mode);
- ut_ad(fsp_flags_is_valid(flags, space->id));
+ ut_ad(fil_space_t::is_valid_flags(flags, space->id));
+ if (space->full_crc32() || fil_space_t::full_crc32(flags)) {
+ return;
+ }
if (!space->size && (space->purpose != FIL_TYPE_TABLESPACE
|| !fil_space_get_size(space->id))) {
return;
@@ -3930,9 +3874,15 @@ void fsp_flags_try_adjust(fil_space_t* space, ulint flags)
mtr_t mtr;
mtr.start();
if (buf_block_t* b = buf_page_get(
- page_id_t(space->id, 0), page_size_t(flags),
+ page_id_t(space->id, 0), space->zip_size(),
RW_X_LATCH, &mtr)) {
ulint f = fsp_header_get_flags(b->frame);
+ if (fil_space_t::full_crc32(f)) {
+ goto func_exit;
+ }
+ if (fil_space_t::is_flags_equal(f, flags)) {
+ goto func_exit;
+ }
/* Suppress the message if only the DATA_DIR flag differs. */
if ((f ^ flags) & ~(1U << FSP_FLAGS_POS_RESERVED)) {
ib::warn()
@@ -3941,13 +3891,11 @@ void fsp_flags_try_adjust(fil_space_t* space, ulint flags)
<< "' from " << ib::hex(f)
<< " to " << ib::hex(flags);
}
- if (f != flags) {
- mtr.set_named_space(space);
- mlog_write_ulint(FSP_HEADER_OFFSET
- + FSP_SPACE_FLAGS + b->frame,
- flags, MLOG_4BYTES, &mtr);
- }
+ mtr.set_named_space(space);
+ mlog_write_ulint(FSP_HEADER_OFFSET + FSP_SPACE_FLAGS
+ + b->frame, flags, MLOG_4BYTES, &mtr);
}
+func_exit:
mtr.commit();
}
@@ -3969,7 +3917,11 @@ fil_space_for_table_exists_in_mem(
mutex_enter(&fil_system.mutex);
if (fil_space_t* space = fil_space_get_by_id(id)) {
- if ((space->flags ^ expected_flags) & ~FSP_FLAGS_MEM_MASK) {
+ ulint tf = expected_flags & ~FSP_FLAGS_MEM_MASK;
+ ulint sf = space->flags & ~FSP_FLAGS_MEM_MASK;
+
+ if (!fil_space_t::is_flags_equal(tf, sf)
+ && !fil_space_t::is_flags_equal(sf, tf)) {
goto func_exit;
}
@@ -3986,7 +3938,8 @@ fil_space_for_table_exists_in_mem(
/* Adjust the flags that are in FSP_FLAGS_MEM_MASK.
FSP_SPACE_FLAGS will not be written back here. */
- space->flags = expected_flags;
+ space->flags = (space->flags & ~FSP_FLAGS_MEM_MASK)
+ | (expected_flags & FSP_FLAGS_MEM_MASK);
mutex_exit(&fil_system.mutex);
if (!srv_read_only_mode) {
fsp_flags_try_adjust(space, expected_flags
@@ -4112,12 +4065,21 @@ fil_report_invalid_page_access(
: "");
}
+inline void IORequest::set_fil_node(fil_node_t* node)
+{
+ if (!node->space->punch_hole) {
+ clear_punch_hole();
+ }
+
+ m_fil_node = node;
+}
+
/** Reads or writes data. This operation could be asynchronous (aio).
@param[in,out] type IO context
@param[in] sync true if synchronous aio is desired
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] byte_offset remainder of offset in bytes; in aio this
must be divisible by the OS block size
@param[in] len how many bytes to read or write; this must
@@ -4129,14 +4091,14 @@ fil_report_invalid_page_access(
@param[in] message message for aio handler if non-sync aio
used, else ignored
@param[in] ignore_missing_space true=ignore missing space during read
-@return DB_SUCCESS, DB_TABLESPACE_DELETED or DB_TABLESPACE_TRUNCATED
+@return DB_SUCCESS, or DB_TABLESPACE_DELETED
if we are trying to do i/o on a tablespace which does not exist */
dberr_t
fil_io(
const IORequest& type,
bool sync,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
ulint byte_offset,
ulint len,
void* buf,
@@ -4150,7 +4112,7 @@ fil_io(
ut_ad(len > 0);
ut_ad(byte_offset < srv_page_size);
- ut_ad(!page_size.is_compressed() || byte_offset == 0);
+ ut_ad(!zip_size || byte_offset == 0);
ut_ad(srv_page_size == 1UL << srv_page_size_shift);
compile_time_assert((1U << UNIV_PAGE_SIZE_SHIFT_MAX)
== UNIV_PAGE_SIZE_MAX);
@@ -4161,7 +4123,7 @@ fil_io(
/* ibuf bitmap pages must be read in the sync AIO mode: */
ut_ad(recv_no_ibuf_operations
|| req_type.is_write()
- || !ibuf_bitmap_page(page_id, page_size)
+ || !ibuf_bitmap_page(page_id, zip_size)
|| sync
|| req_type.is_log());
@@ -4177,7 +4139,7 @@ fil_io(
} else if (req_type.is_read()
&& !recv_no_ibuf_operations
- && ibuf_page(page_id, page_size, NULL)) {
+ && ibuf_page(page_id, zip_size, NULL)) {
mode = OS_AIO_IBUF;
@@ -4261,19 +4223,6 @@ fil_io(
break;
} else {
- if (space->id != TRX_SYS_SPACE
- && UT_LIST_GET_LEN(space->chain) == 1
- && (srv_is_tablespace_truncated(space->id)
- || srv_was_tablespace_truncated(space))
- && req_type.is_read()) {
-
- /* Handle page which is outside the truncated
- tablespace bounds when recovering from a crash
- happened during a truncation */
- mutex_exit(&fil_system.mutex);
- return(DB_TABLESPACE_TRUNCATED);
- }
-
cur_page_no -= node->size;
node = UT_LIST_GET_NEXT(chain, node);
@@ -4332,37 +4281,10 @@ fil_io(
/* Now we have made the changes in the data structures of fil_system */
mutex_exit(&fil_system.mutex);
- /* Calculate the low 32 bits and the high 32 bits of the file offset */
-
- if (!page_size.is_compressed()) {
-
- offset = ((os_offset_t) cur_page_no
- << srv_page_size_shift) + byte_offset;
-
- ut_a(node->size - cur_page_no
- >= ((byte_offset + len + (srv_page_size - 1))
- >> srv_page_size_shift));
- } else {
- ulint size_shift;
-
- switch (page_size.physical()) {
- case 1024: size_shift = 10; break;
- case 2048: size_shift = 11; break;
- case 4096: size_shift = 12; break;
- case 8192: size_shift = 13; break;
- case 16384: size_shift = 14; break;
- case 32768: size_shift = 15; break;
- case 65536: size_shift = 16; break;
- default: ut_error;
- }
+ if (!zip_size) zip_size = srv_page_size;
- offset = ((os_offset_t) cur_page_no << size_shift)
- + byte_offset;
-
- ut_a(node->size - cur_page_no
- >= (len + (page_size.physical() - 1))
- / page_size.physical());
- }
+ offset = os_offset_t(cur_page_no) * zip_size + byte_offset;
+ ut_ad(node->size - cur_page_no >= (len + (zip_size - 1)) / zip_size);
/* Do AIO */
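The replacement above collapses the old per-page-size switch into plain arithmetic; a worked check of the new formula, assuming an 8 KiB ROW_FORMAT=COMPRESSED page:

#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t zip_size    = 8192; // physical page size (0 would mean srv_page_size)
    const uint64_t cur_page_no = 3;    // page offset within the file node
    const uint64_t byte_offset = 0;    // must be 0 for compressed pages

    const uint64_t offset = cur_page_no * zip_size + byte_offset;
    assert(offset == 3 * 8192);
    return 0;
}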
@@ -4993,116 +4915,6 @@ fil_names_clear(
return(do_write);
}
-/** Truncate a single-table tablespace. The tablespace must be cached
-in the memory cache.
-@param space_id space id
-@param dir_path directory path
-@param tablename the table name in the usual
- databasename/tablename format of InnoDB
-@param flags tablespace flags
-@param trunc_to_default truncate to default size if tablespace
- is being newly re-initialized.
-@return DB_SUCCESS or error */
-dberr_t
-truncate_t::truncate(
-/*=================*/
- ulint space_id,
- const char* dir_path,
- const char* tablename,
- ulint flags,
- bool trunc_to_default)
-{
- dberr_t err = DB_SUCCESS;
- char* path;
-
- ut_a(!is_system_tablespace(space_id));
-
- if (FSP_FLAGS_HAS_DATA_DIR(flags)) {
- ut_ad(dir_path != NULL);
- path = fil_make_filepath(dir_path, tablename, IBD, true);
- } else {
- path = fil_make_filepath(NULL, tablename, IBD, false);
- }
-
- if (path == NULL) {
- return(DB_OUT_OF_MEMORY);
- }
-
- mutex_enter(&fil_system.mutex);
-
- fil_space_t* space = fil_space_get_by_id(space_id);
-
- /* The following code must change when InnoDB supports
- multiple datafiles per tablespace. */
- ut_a(UT_LIST_GET_LEN(space->chain) == 1);
-
- fil_node_t* node = UT_LIST_GET_FIRST(space->chain);
-
- if (trunc_to_default) {
- space->size = node->size = FIL_IBD_FILE_INITIAL_SIZE;
- }
-
- const bool already_open = node->is_open();
-
- if (!already_open) {
-
- bool ret;
-
- node->handle = os_file_create_simple_no_error_handling(
- innodb_data_file_key, path, OS_FILE_OPEN,
- OS_FILE_READ_WRITE,
- space->purpose != FIL_TYPE_TEMPORARY
- && srv_read_only_mode, &ret);
-
- if (!ret) {
- ib::error() << "Failed to open tablespace file "
- << path << ".";
-
- ut_free(path);
-
- return(DB_ERROR);
- }
-
- ut_a(node->is_open());
- }
-
- os_offset_t trunc_size = trunc_to_default
- ? FIL_IBD_FILE_INITIAL_SIZE
- : space->size;
-
- const bool success = os_file_truncate(
- path, node->handle, trunc_size << srv_page_size_shift);
-
- if (!success) {
- ib::error() << "Cannot truncate file " << path
- << " in TRUNCATE TABLESPACE.";
- err = DB_ERROR;
- }
-
- space->stop_new_ops = false;
-
- /* If we opened the file in this function, close it. */
- if (!already_open) {
- bool closed = os_file_close(node->handle);
-
- if (!closed) {
-
- ib::error() << "Failed to close tablespace file "
- << path << ".";
-
- err = DB_ERROR;
- } else {
- node->handle = OS_FILE_CLOSED;
- }
- }
-
- mutex_exit(&fil_system.mutex);
-
- ut_free(path);
-
- return(err);
-}
-
/* Unit Tests */
#ifdef UNIV_ENABLE_UNIT_TEST_MAKE_FILEPATH
#define MF fil_make_filepath
@@ -5323,29 +5135,6 @@ fil_space_found_by_id(
return space;
}
-/**
-Get should we punch hole to tablespace.
-@param[in] node File node
-@return true, if punch hole should be tried, false if not. */
-bool
-fil_node_should_punch_hole(
- const fil_node_t* node)
-{
- return (node->space->punch_hole);
-}
-
-/**
-Set punch hole to tablespace to given value.
-@param[in] node File node
-@param[in] val value to be set. */
-void
-fil_space_set_punch_hole(
- fil_node_t* node,
- bool val)
-{
- node->space->punch_hole = val;
-}
-
/** Checks that this tablespace in a list of unflushed tablespaces.
@return true if in a list */
bool fil_space_t::is_in_unflushed_spaces() const {
diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc
index 57bb736d230..63e9048aee3 100644
--- a/storage/innobase/fil/fil0pagecompress.cc
+++ b/storage/innobase/fil/fil0pagecompress.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (C) 2013, 2018, MariaDB Corporation.
+Copyright (C) 2013, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -73,45 +73,24 @@ Updated 14/02/2015
#include "snappy-c.h"
#endif
-/** Compress a page_compressed page before writing to a data file.
+/** Compress a page for the given compression algorithm.
@param[in] buf page to be compressed
@param[out] out_buf compressed page
-@param[in] level compression level
-@param[in] block_size file system block size
-@param[in] encrypted whether the page will be subsequently encrypted
-@return actual length of compressed page
-@retval 0 if the page was not compressed */
-ulint fil_page_compress(const byte* buf, byte* out_buf, ulint level,
- ulint block_size, bool encrypted)
+@param[in] header_len header length of the page
+@param[in] comp_algo compression algorithm
+@param[in] comp_level compression level
+@return actual length of compressed page data
+@retval 0 if the page was not compressed */
+static ulint fil_page_compress_low(
+ const byte* buf,
+ byte* out_buf,
+ ulint header_len,
+ ulint comp_algo,
+ ulint comp_level)
{
- int comp_level = int(level);
- ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE;
- /* Cache to avoid change during function execution */
- ulint comp_method = innodb_compression_algorithm;
-
- if (encrypted) {
- header_len += FIL_PAGE_COMPRESSION_METHOD_SIZE;
- }
-
- /* Let's not compress file space header or
- extent descriptor */
- switch (fil_page_get_type(buf)) {
- case 0:
- case FIL_PAGE_TYPE_FSP_HDR:
- case FIL_PAGE_TYPE_XDES:
- case FIL_PAGE_PAGE_COMPRESSED:
- return 0;
- }
-
- /* If no compression level was provided to this table, use system
- default level */
- if (comp_level == 0) {
- comp_level = int(page_zip_level);
- }
-
ulint write_size = srv_page_size - header_len;
- switch (comp_method) {
+ switch (comp_algo) {
default:
ut_ad(!"unknown compression method");
/* fall through */
@@ -121,10 +100,9 @@ ulint fil_page_compress(const byte* buf, byte* out_buf, ulint level,
{
ulong len = uLong(write_size);
if (Z_OK == compress2(
- out_buf + header_len, &len,
- buf, uLong(srv_page_size), comp_level)) {
- write_size = len;
- goto success;
+ out_buf + header_len, &len, buf,
+ uLong(srv_page_size), int(comp_level))) {
+ return len;
}
}
break;
@@ -142,10 +120,7 @@ ulint fil_page_compress(const byte* buf, byte* out_buf, ulint level,
int(srv_page_size), int(write_size));
# endif
- if (write_size) {
- goto success;
- }
- break;
+ return write_size;
#endif /* HAVE_LZ4 */
#ifdef HAVE_LZO
case PAGE_LZO_ALGORITHM: {
@@ -156,8 +131,7 @@ ulint fil_page_compress(const byte* buf, byte* out_buf, ulint level,
out_buf + header_len, &len,
out_buf + srv_page_size)
&& len <= write_size) {
- write_size = len;
- goto success;
+ return len;
}
break;
}
@@ -171,8 +145,7 @@ ulint fil_page_compress(const byte* buf, byte* out_buf, ulint level,
buf, srv_page_size, out_buf + header_len,
&out_pos, write_size)
&& out_pos <= write_size) {
- write_size = out_pos;
- goto success;
+ return out_pos;
}
break;
}
@@ -188,8 +161,7 @@ ulint fil_page_compress(const byte* buf, byte* out_buf, ulint level,
reinterpret_cast<const char*>(buf)),
unsigned(srv_page_size), 1, 0, 0)
&& len <= write_size) {
- write_size = len;
- goto success;
+ return len;
}
break;
}
@@ -205,53 +177,188 @@ ulint fil_page_compress(const byte* buf, byte* out_buf, ulint level,
reinterpret_cast<char*>(out_buf) + header_len,
&len)
&& len <= write_size) {
- write_size = len;
- goto success;
+ return len;
}
break;
}
#endif /* HAVE_SNAPPY */
}
- srv_stats.pages_page_compression_error.inc();
return 0;
-success:
+}
+
+/** Compress a page_compressed page for full crc32 format.
+@param[in] buf page to be compressed
+@param[out] out_buf compressed page
+@param[in] flags tablespace flags
+@param[in] block_size file system block size
+@return actual length of compressed page
+@retval 0 if the page was not compressed */
+static ulint fil_page_compress_for_full_crc32(
+ const byte* buf,
+ byte* out_buf,
+ ulint flags,
+ ulint block_size,
+ bool encrypted)
+{
+ ulint comp_level = fsp_flags_get_page_compression_level(flags);
+
+ if (comp_level == 0) {
+ comp_level = page_zip_level;
+ }
+
+ const ulint header_len = FIL_PAGE_COMP_ALGO;
+
+ ulint write_size = fil_page_compress_low(
+ buf, out_buf, header_len,
+ fil_space_t::get_compression_algo(flags), comp_level);
+
+ if (write_size == 0) {
+fail:
+ srv_stats.pages_page_compression_error.inc();
+ return 0;
+ }
+
+ write_size += header_len;
+ const ulint actual_size = write_size;
+ /* Write the actual length of the data & page type
+ for full crc32 format. */
+ const bool lsb = fil_space_t::full_crc32_page_compressed_len(flags);
+ /* In the MSB, store the rounded-up page size. */
+ write_size = (write_size + lsb + (4 + 255)) & ~255;
+ if (write_size >= srv_page_size) {
+ goto fail;
+ }
+
+ /* Set up the page header */
+ memcpy(out_buf, buf, header_len);
+ out_buf[FIL_PAGE_TYPE] = 1U << (FIL_PAGE_COMPRESS_FCRC32_MARKER - 8);
+ out_buf[FIL_PAGE_TYPE + 1] = byte(write_size >> 8);
+ /* Clean up the buffer for the remaining write_size (except checksum) */
+ memset(out_buf + actual_size, 0, write_size - actual_size - 4);
+ if (lsb) {
+ /* Store the LSB */
+ out_buf[write_size - 5] = byte(actual_size + (1 + 4));
+ }
+
+ if (!block_size) {
+ block_size = 512;
+ }
+
+ ut_ad(write_size);
+ if (write_size & (block_size - 1)) {
+ size_t tmp = write_size;
+ write_size = (write_size + (block_size - 1))
+ & ~(block_size - 1);
+ memset(out_buf + tmp, 0, write_size - tmp);
+ }
+
+#ifdef UNIV_DEBUG
+ /* Verify that page can be decompressed */
+ {
+ page_t tmp_buf[UNIV_PAGE_SIZE_MAX];
+ page_t page[UNIV_PAGE_SIZE_MAX];
+ memcpy(page, out_buf, write_size);
+ ut_ad(fil_page_decompress(tmp_buf, page, flags));
+ }
+#endif
+ srv_stats.page_compression_saved.add(srv_page_size - write_size);
+ srv_stats.pages_page_compressed.inc();
+
+ return write_size;
+}
+
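The size bookkeeping above can be checked on its own: the payload plus a 1-byte LSB and the 4-byte checksum trailer is rounded up to a multiple of 256, and the stored LSB lets a reader recover the exact payload length. A hedged sketch of that encode/decode pair (simplified, without the header or block-size handling):

#include <cassert>
#include <cstddef>

static size_t encode_size(size_t payload, unsigned char* lsb_out)
{
    // Round payload + LSB byte + 4-byte checksum up to the next 256 boundary.
    *lsb_out = static_cast<unsigned char>(payload + 5);
    return (payload + 1 + 4 + 255) & ~size_t(255);
}

static size_t decode_size(size_t rounded, unsigned char lsb)
{
    size_t size = rounded;
    if (lsb) size = size - 0x100 + lsb; // step back into the last 256-byte block
    return size - 5;                    // drop the LSB byte and the checksum
}

int main()
{
    for (size_t payload = 100; payload < 4000; ++payload) {
        unsigned char lsb;
        assert(decode_size(encode_size(payload, &lsb), lsb) == payload);
    }
    return 0;
}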
+/** Compress a page_compressed page for non full crc32 format.
+@param[in] buf page to be compressed
+@param[out] out_buf compressed page
+@param[in] flags tablespace flags
+@param[in] block_size file system block size
+@param[in] encrypted whether the page will be subsequently encrypted
+@return actual length of compressed page
+@retval 0 if the page was not compressed */
+static ulint fil_page_compress_for_non_full_crc32(
+ const byte* buf,
+ byte* out_buf,
+ ulint flags,
+ ulint block_size,
+ bool encrypted)
+{
+ int comp_level = int(fsp_flags_get_page_compression_level(flags));
+ ulint header_len = FIL_PAGE_DATA + FIL_PAGE_COMP_METADATA_LEN;
+ /* Cache to avoid change during function execution */
+ ulint comp_algo = innodb_compression_algorithm;
+
+ if (encrypted) {
+ header_len += FIL_PAGE_ENCRYPT_COMP_ALGO;
+ }
+
+ /* If no compression level was provided to this table, use system
+ default level */
+ if (comp_level == 0) {
+ comp_level = int(page_zip_level);
+ }
+
+ ulint write_size = fil_page_compress_low(
+ buf, out_buf,
+ header_len, comp_algo, comp_level);
+
+ if (write_size == 0) {
+ srv_stats.pages_page_compression_error.inc();
+ return 0;
+ }
+
/* Set up the page header */
memcpy(out_buf, buf, FIL_PAGE_DATA);
/* Set up the checksum */
- mach_write_to_4(out_buf+FIL_PAGE_SPACE_OR_CHKSUM, BUF_NO_CHECKSUM_MAGIC);
+ mach_write_to_4(out_buf + FIL_PAGE_SPACE_OR_CHKSUM, BUF_NO_CHECKSUM_MAGIC);
/* Set up the compression algorithm */
- mach_write_to_8(out_buf+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, comp_method);
+ mach_write_to_8(out_buf + FIL_PAGE_COMP_ALGO, comp_algo);
if (encrypted) {
/* Set up the correct page type */
- mach_write_to_2(out_buf+FIL_PAGE_TYPE, FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED);
- mach_write_to_2(out_buf+FIL_PAGE_DATA+FIL_PAGE_COMPRESSED_SIZE, comp_method);
+ mach_write_to_2(out_buf + FIL_PAGE_TYPE,
+ FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED);
+
+ mach_write_to_2(out_buf + FIL_PAGE_DATA
+ + FIL_PAGE_ENCRYPT_COMP_ALGO, comp_algo);
} else {
/* Set up the correct page type */
- mach_write_to_2(out_buf+FIL_PAGE_TYPE, FIL_PAGE_PAGE_COMPRESSED);
+ mach_write_to_2(out_buf + FIL_PAGE_TYPE, FIL_PAGE_PAGE_COMPRESSED);
}
/* Set up the actual payload length */
- mach_write_to_2(out_buf+FIL_PAGE_DATA, write_size);
+ mach_write_to_2(out_buf + FIL_PAGE_DATA + FIL_PAGE_COMP_SIZE,
+ write_size);
#ifdef UNIV_DEBUG
/* Verify */
- ut_ad(fil_page_is_compressed(out_buf) || fil_page_is_compressed_encrypted(out_buf));
- ut_ad(mach_read_from_4(out_buf+FIL_PAGE_SPACE_OR_CHKSUM) == BUF_NO_CHECKSUM_MAGIC);
- ut_ad(mach_read_from_2(out_buf+FIL_PAGE_DATA) == write_size);
- ut_ad(mach_read_from_8(out_buf+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION) == (ulint)comp_method ||
- mach_read_from_2(out_buf+FIL_PAGE_DATA+FIL_PAGE_COMPRESSED_SIZE) == (ulint)comp_method);
+ ut_ad(fil_page_is_compressed(out_buf)
+ || fil_page_is_compressed_encrypted(out_buf));
+
+ ut_ad(mach_read_from_4(out_buf + FIL_PAGE_SPACE_OR_CHKSUM)
+ == BUF_NO_CHECKSUM_MAGIC);
+
+ ut_ad(mach_read_from_2(out_buf + FIL_PAGE_DATA + FIL_PAGE_COMP_SIZE)
+ == write_size);
+
+ bool is_compressed = (mach_read_from_8(out_buf + FIL_PAGE_COMP_ALGO)
+ == (ulint) comp_algo);
+
+ bool is_encrypted_compressed =
+ (mach_read_from_2(out_buf + FIL_PAGE_DATA
+ + FIL_PAGE_ENCRYPT_COMP_ALGO)
+ == (ulint) comp_algo);
+
+ ut_ad(is_compressed || is_encrypted_compressed);
/* Verify that page can be decompressed */
{
page_t tmp_buf[UNIV_PAGE_SIZE_MAX];
page_t page[UNIV_PAGE_SIZE_MAX];
memcpy(page, out_buf, srv_page_size);
- ut_ad(fil_page_decompress(tmp_buf, page));
- ut_ad(!buf_page_is_corrupted(false, page, univ_page_size,
- NULL));
+ ut_ad(fil_page_decompress(tmp_buf, page, flags));
+ ut_ad(!buf_page_is_corrupted(false, page, flags));
}
#endif /* UNIV_DEBUG */
@@ -266,7 +373,8 @@ success:
/* Actual write needs to be aligned on block size */
if (write_size % block_size) {
size_t tmp = write_size;
- write_size = (size_t)ut_uint64_align_up((ib_uint64_t)write_size, block_size);
+ write_size = (size_t)ut_uint64_align_up(
+ (ib_uint64_t)write_size, block_size);
/* Clean up the end of buffer */
memset(out_buf+tmp, 0, write_size - tmp);
#ifdef UNIV_DEBUG
@@ -281,131 +389,245 @@ success:
return write_size;
}
-/** Decompress a page that may be subject to page_compressed compression.
-@param[in,out] tmp_buf temporary buffer (of innodb_page_size)
-@param[in,out] buf possibly compressed page buffer
-@return size of the compressed data
-@retval 0 if decompression failed
-@retval srv_page_size if the page was not compressed */
-ulint fil_page_decompress(byte* tmp_buf, byte* buf)
+/** Compress a page_compressed page before writing to a data file.
+@param[in] buf page to be compressed
+@param[out] out_buf compressed page
+@param[in] flags tablespace flags
+@param[in] block_size file system block size
+@param[in] encrypted whether the page will be subsequently encrypted
+@return actual length of compressed page
+@retval 0 if the page was not compressed */
+ulint fil_page_compress(
+ const byte* buf,
+ byte* out_buf,
+ ulint flags,
+ ulint block_size,
+ bool encrypted)
{
- const unsigned ptype = mach_read_from_2(buf+FIL_PAGE_TYPE);
- ulint header_len;
- uint64_t compression_alg;
- switch (ptype) {
- case FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED:
- header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE
- + FIL_PAGE_COMPRESSION_METHOD_SIZE;
- compression_alg = mach_read_from_2(
- FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE + buf);
- break;
- case FIL_PAGE_PAGE_COMPRESSED:
- header_len = FIL_PAGE_DATA + FIL_PAGE_COMPRESSED_SIZE;
- compression_alg = mach_read_from_8(
- FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION + buf);
- break;
- default:
- return srv_page_size;
- }
+ /* The full_crc32 page_compressed format assumes this. */
+ ut_ad(!(block_size & 255));
+ ut_ad(ut_is_2pow(block_size));
- if (mach_read_from_4(buf + FIL_PAGE_SPACE_OR_CHKSUM)
- != BUF_NO_CHECKSUM_MAGIC) {
+ /* Let's not compress file space header or
+ extent descriptor */
+ switch (fil_page_get_type(buf)) {
+ case 0:
+ case FIL_PAGE_TYPE_FSP_HDR:
+ case FIL_PAGE_TYPE_XDES:
+ case FIL_PAGE_PAGE_COMPRESSED:
return 0;
}
- ulint actual_size = mach_read_from_2(buf + FIL_PAGE_DATA);
-
- /* Check if payload size is corrupted */
- if (actual_size == 0 || actual_size > srv_page_size - header_len) {
- return 0;
+ if (fil_space_t::full_crc32(flags)) {
+ return fil_page_compress_for_full_crc32(
+ buf, out_buf, flags, block_size, encrypted);
}
- switch (compression_alg) {
+ return fil_page_compress_for_non_full_crc32(
+ buf, out_buf, flags, block_size, encrypted);
+}
+
+/** Decompress a page that may be subject to page_compressed compression.
+@param[in,out] tmp_buf temporary buffer (of innodb_page_size)
+@param[in,out] buf possibly compressed page buffer
+@param[in] comp_algo compression algorithm
+@param[in] header_len header length of the page
+@param[in]	actual_size	actual size of the page
+@return whether the page was decompressed */
+static bool fil_page_decompress_low(
+ byte* tmp_buf,
+ byte* buf,
+ ulint comp_algo,
+ ulint header_len,
+ ulint actual_size)
+{
+ switch (comp_algo) {
default:
ib::error() << "Unknown compression algorithm "
- << compression_alg;
- return 0;
+ << comp_algo;
+ return false;
case PAGE_ZLIB_ALGORITHM:
{
uLong len = srv_page_size;
- if (Z_OK == uncompress(tmp_buf, &len,
+ return (Z_OK == uncompress(tmp_buf, &len,
buf + header_len,
uLong(actual_size))
- && len == srv_page_size) {
- break;
- }
+ && len == srv_page_size);
}
- return 0;
#ifdef HAVE_LZ4
case PAGE_LZ4_ALGORITHM:
- if (LZ4_decompress_safe(reinterpret_cast<const char*>(buf)
- + header_len,
- reinterpret_cast<char*>(tmp_buf),
- actual_size, srv_page_size)
- == int(srv_page_size)) {
- break;
- }
- return 0;
+ return LZ4_decompress_safe(
+ reinterpret_cast<const char*>(buf) + header_len,
+ reinterpret_cast<char*>(tmp_buf),
+ actual_size, srv_page_size) == int(srv_page_size);
#endif /* HAVE_LZ4 */
#ifdef HAVE_LZO
- case PAGE_LZO_ALGORITHM: {
- lzo_uint len_lzo = srv_page_size;
- if (LZO_E_OK == lzo1x_decompress_safe(
- buf + header_len,
- actual_size, tmp_buf, &len_lzo, NULL)
- && len_lzo == srv_page_size) {
- break;
+ case PAGE_LZO_ALGORITHM:
+ {
+ lzo_uint len_lzo = srv_page_size;
+ return (LZO_E_OK == lzo1x_decompress_safe(
+ buf + header_len,
+ actual_size, tmp_buf, &len_lzo, NULL)
+ && len_lzo == srv_page_size);
}
- return 0;
- }
#endif /* HAVE_LZO */
#ifdef HAVE_LZMA
- case PAGE_LZMA_ALGORITHM: {
- size_t src_pos = 0;
- size_t dst_pos = 0;
- uint64_t memlimit = UINT64_MAX;
-
- if (LZMA_OK == lzma_stream_buffer_decode(
- &memlimit, 0, NULL, buf + header_len,
- &src_pos, actual_size, tmp_buf, &dst_pos,
- srv_page_size)
- && dst_pos == srv_page_size) {
- break;
+ case PAGE_LZMA_ALGORITHM:
+ {
+ size_t src_pos = 0;
+ size_t dst_pos = 0;
+ uint64_t memlimit = UINT64_MAX;
+
+ return LZMA_OK == lzma_stream_buffer_decode(
+ &memlimit, 0, NULL, buf + header_len,
+ &src_pos, actual_size, tmp_buf, &dst_pos,
+ srv_page_size)
+ && dst_pos == srv_page_size;
}
- return 0;
- }
#endif /* HAVE_LZMA */
#ifdef HAVE_BZIP2
- case PAGE_BZIP2_ALGORITHM: {
- unsigned int dst_pos = srv_page_size;
- if (BZ_OK == BZ2_bzBuffToBuffDecompress(
- reinterpret_cast<char*>(tmp_buf),
- &dst_pos,
- reinterpret_cast<char*>(buf) + header_len,
- actual_size, 1, 0)
- && dst_pos == srv_page_size) {
- break;
+ case PAGE_BZIP2_ALGORITHM:
+ {
+ unsigned int dst_pos = srv_page_size;
+ return BZ_OK == BZ2_bzBuffToBuffDecompress(
+ reinterpret_cast<char*>(tmp_buf),
+ &dst_pos,
+ reinterpret_cast<char*>(buf) + header_len,
+ actual_size, 1, 0)
+ && dst_pos == srv_page_size;
}
- return 0;
- }
#endif /* HAVE_BZIP2 */
#ifdef HAVE_SNAPPY
- case PAGE_SNAPPY_ALGORITHM: {
- size_t olen = srv_page_size;
-
- if (SNAPPY_OK == snappy_uncompress(
- reinterpret_cast<const char*>(buf) + header_len,
- actual_size,
- reinterpret_cast<char*>(tmp_buf), &olen)
- && olen == srv_page_size) {
- break;
+ case PAGE_SNAPPY_ALGORITHM:
+ {
+ size_t olen = srv_page_size;
+
+ return SNAPPY_OK == snappy_uncompress(
+ reinterpret_cast<const char*>(buf)
+ + header_len,
+ actual_size,
+ reinterpret_cast<char*>(tmp_buf), &olen)
+ && olen == srv_page_size;
+ }
+#endif /* HAVE_SNAPPY */
+ }
+
+ return false;
+}
+
+/** Decompress a page for full crc32 format.
+@param[in,out] tmp_buf temporary buffer (of innodb_page_size)
+@param[in,out] buf possibly compressed page buffer
+@param[in] flags tablespace flags
+@return size of the compressed data
+@retval 0 if decompression failed
+@retval srv_page_size if the page was not compressed */
+ulint fil_page_decompress_for_full_crc32(byte* tmp_buf, byte* buf, ulint flags)
+{
+ ut_ad(fil_space_t::full_crc32(flags));
+ bool compressed = false;
+ size_t size = buf_page_full_crc32_size(buf, &compressed, NULL);
+ if (!compressed) {
+ ut_ad(size == srv_page_size);
+ return size;
+ }
+
+ if (!fil_space_t::is_compressed(flags)) {
+ return 0;
+ }
+
+ if (size >= srv_page_size) {
+ return 0;
+ }
+
+ if (fil_space_t::full_crc32_page_compressed_len(flags)) {
+ compile_time_assert(FIL_PAGE_FCRC32_CHECKSUM == 4);
+ if (size_t lsb = buf[size - 5]) {
+ size += lsb - 0x100;
}
+ size -= 5;
+ }
+
+ const size_t header_len = FIL_PAGE_COMP_ALGO;
+
+ if (!fil_page_decompress_low(tmp_buf, buf,
+ fil_space_t::get_compression_algo(flags),
+ header_len, size - header_len)) {
return 0;
}
-#endif /* HAVE_SNAPPY */
+
+ srv_stats.pages_page_decompressed.inc();
+ memcpy(buf, tmp_buf, srv_page_size);
+ return size;
+}
+
+/** Decompress a page for non full crc32 format.
+@param[in,out] tmp_buf temporary buffer (of innodb_page_size)
+@param[in,out] buf possibly compressed page buffer
+@return size of the compressed data
+@retval 0 if decompression failed
+@retval srv_page_size if the page was not compressed */
+ulint fil_page_decompress_for_non_full_crc32(
+ byte* tmp_buf,
+ byte* buf)
+{
+ const unsigned ptype = mach_read_from_2(buf+FIL_PAGE_TYPE);
+ ulint header_len;
+ uint comp_algo;
+ switch (ptype) {
+ case FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED:
+ header_len= FIL_PAGE_DATA + FIL_PAGE_ENCRYPT_COMP_METADATA_LEN;
+ comp_algo = mach_read_from_2(
+ FIL_PAGE_DATA + FIL_PAGE_ENCRYPT_COMP_ALGO + buf);
+ break;
+ case FIL_PAGE_PAGE_COMPRESSED:
+ header_len = FIL_PAGE_DATA + FIL_PAGE_COMP_METADATA_LEN;
+ if (mach_read_from_6(FIL_PAGE_COMP_ALGO + buf)) {
+ return 0;
+ }
+ comp_algo = mach_read_from_2(FIL_PAGE_COMP_ALGO + 6 + buf);
+ break;
+ default:
+ return srv_page_size;
+ }
+
+ if (mach_read_from_4(buf + FIL_PAGE_SPACE_OR_CHKSUM)
+ != BUF_NO_CHECKSUM_MAGIC) {
+ return 0;
+ }
+
+ ulint actual_size = mach_read_from_2(buf + FIL_PAGE_DATA
+ + FIL_PAGE_COMP_SIZE);
+
+ /* Check if payload size is corrupted */
+ if (actual_size == 0 || actual_size > srv_page_size - header_len) {
+ return 0;
+ }
+
+ if (!fil_page_decompress_low(tmp_buf, buf, comp_algo, header_len,
+ actual_size)) {
+ return 0;
}
srv_stats.pages_page_decompressed.inc();
memcpy(buf, tmp_buf, srv_page_size);
return actual_size;
}
+
+/** Decompress a page that may be subject to page_compressed compression.
+@param[in,out] tmp_buf temporary buffer (of innodb_page_size)
+@param[in,out] buf possibly compressed page buffer
+@return size of the compressed data
+@retval 0 if decompression failed
+@retval srv_page_size if the page was not compressed */
+ulint fil_page_decompress(
+ byte* tmp_buf,
+ byte* buf,
+ ulint flags)
+{
+ if (fil_space_t::full_crc32(flags)) {
+ return fil_page_decompress_for_full_crc32(tmp_buf, buf, flags);
+ }
+
+ return fil_page_decompress_for_non_full_crc32(tmp_buf, buf);
+}
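Since PAGE_ZLIB_ALGORITHM is the one algorithm the code above can always fall back to, a minimal zlib round trip is a useful sanity sketch (the real page format adds the FIL_PAGE_* header and trailer fields handled in fil0pagecompress.cc):

#include <cassert>
#include <cstring>
#include <vector>
#include <zlib.h>

int main()
{
    const uLong page_size = 16384;
    std::vector<unsigned char> page(page_size, 0x2a);

    // Compress the page payload, then decompress and compare.
    uLong clen = compressBound(page_size);
    std::vector<unsigned char> comp(clen);
    assert(compress2(comp.data(), &clen, page.data(), page_size, 6) == Z_OK);

    uLong dlen = page_size;
    std::vector<unsigned char> back(page_size);
    assert(uncompress(back.data(), &dlen, comp.data(), clen) == Z_OK);
    assert(dlen == page_size
           && memcmp(back.data(), page.data(), page_size) == 0);
    return 0;
}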
diff --git a/storage/innobase/fsp/fsp0file.cc b/storage/innobase/fsp/fsp0file.cc
index f32cf18b774..6a1ef7ba99e 100644
--- a/storage/innobase/fsp/fsp0file.cc
+++ b/storage/innobase/fsp/fsp0file.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2013, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -343,7 +343,7 @@ Datafile::read_first_page(bool read_only_mode)
if (m_order == 0) {
m_space_id = fsp_header_get_space_id(m_first_page);
m_flags = fsp_header_get_flags(m_first_page);
- if (!fsp_flags_is_valid(m_flags, m_space_id)) {
+ if (!fil_space_t::is_valid_flags(m_flags, m_space_id)) {
ulint cflags = fsp_flags_convert_from_101(m_flags);
if (cflags == ULINT_UNDEFINED) {
ib::error()
@@ -356,8 +356,9 @@ Datafile::read_first_page(bool read_only_mode)
}
}
- const page_size_t ps(m_flags);
- if (ps.physical() > page_size) {
+ const size_t physical_size = fil_space_t::physical_size(m_flags);
+
+ if (physical_size > page_size) {
ib::error() << "File " << m_filepath
<< " should be longer than "
<< page_size << " bytes";
@@ -407,7 +408,9 @@ Datafile::validate_to_dd(ulint space_id, ulint flags)
/* Make sure the datafile we found matched the space ID.
If the datafile is a file-per-table tablespace then also match
the row format and zip page size. */
- if (m_space_id == space_id && m_flags == flags) {
+ if (m_space_id == space_id
+ && (fil_space_t::is_flags_equal(flags, m_flags)
+ || fil_space_t::is_flags_equal(m_flags, flags))) {
/* Datafile matches the tablespace expected. */
return(DB_SUCCESS);
}
@@ -537,19 +540,19 @@ err_exit:
}
}
- if (!fsp_flags_is_valid(m_flags, m_space_id)) {
+ if (!fil_space_t::is_valid_flags(m_flags, m_space_id)) {
/* Tablespace flags must be valid. */
error_txt = "Tablespace flags are invalid";
goto err_exit;
}
- const page_size_t page_size(m_flags);
+ ulint logical_size = fil_space_t::logical_size(m_flags);
- if (srv_page_size != page_size.logical()) {
+ if (srv_page_size != logical_size) {
/* Logical size must be innodb_page_size. */
ib::error()
<< "Data file '" << m_filepath << "' uses page size "
- << page_size.logical() << ", but the innodb_page_size"
+ << logical_size << ", but the innodb_page_size"
" start-up parameter is "
<< srv_page_size;
free_first_page();
@@ -567,7 +570,7 @@ err_exit:
goto err_exit;
}
- if (buf_page_is_corrupted(false, m_first_page, page_size)) {
+ if (buf_page_is_corrupted(false, m_first_page, m_flags)) {
/* Look for checksum and other corruptions. */
error_txt = "Checksum mismatch";
goto err_exit;
@@ -629,7 +632,6 @@ Datafile::find_space_id()
for (ulint page_size = UNIV_ZIP_SIZE_MIN;
page_size <= UNIV_PAGE_SIZE_MAX;
page_size <<= 1) {
-
/* map[space_id] = count of pages */
typedef std::map<
ulint,
@@ -657,6 +659,20 @@ Datafile::find_space_id()
byte* page = static_cast<byte*>(
ut_align(buf, UNIV_SECTOR_SIZE));
+ ulint fsp_flags;
+ /* provide dummy value if the first os_file_read() fails */
+ switch (srv_checksum_algorithm) {
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ fsp_flags = 1U << FSP_FLAGS_FCRC32_POS_MARKER
+ | FSP_FLAGS_FCRC32_PAGE_SSIZE()
+ | innodb_compression_algorithm
+ << FSP_FLAGS_FCRC32_POS_COMPRESSED_ALGO;
+ break;
+ default:
+ fsp_flags = 0;
+ }
+
for (ulint j = 0; j < page_count; ++j) {
dberr_t err;
@@ -674,33 +690,26 @@ Datafile::find_space_id()
continue;
}
+ if (j == 0) {
+ fsp_flags = mach_read_from_4(
+ page + FSP_HEADER_OFFSET + FSP_SPACE_FLAGS);
+ }
+
bool noncompressed_ok = false;
/* For noncompressed pages, the page size must be
equal to srv_page_size. */
if (page_size == srv_page_size) {
noncompressed_ok = !buf_page_is_corrupted(
- false, page, univ_page_size, NULL);
+ false, page, fsp_flags);
}
bool compressed_ok = false;
- /* file-per-table tablespaces can be compressed with
- the same physical and logical page size. General
- tablespaces must have different physical and logical
- page sizes in order to be compressed. For this check,
- assume the page is compressed if univ_page_size.
- logical() is equal to or less than 16k and the
- page_size we are checking is equal to or less than
- srv_page_size. */
if (srv_page_size <= UNIV_PAGE_SIZE_DEF
&& page_size <= srv_page_size) {
- const page_size_t compr_page_size(
- page_size, srv_page_size,
- true);
-
compressed_ok = !buf_page_is_corrupted(
- false, page, compr_page_size, NULL);
+ false, page, fsp_flags);
}
if (noncompressed_ok || compressed_ok) {
@@ -787,7 +796,7 @@ Datafile::restore_from_doublewrite()
ulint flags = mach_read_from_4(
FSP_HEADER_OFFSET + FSP_SPACE_FLAGS + page);
- if (!fsp_flags_is_valid(flags, m_space_id)) {
+ if (!fil_space_t::is_valid_flags(flags, m_space_id)) {
ulint cflags = fsp_flags_convert_from_101(flags);
if (cflags == ULINT_UNDEFINED) {
ib::warn()
@@ -800,21 +809,21 @@ Datafile::restore_from_doublewrite()
/* The flags on the page should be converted later. */
}
- const page_size_t page_size(flags);
+ ulint physical_size = fil_space_t::physical_size(flags);
ut_a(page_get_page_no(page) == page_id.page_no());
ib::info() << "Restoring page " << page_id
<< " of datafile '" << m_filepath
<< "' from the doublewrite buffer. Writing "
- << page_size.physical() << " bytes into file '"
+ << physical_size << " bytes into file '"
<< m_filepath << "'";
IORequest request(IORequest::WRITE);
return(os_file_write(
request,
- m_filepath, m_handle, page, 0, page_size.physical())
+ m_filepath, m_handle, page, 0, physical_size)
!= DB_SUCCESS);
}
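The fsp0file.cc hunks above make Datafile::find_space_id() seed a tablespace-flags value from srv_checksum_algorithm before the first successful read of page 0, so that buf_page_is_corrupted() can already apply the full_crc32 variant on the first candidate page. Below is a minimal standalone sketch of that selection only; it mirrors the switch in the patch, but the bit positions are placeholders rather than the real FSP_FLAGS_FCRC32_* values from fsp0types.h.

```cpp
#include <cstdint>

// Placeholder bit positions, assumed for illustration only.
static const unsigned POS_MARKER          = 0;
static const unsigned POS_PAGE_SSIZE      = 1;
static const unsigned POS_COMPRESSED_ALGO = 5;

enum srv_checksum_algorithm_t {
	ALGO_CRC32, ALGO_FULL_CRC32, ALGO_STRICT_FULL_CRC32
};

// Mirrors the dummy-flags switch added to Datafile::find_space_id():
// with (strict_)full_crc32 the flags are synthesized up front, otherwise
// they start as 0 and are later read from the FSP header of page 0.
static uint32_t seed_fsp_flags(srv_checksum_algorithm_t algo,
			       uint32_t page_ssize,
			       uint32_t compression_algo)
{
	switch (algo) {
	case ALGO_FULL_CRC32:
	case ALGO_STRICT_FULL_CRC32:
		return uint32_t(1) << POS_MARKER
			| page_ssize << POS_PAGE_SSIZE
			| compression_algo << POS_COMPRESSED_ALGO;
	default:
		return 0;
	}
}
```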
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index 2b22cd253eb..da15fb38530 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -50,7 +50,6 @@ typedef ulint page_no_t;
/** Return an extent to the free list of a space.
@param[in,out] space tablespace
@param[in] offset page number in the extent
-@param[in] page_size page size
@param[in,out] mtr mini-transaction */
MY_ATTRIBUTE((nonnull))
static
@@ -58,7 +57,6 @@ void
fsp_free_extent(
fil_space_t* space,
page_no_t offset,
- const page_size_t& page_size,
mtr_t* mtr);
/********************************************************************//**
@@ -78,7 +76,6 @@ We think of the extent lists of the segment catenated in the order
FSEG_FULL -> FSEG_NOT_FULL -> FSEG_FREE.
@param[in] inode segment inode
@param[in] space tablespace
-@param[in] page_size page size
@param[in,out] mtr mini-transaction
@return the first extent descriptor, or NULL if none */
MY_ATTRIBUTE((nonnull, warn_unused_result))
@@ -87,7 +84,6 @@ xdes_t*
fseg_get_first_extent(
fseg_inode_t* inode,
const fil_space_t* space,
- const page_size_t& page_size,
mtr_t* mtr);
/** Put new extents to the free list if there are free extents above the free
@@ -111,7 +107,6 @@ fsp_fill_free_list(
This function implements the intelligent allocation strategy which tries
to minimize file space fragmentation.
@param[in,out] space tablespace
-@param[in] page_size page size
@param[in,out] seg_inode segment inode
@param[in] hint hint of which page would be desirable
@param[in] direction if the new page is needed because of
@@ -132,7 +127,6 @@ static
buf_block_t*
fseg_alloc_free_page_low(
fil_space_t* space,
- const page_size_t& page_size,
fseg_inode_t* seg_inode,
ulint hint,
byte direction,
@@ -147,24 +141,16 @@ fseg_alloc_free_page_low(
/** Gets a pointer to the space header and x-locks its page.
@param[in] space tablespace
-@param[in] page_size page size
@param[in,out] mtr mini-transaction
@return pointer to the space header, page x-locked */
-UNIV_INLINE
-fsp_header_t*
-fsp_get_space_header(
- const fil_space_t* space,
- const page_size_t& page_size,
- mtr_t* mtr)
+inline fsp_header_t* fsp_get_space_header(const fil_space_t* space, mtr_t* mtr)
{
buf_block_t* block;
fsp_header_t* header;
ut_ad(space->purpose != FIL_TYPE_LOG);
- ut_ad(!FSP_FLAGS_GET_ZIP_SSIZE(space->flags)
- == !page_size.is_compressed());
- block = buf_page_get(page_id_t(space->id, 0), page_size,
+ block = buf_page_get(page_id_t(space->id, 0), space->zip_size(),
RW_SX_LATCH, mtr);
header = FSP_HEADER_OFFSET + buf_block_get_frame(block);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
@@ -427,9 +413,9 @@ xdes_get_descriptor_with_space_hdr(
return(NULL);
}
- const page_size_t page_size(space->flags);
+ const ulint zip_size = space->zip_size();
- descr_page_no = xdes_calc_descriptor_page(page_size, offset);
+ descr_page_no = xdes_calc_descriptor_page(zip_size, offset);
buf_block_t* block;
@@ -440,7 +426,7 @@ xdes_get_descriptor_with_space_hdr(
block = NULL;
} else {
block = buf_page_get(
- page_id_t(space->id, descr_page_no), page_size,
+ page_id_t(space->id, descr_page_no), zip_size,
RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
@@ -453,7 +439,7 @@ xdes_get_descriptor_with_space_hdr(
}
return(descr_page + XDES_ARR_OFFSET
- + XDES_SIZE * xdes_calc_descriptor_index(page_size, offset));
+ + XDES_SIZE * xdes_calc_descriptor_index(zip_size, offset));
}
/** Get the extent descriptor of a page.
@@ -465,22 +451,17 @@ defined, as they are uninitialized above the free limit.
@param[in] space tablespace
@param[in] offset page offset; if equal to the free limit, we
try to add new extents to the space free list
-@param[in] page_size page size
@param[in,out] mtr mini-transaction
@return the extent descriptor */
MY_ATTRIBUTE((warn_unused_result))
static
xdes_t*
-xdes_get_descriptor(
- const fil_space_t* space,
- page_no_t offset,
- const page_size_t& page_size,
- mtr_t* mtr)
+xdes_get_descriptor(const fil_space_t* space, page_no_t offset, mtr_t* mtr)
{
buf_block_t* block;
fsp_header_t* sp_header;
- block = buf_page_get(page_id_t(space->id, 0), page_size,
+ block = buf_page_get(page_id_t(space->id, 0), space->zip_size(),
RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
@@ -499,7 +480,6 @@ defined, as they are uninitialized above the free limit.
@param[in] space tablespace
@param[in] page descriptor page offset
@param[in] offset page offset
-@param[in] page_size page size
@param[in,out] mtr mini-transaction
@return the extent descriptor
@retval NULL if the descriptor is not available */
@@ -510,15 +490,16 @@ xdes_get_descriptor_const(
const fil_space_t* space,
page_no_t page,
page_no_t offset,
- const page_size_t& page_size,
mtr_t* mtr)
{
ut_ad(mtr_memo_contains(mtr, &space->latch, MTR_MEMO_S_LOCK));
ut_ad(offset < space->free_limit);
ut_ad(offset < space->size_in_header);
+ const ulint zip_size = space->zip_size();
+
if (buf_block_t* block = buf_page_get(page_id_t(space->id, page),
- page_size, RW_S_LATCH, mtr)) {
+ zip_size, RW_S_LATCH, mtr)) {
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
ut_ad(page != 0 || space->free_limit == mach_read_from_4(
@@ -529,7 +510,7 @@ xdes_get_descriptor_const(
+ block->frame));
return(block->frame + XDES_ARR_OFFSET + XDES_SIZE
- * xdes_calc_descriptor_index(page_size, offset));
+ * xdes_calc_descriptor_index(zip_size, offset));
}
return(NULL);
@@ -538,7 +519,6 @@ xdes_get_descriptor_const(
/** Get a pointer to the extent descriptor. The page where the
extent descriptor resides is x-locked.
@param[in] space tablespace
-@param[in] page_size page size
@param[in] lst_node file address of the list node
contained in the descriptor
@param[in,out] mtr mini-transaction
@@ -548,14 +528,13 @@ UNIV_INLINE
xdes_t*
xdes_lst_get_descriptor(
const fil_space_t* space,
- const page_size_t& page_size,
fil_addr_t lst_node,
mtr_t* mtr)
{
ut_ad(mtr_memo_contains(mtr, &space->latch, MTR_MEMO_X_LOCK));
- ut_ad(page_size.equals_to(page_size_t(space->flags)));
- return(fut_get_ptr(space->id, page_size, lst_node, RW_SX_LATCH, mtr)
- - XDES_FLST_NODE);
+ return fut_get_ptr(space->id, space->zip_size(),
+ lst_node, RW_SX_LATCH, mtr)
+ - XDES_FLST_NODE;
}
/********************************************************************//**
@@ -612,9 +591,7 @@ void fil_space_t::modify_check(const mtr_t& mtr) const
case MTR_LOG_NO_REDO:
ut_ad(purpose == FIL_TYPE_TEMPORARY
|| purpose == FIL_TYPE_IMPORT
- || my_atomic_loadlint(&redo_skipped_count)
- || is_being_truncated
- || srv_is_tablespace_truncated(id));
+ || redo_skipped_count);
return;
case MTR_LOG_ALL:
/* We may only write redo log for a persistent
@@ -640,7 +617,7 @@ fsp_header_init_fields(
ulint flags) /*!< in: tablespace flags (FSP_SPACE_FLAGS) */
{
flags &= ~FSP_FLAGS_MEM_MASK;
- ut_a(fsp_flags_is_valid(flags, space_id));
+ ut_a(fil_space_t::is_valid_flags(flags, space_id));
mach_write_to_4(FSP_HEADER_OFFSET + FSP_SPACE_ID + page,
space_id);
@@ -654,12 +631,12 @@ fsp_header_init_fields(
@param[in,out] mtr mini-transaction */
void fsp_header_init(fil_space_t* space, ulint size, mtr_t* mtr)
{
- const page_id_t page_id(space->id, 0);
- const page_size_t page_size(space->flags);
+ const page_id_t page_id(space->id, 0);
+ const ulint zip_size = space->zip_size();
mtr_x_lock(&space->latch, mtr);
- buf_block_t* block = buf_page_create(page_id, page_size, mtr);
- buf_page_get(page_id, page_size, RW_SX_LATCH, mtr);
+ buf_block_t* block = buf_page_create(page_id, zip_size, mtr);
+ buf_page_get(page_id, zip_size, RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
space->size_in_header = size;
@@ -675,23 +652,23 @@ void fsp_header_init(fil_space_t* space, ulint size, mtr_t* mtr)
mlog_write_ulint(FSP_HEADER_OFFSET + FSP_SPACE_ID + block->frame,
space->id, MLOG_4BYTES, mtr);
- mlog_write_ulint(FSP_HEADER_OFFSET + FSP_NOT_USED + block->frame, 0,
- MLOG_4BYTES, mtr);
+ ut_ad(0 == mach_read_from_4(FSP_HEADER_OFFSET + FSP_NOT_USED
+ + block->frame));
mlog_write_ulint(FSP_HEADER_OFFSET + FSP_SIZE + block->frame, size,
MLOG_4BYTES, mtr);
- mlog_write_ulint(FSP_HEADER_OFFSET + FSP_FREE_LIMIT + block->frame, 0,
- MLOG_4BYTES, mtr);
+ ut_ad(0 == mach_read_from_4(FSP_HEADER_OFFSET + FSP_FREE_LIMIT
+ + block->frame));
mlog_write_ulint(FSP_HEADER_OFFSET + FSP_SPACE_FLAGS + block->frame,
space->flags & ~FSP_FLAGS_MEM_MASK,
MLOG_4BYTES, mtr);
- mlog_write_ulint(FSP_HEADER_OFFSET + FSP_FRAG_N_USED + block->frame, 0,
- MLOG_4BYTES, mtr);
+ ut_ad(0 == mach_read_from_4(FSP_HEADER_OFFSET + FSP_FRAG_N_USED
+ + block->frame));
- flst_init(FSP_HEADER_OFFSET + FSP_FREE + block->frame, mtr);
- flst_init(FSP_HEADER_OFFSET + FSP_FREE_FRAG + block->frame, mtr);
- flst_init(FSP_HEADER_OFFSET + FSP_FULL_FRAG + block->frame, mtr);
- flst_init(FSP_HEADER_OFFSET + FSP_SEG_INODES_FULL + block->frame, mtr);
- flst_init(FSP_HEADER_OFFSET + FSP_SEG_INODES_FREE + block->frame, mtr);
+ flst_init(block, FSP_HEADER_OFFSET + FSP_FREE, mtr);
+ flst_init(block, FSP_HEADER_OFFSET + FSP_FREE_FRAG, mtr);
+ flst_init(block, FSP_HEADER_OFFSET + FSP_FULL_FRAG, mtr);
+ flst_init(block, FSP_HEADER_OFFSET + FSP_SEG_INODES_FULL, mtr);
+ flst_init(block, FSP_HEADER_OFFSET + FSP_SEG_INODES_FREE, mtr);
mlog_write_ull(FSP_HEADER_OFFSET + FSP_SEG_ID + block->frame, 1, mtr);
@@ -768,6 +745,44 @@ fsp_try_extend_data_file_with_pages(
return(success);
}
+/** Calculate the number of physical pages in an extent for this file.
+@param[in] physical_size page_size of the datafile
+@return number of pages in an extent for this file */
+inline ulint fsp_get_extent_size_in_pages(ulint physical_size)
+{
+ return (FSP_EXTENT_SIZE << srv_page_size_shift) / physical_size;
+}
+
+
+/** Calculate the number of pages to extend a datafile.
+We extend single-table tablespaces first one extent at a time,
+but 4 at a time for bigger tablespaces. It is not enough to extend always
+by one extent, because we need to add at least one extent to FSP_FREE.
+A single extent descriptor page will track many extents. And the extent
+that uses its extent descriptor page is put onto the FSP_FREE_FRAG list.
+Extents that do not use their extent descriptor page are added to FSP_FREE.
+The physical page size is used to determine how many extents are tracked
+on one extent descriptor page. See xdes_calc_descriptor_page().
+@param[in] physical_size page size in data file
+@param[in] size current number of pages in the datafile
+@return number of pages to extend the file. */
+static ulint fsp_get_pages_to_extend_ibd(ulint physical_size, ulint size)
+{
+ ulint extent_size = fsp_get_extent_size_in_pages(physical_size);
+ /* The threshold is set at 32MiB except when the physical page
+ size is small enough that it must be done sooner. */
+ ulint threshold = std::min(32 * extent_size, physical_size);
+
+ if (size >= threshold) {
+ /* Below in fsp_fill_free_list() we assume
+ that we add at most FSP_FREE_ADD extents at
+ a time */
+ extent_size *= FSP_FREE_ADD;
+ }
+
+ return extent_size;
+}
+
/** Try to extend the last data file of a tablespace if it is auto-extending.
@param[in,out] space tablespace
@param[in,out] header tablespace header
@@ -820,8 +835,7 @@ fsp_try_extend_data_file(fil_space_t* space, fsp_header_t* header, mtr_t* mtr)
size = mach_read_from_4(header + FSP_SIZE);
ut_ad(size == space->size_in_header);
- const page_size_t page_size(
- mach_read_from_4(header + FSP_SPACE_FLAGS));
+ const ulint ps = space->physical_size();
switch (space->id) {
case TRX_SYS_SPACE:
@@ -831,8 +845,7 @@ fsp_try_extend_data_file(fil_space_t* space, fsp_header_t* header, mtr_t* mtr)
size_increase = srv_tmp_space.get_increment();
break;
default:
- ulint extent_pages
- = fsp_get_extent_size_in_pages(page_size);
+ ulint extent_pages = fsp_get_extent_size_in_pages(ps);
if (size < extent_pages) {
/* Let us first extend the file to extent_size */
if (!fsp_try_extend_data_file_with_pages(
@@ -843,7 +856,7 @@ fsp_try_extend_data_file(fil_space_t* space, fsp_header_t* header, mtr_t* mtr)
size = extent_pages;
}
- size_increase = fsp_get_pages_to_extend_ibd(page_size, size);
+ size_increase = fsp_get_pages_to_extend_ibd(ps, size);
}
if (size_increase == 0) {
@@ -857,8 +870,7 @@ fsp_try_extend_data_file(fil_space_t* space, fsp_header_t* header, mtr_t* mtr)
/* We ignore any fragments of a full megabyte when storing the size
to the space header */
- space->size_in_header = ut_2pow_round(
- space->size, (1024 * 1024) / page_size.physical());
+ space->size_in_header = ut_2pow_round(space->size, (1024 * 1024) / ps);
mlog_write_ulint(
header + FSP_SIZE, space->size_in_header, MLOG_4BYTES, mtr);
@@ -866,47 +878,6 @@ fsp_try_extend_data_file(fil_space_t* space, fsp_header_t* header, mtr_t* mtr)
return(size_increase);
}
-/** Calculate the number of pages to extend a datafile.
-We extend single-table tablespaces first one extent at a time,
-but 4 at a time for bigger tablespaces. It is not enough to extend always
-by one extent, because we need to add at least one extent to FSP_FREE.
-A single extent descriptor page will track many extents. And the extent
-that uses its extent descriptor page is put onto the FSP_FREE_FRAG list.
-Extents that do not use their extent descriptor page are added to FSP_FREE.
-The physical page size is used to determine how many extents are tracked
-on one extent descriptor page. See xdes_calc_descriptor_page().
-@param[in] page_size page_size of the datafile
-@param[in] size current number of pages in the datafile
-@return number of pages to extend the file. */
-ulint
-fsp_get_pages_to_extend_ibd(
- const page_size_t& page_size,
- ulint size)
-{
- ulint size_increase; /* number of pages to extend this file */
- ulint extent_size; /* one megabyte, in pages */
- ulint threshold; /* The size of the tablespace (in number
- of pages) where we start allocating more
- than one extent at a time. */
-
- extent_size = fsp_get_extent_size_in_pages(page_size);
-
- /* The threshold is set at 32MiB except when the physical page
- size is small enough that it must be done sooner. */
- threshold = ut_min(32 * extent_size, page_size.physical());
-
- if (size < threshold) {
- size_increase = extent_size;
- } else {
- /* Below in fsp_fill_free_list() we assume
- that we add at most FSP_FREE_ADD extents at
- a time */
- size_increase = FSP_FREE_ADD * extent_size;
- }
-
- return(size_increase);
-}
-
/** Reset the page type.
Data files created before MySQL 5.1.48 may contain garbage in FIL_PAGE_TYPE.
In MySQL 3.23.53, only undo log pages and index pages were tagged.
@@ -957,7 +928,7 @@ fsp_fill_free_list(
ut_ad(size == space->size_in_header);
ut_ad(limit == space->free_limit);
- const page_size_t page_size(space->flags);
+ const ulint zip_size = space->zip_size();
if (size < limit + FSP_EXTENT_SIZE * FSP_FREE_ADD) {
bool skip_resize = init_space;
@@ -981,8 +952,8 @@ fsp_fill_free_list(
while ((init_space && i < 1)
|| ((i + FSP_EXTENT_SIZE <= size) && (count < FSP_FREE_ADD))) {
- bool init_xdes
- = (ut_2pow_remainder(i, page_size.physical()) == 0);
+ const bool init_xdes = 0
+ == ut_2pow_remainder(i, ulint(space->physical_size()));
space->free_limit = i + FSP_EXTENT_SIZE;
mlog_write_ulint(header + FSP_FREE_LIMIT, i + FSP_EXTENT_SIZE,
@@ -1000,10 +971,10 @@ fsp_fill_free_list(
const page_id_t page_id(space->id, i);
block = buf_page_create(
- page_id, page_size, mtr);
+ page_id, zip_size, mtr);
buf_page_get(
- page_id, page_size, RW_SX_LATCH, mtr);
+ page_id, zip_size, RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
@@ -1025,30 +996,23 @@ fsp_fill_free_list(
mtr_start(&ibuf_mtr);
ibuf_mtr.set_named_space(space);
- /* Avoid logging while truncate table
- fix-up is active. */
- if (srv_is_tablespace_truncated(space->id)) {
- mtr_set_log_mode(
- &ibuf_mtr, MTR_LOG_NO_REDO);
- }
-
const page_id_t page_id(
space->id,
i + FSP_IBUF_BITMAP_OFFSET);
block = buf_page_create(
- page_id, page_size, &ibuf_mtr);
+ page_id, zip_size, &ibuf_mtr);
buf_page_get(
- page_id, page_size, RW_SX_LATCH,
+ page_id, zip_size, RW_SX_LATCH,
&ibuf_mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
fsp_init_file_page(space, block, &ibuf_mtr);
-
- ibuf_bitmap_page_init(block, &ibuf_mtr);
-
+ mlog_write_ulint(block->frame + FIL_PAGE_TYPE,
+ FIL_PAGE_IBUF_BITMAP,
+ MLOG_2BYTES, &ibuf_mtr);
mtr_commit(&ibuf_mtr);
}
}
@@ -1093,7 +1057,6 @@ fsp_fill_free_list(
/** Allocates a new free extent.
@param[in,out] space tablespace
-@param[in] page_size page size
@param[in] hint hint of which extent would be desirable: any
page offset in the extent goes; the hint must not be > FSP_FREE_LIMIT
@param[in,out] mtr mini-transaction
@@ -1102,7 +1065,6 @@ static
xdes_t*
fsp_alloc_free_extent(
fil_space_t* space,
- const page_size_t& page_size,
ulint hint,
mtr_t* mtr)
{
@@ -1111,7 +1073,7 @@ fsp_alloc_free_extent(
xdes_t* descr;
buf_block_t* desc_block = NULL;
- header = fsp_get_space_header(space, page_size, mtr);
+ header = fsp_get_space_header(space, mtr);
descr = xdes_get_descriptor_with_space_hdr(
header, space, hint, mtr, false, &desc_block);
@@ -1137,8 +1099,7 @@ fsp_alloc_free_extent(
return(NULL); /* No free extents left */
}
- descr = xdes_lst_get_descriptor(
- space, page_size, first, mtr);
+ descr = xdes_lst_get_descriptor(space, first, mtr);
}
flst_remove(header + FSP_FREE, descr + XDES_FLST_NODE, mtr);
@@ -1189,7 +1150,6 @@ not previously x-latched. It is assumed that the block has been
x-latched only by mtr, and freed in mtr in that case.
@param[in,out] space tablespace
@param[in] offset page number of the allocated page
-@param[in] page_size page size of the allocated page
@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction of the allocation
@param[in,out] init_mtr mini-transaction for initializing the page
@@ -1200,15 +1160,12 @@ buf_block_t*
fsp_page_create(
fil_space_t* space,
page_no_t offset,
- const page_size_t& page_size,
rw_lock_type_t rw_latch,
mtr_t* mtr,
mtr_t* init_mtr)
{
- ut_ad(page_size.equals_to(page_size_t(space->flags)));
-
buf_block_t* block = buf_page_create(page_id_t(space->id, offset),
- page_size, init_mtr);
+ space->zip_size(), init_mtr);
ut_d(bool latched = mtr_memo_contains_flagged(mtr, block,
MTR_MEMO_PAGE_X_FIX
@@ -1245,7 +1202,6 @@ fsp_page_create(
/** Allocates a single free page from a space.
The page is marked as used.
@param[in,out] space tablespace
-@param[in] page_size page size
@param[in] hint hint of which page would be desirable
@param[in] rw_latch RW_SX_LATCH, RW_X_LATCH
@param[in,out] mtr mini-transaction
@@ -1259,7 +1215,6 @@ static MY_ATTRIBUTE((warn_unused_result, nonnull))
buf_block_t*
fsp_alloc_free_page(
fil_space_t* space,
- const page_size_t& page_size,
ulint hint,
rw_lock_type_t rw_latch,
mtr_t* mtr,
@@ -1272,7 +1227,7 @@ fsp_alloc_free_page(
const ulint space_id = space->id;
ut_d(space->modify_check(*mtr));
- header = fsp_get_space_header(space, page_size, mtr);
+ header = fsp_get_space_header(space, mtr);
/* Get the hinted descriptor */
descr = xdes_get_descriptor_with_space_hdr(header, space, hint, mtr);
@@ -1291,8 +1246,7 @@ fsp_alloc_free_page(
FREE_FRAG list. But we will allocate our page from the
the free extent anyway. */
- descr = fsp_alloc_free_extent(space, page_size,
- hint, mtr);
+ descr = fsp_alloc_free_extent(space, hint, mtr);
if (descr == NULL) {
/* No free space left */
@@ -1304,8 +1258,7 @@ fsp_alloc_free_page(
flst_add_last(header + FSP_FREE_FRAG,
descr + XDES_FLST_NODE, mtr);
} else {
- descr = xdes_lst_get_descriptor(space, page_size,
- first, mtr);
+ descr = xdes_lst_get_descriptor(space, first, mtr);
}
/* Reset the hint */
@@ -1353,23 +1306,17 @@ fsp_alloc_free_page(
}
fsp_alloc_from_free_frag(header, descr, free, mtr);
- return(fsp_page_create(space, page_no, page_size, rw_latch,
- mtr, init_mtr));
+ return fsp_page_create(space, page_no, rw_latch, mtr, init_mtr);
}
/** Frees a single page of a space.
The page is marked as free and clean.
@param[in,out] space tablespace
@param[in] offset page number
-@param[in] page_size page size
+@param[in] log whether to write MLOG_INIT_FREE_PAGE record
@param[in,out] mtr mini-transaction */
-static
-void
-fsp_free_page(
- fil_space_t* space,
- ulint offset,
- const page_size_t& page_size,
- mtr_t* mtr)
+static void fsp_free_page(fil_space_t* space, page_no_t offset,
+ bool log, mtr_t* mtr)
{
fsp_header_t* header;
xdes_t* descr;
@@ -1381,7 +1328,7 @@ fsp_free_page(
/* fprintf(stderr, "Freeing page %lu in space %lu\n", page, space); */
- header = fsp_get_space_header(space, page_size, mtr);
+ header = fsp_get_space_header(space, mtr);
descr = xdes_get_descriptor_with_space_hdr(
header, space, offset, mtr);
@@ -1423,6 +1370,17 @@ fsp_free_page(
return;
}
+ if (UNIV_UNLIKELY(!log)) {
+ /* The last page freed in BtrBulk::finish() must be
+ written with redo logging disabled for the page
+ itself. The modifications of the allocation data
+ structures are covered by redo log. */
+ } else if (byte* log_ptr = mlog_open(mtr, 11)) {
+ log_ptr = mlog_write_initial_log_record_low(
+ MLOG_INIT_FREE_PAGE, space->id, offset, log_ptr, mtr);
+ mlog_close(mtr, log_ptr);
+ }
+
const ulint bit = offset % FSP_EXTENT_SIZE;
xdes_set_bit(descr, XDES_FREE_BIT, bit, TRUE, mtr);
@@ -1450,29 +1408,22 @@ fsp_free_page(
/* The extent has become free: move it to another list */
flst_remove(header + FSP_FREE_FRAG, descr + XDES_FLST_NODE,
mtr);
- fsp_free_extent(space, offset, page_size, mtr);
+ fsp_free_extent(space, offset, mtr);
}
}
/** Return an extent to the free list of a space.
@param[in,out] space tablespace
@param[in] offset page number in the extent
-@param[in] page_size page size
@param[in,out] mtr mini-transaction */
-static
-void
-fsp_free_extent(
- fil_space_t* space,
- page_no_t offset,
- const page_size_t& page_size,
- mtr_t* mtr)
+static void fsp_free_extent(fil_space_t* space, page_no_t offset, mtr_t* mtr)
{
fsp_header_t* header;
xdes_t* descr;
ut_ad(mtr_memo_contains(mtr, &space->latch, MTR_MEMO_X_LOCK));
- header = fsp_get_space_header(space, page_size, mtr);
+ header = fsp_get_space_header(space, mtr);
descr = xdes_get_descriptor_with_space_hdr(
header, space, offset, mtr);
@@ -1485,10 +1436,16 @@ fsp_free_extent(
space->free_len++;
}
+/** @return Number of segment inodes which fit on a single page */
+inline ulint FSP_SEG_INODES_PER_PAGE(ulint physical_size)
+{
+ return (physical_size - FSEG_ARR_OFFSET - 10) / FSEG_INODE_SIZE;
+}
+
/** Returns the nth inode slot on an inode page.
@param[in] page segment inode page
@param[in] i inode index on page
-@param[in] page_size page size
+@param[in] physical_size page size
@param[in,out] mtr mini-transaction
@return segment inode */
UNIV_INLINE
@@ -1496,10 +1453,10 @@ fseg_inode_t*
fsp_seg_inode_page_get_nth_inode(
page_t* page,
ulint i,
- const page_size_t& page_size,
+ ulint physical_size,
mtr_t* mtr)
{
- ut_ad(i < FSP_SEG_INODES_PER_PAGE(page_size));
+ ut_ad(i < FSP_SEG_INODES_PER_PAGE(physical_size));
ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_SX_FIX));
return(page + FSEG_ARR_OFFSET + FSEG_INODE_SIZE * i);
@@ -1507,23 +1464,23 @@ fsp_seg_inode_page_get_nth_inode(
/** Looks for a used segment inode on a segment inode page.
@param[in] page segment inode page
-@param[in] page_size page size
+@param[in] physical_size page size
@param[in,out] mtr mini-transaction
@return segment inode index, or ULINT_UNDEFINED if not found */
static
ulint
fsp_seg_inode_page_find_used(
page_t* page,
- const page_size_t& page_size,
+ ulint physical_size,
mtr_t* mtr)
{
ulint i;
fseg_inode_t* inode;
- for (i = 0; i < FSP_SEG_INODES_PER_PAGE(page_size); i++) {
+ for (i = 0; i < FSP_SEG_INODES_PER_PAGE(physical_size); i++) {
inode = fsp_seg_inode_page_get_nth_inode(
- page, i, page_size, mtr);
+ page, i, physical_size, mtr);
if (mach_read_from_8(inode + FSEG_ID)) {
/* This is used */
@@ -1540,7 +1497,7 @@ fsp_seg_inode_page_find_used(
/** Looks for an unused segment inode on a segment inode page.
@param[in] page segment inode page
@param[in] i search forward starting from this index
-@param[in] page_size page size
+@param[in] physical_size page size
@param[in,out] mtr mini-transaction
@return segment inode index, or ULINT_UNDEFINED if not found */
static
@@ -1548,15 +1505,15 @@ ulint
fsp_seg_inode_page_find_free(
page_t* page,
ulint i,
- const page_size_t& page_size,
+ ulint physical_size,
mtr_t* mtr)
{
- for (; i < FSP_SEG_INODES_PER_PAGE(page_size); i++) {
+ for (; i < FSP_SEG_INODES_PER_PAGE(physical_size); i++) {
fseg_inode_t* inode;
inode = fsp_seg_inode_page_get_nth_inode(
- page, i, page_size, mtr);
+ page, i, physical_size, mtr);
if (!mach_read_from_8(inode + FSEG_ID)) {
/* This is unused */
@@ -1590,10 +1547,7 @@ fsp_alloc_seg_inode_page(
ut_ad(page_offset(space_header) == FSP_HEADER_OFFSET);
ut_ad(page_get_space_id(page_align(space_header)) == space->id);
- const page_size_t page_size(space->flags);
-
- block = fsp_alloc_free_page(
- space, page_size, 0, RW_SX_LATCH, mtr, mtr);
+ block = fsp_alloc_free_page(space, 0, RW_SX_LATCH, mtr, mtr);
if (block == NULL) {
@@ -1608,10 +1562,12 @@ fsp_alloc_seg_inode_page(
mlog_write_ulint(page + FIL_PAGE_TYPE, FIL_PAGE_INODE,
MLOG_2BYTES, mtr);
- for (ulint i = 0; i < FSP_SEG_INODES_PER_PAGE(page_size); i++) {
+ const ulint physical_size = space->physical_size();
+
+ for (ulint i = 0; i < FSP_SEG_INODES_PER_PAGE(physical_size); i++) {
inode = fsp_seg_inode_page_get_nth_inode(
- page, i, page_size, mtr);
+ page, i, physical_size, mtr);
mlog_write_ull(inode + FSEG_ID, 0, mtr);
}
@@ -1649,25 +1605,27 @@ fsp_alloc_seg_inode(
&& !fsp_alloc_seg_inode_page(space, space_header, mtr)) {
return(NULL);
}
- const page_size_t page_size(space->flags);
const page_id_t page_id(
space->id,
flst_get_first(space_header + FSP_SEG_INODES_FREE, mtr).page);
- block = buf_page_get(page_id, page_size, RW_SX_LATCH, mtr);
+ block = buf_page_get(page_id, space->zip_size(), RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
fil_block_check_type(*block, FIL_PAGE_INODE, mtr);
page = buf_block_get_frame(block);
- n = fsp_seg_inode_page_find_free(page, 0, page_size, mtr);
+ const ulint physical_size = space->physical_size();
+
+ n = fsp_seg_inode_page_find_free(page, 0, physical_size, mtr);
ut_a(n != ULINT_UNDEFINED);
- inode = fsp_seg_inode_page_get_nth_inode(page, n, page_size, mtr);
+ inode = fsp_seg_inode_page_get_nth_inode(page, n, physical_size, mtr);
if (ULINT_UNDEFINED == fsp_seg_inode_page_find_free(page, n + 1,
- page_size, mtr)) {
+ physical_size,
+ mtr)) {
/* There are no other unused headers left on the page: move it
to another list */
@@ -1685,15 +1643,13 @@ fsp_alloc_seg_inode(
/** Frees a file segment inode.
@param[in,out] space tablespace
-@param[in] page_size page size
@param[in,out] inode segment inode
+@param[in] log whether to write MLOG_INIT_FREE_PAGE record
@param[in,out] mtr mini-transaction */
-static
-void
-fsp_free_seg_inode(
+static void fsp_free_seg_inode(
fil_space_t* space,
- const page_size_t& page_size,
fseg_inode_t* inode,
+ bool log,
mtr_t* mtr)
{
page_t* page;
@@ -1703,12 +1659,14 @@ fsp_free_seg_inode(
page = page_align(inode);
- space_header = fsp_get_space_header(space, page_size, mtr);
+ space_header = fsp_get_space_header(space, mtr);
ut_ad(mach_read_from_4(inode + FSEG_MAGIC_N) == FSEG_MAGIC_N_VALUE);
+ const ulint physical_size = space->physical_size();
+
if (ULINT_UNDEFINED
- == fsp_seg_inode_page_find_free(page, 0, page_size, mtr)) {
+ == fsp_seg_inode_page_find_free(page, 0, physical_size, mtr)) {
/* Move the page to another list */
@@ -1723,21 +1681,21 @@ fsp_free_seg_inode(
mlog_write_ulint(inode + FSEG_MAGIC_N, 0xfa051ce3, MLOG_4BYTES, mtr);
if (ULINT_UNDEFINED
- == fsp_seg_inode_page_find_used(page, page_size, mtr)) {
+ == fsp_seg_inode_page_find_used(page, physical_size, mtr)) {
/* There are no other used headers left on the page: free it */
flst_remove(space_header + FSP_SEG_INODES_FREE,
page + FSEG_INODE_PAGE_NODE, mtr);
- fsp_free_page(space, page_get_page_no(page), page_size, mtr);
+ fsp_free_page(space, page_get_page_no(page), log, mtr);
}
}
/** Returns the file segment inode, page x-latched.
@param[in] header segment header
@param[in] space space id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in,out] mtr mini-transaction
@param[out] block inode block, or NULL to ignore
@return segment inode, page x-latched; NULL if the inode is free */
@@ -1746,7 +1704,7 @@ fseg_inode_t*
fseg_inode_try_get(
fseg_header_t* header,
ulint space,
- const page_size_t& page_size,
+ ulint zip_size,
mtr_t* mtr,
buf_block_t** block)
{
@@ -1757,7 +1715,7 @@ fseg_inode_try_get(
inode_addr.boffset = mach_read_from_2(header + FSEG_HDR_OFFSET);
ut_ad(space == mach_read_from_4(header + FSEG_HDR_SPACE));
- inode = fut_get_ptr(space, page_size, inode_addr, RW_SX_LATCH, mtr,
+ inode = fut_get_ptr(space, zip_size, inode_addr, RW_SX_LATCH, mtr,
block);
if (UNIV_UNLIKELY(!mach_read_from_8(inode + FSEG_ID))) {
@@ -1774,7 +1732,7 @@ fseg_inode_try_get(
/** Returns the file segment inode, page x-latched.
@param[in] header segment header
@param[in] space space id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in,out] mtr mini-transaction
@param[out] block inode block
@return segment inode, page x-latched */
@@ -1783,12 +1741,12 @@ fseg_inode_t*
fseg_inode_get(
fseg_header_t* header,
ulint space,
- const page_size_t& page_size,
+ ulint zip_size,
mtr_t* mtr,
buf_block_t** block = NULL)
{
fseg_inode_t* inode
- = fseg_inode_try_get(header, space, page_size, mtr, block);
+ = fseg_inode_try_get(header, space, zip_size, mtr, block);
ut_a(inode);
return(inode);
}
@@ -1948,11 +1906,11 @@ fseg_create(
<= srv_page_size - FIL_PAGE_DATA_END);
mtr_x_lock(&space->latch, mtr);
- const page_size_t page_size(space->flags);
ut_d(space->modify_check(*mtr));
if (page != 0) {
- block = buf_page_get(page_id_t(space->id, page), page_size,
+ block = buf_page_get(page_id_t(space->id, page),
+ space->zip_size(),
RW_SX_LATCH, mtr);
header = byte_offset + buf_block_get_frame(block);
@@ -1971,7 +1929,7 @@ fseg_create(
DBUG_RETURN(NULL);
}
- space_header = fsp_get_space_header(space, page_size, mtr);
+ space_header = fsp_get_space_header(space, mtr);
inode = fsp_alloc_seg_inode(space, space_header, mtr);
@@ -2000,7 +1958,7 @@ fseg_create(
}
if (page == 0) {
- block = fseg_alloc_free_page_low(space, page_size,
+ block = fseg_alloc_free_page_low(space,
inode, 0, FSP_UP, RW_SX_LATCH,
mtr, mtr
#ifdef UNIV_DEBUG
@@ -2013,9 +1971,7 @@ fseg_create(
ut_ad(!has_done_reservation || block != NULL);
if (block == NULL) {
-
- fsp_free_seg_inode(space, page_size, inode, mtr);
-
+ fsp_free_seg_inode(space, inode, true, mtr);
goto funct_exit;
}
@@ -2092,9 +2048,7 @@ fseg_n_reserved_pages(
space_id = page_get_space_id(page_align(header));
space = mtr_x_lock_space(space_id, mtr);
- const page_size_t page_size(space->flags);
-
- inode = fseg_inode_get(header, space_id, page_size, mtr);
+ inode = fseg_inode_get(header, space_id, space->zip_size(), mtr);
ret = fseg_n_reserved_pages_low(inode, used, mtr);
@@ -2107,7 +2061,6 @@ the free list is empty, and the extents can be allocated consecutively from
the hint onward.
@param[in] inode segment inode
@param[in] space tablespace
-@param[in] page_size page size
@param[in] hint hint which extent would be good as the first
extent
@param[in,out] mtr mini-transaction */
@@ -2116,7 +2069,6 @@ void
fseg_fill_free_list(
fseg_inode_t* inode,
fil_space_t* space,
- const page_size_t& page_size,
ulint hint,
mtr_t* mtr)
{
@@ -2146,7 +2098,7 @@ fseg_fill_free_list(
}
for (i = 0; i < FSEG_FREE_LIST_MAX_LEN; i++) {
- descr = xdes_get_descriptor(space, hint, page_size, mtr);
+ descr = xdes_get_descriptor(space, hint, mtr);
if ((descr == NULL)
|| (XDES_FREE != xdes_get_state(descr, mtr))) {
@@ -2156,7 +2108,7 @@ fseg_fill_free_list(
return;
}
- descr = fsp_alloc_free_extent(space, page_size, hint, mtr);
+ descr = fsp_alloc_free_extent(space, hint, mtr);
xdes_set_state(descr, XDES_FSEG, mtr);
@@ -2176,7 +2128,6 @@ NOTE that the extent returned still resides in the segment free list, it is
not yet taken off it!
@param[in] inode segment inode
@param[in,out] space tablespace
-@param[in] page_size page size
@param[in,out] mtr mini-transaction
@retval NULL if no page could be allocated
@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
@@ -2187,7 +2138,6 @@ xdes_t*
fseg_alloc_free_extent(
fseg_inode_t* inode,
fil_space_t* space,
- const page_size_t& page_size,
mtr_t* mtr)
{
xdes_t* descr;
@@ -2203,10 +2153,10 @@ fseg_alloc_free_extent(
first = flst_get_first(inode + FSEG_FREE, mtr);
- descr = xdes_lst_get_descriptor(space, page_size, first, mtr);
+ descr = xdes_lst_get_descriptor(space, first, mtr);
} else {
/* Segment free list was empty, allocate from space */
- descr = fsp_alloc_free_extent(space, page_size, 0, mtr);
+ descr = fsp_alloc_free_extent(space, 0, mtr);
if (descr == NULL) {
@@ -2220,7 +2170,7 @@ fseg_alloc_free_extent(
flst_add_last(inode + FSEG_FREE, descr + XDES_FLST_NODE, mtr);
/* Try to fill the segment free list */
- fseg_fill_free_list(inode, space, page_size,
+ fseg_fill_free_list(inode, space,
xdes_get_offset(descr) + FSP_EXTENT_SIZE,
mtr);
}
@@ -2232,7 +2182,6 @@ fseg_alloc_free_extent(
This function implements the intelligent allocation strategy which tries to
minimize file space fragmentation.
@param[in,out] space tablespace
-@param[in] page_size page size
@param[in,out] seg_inode segment inode
@param[in] hint hint of which page would be desirable
@param[in] direction if the new page is needed because of
@@ -2253,7 +2202,6 @@ static
buf_block_t*
fseg_alloc_free_page_low(
fil_space_t* space,
- const page_size_t& page_size,
fseg_inode_t* seg_inode,
ulint hint,
byte direction,
@@ -2288,7 +2236,7 @@ fseg_alloc_free_page_low(
reserved = fseg_n_reserved_pages_low(seg_inode, &used, mtr);
- space_header = fsp_get_space_header(space, page_size, mtr);
+ space_header = fsp_get_space_header(space, mtr);
descr = xdes_get_descriptor_with_space_hdr(space_header, space,
hint, mtr);
@@ -2297,7 +2245,7 @@ fseg_alloc_free_page_low(
hint */
/* The file space header page is always allocated. */
hint = 0;
- descr = xdes_get_descriptor(space, hint, page_size, mtr);
+ descr = xdes_get_descriptor(space, hint, mtr);
}
/* In the big if-else below we look for ret_page and ret_descr */
@@ -2324,7 +2272,7 @@ take_hinted_page:
=========================================================
the hinted page
===============*/
- ret_descr = fsp_alloc_free_extent(space, page_size, hint, mtr);
+ ret_descr = fsp_alloc_free_extent(space, hint, mtr);
ut_a(ret_descr == descr);
@@ -2334,7 +2282,7 @@ take_hinted_page:
ret_descr + XDES_FLST_NODE, mtr);
/* Try to fill the segment free list */
- fseg_fill_free_list(seg_inode, space, page_size,
+ fseg_fill_free_list(seg_inode, space,
hint + FSP_EXTENT_SIZE, mtr);
goto take_hinted_page;
/*-----------------------------------------------------------*/
@@ -2342,8 +2290,7 @@ take_hinted_page:
&& ((reserved - used) < reserved / FSEG_FILLFACTOR)
&& (used >= FSEG_FRAG_LIMIT)
&& (!!(ret_descr
- = fseg_alloc_free_extent(
- seg_inode, space, page_size, mtr)))) {
+ = fseg_alloc_free_extent(seg_inode, space, mtr)))) {
/* 3. We take any free extent (which was already assigned above
===============================================================
@@ -2389,8 +2336,7 @@ take_hinted_page:
return(NULL);
}
- ret_descr = xdes_lst_get_descriptor(space, page_size,
- first, mtr);
+ ret_descr = xdes_lst_get_descriptor(space, first, mtr);
ret_page = xdes_get_offset(ret_descr)
+ xdes_find_bit(ret_descr, XDES_FREE_BIT, TRUE,
0, mtr);
@@ -2400,7 +2346,7 @@ take_hinted_page:
/* 6. We allocate an individual page from the space
===================================================*/
buf_block_t* block = fsp_alloc_free_page(
- space, page_size, hint, rw_latch, mtr, init_mtr);
+ space, hint, rw_latch, mtr, init_mtr);
ut_ad(!has_done_reservation || block != NULL);
@@ -2422,8 +2368,7 @@ take_hinted_page:
} else {
/* 7. We allocate a new extent and take its first page
======================================================*/
- ret_descr = fseg_alloc_free_extent(seg_inode,
- space, page_size, mtr);
+ ret_descr = fseg_alloc_free_extent(seg_inode, space, mtr);
if (ret_descr == NULL) {
ret_page = FIL_NULL;
@@ -2471,8 +2416,7 @@ got_hinted_page:
The extent is still in the appropriate list (FSEG_NOT_FULL
or FSEG_FREE), and the page is not yet marked as used. */
- ut_ad(xdes_get_descriptor(space, ret_page, page_size, mtr)
- == ret_descr);
+ ut_ad(xdes_get_descriptor(space, ret_page, mtr) == ret_descr);
ut_ad(xdes_mtr_get_bit(
ret_descr, XDES_FREE_BIT,
@@ -2481,8 +2425,7 @@ got_hinted_page:
fseg_mark_page_used(seg_inode, ret_page, ret_descr, mtr);
}
- return(fsp_page_create(space, ret_page, page_size, rw_latch,
- mtr, init_mtr));
+ return fsp_page_create(space, ret_page, rw_latch, mtr, init_mtr);
}
/**********************************************************************//**
@@ -2524,9 +2467,8 @@ fseg_alloc_free_page_general(
space_id = page_get_space_id(page_align(seg_header));
space = mtr_x_lock_space(space_id, mtr);
- const page_size_t page_size(space->flags);
-
- inode = fseg_inode_get(seg_header, space_id, page_size, mtr, &iblock);
+ inode = fseg_inode_get(seg_header, space_id, space->zip_size(),
+ mtr, &iblock);
fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr);
if (!has_done_reservation
@@ -2535,7 +2477,7 @@ fseg_alloc_free_page_general(
return(NULL);
}
- block = fseg_alloc_free_page_low(space, page_size,
+ block = fseg_alloc_free_page_low(space,
inode, hint, direction,
RW_X_LATCH, mtr, init_mtr
#ifdef UNIV_DEBUG
@@ -2653,9 +2595,9 @@ fsp_reserve_free_extents(
*n_reserved = n_ext;
mtr_x_lock(&space->latch, mtr);
- const page_size_t page_size(space->flags);
+ const ulint physical_size = space->physical_size();
- space_header = fsp_get_space_header(space, page_size, mtr);
+ space_header = fsp_get_space_header(space, mtr);
try_again:
size = mach_read_from_4(space_header + FSP_SIZE);
ut_ad(size == space->size_in_header);
@@ -2687,8 +2629,7 @@ try_again:
if (n_free_up > 0) {
n_free_up--;
- n_free_up -= n_free_up / (page_size.physical()
- / FSP_EXTENT_SIZE);
+ n_free_up -= n_free_up / (physical_size / FSP_EXTENT_SIZE);
}
n_free = n_free_list_ext + n_free_up;
@@ -2796,9 +2737,9 @@ fseg_mark_page_used(
@param[in] seg_inode segment inode
@param[in,out] space tablespace
@param[in] offset page number
-@param[in] page_size page size
@param[in] ahi whether we may need to drop the adaptive
hash index
+@param[in] log whether to write MLOG_INIT_FREE_PAGE record
@param[in,out] mtr mini-transaction */
static
void
@@ -2806,10 +2747,10 @@ fseg_free_page_low(
fseg_inode_t* seg_inode,
fil_space_t* space,
page_no_t offset,
- const page_size_t& page_size,
#ifdef BTR_CUR_HASH_ADAPT
bool ahi,
#endif /* BTR_CUR_HASH_ADAPT */
+ bool log,
mtr_t* mtr)
{
xdes_t* descr;
@@ -2835,7 +2776,7 @@ fseg_free_page_low(
}
#endif /* BTR_CUR_HASH_ADAPT */
- descr = xdes_get_descriptor(space, offset, page_size, mtr);
+ descr = xdes_get_descriptor(space, offset, mtr);
if (xdes_mtr_get_bit(descr, XDES_FREE_BIT,
offset % FSP_EXTENT_SIZE, mtr)) {
@@ -2863,8 +2804,7 @@ fseg_free_page_low(
}
}
- fsp_free_page(space, offset, page_size, mtr);
-
+ fsp_free_page(space, offset, log, mtr);
return;
}
@@ -2914,13 +2854,13 @@ fseg_free_page_low(
/* The extent has become free: free it to space */
flst_remove(seg_inode + FSEG_NOT_FULL,
descr + XDES_FLST_NODE, mtr);
- fsp_free_extent(space, offset, page_size, mtr);
+ fsp_free_extent(space, offset, mtr);
}
}
#ifndef BTR_CUR_HASH_ADAPT
-# define fseg_free_page_low(inode, space, offset, page_size, ahi, mtr) \
- fseg_free_page_low(inode, space, offset, page_size, mtr)
+# define fseg_free_page_low(inode, space, offset, ahi, log, mtr) \
+ fseg_free_page_low(inode, space, offset, log, mtr)
#endif /* !BTR_CUR_HASH_ADAPT */
/** Free a page in a file segment.
@@ -2929,6 +2869,7 @@ fseg_free_page_low(
@param[in] offset page number
@param[in] ahi whether we may need to drop the adaptive
hash index
+@param[in] log whether to write MLOG_INIT_FREE_PAGE record
@param[in,out] mtr mini-transaction */
void
fseg_free_page_func(
@@ -2938,22 +2879,23 @@ fseg_free_page_func(
#ifdef BTR_CUR_HASH_ADAPT
bool ahi,
#endif /* BTR_CUR_HASH_ADAPT */
+ bool log,
mtr_t* mtr)
{
DBUG_ENTER("fseg_free_page");
fseg_inode_t* seg_inode;
buf_block_t* iblock;
mtr_x_lock(&space->latch, mtr);
- const page_size_t page_size(space->flags);
DBUG_LOG("fseg_free_page", "space_id: " << space->id
<< ", page_no: " << offset);
- seg_inode = fseg_inode_get(seg_header, space->id, page_size, mtr,
+ seg_inode = fseg_inode_get(seg_header, space->id, space->zip_size(),
+ mtr,
&iblock);
fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr);
- fseg_free_page_low(seg_inode, space, offset, page_size, ahi, mtr);
+ fseg_free_page_low(seg_inode, space, offset, ahi, log, mtr);
ut_d(buf_page_set_file_page_was_freed(page_id_t(space->id, offset)));
@@ -2969,8 +2911,8 @@ fseg_page_is_free(fil_space_t* space, unsigned page)
{
bool is_free;
mtr_t mtr;
- page_size_t page_size(space->flags);
- page_no_t dpage = xdes_calc_descriptor_page(page_size, page);
+ page_no_t dpage = xdes_calc_descriptor_page(space->zip_size(),
+ page);
mtr.start();
mtr_s_lock(&space->latch, &mtr);
@@ -2978,7 +2920,7 @@ fseg_page_is_free(fil_space_t* space, unsigned page)
if (page >= space->free_limit || page >= space->size_in_header) {
is_free = true;
} else if (const xdes_t* descr = xdes_get_descriptor_const(
- space, dpage, page, page_size, &mtr)) {
+ space, dpage, page, &mtr)) {
is_free = xdes_get_bit(descr, XDES_FREE_BIT,
page % FSP_EXTENT_SIZE);
} else {
@@ -2992,7 +2934,6 @@ fseg_page_is_free(fil_space_t* space, unsigned page)
/** Free an extent of a segment to the space free list.
@param[in,out] seg_inode segment inode
@param[in,out] space tablespace
-@param[in] page_size page size
@param[in] page page number in the extent
@param[in] ahi whether we may need to drop
the adaptive hash index
@@ -3003,7 +2944,6 @@ void
fseg_free_extent(
fseg_inode_t* seg_inode,
fil_space_t* space,
- const page_size_t& page_size,
ulint page,
#ifdef BTR_CUR_HASH_ADAPT
bool ahi,
@@ -3018,7 +2958,7 @@ fseg_free_extent(
ut_ad(mtr != NULL);
- descr = xdes_get_descriptor(space, page, page_size, mtr);
+ descr = xdes_get_descriptor(space, page, mtr);
ut_a(xdes_get_state(descr, mtr) == XDES_FSEG);
ut_a(!memcmp(descr + XDES_ID, seg_inode + FSEG_ID, 8));
@@ -3065,7 +3005,7 @@ fseg_free_extent(
MLOG_4BYTES, mtr);
}
- fsp_free_extent(space, page, page_size, mtr);
+ fsp_free_extent(space, page, mtr);
#ifdef UNIV_DEBUG
for (i = 0; i < FSP_EXTENT_SIZE; i++) {
@@ -3077,8 +3017,8 @@ fseg_free_extent(
}
#ifndef BTR_CUR_HASH_ADAPT
-# define fseg_free_extent(inode, space, page_size, page, ahi, mtr) \
- fseg_free_extent(inode, space, page_size, page, mtr)
+# define fseg_free_extent(inode, space, page, ahi, mtr) \
+ fseg_free_extent(inode, space, page, mtr)
#endif /* !BTR_CUR_HASH_ADAPT */
/**********************************************************************//**
@@ -3112,9 +3052,8 @@ fseg_free_step_func(
header_page = page_get_page_no(page_align(header));
fil_space_t* space = mtr_x_lock_space(space_id, mtr);
- const page_size_t page_size(space->flags);
- descr = xdes_get_descriptor(space, header_page, page_size, mtr);
+ descr = xdes_get_descriptor(space, header_page, mtr);
/* Check that the header resides on a page which has not been
freed yet */
@@ -3122,8 +3061,8 @@ fseg_free_step_func(
ut_a(xdes_mtr_get_bit(descr, XDES_FREE_BIT,
header_page % FSP_EXTENT_SIZE, mtr) == FALSE);
buf_block_t* iblock;
-
- inode = fseg_inode_try_get(header, space_id, page_size, mtr, &iblock);
+ const ulint zip_size = space->zip_size();
+ inode = fseg_inode_try_get(header, space_id, zip_size, mtr, &iblock);
if (inode == NULL) {
ib::info() << "Double free of inode from "
@@ -3132,14 +3071,12 @@ fseg_free_step_func(
}
fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr);
- descr = fseg_get_first_extent(inode, space, page_size, mtr);
+ descr = fseg_get_first_extent(inode, space, mtr);
if (descr != NULL) {
/* Free the extent held by the segment */
page = xdes_get_offset(descr);
-
- fseg_free_extent(inode, space, page_size, page, ahi, mtr);
-
+ fseg_free_extent(inode, space, page, ahi, mtr);
DBUG_RETURN(FALSE);
}
@@ -3148,7 +3085,7 @@ fseg_free_step_func(
if (n == ULINT_UNDEFINED) {
/* Freeing completed: free the segment inode */
- fsp_free_seg_inode(space, page_size, inode, mtr);
+ fsp_free_seg_inode(space, inode, true, mtr);
DBUG_RETURN(TRUE);
}
@@ -3156,13 +3093,13 @@ fseg_free_step_func(
fseg_free_page_low(
inode, space,
fseg_get_nth_frag_page_no(inode, n, mtr),
- page_size, ahi, mtr);
+ ahi, true, mtr);
n = fseg_find_last_used_frag_page_slot(inode, mtr);
if (n == ULINT_UNDEFINED) {
/* Freeing completed: free the segment inode */
- fsp_free_seg_inode(space, page_size, inode, mtr);
+ fsp_free_seg_inode(space, inode, true, mtr);
DBUG_RETURN(TRUE);
}
@@ -3195,19 +3132,19 @@ fseg_free_step_not_header_func(
ut_ad(mtr->is_named_space(space_id));
fil_space_t* space = mtr_x_lock_space(space_id, mtr);
- const page_size_t page_size(space->flags);
buf_block_t* iblock;
- inode = fseg_inode_get(header, space_id, page_size, mtr, &iblock);
+ inode = fseg_inode_get(header, space_id, space->zip_size(), mtr,
+ &iblock);
fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr);
- descr = fseg_get_first_extent(inode, space, page_size, mtr);
+ descr = fseg_get_first_extent(inode, space, mtr);
if (descr != NULL) {
/* Free the extent held by the segment */
page = xdes_get_offset(descr);
- fseg_free_extent(inode, space, page_size, page, ahi, mtr);
+ fseg_free_extent(inode, space, page, ahi, mtr);
return(FALSE);
}
@@ -3227,7 +3164,7 @@ fseg_free_step_not_header_func(
return(TRUE);
}
- fseg_free_page_low(inode, space, page_no, page_size, ahi, mtr);
+ fseg_free_page_low(inode, space, page_no, ahi, true, mtr);
return(FALSE);
}
@@ -3237,7 +3174,6 @@ We think of the extent lists of the segment catenated in the order
FSEG_FULL -> FSEG_NOT_FULL -> FSEG_FREE.
@param[in] inode segment inode
@param[in] space tablespace
-@param[in] page_size page size
@param[in,out] mtr mini-transaction
@return the first extent descriptor, or NULL if none */
MY_ATTRIBUTE((nonnull, warn_unused_result))
@@ -3246,7 +3182,6 @@ xdes_t*
fseg_get_first_extent(
fseg_inode_t* inode,
const fil_space_t* space,
- const page_size_t& page_size,
mtr_t* mtr)
{
fil_addr_t first;
@@ -3272,7 +3207,7 @@ fseg_get_first_extent(
ut_ad(first.page != FIL_NULL);
return(first.page == FIL_NULL ? NULL
- : xdes_lst_get_descriptor(space, page_size, first, mtr));
+ : xdes_lst_get_descriptor(space, first, mtr));
}
#ifdef UNIV_BTR_PRINT
@@ -3336,9 +3271,8 @@ fseg_print(
space_id = page_get_space_id(page_align(header));
const fil_space_t* space = mtr_x_lock_space(space_id, mtr);
- const page_size_t page_size(space->flags);
- inode = fseg_inode_get(header, space_id, page_size, mtr);
+ inode = fseg_inode_get(header, space_id, space->zip_size(), mtr);
fseg_print_low(inode, mtr);
}
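The fsp0fsp.cc changes above fold the file-extension policy into the static helper fsp_get_pages_to_extend_ibd(): with 16KiB pages an extent is one megabyte (64 pages), so the threshold min(32 * extent_size, physical_size) works out to 2048 pages (32MiB); smaller files grow by one extent per call, larger ones by FSP_FREE_ADD extents, i.e. four at a time as the comment states. The sketch below reruns that arithmetic outside the server; the 16KiB default and the constant values are assumptions made only for this example.

```cpp
#include <algorithm>
#include <cstdio>

typedef unsigned long ulint;

static const ulint FSP_EXTENT_SIZE     = 64;  // pages per 1 MiB extent, assuming 16 KiB pages
static const ulint FSP_FREE_ADD        = 4;   // "4 at a time" per the comment in the patch
static const ulint srv_page_size_shift = 14;  // 16 KiB logical pages (assumed default)

static ulint extent_size_in_pages(ulint physical_size)
{
	return (FSP_EXTENT_SIZE << srv_page_size_shift) / physical_size;
}

static ulint pages_to_extend_ibd(ulint physical_size, ulint size)
{
	ulint extent_size = extent_size_in_pages(physical_size);
	ulint threshold   = std::min(32 * extent_size, physical_size);
	return size >= threshold ? FSP_FREE_ADD * extent_size : extent_size;
}

int main()
{
	// A small file (100 pages, ~1.6 MiB) grows by one extent: prints 64.
	std::printf("%lu\n", pages_to_extend_ibd(16384, 100));
	// A 64 MiB file (4096 pages) is past the 32 MiB threshold: prints 256.
	std::printf("%lu\n", pages_to_extend_ibd(16384, 4096));
	return 0;
}
```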
diff --git a/storage/innobase/fsp/fsp0space.cc b/storage/innobase/fsp/fsp0space.cc
index 88b34be4952..40c69515933 100644
--- a/storage/innobase/fsp/fsp0space.cc
+++ b/storage/innobase/fsp/fsp0space.cc
@@ -118,8 +118,20 @@ Tablespace::open_or_create(bool is_temp)
/* Create the tablespace entry for the multi-file
tablespace in the tablespace manager. */
+ ulint fsp_flags = 0;
+
+ switch (srv_checksum_algorithm) {
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
+ fsp_flags = (FSP_FLAGS_FCRC32_MASK_MARKER
+ | FSP_FLAGS_FCRC32_PAGE_SSIZE());
+ break;
+ default:
+ fsp_flags = FSP_FLAGS_PAGE_SSIZE();
+ }
+
space = fil_space_create(
- m_name, m_space_id, FSP_FLAGS_PAGE_SSIZE(),
+ m_name, m_space_id, fsp_flags,
is_temp
? FIL_TYPE_TEMPORARY : FIL_TYPE_TABLESPACE,
NULL);
diff --git a/storage/innobase/fsp/fsp0sysspace.cc b/storage/innobase/fsp/fsp0sysspace.cc
index aec2a3914b8..65e68eb8330 100644
--- a/storage/innobase/fsp/fsp0sysspace.cc
+++ b/storage/innobase/fsp/fsp0sysspace.cc
@@ -914,7 +914,7 @@ SysTablespace::open_or_create(
ut_ad(!fil_system.sys_space);
ut_ad(space_id() == TRX_SYS_SPACE);
space = fil_system.sys_space = fil_space_create(
- name(), TRX_SYS_SPACE, flags(),
+ name(), TRX_SYS_SPACE, it->flags(),
FIL_TYPE_TABLESPACE, NULL);
if (!space) {
return DB_ERROR;
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index 59d60ec4a15..128518d7433 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -3244,15 +3244,10 @@ fts_fetch_doc_from_rec(
documents */
{
dict_index_t* index;
- dict_table_t* table;
const rec_t* clust_rec;
- ulint num_field;
const dict_field_t* ifield;
- const dict_col_t* col;
ulint clust_pos;
- ulint i;
ulint doc_len = 0;
- ulint processed_doc = 0;
st_mysql_ftparser* parser;
if (!get_doc) {
@@ -3260,19 +3255,15 @@ fts_fetch_doc_from_rec(
}
index = get_doc->index_cache->index;
- table = get_doc->index_cache->index->table;
parser = get_doc->index_cache->index->parser;
clust_rec = btr_pcur_get_rec(pcur);
ut_ad(!page_rec_is_comp(clust_rec)
|| rec_get_status(clust_rec) == REC_STATUS_ORDINARY);
- num_field = dict_index_get_n_fields(index);
-
- for (i = 0; i < num_field; i++) {
+ for (ulint i = 0; i < index->n_fields; i++) {
ifield = dict_index_get_nth_field(index, i);
- col = dict_field_get_col(ifield);
- clust_pos = dict_col_get_clust_pos(col, clust_index);
+ clust_pos = dict_col_get_clust_pos(ifield->col, clust_index);
if (!get_doc->index_cache->charset) {
get_doc->index_cache->charset = fts_get_charset(
@@ -3283,7 +3274,7 @@ fts_fetch_doc_from_rec(
doc->text.f_str =
btr_rec_copy_externally_stored_field(
clust_rec, offsets,
- dict_table_page_size(table),
+ btr_pcur_get_block(pcur)->zip_size(),
clust_pos, &doc->text.f_len,
static_cast<mem_heap_t*>(
doc->self_heap->arg));
@@ -3301,13 +3292,12 @@ fts_fetch_doc_from_rec(
continue;
}
- if (processed_doc == 0) {
+ if (!doc_len) {
fts_tokenize_document(doc, NULL, parser);
} else {
fts_tokenize_document_next(doc, doc_len, NULL, parser);
}
- processed_doc++;
doc_len += doc->text.f_len + 1;
}
}
@@ -3714,13 +3704,6 @@ fts_get_max_doc_id(
if (!page_is_empty(btr_pcur_get_page(&pcur))) {
const rec_t* rec = NULL;
- ulint offsets_[REC_OFFS_NORMAL_SIZE];
- ulint* offsets = offsets_;
- mem_heap_t* heap = NULL;
- ulint len;
- const void* data;
-
- rec_offs_init(offsets_);
do {
rec = btr_pcur_get_rec(&pcur);
@@ -3730,18 +3713,11 @@ fts_get_max_doc_id(
}
} while (btr_pcur_move_to_prev(&pcur, &mtr));
- if (!rec) {
+ if (!rec || rec_is_metadata(rec, *index)) {
goto func_exit;
}
- ut_ad(!rec_is_metadata(rec, index));
- offsets = rec_get_offsets(
- rec, index, offsets, true, ULINT_UNDEFINED, &heap);
-
- data = rec_get_nth_field(rec, offsets, 0, &len);
-
- doc_id = static_cast<doc_id_t>(fts_read_doc_id(
- static_cast<const byte*>(data)));
+ doc_id = fts_read_doc_id(rec);
}
func_exit:
@@ -5223,49 +5199,23 @@ fts_get_doc_id_from_row(
}
/** Extract the doc id from the record that belongs to index.
-@param[in] table table
-@param[in] rec record contains FTS_DOC_ID
+@param[in] rec record containing FTS_DOC_ID
@param[in] index index of rec
-@param[in] heap heap memory
+@param[in] offsets rec_get_offsets(rec,index)
@return doc id that was extracted from rec */
doc_id_t
fts_get_doc_id_from_rec(
- dict_table_t* table,
const rec_t* rec,
const dict_index_t* index,
- mem_heap_t* heap)
+ const ulint* offsets)
{
- ulint len;
- const byte* data;
- ulint col_no;
- doc_id_t doc_id = 0;
- ulint offsets_[REC_OFFS_NORMAL_SIZE];
- ulint* offsets = offsets_;
- mem_heap_t* my_heap = heap;
-
- ut_a(table->fts->doc_col != ULINT_UNDEFINED);
-
- rec_offs_init(offsets_);
-
- offsets = rec_get_offsets(
- rec, index, offsets, true, ULINT_UNDEFINED, &my_heap);
-
- col_no = dict_col_get_index_pos(
- &table->cols[table->fts->doc_col], index);
-
- ut_ad(col_no != ULINT_UNDEFINED);
-
- data = rec_get_nth_field(rec, offsets, col_no, &len);
-
- ut_a(len == 8);
- ut_ad(8 == sizeof(doc_id));
- doc_id = static_cast<doc_id_t>(mach_read_from_8(data));
-
- if (my_heap && !heap) {
- mem_heap_free(my_heap);
- }
-
- return(doc_id);
+ ulint f = dict_col_get_index_pos(
+ &index->table->cols[index->table->fts->doc_col], index);
+ ulint len;
+ doc_id_t doc_id = mach_read_from_8(
+ rec_get_nth_field(rec, offsets, f, &len));
+ ut_ad(len == 8);
+ return doc_id;
}
/*********************************************************************//**
@@ -7506,7 +7456,7 @@ fts_init_recover_doc(
doc.text.f_str = btr_copy_externally_stored_field(
&doc.text.f_len,
static_cast<byte*>(dfield_get_data(dfield)),
- dict_table_page_size(table), len,
+ table->space->zip_size(), len,
static_cast<mem_heap_t*>(doc.self_heap->arg));
} else {
doc.text.f_str = static_cast<byte*>(
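After the fts0fts.cc change, fts_get_doc_id_from_rec() no longer computes record offsets itself; callers pass the result of rec_get_offsets() along with the record and index. A hypothetical call site, pieced together from the lines the patch removes and assuming the usual InnoDB internal headers, would look like this:

```cpp
// Illustrative sketch of a caller after the signature change; names are
// taken from the patch, but this is not a quote of an actual call site.
ulint		offsets_[REC_OFFS_NORMAL_SIZE];
ulint*		offsets = offsets_;
mem_heap_t*	heap = NULL;

rec_offs_init(offsets_);
offsets = rec_get_offsets(rec, index, offsets, true, ULINT_UNDEFINED, &heap);

doc_id_t	doc_id = fts_get_doc_id_from_rec(rec, index, offsets);

if (heap) {
	mem_heap_free(heap);
}
```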
diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc
index 00f3b9aedf0..bf7e262ac98 100644
--- a/storage/innobase/fts/fts0que.cc
+++ b/storage/innobase/fts/fts0que.cc
@@ -206,7 +206,7 @@ struct fts_phrase_t {
distance(0),
charset(NULL),
heap(NULL),
- page_size(dict_table_page_size(table)),
+ zip_size(table->space->zip_size()),
proximity_pos(NULL),
parser(NULL)
{
@@ -230,8 +230,8 @@ struct fts_phrase_t {
/** Heap for word processing */
mem_heap_t* heap;
- /** Row page size */
- const page_size_t page_size;
+ /** ROW_FORMAT=COMPRESSED page size, or 0 */
+ const ulint zip_size;
/** Position info for proximity search verification. Records the
min and max position of words matched */
@@ -2013,7 +2013,7 @@ fts_query_fetch_document(
if (dfield_is_ext(dfield)) {
data = btr_copy_externally_stored_field(
- &cur_len, data, phrase->page_size,
+ &cur_len, data, phrase->zip_size,
dfield_get_len(dfield), phrase->heap);
} else {
cur_len = dfield_get_len(dfield);
diff --git a/storage/innobase/fut/fut0lst.cc b/storage/innobase/fut/fut0lst.cc
index 3e77165ac31..66e35c6e2c4 100644
--- a/storage/innobase/fut/fut0lst.cc
+++ b/storage/innobase/fut/fut0lst.cc
@@ -58,8 +58,8 @@ flst_add_to_empty(
flst_write_addr(base + FLST_LAST, node_addr, mtr);
/* Set prev and next fields of node to add */
- flst_write_addr(node + FLST_PREV, fil_addr_null, mtr);
- flst_write_addr(node + FLST_NEXT, fil_addr_null, mtr);
+ flst_zero_addr(node + FLST_PREV, mtr);
+ flst_zero_addr(node + FLST_NEXT, mtr);
/* Update len of base node */
mlog_write_ulint(base + FLST_LEN, 1, MLOG_4BYTES, mtr);
@@ -120,13 +120,11 @@ flst_add_last(
if (last_addr.page == node_addr.page) {
last_node = page_align(node) + last_addr.boffset;
} else {
- bool found;
- const page_size_t& page_size
- = fil_space_get_page_size(space, &found);
+ fil_space_t* s = fil_space_acquire_silent(space);
+ ulint zip_size = s ? s->zip_size() : 0;
+ if (s) s->release();
- ut_ad(found);
-
- last_node = fut_get_ptr(space, page_size, last_addr,
+ last_node = fut_get_ptr(space, zip_size, last_addr,
RW_SX_LATCH, mtr);
}
@@ -170,13 +168,11 @@ flst_add_first(
if (first_addr.page == node_addr.page) {
first_node = page_align(node) + first_addr.boffset;
} else {
- bool found;
- const page_size_t& page_size
- = fil_space_get_page_size(space, &found);
-
- ut_ad(found);
+ fil_space_t* s = fil_space_acquire_silent(space);
+ ulint zip_size = s ? s->zip_size() : 0;
+ if (s) s->release();
- first_node = fut_get_ptr(space, page_size, first_addr,
+ first_node = fut_get_ptr(space, zip_size, first_addr,
RW_SX_LATCH, mtr);
}
@@ -230,13 +226,11 @@ flst_insert_after(
if (!fil_addr_is_null(node3_addr)) {
/* Update prev field of node3 */
- bool found;
- const page_size_t& page_size
- = fil_space_get_page_size(space, &found);
-
- ut_ad(found);
+ fil_space_t* s = fil_space_acquire_silent(space);
+ ulint zip_size = s ? s->zip_size() : 0;
+ if (s) s->release();
- node3 = fut_get_ptr(space, page_size,
+ node3 = fut_get_ptr(space, zip_size,
node3_addr, RW_SX_LATCH, mtr);
flst_write_addr(node3 + FLST_PREV, node2_addr, mtr);
} else {
@@ -294,14 +288,12 @@ flst_insert_before(
flst_write_addr(node2 + FLST_NEXT, node3_addr, mtr);
if (!fil_addr_is_null(node1_addr)) {
- bool found;
- const page_size_t& page_size
- = fil_space_get_page_size(space, &found);
-
- ut_ad(found);
+ fil_space_t* s = fil_space_acquire_silent(space);
+ ulint zip_size = s ? s->zip_size() : 0;
+ if (s) s->release();
/* Update next field of node1 */
- node1 = fut_get_ptr(space, page_size, node1_addr,
+ node1 = fut_get_ptr(space, zip_size, node1_addr,
RW_SX_LATCH, mtr);
flst_write_addr(node1 + FLST_NEXT, node2_addr, mtr);
} else {
@@ -344,11 +336,9 @@ flst_remove(
buf_ptr_get_fsp_addr(node2, &space, &node2_addr);
- bool found;
- const page_size_t& page_size = fil_space_get_page_size(space,
- &found);
-
- ut_ad(found);
+ fil_space_t* s = fil_space_acquire_silent(space);
+ ulint zip_size = s ? s->zip_size() : 0;
+ if (s) s->release();
node1_addr = flst_get_prev_addr(node2, mtr);
node3_addr = flst_get_next_addr(node2, mtr);
@@ -361,7 +351,7 @@ flst_remove(
node1 = page_align(node2) + node1_addr.boffset;
} else {
- node1 = fut_get_ptr(space, page_size,
+ node1 = fut_get_ptr(space, zip_size,
node1_addr, RW_SX_LATCH, mtr);
}
@@ -380,7 +370,7 @@ flst_remove(
node3 = page_align(node2) + node3_addr.boffset;
} else {
- node3 = fut_get_ptr(space, page_size,
+ node3 = fut_get_ptr(space, zip_size,
node3_addr, RW_SX_LATCH, mtr);
}
@@ -431,11 +421,9 @@ flst_validate(
/* Find out the space id */
buf_ptr_get_fsp_addr(base, &space, &base_addr);
- bool found;
- const page_size_t& page_size = fil_space_get_page_size(space,
- &found);
-
- ut_ad(found);
+ fil_space_t* s = fil_space_acquire_silent(space);
+ ulint zip_size = s ? s->zip_size() : 0;
+ if (s) s->release();
len = flst_get_len(base);
node_addr = flst_get_first(base, mtr1);
@@ -443,7 +431,7 @@ flst_validate(
for (i = 0; i < len; i++) {
mtr_start(&mtr2);
- node = fut_get_ptr(space, page_size,
+ node = fut_get_ptr(space, zip_size,
node_addr, RW_SX_LATCH, &mtr2);
node_addr = flst_get_next_addr(node, &mtr2);
@@ -458,7 +446,7 @@ flst_validate(
for (i = 0; i < len; i++) {
mtr_start(&mtr2);
- node = fut_get_ptr(space, page_size,
+ node = fut_get_ptr(space, zip_size,
node_addr, RW_SX_LATCH, &mtr2);
node_addr = flst_get_prev_addr(node, &mtr2);
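Note: the same acquire/zip_size/release sequence is repeated in flst_add_last(), flst_add_first(), flst_insert_after(), flst_insert_before(), flst_remove() and flst_validate(). A hypothetical helper (not part of this patch) that could factor it out, using only calls that appear in the hunks above:

	/* Return the ROW_FORMAT=COMPRESSED page size of a tablespace, or 0. */
	static ulint flst_get_zip_size(ulint space)
	{
		fil_space_t*	s = fil_space_acquire_silent(space);
		ulint		zip_size = s ? s->zip_size() : 0;
		if (s) {
			s->release();
		}
		return zip_size;
	}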
diff --git a/storage/innobase/gis/gis0rtree.cc b/storage/innobase/gis/gis0rtree.cc
index 9ddfa42cf98..3f9812928a4 100644
--- a/storage/innobase/gis/gis0rtree.cc
+++ b/storage/innobase/gis/gis0rtree.cc
@@ -746,14 +746,15 @@ rtr_adjust_upper_level(
prev_page_no = btr_page_get_prev(page, mtr);
next_page_no = btr_page_get_next(page, mtr);
space = block->page.id.space();
- const page_size_t& page_size = dict_table_page_size(index->table);
+ ut_ad(block->zip_size() == index->table->space->zip_size());
/* Update page links of the level */
if (prev_page_no != FIL_NULL) {
page_id_t prev_page_id(space, prev_page_no);
buf_block_t* prev_block = btr_block_get(
- prev_page_id, page_size, RW_X_LATCH, index, mtr);
+ prev_page_id, block->zip_size(), RW_X_LATCH,
+ index, mtr);
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(prev_block->frame) == page_is_comp(page));
ut_a(btr_page_get_next(prev_block->frame, mtr)
@@ -769,7 +770,8 @@ rtr_adjust_upper_level(
page_id_t next_page_id(space, next_page_no);
buf_block_t* next_block = btr_block_get(
- next_page_id, page_size, RW_X_LATCH, index, mtr);
+ next_page_id, block->zip_size(), RW_X_LATCH,
+ index, mtr);
#ifdef UNIV_BTR_DEBUG
ut_a(page_is_comp(next_block->frame) == page_is_comp(page));
ut_a(btr_page_get_prev(next_block->frame, mtr)
@@ -914,7 +916,7 @@ rtr_split_page_move_rec_list(
mtr_set_log_mode(mtr, log_mode);
if (!page_zip_compress(new_page_zip, new_page, index,
- page_zip_level, NULL, mtr)) {
+ page_zip_level, mtr)) {
ulint ret_pos;
/* Before trying to reorganize the page,
@@ -1872,7 +1874,7 @@ rtr_estimate_n_rows_in_range(
buf_block_t* block = btr_block_get(
page_id_t(index->table->space_id, index->page),
- page_size_t(index->table->space->flags),
+ index->table->space->zip_size(),
RW_S_LATCH, index, &mtr);
const page_t* page = buf_block_get_frame(block);
const unsigned n_recs = page_header_get_field(page, PAGE_N_RECS);
diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc
index b2ff3a697bd..913a299c118 100644
--- a/storage/innobase/gis/gis0sea.cc
+++ b/storage/innobase/gis/gis0sea.cc
@@ -145,7 +145,7 @@ rtr_pcur_getnext_from_path(
| MTR_MEMO_X_LOCK));
}
- const page_size_t page_size(index->table->space->flags);
+ const ulint zip_size = index->table->space->zip_size();
/* Pop each node/page to be searched from "path" structure
and do a search on it. Please note, any pages that are in
@@ -269,7 +269,7 @@ rtr_pcur_getnext_from_path(
block = buf_page_get_gen(
page_id_t(index->table->space_id,
- next_rec.page_no), page_size,
+ next_rec.page_no), zip_size,
rw_latch, NULL, BUF_GET, __FILE__, __LINE__, mtr, &err);
if (block == NULL) {
@@ -424,7 +424,7 @@ rtr_pcur_getnext_from_path(
block,
page_id_t(index->table->space_id,
block->page.id.page_no()),
- page_size, BTR_MODIFY_TREE,
+ zip_size, BTR_MODIFY_TREE,
btr_cur, mtr);
}
@@ -1344,8 +1344,7 @@ rtr_cur_restore_position(
page_cur_t* page_cursor;
node_visit_t* node = rtr_get_parent_node(btr_cur, level, false);
node_seq_t path_ssn = node->seq_no;
- const page_size_t page_size(index->table->space->flags);
-
+ const ulint zip_size = index->table->space->zip_size();
ulint page_no = node->page_no;
heap = mem_heap_create(256);
@@ -1361,7 +1360,7 @@ search_again:
block = buf_page_get_gen(
page_id_t(index->table->space_id, page_no),
- page_size, RW_X_LATCH, NULL,
+ zip_size, RW_X_LATCH, NULL,
BUF_GET, __FILE__, __LINE__, mtr, &err);
ut_ad(block);
@@ -1561,14 +1560,13 @@ rtr_copy_buf(
matches->block.n_fields = block->n_fields;
matches->block.left_side = block->left_side;
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
- matches->block.n_pointers = block->n_pointers;
+ matches->block.n_pointers = 0;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
matches->block.curr_n_fields = block->curr_n_fields;
matches->block.curr_left_side = block->curr_left_side;
matches->block.index = block->index;
#endif /* BTR_CUR_HASH_ADAPT */
- ut_d(matches->block.debug_latch = block->debug_latch);
-
+ ut_d(matches->block.debug_latch = NULL);
}
/****************************************************************//**
diff --git a/storage/innobase/ha/ha0ha.cc b/storage/innobase/ha/ha0ha.cc
index fa1a9bc5db9..b6fdab5968b 100644
--- a/storage/innobase/ha/ha0ha.cc
+++ b/storage/innobase/ha/ha0ha.cc
@@ -245,11 +245,8 @@ ha_insert_for_fold_func(
buf_block_t* prev_block = prev_node->block;
ut_a(prev_block->frame
== page_align(prev_node->data));
- ut_a(my_atomic_addlint(&prev_block->n_pointers,
- ulint(-1))
- < MAX_N_POINTERS);
- ut_a(my_atomic_addlint(&block->n_pointers, 1)
- < MAX_N_POINTERS);
+ ut_a(prev_block->n_pointers-- < MAX_N_POINTERS);
+ ut_a(block->n_pointers++ < MAX_N_POINTERS);
}
prev_node->block = block;
@@ -280,8 +277,7 @@ ha_insert_for_fold_func(
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (table->adaptive) {
- ut_a(my_atomic_addlint(&block->n_pointers, 1)
- < MAX_N_POINTERS);
+ ut_a(block->n_pointers++ < MAX_N_POINTERS);
}
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
@@ -342,8 +338,7 @@ ha_delete_hash_node(
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (table->adaptive) {
ut_a(del_node->block->frame = page_align(del_node->data));
- ut_a(my_atomic_addlint(&del_node->block->n_pointers, ulint(-1))
- < MAX_N_POINTERS);
+ ut_a(del_node->block->n_pointers-- < MAX_N_POINTERS);
}
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
@@ -385,11 +380,8 @@ ha_search_and_update_if_found_func(
if (node) {
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (table->adaptive) {
- ut_a(my_atomic_addlint(&node->block->n_pointers,
- ulint(-1))
- < MAX_N_POINTERS);
- ut_a(my_atomic_addlint(&new_block->n_pointers, 1)
- < MAX_N_POINTERS);
+ ut_a(node->block->n_pointers-- < MAX_N_POINTERS);
+ ut_a(new_block->n_pointers++ < MAX_N_POINTERS);
}
node->block = new_block;
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 35f748d4754..04fdb0ff3b2 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -97,7 +97,6 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include "row0mysql.h"
#include "row0quiesce.h"
#include "row0sel.h"
-#include "row0trunc.h"
#include "row0upd.h"
#include "fil0crypt.h"
#include "ut0timer.h"
@@ -132,6 +131,9 @@ void close_thread_tables(THD* thd);
#define tdc_size 400
#endif
+#include <mysql/plugin.h>
+#include <mysql/service_wsrep.h>
+
#include "ha_innodb.h"
#include "i_s.h"
#include "sync0sync.h"
@@ -139,28 +141,10 @@ void close_thread_tables(THD* thd);
#include <string>
#include <sstream>
-#include <mysql/plugin.h>
-#include <mysql/service_wsrep.h>
-
#ifdef WITH_WSREP
#include "dict0priv.h"
#include <mysql/service_md5.h>
#include "wsrep_sst.h"
-
-static inline wsrep_ws_handle_t*
-wsrep_ws_handle(THD* thd, const trx_t* trx) {
- return wsrep_ws_handle_for_trx(wsrep_thd_ws_handle(thd),
- (wsrep_trx_id_t)trx->id);
-}
-
-extern void wsrep_cleanup_transaction(THD *thd);
-static int
-wsrep_abort_transaction(handlerton* hton, THD *bf_thd, THD *victim_thd,
- my_bool signal);
-static void
-wsrep_fake_trx_id(handlerton* hton, THD *thd);
-static int innobase_wsrep_set_checkpoint(handlerton* hton, const XID* xid);
-static int innobase_wsrep_get_checkpoint(handlerton* hton, XID* xid);
#endif /* WITH_WSREP */
/** to force correct commit order in binlog */
@@ -267,7 +251,7 @@ is_partition(
/** Signal to shut down InnoDB (NULL if shutdown was signaled, or if
running in innodb_read_only mode, srv_read_only_mode) */
-st_my_thread_var *srv_running;
+std::atomic <st_my_thread_var *> srv_running;
/** Service thread that waits for the server shutdown and stops purge threads.
Purge workers have THDs that are needed to calculate virtual columns.
This THDs must be destroyed rather early in the server shutdown sequence.
@@ -295,16 +279,12 @@ thd_destructor_proxy(void *)
mysql_mutex_lock(&thd_destructor_mutex);
- my_atomic_storeptr_explicit(reinterpret_cast<void**>(&srv_running),
- myvar,
- MY_MEMORY_ORDER_RELAXED);
+ srv_running.store(myvar, std::memory_order_relaxed);
/* wait until the server wakes the THD to abort and die */
- while (!srv_running->abort)
+ while (!myvar->abort)
mysql_cond_wait(&thd_destructor_cond, &thd_destructor_mutex);
mysql_mutex_unlock(&thd_destructor_mutex);
- my_atomic_storeptr_explicit(reinterpret_cast<void**>(&srv_running),
- NULL,
- MY_MEMORY_ORDER_RELAXED);
+ srv_running.store(NULL, std::memory_order_relaxed);
while (srv_fast_shutdown == 0 &&
(trx_sys.any_active_transactions() ||
@@ -376,6 +356,8 @@ const char* innodb_checksum_algorithm_names[] = {
"strict_innodb",
"none",
"strict_none",
+ "full_crc32",
+ "strict_full_crc32",
NullS
};
@@ -1042,8 +1024,6 @@ static SHOW_VAR innodb_status_variables[]= {
(char*) &export_vars.innodb_pages_created, SHOW_LONG},
{"pages_read",
(char*) &export_vars.innodb_pages_read, SHOW_LONG},
- {"pages0_read",
- (char*) &export_vars.innodb_page0_read, SHOW_LONG},
{"pages_written",
(char*) &export_vars.innodb_pages_written, SHOW_LONG},
{"row_lock_current_waits",
@@ -1862,8 +1842,13 @@ thd_to_trx_id(
{
return(thd_to_trx(thd)->id);
}
-#endif /* WITH_WSREP */
+static int
+wsrep_abort_transaction(handlerton* hton, THD *bf_thd, THD *victim_thd,
+ my_bool signal);
+static int innobase_wsrep_set_checkpoint(handlerton* hton, const XID* xid);
+static int innobase_wsrep_get_checkpoint(handlerton* hton, XID* xid);
+#endif /* WITH_WSREP */
/********************************************************************//**
Increments innobase_active_counter and every INNOBASE_WAKE_INTERVALth
time calls srv_active_wake_master_thread. This function should be used
@@ -2926,8 +2911,9 @@ ha_innobase::ha_innobase(
| HA_CAN_EXPORT
| HA_CAN_RTREEKEYS
| HA_CAN_TABLES_WITHOUT_ROLLBACK
+ | HA_CAN_ONLINE_BACKUPS
| HA_CONCURRENT_OPTIMIZE
- | (srv_force_primary_key ? HA_WANTS_PRIMARY_KEY : 0)
+ | (srv_force_primary_key ? HA_REQUIRE_PRIMARY_KEY : 0)
),
m_start_of_scan(),
m_mysql_has_locked()
@@ -3473,6 +3459,10 @@ ha_innobase::reset_template(void)
in ha_innobase::write_row(). */
m_prebuilt->template_type = ROW_MYSQL_NO_TEMPLATE;
}
+ if (m_prebuilt->pk_filter) {
+ m_prebuilt->pk_filter = NULL;
+ m_prebuilt->template_type = ROW_MYSQL_NO_TEMPLATE;
+ }
}
/*****************************************************************//**
@@ -3864,13 +3854,18 @@ static int innodb_init_params()
DBUG_RETURN(HA_ERR_INITIALIZATION);
}
- /* This is the first time univ_page_size is used.
- It was initialized to 16k pages before srv_page_size was set */
- univ_page_size.copy_from(
- page_size_t(srv_page_size, srv_page_size, false));
-
srv_sys_space.set_space_id(TRX_SYS_SPACE);
- srv_sys_space.set_flags(FSP_FLAGS_PAGE_SSIZE());
+
+ switch (srv_checksum_algorithm) {
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
+ srv_sys_space.set_flags(FSP_FLAGS_FCRC32_MASK_MARKER
+ | FSP_FLAGS_FCRC32_PAGE_SSIZE());
+ break;
+ default:
+ srv_sys_space.set_flags(FSP_FLAGS_PAGE_SSIZE());
+ }
+
srv_sys_space.set_name("innodb_system");
srv_sys_space.set_path(srv_data_home);
@@ -3883,7 +3878,16 @@ static int innodb_init_params()
srv_tmp_space.set_name("innodb_temporary");
srv_tmp_space.set_path(srv_data_home);
- srv_tmp_space.set_flags(FSP_FLAGS_PAGE_SSIZE());
+
+ switch (srv_checksum_algorithm) {
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
+ srv_tmp_space.set_flags(FSP_FLAGS_FCRC32_MASK_MARKER
+ | FSP_FLAGS_FCRC32_PAGE_SSIZE());
+ break;
+ default:
+ srv_tmp_space.set_flags(FSP_FLAGS_PAGE_SSIZE());
+ }
if (!srv_tmp_space.parse_params(innobase_temp_data_file_path, false)) {
ib::error() << "Unable to parse innodb_temp_data_file_path="
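Note: the checksum-dependent flag selection is duplicated for the system and temporary tablespaces. A hypothetical helper (not in this patch) sketching how the selection could be shared; all names come from the hunks above:

	/* Hypothetical helper: pick FSP flags according to the configured
	   innodb_checksum_algorithm. */
	static ulint innodb_default_fsp_flags()
	{
		switch (srv_checksum_algorithm) {
		case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
		case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
			return FSP_FLAGS_FCRC32_MASK_MARKER
				| FSP_FLAGS_FCRC32_PAGE_SSIZE();
		default:
			return FSP_FLAGS_PAGE_SSIZE();
		}
	}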
@@ -4174,13 +4178,12 @@ static int innodb_init(void* p)
innobase_hton->show_status = innobase_show_status;
innobase_hton->flags =
HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS
- | HTON_NATIVE_SYS_VERSIONING;
+ | HTON_NATIVE_SYS_VERSIONING | HTON_WSREP_REPLICATION;
#ifdef WITH_WSREP
innobase_hton->abort_transaction=wsrep_abort_transaction;
innobase_hton->set_checkpoint=innobase_wsrep_set_checkpoint;
innobase_hton->get_checkpoint=innobase_wsrep_get_checkpoint;
- innobase_hton->fake_trx_id=wsrep_fake_trx_id;
#endif /* WITH_WSREP */
innobase_hton->tablefile_extensions = ha_innobase_exts;
@@ -4272,9 +4275,7 @@ static int innodb_init(void* p)
mysql_thread_create(thd_destructor_thread_key,
&thd_destructor_thread,
NULL, thd_destructor_proxy, NULL);
- while (!my_atomic_loadptr_explicit(reinterpret_cast<void**>
- (&srv_running),
- MY_MEMORY_ORDER_RELAXED))
+ while (!srv_running.load(std::memory_order_relaxed))
os_thread_sleep(20);
}
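Note: srv_running becomes a std::atomic<st_my_thread_var*>, and the my_atomic_loadptr/storeptr calls on it are replaced by load()/store() with relaxed ordering. A standalone illustration of the pattern (a sketch, not the server code; only <atomic> is assumed):

	#include <atomic>

	struct st_my_thread_var;
	static std::atomic<st_my_thread_var*> srv_running;

	/* Publish the descriptor; relaxed ordering suffices because readers
	   merely poll for NULL vs. non-NULL. */
	void publish(st_my_thread_var* myvar)
	{
		srv_running.store(myvar, std::memory_order_relaxed);
	}

	/* Poll for the descriptor. */
	st_my_thread_var* peek()
	{
		return srv_running.load(std::memory_order_relaxed);
	}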
@@ -4354,11 +4355,7 @@ innobase_end(handlerton*, ha_panic_function)
}
}
- st_my_thread_var* r = reinterpret_cast<st_my_thread_var*>(
- my_atomic_loadptr_explicit(
- reinterpret_cast<void**>(&srv_running),
- MY_MEMORY_ORDER_RELAXED));
- if (r) {
+ if (auto r = srv_running.load(std::memory_order_relaxed)) {
ut_ad(!srv_read_only_mode);
if (!abort_loop) {
// may be UNINSTALL PLUGIN statement
@@ -4521,6 +4518,14 @@ innobase_commit_ordered_2(
trx->flush_log_later = true;
}
+#ifdef WITH_WSREP
+ /* If the transaction is not run in 2pc, we must assign wsrep
+ XID here in order to get it written in rollback segment. */
+ if (wsrep_on(thd)) {
+ thd_get_xid(thd, (MYSQL_XID*)trx->xid);
+ }
+#endif /* WITH_WSREP */
+
innobase_commit_low(trx);
if (!read_only) {
@@ -4723,6 +4728,15 @@ innobase_rollback(
dberr_t error;
+#ifdef WITH_WSREP
+ /* If trx was assigned wsrep XID in prepare phase and the
+ trx is being rolled back due to BF abort, clear XID in order
+ to avoid writing it to rollback segment out of order. The XID
+ will be reassigned when the transaction is replayed. */
+ if (trx->state != TRX_STATE_NOT_STARTED && wsrep_is_wsrep_xid(trx->xid)) {
+ trx->xid->null();
+ }
+#endif /* WITH_WSREP */
if (rollback_trx
|| !thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) {
@@ -5128,7 +5142,7 @@ static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels)
{
DBUG_ENTER("innobase_kill_query");
#ifdef WITH_WSREP
- if (wsrep_thd_get_conflict_state(thd) != NO_CONFLICT) {
+ if (wsrep_on(thd) && wsrep_thd_is_aborting(thd)) {
/* if victim has been signaled by BF thread and/or aborting
is already progressing, following query aborting is not necessary
any more.
@@ -5256,24 +5270,21 @@ ha_innobase::index_flags(
return(0);
}
- ulong extra_flag= 0;
-
- if (table && key == table->s->primary_key) {
- extra_flag= HA_CLUSTERED_INDEX;
- }
-
- ulong flags = HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER
- | HA_READ_RANGE | HA_KEYREAD_ONLY
- | extra_flag
- | HA_DO_INDEX_COND_PUSHDOWN;
-
/* For spatial index, we don't support descending scan
and ICP so far. */
if (table_share->key_info[key].flags & HA_SPATIAL) {
- flags = HA_READ_NEXT | HA_READ_ORDER| HA_READ_RANGE
+ return HA_READ_NEXT | HA_READ_ORDER| HA_READ_RANGE
| HA_KEYREAD_ONLY | HA_KEY_SCAN_NOT_ROR;
}
+ ulong flags= key == table_share->primary_key
+ ? HA_CLUSTERED_INDEX : 0;
+
+ flags |= HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER
+ | HA_READ_RANGE | HA_KEYREAD_ONLY
+ | HA_DO_INDEX_COND_PUSHDOWN
+ | HA_DO_RANGE_FILTER_PUSHDOWN;
+
return(flags);
}
@@ -5340,7 +5351,7 @@ ha_innobase::keys_to_use_for_scanning()
/****************************************************************//**
Ensures that if there's a concurrent inplace ADD INDEX, being-indexed virtual
columns are computed. They are not marked as indexed in the old table, so the
-server won't add them to the vcol_set automatically */
+server won't add them to the read_set automatically */
void
ha_innobase::column_bitmaps_signal()
/*================================*/
@@ -5360,7 +5371,7 @@ ha_innobase::column_bitmaps_signal()
if (col->ord_part ||
(dict_index_is_online_ddl(clust_index) &&
row_log_col_is_indexed(clust_index, num_v))) {
- table->mark_virtual_col(table->vfield[j]);
+ table->mark_virtual_column_with_deps(table->vfield[j]);
}
num_v++;
}
@@ -6086,6 +6097,14 @@ initialize_auto_increment(dict_table_t* table, const Field* field)
int
ha_innobase::open(const char* name, int, uint)
{
+ /* TODO: If trx_rollback_recovered(bool all=false) is ever
+ removed, the first-time open() must hold (or acquire and release)
+ a table lock that conflicts with trx_resurrect_table_locks(),
+ to ensure that any recovered incomplete ALTER TABLE will have been
+ rolled back. Otherwise, dict_table_t::instant could be cleared by
+ the rollback invoking dict_index_t::clear_instant_alter() while
+ open table handles exist in client connections. */
+
dict_table_t* ib_table;
char norm_name[FN_REFLEN];
dict_err_ignore_t ignore_err = DICT_ERR_IGNORE_NONE;
@@ -6124,6 +6143,11 @@ no_such_table:
DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
}
+ if (!ib_table->not_redundant()) {
+ m_int_table_flags |= HA_EXTENDED_TYPES_CONVERSION;
+ cached_table_flags |= HA_EXTENDED_TYPES_CONVERSION;
+ }
+
size_t n_fields = omits_virtual_cols(*table_share)
? table_share->stored_fields : table_share->fields;
size_t n_cols = dict_table_get_n_user_cols(ib_table)
@@ -7268,19 +7292,19 @@ static
const Field*
build_template_needs_field(
/*=======================*/
- ibool index_contains, /*!< in:
- dict_index_contains_col_or_prefix(
- index, i) */
- ibool read_just_key, /*!< in: TRUE when MySQL calls
+ bool index_contains, /*!< in:
+ dict_index_t::contains_col_or_prefix(
+ i) */
+ bool read_just_key, /*!< in: TRUE when MySQL calls
ha_innobase::extra with the
argument HA_EXTRA_KEYREAD; it is enough
to read just columns defined in
the index (i.e., no read of the
clustered index record necessary) */
- ibool fetch_all_in_key,
+ bool fetch_all_in_key,
/*!< in: true=fetch all fields in
the index */
- ibool fetch_primary_key_cols,
+ bool fetch_primary_key_cols,
/*!< in: true=fetch the
primary key columns */
dict_index_t* index, /*!< in: InnoDB index to use */
@@ -7342,11 +7366,11 @@ build_template_needs_field_in_icp(
bool is_virtual)
/*!< in: a virtual column or not */
{
- ut_ad(contains == dict_index_contains_col_or_prefix(index, i, is_virtual));
+ ut_ad(contains == index->contains_col_or_prefix(i, is_virtual));
return(index == prebuilt->index
? contains
- : dict_index_contains_col_or_prefix(prebuilt->index, i, is_virtual));
+ : prebuilt->index->contains_col_or_prefix(i, is_virtual));
}
/**************************************************************//**
@@ -7589,6 +7613,13 @@ ha_innobase::build_template(
/* Below we check column by column if we need to access
the clustered index. */
+ if (pushed_rowid_filter && rowid_filter_is_active) {
+ fetch_primary_key_cols = TRUE;
+ m_prebuilt->pk_filter = this;
+ } else {
+ m_prebuilt->pk_filter = NULL;
+ }
+
const bool skip_virtual = omits_virtual_cols(*table_share);
const ulint n_fields = table_share->fields;
@@ -7612,8 +7643,9 @@ ha_innobase::build_template(
ulint num_v = 0;
- if (active_index != MAX_KEY
- && active_index == pushed_idx_cond_keyno) {
+ if ((active_index != MAX_KEY
+ && active_index == pushed_idx_cond_keyno)
+ || (pushed_rowid_filter && rowid_filter_is_active)) {
/* Push down an index condition or an end_range check. */
for (ulint i = 0; i < n_fields; i++) {
const Field* field = table->field[i];
@@ -7622,9 +7654,8 @@ ha_innobase::build_template(
num_v++;
continue;
}
- ibool index_contains
- = dict_index_contains_col_or_prefix(
- index, is_v ? num_v : i - num_v, is_v);
+ bool index_contains = index->contains_col_or_prefix(
+ is_v ? num_v : i - num_v, is_v);
if (is_v && index_contains) {
m_prebuilt->n_template = 0;
num_v = 0;
@@ -7762,9 +7793,8 @@ ha_innobase::build_template(
continue;
}
- ibool index_contains
- = dict_index_contains_col_or_prefix(
- index, is_v ? num_v : i - num_v, is_v);
+ bool index_contains = index->contains_col_or_prefix(
+ is_v ? num_v : i - num_v, is_v);
if (!build_template_needs_field_in_icp(
index, m_prebuilt, index_contains,
@@ -7796,8 +7826,9 @@ ha_innobase::build_template(
}
}
}
-
- m_prebuilt->idx_cond = this;
+ if (active_index == pushed_idx_cond_keyno) {
+ m_prebuilt->idx_cond = this;
+ }
} else {
no_icp:
/* No index condition pushdown */
@@ -7821,8 +7852,8 @@ no_icp:
cluster index. */
if (is_v
&& m_prebuilt->read_just_key
- && !dict_index_contains_col_or_prefix(
- m_prebuilt->index, num_v, true))
+ && !m_prebuilt->index->contains_col_or_prefix(
+ num_v, true))
{
/* Turn off ROW_MYSQL_WHOLE_ROW */
m_prebuilt->template_type =
@@ -7831,21 +7862,15 @@ no_icp:
continue;
}
} else {
- ibool contain;
-
- if (!is_v) {
- contain = dict_index_contains_col_or_prefix(
- index, i - num_v,
- false);
- } else if (skip_virtual
- || dict_index_is_clust(index)) {
+ if (is_v
+ && (skip_virtual || index->is_primary())) {
num_v++;
continue;
- } else {
- contain = dict_index_contains_col_or_prefix(
- index, num_v, true);
}
+ bool contain = index->contains_col_or_prefix(
+ is_v ? num_v: i - num_v, is_v);
+
field = build_template_needs_field(
contain,
m_prebuilt->read_just_key,
@@ -8009,16 +8034,6 @@ ha_innobase::write_row(
++trx->will_lock;
}
-#ifdef WITH_WSREP
- if (wsrep_is_load_multi_commit(m_user_thd))
- {
- /* Note that this transaction is still active. */
- trx_register_for_2pc(m_prebuilt->trx);
- /* We will need an IX lock on the destination table. */
- m_prebuilt->sql_stat_start = TRUE;
- }
-#endif /* WITH_WSREP */
-
ins_mode_t vers_set_fields;
/* Handling of Auto-Increment Columns. */
if (table->next_number_field && record == table->record[0]) {
@@ -8138,9 +8153,9 @@ ha_innobase::write_row(
wsrep_thd_query(m_user_thd) :
(char *)"void");
error= DB_SUCCESS;
- wsrep_thd_set_conflict_state(
- m_user_thd, MUST_ABORT);
- innobase_srv_conc_exit_innodb(m_prebuilt);
+ wsrep_thd_self_abort(m_user_thd);
+ innobase_srv_conc_exit_innodb(
+ m_prebuilt);
/* jump straight to func exit over
* later wsrep hooks */
goto func_exit;
@@ -8177,20 +8192,18 @@ set_max_autoinc:
properly assigned. Fetch values from
server side. */
if (wsrep_on(m_user_thd) &&
- wsrep_thd_exec_mode(m_user_thd) == REPL_RECV)
+ wsrep_thd_is_applying(m_user_thd))
{
wsrep_thd_auto_increment_variables(
m_user_thd, &offset, &increment);
}
else
- {
#endif /* WITH_WSREP */
+ {
ut_a(m_prebuilt->autoinc_increment > 0);
offset = m_prebuilt->autoinc_offset;
increment = m_prebuilt->autoinc_increment;
-#ifdef WITH_WSREP
}
-#endif /* WITH_WSREP */
auto_inc = innobase_next_autoinc(
auto_inc,
1, increment, offset,
@@ -8227,10 +8240,13 @@ report_error:
#ifdef WITH_WSREP
if (!error_result
&& wsrep_on(m_user_thd)
- && wsrep_thd_exec_mode(m_user_thd) == LOCAL_STATE
+ && wsrep_thd_is_local(m_user_thd)
&& !wsrep_consistency_check(m_user_thd)
- && !wsrep_thd_ignore_table(m_user_thd)) {
- if (wsrep_append_keys(m_user_thd, WSREP_KEY_EXCLUSIVE, record,
+ && (thd_sql_command(m_user_thd) != SQLCOM_CREATE_TABLE)
+ && (thd_sql_command(m_user_thd) != SQLCOM_LOAD ||
+ thd_binlog_format(m_user_thd) == BINLOG_FORMAT_ROW)) {
+ if (wsrep_append_keys(m_user_thd, WSREP_SERVICE_KEY_EXCLUSIVE,
+ record,
NULL)) {
DBUG_PRINT("wsrep", ("row key failed"));
error_result = HA_ERR_INTERNAL_ERROR;
@@ -8889,19 +8905,13 @@ ha_innobase::update_row(
properly assigned. Fetch values from
server side. */
if (wsrep_on(m_user_thd) &&
- wsrep_thd_exec_mode(m_user_thd) == REPL_RECV)
- {
- wsrep_thd_auto_increment_variables(
- m_user_thd, &offset, &increment);
- }
+ wsrep_thd_is_applying(m_user_thd))
+ wsrep_thd_auto_increment_variables(
+ m_user_thd, &offset, &increment);
else
- {
-#endif /* WITH_WSREP */
- offset = m_prebuilt->autoinc_offset;
- increment = m_prebuilt->autoinc_increment;
-#ifdef WITH_WSREP
- }
#endif /* WITH_WSREP */
+ offset = m_prebuilt->autoinc_offset,
+ increment = m_prebuilt->autoinc_increment;
autoinc = innobase_next_autoinc(
autoinc, 1, increment, offset,
@@ -8939,14 +8949,17 @@ func_exit:
innobase_active_small();
#ifdef WITH_WSREP
- if (error == DB_SUCCESS &&
- wsrep_thd_exec_mode(m_user_thd) == LOCAL_STATE &&
- wsrep_on(m_user_thd) &&
- !wsrep_thd_ignore_table(m_user_thd)) {
+ if (error == DB_SUCCESS
+ && wsrep_on(m_user_thd)
+ && wsrep_thd_is_local(m_user_thd)) {
+
DBUG_PRINT("wsrep", ("update row key"));
- if (wsrep_append_keys(m_user_thd, WSREP_KEY_EXCLUSIVE, old_row,
- new_row)) {
+ if (wsrep_append_keys(m_user_thd,
+ wsrep_protocol_version >= 4
+ ? WSREP_SERVICE_KEY_UPDATE
+ : WSREP_SERVICE_KEY_EXCLUSIVE,
+ old_row, new_row)){
WSREP_DEBUG("WSREP: UPDATE_ROW_KEY FAILED");
DBUG_PRINT("wsrep", ("row key failed"));
err = HA_ERR_INTERNAL_ERROR;
@@ -9007,11 +9020,13 @@ ha_innobase::delete_row(
#ifdef WITH_WSREP
if (error == DB_SUCCESS
- && wsrep_thd_exec_mode(m_user_thd) == LOCAL_STATE
&& wsrep_on(m_user_thd)
+ && wsrep_thd_is_local(m_user_thd)
&& !wsrep_thd_ignore_table(m_user_thd)) {
- if (wsrep_append_keys(m_user_thd, WSREP_KEY_EXCLUSIVE, record,
- NULL)) {
+
+ if (wsrep_append_keys(m_user_thd, WSREP_SERVICE_KEY_EXCLUSIVE,
+ record,
+ NULL)) {
DBUG_PRINT("wsrep", ("delete fail"));
error = (dberr_t) HA_ERR_INTERNAL_ERROR;
goto wsrep_error;
@@ -9534,12 +9549,14 @@ ha_innobase::change_active_index(
}
#endif
} else {
- dtuple_set_n_fields(m_prebuilt->search_tuple,
- m_prebuilt->index->n_fields);
+ ulint n_fields = dict_index_get_n_unique_in_tree(
+ m_prebuilt->index);
+
+ dtuple_set_n_fields(m_prebuilt->search_tuple, n_fields);
dict_index_copy_types(
m_prebuilt->search_tuple, m_prebuilt->index,
- m_prebuilt->index->n_fields);
+ n_fields);
/* If it's FTS query and FTS_DOC_ID exists FTS_DOC_ID field is
always added to read_set. */
@@ -10194,20 +10211,22 @@ wsrep_dict_foreign_find_index(
inline
const char*
-wsrep_key_type_to_str(wsrep_key_type type)
+wsrep_key_type_to_str(Wsrep_service_key_type type)
{
switch (type) {
- case WSREP_KEY_SHARED:
+ case WSREP_SERVICE_KEY_SHARED:
return "shared";
- case WSREP_KEY_SEMI:
- return "semi";
- case WSREP_KEY_EXCLUSIVE:
+ case WSREP_SERVICE_KEY_REFERENCE:
+ return "reference";
+ case WSREP_SERVICE_KEY_UPDATE:
+ return "update";
+ case WSREP_SERVICE_KEY_EXCLUSIVE:
return "exclusive";
};
return "unknown";
}
-ulint
+extern dberr_t
wsrep_append_foreign_key(
/*===========================*/
trx_t* trx, /*!< in: trx */
@@ -10215,18 +10234,17 @@ wsrep_append_foreign_key(
const rec_t* rec, /*!<in: clustered index record */
dict_index_t* index, /*!<in: clustered index */
ibool referenced, /*!<in: is check for referenced table */
- wsrep_key_type key_type) /*!< in: access type of this key
- (shared, exclusive, semi...) */
+ Wsrep_service_key_type key_type) /*!< in: access type of this key
+ (shared, exclusive, reference...) */
{
ut_a(trx);
THD* thd = (THD*)trx->mysql_thd;
ulint rcode = DB_SUCCESS;
char cache_key[513] = {'\0'};
int cache_key_len=0;
- bool const copy = true;
if (!wsrep_on(trx->mysql_thd) ||
- wsrep_thd_exec_mode(thd) != LOCAL_STATE) {
+ wsrep_thd_is_local(trx->mysql_thd) == false) {
return DB_SUCCESS;
}
@@ -10316,11 +10334,11 @@ wsrep_append_foreign_key(
if (rcode != DB_SUCCESS) {
WSREP_ERROR(
"FK key set failed: " ULINTPF
- " (" ULINTPF " %s), index: %s %s, %s",
+ " (" ULINTPF "%s), index: %s %s, %s",
rcode, referenced, wsrep_key_type_to_str(key_type),
- index ? index->name() : "void index",
+ (index) ? index->name() : "void index",
(index && index->table) ? index->table->name.m_name :
- "void table",
+ "void table",
wsrep_thd_query(thd));
return DB_ERROR;
}
@@ -10336,7 +10354,7 @@ wsrep_append_foreign_key(
#ifdef WSREP_DEBUG_PRINT
ulint j;
fprintf(stderr, "FK parent key, table: %s %s len: %lu ",
- cache_key, (shared) ? "shared" : "exclusive", len+1);
+ cache_key, wsrep_key_type_to_str(key_type), len+1);
for (j=0; j<len+1; j++) {
fprintf(stderr, " %hhX, ", key[j]);
}
@@ -10355,7 +10373,8 @@ wsrep_append_foreign_key(
wsrep_buf_t wkey_part[3];
wsrep_key_t wkey = {wkey_part, 3};
- if (!wsrep_prepare_key(
+ if (!wsrep_prepare_key_for_innodb(
+ thd,
(const uchar*)cache_key,
cache_key_len + 1,
(const uchar*)key, len+1,
@@ -10366,17 +10385,7 @@ wsrep_append_foreign_key(
wsrep_thd_query(thd) : "void");
return DB_ERROR;
}
-
- wsrep_t *wsrep= get_wsrep();
-
- rcode = (int)wsrep->append_key(
- wsrep,
- wsrep_ws_handle(thd, trx),
- &wkey,
- 1,
- key_type,
- copy);
-
+ rcode = wsrep_thd_append_key(thd, &wkey, 1, key_type);
if (rcode) {
DBUG_PRINT("wsrep", ("row key failed: " ULINTPF, rcode));
WSREP_ERROR("Appending cascaded fk row key failed: %s, "
@@ -10397,17 +10406,19 @@ wsrep_append_key(
TABLE_SHARE *table_share,
const char* key,
uint16_t key_len,
- wsrep_key_type key_type /*!< in: access type of this key
+ Wsrep_service_key_type key_type /*!< in: access type of this key
(shared, exclusive, semi...) */
)
{
DBUG_ENTER("wsrep_append_key");
- bool const copy = true;
+ DBUG_PRINT("enter",
+ ("thd: %lu trx: %lld", thd_get_thread_id(thd),
+ (long long)trx->id));
#ifdef WSREP_DEBUG_PRINT
- fprintf(stderr, "%s conn %ld, trx %llu, keylen %d, table %s\n Query: %s ",
+ fprintf(stderr, "%s conn %lu, trx " TRX_ID_FMT ", keylen %d, key %s.%s\n",
wsrep_key_type_to_str(key_type),
- wsrep_thd_thread_id(thd), trx->id, key_len,
- table_share->table_name.str, wsrep_thd_query(thd));
+ thd_get_thread_id(thd), trx->id, key_len,
+ table_share->table_name.str, key);
for (int i=0; i<key_len; i++) {
fprintf(stderr, "%hhX, ", key[i]);
}
@@ -10416,7 +10427,8 @@ wsrep_append_key(
wsrep_buf_t wkey_part[3];
wsrep_key_t wkey = {wkey_part, 3};
- if (!wsrep_prepare_key(
+ if (!wsrep_prepare_key_for_innodb(
+ thd,
(const uchar*)table_share->table_cache_key.str,
table_share->table_cache_key.length,
(const uchar*)key, key_len,
@@ -10428,15 +10440,7 @@ wsrep_append_key(
DBUG_RETURN(HA_ERR_INTERNAL_ERROR);
}
- wsrep_t *wsrep= get_wsrep();
-
- int rcode = (int)wsrep->append_key(
- wsrep,
- wsrep_ws_handle(thd, trx),
- &wkey,
- 1,
- key_type,
- copy);
+ int rcode = wsrep_thd_append_key(thd, &wkey, 1, key_type);
if (rcode) {
DBUG_PRINT("wsrep", ("row key failed: %d", rcode));
WSREP_WARN("Appending row key failed: %s, %d",
@@ -10477,17 +10481,30 @@ int
ha_innobase::wsrep_append_keys(
/*===========================*/
THD *thd,
- wsrep_key_type key_type, /*!< in: access type of this key
- (shared, exclusive, semi...) */
+ Wsrep_service_key_type key_type, /*!< in: access type of this row
+ operation:
+ (shared, exclusive, reference...) */
const uchar* record0, /* in: row in MySQL format */
const uchar* record1) /* in: row in MySQL format */
{
+	/* Sanity check: newly inserted records should always be passed with
+	   the EXCLUSIVE key type; all the rest are expected to carry a pre-image
+	*/
+ ut_a(record1 != NULL || key_type == WSREP_SERVICE_KEY_EXCLUSIVE);
+
int rcode;
DBUG_ENTER("wsrep_append_keys");
bool key_appended = false;
trx_t *trx = thd_to_trx(thd);
+#ifdef WSREP_DEBUG_PRINT
+ fprintf(stderr, "%s conn %lu, trx " TRX_ID_FMT ", table %s\nSQL: %s\n",
+ wsrep_key_type_to_str(key_type),
+ thd_get_thread_id(thd), trx->id,
+ table_share->table_name.str, wsrep_thd_query(thd));
+#endif
+
if (table_share && table_share->tmp_table != NO_TMP_TABLE) {
WSREP_DEBUG("skipping tmp table DML: THD: %lu tmp: %d SQL: %s",
thd_get_thread_id(thd),
@@ -10512,7 +10529,9 @@ ha_innobase::wsrep_append_keys(
thd, trx, table_share, keyval,
len, key_type);
- if (rcode) DBUG_RETURN(rcode);
+ if (rcode) {
+ DBUG_RETURN(rcode);
+ }
} else {
WSREP_DEBUG("NULL key skipped (proto 0): %s",
wsrep_thd_query(thd));
@@ -10526,68 +10545,91 @@ ha_innobase::wsrep_append_keys(
KEY* key_info = table->key_info + i;
if (key_info->flags & HA_NOSAME) {
hasPK = true;
+ break;
}
}
for (i=0; i<table->s->keys; ++i) {
- uint len;
- char keyval0[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'};
- char keyval1[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'};
- char* key0 = &keyval0[1];
- char* key1 = &keyval1[1];
KEY* key_info = table->key_info + i;
- ibool is_null;
dict_index_t* idx = innobase_get_index(i);
dict_table_t* tab = (idx) ? idx->table : NULL;
+			/* keyval[] shall contain the key ordinal number at byte 0
+			and the actual key data starting at byte 1.
+			Hence the total data length is the key length + 1 */
+ char keyval0[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'};
+ char keyval1[WSREP_MAX_SUPPORTED_KEY_LENGTH+1] = {'\0'};
keyval0[0] = (char)i;
keyval1[0] = (char)i;
+ char* key0 = &keyval0[1];
+ char* key1 = &keyval1[1];
if (!tab) {
WSREP_WARN("MariaDB-InnoDB key mismatch %s %s",
table->s->table_name.str,
key_info->name.str);
}
- /* !hasPK == table with no PK, must append all non-unique keys */
+ /* !hasPK == table with no PK,
+ must append all non-unique keys */
if (!hasPK || key_info->flags & HA_NOSAME ||
((tab &&
referenced_by_foreign_key2(tab, idx)) ||
(!tab && referenced_by_foreign_key()))) {
- len = wsrep_store_key_val_for_row(
+ ibool is_null0;
+ uint len0 = wsrep_store_key_val_for_row(
thd, table, i, key0,
WSREP_MAX_SUPPORTED_KEY_LENGTH,
- record0, &is_null);
- if (!is_null) {
- rcode = wsrep_append_key(
- thd, trx, table_share,
- keyval0, len+1, key_type);
- if (rcode) DBUG_RETURN(rcode);
-
- if (key_info->flags & HA_NOSAME ||
- key_type == WSREP_KEY_SHARED)
- key_appended = true;
- } else {
- WSREP_DEBUG("NULL key skipped: %s",
- wsrep_thd_query(thd));
- }
+ record0, &is_null0);
if (record1) {
- len = wsrep_store_key_val_for_row(
+ ibool is_null1;
+ uint len1 = wsrep_store_key_val_for_row(
thd, table, i, key1,
WSREP_MAX_SUPPORTED_KEY_LENGTH,
- record1, &is_null);
+ record1, &is_null1);
+
+ if (is_null0 != is_null1 ||
+ len0 != len1 ||
+ memcmp(key0, key1, len0)) {
+					/* This key has changed. If it
+					is unique, this is an exclusive
+					operation -> upgrade the key type */
+ if (key_info->flags & HA_NOSAME) {
+ key_type = WSREP_SERVICE_KEY_EXCLUSIVE;
+ }
- if (!is_null
- && memcmp(key0, key1, len)) {
- rcode = wsrep_append_key(
+ if (!is_null1) {
+ rcode = wsrep_append_key(
thd, trx, table_share,
- keyval1, len+1,
- key_type);
- if (rcode) DBUG_RETURN(rcode);
+ keyval1,
+ /* for len1+1 see keyval1
+ initialization comment */
+ len1+1, key_type);
+ if (rcode)
+ DBUG_RETURN(rcode);
+ }
}
}
+
+ if (!is_null0) {
+ rcode = wsrep_append_key(
+ thd, trx, table_share,
+ /* for len0+1 see keyval0
+ initialization comment */
+ keyval0, len0+1, key_type);
+ if (rcode)
+ DBUG_RETURN(rcode);
+
+ if (key_info->flags & HA_NOSAME ||
+				    key_type == WSREP_SERVICE_KEY_SHARED ||
+ key_type == WSREP_SERVICE_KEY_REFERENCE)
+ key_appended = true;
+ } else {
+ WSREP_DEBUG("NULL key skipped: %s",
+ wsrep_thd_query(thd));
+ }
}
}
}
@@ -10741,9 +10783,8 @@ prepare_vcol_for_base_setup(
ut_ad(col->base_col == NULL);
MY_BITMAP *old_read_set = field->table->read_set;
- MY_BITMAP *old_vcol_set = field->table->vcol_set;
- field->table->read_set = field->table->vcol_set = &field->table->tmp_set;
+ field->table->read_set = &field->table->tmp_set;
bitmap_clear_all(&field->table->tmp_set);
field->vcol_info->expr->walk(
@@ -10755,7 +10796,6 @@ prepare_vcol_for_base_setup(
* col->base_col)));
}
field->table->read_set= old_read_set;
- field->table->vcol_set= old_vcol_set;
}
@@ -10940,9 +10980,9 @@ create_table_info_t::create_table_def()
ulint vers_row = 0;
if (m_form->versioned()) {
- if (i == m_form->s->row_start_field) {
+ if (i == m_form->s->vers.start_fieldno) {
vers_row = DATA_VERS_START;
- } else if (i == m_form->s->row_end_field) {
+ } else if (i == m_form->s->vers.end_fieldno) {
vers_row = DATA_VERS_END;
} else if (!(field->flags
& VERS_UPDATE_UNVERSIONED_FLAG)) {
@@ -11107,9 +11147,8 @@ err_col:
dict_table_add_system_columns(table, heap);
if (table->is_temporary()) {
- /* Get a new table ID. FIXME: Make this a private
- sequence, not shared with persistent tables! */
- dict_table_assign_new_id(table, m_trx);
+ m_trx->table_id = table->id
+ = dict_sys->get_temporary_table_id();
ut_ad(dict_tf_get_rec_format(table->flags)
!= REC_FORMAT_COMPRESSED);
table->space_id = SRV_TMP_SPACE_ID;
@@ -11361,7 +11400,7 @@ create_table_info_t::create_option_data_directory_is_valid()
}
/* Do not use DATA DIRECTORY with TEMPORARY TABLE. */
- if (m_create_info->options & HA_LEX_CREATE_TMP_TABLE) {
+ if (m_create_info->tmp_table()) {
push_warning(
m_thd, Sql_condition::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
@@ -11390,8 +11429,7 @@ create_table_info_t::create_options_are_invalid()
const char* ret = NULL;
enum row_type row_format = m_create_info->row_type;
- const bool is_temp
- = m_create_info->options & HA_LEX_CREATE_TMP_TABLE;
+ const bool is_temp = m_create_info->tmp_table();
ut_ad(m_thd != NULL);
@@ -11577,9 +11615,12 @@ create_table_info_t::check_table_options()
return "ENCRYPTION_KEY_ID";
}
- /* Currently we do not support encryption for spatial indexes.
+ /* We do not support encryption for spatial indexes,
+ except if innodb_checksum_algorithm=full_crc32.
Do not allow ENCRYPTED=YES if any SPATIAL INDEX exists. */
- if (options->encryption != FIL_ENCRYPTION_ON) {
+ if (options->encryption != FIL_ENCRYPTION_ON
+ || srv_checksum_algorithm
+ >= SRV_CHECKSUM_ALGORITHM_FULL_CRC32) {
break;
}
for (ulint i = 0; i < m_form->s->keys; i++) {
@@ -11746,7 +11787,7 @@ create_table_info_t::parse_table_name(
if (m_innodb_file_per_table
&& !mysqld_embedded
- && !(m_create_info->options & HA_LEX_CREATE_TMP_TABLE)) {
+ && !m_create_info->tmp_table()) {
if ((name[1] == ':')
|| (name[0] == '\\' && name[1] == '\\')) {
@@ -11781,7 +11822,7 @@ create_table_info_t::parse_table_name(
}
if (m_create_info->index_file_name) {
- my_error(WARN_OPTION_IGNORED, ME_JUST_WARNING,
+ my_error(WARN_OPTION_IGNORED, ME_WARNING,
"INDEX DIRECTORY");
}
@@ -11801,10 +11842,8 @@ bool create_table_info_t::innobase_table_flags()
enum row_type row_type;
rec_format_t innodb_row_format =
get_row_format(m_default_row_format);
- const bool is_temp
- = m_create_info->options & HA_LEX_CREATE_TMP_TABLE;
- bool zip_allowed
- = !is_temp;
+ const bool is_temp = m_create_info->tmp_table();
+ bool zip_allowed = !is_temp;
const ulint zip_ssize_max =
ut_min(static_cast<ulint>(UNIV_PAGE_SSIZE_MAX),
@@ -12204,9 +12243,8 @@ create_table_info_t::set_tablespace_type(
/* Ignore the current innodb-file-per-table setting if we are
creating a temporary table. */
- m_use_file_per_table =
- m_allow_file_per_table
- && !(m_create_info->options & HA_LEX_CREATE_TMP_TABLE);
+ m_use_file_per_table = m_allow_file_per_table
+ && !m_create_info->tmp_table();
/* DATA DIRECTORY must have m_use_file_per_table but cannot be
used with TEMPORARY tables. */
@@ -13867,7 +13905,7 @@ fsp_get_available_space_in_free_extents(const fil_space_t& space)
ulint n_free_up =
(size_in_header - space.free_limit) / FSP_EXTENT_SIZE;
- const ulint size = page_size_t(space.flags).physical();
+ const ulint size = space.physical_size();
if (n_free_up > 0) {
n_free_up--;
n_free_up -= n_free_up / (size / FSP_EXTENT_SIZE);
@@ -14017,8 +14055,7 @@ ha_innobase::info_low(
stats.records = (ha_rows) n_rows;
stats.deleted = 0;
if (fil_space_t* space = ib_table->space) {
- const ulint size = page_size_t(space->flags)
- .physical();
+ const ulint size = space->physical_size();
stats.data_file_length
= ulonglong(stat_clustered_index_size)
* size;
@@ -14521,20 +14558,9 @@ ha_innobase::check(
if (!(check_opt->flags & T_QUICK)
&& !index->is_corrupted()) {
- /* Enlarge the fatal lock wait timeout during
- CHECK TABLE. */
- my_atomic_addlong(
- &srv_fatal_semaphore_wait_threshold,
- SRV_SEMAPHORE_WAIT_EXTENSION);
dberr_t err = btr_validate_index(
- index, m_prebuilt->trx, false);
-
- /* Restore the fatal lock wait timeout after
- CHECK TABLE. */
- my_atomic_addlong(
- &srv_fatal_semaphore_wait_threshold,
- -SRV_SEMAPHORE_WAIT_EXTENSION);
+ index, m_prebuilt->trx);
if (err != DB_SUCCESS) {
is_ok = false;
@@ -15516,8 +15542,7 @@ ha_innobase::external_lock(
if (!skip) {
#ifdef WITH_WSREP
- if (!wsrep_on(thd) || wsrep_thd_exec_mode(thd) == LOCAL_STATE)
- {
+ if (!wsrep_on(thd) || wsrep_thd_is_local(m_user_thd)) {
#endif /* WITH_WSREP */
my_error(ER_BINLOG_STMT_MODE_AND_ROW_ENGINE, MYF(0),
" InnoDB is limited to row-logging when"
@@ -17245,9 +17270,7 @@ fast_shutdown_validate(
uint new_val = *reinterpret_cast<uint*>(save);
if (srv_fast_shutdown && !new_val
- && !my_atomic_loadptr_explicit(reinterpret_cast<void**>
- (&srv_running),
- MY_MEMORY_ORDER_RELAXED)) {
+ && !srv_running.load(std::memory_order_relaxed)) {
return(1);
}
@@ -17357,7 +17380,7 @@ innodb_internal_table_validate(
DBUG_EXECUTE_IF("innodb_evict_autoinc_table",
mutex_enter(&dict_sys->mutex);
- dict_table_remove_from_cache_low(user_table, TRUE);
+ dict_table_remove_from_cache(user_table, true);
mutex_exit(&dict_sys->mutex);
);
}
@@ -17496,7 +17519,7 @@ innodb_make_page_dirty(THD*, st_mysql_sys_var*, void*, const void* save)
buf_block_t* block = buf_page_get(
page_id_t(space_id, srv_saved_page_number_debug),
- page_size_t(space->flags), RW_X_LATCH, &mtr);
+ space->zip_size(), RW_X_LATCH, &mtr);
if (block != NULL) {
byte* page = block->frame;
@@ -18641,123 +18664,32 @@ wsrep_innobase_kill_one_trx(
bf_trx ? bf_trx->id : 0);
DBUG_RETURN(1);
}
-
WSREP_LOG_CONFLICT(bf_thd, thd, TRUE);
-
+ wsrep_thd_LOCK(thd);
WSREP_DEBUG("BF kill (" ULINTPF ", seqno: " INT64PF
"), victim: (%lu) trx: " TRX_ID_FMT,
signal, bf_seqno,
thd_get_thread_id(thd),
victim_trx->id);
- WSREP_DEBUG("Aborting query: %s conf %d trx: %" PRId64,
+ WSREP_DEBUG("Aborting query: %s conf %s trx: %lld",
(thd && wsrep_thd_query(thd)) ? wsrep_thd_query(thd) : "void",
- wsrep_thd_conflict_state(thd, FALSE),
- wsrep_thd_ws_handle(thd)->trx_id);
-
- wsrep_thd_LOCK(thd);
- DBUG_EXECUTE_IF("sync.wsrep_after_BF_victim_lock",
- {
- const char act[]=
- "now "
- "wait_for signal.wsrep_after_BF_victim_lock";
- DBUG_ASSERT(!debug_sync_set_action(bf_thd,
- STRING_WITH_LEN(act)));
- };);
-
-
- if (wsrep_thd_query_state(thd) == QUERY_EXITING) {
- WSREP_DEBUG("kill trx EXITING for " TRX_ID_FMT,
- victim_trx->id);
- wsrep_thd_UNLOCK(thd);
- DBUG_RETURN(0);
- }
-
- if (wsrep_thd_exec_mode(thd) != LOCAL_STATE) {
- WSREP_DEBUG("withdraw for BF trx: " TRX_ID_FMT ", state: %d",
- victim_trx->id,
- wsrep_thd_get_conflict_state(thd));
- }
-
- switch (wsrep_thd_get_conflict_state(thd)) {
- case NO_CONFLICT:
- wsrep_thd_set_conflict_state(thd, MUST_ABORT);
- break;
- case MUST_ABORT:
- WSREP_DEBUG("victim " TRX_ID_FMT " in MUST ABORT state",
- victim_trx->id);
- wsrep_thd_UNLOCK(thd);
- wsrep_thd_awake(thd, signal);
- DBUG_RETURN(0);
- break;
- case ABORTED:
- case ABORTING: // fall through
- default:
- WSREP_DEBUG("victim " TRX_ID_FMT " in state %d",
- victim_trx->id, wsrep_thd_get_conflict_state(thd));
- wsrep_thd_UNLOCK(thd);
- DBUG_RETURN(0);
- break;
- }
-
- switch (wsrep_thd_query_state(thd)) {
- case QUERY_COMMITTING:
- enum wsrep_status rcode;
-
- WSREP_DEBUG("kill query for: %ld",
- thd_get_thread_id(thd));
- WSREP_DEBUG("kill trx QUERY_COMMITTING for " TRX_ID_FMT,
- victim_trx->id);
-
- if (wsrep_thd_exec_mode(thd) == REPL_RECV) {
- wsrep_abort_slave_trx(bf_seqno,
- wsrep_thd_trx_seqno(thd));
- } else {
- wsrep_t *wsrep= get_wsrep();
- rcode = wsrep->abort_pre_commit(
- wsrep, bf_seqno,
- (wsrep_trx_id_t)wsrep_thd_ws_handle(thd)->trx_id
- );
-
- switch (rcode) {
- case WSREP_WARNING:
- WSREP_DEBUG("cancel commit warning: "
- TRX_ID_FMT,
- victim_trx->id);
- wsrep_thd_UNLOCK(thd);
- wsrep_thd_awake(thd, signal);
- DBUG_RETURN(1);
- break;
- case WSREP_OK:
- break;
- default:
- WSREP_ERROR(
- "cancel commit bad exit: %d "
- TRX_ID_FMT,
- rcode, victim_trx->id);
- /* unable to interrupt, must abort */
- /* note: kill_mysql() will block, if we cannot.
- * kill the lock holder first.
- */
- abort();
- break;
- }
- }
- wsrep_thd_UNLOCK(thd);
- wsrep_thd_awake(thd, signal);
- break;
- case QUERY_EXEC:
- /* it is possible that victim trx is itself waiting for some
- * other lock. We need to cancel this waiting
- */
- WSREP_DEBUG("kill trx QUERY_EXEC for " TRX_ID_FMT,
- victim_trx->id);
-
- victim_trx->lock.was_chosen_as_deadlock_victim= TRUE;
-
+ wsrep_thd_transaction_state_str(thd),
+ wsrep_thd_transaction_id(thd));
+
+	/*
+	 * Mark the transaction, which is already marked as a BF victim,
+	 * with was_chosen_as_wsrep_victim; lock_sys is held until this
+	 * victim has aborted.
+	 */
+ victim_trx->lock.was_chosen_as_wsrep_victim = TRUE;
+
+ wsrep_thd_UNLOCK(thd);
+ if (wsrep_thd_bf_abort(bf_thd, thd, signal))
+ {
if (victim_trx->lock.wait_lock) {
- WSREP_DEBUG("victim has wait flag: %ld",
- thd_get_thread_id(thd));
+ WSREP_DEBUG("victim has wait flag: %lu",
+ thd_get_thread_id(thd));
lock_t* wait_lock = victim_trx->lock.wait_lock;
if (wait_lock) {
@@ -18765,67 +18697,7 @@ wsrep_innobase_kill_one_trx(
victim_trx->lock.was_chosen_as_deadlock_victim= TRUE;
lock_cancel_waiting_and_release(wait_lock);
}
-
- wsrep_thd_UNLOCK(thd);
- wsrep_thd_awake(thd, signal);
- } else {
- /* abort currently executing query */
- DBUG_PRINT("wsrep",("sending KILL_QUERY to: %lu",
- thd_get_thread_id(thd)));
- WSREP_DEBUG("kill query for: %ld",
- thd_get_thread_id(thd));
- /* Note that innobase_kill_query will take lock_mutex
- and trx_mutex */
- wsrep_thd_UNLOCK(thd);
- wsrep_thd_awake(thd, signal);
-
- /* for BF thd, we need to prevent him from committing */
- if (wsrep_thd_exec_mode(thd) == REPL_RECV) {
- wsrep_abort_slave_trx(bf_seqno,
- wsrep_thd_trx_seqno(thd));
- }
- }
- break;
- case QUERY_IDLE:
- {
- WSREP_DEBUG("kill IDLE for " TRX_ID_FMT, victim_trx->id);
-
- if (wsrep_thd_exec_mode(thd) == REPL_RECV) {
- WSREP_DEBUG("kill BF IDLE, seqno: %lld",
- (long long)wsrep_thd_trx_seqno(thd));
- wsrep_thd_UNLOCK(thd);
- wsrep_abort_slave_trx(bf_seqno,
- wsrep_thd_trx_seqno(thd));
- DBUG_RETURN(0);
}
- /* This will lock thd from proceeding after net_read() */
- wsrep_thd_set_conflict_state(thd, ABORTING);
-
- wsrep_lock_rollback();
-
- if (wsrep_aborting_thd_contains(thd)) {
- WSREP_WARN("duplicate thd aborter %lu",
- (ulong) thd_get_thread_id(thd));
- } else {
- wsrep_aborting_thd_enqueue(thd);
- DBUG_PRINT("wsrep",("enqueuing trx abort for %lu",
- thd_get_thread_id(thd)));
- WSREP_DEBUG("enqueuing trx abort for (%lu)",
- thd_get_thread_id(thd));
- }
-
- DBUG_PRINT("wsrep",("signalling wsrep rollbacker"));
- WSREP_DEBUG("signaling aborter");
- wsrep_unlock_rollback();
- wsrep_thd_UNLOCK(thd);
-
- break;
- }
- default:
- WSREP_WARN("bad wsrep query state: %d",
- wsrep_thd_query_state(thd));
- wsrep_thd_UNLOCK(thd);
- break;
}
DBUG_RETURN(0);
@@ -18845,26 +18717,22 @@ wsrep_abort_transaction(
trx_t* victim_trx = thd_to_trx(victim_thd);
trx_t* bf_trx = (bf_thd) ? thd_to_trx(bf_thd) : NULL;
- WSREP_DEBUG("abort transaction: BF: %s victim: %s victim conf: %d",
+ WSREP_DEBUG("abort transaction: BF: %s victim: %s victim conf: %s",
wsrep_thd_query(bf_thd),
wsrep_thd_query(victim_thd),
- wsrep_thd_conflict_state(victim_thd, FALSE));
+ wsrep_thd_transaction_state_str(victim_thd));
if (victim_trx) {
lock_mutex_enter();
trx_mutex_enter(victim_trx);
- int rcode = wsrep_innobase_kill_one_trx(bf_thd, bf_trx,
- victim_trx, signal);
- lock_mutex_exit();
+ int rcode= wsrep_innobase_kill_one_trx(bf_thd, bf_trx,
+ victim_trx, signal);
trx_mutex_exit(victim_trx);
+ lock_mutex_exit();
wsrep_srv_conc_cancel_wait(victim_trx);
DBUG_RETURN(rcode);
} else {
- WSREP_DEBUG("victim does not have transaction");
- wsrep_thd_LOCK(victim_thd);
- wsrep_thd_set_conflict_state(victim_thd, MUST_ABORT);
- wsrep_thd_UNLOCK(victim_thd);
- wsrep_thd_awake(victim_thd, signal);
+ wsrep_thd_bf_abort(bf_thd, victim_thd, signal);
}
DBUG_RETURN(-1);
@@ -18900,15 +18768,6 @@ innobase_wsrep_get_checkpoint(
trx_rseg_read_wsrep_checkpoint(*xid);
return 0;
}
-
-static void wsrep_fake_trx_id(handlerton *, THD *thd)
-{
- trx_id_t trx_id = trx_sys.get_new_trx_id();
- WSREP_DEBUG("innodb fake trx id: " TRX_ID_FMT " thd: %s",
- trx_id, wsrep_thd_query(thd));
- wsrep_ws_handle_for_trx(wsrep_thd_ws_handle(thd), trx_id);
-}
-
#endif /* WITH_WSREP */
/* plugin options */
@@ -18916,7 +18775,11 @@ static void wsrep_fake_trx_id(handlerton *, THD *thd)
static MYSQL_SYSVAR_ENUM(checksum_algorithm, srv_checksum_algorithm,
PLUGIN_VAR_RQCMDARG,
"The algorithm InnoDB uses for page checksumming. Possible values are"
- " CRC32 (hardware accelerated if the CPU supports it)"
+ " FULL_CRC32"
+ " for new files, always use CRC-32C; for old, see CRC32 below;"
+ " STRICT_FULL_CRC32"
+ " for new files, always use CRC-32C; for old, see STRICT_CRC32 below;"
+ " CRC32"
" write crc32, allow any of the other checksums to match when reading;"
" STRICT_CRC32"
" write crc32, do not allow other algorithms to match when reading;"
@@ -18933,7 +18796,8 @@ static MYSQL_SYSVAR_ENUM(checksum_algorithm, srv_checksum_algorithm,
" write a constant magic number, do not allow values other than that"
" magic number when reading;"
" Files updated when this option is set to crc32 or strict_crc32 will"
- " not be readable by MariaDB versions older than 10.0.4",
+ " not be readable by MariaDB versions older than 10.0.4;"
+ " new files created with full_crc32 are readable by MariaDB 10.4.3+",
NULL, NULL, SRV_CHECKSUM_ALGORITHM_CRC32,
&innodb_checksum_algorithm_typelib);
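Note: per the help text above, full_crc32 and strict_full_crc32 only change the format of newly created files; existing files keep using the checksums described for crc32/strict_crc32, and files created with full_crc32 are readable by MariaDB 10.4.3+. A hypothetical configuration fragment (option name as registered above, value as listed in innodb_checksum_algorithm_names):

	[mysqld]
	innodb_checksum_algorithm = full_crc32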
@@ -20470,20 +20334,6 @@ ha_innobase::multi_range_read_explain_info(
return m_ds_mrr.dsmrr_explain_info(mrr_mode, str, size);
}
-/**
-Index Condition Pushdown interface implementation */
-
-/*************************************************************//**
-InnoDB index push-down condition check
-@return ICP_NO_MATCH, ICP_MATCH, or ICP_OUT_OF_RANGE */
-ICP_RESULT
-innobase_index_cond(
-/*================*/
- void* file) /*!< in/out: pointer to ha_innobase */
-{
- return handler_index_cond_check(file);
-}
-
/** Parse the table file name into table name and database name.
@param[in] tbl_name InnoDB table name
@param[out] dbname database name buffer (NAME_LEN + 1 bytes)
@@ -20851,9 +20701,9 @@ innobase_get_computed_value(
dfield_t* field;
ulint len;
- const page_size_t page_size = (old_table == NULL)
- ? dict_table_page_size(index->table)
- : dict_table_page_size(old_table);
+ const ulint zip_size = old_table
+ ? old_table->space->zip_size()
+ : dict_tf_get_zip_size(index->table->flags);
ulint ret = 0;
@@ -20905,7 +20755,7 @@ innobase_get_computed_value(
}
data = btr_copy_externally_stored_field(
- &len, data, page_size,
+ &len, data, zip_size,
dfield_get_len(row_field), *local_heap);
}
@@ -21019,6 +20869,19 @@ ha_innobase::idx_cond_push(
DBUG_RETURN(NULL);
}
+
+/** Push a primary key filter.
+@param[in] pk_filter filter against which primary keys
+ are to be checked
+@retval false if pushed (always) */
+bool ha_innobase::rowid_filter_push(Rowid_filter* pk_filter)
+{
+ DBUG_ENTER("ha_innobase::rowid_filter_push");
+ DBUG_ASSERT(pk_filter != NULL);
+ pushed_rowid_filter= pk_filter;
+ DBUG_RETURN(false);
+}
+
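Note: the handler only records the pushed filter in pushed_rowid_filter here; it is build_template() (see the hunk around line 7613 above) that wires it into the prebuilt struct. A condensed sketch of that interplay, restated from the changes above rather than a verbatim excerpt:

	if (pushed_rowid_filter && rowid_filter_is_active) {
		fetch_primary_key_cols = TRUE;		/* PK values are needed */
		m_prebuilt->pk_filter = this;		/* check PKs against the filter */
	} else {
		m_prebuilt->pk_filter = NULL;
	}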
/******************************************************************//**
Use this when the args are passed to the format string from
errmsg-utf8.txt directly as is.
@@ -21055,10 +20918,10 @@ ib_senderrf(
switch (level) {
case IB_LOG_LEVEL_INFO:
- l = ME_JUST_INFO;
+ l = ME_NOTE;
break;
case IB_LOG_LEVEL_WARN:
- l = ME_JUST_WARNING;
+ l = ME_WARNING;
break;
case IB_LOG_LEVEL_ERROR:
sd_notifyf(0, "STATUS=InnoDB: Error: %s", str);
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index 945aeacfd20..af81b22f879 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -16,10 +16,9 @@ this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
*****************************************************************************/
-
#ifdef WITH_WSREP
-# include <mysql/service_wsrep.h>
-# include "../../../wsrep/wsrep_api.h"
+#include "wsrep_api.h"
+#include <mysql/service_wsrep.h>
#endif /* WITH_WSREP */
#include "table.h"
@@ -432,6 +431,12 @@ public:
return s.frm_version<FRM_VER_EXPRESSSIONS && s.virtual_fields;
}
+ /** Push a primary key filter.
+ @param[in] pk_filter filter against which primary keys
+ are to be checked
+ @retval false if pushed (always) */
+ bool rowid_filter_push(Rowid_filter *rowid_filter);
+
protected:
/**
MySQL calls this method at the end of each statement. This method
@@ -457,8 +462,11 @@ protected:
dict_index_t* innobase_get_index(uint keynr);
#ifdef WITH_WSREP
- int wsrep_append_keys(THD *thd, wsrep_key_type key_type,
- const uchar* record0, const uchar* record1);
+ int wsrep_append_keys(
+ THD *thd,
+ Wsrep_service_key_type key_type,
+ const uchar* record0,
+ const uchar* record1);
#endif
/** Builds a 'template' to the prebuilt struct.
@@ -570,23 +578,8 @@ extern void mysql_bin_log_commit_pos(THD *thd, ulonglong *out_pos, const char **
struct trx_t;
#ifdef WITH_WSREP
-//extern "C" int wsrep_trx_order_before(void *thd1, void *thd2);
-
-extern "C" bool wsrep_thd_is_wsrep_on(THD *thd);
-
-
-extern "C" void wsrep_thd_set_exec_mode(THD *thd, enum wsrep_exec_mode mode);
-extern "C" void wsrep_thd_set_query_state(
- THD *thd, enum wsrep_query_state state);
-
-extern "C" void wsrep_thd_set_trx_to_replay(THD *thd, uint64 trx_id);
-
-extern "C" uint32 wsrep_thd_wsrep_rand(THD *thd);
-extern "C" time_t wsrep_thd_query_start(THD *thd);
-extern "C" query_id_t wsrep_thd_query_id(THD *thd);
-extern "C" query_id_t wsrep_thd_wsrep_last_query_id(THD *thd);
-extern "C" void wsrep_thd_set_wsrep_last_query_id(THD *thd, query_id_t id);
-#endif
+#include <mysql/service_wsrep.h>
+#endif /* WITH_WSREP */
extern const struct _ft_vft ft_vft_result;
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index 70f2c1940fc..403dae8334d 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -104,6 +104,7 @@ static const alter_table_operations INNOBASE_INPLACE_IGNORE
| ALTER_PARTITIONED
| ALTER_COLUMN_COLUMN_FORMAT
| ALTER_COLUMN_STORAGE_TYPE
+ | ALTER_CONVERT_TO
| ALTER_VIRTUAL_GCOL_EXPR
| ALTER_DROP_CHECK_CONSTRAINT
| ALTER_RENAME
@@ -132,8 +133,721 @@ static const alter_table_operations INNOBASE_ALTER_INSTANT
| INNOBASE_FOREIGN_OPERATIONS
| ALTER_COLUMN_EQUAL_PACK_LENGTH
| ALTER_COLUMN_UNVERSIONED
+ | ALTER_RENAME_INDEX
| ALTER_DROP_VIRTUAL_COLUMN;
+/** Acquire a page latch on the possible metadata record,
+to prevent concurrent invocation of dict_index_t::clear_instant_alter()
+by purge when the table turns out to be empty.
+@param[in,out] index clustered index
+@param[in,out] mtr mini-transaction */
+static void instant_metadata_lock(dict_index_t& index, mtr_t& mtr)
+{
+ DBUG_ASSERT(index.is_primary());
+
+ if (!index.is_instant()) {
+ /* dict_index_t::clear_instant_alter() cannot be called.
+ No need for a latch. */
+ return;
+ }
+
+ btr_cur_t btr_cur;
+ btr_cur_open_at_index_side(true, &index, BTR_SEARCH_LEAF,
+ &btr_cur, 0, &mtr);
+ ut_ad(page_cur_is_before_first(btr_cur_get_page_cur(&btr_cur)));
+ ut_ad(page_is_leaf(btr_cur_get_page(&btr_cur)));
+ ut_ad(!page_has_prev(btr_cur_get_page(&btr_cur)));
+ ut_ad(!buf_block_get_page_zip(btr_cur_get_block(&btr_cur)));
+}
+
+/** Initialize instant->field_map.
+@tparam replace_dropped whether to point clustered index fields
+to instant->dropped[]
+@param[in] table table definition to copy from */
+template<bool replace_dropped>
+inline void dict_table_t::init_instant(const dict_table_t& table)
+{
+ const dict_index_t& oindex __attribute__((unused))= *table.indexes.start;
+ dict_index_t& index = *indexes.start;
+ const unsigned u = index.first_user_field();
+ DBUG_ASSERT(u == oindex.first_user_field());
+ DBUG_ASSERT(index.n_fields >= oindex.n_fields);
+
+ field_map_element_t* field_map_it = static_cast<field_map_element_t*>(
+ mem_heap_zalloc(heap, (index.n_fields - u)
+ * sizeof *field_map_it));
+ instant->field_map = field_map_it;
+
+ ut_d(unsigned n_drop = 0);
+ ut_d(unsigned n_nullable = 0);
+ for (unsigned i = u; i < index.n_fields; i++) {
+ auto& f = index.fields[i];
+ DBUG_ASSERT(dict_col_get_fixed_size(f.col, not_redundant())
+ <= DICT_MAX_FIXED_COL_LEN);
+ ut_d(n_nullable += f.col->is_nullable());
+
+ if (!f.col->is_dropped()) {
+ (*field_map_it++).set_ind(f.col->ind);
+ continue;
+ }
+
+ auto fixed_len = dict_col_get_fixed_size(
+ f.col, not_redundant());
+ field_map_it->set_dropped();
+ if (!f.col->is_nullable()) {
+ field_map_it->set_not_null();
+ }
+ field_map_it->set_ind(fixed_len
+ ? uint16_t(fixed_len + 1)
+ : DATA_BIG_COL(f.col));
+ field_map_it++;
+ ut_ad(f.col >= table.instant->dropped);
+ ut_ad(f.col < table.instant->dropped
+ + table.instant->n_dropped);
+ ut_d(n_drop++);
+ if (replace_dropped) {
+ size_t d = f.col - table.instant->dropped;
+ ut_ad(f.col == &table.instant->dropped[d]);
+ ut_ad(d <= instant->n_dropped);
+ f.col = &instant->dropped[d];
+ }
+ }
+ ut_ad(n_drop == n_dropped());
+ ut_ad(field_map_it == &instant->field_map[index.n_fields - u]);
+ ut_ad(index.n_nullable == n_nullable);
+}
+
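/* Editor's illustration (not part of the patch; column names and sizes are
   hypothetical): for a clustered index whose user fields are
   (a INT NOT NULL, <instantly dropped CHAR(3) NOT NULL>, b VARCHAR(10)),
   the loop above emits one field_map_element_t per field:
       a                -> set_ind(a's position in cols[])
       dropped CHAR(3)  -> set_dropped(); set_not_null(); set_ind(3 + 1)
       b                -> set_ind(b's position in cols[])
   Surviving columns record their column number; dropped fixed-length
   columns record fixed length + 1, and dropped variable-length columns
   record DATA_BIG_COL(col) instead. */
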
+/** Set is_instant() before instant_column().
+@param[in] old previous table definition
+@param[in] col_map map from old.cols[] and old.v_cols[] to this
+@param[out] first_alter_pos 0, or 1 + first changed column position */
+inline void dict_table_t::prepare_instant(const dict_table_t& old,
+ const ulint* col_map,
+ unsigned& first_alter_pos)
+{
+ DBUG_ASSERT(!is_instant());
+ DBUG_ASSERT(n_dropped() == 0);
+ DBUG_ASSERT(old.n_cols == old.n_def);
+ DBUG_ASSERT(n_cols == n_def);
+ DBUG_ASSERT(old.supports_instant());
+ DBUG_ASSERT(!persistent_autoinc
+ || persistent_autoinc == old.persistent_autoinc);
+ /* supports_instant() does not necessarily hold here,
+ in case ROW_FORMAT=COMPRESSED according to the
+ MariaDB data dictionary, and ALTER_OPTIONS was not set.
+ If that is the case, the instant ALTER TABLE would keep
+ the InnoDB table in its current format. */
+
+ dict_index_t& oindex = *old.indexes.start;
+ dict_index_t& index = *indexes.start;
+ first_alter_pos = 0;
+
+ mtr_t mtr;
+ mtr.start();
+ /* Protect oindex.n_core_fields and others, so that
+ purge cannot invoke dict_index_t::clear_instant_alter(). */
+ instant_metadata_lock(oindex, mtr);
+
+ for (unsigned i = 0; i + DATA_N_SYS_COLS < old.n_cols; i++) {
+ if (col_map[i] != i) {
+ first_alter_pos = 1 + i;
+ goto add_metadata;
+ }
+ }
+
+ if (!old.instant) {
+ /* Columns were not dropped or reordered.
+ Therefore columns must have been added at the end,
+ or modified instantly in place. */
+ DBUG_ASSERT(index.n_fields >= oindex.n_fields);
+ DBUG_ASSERT(index.n_fields > oindex.n_fields
+ || !not_redundant());
+#ifdef UNIV_DEBUG
+ if (index.n_fields == oindex.n_fields) {
+ ut_ad(!not_redundant());
+ for (unsigned i = index.n_fields; i--; ) {
+ ut_ad(index.fields[i].col->same_format(
+ *oindex.fields[i].col));
+ }
+ }
+#endif
+set_core_fields:
+ index.n_core_fields = oindex.n_core_fields;
+ index.n_core_null_bytes = oindex.n_core_null_bytes;
+ } else {
+add_metadata:
+ const unsigned n_old_drop = old.n_dropped();
+ unsigned n_drop = n_old_drop;
+ for (unsigned i = old.n_cols; i--; ) {
+ if (col_map[i] == ULINT_UNDEFINED) {
+ DBUG_ASSERT(i + DATA_N_SYS_COLS
+ < uint(old.n_cols));
+ n_drop++;
+ }
+ }
+
+ instant = new (mem_heap_alloc(heap, sizeof(dict_instant_t)))
+ dict_instant_t();
+ instant->n_dropped = n_drop;
+ if (n_drop) {
+ instant->dropped
+ = static_cast<dict_col_t*>(
+ mem_heap_alloc(heap, n_drop
+ * sizeof(dict_col_t)));
+ if (n_old_drop) {
+ memcpy(instant->dropped, old.instant->dropped,
+ n_old_drop * sizeof(dict_col_t));
+ }
+ } else {
+ instant->dropped = NULL;
+ }
+
+ for (unsigned i = 0, d = n_old_drop; i < old.n_cols; i++) {
+ if (col_map[i] == ULINT_UNDEFINED) {
+ (new (&instant->dropped[d++])
+ dict_col_t(old.cols[i]))->set_dropped();
+ }
+ }
+#ifndef DBUG_OFF
+ for (unsigned i = 0; i < n_drop; i++) {
+ DBUG_ASSERT(instant->dropped[i].is_dropped());
+ }
+#endif
+ const uint n_fields = index.n_fields + n_dropped();
+
+ DBUG_ASSERT(n_fields >= oindex.n_fields);
+ dict_field_t* fields = static_cast<dict_field_t*>(
+ mem_heap_zalloc(heap, n_fields * sizeof *fields));
+ uint i = 0, j = 0, n_nullable = 0;
+ ut_d(uint core_null = 0);
+ for (; i < oindex.n_fields; i++) {
+ DBUG_ASSERT(j <= i);
+ dict_field_t&f = fields[i] = oindex.fields[i];
+ if (f.col->is_dropped()) {
+ /* The column has been instantly
+ dropped earlier. */
+ DBUG_ASSERT(f.col >= old.instant->dropped);
+ {
+ size_t d = f.col
+ - old.instant->dropped;
+ DBUG_ASSERT(d < n_old_drop);
+ DBUG_ASSERT(&old.instant->dropped[d]
+ == f.col);
+ DBUG_ASSERT(!f.name);
+ f.col = instant->dropped + d;
+ }
+ if (f.col->is_nullable()) {
+found_nullable:
+ n_nullable++;
+ ut_d(core_null
+ += i < oindex.n_core_fields);
+ }
+ continue;
+ }
+
+ const ulint col_ind = col_map[f.col->ind];
+ if (col_ind != ULINT_UNDEFINED) {
+ if (index.fields[j].col->ind != col_ind) {
+ /* The fields for instantly
+ added columns must be placed
+ last in the clustered index.
+ Keep pre-existing fields in
+ the same position. */
+ uint k;
+ for (k = j + 1; k < index.n_fields;
+ k++) {
+ if (index.fields[k].col->ind
+ == col_ind) {
+ goto found_j;
+ }
+ }
+ DBUG_ASSERT(!"no such col");
+found_j:
+ std::swap(index.fields[j],
+ index.fields[k]);
+ }
+ DBUG_ASSERT(index.fields[j].col->ind
+ == col_ind);
+ fields[i] = index.fields[j++];
+ DBUG_ASSERT(!fields[i].col->is_dropped());
+ DBUG_ASSERT(fields[i].name
+ == fields[i].col->name(*this));
+ if (fields[i].col->is_nullable()) {
+ goto found_nullable;
+ }
+ continue;
+ }
+
+ /* This column is being dropped. */
+ unsigned d = n_old_drop;
+ for (unsigned c = 0; c < f.col->ind; c++) {
+ d += col_map[c] == ULINT_UNDEFINED;
+ }
+ DBUG_ASSERT(d < n_drop);
+ f.col = &instant->dropped[d];
+ f.name = NULL;
+ if (f.col->is_nullable()) {
+ goto found_nullable;
+ }
+ }
+ /* The n_core_null_bytes only matters for
+ ROW_FORMAT=COMPACT and ROW_FORMAT=DYNAMIC tables. */
+ ut_ad(UT_BITS_IN_BYTES(core_null) == oindex.n_core_null_bytes
+ || !not_redundant());
+ DBUG_ASSERT(i >= oindex.n_core_fields);
+ DBUG_ASSERT(j <= i);
+ DBUG_ASSERT(n_fields - (i - j) == index.n_fields);
+ std::sort(index.fields + j, index.fields + index.n_fields,
+ [](const dict_field_t& a, const dict_field_t& b)
+ { return a.col->ind < b.col->ind; });
+ for (; i < n_fields; i++) {
+ fields[i] = index.fields[j++];
+ n_nullable += fields[i].col->is_nullable();
+ DBUG_ASSERT(!fields[i].col->is_dropped());
+ DBUG_ASSERT(fields[i].name
+ == fields[i].col->name(*this));
+ }
+ DBUG_ASSERT(j == index.n_fields);
+ index.n_fields = index.n_def = n_fields;
+ index.fields = fields;
+ DBUG_ASSERT(n_nullable >= index.n_nullable);
+ DBUG_ASSERT(n_nullable >= oindex.n_nullable);
+ index.n_nullable = n_nullable;
+ goto set_core_fields;
+ }
+
+ DBUG_ASSERT(n_cols + n_dropped() >= old.n_cols + old.n_dropped());
+ DBUG_ASSERT(n_dropped() >= old.n_dropped());
+ DBUG_ASSERT(index.n_core_fields == oindex.n_core_fields);
+ DBUG_ASSERT(index.n_core_null_bytes == oindex.n_core_null_bytes);
+ mtr.commit();
+}
+
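/* Editor's illustration (not part of the patch; the table layout is
   hypothetical): col_map maps old column positions to new ones, with
   ULINT_UNDEFINED marking a dropped column. If an ALTER drops the second
   of three user columns (a, b, c), col_map starts {0, ULINT_UNDEFINED, 1}
   with the system columns following, so the first loop in
   prepare_instant() finds the mismatch at i == 1 and sets
   first_alter_pos = 2 before jumping to the add_metadata path. */
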
+/** Adjust index metadata for instant ADD/DROP/reorder COLUMN.
+@param[in] instant clustered index definition after instant ALTER TABLE */
+inline void dict_index_t::instant_add_field(const dict_index_t& instant)
+{
+ DBUG_ASSERT(is_primary());
+ DBUG_ASSERT(instant.is_primary());
+ DBUG_ASSERT(!has_virtual());
+ DBUG_ASSERT(!instant.has_virtual());
+ DBUG_ASSERT(instant.n_core_fields <= instant.n_fields);
+ DBUG_ASSERT(n_def == n_fields);
+ DBUG_ASSERT(instant.n_def == instant.n_fields);
+ DBUG_ASSERT(type == instant.type);
+ DBUG_ASSERT(trx_id_offset == instant.trx_id_offset);
+ DBUG_ASSERT(n_user_defined_cols == instant.n_user_defined_cols);
+ DBUG_ASSERT(n_uniq == instant.n_uniq);
+ DBUG_ASSERT(instant.n_fields >= n_fields);
+ DBUG_ASSERT(instant.n_nullable >= n_nullable);
+ /* dict_table_t::prepare_instant() initialized n_core_fields
+ to be equal. However, after that purge could have emptied the
+ table and invoked dict_index_t::clear_instant_alter(). */
+ DBUG_ASSERT(instant.n_core_fields <= n_core_fields);
+ DBUG_ASSERT(instant.n_core_null_bytes <= n_core_null_bytes);
+ DBUG_ASSERT(instant.n_core_fields == n_core_fields
+ || (!is_instant() && instant.is_instant()));
+ DBUG_ASSERT(instant.n_core_null_bytes == n_core_null_bytes
+ || (!is_instant() && instant.is_instant()));
+
+ /* instant will have all fields (including ones for columns
+ that have been or are being instantly dropped) in the same position
+ as this index. Fields for any added columns are appended at the end. */
+#ifndef DBUG_OFF
+ for (unsigned i = 0; i < n_fields; i++) {
+ DBUG_ASSERT(fields[i].same(instant.fields[i]));
+ DBUG_ASSERT(instant.fields[i].col->same_format(*fields[i]
+ .col));
+ /* Instant conversion from NULL to NOT NULL is not allowed. */
+ DBUG_ASSERT(!fields[i].col->is_nullable()
+ || instant.fields[i].col->is_nullable());
+ DBUG_ASSERT(fields[i].col->is_nullable()
+ == instant.fields[i].col->is_nullable()
+ || !table->not_redundant());
+ }
+#endif
+ n_fields = instant.n_fields;
+ n_def = instant.n_def;
+ n_nullable = instant.n_nullable;
+ fields = static_cast<dict_field_t*>(
+ mem_heap_dup(heap, instant.fields, n_fields * sizeof *fields));
+
+ ut_d(unsigned n_null = 0);
+ ut_d(unsigned n_dropped = 0);
+
+ for (unsigned i = 0; i < n_fields; i++) {
+ const dict_col_t* icol = instant.fields[i].col;
+ dict_field_t& f = fields[i];
+ ut_d(n_null += icol->is_nullable());
+ DBUG_ASSERT(!icol->is_virtual());
+ if (icol->is_dropped()) {
+ ut_d(n_dropped++);
+ f.col->set_dropped();
+ f.name = NULL;
+ } else {
+ f.col = &table->cols[icol - instant.table->cols];
+ f.name = f.col->name(*table);
+ }
+ }
+
+ ut_ad(n_null == n_nullable);
+ ut_ad(n_dropped == instant.table->n_dropped());
+}
+
+/** Adjust table metadata for instant ADD/DROP/reorder COLUMN.
+@param[in] table altered table (with dropped columns)
+@param[in] col_map mapping from cols[] and v_cols[] to table
+@return whether the metadata record must be updated */
+inline bool dict_table_t::instant_column(const dict_table_t& table,
+ const ulint* col_map)
+{
+ DBUG_ASSERT(!table.cached);
+ DBUG_ASSERT(table.n_def == table.n_cols);
+ DBUG_ASSERT(table.n_t_def == table.n_t_cols);
+ DBUG_ASSERT(n_def == n_cols);
+ DBUG_ASSERT(n_t_def == n_t_cols);
+ DBUG_ASSERT(n_v_def == n_v_cols);
+ DBUG_ASSERT(table.n_v_def == table.n_v_cols);
+ DBUG_ASSERT(table.n_cols + table.n_dropped() >= n_cols + n_dropped());
+ DBUG_ASSERT(!table.persistent_autoinc
+ || persistent_autoinc == table.persistent_autoinc);
+ ut_ad(mutex_own(&dict_sys->mutex));
+
+ {
+ const char* end = table.col_names;
+ for (unsigned i = table.n_cols; i--; ) end += strlen(end) + 1;
+
+ col_names = static_cast<char*>(
+ mem_heap_dup(heap, table.col_names,
+ ulint(end - table.col_names)));
+ }
+ const dict_col_t* const old_cols = cols;
+ cols = static_cast<dict_col_t*>(mem_heap_dup(heap, table.cols,
+ table.n_cols
+ * sizeof *cols));
+
+ /* Preserve the default values of previously instantly added
+ columns, or copy the new default values to this->heap. */
+ for (ulint i = 0; i < ulint(table.n_cols); i++) {
+ dict_col_t& c = cols[i];
+
+ if (const dict_col_t* o = find(old_cols, col_map, n_cols, i)) {
+ c.def_val = o->def_val;
+ DBUG_ASSERT(!((c.prtype ^ o->prtype)
+ & ~(DATA_NOT_NULL | DATA_VERSIONED
+ | DATA_LONG_TRUE_VARCHAR)));
+ DBUG_ASSERT(c.mtype == o->mtype);
+ DBUG_ASSERT(c.len >= o->len);
+
+ if (o->vers_sys_start()) {
+ ut_ad(o->ind == vers_start);
+ vers_start = i;
+ } else if (o->vers_sys_end()) {
+ ut_ad(o->ind == vers_end);
+ vers_end = i;
+ }
+ continue;
+ }
+
+ DBUG_ASSERT(c.is_added());
+ if (c.def_val.len <= sizeof field_ref_zero
+ && !memcmp(c.def_val.data, field_ref_zero,
+ c.def_val.len)) {
+ c.def_val.data = field_ref_zero;
+ } else if (const void*& d = c.def_val.data) {
+ d = mem_heap_dup(heap, d, c.def_val.len);
+ } else {
+ DBUG_ASSERT(c.def_val.len == UNIV_SQL_NULL);
+ }
+ }
+
+ n_t_def += table.n_cols - n_cols;
+ n_t_cols += table.n_cols - n_cols;
+ n_def = table.n_cols;
+
+ const dict_v_col_t* const old_v_cols = v_cols;
+
+ if (const char* end = table.v_col_names) {
+ for (unsigned i = table.n_v_cols; i--; ) {
+ end += strlen(end) + 1;
+ }
+
+ v_col_names = static_cast<char*>(
+ mem_heap_dup(heap, table.v_col_names,
+ ulint(end - table.v_col_names)));
+ v_cols = static_cast<dict_v_col_t*>(
+ mem_heap_dup(heap, table.v_cols,
+ table.n_v_cols * sizeof *v_cols));
+ } else {
+ ut_ad(table.n_v_cols == 0);
+ v_col_names = NULL;
+ v_cols = NULL;
+ }
+
+ n_t_def += table.n_v_cols - n_v_cols;
+ n_t_cols += table.n_v_cols - n_v_cols;
+ n_v_def = table.n_v_cols;
+
+ for (unsigned i = 0; i < n_v_def; i++) {
+ dict_v_col_t& v = v_cols[i];
+ v.v_indexes = UT_NEW_NOKEY(dict_v_idx_list());
+ v.base_col = static_cast<dict_col_t**>(
+ mem_heap_dup(heap, v.base_col,
+ v.num_base * sizeof *v.base_col));
+
+ for (ulint n = v.num_base; n--; ) {
+ dict_col_t*& base = v.base_col[n];
+ if (base->is_virtual()) {
+ } else if (base >= table.cols
+ && base < table.cols + table.n_cols) {
+ /* The base column was instantly added. */
+ size_t c = base - table.cols;
+ DBUG_ASSERT(base == &table.cols[c]);
+ base = &cols[c];
+ } else {
+ DBUG_ASSERT(base >= old_cols);
+ size_t c = base - old_cols;
+ DBUG_ASSERT(c + DATA_N_SYS_COLS < n_cols);
+ DBUG_ASSERT(base == &old_cols[c]);
+ DBUG_ASSERT(col_map[c] + DATA_N_SYS_COLS
+ < n_cols);
+ base = &cols[col_map[c]];
+ }
+ }
+ }
+
+ dict_index_t* index = dict_table_get_first_index(this);
+ bool metadata_changed;
+ {
+ const dict_index_t& i = *dict_table_get_first_index(&table);
+ metadata_changed = i.n_fields > index->n_fields;
+ ut_ad(i.n_fields >= index->n_fields);
+ index->instant_add_field(i);
+ }
+
+ if (instant || table.instant) {
+ const auto old_instant = instant;
+ /* FIXME: add instant->heap, and transfer ownership here */
+ if (!instant) {
+ instant = new (mem_heap_zalloc(heap, sizeof *instant))
+ dict_instant_t();
+ goto dup_dropped;
+ } else if (n_dropped() < table.n_dropped()) {
+dup_dropped:
+ instant->dropped = static_cast<dict_col_t*>(
+ mem_heap_dup(heap, table.instant->dropped,
+ table.instant->n_dropped
+ * sizeof *instant->dropped));
+ instant->n_dropped = table.instant->n_dropped;
+ } else if (table.instant->n_dropped) {
+ memcpy(instant->dropped, table.instant->dropped,
+ table.instant->n_dropped
+ * sizeof *instant->dropped);
+ }
+
+ const field_map_element_t* field_map = old_instant
+ ? old_instant->field_map : NULL;
+
+ init_instant<true>(table);
+
+ if (!metadata_changed) {
+ metadata_changed = !field_map
+ || memcmp(field_map,
+ instant->field_map,
+ (index->n_fields
+ - index->first_user_field())
+ * sizeof *field_map);
+ }
+ }
+
+ while ((index = dict_table_get_next_index(index)) != NULL) {
+ if (index->to_be_dropped) {
+ continue;
+ }
+ for (unsigned i = 0; i < index->n_fields; i++) {
+ dict_field_t& f = index->fields[i];
+ if (f.col >= table.cols
+ && f.col < table.cols + table.n_cols) {
+ /* This is an instantly added column
+ in a newly added index. */
+ DBUG_ASSERT(!f.col->is_virtual());
+ size_t c = f.col - table.cols;
+ DBUG_ASSERT(f.col == &table.cols[c]);
+ f.col = &cols[c];
+ } else if (f.col >= &table.v_cols->m_col
+ && f.col < &table.v_cols[n_v_cols].m_col) {
+ /* This is an instantly added virtual column
+ in a newly added index. */
+ DBUG_ASSERT(f.col->is_virtual());
+ size_t c = reinterpret_cast<dict_v_col_t*>(
+ f.col) - table.v_cols;
+ DBUG_ASSERT(f.col == &table.v_cols[c].m_col);
+ f.col = &v_cols[c].m_col;
+ } else if (f.col < old_cols
+ || f.col >= old_cols + n_cols) {
+ DBUG_ASSERT(f.col->is_virtual());
+ f.col = &v_cols[col_map[
+ reinterpret_cast<dict_v_col_t*>(
+ f.col)
+ - old_v_cols + n_cols]].m_col;
+ } else {
+ f.col = &cols[col_map[f.col - old_cols]];
+ DBUG_ASSERT(!f.col->is_virtual());
+ }
+ f.name = f.col->name(*this);
+ if (f.col->is_virtual()) {
+ reinterpret_cast<dict_v_col_t*>(f.col)
+ ->v_indexes->push_back(
+ dict_v_idx_t(index, i));
+ }
+ }
+ }
+
+ n_cols = table.n_cols;
+ n_v_cols = table.n_v_cols;
+ return metadata_changed;
+}
+
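/* Editor's note (not part of the patch): the value returned above becomes
   true either because the clustered index gained fields (instant ADD
   COLUMN) or because the serialized field_map differs from the previous
   one. An instant DROP COLUMN keeps the same number of clustered index
   fields (the dropped column's field remains, pointing into
   instant->dropped[]), so in that case it is the field_map comparison, or
   the absence of a previous field_map, that forces the hidden metadata
   record to be rewritten. */
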
+/** Find the old column number for the given new column position.
+@param[in] col_map column map from old column to new column
+@param[in] pos new column position
+@param[in] n number of columns present in the column map
+@return old column position for the given new column position. */
+static ulint find_old_col_no(const ulint* col_map, ulint pos, ulint n)
+{
+ do {
+ ut_ad(n);
+ } while (col_map[--n] != pos);
+ return n;
+}
+
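/* Editor's illustration (not part of the patch): a minimal standalone
   model of the reverse lookup below, using std::size_t in place of the
   InnoDB ulint typedef.

   #include <cassert>
   #include <cstddef>

   static const std::size_t UNDEF = ~std::size_t(0); // stand-in for ULINT_UNDEFINED

   static std::size_t find_old_col_no_model(const std::size_t* col_map,
                                            std::size_t pos, std::size_t n)
   {
       do { assert(n); } while (col_map[--n] != pos);
       return n;
   }

   int main()
   {
       const std::size_t col_map[] = {0, UNDEF, 1};       // old column 1 was dropped
       assert(find_old_col_no_model(col_map, 1, 3) == 2); // new col 1 came from old col 2
   }
*/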
+/** Roll back instant_column().
+@param[in] old_n_cols original n_cols
+@param[in] old_cols original cols
+@param[in] old_col_names original col_names
+@param[in] old_instant original instant structure
+@param[in] old_fields original fields
+@param[in] old_n_fields original number of fields
+@param[in] old_n_core_fields original number of core fields
+@param[in] old_n_v_cols original n_v_cols
+@param[in] old_v_cols original v_cols
+@param[in] old_v_col_names original v_col_names
+@param[in] col_map column map */
+inline void dict_table_t::rollback_instant(
+ unsigned old_n_cols,
+ dict_col_t* old_cols,
+ const char* old_col_names,
+ dict_instant_t* old_instant,
+ dict_field_t* old_fields,
+ unsigned old_n_fields,
+ unsigned old_n_core_fields,
+ unsigned old_n_v_cols,
+ dict_v_col_t* old_v_cols,
+ const char* old_v_col_names,
+ const ulint* col_map)
+{
+ ut_ad(mutex_own(&dict_sys->mutex));
+ ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
+ dict_index_t* index = indexes.start;
+ mtr_t mtr;
+ mtr.start();
+ /* Prevent concurrent execution of dict_index_t::clear_instant_alter()
+ by acquiring a latch on the leftmost leaf page. */
+ instant_metadata_lock(*index, mtr);
+ /* index->is_instant() does not necessarily hold here, because
+ the table may have been emptied */
+ DBUG_ASSERT(old_n_cols >= DATA_N_SYS_COLS);
+ DBUG_ASSERT(n_cols == n_def);
+ DBUG_ASSERT(index->n_def == index->n_fields);
+ DBUG_ASSERT(index->n_core_fields <= index->n_fields);
+ DBUG_ASSERT(old_n_core_fields <= old_n_fields);
+ DBUG_ASSERT(instant || !old_instant);
+
+ instant = old_instant;
+
+ index->n_nullable = 0;
+
+ for (unsigned i = old_n_fields; i--; ) {
+ if (old_fields[i].col->is_nullable()) {
+ index->n_nullable++;
+ }
+ }
+
+ for (unsigned i = n_v_cols; i--; ) {
+ UT_DELETE(v_cols[i].v_indexes);
+ }
+
+ index->n_core_fields = (index->n_fields == index->n_core_fields)
+ ? old_n_fields
+ : old_n_core_fields;
+ index->n_def = index->n_fields = old_n_fields;
+ index->n_core_null_bytes = UT_BITS_IN_BYTES(
+ index->get_n_nullable(index->n_core_fields));
+
+ const dict_col_t* const new_cols = cols;
+ const dict_col_t* const new_cols_end __attribute__((unused)) = cols + n_cols;
+ const dict_v_col_t* const new_v_cols = v_cols;
+ const dict_v_col_t* const new_v_cols_end __attribute__((unused))= v_cols + n_v_cols;
+
+ cols = old_cols;
+ col_names = old_col_names;
+ v_cols = old_v_cols;
+ v_col_names = old_v_col_names;
+ n_def = n_cols = old_n_cols;
+ n_v_def = n_v_cols = old_n_v_cols;
+ n_t_def = n_t_cols = n_cols + n_v_cols;
+
+ if (versioned()) {
+ for (unsigned i = 0; i < n_cols; ++i) {
+ if (cols[i].vers_sys_start()) {
+ vers_start = i;
+ } else if (cols[i].vers_sys_end()) {
+ vers_end = i;
+ }
+ }
+ }
+
+ index->fields = old_fields;
+ mtr.commit();
+
+ while ((index = dict_table_get_next_index(index)) != NULL) {
+ if (index->to_be_dropped) {
+ /* instant_column() did not adjust these indexes. */
+ continue;
+ }
+
+ for (unsigned i = 0; i < index->n_fields; i++) {
+ dict_field_t& f = index->fields[i];
+ if (f.col->is_virtual()) {
+ DBUG_ASSERT(f.col >= &new_v_cols->m_col);
+ DBUG_ASSERT(f.col < &new_v_cols_end->m_col);
+ size_t n = size_t(
+ reinterpret_cast<dict_v_col_t*>(f.col)
+ - new_v_cols);
+ DBUG_ASSERT(n <= n_v_cols);
+
+ ulint old_col_no = find_old_col_no(
+ col_map + n_cols, n, n_v_cols);
+ DBUG_ASSERT(old_col_no <= n_v_cols);
+ f.col = &v_cols[old_col_no].m_col;
+ DBUG_ASSERT(f.col->is_virtual());
+ } else {
+ DBUG_ASSERT(f.col >= new_cols);
+ DBUG_ASSERT(f.col < new_cols_end);
+ size_t n = size_t(f.col - new_cols);
+ DBUG_ASSERT(n <= n_cols);
+
+ ulint old_col_no = find_old_col_no(col_map,
+ n, n_cols);
+ DBUG_ASSERT(old_col_no < n_cols);
+ f.col = &cols[old_col_no];
+ DBUG_ASSERT(!f.col->is_virtual());
+ }
+ f.name = f.col->name(*this);
+ }
+ }
+}
+
struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
{
/** Dummy query graph */
@@ -150,10 +864,6 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
dict_index_t** drop_index;
/** number of InnoDB indexes being dropped */
const ulint num_to_drop_index;
- /** InnoDB indexes being renamed */
- dict_index_t** rename;
- /** number of InnoDB indexes being renamed */
- const ulint num_to_rename;
/** InnoDB foreign key constraints being dropped */
dict_foreign_t** drop_fk;
/** number of InnoDB foreign key constraints being dropped */
@@ -172,7 +882,7 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
dict_table_t* old_table;
/** table where the indexes are being created or dropped */
dict_table_t* new_table;
- /** table definition for instant ADD COLUMN */
+ /** table definition for instant ADD/DROP/reorder COLUMN */
dict_table_t* instant_table;
/** mapping of old column numbers to new ones, or NULL */
const ulint* col_map;
@@ -206,7 +916,22 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
dict_col_t* const old_cols;
/** original column names of the table */
const char* const old_col_names;
-
+ /** original instantly dropped or reordered columns */
+ dict_instant_t* const old_instant;
+ /** original index fields */
+ dict_field_t* const old_fields;
+ /** size of old_fields */
+ const unsigned old_n_fields;
+ /** original old_table->n_core_fields */
+ const unsigned old_n_core_fields;
+ /** original number of virtual columns in the table */
+ const unsigned old_n_v_cols;
+ /** original virtual columns of the table */
+ dict_v_col_t* const old_v_cols;
+ /** original virtual column names of the table */
+ const char* const old_v_col_names;
+ /** 0, or 1 + first column whose position changes in instant ALTER */
+ unsigned first_alter_pos;
/** Allow non-null conversion.
(1) Alter ignore should allow the conversion
irrespective of sql mode.
@@ -220,8 +945,6 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
ha_innobase_inplace_ctx(row_prebuilt_t*& prebuilt_arg,
dict_index_t** drop_arg,
ulint num_to_drop_arg,
- dict_index_t** rename_arg,
- ulint num_to_rename_arg,
dict_foreign_t** drop_fk_arg,
ulint num_to_drop_fk_arg,
dict_foreign_t** add_fk_arg,
@@ -240,7 +963,6 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
prebuilt (prebuilt_arg),
add_index (0), add_key_numbers (0), num_to_add_index (0),
drop_index (drop_arg), num_to_drop_index (num_to_drop_arg),
- rename (rename_arg), num_to_rename (num_to_rename_arg),
drop_fk (drop_fk_arg), num_to_drop_fk (num_to_drop_fk_arg),
add_fk (add_fk_arg), num_to_add_fk (num_to_add_fk_arg),
online (online_arg), heap (heap_arg), trx (0),
@@ -263,6 +985,15 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
old_n_cols(prebuilt_arg->table->n_cols),
old_cols(prebuilt_arg->table->cols),
old_col_names(prebuilt_arg->table->col_names),
+ old_instant(prebuilt_arg->table->instant),
+ old_fields(prebuilt_arg->table->indexes.start->fields),
+ old_n_fields(prebuilt_arg->table->indexes.start->n_fields),
+ old_n_core_fields(prebuilt_arg->table->indexes.start
+ ->n_core_fields),
+ old_n_v_cols(prebuilt_arg->table->n_v_cols),
+ old_v_cols(prebuilt_arg->table->v_cols),
+ old_v_col_names(prebuilt_arg->table->v_col_names),
+ first_alter_pos(0),
allow_not_null(allow_not_null_flag),
page_compression_level(page_compressed
? (page_compression_level_arg
@@ -295,6 +1026,9 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
rw_lock_free(&index->lock);
dict_mem_index_free(index);
}
+ for (unsigned i = old_n_v_cols; i--; ) {
+ UT_DELETE(old_v_cols[i].v_indexes);
+ }
dict_mem_table_free(instant_table);
}
mem_heap_free(heap);
@@ -319,14 +1053,24 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
{
DBUG_ASSERT(need_rebuild());
DBUG_ASSERT(!is_instant());
- DBUG_ASSERT(old_table->n_cols == old_table->n_def);
- DBUG_ASSERT(new_table->n_cols == new_table->n_def);
DBUG_ASSERT(old_table->n_cols == old_n_cols);
- DBUG_ASSERT(new_table->n_cols > old_table->n_cols);
- instant_table = new_table;
+ instant_table = new_table;
new_table = old_table;
export_vars.innodb_instant_alter_column++;
+
+ instant_table->prepare_instant(*old_table, col_map,
+ first_alter_pos);
+ }
+
+ /** Adjust table metadata for instant ADD/DROP/reorder COLUMN.
+ @return whether the metadata record must be updated */
+ bool instant_column()
+ {
+ DBUG_ASSERT(is_instant());
+ DBUG_ASSERT(old_n_fields
+ == old_table->indexes.start->n_fields);
+ return old_table->instant_column(*instant_table, col_map);
}
/** Revert prepare_instant() if the transaction is rolled back. */
@@ -334,7 +1078,13 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
{
if (!is_instant()) return;
old_table->rollback_instant(old_n_cols,
- old_cols, old_col_names);
+ old_cols, old_col_names,
+ old_instant,
+ old_fields, old_n_fields,
+ old_n_core_fields,
+ old_n_v_cols, old_v_cols,
+ old_v_col_names,
+ col_map);
}
/** @return whether this is instant ALTER TABLE */
@@ -344,6 +1094,40 @@ struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
return instant_table;
}
+ /** Create an index table where indexes are ordered as follows:
+
+ IF a new primary key is defined for the table THEN
+
+ 1) New primary key
+ 2) The remaining keys in key_info
+
+ ELSE
+
+ 1) All new indexes in the order they arrive from MySQL
+
+ ENDIF
+
+ @return key definitions */
+ MY_ATTRIBUTE((nonnull, warn_unused_result, malloc))
+ inline index_def_t*
+ create_key_defs(
+ const Alter_inplace_info* ha_alter_info,
+ /*!< in: alter operation */
+ const TABLE* altered_table,
+ /*!< in: MySQL table that is being altered */
+ ulint& n_fts_add,
+ /*!< out: number of FTS indexes to be created */
+ ulint& fts_doc_id_col,
+ /*!< in: The column number for Doc ID */
+ bool& add_fts_doc_id,
+ /*!< in: whether we need to add new DOC ID
+ column for FTS index */
+ bool& add_fts_doc_idx,
+ /*!< in: whether we need to add new DOC ID
+ index for FTS index */
+ const TABLE* table);
+ /*!< in: MySQL table that is being altered */
+
private:
// Disable copying
ha_innobase_inplace_ctx(const ha_innobase_inplace_ctx&);
@@ -660,20 +1444,237 @@ check_v_col_in_order(
}
/** Determine if an instant operation is possible for altering columns.
+@param[in] ib_table InnoDB table definition
@param[in] ha_alter_info the ALTER TABLE operation
-@param[in] table table definition before ALTER TABLE */
+@param[in] table table definition before ALTER TABLE
+@param[in] altered_table table definition after ALTER TABLE
+@param[in] strict whether to ensure that user records fit */
static
bool
instant_alter_column_possible(
+ const dict_table_t& ib_table,
const Alter_inplace_info* ha_alter_info,
- const TABLE* table)
+ const TABLE* table,
+ const TABLE* altered_table,
+ bool strict)
{
+ const dict_index_t* const pk = ib_table.indexes.start;
+ ut_ad(pk->is_primary());
+ ut_ad(!pk->has_virtual());
+
+ if (ha_alter_info->handler_flags
+ & (ALTER_STORED_COLUMN_ORDER | ALTER_DROP_STORED_COLUMN
+ | ALTER_ADD_STORED_BASE_COLUMN)) {
+#if 1 // MDEV-17459: adjust fts_fetch_doc_from_rec() and friends; remove this
+ if (ib_table.fts || innobase_fulltext_exist(altered_table))
+ return false;
+#endif
+#if 1 // MDEV-17468: fix bugs with indexed virtual columns & remove this
+ for (const dict_index_t* index = ib_table.indexes.start;
+ index; index = index->indexes.next) {
+ if (index->has_virtual()) {
+ ut_ad(ib_table.n_v_cols);
+ return false;
+ }
+ }
+#endif
+ uint n_add = 0, n_nullable = 0, lenlen = 0;
+ const uint blob_prefix = dict_table_has_atomic_blobs(&ib_table)
+ ? 0
+ : REC_ANTELOPE_MAX_INDEX_COL_LEN;
+ const uint min_local_len = blob_prefix
+ ? blob_prefix + FIELD_REF_SIZE
+ : 2 * FIELD_REF_SIZE;
+ size_t min_size = 0, max_size = 0;
+ Field** af = altered_table->field;
+ Field** const end = altered_table->field
+ + altered_table->s->fields;
+ List_iterator_fast<Create_field> cf_it(
+ ha_alter_info->alter_info->create_list);
+
+ for (; af < end; af++) {
+ const Create_field* cf = cf_it++;
+ if (!(*af)->stored_in_db() || cf->field) {
+ /* Virtual or pre-existing column */
+ continue;
+ }
+ const bool nullable = (*af)->real_maybe_null();
+ const bool is_null = (*af)->is_real_null();
+ ut_ad(!is_null || nullable);
+ n_nullable += nullable;
+ n_add++;
+ uint l;
+ switch ((*af)->type()) {
+ case MYSQL_TYPE_VARCHAR:
+ l = reinterpret_cast<const Field_varstring*>
+ (*af)->get_length();
+ variable_length:
+ if (l >= min_local_len) {
+ max_size += blob_prefix
+ + FIELD_REF_SIZE;
+ if (!is_null) {
+ min_size += blob_prefix
+ + FIELD_REF_SIZE;
+ }
+ lenlen += 2;
+ } else {
+ if (!is_null) {
+ min_size += l;
+ }
+ l = (*af)->pack_length();
+ max_size += l;
+ lenlen += l > 255 ? 2 : 1;
+ }
+ break;
+ case MYSQL_TYPE_GEOMETRY:
+ case MYSQL_TYPE_TINY_BLOB:
+ case MYSQL_TYPE_MEDIUM_BLOB:
+ case MYSQL_TYPE_BLOB:
+ case MYSQL_TYPE_LONG_BLOB:
+ l = reinterpret_cast<const Field_blob*>
+ ((*af))->get_length();
+ goto variable_length;
+ default:
+ l = (*af)->pack_length();
+ if (l > 255 && ib_table.not_redundant()) {
+ goto variable_length;
+ }
+ max_size += l;
+ if (!is_null) {
+ min_size += l;
+ }
+ }
+ }
+
+ ulint n_fields = pk->n_fields + n_add;
+
+ if (n_fields >= REC_MAX_N_USER_FIELDS + DATA_N_SYS_COLS) {
+ return false;
+ }
+
+ if (pk->is_gen_clust()) {
+ min_size += DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN
+ + DATA_ROW_ID_LEN;
+ max_size += DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN
+ + DATA_ROW_ID_LEN;
+ } else {
+ min_size += DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN;
+ max_size += DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN;
+ }
+
+ uint i = pk->n_fields;
+ while (i-- > pk->n_core_fields) {
+ const dict_field_t& f = pk->fields[i];
+ if (f.col->is_nullable()) {
+ n_nullable++;
+ if (!f.col->is_dropped()
+ && f.col->def_val.data) {
+ goto instantly_added_column;
+ }
+ } else if (f.fixed_len
+ && (f.fixed_len <= 255
+ || !ib_table.not_redundant())) {
+ if (ib_table.not_redundant()
+ || !f.col->is_dropped()) {
+ min_size += f.fixed_len;
+ max_size += f.fixed_len;
+ }
+ } else if (f.col->is_dropped() || !f.col->is_added()) {
+ lenlen++;
+ goto set_max_size;
+ } else {
+instantly_added_column:
+ ut_ad(f.col->is_added());
+ if (f.col->def_val.len >= min_local_len) {
+ min_size += blob_prefix
+ + FIELD_REF_SIZE;
+ lenlen += 2;
+ } else {
+ min_size += f.col->def_val.len;
+ lenlen += f.col->def_val.len
+ > 255 ? 2 : 1;
+ }
+set_max_size:
+ if (f.fixed_len
+ && (f.fixed_len <= 255
+ || !ib_table.not_redundant())) {
+ max_size += f.fixed_len;
+ } else if (f.col->len >= min_local_len) {
+ max_size += blob_prefix
+ + FIELD_REF_SIZE;
+ } else {
+ max_size += f.col->len;
+ }
+ }
+ }
+
+ do {
+ const dict_field_t& f = pk->fields[i];
+ if (f.col->is_nullable()) {
+ n_nullable++;
+ } else if (f.fixed_len) {
+ min_size += f.fixed_len;
+ } else {
+ lenlen++;
+ }
+ } while (i--);
+
+ if (ib_table.instant
+ || (ha_alter_info->handler_flags
+ & (ALTER_STORED_COLUMN_ORDER
+ | ALTER_DROP_STORED_COLUMN))) {
+ n_fields++;
+ lenlen += 2;
+ min_size += FIELD_REF_SIZE;
+ }
+
+ if (ib_table.not_redundant()) {
+ min_size += REC_N_NEW_EXTRA_BYTES
+ + UT_BITS_IN_BYTES(n_nullable)
+ + lenlen;
+ } else {
+ min_size += (n_fields > 255 || min_size > 255)
+ ? n_fields * 2 : n_fields;
+ min_size += REC_N_OLD_EXTRA_BYTES;
+ }
+
+ if (page_zip_rec_needs_ext(min_size, ib_table.not_redundant(),
+ 0, 0)) {
+ return false;
+ }
+
+ if (strict && page_zip_rec_needs_ext(max_size,
+ ib_table.not_redundant(),
+ 0, 0)) {
+ return false;
+ }
+ }
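/* Editor's note (not part of the patch): min_size/max_size above are rough
   lower/upper bounds on a clustered index record after the instant ALTER.
   page_zip_rec_needs_ext() asks whether a record of that size would have
   to move columns off-page; if even min_size fails the test,
   ALGORITHM=INSTANT is refused outright, and when strict is set (the
   caller passes trx_is_strict(), which typically reflects
   innodb_strict_mode) the same test on max_size must pass as well. */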
// Making table system-versioned instantly is not implemented yet.
if (ha_alter_info->handler_flags & ALTER_ADD_SYSTEM_VERSIONING) {
return false;
}
- if (~ha_alter_info->handler_flags & ALTER_ADD_STORED_BASE_COLUMN) {
+ static constexpr alter_table_operations avoid_rebuild
+ = ALTER_ADD_STORED_BASE_COLUMN
+ | ALTER_DROP_STORED_COLUMN
+ | ALTER_STORED_COLUMN_ORDER
+ | ALTER_COLUMN_NULLABLE;
+
+ if (!(ha_alter_info->handler_flags & avoid_rebuild)) {
+ alter_table_operations flags = ha_alter_info->handler_flags
+ & ~avoid_rebuild;
+ /* None of the flags are set that we can handle
+ specially to avoid rebuild. In this case, we can
+ allow ALGORITHM=INSTANT, except if some requested
+ operation requires that the table be rebuilt. */
+ if (flags & INNOBASE_ALTER_REBUILD) {
+ return false;
+ }
+ if ((flags & ALTER_OPTIONS)
+ && alter_options_need_rebuild(ha_alter_info, table)) {
+ return false;
+ }
+ } else if (!ib_table.supports_instant()) {
return false;
}
@@ -696,12 +1697,59 @@ instant_alter_column_possible(
columns. */
if (ha_alter_info->handler_flags
& ((INNOBASE_ALTER_REBUILD | INNOBASE_ONLINE_CREATE)
- & ~ALTER_ADD_STORED_BASE_COLUMN & ~ALTER_OPTIONS)) {
+ & ~ALTER_DROP_STORED_COLUMN
+ & ~ALTER_STORED_COLUMN_ORDER
+ & ~ALTER_ADD_STORED_BASE_COLUMN
+ & ~ALTER_COLUMN_NULLABLE
+ & ~ALTER_OPTIONS)) {
+ return false;
+ }
+
+ if ((ha_alter_info->handler_flags & ALTER_OPTIONS)
+ && alter_options_need_rebuild(ha_alter_info, table)) {
return false;
}
- return !(ha_alter_info->handler_flags & ALTER_OPTIONS)
- || !alter_options_need_rebuild(ha_alter_info, table);
+ if (ha_alter_info->handler_flags & ALTER_COLUMN_NULLABLE) {
+ if (ib_table.not_redundant()) {
+ /* Instantaneous removal of NOT NULL is
+ only supported for ROW_FORMAT=REDUNDANT. */
+ return false;
+ }
+ if (ib_table.fts_doc_id_index
+ && !innobase_fulltext_exist(altered_table)) {
+ /* Removing hidden FTS_DOC_ID_INDEX(FTS_DOC_ID)
+ requires that the table be rebuilt. */
+ return false;
+ }
+
+ Field** af = altered_table->field;
+ Field** const end = altered_table->field
+ + altered_table->s->fields;
+ for (unsigned c = 0; af < end; af++) {
+ if (!(*af)->stored_in_db()) {
+ continue;
+ }
+
+ const dict_col_t* col = dict_table_get_nth_col(
+ &ib_table, c++);
+
+ if (!col->ord_part || col->is_nullable()
+ || !(*af)->real_maybe_null()) {
+ continue;
+ }
+
+ /* The column would be changed from NOT NULL.
+ Ensure that it is not a clustered index key. */
+ for (auto i = pk->n_uniq; i--; ) {
+ if (pk->fields[i].col == col) {
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
}
/** Check whether the non-const default value for the field
@@ -1023,46 +2071,10 @@ ha_innobase::check_if_supported_inplace_alter(
DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
}
- bool add_drop_v_cols = false;
-
- /* If there is add or drop virtual columns, we will support operations
- with these 2 options alone with inplace interface for now */
-
- if (ha_alter_info->handler_flags
- & (ALTER_ADD_VIRTUAL_COLUMN
- | ALTER_DROP_VIRTUAL_COLUMN
- | ALTER_VIRTUAL_COLUMN_ORDER)) {
- ulonglong flags = ha_alter_info->handler_flags;
-
- /* TODO: uncomment the flags below, once we start to
- support them */
-
- flags &= ~(ALTER_ADD_VIRTUAL_COLUMN
- | ALTER_DROP_VIRTUAL_COLUMN
- | ALTER_VIRTUAL_COLUMN_ORDER
- | ALTER_VIRTUAL_GCOL_EXPR
- | ALTER_COLUMN_VCOL
- /*
- | ALTER_ADD_STORED_BASE_COLUMN
- | ALTER_DROP_STORED_COLUMN
- | ALTER_STORED_COLUMN_ORDER
- | ALTER_ADD_UNIQUE_INDEX
- */
- | ALTER_ADD_NON_UNIQUE_NON_PRIM_INDEX
- | ALTER_DROP_NON_UNIQUE_NON_PRIM_INDEX);
-
- if (flags != 0
- || IF_PARTITIONING((altered_table->s->partition_info_str
- && altered_table->s->partition_info_str_len), 0)
- || (!check_v_col_in_order(
- this->table, altered_table, ha_alter_info))) {
- ha_alter_info->unsupported_reason =
- MSG_UNSUPPORTED_ALTER_ONLINE_ON_VIRTUAL_COLUMN;
- DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
- }
-
- add_drop_v_cols = true;
- }
+ const bool add_drop_v_cols = !!(ha_alter_info->handler_flags
+ & (ALTER_ADD_VIRTUAL_COLUMN
+ | ALTER_DROP_VIRTUAL_COLUMN
+ | ALTER_VIRTUAL_COLUMN_ORDER));
/* We should be able to do the operation in-place.
See if we can do it online (LOCK=NONE) or without rebuild. */
@@ -1232,8 +2244,9 @@ ha_innobase::check_if_supported_inplace_alter(
constant DEFAULT expression. */
cf_it.rewind();
Field **af = altered_table->field;
- bool add_column_not_last = false;
- uint n_stored_cols = 0, n_add_cols = 0;
+ bool fts_need_rebuild = false;
+ need_rebuild = need_rebuild
+ || innobase_need_rebuild(ha_alter_info, table);
while (Create_field* cf = cf_it++) {
DBUG_ASSERT(cf->field
@@ -1282,43 +2295,73 @@ ha_innobase::check_if_supported_inplace_alter(
ha_alter_info->unsupported_reason = my_get_err_msg(
ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL);
- } else if (!is_non_const_value(*af)) {
-
- n_add_cols++;
-
- if (af < &altered_table->field[table_share->fields]) {
- add_column_not_last = true;
- }
-
- if (set_default_value(*af)) {
- goto next_column;
+ } else if (!is_non_const_value(*af)
+ && set_default_value(*af)) {
+ if (fulltext_indexes > 1
+ && !my_strcasecmp(system_charset_info,
+ (*af)->field_name.str,
+ FTS_DOC_ID_COL_NAME)) {
+ /* If a hidden FTS_DOC_ID column exists
+ (because of FULLTEXT INDEX), it cannot
+ be replaced with a user-created one
+ except when using ALGORITHM=COPY. */
+ ha_alter_info->unsupported_reason =
+ my_get_err_msg(ER_INNODB_FT_LIMIT);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
}
+ goto next_column;
}
DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
next_column:
- n_stored_cols += (*af++)->stored_in_db();
+ af++;
}
- if (!add_column_not_last
- && uint(m_prebuilt->table->n_cols) - DATA_N_SYS_COLS + n_add_cols
- == n_stored_cols
- && m_prebuilt->table->supports_instant()
- && instant_alter_column_possible(ha_alter_info, table)) {
+ const bool supports_instant = instant_alter_column_possible(
+ *m_prebuilt->table, ha_alter_info, table, altered_table,
+ trx_is_strict(m_prebuilt->trx));
+ if (add_drop_v_cols) {
+ ulonglong flags = ha_alter_info->handler_flags;
- DBUG_RETURN(HA_ALTER_INPLACE_INSTANT);
+ /* TODO: uncomment the flags below, once we start to
+ support them */
+
+ flags &= ~(ALTER_ADD_VIRTUAL_COLUMN
+ | ALTER_DROP_VIRTUAL_COLUMN
+ | ALTER_VIRTUAL_COLUMN_ORDER
+ | ALTER_VIRTUAL_GCOL_EXPR
+ | ALTER_COLUMN_VCOL
+ /*
+ | ALTER_ADD_STORED_BASE_COLUMN
+ | ALTER_DROP_STORED_COLUMN
+ | ALTER_STORED_COLUMN_ORDER
+ | ALTER_ADD_UNIQUE_INDEX
+ */
+ | ALTER_ADD_NON_UNIQUE_NON_PRIM_INDEX
+ | ALTER_DROP_NON_UNIQUE_NON_PRIM_INDEX);
+ if (supports_instant) {
+ flags &= ~(ALTER_DROP_STORED_COLUMN
+#if 0 /* MDEV-17468: remove check_v_col_in_order() and fix the code */
+ | ALTER_ADD_STORED_BASE_COLUMN
+#endif
+ | ALTER_STORED_COLUMN_ORDER);
+ }
+ if (flags != 0
+ || IF_PARTITIONING((altered_table->s->partition_info_str
+ && altered_table->s->partition_info_str_len), 0)
+ || (!check_v_col_in_order(
+ this->table, altered_table, ha_alter_info))) {
+ ha_alter_info->unsupported_reason =
+ MSG_UNSUPPORTED_ALTER_ONLINE_ON_VIRTUAL_COLUMN;
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
}
- if (!(ha_alter_info->handler_flags & ~(INNOBASE_ALTER_INSTANT
- | INNOBASE_INPLACE_IGNORE))) {
+ if (supports_instant) {
DBUG_RETURN(HA_ALTER_INPLACE_INSTANT);
}
- bool fts_need_rebuild = false;
- need_rebuild = need_rebuild
- || innobase_need_rebuild(ha_alter_info, table);
-
if (need_rebuild
&& (fulltext_indexes
|| innobase_spatial_exist(altered_table)
@@ -2287,9 +3330,9 @@ innobase_row_to_mysql(
}
}
if (table->vfield) {
- my_bitmap_map* old_vcol_set = tmp_use_all_columns(table, table->vcol_set);
+ my_bitmap_map* old_read_set = tmp_use_all_columns(table, table->read_set);
table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_READ);
- tmp_restore_column_map(table->vcol_set, old_vcol_set);
+ tmp_restore_column_map(table->read_set, old_read_set);
}
}
@@ -2364,7 +3407,6 @@ innobase_check_index_keys(
}
}
-
my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0),
key.name.str);
return(ER_WRONG_NAME_FOR_INDEX);
@@ -2717,8 +3759,7 @@ innobase_fts_check_doc_id_index_in_def(
return(FTS_NOT_EXIST_DOC_ID_INDEX);
}
-/*******************************************************************//**
-Create an index table where indexes are ordered as follows:
+/** Create an index table where indexes are ordered as follows:
IF a new primary key is defined for the table THEN
@@ -2732,23 +3773,15 @@ ELSE
ENDIF
@return key definitions */
-static MY_ATTRIBUTE((nonnull, warn_unused_result, malloc))
-index_def_t*
-innobase_create_key_defs(
-/*=====================*/
- mem_heap_t* heap,
- /*!< in/out: memory heap where space for key
- definitions are allocated */
+MY_ATTRIBUTE((nonnull, warn_unused_result, malloc))
+inline index_def_t*
+ha_innobase_inplace_ctx::create_key_defs(
const Alter_inplace_info* ha_alter_info,
/*!< in: alter operation */
const TABLE* altered_table,
/*!< in: MySQL table that is being altered */
- ulint& n_add,
- /*!< in/out: number of indexes to be created */
ulint& n_fts_add,
/*!< out: number of FTS indexes to be created */
- bool got_default_clust,
- /*!< in: whether the table lacks a primary key */
ulint& fts_doc_id_col,
/*!< in: The column number for Doc ID */
bool& add_fts_doc_id,
@@ -2760,6 +3793,9 @@ innobase_create_key_defs(
const TABLE* table)
/*!< in: MySQL table that is being altered */
{
+ ulint& n_add = num_to_add_index;
+ const bool got_default_clust = new_table->indexes.start->is_gen_clust();
+
index_def_t* indexdef;
index_def_t* indexdefs;
bool new_primary;
@@ -2768,7 +3804,7 @@ innobase_create_key_defs(
const KEY*const key_info
= ha_alter_info->key_info_buffer;
- DBUG_ENTER("innobase_create_key_defs");
+ DBUG_ENTER("ha_innobase_inplace_ctx::create_key_defs");
DBUG_ASSERT(!add_fts_doc_id || add_fts_doc_idx);
DBUG_ASSERT(ha_alter_info->index_add_count == n_add);
@@ -3203,7 +4239,7 @@ innobase_check_foreigns(
@param[in,out] heap Memory heap where allocated
@param[out] dfield InnoDB data field to copy to
@param[in] field MySQL value for the column
-@param[in] old_field Old field or NULL if new col is added
+@param[in] old_field Old column if altering; NULL for ADD COLUMN
@param[in] comp nonzero if in compact format. */
static void innobase_build_col_map_add(
mem_heap_t* heap,
@@ -3222,14 +4258,13 @@ static void innobase_build_col_map_add(
return;
}
- ulint size = field->pack_length();
+ const Field& from = old_field ? *old_field : *field;
+ ulint size = from.pack_length();
byte* buf = static_cast<byte*>(mem_heap_alloc(heap, size));
- const byte* mysql_data = old_field ? old_field->ptr : field->ptr;
-
row_mysql_store_col_in_innobase_format(
- dfield, buf, true, mysql_data, size, comp);
+ dfield, buf, true, from.ptr, size, comp);
}
/** Construct the translation table for reordering, dropping or
@@ -3251,7 +4286,7 @@ innobase_build_col_map(
Alter_inplace_info* ha_alter_info,
const TABLE* altered_table,
const TABLE* table,
- const dict_table_t* new_table,
+ dict_table_t* new_table,
const dict_table_t* old_table,
dtuple_t* defaults,
mem_heap_t* heap)
@@ -3336,16 +4371,25 @@ innobase_build_col_map(
}
col_map[old_i - num_old_v] = i;
+ if (old_table->versioned()
+ && altered_table->versioned()) {
+ if (old_i == old_table->vers_start) {
+ new_table->vers_start = i + num_v;
+ } else if (old_i == old_table->vers_end) {
+ new_table->vers_end = i + num_v;
+ }
+ }
goto found_col;
}
}
- ut_ad(!is_v);
- innobase_build_col_map_add(
- heap, dtuple_get_nth_field(defaults, i),
- altered_table->field[i + num_v],
- NULL,
- dict_table_is_comp(new_table));
+ if (!is_v) {
+ innobase_build_col_map_add(
+ heap, dtuple_get_nth_field(defaults, i),
+ altered_table->field[i + num_v],
+ NULL,
+ dict_table_is_comp(new_table));
+ }
found_col:
if (is_v) {
num_v++;
@@ -3811,13 +4855,12 @@ prepare_inplace_add_virtual(
ha_innobase_inplace_ctx* ctx;
ulint i = 0;
ulint j = 0;
- const Create_field* new_field;
ctx = static_cast<ha_innobase_inplace_ctx*>
(ha_alter_info->handler_ctx);
- ctx->num_to_add_vcol = altered_table->s->fields
- + ctx->num_to_drop_vcol - table->s->fields;
+ ctx->num_to_add_vcol = altered_table->s->virtual_fields
+ + ctx->num_to_drop_vcol - table->s->virtual_fields;
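/* Editor's illustration (not part of the patch): counting
   s->virtual_fields instead of s->fields means that an ALTER which adds
   two virtual columns and one stored column (and drops no virtual ones)
   yields num_to_add_vcol == 2 here; the stored column is handled by the
   instant/rebuild code paths rather than by this virtual-column setup. */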
ctx->add_vcol = static_cast<dict_v_col_t*>(
mem_heap_zalloc(ctx->heap, ctx->num_to_add_vcol
@@ -3829,43 +4872,21 @@ prepare_inplace_add_virtual(
List_iterator_fast<Create_field> cf_it(
ha_alter_info->alter_info->create_list);
- while ((new_field = (cf_it++)) != NULL) {
- const Field* field = new_field->field;
- ulint old_i;
-
- for (old_i = 0; table->field[old_i]; old_i++) {
- const Field* n_field = table->field[old_i];
- if (field == n_field) {
- break;
- }
- }
-
- i++;
+ while (const Create_field* new_field = cf_it++) {
+ const Field* field = altered_table->field[i++];
- if (table->field[old_i]) {
+ if (new_field->field || field->stored_in_db()) {
continue;
}
- ut_ad(!field);
-
- ulint col_len;
ulint is_unsigned;
- ulint field_type;
ulint charset_no;
-
- field = altered_table->field[i - 1];
-
ulint col_type
= get_innobase_type_from_mysql_type(
&is_unsigned, field);
-
- if (field->stored_in_db()) {
- continue;
- }
-
- col_len = field->pack_length();
- field_type = (ulint) field->type();
+ ulint col_len = field->pack_length();
+ ulint field_type = (ulint) field->type();
if (!field->real_maybe_null()) {
field_type |= DATA_NOT_NULL;
@@ -3907,7 +4928,6 @@ prepare_inplace_add_virtual(
}
}
-
ctx->add_vcol[j].m_col.prtype = dtype_form_prtype(
field_type, charset_no);
@@ -3926,6 +4946,7 @@ prepare_inplace_add_virtual(
/* No need to track the list */
ctx->add_vcol[j].v_indexes = NULL;
+ /* MDEV-17468: Do this on ctx->instant_table later */
innodb_base_col_setup(ctx->old_table, field, &ctx->add_vcol[j]);
j++;
}
@@ -4052,33 +5073,96 @@ prepare_inplace_drop_virtual(
@param[in] pos virtual column position
@param[in] base_pos base column pos
@param[in] trx transaction
-@return DB_SUCCESS if successful, otherwise error code */
-static
-dberr_t
-innobase_insert_sys_virtual(
+@retval false on success
+@retval true on failure (my_error() will have been called) */
+static bool innobase_insert_sys_virtual(
const dict_table_t* table,
ulint pos,
ulint base_pos,
trx_t* trx)
{
pars_info_t* info = pars_info_create();
-
pars_info_add_ull_literal(info, "id", table->id);
+ pars_info_add_int4_literal(info, "pos", pos);
+ pars_info_add_int4_literal(info, "base_pos", base_pos);
+
+ if (DB_SUCCESS != que_eval_sql(
+ info,
+ "PROCEDURE P () IS\n"
+ "BEGIN\n"
+ "INSERT INTO SYS_VIRTUAL VALUES (:id, :pos, :base_pos);\n"
+ "END;\n",
+ FALSE, trx)) {
+ my_error(ER_INTERNAL_ERROR, MYF(0),
+ "InnoDB: ADD COLUMN...VIRTUAL");
+ return true;
+ }
+ return false;
+}
+
+/** Insert a record to the SYS_COLUMNS dictionary table.
+@param[in] table_id table id
+@param[in] pos position of the column
+@param[in] field_name field name
+@param[in] mtype main type
+@param[in] prtype precise type
+@param[in] len fixed length in bytes, or 0
+@param[in] n_base number of base columns of virtual columns, or 0
+@param[in,out] trx data dictionary transaction
+@param[in] update whether to update instead of inserting
+@retval false on success
+@retval true on failure (my_error() will have been called) */
+static bool innodb_insert_sys_columns(
+ table_id_t table_id,
+ ulint pos,
+ const char* field_name,
+ ulint mtype,
+ ulint prtype,
+ ulint len,
+ ulint n_base,
+ trx_t* trx,
+ bool update = false)
+{
+ pars_info_t* info = pars_info_create();
+ pars_info_add_ull_literal(info, "id", table_id);
pars_info_add_int4_literal(info, "pos", pos);
+ pars_info_add_str_literal(info, "name", field_name);
+ pars_info_add_int4_literal(info, "mtype", mtype);
+ pars_info_add_int4_literal(info, "prtype", prtype);
+ pars_info_add_int4_literal(info, "len", len);
+ pars_info_add_int4_literal(info, "base", n_base);
+
+ if (update) {
+ if (DB_SUCCESS != que_eval_sql(
+ info,
+ "PROCEDURE UPD_COL () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_COLUMNS SET\n"
+ "NAME=:name, MTYPE=:mtype, PRTYPE=:prtype, "
+ "LEN=:len, PREC=:base\n"
+ "WHERE TABLE_ID=:id AND POS=:pos;\n"
+ "END;\n", FALSE, trx)) {
+ my_error(ER_INTERNAL_ERROR, MYF(0),
+ "InnoDB: Updating SYS_COLUMNS failed");
+ return true;
+ }
- pars_info_add_int4_literal(info, "base_pos", base_pos);
+ return false;
+ }
- dberr_t error = que_eval_sql(
- info,
- "PROCEDURE P () IS\n"
- "BEGIN\n"
- "INSERT INTO SYS_VIRTUAL VALUES"
- "(:id, :pos, :base_pos);\n"
- "END;\n",
- FALSE, trx);
+ if (DB_SUCCESS != que_eval_sql(
+ info,
+ "PROCEDURE ADD_COL () IS\n"
+ "BEGIN\n"
+ "INSERT INTO SYS_COLUMNS VALUES"
+ "(:id,:pos,:name,:mtype,:prtype,:len,:base);\n"
+ "END;\n", FALSE, trx)) {
+ my_error(ER_INTERNAL_ERROR, MYF(0),
+ "InnoDB: Insert into SYS_COLUMNS failed");
+ return true;
+ }
- return(error);
+ return false;
}
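/* Editor's illustration (not part of the patch; the column c is
   hypothetical): an instant "ALTER TABLE t ADD COLUMN c INT NOT NULL"
   would typically reach this helper with mtype = DATA_INT, len = 4 and
   n_base = 0, inserting a plain (TABLE_ID, POS, NAME, MTYPE, PRTYPE, LEN,
   PREC) row into SYS_COLUMNS; with update = true the same values
   overwrite an existing row instead, which is how existing rows are
   adjusted for instant DROP/reorder. */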
/** Update INNODB SYS_COLUMNS on new virtual columns
@@ -4086,10 +5170,9 @@ innobase_insert_sys_virtual(
@param[in] col_name column name
@param[in] vcol virtual column
@param[in] trx transaction
-@return DB_SUCCESS if successful, otherwise error code */
-static
-dberr_t
-innobase_add_one_virtual(
+@retval false on success
+@retval true on failure (my_error() will have been called) */
+static bool innobase_add_one_virtual(
const dict_table_t* table,
const char* col_name,
dict_v_col_t* vcol,
@@ -4097,67 +5180,41 @@ innobase_add_one_virtual(
{
ulint pos = dict_create_v_col_pos(vcol->v_pos,
vcol->m_col.ind);
- ulint mtype = vcol->m_col.mtype;
- ulint prtype = vcol->m_col.prtype;
- ulint len = vcol->m_col.len;
- pars_info_t* info = pars_info_create();
-
- pars_info_add_ull_literal(info, "id", table->id);
-
- pars_info_add_int4_literal(info, "pos", pos);
-
- pars_info_add_str_literal(info, "name", col_name);
- pars_info_add_int4_literal(info, "mtype", mtype);
- pars_info_add_int4_literal(info, "prtype", prtype);
- pars_info_add_int4_literal(info, "len", len);
- pars_info_add_int4_literal(info, "prec", vcol->num_base);
-
- dberr_t error = que_eval_sql(
- info,
- "PROCEDURE P () IS\n"
- "BEGIN\n"
- "INSERT INTO SYS_COLUMNS VALUES"
- "(:id, :pos, :name, :mtype, :prtype, :len, :prec);\n"
- "END;\n",
- FALSE, trx);
- if (error != DB_SUCCESS) {
- return(error);
+ if (innodb_insert_sys_columns(table->id, pos, col_name,
+ vcol->m_col.mtype, vcol->m_col.prtype,
+ vcol->m_col.len, vcol->num_base, trx)) {
+ return true;
}
for (ulint i = 0; i < vcol->num_base; i++) {
- error = innobase_insert_sys_virtual(
- table, pos, vcol->base_col[i]->ind, trx);
- if (error != DB_SUCCESS) {
- return(error);
+ if (innobase_insert_sys_virtual(
+ table, pos, vcol->base_col[i]->ind, trx)) {
+ return true;
}
}
- return(error);
+ return false;
}
/** Update SYS_TABLES.N_COLS in the data dictionary.
@param[in] user_table InnoDB table
-@param[in] n_cols the new value of SYS_TABLES.N_COLS
+@param[in] n the new value of SYS_TABLES.N_COLS
@param[in] trx transaction
@return whether the operation failed */
-static
-bool
-innodb_update_n_cols(const dict_table_t* table, ulint n_cols, trx_t* trx)
+static bool innodb_update_cols(const dict_table_t* table, ulint n, trx_t* trx)
{
pars_info_t* info = pars_info_create();
- pars_info_add_int4_literal(info, "n", n_cols);
+ pars_info_add_int4_literal(info, "n", n);
pars_info_add_ull_literal(info, "id", table->id);
- dberr_t err = que_eval_sql(info,
- "PROCEDURE UPDATE_N_COLS () IS\n"
- "BEGIN\n"
- "UPDATE SYS_TABLES SET N_COLS = :n"
- " WHERE ID = :id;\n"
- "END;\n", FALSE, trx);
-
- if (err != DB_SUCCESS) {
+ if (DB_SUCCESS != que_eval_sql(info,
+ "PROCEDURE UPDATE_N_COLS () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_TABLES SET N_COLS = :n"
+ " WHERE ID = :id;\n"
+ "END;\n", FALSE, trx)) {
my_error(ER_INTERNAL_ERROR, MYF(0),
"InnoDB: Updating SYS_TABLES.N_COLS failed");
return true;
@@ -4175,283 +5232,47 @@ innodb_update_n_cols(const dict_table_t* table, ulint n_cols, trx_t* trx)
static
bool
innobase_add_virtual_try(
- Alter_inplace_info* ha_alter_info,
- const dict_table_t* user_table,
- trx_t* trx)
+ const Alter_inplace_info* ha_alter_info,
+ const dict_table_t* user_table,
+ trx_t* trx)
{
- ha_innobase_inplace_ctx* ctx;
- dberr_t err = DB_SUCCESS;
-
- ctx = static_cast<ha_innobase_inplace_ctx*>(
+ ha_innobase_inplace_ctx* ctx = static_cast<ha_innobase_inplace_ctx*>(
ha_alter_info->handler_ctx);
for (ulint i = 0; i < ctx->num_to_add_vcol; i++) {
-
- err = innobase_add_one_virtual(
- user_table, ctx->add_vcol_name[i],
- &ctx->add_vcol[i], trx);
-
- if (err != DB_SUCCESS) {
- my_error(ER_INTERNAL_ERROR, MYF(0),
- "InnoDB: ADD COLUMN...VIRTUAL");
- return(true);
+ if (innobase_add_one_virtual(
+ user_table, ctx->add_vcol_name[i],
+ &ctx->add_vcol[i], trx)) {
+ return true;
}
}
-
- ulint n_col = unsigned(user_table->n_cols) - DATA_N_SYS_COLS;
- ulint n_v_col = unsigned(user_table->n_v_cols)
- + ctx->num_to_add_vcol - ctx->num_to_drop_vcol;
- ulint new_n = dict_table_encode_n_col(n_col, n_v_col)
- + (unsigned(user_table->flags & DICT_TF_COMPACT) << 31);
-
- return innodb_update_n_cols(user_table, new_n, trx);
+ return false;
}
-/** Insert into SYS_COLUMNS and insert/update the hidden metadata record
-for instant ADD COLUMN.
-@param[in,out] ctx ALTER TABLE context for the current partition
-@param[in] altered_table MySQL table that is being altered
-@param[in] table MySQL table as it is before the ALTER operation
-@param[in,out] trx dictionary transaction
-@retval true failure
-@retval false success */
-static
-bool
-innobase_add_instant_try(
- ha_innobase_inplace_ctx*ctx,
- const TABLE* altered_table,
- const TABLE* table,
- trx_t* trx)
+/** Delete metadata from SYS_COLUMNS and SYS_VIRTUAL.
+@param[in] id table id
+@param[in] pos first SYS_COLUMNS.POS
+@param[in,out] trx data dictionary transaction
+@retval true Failure
+@retval false Success. */
+static bool innobase_instant_drop_cols(table_id_t id, ulint pos, trx_t* trx)
{
- DBUG_ASSERT(!ctx->need_rebuild());
-
- if (!ctx->is_instant()) return false;
-
- DBUG_ASSERT(altered_table->s->fields > table->s->fields);
- DBUG_ASSERT(ctx->old_table->n_cols == ctx->old_n_cols);
-
- dict_table_t* user_table = ctx->old_table;
- user_table->instant_add_column(*ctx->instant_table);
- dict_index_t* index = dict_table_get_first_index(user_table);
- /* The table may have been emptied and may have lost its
- 'instant-add-ness' during this instant ADD COLUMN. */
-
- /* Construct a table row of default values for the stored columns. */
- dtuple_t* row = dtuple_create(ctx->heap, user_table->n_cols);
- dict_table_copy_types(row, user_table);
- Field** af = altered_table->field;
- Field** const end = altered_table->field + altered_table->s->fields;
-
- for (uint i = 0; af < end; af++) {
- if (!(*af)->stored_in_db()) {
- continue;
- }
-
- dict_col_t* col = dict_table_get_nth_col(user_table, i);
- DBUG_ASSERT(!strcmp((*af)->field_name.str,
- dict_table_get_col_name(user_table, i)));
-
- dfield_t* d = dtuple_get_nth_field(row, i);
-
- if (col->is_instant()) {
- dfield_set_data(d, col->def_val.data,
- col->def_val.len);
- } else if ((*af)->real_maybe_null()) {
- /* Store NULL for nullable 'core' columns. */
- dfield_set_null(d);
- } else {
- switch ((*af)->type()) {
- case MYSQL_TYPE_VARCHAR:
- case MYSQL_TYPE_GEOMETRY:
- case MYSQL_TYPE_TINY_BLOB:
- case MYSQL_TYPE_MEDIUM_BLOB:
- case MYSQL_TYPE_BLOB:
- case MYSQL_TYPE_LONG_BLOB:
- /* Store the empty string for 'core'
- variable-length NOT NULL columns. */
- dfield_set_data(d, field_ref_zero, 0);
- break;
- default:
- /* For fixed-length NOT NULL 'core' columns,
- get a dummy default value from SQL. Note that
- we will preserve the old values of these
- columns when updating the metadata
- record, to avoid unnecessary updates. */
- ulint len = (*af)->pack_length();
- DBUG_ASSERT(d->type.mtype != DATA_INT
- || len <= 8);
- row_mysql_store_col_in_innobase_format(
- d, d->type.mtype == DATA_INT
- ? static_cast<byte*>(
- mem_heap_alloc(ctx->heap, len))
- : NULL, true, (*af)->ptr, len,
- dict_table_is_comp(user_table));
- }
- }
-
- if (i + DATA_N_SYS_COLS < ctx->old_n_cols) {
- i++;
- continue;
- }
-
- pars_info_t* info = pars_info_create();
- pars_info_add_ull_literal(info, "id", user_table->id);
- pars_info_add_int4_literal(info, "pos", i);
- pars_info_add_str_literal(info, "name", (*af)->field_name.str);
- pars_info_add_int4_literal(info, "mtype", d->type.mtype);
- pars_info_add_int4_literal(info, "prtype", d->type.prtype);
- pars_info_add_int4_literal(info, "len", d->type.len);
+ pars_info_t* info = pars_info_create();
+ pars_info_add_ull_literal(info, "id", id);
+ pars_info_add_int4_literal(info, "pos", pos);
- dberr_t err = que_eval_sql(
+ dberr_t err = que_eval_sql(
info,
- "PROCEDURE ADD_COL () IS\n"
+ "PROCEDURE DELETE_COL () IS\n"
"BEGIN\n"
- "INSERT INTO SYS_COLUMNS VALUES"
- "(:id,:pos,:name,:mtype,:prtype,:len,0);\n"
+ "DELETE FROM SYS_COLUMNS WHERE\n"
+ "TABLE_ID = :id AND POS >= :pos;\n"
+ "DELETE FROM SYS_VIRTUAL WHERE TABLE_ID = :id;\n"
"END;\n", FALSE, trx);
- if (err != DB_SUCCESS) {
- my_error(ER_INTERNAL_ERROR, MYF(0),
- "InnoDB: Insert into SYS_COLUMNS failed");
- return(true);
- }
-
- i++;
- }
-
- if (innodb_update_n_cols(user_table, dict_table_encode_n_col(
- unsigned(user_table->n_cols)
- - DATA_N_SYS_COLS,
- user_table->n_v_cols)
- | (user_table->flags & DICT_TF_COMPACT) << 31,
- trx)) {
- return true;
- }
-
- unsigned i = unsigned(user_table->n_cols) - DATA_N_SYS_COLS;
- byte trx_id[DATA_TRX_ID_LEN], roll_ptr[DATA_ROLL_PTR_LEN];
- dfield_set_data(dtuple_get_nth_field(row, i++), field_ref_zero,
- DATA_ROW_ID_LEN);
- dfield_set_data(dtuple_get_nth_field(row, i++), trx_id, sizeof trx_id);
- dfield_set_data(dtuple_get_nth_field(row, i),roll_ptr,sizeof roll_ptr);
- DBUG_ASSERT(i + 1 == user_table->n_cols);
-
- trx_write_trx_id(trx_id, trx->id);
- /* The DB_ROLL_PTR will be assigned later, when allocating undo log.
- Silence a Valgrind warning in dtuple_validate() when
- row_ins_clust_index_entry_low() searches for the insert position. */
- memset(roll_ptr, 0, sizeof roll_ptr);
-
- dtuple_t* entry = row_build_index_entry(row, NULL, index, ctx->heap);
- entry->info_bits = REC_INFO_METADATA;
-
- mtr_t mtr;
- mtr.start();
- index->set_modified(mtr);
- btr_pcur_t pcur;
- btr_pcur_open_at_index_side(true, index, BTR_MODIFY_TREE, &pcur, true,
- 0, &mtr);
- ut_ad(btr_pcur_is_before_first_on_page(&pcur));
- btr_pcur_move_to_next_on_page(&pcur);
-
- buf_block_t* block = btr_pcur_get_block(&pcur);
- ut_ad(page_is_leaf(block->frame));
- ut_ad(!page_has_prev(block->frame));
- ut_ad(!buf_block_get_page_zip(block));
- const rec_t* rec = btr_pcur_get_rec(&pcur);
- que_thr_t* thr = pars_complete_graph_for_exec(
- NULL, trx, ctx->heap, NULL);
-
- dberr_t err;
- if (rec_is_metadata(rec, index)) {
- ut_ad(page_rec_is_user_rec(rec));
- if (!page_has_next(block->frame)
- && page_rec_is_last(rec, block->frame)) {
- goto empty_table;
- }
- /* Extend the record with the instantly added columns. */
- const unsigned n = user_table->n_cols - ctx->old_n_cols;
- /* Reserve room for DB_TRX_ID,DB_ROLL_PTR and any
- non-updated off-page columns in case they are moved off
- page as a result of the update. */
- upd_t* update = upd_create(index->n_fields, ctx->heap);
- update->n_fields = n;
- update->info_bits = REC_INFO_METADATA;
- /* Add the default values for instantly added columns */
- for (unsigned i = 0; i < n; i++) {
- upd_field_t* uf = upd_get_nth_field(update, i);
- unsigned f = index->n_fields - n + i;
- uf->field_no = f;
- uf->new_val = entry->fields[f];
- }
- ulint* offsets = NULL;
- mem_heap_t* offsets_heap = NULL;
- big_rec_t* big_rec;
- err = btr_cur_pessimistic_update(
- BTR_NO_LOCKING_FLAG | BTR_KEEP_POS_FLAG,
- btr_pcur_get_btr_cur(&pcur),
- &offsets, &offsets_heap, ctx->heap,
- &big_rec, update, UPD_NODE_NO_ORD_CHANGE,
- thr, trx->id, &mtr);
- if (big_rec) {
- if (err == DB_SUCCESS) {
- err = btr_store_big_rec_extern_fields(
- &pcur, offsets, big_rec, &mtr,
- BTR_STORE_UPDATE);
- }
-
- dtuple_big_rec_free(big_rec);
- }
- if (offsets_heap) {
- mem_heap_free(offsets_heap);
- }
- btr_pcur_close(&pcur);
- goto func_exit;
- } else if (page_rec_is_supremum(rec)) {
-empty_table:
- /* The table is empty. */
- ut_ad(fil_page_index_page_check(block->frame));
- ut_ad(!page_has_siblings(block->frame));
- ut_ad(block->page.id.page_no() == index->page);
- btr_page_empty(block, NULL, index, 0, &mtr);
- index->remove_instant();
- err = DB_SUCCESS;
- goto func_exit;
- }
-
- /* Convert the table to the instant ADD COLUMN format. */
- ut_ad(user_table->is_instant());
- mtr.commit();
- mtr.start();
- index->set_modified(mtr);
- if (page_t* root = btr_root_get(index, &mtr)) {
- if (fil_page_get_type(root) != FIL_PAGE_INDEX) {
- DBUG_ASSERT(!"wrong page type");
- goto err_exit;
- }
-
- DBUG_ASSERT(!page_is_comp(root) || !page_get_instant(root));
- mlog_write_ulint(root + FIL_PAGE_TYPE,
- FIL_PAGE_TYPE_INSTANT, MLOG_2BYTES,
- &mtr);
- page_set_instant(root, index->n_core_fields, &mtr);
- mtr.commit();
- mtr.start();
- index->set_modified(mtr);
- err = row_ins_clust_index_entry_low(
- BTR_NO_LOCKING_FLAG, BTR_MODIFY_TREE, index,
- index->n_uniq, entry, 0, thr, false);
- } else {
-err_exit:
- err = DB_CORRUPTION;
- }
-
-func_exit:
- mtr.commit();
-
if (err != DB_SUCCESS) {
- my_error_innodb(err, table->s->table_name.str,
- user_table->flags);
+ my_error(ER_INTERNAL_ERROR, MYF(0),
+ "InnoDB: DELETE from SYS_COLUMNS/SYS_VIRTUAL failed");
return true;
}
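
The embedded SQL above removes, in one pass, every SYS_COLUMNS row of the table from the given position onwards together with all of its SYS_VIRTUAL rows; the surviving and new columns are re-inserted later by innobase_instant_try() (see the add_all_virtual loop further down). A rough stand-alone model of that effect, written in plain C++ with toy row types rather than the InnoDB dictionary API:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Toy rows standing in for the SYS_COLUMNS and SYS_VIRTUAL dictionary tables.
    struct SysColumnsRow { uint64_t table_id; uint32_t pos; /* name, mtype, prtype, len ... */ };
    struct SysVirtualRow { uint64_t table_id; uint32_t pos; uint32_t base_pos; };

    // Effect of the embedded SQL in innobase_instant_drop_cols(): delete every
    // SYS_COLUMNS row of the table whose POS is >= pos, and every SYS_VIRTUAL
    // row of the table (virtual columns are re-added afterwards).
    static void instant_drop_cols_model(std::vector<SysColumnsRow>& sys_columns,
                                        std::vector<SysVirtualRow>& sys_virtual,
                                        uint64_t id, uint32_t pos)
    {
        sys_columns.erase(std::remove_if(sys_columns.begin(), sys_columns.end(),
                                         [&](const SysColumnsRow& r) {
                                             return r.table_id == id && r.pos >= pos;
                                         }),
                          sys_columns.end());
        sys_virtual.erase(std::remove_if(sys_virtual.begin(), sys_virtual.end(),
                                         [&](const SysVirtualRow& r) {
                                             return r.table_id == id;
                                         }),
                          sys_virtual.end());
    }
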
@@ -4629,9 +5450,9 @@ innobase_drop_one_virtual_sys_virtual(
static
bool
innobase_drop_virtual_try(
- Alter_inplace_info* ha_alter_info,
- const dict_table_t* user_table,
- trx_t* trx)
+ const Alter_inplace_info* ha_alter_info,
+ const dict_table_t* user_table,
+ trx_t* trx)
{
ha_innobase_inplace_ctx* ctx;
dberr_t err = DB_SUCCESS;
@@ -4664,14 +5485,458 @@ innobase_drop_virtual_try(
}
}
+ return false;
+}
+
+/** Serialise metadata of dropped or reordered columns.
+@param[in,out] heap memory heap for allocation
+@param[out] field data field with the metadata */
+inline
+void dict_table_t::serialise_columns(mem_heap_t* heap, dfield_t* field) const
+{
+ DBUG_ASSERT(instant);
+ const dict_index_t& index = *UT_LIST_GET_FIRST(indexes);
+ unsigned n_fixed = index.first_user_field();
+ unsigned num_non_pk_fields = index.n_fields - n_fixed;
+
+ ulint len = 4 + num_non_pk_fields * 2;
+
+ byte* data = static_cast<byte*>(mem_heap_alloc(heap, len));
+
+ dfield_set_data(field, data, len);
+
+ mach_write_to_4(data, num_non_pk_fields);
- ulint n_col = unsigned(user_table->n_cols) - DATA_N_SYS_COLS;
- ulint n_v_col = unsigned(user_table->n_v_cols)
- - ctx->num_to_drop_vcol;
- ulint new_n = dict_table_encode_n_col(n_col, n_v_col)
- | ((user_table->flags & DICT_TF_COMPACT) << 31);
+ data += 4;
- return innodb_update_n_cols(user_table, new_n, trx);
+ for (ulint i = n_fixed; i < index.n_fields; i++) {
+ mach_write_to_2(data, instant->field_map[i - n_fixed]);
+ data += 2;
+ }
+}
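
The BLOB written by serialise_columns() above is simply a 4-byte field count followed by one 2-byte field_map entry per non-PK index field. A self-contained sketch of that layout in plain C++, assuming the usual most-significant-byte-first encoding of mach_write_to_4()/mach_write_to_2(); the function name here is made up, not InnoDB API:

    #include <cstdint>
    #include <vector>

    // Illustrative model of the metadata BLOB built by serialise_columns():
    // a 32-bit big-endian count, then a 16-bit big-endian entry for each
    // non-PK index field (the dict_instant_t::field_map contents).
    static std::vector<uint8_t> encode_field_map(const std::vector<uint16_t>& field_map)
    {
        std::vector<uint8_t> blob;
        const uint32_t n = static_cast<uint32_t>(field_map.size());
        // 4-byte count, most significant byte first (like mach_write_to_4)
        blob.push_back(uint8_t(n >> 24));
        blob.push_back(uint8_t(n >> 16));
        blob.push_back(uint8_t(n >> 8));
        blob.push_back(uint8_t(n));
        for (uint16_t entry : field_map) {
            // 2-byte entry, most significant byte first (like mach_write_to_2)
            blob.push_back(uint8_t(entry >> 8));
            blob.push_back(uint8_t(entry));
        }
        return blob;  // length is always 4 + 2 * field_map.size()
    }
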
+
+/** Construct the metadata record for instant ALTER TABLE.
+@param[in] row dummy or default values for existing columns
+@param[in,out] heap memory heap for allocations
+@return metadata record */
+inline
+dtuple_t*
+dict_index_t::instant_metadata(const dtuple_t& row, mem_heap_t* heap) const
+{
+ ut_ad(is_primary());
+ dtuple_t* entry;
+
+ if (!table->instant) {
+ entry = row_build_index_entry(&row, NULL, this, heap);
+ entry->info_bits = REC_INFO_METADATA_ADD;
+ return entry;
+ }
+
+ entry = dtuple_create(heap, n_fields + 1);
+ entry->n_fields_cmp = n_uniq;
+ entry->info_bits = REC_INFO_METADATA_ALTER;
+
+ const dict_field_t* field = fields;
+
+ for (uint i = 0; i <= n_fields; i++, field++) {
+ dfield_t* dfield = dtuple_get_nth_field(entry, i);
+
+ if (i == first_user_field()) {
+ table->serialise_columns(heap, dfield);
+ dfield->type.metadata_blob_init();
+ field--;
+ continue;
+ }
+
+ ut_ad(!field->col->is_virtual());
+
+ if (field->col->is_dropped()) {
+ dict_col_copy_type(field->col, &dfield->type);
+ if (field->col->is_nullable()) {
+ dfield_set_null(dfield);
+ } else {
+ dfield_set_data(dfield, field_ref_zero,
+ field->fixed_len);
+ }
+ continue;
+ }
+
+ const dfield_t* s = dtuple_get_nth_field(&row, field->col->ind);
+ ut_ad(dict_col_type_assert_equal(field->col, &s->type));
+ *dfield = *s;
+
+ if (dfield_is_null(dfield)) {
+ continue;
+ }
+
+ if (dfield_is_ext(dfield)) {
+ ut_ad(i > first_user_field());
+ ut_ad(!field->prefix_len);
+ ut_ad(dfield->len >= FIELD_REF_SIZE);
+ dfield_set_len(dfield, dfield->len - FIELD_REF_SIZE);
+ }
+
+ if (!field->prefix_len) {
+ continue;
+ }
+
+ ut_ad(field->col->ord_part);
+ ut_ad(i < n_uniq);
+
+ ulint len = dtype_get_at_most_n_mbchars(
+ field->col->prtype,
+ field->col->mbminlen, field->col->mbmaxlen,
+ field->prefix_len, dfield->len,
+ static_cast<char*>(dfield_get_data(dfield)));
+ dfield_set_len(dfield, len);
+ }
+
+ return entry;
+}
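
When table->instant is set, instant_metadata() builds a tuple with one extra slot: the serialised column map is spliced in at first_user_field() and every later field shifts by one (hence the field-- above, which keeps the source field pointer in step with i). A minimal stand-alone model of that index arithmetic, in plain C++ with illustrative names only:

    #include <string>
    #include <vector>

    // Model: splice one metadata BLOB slot into a list of index fields at
    // position first_user_field, mirroring the i/field bookkeeping in
    // dict_index_t::instant_metadata(). Precondition: first_user_field is
    // at most index_fields.size().
    static std::vector<std::string> build_metadata_fields(
        const std::vector<std::string>& index_fields,
        size_t first_user_field)
    {
        std::vector<std::string> entry;
        entry.reserve(index_fields.size() + 1);
        size_t f = 0;  // next source field to copy (the `field` pointer above)
        for (size_t i = 0; i <= index_fields.size(); i++) {
            if (i == first_user_field) {
                entry.push_back("<serialised column map BLOB>");
                continue;  // like `field--; continue;` -- no source field consumed
            }
            entry.push_back(index_fields[f++]);
        }
        return entry;  // always index_fields.size() + 1 entries
    }
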
+
+/** Insert or update SYS_COLUMNS and the hidden metadata record
+for instant ALTER TABLE.
+@param[in] ha_alter_info ALTER TABLE context
+@param[in,out] ctx ALTER TABLE context for the current partition
+@param[in] altered_table MySQL table that is being altered
+@param[in] table MySQL table as it is before the ALTER operation
+@param[in,out] trx dictionary transaction
+@retval true failure
+@retval false success */
+static bool innobase_instant_try(
+ const Alter_inplace_info* ha_alter_info,
+ ha_innobase_inplace_ctx* ctx,
+ const TABLE* altered_table,
+ const TABLE* table,
+ trx_t* trx)
+{
+ DBUG_ASSERT(!ctx->need_rebuild());
+ DBUG_ASSERT(ctx->is_instant());
+
+ dict_table_t* user_table = ctx->old_table;
+
+ dict_index_t* index = dict_table_get_first_index(user_table);
+ mtr_t mtr;
+ mtr.start();
+ /* Prevent purge from calling dict_index_t::clear_instant_alter(),
+ to protect index->n_core_fields, index->table->instant and others
+ from changing during ctx->instant_column(). */
+ instant_metadata_lock(*index, mtr);
+ const unsigned n_old_fields = index->n_fields;
+ const dict_col_t* old_cols = user_table->cols;
+ DBUG_ASSERT(user_table->n_cols == ctx->old_n_cols);
+
+ const bool metadata_changed = ctx->instant_column();
+
+ DBUG_ASSERT(index->n_fields >= n_old_fields);
+ /* Release the page latch. Between this and the next
+ btr_pcur_open_at_index_side(), data fields such as
+ index->n_core_fields and index->table->instant could change,
+ but we would handle that in empty_table: below. */
+ mtr.commit();
+ /* The table may have been emptied and may have lost its
+ 'instantness' during this ALTER TABLE. */
+
+ /* Construct a table row of default values for the stored columns. */
+ dtuple_t* row = dtuple_create(ctx->heap, user_table->n_cols);
+ dict_table_copy_types(row, user_table);
+ Field** af = altered_table->field;
+ Field** const end = altered_table->field + altered_table->s->fields;
+ ut_d(List_iterator_fast<Create_field> cf_it(
+ ha_alter_info->alter_info->create_list));
+ if (ctx->first_alter_pos
+ && innobase_instant_drop_cols(user_table->id,
+ ctx->first_alter_pos - 1, trx)) {
+ return true;
+ }
+ for (uint i = 0; af < end; af++) {
+ if (!(*af)->stored_in_db()) {
+ ut_d(cf_it++);
+ continue;
+ }
+
+ const dict_col_t* old = dict_table_t::find(old_cols,
+ ctx->col_map,
+ ctx->old_n_cols, i);
+ DBUG_ASSERT(!old || i >= ctx->old_n_cols - DATA_N_SYS_COLS
+ || old->ind == i
+ || (ctx->first_alter_pos
+ && old->ind >= ctx->first_alter_pos - 1));
+
+ dfield_t* d = dtuple_get_nth_field(row, i);
+ const dict_col_t* col = dict_table_get_nth_col(user_table, i);
+ DBUG_ASSERT(!col->is_virtual());
+ DBUG_ASSERT(!col->is_dropped());
+ DBUG_ASSERT(col->mtype != DATA_SYS);
+ DBUG_ASSERT(!strcmp((*af)->field_name.str,
+ dict_table_get_col_name(user_table, i)));
+ DBUG_ASSERT(old || col->is_added());
+
+ ut_d(const Create_field* new_field = cf_it++);
+ /* new_field->field would point to an existing column.
+ If it is NULL, the column was added by this ALTER TABLE. */
+ ut_ad(!new_field->field == !old);
+
+ if (col->is_added()) {
+ dfield_set_data(d, col->def_val.data,
+ col->def_val.len);
+ } else if ((*af)->real_maybe_null()) {
+ /* Store NULL for nullable 'core' columns. */
+ dfield_set_null(d);
+ } else {
+ switch ((*af)->type()) {
+ case MYSQL_TYPE_VARCHAR:
+ case MYSQL_TYPE_GEOMETRY:
+ case MYSQL_TYPE_TINY_BLOB:
+ case MYSQL_TYPE_MEDIUM_BLOB:
+ case MYSQL_TYPE_BLOB:
+ case MYSQL_TYPE_LONG_BLOB:
+ /* Store the empty string for 'core'
+ variable-length NOT NULL columns. */
+ dfield_set_data(d, field_ref_zero, 0);
+ break;
+ default:
+ /* For fixed-length NOT NULL 'core' columns,
+ get a dummy default value from SQL. Note that
+ we will preserve the old values of these
+ columns when updating the metadata
+ record, to avoid unnecessary updates. */
+ ulint len = (*af)->pack_length();
+ DBUG_ASSERT(d->type.mtype != DATA_INT
+ || len <= 8);
+ row_mysql_store_col_in_innobase_format(
+ d, d->type.mtype == DATA_INT
+ ? static_cast<byte*>(
+ mem_heap_alloc(ctx->heap, len))
+ : NULL, true, (*af)->ptr, len,
+ dict_table_is_comp(user_table));
+ ut_ad(new_field->field->pack_length() == len);
+
+ }
+ }
+
+ bool update = old && (!ctx->first_alter_pos
+ || i < ctx->first_alter_pos - 1);
+ DBUG_ASSERT(!old || col->same_format(*old));
+ if (update
+ && old->prtype == d->type.prtype) {
+ /* The record is already present in SYS_COLUMNS. */
+ } else if (innodb_insert_sys_columns(user_table->id, i,
+ (*af)->field_name.str,
+ d->type.mtype,
+ d->type.prtype,
+ d->type.len, 0, trx,
+ update)) {
+ return true;
+ }
+
+ i++;
+ }
+
+ if (innodb_update_cols(user_table, dict_table_encode_n_col(
+ unsigned(user_table->n_cols)
+ - DATA_N_SYS_COLS,
+ user_table->n_v_cols)
+ | (user_table->flags & DICT_TF_COMPACT) << 31,
+ trx)) {
+ return true;
+ }
+
+ if (ctx->first_alter_pos) {
+add_all_virtual:
+ for (uint i = 0; i < user_table->n_v_cols; i++) {
+ if (innobase_add_one_virtual(
+ user_table,
+ dict_table_get_v_col_name(user_table, i),
+ &user_table->v_cols[i], trx)) {
+ return true;
+ }
+ }
+ } else if (ha_alter_info->handler_flags & ALTER_DROP_VIRTUAL_COLUMN) {
+ if (innobase_instant_drop_cols(user_table->id, 65536, trx)) {
+ return true;
+ }
+ goto add_all_virtual;
+ } else if ((ha_alter_info->handler_flags & ALTER_ADD_VIRTUAL_COLUMN)
+ && innobase_add_virtual_try(ha_alter_info, user_table,
+ trx)) {
+ return true;
+ }
+
+ unsigned i = unsigned(user_table->n_cols) - DATA_N_SYS_COLS;
+ DBUG_ASSERT(i >= altered_table->s->stored_fields);
+ DBUG_ASSERT(i <= altered_table->s->stored_fields + 1);
+ if (i > altered_table->s->fields) {
+ const dict_col_t& fts_doc_id = user_table->cols[i - 1];
+ DBUG_ASSERT(!strcmp(fts_doc_id.name(*user_table),
+ FTS_DOC_ID_COL_NAME));
+ DBUG_ASSERT(!fts_doc_id.is_nullable());
+ DBUG_ASSERT(fts_doc_id.len == 8);
+ dfield_set_data(dtuple_get_nth_field(row, i - 1),
+ field_ref_zero, fts_doc_id.len);
+ }
+ byte trx_id[DATA_TRX_ID_LEN], roll_ptr[DATA_ROLL_PTR_LEN];
+ dfield_set_data(dtuple_get_nth_field(row, i++), field_ref_zero,
+ DATA_ROW_ID_LEN);
+ dfield_set_data(dtuple_get_nth_field(row, i++), trx_id, sizeof trx_id);
+ dfield_set_data(dtuple_get_nth_field(row, i),roll_ptr,sizeof roll_ptr);
+ DBUG_ASSERT(i + 1 == user_table->n_cols);
+
+ trx_write_trx_id(trx_id, trx->id);
+ /* The DB_ROLL_PTR will be assigned later, when allocating undo log.
+ Silence a Valgrind warning in dtuple_validate() when
+ row_ins_clust_index_entry_low() searches for the insert position. */
+ memset(roll_ptr, 0, sizeof roll_ptr);
+
+ dtuple_t* entry = index->instant_metadata(*row, ctx->heap);
+ mtr.start();
+ index->set_modified(mtr);
+ btr_pcur_t pcur;
+ btr_pcur_open_at_index_side(true, index, BTR_MODIFY_TREE, &pcur, true,
+ 0, &mtr);
+ ut_ad(btr_pcur_is_before_first_on_page(&pcur));
+ btr_pcur_move_to_next_on_page(&pcur);
+
+ buf_block_t* block = btr_pcur_get_block(&pcur);
+ ut_ad(page_is_leaf(block->frame));
+ ut_ad(!page_has_prev(block->frame));
+ ut_ad(!buf_block_get_page_zip(block));
+ const rec_t* rec = btr_pcur_get_rec(&pcur);
+ que_thr_t* thr = pars_complete_graph_for_exec(
+ NULL, trx, ctx->heap, NULL);
+
+ dberr_t err = DB_SUCCESS;
+ if (rec_is_metadata(rec, *index)) {
+ ut_ad(page_rec_is_user_rec(rec));
+ if (!page_has_next(block->frame)
+ && page_rec_is_last(rec, block->frame)) {
+ goto empty_table;
+ }
+
+ if (!metadata_changed) {
+ goto func_exit;
+ }
+
+ /* Ensure that the root page is in the correct format. */
+ buf_block_t* root = btr_root_block_get(index, RW_X_LATCH,
+ &mtr);
+ DBUG_ASSERT(root);
+ DBUG_ASSERT(!root->page.encrypted);
+ if (fil_page_get_type(root->frame) != FIL_PAGE_TYPE_INSTANT) {
+ DBUG_ASSERT(!"wrong page type");
+ err = DB_CORRUPTION;
+ goto func_exit;
+ }
+
+ btr_set_instant(root, *index, &mtr);
+
+ /* Extend the record with any added columns. */
+ uint n = uint(index->n_fields) - n_old_fields;
+ /* Reserve room for DB_TRX_ID,DB_ROLL_PTR and any
+ non-updated off-page columns in case they are moved off
+ page as a result of the update. */
+ const unsigned f = user_table->instant != NULL;
+ upd_t* update = upd_create(index->n_fields + f, ctx->heap);
+ update->n_fields = n + f;
+ update->info_bits = f
+ ? REC_INFO_METADATA_ALTER
+ : REC_INFO_METADATA_ADD;
+ if (f) {
+ upd_field_t* uf = upd_get_nth_field(update, 0);
+ uf->field_no = index->first_user_field();
+ uf->new_val = entry->fields[uf->field_no];
+ DBUG_ASSERT(!dfield_is_ext(&uf->new_val));
+ DBUG_ASSERT(!dfield_is_null(&uf->new_val));
+ }
+
+ /* Add the default values for instantly added columns */
+ unsigned j = f;
+
+ for (unsigned k = n_old_fields; k < index->n_fields; k++) {
+ upd_field_t* uf = upd_get_nth_field(update, j++);
+ uf->field_no = k + f;
+ uf->new_val = entry->fields[k + f];
+
+ ut_ad(j <= n + f);
+ }
+
+ ut_ad(j == n + f);
+
+ ulint* offsets = NULL;
+ mem_heap_t* offsets_heap = NULL;
+ big_rec_t* big_rec;
+ err = btr_cur_pessimistic_update(
+ BTR_NO_LOCKING_FLAG | BTR_KEEP_POS_FLAG,
+ btr_pcur_get_btr_cur(&pcur),
+ &offsets, &offsets_heap, ctx->heap,
+ &big_rec, update, UPD_NODE_NO_ORD_CHANGE,
+ thr, trx->id, &mtr);
+
+ offsets = rec_get_offsets(
+ btr_pcur_get_rec(&pcur), index, offsets,
+ true, ULINT_UNDEFINED, &offsets_heap);
+ if (big_rec) {
+ if (err == DB_SUCCESS) {
+ err = btr_store_big_rec_extern_fields(
+ &pcur, offsets, big_rec, &mtr,
+ BTR_STORE_UPDATE);
+ }
+
+ dtuple_big_rec_free(big_rec);
+ }
+ if (offsets_heap) {
+ mem_heap_free(offsets_heap);
+ }
+ btr_pcur_close(&pcur);
+ goto func_exit;
+ } else if (page_rec_is_supremum(rec)) {
+empty_table:
+ /* The table is empty. */
+ ut_ad(fil_page_index_page_check(block->frame));
+ ut_ad(!page_has_siblings(block->frame));
+ ut_ad(block->page.id.page_no() == index->page);
+ /* MDEV-17383: free metadata BLOBs! */
+ btr_page_empty(block, NULL, index, 0, &mtr);
+ index->clear_instant_alter();
+ goto func_exit;
+ } else if (!user_table->is_instant()) {
+ ut_ad(!user_table->not_redundant());
+ goto func_exit;
+ }
+
+ /* Convert the table to the instant ALTER TABLE format. */
+ mtr.commit();
+ mtr.start();
+ index->set_modified(mtr);
+ if (buf_block_t* root = btr_root_block_get(index, RW_SX_LATCH, &mtr)) {
+ if (root->page.encrypted
+ || fil_page_get_type(root->frame) != FIL_PAGE_INDEX) {
+ DBUG_ASSERT(!"wrong page type");
+ goto err_exit;
+ }
+
+ btr_set_instant(root, *index, &mtr);
+ mtr.commit();
+ mtr.start();
+ index->set_modified(mtr);
+ err = row_ins_clust_index_entry_low(
+ BTR_NO_LOCKING_FLAG, BTR_MODIFY_TREE, index,
+ index->n_uniq, entry, 0, thr, false);
+ } else {
+err_exit:
+ err = DB_CORRUPTION;
+ }
+
+func_exit:
+ mtr.commit();
+
+ if (err != DB_SUCCESS) {
+ my_error_innodb(err, table->s->table_name.str,
+ user_table->flags);
+ return true;
+ }
+
+ return false;
}
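
When the hidden metadata record already exists, innobase_instant_try() updates it in place: the update vector gets n + f entries, where f is 1 only if a column map BLOB has to be stored (user_table->instant != NULL) and n is the number of instantly added index fields; the BLOB slot shifts the added fields by one position. A small stand-alone sketch of that position arithmetic, in plain C++ with hypothetical names rather than the InnoDB upd_t API:

    #include <utility>
    #include <vector>

    // Model of the update vector layout in innobase_instant_try(): pairs of
    // (field_no in the metadata record, description of the new value).
    static std::vector<std::pair<unsigned, const char*>> metadata_update_fields(
        unsigned n_fields,          // index->n_fields after the instant ALTER
        unsigned n_old_fields,      // index->n_fields before the instant ALTER
        unsigned first_user_field,  // position of the metadata BLOB slot
        bool     has_column_map)    // user_table->instant != NULL
    {
        const unsigned f = has_column_map ? 1 : 0;
        const unsigned n = n_fields - n_old_fields;  // instantly added fields
        std::vector<std::pair<unsigned, const char*>> update;
        update.reserve(n + f);
        if (f) {
            // slot 0 of the update vector: the serialised column map BLOB
            update.emplace_back(first_user_field, "column map BLOB");
        }
        for (unsigned k = n_old_fields; k < n_fields; k++) {
            // added columns land at k + f: the BLOB slot shifts them by one
            update.emplace_back(k + f, "default value of added column");
        }
        return update;  // update.size() == n + f
    }
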
/** Adjust the create index column number from "New table" to
@@ -4901,11 +6166,9 @@ prepare_inplace_alter_table_dict(
const char* path = thd_innodb_tmpdir(
ctx->prebuilt->trx->mysql_thd);
- index_defs = innobase_create_key_defs(
- ctx->heap, ha_alter_info, altered_table, ctx->num_to_add_index,
+ index_defs = ctx->create_key_defs(
+ ha_alter_info, altered_table,
num_fts_index,
- dict_index_is_auto_gen_clust(dict_table_get_first_index(
- ctx->new_table)),
fts_doc_id_col, add_fts_doc_id, add_fts_doc_id_idx,
old_table);
@@ -5094,10 +6357,10 @@ new_clustered_failed:
}
if (altered_table->versioned()) {
- if (i == altered_table->s->row_start_field) {
+ if (i == altered_table->s->vers.start_fieldno) {
field_type |= DATA_VERS_START;
} else if (i ==
- altered_table->s->row_end_field) {
+ altered_table->s->vers.end_fieldno) {
field_type |= DATA_VERS_END;
} else if (!(field->flags
& VERS_UPDATE_UNVERSIONED_FLAG)) {
@@ -5273,20 +6536,12 @@ new_clustered_failed:
== !!new_clustered);
}
- if (ctx->need_rebuild() && user_table->supports_instant()) {
- if (!instant_alter_column_possible(ha_alter_info, old_table)) {
- goto not_instant_add_column;
- }
-
- for (uint i = uint(ctx->old_table->n_cols) - DATA_N_SYS_COLS;
- i--; ) {
- if (ctx->col_map[i] != i) {
- goto not_instant_add_column;
- }
- }
-
- DBUG_ASSERT(ctx->new_table->n_cols > ctx->old_table->n_cols);
+ DBUG_ASSERT(!ctx->need_rebuild()
+ || !ctx->new_table->persistent_autoinc);
+ if (ctx->need_rebuild() && instant_alter_column_possible(
+ *user_table, ha_alter_info, old_table, altered_table,
+ trx_is_strict(ctx->trx))) {
for (uint a = 0; a < ctx->num_to_add_index; a++) {
ctx->add_index[a]->table = ctx->new_table;
ctx->add_index[a] = dict_index_add_to_cache(
@@ -5294,6 +6549,7 @@ new_clustered_failed:
&error, add_v);
ut_a(error == DB_SUCCESS);
}
+
DBUG_ASSERT(ha_alter_info->key_count
/* hidden GEN_CLUST_INDEX in InnoDB */
+ dict_index_is_auto_gen_clust(
@@ -5305,6 +6561,7 @@ new_clustered_failed:
altered_table->key_info)
!= FTS_EXIST_DOC_ID_INDEX)
== ctx->num_to_add_index);
+
ctx->num_to_add_index = 0;
ctx->add_index = NULL;
@@ -5335,26 +6592,11 @@ new_clustered_failed:
DBUG_ASSERT(!strcmp((*af)->field_name.str,
dict_table_get_col_name(ctx->new_table,
i)));
- DBUG_ASSERT(!col->is_instant());
+ DBUG_ASSERT(!col->is_added());
if (new_field->field) {
- ut_d(const dict_col_t* old_col
- = dict_table_get_nth_col(user_table, i));
- ut_d(const dict_index_t* index
- = user_table->indexes.start);
- DBUG_SLOW_ASSERT(col->mtype == old_col->mtype);
- ut_ad(col->prtype == old_col->prtype
- || col->prtype
- == (old_col->prtype & ~DATA_VERSIONED));
- DBUG_SLOW_ASSERT(col->mbminlen
- == old_col->mbminlen);
- DBUG_SLOW_ASSERT(col->mbmaxlen
- == old_col->mbmaxlen);
- DBUG_SLOW_ASSERT(col->len >= old_col->len);
- DBUG_SLOW_ASSERT(old_col->is_instant()
- == (dict_col_get_clust_pos(
- old_col, index)
- >= index->n_core_fields));
+ /* This is a pre-existing column,
+ possibly at a different position. */
} else if ((*af)->is_real_null()) {
/* DEFAULT NULL */
col->def_val.len = UNIV_SQL_NULL;
@@ -5422,11 +6664,15 @@ new_clustered_failed:
ctx->new_table, i),
FTS_DOC_ID_COL_NAME)));
+ if (altered_table->found_next_number_field) {
+ ctx->new_table->persistent_autoinc
+ = ctx->old_table->persistent_autoinc;
+ }
+
ctx->prepare_instant();
}
if (ctx->need_rebuild()) {
-not_instant_add_column:
DBUG_ASSERT(ctx->need_rebuild());
DBUG_ASSERT(!ctx->is_instant());
DBUG_ASSERT(num_fts_index <= 1);
@@ -5560,7 +6806,6 @@ new_table_failed:
ut_ad(new_clust_index->n_core_null_bytes
== UT_BITS_IN_BYTES(new_clust_index->n_nullable));
- DBUG_ASSERT(!ctx->new_table->persistent_autoinc);
if (const Field* ai = altered_table->found_next_number_field) {
const unsigned col_no = innodb_col_no(ai);
@@ -5971,6 +7216,120 @@ innobase_check_foreign_key_index(
return(false);
}
+/**
+Rename a given index in the InnoDB data dictionary.
+
+@param index index to rename
+@param new_name new name of the index
+@param[in,out] trx dict transaction to use, not going to be committed here
+
+@retval true Failure
+@retval false Success */
+static MY_ATTRIBUTE((warn_unused_result))
+bool
+rename_index_try(
+ const dict_index_t* index,
+ const char* new_name,
+ trx_t* trx)
+{
+ DBUG_ENTER("rename_index_try");
+
+ ut_ad(mutex_own(&dict_sys->mutex));
+ ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
+ ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+
+ pars_info_t* pinfo;
+ dberr_t err;
+
+ pinfo = pars_info_create();
+
+ pars_info_add_ull_literal(pinfo, "table_id", index->table->id);
+ pars_info_add_ull_literal(pinfo, "index_id", index->id);
+ pars_info_add_str_literal(pinfo, "new_name", new_name);
+
+ trx->op_info = "Renaming an index in SYS_INDEXES";
+
+ DBUG_EXECUTE_IF(
+ "ib_rename_index_fail1",
+ DBUG_SET("+d,innodb_report_deadlock");
+ );
+
+ err = que_eval_sql(
+ pinfo,
+ "PROCEDURE RENAME_INDEX_IN_SYS_INDEXES () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_INDEXES SET\n"
+ "NAME = :new_name\n"
+ "WHERE\n"
+ "ID = :index_id AND\n"
+ "TABLE_ID = :table_id;\n"
+ "END;\n",
+ FALSE, trx); /* pinfo is freed by que_eval_sql() */
+
+ DBUG_EXECUTE_IF(
+ "ib_rename_index_fail1",
+ DBUG_SET("-d,innodb_report_deadlock");
+ );
+
+ trx->op_info = "";
+
+ if (err != DB_SUCCESS) {
+ my_error_innodb(err, index->table->name.m_name, 0);
+ DBUG_RETURN(true);
+ }
+
+ DBUG_RETURN(false);
+}
+
+
+/**
+Rename a given index in the InnoDB data dictionary cache.
+
+@param[in,out] index index to rename
+@param new_name new index name
+*/
+static
+void
+innobase_rename_index_cache(dict_index_t* index, const char* new_name)
+{
+ DBUG_ENTER("innobase_rename_index_cache");
+
+ ut_ad(mutex_own(&dict_sys->mutex));
+ ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
+
+ size_t old_name_len = strlen(index->name);
+ size_t new_name_len = strlen(new_name);
+
+ if (old_name_len < new_name_len) {
+ index->name = static_cast<char*>(
+ mem_heap_alloc(index->heap, new_name_len + 1));
+ }
+
+ memcpy(const_cast<char*>(index->name()), new_name, new_name_len + 1);
+
+ DBUG_VOID_RETURN;
+}
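
innobase_rename_index_cache() only allocates a fresh buffer from the index heap when the new name is longer than the old one; otherwise it overwrites the existing bytes in place, and the old allocation is never individually freed. A rough stand-alone analogue of that pattern in plain C++, where the toy arena stands in for mem_heap_t:

    #include <cstring>
    #include <deque>
    #include <string>

    // Toy arena standing in for InnoDB's mem_heap_t: allocations live until
    // the arena is destroyed; there is no per-allocation free.
    struct Arena {
        std::deque<std::string> blocks;
        char* alloc(size_t n) { blocks.emplace_back(n, '\0'); return &blocks.back()[0]; }
    };

    // Rename in place, reallocating only when the new name needs more room,
    // mirroring innobase_rename_index_cache(). `name` must point into
    // writable arena memory, as index->name does in InnoDB.
    static void rename_in_cache(Arena& heap, const char*& name, const char* new_name)
    {
        const size_t old_len = std::strlen(name);
        const size_t new_len = std::strlen(new_name);
        if (old_len < new_len) {
            name = heap.alloc(new_len + 1);
        }
        // copy including the terminating NUL; any longer old tail simply
        // becomes unreachable bytes after the NUL
        std::memcpy(const_cast<char*>(name), new_name, new_len + 1);
    }
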
+
+
+/** Rename indexes in the data dictionary cache.
+@param[in] ctx alter context
+@param[in] ha_alter_info Data used during inplace alter. */
+static void innobase_rename_indexes_cache(
+ const ha_innobase_inplace_ctx* ctx,
+ const Alter_inplace_info* ha_alter_info)
+{
+ DBUG_ASSERT(ha_alter_info->handler_flags & ALTER_RENAME_INDEX);
+
+ for (const Alter_inplace_info::Rename_key_pair& pair :
+ ha_alter_info->rename_keys) {
+ dict_index_t* index = dict_table_get_index_on_name(
+ ctx->old_table, pair.old_key->name.str);
+ ut_ad(index);
+
+ innobase_rename_index_cache(index, pair.new_key->name.str);
+ }
+}
+
/** Fill the stored column information in s_cols list.
@param[in] altered_table mysql table object
@@ -6048,8 +7407,6 @@ ha_innobase::prepare_inplace_alter_table(
{
dict_index_t** drop_index; /*!< Index to be dropped */
ulint n_drop_index; /*!< Number of indexes to drop */
- dict_index_t** rename_index; /*!< Indexes to be dropped */
- ulint n_rename_index; /*!< Number of indexes to rename */
dict_foreign_t**drop_fk; /*!< Foreign key constraints to drop */
ulint n_drop_fk; /*!< Number of foreign keys to drop */
dict_foreign_t**add_fk = NULL; /*!< Foreign key constraints to drop */
@@ -6565,9 +7922,6 @@ check_if_can_drop_indexes:
}
}
- n_rename_index = 0;
- rename_index = NULL;
-
n_add_fk = 0;
if (ha_alter_info->handler_flags
@@ -6621,6 +7975,20 @@ err_exit:
}
}
+ if (ha_alter_info->handler_flags & ALTER_RENAME_INDEX) {
+ for (const Alter_inplace_info::Rename_key_pair& pair :
+ ha_alter_info->rename_keys) {
+ dict_index_t* index = dict_table_get_index_on_name(
+ indexed_table, pair.old_key->name.str);
+
+ if (!index || index->is_corrupted()) {
+ my_error(ER_INDEX_CORRUPT, MYF(0),
+ index->name());
+ goto err_exit;
+ }
+ }
+ }
+
const ha_table_option_struct& alt_opt=
*ha_alter_info->create_info->option_struct;
@@ -6636,7 +8004,6 @@ err_exit:
= new ha_innobase_inplace_ctx(
m_prebuilt,
drop_index, n_drop_index,
- rename_index, n_rename_index,
drop_fk, n_drop_fk,
add_fk, n_add_fk,
ha_alter_info->online,
@@ -6764,7 +8131,6 @@ found_col:
ha_alter_info->handler_ctx = new ha_innobase_inplace_ctx(
m_prebuilt,
drop_index, n_drop_index,
- rename_index, n_rename_index,
drop_fk, n_drop_fk, add_fk, n_add_fk,
ha_alter_info->online,
heap, m_prebuilt->table, col_names,
@@ -7423,7 +8789,6 @@ innobase_drop_foreign_try(
@param[in] user_table InnoDB table that was being altered
@param[in] trx data dictionary transaction
@param[in] table_name Table name in MySQL
-@param[in] nth_col 0-based index of the column
@param[in] from old column name
@param[in] to new column name
@param[in] new_clustered whether the table has been rebuilt
@@ -7436,12 +8801,10 @@ innobase_rename_column_try(
const dict_table_t* user_table,
trx_t* trx,
const char* table_name,
- ulint nth_col,
const char* from,
const char* to,
bool new_clustered)
{
- pars_info_t* info;
dberr_t error;
DBUG_ENTER("innobase_rename_column_try");
@@ -7455,34 +8818,7 @@ innobase_rename_column_try(
goto rename_foreign;
}
- info = pars_info_create();
-
- pars_info_add_ull_literal(info, "tableid", user_table->id);
- pars_info_add_int4_literal(info, "nth", nth_col);
- pars_info_add_str_literal(info, "new", to);
-
- trx->op_info = "renaming column in SYS_COLUMNS";
-
- error = que_eval_sql(
- info,
- "PROCEDURE RENAME_SYS_COLUMNS_PROC () IS\n"
- "BEGIN\n"
- "UPDATE SYS_COLUMNS SET NAME=:new\n"
- "WHERE TABLE_ID=:tableid\n"
- "AND POS=:nth;\n"
- "END;\n",
- FALSE, trx);
-
- DBUG_EXECUTE_IF("ib_rename_column_error",
- error = DB_OUT_OF_FILE_SPACE;);
-
- if (error != DB_SUCCESS) {
-err_exit:
- my_error_innodb(error, table_name, 0);
- trx->error_state = DB_SUCCESS;
- trx->op_info = "";
- DBUG_RETURN(true);
- }
+ error = DB_SUCCESS;
trx->op_info = "renaming column in SYS_FIELDS";
@@ -7500,19 +8836,16 @@ err_exit:
}
for (ulint i = 0; i < dict_index_get_n_fields(index); i++) {
- const dict_field_t* field
- = dict_index_get_nth_field(index, i);
- if (my_strcasecmp(system_charset_info, field->name,
- from)) {
+ const dict_field_t& f = index->fields[i];
+ DBUG_ASSERT(!f.name == f.col->is_dropped());
+
+ if (!f.name || my_strcasecmp(system_charset_info,
+ f.name, from)) {
continue;
}
- info = pars_info_create();
-
- ulint pos = i;
- if (has_prefixes) {
- pos = (pos << 16) + field->prefix_len;
- }
+ pars_info_t* info = pars_info_create();
+ ulint pos = has_prefixes ? i << 16 | f.prefix_len : i;
pars_info_add_ull_literal(info, "indexid", index->id);
pars_info_add_int4_literal(info, "nth", pos);
@@ -7527,6 +8860,8 @@ err_exit:
"AND POS=:nth;\n"
"END;\n",
FALSE, trx);
+ DBUG_EXECUTE_IF("ib_rename_column_error",
+ error = DB_OUT_OF_FILE_SPACE;);
if (error != DB_SUCCESS) {
goto err_exit;
@@ -7534,6 +8869,14 @@ err_exit:
}
}
+ if (error != DB_SUCCESS) {
+err_exit:
+ my_error_innodb(error, table_name, 0);
+ trx->error_state = DB_SUCCESS;
+ trx->op_info = "";
+ DBUG_RETURN(true);
+ }
+
rename_foreign:
trx->op_info = "renaming column in SYS_FOREIGN_COLS";
@@ -7554,7 +8897,7 @@ rename_foreign:
continue;
}
- info = pars_info_create();
+ pars_info_t* info = pars_info_create();
pars_info_add_str_literal(info, "id", foreign->id);
pars_info_add_int4_literal(info, "nth", i);
@@ -7596,7 +8939,7 @@ rename_foreign:
continue;
}
- info = pars_info_create();
+ pars_info_t* info = pars_info_create();
pars_info_add_str_literal(info, "id", foreign->id);
pars_info_add_int4_literal(info, "nth", i);
@@ -7655,6 +8998,7 @@ innobase_rename_columns_try(
uint i = 0;
ulint num_v = 0;
+ DBUG_ASSERT(ctx->need_rebuild());
DBUG_ASSERT(ha_alter_info->handler_flags
& ALTER_COLUMN_NAME);
@@ -7668,17 +9012,10 @@ innobase_rename_columns_try(
while (Create_field* cf = cf_it++) {
if (cf->field == *fp) {
- ulint col_n = is_virtual
- ? dict_create_v_col_pos(
- num_v, i)
- : i - num_v;
-
if (innobase_rename_column_try(
ctx->old_table, trx, table_name,
- col_n,
cf->field->field_name.str,
- cf->field_name.str,
- ctx->need_rebuild())) {
+ cf->field_name.str, true)) {
return(true);
}
goto processed_field;
@@ -7697,63 +9034,92 @@ processed_field:
return(false);
}
+/** Convert field type and length to InnoDB format */
+static void get_type(const Field& f, ulint& prtype, ulint& mtype, ulint& len)
+{
+ mtype = get_innobase_type_from_mysql_type(&prtype, &f);
+ len = f.pack_length();
+ prtype |= f.type();
+ if (f.type() == MYSQL_TYPE_VARCHAR) {
+ auto l = static_cast<const Field_varstring&>(f).length_bytes;
+ len -= l;
+ if (l == 2) prtype |= DATA_LONG_TRUE_VARCHAR;
+ }
+ if (!f.real_maybe_null()) prtype |= DATA_NOT_NULL;
+ if (f.binary()) prtype |= DATA_BINARY_TYPE;
+ if (f.table->versioned()) {
+ if (&f == f.table->field[f.table->s->vers.start_fieldno]) {
+ prtype |= DATA_VERS_START;
+ } else if (&f == f.table->field[f.table->s->vers.end_fieldno]) {
+ prtype |= DATA_VERS_END;
+ } else if (!(f.flags & VERS_UPDATE_UNVERSIONED_FLAG)) {
+ prtype |= DATA_VERSIONED;
+ }
+ }
+ if (!f.stored_in_db()) prtype |= DATA_VIRTUAL;
+
+ if (dtype_is_string_type(mtype)) {
+ prtype |= ulint(f.charset()->number) << 16;
+ }
+}
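
get_type() packs the column's persistent type word from several sources: the MySQL type code in the low bits, attribute flags such as NOT NULL, binary and virtual ORed in above it, and for string types the collation (charset) number shifted into bits 16 and up. A stand-alone model of that packing in plain C++; the MODEL_* flag values below are purely illustrative and deliberately do not match the real DATA_* constants in data0type.h:

    #include <cstdint>

    // Illustrative-only flag values; the real DATA_* constants differ.
    enum : uint32_t {
        MODEL_DATA_NOT_NULL          = 1u << 8,
        MODEL_DATA_BINARY_TYPE       = 1u << 9,
        MODEL_DATA_LONG_TRUE_VARCHAR = 1u << 10,
        MODEL_DATA_VIRTUAL           = 1u << 11,
    };

    // Model of the prtype packing in get_type(): MySQL type code in the low
    // byte, attribute flags above it, and for string types the charset
    // number in bits 16 and up.
    static uint32_t pack_prtype(uint8_t mysql_type_code, bool not_null, bool binary,
                                bool long_true_varchar, bool is_virtual,
                                bool is_string, uint16_t charset_number)
    {
        uint32_t prtype = mysql_type_code;
        if (not_null)          prtype |= MODEL_DATA_NOT_NULL;
        if (binary)            prtype |= MODEL_DATA_BINARY_TYPE;
        if (long_true_varchar) prtype |= MODEL_DATA_LONG_TRUE_VARCHAR;
        if (is_virtual)        prtype |= MODEL_DATA_VIRTUAL;
        if (is_string)         prtype |= uint32_t(charset_number) << 16;
        return prtype;
    }
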
+
/** Enlarge a column in the data dictionary tables.
@param user_table InnoDB table that was being altered
@param trx data dictionary transaction
@param table_name Table name in MySQL
-@param nth_col 0-based index of the column
-@param new_len new column length, in bytes
+@param pos 0-based index to user_table->cols[] or user_table->v_cols[]
+@param f new column
@param is_v if it's a virtual column
@retval true Failure
@retval false Success */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
-innobase_enlarge_column_try(
-/*========================*/
+innobase_rename_or_enlarge_column_try(
const dict_table_t* user_table,
trx_t* trx,
const char* table_name,
- ulint nth_col,
- ulint new_len,
+ ulint pos,
+ const Field& f,
bool is_v)
{
- pars_info_t* info;
- dberr_t error;
-#ifdef UNIV_DEBUG
dict_col_t* col;
-#endif /* UNIV_DEBUG */
- dict_v_col_t* v_col;
- ulint pos;
- DBUG_ENTER("innobase_enlarge_column_try");
+ DBUG_ENTER("innobase_rename_or_enlarge_column_try");
DBUG_ASSERT(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
ut_ad(mutex_own(&dict_sys->mutex));
ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
+ ulint n_base;
+
if (is_v) {
- v_col = dict_table_get_nth_v_col(user_table, nth_col);
+ dict_v_col_t* v_col= dict_table_get_nth_v_col(user_table, pos);
pos = dict_create_v_col_pos(v_col->v_pos, v_col->m_col.ind);
-#ifdef UNIV_DEBUG
col = &v_col->m_col;
-#endif /* UNIV_DEBUG */
+ n_base = v_col->num_base;
} else {
-#ifdef UNIV_DEBUG
- col = dict_table_get_nth_col(user_table, nth_col);
-#endif /* UNIV_DEBUG */
- pos = nth_col;
+ col = dict_table_get_nth_col(user_table, pos);
+ n_base = 0;
}
+ ulint prtype, mtype, len;
+ get_type(f, prtype, mtype, len);
+ DBUG_ASSERT(!dtype_is_string_type(col->mtype)
+ || col->mbminlen == f.charset()->mbminlen);
+ DBUG_ASSERT(col->len <= len);
+
#ifdef UNIV_DEBUG
- ut_ad(col->len < new_len);
- switch (col->mtype) {
+ switch (mtype) {
+ case DATA_FIXBINARY:
+ case DATA_CHAR:
case DATA_MYSQL:
/* NOTE: we could allow this when !(prtype & DATA_BINARY_TYPE)
and ROW_FORMAT is not REDUNDANT and mbminlen<mbmaxlen.
That is, we treat a UTF-8 CHAR(n) column somewhat like
a VARCHAR. */
- ut_error;
+ ut_ad(col->len == len);
+ break;
case DATA_BINARY:
case DATA_VARCHAR:
case DATA_VARMYSQL:
@@ -7761,58 +9127,62 @@ innobase_enlarge_column_try(
case DATA_BLOB:
break;
default:
- ut_error;
+ ut_ad(col->prtype == prtype);
+ ut_ad(col->mtype == mtype);
+ ut_ad(col->len == len);
}
#endif /* UNIV_DEBUG */
- info = pars_info_create();
-
- pars_info_add_ull_literal(info, "tableid", user_table->id);
- pars_info_add_int4_literal(info, "nth", pos);
- pars_info_add_int4_literal(info, "new", new_len);
-
- trx->op_info = "resizing column in SYS_COLUMNS";
- error = que_eval_sql(
- info,
- "PROCEDURE RESIZE_SYS_COLUMNS_PROC () IS\n"
- "BEGIN\n"
- "UPDATE SYS_COLUMNS SET LEN=:new\n"
- "WHERE TABLE_ID=:tableid AND POS=:nth;\n"
- "END;\n",
- FALSE, trx);
-
- DBUG_EXECUTE_IF("ib_resize_column_error",
- error = DB_OUT_OF_FILE_SPACE;);
-
- trx->op_info = "";
- trx->error_state = DB_SUCCESS;
+ const char* col_name = col->name(*user_table);
+ const bool same_name = !strcmp(col_name, f.field_name.str);
- if (error != DB_SUCCESS) {
- my_error_innodb(error, table_name, 0);
+ if (!same_name
+ && innobase_rename_column_try(user_table, trx, table_name,
+ col_name, f.field_name.str,
+ false)) {
DBUG_RETURN(true);
}
- DBUG_RETURN(false);
+ if (same_name
+ && col->prtype == prtype && col->mtype == mtype
+ && col->len == len) {
+ DBUG_RETURN(false);
+ }
+
+ DBUG_RETURN(innodb_insert_sys_columns(user_table->id, pos,
+ f.field_name.str,
+ mtype, prtype, len,
+ n_base, trx, true));
}
-/** Enlarge columns in the data dictionary tables.
+/** Rename or enlarge columns in the data dictionary cache
+as part of commit_try_norebuild().
@param ha_alter_info Data used during in-place alter.
-@param table the TABLE
-@param user_table InnoDB table that was being altered
+@param ctx In-place ALTER TABLE context
+@param altered_table metadata after ALTER TABLE
+@param table metadata before ALTER TABLE
@param trx data dictionary transaction
@param table_name Table name in MySQL
@retval true Failure
@retval false Success */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
bool
-innobase_enlarge_columns_try(
-/*=========================*/
+innobase_rename_or_enlarge_columns_try(
Alter_inplace_info* ha_alter_info,
+ ha_innobase_inplace_ctx*ctx,
+ const TABLE* altered_table,
const TABLE* table,
- const dict_table_t* user_table,
trx_t* trx,
const char* table_name)
{
+ DBUG_ENTER("innobase_rename_or_enlarge_columns_try");
+
+ if (!(ha_alter_info->handler_flags
+ & (ALTER_COLUMN_EQUAL_PACK_LENGTH
+ | ALTER_COLUMN_NAME))) {
+ DBUG_RETURN(false);
+ }
+
List_iterator_fast<Create_field> cf_it(
ha_alter_info->alter_info->create_list);
ulint i = 0;
@@ -7823,34 +9193,35 @@ innobase_enlarge_columns_try(
ulint idx = is_v ? num_v++ : i - num_v;
cf_it.rewind();
+ Field** af = altered_table->field;
while (Create_field* cf = cf_it++) {
if (cf->field == *fp) {
- if ((*fp)->is_equal(cf)
- == IS_EQUAL_PACK_LENGTH
- && innobase_enlarge_column_try(
- user_table, trx, table_name,
- idx, static_cast<ulint>(cf->length), is_v)) {
- return(true);
+ if (innobase_rename_or_enlarge_column_try(
+ ctx->old_table, trx, table_name,
+ idx, **af, is_v)) {
+ DBUG_RETURN(true);
}
-
break;
}
+ af++;
}
}
- return(false);
+ DBUG_RETURN(false);
}
/** Rename or enlarge columns in the data dictionary cache
as part of commit_cache_norebuild().
@param ha_alter_info Data used during in-place alter.
-@param table the TABLE
+@param altered_table metadata after ALTER TABLE
+@param table metadata before ALTER TABLE
@param user_table InnoDB table that was being altered */
static MY_ATTRIBUTE((nonnull))
void
innobase_rename_or_enlarge_columns_cache(
/*=====================================*/
Alter_inplace_info* ha_alter_info,
+ const TABLE* altered_table,
const TABLE* table,
dict_table_t* user_table)
{
@@ -7869,30 +9240,37 @@ innobase_rename_or_enlarge_columns_cache(
const bool is_virtual = !(*fp)->stored_in_db();
cf_it.rewind();
+ Field** af = altered_table->field;
while (Create_field* cf = cf_it++) {
if (cf->field != *fp) {
+ af++;
continue;
}
ulint col_n = is_virtual ? num_v : i - num_v;
-
- if ((*fp)->is_equal(cf) == IS_EQUAL_PACK_LENGTH) {
- if (is_virtual) {
- dict_table_get_nth_v_col(
- user_table, col_n)->m_col.len
- = cf->length;
- } else {
- dict_table_get_nth_col(
- user_table, col_n)->len
- = cf->length;
- }
- }
+ dict_col_t *col = is_virtual
+ ? &dict_table_get_nth_v_col(user_table, col_n)
+ ->m_col
+ : dict_table_get_nth_col(user_table, col_n);
+ const bool is_string= dtype_is_string_type(col->mtype);
+ DBUG_ASSERT(col->mbminlen
+ == (is_string
+ ? (*af)->charset()->mbminlen : 0));
+ ulint prtype, mtype, len;
+ get_type(**af, prtype, mtype, len);
+ DBUG_ASSERT(is_string == dtype_is_string_type(mtype));
+
+ col->prtype = prtype;
+ col->mtype = mtype;
+ col->len = len;
+ col->mbmaxlen = is_string
+ ? (*af)->charset()->mbmaxlen : 0;
if ((*fp)->flags & FIELD_IS_RENAMED) {
dict_mem_table_col_rename(
user_table, col_n,
cf->field->field_name.str,
- cf->field_name.str, is_virtual);
+ (*af)->field_name.str, is_virtual);
}
break;
@@ -8546,6 +9924,38 @@ commit_try_rebuild(
}
}
+/** Rename indexes in dictionary.
+@param[in] ctx alter info context
+@param[in] ha_alter_info Operation used during inplace alter
+@param[in,out] trx data dictionary transaction
+@retval true Failure
+@retval false Success */
+static
+bool
+rename_indexes_try(
+ const ha_innobase_inplace_ctx* ctx,
+ const Alter_inplace_info* ha_alter_info,
+ trx_t* trx)
+{
+ DBUG_ASSERT(ha_alter_info->handler_flags & ALTER_RENAME_INDEX);
+
+ for (const Alter_inplace_info::Rename_key_pair& pair :
+ ha_alter_info->rename_keys) {
+ dict_index_t* index = dict_table_get_index_on_name(
+ ctx->old_table, pair.old_key->name.str);
+ // This was checked previously in
+ // ha_innobase::prepare_inplace_alter_table()
+ ut_ad(index);
+
+ if (rename_index_try(index, pair.new_key->name.str, trx)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
/** Apply the changes made during commit_try_rebuild(),
to the data dictionary cache and the file system.
@param ctx In-place ALTER TABLE context */
@@ -8797,34 +10207,49 @@ commit_try_norebuild(
}
}
- if ((ha_alter_info->handler_flags
- & ALTER_COLUMN_NAME)
- && innobase_rename_columns_try(ha_alter_info, ctx, old_table,
- trx, table_name)) {
+ if (innobase_rename_or_enlarge_columns_try(ha_alter_info, ctx,
+ altered_table, old_table,
+ trx, table_name)) {
DBUG_RETURN(true);
}
- if ((ha_alter_info->handler_flags
- & ALTER_COLUMN_EQUAL_PACK_LENGTH)
- && innobase_enlarge_columns_try(ha_alter_info, old_table,
- ctx->old_table, trx, table_name)) {
+ if ((ha_alter_info->handler_flags & ALTER_RENAME_INDEX)
+ && rename_indexes_try(ctx, ha_alter_info, trx)) {
DBUG_RETURN(true);
}
- if ((ha_alter_info->handler_flags
- & ALTER_DROP_VIRTUAL_COLUMN)
- && innobase_drop_virtual_try(ha_alter_info, ctx->old_table, trx)) {
- DBUG_RETURN(true);
+ if (ctx->is_instant()) {
+ DBUG_RETURN(innobase_instant_try(ha_alter_info, ctx,
+ altered_table, old_table,
+ trx));
}
- if ((ha_alter_info->handler_flags
- & ALTER_ADD_VIRTUAL_COLUMN)
- && innobase_add_virtual_try(ha_alter_info, ctx->old_table, trx)) {
- DBUG_RETURN(true);
- }
+ if (ha_alter_info->handler_flags
+ & (ALTER_DROP_VIRTUAL_COLUMN | ALTER_ADD_VIRTUAL_COLUMN)) {
+ if ((ha_alter_info->handler_flags & ALTER_DROP_VIRTUAL_COLUMN)
+ && innobase_drop_virtual_try(ha_alter_info, ctx->old_table,
+ trx)) {
+ DBUG_RETURN(true);
+ }
- if (innobase_add_instant_try(ctx, altered_table, old_table, trx)) {
- DBUG_RETURN(true);
+ if ((ha_alter_info->handler_flags & ALTER_ADD_VIRTUAL_COLUMN)
+ && innobase_add_virtual_try(ha_alter_info, ctx->old_table,
+ trx)) {
+ DBUG_RETURN(true);
+ }
+
+ ulint n_col = unsigned(ctx->old_table->n_cols)
+ - DATA_N_SYS_COLS;
+ ulint n_v_col = unsigned(ctx->old_table->n_v_cols)
+ + ctx->num_to_add_vcol - ctx->num_to_drop_vcol;
+
+ if (innodb_update_cols(
+ ctx->old_table,
+ dict_table_encode_n_col(n_col, n_v_col)
+ | unsigned(ctx->old_table->flags & DICT_TF_COMPACT)
+ << 31, trx)) {
+ DBUG_RETURN(true);
+ }
}
DBUG_RETURN(false);
@@ -8834,6 +10259,7 @@ commit_try_norebuild(
after a successful commit_try_norebuild() call.
@param ha_alter_info algorithm=inplace context
@param ctx In-place ALTER TABLE context for the current partition
+@param altered_table the TABLE after the ALTER
@param table the TABLE before the ALTER
@param trx Data dictionary transaction
(will be started and committed, for DROP INDEX)
@@ -8844,6 +10270,7 @@ commit_cache_norebuild(
/*===================*/
Alter_inplace_info* ha_alter_info,
ha_innobase_inplace_ctx*ctx,
+ const TABLE* altered_table,
const TABLE* table,
trx_t* trx)
{
@@ -8866,11 +10293,17 @@ commit_cache_norebuild(
bool update = !(space->flags
& FSP_FLAGS_MASK_PAGE_COMPRESSION);
mutex_enter(&fil_system.mutex);
- space->flags = (~FSP_FLAGS_MASK_MEM_COMPRESSION_LEVEL
- & (space->flags
- | FSP_FLAGS_MASK_PAGE_COMPRESSION))
- | ctx->page_compression_level
+ space->flags &= ~FSP_FLAGS_MASK_MEM_COMPRESSION_LEVEL;
+ space->flags |= ctx->page_compression_level
<< FSP_FLAGS_MEM_COMPRESSION_LEVEL;
+ if (!space->full_crc32()) {
+ space->flags
+ |= FSP_FLAGS_MASK_PAGE_COMPRESSION;
+ } else if (!space->is_compressed()) {
+ space->flags
+ |= innodb_compression_algorithm
+ << FSP_FLAGS_FCRC32_POS_COMPRESSED_ALGO;
+ }
mutex_exit(&fil_system.mutex);
if (update) {
@@ -8888,7 +10321,7 @@ commit_cache_norebuild(
mtr.start();
if (buf_block_t* b = buf_page_get(
page_id_t(space->id, 0),
- page_size_t(space->flags),
+ space->zip_size(),
RW_X_LATCH, &mtr)) {
mtr.set_named_space(space);
mlog_write_ulint(
@@ -8990,13 +10423,57 @@ commit_cache_norebuild(
if (!ctx->is_instant()) {
innobase_rename_or_enlarge_columns_cache(
- ha_alter_info, table, ctx->new_table);
+ ha_alter_info, altered_table, table, ctx->new_table);
+ } else {
+ ut_ad(ctx->col_map);
+
+ if (fts_t* fts = ctx->new_table->fts) {
+ ut_ad(fts->doc_col != ULINT_UNDEFINED);
+ ut_ad(ctx->new_table->n_cols > DATA_N_SYS_COLS);
+ const ulint c = ctx->col_map[fts->doc_col];
+ ut_ad(c < ulint(ctx->new_table->n_cols)
+ - DATA_N_SYS_COLS);
+ ut_d(const dict_col_t& col = ctx->new_table->cols[c]);
+ ut_ad(!col.is_nullable());
+ ut_ad(!col.is_virtual());
+ ut_ad(!col.is_added());
+ ut_ad(col.prtype & DATA_UNSIGNED);
+ ut_ad(col.mtype == DATA_INT);
+ ut_ad(col.len == 8);
+ ut_ad(col.ord_part);
+ fts->doc_col = c;
+ }
+
+ if (ha_alter_info->handler_flags & ALTER_DROP_STORED_COLUMN) {
+ const dict_index_t* index = ctx->new_table->indexes.start;
+
+ for (const dict_field_t* f = index->fields,
+ * const end = f + index->n_fields;
+ f != end; f++) {
+ dict_col_t& c = *f->col;
+ if (c.is_dropped()) {
+ c.set_dropped(!c.is_nullable(),
+ DATA_LARGE_MTYPE(c.mtype)
+ || (!f->fixed_len
+ && c.len > 255),
+ f->fixed_len);
+ }
+ }
+ }
+
+ if (!ctx->instant_table->persistent_autoinc) {
+ ctx->new_table->persistent_autoinc = 0;
+ }
}
if (ha_alter_info->handler_flags & ALTER_COLUMN_UNVERSIONED) {
vers_change_fields_cache(ha_alter_info, ctx, table);
}
+ if (ha_alter_info->handler_flags & ALTER_RENAME_INDEX) {
+ innobase_rename_indexes_cache(ctx, ha_alter_info);
+ }
+
ctx->new_table->fts_doc_id_index
= ctx->new_table->fts
? dict_table_get_index_on_name(
@@ -9066,6 +10543,27 @@ alter_stats_norebuild(
}
}
+ for (const Alter_inplace_info::Rename_key_pair& pair :
+ ha_alter_info->rename_keys) {
+ dberr_t err = dict_stats_rename_index(ctx->new_table,
+ pair.old_key->name.str,
+ pair.new_key->name.str);
+
+ if (err != DB_SUCCESS) {
+ push_warning_printf(
+ thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ ER_ERROR_ON_RENAME,
+ "Error renaming an index of table '%s'"
+ " from '%s' to '%s' in InnoDB persistent"
+ " statistics storage: %s",
+ ctx->new_table->name.m_name,
+ pair.old_key->name.str,
+ pair.new_key->name.str,
+ ut_strerr(err));
+ }
+ }
+
for (i = 0; i < ctx->num_to_add_index; i++) {
dict_index_t* index = ctx->add_index[i];
DBUG_ASSERT(index->table == ctx->new_table);
@@ -9101,23 +10599,8 @@ alter_stats_rebuild(
DBUG_VOID_RETURN;
}
-#ifndef DBUG_OFF
- bool file_unreadable_orig = false;
-#endif /* DBUG_OFF */
-
- DBUG_EXECUTE_IF(
- "ib_rename_index_fail2",
- file_unreadable_orig = table->file_unreadable;
- table->file_unreadable = true;
- );
-
dberr_t ret = dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT);
- DBUG_EXECUTE_IF(
- "ib_rename_index_fail2",
- table->file_unreadable = file_unreadable_orig;
- );
-
if (ret != DB_SUCCESS) {
push_warning_printf(
thd,
@@ -9593,7 +11076,8 @@ foreign_fail:
bool fk_fail = innobase_update_foreign_cache(
ctx, m_user_thd) != DB_SUCCESS;
- if (!commit_cache_norebuild(ha_alter_info, ctx, table,
+ if (!commit_cache_norebuild(ha_alter_info, ctx,
+ altered_table, table,
trx)) {
fk_fail = true;
}
@@ -9663,6 +11147,9 @@ foreign_fail:
}
}
+ /* MDEV-17468: Avoid this at least when ctx->is_instant().
+ Currently dict_load_column_low() is the only place where
+ num_base for virtual columns is assigned to nonzero. */
if (ctx0->num_to_drop_vcol || ctx0->num_to_add_vcol) {
DBUG_ASSERT(ctx0->old_table->get_ref_count() == 1);
@@ -9680,6 +11167,12 @@ foreign_fail:
tb_name[strlen(m_prebuilt->table->name.m_name)] = 0;
dict_table_close(m_prebuilt->table, true, false);
+ if (ctx0->is_instant()) {
+ for (unsigned i = ctx0->old_n_v_cols; i--; ) {
+ UT_DELETE(ctx0->old_v_cols[i].v_indexes);
+ }
+ const_cast<unsigned&>(ctx0->old_n_v_cols) = 0;
+ }
dict_table_remove_from_cache(m_prebuilt->table);
m_prebuilt->table = dict_table_open_on_name(
tb_name, TRUE, TRUE, DICT_ERR_IGNORE_NONE);
@@ -9764,11 +11257,6 @@ foreign_fail:
DBUG_ASSERT(0 == strcmp(ctx->old_table->name.m_name,
ctx->tmp_name));
- DBUG_EXECUTE_IF(
- "ib_rename_index_fail3",
- DBUG_SET("+d,innodb_report_deadlock");
- );
-
if (dict_stats_drop_table(
ctx->new_table->name.m_name,
errstr, sizeof(errstr))
@@ -9784,11 +11272,6 @@ foreign_fail:
errstr);
}
- DBUG_EXECUTE_IF(
- "ib_rename_index_fail3",
- DBUG_SET("-d,innodb_report_deadlock");
- );
-
DBUG_EXECUTE_IF("ib_ddl_crash_before_commit",
DBUG_SUICIDE(););
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index 85a9cd7dbf0..4016b11387e 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -5978,7 +5978,7 @@ i_s_dict_fill_sys_tables(
ulint compact = DICT_TF_GET_COMPACT(table->flags);
ulint atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(
table->flags);
- const page_size_t& page_size = dict_tf_get_page_size(table->flags);
+ const ulint zip_size = dict_tf_get_zip_size(table->flags);
const char* row_format;
if (!compact) {
@@ -6007,10 +6007,7 @@ i_s_dict_fill_sys_tables(
OK(field_store_string(fields[SYS_TABLES_ROW_FORMAT], row_format));
- OK(fields[SYS_TABLES_ZIP_PAGE_SIZE]->store(
- page_size.is_compressed()
- ? page_size.physical()
- : 0, true));
+ OK(fields[SYS_TABLES_ZIP_PAGE_SIZE]->store(zip_size, true));
OK(field_store_string(fields[SYS_TABLES_SPACE_TYPE],
table->space_id ? "Single" : "System"));
@@ -7967,7 +7964,9 @@ i_s_dict_fill_sys_tablespaces(
DBUG_ENTER("i_s_dict_fill_sys_tablespaces");
- if (is_system_tablespace(space)) {
+ if (fil_space_t::full_crc32(flags)) {
+ row_format = NULL;
+ } else if (is_system_tablespace(space)) {
row_format = "Compact, Redundant or Dynamic";
} else if (FSP_FLAGS_GET_ZIP_SSIZE(flags)) {
row_format = "Compressed";
@@ -7991,7 +7990,7 @@ i_s_dict_fill_sys_tablespaces(
is_system_tablespace(space)
? "System" : "Single"));
- ulint cflags = fsp_flags_is_valid(flags, space)
+ ulint cflags = fil_space_t::is_valid_flags(flags, space)
? flags : fsp_flags_convert_from_101(flags);
if (cflags == ULINT_UNDEFINED) {
fields[SYS_TABLESPACES_PAGE_SIZE]->set_null();
@@ -8003,13 +8002,11 @@ i_s_dict_fill_sys_tablespaces(
DBUG_RETURN(0);
}
- const page_size_t page_size(cflags);
-
OK(fields[SYS_TABLESPACES_PAGE_SIZE]->store(
- page_size.logical(), true));
+ fil_space_t::logical_size(cflags), true));
OK(fields[SYS_TABLESPACES_ZIP_PAGE_SIZE]->store(
- page_size.physical(), true));
+ fil_space_t::physical_size(cflags), true));
char* filepath = NULL;
if (FSP_FLAGS_HAS_DATA_DIR(cflags)) {
@@ -8700,7 +8697,7 @@ static ST_FIELD_INFO innodb_tablespaces_scrubbing_fields_info[] =
#define TABLESPACES_SCRUBBING_COMPRESSED 2
{STRUCT_FLD(field_name, "COMPRESSED"),
- STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_length, 1),
STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
STRUCT_FLD(value, 0),
STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
@@ -8752,6 +8749,15 @@ static ST_FIELD_INFO innodb_tablespaces_scrubbing_fields_info[] =
STRUCT_FLD(old_name, ""),
STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+#define TABLESPACES_SCRUBBING_ON_SSD 8
+ {STRUCT_FLD(field_name, "ON_SSD"),
+ STRUCT_FLD(field_length, 1),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
END_OF_ST_FIELD_INFO
};
@@ -8823,6 +8829,8 @@ i_s_dict_fill_tablespaces_scrubbing(
}
}
+ OK(fields[TABLESPACES_SCRUBBING_ON_SSD]->store(!space->is_rotational(),
+ true));
OK(schema_table_store_record(thd, table_to_fill));
DBUG_RETURN(0);
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index 86deae0c015..d78fb0b92c9 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -334,8 +334,7 @@ ibuf_header_page_get(
block = buf_page_get(
page_id_t(IBUF_SPACE_ID, FSP_IBUF_HEADER_PAGE_NO),
- univ_page_size, RW_X_LATCH, mtr);
-
+ 0, RW_X_LATCH, mtr);
if (!block->page.encrypted) {
buf_block_dbg_add_level(block, SYNC_IBUF_HEADER);
@@ -366,7 +365,7 @@ ibuf_tree_root_get(
/* only segment list access is exclusive each other */
block = buf_page_get(
page_id_t(IBUF_SPACE_ID, FSP_IBUF_TREE_ROOT_PAGE_NO),
- univ_page_size, RW_SX_LATCH, mtr);
+ 0, RW_SX_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW);
@@ -481,7 +480,7 @@ ibuf_init_at_db_start(void)
block = buf_page_get(
page_id_t(IBUF_SPACE_ID, FSP_IBUF_TREE_ROOT_PAGE_NO),
- univ_page_size, RW_X_LATCH, &mtr);
+ 0, RW_X_LATCH, &mtr);
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE);
@@ -527,13 +526,8 @@ ibuf_max_size_update(
}
-/*********************************************************************//**
-Initializes an ibuf bitmap page. */
-void
-ibuf_bitmap_page_init(
-/*==================*/
- buf_block_t* block, /*!< in: bitmap page */
- mtr_t* mtr) /*!< in: mtr */
+/** Apply MLOG_IBUF_BITMAP_INIT when crash-upgrading */
+ATTRIBUTE_COLD void ibuf_bitmap_init_apply(buf_block_t* block)
{
page_t* page;
ulint byte_offset;
@@ -544,65 +538,41 @@ ibuf_bitmap_page_init(
/* Write all zeros to the bitmap */
compile_time_assert(!(IBUF_BITS_PER_PAGE % 2));
- byte_offset = UT_BITS_IN_BYTES(block->page.size.physical()
+ byte_offset = UT_BITS_IN_BYTES(block->physical_size()
* IBUF_BITS_PER_PAGE);
memset(page + IBUF_BITMAP, 0, byte_offset);
-
- /* The remaining area (up to the page trailer) is uninitialized. */
- mlog_write_initial_log_record(page, MLOG_IBUF_BITMAP_INIT, mtr);
-}
-
-/*********************************************************************//**
-Parses a redo log record of an ibuf bitmap page init.
-@return end of log record or NULL */
-byte*
-ibuf_parse_bitmap_init(
-/*===================*/
- byte* ptr, /*!< in: buffer */
- byte* end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */
- buf_block_t* block, /*!< in: block or NULL */
- mtr_t* mtr) /*!< in: mtr or NULL */
-{
- ut_ad(ptr != NULL);
- ut_ad(end_ptr != NULL);
-
- if (block) {
- ibuf_bitmap_page_init(block, mtr);
- }
-
- return(ptr);
}
# ifdef UNIV_DEBUG
/** Gets the desired bits for a given page from a bitmap page.
@param[in] page bitmap page
@param[in] page_id page id whose bits to get
-@param[in] page_size page id whose bits to get
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
@param[in,out] mtr mini-transaction holding an x-latch on the
bitmap page
@return value of bits */
-# define ibuf_bitmap_page_get_bits(page, page_id, page_size, bit, mtr) \
- ibuf_bitmap_page_get_bits_low(page, page_id, page_size, \
+# define ibuf_bitmap_page_get_bits(page, page_id, zip_size, bit, mtr) \
+ ibuf_bitmap_page_get_bits_low(page, page_id, zip_size, \
MTR_MEMO_PAGE_X_FIX, mtr, bit)
# else /* UNIV_DEBUG */
/** Gets the desired bits for a given page from a bitmap page.
@param[in] page bitmap page
@param[in] page_id page id whose bits to get
-@param[in] page_size page id whose bits to get
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
@param[in,out] mtr mini-transaction holding an x-latch on the
bitmap page
@return value of bits */
-# define ibuf_bitmap_page_get_bits(page, page_id, page_size, bit, mtr) \
- ibuf_bitmap_page_get_bits_low(page, page_id, page_size, bit)
+# define ibuf_bitmap_page_get_bits(page, page_id, zip_size, bit, mtr) \
+ ibuf_bitmap_page_get_bits_low(page, page_id, zip_size, bit)
# endif /* UNIV_DEBUG */
/** Gets the desired bits for a given page from a bitmap page.
@param[in] page bitmap page
@param[in] page_id page id whose bits to get
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] latch_type MTR_MEMO_PAGE_X_FIX, MTR_MEMO_BUF_FIX, ...
@param[in,out] mtr mini-transaction holding latch_type on the
bitmap page
@@ -613,7 +583,7 @@ ulint
ibuf_bitmap_page_get_bits_low(
const page_t* page,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
#ifdef UNIV_DEBUG
ulint latch_type,
mtr_t* mtr,
@@ -624,12 +594,14 @@ ibuf_bitmap_page_get_bits_low(
ulint bit_offset;
ulint map_byte;
ulint value;
+ const ulint size = zip_size ? zip_size : srv_page_size;
+ ut_ad(ut_is_2pow(zip_size));
ut_ad(bit < IBUF_BITS_PER_PAGE);
compile_time_assert(!(IBUF_BITS_PER_PAGE % 2));
ut_ad(mtr_memo_contains_page(mtr, page, latch_type));
- bit_offset = (page_id.page_no() % page_size.physical())
+ bit_offset = (page_id.page_no() & (size - 1))
* IBUF_BITS_PER_PAGE + bit;
byte_offset = bit_offset / 8;
@@ -653,7 +625,7 @@ ibuf_bitmap_page_get_bits_low(
/** Sets the desired bit for a given page in a bitmap page.
@param[in,out] page bitmap page
@param[in] page_id page id whose bits to set
-@param[in] page_size page size
+@param[in] physical_size page size
@param[in] bit IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ...
@param[in] val value to set
@param[in,out] mtr mtr containing an x-latch to the bitmap page */
@@ -662,7 +634,7 @@ void
ibuf_bitmap_page_set_bits(
page_t* page,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint physical_size,
ulint bit,
ulint val,
mtr_t* mtr)
@@ -676,7 +648,7 @@ ibuf_bitmap_page_set_bits(
ut_ad(mtr_memo_contains_page(mtr, page, MTR_MEMO_PAGE_X_FIX));
ut_ad(mtr->is_named_space(page_id.space()));
- bit_offset = (page_id.page_no() % page_size.physical())
+ bit_offset = (page_id.page_no() % physical_size)
* IBUF_BITS_PER_PAGE + bit;
byte_offset = bit_offset / 8;
@@ -703,26 +675,20 @@ ibuf_bitmap_page_set_bits(
/** Calculates the bitmap page number for a given page number.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] size page size
@return the bitmap page id where the file page is mapped */
-UNIV_INLINE
-const page_id_t
-ibuf_bitmap_page_no_calc(
- const page_id_t page_id,
- const page_size_t& page_size)
+inline page_id_t ibuf_bitmap_page_no_calc(const page_id_t page_id, ulint size)
{
- ulint bitmap_page_no;
-
- bitmap_page_no = FSP_IBUF_BITMAP_OFFSET
- + (page_id.page_no() & ~(page_size.physical() - 1));
+ if (!size) size = srv_page_size;
- return(page_id_t(page_id.space(), bitmap_page_no));
+ return page_id_t(page_id.space(), FSP_IBUF_BITMAP_OFFSET
+ + (page_id.page_no() & ~(size - 1)));
}
/** Gets the ibuf bitmap page where the bits describing a given file page are
stored.
@param[in] page_id page id of the file page
-@param[in] page_size page size of the file page
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] file file name
@param[in] line line where called
@param[in,out] mtr mini-transaction
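For the ibuf_bitmap_page_no_calc() rewrite above: the bitmap page for a given page is found by rounding the page number down to a size-aligned boundary and adding the bitmap offset. Assuming FSP_IBUF_BITMAP_OFFSET is 1 and 16 KiB pages, page 40000 maps to bitmap page 32769. A standalone sketch (constants are illustrative assumptions):

#include <cassert>
#include <cstdint>

// Assumed value of FSP_IBUF_BITMAP_OFFSET in the InnoDB sources.
static const uint32_t IBUF_BITMAP_OFFSET = 1;

// Each bitmap page describes the group of `size` consecutive pages that share
// the same size-aligned boundary.
static uint32_t bitmap_page_no(uint32_t page_no, uint32_t size)
{
    return IBUF_BITMAP_OFFSET + (page_no & ~(size - 1));
}

int main()
{
    const uint32_t size = 16384;                  // 16 KiB pages
    assert(bitmap_page_no(0, size)     == 1);
    assert(bitmap_page_no(16383, size) == 1);
    assert(bitmap_page_no(16384, size) == 16385);
    assert(bitmap_page_no(40000, size) == 32769);
}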
@@ -733,7 +699,7 @@ static
page_t*
ibuf_bitmap_get_map_page_func(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
const char* file,
unsigned line,
mtr_t* mtr)
@@ -741,8 +707,8 @@ ibuf_bitmap_get_map_page_func(
buf_block_t* block = NULL;
dberr_t err = DB_SUCCESS;
- block = buf_page_get_gen(ibuf_bitmap_page_no_calc(page_id, page_size),
- page_size, RW_X_LATCH, NULL, BUF_GET,
+ block = buf_page_get_gen(ibuf_bitmap_page_no_calc(page_id, zip_size),
+ zip_size, RW_X_LATCH, NULL, BUF_GET,
file, line, mtr, &err);
if (err != DB_SUCCESS) {
@@ -758,13 +724,13 @@ ibuf_bitmap_get_map_page_func(
/** Gets the ibuf bitmap page where the bits describing a given file page are
stored.
@param[in] page_id page id of the file page
-@param[in] page_size page size of the file page
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in,out] mtr mini-transaction
@return bitmap page where the file page is mapped, that is, the bitmap
page containing the descriptor bits for the file page; the bitmap page
is x-latched */
-#define ibuf_bitmap_get_map_page(page_id, page_size, mtr) \
- ibuf_bitmap_get_map_page_func(page_id, page_size, \
+#define ibuf_bitmap_get_map_page(page_id, zip_size, mtr) \
+ ibuf_bitmap_get_map_page_func(page_id, zip_size, \
__FILE__, __LINE__, mtr)
/************************************************************************//**
@@ -798,14 +764,14 @@ ibuf_set_free_bits_low(
}
bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
- block->page.size, mtr);
+ block->zip_size(), mtr);
#ifdef UNIV_IBUF_DEBUG
ut_a(val <= ibuf_index_page_calc_free(block));
#endif /* UNIV_IBUF_DEBUG */
ibuf_bitmap_page_set_bits(
- bitmap_page, block->page.id, block->page.size,
+ bitmap_page, block->page.id, block->physical_size(),
IBUF_BITMAP_FREE, val, mtr);
}
@@ -842,17 +808,14 @@ ibuf_set_free_bits_func(
block->page.id.space());
bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
- block->page.size, &mtr);
+ block->zip_size(), &mtr);
switch (space->purpose) {
case FIL_TYPE_LOG:
ut_ad(0);
break;
case FIL_TYPE_TABLESPACE:
- /* Avoid logging while fixing up truncate of table. */
- if (!srv_is_tablespace_truncated(block->page.id.space())) {
- break;
- }
+ break;
/* fall through */
case FIL_TYPE_TEMPORARY:
case FIL_TYPE_IMPORT:
@@ -887,7 +850,7 @@ ibuf_set_free_bits_func(
#endif /* UNIV_IBUF_DEBUG */
ibuf_bitmap_page_set_bits(
- bitmap_page, block->page.id, block->page.size,
+ bitmap_page, block->page.id, block->physical_size(),
IBUF_BITMAP_FREE, val, &mtr);
mtr_commit(&mtr);
@@ -937,7 +900,7 @@ ibuf_update_free_bits_low(
ut_a(!buf_block_get_page_zip(block));
ut_ad(mtr->is_named_space(block->page.id.space()));
- before = ibuf_index_page_calc_free_bits(block->page.size.logical(),
+ before = ibuf_index_page_calc_free_bits(srv_page_size,
max_ins_size);
after = ibuf_index_page_calc_free(block);
@@ -972,10 +935,10 @@ ibuf_update_free_bits_zip(
buf_frame_t* frame = buf_block_get_frame(block);
ut_a(frame);
ut_a(page_is_leaf(frame));
- ut_a(block->page.size.is_compressed());
+ ut_a(block->zip_size());
bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
- block->page.size, mtr);
+ block->zip_size(), mtr);
after = ibuf_index_page_calc_free_zip(block);
@@ -989,7 +952,7 @@ ibuf_update_free_bits_zip(
}
ibuf_bitmap_page_set_bits(
- bitmap_page, block->page.id, block->page.size,
+ bitmap_page, block->page.id, block->physical_size(),
IBUF_BITMAP_FREE, after, mtr);
}
@@ -1031,23 +994,19 @@ ibuf_update_free_bits_for_two_pages_low(
/** Returns TRUE if the page is one of the fixed address ibuf pages.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@return TRUE if a fixed address ibuf i/o page */
-UNIV_INLINE
-ibool
-ibuf_fixed_addr_page(
- const page_id_t page_id,
- const page_size_t& page_size)
+inline bool ibuf_fixed_addr_page(const page_id_t page_id, ulint zip_size)
{
return((page_id.space() == IBUF_SPACE_ID
&& page_id.page_no() == IBUF_TREE_ROOT_PAGE_NO)
- || ibuf_bitmap_page(page_id, page_size));
+ || ibuf_bitmap_page(page_id, zip_size));
}
/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages.
Must not be called when recv_no_ibuf_operations==true.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] x_latch FALSE if relaxed check (avoid latching the
bitmap page)
@param[in] file file name
@@ -1056,12 +1015,12 @@ bitmap page)
bitmap page if the page is not one of the fixed address ibuf pages, or NULL,
in which case a new transaction is created.
@return TRUE if level 2 or level 3 page */
-ibool
+bool
ibuf_page_low(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
#ifdef UNIV_DEBUG
- ibool x_latch,
+ bool x_latch,
#endif /* UNIV_DEBUG */
const char* file,
unsigned line,
@@ -1074,12 +1033,10 @@ ibuf_page_low(
ut_ad(!recv_no_ibuf_operations);
ut_ad(x_latch || mtr == NULL);
- if (ibuf_fixed_addr_page(page_id, page_size)) {
-
- return(TRUE);
+ if (ibuf_fixed_addr_page(page_id, zip_size)) {
+ return(true);
} else if (page_id.space() != IBUF_SPACE_ID) {
-
- return(FALSE);
+ return(false);
}
compile_time_assert(IBUF_SPACE_ID == 0);
@@ -1102,14 +1059,14 @@ ibuf_page_low(
dberr_t err = DB_SUCCESS;
buf_block_t* block = buf_page_get_gen(
- ibuf_bitmap_page_no_calc(page_id, page_size),
- page_size, RW_NO_LATCH, NULL, BUF_GET_NO_LATCH,
- file, line, &local_mtr, &err);
+ ibuf_bitmap_page_no_calc(page_id, zip_size),
+ zip_size, RW_NO_LATCH, NULL, BUF_GET_NO_LATCH,
+ file, line, &local_mtr, &err);
bitmap_page = buf_block_get_frame(block);
ret = ibuf_bitmap_page_get_bits_low(
- bitmap_page, page_id, page_size,
+ bitmap_page, page_id, zip_size,
MTR_MEMO_BUF_FIX, &local_mtr, IBUF_BITMAP_IBUF);
mtr_commit(&local_mtr);
@@ -1122,10 +1079,10 @@ ibuf_page_low(
mtr_start(mtr);
}
- bitmap_page = ibuf_bitmap_get_map_page_func(page_id, page_size,
+ bitmap_page = ibuf_bitmap_get_map_page_func(page_id, zip_size,
file, line, mtr);
- ret = ibuf_bitmap_page_get_bits(bitmap_page, page_id, page_size,
+ ret = ibuf_bitmap_page_get_bits(bitmap_page, page_id, zip_size,
IBUF_BITMAP_IBUF, mtr);
if (mtr == &local_mtr) {
@@ -1368,32 +1325,26 @@ ibuf_rec_get_counter(
}
}
-/****************************************************************//**
-Add accumulated operation counts to a permanent array. Both arrays must be
-of size IBUF_OP_COUNT. */
-static
-void
-ibuf_add_ops(
-/*=========*/
- ulint* arr, /*!< in/out: array to modify */
- const ulint* ops) /*!< in: operation counts */
+/**
+ Add accumulated operation counts to a permanent array.
+ Both arrays must be of size IBUF_OP_COUNT.
+*/
+static void ibuf_add_ops(Atomic_counter<ulint> *out, const ulint *in)
{
- ulint i;
-
- for (i = 0; i < IBUF_OP_COUNT; i++) {
- my_atomic_addlint(&arr[i], ops[i]);
- }
+ for (auto i = 0; i < IBUF_OP_COUNT; i++)
+ out[i]+= in[i];
}
+
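The rewritten ibuf_add_ops() above accumulates into Atomic_counter objects via operator+= instead of my_atomic_addlint(). A minimal sketch of the same pattern written directly against std::atomic, on the assumption that Atomic_counter is a thin wrapper over std::atomic with relaxed memory ordering:

#include <atomic>
#include <cassert>
#include <cstddef>

static const size_t OP_COUNT = 3;   // stands in for IBUF_OP_COUNT

// Accumulate a batch of per-merge operation counts into long-lived counters.
static void add_ops(std::atomic<unsigned long>* out, const unsigned long* in)
{
    for (size_t i = 0; i < OP_COUNT; i++)
        out[i].fetch_add(in[i], std::memory_order_relaxed);
}

int main()
{
    std::atomic<unsigned long> totals[OP_COUNT] = {{0}, {0}, {0}};
    const unsigned long batch[OP_COUNT] = {5, 2, 1};
    add_ops(totals, batch);
    add_ops(totals, batch);
    assert(totals[0].load() == 10 && totals[2].load() == 2);
}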
/****************************************************************//**
Print operation counts. The array must be of size IBUF_OP_COUNT. */
static
void
ibuf_print_ops(
/*===========*/
- const ulint* ops, /*!< in: operation counts */
- FILE* file) /*!< in: file where to print */
+ const Atomic_counter<ulint>* ops, /*!< in: operation counts */
+ FILE* file) /*!< in: file where to print */
{
static const char* op_names[] = {
"insert",
@@ -1406,7 +1357,7 @@ ibuf_print_ops(
for (i = 0; i < IBUF_OP_COUNT; i++) {
fprintf(file, "%s " ULINTPF "%s", op_names[i],
- ops[i], (i < (IBUF_OP_COUNT - 1)) ? ", " : "");
+ ulint{ops[i]}, (i < (IBUF_OP_COUNT - 1)) ? ", " : "");
}
putc('\n', file);
@@ -2004,11 +1955,11 @@ ibuf_add_free_page(void)
(level 2 page) */
const page_id_t page_id(IBUF_SPACE_ID, block->page.id.page_no());
- bitmap_page = ibuf_bitmap_get_map_page(page_id, univ_page_size, &mtr);
+ bitmap_page = ibuf_bitmap_get_map_page(page_id, 0, &mtr);
mutex_exit(&ibuf_mutex);
- ibuf_bitmap_page_set_bits(bitmap_page, page_id, univ_page_size,
+ ibuf_bitmap_page_set_bits(bitmap_page, page_id, srv_page_size,
IBUF_BITMAP_IBUF, TRUE, &mtr);
ibuf_mtr_commit(&mtr);
@@ -2079,7 +2030,7 @@ ibuf_remove_free_page(void)
compile_time_assert(IBUF_SPACE_ID == 0);
fseg_free_page(header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
- fil_system.sys_space, page_no, false, &mtr);
+ fil_system.sys_space, page_no, false, true, &mtr);
const page_id_t page_id(IBUF_SPACE_ID, page_no);
@@ -2097,7 +2048,7 @@ ibuf_remove_free_page(void)
{
buf_block_t* block;
- block = buf_page_get(page_id, univ_page_size, RW_X_LATCH, &mtr);
+ block = buf_page_get(page_id, 0, RW_X_LATCH, &mtr);
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE);
@@ -2117,13 +2068,13 @@ ibuf_remove_free_page(void)
/* Set the bit indicating that this page is no more an ibuf tree page
(level 2 page) */
- bitmap_page = ibuf_bitmap_get_map_page(page_id, univ_page_size, &mtr);
+ bitmap_page = ibuf_bitmap_get_map_page(page_id, 0, &mtr);
mutex_exit(&ibuf_mutex);
ibuf_bitmap_page_set_bits(
- bitmap_page, page_id, univ_page_size, IBUF_BITMAP_IBUF, FALSE,
- &mtr);
+ bitmap_page, page_id, srv_page_size,
+ IBUF_BITMAP_IBUF, FALSE, &mtr);
ut_d(buf_page_set_file_page_was_freed(page_id));
@@ -2973,7 +2924,7 @@ ibuf_get_volume_buffered(
block = buf_page_get(
page_id_t(IBUF_SPACE_ID, prev_page_no),
- univ_page_size, RW_X_LATCH, mtr);
+ 0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE);
@@ -3045,7 +2996,7 @@ count_later:
block = buf_page_get(
page_id_t(IBUF_SPACE_ID, next_page_no),
- univ_page_size, RW_X_LATCH, mtr);
+ 0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE);
@@ -3258,6 +3209,24 @@ ibuf_get_entry_counter_func(
}
}
+
+/** Translates the ibuf free bits to the free space on a page in bytes.
+@param[in]	physical_size	physical page size in bytes
+@param[in] bits value for ibuf bitmap bits
+@return maximum insert size after reorganize for the page */
+inline ulint
+ibuf_index_page_calc_free_from_bits(ulint physical_size, ulint bits)
+{
+ ut_ad(bits < 4);
+ ut_ad(physical_size > IBUF_PAGE_SIZE_PER_FREE_SPACE);
+
+ if (bits == 3) {
+ bits = 4;
+ }
+
+ return bits * physical_size / IBUF_PAGE_SIZE_PER_FREE_SPACE;
+}
+
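As a worked example of ibuf_index_page_calc_free_from_bits() above, assuming IBUF_PAGE_SIZE_PER_FREE_SPACE is 32 as in the InnoDB sources: on a 16 KiB page, bits 0..3 translate to 0, 512, 1024 and 2048 free bytes, because the value 3 is promoted to 4 before scaling. A standalone sketch:

#include <cassert>

static const unsigned PAGE_SIZE_PER_FREE_SPACE = 32;  // assumed IBUF_PAGE_SIZE_PER_FREE_SPACE

// Mirror of the 2-bit free-space encoding: bits == 3 is promoted to 4 before scaling.
static unsigned free_bytes_from_bits(unsigned physical_size, unsigned bits)
{
    assert(bits < 4);
    if (bits == 3)
        bits = 4;
    return bits * physical_size / PAGE_SIZE_PER_FREE_SPACE;
}

int main()
{
    assert(free_bytes_from_bits(16384, 0) == 0);
    assert(free_bytes_from_bits(16384, 1) == 512);
    assert(free_bytes_from_bits(16384, 2) == 1024);
    assert(free_bytes_from_bits(16384, 3) == 2048);
    assert(free_bytes_from_bits(8192, 3) == 1024);  // ROW_FORMAT=COMPRESSED 8 KiB
}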
/** Buffer an operation in the insert/delete buffer, instead of doing it
directly to the disk page, if this is possible.
@param[in] mode BTR_MODIFY_PREV or BTR_MODIFY_TREE
@@ -3269,7 +3238,7 @@ buffering
@param[in,out] index index where to insert; must not be unique
or clustered
@param[in] page_id page id where to insert
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in,out] thr query thread
@return DB_SUCCESS, DB_STRONG_FAIL or other error */
static MY_ATTRIBUTE((warn_unused_result))
@@ -3282,7 +3251,7 @@ ibuf_insert_low(
ulint entry_size,
dict_index_t* index,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
que_thr_t* thr)
{
big_rec_t* dummy_big_rec;
@@ -3392,6 +3361,8 @@ ibuf_insert_low(
? &min_n_recs
: NULL, &mtr);
+ const ulint physical_size = zip_size ? zip_size : srv_page_size;
+
if (op == IBUF_OP_DELETE
&& (min_n_recs < 2 || buf_pool_watch_occurred(page_id))) {
/* The page could become empty after the record is
@@ -3433,8 +3404,7 @@ fail_exit:
ibuf_mtr_start(&bitmap_mtr);
index->set_modified(bitmap_mtr);
- bitmap_page = ibuf_bitmap_get_map_page(page_id, page_size,
- &bitmap_mtr);
+ bitmap_page = ibuf_bitmap_get_map_page(page_id, zip_size, &bitmap_mtr);
/* We check if the index page is suitable for buffered entries */
@@ -3448,11 +3418,12 @@ fail_exit:
if (op == IBUF_OP_INSERT) {
ulint bits = ibuf_bitmap_page_get_bits(
- bitmap_page, page_id, page_size, IBUF_BITMAP_FREE,
+ bitmap_page, page_id, physical_size, IBUF_BITMAP_FREE,
&bitmap_mtr);
if (buffered + entry_size + page_dir_calc_reserved_space(1)
- > ibuf_index_page_calc_free_from_bits(page_size, bits)) {
+ > ibuf_index_page_calc_free_from_bits(physical_size,
+ bits)) {
/* Release the bitmap page latch early. */
ibuf_mtr_commit(&bitmap_mtr);
@@ -3495,11 +3466,11 @@ fail_exit:
buffered entries for this index page, if the bit is not set yet */
old_bit_value = ibuf_bitmap_page_get_bits(
- bitmap_page, page_id, page_size,
+ bitmap_page, page_id, physical_size,
IBUF_BITMAP_BUFFERED, &bitmap_mtr);
if (!old_bit_value) {
- ibuf_bitmap_page_set_bits(bitmap_page, page_id, page_size,
+ ibuf_bitmap_page_set_bits(bitmap_page, page_id, physical_size,
IBUF_BITMAP_BUFFERED, TRUE,
&bitmap_mtr);
}
@@ -3593,23 +3564,23 @@ func_exit:
return(err);
}
-/** Buffer an operation in the insert/delete buffer, instead of doing it
-directly to the disk page, if this is possible. Does not do it if the index
+/** Buffer an operation in the change buffer, instead of applying it
+directly to the file page, if this is possible. Does not do it if the index
is clustered or unique.
@param[in] op operation type
@param[in] entry index entry to insert
@param[in,out] index index where to insert
@param[in] page_id page id where to insert
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in,out] thr query thread
-@return TRUE if success */
-ibool
+@return true if success */
+bool
ibuf_insert(
ibuf_op_t op,
const dtuple_t* entry,
dict_index_t* index,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
que_thr_t* thr)
{
dberr_t err;
@@ -3637,7 +3608,7 @@ ibuf_insert(
case IBUF_USE_NONE:
case IBUF_USE_DELETE:
case IBUF_USE_DELETE_MARK:
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(false);
case IBUF_USE_INSERT:
case IBUF_USE_INSERT_DELETE_MARK:
case IBUF_USE_ALL:
@@ -3648,7 +3619,7 @@ ibuf_insert(
switch (use) {
case IBUF_USE_NONE:
case IBUF_USE_INSERT:
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(false);
case IBUF_USE_DELETE_MARK:
case IBUF_USE_DELETE:
case IBUF_USE_INSERT_DELETE_MARK:
@@ -3662,7 +3633,7 @@ ibuf_insert(
case IBUF_USE_NONE:
case IBUF_USE_INSERT:
case IBUF_USE_INSERT_DELETE_MARK:
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(false);
case IBUF_USE_DELETE_MARK:
case IBUF_USE_DELETE:
case IBUF_USE_ALL:
@@ -3702,7 +3673,7 @@ check_watch:
is being buffered, have this request executed
directly on the page in the buffer pool after the
buffered entries for this page have been merged. */
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(false);
}
}
@@ -3713,30 +3684,22 @@ skip_watch:
>= page_get_free_space_of_empty(dict_table_is_comp(index->table))
/ 2) {
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(false);
}
err = ibuf_insert_low(BTR_MODIFY_PREV, op, no_counter,
entry, entry_size,
- index, page_id, page_size, thr);
+ index, page_id, zip_size, thr);
if (err == DB_FAIL) {
err = ibuf_insert_low(BTR_MODIFY_TREE | BTR_LATCH_FOR_INSERT,
op, no_counter, entry, entry_size,
- index, page_id, page_size, thr);
+ index, page_id, zip_size, thr);
}
- if (err == DB_SUCCESS) {
-#ifdef UNIV_IBUF_DEBUG
- /* fprintf(stderr, "Ibuf insert for page no %lu of index %s\n",
- page_no, index->name); */
-#endif
- DBUG_RETURN(TRUE);
+ ut_a(err == DB_SUCCESS || err == DB_STRONG_FAIL
+ || err == DB_TOO_BIG_RECORD);
- } else {
- ut_a(err == DB_STRONG_FAIL || err == DB_TOO_BIG_RECORD);
-
- DBUG_RETURN(FALSE);
- }
+ DBUG_RETURN(err == DB_SUCCESS);
}
/********************************************************************//**
@@ -3800,13 +3763,13 @@ ibuf_insert_to_index_page_low(
"InnoDB: that table.\n", stderr);
bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
- block->page.size, mtr);
+ block->zip_size(), mtr);
old_bits = ibuf_bitmap_page_get_bits(
- bitmap_page, block->page.id, block->page.size,
+ bitmap_page, block->page.id, block->zip_size(),
IBUF_BITMAP_FREE, mtr);
ib::error() << "page " << block->page.id << ", size "
- << block->page.size.physical() << ", bitmap bits " << old_bits;
+ << block->physical_size() << ", bitmap bits " << old_bits;
ib::error() << BUG_REPORT_MSG;
@@ -4320,15 +4283,16 @@ subsequently was dropped.
@param[in,out] block if page has been read from disk,
pointer to the page x-latched, else NULL
@param[in] page_id page id of the index page
-@param[in] update_ibuf_bitmap normally this is set to TRUE, but
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param[in] update_ibuf_bitmap normally this is set, but
if we have deleted or are deleting the tablespace, then we naturally do not
want to update a non-existent bitmap page */
void
ibuf_merge_or_delete_for_page(
buf_block_t* block,
const page_id_t page_id,
- const page_size_t* page_size,
- ibool update_ibuf_bitmap)
+ ulint zip_size,
+ bool update_ibuf_bitmap)
{
mem_heap_t* heap;
btr_pcur_t pcur;
@@ -4354,38 +4318,23 @@ ibuf_merge_or_delete_for_page(
return;
}
- /* We cannot refer to page_size in the following, because it is passed
- as NULL (it is unknown) when buf_read_ibuf_merge_pages() is merging
- (discarding) changes for a dropped tablespace. When block != NULL or
- update_ibuf_bitmap is specified, then page_size must be known.
- That is why we will repeat the check below, with page_size in
- place of univ_page_size. Passing univ_page_size assumes that the
- uncompressed page size always is a power-of-2 multiple of the
- compressed page size. */
-
- if (ibuf_fixed_addr_page(page_id, univ_page_size)
- || fsp_descr_page(page_id, univ_page_size)) {
+ const ulint physical_size = zip_size ? zip_size : srv_page_size;
+
+ if (ibuf_fixed_addr_page(page_id, physical_size)
+ || fsp_descr_page(page_id, physical_size)) {
return;
}
fil_space_t* space;
if (update_ibuf_bitmap) {
-
- ut_ad(page_size != NULL);
-
- if (ibuf_fixed_addr_page(page_id, *page_size)
- || fsp_descr_page(page_id, *page_size)) {
- return;
- }
-
space = fil_space_acquire_silent(page_id.space());
if (UNIV_UNLIKELY(!space)) {
/* Do not try to read the bitmap page from the
non-existent tablespace, delete the ibuf records */
block = NULL;
- update_ibuf_bitmap = FALSE;
+ update_ibuf_bitmap = false;
} else {
page_t* bitmap_page = NULL;
ulint bitmap_bits = 0;
@@ -4393,12 +4342,12 @@ ibuf_merge_or_delete_for_page(
ibuf_mtr_start(&mtr);
bitmap_page = ibuf_bitmap_get_map_page(
- page_id, *page_size, &mtr);
+ page_id, zip_size, &mtr);
if (bitmap_page &&
fil_page_get_type(bitmap_page) != FIL_PAGE_TYPE_ALLOCATED) {
bitmap_bits = ibuf_bitmap_page_get_bits(
- bitmap_page, page_id, *page_size,
+ bitmap_page, page_id, zip_size,
IBUF_BITMAP_BUFFERED, &mtr);
}
@@ -4412,8 +4361,8 @@ ibuf_merge_or_delete_for_page(
}
}
} else if (block != NULL
- && (ibuf_fixed_addr_page(page_id, *page_size)
- || fsp_descr_page(page_id, *page_size))) {
+ && (ibuf_fixed_addr_page(page_id, physical_size)
+ || fsp_descr_page(page_id, physical_size))) {
return;
} else {
@@ -4646,23 +4595,23 @@ reset_bit:
if (update_ibuf_bitmap) {
page_t* bitmap_page;
- bitmap_page = ibuf_bitmap_get_map_page(page_id, *page_size,
+ bitmap_page = ibuf_bitmap_get_map_page(page_id, zip_size,
&mtr);
ibuf_bitmap_page_set_bits(
- bitmap_page, page_id, *page_size,
+ bitmap_page, page_id, physical_size,
IBUF_BITMAP_BUFFERED, FALSE, &mtr);
if (block != NULL) {
ulint old_bits = ibuf_bitmap_page_get_bits(
- bitmap_page, page_id, *page_size,
+ bitmap_page, page_id, zip_size,
IBUF_BITMAP_FREE, &mtr);
ulint new_bits = ibuf_index_page_calc_free(block);
if (old_bits != new_bits) {
ibuf_bitmap_page_set_bits(
- bitmap_page, page_id, *page_size,
+ bitmap_page, page_id, physical_size,
IBUF_BITMAP_FREE, new_bits, &mtr);
}
}
@@ -4677,7 +4626,7 @@ reset_bit:
btr_pcur_close(&pcur);
mem_heap_free(heap);
- my_atomic_addlint(&ibuf->n_merges, 1);
+ ibuf->n_merges++;
ibuf_add_ops(ibuf->n_merged_ops, mops);
ibuf_add_ops(ibuf->n_discarded_ops, dops);
}
@@ -4805,7 +4754,7 @@ ibuf_print(
ibuf->size,
ibuf->free_list_len,
ibuf->seg_size,
- ibuf->n_merges);
+ ulint{ibuf->n_merges});
fputs("merged operations:\n ", file);
ibuf_print_ops(ibuf->n_merged_ops, file);
@@ -4816,20 +4765,6 @@ ibuf_print(
mutex_exit(&ibuf_mutex);
}
-/** Check if a page is all zeroes.
-@param[in] read_buf database page
-@param[in] size page size
-@return whether the page is all zeroes */
-static bool buf_page_is_zeroes(const byte* read_buf, const page_size_t& size)
-{
- for (ulint i = 0; i < size.physical(); i++) {
- if (read_buf[i] != 0) {
- return false;
- }
- }
- return true;
-}
-
/** Check the insert buffer bitmaps on IMPORT TABLESPACE.
@param[in] trx transaction
@param[in,out] space tablespace being imported
@@ -4839,7 +4774,9 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
ulint page_no;
ut_ad(trx->mysql_thd);
ut_ad(space->purpose == FIL_TYPE_IMPORT);
- const page_size_t page_size(space->flags);
+
+ const ulint zip_size = space->zip_size();
+ const ulint physical_size = space->physical_size();
/* fil_space_t::size and fil_space_t::free_limit would still be 0
at this point. So, we will have to read page 0. */
ut_ad(!space->free_limit);
@@ -4848,7 +4785,8 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
mtr_t mtr;
ulint size;
mtr.start();
- if (buf_block_t* sp = buf_page_get(page_id_t(space->id, 0), page_size,
+ if (buf_block_t* sp = buf_page_get(page_id_t(space->id, 0),
+ zip_size,
RW_S_LATCH, &mtr)) {
size = std::min(
mach_read_from_4(FSP_HEADER_OFFSET + FSP_FREE_LIMIT
@@ -4872,7 +4810,7 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
below page_no is measured in number of pages since the beginning of
the space, as usual. */
- for (page_no = 0; page_no < size; page_no += page_size.physical()) {
+ for (page_no = 0; page_no < size; page_no += physical_size) {
page_t* bitmap_page;
ulint i;
@@ -4888,21 +4826,21 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
ibuf_enter(&mtr);
bitmap_page = ibuf_bitmap_get_map_page(
- page_id_t(space->id, page_no), page_size, &mtr);
+ page_id_t(space->id, page_no), zip_size, &mtr);
- if (buf_page_is_zeroes(bitmap_page, page_size)) {
+ if (buf_page_is_zeroes(bitmap_page, physical_size)) {
/* This means we got all-zero page instead of
ibuf bitmap page. The subsequent page should be
all-zero pages. */
#ifdef UNIV_DEBUG
for (ulint curr_page = page_no + 1;
- curr_page < page_size.physical(); curr_page++) {
+ curr_page < physical_size; curr_page++) {
buf_block_t* block = buf_page_get(
page_id_t(space->id, curr_page),
- page_size, RW_S_LATCH, &mtr);
+ zip_size, RW_S_LATCH, &mtr);
page_t* page = buf_block_get_frame(block);
- ut_ad(buf_page_is_zeroes(page, page_size));
+ ut_ad(buf_page_is_zeroes(page, physical_size));
}
#endif /* UNIV_DEBUG */
ibuf_exit(&mtr);
@@ -4915,17 +4853,13 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
return DB_CORRUPTION;
}
- for (i = FSP_IBUF_BITMAP_OFFSET + 1;
- i < page_size.physical();
- i++) {
-
+ for (i = FSP_IBUF_BITMAP_OFFSET + 1; i < physical_size; i++) {
const ulint offset = page_no + i;
-
const page_id_t cur_page_id(space->id, offset);
if (ibuf_bitmap_page_get_bits(
- bitmap_page, cur_page_id, page_size,
- IBUF_BITMAP_IBUF, &mtr)) {
+ bitmap_page, cur_page_id, zip_size,
+ IBUF_BITMAP_IBUF, &mtr)) {
mutex_exit(&ibuf_mutex);
ibuf_exit(&mtr);
@@ -4942,7 +4876,7 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
}
if (ibuf_bitmap_page_get_bits(
- bitmap_page, cur_page_id, page_size,
+ bitmap_page, cur_page_id, zip_size,
IBUF_BITMAP_BUFFERED, &mtr)) {
ib_errf(trx->mysql_thd,
@@ -4957,7 +4891,8 @@ dberr_t ibuf_check_bitmap_on_import(const trx_t* trx, fil_space_t* space)
slightly corrupted tables can be
imported and dumped. Clear the bit. */
ibuf_bitmap_page_set_bits(
- bitmap_page, cur_page_id, page_size,
+ bitmap_page, cur_page_id,
+ physical_size,
IBUF_BITMAP_BUFFERED, FALSE, &mtr);
}
}
@@ -4987,18 +4922,18 @@ ibuf_set_bitmap_for_bulk_load(
free_val = ibuf_index_page_calc_free(block);
mtr_start(&mtr);
- mtr.set_named_space_id(block->page.id.space());
+ fil_space_t* space = mtr.set_named_space_id(block->page.id.space());
bitmap_page = ibuf_bitmap_get_map_page(block->page.id,
- block->page.size, &mtr);
+ space->zip_size(), &mtr);
free_val = reset ? 0 : ibuf_index_page_calc_free(block);
ibuf_bitmap_page_set_bits(
- bitmap_page, block->page.id, block->page.size,
+ bitmap_page, block->page.id, block->physical_size(),
IBUF_BITMAP_FREE, free_val, &mtr);
ibuf_bitmap_page_set_bits(
- bitmap_page, block->page.id, block->page.size,
+ bitmap_page, block->page.id, block->physical_size(),
IBUF_BITMAP_BUFFERED, FALSE, &mtr);
mtr_commit(&mtr);
diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h
index 2d69256f0e3..adfd31aa870 100644
--- a/storage/innobase/include/btr0btr.h
+++ b/storage/innobase/include/btr0btr.h
@@ -172,24 +172,19 @@ record is in spatial index */
| BTR_LATCH_FOR_DELETE \
| BTR_MODIFY_EXTERNAL)))
-/**************************************************************//**
-Report that an index page is corrupted. */
-void
-btr_corruption_report(
-/*==================*/
- const buf_block_t* block, /*!< in: corrupted block */
- const dict_index_t* index) /*!< in: index tree */
- ATTRIBUTE_COLD __attribute__((nonnull));
+/** Report that an index page is corrupted.
+@param[in]	block	buffer block
+@param[in]	index	index tree */
+ATTRIBUTE_COLD ATTRIBUTE_NORETURN __attribute__((nonnull))
+void btr_corruption_report(const buf_block_t* block, const dict_index_t* index);
/** Assert that a B-tree page is not corrupted.
@param block buffer block containing a B-tree page
@param index the B-tree index */
-#define btr_assert_not_corrupted(block, index) \
- if ((ibool) !!page_is_comp(buf_block_get_frame(block)) \
- != dict_table_is_comp((index)->table)) { \
- btr_corruption_report(block, index); \
- ut_error; \
- }
+#define btr_assert_not_corrupted(block, index) \
+ if (!!page_is_comp(buf_block_get_frame(block)) \
+ != index->table->not_redundant()) \
+ btr_corruption_report(block, index)
/**************************************************************//**
Gets the root node of a tree and sx-latches it for segment access.
@@ -224,6 +219,7 @@ btr_height_get(
/** Gets a buffer page and declares its latching order level.
@param[in] page_id page id
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] mode latch mode
@param[in] file file name
@param[in] line line where called
@@ -235,7 +231,7 @@ UNIV_INLINE
buf_block_t*
btr_block_get_func(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
ulint mode,
const char* file,
unsigned line,
@@ -245,28 +241,28 @@ btr_block_get_func(
# ifdef UNIV_DEBUG
/** Gets a buffer page and declares its latching order level.
@param page_id tablespace/page identifier
-@param page_size page size
+@param zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param mode latch mode
@param index index tree, may be NULL if not the insert buffer tree
@param mtr mini-transaction handle
@return the block descriptor */
-# define btr_block_get(page_id, page_size, mode, index, mtr) \
- btr_block_get_func(page_id, page_size, mode, \
+# define btr_block_get(page_id, zip_size, mode, index, mtr) \
+ btr_block_get_func(page_id, zip_size, mode, \
__FILE__, __LINE__, (dict_index_t*)index, mtr)
# else /* UNIV_DEBUG */
/** Gets a buffer page and declares its latching order level.
@param page_id tablespace/page identifier
-@param page_size page size
+@param zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param mode latch mode
@param index index tree, may be NULL if not the insert buffer tree
@param mtr mini-transaction handle
@return the block descriptor */
-# define btr_block_get(page_id, page_size, mode, index, mtr) \
- btr_block_get_func(page_id, page_size, mode, __FILE__, __LINE__, (dict_index_t*)index, mtr)
+# define btr_block_get(page_id, zip_size, mode, index, mtr) \
+ btr_block_get_func(page_id, zip_size, mode, __FILE__, __LINE__, (dict_index_t*)index, mtr)
# endif /* UNIV_DEBUG */
/** Gets a buffer page and declares its latching order level.
@param page_id tablespace/page identifier
-@param page_size page size
+@param zip_size compressed page size in bytes or 0 for uncompressed pages
@param mode latch mode
@param index index tree, may be NULL if not the insert buffer tree
@param mtr mini-transaction handle
@@ -274,9 +270,8 @@ btr_block_get_func(
UNIV_INLINE
page_t*
btr_page_get(
-/*=========*/
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
ulint mode,
dict_index_t* index,
mtr_t* mtr)
@@ -358,40 +353,33 @@ btr_node_ptr_get_child_page_no(
@param[in] type type of the index
@param[in,out] space tablespace where created
@param[in] index_id index id
-@param[in] index index, or NULL when applying TRUNCATE
-log record during recovery
-@param[in] btr_redo_create_info used for applying TRUNCATE log
-@param[in] mtr mini-transaction handle
-record during recovery
-@return page number of the created root, FIL_NULL if did not succeed */
+@param[in] index index
+@param[in,out] mtr mini-transaction
+@return page number of the created root
+@retval FIL_NULL if did not succeed */
ulint
btr_create(
ulint type,
fil_space_t* space,
index_id_t index_id,
dict_index_t* index,
- const btr_create_t* btr_redo_create_info,
mtr_t* mtr);
/** Free a persistent index tree if it exists.
@param[in] page_id root page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] index_id PAGE_INDEX_ID contents
@param[in,out] mtr mini-transaction */
void
btr_free_if_exists(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
index_id_t index_id,
mtr_t* mtr);
-/** Free an index tree in a temporary tablespace or during TRUNCATE TABLE.
-@param[in] page_id root page id
-@param[in] page_size page size */
-void
-btr_free(
- const page_id_t page_id,
- const page_size_t& page_size);
+/** Free an index tree in a temporary tablespace.
+@param[in] page_id root page id */
+void btr_free(const page_id_t page_id);
/** Read the last used AUTO_INCREMENT value from PAGE_ROOT_AUTO_INC.
@param[in,out] index clustered index
@@ -421,6 +409,12 @@ void
btr_write_autoinc(dict_index_t* index, ib_uint64_t autoinc, bool reset = false)
MY_ATTRIBUTE((nonnull));
+/** Write instant ALTER TABLE metadata to a root page.
+@param[in,out] root clustered index root page
+@param[in] index clustered index with instant ALTER TABLE
+@param[in,out] mtr mini-transaction */
+void btr_set_instant(buf_block_t* root, const dict_index_t& index, mtr_t* mtr);
+
/*************************************************************//**
Makes tree one level higher by splitting the root, and inserts
the tuple. It is assumed that mtr contains an x-latch on the tree.
@@ -793,21 +787,23 @@ dberr_t
btr_validate_index(
/*===============*/
dict_index_t* index, /*!< in: index */
- const trx_t* trx, /*!< in: transaction or 0 */
- bool lockout)/*!< in: true if X-latch index is intended */
+ const trx_t* trx) /*!< in: transaction or 0 */
MY_ATTRIBUTE((warn_unused_result));
-/*************************************************************//**
-Removes a page from the level list of pages. */
-UNIV_INTERN
+/** Remove a page from the level list of pages.
+@param[in] space space where removed
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param[in,out] page page to remove
+@param[in] index index tree
+@param[in,out] mtr mini-transaction */
void
btr_level_list_remove_func(
-/*=======================*/
- ulint space, /*!< in: space where removed */
- const page_size_t& page_size,/*!< in: page size */
- page_t* page, /*!< in/out: page to remove */
- dict_index_t* index, /*!< in: index tree */
- mtr_t* mtr); /*!< in/out: mini-transaction */
+ ulint space,
+ ulint zip_size,
+ page_t* page,
+ dict_index_t* index,
+ mtr_t* mtr);
+
/*************************************************************//**
Removes a page from the level list of pages.
@param space in: space where removed
@@ -842,5 +838,6 @@ btr_lift_page_up(
/****************************************************************
Global variable controlling if scrubbing should be performed */
extern my_bool srv_immediate_scrub_data_uncompressed;
+extern Atomic_counter<uint32_t> btr_validate_index_running;
#endif
diff --git a/storage/innobase/include/btr0btr.ic b/storage/innobase/include/btr0btr.ic
index 2669611a9e6..3cdf279f25f 100644
--- a/storage/innobase/include/btr0btr.ic
+++ b/storage/innobase/include/btr0btr.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2017, MariaDB Corporation.
+Copyright (c) 2015, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -31,6 +31,7 @@ Created 6/2/1994 Heikki Tuuri
/** Gets a buffer page and declares its latching order level.
@param[in] page_id page id
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] mode latch mode
@param[in] file file name
@param[in] line line where called
@@ -42,7 +43,7 @@ UNIV_INLINE
buf_block_t*
btr_block_get_func(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
ulint mode,
const char* file,
unsigned line,
@@ -53,7 +54,7 @@ btr_block_get_func(
dberr_t err=DB_SUCCESS;
block = buf_page_get_gen(
- page_id, page_size, mode, NULL, BUF_GET, file, line, mtr, &err);
+ page_id, zip_size, mode, NULL, BUF_GET, file, line, mtr, &err);
if (err == DB_DECRYPTION_FAILED) {
if (index && index->table) {
@@ -96,7 +97,7 @@ btr_page_set_index_id(
}
/** Gets a buffer page and declares its latching order level.
-@param space tablespace identifier
+@param page_id tablespace/page identifier
@param zip_size compressed page size in bytes or 0 for uncompressed pages
@param page_no page number
@param mode latch mode
@@ -106,9 +107,8 @@ btr_page_set_index_id(
UNIV_INLINE
page_t*
btr_page_get(
-/*=========*/
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
ulint mode,
dict_index_t* index,
mtr_t* mtr)
@@ -116,7 +116,7 @@ btr_page_get(
buf_block_t* block=NULL;
buf_frame_t* frame=NULL;
- block = btr_block_get(page_id, page_size, mode, index, mtr);
+ block = btr_block_get(page_id, zip_size, mode, index, mtr);
if (block) {
frame = buf_block_get_frame(block);
diff --git a/storage/innobase/include/btr0bulk.h b/storage/innobase/include/btr0bulk.h
index 846f59760bd..0cabbcab7cd 100644
--- a/storage/innobase/include/btr0bulk.h
+++ b/storage/innobase/include/btr0bulk.h
@@ -288,8 +288,7 @@ public:
ut_ad(!dict_index_is_spatial(index));
#ifdef UNIV_DEBUG
if (m_flush_observer)
- my_atomic_addlint(&m_index->table->space->redo_skipped_count,
- 1);
+ m_index->table->space->redo_skipped_count++;
#endif /* UNIV_DEBUG */
}
@@ -298,8 +297,7 @@ public:
{
#ifdef UNIV_DEBUG
if (m_flush_observer)
- my_atomic_addlint(&m_index->table->space->redo_skipped_count,
- ulint(-1));
+ m_index->table->space->redo_skipped_count--;
#endif /* UNIV_DEBUG */
}
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index 1ddec6b9815..9acde0e3f94 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -728,11 +728,12 @@ btr_free_externally_stored_field(
ignored if rec == NULL */
bool rollback, /*!< in: performing rollback? */
mtr_t* local_mtr); /*!< in: mtr containing the latch */
+
/** Copies the prefix of an externally stored field of a record.
The clustered index record must be protected by a lock or a page latch.
@param[out] buf the field, or a prefix of it
@param[in] len length of buf, in bytes
-@param[in] page_size BLOB page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] data 'internally' stored part of the field
containing also the reference to the external part; must be protected by
a lock or a page latch
@@ -743,7 +744,7 @@ ulint
btr_copy_externally_stored_field_prefix(
byte* buf,
ulint len,
- const page_size_t& page_size,
+ ulint zip_size,
const byte* data,
ulint local_len);
@@ -753,7 +754,7 @@ The clustered index record must be protected by a lock or a page latch.
@param[in] data 'internally' stored part of the field
containing also the reference to the external part; must be protected by
a lock or a page latch
-@param[in] page_size BLOB page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] local_len length of data
@param[in,out] heap mem heap
@return the whole field copied to heap */
@@ -761,7 +762,7 @@ byte*
btr_copy_externally_stored_field(
ulint* len,
const byte* data,
- const page_size_t& page_size,
+ ulint zip_size,
ulint local_len,
mem_heap_t* heap);
@@ -769,7 +770,7 @@ btr_copy_externally_stored_field(
@param[in] rec record in a clustered index; must be
protected by a lock or a page latch
@param[in] offset array returned by rec_get_offsets()
-@param[in] page_size BLOB page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] no field number
@param[out] len length of the field
@param[in,out] heap mem heap
@@ -778,7 +779,7 @@ byte*
btr_rec_copy_externally_stored_field(
const rec_t* rec,
const ulint* offsets,
- const page_size_t& page_size,
+ ulint zip_size,
ulint no,
ulint* len,
mem_heap_t* heap);
@@ -821,6 +822,7 @@ btr_rec_set_deleted_flag(
/** Latches the leaf page or pages requested.
@param[in] block leaf page where the search converged
@param[in] page_id page id of the leaf
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] latch_mode BTR_SEARCH_LEAF, ...
@param[in] cursor cursor
@param[in] mtr mini-transaction
@@ -829,7 +831,7 @@ btr_latch_leaves_t
btr_cur_latch_leaves(
buf_block_t* block,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
ulint latch_mode,
btr_cur_t* cursor,
mtr_t* mtr);
diff --git a/storage/innobase/include/btr0defragment.h b/storage/innobase/include/btr0defragment.h
index 9c78ec412a2..8965889b7e9 100644
--- a/storage/innobase/include/btr0defragment.h
+++ b/storage/innobase/include/btr0defragment.h
@@ -26,9 +26,9 @@ this program; if not, write to the Free Software Foundation, Inc.,
#define BTR_DEFRAGMENT_MAX_N_PAGES 32
/** stats in btr_defragment */
-extern ulint btr_defragment_compression_failures;
-extern ulint btr_defragment_failures;
-extern ulint btr_defragment_count;
+extern Atomic_counter<ulint> btr_defragment_compression_failures;
+extern Atomic_counter<ulint> btr_defragment_failures;
+extern Atomic_counter<ulint> btr_defragment_count;
/** Item in the work queue for btr_degrament_thread. */
struct btr_defragment_item_t
diff --git a/storage/innobase/include/btr0types.h b/storage/innobase/include/btr0types.h
index f8685d34764..0eb89f28de1 100644
--- a/storage/innobase/include/btr0types.h
+++ b/storage/innobase/include/btr0types.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2018, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -27,7 +28,6 @@ Created 2/17/1996 Heikki Tuuri
#define btr0types_h
#include "page0types.h"
-#include "page0size.h"
#include "rem0types.h"
/** Persistent cursor */
@@ -49,41 +49,17 @@ extern ulong btr_ahi_parts;
/** The size of a reference to data stored on a different page.
The reference is stored at the end of the prefix of the field
in the index record. */
+#define FIELD_REF_SIZE 20U
#define BTR_EXTERN_FIELD_REF_SIZE FIELD_REF_SIZE
/** If the data don't exceed the size, the data are stored locally. */
#define BTR_EXTERN_LOCAL_STORED_MAX_SIZE \
(BTR_EXTERN_FIELD_REF_SIZE * 2)
-/** The information is used for creating a new index tree when
-applying TRUNCATE log record during recovery */
-struct btr_create_t {
-
- explicit btr_create_t(const byte* const ptr)
- :
- format_flags(),
- n_fields(),
- field_len(),
- fields(ptr),
- trx_id_pos(ULINT_UNDEFINED)
- {
- /* Do nothing */
- }
-
- /** Page format */
- ulint format_flags;
-
- /** Numbr of index fields */
- ulint n_fields;
-
- /** The length of the encoded meta-data */
- ulint field_len;
-
- /** Field meta-data, encoded. */
- const byte* const fields;
-
- /** Position of trx-id column. */
- ulint trx_id_pos;
-};
+/** A field reference full of zeroes, for use in assertions and checks,
+and dummy default values of instantly dropped columns.
+Initially, BLOB field references are set to zero, in
+dtuple_convert_big_rec(). */
+extern const byte field_ref_zero[UNIV_PAGE_SIZE_MAX];
#endif
diff --git a/storage/innobase/include/buf0buddy.h b/storage/innobase/include/buf0buddy.h
index 7ee837d6d2e..ed275ab1870 100644
--- a/storage/innobase/include/buf0buddy.h
+++ b/storage/innobase/include/buf0buddy.h
@@ -26,11 +26,6 @@ Created December 2006 by Marko Makela
#ifndef buf0buddy_h
#define buf0buddy_h
-#ifdef UNIV_MATERIALIZE
-# undef UNIV_INLINE
-# define UNIV_INLINE
-#endif
-
#include "buf0types.h"
/**********************************************************************//**
diff --git a/storage/innobase/include/buf0buddy.ic b/storage/innobase/include/buf0buddy.ic
index d166ab8441c..7eb739a99db 100644
--- a/storage/innobase/include/buf0buddy.ic
+++ b/storage/innobase/include/buf0buddy.ic
@@ -23,11 +23,6 @@ Binary buddy allocator for compressed pages
Created December 2006 by Marko Makela
*******************************************************/
-#ifdef UNIV_MATERIALIZE
-# undef UNIV_INLINE
-# define UNIV_INLINE
-#endif
-
#include "buf0buf.h"
#include "buf0buddy.h"
@@ -132,8 +127,3 @@ buf_buddy_free(
buf_buddy_free_low(buf_pool, buf, buf_buddy_get_slot(size));
}
-
-#ifdef UNIV_MATERIALIZE
-# undef UNIV_INLINE
-# define UNIV_INLINE UNIV_INLINE_ORIGINAL
-#endif
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index 64716703b40..c6f693b1dc2 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2018, MariaDB Corporation.
+Copyright (c) 2013, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -41,7 +41,6 @@ Created 11/5/1995 Heikki Tuuri
#include "os0proc.h"
#include "log0log.h"
#include "srv0srv.h"
-#include "my_atomic.h"
#include <ostream>
// Forward declaration
@@ -498,15 +497,13 @@ be implemented at a higher level. In other words, all possible
accesses to a given page through this function must be protected by
the same set of mutexes or latches.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size
@return pointer to the block */
-buf_page_t*
-buf_page_get_zip(
- const page_id_t page_id,
- const page_size_t& page_size);
+buf_page_t* buf_page_get_zip(const page_id_t page_id, ulint zip_size);
/** This is the general function used to get access to a database page.
@param[in] page_id page id
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_NO_LATCH
@param[in] guess guessed block or NULL
@param[in] mode BUF_GET, BUF_GET_IF_IN_POOL,
@@ -519,7 +516,7 @@ BUF_PEEK_IF_IN_POOL, BUF_GET_NO_LATCH, or BUF_GET_IF_IN_POOL_OR_WATCH
buf_block_t*
buf_page_get_gen(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
ulint rw_latch,
buf_block_t* guess,
ulint mode,
@@ -528,18 +525,18 @@ buf_page_get_gen(
mtr_t* mtr,
dberr_t* err);
-/** Initializes a page to the buffer buf_pool. The page is usually not read
+/** Initialize a page in the buffer pool. The page is usually not read
from a file even if it cannot be found in the buffer buf_pool. This is one
of the functions which perform to a block a state transition NOT_USED =>
FILE_PAGE (the other is buf_page_get_gen).
@param[in] page_id page id
-@param[in] page_size page size
-@param[in] mtr mini-transaction
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param[in,out] mtr mini-transaction
@return pointer to the block, page bufferfixed */
buf_block_t*
buf_page_create(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
mtr_t* mtr);
/********************************************************************//**
@@ -675,37 +672,6 @@ buf_block_buf_fix_inc_func(
buf_block_t* block) /*!< in/out: block to bufferfix */
MY_ATTRIBUTE((nonnull));
-/** Increments the bufferfix count.
-@param[in,out] bpage block to bufferfix
-@return the count */
-UNIV_INLINE
-ulint
-buf_block_fix(
- buf_page_t* bpage);
-
-/** Increments the bufferfix count.
-@param[in,out] block block to bufferfix
-@return the count */
-UNIV_INLINE
-ulint
-buf_block_fix(
- buf_block_t* block);
-
-/** Decrements the bufferfix count.
-@param[in,out] bpage block to bufferunfix
-@return the remaining buffer-fix count */
-UNIV_INLINE
-ulint
-buf_block_unfix(
- buf_page_t* bpage);
-/** Decrements the bufferfix count.
-@param[in,out] block block to bufferunfix
-@return the remaining buffer-fix count */
-UNIV_INLINE
-ulint
-buf_block_unfix(
- buf_block_t* block);
-
# ifdef UNIV_DEBUG
/** Increments the bufferfix count.
@param[in,out] b block to bufferfix
@@ -721,6 +687,12 @@ buf_block_unfix(
# endif /* UNIV_DEBUG */
#endif /* !UNIV_INNOCHECKSUM */
+/** Check if a page is all zeroes.
+@param[in] read_buf database page
+@param[in] page_size page frame size
+@return whether the page is all zeroes */
+bool buf_page_is_zeroes(const void* read_buf, size_t page_size);
+
/** Checks if the page is in crc32 checksum format.
@param[in] read_buf database page
@param[in] checksum_field1 new checksum field
@@ -760,21 +732,72 @@ buf_page_is_checksum_valid_none(
/** Check if a page is corrupt.
@param[in] check_lsn whether the LSN should be checked
@param[in] read_buf database page
-@param[in] page_size page size
-@param[in] space tablespace
+@param[in] fsp_flags tablespace flags
@return whether the page is corrupted */
bool
buf_page_is_corrupted(
bool check_lsn,
const byte* read_buf,
- const page_size_t& page_size,
-#ifndef UNIV_INNOCHECKSUM
- const fil_space_t* space = NULL)
-#else
- const void* space = NULL)
-#endif
+ ulint fsp_flags)
MY_ATTRIBUTE((warn_unused_result));
+/** Read the key version from the page. In the full_crc32 format, the
+key version is stored in bytes 0..3; in other formats it is stored at
+FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION (byte offset 26).
+@param[in] read_buf database page
+@param[in] fsp_flags tablespace flags
+@return key version of the page. */
+inline uint32_t buf_page_get_key_version(const byte* read_buf, ulint fsp_flags)
+{
+ return fil_space_t::full_crc32(fsp_flags)
+ ? mach_read_from_4(read_buf + FIL_PAGE_FCRC32_KEY_VERSION)
+ : mach_read_from_4(read_buf
+ + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
+}
+
+/** Read the compression info from the page. In the full_crc32 format,
+the compression flag is the most significant bit of the page type; in
+other formats, the page type itself indicates compression.
+@param[in] read_buf database page
+@param[in] fsp_flags tablespace flags
+@return true if page is compressed. */
+inline bool buf_page_is_compressed(const byte* read_buf, ulint fsp_flags)
+{
+ ulint page_type = mach_read_from_2(read_buf + FIL_PAGE_TYPE);
+ return fil_space_t::full_crc32(fsp_flags)
+ ? !!(page_type & 1U << FIL_PAGE_COMPRESS_FCRC32_MARKER)
+ : page_type == FIL_PAGE_PAGE_COMPRESSED;
+}
+
+/** Get the compressed or uncompressed size of a full_crc32 page.
+@param[in] buf page_compressed or uncompressed page
+@param[out] comp whether the page could be compressed
+@param[out] cr whether the page could be corrupted
+@return the payload size in the file page */
+inline uint buf_page_full_crc32_size(const byte* buf, bool* comp, bool* cr)
+{
+ uint t = mach_read_from_2(buf + FIL_PAGE_TYPE);
+ uint page_size = uint(srv_page_size);
+
+ if (!(t & 1U << FIL_PAGE_COMPRESS_FCRC32_MARKER)) {
+ return page_size;
+ }
+
+ t &= ~(1U << FIL_PAGE_COMPRESS_FCRC32_MARKER);
+ t <<= 8;
+
+ if (t < page_size) {
+ page_size = t;
+ if (comp) {
+ *comp = true;
+ }
+ } else if (cr) {
+ *cr = true;
+ }
+
+ return page_size;
+}
+
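buf_page_full_crc32_size() above decodes the payload size from the 16-bit page type: the top bit marks a page_compressed page and the remaining bits, shifted left by 8, give the stored size in bytes. Assuming the marker is bit 15 and a 16 KiB srv_page_size, a type value of 0x8010 decodes to 0x10 << 8 = 4096 bytes. A minimal sketch of the decoding (illustrative names, not the InnoDB constants):

#include <cassert>
#include <cstdint>

static const unsigned COMPRESS_MARKER_BIT = 15;   // assumed FIL_PAGE_COMPRESS_FCRC32_MARKER
static const unsigned SRV_PAGE_SIZE = 16384;      // assumed srv_page_size

// Decode the payload size of a full_crc32 page from its FIL_PAGE_TYPE value.
static unsigned full_crc32_size(uint16_t type, bool* comp, bool* corrupt)
{
    unsigned t = type;
    unsigned page_size = SRV_PAGE_SIZE;

    if (!(t & 1U << COMPRESS_MARKER_BIT))
        return page_size;                         // not page_compressed

    t &= ~(1U << COMPRESS_MARKER_BIT);
    t <<= 8;                                      // stored size has 256-byte granularity

    if (t < page_size) {
        page_size = t;
        if (comp) *comp = true;
    } else if (corrupt) {
        *corrupt = true;                          // claimed size cannot exceed the page
    }
    return page_size;
}

int main()
{
    bool comp = false, corrupt = false;
    assert(full_crc32_size(0x0005, &comp, &corrupt) == 16384);        // uncompressed
    assert(full_crc32_size(0x8010, &comp, &corrupt) == 4096 && comp); // 0x10 << 8
}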
#ifndef UNIV_INNOCHECKSUM
/**********************************************************************//**
Gets the space id, page offset, and byte offset within page of a
@@ -836,10 +859,8 @@ buf_print(void);
/** Dump a page to stderr.
@param[in] read_buf database page
-@param[in] page_size page size */
-UNIV_INTERN
-void
-buf_page_print(const byte* read_buf, const page_size_t& page_size)
+@param[in] zip_size compressed page size, or 0 */
+void buf_page_print(const byte* read_buf, ulint zip_size = 0)
ATTRIBUTE_COLD __attribute__((nonnull));
/********************************************************************//**
Decompress a block.
@@ -1198,6 +1219,7 @@ and the lock released later.
@param[out] err DB_SUCCESS or DB_TABLESPACE_DELETED
@param[in] mode BUF_READ_IBUF_PAGES_ONLY, ...
@param[in] page_id page id
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] unzip whether the uncompressed page is
requested (for ROW_FORMAT=COMPRESSED)
@return pointer to the block
@@ -1207,7 +1229,7 @@ buf_page_init_for_read(
dberr_t* err,
ulint mode,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
bool unzip);
/** Complete a read or write request of a file page to or from the buffer pool.
@@ -1418,6 +1440,15 @@ ulint
buf_pool_size_align(
ulint size);
+/** Verify that the post-encryption checksum matches the calculated checksum.
+This function should be called only if the tablespace contains crypt data metadata.
+@param[in] page page frame
+@param[in] fsp_flags tablespace flags
+@return true if page is encrypted and OK, false otherwise */
+bool buf_page_verify_crypt_checksum(
+ const byte* page,
+ ulint fsp_flags);
+
/** Calculate the checksum of a page from compressed table and update the
page.
@param[in,out] page page to update
@@ -1438,7 +1469,7 @@ a page is written to disk.
(may be src_frame or an encrypted/compressed copy of it) */
UNIV_INTERN
byte*
-buf_page_encrypt_before_write(
+buf_page_encrypt(
fil_space_t* space,
buf_page_t* bpage,
byte* src_frame);
@@ -1448,10 +1479,9 @@ buf_page_encrypt_before_write(
NOTE! The definition appears here only for other modules of this
directory (buf) to see it. Do not use from outside! */
-typedef struct {
-private:
- int32 reserved; /*!< true if this slot is reserved
- */
+class buf_tmp_buffer_t {
+ /** whether this slot is reserved */
+ std::atomic<bool> reserved;
public:
byte* crypt_buf; /*!< for encryption the data needs to be
copied to a separate buffer before it's
@@ -1467,18 +1497,16 @@ public:
/** Release the slot */
void release()
{
- my_atomic_store32_explicit(&reserved, false,
- MY_MEMORY_ORDER_RELAXED);
+ reserved.store(false, std::memory_order_relaxed);
}
/** Acquire the slot
@return whether the slot was acquired */
bool acquire()
{
- return !my_atomic_fas32_explicit(&reserved, true,
- MY_MEMORY_ORDER_RELAXED);
+ return !reserved.exchange(true, std::memory_order_relaxed);
}
-} buf_tmp_buffer_t;
+};
/** The common buffer control block structure
for compressed and uncompressed frames */
@@ -1502,11 +1530,8 @@ public:
buf_pool->page_hash or
buf_pool->zip_hash */
- /** Page size. Protected by buf_pool mutex. */
- page_size_t size;
-
/** Count of how manyfold this block is currently bufferfixed. */
- ib_uint32_t buf_fix_count;
+ Atomic_counter<uint32_t> buf_fix_count;
/** type of pending I/O operation; also protected by
buf_pool->mutex for writes only */
@@ -1658,6 +1683,27 @@ public:
protected by buf_pool->zip_mutex
or buf_block_t::mutex. */
# endif /* UNIV_DEBUG */
+
+ void fix() { buf_fix_count++; }
+ uint32_t unfix()
+ {
+ uint32_t count= buf_fix_count--;
+ ut_ad(count != 0);
+ return count - 1;
+ }
+
+ /** @return the physical size, in bytes */
+ ulint physical_size() const
+ {
+ return zip.ssize ? (UNIV_ZIP_SIZE_MIN >> 1) << zip.ssize : srv_page_size;
+ }
+
+ /** @return the ROW_FORMAT=COMPRESSED physical size, in bytes
+ @retval 0 if not compressed */
+ ulint zip_size() const
+ {
+ return zip.ssize ? (UNIV_ZIP_SIZE_MIN >> 1) << zip.ssize : 0;
+ }
};
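The new buf_page_t accessors decode the page size from the compressed-page shift zip.ssize: 0 means uncompressed (srv_page_size), and a nonzero value n means (UNIV_ZIP_SIZE_MIN >> 1) << n bytes. Assuming UNIV_ZIP_SIZE_MIN is 1024, ssize values 1..5 map to 1, 2, 4, 8 and 16 KiB. A standalone sketch of that decoding:

#include <cassert>

static const unsigned ZIP_SIZE_MIN = 1024;    // assumed UNIV_ZIP_SIZE_MIN
static const unsigned SRV_PAGE_SIZE = 16384;  // assumed srv_page_size

// ROW_FORMAT=COMPRESSED size, or 0 when the page is not compressed.
static unsigned zip_size(unsigned ssize)
{
    return ssize ? (ZIP_SIZE_MIN >> 1) << ssize : 0;
}

// Size of the page as stored in the data file.
static unsigned physical_size(unsigned ssize)
{
    return ssize ? (ZIP_SIZE_MIN >> 1) << ssize : SRV_PAGE_SIZE;
}

int main()
{
    assert(zip_size(0) == 0 && physical_size(0) == 16384);
    assert(zip_size(1) == 1024);
    assert(zip_size(4) == 8192);
    assert(zip_size(5) == 16384 && physical_size(5) == 16384);
}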
/** The buffer control block structure */
@@ -1764,20 +1810,20 @@ struct buf_block_t{
/* @{ */
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
- ulint n_pointers; /*!< used in debugging: the number of
+ Atomic_counter<ulint>
+ n_pointers; /*!< used in debugging: the number of
pointers in the adaptive hash index
pointing to this frame;
protected by atomic memory access
or btr_search_own_all(). */
# define assert_block_ahi_empty(block) \
- ut_a(my_atomic_addlint(&(block)->n_pointers, 0) == 0)
+ ut_a((block)->n_pointers == 0)
# define assert_block_ahi_empty_on_init(block) do { \
UNIV_MEM_VALID(&(block)->n_pointers, sizeof (block)->n_pointers); \
assert_block_ahi_empty(block); \
} while (0)
# define assert_block_ahi_valid(block) \
- ut_a((block)->index \
- || my_atomic_loadlint(&(block)->n_pointers) == 0)
+ ut_a((block)->index || (block)->n_pointers == 0)
# else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
# define assert_block_ahi_empty(block) /* nothing */
# define assert_block_ahi_empty_on_init(block) /* nothing */
@@ -1809,7 +1855,7 @@ struct buf_block_t{
# ifdef UNIV_DEBUG
/** @name Debug fields */
/* @{ */
- rw_lock_t debug_latch; /*!< in the debug version, each thread
+ rw_lock_t* debug_latch; /*!< in the debug version, each thread
which bufferfixes the block acquires
an s-latch here; so we can use the
debug utilities in sync0rw */
@@ -1821,6 +1867,16 @@ struct buf_block_t{
and accessed; we introduce this new
mutex in InnoDB-5.1 to relieve
contention on the buffer pool mutex */
+
+ void fix() { page.fix(); }
+ uint32_t unfix() { return page.unfix(); }
+
+ /** @return the physical size, in bytes */
+ ulint physical_size() const { return page.physical_size(); }
+
+ /** @return the ROW_FORMAT=COMPRESSED physical size, in bytes
+ @retval 0 if not compressed */
+ ulint zip_size() const { return page.zip_size(); }
};
/** Check if a buf_block_t object is in a valid state
@@ -2025,17 +2081,6 @@ struct buf_buddy_stat_t {
ib_uint64_t relocated_usec;
};
-/** @brief The temporary memory array structure.
-
-NOTE! The definition appears here only for other modules of this
-directory (buf) to see it. Do not use from outside! */
-
-typedef struct {
- ulint n_slots; /*!< Total number of slots */
- buf_tmp_buffer_t *slots; /*!< Pointer to the slots in the
- array */
-} buf_tmp_array_t;
-
/** @brief The buffer pool structure.
NOTE! The definition appears here only for other modules of this
@@ -2095,7 +2140,8 @@ struct buf_pool_t{
indexed by block->frame */
ulint n_pend_reads; /*!< number of pending read
operations */
- ulint n_pend_unzip; /*!< number of pending decompressions */
+ Atomic_counter<ulint>
+ n_pend_unzip; /*!< number of pending decompressions */
time_t last_printout_time;
/*!< when buf_print_io was last time
@@ -2236,20 +2282,47 @@ struct buf_pool_t{
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
UT_LIST_BASE_NODE_T(buf_buddy_free_t) zip_free[BUF_BUDDY_SIZES_MAX];
/*!< buddy free lists */
+#if BUF_BUDDY_LOW > UNIV_ZIP_SIZE_MIN
+# error "BUF_BUDDY_LOW > UNIV_ZIP_SIZE_MIN"
+#endif
+ /* @} */
buf_page_t* watch;
/*!< Sentinel records for buffer
pool watches. Protected by
buf_pool->mutex. */
- buf_tmp_array_t* tmp_arr;
- /*!< Array for temporal memory
- used in compression and encryption */
+ /** Temporary memory for page_compressed and encrypted I/O */
+ struct io_buf_t {
+ /** number of elements in slots[] */
+ const ulint n_slots;
+ /** array of slots */
+ buf_tmp_buffer_t* const slots;
+
+ io_buf_t() = delete;
+
+ /** Constructor */
+ explicit io_buf_t(ulint n_slots) :
+ n_slots(n_slots),
+ slots(static_cast<buf_tmp_buffer_t*>(
+ ut_malloc_nokey(n_slots
+ * sizeof *slots)))
+ {
+ memset((void*) slots, 0, n_slots * sizeof *slots);
+ }
-#if BUF_BUDDY_LOW > UNIV_ZIP_SIZE_MIN
-# error "BUF_BUDDY_LOW > UNIV_ZIP_SIZE_MIN"
-#endif
- /* @} */
+ ~io_buf_t();
+
+ /** Reserve a buffer */
+ buf_tmp_buffer_t* reserve()
+ {
+ for (buf_tmp_buffer_t* s = slots, *e = slots + n_slots;
+ s != e; s++) {
+ if (s->acquire()) return s;
+ }
+ return NULL;
+ }
+ } io_buf;
};
/** Print the given buf_pool_t object.
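[Editor's note] The physical_size()/zip_size() accessors added above encode the ROW_FORMAT=COMPRESSED page size as a shift count (zip.ssize): the size is (UNIV_ZIP_SIZE_MIN >> 1) << ssize, falling back to srv_page_size when ssize is 0. The following standalone sketch (not InnoDB code; it assumes UNIV_ZIP_SIZE_MIN == 1024 and the default 16 KiB page size) shows the resulting values.

// Standalone illustration of the zip.ssize encoding used by
// buf_page_t::physical_size() and zip_size() above.
// Assumptions (not taken from this diff): UNIV_ZIP_SIZE_MIN == 1024,
// srv_page_size == 16384 (the default innodb_page_size).
#include <cstdio>

static const unsigned UNIV_ZIP_SIZE_MIN = 1024;
static const unsigned srv_page_size = 16384;

// 0 if the page is not compressed, else the compressed page size.
static unsigned zip_size(unsigned ssize)
{
    return ssize ? (UNIV_ZIP_SIZE_MIN >> 1) << ssize : 0;
}

// The size actually occupied in the buffer pool and the data file.
static unsigned physical_size(unsigned ssize)
{
    return ssize ? (UNIV_ZIP_SIZE_MIN >> 1) << ssize : srv_page_size;
}

int main()
{
    for (unsigned ssize = 0; ssize <= 5; ssize++) {
        std::printf("ssize=%u zip_size=%u physical_size=%u\n",
                    ssize, zip_size(ssize), physical_size(ssize));
    }
    // ssize = 1..5 maps to 1024, 2048, 4096, 8192, 16384 bytes.
}
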
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index 21f6ed0c71f..2169eedd015 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -945,28 +945,6 @@ buf_block_get_modify_clock(
return(block->modify_clock);
}
-/** Increments the bufferfix count.
-@param[in,out] bpage block to bufferfix
-@return the count */
-UNIV_INLINE
-ulint
-buf_block_fix(
- buf_page_t* bpage)
-{
- return uint32(my_atomic_add32((int32*) &bpage->buf_fix_count, 1) + 1);
-}
-
-/** Increments the bufferfix count.
-@param[in,out] block block to bufferfix
-@return the count */
-UNIV_INLINE
-ulint
-buf_block_fix(
- buf_block_t* block)
-{
- return(buf_block_fix(&block->page));
-}
-
/*******************************************************************//**
Increments the bufferfix count. */
UNIV_INLINE
@@ -985,37 +963,12 @@ buf_block_buf_fix_inc_func(
threaded. */
if (!fsp_is_system_temporary(block->page.id.space())) {
ibool ret;
- ret = rw_lock_s_lock_nowait(&block->debug_latch, file, line);
+ ret = rw_lock_s_lock_nowait(block->debug_latch, file, line);
ut_a(ret);
}
#endif /* UNIV_DEBUG */
- buf_block_fix(block);
-}
-
-/** Decrements the bufferfix count.
-@param[in,out] bpage block to bufferunfix
-@return the remaining buffer-fix count */
-UNIV_INLINE
-ulint
-buf_block_unfix(
- buf_page_t* bpage)
-{
- uint32 count = uint32(my_atomic_add32((int32*) &bpage->buf_fix_count,
- -1));
- ut_ad(count != 0);
- return count - 1;
-}
-
-/** Decrements the bufferfix count.
-@param[in,out] block block to bufferunfix
-@return the remaining buffer-fix count */
-UNIV_INLINE
-ulint
-buf_block_unfix(
- buf_block_t* block)
-{
- return(buf_block_unfix(&block->page));
+ block->fix();
}
/*******************************************************************//**
@@ -1026,14 +979,14 @@ buf_block_buf_fix_dec(
/*==================*/
buf_block_t* block) /*!< in/out: block to bufferunfix */
{
- buf_block_unfix(block);
+ block->unfix();
#ifdef UNIV_DEBUG
/* No debug latch is acquired if block belongs to system temporary.
Debug latch is not of much help if access to block is single
threaded. */
if (!fsp_is_system_temporary(block->page.id.space())) {
- rw_lock_s_unlock(&block->debug_latch);
+ rw_lock_s_unlock(block->debug_latch);
}
#endif /* UNIV_DEBUG */
}
@@ -1276,14 +1229,14 @@ buf_page_release_zip(
is single threaded. */
buf_block_t* block = reinterpret_cast<buf_block_t*>(bpage);
if (!fsp_is_system_temporary(block->page.id.space())) {
- rw_lock_s_unlock(&block->debug_latch);
+ rw_lock_s_unlock(block->debug_latch);
}
}
#endif /* UNIV_DEBUG */
/* Fall through */
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_ZIP_DIRTY:
- buf_block_unfix(reinterpret_cast<buf_block_t*>(bpage));
+ reinterpret_cast<buf_block_t*>(bpage)->unfix();
return;
case BUF_BLOCK_POOL_WATCH:
@@ -1312,7 +1265,7 @@ buf_page_release_latch(
temporary. Debug latch is not of much help if access to block
is single threaded. */
if (!fsp_is_system_temporary(block->page.id.space())) {
- rw_lock_s_unlock(&block->debug_latch);
+ rw_lock_s_unlock(block->debug_latch);
}
#endif /* UNIV_DEBUG */
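[Editor's note] The removed free functions buf_block_fix()/buf_block_unfix() are folded into the fix()/unfix() members shown earlier, which wrap an atomic buf_fix_count. A minimal sketch of the same counter discipline, using std::atomic instead of InnoDB's Atomic_counter (names are illustrative only):

// Illustrative buffer-fix counter, mirroring the fix()/unfix() contract:
// fix() increments; unfix() decrements, asserts the counter was nonzero,
// and returns the remaining buffer-fix count.
#include <atomic>
#include <cassert>
#include <cstdint>

struct page_stub {
    std::atomic<uint32_t> buf_fix_count{0};

    uint32_t fix() { return buf_fix_count.fetch_add(1) + 1; }

    uint32_t unfix()
    {
        uint32_t count = buf_fix_count.fetch_sub(1);
        assert(count != 0);      // must have been buffer-fixed
        return count - 1;        // remaining buffer-fix count
    }
};

int main()
{
    page_stub page;
    page.fix();                  // pin the page
    uint32_t left = page.unfix();
    assert(left == 0);           // last reference released
}
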
diff --git a/storage/innobase/include/buf0checksum.h b/storage/innobase/include/buf0checksum.h
index 06eb37906d2..98ce879b9ea 100644
--- a/storage/innobase/include/buf0checksum.h
+++ b/storage/innobase/include/buf0checksum.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -29,18 +29,6 @@ Created Aug 11, 2011 Vasil Dimov
#include "buf0types.h"
-#ifdef INNODB_BUG_ENDIAN_CRC32
-/** Calculate the CRC32 checksum of a page. The value is stored to the page
-when it is written to a file and also checked for a match when reading from
-the file. Note that we must be careful to calculate the same value on all
-architectures.
-@param[in] page buffer page (srv_page_size bytes)
-@param[in] bug_endian whether to use big endian byteorder
-when converting byte strings to integers, for bug-compatibility with
-big-endian architecture running MySQL 5.6, MariaDB 10.0 or MariaDB 10.1
-@return CRC-32C */
-uint32_t buf_calc_page_crc32(const byte* page, bool bug_endian = false);
-#else
/** Calculate the CRC32 checksum of a page. The value is stored to the page
when it is written to a file and also checked for a match when reading from
the file. Note that we must be careful to calculate the same value on all
@@ -48,7 +36,6 @@ architectures.
@param[in] page buffer page (srv_page_size bytes)
@return CRC-32C */
uint32_t buf_calc_page_crc32(const byte* page);
-#endif
/** Calculate a checksum which is stored to the page when it is written
to a file. Note that we must be careful to calculate the same value on
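[Editor's note] buf_calc_page_crc32() computes CRC-32C (Castagnoli) over the page; the bug-endian compatibility variant is dropped above. For reference, a plain bitwise CRC-32C (reflected polynomial 0x82F63B78) looks like the sketch below. InnoDB's actual routine is table-driven or hardware-accelerated and skips certain page header fields, which this illustration does not attempt to reproduce.

// Minimal software CRC-32C (Castagnoli), for illustration only.
// This is the generic algorithm, not InnoDB's page checksum routine.
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint32_t crc32c(const unsigned char* data, size_t len)
{
    uint32_t crc = 0xFFFFFFFFu;
    for (size_t i = 0; i < len; i++) {
        crc ^= data[i];
        for (int bit = 0; bit < 8; bit++) {
            crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
        }
    }
    return ~crc;
}

int main()
{
    const char msg[] = "123456789";
    // The well-known CRC-32C check value for "123456789" is E3069283.
    std::printf("%08X\n", crc32c((const unsigned char*) msg,
                                 std::strlen(msg)));
}
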
diff --git a/storage/innobase/include/buf0flu.h b/storage/innobase/include/buf0flu.h
index 6ee84e99d5b..6cffb664051 100644
--- a/storage/innobase/include/buf0flu.h
+++ b/storage/innobase/include/buf0flu.h
@@ -73,17 +73,24 @@ buf_flush_relocate_on_flush_list(
@param[in,out] bpage flushed page
@param[in] dblwr whether the doublewrite buffer was used */
void buf_flush_write_complete(buf_page_t* bpage, bool dblwr);
+
+/** Assign the full crc32 checksum for non-compressed page.
+@param[in,out] page page to be updated */
+void buf_flush_assign_full_crc32_checksum(byte* page);
+
/** Initialize a page for writing to the tablespace.
-@param[in] block buffer block; NULL if bypassing the buffer pool
-@param[in,out] page page frame
-@param[in,out] page_zip_ compressed page, or NULL if uncompressed
-@param[in] newest_lsn newest modification LSN to the page */
+@param[in] block buffer block; NULL if bypassing the buffer pool
+@param[in,out] page page frame
+@param[in,out] page_zip_ compressed page, or NULL if uncompressed
+@param[in] newest_lsn newest modification LSN to the page
+@param[in] use_full_checksum whether tablespace uses full checksum */
void
buf_flush_init_for_writing(
const buf_block_t* block,
byte* page,
void* page_zip_,
- lsn_t newest_lsn);
+ lsn_t newest_lsn,
+ bool use_full_checksum);
# if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
/********************************************************************//**
@@ -191,18 +198,6 @@ buf_flush_note_modification(
lsn_t end_lsn, /*!< in: end lsn of the last mtr in the
set of mtr's */
FlushObserver* observer); /*!< in: flush observer */
-
-/********************************************************************//**
-This function should be called when recovery has modified a buffer page. */
-UNIV_INLINE
-void
-buf_flush_recv_note_modification(
-/*=============================*/
- buf_block_t* block, /*!< in: block which is modified */
- lsn_t start_lsn, /*!< in: start lsn of the first mtr in a
- set of mtr's */
- lsn_t end_lsn); /*!< in: end lsn of the last mtr in the
- set of mtr's */
/********************************************************************//**
Returns TRUE if the file page block is immediately suitable for replacement,
i.e., transition FILE_PAGE => NOT_USED allowed.
diff --git a/storage/innobase/include/buf0flu.ic b/storage/innobase/include/buf0flu.ic
index 5a682ed121a..0652d108ca2 100644
--- a/storage/innobase/include/buf0flu.ic
+++ b/storage/innobase/include/buf0flu.ic
@@ -38,17 +38,6 @@ buf_flush_insert_into_flush_list(
lsn_t lsn); /*!< in: oldest modification */
/********************************************************************//**
-Inserts a modified block into the flush list in the right sorted position.
-This function is used by recovery, because there the modifications do not
-necessarily come in the order of lsn's. */
-void
-buf_flush_insert_sorted_into_flush_list(
-/*====================================*/
- buf_pool_t* buf_pool, /*!< buffer pool instance */
- buf_block_t* block, /*!< in/out: block which is modified */
- lsn_t lsn); /*!< in: oldest modification */
-
-/********************************************************************//**
This function should be called at a mini-transaction commit, if a page was
modified in it. Puts the block to the list of modified blocks, if it is not
already in it. */
@@ -63,24 +52,11 @@ buf_flush_note_modification(
modified this block */
FlushObserver* observer) /*!< in: flush observer */
{
-#ifdef UNIV_DEBUG
- {
- /* Allow write to proceed to shared temporary tablespace
- in read-only mode. */
- ut_ad(!srv_read_only_mode
- || fsp_is_system_temporary(block->page.id.space()));
- ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
- ut_ad(block->page.buf_fix_count > 0);
-
- buf_pool_t* buf_pool = buf_pool_from_block(block);
-
- ut_ad(!buf_pool_mutex_own(buf_pool));
- ut_ad(!buf_flush_list_mutex_own(buf_pool));
- }
-#endif /* UNIV_DEBUG */
-
mutex_enter(&block->mutex);
-
+ ut_ad(!srv_read_only_mode
+ || fsp_is_system_temporary(block->page.id.space()));
+ ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
+ ut_ad(block->page.buf_fix_count > 0);
ut_ad(block->page.newest_modification <= end_lsn);
block->page.newest_modification = end_lsn;
@@ -98,52 +74,7 @@ buf_flush_note_modification(
ut_ad(block->page.oldest_modification <= start_lsn);
}
- buf_page_mutex_exit(block);
+ mutex_exit(&block->mutex);
srv_stats.buf_pool_write_requests.inc();
}
-
-/********************************************************************//**
-This function should be called when recovery has modified a buffer page. */
-UNIV_INLINE
-void
-buf_flush_recv_note_modification(
-/*=============================*/
- buf_block_t* block, /*!< in: block which is modified */
- lsn_t start_lsn, /*!< in: start lsn of the first mtr in a
- set of mtr's */
- lsn_t end_lsn) /*!< in: end lsn of the last mtr in the
- set of mtr's */
-{
-#ifdef UNIV_DEBUG
- {
- ut_ad(!srv_read_only_mode);
- ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
- ut_ad(block->page.buf_fix_count > 0);
-
- buf_pool_t* buf_pool = buf_pool_from_block(block);
-
- ut_ad(!buf_pool_mutex_own(buf_pool));
- ut_ad(!buf_flush_list_mutex_own(buf_pool));
-
- ut_ad(start_lsn != 0);
- ut_ad(block->page.newest_modification <= end_lsn);
- }
-#endif /* UNIV_DEBUG */
-
- buf_page_mutex_enter(block);
-
- block->page.newest_modification = end_lsn;
-
- if (!block->page.oldest_modification) {
- buf_pool_t* buf_pool = buf_pool_from_block(block);
-
- buf_flush_insert_sorted_into_flush_list(
- buf_pool, block, start_lsn);
- } else {
- ut_ad(block->page.oldest_modification <= start_lsn);
- }
-
- buf_page_mutex_exit(block);
-
-}
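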
diff --git a/storage/innobase/include/buf0rea.h b/storage/innobase/include/buf0rea.h
index c32b0d3637e..057ce7711f5 100644
--- a/storage/innobase/include/buf0rea.h
+++ b/storage/innobase/include/buf0rea.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2017, MariaDB Corporation.
+Copyright (c) 2015, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -34,30 +34,23 @@ buffer buf_pool if it is not already there. Sets the io_fix flag and sets
an exclusive lock on the buffer frame. The flag is cleared and the x-lock
released by the i/o-handler thread.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@retval DB_SUCCESS if the page was read and is not corrupted,
@retval DB_PAGE_CORRUPTED if page based on checksum check is corrupted,
@retval DB_DECRYPTION_FAILED if page post encryption checksum matches but
after decryption normal page checksum does not match.
@retval DB_TABLESPACE_DELETED if tablespace .ibd file is missing */
-dberr_t
-buf_read_page(
- const page_id_t page_id,
- const page_size_t& page_size);
+dberr_t buf_read_page(const page_id_t page_id, ulint zip_size);
-/********************************************************************//**
-High-level function which reads a page asynchronously from a file to the
+/** High-level function which reads a page asynchronously from a file to the
buffer buf_pool if it is not already there. Sets the io_fix flag and sets
an exclusive lock on the buffer frame. The flag is cleared and the x-lock
released by the i/o-handler thread.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] sync true if synchronous aio is desired */
void
-buf_read_page_background(
- const page_id_t page_id,
- const page_size_t& page_size,
- bool sync);
+buf_read_page_background(const page_id_t page_id, ulint zip_size, bool sync);
/** Applies a random read-ahead in buf_pool if there are at least a threshold
value of accessed pages from the random read-ahead area. Does not read any
@@ -70,16 +63,13 @@ performed by ibuf routines, a situation which could result in a deadlock if
the OS does not support asynchronous i/o.
@param[in] page_id page id of a page which the current thread
wants to access
-@param[in] page_size page size
-@param[in] inside_ibuf TRUE if we are inside ibuf routine
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param[in] ibuf whether we are inside ibuf routine
@return number of page read requests issued; NOTE that if we read ibuf
pages, it may happen that the page at the given page number does not
get read even if we return a positive value! */
ulint
-buf_read_ahead_random(
- const page_id_t page_id,
- const page_size_t& page_size,
- ibool inside_ibuf);
+buf_read_ahead_random(const page_id_t page_id, ulint zip_size, bool ibuf);
/** Applies linear read-ahead if in the buf_pool the page is a border page of
a linear read-ahead area and all the pages in the area have been accessed.
@@ -104,14 +94,11 @@ NOTE 3: the calling thread must want access to the page given: this rule is
set to prevent unintended read-aheads performed by ibuf routines, a situation
which could result in a deadlock if the OS does not support asynchronous io.
@param[in] page_id page id; see NOTE 3 above
-@param[in] page_size page size
-@param[in] inside_ibuf TRUE if we are inside ibuf routine
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param[in] ibuf whether we are inside ibuf routine
@return number of page read requests issued */
ulint
-buf_read_ahead_linear(
- const page_id_t page_id,
- const page_size_t& page_size,
- ibool inside_ibuf);
+buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf);
/********************************************************************//**
Issues read requests for pages which the ibuf module wants to read in, in
diff --git a/storage/innobase/include/buf0types.h b/storage/innobase/include/buf0types.h
index 2847e328515..fef42b82eb1 100644
--- a/storage/innobase/include/buf0types.h
+++ b/storage/innobase/include/buf0types.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2015, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -82,8 +83,16 @@ enum srv_checksum_algorithm_t {
innodb when reading */
SRV_CHECKSUM_ALGORITHM_NONE, /*!< Write none, allow crc32,
innodb or none when reading */
- SRV_CHECKSUM_ALGORITHM_STRICT_NONE /*!< Write none, allow none
+ SRV_CHECKSUM_ALGORITHM_STRICT_NONE, /*!< Write none, allow none
when reading */
+
+ /** For new files, always compute CRC-32C for the whole page.
+ For old files, allow crc32, innodb or none when reading. */
+ SRV_CHECKSUM_ALGORITHM_FULL_CRC32,
+
+ /** For new files, always compute CRC-32C for the whole page.
+ For old files, allow crc32 when reading. */
+ SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32
};
inline
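[Editor's note] The two new enum values extend innodb_checksum_algorithm with a full-page CRC-32C mode in a permissive and a strict flavour; as with the existing pairs, the strict variant restricts what is accepted when reading old files. A hypothetical helper illustrating that split (the enum values come from the diff; the predicates are not the server's validation logic):

// Sketch: how a read/write path might classify the algorithm setting.
#include <cstdio>

enum checksum_algo {
    CRC32, STRICT_CRC32,
    INNODB, STRICT_INNODB,
    NONE, STRICT_NONE,
    FULL_CRC32, STRICT_FULL_CRC32
};

// Strict settings only accept the configured checksum when reading.
static bool is_strict(checksum_algo a)
{
    return a == STRICT_CRC32 || a == STRICT_INNODB
        || a == STRICT_NONE || a == STRICT_FULL_CRC32;
}

// Both full_crc32 flavours compute CRC-32C over the whole page of new files.
static bool writes_full_crc32(checksum_algo a)
{
    return a == FULL_CRC32 || a == STRICT_FULL_CRC32;
}

int main()
{
    std::printf("full_crc32: strict=%d full=%d\n",
                is_strict(FULL_CRC32), writes_full_crc32(FULL_CRC32));
    std::printf("strict_full_crc32: strict=%d full=%d\n",
                is_strict(STRICT_FULL_CRC32),
                writes_full_crc32(STRICT_FULL_CRC32));
}
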
diff --git a/storage/innobase/include/data0data.h b/storage/innobase/include/data0data.h
index 1ae4c70c109..f4f3b3622c5 100644
--- a/storage/innobase/include/data0data.h
+++ b/storage/innobase/include/data0data.h
@@ -545,6 +545,33 @@ struct dtuple_t {
inserted or updated.
@param[in] index index possibly with instantly added columns */
void trim(const dict_index_t& index);
+
+ /**
+ @param info_bits the info_bits of a data tuple
+ @return whether this is a hidden metadata record
+ for instant ALTER TABLE (not only ADD COLUMN) */
+ static bool is_alter_metadata(ulint info_bits)
+ {
+ return UNIV_UNLIKELY(info_bits == REC_INFO_METADATA_ALTER);
+ }
+
+ /**
+ @param info_bits the info_bits of a data tuple
+ @return whether this is a hidden metadata record
+ for instant ADD COLUMN or ALTER TABLE */
+ static bool is_metadata(ulint info_bits)
+ {
+ return UNIV_UNLIKELY((info_bits & ~REC_INFO_DELETED_FLAG)
+ == REC_INFO_METADATA_ADD);
+ }
+
+ /** @return whether this is a hidden metadata record
+ for instant ALTER TABLE (not only ADD COLUMN) */
+ bool is_alter_metadata() const { return is_alter_metadata(info_bits); }
+
+ /** @return whether this is a hidden metadata record
+ for instant ADD COLUMN or ALTER TABLE */
+ bool is_metadata() const { return is_metadata(info_bits); }
};
inline ulint dtuple_get_n_fields(const dtuple_t* tuple)
diff --git a/storage/innobase/include/data0type.h b/storage/innobase/include/data0type.h
index 03f3e4d2c71..aa63be9b8a1 100644
--- a/storage/innobase/include/data0type.h
+++ b/storage/innobase/include/data0type.h
@@ -262,35 +262,31 @@ dtype_get_at_most_n_mbchars(
ulint data_len, /*!< in: length of str (in bytes) */
const char* str); /*!< in: the string whose prefix
length is being determined */
-/*********************************************************************//**
-Checks if a data main type is a string type. Also a BLOB is considered a
-string type.
-@return TRUE if string type */
-ibool
-dtype_is_string_type(
-/*=================*/
- ulint mtype); /*!< in: InnoDB main data type code: DATA_CHAR, ... */
-/*********************************************************************//**
-Checks if a type is a binary string type. Note that for tables created with
-< 4.0.14, we do not know if a DATA_BLOB column is a BLOB or a TEXT column. For
-those DATA_BLOB columns this function currently returns FALSE.
-@return TRUE if binary string type */
-ibool
-dtype_is_binary_string_type(
-/*========================*/
- ulint mtype, /*!< in: main data type */
- ulint prtype);/*!< in: precise type */
-/*********************************************************************//**
-Checks if a type is a non-binary string type. That is, dtype_is_string_type is
-TRUE and dtype_is_binary_string_type is FALSE. Note that for tables created
-with < 4.0.14, we do not know if a DATA_BLOB column is a BLOB or a TEXT column.
-For those DATA_BLOB columns this function currently returns TRUE.
-@return TRUE if non-binary string type */
-ibool
-dtype_is_non_binary_string_type(
-/*============================*/
- ulint mtype, /*!< in: main data type */
- ulint prtype);/*!< in: precise type */
+/** @return whether main type is a string type */
+inline bool dtype_is_string_type(ulint mtype)
+{
+ return mtype <= DATA_BLOB
+ || mtype == DATA_MYSQL || mtype == DATA_VARMYSQL;
+}
+
+/** @return whether a type is a binary string type */
+inline bool dtype_is_binary_string_type(ulint mtype, ulint prtype)
+{
+ /* Note that for tables created before MySQL 4.0.14,
+ we do not know if a DATA_BLOB column is a BLOB or a TEXT column.
+ For those DATA_BLOB columns we return false. */
+
+ return mtype == DATA_FIXBINARY || mtype == DATA_BINARY
+ || (mtype == DATA_BLOB && (prtype & DATA_BINARY_TYPE));
+}
+
+/** @return whether a type is a non-binary string type */
+inline bool dtype_is_non_binary_string_type(ulint mtype, ulint prtype)
+{
+ return dtype_is_string_type(mtype)
+ && !dtype_is_binary_string_type(mtype, prtype);
+}
+
/*********************************************************************//**
Sets a data type structure. */
UNIV_INLINE
@@ -554,11 +550,55 @@ struct dtype_t{
{
return (prtype & DATA_VERSIONED) == DATA_VERS_END;
}
+
+ /** Set the type of the BLOB in the hidden metadata record. */
+ void metadata_blob_init()
+ {
+ prtype = DATA_NOT_NULL;
+ mtype = DATA_BLOB;
+ len = 0;
+ mbminlen = 0;
+ mbmaxlen = 0;
+ }
};
/** The DB_TRX_ID,DB_ROLL_PTR values for "no history is available" */
extern const byte reset_trx_id[DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN];
+/** Info bit denoting the predefined minimum record: this bit is set
+if and only if the record is the first user record on a non-leaf
+B-tree page that is the leftmost page on its level
+(PAGE_LEVEL is nonzero and FIL_PAGE_PREV is FIL_NULL). */
+#define REC_INFO_MIN_REC_FLAG 0x10UL
+/** The delete-mark flag in info bits */
+#define REC_INFO_DELETED_FLAG 0x20UL
+
+/** Record status values for ROW_FORMAT=COMPACT,DYNAMIC,COMPRESSED */
+enum rec_comp_status_t {
+ /** User record (PAGE_LEVEL=0, heap>=PAGE_HEAP_NO_USER_LOW) */
+ REC_STATUS_ORDINARY = 0,
+ /** Node pointer record (PAGE_LEVEL>=0, heap>=PAGE_HEAP_NO_USER_LOW) */
+ REC_STATUS_NODE_PTR = 1,
+ /** The page infimum pseudo-record (heap=PAGE_HEAP_NO_INFIMUM) */
+ REC_STATUS_INFIMUM = 2,
+ /** The page supremum pseudo-record (heap=PAGE_HEAP_NO_SUPREMUM) */
+ REC_STATUS_SUPREMUM = 3,
+ /** Clustered index record that has been inserted or updated
+ after instant ADD COLUMN (more than dict_index_t::n_core_fields) */
+ REC_STATUS_INSTANT = 4
+};
+
+/** The dtuple_t::info_bits of the hidden metadata of instant ADD COLUMN.
+@see rec_is_metadata()
+@see rec_is_alter_metadata() */
+static const byte REC_INFO_METADATA_ADD
+ = REC_INFO_MIN_REC_FLAG | REC_STATUS_INSTANT;
+
+/** The dtuple_t::info_bits of the hidden metadata of instant ALTER TABLE.
+@see rec_is_metadata() */
+static const byte REC_INFO_METADATA_ALTER
+ = REC_INFO_METADATA_ADD | REC_INFO_DELETED_FLAG;
+
#include "data0type.ic"
#endif
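[Editor's note] The constants above fully determine the metadata-record test: REC_INFO_METADATA_ADD is REC_INFO_MIN_REC_FLAG (0x10) combined with REC_STATUS_INSTANT (4), i.e. 0x14, and REC_INFO_METADATA_ALTER additionally carries the delete-mark flag 0x20, i.e. 0x34. A standalone restatement of dtuple_t::is_metadata()/is_alter_metadata() using only those constants:

// Standalone check of the metadata info_bits classification defined above.
#include <cassert>

static const unsigned REC_INFO_MIN_REC_FLAG = 0x10;
static const unsigned REC_INFO_DELETED_FLAG = 0x20;
static const unsigned REC_STATUS_INSTANT    = 4;
static const unsigned REC_INFO_METADATA_ADD
    = REC_INFO_MIN_REC_FLAG | REC_STATUS_INSTANT;            // 0x14
static const unsigned REC_INFO_METADATA_ALTER
    = REC_INFO_METADATA_ADD | REC_INFO_DELETED_FLAG;         // 0x34

// Metadata record for instant ADD COLUMN or instant ALTER TABLE.
static bool is_metadata(unsigned info_bits)
{
    return (info_bits & ~REC_INFO_DELETED_FLAG) == REC_INFO_METADATA_ADD;
}

// Metadata record specifically for instant ALTER TABLE (delete-marked).
static bool is_alter_metadata(unsigned info_bits)
{
    return info_bits == REC_INFO_METADATA_ALTER;
}

int main()
{
    assert(is_metadata(REC_INFO_METADATA_ADD));
    assert(is_metadata(REC_INFO_METADATA_ALTER));
    assert(!is_alter_metadata(REC_INFO_METADATA_ADD));
    assert(is_alter_metadata(REC_INFO_METADATA_ALTER));
    assert(!is_metadata(REC_INFO_DELETED_FLAG)); // ordinary delete-marked row
}
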
diff --git a/storage/innobase/include/data0type.ic b/storage/innobase/include/data0type.ic
index 56a588562ee..9e88ee53559 100644
--- a/storage/innobase/include/data0type.ic
+++ b/storage/innobase/include/data0type.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
diff --git a/storage/innobase/include/db0err.h b/storage/innobase/include/db0err.h
index 0d7c7b28ea7..53e75e695ae 100644
--- a/storage/innobase/include/db0err.h
+++ b/storage/innobase/include/db0err.h
@@ -136,8 +136,6 @@ enum dberr_t {
DB_FTS_TOO_MANY_WORDS_IN_PHRASE,
/*< Too many words in a phrase */
- DB_TABLESPACE_TRUNCATED, /*!< tablespace was truncated */
-
DB_DECRYPTION_FAILED, /* Tablespace encrypted and
decrypt operation failed because
of missing key management plugin,
diff --git a/storage/innobase/include/dict0boot.h b/storage/innobase/include/dict0boot.h
index d683afcdc7e..d9d4b3d69d5 100644
--- a/storage/innobase/include/dict0boot.h
+++ b/storage/innobase/include/dict0boot.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -50,12 +51,8 @@ dict_hdr_get_new_id(
(not assigned if NULL) */
index_id_t* index_id, /*!< out: index id
(not assigned if NULL) */
- ulint* space_id, /*!< out: space id
+ ulint* space_id); /*!< out: space id
(not assigned if NULL) */
- const dict_table_t* table, /*!< in: table */
- bool disable_redo); /*!< in: if true and table
- object is NULL
- then disable-redo */
/**********************************************************************//**
Writes the current value of the row id counter to the dictionary header file
page. */
@@ -124,13 +121,6 @@ dict_is_sys_table(
/* The following is a secondary index on SYS_TABLES */
#define DICT_TABLE_IDS_ID 5
-#define DICT_HDR_FIRST_ID 10 /* the ids for tables etc. start
- from this number, except for basic
- system tables and their above defined
- indexes; ibuf tables and indexes are
- assigned as the id the number
- DICT_IBUF_ID_MIN plus the space id */
-
/* The offset of the dictionary header on the page */
#define DICT_HDR FSEG_PAGE_DATA
diff --git a/storage/innobase/include/dict0crea.h b/storage/innobase/include/dict0crea.h
index 75ef4f09dbb..8c941d494c1 100644
--- a/storage/innobase/include/dict0crea.h
+++ b/storage/innobase/include/dict0crea.h
@@ -67,14 +67,6 @@ dict_create_table_step(
/*===================*/
que_thr_t* thr); /*!< in: query thread */
-/** Assign a new table ID and put it into the table cache and the transaction.
-@param[in,out] table Table that needs an ID
-@param[in,out] trx Transaction */
-void
-dict_table_assign_new_id(
- dict_table_t* table,
- trx_t* trx);
-
/***********************************************************//**
Creates an index. This is a high-level function used in SQL execution
graphs.
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index 316a9c620f7..d2273b65349 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -32,12 +32,13 @@ Created 1/8/1996 Heikki Tuuri
#include "dict0mem.h"
#include "fsp0fsp.h"
#include <deque>
-#include "dict0pagecompress.h"
extern bool innodb_table_stats_not_found;
extern bool innodb_index_stats_not_found;
-#include "sync0rw.h"
+/** the first table or index ID for other than hard-coded system tables */
+#define DICT_HDR_FIRST_ID 10
+
/********************************************************************//**
Get the database name length in a table name.
@return database name length */
@@ -360,21 +361,12 @@ dict_table_add_system_columns(
dict_table_t* table, /*!< in/out: table */
mem_heap_t* heap) /*!< in: temporary heap */
MY_ATTRIBUTE((nonnull));
-/**********************************************************************//**
-Removes a table object from the dictionary cache. */
-void
-dict_table_remove_from_cache(
-/*=========================*/
- dict_table_t* table) /*!< in, own: table */
- MY_ATTRIBUTE((nonnull));
-/**********************************************************************//**
-Removes a table object from the dictionary cache. */
-void
-dict_table_remove_from_cache_low(
-/*=============================*/
- dict_table_t* table, /*!< in, own: table */
- ibool lru_evict) /*!< in: TRUE if table being evicted
- to make room in the table LRU list */
+/** Evict a table definition from the InnoDB data dictionary cache.
+@param[in,out] table cached table definition to be evicted
+@param[in] lru whether this is part of least-recently-used eviction
+@param[in] keep whether to keep (not free) the object */
+void dict_table_remove_from_cache(dict_table_t* table, bool lru = false,
+ bool keep = false)
MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Renames a table object.
@@ -684,65 +676,14 @@ do { \
dict_table_skip_corrupt_index(index); \
} while (0)
-/********************************************************************//**
-Check whether the index is the clustered index.
-@return nonzero for clustered index, zero for other indexes */
-UNIV_INLINE
-ulint
-dict_index_is_clust(
-/*================*/
- const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((warn_unused_result));
-
-/** Check if index is auto-generated clustered index.
-@param[in] index index
-
-@return true if index is auto-generated clustered index. */
-UNIV_INLINE
-bool
-dict_index_is_auto_gen_clust(
- const dict_index_t* index);
-
-/********************************************************************//**
-Check whether the index is unique.
-@return nonzero for unique index, zero for other indexes */
-UNIV_INLINE
-ulint
-dict_index_is_unique(
-/*=================*/
- const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((warn_unused_result));
-/********************************************************************//**
-Check whether the index is a Spatial Index.
-@return nonzero for Spatial Index, zero for other indexes */
-UNIV_INLINE
-ulint
-dict_index_is_spatial(
-/*==================*/
- const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((warn_unused_result));
-
+#define dict_index_is_clust(index) (index)->is_clust()
+#define dict_index_is_auto_gen_clust(index) (index)->is_gen_clust()
+#define dict_index_is_unique(index) (index)->is_unique()
+#define dict_index_is_spatial(index) (index)->is_spatial()
+#define dict_index_is_ibuf(index) (index)->is_ibuf()
+#define dict_index_is_sec_or_ibuf(index) !(index)->is_primary()
#define dict_index_has_virtual(index) (index)->has_virtual()
-/********************************************************************//**
-Check whether the index is the insert buffer tree.
-@return nonzero for insert buffer, zero for other indexes */
-UNIV_INLINE
-ulint
-dict_index_is_ibuf(
-/*===============*/
- const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((warn_unused_result));
-/********************************************************************//**
-Check whether the index is a secondary index or the insert buffer tree.
-@return nonzero for insert buffer, zero for other indexes */
-UNIV_INLINE
-ulint
-dict_index_is_sec_or_ibuf(
-/*======================*/
- const dict_index_t* index) /*!< in: index */
- MY_ATTRIBUTE((warn_unused_result));
-
/** Get all the FTS indexes on a table.
@param[in] table table
@param[out] indexes all FTS indexes on this table
@@ -898,15 +839,8 @@ dict_index_get_min_size(
/*====================*/
const dict_index_t* index) /*!< in: index */
MY_ATTRIBUTE((nonnull, warn_unused_result));
-/********************************************************************//**
-Check whether the table uses the compact page format.
-@return TRUE if table uses the compact page format */
-UNIV_INLINE
-bool
-dict_table_is_comp(
-/*===============*/
- const dict_table_t* table) /*!< in: table */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
+
+#define dict_table_is_comp(table) (table)->not_redundant()
/** Determine if a table uses atomic BLOBs (no locally stored prefix).
@param[in] table InnoDB table
@@ -952,25 +886,33 @@ ulint
dict_tf_to_fsp_flags(ulint table_flags)
MY_ATTRIBUTE((const));
-/** Extract the page size from table flags.
+
+/** Extract the ROW_FORMAT=COMPRESSED page size from table flags.
@param[in] flags flags
-@return compressed page size, or 0 if not compressed */
-UNIV_INLINE
-const page_size_t
-dict_tf_get_page_size(
- ulint flags)
-MY_ATTRIBUTE((const));
+@return ROW_FORMAT=COMPRESSED page size
+@retval 0 if not compressed */
+inline ulint dict_tf_get_zip_size(ulint flags)
+{
+ flags &= DICT_TF_MASK_ZIP_SSIZE;
+ return flags
+ ? (UNIV_ZIP_SIZE_MIN >> 1)
+ << (FSP_FLAGS_GET_ZIP_SSIZE(flags >> DICT_TF_POS_ZIP_SSIZE
+ << FSP_FLAGS_POS_ZIP_SSIZE))
+ : 0;
+}
/** Determine the extent size (in pages) for the given table
@param[in] table the table whose extent size is being
calculated.
@return extent size in pages (256, 128 or 64) */
-ulint
-dict_table_extent_size(
- const dict_table_t* table);
+inline ulint dict_table_extent_size(const dict_table_t* table)
+{
+ if (ulint zip_size = table->space->zip_size()) {
+ return (1ULL << 20) / zip_size;
+ }
-/** Get the table page size. */
-#define dict_table_page_size(table) page_size_t(table->space->flags)
+ return FSP_EXTENT_SIZE;
+}
/*********************************************************************//**
Obtain exclusive locks on all index trees of the table. This is to prevent
@@ -1220,21 +1162,6 @@ dict_index_get_nth_col_or_prefix_pos(
ulint* prefix_col_pos) /*!< out: col num if prefix
*/
__attribute__((warn_unused_result));
-
-/********************************************************************//**
-Returns TRUE if the index contains a column or a prefix of that column.
-@param[in] index index
-@param[in] n column number
-@param[in] is_virtual whether it is a virtual col
-@return TRUE if contains the column or its prefix */
-bool
-dict_index_contains_col_or_prefix(
-/*==============================*/
- const dict_index_t* index, /*!< in: index */
- ulint n, /*!< in: column number */
- bool is_virtual)
- /*!< in: whether it is a virtual col */
- MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Looks for a matching field in an index. The column has to be the same. The
column in index must be complete, or must contain a prefix longer than the
@@ -1259,16 +1186,6 @@ dict_table_get_nth_col_pos(
ulint n, /*!< in: column number */
ulint* prefix_col_pos) /*!< out: col num if prefix */
MY_ATTRIBUTE((nonnull(1), warn_unused_result));
-/********************************************************************//**
-Returns the position of a system column in an index.
-@return position, ULINT_UNDEFINED if not contained */
-UNIV_INLINE
-ulint
-dict_index_get_sys_col_pos(
-/*=======================*/
- const dict_index_t* index, /*!< in: index */
- ulint type) /*!< in: DATA_ROW_ID, ... */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
/*******************************************************************//**
Adds a column to index. */
void
@@ -1366,21 +1283,6 @@ dict_index_build_node_ptr(
ulint level) /*!< in: level of rec in tree:
0 means leaf level */
MY_ATTRIBUTE((nonnull, warn_unused_result));
-/**********************************************************************//**
-Copies an initial segment of a physical record, long enough to specify an
-index entry uniquely.
-@return pointer to the prefix record */
-rec_t*
-dict_index_copy_rec_order_prefix(
-/*=============================*/
- const dict_index_t* index, /*!< in: index */
- const rec_t* rec, /*!< in: record for which to
- copy prefix */
- ulint* n_fields,/*!< out: number of fields copied */
- byte** buf, /*!< in/out: memory buffer for the
- copied prefix, or NULL */
- ulint* buf_size)/*!< in/out: buffer size */
- MY_ATTRIBUTE((nonnull, warn_unused_result));
/** Convert a physical record into a search tuple.
@param[in] rec index record (not necessarily in an index page)
@param[in] index index
@@ -1626,8 +1528,10 @@ struct dict_sys_t{
the log records */
hash_table_t* table_hash; /*!< hash table of the tables, based
on name */
- hash_table_t* table_id_hash; /*!< hash table of the tables, based
- on id */
+ /** hash table of persistent table IDs */
+ hash_table_t* table_id_hash;
+ /** hash table of temporary table IDs */
+ hash_table_t* temp_id_hash;
dict_table_t* sys_tables; /*!< SYS_TABLES table */
dict_table_t* sys_columns; /*!< SYS_COLUMNS table */
dict_table_t* sys_indexes; /*!< SYS_INDEXES table */
@@ -1641,6 +1545,52 @@ struct dict_sys_t{
UT_LIST_BASE_NODE_T(dict_table_t)
table_non_LRU; /*!< List of tables that can't be
evicted from the cache */
+
+ /** @return a new temporary table ID */
+ table_id_t get_temporary_table_id() {
+ return temp_table_id.fetch_add(1, std::memory_order_relaxed);
+ }
+
+ /** Look up a temporary table.
+ @param id temporary table ID
+ @return temporary table
+ @retval NULL if the table does not exist
+ (should only happen during the rollback of CREATE...SELECT) */
+ dict_table_t* get_temporary_table(table_id_t id)
+ {
+ ut_ad(mutex_own(&mutex));
+ dict_table_t* table;
+ ulint fold = ut_fold_ull(id);
+ HASH_SEARCH(id_hash, temp_id_hash, fold, dict_table_t*, table,
+ ut_ad(table->cached), table->id == id);
+ if (UNIV_LIKELY(table != NULL)) {
+ DBUG_ASSERT(table->is_temporary());
+ DBUG_ASSERT(table->id >= DICT_HDR_FIRST_ID);
+ table->acquire();
+ }
+ return table;
+ }
+
+ /** Look up a persistent table.
+ @param id table ID
+ @return table
+ @retval NULL if not cached */
+ dict_table_t* get_table(table_id_t id)
+ {
+ ut_ad(mutex_own(&mutex));
+ dict_table_t* table;
+ ulint fold = ut_fold_ull(id);
+ HASH_SEARCH(id_hash, table_id_hash, fold, dict_table_t*, table,
+ ut_ad(table->cached), table->id == id);
+ DBUG_ASSERT(!table || !table->is_temporary());
+ return table;
+ }
+
+ dict_sys_t() : temp_table_id(DICT_HDR_FIRST_ID) {}
+
+private:
+ /** the sequence of temporary table IDs */
+ std::atomic<table_id_t> temp_table_id;
};
/** dummy index for ROW_FORMAT=REDUNDANT supremum and infimum records */
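[Editor's note] dict_tf_get_zip_size() and dict_table_extent_size() above boil down to simple arithmetic: the ZIP_SSIZE bits select a compressed page size of 512 << ssize bytes, and a compressed tablespace uses extents of 1 MiB / zip_size pages, otherwise FSP_EXTENT_SIZE. A numeric sketch, assuming FSP_EXTENT_SIZE == 64 pages for the default 16 KiB page size:

// Numeric illustration of the extent-size rule used above.
// Assumption (not from this diff): FSP_EXTENT_SIZE == 64 for 16 KiB pages.
#include <cstdio>

static const unsigned UNIV_ZIP_SIZE_MIN = 1024;
static const unsigned FSP_EXTENT_SIZE = 64;

static unsigned zip_size_from_ssize(unsigned ssize)
{
    return ssize ? (UNIV_ZIP_SIZE_MIN >> 1) << ssize : 0;
}

static unsigned extent_size_in_pages(unsigned zip_size)
{
    // 1 MiB worth of pages for ROW_FORMAT=COMPRESSED, else the default.
    return zip_size ? (1u << 20) / zip_size : FSP_EXTENT_SIZE;
}

int main()
{
    for (unsigned ssize = 0; ssize <= 5; ssize++) {
        unsigned zs = zip_size_from_ssize(ssize);
        std::printf("zip_size=%5u -> extent=%4u pages\n",
                    zs, extent_size_in_pages(zs));
    }
    // zip_size 1024..16384 gives extents of 1024, 512, 256, 128, 64 pages.
}
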
diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic
index d591cb57378..30bda5f6971 100644
--- a/storage/innobase/include/dict0dict.ic
+++ b/storage/innobase/include/dict0dict.ic
@@ -25,6 +25,7 @@ Created 1/8/1996 Heikki Tuuri
***********************************************************************/
#include "fsp0sysspace.h"
+#include "dict0pagecompress.h"
/*********************************************************************//**
Gets the minimum number of bytes per character.
@@ -241,83 +242,6 @@ dict_table_get_next_index(
#endif /* UNIV_DEBUG */
/********************************************************************//**
-Check whether the index is the clustered index.
-@return nonzero for clustered index, zero for other indexes */
-UNIV_INLINE
-ulint
-dict_index_is_clust(
-/*================*/
- const dict_index_t* index) /*!< in: index */
-{
- ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
- return(index->type & DICT_CLUSTERED);
-}
-
-/** Check if index is auto-generated clustered index.
-@param[in] index index
-
-@return true if index is auto-generated clustered index. */
-UNIV_INLINE
-bool
-dict_index_is_auto_gen_clust(
- const dict_index_t* index)
-{
- return(index->type == DICT_CLUSTERED);
-}
-
-/********************************************************************//**
-Check whether the index is unique.
-@return nonzero for unique index, zero for other indexes */
-UNIV_INLINE
-ulint
-dict_index_is_unique(
-/*=================*/
- const dict_index_t* index) /*!< in: index */
-{
- ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
- return(index->type & DICT_UNIQUE);
-}
-
-/********************************************************************//**
-Check whether the index is a Spatial Index.
-@return nonzero for Spatial Index, zero for other indexes */
-UNIV_INLINE
-ulint
-dict_index_is_spatial(
-/*==================*/
- const dict_index_t* index) /*!< in: index */
-{
- ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
- return ulint(UNIV_EXPECT(index->type & DICT_SPATIAL, 0));
-}
-
-/********************************************************************//**
-Check whether the index is the insert buffer tree.
-@return nonzero for insert buffer, zero for other indexes */
-UNIV_INLINE
-ulint
-dict_index_is_ibuf(
-/*===============*/
- const dict_index_t* index) /*!< in: index */
-{
- ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
- return(index->type & DICT_IBUF);
-}
-
-/********************************************************************//**
-Check whether the index is a secondary index or the insert buffer tree.
-@return nonzero for insert buffer, zero for other indexes */
-UNIV_INLINE
-ulint
-dict_index_is_sec_or_ibuf(
-/*======================*/
- const dict_index_t* index) /*!< in: index */
-{
- ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
- return((index->type & (DICT_CLUSTERED | DICT_IBUF)) != DICT_CLUSTERED);
-}
-
-/********************************************************************//**
Gets the number of user-defined non-virtual columns in a table in the
dictionary cache.
@return number of user-defined (e.g., not ROW_ID) non-virtual
@@ -462,7 +386,8 @@ dict_table_get_nth_v_col(
ut_ad(table);
ut_ad(pos < table->n_v_def);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
- ut_ad(!table->v_cols[pos].m_col.is_instant());
+ ut_ad(!table->v_cols[pos].m_col.is_added());
+ ut_ad(!table->v_cols[pos].m_col.is_dropped());
return &table->v_cols[pos];
}
@@ -501,19 +426,6 @@ dict_table_get_sys_col_no(
return unsigned(table->n_cols) + (sys - DATA_N_SYS_COLS);
}
-/********************************************************************//**
-Check whether the table uses the compact page format.
-@return TRUE if table uses the compact page format */
-UNIV_INLINE
-bool
-dict_table_is_comp(
-/*===============*/
- const dict_table_t* table) /*!< in: table */
-{
- ut_ad(table);
- return (table->flags & DICT_TF_COMPACT) != 0;
-}
-
/************************************************************************
Check if the table has an FTS index. */
UNIV_INLINE
@@ -720,20 +632,34 @@ dict_tf_to_fsp_flags(ulint table_flags)
DBUG_EXECUTE_IF("dict_tf_to_fsp_flags_failure",
return(ULINT_UNDEFINED););
- /* Adjust bit zero. */
- fsp_flags = DICT_TF_HAS_ATOMIC_BLOBS(table_flags) ? 1 : 0;
+ /* No ROW_FORMAT=COMPRESSED for innodb_checksum_algorithm=full_crc32 */
+ if ((srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32
+ || srv_checksum_algorithm == SRV_CHECKSUM_ALGORITHM_FULL_CRC32)
+ && !(table_flags & DICT_TF_MASK_ZIP_SSIZE)) {
- /* ZIP_SSIZE and ATOMIC_BLOBS are at the same position. */
- fsp_flags |= table_flags
- & (DICT_TF_MASK_ZIP_SSIZE | DICT_TF_MASK_ATOMIC_BLOBS);
+ fsp_flags = 1U << FSP_FLAGS_FCRC32_POS_MARKER
+ | FSP_FLAGS_FCRC32_PAGE_SSIZE();
+
+ if (page_compression_level) {
+ fsp_flags |= innodb_compression_algorithm
+ << FSP_FLAGS_FCRC32_POS_COMPRESSED_ALGO;
+ }
+ } else {
+ /* Adjust bit zero. */
+ fsp_flags = DICT_TF_HAS_ATOMIC_BLOBS(table_flags) ? 1 : 0;
- fsp_flags |= FSP_FLAGS_PAGE_SSIZE();
+ /* ZIP_SSIZE and ATOMIC_BLOBS are at the same position. */
+ fsp_flags |= table_flags
+ & (DICT_TF_MASK_ZIP_SSIZE | DICT_TF_MASK_ATOMIC_BLOBS);
- if (page_compression_level) {
- fsp_flags |= FSP_FLAGS_MASK_PAGE_COMPRESSION;
+ fsp_flags |= FSP_FLAGS_PAGE_SSIZE();
+
+ if (page_compression_level) {
+ fsp_flags |= FSP_FLAGS_MASK_PAGE_COMPRESSION;
+ }
}
- ut_a(fsp_flags_is_valid(fsp_flags, false));
+ ut_a(fil_space_t::is_valid_flags(fsp_flags, false));
if (DICT_TF_HAS_DATA_DIR(table_flags)) {
fsp_flags |= 1U << FSP_FLAGS_MEM_DATA_DIR;
@@ -779,28 +705,6 @@ dict_tf_to_sys_tables_type(
return(type);
}
-/** Extract the page size info from table flags.
-@param[in] flags flags
-@return a structure containing the compressed and uncompressed
-page sizes and a boolean indicating if the page is compressed. */
-UNIV_INLINE
-const page_size_t
-dict_tf_get_page_size(
- ulint flags)
-{
- const ulint zip_ssize = DICT_TF_GET_ZIP_SSIZE(flags);
-
- if (zip_ssize == 0) {
- return(univ_page_size);
- }
-
- const ulint zip_size = (UNIV_ZIP_SIZE_MIN >> 1) << zip_ssize;
-
- ut_ad(zip_size <= UNIV_ZIP_SIZE_MAX);
-
- return(page_size_t(zip_size, srv_page_size, true));
-}
-
/*********************************************************************//**
Obtain exclusive locks on all index trees of the table. This is to prevent
accessing index trees while InnoDB is updating internal metadata for
@@ -979,30 +883,6 @@ dict_index_get_nth_field(
}
#endif /* UNIV_DEBUG */
-/********************************************************************//**
-Returns the position of a system column in an index.
-@return position, ULINT_UNDEFINED if not contained */
-UNIV_INLINE
-ulint
-dict_index_get_sys_col_pos(
-/*=======================*/
- const dict_index_t* index, /*!< in: index */
- ulint type) /*!< in: DATA_ROW_ID, ... */
-{
- ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
- ut_ad(!dict_index_is_ibuf(index));
-
- if (dict_index_is_clust(index)) {
-
- return(dict_col_get_clust_pos(
- dict_table_get_sys_col(index->table, type),
- index));
- }
-
- return(dict_index_get_nth_col_pos(
- index, dict_table_get_sys_col_no(index->table, type), NULL));
-}
-
/*********************************************************************//**
Gets the field column.
@return field->col, pointer to the table column */
@@ -1233,9 +1113,7 @@ dict_table_is_fts_column(
index = (dict_index_t*) ib_vector_getp(indexes, i);
- if (dict_index_contains_col_or_prefix(
- index, col_no, is_virtual)) {
-
+ if (index->contains_col_or_prefix(col_no, is_virtual)) {
return(i);
}
}
@@ -1359,7 +1237,7 @@ void
dict_table_t::acquire()
{
ut_ad(mutex_own(&dict_sys->mutex));
- my_atomic_add32_explicit(&n_ref_count, 1, MY_MEMORY_ORDER_RELAXED);
+ n_ref_count++;
}
/** Release the table handle.
@@ -1368,8 +1246,7 @@ inline
bool
dict_table_t::release()
{
- int32 n = my_atomic_add32_explicit(
- &n_ref_count, -1, MY_MEMORY_ORDER_RELAXED);
+ auto n = n_ref_count--;
ut_ad(n > 0);
return n == 1;
}
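[Editor's note] The acquire()/release() hunk above replaces explicit my_atomic calls with an Atomic_counter while keeping the same contract: release() reports whether the caller dropped the last reference. A sketch of that contract with std::atomic (illustrative, not the dict_table_t implementation):

// Reference-count contract sketch: release() returns true only for the
// caller that decrements the counter from 1 to 0.
#include <atomic>
#include <cassert>
#include <cstdint>

struct table_stub {
    std::atomic<uint32_t> n_ref_count{0};

    void acquire() { n_ref_count.fetch_add(1, std::memory_order_relaxed); }

    bool release()
    {
        uint32_t n = n_ref_count.fetch_sub(1, std::memory_order_relaxed);
        assert(n > 0);       // must not underflow
        return n == 1;       // true if this was the last reference
    }
};

int main()
{
    table_stub t;
    t.acquire();
    t.acquire();
    assert(!t.release());    // one reference still held
    assert(t.release());     // last reference dropped
}
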
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index 2cf16dfe57a..2e8b8d72c93 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -47,6 +47,7 @@ Created 1/8/1996 Heikki Tuuri
#include "os0once.h"
#include "fil0fil.h"
#include "fil0crypt.h"
+#include <sql_const.h>
#include <set>
#include <algorithm>
#include <iterator>
@@ -573,6 +574,10 @@ struct dict_col_t{
this column. Our current max limit is
3072 (REC_VERSION_56_MAX_INDEX_COL_LEN)
bytes. */
+private:
+ /** Special value of ind for a dropped column */
+ static const unsigned DROPPED = 1023;
+public:
/** Detach the column from an index.
@param[in] index index to be detached from */
@@ -602,7 +607,8 @@ struct dict_col_t{
ut_ad(mtype == DATA_INT || mtype == DATA_FIXBINARY);
return mtype == DATA_INT;
}
- /** @return whether this is system versioned */
+ /** @return whether this user column (not row_start, row_end)
+ has System Versioning property */
bool is_versioned() const { return !(~prtype & DATA_VERSIONED); }
/** @return whether this is the system version start */
bool vers_sys_start() const
@@ -616,29 +622,73 @@ struct dict_col_t{
}
/** @return whether this is an instantly-added column */
- bool is_instant() const
+ bool is_added() const
{
DBUG_ASSERT(def_val.len != UNIV_SQL_DEFAULT || !def_val.data);
return def_val.len != UNIV_SQL_DEFAULT;
}
+ /** Flag the column instantly dropped */
+ void set_dropped() { ind = DROPPED; }
+ /** Flag the column instantly dropped.
+ @param[in] not_null whether the column was NOT NULL
+ @param[in] len2 whether the length exceeds 255 bytes
+ @param[in] fixed the fixed length in bytes, or 0 */
+ void set_dropped(bool not_null, bool len2, unsigned fixed)
+ {
+ DBUG_ASSERT(!len2 || !fixed);
+ prtype = not_null
+ ? DATA_NOT_NULL | DATA_BINARY_TYPE
+ : DATA_BINARY_TYPE;
+ if (fixed) {
+ mtype = DATA_FIXBINARY;
+ len = fixed;
+ } else {
+ mtype = DATA_BINARY;
+ len = len2 ? 65535 : 255;
+ }
+ mbminlen = mbmaxlen = 0;
+ ind = DROPPED;
+ ord_part = 0;
+ max_prefix = 0;
+ }
+ /** @return whether the column was instantly dropped */
+ bool is_dropped() const { return ind == DROPPED; }
+ /** @return whether the column was instantly dropped
+ @param[in] index the clustered index */
+ inline bool is_dropped(const dict_index_t& index) const;
+
/** Get the default value of an instantly-added column.
@param[out] len value length (in bytes), or UNIV_SQL_NULL
@return default value
@retval NULL if the default value is SQL NULL (len=UNIV_SQL_NULL) */
const byte* instant_value(ulint* len) const
{
- DBUG_ASSERT(is_instant());
+ DBUG_ASSERT(is_added());
*len = def_val.len;
return static_cast<const byte*>(def_val.data);
}
/** Remove the 'instant ADD' status of the column */
- void remove_instant()
+ void clear_instant()
{
- DBUG_ASSERT(is_instant());
def_val.len = UNIV_SQL_DEFAULT;
def_val.data = NULL;
}
+
+ /** Determine if the columns have the same format
+ except for is_nullable() and is_versioned().
+ @param[in] other column to compare to
+ @return whether the columns have the same format */
+ bool same_format(const dict_col_t& other) const
+ {
+ return mtype == other.mtype
+ && len >= other.len
+ && mbminlen == other.mbminlen
+ && mbmaxlen == other.mbmaxlen
+ && !((prtype ^ other.prtype)
+ & ~(DATA_NOT_NULL | DATA_VERSIONED
+ | DATA_LONG_TRUE_VARCHAR));
+ }
};
/** Index information put in a list of virtual column structure. Index
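[Editor's note] set_dropped(not_null, len2, fixed) above records just enough of a dropped column's format to keep existing records parseable: a fixed-length column becomes DATA_FIXBINARY of that length, a variable-length one becomes DATA_BINARY with a 255- or 65535-byte limit, and NOT NULL is preserved in prtype. The mapping restated as a tiny sketch (type names are stand-ins, not the DATA_* constants):

// Restatement of the set_dropped() format mapping, with placeholder
// type names instead of the real DATA_* constants.
#include <cstdio>

struct dropped_format {
    const char* mtype;   // "FIXBINARY" or "BINARY"
    unsigned    len;     // fixed length, or the 255/65535 limit
    bool        not_null;
};

static dropped_format dropped_column_format(bool not_null, bool len2,
                                            unsigned fixed)
{
    if (fixed) {
        return { "FIXBINARY", fixed, not_null };
    }
    return { "BINARY", len2 ? 65535u : 255u, not_null };
}

int main()
{
    const dropped_format f1 = dropped_column_format(true, false, 4);
    const dropped_format f2 = dropped_column_format(false, true, 0);
    std::printf("%s(%u) not_null=%d\n", f1.mtype, f1.len, f1.not_null);
    std::printf("%s(%u) not_null=%d\n", f2.mtype, f2.len, f2.not_null);
}
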
@@ -812,7 +862,8 @@ an uncompressed page should be left as padding to avoid compression
failures. This estimate is based on a self-adapting heuristic. */
struct zip_pad_info_t {
SysMutex* mutex; /*!< mutex protecting the info */
- ulint pad; /*!< number of bytes used as pad */
+ Atomic_counter<ulint>
+ pad; /*!< number of bytes used as pad */
ulint success;/*!< successful compression ops during
current round */
ulint failure;/*!< failed compression ops during
@@ -838,7 +889,7 @@ to start with. */
/** Data structure for an index. Most fields will be
initialized to 0, NULL or FALSE in dict_mem_index_create(). */
-struct dict_index_t{
+struct dict_index_t {
index_id_t id; /*!< id of the index */
mem_heap_t* heap; /*!< memory heap */
id_name_t name; /*!< index name */
@@ -913,6 +964,8 @@ struct dict_index_t{
#ifdef UNIV_DEBUG
/** whether this is a dummy index object */
bool is_dummy;
+ /** whether btr_cur_instant_init() is in progress */
+ bool in_instant_init;
uint32_t magic_n;/*!< magic number */
/** Value of dict_index_t::magic_n */
# define DICT_INDEX_MAGIC_N 76789786
@@ -1027,7 +1080,7 @@ struct dict_index_t{
page cannot be read or decrypted */
inline bool is_readable() const;
- /** @return whether instant ADD COLUMN is in effect */
+ /** @return whether instant ALTER TABLE is in effect */
inline bool is_instant() const;
/** @return whether the index is the primary key index
@@ -1037,9 +1090,38 @@ struct dict_index_t{
return DICT_CLUSTERED == (type & (DICT_CLUSTERED | DICT_IBUF));
}
+ /** @return whether this is a generated clustered index */
+ bool is_gen_clust() const { return type == DICT_CLUSTERED; }
+
+ /** @return whether this is a clustered index */
+ bool is_clust() const { return type & DICT_CLUSTERED; }
+
+ /** @return whether this is a unique index */
+ bool is_unique() const { return type & DICT_UNIQUE; }
+
+ /** @return whether this is a spatial index */
+ bool is_spatial() const { return UNIV_UNLIKELY(type & DICT_SPATIAL); }
+
+ /** @return whether this is the change buffer */
+ bool is_ibuf() const { return UNIV_UNLIKELY(type & DICT_IBUF); }
+
/** @return whether the index includes virtual columns */
bool has_virtual() const { return type & DICT_VIRTUAL; }
+ /** @return the position of DB_TRX_ID */
+ unsigned db_trx_id() const {
+ DBUG_ASSERT(is_primary());
+ DBUG_ASSERT(n_uniq);
+ DBUG_ASSERT(n_uniq <= MAX_REF_PARTS);
+ return n_uniq;
+ }
+ /** @return the position of DB_ROLL_PTR */
+ unsigned db_roll_ptr() const { return db_trx_id() + 1; }
+
+ /** @return the offset of the metadata BLOB field,
+ or the first user field after the PRIMARY KEY,DB_TRX_ID,DB_ROLL_PTR */
+ unsigned first_user_field() const { return db_trx_id() + 2; }
+
/** @return whether the index is corrupted */
inline bool is_corrupted() const;
@@ -1085,24 +1167,20 @@ struct dict_index_t{
return fields[n].col->instant_value(len);
}
- /** Adjust clustered index metadata for instant ADD COLUMN.
- @param[in] clustered index definition after instant ADD COLUMN */
- void instant_add_field(const dict_index_t& instant);
-
- /** Remove the 'instant ADD' status of a clustered index.
- Protected by index root page x-latch or table X-lock. */
- void remove_instant()
- {
- DBUG_ASSERT(is_primary());
- if (!is_instant()) {
- return;
- }
- for (unsigned i = n_core_fields; i < n_fields; i++) {
- fields[i].col->remove_instant();
- }
- n_core_fields = n_fields;
- n_core_null_bytes = UT_BITS_IN_BYTES(unsigned(n_nullable));
- }
+ /** Adjust index metadata for instant ADD/DROP/reorder COLUMN.
+ @param[in] instant clustered index definition after instant ALTER TABLE */
+ inline void instant_add_field(const dict_index_t& instant);
+ /** Remove instant ADD COLUMN metadata. */
+ inline void clear_instant_add();
+ /** Remove instant ALTER TABLE metadata. */
+ inline void clear_instant_alter();
+
+ /** Construct the metadata record for instant ALTER TABLE.
+ @param[in] row dummy or default values for existing columns
+ @param[in,out] heap memory heap for allocations
+ @return metadata record */
+ inline dtuple_t*
+ instant_metadata(const dtuple_t& row, mem_heap_t* heap) const;
/** Check if record in clustered index is historical row.
@param[in] rec clustered row
@@ -1117,6 +1195,16 @@ struct dict_index_t{
@return true on error */
bool
vers_history_row(const rec_t* rec, bool &history_row);
+
+ /** Reconstruct the clustered index fields. */
+ inline void reconstruct_fields();
+
+ /** Check if the index contains a column or a prefix of that column.
+ @param[in] n column number
+ @param[in] is_virtual whether it is a virtual col
+ @return whether the index contains the column or its prefix */
+ bool contains_col_or_prefix(ulint n, bool is_virtual) const
+ MY_ATTRIBUTE((warn_unused_result));
};
/** Detach a column from an index.
@@ -1451,6 +1539,64 @@ struct dict_vcol_templ_t {
dict_vcol_templ_t() : vtempl(0), mysql_table_query_id(~0ULL) {}
};
+/** Metadata on clustered index fields starting from first_user_field() */
+class field_map_element_t
+{
+ /** Number of bits for representing a column number */
+ static constexpr uint16_t IND_BITS = 10;
+
+ /** Set if the column of the field has been instantly dropped */
+ static constexpr uint16_t DROPPED = 1U << (IND_BITS + 5);
+
+ /** Set if the column was dropped and originally declared NOT NULL */
+ static constexpr uint16_t NOT_NULL = 1U << (IND_BITS + 4);
+
+ /** Column index (if !(data & DROPPED)): table->cols[data & IND],
+ or field length (if (data & DROPPED)):
+ (data & IND) = 0 if variable-length with max_len < 256 bytes;
+ (data & IND) = 1 if variable-length with max_len > 255 bytes;
+ (data & IND) = 1 + L otherwise, with L=fixed length of the column */
+ static constexpr uint16_t IND = (1U << IND_BITS) - 1;
+
+ /** Field metadata */
+ uint16_t data;
+
+ void clear_not_null() { data &= ~NOT_NULL; }
+public:
+ bool is_dropped() const { return data & DROPPED; }
+ void set_dropped() { data |= DROPPED; }
+ bool is_not_null() const { return data & NOT_NULL; }
+ void set_not_null() { ut_ad(is_dropped()); data |= NOT_NULL; }
+ uint16_t ind() const { return data & IND; }
+ void set_ind(uint16_t i)
+ {
+ DBUG_ASSERT(i <= IND);
+ DBUG_ASSERT(!ind());
+ data |= i;
+ }
+ field_map_element_t& operator= (uint16_t value)
+ {
+ data = value;
+ return *this;
+ }
+ operator uint16_t() { return data; }
+};
+
+static_assert(sizeof(field_map_element_t) == 2,
+ "Size mismatch for a persistent data item!");
+
+/** Instantly dropped or reordered columns */
+struct dict_instant_t
+{
+ /** Number of dropped columns */
+ unsigned n_dropped;
+ /** Dropped columns */
+ dict_col_t* dropped;
+ /** Map of clustered index non-PK fields[i - first_user_field()]
+ to table columns */
+ field_map_element_t* field_map;
+};
+
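[Editor's note] With IND_BITS = 10, the 16-bit field_map_element_t packs the column number (or dropped-column length code) into bits 0-9, NOT_NULL into bit 14 and DROPPED into bit 15. A standalone bit-twiddling check of that layout (masks copied from the class above):

// Bit layout of field_map_element_t: bits 0-9 = ind, bit 14 = NOT_NULL,
// bit 15 = DROPPED.
#include <cassert>
#include <cstdint>

static const uint16_t IND_BITS = 10;
static const uint16_t DROPPED  = uint16_t(1U << (IND_BITS + 5));  // 0x8000
static const uint16_t NOT_NULL = uint16_t(1U << (IND_BITS + 4));  // 0x4000
static const uint16_t IND      = uint16_t((1U << IND_BITS) - 1);  // 0x03FF

int main()
{
    uint16_t data = 0;
    data |= 7;                     // ind(): column 7 of the table
    data |= DROPPED;               // the column was instantly dropped
    data |= NOT_NULL;              // and was declared NOT NULL

    assert((data & IND) == 7);
    assert(data & DROPPED);
    assert(data & NOT_NULL);
    assert(data == 0xC007);        // 0x8000 | 0x4000 | 7
}
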
/** These are used when MySQL FRM and InnoDB data dictionary are
in inconsistent state. */
typedef enum {
@@ -1469,11 +1615,7 @@ struct dict_table_t {
/** Get reference count.
@return current value of n_ref_count */
- inline int32 get_ref_count()
- {
- return my_atomic_load32_explicit(&n_ref_count,
- MY_MEMORY_ORDER_RELAXED);
- }
+ inline uint32_t get_ref_count() const { return n_ref_count; }
/** Acquire the table handle. */
inline void acquire();
@@ -1493,6 +1635,9 @@ struct dict_table_t {
return flags2 & DICT_TF2_TEMPORARY;
}
+ /** @return whether the table is not in ROW_FORMAT=REDUNDANT */
+ bool not_redundant() const { return flags & DICT_TF_COMPACT; }
+
/** @return whether this table is readable
@retval true normally
@retval false if this is a single-table tablespace
@@ -1511,35 +1656,92 @@ struct dict_table_t {
return strstr(name, "/" TEMP_FILE_PREFIX) != NULL;
}
- /** @return whether instant ADD COLUMN is in effect */
+ /** @return whether instant ALTER TABLE is in effect */
bool is_instant() const
{
return(UT_LIST_GET_FIRST(indexes)->is_instant());
}
- /** @return whether the table supports instant ADD COLUMN */
+ /** @return whether the table supports instant ALTER TABLE */
bool supports_instant() const
{
return(!(flags & DICT_TF_MASK_ZIP_SSIZE));
}
- /** Adjust metadata for instant ADD COLUMN.
- @param[in] table table definition after instant ADD COLUMN */
- void instant_add_column(const dict_table_t& table);
+ /** @return the number of instantly dropped columns */
+ unsigned n_dropped() const { return instant ? instant->n_dropped : 0; }
+
+ /** Look up an old column.
+ @param[in] cols the old columns of the table
+ @param[in] col_map map from old table columns to altered ones
+ @param[in] n_cols number of old columns
+ @param[in] i the number of the new column
+ @return old column
+ @retval NULL if column i was added to the table */
+ static const dict_col_t* find(const dict_col_t* cols,
+ const ulint* col_map, ulint n_cols,
+ ulint i)
+ {
+ for (ulint o = n_cols; o--; ) {
+ if (col_map[o] == i) {
+ return &cols[o];
+ }
+ }
+ return NULL;
+ }
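The col_map convention that find() iterates over is easiest to see with concrete numbers. A standalone stand-in with hypothetical data (plain unsigned values replacing the InnoDB types, and ~0U standing in for the "column was dropped" sentinel) of the same backwards lookup:

#include <cassert>
#include <cstddef>

/* Hypothetical stand-in for dict_table_t::find(): return the old column
position that maps to new position i, or -1 if column i has no old
counterpart (it was added by the ALTER). */
static int find_old(const unsigned* col_map, std::size_t n_old, unsigned i)
{
        for (std::size_t o = n_old; o--; ) {
                if (col_map[o] == i) {
                        return int(o);
                }
        }
        return -1;
}

int main()
{
        /* Old table: 3 columns; the second was dropped, one column was added. */
        const unsigned NONE = ~0U; /* stand-in for the dropped-column sentinel */
        const unsigned col_map[3] = { 0, NONE, 1 };

        assert(find_old(col_map, 3, 0) == 0);  /* new column 0 was old column 0 */
        assert(find_old(col_map, 3, 1) == 2);  /* new column 1 was old column 2 */
        assert(find_old(col_map, 3, 2) == -1); /* new column 2 was just added */
        return 0;
}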
- /** Roll back instant_add_column().
- @param[in] old_n_cols original n_cols
- @param[in] old_cols original cols
- @param[in] old_col_names original col_names */
- void rollback_instant(
+ /** Serialise metadata of dropped or reordered columns.
+ @param[in,out] heap memory heap for allocation
+ @param[out] field data field with the metadata */
+ inline void serialise_columns(mem_heap_t* heap, dfield_t* field) const;
+
+ /** Reconstruct dropped or reordered columns.
+ @param[in] metadata data from serialise_columns()
+ @param[in] len length of the metadata, in bytes
+ @return whether parsing the metadata failed */
+ bool deserialise_columns(const byte* metadata, ulint len);
+
+ /** Set is_instant() before instant_column().
+ @param[in] old previous table definition
+ @param[in] col_map map from old.cols[]
+ and old.v_cols[] to this
+ @param[out] first_alter_pos 0, or
+ 1 + first changed column position */
+ inline void prepare_instant(const dict_table_t& old,
+ const ulint* col_map,
+ unsigned& first_alter_pos);
+
+ /** Adjust table metadata for instant ADD/DROP/reorder COLUMN.
+ @param[in] table table on which prepare_instant() was invoked
+ @param[in] col_map mapping from cols[] and v_cols[] to table
+ @return whether the metadata record must be updated */
+ inline bool instant_column(const dict_table_t& table,
+ const ulint* col_map);
+
+ /** Roll back instant_column().
+ @param[in] old_n_cols original n_cols
+ @param[in] old_cols original cols
+ @param[in] old_col_names original col_names
+ @param[in] old_instant original instant structure
+ @param[in] old_fields original fields
+ @param[in] old_n_fields original number of fields
+ @param[in] old_n_core_fields original number of core fields
+ @param[in] old_n_v_cols original n_v_cols
+ @param[in] old_v_cols original v_cols
+ @param[in] old_v_col_names original v_col_names
+ @param[in] col_map column map */
+ inline void rollback_instant(
unsigned old_n_cols,
dict_col_t* old_cols,
- const char* old_col_names);
-
- /** Trim the instantly added columns when an insert into SYS_COLUMNS
- is rolled back during ALTER TABLE or recovery.
- @param[in] n number of surviving non-system columns */
- void rollback_instant(unsigned n);
+ const char* old_col_names,
+ dict_instant_t* old_instant,
+ dict_field_t* old_fields,
+ unsigned old_n_fields,
+ unsigned old_n_core_fields,
+ unsigned old_n_v_cols,
+ dict_v_col_t* old_v_cols,
+ const char* old_v_col_names,
+ const ulint* col_map);
/** Add the table definition to the data dictionary cache */
void add_to_cache();
@@ -1553,20 +1755,28 @@ struct dict_table_t {
void inc_fk_checks()
{
#ifdef UNIV_DEBUG
- lint fk_checks= (lint)
+ int32_t fk_checks=
#endif
- my_atomic_addlint(&n_foreign_key_checks_running, 1);
+ n_foreign_key_checks_running++;
ut_ad(fk_checks >= 0);
}
void dec_fk_checks()
{
#ifdef UNIV_DEBUG
- lint fk_checks= (lint)
+ int32_t fk_checks=
#endif
- my_atomic_addlint(&n_foreign_key_checks_running, ulint(-1));
+ n_foreign_key_checks_running--;
ut_ad(fk_checks > 0);
}
+private:
+ /** Initialize instant->field_map.
+ @tparam replace_dropped whether to point clustered index fields
+ to instant->dropped[]
+ @param[in] table table definition to copy from */
+ template<bool replace_dropped = false>
+ inline void init_instant(const dict_table_t& table);
+public:
/** Id of the table. */
table_id_t id;
/** Hash chain node. */
@@ -1676,6 +1886,9 @@ struct dict_table_t {
reason s_cols is a part of dict_table_t */
dict_s_col_list* s_cols;
+ /** Instantly dropped or reordered columns, or NULL if none */
+ dict_instant_t* instant;
+
/** Column names packed in a character string
"name1\0name2\0...nameN\0". Until the string contains n_cols, it will
be allocated from a temporary heap. The final string will be allocated
@@ -1720,7 +1933,7 @@ struct dict_table_t {
/** Count of how many foreign key check operations are currently being
performed on the table. We cannot drop the table while there are
foreign key checks running on it. */
- ulint n_foreign_key_checks_running;
+ Atomic_counter<int32_t> n_foreign_key_checks_running;
/** Transactions whose view low limit is greater than this number are
not allowed to store to the MySQL query cache or retrieve from it.
@@ -1920,7 +2133,7 @@ private:
/** Count of how many handles are opened to this table. Dropping of the
table is NOT allowed until this count gets to zero. MySQL does NOT
itself check the number of open handles at DROP. */
- int32 n_ref_count;
+ Atomic_counter<uint32_t> n_ref_count;
public:
/** List of locks on the table. Protected by lock_sys.mutex. */
@@ -1956,12 +2169,15 @@ inline bool dict_index_t::is_readable() const { return table->is_readable(); }
inline bool dict_index_t::is_instant() const
{
ut_ad(n_core_fields > 0);
- ut_ad(n_core_fields <= n_fields);
+ ut_ad(n_core_fields <= n_fields || table->n_dropped());
ut_ad(n_core_fields == n_fields
|| (type & ~(DICT_UNIQUE | DICT_CORRUPT)) == DICT_CLUSTERED);
ut_ad(n_core_fields == n_fields || table->supports_instant());
ut_ad(n_core_fields == n_fields || !table->is_temporary());
- return(n_core_fields != n_fields);
+ ut_ad(!table->instant || !table->is_temporary());
+
+ return n_core_fields != n_fields
+ || (is_primary() && table->instant);
}
inline bool dict_index_t::is_corrupted() const
@@ -1971,6 +2187,81 @@ inline bool dict_index_t::is_corrupted() const
|| (table && table->corrupted));
}
+inline void dict_index_t::clear_instant_add()
+{
+ DBUG_ASSERT(is_primary());
+ DBUG_ASSERT(is_instant());
+ DBUG_ASSERT(!table->instant);
+ for (unsigned i = n_core_fields; i < n_fields; i++) {
+ fields[i].col->clear_instant();
+ }
+ n_core_fields = n_fields;
+ n_core_null_bytes = UT_BITS_IN_BYTES(unsigned(n_nullable));
+}
+
+inline void dict_index_t::clear_instant_alter()
+{
+ DBUG_ASSERT(is_primary());
+ DBUG_ASSERT(n_fields == n_def);
+
+ if (!table->instant) {
+ if (is_instant()) {
+ clear_instant_add();
+ }
+ return;
+ }
+
+#ifndef DBUG_OFF
+ for (unsigned i = first_user_field(); i--; ) {
+ DBUG_ASSERT(!fields[i].col->is_dropped());
+ DBUG_ASSERT(!fields[i].col->is_nullable());
+ }
+#endif
+ const dict_col_t* ai_col = table->persistent_autoinc
+ ? fields[table->persistent_autoinc - 1].col
+ : NULL;
+ dict_field_t* const begin = &fields[first_user_field()];
+ dict_field_t* end = &fields[n_fields];
+
+ for (dict_field_t* d = begin; d < end; ) {
+ /* Move fields for dropped columns to the end. */
+ if (!d->col->is_dropped()) {
+ d++;
+ } else {
+ if (d->col->is_nullable()) {
+ n_nullable--;
+ }
+
+ std::swap(*d, *--end);
+ }
+ }
+
+ DBUG_ASSERT(&fields[n_fields - table->n_dropped()] == end);
+ n_core_fields = n_fields = n_def = end - fields;
+ n_core_null_bytes = UT_BITS_IN_BYTES(n_nullable);
+ std::sort(begin, end, [](const dict_field_t& a, const dict_field_t& b)
+ { return a.col->ind < b.col->ind; });
+ table->instant = NULL;
+ if (ai_col) {
+ auto a = std::find_if(begin, end,
+ [ai_col](const dict_field_t& f)
+ { return f.col == ai_col; });
+ table->persistent_autoinc = (a == end) ? 0 : 1 + (a - fields);
+ }
+}
+
+/** @return whether the column was instantly dropped
+@param[in] index the clustered index */
+inline bool dict_col_t::is_dropped(const dict_index_t& index) const
+{
+ DBUG_ASSERT(index.is_primary());
+ DBUG_ASSERT(!is_dropped() == !index.table->instant);
+ DBUG_ASSERT(!is_dropped() || (this >= index.table->instant->dropped
+ && this < index.table->instant->dropped
+ + index.table->instant->n_dropped));
+ return is_dropped();
+}
+
/*******************************************************************//**
Initialise the table lock list. */
void
diff --git a/storage/innobase/include/dict0priv.h b/storage/innobase/include/dict0priv.h
index b216a16c181..8eda44bd3f2 100644
--- a/storage/innobase/include/dict0priv.h
+++ b/storage/innobase/include/dict0priv.h
@@ -45,18 +45,6 @@ dict_table_check_if_in_cache_low(
/*=============================*/
const char* table_name); /*!< in: table name */
-/**********************************************************************//**
-Returns a table object based on table id.
-@return table, NULL if does not exist */
-UNIV_INLINE
-dict_table_t*
-dict_table_open_on_id_low(
-/*=====================*/
- table_id_t table_id, /*!< in: table id */
- dict_err_ignore_t ignore_err, /*!< in: errors to ignore
- when loading the table */
- ibool open_only_if_in_cache);
-
#include "dict0priv.ic"
#endif /* dict0priv.h */
diff --git a/storage/innobase/include/dict0priv.ic b/storage/innobase/include/dict0priv.ic
index fb7af2772fc..6d7fbf07394 100644
--- a/storage/innobase/include/dict0priv.ic
+++ b/storage/innobase/include/dict0priv.ic
@@ -25,7 +25,6 @@ Created Wed 13 Oct 2010 16:10:14 EST Sunny Bains
#include "dict0dict.h"
#include "dict0load.h"
-#include "dict0priv.h"
/**********************************************************************//**
Gets a table; loads it to the dictionary cache if necessary. A low-level
@@ -64,40 +63,6 @@ dict_table_get_low(
}
/**********************************************************************//**
-Returns a table object based on table id.
-@return table, NULL if does not exist */
-UNIV_INLINE
-dict_table_t*
-dict_table_open_on_id_low(
-/*======================*/
- table_id_t table_id, /*!< in: table id */
- dict_err_ignore_t ignore_err, /*!< in: errors to ignore
- when loading the table */
- ibool open_only_if_in_cache)
-{
- dict_table_t* table;
- ulint fold;
-
- ut_ad(mutex_own(&dict_sys->mutex));
-
- /* Look for the table name in the hash table */
- fold = ut_fold_ull(table_id);
-
- HASH_SEARCH(id_hash, dict_sys->table_id_hash, fold,
- dict_table_t*, table, ut_ad(table->cached),
- table->id == table_id);
- if (table == NULL && !open_only_if_in_cache) {
- table = dict_load_table_on_id(table_id, ignore_err);
- }
-
- ut_ad(!table || table->cached);
-
- /* TODO: should get the type information from MySQL */
-
- return(table);
-}
-
-/**********************************************************************//**
Checks if a table is in the dictionary cache.
@return table, NULL if not found */
UNIV_INLINE
diff --git a/storage/innobase/include/dict0stats.h b/storage/innobase/include/dict0stats.h
index e846ecabf5a..96169e6219a 100644
--- a/storage/innobase/include/dict0stats.h
+++ b/storage/innobase/include/dict0stats.h
@@ -187,6 +187,19 @@ dict_stats_rename_table(
char* errstr, /*!< out: error string if != DB_SUCCESS
is returned */
size_t errstr_sz); /*!< in: errstr size */
+/*********************************************************************//**
+Renames an index in InnoDB persistent stats storage.
+This function creates its own transaction and commits it.
+@return DB_SUCCESS or error code. DB_STATS_DO_NOT_EXIST will be returned
+if the persistent stats do not exist. */
+dberr_t
+dict_stats_rename_index(
+/*====================*/
+ const dict_table_t* table, /*!< in: table whose index
+ is renamed */
+ const char* old_index_name, /*!< in: old index name */
+ const char* new_index_name) /*!< in: new index name */
+ __attribute__((warn_unused_result));
/** Save an individual index's statistic into the persistent statistics
storage.
diff --git a/storage/innobase/include/fil0crypt.h b/storage/innobase/include/fil0crypt.h
index 77445fb8a0e..2092e7ae21f 100644
--- a/storage/innobase/include/fil0crypt.h
+++ b/storage/innobase/include/fil0crypt.h
@@ -1,6 +1,6 @@
/*****************************************************************************
Copyright (C) 2013, 2015, Google Inc. All Rights Reserved.
-Copyright (c) 2015, 2018, MariaDB Corporation.
+Copyright (c) 2015, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -26,11 +26,9 @@ Created 04/01/2015 Jan Lindström
#ifndef fil0crypt_h
#define fil0crypt_h
-#ifndef UNIV_INNOCHECKSUM
#include "os0event.h"
#include "my_crypt.h"
#include "fil0fil.h"
-#endif /*! UNIV_INNOCHECKSUM */
/**
* Magic pattern in start of crypt data on page 0
@@ -275,13 +273,11 @@ fil_space_merge_crypt_data(
const fil_space_crypt_t* src);
/** Initialize encryption parameters from a tablespace header page.
-@param[in] page_size page size of the tablespace
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] page first page of the tablespace
@return crypt data from page 0
@retval NULL if not present or not valid */
-UNIV_INTERN
-fil_space_crypt_t*
-fil_space_read_crypt_data(const page_size_t& page_size, const byte* page)
+fil_space_crypt_t* fil_space_read_crypt_data(ulint zip_size, const byte* page)
MY_ATTRIBUTE((nonnull, warn_unused_result));
/**
@@ -307,14 +303,16 @@ fil_parse_write_crypt_data(
MY_ATTRIBUTE((warn_unused_result));
/** Encrypt a buffer.
-@param[in,out] crypt_data Crypt data
-@param[in] space space_id
-@param[in] offset Page offset
-@param[in] lsn Log sequence number
-@param[in] src_frame Page to encrypt
-@param[in] page_size Page size
-@param[in,out] dst_frame Output buffer
+@param[in,out] crypt_data Crypt data
+@param[in] space space_id
+@param[in] offset Page offset
+@param[in] lsn Log sequence number
+@param[in] src_frame Page to encrypt
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param[in,out] dst_frame Output buffer
+@param[in] use_full_checksum whether the full crc32 algorithm is used
@return encrypted buffer or NULL */
+UNIV_INTERN
byte*
fil_encrypt_buf(
fil_space_crypt_t* crypt_data,
@@ -322,8 +320,9 @@ fil_encrypt_buf(
ulint offset,
lsn_t lsn,
const byte* src_frame,
- const page_size_t& page_size,
- byte* dst_frame)
+ ulint zip_size,
+ byte* dst_frame,
+ bool use_full_checksum)
MY_ATTRIBUTE((warn_unused_result));
/**
@@ -345,20 +344,24 @@ fil_space_encrypt(
byte* dst_frame)
MY_ATTRIBUTE((warn_unused_result));
-/**
-Decrypt a page.
-@param[in,out] crypt_data crypt_data
+
+/** Decrypt a page.
+@param[in] space_id space id
+@param[in] crypt_data crypt_data
@param[in] tmp_frame Temporary buffer
-@param[in] page_size Page size
+@param[in] physical_size page size
+@param[in] fsp_flags Tablespace flags
@param[in,out] src_frame Page to decrypt
-@param[out] err DB_SUCCESS or error
+@param[out] err DB_SUCCESS or DB_DECRYPTION_FAILED
@return true if page decrypted, false if not.*/
UNIV_INTERN
bool
fil_space_decrypt(
+ ulint space_id,
fil_space_crypt_t* crypt_data,
byte* tmp_frame,
- const page_size_t& page_size,
+ ulint physical_size,
+ ulint fsp_flags,
byte* src_frame,
dberr_t* err);
@@ -379,17 +382,14 @@ fil_space_decrypt(
bool* decrypted)
MY_ATTRIBUTE((warn_unused_result));
-/******************************************************************
+/**
Calculate post encryption checksum
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] dst_frame Block where checksum is calculated
-@return page checksum or BUF_NO_CHECKSUM_MAGIC
+@return page checksum
not needed. */
-UNIV_INTERN
uint32_t
-fil_crypt_calculate_checksum(
- const page_size_t& page_size,
- const byte* dst_frame)
+fil_crypt_calculate_checksum(ulint zip_size, const byte* dst_frame)
MY_ATTRIBUTE((warn_unused_result));
/*********************************************************************
@@ -487,10 +487,9 @@ calculated checksum as if it does page could be valid unencrypted,
encrypted, or corrupted.
@param[in,out] page page frame (checksum is temporarily modified)
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@return true if page is encrypted AND OK, false otherwise */
-bool
-fil_space_verify_crypt_checksum(const byte* page, const page_size_t& page_size)
+bool fil_space_verify_crypt_checksum(const byte* page, ulint zip_size)
MY_ATTRIBUTE((warn_unused_result));
#endif /* fil0crypt_h */
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index 85526bc763f..bfbe3a3578f 100644
--- a/storage/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
@@ -27,18 +27,20 @@ Created 10/25/1995 Heikki Tuuri
#ifndef fil0fil_h
#define fil0fil_h
+#include "fsp0types.h"
+
#ifndef UNIV_INNOCHECKSUM
#include "log0recv.h"
#include "dict0types.h"
-#include "page0size.h"
+#ifdef UNIV_LINUX
+# include <set>
+#endif
// Forward declaration
extern my_bool srv_use_doublewrite_buf;
extern struct buf_dblwr_t* buf_dblwr;
class page_id_t;
-struct trx_t;
-class truncate_t;
/** Structure containing encryption specification */
struct fil_space_crypt_t;
@@ -72,8 +74,11 @@ fil_type_is_data(
struct fil_node_t;
+#endif
+
/** Tablespace or log data space */
struct fil_space_t {
+#ifndef UNIV_INNOCHECKSUM
ulint id; /*!< space id */
hash_node_t hash; /*!< hash chain node */
char* name; /*!< Tablespace name */
@@ -102,11 +107,9 @@ struct fil_space_t {
/** whether undo tablespace truncation is in progress */
bool is_being_truncated;
#ifdef UNIV_DEBUG
- ulint redo_skipped_count;
- /*!< reference count for operations who want
- to skip redo log in the file space in order
- to make modify_check() pass.
- Uses my_atomic_loadlint() and friends. */
+ /** reference count for operations that want to skip redo logging in the
+ file space in order to make modify_check() pass. */
+ Atomic_counter<ulint> redo_skipped_count;
#endif
fil_type_t purpose;/*!< purpose */
UT_LIST_BASE_NODE_T(fil_node_t) chain;
@@ -124,10 +127,6 @@ struct fil_space_t {
/*!< recovered tablespace size in pages;
0 if no size change was read from the redo log,
or if the size change was implemented */
- ulint flags; /*!< FSP_SPACE_FLAGS and FSP_FLAGS_MEM_ flags;
- see fsp0types.h,
- fsp_flags_is_valid(),
- page_size_t(ulint) (constructor) */
ulint n_reserved_extents;
/*!< number of reserved free extents for
ongoing operations like B-tree page split */
@@ -140,15 +139,15 @@ struct fil_space_t {
dropped. An example is change buffer merge.
The tablespace cannot be dropped while this is nonzero,
or while fil_node_t::n_pending is nonzero.
- Protected by fil_system.mutex and my_atomic_loadlint() and friends. */
- ulint n_pending_ops;
+ Protected by fil_system.mutex and std::atomic. */
+ std::atomic<ulint> n_pending_ops;
/** Number of pending block read or write operations
(when a write is imminent or a read has recently completed).
The tablespace object cannot be freed while this is nonzero,
but it can be detached from fil_system.
Note that fil_node_t::n_pending tracks actual pending I/O requests.
- Protected by fil_system.mutex and my_atomic_loadlint() and friends. */
- ulint n_pending_ios;
+ Protected by fil_system.mutex and std::atomic. */
+ std::atomic<ulint> n_pending_ios;
rw_lock_t latch; /*!< latch protecting the file space storage
allocation */
UT_LIST_NODE_T(fil_space_t) unflushed_spaces;
@@ -247,7 +246,10 @@ struct fil_space_t {
/** Note that the tablespace has been imported.
Initially, purpose=FIL_TYPE_IMPORT so that no redo log is
written while the space ID is being updated in each page. */
- void set_imported();
+ inline void set_imported();
+
+ /** @return whether the storage device is rotational (HDD, not SSD) */
+ inline bool is_rotational() const;
/** Open each file. Only invoked on fil_system.temp_space.
@return whether all files were opened */
@@ -256,38 +258,287 @@ struct fil_space_t {
void close();
/** Acquire a tablespace reference. */
- void acquire() { my_atomic_addlint(&n_pending_ops, 1); }
+ void acquire() { n_pending_ops++; }
/** Release a tablespace reference. */
- void release()
+ void release() { ut_ad(referenced()); n_pending_ops--; }
+ /** @return whether references are being held */
+ bool referenced() const { return n_pending_ops; }
+
+ /** Acquire a tablespace reference for I/O. */
+ void acquire_for_io() { n_pending_ios++; }
+ /** Release a tablespace reference for I/O. */
+ void release_for_io() { ut_ad(pending_io()); n_pending_ios--; }
+ /** @return whether I/O is pending */
+ bool pending_io() const { return n_pending_ios; }
+#endif
+ /** FSP_SPACE_FLAGS and FSP_FLAGS_MEM_ flags;
+ see fsp0types.h for more information about the flags. */
+ ulint flags;
+
+ /** Determine if full_crc32 is used for a data file
+ @param[in] flags tablespace flags (FSP_FLAGS)
+ @return whether the full_crc32 algorithm is active */
+ static bool full_crc32(ulint flags) {
+ return flags & FSP_FLAGS_FCRC32_MASK_MARKER;
+ }
+ /** @return whether innodb_checksum_algorithm=full_crc32 is active */
+ bool full_crc32() const { return full_crc32(flags); }
+ /** Determine the logical page size.
+ @param flags tablespace flags (FSP_FLAGS)
+ @return the logical page size
+ @retval 0 if the flags are invalid */
+ static unsigned logical_size(ulint flags) {
+
+ ulint page_ssize = 0;
+
+ if (full_crc32(flags)) {
+ page_ssize = FSP_FLAGS_FCRC32_GET_PAGE_SSIZE(flags);
+ } else {
+ page_ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags);
+ }
+
+ switch (page_ssize) {
+ case 3: return 4096;
+ case 4: return 8192;
+ case 5:
+ { ut_ad(full_crc32(flags)); return 16384; }
+ case 0:
+ { ut_ad(!full_crc32(flags)); return 16384; }
+ case 6: return 32768;
+ case 7: return 65536;
+ default: return 0;
+ }
+ }
+ /** Determine the ROW_FORMAT=COMPRESSED page size.
+ @param flags tablespace flags (FSP_FLAGS)
+ @return the ROW_FORMAT=COMPRESSED page size
+ @retval 0 if ROW_FORMAT=COMPRESSED is not used */
+ static unsigned zip_size(ulint flags) {
+
+ if (full_crc32(flags)) {
+ return 0;
+ }
+
+ ulint zip_ssize = FSP_FLAGS_GET_ZIP_SSIZE(flags);
+ return zip_ssize
+ ? (UNIV_ZIP_SIZE_MIN >> 1) << zip_ssize : 0;
+ }
+ /** Determine the physical page size.
+ @param flags tablespace flags (FSP_FLAGS)
+ @return the physical page size */
+ static unsigned physical_size(ulint flags) {
+
+ if (full_crc32(flags)) {
+ return logical_size(flags);
+ }
+
+ ulint zip_ssize = FSP_FLAGS_GET_ZIP_SSIZE(flags);
+ return zip_ssize
+ ? (UNIV_ZIP_SIZE_MIN >> 1) << zip_ssize
+ : unsigned(srv_page_size);
+ }
+ /** @return the ROW_FORMAT=COMPRESSED page size
+ @retval 0 if ROW_FORMAT=COMPRESSED is not used */
+ unsigned zip_size() const { return zip_size(flags); }
+ /** @return the physical page size */
+ unsigned physical_size() const { return physical_size(flags); }
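The size helpers above boil down to a small shift table. A standalone sketch of the same decoding, assuming the stock UNIV_ZIP_SIZE_MIN of 1024 (so ROW_FORMAT=COMPRESSED sizes are 512 << zip_ssize) and the default 16k innodb_page_size:

#include <cassert>

/* ssize code -> logical page size, as in fil_space_t::logical_size():
3=4k, 4=8k, 0 (legacy) or 5 (full_crc32) = 16k, 6=32k, 7=64k. */
static unsigned logical_from_ssize(unsigned ssize)
{
        switch (ssize) {
        case 3: return 4096;
        case 4: return 8192;
        case 0: case 5: return 16384;
        case 6: return 32768;
        case 7: return 65536;
        default: return 0;
        }
}

/* zip_ssize -> ROW_FORMAT=COMPRESSED page size:
(UNIV_ZIP_SIZE_MIN >> 1) << zip_ssize, i.e. 512 << zip_ssize. */
static unsigned zip_from_zssize(unsigned zip_ssize)
{
        return zip_ssize ? 512U << zip_ssize : 0;
}

int main()
{
        assert(logical_from_ssize(0) == 16384); /* legacy flags encode 16k as 0 */
        assert(logical_from_ssize(5) == 16384); /* full_crc32 flags encode 16k as 5 */
        assert(zip_from_zssize(4) == 8192);     /* KEY_BLOCK_SIZE=8 */
        assert(zip_from_zssize(0) == 0);        /* not ROW_FORMAT=COMPRESSED */
        return 0;
}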
+ /** Check whether page compression is enabled for the tablespace.
+ @param[in] flags tablespace flags */
+ static bool is_compressed(ulint flags) {
+
+ if (full_crc32(flags)) {
+ ulint algo = FSP_FLAGS_FCRC32_GET_COMPRESSED_ALGO(
+ flags);
+ DBUG_ASSERT(algo <= PAGE_ALGORITHM_LAST);
+ return algo > 0;
+ }
+
+ return FSP_FLAGS_HAS_PAGE_COMPRESSION(flags);
+ }
+ /** @return whether page compression is enabled for the tablespace. */
+ bool is_compressed() const { return is_compressed(flags); }
+
+ /** Get the compression algorithm for full crc32 format.
+ @param[in] flags tablespace flags
+ @return algorithm type of tablespace */
+ static ulint get_compression_algo(ulint flags)
{
- ut_ad(referenced());
- my_atomic_addlint(&n_pending_ops, ulint(-1));
+ return full_crc32(flags)
+ ? FSP_FLAGS_FCRC32_GET_COMPRESSED_ALGO(flags)
+ : 0;
}
- /** @return whether references are being held */
- bool referenced() { return my_atomic_loadlint(&n_pending_ops); }
- /** @return whether references are being held */
- bool referenced() const
+ /** @return the page_compressed algorithm
+ @retval 0 if not page_compressed */
+ ulint get_compression_algo() const {
+ return fil_space_t::get_compression_algo(flags);
+ }
+ /** Determine if the page_compressed page contains an extra byte
+ for exact compressed stream length
+ @param[in] flags tablespace flags
+ @return whether the extra byte is needed */
+ static bool full_crc32_page_compressed_len(ulint flags)
{
- return const_cast<fil_space_t*>(this)->referenced();
+ DBUG_ASSERT(full_crc32(flags));
+ switch (get_compression_algo(flags)) {
+ case PAGE_LZ4_ALGORITHM:
+ case PAGE_LZO_ALGORITHM:
+ case PAGE_SNAPPY_ALGORITHM:
+ return true;
+ }
+ return false;
}
- /** Acquire a tablespace reference for I/O. */
- void acquire_for_io() { my_atomic_addlint(&n_pending_ios, 1); }
- /** Release a tablespace reference for I/O. */
- void release_for_io()
+ /** Check whether full_crc32 flags are equivalent to the expected flags.
+ @param[in] flags full_crc32 flags
+ @param[in] expected expected flags
+ @return whether the flags are equivalent */
+ static bool is_flags_full_crc32_equal(ulint flags, ulint expected)
{
- ut_ad(pending_io());
- my_atomic_addlint(&n_pending_ios, ulint(-1));
+ ut_ad(full_crc32(flags));
+
+ if (full_crc32(expected)) {
+ return get_compression_algo(flags)
+ == get_compression_algo(expected);
+ }
+
+ ulint page_ssize = FSP_FLAGS_FCRC32_GET_PAGE_SSIZE(flags);
+ ulint space_page_ssize = FSP_FLAGS_GET_PAGE_SSIZE(expected);
+
+ if (page_ssize == 5) {
+ if (space_page_ssize) {
+ return false;
+ }
+ } else if (space_page_ssize != page_ssize) {
+ return false;
+ }
+
+ return is_compressed(expected) == is_compressed(flags);
}
- /** @return whether I/O is pending */
- bool pending_io() { return my_atomic_loadlint(&n_pending_ios); }
- /** @return whether I/O is pending */
- bool pending_io() const
+ /** Check whether non-full_crc32 flags are equivalent to full_crc32 flags.
+ @param[in] flags non-full_crc32 flags
+ @param[in] expected expected flags
+ @return whether the flags are equivalent */
+ static bool is_flags_non_full_crc32_equal(ulint flags, ulint expected)
+ {
+ ut_ad(!full_crc32(flags));
+
+ if (!full_crc32(expected)) {
+ return false;
+ }
+
+ ulint page_ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags);
+ ulint space_page_ssize = FSP_FLAGS_FCRC32_GET_PAGE_SSIZE(
+ expected);
+
+ if (page_ssize) {
+ if (space_page_ssize != 5) {
+ return false;
+ }
+ } else if (space_page_ssize != page_ssize) {
+ return false;
+ }
+
+ return is_compressed(expected) == is_compressed(flags);
+ }
+ /** @return whether two sets of tablespace flags are equivalent */
+ static bool is_flags_equal(ulint flags, ulint expected)
+ {
+ if (!((flags ^ expected) & ~(1U << FSP_FLAGS_POS_RESERVED))) {
+ return true;
+ }
+
+ return full_crc32(flags)
+ ? is_flags_full_crc32_equal(flags, expected)
+ : is_flags_non_full_crc32_equal(flags, expected);
+ }
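The special-casing of page_ssize 5 in the equivalence checks above reflects that a 16k page is encoded as 5 in full_crc32 flags but as 0 in legacy flags; a minimal standalone restatement of that rule (values assumed for an uncompressed 16k tablespace):

#include <cassert>

int main()
{
        /* page_ssize of an uncompressed 16k tablespace as stored in the
        two flag formats (assumed values): */
        const unsigned fcrc32_ssize = 5; /* FSP_FLAGS_FCRC32_GET_PAGE_SSIZE() */
        const unsigned legacy_ssize = 0; /* FSP_FLAGS_GET_PAGE_SSIZE(); 16k is 0 */

        /* The rule in is_flags_full_crc32_equal(): ssize 5 on the
        full_crc32 side only matches ssize 0 on the legacy side. */
        bool equivalent = fcrc32_ssize == 5
                ? legacy_ssize == 0
                : legacy_ssize == fcrc32_ssize;
        assert(equivalent);
        return 0;
}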
+ /** Validate the tablespace flags for full crc32 format.
+ @param[in] flags the content of FSP_SPACE_FLAGS
+ @return whether the flags are correct in full crc32 format */
+ static bool is_fcrc32_valid_flags(ulint flags)
+ {
+ ut_ad(flags & FSP_FLAGS_FCRC32_MASK_MARKER);
+ const ulint page_ssize = physical_size(flags);
+ if (page_ssize < 3 || page_ssize & 8) {
+ return false;
+ }
+
+ flags >>= FSP_FLAGS_FCRC32_POS_COMPRESSED_ALGO;
+
+ return flags <= PAGE_ALGORITHM_LAST;
+ }
+ /** Validate the tablespace flags.
+ @param[in] flags content of FSP_SPACE_FLAGS
+ @param[in] is_ibd whether this is an .ibd file
+ (not system tablespace)
+ @return whether the flags are correct. */
+ static bool is_valid_flags(ulint flags, bool is_ibd)
{
- return const_cast<fil_space_t*>(this)->pending_io();
+ DBUG_EXECUTE_IF("fsp_flags_is_valid_failure",
+ return false;);
+
+ if (full_crc32(flags)) {
+ return is_fcrc32_valid_flags(flags);
+ }
+
+ if (flags == 0) {
+ return true;
+ }
+
+ if (flags & ~FSP_FLAGS_MASK) {
+ return false;
+ }
+
+ if ((flags & (FSP_FLAGS_MASK_POST_ANTELOPE
+ | FSP_FLAGS_MASK_ATOMIC_BLOBS))
+ == FSP_FLAGS_MASK_ATOMIC_BLOBS) {
+ /* If the "atomic blobs" flag (indicating
+ ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED) flag
+ is set, then the "post Antelope"
+ (ROW_FORMAT!=REDUNDANT) flag must also be set. */
+ return false;
+ }
+
+ /* Bits 10..14 should be 0b0000d where d is the DATA_DIR flag
+ of MySQL 5.6 and MariaDB 10.0, which we ignore.
+ In the buggy FSP_SPACE_FLAGS written by MariaDB 10.1.0 to 10.1.20,
+ bits 10..14 would be nonzero 0bsssaa where sss is
+ nonzero PAGE_SSIZE (3, 4, 6, or 7)
+ and aa is ATOMIC_WRITES (not 0b11). */
+ if (FSP_FLAGS_GET_RESERVED(flags) & ~1U) {
+ return false;
+ }
+
+ const ulint ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags);
+ if (ssize == 1 || ssize == 2 || ssize == 5 || ssize & 8) {
+ /* the page_size is not between 4k and 64k;
+ 16k should be encoded as 0, not 5 */
+ return false;
+ }
+
+ const ulint zssize = FSP_FLAGS_GET_ZIP_SSIZE(flags);
+ if (zssize == 0) {
+ /* not ROW_FORMAT=COMPRESSED */
+ } else if (zssize > (ssize ? ssize : 5)) {
+ /* Invalid KEY_BLOCK_SIZE */
+ return false;
+ } else if (~flags & (FSP_FLAGS_MASK_POST_ANTELOPE
+ | FSP_FLAGS_MASK_ATOMIC_BLOBS)) {
+ /* both these flags should be set for
+ ROW_FORMAT=COMPRESSED */
+ return false;
+ }
+
+ /* The flags do look valid. But, avoid misinterpreting
+ buggy MariaDB 10.1 format flags for
+ PAGE_COMPRESSED=1 PAGE_COMPRESSION_LEVEL={0,2,3}
+ as valid-looking PAGE_SSIZE if this is known to be
+ an .ibd file and we are using the default innodb_page_size=16k. */
+ return(ssize == 0 || !is_ibd
+ || srv_page_size != UNIV_PAGE_SIZE_ORIG);
}
};
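The REDUNDANT/DYNAMIC/COMPRESSED cases that is_valid_flags() accepts or rejects are easier to follow with concrete values. A standalone sketch, assuming the usual legacy bit layout (POST_ANTELOPE at bit 0, ZIP_SSIZE in bits 1..4, ATOMIC_BLOBS at bit 5) and hypothetical flag values:

#include <cassert>

int main()
{
        /* Assumed legacy FSP_SPACE_FLAGS layout: bit 0 = POST_ANTELOPE,
        bits 1..4 = ZIP_SSIZE, bit 5 = ATOMIC_BLOBS. */
        const unsigned POST_ANTELOPE = 1U << 0;
        const unsigned ATOMIC_BLOBS  = 1U << 5;

        unsigned redundant  = 0;                            /* ROW_FORMAT=REDUNDANT/COMPACT */
        unsigned dynamic    = POST_ANTELOPE | ATOMIC_BLOBS; /* ROW_FORMAT=DYNAMIC, 16k */
        unsigned compressed = POST_ANTELOPE | (4U << 1)     /* KEY_BLOCK_SIZE=8 */
                | ATOMIC_BLOBS;
        unsigned bogus      = ATOMIC_BLOBS;                 /* rejected by is_valid_flags() */

        /* The consistency rule checked above: ATOMIC_BLOBS implies POST_ANTELOPE. */
        auto consistent = [&](unsigned f) {
                return (f & (POST_ANTELOPE | ATOMIC_BLOBS)) != ATOMIC_BLOBS;
        };

        assert(consistent(redundant));
        assert(consistent(dynamic) && dynamic == 33);
        assert(consistent(compressed) && compressed == 41);
        assert(!consistent(bogus));
        return 0;
}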
+#ifndef UNIV_INNOCHECKSUM
/** Value of fil_space_t::magic_n */
#define FIL_SPACE_MAGIC_N 89472
@@ -301,6 +552,8 @@ struct fil_node_t {
pfs_os_file_t handle;
/** whether the file actually is a raw device or disk partition */
bool is_raw_disk;
+ /** whether the file is on non-rotational media (SSD) */
+ bool on_ssd;
/** size of the file in database pages (0 if not known yet);
the possible last incomplete megabyte may be ignored
if space->id == 0 */
@@ -343,6 +596,14 @@ struct fil_node_t {
@return whether the page was found valid */
bool read_page0(bool first);
+ /** Determine some file metadata when creating or reading the file.
+ @param file the file that is being created, or OS_FILE_CLOSED */
+ void find_metadata(os_file_t file = OS_FILE_CLOSED
+#ifdef UNIV_LINUX
+ , struct stat* statbuf = NULL
+#endif
+ );
+
/** Close the file handle. */
void close();
};
@@ -350,6 +611,24 @@ struct fil_node_t {
/** Value of fil_node_t::magic_n */
#define FIL_NODE_MAGIC_N 89389
+inline void fil_space_t::set_imported()
+{
+ ut_ad(purpose == FIL_TYPE_IMPORT);
+ purpose = FIL_TYPE_TABLESPACE;
+ UT_LIST_GET_FIRST(chain)->find_metadata();
+}
+
+inline bool fil_space_t::is_rotational() const
+{
+ for (const fil_node_t* node = UT_LIST_GET_FIRST(chain);
+ node != NULL; node = UT_LIST_GET_NEXT(chain, node)) {
+ if (!node->on_ssd) {
+ return true;
+ }
+ }
+ return false;
+}
+
/** Common InnoDB file extentions */
enum ib_extention {
NO_EXT = 0,
@@ -388,19 +667,12 @@ typedef byte fil_faddr_t; /*!< 'type' definition in C: an address
#define FIL_ADDR_BYTE 4U /* then comes 2-byte byte offset within page*/
#define FIL_ADDR_SIZE 6U /* address size is 6 bytes */
-#ifndef UNIV_INNOCHECKSUM
-
/** File space address */
struct fil_addr_t {
ulint page; /*!< page number within a space */
ulint boffset; /*!< byte offset within the page */
};
-/** The null file address */
-extern const fil_addr_t fil_addr_null;
-
-#endif /* !UNIV_INNOCHECKSUM */
-
/** The byte offsets on a file page for various variables @{ */
#define FIL_PAGE_SPACE_OR_CHKSUM 0 /*!< in < MySQL-4.0.14 space id the
page belongs to (== 0) but in later
@@ -441,19 +713,19 @@ extern const fil_addr_t fil_addr_null;
MySQL/InnoDB 5.1.7 or later, the
contents of this field is valid
for all uncompressed pages. */
-#define FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION 26U /*!< for the first page
- in a system tablespace data file
- (ibdata*, not *.ibd): the file has
- been flushed to disk at least up
- to this lsn
- for other pages: a 32-bit key version
- used to encrypt the page + 32-bit checksum
- or 64 bits of zero if no encryption
- */
+
+/** For the first page in a system tablespace data file (ibdata*, not *.ibd):
+the file has been flushed to disk at least up to this LSN.
+For other pages: 32-bit key version used to encrypt the page + 32-bit checksum,
+or 64 bits of zero if no encryption. */
+#define FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION 26U
/** This overloads FIL_PAGE_FILE_FLUSH_LSN for RTREE Split Sequence Number */
#define FIL_RTREE_SPLIT_SEQ_NUM FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
+/** Start of the page_compressed content */
+#define FIL_PAGE_COMP_ALGO FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
+
/** starting from 4.1.x this contains the space id of the page */
#define FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID 34U
@@ -461,25 +733,45 @@ extern const fil_addr_t fil_addr_null;
#define FIL_PAGE_DATA 38U /*!< start of the data on the page */
-/* Following are used when page compression is used */
-#define FIL_PAGE_COMPRESSED_SIZE 2 /*!< Number of bytes used to store
- actual payload data size on
- compressed pages. */
-#define FIL_PAGE_COMPRESSION_METHOD_SIZE 2
- /*!< Number of bytes used to store
- actual compression method. */
+/** 32-bit key version used to encrypt the page in full_crc32 format.
+For non-encrypted page, it contains 0. */
+#define FIL_PAGE_FCRC32_KEY_VERSION 0
+
+/** page_compressed without innodb_checksum_algorithm=full_crc32 @{ */
+/** Number of bytes used to store actual payload data size on
+page_compressed pages when not using full_crc32. */
+#define FIL_PAGE_COMP_SIZE 0
+
+/** Number of bytes for FIL_PAGE_COMP_SIZE */
+#define FIL_PAGE_COMP_METADATA_LEN 2
+
+/** Number of bytes used to store actual compression method
+for encrypted tables when not using full_crc32. */
+#define FIL_PAGE_ENCRYPT_COMP_ALGO 2
+
+/** Extra header size for encrypted page_compressed pages when
+not using full_crc32 */
+#define FIL_PAGE_ENCRYPT_COMP_METADATA_LEN 4
/* @} */
+
/** File page trailer @{ */
#define FIL_PAGE_END_LSN_OLD_CHKSUM 8 /*!< the low 4 bytes of this are used
to store the page checksum, the
last 4 bytes should be identical
to the last 4 bytes of FIL_PAGE_LSN */
#define FIL_PAGE_DATA_END 8 /*!< size of the page trailer */
+
+/** Store the last 4 bytes of FIL_PAGE_LSN */
+#define FIL_PAGE_FCRC32_END_LSN 8
+
+/** Store crc32 checksum at the end of the page */
+#define FIL_PAGE_FCRC32_CHECKSUM 4
/* @} */
/** File page types (values of FIL_PAGE_TYPE) @{ */
-#define FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED 37401 /*!< Page is compressed and
- then encrypted */
+/** page_compressed, encrypted=YES (not used for full_crc32) */
+#define FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED 37401
+/** page_compressed (not used for full_crc32) */
#define FIL_PAGE_PAGE_COMPRESSED 34354 /*!< page compressed page */
#define FIL_PAGE_INDEX 17855 /*!< B-tree node */
#define FIL_PAGE_RTREE 17854 /*!< R-tree node (SPATIAL INDEX) */
@@ -512,6 +804,12 @@ extern const fil_addr_t fil_addr_null;
Note: FIL_PAGE_TYPE_INSTANT maps to the same as FIL_PAGE_INDEX. */
#define FIL_PAGE_TYPE_LAST FIL_PAGE_TYPE_UNKNOWN
/*!< Last page type */
+/** Set in FIL_PAGE_TYPE for full_crc32 pages in page_compressed format.
+If the flag is set, then the following holds for the remaining bits
+of FIL_PAGE_TYPE:
+Bits 0..7 will contain the compressed page size in bytes.
+Bits 8..14 are reserved and must be 0. */
+#define FIL_PAGE_COMPRESS_FCRC32_MARKER 15
/* @} */
/** @return whether the page type is B-tree or R-tree index */
@@ -598,6 +896,22 @@ struct fil_system_t {
private:
bool m_initialised;
+#ifdef UNIV_LINUX
+ /** available block devices that reside on non-rotational storage */
+ std::vector<dev_t> ssd;
+public:
+ /** @return whether a file system device is on non-rotational storage */
+ bool is_ssd(dev_t dev) const
+ {
+ /* Linux seems to allow up to 15 partitions per block device.
+ If the detected ssd carries "partition number 0" (it is the whole device),
+ compare the candidate file system number without the partition number. */
+ for (const auto s : ssd)
+ if (dev == s || (dev & ~15U) == s)
+ return true;
+ return false;
+ }
+#endif
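The masking in is_ssd() can be illustrated with hypothetical Linux device numbers (Linux-specific; makedev() and the 16-minors-per-disk convention are assumptions of this sketch, not something the patch defines):

#include <cassert>
#include <sys/sysmacros.h>      /* makedev(); Linux/glibc-specific */
#include <sys/types.h>

int main()
{
        /* Hypothetical device numbers: a whole NVMe disk and its first
        partition, as stat() would report them in st_dev. */
        dev_t whole_disk = makedev(259, 0);
        dev_t partition1 = makedev(259, 1);

        /* The comparison used by fil_system_t::is_ssd(): a partition
        matches the whole-disk entry once the low four bits (the
        partition number) are masked off. */
        auto matches = [](dev_t candidate, dev_t ssd) {
                return candidate == ssd || (candidate & ~15U) == ssd;
        };

        assert(matches(whole_disk, whole_disk));
        assert(matches(partition1, whole_disk));
        return 0;
}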
public:
ib_mutex_t mutex; /*!< The mutex protecting the cache */
fil_space_t* sys_space; /*!< The innodb_system tablespace */
@@ -731,16 +1045,6 @@ fil_space_get_flags(
/*================*/
ulint id); /*!< in: space id */
-/** Returns the page size of the space and whether it is compressed or not.
-The tablespace must be cached in the memory cache.
-@param[in] id space id
-@param[out] found true if tablespace was found
-@return page size */
-const page_size_t
-fil_space_get_page_size(
- ulint id,
- bool* found);
-
/*******************************************************************//**
Opens all log files and system tablespace data files. They stay open until the
database server shutdown. This should be called at a server startup after the
@@ -1072,7 +1376,7 @@ fil_space_extend(
@param[in] type IO context
@param[in] sync true if synchronous aio is desired
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] byte_offset remainder of offset in bytes; in aio this
must be divisible by the OS block size
@param[in] len how many bytes to read or write; this must
@@ -1084,14 +1388,14 @@ fil_space_extend(
@param[in] message message for aio handler if non-sync aio
used, else ignored
@param[in] ignore_missing_space true=ignore missing space during read
-@return DB_SUCCESS, DB_TABLESPACE_DELETED or DB_TABLESPACE_TRUNCATED
+@return DB_SUCCESS, or DB_TABLESPACE_DELETED
if we are trying to do i/o on a tablespace which does not exist */
dberr_t
fil_io(
const IORequest& type,
bool sync,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
ulint byte_offset,
ulint len,
void* buf,
diff --git a/storage/innobase/include/fil0fil.ic b/storage/innobase/include/fil0fil.ic
index 2a7d06e243f..b70358e83d1 100644
--- a/storage/innobase/include/fil0fil.ic
+++ b/storage/innobase/include/fil0fil.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2015, 2018, MariaDB Corporation.
+Copyright (c) 2015, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -75,17 +75,25 @@ fil_get_page_type_name(
}
}
-/****************************************************************//**
-Validate page type.
+#ifdef UNIV_DEBUG
+/** Validate page type.
+@param[in] space Tablespace object
+@param[in] page page to validate
@return true if valid, false if not */
UNIV_INLINE
bool
fil_page_type_validate(
- const byte* page) /*!< in: page */
+ fil_space_t* space,
+ const byte* page)
{
-#ifdef UNIV_DEBUG
ulint page_type = mach_read_from_2(page + FIL_PAGE_TYPE);
+ if ((page_type & 1U << FIL_PAGE_COMPRESS_FCRC32_MARKER)
+ && space->full_crc32()
+ && space->is_compressed()) {
+ return true;
+ }
+
/* Validate page type */
if (!((page_type == FIL_PAGE_PAGE_COMPRESSED ||
page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED ||
@@ -106,25 +114,31 @@ fil_page_type_validate(
page_type == FIL_PAGE_TYPE_ZBLOB2 ||
page_type == FIL_PAGE_TYPE_UNKNOWN))) {
- ulint space = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
+ ulint space_id = mach_read_from_4(
+ page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
+
ulint offset = mach_read_from_4(page + FIL_PAGE_OFFSET);
- fil_system_enter();
- fil_space_t* rspace = fil_space_get_by_id(space);
- fil_system_exit();
+
+ ulint key_version = mach_read_from_4(
+ page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION);
+
+ if (space && space->full_crc32()) {
+ key_version = mach_read_from_4(
+ page + FIL_PAGE_FCRC32_KEY_VERSION);
+ }
/* Dump out the page info */
- ib::fatal() << "Page " << space << ":" << offset
- << " name " << (rspace ? rspace->name : "???")
+ ib::fatal() << "Page " << space_id << ":" << offset
+ << " name " << (space ? space->name : "???")
<< " page_type " << page_type
- << " key_version "
- << mach_read_from_4(page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION)
+ << " key_version " << key_version
<< " lsn " << mach_read_from_8(page + FIL_PAGE_LSN)
<< " compressed_len " << mach_read_from_2(page + FIL_PAGE_DATA);
return false;
}
-#endif /* UNIV_DEBUG */
return true;
}
+#endif /* UNIV_DEBUG */
#endif /* fil0fil_ic */
diff --git a/storage/innobase/include/fil0pagecompress.h b/storage/innobase/include/fil0pagecompress.h
index 1046d720102..9cd91b323c5 100644
--- a/storage/innobase/include/fil0pagecompress.h
+++ b/storage/innobase/include/fil0pagecompress.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (C) 2013, 2018 MariaDB Corporation.
+Copyright (C) 2013, 2019 MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -33,21 +33,29 @@ Created 11/12/2013 Jan Lindström jan.lindstrom@skysql.com
/** Compress a page_compressed page before writing to a data file.
@param[in] buf page to be compressed
@param[out] out_buf compressed page
-@param[in] level compression level
+@param[in] flags tablespace flags
@param[in] block_size file system block size
@param[in] encrypted whether the page will be subsequently encrypted
@return actual length of compressed page
@retval 0 if the page was not compressed */
-ulint fil_page_compress(const byte* buf, byte* out_buf, ulint level,
- ulint block_size, bool encrypted)
+ulint fil_page_compress(
+ const byte* buf,
+ byte* out_buf,
+ ulint flags,
+ ulint block_size,
+ bool encrypted)
MY_ATTRIBUTE((nonnull, warn_unused_result));
/** Decompress a page that may be subject to page_compressed compression.
@param[in,out] tmp_buf temporary buffer (of innodb_page_size)
@param[in,out] buf compressed page buffer
+@param[in] flags tablespace flags
@return size of the compressed data
@retval 0 if decompression failed
@retval srv_page_size if the page was not compressed */
-ulint fil_page_decompress(byte* tmp_buf, byte* buf)
+ulint fil_page_decompress(
+ byte* tmp_buf,
+ byte* buf,
+ ulint flags)
MY_ATTRIBUTE((nonnull, warn_unused_result));
#endif
diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h
index 5709a0bb11b..897d9d29d63 100644
--- a/storage/innobase/include/fsp0fsp.h
+++ b/storage/innobase/include/fsp0fsp.h
@@ -28,15 +28,15 @@ Created 12/18/1995 Heikki Tuuri
#define fsp0fsp_h
#include "fsp0types.h"
+#include "fut0lst.h"
+#include "ut0byte.h"
#ifndef UNIV_INNOCHECKSUM
-
-#include "fut0lst.h"
#include "mtr0mtr.h"
#include "page0types.h"
#include "rem0types.h"
-#include "ut0byte.h"
-
+#else
+# include "mach0data.h"
#endif /* !UNIV_INNOCHECKSUM */
/** @return the PAGE_SSIZE flags for the current innodb_page_size */
@@ -45,6 +45,12 @@ Created 12/18/1995 Heikki Tuuri
0U : (srv_page_size_shift - UNIV_ZIP_SIZE_SHIFT_MIN + 1) \
<< FSP_FLAGS_POS_PAGE_SSIZE)
+/** @return the PAGE_SSIZE flags for the current innodb_page_size in
+full checksum format */
+#define FSP_FLAGS_FCRC32_PAGE_SSIZE() \
+ ((srv_page_size_shift - UNIV_ZIP_SIZE_SHIFT_MIN + 1) \
+ << FSP_FLAGS_FCRC32_POS_PAGE_SSIZE)
+
/* @defgroup Compatibility macros for MariaDB 10.1.0 through 10.1.20;
see the table in fsp0types.h @{ */
/** Zero relative shift position of the PAGE_COMPRESSION field */
@@ -201,11 +207,6 @@ typedef byte fseg_inode_t;
(16 + 3 * FLST_BASE_NODE_SIZE \
+ FSEG_FRAG_ARR_N_SLOTS * FSEG_FRAG_SLOT_SIZE)
-#define FSP_SEG_INODES_PER_PAGE(page_size) \
- ((page_size.physical() - FSEG_ARR_OFFSET - 10) / FSEG_INODE_SIZE)
- /* Number of segment inodes which fit on a
- single page */
-
#define FSEG_MAGIC_N_VALUE 97937874
#define FSEG_FILLFACTOR 8 /* If this value is x, then if
@@ -290,33 +291,6 @@ the extent are free and which contain old tuple version to clean. */
#ifndef UNIV_INNOCHECKSUM
/* @} */
-/** Calculate the number of pages to extend a datafile.
-We extend single-table tablespaces first one extent at a time,
-but 4 at a time for bigger tablespaces. It is not enough to extend always
-by one extent, because we need to add at least one extent to FSP_FREE.
-A single extent descriptor page will track many extents. And the extent
-that uses its extent descriptor page is put onto the FSP_FREE_FRAG list.
-Extents that do not use their extent descriptor page are added to FSP_FREE.
-The physical page size is used to determine how many extents are tracked
-on one extent descriptor page. See xdes_calc_descriptor_page().
-@param[in] page_size page_size of the datafile
-@param[in] size current number of pages in the datafile
-@return number of pages to extend the file. */
-ulint
-fsp_get_pages_to_extend_ibd(
- const page_size_t& page_size,
- ulint size);
-
-/** Calculate the number of physical pages in an extent for this file.
-@param[in] page_size page_size of the datafile
-@return number of pages in an extent for this file. */
-UNIV_INLINE
-ulint
-fsp_get_extent_size_in_pages(const page_size_t& page_size)
-{
- return (FSP_EXTENT_SIZE << srv_page_size_shift) / page_size.physical();
-}
-
/**********************************************************************//**
Reads the space id from the first page of a tablespace.
@return space id, ULINT UNDEFINED if error */
@@ -347,13 +321,15 @@ fsp_header_get_flags(const page_t* page)
}
/** Get the byte offset of encryption information in page 0.
-@param[in] ps page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@return byte offset relative to FSP_HEADER_OFFSET */
inline MY_ATTRIBUTE((pure, warn_unused_result))
-ulint
-fsp_header_get_encryption_offset(const page_size_t& ps)
+ulint fsp_header_get_encryption_offset(ulint zip_size)
{
- return XDES_ARR_OFFSET + XDES_SIZE * ps.physical() / FSP_EXTENT_SIZE;
+ return zip_size
+ ? XDES_ARR_OFFSET + XDES_SIZE * zip_size / FSP_EXTENT_SIZE
+ : XDES_ARR_OFFSET + (XDES_SIZE << srv_page_size_shift)
+ / FSP_EXTENT_SIZE;
}
/** Check the encryption key from the first page of a tablespace.
@@ -514,6 +490,7 @@ fsp_reserve_free_extents(
@param[in] offset page number
@param[in] ahi whether we may need to drop the adaptive
hash index
+@param[in] log whether to write MLOG_INIT_FREE_PAGE record
@param[in,out] mtr mini-transaction */
void
fseg_free_page_func(
@@ -523,13 +500,14 @@ fseg_free_page_func(
#ifdef BTR_CUR_HASH_ADAPT
bool ahi,
#endif /* BTR_CUR_HASH_ADAPT */
+ bool log,
mtr_t* mtr);
#ifdef BTR_CUR_HASH_ADAPT
-# define fseg_free_page(header, space, offset, ahi, mtr) \
- fseg_free_page_func(header, space, offset, ahi, mtr)
+# define fseg_free_page(header, space, offset, ahi, log, mtr) \
+ fseg_free_page_func(header, space, offset, ahi, log, mtr)
#else /* BTR_CUR_HASH_ADAPT */
-# define fseg_free_page(header, space, offset, ahi, mtr) \
- fseg_free_page_func(header, space, offset, mtr)
+# define fseg_free_page(header, space, offset, ahi, log, mtr) \
+ fseg_free_page_func(header, space, offset, log, mtr)
#endif /* BTR_CUR_HASH_ADAPT */
/** Determine whether a page is free.
@param[in,out] space tablespace
@@ -623,13 +601,12 @@ fil_block_check_type(
/** Checks if a page address is an extent descriptor page address.
@param[in] page_id page id
-@param[in] page_size page size
-@return TRUE if a descriptor page */
-UNIV_INLINE
-ibool
-fsp_descr_page(
- const page_id_t page_id,
- const page_size_t& page_size);
+@param[in] physical_size page size
+@return whether a descriptor page */
+inline bool fsp_descr_page(const page_id_t page_id, ulint physical_size)
+{
+ return (page_id.page_no() & (physical_size - 1)) == FSP_XDES_OFFSET;
+}
/** Initialize a file page whose prior contents should be ignored.
@param[in,out] block buffer pool block */
@@ -676,7 +653,7 @@ fsp_flags_convert_from_101(ulint flags)
{
DBUG_EXECUTE_IF("fsp_flags_is_valid_failure",
return(ULINT_UNDEFINED););
- if (flags == 0) {
+ if (flags == 0 || fil_space_t::full_crc32(flags)) {
return(flags);
}
@@ -771,7 +748,7 @@ fsp_flags_convert_from_101(ulint flags)
flags = ((flags & 0x3f) | ssize << FSP_FLAGS_POS_PAGE_SSIZE
| FSP_FLAGS_GET_PAGE_COMPRESSION_MARIADB101(flags)
<< FSP_FLAGS_POS_PAGE_COMPRESSION);
- ut_ad(fsp_flags_is_valid(flags, false));
+ ut_ad(fil_space_t::is_valid_flags(flags, false));
return(flags);
}
@@ -785,7 +762,7 @@ bool
fsp_flags_match(ulint expected, ulint actual)
{
expected &= ~FSP_FLAGS_MEM_MASK;
- ut_ad(fsp_flags_is_valid(expected, false));
+ ut_ad(fil_space_t::is_valid_flags(expected, false));
if (actual == expected) {
return(true);
@@ -795,16 +772,6 @@ fsp_flags_match(ulint expected, ulint actual)
return(actual == expected);
}
-/** Calculates the descriptor index within a descriptor page.
-@param[in] page_size page size
-@param[in] offset page offset
-@return descriptor index */
-UNIV_INLINE
-ulint
-xdes_calc_descriptor_index(
- const page_size_t& page_size,
- ulint offset);
-
/**********************************************************************//**
Gets a descriptor bit of a page.
@return TRUE if free */
@@ -817,15 +784,42 @@ xdes_get_bit(
ulint offset);/*!< in: page offset within extent:
0 ... FSP_EXTENT_SIZE - 1 */
-/** Calculates the page where the descriptor of a page resides.
-@param[in] page_size page size
+/** Determine the descriptor index within a descriptor page.
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param[in] offset page offset
+@return descriptor index */
+inline ulint xdes_calc_descriptor_index(ulint zip_size, ulint offset)
+{
+ return ut_2pow_remainder<ulint>(offset,
+ zip_size ? zip_size : srv_page_size)
+ / FSP_EXTENT_SIZE;
+}
+
+/** Determine the descriptor page number for a page.
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] offset page offset
@return descriptor page offset */
-UNIV_INLINE
-ulint
-xdes_calc_descriptor_page(
- const page_size_t& page_size,
- ulint offset);
+inline ulint xdes_calc_descriptor_page(ulint zip_size, ulint offset)
+{
+ compile_time_assert(UNIV_PAGE_SIZE_MAX > XDES_ARR_OFFSET
+ + (UNIV_PAGE_SIZE_MAX / FSP_EXTENT_SIZE_MAX)
+ * XDES_SIZE_MAX);
+ compile_time_assert(UNIV_PAGE_SIZE_MIN > XDES_ARR_OFFSET
+ + (UNIV_PAGE_SIZE_MIN / FSP_EXTENT_SIZE_MIN)
+ * XDES_SIZE_MIN);
+
+ ut_ad(srv_page_size > XDES_ARR_OFFSET
+ + (srv_page_size / FSP_EXTENT_SIZE)
+ * XDES_SIZE);
+ ut_ad(UNIV_ZIP_SIZE_MIN > XDES_ARR_OFFSET
+ + (UNIV_ZIP_SIZE_MIN / FSP_EXTENT_SIZE)
+ * XDES_SIZE);
+ ut_ad(!zip_size
+ || zip_size > XDES_ARR_OFFSET
+ + (zip_size / FSP_EXTENT_SIZE) * XDES_SIZE);
+ return ut_2pow_round<ulint>(offset,
+ zip_size ? zip_size : srv_page_size);
+}
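With the default 16k page size, one descriptor page covers srv_page_size pages and each XDES slot covers an extent of 64 pages. A standalone worked example of the two calculations above, assuming srv_page_size = 16384 and FSP_EXTENT_SIZE = 64:

#include <cassert>

int main()
{
        const unsigned page_size   = 16384; /* srv_page_size; no ROW_FORMAT=COMPRESSED */
        const unsigned extent_size = 64;    /* FSP_EXTENT_SIZE for 16k pages */

        unsigned offset = 100000;           /* an arbitrary page number */

        /* xdes_calc_descriptor_page(): round down to the covering
        descriptor page (ut_2pow_round). */
        unsigned descr_page = offset & ~(page_size - 1);
        /* xdes_calc_descriptor_index(): XDES slot within that page
        (ut_2pow_remainder, then divide by the extent size). */
        unsigned descr_index = (offset & (page_size - 1)) / extent_size;

        assert(descr_page == 98304); /* 6 * 16384 */
        assert(descr_index == 26);   /* (100000 - 98304) / 64 */
        return 0;
}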
#endif /* UNIV_INNOCHECKSUM */
diff --git a/storage/innobase/include/fsp0fsp.ic b/storage/innobase/include/fsp0fsp.ic
index 3258704615a..5977a954aea 100644
--- a/storage/innobase/include/fsp0fsp.ic
+++ b/storage/innobase/include/fsp0fsp.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2013, 2017, MariaDB Corporation.
+Copyright (c) 2013, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -24,37 +24,6 @@ File space management
Created 12/18/1995 Heikki Tuuri
*******************************************************/
-#ifndef UNIV_INNOCHECKSUM
-
-/** Checks if a page address is an extent descriptor page address.
-@param[in] page_id page id
-@param[in] page_size page size
-@return TRUE if a descriptor page */
-UNIV_INLINE
-ibool
-fsp_descr_page(
- const page_id_t page_id,
- const page_size_t& page_size)
-{
- return((page_id.page_no() & (page_size.physical() - 1))
- == FSP_XDES_OFFSET);
-}
-
-/** Calculates the descriptor index within a descriptor page.
-@param[in] page_size page size
-@param[in] offset page offset
-@return descriptor index */
-UNIV_INLINE
-ulint
-xdes_calc_descriptor_index(
- const page_size_t& page_size,
- ulint offset)
-{
- return(ut_2pow_remainder(offset, page_size.physical())
- / FSP_EXTENT_SIZE);
-}
-#endif /*!UNIV_INNOCHECKSUM */
-
/**********************************************************************//**
Gets a descriptor bit of a page.
@return TRUE if free */
@@ -80,39 +49,3 @@ xdes_get_bit(
MLOG_1BYTE),
bit_index));
}
-
-#ifndef UNIV_INNOCHECKSUM
-/** Calculates the page where the descriptor of a page resides.
-@param[in] page_size page size
-@param[in] offset page offset
-@return descriptor page offset */
-UNIV_INLINE
-ulint
-xdes_calc_descriptor_page(
- const page_size_t& page_size,
- ulint offset)
-{
- compile_time_assert(UNIV_PAGE_SIZE_MAX > XDES_ARR_OFFSET
- + (UNIV_PAGE_SIZE_MAX / FSP_EXTENT_SIZE_MAX)
- * XDES_SIZE_MAX);
- compile_time_assert(UNIV_PAGE_SIZE_MIN > XDES_ARR_OFFSET
- + (UNIV_PAGE_SIZE_MIN / FSP_EXTENT_SIZE_MIN)
- * XDES_SIZE_MIN);
-
- ut_ad(srv_page_size > XDES_ARR_OFFSET
- + (srv_page_size / FSP_EXTENT_SIZE)
- * XDES_SIZE);
- ut_ad(UNIV_ZIP_SIZE_MIN > XDES_ARR_OFFSET
- + (UNIV_ZIP_SIZE_MIN / FSP_EXTENT_SIZE)
- * XDES_SIZE);
-
-#ifdef UNIV_DEBUG
- if (page_size.is_compressed()) {
- ut_a(page_size.physical() > XDES_ARR_OFFSET
- + (page_size.physical() / FSP_EXTENT_SIZE) * XDES_SIZE);
- }
-#endif /* UNIV_DEBUG */
-
- return(ut_2pow_round(offset, page_size.physical()));
-}
-#endif /* !UNIV_INNOCHECKSUM */
diff --git a/storage/innobase/include/fsp0pagecompress.h b/storage/innobase/include/fsp0pagecompress.h
index a5c76737b3a..7463fe396e4 100644
--- a/storage/innobase/include/fsp0pagecompress.h
+++ b/storage/innobase/include/fsp0pagecompress.h
@@ -27,17 +27,6 @@ Created 11/12/2013 Jan Lindström jan.lindstrom@skysql.com
#ifndef fsp0pagecompress_h
#define fsp0pagecompress_h
-/* Supported page compression methods */
-
-#define PAGE_UNCOMPRESSED 0
-#define PAGE_ZLIB_ALGORITHM 1
-#define PAGE_LZ4_ALGORITHM 2
-#define PAGE_LZO_ALGORITHM 3
-#define PAGE_LZMA_ALGORITHM 4
-#define PAGE_BZIP2_ALGORITHM 5
-#define PAGE_SNAPPY_ALGORITHM 6
-#define PAGE_ALGORITHM_LAST PAGE_SNAPPY_ALGORITHM
-
/**********************************************************************//**
Reads the page compression level from the first page of a tablespace.
@return page compression level, or 0 if uncompressed */
diff --git a/storage/innobase/include/fsp0space.h b/storage/innobase/include/fsp0space.h
index 88e5bb583de..f50559973c6 100644
--- a/storage/innobase/include/fsp0space.h
+++ b/storage/innobase/include/fsp0space.h
@@ -127,7 +127,7 @@ public:
@param[in] fsp_flags tablespace flags */
void set_flags(ulint fsp_flags)
{
- ut_ad(fsp_flags_is_valid(fsp_flags, false));
+ ut_ad(fil_space_t::is_valid_flags(fsp_flags, false));
m_flags = fsp_flags;
}
diff --git a/storage/innobase/include/fsp0types.h b/storage/innobase/include/fsp0types.h
index 0f965261ca8..ee0259e5f2d 100644
--- a/storage/innobase/include/fsp0types.h
+++ b/storage/innobase/include/fsp0types.h
@@ -27,10 +27,6 @@ Created May 26, 2009 Vasil Dimov
#ifndef fsp0types_h
#define fsp0types_h
-#include "univ.i"
-
-#ifndef UNIV_INNOCHECKSUM
-
/** The fil_space_t::id of the redo log. All persistent tablespaces
have a smaller fil_space_t::id. */
#define SRV_LOG_SPACE_FIRST_ID 0xFFFFFFF0U
@@ -39,6 +35,16 @@ have a smaller fil_space_t::id. */
#include "ut0byte.h"
+/* Possible values of innodb_compression_algorithm */
+#define PAGE_UNCOMPRESSED 0
+#define PAGE_ZLIB_ALGORITHM 1
+#define PAGE_LZ4_ALGORITHM 2
+#define PAGE_LZO_ALGORITHM 3
+#define PAGE_LZMA_ALGORITHM 4
+#define PAGE_BZIP2_ALGORITHM 5
+#define PAGE_SNAPPY_ALGORITHM 6
+#define PAGE_ALGORITHM_LAST PAGE_SNAPPY_ALGORITHM
+
/** @name Flags for inserting records in order
If records are inserted in order, there are the following
flags to tell this (their type is made byte for the compiler
@@ -50,7 +56,6 @@ fseg_alloc_free_page) */
#define FSP_NO_DIR ((byte)113) /*!< no order */
/* @} */
-#endif /* !UNIV_INNOCHECKSUM */
/** File space extent size in pages
page size | file space extent size
----------+-----------------------
@@ -73,7 +78,6 @@ page size | file space extent size
offset */
#define FSEG_PAGE_DATA FIL_PAGE_DATA
-#ifndef UNIV_INNOCHECKSUM
/** @name File segment header
The file segment header points to the inode describing the file segment. */
/* @{ */
@@ -88,6 +92,7 @@ typedef byte fseg_header_t;
header, in bytes */
/* @} */
+#ifndef UNIV_INNOCHECKSUM
#ifdef UNIV_DEBUG
struct mtr_t;
@@ -224,6 +229,15 @@ to ROW_FORMAT=REDUNDANT and ROW_FORMAT=COMPACT. */
/** A mask of all the known/used bits in FSP_SPACE_FLAGS */
#define FSP_FLAGS_MASK (~(~0U << FSP_FLAGS_WIDTH))
+/** Number of flag bits used to indicate the tablespace page size */
+#define FSP_FLAGS_FCRC32_WIDTH_PAGE_SSIZE 4
+
+/** Number of flag bits of the marker indicating full checksum format. */
+#define FSP_FLAGS_FCRC32_WIDTH_MARKER 1
+
+/** Number of flag bits for the compression algorithm in full checksum format. */
+#define FSP_FLAGS_FCRC32_WIDTH_COMPRESSED_ALGO 3
+
/* FSP_SPACE_FLAGS position and name in MySQL 5.6/MariaDB 10.0 or older
and MariaDB 10.1.20 or older MariaDB 10.1 and in MariaDB 10.1.21
or newer.
@@ -286,6 +300,19 @@ these are only used in MySQL 5.7 and used for compatibility. */
#define FSP_FLAGS_POS_PAGE_COMPRESSION (FSP_FLAGS_POS_RESERVED \
+ FSP_FLAGS_WIDTH_RESERVED)
+/** Zero relative shift position of the PAGE_SIZE field
+in full crc32 format */
+#define FSP_FLAGS_FCRC32_POS_PAGE_SSIZE 0
+
+/** Zero relative shift position of the MARKER field in full crc32 format. */
+#define FSP_FLAGS_FCRC32_POS_MARKER (FSP_FLAGS_FCRC32_POS_PAGE_SSIZE \
+ + FSP_FLAGS_FCRC32_WIDTH_PAGE_SSIZE)
+
+/** Zero relative shift position of the compressed algorithm stored
+in full crc32 format. */
+#define FSP_FLAGS_FCRC32_POS_COMPRESSED_ALGO (FSP_FLAGS_FCRC32_POS_MARKER \
+ + FSP_FLAGS_FCRC32_WIDTH_MARKER)
+
/** Bit mask of the POST_ANTELOPE field */
#define FSP_FLAGS_MASK_POST_ANTELOPE \
((~(~0U << FSP_FLAGS_WIDTH_POST_ANTELOPE)) \
@@ -315,6 +342,21 @@ these are only used in MySQL 5.7 and used for compatibility. */
#define FSP_FLAGS_MASK_MEM_COMPRESSION_LEVEL \
(15U << FSP_FLAGS_MEM_COMPRESSION_LEVEL)
+/** Bit mask of the PAGE_SIZE field in full crc32 format */
+#define FSP_FLAGS_FCRC32_MASK_PAGE_SSIZE \
+ ((~(~0U << FSP_FLAGS_FCRC32_WIDTH_PAGE_SSIZE)) \
+ << FSP_FLAGS_FCRC32_POS_PAGE_SSIZE)
+
+/** Bit mask of the MARKER field in full crc32 format */
+#define FSP_FLAGS_FCRC32_MASK_MARKER \
+ ((~(~0U << FSP_FLAGS_FCRC32_WIDTH_MARKER)) \
+ << FSP_FLAGS_FCRC32_POS_MARKER)
+
+/** Bit mask of the COMPRESSED ALGO field in full crc32 format */
+#define FSP_FLAGS_FCRC32_MASK_COMPRESSED_ALGO \
+ ((~(~0U << FSP_FLAGS_FCRC32_WIDTH_COMPRESSED_ALGO)) \
+ << FSP_FLAGS_FCRC32_POS_COMPRESSED_ALGO)
+
/** Return the value of the POST_ANTELOPE field */
#define FSP_FLAGS_GET_POST_ANTELOPE(flags) \
((flags & FSP_FLAGS_MASK_POST_ANTELOPE) \
@@ -339,10 +381,14 @@ these are only used in MySQL 5.7 and used for compatibility. */
#define FSP_FLAGS_HAS_PAGE_COMPRESSION(flags) \
((flags & FSP_FLAGS_MASK_PAGE_COMPRESSION) \
>> FSP_FLAGS_POS_PAGE_COMPRESSION)
-
-/** Return the contents of the UNUSED bits */
-#define FSP_FLAGS_GET_UNUSED(flags) \
- (flags >> FSP_FLAGS_POS_UNUSED)
+/** @return the PAGE_SSIZE flags in full crc32 format */
+#define FSP_FLAGS_FCRC32_GET_PAGE_SSIZE(flags) \
+ ((flags & FSP_FLAGS_FCRC32_MASK_PAGE_SSIZE) \
+ >> FSP_FLAGS_FCRC32_POS_PAGE_SSIZE)
+/** @return the COMPRESSED_ALGO flags in full crc32 format */
+#define FSP_FLAGS_FCRC32_GET_COMPRESSED_ALGO(flags) \
+ ((flags & FSP_FLAGS_FCRC32_MASK_COMPRESSED_ALGO) \
+ >> FSP_FLAGS_FCRC32_POS_COMPRESSED_ALGO)
/** @return the value of the DATA_DIR field */
#define FSP_FLAGS_HAS_DATA_DIR(flags) \
@@ -354,67 +400,4 @@ these are only used in MySQL 5.7 and used for compatibility. */
/* @} */
-/** Validate the tablespace flags, which are stored in the
-tablespace header at offset FSP_SPACE_FLAGS.
-@param[in] flags the contents of FSP_SPACE_FLAGS
-@param[in] is_ibd whether this is an .ibd file (not system tablespace)
-@return whether the flags are correct (not in the buggy 10.1) format */
-MY_ATTRIBUTE((warn_unused_result, const))
-UNIV_INLINE
-bool
-fsp_flags_is_valid(ulint flags, bool is_ibd)
-{
- DBUG_EXECUTE_IF("fsp_flags_is_valid_failure",
- return(false););
- if (flags == 0) {
- return(true);
- }
- if (flags & ~FSP_FLAGS_MASK) {
- return(false);
- }
- if ((flags & (FSP_FLAGS_MASK_POST_ANTELOPE | FSP_FLAGS_MASK_ATOMIC_BLOBS))
- == FSP_FLAGS_MASK_ATOMIC_BLOBS) {
- /* If the "atomic blobs" flag (indicating
- ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED) flag
- is set, then the "post Antelope" (ROW_FORMAT!=REDUNDANT) flag
- must also be set. */
- return(false);
- }
- /* Bits 10..14 should be 0b0000d where d is the DATA_DIR flag
- of MySQL 5.6 and MariaDB 10.0, which we ignore.
- In the buggy FSP_SPACE_FLAGS written by MariaDB 10.1.0 to 10.1.20,
- bits 10..14 would be nonzero 0bsssaa where sss is
- nonzero PAGE_SSIZE (3, 4, 6, or 7)
- and aa is ATOMIC_WRITES (not 0b11). */
- if (FSP_FLAGS_GET_RESERVED(flags) & ~1U) {
- return(false);
- }
-
- const ulint ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags);
- if (ssize == 1 || ssize == 2 || ssize == 5 || ssize & 8) {
- /* the page_size is not between 4k and 64k;
- 16k should be encoded as 0, not 5 */
- return(false);
- }
- const ulint zssize = FSP_FLAGS_GET_ZIP_SSIZE(flags);
- if (zssize == 0) {
- /* not ROW_FORMAT=COMPRESSED */
- } else if (zssize > (ssize ? ssize : 5)) {
- /* invalid KEY_BLOCK_SIZE */
- return(false);
- } else if (~flags & (FSP_FLAGS_MASK_POST_ANTELOPE
- | FSP_FLAGS_MASK_ATOMIC_BLOBS)) {
- /* both these flags should be set for
- ROW_FORMAT=COMPRESSED */
- return(false);
- }
-
- /* The flags do look valid. But, avoid misinterpreting
- buggy MariaDB 10.1 format flags for
- PAGE_COMPRESSED=1 PAGE_COMPRESSION_LEVEL={0,2,3}
- as valid-looking PAGE_SSIZE if this is known to be
- an .ibd file and we are using the default innodb_page_size=16k. */
- return(ssize == 0 || !is_ibd || srv_page_size != UNIV_PAGE_SIZE_ORIG);
-}
-
#endif /* fsp0types_h */
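
The FSP_FLAGS_FCRC32_* macros added above pack a 4-bit page-size shift, a 1-bit marker and a 3-bit compression-algorithm code into the low bits of the tablespace flags in the full_crc32 format. A self-contained sketch of that decomposition, restating the widths and positions locally; the flags value used is purely hypothetical:

#include <cassert>
#include <cstdio>

// Widths and zero-relative positions as added to fsp0types.h above.
static const unsigned POS_PAGE_SSIZE = 0, WIDTH_PAGE_SSIZE = 4;
static const unsigned POS_MARKER = POS_PAGE_SSIZE + WIDTH_PAGE_SSIZE;	// 4
static const unsigned WIDTH_MARKER = 1;
static const unsigned POS_COMPRESSED_ALGO = POS_MARKER + WIDTH_MARKER;	// 5
static const unsigned WIDTH_COMPRESSED_ALGO = 3;

// Same construction as the FSP_FLAGS_FCRC32_MASK_* macros.
static unsigned mask(unsigned width, unsigned pos) { return (~(~0U << width)) << pos; }

int main()
{
	// Hypothetical flags: page_ssize=5, marker=1, algorithm=2 (PAGE_LZ4_ALGORITHM).
	unsigned flags = (5U << POS_PAGE_SSIZE) | (1U << POS_MARKER)
		| (2U << POS_COMPRESSED_ALGO);

	unsigned ssize = (flags & mask(WIDTH_PAGE_SSIZE, POS_PAGE_SSIZE))
		>> POS_PAGE_SSIZE;
	unsigned algo = (flags & mask(WIDTH_COMPRESSED_ALGO, POS_COMPRESSED_ALGO))
		>> POS_COMPRESSED_ALGO;

	assert(ssize == 5 && algo == 2);
	std::printf("page_ssize=%u compressed_algo=%u\n", ssize, algo);
	return 0;
}
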
diff --git a/storage/innobase/include/fts0fts.h b/storage/innobase/include/fts0fts.h
index 4c4647dba95..9a10375759c 100644
--- a/storage/innobase/include/fts0fts.h
+++ b/storage/innobase/include/fts0fts.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2011, 2018, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2017, MariaDB Corporation.
+Copyright (c) 2016, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -610,17 +610,15 @@ fts_get_doc_id_from_row(
want to extract.*/
/** Extract the doc id from the record that belongs to index.
-@param[in] table table
-@param[in] rec record contains FTS_DOC_ID
+@param[in] rec record containing FTS_DOC_ID
@param[in] index index of rec
-@param[in] heap heap memory
+@param[in] offsets rec_get_offsets(rec,index)
@return doc id that was extracted from rec */
doc_id_t
fts_get_doc_id_from_rec(
- dict_table_t* table,
- const rec_t* rec,
- const dict_index_t* index,
- mem_heap_t* heap);
+ const rec_t* rec,
+ const dict_index_t* index,
+ const ulint* offsets);
/** Add new fts doc id to the update vector.
@param[in] table the table that contains the FTS index.
diff --git a/storage/innobase/include/fut0fut.h b/storage/innobase/include/fut0fut.h
index 497b6ac5114..e06fc3c5e92 100644
--- a/storage/innobase/include/fut0fut.h
+++ b/storage/innobase/include/fut0fut.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -27,12 +28,11 @@ Created 12/13/1995 Heikki Tuuri
#ifndef fut0fut_h
#define fut0fut_h
-#include "fil0fil.h"
#include "mtr0mtr.h"
/** Gets a pointer to a file address and latches the page.
@param[in] space space id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] addr file address
@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_SX_LATCH
@param[out] ptr_block file page
@@ -43,13 +43,32 @@ UNIV_INLINE
byte*
fut_get_ptr(
ulint space,
- const page_size_t& page_size,
+ ulint zip_size,
fil_addr_t addr,
rw_lock_type_t rw_latch,
mtr_t* mtr,
buf_block_t** ptr_block = NULL)
- MY_ATTRIBUTE((warn_unused_result));
+{
+ buf_block_t* block;
+ byte* ptr = NULL;
-#include "fut0fut.ic"
+ ut_ad(addr.boffset < srv_page_size);
+ ut_ad((rw_latch == RW_S_LATCH)
+ || (rw_latch == RW_X_LATCH)
+ || (rw_latch == RW_SX_LATCH));
+
+ block = buf_page_get(page_id_t(space, addr.page), zip_size,
+ rw_latch, mtr);
+
+ ptr = buf_block_get_frame(block) + addr.boffset;
+
+ buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
+
+ if (ptr_block != NULL) {
+ *ptr_block = block;
+ }
+
+ return(ptr);
+}
#endif /* fut0fut_h */
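
Throughout this patch, page_size_t parameters are replaced by a single zip_size argument: 0 means an uncompressed page of srv_page_size bytes, and a nonzero power of two is the ROW_FORMAT=COMPRESSED physical page size. A small sketch of that convention, assuming the default 16KiB innodb_page_size; the helper name is illustrative only:

#include <cassert>
typedef unsigned long ulint;
static const ulint srv_page_size = 16384;	// assumed default innodb_page_size
static bool ut_is_2pow(ulint n) { return !(n & (n - 1)); }

// Physical page size under the zip_size convention used by fut_get_ptr() above.
static ulint physical_size(ulint zip_size)
{
	assert(ut_is_2pow(zip_size));
	return zip_size ? zip_size : srv_page_size;
}

int main()
{
	assert(physical_size(0) == 16384);	// uncompressed tablespace
	assert(physical_size(4096) == 4096);	// ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4
	return 0;
}
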
diff --git a/storage/innobase/include/fut0fut.ic b/storage/innobase/include/fut0fut.ic
deleted file mode 100644
index 56be971f233..00000000000
--- a/storage/innobase/include/fut0fut.ic
+++ /dev/null
@@ -1,68 +0,0 @@
-/*****************************************************************************
-
-Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
-
-This program is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
-
-*****************************************************************************/
-
-/******************************************************************//**
-@file include/fut0fut.ic
-File-based utilities
-
-Created 12/13/1995 Heikki Tuuri
-***********************************************************************/
-
-#include "sync0rw.h"
-#include "buf0buf.h"
-
-/** Gets a pointer to a file address and latches the page.
-@param[in] space space id
-@param[in] page_size page size
-@param[in] addr file address
-@param[in] rw_latch RW_S_LATCH, RW_X_LATCH, RW_SX_LATCH
-@param[in,out] mtr mini-transaction
-@param[out] ptr_block file page
-@return pointer to a byte in (*ptr_block)->frame; the *ptr_block is
-bufferfixed and latched */
-UNIV_INLINE
-byte*
-fut_get_ptr(
- ulint space,
- const page_size_t& page_size,
- fil_addr_t addr,
- rw_lock_type_t rw_latch,
- mtr_t* mtr,
- buf_block_t** ptr_block)
-{
- buf_block_t* block;
- byte* ptr = NULL;
-
- ut_ad(addr.boffset < srv_page_size);
- ut_ad((rw_latch == RW_S_LATCH)
- || (rw_latch == RW_X_LATCH)
- || (rw_latch == RW_SX_LATCH));
-
- block = buf_page_get(page_id_t(space, addr.page), page_size,
- rw_latch, mtr);
-
- ptr = buf_block_get_frame(block) + addr.boffset;
-
- buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
-
- if (ptr_block != NULL) {
- *ptr_block = block;
- }
-
- return(ptr);
-}
diff --git a/storage/innobase/include/fut0lst.h b/storage/innobase/include/fut0lst.h
index 092889fc42c..9d7a8ff640f 100644
--- a/storage/innobase/include/fut0lst.h
+++ b/storage/innobase/include/fut0lst.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -26,11 +27,11 @@ Created 11/28/1995 Heikki Tuuri
#ifndef fut0lst_h
#define fut0lst_h
-#ifndef UNIV_INNOCHECKSUM
-
-#include "fil0fil.h"
-#include "mtr0mtr.h"
-
+#ifdef UNIV_INNOCHECKSUM
+# include "fil0fil.h"
+#else
+#include "fut0fut.h"
+#include "mtr0log.h"
/* The C 'types' of base node and list node: these should be used to
write self-documenting code. Of course, the sizeof macro cannot be
@@ -39,14 +40,55 @@ applied to these types! */
typedef byte flst_base_node_t;
typedef byte flst_node_t;
-/* The physical size of a list base node in bytes */
-#define FLST_BASE_NODE_SIZE (4 + 2 * FIL_ADDR_SIZE)
#endif /* !UNIV_INNOCHECKSUM */
+/* The physical size of a list base node in bytes */
+#define FLST_BASE_NODE_SIZE (4 + 2 * FIL_ADDR_SIZE)
/* The physical size of a list node in bytes */
#define FLST_NODE_SIZE (2 * FIL_ADDR_SIZE)
#ifndef UNIV_INNOCHECKSUM
+/* We define the field offsets of a node for the list */
+#define FLST_PREV 0 /* 6-byte address of the previous list element;
+ the page part of address is FIL_NULL, if no
+ previous element */
+#define FLST_NEXT FIL_ADDR_SIZE /* 6-byte address of the next
+ list element; the page part of address
+ is FIL_NULL, if no next element */
+
+/* We define the field offsets of a base node for the list */
+#define FLST_LEN 0 /* 32-bit list length field */
+#define FLST_FIRST 4 /* 6-byte address of the first element
+ of the list; undefined if empty list */
+#define FLST_LAST (4 + FIL_ADDR_SIZE) /* 6-byte address of the
+ last element of the list; undefined
+ if empty list */
+
+/** Initialize a zero-initialized list base node.
+@param[in,out] block file page
+@param[in] ofs byte offset of the list base node
+@param[in,out] mtr mini-transaction */
+inline void flst_init(buf_block_t* block, uint16_t ofs, mtr_t* mtr)
+{
+ ut_ad(0 == mach_read_from_2(FLST_LEN + ofs + block->frame));
+ ut_ad(0 == mach_read_from_2(FLST_FIRST + FIL_ADDR_BYTE + ofs
+ + block->frame));
+ ut_ad(0 == mach_read_from_2(FLST_LAST + FIL_ADDR_BYTE + ofs
+ + block->frame));
+ compile_time_assert(FIL_NULL == 0xffU * 0x1010101U);
+ mlog_memset(block, FLST_FIRST + FIL_ADDR_PAGE + ofs, 4, 0xff, mtr);
+ mlog_memset(block, FLST_LAST + FIL_ADDR_PAGE + ofs, 4, 0xff, mtr);
+}
+
+/** Write a null file address.
+@param[in,out] faddr file address to be zeroed out
+@param[in,out] mtr mini-transaction */
+inline void flst_zero_addr(fil_faddr_t* faddr, mtr_t* mtr)
+{
+ mlog_memset(faddr + FIL_ADDR_PAGE, 4, 0xff, mtr);
+ mlog_write_ulint(faddr + FIL_ADDR_BYTE, 0, MLOG_2BYTES, mtr);
+}
+
/********************************************************************//**
Initializes a list base node. */
UNIV_INLINE
@@ -83,7 +125,7 @@ flst_remove(
@param[in] base base node
@return length */
UNIV_INLINE
-ulint
+uint32_t
flst_get_len(
const flst_base_node_t* base);
/********************************************************************//**
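
FLST_BASE_NODE_SIZE and FLST_NODE_SIZE are now visible to innochecksum as well, and the node field offsets move into fut0lst.h. The layout is a 4-byte length followed by two 6-byte file addresses for a base node, and two 6-byte addresses for a list node; FIL_ADDR_SIZE = 6 (4-byte page number plus 2-byte byte offset) is assumed below. A compile-time sketch of that arithmetic with the constants restated locally:

// Mirrors the offsets added to fut0lst.h above; FIL_ADDR_SIZE = 6 is assumed.
static const unsigned FIL_ADDR_SIZE = 6;
static const unsigned FLST_PREV = 0;
static const unsigned FLST_NEXT = FIL_ADDR_SIZE;			// 6
static const unsigned FLST_LEN = 0;
static const unsigned FLST_FIRST = 4;
static const unsigned FLST_LAST = 4 + FIL_ADDR_SIZE;			// 10
static const unsigned FLST_BASE_NODE_SIZE = 4 + 2 * FIL_ADDR_SIZE;	// 16
static const unsigned FLST_NODE_SIZE = 2 * FIL_ADDR_SIZE;		// 12

static_assert(FLST_FIRST == FLST_LEN + 4, "base node: addresses follow the length");
static_assert(FLST_LAST + FIL_ADDR_SIZE == FLST_BASE_NODE_SIZE, "base node layout");
static_assert(FLST_NEXT == FLST_PREV + FIL_ADDR_SIZE, "node: next follows prev");
static_assert(FLST_NEXT + FIL_ADDR_SIZE == FLST_NODE_SIZE, "node layout");

int main() { return 0; }
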
diff --git a/storage/innobase/include/fut0lst.ic b/storage/innobase/include/fut0lst.ic
index 5c9a9ca94c1..5ec1e359f9c 100644
--- a/storage/innobase/include/fut0lst.ic
+++ b/storage/innobase/include/fut0lst.ic
@@ -23,26 +23,8 @@ File-based list utilities
Created 11/28/1995 Heikki Tuuri
***********************************************************************/
-#include "fut0fut.h"
-#include "mtr0log.h"
#include "buf0buf.h"
-/* We define the field offsets of a node for the list */
-#define FLST_PREV 0 /* 6-byte address of the previous list element;
- the page part of address is FIL_NULL, if no
- previous element */
-#define FLST_NEXT FIL_ADDR_SIZE /* 6-byte address of the next
- list element; the page part of address
- is FIL_NULL, if no next element */
-
-/* We define the field offsets of a base node for the list */
-#define FLST_LEN 0 /* 32-bit list length field */
-#define FLST_FIRST 4 /* 6-byte address of the first element
- of the list; undefined if empty list */
-#define FLST_LAST (4 + FIL_ADDR_SIZE) /* 6-byte address of the
- last element of the list; undefined
- if empty list */
-
/********************************************************************//**
Writes a file address. */
UNIV_INLINE
@@ -101,15 +83,15 @@ flst_init(
| MTR_MEMO_PAGE_SX_FIX));
mlog_write_ulint(base + FLST_LEN, 0, MLOG_4BYTES, mtr);
- flst_write_addr(base + FLST_FIRST, fil_addr_null, mtr);
- flst_write_addr(base + FLST_LAST, fil_addr_null, mtr);
+ flst_zero_addr(base + FLST_FIRST, mtr);
+ flst_zero_addr(base + FLST_LAST, mtr);
}
/** Get the length of a list.
@param[in] base base node
@return length */
UNIV_INLINE
-ulint
+uint32_t
flst_get_len(
const flst_base_node_t* base)
{
diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h
index 0e3402d2864..96c30c7f757 100644
--- a/storage/innobase/include/ha_prototypes.h
+++ b/storage/innobase/include/ha_prototypes.h
@@ -542,18 +542,6 @@ normalize_table_name_c_low(
const char* name, /*!< in: table name string */
ibool set_lower_case); /*!< in: TRUE if we want to set
name to lower case */
-/*************************************************************//**
-InnoDB index push-down condition check defined in ha_innodb.cc
-@return ICP_NO_MATCH, ICP_MATCH, or ICP_OUT_OF_RANGE */
-
-#include <my_compare.h>
-
-ICP_RESULT
-innobase_index_cond(
-/*================*/
- void* file) /*!< in/out: pointer to ha_innobase */
- MY_ATTRIBUTE((warn_unused_result));
-
/** Update the system variable with the given value of the InnoDB
buffer pool size.
@param[in] buf_pool_size given value of buffer pool size.*/
diff --git a/storage/innobase/include/ib0mutex.h b/storage/innobase/include/ib0mutex.h
index eaf391be09b..f4ba8a81275 100644
--- a/storage/innobase/include/ib0mutex.h
+++ b/storage/innobase/include/ib0mutex.h
@@ -35,7 +35,7 @@ Created 2013-03-26 Sunny Bains.
#include "sync0arr.h"
/** OS mutex for tracking lock/unlock for debugging */
-template <template <typename> class Policy = NoPolicy>
+template <template <typename> class Policy>
struct OSTrackMutex {
typedef Policy<OSTrackMutex> MutexPolicy;
@@ -152,7 +152,7 @@ private:
#include <sys/syscall.h>
/** Mutex implementation that used the Linux futex. */
-template <template <typename> class Policy = NoPolicy>
+template <template <typename> class Policy>
struct TTASFutexMutex {
typedef Policy<TTASFutexMutex> MutexPolicy;
@@ -167,21 +167,24 @@ struct TTASFutexMutex {
~TTASFutexMutex()
{
- ut_a(m_lock_word == MUTEX_STATE_UNLOCKED);
+ ut_ad(m_lock_word.load(std::memory_order_relaxed)
+ == MUTEX_STATE_UNLOCKED);
}
/** Called when the mutex is "created". Note: Not from the constructor
but when the mutex is initialised. */
void init(latch_id_t, const char*, uint32_t) UNIV_NOTHROW
{
- ut_a(m_lock_word == MUTEX_STATE_UNLOCKED);
+ ut_ad(m_lock_word.load(std::memory_order_relaxed)
+ == MUTEX_STATE_UNLOCKED);
}
/** Destroy the mutex. */
void destroy() UNIV_NOTHROW
{
/* The destructor can be called at shutdown. */
- ut_a(m_lock_word == MUTEX_STATE_UNLOCKED);
+ ut_ad(m_lock_word.load(std::memory_order_relaxed)
+ == MUTEX_STATE_UNLOCKED);
}
/** Acquire the mutex.
@@ -202,9 +205,8 @@ struct TTASFutexMutex {
}
for (n_waits= 0;; n_waits++) {
- if (my_atomic_fas32_explicit(&m_lock_word,
- MUTEX_STATE_WAITERS,
- MY_MEMORY_ORDER_ACQUIRE)
+ if (m_lock_word.exchange(MUTEX_STATE_WAITERS,
+ std::memory_order_acquire)
== MUTEX_STATE_UNLOCKED) {
break;
}
@@ -220,9 +222,8 @@ struct TTASFutexMutex {
/** Release the mutex. */
void exit() UNIV_NOTHROW
{
- if (my_atomic_fas32_explicit(&m_lock_word,
- MUTEX_STATE_UNLOCKED,
- MY_MEMORY_ORDER_RELEASE)
+ if (m_lock_word.exchange(MUTEX_STATE_UNLOCKED,
+ std::memory_order_release)
== MUTEX_STATE_WAITERS) {
syscall(SYS_futex, &m_lock_word, FUTEX_WAKE_PRIVATE,
1, 0, 0, 0);
@@ -234,10 +235,11 @@ struct TTASFutexMutex {
bool try_lock() UNIV_NOTHROW
{
int32 oldval = MUTEX_STATE_UNLOCKED;
- return(my_atomic_cas32_strong_explicit(&m_lock_word, &oldval,
- MUTEX_STATE_LOCKED,
- MY_MEMORY_ORDER_ACQUIRE,
- MY_MEMORY_ORDER_RELAXED));
+ return m_lock_word.compare_exchange_strong(
+ oldval,
+ MUTEX_STATE_LOCKED,
+ std::memory_order_acquire,
+ std::memory_order_relaxed);
}
/** @return non-const version of the policy */
@@ -257,12 +259,12 @@ private:
/** lock_word is the target of the atomic test-and-set instruction
when atomic operations are enabled. */
- int32 m_lock_word;
+ std::atomic<int32> m_lock_word;
};
#endif /* HAVE_IB_LINUX_FUTEX */
-template <template <typename> class Policy = NoPolicy>
+template <template <typename> class Policy>
struct TTASMutex {
typedef Policy<TTASMutex> MutexPolicy;
@@ -277,40 +279,45 @@ struct TTASMutex {
~TTASMutex()
{
- ut_ad(m_lock_word == MUTEX_STATE_UNLOCKED);
+ ut_ad(m_lock_word.load(std::memory_order_relaxed)
+ == MUTEX_STATE_UNLOCKED);
}
/** Called when the mutex is "created". Note: Not from the constructor
but when the mutex is initialised. */
void init(latch_id_t) UNIV_NOTHROW
{
- ut_ad(m_lock_word == MUTEX_STATE_UNLOCKED);
+ ut_ad(m_lock_word.load(std::memory_order_relaxed)
+ == MUTEX_STATE_UNLOCKED);
}
/** Destroy the mutex. */
void destroy() UNIV_NOTHROW
{
/* The destructor can be called at shutdown. */
- ut_ad(m_lock_word == MUTEX_STATE_UNLOCKED);
+ ut_ad(m_lock_word.load(std::memory_order_relaxed)
+ == MUTEX_STATE_UNLOCKED);
}
/** Try and lock the mutex.
@return true on success */
bool try_lock() UNIV_NOTHROW
{
- int32 oldval = MUTEX_STATE_UNLOCKED;
- return(my_atomic_cas32_strong_explicit(&m_lock_word, &oldval,
- MUTEX_STATE_LOCKED,
- MY_MEMORY_ORDER_ACQUIRE,
- MY_MEMORY_ORDER_RELAXED));
+ uint32_t oldval = MUTEX_STATE_UNLOCKED;
+ return m_lock_word.compare_exchange_strong(
+ oldval,
+ MUTEX_STATE_LOCKED,
+ std::memory_order_acquire,
+ std::memory_order_relaxed);
}
/** Release the mutex. */
void exit() UNIV_NOTHROW
{
- ut_ad(m_lock_word == MUTEX_STATE_LOCKED);
- my_atomic_store32_explicit(&m_lock_word, MUTEX_STATE_UNLOCKED,
- MY_MEMORY_ORDER_RELEASE);
+ ut_ad(m_lock_word.load(std::memory_order_relaxed)
+ == MUTEX_STATE_LOCKED);
+ m_lock_word.store(MUTEX_STATE_UNLOCKED,
+ std::memory_order_release);
}
/** Acquire the mutex.
@@ -353,12 +360,11 @@ private:
/** Policy data */
MutexPolicy m_policy;
- /** lock_word is the target of the atomic test-and-set instruction
- when atomic operations are enabled. */
- int32 m_lock_word;
+ /** mutex state */
+ std::atomic<uint32_t> m_lock_word;
};
-template <template <typename> class Policy = NoPolicy>
+template <template <typename> class Policy>
struct TTASEventMutex {
typedef Policy<TTASEventMutex> MutexPolicy;
@@ -376,7 +382,7 @@ struct TTASEventMutex {
~TTASEventMutex()
UNIV_NOTHROW
{
- ut_ad(m_lock_word == MUTEX_STATE_UNLOCKED);
+ ut_ad(state() == MUTEX_STATE_UNLOCKED);
}
/** Called when the mutex is "created". Note: Not from the constructor
@@ -385,7 +391,7 @@ struct TTASEventMutex {
void init(latch_id_t id, const char*, uint32_t) UNIV_NOTHROW
{
ut_a(m_event == 0);
- ut_a(m_lock_word == MUTEX_STATE_UNLOCKED);
+ ut_ad(state() == MUTEX_STATE_UNLOCKED);
m_event = os_event_create(sync_latch_get_name(id));
}
@@ -396,7 +402,7 @@ struct TTASEventMutex {
void destroy()
UNIV_NOTHROW
{
- ut_ad(m_lock_word == MUTEX_STATE_UNLOCKED);
+ ut_ad(state() == MUTEX_STATE_UNLOCKED);
/* We have to free the event before InnoDB shuts down. */
os_event_destroy(m_event);
@@ -408,20 +414,20 @@ struct TTASEventMutex {
bool try_lock()
UNIV_NOTHROW
{
- int32 oldval = MUTEX_STATE_UNLOCKED;
- return(my_atomic_cas32_strong_explicit(&m_lock_word, &oldval,
- MUTEX_STATE_LOCKED,
- MY_MEMORY_ORDER_ACQUIRE,
- MY_MEMORY_ORDER_RELAXED));
+ uint32_t oldval = MUTEX_STATE_UNLOCKED;
+ return m_lock_word.compare_exchange_strong(
+ oldval,
+ MUTEX_STATE_LOCKED,
+ std::memory_order_acquire,
+ std::memory_order_relaxed);
}
/** Release the mutex. */
void exit()
UNIV_NOTHROW
{
- if (my_atomic_fas32_explicit(&m_lock_word,
- MUTEX_STATE_UNLOCKED,
- MY_MEMORY_ORDER_RELEASE)
+ if (m_lock_word.exchange(MUTEX_STATE_UNLOCKED,
+ std::memory_order_release)
== MUTEX_STATE_WAITERS) {
os_event_set(m_event);
sync_array_object_signalled();
@@ -459,11 +465,12 @@ struct TTASEventMutex {
: SYNC_MUTEX,
filename, line, &cell);
- int32 oldval = MUTEX_STATE_LOCKED;
- my_atomic_cas32_strong_explicit(&m_lock_word, &oldval,
- MUTEX_STATE_WAITERS,
- MY_MEMORY_ORDER_RELAXED,
- MY_MEMORY_ORDER_RELAXED);
+ uint32_t oldval = MUTEX_STATE_LOCKED;
+ m_lock_word.compare_exchange_strong(
+ oldval,
+ MUTEX_STATE_WAITERS,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed);
if (oldval == MUTEX_STATE_UNLOCKED) {
sync_array_free_cell(sync_arr, cell);
@@ -482,9 +489,7 @@ struct TTASEventMutex {
int32 state() const
UNIV_NOTHROW
{
- return(my_atomic_load32_explicit(const_cast<int32*>
- (&m_lock_word),
- MY_MEMORY_ORDER_RELAXED));
+ return m_lock_word.load(std::memory_order_relaxed);
}
/** The event that the mutex will wait in sync0arr.cc
@@ -514,9 +519,8 @@ private:
TTASEventMutex(const TTASEventMutex&);
TTASEventMutex& operator=(const TTASEventMutex&);
- /** lock_word is the target of the atomic test-and-set instruction
- when atomic operations are enabled. */
- int32 m_lock_word;
+ /** mutex state */
+ std::atomic<uint32_t> m_lock_word;
/** Used by sync0arr.cc for the wait queue */
os_event_t m_event;
@@ -530,7 +534,6 @@ with the Performance Schema instrumentation. */
template <typename MutexImpl>
struct PolicyMutex
{
- typedef MutexImpl MutexType;
typedef typename MutexImpl::MutexPolicy Policy;
PolicyMutex() UNIV_NOTHROW : m_impl()
@@ -561,7 +564,7 @@ struct PolicyMutex
pfs_exit();
#endif /* UNIV_PFS_MUTEX */
- policy().release(m_impl);
+ ut_d(policy().context.release(m_impl));
m_impl.exit();
}
@@ -587,11 +590,11 @@ struct PolicyMutex
locker = pfs_begin_lock(&state, name, line);
#endif /* UNIV_PFS_MUTEX */
- policy().enter(m_impl, name, line);
+ ut_d(policy().context.enter(m_impl, name, line));
m_impl.enter(n_spins, n_delay, name, line);
- policy().locked(m_impl, name, line);
+ ut_d(policy().context.locked(m_impl, name, line));
#ifdef UNIV_PFS_MUTEX
pfs_end(locker, 0);
#endif /* UNIV_PFS_MUTEX */
@@ -620,9 +623,9 @@ struct PolicyMutex
if (ret == 0) {
- policy().enter(m_impl, name, line);
+ ut_d(policy().context.enter(m_impl, name, line));
- policy().locked(m_impl, name, line);
+ ut_d(policy().context.locked(m_impl, name, line));
}
#ifdef UNIV_PFS_MUTEX
@@ -636,7 +639,7 @@ struct PolicyMutex
/** @return true if the thread owns the mutex. */
bool is_owned() const UNIV_NOTHROW
{
- return(policy().is_owned());
+ return(policy().context.is_owned());
}
#endif /* UNIV_DEBUG */
@@ -658,6 +661,7 @@ struct PolicyMutex
m_impl.init(id, filename, line);
policy().init(m_impl, id, filename, line);
+ ut_d(policy().context.init(id));
}
/** Free resources (if any) */
@@ -668,6 +672,7 @@ struct PolicyMutex
#endif /* UNIV_PFS_MUTEX */
m_impl.destroy();
policy().destroy();
+ ut_d(policy().context.destroy());
}
/** Required for os_event_t */
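
The mutex implementations above switch from the my_atomic_* wrappers to std::atomic with explicit memory ordering: acquire on the locking exchange or compare-exchange, release on unlock, relaxed for debug-only reads. A minimal standalone sketch of the same test-and-set pattern; the state values and class name are illustrative, and the real classes above additionally handle futex or event waiting:

#include <atomic>
#include <cassert>
#include <cstdint>

enum { MUTEX_STATE_UNLOCKED = 0, MUTEX_STATE_LOCKED = 1 };	// values assumed

class ttas_mutex_sketch {
public:
	bool try_lock()
	{
		uint32_t oldval = MUTEX_STATE_UNLOCKED;
		// Same pattern as TTASMutex::try_lock() above: strong CAS with
		// acquire ordering on success, relaxed ordering on failure.
		return m_lock_word.compare_exchange_strong(
			oldval, MUTEX_STATE_LOCKED,
			std::memory_order_acquire, std::memory_order_relaxed);
	}
	void unlock()
	{
		assert(m_lock_word.load(std::memory_order_relaxed)
		       == MUTEX_STATE_LOCKED);
		m_lock_word.store(MUTEX_STATE_UNLOCKED, std::memory_order_release);
	}
private:
	std::atomic<uint32_t> m_lock_word{MUTEX_STATE_UNLOCKED};
};

int main()
{
	ttas_mutex_sketch m;
	assert(m.try_lock());
	assert(!m.try_lock());	// already held
	m.unlock();
	return 0;
}
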
diff --git a/storage/innobase/include/ibuf0ibuf.h b/storage/innobase/include/ibuf0ibuf.h
index 1f5e8bfe4b2..82c59ce0ba4 100644
--- a/storage/innobase/include/ibuf0ibuf.h
+++ b/storage/innobase/include/ibuf0ibuf.h
@@ -119,13 +119,6 @@ ibuf_mtr_commit(
/*============*/
mtr_t* mtr) /*!< in/out: mini-transaction */
MY_ATTRIBUTE((nonnull));
-/*********************************************************************//**
-Initializes an ibuf bitmap page. */
-void
-ibuf_bitmap_page_init(
-/*==================*/
- buf_block_t* block, /*!< in: bitmap page */
- mtr_t* mtr); /*!< in: mtr */
/************************************************************************//**
Resets the free bits of the page in the ibuf bitmap. This is done in a
separate mini-transaction, hence this operation does not restrict
@@ -241,18 +234,19 @@ ibuf_inside(
/** Checks if a page address is an ibuf bitmap page (level 3 page) address.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@return TRUE if a bitmap page */
-UNIV_INLINE
-ibool
-ibuf_bitmap_page(
- const page_id_t page_id,
- const page_size_t& page_size);
+inline bool ibuf_bitmap_page(const page_id_t page_id, ulint zip_size)
+{
+ ut_ad(ut_is_2pow(zip_size));
+ ulint size = zip_size ? zip_size : srv_page_size;
+ return (page_id.page_no() & (size - 1)) == FSP_IBUF_BITMAP_OFFSET;
+}
/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages.
Must not be called when recv_no_ibuf_operations==true.
@param[in] page_id page id
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] x_latch FALSE if relaxed check (avoid latching the
bitmap page)
@param[in] file file name
@@ -260,13 +254,13 @@ bitmap page)
@param[in,out] mtr mtr which will contain an x-latch to the
bitmap page if the page is not one of the fixed address ibuf pages, or NULL,
in which case a new transaction is created.
-@return TRUE if level 2 or level 3 page */
-ibool
+@return true if level 2 or level 3 page */
+bool
ibuf_page_low(
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
#ifdef UNIV_DEBUG
- ibool x_latch,
+ bool x_latch,
#endif /* UNIV_DEBUG */
const char* file,
unsigned line,
@@ -278,22 +272,22 @@ ibuf_page_low(
/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages.
Must not be called when recv_no_ibuf_operations==true.
@param[in] page_id tablespace/page identifier
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in,out] mtr mini-transaction or NULL
@return TRUE if level 2 or level 3 page */
-# define ibuf_page(page_id, page_size, mtr) \
- ibuf_page_low(page_id, page_size, TRUE, __FILE__, __LINE__, mtr)
+# define ibuf_page(page_id, zip_size, mtr) \
+ ibuf_page_low(page_id, zip_size, true, __FILE__, __LINE__, mtr)
#else /* UVIV_DEBUG */
/** Checks if a page is a level 2 or 3 page in the ibuf hierarchy of pages.
Must not be called when recv_no_ibuf_operations==true.
@param[in] page_id tablespace/page identifier
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in,out] mtr mini-transaction or NULL
@return TRUE if level 2 or level 3 page */
-# define ibuf_page(page_id, page_size, mtr) \
- ibuf_page_low(page_id, page_size, __FILE__, __LINE__, mtr)
+# define ibuf_page(page_id, zip_size, mtr) \
+ ibuf_page_low(page_id, zip_size, __FILE__, __LINE__, mtr)
#endif /* UVIV_DEBUG */
/***********************************************************************//**
@@ -304,23 +298,23 @@ void
ibuf_free_excess_pages(void);
/*========================*/
-/** Buffer an operation in the insert/delete buffer, instead of doing it
-directly to the disk page, if this is possible. Does not do it if the index
+/** Buffer an operation in the change buffer, instead of applying it
+directly to the file page, if this is possible. Does not do it if the index
is clustered or unique.
@param[in] op operation type
@param[in] entry index entry to insert
@param[in,out] index index where to insert
@param[in] page_id page id where to insert
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in,out] thr query thread
-@return TRUE if success */
-ibool
+@return true if success */
+bool
ibuf_insert(
ibuf_op_t op,
const dtuple_t* entry,
dict_index_t* index,
const page_id_t page_id,
- const page_size_t& page_size,
+ ulint zip_size,
que_thr_t* thr);
/** When an index page is read from a disk to the buffer pool, this function
@@ -332,15 +326,16 @@ subsequently was dropped.
@param[in,out] block if page has been read from disk,
pointer to the page x-latched, else NULL
@param[in] page_id page id of the index page
-@param[in] update_ibuf_bitmap normally this is set to TRUE, but
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@param[in] update_ibuf_bitmap normally this is set, but
if we have deleted or are deleting the tablespace, then we naturally do not
want to update a non-existent bitmap page */
void
ibuf_merge_or_delete_for_page(
buf_block_t* block,
const page_id_t page_id,
- const page_size_t* page_size,
- ibool update_ibuf_bitmap);
+ ulint zip_size,
+ bool update_ibuf_bitmap);
/*********************************************************************//**
Deletes all entries in the insert buffer for a given space id. This is used
@@ -370,16 +365,8 @@ ibuf_merge_space(
/*=============*/
ulint space); /*!< in: space id */
-/*********************************************************************//**
-Parses a redo log record of an ibuf bitmap page init.
-@return end of log record or NULL */
-byte*
-ibuf_parse_bitmap_init(
-/*===================*/
- byte* ptr, /*!< in: buffer */
- byte* end_ptr,/*!< in: buffer end */
- buf_block_t* block, /*!< in: block or NULL */
- mtr_t* mtr); /*!< in: mtr or NULL */
+/** Apply MLOG_IBUF_BITMAP_INIT when crash-upgrading */
+ATTRIBUTE_COLD void ibuf_bitmap_init_apply(buf_block_t* block);
/******************************************************************//**
Looks if the insert buffer is empty.
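
The new inline ibuf_bitmap_page() derives the physical page size from zip_size and checks whether the page number modulo that interval equals FSP_IBUF_BITMAP_OFFSET. A standalone sketch of the check, assuming FSP_IBUF_BITMAP_OFFSET = 1 and the default 16KiB page size:

#include <cassert>
typedef unsigned long ulint;
static const ulint srv_page_size = 16384;	// assumed default innodb_page_size
static const ulint FSP_IBUF_BITMAP_OFFSET = 1;	// assumed: second page of each interval

// Same check as the inline ibuf_bitmap_page() above: bitmap pages repeat every
// "physical size" pages, so the page number modulo that interval must match.
static bool is_ibuf_bitmap_page(ulint page_no, ulint zip_size)
{
	ulint size = zip_size ? zip_size : srv_page_size;
	return (page_no & (size - 1)) == FSP_IBUF_BITMAP_OFFSET;
}

int main()
{
	assert(is_ibuf_bitmap_page(1, 0));		// page 1 of the tablespace
	assert(is_ibuf_bitmap_page(16385, 0));		// next 16K-page interval
	assert(!is_ibuf_bitmap_page(2, 0));
	assert(is_ibuf_bitmap_page(8193, 8192));	// ROW_FORMAT=COMPRESSED, 8KiB
	return 0;
}
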
diff --git a/storage/innobase/include/ibuf0ibuf.ic b/storage/innobase/include/ibuf0ibuf.ic
index f6ff6f2a7fd..5be9569290b 100644
--- a/storage/innobase/include/ibuf0ibuf.ic
+++ b/storage/innobase/include/ibuf0ibuf.ic
@@ -78,11 +78,12 @@ struct ibuf_t{
ulint height; /*!< tree height */
dict_index_t* index; /*!< insert buffer index */
- ulint n_merges; /*!< number of pages merged */
- ulint n_merged_ops[IBUF_OP_COUNT];
+ /** number of pages merged */
+ Atomic_counter<ulint> n_merges;
+ Atomic_counter<ulint> n_merged_ops[IBUF_OP_COUNT];
/*!< number of operations of each type
merged to index pages */
- ulint n_discarded_ops[IBUF_OP_COUNT];
+ Atomic_counter<ulint> n_discarded_ops[IBUF_OP_COUNT];
/*!< number of operations of each type
discarded without merging due to the
tablespace being deleted or the
@@ -149,20 +150,6 @@ ibuf_inside(
return(mtr->is_inside_ibuf());
}
-/** Checks if a page address is an ibuf bitmap page (level 3 page) address.
-@param[in] page_id page id
-@param[in] page_size page size
-@return TRUE if a bitmap page */
-UNIV_INLINE
-ibool
-ibuf_bitmap_page(
- const page_id_t page_id,
- const page_size_t& page_size)
-{
- return((page_id.page_no() & (page_size.physical() - 1))
- == FSP_IBUF_BITMAP_OFFSET);
-}
-
/** Translates the free space on a page to a value in the ibuf bitmap.
@param[in] page_size page size in bytes
@param[in] max_ins_size maximum insert size after reorganize for
@@ -191,29 +178,6 @@ ibuf_index_page_calc_free_bits(
return(n);
}
-/** Translates the ibuf free bits to the free space on a page in bytes.
-@param[in] page_size page_size
-@param[in] bits value for ibuf bitmap bits
-@return maximum insert size after reorganize for the page */
-UNIV_INLINE
-ulint
-ibuf_index_page_calc_free_from_bits(
- const page_size_t& page_size,
- ulint bits)
-{
- ut_ad(bits < 4);
- ut_ad(!page_size.is_compressed()
- || page_size.physical() > IBUF_PAGE_SIZE_PER_FREE_SPACE);
-
- if (bits == 3) {
- return(4 * page_size.physical()
- / IBUF_PAGE_SIZE_PER_FREE_SPACE);
- }
-
- return(bits * (page_size.physical()
- / IBUF_PAGE_SIZE_PER_FREE_SPACE));
-}
-
/*********************************************************************//**
Translates the free space on a compressed page to a value in the ibuf bitmap.
@return value for ibuf bitmap bits */
@@ -227,7 +191,7 @@ ibuf_index_page_calc_free_zip(
const page_zip_des_t* page_zip;
lint zip_max_ins;
- ut_ad(block->page.size.is_compressed());
+ ut_ad(block->page.zip.data);
/* Consider the maximum insert size on the uncompressed page
without reorganizing the page. We must not assume anything
@@ -250,7 +214,7 @@ ibuf_index_page_calc_free_zip(
max_ins_size = (ulint) zip_max_ins;
}
- return(ibuf_index_page_calc_free_bits(block->page.size.physical(),
+ return(ibuf_index_page_calc_free_bits(block->physical_size(),
max_ins_size));
}
@@ -263,14 +227,14 @@ ibuf_index_page_calc_free(
/*======================*/
const buf_block_t* block) /*!< in: buffer block */
{
- if (!block->page.size.is_compressed()) {
+ if (!block->page.zip.data) {
ulint max_ins_size;
max_ins_size = page_get_max_insert_size_after_reorganize(
buf_block_get_frame(block), 1);
return(ibuf_index_page_calc_free_bits(
- block->page.size.physical(), max_ins_size));
+ block->physical_size(), max_ins_size));
} else {
return(ibuf_index_page_calc_free_zip(block));
}
@@ -311,12 +275,12 @@ ibuf_update_free_bits_if_full(
ut_ad(buf_block_get_page_zip(block) == NULL);
before = ibuf_index_page_calc_free_bits(
- block->page.size.physical(), max_ins_size);
+ srv_page_size, max_ins_size);
if (max_ins_size >= increase) {
compile_time_assert(ULINT32_UNDEFINED > UNIV_PAGE_SIZE_MAX);
after = ibuf_index_page_calc_free_bits(
- block->page.size.physical(), max_ins_size - increase);
+ srv_page_size, max_ins_size - increase);
#ifdef UNIV_IBUF_DEBUG
ut_a(after <= ibuf_index_page_calc_free(block));
#endif
diff --git a/storage/innobase/include/log0crypt.h b/storage/innobase/include/log0crypt.h
index d972ca01491..359896c2fc5 100644
--- a/storage/innobase/include/log0crypt.h
+++ b/storage/innobase/include/log0crypt.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (C) 2013, 2015, Google Inc. All Rights Reserved.
-Copyright (C) 2014, 2017, MariaDB Corporation. All Rights Reserved.
+Copyright (C) 2014, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -73,14 +73,23 @@ UNIV_INTERN
bool
log_crypt_read_checkpoint_buf(const byte* buf);
+/** log_crypt() operation code */
+enum log_crypt_t {
+ /** encrypt a log block without rotating key */
+ LOG_ENCRYPT,
+ /** decrypt a log block */
+ LOG_DECRYPT,
+ /** attempt to rotate the key, and encrypt a log block */
+ LOG_ENCRYPT_ROTATE_KEY
+};
+
/** Encrypt or decrypt log blocks.
@param[in,out] buf log blocks to encrypt or decrypt
@param[in] lsn log sequence number of the start of the buffer
@param[in] size size of the buffer, in bytes
-@param[in] decrypt whether to decrypt instead of encrypting */
-UNIV_INTERN
-void
-log_crypt(byte* buf, lsn_t lsn, ulint size, bool decrypt = false);
+@param[in] op whether to decrypt, encrypt, or rotate key and encrypt
+@return whether the operation succeeded (encrypt always does) */
+bool log_crypt(byte* buf, lsn_t lsn, ulint size, log_crypt_t op = LOG_ENCRYPT);
/** Encrypt or decrypt a temporary file block.
@param[in] src block to encrypt or decrypt
diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h
index 16399edad50..2fb026849c1 100644
--- a/storage/innobase/include/log0log.h
+++ b/storage/innobase/include/log0log.h
@@ -160,19 +160,16 @@ bool
log_set_capacity(ulonglong file_size)
MY_ATTRIBUTE((warn_unused_result));
-/******************************************************//**
-This function is called, e.g., when a transaction wants to commit. It checks
-that the log has been written to the log file up to the last log entry written
-by the transaction. If there is a flush running, it waits and checks if the
-flush flushed enough. If not, starts a new flush. */
-void
-log_write_up_to(
-/*============*/
- lsn_t lsn, /*!< in: log sequence number up to which
- the log should be written, LSN_MAX if not specified */
- bool flush_to_disk);
- /*!< in: true if we want the written log
- also to be flushed to disk */
+/** Ensure that the log has been written to the log file up to a given
+log entry (such as that of a transaction commit). Start a new write, or
+wait and check if an already running write is covering the request.
+@param[in] lsn log sequence number that should be
+included in the redo log file write
+@param[in] flush_to_disk whether the written log should also
+be flushed to the file system
+@param[in] rotate_key whether to rotate the encryption key */
+void log_write_up_to(lsn_t lsn, bool flush_to_disk, bool rotate_key = false);
+
/** write to the log file up to the last log entry.
@param[in] sync whether we want the written log
also to be flushed to disk. */
@@ -404,13 +401,14 @@ extern my_bool innodb_log_checksums;
#define LOG_BLOCK_HDR_SIZE 12 /* size of the log block header in
bytes */
-/* Offsets of a log block trailer from the end of the block */
+#define LOG_BLOCK_KEY 4 /* encryption key version
+ before LOG_BLOCK_CHECKSUM;
+ in LOG_HEADER_FORMAT_ENC_10_4 only */
#define LOG_BLOCK_CHECKSUM 4 /* 4 byte checksum of the log block
contents; in InnoDB versions
< 3.23.52 this did not contain the
checksum but the same value as
- .._HDR_NO */
-#define LOG_BLOCK_TRL_SIZE 4 /* trailer size in bytes */
+ LOG_BLOCK_HDR_NO */
/** Offsets inside the checkpoint pages (redo log format version 1) @{ */
/** Checkpoint number */
@@ -472,11 +470,9 @@ MariaDB 10.2.18 and later will use the 10.3 format, but LOG_HEADER_SUBFORMAT
1 instead of 0. MariaDB 10.3 will use subformat 0 (5.7-style TRUNCATE) or 2
(MDEV-13564 backup-friendly TRUNCATE). */
#define LOG_HEADER_FORMAT_10_3 103
-/** The redo log format identifier corresponding to the current format version.
-Stored in LOG_HEADER_FORMAT. */
-#define LOG_HEADER_FORMAT_CURRENT LOG_HEADER_FORMAT_10_3
-/** Future MariaDB 10.4 log format */
#define LOG_HEADER_FORMAT_10_4 104
+/** The MariaDB 10.4.0 log format (only with innodb_encrypt_log=ON) */
+#define LOG_HEADER_FORMAT_ENC_10_4 (104U | 1U << 31)
/** Encrypted MariaDB redo log */
#define LOG_HEADER_FORMAT_ENCRYPTED (1U<<31)
@@ -546,7 +542,7 @@ struct log_t{
struct files {
/** number of files */
ulint n_files;
- /** format of the redo log: e.g., LOG_HEADER_FORMAT_CURRENT */
+ /** format of the redo log: e.g., LOG_HEADER_FORMAT_10_4 */
uint32_t format;
/** redo log subformat: 0 with separately logged TRUNCATE,
2 with fully redo-logged TRUNCATE (1 in MariaDB 10.2) */
@@ -701,11 +697,34 @@ public:
/** @return whether the redo log is encrypted */
bool is_encrypted() const { return(log.is_encrypted()); }
- bool is_initialised() { return m_initialised; }
+ bool is_initialised() const { return m_initialised; }
/** Complete an asynchronous checkpoint write. */
void complete_checkpoint();
+ /** @return the log block header + trailer size */
+ unsigned framing_size() const
+ {
+ return log.format == LOG_HEADER_FORMAT_ENC_10_4
+ ? LOG_BLOCK_HDR_SIZE + LOG_BLOCK_KEY + LOG_BLOCK_CHECKSUM
+ : LOG_BLOCK_HDR_SIZE + LOG_BLOCK_CHECKSUM;
+ }
+ /** @return the log block payload size */
+ unsigned payload_size() const
+ {
+ return log.format == LOG_HEADER_FORMAT_ENC_10_4
+ ? OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE - LOG_BLOCK_CHECKSUM -
+ LOG_BLOCK_KEY
+ : OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE - LOG_BLOCK_CHECKSUM;
+ }
+ /** @return the log block trailer offset */
+ unsigned trailer_offset() const
+ {
+ return log.format == LOG_HEADER_FORMAT_ENC_10_4
+ ? OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_CHECKSUM - LOG_BLOCK_KEY
+ : OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_CHECKSUM;
+ }
+
/** Initialise the redo log subsystem. */
void create();
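
The new framing_size(), payload_size() and trailer_offset() accessors account for the extra 4-byte key version that LOG_HEADER_FORMAT_ENC_10_4 stores in front of the block checksum. A standalone sketch of the arithmetic, assuming the usual 512-byte redo log block size; the header and trailer sizes are restated from the definitions above:

#include <cassert>

static const unsigned OS_FILE_LOG_BLOCK_SIZE = 512;	// assumed redo log block size
static const unsigned LOG_BLOCK_HDR_SIZE = 12;
static const unsigned LOG_BLOCK_CHECKSUM = 4;
static const unsigned LOG_BLOCK_KEY = 4;		// only in LOG_HEADER_FORMAT_ENC_10_4

// Mirrors log_t::framing_size(), payload_size() and trailer_offset() above.
static unsigned framing_size(bool encrypted_10_4)
{
	return LOG_BLOCK_HDR_SIZE + LOG_BLOCK_CHECKSUM
		+ (encrypted_10_4 ? LOG_BLOCK_KEY : 0);
}
static unsigned payload_size(bool encrypted_10_4)
{
	return OS_FILE_LOG_BLOCK_SIZE - framing_size(encrypted_10_4);
}
static unsigned trailer_offset(bool encrypted_10_4)
{
	return OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_CHECKSUM
		- (encrypted_10_4 ? LOG_BLOCK_KEY : 0);
}

int main()
{
	assert(payload_size(false) == 496);	// 10.4 format, innodb_encrypt_log=OFF
	assert(payload_size(true) == 492);	// LOG_HEADER_FORMAT_ENC_10_4
	assert(trailer_offset(false) == 508);
	assert(trailer_offset(true) == 504);
	return 0;
}
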
diff --git a/storage/innobase/include/log0log.ic b/storage/innobase/include/log0log.ic
index 87d55f9e01d..60e6958d592 100644
--- a/storage/innobase/include/log0log.ic
+++ b/storage/innobase/include/log0log.ic
@@ -215,7 +215,7 @@ log_block_calc_checksum_format_0(
sum = 1;
sh = 0;
- for (i = 0; i < OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE; i++) {
+ for (i = 0; i < OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_CHECKSUM; i++) {
ulint b = (ulint) block[i];
sum &= 0x7FFFFFFFUL;
sum += b;
@@ -237,7 +237,7 @@ ulint
log_block_calc_checksum_crc32(
const byte* block)
{
- return(ut_crc32(block, OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE));
+ return ut_crc32(block, OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_CHECKSUM);
}
/** Calculates the checksum for a log block using the "no-op" algorithm.
@@ -338,7 +338,7 @@ log_reserve_and_write_fast(
#endif /* UNIV_LOG_LSN_DEBUG */
+ log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE;
- if (data_len >= OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) {
+ if (data_len >= log_sys.trailer_offset()) {
/* The string does not fit within the current log block
or the log block would become full */
diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h
index 77bf365bc9d..9c1c6a3c38b 100644
--- a/storage/innobase/include/log0recv.h
+++ b/storage/innobase/include/log0recv.h
@@ -130,10 +130,6 @@ corresponding to MLOG_INDEX_LOAD.
*/
extern void (*log_optimized_ddl_op)(ulint space_id);
-/** Report backup-unfriendly TRUNCATE operation (with separate log file),
-corresponding to MLOG_TRUNCATE. */
-extern void (*log_truncate)();
-
/** Report an operation to create, delete, or rename a file during backup.
@param[in] space_id tablespace identifier
@param[in] flags tablespace flags (NULL if not create)
diff --git a/storage/innobase/include/mach0data.h b/storage/innobase/include/mach0data.h
index bcf71ea6b17..96e8d629021 100644
--- a/storage/innobase/include/mach0data.h
+++ b/storage/innobase/include/mach0data.h
@@ -29,11 +29,10 @@ Created 11/28/1995 Heikki Tuuri
#define mach0data_h
#include "univ.i"
+#include "mtr0types.h"
#ifndef UNIV_INNOCHECKSUM
-#include "mtr0types.h"
-
/* The data and all fields are always stored in a database file
in the same format: ascii, big-endian, ... .
All data in the files MUST be accessed using the functions in this
diff --git a/storage/innobase/include/mem0mem.ic b/storage/innobase/include/mem0mem.ic
index f60092f5472..ce30146c53c 100644
--- a/storage/innobase/include/mem0mem.ic
+++ b/storage/innobase/include/mem0mem.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2014, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -212,7 +212,7 @@ mem_heap_alloc(
mem_block_set_free(block, free + MEM_SPACE_NEEDED(n));
- UNIV_MEM_ALLOC(buf, n);
+ TRASH_ALLOC(buf, n);
return(buf);
}
diff --git a/storage/innobase/include/mtr0log.h b/storage/innobase/include/mtr0log.h
index d2de11b3470..4cb7ea4eb64 100644
--- a/storage/innobase/include/mtr0log.h
+++ b/storage/innobase/include/mtr0log.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -71,6 +72,23 @@ mlog_log_string(
byte* ptr, /*!< in: pointer written to */
ulint len, /*!< in: string length */
mtr_t* mtr); /*!< in: mini-transaction handle */
+
+/** Initialize a string of bytes.
+@param[in,out] b buffer page
+@param[in] ofs byte offset from block->frame
+@param[in] len length of the data to write
+@param[in] val the data byte to write
+@param[in,out] mtr mini-transaction */
+void
+mlog_memset(buf_block_t* b, ulint ofs, ulint len, byte val, mtr_t* mtr);
+
+/** Initialize a string of bytes.
+@param[in,out]	b	byte address
+@param[in] len length of the data to write
+@param[in] val the data byte to write
+@param[in,out] mtr mini-transaction */
+void mlog_memset(byte* b, ulint len, byte val, mtr_t* mtr);
+
/********************************************************//**
Writes initial part of a log record consisting of one-byte item
type and four-byte space and page numbers. */
@@ -188,7 +206,7 @@ mlog_parse_initial_log_record(
ulint* space, /*!< out: space id */
ulint* page_no);/*!< out: page number */
/********************************************************//**
-Parses a log record written by mlog_write_ulint or mlog_write_ull.
+Parses a log record written by mlog_write_ulint, mlog_write_ull, mlog_memset.
@return parsed record end, NULL if not a complete record */
byte*
mlog_parse_nbytes(
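
mlog_memset() logs a fill of len identical bytes as a single record instead of logging the bytes themselves; flst_init() in fut0lst.h above uses it to write the FIL_NULL page numbers of a list base node. A toy sketch of such a record and its apply step; the real on-disk redo record encoding is not reproduced here, and all offsets are illustrative:

#include <cassert>
#include <cstring>
#include <vector>

typedef unsigned char byte;

// Toy stand-in for an MLOG_MEMSET record: offset, length and fill value only.
struct memset_record {
	size_t ofs;	// byte offset within the page
	size_t len;	// number of bytes to fill
	byte   val;	// fill value
};

// Apply the record to an in-memory page image, as recovery would.
static void apply(std::vector<byte>& page, const memset_record& rec)
{
	assert(rec.ofs + rec.len <= page.size());
	std::memset(page.data() + rec.ofs, rec.val, rec.len);
}

int main()
{
	std::vector<byte> page(16384, 0);		// assumed 16KiB page image
	apply(page, memset_record{100, 4, 0xff});	// e.g. a FIL_NULL page number
	assert(page[100] == 0xff && page[103] == 0xff && page[104] == 0);
	return 0;
}
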
diff --git a/storage/innobase/include/mtr0mtr.ic b/storage/innobase/include/mtr0mtr.ic
index 82f552f22ce..5213f90d966 100644
--- a/storage/innobase/include/mtr0mtr.ic
+++ b/storage/innobase/include/mtr0mtr.ic
@@ -170,7 +170,7 @@ mtr_t::release_block_at_savepoint(
ut_a(slot->object == block);
- buf_block_unfix(reinterpret_cast<buf_block_t*>(block));
+ reinterpret_cast<buf_block_t*>(block)->unfix();
buf_page_release_latch(block, slot->type);
diff --git a/storage/innobase/include/mtr0types.h b/storage/innobase/include/mtr0types.h
index eaf838aaa76..af42010f415 100644
--- a/storage/innobase/include/mtr0types.h
+++ b/storage/innobase/include/mtr0types.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -118,7 +118,7 @@ enum mlog_id_t {
/** mark an index record as the predefined minimum record */
MLOG_REC_MIN_MARK = 26,
- /** initialize an ibuf bitmap page */
+ /** initialize an ibuf bitmap page (used in MariaDB 10.2 and 10.3) */
MLOG_IBUF_BITMAP_INIT = 27,
#ifdef UNIV_LOG_LSN_DEBUG
@@ -216,7 +216,8 @@ enum mlog_id_t {
/** initialize a file page */
MLOG_INIT_FILE_PAGE2 = 59,
- /** Table is being truncated. (Marked only for file-per-table) */
+ /** Table is being truncated. (Was used in 10.2 and 10.3;
+ not supported for crash-upgrade to 10.4 or later.) */
MLOG_TRUNCATE = 60,
/** notify that an index tree is being loaded without writing
@@ -227,8 +228,14 @@ enum mlog_id_t {
of a ROW_FORMAT=COMPRESSED table */
MLOG_ZIP_WRITE_TRX_ID = 62,
+ /** initialize a page with a string of identical bytes */
+ MLOG_MEMSET = 63,
+
+ /** Zero-fill a page that is not allocated. */
+ MLOG_INIT_FREE_PAGE = 64,
+
/** biggest value (used in assertions) */
- MLOG_BIGGEST_TYPE = MLOG_ZIP_WRITE_TRX_ID,
+ MLOG_BIGGEST_TYPE = MLOG_INIT_FREE_PAGE,
/** log record for writing/updating crypt data of
a tablespace */
diff --git a/storage/innobase/include/os0api.h b/storage/innobase/include/os0api.h
index 63f213b5457..20111cbf7f0 100644
--- a/storage/innobase/include/os0api.h
+++ b/storage/innobase/include/os0api.h
@@ -1,6 +1,6 @@
/***********************************************************************
-Copyright (c) 2017, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
@@ -54,22 +54,4 @@ buf_page_get_trim_length(
ulint write_length)
MY_ATTRIBUTE((warn_unused_result));
-/**
-Get should we punch hole to tablespace.
-@param[in] space Tablespace
-@return true, if punch hole should be tried, false if not. */
-bool
-fil_node_should_punch_hole(
- const fil_node_t* node)
- MY_ATTRIBUTE((warn_unused_result));
-
-/**
-Set punch hole to tablespace to given value.
-@param[in] space Tablespace
-@param[in] val value to be set. */
-void
-fil_space_set_punch_hole(
- fil_node_t* node,
- bool val);
-
#endif /* OS_API_H */
diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h
index 71da751ad25..e7f076fb79a 100644
--- a/storage/innobase/include/os0file.h
+++ b/storage/innobase/include/os0file.h
@@ -2,7 +2,7 @@
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
-Copyright (c) 2013, 2017, MariaDB Corporation.
+Copyright (c) 2013, 2019, MariaDB Corporation.
Portions of this file contain modifications contributed and copyrighted
by Percona Inc.. Those modifications are
@@ -36,7 +36,7 @@ Created 10/21/1995 Heikki Tuuri
#ifndef os0file_h
#define os0file_h
-#include "page0size.h"
+#include "fsp0types.h"
#include "os0api.h"
#ifndef _WIN32
@@ -360,17 +360,8 @@ public:
/** Set the pointer to file node for IO
@param[in] node File node */
- void set_fil_node(fil_node_t* node)
- {
- if (node && !fil_node_should_punch_hole(node)) {
- clear_punch_hole();
- }
-
- m_fil_node = node;
- }
+ inline void set_fil_node(fil_node_t* node);
- /** Compare two requests
- @reutrn true if the are equal */
bool operator==(const IORequest& rhs) const
{
return(m_type == rhs.m_type);
@@ -414,17 +405,7 @@ public:
: 0);
}
- bool should_punch_hole() const {
- return (m_fil_node ?
- fil_node_should_punch_hole(m_fil_node)
- : false);
- }
-
- void space_no_punch_hole() const {
- if (m_fil_node) {
- fil_space_set_punch_hole(m_fil_node, false);
- }
- }
+ inline bool should_punch_hole() const;
/** Free storage space associated with a section of the file.
@param[in] fh Open file handle
@@ -1591,19 +1572,6 @@ os_file_change_size_win32(
#endif /*_WIN32 */
-/** Check if the file system supports sparse files.
-
-Warning: On POSIX systems we try and punch a hole from offset 0 to
-the system configured page size. This should only be called on an empty
-file.
-
-@param[in] fh File handle for the file - if opened
-@return true if the file system supports sparse files */
-bool
-os_is_sparse_file_supported(
- os_file_t fh)
- MY_ATTRIBUTE((warn_unused_result));
-
/** Free storage space associated with a section of the file.
@param[in] fh Open file handle
@param[in] off Starting offset (SEEK_SET)
@@ -1643,16 +1611,6 @@ is_absolute_path(
return(false);
}
-/***********************************************************************//**
-Try to get number of bytes per sector from file system.
-@return file block size */
-UNIV_INTERN
-ulint
-os_file_get_block_size(
-/*===================*/
- os_file_t file, /*!< in: handle to a file */
- const char* name); /*!< in: file name */
-
#include "os0file.ic"
#endif /* os0file_h */
diff --git a/storage/innobase/include/os0proc.h b/storage/innobase/include/os0proc.h
index 69d48c4d2f6..30ef295a7ab 100644
--- a/storage/innobase/include/os0proc.h
+++ b/storage/innobase/include/os0proc.h
@@ -40,7 +40,7 @@ typedef unsigned long int os_process_id_t;
/** The total amount of memory currently allocated from the operating
system with os_mem_alloc_large(). */
-extern ulint os_total_large_mem_allocated;
+extern Atomic_counter<ulint> os_total_large_mem_allocated;
/** Converts the current process id to a number.
@return process id as a number */
diff --git a/storage/innobase/include/os0thread.h b/storage/innobase/include/os0thread.h
index b6838c919a0..e1a1dac6826 100644
--- a/storage/innobase/include/os0thread.h
+++ b/storage/innobase/include/os0thread.h
@@ -73,7 +73,7 @@ typedef unsigned int mysql_pfs_key_t;
#endif /* HAVE_PSI_INTERFACE */
/** Number of threads active. */
-extern ulint os_thread_count;
+extern Atomic_counter<ulint> os_thread_count;
/***************************************************************//**
Compares two thread ids for equality.
diff --git a/storage/innobase/include/page0cur.ic b/storage/innobase/include/page0cur.ic
index 86e560395f3..b5812560093 100644
--- a/storage/innobase/include/page0cur.ic
+++ b/storage/innobase/include/page0cur.ic
@@ -24,12 +24,7 @@ The page cursor
Created 10/4/1994 Heikki Tuuri
*************************************************************************/
-#include "page0page.h"
-#include "buf0types.h"
-
#ifdef UNIV_DEBUG
-# include "rem0cmp.h"
-
/*********************************************************//**
Gets pointer to the page frame where the cursor is positioned.
@return page */
@@ -280,6 +275,7 @@ page_cur_tuple_insert(
*offsets = rec_get_offsets(rec, index, *offsets,
page_is_leaf(cursor->block->frame),
ULINT_UNDEFINED, heap);
+ ut_ad(size == rec_offs_size(*offsets));
if (buf_block_get_page_zip(cursor->block)) {
rec = page_cur_insert_rec_zip(
diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h
index e5113980c4c..dae60fba327 100644
--- a/storage/innobase/include/page0page.h
+++ b/storage/innobase/include/page0page.h
@@ -27,28 +27,23 @@ Created 2/2/1994 Heikki Tuuri
#define page0page_h
#include "page0types.h"
-#ifndef UNIV_INNOCHECKSUM
+#include "fsp0fsp.h"
#include "fil0fil.h"
#include "buf0buf.h"
-#include "data0data.h"
-#include "dict0dict.h"
#include "rem0rec.h"
-#endif /* !UNIV_INNOCHECKSUM*/
-#include "fsp0fsp.h"
#ifndef UNIV_INNOCHECKSUM
+#include "dict0dict.h"
+#include "data0data.h"
#include "mtr0mtr.h"
-#ifdef UNIV_MATERIALIZE
-#undef UNIV_INLINE
-#define UNIV_INLINE
-#endif
-
/* PAGE HEADER
===========
Index page header starts at the first offset left free by the FIL-module */
typedef byte page_header_t;
+#else
+# include "mach0data.h"
#endif /* !UNIV_INNOCHECKSUM */
#define PAGE_HEADER FSEG_PAGE_DATA /* index page header starts at this
@@ -1007,13 +1002,6 @@ page_get_direction(const page_t* page)
inline
uint16_t
page_get_instant(const page_t* page);
-/** Assign the PAGE_INSTANT field.
-@param[in,out] page clustered index root page
-@param[in] n original number of clustered index fields
-@param[in,out] mtr mini-transaction */
-inline
-void
-page_set_instant(page_t* page, unsigned n, mtr_t* mtr);
/**********************************************************//**
Create an uncompressed B-tree index page.
@@ -1041,10 +1029,6 @@ page_create_zip(
ulint level, /*!< in: the B-tree level of
the page */
trx_id_t max_trx_id, /*!< in: PAGE_MAX_TRX_ID */
- const redo_page_compress_t* page_comp_info,
- /*!< in: used for applying
- TRUNCATE log
- record during recovery */
mtr_t* mtr); /*!< in/out: mini-transaction
handle */
/**********************************************************//**
@@ -1335,11 +1319,6 @@ const rec_t*
page_find_rec_max_not_deleted(
const page_t* page);
-#ifdef UNIV_MATERIALIZE
-#undef UNIV_INLINE
-#define UNIV_INLINE UNIV_INLINE_ORIGINAL
-#endif
-
#endif /* !UNIV_INNOCHECKSUM */
#include "page0page.ic"
diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic
index 4d2be2a0445..49d127499f7 100644
--- a/storage/innobase/include/page0page.ic
+++ b/storage/innobase/include/page0page.ic
@@ -29,18 +29,10 @@ Created 2/2/1994 Heikki Tuuri
#ifndef UNIV_INNOCHECKSUM
#include "mach0data.h"
-#ifdef UNIV_DEBUG
-# include "log0recv.h"
-#endif /* !UNIV_DEBUG */
#include "rem0cmp.h"
#include "mtr0log.h"
#include "page0zip.h"
-#ifdef UNIV_MATERIALIZE
-#undef UNIV_INLINE
-#define UNIV_INLINE
-#endif
-
/*************************************************************//**
Returns the max trx id field value. */
UNIV_INLINE
@@ -1102,29 +1094,6 @@ page_get_instant(const page_t* page)
#endif /* UNIV_DEBUG */
return(i >> 3);
}
-
-/** Assign the PAGE_INSTANT field.
-@param[in,out] page clustered index root page
-@param[in] n original number of clustered index fields
-@param[in,out] mtr mini-transaction */
-inline
-void
-page_set_instant(page_t* page, unsigned n, mtr_t* mtr)
-{
- ut_ad(fil_page_get_type(page) == FIL_PAGE_TYPE_INSTANT);
- ut_ad(n > 0);
- ut_ad(n < REC_MAX_N_FIELDS);
- uint16_t i = page_header_get_field(page, PAGE_INSTANT);
- ut_ad(i <= PAGE_NO_DIRECTION);
- i |= n << 3;
- mlog_write_ulint(PAGE_HEADER + PAGE_INSTANT + page, i,
- MLOG_2BYTES, mtr);
-}
#endif /* !UNIV_INNOCHECKSUM */
-#ifdef UNIV_MATERIALIZE
-#undef UNIV_INLINE
-#define UNIV_INLINE UNIV_INLINE_ORIGINAL
-#endif
-
#endif
diff --git a/storage/innobase/include/page0size.h b/storage/innobase/include/page0size.h
deleted file mode 100644
index 981f8743960..00000000000
--- a/storage/innobase/include/page0size.h
+++ /dev/null
@@ -1,197 +0,0 @@
-/*****************************************************************************
-
-Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation.
-
-This program is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
-
-*****************************************************************************/
-
-/**************************************************//**
-@file include/page0size.h
-A class describing a page size.
-
-Created Nov 14, 2013 Vasil Dimov
-*******************************************************/
-
-#ifndef page0size_t
-#define page0size_t
-
-#include "fsp0types.h"
-
-#define FIELD_REF_SIZE 20U
-
-/** A BLOB field reference full of zero, for use in assertions and
-tests. Initially, BLOB field references are set to zero, in
-dtuple_convert_big_rec(). */
-extern const byte field_ref_zero[FIELD_REF_SIZE];
-
-#define PAGE_SIZE_T_SIZE_BITS 17
-
-/** Page size descriptor. Contains the physical and logical page size, as well
-as whether the page is compressed or not. */
-class page_size_t {
-public:
- /** Constructor from (physical, logical, is_compressed).
- @param[in] physical physical (on-disk/zipped) page size
- @param[in] logical logical (in-memory/unzipped) page size
- @param[in] is_compressed whether the page is compressed */
- page_size_t(ulint physical, ulint logical, bool is_compressed)
- {
- if (physical == 0) {
- physical = UNIV_PAGE_SIZE_ORIG;
- }
- if (logical == 0) {
- logical = UNIV_PAGE_SIZE_ORIG;
- }
-
- m_physical = static_cast<unsigned>(physical);
- m_logical = static_cast<unsigned>(logical);
- m_is_compressed = static_cast<unsigned>(is_compressed);
-
- ut_ad(physical <= (1 << PAGE_SIZE_T_SIZE_BITS));
- ut_ad(logical <= (1 << PAGE_SIZE_T_SIZE_BITS));
-
- ut_ad(ut_is_2pow(physical));
- ut_ad(ut_is_2pow(logical));
-
- ut_ad(logical <= UNIV_PAGE_SIZE_MAX);
- ut_ad(logical >= physical);
- ut_ad(!is_compressed || physical <= UNIV_ZIP_SIZE_MAX);
- }
-
- /** Constructor from (fsp_flags).
- @param[in] fsp_flags filespace flags */
- explicit page_size_t(ulint fsp_flags)
- {
- ulint ssize = FSP_FLAGS_GET_PAGE_SSIZE(fsp_flags);
-
- /* If the logical page size is zero in fsp_flags, then use the
- legacy 16k page size. */
- ssize = (0 == ssize) ? UNIV_PAGE_SSIZE_ORIG : ssize;
-
- /* Convert from a 'log2 minus 9' to a page size in bytes. */
- const unsigned size = ((UNIV_ZIP_SIZE_MIN >> 1) << ssize);
-
- ut_ad(size <= UNIV_PAGE_SIZE_MAX);
- ut_ad(size <= (1 << PAGE_SIZE_T_SIZE_BITS));
-
- m_logical = size;
-
- ssize = FSP_FLAGS_GET_ZIP_SSIZE(fsp_flags);
-
- /* If the fsp_flags have zero in the zip_ssize field, then it means
- that the tablespace does not have compressed pages and the physical
- page size is the same as the logical page size. */
- if (ssize == 0) {
- m_is_compressed = false;
- m_physical = m_logical;
- } else {
- m_is_compressed = true;
-
- /* Convert from a 'log2 minus 9' to a page size
- in bytes. */
- const unsigned phy
- = ((UNIV_ZIP_SIZE_MIN >> 1) << ssize);
-
- ut_ad(phy <= UNIV_ZIP_SIZE_MAX);
- ut_ad(phy <= (1 << PAGE_SIZE_T_SIZE_BITS));
-
- m_physical = phy;
- }
- }
-
- /** Retrieve the physical page size (on-disk).
- @return physical page size in bytes */
- inline ulint physical() const
- {
- ut_ad(m_physical > 0);
-
- return(m_physical);
- }
-
- /** Retrieve the logical page size (in-memory).
- @return logical page size in bytes */
- inline ulint logical() const
- {
- ut_ad(m_logical > 0);
- return(m_logical);
- }
-
- /** Check whether the page is compressed on disk.
- @return true if compressed */
- inline bool is_compressed() const
- {
- return(m_is_compressed);
- }
-
- /** Copy the values from a given page_size_t object.
- @param[in] src page size object whose values to fetch */
- inline void copy_from(const page_size_t& src)
- {
- *this = src;
- }
-
- /** Check if a given page_size_t object is equal to the current one.
- @param[in] a page_size_t object to compare
- @return true if equal */
- inline bool equals_to(const page_size_t& a) const
- {
- return(a.physical() == m_physical
- && a.logical() == m_logical
- && a.is_compressed() == m_is_compressed);
- }
-
-private:
-
- /* For non compressed tablespaces, physical page size is equal to
- the logical page size and the data is stored in buf_page_t::frame
- (and is also always equal to univ_page_size (--innodb-page-size=)).
-
- For compressed tablespaces, physical page size is the compressed
- page size as stored on disk and in buf_page_t::zip::data. The logical
- page size is the uncompressed page size in memory - the size of
- buf_page_t::frame (currently also always equal to univ_page_size
- (--innodb-page-size=)). */
-
- /** Physical page size. */
- unsigned m_physical:PAGE_SIZE_T_SIZE_BITS;
-
- /** Logical page size. */
- unsigned m_logical:PAGE_SIZE_T_SIZE_BITS;
-
- /** Flag designating whether the physical page is compressed, which is
- true IFF the whole tablespace where the page belongs is compressed. */
- unsigned m_is_compressed:1;
-};
-
-/* Overloading the global output operator to conveniently print an object
-of type page_size_t.
-@param[in,out] out the output stream
-@param[in] obj an object of type page_size_t to be printed
-@retval the output stream */
-inline
-std::ostream&
-operator<<(
- std::ostream& out,
- const page_size_t& obj)
-{
- out << "[page size: physical=" << obj.physical()
- << ", logical=" << obj.logical()
- << ", compressed=" << obj.is_compressed() << "]";
- return(out);
-}
-
-extern page_size_t univ_page_size;
-
-#endif /* page0size_t */
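The deleted page_size_t class bundled three values: the physical (on-disk, possibly compressed) size, the logical (in-memory) size, and a compression flag. The rest of this patch passes a single zip_size instead, where 0 means the tablespace is not ROW_FORMAT=COMPRESSED and the logical size is always the global innodb_page_size. A minimal standalone sketch of that encoding follows; the struct, function and parameter names are illustrative, not InnoDB code.

#include <cassert>
#include <cstdint>

// What the deleted class carried, reconstructed from a single zip_size
// under the convention used throughout this patch.
struct page_size_sketch {
  uint32_t physical;      // on-disk (possibly compressed) page size
  uint32_t logical;       // in-memory (uncompressed) page size
  bool     is_compressed; // true iff the tablespace is ROW_FORMAT=COMPRESSED
};

inline page_size_sketch from_zip_size(uint32_t univ_page_size, uint32_t zip_size)
{
  assert(univ_page_size && !(univ_page_size & (univ_page_size - 1)));
  if (zip_size == 0)                      // uncompressed tablespace
    return {univ_page_size, univ_page_size, false};
  assert(!(zip_size & (zip_size - 1)));   // compressed sizes are powers of two
  return {zip_size, univ_page_size, true};
}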
diff --git a/storage/innobase/include/page0types.h b/storage/innobase/include/page0types.h
index 47d66df2758..0913a50fee2 100644
--- a/storage/innobase/include/page0types.h
+++ b/storage/innobase/include/page0types.h
@@ -83,18 +83,6 @@ enum page_cur_mode_t {
PAGE_CUR_RTREE_GET_FATHER = 14
};
-
-/** The information used for compressing a page when applying
-TRUNCATE log record during recovery */
-struct redo_page_compress_t {
- ulint type; /*!< index type */
- index_id_t index_id; /*!< index id */
- ulint n_fields; /*!< number of index fields */
- ulint field_len; /*!< the length of index field */
- const byte* fields; /*!< index field information */
- ulint trx_id_pos; /*!< position of trx-id column. */
-};
-
/** Compressed page descriptor */
struct page_zip_des_t
{
diff --git a/storage/innobase/include/page0zip.h b/storage/innobase/include/page0zip.h
index c9ce4efd3dc..b462833e4c7 100644
--- a/storage/innobase/include/page0zip.h
+++ b/storage/innobase/include/page0zip.h
@@ -28,27 +28,11 @@ Created June 2005 by Marko Makela
#ifndef page0zip_h
#define page0zip_h
-#ifdef UNIV_MATERIALIZE
-# undef UNIV_INLINE
-# define UNIV_INLINE
-#endif
-
-#ifdef UNIV_INNOCHECKSUM
-#include "buf0buf.h"
-#include "ut0crc32.h"
-#include "buf0checksum.h"
-#include "mach0data.h"
-#include "zlib.h"
-#endif /* UNIV_INNOCHECKSUM */
+#include "buf0types.h"
#ifndef UNIV_INNOCHECKSUM
#include "mtr0types.h"
#include "page0types.h"
-#endif /* !UNIV_INNOCHECKSUM */
-
-#include "buf0types.h"
-
-#ifndef UNIV_INNOCHECKSUM
#include "dict0types.h"
#include "srv0srv.h"
#include "trx0types.h"
@@ -102,15 +86,10 @@ page_zip_set_size(
@param[in] comp nonzero=compact format
@param[in] n_fields number of fields in the record; ignored if
tablespace is not compressed
-@param[in] page_size page size
-@return FALSE if the entire record can be stored locally on the page */
-UNIV_INLINE
-ibool
-page_zip_rec_needs_ext(
- ulint rec_size,
- ulint comp,
- ulint n_fields,
- const page_size_t& page_size)
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@return false if the entire record can be stored locally on the page */
+inline bool page_zip_rec_needs_ext(ulint rec_size, ulint comp, ulint n_fields,
+ ulint zip_size)
MY_ATTRIBUTE((warn_unused_result));
/**********************************************************************//**
@@ -163,10 +142,6 @@ page_zip_compress(
dict_index_t* index, /*!< in: index of the B-tree
node */
 ulint level, /*!< in: compression level */
- const redo_page_compress_t* page_comp_info,
- /*!< in: used for applying
- TRUNCATE log
- record during recovery */
mtr_t* mtr); /*!< in/out: mini-transaction,
or NULL */
@@ -516,22 +491,14 @@ uint32_t
page_zip_calc_checksum(
const void* data,
ulint size,
- srv_checksum_algorithm_t algo
-#ifdef INNODB_BUG_ENDIAN_CRC32
- /** for crc32, use the big-endian bug-compatible crc32 variant */
- , bool use_legacy_big_endian = false
-#endif
-);
+ srv_checksum_algorithm_t algo);
-/**********************************************************************//**
-Verify a compressed page's checksum.
-@return TRUE if the stored checksum is valid according to the value of
+/** Verify a compressed page's checksum.
+@param[in] data compressed page
+@param[in] size size of compressed page
+@return whether the stored checksum is valid according to the value of
innodb_checksum_algorithm */
-ibool
-page_zip_verify_checksum(
-/*=====================*/
- const void* data, /*!< in: compressed page */
- ulint size); /*!< in: size of compressed page */
+bool page_zip_verify_checksum(const void* data, ulint size);
#ifndef UNIV_INNOCHECKSUM
/**********************************************************************//**
@@ -566,11 +533,6 @@ void
page_zip_reset_stat_per_index();
/*===========================*/
-#ifdef UNIV_MATERIALIZE
-# undef UNIV_INLINE
-# define UNIV_INLINE UNIV_INLINE_ORIGINAL
-#endif
-
#include "page0zip.ic"
#endif /* !UNIV_INNOCHECKSUM */
diff --git a/storage/innobase/include/page0zip.ic b/storage/innobase/include/page0zip.ic
index b3ebc5dcf51..a187f7e0111 100644
--- a/storage/innobase/include/page0zip.ic
+++ b/storage/innobase/include/page0zip.ic
@@ -2,7 +2,7 @@
Copyright (c) 2005, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2012, Facebook Inc.
-Copyright (c) 2017, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -25,11 +25,6 @@ Compressed page interface
Created June 2005 by Marko Makela
*******************************************************/
-#ifdef UNIV_MATERIALIZE
-# undef UNIV_INLINE
-# define UNIV_INLINE
-#endif
-
#include "page0zip.h"
#include "mtr0log.h"
#include "page0page.h"
@@ -154,19 +149,14 @@ page_zip_set_size(
@param[in] comp nonzero=compact format
@param[in] n_fields number of fields in the record; ignored if
tablespace is not compressed
-@param[in] page_size page size
-@return FALSE if the entire record can be stored locally on the page */
-UNIV_INLINE
-ibool
-page_zip_rec_needs_ext(
- ulint rec_size,
- ulint comp,
- ulint n_fields,
- const page_size_t& page_size)
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
+@return false if the entire record can be stored locally on the page */
+inline bool page_zip_rec_needs_ext(ulint rec_size, ulint comp, ulint n_fields,
+ ulint zip_size)
{
ut_ad(rec_size
> ulint(comp ? REC_N_NEW_EXTRA_BYTES : REC_N_OLD_EXTRA_BYTES));
- ut_ad(comp || !page_size.is_compressed());
+ ut_ad(comp || !zip_size);
#if UNIV_PAGE_SIZE_MAX > COMPRESSED_REC_MAX_DATA_SIZE
if (comp ? rec_size >= COMPRESSED_REC_MAX_DATA_SIZE :
@@ -175,7 +165,7 @@ page_zip_rec_needs_ext(
}
#endif
- if (page_size.is_compressed()) {
+ if (zip_size) {
ut_ad(comp);
/* On a compressed page, there is a two-byte entry in
the dense page directory for every record. But there
@@ -184,7 +174,7 @@ page_zip_rec_needs_ext(
the encoded heap number. Check also the available space
on the uncompressed page. */
return(rec_size - (REC_N_NEW_EXTRA_BYTES - 2 - 1)
- >= page_zip_empty_size(n_fields, page_size.physical())
+ >= page_zip_empty_size(n_fields, zip_size)
|| rec_size >= page_get_free_space_of_empty(TRUE) / 2);
}
@@ -414,7 +404,7 @@ page_zip_parse_compress_no_data(
was successful. Crash in this case. */
if (page
- && !page_zip_compress(page_zip, page, index, level, NULL, NULL)) {
+ && !page_zip_compress(page_zip, page, index, level, NULL)) {
ut_error;
}
@@ -437,8 +427,3 @@ page_zip_reset_stat_per_index()
mutex_exit(&page_zip_stat_per_index_mutex);
}
-
-#ifdef UNIV_MATERIALIZE
-# undef UNIV_INLINE
-# define UNIV_INLINE UNIV_INLINE_ORIGINAL
-#endif
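With the page_size_t parameter gone, the overflow decision above depends only on the record size, a fixed per-format maximum, and the capacity of an empty page (compressed or not). Below is a condensed sketch of that decision; the capacity limits are passed in as parameters here instead of being computed from the page layout, so the names and thresholds are illustrative only.

// Sketch only: zip_size == 0 means the page is not ROW_FORMAT=COMPRESSED.
inline bool rec_needs_ext_sketch(unsigned long rec_size,
                                 unsigned long zip_size,
                                 unsigned long max_rec_size,
                                 unsigned long zip_empty_capacity,
                                 unsigned long empty_free_space)
{
  if (rec_size >= max_rec_size)
    return true;                              // can never be stored locally
  if (zip_size)                               // compressed page: respect both
    return rec_size >= zip_empty_capacity     // the compressed capacity and
        || rec_size >= empty_free_space / 2;  // the uncompressed free space
  return rec_size >= empty_free_space / 2;
}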
diff --git a/storage/innobase/include/read0types.h b/storage/innobase/include/read0types.h
index 52338889c47..5cd8e1740a7 100644
--- a/storage/innobase/include/read0types.h
+++ b/storage/innobase/include/read0types.h
@@ -66,7 +66,14 @@ class ReadView
Close view:
READ_VIEW_STATE_OPEN -> READ_VIEW_STATE_CLOSED
*/
- int32_t m_state;
+ std::atomic<uint32_t> m_state;
+
+
+ /** m_state getter for ReadView owner thread */
+ uint32_t state() const
+ {
+ return m_state.load(std::memory_order_relaxed);
+ }
public:
@@ -134,35 +141,36 @@ loop:
Closes the view.
View becomes not visible to purge thread.
+
+ This method is intended to be called by ReadView owner thread, thus
+ m_state cannot change.
*/
void close()
{
- ut_ad(m_state == READ_VIEW_STATE_CLOSED ||
- m_state == READ_VIEW_STATE_OPEN);
- if (m_state == READ_VIEW_STATE_OPEN)
- my_atomic_store32_explicit(&m_state, READ_VIEW_STATE_CLOSED,
- MY_MEMORY_ORDER_RELAXED);
+ ut_ad(state() == READ_VIEW_STATE_CLOSED ||
+ state() == READ_VIEW_STATE_OPEN);
+ m_state.store(READ_VIEW_STATE_CLOSED, std::memory_order_relaxed);
}
 /** m_state getter for trx_sys::clone_oldest_view() and trx_sys::size(). */
- int32_t get_state() const
+ uint32_t get_state() const
{
- return my_atomic_load32_explicit(const_cast<int32*>(&m_state),
- MY_MEMORY_ORDER_ACQUIRE);
+ return m_state.load(std::memory_order_acquire);
}
/**
Returns true if view is open.
- Only used by view owner thread, thus we can omit atomic operations.
+ This method is intended to be called by ReadView owner thread, thus
+ m_state cannot change.
*/
bool is_open() const
{
- ut_ad(m_state == READ_VIEW_STATE_OPEN ||
- m_state == READ_VIEW_STATE_CLOSED);
- return m_state == READ_VIEW_STATE_OPEN;
+ ut_ad(state() == READ_VIEW_STATE_OPEN ||
+ state() == READ_VIEW_STATE_CLOSED);
+ return state() == READ_VIEW_STATE_OPEN;
}
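The my_atomic calls on m_state are replaced by a std::atomic<uint32_t>: the owner thread reads and writes its own view state with relaxed ordering (no other thread modifies it), while other threads such as purge use an acquire load through get_state(). A reduced standalone sketch of that pattern follows; the state values, the open() method and the memory orders used for opening a view are illustrative, not copied from this patch.

#include <atomic>
#include <cstdint>

enum : uint32_t { STATE_CLOSED = 0, STATE_OPEN = 1 };

class view_state_sketch {
  std::atomic<uint32_t> m_state{STATE_CLOSED};
public:
  // Owner thread only: no other thread modifies m_state concurrently,
  // so relaxed ordering is sufficient for its own reads and writes.
  void close() { m_state.store(STATE_CLOSED, std::memory_order_relaxed); }
  bool is_open() const
  { return m_state.load(std::memory_order_relaxed) == STATE_OPEN; }

  // Owner thread: publish an opened view so that fields written before
  // the store are visible to any thread that observes STATE_OPEN.
  void open() { m_state.store(STATE_OPEN, std::memory_order_release); }

  // Any thread: acquire pairs with the release above.
  uint32_t get_state() const
  { return m_state.load(std::memory_order_acquire); }
};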
diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h
index 3a541289a61..fdcae3818bc 100644
--- a/storage/innobase/include/rem0rec.h
+++ b/storage/innobase/include/rem0rec.h
@@ -38,15 +38,6 @@ Created 5/30/1994 Heikki Tuuri
#include <ostream>
#include <sstream>
-/* Info bit denoting the predefined minimum record: this bit is set
-if and only if the record is the first user record on a non-leaf
-B-tree page that is the leftmost page on its level
-(PAGE_LEVEL is nonzero and FIL_PAGE_PREV is FIL_NULL). */
-#define REC_INFO_MIN_REC_FLAG 0x10UL
-/* The deleted flag in info bits */
-#define REC_INFO_DELETED_FLAG 0x20UL /* when bit is set to 1, it means the
- record has been delete marked */
-
/* Number of extra bytes in an old-style record,
in addition to the data and the offsets */
#define REC_N_OLD_EXTRA_BYTES 6
@@ -54,26 +45,6 @@ in addition to the data and the offsets */
in addition to the data and the offsets */
#define REC_N_NEW_EXTRA_BYTES 5
-/** Record status values for ROW_FORMAT=COMPACT,DYNAMIC,COMPRESSED */
-enum rec_comp_status_t {
- /** User record (PAGE_LEVEL=0, heap>=PAGE_HEAP_NO_USER_LOW) */
- REC_STATUS_ORDINARY = 0,
- /** Node pointer record (PAGE_LEVEL>=0, heap>=PAGE_HEAP_NO_USER_LOW) */
- REC_STATUS_NODE_PTR = 1,
- /** The page infimum pseudo-record (heap=PAGE_HEAP_NO_INFIMUM) */
- REC_STATUS_INFIMUM = 2,
- /** The page supremum pseudo-record (heap=PAGE_HEAP_NO_SUPREMUM) */
- REC_STATUS_SUPREMUM = 3,
- /** Clustered index record that has been inserted or updated
- after instant ADD COLUMN (more than dict_index_t::n_core_fields) */
- REC_STATUS_COLUMNS_ADDED = 4
-};
-
-/** The dtuple_t::info_bits of the metadata pseudo-record.
-@see rec_is_metadata() */
-static const byte REC_INFO_METADATA
- = REC_INFO_MIN_REC_FLAG | REC_STATUS_COLUMNS_ADDED;
-
#define REC_NEW_STATUS 3 /* This is single byte bit-field */
#define REC_NEW_STATUS_MASK 0x7UL
#define REC_NEW_STATUS_SHIFT 0
@@ -295,7 +266,7 @@ rec_comp_status_t
rec_get_status(const rec_t* rec)
{
byte bits = rec[-REC_NEW_STATUS] & REC_NEW_STATUS_MASK;
- ut_ad(bits <= REC_STATUS_COLUMNS_ADDED);
+ ut_ad(bits <= REC_STATUS_INSTANT);
return static_cast<rec_comp_status_t>(bits);
}
@@ -306,12 +277,12 @@ inline
void
rec_set_status(rec_t* rec, byte bits)
{
- ut_ad(bits <= REC_STATUS_COLUMNS_ADDED);
+ ut_ad(bits <= REC_STATUS_INSTANT);
rec[-REC_NEW_STATUS] = (rec[-REC_NEW_STATUS] & ~REC_NEW_STATUS_MASK)
| bits;
}
-/** Get the length of added field count in a REC_STATUS_COLUMNS_ADDED record.
+/** Get the length of added field count in a REC_STATUS_INSTANT record.
@param[in] n_add_field number of added fields, minus one
@return storage size of the field count, in bytes */
inline unsigned rec_get_n_add_field_len(ulint n_add_field)
@@ -320,8 +291,8 @@ inline unsigned rec_get_n_add_field_len(ulint n_add_field)
return n_add_field < 0x80 ? 1 : 2;
}
-/** Set the added field count in a REC_STATUS_COLUMNS_ADDED record.
-@param[in,out] header variable header of a REC_STATUS_COLUMNS_ADDED record
+/** Set the added field count in a REC_STATUS_INSTANT record.
+@param[in,out] header variable header of a REC_STATUS_INSTANT record
@param[in] n_add number of added fields, minus 1
@return record header before the number of added fields */
inline void rec_set_n_add_field(byte*& header, ulint n_add)
@@ -780,20 +751,89 @@ rec_offs_comp(const ulint* offsets)
}
/** Determine if the record is the metadata pseudo-record
-in the clustered index.
+in the clustered index for instant ADD COLUMN or ALTER TABLE.
+@param[in] rec leaf page record
+@param[in] comp 0 if ROW_FORMAT=REDUNDANT, else nonzero
+@return whether the record is the metadata pseudo-record */
+inline bool rec_is_metadata(const rec_t* rec, ulint comp)
+{
+ bool is = !!(rec_get_info_bits(rec, comp) & REC_INFO_MIN_REC_FLAG);
+ ut_ad(!is || !comp || rec_get_status(rec) == REC_STATUS_INSTANT);
+ return is;
+}
+
+/** Determine if the record is the metadata pseudo-record
+in the clustered index for instant ADD COLUMN or ALTER TABLE.
@param[in] rec leaf page record
@param[in] index index of the record
@return whether the record is the metadata pseudo-record */
-inline bool rec_is_metadata(const rec_t* rec, const dict_index_t* index)
+inline bool rec_is_metadata(const rec_t* rec, const dict_index_t& index)
{
- bool is = rec_get_info_bits(rec, dict_table_is_comp(index->table))
- & REC_INFO_MIN_REC_FLAG;
- ut_ad(!is || index->is_instant());
- ut_ad(!is || !dict_table_is_comp(index->table)
- || rec_get_status(rec) == REC_STATUS_COLUMNS_ADDED);
+ bool is = rec_is_metadata(rec, dict_table_is_comp(index.table));
+ ut_ad(!is || index.is_instant());
return is;
}
+/** Determine if the record is the metadata pseudo-record
+in the clustered index for instant ADD COLUMN (not other ALTER TABLE).
+@param[in] rec leaf page record
+@param[in] comp 0 if ROW_FORMAT=REDUNDANT, else nonzero
+@return whether the record is the metadata pseudo-record */
+inline bool rec_is_add_metadata(const rec_t* rec, ulint comp)
+{
+ bool is = rec_get_info_bits(rec, comp) == REC_INFO_MIN_REC_FLAG;
+ ut_ad(!is || !comp || rec_get_status(rec) == REC_STATUS_INSTANT);
+ return is;
+}
+
+/** Determine if the record is the metadata pseudo-record
+in the clustered index for instant ADD COLUMN (not other ALTER TABLE).
+@param[in] rec leaf page record
+@param[in] index index of the record
+@return whether the record is the metadata pseudo-record */
+inline bool rec_is_add_metadata(const rec_t* rec, const dict_index_t& index)
+{
+ bool is = rec_is_add_metadata(rec, dict_table_is_comp(index.table));
+ ut_ad(!is || index.is_instant());
+ return is;
+}
+
+/** Determine if the record is the metadata pseudo-record
+in the clustered index for instant ALTER TABLE (not plain ADD COLUMN).
+@param[in] rec leaf page record
+@param[in] comp 0 if ROW_FORMAT=REDUNDANT, else nonzero
+@return whether the record is the ALTER TABLE metadata pseudo-record */
+inline bool rec_is_alter_metadata(const rec_t* rec, ulint comp)
+{
+ bool is = !(~rec_get_info_bits(rec, comp)
+ & (REC_INFO_MIN_REC_FLAG | REC_INFO_DELETED_FLAG));
+ ut_ad(!is || rec_is_metadata(rec, comp));
+ return is;
+}
+
+/** Determine if the record is the metadata pseudo-record
+in the clustered index for instant ALTER TABLE (not plain ADD COLUMN).
+@param[in] rec leaf page record
+@param[in] index index of the record
+@return whether the record is the ALTER TABLE metadata pseudo-record */
+inline bool rec_is_alter_metadata(const rec_t* rec, const dict_index_t& index)
+{
+ bool is = rec_is_alter_metadata(rec, dict_table_is_comp(index.table));
+ ut_ad(!is || index.is_dummy || index.is_instant());
+ return is;
+}
+
+/** Determine if a record is delete-marked (not a metadata pseudo-record).
+@param[in] rec record
+@param[in] comp nonzero if ROW_FORMAT!=REDUNDANT
+@return whether the record is a delete-marked user record */
+inline bool rec_is_delete_marked(const rec_t* rec, ulint comp)
+{
+ return (rec_get_info_bits(rec, comp)
+ & (REC_INFO_MIN_REC_FLAG | REC_INFO_DELETED_FLAG))
+ == REC_INFO_DELETED_FLAG;
+}
+
/** Get the nth field from an index.
@param[in] rec index record
@param[in] index index
@@ -811,6 +851,7 @@ rec_get_nth_cfield(
ulint* len)
{
ut_ad(rec_offs_validate(rec, index, offsets));
+
if (!rec_offs_nth_default(offsets, n)) {
return rec_get_nth_field(rec, offsets, n, len);
}
@@ -957,7 +998,7 @@ rec_copy(
@param[in] fields data fields
@param[in] n_fields number of data fields
@param[out] extra record header size
-@param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED
+@param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT
@return total size, in bytes */
ulint
rec_get_converted_size_temp(
@@ -974,7 +1015,7 @@ rec_get_converted_size_temp(
@param[in,out] offsets offsets to the fields; in: rec_offs_n_fields(offsets)
@param[in] n_core number of core fields (index->n_core_fields)
@param[in] def_val default values for non-core fields
-@param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED */
+@param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT */
void
rec_init_offsets_temp(
const rec_t* rec,
@@ -1001,8 +1042,7 @@ rec_init_offsets_temp(
@param[in] index clustered or secondary index
@param[in] fields data fields
@param[in] n_fields number of data fields
-@param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED
-*/
+@param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT */
void
rec_convert_dtuple_to_temp(
rec_t* rec,
@@ -1065,21 +1105,20 @@ rec_get_converted_size_comp_prefix(
ulint n_fields,/*!< in: number of data fields */
ulint* extra) /*!< out: extra size */
MY_ATTRIBUTE((warn_unused_result, nonnull(1,2)));
-/**********************************************************//**
-Determines the size of a data tuple in ROW_FORMAT=COMPACT.
+
+/** Determine the size of a record in ROW_FORMAT=COMPACT.
+@param[in] index record descriptor. dict_table_is_comp()
+ is assumed to hold, even if it doesn't
+@param[in] tuple logical record
+@param[out] extra extra size
@return total size */
ulint
rec_get_converted_size_comp(
-/*========================*/
- const dict_index_t* index, /*!< in: record descriptor;
- dict_table_is_comp() is
- assumed to hold, even if
- it does not */
- rec_comp_status_t status, /*!< in: status bits of the record */
- const dfield_t* fields, /*!< in: array of data fields */
- ulint n_fields,/*!< in: number of data fields */
- ulint* extra) /*!< out: extra size */
- MY_ATTRIBUTE((nonnull(1,3)));
+ const dict_index_t* index,
+ const dtuple_t* tuple,
+ ulint* extra)
+ MY_ATTRIBUTE((nonnull(1,2)));
+
/**********************************************************//**
The following function returns the size of a data tuple when converted to
a physical record.
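The new predicates tell the metadata pseudo-record variants apart purely by the two info bits: the minimum-record flag alone marks the instant ADD COLUMN metadata record, the minimum-record flag together with the delete flag marks the instant ALTER TABLE metadata record, and the delete flag alone is an ordinary delete-marked user record. The same tests over plain flag values, as a self-contained sketch (the flag values are the ones shown in the block removed above):

// Sketch of the info-bit combinations used by the predicates above.
constexpr unsigned MIN_REC_FLAG = 0x10U;
constexpr unsigned DELETED_FLAG = 0x20U;

constexpr bool is_metadata(unsigned info_bits)
{ return (info_bits & MIN_REC_FLAG) != 0; }

constexpr bool is_add_metadata(unsigned info_bits)    // instant ADD COLUMN only
{ return info_bits == MIN_REC_FLAG; }

constexpr bool is_alter_metadata(unsigned info_bits)  // generic instant ALTER TABLE
{ return !(~info_bits & (MIN_REC_FLAG | DELETED_FLAG)); }

constexpr bool is_delete_marked_user_rec(unsigned info_bits)
{ return (info_bits & (MIN_REC_FLAG | DELETED_FLAG)) == DELETED_FLAG; }

static_assert(is_add_metadata(MIN_REC_FLAG), "ADD metadata: MIN_REC only");
static_assert(is_alter_metadata(MIN_REC_FLAG | DELETED_FLAG), "ALTER metadata");
static_assert(!is_alter_metadata(MIN_REC_FLAG), "ADD metadata is not ALTER metadata");
static_assert(is_delete_marked_user_rec(DELETED_FLAG), "plain delete mark");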
diff --git a/storage/innobase/include/rem0rec.ic b/storage/innobase/include/rem0rec.ic
index d1d89b9a4be..f5beda44008 100644
--- a/storage/innobase/include/rem0rec.ic
+++ b/storage/innobase/include/rem0rec.ic
@@ -67,7 +67,7 @@ most significant bytes and bits are written below less significant.
001=REC_STATUS_NODE_PTR
010=REC_STATUS_INFIMUM
011=REC_STATUS_SUPREMUM
- 100=REC_STATUS_COLUMNS_ADDED
+ 100=REC_STATUS_INSTANT
1xx=reserved
5 bits heap number
4 8 bits heap number
@@ -451,7 +451,7 @@ rec_get_n_fields(
}
switch (rec_get_status(rec)) {
- case REC_STATUS_COLUMNS_ADDED:
+ case REC_STATUS_INSTANT:
case REC_STATUS_ORDINARY:
return(dict_index_get_n_fields(index));
case REC_STATUS_NODE_PTR:
@@ -547,19 +547,6 @@ rec_set_n_owned_new(
}
}
-#ifdef UNIV_DEBUG
-/** Check if the info bits are valid.
-@param[in] bits info bits to check
-@return true if valid */
-inline
-bool
-rec_info_bits_valid(
- ulint bits)
-{
- return(0 == (bits & ~(REC_INFO_DELETED_FLAG | REC_INFO_MIN_REC_FLAG)));
-}
-#endif /* UNIV_DEBUG */
-
/******************************************************//**
The following function is used to retrieve the info bits of a record.
@return info bits */
@@ -573,7 +560,6 @@ rec_get_info_bits(
const ulint val = rec_get_bit_field_1(
rec, comp ? REC_NEW_INFO_BITS : REC_OLD_INFO_BITS,
REC_INFO_BITS_MASK, REC_INFO_BITS_SHIFT);
- ut_ad(rec_info_bits_valid(val));
return(val);
}
@@ -586,7 +572,6 @@ rec_set_info_bits_old(
rec_t* rec, /*!< in: old-style physical record */
ulint bits) /*!< in: info bits */
{
- ut_ad(rec_info_bits_valid(bits));
rec_set_bit_field_1(rec, bits, REC_OLD_INFO_BITS,
REC_INFO_BITS_MASK, REC_INFO_BITS_SHIFT);
}
@@ -599,7 +584,6 @@ rec_set_info_bits_new(
rec_t* rec, /*!< in/out: new-style physical record */
ulint bits) /*!< in: info bits */
{
- ut_ad(rec_info_bits_valid(bits));
rec_set_bit_field_1(rec, bits, REC_NEW_INFO_BITS,
REC_INFO_BITS_MASK, REC_INFO_BITS_SHIFT);
}
@@ -891,7 +875,6 @@ rec_get_nth_field_offs(
if SQL null; UNIV_SQL_DEFAULT is default value */
{
ulint offs;
- ulint length;
ut_ad(n < rec_offs_n_fields(offsets));
if (n == 0) {
@@ -900,7 +883,7 @@ rec_get_nth_field_offs(
offs = rec_offs_base(offsets)[n] & REC_OFFS_MASK;
}
- length = rec_offs_base(offsets)[1 + n];
+ ulint length = rec_offs_base(offsets)[1 + n];
if (length & REC_OFFS_SQL_NULL) {
length = UNIV_SQL_NULL;
@@ -1258,8 +1241,9 @@ rec_offs_data_size(
ulint size;
ut_ad(rec_offs_validate(NULL, NULL, offsets));
- size = rec_offs_base(offsets)[rec_offs_n_fields(offsets)]
- & REC_OFFS_MASK;
+
+ ulint n = rec_offs_n_fields(offsets);
+ size = rec_offs_base(offsets)[n] & REC_OFFS_MASK;
ut_ad(size < srv_page_size);
return(size);
}
@@ -1398,24 +1382,20 @@ rec_get_converted_size(
} else if (index->table->id == DICT_INDEXES_ID) {
/* The column SYS_INDEXES.MERGE_THRESHOLD was
instantly added in MariaDB 10.2.2 (MySQL 5.7). */
+ ut_ad(!index->table->is_temporary());
ut_ad(index->n_fields == DICT_NUM_FIELDS__SYS_INDEXES);
ut_ad(dtuple->n_fields == DICT_NUM_FIELDS__SYS_INDEXES
|| dtuple->n_fields
== DICT_FLD__SYS_INDEXES__MERGE_THRESHOLD);
} else {
ut_ad(dtuple->n_fields >= index->n_core_fields);
- ut_ad(dtuple->n_fields <= index->n_fields);
+ ut_ad(dtuple->n_fields <= index->n_fields
+ || dtuple->is_alter_metadata());
}
#endif
if (dict_table_is_comp(index->table)) {
- return(rec_get_converted_size_comp(
- index,
- static_cast<rec_comp_status_t>(
- dtuple->info_bits
- & REC_NEW_STATUS_MASK),
- dtuple->fields,
- dtuple->n_fields, NULL));
+ return rec_get_converted_size_comp(index, dtuple, NULL);
}
data_size = dtuple_get_data_size(dtuple, 0);
diff --git a/storage/innobase/include/row0ext.h b/storage/innobase/include/row0ext.h
index fe4bd710156..651dab9f6e3 100644
--- a/storage/innobase/include/row0ext.h
+++ b/storage/innobase/include/row0ext.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -29,7 +30,7 @@ Created September 2006 Marko Makela
#include "data0types.h"
#include "mem0mem.h"
#include "dict0types.h"
-#include "page0size.h"
+#include "fsp0types.h"
#include "row0types.h"
/********************************************************************//**
@@ -43,7 +44,7 @@ row_ext_create(
in the InnoDB table object, as reported by
dict_col_get_no(); NOT relative to the records
in the clustered index */
- ulint flags, /*!< in: table->flags */
+ const dict_table_t& table, /*!< in: table */
const dtuple_t* tuple, /*!< in: data tuple containing the field
references of the externally stored
columns; must be indexed by col_no;
@@ -91,9 +92,7 @@ struct row_ext_t{
REC_ANTELOPE_MAX_INDEX_COL_LEN or
REC_VERSION_56_MAX_INDEX_COL_LEN depending
on row format */
- page_size_t page_size;
- /*!< page size of the externally stored
- columns */
+ ulint zip_size;/*!< ROW_FORMAT=COMPRESSED page size, or 0 */
ulint len[1]; /*!< prefix lengths; 0 if not cached */
};
diff --git a/storage/innobase/include/row0log.h b/storage/innobase/include/row0log.h
index fa6592f4628..334c67fb434 100644
--- a/storage/innobase/include/row0log.h
+++ b/storage/innobase/include/row0log.h
@@ -36,7 +36,7 @@ Created 2011-05-26 Marko Makela
class ut_stage_alter_t;
-extern ulint onlineddl_rowlog_rows;
+extern Atomic_counter<ulint> onlineddl_rowlog_rows;
extern ulint onlineddl_rowlog_pct_used;
extern ulint onlineddl_pct_progress;
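onlineddl_rowlog_rows becomes an Atomic_counter<ulint>, so concurrent row-log writers can update it without an external mutex. Assuming Atomic_counter is essentially a thin wrapper over std::atomic with relaxed arithmetic (an assumption about that utility class, not something this hunk shows), its shape is roughly:

#include <atomic>

// Assumed shape of such a counter wrapper; the real Atomic_counter in the
// MariaDB source may differ in detail.
template <typename T>
class atomic_counter_sketch {
  std::atomic<T> v{0};
public:
  T operator++(int) { return v.fetch_add(1, std::memory_order_relaxed); }
  T operator--(int) { return v.fetch_sub(1, std::memory_order_relaxed); }
  operator T() const { return v.load(std::memory_order_relaxed); }
};

atomic_counter_sketch<unsigned long> onlineddl_rows_sketch;  // illustrative global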
diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h
index 087006b9fff..94307146e9e 100644
--- a/storage/innobase/include/row0mysql.h
+++ b/storage/innobase/include/row0mysql.h
@@ -43,6 +43,7 @@ Created 9/17/2000 Heikki Tuuri
extern ibool row_rollback_on_timeout;
struct row_prebuilt_t;
+class ha_innobase;
/*******************************************************************//**
Frees the blob heap in prebuilt when no longer needed. */
@@ -777,10 +778,14 @@ struct row_prebuilt_t {
store it here so that we can return
it to MySQL */
/*----------------------*/
- void* idx_cond; /*!< In ICP, pointer to a ha_innobase,
- passed to innobase_index_cond().
- NULL if index condition pushdown is
- not used. */
+
+ /** Argument of handler_rowid_filter_check(),
+ or NULL if no PRIMARY KEY filter is pushed */
+ ha_innobase* pk_filter;
+
+ /** Argument to handler_index_cond_check(),
+ or NULL if no index condition pushdown (ICP) is used. */
+ ha_innobase* idx_cond;
ulint idx_cond_n_cols;/*!< Number of fields in idx_cond_cols.
0 if and only if idx_cond == NULL. */
/*----------------------*/
diff --git a/storage/innobase/include/row0row.h b/storage/innobase/include/row0row.h
index 630a40b0765..932accc46b0 100644
--- a/storage/innobase/include/row0row.h
+++ b/storage/innobase/include/row0row.h
@@ -74,6 +74,7 @@ row_get_rec_roll_ptr(
#define ROW_BUILD_FOR_PURGE 1 /*!< build row for purge. */
#define ROW_BUILD_FOR_UNDO 2 /*!< build row for undo. */
#define ROW_BUILD_FOR_INSERT 3 /*!< build row for insert. */
+
/*****************************************************************//**
When an insert or purge to a table is performed, this function builds
the entry to be inserted into or purged from an index on the table.
@@ -227,6 +228,26 @@ row_rec_to_index_entry(
mem_heap_t* heap) /*!< in: memory heap from which
the memory needed is allocated */
MY_ATTRIBUTE((warn_unused_result));
+
+/** Convert a metadata record to a data tuple.
+@param[in] rec metadata record
+@param[in] index clustered index after instant ALTER TABLE
+@param[in] offsets rec_get_offsets(rec)
+@param[out] n_ext number of externally stored fields
+@param[in,out] heap memory heap for allocations
+@param[in] info_bits the info_bits after an update
+@param[in] pad whether to pad to index->n_fields */
+dtuple_t*
+row_metadata_to_tuple(
+ const rec_t* rec,
+ const dict_index_t* index,
+ const ulint* offsets,
+ ulint* n_ext,
+ mem_heap_t* heap,
+ ulint info_bits,
+ bool pad)
+ MY_ATTRIBUTE((nonnull,warn_unused_result));
+
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record.
diff --git a/storage/innobase/include/row0row.ic b/storage/innobase/include/row0row.ic
index e1a3b5f6a1a..c9db1d2a37e 100644
--- a/storage/innobase/include/row0row.ic
+++ b/storage/innobase/include/row0row.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation.
+Copyright (c) 2017, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,16 +39,12 @@ row_get_trx_id_offset(
const dict_index_t* index, /*!< in: clustered index */
const ulint* offsets)/*!< in: record offsets */
{
- ulint pos;
ulint offset;
ulint len;
- ut_ad(dict_index_is_clust(index));
ut_ad(rec_offs_validate(NULL, index, offsets));
- pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
-
- offset = rec_get_nth_field_offs(offsets, pos, &len);
+ offset = rec_get_nth_field_offs(offsets, index->db_trx_id(), &len);
ut_ad(len == DATA_TRX_ID_LEN);
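row_get_trx_id_offset() now asks the clustered index for the position of DB_TRX_ID via index->db_trx_id() instead of searching it with dict_index_get_sys_col_pos(). The underlying idea is that in a clustered index the system columns sit at fixed positions right after the unique key fields, so the lookup reduces to arithmetic. A sketch of that idea with a hypothetical index descriptor (the member layout is assumed for illustration):

#include <cstdint>

// Hypothetical clustered-index descriptor: DB_TRX_ID and DB_ROLL_PTR are
// assumed to follow the n_uniq key fields, so their positions are derived,
// not looked up in the data dictionary.
struct clust_index_sketch {
  uint16_t n_uniq;                               // number of unique key fields
  uint16_t db_trx_id() const { return n_uniq; }  // position of DB_TRX_ID
  uint16_t db_roll_ptr() const { return uint16_t(n_uniq + 1); }
};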
diff --git a/storage/innobase/include/row0trunc.h b/storage/innobase/include/row0trunc.h
deleted file mode 100644
index a7592f33cf7..00000000000
--- a/storage/innobase/include/row0trunc.h
+++ /dev/null
@@ -1,416 +0,0 @@
-/*****************************************************************************
-
-Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2018, MariaDB Corporation.
-
-This program is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
-
-*****************************************************************************/
-
-/**************************************************//**
-@file include/row0trunc.h
-TRUNCATE implementation
-
-Created 2013-04-25 Krunal Bauskar
-*******************************************************/
-
-#ifndef row0trunc_h
-#define row0trunc_h
-
-#include "row0mysql.h"
-#include "dict0boot.h"
-#include "fil0fil.h"
-#include "srv0start.h"
-
-#include <vector>
-
-/** The information of TRUNCATE log record.
-This class handles the recovery stage of TRUNCATE table. */
-class truncate_t {
-
-public:
- /**
- Constructor
-
- @param old_table_id old table id assigned to table before truncate
- @param new_table_id new table id that will be assigned to table
- after truncate
- @param dir_path directory path */
- truncate_t(
- table_id_t old_table_id,
- table_id_t new_table_id,
- const char* dir_path);
-
- /**
- Constructor
-
- @param log_file_name parse the log file during recovery to populate
- information related to table to truncate */
- truncate_t(const char* log_file_name);
-
- /**
- Constructor
-
- @param space_id space in which the table resides
- @param name table name
- @param tablespace_flags tablespace flags use for recreating tablespace
- @param log_flags page format flag
- @param recv_lsn lsn of redo log record. */
- truncate_t(
- ulint space_id,
- const char* name,
- ulint tablespace_flags,
- ulint log_flags,
- lsn_t recv_lsn);
-
- /** Destructor */
- ~truncate_t();
-
- /** The index information of MLOG_FILE_TRUNCATE redo record */
- struct index_t {
-
- /* Default copy constructor and destructor should be OK. */
-
- index_t();
-
- /**
- Set the truncate log values for a compressed table.
- @return DB_CORRUPTION or error code */
- dberr_t set(const dict_index_t* index);
-
- typedef std::vector<byte, ut_allocator<byte> > fields_t;
-
- /** Index id */
- index_id_t m_id;
-
- /** Index type */
- ulint m_type;
-
- /** Root Page Number */
- ulint m_root_page_no;
-
- /** New Root Page Number.
- Note: This field is not persisted to TRUNCATE log but used
- during truncate table fix-up for updating SYS_XXXX tables. */
- ulint m_new_root_page_no;
-
- /** Number of index fields */
- ulint m_n_fields;
-
- /** DATA_TRX_ID column position. */
- ulint m_trx_id_pos;
-
- /** Compressed table field meta data, encode by
- page_zip_fields_encode. Empty for non-compressed tables.
- Should be NUL terminated. */
- fields_t m_fields;
- };
-
- /**
- @return the directory path, can be NULL */
- const char* get_dir_path() const
- {
- return(m_dir_path);
- }
-
- /**
- Register index information
-
- @param index index information logged as part of truncate log. */
- void add(index_t& index)
- {
- m_indexes.push_back(index);
- }
-
- /**
- Add table to truncate post recovery.
-
- @param ptr table information need to complete truncate of table. */
- static void add(truncate_t* ptr)
- {
- s_tables.push_back(ptr);
- }
-
- /**
- Clear registered index vector */
- void clear()
- {
- m_indexes.clear();
- }
-
- /**
- @return old table id of the table to truncate */
- table_id_t old_table_id() const
- {
- return(m_old_table_id);
- }
-
- /**
- @return new table id of the table to truncate */
- table_id_t new_table_id() const
- {
- return(m_new_table_id);
- }
-
- /**
- Update root page number in SYS_XXXX tables.
-
- @param trx transaction object
- @param table_id table id for which information needs to
- be updated.
- @param reserve_dict_mutex if TRUE, acquire/release
- dict_sys->mutex around call to pars_sql.
- @param mark_index_corrupted if true, then mark index corrupted
- @return DB_SUCCESS or error code */
- dberr_t update_root_page_no(
- trx_t* trx,
- table_id_t table_id,
- ibool reserve_dict_mutex,
- bool mark_index_corrupted) const;
-
- /** Create an index for a table.
- @param[in] table_name table name, for which to create
- the index
- @param[in,out] space tablespace
- @param[in] index_type type of index to truncate
- @param[in] index_id id of index to truncate
- @param[in] btr_redo_create_info control info for ::btr_create()
- @param[in,out] mtr mini-transaction covering the
- create index
- @return root page no or FIL_NULL on failure */
- inline ulint create_index(
- const char* table_name,
- fil_space_t* space,
- ulint index_type,
- index_id_t index_id,
- const btr_create_t& btr_redo_create_info,
- mtr_t* mtr) const;
-
- /** Create the indexes for a table
- @param[in] table_name table name, for which to create the
- indexes
- @param[in,out] space tablespace
- @param[in] format_flags page format flags
- @return DB_SUCCESS or error code. */
- inline dberr_t create_indexes(
- const char* table_name,
- fil_space_t* space,
- ulint format_flags);
-
- /** Check if index has been modified since TRUNCATE log snapshot
- was recorded.
- @param[in] space tablespace
- @param[in] root_page_no index root page number
- @return true if modified else false */
- inline bool is_index_modified_since_logged(
- const fil_space_t* space,
- ulint root_page_no) const;
-
- /** Drop indexes for a table.
- @param[in,out] space tablespace
- @return DB_SUCCESS or error code. */
- void drop_indexes(fil_space_t* space) const;
-
- /**
- Parses log record during recovery
- @param start_ptr buffer containing log body to parse
- @param end_ptr buffer end
-
- @return DB_SUCCESS or error code */
- dberr_t parse(
- byte* start_ptr,
- const byte* end_ptr);
-
- /** Parse MLOG_TRUNCATE log record from REDO log file during recovery.
- @param[in,out] start_ptr buffer containing log body to parse
- @param[in] end_ptr buffer end
- @param[in] space_id tablespace identifier
- @return parsed upto or NULL. */
- static byte* parse_redo_entry(
- byte* start_ptr,
- const byte* end_ptr,
- ulint space_id);
-
- /**
- Write a log record for truncating a single-table tablespace.
-
- @param start_ptr buffer to write log record
- @param end_ptr buffer end
- @param space_id space id
- @param tablename the table name in the usual
- databasename/tablename format of InnoDB
- @param flags tablespace flags
- @param format_flags page format
- @param lsn lsn while logging */
- dberr_t write(
- byte* start_ptr,
- byte* end_ptr,
- ulint space_id,
- const char* tablename,
- ulint flags,
- ulint format_flags,
- lsn_t lsn) const;
-
- /**
- @return number of indexes parsed from the truncate log record */
- size_t indexes() const;
-
- /**
- Truncate a single-table tablespace. The tablespace must be cached
- in the memory cache.
-
- Note: This is defined in fil0fil.cc because it needs to access some
- types that are local to that file.
-
- @param space_id space id
- @param dir_path directory path
- @param tablename the table name in the usual
- databasename/tablename format of InnoDB
- @param flags tablespace flags
- @param default_size if true, truncate to default size if tablespace
- is being newly re-initialized.
- @return DB_SUCCESS or error */
- static dberr_t truncate(
- ulint space_id,
- const char* dir_path,
- const char* tablename,
- ulint flags,
- bool default_size);
-
- /**
- Fix the table truncate by applying information parsed from TRUNCATE log.
- Fix-up includes re-creating table (drop and re-create indexes)
- @return error code or DB_SUCCESS */
- static dberr_t fixup_tables_in_system_tablespace();
-
- /**
- Fix the table truncate by applying information parsed from TRUNCATE log.
- Fix-up includes re-creating tablespace.
- @return error code or DB_SUCCESS */
- static dberr_t fixup_tables_in_non_system_tablespace();
-
- /**
- Check whether a tablespace was truncated during recovery
- @param space_id tablespace id to check
- @return true if the tablespace was truncated */
- static bool is_tablespace_truncated(ulint space_id);
-
- /** Was tablespace truncated (on crash before checkpoint).
- If the MLOG_TRUNCATE redo-record is still available then tablespace
- was truncated and checkpoint is yet to happen.
- @param[in] space_id tablespace id to check.
- @return true if tablespace was truncated. */
- static bool was_tablespace_truncated(ulint space_id);
-
- /** Get the lsn associated with space.
- @param[in] space_id tablespace id to check.
- @return associated lsn. */
- static lsn_t get_truncated_tablespace_init_lsn(ulint space_id);
-
-private:
- typedef std::vector<index_t, ut_allocator<index_t> > indexes_t;
-
- /** Space ID of tablespace */
- ulint m_space_id;
-
- /** ID of table that is being truncated. */
- table_id_t m_old_table_id;
-
- /** New ID that will be assigned to table on truncation. */
- table_id_t m_new_table_id;
-
- /** Data dir path of tablespace */
- char* m_dir_path;
-
- /** Table name */
- char* m_tablename;
-
- /** Tablespace Flags */
- ulint m_tablespace_flags;
-
- /** Format flags (log flags; stored in page-no field of header) */
- ulint m_format_flags;
-
- /** Index meta-data */
- indexes_t m_indexes;
-
- /** LSN of TRUNCATE log record. */
- lsn_t m_log_lsn;
-
- /** Log file name. */
- char* m_log_file_name;
-
- /** Encryption information of the table */
- fil_encryption_t m_encryption;
- uint32_t m_key_id;
-
- /** Vector of tables to truncate. */
- typedef std::vector<truncate_t*, ut_allocator<truncate_t*> >
- tables_t;
-
- /** Information about tables to truncate post recovery */
- static tables_t s_tables;
-
- /** Information about truncated table
- This is case when truncate is complete but checkpoint hasn't. */
- typedef std::map<ulint, lsn_t> truncated_tables_t;
- static truncated_tables_t s_truncated_tables;
-
-public:
- /** If true then fix-up of table is active and so while creating
- index instead of grabbing information from dict_index_t, grab it
- from parsed truncate log record. */
- static bool s_fix_up_active;
-};
-
-/**
-Parse truncate log file. */
-class TruncateLogParser {
-
-public:
-
- /**
- Scan and Parse truncate log files.
-
- @param dir_path look for log directory in following path
- @return DB_SUCCESS or error code. */
- static dberr_t scan_and_parse(
- const char* dir_path);
-
-private:
- typedef std::vector<char*, ut_allocator<char*> >
- trunc_log_files_t;
-
-private:
- /**
- Scan to find out truncate log file from the given directory path.
-
- @param dir_path look for log directory in following path.
- @param log_files cache to hold truncate log file name found.
- @return DB_SUCCESS or error code. */
- static dberr_t scan(
- const char* dir_path,
- trunc_log_files_t& log_files);
-
- /**
- Parse the log file and populate table to truncate information.
- (Add this table to truncate information to central vector that is then
- used by truncate fix-up routine to fix-up truncate action of the table.)
-
- @param log_file_name log file to parse
- @return DB_SUCCESS or error code. */
- static dberr_t parse(
- const char* log_file_name);
-};
-
-#endif /* row0trunc_h */
diff --git a/storage/innobase/include/row0undo.h b/storage/innobase/include/row0undo.h
index 5ac2c7c5ee0..6aa7ebaa339 100644
--- a/storage/innobase/include/row0undo.h
+++ b/storage/innobase/include/row0undo.h
@@ -82,17 +82,20 @@ that index record. */
enum undo_exec {
UNDO_NODE_FETCH_NEXT = 1, /*!< we should fetch the next
undo log record */
- UNDO_NODE_INSERT, /*!< undo a fresh insert of a
- row to a table */
- UNDO_NODE_MODIFY /*!< undo a modify operation
- (DELETE or UPDATE) on a row
- of a table */
+ /** rollback an insert into persistent table */
+ UNDO_INSERT_PERSISTENT,
+ /** rollback an update (or delete) in a persistent table */
+ UNDO_UPDATE_PERSISTENT,
+ /** rollback an insert into temporary table */
+ UNDO_INSERT_TEMPORARY,
+ /** rollback an update (or delete) in a temporary table */
+ UNDO_UPDATE_TEMPORARY,
};
/** Undo node structure */
struct undo_node_t{
que_common_t common; /*!< node type: QUE_NODE_UNDO */
- enum undo_exec state; /*!< node execution state */
+ undo_exec state; /*!< rollback execution state */
trx_t* trx; /*!< trx for which undo is done */
roll_ptr_t roll_ptr;/*!< roll pointer to undo log record */
trx_undo_rec_t* undo_rec;/*!< undo log record */
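Splitting the rollback states by table persistence lets the undo executor choose the right code path once, instead of re-testing whether the table is temporary for every record. A minimal dispatch sketch over states shaped like the new enum; the handler functions are placeholders, not the real rollback routines.

enum undo_exec_sketch {
  FETCH_NEXT = 1,
  INSERT_PERSISTENT,
  UPDATE_PERSISTENT,
  INSERT_TEMPORARY,
  UPDATE_TEMPORARY
};

// Placeholder handlers standing in for the real insert/update rollback code.
inline void undo_insert(bool /*temporary*/) {}
inline void undo_update(bool /*temporary*/) {}

inline void dispatch_sketch(undo_exec_sketch state)
{
  switch (state) {
  case INSERT_PERSISTENT: undo_insert(false); break;
  case UPDATE_PERSISTENT: undo_update(false); break;
  case INSERT_TEMPORARY:  undo_insert(true);  break;
  case UPDATE_TEMPORARY:  undo_update(true);  break;
  case FETCH_NEXT:        break;  // fetch the next undo log record instead
  }
}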
diff --git a/storage/innobase/include/row0upd.h b/storage/innobase/include/row0upd.h
index 12ea3e0f69a..d411e0efdc2 100644
--- a/storage/innobase/include/row0upd.h
+++ b/storage/innobase/include/row0upd.h
@@ -100,19 +100,6 @@ upd_get_field_by_field_no(
bool is_virtual) /*!< in: if it is a virtual column */
MY_ATTRIBUTE((warn_unused_result));
/*********************************************************************//**
-Writes into the redo log the values of trx id and roll ptr and enough info
-to determine their positions within a clustered index record.
-@return new pointer to mlog */
-byte*
-row_upd_write_sys_vals_to_log(
-/*==========================*/
- dict_index_t* index, /*!< in: clustered index */
- trx_id_t trx_id, /*!< in: transaction id */
- roll_ptr_t roll_ptr,/*!< in: roll ptr of the undo log record */
- byte* log_ptr,/*!< pointer to a buffer of size > 20 opened
- in mlog */
- mtr_t* mtr); /*!< in: mtr */
-/*********************************************************************//**
Updates the trx id and roll ptr field in a clustered index record when
a row is updated or marked deleted. */
UNIV_INLINE
@@ -127,18 +114,6 @@ row_upd_rec_sys_fields(
const trx_t* trx, /*!< in: transaction */
roll_ptr_t roll_ptr);/*!< in: DB_ROLL_PTR to the undo log */
/*********************************************************************//**
-Sets the trx id or roll ptr field of a clustered index entry. */
-void
-row_upd_index_entry_sys_field(
-/*==========================*/
- dtuple_t* entry, /*!< in/out: index entry, where the memory
- buffers for sys fields are already allocated:
- the function just copies the new values to
- them */
- dict_index_t* index, /*!< in: clustered index */
- ulint type, /*!< in: DATA_TRX_ID or DATA_ROLL_PTR */
- ib_uint64_t val); /*!< in: value to write */
-/*********************************************************************//**
Creates an update node for a query graph.
@return own: update node */
upd_node_t*
@@ -491,6 +466,14 @@ struct upd_t{
return false;
}
+ /** @return whether this is for a hidden metadata record
+ for instant ALTER TABLE */
+ bool is_metadata() const { return dtuple_t::is_metadata(info_bits); }
+ /** @return whether this is for a hidden metadata record
+ for instant ALTER TABLE (not only ADD COLUMN) */
+ bool is_alter_metadata() const
+ { return dtuple_t::is_alter_metadata(info_bits); }
+
#ifdef UNIV_DEBUG
bool validate() const
{
@@ -504,7 +487,6 @@ struct upd_t{
return(true);
}
#endif // UNIV_DEBUG
-
};
/** Kinds of update operation */
diff --git a/storage/innobase/include/row0upd.ic b/storage/innobase/include/row0upd.ic
index 5e43a272388..403c39250cb 100644
--- a/storage/innobase/include/row0upd.ic
+++ b/storage/innobase/include/row0upd.ic
@@ -167,13 +167,13 @@ row_upd_rec_sys_fields(
const trx_t* trx, /*!< in: transaction */
roll_ptr_t roll_ptr)/*!< in: DB_ROLL_PTR to the undo log */
{
- ut_ad(dict_index_is_clust(index));
+ ut_ad(index->is_primary());
ut_ad(rec_offs_validate(rec, index, offsets));
- if (page_zip) {
- ulint pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
+ if (UNIV_LIKELY_NULL(page_zip)) {
page_zip_write_trx_id_and_roll_ptr(page_zip, rec, offsets,
- pos, trx->id, roll_ptr);
+ index->db_trx_id(),
+ trx->id, roll_ptr);
} else {
ulint offset = index->trx_id_offset;
diff --git a/storage/innobase/include/srv0mon.h b/storage/innobase/include/srv0mon.h
index 069ab5cf93a..474634ef5e6 100644
--- a/storage/innobase/include/srv0mon.h
+++ b/storage/innobase/include/srv0mon.h
@@ -177,7 +177,6 @@ enum monitor_id_t {
MONITOR_OVLD_INDEX_PAGES_WRITTEN,
MONITOR_OVLD_NON_INDEX_PAGES_WRITTEN,
MONITOR_OVLD_PAGES_READ,
- MONITOR_OVLD_PAGES0_READ,
MONITOR_OVLD_INDEX_SEC_REC_CLUSTER_READS,
MONITOR_OVLD_INDEX_SEC_REC_CLUSTER_READS_AVOIDED,
MONITOR_OVLD_BYTE_READ,
@@ -300,7 +299,6 @@ enum monitor_id_t {
MONITOR_TRX_COMMIT_UNDO,
MONITOR_TRX_ROLLBACK,
MONITOR_TRX_ROLLBACK_SAVEPOINT,
- MONITOR_TRX_ROLLBACK_ACTIVE,
MONITOR_TRX_ACTIVE,
MONITOR_RSEG_HISTORY_LEN,
MONITOR_NUM_UNDO_SLOT_USED,
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index c6873ca8938..a905b652c29 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -47,7 +47,6 @@ Created 10/10/1995 Heikki Tuuri
#include "que0types.h"
#include "trx0types.h"
#include "srv0conc.h"
-#include "buf0checksum.h"
#include "fil0fil.h"
#include "mysql/psi/mysql_stage.h"
@@ -144,7 +143,8 @@ struct srv_stats_t
ulint_ctr_1_t n_lock_wait_count;
/** Number of threads currently waiting on database locks */
- simple_atomic_counter<> n_lock_wait_current_count;
+ MY_ALIGNED(CACHE_LINE_SIZE) Atomic_counter<ulint>
+ n_lock_wait_current_count;
/** Number of rows read. */
ulint_ctr_64_t n_rows_read;
@@ -176,9 +176,6 @@ struct srv_stats_t
/** Number of times prefix optimization avoided triggering cluster lookup */
ulint_ctr_64_t n_sec_rec_cluster_reads_avoided;
- /** Number of times page 0 is read from tablespace */
- ulint_ctr_64_t page0_read;
-
/** Number of encryption_get_latest_key_version calls */
ulint_ctr_64_t n_key_requests;
@@ -451,7 +448,7 @@ extern uint srv_fast_shutdown; /*!< If this is 1, do not do a
/** Signal to shut down InnoDB (NULL if shutdown was signaled, or if
running in innodb_read_only mode, srv_read_only_mode) */
-extern st_my_thread_var *srv_running;
+extern std::atomic<st_my_thread_var *> srv_running;
extern ibool srv_innodb_status;
@@ -538,7 +535,6 @@ extern uint srv_sys_space_size_debug;
extern bool srv_log_files_created;
#endif /* UNIV_DEBUG */
-#define SRV_SEMAPHORE_WAIT_EXTENSION 7200
extern ulint srv_dml_needed_delay;
#define SRV_MAX_N_IO_THREADS 130
@@ -900,23 +896,6 @@ srv_purge_wakeup();
/** Shut down the purge threads. */
void srv_purge_shutdown();
-/** Check if tablespace is being truncated.
-(Ignore system-tablespace as we don't re-create the tablespace
-and so some of the action that are suppressed by this function
-for independent tablespace are not applicable to system-tablespace).
-@param space_id space_id to check for truncate action
-@return true if being truncated, false if not being
- truncated or tablespace is system-tablespace. */
-bool
-srv_is_tablespace_truncated(ulint space_id);
-
-/** Check if tablespace was truncated.
-@param[in] space space object to check for truncate action
-@return true if tablespace was truncated and we still have an active
-MLOG_TRUNCATE REDO log record. */
-bool
-srv_was_tablespace_truncated(const fil_space_t* space);
-
#ifdef UNIV_DEBUG
/** Disables master thread. It's used by:
SET GLOBAL innodb_master_thread_disabled_debug = 1 (0).
@@ -971,7 +950,6 @@ struct export_var_t{
ulint innodb_page_size; /*!< srv_page_size */
ulint innodb_pages_created; /*!< buf_pool->stat.n_pages_created */
ulint innodb_pages_read; /*!< buf_pool->stat.n_pages_read*/
- ulint innodb_page0_read; /*!< srv_stats.page0_read */
ulint innodb_pages_written; /*!< buf_pool->stat.n_pages_written */
ulint innodb_row_lock_waits; /*!< srv_n_lock_wait_count */
ulint innodb_row_lock_current_waits; /*!< srv_n_lock_wait_current_count */
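srv_running changes from a plain pointer to std::atomic<st_my_thread_var*>, so the shutdown signal can be published and observed without the my_atomic wrappers. The general pattern for such an atomic pointer, with the type reduced to a plain struct and the memory orders chosen for illustration:

#include <atomic>

struct thread_var_sketch { int dummy; };   // stand-in for st_my_thread_var

std::atomic<thread_var_sketch*> running_sketch{nullptr};

// Publisher: release so that whatever was initialised before publishing is
// visible to a reader that observes the non-null pointer.
inline void publish(thread_var_sketch* tv)
{ running_sketch.store(tv, std::memory_order_release); }

// Reader: a null pointer means shutdown was signalled (or the thread never
// started, e.g. in read-only mode).
inline bool is_running()
{ return running_sketch.load(std::memory_order_acquire) != nullptr; }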
diff --git a/storage/innobase/include/sync0arr.ic b/storage/innobase/include/sync0arr.ic
index cd1d8e27625..d8f24cb4279 100644
--- a/storage/innobase/include/sync0arr.ic
+++ b/storage/innobase/include/sync0arr.ic
@@ -44,8 +44,7 @@ sync_array_get()
return(sync_wait_array[0]);
}
- return(sync_wait_array[default_indexer_t<>::get_rnd_index()
- % sync_array_size]);
+ return(sync_wait_array[get_rnd_value() % sync_array_size]);
}
/******************************************************************//**
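sync_array_get() now spreads waiters over the sync arrays with get_rnd_value() % sync_array_size; the point is load spreading across the array mutexes rather than randomness quality. The same striping idea in a few lines, with a thread-local counter as an illustrative stand-in for get_rnd_value():

#include <cstddef>
#include <vector>

struct wait_array_sketch { /* slots, mutex, ... */ };

// Pick one of N arrays per call so that threads do not all contend on the
// same array mutex.
inline wait_array_sketch& pick(std::vector<wait_array_sketch>& arrays)
{
  static thread_local std::size_t n = 0;
  return arrays[n++ % arrays.size()];
}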
diff --git a/storage/innobase/include/sync0policy.h b/storage/innobase/include/sync0policy.h
index 93a56e24040..090f83f46e2 100644
--- a/storage/innobase/include/sync0policy.h
+++ b/storage/innobase/include/sync0policy.h
@@ -30,247 +30,176 @@ Created 2012-08-21 Sunny Bains.
#include "ut0rnd.h"
#include "os0thread.h"
#include "srv0mon.h"
+#include "sync0debug.h"
#ifdef UNIV_DEBUG
-# define MUTEX_MAGIC_N 979585UL
-
-template <typename Mutex>
-class MutexDebug {
-public:
-
- /** For passing context to SyncDebug */
- struct Context : public latch_t {
-
- /** Constructor */
- Context()
- :
- m_mutex(),
- m_filename(),
- m_line(),
- m_thread_id(ULINT_UNDEFINED)
- {
- /* No op */
- }
-
- /** Create the context for SyncDebug
- @param[in] id ID of the latch to track */
- Context(latch_id_t id)
- :
- latch_t(id)
- {
- ut_ad(id != LATCH_ID_NONE);
- }
-
- /** Set to locked state
- @param[in] mutex The mutex to acquire
- @param[in] filename File name from where to acquire
- @param[in] line Line number in filename */
- void locked(
- const Mutex* mutex,
- const char* filename,
- unsigned line)
- UNIV_NOTHROW
- {
- m_mutex = mutex;
-
- my_atomic_storelint(&m_thread_id,
- ulint(os_thread_get_curr_id()));
-
- m_filename = filename;
-
- m_line = line;
- }
-
- /** Reset to unlock state */
- void release()
- UNIV_NOTHROW
- {
- m_mutex = NULL;
-
- my_atomic_storelint(&m_thread_id, ULINT_UNDEFINED);
-
- m_filename = NULL;
-
- m_line = 0;
- }
-
- /** Print information about the latch
- @return the string representation */
- virtual std::string to_string() const
- UNIV_NOTHROW
- {
- std::ostringstream msg;
-
- msg << m_mutex->policy().to_string();
-
- if (m_thread_id != ULINT_UNDEFINED) {
-
- msg << " addr: " << m_mutex
- << " acquired: " << locked_from().c_str();
-
- } else {
- msg << "Not locked";
- }
-
- return(msg.str());
- }
-
- /** @return the name of the file and line number in the file
- from where the mutex was acquired "filename:line" */
- virtual std::string locked_from() const
- {
- std::ostringstream msg;
-
- msg << sync_basename(m_filename) << ":" << m_line;
-
- return(std::string(msg.str()));
- }
-
- /** Mutex to check for lock order violation */
- const Mutex* m_mutex;
-
- /** Filename from where enter was called */
- const char* m_filename;
-
- /** Line mumber in filename */
- unsigned m_line;
-
- /** Thread ID of the thread that own(ed) the mutex */
- ulint m_thread_id;
- };
-
- /** Constructor. */
- MutexDebug()
- :
- m_magic_n(),
- m_context()
- UNIV_NOTHROW
- {
- /* No op */
- }
-
- /* Destructor */
- virtual ~MutexDebug() { }
-
- /** Mutex is being destroyed. */
- void destroy() UNIV_NOTHROW
- {
- ut_ad((ulint)my_atomic_loadlint(&m_context.m_thread_id) == ULINT_UNDEFINED);
-
- m_magic_n = 0;
-
- m_context.m_thread_id = 0;
- }
-
- /** Called when the mutex is "created". Note: Not from the constructor
- but when the mutex is initialised.
- @param[in] id Mutex ID */
- void init(latch_id_t id) UNIV_NOTHROW;
-
- /** Called when an attempt is made to lock the mutex
- @param[in] mutex Mutex instance to be locked
- @param[in] filename Filename from where it was called
- @param[in] line Line number from where it was called */
- void enter(
- const Mutex* mutex,
- const char* filename,
- unsigned line)
- UNIV_NOTHROW;
-
- /** Called when the mutex is locked
- @param[in] mutex Mutex instance that was locked
- @param[in] filename Filename from where it was called
- @param[in] line Line number from where it was called */
- void locked(
- const Mutex* mutex,
- const char* filename,
- unsigned line)
- UNIV_NOTHROW;
-
- /** Called when the mutex is released
- @param[in] mutx Mutex that was released */
- void release(const Mutex* mutex)
- UNIV_NOTHROW;
-
- /** @return true if thread owns the mutex */
- bool is_owned() const UNIV_NOTHROW
- {
- return(os_thread_eq(
- (os_thread_id_t)my_atomic_loadlint(&m_context.m_thread_id),
- os_thread_get_curr_id()));
- }
-
- /** @return the name of the file from the mutex was acquired */
- const char* get_enter_filename() const
- UNIV_NOTHROW
- {
- return(m_context.m_filename);
- }
-
- /** @return the name of the file from the mutex was acquired */
- unsigned get_enter_line() const
- UNIV_NOTHROW
- {
- return(m_context.m_line);
- }
-
- /** @return id of the thread that was trying to acquire the mutex */
- os_thread_id_t get_thread_id() const
- UNIV_NOTHROW
- {
- return((os_thread_id_t)my_atomic_loadlint(&m_context.m_thread_id));
- }
+template <typename Mutex> class MutexDebug: public latch_t
+{
+ /** Mutex to check for lock order violation */
+ const Mutex *m_mutex;
+ /** Filename from where enter was called */
+ const char *m_filename;
+  /** Line number in filename */
+ unsigned m_line;
+ /** Thread ID of the thread that owns the mutex */
+ os_thread_id_t m_thread_id;
+ /** Mutex protecting the above members */
+ mutable OSMutex m_debug_mutex;
+
+
+ void set(const Mutex *mutex, const char *filename, unsigned line,
+ os_thread_id_t thread_id)
+ {
+ m_debug_mutex.enter();
+ m_mutex= mutex;
+ m_filename= filename;
+ m_line= line;
+ m_thread_id= thread_id;
+ m_debug_mutex.exit();
+ }
+
+
+ const MutexDebug get() const
+ {
+ MutexDebug ret;
+ m_debug_mutex.enter();
+ ret.m_mutex= m_mutex;
+ ret.m_filename= m_filename;
+ ret.m_line= m_line;
+ ret.m_thread_id= m_thread_id;
+ m_debug_mutex.exit();
+ return ret;
+ }
+
+
+ /**
+  Called either when the mutex is locked or destroyed, so the members are protected
+ from concurrent modification.
+ */
+ void assert_clean_context()
+ {
+ ut_ad(!m_mutex);
+ ut_ad(!m_filename);
+ ut_ad(!m_line);
+ ut_ad(m_thread_id == os_thread_id_t(ULINT_UNDEFINED));
+ }
- /** Magic number to check for memory corruption. */
- ulint m_magic_n;
- /** Latch state of the mutex owner */
- Context m_context;
+public:
+ /**
+ Called when the mutex is "created". Note: Not from the constructor
+ but when the mutex is initialised.
+ @param[in] id Mutex ID
+ */
+ void init(latch_id_t id)
+ {
+ ut_ad(id != LATCH_ID_NONE);
+ m_id= id;
+ m_debug_mutex.init();
+ set(0, 0, 0, os_thread_id_t(ULINT_UNDEFINED));
+ }
+
+
+ /** Mutex is being destroyed. */
+ void destroy()
+ {
+ assert_clean_context();
+ m_debug_mutex.destroy();
+ }
+
+
+ /**
+ Called when an attempt is made to lock the mutex
+ @param[in] mutex Mutex instance to be locked
+ @param[in] filename Filename from where it was called
+ @param[in] line Line number from where it was called
+ */
+ void enter(const Mutex &mutex, const char *filename, unsigned line)
+ {
+ MutexDebug context;
+ ut_ad(!is_owned());
+ context.init(m_id);
+ context.set(&mutex, filename, line, os_thread_get_curr_id());
+ /* Check for latch order violation. */
+ sync_check_lock_validate(&context);
+ context.set(0, 0, 0, os_thread_id_t(ULINT_UNDEFINED));
+ context.destroy();
+ }
+
+
+ /**
+ Called when the mutex is locked
+ @param[in] mutex Mutex instance that was locked
+ @param[in] filename Filename from where it was called
+ @param[in] line Line number from where it was called
+ */
+ void locked(const Mutex &mutex, const char *filename, unsigned line)
+ {
+ assert_clean_context();
+ set(&mutex, filename, line, os_thread_get_curr_id());
+ sync_check_lock_granted(this);
+ }
+
+
+ /**
+ Called when the mutex is released
+ @param[in] mutex Mutex that was released
+ */
+ void release(const Mutex &mutex)
+ {
+ ut_ad(is_owned());
+ set(0, 0, 0, os_thread_id_t(ULINT_UNDEFINED));
+ sync_check_unlock(this);
+ }
+
+
+ /** @return true if thread owns the mutex */
+ bool is_owned() const
+ {
+ return os_thread_eq(get_thread_id(), os_thread_get_curr_id());
+ }
+
+
+  /** @return the name of the file from where the mutex was acquired */
+ const char* get_enter_filename() const { return get().m_filename; }
+
+
+  /** @return the line number in the file from where the mutex was acquired */
+ unsigned get_enter_line() const { return get().m_line; }
+
+
+ /** @return id of the thread that was trying to acquire the mutex */
+ os_thread_id_t get_thread_id() const { return get().m_thread_id; }
+
+
+ /**
+ Print information about the latch
+ @return the string representation
+ */
+ virtual std::string to_string() const
+ {
+ std::ostringstream msg;
+ const MutexDebug ctx= get();
+
+ msg << m_mutex->policy().to_string();
+ if (ctx.m_mutex)
+ msg << " addr: " << ctx.m_mutex << " acquired: "
+ << sync_basename(ctx.get_enter_filename()) << ":"
+ << ctx.get_enter_line();
+ else
+ msg << "Not locked";
+
+ return(msg.str());
+ }
};
#endif /* UNIV_DEBUG */
-/* Do nothing */
-template <typename Mutex>
-struct NoPolicy {
- /** Default constructor. */
- NoPolicy() { }
-
- void init(const Mutex&, latch_id_t, const char*, uint32_t)
- UNIV_NOTHROW { }
- void destroy() UNIV_NOTHROW { }
- void enter(const Mutex&, const char*, unsigned) UNIV_NOTHROW { }
- void add(uint32_t, uint32_t) UNIV_NOTHROW { }
- void locked(const Mutex&, const char*, ulint) UNIV_NOTHROW { }
- void release(const Mutex&) UNIV_NOTHROW { }
- std::string to_string() const { return(""); };
- latch_id_t get_id() const;
-};
-
/** Collect the metrics per mutex instance, no aggregation. */
template <typename Mutex>
struct GenericPolicy
-#ifdef UNIV_DEBUG
-: public MutexDebug<Mutex>
-#endif /* UNIV_DEBUG */
{
public:
- typedef Mutex MutexType;
-
- /** Constructor. */
- GenericPolicy()
- UNIV_NOTHROW
- :
-#ifdef UNIV_DEBUG
- MutexDebug<MutexType>(),
-#endif /* UNIV_DEBUG */
- m_count(),
- m_id()
- { }
-
- /** Destructor */
- ~GenericPolicy() { }
-
/** Called when the mutex is "created". Note: Not from the constructor
but when the mutex is initialised.
@param[in] id Mutex ID
@@ -292,8 +221,6 @@ public:
meta.get_counter()->single_register(&m_count);
sync_file_created_register(this, filename, uint16_t(line));
-
- ut_d(MutexDebug<MutexType>::init(m_id));
}
/** Called when the mutex is destroyed. */
@@ -305,8 +232,6 @@ public:
meta.get_counter()->single_deregister(&m_count);
sync_file_created_deregister(this);
-
- ut_d(MutexDebug<MutexType>::destroy());
}
/** Called after a successful mutex acquire.
@@ -332,40 +257,6 @@ public:
++m_count.m_calls;
}
- /** Called when an attempt is made to lock the mutex
- @param[in] mutex Mutex instance to be locked
- @param[in] filename Filename from where it was called
- @param[in] line Line number from where it was called */
- void enter(
- const MutexType& mutex,
- const char* filename,
- unsigned line)
- UNIV_NOTHROW
- {
- ut_d(MutexDebug<MutexType>::enter(&mutex, filename, line));
- }
-
- /** Called when the mutex is locked
- @param[in] mutex Mutex instance that is locked
- @param[in] filename Filename from where it was called
- @param[in] line Line number from where it was called */
- void locked(
- const MutexType& mutex,
- const char* filename,
- unsigned line)
- UNIV_NOTHROW
- {
- ut_d(MutexDebug<MutexType>::locked(&mutex, filename, line));
- }
-
- /** Called when the mutex is released
- @param[in] mutex Mutex instance that is released */
- void release(const MutexType& mutex)
- UNIV_NOTHROW
- {
- ut_d(MutexDebug<MutexType>::release(&mutex));
- }
-
/** Print the information about the latch
@return the string representation */
std::string print() const
@@ -378,14 +269,18 @@ public:
return(m_id);
}
- /** @return the string representation */
- std::string to_string() const;
-private:
- typedef latch_meta_t::CounterType Counter;
+ /** @return the string representation */
+ std::string to_string() const
+ { return sync_mutex_to_string(get_id(), sync_file_created_get(this)); }
- /** The user visible counters, registered with the meta-data. */
- Counter::Count m_count;
+#ifdef UNIV_DEBUG
+ MutexDebug<Mutex> context;
+#endif
+
+private:
+ /** The user visible counters, registered with the meta-data. */
+ latch_meta_t::CounterType::Count m_count;
/** Latch meta data ID */
latch_id_t m_id;
@@ -395,29 +290,8 @@ private:
too many of them to count individually. */
template <typename Mutex>
class BlockMutexPolicy
-#ifdef UNIV_DEBUG
-: public MutexDebug<Mutex>
-#endif /* UNIV_DEBUG */
{
public:
- typedef Mutex MutexType;
- typedef typename latch_meta_t::CounterType::Count Count;
-
- /** Default constructor. */
- BlockMutexPolicy()
- :
-#ifdef UNIV_DEBUG
- MutexDebug<MutexType>(),
-#endif /* UNIV_DEBUG */
- m_count(),
- m_id()
- {
- /* Do nothing */
- }
-
- /** Destructor */
- ~BlockMutexPolicy() { }
-
/** Called when the mutex is "created". Note: Not from the constructor
but when the mutex is initialised.
@param[in] id Mutex ID */
@@ -436,8 +310,6 @@ public:
ut_ad(meta.get_id() == id);
m_count = meta.get_counter()->sum_register();
-
- ut_d(MutexDebug<MutexType>::init(m_id));
}
/** Called when the mutex is destroyed. */
@@ -445,7 +317,6 @@ public:
UNIV_NOTHROW
{
m_count = NULL;
- ut_d(MutexDebug<MutexType>::destroy());
}
/** Called after a successful mutex acquire.
@@ -469,40 +340,6 @@ public:
++m_count->m_calls;
}
- /** Called when the mutex is locked
- @param[in] mutex Mutex instance that is locked
- @param[in] filename Filename from where it was called
- @param[in] line Line number from where it was called */
- void locked(
- const MutexType& mutex,
- const char* filename,
- unsigned line)
- UNIV_NOTHROW
- {
- ut_d(MutexDebug<MutexType>::locked(&mutex, filename, line));
- }
-
- /** Called when the mutex is released
- @param[in] mutex Mutex instance that is released */
- void release(const MutexType& mutex)
- UNIV_NOTHROW
- {
- ut_d(MutexDebug<MutexType>::release(&mutex));
- }
-
- /** Called when an attempt is made to lock the mutex
- @param[in] mutex Mutex instance to be locked
- @param[in] filename Filename from where it was called
- @param[in] line Line number from where it was called */
- void enter(
- const MutexType& mutex,
- const char* filename,
- unsigned line)
- UNIV_NOTHROW
- {
- ut_d(MutexDebug<MutexType>::enter(&mutex, filename, line));
- }
-
/** Print the information about the latch
@return the string representation */
std::string print() const
@@ -514,19 +351,26 @@ public:
return(m_id);
}
- /** @return the string representation */
- std::string to_string() const;
-private:
- typedef latch_meta_t::CounterType Counter;
+ /**
+ I don't think it makes sense to keep track of the file name
+  and line number for each block mutex. Too much overhead. Use the
+ latch id to figure out the location from the source.
+
+ @return the string representation
+ */
+ std::string to_string() const
+ { return(sync_mutex_to_string(get_id(), "buf0buf.cc:0")); }
+
+#ifdef UNIV_DEBUG
+ MutexDebug<Mutex> context;
+#endif
- /** The user visible counters, registered with the meta-data. */
- Counter::Count* m_count;
+private:
+ /** The user visible counters, registered with the meta-data. */
+ latch_meta_t::CounterType::Count *m_count;
/** Latch meta data ID */
latch_id_t m_id;
};
-
-#include "sync0policy.ic"
-
#endif /* sync0policy_h */
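
The rewritten MutexDebug above keeps the owner context (mutex pointer, file, line, thread id) directly in the policy object and serialises access to it with a private OSMutex, handing out copies via get() so that to_string() formats a consistent snapshot. A minimal standalone sketch of that snapshot pattern, using std::mutex and std::thread::id as stand-ins for the InnoDB primitives (the class and member names here are illustrative, not the actual code):

#include <mutex>
#include <sstream>
#include <string>
#include <thread>

class debug_context
{
  const void*        m_mutex= nullptr;     /* latch being tracked */
  const char*        m_filename= nullptr;  /* where it was acquired */
  unsigned           m_line= 0;
  std::thread::id    m_thread_id;          /* owning thread, if any */
  mutable std::mutex m_debug_mutex;        /* protects the members above */

public:
  /* Publish the owner context atomically with respect to readers. */
  void set(const void *mutex, const char *filename, unsigned line,
           std::thread::id thread_id)
  {
    std::lock_guard<std::mutex> guard(m_debug_mutex);
    m_mutex= mutex;
    m_filename= filename;
    m_line= line;
    m_thread_id= thread_id;
  }

  /* Take a consistent snapshot, then format it without holding the lock. */
  std::string to_string() const
  {
    const void* mutex;
    const char* filename;
    unsigned    line;
    {
      std::lock_guard<std::mutex> guard(m_debug_mutex);
      mutex= m_mutex;
      filename= m_filename;
      line= m_line;
    }
    std::ostringstream msg;
    if (mutex)
      msg << "addr: " << mutex << " acquired: " << filename << ":" << line;
    else
      msg << "Not locked";
    return msg.str();
  }
};
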
diff --git a/storage/innobase/include/sync0policy.ic b/storage/innobase/include/sync0policy.ic
deleted file mode 100644
index a28e3c382b4..00000000000
--- a/storage/innobase/include/sync0policy.ic
+++ /dev/null
@@ -1,101 +0,0 @@
-/*****************************************************************************
-
-Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation.
-
-This program is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
-
-*****************************************************************************/
-
-/******************************************************************//**
-@file include/sync0policy.ic
-Policy for mutexes.
-
-Created 2012-08-21 Sunny Bains.
-***********************************************************************/
-
-#include "sync0debug.h"
-
-template <typename Mutex>
-std::string GenericPolicy<Mutex>::to_string() const
-{
- return(sync_mutex_to_string(get_id(), sync_file_created_get(this)));
-}
-
-template <typename Mutex>
-std::string BlockMutexPolicy<Mutex>::to_string() const
-{
- /* I don't think it makes sense to keep track of the file name
- and line number for each block mutex. Too much of overhead. Use the
- latch id to figure out the location from the source. */
- return(sync_mutex_to_string(get_id(), "buf0buf.cc:0"));
-}
-
-#ifdef UNIV_DEBUG
-
-template <typename Mutex>
-void MutexDebug<Mutex>::init(latch_id_t id)
- UNIV_NOTHROW
-{
- m_context.m_id = id;
-
- m_context.release();
-
- m_magic_n = MUTEX_MAGIC_N;
-}
-
-template <typename Mutex>
-void MutexDebug<Mutex>::enter(
- const Mutex* mutex,
- const char* name,
- unsigned line)
- UNIV_NOTHROW
-{
- ut_ad(!is_owned());
-
- Context context(m_context.get_id());
-
- context.locked(mutex, name, line);
-
- /* Check for latch order violation. */
-
- sync_check_lock_validate(&context);
-}
-
-template <typename Mutex>
-void MutexDebug<Mutex>::locked(
- const Mutex* mutex,
- const char* name,
- unsigned line)
- UNIV_NOTHROW
-{
- ut_ad(!is_owned());
- ut_ad(m_context.m_thread_id == ULINT_UNDEFINED);
-
- m_context.locked(mutex, name, line);
-
- sync_check_lock_granted(&m_context);
-}
-
-template <typename Mutex>
-void MutexDebug<Mutex>::release(const Mutex*)
- UNIV_NOTHROW
-{
- ut_ad(is_owned());
-
- m_context.release();
-
- sync_check_unlock(&m_context);
-}
-
-#endif /* UNIV_DEBUG */
diff --git a/storage/innobase/include/sync0rw.h b/storage/innobase/include/sync0rw.h
index 429560f637e..4f23e81185c 100644
--- a/storage/innobase/include/sync0rw.h
+++ b/storage/innobase/include/sync0rw.h
@@ -569,10 +569,10 @@ struct rw_lock_t
#endif /* UNIV_DEBUG */
{
/** Holds the state of the lock. */
- int32_t lock_word;
+ std::atomic<int32_t> lock_word;
/** 1: there are waiters */
- int32_t waiters;
+ std::atomic<int32_t> waiters;
/** number of granted SX locks. */
volatile ulint sx_recursive;
@@ -642,7 +642,6 @@ struct rw_lock_t
}
virtual std::string to_string() const;
- virtual std::string locked_from() const;
/** For checking memory corruption. */
ulint magic_n;
diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic
index f0c33ecbeda..3add168edc8 100644
--- a/storage/innobase/include/sync0rw.ic
+++ b/storage/innobase/include/sync0rw.ic
@@ -77,8 +77,7 @@ rw_lock_get_writer(
/*===============*/
const rw_lock_t* lock) /*!< in: rw-lock */
{
- int32_t lock_word = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(lock_word <= X_LOCK_DECR);
if (lock_word > X_LOCK_HALF_DECR) {
@@ -110,8 +109,7 @@ rw_lock_get_reader_count(
/*=====================*/
const rw_lock_t* lock) /*!< in: rw-lock */
{
- int32_t lock_word = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(lock_word <= X_LOCK_DECR);
if (lock_word > X_LOCK_HALF_DECR) {
@@ -147,8 +145,7 @@ rw_lock_get_x_lock_count(
/*=====================*/
const rw_lock_t* lock) /*!< in: rw-lock */
{
- int32_t lock_copy = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_copy = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(lock_copy <= X_LOCK_DECR);
if (lock_copy == 0 || lock_copy == -X_LOCK_HALF_DECR) {
@@ -181,8 +178,7 @@ rw_lock_get_sx_lock_count(
const rw_lock_t* lock) /*!< in: rw-lock */
{
#ifdef UNIV_DEBUG
- int32_t lock_copy = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_copy = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(lock_copy <= X_LOCK_DECR);
@@ -213,14 +209,15 @@ rw_lock_lock_word_decr(
int32_t amount, /*!< in: amount to decrement */
int32_t threshold) /*!< in: threshold of judgement */
{
- int32_t lock_copy = my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_copy = lock->lock_word.load(std::memory_order_relaxed);
+
while (lock_copy > threshold) {
- if (my_atomic_cas32_strong_explicit(&lock->lock_word,
- &lock_copy,
- lock_copy - amount,
- MY_MEMORY_ORDER_ACQUIRE,
- MY_MEMORY_ORDER_RELAXED)) {
+ if (lock->lock_word.compare_exchange_strong(
+ lock_copy,
+ lock_copy - amount,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
+
return(true);
}
}
@@ -304,9 +301,9 @@ rw_lock_x_lock_func_nowait(
{
int32_t oldval = X_LOCK_DECR;
- if (my_atomic_cas32_strong_explicit(&lock->lock_word, &oldval, 0,
- MY_MEMORY_ORDER_ACQUIRE,
- MY_MEMORY_ORDER_RELAXED)) {
+ if (lock->lock_word.compare_exchange_strong(oldval, 0,
+ std::memory_order_acquire,
+ std::memory_order_relaxed)) {
lock->writer_thread = os_thread_get_curr_id();
} else if (os_thread_eq(lock->writer_thread, os_thread_get_curr_id())) {
@@ -316,12 +313,12 @@ rw_lock_x_lock_func_nowait(
observe consistent values. */
if (oldval == 0 || oldval == -X_LOCK_HALF_DECR) {
/* There are 1 x-locks */
- my_atomic_add32_explicit(&lock->lock_word, -X_LOCK_DECR,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_sub(X_LOCK_DECR,
+ std::memory_order_relaxed);
} else if (oldval <= -X_LOCK_DECR) {
/* There are 2 or more x-locks */
- my_atomic_add32_explicit(&lock->lock_word, -1,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_sub(1,
+ std::memory_order_relaxed);
/* Watch for too many recursive locks */
ut_ad(oldval < 1);
} else {
@@ -356,8 +353,7 @@ rw_lock_s_unlock_func(
rw_lock_t* lock) /*!< in/out: rw-lock */
{
#ifdef UNIV_DEBUG
- int32_t dbg_lock_word = my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED);
+ auto dbg_lock_word = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(dbg_lock_word > -X_LOCK_DECR);
ut_ad(dbg_lock_word != 0);
ut_ad(dbg_lock_word < X_LOCK_DECR);
@@ -366,8 +362,8 @@ rw_lock_s_unlock_func(
ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_S));
/* Increment lock_word to indicate 1 less reader */
- int32_t lock_word = my_atomic_add32_explicit(&lock->lock_word, 1,
- MY_MEMORY_ORDER_RELEASE) + 1;
+ auto lock_word = lock->lock_word.fetch_add(
+ 1, std::memory_order_release) + 1;
if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
/* wait_ex waiter exists. It may not be asleep, but we signal
@@ -393,8 +389,7 @@ rw_lock_x_unlock_func(
#endif /* UNIV_DEBUG */
rw_lock_t* lock) /*!< in/out: rw-lock */
{
- int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(lock_word == 0 || lock_word == -X_LOCK_HALF_DECR
|| lock_word <= -X_LOCK_DECR);
@@ -411,31 +406,29 @@ rw_lock_x_unlock_func(
ACQ_REL due to...
RELEASE: we release rw-lock
ACQUIRE: we want waiters to be loaded after lock_word is stored */
- my_atomic_add32_explicit(&lock->lock_word, X_LOCK_DECR,
- MY_MEMORY_ORDER_ACQ_REL);
+ lock->lock_word.fetch_add(X_LOCK_DECR,
+ std::memory_order_acq_rel);
/* This no longer has an X-lock but it may still have
an SX-lock. So it is now free for S-locks by other threads.
We need to signal read/write waiters.
We do not need to signal wait_ex waiters, since they cannot
exist when there is a writer. */
- if (my_atomic_load32_explicit(&lock->waiters,
- MY_MEMORY_ORDER_RELAXED)) {
- my_atomic_store32_explicit(&lock->waiters, 0,
- MY_MEMORY_ORDER_RELAXED);
+ if (lock->waiters.load(std::memory_order_relaxed)) {
+ lock->waiters.store(0, std::memory_order_relaxed);
os_event_set(lock->event);
sync_array_object_signalled();
}
} else if (lock_word == -X_LOCK_DECR
|| lock_word == -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
/* There are 2 x-locks */
- my_atomic_add32_explicit(&lock->lock_word, X_LOCK_DECR,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_add(X_LOCK_DECR,
+ std::memory_order_relaxed);
} else {
/* There are more than 2 x-locks. */
ut_ad(lock_word < -X_LOCK_DECR);
- my_atomic_add32_explicit(&lock->lock_word, 1,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_add(1,
+ std::memory_order_relaxed);
}
ut_ad(rw_lock_validate(lock));
@@ -461,8 +454,8 @@ rw_lock_sx_unlock_func(
ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_SX));
if (lock->sx_recursive == 0) {
- int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_word =
+ lock->lock_word.load(std::memory_order_relaxed);
/* Last caller in a possible recursive chain. */
if (lock_word > 0) {
lock->writer_thread = 0;
@@ -472,17 +465,15 @@ rw_lock_sx_unlock_func(
ACQ_REL due to...
RELEASE: we release rw-lock
ACQUIRE: we want waiters to be loaded after lock_word is stored */
- my_atomic_add32_explicit(&lock->lock_word, X_LOCK_HALF_DECR,
- MY_MEMORY_ORDER_ACQ_REL);
+ lock->lock_word.fetch_add(X_LOCK_HALF_DECR,
+ std::memory_order_acq_rel);
/* Lock is now free. May have to signal read/write
waiters. We do not need to signal wait_ex waiters,
since they cannot exist when there is an sx-lock
holder. */
- if (my_atomic_load32_explicit(&lock->waiters,
- MY_MEMORY_ORDER_RELAXED)) {
- my_atomic_store32_explicit(&lock->waiters, 0,
- MY_MEMORY_ORDER_RELAXED);
+ if (lock->waiters.load(std::memory_order_relaxed)) {
+ lock->waiters.store(0, std::memory_order_relaxed);
os_event_set(lock->event);
sync_array_object_signalled();
}
@@ -490,8 +481,8 @@ rw_lock_sx_unlock_func(
/* still has x-lock */
ut_ad(lock_word == -X_LOCK_HALF_DECR ||
lock_word <= -(X_LOCK_DECR + X_LOCK_HALF_DECR));
- my_atomic_add32_explicit(&lock->lock_word, X_LOCK_HALF_DECR,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_add(X_LOCK_HALF_DECR,
+ std::memory_order_relaxed);
}
}
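
The sync0rw.ic hunks above translate the my_atomic_*_explicit() calls on lock_word and waiters into std::atomic member operations with the same memory orders. The converted decrement loop, reduced to a self-contained sketch (the function name is illustrative; the real rw_lock_t carries additional state such as waiters and events):

#include <atomic>
#include <cstdint>

static bool lock_word_decr(std::atomic<int32_t>& lock_word,
                           int32_t amount, int32_t threshold)
{
  int32_t lock_copy= lock_word.load(std::memory_order_relaxed);

  while (lock_copy > threshold)
  {
    /* On success the acquire order pairs with the release in the unlock
    path; on failure compare_exchange_strong reloads lock_copy with the
    current value and the loop re-checks the threshold. */
    if (lock_word.compare_exchange_strong(lock_copy, lock_copy - amount,
                                          std::memory_order_acquire,
                                          std::memory_order_relaxed))
      return true;
  }
  return false;
}
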
diff --git a/storage/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h
index 53332af46ab..5fa7691e771 100644
--- a/storage/innobase/include/sync0types.h
+++ b/storage/innobase/include/sync0types.h
@@ -999,9 +999,6 @@ struct latch_t {
@return the string representation */
virtual std::string to_string() const = 0;
- /** @return "filename:line" from where the latch was last locked */
- virtual std::string locked_from() const = 0;
-
/** @return the latch level */
latch_level_t get_level() const
UNIV_NOTHROW
@@ -1117,51 +1114,6 @@ enum rw_lock_flag_t {
#endif /* UNIV_INNOCHECKSUM */
-static inline ulint my_atomic_addlint(ulint *A, ulint B)
-{
-#ifdef _WIN64
- return ulint(my_atomic_add64((volatile int64*)A, B));
-#else
- return ulint(my_atomic_addlong(A, B));
-#endif
-}
-
-static inline ulint my_atomic_loadlint(const ulint *A)
-{
-#ifdef _WIN64
- return ulint(my_atomic_load64((volatile int64*)A));
-#else
- return ulint(my_atomic_loadlong(A));
-#endif
-}
-
-static inline lint my_atomic_addlint(volatile lint *A, lint B)
-{
-#ifdef _WIN64
- return my_atomic_add64((volatile int64*)A, B);
-#else
- return my_atomic_addlong(A, B);
-#endif
-}
-
-static inline lint my_atomic_loadlint(const lint *A)
-{
-#ifdef _WIN64
- return lint(my_atomic_load64((volatile int64*)A));
-#else
- return my_atomic_loadlong(A);
-#endif
-}
-
-static inline void my_atomic_storelint(ulint *A, ulint B)
-{
-#ifdef _WIN64
- my_atomic_store64((volatile int64*)A, B);
-#else
- my_atomic_storelong(A, B);
-#endif
-}
-
/** Simple non-atomic counter aligned to CACHE_LINE_SIZE
@tparam Type the integer type of the counter */
template <typename Type>
@@ -1184,28 +1136,4 @@ private:
/** The counter */
Type m_counter;
};
-
-/** Simple atomic counter aligned to CACHE_LINE_SIZE
-@tparam Type lint or ulint */
-template <typename Type = ulint>
-struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) simple_atomic_counter
-{
- /** Increment the counter */
- Type inc() { return add(1); }
- /** Decrement the counter */
- Type dec() { return add(Type(~0)); }
-
- /** Add to the counter
- @param[in] i amount to be added
- @return the value of the counter before adding */
- Type add(Type i) { return my_atomic_addlint(&m_counter, i); }
-
- /** @return the value of the counter (non-atomic access)! */
- operator Type() const { return m_counter; }
-
-private:
- /** The counter */
- Type m_counter;
-};
-
#endif /* sync0types_h */
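
With the my_atomic_*lint() helpers and simple_atomic_counter removed here, the callers in this patch switch to std::atomic or to Atomic_counter from my_counter.h (included via univ.i below). As a rough sketch of the counter interface the later hunks rely on, post-increment, post-decrement and an implicit relaxed read, assuming relaxed ordering throughout (the real Atomic_counter may differ in detail):

#include <atomic>

template <typename Type>
class atomic_counter_sketch
{
  std::atomic<Type> m_counter;

public:
  atomic_counter_sketch(Type value= 0) : m_counter(value) {}

  Type fetch_add(Type i) { return m_counter.fetch_add(i, std::memory_order_relaxed); }
  Type fetch_sub(Type i) { return m_counter.fetch_sub(i, std::memory_order_relaxed); }

  /* post-increment/decrement, as used for trx_t::n_ref and rseg_history_len */
  Type operator++(int) { return fetch_add(1); }
  Type operator--(int) { return fetch_sub(1); }

  /* relaxed read, as used by accessors such as get_max_trx_id() */
  operator Type() const { return m_counter.load(std::memory_order_relaxed); }
};
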
diff --git a/storage/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h
index 43d771c646b..2299a648a88 100644
--- a/storage/innobase/include/trx0purge.h
+++ b/storage/innobase/include/trx0purge.h
@@ -135,202 +135,6 @@ private:
TrxUndoRsegs::const_iterator m_iter;
};
-/* Namespace to hold all the related functions and variables need for truncate
-of undo tablespace. */
-namespace undo {
-
- typedef std::vector<ulint> undo_spaces_t;
- typedef std::vector<trx_rseg_t*> rseg_for_trunc_t;
-
- /** Mark completion of undo truncate action by writing magic number to
- the log file and then removing it from the disk.
- If we are going to remove it from disk then why write magic number ?
- This is to safeguard from unlink (file-system) anomalies that will keep
- the link to the file even after unlink action is successfull and
- ref-count = 0.
- @param[in] space_id id of the undo tablespace to truncate.*/
- void done(ulint space_id);
-
- /** Check if TRUNCATE_DDL_LOG file exist.
- @param[in] space_id id of the undo tablespace.
- @return true if exist else false. */
- bool is_log_present(ulint space_id);
-
- /** Track UNDO tablespace mark for truncate. */
- class Truncate {
- public:
- void create()
- {
- m_undo_for_trunc = ULINT_UNDEFINED;
- m_scan_start = 1;
- m_purge_rseg_truncate_frequency =
- ulint(srv_purge_rseg_truncate_frequency);
- }
-
- /** Clear the cached rollback segment. Normally done
- when purge is about to shutdown. */
- void clear()
- {
- reset();
- rseg_for_trunc_t temp;
- m_rseg_for_trunc.swap(temp);
- }
-
- /** Is tablespace selected for truncate.
- @return true if undo tablespace is marked for truncate */
- bool is_marked() const
- {
- return(!(m_undo_for_trunc == ULINT_UNDEFINED));
- }
-
- /** Mark the tablespace for truncate.
- @param[in] undo_id tablespace for truncate. */
- void mark(ulint undo_id)
- {
- m_undo_for_trunc = undo_id;
-
- m_scan_start = (undo_id + 1)
- % (srv_undo_tablespaces_active + 1);
- if (m_scan_start == 0) {
- /* Note: UNDO tablespace ids starts from 1. */
- m_scan_start = 1;
- }
-
- /* We found an UNDO-tablespace to truncate so set the
- local purge rseg truncate frequency to 1. This will help
- accelerate the purge action and in turn truncate. */
- m_purge_rseg_truncate_frequency = 1;
- }
-
- /** Get the tablespace marked for truncate.
- @return tablespace id marked for truncate. */
- ulint get_marked_space_id() const
- {
- return(m_undo_for_trunc);
- }
-
- /** Add rseg to truncate vector.
- @param[in,out] rseg rseg for truncate */
- void add_rseg_to_trunc(trx_rseg_t* rseg)
- {
- m_rseg_for_trunc.push_back(rseg);
- }
-
- /** Get number of rsegs registered for truncate.
- @return return number of rseg that belongs to tablespace mark
- for truncate. */
- ulint rsegs_size() const
- {
- return(m_rseg_for_trunc.size());
- }
-
- /** Get ith registered rseg.
- @param[in] id index of rseg to get.
- @return reference to registered rseg. */
- trx_rseg_t* get_ith_rseg(ulint id)
- {
- ut_ad(id < m_rseg_for_trunc.size());
- return(m_rseg_for_trunc.at(id));
- }
-
- /** Reset for next rseg truncate. */
- void reset()
- {
- m_undo_for_trunc = ULINT_UNDEFINED;
- m_rseg_for_trunc.clear();
-
- /* Sync with global value as we are done with
- truncate now. */
- m_purge_rseg_truncate_frequency = static_cast<ulint>(
- srv_purge_rseg_truncate_frequency);
- }
-
- /** Get the tablespace id to start scanning from.
- @return id of UNDO tablespace to start scanning from. */
- ulint get_scan_start() const
- {
- return(m_scan_start);
- }
-
- /** Check if the tablespace needs fix-up (based on presence of
- DDL truncate log)
- @param space_id space id of the undo tablespace to check
- @return true if fix up is needed else false */
- bool needs_fix_up(ulint space_id) const
- {
- return(is_log_present(space_id));
- }
-
- /** Add undo tablespace to truncate vector.
- @param[in] space_id space id of tablespace to
- truncate */
- static void add_space_to_trunc_list(ulint space_id)
- {
- s_spaces_to_truncate.push_back(space_id);
- }
-
- /** Clear the truncate vector. */
- static void clear_trunc_list()
- {
- s_spaces_to_truncate.clear();
- }
-
- /** Is tablespace marked for truncate.
- @param[in] space_id space id to check
- @return true if marked for truncate, else false. */
- static bool is_tablespace_truncated(ulint space_id)
- {
- return(std::find(s_spaces_to_truncate.begin(),
- s_spaces_to_truncate.end(), space_id)
- != s_spaces_to_truncate.end());
- }
-
- /** Was a tablespace truncated at startup
- @param[in] space_id space id to check
- @return whether space_id was truncated at startup */
- static bool was_tablespace_truncated(ulint space_id)
- {
- return(std::find(s_fix_up_spaces.begin(),
- s_fix_up_spaces.end(),
- space_id)
- != s_fix_up_spaces.end());
- }
-
- /** Get local rseg purge truncate frequency
- @return rseg purge truncate frequency. */
- ulint get_rseg_truncate_frequency() const
- {
- return(m_purge_rseg_truncate_frequency);
- }
-
- private:
- /** UNDO tablespace is mark for truncate. */
- ulint m_undo_for_trunc;
-
- /** rseg that resides in UNDO tablespace is marked for
- truncate. */
- rseg_for_trunc_t m_rseg_for_trunc;
-
- /** Start scanning for UNDO tablespace from this space_id.
- This is to avoid bias selection of one tablespace always. */
- ulint m_scan_start;
-
- /** Rollback segment(s) purge frequency. This is local
- value maintained along with global value. It is set to global
- value on start but when tablespace is marked for truncate it
- is updated to 1 and then minimum value among 2 is used by
- purge action. */
- ulint m_purge_rseg_truncate_frequency;
-
- /** List of UNDO tablespace(s) to truncate. */
- static undo_spaces_t s_spaces_to_truncate;
- public:
- /** Undo tablespaces that were truncated at startup */
- static undo_spaces_t s_fix_up_spaces;
- }; /* class Truncate */
-
-}; /* namespace undo */
-
/** The control structure used in the purge operation */
class purge_sys_t
{
@@ -343,22 +147,19 @@ public:
MY_ALIGNED(CACHE_LINE_SIZE)
rw_lock_t latch;
private:
- /** whether purge is enabled; protected by latch and my_atomic */
- int32_t m_enabled;
+ /** whether purge is enabled; protected by latch and std::atomic */
+ std::atomic<bool> m_enabled;
/** number of pending stop() calls without resume() */
- int32_t m_paused;
+ Atomic_counter<int32_t> m_paused;
public:
que_t* query; /*!< The query graph which will do the
parallelized purge operation */
MY_ALIGNED(CACHE_LINE_SIZE)
ReadView view; /*!< The purge will not remove undo logs
which are >= this view (purge view) */
- /** Total number of tasks submitted by srv_purge_coordinator_thread.
- Not accessed by other threads. */
- ulint n_submitted;
- /** Number of completed tasks. Accessed by srv_purge_coordinator
- and srv_worker_thread by my_atomic. */
- ulint n_completed;
+  /** Number of tasks not yet completed. Accessed by srv_purge_coordinator
+  and srv_worker_thread via std::atomic. */
+ std::atomic<ulint> n_tasks;
/** Iterator to the undo log records of committed transactions */
struct iterator
@@ -412,9 +213,14 @@ public:
by the pq_mutex */
PQMutex pq_mutex; /*!< Mutex protecting purge_queue */
- undo::Truncate undo_trunc; /*!< Track UNDO tablespace marked
- for truncate. */
-
+ /** Undo tablespace file truncation (only accessed by the
+ srv_purge_coordinator_thread) */
+ struct {
+ /** The undo tablespace that is currently being truncated */
+ fil_space_t* current;
+ /** The undo tablespace that was last truncated */
+ fil_space_t* last;
+ } truncate;
/**
Constructor.
@@ -423,7 +229,7 @@ public:
uninitialised. Real initialisation happens in create().
*/
- purge_sys_t() : event(NULL), m_enabled(false) {}
+ purge_sys_t() : event(NULL), m_enabled(false), n_tasks(0) {}
/** Create the instance */
@@ -433,39 +239,24 @@ public:
void close();
/** @return whether purge is enabled */
- bool enabled()
- {
- return my_atomic_load32_explicit(&m_enabled, MY_MEMORY_ORDER_RELAXED);
- }
- /** @return whether purge is enabled */
- bool enabled_latched()
- {
- ut_ad(rw_lock_own_flagged(&latch, RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
- return bool(m_enabled);
- }
+ bool enabled() { return m_enabled.load(std::memory_order_relaxed); }
/** @return whether the purge coordinator is paused */
bool paused()
- { return my_atomic_load32_explicit(&m_paused, MY_MEMORY_ORDER_RELAXED); }
- /** @return whether the purge coordinator is paused */
- bool paused_latched()
- {
- ut_ad(rw_lock_own_flagged(&latch, RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
- return m_paused != 0;
- }
+ { return m_paused != 0; }
/** Enable purge at startup. Not protected by latch; the main thread
will wait for purge_sys.enabled() in srv_start() */
void coordinator_startup()
{
ut_ad(!enabled());
- my_atomic_store32_explicit(&m_enabled, true, MY_MEMORY_ORDER_RELAXED);
+ m_enabled.store(true, std::memory_order_relaxed);
}
/** Disable purge at shutdown */
void coordinator_shutdown()
{
ut_ad(enabled());
- my_atomic_store32_explicit(&m_enabled, false, MY_MEMORY_ORDER_RELAXED);
+ m_enabled.store(false, std::memory_order_relaxed);
}
/** @return whether the purge coordinator thread is active */
diff --git a/storage/innobase/include/trx0roll.h b/storage/innobase/include/trx0roll.h
index ba974116f0c..e73b5c5d2b8 100644
--- a/storage/innobase/include/trx0roll.h
+++ b/storage/innobase/include/trx0roll.h
@@ -51,16 +51,6 @@ trx_savept_take(
/*============*/
trx_t* trx); /*!< in: transaction */
-/** Get the last undo log record of a transaction (for rollback).
-@param[in,out] trx transaction
-@param[out] roll_ptr DB_ROLL_PTR to the undo record
-@param[in,out] heap memory heap for allocation
-@return undo log record copied to heap
-@retval NULL if none left or the roll_limit (savepoint) was reached */
-trx_undo_rec_t*
-trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap)
- MY_ATTRIBUTE((nonnull, warn_unused_result));
-
/** Report progress when rolling back a row of a recovered transaction. */
void trx_roll_report_progress();
/*******************************************************************//**
diff --git a/storage/innobase/include/trx0rseg.ic b/storage/innobase/include/trx0rseg.ic
index 9edfe897155..1257ffcb391 100644
--- a/storage/innobase/include/trx0rseg.ic
+++ b/storage/innobase/include/trx0rseg.ic
@@ -41,7 +41,7 @@ trx_rsegf_get(fil_space_t* space, ulint page_no, mtr_t* mtr)
|| !srv_was_started);
buf_block_t* block = buf_page_get(page_id_t(space->id, page_no),
- univ_page_size, RW_X_LATCH, mtr);
+ 0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_RSEG_HEADER);
@@ -67,8 +67,7 @@ trx_rsegf_get_new(
|| !srv_was_started);
ut_ad(space <= TRX_SYS_MAX_UNDO_SPACES || space == SRV_TMP_SPACE_ID);
- block = buf_page_get(
- page_id_t(space, page_no), univ_page_size, RW_X_LATCH, mtr);
+ block = buf_page_get(page_id_t(space, page_no), 0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_RSEG_HEADER_NEW);
diff --git a/storage/innobase/include/trx0sys.h b/storage/innobase/include/trx0sys.h
index 043429b7f56..3692fa2aa67 100644
--- a/storage/innobase/include/trx0sys.h
+++ b/storage/innobase/include/trx0sys.h
@@ -74,7 +74,7 @@ trx_sysf_get(mtr_t* mtr, bool rw = true)
{
buf_block_t* block = buf_page_get(
page_id_t(TRX_SYS_SPACE, TRX_SYS_PAGE_NO),
- univ_page_size, rw ? RW_X_LATCH : RW_S_LATCH, mtr);
+ 0, rw ? RW_X_LATCH : RW_S_LATCH, mtr);
if (block) {
buf_block_dbg_add_level(block, SYNC_TRX_SYS_HEADER);
}
@@ -369,7 +369,7 @@ struct rw_trx_hash_element_t
trx_id_t id; /* lf_hash_init() relies on this to be first in the struct */
- trx_id_t no;
+ Atomic_counter<trx_id_t> no;
trx_t *trx;
ib_mutex_t mutex;
};
@@ -706,11 +706,7 @@ public:
because it may change even before this method returns.
*/
- uint32_t size()
- {
- return uint32_t(my_atomic_load32_explicit(&hash.count,
- MY_MEMORY_ORDER_RELAXED));
- }
+ uint32_t size() { return uint32_t(lf_hash_size(&hash)); }
/**
@@ -792,7 +788,7 @@ class trx_sys_t
The smallest number not yet assigned as a transaction id or transaction
number. Accessed and updated with atomic operations.
*/
- MY_ALIGNED(CACHE_LINE_SIZE) trx_id_t m_max_trx_id;
+ MY_ALIGNED(CACHE_LINE_SIZE) Atomic_counter<trx_id_t> m_max_trx_id;
/**
@@ -803,17 +799,17 @@ class trx_sys_t
@sa assign_new_trx_no()
@sa snapshot_ids()
*/
- MY_ALIGNED(CACHE_LINE_SIZE) trx_id_t m_rw_trx_hash_version;
+ MY_ALIGNED(CACHE_LINE_SIZE) std::atomic<trx_id_t> m_rw_trx_hash_version;
+ bool m_initialised;
+
+public:
/**
TRX_RSEG_HISTORY list length (number of committed transactions to purge)
*/
- MY_ALIGNED(CACHE_LINE_SIZE) int32 rseg_history_len;
-
- bool m_initialised;
+ MY_ALIGNED(CACHE_LINE_SIZE) Atomic_counter<uint32_t> rseg_history_len;
-public:
/** Mutex protecting trx_list. */
MY_ALIGNED(CACHE_LINE_SIZE) mutable TrxSysMutex mutex;
@@ -889,9 +885,7 @@ public:
trx_id_t get_max_trx_id()
{
- return static_cast<trx_id_t>
- (my_atomic_load64_explicit(reinterpret_cast<int64*>(&m_max_trx_id),
- MY_MEMORY_ORDER_RELAXED));
+ return m_max_trx_id;
}
@@ -933,9 +927,7 @@ public:
void assign_new_trx_no(trx_t *trx)
{
trx->no= get_new_trx_id_no_refresh();
- my_atomic_store64_explicit(reinterpret_cast<int64*>
- (&trx->rw_trx_hash_element->no),
- trx->no, MY_MEMORY_ORDER_RELAXED);
+ trx->rw_trx_hash_element->no= trx->no;
refresh_rw_trx_hash_version();
}
@@ -986,7 +978,8 @@ public:
/** Initialiser for m_max_trx_id and m_rw_trx_hash_version. */
void init_max_trx_id(trx_id_t value)
{
- m_max_trx_id= m_rw_trx_hash_version= value;
+ m_max_trx_id= value;
+ m_rw_trx_hash_version.store(value, std::memory_order_relaxed);
}
@@ -1108,22 +1101,6 @@ public:
return count;
}
- /** @return number of committed transactions waiting for purge */
- ulint history_size() const
- {
- return uint32(my_atomic_load32(&const_cast<trx_sys_t*>(this)
- ->rseg_history_len));
- }
- /** Add to the TRX_RSEG_HISTORY length (on database startup). */
- void history_add(int32 len)
- {
- my_atomic_add32(&rseg_history_len, len);
- }
- /** Register a committed transaction. */
- void history_insert() { history_add(1); }
- /** Note that a committed transaction was purged. */
- void history_remove() { history_add(-1); }
-
private:
static my_bool get_min_trx_id_callback(rw_trx_hash_element_t *element,
trx_id_t *id)
@@ -1154,8 +1131,7 @@ private:
{
if (element->id < arg->m_id)
{
- trx_id_t no= static_cast<trx_id_t>(my_atomic_load64_explicit(
- reinterpret_cast<int64*>(&element->no), MY_MEMORY_ORDER_RELAXED));
+ trx_id_t no= element->no;
arg->m_ids->push_back(element->id);
if (no < arg->m_no)
arg->m_no= no;
@@ -1167,18 +1143,14 @@ private:
/** Getter for m_rw_trx_hash_version, must issue ACQUIRE memory barrier. */
trx_id_t get_rw_trx_hash_version()
{
- return static_cast<trx_id_t>
- (my_atomic_load64_explicit(reinterpret_cast<int64*>
- (&m_rw_trx_hash_version),
- MY_MEMORY_ORDER_ACQUIRE));
+ return m_rw_trx_hash_version.load(std::memory_order_acquire);
}
/** Increments m_rw_trx_hash_version, must issue RELEASE memory barrier. */
void refresh_rw_trx_hash_version()
{
- my_atomic_add64_explicit(reinterpret_cast<int64*>(&m_rw_trx_hash_version),
- 1, MY_MEMORY_ORDER_RELEASE);
+ m_rw_trx_hash_version.fetch_add(1, std::memory_order_release);
}
@@ -1197,8 +1169,7 @@ private:
trx_id_t get_new_trx_id_no_refresh()
{
- return static_cast<trx_id_t>(my_atomic_add64_explicit(
- reinterpret_cast<int64*>(&m_max_trx_id), 1, MY_MEMORY_ORDER_RELAXED));
+ return m_max_trx_id++;
}
};
diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index b2594b46895..ee5f1c4aefb 100644
--- a/storage/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
@@ -467,6 +467,7 @@ Check transaction state */
ut_ad(!(t)->read_view.is_open()); \
ut_ad((t)->lock.wait_thr == NULL); \
ut_ad(UT_LIST_GET_LEN((t)->lock.trx_locks) == 0); \
+ ut_ad(UT_LIST_GET_LEN((t)->lock.evicted_tables) == 0); \
ut_ad((t)->dict_operation == TRX_DICT_OP_NONE); \
} while(0)
@@ -565,6 +566,11 @@ struct trx_lock_t {
lock_sys.mutex. Otherwise, this may
only be modified by the thread that is
serving the running transaction. */
+#ifdef WITH_WSREP
+ bool was_chosen_as_wsrep_victim;
+ /*!< high priority wsrep thread has
+ marked this trx to abort */
+#endif /* WITH_WSREP */
/** Pre-allocated record locks */
struct {
@@ -591,6 +597,9 @@ struct trx_lock_t {
lock_list table_locks; /*!< All table locks requested by this
transaction, including AUTOINC locks */
+ /** List of pending trx_t::evict_table() */
+ UT_LIST_BASE_NODE_T(dict_table_t) evicted_tables;
+
bool cancel; /*!< true if the transaction is being
rolled back either via deadlock
detection or due to lock timeout. The
@@ -758,7 +767,7 @@ private:
that it is no longer "active".
*/
- int32_t n_ref;
+ Atomic_counter<int32_t> n_ref;
public:
@@ -1115,19 +1124,23 @@ public:
return flush_observer;
}
+ /** Evict a table definition due to the rollback of ALTER TABLE.
+ @param[in] table_id table identifier */
+ void evict_table(table_id_t table_id);
+
bool is_referenced()
{
- return my_atomic_load32_explicit(&n_ref, MY_MEMORY_ORDER_RELAXED) > 0;
+ return n_ref > 0;
}
void reference()
{
#ifdef UNIV_DEBUG
- int32_t old_n_ref=
+ auto old_n_ref=
#endif
- my_atomic_add32_explicit(&n_ref, 1, MY_MEMORY_ORDER_RELAXED);
+ n_ref++;
ut_ad(old_n_ref >= 0);
}
@@ -1135,9 +1148,9 @@ public:
void release_reference()
{
#ifdef UNIV_DEBUG
- int32_t old_n_ref=
+ auto old_n_ref=
#endif
- my_atomic_add32_explicit(&n_ref, -1, MY_MEMORY_ORDER_RELAXED);
+ n_ref--;
ut_ad(old_n_ref > 0);
}
diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h
index 60b0517db0d..f3c52fff7b5 100644
--- a/storage/innobase/include/trx0undo.h
+++ b/storage/innobase/include/trx0undo.h
@@ -79,27 +79,22 @@ trx_undo_trx_id_is_insert(
/*======================*/
const byte* trx_id) /*!< in: DB_TRX_ID, followed by DB_ROLL_PTR */
MY_ATTRIBUTE((warn_unused_result));
-/*****************************************************************//**
-Writes a roll ptr to an index page. In case that the size changes in
-some future version, this function should be used instead of
-mach_write_... */
-UNIV_INLINE
-void
-trx_write_roll_ptr(
-/*===============*/
- byte* ptr, /*!< in: pointer to memory where
- written */
- roll_ptr_t roll_ptr); /*!< in: roll ptr */
-/*****************************************************************//**
-Reads a roll ptr from an index page. In case that the roll ptr size
-changes in some future version, this function should be used instead of
-mach_read_...
+/** Write DB_ROLL_PTR.
+@param[out] ptr buffer
+@param[in] roll_ptr DB_ROLL_PTR value */
+inline void trx_write_roll_ptr(byte* ptr, roll_ptr_t roll_ptr)
+{
+ compile_time_assert(DATA_ROLL_PTR_LEN == 7);
+ mach_write_to_7(ptr, roll_ptr);
+}
+/** Read DB_ROLL_PTR.
+@param[in] ptr buffer
@return roll ptr */
-UNIV_INLINE
-roll_ptr_t
-trx_read_roll_ptr(
-/*==============*/
- const byte* ptr); /*!< in: pointer to memory from where to read */
+inline roll_ptr_t trx_read_roll_ptr(const byte* ptr)
+{
+ compile_time_assert(DATA_ROLL_PTR_LEN == 7);
+ return mach_read_from_7(ptr);
+}
/** Gets an undo log page and x-latches it.
@param[in] page_id page id
@@ -185,9 +180,7 @@ trx_undo_free_last_page(trx_undo_t* undo, mtr_t* mtr)
@param[in,out] undo undo log
@param[in] limit all undo logs after this limit will be discarded
@param[in] is_temp whether this is temporary undo log */
-void
-trx_undo_truncate_end(trx_undo_t* undo, undo_no_t limit, bool is_temp)
- MY_ATTRIBUTE((nonnull));
+void trx_undo_truncate_end(trx_undo_t& undo, undo_no_t limit, bool is_temp);
/** Truncate the head of an undo log.
NOTE that only whole pages are freed; the header page is not
diff --git a/storage/innobase/include/trx0undo.ic b/storage/innobase/include/trx0undo.ic
index ac8af61be09..b54f73cdda6 100644
--- a/storage/innobase/include/trx0undo.ic
+++ b/storage/innobase/include/trx0undo.ic
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2013, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -103,37 +103,6 @@ trx_undo_trx_id_is_insert(
return bool(trx_id[DATA_TRX_ID_LEN] >> 7);
}
-/*****************************************************************//**
-Writes a roll ptr to an index page. In case that the size changes in
-some future version, this function should be used instead of
-mach_write_... */
-UNIV_INLINE
-void
-trx_write_roll_ptr(
-/*===============*/
- byte* ptr, /*!< in: pointer to memory where
- written */
- roll_ptr_t roll_ptr) /*!< in: roll ptr */
-{
- compile_time_assert(DATA_ROLL_PTR_LEN == 7);
- mach_write_to_7(ptr, roll_ptr);
-}
-
-/*****************************************************************//**
-Reads a roll ptr from an index page. In case that the roll ptr size
-changes in some future version, this function should be used instead of
-mach_read_...
-@return roll ptr */
-UNIV_INLINE
-roll_ptr_t
-trx_read_roll_ptr(
-/*==============*/
- const byte* ptr) /*!< in: pointer to memory from where to read */
-{
- compile_time_assert(DATA_ROLL_PTR_LEN == 7);
- return(mach_read_from_7(ptr));
-}
-
/** Gets an undo log page and x-latches it.
@param[in] page_id page id
@param[in,out] mtr mini-transaction
@@ -142,8 +111,7 @@ UNIV_INLINE
page_t*
trx_undo_page_get(const page_id_t page_id, mtr_t* mtr)
{
- buf_block_t* block = buf_page_get(page_id, univ_page_size,
- RW_X_LATCH, mtr);
+ buf_block_t* block = buf_page_get(page_id, 0, RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
@@ -158,8 +126,7 @@ UNIV_INLINE
page_t*
trx_undo_page_get_s_latched(const page_id_t page_id, mtr_t* mtr)
{
- buf_block_t* block = buf_page_get(page_id, univ_page_size,
- RW_S_LATCH, mtr);
+ buf_block_t* block = buf_page_get(page_id, 0, RW_S_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i
index 69e8e9eb96e..149d0122576 100644
--- a/storage/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
@@ -77,6 +77,7 @@ used throughout InnoDB but do not include too much themselves. They
 support cross-platform development and expose commonly used SQL names. */
#include <my_global.h>
+#include "my_counter.h"
/* JAN: TODO: missing 5.7 header */
#ifdef HAVE_MY_THREAD_H
diff --git a/storage/innobase/include/ut0counter.h b/storage/innobase/include/ut0counter.h
index ab694386099..3c02bacdc2d 100644
--- a/storage/innobase/include/ut0counter.h
+++ b/storage/innobase/include/ut0counter.h
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2012, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
+Copyright (c) 2017, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -30,7 +30,6 @@ Created 2012/04/12 by Sunny Bains
#include "os0thread.h"
#include "my_rdtsc.h"
-#include "my_atomic.h"
/** CPU cache line size */
#ifdef CPU_LEVEL1_DCACHE_LINESIZE
@@ -42,120 +41,85 @@ Created 2012/04/12 by Sunny Bains
/** Default number of slots to use in ib_counter_t */
#define IB_N_SLOTS 64
-/** Get the offset into the counter array. */
-template <typename Type, int N>
-struct generic_indexer_t {
- /** @return offset within m_counter */
- static size_t offset(size_t index) UNIV_NOTHROW
- {
- return(((index % N) + 1) * (CACHE_LINE_SIZE / sizeof(Type)));
- }
-};
+/** Use the result of my_timer_cycles(), which mainly uses RDTSC,
+as a random value. See the comments for my_timer_cycles(). */
+/** @return result from RDTSC or similar functions. */
+static inline size_t
+get_rnd_value()
+{
+ size_t c = static_cast<size_t>(my_timer_cycles());
+
+ if (c != 0) {
+ return c;
+ }
-/** Use the result of my_timer_cycles(), which mainly uses RDTSC for cycles,
-to index into the counter array. See the comments for my_timer_cycles() */
-template <typename Type=ulint, int N=1>
-struct counter_indexer_t : public generic_indexer_t<Type, N> {
- /** @return result from RDTSC or similar functions. */
- static size_t get_rnd_index() UNIV_NOTHROW
- {
- size_t c = static_cast<size_t>(my_timer_cycles());
-
- if (c != 0) {
- return(c);
- } else {
- /* We may go here if my_timer_cycles() returns 0,
- so we have to have the plan B for the counter. */
+ /* We may go here if my_timer_cycles() returns 0,
+ so we have to have the plan B for the counter. */
#if !defined(_WIN32)
- return(size_t(os_thread_get_curr_id()));
+ return (size_t)os_thread_get_curr_id();
#else
- LARGE_INTEGER cnt;
- QueryPerformanceCounter(&cnt);
+ LARGE_INTEGER cnt;
+ QueryPerformanceCounter(&cnt);
- return(static_cast<size_t>(cnt.QuadPart));
+ return static_cast<size_t>(cnt.QuadPart);
#endif /* !_WIN32 */
- }
- }
+}
- /** @return a random offset to the array */
- static size_t get_rnd_offset() UNIV_NOTHROW
- {
- return(generic_indexer_t<Type, N>::offset(get_rnd_index()));
- }
-};
-
-#define default_indexer_t counter_indexer_t
-
-/** Class for using fuzzy counters. The counter is relaxed atomic
+/** Class for using fuzzy counters. The counter is multi-instance relaxed atomic
so the results are not guaranteed to be 100% accurate but close
enough. Creates an array of counters and separates each element by the
CACHE_LINE_SIZE bytes */
-template <
- typename Type,
- int N = IB_N_SLOTS,
- template<typename, int> class Indexer = default_indexer_t>
-struct MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_t
-{
+template <typename Type, int N = IB_N_SLOTS>
+struct ib_counter_t {
/** Increment the counter by 1. */
- void inc() UNIV_NOTHROW { add(1); }
+ void inc() { add(1); }
/** Increment the counter by 1.
@param[in] index a reasonably thread-unique identifier */
- void inc(size_t index) UNIV_NOTHROW { add(index, 1); }
+ void inc(size_t index) { add(index, 1); }
/** Add to the counter.
@param[in] n amount to be added */
- void add(Type n) UNIV_NOTHROW { add(m_policy.get_rnd_offset(), n); }
+ void add(Type n) { add(get_rnd_value(), n); }
/** Add to the counter.
@param[in] index a reasonably thread-unique identifier
@param[in] n amount to be added */
- void add(size_t index, Type n) UNIV_NOTHROW {
- size_t i = m_policy.offset(index);
-
- ut_ad(i < UT_ARR_SIZE(m_counter));
-
- if (sizeof(Type) == 8) {
- my_atomic_add64_explicit(
- reinterpret_cast<int64*>(&m_counter[i]),
- static_cast<int64>(n), MY_MEMORY_ORDER_RELAXED);
- } else if (sizeof(Type) == 4) {
- my_atomic_add32_explicit(
- reinterpret_cast<int32*>(&m_counter[i]),
- static_cast<int32>(n), MY_MEMORY_ORDER_RELAXED);
- }
- compile_time_assert(sizeof(Type) == 8 || sizeof(Type) == 4);
+ void add(size_t index, Type n) {
+ index = index % N;
+
+ ut_ad(index < UT_ARR_SIZE(m_counter));
+
+ m_counter[index].value.fetch_add(n, std::memory_order_relaxed);
}
- /* @return total value - not 100% accurate, since it is relaxed atomic. */
- operator Type() const UNIV_NOTHROW {
+	/* @return total value - not 100% accurate, since it is relaxed atomic. */
+ operator Type() const {
Type total = 0;
- for (size_t i = 0; i < N; ++i) {
- if (sizeof(Type) == 8) {
- total += static_cast<
- Type>(my_atomic_load64_explicit(
- reinterpret_cast<int64*>(const_cast<Type*>(
- &m_counter[m_policy.offset(i)])),
- MY_MEMORY_ORDER_RELAXED));
- } else if (sizeof(Type) == 4) {
- total += static_cast<
- Type>(my_atomic_load32_explicit(
- reinterpret_cast<int32*>(const_cast<Type*>(
- &m_counter[m_policy.offset(i)])),
- MY_MEMORY_ORDER_RELAXED));
- }
+ for (const auto &counter : m_counter) {
+ total += counter.value.load(std::memory_order_relaxed);
}
return(total);
}
private:
- /** Indexer into the array */
- Indexer<Type, N>m_policy;
-
- /** Slot 0 is unused. */
- Type m_counter[(N + 1) * (CACHE_LINE_SIZE / sizeof(Type))];
+ /** Atomic which occupies whole CPU cache line.
+ Note: We rely on the default constructor of std::atomic and
+ do not explicitly initialize the contents. This works for us,
+ because ib_counter_t is only intended for usage with global
+ memory that is allocated from the .bss and thus guaranteed to
+ be zero-initialized by the run-time environment.
+ @see srv_stats
+ @see rw_lock_stats */
+ struct ib_counter_element_t {
+ MY_ALIGNED(CACHE_LINE_SIZE) std::atomic<Type> value;
+ };
+ static_assert(sizeof(ib_counter_element_t) == CACHE_LINE_SIZE, "");
+
+ /** Array of counter elements */
+ MY_ALIGNED(CACHE_LINE_SIZE) ib_counter_element_t m_counter[N];
};
#endif /* ut0counter_h */
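
The replacement ib_counter_t above drops the indexer policy classes: each slot is a cache-line-aligned std::atomic, writers pick a slot with get_rnd_value() % N and add with relaxed ordering, and reading sums all slots. The same pattern as a self-contained sketch; alignas(64) and the explicit zero-initialiser stand in for MY_ALIGNED(CACHE_LINE_SIZE) and the .bss zero-fill the real class relies on:

#include <atomic>
#include <cstddef>

template <typename Type, int N = 64>
struct sharded_counter
{
  struct alignas(64) element { std::atomic<Type> value{0}; };

  /* One padded slot per index; writers spread across slots to avoid
  cache-line ping-pong between CPUs. */
  element m_counter[N];

  void add(size_t index, Type n)
  { m_counter[index % N].value.fetch_add(n, std::memory_order_relaxed); }

  /* Sum of all slots; only approximately current under concurrency. */
  operator Type() const
  {
    Type total= 0;
    for (const auto &e : m_counter)
      total+= e.value.load(std::memory_order_relaxed);
    return total;
  }
};
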
diff --git a/storage/innobase/include/ut0crc32.h b/storage/innobase/include/ut0crc32.h
index b8fc4168dfd..8c04ae0dcc6 100644
--- a/storage/innobase/include/ut0crc32.h
+++ b/storage/innobase/include/ut0crc32.h
@@ -47,12 +47,6 @@ typedef uint32_t (*ut_crc32_func_t)(const byte* ptr, ulint len);
/** Pointer to CRC32 calculation function. */
extern ut_crc32_func_t ut_crc32;
-#ifdef INNODB_BUG_ENDIAN_CRC32
-/** Pointer to CRC32 calculation function, which uses big-endian byte order
-when converting byte strings to integers internally. */
-extern uint32_t ut_crc32_legacy_big_endian(const byte* buf, ulint len);
-#endif /* INNODB_BUG_ENDIAN_CRC32 */
-
/** Text description of CRC32 implementation */
extern const char* ut_crc32_implementation;
diff --git a/storage/innobase/include/ut0mutex.h b/storage/innobase/include/ut0mutex.h
index 3dbd7919a2f..3155151c73a 100644
--- a/storage/innobase/include/ut0mutex.h
+++ b/storage/innobase/include/ut0mutex.h
@@ -38,8 +38,6 @@ Created 2012-03-24 Sunny Bains.
@param[in] T The resulting typedef alias */
#define UT_MUTEX_TYPE(M, P, T) typedef PolicyMutex<M<P> > T;
-typedef OSMutex EventMutex;
-
# ifdef HAVE_IB_LINUX_FUTEX
UT_MUTEX_TYPE(TTASFutexMutex, GenericPolicy, FutexMutex);
UT_MUTEX_TYPE(TTASFutexMutex, BlockMutexPolicy, BlockFutexMutex);
diff --git a/storage/innobase/include/ut0ut.h b/storage/innobase/include/ut0ut.h
index 33c60484085..65c5d63953c 100644
--- a/storage/innobase/include/ut0ut.h
+++ b/storage/innobase/include/ut0ut.h
@@ -46,7 +46,6 @@ Created 1/20/1994 Heikki Tuuri
#include <stdarg.h>
#include <string>
-#include <my_atomic.h>
/** Index name prefix in fast index creation, as a string constant */
#define TEMP_INDEX_PREFIX_STR "\377"
@@ -173,12 +172,6 @@ ut_2_power_up(
ulint n) /*!< in: number != 0 */
MY_ATTRIBUTE((const));
-/** Determine how many bytes (groups of 8 bits) are needed to
-store the given number of bits.
-@param b in: bits
-@return number of bytes (octets) needed to represent b */
-#define UT_BITS_IN_BYTES(b) (((b) + 7) / 8)
-
/**********************************************************//**
Returns system time. We do not specify the format of the time returned:
the only way to manipulate it is to use the function ut_difftime.
@@ -237,6 +230,12 @@ ut_difftime(
#endif /* !UNIV_INNOCHECKSUM */
+/** Determine how many bytes (groups of 8 bits) are needed to
+store the given number of bits.
+@param b in: bits
+@return number of bytes (octets) needed to represent b */
+#define UT_BITS_IN_BYTES(b) (((b) + 7) / 8)
+
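The relocated UT_BITS_IN_BYTES is plain ceiling division by eight; a couple of illustrative spot checks (not part of the patch, shown with the macro expanded):

    // UT_BITS_IN_BYTES(b) expands to ((b) + 7) / 8
    static_assert(((1 + 7) / 8) == 1, "1 bit needs 1 byte");
    static_assert(((8 + 7) / 8) == 1, "8 bits fit in 1 byte");
    static_assert(((9 + 7) / 8) == 2, "9 bits need 2 bytes");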
/** Determines if a number is zero or a power of two.
@param[in] n number
@return nonzero if n is zero or a power of two; zero otherwise */
diff --git a/storage/innobase/innodb.cmake b/storage/innobase/innodb.cmake
index c3698df4c11..a73f642c510 100644
--- a/storage/innobase/innodb.cmake
+++ b/storage/innobase/innodb.cmake
@@ -121,11 +121,6 @@ ELSEIF(WITH_INNODB_ROOT_GUESS)
ADD_DEFINITIONS(-DBTR_CUR_ADAPT)
ENDIF()
-OPTION(WITH_INNODB_BUG_ENDIAN_CRC32 "Weaken innodb_checksum_algorithm=crc32 by supporting upgrade from big-endian systems running 5.6/10.0/10.1" ${IS_BIG_ENDIAN})
-IF(WITH_INNODB_BUG_ENDIAN_CRC32)
- ADD_DEFINITIONS(-DINNODB_BUG_ENDIAN_CRC32)
-ENDIF()
-
OPTION(WITH_INNODB_EXTRA_DEBUG "Enable extra InnoDB debug checks" OFF)
IF(WITH_INNODB_EXTRA_DEBUG)
ADD_DEFINITIONS(-DUNIV_ZIP_DEBUG)
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index 1b8394b8790..0c232878c3c 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -333,7 +333,7 @@ lock_report_trx_id_insanity(
trx_id_t max_trx_id) /*!< in: trx_sys.get_max_trx_id() */
{
ut_ad(rec_offs_validate(rec, index, offsets));
- ut_ad(!rec_is_metadata(rec, index));
+ ut_ad(!rec_is_metadata(rec, *index));
ib::error()
<< "Transaction id " << trx_id
@@ -356,7 +356,7 @@ lock_check_trx_id_sanity(
const ulint* offsets) /*!< in: rec_get_offsets(rec, index) */
{
ut_ad(rec_offs_validate(rec, index, offsets));
- ut_ad(!rec_is_metadata(rec, index));
+ ut_ad(!rec_is_metadata(rec, *index));
trx_id_t max_trx_id = trx_sys.get_max_trx_id();
ut_ad(max_trx_id || srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN);
@@ -385,7 +385,7 @@ lock_clust_rec_cons_read_sees(
ut_ad(dict_index_is_clust(index));
ut_ad(page_rec_is_user_rec(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
- ut_ad(!rec_is_metadata(rec, index));
+ ut_ad(!rec_is_metadata(rec, *index));
/* Temp-tables are not shared across connections and multiple
transactions from different connections cannot simultaneously
@@ -424,7 +424,7 @@ lock_sec_rec_cons_read_sees(
{
ut_ad(page_rec_is_user_rec(rec));
ut_ad(!index->is_primary());
- ut_ad(!rec_is_metadata(rec, index));
+ ut_ad(!rec_is_metadata(rec, *index));
/* NOTE that we might call this function while holding the search
system latch. */
@@ -762,9 +762,7 @@ lock_rec_has_to_wait(
<< wsrep_thd_query(lock2->trx->mysql_thd);
}
- if (wsrep_trx_order_before(trx->mysql_thd,
- lock2->trx->mysql_thd)
- && (type_mode & LOCK_MODE_MASK) == LOCK_X
+ if ((type_mode & LOCK_MODE_MASK) == LOCK_X
&& (lock2->type_mode & LOCK_MODE_MASK) == LOCK_X) {
if (for_locking || wsrep_debug) {
/* exclusive lock conflicts are not
@@ -774,12 +772,11 @@ lock_rec_has_to_wait(
<< type_mode
<< " supremum: " << lock_is_on_supremum
<< "conflicts states: my "
- << wsrep_thd_conflict_state(
- trx->mysql_thd, FALSE)
+ << wsrep_thd_transaction_state_str(
+ trx->mysql_thd)
<< " locked "
- << wsrep_thd_conflict_state(
- lock2->trx->mysql_thd,
- FALSE);
+ << wsrep_thd_transaction_state_str(
+ lock2->trx->mysql_thd);
lock_rec_print(stderr, lock2);
ib::info() << " SQL1: "
<< wsrep_thd_query(trx->mysql_thd)
@@ -1098,11 +1095,14 @@ wsrep_kill_victim(
return;
}
- my_bool bf_this = wsrep_thd_is_BF(trx->mysql_thd, FALSE);
+ if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) {
+ return;
+ }
+
my_bool bf_other = wsrep_thd_is_BF(lock->trx->mysql_thd, TRUE);
- if ((bf_this && !bf_other) ||
- (bf_this && bf_other && wsrep_trx_order_before(
+ if ((!bf_other) ||
+ (wsrep_thd_order_before(
trx->mysql_thd, lock->trx->mysql_thd))) {
if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) {
@@ -1113,11 +1113,7 @@ wsrep_kill_victim(
is in the queue*/
} else if (lock->trx != trx) {
if (wsrep_log_conflicts) {
- if (bf_this) {
- ib::info() << "*** Priority TRANSACTION:";
- } else {
- ib::info() << "*** Victim TRANSACTION:";
- }
+ ib::info() << "*** Priority TRANSACTION:";
trx_print_latched(stderr, trx, 3000);
@@ -1218,7 +1214,7 @@ lock_sec_rec_some_has_impl(
ut_ad(!dict_index_is_clust(index));
ut_ad(page_rec_is_user_rec(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
- ut_ad(!rec_is_metadata(rec, index));
+ ut_ad(!rec_is_metadata(rec, *index));
max_trx_id = page_get_max_trx_id(page);
@@ -1426,7 +1422,7 @@ lock_rec_create_low(
lock_t *prev = NULL;
while (hash && wsrep_thd_is_BF(hash->trx->mysql_thd, TRUE)
- && wsrep_trx_order_before(hash->trx->mysql_thd,
+ && wsrep_thd_order_before(hash->trx->mysql_thd,
trx->mysql_thd)) {
prev = hash;
hash = (lock_t *)hash->hash;
@@ -1840,15 +1836,15 @@ lock_rec_add_to_queue(
ib::info() << "WSREP BF lock conflict for my lock:\n BF:" <<
((wsrep_thd_is_BF(trx->mysql_thd, FALSE)) ? "BF" : "normal") << " exec: " <<
- wsrep_thd_exec_mode(trx->mysql_thd) << " conflict: " <<
- wsrep_thd_conflict_state(trx->mysql_thd, false) << " seqno: " <<
+ wsrep_thd_client_state_str(trx->mysql_thd) << " conflict: " <<
+ wsrep_thd_transaction_state_str(trx->mysql_thd) << " seqno: " <<
wsrep_thd_trx_seqno(trx->mysql_thd) << " SQL: " <<
wsrep_thd_query(trx->mysql_thd);
trx_t* otrx = other_lock->trx;
ib::info() << "WSREP other lock:\n BF:" <<
((wsrep_thd_is_BF(otrx->mysql_thd, FALSE)) ? "BF" : "normal") << " exec: " <<
- wsrep_thd_exec_mode(otrx->mysql_thd) << " conflict: " <<
- wsrep_thd_conflict_state(otrx->mysql_thd, false) << " seqno: " <<
+ wsrep_thd_client_state_str(otrx->mysql_thd) << " conflict: " <<
+ wsrep_thd_transaction_state_str(otrx->mysql_thd) << " seqno: " <<
wsrep_thd_trx_seqno(otrx->mysql_thd) << " SQL: " <<
wsrep_thd_query(otrx->mysql_thd);
}
@@ -4253,6 +4249,7 @@ lock_check_dict_lock(
const lock_t* lock) /*!< in: lock to check */
{
if (lock_get_type_low(lock) == LOCK_REC) {
+ ut_ad(!lock->index->table->is_temporary());
/* Check if the transaction locked a record
in a system table in X mode. It should have set
@@ -4266,9 +4263,8 @@ lock_check_dict_lock(
} else {
ut_ad(lock_get_type_low(lock) & LOCK_TABLE);
- const dict_table_t* table;
-
- table = lock->un_member.tab_lock.table;
+ const dict_table_t* table = lock->un_member.tab_lock.table;
+ ut_ad(!table->is_temporary());
/* Check if the transaction locked a system table
in IX mode. It should have set the dict_op code
@@ -4607,14 +4603,14 @@ lock_print_info_summary(
fprintf(file,
"Purge done for trx's n:o < " TRX_ID_FMT
" undo n:o < " TRX_ID_FMT " state: %s\n"
- "History list length " ULINTPF "\n",
+ "History list length %u\n",
purge_sys.tail.trx_no(),
purge_sys.tail.undo_no,
purge_sys.enabled()
? (purge_sys.running() ? "running"
: purge_sys.paused() ? "stopped" : "running but idle")
: "disabled",
- trx_sys.history_size());
+ uint32_t{trx_sys.rseg_history_len});
#ifdef PRINT_NUM_OF_LOCK_STRUCTS
fprintf(file,
@@ -4910,8 +4906,8 @@ lock_rec_queue_validate(
if (!lock_get_wait(other_lock) ) {
ib::info() << "WSREP impl BF lock conflict for my impl lock:\n BF:" <<
((wsrep_thd_is_BF(impl_trx->mysql_thd, FALSE)) ? "BF" : "normal") << " exec: " <<
- wsrep_thd_exec_mode(impl_trx->mysql_thd) << " conflict: " <<
- wsrep_thd_conflict_state(impl_trx->mysql_thd, false) << " seqno: " <<
+ wsrep_thd_client_state_str(impl_trx->mysql_thd) << " conflict: " <<
+ wsrep_thd_transaction_state_str(impl_trx->mysql_thd) << " seqno: " <<
wsrep_thd_trx_seqno(impl_trx->mysql_thd) << " SQL: " <<
wsrep_thd_query(impl_trx->mysql_thd);
@@ -4919,8 +4915,8 @@ lock_rec_queue_validate(
ib::info() << "WSREP other lock:\n BF:" <<
((wsrep_thd_is_BF(otrx->mysql_thd, FALSE)) ? "BF" : "normal") << " exec: " <<
- wsrep_thd_exec_mode(otrx->mysql_thd) << " conflict: " <<
- wsrep_thd_conflict_state(otrx->mysql_thd, false) << " seqno: " <<
+ wsrep_thd_client_state_str(otrx->mysql_thd) << " conflict: " <<
+ wsrep_thd_transaction_state_str(otrx->mysql_thd) << " seqno: " <<
wsrep_thd_trx_seqno(otrx->mysql_thd) << " SQL: " <<
wsrep_thd_query(otrx->mysql_thd);
}
@@ -5141,7 +5137,7 @@ lock_rec_block_validate(
block = buf_page_get_gen(
page_id_t(space_id, page_no),
- page_size_t(space->flags),
+ space->zip_size(),
RW_X_LATCH, NULL,
BUF_GET_POSSIBLY_FREED,
__FILE__, __LINE__, &mtr, &err);
@@ -5281,7 +5277,7 @@ lock_rec_insert_check_and_lock(
trx_t* trx = thr_get_trx(thr);
const rec_t* next_rec = page_rec_get_next_const(rec);
ulint heap_no = page_rec_get_heap_no(next_rec);
- ut_ad(!rec_is_metadata(next_rec, index));
+ ut_ad(!rec_is_metadata(next_rec, *index));
lock_mutex_enter();
/* Because this code is invoked for a running transaction by
@@ -5409,7 +5405,7 @@ lock_rec_convert_impl_to_expl_for_trx(
{
ut_ad(trx->is_referenced());
ut_ad(page_rec_is_leaf(rec));
- ut_ad(!rec_is_metadata(rec, index));
+ ut_ad(!rec_is_metadata(rec, *index));
DEBUG_SYNC_C("before_lock_rec_convert_impl_to_expl_for_trx");
@@ -5547,7 +5543,7 @@ lock_rec_convert_impl_to_expl(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!page_rec_is_comp(rec) == !rec_offs_comp(offsets));
ut_ad(page_rec_is_leaf(rec));
- ut_ad(!rec_is_metadata(rec, index));
+ ut_ad(!rec_is_metadata(rec, *index));
if (dict_index_is_clust(index)) {
trx_id_t trx_id;
@@ -5624,7 +5620,7 @@ lock_clust_rec_modify_check_and_lock(
return(DB_SUCCESS);
}
- ut_ad(!rec_is_metadata(rec, index));
+ ut_ad(!rec_is_metadata(rec, *index));
ut_ad(!index->table->is_temporary());
heap_no = rec_offs_comp(offsets)
@@ -5680,7 +5676,7 @@ lock_sec_rec_modify_check_and_lock(
ut_ad(block->frame == page_align(rec));
ut_ad(mtr->is_named_space(index->table->space));
ut_ad(page_rec_is_leaf(rec));
- ut_ad(!rec_is_metadata(rec, index));
+ ut_ad(!rec_is_metadata(rec, *index));
if (flags & BTR_NO_LOCKING_FLAG) {
@@ -5774,7 +5770,7 @@ lock_sec_rec_read_check_and_lock(
return(DB_SUCCESS);
}
- ut_ad(!rec_is_metadata(rec, index));
+ ut_ad(!rec_is_metadata(rec, *index));
heap_no = page_rec_get_heap_no(rec);
/* Some transaction may have an implicit x-lock on the record only
@@ -5836,7 +5832,7 @@ lock_clust_rec_read_check_and_lock(
|| gap_mode == LOCK_REC_NOT_GAP);
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(page_rec_is_leaf(rec));
- ut_ad(!rec_is_metadata(rec, index));
+ ut_ad(!rec_is_metadata(rec, *index));
if ((flags & BTR_NO_LOCKING_FLAG)
|| srv_read_only_mode
@@ -6107,10 +6103,8 @@ lock_get_table_id(
/*==============*/
const lock_t* lock) /*!< in: lock */
{
- dict_table_t* table;
-
- table = lock_get_table(lock);
-
+ dict_table_t* table = lock_get_table(lock);
+ ut_ad(!table->is_temporary());
return(table->id);
}
@@ -6373,6 +6367,12 @@ lock_trx_handle_wait(
/*=================*/
trx_t* trx) /*!< in/out: trx lock state */
{
+#ifdef WITH_WSREP
+ /* We already own mutexes */
+ if (trx->lock.was_chosen_as_wsrep_victim) {
+ return lock_trx_handle_wait_low(trx);
+ }
+#endif /* WITH_WSREP */
lock_mutex_enter();
trx_mutex_enter(trx);
dberr_t err = lock_trx_handle_wait_low(trx);
@@ -6970,6 +6970,11 @@ DeadlockChecker::trx_rollback()
trx_t* trx = m_wait_lock->trx;
print("*** WE ROLL BACK TRANSACTION (1)\n");
+#ifdef WITH_WSREP
+ if (wsrep_on(trx->mysql_thd)) {
+ wsrep_handle_SR_rollback(m_start->mysql_thd, trx->mysql_thd);
+ }
+#endif
trx_mutex_enter(trx);
@@ -7055,6 +7060,12 @@ DeadlockChecker::check_and_resolve(const lock_t* lock, trx_t* trx)
if (victim_trx != NULL) {
print("*** WE ROLL BACK TRANSACTION (2)\n");
+#ifdef WITH_WSREP
+ if (wsrep_on(trx->mysql_thd)) {
+ wsrep_handle_SR_rollback(trx->mysql_thd,
+ victim_trx->mysql_thd);
+ }
+#endif
lock_deadlock_found = true;
}
diff --git a/storage/innobase/lock/lock0wait.cc b/storage/innobase/lock/lock0wait.cc
index 721f6151c36..8869290901c 100644
--- a/storage/innobase/lock/lock0wait.cc
+++ b/storage/innobase/lock/lock0wait.cc
@@ -285,7 +285,7 @@ lock_wait_suspend_thread(
if (thr->lock_state == QUE_THR_LOCK_ROW) {
srv_stats.n_lock_wait_count.inc();
- srv_stats.n_lock_wait_current_count.inc();
+ srv_stats.n_lock_wait_current_count++;
if (ut_usectime(&sec, &ms) == -1) {
start_time = -1;
@@ -398,7 +398,7 @@ lock_wait_suspend_thread(
thd_storage_lock_wait(trx->mysql_thd, diff_time);
}
- srv_stats.n_lock_wait_current_count.dec();
+ srv_stats.n_lock_wait_current_count--;
DBUG_EXECUTE_IF("lock_instrument_slow_query_log",
os_thread_sleep(1000););
diff --git a/storage/innobase/log/log0crypt.cc b/storage/innobase/log/log0crypt.cc
index dff9661c6eb..7ad39da29ec 100644
--- a/storage/innobase/log/log0crypt.cc
+++ b/storage/innobase/log/log0crypt.cc
@@ -82,19 +82,62 @@ log_block_get_start_lsn(
return start_lsn;
}
+/** Generate crypt key from crypt msg.
+@param[in,out] info encryption key
+@param[in] upgrade whether to use the key in MariaDB 10.1 format
+@return whether the operation was successful */
+static bool init_crypt_key(crypt_info_t* info, bool upgrade = false)
+{
+ byte mysqld_key[MY_AES_MAX_KEY_LENGTH];
+ uint keylen = sizeof mysqld_key;
+
+ compile_time_assert(16 == sizeof info->crypt_key);
+
+ if (uint rc = encryption_key_get(LOG_DEFAULT_ENCRYPTION_KEY,
+ info->key_version, mysqld_key,
+ &keylen)) {
+ ib::error()
+ << "Obtaining redo log encryption key version "
+ << info->key_version << " failed (" << rc
+ << "). Maybe the key or the required encryption "
+ "key management plugin was not found.";
+ return false;
+ }
+
+ if (upgrade) {
+ while (keylen < sizeof mysqld_key) {
+ mysqld_key[keylen++] = 0;
+ }
+ }
+
+ uint dst_len;
+ int err= my_aes_crypt(MY_AES_ECB,
+ ENCRYPTION_FLAG_NOPAD | ENCRYPTION_FLAG_ENCRYPT,
+ info->crypt_msg.bytes, sizeof info->crypt_msg,
+ info->crypt_key.bytes, &dst_len,
+ mysqld_key, keylen, NULL, 0);
+
+ if (err != MY_AES_OK || dst_len != MY_AES_BLOCK_SIZE) {
+ ib::error() << "Getting redo log crypto key failed: err = "
+ << err << ", len = " << dst_len;
+ return false;
+ }
+
+ return true;
+}
+
/** Encrypt or decrypt log blocks.
@param[in,out] buf log blocks to encrypt or decrypt
@param[in] lsn log sequence number of the start of the buffer
@param[in] size size of the buffer, in bytes
-@param[in] decrypt whether to decrypt instead of encrypting */
-UNIV_INTERN
-void
-log_crypt(byte* buf, lsn_t lsn, ulint size, bool decrypt)
+@param[in] op whether to decrypt, encrypt, or rotate key and encrypt
+@return whether the operation succeeded (encrypt always does) */
+bool log_crypt(byte* buf, lsn_t lsn, ulint size, log_crypt_t op)
{
ut_ad(size % OS_FILE_LOG_BLOCK_SIZE == 0);
+ ut_ad(ulint(buf) % OS_FILE_LOG_BLOCK_SIZE == 0);
ut_a(info.key_version);
- uint dst_len;
uint32_t aes_ctr_iv[MY_AES_BLOCK_SIZE / sizeof(uint32_t)];
compile_time_assert(sizeof(uint32_t) == 4);
@@ -103,7 +146,8 @@ log_crypt(byte* buf, lsn_t lsn, ulint size, bool decrypt)
for (const byte* const end = buf + size; buf != end;
buf += OS_FILE_LOG_BLOCK_SIZE, lsn += OS_FILE_LOG_BLOCK_SIZE) {
- uint32_t dst[(OS_FILE_LOG_BLOCK_SIZE - LOG_CRYPT_HDR_SIZE)
+ uint32_t dst[(OS_FILE_LOG_BLOCK_SIZE - LOG_CRYPT_HDR_SIZE
+ - LOG_BLOCK_CHECKSUM)
/ sizeof(uint32_t)];
/* The log block number is not encrypted. */
@@ -123,64 +167,61 @@ log_crypt(byte* buf, lsn_t lsn, ulint size, bool decrypt)
ut_ad(log_block_get_start_lsn(lsn,
log_block_get_hdr_no(buf))
== lsn);
+ byte* key_ver = &buf[OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_KEY
+ - LOG_BLOCK_CHECKSUM];
+ const uint dst_size
+ = log_sys.log.format == LOG_HEADER_FORMAT_ENC_10_4
+ ? sizeof dst - LOG_BLOCK_KEY
+ : sizeof dst;
+ if (log_sys.log.format == LOG_HEADER_FORMAT_ENC_10_4) {
+ const uint key_version = info.key_version;
+ switch (op) {
+ case LOG_ENCRYPT_ROTATE_KEY:
+ info.key_version
+ = encryption_key_get_latest_version(
+ LOG_DEFAULT_ENCRYPTION_KEY);
+ if (key_version != info.key_version
+ && !init_crypt_key(&info)) {
+ info.key_version = key_version;
+ }
+ /* fall through */
+ case LOG_ENCRYPT:
+ mach_write_to_4(key_ver, info.key_version);
+ break;
+ case LOG_DECRYPT:
+ info.key_version = mach_read_from_4(key_ver);
+ if (key_version != info.key_version
+ && !init_crypt_key(&info)) {
+ return false;
+ }
+ }
+#ifndef DBUG_OFF
+ if (key_version != info.key_version) {
+ DBUG_PRINT("ib_log", ("key_version: %x -> %x",
+ key_version,
+ info.key_version));
+ }
+#endif /* !DBUG_OFF */
+ }
+ ut_ad(LOG_CRYPT_HDR_SIZE + dst_size
+ == log_sys.trailer_offset());
+
+ uint dst_len;
int rc = encryption_crypt(
- buf + LOG_CRYPT_HDR_SIZE, sizeof dst,
+ buf + LOG_CRYPT_HDR_SIZE, dst_size,
reinterpret_cast<byte*>(dst), &dst_len,
const_cast<byte*>(info.crypt_key.bytes),
sizeof info.crypt_key,
reinterpret_cast<byte*>(aes_ctr_iv), sizeof aes_ctr_iv,
- decrypt
+ op == LOG_DECRYPT
? ENCRYPTION_FLAG_DECRYPT | ENCRYPTION_FLAG_NOPAD
: ENCRYPTION_FLAG_ENCRYPT | ENCRYPTION_FLAG_NOPAD,
LOG_DEFAULT_ENCRYPTION_KEY,
info.key_version);
-
ut_a(rc == MY_AES_OK);
- ut_a(dst_len == sizeof dst);
- memcpy(buf + LOG_CRYPT_HDR_SIZE, dst, sizeof dst);
- }
-}
-
-/** Generate crypt key from crypt msg.
-@param[in,out] info encryption key
-@param[in] upgrade whether to use the key in MariaDB 10.1 format
-@return whether the operation was successful */
-static bool init_crypt_key(crypt_info_t* info, bool upgrade = false)
-{
- byte mysqld_key[MY_AES_MAX_KEY_LENGTH];
- uint keylen = sizeof mysqld_key;
-
- compile_time_assert(16 == sizeof info->crypt_key);
-
- if (uint rc = encryption_key_get(LOG_DEFAULT_ENCRYPTION_KEY,
- info->key_version, mysqld_key,
- &keylen)) {
- ib::error()
- << "Obtaining redo log encryption key version "
- << info->key_version << " failed (" << rc
- << "). Maybe the key or the required encryption "
- "key management plugin was not found.";
- return false;
- }
-
- if (upgrade) {
- while (keylen < sizeof mysqld_key) {
- mysqld_key[keylen++] = 0;
- }
- }
-
- uint dst_len;
- int err= my_aes_crypt(MY_AES_ECB,
- ENCRYPTION_FLAG_NOPAD | ENCRYPTION_FLAG_ENCRYPT,
- info->crypt_msg.bytes, sizeof info->crypt_msg,
- info->crypt_key.bytes, &dst_len,
- mysqld_key, keylen, NULL, 0);
-
- if (err != MY_AES_OK || dst_len != MY_AES_BLOCK_SIZE) {
- ib::error() << "Getting redo log crypto key failed: err = "
- << err << ", len = " << dst_len;
- return false;
+ ut_a(dst_len == dst_size);
+ memcpy(buf + LOG_CRYPT_HDR_SIZE, dst, dst_size);
}
return true;
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index 409b42c452a..3a8bd63fcdd 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -255,9 +255,9 @@ log_calculate_actual_len(
{
ut_ad(log_mutex_own());
+ const ulint framing_size = log_sys.framing_size();
/* actual length stored per block */
- const ulint len_per_blk = OS_FILE_LOG_BLOCK_SIZE
- - (LOG_BLOCK_HDR_SIZE + LOG_BLOCK_TRL_SIZE);
+ const ulint len_per_blk = OS_FILE_LOG_BLOCK_SIZE - framing_size;
/* actual data length in last block already written */
ulint extra_len = (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE);
@@ -266,8 +266,7 @@ log_calculate_actual_len(
extra_len -= LOG_BLOCK_HDR_SIZE;
/* total extra length for block header and trailer */
- extra_len = ((len + extra_len) / len_per_blk)
- * (LOG_BLOCK_HDR_SIZE + LOG_BLOCK_TRL_SIZE);
+ extra_len = ((len + extra_len) / len_per_blk) * framing_size;
return(len + extra_len);
}
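A quick check of the framing arithmetic above, assuming the traditional unencrypted layout where OS_FILE_LOG_BLOCK_SIZE is 512, the block header is 12 bytes and the trailer is the 4-byte checksum, so framing_size() == 16 and len_per_blk == 496 (constants assumed here for illustration; the log_sys helpers themselves are declared with log_t and are not shown in this diff):

    #include <cassert>

    // Illustrative only: mirrors log_calculate_actual_len() with assumed
    // constants (512-byte block, 12-byte header, 4-byte checksum trailer).
    int main()
    {
        const unsigned block = 512, hdr = 12, trl = 4;
        const unsigned framing = hdr + trl;            // 16, i.e. framing_size()
        const unsigned len_per_blk = block - framing;  // 496 payload bytes per block

        // 100 bytes already used in the current block, 1000 more to append:
        unsigned extra = 100 - hdr;                    // 88 payload bytes in the last block
        unsigned actual = 1000 + (1000 + extra) / len_per_blk * framing;
        assert(actual == 1032);                        // crosses two block boundaries
    }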
@@ -399,26 +398,24 @@ log_write_low(
ulint str_len) /*!< in: string length */
{
ulint len;
- ulint data_len;
- byte* log_block;
ut_ad(log_mutex_own());
+ const ulint trailer_offset = log_sys.trailer_offset();
part_loop:
/* Calculate a part length */
- data_len = (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE) + str_len;
+ ulint data_len = (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE) + str_len;
- if (data_len <= OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) {
+ if (data_len <= trailer_offset) {
/* The string fits within the current log block */
len = str_len;
} else {
- data_len = OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE;
+ data_len = trailer_offset;
- len = OS_FILE_LOG_BLOCK_SIZE
- - (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE)
- - LOG_BLOCK_TRL_SIZE;
+ len = trailer_offset
+ - log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE;
}
memcpy(log_sys.buf + log_sys.buf_free, str, len);
@@ -426,18 +423,18 @@ part_loop:
str_len -= len;
str = str + len;
- log_block = static_cast<byte*>(
+ byte* log_block = static_cast<byte*>(
ut_align_down(log_sys.buf + log_sys.buf_free,
OS_FILE_LOG_BLOCK_SIZE));
log_block_set_data_len(log_block, data_len);
- if (data_len == OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) {
+ if (data_len == trailer_offset) {
/* This block became full */
log_block_set_data_len(log_block, OS_FILE_LOG_BLOCK_SIZE);
log_block_set_checkpoint_no(log_block,
log_sys.next_checkpoint_no);
- len += LOG_BLOCK_HDR_SIZE + LOG_BLOCK_TRL_SIZE;
+ len += log_sys.framing_size();
log_sys.lsn += len;
@@ -665,8 +662,7 @@ void log_t::files::create(ulint n_files)
this->n_files= n_files;
format= srv_encrypt_log
- ? LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED
- : LOG_HEADER_FORMAT_CURRENT;
+ ? LOG_HEADER_FORMAT_ENC_10_4 : LOG_HEADER_FORMAT_10_4;
subformat= 2;
file_size= srv_log_file_size;
lsn= LOG_START_LSN;
@@ -688,8 +684,8 @@ log_file_header_flush(
ut_ad(log_write_mutex_own());
ut_ad(!recv_no_log_write);
ut_a(nth_file < log_sys.log.n_files);
- ut_ad((log_sys.log.format & ~LOG_HEADER_FORMAT_ENCRYPTED)
- == LOG_HEADER_FORMAT_CURRENT);
+ ut_ad(log_sys.log.format == LOG_HEADER_FORMAT_10_4
+ || log_sys.log.format == LOG_HEADER_FORMAT_ENC_10_4);
// man 2 open suggests this buffer to be aligned by 512 for O_DIRECT
MY_ALIGNED(OS_FILE_LOG_BLOCK_SIZE)
@@ -720,7 +716,7 @@ log_file_header_flush(
fil_io(IORequestLogWrite, true,
page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no),
- univ_page_size,
+ 0,
ulint(dest_offset & (srv_page_size - 1)),
OS_FILE_LOG_BLOCK_SIZE, buf, NULL);
@@ -839,7 +835,7 @@ loop:
fil_io(IORequestLogWrite, true,
page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no),
- univ_page_size,
+ 0,
ulint(next_offset & (srv_page_size - 1)), write_len, buf, NULL);
srv_stats.os_log_pending_writes.dec();
@@ -927,11 +923,9 @@ wait and check if an already running write is covering the request.
@param[in] lsn log sequence number that should be
included in the redo log file write
@param[in] flush_to_disk whether the written log should also
-be flushed to the file system */
-void
-log_write_up_to(
- lsn_t lsn,
- bool flush_to_disk)
+be flushed to the file system
+@param[in] rotate_key whether to rotate the encryption key */
+void log_write_up_to(lsn_t lsn, bool flush_to_disk, bool rotate_key)
{
#ifdef UNIV_DEBUG
ulint loop_count = 0;
@@ -940,6 +934,7 @@ log_write_up_to(
lsn_t write_lsn;
ut_ad(!srv_read_only_mode);
+ ut_ad(!rotate_key || flush_to_disk);
if (recv_no_ibuf_operations) {
/* Recovery is running and no operations on the log files are
@@ -1085,7 +1080,8 @@ loop:
if (log_sys.is_encrypted()) {
log_crypt(write_buf + area_start, log_sys.write_lsn,
- area_end - area_start);
+ area_end - area_start,
+ rotate_key ? LOG_ENCRYPT_ROTATE_KEY : LOG_ENCRYPT);
}
/* Do the write to the log files */
@@ -1340,7 +1336,7 @@ log_group_checkpoint(lsn_t end_lsn)
fil_io(IORequestLogWrite, false,
page_id_t(SRV_LOG_SPACE_FIRST_ID, 0),
- univ_page_size,
+ 0,
(log_sys.next_checkpoint_no & 1)
? LOG_CHECKPOINT_2 : LOG_CHECKPOINT_1,
OS_FILE_LOG_BLOCK_SIZE,
@@ -1360,7 +1356,7 @@ void log_header_read(ulint header)
fil_io(IORequestLogRead, true,
page_id_t(SRV_LOG_SPACE_FIRST_ID,
header >> srv_page_size_shift),
- univ_page_size, header & (srv_page_size - 1),
+ 0, header & (srv_page_size - 1),
OS_FILE_LOG_BLOCK_SIZE, log_sys.checkpoint_buf, NULL);
}
@@ -1491,7 +1487,7 @@ bool log_checkpoint(bool sync)
log_mutex_exit();
- log_write_up_to(flush_lsn, true);
+ log_write_up_to(flush_lsn, true, true);
log_mutex_enter();
@@ -2047,13 +2043,9 @@ log_pad_current_log_block(void)
/* We retrieve lsn only because otherwise gcc crashed on HP-UX */
lsn = log_reserve_and_open(OS_FILE_LOG_BLOCK_SIZE);
- pad_length = OS_FILE_LOG_BLOCK_SIZE
- - (log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE)
- - LOG_BLOCK_TRL_SIZE;
- if (pad_length
- == (OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE
- - LOG_BLOCK_TRL_SIZE)) {
-
+ pad_length = log_sys.trailer_offset()
+ - log_sys.buf_free % OS_FILE_LOG_BLOCK_SIZE;
+ if (pad_length == log_sys.payload_size()) {
pad_length = 0;
}
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index 686f1232361..420f186b5fb 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -52,7 +52,6 @@ Created 9/20/1997 Heikki Tuuri
#include "trx0undo.h"
#include "trx0rec.h"
#include "fil0fil.h"
-#include "row0trunc.h"
#include "buf0rea.h"
#include "srv0srv.h"
#include "srv0start.h"
@@ -219,10 +218,6 @@ corresponding to MLOG_INDEX_LOAD.
*/
void (*log_optimized_ddl_op)(ulint space_id);
-/** Report backup-unfriendly TRUNCATE operation (with separate log file),
-corresponding to MLOG_TRUNCATE. */
-void (*log_truncate)();
-
/** Report an operation to create, delete, or rename a file during backup.
@param[in] space_id tablespace identifier
@param[in] flags tablespace flags (NULL if not create)
@@ -323,13 +318,13 @@ public:
continue;
}
if (buf_block_t* block = buf_page_get_gen(
- i->first, univ_page_size, RW_X_LATCH, NULL,
+ i->first, 0, RW_X_LATCH, NULL,
BUF_GET_IF_IN_POOL, __FILE__, __LINE__,
&mtr, NULL)) {
mutex_exit(&recv_sys->mutex);
ibuf_merge_or_delete_for_page(
block, i->first,
- &block->page.size, true);
+ block->zip_size(), true);
mtr.commit();
mtr.start();
mutex_enter(&recv_sys->mutex);
@@ -764,7 +759,7 @@ recv_sys_var_init(void)
recv_previous_parsed_rec_type = MLOG_SINGLE_REC_FLAG;
recv_previous_parsed_rec_offset = 0;
recv_previous_parsed_rec_is_multi = 0;
- recv_n_pool_free_frames = 256;
+ recv_n_pool_free_frames = 384;
recv_max_page_lsn = 0;
}
@@ -943,7 +938,7 @@ loop:
fil_io(IORequestLogRead, true,
page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no),
- univ_page_size,
+ 0,
ulint(source_offset & (srv_page_size - 1)),
len, buf, NULL);
@@ -985,16 +980,18 @@ fail:
goto fail;
}
- if (is_encrypted()) {
- log_crypt(buf, *start_lsn,
- OS_FILE_LOG_BLOCK_SIZE, true);
+ if (is_encrypted()
+ && !log_crypt(buf, *start_lsn,
+ OS_FILE_LOG_BLOCK_SIZE,
+ LOG_DECRYPT)) {
+ goto fail;
}
}
ulint dl = log_block_get_data_len(buf);
if (dl < LOG_BLOCK_HDR_SIZE
- || (dl > OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE
- && dl != OS_FILE_LOG_BLOCK_SIZE)) {
+ || (dl != OS_FILE_LOG_BLOCK_SIZE
+ && dl > log_sys.trailer_offset())) {
recv_sys->found_corrupt_log = true;
goto fail;
}
@@ -1157,7 +1154,7 @@ static dberr_t recv_log_format_0_recover(lsn_t lsn, bool crypt)
fil_io(IORequestLogRead, true,
page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no),
- univ_page_size,
+ 0,
ulint((source_offset & ~(OS_FILE_LOG_BLOCK_SIZE - 1))
& (srv_page_size - 1)),
OS_FILE_LOG_BLOCK_SIZE, buf, NULL);
@@ -1195,52 +1192,6 @@ static dberr_t recv_log_format_0_recover(lsn_t lsn, bool crypt)
return(DB_SUCCESS);
}
-/** Determine if a redo log from MariaDB 10.4 is clean.
-@return error code
-@retval DB_SUCCESS if the redo log is clean
-@retval DB_CORRUPTION if the redo log is corrupted
-@retval DB_ERROR if the redo log is not empty */
-static dberr_t recv_log_recover_10_4()
-{
- ut_ad(!log_sys.is_encrypted());
- const lsn_t lsn = log_sys.log.get_lsn();
- const lsn_t source_offset = log_sys.log.calc_lsn_offset(lsn);
- const ulint page_no
- = (ulint) (source_offset / univ_page_size.physical());
- byte* buf = log_sys.buf;
-
- fil_io(IORequestLogRead, true,
- page_id_t(SRV_LOG_SPACE_FIRST_ID, page_no),
- univ_page_size,
- (ulint) ((source_offset & ~(OS_FILE_LOG_BLOCK_SIZE - 1))
- % univ_page_size.physical()),
- OS_FILE_LOG_BLOCK_SIZE, buf, NULL);
-
- if (log_block_calc_checksum(buf) != log_block_get_checksum(buf)) {
- return DB_CORRUPTION;
- }
-
- /* On a clean shutdown, the redo log will be logically empty
- after the checkpoint lsn. */
-
- if (log_block_get_data_len(buf)
- != (source_offset & (OS_FILE_LOG_BLOCK_SIZE - 1))) {
- return DB_ERROR;
- }
-
- /* Mark the redo log for downgrading. */
- srv_log_file_size = 0;
- recv_sys->parse_start_lsn = recv_sys->recovered_lsn
- = recv_sys->scanned_lsn
- = recv_sys->mlog_checkpoint_lsn = lsn;
- log_sys.last_checkpoint_lsn = log_sys.next_checkpoint_lsn
- = log_sys.lsn = log_sys.write_lsn
- = log_sys.current_flush_lsn = log_sys.flushed_to_disk_lsn
- = lsn;
- log_sys.next_checkpoint_no = 0;
- return DB_SUCCESS;
-}
-
/** Find the latest checkpoint in the log header.
@param[out] max_field LOG_CHECKPOINT_1 or LOG_CHECKPOINT_2
@return error code or DB_SUCCESS */
@@ -1281,11 +1232,10 @@ recv_find_max_checkpoint(ulint* max_field)
return(recv_find_max_checkpoint_0(max_field));
case LOG_HEADER_FORMAT_10_2:
case LOG_HEADER_FORMAT_10_2 | LOG_HEADER_FORMAT_ENCRYPTED:
- case LOG_HEADER_FORMAT_CURRENT:
- case LOG_HEADER_FORMAT_CURRENT | LOG_HEADER_FORMAT_ENCRYPTED:
+ case LOG_HEADER_FORMAT_10_3:
+ case LOG_HEADER_FORMAT_10_3 | LOG_HEADER_FORMAT_ENCRYPTED:
case LOG_HEADER_FORMAT_10_4:
- /* We can only parse the unencrypted LOG_HEADER_FORMAT_10_4.
- The encrypted format uses a larger redo log block trailer. */
+ case LOG_HEADER_FORMAT_10_4 | LOG_HEADER_FORMAT_ENCRYPTED:
break;
default:
ib::error() << "Unsupported redo log format."
@@ -1350,19 +1300,7 @@ recv_find_max_checkpoint(ulint* max_field)
return(DB_ERROR);
}
- if (log_sys.log.format == LOG_HEADER_FORMAT_10_4) {
- dberr_t err = recv_log_recover_10_4();
- if (err != DB_SUCCESS) {
- ib::error()
- << "Downgrade after a crash is not supported."
- " The redo log was created with " << creator
- << (err == DB_ERROR
- ? "." : ", and it appears corrupted.");
- }
- return err;
- }
-
- return DB_SUCCESS;
+ return(DB_SUCCESS);
}
/** Try to parse a single log record body and also applies it if
@@ -1410,14 +1348,10 @@ recv_parse_or_apply_log_rec_body(
}
return(ptr + 8);
case MLOG_TRUNCATE:
- if (log_truncate) {
- ut_ad(srv_operation != SRV_OPERATION_NORMAL);
- log_truncate();
- recv_sys->found_corrupt_fs = true;
- return NULL;
- }
- return(truncate_t::parse_redo_entry(ptr, end_ptr, space_id));
-
+ ib::error() << "Cannot crash-upgrade from "
+ "old-style TRUNCATE TABLE";
+ recv_sys->found_corrupt_log = true;
+ return NULL;
default:
break;
}
@@ -1471,6 +1405,7 @@ parse_log:
break;
#endif /* UNIV_LOG_LSN_DEBUG */
case MLOG_1BYTE: case MLOG_2BYTES: case MLOG_4BYTES: case MLOG_8BYTES:
+ case MLOG_MEMSET:
#ifdef UNIV_DEBUG
if (page && page_type == FIL_PAGE_TYPE_ALLOCATED
&& end_ptr >= ptr + 2) {
@@ -1738,12 +1673,17 @@ parse_log:
break;
case MLOG_IBUF_BITMAP_INIT:
/* Allow anything in page_type when creating a page. */
- ptr = ibuf_parse_bitmap_init(ptr, end_ptr, block, mtr);
+ if (block) ibuf_bitmap_init_apply(block);
break;
case MLOG_INIT_FILE_PAGE2:
/* Allow anything in page_type when creating a page. */
if (block) fsp_apply_init_file_page(block);
break;
+ case MLOG_INIT_FREE_PAGE:
+ /* The page can be zero-filled and its previous
+ contents can be ignored. We do not write or apply
+ this record yet. */
+ break;
case MLOG_WRITE_STRING:
ptr = mlog_parse_string(ptr, end_ptr, page, page_zip);
break;
@@ -1926,6 +1866,7 @@ recv_add_to_hash_table(
switch (type) {
case MLOG_INIT_FILE_PAGE2:
case MLOG_ZIP_PAGE_COMPRESS:
+ case MLOG_INIT_FREE_PAGE:
/* Ignore any earlier redo log records for this page. */
ut_ad(recv_addr->state == RECV_NOT_PROCESSED
|| recv_addr->state == RECV_WILL_NOT_READ);
@@ -2004,9 +1945,10 @@ lsn of a log record.
@param[in,out] block buffer pool page
@param[in,out] mtr mini-transaction
@param[in,out] recv_addr recovery address
-@param[in] init_lsn the initial LSN where to start recovery */
+@param[in,out] init page initialization operation, or NULL */
static void recv_recover_page(buf_block_t* block, mtr_t& mtr,
- recv_addr_t* recv_addr, lsn_t init_lsn = 0)
+ recv_addr_t* recv_addr,
+ mlog_init_t::init* init = NULL)
{
page_t* page;
page_zip_des_t* page_zip;
@@ -2016,6 +1958,8 @@ static void recv_recover_page(buf_block_t* block, mtr_t& mtr,
ut_ad(recv_needed_recovery);
ut_ad(recv_addr->state != RECV_BEING_PROCESSED);
ut_ad(recv_addr->state != RECV_PROCESSED);
+ ut_ad(!init || init->created);
+ ut_ad(!init || init->lsn);
if (UNIV_UNLIKELY(srv_print_verbose_log == 2)) {
fprintf(stderr, "Applying log to page %u:%u\n",
@@ -2037,19 +1981,9 @@ static void recv_recover_page(buf_block_t* block, mtr_t& mtr,
page_lsn = mach_read_from_8(page + FIL_PAGE_LSN);
}
+ bool free_page = false;
lsn_t start_lsn = 0, end_lsn = 0;
- fil_space_t* space;
-
- if (srv_is_tablespace_truncated(recv_addr->space)) {
- /* The table will be truncated after applying
- normal redo log records. */
- goto skip_log;
- }
-
- space = fil_space_acquire(recv_addr->space);
- if (!space) {
- goto skip_log;
- }
+ const lsn_t init_lsn = init ? init->lsn : 0;
for (recv_t* recv = UT_LIST_GET_FIRST(recv_addr->rec_list);
recv; recv = UT_LIST_GET_NEXT(rec_list, recv)) {
@@ -2069,18 +2003,11 @@ static void recv_recover_page(buf_block_t* block, mtr_t& mtr,
<< get_mlog_string(recv->type)
<< " LSN " << recv->start_lsn << " < "
<< init_lsn);
- } else if (srv_was_tablespace_truncated(space)
- && recv->start_lsn
- < truncate_t::get_truncated_tablespace_init_lsn(
- recv_addr->space)) {
- /* If per-table tablespace was truncated and
- there exist REDO records before truncate that
- are to be applied as part of recovery
- (checkpoint didn't happen since truncate was
- done) skip such records using lsn check as
- they may not stand valid post truncate. */
} else {
- if (!start_lsn) {
+ if (recv->type == MLOG_INIT_FREE_PAGE) {
+ /* This does not really modify the page. */
+ free_page = true;
+ } else if (!start_lsn) {
start_lsn = recv->start_lsn;
}
@@ -2131,9 +2058,6 @@ static void recv_recover_page(buf_block_t* block, mtr_t& mtr,
}
}
- space->release();
-
-skip_log:
#ifdef UNIV_ZIP_DEBUG
ut_ad(!fil_page_index_page_check(page)
|| !page_zip
@@ -2142,8 +2066,15 @@ skip_log:
if (start_lsn) {
log_flush_order_mutex_enter();
- buf_flush_recv_note_modification(block, start_lsn, end_lsn);
+ buf_flush_note_modification(block, start_lsn, end_lsn, NULL);
log_flush_order_mutex_exit();
+ } else if (free_page && init) {
+ /* There have been no operations other than MLOG_INIT_FREE_PAGE.
+ Any buffered changes must not be merged. A subsequent
+ buf_page_create() from a user thread should discard
+ any buffered changes. */
+ init->created = false;
+ ut_ad(!mtr.has_modifications());
}
/* Make sure that committing mtr does not change the modification
@@ -2275,6 +2206,17 @@ void recv_apply_hashed_log_recs(bool last_batch)
ut_d(recv_no_log_write = recv_no_ibuf_operations);
if (ulint n = recv_sys->n_addrs) {
+ if (!log_sys.log.subformat && !srv_force_recovery
+ && srv_undo_tablespaces_open) {
+ ib::error() << "Recovery of separately logged"
+ " TRUNCATE operations is no longer supported."
+ " Set innodb_force_recovery=1"
+ " if no *trunc.log files exist";
+ recv_sys->found_corrupt_log = true;
+ mutex_exit(&recv_sys->mutex);
+ return;
+ }
+
const char* msg = last_batch
? "Starting final batch to recover "
: "Starting a batch to recover ";
@@ -2320,13 +2262,6 @@ ignore:
break;
}
- if (srv_is_tablespace_truncated(recv_addr->space)) {
- /* Avoid applying REDO log for the tablespace
- that is schedule for TRUNCATE. */
- recv_addr->state = RECV_DISCARDED;
- goto ignore;
- }
-
const page_id_t page_id(recv_addr->space,
recv_addr->page_no);
@@ -2335,8 +2270,7 @@ apply:
mtr.start();
mtr.set_log_mode(MTR_LOG_NONE);
if (buf_block_t* block = buf_page_get_gen(
- page_id, univ_page_size,
- RW_X_LATCH, NULL,
+ page_id, 0, RW_X_LATCH, NULL,
BUF_GET_IF_IN_POOL,
__FILE__, __LINE__, &mtr, NULL)) {
buf_block_dbg_add_level(
@@ -2363,7 +2297,7 @@ skip:
goto ignore;
}
- fil_space_t* space = fil_space_acquire(
+ fil_space_t* space = fil_space_acquire_for_io(
recv_addr->space);
if (!space) {
goto skip;
@@ -2371,7 +2305,7 @@ skip:
if (space->enable_lsn) {
do_read:
- space->release();
+ space->release_for_io();
recv_addr->state = RECV_NOT_PROCESSED;
goto apply;
}
@@ -2381,25 +2315,27 @@ do_read:
For those tables, no MLOG_INDEX_LOAD record
used to be written when redo logging was
disabled. Hence, we cannot optimize
- away page reads, because all the redo
- log records for initializing and
- modifying the page in the past could
- be older than the page in the data
- file.
+ away page reads when crash-upgrading
+ from MariaDB versions before 10.4,
+ because all the redo log records for
+ initializing and modifying the page in
+ the past could be older than the page
+ in the data file.
The check is too broad, causing all
tables whose names start with FTS_ to
skip the optimization. */
-
- if (strstr(space->name, "/FTS_")) {
+ if ((log_sys.log.format
+ & ~LOG_HEADER_FORMAT_ENCRYPTED)
+ != LOG_HEADER_FORMAT_10_4
+ && strstr(space->name, "/FTS_")) {
goto do_read;
}
mtr.start();
mtr.set_log_mode(MTR_LOG_NONE);
buf_block_t* block = buf_page_create(
- page_id, page_size_t(space->flags),
- &mtr);
+ page_id, space->zip_size(), &mtr);
if (recv_addr->state == RECV_PROCESSED) {
/* The page happened to exist
in the buffer pool, or it was
@@ -2414,11 +2350,11 @@ do_read:
block, SYNC_NO_ORDER_CHECK);
mtr.x_latch_at_savepoint(0, block);
recv_recover_page(block, mtr,
- recv_addr, i.lsn);
+ recv_addr, &i);
ut_ad(mtr.has_committed());
}
- space->release();
+ space->release_for_io();
}
}
}
@@ -2604,17 +2540,12 @@ recv_calc_lsn_on_data_add(
ib_uint64_t len) /*!< in: this many bytes of data is
added, log block headers not included */
{
- ulint frag_len;
- ib_uint64_t lsn_len;
-
- frag_len = (lsn % OS_FILE_LOG_BLOCK_SIZE) - LOG_BLOCK_HDR_SIZE;
- ut_ad(frag_len < OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE
- - LOG_BLOCK_TRL_SIZE);
- lsn_len = len;
- lsn_len += (lsn_len + frag_len)
- / (OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE
- - LOG_BLOCK_TRL_SIZE)
- * (LOG_BLOCK_HDR_SIZE + LOG_BLOCK_TRL_SIZE);
+ unsigned frag_len = (lsn % OS_FILE_LOG_BLOCK_SIZE) - LOG_BLOCK_HDR_SIZE;
+ unsigned payload_size = log_sys.payload_size();
+ ut_ad(frag_len < payload_size);
+ lsn_t lsn_len = len;
+ lsn_len += (lsn_len + frag_len) / payload_size
+ * (OS_FILE_LOG_BLOCK_SIZE - payload_size);
return(lsn + lsn_len);
}
@@ -3085,11 +3016,7 @@ bool recv_sys_add_to_parsing_buf(const byte* log_block, lsn_t scanned_lsn)
start_offset = LOG_BLOCK_HDR_SIZE;
}
- end_offset = data_len;
-
- if (end_offset > OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) {
- end_offset = OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE;
- }
+ end_offset = std::min<ulint>(data_len, log_sys.trailer_offset());
ut_ad(start_offset <= end_offset);
@@ -3401,10 +3328,12 @@ recv_init_missing_space(dberr_t err, const recv_spaces_t::const_iterator& i)
{
if (srv_operation == SRV_OPERATION_RESTORE
|| srv_operation == SRV_OPERATION_RESTORE_EXPORT) {
- ib::warn() << "Tablespace " << i->first << " was not"
- " found at " << i->second.name << " when"
- " restoring a (partial?) backup. All redo log"
- " for this file will be ignored!";
+ if (i->second.name.find(TEMP_TABLE_PATH_PREFIX) != std::string::npos) {
+ ib::warn() << "Tablespace " << i->first << " was not"
+ " found at " << i->second.name << " when"
+ " restoring a (partial?) backup. All redo log"
+ " for this file will be ignored!";
+ }
return(err);
}
@@ -4151,6 +4080,12 @@ static const char* get_mlog_string(mlog_id_t type)
case MLOG_TRUNCATE:
return("MLOG_TRUNCATE");
+ case MLOG_MEMSET:
+ return("MLOG_MEMSET");
+
+ case MLOG_INIT_FREE_PAGE:
+ return("MLOG_INIT_FREE_PAGE");
+
case MLOG_FILE_WRITE_CRYPT_DATA:
return("MLOG_FILE_WRITE_CRYPT_DATA");
}
diff --git a/storage/innobase/mtr/mtr0log.cc b/storage/innobase/mtr/mtr0log.cc
index 6baf1f06bf9..c9a6de8c902 100644
--- a/storage/innobase/mtr/mtr0log.cc
+++ b/storage/innobase/mtr/mtr0log.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, MariaDB Corporation.
+Copyright (c) 2017, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -121,7 +121,7 @@ mlog_parse_initial_log_record(
}
/********************************************************//**
-Parses a log record written by mlog_write_ulint or mlog_write_ull.
+Parses a log record written by mlog_write_ulint, mlog_write_ull, or mlog_memset.
@return parsed record end, NULL if not a complete record or a corrupt record */
byte*
mlog_parse_nbytes(
@@ -137,29 +137,43 @@ mlog_parse_nbytes(
ulint val;
ib_uint64_t dval;
- ut_a(type <= MLOG_8BYTES);
+ ut_ad(type <= MLOG_8BYTES || type == MLOG_MEMSET);
ut_a(!page || !page_zip
|| !fil_page_index_page_check(page));
if (end_ptr < ptr + 2) {
-
- return(NULL);
+ return NULL;
}
offset = mach_read_from_2(ptr);
ptr += 2;
- if (offset >= srv_page_size) {
- recv_sys->found_corrupt_log = TRUE;
-
- return(NULL);
+ if (UNIV_UNLIKELY(offset >= srv_page_size)) {
+ goto corrupt;
}
- if (type == MLOG_8BYTES) {
+ switch (type) {
+ case MLOG_MEMSET:
+ if (end_ptr < ptr + 3) {
+ return NULL;
+ }
+ val = mach_read_from_2(ptr);
+ ptr += 2;
+ if (UNIV_UNLIKELY(offset + val > srv_page_size)) {
+ goto corrupt;
+ }
+ if (page) {
+ memset(page + offset, *ptr, val);
+ if (page_zip) {
+ memset(static_cast<page_zip_des_t*>(page_zip)
+ ->data + offset, *ptr, val);
+ }
+ }
+ return const_cast<byte*>(++ptr);
+ case MLOG_8BYTES:
dval = mach_u64_parse_compressed(&ptr, end_ptr);
if (ptr == NULL) {
-
- return(NULL);
+ return NULL;
}
if (page) {
@@ -171,14 +185,13 @@ mlog_parse_nbytes(
mach_write_to_8(page + offset, dval);
}
- return(const_cast<byte*>(ptr));
+ return const_cast<byte*>(ptr);
+ default:
+ val = mach_parse_compressed(&ptr, end_ptr);
}
- val = mach_parse_compressed(&ptr, end_ptr);
-
if (ptr == NULL) {
-
- return(NULL);
+ return NULL;
}
switch (type) {
@@ -221,11 +234,11 @@ mlog_parse_nbytes(
break;
default:
corrupt:
- recv_sys->found_corrupt_log = TRUE;
+ recv_sys->found_corrupt_log = true;
ptr = NULL;
}
- return(const_cast<byte*>(ptr));
+ return const_cast<byte*>(ptr);
}
/********************************************************//**
@@ -409,6 +422,72 @@ mlog_parse_string(
return(ptr + len);
}
+/** Initialize a string of bytes.
+@param[in,out] b buffer page
+@param[in] ofs byte offset from block->frame
+@param[in] len length of the data to write
+@param[in] val the data byte to write
+@param[in,out] mtr mini-transaction */
+void
+mlog_memset(buf_block_t* b, ulint ofs, ulint len, byte val, mtr_t* mtr)
+{
+ ut_ad(len);
+ ut_ad(ofs <= ulint(srv_page_size));
+ ut_ad(ofs + len <= ulint(srv_page_size));
+ memset(ofs + b->frame, val, len);
+
+ mtr->set_modified();
+ switch (mtr->get_log_mode()) {
+ case MTR_LOG_NONE:
+ case MTR_LOG_NO_REDO:
+ return;
+ case MTR_LOG_SHORT_INSERTS:
+ ut_ad(0);
+ /* fall through */
+ case MTR_LOG_ALL:
+ break;
+ }
+
+ byte* l = mtr->get_log()->open(11 + 2 + 2 + 1);
+ l = mlog_write_initial_log_record_low(
+ MLOG_MEMSET, b->page.id.space(), b->page.id.page_no(), l, mtr);
+ mach_write_to_2(l, ofs);
+ mach_write_to_2(l + 2, len);
+ l[4] = val;
+ mlog_close(mtr, l + 5);
+}
+
+/** Initialize a string of bytes.
+@param[in,out] b byte address
+@param[in] len length of the data to write
+@param[in] val the data byte to write
+@param[in,out] mtr mini-transaction */
+void mlog_memset(byte* b, ulint len, byte val, mtr_t* mtr)
+{
+ ut_ad(len);
+ ut_ad(page_offset(b) + len <= ulint(srv_page_size));
+ memset(b, val, len);
+
+ mtr->set_modified();
+ switch (mtr->get_log_mode()) {
+ case MTR_LOG_NONE:
+ case MTR_LOG_NO_REDO:
+ return;
+ case MTR_LOG_SHORT_INSERTS:
+ ut_ad(0);
+ /* fall through */
+ case MTR_LOG_ALL:
+ break;
+ }
+
+ byte* l = mtr->get_log()->open(11 + 2 + 2 + 1);
+ l = mlog_write_initial_log_record_fast(b, MLOG_MEMSET, l, mtr);
+ mach_write_to_2(l, page_offset(b));
+ mach_write_to_2(l + 2, len);
+ l[4] = val;
+ mlog_close(mtr, l + 5);
+}
+
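For reference, the MLOG_MEMSET body written by the two mlog_memset() variants above is five bytes after the usual initial log record: a 2-byte page offset, a 2-byte length and a 1-byte fill value, which mlog_parse_nbytes() now replays with a bounds-checked memset. A standalone sketch of that apply step (names and the simplified error handling are illustrative, not the server code):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Apply the 5-byte MLOG_MEMSET body (offset, length, fill byte) to a
    // page image. Returns the end of the consumed body, or nullptr if the
    // record is incomplete or out of bounds. page may be null while parsing.
    const uint8_t* apply_memset(const uint8_t* ptr, const uint8_t* end,
                                uint8_t* page, size_t page_size)
    {
        if (end < ptr + 5) return nullptr;
        const size_t ofs = size_t(ptr[0]) << 8 | ptr[1];  // big-endian, as mach_read_from_2()
        const size_t len = size_t(ptr[2]) << 8 | ptr[3];
        const uint8_t val = ptr[4];
        if (ofs + len > page_size) return nullptr;        // corrupt record
        if (page) memset(page + ofs, val, len);
        return ptr + 5;
    }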
/********************************************************//**
Opens a buffer for mlog, writes the initial log record and,
if needed, the field lengths of an index.
diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc
index 92b1aa38a81..5c15e8238d2 100644
--- a/storage/innobase/mtr/mtr0mtr.cc
+++ b/storage/innobase/mtr/mtr0mtr.cc
@@ -28,10 +28,10 @@ Created 11/26/1995 Heikki Tuuri
#include "buf0buf.h"
#include "buf0flu.h"
+#include "fsp0sysspace.h"
#include "page0types.h"
#include "mtr0log.h"
#include "log0log.h"
-#include "row0trunc.h"
#include "log0recv.h"
@@ -142,7 +142,7 @@ struct FindPage
slot->object);
if (m_ptr < block->frame
- || m_ptr >= block->frame + block->page.size.logical()) {
+ || m_ptr >= block->frame + srv_page_size) {
return(true);
}
@@ -191,7 +191,7 @@ memo_slot_release(mtr_memo_slot_t* slot)
block = reinterpret_cast<buf_block_t*>(slot->object);
- buf_block_unfix(block);
+ block->unfix();
buf_page_release_latch(block, slot->type);
break;
}
@@ -228,7 +228,7 @@ memo_block_unfix(mtr_memo_slot_t* slot)
case MTR_MEMO_PAGE_S_FIX:
case MTR_MEMO_PAGE_X_FIX:
case MTR_MEMO_PAGE_SX_FIX: {
- buf_block_unfix(reinterpret_cast<buf_block_t*>(slot->object));
+ reinterpret_cast<buf_block_t*>(slot->object)->unfix();
break;
}
@@ -677,8 +677,7 @@ mtr_t::x_lock_space(ulint space_id, const char* file, unsigned line)
ut_ad(get_log_mode() != MTR_LOG_NO_REDO
|| space->purpose == FIL_TYPE_TEMPORARY
|| space->purpose == FIL_TYPE_IMPORT
- || my_atomic_loadlint(&space->redo_skipped_count) > 0
- || srv_is_tablespace_truncated(space->id));
+ || space->redo_skipped_count > 0);
}
ut_ad(space);
diff --git a/storage/innobase/os/os0event.cc b/storage/innobase/os/os0event.cc
index 7588020c8db..c350c7b81dd 100644
--- a/storage/innobase/os/os0event.cc
+++ b/storage/innobase/os/os0event.cc
@@ -219,7 +219,7 @@ private:
int64_t signal_count; /*!< this is incremented
each time the event becomes
signaled */
- mutable EventMutex mutex; /*!< this mutex protects
+ mutable OSMutex mutex; /*!< this mutex protects
the next fields */
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index 8ab21d0bd15..77f5ba7c113 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -38,14 +38,14 @@ Created 10/21/1995 Heikki Tuuri
#include "sql_const.h"
#ifdef UNIV_LINUX
-#include <sys/types.h>
-#include <sys/stat.h>
+# include <sys/types.h>
+# include <sys/stat.h>
#endif
#include "srv0srv.h"
#include "srv0start.h"
#include "fil0fil.h"
-#include "srv0srv.h"
+#include "fsp0fsp.h"
#ifdef HAVE_LINUX_UNISTD_H
#include "unistd.h"
#endif
@@ -70,14 +70,6 @@ Created 10/21/1995 Heikki Tuuri
# endif
#endif
-#if defined(UNIV_LINUX) && defined(HAVE_SYS_STATVFS_H)
-#include <sys/statvfs.h>
-#endif
-
-#if defined(UNIV_LINUX) && defined(HAVE_LINUX_FALLOC_H)
-#include <linux/falloc.h>
-#endif
-
#ifdef _WIN32
#include <winioctl.h>
#endif
@@ -821,108 +813,6 @@ os_win32_device_io_control(
#endif
-/***********************************************************************//**
-Try to get number of bytes per sector from file system.
-@return file block size */
-UNIV_INTERN
-ulint
-os_file_get_block_size(
-/*===================*/
- os_file_t file, /*!< in: handle to a file */
- const char* name) /*!< in: file name */
-{
- ulint fblock_size = 512;
-
-#if defined(UNIV_LINUX)
- struct stat local_stat;
- int err;
-
- err = fstat((int)file, &local_stat);
-
- if (err != 0) {
- os_file_handle_error_no_exit(name, "fstat()", FALSE);
- } else {
- fblock_size = local_stat.st_blksize;
- }
-#endif /* UNIV_LINUX */
-#ifdef _WIN32
-
- fblock_size = 0;
- BOOL result = false;
- size_t len = 0;
- // Open volume for this file, find out it "physical bytes per sector"
-
- HANDLE volume_handle = INVALID_HANDLE_VALUE;
- char volume[MAX_PATH + 4]="\\\\.\\"; // Special prefix required for volume names.
- if (!GetVolumePathName(name , volume + 4, MAX_PATH)) {
- os_file_handle_error_no_exit(name,
- "GetVolumePathName()", FALSE);
- goto end;
- }
-
- len = strlen(volume);
- if (volume[len - 1] == '\\') {
- // Trim trailing backslash from volume name.
- volume[len - 1] = 0;
- }
-
- volume_handle = CreateFile(volume, FILE_READ_ATTRIBUTES,
- FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
- 0, OPEN_EXISTING, 0, 0);
-
- if (volume_handle == INVALID_HANDLE_VALUE) {
- if (GetLastError() != ERROR_ACCESS_DENIED) {
- os_file_handle_error_no_exit(volume,
- "CreateFile()", FALSE);
- }
- goto end;
- }
-
- DWORD tmp;
- STORAGE_ACCESS_ALIGNMENT_DESCRIPTOR disk_alignment;
-
- STORAGE_PROPERTY_QUERY storage_query;
- memset(&storage_query, 0, sizeof(storage_query));
- storage_query.PropertyId = StorageAccessAlignmentProperty;
- storage_query.QueryType = PropertyStandardQuery;
-
- result = os_win32_device_io_control(volume_handle,
- IOCTL_STORAGE_QUERY_PROPERTY,
- &storage_query,
- sizeof(storage_query),
- &disk_alignment,
- sizeof(disk_alignment),
- &tmp);
-
- if (!result) {
- DWORD err = GetLastError();
- if (err != ERROR_INVALID_FUNCTION && err != ERROR_NOT_SUPPORTED) {
- os_file_handle_error_no_exit(volume,
- "DeviceIoControl(IOCTL_STORAGE_QUERY_PROPERTY)", FALSE);
- }
- goto end;
- }
-
- fblock_size = disk_alignment.BytesPerPhysicalSector;
-
-end:
- if (volume_handle != INVALID_HANDLE_VALUE) {
- CloseHandle(volume_handle);
- }
-#endif /* _WIN32 */
-
- /* Currently we support file block size up to 4Kb */
- if (fblock_size > 4096 || fblock_size < 512) {
- if (fblock_size < 512) {
- fblock_size = 512;
- } else {
- fblock_size = 4096;
- }
- }
-
- return fblock_size;
-}
-
#ifdef WIN_ASYNC_IO
/** This function is only used in Windows asynchronous i/o.
Waits for an aio operation to complete. This function is used to wait the
@@ -1084,15 +974,8 @@ os_aio_validate_skip()
/** Try os_aio_validate() every this many times */
# define OS_AIO_VALIDATE_SKIP 13
- static int os_aio_validate_count;
-
- if (my_atomic_add32_explicit(&os_aio_validate_count, -1,
- MY_MEMORY_ORDER_RELAXED)
- % OS_AIO_VALIDATE_SKIP) {
- return true;
- }
-
- return(os_aio_validate());
+ static Atomic_counter<uint32_t> os_aio_validate_count;
+ return (os_aio_validate_count++ % OS_AIO_VALIDATE_SKIP) || os_aio_validate();
}
#endif /* UNIV_DEBUG */
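The Atomic_counter rewrite above preserves the old sampling behaviour: the expensive os_aio_validate() runs only on every OS_AIO_VALIDATE_SKIP-th call, and races on the counter are harmless. The same pattern with plain std::atomic (a sketch, not MariaDB's Atomic_counter class):

    #include <atomic>

    // Run an expensive check only on every Nth call; relaxed ordering is
    // enough because an occasional extra or skipped run does not matter.
    template <unsigned N, typename Check>
    bool check_every_nth(Check check)
    {
        static std::atomic<unsigned> calls{0};
        return (calls.fetch_add(1, std::memory_order_relaxed) % N) || check();
    }

In this sketch, os_aio_validate_skip() would amount to check_every_nth<13>(os_aio_validate).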
@@ -3524,8 +3407,6 @@ static WinIoInit win_io_init;
/** Free storage space associated with a section of the file.
@param[in] fh Open file handle
-@param[in] page_size Tablespace page size
-@param[in] block_size File system block size
@param[in] off Starting offset (SEEK_SET)
@param[in] len Size of the hole
@return 0 on success or errno */
@@ -5278,6 +5159,34 @@ short_warning:
#endif /* _WIN32 */
+/** Check if the file system supports sparse files.
+@param fh file handle
+@return true if the file system supports sparse files */
+IF_WIN(static,) bool os_is_sparse_file_supported(os_file_t fh)
+{
+ /* In this debugging mode, we act as if punch hole is supported,
+ then we skip any calls to actually punch a hole. In this way,
+ Transparent Page Compression is still being tested. */
+ DBUG_EXECUTE_IF("ignore_punch_hole",
+ return(true);
+ );
+
+#ifdef _WIN32
+ FILE_ATTRIBUTE_TAG_INFO info;
+ if (GetFileInformationByHandleEx(fh, FileAttributeTagInfo,
+ &info, (DWORD)sizeof(info))) {
+ if (info.FileAttributes != INVALID_FILE_ATTRIBUTES) {
+ return (info.FileAttributes & FILE_ATTRIBUTE_SPARSE_FILE) != 0;
+ }
+ }
+ return false;
+#else
+ /* We don't know the FS block size, use the sector size. The FS
+ will do the magic. */
+ return DB_SUCCESS == os_file_punch_hole_posix(fh, 0, srv_page_size);
+#endif /* _WIN32 */
+}
+
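On POSIX, the detection above simply attempts to punch a one-page hole at the start of the still-empty file and treats success as support. The same idea in plain Linux terms, independent of the InnoDB wrappers (illustrative sketch; the function name is made up):

    // Linux-only sketch: requires fallocate(2) with hole punching.
    #define _GNU_SOURCE
    #include <sys/types.h>
    #include <fcntl.h>          // fallocate()
    #include <linux/falloc.h>   // FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE

    // True if punching a one-page hole at offset 0 succeeds. Call this only
    // on an empty, freshly created file, since it discards the punched range.
    static bool supports_sparse(int fd, off_t page_size)
    {
        return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                         0, page_size) == 0;
    }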
/** Extend a file.
On Windows, extending a file allocates blocks for the file,
@@ -5505,15 +5414,16 @@ os_file_punch_hole(
os_offset_t off,
os_offset_t len)
{
- dberr_t err;
-
#ifdef _WIN32
- err = os_file_punch_hole_win32(fh, off, len);
+ return os_file_punch_hole_win32(fh, off, len);
#else
- err = os_file_punch_hole_posix(fh, off, len);
+ return os_file_punch_hole_posix(fh, off, len);
#endif /* _WIN32 */
+}
- return (err);
+inline bool IORequest::should_punch_hole() const
+{
+ return m_fil_node && m_fil_node->space->punch_hole;
}
/** Free storage space associated with a section of the file.
@@ -5553,7 +5463,9 @@ IORequest::punch_hole(os_file_t fh, os_offset_t off, ulint len)
/* If punch hole is not supported,
set space so that it is not used. */
if (err == DB_IO_NO_PUNCH_HOLE) {
- space_no_punch_hole();
+ if (m_fil_node) {
+ m_fil_node->space->punch_hole = false;
+ }
err = DB_SUCCESS;
}
}
@@ -5561,43 +5473,6 @@ IORequest::punch_hole(os_file_t fh, os_offset_t off, ulint len)
return (err);
}
-/** Check if the file system supports sparse files.
-
-Warning: On POSIX systems we try and punch a hole from offset 0 to
-the system configured page size. This should only be called on an empty
-file.
-@param[in] fh File handle for the file - if opened
-@return true if the file system supports sparse files */
-bool
-os_is_sparse_file_supported(os_file_t fh)
-{
- /* In this debugging mode, we act as if punch hole is supported,
- then we skip any calls to actually punch a hole. In this way,
- Transparent Page Compression is still being tested. */
- DBUG_EXECUTE_IF("ignore_punch_hole",
- return(true);
- );
-
-#ifdef _WIN32
- FILE_ATTRIBUTE_TAG_INFO info;
- if (GetFileInformationByHandleEx(fh, FileAttributeTagInfo,
- &info, (DWORD)sizeof(info))) {
- if (info.FileAttributes != INVALID_FILE_ATTRIBUTES) {
- return (info.FileAttributes & FILE_ATTRIBUTE_SPARSE_FILE) != 0;
- }
- }
- return false;
-#else
- dberr_t err;
-
- /* We don't know the FS block size, use the sector size. The FS
- will do the magic. */
- err = os_file_punch_hole_posix(fh, 0, srv_page_size);
-
- return(err == DB_SUCCESS);
-#endif /* _WIN32 */
-}
-
/** This function returns information about the specified file
@param[in] path pathname of the file
@param[out] stat_info information of a file in a directory
@@ -7627,6 +7502,285 @@ os_file_set_umask(ulint umask)
os_innodb_umask = umask;
}
+/** Determine some file metadata when creating or reading the file.
+@param file the file that is being created, or OS_FILE_CLOSED */
+void fil_node_t::find_metadata(os_file_t file
+#ifdef UNIV_LINUX
+ , struct stat* statbuf
+#endif
+ )
+{
+ if (file == OS_FILE_CLOSED) {
+ file = handle;
+ ut_ad(is_open());
+ }
+
+#ifdef _WIN32 /* FIXME: make this unconditional */
+ if (space->punch_hole) {
+ space->punch_hole = os_is_sparse_file_supported(file);
+ }
+#endif
+
+ /*
+ For the temporary tablespace and during the
+ non-redo-logged adjustments in
+ IMPORT TABLESPACE, we do not care about
+ the atomicity of writes.
+
+ Atomic writes are supported if the file can be used
+ with atomic_writes (not a log file), O_DIRECT is
+ used (tested in ha_innodb.cc), and the file is on a
+ device and file system that support atomic writes
+ for the given block size.
+ */
+ space->atomic_write_supported = space->purpose == FIL_TYPE_TEMPORARY
+ || space->purpose == FIL_TYPE_IMPORT;
+#ifdef _WIN32
+ block_size = 512;
+ on_ssd = false;
+ // Open volume for this file, find out its "physical bytes per sector"
+ char volume[MAX_PATH + 4];
+ if (!GetVolumePathName(name, volume + 4, MAX_PATH)) {
+ os_file_handle_error_no_exit(name,
+ "GetVolumePathName()", FALSE);
+ return;
+ }
+ // Special prefix required for volume names.
+ memcpy(volume, "\\\\.\\", 4);
+
+ size_t len = strlen(volume);
+ if (volume[len - 1] == '\\') {
+ // Trim trailing backslash from volume name.
+ volume[len - 1] = 0;
+ }
+
+ HANDLE volume_handle = CreateFile(volume, FILE_READ_ATTRIBUTES,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ 0, OPEN_EXISTING, 0, 0);
+
+ if (volume_handle != INVALID_HANDLE_VALUE) {
+ DWORD tmp;
+ union {
+ STORAGE_ACCESS_ALIGNMENT_DESCRIPTOR disk_alignment;
+ DEVICE_SEEK_PENALTY_DESCRIPTOR seek_penalty;
+ } result;
+ STORAGE_PROPERTY_QUERY storage_query;
+ memset(&storage_query, 0, sizeof(storage_query));
+ storage_query.PropertyId = StorageAccessAlignmentProperty;
+ storage_query.QueryType = PropertyStandardQuery;
+
+ if (!os_win32_device_io_control(volume_handle,
+ IOCTL_STORAGE_QUERY_PROPERTY,
+ &storage_query,
+ sizeof storage_query,
+ &result.disk_alignment,
+ sizeof result.disk_alignment,
+ &tmp)
+ || tmp < sizeof result.disk_alignment) {
+ switch (GetLastError()) {
+ case ERROR_INVALID_FUNCTION:
+ case ERROR_NOT_SUPPORTED:
+ break;
+ default:
+ ioctl_fail:
+ os_file_handle_error_no_exit(
+ volume,
+ "DeviceIoControl(IOCTL_STORAGE_QUERY_PROPERTY)",
+ FALSE);
+ }
+ goto end;
+ }
+
+ block_size = result.disk_alignment.BytesPerPhysicalSector;
+
+ storage_query.PropertyId = StorageDeviceSeekPenaltyProperty;
+ storage_query.QueryType = PropertyStandardQuery;
+
+ if (!os_win32_device_io_control(volume_handle,
+ IOCTL_STORAGE_QUERY_PROPERTY,
+ &storage_query,
+ sizeof storage_query,
+ &result.seek_penalty,
+ sizeof result.seek_penalty,
+ &tmp)
+ || tmp < sizeof result.seek_penalty) {
+ switch (GetLastError()) {
+ case ERROR_INVALID_FUNCTION:
+ case ERROR_NOT_SUPPORTED:
+ case ERROR_GEN_FAILURE:
+ goto end;
+ default:
+ goto ioctl_fail;
+ }
+ }
+
+ on_ssd = !result.seek_penalty.IncursSeekPenalty;
+end:
+ if (volume_handle != INVALID_HANDLE_VALUE) {
+ CloseHandle(volume_handle);
+ }
+ } else {
+ if (GetLastError() != ERROR_ACCESS_DENIED) {
+ os_file_handle_error_no_exit(volume,
+ "CreateFile()", FALSE);
+ }
+ }
+
+ /* Currently we support file block size up to 4KiB */
+ if (block_size > 4096) {
+ block_size = 4096;
+ } else if (block_size < 512) {
+ block_size = 512;
+ }
+#else
+ on_ssd = space->atomic_write_supported;
+# ifdef UNIV_LINUX
+ if (!on_ssd) {
+ struct stat sbuf;
+ if (!statbuf && !fstat(file, &sbuf)) {
+ statbuf = &sbuf;
+ }
+ if (statbuf && fil_system.is_ssd(statbuf->st_dev)) {
+ on_ssd = true;
+ }
+ }
+# endif
+#endif
+ if (!space->atomic_write_supported) {
+ space->atomic_write_supported = atomic_write
+ && srv_use_atomic_writes
+#ifdef _WIN32
+ && my_test_if_atomic_write(file,
+ space->physical_size())
+#else
+ && srv_page_size == block_size
+#endif
+ ;
+ }
+}
+
+/** Read the first page of a data file.
+@param[in] first whether this is the very first read
+@return whether the page was found valid */
+bool fil_node_t::read_page0(bool first)
+{
+ ut_ad(mutex_own(&fil_system.mutex));
+ ut_a(space->purpose != FIL_TYPE_LOG);
+ const ulint psize = space->physical_size();
+#ifndef _WIN32
+ struct stat statbuf;
+ if (fstat(handle, &statbuf)) {
+ return false;
+ }
+ block_size = statbuf.st_blksize;
+ os_offset_t size_bytes = statbuf.st_size;
+#else
+ os_offset_t size_bytes = os_file_get_size(handle);
+ ut_a(size_bytes != (os_offset_t) -1);
+#endif
+ const ulint min_size = FIL_IBD_FILE_INITIAL_SIZE * psize;
+
+ if (size_bytes < min_size) {
+ ib::error() << "The size of the file " << name
+ << " is only " << size_bytes
+ << " bytes, should be at least " << min_size;
+ return false;
+ }
+
+ byte* buf2 = static_cast<byte*>(ut_malloc_nokey(2 * psize));
+
+ /* Align the memory for file i/o if we might have O_DIRECT set */
+ byte* page = static_cast<byte*>(ut_align(buf2, psize));
+ IORequest request(IORequest::READ);
+ if (os_file_read(request, handle, page, 0, psize) != DB_SUCCESS) {
+ ib::error() << "Unable to read first page of file " << name;
+ ut_free(buf2);
+ return false;
+ }
+ const ulint space_id = fsp_header_get_space_id(page);
+ ulint flags = fsp_header_get_flags(page);
+ const ulint size = fsp_header_get_field(page, FSP_SIZE);
+ const ulint free_limit = fsp_header_get_field(page, FSP_FREE_LIMIT);
+ const ulint free_len = flst_get_len(FSP_HEADER_OFFSET + FSP_FREE
+ + page);
+ if (!fil_space_t::is_valid_flags(flags, space->id)) {
+ ulint cflags = fsp_flags_convert_from_101(flags);
+ if (cflags == ULINT_UNDEFINED) {
+invalid:
+ ib::error()
+ << "Expected tablespace flags "
+ << ib::hex(space->flags)
+ << " but found " << ib::hex(flags)
+ << " in the file " << name;
+ ut_free(buf2);
+ return false;
+ }
+
+ ulint cf = cflags & ~FSP_FLAGS_MEM_MASK;
+ ulint sf = space->flags & ~FSP_FLAGS_MEM_MASK;
+
+ if (!fil_space_t::is_flags_equal(cf, sf)
+ && !fil_space_t::is_flags_equal(sf, cf)) {
+ goto invalid;
+ }
+
+ flags = cflags;
+ }
+
+ ut_ad(!(flags & FSP_FLAGS_MEM_MASK));
+
+ /* Try to read crypt_data from page 0 if it is not yet read. */
+ if (!space->crypt_data) {
+ space->crypt_data = fil_space_read_crypt_data(
+ fil_space_t::zip_size(flags), page);
+ }
+ ut_free(buf2);
+
+ if (UNIV_UNLIKELY(space_id != space->id)) {
+ ib::error() << "Expected tablespace id " << space->id
+ << " but found " << space_id
+ << " in the file " << name;
+ return false;
+ }
+
+ if (first) {
+ ut_ad(space->id != TRX_SYS_SPACE);
+#ifdef UNIV_LINUX
+ find_metadata(handle, &statbuf);
+#else
+ find_metadata();
+#endif
+
+ /* Truncate the size to a multiple of extent size. */
+ ulint mask = psize * FSP_EXTENT_SIZE - 1;
+
+ if (size_bytes <= mask) {
+ /* .ibd files start smaller than an
+ extent size. Do not truncate valid data. */
+ } else {
+ size_bytes &= ~os_offset_t(mask);
+ }
+
+ space->flags = (space->flags & FSP_FLAGS_MEM_MASK) | flags;
+
+ this->size = ulint(size_bytes / psize);
+ space->size += this->size;
+ } else if (space->id != TRX_SYS_SPACE || space->size_in_header) {
+ /* If this is not the first-time open, do nothing.
+ For the system tablespace, we always get invoked as
+ first=false, so we detect the true first-time-open based
+ on size_in_header and proceed to initialize the data. */
+ return true;
+ }
+
+ ut_ad(space->free_limit == 0 || space->free_limit == free_limit);
+ ut_ad(space->free_len == 0 || space->free_len == free_len);
+ space->size_in_header = size;
+ space->free_limit = free_limit;
+ space->free_len = free_len;
+ return true;
+}
+
#else
#include "univ.i"
#endif /* !UNIV_INNOCHECKSUM */
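The Linux branch of fil_node_t::find_metadata() above delegates the SSD decision to fil_system.is_ssd(statbuf->st_dev), whose definition is not part of this hunk. As a rough illustration only, assuming the usual sysfs "rotational" attribute is what gets consulted (the helper below is hypothetical, not the MariaDB implementation):

    // Hypothetical sketch: classify a block device by its sysfs
    // "rotational" flag. For partitions the attribute may only exist
    // on the parent device, which this sketch does not handle.
    #include <sys/sysmacros.h>
    #include <sys/types.h>
    #include <cstdio>

    static bool is_non_rotational(dev_t dev)
    {
        char path[80];
        snprintf(path, sizeof path,
                 "/sys/dev/block/%u:%u/queue/rotational",
                 major(dev), minor(dev));
        FILE* f = fopen(path, "r");
        if (!f) {
            return false;   /* unknown; assume a rotating disk */
        }
        int c = fgetc(f);   /* the file contains "0" or "1" */
        fclose(f);
        return c == '0';
    }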
diff --git a/storage/innobase/os/os0proc.cc b/storage/innobase/os/os0proc.cc
index b3202992af0..33e65484003 100644
--- a/storage/innobase/os/os0proc.cc
+++ b/storage/innobase/os/os0proc.cc
@@ -40,7 +40,7 @@ MAP_ANON but MAP_ANON is marked as deprecated */
/** The total amount of memory currently allocated from the operating
system with os_mem_alloc_large(). */
-ulint os_total_large_mem_allocated = 0;
+Atomic_counter<ulint> os_total_large_mem_allocated;
/** Converts the current process id to a number.
@return process id as a number */
@@ -98,9 +98,7 @@ os_mem_alloc_large(
if (ptr) {
*n = size;
- my_atomic_addlint(
- &os_total_large_mem_allocated, size);
-
+ os_total_large_mem_allocated += size;
UNIV_MEM_ALLOC(ptr, size);
return(ptr);
}
@@ -123,8 +121,7 @@ skip:
ib::info() << "VirtualAlloc(" << size << " bytes) failed;"
" Windows error " << GetLastError();
} else {
- my_atomic_addlint(
- &os_total_large_mem_allocated, size);
+ os_total_large_mem_allocated += size;
UNIV_MEM_ALLOC(ptr, size);
}
#else
@@ -139,8 +136,7 @@ skip:
" errno " << errno;
ptr = NULL;
} else {
- my_atomic_addlint(
- &os_total_large_mem_allocated, size);
+ os_total_large_mem_allocated += size;
UNIV_MEM_ALLOC(ptr, size);
}
#endif
@@ -159,8 +155,7 @@ os_mem_free_large(
#ifdef HAVE_LINUX_LARGE_PAGES
if (my_use_large_pages && opt_large_page_size && !shmdt(ptr)) {
- my_atomic_addlint(
- &os_total_large_mem_allocated, -size);
+ os_total_large_mem_allocated -= size;
return;
}
#endif /* HAVE_LINUX_LARGE_PAGES */
@@ -171,8 +166,7 @@ os_mem_free_large(
ib::error() << "VirtualFree(" << ptr << ", " << size
<< ") failed; Windows error " << GetLastError();
} else {
- my_atomic_addlint(
- &os_total_large_mem_allocated, -lint(size));
+ os_total_large_mem_allocated -= size;
}
#elif !defined OS_MAP_ANON
ut_free(ptr);
@@ -185,8 +179,7 @@ os_mem_free_large(
ib::error() << "munmap(" << ptr << ", " << size << ") failed;"
" errno " << errno;
} else {
- my_atomic_addlint(
- &os_total_large_mem_allocated, -size);
+ os_total_large_mem_allocated -= size;
}
#endif
}
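The hunks above replace explicit my_atomic_addlint() calls with an Atomic_counter<ulint>, whose definition lives elsewhere in the server headers. A minimal stand-in, assuming it is a thin wrapper over std::atomic with relaxed ordering (sufficient for a statistics counter that never orders other memory accesses):

    // Illustrative stand-in only; the real Atomic_counter template may
    // differ in detail.
    #include <atomic>

    template <typename T>
    class Atomic_counter_sketch
    {
        std::atomic<T> m_counter{0};
    public:
        T operator+=(T i)
        { return m_counter.fetch_add(i, std::memory_order_relaxed) + i; }
        T operator-=(T i)
        { return m_counter.fetch_sub(i, std::memory_order_relaxed) - i; }
        T operator++(int)
        { return m_counter.fetch_add(1, std::memory_order_relaxed); }
        T operator--(int)
        { return m_counter.fetch_sub(1, std::memory_order_relaxed); }
        operator T() const
        { return m_counter.load(std::memory_order_relaxed); }
    };

With such a wrapper, "os_total_large_mem_allocated += size" is a single atomic fetch_add, so the counter updates above stay race-free without the explicit helper calls.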
diff --git a/storage/innobase/os/os0thread.cc b/storage/innobase/os/os0thread.cc
index e0996e45880..28ba46c9f92 100644
--- a/storage/innobase/os/os0thread.cc
+++ b/storage/innobase/os/os0thread.cc
@@ -28,7 +28,7 @@ Created 9/8/1995 Heikki Tuuri
#include "srv0srv.h"
/** Number of threads active. */
-ulint os_thread_count;
+Atomic_counter<ulint> os_thread_count;
/***************************************************************//**
Compares two thread ids for equality.
@@ -118,7 +118,7 @@ os_thread_create_func(
CloseHandle(handle);
- my_atomic_addlint(&os_thread_count, 1);
+ os_thread_count++;
return((os_thread_t)new_thread_id);
#else /* _WIN32 else */
@@ -127,7 +127,7 @@ os_thread_create_func(
pthread_attr_init(&attr);
- my_atomic_addlint(&os_thread_count, 1);
+ os_thread_count++;
int ret = pthread_create(&new_thread_id, &attr, func, arg);
@@ -182,7 +182,7 @@ os_thread_exit(bool detach)
pfs_delete_thread();
#endif
- my_atomic_addlint(&os_thread_count, ulint(-1));
+ os_thread_count--;
#ifdef _WIN32
ExitThread(0);
diff --git a/storage/innobase/page/page0cur.cc b/storage/innobase/page/page0cur.cc
index d545a004002..9d1e37405b7 100644
--- a/storage/innobase/page/page0cur.cc
+++ b/storage/innobase/page/page0cur.cc
@@ -735,7 +735,7 @@ up_slot_match:
& REC_INFO_MIN_REC_FLAG)) {
ut_ad(!page_has_prev(page_align(mid_rec)));
ut_ad(!page_rec_is_leaf(mid_rec)
- || rec_is_metadata(mid_rec, index));
+ || rec_is_metadata(mid_rec, *index));
cmp = 1;
goto low_rec_match;
}
@@ -1368,7 +1368,7 @@ use_heap:
switch (rec_get_status(current_rec)) {
case REC_STATUS_ORDINARY:
case REC_STATUS_NODE_PTR:
- case REC_STATUS_COLUMNS_ADDED:
+ case REC_STATUS_INSTANT:
case REC_STATUS_INFIMUM:
break;
case REC_STATUS_SUPREMUM:
@@ -1377,7 +1377,7 @@ use_heap:
switch (rec_get_status(insert_rec)) {
case REC_STATUS_ORDINARY:
case REC_STATUS_NODE_PTR:
- case REC_STATUS_COLUMNS_ADDED:
+ case REC_STATUS_INSTANT:
break;
case REC_STATUS_INFIMUM:
case REC_STATUS_SUPREMUM:
@@ -1564,7 +1564,7 @@ page_cur_insert_rec_zip(
get rid of the modification log. */
page_create_zip(page_cur_get_block(cursor), index,
page_header_get_field(page, PAGE_LEVEL),
- 0, NULL, mtr);
+ 0, mtr);
ut_ad(!page_header_get_ptr(page, PAGE_FREE));
if (page_zip_available(
@@ -1639,7 +1639,7 @@ page_cur_insert_rec_zip(
if (!log_compressed) {
if (page_zip_compress(
page_zip, page, index,
- level, NULL, NULL)) {
+ level, NULL)) {
page_cur_insert_rec_write_log(
insert_rec, rec_size,
cursor->rec, index, mtr);
@@ -1785,17 +1785,11 @@ too_small:
columns of free_rec, in case it will not be
overwritten by insert_rec. */
- ulint trx_id_col;
ulint trx_id_offs;
ulint len;
- trx_id_col = dict_index_get_sys_col_pos(index,
- DATA_TRX_ID);
- ut_ad(trx_id_col > 0);
- ut_ad(trx_id_col != ULINT_UNDEFINED);
-
- trx_id_offs = rec_get_nth_field_offs(foffsets,
- trx_id_col, &len);
+ trx_id_offs = rec_get_nth_field_offs(
+ foffsets, index->db_trx_id(), &len);
ut_ad(len == DATA_TRX_ID_LEN);
if (DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN + trx_id_offs
@@ -1811,7 +1805,7 @@ too_small:
ut_ad(free_rec + trx_id_offs + DATA_TRX_ID_LEN
== rec_get_nth_field(free_rec, foffsets,
- trx_id_col + 1, &len));
+ index->db_roll_ptr(), &len));
ut_ad(len == DATA_ROLL_PTR_LEN);
}
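Several hunks above replace dict_index_get_sys_col_pos(index, DATA_TRX_ID) lookups (and the "trx_id_col + 1" arithmetic) with index->db_trx_id() and index->db_roll_ptr(). The sketch below shows the layout assumption behind those accessors; the real definitions live in dict0mem.h and may differ in detail:

    // In a clustered index the record fields are laid out as
    //   [unique key fields][DB_TRX_ID][DB_ROLL_PTR][remaining fields]
    // so the positions of the system columns follow from n_uniq alone.
    struct dict_index_sketch
    {
        unsigned n_uniq;    // number of fields in the unique key prefix
        unsigned db_trx_id() const { return n_uniq; }
        unsigned db_roll_ptr() const { return n_uniq + 1; }
    };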
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index 8c146948b2c..9109787f191 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -29,8 +29,8 @@ Created 2/2/1994 Heikki Tuuri
#include "page0cur.h"
#include "page0zip.h"
#include "buf0buf.h"
+#include "buf0checksum.h"
#include "btr0btr.h"
-#include "row0trunc.h"
#include "srv0srv.h"
#include "lock0lock.h"
#include "fut0lst.h"
@@ -454,22 +454,15 @@ page_create_zip(
ulint level, /*!< in: the B-tree level
of the page */
trx_id_t max_trx_id, /*!< in: PAGE_MAX_TRX_ID */
- const redo_page_compress_t* page_comp_info,
- /*!< in: used for applying
- TRUNCATE log
- record during recovery */
mtr_t* mtr) /*!< in/out: mini-transaction
handle */
{
page_t* page;
page_zip_des_t* page_zip = buf_block_get_page_zip(block);
- bool is_spatial;
ut_ad(block);
ut_ad(page_zip);
- ut_ad(index == NULL || dict_table_is_comp(index->table));
- is_spatial = index ? dict_index_is_spatial(index)
- : page_comp_info->type & DICT_SPATIAL;
+ ut_ad(dict_table_is_comp(index->table));
/* PAGE_MAX_TRX_ID or PAGE_ROOT_AUTO_INC are always 0 for
temporary tables. */
@@ -487,22 +480,11 @@ page_create_zip(
|| !dict_index_is_sec_or_ibuf(index)
|| index->table->is_temporary());
- page = page_create_low(block, TRUE, is_spatial);
+ page = page_create_low(block, TRUE, dict_index_is_spatial(index));
mach_write_to_2(PAGE_HEADER + PAGE_LEVEL + page, level);
mach_write_to_8(PAGE_HEADER + PAGE_MAX_TRX_ID + page, max_trx_id);
- if (truncate_t::s_fix_up_active) {
- /* Compress the index page created when applying
- TRUNCATE log during recovery */
- if (!page_zip_compress(page_zip, page, index, page_zip_level,
- page_comp_info, NULL)) {
- /* The compression of a newly created
- page should always succeed. */
- ut_error;
- }
-
- } else if (!page_zip_compress(page_zip, page, index,
- page_zip_level, NULL, mtr)) {
+ if (!page_zip_compress(page_zip, page, index, page_zip_level, mtr)) {
/* The compression of a newly created
page should always succeed. */
ut_error;
@@ -548,7 +530,7 @@ page_create_empty(
ut_ad(!index->table->is_temporary());
page_create_zip(block, index,
page_header_get_field(page, PAGE_LEVEL),
- max_trx_id, NULL, mtr);
+ max_trx_id, mtr);
} else {
page_create(block, mtr, page_is_comp(page),
dict_index_is_spatial(index));
@@ -723,11 +705,8 @@ page_copy_rec_list_end(
if (new_page_zip) {
mtr_set_log_mode(mtr, log_mode);
- if (!page_zip_compress(new_page_zip,
- new_page,
- index,
- page_zip_level,
- NULL, mtr)) {
+ if (!page_zip_compress(new_page_zip, new_page, index,
+ page_zip_level, mtr)) {
/* Before trying to reorganize the page,
store the number of preceding records on the page. */
ulint ret_pos
@@ -889,7 +868,7 @@ page_copy_rec_list_start(
goto zip_reorganize;);
if (!page_zip_compress(new_page_zip, new_page, index,
- page_zip_level, NULL, mtr)) {
+ page_zip_level, mtr)) {
ulint ret_pos;
#ifndef DBUG_OFF
zip_reorganize:
@@ -1829,6 +1808,7 @@ page_print_list(
count = 0;
for (;;) {
offsets = rec_get_offsets(cur.rec, index, offsets,
+ page_rec_is_leaf(cur.rec),
ULINT_UNDEFINED, &heap);
page_rec_print(cur.rec, offsets);
@@ -1851,6 +1831,7 @@ page_print_list(
if (count + pr_n >= n_recs) {
offsets = rec_get_offsets(cur.rec, index, offsets,
+ page_rec_is_leaf(cur.rec),
ULINT_UNDEFINED, &heap);
page_rec_print(cur.rec, offsets);
}
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index ac0ff39f4f9..542db15f3bc 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -25,39 +25,31 @@ Compressed page interface
Created June 2005 by Marko Makela
*******************************************************/
-#include "page0size.h"
#include "page0zip.h"
+#include "fsp0types.h"
+#include "page0page.h"
+#include "buf0checksum.h"
+#include "ut0crc32.h"
+#include "zlib.h"
+
+#ifndef UNIV_INNOCHECKSUM
/** A BLOB field reference full of zero, for use in assertions and tests.
Initially, BLOB field references are set to zero, in
dtuple_convert_big_rec(). */
-const byte field_ref_zero[FIELD_REF_SIZE] = {
- 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0,
-};
+const byte field_ref_zero[UNIV_PAGE_SIZE_MAX] = { 0, };
-#ifndef UNIV_INNOCHECKSUM
-#include "page0page.h"
#include "mtr0log.h"
#include "dict0dict.h"
#include "btr0cur.h"
-#include "page0types.h"
#include "log0recv.h"
#include "row0row.h"
-#include "row0trunc.h"
-#include "zlib.h"
-#include "buf0buf.h"
-#include "buf0types.h"
-#include "buf0checksum.h"
#include "btr0sea.h"
#include "dict0boot.h"
#include "lock0lock.h"
#include "srv0srv.h"
#include "buf0lru.h"
#include "srv0mon.h"
-#include "ut0crc32.h"
#include <map>
#include <algorithm>
@@ -106,11 +98,11 @@ Compare at most sizeof(field_ref_zero) bytes.
@param s in: size of the memory block, in bytes */
#define ASSERT_ZERO(b, s) \
ut_ad(!memcmp(b, field_ref_zero, \
- ut_min(static_cast<size_t>(s), sizeof field_ref_zero)));
+ std::min<size_t>(s, sizeof field_ref_zero)));
/** Assert that a BLOB pointer is filled with zero bytes.
@param b in: BLOB pointer */
#define ASSERT_ZERO_BLOB(b) \
- ut_ad(!memcmp(b, field_ref_zero, sizeof field_ref_zero))
+ ut_ad(!memcmp(b, field_ref_zero, FIELD_REF_SIZE))
/* Enable some extra debugging output. This code can be enabled
independently of any UNIV_ debugging conditions. */
@@ -178,18 +170,17 @@ page_zip_is_too_big(
const dict_index_t* index,
const dtuple_t* entry)
{
- const page_size_t& page_size =
- dict_table_page_size(index->table);
+ const ulint zip_size = index->table->space->zip_size();
/* Estimate the free space of an empty compressed page.
Subtract one byte for the encoded heap_no in the
modification log. */
ulint free_space_zip = page_zip_empty_size(
- index->n_fields, page_size.physical());
+ index->n_fields, zip_size);
ulint n_uniq = dict_index_get_n_unique_in_tree(index);
ut_ad(dict_table_is_comp(index->table));
- ut_ad(page_size.is_compressed());
+ ut_ad(zip_size);
if (free_space_zip == 0) {
return(true);
@@ -1248,17 +1239,11 @@ page_zip_compress(
dict_index_t* index, /*!< in: index of the B-tree
node */
ulint level, /*!< in: compression level */
- const redo_page_compress_t* page_comp_info,
- /*!< in: used for applying
- TRUNCATE log
- record during recovery */
mtr_t* mtr) /*!< in/out: mini-transaction,
or NULL */
{
z_stream c_stream;
int err;
- ulint n_fields; /* number of index fields
- needed */
byte* fields; /*!< index field information */
byte* buf; /*!< compressed payload of the
page */
@@ -1273,7 +1258,6 @@ page_zip_compress(
ulint n_blobs = 0;
byte* storage; /* storage of uncompressed
columns */
- index_id_t ind_id;
uintmax_t usec = ut_time_us(NULL);
#ifdef PAGE_ZIP_COMPRESS_DBG
FILE* logfile = NULL;
@@ -1288,10 +1272,8 @@ page_zip_compress(
ut_a(fil_page_index_page_check(page));
ut_ad(page_simple_validate_new((page_t*) page));
ut_ad(page_zip_simple_validate(page_zip));
- ut_ad(!index
- || (index
- && dict_table_is_comp(index->table)
- && !dict_index_is_ibuf(index)));
+ ut_ad(dict_table_is_comp(index->table));
+ ut_ad(!dict_index_is_ibuf(index));
UNIV_MEM_ASSERT_RW(page, srv_page_size);
@@ -1311,18 +1293,10 @@ page_zip_compress(
== PAGE_NEW_SUPREMUM);
}
- if (truncate_t::s_fix_up_active) {
- ut_ad(page_comp_info != NULL);
- n_fields = page_comp_info->n_fields;
- ind_id = page_comp_info->index_id;
- } else {
- if (page_is_leaf(page)) {
- n_fields = dict_index_get_n_fields(index);
- } else {
- n_fields = dict_index_get_n_unique_in_tree_nonleaf(index);
- }
- ind_id = index->id;
- }
+ const ulint n_fields = page_is_leaf(page)
+ ? dict_index_get_n_fields(index)
+ : dict_index_get_n_unique_in_tree_nonleaf(index);
+ index_id_t ind_id = index->id;
/* The dense directory excludes the infimum and supremum records. */
n_dense = ulint(page_dir_get_n_heap(page)) - PAGE_HEAP_NO_USER_LOW;
@@ -1433,20 +1407,8 @@ page_zip_compress(
/* Dense page directory and uncompressed columns, if any */
if (page_is_leaf(page)) {
- if ((index && dict_index_is_clust(index))
- || (page_comp_info
- && (page_comp_info->type & DICT_CLUSTERED))) {
-
- if (index) {
- trx_id_col = dict_index_get_sys_col_pos(
- index, DATA_TRX_ID);
- ut_ad(trx_id_col > 0);
- ut_ad(trx_id_col != ULINT_UNDEFINED);
- } else if (page_comp_info
- && (page_comp_info->type
- & DICT_CLUSTERED)) {
- trx_id_col = page_comp_info->trx_id_pos;
- }
+ if (dict_index_is_clust(index)) {
+ trx_id_col = index->db_trx_id();
slot_size = PAGE_ZIP_DIR_SLOT_SIZE
+ DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN;
@@ -1454,10 +1416,6 @@ page_zip_compress(
} else {
/* Signal the absence of trx_id
in page_zip_fields_encode() */
- if (index) {
- ut_ad(dict_index_get_sys_col_pos(
- index, DATA_TRX_ID) == ULINT_UNDEFINED);
- }
trx_id_col = 0;
slot_size = PAGE_ZIP_DIR_SLOT_SIZE;
}
@@ -1471,19 +1429,9 @@ page_zip_compress(
goto zlib_error;
}
- c_stream.avail_out -= static_cast<uInt>(n_dense * slot_size);
- if (truncate_t::s_fix_up_active) {
- ut_ad(page_comp_info != NULL);
- c_stream.avail_in = static_cast<uInt>(
- page_comp_info->field_len);
- for (ulint i = 0; i < page_comp_info->field_len; i++) {
- fields[i] = page_comp_info->fields[i];
- }
- } else {
- c_stream.avail_in = static_cast<uInt>(
- page_zip_fields_encode(
- n_fields, index, trx_id_col, fields));
- }
+ c_stream.avail_out -= uInt(n_dense * slot_size);
+ c_stream.avail_in = uInt(page_zip_fields_encode(n_fields, index,
+ trx_id_col, fields));
c_stream.next_in = fields;
if (UNIV_LIKELY(!trx_id_col)) {
@@ -1637,7 +1585,7 @@ err_exit:
mutex_exit(&page_zip_stat_per_index_mutex);
}
- if (page_is_leaf(page) && !truncate_t::s_fix_up_active) {
+ if (page_is_leaf(page)) {
dict_index_zip_success(index);
}
@@ -2169,6 +2117,10 @@ page_zip_apply_log(
rec_get_offsets_reverse(data, index,
hs & REC_STATUS_NODE_PTR,
offsets);
+ /* Silence a debug assertion in rec_offs_make_valid().
+ This will be overwritten in page_zip_set_extra_bytes(),
+ called by page_zip_decompress_low(). */
+ ut_d(rec[-REC_NEW_INFO_BITS] = 0);
rec_offs_make_valid(rec, index, is_leaf, offsets);
/* Copy the extra bytes (backwards). */
@@ -3770,29 +3722,25 @@ page_zip_write_rec(
ulint len;
if (dict_index_is_clust(index)) {
- ulint trx_id_col;
-
- trx_id_col = dict_index_get_sys_col_pos(index,
- DATA_TRX_ID);
- ut_ad(trx_id_col != ULINT_UNDEFINED);
-
/* Store separately trx_id, roll_ptr and
the BTR_EXTERN_FIELD_REF of each BLOB column. */
if (rec_offs_any_extern(offsets)) {
data = page_zip_write_rec_ext(
page_zip, page,
rec, index, offsets, create,
- trx_id_col, heap_no, storage, data);
+ index->db_trx_id(), heap_no,
+ storage, data);
} else {
/* Locate trx_id and roll_ptr. */
const byte* src
= rec_get_nth_field(rec, offsets,
- trx_id_col, &len);
+ index->db_trx_id(),
+ &len);
ut_ad(len == DATA_TRX_ID_LEN);
ut_ad(src + DATA_TRX_ID_LEN
== rec_get_nth_field(
rec, offsets,
- trx_id_col + 1, &len));
+ index->db_roll_ptr(), &len));
ut_ad(len == DATA_ROLL_PTR_LEN);
/* Log the preceding fields. */
@@ -3820,8 +3768,6 @@ page_zip_write_rec(
} else {
/* Leaf page of a secondary index:
no externally stored columns */
- ut_ad(dict_index_get_sys_col_pos(index, DATA_TRX_ID)
- == ULINT_UNDEFINED);
ut_ad(!rec_offs_any_extern(offsets));
/* Log the entire record. */
@@ -4809,9 +4755,7 @@ page_zip_reorganize(
/* Restore logging. */
mtr_set_log_mode(mtr, log_mode);
- if (!page_zip_compress(page_zip, page, index,
- page_zip_level, NULL, mtr)) {
-
+ if (!page_zip_compress(page_zip, page, index, page_zip_level, mtr)) {
buf_block_free(temp_block);
return(FALSE);
}
@@ -4992,12 +4936,7 @@ uint32_t
page_zip_calc_checksum(
const void* data,
ulint size,
- srv_checksum_algorithm_t algo
-#ifdef INNODB_BUG_ENDIAN_CRC32
- /** for crc32, use the big-endian bug-compatible crc32 variant */
- , bool use_legacy_big_endian
-#endif
-)
+ srv_checksum_algorithm_t algo)
{
uLong adler;
const Bytef* s = static_cast<const byte*>(data);
@@ -5006,22 +4945,11 @@ page_zip_calc_checksum(
and FIL_PAGE_FILE_FLUSH_LSN from the checksum. */
switch (algo) {
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
case SRV_CHECKSUM_ALGORITHM_CRC32:
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
ut_ad(size > FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
-#ifdef INNODB_BUG_ENDIAN_CRC32
- if (use_legacy_big_endian) {
- return ut_crc32_legacy_big_endian(s + FIL_PAGE_OFFSET,
- FIL_PAGE_LSN
- - FIL_PAGE_OFFSET)
- ^ ut_crc32_legacy_big_endian(
- s + FIL_PAGE_TYPE, 2)
- ^ ut_crc32_legacy_big_endian(
- s + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID,
- size
- - FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
- }
-#endif
return ut_crc32(s + FIL_PAGE_OFFSET,
FIL_PAGE_LSN - FIL_PAGE_OFFSET)
^ ut_crc32(s + FIL_PAGE_TYPE, 2)
@@ -5051,15 +4979,12 @@ page_zip_calc_checksum(
return(0);
}
-/**********************************************************************//**
-Verify a compressed page's checksum.
-@return TRUE if the stored checksum is valid according to the value of
+/** Verify a compressed page's checksum.
+@param[in] data compressed page
+@param[in] size size of compressed page
+@return whether the stored checksum is valid according to the value of
innodb_checksum_algorithm */
-ibool
-page_zip_verify_checksum(
-/*=====================*/
- const void* data, /*!< in: compressed page */
- ulint size) /*!< in: size of compressed page */
+bool page_zip_verify_checksum(const void* data, ulint size)
{
const uint32_t stored = mach_read_from_4(
static_cast<const byte*>(data) + FIL_PAGE_SPACE_OR_CHKSUM);
@@ -5138,40 +5063,26 @@ page_zip_verify_checksum(
}
switch (curr_algo) {
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
-#ifdef INNODB_BUG_ENDIAN_CRC32
- return stored == page_zip_calc_checksum(data, size, curr_algo,
- true);
-#endif
- /* fall through */
case SRV_CHECKSUM_ALGORITHM_STRICT_INNODB:
case SRV_CHECKSUM_ALGORITHM_STRICT_NONE:
return FALSE;
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
case SRV_CHECKSUM_ALGORITHM_CRC32:
if (stored == BUF_NO_CHECKSUM_MAGIC) {
return(TRUE);
}
- return
-#ifdef INNODB_BUG_ENDIAN_CRC32
- stored == page_zip_calc_checksum(data, size, curr_algo,
- true) ||
-#endif
- stored == page_zip_calc_checksum(
- data, size, SRV_CHECKSUM_ALGORITHM_INNODB);
+ return stored == page_zip_calc_checksum(
+ data, size, SRV_CHECKSUM_ALGORITHM_INNODB);
case SRV_CHECKSUM_ALGORITHM_INNODB:
if (stored == BUF_NO_CHECKSUM_MAGIC) {
return TRUE;
}
return stored == page_zip_calc_checksum(
- data, size, SRV_CHECKSUM_ALGORITHM_CRC32)
-#ifdef INNODB_BUG_ENDIAN_CRC32
- || stored == page_zip_calc_checksum(
- data, size,
- SRV_CHECKSUM_ALGORITHM_CRC32, true)
-#endif
- ;
+ data, size, SRV_CHECKSUM_ALGORITHM_CRC32);
case SRV_CHECKSUM_ALGORITHM_NONE:
return TRUE;
}
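For the CRC32 family, page_zip_calc_checksum() above XORs three CRC32 values so that the stored checksum field, FIL_PAGE_LSN and FIL_PAGE_FILE_FLUSH_LSN are excluded from the calculation. A worked sketch, assuming the standard page header offsets (FIL_PAGE_OFFSET = 4, FIL_PAGE_LSN = 16, FIL_PAGE_TYPE = 24, FIL_PAGE_FILE_FLUSH_LSN = 26, FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID = 34); crc32_of() stands in for ut_crc32():

    #include <cstddef>
    #include <cstdint>

    extern uint32_t crc32_of(const uint8_t* data, size_t len);

    uint32_t page_zip_crc32_sketch(const uint8_t* page, size_t size)
    {
        // bytes  0..3  : stored checksum          -> skipped
        // bytes  4..15 : page number, prev, next  -> covered
        // bytes 16..23 : FIL_PAGE_LSN             -> skipped
        // bytes 24..25 : FIL_PAGE_TYPE            -> covered
        // bytes 26..33 : FIL_PAGE_FILE_FLUSH_LSN  -> skipped
        // bytes 34..   : space id + payload       -> covered
        return crc32_of(page + 4, 16 - 4)
             ^ crc32_of(page + 24, 2)
             ^ crc32_of(page + 34, size - 34);
    }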
diff --git a/storage/innobase/read/read0read.cc b/storage/innobase/read/read0read.cc
index 470c8ec63f1..df2406fb0e4 100644
--- a/storage/innobase/read/read0read.cc
+++ b/storage/innobase/read/read0read.cc
@@ -200,7 +200,7 @@ inline void ReadView::snapshot(trx_t *trx)
void ReadView::open(trx_t *trx)
{
ut_ad(this == &trx->read_view);
- switch (m_state)
+ switch (state())
{
case READ_VIEW_STATE_OPEN:
ut_ad(!srv_read_only_mode);
@@ -254,8 +254,7 @@ void ReadView::open(trx_t *trx)
*/
mutex_enter(&trx_sys.mutex);
mutex_exit(&trx_sys.mutex);
- my_atomic_store32_explicit(&m_state, READ_VIEW_STATE_SNAPSHOT,
- MY_MEMORY_ORDER_RELAXED);
+ m_state.store(READ_VIEW_STATE_SNAPSHOT, std::memory_order_relaxed);
break;
default:
ut_ad(0);
@@ -264,8 +263,7 @@ void ReadView::open(trx_t *trx)
snapshot(trx);
reopen:
m_creator_trx_id= trx->id;
- my_atomic_store32_explicit(&m_state, READ_VIEW_STATE_OPEN,
- MY_MEMORY_ORDER_RELEASE);
+ m_state.store(READ_VIEW_STATE_OPEN, std::memory_order_release);
}
@@ -284,7 +282,7 @@ void trx_sys_t::clone_oldest_view()
for (const trx_t *trx= UT_LIST_GET_FIRST(trx_list); trx;
trx= UT_LIST_GET_NEXT(trx_list, trx))
{
- int32_t state;
+ uint32_t state;
while ((state= trx->read_view.get_state()) == READ_VIEW_STATE_SNAPSHOT)
ut_delay(1);
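ReadView::m_state is now a std::atomic, and ReadView::open() above publishes the view in two steps: it stores READ_VIEW_STATE_SNAPSHOT with relaxed ordering while the snapshot is still being built, and stores READ_VIEW_STATE_OPEN with release ordering once it is complete. A minimal sketch of that publication pattern (the member names and enum values below are simplified stand-ins, not the real ReadView interface):

    #include <atomic>
    #include <cstdint>

    enum { STATE_CLOSED, STATE_SNAPSHOT, STATE_OPEN };

    struct view_sketch
    {
        std::atomic<uint32_t> m_state{STATE_CLOSED};
        uint64_t m_up_limit_id = 0;     // part of the snapshot payload

        void open_sketch(uint64_t up_limit_id)
        {
            // Intermediate state: the snapshot is being built and must
            // not be trusted by concurrent readers yet.
            m_state.store(STATE_SNAPSHOT, std::memory_order_relaxed);
            m_up_limit_id = up_limit_id;    // build the snapshot
            // Release store: a reader that observes STATE_OPEN with an
            // acquire load also sees the completed snapshot above.
            m_state.store(STATE_OPEN, std::memory_order_release);
        }

        bool is_open() const
        { return m_state.load(std::memory_order_acquire) == STATE_OPEN; }
    };

trx_sys_t::clone_oldest_view() above spins with ut_delay() while it observes the intermediate SNAPSHOT state, which is how a half-built view is kept out of the oldest-view computation.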
diff --git a/storage/innobase/rem/rem0cmp.cc b/storage/innobase/rem/rem0cmp.cc
index 6b914c46ecf..623d0bd1aa9 100644
--- a/storage/innobase/rem/rem0cmp.cc
+++ b/storage/innobase/rem/rem0cmp.cc
@@ -797,7 +797,7 @@ cmp_dtuple_rec_with_match_bytes(
& rec_get_info_bits(rec, rec_offs_comp(offsets)))) {
ut_ad(page_rec_is_first(rec, page_align(rec)));
ut_ad(!page_has_prev(page_align(rec)));
- ut_ad(rec_is_metadata(rec, index));
+ ut_ad(rec_is_metadata(rec, *index));
return 1;
}
diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc
index f7f11d012fd..3828699e5f0 100644
--- a/storage/innobase/rem/rem0rec.cc
+++ b/storage/innobase/rem/rem0rec.cc
@@ -169,7 +169,7 @@ rec_get_n_extern_new(
ut_ad(!index->table->supports_instant() || index->is_dummy);
ut_ad(!index->is_instant());
ut_ad(rec_get_status(rec) == REC_STATUS_ORDINARY
- || rec_get_status(rec) == REC_STATUS_COLUMNS_ADDED);
+ || rec_get_status(rec) == REC_STATUS_INSTANT);
ut_ad(n == ULINT_UNDEFINED || n <= dict_index_get_n_fields(index));
if (n == ULINT_UNDEFINED) {
@@ -231,8 +231,8 @@ rec_get_n_extern_new(
return(n_extern);
}
-/** Get the added field count in a REC_STATUS_COLUMNS_ADDED record.
-@param[in,out] header variable header of a REC_STATUS_COLUMNS_ADDED record
+/** Get the added field count in a REC_STATUS_INSTANT record.
+@param[in,out] header variable header of a REC_STATUS_INSTANT record
@return number of added fields */
static inline unsigned rec_get_n_add_field(const byte*& header)
{
@@ -253,18 +253,18 @@ static inline unsigned rec_get_n_add_field(const byte*& header)
enum rec_leaf_format {
/** Temporary file record */
REC_LEAF_TEMP,
- /** Temporary file record, with added columns
- (REC_STATUS_COLUMNS_ADDED) */
- REC_LEAF_TEMP_COLUMNS_ADDED,
+ /** Temporary file record, with added columns (REC_STATUS_INSTANT) */
+ REC_LEAF_TEMP_INSTANT,
/** Normal (REC_STATUS_ORDINARY) */
REC_LEAF_ORDINARY,
- /** With added columns (REC_STATUS_COLUMNS_ADDED) */
- REC_LEAF_COLUMNS_ADDED
+ /** With add or drop columns (REC_STATUS_INSTANT) */
+ REC_LEAF_INSTANT
};
/** Determine the offset to each field in a leaf-page record
in ROW_FORMAT=COMPACT,DYNAMIC,COMPRESSED.
This is a special case of rec_init_offsets() and rec_get_offsets_func().
+@tparam mblob whether the record includes a metadata BLOB
@param[in] rec leaf-page record
@param[in] index the index that the record belongs in
@param[in] n_core number of core fields (index->n_core_fields)
@@ -272,6 +272,7 @@ This is a special case of rec_init_offsets() and rec_get_offsets_func().
NULL to refer to index->fields[].col->def_val
@param[in,out] offsets offsets, with valid rec_offs_n_fields(offsets)
@param[in] format record format */
+template<bool mblob = false>
static inline
void
rec_init_offsets_comp_ordinary(
@@ -293,12 +294,32 @@ rec_init_offsets_comp_ordinary(
ut_ad(n_core > 0);
ut_ad(index->n_fields >= n_core);
ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable));
- ut_ad(format == REC_LEAF_TEMP || format == REC_LEAF_TEMP_COLUMNS_ADDED
+ ut_ad(format == REC_LEAF_TEMP || format == REC_LEAF_TEMP_INSTANT
|| dict_table_is_comp(index->table));
- ut_ad(format != REC_LEAF_TEMP_COLUMNS_ADDED
+ ut_ad(format != REC_LEAF_TEMP_INSTANT
|| index->n_fields == rec_offs_n_fields(offsets));
ut_d(ulint n_null= 0);
+ if (mblob) {
+ ut_ad(index->is_dummy || index->table->instant);
+ ut_ad(index->is_dummy || index->is_instant());
+ ut_ad(rec_offs_n_fields(offsets)
+ <= ulint(index->n_fields) + 1);
+ ut_ad(!def_val);
+ ut_ad(format == REC_LEAF_INSTANT);
+ nulls -= REC_N_NEW_EXTRA_BYTES;
+ n_fields = n_core + 1 + rec_get_n_add_field(nulls);
+ ut_ad(n_fields <= ulint(index->n_fields) + 1);
+ const ulint n_nullable = index->get_n_nullable(n_fields - 1);
+ const ulint n_null_bytes = UT_BITS_IN_BYTES(n_nullable);
+ ut_d(n_null = n_nullable);
+ ut_ad(n_null <= index->n_nullable);
+ ut_ad(n_null_bytes >= index->n_core_null_bytes
+ || n_core < index->n_core_fields);
+ lens = --nulls - n_null_bytes;
+ goto start;
+ }
+
switch (format) {
case REC_LEAF_TEMP:
if (dict_table_is_comp(index->table)) {
@@ -312,17 +333,15 @@ rec_init_offsets_comp_ordinary(
ordinary:
lens = --nulls - index->n_core_null_bytes;
- ut_d(n_null = std::min(index->n_core_null_bytes * 8U,
- index->n_nullable));
+ ut_d(n_null = std::min<uint>(index->n_core_null_bytes * 8U,
+ index->n_nullable));
break;
- case REC_LEAF_COLUMNS_ADDED:
- /* We would have !index->is_instant() when rolling back
- an instant ADD COLUMN operation. */
+ case REC_LEAF_INSTANT:
nulls -= REC_N_NEW_EXTRA_BYTES;
ut_ad(index->is_instant());
/* fall through */
- case REC_LEAF_TEMP_COLUMNS_ADDED:
- n_fields = n_core + 1 + rec_get_n_add_field(nulls);
+ case REC_LEAF_TEMP_INSTANT:
+ n_fields = n_core + rec_get_n_add_field(nulls) + 1;
ut_ad(n_fields <= index->n_fields);
const ulint n_nullable = index->get_n_nullable(n_fields);
const ulint n_null_bytes = UT_BITS_IN_BYTES(n_nullable);
@@ -333,26 +352,34 @@ ordinary:
lens = --nulls - n_null_bytes;
}
-#ifdef UNIV_DEBUG
+start:
/* We cannot invoke rec_offs_make_valid() if format==REC_LEAF_TEMP.
Similarly, rec_offs_validate() will fail in that case, because
it invokes rec_get_status(). */
- offsets[2] = (ulint) rec;
- offsets[3] = (ulint) index;
-#endif /* UNIV_DEBUG */
+ ut_d(offsets[2] = ulint(rec));
+ ut_d(offsets[3] = ulint(index));
/* read the lengths of fields 0..n_fields */
+ ulint len;
ulint i = 0;
- do {
- const dict_field_t* field
- = dict_index_get_nth_field(index, i);
- const dict_col_t* col
- = dict_field_get_col(field);
- ulint len;
+ const dict_field_t* field = index->fields;
- /* set default value flag */
- if (i < n_fields) {
- } else if (def_val) {
+ do {
+ if (mblob) {
+ if (i == index->first_user_field()) {
+ offs += FIELD_REF_SIZE;
+ len = offs | REC_OFFS_EXTERNAL;
+ any |= REC_OFFS_EXTERNAL;
+ field--;
+ continue;
+ } else if (i >= n_fields) {
+ len = offs | REC_OFFS_DEFAULT;
+ any |= REC_OFFS_DEFAULT;
+ continue;
+ }
+ } else if (i < n_fields) {
+ /* The field is present, and will be covered below. */
+ } else if (!mblob && def_val) {
const dict_col_t::def_t& d = def_val[i - n_core];
if (!d.data) {
len = offs | REC_OFFS_SQL_NULL;
@@ -362,21 +389,22 @@ ordinary:
any |= REC_OFFS_DEFAULT;
}
- goto resolved;
+ continue;
} else {
- ulint dlen;
- if (!index->instant_field_value(i, &dlen)) {
+ if (!index->instant_field_value(i, &len)) {
+ ut_ad(len == UNIV_SQL_NULL);
len = offs | REC_OFFS_SQL_NULL;
- ut_ad(dlen == UNIV_SQL_NULL);
} else {
len = offs | REC_OFFS_DEFAULT;
any |= REC_OFFS_DEFAULT;
}
- goto resolved;
+ continue;
}
- if (!(col->prtype & DATA_NOT_NULL)) {
+ const dict_col_t* col = field->col;
+
+ if (col->is_nullable()) {
/* nullable field => read the null flag */
ut_ad(n_null--);
@@ -392,7 +420,7 @@ ordinary:
the length to zero and enable the
SQL NULL flag in offsets[]. */
len = offs | REC_OFFS_SQL_NULL;
- goto resolved;
+ continue;
}
null_mask <<= 1;
}
@@ -423,16 +451,15 @@ ordinary:
len = offs;
}
- goto resolved;
+ continue;
}
len = offs += len;
} else {
len = offs += field->fixed_len;
}
-resolved:
- rec_offs_base(offsets)[i + 1] = len;
- } while (++i < rec_offs_n_fields(offsets));
+ } while (field++, rec_offs_base(offsets)[++i] = len,
+ i < rec_offs_n_fields(offsets));
*rec_offs_base(offsets)
= ulint(rec - (lens + 1)) | REC_OFFS_COMPACT | any;
@@ -451,7 +478,10 @@ rec_offs_make_valid(
bool leaf,
ulint* offsets)
{
- ut_ad(rec_offs_n_fields(offsets)
+ const bool is_alter_metadata = leaf
+ && rec_is_alter_metadata(rec, *index);
+ ut_ad(is_alter_metadata
+ || rec_offs_n_fields(offsets)
<= (leaf
? dict_index_get_n_fields(index)
: dict_index_get_n_unique_in_tree_nonleaf(index) + 1)
@@ -469,7 +499,8 @@ rec_offs_make_valid(
|| n >= rec_offs_n_fields(offsets));
for (; n < rec_offs_n_fields(offsets); n++) {
ut_ad(leaf);
- ut_ad(rec_offs_base(offsets)[1 + n] & REC_OFFS_DEFAULT);
+ ut_ad(is_alter_metadata
+ || rec_offs_base(offsets)[1 + n] & REC_OFFS_DEFAULT);
}
offsets[2] = ulint(rec);
offsets[3] = ulint(index);
@@ -509,14 +540,18 @@ rec_offs_validate(
}
}
if (index) {
- ulint max_n_fields;
ut_ad(ulint(index) == offsets[3]);
- max_n_fields = ut_max(
+ ulint max_n_fields = ut_max(
dict_index_get_n_fields(index),
dict_index_get_n_unique_in_tree(index) + 1);
if (comp && rec) {
switch (rec_get_status(rec)) {
- case REC_STATUS_COLUMNS_ADDED:
+ case REC_STATUS_INSTANT:
+ ut_ad(index->is_instant() || index->is_dummy);
+ ut_ad(max_n_fields == index->n_fields);
+ max_n_fields += index->table->instant
+ || index->is_dummy;
+ break;
case REC_STATUS_ORDINARY:
break;
case REC_STATUS_NODE_PTR:
@@ -530,14 +565,19 @@ rec_offs_validate(
default:
ut_error;
}
+ } else if (max_n_fields == index->n_fields
+ && (index->is_dummy
+ || (index->is_instant()
+ && index->table->instant))) {
+ max_n_fields++;
}
/* index->n_def == 0 for dummy indexes if !comp */
- ut_a(!comp || index->n_def);
- ut_a(!index->n_def || i <= max_n_fields);
+ ut_ad(!comp || index->n_def);
+ ut_ad(!index->n_def || i <= max_n_fields);
}
while (i--) {
ulint curr = rec_offs_base(offsets)[1 + i] & REC_OFFS_MASK;
- ut_a(curr <= last);
+ ut_ad(curr <= last);
last = curr;
}
return(TRUE);
@@ -572,7 +612,13 @@ rec_init_offsets(
ulint i = 0;
ulint offs;
- ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable));
+ /* This assertion was relaxed for the btr_cur_open_at_index_side()
+ call in btr_cur_instant_init_low(). We cannot invoke
+ index->is_instant(), because the same assertion would fail there
+ until btr_cur_instant_init_low() has invoked
+ dict_table_t::deserialise_columns(). */
+ ut_ad(index->n_core_null_bytes <= UT_BITS_IN_BYTES(index->n_nullable)
+ || index->in_instant_init);
ut_d(offsets[2] = ulint(rec));
ut_d(offsets[3] = ulint(index));
@@ -598,12 +644,12 @@ rec_init_offsets(
= dict_index_get_n_unique_in_tree_nonleaf(
index);
break;
- case REC_STATUS_COLUMNS_ADDED:
+ case REC_STATUS_INSTANT:
ut_ad(leaf);
rec_init_offsets_comp_ordinary(rec, index, offsets,
index->n_core_fields,
NULL,
- REC_LEAF_COLUMNS_ADDED);
+ REC_LEAF_INSTANT);
return;
case REC_STATUS_ORDINARY:
ut_ad(leaf);
@@ -780,14 +826,17 @@ rec_get_offsets_func(
{
ulint n;
ulint size;
+ bool alter_metadata = false;
if (dict_table_is_comp(index->table)) {
switch (UNIV_EXPECT(rec_get_status(rec),
REC_STATUS_ORDINARY)) {
- case REC_STATUS_COLUMNS_ADDED:
+ case REC_STATUS_INSTANT:
+ alter_metadata = rec_is_alter_metadata(rec, true);
+ /* fall through */
case REC_STATUS_ORDINARY:
ut_ad(leaf);
- n = dict_index_get_n_fields(index);
+ n = dict_index_get_n_fields(index) + alter_metadata;
break;
case REC_STATUS_NODE_PTR:
/* Node pointer records consist of the
@@ -831,7 +880,8 @@ rec_get_offsets_func(
|| dict_index_is_ibuf(index)
|| n == n_fields /* btr_pcur_restore_position() */
|| (n + (index->id == DICT_INDEXES_ID)
- >= index->n_core_fields && n <= index->n_fields));
+ >= index->n_core_fields && n <= index->n_fields
+ + unsigned(rec_is_alter_metadata(rec, false))));
if (is_user_rec && leaf && n < index->n_fields) {
ut_ad(!index->is_dummy);
@@ -861,8 +911,24 @@ rec_get_offsets_func(
}
rec_offs_set_n_fields(offsets, n);
- rec_init_offsets(rec, index, leaf, offsets);
- return(offsets);
+
+ if (UNIV_UNLIKELY(alter_metadata)
+ && dict_table_is_comp(index->table)) {
+ ut_d(offsets[2] = ulint(rec));
+ ut_d(offsets[3] = ulint(index));
+ ut_ad(leaf);
+ ut_ad(index->is_dummy || index->table->instant);
+ ut_ad(index->is_dummy || index->is_instant());
+ ut_ad(rec_offs_n_fields(offsets)
+ <= ulint(index->n_fields) + 1);
+ rec_init_offsets_comp_ordinary<true>(rec, index, offsets,
+ index->n_core_fields,
+ NULL,
+ REC_LEAF_INSTANT);
+ } else {
+ rec_init_offsets(rec, index, leaf, offsets);
+ }
+ return offsets;
}
/******************************************************//**
@@ -1035,36 +1101,45 @@ rec_get_nth_field_offs_old(
return(os);
}
-/**********************************************************//**
-Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
+/** Determine the size of a data tuple prefix in ROW_FORMAT=COMPACT.
+@tparam mblob whether the record includes a metadata BLOB
+@param[in] index record descriptor; dict_table_is_comp()
+ is assumed to hold, even if it doesn't
+@param[in] dfield array of data fields
+@param[in] n_fields number of data fields
+@param[out] extra extra size
+@param[in] status status flags
+@param[in] temp whether this is a temporary file record
@return total size */
-MY_ATTRIBUTE((warn_unused_result, nonnull(1,2)))
+template<bool mblob = false>
static inline
ulint
rec_get_converted_size_comp_prefix_low(
-/*===================================*/
- const dict_index_t* index, /*!< in: record descriptor;
- dict_table_is_comp() is
- assumed to hold, even if
- it does not */
- const dfield_t* fields, /*!< in: array of data fields */
- ulint n_fields,/*!< in: number of data fields */
- ulint* extra, /*!< out: extra size */
- rec_comp_status_t status, /*!< in: status flags */
- bool temp) /*!< in: whether this is a
- temporary file record */
+ const dict_index_t* index,
+ const dfield_t* dfield,
+ ulint n_fields,
+ ulint* extra,
+ rec_comp_status_t status,
+ bool temp)
{
ulint extra_size = temp ? 0 : REC_N_NEW_EXTRA_BYTES;
- ulint data_size;
- ulint i;
ut_ad(n_fields > 0);
- ut_ad(n_fields <= dict_index_get_n_fields(index));
+ ut_ad(n_fields <= dict_index_get_n_fields(index) + mblob);
ut_d(ulint n_null = index->n_nullable);
ut_ad(status == REC_STATUS_ORDINARY || status == REC_STATUS_NODE_PTR
- || status == REC_STATUS_COLUMNS_ADDED);
+ || status == REC_STATUS_INSTANT);
- if (status == REC_STATUS_COLUMNS_ADDED
- && (!temp || n_fields > index->n_core_fields)) {
+ if (mblob) {
+ ut_ad(!temp);
+ ut_ad(index->table->instant);
+ ut_ad(index->is_instant());
+ ut_ad(status == REC_STATUS_INSTANT);
+ ut_ad(n_fields == ulint(index->n_fields) + 1);
+ extra_size += UT_BITS_IN_BYTES(index->n_nullable)
+ + rec_get_n_add_field_len(n_fields - 1
+ - index->n_core_fields);
+ } else if (status == REC_STATUS_INSTANT
+ && (!temp || n_fields > index->n_core_fields)) {
ut_ad(index->is_instant());
ut_ad(UT_BITS_IN_BYTES(n_null) >= index->n_core_null_bytes);
extra_size += UT_BITS_IN_BYTES(index->get_n_nullable(n_fields))
@@ -1075,7 +1150,7 @@ rec_get_converted_size_comp_prefix_low(
extra_size += index->n_core_null_bytes;
}
- data_size = 0;
+ ulint data_size = 0;
if (temp && dict_table_is_comp(index->table)) {
/* No need to do adjust fixed_len=0. We only need to
@@ -1083,46 +1158,50 @@ rec_get_converted_size_comp_prefix_low(
temp = false;
}
+ const dfield_t* const end = dfield + n_fields;
/* read the lengths of fields 0..n */
- for (i = 0; i < n_fields; i++) {
- const dict_field_t* field;
- ulint len;
- ulint fixed_len;
- const dict_col_t* col;
+ for (ulint i = 0; dfield < end; i++, dfield++) {
+ if (mblob && i == index->first_user_field()) {
+ data_size += FIELD_REF_SIZE;
+ ++dfield;
+ }
- field = dict_index_get_nth_field(index, i);
- len = dfield_get_len(&fields[i]);
- col = dict_field_get_col(field);
+ ulint len = dfield_get_len(dfield);
+ const dict_field_t* field = dict_index_get_nth_field(index, i);
#ifdef UNIV_DEBUG
- const dtype_t* type = dfield_get_type(&fields[i]);
if (dict_index_is_spatial(index)) {
- if (DATA_GEOMETRY_MTYPE(col->mtype) && i == 0) {
- ut_ad(type->prtype & DATA_GIS_MBR);
+ if (DATA_GEOMETRY_MTYPE(field->col->mtype) && i == 0) {
+ ut_ad(dfield->type.prtype & DATA_GIS_MBR);
} else {
- ut_ad(type->mtype == DATA_SYS_CHILD
- || dict_col_type_assert_equal(col, type));
+ ut_ad(dfield->type.mtype == DATA_SYS_CHILD
+ || dict_col_type_assert_equal(
+ field->col, &dfield->type));
}
} else {
- ut_ad(dict_col_type_assert_equal(col, type));
+ ut_ad(field->col->is_dropped()
+ || dict_col_type_assert_equal(field->col,
+ &dfield->type));
}
#endif
/* All NULLable fields must be included in the n_null count. */
- ut_ad((col->prtype & DATA_NOT_NULL) || n_null--);
+ ut_ad(!field->col->is_nullable() || n_null--);
- if (dfield_is_null(&fields[i])) {
+ if (dfield_is_null(dfield)) {
/* No length is stored for NULL fields. */
- ut_ad(!(col->prtype & DATA_NOT_NULL));
+ ut_ad(field->col->is_nullable());
continue;
}
- ut_ad(len <= col->len || DATA_LARGE_MTYPE(col->mtype)
- || (col->len == 0 && col->mtype == DATA_VARCHAR));
+ ut_ad(len <= field->col->len
+ || DATA_LARGE_MTYPE(field->col->mtype)
+ || (field->col->len == 0
+ && field->col->mtype == DATA_VARCHAR));
- fixed_len = field->fixed_len;
+ ulint fixed_len = field->fixed_len;
if (temp && fixed_len
- && !dict_col_get_fixed_size(col, temp)) {
+ && !dict_col_get_fixed_size(field->col, temp)) {
fixed_len = 0;
}
/* If the maximum length of a variable-length field
@@ -1137,25 +1216,27 @@ rec_get_converted_size_comp_prefix_low(
ut_ad(len <= fixed_len);
if (dict_index_is_spatial(index)) {
- ut_ad(type->mtype == DATA_SYS_CHILD
- || !col->mbmaxlen
- || len >= col->mbminlen
- * fixed_len / col->mbmaxlen);
+ ut_ad(dfield->type.mtype == DATA_SYS_CHILD
+ || !field->col->mbmaxlen
+ || len >= field->col->mbminlen
+ * fixed_len / field->col->mbmaxlen);
} else {
- ut_ad(type->mtype != DATA_SYS_CHILD);
- ut_ad(!col->mbmaxlen
- || len >= col->mbminlen
- * fixed_len / col->mbmaxlen);
+ ut_ad(dfield->type.mtype != DATA_SYS_CHILD);
+
+ ut_ad(field->col->is_dropped()
+ || !field->col->mbmaxlen
+ || len >= field->col->mbminlen
+ * fixed_len / field->col->mbmaxlen);
}
/* dict_index_add_col() should guarantee this */
ut_ad(!field->prefix_len
|| fixed_len == field->prefix_len);
#endif /* UNIV_DEBUG */
- } else if (dfield_is_ext(&fields[i])) {
- ut_ad(DATA_BIG_COL(col));
+ } else if (dfield_is_ext(dfield)) {
+ ut_ad(DATA_BIG_COL(field->col));
extra_size += 2;
- } else if (len < 128 || !DATA_BIG_COL(col)) {
+ } else if (len < 128 || !DATA_BIG_COL(field->col)) {
extra_size++;
} else {
/* For variable-length columns, we look up the
@@ -1191,43 +1272,51 @@ rec_get_converted_size_comp_prefix(
REC_STATUS_ORDINARY, false));
}
-/**********************************************************//**
-Determines the size of a data tuple in ROW_FORMAT=COMPACT.
+/** Determine the size of a record in ROW_FORMAT=COMPACT.
+@param[in] index record descriptor. dict_table_is_comp()
+ is assumed to hold, even if it doesn't
+@param[in] tuple logical record
+@param[out] extra extra size
@return total size */
ulint
rec_get_converted_size_comp(
-/*========================*/
- const dict_index_t* index, /*!< in: record descriptor;
- dict_table_is_comp() is
- assumed to hold, even if
- it does not */
- rec_comp_status_t status, /*!< in: status bits of the record */
- const dfield_t* fields, /*!< in: array of data fields */
- ulint n_fields,/*!< in: number of data fields */
- ulint* extra) /*!< out: extra size */
+ const dict_index_t* index,
+ const dtuple_t* tuple,
+ ulint* extra)
{
- ut_ad(n_fields > 0);
+ ut_ad(tuple->n_fields > 0);
+
+ rec_comp_status_t status = rec_comp_status_t(tuple->info_bits
+ & REC_NEW_STATUS_MASK);
switch (UNIV_EXPECT(status, REC_STATUS_ORDINARY)) {
case REC_STATUS_ORDINARY:
- if (n_fields > index->n_core_fields) {
+ ut_ad(!tuple->is_metadata());
+ if (tuple->n_fields > index->n_core_fields) {
ut_ad(index->is_instant());
- status = REC_STATUS_COLUMNS_ADDED;
+ status = REC_STATUS_INSTANT;
}
/* fall through */
- case REC_STATUS_COLUMNS_ADDED:
- ut_ad(n_fields >= index->n_core_fields);
- ut_ad(n_fields <= index->n_fields);
+ case REC_STATUS_INSTANT:
+ ut_ad(tuple->n_fields >= index->n_core_fields);
+ if (tuple->is_alter_metadata()) {
+ return rec_get_converted_size_comp_prefix_low<true>(
+ index, tuple->fields, tuple->n_fields,
+ extra, status, false);
+ }
+ ut_ad(tuple->n_fields <= index->n_fields);
return rec_get_converted_size_comp_prefix_low(
- index, fields, n_fields, extra, status, false);
+ index, tuple->fields, tuple->n_fields,
+ extra, status, false);
case REC_STATUS_NODE_PTR:
- n_fields--;
- ut_ad(n_fields == dict_index_get_n_unique_in_tree_nonleaf(
- index));
- ut_ad(dfield_get_len(&fields[n_fields]) == REC_NODE_PTR_SIZE);
+ ut_ad(tuple->n_fields - 1
+ == dict_index_get_n_unique_in_tree_nonleaf(index));
+ ut_ad(dfield_get_len(&tuple->fields[tuple->n_fields - 1])
+ == REC_NODE_PTR_SIZE);
return REC_NODE_PTR_SIZE /* child page number */
+ rec_get_converted_size_comp_prefix_low(
- index, fields, n_fields, extra, status, false);
+ index, tuple->fields, tuple->n_fields - 1,
+ extra, status, false);
case REC_STATUS_INFIMUM:
case REC_STATUS_SUPREMUM:
/* not supported */
@@ -1405,33 +1494,30 @@ rec_convert_dtuple_to_rec_old(
}
/** Convert a data tuple into a ROW_FORMAT=COMPACT record.
+@tparam mblob whether the record includes a metadata BLOB
@param[out] rec converted record
@param[in] index index
-@param[in] fields data fields to convert
+@param[in] field data fields to convert
@param[in] n_fields number of data fields
@param[in] status rec_get_status(rec)
@param[in] temp whether to use the format for temporary files
in index creation */
+template<bool mblob = false>
static inline
void
rec_convert_dtuple_to_rec_comp(
rec_t* rec,
const dict_index_t* index,
- const dfield_t* fields,
+ const dfield_t* field,
ulint n_fields,
rec_comp_status_t status,
bool temp)
{
- const dfield_t* field;
- const dtype_t* type;
byte* end;
byte* nulls = temp
? rec - 1 : rec - (REC_N_NEW_EXTRA_BYTES + 1);
byte* UNINIT_VAR(lens);
- ulint len;
- ulint i;
ulint UNINIT_VAR(n_node_ptr_field);
- ulint fixed_len;
ulint null_mask = 1;
ut_ad(n_fields > 0);
@@ -1440,8 +1526,22 @@ rec_convert_dtuple_to_rec_comp(
ut_d(ulint n_null = index->n_nullable);
+ if (mblob) {
+ ut_ad(!temp);
+ ut_ad(index->table->instant);
+ ut_ad(index->is_instant());
+ ut_ad(status == REC_STATUS_INSTANT);
+ ut_ad(n_fields == ulint(index->n_fields) + 1);
+ rec_set_n_add_field(nulls, n_fields - 1
+ - index->n_core_fields);
+ rec_set_heap_no_new(rec, PAGE_HEAP_NO_USER_LOW);
+ rec_set_status(rec, REC_STATUS_INSTANT);
+ n_node_ptr_field = ULINT_UNDEFINED;
+ lens = nulls - UT_BITS_IN_BYTES(index->n_nullable);
+ goto start;
+ }
switch (status) {
- case REC_STATUS_COLUMNS_ADDED:
+ case REC_STATUS_INSTANT:
ut_ad(index->is_instant());
ut_ad(n_fields > index->n_core_fields);
rec_set_n_add_field(nulls, n_fields - 1
@@ -1451,19 +1551,24 @@ rec_convert_dtuple_to_rec_comp(
ut_ad(n_fields <= dict_index_get_n_fields(index));
if (!temp) {
rec_set_heap_no_new(rec, PAGE_HEAP_NO_USER_LOW);
- rec_set_status(rec, n_fields == index->n_core_fields
- ? REC_STATUS_ORDINARY
- : REC_STATUS_COLUMNS_ADDED);
- } if (dict_table_is_comp(index->table)) {
+
+ rec_set_status(
+ rec, n_fields == index->n_core_fields
+ ? REC_STATUS_ORDINARY
+ : REC_STATUS_INSTANT);
+ }
+
+ if (dict_table_is_comp(index->table)) {
/* No need to do adjust fixed_len=0. We only
need to adjust it for ROW_FORMAT=REDUNDANT. */
temp = false;
}
n_node_ptr_field = ULINT_UNDEFINED;
+
lens = nulls - (index->is_instant()
? UT_BITS_IN_BYTES(index->get_n_nullable(
- n_fields))
+ n_fields))
: UT_BITS_IN_BYTES(
unsigned(index->n_nullable)));
break;
@@ -1473,8 +1578,8 @@ rec_convert_dtuple_to_rec_comp(
rec_set_status(rec, status);
ut_ad(n_fields
== dict_index_get_n_unique_in_tree_nonleaf(index) + 1);
- ut_d(n_null = std::min(index->n_core_null_bytes * 8U,
- index->n_nullable));
+ ut_d(n_null = std::min<uint>(index->n_core_null_bytes * 8U,
+ index->n_nullable));
n_node_ptr_field = n_fields - 1;
lens = nulls - index->n_core_null_bytes;
break;
@@ -1484,30 +1589,33 @@ rec_convert_dtuple_to_rec_comp(
return;
}
+start:
end = rec;
/* clear the SQL-null flags */
memset(lens + 1, 0, ulint(nulls - lens));
+ const dfield_t* const fend = field + n_fields;
/* Store the data and the offsets */
-
- for (i = 0; i < n_fields; i++) {
- const dict_field_t* ifield;
- dict_col_t* col = NULL;
-
- field = &fields[i];
-
- type = dfield_get_type(field);
- len = dfield_get_len(field);
-
- if (UNIV_UNLIKELY(i == n_node_ptr_field)) {
- ut_ad(dtype_get_prtype(type) & DATA_NOT_NULL);
+ for (ulint i = 0; field < fend; i++, field++) {
+ ulint len = dfield_get_len(field);
+
+ if (mblob) {
+ if (i == index->first_user_field()) {
+ ut_ad(len == FIELD_REF_SIZE);
+ ut_ad(dfield_is_ext(field));
+ memcpy(end, dfield_get_data(field), len);
+ end += len;
+ len = dfield_get_len(++field);
+ }
+ } else if (UNIV_UNLIKELY(i == n_node_ptr_field)) {
+ ut_ad(field->type.prtype & DATA_NOT_NULL);
ut_ad(len == REC_NODE_PTR_SIZE);
memcpy(end, dfield_get_data(field), len);
end += REC_NODE_PTR_SIZE;
break;
}
- if (!(dtype_get_prtype(type) & DATA_NOT_NULL)) {
+ if (!(field->type.prtype & DATA_NOT_NULL)) {
/* nullable field */
ut_ad(n_null--);
@@ -1530,11 +1638,12 @@ rec_convert_dtuple_to_rec_comp(
/* only nullable fields can be null */
ut_ad(!dfield_is_null(field));
- ifield = dict_index_get_nth_field(index, i);
- fixed_len = ifield->fixed_len;
- col = ifield->col;
+ const dict_field_t* ifield
+ = dict_index_get_nth_field(index, i);
+ ulint fixed_len = ifield->fixed_len;
+
if (temp && fixed_len
- && !dict_col_get_fixed_size(col, temp)) {
+ && !dict_col_get_fixed_size(ifield->col, temp)) {
fixed_len = 0;
}
@@ -1546,23 +1655,23 @@ rec_convert_dtuple_to_rec_comp(
it is 128 or more, or when the field is stored externally. */
if (fixed_len) {
ut_ad(len <= fixed_len);
- ut_ad(!col->mbmaxlen
- || len >= col->mbminlen
- * fixed_len / col->mbmaxlen);
+ ut_ad(!ifield->col->mbmaxlen
+ || len >= ifield->col->mbminlen
+ * fixed_len / ifield->col->mbmaxlen);
ut_ad(!dfield_is_ext(field));
} else if (dfield_is_ext(field)) {
- ut_ad(DATA_BIG_COL(col));
+ ut_ad(DATA_BIG_COL(ifield->col));
ut_ad(len <= REC_ANTELOPE_MAX_INDEX_COL_LEN
- + BTR_EXTERN_FIELD_REF_SIZE);
+ + BTR_EXTERN_FIELD_REF_SIZE);
*lens-- = (byte) (len >> 8) | 0xc0;
*lens-- = (byte) len;
} else {
- ut_ad(len <= dtype_get_len(type)
- || DATA_LARGE_MTYPE(dtype_get_mtype(type))
+ ut_ad(len <= field->type.len
+ || DATA_LARGE_MTYPE(field->type.mtype)
|| !strcmp(index->name,
FTS_INDEX_TABLE_IND_NAME));
if (len < 128 || !DATA_BIG_LEN_MTYPE(
- dtype_get_len(type), dtype_get_mtype(type))) {
+ field->type.len, field->type.mtype)) {
*lens-- = (byte) len;
} else {
@@ -1595,24 +1704,37 @@ rec_convert_dtuple_to_rec_new(
ut_ad(!(dtuple->info_bits
& ~(REC_NEW_STATUS_MASK | REC_INFO_DELETED_FLAG
| REC_INFO_MIN_REC_FLAG)));
- rec_comp_status_t status = static_cast<rec_comp_status_t>(
- dtuple->info_bits & REC_NEW_STATUS_MASK);
- if (status == REC_STATUS_ORDINARY
- && dtuple->n_fields > index->n_core_fields) {
- ut_ad(index->is_instant());
- status = REC_STATUS_COLUMNS_ADDED;
- }
ulint extra_size;
- rec_get_converted_size_comp(
- index, status, dtuple->fields, dtuple->n_fields, &extra_size);
- rec_t* rec = buf + extra_size;
+ if (UNIV_UNLIKELY(dtuple->is_alter_metadata())) {
+ ut_ad((dtuple->info_bits & REC_NEW_STATUS_MASK)
+ == REC_STATUS_INSTANT);
+ rec_get_converted_size_comp_prefix_low<true>(
+ index, dtuple->fields, dtuple->n_fields,
+ &extra_size, REC_STATUS_INSTANT, false);
+ buf += extra_size;
+ rec_convert_dtuple_to_rec_comp<true>(
+ buf, index, dtuple->fields, dtuple->n_fields,
+ REC_STATUS_INSTANT, false);
+ } else {
+ rec_get_converted_size_comp(index, dtuple, &extra_size);
+ buf += extra_size;
+ rec_comp_status_t status = rec_comp_status_t(
+ dtuple->info_bits & REC_NEW_STATUS_MASK);
+ if (status == REC_STATUS_ORDINARY
+ && dtuple->n_fields > index->n_core_fields) {
+ ut_ad(index->is_instant());
+ status = REC_STATUS_INSTANT;
+ }
- rec_convert_dtuple_to_rec_comp(
- rec, index, dtuple->fields, dtuple->n_fields, status, false);
- rec_set_info_bits_new(rec, dtuple->info_bits & ~REC_NEW_STATUS_MASK);
- return(rec);
+ rec_convert_dtuple_to_rec_comp(
+ buf, index, dtuple->fields, dtuple->n_fields,
+ status, false);
+ }
+
+ rec_set_info_bits_new(buf, dtuple->info_bits & ~REC_NEW_STATUS_MASK);
+ return buf;
}
/*********************************************************//**
@@ -1651,7 +1773,7 @@ rec_convert_dtuple_to_rec(
@param[in] fields data fields
@param[in] n_fields number of data fields
@param[out] extra record header size
-@param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED
+@param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT
@return total size, in bytes */
ulint
rec_get_converted_size_temp(
@@ -1671,7 +1793,7 @@ rec_get_converted_size_temp(
@param[in,out] offsets offsets to the fields; in: rec_offs_n_fields(offsets)
@param[in] n_core number of core fields (index->n_core_fields)
@param[in] def_val default values for non-core fields
-@param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED */
+@param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT */
void
rec_init_offsets_temp(
const rec_t* rec,
@@ -1682,14 +1804,14 @@ rec_init_offsets_temp(
rec_comp_status_t status)
{
ut_ad(status == REC_STATUS_ORDINARY
- || status == REC_STATUS_COLUMNS_ADDED);
+ || status == REC_STATUS_INSTANT);
/* The table may have been converted to plain format
if it was emptied during an ALTER TABLE operation. */
ut_ad(index->n_core_fields == n_core || !index->is_instant());
ut_ad(index->n_core_fields >= n_core);
rec_init_offsets_comp_ordinary(rec, index, offsets, n_core, def_val,
- status == REC_STATUS_COLUMNS_ADDED
- ? REC_LEAF_TEMP_COLUMNS_ADDED
+ status == REC_STATUS_INSTANT
+ ? REC_LEAF_TEMP_INSTANT
: REC_LEAF_TEMP);
}
@@ -1715,7 +1837,7 @@ rec_init_offsets_temp(
@param[in] index clustered or secondary index
@param[in] fields data fields
@param[in] n_fields number of data fields
-@param[in] status REC_STATUS_ORDINARY or REC_STATUS_COLUMNS_ADDED
+@param[in] status REC_STATUS_ORDINARY or REC_STATUS_INSTANT
*/
void
rec_convert_dtuple_to_temp(
@@ -1885,13 +2007,15 @@ rec_copy_prefix_to_buf(
ut_ad(n_fields
<= dict_index_get_n_unique_in_tree_nonleaf(index));
break;
- case REC_STATUS_COLUMNS_ADDED:
+ case REC_STATUS_INSTANT:
/* We would have !index->is_instant() when rolling back
an instant ADD COLUMN operation. */
ut_ad(index->is_instant() || page_rec_is_metadata(rec));
+ ut_ad(n_fields <= index->first_user_field());
nulls++;
const ulint n_rec = ulint(index->n_core_fields) + 1
- + rec_get_n_add_field(nulls);
+ + rec_get_n_add_field(nulls)
+ - rec_is_alter_metadata(rec, true);
instant_omit = ulint(&rec[-REC_N_NEW_EXTRA_BYTES] - nulls);
ut_ad(instant_omit == 1 || instant_omit == 2);
nullf = nulls;
@@ -1980,7 +2104,7 @@ rec_copy_prefix_to_buf(
/* copy the fixed-size header and the record prefix */
memcpy(b - REC_N_NEW_EXTRA_BYTES, rec - REC_N_NEW_EXTRA_BYTES,
prefix_len + REC_N_NEW_EXTRA_BYTES);
- ut_ad(rec_get_status(b) == REC_STATUS_COLUMNS_ADDED);
+ ut_ad(rec_get_status(b) == REC_STATUS_INSTANT);
rec_set_status(b, REC_STATUS_ORDINARY);
return b;
} else {
@@ -2498,8 +2622,6 @@ rec_get_trx_id(
const rec_t* rec,
const dict_index_t* index)
{
- ulint trx_id_col
- = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
const byte* trx_id;
ulint len;
mem_heap_t* heap = NULL;
@@ -2507,15 +2629,10 @@ rec_get_trx_id(
rec_offs_init(offsets_);
ulint* offsets = offsets_;
- ut_ad(trx_id_col <= MAX_REF_PARTS);
- ut_ad(dict_index_is_clust(index));
- ut_ad(trx_id_col > 0);
- ut_ad(trx_id_col != ULINT_UNDEFINED);
-
offsets = rec_get_offsets(rec, index, offsets, true,
- trx_id_col + 1, &heap);
+ index->db_trx_id() + 1, &heap);
- trx_id = rec_get_nth_field(rec, offsets, trx_id_col, &len);
+ trx_id = rec_get_nth_field(rec, offsets, index->db_trx_id(), &len);
ut_ad(len == DATA_TRX_ID_LEN);
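The rem0rec.cc changes above thread a template<bool mblob> parameter through the offset and size calculations so that the ALTER TABLE metadata record, which carries one extra externally stored field (the metadata BLOB) at index->first_user_field(), is handled by a separate template instantiation instead of run-time branches on every ordinary record. A simplified sketch of that compile-time dispatch (the field layout and sizes below are illustrative, not the actual record format):

    #include <cstddef>

    static const size_t BLOB_REF_SIZE_SKETCH = 20;  // externally stored field reference

    template <bool mblob>
    static size_t record_size_sketch(const size_t* field_len, size_t n_fields,
                                     size_t first_user_field)
    {
        size_t size = 0;
        for (size_t i = 0; i < n_fields; i++) {
            if (mblob && i == first_user_field) {
                // Only the metadata record accounts for the extra
                // metadata BLOB reference; for mblob=false this branch
                // is removed at compile time.
                size += BLOB_REF_SIZE_SKETCH;
            }
            size += field_len[i];
        }
        return size;
    }

    // record_size_sketch<false>() is the common path for ordinary records;
    // record_size_sketch<true>() is instantiated only for the metadata record.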
diff --git a/storage/innobase/row/row0ext.cc b/storage/innobase/row/row0ext.cc
index 503f7d0d3e7..6973fe758d9 100644
--- a/storage/innobase/row/row0ext.cc
+++ b/storage/innobase/row/row0ext.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -29,14 +30,14 @@ Created September 2006 Marko Makela
/** Fills the column prefix cache of an externally stored column.
@param[in,out] ext column prefix cache
@param[in] i index of ext->ext[]
-@param[in] page_size page size
+@param[in] space tablespace
@param[in] dfield data field */
static
void
row_ext_cache_fill(
row_ext_t* ext,
ulint i,
- const page_size_t& page_size,
+ fil_space_t* space,
const dfield_t* dfield)
{
const byte* field = static_cast<const byte*>(
@@ -75,7 +76,8 @@ row_ext_cache_fill(
crashed during the execution of
btr_free_externally_stored_field(). */
ext->len[i] = btr_copy_externally_stored_field_prefix(
- buf, ext->max_len, page_size, field, f_len);
+ buf, ext->max_len, ext->zip_size,
+ field, f_len);
}
}
}
@@ -91,7 +93,7 @@ row_ext_create(
in the InnoDB table object, as reported by
dict_col_get_no(); NOT relative to the records
in the clustered index */
- ulint flags, /*!< in: table->flags */
+ const dict_table_t& table, /*!< in: table */
const dtuple_t* tuple, /*!< in: data tuple containing the field
references of the externally stored
columns; must be indexed by col_no;
@@ -100,36 +102,30 @@ row_ext_create(
to prevent deletion (rollback or purge). */
mem_heap_t* heap) /*!< in: heap where created */
{
- ulint i;
- const page_size_t& page_size = dict_tf_get_page_size(flags);
-
- row_ext_t* ret;
+ if (!table.space) {
+ return NULL;
+ }
ut_ad(n_ext > 0);
- ret = static_cast<row_ext_t*>(
+ row_ext_t* ret = static_cast<row_ext_t*>(
mem_heap_alloc(heap,
(sizeof *ret) + (n_ext - 1) * sizeof ret->len));
ret->n_ext = n_ext;
ret->ext = ext;
- ret->max_len = DICT_MAX_FIELD_LEN_BY_FORMAT_FLAG(flags);
- ret->page_size.copy_from(page_size);
+ ret->max_len = DICT_MAX_FIELD_LEN_BY_FORMAT_FLAG(table.flags);
+ ret->zip_size = dict_tf_get_zip_size(table.flags);
ret->buf = static_cast<byte*>(
mem_heap_alloc(heap, n_ext * ret->max_len));
-#ifdef UNIV_DEBUG
- memset(ret->buf, 0xaa, n_ext * ret->max_len);
- UNIV_MEM_ALLOC(ret->buf, n_ext * ret->max_len);
-#endif
-
/* Fetch the BLOB prefixes */
- for (i = 0; i < n_ext; i++) {
+ for (ulint i = 0; i < n_ext; i++) {
const dfield_t* dfield;
dfield = dtuple_get_nth_field(tuple, ext[i]);
- row_ext_cache_fill(ret, i, page_size, dfield);
+ row_ext_cache_fill(ret, i, table.space, dfield);
}
return(ret);
diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc
index 8afee045493..1ac726fd8cd 100644
--- a/storage/innobase/row/row0ftsort.cc
+++ b/storage/innobase/row/row0ftsort.cc
@@ -803,7 +803,7 @@ DECLARE_THREAD(fts_parallel_tokenization)(
block = psort_info->merge_block;
crypt_block = psort_info->crypt_block;
- const page_size_t& page_size = dict_table_page_size(table);
+ const ulint zip_size = table->space->zip_size();
row_merge_fts_get_next_doc_item(psort_info, &doc_item);
@@ -833,7 +833,7 @@ loop:
doc.text.f_str =
btr_copy_externally_stored_field(
&doc.text.f_len, data,
- page_size, data_len, blob_heap);
+ zip_size, data_len, blob_heap);
} else {
doc.text.f_str = data;
doc.text.f_len = data_len;
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
index 082bfc51c06..9ef737828a0 100644
--- a/storage/innobase/row/row0import.cc
+++ b/storage/innobase/row/row0import.cc
@@ -53,7 +53,7 @@ Created 2012-02-08 by Sunny Bains.
/** The size of the buffer to use for IO.
@param n physical page size
@return number of pages */
-#define IO_BUFFER_SIZE(n) ((1024 * 1024) / n)
+#define IO_BUFFER_SIZE(n) ((1024 * 1024) / (n))
/** For gathering stats on records during phase I */
struct row_stats_t {
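
The hunk above only parenthesises the macro parameter; without the extra parentheses an expression argument binds to the division incorrectly. A minimal standalone illustration (the *_BAD/*_GOOD names are ours, not from the patch):

#include <cassert>

#define IO_BUFFER_SIZE_BAD(n)  ((1024 * 1024) / n)
#define IO_BUFFER_SIZE_GOOD(n) ((1024 * 1024) / (n))

int main()
{
	/* With an expression argument, the unparenthesised form expands to
	(1024 * 1024) / 8192 * 2 == 256 instead of the intended
	(1024 * 1024) / (8192 * 2) == 64. */
	assert(IO_BUFFER_SIZE_BAD(8192 * 2) == 256);
	assert(IO_BUFFER_SIZE_GOOD(8192 * 2) == 64);
	return 0;
}
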
@@ -115,7 +115,7 @@ struct row_import {
m_hostname(NULL),
m_table_name(NULL),
m_autoinc(0),
- m_page_size(0, 0, false),
+ m_zip_size(0),
m_flags(0),
m_n_cols(0),
m_cols(NULL),
@@ -196,7 +196,8 @@ struct row_import {
ib_uint64_t m_autoinc; /*!< Next autoinc value */
- page_size_t m_page_size; /*!< Tablespace page size */
+ ulint m_zip_size; /*!< ROW_FORMAT=COMPRESSED
+ page size, or 0 */
ulint m_flags; /*!< Table flags */
@@ -356,7 +357,7 @@ public:
@param trx covering transaction */
AbstractCallback(trx_t* trx, ulint space_id)
:
- m_page_size(0, 0, false),
+ m_zip_size(0),
m_trx(trx),
m_space(space_id),
m_xdes(),
@@ -380,7 +381,7 @@ public:
/** @return true if compressed table. */
bool is_compressed_table() const UNIV_NOTHROW
{
- return(get_page_size().is_compressed());
+ return get_zip_size();
}
/** @return the tablespace flags */
@@ -400,7 +401,11 @@ public:
m_filepath = filename;
}
- const page_size_t& get_page_size() const { return m_page_size; }
+ ulint get_zip_size() const { return m_zip_size; }
+ ulint physical_size() const
+ {
+ return m_zip_size ? m_zip_size : srv_page_size;
+ }
const char* filename() const { return m_filepath; }
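
Across this patch, page_size_t objects are being replaced by a single zip_size value, where 0 means the tablespace is not ROW_FORMAT=COMPRESSED and the physical page size falls back to srv_page_size, as in the physical_size() accessor added above. A small standalone sketch of that convention (the struct and its member names are illustrative only, and srv_page_size here is a stand-in constant, not the real global):

#include <cassert>

static const unsigned srv_page_size = 16384;	/* stand-in for the global */

struct tablespace_sizes
{
	unsigned zip_size;	/* ROW_FORMAT=COMPRESSED page size, or 0 */

	unsigned physical() const { return zip_size ? zip_size : srv_page_size; }
	bool is_compressed() const { return zip_size != 0; }
};

int main()
{
	tablespace_sizes plain = {0};
	tablespace_sizes zip = {8192};
	assert(plain.physical() == srv_page_size && !plain.is_compressed());
	assert(zip.physical() == 8192 && zip.is_compressed());
	return 0;
}
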
@@ -439,7 +444,7 @@ protected:
{
ulint offset;
- offset = xdes_calc_descriptor_index(get_page_size(), page_no);
+ offset = xdes_calc_descriptor_index(get_zip_size(), page_no);
return(page + XDES_ARR_OFFSET + XDES_SIZE * offset);
}
@@ -467,9 +472,11 @@ protected:
state = mach_read_ulint(xdesc + XDES_STATE, MLOG_4BYTES);
if (state != XDES_FREE) {
+ const ulint physical_size = m_zip_size
+ ? m_zip_size : srv_page_size;
m_xdes = UT_NEW_ARRAY_NOKEY(xdes_t,
- m_page_size.physical());
+ physical_size);
/* Trigger OOM */
DBUG_EXECUTE_IF(
@@ -482,7 +489,7 @@ protected:
return(DB_OUT_OF_MEMORY);
}
- memcpy(m_xdes, page, m_page_size.physical());
+ memcpy(m_xdes, page, physical_size);
}
return(DB_SUCCESS);
@@ -493,7 +500,7 @@ protected:
@return true if the page is marked as free */
bool is_free(ulint page_no) const UNIV_NOTHROW
{
- ut_a(xdes_calc_descriptor_page(get_page_size(), page_no)
+ ut_a(xdes_calc_descriptor_page(get_zip_size(), page_no)
== m_xdes_page_no);
if (m_xdes != 0) {
@@ -508,8 +515,8 @@ protected:
}
protected:
- /** The tablespace page size. */
- page_size_t m_page_size;
+ /** The ROW_FORMAT=COMPRESSED page size, or 0. */
+ ulint m_zip_size;
/** File handle to the tablespace */
pfs_os_file_t m_file;
@@ -556,7 +563,7 @@ AbstractCallback::init(
const page_t* page = block->frame;
m_space_flags = fsp_header_get_flags(page);
- if (!fsp_flags_is_valid(m_space_flags, true)) {
+ if (!fil_space_t::is_valid_flags(m_space_flags, true)) {
ulint cflags = fsp_flags_convert_from_101(m_space_flags);
if (cflags == ULINT_UNDEFINED) {
ib::error() << "Invalid FSP_SPACE_FLAGS="
@@ -568,21 +575,23 @@ AbstractCallback::init(
/* Clear the DATA_DIR flag, which is basically garbage. */
m_space_flags &= ~(1U << FSP_FLAGS_POS_RESERVED);
- m_page_size.copy_from(page_size_t(m_space_flags));
+ m_zip_size = fil_space_t::zip_size(m_space_flags);
+ const ulint logical_size = fil_space_t::logical_size(m_space_flags);
+ const ulint physical_size = fil_space_t::physical_size(m_space_flags);
- if (!is_compressed_table() && !m_page_size.equals_to(univ_page_size)) {
+ if (logical_size != srv_page_size) {
- ib::error() << "Page size " << m_page_size.physical()
+ ib::error() << "Page size " << logical_size
<< " of ibd file is not the same as the server page"
" size " << srv_page_size;
return(DB_CORRUPTION);
- } else if (file_size % m_page_size.physical() != 0) {
+ } else if (file_size & (physical_size - 1)) {
ib::error() << "File size " << file_size << " is not a"
" multiple of the page size "
- << m_page_size.physical();
+ << physical_size;
return(DB_CORRUPTION);
}
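
The replacement of `file_size % physical_size != 0` by `file_size & (physical_size - 1)` relies on the page size being a power of two, which InnoDB page sizes are. A standalone sketch of the equivalence (the helper name is ours):

#include <cassert>
#include <cstdint>

/* For a power-of-two n, (x & (n - 1)) == 0 is equivalent to x % n == 0. */
static bool is_multiple_of_pow2(uint64_t x, uint64_t n)
{
	assert(n && !(n & (n - 1)));	/* n must be a power of two */
	return !(x & (n - 1));
}

int main()
{
	assert(is_multiple_of_pow2(1 << 20, 16384));
	assert(!is_multiple_of_pow2((1 << 20) + 512, 16384));
	return 0;
}
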
@@ -694,7 +703,7 @@ FetchIndexRootPages::build_row_import(row_import* cfg) const UNIV_NOTHROW
Indexes::const_iterator end = m_indexes.end();
ut_a(cfg->m_table == m_table);
- cfg->m_page_size.copy_from(m_page_size);
+ cfg->m_zip_size = m_zip_size;
cfg->m_n_indexes = m_indexes.size();
if (cfg->m_n_indexes == 0) {
@@ -814,6 +823,7 @@ public:
@param block block to convert, it is not from the buffer pool.
@retval DB_SUCCESS or error code. */
dberr_t operator()(buf_block_t* block) UNIV_NOTHROW;
+
private:
/** Update the page, set the space id, max trx id and index id.
@param block block read from file
@@ -1459,7 +1469,7 @@ IndexPurge::open() UNIV_NOTHROW
btr_pcur_open_at_index_side(
true, m_index, BTR_MODIFY_LEAF, &m_pcur, true, 0, &m_mtr);
btr_pcur_move_to_next_user_rec(&m_pcur, &m_mtr);
- if (rec_is_metadata(btr_pcur_get_rec(&m_pcur), m_index)) {
+ if (rec_is_metadata(btr_pcur_get_rec(&m_pcur), *m_index)) {
ut_ad(btr_pcur_is_on_user_rec(&m_pcur));
/* Skip the metadata pseudo-record. */
} else {
@@ -1828,6 +1838,23 @@ PageConverter::update_index_page(
return(DB_CORRUPTION);
}
+ if (index->n_core_fields > index->n_fields) {
+ /* Some columns have been dropped.
+ Refuse to IMPORT TABLESPACE for now.
+
+ NOTE: This is not an accurate check.
+ Columns could have been both
+ added and dropped instantly.
+ For an accurate check, we must read
+ the metadata BLOB page pointed to
+ by the leftmost leaf page.
+
+ But we would have to read
+ those pages in a special way,
+ bypassing the buffer pool! */
+ return DB_UNSUPPORTED;
+ }
+
/* Provisionally set all instantly
added columns to be DEFAULT NULL. */
for (unsigned i = index->n_core_fields;
@@ -1989,7 +2016,7 @@ dberr_t PageConverter::operator()(buf_block_t* block) UNIV_NOTHROW
/* If we already had an old page with matching number
in the buffer pool, evict it now, because
we no longer evict the pages on DISCARD TABLESPACE. */
- buf_page_get_gen(block->page.id, get_page_size(),
+ buf_page_get_gen(block->page.id, get_zip_size(),
RW_NO_LATCH, NULL, BUF_EVICT_IF_IN_POOL,
__FILE__, __LINE__, NULL, NULL);
@@ -1998,18 +2025,37 @@ dberr_t PageConverter::operator()(buf_block_t* block) UNIV_NOTHROW
dberr_t err = update_page(block, page_type);
if (err != DB_SUCCESS) return err;
+ const bool full_crc32 = fil_space_t::full_crc32(get_space_flags());
+ const bool page_compressed = fil_space_t::is_compressed(get_space_flags());
+
if (!block->page.zip.data) {
+ if (full_crc32
+ && (block->page.encrypted || page_compressed)
+ && block->page.id.page_no() > 0) {
+ byte* page = block->frame;
+ mach_write_to_8(page + FIL_PAGE_LSN, m_current_lsn);
+
+ if (!page_compressed) {
+ mach_write_to_4(
+ page + (srv_page_size
+ - FIL_PAGE_FCRC32_END_LSN),
+ (ulint) m_current_lsn);
+ }
+
+ return err;
+ }
+
buf_flush_init_for_writing(
- NULL, block->frame, NULL, m_current_lsn);
+ NULL, block->frame, NULL, m_current_lsn, full_crc32);
} else if (fil_page_type_is_index(page_type)) {
buf_flush_init_for_writing(
NULL, block->page.zip.data, &block->page.zip,
- m_current_lsn);
+ m_current_lsn, full_crc32);
} else {
/* Calculate and update the checksum of non-index
pages for ROW_FORMAT=COMPRESSED tables. */
buf_flush_update_zip_checksum(
- block->page.zip.data, get_page_size().physical(),
+ block->page.zip.data, block->zip_size(),
m_current_lsn);
}
@@ -2233,17 +2279,15 @@ row_import_adjust_root_pages_of_secondary_indexes(
}
/*****************************************************************//**
-Ensure that dict_sys->row_id exceeds SELECT MAX(DB_ROW_ID).
-@return error code */
-static MY_ATTRIBUTE((nonnull, warn_unused_result))
-dberr_t
+Ensure that dict_sys->row_id exceeds SELECT MAX(DB_ROW_ID). */
+MY_ATTRIBUTE((nonnull)) static
+void
row_import_set_sys_max_row_id(
/*==========================*/
row_prebuilt_t* prebuilt, /*!< in/out: prebuilt from
handler */
const dict_table_t* table) /*!< in: table to import */
{
- dberr_t err;
const rec_t* rec;
mtr_t mtr;
btr_pcur_t pcur;
@@ -2251,7 +2295,8 @@ row_import_set_sys_max_row_id(
dict_index_t* index;
index = dict_table_get_first_index(table);
- ut_a(dict_index_is_clust(index));
+ ut_ad(index->is_primary());
+ ut_ad(dict_index_is_auto_gen_clust(index));
mtr_start(&mtr);
@@ -2272,57 +2317,17 @@ row_import_set_sys_max_row_id(
/* Check for empty table. */
if (page_rec_is_infimum(rec)) {
/* The table is empty. */
- err = DB_SUCCESS;
- } else if (rec_is_metadata(rec, index)) {
+ } else if (rec_is_metadata(rec, *index)) {
/* The clustered index contains the metadata record only,
that is, the table is empty. */
- err = DB_SUCCESS;
} else {
- ulint len;
- const byte* field;
- mem_heap_t* heap = NULL;
- ulint offsets_[1 + REC_OFFS_HEADER_SIZE];
- ulint* offsets;
-
- rec_offs_init(offsets_);
-
- offsets = rec_get_offsets(
- rec, index, offsets_, true, ULINT_UNDEFINED, &heap);
-
- field = rec_get_nth_field(
- rec, offsets,
- dict_index_get_sys_col_pos(index, DATA_ROW_ID),
- &len);
-
- if (len == DATA_ROW_ID_LEN) {
- row_id = mach_read_from_6(field);
- err = DB_SUCCESS;
- } else {
- err = DB_CORRUPTION;
- }
-
- if (heap != NULL) {
- mem_heap_free(heap);
- }
+ row_id = mach_read_from_6(rec);
}
btr_pcur_close(&pcur);
mtr_commit(&mtr);
- DBUG_EXECUTE_IF("ib_import_set_max_rowid_failure",
- err = DB_CORRUPTION;);
-
- if (err != DB_SUCCESS) {
- ib_errf(prebuilt->trx->mysql_thd,
- IB_LOG_LEVEL_WARN,
- ER_INNODB_INDEX_CORRUPT,
- "Index `%s` corruption detected, invalid DB_ROW_ID"
- " in index.", index->name());
-
- return(err);
-
- } else if (row_id > 0) {
-
+ if (row_id) {
/* Update the system row id if the imported index row id is
greater than the max system row id. */
@@ -2335,8 +2340,6 @@ row_import_set_sys_max_row_id(
mutex_exit(&dict_sys->mutex);
}
-
- return(DB_SUCCESS);
}
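
The simplified function above reads DB_ROW_ID with mach_read_from_6(rec), relying on the asserted fact that the clustered index was auto-generated, so DB_ROW_ID is the leading 6-byte big-endian field of the record. A stand-in sketch of that 48-bit read (read_be48 is our name, not an InnoDB function):

#include <cassert>
#include <cstdint>

static uint64_t read_be48(const unsigned char* b)
{
	uint64_t v = 0;
	for (int i = 0; i < 6; i++) {
		v = (v << 8) | b[i];
	}
	return v;
}

int main()
{
	const unsigned char rec[6] = {0, 0, 0, 0x01, 0x02, 0x03};
	assert(read_be48(rec) == 0x010203);
	return 0;
}
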
/*****************************************************************//**
@@ -2956,10 +2959,7 @@ row_import_read_v1(
cfg->m_flags = mach_read_from_4(ptr);
ptr += sizeof(ib_uint32_t);
- cfg->m_page_size.copy_from(dict_tf_get_page_size(cfg->m_flags));
-
- ut_a(logical_page_size == cfg->m_page_size.logical());
-
+ cfg->m_zip_size = dict_tf_get_zip_size(cfg->m_flags);
cfg->m_n_cols = mach_read_from_4(ptr);
if (!dict_tf_is_valid(cfg->m_flags)) {
@@ -3301,7 +3301,7 @@ fil_iterate(
AbstractCallback& callback)
{
os_offset_t offset;
- const ulint size = callback.get_page_size().physical();
+ const ulint size = callback.physical_size();
ulint n_bytes = iter.n_io_buffers * size;
const ulint buf_size = srv_page_size
@@ -3318,6 +3318,10 @@ fil_iterate(
return DB_OUT_OF_MEMORY;
}
+ ulint actual_space_id = 0;
+ const bool full_crc32 = fil_space_t::full_crc32(
+ callback.get_space_flags());
+
/* TODO: For ROW_FORMAT=COMPRESSED tables we do a lot of useless
copying for non-index pages. Unfortunately, it is
required by buf_zip_decompress() */
@@ -3375,15 +3379,9 @@ fil_iterate(
byte* src = readptr + i * size;
const ulint page_no = page_get_page_no(src);
if (!page_no && block->page.id.page_no()) {
- const ulint* b = reinterpret_cast<const ulint*>
- (src);
- const ulint* const e = b + size / sizeof *b;
- do {
- if (*b++) {
- goto page_corrupted;
- }
- } while (b != e);
-
+ if (!buf_page_is_zeroes(src, size)) {
+ goto page_corrupted;
+ }
/* Proceed to the next page,
because this one is all zero. */
continue;
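
The open-coded word-by-word loop is replaced above by a call to buf_page_is_zeroes(). A standalone all-zero check in the same spirit (this is only a sketch, not the InnoDB implementation):

#include <cassert>
#include <cstddef>
#include <cstring>

static bool page_is_zeroes(const unsigned char* page, size_t size)
{
	/* The whole page is zero iff the first byte is zero and every
	remaining byte equals the byte before it. */
	return page[0] == 0 && !memcmp(page, page + 1, size - 1);
}

int main()
{
	unsigned char page[512] = {0};
	assert(page_is_zeroes(page, sizeof page));
	page[300] = 1;
	assert(!page_is_zeroes(page, sizeof page));
	return 0;
}
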
@@ -3399,9 +3397,19 @@ page_corrupted:
goto func_exit;
}
- const bool page_compressed
- = fil_page_is_compressed_encrypted(src)
- || fil_page_is_compressed(src);
+ if (block->page.id.page_no() == 0) {
+ actual_space_id = mach_read_from_4(
+ src + FIL_PAGE_SPACE_ID);
+ }
+
+ const bool page_compressed =
+ (full_crc32
+ && fil_space_t::is_compressed(
+ callback.get_space_flags())
+ && buf_page_is_compressed(
+ src, callback.get_space_flags()))
+ || (fil_page_is_compressed_encrypted(src)
+ || fil_page_is_compressed(src));
if (page_compressed && block->page.zip.data) {
goto page_corrupted;
@@ -3410,11 +3418,11 @@ page_corrupted:
bool decrypted = false;
byte* dst = io_buffer + i * size;
bool frame_changed = false;
+ uint key_version = buf_page_get_key_version(
+ src, callback.get_space_flags());
if (!encrypted) {
- } else if (!mach_read_from_4(
- FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION
- + src)) {
+ } else if (!key_version) {
not_encrypted:
if (!page_compressed
&& !block->page.zip.data) {
@@ -3425,14 +3433,17 @@ not_encrypted:
memcpy(dst, src, size);
}
} else {
- if (!fil_space_verify_crypt_checksum(
- src, callback.get_page_size())) {
+ if (!buf_page_verify_crypt_checksum(
+ src, callback.get_space_flags())) {
goto page_corrupted;
}
decrypted = fil_space_decrypt(
+ actual_space_id,
iter.crypt_data, dst,
- callback.get_page_size(), src, &err);
+ callback.physical_size(),
+ callback.get_space_flags(),
+ src, &err);
if (err != DB_SUCCESS) {
goto func_exit;
@@ -3445,24 +3456,34 @@ not_encrypted:
updated = true;
}
+ /* For full_crc32 format, skip checksum check
+ after decryption. */
+ bool skip_checksum_check = full_crc32 && encrypted;
+
/* If the original page is page_compressed, we need
to decompress it before adjusting further. */
if (page_compressed) {
ulint compress_length = fil_page_decompress(
- page_compress_buf, dst);
+ page_compress_buf, dst,
+ callback.get_space_flags());
ut_ad(compress_length != srv_page_size);
if (compress_length == 0) {
goto page_corrupted;
}
updated = true;
- } else if (buf_page_is_corrupted(
+ } else if (!skip_checksum_check
+ && buf_page_is_corrupted(
false,
encrypted && !frame_changed
? dst : src,
- callback.get_page_size(), NULL)) {
+ callback.get_space_flags())) {
goto page_corrupted;
}
+ if (encrypted) {
+ block->page.encrypted = true;
+ }
+
if ((err = callback(block)) != DB_SUCCESS) {
goto func_exit;
} else if (!updated) {
@@ -3518,7 +3539,7 @@ not_encrypted:
if (ulint len = fil_page_compress(
src,
page_compress_buf,
- 0,/* FIXME: compression level */
+ callback.get_space_flags(),
512,/* FIXME: proper block size */
encrypted)) {
/* FIXME: remove memcpy() */
@@ -3531,12 +3552,14 @@ not_encrypted:
/* Encrypt the page if encryption was used. */
if (encrypted && decrypted) {
byte *dest = writeptr + i * size;
+
byte* tmp = fil_encrypt_buf(
iter.crypt_data,
block->page.id.space(),
block->page.id.page_no(),
mach_read_from_8(src + FIL_PAGE_LSN),
- src, callback.get_page_size(), dest);
+ src, block->zip_size(), dest,
+ full_crc32);
if (tmp == src) {
/* TODO: remove unnecessary memcpy's */
@@ -3546,6 +3569,26 @@ not_encrypted:
updated = true;
}
+
+ /* Write checksum for the compressed full crc32 page.*/
+ if (full_crc32 && page_compressed) {
+ ut_ad(updated);
+ byte* dest = writeptr + i * size;
+ ut_d(bool comp = false);
+ ut_d(bool corrupt = false);
+ ulint size = buf_page_full_crc32_size(
+ dest,
+#ifdef UNIV_DEBUG
+ &comp, &corrupt
+#else
+ NULL, NULL
+#endif
+ );
+ ut_ad(!comp == (size == srv_page_size));
+ ut_ad(!corrupt);
+ mach_write_to_4(dest + (size - 4),
+ ut_crc32(dest, size - 4));
+ }
}
/* A page was updated in the set, write back to disk. */
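
The new block above computes a CRC-32 over all but the last four bytes of the (possibly compressed) page and stores it big-endian in those last four bytes. A hedged sketch of that layout, using zlib's crc32() purely as a stand-in (it need not match the polynomial of InnoDB's ut_crc32()):

#include <cassert>
#include <cstddef>
#include <zlib.h>

static void write_trailing_checksum(unsigned char* page, size_t size)
{
	const unsigned long c = crc32(0, page, uInt(size - 4));
	/* big-endian store, as mach_write_to_4() does */
	page[size - 4] = (unsigned char) (c >> 24);
	page[size - 3] = (unsigned char) (c >> 16);
	page[size - 2] = (unsigned char) (c >> 8);
	page[size - 1] = (unsigned char) c;
}

int main()
{
	unsigned char page[16384] = {1, 2, 3};
	write_trailing_checksum(page, sizeof page);
	assert(page[sizeof page - 4]
	       == (unsigned char) (crc32(0, page, sizeof page - 4) >> 24));
	return 0;
}
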
@@ -3661,10 +3704,8 @@ fil_tablespace_iterate(
if (err == DB_SUCCESS) {
block->page.id = page_id_t(callback.get_space_id(), 0);
- block->page.size.copy_from(callback.get_page_size());
- if (block->page.size.is_compressed()) {
- page_zip_set_size(&block->page.zip,
- callback.get_page_size().physical());
+ if (ulint zip_size = callback.get_zip_size()) {
+ page_zip_set_size(&block->page.zip, zip_size);
/* ROW_FORMAT=COMPRESSED is not optimised for block IO
for now. We do the IMPORT page by page. */
n_io_buffers = 1;
@@ -3674,7 +3715,7 @@ fil_tablespace_iterate(
/* read (optional) crypt data */
iter.crypt_data = fil_space_read_crypt_data(
- callback.get_page_size(), page);
+ callback.get_zip_size(), page);
/* If tablespace is encrypted, it needs extra buffers */
if (iter.crypt_data && n_io_buffers > 1) {
@@ -3855,12 +3896,12 @@ row_import_for_mysql(
ut_a(err == DB_FAIL);
- cfg.m_page_size.copy_from(univ_page_size);
+ cfg.m_zip_size = 0;
FetchIndexRootPages fetchIndexRootPages(table, trx);
err = fil_tablespace_iterate(
- table, IO_BUFFER_SIZE(cfg.m_page_size.physical()),
+ table, IO_BUFFER_SIZE(srv_page_size),
fetchIndexRootPages);
if (err == DB_SUCCESS) {
@@ -3898,7 +3939,8 @@ row_import_for_mysql(
/* Set the IO buffer size in pages. */
err = fil_tablespace_iterate(
- table, IO_BUFFER_SIZE(cfg.m_page_size.physical()), converter);
+ table, IO_BUFFER_SIZE(cfg.m_zip_size ? cfg.m_zip_size
+ : srv_page_size), converter);
DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
err = DB_TOO_MANY_CONCURRENT_TRXS;);
@@ -4064,12 +4106,7 @@ row_import_for_mysql(
any DB_ROW_ID stored in the table. */
if (prebuilt->clust_index_was_generated) {
-
- err = row_import_set_sys_max_row_id(prebuilt, table);
-
- if (err != DB_SUCCESS) {
- return(row_import_error(prebuilt, trx, err));
- }
+ row_import_set_sys_max_row_id(prebuilt, table);
}
ib::info() << "Phase III - Flush changes to disk";
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index 25c17d0f141..adb154c8446 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2016, 2019, MariaDB Corporation.
+Copyright (c) 2016, 2018, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -44,11 +44,8 @@ Created 4/20/1996 Heikki Tuuri
#include "buf0lru.h"
#include "fts0fts.h"
#include "fts0types.h"
-
#ifdef WITH_WSREP
-#include <mysql/service_wsrep.h>
-#include "../../../wsrep/wsrep_api.h"
-#include "wsrep_mysqld_c.h"
+#include "wsrep_mysqld.h"
#endif /* WITH_WSREP */
/*************************************************************************
@@ -1047,11 +1044,11 @@ func_exit:
#ifdef WITH_WSREP
dberr_t wsrep_append_foreign_key(trx_t *trx,
- dict_foreign_t* foreign,
- const rec_t* clust_rec,
- dict_index_t* clust_index,
- ibool referenced,
- enum wsrep_key_type key_type);
+ dict_foreign_t* foreign,
+ const rec_t* clust_rec,
+ dict_index_t* clust_index,
+ ibool referenced,
+ Wsrep_service_key_type key_type);
#endif /* WITH_WSREP */
/*********************************************************************//**
@@ -1274,8 +1271,10 @@ row_ins_foreign_check_on_constraint(
}
if (table->fts) {
- doc_id = fts_get_doc_id_from_rec(table, clust_rec,
- clust_index, tmp_heap);
+ doc_id = fts_get_doc_id_from_rec(
+ clust_rec, clust_index,
+ rec_get_offsets(clust_rec, clust_index, NULL, true,
+ ULINT_UNDEFINED, &tmp_heap));
}
if (node->is_delete
@@ -1432,7 +1431,7 @@ row_ins_foreign_check_on_constraint(
#ifdef WITH_WSREP
err = wsrep_append_foreign_key(trx, foreign, clust_rec, clust_index,
- FALSE, WSREP_KEY_EXCLUSIVE);
+ FALSE, WSREP_SERVICE_KEY_EXCLUSIVE);
if (err != DB_SUCCESS) {
fprintf(stderr,
"WSREP: foreign key append failed: %d\n", err);
@@ -1807,31 +1806,16 @@ row_ins_check_foreign_constraint(
if (check_ref) {
err = DB_SUCCESS;
#ifdef WITH_WSREP
- if (!wsrep_on(trx->mysql_thd)) {
- goto end_scan;
- }
- enum wsrep_key_type key_type;
- if (upd_node != NULL) {
- key_type = WSREP_KEY_SHARED;
- } else {
- switch (wsrep_certification_rules) {
- default:
- case WSREP_CERTIFICATION_RULES_STRICT:
- key_type = WSREP_KEY_EXCLUSIVE;
- break;
- case WSREP_CERTIFICATION_RULES_OPTIMIZED:
- key_type = WSREP_KEY_SEMI;
- break;
- }
- }
-
err = wsrep_append_foreign_key(
- trx,
+ thr_get_trx(thr),
foreign,
rec,
check_index,
check_ref,
- key_type);
+ (upd_node != NULL
+ && wsrep_protocol_version < 4)
+ ? WSREP_SERVICE_KEY_SHARED
+ : WSREP_SERVICE_KEY_REFERENCE);
#endif /* WITH_WSREP */
goto end_scan;
} else if (foreign->type != 0) {
@@ -2613,25 +2597,32 @@ row_ins_clust_index_entry_low(
} else {
index->set_modified(mtr);
- if (mode == BTR_MODIFY_LEAF
- && dict_index_is_online_ddl(index)) {
- mode = BTR_MODIFY_LEAF_ALREADY_S_LATCHED;
- mtr_s_lock(dict_index_get_lock(index), &mtr);
- }
+ if (UNIV_UNLIKELY(entry->is_metadata())) {
+ ut_ad(index->is_instant());
+ ut_ad(!dict_index_is_online_ddl(index));
+ ut_ad(mode == BTR_MODIFY_TREE);
+ } else {
+ if (mode == BTR_MODIFY_LEAF
+ && dict_index_is_online_ddl(index)) {
+ mode = BTR_MODIFY_LEAF_ALREADY_S_LATCHED;
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+ }
- if (unsigned ai = index->table->persistent_autoinc) {
- /* Prepare to persist the AUTO_INCREMENT value
- from the index entry to PAGE_ROOT_AUTO_INC. */
- const dfield_t* dfield = dtuple_get_nth_field(
- entry, ai - 1);
- auto_inc = dfield_is_null(dfield)
- ? 0
- : row_parse_int(static_cast<const byte*>(
+ if (unsigned ai = index->table->persistent_autoinc) {
+ /* Prepare to persist the AUTO_INCREMENT value
+ from the index entry to PAGE_ROOT_AUTO_INC. */
+ const dfield_t* dfield = dtuple_get_nth_field(
+ entry, ai - 1);
+ if (!dfield_is_null(dfield)) {
+ auto_inc = row_parse_int(
+ static_cast<const byte*>(
dfield->data),
dfield->len,
dfield->type.mtype,
dfield->type.prtype
& DATA_UNSIGNED);
+ }
+ }
}
}
@@ -2661,7 +2652,7 @@ row_ins_clust_index_entry_low(
#endif /* UNIV_DEBUG */
if (UNIV_UNLIKELY(entry->info_bits != 0)) {
- ut_ad(entry->info_bits == REC_INFO_METADATA);
+ ut_ad(entry->is_metadata());
ut_ad(flags == BTR_NO_LOCKING_FLAG);
ut_ad(index->is_instant());
ut_ad(!dict_index_is_online_ddl(index));
@@ -2669,28 +2660,18 @@ row_ins_clust_index_entry_low(
const rec_t* rec = btr_cur_get_rec(cursor);
- switch (rec_get_info_bits(rec, page_rec_is_comp(rec))
- & (REC_INFO_MIN_REC_FLAG | REC_INFO_DELETED_FLAG)) {
- case REC_INFO_MIN_REC_FLAG:
+ if (rec_get_info_bits(rec, page_rec_is_comp(rec))
+ & REC_INFO_MIN_REC_FLAG) {
thr_get_trx(thr)->error_info = index;
err = DB_DUPLICATE_KEY;
goto err_exit;
- case REC_INFO_MIN_REC_FLAG | REC_INFO_DELETED_FLAG:
- /* The metadata record never carries the delete-mark
- in MariaDB Server 10.3.
- If a table loses its 'instantness', it happens
- by the rollback of this first-time insert, or
- by a call to btr_page_empty() on the root page
- when the table becomes empty. */
- err = DB_CORRUPTION;
- goto err_exit;
- default:
- ut_ad(!row_ins_must_modify_rec(cursor));
- goto do_insert;
}
+
+ ut_ad(!row_ins_must_modify_rec(cursor));
+ goto do_insert;
}
- if (rec_is_metadata(btr_cur_get_rec(cursor), index)) {
+ if (rec_is_metadata(btr_cur_get_rec(cursor), *index)) {
goto do_insert;
}
@@ -3250,9 +3231,27 @@ row_ins_clust_index_entry(
n_uniq = dict_index_is_unique(index) ? index->n_uniq : 0;
+#ifdef WITH_WSREP
+ const bool skip_locking
+ = wsrep_thd_skip_locking(thr_get_trx(thr)->mysql_thd);
+ ulint flags = index->table->no_rollback() ? BTR_NO_ROLLBACK
+ : (index->table->is_temporary() || skip_locking)
+ ? BTR_NO_LOCKING_FLAG : 0;
+#ifdef UNIV_DEBUG
+ if (skip_locking && strcmp(wsrep_get_sr_table_name(),
+ index->table->name.m_name)) {
+ WSREP_ERROR("Record locking is disabled in this thread, "
+ "but the table being modified is not "
+ "`%s`: `%s`.", wsrep_get_sr_table_name(),
+ index->table->name.m_name);
+ ut_error;
+ }
+#endif /* UNIV_DEBUG */
+#else
ulint flags = index->table->no_rollback() ? BTR_NO_ROLLBACK
: index->table->is_temporary()
? BTR_NO_LOCKING_FLAG : 0;
+#endif /* WITH_WSREP */
const ulint orig_n_fields = entry->n_fields;
/* Try first optimistic descent to the B-tree */
@@ -3475,6 +3474,24 @@ row_ins_index_entry_set_vals(
ut_ad(dtuple_get_n_fields(row)
== dict_table_get_n_cols(index->table));
row_field = dtuple_get_nth_v_field(row, v_col->v_pos);
+ } else if (col->is_dropped()) {
+ ut_ad(index->is_primary());
+
+ if (!(col->prtype & DATA_NOT_NULL)) {
+ field->data = NULL;
+ field->len = UNIV_SQL_NULL;
+ field->type.prtype = DATA_BINARY_TYPE;
+ } else {
+ ut_ad(col->len <= sizeof field_ref_zero);
+ ut_ad(ind_field->fixed_len <= col->len);
+ dfield_set_data(field, field_ref_zero,
+ ind_field->fixed_len);
+ field->type.prtype = DATA_NOT_NULL;
+ }
+
+ field->type.mtype = col->len
+ ? DATA_FIXBINARY : DATA_BINARY;
+ continue;
} else {
row_field = dtuple_get_nth_field(
row, ind_field->col->ind);
@@ -3484,7 +3501,7 @@ row_ins_index_entry_set_vals(
/* Check column prefix indexes */
if (ind_field != NULL && ind_field->prefix_len > 0
- && dfield_get_len(row_field) != UNIV_SQL_NULL) {
+ && len != UNIV_SQL_NULL) {
const dict_col_t* col
= dict_field_get_col(ind_field);
@@ -3538,7 +3555,8 @@ row_ins_index_entry_step(
ut_ad(dtuple_check_typed(node->row));
- err = row_ins_index_entry_set_vals(node->index, node->entry, node->row);
+ err = row_ins_index_entry_set_vals(node->index, node->entry,
+ node->row);
if (err != DB_SUCCESS) {
DBUG_RETURN(err);
diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc
index 35938c99452..834a31684b2 100644
--- a/storage/innobase/row/row0log.cc
+++ b/storage/innobase/row/row0log.cc
@@ -42,7 +42,7 @@ Created 2011-05-26 Marko Makela
#include <algorithm>
#include <map>
-ulint onlineddl_rowlog_rows;
+Atomic_counter<ulint> onlineddl_rowlog_rows;
ulint onlineddl_rowlog_pct_used;
ulint onlineddl_pct_progress;
@@ -607,7 +607,7 @@ write_failed:
err_exit:
mutex_exit(&log->mutex);
- my_atomic_addlint(&onlineddl_rowlog_rows, 1);
+ onlineddl_rowlog_rows++;
/* 10000 means 100.00%, 4525 means 45.25% */
onlineddl_rowlog_pct_used = static_cast<ulint>((log->tail.total * 10000) / srv_online_max_size);
}
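
With onlineddl_rowlog_rows declared as an Atomic_counter, the increment above turns from my_atomic_addlint() into a plain ++. A minimal stand-in for such a wrapper over std::atomic with relaxed ordering (this is not MariaDB's Atomic_counter, only a sketch of the idea):

#include <atomic>
#include <cassert>
#include <cstddef>

template <typename T>
class atomic_counter
{
	std::atomic<T>	m_value;
public:
	atomic_counter(T v = 0) : m_value(v) {}
	T operator++(int)
	{ return m_value.fetch_add(1, std::memory_order_relaxed); }
	operator T() const
	{ return m_value.load(std::memory_order_relaxed); }
};

int main()
{
	atomic_counter<size_t> rows;
	rows++;
	rows++;
	assert(size_t(rows) == 2);
	return 0;
}
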
@@ -685,9 +685,9 @@ row_log_table_delete(
fields of the record. */
heap = mem_heap_create(
DATA_TRX_ID_LEN
- + DTUPLE_EST_ALLOC(unsigned(new_index->n_uniq) + 2));
- old_pk = tuple = dtuple_create(
- heap, unsigned(new_index->n_uniq) + 2);
+ + DTUPLE_EST_ALLOC(new_index->first_user_field()));
+ old_pk = tuple = dtuple_create(heap,
+ new_index->first_user_field());
dict_index_copy_types(tuple, new_index, tuple->n_fields);
dtuple_set_n_fields_cmp(tuple, new_index->n_uniq);
@@ -852,7 +852,7 @@ row_log_table_low_redundant(
const bool is_instant = index->online_log->is_instant(index);
rec_comp_status_t status = is_instant
- ? REC_STATUS_COLUMNS_ADDED : REC_STATUS_ORDINARY;
+ ? REC_STATUS_INSTANT : REC_STATUS_ORDINARY;
size = rec_get_converted_size_temp(
index, tuple->fields, tuple->n_fields, &extra_size, status);
@@ -906,7 +906,7 @@ row_log_table_low_redundant(
*b++ = static_cast<byte>(extra_size);
}
- if (status == REC_STATUS_COLUMNS_ADDED) {
+ if (status == REC_STATUS_INSTANT) {
ut_ad(is_instant);
if (n_fields <= index->online_log->n_core_fields) {
status = REC_STATUS_ORDINARY;
@@ -972,7 +972,7 @@ row_log_table_low(
ut_ad(!"wrong page type");
}
#endif /* UNIV_DEBUG */
- ut_ad(!rec_is_metadata(rec, index));
+ ut_ad(!rec_is_metadata(rec, *index));
ut_ad(page_rec_is_leaf(rec));
ut_ad(!page_is_comp(page_align(rec)) == !rec_offs_comp(offsets));
/* old_pk=row_log_table_get_pk() [not needed in INSERT] is a prefix
@@ -995,7 +995,7 @@ row_log_table_low(
ut_ad(page_is_comp(page_align(rec)));
ut_ad(rec_get_status(rec) == REC_STATUS_ORDINARY
- || rec_get_status(rec) == REC_STATUS_COLUMNS_ADDED);
+ || rec_get_status(rec) == REC_STATUS_INSTANT);
const ulint omit_size = REC_N_NEW_EXTRA_BYTES;
@@ -1069,7 +1069,7 @@ row_log_table_low(
if (is_instant) {
*b++ = fake_extra_size
- ? REC_STATUS_COLUMNS_ADDED
+ ? REC_STATUS_INSTANT
: rec_get_status(rec);
} else {
ut_ad(rec_get_status(rec) == REC_STATUS_ORDINARY);
@@ -1142,7 +1142,7 @@ ALTER TABLE)
table
@param[in] offsets rec_get_offsets(rec)
@param[in] i rec field corresponding to col
-@param[in] page_size page size of the old table
+@param[in] zip_size ROW_FORMAT=COMPRESSED size of the old table
@param[in] max_len maximum length of dfield
@param[in] log row log for the table
@retval DB_INVALID_NULL if a NULL value is encountered
@@ -1156,7 +1156,7 @@ row_log_table_get_pk_col(
const rec_t* rec,
const ulint* offsets,
ulint i,
- const page_size_t& page_size,
+ ulint zip_size,
ulint max_len,
const row_log_t* log)
{
@@ -1195,7 +1195,7 @@ row_log_table_get_pk_col(
mem_heap_alloc(heap, field_len));
len = btr_copy_externally_stored_field_prefix(
- blob_field, field_len, page_size, field, len);
+ blob_field, field_len, zip_size, field, len);
if (len >= max_len + 1) {
return(DB_TOO_BIG_INDEX_COL);
}
@@ -1246,19 +1246,16 @@ row_log_table_get_pk(
ulint trx_id_offs = index->trx_id_offset;
if (!trx_id_offs) {
- ulint pos = dict_index_get_sys_col_pos(
- index, DATA_TRX_ID);
ulint len;
- ut_ad(pos > 0);
if (!offsets) {
offsets = rec_get_offsets(
rec, index, NULL, true,
- pos + 1, heap);
+ index->db_trx_id() + 1, heap);
}
trx_id_offs = rec_get_nth_field_offs(
- offsets, pos, &len);
+ offsets, index->db_trx_id(), &len);
ut_ad(len == DATA_TRX_ID_LEN);
}
@@ -1313,8 +1310,7 @@ row_log_table_get_pk(
const ulint max_len = DICT_MAX_FIELD_LEN_BY_FORMAT(new_table);
- const page_size_t& page_size
- = dict_table_page_size(index->table);
+ const ulint zip_size = index->table->space->zip_size();
for (ulint new_i = 0; new_i < new_n_uniq; new_i++) {
dict_field_t* ifield;
@@ -1341,7 +1337,8 @@ row_log_table_get_pk(
log->error = row_log_table_get_pk_col(
ifield, dfield, *heap,
- rec, offsets, i, page_size, max_len, log);
+ rec, offsets, i, zip_size, max_len,
+ log);
if (log->error != DB_SUCCESS) {
err_exit:
@@ -1561,11 +1558,17 @@ row_log_table_apply_convert_mrec(
const dict_col_t* col
= dict_field_get_col(ind_field);
+ if (col->is_dropped()) {
+ /* the column was instantly dropped earlier */
+ ut_ad(index->table->instant);
+ continue;
+ }
+
ulint col_no
= log->col_map[dict_col_get_no(col)];
if (col_no == ULINT_UNDEFINED) {
- /* dropped column */
+ /* the column is being dropped now */
continue;
}
@@ -1602,7 +1605,7 @@ row_log_table_apply_convert_mrec(
data = btr_rec_copy_externally_stored_field(
mrec, offsets,
- dict_table_page_size(index->table),
+ index->table->space->zip_size(),
i, &len, heap);
ut_a(data);
dfield_set_data(dfield, data, len);
@@ -1921,8 +1924,7 @@ row_log_table_apply_delete(
btr_pcur_t pcur;
ulint* offsets;
- ut_ad(rec_offs_n_fields(moffsets)
- == dict_index_get_n_unique(index) + 2);
+ ut_ad(rec_offs_n_fields(moffsets) == index->first_user_field());
ut_ad(!rec_offs_any_extern(moffsets));
/* Convert the row to a search tuple. */
@@ -2488,8 +2490,7 @@ row_log_table_apply_op(
/* The ROW_T_DELETE record was converted by
rec_convert_dtuple_to_temp() using new_index. */
ut_ad(!new_index->is_instant());
- rec_offs_set_n_fields(offsets,
- unsigned(new_index->n_uniq) + 2);
+ rec_offs_set_n_fields(offsets, new_index->first_user_field());
rec_init_offsets_temp(mrec, new_index, offsets);
next_mrec = mrec + rec_offs_data_size(offsets);
if (next_mrec > mrec_end) {
@@ -2581,7 +2582,7 @@ row_log_table_apply_op(
rec_convert_dtuple_to_temp() using new_index. */
ut_ad(!new_index->is_instant());
rec_offs_set_n_fields(offsets,
- unsigned(new_index->n_uniq) + 2);
+ new_index->first_user_field());
rec_init_offsets_temp(mrec, new_index, offsets);
next_mrec = mrec + rec_offs_data_size(offsets);
@@ -2591,13 +2592,12 @@ row_log_table_apply_op(
/* Copy the PRIMARY KEY fields and
DB_TRX_ID, DB_ROLL_PTR from mrec to old_pk. */
- old_pk = dtuple_create(
- heap, unsigned(new_index->n_uniq) + 2);
+ old_pk = dtuple_create(heap,
+ new_index->first_user_field());
dict_index_copy_types(old_pk, new_index,
old_pk->n_fields);
- for (ulint i = 0;
- i < dict_index_get_n_unique(new_index) + 2;
+ for (ulint i = 0; i < new_index->first_user_field();
i++) {
const void* field;
ulint len;
@@ -2679,8 +2679,8 @@ ulint
row_log_progress_inc_per_block()
{
/* We must increment the progress once per page (as in
- univ_page_size, usually 16KiB). One block here is srv_sort_buf_size
- (usually 1MiB). */
+ srv_page_size, default = innodb_page_size=16KiB).
+ One block here is srv_sort_buf_size (usually 1MiB). */
const ulint pages_per_block = std::max<ulint>(
ulint(srv_sort_buf_size >> srv_page_size_shift), 1);
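
The reworded comment describes how progress is counted: one sort block of srv_sort_buf_size bytes covers srv_sort_buf_size >> srv_page_size_shift pages. With the defaults mentioned in the comment this works out as follows (the constants below are assumptions for illustration, not values read from a server):

#include <algorithm>
#include <cassert>

int main()
{
	const unsigned long srv_sort_buf_size = 1048576;	/* 1 MiB */
	const unsigned long srv_page_size_shift = 14;		/* 16 KiB pages */
	const unsigned long pages_per_block = std::max<unsigned long>(
		srv_sort_buf_size >> srv_page_size_shift, 1);
	assert(pages_per_block == 64);
	return 0;
}
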
@@ -2748,8 +2748,8 @@ row_log_table_apply_ops(
dict_index_t* new_index = dict_table_get_first_index(
new_table);
const ulint i = 1 + REC_OFFS_HEADER_SIZE
- + ut_max(dict_index_get_n_fields(index),
- dict_index_get_n_unique(new_index) + 2);
+ + std::max<ulint>(index->n_fields,
+ new_index->first_user_field());
const ulint new_trx_id_col = dict_col_get_clust_pos(
dict_table_get_sys_col(new_table, DATA_TRX_ID), new_index);
trx_t* trx = thr_get_trx(thr);
@@ -3209,7 +3209,8 @@ row_log_allocate(
log->head.total = 0;
log->path = path;
log->n_core_fields = index->n_core_fields;
- ut_ad(!table || log->is_instant(index) == index->is_instant());
+ ut_ad(!table || log->is_instant(index)
+ == (index->n_core_fields < index->n_fields));
log->allow_not_null = allow_not_null;
log->old_table = old_table;
log->n_rows = 0;
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index edf76eb2192..469a25e6836 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 2005, 2017, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2014, 2018, MariaDB Corporation.
+Copyright (c) 2014, 2019, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -442,7 +442,7 @@ row_merge_buf_redundant_convert(
const dfield_t* row_field,
dfield_t* field,
ulint len,
- const page_size_t& page_size,
+ ulint zip_size,
mem_heap_t* heap)
{
ut_ad(field->type.mbminlen == 1);
@@ -462,7 +462,7 @@ row_merge_buf_redundant_convert(
field_ref_zero, BTR_EXTERN_FIELD_REF_SIZE));
byte* data = btr_copy_externally_stored_field(
- &ext_len, field_data, page_size, field_len, heap);
+ &ext_len, field_data, zip_size, field_len, heap);
ut_ad(ext_len < len);
@@ -704,13 +704,13 @@ row_merge_buf_add(
if (conv_heap != NULL) {
row_merge_buf_redundant_convert(
row_field, field, col->len,
- dict_table_page_size(old_table),
+ old_table->space->zip_size(),
conv_heap);
} else {
/* Field length mismatch should not
happen when rebuilding redundant row
format table. */
- ut_ad(dict_table_is_comp(index->table));
+ ut_ad(index->table->not_redundant());
}
}
}
@@ -1861,7 +1861,7 @@ row_merge_read_clustered_index(
btr_pcur_open_at_index_side(
true, clust_index, BTR_SEARCH_LEAF, &pcur, true, 0, &mtr);
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
- if (rec_is_metadata(btr_pcur_get_rec(&pcur), clust_index)) {
+ if (rec_is_metadata(btr_pcur_get_rec(&pcur), *clust_index)) {
ut_ad(btr_pcur_is_on_user_rec(&pcur));
/* Skip the metadata pseudo-record. */
} else {
@@ -1976,8 +1976,8 @@ row_merge_read_clustered_index(
goto scan_next;
}
- if (my_atomic_load32_explicit(&clust_index->lock.waiters,
- MY_MEMORY_ORDER_RELAXED)) {
+ if (clust_index->lock.waiters.load(
+ std::memory_order_relaxed)) {
/* There are waiters on the clustered
index tree lock, likely the purge
thread. Store and restore the cursor
@@ -2035,7 +2035,7 @@ end_of_index:
block = btr_block_get(
page_id_t(block->page.id.space(),
next_page_no),
- block->page.size,
+ block->zip_size(),
BTR_SEARCH_LEAF,
clust_index, &mtr);
@@ -3433,7 +3433,7 @@ void
row_merge_copy_blobs(
const mrec_t* mrec,
const ulint* offsets,
- const page_size_t& page_size,
+ ulint zip_size,
dtuple_t* tuple,
mem_heap_t* heap)
{
@@ -3471,10 +3471,10 @@ row_merge_copy_blobs(
BTR_EXTERN_FIELD_REF_SIZE));
data = btr_copy_externally_stored_field(
- &len, field_data, page_size, field_len, heap);
+ &len, field_data, zip_size, field_len, heap);
} else {
data = btr_rec_copy_externally_stored_field(
- mrec, offsets, page_size, i, &len, heap);
+ mrec, offsets, zip_size, i, &len, heap);
}
/* Because we have locked the table, any records
@@ -3672,8 +3672,7 @@ row_merge_insert_index_tuples(
row_log_table_blob_alloc() and
row_log_table_blob_free(). */
row_merge_copy_blobs(
- mrec, offsets,
- dict_table_page_size(old_table),
+ mrec, offsets, old_table->space->zip_size(),
dtuple, tuple_heap);
}
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 6b11df854d1..afbcdfe4423 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -34,7 +34,6 @@ Created 9/17/2000 Heikki Tuuri
#include "btr0sea.h"
#include "dict0boot.h"
#include "dict0crea.h"
-#include <sql_const.h>
#include "dict0dict.h"
#include "dict0load.h"
#include "dict0priv.h"
@@ -329,6 +328,7 @@ row_mysql_read_geometry(
ulint col_len) /*!< in: MySQL format length */
{
byte* data;
+ ut_ad(col_len > 8);
*len = mach_read_from_n_little_endian(ref, col_len - 8);
@@ -829,7 +829,8 @@ row_create_prebuilt(
clust_index = dict_table_get_first_index(table);
/* Make sure that search_tuple is long enough for clustered index */
- ut_a(2 * dict_table_get_n_cols(table) >= clust_index->n_fields);
+ ut_a(2 * unsigned(table->n_cols) >= unsigned(clust_index->n_fields)
+ - clust_index->table->n_dropped());
ref_len = dict_index_get_n_unique(clust_index);
@@ -2847,11 +2848,15 @@ row_mysql_table_id_reassign(
dberr_t err;
pars_info_t* info = pars_info_create();
- dict_hdr_get_new_id(new_id, NULL, NULL, table, false);
+ dict_hdr_get_new_id(new_id, NULL, NULL);
pars_info_add_ull_literal(info, "old_id", table->id);
pars_info_add_ull_literal(info, "new_id", *new_id);
+ /* Note: This cannot be rolled back. Rollback would see the
+ UPDATE SYS_INDEXES as two operations: DELETE and INSERT.
+ It would invoke btr_free_if_exists() when rolling back the
+ INSERT, effectively dropping all indexes of the table. */
err = que_eval_sql(
info,
"PROCEDURE RENUMBER_TABLE_PROC () IS\n"
@@ -3081,7 +3086,7 @@ row_discard_tablespace(
dict_table_change_id_in_cache(table, new_id);
dict_index_t* index = UT_LIST_GET_FIRST(table->indexes);
- if (index) index->remove_instant();
+ if (index) index->clear_instant_alter();
/* Reset the root page numbers. */
for (; index; index = UT_LIST_GET_NEXT(indexes, index)) {
@@ -3141,6 +3146,12 @@ row_discard_tablespace_for_mysql(
err = row_discard_tablespace_foreign_key_checks(trx, table);
if (err == DB_SUCCESS) {
+ /* Note: This cannot be rolled back.
+ Rollback would see the UPDATE SYS_INDEXES
+ as two operations: DELETE and INSERT.
+ It would invoke btr_free_if_exists()
+ when rolling back the INSERT, effectively
+ dropping all indexes of the table. */
err = row_discard_tablespace(trx, table);
}
}
@@ -3356,8 +3367,7 @@ row_drop_table_for_mysql(
for (dict_index_t* index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
- btr_free(page_id_t(SRV_TMP_SPACE_ID, index->page),
- univ_page_size);
+ btr_free(page_id_t(SRV_TMP_SPACE_ID, index->page));
}
/* Remove the pointer to this table object from the list
of modified tables by the transaction because the object
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index 2fc465e7726..bafcb35fad8 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -102,33 +102,32 @@ row_purge_remove_clust_if_poss_low(
purge_node_t* node, /*!< in/out: row purge node */
ulint mode) /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE */
{
- dict_index_t* index;
- bool success = true;
- mtr_t mtr;
- rec_t* rec;
- mem_heap_t* heap = NULL;
- ulint* offsets;
- ulint offsets_[REC_OFFS_NORMAL_SIZE];
- rec_offs_init(offsets_);
-
ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_S)
|| node->vcol_info.is_used());
- index = dict_table_get_first_index(node->table);
+ dict_index_t* index = dict_table_get_first_index(node->table);
log_free_check();
- mtr_start(&mtr);
- index->set_modified(mtr);
+
+ mtr_t mtr;
+ mtr.start();
if (!row_purge_reposition_pcur(mode, node, &mtr)) {
/* The record was already removed. */
- goto func_exit;
+ mtr.commit();
+ return true;
}
- rec = btr_pcur_get_rec(&node->pcur);
+ ut_d(const bool was_instant = !!index->table->instant);
+ index->set_modified(mtr);
- offsets = rec_get_offsets(
+ rec_t* rec = btr_pcur_get_rec(&node->pcur);
+ ulint offsets_[REC_OFFS_NORMAL_SIZE];
+ rec_offs_init(offsets_);
+ mem_heap_t* heap = NULL;
+ ulint* offsets = rec_get_offsets(
rec, index, offsets_, true, ULINT_UNDEFINED, &heap);
+ bool success = true;
if (node->roll_ptr != row_get_rec_roll_ptr(rec, index, offsets)) {
/* Someone else has modified the record later: do not remove */
@@ -161,6 +160,10 @@ row_purge_remove_clust_if_poss_low(
}
}
+ /* Prove that dict_index_t::clear_instant_alter() was
+ not called with index->table->instant != NULL. */
+ ut_ad(!was_instant || index->table->instant);
+
func_exit:
if (heap) {
mem_heap_free(heap);
@@ -820,8 +823,9 @@ static void row_purge_reset_trx_id(purge_node_t* node, mtr_t* mtr)
became purgeable) */
if (node->roll_ptr
== row_get_rec_roll_ptr(rec, index, offsets)) {
- ut_ad(!rec_get_deleted_flag(rec,
- rec_offs_comp(offsets)));
+ ut_ad(!rec_get_deleted_flag(
+ rec, rec_offs_comp(offsets))
+ || rec_is_alter_metadata(rec, *index));
DBUG_LOG("purge", "reset DB_TRX_ID="
<< ib::hex(row_get_rec_trx_id(
rec, index, offsets)));
@@ -963,7 +967,7 @@ skip_secondaries:
block = buf_page_get(
page_id_t(rseg->space->id, page_no),
- univ_page_size, RW_X_LATCH, &mtr);
+ 0, RW_X_LATCH, &mtr);
buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
diff --git a/storage/innobase/row/row0quiesce.cc b/storage/innobase/row/row0quiesce.cc
index 352407b6ee5..d77cc1e33dc 100644
--- a/storage/innobase/row/row0quiesce.cc
+++ b/storage/innobase/row/row0quiesce.cc
@@ -70,17 +70,16 @@ row_quiesce_write_index_fields(
return(DB_IO_ERROR);
}
+ const char* field_name = field->name ? field->name : "";
/* Include the NUL byte in the length. */
- ib_uint32_t len = static_cast<ib_uint32_t>(strlen(field->name) + 1);
- ut_a(len > 1);
-
+ ib_uint32_t len = static_cast<ib_uint32_t>(strlen(field_name) + 1);
mach_write_to_4(row, len);
DBUG_EXECUTE_IF("ib_export_io_write_failure_10",
close(fileno(file)););
if (fwrite(row, 1, sizeof(len), file) != sizeof(len)
- || fwrite(field->name, 1, len, file) != len) {
+ || fwrite(field_name, 1, len, file) != len) {
ib_senderrf(
thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
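
The hunk above makes the .cfg writer tolerate a missing field name by falling back to an empty string, still writing a 4-byte length (which includes the NUL terminator) followed by the name bytes. A standalone sketch of that record layout (write_name is our name, and the manual big-endian encoding only mirrors what mach_write_to_4() does; this is stand-in code, not the quiesce writer):

#include <cassert>
#include <cstdio>
#include <cstring>

static bool write_name(FILE* f, const char* name)
{
	const char* s = name ? name : "";
	const unsigned len = unsigned(strlen(s)) + 1;	/* include the NUL */
	const unsigned char row[4] = {
		(unsigned char) (len >> 24), (unsigned char) (len >> 16),
		(unsigned char) (len >> 8), (unsigned char) len
	};
	return fwrite(row, 1, sizeof row, f) == sizeof row
		&& fwrite(s, 1, len, f) == len;
}

int main()
{
	FILE* f = tmpfile();
	assert(f && write_name(f, "col1") && write_name(f, NULL));
	fclose(f);
	return 0;
}
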
diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc
index a9c946200c2..c200e6fb15c 100644
--- a/storage/innobase/row/row0row.cc
+++ b/storage/innobase/row/row0row.cc
@@ -152,7 +152,7 @@ static bool row_build_spatial_index_key(
temp_heap = mem_heap_create(1000);
dptr = btr_copy_externally_stored_field(
- &dlen, dptr, ext ? ext->page_size : page_size_t(space->flags),
+ &dlen, dptr, ext ? ext->zip_size : space->zip_size(),
flen, temp_heap);
write_mbr:
@@ -198,7 +198,7 @@ row_build_index_entry_low(
{
dtuple_t* entry;
ulint entry_len;
- ulint i;
+ ulint i = 0;
ulint num_v = 0;
entry_len = dict_index_get_n_fields(index);
@@ -218,90 +218,87 @@ row_build_index_entry_low(
} else {
dtuple_set_n_fields_cmp(
entry, dict_index_get_n_unique_in_tree(index));
- }
+ if (dict_index_is_spatial(index)) {
+ /* Set the MBR field */
+ if (!row_build_spatial_index_key(
+ index, ext,
+ dtuple_get_nth_field(entry, 0),
+ dtuple_get_nth_field(
+ row,
+ dict_index_get_nth_field(index, i)
+ ->col->ind), flag, heap)) {
+ return NULL;
+ }
- for (i = 0; i < entry_len + num_v; i++) {
- const dict_field_t* ind_field = NULL;
- const dict_col_t* col;
- ulint col_no = 0;
- dfield_t* dfield;
- const dfield_t* dfield2;
- ulint len;
-
- if (i >= entry_len) {
- /* This is to insert new rows to cluster index */
- ut_ad(dict_index_is_clust(index)
- && flag == ROW_BUILD_FOR_INSERT);
- dfield = dtuple_get_nth_v_field(entry, i - entry_len);
- col = &dict_table_get_nth_v_col(
- index->table, i - entry_len)->m_col;
+ i = 1;
+ }
+ }
- } else {
- ind_field = dict_index_get_nth_field(index, i);
- col = ind_field->col;
- col_no = dict_col_get_no(col);
- dfield = dtuple_get_nth_field(entry, i);
+ for (; i < entry_len; i++) {
+ const dict_field_t& f = index->fields[i];
+ dfield_t* dfield = dtuple_get_nth_field(entry, i);
+
+ if (f.col->is_dropped()) {
+ ut_ad(index->is_primary());
+ ut_ad(index->is_instant());
+ ut_ad(!f.col->is_virtual());
+ dict_col_copy_type(f.col, &dfield->type);
+ if (f.col->is_nullable()) {
+ dfield_set_null(dfield);
+ } else {
+ dfield_set_data(dfield, field_ref_zero,
+ f.fixed_len);
+ }
+ continue;
}
- compile_time_assert(DATA_MISSING == 0);
+ const dfield_t* dfield2;
- if (col->is_virtual()) {
- const dict_v_col_t* v_col
- = reinterpret_cast<const dict_v_col_t*>(col);
+ if (f.col->is_virtual()) {
+ const dict_v_col_t* v_col
+ = reinterpret_cast<const dict_v_col_t*>(f.col);
ut_ad(v_col->v_pos < dtuple_get_n_v_fields(row));
dfield2 = dtuple_get_nth_v_field(row, v_col->v_pos);
ut_ad(dfield_is_null(dfield2) ||
dfield_get_len(dfield2) == 0 || dfield2->data);
+ ut_ad(!dfield_is_ext(dfield2));
+ if (UNIV_UNLIKELY(dfield2->type.mtype
+ == DATA_MISSING)) {
+ ut_ad(flag == ROW_BUILD_FOR_PURGE);
+ return(NULL);
+ }
} else {
- dfield2 = dtuple_get_nth_field(row, col_no);
- ut_ad(dfield_get_type(dfield2)->mtype == DATA_MISSING
- || (!(dfield_get_type(dfield2)->prtype
- & DATA_VIRTUAL)));
- }
-
- if (UNIV_UNLIKELY(dfield_get_type(dfield2)->mtype
- == DATA_MISSING)) {
- /* The field has not been initialized in the row.
- This should be from trx_undo_rec_get_partial_row(). */
- return(NULL);
- }
-
-#ifdef UNIV_DEBUG
- if (dfield_get_type(dfield2)->prtype & DATA_VIRTUAL
- && dict_index_is_clust(index)) {
- ut_ad(flag == ROW_BUILD_FOR_INSERT);
- }
-#endif /* UNIV_DEBUG */
-
- /* Special handle spatial index, set the first field
- which is for store MBR. */
- if (dict_index_is_spatial(index) && i == 0) {
- if (!row_build_spatial_index_key(
- index, ext, dfield, dfield2, flag, heap)) {
- return NULL;
+ dfield2 = dtuple_get_nth_field(row, f.col->ind);
+ if (UNIV_UNLIKELY(dfield2->type.mtype
+ == DATA_MISSING)) {
+ /* The field has not been initialized in
+ the row. This should be from
+ trx_undo_rec_get_partial_row(). */
+ return(NULL);
}
- continue;
+ ut_ad(!(dfield2->type.prtype & DATA_VIRTUAL));
}
- len = dfield_get_len(dfield2);
+ compile_time_assert(DATA_MISSING == 0);
- dfield_copy(dfield, dfield2);
+ *dfield = *dfield2;
if (dfield_is_null(dfield)) {
continue;
}
- if ((!ind_field || ind_field->prefix_len == 0)
+ ulint len = dfield_get_len(dfield);
+
+ if (f.prefix_len == 0
&& (!dfield_is_ext(dfield)
|| dict_index_is_clust(index))) {
/* The dfield_copy() above suffices for
columns that are stored in-page, or for
clustered index record columns that are not
- part of a column prefix in the PRIMARY KEY,
- or for virtaul columns in cluster index record. */
+ part of a column prefix in the PRIMARY KEY. */
continue;
}
@@ -312,11 +309,11 @@ row_build_index_entry_low(
index record with an off-page column is when it is a
column prefix index. If atomic_blobs, also fully
indexed long columns may be stored off-page. */
- ut_ad(col->ord_part);
+ ut_ad(f.col->ord_part);
- if (ext && !col->is_virtual()) {
+ if (ext && !f.col->is_virtual()) {
/* See if the column is stored externally. */
- const byte* buf = row_ext_lookup(ext, col_no,
+ const byte* buf = row_ext_lookup(ext, f.col->ind,
&len);
if (UNIV_LIKELY_NULL(buf)) {
if (UNIV_UNLIKELY(buf == field_ref_zero)) {
@@ -325,7 +322,7 @@ row_build_index_entry_low(
dfield_set_data(dfield, buf, len);
}
- if (ind_field->prefix_len == 0) {
+ if (f.prefix_len == 0) {
/* If ROW_FORMAT=DYNAMIC or
ROW_FORMAT=COMPRESSED, we can have a
secondary index on an entire column
@@ -352,16 +349,33 @@ row_build_index_entry_low(
}
/* If a column prefix index, take only the prefix. */
- if (ind_field->prefix_len) {
+ if (f.prefix_len) {
len = dtype_get_at_most_n_mbchars(
- col->prtype, col->mbminlen, col->mbmaxlen,
- ind_field->prefix_len, len,
+ f.col->prtype,
+ f.col->mbminlen, f.col->mbmaxlen,
+ f.prefix_len, len,
static_cast<char*>(dfield_get_data(dfield)));
dfield_set_len(dfield, len);
}
}
- return(entry);
+ for (i = num_v; i--; ) {
+ ut_ad(index->is_primary());
+ ut_ad(flag == ROW_BUILD_FOR_INSERT);
+ dfield_t* dfield = dtuple_get_nth_v_field(entry, i);
+ const dict_v_col_t* v_col = dict_table_get_nth_v_col(
+ index->table, i);
+ ut_ad(!v_col->m_col.is_dropped());
+ ut_ad(v_col->v_pos < dtuple_get_n_v_fields(row));
+ const dfield_t* dfield2 = dtuple_get_nth_v_field(
+ row, v_col->v_pos);
+ ut_ad(dfield_is_null(dfield2) ||
+ dfield_get_len(dfield2) == 0 || dfield2->data);
+ ut_ad(dfield2->type.mtype != DATA_MISSING);
+ *dfield = *dfield2;
+ }
+
+ return entry;
}
/** An inverse function to row_build_index_entry. Builds a row from a
@@ -498,11 +512,23 @@ row_build_low(
j = 0;
+ const dict_field_t* ind_field = index->fields;
+
for (ulint i = 0; i < rec_offs_n_fields(offsets); i++) {
- const dict_field_t* ind_field
- = dict_index_get_nth_field(index, i);
+ if (i == index->first_user_field()
+ && rec_is_alter_metadata(rec, *index)) {
+ ut_ad(rec_offs_nth_extern(offsets, i));
+ ut_d(ulint len);
+ ut_d(rec_get_nth_field_offs(offsets, i, &len));
+ ut_ad(len == FIELD_REF_SIZE);
+ continue;
+ }
+
+ ut_ad(ind_field < &index->fields[index->n_fields]);
- if (ind_field->prefix_len) {
+ const dict_col_t* col = dict_field_get_col(ind_field);
+
+ if ((ind_field++)->prefix_len) {
/* Column prefixes can only occur in key
fields, which cannot be stored externally. For
a column prefix, there should also be the full
@@ -512,10 +538,11 @@ row_build_low(
continue;
}
- const dict_col_t* col
- = dict_field_get_col(ind_field);
- ulint col_no
- = dict_col_get_no(col);
+ if (col->is_dropped()) {
+ continue;
+ }
+
+ ulint col_no = dict_col_get_no(col);
if (col_map) {
col_no = col_map[col_no];
@@ -527,6 +554,7 @@ row_build_low(
}
dfield_t* dfield = dtuple_get_nth_field(row, col_no);
+
const void* field = rec_get_nth_field(
copy, offsets, i, &len);
if (len == UNIV_SQL_DEFAULT) {
@@ -566,7 +594,7 @@ row_build_low(
row_log_table_delete(). */
} else if (j) {
- *ext = row_ext_create(j, ext_cols, index->table->flags, row,
+ *ext = row_ext_create(j, ext_cols, *index->table, row,
heap);
} else {
*ext = NULL;
@@ -670,15 +698,19 @@ row_build_w_add_vcol(
}
/** Convert an index record to a data tuple.
-@tparam def whether the index->instant_field_value() needs to be accessed
-@param[in] rec index record
-@param[in] index index
-@param[in] offsets rec_get_offsets(rec, index)
-@param[out] n_ext number of externally stored columns
-@param[in,out] heap memory heap for allocations
+@tparam metadata whether the index->instant_field_value() needs to be accessed
+@tparam mblob 1 if rec_is_alter_metadata();
+2 if we want converted metadata corresponding to info_bits
+@param[in] rec index record
+@param[in] index index
+@param[in] offsets rec_get_offsets(rec, index)
+@param[out] n_ext number of externally stored columns
+@param[in,out] heap memory heap for allocations
+@param[in] info_bits (only used if mblob=2)
+@param[in] pad (only used if mblob=2)
@return index entry built; does not set info_bits, and the data fields
in the entry will point directly to rec */
-template<bool def>
+template<bool metadata, int mblob = 0>
static inline
dtuple_t*
row_rec_to_index_entry_impl(
@@ -686,44 +718,66 @@ row_rec_to_index_entry_impl(
const dict_index_t* index,
const ulint* offsets,
ulint* n_ext,
- mem_heap_t* heap)
+ mem_heap_t* heap,
+ ulint info_bits = 0,
+ bool pad = false)
{
- dtuple_t* entry;
- dfield_t* dfield;
- ulint i;
- const byte* field;
- ulint len;
- ulint rec_len;
-
ut_ad(rec != NULL);
ut_ad(heap != NULL);
ut_ad(index != NULL);
- ut_ad(def || !rec_offs_any_default(offsets));
-
+ ut_ad(!mblob || index->is_primary());
+ ut_ad(!mblob || !index->table->is_temporary());
+ ut_ad(!mblob || !dict_index_is_spatial(index));
+ compile_time_assert(!mblob || metadata);
+ compile_time_assert(mblob <= 2);
/* Because this function may be invoked by row0merge.cc
on a record whose header is in different format, the check
rec_offs_validate(rec, index, offsets) must be avoided here. */
ut_ad(n_ext);
*n_ext = 0;
- rec_len = rec_offs_n_fields(offsets);
-
- entry = dtuple_create(heap, rec_len);
+ const bool got = mblob == 2 && rec_is_alter_metadata(rec, *index);
+ ulint rec_len = rec_offs_n_fields(offsets);
+ if (mblob == 2) {
+ ut_ad(info_bits == REC_INFO_METADATA_ALTER
+ || info_bits == REC_INFO_METADATA_ADD);
+ ut_ad(rec_len <= ulint(index->n_fields + got));
+ if (pad) {
+ rec_len = ulint(index->n_fields)
+ + (info_bits == REC_INFO_METADATA_ALTER);
+ } else if (!got && info_bits == REC_INFO_METADATA_ALTER) {
+ rec_len++;
+ }
+ } else {
+ ut_ad(info_bits == 0);
+ ut_ad(!pad);
+ }
+ dtuple_t* entry = dtuple_create(heap, rec_len);
+ dfield_t* dfield = entry->fields;
dtuple_set_n_fields_cmp(entry,
dict_index_get_n_unique_in_tree(index));
- ut_ad(rec_len == dict_index_get_n_fields(index)
+ ut_ad(mblob == 2
+ || rec_len == dict_index_get_n_fields(index) + uint(mblob == 1)
/* a record for older SYS_INDEXES table
(missing merge_threshold column) is acceptable. */
- || (index->table->id == DICT_INDEXES_ID
+ || (!index->table->is_temporary()
+ && index->table->id == DICT_INDEXES_ID
&& rec_len == dict_index_get_n_fields(index) - 1));
- dict_index_copy_types(entry, index, rec_len);
-
- for (i = 0; i < rec_len; i++) {
+ ulint i;
+ for (i = 0; i < (mblob ? index->first_user_field() : rec_len);
+ i++, dfield++) {
+ dict_col_copy_type(dict_index_get_nth_col(index, i),
+ &dfield->type);
+ if (!mblob
+ && dict_index_is_spatial(index)
+ && DATA_GEOMETRY_MTYPE(dfield->type.mtype)) {
+ dfield->type.prtype |= DATA_GIS_MBR;
+ }
- dfield = dtuple_get_nth_field(entry, i);
- field = def
+ ulint len;
+ const byte* field = metadata
? rec_get_nth_cfield(rec, index, offsets, i, &len)
: rec_get_nth_field(rec, offsets, i, &len);
@@ -731,12 +785,80 @@ row_rec_to_index_entry_impl(
if (rec_offs_nth_extern(offsets, i)) {
dfield_set_ext(dfield);
- (*n_ext)++;
+ ++*n_ext;
+ }
+ }
+
+ if (mblob) {
+ ulint len;
+ const byte* field;
+ ulint j = i;
+
+ if (mblob == 2) {
+ const bool want = info_bits == REC_INFO_METADATA_ALTER;
+ if (got == want) {
+ if (got) {
+ goto copy_metadata;
+ }
+ } else {
+ if (want) {
+ /* Allocate a placeholder for
+ adding metadata in an update. */
+ len = FIELD_REF_SIZE;
+ field = static_cast<byte*>(
+ mem_heap_zalloc(heap, len));
+ /* In reality there is one fewer
+ field present in the record. */
+ rec_len--;
+ goto init_metadata;
+ }
+
+ /* Skip the undesired metadata blob
+ (for example, when rolling back an
+ instant ALTER TABLE). */
+ i++;
+ }
+ goto copy_user_fields;
+ }
+copy_metadata:
+ ut_ad(rec_offs_nth_extern(offsets, i));
+ field = rec_get_nth_field(rec, offsets, i++, &len);
+init_metadata:
+ dfield->type.metadata_blob_init();
+ ut_ad(len == FIELD_REF_SIZE);
+ dfield_set_data(dfield, field, len);
+ dfield_set_ext(dfield++);
+ ++*n_ext;
+copy_user_fields:
+ for (; i < rec_len; i++, dfield++) {
+ dict_col_copy_type(dict_index_get_nth_col(index, j++),
+ &dfield->type);
+ if (mblob == 2 && pad
+ && i >= rec_offs_n_fields(offsets)) {
+ field = index->instant_field_value(j - 1,
+ &len);
+ dfield_set_data(dfield, field, len);
+ continue;
+ }
+
+ field = rec_get_nth_field(rec, offsets, i, &len);
+ dfield_set_data(dfield, field, len);
+
+ if (rec_offs_nth_extern(offsets, i)) {
+ dfield_set_ext(dfield);
+ ++*n_ext;
+ }
}
}
+ if (mblob == 2) {
+ ulint n_fields = ulint(dfield - entry->fields);
+ ut_ad(entry->n_fields >= n_fields);
+ entry->n_fields = n_fields;
+ }
+ ut_ad(dfield == entry->fields + entry->n_fields);
ut_ad(dtuple_check_typed(entry));
- return(entry);
+ return entry;
}
/** Convert an index record to a data tuple.
@@ -772,25 +894,26 @@ row_rec_to_index_entry(
mem_heap_t* heap) /*!< in: memory heap from which
the memory needed is allocated */
{
- dtuple_t* entry;
- byte* buf;
- const rec_t* copy_rec;
-
ut_ad(rec != NULL);
ut_ad(heap != NULL);
ut_ad(index != NULL);
ut_ad(rec_offs_validate(rec, index, offsets));
/* Take a copy of rec to heap */
- buf = static_cast<byte*>(
- mem_heap_alloc(heap, rec_offs_size(offsets)));
-
- copy_rec = rec_copy(buf, rec, offsets);
+ const rec_t* copy_rec = rec_copy(
+ static_cast<byte*>(mem_heap_alloc(heap,
+ rec_offs_size(offsets))),
+ rec, offsets);
rec_offs_make_valid(copy_rec, index, true,
const_cast<ulint*>(offsets));
- entry = row_rec_to_index_entry_impl<true>(
- copy_rec, index, offsets, n_ext, heap);
+
+ dtuple_t* entry = rec_is_alter_metadata(copy_rec, *index)
+ ? row_rec_to_index_entry_impl<true,1>(
+ copy_rec, index, offsets, n_ext, heap)
+ : row_rec_to_index_entry_impl<true>(
+ copy_rec, index, offsets, n_ext, heap);
+
rec_offs_make_valid(rec, index, true,
const_cast<ulint*>(offsets));
@@ -800,6 +923,51 @@ row_rec_to_index_entry(
return(entry);
}
+/** Convert a metadata record to a data tuple.
+@param[in] rec metadata record
+@param[in] index clustered index after instant ALTER TABLE
+@param[in] offsets rec_get_offsets(rec)
+@param[out] n_ext number of externally stored fields
+@param[in,out] heap memory heap for allocations
+@param[in] info_bits the info_bits after an update
+@param[in] pad whether to pad to index->n_fields */
+dtuple_t*
+row_metadata_to_tuple(
+ const rec_t* rec,
+ const dict_index_t* index,
+ const ulint* offsets,
+ ulint* n_ext,
+ mem_heap_t* heap,
+ ulint info_bits,
+ bool pad)
+{
+ ut_ad(info_bits == REC_INFO_METADATA_ALTER
+ || info_bits == REC_INFO_METADATA_ADD);
+ ut_ad(rec_is_metadata(rec, *index));
+ ut_ad(rec_offs_validate(rec, index, offsets));
+
+ const rec_t* copy_rec = rec_copy(
+ static_cast<byte*>(mem_heap_alloc(heap,
+ rec_offs_size(offsets))),
+ rec, offsets);
+
+ rec_offs_make_valid(copy_rec, index, true,
+ const_cast<ulint*>(offsets));
+
+ dtuple_t* entry = info_bits == REC_INFO_METADATA_ALTER
+ || rec_is_alter_metadata(copy_rec, *index)
+ ? row_rec_to_index_entry_impl<true,2>(
+ copy_rec, index, offsets, n_ext, heap, info_bits, pad)
+ : row_rec_to_index_entry_impl<true>(
+ copy_rec, index, offsets, n_ext, heap);
+
+ rec_offs_make_valid(rec, index, true,
+ const_cast<ulint*>(offsets));
+
+ dtuple_set_info_bits(entry, info_bits);
+ return entry;
+}
+
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record.
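
A note on the two conversion functions added above: row_rec_to_index_entry() and row_metadata_to_tuple() inspect the record at run time (rec_is_alter_metadata(), info_bits) and then call the matching compile-time instantiation of row_rec_to_index_entry_impl<metadata, mblob>, so the ordinary path carries none of the metadata-blob handling. The following is a minimal standalone sketch of that dispatch pattern only; the bodies are placeholders and the field arithmetic is illustrative, not MariaDB code.

#include <iostream>

template<bool metadata, int mblob = 0>
static int build_entry(int rec_fields)
{
        static_assert(mblob <= 2, "mblob is 0, 1 or 2");
        static_assert(!mblob || metadata, "a metadata BLOB implies metadata access");
        /* Placeholder: a real implementation would copy rec_fields fields and,
        for mblob != 0, also handle the metadata BLOB pointer. */
        return rec_fields + (mblob != 0);
}

static int build_entry_dispatch(int rec_fields, bool is_alter_metadata)
{
        /* Run-time information selects the compile-time specialization,
        mirroring the ternary dispatch in row_rec_to_index_entry() above. */
        return is_alter_metadata
                ? build_entry<true, 1>(rec_fields)
                : build_entry<true>(rec_fields);
}

int main()
{
        std::cout << build_entry_dispatch(3, false) << ' '
                  << build_entry_dispatch(3, true) << '\n';
        return 0;
}
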
@@ -1031,7 +1199,7 @@ row_search_on_row_ref(
index = dict_table_get_first_index(table);
if (UNIV_UNLIKELY(ref->info_bits != 0)) {
- ut_ad(ref->info_bits == REC_INFO_METADATA);
+ ut_ad(ref->is_metadata());
ut_ad(ref->n_fields <= index->n_uniq);
btr_pcur_open_at_index_side(true, index, mode, pcur, true, 0,
mtr);
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 30911e61ea3..20df64d599f 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -54,6 +54,9 @@ Created 12/19/1997 Heikki Tuuri
#include "buf0lru.h"
#include "srv0srv.h"
#include "srv0mon.h"
+#ifdef WITH_WSREP
+#include "mysql/service_wsrep.h" /* For wsrep_thd_skip_locking */
+#endif
/* Maximum number of rows to prefetch; MySQL interface has another parameter */
#define SEL_MAX_N_PREFETCH 16
@@ -124,7 +127,7 @@ row_sel_sec_rec_is_for_blob(
}
len = btr_copy_externally_stored_field_prefix(
- buf, prefix_len, page_size_t(table->space->flags),
+ buf, prefix_len, table->space->zip_size(),
clust_field, clust_len);
if (len == 0) {
@@ -305,8 +308,7 @@ row_sel_sec_rec_is_for_clust_rec(
if (rec_offs_nth_extern(clust_offs, clust_pos)) {
dptr = btr_copy_externally_stored_field(
&clust_len, dptr,
- page_size_t(clust_index->table->space
- ->flags),
+ clust_index->table->space->zip_size(),
len, heap);
}
@@ -529,7 +531,7 @@ row_sel_fetch_columns(
data = btr_rec_copy_externally_stored_field(
rec, offsets,
- dict_table_page_size(index->table),
+ index->table->space->zip_size(),
field_no, &len, heap);
/* data == NULL means that the
@@ -1132,7 +1134,7 @@ re_scan:
cur_block = buf_page_get_gen(
page_id_t(index->table->space_id, page_no),
- page_size_t(index->table->space->flags),
+ index->table->space->zip_size(),
RW_X_LATCH, NULL, BUF_GET,
__FILE__, __LINE__, mtr, &err);
} else {
@@ -1487,7 +1489,7 @@ row_sel_try_search_shortcut(
const rec_t* rec = btr_pcur_get_rec(&(plan->pcur));
- if (!page_rec_is_user_rec(rec) || rec_is_metadata(rec, index)) {
+ if (!page_rec_is_user_rec(rec) || rec_is_metadata(rec, *index)) {
retry:
rw_lock_s_unlock(ahi_latch);
return(SEL_RETRY);
@@ -1787,7 +1789,7 @@ skip_lock:
goto next_rec;
}
- if (rec_is_metadata(rec, index)) {
+ if (rec_is_metadata(rec, *index)) {
/* Skip the metadata pseudo-record. */
cost_counter++;
goto next_rec;
@@ -2693,44 +2695,6 @@ row_sel_convert_mysql_key_to_innobase(
}
/**************************************************************//**
-Stores the row id to the prebuilt struct. */
-static
-void
-row_sel_store_row_id_to_prebuilt(
-/*=============================*/
- row_prebuilt_t* prebuilt, /*!< in/out: prebuilt */
- const rec_t* index_rec, /*!< in: record */
- const dict_index_t* index, /*!< in: index of the record */
- const ulint* offsets) /*!< in: rec_get_offsets
- (index_rec, index) */
-{
- const byte* data;
- ulint len;
-
- ut_ad(rec_offs_validate(index_rec, index, offsets));
-
- data = rec_get_nth_field(
- index_rec, offsets,
- dict_index_get_sys_col_pos(index, DATA_ROW_ID), &len);
-
- if (UNIV_UNLIKELY(len != DATA_ROW_ID_LEN)) {
-
- ib::error() << "Row id field is wrong length " << len << " in"
- " index " << index->name
- << " of table " << index->table->name
- << ", Field number "
- << dict_index_get_sys_col_pos(index, DATA_ROW_ID)
- << ", record:";
-
- rec_print_new(stderr, index_rec, offsets);
- putc('\n', stderr);
- ut_error;
- }
-
- ut_memcpy(prebuilt->row_id, data, len);
-}
-
-/**************************************************************//**
Stores a non-SQL-NULL field in the MySQL format. The counterpart of this
function is row_mysql_store_col_in_innobase_format() in row0mysql.cc. */
void
@@ -2744,7 +2708,6 @@ row_sel_field_store_in_mysql_format_func(
const byte* data,
ulint len)
{
- byte* ptr;
#ifdef UNIV_DEBUG
const dict_field_t* field
= templ->is_virtual
@@ -2756,31 +2719,10 @@ row_sel_field_store_in_mysql_format_func(
UNIV_MEM_ASSERT_W(dest, templ->mysql_col_len);
UNIV_MEM_INVALID(dest, templ->mysql_col_len);
+ byte* pad = dest + len;
+
switch (templ->type) {
const byte* field_end;
- byte* pad;
- case DATA_INT:
- /* Convert integer data from Innobase to a little-endian
- format, sign bit restored to normal */
-
- ptr = dest + len;
-
- for (;;) {
- ptr--;
- *ptr = *data;
- if (ptr == dest) {
- break;
- }
- data++;
- }
-
- if (!templ->is_unsigned) {
- dest[len - 1] = (byte) (dest[len - 1] ^ 128);
- }
-
- ut_ad(templ->mysql_col_len == len);
- break;
-
case DATA_VARCHAR:
case DATA_VARMYSQL:
case DATA_BINARY:
@@ -2804,7 +2746,14 @@ row_sel_field_store_in_mysql_format_func(
/* Pad with trailing spaces. */
- pad = dest + len;
+ if (pad == field_end) {
+ break;
+ }
+
+ if (UNIV_UNLIKELY(templ->type == DATA_FIXBINARY)) {
+ memset(pad, 0, field_end - pad);
+ break;
+ }
ut_ad(templ->mbminlen <= templ->mbmaxlen);
@@ -2881,7 +2830,7 @@ row_sel_field_store_in_mysql_format_func(
done in row0mysql.cc, function
row_mysql_store_col_in_innobase_format(). */
- memset(dest + len, 0x20, templ->mysql_col_len - len);
+ memset(pad, 0x20, templ->mysql_col_len - len);
}
break;
@@ -2898,13 +2847,24 @@ row_sel_field_store_in_mysql_format_func(
case DATA_FLOAT:
case DATA_DOUBLE:
case DATA_DECIMAL:
- /* Above are the valid column types for MySQL data. */
#endif /* UNIV_DEBUG */
ut_ad((templ->is_virtual && !field)
|| (field && field->prefix_len
? field->prefix_len == len
: templ->mysql_col_len == len));
memcpy(dest, data, len);
+ break;
+
+ case DATA_INT:
+ /* Convert InnoDB big-endian integer to little-endian
+ format, sign bit restored to 2's complement form */
+ DBUG_ASSERT(templ->mysql_col_len == len);
+
+ byte* ptr = pad;
+ do *--ptr = *data++; while (ptr != dest);
+ if (!templ->is_unsigned) {
+ pad[-1] ^= 0x80;
+ }
}
}
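
The new DATA_INT branch above replaces the loop that used to sit at the top of the switch: InnoDB stores integers big-endian with the sign bit inverted for signed columns, so the branch reverses the bytes into the MySQL buffer and flips the sign bit back. The standalone sketch below (not part of the patch; the 4-byte width, the example value and the little-endian host are assumptions for illustration) performs the same transformation.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

/* Convert an InnoDB-format integer (big-endian, sign bit inverted for
signed columns) into a native little-endian two's-complement value. */
static void innodb_int_to_le(uint8_t* dest, const uint8_t* data,
                             std::size_t len, bool is_unsigned)
{
        uint8_t* ptr = dest + len;
        do { *--ptr = *data++; } while (ptr != dest); /* reverse the bytes */
        if (!is_unsigned) {
                /* restore the sign bit of the most significant byte,
                which is the last byte in little-endian order */
                dest[len - 1] ^= 0x80;
        }
}

int main()
{
        /* -2 is stored by InnoDB as 0x7FFFFFFE (sign bit toggled), big-endian. */
        const uint8_t stored[4] = { 0x7F, 0xFF, 0xFF, 0xFE };
        uint8_t out[4];
        innodb_int_to_le(out, stored, sizeof out, /*is_unsigned=*/false);

        int32_t value;
        std::memcpy(&value, out, sizeof value); /* assumes a little-endian host */
        assert(value == -2);
        return 0;
}
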
@@ -2968,8 +2928,7 @@ row_sel_store_mysql_field(
causes an assert */
data = btr_rec_copy_externally_stored_field(
- rec, offsets,
- dict_table_page_size(prebuilt->table),
+ rec, offsets, prebuilt->table->space->zip_size(),
field_no, &len, heap);
if (UNIV_UNLIKELY(!data)) {
@@ -3098,9 +3057,6 @@ static bool row_sel_store_mysql_rec(
const mysql_row_templ_t*templ = &prebuilt->mysql_template[i];
if (templ->is_virtual && dict_index_is_clust(index)) {
- /* Virtual columns are never declared NOT NULL. */
- ut_ad(templ->mysql_null_bit_mask);
-
/* Skip virtual columns if it is not a covered
search or virtual key read is not requested. */
if (!rec_clust
@@ -3108,8 +3064,10 @@ static bool row_sel_store_mysql_rec(
|| (!prebuilt->read_just_key
&& !prebuilt->m_read_virtual_key)) {
/* Initialize the NULL bit. */
- mysql_rec[templ->mysql_null_byte_offset]
- |= (byte) templ->mysql_null_bit_mask;
+ if (templ->mysql_null_bit_mask) {
+ mysql_rec[templ->mysql_null_byte_offset]
+ |= (byte) templ->mysql_null_bit_mask;
+ }
continue;
}
@@ -3169,8 +3127,9 @@ static bool row_sel_store_mysql_rec(
= rec_clust
? templ->clust_rec_field_no
: templ->rec_field_no;
- /* We should never deliver column prefixes to MySQL,
- except for evaluating innobase_index_cond(). */
+ /* We should never deliver column prefixes to the SQL layer,
+ except for evaluating handler_index_cond_check()
+ or handler_rowid_filter_check(). */
/* ...actually, we do want to do this in order to
support the prefix query optimization.
@@ -3196,7 +3155,7 @@ static bool row_sel_store_mysql_rec(
if (dict_index_is_clust(index)
|| prebuilt->fts_doc_id_in_read_set) {
prebuilt->fts_doc_id = fts_get_doc_id_from_rec(
- prebuilt->table, rec, index, NULL);
+ rec, index, offsets);
}
}
@@ -3348,7 +3307,7 @@ row_sel_get_clust_rec_for_mysql(
and is it not unsafe to use RW_NO_LATCH here? */
buf_block_t* block = buf_page_get_gen(
btr_pcur_get_block(prebuilt->pcur)->page.id,
- dict_table_page_size(sec_index->table),
+ btr_pcur_get_block(prebuilt->pcur)->zip_size(),
RW_NO_LATCH, NULL, BUF_GET,
__FILE__, __LINE__, mtr, &err);
mem_heap_t* heap = mem_heap_create(256);
@@ -3546,7 +3505,7 @@ sel_restore_position_for_mysql(
next:
if (btr_pcur_move_to_next(pcur, mtr)
&& rec_is_metadata(btr_pcur_get_rec(pcur),
- pcur->btr_cur.index)) {
+ *pcur->btr_cur.index)) {
btr_pcur_move_to_next(pcur, mtr);
}
@@ -3562,7 +3521,7 @@ next:
prev:
if (btr_pcur_is_on_user_rec(pcur) && !moves_up
&& !rec_is_metadata(btr_pcur_get_rec(pcur),
- pcur->btr_cur.index)) {
+ *pcur->btr_cur.index)) {
btr_pcur_move_to_prev(pcur, mtr);
}
return true;
@@ -3798,7 +3757,7 @@ row_sel_enqueue_cache_row_for_mysql(
/* For non ICP code path the row should already exist in the
next fetch cache slot. */
- if (prebuilt->idx_cond != NULL) {
+ if (prebuilt->pk_filter || prebuilt->idx_cond) {
byte* dest = row_sel_fetch_last_buf(prebuilt);
ut_memcpy(dest, mysql_rec, prebuilt->mysql_row_len);
@@ -3839,7 +3798,7 @@ row_sel_try_search_shortcut_for_mysql(
BTR_SEARCH_LEAF, pcur, ahi_latch, mtr);
rec = btr_pcur_get_rec(pcur);
- if (!page_rec_is_user_rec(rec) || rec_is_metadata(rec, index)) {
+ if (!page_rec_is_user_rec(rec) || rec_is_metadata(rec, *index)) {
retry:
rw_lock_s_unlock(ahi_latch);
return(SEL_RETRY);
@@ -3896,17 +3855,18 @@ row_search_idx_cond_check(
const rec_t* rec, /*!< in: InnoDB record */
const ulint* offsets) /*!< in: rec_get_offsets() */
{
- ICP_RESULT result;
ulint i;
ut_ad(rec_offs_validate(rec, prebuilt->index, offsets));
if (!prebuilt->idx_cond) {
- return(ICP_MATCH);
+ if (!handler_rowid_filter_is_active(prebuilt->pk_filter)) {
+ return(ICP_MATCH);
+ }
+ } else {
+ MONITOR_INC(MONITOR_ICP_ATTEMPTS);
}
- MONITOR_INC(MONITOR_ICP_ATTEMPTS);
-
/* Convert to MySQL format those fields that are needed for
evaluating the index condition. */
@@ -3936,9 +3896,17 @@ row_search_idx_cond_check(
index, if the case of the column has been updated in
the past, or a record has been deleted and a record
inserted in a different case. */
- result = innobase_index_cond(prebuilt->idx_cond);
+ ICP_RESULT result = prebuilt->idx_cond
+ ? handler_index_cond_check(prebuilt->idx_cond)
+ : ICP_MATCH;
+
switch (result) {
case ICP_MATCH:
+ if (handler_rowid_filter_is_active(prebuilt->pk_filter)
+ && !handler_rowid_filter_check(prebuilt->pk_filter)) {
+ MONITOR_INC(MONITOR_ICP_MATCH);
+ return(ICP_NO_MATCH);
+ }
/* Convert the remaining fields to MySQL format.
If this is a secondary index record, we must defer
this until we have fetched the clustered index record. */
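
The hunks above extend row_search_idx_cond_check() so that a row is delivered only when the pushed-down index condition (if present) matches and the active rowid filter (if present) accepts it. The sketch below is a simplified standalone model of that combination only; the name check_pushed_conditions is hypothetical, and the real function also converts columns and returns further ICP_* states.

#include <functional>

enum icp_result { ICP_NO_MATCH, ICP_MATCH };

/* Combine an optional pushed-down index condition with an optional
rowid filter: both must accept the row for it to match. */
static icp_result check_pushed_conditions(
        const std::function<bool()>& idx_cond,
        const std::function<bool()>& rowid_filter)
{
        if (idx_cond && !idx_cond()) {
                return ICP_NO_MATCH;    /* pushed-down condition failed */
        }
        if (rowid_filter && !rowid_filter()) {
                return ICP_NO_MATCH;    /* row id rejected by the filter */
        }
        return ICP_MATCH;
}

int main()
{
        auto cond   = [] { return true;  };
        auto filter = [] { return false; };
        /* exits with 0 because the rowid filter rejects the row */
        return check_pushed_conditions(cond, filter) == ICP_NO_MATCH ? 0 : 1;
}
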
@@ -4391,7 +4359,7 @@ row_search_mvcc(
mtr.commit(). */
ut_ad(!rec_get_deleted_flag(rec, comp));
- if (prebuilt->idx_cond) {
+ if (prebuilt->pk_filter || prebuilt->idx_cond) {
switch (row_search_idx_cond_check(
buf, prebuilt,
rec, offsets)) {
@@ -4485,6 +4453,13 @@ row_search_mvcc(
set_also_gap_locks = FALSE;
}
+#ifdef WITH_WSREP
+ else if (wsrep_thd_skip_locking(trx->mysql_thd)) {
+ ut_ad(!strcmp(wsrep_get_sr_table_name(),
+ prebuilt->table->name.m_name));
+ set_also_gap_locks = FALSE;
+ }
+#endif /* WITH_WSREP */
/* Note that if the search mode was GE or G, then the cursor
naturally moves upward (in fetch next) in alphabetical order,
@@ -5323,7 +5298,7 @@ requires_clust_rec:
result_rec = clust_rec;
ut_ad(rec_offs_validate(result_rec, clust_index, offsets));
- if (prebuilt->idx_cond) {
+ if (prebuilt->pk_filter || prebuilt->idx_cond) {
/* Convert the record to MySQL format. We were
unable to do this in row_search_idx_cond_check(),
because the condition is on the secondary index
@@ -5384,8 +5359,7 @@ use_covering_index:
/* We only convert from InnoDB row format to MySQL row
format when ICP is disabled. */
- if (!prebuilt->idx_cond) {
-
+ if (!prebuilt->pk_filter && !prebuilt->idx_cond) {
/* We use next_buf to track the allocation of buffers
where we store and enqueue the buffers for our
pre-fetch optimisation.
@@ -5457,7 +5431,7 @@ use_covering_index:
rec_offs_size(offsets));
mach_write_to_4(buf,
rec_offs_extra_size(offsets) + 4);
- } else if (!prebuilt->idx_cond) {
+ } else if (!prebuilt->pk_filter && !prebuilt->idx_cond) {
/* The record was not yet converted to MySQL format. */
if (!row_sel_store_mysql_rec(
buf, prebuilt, result_rec, vrow,
@@ -5476,11 +5450,19 @@ use_covering_index:
}
}
- if (prebuilt->clust_index_was_generated) {
- row_sel_store_row_id_to_prebuilt(
- prebuilt, result_rec,
- result_rec == rec ? index : clust_index,
- offsets);
+ if (!prebuilt->clust_index_was_generated) {
+ } else if (result_rec != rec || index->is_primary()) {
+ memcpy(prebuilt->row_id, result_rec, DATA_ROW_ID_LEN);
+ } else {
+ ulint len;
+ const byte* data = rec_get_nth_field(
+ result_rec, offsets, index->n_fields - 1,
+ &len);
+ ut_ad(dict_index_get_nth_col(index,
+ index->n_fields - 1)
+ ->prtype == (DATA_ROW_ID | DATA_NOT_NULL));
+ ut_ad(len == DATA_ROW_ID_LEN);
+ memcpy(prebuilt->row_id, data, DATA_ROW_ID_LEN);
}
}
@@ -5691,8 +5673,7 @@ normal_return:
DEBUG_SYNC_C("row_search_for_mysql_before_return");
- if (prebuilt->idx_cond != 0) {
-
+ if (prebuilt->pk_filter || prebuilt->idx_cond) {
/* When ICP is active we don't write to the MySQL buffer
directly, only to buffers that are enqueued in the pre-fetch
queue. We need to dequeue the first buffer and copy the contents
diff --git a/storage/innobase/row/row0trunc.cc b/storage/innobase/row/row0trunc.cc
deleted file mode 100644
index dbad142f3f4..00000000000
--- a/storage/innobase/row/row0trunc.cc
+++ /dev/null
@@ -1,1961 +0,0 @@
-/*****************************************************************************
-
-Copyright (c) 2013, 2018, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2018, MariaDB Corporation.
-
-This program is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free Software
-Foundation; version 2 of the License.
-
-This program is distributed in the hope that it will be useful, but WITHOUT
-ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License along with
-this program; if not, write to the Free Software Foundation, Inc.,
-51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
-
-*****************************************************************************/
-
-/**************************************************//**
-@file row/row0trunc.cc
-TRUNCATE implementation
-
-Created 2013-04-12 Sunny Bains
-*******************************************************/
-
-#include "row0trunc.h"
-#include "btr0sea.h"
-#include "pars0pars.h"
-#include "btr0pcur.h"
-#include "dict0crea.h"
-#include "dict0stats.h"
-#include "dict0stats_bg.h"
-#include "lock0lock.h"
-#include "fts0fts.h"
-#include "ibuf0ibuf.h"
-#include "os0file.h"
-#include "que0que.h"
-#include "trx0undo.h"
-
-/* FIXME: For temporary tables, use a simple approach of btr_free()
-and btr_create() of each index tree. */
-
-/* FIXME: For persistent tables, remove this code in MDEV-11655
-and use a combination of the transactional DDL log to make atomic the
-low-level operations ha_innobase::delete_table(), ha_innobase::create(). */
-
-bool truncate_t::s_fix_up_active = false;
-truncate_t::tables_t truncate_t::s_tables;
-truncate_t::truncated_tables_t truncate_t::s_truncated_tables;
-
-/**
-Iterator over the raw records in an index, doesn't support MVCC. */
-class IndexIterator {
-
-public:
- /**
-	Iterate over an index's records
- @param index index to iterate over */
- explicit IndexIterator(dict_index_t* index)
- :
- m_index(index)
- {
- /* Do nothing */
- }
-
- /**
- Search for key. Position the cursor on a record GE key.
- @return DB_SUCCESS or error code. */
- dberr_t search(dtuple_t& key, bool noredo)
- {
- mtr_start(&m_mtr);
-
- if (noredo) {
- mtr_set_log_mode(&m_mtr, MTR_LOG_NO_REDO);
- }
-
- btr_pcur_open_on_user_rec(
- m_index,
- &key,
- PAGE_CUR_GE,
- BTR_MODIFY_LEAF,
- &m_pcur, &m_mtr);
-
- return(DB_SUCCESS);
- }
-
- /**
- Iterate over all the records
- @return DB_SUCCESS or error code */
- template <typename Callback>
- dberr_t for_each(Callback& callback)
- {
- dberr_t err = DB_SUCCESS;
-
- for (;;) {
-
- if (!btr_pcur_is_on_user_rec(&m_pcur)
- || !callback.match(&m_pcur)) {
-
-				/* The end of the index has been reached. */
- err = DB_END_OF_INDEX;
- break;
- }
-
- rec_t* rec = btr_pcur_get_rec(&m_pcur);
-
- if (!rec_get_deleted_flag(rec, FALSE)) {
-
- err = callback(&m_mtr, &m_pcur);
-
- if (err != DB_SUCCESS) {
- break;
- }
- }
-
- btr_pcur_move_to_next_user_rec(&m_pcur, &m_mtr);
- }
-
- btr_pcur_close(&m_pcur);
- mtr_commit(&m_mtr);
-
- return(err == DB_END_OF_INDEX ? DB_SUCCESS : err);
- }
-
-private:
- // Disable copying
- IndexIterator(const IndexIterator&);
- IndexIterator& operator=(const IndexIterator&);
-
-private:
- mtr_t m_mtr;
- btr_pcur_t m_pcur;
- dict_index_t* m_index;
-};
-
-/** SysIndex table iterator, iterate over records for a table. */
-class SysIndexIterator {
-
-public:
- /**
- Iterate over all the records that match the table id.
- @return DB_SUCCESS or error code */
- template <typename Callback>
- dberr_t for_each(Callback& callback) const
- {
- dict_index_t* sys_index;
- byte buf[DTUPLE_EST_ALLOC(1)];
- dtuple_t* tuple =
- dtuple_create_from_mem(buf, sizeof(buf), 1, 0);
- dfield_t* dfield = dtuple_get_nth_field(tuple, 0);
-
- dfield_set_data(
- dfield,
- callback.table_id(),
- sizeof(*callback.table_id()));
-
- sys_index = dict_table_get_first_index(dict_sys->sys_indexes);
-
- dict_index_copy_types(tuple, sys_index, 1);
-
- IndexIterator iterator(sys_index);
-
- /* Search on the table id and position the cursor
- on GE table_id. */
- iterator.search(*tuple, callback.get_logging_status());
-
- return(iterator.for_each(callback));
- }
-};
-
-/** Generic callback abstract class. */
-class Callback
-{
-
-public:
- /**
- Constructor
- @param table_id id of the table being operated.
- @param noredo if true turn off logging. */
- Callback(table_id_t table_id, bool noredo)
- :
- m_id(),
- m_noredo(noredo)
- {
- /* Convert to storage byte order. */
- mach_write_to_8(&m_id, table_id);
- }
-
- /**
- Destructor */
- virtual ~Callback()
- {
- /* Do nothing */
- }
-
- /**
- @param pcur persistent cursor used for iteration
- @return true if the table id column matches. */
- bool match(btr_pcur_t* pcur) const
- {
- ulint len;
- const byte* field;
- rec_t* rec = btr_pcur_get_rec(pcur);
-
- field = rec_get_nth_field_old(
- rec, DICT_FLD__SYS_INDEXES__TABLE_ID, &len);
-
- ut_ad(len == 8);
-
- return(memcmp(&m_id, field, len) == 0);
- }
-
- /**
- @return pointer to table id storage format buffer */
- const table_id_t* table_id() const
- {
- return(&m_id);
- }
-
- /**
-	@return true if logging needs to be turned off. */
- bool get_logging_status() const
- {
- return(m_noredo);
- }
-
-protected:
-	// Disable copying
- Callback(const Callback&);
- Callback& operator=(const Callback&);
-
-protected:
- /** Table id in storage format */
- table_id_t m_id;
-
- /** Turn off logging. */
- const bool m_noredo;
-};
-
-/**
-Scan to find out truncate log file from the given directory path.
-
-@param dir_path	path of the directory to scan for truncate log files.
-@param log_files	cache to hold the truncate log file names found.
-@return DB_SUCCESS or error code. */
-dberr_t
-TruncateLogParser::scan(
- const char* dir_path,
- trunc_log_files_t& log_files)
-{
- os_file_dir_t dir;
- os_file_stat_t fileinfo;
- dberr_t err = DB_SUCCESS;
- const ulint dir_len = strlen(dir_path);
-
- /* Scan and look out for the truncate log files. */
- dir = os_file_opendir(dir_path, true);
- if (dir == NULL) {
- return(DB_IO_ERROR);
- }
-
- while (fil_file_readdir_next_file(
- &err, dir_path, dir, &fileinfo) == 0) {
-
- const size_t nm_len = strlen(fileinfo.name);
-
- if (fileinfo.type == OS_FILE_TYPE_FILE
- && nm_len > sizeof "ib_trunc.log"
- && (0 == strncmp(fileinfo.name + nm_len
- - ((sizeof "trunc.log") - 1),
- "trunc.log", (sizeof "trunc.log") - 1))
- && (0 == strncmp(fileinfo.name, "ib_", 3))) {
-
- if (fileinfo.size == 0) {
- /* Truncate log not written. Remove the file. */
- os_file_delete(
- innodb_log_file_key, fileinfo.name);
- continue;
- }
-
- /* Construct file name by appending directory path */
- ulint sz = dir_len + 22 + 22 + sizeof "ib_trunc.log";
- char* log_file_name = UT_NEW_ARRAY_NOKEY(char, sz);
- if (log_file_name == NULL) {
- err = DB_OUT_OF_MEMORY;
- break;
- }
-
- memcpy(log_file_name, dir_path, dir_len);
- char* e = log_file_name + dir_len;
- if (e[-1] != OS_PATH_SEPARATOR) {
- *e++ = OS_PATH_SEPARATOR;
- }
- strcpy(e, fileinfo.name);
- log_files.push_back(log_file_name);
- }
- }
-
- os_file_closedir(dir);
-
- return(err);
-}
-
-/**
-Parse the log file and populate table to truncate information.
-(Add this table to truncate information to central vector that is then
- used by truncate fix-up routine to fix-up truncate action of the table.)
-
-@param log_file_name log file to parse
-@return DB_SUCCESS or error code. */
-dberr_t
-TruncateLogParser::parse(
- const char* log_file_name)
-{
- dberr_t err = DB_SUCCESS;
- truncate_t* truncate = NULL;
-
-	/* Open the file and read the magic number to find out whether the truncate action
- was completed. */
- bool ret;
- os_file_t handle = os_file_create_simple(
- innodb_log_file_key, log_file_name,
- OS_FILE_OPEN, OS_FILE_READ_ONLY, srv_read_only_mode, &ret);
- if (!ret) {
- ib::error() << "Error opening truncate log file: "
- << log_file_name;
- return(DB_IO_ERROR);
- }
-
- ulint sz = srv_page_size;
- void* buf = ut_zalloc_nokey(sz + srv_page_size);
- if (buf == 0) {
- os_file_close(handle);
- return(DB_OUT_OF_MEMORY);
- }
-
- IORequest request(IORequest::READ);
-
- /* Align the memory for file i/o if we might have O_DIRECT set*/
- byte* log_buf = static_cast<byte*>(ut_align(buf, srv_page_size));
-
- do {
- err = os_file_read(request, handle, log_buf, 0, sz);
-
- if (err != DB_SUCCESS) {
- os_file_close(handle);
- break;
- }
-
- if (mach_read_from_4(log_buf) == 32743712) {
-
- /* Truncate action completed. Avoid parsing the file. */
- os_file_close(handle);
-
- os_file_delete(innodb_log_file_key, log_file_name);
- break;
- }
-
- if (truncate == NULL) {
- truncate = UT_NEW_NOKEY(truncate_t(log_file_name));
- if (truncate == NULL) {
- os_file_close(handle);
- err = DB_OUT_OF_MEMORY;
- break;
- }
- }
-
- err = truncate->parse(log_buf + 4, log_buf + sz - 4);
-
- if (err != DB_SUCCESS) {
-
- ut_ad(err == DB_FAIL);
-
- ut_free(buf);
- buf = 0;
-
- sz *= 2;
-
- buf = ut_zalloc_nokey(sz + srv_page_size);
-
- if (buf == 0) {
- os_file_close(handle);
- err = DB_OUT_OF_MEMORY;
- UT_DELETE(truncate);
- truncate = NULL;
- break;
- }
-
- log_buf = static_cast<byte*>(
- ut_align(buf, srv_page_size));
- }
- } while (err != DB_SUCCESS);
-
- ut_free(buf);
-
- if (err == DB_SUCCESS && truncate != NULL) {
- truncate_t::add(truncate);
- os_file_close(handle);
- }
-
- return(err);
-}
-
-/**
-Scan and Parse truncate log files.
-
-@param dir_path	path of the directory to scan for truncate log files
-@return DB_SUCCESS or error code. */
-dberr_t
-TruncateLogParser::scan_and_parse(
- const char* dir_path)
-{
- dberr_t err;
- trunc_log_files_t log_files;
-
- /* Scan and trace all the truncate log files. */
- err = TruncateLogParser::scan(dir_path, log_files);
-
-	/* Parse truncate log files if scan was successful. */
- if (err == DB_SUCCESS) {
-
- for (ulint i = 0;
- i < log_files.size() && err == DB_SUCCESS;
- i++) {
- err = TruncateLogParser::parse(log_files[i]);
- }
- }
-
- trunc_log_files_t::const_iterator end = log_files.end();
- for (trunc_log_files_t::const_iterator it = log_files.begin();
- it != end;
- ++it) {
- if (*it != NULL) {
- UT_DELETE_ARRAY(*it);
- }
- }
- log_files.clear();
-
- return(err);
-}
-
-/** Callback to drop indexes during TRUNCATE */
-class DropIndex : public Callback {
-
-public:
- /**
- Constructor
-
- @param[in,out] table Table to truncate
- @param[in] noredo whether to disable redo logging */
- DropIndex(dict_table_t* table, bool noredo)
- :
- Callback(table->id, noredo),
- m_table(table)
- {
- /* No op */
- }
-
- /**
- @param mtr mini-transaction covering the read
- @param pcur persistent cursor used for reading
- @return DB_SUCCESS or error code */
- dberr_t operator()(mtr_t* mtr, btr_pcur_t* pcur) const;
-
-private:
- /** Table to be truncated */
- dict_table_t* m_table;
-};
-
-/** Callback to create the indexes during TRUNCATE */
-class CreateIndex : public Callback {
-
-public:
- /**
- Constructor
-
- @param[in,out] table Table to truncate
- @param[in] noredo whether to disable redo logging */
- CreateIndex(dict_table_t* table, bool noredo)
- :
- Callback(table->id, noredo),
- m_table(table)
- {
- /* No op */
- }
-
- /**
- Create the new index and update the root page number in the
- SysIndex table.
-
- @param mtr mini-transaction covering the read
- @param pcur persistent cursor used for reading
- @return DB_SUCCESS or error code */
- dberr_t operator()(mtr_t* mtr, btr_pcur_t* pcur) const;
-
-private:
-	// Disable copying
- CreateIndex(const CreateIndex&);
- CreateIndex& operator=(const CreateIndex&);
-
-private:
- /** Table to be truncated */
- dict_table_t* m_table;
-};
-
-/** Check for presence of table-id in SYS_XXXX tables. */
-class TableLocator : public Callback {
-
-public:
- /**
- Constructor
- @param table_id table_id to look for */
- explicit TableLocator(table_id_t table_id)
- :
- Callback(table_id, false),
- m_table_found()
- {
- /* No op */
- }
-
- /**
- @return true if table is found */
- bool is_table_found() const
- {
- return(m_table_found);
- }
-
- /**
- Look for table-id in SYS_XXXX tables without loading the table.
-
- @param pcur persistent cursor used for reading
- @return DB_SUCCESS */
- dberr_t operator()(mtr_t*, btr_pcur_t*)
- {
- m_table_found = true;
- return(DB_SUCCESS);
- }
-
-private:
- /** Set to true if table is present */
- bool m_table_found;
-};
-
-/**
-Drop an index in the table.
-
-@param mtr mini-transaction covering the read
-@param pcur persistent cursor used for reading
-@return DB_SUCCESS or error code */
-dberr_t
-DropIndex::operator()(mtr_t* mtr, btr_pcur_t* pcur) const
-{
- rec_t* rec = btr_pcur_get_rec(pcur);
-
- bool freed = dict_drop_index_tree(rec, pcur, mtr);
-
-#ifdef UNIV_DEBUG
- {
- ulint len;
- const byte* field;
- ulint index_type;
-
- field = rec_get_nth_field_old(
- btr_pcur_get_rec(pcur), DICT_FLD__SYS_INDEXES__TYPE,
- &len);
- ut_ad(len == 4);
-
- index_type = mach_read_from_4(field);
-
- if (index_type & DICT_CLUSTERED) {
- /* Clustered index */
- DBUG_EXECUTE_IF("ib_trunc_crash_on_drop_of_clust_index",
- log_buffer_flush_to_disk();
- os_thread_sleep(2000000);
- DBUG_SUICIDE(););
- } else if (index_type & DICT_UNIQUE) {
- /* Unique index */
- DBUG_EXECUTE_IF("ib_trunc_crash_on_drop_of_uniq_index",
- log_buffer_flush_to_disk();
- os_thread_sleep(2000000);
- DBUG_SUICIDE(););
- } else if (index_type == 0) {
- /* Secondary index */
- DBUG_EXECUTE_IF("ib_trunc_crash_on_drop_of_sec_index",
- log_buffer_flush_to_disk();
- os_thread_sleep(2000000);
- DBUG_SUICIDE(););
- }
- }
-#endif /* UNIV_DEBUG */
-
- DBUG_EXECUTE_IF("ib_err_trunc_drop_index", return DB_ERROR;);
-
- if (freed) {
-
- /* We will need to commit and restart the
- mini-transaction in order to avoid deadlocks.
- The dict_drop_index_tree() call has freed
- a page in this mini-transaction, and the rest
- of this loop could latch another index page.*/
- const mtr_log_t log_mode = mtr->get_log_mode();
- mtr_commit(mtr);
-
- mtr_start(mtr);
- mtr->set_log_mode(log_mode);
-
- btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, mtr);
- } else {
- if (!m_table->space) {
- return DB_ERROR;
- }
- }
-
- return(DB_SUCCESS);
-}
-
-/**
-Create the new index and update the root page number in the
-SysIndex table.
-
-@param mtr mini-transaction covering the read
-@param pcur persistent cursor used for reading
-@return DB_SUCCESS or error code */
-dberr_t
-CreateIndex::operator()(mtr_t* mtr, btr_pcur_t* pcur) const
-{
- ulint root_page_no;
-
- root_page_no = dict_recreate_index_tree(m_table, pcur, mtr);
-
-#ifdef UNIV_DEBUG
- {
- ulint len;
- const byte* field;
- ulint index_type;
-
- field = rec_get_nth_field_old(
- btr_pcur_get_rec(pcur), DICT_FLD__SYS_INDEXES__TYPE,
- &len);
- ut_ad(len == 4);
-
- index_type = mach_read_from_4(field);
-
- if (index_type & DICT_CLUSTERED) {
- /* Clustered index */
- DBUG_EXECUTE_IF(
- "ib_trunc_crash_on_create_of_clust_index",
- log_buffer_flush_to_disk();
- os_thread_sleep(2000000);
- DBUG_SUICIDE(););
- } else if (index_type & DICT_UNIQUE) {
- /* Unique index */
- DBUG_EXECUTE_IF(
- "ib_trunc_crash_on_create_of_uniq_index",
- log_buffer_flush_to_disk();
- os_thread_sleep(2000000);
- DBUG_SUICIDE(););
- } else if (index_type == 0) {
- /* Secondary index */
- DBUG_EXECUTE_IF(
- "ib_trunc_crash_on_create_of_sec_index",
- log_buffer_flush_to_disk();
- os_thread_sleep(2000000);
- DBUG_SUICIDE(););
- }
- }
-#endif /* UNIV_DEBUG */
-
- DBUG_EXECUTE_IF("ib_err_trunc_create_index", return DB_ERROR;);
-
- if (root_page_no != FIL_NULL) {
-
- rec_t* rec = btr_pcur_get_rec(pcur);
-
- page_rec_write_field(
- rec, DICT_FLD__SYS_INDEXES__PAGE_NO,
- root_page_no, mtr);
-
- /* We will need to commit and restart the
- mini-transaction in order to avoid deadlocks.
- The dict_create_index_tree() call has allocated
- a page in this mini-transaction, and the rest of
- this loop could latch another index page. */
- mtr_commit(mtr);
-
- mtr_start(mtr);
-
- btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, mtr);
-
- } else {
- if (!m_table->space) {
- return(DB_ERROR);
- }
- }
-
- return(DB_SUCCESS);
-}
-
-/**
-Update system table to reflect new table id.
-@param old_table_id old table id
-@param new_table_id new table id
-@param reserve_dict_mutex if TRUE, acquire/release
- dict_sys->mutex around call to pars_sql.
-@param trx transaction
-@return error code or DB_SUCCESS */
-static MY_ATTRIBUTE((warn_unused_result))
-dberr_t
-row_truncate_update_table_id(
- table_id_t old_table_id,
- table_id_t new_table_id,
- ibool reserve_dict_mutex,
- trx_t* trx)
-{
- pars_info_t* info = NULL;
- dberr_t err = DB_SUCCESS;
-
- /* Scan the SYS_XXXX table and update to reflect new table-id. */
- info = pars_info_create();
- pars_info_add_ull_literal(info, "old_id", old_table_id);
- pars_info_add_ull_literal(info, "new_id", new_table_id);
-
- err = que_eval_sql(
- info,
- "PROCEDURE RENUMBER_TABLE_ID_PROC () IS\n"
- "BEGIN\n"
- "UPDATE SYS_TABLES"
- " SET ID = :new_id\n"
- " WHERE ID = :old_id;\n"
- "UPDATE SYS_COLUMNS SET TABLE_ID = :new_id\n"
- " WHERE TABLE_ID = :old_id;\n"
- "UPDATE SYS_INDEXES"
- " SET TABLE_ID = :new_id\n"
- " WHERE TABLE_ID = :old_id;\n"
- "UPDATE SYS_VIRTUAL"
- " SET TABLE_ID = :new_id\n"
- " WHERE TABLE_ID = :old_id;\n"
- "END;\n", reserve_dict_mutex, trx);
-
- return(err);
-}
-
-/**
-Get the table id to truncate.
-@param truncate_t old/new table id of table to truncate
-@return table_id_t table_id to use in SYS_XXXX table update. */
-static MY_ATTRIBUTE((warn_unused_result))
-table_id_t
-row_truncate_get_trunc_table_id(
- const truncate_t& truncate)
-{
- TableLocator tableLocator(truncate.old_table_id());
-
- SysIndexIterator().for_each(tableLocator);
-
- return(tableLocator.is_table_found() ?
- truncate.old_table_id(): truncate.new_table_id());
-}
-
-/**
-Update system table to reflect new table id and root page number.
-@param truncate_t old/new table id of table to truncate
- and updated root_page_no of indexes.
-@param new_table_id new table id
-@param reserve_dict_mutex if TRUE, acquire/release
- dict_sys->mutex around call to pars_sql.
-@param mark_index_corrupted if true, then mark index corrupted.
-@return error code or DB_SUCCESS */
-static MY_ATTRIBUTE((warn_unused_result))
-dberr_t
-row_truncate_update_sys_tables_during_fix_up(
- const truncate_t& truncate,
- table_id_t new_table_id,
- ibool reserve_dict_mutex,
- bool mark_index_corrupted)
-{
- trx_t* trx = trx_create();
-
- trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
-
- table_id_t table_id = row_truncate_get_trunc_table_id(truncate);
-
- /* Step-1: Update the root-page-no */
-
- dberr_t err;
-
- err = truncate.update_root_page_no(
- trx, table_id, reserve_dict_mutex, mark_index_corrupted);
-
- if (err != DB_SUCCESS) {
- return(err);
- }
-
- /* Step-2: Update table-id. */
-
- err = row_truncate_update_table_id(
- table_id, new_table_id, reserve_dict_mutex, trx);
-
- if (err == DB_SUCCESS) {
- dict_mutex_enter_for_mysql();
-
- /* Remove the table with old table_id from cache. */
- dict_table_t* old_table = dict_table_open_on_id(
- table_id, true, DICT_TABLE_OP_NORMAL);
-
- if (old_table != NULL) {
- dict_table_close(old_table, true, false);
- dict_table_remove_from_cache(old_table);
- }
-
- /* Open table with new table_id and set table as
- corrupted if it has FTS index. */
-
- dict_table_t* table = dict_table_open_on_id(
- new_table_id, true, DICT_TABLE_OP_NORMAL);
- ut_ad(table->id == new_table_id);
-
- bool has_internal_doc_id =
- dict_table_has_fts_index(table)
- || DICT_TF2_FLAG_IS_SET(
- table, DICT_TF2_FTS_HAS_DOC_ID);
-
- if (has_internal_doc_id) {
- trx->dict_operation_lock_mode = RW_X_LATCH;
- fts_check_corrupt(table, trx);
- trx->dict_operation_lock_mode = 0;
- }
-
- dict_table_close(table, true, false);
- dict_mutex_exit_for_mysql();
- }
-
- trx_commit_for_mysql(trx);
- trx_free(trx);
-
- return(err);
-}
-
-/********************************************************//**
-Recreates table indexes by applying
-TRUNCATE log record during recovery.
-@return DB_SUCCESS or error code */
-static
-dberr_t
-fil_recreate_table(
-/*===============*/
- ulint format_flags, /*!< in: page format */
- const char* name, /*!< in: table name */
- truncate_t& truncate) /*!< in: The information of
- TRUNCATE log record */
-{
- ut_ad(!truncate_t::s_fix_up_active);
- truncate_t::s_fix_up_active = true;
-
- /* Step-1: Scan for active indexes from REDO logs and drop
- all the indexes using low level function that take root_page_no
- and space-id. */
- truncate.drop_indexes(fil_system.sys_space);
-
- /* Step-2: Scan for active indexes and re-create them. */
- dberr_t err = truncate.create_indexes(
- name, fil_system.sys_space, format_flags);
- if (err != DB_SUCCESS) {
- ib::info() << "Recovery failed for TRUNCATE TABLE '"
- << name << "' within the system tablespace";
- }
-
- truncate_t::s_fix_up_active = false;
-
- return(err);
-}
-
-/********************************************************//**
-Recreates the tablespace and table indexes by applying
-TRUNCATE log record during recovery.
-@return DB_SUCCESS or error code */
-static
-dberr_t
-fil_recreate_tablespace(
-/*====================*/
- ulint space_id, /*!< in: space id */
- ulint format_flags, /*!< in: page format */
- ulint flags, /*!< in: tablespace flags */
- const char* name, /*!< in: table name */
- truncate_t& truncate, /*!< in: The information of
- TRUNCATE log record */
- lsn_t recv_lsn) /*!< in: the end LSN of
- the log record */
-{
- dberr_t err = DB_SUCCESS;
- mtr_t mtr;
-
- ut_ad(!truncate_t::s_fix_up_active);
- truncate_t::s_fix_up_active = true;
-
- /* Step-1: Invalidate buffer pool pages belonging to the tablespace
- to re-create. */
- buf_LRU_flush_or_remove_pages(space_id, NULL);
-
- /* Remove all insert buffer entries for the tablespace */
- ibuf_delete_for_discarded_space(space_id);
-
- /* Step-2: truncate tablespace (reset the size back to original or
- default size) of tablespace. */
- err = truncate.truncate(
- space_id, truncate.get_dir_path(), name, flags, true);
-
- if (err != DB_SUCCESS) {
-
- ib::info() << "Cannot access .ibd file for table '"
- << name << "' with tablespace " << space_id
- << " while truncating";
- return(DB_ERROR);
- }
-
- fil_space_t* space = fil_space_acquire(space_id);
- if (!space) {
- ib::info() << "Missing .ibd file for table '" << name
- << "' with tablespace " << space_id;
- return(DB_ERROR);
- }
-
- const page_size_t page_size(space->flags);
-
- /* Step-3: Initialize Header. */
- if (page_size.is_compressed()) {
- byte* buf;
- page_t* page;
-
- buf = static_cast<byte*>(
- ut_zalloc_nokey(3U << srv_page_size_shift));
-
- /* Align the memory for file i/o */
- page = static_cast<byte*>(ut_align(buf, srv_page_size));
-
- flags |= FSP_FLAGS_PAGE_SSIZE();
-
- fsp_header_init_fields(page, space_id, flags);
-
- mach_write_to_4(
- page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, space_id);
-
- page_zip_des_t page_zip;
- page_zip_set_size(&page_zip, page_size.physical());
- page_zip.data = page + srv_page_size;
-
-#ifdef UNIV_DEBUG
- page_zip.m_start =
-#endif /* UNIV_DEBUG */
- page_zip.m_end = page_zip.m_nonempty = page_zip.n_blobs = 0;
- buf_flush_init_for_writing(NULL, page, &page_zip, 0);
-
- err = fil_io(IORequestWrite, true, page_id_t(space_id, 0),
- page_size, 0, page_size.physical(), page_zip.data,
- NULL);
-
- ut_free(buf);
-
- if (err != DB_SUCCESS) {
- ib::info() << "Failed to clean header of the"
- " table '" << name << "' with tablespace "
- << space_id;
- goto func_exit;
- }
- }
-
- mtr_start(&mtr);
- /* Don't log the operation while fixing up table truncate operation
- as crash at this level can still be sustained with recovery restarting
- from last checkpoint. */
- mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
-
- /* Initialize the first extent descriptor page and
- the second bitmap page for the new tablespace. */
- fsp_header_init(space, FIL_IBD_FILE_INITIAL_SIZE, &mtr);
- mtr_commit(&mtr);
-
- /* Step-4: Re-Create Indexes to newly re-created tablespace.
- This operation will restore tablespace back to what it was
- when it was created during CREATE TABLE. */
- err = truncate.create_indexes(name, space, format_flags);
- if (err != DB_SUCCESS) {
- goto func_exit;
- }
-
- /* Step-5: Write new created pages into ibd file handle and
- flush it to disk for the tablespace, in case i/o-handler thread
- deletes the bitmap page from buffer. */
- mtr_start(&mtr);
-
- mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
-
- for (ulint page_no = 0;
- page_no < UT_LIST_GET_FIRST(space->chain)->size; ++page_no) {
-
- const page_id_t cur_page_id(space_id, page_no);
-
- buf_block_t* block = buf_page_get(cur_page_id, page_size,
- RW_X_LATCH, &mtr);
-
- byte* page = buf_block_get_frame(block);
-
- if (!FSP_FLAGS_GET_ZIP_SSIZE(flags)) {
- ut_ad(!page_size.is_compressed());
-
- buf_flush_init_for_writing(
- block, page, NULL, recv_lsn);
-
- err = fil_io(IORequestWrite, true, cur_page_id,
- page_size, 0, srv_page_size, page, NULL);
- } else {
- ut_ad(page_size.is_compressed());
-
- /* We don't want to rewrite empty pages. */
-
- if (fil_page_get_type(page) != 0) {
- page_zip_des_t* page_zip =
- buf_block_get_page_zip(block);
-
- buf_flush_init_for_writing(
- block, page, page_zip, recv_lsn);
-
- err = fil_io(IORequestWrite, true,
- cur_page_id,
- page_size, 0,
- page_size.physical(),
- page_zip->data, NULL);
- } else {
-#ifdef UNIV_DEBUG
- const byte* data = block->page.zip.data;
-
- /* Make sure that the page is really empty */
- for (ulint i = 0;
- i < page_size.physical();
- ++i) {
-
- ut_a(data[i] == 0);
- }
-#endif /* UNIV_DEBUG */
- }
- }
-
- if (err != DB_SUCCESS) {
- ib::info() << "Cannot write page " << page_no
- << " into a .ibd file for table '"
- << name << "' with tablespace " << space_id;
- }
- }
-
- mtr_commit(&mtr);
-
- truncate_t::s_fix_up_active = false;
-func_exit:
- space->release();
- return(err);
-}
-
-/**
-Fix the table truncate by applying information parsed from TRUNCATE log.
-Fix-up includes re-creating table (drop and re-create indexes)
-@return error code or DB_SUCCESS */
-dberr_t
-truncate_t::fixup_tables_in_system_tablespace()
-{
- dberr_t err = DB_SUCCESS;
-
- /* Using the info cached during REDO log scan phase fix the
- table truncate. */
-
- for (tables_t::iterator it = s_tables.begin();
- it != s_tables.end();) {
-
- if ((*it)->m_space_id == TRX_SYS_SPACE) {
- /* Step-1: Drop and re-create indexes. */
- ib::info() << "Completing truncate for table with "
- "id (" << (*it)->m_old_table_id << ") "
- "residing in the system tablespace.";
-
- err = fil_recreate_table(
- (*it)->m_format_flags,
- (*it)->m_tablename,
- **it);
-
- /* Step-2: Update the SYS_XXXX tables to reflect
- this new table_id and root_page_no. */
- table_id_t new_id;
-
- dict_hdr_get_new_id(&new_id, NULL, NULL, NULL, true);
-
- err = row_truncate_update_sys_tables_during_fix_up(
- **it, new_id, TRUE,
- (err == DB_SUCCESS) ? false : true);
-
- if (err != DB_SUCCESS) {
- break;
- }
-
- os_file_delete(
- innodb_log_file_key, (*it)->m_log_file_name);
- UT_DELETE(*it);
- it = s_tables.erase(it);
- } else {
- ++it;
- }
- }
-
- /* Also clear the map used to track tablespace truncated. */
- s_truncated_tables.clear();
-
- return(err);
-}
-
-/**
-Fix the table truncate by applying information parsed from TRUNCATE log.
-Fix-up includes re-creating tablespace.
-@return error code or DB_SUCCESS */
-dberr_t
-truncate_t::fixup_tables_in_non_system_tablespace()
-{
- dberr_t err = DB_SUCCESS;
-
- /* Using the info cached during REDO log scan phase fix the
- table truncate. */
- tables_t::iterator end = s_tables.end();
-
- for (tables_t::iterator it = s_tables.begin(); it != end; ++it) {
-
- /* All tables in the system tablespace have already been
- done and erased from this list. */
- ut_a((*it)->m_space_id != TRX_SYS_SPACE);
-
- /* Drop tablespace, drop indexes and re-create indexes. */
-
- ib::info() << "Completing truncate for table with "
- "id (" << (*it)->m_old_table_id << ") "
- "residing in file-per-table tablespace with "
- "id (" << (*it)->m_space_id << ")";
-
- fil_space_t* space = fil_space_get((*it)->m_space_id);
-
- if (!space) {
- /* Create the database directory for name,
- if it does not exist yet */
- fil_create_directory_for_tablename(
- (*it)->m_tablename);
-
- space = fil_ibd_create((*it)->m_space_id,
- (*it)->m_tablename,
- (*it)->m_dir_path,
- (*it)->m_tablespace_flags,
- FIL_IBD_FILE_INITIAL_SIZE,
- (*it)->m_encryption,
- (*it)->m_key_id, &err);
- if (!space) {
-				/* If the checkpoint has not yet happened
-				and the table was dropped, we might
- still have REDO entries for this table
- which are INVALID. Ignore them. */
- ib::warn() << "Failed to create"
- " tablespace for "
- << (*it)->m_space_id
- << " space-id";
- err = DB_ERROR;
- break;
- }
- }
-
- err = fil_recreate_tablespace(
- (*it)->m_space_id,
- (*it)->m_format_flags,
- (*it)->m_tablespace_flags,
- (*it)->m_tablename,
- **it, log_get_lsn());
-
- /* Step-2: Update the SYS_XXXX tables to reflect new
- table-id and root_page_no. */
- table_id_t new_id;
-
- dict_hdr_get_new_id(&new_id, NULL, NULL, NULL, true);
-
- err = row_truncate_update_sys_tables_during_fix_up(
- **it, new_id, TRUE, (err == DB_SUCCESS) ? false : true);
-
- if (err != DB_SUCCESS) {
- break;
- }
- }
-
- if (err == DB_SUCCESS && s_tables.size() > 0) {
-
- log_make_checkpoint_at(LSN_MAX);
- }
-
- for (ulint i = 0; i < s_tables.size(); ++i) {
- os_file_delete(
- innodb_log_file_key, s_tables[i]->m_log_file_name);
- UT_DELETE(s_tables[i]);
- }
-
- s_tables.clear();
-
- return(err);
-}
-
-/**
-Constructor
-
-@param old_table_id old table id assigned to table before truncate
-@param new_table_id new table id that will be assigned to table
- after truncate
-@param dir_path directory path */
-
-truncate_t::truncate_t(
- table_id_t old_table_id,
- table_id_t new_table_id,
- const char* dir_path)
- :
- m_space_id(),
- m_old_table_id(old_table_id),
- m_new_table_id(new_table_id),
- m_dir_path(),
- m_tablename(),
- m_tablespace_flags(),
- m_format_flags(),
- m_indexes(),
- m_log_lsn(),
- m_log_file_name(),
- /* JAN: TODO: Encryption */
- m_encryption(FIL_ENCRYPTION_DEFAULT),
- m_key_id(FIL_DEFAULT_ENCRYPTION_KEY)
-{
- if (dir_path != NULL) {
- m_dir_path = mem_strdup(dir_path);
- }
-}
-
-/**
-Constructor
-
-@param log_file_name parse the log file during recovery to populate
- information related to table to truncate */
-truncate_t::truncate_t(
- const char* log_file_name)
- :
- m_space_id(),
- m_old_table_id(),
- m_new_table_id(),
- m_dir_path(),
- m_tablename(),
- m_tablespace_flags(),
- m_format_flags(),
- m_indexes(),
- m_log_lsn(),
- m_log_file_name(),
- /* JAN: TODO: Encryption */
- m_encryption(FIL_ENCRYPTION_DEFAULT),
- m_key_id(FIL_DEFAULT_ENCRYPTION_KEY)
-
-{
- m_log_file_name = mem_strdup(log_file_name);
- if (m_log_file_name == NULL) {
- ib::fatal() << "Failed creating truncate_t; out of memory";
- }
-}
-
-/** Constructor */
-
-truncate_t::index_t::index_t()
- :
- m_id(),
- m_type(),
- m_root_page_no(FIL_NULL),
- m_new_root_page_no(FIL_NULL),
- m_n_fields(),
- m_trx_id_pos(ULINT_UNDEFINED),
- m_fields()
-{
- /* Do nothing */
-}
-
-/** Destructor */
-
-truncate_t::~truncate_t()
-{
- if (m_dir_path != NULL) {
- ut_free(m_dir_path);
- m_dir_path = NULL;
- }
-
- if (m_tablename != NULL) {
- ut_free(m_tablename);
- m_tablename = NULL;
- }
-
- if (m_log_file_name != NULL) {
- ut_free(m_log_file_name);
- m_log_file_name = NULL;
- }
-
- m_indexes.clear();
-}
-
-/**
-@return number of indexes parsed from the log record */
-
-size_t
-truncate_t::indexes() const
-{
- return(m_indexes.size());
-}
-
-/**
-Update root page number in SYS_XXXX tables.
-
-@param trx transaction object
-@param table_id table id for which information needs to
- be updated.
-@param reserve_dict_mutex if TRUE, acquire/release
- dict_sys->mutex around call to pars_sql.
-@param mark_index_corrupted if true, then mark index corrupted.
-@return DB_SUCCESS or error code */
-
-dberr_t
-truncate_t::update_root_page_no(
- trx_t* trx,
- table_id_t table_id,
- ibool reserve_dict_mutex,
- bool mark_index_corrupted) const
-{
- indexes_t::const_iterator end = m_indexes.end();
-
- dberr_t err = DB_SUCCESS;
-
- for (indexes_t::const_iterator it = m_indexes.begin();
- it != end;
- ++it) {
-
- pars_info_t* info = pars_info_create();
-
- pars_info_add_int4_literal(
- info, "page_no", it->m_new_root_page_no);
-
- pars_info_add_ull_literal(info, "table_id", table_id);
-
- pars_info_add_ull_literal(
- info, "index_id",
- (mark_index_corrupted ? IB_ID_MAX : it->m_id));
-
- err = que_eval_sql(
- info,
- "PROCEDURE RENUMBER_IDX_PAGE_NO_PROC () IS\n"
- "BEGIN\n"
- "UPDATE SYS_INDEXES"
- " SET PAGE_NO = :page_no\n"
- " WHERE TABLE_ID = :table_id"
- " AND ID = :index_id;\n"
- "END;\n", reserve_dict_mutex, trx);
-
- if (err != DB_SUCCESS) {
- break;
- }
- }
-
- return(err);
-}
-
-/**
-Check whether a tablespace was truncated during recovery
-@param space_id tablespace id to check
-@return true if the tablespace was truncated */
-
-bool
-truncate_t::is_tablespace_truncated(ulint space_id)
-{
- tables_t::iterator end = s_tables.end();
-
- for (tables_t::iterator it = s_tables.begin(); it != end; ++it) {
-
- if ((*it)->m_space_id == space_id) {
-
- return(true);
- }
- }
-
- return(false);
-}
-
-/** Was tablespace truncated (on crash before checkpoint).
-If the MLOG_TRUNCATE redo-record is still available then tablespace
-was truncated and checkpoint is yet to happen.
-@param[in] space_id tablespace id to check.
-@return true if the tablespace was truncated. */
-bool
-truncate_t::was_tablespace_truncated(ulint space_id)
-{
- return(s_truncated_tables.find(space_id) != s_truncated_tables.end());
-}
-
-/** Get the lsn associated with space.
-@param[in] space_id tablespace id to check.
-@return associated lsn. */
-lsn_t
-truncate_t::get_truncated_tablespace_init_lsn(ulint space_id)
-{
- ut_ad(was_tablespace_truncated(space_id));
-
- return(s_truncated_tables.find(space_id)->second);
-}
-
-/**
-Parses log record during recovery
-@param start_ptr buffer containing log body to parse
-@param end_ptr buffer end
-
-@return DB_SUCCESS or error code */
-
-dberr_t
-truncate_t::parse(
- byte* start_ptr,
- const byte* end_ptr)
-{
- /* Parse lsn, space-id, format-flags and tablespace-flags. */
- if (end_ptr < start_ptr + (8 + 4 + 4 + 4)) {
- return(DB_FAIL);
- }
-
- m_log_lsn = mach_read_from_8(start_ptr);
- start_ptr += 8;
-
- m_space_id = mach_read_from_4(start_ptr);
- start_ptr += 4;
-
- m_format_flags = mach_read_from_4(start_ptr);
- start_ptr += 4;
-
- m_tablespace_flags = mach_read_from_4(start_ptr);
- start_ptr += 4;
-
- /* Parse table-name. */
- if (end_ptr < start_ptr + (2)) {
- return(DB_FAIL);
- }
-
- ulint n_tablename_len = mach_read_from_2(start_ptr);
- start_ptr += 2;
-
- if (n_tablename_len > 0) {
- if (end_ptr < start_ptr + n_tablename_len) {
- return(DB_FAIL);
- }
- m_tablename = mem_strdup(reinterpret_cast<char*>(start_ptr));
- ut_ad(m_tablename[n_tablename_len - 1] == 0);
- start_ptr += n_tablename_len;
- }
-
-
- /* Parse and read old/new table-id, number of indexes */
- if (end_ptr < start_ptr + (8 + 8 + 2 + 2)) {
- return(DB_FAIL);
- }
-
- ut_ad(m_indexes.empty());
-
- m_old_table_id = mach_read_from_8(start_ptr);
- start_ptr += 8;
-
- m_new_table_id = mach_read_from_8(start_ptr);
- start_ptr += 8;
-
- ulint n_indexes = mach_read_from_2(start_ptr);
- start_ptr += 2;
-
- /* Parse the remote directory from TRUNCATE log record */
- {
- ulint n_tabledirpath_len = mach_read_from_2(start_ptr);
- start_ptr += 2;
-
- if (end_ptr < start_ptr + n_tabledirpath_len) {
- return(DB_FAIL);
- }
-
- if (n_tabledirpath_len > 0) {
-
- m_dir_path = mem_strdup(reinterpret_cast<char*>(start_ptr));
- ut_ad(m_dir_path[n_tabledirpath_len - 1] == 0);
- start_ptr += n_tabledirpath_len;
- }
- }
-
- /* Parse index ids and types from TRUNCATE log record */
- for (ulint i = 0; i < n_indexes; ++i) {
- index_t index;
-
- if (end_ptr < start_ptr + (8 + 4 + 4 + 4)) {
- return(DB_FAIL);
- }
-
- index.m_id = mach_read_from_8(start_ptr);
- start_ptr += 8;
-
- index.m_type = mach_read_from_4(start_ptr);
- start_ptr += 4;
-
- index.m_root_page_no = mach_read_from_4(start_ptr);
- start_ptr += 4;
-
- index.m_trx_id_pos = mach_read_from_4(start_ptr);
- start_ptr += 4;
-
- if (!(index.m_type & DICT_FTS)) {
- m_indexes.push_back(index);
- }
- }
-
- ut_ad(!m_indexes.empty());
-
- if (FSP_FLAGS_GET_ZIP_SSIZE(m_tablespace_flags)) {
-
- /* Parse the number of index fields from TRUNCATE log record */
- for (ulint i = 0; i < m_indexes.size(); ++i) {
-
- if (end_ptr < start_ptr + (2 + 2)) {
- return(DB_FAIL);
- }
-
- m_indexes[i].m_n_fields = mach_read_from_2(start_ptr);
- start_ptr += 2;
-
- ulint len = mach_read_from_2(start_ptr);
- start_ptr += 2;
-
- if (end_ptr < start_ptr + len) {
- return(DB_FAIL);
- }
-
- index_t& index = m_indexes[i];
-
- /* Should be NUL terminated. */
- ut_ad((start_ptr)[len - 1] == 0);
-
- index_t::fields_t::iterator end;
-
- end = index.m_fields.end();
-
- index.m_fields.insert(
- end, start_ptr, &(start_ptr)[len]);
-
- start_ptr += len;
- }
- }
-
- return(DB_SUCCESS);
-}
-
-/** Parse log record from REDO log file during recovery.
-@param[in,out] start_ptr buffer containing log body to parse
-@param[in] end_ptr buffer end
-@param[in] space_id tablespace identifier
-@return parsed upto or NULL. */
-byte*
-truncate_t::parse_redo_entry(
- byte* start_ptr,
- const byte* end_ptr,
- ulint space_id)
-{
- lsn_t lsn;
-
- /* Parse space-id, lsn */
- if (end_ptr < (start_ptr + 8)) {
- return(NULL);
- }
-
- lsn = mach_read_from_8(start_ptr);
- start_ptr += 8;
-
-	/* A tablespace can't be in both states
- (scheduled-for-truncate, was-truncated). */
- if (!is_tablespace_truncated(space_id)) {
-
- truncated_tables_t::iterator it =
- s_truncated_tables.find(space_id);
-
- if (it == s_truncated_tables.end()) {
- s_truncated_tables.insert(
- std::pair<ulint, lsn_t>(space_id, lsn));
- } else {
- it->second = lsn;
- }
- }
-
- return(start_ptr);
-}
-
-/**
-Set the truncate log values for a compressed table.
-@param index		index from which recreate information needs to be extracted
-@return DB_SUCCESS or error code */
-
-dberr_t
-truncate_t::index_t::set(
- const dict_index_t* index)
-{
- /* Get trx-id column position (set only for clustered index) */
- if (dict_index_is_clust(index)) {
- m_trx_id_pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
- ut_ad(m_trx_id_pos > 0);
- ut_ad(m_trx_id_pos != ULINT_UNDEFINED);
- } else {
- m_trx_id_pos = 0;
- }
-
-	/* The original logic set this field differently if the page is not a leaf.
-	In the truncate case this is the first page to be created, so it is
-	always a leaf page and we don't need that condition here. */
- m_n_fields = dict_index_get_n_fields(index);
-
- /* See requirements of page_zip_fields_encode for size. */
- ulint encoded_buf_size = (m_n_fields + 1) * 2;
- byte* encoded_buf = UT_NEW_ARRAY_NOKEY(byte, encoded_buf_size);
-
- if (encoded_buf == NULL) {
- return(DB_OUT_OF_MEMORY);
- }
-
- ulint len = page_zip_fields_encode(
- m_n_fields, index, m_trx_id_pos, encoded_buf);
- ut_a(len <= encoded_buf_size);
-
- /* Append the encoded fields data. */
- m_fields.insert(m_fields.end(), &encoded_buf[0], &encoded_buf[len]);
-
- /* NUL terminate the encoded data */
- m_fields.push_back(0);
-
- UT_DELETE_ARRAY(encoded_buf);
-
- return(DB_SUCCESS);
-}
-
-/** Create an index for a table.
-@param[in] table_name table name, for which to create
-the index
-@param[in] space tablespace
-@param[in]	index_type		type of index to create
-@param[in]	index_id		id of the index to create
-@param[in] btr_redo_create_info control info for ::btr_create()
-@param[in,out] mtr mini-transaction covering the
-create index
-@return root page no or FIL_NULL on failure */
-inline ulint
-truncate_t::create_index(
- const char* table_name,
- fil_space_t* space,
- ulint index_type,
- index_id_t index_id,
- const btr_create_t& btr_redo_create_info,
- mtr_t* mtr) const
-{
- ulint root_page_no = btr_create(
- index_type, space, index_id,
- NULL, &btr_redo_create_info, mtr);
-
- if (root_page_no == FIL_NULL) {
-
- ib::info() << "innodb_force_recovery was set to "
- << srv_force_recovery << ". Continuing crash recovery"
- " even though we failed to create index " << index_id
- << " for compressed table '" << table_name << "' with"
- " file " << space->chain.start->name;
- }
-
- return(root_page_no);
-}
-
-/** Check if index has been modified since TRUNCATE log snapshot
-was recorded.
-@param[in] space tablespace
-@param[in] root_page_no index root page number
-@return true if modified else false */
-inline
-bool
-truncate_t::is_index_modified_since_logged(
- const fil_space_t* space,
- ulint root_page_no) const
-{
- dberr_t err;
- mtr_t mtr;
-
- mtr_start(&mtr);
-
- /* Root page could be in free state if truncate crashed after drop_index
- and page was not allocated for any other object. */
- buf_block_t* block= buf_page_get_gen(
- page_id_t(space->id, root_page_no), page_size_t(space->flags),
- RW_X_LATCH, NULL,
- BUF_GET_POSSIBLY_FREED, __FILE__, __LINE__, &mtr, &err);
- if (!block) return true;
-
- page_t* root = buf_block_get_frame(block);
-
-#ifdef UNIV_DEBUG
-	/* If the root page was freed by the truncate drop_index action
-	and has not yet been allocated to any other object, the page
-	LSN is still greater than the snapshot LSN. */
- if (block->page.file_page_was_freed) {
- ut_ad(mach_read_from_8(root + FIL_PAGE_LSN) > m_log_lsn);
- }
-#endif /* UNIV_DEBUG */
-
- lsn_t page_lsn = mach_read_from_8(root + FIL_PAGE_LSN);
-
- mtr_commit(&mtr);
-
- if (page_lsn > m_log_lsn) {
- return(true);
- }
-
- return(false);
-}
-
-/** Drop indexes for a table.
-@param[in,out] space tablespace */
-void truncate_t::drop_indexes(fil_space_t* space) const
-{
- mtr_t mtr;
-
- indexes_t::const_iterator end = m_indexes.end();
- const page_size_t page_size(space->flags);
-
- for (indexes_t::const_iterator it = m_indexes.begin();
- it != end;
- ++it) {
-
- ulint root_page_no = it->m_root_page_no;
-
- if (is_index_modified_since_logged(space, root_page_no)) {
- /* Page has been modified since TRUNCATE log snapshot
- was recorded so not safe to drop the index. */
- continue;
- }
-
- mtr_start(&mtr);
-
- if (space->id != TRX_SYS_SPACE) {
- /* Do not log changes for single-table
- tablespaces, we are in recovery mode. */
- mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
- }
-
- if (root_page_no != FIL_NULL) {
- const page_id_t root_page_id(space->id, root_page_no);
-
- btr_free_if_exists(
- root_page_id, page_size, it->m_id, &mtr);
- }
-
-	/* If the tree has already been freed, then we might return
-	immediately, in which case we need to release the lock that we
-	have acquired on the root page. */
- mtr_commit(&mtr);
- }
-}
-
-
-/** Create the indexes for a table
-@param[in] table_name table name, for which to create the indexes
-@param[in,out] space tablespace
-@param[in] format_flags page format flags
-@return DB_SUCCESS or error code. */
-inline dberr_t
-truncate_t::create_indexes(
- const char* table_name,
- fil_space_t* space,
- ulint format_flags)
-{
- mtr_t mtr;
-
- mtr_start(&mtr);
-
- if (space->id != TRX_SYS_SPACE) {
- /* Do not log changes for single-table tablespaces, we
- are in recovery mode. */
- mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
- }
-
- /* Create all new index trees with table format, index ids, index
- types, number of index fields and index field information taken
- out from the TRUNCATE log record. */
-
- ulint root_page_no = FIL_NULL;
- indexes_t::iterator end = m_indexes.end();
- for (indexes_t::iterator it = m_indexes.begin();
- it != end;
- ++it) {
-
- btr_create_t btr_redo_create_info(
- FSP_FLAGS_GET_ZIP_SSIZE(space->flags)
- ? &it->m_fields[0] : NULL);
-
- btr_redo_create_info.format_flags = format_flags;
-
- if (FSP_FLAGS_GET_ZIP_SSIZE(space->flags)) {
-
- btr_redo_create_info.n_fields = it->m_n_fields;
- /* Skip the NUL appended field */
- btr_redo_create_info.field_len =
- it->m_fields.size() - 1;
- btr_redo_create_info.trx_id_pos = it->m_trx_id_pos;
- }
-
- root_page_no = create_index(
- table_name, space, it->m_type, it->m_id,
- btr_redo_create_info, &mtr);
-
- if (root_page_no == FIL_NULL) {
- break;
- }
-
- it->m_new_root_page_no = root_page_no;
- }
-
- mtr_commit(&mtr);
-
- return(root_page_no == FIL_NULL ? DB_ERROR : DB_SUCCESS);
-}
-
-/**
-Write a TRUNCATE log record for fixing up table if truncate crashes.
-@param start_ptr buffer to write log record
-@param end_ptr buffer end
-@param space_id space id
-@param tablename the table name in the usual databasename/tablename
- format of InnoDB
-@param flags tablespace flags
-@param format_flags page format
-@param lsn lsn while logging
-@return DB_SUCCESS or error code */
-
-dberr_t
-truncate_t::write(
- byte* start_ptr,
- byte* end_ptr,
- ulint space_id,
- const char* tablename,
- ulint flags,
- ulint format_flags,
- lsn_t lsn) const
-{
- if (end_ptr < start_ptr) {
- return(DB_FAIL);
- }
-
-	/* LSN, Type, Space-ID, format-flag (also known as log_flag;
-	stored in the page_no field), tablespace flags */
- if (end_ptr < (start_ptr + (8 + 4 + 4 + 4))) {
- return(DB_FAIL);
- }
-
- mach_write_to_8(start_ptr, lsn);
- start_ptr += 8;
-
- mach_write_to_4(start_ptr, space_id);
- start_ptr += 4;
-
- mach_write_to_4(start_ptr, format_flags);
- start_ptr += 4;
-
- mach_write_to_4(start_ptr, flags);
- start_ptr += 4;
-
- /* Name of the table. */
- /* Include the NUL in the log record. */
- ulint len = strlen(tablename) + 1;
- if (end_ptr < (start_ptr + (len + 2))) {
- return(DB_FAIL);
- }
-
- mach_write_to_2(start_ptr, len);
- start_ptr += 2;
-
- memcpy(start_ptr, tablename, len - 1);
- start_ptr += len;
-
- DBUG_EXECUTE_IF("ib_trunc_crash_while_writing_redo_log",
- DBUG_SUICIDE(););
-
- /* Old/New Table-ID, Number of Indexes and Tablespace dir-path-name. */
- /* Write the remote directory of the table into mtr log */
- len = m_dir_path != NULL ? strlen(m_dir_path) + 1 : 0;
- if (end_ptr < (start_ptr + (len + 8 + 8 + 2 + 2))) {
- return(DB_FAIL);
- }
-
- /* Write out old-table-id. */
- mach_write_to_8(start_ptr, m_old_table_id);
- start_ptr += 8;
-
- /* Write out new-table-id. */
- mach_write_to_8(start_ptr, m_new_table_id);
- start_ptr += 8;
-
- /* Write out the number of indexes. */
- mach_write_to_2(start_ptr, m_indexes.size());
- start_ptr += 2;
-
- /* Write the length (NUL included) of the .ibd path. */
- mach_write_to_2(start_ptr, len);
- start_ptr += 2;
-
- if (m_dir_path != NULL) {
- memcpy(start_ptr, m_dir_path, len - 1);
- start_ptr += len;
- }
-
- /* Indexes information (id, type) */
- /* Write index ids, type, root-page-no into mtr log */
- for (ulint i = 0; i < m_indexes.size(); ++i) {
-
- if (end_ptr < (start_ptr + (8 + 4 + 4 + 4))) {
- return(DB_FAIL);
- }
-
- mach_write_to_8(start_ptr, m_indexes[i].m_id);
- start_ptr += 8;
-
- mach_write_to_4(start_ptr, m_indexes[i].m_type);
- start_ptr += 4;
-
- mach_write_to_4(start_ptr, m_indexes[i].m_root_page_no);
- start_ptr += 4;
-
- mach_write_to_4(start_ptr, m_indexes[i].m_trx_id_pos);
- start_ptr += 4;
- }
-
-	/* If the tablespace is compressed, write the field info of each index. */
- if (FSP_FLAGS_GET_ZIP_SSIZE(flags)) {
-
- for (ulint i = 0; i < m_indexes.size(); ++i) {
-
- ulint len = m_indexes[i].m_fields.size();
- if (end_ptr < (start_ptr + (len + 2 + 2))) {
- return(DB_FAIL);
- }
-
- mach_write_to_2(
- start_ptr, m_indexes[i].m_n_fields);
- start_ptr += 2;
-
- mach_write_to_2(start_ptr, len);
- start_ptr += 2;
-
- const byte* ptr = &m_indexes[i].m_fields[0];
- memcpy(start_ptr, ptr, len - 1);
- start_ptr += len;
- }
- }
-
- return(DB_SUCCESS);
-}
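Read together, the writes and bounds checks in truncate_t::write() above describe the serialized layout of the TRUNCATE log record. The following summary is reconstructed purely from that code (not from a separate format specification); all integers are big-endian:

    /* TRUNCATE log record layout, as written by truncate_t::write():
         8  lsn
         4  space id
         4  format flags ("log_flag", stored in the page_no field)
         4  tablespace flags
         2  table name length (NUL included), followed by the name
         8  old table id
         8  new table id
         2  number of indexes
         2  dir-path length (NUL included, 0 if none), followed by the path
       per index:
         8  index id, 4 type, 4 root page no, 4 trx_id_pos
       per index, only if FSP_FLAGS_GET_ZIP_SSIZE(flags) != 0:
         2  n_fields, 2 field-data length, followed by the
            NUL-terminated page_zip_fields_encode() data */
    static const unsigned TRUNCATE_LOG_FIXED_HEADER = 8 + 4 + 4 + 4;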
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index b2679c87dae..0fdb740db04 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -65,7 +65,6 @@ row_undo_ins_remove_clust_rec(
/*==========================*/
undo_node_t* node) /*!< in: undo node */
{
- btr_cur_t* btr_cur;
ibool success;
dberr_t err;
ulint n_tries = 0;
@@ -73,15 +72,27 @@ row_undo_ins_remove_clust_rec(
dict_index_t* index = node->pcur.btr_cur.index;
bool online;
- ut_ad(dict_index_is_clust(index));
+ ut_ad(index->is_primary());
ut_ad(node->trx->in_rollback);
mtr.start();
if (index->table->is_temporary()) {
ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
mtr.set_log_mode(MTR_LOG_NO_REDO);
+ ut_ad(!dict_index_is_online_ddl(index));
+ ut_ad(index->table->id >= DICT_HDR_FIRST_ID);
+ online = false;
} else {
index->set_modified(mtr);
+ online = dict_index_is_online_ddl(index);
+ if (online) {
+ ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
+ ut_ad(node->trx->dict_operation_lock_mode
+ != RW_X_LATCH);
+ ut_ad(node->table->id != DICT_INDEXES_ID);
+ ut_ad(node->table->id != DICT_COLUMNS_ID);
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+ }
}
/* This is similar to row_undo_mod_clust(). The DDL thread may
@@ -90,104 +101,69 @@ row_undo_ins_remove_clust_rec(
purged. However, we can log the removal out of sync with the
B-tree modification. */
- online = dict_index_is_online_ddl(index);
- if (online) {
- ut_ad(node->trx->dict_operation_lock_mode
- != RW_X_LATCH);
- ut_ad(node->table->id != DICT_INDEXES_ID);
- mtr_s_lock(dict_index_get_lock(index), &mtr);
- }
-
success = btr_pcur_restore_position(
online
? BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED
- : BTR_MODIFY_LEAF, &node->pcur, &mtr);
+ : (node->rec_type == TRX_UNDO_INSERT_METADATA)
+ ? BTR_MODIFY_TREE : BTR_MODIFY_LEAF, &node->pcur, &mtr);
ut_a(success);
- btr_cur = btr_pcur_get_btr_cur(&node->pcur);
+ rec_t* rec = btr_pcur_get_rec(&node->pcur);
- ut_ad(rec_get_trx_id(btr_cur_get_rec(btr_cur), btr_cur->index)
- == node->trx->id);
- ut_ad(!rec_get_deleted_flag(
- btr_cur_get_rec(btr_cur),
- dict_table_is_comp(btr_cur->index->table)));
+ ut_ad(rec_get_trx_id(rec, index) == node->trx->id);
+ ut_ad(!rec_get_deleted_flag(rec, index->table->not_redundant())
+ || rec_is_alter_metadata(rec, index->table->not_redundant()));
+ ut_ad(rec_is_metadata(rec, index->table->not_redundant())
+ == (node->rec_type == TRX_UNDO_INSERT_METADATA));
if (online && dict_index_is_online_ddl(index)) {
- const rec_t* rec = btr_cur_get_rec(btr_cur);
mem_heap_t* heap = NULL;
const ulint* offsets = rec_get_offsets(
rec, index, NULL, true, ULINT_UNDEFINED, &heap);
row_log_table_delete(rec, index, offsets, NULL);
mem_heap_free(heap);
- }
-
- switch (node->table->id) {
- case DICT_INDEXES_ID:
- ut_ad(!online);
- ut_ad(node->trx->dict_operation_lock_mode == RW_X_LATCH);
- ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
-
- dict_drop_index_tree(
- btr_pcur_get_rec(&node->pcur), &(node->pcur), &mtr);
-
- mtr.commit();
+ } else {
+ switch (node->table->id) {
+ case DICT_INDEXES_ID:
+ ut_ad(!online);
+ ut_ad(node->trx->dict_operation_lock_mode
+ == RW_X_LATCH);
+ ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
- mtr.start();
+ dict_drop_index_tree(rec, &node->pcur, &mtr);
+ mtr.commit();
- success = btr_pcur_restore_position(
- BTR_MODIFY_LEAF, &node->pcur, &mtr);
- ut_a(success);
- break;
- case DICT_COLUMNS_ID:
- /* This is rolling back an INSERT into SYS_COLUMNS.
- If it was part of an instant ADD COLUMN operation, we
- must modify the table definition. At this point, any
- corresponding operation to the metadata record will have
- been rolled back. */
- ut_ad(!online);
- ut_ad(node->trx->dict_operation_lock_mode == RW_X_LATCH);
- ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
- const rec_t* rec = btr_pcur_get_rec(&node->pcur);
- if (rec_get_n_fields_old(rec)
- != DICT_NUM_FIELDS__SYS_COLUMNS) {
- break;
- }
- ulint len;
- const byte* data = rec_get_nth_field_old(
- rec, DICT_FLD__SYS_COLUMNS__TABLE_ID, &len);
- if (len != 8) {
- break;
- }
- const table_id_t table_id = mach_read_from_8(data);
- data = rec_get_nth_field_old(rec, DICT_FLD__SYS_COLUMNS__POS,
- &len);
- if (len != 4) {
- break;
- }
- const unsigned pos = mach_read_from_4(data);
- if (pos == 0 || pos >= (1U << 16)) {
- break;
- }
- dict_table_t* table = dict_table_open_on_id(
- table_id, true, DICT_TABLE_OP_OPEN_ONLY_IF_CACHED);
- if (!table) {
+ mtr.start();
+ success = btr_pcur_restore_position(
+ BTR_MODIFY_LEAF, &node->pcur, &mtr);
+ ut_a(success);
break;
+ case DICT_COLUMNS_ID:
+ /* This is rolling back an INSERT into SYS_COLUMNS.
+ If it was part of an instant ALTER TABLE operation, we
+ must evict the table definition, so that it can be
+ reloaded after the dictionary operation has been
+ completed. At this point, any corresponding operation
+ to the metadata record will have been rolled back. */
+ ut_ad(!online);
+ ut_ad(node->trx->dict_operation_lock_mode
+ == RW_X_LATCH);
+ ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
+ if (rec_get_n_fields_old(rec)
+ != DICT_NUM_FIELDS__SYS_COLUMNS) {
+ break;
+ }
+ ulint len;
+ const byte* data = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_COLUMNS__TABLE_ID, &len);
+ if (len != 8) {
+ break;
+ }
+ node->trx->evict_table(mach_read_from_8(data));
}
-
- dict_index_t* index = dict_table_get_first_index(table);
-
- if (index && index->is_instant()
- && DATA_N_SYS_COLS + 1 + pos == table->n_cols) {
- /* This is the rollback of an instant ADD COLUMN.
- Remove the column from the dictionary cache,
- but keep the system columns. */
- table->rollback_instant(pos);
- }
-
- dict_table_close(table, true, false);
}
- if (btr_cur_optimistic_delete(btr_cur, 0, &mtr)) {
+ if (btr_cur_optimistic_delete(&node->pcur.btr_cur, 0, &mtr)) {
err = DB_SUCCESS;
goto func_exit;
}
@@ -207,7 +183,8 @@ retry:
&node->pcur, &mtr);
ut_a(success);
- btr_cur_pessimistic_delete(&err, FALSE, btr_cur, 0, true, &mtr);
+ btr_cur_pessimistic_delete(&err, FALSE, &node->pcur.btr_cur, 0, true,
+ &mtr);
/* The delete operation may fail if we have little
file space left: TODO: easiest to crash the database
@@ -226,29 +203,34 @@ retry:
}
func_exit:
- btr_pcur_commit_specify_mtr(&node->pcur, &mtr);
if (err == DB_SUCCESS && node->rec_type == TRX_UNDO_INSERT_METADATA) {
/* When rolling back the very first instant ADD COLUMN
operation, reset the root page to the basic state. */
ut_ad(!index->table->is_temporary());
- mtr.start();
if (page_t* root = btr_root_get(index, &mtr)) {
byte* page_type = root + FIL_PAGE_TYPE;
ut_ad(mach_read_from_2(page_type)
== FIL_PAGE_TYPE_INSTANT
|| mach_read_from_2(page_type)
== FIL_PAGE_INDEX);
- index->set_modified(mtr);
mlog_write_ulint(page_type, FIL_PAGE_INDEX,
MLOG_2BYTES, &mtr);
byte* instant = PAGE_INSTANT + PAGE_HEADER + root;
mlog_write_ulint(instant,
page_ptr_get_direction(instant + 1),
MLOG_2BYTES, &mtr);
+ rec_t* infimum = page_get_infimum_rec(root);
+ rec_t* supremum = page_get_supremum_rec(root);
+ static const byte str[8 + 8] = "supremuminfimum";
+ if (memcmp(infimum, str + 8, 8)
+ || memcmp(supremum, str, 8)) {
+ mlog_write_string(infimum, str + 8, 8, &mtr);
+ mlog_write_string(supremum, str, 8, &mtr);
+ }
}
- mtr.commit();
}
+ btr_pcur_commit_specify_mtr(&node->pcur, &mtr);
return(err);
}
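The 16-byte literal introduced in the hunk above is a compact way to obtain both canonical pseudo-record headings at once: str points at the 8 bytes "supremum" and str + 8 points at "infimum" plus its terminating NUL, and the code rewrites the root page's markers only when they differ. A standalone sketch, independent of InnoDB's page layout, showing why the two 8-byte slices equal the canonical values:

    #include <cassert>
    #include <cstring>

    int main() {
        // Same trick as in the hunk above: one literal, two 8-byte views.
        static const unsigned char str[8 + 8] = "supremuminfimum";
        // Canonical fixed contents of the infimum/supremum pseudo-records.
        static const unsigned char INFIMUM[8] =
            {'i','n','f','i','m','u','m',0};
        static const unsigned char SUPREMUM[8] =
            {'s','u','p','r','e','m','u','m'};
        assert(memcmp(str, SUPREMUM, 8) == 0);
        assert(memcmp(str + 8, INFIMUM, 8) == 0);
        return 0;
    }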
@@ -388,14 +370,10 @@ retry:
return(err);
}
-/***********************************************************//**
-Parses the row reference and other info in a fresh insert undo record. */
-static
-void
-row_undo_ins_parse_undo_rec(
-/*========================*/
- undo_node_t* node, /*!< in/out: row undo node */
- ibool dict_locked) /*!< in: TRUE if own dict_sys->mutex */
+/** Parse an insert undo record.
+@param[in,out] node row rollback state
+@param[in] dict_locked whether the data dictionary cache is locked */
+static bool row_undo_ins_parse_undo_rec(undo_node_t* node, bool dict_locked)
{
dict_index_t* clust_index;
byte* ptr;
@@ -404,18 +382,28 @@ row_undo_ins_parse_undo_rec(
ulint dummy;
bool dummy_extern;
- ut_ad(node);
+ ut_ad(node->state == UNDO_INSERT_PERSISTENT
+ || node->state == UNDO_INSERT_TEMPORARY);
+ ut_ad(node->trx->in_rollback);
+ ut_ad(trx_undo_roll_ptr_is_insert(node->roll_ptr));
ptr = trx_undo_rec_get_pars(node->undo_rec, &node->rec_type, &dummy,
&dummy_extern, &undo_no, &table_id);
node->update = NULL;
- node->table = dict_table_open_on_id(
- table_id, dict_locked, DICT_TABLE_OP_NORMAL);
+ if (node->state == UNDO_INSERT_PERSISTENT) {
+ node->table = dict_table_open_on_id(table_id, dict_locked,
+ DICT_TABLE_OP_NORMAL);
+ } else if (!dict_locked) {
+ mutex_enter(&dict_sys->mutex);
+ node->table = dict_sys->get_temporary_table(table_id);
+ mutex_exit(&dict_sys->mutex);
+ } else {
+ node->table = dict_sys->get_temporary_table(table_id);
+ }
- /* Skip the UNDO if we can't find the table or the .ibd file. */
- if (UNIV_UNLIKELY(node->table == NULL)) {
- return;
+ if (!node->table) {
+ return false;
}
switch (node->rec_type) {
@@ -454,6 +442,7 @@ close_table:
connection, instead of doing this rollback. */
dict_table_close(node->table, dict_locked, FALSE);
node->table = NULL;
+ return false;
} else {
ut_ad(!node->table->skip_alter_undo);
clust_index = dict_table_get_first_index(node->table);
@@ -485,6 +474,8 @@ close_table:
goto close_table;
}
}
+
+ return true;
}
/***************************************************************//**
@@ -561,18 +552,10 @@ row_undo_ins(
que_thr_t* thr) /*!< in: query thread */
{
dberr_t err;
- ibool dict_locked;
-
- ut_ad(node->state == UNDO_NODE_INSERT);
- ut_ad(node->trx->in_rollback);
- ut_ad(trx_undo_roll_ptr_is_insert(node->roll_ptr));
+ bool dict_locked = node->trx->dict_operation_lock_mode == RW_X_LATCH;
- dict_locked = node->trx->dict_operation_lock_mode == RW_X_LATCH;
-
- row_undo_ins_parse_undo_rec(node, dict_locked);
-
- if (node->table == NULL) {
- return(DB_SUCCESS);
+ if (!row_undo_ins_parse_undo_rec(node, dict_locked)) {
+ return DB_SUCCESS;
}
/* Iterate over all the indexes and undo the insert.*/
@@ -595,26 +578,19 @@ row_undo_ins(
break;
}
- /* fall through */
- case TRX_UNDO_INSERT_METADATA:
log_free_check();
if (node->table->id == DICT_INDEXES_ID) {
- ut_ad(node->rec_type == TRX_UNDO_INSERT_REC);
-
+ ut_ad(!node->table->is_temporary());
if (!dict_locked) {
mutex_enter(&dict_sys->mutex);
}
- }
-
- // FIXME: We need to update the dict_index_t::space and
- // page number fields too.
- err = row_undo_ins_remove_clust_rec(node);
-
- if (node->table->id == DICT_INDEXES_ID
- && !dict_locked) {
-
- mutex_exit(&dict_sys->mutex);
+ err = row_undo_ins_remove_clust_rec(node);
+ if (!dict_locked) {
+ mutex_exit(&dict_sys->mutex);
+ }
+ } else {
+ err = row_undo_ins_remove_clust_rec(node);
}
if (err == DB_SUCCESS && node->table->stat_initialized) {
@@ -634,6 +610,12 @@ row_undo_ins(
node->table, node->trx->mysql_thd);
}
}
+ break;
+
+ case TRX_UNDO_INSERT_METADATA:
+ log_free_check();
+ ut_ad(!node->table->is_temporary());
+ err = row_undo_ins_remove_clust_rec(node);
}
dict_table_close(node->table, dict_locked, FALSE);
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index 41079450159..7ff64929080 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -111,6 +111,9 @@ row_undo_mod_clust_low(
ut_ad(rec_get_trx_id(btr_cur_get_rec(btr_cur),
btr_cur_get_index(btr_cur))
== thr_get_trx(thr)->id);
+ ut_ad(node->ref != &trx_undo_metadata
+ || node->update->info_bits == REC_INFO_METADATA_ADD
+ || node->update->info_bits == REC_INFO_METADATA_ALTER);
if (mode != BTR_MODIFY_LEAF
&& dict_index_is_online_ddl(btr_cur_get_index(btr_cur))) {
@@ -131,6 +134,7 @@ row_undo_mod_clust_low(
btr_cur, offsets, offsets_heap,
node->update, node->cmpl_info,
thr, thr_get_trx(thr)->id, mtr);
+ ut_ad(err != DB_SUCCESS || node->ref != &trx_undo_metadata);
} else {
big_rec_t* dummy_big_rec;
@@ -143,6 +147,52 @@ row_undo_mod_clust_low(
node->cmpl_info, thr, thr_get_trx(thr)->id, mtr);
ut_a(!dummy_big_rec);
+
+ static const byte
+ INFIMUM[8] = {'i','n','f','i','m','u','m',0},
+ SUPREMUM[8] = {'s','u','p','r','e','m','u','m'};
+
+ if (err == DB_SUCCESS
+ && node->ref == &trx_undo_metadata
+ && btr_cur_get_index(btr_cur)->table->instant
+ && node->update->info_bits == REC_INFO_METADATA_ADD) {
+ if (page_t* root = btr_root_get(
+ btr_cur_get_index(btr_cur), mtr)) {
+ byte* infimum;
+ byte *supremum;
+ if (page_is_comp(root)) {
+ infimum = PAGE_NEW_INFIMUM + root;
+ supremum = PAGE_NEW_SUPREMUM + root;
+ } else {
+ infimum = PAGE_OLD_INFIMUM + root;
+ supremum = PAGE_OLD_SUPREMUM + root;
+ }
+
+ ut_ad(!memcmp(infimum, INFIMUM, 8)
+ == !memcmp(supremum, SUPREMUM, 8));
+
+ if (memcmp(infimum, INFIMUM, 8)) {
+ mlog_write_string(infimum, INFIMUM,
+ 8, mtr);
+ mlog_write_string(supremum, SUPREMUM,
+ 8, mtr);
+ }
+ }
+ }
+ }
+
+ if (err == DB_SUCCESS
+ && btr_cur_get_index(btr_cur)->table->id == DICT_COLUMNS_ID) {
+ /* This is rolling back an UPDATE or DELETE on SYS_COLUMNS.
+ If it was part of an instant ALTER TABLE operation, we
+ must evict the table definition, so that it can be
+ reloaded after the dictionary operation has been
+ completed. At this point, any corresponding operation
+ to the metadata record will have been rolled back. */
+ const dfield_t& table_id = *dtuple_get_nth_field(node->row, 0);
+ ut_ad(dfield_get_len(&table_id) == 8);
+ node->trx->evict_table(mach_read_from_8(static_cast<byte*>(
+ table_id.data)));
}
return(err);
@@ -399,22 +449,34 @@ row_undo_mod_clust(
goto mtr_commit_exit;
}
+ ulint trx_id_offset = index->trx_id_offset;
ulint trx_id_pos = index->n_uniq ? index->n_uniq : 1;
- ut_ad(index->n_uniq <= MAX_REF_PARTS);
- /* Reserve enough offsets for the PRIMARY KEY and 2 columns
- so that we can access DB_TRX_ID, DB_ROLL_PTR. */
+ /* Reserve enough offsets for the PRIMARY KEY and
+ 2 columns so that we can access DB_TRX_ID, DB_ROLL_PTR. */
ulint offsets_[REC_OFFS_HEADER_SIZE + MAX_REF_PARTS + 2];
- rec_offs_init(offsets_);
- offsets = rec_get_offsets(
- rec, index, offsets_, true, trx_id_pos + 2, &heap);
- ulint len;
- ulint trx_id_offset = rec_get_nth_field_offs(
- offsets, trx_id_pos, &len);
- ut_ad(len == DATA_TRX_ID_LEN);
+ if (trx_id_offset) {
+ } else if (rec_is_metadata(rec, *index)) {
+ ut_ad(!buf_block_get_page_zip(btr_pcur_get_block(
+ &node->pcur)));
+ for (unsigned i = index->first_user_field(); i--; ) {
+ trx_id_offset += index->fields[i].fixed_len;
+ }
+ } else {
+ ut_ad(index->n_uniq <= MAX_REF_PARTS);
+ rec_offs_init(offsets_);
+ offsets = rec_get_offsets(
+ rec, index, offsets_, true, trx_id_pos + 2,
+ &heap);
+ ulint len;
+ trx_id_offset = rec_get_nth_field_offs(
+ offsets, trx_id_pos, &len);
+ ut_ad(len == DATA_TRX_ID_LEN);
+ }
if (trx_read_trx_id(rec + trx_id_offset) == node->new_trx_id) {
ut_ad(!rec_get_deleted_flag(
- rec, dict_table_is_comp(node->table)));
+ rec, dict_table_is_comp(node->table))
+ || rec_is_alter_metadata(rec, *index));
index->set_modified(mtr);
if (page_zip_des_t* page_zip = buf_block_get_page_zip(
btr_pcur_get_block(&node->pcur))) {
@@ -436,8 +498,6 @@ mtr_commit_exit:
btr_pcur_commit_specify_mtr(pcur, &mtr);
func_exit:
- node->state = UNDO_NODE_FETCH_NEXT;
-
if (offsets_heap) {
mem_heap_free(offsets_heap);
}
@@ -1139,14 +1199,10 @@ row_undo_mod_upd_exist_sec(
return(err);
}
-/***********************************************************//**
-Parses the row reference and other info in a modify undo log record. */
-static MY_ATTRIBUTE((nonnull))
-void
-row_undo_mod_parse_undo_rec(
-/*========================*/
- undo_node_t* node, /*!< in: row undo node */
- ibool dict_locked) /*!< in: TRUE if own dict_sys->mutex */
+/** Parse an update undo record.
+@param[in,out] node row rollback state
+@param[in] dict_locked whether the data dictionary cache is locked */
+static bool row_undo_mod_parse_undo_rec(undo_node_t* node, bool dict_locked)
{
dict_index_t* clust_index;
byte* ptr;
@@ -1159,19 +1215,28 @@ row_undo_mod_parse_undo_rec(
ulint cmpl_info;
bool dummy_extern;
+ ut_ad(node->state == UNDO_UPDATE_PERSISTENT
+ || node->state == UNDO_UPDATE_TEMPORARY);
+ ut_ad(node->trx->in_rollback);
+ ut_ad(!trx_undo_roll_ptr_is_insert(node->roll_ptr));
+
ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &cmpl_info,
&dummy_extern, &undo_no, &table_id);
node->rec_type = type;
- node->table = dict_table_open_on_id(
- table_id, dict_locked, DICT_TABLE_OP_NORMAL);
-
- /* TODO: other fixes associated with DROP TABLE + rollback in the
- same table by another user */
+ if (node->state == UNDO_UPDATE_PERSISTENT) {
+ node->table = dict_table_open_on_id(table_id, dict_locked,
+ DICT_TABLE_OP_NORMAL);
+ } else if (!dict_locked) {
+ mutex_enter(&dict_sys->mutex);
+ node->table = dict_sys->get_temporary_table(table_id);
+ mutex_exit(&dict_sys->mutex);
+ } else {
+ node->table = dict_sys->get_temporary_table(table_id);
+ }
- if (node->table == NULL) {
- /* Table was dropped */
- return;
+ if (!node->table) {
+ return false;
}
ut_ad(!node->table->skip_alter_undo);
@@ -1189,7 +1254,7 @@ close_table:
connection, instead of doing this rollback. */
dict_table_close(node->table, dict_locked, FALSE);
node->table = NULL;
- return;
+ return false;
}
clust_index = dict_table_get_first_index(node->table);
@@ -1208,16 +1273,21 @@ close_table:
ut_ad(!node->ref->info_bits);
if (node->update->info_bits & REC_INFO_MIN_REC_FLAG) {
- /* This must be an undo log record for a subsequent
- instant ALTER TABLE, extending the metadata record. */
- ut_ad(clust_index->is_instant());
- if (node->update->info_bits != REC_INFO_MIN_REC_FLAG) {
+ if ((node->update->info_bits & ~REC_INFO_DELETED_FLAG)
+ != REC_INFO_MIN_REC_FLAG) {
ut_ad(!"wrong info_bits in undo log record");
goto close_table;
}
- node->update->info_bits = REC_INFO_METADATA;
- const_cast<dtuple_t*>(node->ref)->info_bits
- = REC_INFO_METADATA;
+ /* This must be an undo log record for a subsequent
+ instant ALTER TABLE, extending the metadata record. */
+ ut_ad(clust_index->is_instant());
+ ut_ad(clust_index->table->instant
+ || !(node->update->info_bits & REC_INFO_DELETED_FLAG));
+ node->ref = &trx_undo_metadata;
+ node->update->info_bits = (node->update->info_bits
+ & REC_INFO_DELETED_FLAG)
+ ? REC_INFO_METADATA_ALTER
+ : REC_INFO_METADATA_ADD;
}
if (!row_undo_search_clust_to_pcur(node)) {
@@ -1255,6 +1325,8 @@ close_table:
(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)
? NULL : ptr);
}
+
+ return true;
}
/***********************************************************//**
@@ -1267,34 +1339,19 @@ row_undo_mod(
que_thr_t* thr) /*!< in: query thread */
{
dberr_t err;
- ibool dict_locked;
-
- ut_ad(node != NULL);
- ut_ad(thr != NULL);
- ut_ad(node->state == UNDO_NODE_MODIFY);
- ut_ad(node->trx->in_rollback);
- ut_ad(!trx_undo_roll_ptr_is_insert(node->roll_ptr));
-
- dict_locked = thr_get_trx(thr)->dict_operation_lock_mode == RW_X_LATCH;
-
ut_ad(thr_get_trx(thr) == node->trx);
+ const bool dict_locked = node->trx->dict_operation_lock_mode
+ == RW_X_LATCH;
- row_undo_mod_parse_undo_rec(node, dict_locked);
-
- if (node->table == NULL) {
- /* It is already undone, or will be undone by another query
- thread, or table was dropped */
-
- node->state = UNDO_NODE_FETCH_NEXT;
-
- return(DB_SUCCESS);
+ if (!row_undo_mod_parse_undo_rec(node, dict_locked)) {
+ return DB_SUCCESS;
}
node->index = dict_table_get_first_index(node->table);
ut_ad(dict_index_is_clust(node->index));
if (node->ref->info_bits) {
- ut_ad(node->ref->info_bits == REC_INFO_METADATA);
+ ut_ad(node->ref->is_metadata());
goto rollback_clust;
}
diff --git a/storage/innobase/row/row0undo.cc b/storage/innobase/row/row0undo.cc
index 7fe59807521..b560ec2a0da 100644
--- a/storage/innobase/row/row0undo.cc
+++ b/storage/innobase/row/row0undo.cc
@@ -218,7 +218,8 @@ row_undo_search_clust_to_pcur(
log, first mark them DATA_MISSING. So we will know if the
value gets updated */
if (node->table->n_v_cols
- && node->state != UNDO_NODE_INSERT
+ && (node->state == UNDO_UPDATE_PERSISTENT
+ || node->state == UNDO_UPDATE_TEMPORARY)
&& !(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
for (ulint i = 0;
i < dict_table_get_n_v_cols(node->table); i++) {
@@ -228,13 +229,15 @@ row_undo_search_clust_to_pcur(
}
if (node->rec_type == TRX_UNDO_UPD_EXIST_REC) {
- ut_ad(node->row->info_bits == REC_INFO_MIN_REC_FLAG
+ ut_ad((node->row->info_bits & ~REC_INFO_DELETED_FLAG)
+ == REC_INFO_MIN_REC_FLAG
|| node->row->info_bits == 0);
node->undo_row = dtuple_copy(node->row, node->heap);
row_upd_replace(node->undo_row, &node->undo_ext,
clust_index, node->update, node->heap);
} else {
- ut_ad((node->row->info_bits == REC_INFO_MIN_REC_FLAG)
+ ut_ad(((node->row->info_bits & ~REC_INFO_DELETED_FLAG)
+ == REC_INFO_MIN_REC_FLAG)
== (node->rec_type == TRX_UNDO_INSERT_METADATA));
node->undo_row = NULL;
node->undo_ext = NULL;
@@ -252,6 +255,149 @@ func_exit:
return(found);
}
+/** Try to truncate the undo logs.
+@param[in,out] trx transaction */
+static void row_undo_try_truncate(trx_t* trx)
+{
+ if (trx_undo_t* undo = trx->rsegs.m_redo.undo) {
+ ut_ad(undo->rseg == trx->rsegs.m_redo.rseg);
+ trx_undo_truncate_end(*undo, trx->undo_no, false);
+ }
+
+ if (trx_undo_t* undo = trx->rsegs.m_noredo.undo) {
+ ut_ad(undo->rseg == trx->rsegs.m_noredo.rseg);
+ trx_undo_truncate_end(*undo, trx->undo_no, true);
+ }
+}
+
+/** Get the latest undo log record for rollback.
+@param[in,out] node rollback context
+@return whether an undo log record was fetched */
+static bool row_undo_rec_get(undo_node_t* node)
+{
+ trx_t* trx = node->trx;
+
+ if (trx->pages_undone) {
+ trx->pages_undone = 0;
+ row_undo_try_truncate(trx);
+ }
+
+ trx_undo_t* undo = NULL;
+ trx_undo_t* insert = trx->rsegs.m_redo.old_insert;
+ trx_undo_t* update = trx->rsegs.m_redo.undo;
+ trx_undo_t* temp = trx->rsegs.m_noredo.undo;
+ const undo_no_t limit = trx->roll_limit;
+
+ ut_ad(!insert || !update || insert->empty() || update->empty()
+ || insert->top_undo_no != update->top_undo_no);
+ ut_ad(!insert || !temp || insert->empty() || temp->empty()
+ || insert->top_undo_no != temp->top_undo_no);
+ ut_ad(!update || !temp || update->empty() || temp->empty()
+ || update->top_undo_no != temp->top_undo_no);
+
+ if (UNIV_LIKELY_NULL(insert)
+ && !insert->empty() && limit <= insert->top_undo_no) {
+ undo = insert;
+ }
+
+ if (update && !update->empty() && update->top_undo_no >= limit) {
+ if (!undo) {
+ undo = update;
+ } else if (undo->top_undo_no < update->top_undo_no) {
+ undo = update;
+ }
+ }
+
+ if (temp && !temp->empty() && temp->top_undo_no >= limit) {
+ if (!undo) {
+ undo = temp;
+ } else if (undo->top_undo_no < temp->top_undo_no) {
+ undo = temp;
+ }
+ }
+
+ if (undo == NULL) {
+ row_undo_try_truncate(trx);
+ /* Mark any ROLLBACK TO SAVEPOINT completed, so that
+ if the transaction object is committed and reused
+ later, we will default to a full ROLLBACK. */
+ trx->roll_limit = 0;
+ trx->in_rollback = false;
+ return false;
+ }
+
+ ut_ad(!undo->empty());
+ ut_ad(limit <= undo->top_undo_no);
+
+ node->roll_ptr = trx_undo_build_roll_ptr(
+ false, undo->rseg->id, undo->top_page_no, undo->top_offset);
+
+ mtr_t mtr;
+ mtr.start();
+
+ page_t* undo_page = trx_undo_page_get_s_latched(
+ page_id_t(undo->rseg->space->id, undo->top_page_no), &mtr);
+
+ ulint offset = undo->top_offset;
+
+ trx_undo_rec_t* prev_rec = trx_undo_get_prev_rec(
+ undo_page + offset, undo->hdr_page_no, undo->hdr_offset,
+ true, &mtr);
+
+ if (prev_rec == NULL) {
+ undo->top_undo_no = IB_ID_MAX;
+ ut_ad(undo->empty());
+ } else {
+ page_t* prev_rec_page = page_align(prev_rec);
+
+ if (prev_rec_page != undo_page) {
+
+ trx->pages_undone++;
+ }
+
+ undo->top_page_no = page_get_page_no(prev_rec_page);
+ undo->top_offset = ulint(prev_rec - prev_rec_page);
+ undo->top_undo_no = trx_undo_rec_get_undo_no(prev_rec);
+ ut_ad(!undo->empty());
+ }
+
+ {
+ const trx_undo_rec_t* undo_rec = undo_page + offset;
+ node->undo_rec = trx_undo_rec_copy(undo_rec, node->heap);
+ }
+
+ mtr.commit();
+
+ switch (trx_undo_rec_get_type(node->undo_rec)) {
+ case TRX_UNDO_INSERT_METADATA:
+ /* This record type was introduced in MDEV-11369
+ instant ADD COLUMN, which was implemented after
+ MDEV-12288 removed the insert_undo log. There is no
+ instant ADD COLUMN for temporary tables. Therefore,
+ this record can only be present in the main undo log. */
+ ut_ad(undo == update);
+ /* fall through */
+ case TRX_UNDO_RENAME_TABLE:
+ ut_ad(undo == insert || undo == update);
+ /* fall through */
+ case TRX_UNDO_INSERT_REC:
+ ut_ad(undo == insert || undo == update || undo == temp);
+ node->roll_ptr |= 1ULL << ROLL_PTR_INSERT_FLAG_POS;
+ node->state = undo == temp
+ ? UNDO_INSERT_TEMPORARY : UNDO_INSERT_PERSISTENT;
+ break;
+ default:
+ ut_ad(undo == update || undo == temp);
+ node->state = undo == temp
+ ? UNDO_UPDATE_TEMPORARY : UNDO_UPDATE_PERSISTENT;
+ break;
+ }
+
+ trx->undo_no = node->undo_no = trx_undo_rec_get_undo_no(
+ node->undo_rec);
+ return true;
+}
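The selection logic above picks, among up to three candidate undo logs (old insert_undo, update undo, temporary undo), the non-empty one whose top record has the largest undo number that is still at or above trx->roll_limit. A minimal standalone sketch of that choose-the-latest step, with a simplified log type instead of trx_undo_t:

    #include <cstdint>
    #include <iostream>

    // Simplified stand-in for trx_undo_t: just the top undo record number,
    // or "empty" when there is nothing left to roll back.
    struct undo_log {
        bool     empty;
        uint64_t top_undo_no;
    };

    // Return the log whose top record should be rolled back next:
    // the non-empty candidate with the largest top_undo_no >= limit,
    // or nullptr when the (partial) rollback is complete.
    static undo_log* pick_latest(undo_log* candidates[], int n,
                                 uint64_t limit) {
        undo_log* best = nullptr;
        for (int i = 0; i < n; i++) {
            undo_log* u = candidates[i];
            if (!u || u->empty || u->top_undo_no < limit) continue;
            if (!best || best->top_undo_no < u->top_undo_no) best = u;
        }
        return best;
    }

    int main() {
        undo_log ins = {false, 41}, upd = {false, 43}, tmp = {true, 0};
        undo_log* c[3] = {&ins, &upd, &tmp};
        undo_log* next = pick_latest(c, 3, 40);
        std::cout << (next == &upd) << '\n';   // prints 1: upd is newest
        return 0;
    }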
+
/***********************************************************//**
Fetches an undo log record and does the undo for the recorded operation.
If none left, or a partial rollback completed, returns control to the
@@ -264,23 +410,12 @@ row_undo(
undo_node_t* node, /*!< in: row undo node */
que_thr_t* thr) /*!< in: query thread */
{
- trx_t* trx = node->trx;
- ut_ad(trx->in_rollback);
-
- if (node->state == UNDO_NODE_FETCH_NEXT) {
-
- node->undo_rec = trx_roll_pop_top_rec_of_trx(
- trx, &node->roll_ptr, node->heap);
-
- if (!node->undo_rec) {
- /* Rollback completed for this query thread */
- thr->run_node = que_node_get_parent(node);
- return(DB_SUCCESS);
- }
+ ut_ad(node->trx->in_rollback);
- node->undo_no = trx_undo_rec_get_undo_no(node->undo_rec);
- node->state = trx_undo_roll_ptr_is_insert(node->roll_ptr)
- ? UNDO_NODE_INSERT : UNDO_NODE_MODIFY;
+ if (node->state == UNDO_NODE_FETCH_NEXT && !row_undo_rec_get(node)) {
+ /* Rollback completed for this query thread */
+ thr->run_node = que_node_get_parent(node);
+ return DB_SUCCESS;
}
/* Prevent DROP TABLE etc. while we are rolling back this row.
@@ -288,31 +423,34 @@ row_undo(
then we already have dict_operation_lock locked in x-mode. Do not
try to lock again, because that would cause a hang. */
+ trx_t* trx = node->trx;
const bool locked_data_dict = (trx->dict_operation_lock_mode == 0);
if (locked_data_dict) {
-
row_mysql_freeze_data_dictionary(trx);
}
dberr_t err;
- if (node->state == UNDO_NODE_INSERT) {
-
+ switch (node->state) {
+ case UNDO_INSERT_PERSISTENT:
+ case UNDO_INSERT_TEMPORARY:
err = row_undo_ins(node, thr);
-
- node->state = UNDO_NODE_FETCH_NEXT;
- } else {
- ut_ad(node->state == UNDO_NODE_MODIFY);
+ break;
+ case UNDO_UPDATE_PERSISTENT:
+ case UNDO_UPDATE_TEMPORARY:
err = row_undo_mod(node, thr);
+ break;
+ default:
+ ut_ad(!"wrong state");
+ err = DB_CORRUPTION;
}
if (locked_data_dict) {
-
row_mysql_unfreeze_data_dictionary(trx);
}
- /* Do some cleanup */
+ node->state = UNDO_NODE_FETCH_NEXT;
btr_pcur_close(&(node->pcur));
mem_heap_empty(node->heap);
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index 7d8db255718..e1403eea93b 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -495,39 +495,6 @@ row_upd_rec_sys_fields_in_recovery(
}
}
-/*********************************************************************//**
-Sets the trx id or roll ptr field of a clustered index entry. */
-void
-row_upd_index_entry_sys_field(
-/*==========================*/
- dtuple_t* entry, /*!< in/out: index entry, where the memory
- buffers for sys fields are already allocated:
- the function just copies the new values to
- them */
- dict_index_t* index, /*!< in: clustered index */
- ulint type, /*!< in: DATA_TRX_ID or DATA_ROLL_PTR */
- ib_uint64_t val) /*!< in: value to write */
-{
- dfield_t* dfield;
- byte* field;
- ulint pos;
-
- ut_ad(dict_index_is_clust(index));
-
- pos = dict_index_get_sys_col_pos(index, type);
-
- dfield = dtuple_get_nth_field(entry, pos);
- field = static_cast<byte*>(dfield_get_data(dfield));
-
- if (type == DATA_TRX_ID) {
- ut_ad(val > 0);
- trx_write_trx_id(field, val);
- } else {
- ut_ad(type == DATA_ROLL_PTR);
- trx_write_roll_ptr(field, val);
- }
-}
-
/***********************************************************//**
Returns TRUE if row update changes size of some field in index or if some
field to be updated is stored externally in rec or update.
@@ -680,7 +647,7 @@ row_upd_rec_in_place(
switch (rec_get_status(rec)) {
case REC_STATUS_ORDINARY:
break;
- case REC_STATUS_COLUMNS_ADDED:
+ case REC_STATUS_INSTANT:
ut_ad(index->is_instant());
break;
case REC_STATUS_NODE_PTR:
@@ -731,35 +698,6 @@ row_upd_rec_in_place(
}
/*********************************************************************//**
-Writes into the redo log the values of trx id and roll ptr and enough info
-to determine their positions within a clustered index record.
-@return new pointer to mlog */
-byte*
-row_upd_write_sys_vals_to_log(
-/*==========================*/
- dict_index_t* index, /*!< in: clustered index */
- trx_id_t trx_id, /*!< in: transaction id */
- roll_ptr_t roll_ptr,/*!< in: roll ptr of the undo log record */
- byte* log_ptr,/*!< pointer to a buffer of size > 20 opened
- in mlog */
- mtr_t* mtr MY_ATTRIBUTE((unused))) /*!< in: mtr */
-{
- ut_ad(dict_index_is_clust(index));
- ut_ad(mtr);
-
- log_ptr += mach_write_compressed(log_ptr,
- dict_index_get_sys_col_pos(
- index, DATA_TRX_ID));
-
- trx_write_roll_ptr(log_ptr, roll_ptr);
- log_ptr += DATA_ROLL_PTR_LEN;
-
- log_ptr += mach_u64_write_compressed(log_ptr, trx_id);
-
- return(log_ptr);
-}
-
-/*********************************************************************//**
Parses the log data of system field values.
@return log data end or NULL */
byte*
@@ -1053,7 +991,6 @@ row_upd_build_difference_binary(
ulint len;
upd_t* update;
ulint n_diff;
- ulint trx_id_pos;
ulint i;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint n_fld = dtuple_get_n_fields(entry);
@@ -1068,10 +1005,6 @@ row_upd_build_difference_binary(
n_diff = 0;
- trx_id_pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
- ut_ad(dict_index_get_sys_col_pos(index, DATA_ROLL_PTR)
- == trx_id_pos + 1);
-
if (!offsets) {
offsets = rec_get_offsets(rec, index, offsets_, true,
ULINT_UNDEFINED, &heap);
@@ -1086,16 +1019,9 @@ row_upd_build_difference_binary(
/* NOTE: we compare the fields as binary strings!
(No collation) */
- if (no_sys) {
- /* TRX_ID */
- if (i == trx_id_pos) {
- continue;
- }
-
- /* DB_ROLL_PTR */
- if (i == trx_id_pos + 1) {
- continue;
- }
+ if (no_sys && (i == index->db_trx_id()
+ || i == index->db_roll_ptr())) {
+ continue;
}
if (!dfield_is_ext(dfield)
@@ -1204,7 +1130,7 @@ of the column and must not be poisoned with the new values.
@param[in] data 'internally' stored part of the field
containing also the reference to the external part
@param[in] local_len length of data, in bytes
-@param[in] page_size BLOB page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in,out] len input - length of prefix to
fetch; output: fetched length of the prefix
@param[in,out] heap heap where to allocate
@@ -1214,14 +1140,14 @@ byte*
row_upd_ext_fetch(
const byte* data,
ulint local_len,
- const page_size_t& page_size,
+ ulint zip_size,
ulint* len,
mem_heap_t* heap)
{
byte* buf = static_cast<byte*>(mem_heap_alloc(heap, *len));
*len = btr_copy_externally_stored_field_prefix(
- buf, *len, page_size, data, local_len);
+ buf, *len, zip_size, data, local_len);
/* We should never update records containing a half-deleted BLOB. */
ut_a(*len);
@@ -1237,7 +1163,7 @@ the given index entry field.
@param[in] uf update field
@param[in,out] heap memory heap for allocating and copying
the new value
-@param[in] page_size page size */
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0 */
static
void
row_upd_index_replace_new_col_val(
@@ -1246,7 +1172,7 @@ row_upd_index_replace_new_col_val(
const dict_col_t* col,
const upd_field_t* uf,
mem_heap_t* heap,
- const page_size_t& page_size)
+ ulint zip_size)
{
ulint len;
const byte* data;
@@ -1260,7 +1186,7 @@ row_upd_index_replace_new_col_val(
len = dfield_get_len(dfield);
data = static_cast<const byte*>(dfield_get_data(dfield));
- if (field->prefix_len > 0) {
+ if (field && field->prefix_len > 0) {
ibool fetch_ext = dfield_is_ext(dfield)
&& len < (ulint) field->prefix_len
+ BTR_EXTERN_FIELD_REF_SIZE;
@@ -1270,7 +1196,7 @@ row_upd_index_replace_new_col_val(
len = field->prefix_len;
- data = row_upd_ext_fetch(data, l, page_size,
+ data = row_upd_ext_fetch(data, l, zip_size,
&len, heap);
}
@@ -1326,6 +1252,57 @@ row_upd_index_replace_new_col_val(
}
}
+/** Apply an update vector to a metadata entry.
+@param[in,out] entry clustered index metadata record to be updated
+@param[in] index index of the entry
+@param[in] update update vector built for the entry
+@param[in,out] heap memory heap for copying off-page columns */
+static
+void
+row_upd_index_replace_metadata(
+ dtuple_t* entry,
+ const dict_index_t* index,
+ const upd_t* update,
+ mem_heap_t* heap)
+{
+ ut_ad(!index->table->skip_alter_undo);
+ ut_ad(update->is_alter_metadata());
+ ut_ad(entry->info_bits == update->info_bits);
+ ut_ad(entry->n_fields == ulint(index->n_fields) + 1);
+ const ulint zip_size = index->table->space->zip_size();
+ const ulint first = index->first_user_field();
+ ut_d(bool found_mblob = false);
+
+ for (ulint i = upd_get_n_fields(update); i--; ) {
+ const upd_field_t* uf = upd_get_nth_field(update, i);
+ ut_ad(!upd_fld_is_virtual_col(uf));
+ ut_ad(uf->field_no >= first - 2);
+ ulint f = uf->field_no;
+ dfield_t* dfield = dtuple_get_nth_field(entry, f);
+
+ if (f == first) {
+ ut_d(found_mblob = true);
+ ut_ad(!dfield_is_null(&uf->new_val));
+ ut_ad(dfield_is_ext(dfield));
+ ut_ad(dfield_get_len(dfield) == FIELD_REF_SIZE);
+ ut_ad(!dfield_is_null(dfield));
+ dfield_set_data(dfield, uf->new_val.data,
+ uf->new_val.len);
+ if (dfield_is_ext(&uf->new_val)) {
+ dfield_set_ext(dfield);
+ }
+ continue;
+ }
+
+ f -= f > first;
+ const dict_field_t* field = dict_index_get_nth_field(index, f);
+ row_upd_index_replace_new_col_val(dfield, field, field->col,
+ uf, heap, zip_size);
+ }
+
+ ut_ad(found_mblob);
+}
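The one subtle step above is the mapping from update-vector field numbers to index field numbers: the alter-metadata entry contains one extra hidden BLOB field at position index->first_user_field(), so field numbers beyond that position are shifted down by one (the "f -= f > first" line). A tiny sketch of just that mapping, with an illustrative helper name and an assumed three-column primary key:

    #include <cassert>

    // Map a field number in the alter-metadata entry to the corresponding
    // index field number. 'first' is the position of the hidden metadata
    // BLOB (index->first_user_field() in the code above); the BLOB itself
    // has no counterpart in the index and is handled separately.
    inline unsigned metadata_entry_field_to_index_field(unsigned f,
                                                        unsigned first) {
        assert(f != first);          // the BLOB field is not mapped
        return f - (f > first);      // fields after the BLOB shift down by 1
    }

    int main() {
        const unsigned first = 5;    // e.g. PK(3) + DB_TRX_ID + DB_ROLL_PTR
        assert(metadata_entry_field_to_index_field(3, first) == 3);
        assert(metadata_entry_field_to_index_field(6, first) == 5);
        return 0;
    }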
+
/** Apply an update vector to an index entry.
@param[in,out] entry index entry to be updated; the clustered index record
must be covered by a lock or a page latch to prevent
@@ -1341,8 +1318,14 @@ row_upd_index_replace_new_col_vals_index_pos(
mem_heap_t* heap)
{
ut_ad(!index->table->skip_alter_undo);
+ ut_ad(!entry->is_metadata() || entry->info_bits == update->info_bits);
+
+ if (UNIV_UNLIKELY(entry->is_alter_metadata())) {
+ row_upd_index_replace_metadata(entry, index, update, heap);
+ return;
+ }
- const page_size_t& page_size = dict_table_page_size(index->table);
+ const ulint zip_size = index->table->space->zip_size();
dtuple_set_info_bits(entry, update->info_bits);
@@ -1368,7 +1351,7 @@ row_upd_index_replace_new_col_vals_index_pos(
if (uf) {
row_upd_index_replace_new_col_val(
dtuple_get_nth_field(entry, i),
- field, col, uf, heap, page_size);
+ field, col, uf, heap, zip_size);
}
}
}
@@ -1394,7 +1377,7 @@ row_upd_index_replace_new_col_vals(
ulint i;
const dict_index_t* clust_index
= dict_table_get_first_index(index->table);
- const page_size_t& page_size = dict_table_page_size(index->table);
+ const ulint zip_size = index->table->space->zip_size();
ut_ad(!index->table->skip_alter_undo);
@@ -1424,7 +1407,7 @@ row_upd_index_replace_new_col_vals(
if (uf) {
row_upd_index_replace_new_col_val(
dtuple_get_nth_field(entry, i),
- field, col, uf, heap, page_size);
+ field, col, uf, heap, zip_size);
}
}
}
@@ -1648,8 +1631,7 @@ row_upd_replace(
}
if (n_ext_cols) {
- *ext = row_ext_create(n_ext_cols, ext_cols, table->flags, row,
- heap);
+ *ext = row_ext_create(n_ext_cols, ext_cols, *table, row, heap);
} else {
*ext = NULL;
}
@@ -1757,11 +1739,9 @@ row_upd_changes_ord_field_binary_func(
mem_heap_t* temp_heap = NULL;
const dfield_t* new_field = &upd_field->new_val;
- const page_size_t page_size
- = (ext != NULL)
- ? ext->page_size
- : dict_table_page_size(
- index->table);
+ const ulint zip_size = ext
+ ? ext->zip_size
+ : index->table->space->zip_size();
ut_ad(dfield->data != NULL
&& dfield->len > GEO_DATA_HEADER_SIZE);
@@ -1778,7 +1758,7 @@ row_upd_changes_ord_field_binary_func(
dptr = btr_copy_externally_stored_field(
&dlen, dptr,
- page_size,
+ zip_size,
flen,
temp_heap);
} else {
@@ -1841,7 +1821,7 @@ row_upd_changes_ord_field_binary_func(
dptr = btr_copy_externally_stored_field(
&dlen, dptr,
- page_size,
+ zip_size,
flen,
temp_heap);
} else {
@@ -2446,7 +2426,7 @@ row_upd_sec_index_entry(
#ifdef UNIV_DEBUG
mtr_commit(&mtr);
mtr_start(&mtr);
- ut_ad(btr_validate_index(index, 0, false));
+ ut_ad(btr_validate_index(index, 0));
ut_ad(0);
#endif /* UNIV_DEBUG */
break;
@@ -2482,7 +2462,7 @@ row_upd_sec_index_entry(
err = DB_SUCCESS;
break;
case DB_DEADLOCK:
- if (wsrep_debug) {
+ if (wsrep_get_debug()) {
ib::warn() << "WSREP: sec index FK check fail for deadlock"
<< " index " << index->name
<< " table " << index->table->name;
@@ -2567,10 +2547,10 @@ row_upd_sec_step(
}
#ifdef UNIV_DEBUG
-# define row_upd_clust_rec_by_insert_inherit(rec,offsets,entry,update) \
- row_upd_clust_rec_by_insert_inherit_func(rec,offsets,entry,update)
+# define row_upd_clust_rec_by_insert_inherit(rec,index,offsets,entry,update) \
+ row_upd_clust_rec_by_insert_inherit_func(rec,index,offsets,entry,update)
#else /* UNIV_DEBUG */
-# define row_upd_clust_rec_by_insert_inherit(rec,offsets,entry,update) \
+# define row_upd_clust_rec_by_insert_inherit(rec,index,offsets,entry,update) \
row_upd_clust_rec_by_insert_inherit_func(rec,entry,update)
#endif /* UNIV_DEBUG */
/*******************************************************************//**
@@ -2585,6 +2565,7 @@ row_upd_clust_rec_by_insert_inherit_func(
/*=====================================*/
const rec_t* rec, /*!< in: old record, or NULL */
#ifdef UNIV_DEBUG
+ dict_index_t* index, /*!< in: index, or NULL */
const ulint* offsets,/*!< in: rec_get_offsets(rec), or NULL */
#endif /* UNIV_DEBUG */
dtuple_t* entry, /*!< in/out: updated entry to be
@@ -2595,6 +2576,8 @@ row_upd_clust_rec_by_insert_inherit_func(
ulint i;
ut_ad(!rec == !offsets);
+ ut_ad(!rec == !index);
+ ut_ad(!rec || rec_offs_validate(rec, index, offsets));
ut_ad(!rec || rec_offs_any_extern(offsets));
for (i = 0; i < dtuple_get_n_fields(entry); i++) {
@@ -2605,6 +2588,9 @@ row_upd_clust_rec_by_insert_inherit_func(
ut_ad(!offsets
|| !rec_offs_nth_extern(offsets, i)
== !dfield_is_ext(dfield)
+ || (!dict_index_get_nth_field(index, i)->name
+ && !dfield_is_ext(dfield)
+ && (dfield_is_null(dfield) || dfield->len == 0))
|| upd_get_field_by_field_no(update, i, false));
if (!dfield_is_ext(dfield)
|| upd_get_field_by_field_no(update, i, false)) {
@@ -2702,7 +2688,11 @@ row_upd_clust_rec_by_insert(
if (index->is_instant()) entry->trim(*index);
ut_ad(dtuple_get_info_bits(entry) == 0);
- row_upd_index_entry_sys_field(entry, index, DATA_TRX_ID, trx->id);
+ {
+ dfield_t* t = dtuple_get_nth_field(entry, index->db_trx_id());
+ ut_ad(t->len == DATA_TRX_ID_LEN);
+ trx_write_trx_id(static_cast<byte*>(t->data), trx->id);
+ }
switch (node->state) {
default:
@@ -2711,7 +2701,7 @@ row_upd_clust_rec_by_insert(
/* A lock wait occurred in row_ins_clust_index_entry() in
the previous invocation of this function. */
row_upd_clust_rec_by_insert_inherit(
- NULL, NULL, entry, node->update);
+ NULL, NULL, NULL, entry, node->update);
break;
case UPD_NODE_UPDATE_CLUSTERED:
/* This is the first invocation of the function where
@@ -2752,7 +2742,8 @@ err_exit:
if (rec_offs_any_extern(offsets)) {
if (row_upd_clust_rec_by_insert_inherit(
- rec, offsets, entry, node->update)) {
+ rec, index, offsets,
+ entry, node->update)) {
/* The blobs are disowned here, expecting the
insert down below to inherit them. But if the
insert fails, then this disown will be undone
@@ -2785,7 +2776,7 @@ check_fk:
err = DB_SUCCESS;
break;
case DB_DEADLOCK:
- if (wsrep_debug) {
+ if (wsrep_get_debug()) {
ib::warn() << "WSREP: sec index FK check fail for deadlock"
<< " index " << index->name
<< " table " << index->table->name;
@@ -3012,7 +3003,7 @@ row_upd_del_mark_clust_rec(
err = DB_SUCCESS;
break;
case DB_DEADLOCK:
- if (wsrep_debug) {
+ if (wsrep_get_debug()) {
ib::warn() << "WSREP: sec index FK check fail for deadlock"
<< " index " << index->name
<< " table " << index->table->name;
diff --git a/storage/innobase/srv/srv0conc.cc b/storage/innobase/srv/srv0conc.cc
index e4a3e84df01..b2b464e31fa 100644
--- a/storage/innobase/srv/srv0conc.cc
+++ b/storage/innobase/srv/srv0conc.cc
@@ -67,14 +67,12 @@ ulong srv_thread_concurrency = 0;
/** Variables tracking the active and waiting threads. */
struct srv_conc_t {
- char pad[CACHE_LINE_SIZE - (sizeof(ulint) + sizeof(lint))];
-
/** Number of transactions that have declared_to_be_inside_innodb */
- ulint n_active;
+ MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) Atomic_counter<ulint> n_active;
/** Number of OS threads waiting in the FIFO for permission to
enter InnoDB */
- ulint n_waiting;
+ MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) Atomic_counter<ulint> n_waiting;
};
/* Control variables for tracking concurrency. */
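The change above replaces the hand-written pad[] with MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) on each Atomic_counter, so the two hot counters live on separate cache lines and do not cause false sharing. In portable C++, without the server's MY_ALIGNED macro or Atomic_counter wrapper, the same idea can be sketched as:

    #include <atomic>
    #include <cstddef>

    // 64 bytes is a common L1 data cache line size; the real code takes
    // CPU_LEVEL1_DCACHE_LINESIZE from the build instead of hard-coding it.
    constexpr std::size_t CACHE_LINE = 64;

    struct conc_counters {
        // Each counter gets its own cache line, so threads bumping
        // n_active do not invalidate the line that holds n_waiting.
        alignas(CACHE_LINE) std::atomic<unsigned long> n_active{0};
        alignas(CACHE_LINE) std::atomic<unsigned long> n_waiting{0};
    };

    static conc_counters counters;

    // Usage mirrors srv_conc enter/exit: plain increments and decrements.
    void conc_enter() {
        counters.n_active.fetch_add(1, std::memory_order_relaxed);
    }
    void conc_exit() {
        counters.n_active.fetch_sub(1, std::memory_order_relaxed);
    }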
@@ -121,8 +119,8 @@ srv_conc_enter_innodb_with_atomics(
ulint sleep_in_us;
#ifdef WITH_WSREP
if (wsrep_on(trx->mysql_thd) &&
- wsrep_trx_is_aborting(trx->mysql_thd)) {
- if (wsrep_debug) {
+ wsrep_thd_is_aborting(trx->mysql_thd)) {
+ if (wsrep_get_debug()) {
ib::info() <<
"srv_conc_enter due to MUST_ABORT";
}
@@ -133,8 +131,7 @@ srv_conc_enter_innodb_with_atomics(
if (srv_thread_concurrency == 0) {
if (notified_mysql) {
- my_atomic_addlint(&srv_conc.n_waiting,
- ulint(-1));
+ srv_conc.n_waiting--;
thd_wait_end(trx->mysql_thd);
}
@@ -142,19 +139,14 @@ srv_conc_enter_innodb_with_atomics(
}
if (srv_conc.n_active < srv_thread_concurrency) {
- ulint n_active;
/* Check if there are any free tickets. */
- n_active = my_atomic_addlint(
- &srv_conc.n_active, 1) + 1;
-
- if (n_active <= srv_thread_concurrency) {
+ if (srv_conc.n_active++ < srv_thread_concurrency) {
srv_enter_innodb_with_tickets(trx);
if (notified_mysql) {
- my_atomic_addlint(&srv_conc.n_waiting,
- ulint(-1));
+ srv_conc.n_waiting--;
thd_wait_end(trx->mysql_thd);
}
@@ -176,11 +168,11 @@ srv_conc_enter_innodb_with_atomics(
/* Since there were no free seats, we relinquish
the overbooked ticket. */
- my_atomic_addlint(&srv_conc.n_active, ulint(-1));
+ srv_conc.n_active--;
}
if (!notified_mysql) {
- my_atomic_addlint(&srv_conc.n_waiting, 1);
+ srv_conc.n_waiting++;
thd_wait_begin(trx->mysql_thd, THD_WAIT_USER_LOCK);
@@ -224,7 +216,7 @@ srv_conc_exit_innodb_with_atomics(
trx->n_tickets_to_enter_innodb = 0;
trx->declared_to_be_inside_innodb = FALSE;
- my_atomic_addlint(&srv_conc.n_active, ulint(-1));
+ srv_conc.n_active--;
}
/*********************************************************************//**
@@ -258,7 +250,7 @@ srv_conc_force_enter_innodb(
return;
}
- (void) my_atomic_addlint(&srv_conc.n_active, 1);
+ srv_conc.n_active++;
trx->n_tickets_to_enter_innodb = 1;
trx->declared_to_be_inside_innodb = TRUE;
@@ -316,14 +308,14 @@ wsrep_srv_conc_cancel_wait(
srv_conc_enter_innodb_with_atomics(). No need to cancel here,
	thr will wake up after os_sleep and be allowed to enter InnoDB
*/
- if (wsrep_debug) {
+ if (wsrep_get_debug()) {
ib::info() << "WSREP: conc slot cancel, no atomics";
}
#else
// JAN: TODO: MySQL 5.7
//os_fast_mutex_lock(&srv_conc_mutex);
if (trx->wsrep_event) {
- if (wsrep_debug) {
+ if (wsrep_get_debug()) {
ib::info() << "WSREP: conc slot cancel";
}
os_event_set(trx->wsrep_event);
diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc
index 85d9f0522aa..3b3fcf446c7 100644
--- a/storage/innobase/srv/srv0mon.cc
+++ b/storage/innobase/srv/srv0mon.cc
@@ -298,12 +298,6 @@ static monitor_info_t innodb_counter_info[] =
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
MONITOR_DEFAULT_START, MONITOR_OVLD_PAGES_READ},
- {"buffer_pages0_read", "buffer",
- "Number of page 0 read (innodb_pages0_read)",
- static_cast<monitor_type_t>(
- MONITOR_EXISTING | MONITOR_DEFAULT_ON),
- MONITOR_DEFAULT_START, MONITOR_OVLD_PAGES0_READ},
-
{"buffer_index_sec_rec_cluster_reads", "buffer",
"Number of secondary record reads triggered cluster read",
static_cast<monitor_type_t>(
@@ -802,11 +796,6 @@ static monitor_info_t innodb_counter_info[] =
MONITOR_NONE,
MONITOR_DEFAULT_START, MONITOR_TRX_ROLLBACK_SAVEPOINT},
- {"trx_rollback_active", "transaction",
- "Number of resurrected active transactions rolled back",
- MONITOR_NONE,
- MONITOR_DEFAULT_START, MONITOR_TRX_ROLLBACK_ACTIVE},
-
{"trx_active_transactions", "transaction",
"Number of active transactions",
MONITOR_NONE,
@@ -1745,11 +1734,6 @@ srv_mon_process_existing_counter(
value = stat.n_pages_read;
break;
- /* innodb_pages0_read */
- case MONITOR_OVLD_PAGES0_READ:
- value = srv_stats.page0_read;
- break;
-
/* Number of times secondary index lookup triggered cluster lookup */
case MONITOR_OVLD_INDEX_SEC_REC_CLUSTER_READS:
value = srv_stats.n_sec_rec_cluster_reads;
@@ -1952,7 +1936,7 @@ srv_mon_process_existing_counter(
break;
case MONITOR_RSEG_HISTORY_LEN:
- value = trx_sys.history_size();
+ value = trx_sys.rseg_history_len;
break;
case MONITOR_RSEG_CUR_SIZE:
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index 11a46b5ba28..8501ffbf672 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -59,7 +59,6 @@ Created 10/8/1995 Heikki Tuuri
#include "pars0pars.h"
#include "que0que.h"
#include "row0mysql.h"
-#include "row0trunc.h"
#include "row0log.h"
#include "srv0mon.h"
#include "srv0srv.h"
@@ -77,10 +76,6 @@ Created 10/8/1995 Heikki Tuuri
#include <my_service_manager.h>
-#ifdef WITH_WSREP
-extern int wsrep_debug;
-extern int wsrep_trx_is_aborting(void *thd_ptr);
-#endif
/* The following is the maximum allowed duration of a lock wait. */
UNIV_INTERN ulong srv_fatal_semaphore_wait_threshold = DEFAULT_SRV_FATAL_SEMAPHORE_TIMEOUT;
@@ -199,8 +194,6 @@ ulong srv_page_size_shift;
/** innodb_log_write_ahead_size */
ulong srv_log_write_ahead_size;
-page_size_t univ_page_size(0, 0, false);
-
/** innodb_adaptive_flushing; try to flush dirty pages so as to avoid
IO bursts at the checkpoints. */
my_bool srv_adaptive_flushing;
@@ -498,10 +491,6 @@ UNIV_INTERN ulong srv_buf_dump_status_frequency;
mutex_enter(&srv_sys.mutex); \
} while (0)
-/** Test if the system mutex is owned. */
-#define srv_sys_mutex_own() (mutex_own(&srv_sys.mutex) \
- && !srv_read_only_mode)
-
/** Release the system mutex. */
#define srv_sys_mutex_exit() do { \
mutex_exit(&srv_sys.mutex); \
@@ -597,11 +586,12 @@ struct srv_sys_t{
sys_threads[]->event are
covered by srv_sys_t::mutex */
- ulint n_threads_active[SRV_MASTER + 1];
+ Atomic_counter<ulint>
+ n_threads_active[SRV_MASTER + 1];
/*!< number of threads active
in a thread class; protected
- by both my_atomic_addlint()
- and mutex */
+ by both std::atomic and
+ mutex */
srv_stats_t::ulint_ctr_1_t
activity_count; /*!< For tracking server
@@ -613,7 +603,7 @@ static srv_sys_t srv_sys;
/** @return whether the purge coordinator thread is active */
bool purge_sys_t::running()
{
- return my_atomic_loadlint(&srv_sys.n_threads_active[SRV_PURGE]);
+ return srv_sys.n_threads_active[SRV_PURGE];
}
/** Event to signal srv_monitor_thread. Not protected by a mutex.
@@ -812,7 +802,7 @@ srv_reserve_slot(
ut_ad(srv_slot_get_type(slot) == type);
- my_atomic_addlint(&srv_sys.n_threads_active[type], 1);
+ srv_sys.n_threads_active[type]++;
srv_sys_mutex_exit();
@@ -829,7 +819,7 @@ srv_suspend_thread_low(
srv_slot_t* slot) /*!< in/out: thread slot */
{
ut_ad(!srv_read_only_mode);
- ut_ad(srv_sys_mutex_own());
+ ut_ad(mutex_own(&srv_sys.mutex));
ut_ad(slot->in_use);
@@ -859,8 +849,7 @@ srv_suspend_thread_low(
ut_a(!slot->suspended);
slot->suspended = TRUE;
- if (lint(my_atomic_addlint(&srv_sys.n_threads_active[type], ulint(-1)))
- < 0) {
+ if (srv_sys.n_threads_active[type]-- == 0) {
ut_error;
}
@@ -917,7 +906,7 @@ srv_resume_thread(srv_slot_t* slot, int64_t sig_count = 0, bool wait = true,
ut_ad(slot->suspended);
slot->suspended = FALSE;
- my_atomic_addlint(&srv_sys.n_threads_active[slot->type], 1);
+ srv_sys.n_threads_active[slot->type]++;
srv_sys_mutex_exit();
return(timeout);
}
@@ -1328,7 +1317,7 @@ srv_printf_innodb_monitor(
fprintf(file,
"Total large memory allocated " ULINTPF "\n"
"Dictionary memory allocated " ULINTPF "\n",
- os_total_large_mem_allocated,
+ ulint{os_total_large_mem_allocated},
dict_sys_get_size());
buf_print_io(file);
@@ -1535,7 +1524,6 @@ srv_export_innodb_status(void)
export_vars.innodb_pages_created = stat.n_pages_created;
export_vars.innodb_pages_read = stat.n_pages_read;
- export_vars.innodb_page0_read = srv_stats.page0_read;
export_vars.innodb_pages_written = stat.n_pages_written;
@@ -1909,11 +1897,11 @@ void
srv_active_wake_master_thread_low()
{
ut_ad(!srv_read_only_mode);
- ut_ad(!srv_sys_mutex_own());
+ ut_ad(!mutex_own(&srv_sys.mutex));
srv_inc_activity_count();
- if (my_atomic_loadlint(&srv_sys.n_threads_active[SRV_MASTER]) == 0) {
+ if (srv_sys.n_threads_active[SRV_MASTER] == 0) {
srv_slot_t* slot;
srv_sys_mutex_enter();
@@ -1935,11 +1923,12 @@ srv_active_wake_master_thread_low()
void
srv_wake_purge_thread_if_not_active()
{
- ut_ad(!srv_sys_mutex_own());
+ ut_ad(!srv_read_only_mode);
+ ut_ad(!mutex_own(&srv_sys.mutex));
if (purge_sys.enabled() && !purge_sys.paused()
- && !my_atomic_loadlint(&srv_sys.n_threads_active[SRV_PURGE])
- && trx_sys.history_size()) {
+ && !srv_sys.n_threads_active[SRV_PURGE]
+ && trx_sys.rseg_history_len) {
srv_release_threads(SRV_PURGE, 1);
}
@@ -2432,8 +2421,7 @@ static bool srv_purge_should_exit()
return(true);
}
/* Slow shutdown was requested. */
- ulint history_size = trx_sys.history_size();
-
+ uint32_t history_size = trx_sys.rseg_history_len;
if (history_size) {
#if defined HAVE_SYSTEMD && !defined EMBEDDED_LIBRARY
static ib_time_t progress_time;
@@ -2442,7 +2430,7 @@ static bool srv_purge_should_exit()
progress_time = time;
service_manager_extend_timeout(
INNODB_EXTEND_TIMEOUT_INTERVAL,
- "InnoDB: to purge " ULINTPF " transactions",
+ "InnoDB: to purge %u transactions",
history_size);
}
#endif
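
Editor's note: the history-list length is now read directly from trx_sys.rseg_history_len, a 32-bit atomic counter, instead of through a history_size() accessor, and (as the srv_do_purge() hunk below shows) a truncate pass is forced whenever purge_sys.truncate.current is already set. A minimal sketch of that trigger logic, with a plain std::atomic<uint32_t> and made-up constants standing in for the real members:

#include <atomic>
#include <cstdint>

static std::atomic<uint32_t> rseg_history_len{0};  // stand-in for trx_sys.rseg_history_len
static const unsigned truncate_frequency = 128;    // stand-in for srv_purge_rseg_truncate_frequency

// Decide whether the next trx_purge() batch should also shrink the undo logs,
// mirroring the rewritten condition in srv_do_purge().
static bool purge_should_truncate(unsigned long& count, bool truncate_pending) {
  return !(++count % truncate_frequency) || truncate_pending;
}

int main() {
  unsigned long count = 0;
  rseg_history_len.fetch_add(1, std::memory_order_relaxed);  // cf. trx_purge_add_undo_to_history()
  bool work = rseg_history_len.load(std::memory_order_relaxed) > 0;
  bool trunc = purge_should_truncate(count, /*purge_sys.truncate.current != NULL*/ false);
  return work && !trunc ? 0 : 1;
}
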
@@ -2467,7 +2455,7 @@ static bool srv_task_execute()
UT_LIST_REMOVE(srv_sys.tasks, thr);
mutex_exit(&srv_sys.tasks_mutex);
que_run_threads(thr);
- my_atomic_addlint(&purge_sys.n_completed, 1);
+ purge_sys.n_tasks.fetch_sub(1, std::memory_order_release);
return true;
}
@@ -2503,7 +2491,7 @@ DECLARE_THREAD(srv_worker_thread)(
slot = srv_reserve_slot(SRV_WORKER);
ut_a(srv_n_purge_threads > 1);
- ut_a(ulong(my_atomic_loadlint(&srv_sys.n_threads_active[SRV_WORKER]))
+ ut_a(ulong(srv_sys.n_threads_active[SRV_WORKER])
< srv_n_purge_threads);
/* We need to ensure that the worker threads exit after the
@@ -2545,14 +2533,14 @@ DECLARE_THREAD(srv_worker_thread)(
@param[in,out] n_total_purged total number of purged pages
@return length of history list before the last purge batch. */
static
-ulint
+uint32_t
srv_do_purge(ulint* n_total_purged)
{
ulint n_pages_purged;
static ulint count = 0;
static ulint n_use_threads = 0;
- static ulint rseg_history_len = 0;
+ static uint32_t rseg_history_len = 0;
ulint old_activity_count = srv_get_activity_count();
const ulint n_threads = srv_n_purge_threads;
@@ -2570,7 +2558,7 @@ srv_do_purge(ulint* n_total_purged)
}
do {
- if (trx_sys.history_size() > rseg_history_len
+ if (trx_sys.rseg_history_len > rseg_history_len
|| (srv_max_purge_lag > 0
&& rseg_history_len > srv_max_purge_lag)) {
@@ -2599,20 +2587,14 @@ srv_do_purge(ulint* n_total_purged)
ut_a(n_use_threads <= n_threads);
/* Take a snapshot of the history list before purge. */
- if (!(rseg_history_len = trx_sys.history_size())) {
+ if (!(rseg_history_len = trx_sys.rseg_history_len)) {
break;
}
- ulint undo_trunc_freq =
- purge_sys.undo_trunc.get_rseg_truncate_frequency();
-
- ulint rseg_truncate_frequency = ut_min(
- static_cast<ulint>(srv_purge_rseg_truncate_frequency),
- undo_trunc_freq);
-
n_pages_purged = trx_purge(
n_use_threads,
- (++count % rseg_truncate_frequency) == 0);
+ !(++count % srv_purge_rseg_truncate_frequency)
+ || purge_sys.truncate.current);
*n_total_purged += n_pages_purged;
} while (n_pages_purged > 0 && !purge_sys.paused()
@@ -2629,7 +2611,7 @@ srv_purge_coordinator_suspend(
/*==========================*/
srv_slot_t* slot, /*!< in/out: Purge coordinator
thread slot */
- ulint rseg_history_len) /*!< in: history list length
+ uint32_t rseg_history_len) /*!< in: history list length
before last purge */
{
ut_ad(!srv_read_only_mode);
@@ -2646,7 +2628,7 @@ srv_purge_coordinator_suspend(
/* We don't wait right away on the non-timed wait because
we want to signal the thread that wants to suspend purge. */
const bool wait = stop
- || rseg_history_len <= trx_sys.history_size();
+ || rseg_history_len <= trx_sys.rseg_history_len;
const bool timeout = srv_resume_thread(
slot, sig_count, wait,
stop ? 0 : SRV_PURGE_MAX_TIMEOUT);
@@ -2656,12 +2638,12 @@ srv_purge_coordinator_suspend(
rw_lock_x_lock(&purge_sys.latch);
stop = srv_shutdown_state == SRV_SHUTDOWN_NONE
- && purge_sys.paused_latched();
+ && purge_sys.paused();
if (!stop) {
if (timeout
&& rseg_history_len < 5000
- && rseg_history_len == trx_sys.history_size()) {
+ && rseg_history_len == trx_sys.rseg_history_len) {
/* No new records were added since the
wait started. Simply wait for new
records. The magic number 5000 is an
@@ -2714,7 +2696,7 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
slot = srv_reserve_slot(SRV_PURGE);
- ulint rseg_history_len = trx_sys.history_size();
+ uint32_t rseg_history_len = trx_sys.rseg_history_len;
do {
/* If there are no records to purge or the last
@@ -2747,11 +2729,6 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
/* Note that we are shutting down. */
rw_lock_x_lock(&purge_sys.latch);
purge_sys.coordinator_shutdown();
-
- /* If there are any pending undo-tablespace truncate then clear
- it off as we plan to shutdown the purge thread. */
- purge_sys.undo_trunc.clear();
-
/* Ensure that the wait in purge_sys_t::stop() will terminate. */
os_event_set(purge_sys.event);
@@ -2843,9 +2820,7 @@ srv_purge_wakeup()
srv_release_threads(SRV_WORKER, n_workers);
}
- } while (!my_atomic_loadptr_explicit(reinterpret_cast<void**>
- (&srv_running),
- MY_MEMORY_ORDER_RELAXED)
+ } while (!srv_running.load(std::memory_order_relaxed)
&& (srv_sys.n_threads_active[SRV_WORKER]
|| srv_sys.n_threads_active[SRV_PURGE]));
}
@@ -2858,38 +2833,3 @@ void srv_purge_shutdown()
srv_purge_wakeup();
} while (srv_sys.sys_threads[SRV_PURGE_SLOT].in_use);
}
-
-/** Check if tablespace is being truncated.
-(Ignore system-tablespace as we don't re-create the tablespace
-and so some of the action that are suppressed by this function
-for independent tablespace are not applicable to system-tablespace).
-@param space_id space_id to check for truncate action
-@return true if being truncated, false if not being
- truncated or tablespace is system-tablespace. */
-bool
-srv_is_tablespace_truncated(ulint space_id)
-{
- if (is_system_tablespace(space_id)) {
- return(false);
- }
-
- return(truncate_t::is_tablespace_truncated(space_id)
- || undo::Truncate::is_tablespace_truncated(space_id));
-
-}
-
-/** Check if tablespace was truncated.
-@param[in] space space object to check for truncate action
-@return true if tablespace was truncated and we still have an active
-MLOG_TRUNCATE REDO log record. */
-bool
-srv_was_tablespace_truncated(const fil_space_t* space)
-{
- if (space == NULL) {
- ut_ad(0);
- return(false);
- }
-
- return (!is_system_tablespace(space->id)
- && truncate_t::was_tablespace_truncated(space->id));
-}
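
Editor's note: several hunks in this file replace my_atomic_loadptr_explicit()/my_atomic_loadlint() with std::atomic loads. A hedged sketch of the srv_purge_wakeup() loop condition, using an illustrative thread_handle type rather than the real srv_running declaration:

#include <atomic>

struct thread_handle;                       // opaque stand-in for the real thread descriptor

// Stand-in for srv_running, formerly read with
// my_atomic_loadptr_explicit(..., MY_MEMORY_ORDER_RELAXED).
static std::atomic<thread_handle*> srv_running_sketch{nullptr};

static bool keep_waking_purge(bool workers_active, bool purge_active) {
  // mirrors the loop condition at the end of srv_purge_wakeup()
  return !srv_running_sketch.load(std::memory_order_relaxed)
      && (workers_active || purge_active);
}

int main() {
  return keep_waking_purge(false, false) ? 1 : 0;
}
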
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index f48111a3341..2337dfaada2 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -74,7 +74,6 @@ Created 2/16/1996 Heikki Tuuri
#include "srv0start.h"
#include "srv0srv.h"
#include "btr0defragment.h"
-#include "row0trunc.h"
#include "mysql/service_wsrep.h" /* wsrep_recovery */
#include "trx0rseg.h"
#include "os0proc.h"
@@ -97,7 +96,6 @@ Created 2/16/1996 Heikki Tuuri
#include "row0upd.h"
#include "row0row.h"
#include "row0mysql.h"
-#include "row0trunc.h"
#include "btr0pcur.h"
#include "os0event.h"
#include "zlib.h"
@@ -677,9 +675,19 @@ static bool srv_undo_tablespace_open(const char* name, ulint space_id,
fil_set_max_space_id_if_bigger(space_id);
- fil_space_t* space = fil_space_create(
- undo_name, space_id, FSP_FLAGS_PAGE_SSIZE(),
- FIL_TYPE_TABLESPACE, NULL);
+ ulint fsp_flags;
+ switch (srv_checksum_algorithm) {
+ case SRV_CHECKSUM_ALGORITHM_FULL_CRC32:
+ case SRV_CHECKSUM_ALGORITHM_STRICT_FULL_CRC32:
+ fsp_flags = (FSP_FLAGS_FCRC32_MASK_MARKER
+ | FSP_FLAGS_FCRC32_PAGE_SSIZE());
+ break;
+ default:
+ fsp_flags = FSP_FLAGS_PAGE_SSIZE();
+ }
+
+ fil_space_t* space = fil_space_create(undo_name, space_id, fsp_flags,
+ FIL_TYPE_TABLESPACE, NULL);
ut_a(fil_validate());
ut_a(space);
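
Editor's note: the hunk above selects the tablespace flags for a freshly opened undo tablespace based on the configured checksum algorithm. Restated in isolation for clarity, with placeholder enum and flag values rather than the real FSP_FLAGS_* macros:

#include <cstdint>

enum checksum_algorithm { CHECKSUM_CRC32, CHECKSUM_FULL_CRC32, CHECKSUM_STRICT_FULL_CRC32 };

static uint32_t undo_space_flags(checksum_algorithm algo,
                                 uint32_t fcrc32_marker,      // placeholder for FSP_FLAGS_FCRC32_MASK_MARKER
                                 uint32_t fcrc32_page_ssize,  // placeholder for FSP_FLAGS_FCRC32_PAGE_SSIZE()
                                 uint32_t legacy_page_ssize)  // placeholder for FSP_FLAGS_PAGE_SSIZE()
{
  switch (algo) {
  case CHECKSUM_FULL_CRC32:
  case CHECKSUM_STRICT_FULL_CRC32:
    return fcrc32_marker | fcrc32_page_ssize;  // full_crc32 flag layout
  default:
    return legacy_page_ssize;                  // legacy flag layout
  }
}

int main() {
  return undo_space_flags(CHECKSUM_FULL_CRC32, 1u << 4, 0, 0) != 0 ? 0 : 1;
}
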
@@ -782,8 +790,6 @@ srv_check_undo_redo_logs_exists()
return(DB_SUCCESS);
}
-undo::undo_spaces_t undo::Truncate::s_fix_up_spaces;
-
/** Open the configured number of dedicated undo tablespaces.
@param[in] create_new_db whether the database is being initialized
@return DB_SUCCESS or error code */
@@ -865,46 +871,8 @@ srv_undo_tablespaces_init(bool create_new_db)
prev_space_id = srv_undo_space_id_start - 1;
break;
case SRV_OPERATION_NORMAL:
- if (create_new_db) {
- break;
- }
- /* fall through */
case SRV_OPERATION_RESTORE:
case SRV_OPERATION_RESTORE_EXPORT:
- ut_ad(!create_new_db);
-
- /* Check if any of the UNDO tablespace needs fix-up because
- server crashed while truncate was active on UNDO tablespace.*/
- for (i = 0; i < n_undo_tablespaces; ++i) {
-
- undo::Truncate undo_trunc;
-
- if (undo_trunc.needs_fix_up(undo_tablespace_ids[i])) {
-
- char name[OS_FILE_MAX_PATH];
-
- snprintf(name, sizeof(name),
- "%s%cundo%03zu",
- srv_undo_dir, OS_PATH_SEPARATOR,
- undo_tablespace_ids[i]);
-
- os_file_delete(innodb_data_file_key, name);
-
- err = srv_undo_tablespace_create(
- name,
- SRV_UNDO_TABLESPACE_SIZE_IN_PAGES);
-
- if (err != DB_SUCCESS) {
- ib::error() << "Could not fix-up undo "
- " tablespace truncate '"
- << name << "'.";
- return(err);
- }
-
- undo::Truncate::s_fix_up_spaces.push_back(
- undo_tablespace_ids[i]);
- }
- }
break;
}
@@ -1009,64 +977,6 @@ srv_undo_tablespaces_init(bool create_new_db)
}
}
- if (!undo::Truncate::s_fix_up_spaces.empty()) {
-
- /* Step-1: Initialize the tablespace header and rsegs header. */
- mtr_t mtr;
-
- mtr_start(&mtr);
- /* Turn off REDO logging. We are in server start mode and fixing
- UNDO tablespace even before REDO log is read. Let's say we
- do REDO logging here then this REDO log record will be applied
- as part of the current recovery process. We surely don't need
- that as this is fix-up action parallel to REDO logging. */
- mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
- buf_block_t* sys_header = trx_sysf_get(&mtr);
- if (!sys_header) {
- mtr.commit();
- return DB_CORRUPTION;
- }
-
- for (undo::undo_spaces_t::const_iterator it
- = undo::Truncate::s_fix_up_spaces.begin();
- it != undo::Truncate::s_fix_up_spaces.end();
- ++it) {
-
- undo::Truncate::add_space_to_trunc_list(*it);
-
- fil_space_t* space = fil_space_get(*it);
-
- fsp_header_init(space,
- SRV_UNDO_TABLESPACE_SIZE_IN_PAGES,
- &mtr);
-
- for (ulint i = 0; i < TRX_SYS_N_RSEGS; i++) {
- if (trx_sysf_rseg_get_space(sys_header, i)
- == *it) {
- trx_rseg_header_create(
- space, i, sys_header, &mtr);
- }
- }
-
- undo::Truncate::clear_trunc_list();
- }
- mtr_commit(&mtr);
-
- /* Step-2: Flush the dirty pages from the buffer pool. */
- for (undo::undo_spaces_t::const_iterator it
- = undo::Truncate::s_fix_up_spaces.begin();
- it != undo::Truncate::s_fix_up_spaces.end();
- ++it) {
- FlushObserver dummy(fil_system.sys_space, NULL, NULL);
- buf_LRU_flush_or_remove_pages(TRX_SYS_SPACE, &dummy);
- FlushObserver dummy2(fil_space_get(*it), NULL, NULL);
- buf_LRU_flush_or_remove_pages(*it, &dummy2);
-
- /* Remove the truncate redo log file. */
- undo::done(*it);
- }
- }
-
return(DB_SUCCESS);
}
@@ -1294,9 +1204,7 @@ srv_prepare_to_delete_redo_log_files(
ulint pending_io = 0;
ulint count = 0;
- if ((log_sys.log.format & ~LOG_HEADER_FORMAT_ENCRYPTED)
- != LOG_HEADER_FORMAT_CURRENT
- || log_sys.log.subformat != 2) {
+ if (log_sys.log.subformat != 2) {
srv_log_file_size = 0;
}
@@ -1315,12 +1223,10 @@ srv_prepare_to_delete_redo_log_files(
{
ib::info info;
- if (srv_log_file_size == 0) {
- info << ((log_sys.log.format
- & ~LOG_HEADER_FORMAT_ENCRYPTED)
- != LOG_HEADER_FORMAT_10_4
- ? "Upgrading redo log: "
- : "Downgrading redo log: ");
+ if (srv_log_file_size == 0
+ || (log_sys.log.format & ~LOG_HEADER_FORMAT_ENCRYPTED)
+ != LOG_HEADER_FORMAT_10_4) {
+ info << "Upgrading redo log: ";
} else if (n_files != srv_n_log_files
|| srv_log_file_size
!= srv_log_file_size_requested) {
@@ -1909,7 +1815,7 @@ files_checked:
ulint ibuf_root = btr_create(
DICT_CLUSTERED | DICT_IBUF, fil_system.sys_space,
- DICT_IBUF_ID_MIN, dict_ind_redundant, NULL, &mtr);
+ DICT_IBUF_ID_MIN, dict_ind_redundant, &mtr);
mtr_commit(&mtr);
@@ -1948,21 +1854,13 @@ files_checked:
return(srv_init_abort(err));
}
} else {
- /* Invalidate the buffer pool to ensure that we reread
- the page that we read above, during recovery.
- Note that this is not as heavy weight as it seems. At
- this point there will be only ONE page in the buf_LRU
- and there must be no page in the buf_flush list. */
- buf_pool_invalidate();
-
- /* Scan and locate truncate log files. Parsed located files
- and add table to truncate information to central vector for
- truncate fix-up action post recovery. */
- err = TruncateLogParser::scan_and_parse(srv_log_group_home_dir);
- if (err != DB_SUCCESS) {
+ /* Work around the bug that we were performing a dirty read of
+ at least the TRX_SYS page into the buffer pool above, without
+ reading or applying any redo logs.
- return(srv_init_abort(DB_ERROR));
- }
+ MDEV-19229 FIXME: Remove the dirty reads and this call.
+ Add an assertion that the buffer pool is empty. */
+ buf_pool_invalidate();
/* We always try to do a recovery, even if the database had
been shut down normally: this is the normal startup path */
@@ -2025,7 +1923,7 @@ files_checked:
/* New data file(s) were added */
mtr.start();
buf_block_t* block = buf_page_get(
- page_id_t(0, 0), univ_page_size,
+ page_id_t(0, 0), 0,
RW_SX_LATCH, &mtr);
ulint size = mach_read_from_4(
FSP_HEADER_OFFSET + FSP_SIZE
@@ -2049,8 +1947,7 @@ files_checked:
#ifdef UNIV_DEBUG
{
mtr.start();
- buf_block_t* block = buf_page_get(page_id_t(0, 0),
- univ_page_size,
+ buf_block_t* block = buf_page_get(page_id_t(0, 0), 0,
RW_S_LATCH, &mtr);
ut_ad(mach_read_from_4(FSP_SIZE + FSP_HEADER_OFFSET
+ block->frame)
@@ -2144,9 +2041,8 @@ files_checked:
&& srv_n_log_files_found == srv_n_log_files
&& log_sys.log.format
== (srv_encrypt_log
- ? LOG_HEADER_FORMAT_CURRENT
- | LOG_HEADER_FORMAT_ENCRYPTED
- : LOG_HEADER_FORMAT_CURRENT)
+ ? LOG_HEADER_FORMAT_ENC_10_4
+ : LOG_HEADER_FORMAT_10_4)
&& log_sys.log.subformat == 2) {
/* No need to add or remove encryption,
upgrade, downgrade, or resize. */
@@ -2214,24 +2110,24 @@ files_checked:
block = buf_page_get(
page_id_t(IBUF_SPACE_ID,
FSP_IBUF_HEADER_PAGE_NO),
- univ_page_size, RW_X_LATCH, &mtr);
+ 0, RW_X_LATCH, &mtr);
fil_block_check_type(*block, FIL_PAGE_TYPE_SYS, &mtr);
/* Already MySQL 3.23.53 initialized
FSP_IBUF_TREE_ROOT_PAGE_NO to
FIL_PAGE_INDEX. No need to reset that one. */
block = buf_page_get(
page_id_t(TRX_SYS_SPACE, TRX_SYS_PAGE_NO),
- univ_page_size, RW_X_LATCH, &mtr);
+ 0, RW_X_LATCH, &mtr);
fil_block_check_type(*block, FIL_PAGE_TYPE_TRX_SYS,
&mtr);
block = buf_page_get(
page_id_t(TRX_SYS_SPACE,
FSP_FIRST_RSEG_PAGE_NO),
- univ_page_size, RW_X_LATCH, &mtr);
+ 0, RW_X_LATCH, &mtr);
fil_block_check_type(*block, FIL_PAGE_TYPE_SYS, &mtr);
block = buf_page_get(
page_id_t(TRX_SYS_SPACE, FSP_DICT_HDR_PAGE_NO),
- univ_page_size, RW_X_LATCH, &mtr);
+ 0, RW_X_LATCH, &mtr);
fil_block_check_type(*block, FIL_PAGE_TYPE_SYS, &mtr);
mtr.commit();
}
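
Editor's note: these buf_page_get() calls now pass a plain zip_size instead of univ_page_size, where 0 means an uncompressed page and a nonzero value is the ROW_FORMAT=COMPRESSED physical size. A small illustrative helper (assumed convention, not an InnoDB function) showing how such a parameter maps to a physical page size:

#include <cstddef>

// zip_size == 0: uncompressed page of srv_page_size bytes;
// otherwise zip_size is the compressed physical page size.
inline size_t physical_page_size(size_t zip_size, size_t srv_page_size) {
  return zip_size ? zip_size : srv_page_size;
}

int main() {
  return physical_page_size(0, 16384) == 16384      // e.g. system tablespace pages
      && physical_page_size(8192, 16384) == 8192 ? 0 : 1;
}
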
@@ -2244,14 +2140,6 @@ files_checked:
trx_rollback_recovered(false);
}
- /* Fix-up truncate of tables in the system tablespace
- if server crashed while truncate was active. The non-
- system tables are done after tablespace discovery. Do
- this now because this procedure assumes that no pages
- have changed since redo recovery. Tablespace discovery
- can do updates to pages in the system tablespace.*/
- err = truncate_t::fixup_tables_in_system_tablespace();
-
if (srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE) {
/* Open or Create SYS_TABLESPACES and SYS_DATAFILES
so that tablespace names and other metadata can be
@@ -2278,10 +2166,6 @@ files_checked:
dict_check_tablespaces_and_store_max_id();
}
- /* Fix-up truncate of table if server crashed while truncate
- was active. */
- err = truncate_t::fixup_tables_in_non_system_tablespace();
-
if (err != DB_SUCCESS) {
return(srv_init_abort(err));
}
@@ -2476,7 +2360,7 @@ skip_monitors:
Create the dump/load thread only when not running with
--wsrep-recover.
*/
- if (!wsrep_recovery) {
+ if (!get_wsrep_recovery()) {
#endif /* WITH_WSREP */
/* Create the buffer pool dump/load thread */
@@ -2564,9 +2448,7 @@ void srv_shutdown_bg_undo_sources()
/** Shut down InnoDB. */
void innodb_shutdown()
{
- ut_ad(!my_atomic_loadptr_explicit(reinterpret_cast<void**>
- (&srv_running),
- MY_MEMORY_ORDER_RELAXED));
+ ut_ad(!srv_running.load(std::memory_order_relaxed));
ut_ad(!srv_undo_sources);
switch (srv_operation) {
diff --git a/storage/innobase/sync/sync0arr.cc b/storage/innobase/sync/sync0arr.cc
index b126a2f4ba0..9221a643213 100644
--- a/storage/innobase/sync/sync0arr.cc
+++ b/storage/innobase/sync/sync0arr.cc
@@ -76,8 +76,8 @@ keep the global wait array for the sake of diagnostics and also to avoid
infinite wait The error_monitor thread scans the global wait array to signal
any waiting threads who have missed the signal. */
-typedef SyncArrayMutex::MutexType WaitMutex;
-typedef BlockSyncArrayMutex::MutexType BlockWaitMutex;
+typedef TTASEventMutex<GenericPolicy> WaitMutex;
+typedef TTASEventMutex<BlockMutexPolicy> BlockWaitMutex;
/** The latch types that use the sync array. */
union sync_object_t {
@@ -499,7 +499,7 @@ sync_array_cell_print(
WaitMutex* mutex = cell->latch.mutex;
const WaitMutex::MutexPolicy& policy = mutex->policy();
#ifdef UNIV_DEBUG
- const char* name = policy.get_enter_filename();
+ const char* name = policy.context.get_enter_filename();
if (name == NULL) {
/* The mutex might have been released. */
name = "NULL";
@@ -518,7 +518,7 @@ sync_array_cell_print(
mutex->state()
#ifdef UNIV_DEBUG
,name,
- policy.get_enter_line()
+ policy.context.get_enter_line()
#endif /* UNIV_DEBUG */
);
}
@@ -528,7 +528,7 @@ sync_array_cell_print(
const BlockWaitMutex::MutexPolicy& policy =
mutex->policy();
#ifdef UNIV_DEBUG
- const char* name = policy.get_enter_filename();
+ const char* name = policy.context.get_enter_filename();
if (name == NULL) {
/* The mutex might have been released. */
name = "NULL";
@@ -546,7 +546,7 @@ sync_array_cell_print(
(ulong) mutex->state()
#ifdef UNIV_DEBUG
,name,
- (ulong) policy.get_enter_line()
+ (ulong) policy.context.get_enter_line()
#endif /* UNIV_DEBUG */
);
} else if (type == RW_LOCK_X
@@ -591,8 +591,8 @@ sync_array_cell_print(
#endif
"\n",
rw_lock_get_reader_count(rwlock),
- my_atomic_load32_explicit(&rwlock->waiters, MY_MEMORY_ORDER_RELAXED),
- my_atomic_load32_explicit(&rwlock->lock_word, MY_MEMORY_ORDER_RELAXED),
+ rwlock->waiters.load(std::memory_order_relaxed),
+ rwlock->lock_word.load(std::memory_order_relaxed),
innobase_basename(rwlock->last_x_file_name),
rwlock->last_x_line
#if 0 /* JAN: TODO: FIX LATER */
@@ -738,7 +738,7 @@ sync_array_detect_deadlock(
const WaitMutex::MutexPolicy& policy = mutex->policy();
if (mutex->state() != MUTEX_STATE_UNLOCKED) {
- thread = policy.get_thread_id();
+ thread = policy.context.get_thread_id();
/* Note that mutex->thread_id above may be
also OS_THREAD_ID_UNDEFINED, because the
@@ -753,7 +753,7 @@ sync_array_detect_deadlock(
if (ret) {
const char* name;
- name = policy.get_enter_filename();
+ name = policy.context.get_enter_filename();
if (name == NULL) {
/* The mutex might have been
@@ -765,7 +765,7 @@ sync_array_detect_deadlock(
<< "Mutex " << mutex << " owned by"
" thread " << os_thread_pf(thread)
<< " file " << name << " line "
- << policy.get_enter_line();
+ << policy.context.get_enter_line();
sync_array_cell_print(stderr, cell);
@@ -785,7 +785,7 @@ sync_array_detect_deadlock(
mutex->policy();
if (mutex->state() != MUTEX_STATE_UNLOCKED) {
- thread = policy.get_thread_id();
+ thread = policy.context.get_thread_id();
/* Note that mutex->thread_id above may be
also OS_THREAD_ID_UNDEFINED, because the
@@ -800,7 +800,7 @@ sync_array_detect_deadlock(
if (ret) {
const char* name;
- name = policy.get_enter_filename();
+ name = policy.context.get_enter_filename();
if (name == NULL) {
/* The mutex might have been
@@ -812,7 +812,7 @@ sync_array_detect_deadlock(
<< "Mutex " << mutex << " owned by"
" thread " << os_thread_pf(thread)
<< " file " << name << " line "
- << policy.get_enter_line();
+ << policy.context.get_enter_line();
return(true);
@@ -970,7 +970,7 @@ sync_array_print_long_waits_low(
ulint i;
/* For huge tables, skip the check during CHECK TABLE etc... */
- if (fatal_timeout > SRV_SEMAPHORE_WAIT_EXTENSION) {
+ if (btr_validate_index_running) {
return(false);
}
@@ -1379,9 +1379,9 @@ sync_arr_fill_sys_semphore_waits_table(
//fields[SYS_SEMAPHORE_WAITS_HOLDER_LINE]->set_notnull();
OK(field_store_ulint(fields[SYS_SEMAPHORE_WAITS_READERS], rw_lock_get_reader_count(rwlock)));
OK(field_store_ulint(fields[SYS_SEMAPHORE_WAITS_WAITERS_FLAG],
- my_atomic_load32_explicit(&rwlock->waiters, MY_MEMORY_ORDER_RELAXED)));
+ rwlock->waiters.load(std::memory_order_relaxed)));
OK(field_store_ulint(fields[SYS_SEMAPHORE_WAITS_LOCK_WORD],
- my_atomic_load32_explicit(&rwlock->lock_word, MY_MEMORY_ORDER_RELAXED)));
+ rwlock->lock_word.load(std::memory_order_relaxed)));
OK(field_store_string(fields[SYS_SEMAPHORE_WAITS_LAST_WRITER_FILE], innobase_basename(rwlock->last_x_file_name)));
OK(fields[SYS_SEMAPHORE_WAITS_LAST_WRITER_LINE]->store(rwlock->last_x_line, true));
fields[SYS_SEMAPHORE_WAITS_LAST_WRITER_LINE]->set_notnull();
diff --git a/storage/innobase/sync/sync0rw.cc b/storage/innobase/sync/sync0rw.cc
index 9304fa66900..0f78136c71c 100644
--- a/storage/innobase/sync/sync0rw.cc
+++ b/storage/innobase/sync/sync0rw.cc
@@ -205,8 +205,8 @@ rw_lock_create_func(
/* If this is the very first time a synchronization object is
created, then the following call initializes the sync system. */
- lock->lock_word = X_LOCK_DECR;
- lock->waiters = 0;
+ lock->lock_word.store(X_LOCK_DECR, std::memory_order_relaxed);
+ lock->waiters.store(0, std::memory_order_relaxed);
lock->sx_recursive = 0;
lock->writer_thread= 0;
@@ -257,8 +257,7 @@ rw_lock_free_func(
rw_lock_t* lock) /*!< in/out: rw-lock */
{
ut_ad(rw_lock_validate(lock));
- ut_a(my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED) == X_LOCK_DECR);
+ ut_a(lock->lock_word.load(std::memory_order_relaxed) == X_LOCK_DECR);
mutex_enter(&rw_lock_list_mutex);
@@ -306,8 +305,7 @@ lock_loop:
/* Spin waiting for the writer field to become free */
HMT_low();
while (i < srv_n_spin_wait_rounds &&
- my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED) <= 0) {
+ lock->lock_word.load(std::memory_order_relaxed) <= 0) {
ut_delay(srv_spin_wait_delay);
i++;
}
@@ -347,7 +345,7 @@ lock_loop:
/* Set waiters before checking lock_word to ensure wake-up
signal is sent. This may lead to some unnecessary signals. */
- my_atomic_fas32_explicit(&lock->waiters, 1, MY_MEMORY_ORDER_ACQUIRE);
+ lock->waiters.exchange(1, std::memory_order_acquire);
if (rw_lock_s_lock_low(lock, pass, file_name, line)) {
@@ -425,10 +423,10 @@ rw_lock_x_lock_wait_func(
sync_array_t* sync_arr;
int64_t count_os_wait = 0;
- ut_ad(my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) <= threshold);
+ ut_ad(lock->lock_word.load(std::memory_order_relaxed) <= threshold);
HMT_low();
- while (my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) < threshold) {
+ while (lock->lock_word.load(std::memory_order_relaxed) < threshold) {
ut_delay(srv_spin_wait_delay);
if (i < srv_n_spin_wait_rounds) {
@@ -447,7 +445,7 @@ rw_lock_x_lock_wait_func(
i = 0;
/* Check lock_word to ensure wake-up isn't missed.*/
- if (my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) < threshold) {
+ if (lock->lock_word.load(std::memory_order_relaxed) < threshold) {
++count_os_wait;
@@ -537,18 +535,17 @@ rw_lock_x_lock_low(
file_name, line);
} else {
- int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
/* At least one X lock by this thread already
exists. Add another. */
if (lock_word == 0
|| lock_word == -X_LOCK_HALF_DECR) {
- my_atomic_add32_explicit(&lock->lock_word, -X_LOCK_DECR,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_sub(X_LOCK_DECR,
+ std::memory_order_relaxed);
} else {
ut_ad(lock_word <= -X_LOCK_DECR);
- my_atomic_add32_explicit(&lock->lock_word, -1,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_sub(1,
+ std::memory_order_relaxed);
}
}
@@ -620,10 +617,10 @@ rw_lock_sx_lock_low(
read and write to the lock_word. */
#ifdef UNIV_DEBUG
- int32_t lock_word =
+ auto lock_word =
#endif
- my_atomic_add32_explicit(&lock->lock_word, -X_LOCK_HALF_DECR,
- MY_MEMORY_ORDER_RELAXED);
+ lock->lock_word.fetch_sub(X_LOCK_HALF_DECR,
+ std::memory_order_relaxed);
ut_ad((lock_word == 0)
|| ((lock_word <= -X_LOCK_DECR)
@@ -691,7 +688,7 @@ lock_loop:
/* Spin waiting for the lock_word to become free */
HMT_low();
while (i < srv_n_spin_wait_rounds
- && my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) <= X_LOCK_HALF_DECR) {
+ && lock->lock_word.load(std::memory_order_relaxed) <= X_LOCK_HALF_DECR) {
ut_delay(srv_spin_wait_delay);
i++;
}
@@ -716,7 +713,7 @@ lock_loop:
/* Waiters must be set before checking lock_word, to ensure signal
is sent. This could lead to a few unnecessary wake-up signals. */
- my_atomic_fas32_explicit(&lock->waiters, 1, MY_MEMORY_ORDER_ACQUIRE);
+ lock->waiters.exchange(1, std::memory_order_acquire);
if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
sync_array_free_cell(sync_arr, cell);
@@ -792,7 +789,7 @@ lock_loop:
/* Spin waiting for the lock_word to become free */
while (i < srv_n_spin_wait_rounds
- && my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) <= X_LOCK_HALF_DECR) {
+ && lock->lock_word.load(std::memory_order_relaxed) <= X_LOCK_HALF_DECR) {
ut_delay(srv_spin_wait_delay);
i++;
}
@@ -816,7 +813,7 @@ lock_loop:
/* Waiters must be set before checking lock_word, to ensure signal
is sent. This could lead to a few unnecessary wake-up signals. */
- my_atomic_fas32_explicit(&lock->waiters, 1, MY_MEMORY_ORDER_ACQUIRE);
+ lock->waiters.exchange(1, std::memory_order_acquire);
if (rw_lock_sx_lock_low(lock, pass, file_name, line)) {
@@ -859,12 +856,10 @@ rw_lock_validate(
ut_ad(lock);
- lock_word = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
- MY_MEMORY_ORDER_RELAXED);
+ lock_word = lock->lock_word.load(std::memory_order_relaxed);
ut_ad(lock->magic_n == RW_LOCK_MAGIC_N);
- ut_ad(my_atomic_load32_explicit(const_cast<int32_t*>(&lock->waiters),
- MY_MEMORY_ORDER_RELAXED) < 2);
+ ut_ad(lock->waiters.load(std::memory_order_relaxed) < 2);
ut_ad(lock_word > -(2 * X_LOCK_DECR));
ut_ad(lock_word <= X_LOCK_DECR);
@@ -927,8 +922,7 @@ rw_lock_add_debug_info(
rw_lock_debug_mutex_exit();
if (pass == 0 && lock_type != RW_LOCK_X_WAIT) {
- int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
- MY_MEMORY_ORDER_RELAXED);
+ auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
/* Recursive x while holding SX
(lock_type == RW_LOCK_X && lock_word == -X_LOCK_HALF_DECR)
@@ -1094,11 +1088,11 @@ rw_lock_list_print_info(
count++;
- if (my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word), MY_MEMORY_ORDER_RELAXED) != X_LOCK_DECR) {
+ if (lock->lock_word.load(std::memory_order_relaxed) != X_LOCK_DECR) {
fprintf(file, "RW-LOCK: %p ", (void*) lock);
- if (int32_t waiters= my_atomic_load32_explicit(const_cast<int32_t*>(&lock->waiters), MY_MEMORY_ORDER_RELAXED)) {
+ if (int32_t waiters= lock->waiters.load(std::memory_order_relaxed)) {
fprintf(file, " (%d waiters)\n", waiters);
} else {
putc('\n', file);
@@ -1162,10 +1156,10 @@ rw_lock_debug_print(
fprintf(f, "\n");
}
-/** Print where it was locked from
+/** Print the rw-lock information.
@return the string representation */
std::string
-rw_lock_t::locked_from() const
+rw_lock_t::to_string() const
{
/* Note: For X locks it can be locked from multiple places because
the same thread can call X lock recursively. */
@@ -1175,6 +1169,11 @@ rw_lock_t::locked_from() const
ut_ad(rw_lock_validate(this));
+ msg << "RW-LATCH: "
+ << "thread id " << os_thread_pf(os_thread_get_curr_id())
+ << " addr: " << this
+ << " Locked from: ";
+
rw_lock_debug_mutex_enter();
for (rw_lock_debug_t* info = UT_LIST_GET_FIRST(debug_list);
@@ -1197,19 +1196,4 @@ rw_lock_t::locked_from() const
return(msg.str());
}
-
-/** Print the rw-lock information.
-@return the string representation */
-std::string
-rw_lock_t::to_string() const
-{
- std::ostringstream msg;
-
- msg << "RW-LATCH: "
- << "thread id " << os_thread_pf(os_thread_get_curr_id())
- << " addr: " << this
- << " Locked from: " << locked_from().c_str();
-
- return(msg.str());
-}
#endif /* UNIV_DEBUG */
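
Editor's note: the sync0rw.cc hunks above convert lock_word and waiters to std::atomic while preserving the existing protocol: a waiter publishes waiters = 1 with an acquire exchange before re-checking lock_word, so a concurrent release (which clears waiters and signals) cannot slip through unnoticed. A simplified, self-contained sketch of that handshake (field names reused for readability; the struct is not rw_lock_t):

#include <atomic>
#include <cstdint>

struct rw_lock_sketch {
  std::atomic<int32_t> lock_word{0x20000000};  // roughly X_LOCK_DECR: fully unlocked
  std::atomic<int32_t> waiters{0};
};

// Waiter side, cf. rw_lock_s_lock_spin()/rw_lock_x_lock_func():
// set waiters first, then re-check lock_word before going to sleep.
static bool retry_after_registering_wait(rw_lock_sketch& l) {
  l.waiters.exchange(1, std::memory_order_acquire);
  return l.lock_word.load(std::memory_order_relaxed) > 0;
}

int main() {
  rw_lock_sketch l;
  return retry_after_registering_wait(l) ? 0 : 1;
}
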
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index defe2c08507..2b03b18d97c 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -165,8 +165,6 @@ void purge_sys_t::create()
ut_ad(event);
m_paused= 0;
query= purge_graph_build();
- n_submitted= 0;
- n_completed= 0;
next_stored= false;
rseg= NULL;
page_no= 0;
@@ -175,7 +173,8 @@ void purge_sys_t::create()
hdr_offset= 0;
rw_lock_create(trx_purge_latch_key, &latch, SYNC_PURGE_LATCH);
mutex_create(LATCH_ID_PURGE_SYS_PQ, &pq_mutex);
- undo_trunc.create();
+ truncate.current= NULL;
+ truncate.last= NULL;
}
/** Close the purge subsystem on shutdown. */
@@ -184,7 +183,8 @@ void purge_sys_t::close()
ut_ad(this == &purge_sys);
if (!event) return;
- m_enabled= false;
+ ut_ad(!enabled());
+ ut_ad(n_tasks.load(std::memory_order_relaxed) == 0);
trx_t* trx = query->trx;
que_graph_free(query);
ut_ad(!trx->id);
@@ -311,7 +311,7 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr)
rseg->needs_purge = true;
}
- trx_sys.history_insert();
+ trx_sys.rseg_history_len++;
if (undo->state == TRX_UNDO_CACHED) {
UT_LIST_ADD_FIRST(rseg->undo_cached, undo);
@@ -337,7 +337,7 @@ trx_purge_remove_log_hdr(
{
flst_remove(rseg_hdr + TRX_RSEG_HISTORY,
log_hdr + TRX_UNDO_HISTORY_NODE, mtr);
- trx_sys.history_remove();
+ trx_sys.rseg_history_len--;
}
/** Free an undo log segment, and remove the header from the history list.
@@ -507,309 +507,22 @@ func_exit:
goto loop;
}
-/** UNDO log truncate logger. Needed to track state of truncate during crash.
-An auxiliary redo log file undo_<space_id>_trunc.log will created while the
-truncate of the UNDO is in progress. This file is required during recovery
-to complete the truncate. */
-
-namespace undo {
- /** Magic Number to indicate truncate action is complete. */
- static const ib_uint32_t s_magic = 76845412;
-
- /** Populate log file name based on space_id
- @param[in] space_id id of the undo tablespace.
- @return DB_SUCCESS or error code */
- static dberr_t populate_log_file_name(
- ulint space_id,
- char*& log_file_name)
- {
- static const char s_log_prefix[] = "undo_";
- static const char s_log_ext[] = "trunc.log";
-
- ulint log_file_name_sz = strlen(srv_log_group_home_dir)
- + (22 - 1 /* NUL */
- + sizeof s_log_prefix + sizeof s_log_ext);
-
- log_file_name = new (std::nothrow) char[log_file_name_sz];
- if (log_file_name == 0) {
- return(DB_OUT_OF_MEMORY);
- }
-
- memset(log_file_name, 0, log_file_name_sz);
-
- strcpy(log_file_name, srv_log_group_home_dir);
- ulint log_file_name_len = strlen(log_file_name);
-
- if (log_file_name[log_file_name_len - 1]
- != OS_PATH_SEPARATOR) {
-
- log_file_name[log_file_name_len]
- = OS_PATH_SEPARATOR;
- log_file_name_len = strlen(log_file_name);
- }
-
- snprintf(log_file_name + log_file_name_len,
- log_file_name_sz - log_file_name_len,
- "%s" ULINTPF "_%s", s_log_prefix,
- space_id, s_log_ext);
-
- return(DB_SUCCESS);
- }
-
- /** Mark completion of undo truncate action by writing magic number to
- the log file and then removing it from the disk.
- If we are going to remove it from disk then why write magic number ?
- This is to safeguard from unlink (file-system) anomalies that will keep
- the link to the file even after unlink action is successfull and
- ref-count = 0.
- @param[in] space_id id of the undo tablespace to truncate.*/
- void done(
- ulint space_id)
- {
- dberr_t err;
- char* log_file_name;
-
- /* Step-1: Create the log file name using the pre-decided
- prefix/suffix and table id of undo tablepsace to truncate. */
- err = populate_log_file_name(space_id, log_file_name);
- if (err != DB_SUCCESS) {
- return;
- }
-
- /* Step-2: Open log file and write magic number to
- indicate done phase. */
- bool ret;
- os_file_t handle =
- os_file_create_simple_no_error_handling(
- innodb_log_file_key, log_file_name,
- OS_FILE_OPEN, OS_FILE_READ_WRITE,
- srv_read_only_mode, &ret);
-
- if (!ret) {
- os_file_delete(innodb_log_file_key, log_file_name);
- delete[] log_file_name;
- return;
- }
-
- ulint sz = srv_page_size;
- void* buf = ut_zalloc_nokey(sz + srv_page_size);
- if (buf == NULL) {
- os_file_close(handle);
- os_file_delete(innodb_log_file_key, log_file_name);
- delete[] log_file_name;
- return;
- }
-
- byte* log_buf = static_cast<byte*>(
- ut_align(buf, srv_page_size));
-
- mach_write_to_4(log_buf, undo::s_magic);
-
- IORequest request(IORequest::WRITE);
-
- err = os_file_write(
- request, log_file_name, handle, log_buf, 0, sz);
-
- ut_ad(err == DB_SUCCESS);
-
- os_file_flush(handle);
- os_file_close(handle);
-
- ut_free(buf);
- os_file_delete(innodb_log_file_key, log_file_name);
- delete[] log_file_name;
- }
-
- /** Check if TRUNCATE_DDL_LOG file exist.
- @param[in] space_id id of the undo tablespace.
- @return true if exist else false. */
- bool is_log_present(
- ulint space_id)
- {
- dberr_t err;
- char* log_file_name;
-
- /* Step-1: Populate log file name. */
- err = populate_log_file_name(space_id, log_file_name);
- if (err != DB_SUCCESS) {
- return(false);
- }
-
- /* Step-2: Check for existence of the file. */
- bool exist;
- os_file_type_t type;
- os_file_status(log_file_name, &exist, &type);
-
- /* Step-3: If file exists, check it for presence of magic
- number. If found, then delete the file and report file
- doesn't exist as presence of magic number suggest that
- truncate action was complete. */
-
- if (exist) {
- bool ret;
- os_file_t handle =
- os_file_create_simple_no_error_handling(
- innodb_log_file_key, log_file_name,
- OS_FILE_OPEN, OS_FILE_READ_WRITE,
- srv_read_only_mode, &ret);
- if (!ret) {
- os_file_delete(innodb_log_file_key,
- log_file_name);
- delete[] log_file_name;
- return(false);
- }
-
- ulint sz = srv_page_size;
- void* buf = ut_zalloc_nokey(sz + srv_page_size);
- if (buf == NULL) {
- os_file_close(handle);
- os_file_delete(innodb_log_file_key,
- log_file_name);
- delete[] log_file_name;
- return(false);
- }
-
- byte* log_buf = static_cast<byte*>(
- ut_align(buf, srv_page_size));
-
- IORequest request(IORequest::READ);
-
- dberr_t err;
-
- err = os_file_read(request, handle, log_buf, 0, sz);
-
- os_file_close(handle);
-
- if (err != DB_SUCCESS) {
-
- ib::info()
- << "Unable to read '"
- << log_file_name << "' : "
- << ut_strerr(err);
-
- os_file_delete(
- innodb_log_file_key, log_file_name);
-
- ut_free(buf);
-
- delete[] log_file_name;
-
- return(false);
- }
-
- ulint magic_no = mach_read_from_4(log_buf);
-
- ut_free(buf);
-
- if (magic_no == undo::s_magic) {
- /* Found magic number. */
- os_file_delete(innodb_log_file_key,
- log_file_name);
- delete[] log_file_name;
- return(false);
- }
- }
-
- delete[] log_file_name;
-
- return(exist);
- }
-};
-
-/** Iterate over all the UNDO tablespaces and check if any of the UNDO
-tablespace qualifies for TRUNCATE (size > threshold).
-@param[in,out] undo_trunc undo truncate tracker */
-static
-void
-trx_purge_mark_undo_for_truncate(
- undo::Truncate* undo_trunc)
-{
- /* Step-1: If UNDO Tablespace
- - already marked for truncate (OR)
- - truncate disabled
- return immediately else search for qualifying tablespace. */
- if (undo_trunc->is_marked() || !srv_undo_log_truncate) {
- return;
- }
-
- /* Step-2: Validation/Qualification checks
- a. At-least 2 UNDO tablespaces so even if one UNDO tablespace
- is being truncated server can continue to operate.
- b. At-least 2 persistent UNDO logs (besides the default rseg-0)
- b. At-least 1 UNDO tablespace size > threshold. */
- if (srv_undo_tablespaces_active < 2 || srv_undo_logs < 3) {
- return;
- }
-
- /* Avoid bias selection and so start the scan from immediate next
- of last selected UNDO tablespace for truncate. */
- ulint space_id = undo_trunc->get_scan_start();
-
- for (ulint i = 1; i <= srv_undo_tablespaces_active; i++) {
-
- if (fil_space_get_size(space_id)
- > (srv_max_undo_log_size >> srv_page_size_shift)) {
- /* Tablespace qualifies for truncate. */
- undo_trunc->mark(space_id);
- undo::Truncate::add_space_to_trunc_list(space_id);
- break;
- }
-
- space_id = ((space_id + 1) % (srv_undo_tablespaces_active + 1));
- if (space_id == 0) {
- /* Note: UNDO tablespace ids starts from 1. */
- ++space_id;
- }
- }
-
- /* Couldn't make any selection. */
- if (!undo_trunc->is_marked()) {
- return;
- }
-
- DBUG_LOG("undo",
- "marking for truncate UNDO tablespace "
- << undo_trunc->get_marked_space_id());
-
- /* Step-3: Iterate over all the rsegs of selected UNDO tablespace
- and mark them temporarily unavailable for allocation.*/
- for (ulint i = 0; i < TRX_SYS_N_RSEGS; ++i) {
- if (trx_rseg_t* rseg = trx_sys.rseg_array[i]) {
- ut_ad(rseg->is_persistent());
- if (rseg->space->id
- == undo_trunc->get_marked_space_id()) {
-
- /* Once set this rseg will not be allocated
- to new booting transaction but we will wait
- for existing active transaction to finish. */
- rseg->skip_allocation = true;
- undo_trunc->add_rseg_to_trunc(rseg);
- }
- }
- }
-}
-
-undo::undo_spaces_t undo::Truncate::s_spaces_to_truncate;
-
/** Cleanse purge queue to remove the rseg that reside in undo-tablespace
marked for truncate.
-@param[in,out] undo_trunc undo truncate tracker */
-static
-void
-trx_purge_cleanse_purge_queue(
- undo::Truncate* undo_trunc)
+@param[in] space undo tablespace being truncated */
+static void trx_purge_cleanse_purge_queue(const fil_space_t& space)
{
- mutex_enter(&purge_sys.pq_mutex);
typedef std::vector<TrxUndoRsegs> purge_elem_list_t;
purge_elem_list_t purge_elem_list;
+ mutex_enter(&purge_sys.pq_mutex);
+
/* Remove rseg instances that are in the purge queue before we start
truncate of corresponding UNDO truncate. */
while (!purge_sys.purge_queue.empty()) {
purge_elem_list.push_back(purge_sys.purge_queue.top());
purge_sys.purge_queue.pop();
}
- ut_ad(purge_sys.purge_queue.empty());
for (purge_elem_list_t::iterator it = purge_elem_list.begin();
it != purge_elem_list.end();
@@ -818,9 +531,7 @@ trx_purge_cleanse_purge_queue(
for (TrxUndoRsegs::iterator it2 = it->begin();
it2 != it->end();
++it2) {
-
- if ((*it2)->space->id
- == undo_trunc->get_marked_space_id()) {
+ if ((*it2)->space == &space) {
it->erase(it2);
break;
}
@@ -830,278 +541,285 @@ trx_purge_cleanse_purge_queue(
purge_sys.purge_queue.push(*it);
}
}
+
mutex_exit(&purge_sys.pq_mutex);
}
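
Editor's note: trx_purge_cleanse_purge_queue() drains the purge priority queue, filters out entries that belong to the tablespace being truncated, and pushes the rest back. A stripped-down sketch of the same drain/filter/reinsert pattern; note that the real code only erases the matching rseg from each TrxUndoRsegs element, whereas this toy version drops whole elements and uses plain ints:

#include <queue>
#include <vector>

static void cleanse_purge_queue(std::priority_queue<int>& pq, int truncated_space) {
  std::vector<int> kept;
  while (!pq.empty()) {              // drained under purge_sys.pq_mutex in the real code
    if (pq.top() != truncated_space) kept.push_back(pq.top());
    pq.pop();
  }
  for (int v : kept) pq.push(v);     // re-insert the survivors
}

int main() {
  std::priority_queue<int> pq;
  pq.push(1); pq.push(2); pq.push(2); pq.push(3);
  cleanse_purge_queue(pq, 2);
  return pq.size() == 2 ? 0 : 1;
}
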
-/** Iterate over selected UNDO tablespace and check if all the rsegs
-that resides in the tablespace are free.
-@param[in] limit truncate_limit
-@param[in,out] undo_trunc undo truncate tracker */
-static
-void
-trx_purge_initiate_truncate(
- const purge_sys_t::iterator& limit,
- undo::Truncate* undo_trunc)
+/**
+Removes unnecessary history data from rollback segments. NOTE that when this
+function is called, the caller must not have any latches on undo log pages!
+*/
+static void trx_purge_truncate_history()
{
- /* Step-1: Early check to findout if any of the the UNDO tablespace
- is marked for truncate. */
- if (!undo_trunc->is_marked()) {
- /* No tablespace marked for truncate yet. */
- return;
- }
-
- /* Step-2: Scan over each rseg and ensure that it doesn't hold any
- active undo records. */
- bool all_free = true;
-
- for (ulint i = 0; i < undo_trunc->rsegs_size() && all_free; ++i) {
-
- trx_rseg_t* rseg = undo_trunc->get_ith_rseg(i);
+ ut_ad(purge_sys.head <= purge_sys.tail);
+ purge_sys_t::iterator& head = purge_sys.head.commit
+ ? purge_sys.head : purge_sys.tail;
- mutex_enter(&rseg->mutex);
+ if (head.trx_no() >= purge_sys.view.low_limit_no()) {
+ /* This is sometimes necessary. TODO: find out why. */
+ head.reset_trx_no(purge_sys.view.low_limit_no());
+ head.undo_no = 0;
+ }
- if (rseg->trx_ref_count > 0) {
- /* This rseg is still being held by an active
- transaction. */
- all_free = false;
- mutex_exit(&rseg->mutex);
- continue;
+ for (ulint i = 0; i < TRX_SYS_N_RSEGS; ++i) {
+ if (trx_rseg_t* rseg = trx_sys.rseg_array[i]) {
+ ut_ad(rseg->id == i);
+ trx_purge_truncate_rseg_history(*rseg, head);
}
+ }
- ut_ad(rseg->trx_ref_count == 0);
- ut_ad(rseg->skip_allocation);
-
- ulint size_of_rsegs = rseg->curr_size;
-
- if (size_of_rsegs == 1) {
- mutex_exit(&rseg->mutex);
- continue;
- } else {
-
- /* There could be cached undo segment. Check if records
- in these segments can be purged. Normal purge history
- will not touch these cached segment. */
- ulint cached_undo_size = 0;
+ if (srv_undo_tablespaces_active < 2) {
+ return;
+ }
- for (trx_undo_t* undo =
- UT_LIST_GET_FIRST(rseg->undo_cached);
- undo != NULL && all_free;
- undo = UT_LIST_GET_NEXT(undo_list, undo)) {
+ while (srv_undo_log_truncate && srv_undo_logs >= 3) {
+ if (!purge_sys.truncate.current) {
+ const ulint threshold = ulint(srv_max_undo_log_size
+ >> srv_page_size_shift);
+ for (ulint i = purge_sys.truncate.last
+ ? purge_sys.truncate.last->id
+ - srv_undo_space_id_start
+ : 0, j = i;; ) {
+ ulint space_id = srv_undo_space_id_start + i;
+ ut_ad(srv_is_undo_tablespace(space_id));
+
+ if (fil_space_get_size(space_id)
+ > threshold) {
+ purge_sys.truncate.current
+ = fil_space_get(space_id);
+ break;
+ }
- if (limit.trx_no() < undo->trx_id) {
- all_free = false;
- } else {
- cached_undo_size += undo->size;
+ ++i;
+ i %= srv_undo_tablespaces_active;
+ if (i == j) {
+ break;
}
}
+ }
- ut_ad(size_of_rsegs >= (cached_undo_size + 1));
+ if (!purge_sys.truncate.current) {
+ return;
+ }
- if (size_of_rsegs > (cached_undo_size + 1)) {
- /* There are pages besides cached pages that
- still hold active data. */
- all_free = false;
+ const fil_space_t& space = *purge_sys.truncate.current;
+		/* An undo tablespace is always a single file. */
+ ut_a(UT_LIST_GET_LEN(space.chain) == 1);
+ fil_node_t* file = UT_LIST_GET_FIRST(space.chain);
+ /* The undo tablespace files are never closed. */
+ ut_ad(file->is_open());
+
+ DBUG_LOG("undo", "marking for truncate: " << file->name);
+
+ for (ulint i = 0; i < TRX_SYS_N_RSEGS; ++i) {
+ if (trx_rseg_t* rseg = trx_sys.rseg_array[i]) {
+ ut_ad(rseg->is_persistent());
+ if (rseg->space == &space) {
+ /* Once set, this rseg will
+ not be allocated to subsequent
+ transactions, but we will wait
+ for existing active
+ transactions to finish. */
+ rseg->skip_allocation = true;
+ }
}
}
- mutex_exit(&rseg->mutex);
- }
-
- if (!all_free) {
- /* rseg still holds active data.*/
- return;
- }
-
-
- /* Step-3: Start the actual truncate.
- a. Remove rseg instance if added to purge queue before we
- initiate truncate.
- b. Execute actual truncate */
-
- const ulint space_id = undo_trunc->get_marked_space_id();
-
- ib::info() << "Truncating UNDO tablespace " << space_id;
-
- trx_purge_cleanse_purge_queue(undo_trunc);
-
- ut_a(srv_is_undo_tablespace(space_id));
-
- fil_space_t* space = fil_space_get(space_id);
-
- if (!space) {
-not_found:
- ib::error() << "Failed to find UNDO tablespace " << space_id;
- return;
- }
-
- /* Flush all to-be-discarded pages of the tablespace.
-
- During truncation, we do not want any writes to the
- to-be-discarded area, because we must set the space->size
- early in order to have deterministic page allocation.
-
- If a log checkpoint was completed at LSN earlier than our
- mini-transaction commit and the server was killed, then
- discarding the to-be-trimmed pages without flushing would
- break crash recovery. So, we cannot avoid the write. */
- {
- FlushObserver observer(
- space,
- UT_LIST_GET_FIRST(purge_sys.query->thrs)->graph->trx,
- NULL);
- buf_LRU_flush_or_remove_pages(space_id, &observer);
- }
-
- log_free_check();
+ for (ulint i = 0; i < TRX_SYS_N_RSEGS; ++i) {
+ trx_rseg_t* rseg = trx_sys.rseg_array[i];
+ if (!rseg || rseg->space != &space) {
+ continue;
+ }
+ mutex_enter(&rseg->mutex);
+ ut_ad(rseg->skip_allocation);
+ if (rseg->trx_ref_count) {
+not_free:
+ mutex_exit(&rseg->mutex);
+ return;
+ }
- /* Adjust the tablespace metadata. */
- space = fil_truncate_prepare(space_id);
+ if (rseg->curr_size != 1) {
+ /* Check if all segments are
+ cached and safe to remove. */
+ ulint cached = 0;
+
+ for (trx_undo_t* undo = UT_LIST_GET_FIRST(
+ rseg->undo_cached);
+ undo;
+ undo = UT_LIST_GET_NEXT(undo_list,
+ undo)) {
+ if (head.trx_no() < undo->trx_id) {
+ goto not_free;
+ } else {
+ cached += undo->size;
+ }
+ }
- if (!space) {
- goto not_found;
- }
+ ut_ad(rseg->curr_size > cached);
- /* Undo tablespace always are a single file. */
- ut_a(UT_LIST_GET_LEN(space->chain) == 1);
- fil_node_t* file = UT_LIST_GET_FIRST(space->chain);
- /* The undo tablespace files are never closed. */
- ut_ad(file->is_open());
+ if (rseg->curr_size > cached + 1) {
+ goto not_free;
+ }
+ }
- /* Re-initialize tablespace, in a single mini-transaction. */
- mtr_t mtr;
- const ulint size = SRV_UNDO_TABLESPACE_SIZE_IN_PAGES;
- mtr.start();
- mtr_x_lock(&space->latch, &mtr);
- fil_truncate_log(space, size, &mtr);
- fsp_header_init(space, size, &mtr);
- mutex_enter(&fil_system.mutex);
- space->size = file->size = size;
- mutex_exit(&fil_system.mutex);
-
- buf_block_t* sys_header = trx_sysf_get(&mtr);
-
- for (ulint i = 0; i < undo_trunc->rsegs_size(); ++i) {
- trx_rseg_t* rseg = undo_trunc->get_ith_rseg(i);
- buf_block_t* rblock = trx_rseg_header_create(
- space, rseg->id, sys_header, &mtr);
- ut_ad(rblock);
- rseg->page_no = rblock ? rblock->page.id.page_no() : FIL_NULL;
-
- /* Before re-initialization ensure that we free the existing
- structure. There can't be any active transactions. */
- ut_a(UT_LIST_GET_LEN(rseg->undo_list) == 0);
- ut_a(UT_LIST_GET_LEN(rseg->old_insert_list) == 0);
-
- trx_undo_t* next_undo;
-
- for (trx_undo_t* undo = UT_LIST_GET_FIRST(rseg->undo_cached);
- undo != NULL;
- undo = next_undo) {
-
- next_undo = UT_LIST_GET_NEXT(undo_list, undo);
- UT_LIST_REMOVE(rseg->undo_cached, undo);
- MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
- ut_free(undo);
+ mutex_exit(&rseg->mutex);
}
- UT_LIST_INIT(rseg->undo_list, &trx_undo_t::undo_list);
- UT_LIST_INIT(rseg->undo_cached, &trx_undo_t::undo_list);
- UT_LIST_INIT(rseg->old_insert_list, &trx_undo_t::undo_list);
-
- /* These were written by trx_rseg_header_create(). */
- ut_ad(!mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT
- + rblock->frame));
- ut_ad(!mach_read_from_4(TRX_RSEG + TRX_RSEG_HISTORY_SIZE
- + rblock->frame));
-
- /* Initialize the undo log lists according to the rseg header */
- rseg->curr_size = 1;
- rseg->trx_ref_count = 0;
- rseg->last_page_no = FIL_NULL;
- rseg->last_offset = 0;
- rseg->last_commit = 0;
- rseg->needs_purge = false;
- }
+ ib::info() << "Truncating " << file->name;
+ trx_purge_cleanse_purge_queue(space);
+
+ /* Flush all to-be-discarded pages of the tablespace.
+
+ During truncation, we do not want any writes to the
+ to-be-discarded area, because we must set the space.size
+ early in order to have deterministic page allocation.
+
+ If a log checkpoint was completed at LSN earlier than our
+ mini-transaction commit and the server was killed, then
+ discarding the to-be-trimmed pages without flushing would
+ break crash recovery. So, we cannot avoid the write. */
+ {
+ FlushObserver observer(
+ purge_sys.truncate.current,
+ UT_LIST_GET_FIRST(purge_sys.query->thrs)
+ ->graph->trx,
+ NULL);
+ buf_LRU_flush_or_remove_pages(space.id, &observer);
+ }
- mtr.commit();
- /* Write-ahead the redo log record. */
- log_write_up_to(mtr.commit_lsn(), true);
-
- /* Trim the file size. */
- os_file_truncate(file->name, file->handle,
- os_offset_t(size) << srv_page_size_shift, true);
-
- /* This is only executed by the srv_purge_coordinator_thread. */
- export_vars.innodb_undo_truncations++;
-
- /* TODO: PUNCH_HOLE the garbage (with write-ahead logging) */
-
- mutex_enter(&fil_system.mutex);
- ut_ad(space->stop_new_ops);
- ut_ad(space->is_being_truncated);
- space->stop_new_ops = false;
- space->is_being_truncated = false;
- mutex_exit(&fil_system.mutex);
-
- if (purge_sys.rseg != NULL
- && purge_sys.rseg->last_page_no == FIL_NULL) {
- /* If purge_sys.rseg is pointing to rseg that was recently
- truncated then move to next rseg element.
- Note: Ideally purge_sys.rseg should be NULL because purge
- should complete processing of all the records but there is
- purge_batch_size that can force the purge loop to exit before
- all the records are purged and in this case purge_sys.rseg
- could point to a valid rseg waiting for next purge cycle. */
- purge_sys.next_stored = false;
- purge_sys.rseg = NULL;
- }
+ log_free_check();
- DBUG_EXECUTE_IF("ib_undo_trunc",
- ib::info() << "ib_undo_trunc";
- log_write_up_to(LSN_MAX, true);
- DBUG_SUICIDE(););
+ /* Adjust the tablespace metadata. */
+ if (!fil_truncate_prepare(space.id)) {
+ ib::error() << "Failed to find UNDO tablespace "
+ << file->name;
+ return;
+ }
- /* Completed truncate. Now it is safe to re-use the tablespace. */
- for (ulint i = 0; i < undo_trunc->rsegs_size(); ++i) {
- trx_rseg_t* rseg = undo_trunc->get_ith_rseg(i);
- rseg->skip_allocation = false;
- }
+ /* Re-initialize tablespace, in a single mini-transaction. */
+ mtr_t mtr;
+ const ulint size = SRV_UNDO_TABLESPACE_SIZE_IN_PAGES;
+ mtr.start();
+ mtr_x_lock(&purge_sys.truncate.current->latch, &mtr);
+ fil_truncate_log(purge_sys.truncate.current, size, &mtr);
+ fsp_header_init(purge_sys.truncate.current, size, &mtr);
+ mutex_enter(&fil_system.mutex);
+ purge_sys.truncate.current->size = file->size = size;
+ mutex_exit(&fil_system.mutex);
+
+ buf_block_t* sys_header = trx_sysf_get(&mtr);
+
+ for (ulint i = 0; i < TRX_SYS_N_RSEGS; ++i) {
+ trx_rseg_t* rseg = trx_sys.rseg_array[i];
+ if (!rseg || rseg->space != &space) {
+ continue;
+ }
- ib::info() << "Truncated UNDO tablespace " << space_id;
+ ut_ad(rseg->is_persistent());
+ ut_d(const ulint old_page = rseg->page_no);
+
+ buf_block_t* rblock = trx_rseg_header_create(
+ purge_sys.truncate.current,
+ rseg->id, sys_header, &mtr);
+ ut_ad(rblock);
+ rseg->page_no = rblock
+ ? rblock->page.id.page_no() : FIL_NULL;
+ ut_ad(old_page == rseg->page_no);
+
+ /* Before re-initialization ensure that we
+ free the existing structure. There can't be
+ any active transactions. */
+ ut_a(UT_LIST_GET_LEN(rseg->undo_list) == 0);
+ ut_a(UT_LIST_GET_LEN(rseg->old_insert_list) == 0);
+
+ trx_undo_t* next_undo;
+
+ for (trx_undo_t* undo = UT_LIST_GET_FIRST(
+ rseg->undo_cached);
+ undo; undo = next_undo) {
+
+ next_undo = UT_LIST_GET_NEXT(undo_list, undo);
+ UT_LIST_REMOVE(rseg->undo_cached, undo);
+ MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
+ ut_free(undo);
+ }
- undo_trunc->reset();
- undo::Truncate::clear_trunc_list();
-}
+ UT_LIST_INIT(rseg->undo_list,
+ &trx_undo_t::undo_list);
+ UT_LIST_INIT(rseg->undo_cached,
+ &trx_undo_t::undo_list);
+ UT_LIST_INIT(rseg->old_insert_list,
+ &trx_undo_t::undo_list);
+
+ /* These were written by trx_rseg_header_create(). */
+ ut_ad(!mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT
+ + rblock->frame));
+ ut_ad(!mach_read_from_4(TRX_RSEG + TRX_RSEG_HISTORY_SIZE
+ + rblock->frame));
+
+ /* Initialize the undo log lists according to
+ the rseg header */
+ rseg->curr_size = 1;
+ rseg->trx_ref_count = 0;
+ rseg->last_page_no = FIL_NULL;
+ rseg->last_offset = 0;
+ rseg->last_commit = 0;
+ rseg->needs_purge = false;
+ }
-/**
-Removes unnecessary history data from rollback segments. NOTE that when this
-function is called, the caller must not have any latches on undo log pages!
-*/
-static void trx_purge_truncate_history()
-{
- ut_ad(purge_sys.head <= purge_sys.tail);
- purge_sys_t::iterator& head = purge_sys.head.commit
- ? purge_sys.head : purge_sys.tail;
+ mtr.commit();
+ /* Write-ahead the redo log record. */
+ log_write_up_to(mtr.commit_lsn(), true);
+
+ /* Trim the file size. */
+ os_file_truncate(file->name, file->handle,
+ os_offset_t(size) << srv_page_size_shift,
+ true);
+
+ /* This is only executed by srv_purge_coordinator_thread. */
+ export_vars.innodb_undo_truncations++;
+
+ /* TODO: PUNCH_HOLE the garbage (with write-ahead logging) */
+ mutex_enter(&fil_system.mutex);
+ ut_ad(&space == purge_sys.truncate.current);
+ ut_ad(space.stop_new_ops);
+ ut_ad(space.is_being_truncated);
+ purge_sys.truncate.current->stop_new_ops = false;
+ purge_sys.truncate.current->is_being_truncated = false;
+ mutex_exit(&fil_system.mutex);
+
+ if (purge_sys.rseg != NULL
+ && purge_sys.rseg->last_page_no == FIL_NULL) {
+ /* If purge_sys.rseg is pointing to rseg that
+ was recently truncated then move to next rseg
+ element. Note: Ideally purge_sys.rseg should
+ be NULL because purge should complete
+ processing of all the records but there is
+ purge_batch_size that can force the purge loop
+ to exit before all the records are purged and
+ in this case purge_sys.rseg could point to a
+ valid rseg waiting for next purge cycle. */
+ purge_sys.next_stored = false;
+ purge_sys.rseg = NULL;
+ }
- if (head.trx_no() >= purge_sys.view.low_limit_no()) {
- /* This is sometimes necessary. TODO: find out why. */
- head.reset_trx_no(purge_sys.view.low_limit_no());
- head.undo_no = 0;
- }
+ DBUG_EXECUTE_IF("ib_undo_trunc",
+ ib::info() << "ib_undo_trunc";
+ log_write_up_to(LSN_MAX, true);
+ DBUG_SUICIDE(););
- for (ulint i = 0; i < TRX_SYS_N_RSEGS; ++i) {
- if (trx_rseg_t* rseg = trx_sys.rseg_array[i]) {
- ut_ad(rseg->id == i);
- trx_purge_truncate_rseg_history(*rseg, head);
+ for (ulint i = 0; i < TRX_SYS_N_RSEGS; ++i) {
+ if (trx_rseg_t* rseg = trx_sys.rseg_array[i]) {
+ ut_ad(rseg->is_persistent());
+ if (rseg->space == &space) {
+ rseg->skip_allocation = false;
+ }
+ }
}
- }
- /* UNDO tablespace truncate. We will try to truncate as much as we
- can (greedy approach). This will ensure when the server is idle we
- try and truncate all the UNDO tablespaces. */
- for (ulint i = srv_undo_tablespaces_active; i--; ) {
- trx_purge_mark_undo_for_truncate(&purge_sys.undo_trunc);
- trx_purge_initiate_truncate(head, &purge_sys.undo_trunc);
+ ib::info() << "Truncated " << file->name;
+ purge_sys.truncate.last = purge_sys.truncate.current;
+ purge_sys.truncate.current = NULL;
}
}
@@ -1515,7 +1233,7 @@ trx_purge_dml_delay(void)
if (srv_max_purge_lag > 0) {
float ratio;
- ratio = float(trx_sys.history_size()) / srv_max_purge_lag;
+ ratio = float(trx_sys.rseg_history_len) / srv_max_purge_lag;
if (ratio > 1.0) {
/* If the history list length exceeds the
@@ -1541,8 +1259,7 @@ void
trx_purge_wait_for_workers_to_complete()
{
/* Ensure that the work queue empties out. */
- while (my_atomic_loadlint(&purge_sys.n_completed)
- != purge_sys.n_submitted) {
+ while (purge_sys.n_tasks.load(std::memory_order_acquire)) {
if (srv_get_task_queue_length() > 0) {
srv_release_threads(SRV_WORKER, 1);
@@ -1573,9 +1290,8 @@ trx_purge(
srv_dml_needed_delay = trx_purge_dml_delay();
- /* The number of tasks submitted should be completed. */
- ut_a(purge_sys.n_submitted
- == my_atomic_loadlint(&purge_sys.n_completed));
+ /* All submitted tasks should be completed. */
+ ut_ad(purge_sys.n_tasks.load(std::memory_order_relaxed) == 0);
rw_lock_x_lock(&purge_sys.latch);
trx_sys.clone_oldest_view();
@@ -1589,7 +1305,7 @@ trx_purge(
/* Fetch the UNDO recs that need to be purged. */
n_pages_handled = trx_purge_attach_undo_recs(n_purge_threads);
- purge_sys.n_submitted += n_purge_threads;
+ purge_sys.n_tasks.store(n_purge_threads - 1, std::memory_order_relaxed);
/* Submit tasks to workers queue if using multi-threaded purge. */
for (ulint i = n_purge_threads; --i; ) {
@@ -1602,14 +1318,9 @@ trx_purge(
que_run_threads(thr);
- my_atomic_addlint(&purge_sys.n_completed, 1);
-
- if (n_purge_threads > 1) {
- trx_purge_wait_for_workers_to_complete();
- }
+ trx_purge_wait_for_workers_to_complete();
- ut_a(purge_sys.n_submitted
- == my_atomic_loadlint(&purge_sys.n_completed));
+ ut_ad(purge_sys.n_tasks.load(std::memory_order_relaxed) == 0);
if (truncate) {
trx_purge_truncate_history();
@@ -1626,7 +1337,7 @@ void purge_sys_t::stop()
{
rw_lock_x_lock(&latch);
- if (!enabled_latched())
+ if (!enabled())
{
/* Shutdown must have been initiated during FLUSH TABLES FOR EXPORT. */
ut_ad(!srv_undo_sources);
@@ -1636,7 +1347,7 @@ void purge_sys_t::stop()
ut_ad(srv_n_purge_threads > 0);
- if (0 == my_atomic_add32_explicit(&m_paused, 1, MY_MEMORY_ORDER_RELAXED))
+ if (m_paused++ == 0)
{
/* We need to wake up the purge thread in case it is suspended, so
that it can acknowledge the state change. */
@@ -1670,8 +1381,7 @@ void purge_sys_t::resume()
return;
}
- int32_t paused= my_atomic_add32_explicit(&m_paused, -1,
- MY_MEMORY_ORDER_RELAXED);
+ int32_t paused= m_paused--;
ut_a(paused);
if (paused == 1)
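
Editor's note, closing out the trx0purge.cc changes: the n_submitted/n_completed pair becomes a single purge_sys.n_tasks counter that workers decrement with release semantics and the coordinator polls with acquire loads. A self-contained sketch of that hand-off (thread creation and the yield loop are simplifications, and the real coordinator stores n_purge_threads - 1 because it runs one task itself):

#include <atomic>
#include <thread>
#include <vector>

static std::atomic<unsigned> n_tasks{0};   // stand-in for purge_sys.n_tasks

static void worker() {
  // ... execute one purge task (que_run_threads) ...
  n_tasks.fetch_sub(1, std::memory_order_release);   // cf. srv_task_execute()
}

int main() {
  const unsigned n_purge_threads = 4;
  n_tasks.store(n_purge_threads, std::memory_order_relaxed);  // cf. trx_purge()
  std::vector<std::thread> workers;
  for (unsigned i = 0; i < n_purge_threads; i++) workers.emplace_back(worker);
  while (n_tasks.load(std::memory_order_acquire)) {  // cf. trx_purge_wait_for_workers_to_complete()
    std::this_thread::yield();
  }
  for (auto& t : workers) t.join();
  return 0;
}
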
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index 4af601441ab..9fdbe0a7aeb 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -39,9 +39,12 @@ Created 3/26/1996 Heikki Tuuri
#include "row0row.h"
#include "row0mysql.h"
-/** The search tuple corresponding to TRX_UNDO_INSERT_METADATA */
+/** The search tuple corresponding to TRX_UNDO_INSERT_METADATA. */
const dtuple_t trx_undo_metadata = {
- REC_INFO_METADATA, 0, 0,
+ /* This also works for REC_INFO_METADATA_ALTER, because the
+ delete-mark (REC_INFO_DELETED_FLAG) is ignored when searching. */
+ REC_INFO_METADATA_ADD,
+ 0, 0,
NULL, 0, NULL,
UT_LIST_NODE_T(dtuple_t)()
#ifdef UNIV_DEBUG
@@ -504,7 +507,7 @@ trx_undo_page_report_insert(
/* Store then the fields required to uniquely determine the record
to be inserted in the clustered index */
if (UNIV_UNLIKELY(clust_entry->info_bits != 0)) {
- ut_ad(clust_entry->info_bits == REC_INFO_METADATA);
+ ut_ad(clust_entry->is_metadata());
ut_ad(index->is_instant());
ut_ad(undo_block->frame[first_free + 2]
== TRX_UNDO_INSERT_REC);
@@ -714,7 +717,7 @@ trx_undo_rec_skip_row_ref(
log of an update or delete marking of a clustered index record.
@param[out] ext_buf buffer to hold the prefix data and BLOB pointer
@param[in] prefix_len prefix size to store in the undo log
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] field an externally stored column
@param[in,out] len input: length of field; output: used length of
ext_buf
@@ -724,13 +727,13 @@ byte*
trx_undo_page_fetch_ext(
byte* ext_buf,
ulint prefix_len,
- const page_size_t& page_size,
+ ulint zip_size,
const byte* field,
ulint* len)
{
/* Fetch the BLOB. */
ulint ext_len = btr_copy_externally_stored_field_prefix(
- ext_buf, prefix_len, page_size, field, *len);
+ ext_buf, prefix_len, zip_size, field, *len);
/* BLOBs should always be nonempty. */
ut_a(ext_len);
/* Append the BLOB pointer to the prefix. */
@@ -748,7 +751,7 @@ available
size, or NULL when should not fetch a longer
prefix
@param[in] prefix_len prefix size to store in the undo log
-@param[in] page_size page size
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in,out] field the locally stored part of the externally
stored column
@param[in,out] len length of field, in bytes
@@ -761,7 +764,7 @@ trx_undo_page_report_modify_ext(
byte* ptr,
byte* ext_buf,
ulint prefix_len,
- const page_size_t& page_size,
+ ulint zip_size,
const byte** field,
ulint* len,
spatial_status_t spatial_status)
@@ -803,7 +806,7 @@ trx_undo_page_report_modify_ext(
ptr += mach_write_compressed(ptr, *len);
*field = trx_undo_page_fetch_ext(ext_buf, prefix_len,
- page_size, *field, len);
+ zip_size, *field, len);
ptr += mach_write_compressed(ptr, *len + spatial_len);
} else {
@@ -816,7 +819,7 @@ trx_undo_page_report_modify_ext(
/** Get MBR from a Geometry column stored externally
@param[out] mbr MBR to fill
-@param[in] pagesize table pagesize
+@param[in] zip_size ROW_FORMAT=COMPRESSED page size, or 0
@param[in] field field contain the geometry data
@param[in,out] len length of field, in bytes
*/
@@ -824,17 +827,17 @@ static
void
trx_undo_get_mbr_from_ext(
/*======================*/
- double* mbr,
- const page_size_t& page_size,
- const byte* field,
- ulint* len)
+ double* mbr,
+ ulint zip_size,
+ const byte* field,
+ ulint* len)
{
uchar* dptr = NULL;
ulint dlen;
mem_heap_t* heap = mem_heap_create(100);
dptr = btr_copy_externally_stored_field(
- &dlen, field, page_size, *len, heap);
+ &dlen, field, zip_size, *len, heap);
if (dlen <= GEO_DATA_HEADER_SIZE) {
for (uint i = 0; i < SPDIMS; ++i) {
@@ -918,9 +921,9 @@ trx_undo_page_report_modify(
/* Store first some general parameters to the undo log */
if (!update) {
- ut_ad(!rec_get_deleted_flag(rec, dict_table_is_comp(table)));
+ ut_ad(!rec_is_delete_marked(rec, dict_table_is_comp(table)));
type_cmpl = TRX_UNDO_DEL_MARK_REC;
- } else if (rec_get_deleted_flag(rec, dict_table_is_comp(table))) {
+ } else if (rec_is_delete_marked(rec, dict_table_is_comp(table))) {
/* In delete-marked records, DB_TRX_ID must
always refer to an existing update_undo log record. */
ut_ad(row_get_rec_trx_id(rec, index, offsets));
@@ -949,9 +952,7 @@ trx_undo_page_report_modify(
*ptr++ = (byte) rec_get_info_bits(rec, dict_table_is_comp(table));
/* Store the values of the system columns */
- field = rec_get_nth_field(rec, offsets,
- dict_index_get_sys_col_pos(
- index, DATA_TRX_ID), &flen);
+ field = rec_get_nth_field(rec, offsets, index->db_trx_id(), &flen);
ut_ad(flen == DATA_TRX_ID_LEN);
trx_id = trx_read_trx_id(field);
@@ -965,9 +966,7 @@ trx_undo_page_report_modify(
}
ptr += mach_u64_write_compressed(ptr, trx_id);
- field = rec_get_nth_field(rec, offsets,
- dict_index_get_sys_col_pos(
- index, DATA_ROLL_PTR), &flen);
+ field = rec_get_nth_field(rec, offsets, index->db_roll_ptr(), &flen);
ut_ad(flen == DATA_ROLL_PTR_LEN);
ut_ad(memcmp(field, field_ref_zero, DATA_ROLL_PTR_LEN));
@@ -1034,20 +1033,35 @@ trx_undo_page_report_modify(
}
}
+ i = 0;
+
+ if (UNIV_UNLIKELY(update->is_alter_metadata())) {
+ ut_ad(update->n_fields >= 1);
+ ut_ad(!upd_fld_is_virtual_col(&update->fields[0]));
+ ut_ad(update->fields[0].field_no
+ == index->first_user_field());
+ ut_ad(!dfield_is_ext(&update->fields[0].new_val));
+ ut_ad(!dfield_is_null(&update->fields[0].new_val));
+ /* The instant ADD COLUMN metadata record does not
+ contain the BLOB. Do not write anything for it. */
+ i = !rec_is_alter_metadata(rec, *index);
+ n_updated -= i;
+ }
+
ptr += mach_write_compressed(ptr, n_updated);
- for (i = 0; i < upd_get_n_fields(update); i++) {
+ for (; i < upd_get_n_fields(update); i++) {
+ if (trx_undo_left(undo_block, ptr) < 5) {
+ return 0;
+ }
+
upd_field_t* fld = upd_get_nth_field(update, i);
bool is_virtual = upd_fld_is_virtual_col(fld);
ulint max_v_log_len = 0;
- ulint pos = fld->field_no;
-
- /* Write field number to undo log */
- if (trx_undo_left(undo_block, ptr) < 5) {
- return(0);
- }
+ ulint pos = fld->field_no;
+ const dict_col_t* col = NULL;
if (is_virtual) {
/* Skip the non-indexed column, during
@@ -1060,13 +1074,13 @@ trx_undo_page_report_modify(
/* add REC_MAX_N_FIELDS to mark this
is a virtual col */
- pos += REC_MAX_N_FIELDS;
- }
+ ptr += mach_write_compressed(
+ ptr, pos + REC_MAX_N_FIELDS);
- ptr += mach_write_compressed(ptr, pos);
+ if (trx_undo_left(undo_block, ptr) < 15) {
+ return 0;
+ }
- /* Save the old value of field */
- if (is_virtual) {
ut_ad(fld->field_no < table->n_v_def);
ptr = trx_undo_log_v_idx(undo_block, table,
@@ -1091,36 +1105,87 @@ trx_undo_page_report_modify(
flen = ut_min(
flen, max_v_log_len);
}
+
+ goto store_len;
+ }
+
+ if (UNIV_UNLIKELY(update->is_metadata())) {
+ ut_ad(pos >= index->first_user_field());
+ ut_ad(rec_is_metadata(rec, *index));
+
+ if (rec_is_alter_metadata(rec, *index)) {
+ ut_ad(update->is_alter_metadata());
+
+ field = rec_offs_n_fields(offsets)
+ > pos
+ && !rec_offs_nth_default(
+ offsets, pos)
+ ? rec_get_nth_field(
+ rec, offsets,
+ pos, &flen)
+ : index->instant_field_value(
+ pos - 1, &flen);
+
+ if (pos == index->first_user_field()) {
+ ut_ad(rec_offs_nth_extern(
+ offsets, pos));
+ ut_ad(flen == FIELD_REF_SIZE);
+ goto write_field;
+ }
+ col = dict_index_get_nth_col(index,
+ pos - 1);
+ } else if (!update->is_alter_metadata()) {
+ goto get_field;
+ } else {
+ /* We are converting an ADD COLUMN
+ metadata record to an ALTER TABLE
+ metadata record, with BLOB. Subtract
+ the missing metadata BLOB field. */
+ ut_ad(pos > index->first_user_field());
+ --pos;
+ goto get_field;
+ }
} else {
+get_field:
+ col = dict_index_get_nth_col(index, pos);
field = rec_get_nth_cfield(
rec, index, offsets, pos, &flen);
}
+write_field:
+ /* Write field number to undo log */
+ ptr += mach_write_compressed(ptr, pos);
if (trx_undo_left(undo_block, ptr) < 15) {
- return(0);
+ return 0;
}
- if (!is_virtual && rec_offs_nth_extern(offsets, pos)) {
- const dict_col_t* col
- = dict_index_get_nth_col(index, pos);
- ulint prefix_len
- = dict_max_field_len_store_undo(
- table, col);
+ if (rec_offs_n_fields(offsets) > pos
+ && rec_offs_nth_extern(offsets, pos)) {
+ ut_ad(col || pos == index->first_user_field());
+ ut_ad(col || update->is_alter_metadata());
+ ut_ad(col
+ || rec_is_alter_metadata(rec, *index));
+ ulint prefix_len = col
+ ? dict_max_field_len_store_undo(
+ table, col)
+ : 0;
ut_ad(prefix_len + BTR_EXTERN_FIELD_REF_SIZE
<= sizeof ext_buf);
ptr = trx_undo_page_report_modify_ext(
ptr,
- col->ord_part
+ col
+ && col->ord_part
&& !ignore_prefix
&& flen < REC_ANTELOPE_MAX_INDEX_COL_LEN
? ext_buf : NULL, prefix_len,
- dict_table_page_size(table),
+ table->space->zip_size(),
&field, &flen, SPATIAL_UNKNOWN);
*type_cmpl_ptr |= TRX_UNDO_UPD_EXTERN;
} else {
+store_len:
ptr += mach_write_compressed(ptr, flen);
}
@@ -1269,6 +1334,8 @@ trx_undo_page_report_modify(
table, col);
ut_a(prefix_len < sizeof ext_buf);
+ const ulint zip_size
+ = table->space->zip_size();
/* If there is a spatial index on it,
log its MBR */
@@ -1277,9 +1344,7 @@ trx_undo_page_report_modify(
col->mtype));
trx_undo_get_mbr_from_ext(
- mbr,
- dict_table_page_size(
- table),
+ mbr, zip_size,
field, &flen);
}
@@ -1288,7 +1353,7 @@ trx_undo_page_report_modify(
flen < REC_ANTELOPE_MAX_INDEX_COL_LEN
&& !ignore_prefix
? ext_buf : NULL, prefix_len,
- dict_table_page_size(table),
+ zip_size,
&field, &flen,
spatial_status);
} else {
@@ -1482,7 +1547,6 @@ trx_undo_update_rec_get_update(
upd_t* update;
ulint n_fields;
byte* buf;
- ulint i;
bool first_v_col = true;
bool is_undo_log = true;
ulint n_skip_field = 0;
@@ -1495,7 +1559,7 @@ trx_undo_update_rec_get_update(
n_fields = 0;
}
- update = upd_create(n_fields + 2, heap);
+ *upd = update = upd_create(n_fields + 2, heap);
update->info_bits = info_bits;
@@ -1507,9 +1571,7 @@ trx_undo_update_rec_get_update(
mach_write_to_6(buf, trx_id);
- upd_field_set_field_no(upd_field,
- dict_index_get_sys_col_pos(index, DATA_TRX_ID),
- index);
+ upd_field_set_field_no(upd_field, index->db_trx_id(), index);
dfield_set_data(&(upd_field->new_val), buf, DATA_TRX_ID_LEN);
upd_field = upd_get_nth_field(update, n_fields + 1);
@@ -1518,25 +1580,20 @@ trx_undo_update_rec_get_update(
trx_write_roll_ptr(buf, roll_ptr);
- upd_field_set_field_no(
- upd_field, dict_index_get_sys_col_pos(index, DATA_ROLL_PTR),
- index);
+ upd_field_set_field_no(upd_field, index->db_roll_ptr(), index);
dfield_set_data(&(upd_field->new_val), buf, DATA_ROLL_PTR_LEN);
/* Store then the updated ordinary columns to the update vector */
- for (i = 0; i < n_fields; i++) {
-
+ for (ulint i = 0; i < n_fields; i++) {
const byte* field;
ulint len;
- ulint field_no;
ulint orig_len;
- bool is_virtual;
upd_field = upd_get_nth_field(update, i);
- field_no = mach_read_next_compressed(&ptr);
+ ulint field_no = mach_read_next_compressed(&ptr);
- is_virtual = (field_no >= REC_MAX_N_FIELDS);
+ const bool is_virtual = (field_no >= REC_MAX_N_FIELDS);
if (is_virtual) {
/* If new version, we need to check index list to figure
@@ -1559,15 +1616,63 @@ trx_undo_update_rec_get_update(
}
upd_field_set_v_field_no(upd_field, field_no, index);
+ } else if (UNIV_UNLIKELY((update->info_bits
+ & ~REC_INFO_DELETED_FLAG)
+ == REC_INFO_MIN_REC_FLAG)) {
+ ut_ad(type == TRX_UNDO_UPD_EXIST_REC);
+ const ulint uf = index->first_user_field();
+ ut_ad(field_no >= uf);
+
+ if (update->info_bits != REC_INFO_MIN_REC_FLAG) {
+ /* Generic instant ALTER TABLE */
+ if (field_no == uf) {
+ upd_field->new_val.type
+ .metadata_blob_init();
+ } else if (field_no >= index->n_fields) {
+ /* This is reachable during
+ purge if the table was emptied
+ and converted to the canonical
+ format on a later ALTER TABLE.
+ In this case,
+ row_purge_upd_exist_or_extern()
+ would only be interested in
+ freeing any BLOBs that were
+ updated, that is, the metadata
+ BLOB above. Other BLOBs in
+ the metadata record are never
+ updated; they are for the
+ initial DEFAULT values of the
+ instantly added columns, and
+ they will never change.
+
+ Note: if the table becomes
+ empty during ROLLBACK or is
+ empty during subsequent ALTER
+ TABLE, and btr_page_empty() is
+ called to re-create the root
+ page without the metadata
+ record, in that case we should
+ only free the latest version
+ of BLOBs in the record,
+ which purge would never touch. */
+ field_no = REC_MAX_N_FIELDS;
+ n_skip_field++;
+ } else {
+ dict_col_copy_type(
+ dict_index_get_nth_col(
+ index, field_no - 1),
+ &upd_field->new_val.type);
+ }
+ } else {
+ /* Instant ADD COLUMN...LAST */
+ dict_col_copy_type(
+ dict_index_get_nth_col(index,
+ field_no),
+ &upd_field->new_val.type);
+ }
+ upd_field->field_no = field_no;
} else if (field_no < index->n_fields) {
upd_field_set_field_no(upd_field, field_no, index);
- } else if (update->info_bits == REC_INFO_MIN_REC_FLAG
- && index->is_instant()) {
- /* This must be a rollback of a subsequent
- instant ADD COLUMN operation. This will be
- detected and handled by btr_cur_trim(). */
- upd_field->field_no = field_no;
- upd_field->orig_len = 0;
} else {
ib::error() << "Trying to access update undo rec"
" field " << field_no
@@ -1600,6 +1705,12 @@ trx_undo_update_rec_get_update(
dfield_set_ext(&upd_field->new_val);
}
+ ut_ad(update->info_bits != (REC_INFO_DELETED_FLAG
+ | REC_INFO_MIN_REC_FLAG)
+ || field_no != index->first_user_field()
+ || (upd_field->new_val.ext
+ && upd_field->new_val.len == FIELD_REF_SIZE));
+
if (is_virtual) {
upd_field->old_v_val = static_cast<dfield_t*>(
mem_heap_alloc(
@@ -1617,31 +1728,23 @@ trx_undo_update_rec_get_update(
}
}
- /* In rare scenario, we could have skipped virtual column (as they
- are dropped. We will regenerate a update vector and skip them */
- if (n_skip_field > 0) {
- ulint n = 0;
- ut_ad(n_skip_field <= n_fields);
-
- upd_t* new_update = upd_create(
- n_fields + 2 - n_skip_field, heap);
+ /* We may have to skip dropped indexed virtual columns.
+ Also, we may have to trim the update vector of a metadata record
+ if dict_index_t::clear_instant_alter() was invoked on the table
+ later, and the number of fields no longer matches. */
- for (i = 0; i < n_fields + 2; i++) {
- upd_field = upd_get_nth_field(update, i);
+ if (n_skip_field) {
+ upd_field_t* d = upd_get_nth_field(update, 0);
+ const upd_field_t* const end = d + n_fields + 2;
- if (upd_field->field_no == REC_MAX_N_FIELDS) {
- continue;
+ for (const upd_field_t* s = d; s != end; s++) {
+ if (s->field_no != REC_MAX_N_FIELDS) {
+ *d++ = *s;
}
-
- upd_field_t* new_upd_field
- = upd_get_nth_field(new_update, n);
- *new_upd_field = *upd_field;
- n++;
}
- ut_ad(n == n_fields + 2 - n_skip_field);
- *upd = new_update;
- } else {
- *upd = update;
+
+ ut_ad(d + n_skip_field == end);
+ update->n_fields = d - upd_get_nth_field(update, 0);
}
return(const_cast<byte*>(ptr));
@@ -1696,8 +1799,11 @@ trx_undo_rec_get_partial_row(
if (uf->old_v_val) {
continue;
}
- ulint c = dict_index_get_nth_col(index, uf->field_no)->ind;
- *dtuple_get_nth_field(*row, c) = uf->new_val;
+ const dict_col_t& c = *dict_index_get_nth_col(index,
+ uf->field_no);
+ if (!c.is_dropped()) {
+ *dtuple_get_nth_field(*row, c.ind) = uf->new_val;
+ }
}
end_ptr = ptr + mach_read_from_2(ptr);
@@ -1708,7 +1814,6 @@ trx_undo_rec_get_partial_row(
const byte* field;
ulint field_no;
const dict_col_t* col;
- ulint col_no;
ulint len;
ulint orig_len;
bool is_virtual;
@@ -1736,15 +1841,18 @@ trx_undo_rec_get_partial_row(
dict_v_col_t* vcol = dict_table_get_nth_v_col(
index->table, field_no);
col = &vcol->m_col;
- col_no = dict_col_get_no(col);
dfield = dtuple_get_nth_v_field(*row, vcol->v_pos);
dict_col_copy_type(
&vcol->m_col,
dfield_get_type(dfield));
} else {
col = dict_index_get_nth_col(index, field_no);
- col_no = dict_col_get_no(col);
- dfield = dtuple_get_nth_field(*row, col_no);
+
+ if (col->is_dropped()) {
+ continue;
+ }
+
+ dfield = dtuple_get_nth_field(*row, col->ind);
ut_ad(dfield->type.mtype == DATA_MISSING
|| dict_col_type_assert_equal(col,
&dfield->type));
@@ -1752,9 +1860,7 @@ trx_undo_rec_get_partial_row(
|| dfield->len == len
|| (len != UNIV_SQL_NULL
&& len >= UNIV_EXTERN_STORAGE_FIELD));
- dict_col_copy_type(
- dict_table_get_nth_col(index->table, col_no),
- dfield_get_type(dfield));
+ dict_col_copy_type(col, dfield_get_type(dfield));
}
dfield_set_data(dfield, field, len);
diff --git a/storage/innobase/trx/trx0roll.cc b/storage/innobase/trx/trx0roll.cc
index 22661dcbc0e..a64097fb0fd 100644
--- a/storage/innobase/trx/trx0roll.cc
+++ b/storage/innobase/trx/trx0roll.cc
@@ -44,10 +44,6 @@ Created 3/26/1996 Heikki Tuuri
#include "trx0trx.h"
#include "trx0undo.h"
-/** This many pages must be undone before a truncate is tried within
-rollback */
-static const ulint TRX_ROLL_TRUNC_THRESHOLD = 1;
-
/** true if trx_rollback_all_recovered() thread is active */
bool trx_rollback_is_active;
@@ -181,6 +177,11 @@ trx_rollback_to_savepoint(
partial rollback requested, or NULL for
complete rollback */
{
+#ifdef WITH_WSREP
+ if (savept == NULL && wsrep_on(trx->mysql_thd)) {
+ wsrep_handle_SR_rollback(NULL, trx->mysql_thd);
+ }
+#endif /* WITH_WSREP */
ut_ad(!trx_mutex_own(trx));
trx_start_if_not_started_xa(trx, true);
@@ -451,12 +452,8 @@ trx_rollback_to_savepoint_for_mysql_low(
trx_mark_sql_stat_end(trx);
trx->op_info = "";
-
#ifdef WITH_WSREP
- if (wsrep_on(trx->mysql_thd) &&
- trx->lock.was_chosen_as_deadlock_victim) {
- trx->lock.was_chosen_as_deadlock_victim = FALSE;
- }
+ trx->lock.was_chosen_as_wsrep_victim = FALSE;
#endif
return(err);
}
@@ -876,175 +873,6 @@ DECLARE_THREAD(trx_rollback_all_recovered)(void*)
OS_THREAD_DUMMY_RETURN;
}
-/** Try to truncate the undo logs.
-@param[in,out] trx transaction */
-static
-void
-trx_roll_try_truncate(trx_t* trx)
-{
- trx->pages_undone = 0;
-
- undo_no_t undo_no = trx->undo_no;
-
- if (trx_undo_t* undo = trx->rsegs.m_redo.undo) {
- ut_ad(undo->rseg == trx->rsegs.m_redo.rseg);
- mutex_enter(&undo->rseg->mutex);
- trx_undo_truncate_end(undo, undo_no, false);
- mutex_exit(&undo->rseg->mutex);
- }
-
- if (trx_undo_t* undo = trx->rsegs.m_noredo.undo) {
- ut_ad(undo->rseg == trx->rsegs.m_noredo.rseg);
- mutex_enter(&undo->rseg->mutex);
- trx_undo_truncate_end(undo, undo_no, true);
- mutex_exit(&undo->rseg->mutex);
- }
-
-#ifdef WITH_WSREP_OUT
- if (wsrep_on(trx->mysql_thd)) {
- trx->lock.was_chosen_as_deadlock_victim = FALSE;
- }
-#endif /* WITH_WSREP */
-}
-
-/***********************************************************************//**
-Pops the topmost undo log record in a single undo log and updates the info
-about the topmost record in the undo log memory struct.
-@return undo log record, the page s-latched */
-static
-trx_undo_rec_t*
-trx_roll_pop_top_rec(
-/*=================*/
- trx_t* trx, /*!< in: transaction */
- trx_undo_t* undo, /*!< in: undo log */
- mtr_t* mtr) /*!< in: mtr */
-{
- page_t* undo_page = trx_undo_page_get_s_latched(
- page_id_t(undo->rseg->space->id, undo->top_page_no), mtr);
-
- ulint offset = undo->top_offset;
-
- trx_undo_rec_t* prev_rec = trx_undo_get_prev_rec(
- undo_page + offset, undo->hdr_page_no, undo->hdr_offset,
- true, mtr);
-
- if (prev_rec == NULL) {
- undo->top_undo_no = IB_ID_MAX;
- ut_ad(undo->empty());
- } else {
- page_t* prev_rec_page = page_align(prev_rec);
-
- if (prev_rec_page != undo_page) {
-
- trx->pages_undone++;
- }
-
- undo->top_page_no = page_get_page_no(prev_rec_page);
- undo->top_offset = ulint(prev_rec - prev_rec_page);
- undo->top_undo_no = trx_undo_rec_get_undo_no(prev_rec);
- ut_ad(!undo->empty());
- }
-
- return(undo_page + offset);
-}
-
-/** Get the last undo log record of a transaction (for rollback).
-@param[in,out] trx transaction
-@param[out] roll_ptr DB_ROLL_PTR to the undo record
-@param[in,out] heap memory heap for allocation
-@return undo log record copied to heap
-@retval NULL if none left or the roll_limit (savepoint) was reached */
-trx_undo_rec_t*
-trx_roll_pop_top_rec_of_trx(trx_t* trx, roll_ptr_t* roll_ptr, mem_heap_t* heap)
-{
- if (trx->pages_undone >= TRX_ROLL_TRUNC_THRESHOLD) {
- trx_roll_try_truncate(trx);
- }
-
- trx_undo_t* undo = NULL;
- trx_undo_t* insert = trx->rsegs.m_redo.old_insert;
- trx_undo_t* update = trx->rsegs.m_redo.undo;
- trx_undo_t* temp = trx->rsegs.m_noredo.undo;
- const undo_no_t limit = trx->roll_limit;
-
- ut_ad(!insert || !update || insert->empty() || update->empty()
- || insert->top_undo_no != update->top_undo_no);
- ut_ad(!insert || !temp || insert->empty() || temp->empty()
- || insert->top_undo_no != temp->top_undo_no);
- ut_ad(!update || !temp || update->empty() || temp->empty()
- || update->top_undo_no != temp->top_undo_no);
-
- if (UNIV_LIKELY_NULL(insert)
- && !insert->empty() && limit <= insert->top_undo_no) {
- undo = insert;
- }
-
- if (update && !update->empty() && update->top_undo_no >= limit) {
- if (!undo) {
- undo = update;
- } else if (undo->top_undo_no < update->top_undo_no) {
- undo = update;
- }
- }
-
- if (temp && !temp->empty() && temp->top_undo_no >= limit) {
- if (!undo) {
- undo = temp;
- } else if (undo->top_undo_no < temp->top_undo_no) {
- undo = temp;
- }
- }
-
- if (undo == NULL) {
- trx_roll_try_truncate(trx);
- /* Mark any ROLLBACK TO SAVEPOINT completed, so that
- if the transaction object is committed and reused
- later, we will default to a full ROLLBACK. */
- trx->roll_limit = 0;
- trx->in_rollback = false;
- return(NULL);
- }
-
- ut_ad(!undo->empty());
- ut_ad(limit <= undo->top_undo_no);
-
- *roll_ptr = trx_undo_build_roll_ptr(
- false, undo->rseg->id, undo->top_page_no, undo->top_offset);
-
- mtr_t mtr;
- mtr.start();
-
- trx_undo_rec_t* undo_rec = trx_roll_pop_top_rec(trx, undo, &mtr);
- const undo_no_t undo_no = trx_undo_rec_get_undo_no(undo_rec);
- switch (trx_undo_rec_get_type(undo_rec)) {
- case TRX_UNDO_INSERT_METADATA:
- /* This record type was introduced in MDEV-11369
- instant ADD COLUMN, which was implemented after
- MDEV-12288 removed the insert_undo log. There is no
- instant ADD COLUMN for temporary tables. Therefore,
- this record can only be present in the main undo log. */
- ut_ad(undo == update);
- /* fall through */
- case TRX_UNDO_RENAME_TABLE:
- ut_ad(undo == insert || undo == update);
- /* fall through */
- case TRX_UNDO_INSERT_REC:
- ut_ad(undo == insert || undo == update || undo == temp);
- *roll_ptr |= 1ULL << ROLL_PTR_INSERT_FLAG_POS;
- break;
- default:
- ut_ad(undo == update || undo == temp);
- break;
- }
-
- trx->undo_no = undo_no;
-
- trx_undo_rec_t* undo_rec_copy = trx_undo_rec_copy(undo_rec, heap);
- mtr.commit();
-
- return(undo_rec_copy);
-}
-
/****************************************************************//**
Builds an undo 'query' graph for a transaction. The actual rollback is
performed by executing this query graph like a query subprocedure call.
diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc
index 10092375ebd..a57a78b9408 100644
--- a/storage/innobase/trx/trx0rseg.cc
+++ b/storage/innobase/trx/trx0rseg.cc
@@ -43,6 +43,33 @@ static long long wsrep_seqno = -1;
/** The latest known WSREP XID UUID */
static unsigned char wsrep_uuid[16];
+/** Write the WSREP XID information into rollback segment header.
+@param[in,out] rseg_header rollback segment header
+@param[in] xid WSREP XID
+@param[in,out] mtr mini transaction */
+static void
+trx_rseg_write_wsrep_checkpoint(
+ trx_rsegf_t* rseg_header,
+ const XID* xid,
+ mtr_t* mtr)
+{
+ mlog_write_ulint(TRX_RSEG_WSREP_XID_FORMAT + rseg_header,
+ uint32_t(xid->formatID),
+ MLOG_4BYTES, mtr);
+
+ mlog_write_ulint(TRX_RSEG_WSREP_XID_GTRID_LEN + rseg_header,
+ uint32_t(xid->gtrid_length),
+ MLOG_4BYTES, mtr);
+
+ mlog_write_ulint(TRX_RSEG_WSREP_XID_BQUAL_LEN + rseg_header,
+ uint32_t(xid->bqual_length),
+ MLOG_4BYTES, mtr);
+
+ mlog_write_string(TRX_RSEG_WSREP_XID_DATA + rseg_header,
+ reinterpret_cast<const byte*>(xid->data),
+ XIDDATASIZE, mtr);
+}
+
/** Update the WSREP XID information in rollback segment header.
@param[in,out] rseg_header rollback segment header
@param[in] xid WSREP XID
@@ -60,29 +87,28 @@ trx_rseg_update_wsrep_checkpoint(
long long xid_seqno = wsrep_xid_seqno(xid);
const byte* xid_uuid = wsrep_xid_uuid(xid);
- if (!memcmp(xid_uuid, wsrep_uuid, sizeof wsrep_uuid)) {
+ if (xid_seqno != -1
+ && !memcmp(xid_uuid, wsrep_uuid, sizeof wsrep_uuid)) {
ut_ad(xid_seqno > wsrep_seqno);
} else {
memcpy(wsrep_uuid, xid_uuid, sizeof wsrep_uuid);
}
wsrep_seqno = xid_seqno;
#endif /* UNIV_DEBUG */
+ trx_rseg_write_wsrep_checkpoint(rseg_header, xid, mtr);
+}
- mlog_write_ulint(TRX_RSEG_WSREP_XID_FORMAT + rseg_header,
- uint32_t(xid->formatID),
- MLOG_4BYTES, mtr);
-
- mlog_write_ulint(TRX_RSEG_WSREP_XID_GTRID_LEN + rseg_header,
- uint32_t(xid->gtrid_length),
- MLOG_4BYTES, mtr);
-
- mlog_write_ulint(TRX_RSEG_WSREP_XID_BQUAL_LEN + rseg_header,
- uint32_t(xid->bqual_length),
- MLOG_4BYTES, mtr);
-
- mlog_write_string(TRX_RSEG_WSREP_XID_DATA + rseg_header,
- reinterpret_cast<const byte*>(xid->data),
- XIDDATASIZE, mtr);
+/** Clear the WSREP XID information from rollback segment header.
+@param[in,out] rseg_header Rollback segment header
+@param[in,out] mtr mini-transaction */
+static void
+trx_rseg_clear_wsrep_checkpoint(
+ trx_rsegf_t* rseg_header,
+ mtr_t* mtr)
+{
+ mlog_memset(rseg_header + TRX_RSEG_WSREP_XID_INFO,
+ TRX_RSEG_WSREP_XID_DATA + XIDDATASIZE
+ - TRX_RSEG_WSREP_XID_INFO, 0, mtr);
}
/** Update WSREP checkpoint XID in first rollback segment header
@@ -97,6 +123,13 @@ void trx_rseg_update_wsrep_checkpoint(const XID* xid)
mtr_t mtr;
mtr.start();
+ const byte* xid_uuid = wsrep_xid_uuid(xid);
+ /* We must check against wsrep_uuid here: in debug builds,
+ trx_rseg_update_wsrep_checkpoint() overwrites wsrep_uuid with the
+ xid contents, so a memcmp() done after that call would never give a
+ nonzero result. */
+ const bool must_clear_rsegs = memcmp(wsrep_uuid, xid_uuid,
+ sizeof wsrep_uuid);
const trx_rseg_t* rseg = trx_sys.rseg_array[0];
trx_rsegf_t* rseg_header = trx_rsegf_get(rseg->space, rseg->page_no,
@@ -107,10 +140,7 @@ void trx_rseg_update_wsrep_checkpoint(const XID* xid)
trx_rseg_update_wsrep_checkpoint(rseg_header, xid, &mtr);
- const byte* xid_uuid = wsrep_xid_uuid(xid);
- if (memcmp(wsrep_uuid, xid_uuid, sizeof wsrep_uuid)) {
- memcpy(wsrep_uuid, xid_uuid, sizeof wsrep_uuid);
-
+ if (must_clear_rsegs) {
/* Because the UUID part of the WSREP XID differed
from current_xid_uuid, the WSREP group UUID was
changed, and we must reset the XID in all rollback
@@ -118,10 +148,11 @@ void trx_rseg_update_wsrep_checkpoint(const XID* xid)
for (ulint rseg_id = 1; rseg_id < TRX_SYS_N_RSEGS; ++rseg_id) {
if (const trx_rseg_t* rseg =
trx_sys.rseg_array[rseg_id]) {
- trx_rseg_update_wsrep_checkpoint(
+ trx_rseg_clear_wsrep_checkpoint(
trx_rsegf_get(rseg->space,
- rseg->page_no, &mtr),
- xid, &mtr);
+ rseg->page_no,
+ &mtr),
+ &mtr);
}
}
}
@@ -252,12 +283,10 @@ void trx_rseg_format_upgrade(trx_rsegf_t* rseg_header, mtr_t* mtr)
mlog_write_ulint(rseg_format, 0, MLOG_4BYTES, mtr);
/* Clear also possible garbage at the end of the page. Old
InnoDB versions did not initialize unused parts of pages. */
- byte* b = rseg_header + TRX_RSEG_MAX_TRX_ID + 8;
- ulint len = srv_page_size
- - (FIL_PAGE_DATA_END
- + TRX_RSEG + TRX_RSEG_MAX_TRX_ID + 8);
- memset(b, 0, len);
- mlog_log_string(b, len, mtr);
+ mlog_memset(TRX_RSEG_MAX_TRX_ID + 8 + rseg_header,
+ srv_page_size
+ - (FIL_PAGE_DATA_END
+ + TRX_RSEG + TRX_RSEG_MAX_TRX_ID + 8), 0, mtr);
}
/** Create a rollback segment header.
@@ -289,22 +318,17 @@ trx_rseg_header_create(
buf_block_dbg_add_level(block, SYNC_RSEG_HEADER_NEW);
- mlog_write_ulint(TRX_RSEG + TRX_RSEG_FORMAT + block->frame, 0,
- MLOG_4BYTES, mtr);
+ ut_ad(0 == mach_read_from_4(TRX_RSEG_FORMAT + TRX_RSEG
+ + block->frame));
+ ut_ad(0 == mach_read_from_4(TRX_RSEG_HISTORY_SIZE + TRX_RSEG
+ + block->frame));
/* Initialize the history list */
-
- mlog_write_ulint(TRX_RSEG + TRX_RSEG_HISTORY_SIZE + block->frame, 0,
- MLOG_4BYTES, mtr);
- flst_init(TRX_RSEG + TRX_RSEG_HISTORY + block->frame, mtr);
- trx_rsegf_t* rsegf = TRX_RSEG + block->frame;
+ flst_init(block, TRX_RSEG_HISTORY + TRX_RSEG, mtr);
/* Reset the undo log slots */
- for (ulint i = 0; i < TRX_RSEG_N_SLOTS; i++) {
- /* This is generating a lot of redo log. MariaDB 10.4
- introduced MLOG_MEMSET to reduce the redo log volume. */
- trx_rsegf_set_nth_undo(rsegf, i, FIL_NULL, mtr);
- }
+ mlog_memset(block, TRX_RSEG_UNDO_SLOTS + TRX_RSEG,
+ TRX_RSEG_N_SLOTS * 4, 0xff, mtr);
if (sys_header) {
/* Add the rollback segment info to the free slot in
@@ -475,8 +499,8 @@ trx_rseg_mem_restore(trx_rseg_t* rseg, trx_id_t& max_trx_id, mtr_t* mtr)
rseg->curr_size = mach_read_from_4(rseg_header + TRX_RSEG_HISTORY_SIZE)
+ 1 + trx_undo_lists_init(rseg, max_trx_id, rseg_header);
- if (ulint len = flst_get_len(rseg_header + TRX_RSEG_HISTORY)) {
- trx_sys.history_add(int32(len));
+ if (auto len = flst_get_len(rseg_header + TRX_RSEG_HISTORY)) {
+ trx_sys.rseg_history_len += len;
fil_addr_t node_addr = trx_purge_get_log_from_hist(
flst_get_last(rseg_header + TRX_RSEG_HISTORY, mtr));
diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc
index 341708abcb0..e0dda3dc660 100644
--- a/storage/innobase/trx/trx0sys.cc
+++ b/storage/innobase/trx/trx0sys.cc
@@ -189,10 +189,9 @@ trx_sysf_create(
ut_a(ptr <= page + (srv_page_size - FIL_PAGE_DATA_END));
/* Initialize all of the page. This part used to be uninitialized. */
- memset(ptr, 0, srv_page_size - FIL_PAGE_DATA_END + size_t(page - ptr));
-
- mlog_log_string(TRX_SYS + page, srv_page_size - FIL_PAGE_DATA_END
- - TRX_SYS, mtr);
+ mlog_memset(block, ptr - page,
+ srv_page_size - FIL_PAGE_DATA_END + size_t(page - ptr),
+ 0, mtr);
/* Create the first rollback segment in the SYSTEM tablespace */
slot_no = trx_sys_rseg_find_free(block);
@@ -212,7 +211,7 @@ trx_sys_t::create()
m_initialised = true;
mutex_create(LATCH_ID_TRX_SYS, &mutex);
UT_LIST_INIT(trx_list, &trx_t::trx_list);
- my_atomic_store32(&rseg_history_len, 0);
+ rseg_history_len= 0;
rw_trx_hash.init();
}
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index 1675e92ae2c..7ef6b88a9a9 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -199,6 +199,9 @@ struct TrxFactory {
lock_trx_lock_list_init(&trx->lock.trx_locks);
+ UT_LIST_INIT(trx->lock.evicted_tables,
+ &dict_table_t::table_LRU);
+
UT_LIST_INIT(
trx->trx_savepoints,
&trx_named_savept_t::trx_savepoints);
@@ -223,6 +226,7 @@ struct TrxFactory {
}
ut_a(UT_LIST_GET_LEN(trx->lock.trx_locks) == 0);
+ ut_ad(UT_LIST_GET_LEN(trx->lock.evicted_tables) == 0);
UT_DELETE(trx->xid);
ut_free(trx->detailed_error);
@@ -375,6 +379,7 @@ trx_t *trx_create()
ut_ad(trx->lock.n_rec_locks == 0);
ut_ad(trx->lock.table_cached == 0);
ut_ad(trx->lock.rec_cached == 0);
+ ut_ad(UT_LIST_GET_LEN(trx->lock.evicted_tables) == 0);
#ifdef WITH_WSREP
trx->wsrep_event = NULL;
@@ -1249,6 +1254,37 @@ trx_update_mod_tables_timestamp(
trx->mod_tables.clear();
}
+/** Evict a table definition due to the rollback of ALTER TABLE.
+@param[in] table_id table identifier */
+void trx_t::evict_table(table_id_t table_id)
+{
+ ut_ad(in_rollback);
+
+ dict_table_t* table = dict_table_open_on_id(
+ table_id, true, DICT_TABLE_OP_OPEN_ONLY_IF_CACHED);
+ if (!table) {
+ return;
+ }
+
+ if (!table->release()) {
+ /* This must be a DDL operation that is being rolled
+ back in an active connection. */
+ ut_a(table->get_ref_count() == 1);
+ ut_ad(!is_recovered);
+ ut_ad(mysql_thd);
+ return;
+ }
+
+ /* This table should only be locked by this transaction, if at all. */
+ ut_ad(UT_LIST_GET_LEN(table->locks) <= 1);
+ const bool locked = UT_LIST_GET_LEN(table->locks);
+ ut_ad(!locked || UT_LIST_GET_FIRST(table->locks)->trx == this);
+ dict_table_remove_from_cache(table, true, locked);
+ if (locked) {
+ UT_LIST_ADD_FIRST(lock.evicted_tables, table);
+ }
+}
+
/****************************************************************//**
Commits a transaction in memory. */
static
@@ -1314,9 +1350,16 @@ trx_commit_in_memory(
trx_update_mod_tables_timestamp(trx);
MONITOR_INC(MONITOR_TRX_RW_COMMIT);
}
+
+ while (dict_table_t* table = UT_LIST_GET_FIRST(
+ trx->lock.evicted_tables)) {
+ UT_LIST_REMOVE(trx->lock.evicted_tables, table);
+ dict_mem_table_free(table);
+ }
}
ut_ad(!trx->rsegs.m_redo.undo);
+ ut_ad(UT_LIST_GET_LEN(trx->lock.evicted_tables) == 0);
if (trx_rseg_t* rseg = trx->rsegs.m_redo.rseg) {
mutex_enter(&rseg->mutex);
@@ -1403,11 +1446,8 @@ trx_commit_in_memory(
trx_mutex_enter(trx);
trx->dict_operation = TRX_DICT_OP_NONE;
-
#ifdef WITH_WSREP
- if (trx->mysql_thd && wsrep_on(trx->mysql_thd)) {
- trx->lock.was_chosen_as_deadlock_victim = FALSE;
- }
+ trx->lock.was_chosen_as_wsrep_victim = FALSE;
#endif
DBUG_LOG("trx", "Commit in memory: " << trx);
@@ -1420,7 +1460,9 @@ trx_commit_in_memory(
trx_mutex_exit(trx);
ut_a(trx->error_state == DB_SUCCESS);
- srv_wake_purge_thread_if_not_active();
+ if (!srv_read_only_mode) {
+ srv_wake_purge_thread_if_not_active();
+ }
}
/** Commit a transaction and a mini-transaction.
@@ -1532,6 +1574,16 @@ trx_commit(
}
trx_commit_low(trx, mtr);
+#ifdef WITH_WSREP
+ /* Serialization history has been written and the
+ transaction is committed in memory, which makes
+ this commit ordered. Release commit order critical
+ section. */
+ if (wsrep_on(trx->mysql_thd))
+ {
+ wsrep_commit_ordered(trx->mysql_thd);
+ }
+#endif /* WITH_WSREP */
}
/****************************************************************//**
@@ -2151,7 +2203,7 @@ static my_bool trx_get_trx_by_xid_callback(rw_trx_hash_element_t *element,
transaction needs a valid trx->xid for
invoking trx_sys_update_wsrep_checkpoint(). */
if (!wsrep_is_wsrep_xid(trx->xid))
-#endif
+#endif /* WITH_WSREP */
/* Invalidate the XID, so that subsequent calls will not find it. */
trx->xid->null();
arg->trx= trx;
diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc
index 45088b688ed..dbd45979c93 100644
--- a/storage/innobase/trx/trx0undo.cc
+++ b/storage/innobase/trx/trx0undo.cc
@@ -187,7 +187,7 @@ trx_undo_get_prev_rec_from_prev_page(
space = page_get_space_id(undo_page);
buf_block_t* block = buf_page_get(
- page_id_t(space, prev_page_no), univ_page_size,
+ page_id_t(space, prev_page_no), 0,
shared ? RW_S_LATCH : RW_X_LATCH, mtr);
buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
@@ -844,7 +844,7 @@ trx_undo_free_page(
TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE + undo_page, mtr);
fseg_free_page(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER + header_page,
- rseg->space, page_no, false, mtr);
+ rseg->space, page_no, false, true, mtr);
const fil_addr_t last_addr = flst_get_last(
TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + header_page, mtr);
@@ -883,54 +883,55 @@ trx_undo_free_last_page(trx_undo_t* undo, mtr_t* mtr)
@param[in,out] undo undo log
@param[in] limit all undo logs after this limit will be discarded
@param[in] is_temp whether this is temporary undo log */
-void
-trx_undo_truncate_end(trx_undo_t* undo, undo_no_t limit, bool is_temp)
+void trx_undo_truncate_end(trx_undo_t& undo, undo_no_t limit, bool is_temp)
{
- ut_ad(mutex_own(&undo->rseg->mutex));
- ut_ad(is_temp == !undo->rseg->is_persistent());
+ mtr_t mtr;
+ ut_ad(is_temp == !undo.rseg->is_persistent());
for (;;) {
- mtr_t mtr;
mtr.start();
if (is_temp) {
mtr.set_log_mode(MTR_LOG_NO_REDO);
}
trx_undo_rec_t* trunc_here = NULL;
+ mutex_enter(&undo.rseg->mutex);
page_t* undo_page = trx_undo_page_get(
- page_id_t(undo->rseg->space->id, undo->last_page_no),
+ page_id_t(undo.rseg->space->id, undo.last_page_no),
&mtr);
trx_undo_rec_t* rec = trx_undo_page_get_last_rec(
- undo_page, undo->hdr_page_no, undo->hdr_offset);
+ undo_page, undo.hdr_page_no, undo.hdr_offset);
while (rec) {
- if (trx_undo_rec_get_undo_no(rec) >= limit) {
- /* Truncate at least this record off, maybe
- more */
- trunc_here = rec;
- } else {
- goto function_exit;
+ if (trx_undo_rec_get_undo_no(rec) < limit) {
+ goto func_exit;
}
+ /* Truncate at least this record off, maybe more */
+ trunc_here = rec;
rec = trx_undo_page_get_prev_rec(rec,
- undo->hdr_page_no,
- undo->hdr_offset);
+ undo.hdr_page_no,
+ undo.hdr_offset);
}
- if (undo->last_page_no == undo->hdr_page_no) {
-function_exit:
- if (trunc_here) {
- mlog_write_ulint(undo_page + TRX_UNDO_PAGE_HDR
- + TRX_UNDO_PAGE_FREE,
- ulint(trunc_here - undo_page),
- MLOG_2BYTES, &mtr);
- }
-
+ if (undo.last_page_no != undo.hdr_page_no) {
+ trx_undo_free_last_page(&undo, &mtr);
+ mutex_exit(&undo.rseg->mutex);
mtr.commit();
- return;
+ continue;
+ }
+
+func_exit:
+ mutex_exit(&undo.rseg->mutex);
+
+ if (trunc_here) {
+ mlog_write_ulint(undo_page + TRX_UNDO_PAGE_HDR
+ + TRX_UNDO_PAGE_FREE,
+ ulint(trunc_here - undo_page),
+ MLOG_2BYTES, &mtr);
}
- trx_undo_free_last_page(undo, &mtr);
mtr.commit();
+ return;
}
}
@@ -1334,7 +1335,7 @@ trx_undo_reuse_cached(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** pundo,
buf_block_t* block = buf_page_get(page_id_t(undo->rseg->space->id,
undo->hdr_page_no),
- univ_page_size, RW_X_LATCH, mtr);
+ 0, RW_X_LATCH, mtr);
if (!block) {
return NULL;
}
@@ -1402,7 +1403,7 @@ trx_undo_assign(trx_t* trx, dberr_t* err, mtr_t* mtr)
if (undo) {
return buf_page_get_gen(
page_id_t(undo->rseg->space->id, undo->last_page_no),
- univ_page_size, RW_X_LATCH,
+ 0, RW_X_LATCH,
buf_pool_is_obsolete(undo->withdraw_clock)
? NULL : undo->guess_block,
BUF_GET, __FILE__, __LINE__, mtr, err);
@@ -1458,7 +1459,7 @@ trx_undo_assign_low(trx_t* trx, trx_rseg_t* rseg, trx_undo_t** undo,
if (*undo) {
return buf_page_get_gen(
page_id_t(rseg->space->id, (*undo)->last_page_no),
- univ_page_size, RW_X_LATCH,
+ 0, RW_X_LATCH,
buf_pool_is_obsolete((*undo)->withdraw_clock)
? NULL : (*undo)->guess_block,
BUF_GET, __FILE__, __LINE__, mtr, err);
diff --git a/storage/innobase/ut/ut0crc32.cc b/storage/innobase/ut/ut0crc32.cc
index 0b1c1b3991a..3eef9329b57 100644
--- a/storage/innobase/ut/ut0crc32.cc
+++ b/storage/innobase/ut/ut0crc32.cc
@@ -469,34 +469,6 @@ ut_crc32_64_sw(
*len -= 8;
}
-#ifdef INNODB_BUG_ENDIAN_CRC32
-/** Calculate CRC32 over 64-bit byte string using a software implementation.
-The byte string is converted to a 64-bit integer using big endian byte order.
-@param[in,out] crc crc32 checksum so far when this function is called,
-when the function ends it will contain the new checksum
-@param[in,out] data data to be checksummed, the pointer will be advanced
-with 8 bytes
-@param[in,out] len remaining bytes, it will be decremented with 8 */
-inline
-void
-ut_crc32_64_legacy_big_endian_sw(
- uint32_t* crc,
- const byte** data,
- ulint* len)
-{
- uint64_t data_int = *reinterpret_cast<const uint64_t*>(*data);
-
-#ifndef WORDS_BIGENDIAN
- data_int = ut_crc32_swap_byteorder(data_int);
-#endif /* WORDS_BIGENDIAN */
-
- *crc = ut_crc32_64_low_sw(*crc, data_int);
-
- *data += 8;
- *len -= 8;
-}
-#endif /* INNODB_BUG_ENDIAN_CRC32 */
-
/** Calculates CRC32 in software, without using CPU instructions.
@param[in] buf data over which to calculate CRC32
@param[in] len data length
@@ -547,57 +519,6 @@ ut_crc32_sw(
return(~crc);
}
-#ifdef INNODB_BUG_ENDIAN_CRC32
-/** Calculates CRC32 in software, without using CPU instructions.
-This function uses big endian byte ordering when converting byte sequence to
-integers.
-@param[in] buf data over which to calculate CRC32
-@param[in] len data length
-@return CRC-32C (polynomial 0x11EDC6F41) */
-uint32_t ut_crc32_legacy_big_endian(const byte* buf, ulint len)
-{
- uint32_t crc = 0xFFFFFFFFU;
-
- ut_a(ut_crc32_slice8_table_initialized);
-
- /* Calculate byte-by-byte up to an 8-byte aligned address. After
- this consume the input 8-bytes at a time. */
- while (len > 0 && (reinterpret_cast<uintptr_t>(buf) & 7) != 0) {
- ut_crc32_8_sw(&crc, &buf, &len);
- }
-
- while (len >= 128) {
- /* This call is repeated 16 times. 16 * 8 = 128. */
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- }
-
- while (len >= 8) {
- ut_crc32_64_legacy_big_endian_sw(&crc, &buf, &len);
- }
-
- while (len > 0) {
- ut_crc32_8_sw(&crc, &buf, &len);
- }
-
- return(~crc);
-}
-#endif /* INNODB_BUG_ENDIAN_CRC32 */
-
/********************************************************************//**
Initializes the data structures used by ut_crc32*(). Does not do any
allocations, would not hurt if called twice, but would be pointless. */
@@ -637,9 +558,6 @@ ut_crc32_init()
if (features_ecx & 1 << 20) {
ut_crc32 = ut_crc32_hw;
-#ifdef INNODB_BUG_ENDIAN_CRC32
- ut_crc32_legacy_big_endian = ut_crc32_legacy_big_endian_hw;
-#endif /* INNODB_BUG_ENDIAN_CRC32 */
ut_crc32_implementation = "Using SSE2 crc32 instructions";
}
#endif
diff --git a/storage/innobase/ut/ut0new.cc b/storage/innobase/ut/ut0new.cc
index f1d5eb7407a..64796a544c9 100644
--- a/storage/innobase/ut/ut0new.cc
+++ b/storage/innobase/ut/ut0new.cc
@@ -147,7 +147,6 @@ ut_new_boot()
"row0merge",
"row0mysql",
"row0sel",
- "row0trunc",
"srv0conc",
"srv0srv",
"srv0start",
diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc
index 39fb037aa28..adda0b960c9 100644
--- a/storage/innobase/ut/ut0ut.cc
+++ b/storage/innobase/ut/ut0ut.cc
@@ -589,8 +589,6 @@ ut_strerr(
return("Tablespace already exists");
case DB_TABLESPACE_DELETED:
return("Tablespace deleted or being deleted");
- case DB_TABLESPACE_TRUNCATED:
- return("Tablespace was truncated");
case DB_TABLESPACE_NOT_FOUND:
return("Tablespace not found");
case DB_LOCK_TABLE_FULL:
diff --git a/storage/maria/CMakeLists.txt b/storage/maria/CMakeLists.txt
index b8e3350ca76..6ed370653d5 100644
--- a/storage/maria/CMakeLists.txt
+++ b/storage/maria/CMakeLists.txt
@@ -47,7 +47,7 @@ SET(ARIA_SOURCES ma_init.c ma_open.c ma_extra.c ma_info.c ma_rkey.c
ma_checkpoint.c ma_recovery.c ma_commit.c ma_pagecrc.c
ha_maria.h maria_def.h ma_recovery_util.c ma_servicethread.c
ma_norec.c
- ma_crypt.c
+ ma_crypt.c ma_backup.c
)
IF(APPLE)
@@ -56,7 +56,8 @@ IF(APPLE)
ENDIF()
MYSQL_ADD_PLUGIN(aria ${ARIA_SOURCES}
- STORAGE_ENGINE STATIC_ONLY DEFAULT
+ STORAGE_ENGINE
+ MANDATORY
RECOMPILE_FOR_EMBEDDED)
IF(NOT WITH_ARIA_STORAGE_ENGINE)
@@ -97,7 +98,12 @@ IF(WITH_UNIT_TESTS)
ADD_EXECUTABLE(ma_sp_test ma_sp_test.c)
TARGET_LINK_LIBRARIES(ma_sp_test aria)
+
+ ADD_EXECUTABLE(test_ma_backup test_ma_backup.c)
+ TARGET_LINK_LIBRARIES(test_ma_backup aria)
+
ADD_SUBDIRECTORY(unittest)
+
ENDIF()
IF (MSVC)
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index 10ec7ad4d67..30f8724aebd 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -387,6 +387,10 @@ static void init_aria_psi_keys(void)
#define init_aria_psi_keys() /* no-op */
#endif /* HAVE_PSI_INTERFACE */
+const char *MA_CHECK_INFO= "info";
+const char *MA_CHECK_WARNING= "warning";
+const char *MA_CHECK_ERROR= "error";
+
/*****************************************************************************
** MARIA tables
*****************************************************************************/
@@ -399,6 +403,20 @@ static handler *maria_create_handler(handlerton *hton,
}
+static void _ma_check_print(HA_CHECK *param, const char* msg_type,
+ const char *msgbuf)
+{
+ if (msg_type == MA_CHECK_INFO)
+ sql_print_information("%s.%s: %s", param->db_name, param->table_name,
+ msgbuf);
+ else if (msg_type == MA_CHECK_WARNING)
+ sql_print_warning("%s.%s: %s", param->db_name, param->table_name,
+ msgbuf);
+ else
+ sql_print_error("%s.%s: %s", param->db_name, param->table_name, msgbuf);
+}
+
+
// collect errors printed by maria_check routines
static void _ma_check_print_msg(HA_CHECK *param, const char *msg_type,
@@ -420,16 +438,21 @@ static void _ma_check_print_msg(HA_CHECK *param, const char *msg_type,
if (!thd->vio_ok())
{
- sql_print_error("%s.%s: %s", param->db_name, param->table_name, msgbuf);
+ _ma_check_print(param, msg_type, msgbuf);
return;
}
if (param->testflag &
(T_CREATE_MISSING_KEYS | T_SAFE_REPAIR | T_AUTO_REPAIR))
{
- my_message(ER_NOT_KEYFILE, msgbuf, MYF(MY_WME));
+ myf flag= 0;
+ if (msg_type == MA_CHECK_INFO)
+ flag= ME_NOTE;
+ else if (msg_type == MA_CHECK_WARNING)
+ flag= ME_WARNING;
+ my_message(ER_NOT_KEYFILE, msgbuf, MYF(flag));
if (thd->variables.log_warnings > 2)
- sql_print_error("%s.%s: %s", param->db_name, param->table_name, msgbuf);
+ _ma_check_print(param, msg_type, msgbuf);
return;
}
length= (uint) (strxmov(name, param->db_name, ".", param->table_name,
@@ -451,7 +474,7 @@ static void _ma_check_print_msg(HA_CHECK *param, const char *msg_type,
sql_print_error("Failed on my_net_write, writing to stderr instead: %s.%s: %s\n",
param->db_name, param->table_name, msgbuf);
else if (thd->variables.log_warnings > 2)
- sql_print_error("%s.%s: %s", param->db_name, param->table_name, msgbuf);
+ _ma_check_print(param, msg_type, msgbuf);
return;
}
@@ -879,7 +902,7 @@ void _ma_check_print_error(HA_CHECK *param, const char *fmt, ...)
if (param->testflag & T_SUPPRESS_ERR_HANDLING)
DBUG_VOID_RETURN;
va_start(args, fmt);
- _ma_check_print_msg(param, "error", fmt, args);
+ _ma_check_print_msg(param, MA_CHECK_ERROR, fmt, args);
va_end(args);
DBUG_VOID_RETURN;
}
@@ -890,7 +913,7 @@ void _ma_check_print_info(HA_CHECK *param, const char *fmt, ...)
va_list args;
DBUG_ENTER("_ma_check_print_info");
va_start(args, fmt);
- _ma_check_print_msg(param, "info", fmt, args);
+ _ma_check_print_msg(param, MA_CHECK_INFO, fmt, args);
va_end(args);
DBUG_VOID_RETURN;
}
@@ -903,7 +926,7 @@ void _ma_check_print_warning(HA_CHECK *param, const char *fmt, ...)
param->warning_printed= 1;
param->out_flag |= O_DATA_LOST;
va_start(args, fmt);
- _ma_check_print_msg(param, "warning", fmt, args);
+ _ma_check_print_msg(param, MA_CHECK_WARNING, fmt, args);
va_end(args);
DBUG_VOID_RETURN;
}
@@ -1006,6 +1029,8 @@ handler *ha_maria::clone(const char *name, MEM_ROOT *mem_root)
new_handler->file->state= file->state;
/* maria_create_trn_for_mysql() is never called for clone() tables */
new_handler->file->trn= file->trn;
+ DBUG_ASSERT(new_handler->file->trn_prev == 0 &&
+ new_handler->file->trn_next == 0);
}
return new_handler;
}
@@ -1236,6 +1261,14 @@ int ha_maria::open(const char *name, int mode, uint test_if_locked)
int_table_flags |= HA_HAS_NEW_CHECKSUM;
/*
+ We can only do online backup on transactional tables with checksum.
+ Checksums are needed to avoid half writes.
+ */
+ if (file->s->options & HA_OPTION_PAGE_CHECKSUM &&
+ file->s->base.born_transactional)
+ int_table_flags |= HA_CAN_ONLINE_BACKUPS;
+
+ /*
For static size rows, tell MariaDB that we will access all bytes
in the record when writing it. This signals MariaDB to initialize
the full row to ensure we don't get any errors from valgrind and
@@ -1271,6 +1304,8 @@ int ha_maria::close(void)
MARIA_HA *tmp= file;
if (!tmp)
return 0;
+ DBUG_ASSERT(file->trn == 0 || file->trn == &dummy_transaction_object);
+ DBUG_ASSERT(file->trn_next == 0 && file->trn_prev == 0);
file= 0;
return maria_close(tmp);
}
@@ -1386,6 +1421,16 @@ int ha_maria::check(THD * thd, HA_CHECK_OPT * check_opt)
mysql_mutex_unlock(&share->intern_lock);
info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
HA_STATUS_CONST);
+
+ /*
+ Write a 'table is ok' message to the error log if the table is ok and
+ we have already written to the error log that the table was being checked
+ */
+ if (!error && !(table->db_stat & HA_READ_ONLY) &&
+ !maria_is_crashed(file) && thd->error_printed_to_log &&
+ (param->warning_printed || param->error_printed ||
+ param->note_printed))
+ _ma_check_print_info(param, "Table is fixed");
}
}
else if (!maria_is_crashed(file) && !thd->killed)
@@ -1396,7 +1441,10 @@ int ha_maria::check(THD * thd, HA_CHECK_OPT * check_opt)
/* Reset trn, that may have been set by repair */
if (old_trn && old_trn != file->trn)
+ {
+ DBUG_ASSERT(old_trn->used_instances == 0);
_ma_set_trn_for_table(file, old_trn);
+ }
thd_proc_info(thd, old_proc_info);
thd_progress_end(thd);
return error ? HA_ADMIN_CORRUPT : HA_ADMIN_OK;
@@ -2615,14 +2663,20 @@ int ha_maria::extra(enum ha_extra_function operation)
operation == HA_EXTRA_PREPARE_FOR_FORCED_CLOSE))
{
THD *thd= table->in_use;
- TRN *trn= THD_TRN;
- _ma_set_tmp_trn_for_table(file, trn);
+ file->trn= THD_TRN;
}
DBUG_ASSERT(file->s->base.born_transactional || file->trn == 0 ||
file->trn == &dummy_transaction_object);
tmp= maria_extra(file, operation, 0);
- file->trn= old_trn; // Reset trn if was used
+ /*
+ Restore trn if it was changed above.
+ Note that table could be removed from trn->used_tables and
+ trn->used_instances if trn was set and some of the above operations
+ was used. This is ok as the table should not be part of any transaction
+ after this and thus doesn't need to be part of any of the above lists.
+ */
+ file->trn= old_trn;
return tmp;
}
@@ -2858,9 +2912,12 @@ static void reset_thd_trn(THD *thd, MARIA_HA *first_table)
{
DBUG_ENTER("reset_thd_trn");
THD_TRN= NULL;
- for (MARIA_HA *table= first_table; table ;
- table= table->trn_next)
+ MARIA_HA *next;
+ for (MARIA_HA *table= first_table; table ; table= next)
+ {
+ next= table->trn_next;
_ma_reset_trn_for_table(table);
+ }
DBUG_VOID_RETURN;
}
@@ -2907,9 +2964,11 @@ int ha_maria::implicit_commit(THD *thd, bool new_trn)
DBUG_RETURN(0);
}
+ /* Prepare to move used_instances and locked tables to new TRN object */
locked_tables= trnman_has_locked_tables(trn);
+ trnman_reset_locked_tables(trn, 0);
+ relink_trn_used_instances(&used_tables, trn);
- used_tables= (MARIA_HA*) trn->used_instances;
error= 0;
if (unlikely(ma_commit(trn)))
error= 1;
@@ -3334,6 +3393,8 @@ static int maria_commit(handlerton *hton __attribute__ ((unused)),
{
TRN *trn= THD_TRN;
DBUG_ENTER("maria_commit");
+
+ DBUG_ASSERT(trnman_has_locked_tables(trn) == 0);
trnman_reset_locked_tables(trn, 0);
trnman_set_flags(trn, trnman_get_flags(trn) & ~TRN_STATE_INFO_LOGGED);
@@ -3351,9 +3412,12 @@ static int maria_rollback(handlerton *hton __attribute__ ((unused)),
{
TRN *trn= THD_TRN;
DBUG_ENTER("maria_rollback");
+
+ DBUG_ASSERT(trnman_has_locked_tables(trn) == 0);
trnman_reset_locked_tables(trn, 0);
/* statement or transaction ? */
- if ((thd->variables.option_bits & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) && !all)
+ if ((thd->variables.option_bits & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) &&
+ !all)
{
trnman_rollback_statement(trn);
DBUG_RETURN(0); // end of statement
@@ -3387,6 +3451,21 @@ int maria_checkpoint_state(handlerton *hton, bool disabled)
}
+/*
+ Handle backup calls
+*/
+
+void maria_prepare_for_backup()
+{
+ translog_disable_purge();
+}
+
+void maria_end_backup()
+{
+ translog_enable_purge();
+}
+
+
#define SHOW_MSG_LEN (FN_REFLEN + 20)
/**
@@ -3509,7 +3588,7 @@ static int mark_recovery_start(const char* log_dir)
int res;
DBUG_ENTER("mark_recovery_start");
if (!(maria_recover_options & HA_RECOVER_ANY))
- ma_message_no_user(ME_JUST_WARNING, "Please consider using option"
+ ma_message_no_user(ME_WARNING, "Please consider using option"
" --aria-recover-options[=...] to automatically check and"
" repair tables when logs are removed by option"
" --aria-force-start-after-recovery-failures=#");
@@ -3527,7 +3606,7 @@ static int mark_recovery_start(const char* log_dir)
" recovery from logs",
(res ? "failed to remove some" : "removed all"),
recovery_failures);
- ma_message_no_user((res ? 0 : ME_JUST_WARNING), msg);
+ ma_message_no_user((res ? 0 : ME_WARNING), msg);
}
else
res= ma_control_file_write_and_force(last_checkpoint_lsn, last_logno,
@@ -3587,6 +3666,9 @@ static int ha_maria_init(void *p)
#endif
maria_hton->flush_logs= maria_flush_logs;
maria_hton->show_status= maria_show_status;
+ maria_hton->prepare_for_backup= maria_prepare_for_backup;
+ maria_hton->end_backup= maria_end_backup;
+
/* TODO: decide if we support Maria being used for log tables */
maria_hton->flags= HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES;
bzero(maria_log_pagecache, sizeof(*maria_log_pagecache));
@@ -3988,8 +4070,8 @@ maria_declare_plugin(aria)
MYSQL_STORAGE_ENGINE_PLUGIN,
&maria_storage_engine,
"Aria",
- "Monty Program Ab",
- "Crash-safe tables with MyISAM heritage",
+ "MariaDB Corporation Ab",
+ "Crash-safe tables with MyISAM heritage. Used for internal temporary tables and privilege tables",
PLUGIN_LICENSE_GPL,
ha_maria_init, /* Plugin Init */
NULL, /* Plugin Deinit */
diff --git a/storage/maria/ma_backup.c b/storage/maria/ma_backup.c
new file mode 100644
index 00000000000..8f20209c48a
--- /dev/null
+++ b/storage/maria/ma_backup.c
@@ -0,0 +1,281 @@
+/* Copyright (C) 2018 MariaDB corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
+
+/* Code for doing backups of Aria tables */
+
+#include "maria_def.h"
+#include "ma_blockrec.h" /* PAGE_SUFFIX_SIZE */
+#include "ma_checkpoint.h"
+#include <aria_backup.h>
+
+static uchar *_ma_base_info_read(uchar *ptr, MARIA_BASE_INFO *base);
+
+/**
+ @brief Get capabilities for an Aria table
+
+ @param kfile key file (.MAI)
+ @param cap Capabilities are stored here
+
+ @return 0 ok
+ @return X errno
+*/
+
+int aria_get_capabilities(File kfile, ARIA_TABLE_CAPABILITIES *cap)
+{
+ MARIA_SHARE share;
+ int error= 0;
+ uint head_length= sizeof(share.state.header), base_pos;
+ uint aligned_bit_blocks;
+ size_t info_length;
+ uchar *disc_cache;
+ DBUG_ENTER("aria_get_capabilities");
+
+ bzero(cap, sizeof(*cap));
+ if (my_pread(kfile,share.state.header.file_version, head_length, 0,
+ MYF(MY_NABP)))
+ DBUG_RETURN(HA_ERR_NOT_A_TABLE);
+
+ if (memcmp(share.state.header.file_version, maria_file_magic, 4))
+ DBUG_RETURN(HA_ERR_NOT_A_TABLE);
+
+ share.options= mi_uint2korr(share.state.header.options);
+
+ info_length= mi_uint2korr(share.state.header.header_length);
+ base_pos= mi_uint2korr(share.state.header.base_pos);
+
+ /*
+ Allocate space for header information and for data that is too
+ big to keep on stack
+ */
+ if (!(disc_cache= my_malloc(info_length, MYF(MY_WME))))
+ DBUG_RETURN(ENOMEM);
+
+ if (my_pread(kfile, disc_cache, info_length, 0L, MYF(MY_NABP)))
+ {
+ error= my_errno;
+ goto err;
+ }
+ _ma_base_info_read(disc_cache + base_pos, &share.base);
+ cap->transactional= share.base.born_transactional;
+ cap->checksum= MY_TEST(share.options & HA_OPTION_PAGE_CHECKSUM);
+ cap->online_backup_safe= cap->transactional && cap->checksum;
+ cap->header_size= share.base.keystart;
+ cap->keypage_header= ((share.base.born_transactional ?
+ LSN_STORE_SIZE + TRANSID_SIZE :
+ 0) + KEYPAGE_KEYID_SIZE + KEYPAGE_FLAG_SIZE +
+ KEYPAGE_USED_SIZE);
+ cap->block_size= share.base.block_size;
+
+ if (share.state.header.data_file_type == BLOCK_RECORD)
+ {
+ /* Calculate how many pages the row bitmap covers. From _ma_bitmap_init() */
+ aligned_bit_blocks= (cap->block_size - PAGE_SUFFIX_SIZE) / 6;
+ /*
+ In each 6 bytes, we have 6*8/3 = 16 pages covered
+ The +1 is to add the bitmap page, as this doesn't have to be covered
+ */
+ cap->bitmap_pages_covered= aligned_bit_blocks * 16 + 1;
+ }
+
+ /* Do a check that we got things right */
+ if (share.state.header.data_file_type != BLOCK_RECORD &&
+ cap->online_backup_safe)
+ error= HA_ERR_NOT_A_TABLE;
+
+err:
+ my_free(disc_cache);
+ DBUG_RETURN(error);
+} /* maria_get_capabilities */
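
As a worked instance of the bitmap_pages_covered arithmetic in the comment above (assuming an 8 KiB block size and a 4-byte PAGE_SUFFIX_SIZE purely for illustration; neither value is taken from this patch):

#include <cstdio>

int main()
{
  const unsigned block_size = 8192;     /* assumed for the example */
  const unsigned page_suffix_size = 4;  /* assumed for the example */
  /* 6 bytes of bitmap = 48 bits; at 3 bits per page that covers 16 pages. */
  const unsigned aligned_bit_blocks = (block_size - page_suffix_size) / 6;
  /* +1 for the bitmap page itself, as in aria_get_capabilities() above. */
  const unsigned bitmap_pages_covered = aligned_bit_blocks * 16 + 1;
  std::printf("%u\n", bitmap_pages_covered);  /* prints 21825 */
  return 0;
}
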
+
+
+/*
+ This is a copy of my_base_info_read from ma_open().
+ The base information will never change (something may be added
+ last, but not relevant for maria_get_capabilities), so it's safe to
+ copy it here.
+
+ The copy is done to avoid linking in the full Aria library just
+ because maria_backup uses maria_get_capabilities()
+*/
+
+
+static uchar *_ma_base_info_read(uchar *ptr, MARIA_BASE_INFO *base)
+{
+ bmove(base->uuid, ptr, MY_UUID_SIZE); ptr+= MY_UUID_SIZE;
+ base->keystart= mi_sizekorr(ptr); ptr+= 8;
+ base->max_data_file_length= mi_sizekorr(ptr); ptr+= 8;
+ base->max_key_file_length= mi_sizekorr(ptr); ptr+= 8;
+ base->records= (ha_rows) mi_sizekorr(ptr); ptr+= 8;
+ base->reloc= (ha_rows) mi_sizekorr(ptr); ptr+= 8;
+ base->mean_row_length= mi_uint4korr(ptr); ptr+= 4;
+ base->reclength= mi_uint4korr(ptr); ptr+= 4;
+ base->pack_reclength= mi_uint4korr(ptr); ptr+= 4;
+ base->min_pack_length= mi_uint4korr(ptr); ptr+= 4;
+ base->max_pack_length= mi_uint4korr(ptr); ptr+= 4;
+ base->min_block_length= mi_uint4korr(ptr); ptr+= 4;
+ base->fields= mi_uint2korr(ptr); ptr+= 2;
+ base->fixed_not_null_fields= mi_uint2korr(ptr); ptr+= 2;
+ base->fixed_not_null_fields_length= mi_uint2korr(ptr);ptr+= 2;
+ base->max_field_lengths= mi_uint2korr(ptr); ptr+= 2;
+ base->pack_fields= mi_uint2korr(ptr); ptr+= 2;
+ base->extra_options= mi_uint2korr(ptr); ptr+= 2;
+ base->null_bytes= mi_uint2korr(ptr); ptr+= 2;
+ base->original_null_bytes= mi_uint2korr(ptr); ptr+= 2;
+ base->field_offsets= mi_uint2korr(ptr); ptr+= 2;
+ base->language= mi_uint2korr(ptr); ptr+= 2;
+ base->block_size= mi_uint2korr(ptr); ptr+= 2;
+
+ base->rec_reflength= *ptr++;
+ base->key_reflength= *ptr++;
+ base->keys= *ptr++;
+ base->auto_key= *ptr++;
+ base->born_transactional= *ptr++;
+ ptr++;
+ base->pack_bytes= mi_uint2korr(ptr); ptr+= 2;
+ base->blobs= mi_uint2korr(ptr); ptr+= 2;
+ base->max_key_block_length= mi_uint2korr(ptr); ptr+= 2;
+ base->max_key_length= mi_uint2korr(ptr); ptr+= 2;
+ base->extra_alloc_bytes= mi_uint2korr(ptr); ptr+= 2;
+ base->extra_alloc_procent= *ptr++;
+ ptr+= 16;
+ return ptr;
+}
+
+
+/**
+ @brief Read an index block for copying, with a re-read if the checksum doesn't match
+
+ @param kfile key file (.MAI)
+ @param cap aria capabilities from aria_get_capabilities
+ @param block block number to read (0, 1, 2, 3...)
+ @param buffer read data to this buffer; must hold at least cap->block_size bytes
+
+ @return 0 ok
+ @return HA_ERR_END_OF_FILE ; End of file
+ @return # error number
+*/
+
+#define MAX_RETRY 10
+
+int aria_read_index(File kfile, ARIA_TABLE_CAPABILITIES *cap, ulonglong block,
+ uchar *buffer)
+{
+ MARIA_SHARE share;
+ int retry= 0;
+ DBUG_ENTER("aria_read_index");
+
+ share.keypage_header= cap->keypage_header;
+ share.block_size= cap->block_size;
+ do
+ {
+ int error;
+ size_t length;
+ if ((length= my_pread(kfile, buffer, cap->block_size,
+ block * cap->block_size, MYF(0))) != cap->block_size)
+ {
+ if (length == 0)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ if (length == (size_t) -1)
+ DBUG_RETURN(my_errno ? my_errno : -1);
+ /* Assume we got a half read; Do a re-read */
+ }
+ /* If not transactional or key file header, there are no checksums */
+ if (!cap->online_backup_safe ||
+ block < cap->header_size/ cap->block_size)
+ DBUG_RETURN(length == cap->block_size ? 0 : HA_ERR_CRASHED);
+
+ if (length == cap->block_size)
+ {
+ length= _ma_get_page_used(&share, buffer);
+ if (length > cap->block_size - CRC_SIZE)
+ DBUG_RETURN(HA_ERR_CRASHED);
+ error= maria_page_crc_check(buffer, block, &share,
+ MARIA_NO_CRC_NORMAL_PAGE,
+ (int) length);
+ if (error != HA_ERR_WRONG_CRC)
+ DBUG_RETURN(error);
+ }
+ retry++;
+ my_sleep(100000); /* Sleep 0.1 seconds before re-reading the block */
+ } while (retry < MAX_RETRY);
+ DBUG_RETURN(HA_ERR_WRONG_CRC);
+}
+
+
+/**
+ @brief Copy a data block with re-read if checksum doesn't match
+
+ @param dfile data file (.MAD)
+ @param cap aria capabilities from aria_get_capabilities
+ @param block block number to read (0, 1, 2, 3...)
+ @param buffer read data to this buffer
+ @param bytes_read number of bytes actually read (in case of end of file)
+
+ @return 0 ok
+ @return HA_ERR_END_OF_FILE ; End of file
+ @return # error number
+*/
+
+int aria_read_data(File dfile, ARIA_TABLE_CAPABILITIES *cap, ulonglong block,
+ uchar *buffer, size_t *bytes_read)
+{
+ MARIA_SHARE share;
+ int retry= 0;
+ DBUG_ENTER("aria_read_data");
+
+ share.keypage_header= cap->keypage_header;
+ share.block_size= cap->block_size;
+
+ if (!cap->online_backup_safe)
+ {
+ *bytes_read= my_pread(dfile, buffer, cap->block_size,
+ block * cap->block_size, MYF(MY_WME));
+ if (*bytes_read == 0)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ DBUG_RETURN(*bytes_read != (size_t) -1 ? 0 : (my_errno ? my_errno : -1));
+ }
+
+ *bytes_read= cap->block_size;
+ do
+ {
+ int error;
+ size_t length;
+ if ((length= my_pread(dfile, buffer, cap->block_size,
+ block * cap->block_size, MYF(0))) != cap->block_size)
+ {
+ if (length == 0)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
+ if (length == (size_t) -1)
+ DBUG_RETURN(my_errno ? my_errno : -1);
+ }
+
+ /* If not transactional, there are no checksums */
+ if (!cap->online_backup_safe)
+ DBUG_RETURN(length == cap->block_size ? 0 : HA_ERR_CRASHED);
+
+ if (length == cap->block_size)
+ {
+ error= maria_page_crc_check(buffer, block, &share,
+ ((block % cap->bitmap_pages_covered) == 0 ?
+ MARIA_NO_CRC_BITMAP_PAGE :
+ MARIA_NO_CRC_NORMAL_PAGE),
+ share.block_size - CRC_SIZE);
+ if (error != HA_ERR_WRONG_CRC)
+ DBUG_RETURN(error);
+ }
+ retry++;
+ my_sleep(100000); /* Sleep 0.1 seconds before re-reading the block */
+ } while (retry < MAX_RETRY);
+ DBUG_RETURN(HA_ERR_WRONG_CRC);
+}
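
Together with aria_read_index(), this supports a simple block-by-block copy loop. A hedged sketch of copying a data file follows (backup_data_file() and the dst descriptor are placeholders for wherever the backup is written; the test program added later in this patch runs the same loop without the write):

/* Illustrative sketch only; dst is a placeholder destination file */
static int backup_data_file(File src, File dst, ARIA_TABLE_CAPABILITIES *cap)
{
  uchar *buff= my_malloc(cap->block_size, MYF(MY_WME));
  ulonglong block;
  int error= 0;
  if (!buff)
    return ENOMEM;
  for (block= 0 ; ; block++)
  {
    size_t length;
    error= aria_read_data(src, cap, block, buff, &length);
    if (error == HA_ERR_END_OF_FILE)
    {
      error= 0;                                 /* Normal end of copy */
      break;
    }
    if (error)
      break;                                    /* I/O or checksum failure */
    if (my_write(dst, buff, length, MYF(MY_WME | MY_NABP)))
    {
      error= my_errno;
      break;
    }
  }
  my_free(buff);
  return error;
}
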
diff --git a/storage/maria/ma_blockrec.c b/storage/maria/ma_blockrec.c
index 59c9040fe2b..e148e33b9f6 100644
--- a/storage/maria/ma_blockrec.c
+++ b/storage/maria/ma_blockrec.c
@@ -2598,7 +2598,8 @@ static my_bool free_full_page_range(MARIA_HA *info, pgcache_page_no_t page,
@param record Record we should write
@param row Statistics about record (calculated by
calc_record_size())
- @param map_blocks On which pages the record should be stored
+ @param bitmap_blocks On which pages the record should be stored
+ @param head_block_is_read 1 if head block existed. 0 if new block.
@param row_pos Position on head page where to put head part of
record
@param undo_lsn <> LSN_ERROR if we are executing an UNDO
diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c
index 4eab0e07315..998bb984452 100644
--- a/storage/maria/ma_check.c
+++ b/storage/maria/ma_check.c
@@ -167,6 +167,9 @@ int maria_chk_status(HA_CHECK *param, MARIA_HA *info)
{
MARIA_SHARE *share= info->s;
+ /* Protection for HA_EXTRA_FLUSH */
+ mysql_mutex_lock(&share->intern_lock);
+
if (maria_is_crashed_on_repair(info))
_ma_check_print_warning(param,
"Table is marked as crashed and last repair failed");
@@ -189,6 +192,9 @@ int maria_chk_status(HA_CHECK *param, MARIA_HA *info)
if (param->testflag & T_UPDATE_STATE)
param->warning_printed=save;
}
+
+ mysql_mutex_unlock(&share->intern_lock);
+
if (share->state.create_trid > param->max_trid)
{
param->wrong_trd_printed= 1; /* Force should run zerofill */
diff --git a/storage/maria/ma_checkpoint.h b/storage/maria/ma_checkpoint.h
index df877ad2bbc..2ad044d5686 100644
--- a/storage/maria/ma_checkpoint.h
+++ b/storage/maria/ma_checkpoint.h
@@ -84,8 +84,8 @@ static inline LSN lsn_read_non_atomic_32(const volatile LSN *x)
prints a message from a task not connected to any user (checkpoint
and recovery for example).
- @param level 0 if error, ME_JUST_WARNING if warning,
- ME_JUST_INFO if info
+ @param level 0 if error, ME_WARNING if warning,
+ ME_NOTE if info
@param sentence text to write
*/
#define ma_message_no_user(level, sentence) \
diff --git a/storage/maria/ma_control_file.c b/storage/maria/ma_control_file.c
index 6ca6d06c97f..d55b12bbd9f 100644
--- a/storage/maria/ma_control_file.c
+++ b/storage/maria/ma_control_file.c
@@ -531,7 +531,7 @@ int ma_control_file_write_and_force(LSN last_checkpoint_lsn_arg,
"Control file must be from a newer version; zero-ing out %u"
" unknown bytes in control file at offset %u", zeroed,
cf_changeable_size + cf_create_time_size);
- ma_message_no_user(ME_JUST_WARNING, msg);
+ ma_message_no_user(ME_WARNING, msg);
}
else
{
@@ -608,4 +608,124 @@ my_bool ma_control_file_inited(void)
return (control_file_fd >= 0);
}
+/**
+ Print content of aria_log_control file
+*/
+
+my_bool print_aria_log_control()
+{
+ uchar buffer[CF_MAX_SIZE];
+ char name[FN_REFLEN], uuid_str[MY_UUID_STRING_LENGTH+1];
+ const char *errmsg;
+ uint new_cf_create_time_size, new_cf_changeable_size;
+ my_off_t file_size;
+ ulong logno;
+ ulonglong trid,checkpoint_lsn;
+ int open_flags= O_BINARY | /*O_DIRECT |*/ O_RDWR | O_CLOEXEC;
+ int error= CONTROL_FILE_UNKNOWN_ERROR;
+ uint recovery_fails;
+ File file;
+ DBUG_ENTER("ma_control_file_open");
+
+ if (fn_format(name, CONTROL_FILE_BASE_NAME,
+ maria_data_root, "", MYF(MY_WME)) == NullS)
+ DBUG_RETURN(CONTROL_FILE_UNKNOWN_ERROR);
+
+ if ((file= mysql_file_open(key_file_control, name,
+ open_flags, MYF(MY_WME))) < 0)
+ {
+ errmsg= "Can't open file";
+ goto err;
+ }
+
+ file_size= mysql_file_seek(file, 0, SEEK_END, MYF(MY_WME));
+ if (file_size == MY_FILEPOS_ERROR)
+ {
+ errmsg= "Can't read size";
+ goto err;
+ }
+ if (file_size < CF_MIN_SIZE)
+ {
+ /*
+ Given that normally we write only a sector and it's atomic, the only
+ possibility for a file to be of too short size is if we crashed at the
+ very first startup, between file creation and file write. Quite unlikely
+ (and can be made even more unlikely by doing this: create a temp file,
+ write it, and then rename it to be the control file).
+ What's more likely is if someone forgot to restore the control file,
+ just did a "touch control" to try to get Maria to start, or if the
+ disk/filesystem has a problem.
+ So let's be rigid.
+ */
+ error= CONTROL_FILE_TOO_SMALL;
+ errmsg= "Size of control file is smaller than expected";
+ goto err;
+ }
+
+ /* Check if control file is unexpectedly big */
+ if (file_size > CF_MAX_SIZE)
+ {
+ error= CONTROL_FILE_TOO_BIG;
+ errmsg= "File size bigger than expected";
+ goto err;
+ }
+
+ if (mysql_file_pread(file, buffer, (size_t)file_size, 0, MYF(MY_FNABP)))
+ {
+ errmsg= "Can't read file";
+ goto err;
+ }
+
+ if (memcmp(buffer + CF_MAGIC_STRING_OFFSET,
+ CF_MAGIC_STRING, CF_MAGIC_STRING_SIZE))
+ {
+ error= CONTROL_FILE_BAD_MAGIC_STRING;
+ errmsg= "Missing valid id at start of file. File is not a valid aria control file";
+ goto err;
+ }
+
+ printf("Aria file version: %u\n", buffer[CF_VERSION_OFFSET]);
+
+ new_cf_create_time_size= uint2korr(buffer + CF_CREATE_TIME_SIZE_OFFSET);
+ new_cf_changeable_size= uint2korr(buffer + CF_CHANGEABLE_SIZE_OFFSET);
+
+ if (new_cf_create_time_size < CF_MIN_CREATE_TIME_TOTAL_SIZE ||
+ new_cf_changeable_size < CF_MIN_CHANGEABLE_TOTAL_SIZE ||
+ new_cf_create_time_size + new_cf_changeable_size != file_size)
+ {
+ error= CONTROL_FILE_INCONSISTENT_INFORMATION;
+ errmsg= "Sizes stored in control file are inconsistent";
+ goto err;
+ }
+ checkpoint_lsn= lsn_korr(buffer + new_cf_create_time_size +
+ CF_LSN_OFFSET);
+ logno= uint4korr(buffer + new_cf_create_time_size + CF_FILENO_OFFSET);
+ my_uuid2str(buffer + CF_UUID_OFFSET, uuid_str);
+ uuid_str[MY_UUID_STRING_LENGTH]= 0;
+
+ printf("Block size: %u\n", uint2korr(buffer + CF_BLOCKSIZE_OFFSET));
+ printf("maria_uuid: %s\n", uuid_str);
+ printf("last_checkpoint_lsn: " LSN_FMT "\n", LSN_IN_PARTS(checkpoint_lsn));
+ printf("last_log_number: %lu\n", (ulong) logno);
+ if (new_cf_changeable_size >= (CF_MAX_TRID_OFFSET + CF_MAX_TRID_SIZE))
+ {
+ trid= transid_korr(buffer + new_cf_create_time_size + CF_MAX_TRID_OFFSET);
+ printf("trid: %llu\n", (ulonglong) trid);
+ }
+ if (new_cf_changeable_size >= (CF_RECOV_FAIL_OFFSET + CF_RECOV_FAIL_SIZE))
+ {
+ recovery_fails=
+ (buffer + new_cf_create_time_size + CF_RECOV_FAIL_OFFSET)[0];
+ printf("recovery_failuers: %u\n", recovery_fails);
+ }
+
+ DBUG_RETURN(0);
+
+err:
+ my_printf_error(HA_ERR_INITIALIZATION,
+ "Got error '%s' when trying to use aria control file "
+ "'%s'", 0, errmsg, name);
+ DBUG_RETURN(error);
+}
+
#endif /* EXTRACT_DEFINITIONS */
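
With the --print-log-control-file option added to aria_read_log later in this commit, the function above prints output roughly like the following (the invocation path and all values here are invented for illustration; field names are taken from the printf calls above):

aria_read_log -h /path/to/datadir --print-log-control-file
Aria file version: 1
Block size: 8192
maria_uuid: 3bff2718-d84f-11e8-863c-08002779b78a
last_checkpoint_lsn: (1,0x2007)
last_log_number: 1
trid: 1243
recovery_failures: 0
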
diff --git a/storage/maria/ma_control_file.h b/storage/maria/ma_control_file.h
index 85e8f2c899d..51599f0abfc 100644
--- a/storage/maria/ma_control_file.h
+++ b/storage/maria/ma_control_file.h
@@ -70,5 +70,6 @@ int ma_control_file_write_and_force(LSN last_checkpoint_lsn_arg,
uint8 recovery_failures_arg);
int ma_control_file_end(void);
my_bool ma_control_file_inited(void);
+my_bool print_aria_log_control(void);
C_MODE_END
#endif
diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c
index 503d2420c41..24aa892d212 100644
--- a/storage/maria/ma_create.c
+++ b/storage/maria/ma_create.c
@@ -827,6 +827,11 @@ int maria_create(const char *name, enum data_file_type datafile_type,
*/
share.state.skip_redo_lsn= share.state.is_of_horizon=
share.state.create_rename_lsn= LSN_MAX;
+ /*
+ We have to mark the table as not movable as the table will contain the
+ maria_uuid and create_rename_lsn
+ */
+ share.state.changed|= STATE_NOT_MOVABLE;
}
if (datafile_type == DYNAMIC_RECORD)
@@ -1446,6 +1451,7 @@ int _ma_update_state_lsns_sub(MARIA_SHARE *share, LSN lsn, TrID create_trid,
uchar buf[LSN_STORE_SIZE * 3], *ptr;
uchar trid_buff[8];
File file= share->kfile.file;
+ DBUG_ENTER("_ma_update_state_lsns_sub");
DBUG_ASSERT(file >= 0);
if (lsn == LSN_IMPOSSIBLE)
@@ -1464,7 +1470,7 @@ int _ma_update_state_lsns_sub(MARIA_SHARE *share, LSN lsn, TrID create_trid,
0].length,
sizeof(log_array)/sizeof(log_array[0]),
log_array, NULL, NULL)))
- return res;
+ DBUG_RETURN(res);
}
for (ptr= buf; ptr < (buf + sizeof(buf)); ptr+= LSN_STORE_SIZE)
@@ -1497,13 +1503,13 @@ int _ma_update_state_lsns_sub(MARIA_SHARE *share, LSN lsn, TrID create_trid,
}
else
lsn_store(buf, share->state.create_rename_lsn);
- return (my_pwrite(file, buf, sizeof(buf),
- sizeof(share->state.header) +
- MARIA_FILE_CREATE_RENAME_LSN_OFFSET, MYF(MY_NABP)) ||
- my_pwrite(file, trid_buff, sizeof(trid_buff),
- sizeof(share->state.header) +
- MARIA_FILE_CREATE_TRID_OFFSET, MYF(MY_NABP)) ||
- (do_sync && mysql_file_sync(file, MYF(0))));
+ DBUG_RETURN(my_pwrite(file, buf, sizeof(buf),
+ sizeof(share->state.header) +
+ MARIA_FILE_CREATE_RENAME_LSN_OFFSET, MYF(MY_NABP)) ||
+ my_pwrite(file, trid_buff, sizeof(trid_buff),
+ sizeof(share->state.header) +
+ MARIA_FILE_CREATE_TRID_OFFSET, MYF(MY_NABP)) ||
+ (do_sync && mysql_file_sync(file, MYF(0))));
}
#if defined(_MSC_VER) && (_MSC_VER == 1310)
#pragma optimize("",on)
diff --git a/storage/maria/ma_crypt.c b/storage/maria/ma_crypt.c
index 42895dcdfa4..a007c14ba29 100644
--- a/storage/maria/ma_crypt.c
+++ b/storage/maria/ma_crypt.c
@@ -156,7 +156,7 @@ ma_crypt_read(MARIA_SHARE* share, uchar *buff)
{
my_printf_error(HA_ERR_UNSUPPORTED,
"Unsupported crypt scheme! type: %d iv_length: %d\n",
- MYF(ME_FATALERROR|ME_NOREFRESH),
+ MYF(ME_FATAL|ME_ERROR_LOG),
type, iv_length);
return 0;
}
@@ -464,7 +464,7 @@ static int ma_encrypt(MARIA_SHARE *share, MARIA_CRYPT_DATA *crypt_data,
my_errno= HA_ERR_DECRYPTION_FAILED;
my_printf_error(HA_ERR_DECRYPTION_FAILED,
"Unknown key id %u. Can't continue!",
- MYF(ME_FATALERROR|ME_NOREFRESH),
+ MYF(ME_FATAL|ME_ERROR_LOG),
crypt_data->scheme.key_id);
return 1;
}
@@ -481,7 +481,7 @@ static int ma_encrypt(MARIA_SHARE *share, MARIA_CRYPT_DATA *crypt_data,
my_errno= HA_ERR_DECRYPTION_FAILED;
my_printf_error(HA_ERR_DECRYPTION_FAILED,
"failed to encrypt '%s' rc: %d dstlen: %u size: %u\n",
- MYF(ME_FATALERROR|ME_NOREFRESH),
+ MYF(ME_FATAL|ME_ERROR_LOG),
share->open_file_name.str, rc, dstlen, size);
return 1;
}
@@ -508,7 +508,7 @@ static int ma_decrypt(MARIA_SHARE *share, MARIA_CRYPT_DATA *crypt_data,
my_errno= HA_ERR_DECRYPTION_FAILED;
my_printf_error(HA_ERR_DECRYPTION_FAILED,
"failed to decrypt '%s' rc: %d dstlen: %u size: %u\n",
- MYF(ME_FATALERROR|ME_NOREFRESH),
+ MYF(ME_FATAL|ME_ERROR_LOG),
share->open_file_name.str, rc, dstlen, size);
return 1;
}
diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c
index 47c796daab3..99805746109 100644
--- a/storage/maria/ma_extra.c
+++ b/storage/maria/ma_extra.c
@@ -422,7 +422,11 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function,
error= _ma_flush_table_files(info, MARIA_FLUSH_DATA | MARIA_FLUSH_INDEX,
FLUSH_KEEP, FLUSH_KEEP);
+ mysql_mutex_lock(&share->intern_lock);
+ /* Tell maria_lock_database() that we locked the intern_lock mutex */
+ info->intern_lock_locked= 1;
_ma_decrement_open_count(info, 1);
+ info->intern_lock_locked= 0;
if (share->not_flushed)
{
share->not_flushed= 0;
@@ -435,6 +439,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function,
_ma_set_fatal_error(share, HA_ERR_CRASHED);
}
}
+ mysql_mutex_unlock(&share->intern_lock);
break;
case HA_EXTRA_NORMAL: /* Theese isn't in use */
info->quick_mode= 0;
diff --git a/storage/maria/ma_info.c b/storage/maria/ma_info.c
index da44da123d2..22b9c86f21d 100644
--- a/storage/maria/ma_info.c
+++ b/storage/maria/ma_info.c
@@ -148,6 +148,6 @@ void _ma_report_error(int errcode, const LEX_STRING *name)
}
}
- my_error(errcode, MYF(ME_NOREFRESH), file_name);
+ my_error(errcode, MYF(ME_ERROR_LOG), file_name);
DBUG_VOID_RETURN;
}
diff --git a/storage/maria/ma_init.c b/storage/maria/ma_init.c
index 8af3c41a3a1..7244d95b184 100644
--- a/storage/maria/ma_init.c
+++ b/storage/maria/ma_init.c
@@ -148,7 +148,7 @@ my_bool maria_upgrade()
my_message(HA_ERR_INITIALIZATION,
"Found old style Maria log files; "
"Converting them to Aria names",
- MYF(ME_JUST_INFO));
+ MYF(ME_NOTE));
for (i= 0; i < dir->number_of_files; i++)
{
diff --git a/storage/maria/ma_locking.c b/storage/maria/ma_locking.c
index 4723c04e3cf..203fd394d26 100644
--- a/storage/maria/ma_locking.c
+++ b/storage/maria/ma_locking.c
@@ -47,7 +47,8 @@ int maria_lock_database(MARIA_HA *info, int lock_type)
}
error=0;
- mysql_mutex_lock(&share->intern_lock);
+ if (!info->intern_lock_locked)
+ mysql_mutex_lock(&share->intern_lock);
if (share->kfile.file >= 0) /* May only be false on windows */
{
switch (lock_type) {
@@ -234,7 +235,8 @@ int maria_lock_database(MARIA_HA *info, int lock_type)
}
}
#endif
- mysql_mutex_unlock(&share->intern_lock);
+ if (!info->intern_lock_locked)
+ mysql_mutex_unlock(&share->intern_lock);
DBUG_RETURN(error);
} /* maria_lock_database */
@@ -454,7 +456,7 @@ int _ma_mark_file_changed_now(register MARIA_SHARE *share)
}
/* Set uuid of file if not yet set (zerofilled file) */
if (share->base.born_transactional &&
- !(share->state.changed & STATE_NOT_MOVABLE))
+ !(share->state.org_changed & STATE_NOT_MOVABLE))
{
/* Lock table to current installation */
if (_ma_set_uuid(share, 0) ||
@@ -464,6 +466,7 @@ int _ma_mark_file_changed_now(register MARIA_SHARE *share)
TRUE, TRUE)))
goto err;
share->state.changed|= STATE_NOT_MOVABLE;
+ share->state.org_changed|= STATE_NOT_MOVABLE;
}
}
error= 0;
diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c
index 158904f6692..724beb7136c 100644
--- a/storage/maria/ma_loghandler.c
+++ b/storage/maria/ma_loghandler.c
@@ -56,6 +56,8 @@ static mysql_cond_t COND_soft_sync;
static MA_SERVICE_THREAD_CONTROL soft_sync_control=
{0, FALSE, FALSE, &LOCK_soft_sync, &COND_soft_sync};
+uint log_purge_disabled= 0;
+
/* transaction log file descriptor */
typedef struct st_translog_file
@@ -3620,6 +3622,7 @@ my_bool translog_init_with_table(const char *directory,
translog_syncs= 0;
flush_start= 0;
id_to_share= NULL;
+ log_purge_disabled= 0;
log_descriptor.directory_fd= -1;
log_descriptor.is_everything_flushed= 1;
@@ -8668,7 +8671,7 @@ my_bool translog_purge(TRANSLOG_ADDRESS low)
mysql_rwlock_unlock(&log_descriptor.open_files_lock);
translog_close_log_file(file);
}
- if (log_purge_type == TRANSLOG_PURGE_IMMIDIATE)
+ if (log_purge_type == TRANSLOG_PURGE_IMMIDIATE && ! log_purge_disabled)
{
char path[FN_REFLEN], *file_name;
file_name= translog_filename_by_fileno(i, path);
@@ -8721,7 +8724,7 @@ my_bool translog_purge_at_flush()
mysql_mutex_lock(&log_descriptor.purger_lock);
- if (unlikely(log_descriptor.min_need_file == 0))
+ if (unlikely(log_descriptor.min_need_file == 0 || log_purge_disabled))
{
DBUG_PRINT("info", ("No info about min need file => exit"));
mysql_mutex_unlock(&log_descriptor.purger_lock);
@@ -9285,3 +9288,22 @@ void dump_page(uchar *buffer, File handler)
}
dump_datapage(buffer, handler);
}
+
+
+/*
+ Handle backup calls
+*/
+
+void translog_disable_purge()
+{
+ mysql_mutex_lock(&log_descriptor.purger_lock);
+ log_purge_disabled++;
+ mysql_mutex_unlock(&log_descriptor.purger_lock);
+}
+
+void translog_enable_purge()
+{
+ mysql_mutex_lock(&log_descriptor.purger_lock);
+ log_purge_disabled--;
+ mysql_mutex_unlock(&log_descriptor.purger_lock);
+}
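
log_purge_disabled acts as a counter, so disable/enable calls may nest and purging resumes only after the last enable. A sketch of the intended call pattern around the log-copy phase of a backup (copy_aria_log_files() is a placeholder name, not a function this patch adds):

/* Sketch only: keep aria_log.* files alive while they are being copied */
static int backup_copy_logs(void)
{
  int error;
  translog_disable_purge();            /* Stop automatic purging of log files */
  error= copy_aria_log_files();        /* Placeholder for the actual copy step */
  translog_enable_purge();             /* Purging may resume after this */
  return error;
}
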
diff --git a/storage/maria/ma_loghandler.h b/storage/maria/ma_loghandler.h
index 3fb9e7d37bf..07d6a96557d 100644
--- a/storage/maria/ma_loghandler.h
+++ b/storage/maria/ma_loghandler.h
@@ -367,6 +367,8 @@ extern void dump_page(uchar *buffer, File handler);
extern my_bool translog_log_debug_info(TRN *trn,
enum translog_debug_info_type type,
uchar *info, size_t length);
+extern void translog_disable_purge(void);
+extern void translog_enable_purge(void);
enum enum_translog_status
{
@@ -520,6 +522,7 @@ typedef enum
} enum_maria_translog_purge_type;
extern ulong log_purge_type;
extern ulong log_file_size;
+extern uint log_purge_disabled; /* For backup */
typedef enum
{
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index 3d77d7e2c10..750acf73813 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -453,6 +453,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
share->state.state_length=base_pos;
/* For newly opened tables we reset the error-has-been-printed flag */
share->state.changed&= ~STATE_CRASHED_PRINTED;
+ share->state.org_changed= share->state.changed;
if (!(open_flags & HA_OPEN_FOR_REPAIR) &&
((share->state.changed & STATE_CRASHED_FLAGS) ||
@@ -473,13 +474,13 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
/*
A transactional table is not usable on this system if:
- share->state.create_trid > trnman_get_max_trid()
- - Critical as trid as stored releativel to create_trid.
+ - Critical as trid is stored relative to create_trid.
- uuid is different
STATE_NOT_MOVABLE is reset when a table is zerofilled
(has no LSN's and no trids)
- We can ignore testing uuid if STATE_NOT_MOVABLE is set, as in this
+ We can ignore testing uuid if STATE_NOT_MOVABLE is not set, as in this
case the uuid will be set in _ma_mark_file_changed().
*/
if (share->base.born_transactional &&
@@ -800,17 +801,27 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
share->state.is_of_horizon) > 0) ||
!LSN_VALID(share->state.skip_redo_lsn) ||
(cmp_translog_addr(share->state.create_rename_lsn,
- share->state.skip_redo_lsn) > 0)) &&
- !(open_flags & HA_OPEN_FOR_REPAIR))
+ share->state.skip_redo_lsn) > 0)))
{
- /*
- If in Recovery, it will not work. If LSN is invalid and not
- LSN_NEEDS_NEW_STATE_LSNS, header must be corrupted.
- In both cases, must repair.
- */
- my_errno=((share->state.changed & STATE_CRASHED_ON_REPAIR) ?
- HA_ERR_CRASHED_ON_REPAIR : HA_ERR_CRASHED_ON_USAGE);
- goto err;
+ if (!(open_flags & HA_OPEN_FOR_REPAIR))
+ {
+ /*
+ If in Recovery, it will not work. If LSN is invalid and not
+ LSN_NEEDS_NEW_STATE_LSNS, header must be corrupted.
+ In both cases, must repair.
+ */
+ my_errno=((share->state.changed & STATE_CRASHED_ON_REPAIR) ?
+ HA_ERR_CRASHED_ON_REPAIR : HA_ERR_CRASHED_ON_USAGE);
+ goto err;
+ }
+ else
+ {
+ /*
+ Open in repair mode. Ensure that we mark the table crashed, so
+ that we run auto_repair on it
+ */
+ maria_mark_crashed_share(share);
+ }
}
else if (!(open_flags & HA_OPEN_FOR_REPAIR))
{
diff --git a/storage/maria/ma_pagecrc.c b/storage/maria/ma_pagecrc.c
index 8982c7e5c09..aae11158286 100644
--- a/storage/maria/ma_pagecrc.c
+++ b/storage/maria/ma_pagecrc.c
@@ -54,11 +54,11 @@ static uint32 maria_page_crc(uint32 start, uchar *data, uint length)
@retval 1 Error
*/
-static my_bool maria_page_crc_check(uchar *page,
- pgcache_page_no_t page_no,
- MARIA_SHARE *share,
- uint32 no_crc_val,
- int data_length)
+my_bool maria_page_crc_check(uchar *page,
+ pgcache_page_no_t page_no,
+ MARIA_SHARE *share,
+ uint32 no_crc_val,
+ int data_length)
{
uint32 crc= uint4korr(page + share->block_size - CRC_SIZE), new_crc;
my_bool res;
diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c
index 27e46873f3b..3ddf2d91f16 100644
--- a/storage/maria/ma_recovery.c
+++ b/storage/maria/ma_recovery.c
@@ -184,7 +184,7 @@ void maria_recover_error_handler_hook(uint error, const char *str,
static void print_preamble()
{
- ma_message_no_user(ME_JUST_INFO, "starting recovery");
+ ma_message_no_user(ME_NOTE, "starting recovery");
}
@@ -523,7 +523,7 @@ end:
}
if (!error)
{
- ma_message_no_user(ME_JUST_INFO, "recovery done");
+ ma_message_no_user(ME_NOTE, "recovery done");
maria_recovery_changed_data= 1;
}
}
@@ -1363,6 +1363,7 @@ static int new_table(uint16 sid, const char *name, LSN lsn_of_file_id)
silently pass in the "info == NULL" test below.
*/
tprint(tracef, ", record is corrupted");
+ eprint(tracef, "\n***WARNING: %s may be corrupted", name ? name : "NULL");
info= NULL;
recovery_warnings++;
goto end;
@@ -1375,7 +1376,11 @@ static int new_table(uint16 sid, const char *name, LSN lsn_of_file_id)
" or its header is so corrupted that we cannot open it;"
" we skip it");
if (my_errno != ENOENT)
+ {
recovery_found_crashed_tables++;
+ eprint(tracef, "\n***WARNING: %s could not be opened: Error: %d",
+ name ? name : "NULL", (int) my_errno);
+ }
error= 0;
goto end;
}
@@ -1404,6 +1409,7 @@ static int new_table(uint16 sid, const char *name, LSN lsn_of_file_id)
not transactional table
*/
tprint(tracef, ", is not transactional. Ignoring open request");
+ eprint(tracef, "\n***WARNING: '%s' may be crashed", name);
error= -1;
recovery_warnings++;
goto end;
@@ -1450,6 +1456,8 @@ static int new_table(uint16 sid, const char *name, LSN lsn_of_file_id)
(kfile_len == MY_FILEPOS_ERROR))
{
tprint(tracef, ", length unknown\n");
+ eprint(tracef, "\n***WARNING: Can't read length of file '%s'",
+ share->open_file_name.str);
recovery_warnings++;
goto end;
}
@@ -3573,7 +3581,12 @@ void _ma_tmp_disable_logging_for_table(MARIA_HA *info,
should be now. info->trn may be NULL in maria_chk.
*/
if (info->trn == NULL)
+ {
info->trn= &dummy_transaction_object;
+ info->trn_next= 0;
+ info->trn_prev= 0;
+ }
+
DBUG_ASSERT(info->trn->rec_lsn == LSN_IMPOSSIBLE);
share->page_type= PAGECACHE_PLAIN_PAGE;
/* Functions below will pick up now_transactional and change callbacks */
diff --git a/storage/maria/ma_sort.c b/storage/maria/ma_sort.c
index 024b72fff2e..ccf48b80f7c 100644
--- a/storage/maria/ma_sort.c
+++ b/storage/maria/ma_sort.c
@@ -30,13 +30,11 @@
/* static variables */
#undef MIN_SORT_MEMORY
-#undef MYF_RW
#undef DISK_BUFFER_SIZE
#define MERGEBUFF 15
#define MERGEBUFF2 31
#define MIN_SORT_MEMORY (4096-MALLOC_OVERHEAD)
-#define MYF_RW MYF(MY_NABP | MY_WME | MY_WAIT_IF_FULL)
#define DISK_BUFFER_SIZE (IO_SIZE*128)
/* How many keys we can keep in memory */
diff --git a/storage/maria/ma_state.c b/storage/maria/ma_state.c
index 23cb625fc58..c658b9e667c 100644
--- a/storage/maria/ma_state.c
+++ b/storage/maria/ma_state.c
@@ -30,6 +30,7 @@
#include "maria_def.h"
#include "trnman.h"
+#include "ma_trnman.h"
#include "ma_blockrec.h"
/**
@@ -571,7 +572,6 @@ void _ma_remove_table_from_trnman(MARIA_HA *info)
MARIA_SHARE *share= info->s;
TRN *trn= info->trn;
MARIA_USED_TABLES *tables, **prev;
- MARIA_HA *handler, **prev_file;
DBUG_ENTER("_ma_remove_table_from_trnman");
DBUG_PRINT("enter", ("trn: %p used_tables: %p share: %p in_trans: %d",
trn, trn->used_tables, share, share->in_trans));
@@ -603,26 +603,9 @@ void _ma_remove_table_from_trnman(MARIA_HA *info)
DBUG_PRINT("warning", ("share: %p where not in used_tables_list", share));
}
- /* unlink table from used_instances */
- for (prev_file= (MARIA_HA**) &trn->used_instances;
- (handler= *prev_file);
- prev_file= &handler->trn_next)
- {
- if (handler == info)
- {
- *prev_file= info->trn_next;
- break;
- }
- }
- if (handler != 0)
- {
- /*
- This can only happens in case of rename of intermediate table as
- part of alter table
- */
- DBUG_PRINT("warning", ("table: %p where not in used_instances", info));
- }
- info->trn= 0; /* Not part of trans anymore */
+ /* Reset trn and remove table from used_instances */
+ _ma_reset_trn_for_table(info);
+
DBUG_VOID_RETURN;
}
diff --git a/storage/maria/ma_test2.c b/storage/maria/ma_test2.c
index 24c48c67210..b6442c2be91 100644
--- a/storage/maria/ma_test2.c
+++ b/storage/maria/ma_test2.c
@@ -1086,6 +1086,11 @@ static void get_options(int argc, char **argv)
fprintf(stderr,"record count must be >= 10 (if testflag > 2)\n");
exit(1);
}
+ if (recant <= 1)
+ {
+ fprintf(stderr,"record count must be >= 2\n");
+ exit(1);
+ }
break;
case 'e': /* maria_block_length */
case 'E':
diff --git a/storage/maria/ma_trnman.h b/storage/maria/ma_trnman.h
index 9bfd1f0d047..5b6d0e9f60d 100644
--- a/storage/maria/ma_trnman.h
+++ b/storage/maria/ma_trnman.h
@@ -18,7 +18,7 @@
/**
Sets table's trn and prints debug information
- Links table into used_instances if new_trn is not 0
+ Links table into new_trn->used_instances
@param tbl MARIA_HA of table
@param newtrn what to put into tbl->trn
@@ -34,7 +34,10 @@ static inline void _ma_set_trn_for_table(MARIA_HA *tbl, TRN *newtrn)
tbl->trn= newtrn;
/* Link into used list */
+ if (newtrn->used_instances)
+ ((MARIA_HA*) newtrn->used_instances)->trn_prev= &tbl->trn_next;
tbl->trn_next= (MARIA_HA*) newtrn->used_instances;
+ tbl->trn_prev= (MARIA_HA**) &newtrn->used_instances;
newtrn->used_instances= tbl;
}
@@ -49,6 +52,8 @@ static inline void _ma_set_tmp_trn_for_table(MARIA_HA *tbl, TRN *newtrn)
DBUG_PRINT("info",("table: %p trn: %p -> %p",
tbl, tbl->trn, newtrn));
tbl->trn= newtrn;
+ tbl->trn_prev= 0;
+ tbl->trn_next= 0; /* To avoid assert in ha_maria::close() */
}
@@ -59,7 +64,36 @@ static inline void _ma_set_tmp_trn_for_table(MARIA_HA *tbl, TRN *newtrn)
static inline void _ma_reset_trn_for_table(MARIA_HA *tbl)
{
DBUG_PRINT("info",("table: %p trn: %p -> NULL", tbl, tbl->trn));
+
+ /* The following is only false if tbl->trn == &dummy_transaction_object */
+ if (tbl->trn_prev)
+ {
+ if (tbl->trn_next)
+ tbl->trn_next->trn_prev= tbl->trn_prev;
+ *tbl->trn_prev= tbl->trn_next;
+ tbl->trn_prev= 0;
+ tbl->trn_next= 0;
+ }
tbl->trn= 0;
}
+
+/*
+ Take over the used_instances link from a trn object
+ Reset the link in the trn object
+*/
+
+static inline void relink_trn_used_instances(MARIA_HA **used_tables, TRN *trn)
+{
+ if (likely(*used_tables= (MARIA_HA*) trn->used_instances))
+ {
+ /* Check that first back link is correct */
+ DBUG_ASSERT((*used_tables)->trn_prev == (MARIA_HA **)&trn->used_instances);
+
+ /* Fix back link to point to new base for the list */
+ (*used_tables)->trn_prev= used_tables;
+ trn->used_instances= 0;
+ }
+}
+
#endif /* _ma_trnman_h */
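
The new trn_prev member follows the classic "pointer to the previous next-pointer" pattern: for the first table in the list it points at trn->used_instances itself, so unlinking in _ma_reset_trn_for_table() needs no special case for the list head. A standalone sketch of the same pattern with generic names (node, list_insert, list_remove are illustrative, not MariaDB identifiers):

/* Illustrative sketch of the prev-points-to-next-field list used above */
struct node
{
  struct node *next, **prev;           /* prev points at whatever points at us */
};

static void list_insert(struct node **head, struct node *n)
{
  if ((n->next= *head))
    n->next->prev= &n->next;
  n->prev= head;
  *head= n;
}

static void list_remove(struct node *n)
{
  if (n->next)
    n->next->prev= n->prev;
  *n->prev= n->next;
  n->next= 0;
  n->prev= 0;
}
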
diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h
index 519beb0a7c9..7da53f788cf 100644
--- a/storage/maria/maria_def.h
+++ b/storage/maria/maria_def.h
@@ -177,6 +177,7 @@ typedef struct st_maria_state_info
uint sortkey; /* sorted by this key (not used) */
uint open_count;
uint changed; /* Changed since maria_chk */
+ uint org_changed; /* Changed since open */
/**
Birthday of the table: no record in the log before this LSN should ever
be applied to the table. Updated when created, renamed, explicitly
@@ -604,7 +605,7 @@ struct st_maria_handler
{
MARIA_SHARE *s; /* Shared between open:s */
struct st_ma_transaction *trn; /* Pointer to active transaction */
- struct st_maria_handler *trn_next;
+ struct st_maria_handler *trn_next,**trn_prev;
MARIA_STATUS_INFO *state, state_save;
MARIA_STATUS_INFO *state_start; /* State at start of transaction */
MARIA_USED_TABLES *used_tables;
@@ -688,6 +689,7 @@ struct st_maria_handler
uint16 last_used_keyseg; /* For MARIAMRG */
uint8 key_del_used; /* != 0 if key_del is used */
my_bool was_locked; /* Was locked in panic */
+ my_bool intern_lock_locked; /* locked in ma_extra() */
my_bool append_insert_at_end; /* Set if concurrent insert */
my_bool quick_mode;
my_bool in_check_table; /* We are running check tables */
@@ -1416,6 +1418,9 @@ extern my_bool maria_page_crc_check_bitmap(int, PAGECACHE_IO_HOOK_ARGS *args);
extern my_bool maria_page_crc_check_data(int, PAGECACHE_IO_HOOK_ARGS *args);
extern my_bool maria_page_crc_check_index(int, PAGECACHE_IO_HOOK_ARGS *args);
extern my_bool maria_page_crc_check_none(int, PAGECACHE_IO_HOOK_ARGS *args);
+extern my_bool maria_page_crc_check(uchar *page, pgcache_page_no_t page_no,
+ MARIA_SHARE *share, uint32 no_crc_val,
+ int data_length);
extern my_bool maria_page_filler_set_bitmap(PAGECACHE_IO_HOOK_ARGS *args);
extern my_bool maria_page_filler_set_normal(PAGECACHE_IO_HOOK_ARGS *args);
extern my_bool maria_page_filler_set_none(PAGECACHE_IO_HOOK_ARGS *args);
diff --git a/storage/maria/maria_read_log.c b/storage/maria/maria_read_log.c
index 551732d8ba3..f3338714846 100644
--- a/storage/maria/maria_read_log.c
+++ b/storage/maria/maria_read_log.c
@@ -31,6 +31,7 @@ const char *default_dbug_option= "d:t:o,/tmp/aria_read_log.trace";
#endif /* DBUG_OFF */
static my_bool opt_display_only, opt_apply, opt_apply_undo, opt_silent;
static my_bool opt_check;
+static my_bool opt_print_aria_log_control;
static const char *opt_tmpdir;
static ulong opt_translog_buffer_size;
static ulonglong opt_page_buffer_size;
@@ -59,6 +60,12 @@ int main(int argc, char **argv)
goto err;
}
maria_block_size= 0; /* Use block size from file */
+ if (opt_print_aria_log_control)
+ {
+ if (print_aria_log_control())
+ goto err;
+ goto end;
+ }
/* we don't want to create a control file, it MUST exist */
if (ma_control_file_open(FALSE, TRUE))
{
@@ -209,6 +216,10 @@ static struct my_option my_long_options[] =
&opt_page_buffer_size, &opt_page_buffer_size, 0,
GET_ULL, REQUIRED_ARG, PAGE_BUFFER_INIT,
PAGE_BUFFER_INIT, SIZE_T_MAX, MALLOC_OVERHEAD, (long) IO_SIZE, 0},
+ { "print-log-control-file", 'l',
+ "Print the content of the aria_log_control_file",
+ &opt_print_aria_log_control, &opt_print_aria_log_control, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
{ "start-from-lsn", 'o', "Start reading log from this lsn",
&opt_start_from_lsn, &opt_start_from_lsn,
0, GET_ULL, REQUIRED_ARG, 0, 0, ~(longlong) 0, 0, 0, 0 },
@@ -249,7 +260,7 @@ static struct my_option my_long_options[] =
static void print_version(void)
{
- printf("%s Ver 1.3 for %s on %s\n",
+ printf("%s Ver 1.4 for %s on %s\n",
my_progname_short, SYSTEM_TYPE, MACHINE_TYPE);
}
@@ -261,7 +272,7 @@ static void usage(void)
puts("This software comes with ABSOLUTELY NO WARRANTY. This is free software,");
puts("and you are welcome to modify and redistribute it under the GPL license\n");
- puts("Display and apply log records from a Aria transaction log");
+ puts("Display or apply log records from a Aria transaction log");
puts("found in the current directory (for now)");
#ifndef IDENTICAL_PAGES_AFTER_RECOVERY
puts("\nNote: Aria is compiled without -DIDENTICAL_PAGES_AFTER_RECOVERY\n"
@@ -269,8 +280,13 @@ static void usage(void)
"files created during normal execution. This should be ok, except for\n"
"test scripts that tries to compare files before and after recovery.");
#endif
- printf("\nUsage: %s OPTIONS\n", my_progname_short);
- puts("You need to use one of -d or -a");
+ printf("\nUsage: %s OPTIONS [-d | -a] -h `aria_log_directory`\n",
+ my_progname_short);
+ printf("or\n");
+ printf("Usage: %s OPTIONS -h `aria_log_directory` "
+ "--print-aria-log-control\n\n",
+ my_progname_short);
+
my_print_help(my_long_options);
print_defaults("my", load_default_groups);
my_print_variables(my_long_options);
@@ -339,12 +355,12 @@ static void get_options(int *argc,char ***argv)
need_help= 1;
fprintf(stderr, "Too many arguments given\n");
}
- if ((opt_display_only + opt_apply) != 1)
+ if ((opt_display_only + opt_apply + opt_print_aria_log_control) != 1)
{
need_help= 1;
fprintf(stderr,
- "You must use one and only one of the options 'display-only' or "
- "'apply'\n");
+ "You must use one and only one of the options 'display-only', \n"
+ "'print-log-control-file' and 'apply'\n");
}
if (need_help)
diff --git a/storage/maria/test_ma_backup.c b/storage/maria/test_ma_backup.c
new file mode 100644
index 00000000000..2a9a6704ecb
--- /dev/null
+++ b/storage/maria/test_ma_backup.c
@@ -0,0 +1,449 @@
+/* Copyright (C) 2018 MariaDB corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA */
+
+/* Code for doing backups of Aria tables */
+
+/******************************************************************************
+ Testing ma_backup interface
+ Table creation code is taken from ma_test1
+******************************************************************************/
+
+#define ROWS_IN_TEST 100000
+
+#include "maria_def.h"
+#include "ma_blockrec.h" /* PAGE_SUFFIX_SIZE */
+#include "ma_checkpoint.h"
+#include <aria_backup.h>
+
+static int silent;
+static int create_test_table(const char *table_name, int stage);
+static int copy_table(const char *table_name, int stage);
+static void create_record(uchar *record,uint rownr);
+
+int main(int argc __attribute__((unused)), char *argv[])
+{
+ int error= 1;
+ int i;
+ char buff[FN_REFLEN];
+#ifdef SAFE_MUTEX
+ safe_mutex_deadlock_detector= 1;
+#endif
+ MY_INIT(argv[0]);
+ maria_data_root= (char *)".";
+
+ /* Maria requires that we always have a page cache */
+ if (maria_init() ||
+ (init_pagecache(maria_pagecache, maria_block_size * 2000, 0, 0,
+ maria_block_size, 0, MY_WME) == 0) ||
+ ma_control_file_open(TRUE, TRUE) ||
+ (init_pagecache(maria_log_pagecache,
+ TRANSLOG_PAGECACHE_SIZE, 0, 0,
+ TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0) ||
+ translog_init(maria_data_root, TRANSLOG_FILE_SIZE,
+ 0, 0, maria_log_pagecache,
+ TRANSLOG_DEFAULT_FLAGS, 0) ||
+ (trnman_init(0) || ma_checkpoint_init(0)))
+ {
+ fprintf(stderr, "Error in initialization\n");
+ exit(1);
+ }
+ init_thr_lock();
+
+ fn_format(buff, "test_copy", maria_data_root, "", MYF(0));
+
+ for (i= 0; i < 5 ; i++)
+ {
+ printf("Stage: %d\n", i);
+ fflush(stdout);
+ if (create_test_table(buff, i))
+ goto err;
+ if (copy_table(buff, i))
+ goto err;
+ }
+ error= 0;
+ printf("test ok\n");
+err:
+ if (error)
+ fprintf(stderr, "Test %i failed\n", i);
+ maria_end();
+ my_uuid_end();
+ my_end(MY_CHECK_ERROR);
+ exit(error);
+}
+
+
+/**
+ Example of how to read an Aria table
+*/
+
+static int copy_table(const char *table_name, int stage)
+{
+ char old_name[FN_REFLEN];
+ uchar *copy_buffer= 0;
+ ARIA_TABLE_CAPABILITIES cap;
+ ulonglong block;
+ File org_file= -1;
+ int error= 1;
+
+ strxmov(old_name, table_name, ".MAI", NullS);
+
+ if ((org_file= my_open(old_name,
+ O_RDONLY | O_SHARE | O_NOFOLLOW | O_CLOEXEC,
+ MYF(MY_WME))) < 0)
+ goto err;
+ if ((error= aria_get_capabilities(org_file, &cap)))
+ {
+ fprintf(stderr, "aria_get_capabilities failed: %d\n", error);
+ goto err;
+ }
+
+ printf("- Capabilities read. oneline_backup_safe: %d\n",
+ cap.online_backup_safe);
+ printf("- Copying index file\n");
+
+ copy_buffer= my_malloc(cap.block_size, MYF(0));
+ for (block= 0 ; ; block++)
+ {
+ if ((error= aria_read_index(org_file, &cap, block, copy_buffer)) ==
+ HA_ERR_END_OF_FILE)
+ break;
+ if (error)
+ {
+ fprintf(stderr, "aria_read_index failed: %d\n", error);
+ goto err;
+ }
+ }
+ my_close(org_file, MYF(MY_WME));
+
+
+ printf("- Copying data file\n");
+ strxmov(old_name, table_name, ".MAD", NullS);
+ if ((org_file= my_open(old_name, O_RDONLY | O_SHARE | O_NOFOLLOW | O_CLOEXEC,
+ MYF(MY_WME))) < 0)
+ goto err;
+
+ for (block= 0 ; ; block++)
+ {
+ size_t length;
+ if ((error= aria_read_data(org_file, &cap, block, copy_buffer,
+ &length)) == HA_ERR_END_OF_FILE)
+ break;
+ if (error)
+ {
+ fprintf(stderr, "aria_read_index failed: %d\n", error);
+ goto err;
+ }
+ }
+ error= 0;
+
+err:
+ my_free(copy_buffer);
+ if (org_file >= 0)
+ my_close(org_file, MYF(MY_WME));
+ if (error)
+ fprintf(stderr, "Failed in copy_table stage: %d\n", stage);
+ return error;
+}
+
+
+/* Code extracted from ma_test1.c */
+#define MAX_REC_LENGTH 1024
+
+static MARIA_COLUMNDEF recinfo[4];
+static MARIA_KEYDEF keyinfo[10];
+static HA_KEYSEG keyseg[10];
+static HA_KEYSEG uniqueseg[10];
+
+
+/**
+ Create a test table and fill it with some data
+*/
+
+static int create_test_table(const char *table_name, int type_of_table)
+{
+ MARIA_HA *file;
+ int i,error,uniques=0;
+ int key_field=FIELD_SKIP_PRESPACE,extra_field=FIELD_SKIP_ENDSPACE;
+ int key_type=HA_KEYTYPE_NUM;
+ int create_flag=0;
+ uint offset_to_key;
+ uint pack_seg=0, pack_keys= 0;
+ uint key_length;
+ uchar record[MAX_REC_LENGTH];
+ MARIA_UNIQUEDEF uniquedef;
+ MARIA_CREATE_INFO create_info;
+ enum data_file_type record_type= DYNAMIC_RECORD;
+ my_bool null_fields= 0, unique_key= 0;
+ my_bool opt_unique= 0;
+ my_bool transactional= 0;
+
+ key_length= 12;
+ switch (type_of_table) {
+ case 0:
+ break;
+ case 1:
+ create_flag|= HA_CREATE_CHECKSUM | HA_CREATE_PAGE_CHECKSUM;
+ break;
+ case 2: /* transactional */
+ create_flag|= HA_CREATE_CHECKSUM | HA_CREATE_PAGE_CHECKSUM;
+ record_type= BLOCK_RECORD;
+ transactional= 1;
+ break;
+ case 3: /* transactional */
+ create_flag|= HA_CREATE_CHECKSUM | HA_CREATE_PAGE_CHECKSUM;
+ record_type= BLOCK_RECORD;
+ transactional= 1;
+ key_field=FIELD_VARCHAR; /* varchar keys */
+ extra_field= FIELD_VARCHAR;
+ key_type= HA_KEYTYPE_VARTEXT1;
+ pack_seg|= HA_VAR_LENGTH_PART;
+ null_fields= 1;
+ break;
+ case 4: /* transactional */
+ create_flag|= HA_CREATE_CHECKSUM | HA_CREATE_PAGE_CHECKSUM;
+ record_type= BLOCK_RECORD;
+ transactional= 1;
+ key_field=FIELD_BLOB; /* blob key */
+ extra_field= FIELD_BLOB;
+ pack_seg|= HA_BLOB_PART;
+ key_type= HA_KEYTYPE_VARTEXT1;
+ break;
+ }
+
+
+ bzero((char*) recinfo,sizeof(recinfo));
+ bzero((char*) &create_info,sizeof(create_info));
+
+ /* First define 2 columns */
+ create_info.null_bytes= 1;
+ recinfo[0].type= key_field;
+ recinfo[0].length= (key_field == FIELD_BLOB ? 4+portable_sizeof_char_ptr :
+ key_length);
+ if (key_field == FIELD_VARCHAR)
+ recinfo[0].length+= HA_VARCHAR_PACKLENGTH(key_length);
+ recinfo[1].type=extra_field;
+ recinfo[1].length= (extra_field == FIELD_BLOB ? 4 + portable_sizeof_char_ptr : 24);
+ if (extra_field == FIELD_VARCHAR)
+ recinfo[1].length+= HA_VARCHAR_PACKLENGTH(recinfo[1].length);
+ recinfo[1].null_bit= null_fields ? 2 : 0;
+
+ if (opt_unique)
+ {
+ recinfo[2].type=FIELD_CHECK;
+ recinfo[2].length=MARIA_UNIQUE_HASH_LENGTH;
+ }
+
+ if (key_type == HA_KEYTYPE_VARTEXT1 &&
+ key_length > 255)
+ key_type= HA_KEYTYPE_VARTEXT2;
+
+ /* Define a key over the first column */
+ keyinfo[0].seg=keyseg;
+ keyinfo[0].keysegs=1;
+ keyinfo[0].block_length= 0; /* Default block length */
+ keyinfo[0].key_alg=HA_KEY_ALG_BTREE;
+ keyinfo[0].seg[0].type= key_type;
+ keyinfo[0].seg[0].flag= pack_seg;
+ keyinfo[0].seg[0].start=1;
+ keyinfo[0].seg[0].length=key_length;
+ keyinfo[0].seg[0].null_bit= null_fields ? 2 : 0;
+ keyinfo[0].seg[0].null_pos=0;
+ keyinfo[0].seg[0].language= default_charset_info->number;
+ if (pack_seg & HA_BLOB_PART)
+ {
+ keyinfo[0].seg[0].bit_start=4; /* Length of blob length */
+ }
+ keyinfo[0].flag = (uint8) (pack_keys | unique_key);
+
+ if (opt_unique)
+ {
+ uint start;
+ uniques=1;
+ bzero((char*) &uniquedef,sizeof(uniquedef));
+ bzero((char*) uniqueseg,sizeof(uniqueseg));
+ uniquedef.seg=uniqueseg;
+ uniquedef.keysegs=2;
+
+ /* Make a unique over all columns (except first NULL fields) */
+ for (i=0, start=1 ; i < 2 ; i++)
+ {
+ uniqueseg[i].start=start;
+ start+=recinfo[i].length;
+ uniqueseg[i].length=recinfo[i].length;
+ uniqueseg[i].language= default_charset_info->number;
+ }
+ uniqueseg[0].type= key_type;
+ uniqueseg[0].null_bit= null_fields ? 2 : 0;
+ uniqueseg[1].type= HA_KEYTYPE_TEXT;
+ if (extra_field == FIELD_BLOB)
+ {
+ uniqueseg[1].length=0; /* The whole blob */
+ uniqueseg[1].bit_start=4; /* long blob */
+ uniqueseg[1].flag|= HA_BLOB_PART;
+ }
+ else if (extra_field == FIELD_VARCHAR)
+ {
+ uniqueseg[1].flag|= HA_VAR_LENGTH_PART;
+ uniqueseg[1].type= (HA_VARCHAR_PACKLENGTH(recinfo[1].length-1) == 1 ?
+ HA_KEYTYPE_VARTEXT1 : HA_KEYTYPE_VARTEXT2);
+ }
+ }
+ else
+ uniques=0;
+
+ offset_to_key= MY_TEST(null_fields);
+ if (key_field == FIELD_BLOB || key_field == FIELD_VARCHAR)
+ offset_to_key+= 2;
+
+ if (!silent)
+ printf("- Creating Aria file\n");
+ create_info.max_rows= 0;
+ create_info.transactional= transactional;
+ if (maria_create(table_name, record_type, 1, keyinfo,2+opt_unique,recinfo,
+ uniques, &uniquedef, &create_info,
+ create_flag))
+ goto err;
+ if (!(file=maria_open(table_name,2,HA_OPEN_ABORT_IF_LOCKED)))
+ goto err;
+ if (!silent)
+ printf("- Writing key:s\n");
+
+ if (maria_begin(file))
+ goto err;
+ my_errno=0;
+ for (i= 0 ; i < ROWS_IN_TEST ; i++)
+ {
+ create_record(record,i);
+ if ((error=maria_write(file,record)))
+ goto err;
+ }
+
+ if (maria_commit(file) | maria_close(file))
+ goto err;
+ printf("- Data copied\n");
+ return 0;
+
+err:
+ printf("got error: %3d when using maria-database\n",my_errno);
+ return 1; /* skip warning */
+}
+
+
+static void create_key_part(uchar *key,uint rownr)
+{
+ if (keyinfo[0].seg[0].type == HA_KEYTYPE_NUM)
+ {
+ sprintf((char*) key,"%*d",keyinfo[0].seg[0].length,rownr);
+ }
+ else if (keyinfo[0].seg[0].type == HA_KEYTYPE_VARTEXT1 ||
+ keyinfo[0].seg[0].type == HA_KEYTYPE_VARTEXT2)
+ { /* Alpha record */
+ /* Create a key that may be easily packed */
+ bfill(key,keyinfo[0].seg[0].length,rownr < 10 ? 'A' : 'B');
+ sprintf((char*) key+keyinfo[0].seg[0].length-2,"%-2d",rownr % 100);
+ if ((rownr & 7) == 0)
+ {
+ /* Change the key to force a unpack of the next key */
+ bfill(key+3,keyinfo[0].seg[0].length-5,rownr < 10 ? 'a' : 'b');
+ }
+ }
+ else
+ { /* Alpha record */
+ if (keyinfo[0].seg[0].flag & HA_SPACE_PACK)
+ sprintf((char*) key,"%-*d",keyinfo[0].seg[0].length,rownr);
+ else
+ {
+ /* Create a key that may be easily packed */
+ bfill(key,keyinfo[0].seg[0].length,rownr < 10 ? 'A' : 'B');
+ sprintf((char*) key+keyinfo[0].seg[0].length-2,"%-2d",rownr % 100);
+ if ((rownr & 7) == 0)
+ {
+ /* Change the key to force a unpack of the next key */
+ key[1]= (rownr < 10 ? 'a' : 'b');
+ }
+ }
+ }
+}
+
+
+static uchar blob_key[MAX_REC_LENGTH];
+static uchar blob_record[MAX_REC_LENGTH+20*20];
+
+
+static void create_record(uchar *record,uint rownr)
+{
+ uchar *pos;
+ bzero((char*) record,MAX_REC_LENGTH);
+ record[0]=1; /* delete marker */
+ if (rownr == 0 && keyinfo[0].seg[0].null_bit)
+ record[0]|=keyinfo[0].seg[0].null_bit; /* Null key */
+
+ pos=record+1;
+ if (recinfo[0].type == FIELD_BLOB)
+ {
+ size_t tmp;
+ uchar *ptr;
+ create_key_part(blob_key,rownr);
+ tmp=strlen((char*) blob_key);
+ int4store(pos,tmp);
+ ptr=blob_key;
+ memcpy(pos+4,&ptr,sizeof(char*));
+ pos+=recinfo[0].length;
+ }
+ else if (recinfo[0].type == FIELD_VARCHAR)
+ {
+ size_t tmp, pack_length= HA_VARCHAR_PACKLENGTH(recinfo[0].length-1);
+ create_key_part(pos+pack_length,rownr);
+ tmp= strlen((char*) pos+pack_length);
+ if (pack_length == 1)
+ *(uchar*) pos= (uchar) tmp;
+ else
+ int2store(pos,tmp);
+ pos+= recinfo[0].length;
+ }
+ else
+ {
+ create_key_part(pos,rownr);
+ pos+=recinfo[0].length;
+ }
+ if (recinfo[1].type == FIELD_BLOB)
+ {
+ size_t tmp;
+ uchar *ptr;
+ sprintf((char*) blob_record,"... row: %d", rownr);
+ strappend((char*) blob_record, rownr % MAX_REC_LENGTH,'x');
+ tmp=strlen((char*) blob_record);
+ int4store(pos,tmp);
+ ptr=blob_record;
+ memcpy(pos+4,&ptr,sizeof(char*));
+ }
+ else if (recinfo[1].type == FIELD_VARCHAR)
+ {
+ size_t tmp, pack_length= HA_VARCHAR_PACKLENGTH(recinfo[1].length-1);
+ sprintf((char*) pos+pack_length, "... row: %d", rownr);
+ tmp= strlen((char*) pos+pack_length);
+ if (pack_length == 1)
+ *pos= (uchar) tmp;
+ else
+ int2store(pos,tmp);
+ }
+ else
+ {
+ sprintf((char*) pos,"... row: %d", rownr);
+ strappend((char*) pos,recinfo[1].length,' ');
+ }
+}
+
+#include "ma_check_standalone.h"
diff --git a/storage/maria/trnman.c b/storage/maria/trnman.c
index 5b3c9f0287a..3c5ce831f95 100644
--- a/storage/maria/trnman.c
+++ b/storage/maria/trnman.c
@@ -413,6 +413,7 @@ my_bool trnman_end_trn(TRN *trn, my_bool commit)
/* if a rollback, all UNDO records should have been executed */
DBUG_ASSERT(commit || trn->undo_lsn == 0);
DBUG_ASSERT(trn != &dummy_transaction_object);
+ DBUG_ASSERT(trn->locked_tables == 0 && trn->used_instances == 0);
DBUG_PRINT("info", ("mysql_mutex_lock LOCK_trn_list"));
mysql_mutex_lock(&LOCK_trn_list);
@@ -529,6 +530,8 @@ static void trnman_free_trn(TRN *trn)
*/
union { TRN *trn; void *v; } tmp;
+ DBUG_ASSERT(trn != &dummy_transaction_object);
+
mysql_mutex_lock(&trn->state_lock);
trn->short_id= 0;
mysql_mutex_unlock(&trn->state_lock);
diff --git a/storage/maria/unittest/ma_test_all-t b/storage/maria/unittest/ma_test_all-t
index 18b26a7bd45..8858649fb5d 100755
--- a/storage/maria/unittest/ma_test_all-t
+++ b/storage/maria/unittest/ma_test_all-t
@@ -749,9 +749,10 @@ Options
--help Show this help and exit.
--abort-on-error Abort at once in case of error.
--number-of-tests Print the total number of tests and exit.
---run-tests=... Test number(s) that should be run. You can give just
- one number or a range. For example 45..89. To run a specific
- test alone, for example test 215, use --run-tests=215..215
+--run-tests=... Test range that should be run. You can give just
+ one number, to start tests from this test, or a range.
+ For example 45..89. To run a specific test alone,
+ for example test 215, use --run-tests=215..215
Use this option with caution, because some of the tests
might depend on previous ones.
--start-from=... Alias for --run-tests
diff --git a/storage/maria/unittest/ma_test_recovery.expected b/storage/maria/unittest/ma_test_recovery.expected
index 6aaff86e6cf..38e8e4d8e93 100644
--- a/storage/maria/unittest/ma_test_recovery.expected
+++ b/storage/maria/unittest/ma_test_recovery.expected
@@ -70,7 +70,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -79,7 +79,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -88,7 +88,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -99,7 +99,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -108,7 +108,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -117,7 +117,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -167,7 +167,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -176,7 +176,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -185,7 +185,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -196,7 +196,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -205,7 +205,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -214,7 +214,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -264,7 +264,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -273,7 +273,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -282,7 +282,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -293,7 +293,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -302,7 +302,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -311,7 +311,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -361,7 +361,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -370,7 +370,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -379,7 +379,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -390,7 +390,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -399,7 +399,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -408,7 +408,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -458,7 +458,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -467,7 +467,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -476,7 +476,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -487,7 +487,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -496,7 +496,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -505,7 +505,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -555,7 +555,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -564,7 +564,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -573,7 +573,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -584,7 +584,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -593,7 +593,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -602,7 +602,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -652,7 +652,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -661,7 +661,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -670,7 +670,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -681,7 +681,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -690,7 +690,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -699,7 +699,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -749,7 +749,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -758,7 +758,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -767,7 +767,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -778,7 +778,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -787,7 +787,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -796,7 +796,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -846,7 +846,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -855,7 +855,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -864,7 +864,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -875,7 +875,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -884,7 +884,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -893,7 +893,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -943,7 +943,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -952,7 +952,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -961,7 +961,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -972,7 +972,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -981,7 +981,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -990,7 +990,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1040,7 +1040,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1049,7 +1049,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1058,7 +1058,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1069,7 +1069,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1078,7 +1078,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1087,7 +1087,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1137,7 +1137,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1146,7 +1146,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1155,7 +1155,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1166,7 +1166,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1175,7 +1175,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1184,7 +1184,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1234,7 +1234,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1243,7 +1243,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1252,7 +1252,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1263,7 +1263,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1272,7 +1272,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1281,7 +1281,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1331,7 +1331,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1340,7 +1340,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1349,7 +1349,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1360,7 +1360,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1369,7 +1369,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1378,7 +1378,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1428,7 +1428,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1437,7 +1437,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1446,7 +1446,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1457,7 +1457,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1466,7 +1466,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1475,7 +1475,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1525,7 +1525,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1534,7 +1534,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1543,7 +1543,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1554,7 +1554,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1563,7 +1563,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
@@ -1572,7 +1572,7 @@ applying log
Differences in aria_chk -dvv, recovery not yet perfect !
========DIFF START=======
7c7
-< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled,movable
+< Status: checked,analyzed,optimized keys,sorted index pages,zerofilled
---
> Status: changed
========DIFF END=======
diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp
index 925298c15e8..0f153e200ef 100644
--- a/storage/mroonga/ha_mroonga.cpp
+++ b/storage/mroonga/ha_mroonga.cpp
@@ -193,7 +193,7 @@ static mysql_mutex_t *mrn_LOCK_open;
#if MYSQL_VERSION_ID >= 50706 && !defined(MRN_MARIADB_P)
# define MRN_LEX_GET_TABLE_LIST(lex) (lex)->select_lex->table_list.first
#else
-# define MRN_LEX_GET_TABLE_LIST(lex) (lex)->select_lex.table_list.first
+# define MRN_LEX_GET_TABLE_LIST(lex) (lex)->first_select_lex()->table_list.first
#endif
#if MYSQL_VERSION_ID >= 50706 && !defined(MRN_MARIADB_P)
@@ -2856,6 +2856,7 @@ ulonglong ha_mroonga::wrapper_table_flags() const
#ifdef HA_CAN_VIRTUAL_COLUMNS
table_flags |= HA_CAN_VIRTUAL_COLUMNS;
#endif
+ table_flags |= HA_CAN_HASH_KEYS;
DBUG_RETURN(table_flags);
}
@@ -2891,6 +2892,7 @@ ulonglong ha_mroonga::storage_table_flags() const
#ifdef HA_CAN_VIRTUAL_COLUMNS
flags |= HA_CAN_VIRTUAL_COLUMNS;
#endif
+ flags |= HA_CAN_HASH_KEYS;
DBUG_RETURN(flags);
}
@@ -10573,7 +10575,7 @@ int ha_mroonga::generic_store_bulk_time(Field *field, grn_obj *buf)
bool truncated = false;
Field_time *time_field = (Field_time *)field;
MYSQL_TIME mysql_time;
- time_field->get_time(&mysql_time);
+ time_field->get_date(&mysql_time, Time::Options(current_thd));
mrn::TimeConverter time_converter;
long long int time = time_converter.mysql_time_to_grn_time(&mysql_time,
&truncated);
@@ -10593,7 +10595,7 @@ int ha_mroonga::generic_store_bulk_datetime(Field *field, grn_obj *buf)
bool truncated = false;
Field_datetime *datetime_field = (Field_datetime *)field;
MYSQL_TIME mysql_time;
- datetime_field->get_time(&mysql_time);
+ datetime_field->get_date(&mysql_time, Time::Options(current_thd));
mrn::TimeConverter time_converter;
long long int time = time_converter.mysql_time_to_grn_time(&mysql_time,
&truncated);
@@ -10654,7 +10656,7 @@ int ha_mroonga::generic_store_bulk_datetime2(Field *field, grn_obj *buf)
bool truncated = false;
Field_datetimef *datetimef_field = (Field_datetimef *)field;
MYSQL_TIME mysql_time;
- datetimef_field->get_time(&mysql_time);
+ datetimef_field->get_date(&mysql_time, Time::Options(current_thd));
mrn::TimeConverter time_converter;
long long int time = time_converter.mysql_time_to_grn_time(&mysql_time,
&truncated);
@@ -10679,7 +10681,7 @@ int ha_mroonga::generic_store_bulk_time2(Field *field, grn_obj *buf)
int error = 0;
bool truncated = false;
MYSQL_TIME mysql_time;
- field->get_time(&mysql_time);
+ field->get_date(&mysql_time, Time::Options(current_thd));
mrn::TimeConverter time_converter;
long long int time = time_converter.mysql_time_to_grn_time(&mysql_time,
&truncated);
@@ -10704,7 +10706,7 @@ int ha_mroonga::generic_store_bulk_new_date(Field *field, grn_obj *buf)
bool truncated = false;
Field_newdate *newdate_field = (Field_newdate *)field;
MYSQL_TIME mysql_date;
- newdate_field->get_time(&mysql_date);
+ newdate_field->get_date(&mysql_date, Time::Options(current_thd));
mrn::TimeConverter time_converter;
long long int time = time_converter.mysql_time_to_grn_time(&mysql_date,
&truncated);
@@ -11614,14 +11616,14 @@ int ha_mroonga::storage_encode_key_timestamp(Field *field, const uchar *key,
} else {
Field_timestamp_hires *timestamp_hires_field =
(Field_timestamp_hires *)field;
- uint fuzzy_date = 0;
uchar *ptr_backup = field->ptr;
uchar *null_ptr_backup = field->null_ptr;
TABLE *table_backup = field->table;
field->ptr = (uchar *)key;
field->null_ptr = (uchar *)(key - 1);
field->table = table;
- timestamp_hires_field->get_date(&mysql_time, fuzzy_date);
+ Temporal::Options opt(TIME_CONV_NONE, current_thd);
+ timestamp_hires_field->get_date(&mysql_time, opt);
field->ptr = ptr_backup;
field->null_ptr = null_ptr_backup;
field->table = table_backup;
@@ -11672,12 +11674,12 @@ int ha_mroonga::storage_encode_key_time(Field *field, const uchar *key,
mysql_time.time_type = MYSQL_TIMESTAMP_TIME;
} else {
Field_time_hires *time_hires_field = (Field_time_hires *)field;
- uint fuzzy_date = 0;
uchar *ptr_backup = field->ptr;
uchar *null_ptr_backup = field->null_ptr;
field->ptr = (uchar *)key;
field->null_ptr = (uchar *)(key - 1);
- time_hires_field->get_date(&mysql_time, fuzzy_date);
+ Temporal::Options opt(TIME_CONV_NONE, current_thd);
+ time_hires_field->get_date(&mysql_time, opt);
field->ptr = ptr_backup;
field->null_ptr = null_ptr_backup;
}
@@ -11746,12 +11748,12 @@ int ha_mroonga::storage_encode_key_datetime(Field *field, const uchar *key,
if (field->decimals() > 0) {
Field_datetime_hires *datetime_hires_field = (Field_datetime_hires *)field;
MYSQL_TIME mysql_time;
- uint fuzzy_date = 0;
uchar *ptr_backup = field->ptr;
uchar *null_ptr_backup = field->null_ptr;
field->ptr = (uchar *)key;
field->null_ptr = (uchar *)(key - 1);
- datetime_hires_field->get_date(&mysql_time, fuzzy_date);
+ Temporal::Options opt(TIME_CONV_NONE, current_thd);
+ datetime_hires_field->get_date(&mysql_time, opt);
field->ptr = ptr_backup;
field->null_ptr = null_ptr_backup;
mrn::TimeConverter time_converter;
diff --git a/storage/mroonga/lib/mrn_condition_converter.cpp b/storage/mroonga/lib/mrn_condition_converter.cpp
index 579292a7f89..68ffa073f4f 100644
--- a/storage/mroonga/lib/mrn_condition_converter.cpp
+++ b/storage/mroonga/lib/mrn_condition_converter.cpp
@@ -179,17 +179,17 @@ namespace mrn {
NormalizedType normalized_type = normalize_field_type(field_type);
switch (normalized_type) {
case STRING_TYPE:
- if (value_item->type() == Item::STRING_ITEM &&
+ if (value_item->is_of_type(Item::CONST_ITEM, STRING_RESULT) &&
func_type == Item_func::EQ_FUNC) {
convertable = have_index(field_item, GRN_OP_EQUAL);
}
break;
case INT_TYPE:
if (field_type == MYSQL_TYPE_ENUM) {
- convertable = (value_item->type() == Item::STRING_ITEM ||
- value_item->type() == Item::INT_ITEM);
+ convertable = value_item->is_of_type(Item::CONST_ITEM, STRING_RESULT) ||
+ value_item->is_of_type(Item::CONST_ITEM, INT_RESULT);
} else {
- convertable = value_item->type() == Item::INT_ITEM;
+ convertable = value_item->is_of_type(Item::CONST_ITEM, INT_RESULT);
}
break;
case TIME_TYPE:
@@ -215,14 +215,14 @@ namespace mrn {
NormalizedType normalized_type = normalize_field_type(field_type);
switch (normalized_type) {
case STRING_TYPE:
- if (min_item->type() == Item::STRING_ITEM &&
- max_item->type() == Item::STRING_ITEM) {
+ if (min_item->is_of_type(Item::CONST_ITEM, STRING_RESULT) &&
+ max_item->is_of_type(Item::CONST_ITEM, STRING_RESULT)) {
convertable = have_index(field_item, GRN_OP_LESS);
}
break;
case INT_TYPE:
- if (min_item->type() == Item::INT_ITEM &&
- max_item->type() == Item::INT_ITEM) {
+ if (min_item->is_of_type(Item::CONST_ITEM, INT_RESULT) &&
+ max_item->is_of_type(Item::CONST_ITEM, INT_RESULT)) {
convertable = have_index(field_item, GRN_OP_LESS);
}
break;
@@ -258,8 +258,11 @@ namespace mrn {
Item *real_value_item = value_item->real_item();
switch (field_item->field->type()) {
case MYSQL_TYPE_TIME:
- error = real_value_item->get_time(mysql_time);
+ {
+ THD *thd= current_thd;
+ error= real_value_item->get_date(thd, mysql_time, Time::Options(thd));
break;
+ }
case MYSQL_TYPE_YEAR:
mysql_time->year = static_cast<int>(value_item->val_int());
mysql_time->month = 1;
@@ -273,9 +276,13 @@ namespace mrn {
error = false;
break;
default:
- error = real_value_item->get_date(mysql_time, TIME_FUZZY_DATE);
+ {
+ THD *thd= current_thd;
+ Datetime::Options opt(TIME_FUZZY_DATES, thd);
+ error = real_value_item->get_date(thd, mysql_time, opt);
break;
}
+ }
DBUG_RETURN(error);
}
@@ -587,7 +594,7 @@ namespace mrn {
case INT_TYPE:
grn_obj_reinit(ctx_, &value_, GRN_DB_INT64, 0);
if (field_type == MYSQL_TYPE_ENUM) {
- if (const_item->type() == Item::STRING_ITEM) {
+ if (const_item->is_of_type(Item::CONST_ITEM, STRING_RESULT)) {
String *string;
string = const_item->val_str(NULL);
Field_enum *enum_field = static_cast<Field_enum *>(field_item->field);
diff --git a/storage/mroonga/vendor/groonga/vendor/plugins/CMakeLists.txt b/storage/mroonga/vendor/groonga/vendor/plugins/CMakeLists.txt
index 845c57f5716..4bee2fb7d31 100644
--- a/storage/mroonga/vendor/groonga/vendor/plugins/CMakeLists.txt
+++ b/storage/mroonga/vendor/groonga/vendor/plugins/CMakeLists.txt
@@ -15,11 +15,12 @@
file(GLOB
PLUGIN_CMAKE_LISTS_LIST
+ RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
"${CMAKE_CURRENT_SOURCE_DIR}/*/CMakeLists.txt")
if(PLUGIN_CMAKE_LISTS_LIST)
foreach(PLUGIN_CMAKE_LISTS "${PLUGIN_CMAKE_LISTS_LIST}")
string(REGEX REPLACE
- "(^${CMAKE_CURRENT_SOURCE_DIR}/+|/+CMakeLists\\.txt$)" ""
+ "(/+CMakeLists\\.txt$)" ""
PLUGIN_DIR
"${PLUGIN_CMAKE_LISTS}")
add_subdirectory("${PLUGIN_DIR}")
diff --git a/storage/myisam/TODO b/storage/myisam/TODO
deleted file mode 100644
index cad9486e1bb..00000000000
--- a/storage/myisam/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-TODO:
-
-- Let packisam find the optimal way to store keys.
-- kill when using 'myisamchk' should remove all temporary files.
-- Text search index
- (Sergei A. Golub is working on this)
-- Add '%' packed to myisamchk for compressed tables with blobs.
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index da930f67ef4..2db068acbcb 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -96,6 +96,10 @@ static MYSQL_THDVAR_ENUM(stats_method, PLUGIN_VAR_RQCMDARG,
"and NULLS_IGNORED", NULL, NULL,
MI_STATS_METHOD_NULLS_NOT_EQUAL, &myisam_stats_method_typelib);
+const char *MI_CHECK_INFO= "info";
+const char *MI_CHECK_WARNING= "warning";
+const char *MI_CHECK_ERROR= "error";
+
#ifndef DBUG_OFF
/**
Causes the thread to wait in a spin lock for a query kill signal.
@@ -130,6 +134,20 @@ static handler *myisam_create_handler(handlerton *hton,
return new (mem_root) ha_myisam(hton, table);
}
+
+static void mi_check_print(HA_CHECK *param, const char* msg_type,
+ const char *msgbuf)
+{
+ if (msg_type == MI_CHECK_INFO)
+ sql_print_information("%s.%s: %s", param->db_name, param->table_name,
+ msgbuf);
+ else if (msg_type == MI_CHECK_WARNING)
+ sql_print_warning("%s.%s: %s", param->db_name, param->table_name,
+ msgbuf);
+ else
+ sql_print_error("%s.%s: %s", param->db_name, param->table_name, msgbuf);
+}
+
// collect errors printed by mi_check routines
static void mi_check_print_msg(HA_CHECK *param, const char* msg_type,
@@ -151,16 +169,21 @@ static void mi_check_print_msg(HA_CHECK *param, const char* msg_type,
if (!thd->vio_ok())
{
- sql_print_error("%s.%s: %s", param->db_name, param->table_name, msgbuf);
+ mi_check_print(param, msg_type, msgbuf);
return;
}
if (param->testflag & (T_CREATE_MISSING_KEYS | T_SAFE_REPAIR |
T_AUTO_REPAIR))
{
- my_message(ER_NOT_KEYFILE, msgbuf, MYF(MY_WME));
+ myf flag= 0;
+ if (msg_type == MI_CHECK_INFO)
+ flag= ME_NOTE;
+ else if (msg_type == MI_CHECK_WARNING)
+ flag= ME_WARNING;
+ my_message(ER_NOT_KEYFILE, msgbuf, MYF(flag));
if (thd->variables.log_warnings > 2 && ! thd->log_all_errors)
- sql_print_error("%s.%s: %s", param->db_name, param->table_name, msgbuf);
+ mi_check_print(param, msg_type, msgbuf);
return;
}
length=(uint) (strxmov(name, param->db_name,".",param->table_name,NullS) -
@@ -185,7 +208,7 @@ static void mi_check_print_msg(HA_CHECK *param, const char* msg_type,
sql_print_error("Failed on my_net_write, writing to stderr instead: %s\n",
msgbuf);
else if (thd->variables.log_warnings > 2)
- sql_print_error("%s.%s: %s", param->db_name, param->table_name, msgbuf);
+ mi_check_print(param, msg_type, msgbuf);
if (param->need_print_msg_lock)
mysql_mutex_unlock(&param->print_msg_mutex);
@@ -592,7 +615,7 @@ void mi_check_print_error(HA_CHECK *param, const char *fmt,...)
return;
va_list args;
va_start(args, fmt);
- mi_check_print_msg(param, "error", fmt, args);
+ mi_check_print_msg(param, MI_CHECK_ERROR, fmt, args);
va_end(args);
}
@@ -600,7 +623,7 @@ void mi_check_print_info(HA_CHECK *param, const char *fmt,...)
{
va_list args;
va_start(args, fmt);
- mi_check_print_msg(param, "info", fmt, args);
+ mi_check_print_msg(param, MI_CHECK_INFO, fmt, args);
param->note_printed= 1;
va_end(args);
}
@@ -611,7 +634,7 @@ void mi_check_print_warning(HA_CHECK *param, const char *fmt,...)
param->out_flag|= O_DATA_LOST;
va_list args;
va_start(args, fmt);
- mi_check_print_msg(param, "warning", fmt, args);
+ mi_check_print_msg(param, MI_CHECK_WARNING, fmt, args);
va_end(args);
}
@@ -746,7 +769,8 @@ ulong ha_myisam::index_flags(uint inx, uint part, bool all_parts) const
else
{
flags= HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
- HA_READ_ORDER | HA_KEYREAD_ONLY | HA_DO_INDEX_COND_PUSHDOWN;
+ HA_READ_ORDER | HA_KEYREAD_ONLY | HA_DO_INDEX_COND_PUSHDOWN |
+ HA_DO_RANGE_FILTER_PUSHDOWN;
}
return flags;
}
@@ -950,7 +974,6 @@ void ha_myisam::setup_vcols_for_repair(HA_CHECK *param)
DBUG_ASSERT(file->s->base.reclength < file->s->vreclength);
param->fix_record= compute_vcols;
table->use_all_columns();
- table->vcol_set= &table->s->all_set;
}
void ha_myisam::restore_vcos_after_repair()
@@ -1041,6 +1064,15 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt)
mysql_mutex_unlock(&share->intern_lock);
info(HA_STATUS_NO_LOCK | HA_STATUS_TIME | HA_STATUS_VARIABLE |
HA_STATUS_CONST);
+ /*
+ Write a 'table is ok' message to the error log if the table is ok and
+ we have already written to the error log that the table was being checked
+ */
+ if (!error && !(table->db_stat & HA_READ_ONLY) &&
+ !mi_is_crashed(file) && thd->error_printed_to_log &&
+ (param->warning_printed || param->error_printed ||
+ param->note_printed))
+ mi_check_print_info(param, "Table is fixed");
}
}
else if (!mi_is_crashed(file) && !thd->killed)
@@ -1815,7 +1847,7 @@ bool ha_myisam::check_and_repair(THD *thd)
sql_print_information("Making backup of index file %s with extension '%s'",
file->s->index_file_name, buff);
mi_make_backup_of_index(file, check_opt.start_time,
- MYF(MY_WME | ME_JUST_WARNING));
+ MYF(MY_WME | ME_WARNING));
}
check_opt.flags=
(((myisam_recover_options &
@@ -1853,6 +1885,9 @@ int ha_myisam::index_init(uint idx, bool sorted)
active_index=idx;
if (pushed_idx_cond_keyno == idx)
mi_set_index_cond_func(file, handler_index_cond_check, this);
+ if (pushed_rowid_filter)
+ mi_set_rowid_filter_func(file, handler_rowid_filter_check,
+ handler_rowid_filter_is_active, this);
return 0;
}
@@ -1864,6 +1899,7 @@ int ha_myisam::index_end()
//pushed_idx_cond_keyno= MAX_KEY;
mi_set_index_cond_func(file, NULL, 0);
in_range_check_pushed_down= FALSE;
+ mi_set_rowid_filter_func(file, NULL, NULL, 0);
ds_mrr.dsmrr_close();
#if !defined(DBUG_OFF) && defined(SQL_SELECT_FIXED_FOR_UPDATE)
file->update&= ~HA_STATE_AKTIV; // Forget active row
@@ -1899,6 +1935,9 @@ int ha_myisam::index_read_idx_map(uchar *buf, uint index, const uchar *key,
end_range= NULL;
if (index == pushed_idx_cond_keyno)
mi_set_index_cond_func(file, handler_index_cond_check, this);
+ if (pushed_rowid_filter)
+ mi_set_rowid_filter_func(file, handler_rowid_filter_check,
+ handler_rowid_filter_is_active, this);
res= mi_rkey(file, buf, index, key, keypart_map, find_flag);
mi_set_index_cond_func(file, NULL, 0);
return res;
@@ -2564,6 +2603,14 @@ Item *ha_myisam::idx_cond_push(uint keyno_arg, Item* idx_cond_arg)
return NULL;
}
+bool ha_myisam::rowid_filter_push(Rowid_filter* rowid_filter)
+{
+ pushed_rowid_filter= rowid_filter;
+ mi_set_rowid_filter_func(file, handler_rowid_filter_check,
+ handler_rowid_filter_is_active, this);
+ return false;
+}
+
struct st_mysql_storage_engine myisam_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
@@ -2654,7 +2701,7 @@ my_bool ha_myisam::register_query_cache_table(THD *thd, const char *table_name,
If the table size is unknown the SELECT statement can't be cached.
- When concurrent inserts are disabled at table open, mi_open()
+ When concurrent inserts are disabled at table open, mi_ondopen()
does not assign a get_status() function. In this case the local
("current") status is never updated. We would wrongly think that
we cannot cache the statement.
diff --git a/storage/myisam/ha_myisam.h b/storage/myisam/ha_myisam.h
index 804963f5efc..f9f11b6f480 100644
--- a/storage/myisam/ha_myisam.h
+++ b/storage/myisam/ha_myisam.h
@@ -172,6 +172,8 @@ public:
/* Index condition pushdown implementation */
Item *idx_cond_push(uint keyno, Item* idx_cond);
+ bool rowid_filter_push(Rowid_filter* rowid_filter);
+
private:
DsMrr_impl ds_mrr;
friend ICP_RESULT index_cond_func_myisam(void *arg);
diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c
index 5f9b5414174..01078c2a264 100644
--- a/storage/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
@@ -102,6 +102,9 @@ int chk_status(HA_CHECK *param, register MI_INFO *info)
{
MYISAM_SHARE *share=info->s;
+ /* Protection for HA_EXTRA_FLUSH */
+ mysql_mutex_lock(&share->intern_lock);
+
if (mi_is_crashed_on_repair(info))
mi_check_print_warning(param,
"Table is marked as crashed and last repair failed");
@@ -121,6 +124,7 @@ int chk_status(HA_CHECK *param, register MI_INFO *info)
if (param->testflag & T_UPDATE_STATE)
param->warning_printed=save;
}
+ mysql_mutex_unlock(&share->intern_lock);
return 0;
}
@@ -4782,7 +4786,7 @@ static int replace_data_file(HA_CHECK *param, MI_INFO *info, File new_file)
my_create_backup_name(buff, "", param->backup_time);
my_printf_error(ER_GET_ERRMSG,
"Making backup of data file %s with extension '%s'",
- MYF(ME_JUST_INFO | ME_NOREFRESH), share->data_file_name,
+ MYF(ME_NOTE | ME_ERROR_LOG), share->data_file_name,
buff);
}
diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c
index dcb79a8dc3e..618c3251afc 100644
--- a/storage/myisam/mi_extra.c
+++ b/storage/myisam/mi_extra.c
@@ -334,7 +334,11 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
if (!share->temporary)
flush_key_blocks(share->key_cache, share->kfile, &share->dirty_part_map,
FLUSH_KEEP);
+ mysql_mutex_lock(&share->intern_lock);
+ /* Tell mi_lock_database() that we locked the intern_lock mutex */
+ info->intern_lock_locked= 1;
_mi_decrement_open_count(info);
+ info->intern_lock_locked= 0;
if (share->not_flushed)
{
share->not_flushed=0;
@@ -351,6 +355,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
}
if (share->base.blobs)
mi_alloc_rec_buff(info, -1, &info->rec_buff);
+ mysql_mutex_unlock(&share->intern_lock);
break;
case HA_EXTRA_NORMAL: /* Theese isn't in use */
info->quick_mode=0;
@@ -420,6 +425,16 @@ void mi_set_index_cond_func(MI_INFO *info, index_cond_func_t func,
info->index_cond_func_arg= func_arg;
}
+void mi_set_rowid_filter_func(MI_INFO *info,
+ rowid_filter_func_t check_func,
+ rowid_filter_func_t is_active_func,
+ void *func_arg)
+{
+ info->rowid_filter_func= check_func;
+ info->rowid_filter_is_active_func= is_active_func;
+ info->rowid_filter_func_arg= func_arg;
+}
+
/*
Start/Stop Inserting Duplicates Into a Table, WL#1648.
*/
diff --git a/storage/myisam/mi_info.c b/storage/myisam/mi_info.c
index 3b9288eeb83..33ff6abb32d 100644
--- a/storage/myisam/mi_info.c
+++ b/storage/myisam/mi_info.c
@@ -127,7 +127,7 @@ void mi_report_error(int errcode, const char *file_name)
if ((lgt= strlen(file_name)) > 64)
file_name+= lgt - 64;
- my_error(errcode, MYF(ME_NOREFRESH), file_name);
+ my_error(errcode, MYF(ME_ERROR_LOG), file_name);
DBUG_VOID_RETURN;
}
diff --git a/storage/myisam/mi_key.c b/storage/myisam/mi_key.c
index 4bd01dcbfa0..8bf63af8f5f 100644
--- a/storage/myisam/mi_key.c
+++ b/storage/myisam/mi_key.c
@@ -529,6 +529,19 @@ ICP_RESULT mi_check_index_cond(register MI_INFO *info, uint keynr,
return res;
}
+
+int mi_check_rowid_filter(MI_INFO *info)
+{
+ return info->rowid_filter_func(info->rowid_filter_func_arg);
+}
+
+int mi_check_rowid_filter_is_active(MI_INFO *info)
+{
+ if (info->rowid_filter_is_active_func == NULL)
+ return 0;
+ return info->rowid_filter_is_active_func(info->rowid_filter_func_arg);
+}
+
/*
Retrieve auto_increment info
diff --git a/storage/myisam/mi_locking.c b/storage/myisam/mi_locking.c
index b348429fd3c..f3030148044 100644
--- a/storage/myisam/mi_locking.c
+++ b/storage/myisam/mi_locking.c
@@ -53,7 +53,8 @@ int mi_lock_database(MI_INFO *info, int lock_type)
error= 0;
DBUG_EXECUTE_IF ("mi_lock_database_failure", error= EINVAL;);
- mysql_mutex_lock(&share->intern_lock);
+ if (!info->intern_lock_locked)
+ mysql_mutex_lock(&share->intern_lock);
if (share->kfile >= 0) /* May only be false on windows */
{
switch (lock_type) {
@@ -261,7 +262,8 @@ int mi_lock_database(MI_INFO *info, int lock_type)
}
}
#endif
- mysql_mutex_unlock(&share->intern_lock);
+ if (!info->intern_lock_locked)
+ mysql_mutex_unlock(&share->intern_lock);
if (mark_crashed)
mi_mark_crashed(info);
DBUG_RETURN(error);
diff --git a/storage/myisam/mi_range.c b/storage/myisam/mi_range.c
index 2074c873979..a464b3d1e09 100644
--- a/storage/myisam/mi_range.c
+++ b/storage/myisam/mi_range.c
@@ -22,9 +22,10 @@
#include "myisamdef.h"
#include "rt_index.h"
-static ha_rows _mi_record_pos(MI_INFO *, const uchar *, key_part_map,
- enum ha_rkey_function);
-static double _mi_search_pos(MI_INFO *,MI_KEYDEF *,uchar *, uint,uint,my_off_t);
+static double _mi_record_pos(MI_INFO *, const uchar *, key_part_map,
+ enum ha_rkey_function);
+static double _mi_search_pos(MI_INFO *,MI_KEYDEF *,uchar *, uint,uint,
+ my_off_t,my_bool);
static uint _mi_keynr(MI_INFO *info,MI_KEYDEF *,uchar *, uchar *,uint *);
/*
@@ -48,7 +49,8 @@ static uint _mi_keynr(MI_INFO *info,MI_KEYDEF *,uchar *, uchar *,uint *);
ha_rows mi_records_in_range(MI_INFO *info, int inx,
key_range *min_key, key_range *max_key)
{
- ha_rows start_pos,end_pos,res;
+ ha_rows res;
+ double start_pos,end_pos,diff;
DBUG_ENTER("mi_records_in_range");
if ((inx = _mi_check_index(info,inx)) < 0)
@@ -94,16 +96,27 @@ ha_rows mi_records_in_range(MI_INFO *info, int inx,
#endif
case HA_KEY_ALG_BTREE:
default:
- start_pos= (min_key ? _mi_record_pos(info, min_key->key,
- min_key->keypart_map, min_key->flag)
- : (ha_rows) 0);
+ start_pos= (min_key ?_mi_record_pos(info, min_key->key,
+ min_key->keypart_map, min_key->flag)
+ : (double) 0);
end_pos= (max_key ? _mi_record_pos(info, max_key->key,
max_key->keypart_map, max_key->flag)
- : info->state->records + (ha_rows) 1);
+ : (double) info->state->records);
res= (end_pos < start_pos ? (ha_rows) 0 :
- (end_pos == start_pos ? (ha_rows) 1 : end_pos-start_pos));
+ (end_pos == start_pos ? (ha_rows) 1 : (ha_rows) (end_pos-start_pos)));
if (start_pos == HA_POS_ERROR || end_pos == HA_POS_ERROR)
res=HA_POS_ERROR;
+ else
+ {
+ diff= end_pos - start_pos;
+ if (diff >= 0)
+ {
+ if (!(res= (ha_rows) (diff + 0.5)))
+ res= 1;
+ }
+ else
+ res= 0;
+ }
}
if (info->s->concurrent_insert)
@@ -115,11 +128,25 @@ ha_rows mi_records_in_range(MI_INFO *info, int inx,
}
- /* Find relative position (in records) for key in index-tree */
+/*
+ Finding an approximate relative position of a key tuple among all index
+ key tuples would not be hard if we considered B-trees where all key
+ tuples were contained only in leaf nodes. If we consider a B-tree where
+ key tuples are also stored in non-leaf nodes, we have to convert such a
+ tree into a tree of the first type. The transformation procedure is
+ simple: the key tuple k goes after the last key tuple in the rightmost
+ sub-tree whose pointer is coupled with k. As a result of this
+ transformation each leaf node except the rightmost one in the tree will
+ contain one extra key tuple following those originally belonging to
+ the leaf.
+*/
+
+
+/* Find relative position (in records) for key in index-tree */
-static ha_rows _mi_record_pos(MI_INFO *info, const uchar *key,
- key_part_map keypart_map,
- enum ha_rkey_function search_flag)
+static double _mi_record_pos(MI_INFO *info, const uchar *key,
+ key_part_map keypart_map,
+ enum ha_rkey_function search_flag)
{
uint inx=(uint) info->lastinx, nextflag, key_len;
MI_KEYDEF *keyinfo=info->s->keyinfo+inx;
@@ -175,13 +202,13 @@ static ha_rows _mi_record_pos(MI_INFO *info, const uchar *key,
*/
pos=_mi_search_pos(info,keyinfo,key_buff,key_len,
nextflag | SEARCH_SAVE_BUFF | SEARCH_UPDATE,
- info->s->state.key_root[inx]);
+ info->s->state.key_root[inx], TRUE);
if (pos >= 0.0)
{
- DBUG_PRINT("exit",("pos: %ld",(ulong) (pos*info->state->records)));
- DBUG_RETURN((ulong) (pos*info->state->records+0.5));
+ DBUG_PRINT("exit",("pos: %g",(pos*info->state->records)));
+ DBUG_RETURN(pos*info->state->records);
}
- DBUG_RETURN(HA_POS_ERROR);
+ DBUG_RETURN((double) (HA_POS_ERROR));
}
@@ -191,7 +218,7 @@ static ha_rows _mi_record_pos(MI_INFO *info, const uchar *key,
static double _mi_search_pos(register MI_INFO *info,
register MI_KEYDEF *keyinfo,
uchar *key, uint key_len, uint nextflag,
- register my_off_t pos)
+ register my_off_t pos, my_bool last_in_level)
{
int flag;
uint nod_flag,keynr,UNINIT_VAR(max_keynr);
@@ -222,7 +249,8 @@ static double _mi_search_pos(register MI_INFO *info,
if (flag > 0 && ! nod_flag)
offset= 1.0;
else if ((offset=_mi_search_pos(info,keyinfo,key,key_len,nextflag,
- _mi_kpos(nod_flag,keypos))) < 0)
+ _mi_kpos(nod_flag,keypos),
+ last_in_level && after_key)) < 0)
DBUG_RETURN(offset);
}
else
@@ -241,13 +269,15 @@ static double _mi_search_pos(register MI_INFO *info,
Matches keynr + [0-1]
*/
if ((offset=_mi_search_pos(info,keyinfo,key,key_len,SEARCH_FIND,
- _mi_kpos(nod_flag,keypos))) < 0)
+ _mi_kpos(nod_flag,keypos),
+ last_in_level && after_key)) < 0)
DBUG_RETURN(offset); /* Read error */
}
}
DBUG_PRINT("info",("keynr: %d offset: %g max_keynr: %d nod: %d flag: %d",
keynr,offset,max_keynr,nod_flag,flag));
- DBUG_RETURN((keynr+offset)/(max_keynr+1));
+ DBUG_RETURN((keynr+offset-MY_TEST(!nod_flag))/
+ (max_keynr+MY_TEST(nod_flag || !last_in_level)));
err:
DBUG_PRINT("exit",("Error: %d",my_errno));
DBUG_RETURN (-1.0);
diff --git a/storage/myisam/mi_rkey.c b/storage/myisam/mi_rkey.c
index 1dddb8b49ad..c0f9b2046fc 100644
--- a/storage/myisam/mi_rkey.c
+++ b/storage/myisam/mi_rkey.c
@@ -120,7 +120,9 @@ int mi_rkey(MI_INFO *info, uchar *buf, int inx, const uchar *key,
(search_flag != HA_READ_KEY_EXACT ||
last_used_keyseg != keyinfo->seg + keyinfo->keysegs)) ||
(info->index_cond_func &&
- (res= mi_check_index_cond(info, inx, buf)) == ICP_NO_MATCH))
+ (res= mi_check_index_cond(info, inx, buf)) == ICP_NO_MATCH) ||
+ (mi_check_rowid_filter_is_active(info) &&
+ !mi_check_rowid_filter(info)))
{
uint not_used[2];
/*
diff --git a/storage/myisam/mi_rnext.c b/storage/myisam/mi_rnext.c
index 6e3701abe6b..c3af209fd71 100644
--- a/storage/myisam/mi_rnext.c
+++ b/storage/myisam/mi_rnext.c
@@ -102,7 +102,9 @@ int mi_rnext(MI_INFO *info, uchar *buf, int inx)
while ((info->s->concurrent_insert &&
info->lastpos >= info->state->data_file_length) ||
(info->index_cond_func &&
- (icp_res= mi_check_index_cond(info, inx, buf)) == ICP_NO_MATCH))
+ (icp_res= mi_check_index_cond(info, inx, buf)) == ICP_NO_MATCH) ||
+ (mi_check_rowid_filter_is_active(info) &&
+ !mi_check_rowid_filter(info)))
{
/*
If we are at the last key on the key page, allow writers to
diff --git a/storage/myisam/mi_rnext_same.c b/storage/myisam/mi_rnext_same.c
index d6856459ae7..ac818bfa2da 100644
--- a/storage/myisam/mi_rnext_same.c
+++ b/storage/myisam/mi_rnext_same.c
@@ -95,7 +95,9 @@ int mi_rnext_same(MI_INFO *info, uchar *buf)
*/
if (info->lastpos < info->state->data_file_length &&
(!info->index_cond_func ||
- (icp_res= mi_check_index_cond(info, inx, buf)) != ICP_NO_MATCH))
+ (icp_res= mi_check_index_cond(info, inx, buf)) != ICP_NO_MATCH) &&
+ (!mi_check_rowid_filter_is_active(info) ||
+ mi_check_rowid_filter(info)))
break;
}
}
diff --git a/storage/myisam/mi_rprev.c b/storage/myisam/mi_rprev.c
index 27fbda95574..a78bab6a040 100644
--- a/storage/myisam/mi_rprev.c
+++ b/storage/myisam/mi_rprev.c
@@ -59,7 +59,9 @@ int mi_rprev(MI_INFO *info, uchar *buf, int inx)
while ((share->concurrent_insert &&
info->lastpos >= info->state->data_file_length) ||
(info->index_cond_func &&
- (icp_res= mi_check_index_cond(info, inx, buf)) == ICP_NO_MATCH))
+ (icp_res= mi_check_index_cond(info, inx, buf)) == ICP_NO_MATCH) ||
+ (mi_check_rowid_filter_is_active(info) &&
+ !mi_check_rowid_filter(info)))
{
/*
If we are at the last (i.e. first?) key on the key page,
diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h
index e350626f192..8ba2bbcc209 100644
--- a/storage/myisam/myisamdef.h
+++ b/storage/myisam/myisamdef.h
@@ -296,6 +296,7 @@ struct st_myisam_info
uint preload_buff_size; /* When preloading indexes */
myf lock_wait; /* is 0 or MY_SHORT_WAIT */
my_bool was_locked; /* Was locked in panic */
+ my_bool intern_lock_locked; /* locked in mi_extra() */
my_bool append_insert_at_end; /* Set if concurrent insert */
my_bool quick_mode;
/* If info->buff can't be used for rnext */
@@ -305,6 +306,9 @@ struct st_myisam_info
my_bool create_unique_index_by_sort;
index_cond_func_t index_cond_func; /* Index condition function */
void *index_cond_func_arg; /* parameter for the func */
+ rowid_filter_func_t rowid_filter_func; /* rowid filter check function */
+ rowid_filter_func_t rowid_filter_is_active_func; /* is active function */
+ void *rowid_filter_func_arg; /* parameter for the func */
THR_LOCK_DATA lock;
uchar *rtree_recursion_state; /* For RTREE */
int rtree_recursion_depth;
@@ -724,14 +728,20 @@ int mi_munmap_file(MI_INFO *info);
void mi_remap_file(MI_INFO *info, my_off_t size);
ICP_RESULT mi_check_index_cond(MI_INFO *info, uint keynr, uchar *record);
+int mi_check_rowid_filter(MI_INFO *info);
+int mi_check_rowid_filter_is_active(MI_INFO *info);
/* Functions needed by mi_check */
int killed_ptr(HA_CHECK *param);
void mi_check_print_error(HA_CHECK *param, const char *fmt, ...);
void mi_check_print_warning(HA_CHECK *param, const char *fmt, ...);
void mi_check_print_info(HA_CHECK *param, const char *fmt, ...);
pthread_handler_t thr_find_all_keys(void *arg);
-extern void mi_set_index_cond_func(MI_INFO *info, index_cond_func_t func,
+extern void mi_set_index_cond_func(MI_INFO *info, index_cond_func_t check_func,
void *func_arg);
+extern void mi_set_rowid_filter_func(MI_INFO *info,
+ rowid_filter_func_t check_func,
+ rowid_filter_func_t is_active_func,
+ void *func_arg);
int flush_blocks(HA_CHECK *param, KEY_CACHE *key_cache, File file,
ulonglong *dirty_part_map);
diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c
index 4ea1602bec3..f69dd0196b3 100644
--- a/storage/myisam/myisampack.c
+++ b/storage/myisam/myisampack.c
@@ -2148,7 +2148,7 @@ static my_off_t write_huff_tree(HUFF_TREE *huff_tree, uint trees)
*/
if (!(packed_tree=(uint*) my_alloca(sizeof(uint)*length*2)))
{
- my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_FATALERROR),
+ my_error(EE_OUTOFMEMORY, MYF(ME_BELL+ME_FATAL),
sizeof(uint)*length*2);
return 0;
}
diff --git a/storage/myisam/sort.c b/storage/myisam/sort.c
index 533f2cd2aa6..aef3c9b42a8 100644
--- a/storage/myisam/sort.c
+++ b/storage/myisam/sort.c
@@ -28,12 +28,10 @@
/* static variables */
-#undef MYF_RW
#undef DISK_BUFFER_SIZE
#define MERGEBUFF 15
#define MERGEBUFF2 31
-#define MYF_RW MYF(MY_NABP | MY_WME | MY_WAIT_IF_FULL)
#define DISK_BUFFER_SIZE (IO_SIZE*128)
/* How many keys we can keep in memory */
diff --git a/storage/oqgraph/ha_oqgraph.cc b/storage/oqgraph/ha_oqgraph.cc
index bd2224e1ad2..fcb18962b5b 100644
--- a/storage/oqgraph/ha_oqgraph.cc
+++ b/storage/oqgraph/ha_oqgraph.cc
@@ -1211,7 +1211,7 @@ ha_rows ha_oqgraph::records_in_range(uint inx, key_range *min_key,
min_key->flag != HA_READ_KEY_EXACT ||
max_key->flag != HA_READ_AFTER_KEY)
{
- if (min_key->length == key->key_part[0].store_length && !key->key_part[0].field->is_null()) /* ensure select * from x where latch is null is consistent with no latch */
+ if (min_key && min_key->length == key->key_part[0].store_length && !key->key_part[0].field->is_null()) /* ensure select * from x where latch is null is consistent with no latch */
{
// If latch is not null and equals 0, return # nodes
diff --git a/storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.result b/storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.result
index 3ef61cc3e37..e0d9b3efe99 100644
--- a/storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.result
+++ b/storage/oqgraph/mysql-test/oqgraph/regression_mdev6282.result
@@ -28,14 +28,12 @@ SELECT `db`.`version`, `db`.`nodeID`
FROM `version_history` AS `v` INNER JOIN `db_history` AS `db` ON `db`.`nodeID` = `v`.`linkid`
WHERE `latch` = 'breadth_first' AND `origid` = '1' ORDER BY `weight` DESC LIMIT 1;
version nodeID
-0.0.3 3
disconnect con1;
connect con2,localhost,root,,test;
SELECT `db`.`version`, `db`.`nodeID`
FROM `version_history` AS `v` INNER JOIN `db_history` AS `db` ON `db`.`nodeID` = `v`.`linkid`
WHERE `latch` = 'breadth_first' AND `origid` = '1' ORDER BY `weight` DESC LIMIT 1;
version nodeID
-0.0.3 3
disconnect con2;
connect con3,localhost,root,,test;
DROP TABLE version_history;
diff --git a/storage/oqgraph/oqgraph_thunk.cc b/storage/oqgraph/oqgraph_thunk.cc
index 5e254450a2b..09cc9c1798b 100644
--- a/storage/oqgraph/oqgraph_thunk.cc
+++ b/storage/oqgraph/oqgraph_thunk.cc
@@ -109,7 +109,7 @@ const std::string& oqgraph3::cursor::record_position() const
if (_graph->_cursor->_index >= 0)
{
key_copy((uchar*) _graph->_cursor->_key.data(), table.record[0],
- table.s->key_info + _index, table.s->key_info[_index].key_length, true);
+ table.key_info + _index, table.key_info[_index].key_length, true);
}
_graph->_stale= false;
@@ -184,7 +184,7 @@ int oqgraph3::cursor::restore_position()
if (int rc= table.file->ha_index_read_map(
table.record[0], (const uchar*) _key.data(),
(key_part_map)(1 << _parts) - 1,
- table.s->key_info[_index].user_defined_key_parts == _parts ?
+ table.key_info[_index].user_defined_key_parts == _parts ?
HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT))
{
table.file->ha_index_end();
@@ -368,7 +368,7 @@ int oqgraph3::cursor::seek_to(
if (!destid)
{
int i= 0;
- for( ::KEY *key_info= table.s->key_info,
+ for( ::KEY *key_info= table.key_info,
*key_end= key_info + table.s->keys;
key_info < key_end; ++key_info, ++i)
{
@@ -399,7 +399,7 @@ int oqgraph3::cursor::seek_to(
else if (!origid)
{
int i= 0;
- for( ::KEY *key_info= table.s->key_info,
+ for( ::KEY *key_info= table.key_info,
*key_end= key_info + table.s->keys;
key_info < key_end; ++key_info, ++i)
{
@@ -430,7 +430,7 @@ int oqgraph3::cursor::seek_to(
else
{
int i= 0;
- for( ::KEY *key_info= table.s->key_info,
+ for( ::KEY *key_info= table.key_info,
*key_end= key_info + table.s->keys;
key_info < key_end; ++key_info, ++i)
{
@@ -485,7 +485,7 @@ int oqgraph3::cursor::seek_to(
if (int rc= table.file->ha_index_read_map(
table.record[0], (uchar*) _key.data(),
(key_part_map) ((1U << _parts) - 1),
- table.s->key_info[_index].user_defined_key_parts == _parts ?
+ table.key_info[_index].user_defined_key_parts == _parts ?
HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT))
{
table.file->ha_index_end();
diff --git a/storage/perfschema/unittest/pfs_server_stubs.cc b/storage/perfschema/unittest/pfs_server_stubs.cc
index d7154067fc2..12f54a193c2 100644
--- a/storage/perfschema/unittest/pfs_server_stubs.cc
+++ b/storage/perfschema/unittest/pfs_server_stubs.cc
@@ -24,8 +24,6 @@
struct sql_digest_storage;
-volatile bool ready_to_exit= false;
-
uint lower_case_table_names= 0;
CHARSET_INFO *files_charset_info= NULL;
CHARSET_INFO *system_charset_info= NULL;
diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc
index c985bd0d194..8ebd1f9de0f 100644
--- a/storage/rocksdb/ha_rocksdb.cc
+++ b/storage/rocksdb/ha_rocksdb.cc
@@ -7301,7 +7301,7 @@ int ha_rocksdb::create_cfs(
tbl_def_arg->full_tablename().c_str(),
table_arg->key_info[i].key_part[part].field->field_name.str);
- my_error(ER_INTERNAL_ERROR, MYF(ME_JUST_WARNING), buf);
+ my_error(ER_INTERNAL_ERROR, MYF(ME_WARNING), buf);
}
}
}
@@ -7816,14 +7816,14 @@ int ha_rocksdb::create(const char *const name, TABLE *const table_arg,
// The `rocksdb_datadir` setting should be used to configure RocksDB data
// directory.
print_error(HA_ERR_ROCKSDB_TABLE_DATA_DIRECTORY_NOT_SUPPORTED,
- MYF(ME_JUST_WARNING));
+ MYF(ME_WARNING));
DBUG_RETURN(HA_WRONG_CREATE_OPTION);
}
if (create_info->index_file_name) {
// Similar check for INDEX DIRECTORY as well.
print_error(HA_ERR_ROCKSDB_TABLE_INDEX_DIRECTORY_NOT_SUPPORTED,
- MYF(ME_JUST_WARNING));
+ MYF(ME_WARNING));
DBUG_RETURN(HA_WRONG_CREATE_OPTION);
}
@@ -10767,6 +10767,11 @@ int ha_rocksdb::info(uint flag) {
stats.data_file_length += m_table_handler->m_mtcache_size;
}
+ // Do as InnoDB does: stats.records=0 confuses the optimizer
+ if (stats.records == 0 && !(flag & (HA_STATUS_TIME | HA_STATUS_OPEN))) {
+ stats.records++;
+ }
+
if (rocksdb_debug_optimizer_n_rows > 0)
stats.records = rocksdb_debug_optimizer_n_rows;
}
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result
index 32c0537c780..1e59dd0daec 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/add_index_inplace.result
@@ -391,33 +391,46 @@ set global rocksdb_force_flush_memtable_now=1;
ALTER TABLE t1 ADD INDEX kj(j), ALGORITHM=INPLACE;
larger
1
+# restart
larger
1
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
larger
1
+# restart
larger
1
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select 1300 < 1300 * 1.5 as "same";
same
@@ -476,6 +489,7 @@ INSERT INTO t1 (a, b) VALUES (4, 20);
set global rocksdb_force_flush_memtable_now=1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SHOW INDEX in t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result
index a8d5c07072c..96d25b2e669 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key.result
@@ -62,6 +62,7 @@ a b
123 bbb
321 ccc
321 ccc
+# restart
INSERT INTO t1 (a,b) VALUES (45,'bob');
SELECT * FROM t1;
a b
@@ -171,6 +172,7 @@ a b
123 bbb
321 ccc
321 ccc
+# restart
INSERT INTO t1 (a,b) VALUES (45,'bob');
SELECT * FROM t1;
a b
@@ -254,6 +256,7 @@ SHOW COLUMNS IN t1;
Field Type Null Key Default Extra
a int(11) YES NULL
b char(8) YES NULL
+# restart
INSERT INTO t1 (a,b) VALUES (35,'foo');
INSERT INTO t1 (a,b) VALUES (35,'foo');
INSERT INTO t1 (a,b) VALUES (36,'foo');
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result
index 5d947603ec5..27722b23927 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/allow_no_primary_key_with_sk.result
@@ -63,6 +63,7 @@ a b
123 bbb
321 ccc
321 ccc
+# restart
INSERT INTO t1 (a,b) VALUES (45,'bob');
SELECT * FROM t1;
a b
@@ -147,6 +148,7 @@ a b
123 bbb
321 ccc
321 ccc
+# restart
INSERT INTO t1 (a,b) VALUES (45,'bob');
SELECT * FROM t1;
a b
@@ -228,6 +230,7 @@ a b
123 bbb
321 ccc
321 ccc
+# restart
INSERT INTO t1 (a,b) VALUES (45,'bob');
SELECT * FROM t1;
a b
@@ -311,6 +314,7 @@ a b
123 bbb
321 ccc
321 ccc
+# restart
INSERT INTO t1 (a,b) VALUES (45,'bob');
SELECT * FROM t1;
a b
@@ -401,6 +405,7 @@ a b
123 bbb
321 ccc
321 ccc
+# restart
INSERT INTO t1 (a,b) VALUES (45,'bob');
SELECT * FROM t1;
a b
@@ -484,6 +489,7 @@ a b
123 bbb
321 ccc
321 ccc
+# restart
INSERT INTO t1 (a,b) VALUES (45,'bob');
SELECT * FROM t1;
a b
@@ -568,6 +574,7 @@ a b
123 bbb
321 ccc
321 ccc
+# restart
INSERT INTO t1 (a,b) VALUES (45,'bob');
SELECT * FROM t1;
a b
@@ -747,6 +754,7 @@ a b
123 bbb
321 ccc
321 ccc
+# restart
INSERT INTO t1 (a,b) VALUES (45,'bob');
SELECT * FROM t1;
a b
@@ -788,6 +796,7 @@ t1 CREATE TABLE `t1` (
`i` int(11) NOT NULL AUTO_INCREMENT,
KEY `i` (`i`)
) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
+# restart
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result b/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result
index b666a17c81c..3459b6f189b 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/analyze_table.result
@@ -5,26 +5,32 @@ CREATE TABLE t2 (pk INT PRIMARY KEY, a INT(11), b CHAR(8)) ENGINE=rocksdb;
INSERT INTO t1 VALUES (3,3,'c');
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t2 VALUES (1,4,'d');
ANALYZE NO_WRITE_TO_BINLOG TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
INSERT INTO t1 VALUES (4,5,'e');
INSERT INTO t2 VALUES (2,6,'f');
ANALYZE LOCAL TABLE t1, t2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
DROP TABLE t1, t2;
CREATE TABLE t1 (pk INT PRIMARY KEY, a INT(11), KEY(a)) ENGINE=rocksdb;
INSERT INTO t1 VALUES (1,1),(2,2),(3,4),(4,7);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1 VALUES (5,8),(6,10),(7,11),(8,12);
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
DROP TABLE t1;
#
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result
index 100bc5fd638..c6dcb023e06 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_secondary.result
@@ -6,6 +6,7 @@ pk a
3 1
2 2
1 3
+# restart
INSERT INTO t1 (pk) VALUES (4);
SELECT * FROM t1;
pk a
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result
index a14ffdec2e3..6bd6cea97de 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/autoinc_vars_thread_2.result
@@ -92,5 +92,7 @@ disconnect con2;
disconnect con1;
disconnect con0;
SELECT * FROM t1 ORDER BY pk INTO OUTFILE <output_file>;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
All pk values matched their expected values
DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result
index 6ad9867049d..773fb68e07e 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter3.result
@@ -1,3 +1,4 @@
+# restart
CREATE TABLE `linktable` (
`id1` bigint(20) unsigned NOT NULL DEFAULT '0',
`id1_type` int(10) unsigned NOT NULL DEFAULT '0',
@@ -74,6 +75,7 @@ select case when variable_value-@c = 0 then 'true' else 'false' end from informa
case when variable_value-@c = 0 then 'true' else 'false' end
true
DROP TABLE linktable;
+# restart
#
# bloom filter prefix is 20 byte
# Create a key which is longer than that, so that we see that
@@ -98,6 +100,7 @@ insert into t1 values (10,1,1,0x12FFFFFFFFFF,1);
insert into t1 values (11,1,1,0x12FFFFFFFFFF,1);
insert into t1 values (20,2,2,0x12FFFFFFFFFF,1);
insert into t1 values (21,2,2,0x12FFFFFFFFFF,1);
+# restart
explain
select * from t1 where kp0=1 and kp1=1 and kp2=0x12FFFFFFFFFF order by kp3 desc;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result
index 1f4d1a641a2..c4a1c5f4668 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bloomfilter4.result
@@ -20,6 +20,8 @@ END IF;
SET id1_cond = id1_cond + 1;
END WHILE;
END//
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
"Skipping bloom filter"
SET session rocksdb_skip_bloom_filter_on_read=1;
CALL select_test();
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result
index 21417caf760..2a5f63f7bf7 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load.result
@@ -43,8 +43,11 @@ t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
@@ -71,5 +74,6 @@ count(b)
2500000
longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp
test.bulk_load.tmp
+# restart
disconnect other;
DROP TABLE t1, t2, t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result
index 4ea8cbccc1e..995da9e88eb 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_errors.result
@@ -86,6 +86,7 @@ INSERT INTO t1 VALUES(51479+0.333333333,1);
DROP TABLE t1;
SET @@global.table_open_cache=@orig_table_open_cache;
FOUND 1 /RocksDB: Error [0-9]+ finalizing bulk load while closing handler/ in rocksdb.bulk_load_errors.3.err
+# restart
CREATE TABLE t1 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB;
CREATE TABLE t2 (pk INT, PRIMARY KEY (pk)) ENGINE=ROCKSDB;
SET rocksdb_bulk_load=1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result
index 484c2a89c3a..e5b3612d6a4 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf.result
@@ -43,8 +43,11 @@ t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
@@ -71,5 +74,6 @@ count(b)
2500000
longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp
test.bulk_load.tmp
+# restart
disconnect other;
DROP TABLE t1, t2, t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result
index 35a2845cb42..36c81b7eb21 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_cf_and_data.result
@@ -43,8 +43,11 @@ t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
@@ -71,5 +74,6 @@ count(b)
2500000
longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp
test.bulk_load.tmp
+# restart
disconnect other;
DROP TABLE t1, t2, t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result
index 12013539017..685d3d25f19 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_rev_data.result
@@ -43,8 +43,11 @@ t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL 0 N
t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_bin NULL partitioned 0 N
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
@@ -71,5 +74,6 @@ count(b)
2500000
longfilenamethatvalidatesthatthiswillgetdeleted.bulk_load.tmp
test.bulk_load.tmp
+# restart
disconnect other;
DROP TABLE t1, t2, t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result
index 444f997bf48..632b3b47eb5 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted.result
@@ -80,8 +80,11 @@ t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned 0 N
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted_rev.result b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted_rev.result
index dea69b3b089..b5a56b21f5e 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted_rev.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/bulk_load_unsorted_rev.result
@@ -80,8 +80,11 @@ t2 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL
t3 ROCKSDB 10 Fixed 2500000 # # # # 0 NULL NULL NULL NULL latin1_swedish_ci NULL partitioned 0 N
ANALYZE TABLE t1, t2, t3;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
SHOW TABLE STATUS WHERE name LIKE 't%';
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment Max_index_length Temporary
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result b/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result
index d037c636a16..dcaca8b72bc 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/cardinality.result
@@ -1,3 +1,5 @@
+# restart
+set use_stat_tables= 'COMPLEMENTARY';
CREATE TABLE t0 (id int PRIMARY KEY, a int, INDEX ix_a (a)) engine=rocksdb;
insert into t0 values (0, 0),(1, 1),(2, 2),(3, 3),(4, 4),
(5, 4),(6, 4),(7, 4),(8, 4),(9, 4);
@@ -66,6 +68,7 @@ SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema
table_name table_rows
t1 100000
restarting...
+# restart
show index in t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 PRIMARY 1 id A 100000 NULL NULL LSMTREE
@@ -88,6 +91,7 @@ ENGINE=ROCKSDB;
SET GLOBAL rocksdb_force_flush_memtable_now = 1;
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
cardinality of the columns after 'a' must be equal to the cardinality of column 'a'
SELECT CARDINALITY INTO @c FROM information_schema.statistics WHERE TABLE_NAME='t2' AND INDEX_NAME='c' AND COLUMN_NAME='a';
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result b/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result
index 62a6dbbdaca..2c1cab7fcac 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/compression_zstd.result
@@ -1,2 +1,3 @@
+# restart: --rocksdb_default_cf_options=compression_per_level=kZSTDNotFinalCompression;compression_opts=-14:4:0;
create table t (id int primary key) engine=rocksdb;
drop table t;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result
index dfa5c5b2590..668f7e8f47e 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_index_inplace.result
@@ -57,6 +57,7 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`a`),
KEY `kc` (`c`)
) ENGINE=ROCKSDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
+# restart
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -117,6 +118,7 @@ SHOW INDEX IN t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 0 PRIMARY 1 a A 0 NULL NULL LSMTREE
t1 0 kc 1 c A 0 NULL NULL YES LSMTREE
+# restart
INSERT INTO t1 (b,c) VALUES (1,2);
INSERT INTO t1 (b,c) VALUES (3,4);
INSERT INTO t1 (b,c) VALUES (5,6);
@@ -139,6 +141,7 @@ INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3);
ALTER TABLE t1 ADD KEY idx ( col1, col2 );
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
ALTER TABLE t1 DROP COLUMN col2;
ALTER TABLE t1 DROP COLUMN col3;
@@ -148,6 +151,7 @@ INSERT INTO t1 (col1,col2,col3) VALUES (1,2,3);
ALTER TABLE t1 ADD KEY idx ( col1, col2 );
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
ALTER TABLE t1 DROP COLUMN col2;
ALTER TABLE t1 DROP COLUMN col3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result
index fad2939d206..aec4138c722 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table2.result
@@ -10,6 +10,7 @@ call mtr.add_suppression("Column family 'rev:cf2' not found");
set global rocksdb_compact_cf = 'cf1';
set global rocksdb_compact_cf = 'rev:cf2';
set global rocksdb_signal_drop_index_thread = 1;
+# restart
CREATE TABLE t1 (
a int not null,
b int not null,
@@ -38,10 +39,12 @@ DELETE FROM t1;
DELETE FROM t2;
DELETE FROM t3;
DELETE FROM t4;
+# restart
DELETE FROM t1;
DELETE FROM t4;
DELETE FROM t1;
DELETE FROM t4;
+# restart
CREATE TABLE t5 (
a int not null,
b int not null,
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result b/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result
index 7a33fa83cb4..954e6079bba 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/drop_table3.result
@@ -6,6 +6,7 @@ call mtr.add_suppression("Column family 'rev:cf2' not found");
set global rocksdb_compact_cf = 'cf1';
set global rocksdb_compact_cf = 'rev:cf2';
set global rocksdb_signal_drop_index_thread = 1;
+# restart
CREATE TABLE t1 (
a int not null,
b int not null,
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result
index 7fb9055083b..7c4f57b61bd 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/index_merge_rocksdb.result
@@ -28,6 +28,7 @@ insert into t1 values (1, 100, 100), (1, 200, 200), (1, 300, 300);
set global rocksdb_force_flush_memtable_now=1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain select * from t1 where key1 = 1;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/max_open_files.result b/storage/rocksdb/mysql-test/rocksdb/r/max_open_files.result
index 5d34f4e9640..d3aac194bc4 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/max_open_files.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/max_open_files.result
@@ -19,3 +19,4 @@ SELECT @@global.rocksdb_max_open_files;
SELECT FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files;
FLOOR(@@global.open_files_limit / 2) = @@global.rocksdb_max_open_files
1
+# restart
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/misc.result b/storage/rocksdb/mysql-test/rocksdb/r/misc.result
index 6087928b80f..b2e5d04f6f8 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/misc.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/misc.result
@@ -46,6 +46,8 @@ db User NULL NULL
event db NULL NULL
event name NULL NULL
func name NULL NULL
+global_priv Host NULL NULL
+global_priv User NULL NULL
gtid_slave_pos domain_id NULL NULL
gtid_slave_pos sub_id NULL NULL
help_category help_category_id NULL NULL
@@ -56,8 +58,6 @@ help_relation help_keyword_id NULL NULL
help_relation help_topic_id NULL NULL
help_topic help_topic_id NULL NULL
help_topic name NULL NULL
-host Db NULL NULL
-host Host NULL NULL
index_stats db_name NULL NULL
index_stats index_name NULL NULL
index_stats prefix_arity NULL NULL
@@ -92,5 +92,3 @@ time_zone_transition Time_zone_id NULL NULL
time_zone_transition Transition_time NULL NULL
time_zone_transition_type Time_zone_id NULL NULL
time_zone_transition_type Transition_type_id NULL NULL
-user Host NULL NULL
-user User NULL NULL
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result b/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result
index 3a631d2925b..6ea13872033 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/no_merge_sort.result
@@ -1,63 +1,123 @@
Warnings:
Note 1051 Unknown table 'test.ti_nk'
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
skip_merge_sort
true
DROP TABLE ti_nk;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/partition.result b/storage/rocksdb/mysql-test/rocksdb/r/partition.result
index 1ba966e9e07..a7f2a6112c1 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/partition.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/partition.result
@@ -46,6 +46,7 @@ CREATE TABLE t1 (i INT, j INT, k INT, PRIMARY KEY (i)) ENGINE = ROCKSDB PARTITIO
Table Op Msg_type Msg_text
test.t1 optimize status OK
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
Table Op Msg_type Msg_text
test.t1 repair status OK
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result b/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result
index 89ebe760384..0a42e730fe6 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/records_in_range.result
@@ -1,3 +1,4 @@
+# restart
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (
i INT,
@@ -144,7 +145,7 @@ Warnings:
Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`b` >= 500 and `test`.`t1`.`b` <= 500
explain extended select * from t1 where a< 750 and b> 500 and b< 750;
id select_type table type possible_keys key key_len ref rows filtered Extra
-1 SIMPLE t1 range ka,kb ka 5 NULL 1000 100.00 Using index condition; Using where
+1 SIMPLE t1 range ka,kb ka 5 NULL 1000 5.00 Using index condition; Using where
Warnings:
Note 1003 select `test`.`t1`.`i` AS `i`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` < 750 and `test`.`t1`.`b` > 500 and `test`.`t1`.`b` < 750
drop index ka on t1;
@@ -183,7 +184,7 @@ insert into linktable values (1,1,4,1,1,1,1,1,1);
set global rocksdb_force_flush_memtable_now = true;
explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL 2 Using where
+1 SIMPLE linktable ref PRIMARY,id1_type PRIMARY 16 const,const 2 Using where
drop table linktable;
CREATE TABLE `linktable` (
`id1` bigint(20) unsigned NOT NULL DEFAULT '0',
@@ -205,6 +206,6 @@ insert into linktable values (1,1,4,1,1,1,1,1,1);
set global rocksdb_force_flush_memtable_now = true;
explain select id1, id2, link_type, visibility, data, time, version from linktable where id1 = 1 and link_type = 1 and id2 in (1, 2);
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE linktable range PRIMARY,id1_type PRIMARY 24 NULL 2 Using where
+1 SIMPLE linktable ref PRIMARY,id1_type PRIMARY 16 const,const 2 Using where
drop table linktable;
DROP TABLE t1;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
index e18bb5f9c0c..d8d78a2f571 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result
@@ -1399,6 +1399,7 @@ create table t1 (i int primary key auto_increment) engine=RocksDB;
insert into t1 values (null);
insert into t1 values (null);
SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = @ORIG_PAUSE_BACKGROUND_WORK;
+# restart
SET @ORIG_PAUSE_BACKGROUND_WORK = @@ROCKSDB_PAUSE_BACKGROUND_WORK;
SET GLOBAL ROCKSDB_PAUSE_BACKGROUND_WORK = 1;
insert into t1 values (null);
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result
index 6c3d85b760c..80b6301a07c 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_options.result
@@ -7,6 +7,7 @@ primary key (a) comment 'z') engine=rocksdb;
insert into t1 values (1);
insert into t2 values (2);
insert into t3 values (2);
+# restart
Default options for all column families:
@@ -32,6 +33,7 @@ z WRITE_BUFFER_SIZE 12582912
__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000
__system__ TARGET_FILE_SIZE_BASE 1048576
__system__ WRITE_BUFFER_SIZE 12582912
+# restart: --rocksdb_override_cf_options=cf1={write_buffer_size=8m;target_file_size_base=2m};cf2={write_buffer_size=16m;max_bytes_for_level_multiplier=8};z={target_file_size_base=4m};
Individualized options for column families:
@@ -57,5 +59,6 @@ z WRITE_BUFFER_SIZE 12582912
__system__ MAX_BYTES_FOR_LEVEL_MULTIPLIER 10.000000
__system__ TARGET_FILE_SIZE_BASE 1048576
__system__ WRITE_BUFFER_SIZE 12582912
+# restart: --rocksdb_override_cf_options=cf1={write_buffer_size=8m;target_file_size_base=2m};cf2={write_buffer_size=16m;max_bytes_for_level_multiplier=8};z={target_file_size_base=4m};
drop table t1,t2,t3;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result
index 99186153796..24b93ee3395 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_cf_per_partition.result
@@ -350,6 +350,8 @@ cf_name
another_cf_for_p5
ANALYZE TABLE t2;
Table Op Msg_type Msg_text
+test.t2 analyze status Engine-independent statistics collected
+test.t2 analyze Warning Engine-independent statistics are not collected for column 'col5'
test.t2 analyze status OK
EXPLAIN PARTITIONS SELECT * FROM t2 WHERE col3 = 0x4 AND col2 = 0x34567;
id select_type table partitions type possible_keys key key_len ref rows Extra
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result
index 505487c08ba..dbc89f32d90 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_checksums.result
@@ -125,4 +125,5 @@ set session debug_dbug= "-d,myrocks_simulate_bad_key_checksum1";
set @@global.rocksdb_store_row_debug_checksums=@save_rocksdb_store_row_debug_checksums;
set @@global.rocksdb_verify_row_debug_checksums=@save_rocksdb_verify_row_debug_checksums;
set @@global.rocksdb_checksums_pct=@save_rocksdb_checksums_pct;
+# restart
drop table t2,t3,t4;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result
index 2fde11c2a08..70bef39eceb 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_parts.result
@@ -131,6 +131,7 @@ DROP DATABASE db3;
CREATE TABLE t1 (a INT) ENGINE=RocksDB PARTITION BY HASH(a) PARTITIONS 2;
INSERT INTO t1 (a) VALUES (1),(2);
ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
+# restart
SELECT 1;
1
1
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result
index a925c21e188..bcda9341f1f 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb_range2.result
@@ -1,13 +1,14 @@
create table t1 (id1 bigint, id2 bigint, c1 bigint, c2 bigint, c3 bigint, c4 bigint, c5 bigint, c6 bigint, c7 bigint, primary key (id1, id2), index i(c1, c2));
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
select count(*) from t1;
count(*)
10000
explain select c1 from t1 where c1 > 5 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range i i 9 NULL # Using where; Using index
+1 SIMPLE t1 index i i 18 NULL # Using where; Using index
drop table t1;
#
# MDEV-17414: MyROCKS order desc limit 1 fails
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/select.result b/storage/rocksdb/mysql-test/rocksdb/r/select.result
index 22a6ca9bc87..2bb2bd636dc 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/select.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/select.result
@@ -114,6 +114,8 @@ SELECT t1.a, t2.b FROM t2, t1 WHERE t1.a = t2.a ORDER BY t2.b, t1.a
INTO OUTFILE '<DATADIR>/select.out'
CHARACTER SET utf8
FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '''';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
200,'bar'
200,'bar'
100,'foobar'
@@ -125,8 +127,12 @@ INTO DUMPFILE '<DATADIR>/select.dump';
ERROR 42000: Result consisted of more than one row
SELECT t1.*, t2.* FROM t1, t2 ORDER BY t2.b, t1.a, t2.a, t1.b, t1.pk, t2.pk LIMIT 1
INTO DUMPFILE '<DATADIR>/select.dump';
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
1z2200bar3
SELECT MIN(a), MAX(a) FROM t1 INTO @min, @max;
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
SELECT @min, @max;
@min @max
1 200
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/skip_validate_tmp_table.result b/storage/rocksdb/mysql-test/rocksdb/r/skip_validate_tmp_table.result
index 92906f22b1e..d67c4cbbbc7 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/skip_validate_tmp_table.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/skip_validate_tmp_table.result
@@ -1,4 +1,5 @@
create table t1 (pk int primary key) engine=rocksdb;
+# restart
show tables;
Tables_in_test
#mysql50#t1#sql-test
@@ -11,6 +12,7 @@ show tables;
Tables_in_test
#mysql50#t1#sql-test
t2
+# restart
show tables;
Tables_in_test
create table t2 (pk int primary key) engine=rocksdb;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/statistics.result b/storage/rocksdb/mysql-test/rocksdb/r/statistics.result
index 78344991360..9fdd50a7e14 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/statistics.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/statistics.result
@@ -37,6 +37,7 @@ table_name data_length>0 index_length>0
t1 1 1
t2 1 1
t3 1 1
+# restart
SELECT table_name, table_rows FROM information_schema.tables WHERE table_schema = DATABASE();
table_name table_rows
t1 100000
@@ -49,8 +50,11 @@ t2 1 1
t3 1 1
analyze table t1,t2,t3,t4,t5;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
+test.t2 analyze status Engine-independent statistics collected
test.t2 analyze status OK
+test.t3 analyze status Engine-independent statistics collected
test.t3 analyze status OK
test.t4 analyze Error Table 'test.t4' doesn't exist
test.t4 analyze status Operation failed
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result
index eda560fefdb..7c5631a2c97 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/truncate_table3.result
@@ -6,6 +6,7 @@ call mtr.add_suppression("Column family 'rev:cf2' not found");
set global rocksdb_compact_cf = 'cf1';
set global rocksdb_compact_cf = 'rev:cf2';
set global rocksdb_signal_drop_index_thread = 1;
+# restart
CREATE TABLE t1 (
a int not null,
b int not null,
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result b/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result
index 7397ff64ab1..668a927669a 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_decimal.result
@@ -25,6 +25,7 @@ insert into t1 select pk+10000, 9.0, 9.0, 'extra-data' from t1;
insert into t1 select pk+100000, 9.0, 9.0, 'extra-data' from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# The following can't use index-only:
explain select * from t1 where col1 between -8 and 8;
@@ -88,6 +89,7 @@ insert into t1 select pk+10000, col1+20000, col2+20000, 'extra-data' from t1;
insert into t1 select pk+100000, col1+20000, col2+20000, 'extra-data' from t1;
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
explain
select col1, col2 from t1 force index(key1) where col1 between -800 and 800;
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result
index a7e086fde66..4c5ef47590a 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/type_varchar.result
@@ -191,6 +191,7 @@ insert into t1 values (2, 'a', 'a');
insert into t1 values (3, 'a \t', 'a-tab');
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Must show 'using index' for latin1_bin and utf8_bin:
explain
@@ -306,6 +307,7 @@ insert into t1 values (2, 'a', 'a');
insert into t1 values (3, 'a \t', 'a-tab');
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Must show 'using index' for latin1_bin and utf8_bin:
explain
@@ -421,6 +423,7 @@ insert into t1 values (2, 'a', 'a');
insert into t1 values (3, 'a \t', 'a-tab');
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Must show 'using index' for latin1_bin and utf8_bin:
explain
@@ -536,6 +539,7 @@ insert into t1 values (2, 'a', 'a');
insert into t1 values (3, 'a \t', 'a-tab');
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Must show 'using index' for latin1_bin and utf8_bin:
explain
@@ -651,6 +655,7 @@ insert into t1 values (2, 'a', 'a');
insert into t1 values (3, 'a \t', 'a-tab');
analyze table t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Must show 'using index' for latin1_bin and utf8_bin:
explain
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result b/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result
index e8456457cdd..eda49c58d7e 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/use_direct_reads_writes.result
@@ -5,10 +5,13 @@ select plugin_name, plugin_type from information_schema.plugins where plugin_nam
plugin_name plugin_type
ROCKSDB STORAGE ENGINE
Checking direct reads
+# restart
FOUND 1 /enable both use_direct_reads/ in use_direct_reads_writes.err
Checking direct writes
+# restart
FOUND 1 /enable both use_direct_io_for_flush_and_compaction/ in use_direct_reads_writes.err
Checking rocksdb_flush_log_at_trx_commit
+# restart
FOUND 1 /rocksdb_flush_log_at_trx_commit needs to be/ in use_direct_reads_writes.err
Validate flush_log settings when direct writes is enabled
set global rocksdb_flush_log_at_trx_commit=0;
@@ -16,3 +19,4 @@ set global rocksdb_flush_log_at_trx_commit=1;
ERROR 42000: Variable 'rocksdb_flush_log_at_trx_commit' can't be set to the value of '1'
set global rocksdb_flush_log_at_trx_commit=2;
ERROR 42000: Variable 'rocksdb_flush_log_at_trx_commit' can't be set to the value of '2'
+# restart
diff --git a/storage/rocksdb/mysql-test/rocksdb/r/xa.result b/storage/rocksdb/mysql-test/rocksdb/r/xa.result
index 30cfe94e0b7..12ae2b474b6 100644
--- a/storage/rocksdb/mysql-test/rocksdb/r/xa.result
+++ b/storage/rocksdb/mysql-test/rocksdb/r/xa.result
@@ -18,6 +18,7 @@ XA PREPARE 'xa2';
connection default;
SELECT * FROM t1;
a
+# restart
connect con3,localhost,root,,test;
XA RECOVER;
formatID gtrid_length bqual_length data
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test b/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test
index 21e4b49e560..1dcb176e4fa 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test
+++ b/storage/rocksdb/mysql-test/rocksdb/t/cardinality.test
@@ -2,6 +2,8 @@
--source include/restart_mysqld.inc
+set use_stat_tables= 'COMPLEMENTARY';
+
# Test memtable cardinality statistics
CREATE TABLE t0 (id int PRIMARY KEY, a int, INDEX ix_a (a)) engine=rocksdb;
diff --git a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def
index 9fbc0ace0d2..91bf571371e 100644
--- a/storage/rocksdb/mysql-test/rocksdb/t/disabled.def
+++ b/storage/rocksdb/mysql-test/rocksdb/t/disabled.def
@@ -83,3 +83,4 @@ autoinc_debug: Fails with wrong results
drop_table: Hangs on shutdown
allow_to_start_after_corruption : result difference and assertion failure
index_merge_rocksdb2 : result difference
+rocksdb_range2 : result difference, update after MDEV-16746 is fixed
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/r/mdev12179.result b/storage/rocksdb/mysql-test/rocksdb_rpl/r/mdev12179.result
index 9c20fea97ae..a1e501f78f4 100644
--- a/storage/rocksdb/mysql-test/rocksdb_rpl/r/mdev12179.result
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/r/mdev12179.result
@@ -2,6 +2,7 @@ include/master-slave.inc
[connection master]
connection server_2;
include/stop_slave.inc
+SET GLOBAL gtid_cleanup_batch_size = 999999999;
CHANGE MASTER TO master_use_gtid=slave_pos;
SET sql_log_bin=0;
CREATE TABLE mysql.gtid_slave_pos_innodb LIKE mysql.gtid_slave_pos;
@@ -41,6 +42,8 @@ a
1
SELECT * FROM mysql.gtid_slave_pos ORDER BY sub_id;
domain_id sub_id server_id seq_no
+0 1 1 1
+0 2 1 2
0 3 1 3
0 4 1 4
SELECT * FROM ( SELECT * FROM mysql.gtid_slave_pos_innodb
@@ -121,6 +124,21 @@ Transactions_multi_engine 6
DELETE FROM t1 WHERE a >= 100;
DELETE FROM t2 WHERE a >= 100;
DELETE FROM t3 WHERE a >= 100;
+connection server_1;
+include/save_master_gtid.inc
+connection server_2;
+include/sync_with_master_gtid.inc
+SELECT COUNT(*)>=10 FROM mysql.gtid_slave_pos;
+COUNT(*)>=10
+1
+SELECT COUNT(*)>=10 FROM ( SELECT * FROM mysql.gtid_slave_pos_innodb
+UNION ALL SELECT * FROM mysql.gtid_slave_pos_innodb_redundant) inner_select;
+COUNT(*)>=10
+1
+SELECT COUNT(*)>=10 FROM mysql.gtid_slave_pos_rocksdb;
+COUNT(*)>=10
+1
+SET GLOBAL gtid_cleanup_batch_size = 3;
connection server_2;
include/stop_slave.inc
SET sql_log_bin=0;
diff --git a/storage/rocksdb/mysql-test/rocksdb_rpl/t/mdev12179.test b/storage/rocksdb/mysql-test/rocksdb_rpl/t/mdev12179.test
index e0d16e7f242..631d9ca533f 100644
--- a/storage/rocksdb/mysql-test/rocksdb_rpl/t/mdev12179.test
+++ b/storage/rocksdb/mysql-test/rocksdb_rpl/t/mdev12179.test
@@ -4,6 +4,12 @@
--connection server_2
--source include/stop_slave.inc
+
+# Set GTID cleanup limit high enough that cleanup will not run and we
+# can rely on consistent table output in .result.
+--let $old_gtid_cleanup_batch_size=`SELECT @@GLOBAL.gtid_cleanup_batch_size`
+SET GLOBAL gtid_cleanup_batch_size = 999999999;
+
CHANGE MASTER TO master_use_gtid=slave_pos;
SET sql_log_bin=0;
CREATE TABLE mysql.gtid_slave_pos_innodb LIKE mysql.gtid_slave_pos;
@@ -89,6 +95,82 @@ DELETE FROM t2 WHERE a >= 100;
DELETE FROM t3 WHERE a >= 100;
+# Create a bunch more GTIDs in mysql.gtid_slave_pos* tables to test with.
+--connection server_1
+--disable_query_log
+let $i=10;
+while ($i) {
+ eval INSERT INTO t1 VALUES (300+$i);
+ eval INSERT INTO t2 VALUES (300+$i);
+ eval INSERT INTO t3 VALUES (300+$i);
+ dec $i;
+}
+--enable_query_log
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/sync_with_master_gtid.inc
+
+# Check that we have many rows in mysql.gtid_slave_pos now (since
+# @@gtid_cleanup_batch_size was set to a huge value). No need to check
+# for an exact number, since that will require changing .result if
+# anything changes prior to this point, and we just need to know that
+# we still have some data in the tables to make the following
+# test effective.
+SELECT COUNT(*)>=10 FROM mysql.gtid_slave_pos;
+SELECT COUNT(*)>=10 FROM ( SELECT * FROM mysql.gtid_slave_pos_innodb
+ UNION ALL SELECT * FROM mysql.gtid_slave_pos_innodb_redundant) inner_select;
+SELECT COUNT(*)>=10 FROM mysql.gtid_slave_pos_rocksdb;
+
+# Check that old GTID rows will be deleted when batch delete size is
+# set reasonably. Old row deletion is not 100% deterministic (by design), so
+# we must wait for it to occur, but it should occur eventually.
+SET GLOBAL gtid_cleanup_batch_size = 3;
+let $i=40;
+--disable_query_log
+--let $keep_include_silent=1
+while ($i) {
+ let N=`SELECT 1+($i MOD 3)`;
+ --connection server_1
+ eval UPDATE t$N SET a=a+1 WHERE a=(SELECT MAX(a) FROM t$N);
+ --source include/save_master_gtid.inc
+ --connection server_2
+ --source include/sync_with_master_gtid.inc
+ let $j=50;
+ while ($j) {
+ let $is_done=`SELECT SUM(a)=1 FROM (
+ SELECT COUNT(*) AS a FROM mysql.gtid_slave_pos
+ UNION ALL
+ SELECT COUNT(*) AS a FROM ( SELECT * FROM mysql.gtid_slave_pos_innodb
+ UNION ALL SELECT * FROM mysql.gtid_slave_pos_innodb_redundant) inner_select
+ UNION ALL
+ SELECT COUNT(*) AS a FROM mysql.gtid_slave_pos_rocksdb) outer_select`;
+ if ($is_done) {
+ let $j=0;
+ }
+ if (!$is_done) {
+ real_sleep 0.1;
+ dec $j;
+ }
+ }
+ dec $i;
+ if ($is_done) {
+ let $i=0;
+ }
+}
+--enable_query_log
+--let $keep_include_silent=0
+if (!$is_done) {
+ --echo Timed out waiting for mysql.gtid_slave_pos* tables to be cleaned up
+}
+
+--disable_query_log
+DELETE FROM t1 WHERE a >= 100;
+DELETE FROM t2 WHERE a >= 100;
+DELETE FROM t3 WHERE a >= 100;
+--enable_query_log
+
+
# Test status variables Rpl_transactions_multi_engine and Transactions_gtid_foreign_engine.
# Have mysql.gtid_slave_pos* for myisam and innodb but not rocksdb.
--connection server_2
@@ -223,6 +305,9 @@ SHOW STATUS LIKE "%transactions%engine";
SET sql_log_bin=0;
DROP TABLE mysql.gtid_slave_pos_innodb;
SET sql_log_bin=1;
+--disable_query_log
+eval SET GLOBAL gtid_cleanup_batch_size = $old_gtid_cleanup_batch_size;
+--enable_query_log
--connection server_1
DROP TABLE t1;
diff --git a/storage/sequence/mysql-test/sequence/simple.result b/storage/sequence/mysql-test/sequence/simple.result
index ea94e91b1ba..d921b80bf0f 100644
--- a/storage/sequence/mysql-test/sequence/simple.result
+++ b/storage/sequence/mysql-test/sequence/simple.result
@@ -124,9 +124,9 @@ Tables_in_test
explain select * from seq_1_to_15_step_2;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE seq_1_to_15_step_2 index NULL PRIMARY 8 NULL 8 Using index
-explain select * from seq_1_to_15_step_2 where seq > 4;
+explain select * from seq_1_to_15_step_2 where seq > 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE seq_1_to_15_step_2 range PRIMARY PRIMARY 8 NULL 6 Using where; Using index
+1 SIMPLE seq_1_to_15_step_2 range PRIMARY PRIMARY 8 NULL 3 Using where; Using index
explain select * from seq_1_to_15_step_2 where seq between 4 and 9;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE seq_1_to_15_step_2 range PRIMARY PRIMARY 8 NULL 3 Using where; Using index
diff --git a/storage/sequence/mysql-test/sequence/simple.test b/storage/sequence/mysql-test/sequence/simple.test
index fbf2b0ebc66..00d2464cf08 100644
--- a/storage/sequence/mysql-test/sequence/simple.test
+++ b/storage/sequence/mysql-test/sequence/simple.test
@@ -52,7 +52,7 @@ show open tables from test;
show tables;
# row estimates
explain select * from seq_1_to_15_step_2;
-explain select * from seq_1_to_15_step_2 where seq > 4;
+explain select * from seq_1_to_15_step_2 where seq > 10;
explain select * from seq_1_to_15_step_2 where seq between 4 and 9;
explain select * from seq_1_to_15_step_2 where seq between 20 and 30;
explain select * from seq_1_to_15_step_2 where seq between 4 and 6;
diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc
index 90409213843..86332a741c6 100644
--- a/storage/sphinx/ha_sphinx.cc
+++ b/storage/sphinx/ha_sphinx.cc
@@ -2746,7 +2746,9 @@ const Item * ha_sphinx::cond_push ( const Item *cond )
if ( !m_pShare->m_bSphinxQL )
{
// on non-QL tables, intercept query=value condition for SELECT
- if (!( args[0]->type()==Item::FIELD_ITEM && args[1]->type()==Item::STRING_ITEM ))
+ if (!( args[0]->type()==Item::FIELD_ITEM &&
+ args[1]->is_of_type(Item::CONST_ITEM,
+ STRING_RESULT)))
break;
Item_field * pField = (Item_field *) args[0];
@@ -2762,7 +2764,9 @@ const Item * ha_sphinx::cond_push ( const Item *cond )
} else
{
- if (!( args[0]->type()==Item::FIELD_ITEM && args[1]->type()==Item::INT_ITEM ))
+ if (!( args[0]->type()==Item::FIELD_ITEM &&
+ args[1]->is_of_type(Item::CONST_ITEM,
+ INT_RESULT)))
break;
// on QL tables, intercept id=value condition for DELETE
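Aside: the ha_sphinx hunk above adapts the condition-pushdown check to the 10.4 Item API, where literal items are reported as Item::CONST_ITEM plus a result type instead of the old STRING_ITEM/INT_ITEM kinds. A minimal, self-contained sketch of that check pattern, using stand-in types rather than the real Item hierarchy, might look like this:

#include <cstdio>

enum ItemType { FIELD_ITEM, CONST_ITEM, FUNC_ITEM };
enum ItemResult { STRING_RESULT, INT_RESULT, REAL_RESULT };

// Stand-in for the server's Item: carries an item kind and a result type.
struct FakeItem
{
  ItemType type;
  ItemResult result;
  bool is_of_type(ItemType t, ItemResult r) const
  { return type == t && result == r; }
};

// Mirrors the non-QL branch above: intercept only field = <string constant>.
static bool accept_query_condition(const FakeItem &lhs, const FakeItem &rhs)
{
  return lhs.type == FIELD_ITEM && rhs.is_of_type(CONST_ITEM, STRING_RESULT);
}

int main()
{
  FakeItem field     = { FIELD_ITEM, STRING_RESULT };
  FakeItem str_const = { CONST_ITEM, STRING_RESULT };
  FakeItem int_const = { CONST_ITEM, INT_RESULT };
  // Prints "1 0": the string constant is intercepted, the int constant is not.
  std::printf("%d %d\n",
              accept_query_condition(field, str_const),
              accept_query_condition(field, int_const));
  return 0;
}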
diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc
index 9e1b5c916c6..c892bd8828c 100644
--- a/storage/spider/ha_spider.cc
+++ b/storage/spider/ha_spider.cc
@@ -9397,8 +9397,8 @@ ulonglong ha_spider::table_flags() const
HA_BINLOG_ROW_CAPABLE |
HA_BINLOG_STMT_CAPABLE |
HA_PARTIAL_COLUMN_READ |
-#ifdef HA_CMP_REF_IS_EXPENSIVE
- HA_CMP_REF_IS_EXPENSIVE |
+#ifdef HA_SLOW_CMP_REF
+ HA_SLOW_CMP_REF |
#endif
#ifdef SPIDER_ENGINE_CONDITION_PUSHDOWN_IS_ALWAYS_ON
HA_CAN_TABLE_CONDITION_PUSHDOWN |
@@ -9428,7 +9428,7 @@ ulonglong ha_spider::table_flags() const
const char *ha_spider::index_type(
uint key_number
) {
- KEY *key_info = &table->s->key_info[key_number];
+ KEY *key_info = &table->key_info[key_number];
DBUG_ENTER("ha_spider::index_type");
DBUG_PRINT("info",("spider this=%p", this));
DBUG_PRINT("info",("spider flags=%ld", key_info->flags));
@@ -11186,9 +11186,9 @@ double ha_spider::read_time(
if (keyread)
{
DBUG_PRINT("info",("spider read_time(keyread) = %.6f",
- share->read_rate * table->s->key_info[index].key_length *
+ share->read_rate * table->key_info[index].key_length *
rows / 2 + 2));
- DBUG_RETURN(share->read_rate * table->s->key_info[index].key_length *
+ DBUG_RETURN(share->read_rate * table->key_info[index].key_length *
rows / 2 + 2);
} else {
DBUG_PRINT("info",("spider read_time = %.6f",
diff --git a/storage/spider/mysql-test/spider/bugfix/include/cp932_column_deinit.inc b/storage/spider/mysql-test/spider/bugfix/include/cp932_column_deinit.inc
new file mode 100644
index 00000000000..930cde889a4
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/include/cp932_column_deinit.inc
@@ -0,0 +1,13 @@
+--connection master_1
+set session spider_direct_dup_insert= @old_spider_direct_dup_insert;
+--let $MASTER_1_COMMENT_2_1= $MASTER_1_COMMENT_2_1_BACKUP
+--let $CHILD2_1_DROP_TABLES= $CHILD2_1_DROP_TABLES_BACKUP
+--let $CHILD2_1_CREATE_TABLES= $CHILD2_1_CREATE_TABLES_BACKUP
+--let $CHILD2_1_SELECT_TABLES= $CHILD2_1_SELECT_TABLES_BACKUP
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../t/test_deinit.inc
+--enable_result_log
+--enable_query_log
+--enable_warnings
diff --git a/storage/spider/mysql-test/spider/bugfix/include/cp932_column_init.inc b/storage/spider/mysql-test/spider/bugfix/include/cp932_column_init.inc
new file mode 100644
index 00000000000..ba412b567f8
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/include/cp932_column_init.inc
@@ -0,0 +1,29 @@
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../t/test_init.inc
+--enable_result_log
+--enable_query_log
+--enable_warnings
+--let $MASTER_1_COMMENT_2_1_BACKUP= $MASTER_1_COMMENT_2_1
+let $MASTER_1_COMMENT_2_1=
+ COMMENT='table "tbl_a", srv "s_2_1"';
+--let $CHILD2_1_DROP_TABLES_BACKUP= $CHILD2_1_DROP_TABLES
+let $CHILD2_1_DROP_TABLES=
+ DROP TABLE IF EXISTS tbl_a;
+--let $CHILD2_1_CREATE_TABLES_BACKUP= $CHILD2_1_CREATE_TABLES
+let $CHILD2_1_CREATE_TABLES=
+ CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ txt_utf8 char(8) NOT NULL,
+ txt_cp932 char(8) NOT NULL COLLATE cp932_japanese_ci,
+ PRIMARY KEY (pkey)
+ ) $CHILD2_1_ENGINE DEFAULT CHARACTER SET utf8;
+--let $CHILD2_1_SELECT_TABLES_BACKUP= $CHILD2_1_SELECT_TABLES
+let $CHILD2_1_SELECT_TABLES=
+ SELECT pkey, txt_utf8, txt_cp932 FROM tbl_a ORDER BY pkey;
+let $CHILD2_1_SELECT_ARGUMENT1=
+ SELECT argument FROM mysql.general_log WHERE argument LIKE '%insert %' OR argument LIKE '%update %';
+--connection master_1
+set @old_spider_direct_dup_insert= @@spider_direct_dup_insert;
+set session spider_direct_dup_insert= 1;
diff --git a/storage/spider/mysql-test/spider/bugfix/include/group_by_order_by_limit_deinit.inc b/storage/spider/mysql-test/spider/bugfix/include/group_by_order_by_limit_deinit.inc
new file mode 100644
index 00000000000..9d255152dd8
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/include/group_by_order_by_limit_deinit.inc
@@ -0,0 +1,14 @@
+--let $MASTER_1_COMMENT_2_1= $MASTER_1_COMMENT_2_1_BACKUP
+--let $CHILD2_1_DROP_TABLES= $CHILD2_1_DROP_TABLES_BACKUP
+--let $CHILD2_1_CREATE_TABLES= $CHILD2_1_CREATE_TABLES_BACKUP
+--let $CHILD2_1_SELECT_TABLES= $CHILD2_1_SELECT_TABLES_BACKUP
+--let $CHILD2_2_DROP_TABLES= $CHILD2_2_DROP_TABLES_BACKUP
+--let $CHILD2_2_CREATE_TABLES= $CHILD2_2_CREATE_TABLES_BACKUP
+--let $CHILD2_2_SELECT_TABLES= $CHILD2_2_SELECT_TABLES_BACKUP
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../t/test_deinit.inc
+--enable_result_log
+--enable_query_log
+--enable_warnings
diff --git a/storage/spider/mysql-test/spider/bugfix/include/group_by_order_by_limit_init.inc b/storage/spider/mysql-test/spider/bugfix/include/group_by_order_by_limit_init.inc
new file mode 100644
index 00000000000..ac60580f463
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/include/group_by_order_by_limit_init.inc
@@ -0,0 +1,54 @@
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../t/test_init.inc
+if (!$HAVE_PARTITION)
+{
+ --source group_by_order_by_limit_deinit.inc
+ --enable_result_log
+ --enable_query_log
+ --enable_warnings
+ skip Test requires partitioning;
+}
+--enable_result_log
+--enable_query_log
+--enable_warnings
+--let $MASTER_1_COMMENT_2_1_BACKUP= $MASTER_1_COMMENT_2_1
+let $MASTER_1_COMMENT_2_1=
+ COMMENT='table "tbl_a"'
+ PARTITION BY KEY(pkey) (
+ PARTITION pt1 COMMENT='srv "s_2_1"',
+ PARTITION pt2 COMMENT='srv "s_2_2"'
+ );
+--let $CHILD2_1_DROP_TABLES_BACKUP= $CHILD2_1_DROP_TABLES
+let $CHILD2_1_DROP_TABLES=
+ DROP TABLE IF EXISTS tbl_a;
+--let $CHILD2_1_CREATE_TABLES_BACKUP= $CHILD2_1_CREATE_TABLES
+let $CHILD2_1_CREATE_TABLES=
+ CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ skey int NOT NULL,
+ PRIMARY KEY (pkey),
+ KEY idx1 (skey)
+ ) $CHILD2_1_ENGINE $CHILD2_1_CHARSET;
+--let $CHILD2_1_SELECT_TABLES_BACKUP= $CHILD2_1_SELECT_TABLES
+let $CHILD2_1_SELECT_TABLES=
+ SELECT pkey, skey FROM tbl_a ORDER BY pkey;
+let $CHILD2_1_SELECT_ARGUMENT1=
+ SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
+--let $CHILD2_2_DROP_TABLES_BACKUP= $CHILD2_2_DROP_TABLES
+let $CHILD2_2_DROP_TABLES=
+ DROP TABLE IF EXISTS tbl_a;
+--let $CHILD2_2_CREATE_TABLES_BACKUP= $CHILD2_2_CREATE_TABLES
+let $CHILD2_2_CREATE_TABLES=
+ CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ skey int NOT NULL,
+ PRIMARY KEY (pkey),
+ KEY idx1 (skey)
+ ) $CHILD2_2_ENGINE $CHILD2_2_CHARSET;
+--let $CHILD2_2_SELECT_TABLES_BACKUP= $CHILD2_2_SELECT_TABLES
+let $CHILD2_2_SELECT_TABLES=
+ SELECT pkey, skey FROM tbl_a ORDER BY pkey;
+let $CHILD2_2_SELECT_ARGUMENT1=
+ SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
diff --git a/storage/spider/mysql-test/spider/bugfix/include/wait_timeout_deinit.inc b/storage/spider/mysql-test/spider/bugfix/include/wait_timeout_deinit.inc
new file mode 100644
index 00000000000..e66247c89c5
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/include/wait_timeout_deinit.inc
@@ -0,0 +1,18 @@
+--let $MASTER_1_COMMENT_2_1= $MASTER_1_COMMENT_2_1_BACKUP
+--let $CHILD2_1_DROP_TABLES= $CHILD2_1_DROP_TABLES_BACKUP
+--let $CHILD2_1_CREATE_TABLES= $CHILD2_1_CREATE_TABLES_BACKUP
+--let $CHILD2_1_SELECT_TABLES= $CHILD2_1_SELECT_TABLES_BACKUP
+--let $CHILD2_2_DROP_TABLES= $CHILD2_2_DROP_TABLES_BACKUP
+--let $CHILD2_2_CREATE_TABLES= $CHILD2_2_CREATE_TABLES_BACKUP
+--let $CHILD2_2_SELECT_TABLES= $CHILD2_2_SELECT_TABLES_BACKUP
+--connection child2_1
+set global wait_timeout= @old_wait_timeout;
+--connection child2_2
+set global wait_timeout= @old_wait_timeout;
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../t/test_deinit.inc
+--enable_result_log
+--enable_query_log
+--enable_warnings
diff --git a/storage/spider/mysql-test/spider/bugfix/include/wait_timeout_init.inc b/storage/spider/mysql-test/spider/bugfix/include/wait_timeout_init.inc
new file mode 100644
index 00000000000..d56d7a20940
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/include/wait_timeout_init.inc
@@ -0,0 +1,56 @@
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../t/test_init.inc
+if (!$HAVE_PARTITION)
+{
+ --source wait_timeout_deinit.inc
+ --enable_result_log
+ --enable_query_log
+ --enable_warnings
+ skip Test requires partitioning;
+}
+--enable_result_log
+--enable_query_log
+--enable_warnings
+--let $MASTER_1_COMMENT_2_1_BACKUP= $MASTER_1_COMMENT_2_1
+let $MASTER_1_COMMENT_2_1=
+ COMMENT='table "tbl_a"'
+ PARTITION BY KEY(pkey) (
+ PARTITION pt1 COMMENT='srv "s_2_1"',
+ PARTITION pt2 COMMENT='srv "s_2_2"'
+ );
+--let $CHILD2_1_DROP_TABLES_BACKUP= $CHILD2_1_DROP_TABLES
+let $CHILD2_1_DROP_TABLES=
+ DROP TABLE IF EXISTS tbl_a;
+--let $CHILD2_1_CREATE_TABLES_BACKUP= $CHILD2_1_CREATE_TABLES
+let $CHILD2_1_CREATE_TABLES=
+ CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ PRIMARY KEY (pkey)
+ ) $CHILD2_1_ENGINE $CHILD2_1_CHARSET;
+--let $CHILD2_1_SELECT_TABLES_BACKUP= $CHILD2_1_SELECT_TABLES
+let $CHILD2_1_SELECT_TABLES=
+ SELECT pkey FROM tbl_a ORDER BY pkey;
+let $CHILD2_1_SELECT_ARGUMENT1=
+ SELECT argument FROM mysql.general_log WHERE argument LIKE '%insert %';
+--let $CHILD2_2_DROP_TABLES_BACKUP= $CHILD2_2_DROP_TABLES
+let $CHILD2_2_DROP_TABLES=
+ DROP TABLE IF EXISTS tbl_a;
+--let $CHILD2_2_CREATE_TABLES_BACKUP= $CHILD2_2_CREATE_TABLES
+let $CHILD2_2_CREATE_TABLES=
+ CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ PRIMARY KEY (pkey)
+ ) $CHILD2_2_ENGINE $CHILD2_2_CHARSET;
+--let $CHILD2_2_SELECT_TABLES_BACKUP= $CHILD2_2_SELECT_TABLES
+let $CHILD2_2_SELECT_TABLES=
+ SELECT pkey FROM tbl_a ORDER BY pkey;
+let $CHILD2_2_SELECT_ARGUMENT1=
+ SELECT argument FROM mysql.general_log WHERE argument LIKE '%insert %';
+--connection child2_1
+set @old_wait_timeout= @@wait_timeout;
+set global wait_timeout= 1;
+--connection child2_2
+set @old_wait_timeout= @@wait_timeout;
+set global wait_timeout= 1;
diff --git a/storage/spider/mysql-test/spider/bugfix/r/cp932_column.result b/storage/spider/mysql-test/spider/bugfix/r/cp932_column.result
new file mode 100644
index 00000000000..30b333c5008
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/r/cp932_column.result
@@ -0,0 +1,84 @@
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+connection master_1;
+set @old_spider_direct_dup_insert= @@spider_direct_dup_insert;
+set session spider_direct_dup_insert= 1;
+
+this test is for MDEV-18992
+
+drop and create databases
+connection master_1;
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+connection child2_1;
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+
+create table and insert
+connection child2_1;
+CHILD2_1_CREATE_TABLES
+TRUNCATE TABLE mysql.general_log;
+connection master_1;
+CREATE TABLE tbl_a (
+pkey int NOT NULL,
+txt_utf8 char(8) NOT NULL,
+txt_cp932 char(8) NOT NULL COLLATE cp932_japanese_ci,
+PRIMARY KEY (pkey)
+) MASTER_1_ENGINE DEFAULT CHARACTER SET utf8 MASTER_1_COMMENT_2_1
+INSERT INTO tbl_a (pkey,txt_utf8,txt_cp932) VALUES (0,'',''),(1,'',''),(2,'',''),(3,'',''),(4,'',''),(5,'',''),(6,'',''),(7,'',''),(8,'',''),(9,'','');
+FLUSH TABLES;
+
+test 1
+connection child2_1;
+TRUNCATE TABLE mysql.general_log;
+connection master_1;
+SET NAMES cp932;
+INSERT INTO tbl_a (pkey,txt_utf8,txt_cp932) VALUES (10,'','’†‘');
+INSERT INTO tbl_a (pkey,txt_utf8,txt_cp932) VALUES (0,'','') ON DUPLICATE KEY UPDATE txt_cp932 = '’†‘';
+UPDATE tbl_a SET txt_cp932 = '’†‘' WHERE pkey = 2;
+SET NAMES utf8;
+connection child2_1;
+SET NAMES cp932;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%insert %' OR argument LIKE '%update %';
+argument
+insert into `auto_test_remote`.`tbl_a`(`pkey`,`txt_utf8`,`txt_cp932`)values(10,'',_cp932'\\x92\\x86\\x8D\\x91')
+insert high_priority into `auto_test_remote`.`tbl_a`(`pkey`,`txt_utf8`,`txt_cp932`)values(0,'',_cp932'') on duplicate key update `txt_cp932` = _cp932'\x92\x86\x8D\x91'
+update `auto_test_remote`.`tbl_a` set `txt_cp932` = _cp932'\x92\x86\x8D\x91' where (`pkey` = 2)
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%insert %' OR argument LIKE '%update %'
+SELECT pkey, txt_utf8, txt_cp932 FROM tbl_a ORDER BY pkey;
+pkey txt_utf8 txt_cp932
+0 ’†‘
+1
+2 ’†‘
+3
+4
+5
+6
+7
+8
+9
+10 ’†‘
+SET NAMES utf8;
+
+deinit
+connection master_1;
+DROP DATABASE IF EXISTS auto_test_local;
+connection child2_1;
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+connection master_1;
+set session spider_direct_dup_insert= @old_spider_direct_dup_insert;
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+
+end of test
diff --git a/storage/spider/mysql-test/spider/bugfix/r/group_by_order_by_limit.result b/storage/spider/mysql-test/spider/bugfix/r/group_by_order_by_limit.result
new file mode 100644
index 00000000000..8a2bcb73537
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/r/group_by_order_by_limit.result
@@ -0,0 +1,117 @@
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+
+this test is for MDEV-16520
+
+drop and create databases
+connection master_1;
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+connection child2_1;
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+connection child2_2;
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote2;
+USE auto_test_remote2;
+
+create table and insert
+connection child2_1;
+CHILD2_1_CREATE_TABLES
+TRUNCATE TABLE mysql.general_log;
+connection child2_2;
+CHILD2_2_CREATE_TABLES
+TRUNCATE TABLE mysql.general_log;
+connection master_1;
+CREATE TABLE tbl_a (
+pkey int NOT NULL,
+skey int NOT NULL,
+PRIMARY KEY (pkey),
+KEY idx1 (skey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1
+INSERT INTO tbl_a (pkey,skey) VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
+INSERT INTO tbl_a (pkey,skey) VALUES (10,10),(11,11),(12,12),(13,13),(14,14),(15,15),(16,16),(17,17),(18,18),(19,19);
+INSERT INTO tbl_a (pkey,skey) VALUES (20,5),(21,6),(22,7),(23,8),(24,9),(25,10),(26,11),(27,12),(28,13),(29,14);
+
+select test 1
+connection child2_1;
+TRUNCATE TABLE mysql.general_log;
+connection child2_2;
+TRUNCATE TABLE mysql.general_log;
+connection master_1;
+SELECT skey, count(*) cnt FROM tbl_a GROUP BY skey ORDER BY cnt DESC, skey DESC LIMIT 5;
+skey cnt
+14 2
+13 2
+12 2
+11 2
+10 2
+connection child2_1;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
+argument
+select count(0),`skey` from `auto_test_remote`.`tbl_a` group by `skey` order by `skey`
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
+SELECT pkey, skey FROM tbl_a ORDER BY pkey;
+pkey skey
+1 1
+3 3
+5 5
+7 7
+9 9
+11 11
+13 13
+15 15
+17 17
+19 19
+21 6
+23 8
+25 10
+27 12
+29 14
+connection child2_2;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
+argument
+select count(0),`skey` from `auto_test_remote2`.`tbl_a` group by `skey` order by `skey`
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
+SELECT pkey, skey FROM tbl_a ORDER BY pkey;
+pkey skey
+0 0
+2 2
+4 4
+6 6
+8 8
+10 10
+12 12
+14 14
+16 16
+18 18
+20 5
+22 7
+24 9
+26 11
+28 13
+
+deinit
+connection master_1;
+DROP DATABASE IF EXISTS auto_test_local;
+connection child2_1;
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+connection child2_2;
+DROP DATABASE IF EXISTS auto_test_remote2;
+SET GLOBAL log_output = @old_log_output;
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+
+end of test
diff --git a/storage/spider/mysql-test/spider/bugfix/r/slave_trx_isolation.result b/storage/spider/mysql-test/spider/bugfix/r/slave_trx_isolation.result
index 06581a604a2..e93eb78417f 100644
--- a/storage/spider/mysql-test/spider/bugfix/r/slave_trx_isolation.result
+++ b/storage/spider/mysql-test/spider/bugfix/r/slave_trx_isolation.result
@@ -50,7 +50,7 @@ SELECT argument FROM mysql.general_log WHERE argument LIKE '%set %';
argument
set session time_zone = '+00:00'
SET NAMES utf8
-set session transaction isolation level read committed;set session autocommit = 1;start transaction
+set session transaction isolation level read committed;set session autocommit = 1;set session wait_timeout = 604800;start transaction
SELECT argument FROM mysql.general_log WHERE argument LIKE '%set %'
SELECT pkey FROM tbl_a ORDER BY pkey;
pkey
diff --git a/storage/spider/mysql-test/spider/bugfix/r/wait_timeout.result b/storage/spider/mysql-test/spider/bugfix/r/wait_timeout.result
new file mode 100644
index 00000000000..ec9534f7592
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/r/wait_timeout.result
@@ -0,0 +1,130 @@
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+connection child2_1;
+set @old_wait_timeout= @@wait_timeout;
+set global wait_timeout= 1;
+connection child2_2;
+set @old_wait_timeout= @@wait_timeout;
+set global wait_timeout= 1;
+
+this test is for MDEV-16530
+
+drop and create databases
+connection master_1;
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+connection child2_1;
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+connection child2_1_2;
+USE auto_test_remote;
+connection child2_2;
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote2;
+USE auto_test_remote2;
+connection child2_2_2;
+USE auto_test_remote2;
+
+create table and insert
+connection child2_1;
+CHILD2_1_CREATE_TABLES
+TRUNCATE TABLE mysql.general_log;
+connection child2_2;
+CHILD2_2_CREATE_TABLES
+TRUNCATE TABLE mysql.general_log;
+connection master_1;
+CREATE TABLE tbl_a (
+pkey int NOT NULL,
+PRIMARY KEY (pkey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1
+INSERT INTO tbl_a (pkey) VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+INSERT INTO tbl_a (pkey) VALUES (10),(11),(12),(13),(14),(15),(16),(17),(18),(19);
+INSERT INTO tbl_a (pkey) VALUES (20),(21),(22),(23),(24),(25),(26),(27),(28),(29);
+
+select test 1
+connection child2_1;
+TRUNCATE TABLE mysql.general_log;
+LOCK TABLE tbl_a READ;
+connection child2_2;
+TRUNCATE TABLE mysql.general_log;
+connection master_1;
+INSERT INTO tbl_a SELECT pkey + 30 FROM tbl_a ORDER BY pkey;
+connection child2_1_2;
+SELECT SLEEP(2);
+SLEEP(2)
+0
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%insert %';
+argument
+insert into `auto_test_remote`.`tbl_a`(`pkey`)values(31),(33),(35),(37),(39),(41),(43),(45),(47),(49),(51),(53),(55),(57),(59)
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%insert %'
+connection child2_2_2;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%insert %';
+argument
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%insert %'
+connection child2_1;
+UNLOCK TABLES;
+SELECT pkey FROM tbl_a ORDER BY pkey;
+pkey
+1
+3
+5
+7
+9
+11
+13
+15
+17
+19
+21
+23
+25
+27
+29
+connection child2_2;
+SELECT pkey FROM tbl_a ORDER BY pkey;
+pkey
+0
+2
+4
+6
+8
+10
+12
+14
+16
+18
+20
+22
+24
+26
+28
+connection master_1;
+
+deinit
+connection master_1;
+DROP DATABASE IF EXISTS auto_test_local;
+connection child2_1;
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+connection child2_2;
+DROP DATABASE IF EXISTS auto_test_remote2;
+SET GLOBAL log_output = @old_log_output;
+connection child2_1;
+set global wait_timeout= @old_wait_timeout;
+connection child2_2;
+set global wait_timeout= @old_wait_timeout;
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+
+end of test
diff --git a/storage/spider/mysql-test/spider/bugfix/t/cp932_column.cnf b/storage/spider/mysql-test/spider/bugfix/t/cp932_column.cnf
new file mode 100644
index 00000000000..05dfd8a0bce
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/cp932_column.cnf
@@ -0,0 +1,3 @@
+!include include/default_mysqld.cnf
+!include ../my_1_1.cnf
+!include ../my_2_1.cnf
diff --git a/storage/spider/mysql-test/spider/bugfix/t/cp932_column.test b/storage/spider/mysql-test/spider/bugfix/t/cp932_column.test
new file mode 100644
index 00000000000..8bd0d40cb60
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/cp932_column.test
@@ -0,0 +1,80 @@
+--source ../include/cp932_column_init.inc
+--echo
+--echo this test is for MDEV-18992
+--echo
+--echo drop and create databases
+
+--connection master_1
+--disable_warnings
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+
+--connection child2_1
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+--enable_warnings
+
+--echo
+--echo create table and insert
+
+--connection child2_1
+--disable_query_log
+echo CHILD2_1_CREATE_TABLES;
+eval $CHILD2_1_CREATE_TABLES;
+--enable_query_log
+TRUNCATE TABLE mysql.general_log;
+
+--connection master_1
+--disable_query_log
+echo CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ txt_utf8 char(8) NOT NULL,
+ txt_cp932 char(8) NOT NULL COLLATE cp932_japanese_ci,
+ PRIMARY KEY (pkey)
+) MASTER_1_ENGINE DEFAULT CHARACTER SET utf8 MASTER_1_COMMENT_2_1;
+eval CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ txt_utf8 char(8) NOT NULL,
+ txt_cp932 char(8) NOT NULL COLLATE cp932_japanese_ci,
+ PRIMARY KEY (pkey)
+) $MASTER_1_ENGINE DEFAULT CHARACTER SET utf8 $MASTER_1_COMMENT_2_1;
+--enable_query_log
+INSERT INTO tbl_a (pkey,txt_utf8,txt_cp932) VALUES (0,'',''),(1,'',''),(2,'',''),(3,'',''),(4,'',''),(5,'',''),(6,'',''),(7,'',''),(8,'',''),(9,'','');
+FLUSH TABLES;
+
+--echo
+--echo test 1
+
+--connection child2_1
+TRUNCATE TABLE mysql.general_log;
+
+--connection master_1
+SET NAMES cp932;
+INSERT INTO tbl_a (pkey,txt_utf8,txt_cp932) VALUES (10,'','’†‘');
+INSERT INTO tbl_a (pkey,txt_utf8,txt_cp932) VALUES (0,'','') ON DUPLICATE KEY UPDATE txt_cp932 = '’†‘';
+UPDATE tbl_a SET txt_cp932 = '’†‘' WHERE pkey = 2;
+SET NAMES utf8;
+
+--connection child2_1
+SET NAMES cp932;
+eval $CHILD2_1_SELECT_ARGUMENT1;
+eval $CHILD2_1_SELECT_TABLES;
+SET NAMES utf8;
+
+--echo
+--echo deinit
+--disable_warnings
+
+--connection master_1
+DROP DATABASE IF EXISTS auto_test_local;
+
+--connection child2_1
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+
+--enable_warnings
+--source ../include/cp932_column_deinit.inc
+--echo
+--echo end of test
diff --git a/storage/spider/mysql-test/spider/bugfix/t/group_by_order_by_limit.cnf b/storage/spider/mysql-test/spider/bugfix/t/group_by_order_by_limit.cnf
new file mode 100644
index 00000000000..e0ffb99c38e
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/group_by_order_by_limit.cnf
@@ -0,0 +1,4 @@
+!include include/default_mysqld.cnf
+!include ../my_1_1.cnf
+!include ../my_2_1.cnf
+!include ../my_2_2.cnf
diff --git a/storage/spider/mysql-test/spider/bugfix/t/group_by_order_by_limit.test b/storage/spider/mysql-test/spider/bugfix/t/group_by_order_by_limit.test
new file mode 100644
index 00000000000..f1de6d5d25f
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/group_by_order_by_limit.test
@@ -0,0 +1,97 @@
+--source ../include/group_by_order_by_limit_init.inc
+--echo
+--echo this test is for MDEV-16520
+--echo
+--echo drop and create databases
+--connection master_1
+--disable_warnings
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+
+--connection child2_1
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+
+--connection child2_2
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote2;
+USE auto_test_remote2;
+--enable_warnings
+
+--echo
+--echo create table and insert
+
+--connection child2_1
+--disable_query_log
+echo CHILD2_1_CREATE_TABLES;
+eval $CHILD2_1_CREATE_TABLES;
+--enable_query_log
+TRUNCATE TABLE mysql.general_log;
+
+--connection child2_2
+--disable_query_log
+echo CHILD2_2_CREATE_TABLES;
+eval $CHILD2_2_CREATE_TABLES;
+--enable_query_log
+TRUNCATE TABLE mysql.general_log;
+
+--connection master_1
+--disable_query_log
+echo CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ skey int NOT NULL,
+ PRIMARY KEY (pkey),
+ KEY idx1 (skey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1;
+eval CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ skey int NOT NULL,
+ PRIMARY KEY (pkey),
+ KEY idx1 (skey)
+) $MASTER_1_ENGINE $MASTER_1_CHARSET $MASTER_1_COMMENT_2_1;
+--enable_query_log
+INSERT INTO tbl_a (pkey,skey) VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
+INSERT INTO tbl_a (pkey,skey) VALUES (10,10),(11,11),(12,12),(13,13),(14,14),(15,15),(16,16),(17,17),(18,18),(19,19);
+INSERT INTO tbl_a (pkey,skey) VALUES (20,5),(21,6),(22,7),(23,8),(24,9),(25,10),(26,11),(27,12),(28,13),(29,14);
+
+--echo
+--echo select test 1
+
+--connection child2_1
+TRUNCATE TABLE mysql.general_log;
+
+--connection child2_2
+TRUNCATE TABLE mysql.general_log;
+
+--connection master_1
+SELECT skey, count(*) cnt FROM tbl_a GROUP BY skey ORDER BY cnt DESC, skey DESC LIMIT 5;
+
+--connection child2_1
+eval $CHILD2_1_SELECT_ARGUMENT1;
+eval $CHILD2_1_SELECT_TABLES;
+
+--connection child2_2
+eval $CHILD2_2_SELECT_ARGUMENT1;
+eval $CHILD2_2_SELECT_TABLES;
+
+--echo
+--echo deinit
+--disable_warnings
+--connection master_1
+DROP DATABASE IF EXISTS auto_test_local;
+
+--connection child2_1
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+
+--connection child2_2
+DROP DATABASE IF EXISTS auto_test_remote2;
+SET GLOBAL log_output = @old_log_output;
+
+--enable_warnings
+--source ../include/group_by_order_by_limit_deinit.inc
+--echo
+--echo end of test
diff --git a/storage/spider/mysql-test/spider/bugfix/t/wait_timeout.cnf b/storage/spider/mysql-test/spider/bugfix/t/wait_timeout.cnf
new file mode 100644
index 00000000000..e0ffb99c38e
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/wait_timeout.cnf
@@ -0,0 +1,4 @@
+!include include/default_mysqld.cnf
+!include ../my_1_1.cnf
+!include ../my_2_1.cnf
+!include ../my_2_2.cnf
diff --git a/storage/spider/mysql-test/spider/bugfix/t/wait_timeout.test b/storage/spider/mysql-test/spider/bugfix/t/wait_timeout.test
new file mode 100644
index 00000000000..8da6e8fe314
--- /dev/null
+++ b/storage/spider/mysql-test/spider/bugfix/t/wait_timeout.test
@@ -0,0 +1,109 @@
+--source ../include/wait_timeout_init.inc
+--echo
+--echo this test is for MDEV-16530
+--echo
+--echo drop and create databases
+
+--connection master_1
+--disable_warnings
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+
+--connection child2_1
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+--connection child2_1_2
+USE auto_test_remote;
+
+--connection child2_2
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote2;
+USE auto_test_remote2;
+--connection child2_2_2
+USE auto_test_remote2;
+--enable_warnings
+
+--echo
+--echo create table and insert
+
+--connection child2_1
+--disable_query_log
+echo CHILD2_1_CREATE_TABLES;
+eval $CHILD2_1_CREATE_TABLES;
+--enable_query_log
+TRUNCATE TABLE mysql.general_log;
+
+--connection child2_2
+--disable_query_log
+echo CHILD2_2_CREATE_TABLES;
+eval $CHILD2_2_CREATE_TABLES;
+--enable_query_log
+TRUNCATE TABLE mysql.general_log;
+
+--connection master_1
+--disable_query_log
+echo CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ PRIMARY KEY (pkey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1;
+eval CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ PRIMARY KEY (pkey)
+) $MASTER_1_ENGINE $MASTER_1_CHARSET $MASTER_1_COMMENT_2_1;
+--enable_query_log
+INSERT INTO tbl_a (pkey) VALUES (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+INSERT INTO tbl_a (pkey) VALUES (10),(11),(12),(13),(14),(15),(16),(17),(18),(19);
+INSERT INTO tbl_a (pkey) VALUES (20),(21),(22),(23),(24),(25),(26),(27),(28),(29);
+
+--echo
+--echo select test 1
+
+--connection child2_1
+TRUNCATE TABLE mysql.general_log;
+LOCK TABLE tbl_a READ;
+
+--connection child2_2
+TRUNCATE TABLE mysql.general_log;
+
+--connection master_1
+send_eval INSERT INTO tbl_a SELECT pkey + 30 FROM tbl_a ORDER BY pkey;
+
+--connection child2_1_2
+SELECT SLEEP(2);
+eval $CHILD2_1_SELECT_ARGUMENT1;
+
+--connection child2_2_2
+eval $CHILD2_2_SELECT_ARGUMENT1;
+
+--connection child2_1
+UNLOCK TABLES;
+eval $CHILD2_1_SELECT_TABLES;
+
+--connection child2_2
+eval $CHILD2_2_SELECT_TABLES;
+
+--connection master_1
+reap;
+
+--echo
+--echo deinit
+--disable_warnings
+
+--connection master_1
+DROP DATABASE IF EXISTS auto_test_local;
+
+--connection child2_1
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+
+--connection child2_2
+DROP DATABASE IF EXISTS auto_test_remote2;
+SET GLOBAL log_output = @old_log_output;
+
+--enable_warnings
+--source ../include/wait_timeout_deinit.inc
+--echo
+--echo end of test
diff --git a/storage/spider/mysql-test/spider/feature/include/slave_transaction_retry_errors_deinit.inc b/storage/spider/mysql-test/spider/feature/include/slave_transaction_retry_errors_deinit.inc
new file mode 100644
index 00000000000..5ac67cdf783
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/include/slave_transaction_retry_errors_deinit.inc
@@ -0,0 +1,10 @@
+--connection slave1_1
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../include/deinit_spider.inc
+--source ../t/slave_test_deinit.inc
+--source ../t/test_deinit.inc
+--enable_result_log
+--enable_query_log
+--enable_warnings
diff --git a/storage/spider/mysql-test/spider/feature/include/slave_transaction_retry_errors_init.inc b/storage/spider/mysql-test/spider/feature/include/slave_transaction_retry_errors_init.inc
new file mode 100644
index 00000000000..052d6ebb2eb
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/include/slave_transaction_retry_errors_init.inc
@@ -0,0 +1,10 @@
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../t/test_init.inc
+--source ../t/slave_test_init.inc
+--connection slave1_1
+--source ../include/init_spider.inc
+--enable_result_log
+--enable_query_log
+--enable_warnings
diff --git a/storage/spider/mysql-test/spider/feature/my.cnf b/storage/spider/mysql-test/spider/feature/my.cnf
new file mode 100644
index 00000000000..b7f76a630cc
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/my.cnf
@@ -0,0 +1,2 @@
+!include include/default_mysqld.cnf
+!include my_1_1.cnf
diff --git a/storage/spider/mysql-test/spider/feature/my_1_1.cnf b/storage/spider/mysql-test/spider/feature/my_1_1.cnf
new file mode 100644
index 00000000000..5f17295d895
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/my_1_1.cnf
@@ -0,0 +1,44 @@
+[mysqld.1.1]
+log-bin= master-bin
+loose_handlersocket_port= 20000
+loose_handlersocket_port_wr= 20001
+loose_handlersocket_threads= 2
+loose_handlersocket_threads_wr= 1
+loose_handlersocket_support_merge_table= 0
+loose_handlersocket_direct_update_mode= 2
+loose_handlersocket_unlimited_boundary= 65536
+loose_handlersocket_bulk_insert= 0
+loose_handlersocket_bulk_insert_timeout= 0
+loose_handlersocket_general_log= 1
+loose_handlersocket_timeout= 30
+loose_handlersocket_close_table_interval=2
+open_files_limit= 4096
+loose_partition= 1
+
+[ENV]
+USE_GEOMETRY_TEST= 1
+USE_FULLTEXT_TEST= 1
+USE_HA_TEST= 1
+USE_GENERAL_LOG= 1
+USE_REPLICATION= 1
+MASTER_1_MYPORT= @mysqld.1.1.port
+MASTER_1_HSRPORT= 20000
+MASTER_1_HSWPORT= 20001
+MASTER_1_MYSOCK= @mysqld.1.1.socket
+MASTER_1_ENGINE_TYPE= Spider
+#MASTER_1_ENGINE_TYPE= MyISAM
+MASTER_1_ENGINE= ENGINE=Spider
+MASTER_1_CHARSET= DEFAULT CHARSET=utf8
+MASTER_1_ENGINE2= ENGINE=MyISAM
+MASTER_1_CHARSET2= DEFAULT CHARSET=utf8
+MASTER_1_CHARSET3= DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci
+
+STR_SEMICOLON= ;
+
+#The following are set in include/init_xxx.inc files
+# MASTER_1_COMMENT_2_1
+# MASTER_1_COMMENT2_2_1
+# MASTER_1_COMMENT3_2_1
+# MASTER_1_COMMENT4_2_1
+# MASTER_1_COMMENT5_2_1
+# MASTER_1_COMMENT_P_2_1
diff --git a/storage/spider/mysql-test/spider/feature/my_2_1.cnf b/storage/spider/mysql-test/spider/feature/my_2_1.cnf
new file mode 100644
index 00000000000..24161645607
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/my_2_1.cnf
@@ -0,0 +1,56 @@
+[mysqld.2.1]
+loose_handlersocket_port= 20002
+loose_handlersocket_port_wr= 20003
+loose_handlersocket_threads= 2
+loose_handlersocket_threads_wr= 1
+loose_handlersocket_support_merge_table= 0
+loose_handlersocket_direct_update_mode= 2
+loose_handlersocket_unlimited_boundary= 65536
+loose_handlersocket_bulk_insert= 0
+loose_handlersocket_bulk_insert_timeout= 0
+loose_handlersocket_general_log= 1
+loose_handlersocket_timeout= 30
+loose_handlersocket_close_table_interval=2
+open_files_limit= 4096
+
+[ENV]
+USE_CHILD_GROUP2= 1
+OUTPUT_CHILD_GROUP2= 0
+CHILD2_1_MYPORT= @mysqld.2.1.port
+CHILD2_1_HSRPORT= 20002
+CHILD2_1_HSWPORT= 20003
+CHILD2_1_MYSOCK= @mysqld.2.1.socket
+CHILD2_1_ENGINE_TYPE= InnoDB
+CHILD2_1_ENGINE= ENGINE=InnoDB
+CHILD2_1_CHARSET= DEFAULT CHARSET=utf8
+CHILD2_1_CHARSET2= DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci
+CHILD2_1_FT_MYPORT= @mysqld.2.1.port
+CHILD2_1_FT_MYSOCK= @mysqld.2.1.socket
+CHILD2_1_FT_ENGINE_TYPE= MyISAM
+CHILD2_1_FT_ENGINE= ENGINE=MyISAM
+CHILD2_1_FT_CHARSET= DEFAULT CHARSET=utf8
+CHILD2_1_GM_MYPORT= @mysqld.2.1.port
+CHILD2_1_GM_MYSOCK= @mysqld.2.1.socket
+CHILD2_1_GM_ENGINE_TYPE= MyISAM
+CHILD2_1_GM_ENGINE= ENGINE=MyISAM
+CHILD2_1_GM_CHARSET= DEFAULT CHARSET=utf8
+
+#The following are set in include/init_xxx.inc files
+# CHILD2_1_DROP_TABLES
+# CHILD2_1_CREATE_TABLES
+# CHILD2_1_SELECT_TABLES
+# CHILD2_1_DROP_TABLES2
+# CHILD2_1_CREATE_TABLES2
+# CHILD2_1_SELECT_TABLES2
+# CHILD2_1_DROP_TABLES3
+# CHILD2_1_CREATE_TABLES3
+# CHILD2_1_SELECT_TABLES3
+# CHILD2_1_DROP_TABLES4
+# CHILD2_1_CREATE_TABLES4
+# CHILD2_1_SELECT_TABLES4
+# CHILD2_1_DROP_TABLES5
+# CHILD2_1_CREATE_TABLES5
+# CHILD2_1_SELECT_TABLES5
+# CHILD2_1_DROP_TABLES6
+# CHILD2_1_CREATE_TABLES6
+# CHILD2_1_SELECT_TABLES6
diff --git a/storage/spider/mysql-test/spider/feature/my_2_2.cnf b/storage/spider/mysql-test/spider/feature/my_2_2.cnf
new file mode 100644
index 00000000000..2d3c2a89a7d
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/my_2_2.cnf
@@ -0,0 +1,38 @@
+[mysqld.2.2]
+loose_handlersocket_port= 20004
+loose_handlersocket_port_wr= 20005
+loose_handlersocket_threads= 2
+loose_handlersocket_threads_wr= 1
+loose_handlersocket_support_merge_table= 0
+loose_handlersocket_direct_update_mode= 2
+loose_handlersocket_unlimited_boundary= 65536
+loose_handlersocket_bulk_insert= 0
+loose_handlersocket_bulk_insert_timeout= 0
+loose_handlersocket_general_log= 1
+loose_handlersocket_timeout= 30
+loose_handlersocket_close_table_interval=2
+open_files_limit= 4096
+
+[ENV]
+CHILD2_2_MYPORT= @mysqld.2.2.port
+CHILD2_2_HSRPORT= 20004
+CHILD2_2_HSWPORT= 20005
+CHILD2_2_MYSOCK= @mysqld.2.2.socket
+CHILD2_2_ENGINE_TYPE= InnoDB
+CHILD2_2_ENGINE= ENGINE=InnoDB
+CHILD2_2_CHARSET= DEFAULT CHARSET=utf8
+CHILD2_2_FT_MYPORT= @mysqld.2.2.port
+CHILD2_2_FT_MYSOCK= @mysqld.2.2.socket
+CHILD2_2_FT_ENGINE_TYPE= MyISAM
+CHILD2_2_FT_ENGINE= ENGINE=MyISAM
+CHILD2_2_FT_CHARSET= DEFAULT CHARSET=utf8
+CHILD2_2_GM_MYPORT= @mysqld.2.2.port
+CHILD2_2_GM_MYSOCK= @mysqld.2.2.socket
+CHILD2_2_GM_ENGINE_TYPE= MyISAM
+CHILD2_2_GM_ENGINE= ENGINE=MyISAM
+CHILD2_2_GM_CHARSET= DEFAULT CHARSET=utf8
+
+#The following are set in include/init_xxx.inc files
+# CHILD2_2_DROP_TABLES
+# CHILD2_2_CREATE_TABLES
+# CHILD2_2_SELECT_TABLES
diff --git a/storage/spider/mysql-test/spider/feature/my_2_3.cnf b/storage/spider/mysql-test/spider/feature/my_2_3.cnf
new file mode 100644
index 00000000000..024da651e0c
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/my_2_3.cnf
@@ -0,0 +1,8 @@
+[mysqld.2.3]
+
+[ENV]
+CHILD2_3_MYPORT= @mysqld.2.3.port
+CHILD2_3_MYSOCK= @mysqld.2.3.socket
+CHILD2_3_ENGINE_TYPE= InnoDB
+CHILD2_3_ENGINE= ENGINE=InnoDB
+CHILD2_3_CHARSET= DEFAULT CHARSET=utf8
diff --git a/storage/spider/mysql-test/spider/feature/my_3_1.cnf b/storage/spider/mysql-test/spider/feature/my_3_1.cnf
new file mode 100644
index 00000000000..fad21607789
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/my_3_1.cnf
@@ -0,0 +1,11 @@
+[mysqld.3.1]
+loose_partition= 1
+
+[ENV]
+USE_CHILD_GROUP3= 1
+OUTPUT_CHILD_GROUP3= 0
+CHILD3_1_MYPORT= @mysqld.3.1.port
+CHILD3_1_MYSOCK= @mysqld.3.1.socket
+CHILD3_1_ENGINE_TYPE= InnoDB
+CHILD3_1_ENGINE= ENGINE=InnoDB
+CHILD3_1_CHARSET= DEFAULT CHARSET=utf8
diff --git a/storage/spider/mysql-test/spider/feature/my_3_2.cnf b/storage/spider/mysql-test/spider/feature/my_3_2.cnf
new file mode 100644
index 00000000000..6f027b6f525
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/my_3_2.cnf
@@ -0,0 +1,9 @@
+[mysqld.3.2]
+loose_partition= 1
+
+[ENV]
+CHILD3_2_MYPORT= @mysqld.3.2.port
+CHILD3_2_MYSOCK= @mysqld.3.2.socket
+CHILD3_2_ENGINE_TYPE= InnoDB
+CHILD3_2_ENGINE= ENGINE=InnoDB
+CHILD3_2_CHARSET= DEFAULT CHARSET=utf8
diff --git a/storage/spider/mysql-test/spider/feature/my_3_3.cnf b/storage/spider/mysql-test/spider/feature/my_3_3.cnf
new file mode 100644
index 00000000000..fbb33694738
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/my_3_3.cnf
@@ -0,0 +1,9 @@
+[mysqld.3.3]
+loose_partition= 1
+
+[ENV]
+CHILD3_3_MYPORT= @mysqld.3.3.port
+CHILD3_3_MYSOCK= @mysqld.3.3.socket
+CHILD3_3_ENGINE_TYPE= InnoDB
+CHILD3_3_ENGINE= ENGINE=InnoDB
+CHILD3_3_CHARSET= DEFAULT CHARSET=utf8
diff --git a/storage/spider/mysql-test/spider/feature/my_4_1.cnf b/storage/spider/mysql-test/spider/feature/my_4_1.cnf
new file mode 100644
index 00000000000..d1812a48b68
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/my_4_1.cnf
@@ -0,0 +1,9 @@
+[mysqld.4.1]
+loose_partition= 1
+
+[ENV]
+SLAVE1_1_MYPORT= @mysqld.4.1.port
+SLAVE1_1_MYSOCK= @mysqld.4.1.socket
+SLAVE1_1_ENGINE_TYPE= MyISAM
+SLAVE1_1_ENGINE= ENGINE=MyISAM
+SLAVE1_1_CHARSET= DEFAULT CHARSET=utf8
diff --git a/storage/spider/mysql-test/spider/feature/r/slave_transaction_retry_errors.result b/storage/spider/mysql-test/spider/feature/r/slave_transaction_retry_errors.result
new file mode 100644
index 00000000000..0a147c0356a
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/r/slave_transaction_retry_errors.result
@@ -0,0 +1,22 @@
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+for slave1_1
+
+connection slave1_1;
+SHOW VARIABLES LIKE 'slave_transaction_retry_errors';
+Variable_name Value
+slave_transaction_retry_errors 1158,1159,1160,1161,1205,1213,1429,2013,12701
+connection slave1_1;
+for slave1_1
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+
+end of test
diff --git a/storage/spider/mysql-test/spider/feature/suite.opt b/storage/spider/mysql-test/spider/feature/suite.opt
new file mode 100644
index 00000000000..672a3b37d4f
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/suite.opt
@@ -0,0 +1 @@
+--loose-innodb --loose-skip-performance-schema
diff --git a/storage/spider/mysql-test/spider/feature/suite.pm b/storage/spider/mysql-test/spider/feature/suite.pm
new file mode 100644
index 00000000000..f106147deb6
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/suite.pm
@@ -0,0 +1,12 @@
+package My::Suite::Spider;
+
+@ISA = qw(My::Suite);
+
+return "No Spider engine" unless $ENV{HA_SPIDER_SO};
+return "Not run for embedded server" if $::opt_embedded_server;
+return "Test needs --big-test" unless $::opt_big_test;
+
+sub is_default { 1 }
+
+bless { };
+
diff --git a/storage/spider/mysql-test/spider/feature/t/slave_transaction_retry_errors.cnf b/storage/spider/mysql-test/spider/feature/t/slave_transaction_retry_errors.cnf
new file mode 100644
index 00000000000..45019d6c537
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/t/slave_transaction_retry_errors.cnf
@@ -0,0 +1,4 @@
+!include include/default_mysqld.cnf
+!include ../my_1_1.cnf
+!include ../my_2_1.cnf
+!include ../my_4_1.cnf
diff --git a/storage/spider/mysql-test/spider/feature/t/slave_transaction_retry_errors.test b/storage/spider/mysql-test/spider/feature/t/slave_transaction_retry_errors.test
new file mode 100644
index 00000000000..436bc3fb761
--- /dev/null
+++ b/storage/spider/mysql-test/spider/feature/t/slave_transaction_retry_errors.test
@@ -0,0 +1,9 @@
+--source ../include/slave_transaction_retry_errors_init.inc
+--echo
+
+--connection slave1_1
+SHOW VARIABLES LIKE 'slave_transaction_retry_errors';
+
+--source ../include/slave_transaction_retry_errors_deinit.inc
+--echo
+--echo end of test
diff --git a/storage/spider/mysql-test/spider/include/checksum_table_with_quick_mode_3_deinit.inc b/storage/spider/mysql-test/spider/include/checksum_table_with_quick_mode_3_deinit.inc
index d551f5a4af3..7db871c700f 100644
--- a/storage/spider/mysql-test/spider/include/checksum_table_with_quick_mode_3_deinit.inc
+++ b/storage/spider/mysql-test/spider/include/checksum_table_with_quick_mode_3_deinit.inc
@@ -1,5 +1,3 @@
---let $MASTER_1_COMMENT_2_1= $MASTER_1_COMMENT_2_1_BACKUP
---let $CHILD2_1_DROP_TABLES= $CHILD2_1_DROP_TABLES_BACKUP
--let $CHILD2_1_CREATE_TABLES= $CHILD2_1_CREATE_TABLES_BACKUP
--let $CHILD2_1_SELECT_TABLES= $CHILD2_1_SELECT_TABLES_BACKUP
--let $OUTPUT_CHILD_GROUP2= $OUTPUT_CHILD_GROUP2_BACKUP
diff --git a/storage/spider/mysql-test/spider/r/direct_join.result b/storage/spider/mysql-test/spider/r/direct_join.result
index 0a76c3246f9..a1018c35fbf 100644
--- a/storage/spider/mysql-test/spider/r/direct_join.result
+++ b/storage/spider/mysql-test/spider/r/direct_join.result
@@ -167,7 +167,7 @@ connection child2_1;
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
argument
select `id`,`hr_status`,`region_code`,`region` from `auto_test_remote`.`tbl_person` where `id` = '24FC3F0A5119432BAE13DD65AABAA39C' and `region` = 510411
-select `person_id`,`diseaseKind_id` from `auto_test_remote`.`tbl_ncd_cm_person` where ((`diseaseKind_id` = '52A0328740914BCE86ED10A4D2521816'))
+select count(0) `count(0)` from `auto_test_remote`.`tbl_ncd_cm_person` t0 where ((t0.`person_id` = '24FC3F0A5119432BAE13DD65AABAA39C') and (t0.`diseaseKind_id` = '52A0328740914BCE86ED10A4D2521816'))
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
SELECT * FROM tbl_person;
id hr_status region_code region
diff --git a/storage/spider/mysql-test/spider/r/partition_fulltext.result b/storage/spider/mysql-test/spider/r/partition_fulltext.result
index 3289473b905..6c001d25444 100644
--- a/storage/spider/mysql-test/spider/r/partition_fulltext.result
+++ b/storage/spider/mysql-test/spider/r/partition_fulltext.result
@@ -71,7 +71,7 @@ pkey words
connection child2_1;
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
argument
-select match(`words`)against('+ghi' in boolean mode),`pkey`,`words` from `auto_test_remote`.`tbl_a` where match(`words`)against('+ghi' in boolean mode) and (match(`words`)against('+ghi' in boolean mode))
+select match(`words`)against('+ghi' in boolean mode),`pkey`,`words` from `auto_test_remote`.`tbl_a` where match(`words`)against('+ghi' in boolean mode) and (match(`words`)against(_latin1'+ghi' in boolean mode))
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
SELECT pkey FROM tbl_a ORDER BY pkey;
pkey
@@ -80,7 +80,7 @@ pkey
connection child2_2;
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
argument
-select match(`words`)against('+ghi' in boolean mode),`pkey`,`words` from `auto_test_remote2`.`tbl_a` where match(`words`)against('+ghi' in boolean mode) and (match(`words`)against('+ghi' in boolean mode))
+select match(`words`)against('+ghi' in boolean mode),`pkey`,`words` from `auto_test_remote2`.`tbl_a` where match(`words`)against('+ghi' in boolean mode) and (match(`words`)against(_latin1'+ghi' in boolean mode))
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
SELECT pkey FROM tbl_a ORDER BY pkey;
pkey
@@ -91,7 +91,7 @@ pkey
connection child2_3;
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
argument
-select match(`words`)against('+ghi' in boolean mode),`pkey`,`words` from `auto_test_remote3`.`tbl_a` where match(`words`)against('+ghi' in boolean mode) and (match(`words`)against('+ghi' in boolean mode))
+select match(`words`)against('+ghi' in boolean mode),`pkey`,`words` from `auto_test_remote3`.`tbl_a` where match(`words`)against('+ghi' in boolean mode) and (match(`words`)against(_latin1'+ghi' in boolean mode))
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
SELECT pkey FROM tbl_a ORDER BY pkey;
pkey
diff --git a/storage/spider/mysql-test/spider/r/pushdown_not_like.result b/storage/spider/mysql-test/spider/r/pushdown_not_like.result
new file mode 100644
index 00000000000..0e007b094de
--- /dev/null
+++ b/storage/spider/mysql-test/spider/r/pushdown_not_like.result
@@ -0,0 +1,63 @@
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+child3_1
+child3_2
+child3_3
+
+drop and create databases
+connection master_1;
+DROP DATABASE IF EXISTS auto_test_local;
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+connection child2_1;
+DROP DATABASE IF EXISTS auto_test_remote;
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+
+create table select test
+connection master_1;
+DROP TABLE IF EXISTS ta_l;
+CREATE TABLE ta_l (
+a INT,
+b CHAR(1),
+c DATETIME,
+PRIMARY KEY(a)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1
+INSERT INTO ta_l (a, b, c) VALUES
+(1, 'a', '2018-11-01 10:21:39'),
+(2, 'b', '2015-06-30 23:59:59'),
+(3, 'c', '2013-11-01 01:01:01');
+
+spider not like bug fix test
+connection master_1;
+select * from ta_l where b not like 'a%';
+a b c
+2 b 2015-06-30 23:59:59
+3 c 2013-11-01 01:01:01
+connection child2_1;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%select%';
+argument
+select t0.`a` `a`,t0.`b` `b`,t0.`c` `c` from `auto_test_remote`.`ta_r` t0 where (t0.`b` not like 'a%')
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%select%'
+
+deinit
+connection master_1;
+DROP DATABASE IF EXISTS auto_test_local;
+connection child2_1;
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+child3_1
+child3_2
+child3_3
+
+end of test
diff --git a/storage/spider/mysql-test/spider/r/show_system_tables.result b/storage/spider/mysql-test/spider/r/show_system_tables.result
index 831d4578efe..67411862e00 100644
--- a/storage/spider/mysql-test/spider/r/show_system_tables.result
+++ b/storage/spider/mysql-test/spider/r/show_system_tables.result
@@ -13,15 +13,15 @@ connection master_1;
SELECT table_name, engine FROM information_schema.tables
WHERE table_schema = 'mysql' AND table_name like '%spider_%';
table_name engine
-spider_link_failed_log MyISAM
-spider_link_mon_servers MyISAM
-spider_table_crd MyISAM
-spider_table_position_for_recovery MyISAM
-spider_table_sts MyISAM
-spider_tables MyISAM
-spider_xa MyISAM
-spider_xa_failed_log MyISAM
-spider_xa_member MyISAM
+spider_link_failed_log Aria
+spider_link_mon_servers Aria
+spider_table_crd Aria
+spider_table_position_for_recovery Aria
+spider_table_sts Aria
+spider_tables Aria
+spider_xa Aria
+spider_xa_failed_log Aria
+spider_xa_member Aria
deinit
for master_1
diff --git a/storage/spider/mysql-test/spider/r/slave_trx_isolation.result b/storage/spider/mysql-test/spider/r/slave_trx_isolation.result
index 7d9ba40cab3..4fd2e71d3f2 100644
--- a/storage/spider/mysql-test/spider/r/slave_trx_isolation.result
+++ b/storage/spider/mysql-test/spider/r/slave_trx_isolation.result
@@ -53,7 +53,7 @@ SELECT argument FROM mysql.general_log WHERE argument LIKE '%set %';
argument
set session time_zone = '+00:00'
SET NAMES utf8
-set session transaction isolation level read committed;set session autocommit = 1;start transaction
+set session transaction isolation level read committed;set session autocommit = 1;set session wait_timeout = 604800;start transaction
SELECT argument FROM mysql.general_log WHERE argument LIKE '%set %'
SELECT pkey FROM tbl_a ORDER BY pkey;
pkey
diff --git a/storage/spider/mysql-test/spider/r/timestamp.result b/storage/spider/mysql-test/spider/r/timestamp.result
index bd1f442d462..85ca7f6e7f4 100644
--- a/storage/spider/mysql-test/spider/r/timestamp.result
+++ b/storage/spider/mysql-test/spider/r/timestamp.result
@@ -252,11 +252,11 @@ col_a col_dt col_ts unix_timestamp(col_ts)
connection child2_1;
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
argument
-select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` > '2017-12-31 23:00:00')
-select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` < '2018-10-28 01:30:00')
-select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where ('2018-10-28 01:30:00' > t0.`col_ts`)
-select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` between '2018-10-28 00:30:00' and '2018-10-28 01:30:00')
-select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where ((t0.`col_ts` >= '2018-10-28 00:30:00') and (t0.`col_ts` <= '2018-10-28 01:30:00'))
+select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` > _latin1'2017-12-31 23:00:00')
+select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` < _latin1'2018-10-28 01:30:00')
+select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (_latin1'2018-10-28 01:30:00' > t0.`col_ts`)
+select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` between _latin1'2018-10-28 00:30:00' and _latin1'2018-10-28 01:30:00')
+select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where ((t0.`col_ts` >= _latin1'2018-10-28 00:30:00') and (t0.`col_ts` <= _latin1'2018-10-28 01:30:00'))
select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` > '2018-03-25 01:00:00')
select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` > '1970-01-01 00:00:01')
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
@@ -339,11 +339,11 @@ col_a col_dt col_ts unix_timestamp(col_ts)
connection child2_1;
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
argument
-select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` > '2017-12-31 23:00:00')
-select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` < '2018-10-28 01:30:00')
-select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where ('2018-10-28 01:30:00' > t0.`col_ts`)
-select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` between '2018-10-28 00:30:00' and '2018-10-28 01:30:00')
-select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where ((t0.`col_ts` >= '2018-10-28 00:30:00') and (t0.`col_ts` <= '2018-10-28 01:30:00'))
+select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` > _latin1'2017-12-31 23:00:00')
+select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` < _latin1'2018-10-28 01:30:00')
+select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (_latin1'2018-10-28 01:30:00' > t0.`col_ts`)
+select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` between _latin1'2018-10-28 00:30:00' and _latin1'2018-10-28 01:30:00')
+select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where ((t0.`col_ts` >= _latin1'2018-10-28 00:30:00') and (t0.`col_ts` <= _latin1'2018-10-28 01:30:00'))
select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` > '2018-03-25 01:00:00')
select t0.`col_a` `col_a`,t0.`col_dt` `col_dt`,t0.`col_ts` `col_ts`,(unix_timestamp(t0.`col_ts`)) `unix_timestamp(col_ts)` from `ts_test_remote`.`tbl_a` t0 where (t0.`col_ts` > '1970-01-01 00:00:01')
SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
diff --git a/storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_key_deinit.inc b/storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_key_deinit.inc
new file mode 100644
index 00000000000..ad30aac5d6e
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_key_deinit.inc
@@ -0,0 +1,12 @@
+--let $MASTER_1_COMMENT_2_1= $MASTER_1_COMMENT_2_1_BACKUP
+--let $MASTER_1_COMMENT_2_1_2= $MASTER_1_COMMENT_2_1_2_BACKUP
+--let $CHILD2_1_DROP_TABLES= $CHILD2_1_DROP_TABLES_BACKUP
+--let $CHILD2_1_CREATE_TABLES= $CHILD2_1_CREATE_TABLES_BACKUP
+--let $CHILD2_1_SELECT_TABLES= $CHILD2_1_SELECT_TABLES_BACKUP
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../../t/test_deinit.inc
+--enable_result_log
+--enable_query_log
+--enable_warnings
diff --git a/storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_key_init.inc b/storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_key_init.inc
new file mode 100644
index 00000000000..695d46101ae
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_key_init.inc
@@ -0,0 +1,36 @@
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../../t/test_init.inc
+--enable_result_log
+--enable_query_log
+--enable_warnings
+--let $MASTER_1_COMMENT_2_1_BACKUP= $MASTER_1_COMMENT_2_1
+let $MASTER_1_COMMENT_2_1=
+ COMMENT='table "tbl_a", srv "s_2_1"';
+--let $MASTER_1_COMMENT_2_1_2_BACKUP= $MASTER_1_COMMENT_2_1_2
+let $MASTER_1_COMMENT_2_1_2=
+ COMMENT='table "tbl_b", srv "s_2_1"';
+--let $CHILD2_1_DROP_TABLES_BACKUP= $CHILD2_1_DROP_TABLES
+let $CHILD2_1_DROP_TABLES=
+ DROP TABLE IF EXISTS tbl_a $STR_SEMICOLON
+ DROP TABLE IF EXISTS tbl_b;
+--let $CHILD2_1_CREATE_TABLES_BACKUP= $CHILD2_1_CREATE_TABLES
+let $CHILD2_1_CREATE_TABLES=
+ CREATE TABLE tbl_a (
+ akey int NOT NULL,
+ val int NOT NULL,
+ KEY idx1 (akey)
+ ) $CHILD2_1_ENGINE $CHILD2_1_CHARSET $STR_SEMICOLON
+ CREATE TABLE tbl_b (
+ bkey int NOT NULL,
+ akey int NOT NULL,
+ PRIMARY KEY (bkey)
+ ) $CHILD2_1_ENGINE $CHILD2_1_CHARSET;
+--let $CHILD2_1_SELECT_TABLES_BACKUP= $CHILD2_1_SELECT_TABLES
+let $CHILD2_1_SELECT_TABLES=
+ SELECT akey, val FROM tbl_a ORDER BY akey $STR_SEMICOLON
+ SELECT bkey, akey FROM tbl_b ORDER BY bkey;
+let $CHILD2_1_SELECT_ARGUMENT1=
+ SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_a`%' $STR_SEMICOLON
+ SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_b`%';
diff --git a/storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_pkey_deinit.inc b/storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_pkey_deinit.inc
new file mode 100644
index 00000000000..ad30aac5d6e
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_pkey_deinit.inc
@@ -0,0 +1,12 @@
+--let $MASTER_1_COMMENT_2_1= $MASTER_1_COMMENT_2_1_BACKUP
+--let $MASTER_1_COMMENT_2_1_2= $MASTER_1_COMMENT_2_1_2_BACKUP
+--let $CHILD2_1_DROP_TABLES= $CHILD2_1_DROP_TABLES_BACKUP
+--let $CHILD2_1_CREATE_TABLES= $CHILD2_1_CREATE_TABLES_BACKUP
+--let $CHILD2_1_SELECT_TABLES= $CHILD2_1_SELECT_TABLES_BACKUP
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../../t/test_deinit.inc
+--enable_result_log
+--enable_query_log
+--enable_warnings
diff --git a/storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_pkey_init.inc b/storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_pkey_init.inc
new file mode 100644
index 00000000000..e364489c05f
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/include/direct_join_by_pkey_pkey_init.inc
@@ -0,0 +1,36 @@
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../../t/test_init.inc
+--enable_result_log
+--enable_query_log
+--enable_warnings
+--let $MASTER_1_COMMENT_2_1_BACKUP= $MASTER_1_COMMENT_2_1
+let $MASTER_1_COMMENT_2_1=
+ COMMENT='table "tbl_a", srv "s_2_1"';
+--let $MASTER_1_COMMENT_2_1_2_BACKUP= $MASTER_1_COMMENT_2_1_2
+let $MASTER_1_COMMENT_2_1_2=
+ COMMENT='table "tbl_b", srv "s_2_1"';
+--let $CHILD2_1_DROP_TABLES_BACKUP= $CHILD2_1_DROP_TABLES
+let $CHILD2_1_DROP_TABLES=
+ DROP TABLE IF EXISTS tbl_a $STR_SEMICOLON
+ DROP TABLE IF EXISTS tbl_b;
+--let $CHILD2_1_CREATE_TABLES_BACKUP= $CHILD2_1_CREATE_TABLES
+let $CHILD2_1_CREATE_TABLES=
+ CREATE TABLE tbl_a (
+ akey int NOT NULL,
+ val int NOT NULL,
+ PRIMARY KEY (akey)
+ ) $CHILD2_1_ENGINE $CHILD2_1_CHARSET $STR_SEMICOLON
+ CREATE TABLE tbl_b (
+ bkey int NOT NULL,
+ akey int NOT NULL,
+ PRIMARY KEY (bkey)
+ ) $CHILD2_1_ENGINE $CHILD2_1_CHARSET;
+--let $CHILD2_1_SELECT_TABLES_BACKUP= $CHILD2_1_SELECT_TABLES
+let $CHILD2_1_SELECT_TABLES=
+ SELECT akey, val FROM tbl_a ORDER BY akey $STR_SEMICOLON
+ SELECT bkey, akey FROM tbl_b ORDER BY bkey;
+let $CHILD2_1_SELECT_ARGUMENT1=
+ SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_a`%' $STR_SEMICOLON
+ SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_b`%';
diff --git a/storage/spider/mysql-test/spider/regression/e1121/my.cnf b/storage/spider/mysql-test/spider/regression/e1121/my.cnf
new file mode 100644
index 00000000000..47558d85fc6
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/my.cnf
@@ -0,0 +1,3 @@
+!include include/default_mysqld.cnf
+!include my_1_1.cnf
+!include my_2_1.cnf
diff --git a/storage/spider/mysql-test/spider/regression/e1121/my_1_1.cnf b/storage/spider/mysql-test/spider/regression/e1121/my_1_1.cnf
new file mode 100644
index 00000000000..5f17295d895
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/my_1_1.cnf
@@ -0,0 +1,44 @@
+[mysqld.1.1]
+log-bin= master-bin
+loose_handlersocket_port= 20000
+loose_handlersocket_port_wr= 20001
+loose_handlersocket_threads= 2
+loose_handlersocket_threads_wr= 1
+loose_handlersocket_support_merge_table= 0
+loose_handlersocket_direct_update_mode= 2
+loose_handlersocket_unlimited_boundary= 65536
+loose_handlersocket_bulk_insert= 0
+loose_handlersocket_bulk_insert_timeout= 0
+loose_handlersocket_general_log= 1
+loose_handlersocket_timeout= 30
+loose_handlersocket_close_table_interval=2
+open_files_limit= 4096
+loose_partition= 1
+
+[ENV]
+USE_GEOMETRY_TEST= 1
+USE_FULLTEXT_TEST= 1
+USE_HA_TEST= 1
+USE_GENERAL_LOG= 1
+USE_REPLICATION= 1
+MASTER_1_MYPORT= @mysqld.1.1.port
+MASTER_1_HSRPORT= 20000
+MASTER_1_HSWPORT= 20001
+MASTER_1_MYSOCK= @mysqld.1.1.socket
+MASTER_1_ENGINE_TYPE= Spider
+#MASTER_1_ENGINE_TYPE= MyISAM
+MASTER_1_ENGINE= ENGINE=Spider
+MASTER_1_CHARSET= DEFAULT CHARSET=utf8
+MASTER_1_ENGINE2= ENGINE=MyISAM
+MASTER_1_CHARSET2= DEFAULT CHARSET=utf8
+MASTER_1_CHARSET3= DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci
+
+STR_SEMICOLON= ;
+
+#The followings are set in include/init_xxx.inc files
+# MASTER_1_COMMENT_2_1
+# MASTER_1_COMMENT2_2_1
+# MASTER_1_COMMENT3_2_1
+# MASTER_1_COMMENT4_2_1
+# MASTER_1_COMMENT5_2_1
+# MASTER_1_COMMENT_P_2_1
diff --git a/storage/spider/mysql-test/spider/regression/e1121/my_2_1.cnf b/storage/spider/mysql-test/spider/regression/e1121/my_2_1.cnf
new file mode 100644
index 00000000000..24161645607
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/my_2_1.cnf
@@ -0,0 +1,56 @@
+[mysqld.2.1]
+loose_handlersocket_port= 20002
+loose_handlersocket_port_wr= 20003
+loose_handlersocket_threads= 2
+loose_handlersocket_threads_wr= 1
+loose_handlersocket_support_merge_table= 0
+loose_handlersocket_direct_update_mode= 2
+loose_handlersocket_unlimited_boundary= 65536
+loose_handlersocket_bulk_insert= 0
+loose_handlersocket_bulk_insert_timeout= 0
+loose_handlersocket_general_log= 1
+loose_handlersocket_timeout= 30
+loose_handlersocket_close_table_interval=2
+open_files_limit= 4096
+
+[ENV]
+USE_CHILD_GROUP2= 1
+OUTPUT_CHILD_GROUP2= 0
+CHILD2_1_MYPORT= @mysqld.2.1.port
+CHILD2_1_HSRPORT= 20002
+CHILD2_1_HSWPORT= 20003
+CHILD2_1_MYSOCK= @mysqld.2.1.socket
+CHILD2_1_ENGINE_TYPE= InnoDB
+CHILD2_1_ENGINE= ENGINE=InnoDB
+CHILD2_1_CHARSET= DEFAULT CHARSET=utf8
+CHILD2_1_CHARSET2= DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci
+CHILD2_1_FT_MYPORT= @mysqld.2.1.port
+CHILD2_1_FT_MYSOCK= @mysqld.2.1.socket
+CHILD2_1_FT_ENGINE_TYPE= MyISAM
+CHILD2_1_FT_ENGINE= ENGINE=MyISAM
+CHILD2_1_FT_CHARSET= DEFAULT CHARSET=utf8
+CHILD2_1_GM_MYPORT= @mysqld.2.1.port
+CHILD2_1_GM_MYSOCK= @mysqld.2.1.socket
+CHILD2_1_GM_ENGINE_TYPE= MyISAM
+CHILD2_1_GM_ENGINE= ENGINE=MyISAM
+CHILD2_1_GM_CHARSET= DEFAULT CHARSET=utf8
+
+#The followings are set in include/init_xxx.inc files
+# CHILD2_1_DROP_TABLES
+# CHILD2_1_CREATE_TABLES
+# CHILD2_1_SELECT_TABLES
+# CHILD2_1_DROP_TABLES2
+# CHILD2_1_CREATE_TABLES2
+# CHILD2_1_SELECT_TABLES2
+# CHILD2_1_DROP_TABLES3
+# CHILD2_1_CREATE_TABLES3
+# CHILD2_1_SELECT_TABLES3
+# CHILD2_1_DROP_TABLES4
+# CHILD2_1_CREATE_TABLES4
+# CHILD2_1_SELECT_TABLES4
+# CHILD2_1_DROP_TABLES5
+# CHILD2_1_CREATE_TABLES5
+# CHILD2_1_SELECT_TABLES5
+# CHILD2_1_DROP_TABLES6
+# CHILD2_1_CREATE_TABLES6
+# CHILD2_1_SELECT_TABLES6
diff --git a/storage/spider/mysql-test/spider/regression/e1121/r/direct_join_by_pkey_key.result b/storage/spider/mysql-test/spider/regression/e1121/r/direct_join_by_pkey_key.result
new file mode 100644
index 00000000000..4b04b71aee2
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/r/direct_join_by_pkey_key.result
@@ -0,0 +1,94 @@
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+
+this test is for MDEV-18995
+
+drop and create databases
+connection master_1;
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+connection child2_1;
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+
+create table and insert
+connection child2_1;
+CHILD2_1_CREATE_TABLES
+TRUNCATE TABLE mysql.general_log;
+connection master_1;
+CREATE TABLE tbl_a (
+akey int NOT NULL,
+val int NOT NULL,
+KEY idx1 (akey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1
+CREATE TABLE tbl_b (
+bkey int NOT NULL,
+akey int NOT NULL,
+PRIMARY KEY (bkey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1_2
+INSERT INTO tbl_a (akey,val) VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
+INSERT INTO tbl_b (bkey,akey) VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,4),(6,3),(7,2),(8,1),(9,0);
+
+select test 1
+connection child2_1;
+TRUNCATE TABLE mysql.general_log;
+connection master_1;
+SELECT a.val, a.akey FROM tbl_a a, tbl_b b WHERE a.akey = b.akey AND b.bkey = 5;
+val akey
+4 4
+connection child2_1;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_a`%' ;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_b`%';
+argument
+select t0.`val` `val`,t0.`akey` `akey` from `auto_test_remote`.`tbl_a` t0 where (t0.`akey` = '4')
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_a`%' ;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_b`%'
+argument
+select `bkey`,`akey` from `auto_test_remote`.`tbl_b` where `bkey` = 5
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_a`%' ;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_b`%'
+SELECT akey, val FROM tbl_a ORDER BY akey ;
+SELECT bkey, akey FROM tbl_b ORDER BY bkey;
+akey val
+0 0
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+bkey akey
+0 0
+1 1
+2 2
+3 3
+4 4
+5 4
+6 3
+7 2
+8 1
+9 0
+
+deinit
+connection master_1;
+DROP DATABASE IF EXISTS auto_test_local;
+connection child2_1;
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+
+end of test
diff --git a/storage/spider/mysql-test/spider/regression/e1121/r/direct_join_by_pkey_pkey.result b/storage/spider/mysql-test/spider/regression/e1121/r/direct_join_by_pkey_pkey.result
new file mode 100644
index 00000000000..9a75cc691fe
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/r/direct_join_by_pkey_pkey.result
@@ -0,0 +1,94 @@
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+
+this test is for MDEV-18995
+
+drop and create databases
+connection master_1;
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+connection child2_1;
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+
+create table and insert
+connection child2_1;
+CHILD2_1_CREATE_TABLES
+TRUNCATE TABLE mysql.general_log;
+connection master_1;
+CREATE TABLE tbl_a (
+akey int NOT NULL,
+val int NOT NULL,
+PRIMARY KEY (akey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1
+CREATE TABLE tbl_b (
+bkey int NOT NULL,
+akey int NOT NULL,
+PRIMARY KEY (bkey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1_2
+INSERT INTO tbl_a (akey,val) VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
+INSERT INTO tbl_b (bkey,akey) VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,4),(6,3),(7,2),(8,1),(9,0);
+
+select test 1
+connection child2_1;
+TRUNCATE TABLE mysql.general_log;
+connection master_1;
+SELECT a.val, a.akey FROM tbl_a a, tbl_b b WHERE a.akey = b.akey AND b.bkey = 5;
+val akey
+4 4
+connection child2_1;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_a`%' ;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_b`%';
+argument
+select `akey`,`val` from `auto_test_remote`.`tbl_a` where `akey` = 4
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_a`%' ;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_b`%'
+argument
+select `bkey`,`akey` from `auto_test_remote`.`tbl_b` where `bkey` = 5
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_a`%' ;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%`tbl_b`%'
+SELECT akey, val FROM tbl_a ORDER BY akey ;
+SELECT bkey, akey FROM tbl_b ORDER BY bkey;
+akey val
+0 0
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+bkey akey
+0 0
+1 1
+2 2
+3 3
+4 4
+5 4
+6 3
+7 2
+8 1
+9 0
+
+deinit
+connection master_1;
+DROP DATABASE IF EXISTS auto_test_local;
+connection child2_1;
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+
+end of test
diff --git a/storage/spider/mysql-test/spider/regression/e1121/suite.opt b/storage/spider/mysql-test/spider/regression/e1121/suite.opt
new file mode 100644
index 00000000000..672a3b37d4f
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/suite.opt
@@ -0,0 +1 @@
+--loose-innodb --loose-skip-performance-schema
diff --git a/storage/spider/mysql-test/spider/regression/e1121/suite.pm b/storage/spider/mysql-test/spider/regression/e1121/suite.pm
new file mode 100644
index 00000000000..f106147deb6
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/suite.pm
@@ -0,0 +1,12 @@
+package My::Suite::Spider;
+
+@ISA = qw(My::Suite);
+
+return "No Spider engine" unless $ENV{HA_SPIDER_SO};
+return "Not run for embedded server" if $::opt_embedded_server;
+return "Test needs --big-test" unless $::opt_big_test;
+
+sub is_default { 1 }
+
+bless { };
+
diff --git a/storage/spider/mysql-test/spider/regression/e1121/t/direct_join_by_pkey_key.test b/storage/spider/mysql-test/spider/regression/e1121/t/direct_join_by_pkey_key.test
new file mode 100644
index 00000000000..e915a21fd4a
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/t/direct_join_by_pkey_key.test
@@ -0,0 +1,82 @@
+--source ../include/direct_join_by_pkey_key_init.inc
+--echo
+--echo this test is for MDEV-18995
+--echo
+--echo drop and create databases
+
+--connection master_1
+--disable_warnings
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+
+--connection child2_1
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+--enable_warnings
+
+--echo
+--echo create table and insert
+
+--connection child2_1
+--disable_query_log
+echo CHILD2_1_CREATE_TABLES;
+eval $CHILD2_1_CREATE_TABLES;
+--enable_query_log
+TRUNCATE TABLE mysql.general_log;
+
+--connection master_1
+--disable_query_log
+echo CREATE TABLE tbl_a (
+ akey int NOT NULL,
+ val int NOT NULL,
+ KEY idx1 (akey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1;
+eval CREATE TABLE tbl_a (
+ akey int NOT NULL,
+ val int NOT NULL,
+ KEY idx1 (akey)
+) $MASTER_1_ENGINE $MASTER_1_CHARSET $MASTER_1_COMMENT_2_1;
+echo CREATE TABLE tbl_b (
+ bkey int NOT NULL,
+ akey int NOT NULL,
+ PRIMARY KEY (bkey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1_2;
+eval CREATE TABLE tbl_b (
+ bkey int NOT NULL,
+ akey int NOT NULL,
+ PRIMARY KEY (bkey)
+) $MASTER_1_ENGINE $MASTER_1_CHARSET $MASTER_1_COMMENT_2_1_2;
+--enable_query_log
+INSERT INTO tbl_a (akey,val) VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
+INSERT INTO tbl_b (bkey,akey) VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,4),(6,3),(7,2),(8,1),(9,0);
+
+--echo
+--echo select test 1
+
+--connection child2_1
+TRUNCATE TABLE mysql.general_log;
+
+--connection master_1
+SELECT a.val, a.akey FROM tbl_a a, tbl_b b WHERE a.akey = b.akey AND b.bkey = 5;
+
+--connection child2_1
+eval $CHILD2_1_SELECT_ARGUMENT1;
+eval $CHILD2_1_SELECT_TABLES;
+
+--echo
+--echo deinit
+--disable_warnings
+
+--connection master_1
+DROP DATABASE IF EXISTS auto_test_local;
+
+--connection child2_1
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+
+--enable_warnings
+--source ../include/direct_join_by_pkey_key_deinit.inc
+--echo
+--echo end of test
diff --git a/storage/spider/mysql-test/spider/regression/e1121/t/direct_join_by_pkey_pkey.test b/storage/spider/mysql-test/spider/regression/e1121/t/direct_join_by_pkey_pkey.test
new file mode 100644
index 00000000000..dcd6e3a4535
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e1121/t/direct_join_by_pkey_pkey.test
@@ -0,0 +1,82 @@
+--source ../include/direct_join_by_pkey_pkey_init.inc
+--echo
+--echo this test is for MDEV-18995
+--echo
+--echo drop and create databases
+
+--connection master_1
+--disable_warnings
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+
+--connection child2_1
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+--enable_warnings
+
+--echo
+--echo create table and insert
+
+--connection child2_1
+--disable_query_log
+echo CHILD2_1_CREATE_TABLES;
+eval $CHILD2_1_CREATE_TABLES;
+--enable_query_log
+TRUNCATE TABLE mysql.general_log;
+
+--connection master_1
+--disable_query_log
+echo CREATE TABLE tbl_a (
+ akey int NOT NULL,
+ val int NOT NULL,
+ PRIMARY KEY (akey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1;
+eval CREATE TABLE tbl_a (
+ akey int NOT NULL,
+ val int NOT NULL,
+ PRIMARY KEY (akey)
+) $MASTER_1_ENGINE $MASTER_1_CHARSET $MASTER_1_COMMENT_2_1;
+echo CREATE TABLE tbl_b (
+ bkey int NOT NULL,
+ akey int NOT NULL,
+ PRIMARY KEY (bkey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1_2;
+eval CREATE TABLE tbl_b (
+ bkey int NOT NULL,
+ akey int NOT NULL,
+ PRIMARY KEY (bkey)
+) $MASTER_1_ENGINE $MASTER_1_CHARSET $MASTER_1_COMMENT_2_1_2;
+--enable_query_log
+INSERT INTO tbl_a (akey,val) VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
+INSERT INTO tbl_b (bkey,akey) VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,4),(6,3),(7,2),(8,1),(9,0);
+
+--echo
+--echo select test 1
+
+--connection child2_1
+TRUNCATE TABLE mysql.general_log;
+
+--connection master_1
+SELECT a.val, a.akey FROM tbl_a a, tbl_b b WHERE a.akey = b.akey AND b.bkey = 5;
+
+--connection child2_1
+eval $CHILD2_1_SELECT_ARGUMENT1;
+eval $CHILD2_1_SELECT_TABLES;
+
+--echo
+--echo deinit
+--disable_warnings
+
+--connection master_1
+DROP DATABASE IF EXISTS auto_test_local;
+
+--connection child2_1
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+
+--enable_warnings
+--source ../include/direct_join_by_pkey_pkey_deinit.inc
+--echo
+--echo end of test
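
Illustration (not from this patch): the e1121 results above show the data node receiving `bkey` = 5 against tbl_b and a constant-folded `akey` = '4' against tbl_a, consistent with the pkey lookup being resolved first and its result substituted into the dependent condition. A minimal standalone C++ sketch of that two-step rewrite, using a hypothetical in-memory stand-in for tbl_b instead of Spider's real connection machinery:

// Standalone sketch (hypothetical stand-in data, not Spider's API):
// resolve the driving pkey lookup first, then emit the dependent table's
// query with the join key folded to a constant, as in the e1121 results.
#include <cstdio>
#include <map>
#include <string>

int main()
{
  // stand-in for the remote tbl_b (bkey -> akey)
  std::map<int, int> tbl_b = {{0,0},{1,1},{2,2},{3,3},{4,4},
                              {5,4},{6,3},{7,2},{8,1},{9,0}};

  int bkey = 5;               // WHERE b.bkey = 5
  int akey = tbl_b.at(bkey);  // step 1: pkey lookup on tbl_b -> akey = 4

  // step 2: the condition on tbl_a is now a constant and can be pushed down
  std::string remote_sql =
    "select t0.`val` `val`,t0.`akey` `akey` "
    "from `auto_test_remote`.`tbl_a` t0 "
    "where (t0.`akey` = '" + std::to_string(akey) + "')";

  std::printf("%s\n", remote_sql.c_str());
  return 0;
}
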
diff --git a/storage/spider/mysql-test/spider/regression/e112122/include/group_by_order_by_limit_ok_deinit.inc b/storage/spider/mysql-test/spider/regression/e112122/include/group_by_order_by_limit_ok_deinit.inc
new file mode 100644
index 00000000000..2248eef3650
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e112122/include/group_by_order_by_limit_ok_deinit.inc
@@ -0,0 +1,14 @@
+--let $MASTER_1_COMMENT_2_1= $MASTER_1_COMMENT_2_1_BACKUP
+--let $CHILD2_1_DROP_TABLES= $CHILD2_1_DROP_TABLES_BACKUP
+--let $CHILD2_1_CREATE_TABLES= $CHILD2_1_CREATE_TABLES_BACKUP
+--let $CHILD2_1_SELECT_TABLES= $CHILD2_1_SELECT_TABLES_BACKUP
+--let $CHILD2_2_DROP_TABLES= $CHILD2_2_DROP_TABLES_BACKUP
+--let $CHILD2_2_CREATE_TABLES= $CHILD2_2_CREATE_TABLES_BACKUP
+--let $CHILD2_2_SELECT_TABLES= $CHILD2_2_SELECT_TABLES_BACKUP
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../../t/test_deinit.inc
+--enable_result_log
+--enable_query_log
+--enable_warnings
diff --git a/storage/spider/mysql-test/spider/regression/e112122/include/group_by_order_by_limit_ok_init.inc b/storage/spider/mysql-test/spider/regression/e112122/include/group_by_order_by_limit_ok_init.inc
new file mode 100644
index 00000000000..a6945218fab
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e112122/include/group_by_order_by_limit_ok_init.inc
@@ -0,0 +1,54 @@
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source ../../t/test_init.inc
+if (!$HAVE_PARTITION)
+{
+ --source group_by_order_by_limit_ok_deinit.inc
+ --enable_result_log
+ --enable_query_log
+ --enable_warnings
+ skip Test requires partitioning;
+}
+--enable_result_log
+--enable_query_log
+--enable_warnings
+--let $MASTER_1_COMMENT_2_1_BACKUP= $MASTER_1_COMMENT_2_1
+let $MASTER_1_COMMENT_2_1=
+ COMMENT='table "tbl_a"'
+ PARTITION BY KEY(skey) (
+ PARTITION pt1 COMMENT='srv "s_2_1"',
+ PARTITION pt2 COMMENT='srv "s_2_2"'
+ );
+--let $CHILD2_1_DROP_TABLES_BACKUP= $CHILD2_1_DROP_TABLES
+let $CHILD2_1_DROP_TABLES=
+ DROP TABLE IF EXISTS tbl_a;
+--let $CHILD2_1_CREATE_TABLES_BACKUP= $CHILD2_1_CREATE_TABLES
+let $CHILD2_1_CREATE_TABLES=
+ CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ skey int NOT NULL,
+ KEY idx1 (pkey),
+ KEY idx2 (skey)
+ ) $CHILD2_1_ENGINE $CHILD2_1_CHARSET;
+--let $CHILD2_1_SELECT_TABLES_BACKUP= $CHILD2_1_SELECT_TABLES
+let $CHILD2_1_SELECT_TABLES=
+ SELECT pkey, skey FROM tbl_a ORDER BY pkey;
+let $CHILD2_1_SELECT_ARGUMENT1=
+ SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
+--let $CHILD2_2_DROP_TABLES_BACKUP= $CHILD2_2_DROP_TABLES
+let $CHILD2_2_DROP_TABLES=
+ DROP TABLE IF EXISTS tbl_a;
+--let $CHILD2_2_CREATE_TABLES_BACKUP= $CHILD2_2_CREATE_TABLES
+let $CHILD2_2_CREATE_TABLES=
+ CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ skey int NOT NULL,
+ KEY idx1 (pkey),
+ KEY idx2 (skey)
+ ) $CHILD2_2_ENGINE $CHILD2_2_CHARSET;
+--let $CHILD2_2_SELECT_TABLES_BACKUP= $CHILD2_2_SELECT_TABLES
+let $CHILD2_2_SELECT_TABLES=
+ SELECT pkey, skey FROM tbl_a ORDER BY pkey;
+let $CHILD2_2_SELECT_ARGUMENT1=
+ SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
diff --git a/storage/spider/mysql-test/spider/regression/e112122/my.cnf b/storage/spider/mysql-test/spider/regression/e112122/my.cnf
new file mode 100644
index 00000000000..6610e4cfd98
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e112122/my.cnf
@@ -0,0 +1,4 @@
+!include include/default_mysqld.cnf
+!include my_1_1.cnf
+!include my_2_1.cnf
+!include my_2_2.cnf
diff --git a/storage/spider/mysql-test/spider/regression/e112122/my_1_1.cnf b/storage/spider/mysql-test/spider/regression/e112122/my_1_1.cnf
new file mode 100644
index 00000000000..5f17295d895
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e112122/my_1_1.cnf
@@ -0,0 +1,44 @@
+[mysqld.1.1]
+log-bin= master-bin
+loose_handlersocket_port= 20000
+loose_handlersocket_port_wr= 20001
+loose_handlersocket_threads= 2
+loose_handlersocket_threads_wr= 1
+loose_handlersocket_support_merge_table= 0
+loose_handlersocket_direct_update_mode= 2
+loose_handlersocket_unlimited_boundary= 65536
+loose_handlersocket_bulk_insert= 0
+loose_handlersocket_bulk_insert_timeout= 0
+loose_handlersocket_general_log= 1
+loose_handlersocket_timeout= 30
+loose_handlersocket_close_table_interval=2
+open_files_limit= 4096
+loose_partition= 1
+
+[ENV]
+USE_GEOMETRY_TEST= 1
+USE_FULLTEXT_TEST= 1
+USE_HA_TEST= 1
+USE_GENERAL_LOG= 1
+USE_REPLICATION= 1
+MASTER_1_MYPORT= @mysqld.1.1.port
+MASTER_1_HSRPORT= 20000
+MASTER_1_HSWPORT= 20001
+MASTER_1_MYSOCK= @mysqld.1.1.socket
+MASTER_1_ENGINE_TYPE= Spider
+#MASTER_1_ENGINE_TYPE= MyISAM
+MASTER_1_ENGINE= ENGINE=Spider
+MASTER_1_CHARSET= DEFAULT CHARSET=utf8
+MASTER_1_ENGINE2= ENGINE=MyISAM
+MASTER_1_CHARSET2= DEFAULT CHARSET=utf8
+MASTER_1_CHARSET3= DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci
+
+STR_SEMICOLON= ;
+
+#The followings are set in include/init_xxx.inc files
+# MASTER_1_COMMENT_2_1
+# MASTER_1_COMMENT2_2_1
+# MASTER_1_COMMENT3_2_1
+# MASTER_1_COMMENT4_2_1
+# MASTER_1_COMMENT5_2_1
+# MASTER_1_COMMENT_P_2_1
diff --git a/storage/spider/mysql-test/spider/regression/e112122/my_2_1.cnf b/storage/spider/mysql-test/spider/regression/e112122/my_2_1.cnf
new file mode 100644
index 00000000000..24161645607
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e112122/my_2_1.cnf
@@ -0,0 +1,56 @@
+[mysqld.2.1]
+loose_handlersocket_port= 20002
+loose_handlersocket_port_wr= 20003
+loose_handlersocket_threads= 2
+loose_handlersocket_threads_wr= 1
+loose_handlersocket_support_merge_table= 0
+loose_handlersocket_direct_update_mode= 2
+loose_handlersocket_unlimited_boundary= 65536
+loose_handlersocket_bulk_insert= 0
+loose_handlersocket_bulk_insert_timeout= 0
+loose_handlersocket_general_log= 1
+loose_handlersocket_timeout= 30
+loose_handlersocket_close_table_interval=2
+open_files_limit= 4096
+
+[ENV]
+USE_CHILD_GROUP2= 1
+OUTPUT_CHILD_GROUP2= 0
+CHILD2_1_MYPORT= @mysqld.2.1.port
+CHILD2_1_HSRPORT= 20002
+CHILD2_1_HSWPORT= 20003
+CHILD2_1_MYSOCK= @mysqld.2.1.socket
+CHILD2_1_ENGINE_TYPE= InnoDB
+CHILD2_1_ENGINE= ENGINE=InnoDB
+CHILD2_1_CHARSET= DEFAULT CHARSET=utf8
+CHILD2_1_CHARSET2= DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci
+CHILD2_1_FT_MYPORT= @mysqld.2.1.port
+CHILD2_1_FT_MYSOCK= @mysqld.2.1.socket
+CHILD2_1_FT_ENGINE_TYPE= MyISAM
+CHILD2_1_FT_ENGINE= ENGINE=MyISAM
+CHILD2_1_FT_CHARSET= DEFAULT CHARSET=utf8
+CHILD2_1_GM_MYPORT= @mysqld.2.1.port
+CHILD2_1_GM_MYSOCK= @mysqld.2.1.socket
+CHILD2_1_GM_ENGINE_TYPE= MyISAM
+CHILD2_1_GM_ENGINE= ENGINE=MyISAM
+CHILD2_1_GM_CHARSET= DEFAULT CHARSET=utf8
+
+#The followings are set in include/init_xxx.inc files
+# CHILD2_1_DROP_TABLES
+# CHILD2_1_CREATE_TABLES
+# CHILD2_1_SELECT_TABLES
+# CHILD2_1_DROP_TABLES2
+# CHILD2_1_CREATE_TABLES2
+# CHILD2_1_SELECT_TABLES2
+# CHILD2_1_DROP_TABLES3
+# CHILD2_1_CREATE_TABLES3
+# CHILD2_1_SELECT_TABLES3
+# CHILD2_1_DROP_TABLES4
+# CHILD2_1_CREATE_TABLES4
+# CHILD2_1_SELECT_TABLES4
+# CHILD2_1_DROP_TABLES5
+# CHILD2_1_CREATE_TABLES5
+# CHILD2_1_SELECT_TABLES5
+# CHILD2_1_DROP_TABLES6
+# CHILD2_1_CREATE_TABLES6
+# CHILD2_1_SELECT_TABLES6
diff --git a/storage/spider/mysql-test/spider/regression/e112122/my_2_2.cnf b/storage/spider/mysql-test/spider/regression/e112122/my_2_2.cnf
new file mode 100644
index 00000000000..2d3c2a89a7d
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e112122/my_2_2.cnf
@@ -0,0 +1,38 @@
+[mysqld.2.2]
+loose_handlersocket_port= 20004
+loose_handlersocket_port_wr= 20005
+loose_handlersocket_threads= 2
+loose_handlersocket_threads_wr= 1
+loose_handlersocket_support_merge_table= 0
+loose_handlersocket_direct_update_mode= 2
+loose_handlersocket_unlimited_boundary= 65536
+loose_handlersocket_bulk_insert= 0
+loose_handlersocket_bulk_insert_timeout= 0
+loose_handlersocket_general_log= 1
+loose_handlersocket_timeout= 30
+loose_handlersocket_close_table_interval=2
+open_files_limit= 4096
+
+[ENV]
+CHILD2_2_MYPORT= @mysqld.2.2.port
+CHILD2_2_HSRPORT= 20004
+CHILD2_2_HSWPORT= 20005
+CHILD2_2_MYSOCK= @mysqld.2.2.socket
+CHILD2_2_ENGINE_TYPE= InnoDB
+CHILD2_2_ENGINE= ENGINE=InnoDB
+CHILD2_2_CHARSET= DEFAULT CHARSET=utf8
+CHILD2_2_FT_MYPORT= @mysqld.2.2.port
+CHILD2_2_FT_MYSOCK= @mysqld.2.2.socket
+CHILD2_2_FT_ENGINE_TYPE= MyISAM
+CHILD2_2_FT_ENGINE= ENGINE=MyISAM
+CHILD2_2_FT_CHARSET= DEFAULT CHARSET=utf8
+CHILD2_2_GM_MYPORT= @mysqld.2.2.port
+CHILD2_2_GM_MYSOCK= @mysqld.2.2.socket
+CHILD2_2_GM_ENGINE_TYPE= MyISAM
+CHILD2_2_GM_ENGINE= ENGINE=MyISAM
+CHILD2_2_GM_CHARSET= DEFAULT CHARSET=utf8
+
+#The followings are set in include/init_xxx.inc files
+# CHILD2_2_DROP_TABLES
+# CHILD2_2_CREATE_TABLES
+# CHILD2_2_SELECT_TABLES
diff --git a/storage/spider/mysql-test/spider/regression/e112122/r/group_by_order_by_limit_ok.result b/storage/spider/mysql-test/spider/regression/e112122/r/group_by_order_by_limit_ok.result
new file mode 100644
index 00000000000..96746e09b8d
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e112122/r/group_by_order_by_limit_ok.result
@@ -0,0 +1,117 @@
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+
+this test is for MDEV-18988
+
+drop and create databases
+connection master_1;
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+connection child2_1;
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+connection child2_2;
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote2;
+USE auto_test_remote2;
+
+create table and insert
+connection child2_1;
+CHILD2_1_CREATE_TABLES
+TRUNCATE TABLE mysql.general_log;
+connection child2_2;
+CHILD2_2_CREATE_TABLES
+TRUNCATE TABLE mysql.general_log;
+connection master_1;
+CREATE TABLE tbl_a (
+pkey int NOT NULL,
+skey int NOT NULL,
+KEY idx1 (pkey),
+KEY idx2 (skey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1
+INSERT INTO tbl_a (pkey,skey) VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
+INSERT INTO tbl_a (pkey,skey) VALUES (10,10),(11,11),(12,12),(13,13),(14,14),(15,15),(16,16),(17,17),(18,18),(19,19);
+INSERT INTO tbl_a (pkey,skey) VALUES (20,5),(21,6),(22,7),(23,8),(24,9),(25,10),(26,11),(27,12),(28,13),(29,14);
+
+select test 1
+connection child2_1;
+TRUNCATE TABLE mysql.general_log;
+connection child2_2;
+TRUNCATE TABLE mysql.general_log;
+connection master_1;
+SELECT skey, count(*) cnt FROM tbl_a GROUP BY skey ORDER BY cnt DESC, skey DESC LIMIT 5;
+skey cnt
+14 2
+13 2
+12 2
+11 2
+10 2
+connection child2_1;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
+argument
+select count(0),`skey` from `auto_test_remote`.`tbl_a` group by `skey` order by count(0) desc,`skey` desc limit 5
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
+SELECT pkey, skey FROM tbl_a ORDER BY pkey;
+pkey skey
+1 1
+3 3
+5 5
+7 7
+9 9
+11 11
+13 13
+15 15
+17 17
+19 19
+20 5
+22 7
+24 9
+26 11
+28 13
+connection child2_2;
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %';
+argument
+select count(0),`skey` from `auto_test_remote2`.`tbl_a` group by `skey` order by count(0) desc,`skey` desc limit 5
+SELECT argument FROM mysql.general_log WHERE argument LIKE '%select %'
+SELECT pkey, skey FROM tbl_a ORDER BY pkey;
+pkey skey
+0 0
+2 2
+4 4
+6 6
+8 8
+10 10
+12 12
+14 14
+16 16
+18 18
+21 6
+23 8
+25 10
+27 12
+29 14
+
+deinit
+connection master_1;
+DROP DATABASE IF EXISTS auto_test_local;
+connection child2_1;
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+connection child2_2;
+DROP DATABASE IF EXISTS auto_test_remote2;
+SET GLOBAL log_output = @old_log_output;
+for master_1
+for child2
+child2_1
+child2_2
+child2_3
+for child3
+
+end of test
diff --git a/storage/spider/mysql-test/spider/regression/e112122/suite.opt b/storage/spider/mysql-test/spider/regression/e112122/suite.opt
new file mode 100644
index 00000000000..672a3b37d4f
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e112122/suite.opt
@@ -0,0 +1 @@
+--loose-innodb --loose-skip-performance-schema
diff --git a/storage/spider/mysql-test/spider/regression/e112122/suite.pm b/storage/spider/mysql-test/spider/regression/e112122/suite.pm
new file mode 100644
index 00000000000..f106147deb6
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e112122/suite.pm
@@ -0,0 +1,12 @@
+package My::Suite::Spider;
+
+@ISA = qw(My::Suite);
+
+return "No Spider engine" unless $ENV{HA_SPIDER_SO};
+return "Not run for embedded server" if $::opt_embedded_server;
+return "Test needs --big-test" unless $::opt_big_test;
+
+sub is_default { 1 }
+
+bless { };
+
diff --git a/storage/spider/mysql-test/spider/regression/e112122/t/group_by_order_by_limit_ok.test b/storage/spider/mysql-test/spider/regression/e112122/t/group_by_order_by_limit_ok.test
new file mode 100644
index 00000000000..2a70098ed1b
--- /dev/null
+++ b/storage/spider/mysql-test/spider/regression/e112122/t/group_by_order_by_limit_ok.test
@@ -0,0 +1,97 @@
+--source ../include/group_by_order_by_limit_ok_init.inc
+--echo
+--echo this test is for MDEV-18988
+--echo
+--echo drop and create databases
+--connection master_1
+--disable_warnings
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+
+--connection child2_1
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote;
+USE auto_test_remote;
+
+--connection child2_2
+SET @old_log_output = @@global.log_output;
+SET GLOBAL log_output = 'TABLE,FILE';
+CREATE DATABASE auto_test_remote2;
+USE auto_test_remote2;
+--enable_warnings
+
+--echo
+--echo create table and insert
+
+--connection child2_1
+--disable_query_log
+echo CHILD2_1_CREATE_TABLES;
+eval $CHILD2_1_CREATE_TABLES;
+--enable_query_log
+TRUNCATE TABLE mysql.general_log;
+
+--connection child2_2
+--disable_query_log
+echo CHILD2_2_CREATE_TABLES;
+eval $CHILD2_2_CREATE_TABLES;
+--enable_query_log
+TRUNCATE TABLE mysql.general_log;
+
+--connection master_1
+--disable_query_log
+echo CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ skey int NOT NULL,
+ KEY idx1 (pkey),
+ KEY idx2 (skey)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1;
+eval CREATE TABLE tbl_a (
+ pkey int NOT NULL,
+ skey int NOT NULL,
+ KEY idx1 (pkey),
+ KEY idx2 (skey)
+) $MASTER_1_ENGINE $MASTER_1_CHARSET $MASTER_1_COMMENT_2_1;
+--enable_query_log
+INSERT INTO tbl_a (pkey,skey) VALUES (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9);
+INSERT INTO tbl_a (pkey,skey) VALUES (10,10),(11,11),(12,12),(13,13),(14,14),(15,15),(16,16),(17,17),(18,18),(19,19);
+INSERT INTO tbl_a (pkey,skey) VALUES (20,5),(21,6),(22,7),(23,8),(24,9),(25,10),(26,11),(27,12),(28,13),(29,14);
+
+--echo
+--echo select test 1
+
+--connection child2_1
+TRUNCATE TABLE mysql.general_log;
+
+--connection child2_2
+TRUNCATE TABLE mysql.general_log;
+
+--connection master_1
+SELECT skey, count(*) cnt FROM tbl_a GROUP BY skey ORDER BY cnt DESC, skey DESC LIMIT 5;
+
+--connection child2_1
+eval $CHILD2_1_SELECT_ARGUMENT1;
+eval $CHILD2_1_SELECT_TABLES;
+
+--connection child2_2
+eval $CHILD2_2_SELECT_ARGUMENT1;
+eval $CHILD2_2_SELECT_TABLES;
+
+--echo
+--echo deinit
+--disable_warnings
+--connection master_1
+DROP DATABASE IF EXISTS auto_test_local;
+
+--connection child2_1
+DROP DATABASE IF EXISTS auto_test_remote;
+SET GLOBAL log_output = @old_log_output;
+
+--connection child2_2
+DROP DATABASE IF EXISTS auto_test_remote2;
+SET GLOBAL log_output = @old_log_output;
+
+--enable_warnings
+--source ../include/group_by_order_by_limit_ok_deinit.inc
+--echo
+--echo end of test
diff --git a/storage/spider/mysql-test/spider/t/connect_child2_1.inc b/storage/spider/mysql-test/spider/t/connect_child2_1.inc
index cd9b0c9ca9b..15e26031527 100644
--- a/storage/spider/mysql-test/spider/t/connect_child2_1.inc
+++ b/storage/spider/mysql-test/spider/t/connect_child2_1.inc
@@ -1 +1,2 @@
--connect (child2_1, localhost, root, , , $CHILD2_1_MYPORT, $CHILD2_1_MYSOCK)
+--connect (child2_1_2, localhost, root, , , $CHILD2_1_MYPORT, $CHILD2_1_MYSOCK)
diff --git a/storage/spider/mysql-test/spider/t/connect_child2_2.inc b/storage/spider/mysql-test/spider/t/connect_child2_2.inc
index e145f66b630..75a50821ea7 100644
--- a/storage/spider/mysql-test/spider/t/connect_child2_2.inc
+++ b/storage/spider/mysql-test/spider/t/connect_child2_2.inc
@@ -1 +1,2 @@
--connect (child2_2, localhost, root, , , $CHILD2_2_MYPORT, $CHILD2_2_MYSOCK)
+--connect (child2_2_2, localhost, root, , , $CHILD2_2_MYPORT, $CHILD2_2_MYSOCK)
diff --git a/storage/spider/mysql-test/spider/t/pushdown_not_like.test b/storage/spider/mysql-test/spider/t/pushdown_not_like.test
new file mode 100644
index 00000000000..95e4fa6eea8
--- /dev/null
+++ b/storage/spider/mysql-test/spider/t/pushdown_not_like.test
@@ -0,0 +1,138 @@
+--disable_warnings
+--disable_query_log
+--disable_result_log
+--source test_init.inc
+--enable_result_log
+--enable_query_log
+
+
+--echo
+--echo drop and create databases
+--connection master_1
+DROP DATABASE IF EXISTS auto_test_local;
+CREATE DATABASE auto_test_local;
+USE auto_test_local;
+if ($USE_CHILD_GROUP2)
+{
+ --connection child2_1
+ DROP DATABASE IF EXISTS auto_test_remote;
+ CREATE DATABASE auto_test_remote;
+ USE auto_test_remote;
+}
+--enable_warnings
+
+
+--echo
+--echo create table select test
+if ($USE_CHILD_GROUP2)
+{
+ if (!$OUTPUT_CHILD_GROUP2)
+ {
+ --disable_query_log
+ --disable_result_log
+ }
+ --connection child2_1
+ if ($OUTPUT_CHILD_GROUP2)
+ {
+ --disable_query_log
+ echo CHILD2_1_DROP_TABLES;
+ echo CHILD2_1_CREATE_TABLES;
+ }
+ --disable_warnings
+ eval $CHILD2_1_DROP_TABLES;
+ --enable_warnings
+ eval $CHILD2_1_CREATE_TABLES;
+ if ($OUTPUT_CHILD_GROUP2)
+ {
+ --enable_query_log
+ }
+ if ($USE_GENERAL_LOG)
+ {
+ SET @old_log_output = @@global.log_output;
+ TRUNCATE TABLE mysql.general_log;
+ set global log_output = 'TABLE';
+ }
+ if (!$OUTPUT_CHILD_GROUP2)
+ {
+ --enable_query_log
+ --enable_result_log
+ }
+}
+
+--connection master_1
+--disable_warnings
+DROP TABLE IF EXISTS ta_l;
+--enable_warnings
+--disable_query_log
+echo CREATE TABLE ta_l (
+ a INT,
+ b CHAR(1),
+ c DATETIME,
+ PRIMARY KEY(a)
+) MASTER_1_ENGINE MASTER_1_CHARSET MASTER_1_COMMENT_2_1;
+eval CREATE TABLE ta_l (
+ a INT,
+ b CHAR(1),
+ c DATETIME,
+ PRIMARY KEY(a)
+) $MASTER_1_ENGINE $MASTER_1_CHARSET $MASTER_1_COMMENT_2_1;
+--enable_query_log
+INSERT INTO ta_l (a, b, c) VALUES
+ (1, 'a', '2018-11-01 10:21:39'),
+ (2, 'b', '2015-06-30 23:59:59'),
+ (3, 'c', '2013-11-01 01:01:01');
+
+--echo
+--echo spider not like bug fix test
+if ($USE_CHILD_GROUP2)
+{
+ if (!$OUTPUT_CHILD_GROUP2)
+ {
+ --disable_query_log
+ --disable_result_log
+ }
+ --connection child2_1
+ if ($USE_GENERAL_LOG)
+ {
+ TRUNCATE TABLE mysql.general_log;
+ }
+ if (!$OUTPUT_CHILD_GROUP2)
+ {
+ --enable_query_log
+ --enable_result_log
+ }
+}
+
+--connection master_1
+select * from ta_l where b not like 'a%';
+if ($USE_CHILD_GROUP2)
+{
+ --connection child2_1
+ if ($USE_GENERAL_LOG)
+ {
+ SELECT argument FROM mysql.general_log WHERE argument LIKE '%select%';
+ }
+}
+
+
+--echo
+--echo deinit
+--disable_warnings
+--connection master_1
+DROP DATABASE IF EXISTS auto_test_local;
+if ($USE_CHILD_GROUP2)
+{
+ --connection child2_1
+ DROP DATABASE IF EXISTS auto_test_remote;
+ SET GLOBAL log_output = @old_log_output;
+}
+
+
+--disable_query_log
+--disable_result_log
+--source test_deinit.inc
+--enable_result_log
+--enable_query_log
+--enable_warnings
+--echo
+--echo end of test
diff --git a/storage/spider/mysql-test/spider/t/test_deinit.inc b/storage/spider/mysql-test/spider/t/test_deinit.inc
index 989bde26d3c..5a82b43ce0f 100644
--- a/storage/spider/mysql-test/spider/t/test_deinit.inc
+++ b/storage/spider/mysql-test/spider/t/test_deinit.inc
@@ -10,10 +10,12 @@ if ($USE_CHILD_GROUP2)
--connection child2_1
--source ../include/deinit_child2_1.inc
--disconnect child2_1
+ --disconnect child2_1_2
--echo child2_2
--connection child2_2
--source ../include/deinit_child2_2.inc
--disconnect child2_2
+ --disconnect child2_2_2
--echo child2_3
--connection child2_3
--source ../include/deinit_child2_3.inc
diff --git a/storage/spider/spd_conn.cc b/storage/spider/spd_conn.cc
index ba59acd64bc..aac4edd072c 100644
--- a/storage/spider/spd_conn.cc
+++ b/storage/spider/spd_conn.cc
@@ -1,4 +1,5 @@
-/* Copyright (C) 2008-2018 Kentoku Shiba
+/* Copyright (C) 2008-2019 Kentoku Shiba
+ Copyright (C) 2019 MariaDB corp
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -148,6 +149,7 @@ int spider_reset_conn_setted_parameter(
DBUG_ENTER("spider_reset_conn_setted_parameter");
conn->autocommit = spider_param_remote_autocommit();
conn->sql_log_off = spider_param_remote_sql_log_off();
+ conn->wait_timeout = spider_param_remote_wait_timeout(thd);
if (thd && spider_param_remote_time_zone())
{
int tz_length = strlen(spider_param_remote_time_zone());
@@ -1428,6 +1430,20 @@ void spider_conn_queue_sql_log_off(
DBUG_VOID_RETURN;
}
+void spider_conn_queue_wait_timeout(
+ SPIDER_CONN *conn,
+ int wait_timeout
+) {
+ DBUG_ENTER("spider_conn_queue_wait_timeout");
+ DBUG_PRINT("info", ("spider conn=%p", conn));
+ if (wait_timeout > 0)
+ {
+ conn->queued_wait_timeout = TRUE;
+ conn->queued_wait_timeout_val = wait_timeout;
+ }
+ DBUG_VOID_RETURN;
+}
+
void spider_conn_queue_time_zone(
SPIDER_CONN *conn,
Time_zone *time_zone
@@ -1483,6 +1499,7 @@ void spider_conn_clear_queue(
conn->queued_semi_trx_isolation = FALSE;
conn->queued_autocommit = FALSE;
conn->queued_sql_log_off = FALSE;
+ conn->queued_wait_timeout = FALSE;
conn->queued_time_zone = FALSE;
conn->queued_trx_start = FALSE;
conn->queued_xa_start = FALSE;
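
Illustration (not from this patch): the spd_conn.cc hunks above queue a per-connection wait_timeout and clear it together with the other queued session parameters, so it can be flushed to the data node before the next real statement. A minimal standalone C++ sketch of that queue-then-flush pattern, with simplified stand-in types in place of SPIDER_CONN:

// Standalone sketch of the queue-then-flush pattern used for session
// parameters such as wait_timeout (simplified stand-in types).
#include <cstdio>
#include <string>

struct Conn {
  int  wait_timeout = -1;           // value currently set on the remote session
  bool queued_wait_timeout = false;
  int  queued_wait_timeout_val = 0;
};

// queue the desired value; nothing is sent yet
void queue_wait_timeout(Conn &conn, int wait_timeout)
{
  if (wait_timeout > 0)
  {
    conn.queued_wait_timeout = true;
    conn.queued_wait_timeout_val = wait_timeout;
  }
}

// flush queued parameters into one bulk statement before the next query
void flush_queued(Conn &conn, std::string &bulk_sql)
{
  if (conn.queued_wait_timeout &&
      conn.queued_wait_timeout_val != conn.wait_timeout)
  {
    if (!bulk_sql.empty())
      bulk_sql += ";";
    bulk_sql += "set session wait_timeout = " +
                std::to_string(conn.queued_wait_timeout_val);
    conn.wait_timeout = conn.queued_wait_timeout_val;
  }
  conn.queued_wait_timeout = false;  // clear the queue
}

int main()
{
  Conn conn;
  std::string sql;
  queue_wait_timeout(conn, 604800);  // arbitrary example value
  flush_queued(conn, sql);
  std::printf("%s\n", sql.c_str());  // set session wait_timeout = 604800
  return 0;
}
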
diff --git a/storage/spider/spd_conn.h b/storage/spider/spd_conn.h
index 0a9f99a1853..97dc4ac7bf0 100644
--- a/storage/spider/spd_conn.h
+++ b/storage/spider/spd_conn.h
@@ -132,6 +132,11 @@ void spider_conn_queue_sql_log_off(
bool sql_log_off
);
+void spider_conn_queue_wait_timeout(
+ SPIDER_CONN *conn,
+ int wait_timeout
+);
+
void spider_conn_queue_time_zone(
SPIDER_CONN *conn,
Time_zone *time_zone
diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc
index c2cd4beccdb..97e2d24c8e7 100644
--- a/storage/spider/spd_db_conn.cc
+++ b/storage/spider/spd_db_conn.cc
@@ -1,4 +1,5 @@
-/* Copyright (C) 2008-2018 Kentoku Shiba
+/* Copyright (C) 2008-2019 Kentoku Shiba
+ Copyright (C) 2019 MariaDB corp
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -381,6 +382,13 @@ int spider_db_conn_queue_action(
append_sql_log_off(&sql_str, conn->queued_sql_log_off_val))
) ||
(
+ conn->queued_wait_timeout &&
+ conn->queued_wait_timeout_val != conn->wait_timeout &&
+ conn->db_conn->set_wait_timeout_in_bulk_sql() &&
+ (error_num = spider_dbton[conn->dbton_id].db_util->
+ append_wait_timeout(&sql_str, conn->queued_wait_timeout_val))
+ ) ||
+ (
conn->queued_time_zone &&
conn->queued_time_zone_val != conn->time_zone &&
conn->db_conn->set_time_zone_in_bulk_sql() &&
@@ -453,6 +461,15 @@ int spider_db_conn_queue_action(
DBUG_RETURN(error_num);
}
if (
+ conn->queued_wait_timeout &&
+ conn->queued_wait_timeout_val != conn->wait_timeout &&
+ !conn->db_conn->set_wait_timeout_in_bulk_sql() &&
+ (error_num = spider_dbton[conn->dbton_id].db_util->
+ append_wait_timeout(&sql_str, conn->queued_wait_timeout_val))
+ ) {
+ DBUG_RETURN(error_num);
+ }
+ if (
conn->queued_time_zone &&
conn->queued_time_zone_val != conn->time_zone &&
!conn->db_conn->set_time_zone_in_bulk_sql() &&
@@ -519,6 +536,13 @@ int spider_db_conn_queue_action(
conn->trx_isolation));
}
+ if (
+ conn->queued_wait_timeout &&
+ conn->queued_wait_timeout_val != conn->wait_timeout
+ ) {
+ conn->wait_timeout = conn->queued_wait_timeout_val;
+ }
+
if (conn->queued_autocommit)
{
if (conn->queued_autocommit_val && conn->autocommit != 1)
@@ -2558,6 +2582,22 @@ int spider_db_append_key_where(
DBUG_RETURN(0);
}
+int spider_db_append_charset_name_before_string(
+ spider_string *str,
+ CHARSET_INFO *cs
+) {
+ const char *csname = cs->csname;
+ uint csname_length = strlen(csname);
+ DBUG_ENTER("spider_db_append_charset_name_before_string");
+ if (str->reserve(SPIDER_SQL_UNDERSCORE_LEN + csname_length))
+ {
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ }
+ str->q_append(SPIDER_SQL_UNDERSCORE_STR, SPIDER_SQL_UNDERSCORE_LEN);
+ str->q_append(csname, csname_length);
+ DBUG_RETURN(0);
+}
+
#ifdef HANDLER_HAS_DIRECT_AGGREGATE
int spider_db_refetch_for_item_sum_funcs(
ha_spider *spider
@@ -8884,34 +8924,58 @@ int spider_db_open_item_ident(
Field *field = item_ident->cached_table->table->field[
item_ident->cached_field_index];
DBUG_PRINT("info",("spider use cached_field_index"));
- if (!use_fields)
+ DBUG_PRINT("info",("spider const_table=%s",
+ field->table->const_table ? "TRUE" : "FALSE"));
+ if (field->table->const_table)
{
- if (!(field = spider->field_exchange(field)))
- DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM);
if (str)
{
- if ((error_num = share->dbton_share[dbton_id]->
- append_column_name_with_alias(str, field->field_index,
- alias, alias_length)))
- DBUG_RETURN(error_num);
+ String str_value;
+ String *tmp_str;
+ tmp_str = field->val_str(&str_value);
+ if (!tmp_str)
+ {
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ }
+ if (str->reserve(SPIDER_SQL_VALUE_QUOTE_LEN * 2 +
+ tmp_str->length() * 2))
+ {
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ }
+ str->q_append(SPIDER_SQL_VALUE_QUOTE_STR, SPIDER_SQL_VALUE_QUOTE_LEN);
+ str->append_escape_string(tmp_str->ptr(), tmp_str->length());
+ str->q_append(SPIDER_SQL_VALUE_QUOTE_STR, SPIDER_SQL_VALUE_QUOTE_LEN);
}
} else {
- if (str)
+ if (!use_fields)
{
- SPIDER_FIELD_CHAIN *field_chain = fields->get_next_field_chain();
- SPIDER_FIELD_HOLDER *field_holder = field_chain->field_holder;
- spider = field_holder->spider;
- share = spider->share;
- field = spider->field_exchange(field);
- DBUG_ASSERT(field);
- if ((error_num = share->dbton_share[dbton_id]->
- append_column_name_with_alias(str, field->field_index,
- field_holder->alias->ptr(), field_holder->alias->length())))
- DBUG_RETURN(error_num);
+ if (!(field = spider->field_exchange(field)))
+ DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM);
+ if (str)
+ {
+ if ((error_num = share->dbton_share[dbton_id]->
+ append_column_name_with_alias(str, field->field_index,
+ alias, alias_length)))
+ DBUG_RETURN(error_num);
+ }
} else {
- if ((error_num = fields->add_field(field)))
+ if (str)
{
- DBUG_RETURN(error_num);
+ SPIDER_FIELD_CHAIN *field_chain = fields->get_next_field_chain();
+ SPIDER_FIELD_HOLDER *field_holder = field_chain->field_holder;
+ spider = field_holder->spider;
+ share = spider->share;
+ field = spider->field_exchange(field);
+ DBUG_ASSERT(field);
+ if ((error_num = share->dbton_share[dbton_id]->
+ append_column_name_with_alias(str, field->field_index,
+ field_holder->alias->ptr(), field_holder->alias->length())))
+ DBUG_RETURN(error_num);
+ } else {
+ if ((error_num = fields->add_field(field)))
+ {
+ DBUG_RETURN(error_num);
+ }
}
}
}
@@ -8981,46 +9045,72 @@ int spider_db_open_item_field(
Field *field = item_field->field;
SPIDER_SHARE *share = spider->share;
DBUG_ENTER("spider_db_open_item_field");
- if (field && !field->table->const_table)
+ if (field)
{
DBUG_PRINT("info",("spider field=%p", field));
DBUG_PRINT("info",("spider db=%s", field->table->s->db.str));
- DBUG_PRINT("info",("spider table_name=%s", field->table->s->table_name.str));
- DBUG_PRINT("info",("spider tmp_table=%u", field->table->s->tmp_table));
- if (field->table->s->tmp_table != INTERNAL_TMP_TABLE)
+ DBUG_PRINT("info",("spider table_name=%s",
+ field->table->s->table_name.str));
+ DBUG_PRINT("info",("spider const_table=%s",
+ field->table->const_table ? "TRUE" : "FALSE"));
+ if (field->table->const_table)
{
- if (!use_fields)
+ if (str)
{
- if (!(field = spider->field_exchange(field)))
- DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM);
- if (str)
+ String str_value;
+ String *tmp_str;
+ tmp_str = field->val_str(&str_value);
+ if (!tmp_str)
{
- if ((error_num = share->dbton_share[dbton_id]->
- append_column_name_with_alias(str, field->field_index,
- alias, alias_length)))
- DBUG_RETURN(error_num);
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
}
- DBUG_RETURN(0);
- } else {
- if (str)
+ if (str->reserve(SPIDER_SQL_VALUE_QUOTE_LEN * 2 +
+ tmp_str->length() * 2))
{
- SPIDER_FIELD_CHAIN *field_chain = fields->get_next_field_chain();
- SPIDER_FIELD_HOLDER *field_holder = field_chain->field_holder;
- spider = field_holder->spider;
- share = spider->share;
- field = spider->field_exchange(field);
- DBUG_ASSERT(field);
- if ((error_num = share->dbton_share[dbton_id]->
- append_column_name_with_alias(str, field->field_index,
- field_holder->alias->ptr(), field_holder->alias->length())))
- DBUG_RETURN(error_num);
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ }
+ str->q_append(SPIDER_SQL_VALUE_QUOTE_STR, SPIDER_SQL_VALUE_QUOTE_LEN);
+ str->append_escape_string(tmp_str->ptr(), tmp_str->length());
+ str->q_append(SPIDER_SQL_VALUE_QUOTE_STR, SPIDER_SQL_VALUE_QUOTE_LEN);
+ }
+ DBUG_RETURN(0);
+ } else {
+ DBUG_PRINT("info",("spider tmp_table=%u", field->table->s->tmp_table));
+ if (field->table->s->tmp_table != INTERNAL_TMP_TABLE)
+ {
+ if (!use_fields)
+ {
+ if (!(field = spider->field_exchange(field)))
+ DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM);
+ if (str)
+ {
+ if ((error_num = share->dbton_share[dbton_id]->
+ append_column_name_with_alias(str, field->field_index,
+ alias, alias_length)))
+ DBUG_RETURN(error_num);
+ }
+ DBUG_RETURN(0);
} else {
- if ((error_num = fields->add_field(field)))
+ if (str)
{
- DBUG_RETURN(error_num);
+ SPIDER_FIELD_CHAIN *field_chain = fields->get_next_field_chain();
+ SPIDER_FIELD_HOLDER *field_holder = field_chain->field_holder;
+ spider = field_holder->spider;
+ share = spider->share;
+ field = spider->field_exchange(field);
+ DBUG_ASSERT(field);
+ if ((error_num = share->dbton_share[dbton_id]->
+ append_column_name_with_alias(str, field->field_index,
+ field_holder->alias->ptr(), field_holder->alias->length())))
+ DBUG_RETURN(error_num);
+ } else {
+ if ((error_num = fields->add_field(field)))
+ {
+ DBUG_RETURN(error_num);
+ }
}
+ DBUG_RETURN(0);
}
- DBUG_RETURN(0);
}
}
}
@@ -9217,6 +9307,14 @@ int spider_db_open_item_string(
goto end;
}
}
+ if (str->charset() != tmp_str2->charset())
+ {
+ if ((error_num = spider_db_append_charset_name_before_string(str,
+ tmp_str2->charset())))
+ {
+ goto end;
+ }
+ }
if (str->reserve(SPIDER_SQL_VALUE_QUOTE_LEN * 2 +
tmp_str2->length() * 2))
{
@@ -9226,7 +9324,8 @@ int spider_db_open_item_string(
if (!thd)
tmp_str.mem_calc();
str->q_append(SPIDER_SQL_VALUE_QUOTE_STR, SPIDER_SQL_VALUE_QUOTE_LEN);
- str->append_escape_string(tmp_str2->ptr(), tmp_str2->length());
+ str->append_escape_string(tmp_str2->ptr(), tmp_str2->length(),
+ tmp_str2->charset());
if (str->reserve(SPIDER_SQL_VALUE_QUOTE_LEN))
{
error_num = HA_ERR_OUT_OF_MEM;
diff --git a/storage/spider/spd_db_conn.h b/storage/spider/spd_db_conn.h
index bf09d672685..f082996d3d1 100644
--- a/storage/spider/spd_db_conn.h
+++ b/storage/spider/spd_db_conn.h
@@ -472,6 +472,11 @@ int spider_db_append_key_where(
ha_spider *spider
);
+int spider_db_append_charset_name_before_string(
+ spider_string *str,
+ CHARSET_INFO *cs
+);
+
#ifdef HANDLER_HAS_DIRECT_AGGREGATE
int spider_db_refetch_for_item_sum_funcs(
ha_spider *spider
diff --git a/storage/spider/spd_db_handlersocket.cc b/storage/spider/spd_db_handlersocket.cc
index 37bbe530723..c4f403b8728 100644
--- a/storage/spider/spd_db_handlersocket.cc
+++ b/storage/spider/spd_db_handlersocket.cc
@@ -1839,6 +1839,23 @@ int spider_db_handlersocket::set_sql_log_off(
DBUG_RETURN(0);
}
+bool spider_db_handlersocket::set_wait_timeout_in_bulk_sql()
+{
+ DBUG_ENTER("spider_db_handlersocket::set_wait_timeout_in_bulk_sql");
+ DBUG_PRINT("info",("spider this=%p", this));
+ DBUG_RETURN(FALSE);
+}
+
+int spider_db_handlersocket::set_wait_timeout(
+ int wait_timeout,
+ int *need_mon
+) {
+ DBUG_ENTER("spider_db_handlersocket::set_wait_timeout");
+ DBUG_PRINT("info",("spider this=%p", this));
+ /* nothing to do */
+ DBUG_RETURN(0);
+}
+
bool spider_db_handlersocket::set_time_zone_in_bulk_sql()
{
DBUG_ENTER("spider_db_handlersocket::set_time_zone_in_bulk_sql");
@@ -2675,6 +2692,16 @@ int spider_db_handlersocket_util::append_sql_log_off(
DBUG_RETURN(0);
}
+int spider_db_handlersocket_util::append_wait_timeout(
+ spider_string *str,
+ int wait_timeout
+) {
+ DBUG_ENTER("spider_db_handlersocket_util::append_wait_timeout");
+ DBUG_PRINT("info",("spider this=%p", this));
+ /* nothing to do */
+ DBUG_RETURN(0);
+}
+
int spider_db_handlersocket_util::append_time_zone(
spider_string *str,
Time_zone *time_zone
@@ -5145,7 +5172,7 @@ int spider_handlersocket_handler::append_open_handler(
share->tgt_dbs[spider->conn_link_idx[link_idx]],
share->tgt_table_names[spider->conn_link_idx[link_idx]],
spider->active_index < MAX_KEY ?
- table->s->key_info[spider->active_index].name :
+ table->key_info[spider->active_index].name :
"0",
str->c_ptr_safe(),
&request_key
diff --git a/storage/spider/spd_db_handlersocket.h b/storage/spider/spd_db_handlersocket.h
index 075f8720abf..4cf6fce6c61 100644
--- a/storage/spider/spd_db_handlersocket.h
+++ b/storage/spider/spd_db_handlersocket.h
@@ -59,6 +59,10 @@ public:
spider_string *str,
bool sql_log_off
);
+ int append_wait_timeout(
+ spider_string *str,
+ int wait_timeout
+ );
int append_time_zone(
spider_string *str,
Time_zone *time_zone
@@ -380,6 +384,11 @@ public:
bool sql_log_off,
int *need_mon
);
+ bool set_wait_timeout_in_bulk_sql();
+ int set_wait_timeout(
+ int wait_timeout,
+ int *need_mon
+ );
bool set_time_zone_in_bulk_sql();
int set_time_zone(
Time_zone *time_zone,
diff --git a/storage/spider/spd_db_include.h b/storage/spider/spd_db_include.h
index 64cee006c65..f759a78c3cb 100644
--- a/storage/spider/spd_db_include.h
+++ b/storage/spider/spd_db_include.h
@@ -161,6 +161,8 @@ typedef st_spider_result SPIDER_RESULT;
#define SPIDER_SQL_IN_LEN (sizeof(SPIDER_SQL_IN_STR) - 1)
#define SPIDER_SQL_NOT_IN_STR "not in("
#define SPIDER_SQL_NOT_IN_LEN (sizeof(SPIDER_SQL_NOT_IN_STR) - 1)
+#define SPIDER_SQL_NOT_LIKE_STR "not like"
+#define SPIDER_SQL_NOT_LIKE_LEN (sizeof(SPIDER_SQL_NOT_LIKE_STR) - 1)
#define SPIDER_SQL_AS_CHAR_STR " as char"
#define SPIDER_SQL_AS_CHAR_LEN (sizeof(SPIDER_SQL_AS_CHAR_STR) - 1)
#define SPIDER_SQL_CAST_STR "cast("
@@ -531,6 +533,11 @@ public:
const char *st,
uint len
);
+ void append_escape_string(
+ const char *st,
+ uint len,
+ CHARSET_INFO *cs
+ );
bool append_for_single_quote(
const char *st,
uint len
@@ -835,6 +842,10 @@ public:
spider_string *str,
bool sql_log_off
) = 0;
+ virtual int append_wait_timeout(
+ spider_string *str,
+ int wait_timeout
+ ) = 0;
virtual int append_time_zone(
spider_string *str,
Time_zone *time_zone
@@ -1134,6 +1145,11 @@ public:
bool sql_log_off,
int *need_mon
) = 0;
+ virtual bool set_wait_timeout_in_bulk_sql() = 0;
+ virtual int set_wait_timeout(
+ int wait_timeout,
+ int *need_mon
+ ) = 0;
virtual bool set_time_zone_in_bulk_sql() = 0;
virtual int set_time_zone(
Time_zone *time_zone,
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc
index 262e47120ce..d8fdfcec081 100644
--- a/storage/spider/spd_db_mysql.cc
+++ b/storage/spider/spd_db_mysql.cc
@@ -88,6 +88,9 @@ static const char *name_quote_str = SPIDER_SQL_NAME_QUOTE_STR;
#define SPIDER_SQL_SQL_LOG_ON_STR "set session sql_log_off = 1"
#define SPIDER_SQL_SQL_LOG_ON_LEN sizeof(SPIDER_SQL_SQL_LOG_ON_STR) - 1
+#define SPIDER_SQL_WAIT_TIMEOUT_STR "set session wait_timeout = "
+#define SPIDER_SQL_WAIT_TIMEOUT_LEN sizeof(SPIDER_SQL_WAIT_TIMEOUT_STR) - 1
+
#define SPIDER_SQL_TIME_ZONE_STR "set session time_zone = '"
#define SPIDER_SQL_TIME_ZONE_LEN sizeof(SPIDER_SQL_TIME_ZONE_STR) - 1
@@ -1948,6 +1951,9 @@ int spider_db_mbase::connect(
connect_retry_count--;
my_sleep((ulong) connect_retry_interval);
} else {
+#ifdef SPIDER_NET_HAS_THD
+ db_conn->net.thd = NULL;
+#endif
if (connect_mutex)
pthread_mutex_unlock(&spider_open_conn_mutex);
break;
@@ -2738,6 +2744,44 @@ int spider_db_mbase::set_sql_log_off(
DBUG_RETURN(0);
}
+bool spider_db_mbase::set_wait_timeout_in_bulk_sql()
+{
+ DBUG_ENTER("spider_db_mbase::set_wait_timeout_in_bulk_sql");
+ DBUG_PRINT("info",("spider this=%p", this));
+ DBUG_RETURN(TRUE);
+}
+
+int spider_db_mbase::set_wait_timeout(
+ int wait_timeout,
+ int *need_mon
+) {
+ char sql_buf[MAX_FIELD_WIDTH];
+ char timeout_str[SPIDER_SQL_INT_LEN];
+ int timeout_str_length;
+ spider_string sql_str(sql_buf, sizeof(sql_buf), &my_charset_bin);
+ DBUG_ENTER("spider_db_mbase::set_wait_timeout");
+ DBUG_PRINT("info",("spider this=%p", this));
+ sql_str.init_calc_mem(264);
+ sql_str.length(0);
+ timeout_str_length =
+ my_sprintf(timeout_str, (timeout_str, "%d", wait_timeout));
+ if (sql_str.reserve(SPIDER_SQL_WAIT_TIMEOUT_LEN + timeout_str_length))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ sql_str.q_append(SPIDER_SQL_WAIT_TIMEOUT_STR, SPIDER_SQL_WAIT_TIMEOUT_LEN);
+ sql_str.q_append(timeout_str, timeout_str_length);
+ if (spider_db_query(
+ conn,
+ sql_str.ptr(),
+ sql_str.length(),
+ -1,
+ need_mon)
+ )
+ DBUG_RETURN(spider_db_errorno(conn));
+ SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos);
+ pthread_mutex_unlock(&conn->mta_conn_mutex);
+ DBUG_RETURN(0);
+}
+
bool spider_db_mbase::set_time_zone_in_bulk_sql()
{
DBUG_ENTER("spider_db_mbase::set_time_zone_in_bulk_sql");
@@ -3290,10 +3334,10 @@ void spider_db_mbase::set_dup_key_idx(
key_name_length = spider->share->tgt_pk_names_lengths[all_link_idx];
} else {
#ifdef SPIDER_use_LEX_CSTRING_for_KEY_Field_name
- key_name = table->s->key_info[roop_count].name.str;
- key_name_length = table->s->key_info[roop_count].name.length;
+ key_name = table->key_info[roop_count].name.str;
+ key_name_length = table->key_info[roop_count].name.length;
#else
- key_name = table->s->key_info[roop_count].name;
+ key_name = table->key_info[roop_count].name;
key_name_length = strlen(key_name);
#endif
}
@@ -3421,8 +3465,9 @@ int spider_db_mbase_util::append_column_value(
const uchar *new_ptr,
CHARSET_INFO *access_charset
) {
+ int error_num;
char buf[MAX_FIELD_WIDTH];
- spider_string tmp_str(buf, MAX_FIELD_WIDTH, &my_charset_bin);
+ spider_string tmp_str(buf, MAX_FIELD_WIDTH, field->charset());
String *ptr;
uint length;
THD *thd = field->table->in_use;
@@ -3440,7 +3485,7 @@ int spider_db_mbase_util::append_column_value(
) {
length = uint2korr(new_ptr);
tmp_str.set_quick((char *) new_ptr + HA_KEY_BLOB_LENGTH, length,
- &my_charset_bin);
+ field->charset());
ptr = tmp_str.get_str();
} else if (field->type() == MYSQL_TYPE_GEOMETRY)
{
@@ -3556,6 +3601,14 @@ int spider_db_mbase_util::append_column_value(
if (field->result_type() == STRING_RESULT)
{
DBUG_PRINT("info", ("spider STRING_RESULT"));
+ if (str->charset() != field->charset())
+ {
+ if ((error_num = spider_db_append_charset_name_before_string(str,
+ field->charset())))
+ {
+ DBUG_RETURN(error_num);
+ }
+ }
if (str->reserve(SPIDER_SQL_VALUE_QUOTE_LEN))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
str->q_append(SPIDER_SQL_VALUE_QUOTE_STR, SPIDER_SQL_VALUE_QUOTE_LEN);
@@ -3566,7 +3619,7 @@ int spider_db_mbase_util::append_column_value(
) {
DBUG_PRINT("info", ("spider append_escaped"));
char buf2[MAX_FIELD_WIDTH];
- spider_string tmp_str2(buf2, MAX_FIELD_WIDTH, access_charset);
+ spider_string tmp_str2(buf2, MAX_FIELD_WIDTH, field->charset());
tmp_str2.init_calc_mem(114);
tmp_str2.length(0);
if (
@@ -3707,6 +3760,30 @@ int spider_db_mbase_util::append_sql_log_off(
DBUG_RETURN(0);
}
+int spider_db_mbase_util::append_wait_timeout(
+ spider_string *str,
+ int wait_timeout
+) {
+ char timeout_str[SPIDER_SQL_INT_LEN];
+ int timeout_str_length;
+ DBUG_ENTER("spider_db_mbase_util::append_wait_timeout");
+ DBUG_PRINT("info",("spider this=%p", this));
+ timeout_str_length =
+ my_sprintf(timeout_str, (timeout_str, "%d", wait_timeout));
+ if (str->reserve(SPIDER_SQL_SEMICOLON_LEN + SPIDER_SQL_WAIT_TIMEOUT_LEN +
+ timeout_str_length))
+ {
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ }
+ if (str->length())
+ {
+ str->q_append(SPIDER_SQL_SEMICOLON_STR, SPIDER_SQL_SEMICOLON_LEN);
+ }
+ str->q_append(SPIDER_SQL_WAIT_TIMEOUT_STR, SPIDER_SQL_WAIT_TIMEOUT_LEN);
+ str->q_append(timeout_str, timeout_str_length);
+ DBUG_RETURN(0);
+}
+
int spider_db_mbase_util::append_time_zone(
spider_string *str,
Time_zone *time_zone
@@ -4789,13 +4866,31 @@ int spider_db_mbase_util::open_item_func(
case Item_func::LE_FUNC:
case Item_func::GE_FUNC:
case Item_func::GT_FUNC:
- case Item_func::LIKE_FUNC:
if (str)
{
func_name = (char*) item_func->func_name();
func_name_length = strlen(func_name);
}
break;
+ case Item_func::LIKE_FUNC:
+#ifdef SPIDER_LIKE_FUNC_HAS_GET_NEGATED
+ if (str)
+ {
+ if (((Item_func_like *)item_func)->get_negated())
+ {
+ func_name = SPIDER_SQL_NOT_LIKE_STR;
+ func_name_length = SPIDER_SQL_NOT_LIKE_LEN;
+ }
+ else
+ {
+ func_name = (char*)item_func->func_name();
+ func_name_length = strlen(func_name);
+ }
+ }
+ break;
+#else
+ DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM);
+#endif
default:
THD *thd = spider->trx->thd;
SPIDER_SHARE *share = spider->share;
@@ -6800,10 +6895,12 @@ int spider_mbase_handler::init()
}
sql.set_charset(share->access_charset);
sql_part.set_charset(share->access_charset);
+ sql_part2.set_charset(share->access_charset);
ha_sql.set_charset(share->access_charset);
insert_sql.set_charset(share->access_charset);
update_sql.set_charset(share->access_charset);
tmp_sql.set_charset(share->access_charset);
+ dup_update_sql.set_charset(share->access_charset);
upd_tmp_tbl_prm.init();
upd_tmp_tbl_prm.field_count = 1;
if (!(link_for_hash = (SPIDER_LINK_FOR_HASH *)
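Taken together, the spd_db_mysql.cc hunks above change what Spider sends to an mbase (MariaDB/MySQL) data node in two visible ways: set_wait_timeout()/append_wait_timeout() issue a "set session wait_timeout = <n>" statement (prefixed with a ";" separator when appended to a non-empty bulk SQL buffer), and the new LIKE_FUNC branch maps a negated LIKE to "not like" where get_negated() is available, skipping condition pushdown entirely where it is not. A minimal sketch of the resulting remote statements, with invented table and column names and no claim about the exact identifier quoting Spider produces:

SET SESSION wait_timeout = 604800;
SELECT id, val FROM tbl_a WHERE val NOT LIKE 'abc%';  -- negated LIKE now pushed down as "not like"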
diff --git a/storage/spider/spd_db_mysql.h b/storage/spider/spd_db_mysql.h
index 3448cea06cc..f9246bf2721 100644
--- a/storage/spider/spd_db_mysql.h
+++ b/storage/spider/spd_db_mysql.h
@@ -64,6 +64,10 @@ public:
spider_string *str,
bool sql_log_off
);
+ int append_wait_timeout(
+ spider_string *str,
+ int wait_timeout
+ );
int append_time_zone(
spider_string *str,
Time_zone *time_zone
@@ -466,6 +470,11 @@ public:
bool sql_log_off,
int *need_mon
);
+ bool set_wait_timeout_in_bulk_sql();
+ int set_wait_timeout(
+ int wait_timeout,
+ int *need_mon
+ );
bool set_time_zone_in_bulk_sql();
int set_time_zone(
Time_zone *time_zone,
diff --git a/storage/spider/spd_db_oracle.cc b/storage/spider/spd_db_oracle.cc
index eae749f6867..e455ce0e4c0 100644
--- a/storage/spider/spd_db_oracle.cc
+++ b/storage/spider/spd_db_oracle.cc
@@ -2104,6 +2104,23 @@ int spider_db_oracle::set_sql_log_off(
DBUG_RETURN(0);
}
+bool spider_db_oracle::set_wait_timeout_in_bulk_sql()
+{
+ DBUG_ENTER("spider_db_oracle::set_wait_timeout_in_bulk_sql");
+ DBUG_PRINT("info",("spider this=%p", this));
+ DBUG_RETURN(FALSE);
+}
+
+int spider_db_oracle::set_wait_timeout(
+ int wait_timeout,
+ int *need_mon
+) {
+ DBUG_ENTER("spider_db_oracle::set_wait_timeout");
+ DBUG_PRINT("info",("spider this=%p", this));
+ /* nothing to do */
+ DBUG_RETURN(0);
+}
+
bool spider_db_oracle::set_time_zone_in_bulk_sql()
{
DBUG_ENTER("spider_db_oracle::set_time_zone_in_bulk_sql");
@@ -2810,6 +2827,16 @@ int spider_db_oracle_util::append_sql_log_off(
DBUG_RETURN(0);
}
+int spider_db_oracle_util::append_wait_timeout(
+ spider_string *str,
+ int wait_timeout
+) {
+ DBUG_ENTER("spider_db_oracle_util::append_wait_timeout");
+ DBUG_PRINT("info",("spider this=%p", this));
+ /* nothing to do */
+ DBUG_RETURN(0);
+}
+
int spider_db_oracle_util::append_time_zone(
spider_string *str,
Time_zone *time_zone
@@ -4016,13 +4043,31 @@ int spider_db_oracle_util::open_item_func(
case Item_func::LE_FUNC:
case Item_func::GE_FUNC:
case Item_func::GT_FUNC:
- case Item_func::LIKE_FUNC:
if (str)
{
func_name = (char*) item_func->func_name();
func_name_length = strlen(func_name);
}
break;
+ case Item_func::LIKE_FUNC:
+#ifdef SPIDER_LIKE_FUNC_HAS_GET_NEGATED
+ if (str)
+ {
+ if (((Item_func_like *)item_func)->get_negated())
+ {
+ func_name = SPIDER_SQL_NOT_LIKE_STR;
+ func_name_length = SPIDER_SQL_NOT_LIKE_LEN;
+ }
+ else
+ {
+ func_name = (char*)item_func->func_name();
+ func_name_length = strlen(func_name);
+ }
+ }
+ break;
+#else
+ DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM);
+#endif
default:
THD *thd = spider->trx->thd;
SPIDER_SHARE *share = spider->share;
@@ -5380,10 +5425,12 @@ int spider_oracle_handler::init()
}
sql.set_charset(share->access_charset);
sql_part.set_charset(share->access_charset);
+ sql_part2.set_charset(share->access_charset);
ha_sql.set_charset(share->access_charset);
insert_sql.set_charset(share->access_charset);
update_sql.set_charset(share->access_charset);
tmp_sql.set_charset(share->access_charset);
+ dup_update_sql.set_charset(share->access_charset);
upd_tmp_tbl_prm.init();
upd_tmp_tbl_prm.field_count = 1;
if (!(link_for_hash = (SPIDER_LINK_FOR_HASH *)
diff --git a/storage/spider/spd_db_oracle.h b/storage/spider/spd_db_oracle.h
index d0bd1757418..1ad79085ef8 100644
--- a/storage/spider/spd_db_oracle.h
+++ b/storage/spider/spd_db_oracle.h
@@ -67,6 +67,10 @@ public:
spider_string *str,
bool sql_log_off
);
+ int append_wait_timeout(
+ spider_string *str,
+ int wait_timeout
+ );
int append_time_zone(
spider_string *str,
Time_zone *time_zone
@@ -430,6 +434,11 @@ public:
bool sql_log_off,
int *need_mon
);
+ bool set_wait_timeout_in_bulk_sql();
+ int set_wait_timeout(
+ int wait_timeout,
+ int *need_mon
+ );
bool set_time_zone_in_bulk_sql();
int set_time_zone(
Time_zone *time_zone,
diff --git a/storage/spider/spd_environ.h b/storage/spider/spd_environ.h
index 5e66a912582..42cbf812bbb 100644
--- a/storage/spider/spd_environ.h
+++ b/storage/spider/spd_environ.h
@@ -25,6 +25,7 @@
#if defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100100
#define SPIDER_SUPPORT_CREATE_OR_REPLACE_TABLE
+#define SPIDER_NET_HAS_THD
#endif
#if defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100211
@@ -49,5 +50,6 @@
#if defined(MARIADB_BASE_VERSION) && MYSQL_VERSION_ID >= 100400
#define SPIDER_USE_CONST_ITEM_FOR_STRING_INT_REAL_DECIMAL_DATE_ITEM
#define SPIDER_SQL_CACHE_IS_IN_LEX
+#define SPIDER_LIKE_FUNC_HAS_GET_NEGATED
#endif
#endif /* SPD_ENVIRON_INCLUDED */
diff --git a/storage/spider/spd_include.h b/storage/spider/spd_include.h
index 37ec2d8c608..ead33853957 100644
--- a/storage/spider/spd_include.h
+++ b/storage/spider/spd_include.h
@@ -260,7 +260,7 @@ const char SPIDER_empty_string = "";
#define SPIDER_TMP_SHARE_LONG_COUNT 19
#define SPIDER_TMP_SHARE_LONGLONG_COUNT 3
-#define SPIDER_MEM_CALC_LIST_NUM 257
+#define SPIDER_MEM_CALC_LIST_NUM 265
#define SPIDER_CONN_META_BUF_LEN 64
#define SPIDER_BACKUP_DASTATUS \
@@ -449,6 +449,7 @@ typedef struct st_spider_conn
bool disable_reconnect;
int autocommit;
int sql_log_off;
+ int wait_timeout;
THD *thd;
void *another_ha_first;
void *another_ha_last;
@@ -555,6 +556,7 @@ typedef struct st_spider_conn
bool queued_ping;
bool queued_trx_isolation;
bool queued_semi_trx_isolation;
+ bool queued_wait_timeout;
bool queued_autocommit;
bool queued_sql_log_off;
bool queued_time_zone;
@@ -567,6 +569,7 @@ typedef struct st_spider_conn
int queued_ping_link_idx;
int queued_trx_isolation_val;
int queued_semi_trx_isolation_val;
+ int queued_wait_timeout_val;
bool queued_autocommit_val;
bool queued_sql_log_off_val;
Time_zone *queued_time_zone_val;
diff --git a/storage/spider/spd_malloc.cc b/storage/spider/spd_malloc.cc
index e7a6e710cbc..5373f8b1be7 100644
--- a/storage/spider/spd_malloc.cc
+++ b/storage/spider/spd_malloc.cc
@@ -1238,6 +1238,21 @@ void spider_string::append_escape_string(
DBUG_VOID_RETURN;
}
+void spider_string::append_escape_string(
+ const char *st,
+ uint len,
+ CHARSET_INFO *cs
+) {
+ DBUG_ENTER("spider_string::append_escape_string");
+ DBUG_PRINT("info",("spider this=%p", this));
+ DBUG_ASSERT(mem_calc_inited);
+ DBUG_ASSERT((!current_alloc_mem && !str.is_alloced()) ||
+ current_alloc_mem == str.alloced_length());
+ str.length(str.length() + escape_string_for_mysql(
+ cs, (char *) str.ptr() + str.length(), 0, st, len));
+ DBUG_VOID_RETURN;
+}
+
bool spider_string::append_for_single_quote(
const char *st,
uint len
diff --git a/storage/spider/spd_param.cc b/storage/spider/spd_param.cc
index 6b237bbfff8..ba207d360a2 100644
--- a/storage/spider/spd_param.cc
+++ b/storage/spider/spd_param.cc
@@ -3355,6 +3355,56 @@ int spider_param_slave_trx_isolation()
DBUG_RETURN(spider_slave_trx_isolation);
}
+/*
+ -1 :not set
+ 0-:seconds of timeout
+ */
+static MYSQL_THDVAR_INT(
+ remote_wait_timeout, /* name */
+ PLUGIN_VAR_RQCMDARG, /* opt */
+ "Wait timeout on remote server", /* comment */
+ NULL, /* check */
+ NULL, /* update */
+ -1, /* def */
+ -1, /* min */
+ 2147483647, /* max */
+ 0 /* blk */
+);
+
+int spider_param_remote_wait_timeout(
+ THD *thd
+) {
+ DBUG_ENTER("spider_param_remote_wait_timeout");
+ if (likely(thd))
+ DBUG_RETURN(THDVAR(thd, remote_wait_timeout));
+ DBUG_RETURN(-1);
+}
+
+/*
+ -1 :not set
+ 0-:seconds of timeout
+ */
+static MYSQL_THDVAR_INT(
+ wait_timeout, /* name */
+ PLUGIN_VAR_RQCMDARG, /* opt */
+ "Wait timeout of setting to remote server", /* comment */
+ NULL, /* check */
+ NULL, /* update */
+ 604800, /* def */
+ -1, /* min */
+ 2147483647, /* max */
+ 0 /* blk */
+);
+
+int spider_param_wait_timeout(
+ THD *thd
+) {
+ DBUG_ENTER("spider_param_wait_timeout");
+ if (likely(thd))
+ DBUG_RETURN(THDVAR(thd, wait_timeout));
+ DBUG_RETURN(604800);
+}
+
static struct st_mysql_storage_engine spider_storage_engine =
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
@@ -3505,6 +3555,8 @@ static struct st_mysql_sys_var* spider_system_variables[] = {
MYSQL_SYSVAR(table_crd_thread_count),
#endif
MYSQL_SYSVAR(slave_trx_isolation),
+ MYSQL_SYSVAR(remote_wait_timeout),
+ MYSQL_SYSVAR(wait_timeout),
NULL
};
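The two new thread variables are session-settable integers where -1 means "not set". Assuming they are exposed with the plugin's usual spider_ prefix, a minimal usage sketch:

SET SESSION spider_wait_timeout = 3600;        -- value Spider sets on the data node connection
SET SESSION spider_remote_wait_timeout = -1;   -- leave the remote server's own wait_timeout untouched
SHOW SESSION VARIABLES LIKE 'spider%wait_timeout';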
diff --git a/storage/spider/spd_param.h b/storage/spider/spd_param.h
index 8fdf2e452b2..d874526f29a 100644
--- a/storage/spider/spd_param.h
+++ b/storage/spider/spd_param.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2008-2018 Kentoku Shiba
+/* Copyright (C) 2008-2019 Kentoku Shiba
+ Copyright (C) 2019 MariaDB corp
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -421,3 +422,9 @@ uint spider_param_table_sts_thread_count();
uint spider_param_table_crd_thread_count();
#endif
int spider_param_slave_trx_isolation();
+int spider_param_remote_wait_timeout(
+ THD *thd
+);
+int spider_param_wait_timeout(
+ THD *thd
+);
diff --git a/storage/spider/spd_sys_table.cc b/storage/spider/spd_sys_table.cc
index 8f8605b85bd..45dd4a06781 100644
--- a/storage/spider/spd_sys_table.cc
+++ b/storage/spider/spd_sys_table.cc
@@ -2427,6 +2427,7 @@ int spider_get_sys_tables_static_link_id(
) {
int error_num = 0;
DBUG_ENTER("spider_get_sys_tables_static_link_id");
+ *static_link_id = NULL;
if (
!table->field[24]->is_null() &&
(*static_link_id = get_field(mem_root, table->field[24]))
diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc
index eedea78078d..d17e76e5ec6 100644
--- a/storage/spider/spd_table.cc
+++ b/storage/spider/spd_table.cc
@@ -8473,16 +8473,9 @@ void spider_free_tmp_dbton_handler(
TABLE_LIST *spider_get_parent_table_list(
ha_spider *spider
) {
- TABLE *table = spider->get_table();
- TABLE_LIST *table_list = table->pos_in_table_list;
+ TABLE *table = spider->get_top_table();
DBUG_ENTER("spider_get_parent_table_list");
- if (table_list)
- {
- while (table_list->parent_l)
- table_list = table_list->parent_l;
- DBUG_RETURN(table_list);
- }
- DBUG_RETURN(NULL);
+ DBUG_RETURN(table->pos_in_table_list);
}
List<Index_hint> *spider_get_index_hints(
@@ -8916,6 +8909,13 @@ bool spider_check_direct_order_limit(
break;
}
}
+ if (!spider_all_part_in_order((ORDER *) select_lex->group_list.first,
+ spider->get_table()))
+ {
+ DBUG_PRINT("info",("spider FALSE by group condition"));
+ first_check = FALSE;
+ spider->result_list.direct_distinct = FALSE;
+ }
#endif
}
@@ -8981,6 +8981,98 @@ bool spider_check_direct_order_limit(
DBUG_RETURN(FALSE);
}
+#ifdef HANDLER_HAS_DIRECT_AGGREGATE
+bool spider_all_part_in_order(
+ ORDER *order,
+ TABLE *table
+) {
+ TABLE_LIST *parent;
+ partition_info *part_info;
+ Field **part_fields;
+ ORDER *ptr;
+ Item *item;
+ Item_field *item_field;
+ DBUG_ENTER("spider_all_part_in_order");
+ while (TRUE)
+ {
+ DBUG_PRINT("info", ("spider table_name = %s", table->s->db.str));
+ DBUG_PRINT("info",("spider part_info=%p", table->part_info));
+ if ((part_info = table->part_info))
+ {
+ for (part_fields = part_info->full_part_field_array;
+ *part_fields; ++part_fields)
+ {
+ DBUG_PRINT("info", ("spider part_field = %s",
+ SPIDER_field_name_str(*part_fields)));
+ for (ptr = order; ptr; ptr = ptr->next)
+ {
+ item = *ptr->item;
+ if (item->type() != Item::FIELD_ITEM)
+ {
+ continue;
+ }
+ item_field = (Item_field *) item;
+ Field *field = item_field->field;
+ if (!field)
+ {
+ continue;
+ }
+ DBUG_PRINT("info", ("spider field_name = %s.%s",
+ field->table->s->db.str, SPIDER_field_name_str(field)));
+ if (*part_fields == spider_field_exchange(table->file, field))
+ {
+ break;
+ }
+ }
+ if (!ptr)
+ {
+ DBUG_RETURN(FALSE);
+ }
+ }
+ }
+ if (!(parent = table->pos_in_table_list->parent_l))
+ {
+ break;
+ }
+ table = parent->table;
+ }
+ DBUG_RETURN(TRUE);
+}
+
+Field *spider_field_exchange(
+ handler *handler,
+ Field *field
+) {
+ DBUG_ENTER("spider_field_exchange");
+#ifdef HA_CAN_BULK_ACCESS
+ if (handler->is_bulk_access_clone)
+ {
+ handler = handler->pt_clone_source_handler;
+ }
+#endif
+ DBUG_PRINT("info",("spider in field=%p", field));
+ DBUG_PRINT("info",("spider in field->table=%p", field->table));
+#ifdef HANDLER_HAS_TOP_TABLE_FIELDS
+ if (handler->set_top_table_fields)
+ {
+ DBUG_PRINT("info",("spider top_table=%p", handler->top_table));
+ if (field->table != handler->top_table)
+ DBUG_RETURN(NULL);
+ if (!(field = handler->top_table_field[field->field_index]))
+ DBUG_RETURN(NULL);
+ } else {
+#endif
+ DBUG_PRINT("info",("spider table=%p", handler->get_table()));
+ if (field->table != handler->get_table())
+ DBUG_RETURN(NULL);
+#ifdef HANDLER_HAS_TOP_TABLE_FIELDS
+ }
+#endif
+ DBUG_PRINT("info",("spider out field=%p", field));
+ DBUG_RETURN(field);
+}
+#endif
+
int spider_set_direct_limit_offset(
ha_spider *spider
) {
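The new spider_all_part_in_order() requires every column of full_part_field_array to appear among the GROUP BY items (repeating the check up the pos_in_table_list->parent_l chain); when one is missing, spider_check_direct_order_limit() clears first_check and result_list.direct_distinct, so the aggregation is no longer pushed down directly. With invented names, the distinction it draws looks roughly like this (part_tbl is a hypothetical Spider table partitioned by HASH(pcol)):

SELECT pcol, COUNT(*) FROM part_tbl GROUP BY pcol;   -- every partition column is grouped on
SELECT ocol, COUNT(*) FROM part_tbl GROUP BY ocol;   -- pcol missing from GROUP BY: direct aggregation is disabled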
diff --git a/storage/spider/spd_table.h b/storage/spider/spd_table.h
index 43958ca6e78..647b03953f3 100644
--- a/storage/spider/spd_table.h
+++ b/storage/spider/spd_table.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2008-2017 Kentoku Shiba
+/* Copyright (C) 2008-2019 Kentoku Shiba
+ Copyright (C) 2019 MariaDB corp
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -638,6 +639,18 @@ bool spider_check_direct_order_limit(
ha_spider *spider
);
+#ifdef HANDLER_HAS_DIRECT_AGGREGATE
+bool spider_all_part_in_order(
+ ORDER *order,
+ TABLE *table
+);
+
+Field *spider_field_exchange(
+ handler *handler,
+ Field *field
+);
+#endif
+
int spider_set_direct_limit_offset(
ha_spider* spider
);
diff --git a/storage/spider/spd_trx.cc b/storage/spider/spd_trx.cc
index 78a95c5e38a..a1f9bde6a2e 100644
--- a/storage/spider/spd_trx.cc
+++ b/storage/spider/spd_trx.cc
@@ -1,4 +1,5 @@
-/* Copyright (C) 2008-2018 Kentoku Shiba
+/* Copyright (C) 2008-2019 Kentoku Shiba
+ Copyright (C) 2019 MariaDB corp
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -1667,6 +1668,22 @@ int spider_check_and_set_sql_log_off(
DBUG_RETURN(0);
}
+int spider_check_and_set_wait_timeout(
+ THD *thd,
+ SPIDER_CONN *conn,
+ int *need_mon
+) {
+ int wait_timeout;
+ DBUG_ENTER("spider_check_and_set_wait_timeout");
+
+ wait_timeout = spider_param_wait_timeout(thd);
+ if (wait_timeout > 0)
+ {
+ spider_conn_queue_wait_timeout(conn, wait_timeout);
+ }
+ DBUG_RETURN(0);
+}
+
int spider_check_and_set_time_zone(
THD *thd,
SPIDER_CONN *conn,
@@ -1690,8 +1707,9 @@ int spider_check_and_set_time_zone(
DBUG_RETURN(0);
}
-int spider_xa_lock(
- XID_STATE *xid_state
+static int spider_xa_lock(
+ XID_STATE *xid_state,
+ XID *xid
) {
THD *thd = current_thd;
int error_num;
@@ -1709,7 +1727,7 @@ int spider_xa_lock(
#endif
old_proc_info = thd_proc_info(thd, "Locking xid by Spider");
#ifdef SPIDER_XID_USES_xid_cache_iterate
- if (xid_cache_insert(thd, xid_state))
+ if (xid_cache_insert(thd, xid_state, xid))
{
error_num = (spider_stmt_da_sql_errno(thd) == ER_XAER_DUPID ?
ER_SPIDER_XA_LOCKED_NUM : HA_ERR_OUT_OF_MEM);
@@ -1774,7 +1792,7 @@ error:
DBUG_RETURN(error_num);
}
-int spider_xa_unlock(
+static int spider_xa_unlock(
XID_STATE *xid_state
) {
THD *thd = current_thd;
@@ -1866,6 +1884,8 @@ int spider_internal_start_trx(
if (
(error_num = spider_check_and_set_sql_log_off(thd, conn,
&spider->need_mons[link_idx])) ||
+ (error_num = spider_check_and_set_wait_timeout(thd, conn,
+ &spider->need_mons[link_idx])) ||
(sync_autocommit &&
(error_num = spider_check_and_set_autocommit(thd, conn,
&spider->need_mons[link_idx])))
@@ -1891,7 +1911,7 @@ int spider_internal_start_trx(
if (!trx->trx_start)
{
if (
- thd->transaction.xid_state.xa_state == XA_ACTIVE &&
+ thd->transaction.xid_state.is_explicit_XA() &&
spider_param_support_xa()
) {
trx->trx_xa = TRUE;
@@ -1929,12 +1949,10 @@ int spider_internal_start_trx(
thd->server_id));
#endif
- trx->internal_xid_state.xa_state = XA_ACTIVE;
- trx->internal_xid_state.xid.set(&trx->xid);
#ifdef SPIDER_XID_STATE_HAS_in_thd
trx->internal_xid_state.in_thd = 1;
#endif
- if ((error_num = spider_xa_lock(&trx->internal_xid_state)))
+ if ((error_num = spider_xa_lock(&trx->internal_xid_state, &trx->xid)))
{
if (error_num == ER_SPIDER_XA_LOCKED_NUM)
my_message(error_num, ER_SPIDER_XA_LOCKED_STR, MYF(0));
@@ -2198,7 +2216,6 @@ int spider_internal_xa_commit(
table_xa_opened = FALSE;
}
spider_xa_unlock(&trx->internal_xid_state);
- trx->internal_xid_state.xa_state = XA_NOTR;
DBUG_RETURN(0);
error:
@@ -2209,7 +2226,6 @@ error:
error_in_commit:
error_open_table:
spider_xa_unlock(&trx->internal_xid_state);
- trx->internal_xid_state.xa_state = XA_NOTR;
DBUG_RETURN(error_num);
}
@@ -2436,7 +2452,6 @@ int spider_internal_xa_rollback(
table_xa_opened = FALSE;
}
spider_xa_unlock(&trx->internal_xid_state);
- trx->internal_xid_state.xa_state = XA_NOTR;
DBUG_RETURN(0);
error:
@@ -2447,7 +2462,6 @@ error:
error_in_rollback:
error_open_table:
spider_xa_unlock(&trx->internal_xid_state);
- trx->internal_xid_state.xa_state = XA_NOTR;
DBUG_RETURN(error_num);
}
@@ -2616,8 +2630,6 @@ int spider_internal_xa_prepare(
spider_close_sys_table(thd, table_xa, &open_tables_backup, TRUE);
table_xa_opened = FALSE;
}
- if (internal_xa)
- trx->internal_xid_state.xa_state = XA_PREPARED;
DBUG_RETURN(0);
error:
diff --git a/storage/spider/spd_trx.h b/storage/spider/spd_trx.h
index 3f3ca7fabed..8e7822e12e1 100644
--- a/storage/spider/spd_trx.h
+++ b/storage/spider/spd_trx.h
@@ -1,4 +1,5 @@
-/* Copyright (C) 2008-2014 Kentoku Shiba
+/* Copyright (C) 2008-2019 Kentoku Shiba
+ Copyright (C) 2019 MariaDB corp
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -99,18 +100,16 @@ int spider_check_and_set_sql_log_off(
int *need_mon
);
-int spider_check_and_set_time_zone(
+int spider_check_and_set_wait_timeout(
THD *thd,
SPIDER_CONN *conn,
int *need_mon
);
-int spider_xa_lock(
- XID_STATE *xid_state
-);
-
-int spider_xa_unlock(
- XID_STATE *xid_state
+int spider_check_and_set_time_zone(
+ THD *thd,
+ SPIDER_CONN *conn,
+ int *need_mon
);
int spider_start_internal_consistent_snapshot(
diff --git a/storage/tokudb/PerconaFT/portability/toku_crash.cc b/storage/tokudb/PerconaFT/portability/toku_crash.cc
index 0af85342a99..297cc29d9ca 100644
--- a/storage/tokudb/PerconaFT/portability/toku_crash.cc
+++ b/storage/tokudb/PerconaFT/portability/toku_crash.cc
@@ -70,7 +70,7 @@ run_gdb(pid_t parent_pid, const char *gdb_path) {
"-ex", "thread apply all bt",
"-ex", "thread apply all bt full",
exe_buf, pid_buf,
- NULL);
+ (char*) NULL);
}
static void
diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc
index b8f6a732585..c39b8099cd2 100644
--- a/storage/tokudb/ha_tokudb.cc
+++ b/storage/tokudb/ha_tokudb.cc
@@ -6934,7 +6934,7 @@ void ha_tokudb::trace_create_table_info(TABLE* form) {
field->flags);
}
for (i = 0; i < form->s->keys; i++) {
- KEY *key = &form->s->key_info[i];
+ KEY *key = &form->key_info[i];
TOKUDB_HANDLER_TRACE(
"key:%d:%s:%d",
i,
@@ -7062,7 +7062,7 @@ int ha_tokudb::create_secondary_dictionary(
sprintf(dict_name, "key-%s", key_info->name.str);
make_name(newname, newname_len, name, dict_name);
- prim_key = (hpk) ? NULL : &form->s->key_info[primary_key];
+ prim_key = (hpk) ? NULL : &form->key_info[primary_key];
//
// setup the row descriptor
@@ -7174,7 +7174,7 @@ int ha_tokudb::create_main_dictionary(
make_name(newname, newname_len, name, "main");
- prim_key = (hpk) ? NULL : &form->s->key_info[primary_key];
+ prim_key = (hpk) ? NULL : &form->key_info[primary_key];
//
// setup the row descriptor
@@ -7439,7 +7439,7 @@ int ha_tokudb::create(
error = write_key_name_to_status(
status_block,
- form->s->key_info[i].name.str,
+ form->key_info[i].name.str,
txn);
if (error) {
goto cleanup;
@@ -7785,13 +7785,18 @@ double ha_tokudb::scan_time() {
DBUG_RETURN(ret_val);
}
+bool ha_tokudb::is_clustering_key(uint index)
+{
+ return index == primary_key || key_is_clustering(&table->key_info[index]);
+}
+
double ha_tokudb::keyread_time(uint index, uint ranges, ha_rows rows)
{
TOKUDB_HANDLER_DBUG_ENTER("%u %u %" PRIu64, index, ranges, (uint64_t) rows);
- double ret_val;
- if (index == primary_key || key_is_clustering(&table->key_info[index])) {
- ret_val = read_time(index, ranges, rows);
- DBUG_RETURN(ret_val);
+ double cost;
+ if (index == primary_key || is_clustering_key(index)) {
+ cost = read_time(index, ranges, rows);
+ DBUG_RETURN(cost);
}
/*
It is assumed that we will read trough the whole key range and that all
@@ -7801,11 +7806,8 @@ double ha_tokudb::keyread_time(uint index, uint ranges, ha_rows rows)
blocks read. This model does not take into account clustered indexes -
engines that support that (e.g. InnoDB) may want to overwrite this method.
*/
- double keys_per_block= (stats.block_size/2.0/
- (table->key_info[index].key_length +
- ref_length) + 1);
- ret_val = (rows + keys_per_block - 1)/ keys_per_block;
- TOKUDB_HANDLER_DBUG_RETURN_DOUBLE(ret_val);
+ cost= handler::keyread_time(index, ranges, rows);
+ TOKUDB_HANDLER_DBUG_RETURN_DOUBLE(cost);
}
//
@@ -8167,7 +8169,7 @@ int ha_tokudb::tokudb_add_index(
for (uint i = 0; i < num_of_keys; i++) {
for (uint j = 0; j < table_arg->s->keys; j++) {
if (strcmp(key_info[i].name.str,
- table_arg->s->key_info[j].name.str) == 0) {
+ table_arg->key_info[j].name.str) == 0) {
error = HA_ERR_WRONG_COMMAND;
goto cleanup;
}
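The keyread_time() change above drops the hand-rolled half-full-block estimate (keys_per_block = stats.block_size / 2.0 / (key_length + ref_length) + 1, cost = (rows + keys_per_block - 1) / keys_per_block) in favour of handler::keyread_time(), and routes clustering keys through read_time() via the new is_clustering_key() helper. keyread_time() is the cost the optimizer consults for index-only reads, which a covering scan over a non-clustering secondary index would exercise; a sketch with an invented table name:

CREATE TABLE t_cost (a INT PRIMARY KEY, b INT, KEY (b)) ENGINE=TokuDB;
INSERT INTO t_cost VALUES (1,10),(2,20),(3,30);
EXPLAIN SELECT b FROM t_cost WHERE b > 10;  -- index-only plan ("Using index") is what keyread_time() costs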
diff --git a/storage/tokudb/ha_tokudb.h b/storage/tokudb/ha_tokudb.h
index 75fabbf8849..58121f9ecb1 100644
--- a/storage/tokudb/ha_tokudb.h
+++ b/storage/tokudb/ha_tokudb.h
@@ -871,6 +871,7 @@ public:
bool primary_key_is_clustered() {
return true;
}
+ bool is_clustering_key(uint index);
int cmp_ref(const uchar * ref1, const uchar * ref2);
bool check_if_incompatible_data(HA_CREATE_INFO * info, uint table_changes);
diff --git a/storage/tokudb/ha_tokudb_alter_56.cc b/storage/tokudb/ha_tokudb_alter_56.cc
index 2bcc5dee127..6a0802e2d86 100644
--- a/storage/tokudb/ha_tokudb_alter_56.cc
+++ b/storage/tokudb/ha_tokudb_alter_56.cc
@@ -1606,7 +1606,7 @@ int ha_tokudb::new_row_descriptor(TABLE* altered_table,
} else {
KEY* prim_key =
hidden_primary_key ? NULL :
- &altered_table->s->key_info[primary_key];
+ &altered_table->key_info[primary_key];
if (idx == primary_key) {
row_descriptor->size = create_main_key_descriptor(
(uchar*)row_descriptor->data,
diff --git a/storage/tokudb/ha_tokudb_update.cc b/storage/tokudb/ha_tokudb_update.cc
index bb59d112680..fec0a42e063 100644
--- a/storage/tokudb/ha_tokudb_update.cc
+++ b/storage/tokudb/ha_tokudb_update.cc
@@ -451,7 +451,7 @@ static bool check_all_update_expressions(
static bool full_field_in_key(TABLE* table, Field* field) {
assert_always(table->s->primary_key < table->s->keys);
- KEY* key = &table->s->key_info[table->s->primary_key];
+ KEY* key = &table->key_info[table->s->primary_key];
for (uint i = 0; i < key->user_defined_key_parts; i++) {
KEY_PART_INFO* key_part = &key->key_part[i];
if (strcmp(field->field_name.str, key_part->field->field_name.str) == 0) {
@@ -517,7 +517,7 @@ static bool check_point_update(Item* conds, TABLE* table) {
MY_BITMAP pk_fields;
if (bitmap_init(&pk_fields, NULL, table->s->fields, FALSE)) // 1 -> failure
return false;
- KEY *key = &table->s->key_info[table->s->primary_key];
+ KEY *key = &table->key_info[table->s->primary_key];
for (uint i = 0; i < key->user_defined_key_parts; i++)
bitmap_set_bit(&pk_fields, key->key_part[i].field->field_index);
@@ -555,7 +555,7 @@ static bool check_point_update(Item* conds, TABLE* table) {
static bool clustering_keys_exist(TABLE *table) {
for (uint keynr = 0; keynr < table->s->keys; keynr++) {
if (keynr != table->s->primary_key &&
- key_is_clustering(&table->s->key_info[keynr]))
+ key_is_clustering(&table->key_info[keynr]))
return true;
}
return false;
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_bug28430.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_bug28430.result
index c7450a1b9c0..aea5bab7cf3 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_bug28430.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_bug28430.result
@@ -51,6 +51,10 @@ DELETE FROM test.regular_tbl WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
+Warnings:
+Level Warning
+Code 1287
+Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE PROCEDURE test.proc_bykey()
BEGIN
DECLARE ins_count INT DEFAULT 1000;
@@ -72,6 +76,10 @@ DELETE FROM test.bykey_tbl WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
+Warnings:
+Level Warning
+Code 1287
+Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CREATE PROCEDURE test.proc_byrange()
BEGIN
DECLARE ins_count INT DEFAULT 1000;
@@ -93,6 +101,10 @@ DELETE FROM test.byrange_tbl WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
+Warnings:
+Level Warning
+Code 1287
+Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL test.proc_norm();
SELECT count(*) as "Master regular" FROM test.regular_tbl;
Master regular 500
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_bug30888.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_bug30888.result
index f3ffc908504..912b05216da 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_bug30888.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_bug30888.result
@@ -26,6 +26,10 @@ DELETE FROM test.regular_tbl WHERE id = del_count;
SET del_count = del_count - 2;
END WHILE;
END|
+Warnings:
+Level Warning
+Code 1287
+Message '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
CALL test.proc_norm();
connection slave;
connection master;
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result
index 65057791b48..df3ea1d6de6 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_mixed_dml.result
@@ -484,77 +484,78 @@ SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;
******************** CREATE USER ********************
CREATE USER 'user_test_rpl'@'localhost' IDENTIFIED BY PASSWORD '*1111111111111111111111111111111111111111';
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 N
+Host User Password plugin authentication_string Select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 mysql_native_password *1111111111111111111111111111111111111111 N
connection slave;
USE test_rpl;
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 N
+Host User Password plugin authentication_string Select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 mysql_native_password *1111111111111111111111111111111111111111 N
connection master;
******************** GRANT ********************
GRANT SELECT ON *.* TO 'user_test_rpl'@'localhost';
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 Y
+Host User Password plugin authentication_string Select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 mysql_native_password *1111111111111111111111111111111111111111 Y
connection slave;
USE test_rpl;
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 Y
+Host User Password plugin authentication_string Select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 mysql_native_password *1111111111111111111111111111111111111111 Y
connection master;
******************** REVOKE ********************
REVOKE SELECT ON *.* FROM 'user_test_rpl'@'localhost';
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 N
+Host User Password plugin authentication_string Select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 mysql_native_password *1111111111111111111111111111111111111111 N
connection slave;
USE test_rpl;
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
-localhost user_test_rpl *1111111111111111111111111111111111111111 N
+Host User Password plugin authentication_string Select_priv
+localhost user_test_rpl *1111111111111111111111111111111111111111 mysql_native_password *1111111111111111111111111111111111111111 N
connection master;
******************** SET PASSWORD ********************
SET PASSWORD FOR 'user_test_rpl'@'localhost' = '*0000000000000000000000000000000000000000';
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
+Host User Password plugin authentication_string Select_priv
localhost user_test_rpl *0000000000000000000000000000000000000000 mysql_native_password *0000000000000000000000000000000000000000 N
connection slave;
USE test_rpl;
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
+Host User Password plugin authentication_string Select_priv
localhost user_test_rpl *0000000000000000000000000000000000000000 mysql_native_password *0000000000000000000000000000000000000000 N
connection master;
******************** RENAME USER ********************
RENAME USER 'user_test_rpl'@'localhost' TO 'user_test_rpl_2'@'localhost';
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
+Host User Password plugin authentication_string Select_priv
localhost user_test_rpl_2 *0000000000000000000000000000000000000000 mysql_native_password *0000000000000000000000000000000000000000 N
connection slave;
USE test_rpl;
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
+Host User Password plugin authentication_string Select_priv
localhost user_test_rpl_2 *0000000000000000000000000000000000000000 mysql_native_password *0000000000000000000000000000000000000000 N
connection master;
******************** DROP USER ********************
DROP USER 'user_test_rpl_2'@'localhost';
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
+Host User Password plugin authentication_string Select_priv
connection slave;
USE test_rpl;
SELECT host, user, password, plugin, authentication_string, select_priv FROM mysql.user WHERE user LIKE 'user_test_rpl%';
-host user password plugin authentication_string select_priv
+Host User Password plugin authentication_string Select_priv
connection master;
INSERT INTO t1 VALUES(100, 'test');
******************** ANALYZE ********************
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test_rpl.t1 analyze status Engine-independent statistics collected
test_rpl.t1 analyze status OK
******************** CHECK TABLE ********************
@@ -679,7 +680,6 @@ DROP TRIGGER tr1;
******************** EVENTS ********************
-GRANT EVENT ON *.* TO 'root'@'localhost';
INSERT INTO t1 VALUES(1, 'test1');
CREATE EVENT e1 ON SCHEDULE EVERY '1' SECOND COMMENT 'e_second_comment' DO DELETE FROM t1;
SHOW EVENTS;
@@ -1101,8 +1101,6 @@ master-bin.000001 # Query # # use `test_rpl`; DELETE FROM t2
master-bin.000001 # Xid # # COMMIT /* XID */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test_rpl`; DROP TRIGGER tr1
-master-bin.000001 # Gtid # # GTID #-#-#
-master-bin.000001 # Query # # use `test_rpl`; GRANT EVENT ON *.* TO 'root'@'localhost'
master-bin.000001 # Gtid # # BEGIN GTID #-#-#
master-bin.000001 # Query # # use `test_rpl`; INSERT INTO t1 VALUES(1, 'test1')
master-bin.000001 # Xid # # COMMIT /* XID */
diff --git a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ft.result b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ft.result
index 593f177569f..e0b6b615bb6 100644
--- a/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ft.result
+++ b/storage/tokudb/mysql-test/rpl/r/rpl_tokudb_read_only_ft.result
@@ -24,6 +24,10 @@ unix_timestamp()-@tstart <= 10
connection slave;
connection master;
include/diff_tables.inc [master:test.t, slave:test.t]
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
+Warnings:
+Warning 1287 '<select expression> INTO <destination>;' is deprecated and will be removed in a future release. Please use 'SELECT <select list> INTO <destination> FROM...' instead
connection master;
drop table if exists t;
connection slave;
diff --git a/storage/tokudb/mysql-test/tokudb/include/cluster_key.inc b/storage/tokudb/mysql-test/tokudb/include/cluster_key.inc
index d637b46e8fc..1914da14bb3 100644
--- a/storage/tokudb/mysql-test/tokudb/include/cluster_key.inc
+++ b/storage/tokudb/mysql-test/tokudb/include/cluster_key.inc
@@ -1,7 +1,10 @@
# test for TokuDB clustering keys.
# test assumes that a table 't1' exists with the following columns:
# a int, b int, c int, d int
-insert into t1 values (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),(9,90,900,9000);
+insert into t1 values
+ (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),
+ (5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),
+ (9,90,900,9000),(10,100,1000,10000),(11,110,1100,11000);
#normal queries
@@ -13,20 +16,20 @@ select * from t1 where a > 5;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where b > 30;
-select * from t1 where b > 30;
+explain select * from t1 where b > 70;
+select * from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where c > 750;
-select * from t1 where c > 750;
+explain select * from t1 where c > 950;
+select * from t1 where c > 950;
#covering indexes
# ignore rows column
--replace_column 9 NULL;
-explain select a from t1 where a > 8;
-select a from t1 where a > 8;
+explain select a from t1 where a > 10;
+select a from t1 where a > 10;
# ignore rows column
--replace_column 9 NULL;
@@ -35,8 +38,8 @@ select a,b from t1 where b > 30;
# ignore rows column
--replace_column 9 NULL;
-explain select a,b from t1 where c > 750;
-select a,c from t1 where c > 750;
+explain select a,b from t1 where c > 350;
+select a,c from t1 where c > 350;
alter table t1 add index bdca(b,d,c,a) clustering=yes;
@@ -51,25 +54,25 @@ select * from t1 where a > 5;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where b > 30;
-select * from t1 where b > 30;
+explain select * from t1 where b > 70;
+select * from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where c > 750;
-select * from t1 where c > 750;
+explain select * from t1 where c > 950;
+select * from t1 where c > 950;
#covering indexes
# ignore rows column
--replace_column 9 NULL;
-explain select b from t1 where b > 30;
-select b from t1 where b > 30;
+explain select b from t1 where b > 70;
+select b from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select b from t1 where c > 750;
-select c from t1 where c > 750;
+explain select b from t1 where c > 950;
+select c from t1 where c > 950;
alter table t1 add e varchar(20);
@@ -77,25 +80,25 @@ alter table t1 add primary key (a,b,c);
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where a > 5;
-select * from t1 where a > 5;
+explain select * from t1 where a > 8;
+select * from t1 where a > 8;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where b > 30;
-select * from t1 where b > 30;
+explain select * from t1 where b > 70;
+select * from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where c > 750;
-select * from t1 where c > 750;
+explain select * from t1 where c > 950;
+select * from t1 where c > 950;
#covering indexes
# ignore rows column
--replace_column 9 NULL;
-explain select a from t1 where a > 8;
-select a from t1 where a > 8;
+explain select a from t1 where a > 10;
+select a from t1 where a > 10;
# ignore rows column
--replace_column 9 NULL;
@@ -104,8 +107,8 @@ select a,b from t1 where b > 30;
# ignore rows column
--replace_column 9 NULL;
-explain select a,b from t1 where c > 750;
-select a,c from t1 where c > 750;
+explain select a,b from t1 where c > 350;
+select a,c from t1 where c > 350;
alter table t1 drop primary key;
@@ -116,23 +119,23 @@ select * from t1 where a > 5;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where b > 30;
-select * from t1 where b > 30;
+explain select * from t1 where b > 70;
+select * from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where c > 750;
-select * from t1 where c > 750;
+explain select * from t1 where c > 950;
+select * from t1 where c > 950;
#covering indexes
# ignore rows column
--replace_column 9 NULL;
-explain select b from t1 where b > 30;
-select b from t1 where b > 30;
+explain select b from t1 where b > 70;
+select b from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select b from t1 where c > 750;
-select c from t1 where c > 750;
+explain select b from t1 where c > 950;
+select c from t1 where c > 950;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_add_drop.result b/storage/tokudb/mysql-test/tokudb/r/card_add_drop.result
index 71a39eb1f3e..887ccd5f073 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_add_drop.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_add_drop.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists tt;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table tt (a int, b int, c int, d int, key(a), key(b), key(c));
insert into tt values (0,0,0,0),(1,0,0,0),(2,0,1,0),(3,0,1,0);
show indexes from tt;
@@ -9,6 +11,7 @@ tt 1 b 1 b A 4 NULL NULL YES BTREE
tt 1 c 1 c A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -23,6 +26,7 @@ tt 1 c 1 c A 4 NULL NULL YES BTREE
tt 1 d 1 d A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -35,4 +39,5 @@ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_par
tt 1 a 1 a A 4 NULL NULL YES BTREE
tt 1 c 1 c A 4 NULL NULL YES BTREE
tt 1 d 1 d A 2 NULL NULL YES BTREE
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_add_index.result b/storage/tokudb/mysql-test/tokudb/r/card_add_index.result
index 9a929b19a80..ab7c6e330f4 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_add_index.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_add_index.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists tt;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table tt (a int, b int, c int, primary key(a));
insert into tt values (1,0,0),(2,0,0),(3,0,1),(4,0,1);
show indexes from tt;
@@ -7,6 +9,7 @@ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_par
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -18,6 +21,7 @@ tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
tt 1 b 1 b A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -31,6 +35,7 @@ tt 1 b 1 b A 2 NULL NULL YES BTREE
tt 1 c 1 c A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -43,4 +48,5 @@ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_par
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
tt 1 b 1 b A 2 NULL NULL YES BTREE
tt 1 c 1 c A 4 NULL NULL YES BTREE
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_drop_index.result b/storage/tokudb/mysql-test/tokudb/r/card_drop_index.result
index 2cfdfe11296..f924c8cd12b 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_drop_index.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_drop_index.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists tt;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table tt (a int, b int, c int, key(b), key(c), primary key(a));
insert into tt values (1,0,0),(2,0,0),(3,0,1),(4,0,1);
show indexes from tt;
@@ -9,6 +11,7 @@ tt 1 b 1 b A 4 NULL NULL YES BTREE
tt 1 c 1 c A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -28,4 +31,5 @@ flush tables;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_drop_index_2.result b/storage/tokudb/mysql-test/tokudb/r/card_drop_index_2.result
index ed28d2a3226..5980b5795d5 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_drop_index_2.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_drop_index_2.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists tt;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table tt (a int, b int, c int, primary key(a), key(b), key(c));
insert into tt values (0, 0, 0), (0+1, 0, 0), (0+2, 0, 0), (0+3, 0, 0);
insert into tt values (4, 4, 0), (4+1, 4, 0), (4+2, 4, 0), (4+3, 4, 0);
@@ -136,6 +138,7 @@ tt 1 b 1 b A 500 NULL NULL YES BTREE
tt 1 c 1 c A 500 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -155,4 +158,5 @@ flush tables;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 500 NULL NULL BTREE
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_drop_pk.result b/storage/tokudb/mysql-test/tokudb/r/card_drop_pk.result
index 2369d88c274..c560defb1fb 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_drop_pk.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_drop_pk.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists tt;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table tt (a int, b int, c int, key(b), key(c), primary key(a));
insert into tt values (1,0,0),(2,0,0),(3,0,1),(4,0,1);
show indexes from tt;
@@ -9,6 +11,7 @@ tt 1 b 1 b A 4 NULL NULL YES BTREE
tt 1 c 1 c A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -25,4 +28,5 @@ show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 1 b 1 b A 4 NULL NULL YES BTREE
tt 1 c 1 c A 4 NULL NULL YES BTREE
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_no_keys.result b/storage/tokudb/mysql-test/tokudb/r/card_no_keys.result
index 1302cfaf252..a96f3edd4b8 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_no_keys.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_no_keys.result
@@ -6,6 +6,7 @@ show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_pk.result b/storage/tokudb/mysql-test/tokudb/r/card_pk.result
index b0317507f7c..173fb696d69 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_pk.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_pk.result
@@ -7,6 +7,7 @@ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_par
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_pk_2.result b/storage/tokudb/mysql-test/tokudb/r/card_pk_2.result
index 3c1b652db15..b1c3e679bb8 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_pk_2.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_pk_2.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists tt;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table tt (a int, b int, primary key(a,b));
insert into tt values (0,0),(0,1),(1,0),(1,1);
show indexes from tt;
@@ -8,6 +10,7 @@ tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
tt 0 PRIMARY 2 b A 4 NULL NULL BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -18,4 +21,5 @@ show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4 NULL NULL BTREE
tt 0 PRIMARY 2 b A 4 NULL NULL BTREE
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_pk_sk.result b/storage/tokudb/mysql-test/tokudb/r/card_pk_sk.result
index 02c8d1f8218..2f527cbbed5 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_pk_sk.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_pk_sk.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists tt;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table tt (a int, b int, primary key(a), key(b));
insert into tt values (4*0,4*0+1),(4*0+1,4*0+2),(4*0+2,4*0+3),(4*0+3,4*0+4);
insert into tt values (4*1,4*1+1),(4*1+1,4*1+2),(4*1+2,4*1+3),(4*1+3,4*1+4);
@@ -1007,6 +1009,7 @@ tt 0 PRIMARY 1 a A 4000 NULL NULL BTREE
tt 1 b 1 b A 4000 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -2025,6 +2028,7 @@ tt 0 PRIMARY 1 a A 4000 NULL NULL BTREE
tt 1 b 1 b A 4000 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -2035,4 +2039,5 @@ show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 0 PRIMARY 1 a A 4000 NULL NULL BTREE
tt 1 b 1 b A 2 NULL NULL YES BTREE
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_scale_percent.result b/storage/tokudb/mysql-test/tokudb/r/card_scale_percent.result
index 981433fac91..a452be2006a 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_scale_percent.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_scale_percent.result
@@ -1,5 +1,6 @@
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
set global tokudb_cardinality_scale_percent = 10;
show indexes from tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_sk.result b/storage/tokudb/mysql-test/tokudb/r/card_sk.result
index 310fc863a9b..1956c846e03 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_sk.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_sk.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists tt;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table tt (a int, b int, key(b));
insert into tt values (1,0),(2,1),(3,2),(4,3);
insert into tt values (5,0),(6,1),(7,2),(8,3);
@@ -8,6 +10,7 @@ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_par
tt 1 b 1 b A 8 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -16,4 +19,5 @@ flush tables;
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 1 b 1 b A 8 NULL NULL YES BTREE
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_sk_2.result b/storage/tokudb/mysql-test/tokudb/r/card_sk_2.result
index 8ff57b63e5d..f0fcbac0e3a 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_sk_2.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_sk_2.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists tt;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table tt (a int, b int, key(a,b));
insert into tt values (0,0),(0,1),(1,0),(1,1);
show indexes from tt;
@@ -8,6 +10,7 @@ tt 1 a 1 a A 4 NULL NULL YES BTREE
tt 1 a 2 b A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -18,4 +21,5 @@ show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
tt 1 a 1 a A 4 NULL NULL YES BTREE
tt 1 a 2 b A 4 NULL NULL YES BTREE
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/r/card_unique_sk.result b/storage/tokudb/mysql-test/tokudb/r/card_unique_sk.result
index cbcab7bdc44..22b0733a933 100644
--- a/storage/tokudb/mysql-test/tokudb/r/card_unique_sk.result
+++ b/storage/tokudb/mysql-test/tokudb/r/card_unique_sk.result
@@ -7,6 +7,7 @@ Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_par
tt 0 a 1 a A 4 NULL NULL YES BTREE
analyze table tt;
Table Op Msg_type Msg_text
+test.tt analyze status Engine-independent statistics collected
test.tt analyze status OK
show indexes from tt;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_2968-2.result b/storage/tokudb/mysql-test/tokudb/r/cluster_2968-2.result
index 4c3f971770e..b2749966565 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_2968-2.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_2968-2.result
@@ -1096,7 +1096,7 @@ t CREATE TABLE `t` (
explain select straight_join s.a,t.a from s,t where s.b = t.b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE s index b,b_2,b_3 b_2 10 NULL 1000 Using where; Using index
-1 SIMPLE t ref b,b_2,b_3 b_2 5 test.s.b 1 Using index
+1 SIMPLE t ref b,b_2,b_3 b_3 5 test.s.b 1 Using index
alter table s drop key b_2;
alter table t drop key b_2;
show create table s;
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_create_table.result b/storage/tokudb/mysql-test/tokudb/r/cluster_create_table.result
index 02a90c66398..bbc886fb329 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_create_table.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_create_table.result
@@ -39,16 +39,20 @@ t1 CREATE TABLE `t1` (
KEY `foo` (`c`,`d`) `clustering`=yes,
KEY `bar` (`d`,`c`,`b`,`a`) `clustering`=yes
) ENGINE=TokuDB DEFAULT CHARSET=latin1
-insert into t1 value (1,1,1,1),(2,2,2,2),(3,3,3,3),(4,4,4,4),(32,54,12,56);
+insert into t1 value
+(1,1,1,1),(2,2,2,2),(3,3,3,3),(4,4,4,4),(5,5,5,5),(6,6,6,6),
+(32,54,12,56);
explain select * from t1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL PRIMARY 16 NULL 5 Using index
+1 SIMPLE t1 index NULL PRIMARY 16 NULL 7 Using index
select * from t1;
a b c d
1 1 1 1
2 2 2 2
3 3 3 3
4 4 4 4
+5 5 5 5
+6 6 6 6
32 54 12 56
explain select d from t1 where d > 30;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_delete.result b/storage/tokudb/mysql-test/tokudb/r/cluster_delete.result
index 1fd519debef..f85845232dd 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_delete.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_delete.result
@@ -1,7 +1,10 @@
SET DEFAULT_STORAGE_ENGINE='tokudb';
DROP TABLE IF EXISTS t1;
create table t1(a int, b int, c int, d int, primary key(a), key(b) clustering=yes, key (c))engine=tokudb;
-insert into t1 values (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),(9,90,900,9000);
+insert into t1 values
+(1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),
+(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),
+(9,90,900,9000),(10,100,1000,10000),(11,110,1100,11000);
explain select * from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
@@ -11,48 +14,47 @@ a b c d
7 70 700 7000
8 80 800 8000
9 90 900 9000
-explain select * from t1 where b > 30;
+10 100 1000 10000
+11 110 1100 11000
+explain select * from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
-select * from t1 where b > 30;
+select * from t1 where b > 70;
a b c d
-4 40 400 4000
-5 50 500 5000
-6 60 600 6000
-7 70 700 7000
8 80 800 8000
9 90 900 9000
-explain select * from t1 where c > 750;
+10 100 1000 10000
+11 110 1100 11000
+explain select * from t1 where c > 850;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 5 NULL NULL; Using where
-select * from t1 where c > 750;
+1 SIMPLE t1 ALL c NULL NULL NULL NULL; Using where
+select * from t1 where c > 850;
a b c d
-8 80 800 8000
9 90 900 9000
-explain select a from t1 where a > 8;
+10 100 1000 10000
+11 110 1100 11000
+explain select a from t1 where a > 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where; Using index
-select a from t1 where a > 8;
+select a from t1 where a > 10;
a
-9
-explain select a,b from t1 where b > 30;
+11
+explain select a,b from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
-select a,b from t1 where b > 30;
+select a,b from t1 where b > 70;
a b
-4 40
-5 50
-6 60
-7 70
8 80
9 90
-explain select a,b from t1 where c > 750;
+10 100
+11 110
+explain select a,b from t1 where c > 950;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 5 NULL NULL; Using where
-select a,c from t1 where c > 750;
+select a,c from t1 where c > 950;
a c
-8 800
-9 900
+10 1000
+11 1100
delete from t1 where b>30 and b < 60;
select * from t1;
a b c d
@@ -63,15 +65,16 @@ a b c d
7 70 700 7000
8 80 800 8000
9 90 900 9000
-explain select * from t1 where a > 5;
+10 100 1000 10000
+11 110 1100 11000
+explain select * from t1 where a > 8;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
-select * from t1 where a > 5;
+select * from t1 where a > 8;
a b c d
-6 60 600 6000
-7 70 700 7000
-8 80 800 8000
9 90 900 9000
+10 100 1000 10000
+11 110 1100 11000
explain select * from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index b b 5 NULL NULL; Using where; Using index
@@ -81,19 +84,25 @@ a b c d
7 70 700 7000
8 80 800 8000
9 90 900 9000
+10 100 1000 10000
+11 110 1100 11000
explain select * from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range c c 5 NULL NULL; Using where
+1 SIMPLE t1 ALL c NULL NULL NULL NULL; Using where
select * from t1 where c > 750;
a b c d
8 80 800 8000
9 90 900 9000
+10 100 1000 10000
+11 110 1100 11000
explain select a from t1 where a > 8;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where; Using index
+1 SIMPLE t1 index PRIMARY c 5 NULL NULL; Using where; Using index
select a from t1 where a > 8;
a
9
+10
+11
explain select a,b from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index b b 5 NULL NULL; Using where; Using index
@@ -103,13 +112,14 @@ a b
7 70
8 80
9 90
-explain select a,b from t1 where c > 750;
+10 100
+11 110
+explain select a,b from t1 where c > 1050;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 5 NULL NULL; Using where
-select a,c from t1 where c > 750;
+select a,c from t1 where c > 1050;
a c
-8 800
-9 900
+11 1100
alter table t1 drop primary key;
explain select * from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
@@ -120,22 +130,23 @@ a b c d
7 70 700 7000
8 80 800 8000
9 90 900 9000
-explain select * from t1 where b > 30;
+10 100 1000 10000
+11 110 1100 11000
+explain select * from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
-select * from t1 where b > 30;
+select * from t1 where b > 70;
a b c d
-6 60 600 6000
-7 70 700 7000
8 80 800 8000
9 90 900 9000
-explain select * from t1 where c > 750;
+10 100 1000 10000
+11 110 1100 11000
+explain select * from t1 where c > 1050;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 5 NULL NULL; Using where
-select * from t1 where c > 750;
+select * from t1 where c > 1050;
a b c d
-8 80 800 8000
-9 90 900 9000
+11 110 1100 11000
explain select a from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL NULL; Using where
@@ -145,61 +156,76 @@ a
7
8
9
-explain select a,b from t1 where b > 30;
+10
+11
+explain select a,b from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
-select a,b from t1 where b > 30;
+select a,b from t1 where b > 70;
a b
-6 60
-7 70
8 80
9 90
-explain select a,b from t1 where c > 750;
+10 100
+11 110
+explain select a,b from t1 where c > 1050;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 5 NULL NULL; Using where
-select a,c from t1 where c > 750;
+select a,c from t1 where c > 1050;
a c
-8 800
-9 900
+11 1100
delete from t1 where b > 10 and b < 90;
select * from t1;
a b c d
1 10 100 1000
9 90 900 9000
+10 100 1000 10000
+11 110 1100 11000
explain select * from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL NULL; Using where
select * from t1 where a > 5;
a b c d
9 90 900 9000
+10 100 1000 10000
+11 110 1100 11000
explain select * from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index b b 5 NULL NULL; Using where; Using index
select * from t1 where b > 30;
a b c d
9 90 900 9000
+10 100 1000 10000
+11 110 1100 11000
explain select * from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL c NULL NULL NULL NULL; Using where
select * from t1 where c > 750;
a b c d
9 90 900 9000
+10 100 1000 10000
+11 110 1100 11000
explain select a from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL NULL; Using where
select a from t1 where a > 5;
a
9
+10
+11
explain select a,b from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index b b 5 NULL NULL; Using where; Using index
select a,b from t1 where b > 30;
a b
9 90
+10 100
+11 110
explain select a,b from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL c NULL NULL NULL NULL; Using where
select a,c from t1 where c > 750;
a c
9 900
+10 1000
+11 1100
drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_filter_key.result b/storage/tokudb/mysql-test/tokudb/r/cluster_filter_key.result
index a594b104444..aa33246bfeb 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_filter_key.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_filter_key.result
@@ -7,22 +7,19 @@ insert into t1 values (2,"20",200);
insert into t1 values (3,"30",300);
insert into t1 values (4,"40",400);
insert into t1 values (5,"50",500);
-explain select * from t1 where a > 2;
+explain select * from t1 where a > 3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 3 Using where
-select * from t1 where a > 2;
+1 SIMPLE t1 range a a 5 NULL 2 Using where
+select * from t1 where a > 3;
a b c
-3 30 300
4 40 400
5 50 500
-select b from t1 where a > 2;
+select b from t1 where a > 3;
b
-30
40
50
-select c from t1 where a > 2;
+select c from t1 where a > 3;
c
-300
400
500
delete from t1 where a <2;
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_key.result b/storage/tokudb/mysql-test/tokudb/r/cluster_key.result
index fab288047be..4c2fc08cd48 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_key.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_key.result
@@ -1,7 +1,10 @@
SET DEFAULT_STORAGE_ENGINE='tokudb';
DROP TABLE IF EXISTS t1;
create table t1(a int, b int, c int, d int, primary key(a,b,c), key(b) clustering=yes, key (c))engine=tokudb;
-insert into t1 values (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),(9,90,900,9000);
+insert into t1 values
+(1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),
+(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),
+(9,90,900,9000),(10,100,1000,10000),(11,110,1100,11000);
explain select * from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
@@ -11,30 +14,30 @@ a b c d
7 70 700 7000
8 80 800 8000
9 90 900 9000
-explain select * from t1 where b > 30;
+10 100 1000 10000
+11 110 1100 11000
+explain select * from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
-select * from t1 where b > 30;
+select * from t1 where b > 70;
a b c d
-4 40 400 4000
-5 50 500 5000
-6 60 600 6000
-7 70 700 7000
8 80 800 8000
9 90 900 9000
-explain select * from t1 where c > 750;
+10 100 1000 10000
+11 110 1100 11000
+explain select * from t1 where c > 950;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 4 NULL NULL; Using where
-select * from t1 where c > 750;
+select * from t1 where c > 950;
a b c d
-8 80 800 8000
-9 90 900 9000
-explain select a from t1 where a > 8;
+10 100 1000 10000
+11 110 1100 11000
+explain select a from t1 where a > 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where; Using index
-select a from t1 where a > 8;
+select a from t1 where a > 10;
a
-9
+11
explain select a,b from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index b c 4 NULL NULL; Using where; Using index
@@ -46,13 +49,21 @@ a b
7 70
8 80
9 90
-explain select a,b from t1 where c > 750;
+10 100
+11 110
+explain select a,b from t1 where c > 350;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index c c 4 NULL NULL; Using where; Using index
-select a,c from t1 where c > 750;
+select a,c from t1 where c > 350;
a c
+4 400
+5 500
+6 600
+7 700
8 800
9 900
+10 1000
+11 1100
alter table t1 add index bdca(b,d,c,a) clustering=yes;
insert into t1 values (10,10,10,10);
alter table t1 drop index bdca;
@@ -67,79 +78,73 @@ a b c d
8 80 800 8000
9 90 900 9000
10 10 10 10
-explain select * from t1 where b > 30;
+10 100 1000 10000
+11 110 1100 11000
+explain select * from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
-select * from t1 where b > 30;
+select * from t1 where b > 70;
a b c d
-4 40 400 4000
-5 50 500 5000
-6 60 600 6000
-7 70 700 7000
8 80 800 8000
9 90 900 9000
-explain select * from t1 where c > 750;
+10 100 1000 10000
+11 110 1100 11000
+explain select * from t1 where c > 950;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 4 NULL NULL; Using where
-select * from t1 where c > 750;
+select * from t1 where c > 950;
a b c d
-8 80 800 8000
-9 90 900 9000
-explain select b from t1 where b > 30;
+10 100 1000 10000
+11 110 1100 11000
+explain select b from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
-select b from t1 where b > 30;
+select b from t1 where b > 70;
b
-40
-50
-60
-70
80
90
-explain select b from t1 where c > 750;
+100
+110
+explain select b from t1 where c > 950;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 4 NULL NULL; Using where
-select c from t1 where c > 750;
+select c from t1 where c > 950;
c
-800
-900
+1000
+1100
alter table t1 add e varchar(20);
alter table t1 add primary key (a,b,c);
-explain select * from t1 where a > 5;
+explain select * from t1 where a > 8;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
-select * from t1 where a > 5;
+select * from t1 where a > 8;
a b c d e
-6 60 600 6000 NULL
-7 70 700 7000 NULL
-8 80 800 8000 NULL
9 90 900 9000 NULL
10 10 10 10 NULL
-explain select * from t1 where b > 30;
+10 100 1000 10000 NULL
+11 110 1100 11000 NULL
+explain select * from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
-select * from t1 where b > 30;
+select * from t1 where b > 70;
a b c d e
-4 40 400 4000 NULL
-5 50 500 5000 NULL
-6 60 600 6000 NULL
-7 70 700 7000 NULL
8 80 800 8000 NULL
9 90 900 9000 NULL
-explain select * from t1 where c > 750;
+10 100 1000 10000 NULL
+11 110 1100 11000 NULL
+explain select * from t1 where c > 950;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 4 NULL NULL; Using where
-select * from t1 where c > 750;
+select * from t1 where c > 950;
a b c d e
-8 80 800 8000 NULL
-9 90 900 9000 NULL
-explain select a from t1 where a > 8;
+10 100 1000 10000 NULL
+11 110 1100 11000 NULL
+explain select a from t1 where a > 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where; Using index
-select a from t1 where a > 8;
+select a from t1 where a > 10;
a
-9
-10
+11
explain select a,b from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index b c 4 NULL NULL; Using where; Using index
@@ -151,13 +156,21 @@ a b
7 70
8 80
9 90
-explain select a,b from t1 where c > 750;
+10 100
+11 110
+explain select a,b from t1 where c > 350;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index c c 4 NULL NULL; Using where; Using index
-select a,c from t1 where c > 750;
+select a,c from t1 where c > 350;
a c
+4 400
+5 500
+6 600
+7 700
8 800
9 900
+10 1000
+11 1100
alter table t1 drop primary key;
explain select * from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
@@ -169,40 +182,38 @@ a b c d e
8 80 800 8000 NULL
9 90 900 9000 NULL
10 10 10 10 NULL
-explain select * from t1 where b > 30;
+10 100 1000 10000 NULL
+11 110 1100 11000 NULL
+explain select * from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
-select * from t1 where b > 30;
+select * from t1 where b > 70;
a b c d e
-4 40 400 4000 NULL
-5 50 500 5000 NULL
-6 60 600 6000 NULL
-7 70 700 7000 NULL
8 80 800 8000 NULL
9 90 900 9000 NULL
-explain select * from t1 where c > 750;
+10 100 1000 10000 NULL
+11 110 1100 11000 NULL
+explain select * from t1 where c > 950;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 4 NULL NULL; Using where
-select * from t1 where c > 750;
+select * from t1 where c > 950;
a b c d e
-8 80 800 8000 NULL
-9 90 900 9000 NULL
-explain select b from t1 where b > 30;
+10 100 1000 10000 NULL
+11 110 1100 11000 NULL
+explain select b from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 4 NULL NULL; Using where; Using index
-select b from t1 where b > 30;
+select b from t1 where b > 70;
b
-40
-50
-60
-70
80
90
-explain select b from t1 where c > 750;
+100
+110
+explain select b from t1 where c > 950;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 4 NULL NULL; Using where
-select c from t1 where c > 750;
+select c from t1 where c > 950;
c
-800
-900
+1000
+1100
drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_query_plan.result b/storage/tokudb/mysql-test/tokudb/r/cluster_query_plan.result
index c6754db3981..6b458b36585 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_query_plan.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_query_plan.result
@@ -2,16 +2,16 @@ SET DEFAULT_STORAGE_ENGINE='tokudb';
DROP TABLE IF EXISTS t1;
create table t1(a int, b int, c int, d int, primary key(a), key(b) clustering=yes, key (c))engine=tokudb;
insert into t1 values (1,1,1,1),(2,2,2,2),(3,3,3,3),(4,4,4,4),(5,5,5,5),(6,6,6,6),(7,7,7,7),(8,8,8,8),(9,9,9,9);
-explain select * from t1 where b > 2;
+explain select * from t1 where b > 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
-explain select * from t1 where c > 2;
+explain select * from t1 where c > 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL c NULL NULL NULL NULL; Using where
-explain select * from t1 where a > 4;
+explain select * from t1 where a > 7;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
-explain select * from t1 where c > 7;
+explain select * from t1 where c > 8;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 5 NULL NULL; Using where
explain select * from t1 where b > 7;
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_tokudb_bug_993_2.result b/storage/tokudb/mysql-test/tokudb/r/cluster_tokudb_bug_993_2.result
index 41abded2857..2dcb65cee10 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_tokudb_bug_993_2.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_tokudb_bug_993_2.result
@@ -32,13 +32,13 @@ max(a)
7
explain select a,b from z1 where a < 7;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE z1 range a a 5 NULL 12 Using where; Using index
+1 SIMPLE z1 index a a 10 NULL 14 Using where; Using index
select max(a) from z1 where a < 7;
max(a)
3
explain select a,b from z1 where a < 3;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE z1 range a a 5 NULL 9 Using where; Using index
+1 SIMPLE z1 index a a 10 NULL 14 Using where; Using index
select max(a) from z1 where a < 3;
max(a)
1
diff --git a/storage/tokudb/mysql-test/tokudb/r/cluster_update.result b/storage/tokudb/mysql-test/tokudb/r/cluster_update.result
index 14ab9a27dc4..586cf2e23d9 100644
--- a/storage/tokudb/mysql-test/tokudb/r/cluster_update.result
+++ b/storage/tokudb/mysql-test/tokudb/r/cluster_update.result
@@ -1,7 +1,10 @@
SET DEFAULT_STORAGE_ENGINE='tokudb';
DROP TABLE IF EXISTS t1;
create table t1(a int, b int, c int, d int, primary key(a), key(b) clustering=yes, key (c))engine=tokudb;
-insert into t1 values (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),(9,90,900,9000);
+insert into t1 values
+(1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),
+(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),
+(9,90,900,9000),(10,100,1000,10000),(11,110,1100,11000);
explain select * from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where
@@ -11,48 +14,48 @@ a b c d
7 70 700 7000
8 80 800 8000
9 90 900 9000
-explain select * from t1 where b > 30;
+10 100 1000 10000
+11 110 1100 11000
+explain select * from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
-select * from t1 where b > 30;
+select * from t1 where b > 70;
a b c d
-4 40 400 4000
-5 50 500 5000
-6 60 600 6000
-7 70 700 7000
8 80 800 8000
9 90 900 9000
-explain select * from t1 where c > 750;
+10 100 1000 10000
+11 110 1100 11000
+explain select * from t1 where c > 950;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 5 NULL NULL; Using where
-select * from t1 where c > 750;
+select * from t1 where c > 950;
a b c d
-8 80 800 8000
-9 90 900 9000
-explain select a from t1 where a > 8;
+10 100 1000 10000
+11 110 1100 11000
+explain select a from t1 where a > 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where; Using index
select a from t1 where a > 8;
a
9
-explain select a,b from t1 where b > 30;
+10
+11
+explain select a,b from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
-select a,b from t1 where b > 30;
+select a,b from t1 where b > 70;
a b
-4 40
-5 50
-6 60
-7 70
8 80
9 90
-explain select a,b from t1 where c > 750;
+10 100
+11 110
+explain select a,b from t1 where c > 950;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 5 NULL NULL; Using where
-select a,c from t1 where c > 750;
+select a,c from t1 where c > 950;
a c
-8 800
-9 900
+10 1000
+11 1100
update t1 set c = c+5, b = b+5 where b>30;
explain select * from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
@@ -63,6 +66,8 @@ a b c d
7 75 705 7000
8 85 805 8000
9 95 905 9000
+10 105 1005 10000
+11 115 1105 11000
explain select * from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index b b 5 NULL NULL; Using where; Using index
@@ -74,6 +79,8 @@ a b c d
7 75 705 7000
8 85 805 8000
9 95 905 9000
+10 105 1005 10000
+11 115 1105 11000
explain select * from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL c NULL NULL NULL NULL; Using where
@@ -81,12 +88,14 @@ select * from t1 where c > 750;
a b c d
8 85 805 8000
9 95 905 9000
-explain select a from t1 where a > 8;
+10 105 1005 10000
+11 115 1105 11000
+explain select a from t1 where a > 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL NULL; Using where; Using index
-select a from t1 where a > 8;
+select a from t1 where a > 10;
a
-9
+11
explain select a,b from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 NA b b 5 NULL NULL; Using where; Using index
@@ -98,6 +107,8 @@ a b
7 75
8 85
9 95
+10 105
+11 115
explain select a,b from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL c NULL NULL NULL NULL; Using where
@@ -105,6 +116,8 @@ select a,c from t1 where c > 750;
a c
8 805
9 905
+10 1005
+11 1105
alter table t1 drop primary key;
explain select * from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
@@ -115,24 +128,25 @@ a b c d
7 75 705 7000
8 85 805 8000
9 95 905 9000
-explain select * from t1 where b > 30;
+10 105 1005 10000
+11 115 1105 11000
+explain select * from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
-select * from t1 where b > 30;
+select * from t1 where b > 70;
a b c d
-4 45 405 4000
-5 55 505 5000
-6 65 605 6000
7 75 705 7000
8 85 805 8000
9 95 905 9000
-explain select * from t1 where c > 750;
+10 105 1005 10000
+11 115 1105 11000
+explain select * from t1 where c > 950;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 5 NULL NULL; Using where
-select * from t1 where c > 750;
+select * from t1 where c > 950;
a b c d
-8 85 805 8000
-9 95 905 9000
+10 105 1005 10000
+11 115 1105 11000
explain select a from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL NULL; Using where
@@ -142,24 +156,25 @@ a
7
8
9
-explain select a,b from t1 where b > 30;
+10
+11
+explain select a,b from t1 where b > 70;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range b b 5 NULL NULL; Using where; Using index
-select a,b from t1 where b > 30;
+select a,b from t1 where b > 70;
a b
-4 45
-5 55
-6 65
7 75
8 85
9 95
-explain select a,b from t1 where c > 750;
+10 105
+11 115
+explain select a,b from t1 where c > 950;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range c c 5 NULL NULL; Using where
-select a,c from t1 where c > 750;
+select a,c from t1 where c > 950;
a c
-8 805
-9 905
+10 1005
+11 1105
update t1 set c = c+5, b = b+5 where b>30;
select * from t1;
a b c d
@@ -172,6 +187,8 @@ a b c d
7 80 710 7000
8 90 810 8000
9 100 910 9000
+10 110 1010 10000
+11 120 1110 11000
explain select * from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL NULL; Using where
@@ -181,6 +198,8 @@ a b c d
7 80 710 7000
8 90 810 8000
9 100 910 9000
+10 110 1010 10000
+11 120 1110 11000
explain select * from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index b b 5 NULL NULL; Using where; Using index
@@ -192,6 +211,8 @@ a b c d
7 80 710 7000
8 90 810 8000
9 100 910 9000
+10 110 1010 10000
+11 120 1110 11000
explain select * from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL c NULL NULL NULL NULL; Using where
@@ -199,6 +220,8 @@ select * from t1 where c > 750;
a b c d
8 90 810 8000
9 100 910 9000
+10 110 1010 10000
+11 120 1110 11000
explain select a from t1 where a > 5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL NULL; Using where
@@ -208,6 +231,8 @@ a
7
8
9
+10
+11
explain select a,b from t1 where b > 30;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index b b 5 NULL NULL; Using where; Using index
@@ -219,6 +244,8 @@ a b
7 80
8 90
9 100
+10 110
+11 120
explain select a,b from t1 where c > 750;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL c NULL NULL NULL NULL; Using where
@@ -226,4 +253,6 @@ select a,c from t1 where c > 750;
a c
8 810
9 910
+10 1010
+11 1110
drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb/r/compressions.result b/storage/tokudb/mysql-test/tokudb/r/compressions.result
index 435b34b6af3..a65d431d797 100644
--- a/storage/tokudb/mysql-test/tokudb/r/compressions.result
+++ b/storage/tokudb/mysql-test/tokudb/r/compressions.result
@@ -8,4 +8,5 @@ FOUND 1 /compression_method=7/ in dump
FOUND 1 /compression_method=9/ in dump
FOUND 1 /compression_method=10/ in dump
FOUND 1 /compression_method=11/ in dump
+# restart
DROP TABLE t1, t2, t3, t4, t5;
diff --git a/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result b/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result
index 992f380591f..f73a9e43e4a 100644
--- a/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result
+++ b/storage/tokudb/mysql-test/tokudb/r/dir_per_db_rename_to_nonexisting_schema.result
@@ -21,6 +21,7 @@ CALL mtr.add_suppression("because destination db does not exist");
ALTER TABLE test.t1 RENAME foo.t1;
ERROR HY000: Error on rename of './test/t1' to './foo/t1' (errno: 2 "No such file or directory")
DROP TABLE t1;
+# restart
SELECT @@tokudb_data_dir;
@@tokudb_data_dir
NULL
diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result
index a80e1664663..b24c2fa9ce2 100644
--- a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result
+++ b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_innodb.result
@@ -1,7 +1,7 @@
drop table if exists t;
select @@optimizer_switch;
@@optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
create table t (id int not null, x int not null, y int not null, primary key(id), key(x)) engine=innodb;
insert into t values (0,0,0),(1,1,1),(2,2,2),(3,2,3),(4,2,4);
explain select x,id from t force index (x) where x=0 and id=0;
diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result
index 96d681407fe..8b7f4d8357c 100644
--- a/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb/r/ext_key_1_tokudb.result
@@ -1,7 +1,7 @@
drop table if exists t;
select @@optimizer_switch;
@@optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
create table t (id int not null, x int not null, y int not null, primary key(id), key(x)) engine=tokudb;
insert into t values (0,0,0),(1,1,1),(2,2,2),(3,2,3),(4,2,4);
explain select x,id from t force index (x) where x=0 and id=0;
diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result
index 43737c7753e..8e95644cdb7 100644
--- a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result
+++ b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_innodb.result
@@ -1,7 +1,7 @@
drop table if exists t;
select @@optimizer_switch;
@@optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
create table t (a int not null, b int not null, c int not null, d int not null, primary key(a,b), key(c,a)) engine=innodb;
insert into t values (0,0,0,0),(0,1,0,1);
explain select c,a,b from t where c=0 and a=0 and b=1;
diff --git a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result
index 1dcb1ee1b8b..740e78f3510 100644
--- a/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb/r/ext_key_2_tokudb.result
@@ -1,7 +1,7 @@
drop table if exists t;
select @@optimizer_switch;
@@optimizer_switch
-index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on
+index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=on,table_elimination=on,extended_keys=on,exists_to_in=on,orderby_uses_equalities=on,condition_pushdown_for_derived=on,split_materialized=on,condition_pushdown_for_subquery=on,rowid_filter=on,condition_pushdown_from_having=on
create table t (a int not null, b int not null, c int not null, d int not null, primary key(a,b), key(c,a)) engine=tokudb;
insert into t values (0,0,0,0),(0,1,0,1);
explain select c,a,b from t where c=0 and a=0 and b=1;
diff --git a/storage/tokudb/mysql-test/tokudb/r/mvcc-29.result b/storage/tokudb/mysql-test/tokudb/r/mvcc-29.result
index 994b906e2a2..f741dca5e3b 100644
--- a/storage/tokudb/mysql-test/tokudb/r/mvcc-29.result
+++ b/storage/tokudb/mysql-test/tokudb/r/mvcc-29.result
@@ -31,7 +31,7 @@ delete from foo where a > 5;
# number of rows should be 9
explain select * from foo where a > 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range PRIMARY PRIMARY 4 NULL 9 Using where
+1 SIMPLE foo range PRIMARY PRIMARY 4 NULL 5 Using where
# should have just 4 values
select * from foo where a > 1;
a b
@@ -43,7 +43,7 @@ connection conn1;
# number of rows should be 9
explain select * from foo where a > 1;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range PRIMARY PRIMARY 4 NULL 9 Using where
+1 SIMPLE foo range PRIMARY PRIMARY 4 NULL 5 Using where
# 9 values
select * From foo where a > 1;
a b
diff --git a/storage/tokudb/mysql-test/tokudb/r/mvcc-30.result b/storage/tokudb/mysql-test/tokudb/r/mvcc-30.result
index 6bf54efd0e9..37701efd366 100644
--- a/storage/tokudb/mysql-test/tokudb/r/mvcc-30.result
+++ b/storage/tokudb/mysql-test/tokudb/r/mvcc-30.result
@@ -31,7 +31,7 @@ delete from foo where a < 10;
# number of rows should be 9
explain select * from foo where a < 50;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range PRIMARY PRIMARY 4 NULL 9 Using where
+1 SIMPLE foo range PRIMARY PRIMARY 4 NULL 5 Using where
# should have just 4 values
select * from foo where a < 50;
a b
@@ -43,7 +43,7 @@ connection conn1;
# number of rows should be 9
explain select * from foo where a < 50;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range PRIMARY PRIMARY 4 NULL 9 Using where
+1 SIMPLE foo range PRIMARY PRIMARY 4 NULL 5 Using where
# 9 values
select * From foo where a < 50;
a b
diff --git a/storage/tokudb/mysql-test/tokudb/r/mvcc-31.result b/storage/tokudb/mysql-test/tokudb/r/mvcc-31.result
index cc2bb45a39c..a4043482397 100644
--- a/storage/tokudb/mysql-test/tokudb/r/mvcc-31.result
+++ b/storage/tokudb/mysql-test/tokudb/r/mvcc-31.result
@@ -31,7 +31,7 @@ delete from foo where a = 2 or a = 4 or a = 10 or a = 30 or a = 50;
# number of rows should be 8
explain select * from foo where a > 1 and a < 50;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range PRIMARY PRIMARY 4 NULL 8 Using where
+1 SIMPLE foo range PRIMARY PRIMARY 4 NULL 5 Using where
# should have just 4 values
select * from foo where a > 1 and a < 50;
a b
@@ -43,7 +43,7 @@ connection conn1;
# number of rows should be 8
explain select * from foo where a > 1 and a < 50;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range PRIMARY PRIMARY 4 NULL 8 Using where
+1 SIMPLE foo range PRIMARY PRIMARY 4 NULL 5 Using where
# 8 values
select * from foo where a > 1 and a < 50;
a b
diff --git a/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result b/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
index 024580d4258..fd584a3ca0f 100644
--- a/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
+++ b/storage/tokudb/mysql-test/tokudb/r/tokudb_mrr.result
@@ -305,25 +305,25 @@ dummy INT PRIMARY KEY,
a INT UNIQUE,
b INT
) ENGINE=TokuDB;
-INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5),(7,7,7),(8,8,8),(9,9,9);
COMMIT;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SELECT @@tx_isolation;
@@tx_isolation
REPEATABLE-READ
START TRANSACTION;
-EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+EXPLAIN SELECT * FROM t1 WHERE a >= 8 FOR UPDATE;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range a a 5 NULL 2 Using where
-SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+SELECT * FROM t1 WHERE a >= 8 FOR UPDATE;
dummy a b
-3 3 3
-5 5 5
+8 8 8
+9 9 9
connection con2;
SET AUTOCOMMIT=0;
SET TOKUDB_LOCK_TIMEOUT=2;
START TRANSACTION;
-INSERT INTO t1 VALUES (2,2,2);
+INSERT INTO t1 VALUES (8,8,8);
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
ROLLBACK;
connection con1;
diff --git a/storage/tokudb/mysql-test/tokudb/r/type_bit.result b/storage/tokudb/mysql-test/tokudb/r/type_bit.result
index c147c203d43..76a032d99c4 100644
--- a/storage/tokudb/mysql-test/tokudb/r/type_bit.result
+++ b/storage/tokudb/mysql-test/tokudb/r/type_bit.result
@@ -759,7 +759,7 @@ CREATE TABLE t1 (a BIT(7), b BIT(9), KEY(a, b));
INSERT INTO t1 VALUES(0, 0), (5, 3), (5, 6), (6, 4), (7, 0);
EXPLAIN SELECT a+0, b+0 FROM t1 WHERE a > 4 and b < 7 ORDER BY 2;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 2 NULL 4 Using where; Using index; Using filesort
+1 SIMPLE t1 index a a 5 NULL 5 Using where; Using index; Using filesort
DROP TABLE t1;
End of 5.0 tests
create table t1(a bit(7));
diff --git a/storage/tokudb/mysql-test/tokudb/r/type_blob.result b/storage/tokudb/mysql-test/tokudb/r/type_blob.result
index 1350bc03045..85f9d343e04 100644
--- a/storage/tokudb/mysql-test/tokudb/r/type_blob.result
+++ b/storage/tokudb/mysql-test/tokudb/r/type_blob.result
@@ -357,8 +357,6 @@ HELLO MY 1
a 1
hello 1
drop table t1;
-create table t1 (a text, unique (a(21000)));
-ERROR 42000: Specified key was too long; max key length is 3072 bytes
create table t1 (a text, key (a(2100)));
show create table t1;
Table Create Table
@@ -667,14 +665,14 @@ id txt
alter table t1 modify column txt blob;
explain select * from t1 where txt='Chevy' or txt is NULL;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL txt_index NULL NULL NULL 4 Using where
+1 SIMPLE t1 ref_or_null txt_index txt_index 23 const 2 Using where
select * from t1 where txt='Chevy' or txt is NULL;
id txt
1 Chevy
3 NULL
explain select * from t1 where txt='Chevy' or txt is NULL order by txt;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL txt_index NULL NULL NULL 4 Using where; Using filesort
+1 SIMPLE t1 ref_or_null txt_index txt_index 23 const 2 Using where; Using filesort
select * from t1 where txt='Chevy' or txt is NULL order by txt;
id txt
3 NULL
diff --git a/storage/tokudb/mysql-test/tokudb/r/type_datetime.result b/storage/tokudb/mysql-test/tokudb/r/type_datetime.result
index ed980f8cee1..dbe93b37271 100644
--- a/storage/tokudb/mysql-test/tokudb/r/type_datetime.result
+++ b/storage/tokudb/mysql-test/tokudb/r/type_datetime.result
@@ -363,7 +363,7 @@ greatest(cast('01-01-01' as date), '01-01-02') + 0
20010102
select least(cast('01-01-01' as datetime), '01-01-02') + 0;
least(cast('01-01-01' as datetime), '01-01-02') + 0
-20010101000000.000000
+20010101000000
select cast(least(cast('01-01-01' as datetime), '01-01-02') as signed);
cast(least(cast('01-01-01' as datetime), '01-01-02') as signed)
20010101000000
@@ -401,7 +401,7 @@ if(@bug28261 = f1, '', @bug28261:= f1)
2001-01-01
2002-02-02
Warnings:
-Warning 1292 Incorrect datetime value: ''
+Warning 1292 Truncated incorrect datetime value: ''
select if(@bug28261 = f1, '', @bug28261:= f1) from t1;
if(@bug28261 = f1, '', @bug28261:= f1)
2001-01-01
@@ -425,11 +425,11 @@ f1
2001-01-01 00:00:00
2002-02-02 00:00:00
Warnings:
-Warning 1292 Incorrect datetime value: '2002010'
+Warning 1292 Truncated incorrect datetime value: '2002010'
select * from t1 where f1 between 20020101 and 2007010100000;
f1
Warnings:
-Warning 1292 Incorrect datetime value: '2007010100000'
+Warning 1292 Truncated incorrect datetime value: '2007010100000'
drop table t1;
#
# Bug#27216: functions with parameters of different date types may
@@ -500,7 +500,7 @@ f1
45:44:44
15:44:44
Warnings:
-Warning 1292 Incorrect datetime value: '1'
+Warning 1292 Truncated incorrect datetime value: '1'
drop table t1;
create table t1 (a tinyint);
insert into t1 values (), (), ();
diff --git a/storage/tokudb/mysql-test/tokudb/r/type_varchar.result b/storage/tokudb/mysql-test/tokudb/r/type_varchar.result
index bf98e12ce16..881a4cd66ac 100644
--- a/storage/tokudb/mysql-test/tokudb/r/type_varchar.result
+++ b/storage/tokudb/mysql-test/tokudb/r/type_varchar.result
@@ -13,7 +13,7 @@ t1 CREATE TABLE `t1` (
show create table vchar;
Table Create Table
vchar CREATE TABLE `vchar` (
- `v` varchar(30) DEFAULT NULL,
+ `v` varchar(30)/*old*/ DEFAULT NULL,
`c` char(3) DEFAULT NULL,
`e` enum('abc','def','ghi') DEFAULT NULL,
`t` text DEFAULT NULL
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_add_drop.test b/storage/tokudb/mysql-test/tokudb/t/card_add_drop.test
index 8563bf28ad1..66b26f20123 100644
--- a/storage/tokudb/mysql-test/tokudb/t/card_add_drop.test
+++ b/storage/tokudb/mysql-test/tokudb/t/card_add_drop.test
@@ -7,6 +7,9 @@ disable_warnings;
drop table if exists tt;
enable_warnings;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
create table tt (a int, b int, c int, d int, key(a), key(b), key(c));
insert into tt values (0,0,0,0),(1,0,0,0),(2,0,1,0),(3,0,1,0);
@@ -24,5 +27,6 @@ show indexes from tt;
# test that cardinality is persistent
flush tables;
show indexes from tt;
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_add_index.test b/storage/tokudb/mysql-test/tokudb/t/card_add_index.test
index 455dae55236..54f29fcf289 100644
--- a/storage/tokudb/mysql-test/tokudb/t/card_add_index.test
+++ b/storage/tokudb/mysql-test/tokudb/t/card_add_index.test
@@ -7,6 +7,9 @@ disable_warnings;
drop table if exists tt;
enable_warnings;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
create table tt (a int, b int, c int, primary key(a));
insert into tt values (1,0,0),(2,0,0),(3,0,1),(4,0,1);
@@ -30,5 +33,6 @@ show indexes from tt;
# test that cardinality is persistent
flush tables;
show indexes from tt;
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_drop_index.test b/storage/tokudb/mysql-test/tokudb/t/card_drop_index.test
index b8e7d575dbd..a300f035519 100644
--- a/storage/tokudb/mysql-test/tokudb/t/card_drop_index.test
+++ b/storage/tokudb/mysql-test/tokudb/t/card_drop_index.test
@@ -7,6 +7,9 @@ disable_warnings;
drop table if exists tt;
enable_warnings;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
create table tt (a int, b int, c int, key(b), key(c), primary key(a));
insert into tt values (1,0,0),(2,0,0),(3,0,1),(4,0,1);
@@ -26,5 +29,6 @@ show indexes from tt;
# test that cardinality is persistent
flush tables;
show indexes from tt;
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_drop_index_2.test b/storage/tokudb/mysql-test/tokudb/t/card_drop_index_2.test
index 88e0f6a413f..9e8b412760c 100644
--- a/storage/tokudb/mysql-test/tokudb/t/card_drop_index_2.test
+++ b/storage/tokudb/mysql-test/tokudb/t/card_drop_index_2.test
@@ -7,6 +7,9 @@ disable_warnings;
drop table if exists tt;
enable_warnings;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
create table tt (a int, b int, c int, primary key(a), key(b), key(c));
let $a=0;
while ($a < 500) {
@@ -32,4 +35,5 @@ show indexes from tt;
flush tables;
show indexes from tt;
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_drop_pk.test b/storage/tokudb/mysql-test/tokudb/t/card_drop_pk.test
index 365d920aa80..8de6a1dc7f2 100644
--- a/storage/tokudb/mysql-test/tokudb/t/card_drop_pk.test
+++ b/storage/tokudb/mysql-test/tokudb/t/card_drop_pk.test
@@ -7,6 +7,9 @@ disable_warnings;
drop table if exists tt;
enable_warnings;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
create table tt (a int, b int, c int, key(b), key(c), primary key(a));
insert into tt values (1,0,0),(2,0,0),(3,0,1),(4,0,1);
@@ -22,5 +25,6 @@ show indexes from tt;
# test that cardinality is persistent
flush tables;
show indexes from tt;
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_pk_2.test b/storage/tokudb/mysql-test/tokudb/t/card_pk_2.test
index 826714931aa..b24ff0f26f0 100644
--- a/storage/tokudb/mysql-test/tokudb/t/card_pk_2.test
+++ b/storage/tokudb/mysql-test/tokudb/t/card_pk_2.test
@@ -5,6 +5,9 @@ disable_warnings;
drop table if exists tt;
enable_warnings;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
create table tt (a int, b int, primary key(a,b));
insert into tt values (0,0),(0,1),(1,0),(1,1);
@@ -16,5 +19,6 @@ show indexes from tt;
# test that cardinality is persistent
flush tables;
show indexes from tt;
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_pk_sk.test b/storage/tokudb/mysql-test/tokudb/t/card_pk_sk.test
index f1e11fbbc2a..7e66c677930 100644
--- a/storage/tokudb/mysql-test/tokudb/t/card_pk_sk.test
+++ b/storage/tokudb/mysql-test/tokudb/t/card_pk_sk.test
@@ -5,6 +5,9 @@ disable_warnings;
drop table if exists tt;
enable_warnings;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
create table tt (a int, b int, primary key(a), key(b));
let $i=0;
while ($i < 1000) {
@@ -40,6 +43,7 @@ show indexes from tt;
# test that cardinality is persistent
flush tables;
show indexes from tt;
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_scale_percent.test b/storage/tokudb/mysql-test/tokudb/t/card_scale_percent.test
index 75c53611308..ded6401c09d 100644
--- a/storage/tokudb/mysql-test/tokudb/t/card_scale_percent.test
+++ b/storage/tokudb/mysql-test/tokudb/t/card_scale_percent.test
@@ -6,6 +6,9 @@ set @orig_throttle = @@session.tokudb_analyze_throttle;
set @orig_time = @@session.tokudb_analyze_time;
set @orig_scale_percent = @@global.tokudb_cardinality_scale_percent;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
create table tt (a int, b int, c int, d int, primary key(a), key(b), key(c), key(d)) engine=tokudb;
let $i=0;
while ($i < 1000) {
@@ -50,5 +53,6 @@ drop table tt;
set session tokudb_analyze_throttle = @orig_throttle;
set session tokudb_analyze_time = @orig_time;
set global tokudb_cardinality_scale_percent = @orig_scale_percent;
+set @@use_stat_tables = @save_use_stat_tables;
-- enable_query_log
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_sk.test b/storage/tokudb/mysql-test/tokudb/t/card_sk.test
index cf50b8b167b..12c9ddbb811 100644
--- a/storage/tokudb/mysql-test/tokudb/t/card_sk.test
+++ b/storage/tokudb/mysql-test/tokudb/t/card_sk.test
@@ -5,6 +5,9 @@ disable_warnings;
drop table if exists tt;
enable_warnings;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
create table tt (a int, b int, key(b));
insert into tt values (1,0),(2,1),(3,2),(4,3);
insert into tt values (5,0),(6,1),(7,2),(8,3);
@@ -17,5 +20,6 @@ show indexes from tt;
# test that cardinality is persistent
flush tables;
show indexes from tt;
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/t/card_sk_2.test b/storage/tokudb/mysql-test/tokudb/t/card_sk_2.test
index 2cd4ece972e..f29b938105f 100644
--- a/storage/tokudb/mysql-test/tokudb/t/card_sk_2.test
+++ b/storage/tokudb/mysql-test/tokudb/t/card_sk_2.test
@@ -5,6 +5,9 @@ disable_warnings;
drop table if exists tt;
enable_warnings;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
create table tt (a int, b int, key(a,b));
insert into tt values (0,0),(0,1),(1,0),(1,1);
@@ -16,5 +19,6 @@ show indexes from tt;
# test that cardinality is persistent
flush tables;
show indexes from tt;
+set @@use_stat_tables = @save_use_stat_tables;
drop table tt;
diff --git a/storage/tokudb/mysql-test/tokudb/t/cluster_create_table.test b/storage/tokudb/mysql-test/tokudb/t/cluster_create_table.test
index c2196bf681e..0cace80e092 100644
--- a/storage/tokudb/mysql-test/tokudb/t/cluster_create_table.test
+++ b/storage/tokudb/mysql-test/tokudb/t/cluster_create_table.test
@@ -22,7 +22,9 @@ alter table t1 drop primary key;
alter table t1 add primary key (a,b,c,d);
alter table t1 add key bar(d,c,b,a) clustering=yes;
show create table t1;
-insert into t1 value (1,1,1,1),(2,2,2,2),(3,3,3,3),(4,4,4,4),(32,54,12,56);
+insert into t1 value
+ (1,1,1,1),(2,2,2,2),(3,3,3,3),(4,4,4,4),(5,5,5,5),(6,6,6,6),
+ (32,54,12,56);
explain select * from t1;
select * from t1;
explain select d from t1 where d > 30;
diff --git a/storage/tokudb/mysql-test/tokudb/t/cluster_delete.test b/storage/tokudb/mysql-test/tokudb/t/cluster_delete.test
index 1c0ebad94e4..cb490920259 100644
--- a/storage/tokudb/mysql-test/tokudb/t/cluster_delete.test
+++ b/storage/tokudb/mysql-test/tokudb/t/cluster_delete.test
@@ -7,8 +7,10 @@ DROP TABLE IF EXISTS t1;
create table t1(a int, b int, c int, d int, primary key(a), key(b) clustering=yes, key (c))engine=tokudb;
-insert into t1 values (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),(9,90,900,9000);
-
+insert into t1 values
+ (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),
+ (5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),
+ (9,90,900,9000),(10,100,1000,10000),(11,110,1100,11000);
#normal queries
@@ -19,38 +21,38 @@ select * from t1 where a > 5;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where b > 30;
-select * from t1 where b > 30;
+explain select * from t1 where b > 70;
+select * from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where c > 750;
-select * from t1 where c > 750;
+explain select * from t1 where c > 850;
+select * from t1 where c > 850;
#covering indexes
# ignore rows column
--replace_column 9 NULL;
-explain select a from t1 where a > 8;
-select a from t1 where a > 8;
+explain select a from t1 where a > 10;
+select a from t1 where a > 10;
# ignore rows column
--replace_column 9 NULL;
-explain select a,b from t1 where b > 30;
-select a,b from t1 where b > 30;
+explain select a,b from t1 where b > 70;
+select a,b from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select a,b from t1 where c > 750;
-select a,c from t1 where c > 750;
+explain select a,b from t1 where c > 950;
+select a,c from t1 where c > 950;
delete from t1 where b>30 and b < 60;
select * from t1;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where a > 5;
-select * from t1 where a > 5;
+explain select * from t1 where a > 8;
+select * from t1 where a > 8;
# ignore rows column
--replace_column 9 NULL;
@@ -76,8 +78,8 @@ select a,b from t1 where b > 30;
# ignore rows column
--replace_column 9 NULL;
-explain select a,b from t1 where c > 750;
-select a,c from t1 where c > 750;
+explain select a,b from t1 where c > 1050;
+select a,c from t1 where c > 1050;
alter table t1 drop primary key;
@@ -88,13 +90,13 @@ select * from t1 where a > 5;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where b > 30;
-select * from t1 where b > 30;
+explain select * from t1 where b > 70;
+select * from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where c > 750;
-select * from t1 where c > 750;
+explain select * from t1 where c > 1050;
+select * from t1 where c > 1050;
#covering indexes
@@ -105,13 +107,13 @@ select a from t1 where a > 5;
# ignore rows column
--replace_column 9 NULL;
-explain select a,b from t1 where b > 30;
-select a,b from t1 where b > 30;
+explain select a,b from t1 where b > 70;
+select a,b from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select a,b from t1 where c > 750;
-select a,c from t1 where c > 750;
+explain select a,b from t1 where c > 1050;
+select a,c from t1 where c > 1050;
delete from t1 where b > 10 and b < 90;
diff --git a/storage/tokudb/mysql-test/tokudb/t/cluster_filter_key.test b/storage/tokudb/mysql-test/tokudb/t/cluster_filter_key.test
index 192e56f10a1..dc788d531d6 100644
--- a/storage/tokudb/mysql-test/tokudb/t/cluster_filter_key.test
+++ b/storage/tokudb/mysql-test/tokudb/t/cluster_filter_key.test
@@ -13,10 +13,10 @@ insert into t1 values (3,"30",300);
insert into t1 values (4,"40",400);
insert into t1 values (5,"50",500);
-explain select * from t1 where a > 2;
-select * from t1 where a > 2;
-select b from t1 where a > 2;
-select c from t1 where a > 2;
+explain select * from t1 where a > 3;
+select * from t1 where a > 3;
+select b from t1 where a > 3;
+select c from t1 where a > 3;
#explain delete from t1 where a <2;
delete from t1 where a <2;
diff --git a/storage/tokudb/mysql-test/tokudb/t/cluster_query_plan.test b/storage/tokudb/mysql-test/tokudb/t/cluster_query_plan.test
index a438653958a..23207f277b2 100644
--- a/storage/tokudb/mysql-test/tokudb/t/cluster_query_plan.test
+++ b/storage/tokudb/mysql-test/tokudb/t/cluster_query_plan.test
@@ -11,18 +11,18 @@ insert into t1 values (1,1,1,1),(2,2,2,2),(3,3,3,3),(4,4,4,4),(5,5,5,5),(6,6,6,6
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where b > 2;
+explain select * from t1 where b > 5;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where c > 2;
+explain select * from t1 where c > 5;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where a > 4;
+explain select * from t1 where a > 7;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where c > 7;
+explain select * from t1 where c > 8;
# ignore rows column
--replace_column 9 NULL;
explain select * from t1 where b > 7;
diff --git a/storage/tokudb/mysql-test/tokudb/t/cluster_update.test b/storage/tokudb/mysql-test/tokudb/t/cluster_update.test
index ce5a0254372..42a004f1f03 100644
--- a/storage/tokudb/mysql-test/tokudb/t/cluster_update.test
+++ b/storage/tokudb/mysql-test/tokudb/t/cluster_update.test
@@ -7,7 +7,10 @@ DROP TABLE IF EXISTS t1;
create table t1(a int, b int, c int, d int, primary key(a), key(b) clustering=yes, key (c))engine=tokudb;
-insert into t1 values (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),(5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),(9,90,900,9000);
+insert into t1 values
+ (1,10,100,1000),(2,20,200,2000),(3,30,300,3000),(4,40,400,4000),
+ (5,50,500,5000),(6,60,600,6000),(7,70,700,7000),(8,80,800,8000),
+ (9,90,900,9000),(10,100,1000,10000),(11,110,1100,11000);
#normal queries
@@ -19,30 +22,30 @@ select * from t1 where a > 5;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where b > 30;
-select * from t1 where b > 30;
+explain select * from t1 where b > 70;
+select * from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where c > 750;
-select * from t1 where c > 750;
+explain select * from t1 where c > 950;
+select * from t1 where c > 950;
#covering indexes
# ignore rows column
--replace_column 9 NULL;
-explain select a from t1 where a > 8;
+explain select a from t1 where a > 10;
select a from t1 where a > 8;
# ignore rows column
--replace_column 9 NULL;
-explain select a,b from t1 where b > 30;
-select a,b from t1 where b > 30;
+explain select a,b from t1 where b > 70;
+select a,b from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select a,b from t1 where c > 750;
-select a,c from t1 where c > 750;
+explain select a,b from t1 where c > 950;
+select a,c from t1 where c > 950;
update t1 set c = c+5, b = b+5 where b>30;
@@ -64,8 +67,8 @@ select * from t1 where c > 750;
#covering indexes
# ignore rows column
--replace_column 9 NULL;
-explain select a from t1 where a > 8;
-select a from t1 where a > 8;
+explain select a from t1 where a > 10;
+select a from t1 where a > 10;
# ignore rows column
--replace_column 4 NA 9 NULL;
@@ -86,13 +89,13 @@ select * from t1 where a > 5;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where b > 30;
-select * from t1 where b > 30;
+explain select * from t1 where b > 70;
+select * from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select * from t1 where c > 750;
-select * from t1 where c > 750;
+explain select * from t1 where c > 950;
+select * from t1 where c > 950;
#covering indexes
# ignore rows column
@@ -102,13 +105,13 @@ select a from t1 where a > 5;
# ignore rows column
--replace_column 9 NULL;
-explain select a,b from t1 where b > 30;
-select a,b from t1 where b > 30;
+explain select a,b from t1 where b > 70;
+select a,b from t1 where b > 70;
# ignore rows column
--replace_column 9 NULL;
-explain select a,b from t1 where c > 750;
-select a,c from t1 where c > 750;
+explain select a,b from t1 where c > 950;
+select a,c from t1 where c > 950;
update t1 set c = c+5, b = b+5 where b>30;
select * from t1;
diff --git a/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test b/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
index 6130933b279..dcee5940907 100644
--- a/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
+++ b/storage/tokudb/mysql-test/tokudb/t/tokudb_mrr.test
@@ -40,16 +40,16 @@ CREATE TABLE t1 (
b INT
) ENGINE=TokuDB;
-INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5);
+INSERT INTO t1 VALUES (1,1,1),(3,3,3),(5,5,5),(7,7,7),(8,8,8),(9,9,9);
COMMIT;
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
SELECT @@tx_isolation;
START TRANSACTION;
-EXPLAIN SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+EXPLAIN SELECT * FROM t1 WHERE a >= 8 FOR UPDATE;
-SELECT * FROM t1 WHERE a >= 2 FOR UPDATE;
+SELECT * FROM t1 WHERE a >= 8 FOR UPDATE;
connection con2;
@@ -58,7 +58,7 @@ SET TOKUDB_LOCK_TIMEOUT=2;
START TRANSACTION;
--error ER_LOCK_WAIT_TIMEOUT
-INSERT INTO t1 VALUES (2,2,2);
+INSERT INTO t1 VALUES (8,8,8);
ROLLBACK;
connection con1;
diff --git a/storage/tokudb/mysql-test/tokudb/t/type_blob.test b/storage/tokudb/mysql-test/tokudb/t/type_blob.test
index 6a429c46a55..7cf77e386c7 100644
--- a/storage/tokudb/mysql-test/tokudb/t/type_blob.test
+++ b/storage/tokudb/mysql-test/tokudb/t/type_blob.test
@@ -133,8 +133,6 @@ select c,count(*) from t1 group by c;
select d,count(*) from t1 group by d;
drop table t1;
--- error 1071
-create table t1 (a text, unique (a(21000))); # should give an error
create table t1 (a text, key (a(2100))); # key is auto-truncated
replace_regex /ENGINE=[a-zA-Z]*/ENGINE=ENGINE/;
show create table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_alter_table/r/hcad_with_lock_sps.result b/storage/tokudb/mysql-test/tokudb_alter_table/r/hcad_with_lock_sps.result
index 88f28362119..80402df665d 100644
--- a/storage/tokudb/mysql-test/tokudb_alter_table/r/hcad_with_lock_sps.result
+++ b/storage/tokudb/mysql-test/tokudb_alter_table/r/hcad_with_lock_sps.result
@@ -20,7 +20,7 @@ z a b c
999 4 40 400
explain select * from foo where b > 20;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range b b 5 NULL 2 Using where; Using index
+1 SIMPLE foo index b b 5 NULL 4 Using where; Using index
select* from foo where b > 10;
z a b c
999 2 20 200
@@ -40,7 +40,7 @@ a b c
4 40 400
explain select * from foo where b > 20;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range b b 5 NULL 2 Using where; Using index
+1 SIMPLE foo index b b 5 NULL 4 Using where; Using index
select* from foo where b > 10;
a b c
2 20 200
@@ -59,7 +59,7 @@ a b c z
4 40 400 NULL
explain select * from foo where b > 20;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range b b 5 NULL 2 Using where; Using index
+1 SIMPLE foo index b b 5 NULL 4 Using where; Using index
select* from foo where b > 10;
a b c z
2 20 200 NULL
@@ -94,7 +94,7 @@ a b c
3 30 300
4 40 400
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range b b 5 NULL 2 Using where; Using index
+1 SIMPLE foo index b b 5 NULL 4 Using where; Using index
a b c
2 20 200
3 30 300
@@ -110,7 +110,7 @@ a b c
4 40 400
explain select * from foo where b > 20;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range b b 5 NULL 2 Using where; Using index
+1 SIMPLE foo index b b 5 NULL 4 Using where; Using index
select* from foo where b > 10;
a b c
2 20 200
@@ -154,7 +154,7 @@ a b c g
4 40 400 NULL
5 50 500 NULL
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range b b 5 NULL 3 Using where; Using index
+1 SIMPLE foo index b b 5 NULL 5 Using where; Using index
set autocommit=on;
explain select * from foo;
id select_type table type possible_keys key key_len ref rows Extra
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/2970.result b/storage/tokudb/mysql-test/tokudb_bugs/r/2970.result
index 83ba8821f27..c322865b09d 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/2970.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/2970.result
@@ -6,5 +6,5 @@ create table t2970 (a int, b int, c int, d int, key(a), key(a,b));
insert into t2970 values (1,1,1,1),(1,2,3,4);
explain select a,count(b),max(b) from t2970 where a > 0 group by a order by a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t2970 index a,a_2 a_2 10 NULL 2 Using where; Using index
+1 SIMPLE t2970 range a,a_2 a_2 5 NULL 2 Using where; Using index
drop table t2970;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/5733_innodb.result b/storage/tokudb/mysql-test/tokudb_bugs/r/5733_innodb.result
index 10cdb4767f6..ba21899ed92 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/5733_innodb.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/5733_innodb.result
@@ -10003,25 +10003,25 @@ insert into t values (9998,0);
insert into t values (9999,0);
explain select id from t where id>0 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t range PRIMARY PRIMARY 8 NULL # Using where; Using index
+1 SIMPLE t index PRIMARY PRIMARY 8 NULL # Using where; Using index
explain select * from t where id>0 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY PRIMARY 8 NULL # Using where
explain select id from t where id>1000 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t range PRIMARY PRIMARY 8 NULL # Using where; Using index
+1 SIMPLE t index PRIMARY PRIMARY 8 NULL # Using where; Using index
explain select * from t where id>1000 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY PRIMARY 8 NULL # Using where
explain select id from t where id>5000 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t range PRIMARY PRIMARY 8 NULL # Using where; Using index
+1 SIMPLE t index PRIMARY PRIMARY 8 NULL # Using where; Using index
explain select * from t where id>5000 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY PRIMARY 8 NULL # Using where
explain select id from t where id>6000 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t range PRIMARY PRIMARY 8 NULL # Using where; Using index
+1 SIMPLE t index PRIMARY PRIMARY 8 NULL # Using where; Using index
explain select * from t where id>6000 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY PRIMARY 8 NULL # Using where
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/5733_tokudb.result b/storage/tokudb/mysql-test/tokudb_bugs/r/5733_tokudb.result
index 1db2c5746e2..0a736a97ec9 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/5733_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/5733_tokudb.result
@@ -10009,7 +10009,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY PRIMARY 8 NULL # Using where
explain select id from t where id>1000 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t range PRIMARY PRIMARY 8 NULL # Using where; Using index
+1 SIMPLE t index PRIMARY PRIMARY 8 NULL # Using where; Using index
explain select * from t where id>1000 limit 10;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t range PRIMARY PRIMARY 8 NULL # Using where
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
index c870ac1c784..14c5554b754 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/PS-3773.result
@@ -1,3 +1,4 @@
+# restart: --log-error=MYSQLTEST_VARDIR/tmp/tokudb.bugs.PS-3773.log
CREATE TABLE t1(a INT, b INT, c INT, PRIMARY KEY(a), KEY(b)) ENGINE=TokuDB;
SET tokudb_auto_analyze=0;
INSERT INTO t1 VALUES(0,0,0), (1,1,1), (2,2,2), (3,3,3), (4,4,4), (5,5,5);
@@ -6,3 +7,4 @@ SELECT * FROM t1 WHERE b = 2;
ERROR HY000: Index for table 't1' is corrupt; try to repair it
DROP TABLE t1;
FOUND 1 /ha_tokudb::read_full_row on table/ in tokudb.bugs.PS-3773.log
+# restart
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/bulk_fetch.result b/storage/tokudb/mysql-test/tokudb_bugs/r/bulk_fetch.result
index 86943c2d2f6..70201e3a8bb 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/bulk_fetch.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/bulk_fetch.result
@@ -60,13 +60,11 @@ c
400
500
600
-explain select * from foo where c > 300;
+explain select * from foo where c > 500;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE foo range c c 5 NULL 3 Using where; Using index
-select * from foo where c > 300;
+1 SIMPLE foo range c c 5 NULL 1 Using where; Using index
+select * from foo where c > 500;
a b c
-4 40 400
-5 50 500
6 60 600
drop table foo;
create table foo (a int, b int);
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash.result
index 70bc86e1abc..fb4718353be 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists t;
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= COMPLEMENTARY;
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -13,9 +15,11 @@ t 0 PRIMARY 1 id A 6 NULL NULL BTREE
t 1 x 1 x A 6 NULL NULL YES BTREE
analyze table t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 6 NULL NULL BTREE
t 1 x 1 x A 6 NULL NULL YES BTREE
+set @@use_stat_tables= @save_use_stat_tables;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1.result
index b6d9fd7da85..9746eb81aa1 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists t;
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= COMPLEMENTARY;
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -12,9 +14,11 @@ t 0 PRIMARY 1 id A 4 NULL NULL BTREE
t 1 x 1 x A 4 NULL NULL YES BTREE
analyze table t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 4 NULL NULL BTREE
t 1 x 1 x A 4 NULL NULL YES BTREE
+set @@use_stat_tables= @save_use_stat_tables;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result
index caaa963c325..33c60935952 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_1_pick.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists t;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -13,9 +15,11 @@ t 0 PRIMARY 1 id A 7 NULL NULL BTREE
t 1 x 1 x A 7 NULL NULL YES BTREE
analyze table t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 7 NULL NULL BTREE
t 1 x 1 x A 7 NULL NULL YES BTREE
+set @@use_stat_tables = @save_use_stat_tables;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2.result
index 6d345d98c95..02939dba662 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists t;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -12,9 +14,11 @@ t 0 PRIMARY 1 id A 4 NULL NULL BTREE
t 1 x 1 x A 4 NULL NULL YES BTREE
analyze table t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 4 NULL NULL BTREE
t 1 x 1 x A 4 NULL NULL YES BTREE
+set @@use_stat_tables = @save_use_stat_tables;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2_pick.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2_pick.result
index 06639c311cf..cb4dfbf9d9b 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2_pick.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db756_card_part_hash_2_pick.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists t;
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= 'COMPLEMENTARY';
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -13,9 +15,11 @@ t 0 PRIMARY 1 id A 7 NULL NULL BTREE
t 1 x 1 x A 7 NULL NULL YES BTREE
analyze table t;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 7 NULL NULL BTREE
t 1 x 1 x A 7 NULL NULL YES BTREE
+set @@use_stat_tables= @save_use_stat_tables;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result b/storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result
index 62337802688..c9b32d51471 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/db757_part_alter_analyze.result
@@ -1,5 +1,7 @@
set default_storage_engine='tokudb';
drop table if exists t;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table t (id int, x int, y int, primary key (id), key (x), key (y))
partition by range(id)
( partition p0 values less than (10), partition p1 values less than maxvalue);
@@ -11,6 +13,7 @@ t 1 x 1 x A 5 NULL NULL YES BTREE
t 1 y 1 y A 5 NULL NULL YES BTREE
alter table t analyze partition p0;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -19,6 +22,7 @@ t 1 x 1 x A 5 NULL NULL YES BTREE
t 1 y 1 y A 5 NULL NULL YES BTREE
alter table t analyze partition p1;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -33,6 +37,7 @@ t 1 x 1 x A 9 NULL NULL YES BTREE
t 1 y 1 y A 9 NULL NULL YES BTREE
alter table t analyze partition p0;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
@@ -41,10 +46,12 @@ t 1 x 1 x A 9 NULL NULL YES BTREE
t 1 y 1 y A 9 NULL NULL YES BTREE
alter table t analyze partition p1;
Table Op Msg_type Msg_text
+test.t analyze status Engine-independent statistics collected
test.t analyze status OK
show indexes from t;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t 0 PRIMARY 1 id A 9 NULL NULL BTREE
t 1 x 1 x A 9 NULL NULL YES BTREE
t 1 y 1 y A 9 NULL NULL YES BTREE
+set @@use_stat_tables = @save_use_stat_tables;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/frm_store.result b/storage/tokudb/mysql-test/tokudb_bugs/r/frm_store.result
index 65503b908ca..2f025dcfb3d 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/frm_store.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/frm_store.result
@@ -2,6 +2,7 @@ SET DEFAULT_STORAGE_ENGINE = 'tokudb';
DROP TABLE IF EXISTS foo,bar;
create table foo (a int, b int);
create table bar (a int, key(a));
+# restart
show create table foo;
Table Create Table
foo CREATE TABLE `foo` (
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/frm_store2.result b/storage/tokudb/mysql-test/tokudb_bugs/r/frm_store2.result
index b202da70fcf..f661af9a20e 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/frm_store2.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/frm_store2.result
@@ -5,6 +5,7 @@ create table foo (a bigint, b bigint);
create table bar (a int);
alter table foo drop column a;
alter table bar add column b int, add column c int;
+# restart
show create table foo;
Table Create Table
foo CREATE TABLE `foo` (
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/frm_store3.result b/storage/tokudb/mysql-test/tokudb_bugs/r/frm_store3.result
index 08f157f5223..ca33bc0c64b 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/frm_store3.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/frm_store3.result
@@ -4,6 +4,7 @@ create table foo (a int, b int, key(b,a), primary key (a))engine=TokuDB;
create table bar (a bigint)engine=TokuDB;
alter table foo drop index b;
alter table bar add index (a);
+# restart
show create table foo;
Table Create Table
foo CREATE TABLE `foo` (
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/r/index_read.result b/storage/tokudb/mysql-test/tokudb_bugs/r/index_read.result
index 5d5c4d43cd8..5a5ba28bb82 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/r/index_read.result
+++ b/storage/tokudb/mysql-test/tokudb_bugs/r/index_read.result
@@ -150,13 +150,11 @@ a b c
2 20 200
1 100 100
1 10 100
-explain select * from foo where a>=4;
+explain select * from foo where a>=5;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE foo range a a 4 NULL NULL; Using where
-select * from foo where a>=4;
+select * from foo where a>=5;
a b c
-4 40 400
-4 400 400
5 50 500
5 500 500
6 60 600
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/bulk_fetch.test b/storage/tokudb/mysql-test/tokudb_bugs/t/bulk_fetch.test
index 41b8f1fa37f..11ce1e74b8c 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/bulk_fetch.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/bulk_fetch.test
@@ -26,8 +26,8 @@ alter table foo drop index b;
alter table foo add index c(c) clustering=yes;
select c from foo;
-explain select * from foo where c > 300;
-select * from foo where c > 300;
+explain select * from foo where c > 500;
+select * from foo where c > 500;
drop table foo;
# simple test on hidden primary key
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash.test
index 97dda1cb1b8..440a79a43d7 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash.test
@@ -5,6 +5,8 @@ set default_storage_engine='tokudb';
disable_warnings;
drop table if exists t;
enable_warnings;
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= COMPLEMENTARY;
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
insert into t values (1,1),(3,1),(5,1);
@@ -12,4 +14,5 @@ insert into t values (2,1),(4,1),(6,1);
show indexes from t;
analyze table t;
show indexes from t;
+set @@use_stat_tables= @save_use_stat_tables;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_1.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_1.test
index 3ef66a4b1e6..b395b70b62c 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_1.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_1.test
@@ -5,10 +5,14 @@ set default_storage_engine='tokudb';
disable_warnings;
drop table if exists t;
enable_warnings;
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= COMPLEMENTARY;
+
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
insert into t values (1,1),(3,1),(5,1);
show indexes from t;
analyze table t;
show indexes from t;
+set @@use_stat_tables= @save_use_stat_tables;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_1_pick.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_1_pick.test
index b8044641109..c96c58f31ed 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_1_pick.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_1_pick.test
@@ -5,6 +5,8 @@ set default_storage_engine='tokudb';
disable_warnings;
drop table if exists t;
enable_warnings;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
insert into t values (1,1),(3,2),(5,3);
@@ -12,4 +14,5 @@ insert into t values (2,1),(4,1),(6,1),(8,1);
show indexes from t;
analyze table t;
show indexes from t;
+set @@use_stat_tables = @save_use_stat_tables;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_2.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_2.test
index ca10218cb05..de32dfd7f54 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_2.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_2.test
@@ -5,10 +5,14 @@ set default_storage_engine='tokudb';
disable_warnings;
drop table if exists t;
enable_warnings;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
+
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
insert into t values (2,1),(4,1),(6,1);
show indexes from t;
analyze table t;
show indexes from t;
+set @@use_stat_tables = @save_use_stat_tables;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_2_pick.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_2_pick.test
index 2cc55ec864d..999ce6ba14f 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_2_pick.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db756_card_part_hash_2_pick.test
@@ -5,6 +5,9 @@ set default_storage_engine='tokudb';
disable_warnings;
drop table if exists t;
enable_warnings;
+
+set @save_use_stat_tables= @@use_stat_tables;
+set @@use_stat_tables= 'COMPLEMENTARY';
create table t (id int, x int, primary key (id), key (x)) partition by hash(id) partitions 2;
show indexes from t;
insert into t values (1,1),(3,2),(5,3),(7,4);
@@ -12,4 +15,5 @@ insert into t values (2,1),(4,1),(6,1);
show indexes from t;
analyze table t;
show indexes from t;
+set @@use_stat_tables= @save_use_stat_tables;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/db757_part_alter_analyze.test b/storage/tokudb/mysql-test/tokudb_bugs/t/db757_part_alter_analyze.test
index fc1599591be..ab9d816be4b 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/db757_part_alter_analyze.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/db757_part_alter_analyze.test
@@ -4,6 +4,8 @@ set default_storage_engine='tokudb';
disable_warnings;
drop table if exists t;
enable_warnings;
+set @save_use_stat_tables = @@use_stat_tables;
+set @@use_stat_tables = COMPLEMENTARY;
create table t (id int, x int, y int, primary key (id), key (x), key (y))
partition by range(id)
@@ -22,5 +24,6 @@ alter table t analyze partition p0;
show indexes from t;
alter table t analyze partition p1;
show indexes from t;
+set @@use_stat_tables = @save_use_stat_tables;
drop table t;
diff --git a/storage/tokudb/mysql-test/tokudb_bugs/t/index_read.test b/storage/tokudb/mysql-test/tokudb_bugs/t/index_read.test
index 03664415349..c79a6341b3a 100644
--- a/storage/tokudb/mysql-test/tokudb_bugs/t/index_read.test
+++ b/storage/tokudb/mysql-test/tokudb_bugs/t/index_read.test
@@ -117,8 +117,8 @@ select * from foo where a<3 order by a desc;
#HA_READ_KEY_OR_NEXT
# ignore rows column
--replace_column 9 NULL;
-explain select * from foo where a>=4;
-select * from foo where a>=4;
+explain select * from foo where a>=5;
+select * from foo where a>=5;
#HA_READ_KEY_OR_PREV not used anymore
diff --git a/storage/tokudb/mysql-test/tokudb_mariadb/r/clustering.result b/storage/tokudb/mysql-test/tokudb_mariadb/r/clustering.result
index 28c758ff628..fca204e28eb 100644
--- a/storage/tokudb/mysql-test/tokudb_mariadb/r/clustering.result
+++ b/storage/tokudb/mysql-test/tokudb_mariadb/r/clustering.result
@@ -12,6 +12,7 @@ insert t1 values (1, 1, 1);
insert t1 select a+1, a+1, a+1 from t1;
insert t1 select a+2, a+2, a+2 from t1;
insert t1 select a+4, a+4, a+4 from t1;
+insert t1 select a+8, a+8, a+8 from t1;
select * from t1;
a b c
1 1 1
@@ -22,10 +23,18 @@ a b c
6 6 6
7 7 7
8 8 8
-explain select a,c from t1 where a > 2;
+9 9 9
+10 10 10
+11 11 11
+12 12 12
+13 13 13
+14 14 14
+15 15 15
+16 16 16
+explain select a,c from t1 where a > 4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range a a 5 NULL 6 Using where; Using index
-explain select b,c from t1 where b > 2;
+1 SIMPLE t1 index a a 5 NULL 16 Using where; Using index
+explain select b,c from t1 where b > 4;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 ALL b NULL NULL NULL 8 Using where
+1 SIMPLE t1 ALL b NULL NULL NULL 16 Using where
drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_mariadb/t/clustering.test b/storage/tokudb/mysql-test/tokudb_mariadb/t/clustering.test
index 58d806ee365..ce7ef42a7f3 100644
--- a/storage/tokudb/mysql-test/tokudb_mariadb/t/clustering.test
+++ b/storage/tokudb/mysql-test/tokudb_mariadb/t/clustering.test
@@ -6,10 +6,11 @@ insert t1 values (1, 1, 1);
insert t1 select a+1, a+1, a+1 from t1;
insert t1 select a+2, a+2, a+2 from t1;
insert t1 select a+4, a+4, a+4 from t1;
+insert t1 select a+8, a+8, a+8 from t1;
select * from t1;
-explain select a,c from t1 where a > 2;
-explain select b,c from t1 where b > 2;
+explain select a,c from t1 where a > 4;
+explain select b,c from t1 where b > 4;
drop table t1;
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result
index 410e6fd0ba2..994be80c4fe 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_2_tokudb.result
@@ -517,6 +517,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1009,6 +1010,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1516,6 +1518,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2017,6 +2020,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2516,6 +2520,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3026,6 +3031,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3538,6 +3544,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4038,6 +4045,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4531,6 +4539,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5023,6 +5032,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5530,6 +5540,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6031,6 +6042,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6530,6 +6542,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7040,6 +7053,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7552,6 +7566,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8052,6 +8067,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8562,6 +8578,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9070,6 +9087,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9593,6 +9611,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10110,6 +10129,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10625,6 +10645,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11151,6 +11172,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11679,6 +11701,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12195,6 +12218,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12704,6 +12728,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13212,6 +13237,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13735,6 +13761,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14252,6 +14279,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14767,6 +14795,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15293,6 +15322,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15821,6 +15851,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16337,6 +16368,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16832,6 +16864,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17325,6 +17358,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17833,6 +17867,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18335,6 +18370,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18835,6 +18871,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19346,6 +19383,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19859,6 +19897,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20360,6 +20399,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20854,6 +20894,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21347,6 +21388,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21855,6 +21897,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22357,6 +22400,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22857,6 +22901,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23368,6 +23413,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23881,6 +23927,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24382,6 +24429,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24876,6 +24924,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25369,6 +25418,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25877,6 +25927,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26379,6 +26430,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26879,6 +26931,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27390,6 +27443,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27903,6 +27957,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28404,6 +28459,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result
index 093dbbe11c5..8b57e929951 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_1_tokudb.result
@@ -835,6 +835,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1327,6 +1328,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1834,6 +1836,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2335,6 +2338,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2836,6 +2840,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3346,6 +3351,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3858,6 +3864,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4358,6 +4365,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4851,6 +4859,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5343,6 +5352,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5850,6 +5860,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6351,6 +6362,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6852,6 +6864,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7362,6 +7375,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7874,6 +7888,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8374,6 +8389,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8884,6 +8900,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9392,6 +9409,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9915,6 +9933,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10432,6 +10451,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10949,6 +10969,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11475,6 +11496,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12003,6 +12025,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12519,6 +12542,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13028,6 +13052,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13536,6 +13561,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14059,6 +14085,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14576,6 +14603,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15093,6 +15121,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15619,6 +15648,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16147,6 +16177,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16663,6 +16694,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result
index a05ce5cb71b..7103426b454 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter1_2_tokudb.result
@@ -464,6 +464,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -904,6 +905,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1359,6 +1361,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1808,6 +1811,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2257,6 +2261,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2715,6 +2720,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3175,6 +3181,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3623,6 +3630,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4064,6 +4072,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4504,6 +4513,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4959,6 +4969,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5408,6 +5419,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5857,6 +5869,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6315,6 +6328,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6775,6 +6789,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7223,6 +7238,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7680,6 +7696,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8136,6 +8153,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8607,6 +8625,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9072,6 +9091,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9537,6 +9557,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10011,6 +10032,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10487,6 +10509,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10951,6 +10974,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11396,6 +11420,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11836,6 +11861,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12291,6 +12317,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12740,6 +12767,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13187,6 +13215,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13645,6 +13674,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14105,6 +14135,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14553,6 +14584,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14994,6 +15026,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15434,6 +15467,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15889,6 +15923,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16338,6 +16373,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16785,6 +16821,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17243,6 +17280,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17703,6 +17741,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18151,6 +18190,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18609,6 +18649,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19065,6 +19106,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19536,6 +19578,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20001,6 +20044,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20464,6 +20508,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20938,6 +20983,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21414,6 +21460,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21878,6 +21925,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22335,6 +22383,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22791,6 +22840,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23262,6 +23312,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23727,6 +23778,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24190,6 +24242,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24664,6 +24717,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25140,6 +25194,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25604,6 +25659,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26046,6 +26102,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26486,6 +26543,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26941,6 +26999,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27390,6 +27449,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27837,6 +27897,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28295,6 +28356,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28755,6 +28817,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29203,6 +29266,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29644,6 +29708,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30084,6 +30149,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30539,6 +30605,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30988,6 +31055,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31435,6 +31503,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31893,6 +31962,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -32353,6 +32423,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -32801,6 +32872,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -33258,6 +33330,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -33714,6 +33787,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -34185,6 +34259,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -34650,6 +34725,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -35113,6 +35189,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -35587,6 +35664,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -36063,6 +36141,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -36527,6 +36606,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result
index a398ac33f42..eba06e441c9 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_1_tokudb.result
@@ -481,6 +481,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -937,6 +938,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1408,6 +1410,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1873,6 +1876,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2338,6 +2342,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2812,6 +2817,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3288,6 +3294,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3752,6 +3759,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4248,6 +4256,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4742,6 +4751,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5251,6 +5261,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5754,6 +5765,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6257,6 +6269,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6769,6 +6782,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7283,6 +7297,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7785,6 +7800,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8280,6 +8296,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8774,6 +8791,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9283,6 +9301,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9786,6 +9805,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10289,6 +10309,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10801,6 +10822,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11315,6 +11337,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11817,6 +11840,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12327,6 +12351,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12835,6 +12860,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13358,6 +13384,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13875,6 +13902,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14392,6 +14420,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14918,6 +14947,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15446,6 +15476,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15962,6 +15993,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16471,6 +16503,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16979,6 +17012,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17502,6 +17536,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18019,6 +18054,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18536,6 +18572,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19062,6 +19099,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19590,6 +19628,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20106,6 +20145,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result
index 60cc765a570..6acd1198d16 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_1_2_tokudb.result
@@ -477,6 +477,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -933,6 +934,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1404,6 +1406,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1869,6 +1872,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2332,6 +2336,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2806,6 +2811,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3282,6 +3288,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3746,6 +3753,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4243,6 +4251,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4738,6 +4747,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5248,6 +5258,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5752,6 +5763,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6254,6 +6266,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6767,6 +6780,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7282,6 +7296,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7785,6 +7800,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8281,6 +8297,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8776,6 +8793,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9286,6 +9304,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9790,6 +9809,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10292,6 +10312,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10805,6 +10826,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11320,6 +11342,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11823,6 +11846,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12333,6 +12357,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12841,6 +12866,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13364,6 +13390,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13881,6 +13908,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14396,6 +14424,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14922,6 +14951,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15450,6 +15480,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15966,6 +15997,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16475,6 +16507,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16983,6 +17016,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17506,6 +17540,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18023,6 +18058,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18538,6 +18574,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19064,6 +19101,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19592,6 +19630,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20108,6 +20147,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result
index 0b5d8289ecc..6f58b3de45a 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_1_tokudb.result
@@ -483,6 +483,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -941,6 +942,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1414,6 +1416,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1879,6 +1882,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2346,6 +2350,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2820,6 +2825,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3298,6 +3304,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3764,6 +3771,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4261,6 +4269,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4757,6 +4766,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5268,6 +5278,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5771,6 +5782,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6276,6 +6288,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6788,6 +6801,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7304,6 +7318,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7808,6 +7823,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8305,6 +8321,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8801,6 +8818,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9312,6 +9330,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9815,6 +9834,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10320,6 +10340,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10832,6 +10853,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11348,6 +11370,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11852,6 +11875,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12364,6 +12388,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12874,6 +12899,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13399,6 +13425,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13916,6 +13943,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14435,6 +14463,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14961,6 +14990,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15491,6 +15521,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16009,6 +16040,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16520,6 +16552,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17030,6 +17063,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17555,6 +17589,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18072,6 +18107,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18591,6 +18627,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19117,6 +19154,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19647,6 +19685,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20165,6 +20204,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result
index 67bae3acecb..2a1049bb5ee 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter2_2_2_tokudb.result
@@ -479,6 +479,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -938,6 +939,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1412,6 +1414,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1882,6 +1885,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2350,6 +2354,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2829,6 +2834,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3308,6 +3314,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3775,6 +3782,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4275,6 +4283,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4773,6 +4782,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5286,6 +5296,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5795,6 +5806,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6302,6 +6314,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6820,6 +6833,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7338,6 +7352,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7844,6 +7859,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8343,6 +8359,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8841,6 +8858,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9354,6 +9372,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9863,6 +9882,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10370,6 +10390,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10888,6 +10909,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11406,6 +11428,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11912,6 +11935,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12425,6 +12449,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12936,6 +12961,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13462,6 +13488,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13984,6 +14011,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14504,6 +14532,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15035,6 +15064,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15566,6 +15596,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16085,6 +16116,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16597,6 +16629,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17108,6 +17141,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17634,6 +17668,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18156,6 +18191,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18676,6 +18712,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19207,6 +19244,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19738,6 +19776,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20257,6 +20296,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result
index 808f646dd48..27c473e0a5a 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_alter4_tokudb.result
@@ -60,6 +60,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -483,6 +484,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -519,6 +521,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -945,6 +948,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -989,6 +993,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -1418,6 +1423,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1460,6 +1466,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -1885,6 +1892,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1925,6 +1933,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -2352,6 +2361,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2396,6 +2406,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -2828,6 +2839,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2872,6 +2884,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -3306,6 +3319,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3346,6 +3360,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -3772,6 +3787,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3810,6 +3826,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -4233,6 +4250,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4269,6 +4287,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -4695,6 +4714,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4739,6 +4759,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -5168,6 +5189,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5210,6 +5232,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -5635,6 +5658,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5675,6 +5699,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -6102,6 +6127,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6146,6 +6172,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -6578,6 +6605,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6622,6 +6650,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -7056,6 +7085,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7096,6 +7126,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -7522,6 +7553,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7983,6 +8015,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8445,6 +8478,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8918,6 +8952,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9385,6 +9420,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9852,6 +9888,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10328,6 +10365,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10806,6 +10844,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11272,6 +11311,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11733,6 +11773,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12195,6 +12236,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12668,6 +12710,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13135,6 +13178,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13602,6 +13646,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14078,6 +14123,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14556,6 +14602,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15022,6 +15069,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15060,6 +15108,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -15483,6 +15532,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15519,6 +15569,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -15945,6 +15996,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15989,6 +16041,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -16418,6 +16471,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16460,6 +16514,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -16885,6 +16940,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16925,6 +16981,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -17352,6 +17409,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17396,6 +17454,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -17828,6 +17887,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17872,6 +17932,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -18306,6 +18367,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18346,6 +18408,7 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@@ -18772,6 +18835,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19236,6 +19300,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19698,6 +19763,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20171,6 +20237,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20638,6 +20705,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21105,6 +21173,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21581,6 +21650,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22059,6 +22129,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22525,6 +22596,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22986,6 +23058,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23448,6 +23521,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23921,6 +23995,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24388,6 +24463,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24855,6 +24931,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25331,6 +25408,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25809,6 +25887,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26275,6 +26354,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26736,6 +26816,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27198,6 +27279,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27671,6 +27753,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28138,6 +28221,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28605,6 +28689,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29081,6 +29166,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29559,6 +29645,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30025,6 +30112,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30486,6 +30574,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30948,6 +31037,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31421,6 +31511,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31888,6 +31979,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -32355,6 +32447,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -32831,6 +32924,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -33309,6 +33403,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -33775,6 +33870,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -34236,6 +34332,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -34698,6 +34795,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -35171,6 +35269,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -35638,6 +35737,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -36105,6 +36205,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -36581,6 +36682,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -37059,6 +37161,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -37525,6 +37628,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -37990,6 +38094,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -38453,6 +38558,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -38927,6 +39033,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -39395,6 +39502,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -39863,6 +39971,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -40340,6 +40449,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -40819,6 +40929,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -41286,6 +41397,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -41748,6 +41860,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -42211,6 +42324,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -42685,6 +42799,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -43153,6 +43268,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -43621,6 +43737,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -44098,6 +44215,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -44577,6 +44695,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -45044,6 +45163,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -45505,6 +45625,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -45967,6 +46088,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -46440,6 +46562,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -46907,6 +47030,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -47374,6 +47498,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -47850,6 +47975,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -48328,6 +48454,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -48794,6 +48921,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -49255,6 +49383,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -49717,6 +49846,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -50190,6 +50320,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -50657,6 +50788,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -51124,6 +51256,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -51600,6 +51733,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -52078,6 +52212,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -52544,6 +52679,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -53006,6 +53142,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -53469,6 +53606,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -53943,6 +54081,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -54411,6 +54550,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -54879,6 +55019,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -55356,6 +55497,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -55835,6 +55977,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -56302,6 +56445,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -56764,6 +56908,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -57224,6 +57369,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -57695,6 +57841,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -58160,6 +58307,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -58625,6 +58773,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -59099,6 +59248,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -59575,6 +59725,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -60039,6 +60190,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -60498,6 +60650,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -60958,6 +61111,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -61429,6 +61583,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -61894,6 +62049,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -62359,6 +62515,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -62833,6 +62990,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -63309,6 +63467,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -63773,6 +63932,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -64552,6 +64712,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -65012,6 +65173,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -65483,6 +65645,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -65948,6 +66111,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -66413,6 +66577,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -66887,6 +67052,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -67363,6 +67529,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -67827,6 +67994,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -68291,6 +68459,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -68753,6 +68922,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -69226,6 +69396,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -69693,6 +69864,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -70160,6 +70332,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -70636,6 +70809,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -71114,6 +71288,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -71580,6 +71755,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -72041,6 +72217,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -72503,6 +72680,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -72976,6 +73154,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -73443,6 +73622,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -73910,6 +74090,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -74386,6 +74567,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -74864,6 +75046,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -75330,6 +75513,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -75791,6 +75975,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -76253,6 +76438,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -76726,6 +76912,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -77193,6 +77380,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -77660,6 +77848,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -78136,6 +78325,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -78614,6 +78804,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -79080,6 +79271,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -79541,6 +79733,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -80003,6 +80196,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -80476,6 +80670,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -80943,6 +81138,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -81410,6 +81606,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -81886,6 +82083,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -82364,6 +82562,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -82830,6 +83029,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -83291,6 +83491,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -83753,6 +83954,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -84226,6 +84428,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -84693,6 +84896,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -85160,6 +85364,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -85636,6 +85841,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -86114,6 +86320,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -86580,6 +86787,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -87039,6 +87247,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -87493,6 +87702,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -87955,6 +88165,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -88415,6 +88626,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -88873,6 +89085,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -89335,6 +89548,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -89797,6 +90011,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -90255,6 +90470,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result
index ae20097fdda..8c19f82b9b6 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_basic_tokudb.result
@@ -484,6 +484,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -939,6 +940,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1409,6 +1411,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1873,6 +1876,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2337,6 +2341,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2810,6 +2815,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3289,6 +3295,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3752,6 +3759,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4209,6 +4217,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4664,6 +4673,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5134,6 +5144,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5598,6 +5609,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6060,6 +6072,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -6533,6 +6546,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7008,6 +7022,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7471,6 +7486,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -7969,6 +7985,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8461,6 +8478,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -8968,6 +8986,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9469,6 +9488,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -9970,6 +9990,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10480,6 +10501,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -10996,6 +11018,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11496,6 +11519,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -11989,6 +12013,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12481,6 +12506,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -12988,6 +13014,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13489,6 +13516,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -13990,6 +14018,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -14500,6 +14529,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15016,6 +15046,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -15516,6 +15547,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16025,6 +16057,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -16533,6 +16566,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17056,6 +17090,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -17573,6 +17608,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18090,6 +18126,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -18616,6 +18653,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19148,6 +19186,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -19664,6 +19703,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20162,6 +20202,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -20654,6 +20695,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21161,6 +21203,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -21662,6 +21705,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22161,6 +22205,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -22671,6 +22716,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23183,6 +23229,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -23683,6 +23730,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24176,6 +24224,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -24668,6 +24717,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25175,6 +25225,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -25676,6 +25727,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26175,6 +26227,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -26685,6 +26738,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27197,6 +27251,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -27697,6 +27752,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28206,6 +28262,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -28714,6 +28771,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29237,6 +29295,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -29754,6 +29813,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30269,6 +30329,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -30795,6 +30856,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31323,6 +31385,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -31839,6 +31902,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result
index af3aaddca7c..125155bad9f 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_debug_tokudb.result
@@ -20,6 +20,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -45,9 +46,11 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -79,6 +82,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -106,9 +110,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -140,6 +146,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -167,9 +174,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -201,6 +210,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -228,9 +238,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -262,6 +274,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -289,9 +302,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -323,6 +338,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -350,9 +366,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -384,6 +402,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -411,9 +430,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -445,6 +466,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -472,9 +494,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -507,6 +531,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -532,9 +557,11 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -567,6 +594,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -592,9 +620,11 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -628,6 +658,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -653,6 +684,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -683,6 +715,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -709,6 +742,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -741,6 +775,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -766,6 +801,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -796,6 +832,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -822,6 +859,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -854,6 +892,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -879,6 +918,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -909,6 +949,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -935,6 +976,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -967,6 +1009,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -992,6 +1035,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1022,6 +1066,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1048,6 +1093,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1080,6 +1126,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1105,6 +1152,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1135,6 +1183,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1161,6 +1210,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1193,6 +1243,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1218,6 +1269,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1248,6 +1300,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1274,6 +1327,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1306,6 +1360,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1331,6 +1386,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1361,6 +1417,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1387,6 +1444,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1419,6 +1477,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1444,6 +1503,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1475,6 +1535,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1501,6 +1562,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1534,6 +1596,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1559,6 +1622,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1590,6 +1654,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1616,6 +1681,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1649,6 +1715,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1674,6 +1741,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1705,6 +1773,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1731,6 +1800,7 @@ ALTER TABLE t1 ADD PARTITION
(PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1765,6 +1835,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1789,9 +1860,11 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1823,6 +1896,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1849,9 +1923,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1883,6 +1959,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1909,9 +1986,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1943,6 +2022,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1969,9 +2049,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -1998,6 +2080,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2024,9 +2107,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2053,6 +2138,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2079,9 +2165,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2108,6 +2196,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2132,9 +2221,11 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2161,6 +2252,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2185,9 +2277,11 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2214,6 +2308,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2238,9 +2333,11 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2268,6 +2365,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2292,6 +2390,7 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2322,6 +2421,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2347,6 +2447,7 @@ LOCK TABLE t1 WRITE;
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2379,6 +2480,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2403,6 +2505,7 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2433,6 +2536,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2458,6 +2562,7 @@ LOCK TABLE t1 WRITE;
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2490,6 +2595,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2514,6 +2620,7 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2544,6 +2651,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2569,6 +2677,7 @@ LOCK TABLE t1 WRITE;
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2601,6 +2710,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2625,6 +2735,7 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2650,6 +2761,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2675,6 +2787,7 @@ LOCK TABLE t1 WRITE;
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2702,6 +2815,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2726,6 +2840,7 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2751,6 +2866,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2776,6 +2892,7 @@ LOCK TABLE t1 WRITE;
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2803,6 +2920,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2827,6 +2945,7 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2852,6 +2971,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2877,6 +2997,7 @@ LOCK TABLE t1 WRITE;
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2904,6 +3025,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2928,6 +3050,7 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2953,6 +3076,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -2978,6 +3102,7 @@ LOCK TABLE t1 WRITE;
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3005,6 +3130,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3029,6 +3155,7 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3054,6 +3181,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3079,6 +3207,7 @@ LOCK TABLE t1 WRITE;
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3106,6 +3235,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3130,6 +3260,7 @@ a b
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3155,6 +3286,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3180,6 +3312,7 @@ LOCK TABLE t1 WRITE;
ALTER TABLE t1 DROP PARTITION p10;
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3209,6 +3342,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3235,9 +3369,11 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3269,6 +3405,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3297,9 +3434,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3331,6 +3470,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3359,9 +3499,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3393,6 +3535,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3421,9 +3564,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3455,6 +3600,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3483,9 +3629,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3517,6 +3665,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3545,9 +3694,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3579,6 +3730,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3607,9 +3759,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3642,6 +3796,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3670,9 +3825,11 @@ ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
#sql-t1.frm
#sql-t1.par
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3705,6 +3862,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3731,9 +3889,11 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3766,6 +3926,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3792,9 +3953,11 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3827,6 +3990,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3853,9 +4017,11 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3888,6 +4054,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3914,9 +4081,11 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
# State after crash recovery
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3951,6 +4120,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3977,6 +4147,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4007,6 +4178,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4034,6 +4206,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4066,6 +4239,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4092,6 +4266,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4122,6 +4297,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4149,6 +4325,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4181,6 +4358,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4207,6 +4385,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4237,6 +4416,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4264,6 +4444,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4296,6 +4477,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4322,6 +4504,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4352,6 +4535,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4379,6 +4563,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4411,6 +4596,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4437,6 +4623,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4467,6 +4654,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4494,6 +4682,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4526,6 +4715,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4552,6 +4742,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4582,6 +4773,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4609,6 +4801,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4641,6 +4834,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4667,6 +4861,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4698,6 +4893,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4725,6 +4921,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4758,6 +4955,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4784,6 +4982,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4815,6 +5014,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4842,6 +5042,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4875,6 +5076,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4901,6 +5103,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4932,6 +5135,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4959,6 +5163,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4992,6 +5197,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5018,6 +5224,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5049,6 +5256,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5076,6 +5284,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5109,6 +5318,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5135,6 +5345,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5166,6 +5377,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5193,6 +5405,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5226,6 +5439,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5252,6 +5466,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5283,6 +5498,7 @@ PARTITION BY LIST (a)
PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19));
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5310,6 +5526,7 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Unknown error
# State after failure
+db.opt
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5363,6 +5580,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
t2.frm
@@ -5392,10 +5610,12 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
t2.frm
# State after crash recovery
+db.opt
t1.frm
t1.par
t2.frm
@@ -5459,6 +5679,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
t2.frm
@@ -5488,10 +5709,12 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
t2.frm
# State after crash recovery
+db.opt
t1.frm
t1.par
t2.frm
@@ -5555,6 +5778,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
t2.frm
@@ -5584,10 +5808,12 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
t2.frm
# State after crash recovery
+db.opt
t1.frm
t1.par
t2.frm
@@ -5651,6 +5877,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
t2.frm
@@ -5680,10 +5907,12 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
t2.frm
# State after crash recovery
+db.opt
t1.frm
t1.par
t2.frm
@@ -5747,6 +5976,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
t2.frm
@@ -5776,10 +6006,12 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
t2.frm
# State after crash recovery
+db.opt
t1.frm
t1.par
t2.frm
@@ -5843,6 +6075,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
t2.frm
@@ -5872,10 +6105,12 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
t2.frm
# State after crash recovery
+db.opt
t1.frm
t1.par
t2.frm
@@ -5939,6 +6174,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
t2.frm
@@ -5968,10 +6204,12 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
t2.frm
# State after crash recovery
+db.opt
t1.frm
t1.par
t2.frm
@@ -6035,6 +6273,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
t2.frm
@@ -6064,10 +6303,12 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
t2.frm
# State after crash recovery
+db.opt
t1.frm
t1.par
t2.frm
@@ -6131,6 +6372,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before crash
+db.opt
t1.frm
t1.par
t2.frm
@@ -6160,10 +6402,12 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+db.opt
t1.frm
t1.par
t2.frm
# State after crash recovery
+db.opt
t1.frm
t1.par
t2.frm
@@ -6227,6 +6471,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6256,6 +6501,7 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Error in DDL log
# State after failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6319,6 +6565,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6348,6 +6595,7 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Error in DDL log
# State after failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6411,6 +6659,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6440,6 +6689,7 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Error on rename of './test/t2' to './test/#sqlx-nnnn_nnnn' (errno: 0 "Internal error/check (Not system error)")
# State after failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6503,6 +6753,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6532,6 +6783,7 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Error in DDL log
# State after failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6595,6 +6847,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6624,6 +6877,7 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Error on rename of './test/t1#P#p0' to './test/t2' (errno: 0 "Internal error/check (Not system error)")
# State after failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6687,6 +6941,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6716,6 +6971,7 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Error in DDL log
# State after failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6779,6 +7035,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6808,6 +7065,7 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Error on rename of './test/#sqlx-nnnn_nnnn' to './test/t1#P#p0' (errno: 0 "Internal error/check (Not system error)")
# State after failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6871,6 +7129,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6900,6 +7159,7 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Error in DDL log
# State after failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6963,6 +7223,7 @@ PARTITION BY RANGE (a)
PARTITION p1 VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
# State before failure
+db.opt
t1.frm
t1.par
t2.frm
@@ -6992,6 +7253,7 @@ a b
ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
ERROR HY000: Error in DDL log
# State after failure
+db.opt
t1.frm
t1.par
t2.frm
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_engine_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_engine_tokudb.result
index a921ba0f56d..028809cd36b 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_engine_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_engine_tokudb.result
@@ -475,6 +475,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -933,6 +934,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1395,6 +1397,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -1920,6 +1923,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2421,6 +2425,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -2883,6 +2888,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3342,6 +3348,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -3804,6 +3811,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4269,6 +4277,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -4723,6 +4732,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
@@ -5178,6 +5188,7 @@ AND f_int2 <> CAST(f_char1 AS SIGNED INT)
AND f_charbig = '####updated per insert trigger####';
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
+test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
CHECK TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc0_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc0_tokudb.result
index 4c30d47f526..558c3cbdff4 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc0_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc0_tokudb.result
@@ -1020,6 +1020,7 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
+MySQL_Test_DB.t1 analyze status Engine-independent statistics collected
MySQL_Test_DB.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc10_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc10_tokudb.result
index 6ccd9afefab..1c2f23a277b 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc10_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc10_tokudb.result
@@ -949,6 +949,7 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
+MySQL_Test_DB.t1 analyze status Engine-independent statistics collected
MySQL_Test_DB.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;
diff --git a/storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc1_tokudb.result b/storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc1_tokudb.result
index fce0d496032..76418679582 100644
--- a/storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc1_tokudb.result
+++ b/storage/tokudb/mysql-test/tokudb_parts/r/partition_mgm_lc1_tokudb.result
@@ -987,6 +987,7 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
+mysql_test_db.t1 analyze status Engine-independent statistics collected
mysql_test_db.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;
diff --git a/storage/tokudb/mysql-test/tokudb_rpl/r/mdev12179.result b/storage/tokudb/mysql-test/tokudb_rpl/r/mdev12179.result
index d4532eec4e2..d79e7e59aa4 100644
--- a/storage/tokudb/mysql-test/tokudb_rpl/r/mdev12179.result
+++ b/storage/tokudb/mysql-test/tokudb_rpl/r/mdev12179.result
@@ -2,6 +2,7 @@ include/master-slave.inc
[connection master]
connection server_2;
include/stop_slave.inc
+SET GLOBAL gtid_cleanup_batch_size = 999999999;
CHANGE MASTER TO master_use_gtid=slave_pos;
SET sql_log_bin=0;
CREATE TABLE mysql.gtid_slave_pos_innodb LIKE mysql.gtid_slave_pos;
@@ -41,6 +42,8 @@ a
1
SELECT * FROM mysql.gtid_slave_pos ORDER BY sub_id;
domain_id sub_id server_id seq_no
+0 1 1 1
+0 2 1 2
0 3 1 3
0 4 1 4
SELECT * FROM ( SELECT * FROM mysql.gtid_slave_pos_innodb
@@ -121,6 +124,21 @@ Transactions_multi_engine 6
DELETE FROM t1 WHERE a >= 100;
DELETE FROM t2 WHERE a >= 100;
DELETE FROM t3 WHERE a >= 100;
+connection server_1;
+include/save_master_gtid.inc
+connection server_2;
+include/sync_with_master_gtid.inc
+SELECT COUNT(*)>=10 FROM mysql.gtid_slave_pos;
+COUNT(*)>=10
+1
+SELECT COUNT(*)>=10 FROM ( SELECT * FROM mysql.gtid_slave_pos_innodb
+UNION ALL SELECT * FROM mysql.gtid_slave_pos_innodb_redundant) inner_select;
+COUNT(*)>=10
+1
+SELECT COUNT(*)>=10 FROM mysql.gtid_slave_pos_tokudb;
+COUNT(*)>=10
+1
+SET GLOBAL gtid_cleanup_batch_size = 3;
connection server_2;
include/stop_slave.inc
SET sql_log_bin=0;
diff --git a/storage/tokudb/mysql-test/tokudb_rpl/t/mdev12179.test b/storage/tokudb/mysql-test/tokudb_rpl/t/mdev12179.test
index ceb119cd0dc..1d19a25889e 100644
--- a/storage/tokudb/mysql-test/tokudb_rpl/t/mdev12179.test
+++ b/storage/tokudb/mysql-test/tokudb_rpl/t/mdev12179.test
@@ -4,6 +4,12 @@
--connection server_2
--source include/stop_slave.inc
+
+# Set GTID cleanup limit high enough that cleanup will not run and we
+# can rely on consistent table output in .result.
+--let $old_gtid_cleanup_batch_size=`SELECT @@GLOBAL.gtid_cleanup_batch_size`
+SET GLOBAL gtid_cleanup_batch_size = 999999999;
+
CHANGE MASTER TO master_use_gtid=slave_pos;
SET sql_log_bin=0;
CREATE TABLE mysql.gtid_slave_pos_innodb LIKE mysql.gtid_slave_pos;
@@ -89,6 +95,82 @@ DELETE FROM t2 WHERE a >= 100;
DELETE FROM t3 WHERE a >= 100;
+# Create a bunch more GTIDs in mysql.gtid_slave_pos* tables to test with.
+--connection server_1
+--disable_query_log
+let $i=10;
+while ($i) {
+ eval INSERT INTO t1 VALUES (300+$i);
+ eval INSERT INTO t2 VALUES (300+$i);
+ eval INSERT INTO t3 VALUES (300+$i);
+ dec $i;
+}
+--enable_query_log
+--source include/save_master_gtid.inc
+
+--connection server_2
+--source include/sync_with_master_gtid.inc
+
+# Check that we have many rows in mysql.gtid_slave_pos now (since
+# @@gtid_cleanup_batch_size was set to a huge value). No need to check
+# for an exact number, since that will require changing .result if
+# anything changes prior to this point, and we just need to know that
+# we still have some data in the tables to make the following
+# test effective.
+SELECT COUNT(*)>=10 FROM mysql.gtid_slave_pos;
+SELECT COUNT(*)>=10 FROM ( SELECT * FROM mysql.gtid_slave_pos_innodb
+ UNION ALL SELECT * FROM mysql.gtid_slave_pos_innodb_redundant) inner_select;
+SELECT COUNT(*)>=10 FROM mysql.gtid_slave_pos_tokudb;
+
+# Check that old GTID rows will be deleted when batch delete size is
+# set reasonably. Old row deletion is not 100% deterministic (by design), so
+# we must wait for it to occur, but it should occur eventually.
+SET GLOBAL gtid_cleanup_batch_size = 3;
+let $i=40;
+--disable_query_log
+--let $keep_include_silent=1
+while ($i) {
+ let N=`SELECT 1+($i MOD 3)`;
+ --connection server_1
+ eval UPDATE t$N SET a=a+1 WHERE a=(SELECT MAX(a) FROM t$N);
+ --source include/save_master_gtid.inc
+ --connection server_2
+ --source include/sync_with_master_gtid.inc
+ let $j=50;
+ while ($j) {
+ let $is_done=`SELECT SUM(a)=1 FROM (
+ SELECT COUNT(*) AS a FROM mysql.gtid_slave_pos
+ UNION ALL
+ SELECT COUNT(*) AS a FROM ( SELECT * FROM mysql.gtid_slave_pos_innodb
+ UNION ALL SELECT * FROM mysql.gtid_slave_pos_innodb_redundant) inner_select
+ UNION ALL
+ SELECT COUNT(*) AS a FROM mysql.gtid_slave_pos_tokudb) outer_select`;
+ if ($is_done) {
+ let $j=0;
+ }
+ if (!$is_done) {
+ real_sleep 0.1;
+ dec $j;
+ }
+ }
+ dec $i;
+ if ($is_done) {
+ let $i=0;
+ }
+}
+--enable_query_log
+--let $keep_include_silent=0
+if (!$is_done) {
+ --echo Timed out waiting for mysql.gtid_slave_pos* tables to be cleaned up
+}
+
+--disable_query_log
+DELETE FROM t1 WHERE a >= 100;
+DELETE FROM t2 WHERE a >= 100;
+DELETE FROM t3 WHERE a >= 100;
+--enable_query_log
+
+
# Test status variables Rpl_transactions_multi_engine and Transactions_gtid_foreign_engine.
# Have mysql.gtid_slave_pos* for myisam and innodb but not tokudb.
--connection server_2
@@ -223,6 +305,9 @@ SHOW STATUS LIKE "%transactions%engine";
SET sql_log_bin=0;
DROP TABLE mysql.gtid_slave_pos_innodb;
SET sql_log_bin=1;
+--disable_query_log
+eval SET GLOBAL gtid_cleanup_batch_size = $old_gtid_cleanup_batch_size;
+--enable_query_log
--connection server_1
DROP TABLE t1;
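
The new test code above has to wait for the background GTID cleanup because, as its comments note, old-row deletion is intentionally not deterministic; it therefore polls the gtid_slave_pos tables with a bounded retry loop. The stand-alone C program below is purely an illustration of that poll-with-timeout pattern and is not part of the patch; the polled condition and all names in it are invented.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for "are the mysql.gtid_slave_pos* tables cleaned
   up?"; a simple counter plays the role of the background deletion. */
static bool cleanup_done(int *rows_left)
{
  if (*rows_left > 0)
    (*rows_left)--;
  return *rows_left == 0;
}

/* Poll the condition up to max_tries times with a 0.1s pause between tries,
   mirroring the nested while($i)/while($j) loops and real_sleep 0.1 above. */
static bool wait_for_cleanup(int *rows_left, int max_tries)
{
  struct timespec pause= { 0, 100 * 1000 * 1000 };
  int i;
  for (i= 0; i < max_tries; i++)
  {
    if (cleanup_done(rows_left))
      return true;
    nanosleep(&pause, NULL);
  }
  return false;   /* caller then reports "Timed out waiting ..." */
}

int main(void)
{
  int rows= 5;
  puts(wait_for_cleanup(&rows, 50) ? "cleaned up" : "timed out");
  return 0;
}
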
diff --git a/storage/tokudb/tokudb_dir_cmd.cc b/storage/tokudb/tokudb_dir_cmd.cc
index 5431cbab7aa..d0da92eab27 100644
--- a/storage/tokudb/tokudb_dir_cmd.cc
+++ b/storage/tokudb/tokudb_dir_cmd.cc
@@ -50,11 +50,11 @@ static int MDL_and_TDC(THD *thd,
table_arg.str = const_cast<char *>(table);
table_arg.length = strlen(table);
Table_ident table_ident(thd, &db_arg, &table_arg, true);;
- thd->lex->select_lex.add_table_to_list(
+ thd->lex->first_select_lex()->add_table_to_list(
thd, &table_ident, NULL, 1, TL_UNLOCK, MDL_EXCLUSIVE, 0, 0, 0);
/* The lock will be released at the end of mysq_execute_command() */
error = lock_table_names(thd,
- thd->lex->select_lex.table_list.first,
+ thd->lex->first_select_lex()->table_list.first,
NULL,
thd->variables.lock_wait_timeout,
0);
diff --git a/storage/tokudb/tokudb_sysvars.cc b/storage/tokudb/tokudb_sysvars.cc
index c9f72f6a34d..684a952750f 100644
--- a/storage/tokudb/tokudb_sysvars.cc
+++ b/storage/tokudb/tokudb_sysvars.cc
@@ -901,7 +901,7 @@ static MYSQL_THDVAR_BOOL(
NULL,
false);
-static MYSQL_THDVAR_BOOL(
+ static MYSQL_THDVAR_BOOL(
enable_fast_upsert,
PLUGIN_VAR_THDLOCAL,
"disable slow upsert",
diff --git a/strings/ctype-uca.c b/strings/ctype-uca.c
index 9efd7242118..8368e33cc1d 100644
--- a/strings/ctype-uca.c
+++ b/strings/ctype-uca.c
@@ -31158,17 +31158,6 @@ typedef struct my_uca_scanner_st
CHARSET_INFO *cs;
} my_uca_scanner;
-/*
- Charset dependent scanner part, to optimize
- some character sets.
-*/
-typedef struct my_uca_scanner_handler_st
-{
- void (*init)(my_uca_scanner *scanner, CHARSET_INFO *cs,
- const MY_UCA_WEIGHT_LEVEL *level,
- const uchar *str, size_t length);
- int (*next)(my_uca_scanner *scanner);
-} my_uca_scanner_handler;
static const uint16 nochar[]= {0,0};
@@ -31421,6 +31410,28 @@ my_uca_can_be_previous_context_tail(const MY_CONTRACTIONS *list, my_wc_t wc)
/**
+ Check if a character needs previous/next context handling:
+  - can be a previous context tail
+ - can be a contraction start
+
+ @param level Pointer to an UCA weight level data
+ @param wc Code point
+
+ @return
+ @retval FALSE - does not need context handling
+  @retval TRUE  - needs context handling
+*/
+
+static inline my_bool
+my_uca_needs_context_handling(const MY_UCA_WEIGHT_LEVEL *level, my_wc_t wc)
+{
+ return level->contractions.nitems > 0 &&
+ level->contractions.flags[wc & MY_UCA_CNT_FLAG_MASK] &
+ (MY_UCA_PREVIOUS_CONTEXT_TAIL | MY_UCA_CNT_HEAD);
+}
+
+
+/**
Compare two wide character strings, wide analog to strncmp().
@param a Pointer to the first string
@@ -31554,6 +31565,60 @@ my_uca_previous_context_find(my_uca_scanner *scanner,
return NULL;
}
+
+/*
+ Find a context dependent weight of a character.
+ @param scanner - UCA weight scanner. The caller should set
+ its members "page" and "code" to the previous character
+  (or to zeros if there is no previous character).
+ @param wc - an array of wide characters which has at least
+ MY_UCA_MAX_CONTRACTION elements, where wc[0] is set
+ to the current character (whose weight is being resolved).
+  The values of wc[i>0] are not important, but if wc[0]
+ appears to be a known contraction head, the function
+ will collect further contraction parts into wc[i>0].
+ If wc[0] and the previous character make a previous context
+ pair, then wc[1] is set to the previous character.
+
+  @retval NULL if no contextual weights were found for wc[0]
+  @retval non-null pointer to a zero-terminated weight string otherwise
+*/
+static inline uint16 *
+my_uca_context_weight_find(my_uca_scanner *scanner, my_wc_t *wc)
+{
+ uint16 *cweight;
+ DBUG_ASSERT(scanner->level->contractions.nitems);
+ /*
+ If we have scanned a character which can have previous context,
+ and there were some more characters already before,
+ then reconstruct codepoint of the previous character
+ from "page" and "code" into w[1], and verify that {wc[1], wc[0]}
+ together form a real previous context pair.
+ Note, we support only 2-character long sequences with previous
+ context at the moment. CLDR does not have longer sequences.
+ */
+ if (my_uca_can_be_previous_context_tail(&scanner->level->contractions,
+ wc[0]) &&
+ scanner->wbeg != nochar && /* if not the very first character */
+ my_uca_can_be_previous_context_head(&scanner->level->contractions,
+ (wc[1]= ((scanner->page << 8) +
+ scanner->code))) &&
+ (cweight= my_uca_previous_context_find(scanner, wc[1], wc[0])))
+ {
+ scanner->page= scanner->code= 0; /* Clear for the next character */
+ return cweight;
+ }
+ else if (my_uca_can_be_contraction_head(&scanner->level->contractions,
+ wc[0]))
+ {
+ /* Check if w[0] starts a contraction */
+ if ((cweight= my_uca_scanner_contraction_find(scanner, wc)))
+ return cweight;
+ }
+ return NULL;
+}
+
+
/****************************************************************/
/**
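
The helpers added above, my_uca_needs_context_handling() and my_uca_context_weight_find(), pull the contraction/previous-context logic out of the scanner so it can be shared by the scanner implementations now generated from ctype-uca.ic (see the #include added further down); the old inline copy in my_uca_scanner_next_any() is deleted in the next hunk. The toy program below is only an analogy for the underlying idea (try a multi-character, context-dependent weight first, then fall back to the single-character table); it is not MariaDB code, and its names and weights are made up.

#include <stdio.h>

/* Toy weight lookup: 'a'..'z' get weights 1..26; the digraph "ch" gets a
   single weight 100, illustrating why a contraction check must run before
   the ordinary one-character lookup. All names and weights are invented. */
static int toy_char_weight(char c)
{
  return (c >= 'a' && c <= 'z') ? (c - 'a' + 1) : 0;   /* 0 = ignorable */
}

static int toy_contraction_weight(const char *p, int *consumed)
{
  if (p[0] == 'c' && p[1] == 'h')
  {
    *consumed= 2;
    return 100;
  }
  return 0;
}

/* One scanner step: contraction first, then the single character. */
static int toy_scanner_next(const char **p)
{
  int consumed, w;
  if (**p == '\0')
    return -1;                                   /* end of string */
  if ((w= toy_contraction_weight(*p, &consumed)) != 0)
  {
    *p+= consumed;
    return w;
  }
  w= toy_char_weight(**p);
  (*p)++;
  return w;
}

int main(void)
{
  const char *s= "chao";
  int w;
  while ((w= toy_scanner_next(&s)) >= 0)
    printf("%d ", w);                            /* prints: 100 1 15 */
  putchar('\n');
  return 0;
}
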
@@ -31675,223 +31740,6 @@ my_uca_scanner_init_any(my_uca_scanner *scanner,
scanner->cs= cs;
}
-static int my_uca_scanner_next_any(my_uca_scanner *scanner)
-{
- /*
- Check if the weights for the previous character have been
- already fully scanned. If yes, then get the next character and
- initialize wbeg and wlength to its weight string.
- */
-
- if (scanner->wbeg[0]) /* More weights left from the previous step: */
- return *scanner->wbeg++; /* return the next weight from expansion */
-
- do
- {
- const uint16 *wpage;
- my_wc_t wc[MY_UCA_MAX_CONTRACTION];
- int mblen;
-
- /* Get next character */
- if (((mblen= scanner->cs->cset->mb_wc(scanner->cs, wc,
- scanner->sbeg,
- scanner->send)) <= 0))
- {
- if (scanner->sbeg >= scanner->send)
- return -1; /* No more bytes, end of line reached */
- /*
- There are some more bytes left. Non-positive mb_len means that
- we got an incomplete or a bad byte sequence. Consume mbminlen bytes.
- */
- if ((scanner->sbeg+= scanner->cs->mbminlen) > scanner->send)
- {
- /* For safety purposes don't go beyond the string range. */
- scanner->sbeg= scanner->send;
- }
- /*
- Treat every complete or incomplete mbminlen unit as a weight which is
- greater than weight for any possible normal character.
- 0xFFFF is greater than any possible weight in the UCA weight table.
- */
- return 0xFFFF;
- }
-
- scanner->sbeg+= mblen;
- if (wc[0] > scanner->level->maxchar)
- {
- /* Return 0xFFFD as weight for all characters outside BMP */
- scanner->wbeg= nochar;
- return 0xFFFD;
- }
-
- if (my_uca_have_contractions_quick(scanner->level))
- {
- uint16 *cweight;
- /*
- If we have scanned a character which can have previous context,
- and there were some more characters already before,
- then reconstruct codepoint of the previous character
- from "page" and "code" into w[1], and verify that {wc[1], wc[0]}
- together form a real previous context pair.
- Note, we support only 2-character long sequences with previous
- context at the moment. CLDR does not have longer sequences.
- */
- if (my_uca_can_be_previous_context_tail(&scanner->level->contractions,
- wc[0]) &&
- scanner->wbeg != nochar && /* if not the very first character */
- my_uca_can_be_previous_context_head(&scanner->level->contractions,
- (wc[1]= ((scanner->page << 8) +
- scanner->code))) &&
- (cweight= my_uca_previous_context_find(scanner, wc[1], wc[0])))
- {
- scanner->page= scanner->code= 0; /* Clear for the next character */
- return *cweight;
- }
- else if (my_uca_can_be_contraction_head(&scanner->level->contractions,
- wc[0]))
- {
- /* Check if w[0] starts a contraction */
- if ((cweight= my_uca_scanner_contraction_find(scanner, wc)))
- return *cweight;
- }
- }
-
- /* Process single character */
- scanner->page= wc[0] >> 8;
- scanner->code= wc[0] & 0xFF;
-
- /* If weight page for w[0] does not exist, then calculate algoritmically */
- if (!(wpage= scanner->level->weights[scanner->page]))
- return my_uca_scanner_next_implicit(scanner);
-
- /* Calculate pointer to w[0]'s weight, using page and offset */
- scanner->wbeg= wpage +
- scanner->code * scanner->level->lengths[scanner->page];
- } while (!scanner->wbeg[0]); /* Skip ignorable characters */
-
- return *scanner->wbeg++;
-}
-
-
-static my_uca_scanner_handler my_any_uca_scanner_handler=
-{
- my_uca_scanner_init_any,
- my_uca_scanner_next_any
-};
-
-/*
- Compares two strings according to the collation
-
- SYNOPSIS:
- my_strnncoll_uca()
- cs Character set information
- s First string
- slen First string length
- t Second string
- tlen Seconf string length
- level DUCETweight level
-
- NOTES:
- Initializes two weight scanners and gets weights
- corresponding to two strings in a loop. If weights are not
- the same at some step then returns their difference.
-
- In the while() comparison these situations are possible:
- 1. (s_res>0) and (t_res>0) and (s_res == t_res)
- Weights are the same so far, continue comparison
- 2. (s_res>0) and (t_res>0) and (s_res!=t_res)
- A difference has been found, return.
- 3. (s_res>0) and (t_res<0)
- We have reached the end of the second string, or found
- an illegal multibyte sequence in the second string.
- Return a positive number, i.e. the first string is bigger.
- 4. (s_res<0) and (t_res>0)
- We have reached the end of the first string, or found
- an illegal multibyte sequence in the first string.
- Return a negative number, i.e. the second string is bigger.
- 5. (s_res<0) and (t_res<0)
- Both scanners returned -1. It means we have riched
- the end-of-string of illegal-sequence in both strings
- at the same time. Return 0, strings are equal.
-
- RETURN
- Difference between two strings, according to the collation:
- 0 - means strings are equal
- negative number - means the first string is smaller
- positive number - means the first string is bigger
-*/
-
-static int my_strnncoll_uca_onelevel(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- const MY_UCA_WEIGHT_LEVEL *level,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen,
- my_bool t_is_prefix)
-{
- my_uca_scanner sscanner;
- my_uca_scanner tscanner;
- int s_res;
- int t_res;
-
- scanner_handler->init(&sscanner, cs, level, s, slen);
- scanner_handler->init(&tscanner, cs, level, t, tlen);
-
- do
- {
- s_res= scanner_handler->next(&sscanner);
- t_res= scanner_handler->next(&tscanner);
- } while ( s_res == t_res && s_res >0);
-
- return (t_is_prefix && t_res < 0) ? 0 : (s_res - t_res);
-}
-
-static int my_strnncoll_uca(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen,
- my_bool t_is_prefix)
-{
- return my_strnncoll_uca_onelevel(cs, scanner_handler, &cs->uca->level[0],
- s, slen, t, tlen, t_is_prefix);
-}
-
-static int my_strnncoll_uca_multilevel(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen,
- my_bool t_is_prefix)
-{
- uint num_level= cs->levels_for_order;
- uint i;
- for (i= 0; i != num_level; i++)
- {
- int ret= my_strnncoll_uca_onelevel(cs, scanner_handler, &cs->uca->level[i],
- s, slen, t, tlen, t_is_prefix);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-
-static int
-my_strnncollsp_generic_uca_nopad_multilevel(CHARSET_INFO *cs,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen)
-{
- uint num_level= cs->levels_for_order;
- uint i;
- for (i= 0; i != num_level; i++)
- {
- int ret= my_strnncoll_uca_onelevel(cs, &my_any_uca_scanner_handler,
- &cs->uca->level[i],
- s, slen, t, tlen, FALSE);
- if (ret)
- return ret;
- }
- return 0;
-}
-
static inline int
my_space_weight(const MY_UCA_WEIGHT_LEVEL *level)
@@ -31924,258 +31772,6 @@ my_char_weight_addr(const MY_UCA_WEIGHT_LEVEL *level, uint wc)
}
-/*
- Compares two strings according to the collation,
- ignoring trailing spaces.
-
- SYNOPSIS:
- my_strnncollsp_uca()
- cs Character set information
- s First string
- slen First string length
- t Second string
- tlen Seconf string length
- level DUCETweight level
-
- NOTES:
- Works exactly the same with my_strnncoll_uca(),
- but ignores trailing spaces.
-
- In the while() comparison these situations are possible:
- 1. (s_res>0) and (t_res>0) and (s_res == t_res)
- Weights are the same so far, continue comparison
- 2. (s_res>0) and (t_res>0) and (s_res!=t_res)
- A difference has been found, return.
- 3. (s_res>0) and (t_res<0)
- We have reached the end of the second string, or found
- an illegal multibyte sequence in the second string.
- Compare the first string to an infinite array of
- space characters until difference is found, or until
- the end of the first string.
- 4. (s_res<0) and (t_res>0)
- We have reached the end of the first string, or found
- an illegal multibyte sequence in the first string.
- Compare the second string to an infinite array of
- space characters until difference is found or until
- the end of the second steing.
- 5. (s_res<0) and (t_res<0)
- Both scanners returned -1. It means we have riched
- the end-of-string of illegal-sequence in both strings
- at the same time. Return 0, strings are equal.
-
- RETURN
- Difference between two strings, according to the collation:
- 0 - means strings are equal
- negative number - means the first string is smaller
- positive number - means the first string is bigger
-*/
-
-static int my_strnncollsp_uca_onelevel(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- const MY_UCA_WEIGHT_LEVEL *level,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen)
-{
- my_uca_scanner sscanner, tscanner;
- int s_res, t_res;
-
- scanner_handler->init(&sscanner, cs, level, s, slen);
- scanner_handler->init(&tscanner, cs, level, t, tlen);
-
- do
- {
- s_res= scanner_handler->next(&sscanner);
- t_res= scanner_handler->next(&tscanner);
- } while ( s_res == t_res && s_res >0);
-
- if (s_res > 0 && t_res < 0)
- {
- /* Calculate weight for SPACE character */
- t_res= my_space_weight(level);
-
- /* compare the first string to spaces */
- do
- {
- if (s_res != t_res)
- return (s_res - t_res);
- s_res= scanner_handler->next(&sscanner);
- } while (s_res > 0);
- return 0;
- }
-
- if (s_res < 0 && t_res > 0)
- {
- /* Calculate weight for SPACE character */
- s_res= my_space_weight(level);
-
- /* compare the second string to spaces */
- do
- {
- if (s_res != t_res)
- return (s_res - t_res);
- t_res= scanner_handler->next(&tscanner);
- } while (t_res > 0);
- return 0;
- }
-
- return ( s_res - t_res );
-}
-
-static int my_strnncollsp_uca(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen)
-{
- return my_strnncollsp_uca_onelevel(cs, scanner_handler, &cs->uca->level[0],
- s, slen, t, tlen);
-}
-
-static int my_strnncollsp_uca_multilevel(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen)
-{
- uint num_level= cs->levels_for_order;
- uint i;
- for (i= 0; i != num_level; i++)
- {
- int ret= my_strnncollsp_uca_onelevel(cs, scanner_handler,
- &cs->uca->level[i], s, slen, t, tlen);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-/*
- Calculates hash value for the given string,
- according to the collation, and ignoring trailing spaces.
-
- SYNOPSIS:
- my_hash_sort_uca()
- cs Character set information
- s String
- slen String's length
- n1 First hash parameter
- n2 Second hash parameter
-
- NOTES:
- Scans consequently weights and updates
- hash parameters n1 and n2. In a case insensitive collation,
- upper and lower case of the same letter will return the same
- weight sequence, and thus will produce the same hash values
- in n1 and n2.
-
- This functions is used for one-level and for multi-level collations.
- We intentionally use only primary level in multi-level collations.
- This helps to have PARTITION BY KEY put primarily equal records
- into the same partition. E.g. in utf8_thai_520_ci records that differ
- only in tone marks go into the same partition.
-
- RETURN
- N/A
-*/
-
-static void my_hash_sort_uca(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- const uchar *s, size_t slen,
- ulong *nr1, ulong *nr2)
-{
- int s_res;
- my_uca_scanner scanner;
- int space_weight= my_space_weight(&cs->uca->level[0]);
- register ulong m1= *nr1, m2= *nr2;
-
- scanner_handler->init(&scanner, cs, &cs->uca->level[0], s, slen);
-
- while ((s_res= scanner_handler->next(&scanner)) >0)
- {
- if (s_res == space_weight)
- {
- /* Combine all spaces to be able to skip end spaces */
- uint count= 0;
- do
- {
- count++;
- if ((s_res= scanner_handler->next(&scanner)) <= 0)
- {
- /* Skip strings at end of string */
- goto end;
- }
- }
- while (s_res == space_weight);
-
- /* Add back that has for the space characters */
- do
- {
- /*
- We can't use MY_HASH_ADD_16() here as we, because of a misstake
- in the original code, where we added the 16 byte variable the
- opposite way. Changing this would cause old partitioned tables
- to fail.
- */
- MY_HASH_ADD(m1, m2, space_weight >> 8);
- MY_HASH_ADD(m1, m2, space_weight & 0xFF);
- }
- while (--count != 0);
-
- }
- /* See comment above why we can't use MY_HASH_ADD_16() */
- MY_HASH_ADD(m1, m2, s_res >> 8);
- MY_HASH_ADD(m1, m2, s_res & 0xFF);
- }
-end:
- *nr1= m1;
- *nr2= m2;
-}
-
-
-static void my_hash_sort_uca_nopad(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- const uchar *s, size_t slen,
- ulong *nr1, ulong *nr2)
-{
- int s_res;
- my_uca_scanner scanner;
- register ulong m1= *nr1, m2= *nr2;
-
- scanner_handler->init(&scanner, cs, &cs->uca->level[0], s, slen);
-
- while ((s_res= scanner_handler->next(&scanner)) >0)
- {
- /* See comment above why we can't use MY_HASH_ADD_16() */
- MY_HASH_ADD(m1, m2, s_res >> 8);
- MY_HASH_ADD(m1, m2, s_res & 0xFF);
- }
- *nr1= m1;
- *nr2= m2;
-}
-
-
-static uchar *
-my_strnxfrm_uca_onelevel_internal(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- MY_UCA_WEIGHT_LEVEL *level,
- uchar *dst, uchar *de, uint *nweights,
- const uchar *src, size_t srclen)
-{
- my_uca_scanner scanner;
- int s_res;
-
- DBUG_ASSERT(src || !srclen);
-
- scanner_handler->init(&scanner, cs, level, src, srclen);
- for (; dst < de && *nweights &&
- (s_res= scanner_handler->next(&scanner)) > 0 ; (*nweights)--)
- {
- *dst++= s_res >> 8;
- if (dst < de)
- *dst++= s_res & 0xFF;
- }
- return dst;
-}
-
-
static uchar *
my_strnxfrm_uca_padn(uchar *dst, uchar *de, uint nweights, int weight)
{
@@ -32202,27 +31798,6 @@ my_strnxfrm_uca_pad(uchar *dst, uchar *de, int weight)
}
-static uchar *
-my_strnxfrm_uca_onelevel(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- MY_UCA_WEIGHT_LEVEL *level,
- uchar *dst, uchar *de, uint nweights,
- const uchar *src, size_t srclen, uint flags)
-{
- uchar *d0= dst;
-
- dst= my_strnxfrm_uca_onelevel_internal(cs, scanner_handler, level,
- dst, de, &nweights,
- src, srclen);
- DBUG_ASSERT(dst <= de);
- if (dst < de && nweights && (flags & MY_STRXFRM_PAD_WITH_SPACE))
- dst= my_strnxfrm_uca_padn(dst, de, nweights, my_space_weight(level));
- DBUG_ASSERT(dst <= de);
- my_strxfrm_desc_and_reverse(d0, dst, flags, 0);
- return dst;
-}
-
-
/*
Return the minimum possible weight on a level.
*/
@@ -32233,136 +31808,6 @@ static uint min_weight_on_level(MY_UCA_WEIGHT_LEVEL *level)
}
-static uchar *
-my_strnxfrm_uca_nopad_onelevel(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- MY_UCA_WEIGHT_LEVEL *level,
- uchar *dst, uchar *de, uint nweights,
- const uchar *src, size_t srclen, uint flags)
-{
- uchar *d0= dst;
-
- dst= my_strnxfrm_uca_onelevel_internal(cs, scanner_handler, level,
- dst, de, &nweights,
- src, srclen);
- DBUG_ASSERT(dst <= de);
- /* Pad with the minimum possible weight on this level */
- if (dst < de && nweights && (flags & MY_STRXFRM_PAD_WITH_SPACE))
- dst= my_strnxfrm_uca_padn(dst, de, nweights, min_weight_on_level(level));
- DBUG_ASSERT(dst <= de);
- my_strxfrm_desc_and_reverse(d0, dst, flags, 0);
- return dst;
-}
-
-
-/*
- For the given string creates its "binary image", suitable
- to be used in binary comparison, i.e. in memcmp().
-
- SYNOPSIS:
- my_strnxfrm_uca()
- cs Character set information
- dst Where to write the image
- dstlen Space available for the image, in bytes
- src The source string
- srclen Length of the source string, in bytes
-
- NOTES:
- In a loop, scans weights from the source string and writes
- them into the binary image. In a case insensitive collation,
- upper and lower cases of the same letter will produce the
- same image subsequences. When we have reached the end-of-string
- or found an illegal multibyte sequence, the loop stops.
-
- It is impossible to restore the original string using its
- binary image.
-
- Binary images are used for bulk comparison purposes,
- e.g. in ORDER BY, when it is more efficient to create
- a binary image and use it instead of weight scanner
- for the original strings for every comparison.
-
- RETURN
- Number of bytes that have been written into the binary image.
-*/
-
-
-static size_t
-my_strnxfrm_uca(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- uchar *dst, size_t dstlen, uint nweights,
- const uchar *src, size_t srclen, uint flags)
-{
- uchar *d0= dst;
- uchar *de= dst + dstlen;
-
- dst= my_strnxfrm_uca_onelevel(cs, scanner_handler, &cs->uca->level[0],
- dst, de, nweights, src, srclen, flags);
- /*
- This can probably be changed to memset(dst, 0, de - dst),
- like my_strnxfrm_uca_multilevel() does.
- */
- if ((flags & MY_STRXFRM_PAD_TO_MAXLEN) && dst < de)
- dst= my_strnxfrm_uca_pad(dst, de, my_space_weight(&cs->uca->level[0]));
- return dst - d0;
-}
-
-
-static size_t
-my_strnxfrm_uca_nopad(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- uchar *dst, size_t dstlen, uint nweights,
- const uchar *src, size_t srclen, uint flags)
-{
- uchar *d0= dst;
- uchar *de= dst + dstlen;
-
- dst= my_strnxfrm_uca_nopad_onelevel(cs, scanner_handler, &cs->uca->level[0],
- dst, de, nweights, src, srclen, flags);
- if ((flags & MY_STRXFRM_PAD_TO_MAXLEN) && dst < de)
- {
- memset(dst, 0, de - dst);
- dst= de;
- }
- return dst - d0;
-}
-
-
-static size_t
-my_strnxfrm_uca_multilevel(CHARSET_INFO *cs,
- my_uca_scanner_handler *scanner_handler,
- uchar *dst, size_t dstlen, uint nweights,
- const uchar *src, size_t srclen, uint flags)
-{
- uint num_level= cs->levels_for_order;
- uchar *d0= dst;
- uchar *de= dst + dstlen;
- uint current_level;
-
- for (current_level= 0; current_level != num_level; current_level++)
- {
- if (!(flags & MY_STRXFRM_LEVEL_ALL) ||
- (flags & (MY_STRXFRM_LEVEL1 << current_level)))
- dst= cs->state & MY_CS_NOPAD ?
- my_strnxfrm_uca_nopad_onelevel(cs, scanner_handler,
- &cs->uca->level[current_level],
- dst, de, nweights,
- src, srclen, flags) :
- my_strnxfrm_uca_onelevel(cs, scanner_handler,
- &cs->uca->level[current_level],
- dst, de, nweights,
- src, srclen, flags);
- }
-
- if (dst < de && (flags & MY_STRXFRM_PAD_TO_MAXLEN))
- {
- memset(dst, 0, de - dst);
- dst= de;
- }
-
- return dst - d0;
-}
-
/*
This function compares if two characters are the same.
The sign +1 or -1 does not matter. The only
@@ -32568,6 +32013,23 @@ int my_wildcmp_uca(CHARSET_INFO *cs,
/*
+ Tests if an optimized "no contraction" handler can be used for
+ the given collation.
+*/
+static my_bool
+my_uca_collation_can_optimize_no_contractions(CHARSET_INFO *cs)
+{
+ uint i;
+ for (i= 0; i < cs->levels_for_order ; i++)
+ {
+ if (my_uca_have_contractions_quick(&cs->uca->level[i]))
+ return FALSE;
+ }
+ return TRUE;
+}
+
+
+/*
Collation language is implemented according to
subset of ICU Collation Customization (tailorings):
http://icu.sourceforge.net/userguide/Collate_Customization.html
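
my_uca_collation_can_optimize_no_contractions() is a probe used at collation initialization: when no level of the collation has contractions, the collation can be re-pointed at a leaner handler, which is what the MY_COLLATION_HANDLER_PACKAGE / my_uca_handler_map() machinery added in the following hunk does. Below is a minimal, self-contained sketch of that probe-then-swap-the-handler-table pattern; every name in it is invented for illustration and none of it comes from the patch.

#include <stdbool.h>
#include <stdio.h>

/* A tiny stand-in for MY_COLLATION_HANDLER: a table of operations. */
typedef struct
{
  const char *name;
  int (*compare)(const char *a, const char *b);
} toy_handler;

static int compare_generic(const char *a, const char *b)
{
  /* stands for an expensive path that also checks contractions */
  while (*a && *a == *b) { a++; b++; }
  return (unsigned char) *a - (unsigned char) *b;
}

static int compare_no_contractions(const char *a, const char *b)
{
  /* same result here, but stands for a faster specialized path */
  while (*a && *a == *b) { a++; b++; }
  return (unsigned char) *a - (unsigned char) *b;
}

static toy_handler generic_handler= { "generic", compare_generic };
static toy_handler fast_handler= { "no-contractions", compare_no_contractions };

typedef struct
{
  bool has_contractions;
  const toy_handler *coll;          /* like charset_info_st::coll */
} toy_collation;

/* Init-time swap, analogous to my_uca_handler_map(): if the probe says the
   collation has no contractions, point it at the specialized handler. */
static void toy_collation_init(toy_collation *cs)
{
  cs->coll= cs->has_contractions ? &generic_handler : &fast_handler;
}

int main(void)
{
  toy_collation cs= { false, NULL };
  toy_collation_init(&cs);
  printf("%s: %d\n", cs.coll->name, cs.coll->compare("abc", "abd"));
  return 0;
}
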
@@ -34250,8 +33712,74 @@ init_weight_level(MY_CHARSET_LOADER *loader, MY_COLL_RULES *rules,
}
-MY_COLLATION_HANDLER my_collation_any_uca_handler_multilevel;
-MY_COLLATION_HANDLER my_collation_generic_uca_nopad_handler_multilevel;
+static my_bool
+create_tailoring(struct charset_info_st *cs,
+ MY_CHARSET_LOADER *loader);
+
+static my_bool
+my_coll_init_uca(struct charset_info_st *cs, MY_CHARSET_LOADER *loader)
+{
+ cs->pad_char= ' ';
+ cs->ctype= my_charset_utf8_unicode_ci.ctype;
+ if (!cs->caseinfo)
+ cs->caseinfo= &my_unicase_default;
+ return create_tailoring(cs, loader);
+}
+
+
+static size_t my_strnxfrmlen_any_uca(CHARSET_INFO *cs, size_t len)
+{
+ /* UCA uses 2 bytes per weight */
+ return (len + cs->mbmaxlen - 1) / cs->mbmaxlen * cs->strxfrm_multiply * 2;
+}
+
+static size_t my_strnxfrmlen_any_uca_multilevel(CHARSET_INFO *cs, size_t len)
+{
+ return my_strnxfrmlen_any_uca(cs, len) * cs->levels_for_order;
+}
+
+
+/*
+ This structure is used at the collation initialization time, to switch
+ from a full-featured collation handler to a "no contraction" collation
+ handler if the collation is known not to have any contractions.
+*/
+typedef struct
+{
+ MY_COLLATION_HANDLER *pad;
+ MY_COLLATION_HANDLER *nopad;
+ MY_COLLATION_HANDLER *multilevel_pad;
+ MY_COLLATION_HANDLER *multilevel_nopad;
+} MY_COLLATION_HANDLER_PACKAGE;
+
+
+static void my_uca_handler_map(struct charset_info_st *cs,
+ const MY_COLLATION_HANDLER_PACKAGE *from,
+ const MY_COLLATION_HANDLER_PACKAGE *to)
+{
+ if (cs->coll == from->pad) cs->coll= to->pad;
+ else if (cs->coll == from->nopad) cs->coll= to->nopad;
+ else if (cs->coll == from->multilevel_pad) cs->coll= to->multilevel_pad;
+ else if (cs->coll == from->multilevel_nopad) cs->coll= to->multilevel_nopad;
+}
+
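
   How these pieces fit together at initialization time: build the tailoring
   first, then, if no weight level has contractions, swap in the faster
   handlers. The sketch below mirrors the charset-specific init hooks defined
   later in this patch; the "_xxx" suffix is a placeholder for a character set
   name, not a real symbol.

       static my_bool
       my_uca_coll_init_xxx(struct charset_info_st *cs, MY_CHARSET_LOADER *loader)
       {
         if (my_coll_init_uca(cs, loader))          /* compile the tailoring rules */
           return TRUE;                             /* propagate failure */
         if (my_uca_collation_can_optimize_no_contractions(cs))
           my_uca_handler_map(cs, &my_uca_package_xxx,                /* from */
                              &my_uca_package_no_contractions_xxx);   /* to   */
         return FALSE;
       }
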
+
+/*
+ Define generic collation handlers for multi-level collations with tailoring:
+
+ my_uca_collation_handler_nopad_multilevel_generic
+ my_uca_collation_handler_multilevel_generic
+
+ TODO: Use faster character-set specific versions of MY_COLLATION_HANDLER
+ instead of generic.
+*/
+#define MY_FUNCTION_NAME(x) my_uca_ ## x ## _generic
+#define MY_MB_WC(scanner, wc, beg, end) (scanner->cs->cset->mb_wc(scanner->cs, wc, beg, end))
+#define MY_LIKE_RANGE my_like_range_generic
+#define MY_UCA_ASCII_OPTIMIZE 0
+#define MY_UCA_COMPILE_CONTRACTIONS 1
+#define MY_UCA_COLL_INIT my_coll_init_uca
+#include "ctype-uca.ic"
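
   The block above is the instantiation pattern used throughout the rest of
   this patch: define the parameter macros, include the template, and the
   template expands a family of functions and handlers named through
   MY_FUNCTION_NAME, then (as the back-to-back instantiations below imply)
   undefines the parameters so the next instantiation can redefine them.
   A toy reduction of the same idiom, split across a template file and its
   instantiation sites; all names here are made up and are not part of the
   patch:

       /* ---- sum.ic: a toy template, analogous to ctype-uca.ic ---- */
       static int MY_FN(sum3)(int a, int b, int c)
       {
         return MY_READ(a) + MY_READ(b) + MY_READ(c);
       }
       #undef MY_FN
       #undef MY_READ

       /* ---- instantiation sites, analogous to the block above ---- */
       #define MY_FN(x)   my_ ## x ## _plain
       #define MY_READ(v) (v)
       #include "sum.ic"                    /* defines my_sum3_plain()   */

       #define MY_FN(x)   my_ ## x ## _doubled
       #define MY_READ(v) ((v) * 2)
       #include "sum.ic"                    /* defines my_sum3_doubled() */
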
/*
@@ -34336,8 +33864,8 @@ create_tailoring(struct charset_info_st *cs,
cs->uca[0]= new_uca;
if (cs->levels_for_order > 1)
cs->coll= (cs->state & MY_CS_NOPAD) ?
- &my_collation_generic_uca_nopad_handler_multilevel :
- &my_collation_any_uca_handler_multilevel;
+ &my_uca_collation_handler_nopad_multilevel_generic :
+ &my_uca_collation_handler_multilevel_generic;
ex:
(loader->free)(rules.rule);
@@ -34346,235 +33874,17 @@ ex:
return rc;
}
-/*
- Universal CHARSET_INFO compatible wrappers
- for the above internal functions.
- Should work for any character set.
-*/
-
-static my_bool
-my_coll_init_uca(struct charset_info_st *cs, MY_CHARSET_LOADER *loader)
-{
- cs->pad_char= ' ';
- cs->ctype= my_charset_utf8_unicode_ci.ctype;
- if (!cs->caseinfo)
- cs->caseinfo= &my_unicase_default;
- return create_tailoring(cs, loader);
-}
-
-
-static int my_strnncoll_any_uca(CHARSET_INFO *cs,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen,
- my_bool t_is_prefix)
-{
- return my_strnncoll_uca(cs, &my_any_uca_scanner_handler,
- s, slen, t, tlen, t_is_prefix);
-}
-
-static int my_strnncoll_any_uca_multilevel(CHARSET_INFO *cs,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen,
- my_bool t_is_prefix)
-{
- return my_strnncoll_uca_multilevel(cs, &my_any_uca_scanner_handler,
- s, slen, t, tlen, t_is_prefix);
-}
-
-static int my_strnncollsp_any_uca(CHARSET_INFO *cs,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen)
-{
- return my_strnncollsp_uca(cs, &my_any_uca_scanner_handler, s, slen, t, tlen);
-}
-
-
-static int my_strnncollsp_generic_uca_nopad(CHARSET_INFO *cs,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen)
-{
- return my_strnncoll_uca(cs, &my_any_uca_scanner_handler,
- s, slen, t, tlen, FALSE);
-}
-
-
-static int my_strnncollsp_any_uca_multilevel(CHARSET_INFO *cs,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen)
-{
- return my_strnncollsp_uca_multilevel(cs, &my_any_uca_scanner_handler,
- s, slen, t, tlen);
-}
-
-static void my_hash_sort_any_uca(CHARSET_INFO *cs,
- const uchar *s, size_t slen,
- ulong *n1, ulong *n2)
-{
- my_hash_sort_uca(cs, &my_any_uca_scanner_handler, s, slen, n1, n2);
-}
-
-static void my_hash_sort_generic_uca_nopad(CHARSET_INFO *cs,
- const uchar *s, size_t slen,
- ulong *n1, ulong *n2)
-{
- my_hash_sort_uca_nopad(cs, &my_any_uca_scanner_handler, s, slen, n1, n2);
-}
-
-static size_t my_strnxfrm_any_uca(CHARSET_INFO *cs,
- uchar *dst, size_t dstlen, uint nweights,
- const uchar *src, size_t srclen, uint flags)
-{
- return my_strnxfrm_uca(cs, &my_any_uca_scanner_handler,
- dst, dstlen, nweights, src, srclen, flags);
-}
-
-static size_t my_strnxfrm_generic_uca_nopad(CHARSET_INFO *cs,
- uchar *dst, size_t dstlen,
- uint nweights,
- const uchar *src, size_t srclen,
- uint flags)
-{
- return my_strnxfrm_uca_nopad(cs, &my_any_uca_scanner_handler,
- dst, dstlen, nweights, src, srclen, flags);
-}
-
-static size_t my_strnxfrm_any_uca_multilevel(CHARSET_INFO *cs,
- uchar *dst, size_t dstlen,
- uint nweights, const uchar *src,
- size_t srclen, uint flags)
-{
- return my_strnxfrm_uca_multilevel(cs, &my_any_uca_scanner_handler,
- dst, dstlen, nweights, src, srclen,
- flags);
-}
-
-static size_t my_strnxfrmlen_any_uca(CHARSET_INFO *cs, size_t len)
-{
- /* UCA uses 2 bytes per weight */
- return (len + cs->mbmaxlen - 1) / cs->mbmaxlen * cs->strxfrm_multiply * 2;
-}
-
-static size_t my_strnxfrmlen_any_uca_multilevel(CHARSET_INFO *cs, size_t len)
-{
- return my_strnxfrmlen_any_uca(cs, len) * cs->levels_for_order;
-}
-
-
-/* NO PAD handler for character sets with mbminlen==1 */
-MY_COLLATION_HANDLER my_collation_mb_uca_nopad_handler =
-{
- my_coll_init_uca,
- my_strnncoll_any_uca,
- my_strnncollsp_generic_uca_nopad,
- my_strnxfrm_generic_uca_nopad,
- my_strnxfrmlen_any_uca,
- my_like_range_mb,
- my_wildcmp_uca,
- NULL,
- my_instr_mb,
- my_hash_sort_generic_uca_nopad,
- my_propagate_complex
-};
-
-
-/* NO PAD handler for character sets with mbminlen>=1 */
-MY_COLLATION_HANDLER my_collation_generic_uca_nopad_handler =
-{
- my_coll_init_uca,
- my_strnncoll_any_uca,
- my_strnncollsp_generic_uca_nopad,
- my_strnxfrm_generic_uca_nopad,
- my_strnxfrmlen_any_uca,
- my_like_range_generic,
- my_wildcmp_uca,
- NULL,
- my_instr_mb,
- my_hash_sort_generic_uca_nopad,
- my_propagate_complex
-};
-
-
-MY_COLLATION_HANDLER my_collation_any_uca_handler_multilevel=
-{
- my_coll_init_uca,
- my_strnncoll_any_uca_multilevel,
- my_strnncollsp_any_uca_multilevel,
- my_strnxfrm_any_uca_multilevel,
- my_strnxfrmlen_any_uca_multilevel,
- my_like_range_generic,
- my_wildcmp_uca,
- NULL,
- my_instr_mb,
- my_hash_sort_any_uca,
- my_propagate_complex
-};
-
-
-MY_COLLATION_HANDLER my_collation_generic_uca_nopad_handler_multilevel =
-{
- my_coll_init_uca,
- my_strnncoll_any_uca_multilevel,
- my_strnncollsp_generic_uca_nopad_multilevel,
- my_strnxfrm_any_uca_multilevel,
- my_strnxfrmlen_any_uca_multilevel,
- my_like_range_generic,
- my_wildcmp_uca,
- NULL,
- my_instr_mb,
- my_hash_sort_generic_uca_nopad,
- my_propagate_complex
-};
-
#ifdef HAVE_CHARSET_ucs2
-/*
- UCS2 optimized CHARSET_INFO compatible wrappers.
-*/
-static int my_strnncoll_ucs2_uca(CHARSET_INFO *cs,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen,
- my_bool t_is_prefix)
-{
- return my_strnncoll_uca(cs, &my_any_uca_scanner_handler,
- s, slen, t, tlen, t_is_prefix);
-}
-static int my_strnncollsp_ucs2_uca(CHARSET_INFO *cs,
- const uchar *s, size_t slen,
- const uchar *t, size_t tlen)
-{
- return my_strnncollsp_uca(cs, &my_any_uca_scanner_handler, s, slen, t, tlen);
-}
-
-static void my_hash_sort_ucs2_uca(CHARSET_INFO *cs,
- const uchar *s, size_t slen,
- ulong *n1, ulong *n2)
-{
- my_hash_sort_uca(cs, &my_any_uca_scanner_handler, s, slen, n1, n2);
-}
-
-static size_t my_strnxfrm_ucs2_uca(CHARSET_INFO *cs,
- uchar *dst, size_t dstlen, uint nweights,
- const uchar *src, size_t srclen, uint flags)
-{
- return my_strnxfrm_uca(cs, &my_any_uca_scanner_handler,
- dst, dstlen, nweights, src, srclen, flags);
-}
-
-MY_COLLATION_HANDLER my_collation_ucs2_uca_handler =
-{
- my_coll_init_uca, /* init */
- my_strnncoll_ucs2_uca,
- my_strnncollsp_ucs2_uca,
- my_strnxfrm_ucs2_uca,
- my_strnxfrmlen_any_uca,
- my_like_range_generic,
- my_wildcmp_uca,
- NULL,
- my_instr_mb,
- my_hash_sort_ucs2_uca,
- my_propagate_complex
-};
+#include "ctype-ucs2.h"
+#define MY_FUNCTION_NAME(x) my_uca_ ## x ## _ucs2
+#define MY_MB_WC(scanner, wc, beg, end) (my_mb_wc_ucs2_quick(wc, beg, end))
+#define MY_LIKE_RANGE my_like_range_generic
+#define MY_UCA_ASCII_OPTIMIZE 0
+#define MY_UCA_COMPILE_CONTRACTIONS 1
+#define MY_UCA_COLL_INIT my_coll_init_uca
+#include "ctype-uca.ic"
#define MY_CS_UCS2_UCA_FLAGS (MY_CS_COMMON_UCA_FLAGS|MY_CS_NONASCII)
@@ -34609,7 +33919,7 @@ struct charset_info_st my_charset_ucs2_unicode_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_icelandic_uca_ci=
@@ -34641,7 +33951,7 @@ struct charset_info_st my_charset_ucs2_icelandic_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_latvian_uca_ci=
@@ -34673,7 +33983,7 @@ struct charset_info_st my_charset_ucs2_latvian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_romanian_uca_ci=
@@ -34705,7 +34015,7 @@ struct charset_info_st my_charset_ucs2_romanian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_slovenian_uca_ci=
@@ -34737,7 +34047,7 @@ struct charset_info_st my_charset_ucs2_slovenian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_polish_uca_ci=
@@ -34769,7 +34079,7 @@ struct charset_info_st my_charset_ucs2_polish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_estonian_uca_ci=
@@ -34801,7 +34111,7 @@ struct charset_info_st my_charset_ucs2_estonian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_spanish_uca_ci=
@@ -34833,7 +34143,7 @@ struct charset_info_st my_charset_ucs2_spanish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_swedish_uca_ci=
@@ -34865,7 +34175,7 @@ struct charset_info_st my_charset_ucs2_swedish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_turkish_uca_ci=
@@ -34897,7 +34207,7 @@ struct charset_info_st my_charset_ucs2_turkish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_czech_uca_ci=
@@ -34929,7 +34239,7 @@ struct charset_info_st my_charset_ucs2_czech_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
@@ -34962,7 +34272,7 @@ struct charset_info_st my_charset_ucs2_danish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_lithuanian_uca_ci=
@@ -34994,7 +34304,7 @@ struct charset_info_st my_charset_ucs2_lithuanian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_slovak_uca_ci=
@@ -35026,7 +34336,7 @@ struct charset_info_st my_charset_ucs2_slovak_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_spanish2_uca_ci=
@@ -35058,7 +34368,7 @@ struct charset_info_st my_charset_ucs2_spanish2_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
@@ -35091,7 +34401,7 @@ struct charset_info_st my_charset_ucs2_roman_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
@@ -35124,7 +34434,7 @@ struct charset_info_st my_charset_ucs2_persian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
@@ -35157,7 +34467,7 @@ struct charset_info_st my_charset_ucs2_esperanto_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
@@ -35190,7 +34500,7 @@ struct charset_info_st my_charset_ucs2_hungarian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_sinhala_uca_ci=
@@ -35222,7 +34532,7 @@ struct charset_info_st my_charset_ucs2_sinhala_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
@@ -35256,7 +34566,7 @@ struct charset_info_st my_charset_ucs2_german2_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
struct charset_info_st my_charset_ucs2_croatian_mysql561_uca_ci=
@@ -35288,7 +34598,7 @@ struct charset_info_st my_charset_ucs2_croatian_mysql561_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
@@ -35321,7 +34631,7 @@ struct charset_info_st my_charset_ucs2_croatian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
@@ -35354,7 +34664,7 @@ struct charset_info_st my_charset_ucs2_myanmar_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
@@ -35387,7 +34697,7 @@ struct charset_info_st my_charset_ucs2_thai_520_w2=
0, /* escape_with_backslash_is_dangerous */
2, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_any_uca_handler_multilevel
+ &my_uca_collation_handler_multilevel_ucs2
};
struct charset_info_st my_charset_ucs2_unicode_520_ci=
@@ -35419,7 +34729,7 @@ struct charset_info_st my_charset_ucs2_unicode_520_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
@@ -35452,7 +34762,7 @@ struct charset_info_st my_charset_ucs2_vietnamese_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_ucs2_uca_handler
+ &my_uca_collation_handler_ucs2
};
@@ -35485,7 +34795,7 @@ struct charset_info_st my_charset_ucs2_unicode_nopad_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_generic_uca_nopad_handler
+ &my_uca_collation_handler_nopad_ucs2
};
@@ -35518,7 +34828,7 @@ struct charset_info_st my_charset_ucs2_unicode_520_nopad_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_ucs2_handler,
- &my_collation_generic_uca_nopad_handler
+ &my_uca_collation_handler_nopad_ucs2
};
@@ -35526,20 +34836,38 @@ struct charset_info_st my_charset_ucs2_unicode_520_nopad_ci=
#ifdef HAVE_CHARSET_utf8
-MY_COLLATION_HANDLER my_collation_any_uca_handler =
+
+static my_bool
+my_uca_coll_init_utf8mb3(struct charset_info_st *cs, MY_CHARSET_LOADER *loader);
+
+#include "ctype-utf8.h"
+#define MY_FUNCTION_NAME(x) my_uca_ ## x ## _utf8mb3
+#define MY_MB_WC(scanner, wc, beg, end) (my_mb_wc_utf8mb3_quick(wc, beg, end))
+#define MY_LIKE_RANGE my_like_range_mb
+#define MY_UCA_ASCII_OPTIMIZE 1
+#define MY_UCA_COMPILE_CONTRACTIONS 1
+#define MY_UCA_COLL_INIT my_uca_coll_init_utf8mb3
+#include "ctype-uca.ic"
+
+#define MY_FUNCTION_NAME(x) my_uca_ ## x ## _no_contractions_utf8mb3
+#define MY_MB_WC(scanner, wc, beg, end) (my_mb_wc_utf8mb3_quick(wc, beg, end))
+#define MY_LIKE_RANGE my_like_range_mb
+#define MY_UCA_ASCII_OPTIMIZE 1
+#define MY_UCA_COMPILE_CONTRACTIONS 0
+#define MY_UCA_COLL_INIT my_uca_coll_init_utf8mb3
+#include "ctype-uca.ic"
+
+
+static my_bool
+my_uca_coll_init_utf8mb3(struct charset_info_st *cs, MY_CHARSET_LOADER *loader)
{
- my_coll_init_uca, /* init */
- my_strnncoll_any_uca,
- my_strnncollsp_any_uca,
- my_strnxfrm_any_uca,
- my_strnxfrmlen_any_uca,
- my_like_range_mb,
- my_wildcmp_uca,
- NULL,
- my_instr_mb,
- my_hash_sort_any_uca,
- my_propagate_complex
-};
+ if (my_coll_init_uca(cs, loader))
+ return TRUE;
+ if (my_uca_collation_can_optimize_no_contractions(cs))
+ my_uca_handler_map(cs, &my_uca_package_utf8mb3,
+ &my_uca_package_no_contractions_utf8mb3);
+ return FALSE;
+}
/*
@@ -35602,7 +34930,7 @@ struct charset_info_st my_charset_utf8_unicode_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
@@ -35635,7 +34963,7 @@ struct charset_info_st my_charset_utf8_icelandic_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_latvian_uca_ci=
@@ -35667,7 +34995,7 @@ struct charset_info_st my_charset_utf8_latvian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_romanian_uca_ci=
@@ -35699,7 +35027,7 @@ struct charset_info_st my_charset_utf8_romanian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_slovenian_uca_ci=
@@ -35731,7 +35059,7 @@ struct charset_info_st my_charset_utf8_slovenian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_polish_uca_ci=
@@ -35763,7 +35091,7 @@ struct charset_info_st my_charset_utf8_polish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_estonian_uca_ci=
@@ -35795,7 +35123,7 @@ struct charset_info_st my_charset_utf8_estonian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_spanish_uca_ci=
@@ -35827,7 +35155,7 @@ struct charset_info_st my_charset_utf8_spanish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_swedish_uca_ci=
@@ -35859,7 +35187,7 @@ struct charset_info_st my_charset_utf8_swedish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_turkish_uca_ci=
@@ -35891,7 +35219,7 @@ struct charset_info_st my_charset_utf8_turkish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_czech_uca_ci=
@@ -35923,7 +35251,7 @@ struct charset_info_st my_charset_utf8_czech_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
@@ -35956,7 +35284,7 @@ struct charset_info_st my_charset_utf8_danish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_lithuanian_uca_ci=
@@ -35988,7 +35316,7 @@ struct charset_info_st my_charset_utf8_lithuanian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_slovak_uca_ci=
@@ -36020,7 +35348,7 @@ struct charset_info_st my_charset_utf8_slovak_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_spanish2_uca_ci=
@@ -36052,7 +35380,7 @@ struct charset_info_st my_charset_utf8_spanish2_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_roman_uca_ci=
@@ -36084,7 +35412,7 @@ struct charset_info_st my_charset_utf8_roman_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_persian_uca_ci=
@@ -36116,7 +35444,7 @@ struct charset_info_st my_charset_utf8_persian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_esperanto_uca_ci=
@@ -36148,7 +35476,7 @@ struct charset_info_st my_charset_utf8_esperanto_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_hungarian_uca_ci=
@@ -36180,7 +35508,7 @@ struct charset_info_st my_charset_utf8_hungarian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_sinhala_uca_ci=
@@ -36212,7 +35540,7 @@ struct charset_info_st my_charset_utf8_sinhala_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
@@ -36245,7 +35573,7 @@ struct charset_info_st my_charset_utf8_german2_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_croatian_mysql561_uca_ci=
@@ -36277,7 +35605,7 @@ struct charset_info_st my_charset_utf8_croatian_mysql561_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
@@ -36310,7 +35638,7 @@ struct charset_info_st my_charset_utf8_croatian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
@@ -36343,7 +35671,7 @@ struct charset_info_st my_charset_utf8_myanmar_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
@@ -36376,7 +35704,7 @@ struct charset_info_st my_charset_utf8_unicode_520_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
struct charset_info_st my_charset_utf8_thai_520_w2=
@@ -36408,7 +35736,7 @@ struct charset_info_st my_charset_utf8_thai_520_w2=
0, /* escape_with_backslash_is_dangerous */
2, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler_multilevel
+ &my_uca_collation_handler_multilevel_utf8mb3
};
struct charset_info_st my_charset_utf8_vietnamese_ci=
@@ -36440,7 +35768,7 @@ struct charset_info_st my_charset_utf8_vietnamese_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb3
};
@@ -36473,7 +35801,7 @@ struct charset_info_st my_charset_utf8_unicode_nopad_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_mb_uca_nopad_handler
+ &my_uca_collation_handler_nopad_utf8mb3
};
@@ -36506,7 +35834,7 @@ struct charset_info_st my_charset_utf8_unicode_520_nopad_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8_handler,
- &my_collation_mb_uca_nopad_handler
+ &my_uca_collation_handler_nopad_utf8mb3
};
#endif /* HAVE_CHARSET_utf8 */
@@ -36514,6 +35842,39 @@ struct charset_info_st my_charset_utf8_unicode_520_nopad_ci=
#ifdef HAVE_CHARSET_utf8mb4
+static my_bool
+my_uca_coll_init_utf8mb4(struct charset_info_st *cs, MY_CHARSET_LOADER *loader);
+
+
+#define MY_FUNCTION_NAME(x) my_uca_ ## x ## _utf8mb4
+#define MY_MB_WC(scanner, wc, beg, end) (my_mb_wc_utf8mb4_quick(wc, beg, end))
+#define MY_LIKE_RANGE my_like_range_mb
+#define MY_UCA_ASCII_OPTIMIZE 1
+#define MY_UCA_COMPILE_CONTRACTIONS 1
+#define MY_UCA_COLL_INIT my_uca_coll_init_utf8mb4
+#include "ctype-uca.ic"
+
+#define MY_FUNCTION_NAME(x) my_uca_ ## x ## _no_contractions_utf8mb4
+#define MY_MB_WC(scanner, wc, beg, end) (my_mb_wc_utf8mb4_quick(wc, beg, end))
+#define MY_LIKE_RANGE my_like_range_mb
+#define MY_UCA_ASCII_OPTIMIZE 1
+#define MY_UCA_COMPILE_CONTRACTIONS 0
+#define MY_UCA_COLL_INIT my_uca_coll_init_utf8mb4
+#include "ctype-uca.ic"
+
+
+static my_bool
+my_uca_coll_init_utf8mb4(struct charset_info_st *cs, MY_CHARSET_LOADER *loader)
+{
+ if (my_coll_init_uca(cs, loader))
+ return TRUE;
+ if (my_uca_collation_can_optimize_no_contractions(cs))
+ my_uca_handler_map(cs, &my_uca_package_utf8mb4,
+ &my_uca_package_no_contractions_utf8mb4);
+ return FALSE;
+}
+
+
extern MY_CHARSET_HANDLER my_charset_utf8mb4_handler;
#define MY_CS_UTF8MB4_UCA_FLAGS (MY_CS_COMMON_UCA_FLAGS|MY_CS_UNICODE_SUPPLEMENT)
@@ -36548,7 +35909,7 @@ struct charset_info_st my_charset_utf8mb4_unicode_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
@@ -36581,7 +35942,7 @@ struct charset_info_st my_charset_utf8mb4_icelandic_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_latvian_uca_ci=
@@ -36613,7 +35974,7 @@ struct charset_info_st my_charset_utf8mb4_latvian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_romanian_uca_ci=
@@ -36645,7 +36006,7 @@ struct charset_info_st my_charset_utf8mb4_romanian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_slovenian_uca_ci=
@@ -36677,7 +36038,7 @@ struct charset_info_st my_charset_utf8mb4_slovenian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_polish_uca_ci=
@@ -36709,7 +36070,7 @@ struct charset_info_st my_charset_utf8mb4_polish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_estonian_uca_ci=
@@ -36741,7 +36102,7 @@ struct charset_info_st my_charset_utf8mb4_estonian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_spanish_uca_ci=
@@ -36773,7 +36134,7 @@ struct charset_info_st my_charset_utf8mb4_spanish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_swedish_uca_ci=
@@ -36805,7 +36166,7 @@ struct charset_info_st my_charset_utf8mb4_swedish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_turkish_uca_ci=
@@ -36837,7 +36198,7 @@ struct charset_info_st my_charset_utf8mb4_turkish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_czech_uca_ci=
@@ -36869,7 +36230,7 @@ struct charset_info_st my_charset_utf8mb4_czech_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
@@ -36902,7 +36263,7 @@ struct charset_info_st my_charset_utf8mb4_danish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_lithuanian_uca_ci=
@@ -36934,7 +36295,7 @@ struct charset_info_st my_charset_utf8mb4_lithuanian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_slovak_uca_ci=
@@ -36966,7 +36327,7 @@ struct charset_info_st my_charset_utf8mb4_slovak_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_spanish2_uca_ci=
@@ -36998,7 +36359,7 @@ struct charset_info_st my_charset_utf8mb4_spanish2_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_roman_uca_ci=
@@ -37030,7 +36391,7 @@ struct charset_info_st my_charset_utf8mb4_roman_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_persian_uca_ci=
@@ -37062,7 +36423,7 @@ struct charset_info_st my_charset_utf8mb4_persian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_esperanto_uca_ci=
@@ -37094,7 +36455,7 @@ struct charset_info_st my_charset_utf8mb4_esperanto_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_hungarian_uca_ci=
@@ -37126,7 +36487,7 @@ struct charset_info_st my_charset_utf8mb4_hungarian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_sinhala_uca_ci=
@@ -37158,7 +36519,7 @@ struct charset_info_st my_charset_utf8mb4_sinhala_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_german2_uca_ci=
@@ -37190,7 +36551,7 @@ struct charset_info_st my_charset_utf8mb4_german2_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_croatian_mysql561_uca_ci=
@@ -37222,7 +36583,7 @@ struct charset_info_st my_charset_utf8mb4_croatian_mysql561_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
@@ -37255,7 +36616,7 @@ struct charset_info_st my_charset_utf8mb4_croatian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
@@ -37288,7 +36649,7 @@ struct charset_info_st my_charset_utf8mb4_myanmar_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_thai_520_w2=
@@ -37320,7 +36681,7 @@ struct charset_info_st my_charset_utf8mb4_thai_520_w2=
0, /* escape_with_backslash_is_dangerous */
2, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler_multilevel
+ &my_uca_collation_handler_multilevel_utf8mb4
};
struct charset_info_st my_charset_utf8mb4_unicode_520_ci=
@@ -37352,7 +36713,7 @@ struct charset_info_st my_charset_utf8mb4_unicode_520_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
@@ -37385,7 +36746,7 @@ struct charset_info_st my_charset_utf8mb4_vietnamese_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_any_uca_handler
+ &my_uca_collation_handler_utf8mb4
};
@@ -37418,7 +36779,7 @@ struct charset_info_st my_charset_utf8mb4_unicode_nopad_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_mb_uca_nopad_handler
+ &my_uca_collation_handler_nopad_utf8mb4
};
@@ -37451,7 +36812,7 @@ struct charset_info_st my_charset_utf8mb4_unicode_520_nopad_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf8mb4_handler,
- &my_collation_mb_uca_nopad_handler
+ &my_uca_collation_handler_nopad_utf8mb4
};
@@ -37460,20 +36821,14 @@ struct charset_info_st my_charset_utf8mb4_unicode_520_nopad_ci=
#ifdef HAVE_CHARSET_utf32
-MY_COLLATION_HANDLER my_collation_utf32_uca_handler =
-{
- my_coll_init_uca, /* init */
- my_strnncoll_any_uca,
- my_strnncollsp_any_uca,
- my_strnxfrm_any_uca,
- my_strnxfrmlen_any_uca,
- my_like_range_generic,
- my_wildcmp_uca,
- NULL,
- my_instr_mb,
- my_hash_sort_any_uca,
- my_propagate_complex
-};
+#include "ctype-utf32.h"
+#define MY_FUNCTION_NAME(x) my_uca_ ## x ## _utf32
+#define MY_MB_WC(scanner, wc, beg, end) (my_mb_wc_utf32_quick(wc, beg, end))
+#define MY_LIKE_RANGE my_like_range_generic
+#define MY_UCA_ASCII_OPTIMIZE 0
+#define MY_UCA_COMPILE_CONTRACTIONS 1
+#define MY_UCA_COLL_INIT my_coll_init_uca
+#include "ctype-uca.ic"
extern MY_CHARSET_HANDLER my_charset_utf32_handler;
@@ -37510,7 +36865,7 @@ struct charset_info_st my_charset_utf32_unicode_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
@@ -37543,7 +36898,7 @@ struct charset_info_st my_charset_utf32_icelandic_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_latvian_uca_ci=
@@ -37575,7 +36930,7 @@ struct charset_info_st my_charset_utf32_latvian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_romanian_uca_ci=
@@ -37607,7 +36962,7 @@ struct charset_info_st my_charset_utf32_romanian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_slovenian_uca_ci=
@@ -37639,7 +36994,7 @@ struct charset_info_st my_charset_utf32_slovenian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_polish_uca_ci=
@@ -37671,7 +37026,7 @@ struct charset_info_st my_charset_utf32_polish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_estonian_uca_ci=
@@ -37703,7 +37058,7 @@ struct charset_info_st my_charset_utf32_estonian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_spanish_uca_ci=
@@ -37735,7 +37090,7 @@ struct charset_info_st my_charset_utf32_spanish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_swedish_uca_ci=
@@ -37767,7 +37122,7 @@ struct charset_info_st my_charset_utf32_swedish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_turkish_uca_ci=
@@ -37799,7 +37154,7 @@ struct charset_info_st my_charset_utf32_turkish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_czech_uca_ci=
@@ -37831,7 +37186,7 @@ struct charset_info_st my_charset_utf32_czech_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
@@ -37864,7 +37219,7 @@ struct charset_info_st my_charset_utf32_danish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_lithuanian_uca_ci=
@@ -37896,7 +37251,7 @@ struct charset_info_st my_charset_utf32_lithuanian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_slovak_uca_ci=
@@ -37928,7 +37283,7 @@ struct charset_info_st my_charset_utf32_slovak_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_spanish2_uca_ci=
@@ -37960,7 +37315,7 @@ struct charset_info_st my_charset_utf32_spanish2_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_roman_uca_ci=
@@ -37992,7 +37347,7 @@ struct charset_info_st my_charset_utf32_roman_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_persian_uca_ci=
@@ -38024,7 +37379,7 @@ struct charset_info_st my_charset_utf32_persian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_esperanto_uca_ci=
@@ -38056,7 +37411,7 @@ struct charset_info_st my_charset_utf32_esperanto_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_hungarian_uca_ci=
@@ -38088,7 +37443,7 @@ struct charset_info_st my_charset_utf32_hungarian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_sinhala_uca_ci=
@@ -38120,7 +37475,7 @@ struct charset_info_st my_charset_utf32_sinhala_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_german2_uca_ci=
@@ -38152,7 +37507,7 @@ struct charset_info_st my_charset_utf32_german2_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_croatian_mysql561_uca_ci=
@@ -38184,7 +37539,7 @@ struct charset_info_st my_charset_utf32_croatian_mysql561_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
struct charset_info_st my_charset_utf32_croatian_uca_ci=
@@ -38216,7 +37571,7 @@ struct charset_info_st my_charset_utf32_croatian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
@@ -38249,7 +37604,7 @@ struct charset_info_st my_charset_utf32_myanmar_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
@@ -38282,7 +37637,7 @@ struct charset_info_st my_charset_utf32_thai_520_w2=
0, /* escape_with_backslash_is_dangerous */
2, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_any_uca_handler_multilevel
+ &my_uca_collation_handler_multilevel_utf32
};
@@ -38315,7 +37670,7 @@ struct charset_info_st my_charset_utf32_unicode_520_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
@@ -38348,7 +37703,7 @@ struct charset_info_st my_charset_utf32_vietnamese_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_utf32_uca_handler
+ &my_uca_collation_handler_utf32
};
@@ -38381,7 +37736,7 @@ struct charset_info_st my_charset_utf32_unicode_nopad_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_generic_uca_nopad_handler
+ &my_uca_collation_handler_nopad_utf32
};
@@ -38414,7 +37769,7 @@ struct charset_info_st my_charset_utf32_unicode_520_nopad_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf32_handler,
- &my_collation_generic_uca_nopad_handler
+ &my_uca_collation_handler_nopad_utf32
};
@@ -38424,21 +37779,14 @@ struct charset_info_st my_charset_utf32_unicode_520_nopad_ci=
#ifdef HAVE_CHARSET_utf16
-
-MY_COLLATION_HANDLER my_collation_utf16_uca_handler =
-{
- my_coll_init_uca, /* init */
- my_strnncoll_any_uca,
- my_strnncollsp_any_uca,
- my_strnxfrm_any_uca,
- my_strnxfrmlen_any_uca,
- my_like_range_generic,
- my_wildcmp_uca,
- NULL,
- my_instr_mb,
- my_hash_sort_any_uca,
- my_propagate_complex
-};
+#include "ctype-utf16.h"
+#define MY_FUNCTION_NAME(x) my_uca_ ## x ## _utf16
+#define MY_MB_WC(scanner, wc, beg, end) (my_mb_wc_utf16_quick(wc, beg, end))
+#define MY_LIKE_RANGE my_like_range_generic
+#define MY_UCA_ASCII_OPTIMIZE 0
+#define MY_UCA_COMPILE_CONTRACTIONS 1
+#define MY_UCA_COLL_INIT my_coll_init_uca
+#include "ctype-uca.ic"
extern MY_CHARSET_HANDLER my_charset_utf16_handler;
@@ -38475,7 +37823,7 @@ struct charset_info_st my_charset_utf16_unicode_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
@@ -38508,7 +37856,7 @@ struct charset_info_st my_charset_utf16_icelandic_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_latvian_uca_ci=
@@ -38540,7 +37888,7 @@ struct charset_info_st my_charset_utf16_latvian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_romanian_uca_ci=
@@ -38572,7 +37920,7 @@ struct charset_info_st my_charset_utf16_romanian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_slovenian_uca_ci=
@@ -38604,7 +37952,7 @@ struct charset_info_st my_charset_utf16_slovenian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_polish_uca_ci=
@@ -38636,7 +37984,7 @@ struct charset_info_st my_charset_utf16_polish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_estonian_uca_ci=
@@ -38668,7 +38016,7 @@ struct charset_info_st my_charset_utf16_estonian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_spanish_uca_ci=
@@ -38700,7 +38048,7 @@ struct charset_info_st my_charset_utf16_spanish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_swedish_uca_ci=
@@ -38732,7 +38080,7 @@ struct charset_info_st my_charset_utf16_swedish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_turkish_uca_ci=
@@ -38764,7 +38112,7 @@ struct charset_info_st my_charset_utf16_turkish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_czech_uca_ci=
@@ -38796,7 +38144,7 @@ struct charset_info_st my_charset_utf16_czech_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
@@ -38829,7 +38177,7 @@ struct charset_info_st my_charset_utf16_danish_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_lithuanian_uca_ci=
@@ -38861,7 +38209,7 @@ struct charset_info_st my_charset_utf16_lithuanian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_slovak_uca_ci=
@@ -38893,7 +38241,7 @@ struct charset_info_st my_charset_utf16_slovak_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_spanish2_uca_ci=
@@ -38925,7 +38273,7 @@ struct charset_info_st my_charset_utf16_spanish2_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_roman_uca_ci=
@@ -38957,7 +38305,7 @@ struct charset_info_st my_charset_utf16_roman_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_persian_uca_ci=
@@ -38989,7 +38337,7 @@ struct charset_info_st my_charset_utf16_persian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_esperanto_uca_ci=
@@ -39021,7 +38369,7 @@ struct charset_info_st my_charset_utf16_esperanto_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_hungarian_uca_ci=
@@ -39053,7 +38401,7 @@ struct charset_info_st my_charset_utf16_hungarian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_sinhala_uca_ci=
@@ -39085,7 +38433,7 @@ struct charset_info_st my_charset_utf16_sinhala_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
struct charset_info_st my_charset_utf16_german2_uca_ci=
@@ -39117,7 +38465,7 @@ struct charset_info_st my_charset_utf16_german2_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
@@ -39150,7 +38498,7 @@ struct charset_info_st my_charset_utf16_croatian_mysql561_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
@@ -39183,7 +38531,7 @@ struct charset_info_st my_charset_utf16_croatian_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
@@ -39216,7 +38564,7 @@ struct charset_info_st my_charset_utf16_myanmar_uca_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
@@ -39249,7 +38597,7 @@ struct charset_info_st my_charset_utf16_thai_520_w2=
0, /* escape_with_backslash_is_dangerous */
2, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_any_uca_handler_multilevel
+ &my_uca_collation_handler_multilevel_utf16
};
@@ -39282,7 +38630,7 @@ struct charset_info_st my_charset_utf16_unicode_520_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
@@ -39315,7 +38663,7 @@ struct charset_info_st my_charset_utf16_vietnamese_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_utf16_uca_handler
+ &my_uca_collation_handler_utf16
};
@@ -39348,7 +38696,7 @@ struct charset_info_st my_charset_utf16_unicode_nopad_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_generic_uca_nopad_handler
+ &my_uca_collation_handler_nopad_utf16
};
@@ -39381,7 +38729,7 @@ struct charset_info_st my_charset_utf16_unicode_520_nopad_ci=
0, /* escape_with_backslash_is_dangerous */
1, /* levels_for_order */
&my_charset_utf16_handler,
- &my_collation_generic_uca_nopad_handler
+ &my_uca_collation_handler_nopad_utf16
};
diff --git a/strings/ctype-uca.ic b/strings/ctype-uca.ic
new file mode 100644
index 00000000000..70c10199e3e
--- /dev/null
+++ b/strings/ctype-uca.ic
@@ -0,0 +1,839 @@
+/*
+ Copyright (c) 2018 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+
+#ifndef MY_FUNCTION_NAME
+#error MY_FUNCTION_NAME is not defined
+#endif
+#ifndef MY_MB_WC
+#error MY_MB_WC is not defined
+#endif
+#ifndef MY_LIKE_RANGE
+#error MY_LIKE_RANGE is not defined
+#endif
+#ifndef MY_UCA_ASCII_OPTIMIZE
+#error MY_UCA_ASCII_OPTIMIZE is not defined
+#endif
+#ifndef MY_UCA_COMPILE_CONTRACTIONS
+#error MY_UCA_COMPILE_CONTRACTIONS is not defined
+#endif
+#ifndef MY_UCA_COLL_INIT
+#error MY_UCA_COLL_INIT is not defined
+#endif
+
+
+static inline int
+MY_FUNCTION_NAME(scanner_next)(my_uca_scanner *scanner)
+{
+ /*
+    Check if the weights for the previous character have already been
+    fully scanned. If so, get the next character and
+ initialize wbeg and wlength to its weight string.
+ */
+
+ if (scanner->wbeg[0]) /* More weights left from the previous step: */
+ return *scanner->wbeg++; /* return the next weight from expansion */
+
+ do
+ {
+ const uint16 *wpage;
+ my_wc_t wc[MY_UCA_MAX_CONTRACTION];
+ int mblen;
+
+ /* Get next character */
+#if MY_UCA_ASCII_OPTIMIZE
+ /* Get next ASCII character */
+ if (scanner->sbeg < scanner->send && scanner->sbeg[0] < 0x80)
+ {
+ wc[0]= scanner->sbeg[0];
+ scanner->sbeg+= 1;
+
+#if MY_UCA_COMPILE_CONTRACTIONS
+ if (my_uca_needs_context_handling(scanner->level, wc[0]))
+ {
+ uint16 *cweight= my_uca_context_weight_find(scanner, wc);
+ if (cweight)
+ return *cweight;
+ }
+#endif
+
+ scanner->page= 0;
+ scanner->code= (int) wc[0];
+ scanner->wbeg= scanner->level->weights[0] + scanner->code * scanner->level->lengths[0];
+ if (scanner->wbeg[0])
+ return *scanner->wbeg++;
+ continue;
+ }
+ else
+#endif
+ /* Get next MB character */
+ if (((mblen= MY_MB_WC(scanner, wc, scanner->sbeg,
+ scanner->send)) <= 0))
+ {
+ if (scanner->sbeg >= scanner->send)
+ return -1; /* No more bytes, end of line reached */
+ /*
+        There are some more bytes left. A non-positive mblen means that
+ we got an incomplete or a bad byte sequence. Consume mbminlen bytes.
+ */
+ if ((scanner->sbeg+= scanner->cs->mbminlen) > scanner->send)
+ {
+ /* For safety purposes don't go beyond the string range. */
+ scanner->sbeg= scanner->send;
+ }
+ /*
+ Treat every complete or incomplete mbminlen unit as a weight which is
+        greater than the weight of any possible normal character.
+ 0xFFFF is greater than any possible weight in the UCA weight table.
+ */
+ return 0xFFFF;
+ }
+
+ scanner->sbeg+= mblen;
+ if (wc[0] > scanner->level->maxchar)
+ {
+ /* Return 0xFFFD as weight for all characters outside BMP */
+ scanner->wbeg= nochar;
+ return 0xFFFD;
+ }
+
+#if MY_UCA_COMPILE_CONTRACTIONS
+ if (my_uca_needs_context_handling(scanner->level, wc[0]))
+ {
+ uint16 *cweight= my_uca_context_weight_find(scanner, wc);
+ if (cweight)
+ return *cweight;
+ }
+#endif
+
+ /* Process single character */
+ scanner->page= wc[0] >> 8;
+ scanner->code= wc[0] & 0xFF;
+
+    /* If the weight page for wc[0] does not exist, then calculate algorithmically */
+ if (!(wpage= scanner->level->weights[scanner->page]))
+ return my_uca_scanner_next_implicit(scanner);
+
+    /* Calculate pointer to wc[0]'s weight, using page and offset */
+ scanner->wbeg= wpage +
+ scanner->code * scanner->level->lengths[scanner->page];
+ } while (!scanner->wbeg[0]); /* Skip ignorable characters */
+
+ return *scanner->wbeg++;
+}
+
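A short sketch of how code in an instantiating translation unit would drain this weight stream (function names follow the illustrative instantiation sketched earlier, and cs is assumed to be the CHARSET_INFO of the instantiated collation). One input character may produce several weights (an expansion), and ignorable characters produce none.

    my_uca_scanner scanner;
    int w;
    const uchar *str= (const uchar *) "abc";

    my_uca_scanner_init_any(&scanner, cs, &cs->uca->level[0], str, 3);
    while ((w= my_uca_scanner_next_utf8mb3_example(&scanner)) > 0)
    {
      /* w is one 16-bit weight; an expansion delivers its extra weights on
         the following calls, ignorable characters are skipped entirely. */
    }
    /* A negative return value means the end of the input was reached. */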
+
+
+/*
+ Compares two strings according to the collation
+
+ SYNOPSIS:
+ strnncoll_onelevel()
+ cs Character set information
+ level Weight level (0 primary, 1 secondary, 2 tertiary, etc)
+ s First string
+ slen First string length
+ t Second string
+    tlen        Second string length
+    t_is_prefix TRUE to treat t as a prefix: reaching its end counts as a match
+
+ NOTES:
+ Initializes two weight scanners and gets weights
+ corresponding to two strings in a loop. If weights are not
+ the same at some step then returns their difference.
+
+ In the while() comparison these situations are possible:
+ 1. (s_res>0) and (t_res>0) and (s_res == t_res)
+ Weights are the same so far, continue comparison
+ 2. (s_res>0) and (t_res>0) and (s_res!=t_res)
+ A difference has been found, return.
+ 3. (s_res>0) and (t_res<0)
+ We have reached the end of the second string, or found
+ an illegal multibyte sequence in the second string.
+ Return a positive number, i.e. the first string is bigger.
+ 4. (s_res<0) and (t_res>0)
+ We have reached the end of the first string, or found
+ an illegal multibyte sequence in the first string.
+ Return a negative number, i.e. the second string is bigger.
+ 5. (s_res<0) and (t_res<0)
+      Both scanners returned -1. It means we have reached
+      the end-of-string or an illegal sequence in both strings
+ at the same time. Return 0, strings are equal.
+
+ RETURN
+ Difference between two strings, according to the collation:
+ 0 - means strings are equal
+ negative number - means the first string is smaller
+ positive number - means the first string is bigger
+*/
+
+static int
+MY_FUNCTION_NAME(strnncoll_onelevel)(CHARSET_INFO *cs,
+ const MY_UCA_WEIGHT_LEVEL *level,
+ const uchar *s, size_t slen,
+ const uchar *t, size_t tlen,
+ my_bool t_is_prefix)
+{
+ my_uca_scanner sscanner;
+ my_uca_scanner tscanner;
+ int s_res;
+ int t_res;
+
+ my_uca_scanner_init_any(&sscanner, cs, level, s, slen);
+ my_uca_scanner_init_any(&tscanner, cs, level, t, tlen);
+
+ do
+ {
+ s_res= MY_FUNCTION_NAME(scanner_next)(&sscanner);
+ t_res= MY_FUNCTION_NAME(scanner_next)(&tscanner);
+ } while ( s_res == t_res && s_res >0);
+
+ return (t_is_prefix && t_res < 0) ? 0 : (s_res - t_res);
+}
+
+
+/*
+ One-level, PAD SPACE.
+*/
+static int
+MY_FUNCTION_NAME(strnncoll)(CHARSET_INFO *cs,
+ const uchar *s, size_t slen,
+ const uchar *t, size_t tlen,
+ my_bool t_is_prefix)
+{
+ return MY_FUNCTION_NAME(strnncoll_onelevel)(cs, &cs->uca->level[0],
+ s, slen, t, tlen, t_is_prefix);
+}
+
+
+/*
+ Multi-level, PAD SPACE.
+*/
+static int
+MY_FUNCTION_NAME(strnncoll_multilevel)(CHARSET_INFO *cs,
+ const uchar *s, size_t slen,
+ const uchar *t, size_t tlen,
+ my_bool t_is_prefix)
+{
+ uint i, num_level= cs->levels_for_order;
+ for (i= 0; i != num_level; i++)
+ {
+ int ret= MY_FUNCTION_NAME(strnncoll_onelevel)(cs, &cs->uca->level[i],
+ s, slen, t, tlen,
+ t_is_prefix);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+
+/*
+ Compares two strings according to the collation,
+ ignoring trailing spaces.
+
+ SYNOPSIS:
+ strnncollsp_onelevel()
+ cs Character set information
+ level UCA weight level
+ s First string
+ slen First string length
+ t Second string
+    tlen        Second string length
+
+ NOTES:
+    Works exactly the same as my_strnncoll_uca(),
+ but ignores trailing spaces.
+
+ In the while() comparison these situations are possible:
+ 1. (s_res>0) and (t_res>0) and (s_res == t_res)
+ Weights are the same so far, continue comparison
+ 2. (s_res>0) and (t_res>0) and (s_res!=t_res)
+ A difference has been found, return.
+ 3. (s_res>0) and (t_res<0)
+ We have reached the end of the second string, or found
+ an illegal multibyte sequence in the second string.
+ Compare the first string to an infinite array of
+ space characters until difference is found, or until
+ the end of the first string.
+ 4. (s_res<0) and (t_res>0)
+ We have reached the end of the first string, or found
+ an illegal multibyte sequence in the first string.
+ Compare the second string to an infinite array of
+ space characters until difference is found or until
+      the end of the second string.
+ 5. (s_res<0) and (t_res<0)
+      Both scanners returned -1. It means we have reached
+      the end-of-string or an illegal sequence in both strings
+ at the same time. Return 0, strings are equal.
+
+ RETURN
+ Difference between two strings, according to the collation:
+ 0 - means strings are equal
+ negative number - means the first string is smaller
+ positive number - means the first string is bigger
+*/
+
+static int
+MY_FUNCTION_NAME(strnncollsp_onelevel)(CHARSET_INFO *cs,
+ const MY_UCA_WEIGHT_LEVEL *level,
+ const uchar *s, size_t slen,
+ const uchar *t, size_t tlen)
+{
+ my_uca_scanner sscanner, tscanner;
+ int s_res, t_res;
+
+ my_uca_scanner_init_any(&sscanner, cs, level, s, slen);
+ my_uca_scanner_init_any(&tscanner, cs, level, t, tlen);
+
+ do
+ {
+ s_res= MY_FUNCTION_NAME(scanner_next)(&sscanner);
+ t_res= MY_FUNCTION_NAME(scanner_next)(&tscanner);
+ } while ( s_res == t_res && s_res >0);
+
+ if (s_res > 0 && t_res < 0)
+ {
+ /* Calculate weight for SPACE character */
+ t_res= my_space_weight(level);
+
+ /* compare the first string to spaces */
+ do
+ {
+ if (s_res != t_res)
+ return (s_res - t_res);
+ s_res= MY_FUNCTION_NAME(scanner_next)(&sscanner);
+ } while (s_res > 0);
+ return 0;
+ }
+
+ if (s_res < 0 && t_res > 0)
+ {
+ /* Calculate weight for SPACE character */
+ s_res= my_space_weight(level);
+
+ /* compare the second string to spaces */
+ do
+ {
+ if (s_res != t_res)
+ return (s_res - t_res);
+ t_res= MY_FUNCTION_NAME(scanner_next)(&tscanner);
+ } while (t_res > 0);
+ return 0;
+ }
+
+ return ( s_res - t_res );
+}
+
+
+/*
+ One-level, PAD SPACE
+*/
+static int
+MY_FUNCTION_NAME(strnncollsp)(CHARSET_INFO *cs,
+ const uchar *s, size_t slen,
+ const uchar *t, size_t tlen)
+{
+ return MY_FUNCTION_NAME(strnncollsp_onelevel)(cs, &cs->uca->level[0],
+ s, slen, t, tlen);
+}
+
+
+/*
+ One-level, NO PAD
+*/
+static int
+MY_FUNCTION_NAME(strnncollsp_nopad)(CHARSET_INFO *cs,
+ const uchar *s, size_t slen,
+ const uchar *t, size_t tlen)
+{
+ return MY_FUNCTION_NAME(strnncoll_onelevel)(cs, &cs->uca->level[0],
+ s, slen, t, tlen, FALSE);
+}
+
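To illustrate the difference between the PAD SPACE and NO PAD entry points above (function names again follow the earlier illustrative instantiation, and cs is assumed to be the matching CHARSET_INFO): trailing spaces are insignificant for the former and significant for the latter.

    const uchar *a= (const uchar *) "abc";
    const uchar *b= (const uchar *) "abc  ";

    /* PAD SPACE: the shorter string is implicitly padded with space weights */
    int r_pad= my_uca_strnncollsp_utf8mb3_example(cs, a, 3, b, 5);          /* == 0 */

    /* NO PAD: "abc" runs out of weights first, so it compares as smaller */
    int r_nopad= my_uca_strnncollsp_nopad_utf8mb3_example(cs, a, 3, b, 5);  /* < 0 */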
+
+/*
+ Multi-level, PAD SPACE
+*/
+static int
+MY_FUNCTION_NAME(strnncollsp_multilevel)(CHARSET_INFO *cs,
+ const uchar *s, size_t slen,
+ const uchar *t, size_t tlen)
+{
+
+ uint i, num_level= cs->levels_for_order;
+ for (i= 0; i != num_level; i++)
+ {
+ int ret= MY_FUNCTION_NAME(strnncollsp_onelevel)(cs, &cs->uca->level[i],
+ s, slen, t, tlen);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+
+/*
+ Multi-level, NO PAD
+*/
+static int
+MY_FUNCTION_NAME(strnncollsp_nopad_multilevel)(CHARSET_INFO *cs,
+ const uchar *s, size_t slen,
+ const uchar *t, size_t tlen)
+{
+ uint num_level= cs->levels_for_order;
+ uint i;
+ for (i= 0; i != num_level; i++)
+ {
+ int ret= MY_FUNCTION_NAME(strnncoll_onelevel)(cs, &cs->uca->level[i],
+ s, slen, t, tlen, FALSE);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+
+
+/*
+ Calculates hash value for the given string,
+ according to the collation, and ignoring trailing spaces.
+
+ SYNOPSIS:
+ hash_sort()
+ cs Character set information
+ s String
+ slen String's length
+ n1 First hash parameter
+ n2 Second hash parameter
+
+ NOTES:
+    Scans the weights consecutively and updates the
+ hash parameters n1 and n2. In a case insensitive collation,
+ upper and lower case of the same letter will return the same
+ weight sequence, and thus will produce the same hash values
+ in n1 and n2.
+
+    This function is used for one-level and for multi-level collations.
+    We intentionally use only the primary level in multi-level collations.
+    This helps PARTITION BY KEY put records that are equal on the primary
+    level into the same partition. E.g. in utf8_thai_520_ci, records that differ
+ only in tone marks go into the same partition.
+
+ RETURN
+ N/A
+*/
+
+static void
+MY_FUNCTION_NAME(hash_sort)(CHARSET_INFO *cs,
+ const uchar *s, size_t slen,
+ ulong *nr1, ulong *nr2)
+{
+ int s_res;
+ my_uca_scanner scanner;
+ int space_weight= my_space_weight(&cs->uca->level[0]);
+ register ulong m1= *nr1, m2= *nr2;
+
+ my_uca_scanner_init_any(&scanner, cs, &cs->uca->level[0], s, slen);
+
+ while ((s_res= MY_FUNCTION_NAME(scanner_next)(&scanner)) >0)
+ {
+ if (s_res == space_weight)
+ {
+ /* Combine all spaces to be able to skip end spaces */
+ uint count= 0;
+ do
+ {
+ count++;
+ if ((s_res= MY_FUNCTION_NAME(scanner_next)(&scanner)) <= 0)
+ {
+          /* Skip spaces at the end of the string */
+ goto end;
+ }
+ }
+ while (s_res == space_weight);
+
+      /* Add back the hash for the space characters */
+ do
+ {
+ /*
+        We can't use MY_HASH_ADD_16() here because of a mistake in the
+        original code: the 16-bit value was added with its bytes in the
+        opposite order. Changing this now would cause old partitioned
+        tables to fail.
+ */
+ MY_HASH_ADD(m1, m2, space_weight >> 8);
+ MY_HASH_ADD(m1, m2, space_weight & 0xFF);
+ }
+ while (--count != 0);
+
+ }
+ /* See comment above why we can't use MY_HASH_ADD_16() */
+ MY_HASH_ADD(m1, m2, s_res >> 8);
+ MY_HASH_ADD(m1, m2, s_res & 0xFF);
+ }
+end:
+ *nr1= m1;
+ *nr2= m2;
+}
+
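A hedged usage sketch: callers normally reach this function through the collation handler (cs->coll->hash_sort) and pass two running accumulators. Because only weights are hashed, strings that the collation considers equal, for example differing only in letter case or only in trailing spaces for a PAD SPACE collation, produce the same hash.

    ulong nr1= 1, nr2= 4;     /* seed values conventionally used in the server */
    const uchar *key= (const uchar *) "MariaDB   ";

    cs->coll->hash_sort(cs, key, 10, &nr1, &nr2);
    /* For a case-insensitive PAD SPACE collation, hashing "mariadb" with the
       same seeds would leave nr1/nr2 with identical values. */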
+
+static void
+MY_FUNCTION_NAME(hash_sort_nopad)(CHARSET_INFO *cs,
+ const uchar *s, size_t slen,
+ ulong *nr1, ulong *nr2)
+{
+ int s_res;
+ my_uca_scanner scanner;
+ register ulong m1= *nr1, m2= *nr2;
+
+ my_uca_scanner_init_any(&scanner, cs, &cs->uca->level[0], s, slen);
+
+ while ((s_res= MY_FUNCTION_NAME(scanner_next)(&scanner)) >0)
+ {
+ /* See comment above why we can't use MY_HASH_ADD_16() */
+ MY_HASH_ADD(m1, m2, s_res >> 8);
+ MY_HASH_ADD(m1, m2, s_res & 0xFF);
+ }
+ *nr1= m1;
+ *nr2= m2;
+}
+
+
+
+/*
+ For the given string creates its "binary image", suitable
+ to be used in binary comparison, i.e. in memcmp().
+
+ SYNOPSIS:
+ my_strnxfrm_uca()
+ cs Character set information
+ dst Where to write the image
+ dstlen Space available for the image, in bytes
+ src The source string
+ srclen Length of the source string, in bytes
+
+ NOTES:
+ In a loop, scans weights from the source string and writes
+ them into the binary image. In a case insensitive collation,
+ upper and lower cases of the same letter will produce the
+ same image subsequences. When we have reached the end-of-string
+ or found an illegal multibyte sequence, the loop stops.
+
+ It is impossible to restore the original string using its
+ binary image.
+
+ Binary images are used for bulk comparison purposes,
+ e.g. in ORDER BY, when it is more efficient to create
+    a binary image and use it instead of running a weight scanner
+    over the original strings for every comparison.
+
+ RETURN
+ Number of bytes that have been written into the binary image.
+*/
+
+static uchar *
+MY_FUNCTION_NAME(strnxfrm_onelevel_internal)(CHARSET_INFO *cs,
+ MY_UCA_WEIGHT_LEVEL *level,
+ uchar *dst, uchar *de,
+ uint *nweights,
+ const uchar *src, size_t srclen)
+{
+ my_uca_scanner scanner;
+ int s_res;
+
+ DBUG_ASSERT(src || !srclen);
+
+#if MY_UCA_ASCII_OPTIMIZE && !MY_UCA_COMPILE_CONTRACTIONS
+ /*
+ Fast path for the ASCII range with no contractions.
+ */
+ {
+ const uchar *de2= de - 1; /* Last position where 2 bytes fit */
+ const uint16 *weights0= level->weights[0];
+ uint lengths0= level->lengths[0];
+ for ( ; ; src++, srclen--)
+ {
+ const uint16 *weight;
+ if (!srclen || !*nweights)
+ return dst; /* Done */
+ if (*src > 0x7F)
+ break; /* Non-ASCII */
+
+ weight= weights0 + (((uint) *src) * lengths0);
+ if (!(s_res= *weight))
+ continue; /* Ignorable */
+      if (weight[1]) /* Expansion (e.g. in a user defined collation) */
+ break;
+
+      /* Here we have a character with exactly one 2-byte UCA weight */
+ if (dst < de2) /* Most typical case is when both bytes fit */
+ {
+ *dst++= s_res >> 8;
+ *dst++= s_res & 0xFF;
+ (*nweights)--;
+ continue;
+ }
+ if (dst >= de) /* No space left in "dst" */
+ return dst;
+ *dst++= s_res >> 8; /* There is space only for one byte */
+ (*nweights)--;
+ return dst;
+ }
+ }
+#endif
+
+ my_uca_scanner_init_any(&scanner, cs, level, src, srclen);
+ for (; dst < de && *nweights &&
+ (s_res= MY_FUNCTION_NAME(scanner_next)(&scanner)) > 0 ; (*nweights)--)
+ {
+ *dst++= s_res >> 8;
+ if (dst < de)
+ *dst++= s_res & 0xFF;
+ }
+ return dst;
+}
+
+
+static uchar *
+MY_FUNCTION_NAME(strnxfrm_onelevel)(CHARSET_INFO *cs,
+ MY_UCA_WEIGHT_LEVEL *level,
+ uchar *dst, uchar *de, uint nweights,
+ const uchar *src, size_t srclen, uint flags)
+{
+ uchar *d0= dst;
+ dst= MY_FUNCTION_NAME(strnxfrm_onelevel_internal)(cs, level,
+ dst, de, &nweights,
+ src, srclen);
+ DBUG_ASSERT(dst <= de);
+ if (dst < de && nweights && (flags & MY_STRXFRM_PAD_WITH_SPACE))
+ dst= my_strnxfrm_uca_padn(dst, de, nweights, my_space_weight(level));
+ DBUG_ASSERT(dst <= de);
+ my_strxfrm_desc_and_reverse(d0, dst, flags, 0);
+ return dst;
+}
+
+
+
+static uchar *
+MY_FUNCTION_NAME(strnxfrm_nopad_onelevel)(CHARSET_INFO *cs,
+ MY_UCA_WEIGHT_LEVEL *level,
+ uchar *dst, uchar *de, uint nweights,
+ const uchar *src, size_t srclen,
+ uint flags)
+{
+ uchar *d0= dst;
+ dst= MY_FUNCTION_NAME(strnxfrm_onelevel_internal)(cs, level,
+ dst, de, &nweights,
+ src, srclen);
+ DBUG_ASSERT(dst <= de);
+ /* Pad with the minimum possible weight on this level */
+ if (dst < de && nweights && (flags & MY_STRXFRM_PAD_WITH_SPACE))
+ dst= my_strnxfrm_uca_padn(dst, de, nweights, min_weight_on_level(level));
+ DBUG_ASSERT(dst <= de);
+ my_strxfrm_desc_and_reverse(d0, dst, flags, 0);
+ return dst;
+}
+
+
+static size_t
+MY_FUNCTION_NAME(strnxfrm)(CHARSET_INFO *cs,
+ uchar *dst, size_t dstlen, uint nweights,
+ const uchar *src, size_t srclen, uint flags)
+{
+ uchar *d0= dst;
+ uchar *de= dst + dstlen;
+
+ /*
+ There are two ways to handle trailing spaces for PAD SPACE collations:
+ 1. Keep trailing spaces as they are, so have strnxfrm_onelevel() scan
+ spaces as normal characters. This will call scanner_next() for every
+ trailing space and calculate its weight using UCA weights.
+ 2. Strip trailing spaces before calling strnxfrm_onelevel(), as it will
+ append weights for implicit spaces anyway, up to the desired key size.
+ This will effectively generate exactly the same sortable key result.
+ The latter is much faster.
+ */
+
+ if (flags & MY_STRXFRM_PAD_WITH_SPACE)
+ srclen= cs->cset->lengthsp(cs, (const char*) src, srclen);
+ dst= MY_FUNCTION_NAME(strnxfrm_onelevel)(cs, &cs->uca->level[0],
+ dst, de, nweights,
+ src, srclen, flags);
+ /*
+ This can probably be changed to memset(dst, 0, de - dst),
+ like my_strnxfrm_uca_multilevel() does.
+ */
+ if ((flags & MY_STRXFRM_PAD_TO_MAXLEN) && dst < de)
+ dst= my_strnxfrm_uca_pad(dst, de, my_space_weight(&cs->uca->level[0]));
+ return dst - d0;
+}
+
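A sketch of the sort-key property this function provides (buffer sizes and flags are illustrative, though the flags themselves appear in this file): once two strings are transformed with the same flags and key length, a plain memcmp() of the images orders them the same way the collation orders the originals.

    uchar key1[64], key2[64];
    uint flags= MY_STRXFRM_PAD_WITH_SPACE | MY_STRXFRM_PAD_TO_MAXLEN;

    cs->coll->strnxfrm(cs, key1, sizeof(key1), 32, (const uchar *) "Abc", 3, flags);
    cs->coll->strnxfrm(cs, key2, sizeof(key2), 32, (const uchar *) "abd", 3, flags);

    /* With PAD_TO_MAXLEN both images fill the buffer, so memcmp() gives
       the same sign as cs->coll->strnncollsp() on the original strings. */
    int cmp= memcmp(key1, key2, sizeof(key1));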
+
+static size_t
+MY_FUNCTION_NAME(strnxfrm_nopad)(CHARSET_INFO *cs,
+ uchar *dst, size_t dstlen,
+ uint nweights,
+ const uchar *src, size_t srclen,
+ uint flags)
+{
+ uchar *d0= dst;
+ uchar *de= dst + dstlen;
+
+ dst= MY_FUNCTION_NAME(strnxfrm_nopad_onelevel)(cs, &cs->uca->level[0],
+ dst, de, nweights,
+ src, srclen, flags);
+ if ((flags & MY_STRXFRM_PAD_TO_MAXLEN) && dst < de)
+ {
+ memset(dst, 0, de - dst);
+ dst= de;
+ }
+ return dst - d0;
+}
+
+
+static size_t
+MY_FUNCTION_NAME(strnxfrm_multilevel)(CHARSET_INFO *cs,
+ uchar *dst, size_t dstlen,
+ uint nweights,
+ const uchar *src, size_t srclen,
+ uint flags)
+{
+ uint num_level= cs->levels_for_order;
+ uchar *d0= dst;
+ uchar *de= dst + dstlen;
+ uint current_level;
+
+ for (current_level= 0; current_level != num_level; current_level++)
+ {
+ if (!(flags & MY_STRXFRM_LEVEL_ALL) ||
+ (flags & (MY_STRXFRM_LEVEL1 << current_level)))
+ dst= cs->state & MY_CS_NOPAD ?
+ MY_FUNCTION_NAME(strnxfrm_nopad_onelevel)(cs,
+ &cs->uca->level[current_level],
+ dst, de, nweights,
+ src, srclen, flags) :
+ MY_FUNCTION_NAME(strnxfrm_onelevel)(cs,
+ &cs->uca->level[current_level],
+ dst, de, nweights,
+ src, srclen, flags);
+ }
+
+ if (dst < de && (flags & MY_STRXFRM_PAD_TO_MAXLEN))
+ {
+ memset(dst, 0, de - dst);
+ dst= de;
+ }
+
+ return dst - d0;
+}
+
+
+/*
+ One-level, PAD SPACE
+*/
+MY_COLLATION_HANDLER MY_FUNCTION_NAME(collation_handler)=
+{
+ MY_UCA_COLL_INIT,
+ MY_FUNCTION_NAME(strnncoll),
+ MY_FUNCTION_NAME(strnncollsp),
+ MY_FUNCTION_NAME(strnxfrm),
+ my_strnxfrmlen_any_uca,
+ MY_LIKE_RANGE,
+ my_wildcmp_uca,
+ NULL, /* strcasecmp() */
+ my_instr_mb,
+ MY_FUNCTION_NAME(hash_sort),
+ my_propagate_complex
+};
+
+
+/*
+ One-level, NO PAD
+ For character sets with mbminlen==1 use MY_LIKE_RANGE=my_like_range_mb
+ For character sets with mbminlen>=2 use MY_LIKE_RANGE=my_like_range_generic
+*/
+MY_COLLATION_HANDLER MY_FUNCTION_NAME(collation_handler_nopad)=
+{
+ MY_UCA_COLL_INIT,
+ MY_FUNCTION_NAME(strnncoll),
+ MY_FUNCTION_NAME(strnncollsp_nopad),
+ MY_FUNCTION_NAME(strnxfrm_nopad),
+ my_strnxfrmlen_any_uca,
+ MY_LIKE_RANGE, /* my_like_range_mb or my_like_range_generic */
+ my_wildcmp_uca,
+ NULL, /* strcasecmp() */
+ my_instr_mb,
+ MY_FUNCTION_NAME(hash_sort_nopad),
+ my_propagate_complex
+};
+
+
+/*
+ Multi-level, PAD SPACE
+*/
+MY_COLLATION_HANDLER MY_FUNCTION_NAME(collation_handler_multilevel)=
+{
+ MY_UCA_COLL_INIT,
+ MY_FUNCTION_NAME(strnncoll_multilevel),
+ MY_FUNCTION_NAME(strnncollsp_multilevel),
+ MY_FUNCTION_NAME(strnxfrm_multilevel),
+ my_strnxfrmlen_any_uca_multilevel,
+ MY_LIKE_RANGE,
+ my_wildcmp_uca,
+ NULL, /* strcasecmp() */
+ my_instr_mb,
+ MY_FUNCTION_NAME(hash_sort),
+ my_propagate_complex
+};
+
+
+/*
+ Multi-level, NO PAD
+*/
+MY_COLLATION_HANDLER MY_FUNCTION_NAME(collation_handler_nopad_multilevel)=
+{
+ MY_UCA_COLL_INIT,
+ MY_FUNCTION_NAME(strnncoll_multilevel),
+ MY_FUNCTION_NAME(strnncollsp_nopad_multilevel),
+ MY_FUNCTION_NAME(strnxfrm_multilevel),
+ my_strnxfrmlen_any_uca_multilevel,
+ MY_LIKE_RANGE,
+ my_wildcmp_uca,
+ NULL, /* strcasecmp() */
+ my_instr_mb,
+ MY_FUNCTION_NAME(hash_sort),
+ my_propagate_complex
+};
+
+
+MY_COLLATION_HANDLER_PACKAGE MY_FUNCTION_NAME(package)=
+{
+ &MY_FUNCTION_NAME(collation_handler),
+ &MY_FUNCTION_NAME(collation_handler_nopad),
+ &MY_FUNCTION_NAME(collation_handler_multilevel),
+ &MY_FUNCTION_NAME(collation_handler_nopad_multilevel)
+};
+
+
+#undef MY_FUNCTION_NAME
+#undef MY_MB_WC
+#undef MY_LIKE_RANGE
+#undef MY_UCA_ASCII_OPTIMIZE
+#undef MY_UCA_COMPILE_CONTRACTIONS
+#undef MY_UCA_COLL_INIT
diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c
index 7596b7f2168..28e7def3ddf 100644
--- a/strings/ctype-ucs2.c
+++ b/strings/ctype-ucs2.c
@@ -23,6 +23,8 @@
#include <my_sys.h>
#include <stdarg.h>
+#include "ctype-unidata.h"
+
#if defined(HAVE_CHARSET_utf16) || defined(HAVE_CHARSET_ucs2)
#define HAVE_CHARSET_mb2
@@ -1184,35 +1186,7 @@ my_lengthsp_mb2(CHARSET_INFO *cs __attribute__((unused)),
but the JSON functions needed my_utf16_uni()
so the #ifdef was moved lower.
*/
-
-
-/*
- D800..DB7F - Non-provate surrogate high (896 pages)
- DB80..DBFF - Private surrogate high (128 pages)
- DC00..DFFF - Surrogate low (1024 codes in a page)
-*/
-#define MY_UTF16_SURROGATE_HIGH_FIRST 0xD800
-#define MY_UTF16_SURROGATE_HIGH_LAST 0xDBFF
-#define MY_UTF16_SURROGATE_LOW_FIRST 0xDC00
-#define MY_UTF16_SURROGATE_LOW_LAST 0xDFFF
-
-#define MY_UTF16_HIGH_HEAD(x) ((((uchar) (x)) & 0xFC) == 0xD8)
-#define MY_UTF16_LOW_HEAD(x) ((((uchar) (x)) & 0xFC) == 0xDC)
-/* Test if a byte is a leading byte of a high or low surrogate head: */
-#define MY_UTF16_SURROGATE_HEAD(x) ((((uchar) (x)) & 0xF8) == 0xD8)
-/* Test if a Unicode code point is a high or low surrogate head */
-#define MY_UTF16_SURROGATE(x) (((x) & 0xF800) == 0xD800)
-
-#define MY_UTF16_WC2(a, b) ((a << 8) + b)
-
-/*
- a= 110110?? (<< 18)
- b= ???????? (<< 10)
- c= 110111?? (<< 8)
- d= ???????? (<< 0)
-*/
-#define MY_UTF16_WC4(a, b, c, d) (((a & 3) << 18) + (b << 10) + \
- ((c & 3) << 8) + d + 0x10000)
+#include "ctype-utf16.h"
#define IS_MB2_CHAR(b0,b1) (!MY_UTF16_SURROGATE_HEAD(b0))
#define IS_MB4_CHAR(b0,b1,b2,b3) (MY_UTF16_HIGH_HEAD(b0) && MY_UTF16_LOW_HEAD(b2))
@@ -1220,10 +1194,17 @@ my_lengthsp_mb2(CHARSET_INFO *cs __attribute__((unused)),
static inline int my_weight_mb2_utf16mb2_general_ci(uchar b0, uchar b1)
{
my_wc_t wc= MY_UTF16_WC2(b0, b1);
- MY_UNICASE_CHARACTER *page= my_unicase_default.page[wc >> 8];
+ MY_UNICASE_CHARACTER *page= my_unicase_default_pages[wc >> 8];
return (int) (page ? page[wc & 0xFF].sort : wc);
}
#define MY_FUNCTION_NAME(x) my_ ## x ## _utf16_general_ci
+#define DEFINE_STRNXFRM_UNICODE
+#define DEFINE_STRNXFRM_UNICODE_NOPAD
+#define MY_MB_WC(cs, pwc, s, e) my_mb_wc_utf16_quick(pwc, s, e)
+#define OPTIMIZE_ASCII 0
+#define UNICASE_MAXCHAR MY_UNICASE_INFO_DEFAULT_MAXCHAR
+#define UNICASE_PAGE0 my_unicase_default_page00
+#define UNICASE_PAGES my_unicase_default_pages
#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
#define WEIGHT_MB2(b0,b1) my_weight_mb2_utf16mb2_general_ci(b0,b1)
#define WEIGHT_MB4(b0,b1,b2,b3) MY_CS_REPLACEMENT_CHARACTER
@@ -1261,32 +1242,7 @@ static inline int my_weight_mb2_utf16mb2_general_ci(uchar b0, uchar b1)
my_utf16_uni(CHARSET_INFO *cs __attribute__((unused)),
my_wc_t *pwc, const uchar *s, const uchar *e)
{
- if (s + 2 > e)
- return MY_CS_TOOSMALL2;
-
- /*
- High bytes: 0xD[89AB] = B'110110??'
- Low bytes: 0xD[CDEF] = B'110111??'
- Surrogate mask: 0xFC = B'11111100'
- */
-
- if (MY_UTF16_HIGH_HEAD(*s)) /* Surrogate head */
- {
- if (s + 4 > e)
- return MY_CS_TOOSMALL4;
-
- if (!MY_UTF16_LOW_HEAD(s[2])) /* Broken surrigate pair */
- return MY_CS_ILSEQ;
-
- *pwc= MY_UTF16_WC4(s[0], s[1], s[2], s[3]);
- return 4;
- }
-
- if (MY_UTF16_LOW_HEAD(*s)) /* Low surrogate part without high part */
- return MY_CS_ILSEQ;
-
- *pwc= MY_UTF16_WC2(s[0], s[1]);
- return 2;
+ return my_mb_wc_utf16_quick(pwc, s, e);
}
@@ -1546,7 +1502,7 @@ static MY_COLLATION_HANDLER my_collation_utf16_general_ci_handler =
NULL, /* init */
my_strnncoll_utf16_general_ci,
my_strnncollsp_utf16_general_ci,
- my_strnxfrm_unicode,
+ my_strnxfrm_utf16_general_ci,
my_strnxfrmlen_unicode,
my_like_range_generic,
my_wildcmp_utf16_ci,
@@ -1578,7 +1534,7 @@ static MY_COLLATION_HANDLER my_collation_utf16_general_nopad_ci_handler =
NULL, /* init */
my_strnncoll_utf16_general_ci,
my_strnncollsp_utf16_general_nopad_ci,
- my_strnxfrm_unicode_nopad,
+ my_strnxfrm_nopad_utf16_general_ci,
my_strnxfrmlen_unicode,
my_like_range_generic,
my_wildcmp_utf16_ci,
@@ -1775,6 +1731,13 @@ struct charset_info_st my_charset_utf16_nopad_bin=
#define IS_MB4_CHAR(b0,b1,b2,b3) (MY_UTF16_HIGH_HEAD(b1) && MY_UTF16_LOW_HEAD(b3))
#define MY_FUNCTION_NAME(x) my_ ## x ## _utf16le_general_ci
+#define DEFINE_STRNXFRM_UNICODE
+#define DEFINE_STRNXFRM_UNICODE_NOPAD
+#define MY_MB_WC(cs, pwc, s, e) (cs->cset->mb_wc(cs, pwc, s, e))
+#define OPTIMIZE_ASCII 0
+#define UNICASE_MAXCHAR MY_UNICASE_INFO_DEFAULT_MAXCHAR
+#define UNICASE_PAGE0 my_unicase_default_page00
+#define UNICASE_PAGES my_unicase_default_pages
#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
#define WEIGHT_MB2(b0,b1) my_weight_mb2_utf16mb2_general_ci(b1,b0)
#define WEIGHT_MB4(b0,b1,b2,b3) MY_CS_REPLACEMENT_CHARACTER
@@ -1879,7 +1842,7 @@ static MY_COLLATION_HANDLER my_collation_utf16le_general_ci_handler =
NULL, /* init */
my_strnncoll_utf16le_general_ci,
my_strnncollsp_utf16le_general_ci,
- my_strnxfrm_unicode,
+ my_strnxfrm_utf16le_general_ci,
my_strnxfrmlen_unicode,
my_like_range_generic,
my_wildcmp_utf16_ci,
@@ -1911,7 +1874,7 @@ static MY_COLLATION_HANDLER my_collation_utf16le_general_nopad_ci_handler =
NULL, /* init */
my_strnncoll_utf16le_general_ci,
my_strnncollsp_utf16le_general_nopad_ci,
- my_strnxfrm_unicode_nopad,
+ my_strnxfrm_nopad_utf16le_general_ci,
my_strnxfrmlen_unicode,
my_like_range_generic,
my_wildcmp_utf16_ci,
@@ -2109,6 +2072,8 @@ struct charset_info_st my_charset_utf16le_nopad_bin=
#ifdef HAVE_CHARSET_utf32
+#include "ctype-utf32.h"
+
/*
Check is b0 and b1 start a valid UTF32 four-byte sequence.
Don't accept characters greater than U+10FFFF.
@@ -2117,8 +2082,6 @@ struct charset_info_st my_charset_utf16le_nopad_bin=
#define IS_MB4_CHAR(b0,b1,b2,b3) (IS_UTF32_MBHEAD4(b0,b1))
-#define MY_UTF32_WC4(b0,b1,b2,b3) ((((my_wc_t)b0) << 24) + (b1 << 16) + \
- (b2 << 8) + (b3))
static inline int my_weight_utf32_general_ci(uchar b0, uchar b1,
uchar b2, uchar b3)
@@ -2126,12 +2089,19 @@ static inline int my_weight_utf32_general_ci(uchar b0, uchar b1,
my_wc_t wc= MY_UTF32_WC4(b0, b1, b2, b3);
if (wc <= 0xFFFF)
{
- MY_UNICASE_CHARACTER *page= my_unicase_default.page[wc >> 8];
+ MY_UNICASE_CHARACTER *page= my_unicase_default_pages[wc >> 8];
return (int) (page ? page[wc & 0xFF].sort : wc);
}
return MY_CS_REPLACEMENT_CHARACTER;
}
#define MY_FUNCTION_NAME(x) my_ ## x ## _utf32_general_ci
+#define DEFINE_STRNXFRM_UNICODE
+#define DEFINE_STRNXFRM_UNICODE_NOPAD
+#define MY_MB_WC(cs, pwc, s, e) my_mb_wc_utf32_quick(pwc, s, e)
+#define OPTIMIZE_ASCII 0
+#define UNICASE_MAXCHAR MY_UNICASE_INFO_DEFAULT_MAXCHAR
+#define UNICASE_PAGE0 my_unicase_default_page00
+#define UNICASE_PAGES my_unicase_default_pages
#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
#define WEIGHT_MB4(b0,b1,b2,b3) my_weight_utf32_general_ci(b0, b1, b2, b3)
#include "strcoll.ic"
@@ -2161,10 +2131,7 @@ static int
my_utf32_uni(CHARSET_INFO *cs __attribute__((unused)),
my_wc_t *pwc, const uchar *s, const uchar *e)
{
- if (s + 4 > e)
- return MY_CS_TOOSMALL4;
- *pwc= MY_UTF32_WC4(s[0], s[1], s[2], s[3]);
- return *pwc > 0x10FFFF ? MY_CS_ILSEQ : 4;
+ return my_mb_wc_utf32_quick(pwc, s, e);
}
@@ -2698,7 +2665,7 @@ static MY_COLLATION_HANDLER my_collation_utf32_general_ci_handler =
NULL, /* init */
my_strnncoll_utf32_general_ci,
my_strnncollsp_utf32_general_ci,
- my_strnxfrm_unicode,
+ my_strnxfrm_utf32_general_ci,
my_strnxfrmlen_unicode,
my_like_range_generic,
my_wildcmp_utf32_ci,
@@ -2730,7 +2697,7 @@ static MY_COLLATION_HANDLER my_collation_utf32_general_nopad_ci_handler =
NULL, /* init */
my_strnncoll_utf32_general_ci,
my_strnncollsp_utf32_general_nopad_ci,
- my_strnxfrm_unicode_nopad,
+ my_strnxfrm_nopad_utf32_general_ci,
my_strnxfrmlen_unicode,
my_like_range_generic,
my_wildcmp_utf32_ci,
@@ -2928,6 +2895,8 @@ struct charset_info_st my_charset_utf32_nopad_bin=
#ifdef HAVE_CHARSET_ucs2
+#include "ctype-ucs2.h"
+
static const uchar ctype_ucs2[] = {
0,
32, 32, 32, 32, 32, 32, 32, 32, 32, 40, 40, 40, 40, 40, 32, 32,
@@ -2995,20 +2964,30 @@ static const uchar to_upper_ucs2[] = {
static inline int my_weight_mb2_ucs2_general_ci(uchar b0, uchar b1)
{
my_wc_t wc= UCS2_CODE(b0, b1);
- MY_UNICASE_CHARACTER *page= my_unicase_default.page[wc >> 8];
+ MY_UNICASE_CHARACTER *page= my_unicase_default_pages[wc >> 8];
return (int) (page ? page[wc & 0xFF].sort : wc);
}
-#define MY_FUNCTION_NAME(x) my_ ## x ## _ucs2_general_ci
-#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
-#define WEIGHT_MB2(b0,b1) my_weight_mb2_ucs2_general_ci(b0,b1)
+#define MY_FUNCTION_NAME(x) my_ ## x ## _ucs2_general_ci
+#define DEFINE_STRNXFRM_UNICODE
+#define DEFINE_STRNXFRM_UNICODE_NOPAD
+#define MY_MB_WC(cs, pwc, s, e) my_mb_wc_ucs2_quick(pwc, s, e)
+#define OPTIMIZE_ASCII 0
+#define UNICASE_MAXCHAR MY_UNICASE_INFO_DEFAULT_MAXCHAR
+#define UNICASE_PAGE0 my_unicase_default_page00
+#define UNICASE_PAGES my_unicase_default_pages
+#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
+#define WEIGHT_MB2(b0,b1) my_weight_mb2_ucs2_general_ci(b0,b1)
#include "strcoll.ic"
-#define MY_FUNCTION_NAME(x) my_ ## x ## _ucs2_bin
-#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
-#define WEIGHT_MB2(b0,b1) UCS2_CODE(b0,b1)
+#define MY_FUNCTION_NAME(x) my_ ## x ## _ucs2_bin
+#define DEFINE_STRNXFRM_UNICODE_BIN2
+#define MY_MB_WC(cs, pwc, s, e) my_mb_wc_ucs2_quick(pwc, s, e)
+#define OPTIMIZE_ASCII 0
+#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
+#define WEIGHT_MB2(b0,b1) UCS2_CODE(b0,b1)
#include "strcoll.ic"
@@ -3037,11 +3016,7 @@ my_charlen_ucs2(CHARSET_INFO *cs __attribute__((unused)),
static int my_ucs2_uni(CHARSET_INFO *cs __attribute__((unused)),
my_wc_t * pwc, const uchar *s, const uchar *e)
{
- if (s+2 > e) /* Need 2 characters */
- return MY_CS_TOOSMALL2;
-
- *pwc= ((uchar)s[0]) * 256 + ((uchar)s[1]);
- return 2;
+ return my_mb_wc_ucs2_quick(pwc, s, e);
}
static int my_uni_ucs2(CHARSET_INFO *cs __attribute__((unused)) ,
@@ -3280,7 +3255,7 @@ static MY_COLLATION_HANDLER my_collation_ucs2_general_ci_handler =
NULL, /* init */
my_strnncoll_ucs2_general_ci,
my_strnncollsp_ucs2_general_ci,
- my_strnxfrm_unicode,
+ my_strnxfrm_ucs2_general_ci,
my_strnxfrmlen_unicode,
my_like_range_generic,
my_wildcmp_ucs2_ci,
@@ -3296,7 +3271,7 @@ static MY_COLLATION_HANDLER my_collation_ucs2_bin_handler =
NULL, /* init */
my_strnncoll_ucs2_bin,
my_strnncollsp_ucs2_bin,
- my_strnxfrm_unicode,
+ my_strnxfrm_ucs2_bin,
my_strnxfrmlen_unicode,
my_like_range_generic,
my_wildcmp_ucs2_bin,
@@ -3312,7 +3287,7 @@ static MY_COLLATION_HANDLER my_collation_ucs2_general_nopad_ci_handler =
NULL, /* init */
my_strnncoll_ucs2_general_ci,
my_strnncollsp_ucs2_general_nopad_ci,
- my_strnxfrm_unicode_nopad,
+ my_strnxfrm_nopad_ucs2_general_ci,
my_strnxfrmlen_unicode,
my_like_range_generic,
my_wildcmp_ucs2_ci,
@@ -3328,7 +3303,7 @@ static MY_COLLATION_HANDLER my_collation_ucs2_nopad_bin_handler =
NULL, /* init */
my_strnncoll_ucs2_bin,
my_strnncollsp_ucs2_nopad_bin,
- my_strnxfrm_unicode_nopad,
+ my_strnxfrm_nopad_ucs2_bin,
my_strnxfrmlen_unicode,
my_like_range_generic,
my_wildcmp_ucs2_bin,
diff --git a/strings/ctype-ucs2.h b/strings/ctype-ucs2.h
new file mode 100644
index 00000000000..c989324172d
--- /dev/null
+++ b/strings/ctype-ucs2.h
@@ -0,0 +1,32 @@
+/*
+ Copyright (c) 2018 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#ifndef _CTYPE_UCS2_H
+#define _CTYPE_UCS2_H
+
+
+static inline int
+my_mb_wc_ucs2_quick(my_wc_t * pwc, const uchar *s, const uchar *e)
+{
+ if (s+2 > e) /* Need 2 characters */
+ return MY_CS_TOOSMALL2;
+ *pwc= ((uchar)s[0]) * 256 + ((uchar)s[1]);
+ return 2;
+}
+
+
+#endif /* _CTYPE_UCS2_H */
diff --git a/strings/ctype-unidata.h b/strings/ctype-unidata.h
new file mode 100644
index 00000000000..6712f5e1d79
--- /dev/null
+++ b/strings/ctype-unidata.h
@@ -0,0 +1,31 @@
+#ifndef CTYPE_UNIDATA_H_INCLUDED
+#define CTYPE_UNIDATA_H_INCLUDED
+/*
+ Copyright (c) 2018 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#define MY_UNICASE_INFO_DEFAULT_MAXCHAR 0xFFFF
+extern MY_UNICASE_CHARACTER my_unicase_default_page00[256];
+extern MY_UNICASE_CHARACTER *my_unicase_default_pages[256];
+
+size_t my_strxfrm_pad_nweights_unicode(uchar *str, uchar *strend, size_t nweights);
+size_t my_strxfrm_pad_unicode(uchar *str, uchar *strend);
+
+
+#define PUT_WC_BE2_HAVE_1BYTE(dst, de, wc) \
+ do { *dst++= (uchar) (wc >> 8); if (dst < de) *dst++= (uchar) (wc & 0xFF); } while(0)
+
+#endif /* CTYPE_UNIDATA_H_INCLUDED */
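A small sketch of what PUT_WC_BE2_HAVE_1BYTE() above does, assuming the caller has already checked that at least one output byte is available: the 16-bit weight is stored big-endian, and the low byte is written only if it still fits.

    uchar buf[2];
    uchar *dst= buf, *de= buf + sizeof(buf);
    my_wc_t wc= 0x20AC;

    PUT_WC_BE2_HAVE_1BYTE(dst, de, wc);   /* buf[0] == 0x20, buf[1] == 0xAC */

    /* With room for only one byte, the low byte is silently dropped: */
    uchar one[1];
    uchar *d1= one, *e1= one + 1;
    PUT_WC_BE2_HAVE_1BYTE(d1, e1, wc);    /* one[0] == 0x20, nothing else written */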
diff --git a/strings/ctype-utf16.h b/strings/ctype-utf16.h
new file mode 100644
index 00000000000..d4cf4664f97
--- /dev/null
+++ b/strings/ctype-utf16.h
@@ -0,0 +1,80 @@
+/*
+ Copyright (c) 2018 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#ifndef _CTYPE_UTF16_H
+#define _CTYPE_UTF16_H
+
+/*
+  D800..DB7F - Non-private surrogate high (896 pages)
+ DB80..DBFF - Private surrogate high (128 pages)
+ DC00..DFFF - Surrogate low (1024 codes in a page)
+*/
+#define MY_UTF16_SURROGATE_HIGH_FIRST 0xD800
+#define MY_UTF16_SURROGATE_HIGH_LAST 0xDBFF
+#define MY_UTF16_SURROGATE_LOW_FIRST 0xDC00
+#define MY_UTF16_SURROGATE_LOW_LAST 0xDFFF
+
+#define MY_UTF16_HIGH_HEAD(x) ((((uchar) (x)) & 0xFC) == 0xD8)
+#define MY_UTF16_LOW_HEAD(x) ((((uchar) (x)) & 0xFC) == 0xDC)
+/* Test if a byte is a leading byte of a high or low surrogate head: */
+#define MY_UTF16_SURROGATE_HEAD(x) ((((uchar) (x)) & 0xF8) == 0xD8)
+/* Test if a Unicode code point is a high or low surrogate */
+#define MY_UTF16_SURROGATE(x) (((x) & 0xF800) == 0xD800)
+
+#define MY_UTF16_WC2(a, b) ((a << 8) + b)
+
+/*
+ a= 110110?? (<< 18)
+ b= ???????? (<< 10)
+ c= 110111?? (<< 8)
+ d= ???????? (<< 0)
+*/
+#define MY_UTF16_WC4(a, b, c, d) (((a & 3) << 18) + (b << 10) + \
+ ((c & 3) << 8) + d + 0x10000)
+
+static inline int
+my_mb_wc_utf16_quick(my_wc_t *pwc, const uchar *s, const uchar *e)
+{
+ if (s + 2 > e)
+ return MY_CS_TOOSMALL2;
+
+ /*
+ High bytes: 0xD[89AB] = B'110110??'
+ Low bytes: 0xD[CDEF] = B'110111??'
+ Surrogate mask: 0xFC = B'11111100'
+ */
+
+ if (MY_UTF16_HIGH_HEAD(*s)) /* Surrogate head */
+ {
+ if (s + 4 > e)
+ return MY_CS_TOOSMALL4;
+
+    if (!MY_UTF16_LOW_HEAD(s[2])) /* Broken surrogate pair */
+ return MY_CS_ILSEQ;
+
+ *pwc= MY_UTF16_WC4(s[0], s[1], s[2], s[3]);
+ return 4;
+ }
+
+ if (MY_UTF16_LOW_HEAD(*s)) /* Low surrogate part without high part */
+ return MY_CS_ILSEQ;
+
+ *pwc= MY_UTF16_WC2(s[0], s[1]);
+ return 2;
+}
+
+#endif /* _CTYPE_UTF16_H */
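A worked example of the surrogate arithmetic above: U+1F600 is encoded in UTF-16BE as the surrogate pair D83D DE00 (the byte values follow from the standard Unicode algorithm, not from this patch).

    my_wc_t wc;
    const uchar emoji[]= { 0xD8, 0x3D, 0xDE, 0x00 };   /* U+1F600, UTF-16BE */
    int len= my_mb_wc_utf16_quick(&wc, emoji, emoji + sizeof(emoji));

    /*
      MY_UTF16_HIGH_HEAD(0xD8) and MY_UTF16_LOW_HEAD(0xDE) both hold, so:
      MY_UTF16_WC4(0xD8, 0x3D, 0xDE, 0x00)
        = ((0xD8 & 3) << 18) + (0x3D << 10) + ((0xDE & 3) << 8) + 0x00 + 0x10000
        = 0x0000 + 0xF400 + 0x0200 + 0x10000
        = 0x1F600
      Hence len == 4 and wc == 0x1F600.
    */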
diff --git a/strings/ctype-utf32.h b/strings/ctype-utf32.h
new file mode 100644
index 00000000000..e295dc6d081
--- /dev/null
+++ b/strings/ctype-utf32.h
@@ -0,0 +1,33 @@
+/*
+ Copyright (c) 2018 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#ifndef _CTYPE_UTF32_H
+#define _CTYPE_UTF32_H
+
+#define MY_UTF32_WC4(b0,b1,b2,b3) ((((my_wc_t)b0) << 24) + (b1 << 16) + \
+ (b2 << 8) + (b3))
+
+static inline int
+my_mb_wc_utf32_quick(my_wc_t *pwc, const uchar *s, const uchar *e)
+{
+ if (s + 4 > e)
+ return MY_CS_TOOSMALL4;
+ *pwc= MY_UTF32_WC4(s[0], s[1], s[2], s[3]);
+ return *pwc > 0x10FFFF ? MY_CS_ILSEQ : 4;
+}
+
+#endif /* _CTYPE_UTF32_H */
diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c
index 4ef376dccc8..4ddb086b734 100644
--- a/strings/ctype-utf8.c
+++ b/strings/ctype-utf8.c
@@ -26,78 +26,10 @@
#define EILSEQ ENOENT
#endif
-/* Detect special bytes and sequences */
-#define IS_CONTINUATION_BYTE(c) (((uchar) (c) ^ 0x80) < 0x40)
-/*
- Check MB2 character assuming that b0 is alredy known to be >= 0xC2.
- Use this macro if the caller already checked b0 for:
- - an MB1 character
- - an unused gap between MB1 and MB2HEAD
-*/
-#define IS_UTF8MB2_STEP2(b0,b1) (((uchar) (b0) < 0xE0) && \
- IS_CONTINUATION_BYTE((uchar) b1))
+#include "ctype-utf8.h"
+#include "ctype-unidata.h"
-/*
- Check MB3 character assuming that b0 is already known to be
- in the valid MB3HEAD range [0xE0..0xEF].
-*/
-#define IS_UTF8MB3_STEP2(b0,b1,b2) (IS_CONTINUATION_BYTE(b1) && \
- IS_CONTINUATION_BYTE(b2) && \
- ((uchar) b0 >= 0xe1 || (uchar) b1 >= 0xa0))
-
-/*
- Check MB3 character assuming that b0 is already known to be >= 0xE0,
- but is not checked for the high end 0xF0 yet.
- Use this macro if the caller already checked b0 for:
- - an MB1 character
- - an unused gap between MB1 and MB2HEAD
- - an MB2HEAD
-*/
-#define IS_UTF8MB3_STEP3(b0,b1,b2) (((uchar) (b0) < 0xF0) && \
- IS_UTF8MB3_STEP2(b0,b1,b2))
-
-/*
- UTF-8 quick four-byte mask:
- 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
- Encoding allows to encode U+00010000..U+001FFFFF
-
- The maximum character defined in the Unicode standard is U+0010FFFF.
- Higher characters U+00110000..U+001FFFFF are not used.
-
- 11110000.10010000.10xxxxxx.10xxxxxx == F0.90.80.80 == U+00010000 (min)
- 11110100.10001111.10111111.10111111 == F4.8F.BF.BF == U+0010FFFF (max)
-
- Valid codes:
- [F0][90..BF][80..BF][80..BF]
- [F1][80..BF][80..BF][80..BF]
- [F2][80..BF][80..BF][80..BF]
- [F3][80..BF][80..BF][80..BF]
- [F4][80..8F][80..BF][80..BF]
-*/
-
-/*
- Check MB4 character assuming that b0 is already
- known to be in the range [0xF0..0xF4]
-*/
-#define IS_UTF8MB4_STEP2(b0,b1,b2,b3) (IS_CONTINUATION_BYTE(b1) && \
- IS_CONTINUATION_BYTE(b2) && \
- IS_CONTINUATION_BYTE(b3) && \
- (b0 >= 0xf1 || b1 >= 0x90) && \
- (b0 <= 0xf3 || b1 <= 0x8F))
-#define IS_UTF8MB4_STEP3(b0,b1,b2,b3) (((uchar) (b0) < 0xF5) && \
- IS_UTF8MB4_STEP2(b0,b1,b2,b3))
-
-/* Convert individual bytes to Unicode code points */
-#define UTF8MB2_CODE(b0,b1) (((my_wc_t) ((uchar) b0 & 0x1f) << 6) |\
- ((my_wc_t) ((uchar) b1 ^ 0x80)))
-#define UTF8MB3_CODE(b0,b1,b2) (((my_wc_t) ((uchar) b0 & 0x0f) << 12) |\
- ((my_wc_t) ((uchar) b1 ^ 0x80) << 6) |\
- ((my_wc_t) ((uchar) b2 ^ 0x80)))
-#define UTF8MB4_CODE(b0,b1,b2,b3) (((my_wc_t) ((uchar) b0 & 0x07) << 18) |\
- ((my_wc_t) ((uchar) b1 ^ 0x80) << 12) |\
- ((my_wc_t) ((uchar) b2 ^ 0x80) << 6) |\
- (my_wc_t) ((uchar) b3 ^ 0x80))
/* Definitions for strcoll.ic */
#define IS_MB1_CHAR(x) ((uchar) (x) < 0x80)
@@ -180,7 +112,7 @@ int my_valid_mbcharlen_utf8mb3(const uchar *s, const uchar *e)
#include "my_uctype.h"
-static MY_UNICASE_CHARACTER plane00[]={
+MY_UNICASE_CHARACTER my_unicase_default_page00[]={
{0x0000,0x0000,0x0000}, {0x0001,0x0001,0x0001},
{0x0002,0x0002,0x0002}, {0x0003,0x0003,0x0003},
{0x0004,0x0004,0x0004}, {0x0005,0x0005,0x0005},
@@ -313,7 +245,7 @@ static MY_UNICASE_CHARACTER plane00[]={
/*
- Almost similar to plane00, but maps sorting order
+ Almost similar to my_unicase_default_page00, but maps sorting order
for U+00DF to 0x00DF instead of 0x0053.
*/
static MY_UNICASE_CHARACTER plane00_mysql500[]={
@@ -1759,9 +1691,10 @@ static MY_UNICASE_CHARACTER planeFF[]={
};
-static MY_UNICASE_CHARACTER *my_unicase_pages_default[256]=
+MY_UNICASE_CHARACTER *my_unicase_default_pages[256]=
{
- plane00, plane01, plane02, plane03, plane04, plane05, NULL, NULL,
+ my_unicase_default_page00,
+ plane01, plane02, plane03, plane04, plane05, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, plane1E, plane1F,
@@ -1798,8 +1731,8 @@ static MY_UNICASE_CHARACTER *my_unicase_pages_default[256]=
MY_UNICASE_INFO my_unicase_default=
{
- 0xFFFF,
- my_unicase_pages_default
+ MY_UNICASE_INFO_DEFAULT_MAXCHAR,
+ my_unicase_default_pages
};
@@ -4646,7 +4579,7 @@ my_wildcmp_unicode(CHARSET_INFO *cs,
@return Result length
*/
-static size_t
+size_t
my_strxfrm_pad_nweights_unicode(uchar *str, uchar *strend, size_t nweights)
{
uchar *str0;
@@ -4675,7 +4608,7 @@ my_strxfrm_pad_nweights_unicode(uchar *str, uchar *strend, size_t nweights)
@return Result length
*/
-static size_t
+size_t
my_strxfrm_pad_unicode(uchar *str, uchar *strend)
{
uchar *str0= str;
@@ -4690,95 +4623,6 @@ my_strxfrm_pad_unicode(uchar *str, uchar *strend)
}
-size_t my_strnxfrm_unicode_internal(CHARSET_INFO *cs,
- uchar *dst, uchar *de, uint *nweights,
- const uchar *src, const uchar *se)
-{
- my_wc_t UNINIT_VAR(wc);
- int res;
- uchar *dst0= dst;
- MY_UNICASE_INFO *uni_plane= (cs->state & MY_CS_BINSORT) ?
- NULL : cs->caseinfo;
-
- DBUG_ASSERT(src || !se);
-
- for (; dst < de && *nweights; (*nweights)--)
- {
- if ((res= cs->cset->mb_wc(cs, &wc, src, se)) <= 0)
- break;
- src+= res;
-
- if (uni_plane)
- my_tosort_unicode(uni_plane, &wc, cs->state);
-
- *dst++= (uchar) (wc >> 8);
- if (dst < de)
- *dst++= (uchar) (wc & 0xFF);
- }
- return dst - dst0;
-}
-
-
-/*
- Store sorting weights using 2 bytes per character.
-
- This function is shared between
- - utf8mb3_general_ci, utf8_bin, ucs2_general_ci, ucs2_bin
- which support BMP only (U+0000..U+FFFF).
- - utf8mb4_general_ci, utf16_general_ci, utf32_general_ci,
- which map all supplementary characters to weight 0xFFFD.
-*/
-size_t
-my_strnxfrm_unicode(CHARSET_INFO *cs,
- uchar *dst, size_t dstlen, uint nweights,
- const uchar *src, size_t srclen, uint flags)
-{
- uchar *dst0= dst;
- uchar *de= dst + dstlen;
- dst+= my_strnxfrm_unicode_internal(cs, dst, de, &nweights,
- src, src + srclen);
- DBUG_ASSERT(dst <= de); /* Safety */
-
- if (dst < de && nweights && (flags & MY_STRXFRM_PAD_WITH_SPACE))
- dst+= my_strxfrm_pad_nweights_unicode(dst, de, nweights);
-
- my_strxfrm_desc_and_reverse(dst0, dst, flags, 0);
-
- if ((flags & MY_STRXFRM_PAD_TO_MAXLEN) && dst < de)
- dst+= my_strxfrm_pad_unicode(dst, de);
- return dst - dst0;
-}
-
-
-size_t
-my_strnxfrm_unicode_nopad(CHARSET_INFO *cs,
- uchar *dst, size_t dstlen, uint nweights,
- const uchar *src, size_t srclen, uint flags)
-{
- uchar *dst0= dst;
- uchar *de= dst + dstlen;
- dst+= my_strnxfrm_unicode_internal(cs, dst, de, &nweights,
- src, src + srclen);
- DBUG_ASSERT(dst <= de); /* Safety */
-
- if (dst < de && nweights && (flags & MY_STRXFRM_PAD_WITH_SPACE))
- {
- size_t len= de - dst;
- set_if_smaller(len, nweights * 2);
- memset(dst, 0x00, len);
- dst+= len;
- }
-
- my_strxfrm_desc_and_reverse(dst0, dst, flags, 0);
-
- if ((flags & MY_STRXFRM_PAD_TO_MAXLEN) && dst < de)
- {
- memset(dst, 0x00, de - dst);
- dst= de;
- }
- return dst - dst0;
-}
-
/*
For BMP-only collations that use 2 bytes per weight.
*/
@@ -4977,42 +4821,7 @@ static const uchar to_upper_utf8[] = {
static int my_utf8_uni(CHARSET_INFO *cs __attribute__((unused)),
my_wc_t * pwc, const uchar *s, const uchar *e)
{
- uchar c;
-
- if (s >= e)
- return MY_CS_TOOSMALL;
-
- c= s[0];
- if (c < 0x80)
- {
- *pwc = c;
- return 1;
- }
- else if (c < 0xc2)
- return MY_CS_ILSEQ;
- else if (c < 0xe0)
- {
- if (s+2 > e) /* We need 2 characters */
- return MY_CS_TOOSMALL2;
-
- if (!(IS_CONTINUATION_BYTE(s[1])))
- return MY_CS_ILSEQ;
-
- *pwc= UTF8MB2_CODE(c, s[1]);
- return 2;
- }
- else if (c < 0xf0)
- {
- if (s+3 > e) /* We need 3 characters */
- return MY_CS_TOOSMALL3;
-
- if (!IS_UTF8MB3_STEP2(c, s[1], s[2]))
- return MY_CS_ILSEQ;
-
- *pwc= UTF8MB3_CODE(c, s[1], s[2]);
- return 3;
- }
- return MY_CS_ILSEQ;
+ return my_mb_wc_utf8mb3_quick(pwc, s, e);
}
@@ -5308,7 +5117,7 @@ int my_strcasecmp_utf8(CHARSET_INFO *cs, const char *s, const char *t)
It represents a single byte character.
Convert it into weight according to collation.
*/
- s_wc= plane00[(uchar) s[0]].tolower;
+ s_wc= my_unicase_default_page00[(uchar) s[0]].tolower;
s++;
}
else
@@ -5350,7 +5159,7 @@ int my_strcasecmp_utf8(CHARSET_INFO *cs, const char *s, const char *t)
if ((uchar) t[0] < 128)
{
/* Convert single byte character into weight */
- t_wc= plane00[(uchar) t[0]].tolower;
+ t_wc= my_unicase_default_page00[(uchar) t[0]].tolower;
t++;
}
else
@@ -5413,14 +5222,14 @@ int my_charlen_utf8(CHARSET_INFO *cs __attribute__((unused)),
static inline int my_weight_mb1_utf8_general_ci(uchar b)
{
- return (int) plane00[b & 0xFF].sort;
+ return (int) my_unicase_default_page00[b & 0xFF].sort;
}
static inline int my_weight_mb2_utf8_general_ci(uchar b0, uchar b1)
{
my_wc_t wc= UTF8MB2_CODE(b0, b1);
- MY_UNICASE_CHARACTER *page= my_unicase_pages_default[wc >> 8];
+ MY_UNICASE_CHARACTER *page= my_unicase_default_pages[wc >> 8];
return (int) (page ? page[wc & 0xFF].sort : wc);
}
@@ -5428,16 +5237,23 @@ static inline int my_weight_mb2_utf8_general_ci(uchar b0, uchar b1)
static inline int my_weight_mb3_utf8_general_ci(uchar b0, uchar b1, uchar b2)
{
my_wc_t wc= UTF8MB3_CODE(b0, b1, b2);
- MY_UNICASE_CHARACTER *page= my_unicase_pages_default[wc >> 8];
+ MY_UNICASE_CHARACTER *page= my_unicase_default_pages[wc >> 8];
return (int) (page ? page[wc & 0xFF].sort : wc);
}
-#define MY_FUNCTION_NAME(x) my_ ## x ## _utf8_general_ci
-#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
-#define WEIGHT_MB1(x) my_weight_mb1_utf8_general_ci(x)
-#define WEIGHT_MB2(x,y) my_weight_mb2_utf8_general_ci(x,y)
-#define WEIGHT_MB3(x,y,z) my_weight_mb3_utf8_general_ci(x,y,z)
+#define MY_FUNCTION_NAME(x) my_ ## x ## _utf8_general_ci
+#define DEFINE_STRNXFRM_UNICODE
+#define DEFINE_STRNXFRM_UNICODE_NOPAD
+#define MY_MB_WC(cs, pwc, s, e) my_mb_wc_utf8mb3_quick(pwc, s, e)
+#define OPTIMIZE_ASCII 1
+#define UNICASE_MAXCHAR MY_UNICASE_INFO_DEFAULT_MAXCHAR
+#define UNICASE_PAGE0 my_unicase_default_page00
+#define UNICASE_PAGES my_unicase_default_pages
+#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
+#define WEIGHT_MB1(x) my_weight_mb1_utf8_general_ci(x)
+#define WEIGHT_MB2(x,y) my_weight_mb2_utf8_general_ci(x,y)
+#define WEIGHT_MB3(x,y,z) my_weight_mb3_utf8_general_ci(x,y,z)
#include "strcoll.ic"
@@ -5473,19 +5289,28 @@ my_weight_mb3_utf8_general_mysql500_ci(uchar b0, uchar b1, uchar b2)
}
-#define MY_FUNCTION_NAME(x) my_ ## x ## _utf8_general_mysql500_ci
-#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
-#define WEIGHT_MB1(x) my_weight_mb1_utf8_general_mysql500_ci(x)
-#define WEIGHT_MB2(x,y) my_weight_mb2_utf8_general_mysql500_ci(x,y)
-#define WEIGHT_MB3(x,y,z) my_weight_mb3_utf8_general_mysql500_ci(x,y,z)
+#define MY_FUNCTION_NAME(x) my_ ## x ## _utf8_general_mysql500_ci
+#define DEFINE_STRNXFRM_UNICODE
+#define MY_MB_WC(cs, pwc, s, e) my_mb_wc_utf8mb3_quick(pwc, s, e)
+#define OPTIMIZE_ASCII 1
+#define UNICASE_MAXCHAR MY_UNICASE_INFO_DEFAULT_MAXCHAR
+#define UNICASE_PAGE0 plane00_mysql500
+#define UNICASE_PAGES my_unicase_pages_mysql500
+#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
+#define WEIGHT_MB1(x) my_weight_mb1_utf8_general_mysql500_ci(x)
+#define WEIGHT_MB2(x,y) my_weight_mb2_utf8_general_mysql500_ci(x,y)
+#define WEIGHT_MB3(x,y,z) my_weight_mb3_utf8_general_mysql500_ci(x,y,z)
#include "strcoll.ic"
-#define MY_FUNCTION_NAME(x) my_ ## x ## _utf8_bin
-#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
-#define WEIGHT_MB1(x) ((int) (uchar) (x))
-#define WEIGHT_MB2(x,y) ((int) UTF8MB2_CODE(x,y))
-#define WEIGHT_MB3(x,y,z) ((int) UTF8MB3_CODE(x,y,z))
+#define MY_FUNCTION_NAME(x) my_ ## x ## _utf8_bin
+#define DEFINE_STRNXFRM_UNICODE_BIN2
+#define MY_MB_WC(cs, pwc, s, e) my_mb_wc_utf8mb3_quick(pwc, s, e)
+#define OPTIMIZE_ASCII 1
+#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
+#define WEIGHT_MB1(x) ((int) (uchar) (x))
+#define WEIGHT_MB2(x,y) ((int) UTF8MB2_CODE(x,y))
+#define WEIGHT_MB3(x,y,z) ((int) UTF8MB3_CODE(x,y,z))
#include "strcoll.ic"
@@ -5534,7 +5359,7 @@ static MY_COLLATION_HANDLER my_collation_utf8_general_ci_handler =
NULL, /* init */
my_strnncoll_utf8_general_ci,
my_strnncollsp_utf8_general_ci,
- my_strnxfrm_unicode,
+ my_strnxfrm_utf8_general_ci,
my_strnxfrmlen_unicode,
my_like_range_mb,
my_wildcmp_utf8,
@@ -5550,7 +5375,7 @@ static MY_COLLATION_HANDLER my_collation_utf8_general_mysql500_ci_handler =
NULL, /* init */
my_strnncoll_utf8_general_mysql500_ci,
my_strnncollsp_utf8_general_mysql500_ci,
- my_strnxfrm_unicode,
+ my_strnxfrm_utf8_general_mysql500_ci,
my_strnxfrmlen_unicode,
my_like_range_mb,
my_wildcmp_utf8,
@@ -5566,7 +5391,7 @@ static MY_COLLATION_HANDLER my_collation_utf8_bin_handler =
NULL, /* init */
my_strnncoll_utf8_bin,
my_strnncollsp_utf8_bin,
- my_strnxfrm_unicode,
+ my_strnxfrm_utf8_bin,
my_strnxfrmlen_unicode,
my_like_range_mb,
my_wildcmp_mb_bin,
@@ -5582,7 +5407,7 @@ static MY_COLLATION_HANDLER my_collation_utf8_general_nopad_ci_handler =
NULL, /* init */
my_strnncoll_utf8_general_ci,
my_strnncollsp_utf8_general_nopad_ci,
- my_strnxfrm_unicode_nopad,
+ my_strnxfrm_nopad_utf8_general_ci,
my_strnxfrmlen_unicode,
my_like_range_mb,
my_wildcmp_utf8,
@@ -5598,7 +5423,7 @@ static MY_COLLATION_HANDLER my_collation_utf8_nopad_bin_handler =
NULL, /* init */
my_strnncoll_utf8_bin,
my_strnncollsp_utf8_nopad_bin,
- my_strnxfrm_unicode_nopad,
+ my_strnxfrm_nopad_utf8_bin,
my_strnxfrmlen_unicode,
my_like_range_mb,
my_wildcmp_mb_bin,
@@ -5927,7 +5752,7 @@ static MY_COLLATION_HANDLER my_collation_cs_handler =
NULL, /* init */
my_strnncoll_utf8_cs,
my_strnncollsp_utf8_cs,
- my_strnxfrm_unicode,
+ my_strnxfrm_utf8_general_ci,
my_strnxfrmlen_unicode,
my_like_range_simple,
my_wildcmp_mb,
@@ -7212,13 +7037,30 @@ my_charlen_filename(CHARSET_INFO *cs, const uchar *str, const uchar *end)
#undef DEFINE_WELL_FORMED_CHAR_LENGTH_USING_CHARLEN
/* my_well_formed_char_length_filename */
+#define MY_FUNCTION_NAME(x) my_ ## x ## _filename
+#define DEFINE_STRNNCOLL 0
+#define DEFINE_STRNXFRM_UNICODE
+#define MY_MB_WC(cs, pwc, s, e) my_mb_wc_filename(cs, pwc, s, e)
+#define OPTIMIZE_ASCII 0
+#define UNICASE_MAXCHAR MY_UNICASE_INFO_DEFAULT_MAXCHAR
+#define UNICASE_PAGE0 my_unicase_default_page00
+#define UNICASE_PAGES my_unicase_default_pages
+
+/*
+#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
+#define WEIGHT_MB1(x) my_weight_mb1_utf8_general_ci(x)
+#define WEIGHT_MB2(x,y) my_weight_mb2_utf8_general_ci(x,y)
+#define WEIGHT_MB3(x,y,z) my_weight_mb3_utf8_general_ci(x,y,z)
+*/
+#include "strcoll.ic"
+
static MY_COLLATION_HANDLER my_collation_filename_handler =
{
NULL, /* init */
my_strnncoll_simple,
my_strnncollsp_simple,
- my_strnxfrm_unicode,
+ my_strnxfrm_filename,
my_strnxfrmlen_unicode,
my_like_range_mb,
my_wildcmp_utf8,
@@ -7375,52 +7217,7 @@ static int
my_mb_wc_utf8mb4(CHARSET_INFO *cs __attribute__((unused)),
my_wc_t * pwc, const uchar *s, const uchar *e)
{
- uchar c;
-
- if (s >= e)
- return MY_CS_TOOSMALL;
-
- c= s[0];
- if (c < 0x80)
- {
- *pwc= c;
- return 1;
- }
- else if (c < 0xc2)
- return MY_CS_ILSEQ;
- else if (c < 0xe0)
- {
- if (s + 2 > e) /* We need 2 characters */
- return MY_CS_TOOSMALL2;
-
- if (!(IS_CONTINUATION_BYTE(s[1])))
- return MY_CS_ILSEQ;
-
- *pwc= UTF8MB2_CODE(c, s[1]);
- return 2;
- }
- else if (c < 0xf0)
- {
- if (s + 3 > e) /* We need 3 characters */
- return MY_CS_TOOSMALL3;
-
- if (!IS_UTF8MB3_STEP2(c, s[1], s[2]))
- return MY_CS_ILSEQ;
-
- *pwc= UTF8MB3_CODE(c, s[1], s[2]);
- return 3;
- }
- else if (c < 0xf5)
- {
- if (s + 4 > e) /* We need 4 characters */
- return MY_CS_TOOSMALL4;
-
- if (!IS_UTF8MB4_STEP2(c, s[1], s[2], s[3]))
- return MY_CS_ILSEQ;
- *pwc= UTF8MB4_CODE(c, s[1], s[2], s[3]);
- return 4;
- }
- return MY_CS_ILSEQ;
+ return my_mb_wc_utf8mb4_quick(pwc, s, e);
}
@@ -7752,7 +7549,7 @@ my_strcasecmp_utf8mb4(CHARSET_INFO *cs, const char *s, const char *t)
It represents a single byte character.
Convert it into weight according to collation.
*/
- s_wc= plane00[(uchar) s[0]].tolower;
+ s_wc= my_unicase_default_page00[(uchar) s[0]].tolower;
s++;
}
else
@@ -7776,7 +7573,7 @@ my_strcasecmp_utf8mb4(CHARSET_INFO *cs, const char *s, const char *t)
if ((uchar) t[0] < 128)
{
/* Convert single byte character into weight */
- t_wc= plane00[(uchar) t[0]].tolower;
+ t_wc= my_unicase_default_page00[(uchar) t[0]].tolower;
t++;
}
else
@@ -7847,6 +7644,13 @@ my_charlen_utf8mb4(CHARSET_INFO *cs __attribute__((unused)),
#define MY_FUNCTION_NAME(x) my_ ## x ## _utf8mb4_general_ci
+#define DEFINE_STRNXFRM_UNICODE
+#define DEFINE_STRNXFRM_UNICODE_NOPAD
+#define MY_MB_WC(cs, pwc, s, e) my_mb_wc_utf8mb4_quick(pwc, s, e)
+#define OPTIMIZE_ASCII 1
+#define UNICASE_MAXCHAR MY_UNICASE_INFO_DEFAULT_MAXCHAR
+#define UNICASE_PAGE0 my_unicase_default_page00
+#define UNICASE_PAGES my_unicase_default_pages
#define IS_MB4_CHAR(b0,b1,b2,b3) IS_UTF8MB4_STEP3(b0,b1,b2,b3)
#define WEIGHT_ILSEQ(x) (0xFF0000 + (uchar) (x))
#define WEIGHT_MB1(b0) my_weight_mb1_utf8_general_ci(b0)
@@ -7897,7 +7701,7 @@ static MY_COLLATION_HANDLER my_collation_utf8mb4_general_ci_handler=
NULL, /* init */
my_strnncoll_utf8mb4_general_ci,
my_strnncollsp_utf8mb4_general_ci,
- my_strnxfrm_unicode,
+ my_strnxfrm_utf8mb4_general_ci,
my_strnxfrmlen_unicode,
my_like_range_mb,
my_wildcmp_utf8mb4,
@@ -7929,7 +7733,7 @@ static MY_COLLATION_HANDLER my_collation_utf8mb4_general_nopad_ci_handler=
NULL, /* init */
my_strnncoll_utf8mb4_general_ci,
my_strnncollsp_utf8mb4_general_nopad_ci,
- my_strnxfrm_unicode_nopad,
+ my_strnxfrm_nopad_utf8mb4_general_ci,
my_strnxfrmlen_unicode,
my_like_range_mb,
my_wildcmp_utf8mb4,
diff --git a/strings/ctype-utf8.h b/strings/ctype-utf8.h
new file mode 100644
index 00000000000..9a44c1658f2
--- /dev/null
+++ b/strings/ctype-utf8.h
@@ -0,0 +1,190 @@
+/*
+ Copyright (c) 2018 MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#ifndef _CTYPE_UTF8_H
+#define _CTYPE_UTF8_H
+
+/* Detect special bytes and sequences */
+#define IS_CONTINUATION_BYTE(c) (((uchar) (c) ^ 0x80) < 0x40)
+
+/*
+  Check MB2 character assuming that b0 is already known to be >= 0xC2.
+ Use this macro if the caller already checked b0 for:
+ - an MB1 character
+ - an unused gap between MB1 and MB2HEAD
+*/
+#define IS_UTF8MB2_STEP2(b0,b1) (((uchar) (b0) < 0xE0) && \
+ IS_CONTINUATION_BYTE((uchar) b1))
+
+/*
+ Check MB3 character assuming that b0 is already known to be
+ in the valid MB3HEAD range [0xE0..0xEF].
+*/
+#define IS_UTF8MB3_STEP2(b0,b1,b2) (IS_CONTINUATION_BYTE(b1) && \
+ IS_CONTINUATION_BYTE(b2) && \
+ ((uchar) b0 >= 0xe1 || (uchar) b1 >= 0xa0))
+
+/*
+ Check MB3 character assuming that b0 is already known to be >= 0xE0,
+ but is not checked for the high end 0xF0 yet.
+ Use this macro if the caller already checked b0 for:
+ - an MB1 character
+ - an unused gap between MB1 and MB2HEAD
+ - an MB2HEAD
+*/
+#define IS_UTF8MB3_STEP3(b0,b1,b2) (((uchar) (b0) < 0xF0) && \
+ IS_UTF8MB3_STEP2(b0,b1,b2))
+
+/*
+ UTF-8 quick four-byte mask:
+ 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+  The encoding allows encoding U+00010000..U+001FFFFF
+
+ The maximum character defined in the Unicode standard is U+0010FFFF.
+ Higher characters U+00110000..U+001FFFFF are not used.
+
+ 11110000.10010000.10xxxxxx.10xxxxxx == F0.90.80.80 == U+00010000 (min)
+ 11110100.10001111.10111111.10111111 == F4.8F.BF.BF == U+0010FFFF (max)
+
+ Valid codes:
+ [F0][90..BF][80..BF][80..BF]
+ [F1][80..BF][80..BF][80..BF]
+ [F2][80..BF][80..BF][80..BF]
+ [F3][80..BF][80..BF][80..BF]
+ [F4][80..8F][80..BF][80..BF]
+*/
+
+/*
+ Check MB4 character assuming that b0 is already
+ known to be in the range [0xF0..0xF4]
+*/
+#define IS_UTF8MB4_STEP2(b0,b1,b2,b3) (IS_CONTINUATION_BYTE(b1) && \
+ IS_CONTINUATION_BYTE(b2) && \
+ IS_CONTINUATION_BYTE(b3) && \
+ (b0 >= 0xf1 || b1 >= 0x90) && \
+ (b0 <= 0xf3 || b1 <= 0x8F))
+#define IS_UTF8MB4_STEP3(b0,b1,b2,b3) (((uchar) (b0) < 0xF5) && \
+ IS_UTF8MB4_STEP2(b0,b1,b2,b3))
+
+/* Convert individual bytes to Unicode code points */
+#define UTF8MB2_CODE(b0,b1) (((my_wc_t) ((uchar) b0 & 0x1f) << 6) |\
+ ((my_wc_t) ((uchar) b1 ^ 0x80)))
+#define UTF8MB3_CODE(b0,b1,b2) (((my_wc_t) ((uchar) b0 & 0x0f) << 12) |\
+ ((my_wc_t) ((uchar) b1 ^ 0x80) << 6) |\
+ ((my_wc_t) ((uchar) b2 ^ 0x80)))
+#define UTF8MB4_CODE(b0,b1,b2,b3) (((my_wc_t) ((uchar) b0 & 0x07) << 18) |\
+ ((my_wc_t) ((uchar) b1 ^ 0x80) << 12) |\
+ ((my_wc_t) ((uchar) b2 ^ 0x80) << 6) |\
+ (my_wc_t) ((uchar) b3 ^ 0x80))
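+
+/*
+  Worked example: U+1F600 is encoded as F0 9F 98 80, so
+    IS_UTF8MB4_STEP3(0xF0, 0x9F, 0x98, 0x80) evaluates to TRUE and
+    UTF8MB4_CODE(0xF0, 0x9F, 0x98, 0x80) yields 0x1F600,
+  while F0 8F BF BF is rejected by IS_UTF8MB4_STEP2: with b0 == 0xF0 the
+  second byte must be >= 0x90, otherwise the sequence is overlong.
+*/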
+
+static inline int
+my_mb_wc_utf8mb3_quick(my_wc_t * pwc, const uchar *s, const uchar *e)
+{
+ uchar c;
+
+ if (s >= e)
+ return MY_CS_TOOSMALL;
+
+ c= s[0];
+ if (c < 0x80)
+ {
+ *pwc = c;
+ return 1;
+ }
+ else if (c < 0xc2)
+ return MY_CS_ILSEQ;
+ else if (c < 0xe0)
+ {
+ if (s+2 > e) /* We need 2 characters */
+ return MY_CS_TOOSMALL2;
+
+ if (!(IS_CONTINUATION_BYTE(s[1])))
+ return MY_CS_ILSEQ;
+
+ *pwc= UTF8MB2_CODE(c, s[1]);
+ return 2;
+ }
+ else if (c < 0xf0)
+ {
+ if (s+3 > e) /* We need 3 characters */
+ return MY_CS_TOOSMALL3;
+
+ if (!IS_UTF8MB3_STEP2(c, s[1], s[2]))
+ return MY_CS_ILSEQ;
+
+ *pwc= UTF8MB3_CODE(c, s[1], s[2]);
+ return 3;
+ }
+ return MY_CS_ILSEQ;
+}
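+
+/*
+  A minimal usage sketch (hypothetical caller; assumes my_wc_t and the
+  MY_CS_xxx return codes from m_ctype.h are in scope). A return value <= 0
+  means MY_CS_ILSEQ or one of the MY_CS_TOOSMALL codes; a positive value
+  is the byte length of the decoded character:
+
+    const uchar *s= str, *e= str + length;
+    while (s < e)
+    {
+      my_wc_t wc;
+      int rc= my_mb_wc_utf8mb3_quick(&wc, s, e);
+      if (rc <= 0)
+        break;
+      s+= rc;
+    }
+*/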
+
+
+#ifdef HAVE_CHARSET_utf8mb4
+static inline int
+my_mb_wc_utf8mb4_quick(my_wc_t *pwc, const uchar *s, const uchar *e)
+{
+ uchar c;
+
+ if (s >= e)
+ return MY_CS_TOOSMALL;
+
+ c= s[0];
+ if (c < 0x80)
+ {
+ *pwc= c;
+ return 1;
+ }
+ else if (c < 0xc2)
+ return MY_CS_ILSEQ;
+ else if (c < 0xe0)
+ {
+ if (s + 2 > e) /* We need 2 characters */
+ return MY_CS_TOOSMALL2;
+
+ if (!(IS_CONTINUATION_BYTE(s[1])))
+ return MY_CS_ILSEQ;
+
+ *pwc= UTF8MB2_CODE(c, s[1]);
+ return 2;
+ }
+ else if (c < 0xf0)
+ {
+ if (s + 3 > e) /* We need 3 characters */
+ return MY_CS_TOOSMALL3;
+
+ if (!IS_UTF8MB3_STEP2(c, s[1], s[2]))
+ return MY_CS_ILSEQ;
+
+ *pwc= UTF8MB3_CODE(c, s[1], s[2]);
+ return 3;
+ }
+ else if (c < 0xf5)
+ {
+ if (s + 4 > e) /* We need 4 characters */
+ return MY_CS_TOOSMALL4;
+
+ if (!IS_UTF8MB4_STEP2(c, s[1], s[2], s[3]))
+ return MY_CS_ILSEQ;
+ *pwc= UTF8MB4_CODE(c, s[1], s[2], s[3]);
+ return 4;
+ }
+ return MY_CS_ILSEQ;
+}
+#endif /* HAVE_CHARSET_utf8mb4*/
+
+
+#endif /* _CTYPE_UTF8_H */
diff --git a/strings/json_lib.c b/strings/json_lib.c
index 24c79cb9044..3763ac4ed54 100644
--- a/strings/json_lib.c
+++ b/strings/json_lib.c
@@ -1845,3 +1845,252 @@ int json_path_compare(const json_path_t *a, const json_path_t *b,
return json_path_parts_compare(a->steps+1, a->last_step,
b->steps+1, b->last_step, vt);
}
+
+
+static enum json_types smart_read_value(json_engine_t *je,
+ const char **value, int *value_len)
+{
+ if (json_read_value(je))
+ goto err_return;
+
+ *value= (char *) je->value;
+
+ if (json_value_scalar(je))
+ *value_len= je->value_len;
+ else
+ {
+ if (json_skip_level(je))
+ goto err_return;
+
+ *value_len= (int) ((char *) je->s.c_str - *value);
+ }
+
+ return je->value_type;
+
+err_return:
+ return JSV_BAD_JSON;
+}
+
+
+enum json_types json_type(const char *js, const char *js_end,
+ const char **value, int *value_len)
+{
+ json_engine_t je;
+
+ json_scan_start(&je, &my_charset_utf8mb4_bin,(const uchar *) js,
+ (const uchar *) js_end);
+
+ return smart_read_value(&je, value, value_len);
+}
+
+
+enum json_types json_get_array_item(const char *js, const char *js_end,
+ int n_item,
+ const char **value, int *value_len)
+{
+ json_engine_t je;
+ int c_item= 0;
+
+ json_scan_start(&je, &my_charset_utf8mb4_bin,(const uchar *) js,
+ (const uchar *) js_end);
+
+ if (json_read_value(&je) ||
+ je.value_type != JSON_VALUE_ARRAY)
+ goto err_return;
+
+ while (!json_scan_next(&je))
+ {
+ switch (je.state)
+ {
+ case JST_VALUE:
+ if (c_item == n_item)
+ return smart_read_value(&je, value, value_len);
+
+ if (json_skip_key(&je))
+ goto err_return;
+
+ c_item++;
+ break;
+
+ case JST_ARRAY_END:
+ *value= (const char *) (je.s.c_str - je.sav_c_len);
+ *value_len= c_item;
+ return JSV_NOTHING;
+ }
+ }
+
+err_return:
+ return JSV_BAD_JSON;
+}
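+
+/*
+  Usage sketch (hypothetical example): for js= "[10,20,30]",
+  json_get_array_item(js, js + strlen(js), 1, &v, &v_len) returns the
+  numeric value type from enum json_types, with v pointing at "20" and
+  v_len == 2; asking for item 5 returns JSV_NOTHING and sets v_len to
+  the number of items (3).
+*/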
+
+
+/** Simple json lookup for a value by the key.
+
+  Expects a JSON object.
+  Only scans the 'first level' of the object, not
+  nested structures.
+
+  @param js [in]        json object to search in
+  @param js_end [in]    end of json string
+  @param key [in]       NUL-terminated key to search for
+  @param value [out]    pointer into js (the value, or the closing })
+  @param value_len [out] length of the value found, or the number of keys
+
+  @retval the type of the key's value
+  @retval JSV_BAD_JSON - syntax error while reading the JSON,
+                         or js is not a JSON object.
+ @retval JSV_NOTHING - no such key found.
+*/
+enum json_types json_get_object_key(const char *js, const char *js_end,
+ const char *key,
+ const char **value, int *value_len)
+{
+ const char *key_end= key + strlen(key);
+ json_engine_t je;
+ json_string_t key_name;
+ int n_keys= 0;
+
+ json_string_set_cs(&key_name, &my_charset_utf8mb4_bin);
+
+ json_scan_start(&je, &my_charset_utf8mb4_bin,(const uchar *) js,
+ (const uchar *) js_end);
+
+ if (json_read_value(&je) ||
+ je.value_type != JSON_VALUE_OBJECT)
+ goto err_return;
+
+ while (!json_scan_next(&je))
+ {
+ switch (je.state)
+ {
+ case JST_KEY:
+ n_keys++;
+ json_string_set_str(&key_name, (const uchar *) key,
+ (const uchar *) key_end);
+ if (json_key_matches(&je, &key_name))
+ return smart_read_value(&je, value, value_len);
+
+ if (json_skip_key(&je))
+ goto err_return;
+
+ break;
+
+ case JST_OBJ_END:
+ *value= (const char *) (je.s.c_str - je.sav_c_len);
+ *value_len= n_keys;
+ return JSV_NOTHING;
+ }
+ }
+
+err_return:
+ return JSV_BAD_JSON;
+}
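+
+/*
+  Usage sketch (hypothetical example): for js= "{\"a\":10,\"b\":[1,2]}",
+  json_get_object_key(js, js + strlen(js), "b", &v, &v_len) returns the
+  array type with v pointing at "[1,2]" and v_len == 5, while a missing
+  key returns JSV_NOTHING and sets v_len to the number of keys (2).
+*/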
+
+
+enum json_types json_get_object_nkey(const char *js,const char *js_end, int nkey,
+ const char **keyname, const char **keyname_end,
+ const char **value, int *value_len)
+{
+ return JSV_NOTHING;
+}
+
+
+/** Check if json is valid (well-formed)
+
+  @retval 1 - success, json is well-formed
+  @retval 0 - error, json is invalid
+*/
+int json_valid(const char *js, size_t js_len, CHARSET_INFO *cs)
+{
+ json_engine_t je;
+ json_scan_start(&je, cs, (const uchar *) js, (const uchar *) js + js_len);
+ while (json_scan_next(&je) == 0) /* no-op */ ;
+ return je.s.error == 0;
+}
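+
+/*
+  For example (illustrative): json_valid("{\"a\":1}", 7, &my_charset_utf8mb4_bin)
+  returns 1, while json_valid("{", 1, &my_charset_utf8mb4_bin) returns 0
+  because the object is never closed.
+*/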
+
+
+/*
+  Expects the JSON object as the js argument, and the key name.
+  Looks for this key in the object and returns
+  the location of all the text related to it.
+  The text includes the comma separating this key.
+
+  comma_pos - a hint telling where the comma is. It is important
+              if you plan to replace the key rather than just cut it.
+    1  - comma is on the left
+    2  - comma is on the right
+    0  - no comma at all (the object has just this single key)
+
+  If no such key is found, *key_start is set to NULL.
+*/
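+/*
+  Example (illustrative): in {"a":1,"b":2} locating "b" gives comma_pos == 1
+  and a [*key_start, *key_end) range that starts at the comma before "b",
+  so cutting that range removes the key cleanly; locating "a" gives
+  comma_pos == 2 and a range that includes the trailing comma instead.
+*/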
+int json_locate_key(const char *js, const char *js_end,
+ const char *kname,
+ const char **key_start, const char **key_end,
+ int *comma_pos)
+{
+ const char *kname_end= kname + strlen(kname);
+ json_engine_t je;
+ json_string_t key_name;
+ int t_next, c_len, match_result;
+
+ json_string_set_cs(&key_name, &my_charset_utf8mb4_bin);
+
+ json_scan_start(&je, &my_charset_utf8mb4_bin,(const uchar *) js,
+ (const uchar *) js_end);
+
+ if (json_read_value(&je) ||
+ je.value_type != JSON_VALUE_OBJECT)
+ goto err_return;
+
+ *key_start= (const char *) je.s.c_str;
+ *comma_pos= 0;
+
+ while (!json_scan_next(&je))
+ {
+ switch (je.state)
+ {
+ case JST_KEY:
+ json_string_set_str(&key_name, (const uchar *) kname,
+ (const uchar *) kname_end);
+ match_result= json_key_matches(&je, &key_name);
+ if (json_skip_key(&je))
+ goto err_return;
+ get_first_nonspace(&je.s, &t_next, &c_len);
+ je.s.c_str-= c_len;
+
+ if (match_result)
+ {
+ *key_end= (const char *) je.s.c_str;
+
+ if (*comma_pos == 1)
+ return 0;
+
+ DBUG_ASSERT(*comma_pos == 0);
+
+ if (t_next == C_COMMA)
+ {
+ *key_end+= c_len;
+ *comma_pos= 2;
+ }
+ else if (t_next == C_RCURB)
+ *comma_pos= 0;
+ else
+ goto err_return;
+ return 0;
+ }
+
+ *key_start= (const char *) je.s.c_str;
+ *comma_pos= 1;
+ break;
+
+ case JST_OBJ_END:
+ *key_start= NULL;
+ return 0;
+ }
+ }
+
+err_return:
+ return 1;
+
+}
diff --git a/strings/strcoll.ic b/strings/strcoll.ic
index c647a5ef57e..9dfccb9018c 100644
--- a/strings/strcoll.ic
+++ b/strings/strcoll.ic
@@ -15,11 +15,18 @@
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-
#ifndef MY_FUNCTION_NAME
#error MY_FUNCTION_NAME is not defined
#endif
+/*
+ Define strnncoll() and strnncollsp() by default,
+ unless "#define DEFINE_STRNNCOLL 0" is specified.
+*/
+#ifndef DEFINE_STRNNCOLL
+#define DEFINE_STRNNCOLL 1
+#endif
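+
+/*
+  For example (hypothetical usage), a source file that only wants the
+  strnxfrm definitions from this file can request:
+
+    #define DEFINE_STRNNCOLL 0
+    #include "strcoll.ic"
+*/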
+
/*
The weight for automatically padded spaces when comparing strings with
@@ -54,6 +61,8 @@
#endif
+#if DEFINE_STRNNCOLL
+
/**
Scan a valid character, or a bad byte, or an auto-padded space
from a string and calculate the weight of the scanned sequence.
@@ -278,6 +287,8 @@ MY_FUNCTION_NAME(strnncollsp)(CHARSET_INFO *cs __attribute__((unused)),
}
#endif
+#endif /* DEFINE_STRNNCOLL */
+
#ifdef DEFINE_STRNXFRM
#ifndef WEIGHT_MB2_FRM
@@ -322,11 +333,261 @@ MY_FUNCTION_NAME(strnxfrm)(CHARSET_INFO *cs,
#endif /* DEFINE_STRNXFRM */
+#if defined(DEFINE_STRNXFRM_UNICODE) || defined(DEFINE_STRNXFRM_UNICODE_NOPAD)
+
+/*
+ Store sorting weights using 2 bytes per character.
+
+ This function is shared between
+ - utf8mb3_general_ci, utf8_bin, ucs2_general_ci, ucs2_bin
+ which support BMP only (U+0000..U+FFFF).
+ - utf8mb4_general_ci, utf16_general_ci, utf32_general_ci,
+ which map all supplementary characters to weight 0xFFFD.
+*/
+
+#ifndef MY_MB_WC
+#error MY_MB_WC must be defined for DEFINE_STRNXFRM_UNICODE
+#endif
+
+#ifndef OPTIMIZE_ASCII
+#error OPTIMIZE_ASCII must be defined for DEFINE_STRNXFRM_UNICODE
+#endif
+
+#ifndef UNICASE_MAXCHAR
+#error UNICASE_MAXCHAR must be defined for DEFINE_STRNXFRM_UNICODE
+#endif
+
+#ifndef UNICASE_PAGE0
+#error UNICASE_PAGE0 must be defined for DEFINE_STRNXFRM_UNICODE
+#endif
+
+#ifndef UNICASE_PAGES
+#error UNICASE_PAGES must be defined for DEFINE_STRNXFRM_UNICODE
+#endif
+
+
+static size_t
+MY_FUNCTION_NAME(strnxfrm_internal)(CHARSET_INFO *cs,
+ uchar *dst, uchar *de,
+ uint *nweights,
+ const uchar *src, const uchar *se)
+{
+ my_wc_t UNINIT_VAR(wc);
+ uchar *dst0= dst;
+
+ DBUG_ASSERT(src || !se);
+ DBUG_ASSERT((cs->state & MY_CS_LOWER_SORT) == 0);
+ DBUG_ASSERT(0x7F <= UNICASE_MAXCHAR);
+
+ for (; dst < de && *nweights; (*nweights)--)
+ {
+ int res;
+#if OPTIMIZE_ASCII
+ if (src >= se)
+ break;
+ if (src[0] <= 0x7F)
+ {
+ wc= UNICASE_PAGE0[*src++].sort;
+ PUT_WC_BE2_HAVE_1BYTE(dst, de, wc);
+ continue;
+ }
+#endif
+ if ((res= MY_MB_WC(cs, &wc, src, se)) <= 0)
+ break;
+ src+= res;
+ if (wc <= UNICASE_MAXCHAR)
+ {
+ MY_UNICASE_CHARACTER *page;
+ if ((page= UNICASE_PAGES[wc >> 8]))
+ wc= page[wc & 0xFF].sort;
+ }
+ else
+ wc= MY_CS_REPLACEMENT_CHARACTER;
+ PUT_WC_BE2_HAVE_1BYTE(dst, de, wc);
+ }
+ return dst - dst0;
+}
+
+
+static size_t
+MY_FUNCTION_NAME(strnxfrm)(CHARSET_INFO *cs,
+ uchar *dst, size_t dstlen, uint nweights,
+ const uchar *src, size_t srclen, uint flags)
+{
+ uchar *dst0= dst;
+ uchar *de= dst + dstlen;
+ dst+= MY_FUNCTION_NAME(strnxfrm_internal)(cs, dst, de, &nweights,
+ src, src + srclen);
+ DBUG_ASSERT(dst <= de); /* Safety */
+
+ if (dst < de && nweights && (flags & MY_STRXFRM_PAD_WITH_SPACE))
+ dst+= my_strxfrm_pad_nweights_unicode(dst, de, nweights);
+
+ my_strxfrm_desc_and_reverse(dst0, dst, flags, 0);
+
+ if ((flags & MY_STRXFRM_PAD_TO_MAXLEN) && dst < de)
+ dst+= my_strxfrm_pad_unicode(dst, de);
+ return dst - dst0;
+}
+
+
+#ifdef DEFINE_STRNXFRM_UNICODE_NOPAD
+static size_t
+MY_FUNCTION_NAME(strnxfrm_nopad)(CHARSET_INFO *cs,
+ uchar *dst, size_t dstlen,
+ uint nweights,
+ const uchar *src, size_t srclen, uint flags)
+{
+ uchar *dst0= dst;
+ uchar *de= dst + dstlen;
+ dst+= MY_FUNCTION_NAME(strnxfrm_internal)(cs, dst, de, &nweights,
+ src, src + srclen);
+ DBUG_ASSERT(dst <= de); /* Safety */
+
+ if (dst < de && nweights && (flags & MY_STRXFRM_PAD_WITH_SPACE))
+ {
+ size_t len= de - dst;
+ set_if_smaller(len, nweights * 2);
+ memset(dst, 0x00, len);
+ dst+= len;
+ }
+
+ my_strxfrm_desc_and_reverse(dst0, dst, flags, 0);
+
+ if ((flags & MY_STRXFRM_PAD_TO_MAXLEN) && dst < de)
+ {
+ memset(dst, 0x00, de - dst);
+ dst= de;
+ }
+ return dst - dst0;
+}
+#endif
+
+#endif /* DEFINE_STRNXFRM_UNICODE || DEFINE_STRNXFRM_UNICODE_NOPAD */
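+
+/*
+  A sketch of the intended include pattern (hypothetical macro values; the
+  real per-collation definitions live in the ctype-*.c sources that include
+  this file):
+
+    #define MY_FUNCTION_NAME(x)      my_ ## x ## _utf8mb4_general_ci
+    #define DEFINE_STRNXFRM_UNICODE_NOPAD
+    #define MY_MB_WC(cs, pwc, s, e)  my_mb_wc_utf8mb4_quick(pwc, s, e)
+    #define OPTIMIZE_ASCII           1
+    #define UNICASE_MAXCHAR          ...
+    #define UNICASE_PAGE0            ...
+    #define UNICASE_PAGES            ...
+    #include "strcoll.ic"
+*/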
+
+
+
+#ifdef DEFINE_STRNXFRM_UNICODE_BIN2
+
+/*
+ Store sorting weights using 2 bytes per character.
+
+ These functions are shared between
+ - utf8mb3_general_ci, utf8_bin, ucs2_general_ci, ucs2_bin
+ which support BMP only (U+0000..U+FFFF).
+ - utf8mb4_general_ci, utf16_general_ci, utf32_general_ci,
+ which map all supplementary characters to weight 0xFFFD.
+*/
+
+#ifndef MY_MB_WC
+#error MY_MB_WC must be defined for DEFINE_STRNXFRM_UNICODE_BIN2
+#endif
+
+#ifndef OPTIMIZE_ASCII
+#error OPTIMIZE_ASCII must be defined for DEFINE_STRNXFRM_UNICODE_BIN2
+#endif
+
+
+static size_t
+MY_FUNCTION_NAME(strnxfrm_internal)(CHARSET_INFO *cs,
+ uchar *dst, uchar *de,
+ uint *nweights,
+ const uchar *src,
+ const uchar *se)
+{
+ my_wc_t UNINIT_VAR(wc);
+ uchar *dst0= dst;
+
+ DBUG_ASSERT(src || !se);
+
+ for (; dst < de && *nweights; (*nweights)--)
+ {
+ int res;
+#if OPTIMIZE_ASCII
+ if (src >= se)
+ break;
+ if (src[0] <= 0x7F)
+ {
+ wc= *src++;
+ PUT_WC_BE2_HAVE_1BYTE(dst, de, wc);
+ continue;
+ }
+#endif
+ if ((res= MY_MB_WC(cs, &wc, src, se)) <= 0)
+ break;
+ src+= res;
+ if (wc > 0xFFFF)
+ wc= MY_CS_REPLACEMENT_CHARACTER;
+ PUT_WC_BE2_HAVE_1BYTE(dst, de, wc);
+ }
+ return dst - dst0;
+}
+
+
+static size_t
+MY_FUNCTION_NAME(strnxfrm)(CHARSET_INFO *cs,
+ uchar *dst, size_t dstlen, uint nweights,
+ const uchar *src, size_t srclen, uint flags)
+{
+ uchar *dst0= dst;
+ uchar *de= dst + dstlen;
+ dst+= MY_FUNCTION_NAME(strnxfrm_internal)(cs, dst, de, &nweights,
+ src, src + srclen);
+ DBUG_ASSERT(dst <= de); /* Safety */
+
+ if (dst < de && nweights && (flags & MY_STRXFRM_PAD_WITH_SPACE))
+ dst+= my_strxfrm_pad_nweights_unicode(dst, de, nweights);
+
+ my_strxfrm_desc_and_reverse(dst0, dst, flags, 0);
+
+ if ((flags & MY_STRXFRM_PAD_TO_MAXLEN) && dst < de)
+ dst+= my_strxfrm_pad_unicode(dst, de);
+ return dst - dst0;
+}
+
+
+static size_t
+MY_FUNCTION_NAME(strnxfrm_nopad)(CHARSET_INFO *cs,
+ uchar *dst, size_t dstlen, uint nweights,
+ const uchar *src, size_t srclen, uint flags)
+{
+ uchar *dst0= dst;
+ uchar *de= dst + dstlen;
+ dst+= MY_FUNCTION_NAME(strnxfrm_internal)(cs, dst, de, &nweights,
+ src, src + srclen);
+ DBUG_ASSERT(dst <= de); /* Safety */
+
+ if (dst < de && nweights && (flags & MY_STRXFRM_PAD_WITH_SPACE))
+ {
+ size_t len= de - dst;
+ set_if_smaller(len, nweights * 2);
+ memset(dst, 0x00, len);
+ dst+= len;
+ }
+
+ my_strxfrm_desc_and_reverse(dst0, dst, flags, 0);
+
+ if ((flags & MY_STRXFRM_PAD_TO_MAXLEN) && dst < de)
+ {
+ memset(dst, 0x00, de - dst);
+ dst= de;
+ }
+ return dst - dst0;
+}
+
+#endif /* DEFINE_STRNXFRM_UNICODE_BIN2 */
+
+
/*
We usually include this file at least two times from the same source file,
for the _ci and the _bin collations. Prepare for the second inclusion.
*/
#undef MY_FUNCTION_NAME
+#undef MY_MB_WC
+#undef OPTIMIZE_ASCII
+#undef UNICASE_MAXCHAR
+#undef UNICASE_PAGE0
+#undef UNICASE_PAGES
#undef WEIGHT_ILSEQ
#undef WEIGHT_MB1
#undef WEIGHT_MB2
@@ -335,4 +596,8 @@ MY_FUNCTION_NAME(strnxfrm)(CHARSET_INFO *cs,
#undef WEIGHT_PAD_SPACE
#undef WEIGHT_MB2_FRM
#undef DEFINE_STRNXFRM
+#undef DEFINE_STRNXFRM_UNICODE
+#undef DEFINE_STRNXFRM_UNICODE_NOPAD
+#undef DEFINE_STRNXFRM_UNICODE_BIN2
+#undef DEFINE_STRNNCOLL
#undef DEFINE_STRNNCOLLSP_NOPAD
diff --git a/support-files/CMakeLists.txt b/support-files/CMakeLists.txt
index b5767432fc2..30fe61c3dc4 100644
--- a/support-files/CMakeLists.txt
+++ b/support-files/CMakeLists.txt
@@ -148,9 +148,11 @@ IF(UNIX)
IF (INSTALL_SYSCONFDIR)
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/mysql-log-rotate DESTINATION ${INSTALL_SYSCONFDIR}/logrotate.d
RENAME mysql COMPONENT SupportFiles)
- INSTALL(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/mysql.server
- DESTINATION ${INSTALL_SYSCONFDIR}/init.d
- RENAME mysql COMPONENT SupportFiles)
+ IF(NOT HAVE_SYSTEMD)
+ INSTALL(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/mysql.server
+ DESTINATION ${INSTALL_SYSCONFDIR}/init.d
+ RENAME mysql COMPONENT SupportFiles)
+ ENDIF()
INSTALL(FILES rpm/my.cnf DESTINATION ${INSTALL_SYSCONFDIR}
COMPONENT Common)
diff --git a/support-files/compiler_warnings.supp b/support-files/compiler_warnings.supp
index 4d9ca1c815b..3f7a79556f9 100644
--- a/support-files/compiler_warnings.supp
+++ b/support-files/compiler_warnings.supp
@@ -204,7 +204,6 @@
.*/ctype-simple\.c : .*unary minus operator applied to unsigned type, result still unsigned.*
.*/sql/sys_vars\.cc : invalid access to non-static data member
.*/string3\.h : memset used with constant zero length parameter
-.*/sql/wsrep_hton\.cc : NULL used in arithmetic : 500-600
.* : missing-declarations.*is valid
#
diff --git a/support-files/rpm/server-postin.sh b/support-files/rpm/server-postin.sh
index 08b046dc272..ad783812184 100644
--- a/support-files/rpm/server-postin.sh
+++ b/support-files/rpm/server-postin.sh
@@ -17,9 +17,7 @@ fi
if [ $1 = 1 ] ; then
if [ -x /usr/bin/systemctl ] ; then
/usr/bin/systemctl daemon-reload >/dev/null 2>&1
- fi
-
- if [ -x /sbin/chkconfig ] ; then
+ elif [ -x /sbin/chkconfig ] ; then
/sbin/chkconfig --add mysql
fi
diff --git a/support-files/rpm/server-posttrans.sh b/support-files/rpm/server-posttrans.sh
index 0d242596185..0845a68c791 100644
--- a/support-files/rpm/server-posttrans.sh
+++ b/support-files/rpm/server-posttrans.sh
@@ -1,11 +1,10 @@
if [ -r %{restart_flag} ] ; then
rm %{restart_flag}
+  # only restart the server if it was already running
if [ -x /usr/bin/systemctl ] ; then
/usr/bin/systemctl daemon-reload > /dev/null 2>&1
- fi
-
- # only restart the server if it was alredy running
- if %{_sysconfdir}/init.d/mysql status > /dev/null 2>&1; then
+ /usr/bin/systemctl try-restart mariadb.service > /dev/null 2>&1
+ elif %{_sysconfdir}/init.d/mysql status > /dev/null 2>&1; then
%{_sysconfdir}/init.d/mysql restart
fi
fi
diff --git a/support-files/rpm/server-preun.sh b/support-files/rpm/server-preun.sh
index 7ef48f1c8d4..1d733a7d899 100644
--- a/support-files/rpm/server-preun.sh
+++ b/support-files/rpm/server-preun.sh
@@ -1,12 +1,16 @@
if [ $1 = 0 ] ; then
- # Stop MySQL before uninstalling it
- if [ -x %{_sysconfdir}/init.d/mysql ] ; then
- %{_sysconfdir}/init.d/mysql stop > /dev/null
- fi
+ # Stop MySQL before uninstalling it
# Don't start it automatically anymore
- if [ -x /sbin/chkconfig ] ; then
- /sbin/chkconfig --del mysql
- fi
+ if [ -x /usr/bin/systemctl ] ; then
+ /usr/bin/systemctl stop mariadb.service > /dev/null 2>&1
+ /usr/bin/systemctl disable mariadb.service > /dev/null 2>&1
+ fi
+ if [ -x %{_sysconfdir}/init.d/mysql ] ; then
+ %{_sysconfdir}/init.d/mysql stop > /dev/null
+ fi
+ if [ -x /sbin/chkconfig ] ; then
+ /sbin/chkconfig --del mysql > /dev/null 2>&1
+ fi
fi
# We do not remove the mysql user since it may still own a lot of
diff --git a/support-files/rpm/server.cnf b/support-files/rpm/server.cnf
index f067afd0da3..a9fe564939e 100644
--- a/support-files/rpm/server.cnf
+++ b/support-files/rpm/server.cnf
@@ -39,8 +39,8 @@
# you can put MariaDB-only options here
[mariadb]
-# This group is only read by MariaDB-10.3 servers.
+# This group is only read by MariaDB-10.4 servers.
# If you use the same .cnf file for MariaDB of different versions,
# use this group for options that older servers don't understand
-[mariadb-10.3]
+[mariadb-10.4]
diff --git a/tests/mysql_client_fw.c b/tests/mysql_client_fw.c
index 2a529c12c63..c39b5138309 100644
--- a/tests/mysql_client_fw.c
+++ b/tests/mysql_client_fw.c
@@ -50,9 +50,6 @@ static char *opt_user= 0;
static char *opt_password= 0;
static char *opt_host= 0;
static char *opt_unix_socket= 0;
-#ifdef HAVE_SMEM
-static char *shared_memory_base_name= 0;
-#endif
static unsigned int opt_port;
static my_bool tty_password= 0, opt_silent= 0;
@@ -253,10 +250,6 @@ static void print_st_error(MYSQL_STMT *stmt, const char *msg)
static MYSQL *mysql_client_init(MYSQL* con)
{
MYSQL* res = mysql_init(con);
-#ifdef HAVE_SMEM
- if (res && shared_memory_base_name)
- mysql_options(res, MYSQL_SHARED_MEMORY_BASE_NAME, shared_memory_base_name);
-#endif
if (res && non_blocking_api_enabled)
mysql_options(res, MYSQL_OPT_NONBLOCK, 0);
if (opt_plugin_dir && *opt_plugin_dir)
@@ -1229,11 +1222,6 @@ static struct my_option client_test_long_options[] =
0, 0, 0, 0, 0, 0},
{"silent", 's', "Be more silent", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0,
0},
-#ifdef HAVE_SMEM
- {"shared-memory-base-name", 'm', "Base name of shared memory.",
- &shared_memory_base_name, (uchar**)&shared_memory_base_name, 0,
- GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-#endif
{"socket", 'S', "Socket file to use for connection",
&opt_unix_socket, &opt_unix_socket, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c
index ad0fc4a7422..2c95bac1e21 100644
--- a/tests/mysql_client_test.c
+++ b/tests/mysql_client_test.c
@@ -7606,8 +7606,8 @@ static void test_explain_bug()
verify_prepare_field(result, 7, "ref", "", MYSQL_TYPE_VAR_STRING,
"", "", "", NAME_CHAR_LEN * HA_MAX_KEY_SEG, 0);
- verify_prepare_field(result, 8, "rows", "", MYSQL_TYPE_LONGLONG,
- "", "", "", 10, 0);
+ verify_prepare_field(result, 8, "rows", "", MYSQL_TYPE_VAR_STRING,
+ "", "", "", NAME_CHAR_LEN, 0);
verify_prepare_field(result, 9, "Extra", "", MYSQL_TYPE_VAR_STRING,
"", "", "", 255, 0);
@@ -8397,6 +8397,50 @@ static void test_list_fields()
}
+static void test_list_fields_blob()
+{
+ MYSQL_RES *result;
+ int rc;
+ myheader("test_list_fields_blob");
+
+ rc= mysql_query(mysql, "drop table if exists t1");
+ myquery(rc);
+
+ rc= mysql_query(mysql, "create table t1(c1 tinyblob, c2 blob, c3 mediumblob, c4 longblob)");
+ myquery(rc);
+
+ result= mysql_list_fields(mysql, "t1", NULL);
+ mytest(result);
+
+ rc= my_process_result_set(result);
+ DIE_UNLESS(rc == 0);
+
+ /*
+ All BLOB variant Fields are displayed as MYSQL_TYPE_BLOB in
+    the result set metadata. Note that some Items display the exact
+ BLOB type. This inconsistency should be fixed eventually.
+ */
+ verify_prepare_field(result, 0, "c1", "c1", MYSQL_TYPE_BLOB,
+ "t1", "t1",
+ current_db, 255, NULL);
+
+ verify_prepare_field(result, 1, "c2", "c2", MYSQL_TYPE_BLOB,
+ "t1", "t1",
+ current_db, 65535, NULL);
+
+ verify_prepare_field(result, 2, "c3", "c3", MYSQL_TYPE_BLOB,
+ "t1", "t1",
+ current_db, 16777215, NULL);
+
+ verify_prepare_field(result, 3, "c4", "c4", MYSQL_TYPE_BLOB,
+ "t1", "t1",
+ current_db, 4294967295ULL, NULL);
+
+ mysql_free_result(result);
+ myquery(mysql_query(mysql, "drop table t1"));
+}
+
+
static void test_list_fields_default()
{
int rc, i;
@@ -8467,6 +8511,43 @@ static void test_list_fields_default()
}
+/**
+ Note, this test covers MDEV-18408 and MDEV-18685
+*/
+
+static void test_mdev18408()
+{
+ MYSQL_RES *result;
+ int rc;
+  myheader("test_mdev18408");
+
+ rc= mysql_query(mysql, "DROP TABLE IF EXISTS t1");
+ myquery(rc);
+
+ rc= mysql_query(mysql, "DROP VIEW IF EXISTS v1");
+ myquery(rc);
+
+ rc= mysql_query(mysql, "CREATE TABLE t1 (c1 TIMESTAMP NULL DEFAULT NULL)");
+ myquery(rc);
+
+ rc= mysql_query(mysql, "CREATE VIEW v1 AS SELECT c1 FROM t1");
+ myquery(rc);
+
+ result= mysql_list_fields(mysql, "v1", NULL);
+ mytest(result);
+
+ rc= my_process_result_set(result);
+ DIE_UNLESS(rc == 0);
+
+ verify_prepare_field(result, 0, "c1", "c1", MYSQL_TYPE_TIMESTAMP,
+ "v1", "v1", current_db, 19, 0);
+
+ mysql_free_result(result);
+ myquery(mysql_query(mysql, "DROP VIEW v1"));
+ myquery(mysql_query(mysql, "DROP TABLE t1"));
+}
+
+
static void test_bug19671()
{
MYSQL_RES *result;
@@ -8493,7 +8574,7 @@ static void test_bug19671()
DIE_UNLESS(rc == 0);
verify_prepare_field(result, 0, "f1", "f1", MYSQL_TYPE_LONG,
- "v1", "v1", current_db, 11, "0");
+ "v1", "v1", current_db, 11, NULL);
mysql_free_result(result);
myquery(mysql_query(mysql, "drop view v1"));
@@ -20865,6 +20946,7 @@ static struct my_tests_st my_tests[]= {
{ "test_fetch_column", test_fetch_column },
{ "test_mem_overun", test_mem_overun },
{ "test_list_fields", test_list_fields },
+ { "test_list_fields_blob", test_list_fields_blob },
{ "test_list_fields_default", test_list_fields_default },
{ "test_free_result", test_free_result },
{ "test_free_store_result", test_free_store_result },
@@ -21057,6 +21139,7 @@ static struct my_tests_st my_tests[]= {
{ "test_bulk_replace", test_bulk_replace },
#endif
{ "test_explain_meta", test_explain_meta },
+ { "test_mdev18408", test_mdev18408 },
{ 0, 0 }
};
diff --git a/unittest/strings/CMakeLists.txt b/unittest/strings/CMakeLists.txt
index 245747538c9..0896e132d24 100644
--- a/unittest/strings/CMakeLists.txt
+++ b/unittest/strings/CMakeLists.txt
@@ -1,3 +1,3 @@
-MY_ADD_TESTS(strings LINK_LIBRARIES strings mysys)
+MY_ADD_TESTS(strings json LINK_LIBRARIES strings mysys)
diff --git a/unittest/strings/json-t.c b/unittest/strings/json-t.c
new file mode 100644
index 00000000000..7c5f7957d42
--- /dev/null
+++ b/unittest/strings/json-t.c
@@ -0,0 +1,103 @@
+/* Copyright (c) 2019, MariaDB Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include <tap.h>
+#include <my_sys.h>
+#include <json_lib.h>
+
+const char *json="{\"int\":1,\"str\":\"foo bar\","
+ "\"array\":[10,20,{\"c\":\"d\"}],\"bool\":false}";
+
+const char *json_ar="[1,\"foo bar\",[10,20,{\"c\":\"d\"}],false]";
+
+const char *json_w="{\"int\" : 1 , "
+ "\"array\" : [10,20,{\"c\":\"d\"}] , \"bool\" : false }";
+const char *json_1="{ \"str\" : \"foo bar\" }";
+
+void do_json(const char *key, int type, const char *value)
+{
+ enum json_types value_type;
+ const char *value_start;
+ int value_len;
+
+ value_type= json_get_object_key(json, json + strlen(json),
+ key, &value_start, &value_len);
+ if (type)
+ ok(value_type == type && value_len == (int)strlen(value) && !memcmp(value_start, value, value_len),
+ "%s: type=%u, value(%d)=\"%.*s\"", key, value_type, value_len, value_len, value_start);
+ else
+ ok(value_type == type && value_len == (int)strlen(value),
+ "%s: type=%u keys=%u end=\"%s\"", key, value_type, value_len, value_start);
+}
+
+void do_json_ar(int n, int type, const char *value)
+{
+ enum json_types value_type;
+ const char *value_start;
+ int value_len;
+
+ value_type= json_get_array_item(json_ar, json_ar + strlen(json_ar),
+ n, &value_start, &value_len);
+ if (type)
+ ok(value_type == type && value_len == (int)strlen(value) && !memcmp(value_start, value, value_len),
+ "%i: type=%u, value(%d)=\"%.*s\"", n, value_type, value_len, value_len, value_start);
+ else
+ ok(value_type == type && value_len == (int)strlen(value),
+ "%i: type=%u keys=%u end=\"%s\"", n, value_type, value_len, value_start);
+}
+
+void do_json_locate(const char *json, const char *key, int from, int to, int cp)
+{
+ const char *key_start, *key_end;
+ int res, comma_pos;
+
+ res= json_locate_key(json, json + strlen(json),
+ key, &key_start, &key_end, &comma_pos);
+ if (key_start)
+ ok(res == 0 && key_start - json == from && key_end - json == to &&
+ comma_pos == cp, "%s: [%d,%d,%d] %.*s%s", key, (int)(key_start-json),
+ (int)(key_end-json), comma_pos, (int)(key_start - json), json, key_end);
+ else
+ ok(res == 0 && from == -1, "%s: key not found", key);
+}
+
+int main()
+{
+ plan(18);
+
+ diag("%s", json);
+ do_json("int", 4, "1");
+ do_json("str", 3, "foo bar");
+ do_json("bool", 6, "false");
+ do_json("c", 0, "1234");
+ do_json("array", 2, "[10,20,{\"c\":\"d\"}]");
+ diag("%s", json_ar);
+ do_json_ar(0, 4, "1");
+ do_json_ar(1, 3, "foo bar");
+ do_json_ar(2, 2, "[10,20,{\"c\":\"d\"}]");
+ do_json_ar(3, 6, "false");
+ do_json_ar(4, 0, "1234");
+
+ do_json_locate(json, "bool", 50, 63, 1);
+ do_json_locate(json, "int", 1, 9, 2);
+ do_json_locate(json, "array", 24, 50, 1);
+ do_json_locate(json_w, "bool", 43, 61, 1);
+ do_json_locate(json_w, "int", 1, 12, 2);
+ do_json_locate(json_w, "array", 11, 43, 1);
+ do_json_locate(json_w, "c", -1, -1, -1);
+ do_json_locate(json_1, "str", 1, 22, 0);
+
+ return exit_status();
+}
diff --git a/vio/CMakeLists.txt b/vio/CMakeLists.txt
index cdb28799ada..95748224f97 100644
--- a/vio/CMakeLists.txt
+++ b/vio/CMakeLists.txt
@@ -17,6 +17,6 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
${SSL_INCLUDE_DIRS})
ADD_DEFINITIONS(${SSL_DEFINES})
-SET(VIO_SOURCES vio.c viosocket.c viossl.c viopipe.c vioshm.c viosslfactories.c)
+SET(VIO_SOURCES vio.c viosocket.c viossl.c viopipe.c viosslfactories.c)
ADD_CONVENIENCE_LIBRARY(vio ${VIO_SOURCES})
TARGET_LINK_LIBRARIES(vio ${LIBSOCKET})
diff --git a/vio/docs/TODO b/vio/docs/TODO
deleted file mode 100644
index 7296ab73a10..00000000000
--- a/vio/docs/TODO
+++ /dev/null
@@ -1,3 +0,0 @@
-* Consistent error handling. May be the initialization should
- be taken out of constructors and be put into virtual method open().
-* The open() method is named very misleadingly().
diff --git a/vio/vio.c b/vio/vio.c
index 34ca9ed872d..985a6568e3c 100644
--- a/vio/vio.c
+++ b/vio/vio.c
@@ -54,18 +54,6 @@ static my_bool has_no_data(Vio *vio __attribute__((unused)))
}
#ifdef _WIN32
-my_bool vio_shared_memory_has_data(Vio *vio)
-{
- return (vio->shared_memory_remain > 0);
-}
-
-int vio_shared_memory_shutdown(Vio *vio, int how)
-{
- SetEvent(vio->event_conn_closed);
- SetEvent(vio->event_server_wrote);
- return 0;
-}
-
int vio_pipe_shutdown(Vio *vio, int how)
{
return CancelIoEx(vio->hPipe, NULL);
@@ -116,28 +104,7 @@ static void vio_init(Vio *vio, enum enum_vio_type type,
DBUG_VOID_RETURN;
}
#endif
-#ifdef HAVE_SMEM
- if (type == VIO_TYPE_SHARED_MEMORY)
- {
- vio->viodelete =vio_delete;
- vio->vioerrno =vio_errno;
- vio->read =vio_read_shared_memory;
- vio->write =vio_write_shared_memory;
- vio->fastsend =vio_fastsend;
- vio->viokeepalive =vio_keepalive;
- vio->should_retry =vio_should_retry;
- vio->was_timeout =vio_was_timeout;
- vio->vioclose =vio_close_shared_memory;
- vio->peer_addr =vio_peer_addr;
- vio->vioblocking =vio_blocking;
- vio->is_blocking =vio_is_blocking;
- vio->io_wait =no_io_wait;
- vio->is_connected =vio_is_connected_shared_memory;
- vio->has_data =vio_shared_memory_has_data;
- vio->shutdown =vio_shared_memory_shutdown;
- DBUG_VOID_RETURN;
- }
-#endif
+
#ifdef HAVE_OPENSSL
if (type == VIO_TYPE_SSL)
{
@@ -296,31 +263,7 @@ Vio *vio_new_win32pipe(HANDLE hPipe)
DBUG_RETURN(vio);
}
-#ifdef HAVE_SMEM
-Vio *vio_new_win32shared_memory(HANDLE handle_file_map, HANDLE handle_map,
- HANDLE event_server_wrote, HANDLE event_server_read,
- HANDLE event_client_wrote, HANDLE event_client_read,
- HANDLE event_conn_closed)
-{
- Vio *vio;
- DBUG_ENTER("vio_new_win32shared_memory");
- if ((vio = (Vio*) my_malloc(sizeof(Vio),MYF(MY_WME))))
- {
- vio_init(vio, VIO_TYPE_SHARED_MEMORY, 0, VIO_LOCALHOST);
- vio->desc= "shared memory";
- vio->handle_file_map= handle_file_map;
- vio->handle_map= handle_map;
- vio->event_server_wrote= event_server_wrote;
- vio->event_server_read= event_server_read;
- vio->event_client_wrote= event_client_wrote;
- vio->event_client_read= event_client_read;
- vio->event_conn_closed= event_conn_closed;
- vio->shared_memory_remain= 0;
- vio->shared_memory_pos= handle_map;
- }
- DBUG_RETURN(vio);
-}
-#endif
+
#endif
diff --git a/vio/vio_priv.h b/vio/vio_priv.h
index 248e1a59b23..6f5364417ac 100644
--- a/vio/vio_priv.h
+++ b/vio/vio_priv.h
@@ -35,14 +35,6 @@ int cancel_io(HANDLE handle, DWORD thread_id);
int vio_shutdown_pipe(Vio *vio,int how);
#endif
-#ifdef HAVE_SMEM
-size_t vio_read_shared_memory(Vio *vio, uchar * buf, size_t size);
-size_t vio_write_shared_memory(Vio *vio, const uchar * buf, size_t size);
-my_bool vio_is_connected_shared_memory(Vio *vio);
-int vio_close_shared_memory(Vio * vio);
-my_bool vio_shared_memory_has_data(Vio *vio);
-int vio_shutdown_shared_memory(Vio *vio, int how);
-#endif
int vio_socket_shutdown(Vio *vio, int how);
my_bool vio_buff_has_data(Vio *vio);
diff --git a/vio/viopipe.c b/vio/viopipe.c
index d3447a95c6e..0a3deb91262 100644
--- a/vio/viopipe.c
+++ b/vio/viopipe.c
@@ -131,7 +131,6 @@ int vio_close_pipe(Vio *vio)
CancelIo(vio->hPipe);
CloseHandle(vio->overlapped.hEvent);
- DisconnectNamedPipe(vio->hPipe);
ret= CloseHandle(vio->hPipe);
vio->type= VIO_CLOSED;
diff --git a/vio/vioshm.c b/vio/vioshm.c
deleted file mode 100644
index acc7d2402c5..00000000000
--- a/vio/vioshm.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/* Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-
-#include "vio_priv.h"
-
-#if defined(_WIN32) && defined(HAVE_SMEM)
-
-size_t vio_read_shared_memory(Vio *vio, uchar *buf, size_t size)
-{
- size_t length;
- size_t remain_local;
- char *current_position;
- HANDLE events[2];
- DWORD timeout;
- DBUG_ENTER("vio_read_shared_memory");
-
- remain_local= size;
- current_position= buf;
- timeout= vio->read_timeout >= 0 ? vio->read_timeout : INFINITE;
-
- events[0]= vio->event_server_wrote;
- events[1]= vio->event_conn_closed;
-
- do
- {
- if (vio->shared_memory_remain == 0)
- {
- DWORD wait_status;
-
- wait_status= WaitForMultipleObjects(array_elements(events), events,
- FALSE, timeout);
-
- /*
- WaitForMultipleObjects can return next values:
- WAIT_OBJECT_0+0 - event from vio->event_server_wrote
- WAIT_OBJECT_0+1 - event from vio->event_conn_closed.
- We can't read anything
- WAIT_ABANDONED_0 and WAIT_TIMEOUT - fail. We can't read anything
- */
- if (wait_status != WAIT_OBJECT_0)
- {
- /*
- If wait_status is WAIT_TIMEOUT, set error code to indicate a
- timeout error. If vio->event_conn_closed was set, use an EOF
- condition (return value of zero) to indicate that the operation
- has been aborted.
- */
- if (wait_status == WAIT_TIMEOUT)
- SetLastError(SOCKET_ETIMEDOUT);
- else if (wait_status == (WAIT_OBJECT_0 + 1))
- DBUG_RETURN(0);
-
- DBUG_RETURN(-1);
- }
-
- vio->shared_memory_pos= vio->handle_map;
- vio->shared_memory_remain= uint4korr((ulong*)vio->shared_memory_pos);
- vio->shared_memory_pos+= 4;
- }
-
- length= size;
-
- if (vio->shared_memory_remain < length)
- length= vio->shared_memory_remain;
- if (length > remain_local)
- length= remain_local;
-
- memcpy(current_position, vio->shared_memory_pos, length);
-
- vio->shared_memory_remain-= length;
- vio->shared_memory_pos+= length;
- current_position+= length;
- remain_local-= length;
-
- if (!vio->shared_memory_remain)
- {
- if (!SetEvent(vio->event_client_read))
- DBUG_RETURN(-1);
- }
- } while (remain_local);
- length= size;
-
- DBUG_RETURN(length);
-}
-
-
-size_t vio_write_shared_memory(Vio *vio, const uchar *buf, size_t size)
-{
- size_t length, remain, sz;
- HANDLE pos;
- const uchar *current_position;
- HANDLE events[2];
- DWORD timeout;
- DBUG_ENTER("vio_write_shared_memory");
-
- remain= size;
- current_position= buf;
- timeout= vio->write_timeout >= 0 ? vio->write_timeout : INFINITE;
-
- events[0]= vio->event_server_read;
- events[1]= vio->event_conn_closed;
-
- while (remain != 0)
- {
- DWORD wait_status;
-
- wait_status= WaitForMultipleObjects(array_elements(events), events,
- FALSE, timeout);
-
- if (wait_status != WAIT_OBJECT_0)
- {
- /* Set error code to indicate a timeout error or disconnect. */
- if (wait_status == WAIT_TIMEOUT)
- SetLastError(SOCKET_ETIMEDOUT);
- else
- SetLastError(ERROR_GRACEFUL_DISCONNECT);
-
- DBUG_RETURN((size_t) -1);
- }
-
- sz= (remain > shared_memory_buffer_length ? shared_memory_buffer_length :
- remain);
-
- int4store(vio->handle_map, sz);
- pos= vio->handle_map + 4;
- memcpy(pos, current_position, sz);
- remain-= sz;
- current_position+= sz;
- if (!SetEvent(vio->event_client_wrote))
- DBUG_RETURN((size_t) -1);
- }
- length= size;
-
- DBUG_RETURN(length);
-}
-
-
-my_bool vio_is_connected_shared_memory(Vio *vio)
-{
- return (WaitForSingleObject(vio->event_conn_closed, 0) != WAIT_OBJECT_0);
-}
-
-
-/**
- Close shared memory and DBUG_PRINT any errors that happen on closing.
- @return Zero if all closing functions succeed, and nonzero otherwise.
-*/
-int vio_close_shared_memory(Vio * vio)
-{
- int error_count= 0;
- DBUG_ENTER("vio_close_shared_memory");
- if (vio->type != VIO_CLOSED)
- {
- /*
- Set event_conn_closed for notification of both client and server that
- connection is closed
- */
- SetEvent(vio->event_conn_closed);
- /*
- Close all handlers. UnmapViewOfFile and CloseHandle return non-zero
- result if they are success.
- */
- if (UnmapViewOfFile(vio->handle_map) == 0)
- {
- error_count++;
- DBUG_PRINT("vio_error", ("UnmapViewOfFile() failed"));
- }
- if (CloseHandle(vio->event_server_wrote) == 0)
- {
- error_count++;
- DBUG_PRINT("vio_error", ("CloseHandle(vio->esw) failed"));
- }
- if (CloseHandle(vio->event_server_read) == 0)
- {
- error_count++;
- DBUG_PRINT("vio_error", ("CloseHandle(vio->esr) failed"));
- }
- if (CloseHandle(vio->event_client_wrote) == 0)
- {
- error_count++;
- DBUG_PRINT("vio_error", ("CloseHandle(vio->ecw) failed"));
- }
- if (CloseHandle(vio->event_client_read) == 0)
- {
- error_count++;
- DBUG_PRINT("vio_error", ("CloseHandle(vio->ecr) failed"));
- }
- if (CloseHandle(vio->handle_file_map) == 0)
- {
- error_count++;
- DBUG_PRINT("vio_error", ("CloseHandle(vio->hfm) failed"));
- }
- if (CloseHandle(vio->event_conn_closed) == 0)
- {
- error_count++;
- DBUG_PRINT("vio_error", ("CloseHandle(vio->ecc) failed"));
- }
- }
- vio->type= VIO_CLOSED;
- vio->mysql_socket= MYSQL_INVALID_SOCKET;
- DBUG_RETURN(error_count);
-}
-
-#endif /* #if defined(_WIN32) && defined(HAVE_SMEM) */
-
diff --git a/vio/viosocket.c b/vio/viosocket.c
index d34bb13b1bd..8fc58ef8f43 100644
--- a/vio/viosocket.c
+++ b/vio/viosocket.c
@@ -74,8 +74,7 @@ int vio_errno(Vio *vio __attribute__((unused)))
{
/* These transport types are not Winsock based. */
#ifdef _WIN32
- if (vio->type == VIO_TYPE_NAMEDPIPE ||
- vio->type == VIO_TYPE_SHARED_MEMORY)
+ if (vio->type == VIO_TYPE_NAMEDPIPE)
return GetLastError();
#endif
@@ -363,7 +362,7 @@ int vio_blocking(Vio *vio, my_bool set_blocking_mode, my_bool *old_mode)
r= set_blocking_mode ? 0 : 1;
#endif /* !defined(NO_FCNTL_NONBLOCK) */
#else /* !defined(__WIN__) */
- if (vio->type != VIO_TYPE_NAMEDPIPE && vio->type != VIO_TYPE_SHARED_MEMORY)
+ if (vio->type != VIO_TYPE_NAMEDPIPE)
{
ulong arg;
int old_fcntl=vio->fcntl_mode;
@@ -435,13 +434,39 @@ int vio_socket_timeout(Vio *vio,
DBUG_RETURN(ret);
}
+/* Set TCP_NODELAY (disable Nagle's algorithm) */
+int vio_nodelay(Vio *vio, my_bool on)
+{
+ int r;
+ int no_delay= MY_TEST(on);
+ DBUG_ENTER("vio_nodelay");
+
+ if (vio->type == VIO_TYPE_NAMEDPIPE || vio->type == VIO_TYPE_SOCKET)
+ {
+ DBUG_RETURN(0);
+ }
+
+ r = mysql_socket_setsockopt(vio->mysql_socket, IPPROTO_TCP, TCP_NODELAY,
+ IF_WIN((const char*), (void*)) &no_delay,
+ sizeof(no_delay));
+  if (r)
+  {
+    DBUG_PRINT("warning",
+               ("Couldn't set socket option for fast send, error %d",
+                socket_errno));
+    r = -1;
+  }
+  DBUG_PRINT("exit", ("%d", r));
+  DBUG_RETURN(r);
+}
+
-int vio_fastsend(Vio * vio __attribute__((unused)))
+int vio_fastsend(Vio * vio)
{
int r=0;
DBUG_ENTER("vio_fastsend");
- if (vio->type == VIO_TYPE_NAMEDPIPE ||vio->type == VIO_TYPE_SHARED_MEMORY)
+ if (vio->type == VIO_TYPE_NAMEDPIPE)
{
DBUG_RETURN(0);
}
@@ -454,18 +479,7 @@ int vio_fastsend(Vio * vio __attribute__((unused)))
}
#endif /* IPTOS_THROUGHPUT */
if (!r)
- {
-#ifdef __WIN__
- BOOL nodelay= 1;
-#else
- int nodelay = 1;
-#endif
-
- r= mysql_socket_setsockopt(vio->mysql_socket, IPPROTO_TCP, TCP_NODELAY,
- IF_WIN((const char*), (void*)) &nodelay,
- sizeof(nodelay));
-
- }
+ r = vio_nodelay(vio, TRUE);
if (r)
{
DBUG_PRINT("warning",
@@ -486,7 +500,7 @@ int vio_keepalive(Vio* vio, my_bool set_keep_alive)
(int)mysql_socket_getfd(vio->mysql_socket),
(int)set_keep_alive));
- if (vio->type != VIO_TYPE_NAMEDPIPE && vio->type != VIO_TYPE_SHARED_MEMORY)
+ if (vio->type != VIO_TYPE_NAMEDPIPE)
{
if (set_keep_alive)
opt = 1;
@@ -505,7 +519,7 @@ int vio_set_keepalive_options(Vio* vio, const struct vio_keepalive_opts *opts)
struct tcp_keepalive s;
DWORD nbytes;
- if (vio->type == VIO_TYPE_NAMEDPIPE || vio->type == VIO_TYPE_SHARED_MEMORY)
+ if (vio->type == VIO_TYPE_NAMEDPIPE)
return 0;
if (!opts->idle && !opts->interval)
diff --git a/win/packaging/CMakeLists.txt b/win/packaging/CMakeLists.txt
index 683abc64ef3..465b4529fac 100644
--- a/win/packaging/CMakeLists.txt
+++ b/win/packaging/CMakeLists.txt
@@ -178,6 +178,39 @@ IF(CMAKE_GENERATOR MATCHES "Visual Studio")
SET(CONFIG_PARAM "-DCMAKE_INSTALL_CONFIG_NAME=${CMAKE_CFG_INTDIR}")
ENDIF()
+IF(MSVC_CRT_TYPE MATCHES "/MD")
+  # Find the CRT merge module path; we're going to use it in the installer.
+  # The path and name depend on the VS version.
+ IF(MSVC_VERSION LESS 1900)
+ # VS2015
+ SET(VCREDIST_MSM_FILENAME Microsoft_VC140_CRT_${WIX_ARCH_SUFFIX}.msm)
+ SET(ProgramFilesX86 "ProgramFiles(x86)")
+ FIND_FILE(${VCREDIST_MSM_FILENAME}
+ NO_DEFAULT_PATH
+ PATHS
+ "$ENV{${ProgramFilesX86}}/Common Files/Merge Modules"
+ "$ENV{ProgramFiles}/Common Files/Merge Modules"
+ )
+ ELSEIF(MSVC_VERSION LESS 2000)
+ # VS2017
+ SET(VCREDIST_MSM_FILENAME Microsoft_VC141_CRT_${WIX_ARCH_SUFFIX}.msm)
+ FILE(GLOB MSM_LIST "C:/Program Files*/Microsoft Visual Studio/2017/*/VC/Redist/MSVC/*/MergeModules/${VCREDIST_MSM_FILENAME}")
+ LIST(LENGTH MSM_LIST LEN)
+ IF(LEN GREATER 0)
+ LIST(GET MSM_LIST 0 VCRedist_MSM)
+ ENDIF()
+ ELSE()
+    # Post-VS2017. Needs to be ported when a new VS version is released.
+ MESSAGE(WARNING
+ "Name of redistributable merge module not known for this version of MSVC")
+ ENDIF()
+ IF (NOT VCRedist_MSM)
+ MESSAGE(WARNING "Can't find merge module ${VCREDIST_MSM_FILENAME}")
+ ELSE()
+ FILE(TO_NATIVE_PATH ${VCRedist_MSM} VCRedist_MSM)
+ # MESSAGE("VCRedist_MSM=${VCRedist_MSM}")
+ ENDIF()
+ENDIF()
ADD_CUSTOM_TARGET(
MSI
@@ -209,44 +242,12 @@ ADD_CUSTOM_TARGET(
-DVERSION="${VERSION}"
-DWITH_THIRD_PARTY="${WITH_THIRD_PARTY}"
-DWIXCA_LOCATION="$<TARGET_FILE:wixca>"
+ -DMSVC_CRT_TYPE="${MSVC_CRT_TYPE}"
+ -DVCRedist_MSM="${VCRedist_MSM}"
-P ${CMAKE_CURRENT_SOURCE_DIR}/create_msi.cmake
)
ADD_DEPENDENCIES(MSI wixca)
-ADD_CUSTOM_TARGET(
- MSI_ESSENTIALS
- COMMAND ${CMAKE_COMMAND} ${CONFIG_PARAM} -DESSENTIALS=1
- -DCANDLE_EXECUTABLE="${CANDLE_EXECUTABLE}"
- -DCMAKE_CFG_INTDIR="${CMAKE_CFG_INTDIR}"
- -DCMAKE_FULL_VER="${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}.${CMAKE_PATCH_VERSION}"
- -DCMAKE_SIZEOF_VOID_P=${CMAKE_SIZEOF_VOID_P}
- -DCOPYING_RTF="${COPYING_RTF}"
- -DCPACK_WIX_CONFIG="${CPACK_WIX_CONFIG}"
- -DCPACK_WIX_INCLUDE="${CPACK_WIX_INCLUDE}"
- -DCPACK_WIX_PACKAGE_BASE_NAME="${CPACK_WIX_PACKAGE_BASE_NAME}"
- -DCPACK_WIX_PACKAGE_NAME="${CPACK_WIX_PACKAGE_NAME}"
- -DCPACK_WIX_UPGRADE_CODE="${CPACK_WIX_UPGRADE_CODE}"
- -DEXTRA_WIX_PREPROCESSOR_FLAGS="${EXTRA_WIX_PREPROCESSOR_FLAGS}"
- -DLIGHT_EXECUTABLE="${LIGHT_EXECUTABLE}"
- -DMAJOR_VERSION="${MAJOR_VERSION}"
- -DMANUFACTURER="${MANUFACTURER}"
- -DMINOR_VERSION="${MINOR_VERSION}"
- -DPATCH_VERSION="${PATCH_VERSION}"
- -DSIGNCODE="${SIGNCODE}"
- -DSIGNTOOL_EXECUTABLE="${SIGNTOOL_EXECUTABLE}"
- -DSIGNTOOL_PARAMETERS="${SIGNTOOL_PARAMETERS}"
- -DSRCDIR="${CMAKE_CURRENT_SOURCE_DIR}"
- -DTHIRD_PARTY_DOWNLOAD_LOCATION="${THIRD_PARTY_DOWNLOAD_LOCATION}"
- -DTHIRD_PARTY_FEATURE_CONDITION="${THIRD_PARTY_FEATURE_CONDITION}"
- -DTINY_VERSION="${TINY_VERSION}"
- -DTOP_BINDIR="${CMAKE_BINARY_DIR}"
- -DVERSION="${VERSION}"
- -DWITH_THIRD_PARTY="${WITH_THIRD_PARTY}"
- -DWIXCA_LOCATION="$<TARGET_FILE:wixca>"
- -P ${CMAKE_CURRENT_SOURCE_DIR}/create_msi.cmake
-)
-ADD_DEPENDENCIES(MSI_ESSENTIALS wixca)
-
IF(CMAKE_GENERATOR MATCHES "Visual Studio")
SET(CPACK_CONFIG_PARAM -C $(Configuration))
diff --git a/win/packaging/CPackWixConfig.cmake b/win/packaging/CPackWixConfig.cmake
index 994b121797a..74329e79247 100644
--- a/win/packaging/CPackWixConfig.cmake
+++ b/win/packaging/CPackWixConfig.cmake
@@ -9,7 +9,7 @@ IF(ESSENTIALS)
ENDIF()
ELSE()
SET(CPACK_COMPONENTS_USED
- "Server;Client;Development;SharedLibraries;Documentation;Readme;Common;VCCRT;connect-engine;ClientPlugins;gssapi-server;gssapi-client;aws-key-management;rocksdb-engine;backup")
+ "Server;Client;Development;SharedLibraries;Documentation;Readme;Common;connect-engine;ClientPlugins;gssapi-server;gssapi-client;aws-key-management;rocksdb-engine;backup")
ENDIF()
SET( WIX_FEATURE_MySQLServer_EXTRA_FEATURES "DBInstance;SharedClientServerComponents")
diff --git a/win/packaging/ca/CMakeLists.txt b/win/packaging/ca/CMakeLists.txt
index 04d5408b9c9..79e8ee4c5a7 100644
--- a/win/packaging/ca/CMakeLists.txt
+++ b/win/packaging/ca/CMakeLists.txt
@@ -18,7 +18,8 @@ SET(WIXCA_SOURCES CustomAction.cpp CustomAction.def)
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/sql)
+# Custom action should not depend on C runtime, since we do not know if CRT is installed.
+FORCE_STATIC_CRT()
ADD_VERSION_INFO(wixca SHARED WIXCA_SOURCES)
-ADD_LIBRARY(wixca SHARED EXCLUDE_FROM_ALL ${WIXCA_SOURCES})
-TARGET_LINK_LIBRARIES(wixca ${WIX_WCAUTIL_LIBRARY} ${WIX_DUTIL_LIBRARY}
- msi version winservice)
+ADD_LIBRARY(wixca SHARED EXCLUDE_FROM_ALL ${WIXCA_SOURCES} ${CMAKE_SOURCE_DIR}/sql/winservice.c)
+TARGET_LINK_LIBRARIES(wixca ${WIX_WCAUTIL_LIBRARY} ${WIX_DUTIL_LIBRARY} msi version)
diff --git a/win/packaging/ca/CustomAction.cpp b/win/packaging/ca/CustomAction.cpp
index 71c24e96f92..7ba061c108a 100644
--- a/win/packaging/ca/CustomAction.cpp
+++ b/win/packaging/ca/CustomAction.cpp
@@ -355,7 +355,7 @@ void CheckServiceConfig(
goto end;
}
- WcaLog(LOGMSG_STANDARD, "MySQL service %S found: CommandLine= %S",
+ WcaLog(LOGMSG_STANDARD, "MySQL/MariaDB service %S found: CommandLine= %S",
other_servicename, commandline);
if (wcsstr(argv[0], bindir))
{
diff --git a/win/packaging/create_msi.cmake b/win/packaging/create_msi.cmake
index 122c25a0d91..ad935803a1e 100644
--- a/win/packaging/create_msi.cmake
+++ b/win/packaging/create_msi.cmake
@@ -59,6 +59,11 @@ IF(CMAKE_INSTALL_CONFIG_NAME)
SET(CONFIG_PARAM "-DCMAKE_INSTALL_CONFIG_NAME=${CMAKE_INSTALL_CONFIG_NAME}")
ENDIF()
+IF((MSVC_CRT_TYPE MATCHES "/MD") AND (NOT VCRedist_MSM))
+  # Something went wrong: with a dynamic C runtime we are expected to
+  # package the VC runtime merge module, but it was not found.
+ MESSAGE(FATAL_ERROR "Redistributable merge module was not found")
+ENDIF()
SET(COMPONENTS_ALL "${CPACK_COMPONENTS_ALL}")
FOREACH(comp ${COMPONENTS_ALL})
@@ -382,9 +387,13 @@ EXECUTE_PROCESS(
${EXTRA_CANDLE_ARGS}
)
+IF(VCRedist_MSM)
+ SET(SILENCE_VCREDIST_MSM_WARNINGS -sice:ICE82 -sice:ICE03)
+ENDIF()
+
EXECUTE_PROCESS(
COMMAND ${LIGHT_EXECUTABLE} -v -ext WixUIExtension -ext WixUtilExtension
- -ext WixFirewallExtension -sice:ICE61
+ -ext WixFirewallExtension -sice:ICE61 ${SILENCE_VCREDIST_MSM_WARNINGS}
mysql_server.wixobj extra.wixobj -out ${CPACK_PACKAGE_FILE_NAME}.msi
${EXTRA_LIGHT_ARGS}
)
diff --git a/win/packaging/extra.wxs.in b/win/packaging/extra.wxs.in
index a71ef982896..1955799f6f9 100644
--- a/win/packaging/extra.wxs.in
+++ b/win/packaging/extra.wxs.in
@@ -279,7 +279,7 @@
<Control Id="Next" Type="PushButton" X="236" Y="243" Width="56" Height="17" Default="yes" Text="&amp;Next">
<Publish Property="WarningText" Value="Passwords do not match."><![CDATA[PASSWORD <> RootPasswordConfirm]]></Publish>
<Publish Event="SpawnDialog" Value="WarningDlg"><![CDATA[WarningText <>""]]></Publish>
- <Publish Property="SERVICENAME" Value="MySQL">NOT SERVICENAME AND NOT WarningText</Publish>
+ <Publish Property="SERVICENAME" Value="MariaDB">NOT SERVICENAME AND NOT WarningText</Publish>
<Publish Event="NewDialog" Value="ServicePortDlg"><![CDATA[WarningText=""]]></Publish>
<Condition Action="enable"><![CDATA[NOT ModifyRootPassword OR PASSWORD]]> </Condition>
<Condition Action="disable"><![CDATA[ModifyRootPassword AND (NOT PASSWORD)]]> </Condition>
@@ -650,7 +650,17 @@
</Feature>
<?endif ?>
-
+
+ <?if "@VCRedist_MSM@" != "" ?>
+ <!-- VC runtime merge module -->
+ <DirectoryRef Id="TARGETDIR">
+ <Merge Id="VCRedist" SourceFile="@VCRedist_MSM@" DiskId="1" Language="0"/>
+ </DirectoryRef>
+ <Feature Id="VCRedist" Title="Visual C++ Runtime" AllowAdvertise="no" Display="hidden" Level="1">
+ <MergeRef Id="VCRedist"/>
+ </Feature>
+ <?endif?>
+
<!-- Custom action, call mysql_install_db -->
<SetProperty Sequence='execute' Before='CreateDatabaseCommand' Id="SKIPNETWORKING" Value="--skip-networking" >SKIPNETWORKING</SetProperty>
<SetProperty Sequence='execute' Before='CreateDatabaseCommand' Id="ALLOWREMOTEROOTACCESS" Value="--allow-remote-root-access">ALLOWREMOTEROOTACCESS</SetProperty>
diff --git a/win/packaging/mysql_server.wxs.in b/win/packaging/mysql_server.wxs.in
index c10116830e7..80dcc365e56 100644
--- a/win/packaging/mysql_server.wxs.in
+++ b/win/packaging/mysql_server.wxs.in
@@ -12,7 +12,7 @@
Keywords='Installer'
Description='MariaDB Server'
Manufacturer='@MANUFACTURER@'
- InstallerVersion='200'
+ InstallerVersion='301'
Languages='1033'
Compressed='yes'
SummaryCodepage='1252'
diff --git a/win/upgrade_wizard/CMakeLists.txt b/win/upgrade_wizard/CMakeLists.txt
index f4148ee98d2..7d0e774b968 100644
--- a/win/upgrade_wizard/CMakeLists.txt
+++ b/win/upgrade_wizard/CMakeLists.txt
@@ -23,21 +23,27 @@ IF(NOT MFC_FOUND)
ENDIF()
RETURN()
ENDIF()
+
IF(MSVC_CRT_TYPE MATCHES "/MD")
- # MFC should be dynamically linked
- SET(CMAKE_MFC_FLAG 2)
+ # FORCE static CRT and MFC for upgrade wizard,
+ # so we do not have to redistribute MFC.
+ FORCE_STATIC_CRT()
+ SET(UPGRADE_WIZARD_SOURCES ${CMAKE_SOURCE_DIR}/sql/winservice.c)
ELSE()
- # MFC should be statically linked
- SET(CMAKE_MFC_FLAG 1)
+ SET(UPGRADE_WIZARD_LINK_LIBRARIES winservice)
ENDIF()
+
+# MFC should be statically linked
+SET(CMAKE_MFC_FLAG 1)
+
# Enable exception handling (avoids warnings)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc -DNO_WARN_MBCS_MFC_DEPRECATION")
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/sql)
MYSQL_ADD_EXECUTABLE(mysql_upgrade_wizard
- upgrade.cpp upgradeDlg.cpp upgrade.rc
+ upgrade.cpp upgradeDlg.cpp upgrade.rc ${UPGRADE_WIZARD_SOURCES}
COMPONENT Server)
-TARGET_LINK_LIBRARIES(mysql_upgrade_wizard winservice)
+TARGET_LINK_LIBRARIES(mysql_upgrade_wizard ${UPGRADE_WIZARD_LINK_LIBRARIES})
# upgrade_wizard is Windows executable, set WIN32_EXECUTABLE so it does not
# create a console.
SET_TARGET_PROPERTIES(mysql_upgrade_wizard PROPERTIES WIN32_EXECUTABLE 1)
diff --git a/wsrep-lib b/wsrep-lib
new file mode 160000
+Subproject e9dafb73734d71ab55078b34748e54f139aec82
diff --git a/wsrep/CMakeLists.txt b/wsrep/CMakeLists.txt
deleted file mode 100644
index ff2bdec4def..00000000000
--- a/wsrep/CMakeLists.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (c) 2012, Codership Oy. All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-SET(WSREP_SOURCES wsrep_gtid.c wsrep_uuid.c wsrep_loader.c wsrep_dummy.c)
-
-IF(NOT WITH_INNOBASE_STORAGE_ENGINE)
- MESSAGE(WARNING "WSRep is enabled, but innodb is not. This configuration is not supported")
-ENDIF()
-
-ADD_CONVENIENCE_LIBRARY(wsrep ${WSREP_SOURCES})
-DTRACE_INSTRUMENT(wsrep)
-
-#ADD_EXECUTABLE(listener wsrep_listener.c ${WSREP_SOURCES})
-#TARGET_LINK_LIBRARIES(listener ${LIBDL})
diff --git a/wsrep/wsrep_api.h b/wsrep/wsrep_api.h
deleted file mode 100644
index 1d6bc059d3d..00000000000
--- a/wsrep/wsrep_api.h
+++ /dev/null
@@ -1,1117 +0,0 @@
-/* Copyright (C) 2009-2013 Codership Oy <info@codership.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-/*!
- @file wsrep API declaration.
-
- HOW TO READ THIS FILE.
-
- Due to C language rules this header layout doesn't lend itself to intuitive
- reading. So here's the scoop: in the end this header declares two main types:
-
- * struct wsrep_init_args
-
- and
-
- * struct wsrep
-
- wsrep_init_args contains initialization parameters for wsrep provider like
- names, addresses, etc. and pointers to callbacks. The callbacks will be called
- by provider when it needs to do something application-specific, like log a
- message or apply a writeset. It should be passed to init() call from
- wsrep API. It is an application part of wsrep API contract.
-
- struct wsrep is the interface to wsrep provider. It contains all wsrep API
- calls. It is a provider part of wsrep API contract.
-
- Finally, wsrep_load() method loads (dlopens) wsrep provider library. It is
- defined in wsrep_loader.c unit and is part of libwsrep.a (which is not a
- wsrep provider, but a convenience library).
-
- wsrep_unload() does the reverse.
-
-*/
-#ifndef WSREP_H
-#define WSREP_H
-
-#include <stdint.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <time.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**************************************************************************
- * *
- * wsrep replication API *
- * *
- **************************************************************************/
-
-#define WSREP_INTERFACE_VERSION "25"
-
-/*! Empty backend spec */
-#define WSREP_NONE "none"
-
-
-/*!
- * @brief log severity levels, passed as first argument to log handler
- */
-typedef enum wsrep_log_level
-{
- WSREP_LOG_FATAL, //!< Unrecoverable error, application must quit.
- WSREP_LOG_ERROR, //!< Operation failed, must be repeated.
- WSREP_LOG_WARN, //!< Unexpected condition, but no operational failure.
- WSREP_LOG_INFO, //!< Informational message.
- WSREP_LOG_DEBUG //!< Debug message. Shows only of compiled with debug.
-} wsrep_log_level_t;
-
-/*!
- * @brief error log handler
- *
- * All messages from wsrep provider are directed to this
- * handler, if present.
- *
- * @param level log level
- * @param message log message
- */
-typedef void (*wsrep_log_cb_t)(wsrep_log_level_t, const char *);
-
-
-/*!
- * Certain provider capabilities application may want to know about
- */
-#define WSREP_CAP_MULTI_MASTER ( 1ULL << 0 )
-#define WSREP_CAP_CERTIFICATION ( 1ULL << 1 )
-#define WSREP_CAP_PARALLEL_APPLYING ( 1ULL << 2 )
-#define WSREP_CAP_TRX_REPLAY ( 1ULL << 3 )
-#define WSREP_CAP_ISOLATION ( 1ULL << 4 )
-#define WSREP_CAP_PAUSE ( 1ULL << 5 )
-#define WSREP_CAP_CAUSAL_READS ( 1ULL << 6 )
-#define WSREP_CAP_CAUSAL_TRX ( 1ULL << 7 )
-#define WSREP_CAP_INCREMENTAL_WRITESET ( 1ULL << 8 )
-#define WSREP_CAP_SESSION_LOCKS ( 1ULL << 9 )
-#define WSREP_CAP_DISTRIBUTED_LOCKS ( 1ULL << 10 )
-#define WSREP_CAP_CONSISTENCY_CHECK ( 1ULL << 11 )
-#define WSREP_CAP_UNORDERED ( 1ULL << 12 )
-#define WSREP_CAP_ANNOTATION ( 1ULL << 13 )
-#define WSREP_CAP_PREORDERED ( 1ULL << 14 )
-
-
-/*!
- * Writeset flags
- *
- * COMMIT the writeset and all preceding writesets must be committed
- * ROLLBACK all preceding writesets in a transaction must be rolled back
- * ISOLATION the writeset must be applied AND committed in isolation
- * PA_UNSAFE the writeset cannot be applied in parallel
- * COMMUTATIVE the order in which the writeset is applied does not matter
- * NATIVE the writeset contains another writeset in this provider format
- *
- * Note that some of the flags are mutually exclusive (e.g. COMMIT and
- * ROLLBACK).
- */
-#define WSREP_FLAG_COMMIT ( 1ULL << 0 )
-#define WSREP_FLAG_ROLLBACK ( 1ULL << 1 )
-#define WSREP_FLAG_ISOLATION ( 1ULL << 2 )
-#define WSREP_FLAG_PA_UNSAFE ( 1ULL << 3 )
-#define WSREP_FLAG_COMMUTATIVE ( 1ULL << 4 )
-#define WSREP_FLAG_NATIVE ( 1ULL << 5 )
-
-
-typedef uint64_t wsrep_trx_id_t; //!< application transaction ID
-typedef uint64_t wsrep_conn_id_t; //!< application connection ID
-typedef int64_t wsrep_seqno_t; //!< sequence number of a writeset, etc.
-#ifdef __cplusplus
-typedef bool wsrep_bool_t;
-#else
-typedef _Bool wsrep_bool_t; //!< should be the same as standard (C99) bool
-#endif /* __cplusplus */
-
-/*! undefined seqno */
-#define WSREP_SEQNO_UNDEFINED (-1)
-
-
-/*! wsrep provider status codes */
-typedef enum wsrep_status
-{
- WSREP_OK = 0, //!< success
- WSREP_WARNING, //!< minor warning, error logged
- WSREP_TRX_MISSING, //!< transaction is not known by wsrep
- WSREP_TRX_FAIL, //!< transaction aborted, server can continue
- WSREP_BF_ABORT, //!< trx was victim of brute force abort
- WSREP_SIZE_EXCEEDED, //!< data exceeded maximum supported size
- WSREP_CONN_FAIL, //!< error in client connection, must abort
- WSREP_NODE_FAIL, //!< error in node state, wsrep must reinit
- WSREP_FATAL, //!< fatal error, server must abort
- WSREP_NOT_IMPLEMENTED //!< feature not implemented
-} wsrep_status_t;
-
-
-/*! wsrep callbacks status codes */
-typedef enum wsrep_cb_status
-{
- WSREP_CB_SUCCESS = 0, //!< success (as in "not critical failure")
- WSREP_CB_FAILURE //!< critical failure (consistency violation)
- /* Technically, wsrep provider has no use for specific failure codes since
- * there is nothing it can do about it but abort execution. Therefore any
- * positive number shall indicate a critical failure. Optionally that value
- * may be used by provider to come to a consensus about state consistency
- * in a group of nodes. */
-} wsrep_cb_status_t;
-
-
-/*!
- * UUID type - for all unique IDs
- */
-typedef struct wsrep_uuid {
- uint8_t data[16];
-} wsrep_uuid_t;
-
-/*! Undefined UUID */
-static const wsrep_uuid_t WSREP_UUID_UNDEFINED = {{0,}};
-
-/*! UUID string representation length, terminating '\0' not included */
-#define WSREP_UUID_STR_LEN 36
-
-/*!
- * Scan UUID from string
- * @return length of UUID string representation or negative error code
- */
-extern int
-wsrep_uuid_scan (const char* str, size_t str_len, wsrep_uuid_t* uuid);
-
-/*!
- * Print UUID to string
- * @return length of UUID string representation or negative error code
- */
-extern int
-wsrep_uuid_print (const wsrep_uuid_t* uuid, char* str, size_t str_len);
-
-#define WSREP_MEMBER_NAME_LEN 32 //!< maximum logical member name length
-#define WSREP_INCOMING_LEN 256 //!< max Domain Name length + 0x00
-
-
-/*!
- * Global transaction identifier
- */
-typedef struct wsrep_gtid
-{
- wsrep_uuid_t uuid; /*!< History UUID */
- wsrep_seqno_t seqno; /*!< Sequence number */
-} wsrep_gtid_t;
-
-/*! Undefined GTID */
-static const wsrep_gtid_t WSREP_GTID_UNDEFINED = {{{0, }}, -1};
-
-/*! Minimum number of bytes guaranteed to store GTID string representation,
- * terminating '\0' not included (36 + 1 + 20) */
-#define WSREP_GTID_STR_LEN 57
-
-
-/*!
- * Scan GTID from string
- * @return length of GTID string representation or negative error code
- */
-extern int
-wsrep_gtid_scan(const char* str, size_t str_len, wsrep_gtid_t* gtid);
-
-/*!
- * Print GTID to string
- * @return length of GTID string representation or negative error code
- */
-extern int
-wsrep_gtid_print(const wsrep_gtid_t* gtid, char* str, size_t str_len);
-
-
-/*!
- * Transaction meta data
- */
-typedef struct wsrep_trx_meta
-{
- wsrep_gtid_t gtid; /*!< Global transaction identifier */
- wsrep_seqno_t depends_on; /*!< Sequence number part of the last transaction
- this transaction depends on */
-} wsrep_trx_meta_t;
-
-
-/*!
- * member status
- */
-typedef enum wsrep_member_status {
- WSREP_MEMBER_UNDEFINED, //!< undefined state
- WSREP_MEMBER_JOINER, //!< incomplete state, requested state transfer
- WSREP_MEMBER_DONOR, //!< complete state, donates state transfer
- WSREP_MEMBER_JOINED, //!< complete state
- WSREP_MEMBER_SYNCED, //!< complete state, synchronized with group
- WSREP_MEMBER_ERROR, //!< this and above are provider-specific error codes
- WSREP_MEMBER_MAX
-} wsrep_member_status_t;
-
-/*!
- * static information about a group member (some fields are still tentative)
- */
-typedef struct wsrep_member_info {
- wsrep_uuid_t id; //!< group-wide unique member ID
- char name[WSREP_MEMBER_NAME_LEN]; //!< human-readable name
- char incoming[WSREP_INCOMING_LEN]; //!< address for client requests
-} wsrep_member_info_t;
-
-/*!
- * group status
- */
-typedef enum wsrep_view_status {
- WSREP_VIEW_PRIMARY, //!< primary group configuration (quorum present)
- WSREP_VIEW_NON_PRIMARY, //!< non-primary group configuration (quorum lost)
- WSREP_VIEW_DISCONNECTED, //!< not connected to group, retrying.
- WSREP_VIEW_MAX
-} wsrep_view_status_t;
-
-/*!
- * view of the group
- */
-typedef struct wsrep_view_info {
- wsrep_gtid_t state_id; //!< global state ID
- wsrep_seqno_t view; //!< global view number
- wsrep_view_status_t status; //!< view status
- wsrep_bool_t state_gap; //!< gap between global and local states
- int my_idx; //!< index of this member in the view
- int memb_num; //!< number of members in the view
- int proto_ver; //!< application protocol agreed on the view
- wsrep_member_info_t members[1];//!< array of member information
-} wsrep_view_info_t;
-
-/*!
- * Magic string to tell provider to engage into trivial (empty) state transfer.
- * No data will be passed, but the node shall be considered JOINED.
- * Should be passed in sst_req parameter of wsrep_view_cb_t.
- */
-#define WSREP_STATE_TRANSFER_TRIVIAL "trivial"
-
-/*!
- * Magic string to tell provider not to engage in state transfer at all.
- * The member will stay in WSREP_MEMBER_UNDEFINED state but will keep on
- * receiving all writesets.
- * Should be passed in sst_req parameter of wsrep_view_cb_t.
- */
-#define WSREP_STATE_TRANSFER_NONE "none"
-
-/*!
- * @brief group view handler
- *
- * This handler is called in total order corresponding to the group
- * configuration change. It is to provide a vital information about
- * new group view. If view info indicates existence of discontinuity
- * between group and member states, state transfer request message
- * should be filled in by the callback implementation.
- *
- * @note Currently it is assumed that sst_req is allocated using
- * malloc()/calloc()/realloc() and it will be freed by
- * wsrep implementation.
- *
- * @param app_ctx application context
- * @param recv_ctx receiver context
- * @param view new view on the group
- * @param state current state
- * @param state_len length of current state
- * @param sst_req location to store SST request
- * @param sst_req_len location to store SST request length or error code,
- * value of 0 means no SST.
- */
-typedef enum wsrep_cb_status (*wsrep_view_cb_t) (
- void* app_ctx,
- void* recv_ctx,
- const wsrep_view_info_t* view,
- const char* state,
- size_t state_len,
- void** sst_req,
- size_t* sst_req_len
-);
-
-
-/*!
- * @brief apply callback
- *
- * This handler is called from wsrep library to apply replicated writeset
- * Must support brute force applying for multi-master operation
- *
- * @param recv_ctx receiver context pointer provided by the application
- * @param data data buffer containing the writeset
- * @param size data buffer size
- * @param flags WSREP_FLAG_... flags
- * @param meta transaction meta data of the writeset to be applied
- *
- * @return success code:
- * @retval WSREP_OK
- * @retval WSREP_NOT_IMPLEMENTED appl. does not support the writeset format
- * @retval WSREP_ERROR failed to apply the writeset
- */
-typedef enum wsrep_cb_status (*wsrep_apply_cb_t) (
- void* recv_ctx,
- const void* data,
- size_t size,
- uint32_t flags,
- const wsrep_trx_meta_t* meta
-);
-
-
-/*!
- * @brief commit callback
- *
- * This handler is called to commit the changes made by apply callback.
- *
- * @param recv_ctx receiver context pointer provided by the application
- * @param flags WSREP_FLAG_... flags
- * @param meta transaction meta data of the writeset to be committed
- * @param exit set to true to exit recv loop
- * @param commit true - commit writeset, false - rollback writeset
- *
- * @return success code:
- * @retval WSREP_OK
- * @retval WSREP_ERROR call failed
- */
-typedef enum wsrep_cb_status (*wsrep_commit_cb_t) (
- void* recv_ctx,
- uint32_t flags,
- const wsrep_trx_meta_t* meta,
- wsrep_bool_t* exit,
- wsrep_bool_t commit
-);
-
-
-/*!
- * @brief unordered callback
- *
- * This handler is called to execute unordered actions (actions that need not
- * to be executed in any particular order) attached to writeset.
- *
- * @param recv_ctx receiver context pointer provided by the application
- * @param data data buffer containing the writeset
- * @param size data buffer size
- */
-typedef enum wsrep_cb_status (*wsrep_unordered_cb_t) (
- void* recv_ctx,
- const void* data,
- size_t size
-);
-
-
-/*!
- * @brief a callback to donate state snapshot
- *
- * This handler is called from wsrep library when it needs this node
- * to deliver state to a new cluster member.
- * No state changes will be committed for the duration of this call.
- * Wsrep implementation may provide internal state to be transmitted
- * to new cluster member for initial state.
- *
- * @param app_ctx application context
- * @param recv_ctx receiver context
- * @param msg state transfer request message
- * @param msg_len state transfer request message length
- * @param state_id current state ID on this node
- * @param state current wsrep internal state buffer
- * @param state_len current wsrep internal state buffer len
- * @param bypass bypass snapshot transfer, only transfer uuid:seqno pair
- */
-typedef enum wsrep_cb_status (*wsrep_sst_donate_cb_t) (
- void* app_ctx,
- void* recv_ctx,
- const void* msg,
- size_t msg_len,
- const wsrep_gtid_t* state_id,
- const char* state,
- size_t state_len,
- wsrep_bool_t bypass
-);
-
-
-/*!
- * @brief a callback to signal application that wsrep state is synced
- * with cluster
- *
- * This callback is called after wsrep library has got in sync with
- * rest of the cluster.
- *
- * @param app_ctx application context
- */
-typedef void (*wsrep_synced_cb_t) (void* app_ctx);
-
-
-/*!
- * Initialization parameters for wsrep provider.
- */
-struct wsrep_init_args
-{
- void* app_ctx; //!< Application context for callbacks
-
- /* Configuration parameters */
- const char* node_name; //!< Symbolic name of this node (e.g. hostname)
- const char* node_address; //!< Address to be used by wsrep provider
- const char* node_incoming; //!< Address for incoming client connections
- const char* data_dir; //!< Directory where wsrep files are kept if any
- const char* options; //!< Provider-specific configuration string
- int proto_ver; //!< Max supported application protocol version
-
- /* Application initial state information. */
- const wsrep_gtid_t* state_id; //!< Application state GTID
- const char* state; //!< Initial state for wsrep provider
- size_t state_len; //!< Length of state buffer
-
- /* Application callbacks */
- wsrep_log_cb_t logger_cb; //!< logging handler
- wsrep_view_cb_t view_handler_cb; //!< group view change handler
-
- /* Applier callbacks */
- wsrep_apply_cb_t apply_cb; //!< apply callback
- wsrep_commit_cb_t commit_cb; //!< commit callback
- wsrep_unordered_cb_t unordered_cb; //!< callback for unordered actions
-
- /* State Snapshot Transfer callbacks */
- wsrep_sst_donate_cb_t sst_donate_cb; //!< starting to donate
- wsrep_synced_cb_t synced_cb; //!< synced with group
-};
-
-
-/*! Type of the stats variable value in struct wsrep_stats_var */
-typedef enum wsrep_var_type
-{
- WSREP_VAR_STRING, //!< pointer to null-terminated string
- WSREP_VAR_INT64, //!< int64_t
- WSREP_VAR_DOUBLE //!< double
-}
-wsrep_var_type_t;
-
-/*! Generalized stats variable representation */
-struct wsrep_stats_var
-{
- const char* name; //!< variable name
- wsrep_var_type_t type; //!< variable value type
- union {
- int64_t _integer64;
- double _double;
- const char* _string;
- } value; //!< variable value
-};
-
-
-/*! Abstract data buffer structure */
-typedef struct wsrep_buf
-{
- const void* ptr; /*!< Pointer to data buffer */
- size_t len; /*!< Length of buffer */
-} wsrep_buf_t;
-
-/*! Key struct used to pass certification keys for transaction handling calls.
- * A key consists of zero or more key parts. */
-typedef struct wsrep_key
-{
- const wsrep_buf_t* key_parts; /*!< Array of key parts */
- size_t key_parts_num; /*!< Number of key parts */
-} wsrep_key_t;
-
-/*! Key type:
- * EXCLUSIVE conflicts with any key type
- * SEMI reserved. If not supported, should be interpreted as EXCLUSIVE
- * SHARED conflicts only with EXCLUSIVE keys */
-typedef enum wsrep_key_type
-{
- WSREP_KEY_SHARED = 0,
- WSREP_KEY_SEMI,
- WSREP_KEY_EXCLUSIVE
-} wsrep_key_type_t;
-
-/*! Data type:
- * ORDERED state modification event that should be applied and committed
- * in order.
- * UNORDERED some action that does not modify state and execution of which is
- * optional and does not need to happen in order.
- * ANNOTATION (human readable) writeset annotation. */
-typedef enum wsrep_data_type
-{
- WSREP_DATA_ORDERED = 0,
- WSREP_DATA_UNORDERED,
- WSREP_DATA_ANNOTATION
-} wsrep_data_type_t;
-
-
-/*! Transaction handle struct passed for wsrep transaction handling calls */
-typedef struct wsrep_ws_handle
-{
- wsrep_trx_id_t trx_id; //!< transaction ID
- void* opaque; //!< opaque provider transaction context data
-} wsrep_ws_handle_t;
-
-/*!
- * @brief Helper method to reset trx writeset handle state when trx id changes
- *
- * Instead of passing wsrep_ws_handle_t directly to wsrep calls,
- * wrapping handle with this call offloads bookkeeping from
- * application.
- */
-static inline wsrep_ws_handle_t* wsrep_ws_handle_for_trx(
- wsrep_ws_handle_t* ws_handle,
- wsrep_trx_id_t trx_id)
-{
- if (ws_handle->trx_id != trx_id)
- {
- ws_handle->trx_id = trx_id;
- ws_handle->opaque = NULL;
- }
- return ws_handle;
-}
-
-
-/*!
- * A handle for processing preordered actions.
- * Must be initialized to WSREP_PO_INITIALIZER before use.
- */
-typedef struct wsrep_po_handle { void* opaque; } wsrep_po_handle_t;
-
-static const wsrep_po_handle_t WSREP_PO_INITIALIZER = { NULL };
-
-
-typedef struct wsrep wsrep_t;
-/*!
- * wsrep interface for dynamically loadable libraries
- */
-struct wsrep {
-
- const char *version; //!< interface version string
-
- /*!
- * @brief Initializes wsrep provider
- *
- * @param wsrep provider handle
- * @param args wsrep initialization parameters
- */
- wsrep_status_t (*init) (wsrep_t* wsrep,
- const struct wsrep_init_args* args);
-
- /*!
- * @brief Returns provider capabilities flag bitmap
- *
- * @param wsrep provider handle
- */
- uint64_t (*capabilities) (wsrep_t* wsrep);
-
- /*!
- * @brief Passes provider-specific configuration string to provider.
- *
- * @param wsrep provider handle
- * @param conf configuration string
- *
- * @retval WSREP_OK configuration string was parsed successfully
- * @retval WSREP_WARNING couldn't parse conf string, no action taken
- */
- wsrep_status_t (*options_set) (wsrep_t* wsrep, const char* conf);
-
- /*!
- * @brief Returns provider-specific string with current configuration values.
- *
- * @param wsrep provider handle
- *
- * @return a dynamically allocated string with current configuration
- * parameter values
- */
- char* (*options_get) (wsrep_t* wsrep);
-
- /*!
- * @brief Opens connection to cluster
- *
- * Returns when the node is either ready to operate as a part of the cluster
- * or fails to reach operating status.
- *
- * @param wsrep provider handle
- * @param cluster_name unique symbolic cluster name
- * @param cluster_url URL-like cluster address (backend://address)
- * @param state_donor name of the node to be asked for state transfer.
- * @param bootstrap a flag to request initialization of a new wsrep
- * service rather than a connection to the existing one.
- * cluster_url may still carry important initialization
- * parameters, like backend spec and/or listen address.
- */
- wsrep_status_t (*connect) (wsrep_t* wsrep,
- const char* cluster_name,
- const char* cluster_url,
- const char* state_donor,
- wsrep_bool_t bootstrap);
-
- /*!
- * @brief Closes connection to cluster.
- *
- * If state_uuid and/or state_seqno is not NULL, will store final state
- * in there.
- *
- * @param wsrep this wsrep handler
- */
- wsrep_status_t (*disconnect)(wsrep_t* wsrep);
-
- /*!
- * @brief start receiving replication events
- *
- * This function never returns
- *
- * @param wsrep provider handle
- * @param recv_ctx receiver context
- */
- wsrep_status_t (*recv)(wsrep_t* wsrep, void* recv_ctx);
-
- /*!
- * @brief Replicates/logs result of transaction to other nodes and allocates
- * required resources.
- *
- * Must be called before transaction commit. Returns success code, which
- * caller must check.
- * In case of WSREP_OK, starts commit critical section, transaction can
- * commit. Otherwise transaction must rollback.
- *
- * @param wsrep provider handle
- * @param ws_handle writeset of committing transaction
- * @param conn_id connection ID
- * @param flags fine tuning the replication WSREP_FLAG_*
- * @param meta transaction meta data
- *
- * @retval WSREP_OK cluster-wide commit succeeded
- * @retval WSREP_TRX_FAIL must rollback transaction
- * @retval WSREP_CONN_FAIL must close client connection
- * @retval WSREP_NODE_FAIL must close all connections and reinit
- */
- wsrep_status_t (*pre_commit)(wsrep_t* wsrep,
- wsrep_conn_id_t conn_id,
- wsrep_ws_handle_t* ws_handle,
- uint32_t flags,
- wsrep_trx_meta_t* meta);
-
- /*!
- * @brief Releases resources after transaction commit.
- *
- * Ends commit critical section.
- *
- * @param wsrep provider handle
- * @param ws_handle writeset of committing transaction
- * @retval WSREP_OK post_commit succeeded
- */
- wsrep_status_t (*post_commit) (wsrep_t* wsrep,
- wsrep_ws_handle_t* ws_handle);
-
- /*!
- * @brief Releases resources after transaction rollback.
- *
- * @param wsrep provider handle
- * @param ws_handle writeset of committing transaction
- * @retval WSREP_OK post_rollback succeeded
- */
- wsrep_status_t (*post_rollback)(wsrep_t* wsrep,
- wsrep_ws_handle_t* ws_handle);
-
- /*!
- * @brief Replay trx as a slave writeset
- *
- * If local trx has been aborted by brute force, and it has already
- * replicated before this abort, we must try to apply it as a
- * slave trx. Note that slave nodes see only trx writesets, and a certification
- * test based on write set content can differ from DBMS lock conflicts.
- *
- * @param wsrep provider handle
- * @param ws_handle writeset of committing transaction
- * @param trx_ctx transaction context
- *
- * @retval WSREP_OK cluster commit succeeded
- * @retval WSREP_TRX_FAIL must rollback transaction
- * @retval WSREP_BF_ABORT brute force abort happened after trx replicated
- * must rollback transaction and try to replay
- * @retval WSREP_CONN_FAIL must close client connection
- * @retval WSREP_NODE_FAIL must close all connections and reinit
- */
- wsrep_status_t (*replay_trx)(wsrep_t* wsrep,
- wsrep_ws_handle_t* ws_handle,
- void* trx_ctx);
-
- /*!
- * @brief Abort pre_commit() call of another thread.
- *
- * It is possible that some high-priority transaction needs to abort
- * another transaction which is in a pre_commit() call waiting for resources.
- *
- * The kill routine checks that abort is not attempted against a transaction
- * which is in front of the caller (in total order).
- *
- * @param wsrep provider handle
- * @param bf_seqno seqno of brute force trx, running this cancel
- * @param victim_trx transaction to be aborted, and which is committing
- *
- * @retval WSREP_OK abort succeeded
- * @retval WSREP_WARNING abort failed
- */
- wsrep_status_t (*abort_pre_commit)(wsrep_t* wsrep,
- wsrep_seqno_t bf_seqno,
- wsrep_trx_id_t victim_trx);
-
- /*!
- * @brief Appends a row reference to transaction writeset
- *
- * Both copy flag and key_type can be ignored by provider (key type
- * interpreted as WSREP_KEY_EXCLUSIVE).
- *
- * @param wsrep provider handle
- * @param ws_handle writeset handle
- * @param keys array of keys
- * @param count length of the array of keys
- * @param type type of the key
- * @param copy can be set to FALSE if keys persist through commit.
- */
- wsrep_status_t (*append_key)(wsrep_t* wsrep,
- wsrep_ws_handle_t* ws_handle,
- const wsrep_key_t* keys,
- size_t count,
- enum wsrep_key_type type,
- wsrep_bool_t copy);
-
- /*!
- * @brief Appends data to transaction writeset
- *
- * This method can be called any time before commit and it
- * appends a number of data buffers to transaction writeset.
- *
- * Both copy and unordered flags can be ignored by provider.
- *
- * @param wsrep provider handle
- * @param ws_handle writeset handle
- * @param data array of data buffers
- * @param count buffer count
- * @param type type of data
- * @param copy can be set to FALSE if data persists through commit.
- */
- wsrep_status_t (*append_data)(wsrep_t* wsrep,
- wsrep_ws_handle_t* ws_handle,
- const struct wsrep_buf* data,
- size_t count,
- enum wsrep_data_type type,
- wsrep_bool_t copy);
-
- /*!
- * @brief Get causal ordering for read operation
- *
- * This call will block until causal ordering with all possible
- * preceding writes in the cluster is guaranteed. If pointer to
- * gtid is non-null, the call stores the global transaction ID
- * of the last transaction which is guaranteed to be ordered
- * causally before this call.
- *
- * @param wsrep provider handle
- * @param gtid location to store GTID
- */
- wsrep_status_t (*causal_read)(wsrep_t* wsrep, wsrep_gtid_t* gtid);
-
- /*!
- * @brief Clears allocated connection context.
- *
- * Whenever a new connection ID is passed to wsrep provider through
- * any of the API calls, a connection context is allocated for this
- * connection. This call explicitly notifies the provider of connection
- * closing.
- *
- * @param wsrep provider handle
- * @param conn_id connection ID
- * @param query the 'set database' query
- * @param query_len length of query (does not end with 0)
- */
- wsrep_status_t (*free_connection)(wsrep_t* wsrep,
- wsrep_conn_id_t conn_id);
-
- /*!
- * @brief Replicates a query and starts "total order isolation" section.
- *
- * Replicates the action spec and returns success code, which caller must
- * check. Total order isolation continues until to_execute_end() is called.
- *
- * @param wsrep provider handle
- * @param conn_id connection ID
- * @param keys array of keys
- * @param keys_num length of the array of keys
- * @param action action buffer array to be executed
- * @param count action buffer count
- * @param meta transaction meta data
- *
- * @retval WSREP_OK cluster commit succeeded
- * @retval WSREP_CONN_FAIL must close client connection
- * @retval WSREP_NODE_FAIL must close all connections and reinit
- */
- wsrep_status_t (*to_execute_start)(wsrep_t* wsrep,
- wsrep_conn_id_t conn_id,
- const wsrep_key_t* keys,
- size_t keys_num,
- const struct wsrep_buf* action,
- size_t count,
- wsrep_trx_meta_t* meta);
-
- /*!
- * @brief Ends the total order isolation section.
- *
- * Marks the end of total order isolation. TO locks are freed
- * and other transactions are free to commit from this point on.
- *
- * @param wsrep provider handle
- * @param conn_id connection ID
- *
- * @retval WSREP_OK cluster commit succeeded
- * @retval WSREP_CONN_FAIL must close client connection
- * @retval WSREP_NODE_FAIL must close all connections and reinit
- */
- wsrep_status_t (*to_execute_end)(wsrep_t* wsrep, wsrep_conn_id_t conn_id);
-
- /*!
- * @brief Collects preordered replication events into a writeset.
- *
- * @param wsrep wsrep provider handle
- * @param handle a handle associated with a given writeset
- * @param data an array of data buffers.
- * @param count length of data buffer array.
- * @param copy whether provider needs to make a copy of events.
- *
- * @retval WSREP_OK cluster-wide commit succeeded
- * @retval WSREP_TRX_FAIL operation failed (e.g. trx size exceeded limit)
- * @retval WSREP_NODE_FAIL must close all connections and reinit
- */
- wsrep_status_t (*preordered_collect) (wsrep_t* wsrep,
- wsrep_po_handle_t* handle,
- const struct wsrep_buf* data,
- size_t count,
- wsrep_bool_t copy);
-
- /*!
- * @brief "Commits" preordered writeset to cluster.
- *
- * The contract is that the writeset will be committed in the same (partial)
- * order this method was called. Frees resources associated with the writeset
- * handle and reinitializes the handle.
- *
- * @param wsrep wsrep provider handle
- * @param po_handle a handle associated with a given writeset
- * @param source_id ID of the event producer, also serves as the partial order
- * or stream ID - events with different source_ids won't be
- * ordered with respect to each other.
- * @param flags WSREP_FLAG_... flags
- * @param pa_range the number of preceding events this event can be processed
- * in parallel with. A value of 0 means strict serial
- * processing. Note: commits always happen in wsrep order.
- * @param commit 'true' to commit writeset to cluster (replicate) or
- * 'false' to rollback (cancel) the writeset.
- *
- * @retval WSREP_OK cluster-wide commit succeeded
- * @retval WSREP_TRX_FAIL operation failed (e.g. NON-PRIMARY component)
- * @retval WSREP_NODE_FAIL must close all connections and reinit
- */
- wsrep_status_t (*preordered_commit) (wsrep_t* wsrep,
- wsrep_po_handle_t* handle,
- const wsrep_uuid_t* source_id,
- uint32_t flags,
- int pa_range,
- wsrep_bool_t commit);
-
- /*!
- * @brief Signals to wsrep provider that state snapshot has been sent to
- * joiner.
- *
- * @param wsrep provider handle
- * @param state_id state ID
- * @param rcode 0 or negative error code of the operation.
- */
- wsrep_status_t (*sst_sent)(wsrep_t* wsrep,
- const wsrep_gtid_t* state_id,
- int rcode);
-
- /*!
- * @brief Signals to wsrep provider that new state snapshot has been received.
- * May deadlock if called from sst_prepare_cb.
- *
- * @param wsrep provider handle
- * @param state_id state ID
- * @param state initial state provided by SST donor
- * @param state_len length of state buffer
- * @param rcode 0 or negative error code of the operation.
- */
- wsrep_status_t (*sst_received)(wsrep_t* wsrep,
- const wsrep_gtid_t* state_id,
- const void* state,
- size_t state_len,
- int rcode);
-
-
- /*!
- * @brief Generate request for consistent snapshot.
- *
- * If successful, this call will internally generate an SST request
- * which in turn triggers calling the SST donate callback on the nodes
- * specified in donor_spec. If donor_spec is NULL, the callback is
- * called only locally. This call will block until sst_sent is called
- * from callback.
- *
- * @param wsrep provider handle
- * @param msg context message for SST donate callback
- * @param msg_len length of context message
- * @param donor_spec list of snapshot donors
- */
- wsrep_status_t (*snapshot)(wsrep_t* wsrep,
- const void* msg,
- size_t msg_len,
- const char* donor_spec);
-
- /*!
- * @brief Returns an array of status variables.
- * The array is terminated by a NULL variable name.
- *
- * @param wsrep provider handle
- * @return array of struct wsrep_stats_var.
- */
- struct wsrep_stats_var* (*stats_get) (wsrep_t* wsrep);
-
- /*!
- * @brief Release resources that might be associated with the array.
- *
- * @param wsrep provider handle.
- * @param var_array array returned by stats_get().
- */
- void (*stats_free) (wsrep_t* wsrep, struct wsrep_stats_var* var_array);
-
- /*!
- * @brief Reset some stats variables to initial value, provider-dependent.
- *
- * @param wsrep provider handle.
- */
- void (*stats_reset) (wsrep_t* wsrep);
-
- /*!
- * @brief Pauses writeset applying/committing.
- *
- * @return global sequence number of the paused state or negative error code.
- */
- wsrep_seqno_t (*pause) (wsrep_t* wsrep);
-
- /*!
- * @brief Resumes writeset applying/committing.
- */
- wsrep_status_t (*resume) (wsrep_t* wsrep);
-
- /*!
- * @brief Desynchronize from cluster
- *
- * Effectively turns off flow control for this node, allowing it
- * to fall behind the cluster.
- */
- wsrep_status_t (*desync) (wsrep_t* wsrep);
-
- /*!
- * @brief Request to resynchronize with cluster.
- *
- * Effectively turns on flow control. Asynchronous - actual synchronization
- * event to be delivered via synced_cb.
- */
- wsrep_status_t (*resync) (wsrep_t* wsrep);
-
- /*!
- * @brief Acquire global named lock
- *
- * @param wsrep wsrep provider handle
- * @param name lock name
- * @param shared shared or exclusive lock
- * @param owner 64-bit owner ID
- * @param tout timeout in nanoseconds.
- * 0 - return immediately, -1 wait forever.
- * @return wsrep status or negative error code
- * @retval -EDEADLK lock was already acquired by this thread
- * @retval -EBUSY lock was busy
- */
- wsrep_status_t (*lock) (wsrep_t* wsrep,
- const char* name, wsrep_bool_t shared,
- uint64_t owner, int64_t tout);
-
- /*!
- * @brief Release global named lock
- *
- * @param wsrep wsrep provider handle
- * @param name lock name
- * @param owner 64-bit owner ID
- * @return wsrep status or negative error code
- * @retval -EPERM lock does not belong to this owner
- */
- wsrep_status_t (*unlock) (wsrep_t* wsrep, const char* name, uint64_t owner);
-
- /*!
- * @brief Check if global named lock is locked
- *
- * @param wsrep wsrep provider handle
- * @param name lock name
- * @param owner if not NULL will contain 64-bit owner ID
- * @param node if not NULL will contain owner's node UUID
- * @return true if lock is locked
- */
- wsrep_bool_t (*is_locked) (wsrep_t* wsrep, const char* name, uint64_t* conn,
- wsrep_uuid_t* node);
-
- /*!
- * wsrep provider name
- */
- const char* provider_name;
-
- /*!
- * wsrep provider version
- */
- const char* provider_version;
-
- /*!
- * wsrep provider vendor name
- */
- const char* provider_vendor;
-
- /*!
- * @brief Frees allocated resources before unloading the library.
- * @param wsrep provider handle
- */
- void (*free)(wsrep_t* wsrep);
-
- void *dlh; //!< reserved for future use
- void *ctx; //!< reserved for implementation private context
-};
-
-
-/*!
- *
- * @brief Loads wsrep library
- *
- * @param spec path to wsrep library. If NULL or WSREP_NONE initialises dummy
- * pass-through implementation.
- * @param hptr wsrep handle
- * @param log_cb callback to handle loader messages. If NULL, messages go to stderr.
- *
- * @return zero on success, errno on failure
- */
-int wsrep_load(const char* spec, wsrep_t** hptr, wsrep_log_cb_t log_cb);
-
-/*!
- * @brief Unloads wsrep library and frees associated resources
- *
- * @param hptr wsrep handler pointer
- */
-void wsrep_unload(wsrep_t* hptr);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* WSREP_H */
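(A minimal, hedged sketch of the application side of the contract described at
the top of wsrep_api.h: load a provider with wsrep_load(), fill in struct
wsrep_init_args, then init(), connect() and drive recv(). The run_node()
helper, the stub callbacks and all concrete values -- provider path, node name,
"gcomm://" URL -- are illustrative assumptions, not code from this tree.)

/* sketch: hypothetical application-side usage of the wsrep API above */
#include <stdio.h>
#include <string.h>
#include "wsrep_api.h"

static void app_logger(wsrep_log_level_t lvl, const char* msg)
{
    (void)lvl;
    fprintf(stderr, "wsrep: %s\n", msg);
}

static enum wsrep_cb_status app_apply_cb(void* recv_ctx, const void* data,
                                         size_t size, uint32_t flags,
                                         const wsrep_trx_meta_t* meta)
{
    /* a real application would apply the replicated writeset here */
    (void)recv_ctx; (void)data; (void)size; (void)flags; (void)meta;
    return WSREP_CB_SUCCESS;
}

int run_node(const char* provider_path)          /* hypothetical helper */
{
    wsrep_t* wsrep = NULL;
    struct wsrep_init_args args;

    if (wsrep_load(provider_path, &wsrep, app_logger) != 0)
        return 1;

    memset(&args, 0, sizeof(args));
    args.node_name    = "node1";                 /* example values only */
    args.node_address = "127.0.0.1";
    args.options      = "";
    args.proto_ver    = 1;
    args.state_id     = &WSREP_GTID_UNDEFINED;
    args.logger_cb    = app_logger;
    args.apply_cb     = app_apply_cb;

    if (wsrep->init(wsrep, &args) != WSREP_OK ||
        wsrep->connect(wsrep, "example_cluster", "gcomm://", "", 0) != WSREP_OK)
    {
        wsrep_unload(wsrep);
        return 1;
    }

    wsrep->recv(wsrep, NULL);      /* normally runs in a dedicated thread */

    wsrep->disconnect(wsrep);
    wsrep_unload(wsrep);
    return 0;
}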
diff --git a/wsrep/wsrep_dummy.c b/wsrep/wsrep_dummy.c
deleted file mode 100644
index e48dcff39a1..00000000000
--- a/wsrep/wsrep_dummy.c
+++ /dev/null
@@ -1,413 +0,0 @@
-/* Copyright (C) 2009-2010 Codership Oy <info@codership.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA
- */
-
-/*! @file Dummy wsrep API implementation. */
-
-#include "wsrep_api.h"
-
-#include <errno.h>
-#include <string.h>
-
-/*! Dummy backend context. */
-typedef struct wsrep_dummy
-{
- wsrep_log_cb_t log_fn;
- char* options;
-} wsrep_dummy_t;
-
-/* Get pointer to wsrep_dummy context from wsrep_t pointer */
-#define WSREP_DUMMY(_p) ((wsrep_dummy_t *) (_p)->ctx)
-
-/* Trace function usage a-la DBUG */
-#define WSREP_DBUG_ENTER(_w) do { \
- if (WSREP_DUMMY(_w)) { \
- if (WSREP_DUMMY(_w)->log_fn) \
- WSREP_DUMMY(_w)->log_fn(WSREP_LOG_DEBUG, __FUNCTION__); \
- } \
- } while (0)
-
-
-static void dummy_free(wsrep_t *w)
-{
- WSREP_DBUG_ENTER(w);
- if (WSREP_DUMMY(w)->options) {
- free(WSREP_DUMMY(w)->options);
- WSREP_DUMMY(w)->options = NULL;
- }
- free(w->ctx);
- w->ctx = NULL;
-}
-
-static wsrep_status_t dummy_init (wsrep_t* w,
- const struct wsrep_init_args* args)
-{
- WSREP_DUMMY(w)->log_fn = args->logger_cb;
- WSREP_DBUG_ENTER(w);
- if (args->options) {
- WSREP_DUMMY(w)->options = strdup(args->options);
- }
- return WSREP_OK;
-}
-
-static uint64_t dummy_capabilities (wsrep_t* w __attribute__((unused)))
-{
- return 0;
-}
-
-static wsrep_status_t dummy_options_set(
- wsrep_t* w,
- const char* conf)
-{
- WSREP_DBUG_ENTER(w);
- if (WSREP_DUMMY(w)->options) {
- free(WSREP_DUMMY(w)->options);
- WSREP_DUMMY(w)->options = NULL;
- }
- if (conf) {
- WSREP_DUMMY(w)->options = strdup(conf);
- }
- return WSREP_OK;
-}
-
-static char* dummy_options_get (wsrep_t* w)
-{
- char *options;
-
- WSREP_DBUG_ENTER(w);
- options= WSREP_DUMMY(w)->options;
-
- if (options)
- options= strdup(WSREP_DUMMY(w)->options);
-
- return options;
-}
-
-static wsrep_status_t dummy_connect(
- wsrep_t* w,
- const char* name __attribute__((unused)),
- const char* url __attribute__((unused)),
- const char* donor __attribute__((unused)),
- wsrep_bool_t bootstrap __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_disconnect(wsrep_t* w)
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_recv(wsrep_t* w,
- void* recv_ctx __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_pre_commit(
- wsrep_t* w,
- const wsrep_conn_id_t conn_id __attribute__((unused)),
- wsrep_ws_handle_t* ws_handle __attribute__((unused)),
- uint32_t flags __attribute__((unused)),
- wsrep_trx_meta_t* meta __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_post_commit(
- wsrep_t* w,
- wsrep_ws_handle_t* ws_handle __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_post_rollback(
- wsrep_t* w,
- wsrep_ws_handle_t* ws_handle __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_replay_trx(
- wsrep_t* w,
- wsrep_ws_handle_t* ws_handle __attribute__((unused)),
- void* trx_ctx __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_abort_pre_commit(
- wsrep_t* w,
- const wsrep_seqno_t bf_seqno __attribute__((unused)),
- const wsrep_trx_id_t trx_id __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_append_key(
- wsrep_t* w,
- wsrep_ws_handle_t* ws_handle __attribute__((unused)),
- const wsrep_key_t* key __attribute__((unused)),
- const size_t key_num __attribute__((unused)),
- const wsrep_key_type_t key_type __attribute__((unused)),
- const wsrep_bool_t copy __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_append_data(
- wsrep_t* w,
- wsrep_ws_handle_t* ws_handle __attribute__((unused)),
- const struct wsrep_buf* data __attribute__((unused)),
- const size_t count __attribute__((unused)),
- const wsrep_data_type_t type __attribute__((unused)),
- const wsrep_bool_t copy __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_causal_read(
- wsrep_t* w,
- wsrep_gtid_t* gtid __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_free_connection(
- wsrep_t* w,
- const wsrep_conn_id_t conn_id __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_to_execute_start(
- wsrep_t* w,
- const wsrep_conn_id_t conn_id __attribute__((unused)),
- const wsrep_key_t* key __attribute__((unused)),
- const size_t key_num __attribute__((unused)),
- const struct wsrep_buf* data __attribute__((unused)),
- const size_t count __attribute__((unused)),
- wsrep_trx_meta_t* meta __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_to_execute_end(
- wsrep_t* w,
- const wsrep_conn_id_t conn_id __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_preordered_collect(
- wsrep_t* w,
- wsrep_po_handle_t* handle __attribute__((unused)),
- const struct wsrep_buf* data __attribute__((unused)),
- size_t count __attribute__((unused)),
- wsrep_bool_t copy __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_preordered_commit(
- wsrep_t* w,
- wsrep_po_handle_t* handle __attribute__((unused)),
- const wsrep_uuid_t* source_id __attribute__((unused)),
- uint32_t flags __attribute__((unused)),
- int pa_range __attribute__((unused)),
- wsrep_bool_t commit __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_sst_sent(
- wsrep_t* w,
- const wsrep_gtid_t* state_id __attribute__((unused)),
- const int rcode __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_sst_received(
- wsrep_t* w,
- const wsrep_gtid_t* state_id __attribute__((unused)),
- const void* state __attribute__((unused)),
- const size_t state_len __attribute__((unused)),
- const int rcode __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_snapshot(
- wsrep_t* w,
- const void* msg __attribute__((unused)),
- const size_t msg_len __attribute__((unused)),
- const char* donor_spec __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static struct wsrep_stats_var dummy_stats[] = {
- { NULL, WSREP_VAR_STRING, { 0 } }
-};
-
-static struct wsrep_stats_var* dummy_stats_get (wsrep_t* w)
-{
- WSREP_DBUG_ENTER(w);
- return dummy_stats;
-}
-
-static void dummy_stats_free (
- wsrep_t* w,
- struct wsrep_stats_var* stats __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
-}
-
-static void dummy_stats_reset (wsrep_t* w)
-{
- WSREP_DBUG_ENTER(w);
-}
-
-static wsrep_seqno_t dummy_pause (wsrep_t* w)
-{
- WSREP_DBUG_ENTER(w);
- return -ENOSYS;
-}
-
-static wsrep_status_t dummy_resume (wsrep_t* w)
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_desync (wsrep_t* w)
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_NOT_IMPLEMENTED;
-}
-
-static wsrep_status_t dummy_resync (wsrep_t* w)
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_status_t dummy_lock (wsrep_t* w,
- const char* s __attribute__((unused)),
- wsrep_bool_t r __attribute__((unused)),
- uint64_t o __attribute__((unused)),
- int64_t t __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_NOT_IMPLEMENTED;
-}
-
-static wsrep_status_t dummy_unlock (wsrep_t* w,
- const char* s __attribute__((unused)),
- uint64_t o __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return WSREP_OK;
-}
-
-static wsrep_bool_t dummy_is_locked (wsrep_t* w,
- const char* s __attribute__((unused)),
- uint64_t* o __attribute__((unused)),
- wsrep_uuid_t* t __attribute__((unused)))
-{
- WSREP_DBUG_ENTER(w);
- return 0;
-}
-
-static wsrep_t dummy_iface = {
- WSREP_INTERFACE_VERSION,
- &dummy_init,
- &dummy_capabilities,
- &dummy_options_set,
- &dummy_options_get,
- &dummy_connect,
- &dummy_disconnect,
- &dummy_recv,
- &dummy_pre_commit,
- &dummy_post_commit,
- &dummy_post_rollback,
- &dummy_replay_trx,
- &dummy_abort_pre_commit,
- &dummy_append_key,
- &dummy_append_data,
- &dummy_causal_read,
- &dummy_free_connection,
- &dummy_to_execute_start,
- &dummy_to_execute_end,
- &dummy_preordered_collect,
- &dummy_preordered_commit,
- &dummy_sst_sent,
- &dummy_sst_received,
- &dummy_snapshot,
- &dummy_stats_get,
- &dummy_stats_free,
- &dummy_stats_reset,
- &dummy_pause,
- &dummy_resume,
- &dummy_desync,
- &dummy_resync,
- &dummy_lock,
- &dummy_unlock,
- &dummy_is_locked,
- WSREP_NONE,
- WSREP_INTERFACE_VERSION,
- "Codership Oy <info@codership.com>",
- &dummy_free,
- NULL,
- NULL
-};
-
-int wsrep_dummy_loader(wsrep_t* w)
-{
- if (!w)
- return EINVAL;
-
- *w = dummy_iface;
-
- // allocate private context
- if (!(w->ctx = malloc(sizeof(wsrep_dummy_t))))
- return ENOMEM;
-
- // initialize private context
- WSREP_DUMMY(w)->log_fn = NULL;
- WSREP_DUMMY(w)->options = NULL;
-
- return 0;
-}
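(A hedged sketch of using the dummy backend directly; it is normally reached
via wsrep_load() with a WSREP_NONE spec, but the loader entry point above can
also be called by hand. The use_dummy() helper name is an assumption.)

/* sketch: every dummy operation is a logged no-op that reports success */
#include "wsrep_api.h"

extern int wsrep_dummy_loader(wsrep_t* w);

int use_dummy(void)
{
    wsrep_t iface;
    wsrep_status_t rc;

    if (wsrep_dummy_loader(&iface) != 0)   /* fills the vtable, allocates ctx */
        return 1;

    rc = iface.options_set(&iface, "debug=yes");   /* stored, never parsed */

    iface.free(&iface);                    /* releases the private context */
    return rc == WSREP_OK ? 0 : 1;
}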
diff --git a/wsrep/wsrep_gtid.c b/wsrep/wsrep_gtid.c
deleted file mode 100644
index 45148785c25..00000000000
--- a/wsrep/wsrep_gtid.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/* Copyright (C) 2013 Codership Oy <info@codership.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA
- */
-
-/*! @file Helper functions to deal with GTID string representations */
-
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <inttypes.h>
-
-#include "wsrep_api.h"
-
-/*!
- * Read GTID from string
- * @return length of GTID string representation or -EINVAL in case of error
- */
-int
-wsrep_gtid_scan(const char* str, size_t str_len, wsrep_gtid_t* gtid)
-{
- unsigned int offset;
- char* endptr;
-
- if ((offset = wsrep_uuid_scan(str, str_len, &gtid->uuid)) > 0 &&
- offset < str_len && str[offset] == ':') {
- ++offset;
- if (offset < str_len)
- {
- errno = 0;
- gtid->seqno = strtoll(str + offset, &endptr, 0);
-
- if (errno == 0) {
- offset = endptr - str;
- return offset;
- }
- }
- }
- *gtid = WSREP_GTID_UNDEFINED;
- return -EINVAL;
-}
-
-/*!
- * Write GTID to string
- * @return length of GTID string representation or -EMSGSIZE if string is too
- * short
- */
-int
-wsrep_gtid_print(const wsrep_gtid_t* gtid, char* str, size_t str_len)
-{
- unsigned int offset, ret;
- if ((offset = wsrep_uuid_print(&gtid->uuid, str, str_len)) > 0)
- {
- ret = snprintf(str + offset, str_len - offset,
- ":%" PRId64, gtid->seqno);
- if (ret <= str_len - offset) {
- return (offset + ret);
- }
-
- }
-
- return -EMSGSIZE;
-}
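(A hedged usage sketch for the two GTID helpers above; the UUID and seqno in
the example string are arbitrary.)

/* sketch: round-trip a "uuid:seqno" string through the GTID helpers */
#include <stdio.h>
#include <string.h>
#include "wsrep_api.h"

void gtid_roundtrip(void)
{
    const char*  s = "6d1e9a2b-0c7f-11e4-8b5a-1e6b7c8d9e0f:42";
    wsrep_gtid_t gtid;
    char         buf[WSREP_GTID_STR_LEN + 1];

    if (wsrep_gtid_scan(s, strlen(s), &gtid) > 0 &&     /* parse uuid:seqno */
        wsrep_gtid_print(&gtid, buf, sizeof(buf)) > 0)  /* format it back   */
        printf("%s\n", buf);
}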
diff --git a/wsrep/wsrep_loader.c b/wsrep/wsrep_loader.c
deleted file mode 100644
index 1321538742f..00000000000
--- a/wsrep/wsrep_loader.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/* Copyright (C) 2009-2011 Codership Oy <info@codership.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA
- */
-
-/*! @file wsrep implementation loader */
-
-#include <dlfcn.h>
-#include <errno.h>
-#include <string.h>
-#include <stdio.h>
-
-#include "wsrep_api.h"
-
-// Logging stuff for the loader
-static const char* log_levels[] = {"FATAL", "ERROR", "WARN", "INFO", "DEBUG"};
-
-static void default_logger (wsrep_log_level_t lvl, const char* msg)
-{
- fprintf (stderr, "wsrep loader: [%s] %s\n", log_levels[lvl], msg);
-}
-
-static wsrep_log_cb_t logger = default_logger;
-
-/**************************************************************************
- * Library loader
- **************************************************************************/
-
-static int wsrep_check_iface_version(const char* found, const char* iface_ver)
-{
- const size_t msg_len = 128;
- char msg[128];
-
- if (strcmp(found, iface_ver)) {
- snprintf (msg, msg_len,
- "provider interface version mismatch: need '%s', found '%s'",
- iface_ver, found);
- logger (WSREP_LOG_ERROR, msg);
- return EINVAL;
- }
-
- return 0;
-}
-
-static int verify(const wsrep_t *wh, const char *iface_ver)
-{
- char msg[128];
-
-#define VERIFY(_p) if (!(_p)) { \
- snprintf(msg, sizeof(msg), "wsrep_load(): verify(): %s\n", # _p); \
- logger (WSREP_LOG_ERROR, msg); \
- return EINVAL; \
- }
-
- VERIFY(wh);
- VERIFY(wh->version);
-
- if (wsrep_check_iface_version(wh->version, iface_ver))
- return EINVAL;
-
- VERIFY(wh->init);
- VERIFY(wh->options_set);
- VERIFY(wh->options_get);
- VERIFY(wh->connect);
- VERIFY(wh->disconnect);
- VERIFY(wh->recv);
- VERIFY(wh->pre_commit);
- VERIFY(wh->post_commit);
- VERIFY(wh->post_rollback);
- VERIFY(wh->replay_trx);
- VERIFY(wh->abort_pre_commit);
- VERIFY(wh->append_key);
- VERIFY(wh->append_data);
- VERIFY(wh->free_connection);
- VERIFY(wh->to_execute_start);
- VERIFY(wh->to_execute_end);
- VERIFY(wh->preordered_collect);
- VERIFY(wh->preordered_commit);
- VERIFY(wh->sst_sent);
- VERIFY(wh->sst_received);
- VERIFY(wh->stats_get);
- VERIFY(wh->stats_free);
- VERIFY(wh->stats_reset);
- VERIFY(wh->pause);
- VERIFY(wh->resume);
- VERIFY(wh->desync);
- VERIFY(wh->resync);
- VERIFY(wh->lock);
- VERIFY(wh->unlock);
- VERIFY(wh->is_locked);
- VERIFY(wh->provider_name);
- VERIFY(wh->provider_version);
- VERIFY(wh->provider_vendor);
- VERIFY(wh->free);
- return 0;
-}
-
-typedef int (*wsrep_loader_fun)(wsrep_t*);
-
-static wsrep_loader_fun wsrep_dlf(void *dlh, const char *sym)
-{
- union {
- wsrep_loader_fun dlfun;
- void *obj;
- } alias;
- alias.obj = dlsym(dlh, sym);
- return alias.dlfun;
-}
-
-static int wsrep_check_version_symbol(void *dlh)
-{
- char** dlversion = NULL;
- dlversion = (char**) dlsym(dlh, "wsrep_interface_version");
- if (dlversion == NULL)
- return 0;
- return wsrep_check_iface_version(*dlversion, WSREP_INTERFACE_VERSION);
-}
-
-extern int wsrep_dummy_loader(wsrep_t *w);
-
-int wsrep_load(const char *spec, wsrep_t **hptr, wsrep_log_cb_t log_cb)
-{
- int ret = 0;
- void *dlh = NULL;
- wsrep_loader_fun dlfun;
- char msg[1025];
- msg[sizeof(msg)-1] = 0;
-
- if (NULL != log_cb)
- logger = log_cb;
-
- if (!(spec && hptr))
- return EINVAL;
-
- snprintf (msg, sizeof(msg)-1,
- "wsrep_load(): loading provider library '%s'", spec);
- logger (WSREP_LOG_INFO, msg);
-
- if (!(*hptr = malloc(sizeof(wsrep_t)))) {
- logger (WSREP_LOG_FATAL, "wsrep_load(): out of memory");
- return ENOMEM;
- }
-
- if (!spec || strcmp(spec, WSREP_NONE) == 0) {
- if ((ret = wsrep_dummy_loader(*hptr)) != 0) {
- free (*hptr);
- *hptr = NULL;
- }
- return ret;
- }
-
- if (!(dlh = dlopen(spec, RTLD_NOW | RTLD_LOCAL))) {
- snprintf(msg, sizeof(msg)-1, "wsrep_load(): dlopen(): %s", dlerror());
- logger (WSREP_LOG_ERROR, msg);
- ret = EINVAL;
- goto out;
- }
-
- if (!(dlfun = wsrep_dlf(dlh, "wsrep_loader"))) {
- ret = EINVAL;
- goto out;
- }
-
- if (wsrep_check_version_symbol(dlh) != 0) {
- ret = EINVAL;
- goto out;
- }
-
- if ((ret = (*dlfun)(*hptr)) != 0) {
- snprintf(msg, sizeof(msg)-1, "wsrep_load(): loader failed: %s",
- strerror(ret));
- logger (WSREP_LOG_ERROR, msg);
- goto out;
- }
-
- if ((ret = verify(*hptr, WSREP_INTERFACE_VERSION)) != 0) {
- snprintf (msg, sizeof(msg)-1,
- "wsrep_load(): interface version mismatch: my version %s, "
- "provider version %s", WSREP_INTERFACE_VERSION,
- (*hptr)->version);
- logger (WSREP_LOG_ERROR, msg);
- goto out;
- }
-
- (*hptr)->dlh = dlh;
-
-out:
- if (ret != 0) {
- if (dlh) dlclose(dlh);
- free(*hptr);
- *hptr = NULL;
- } else {
- snprintf (msg, sizeof(msg)-1,
- "wsrep_load(): %s %s by %s loaded successfully.",
- (*hptr)->provider_name, (*hptr)->provider_version,
- (*hptr)->provider_vendor);
- logger (WSREP_LOG_INFO, msg);
- }
-
- return ret;
-}
-
-void wsrep_unload(wsrep_t *hptr)
-{
- if (!hptr) {
- logger (WSREP_LOG_WARN, "wsrep_unload(): null pointer.");
- } else {
- if (hptr->free)
- hptr->free(hptr);
- if (hptr->dlh)
- dlclose(hptr->dlh);
- free(hptr);
- }
-}
-
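(For contrast with the application-side sketch after wsrep_api.h, a hedged
sketch of the provider-side entry points that the loader above expects a
shared library to export; delegating to wsrep_dummy_loader() is purely
illustrative.)

/* sketch: what a provider .so exports for wsrep_load() to dlsym() and call */
#include <errno.h>
#include "wsrep_api.h"

/* optional symbol; wsrep_check_version_symbol() cross-checks it when present */
const char* wsrep_interface_version = WSREP_INTERFACE_VERSION;

extern int wsrep_dummy_loader(wsrep_t* w);

/* wsrep_load() resolves "wsrep_loader", calls it to fill the wsrep_t vtable,
   verifies the result and then stores the dlopen handle in ->dlh */
int wsrep_loader(wsrep_t* hptr)
{
    if (!hptr)
        return EINVAL;
    return wsrep_dummy_loader(hptr);   /* illustration: reuse the dummy vtable */
}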
diff --git a/wsrep/wsrep_uuid.c b/wsrep/wsrep_uuid.c
deleted file mode 100644
index 54ae4ab5ed5..00000000000
--- a/wsrep/wsrep_uuid.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/* Copyright (C) 2009 Codership Oy <info@codership.com>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA
- */
-
-/*! @file Helper functions to deal with history UUID string representations */
-
-#include <errno.h>
-#include <ctype.h>
-#include <stdio.h>
-
-#include "wsrep_api.h"
-
-/*!
- * Read UUID from string
- * @return length of UUID string representation or -EINVAL in case of error
- */
-int
-wsrep_uuid_scan (const char* str, size_t str_len, wsrep_uuid_t* uuid)
-{
- unsigned int uuid_len = 0;
- unsigned int uuid_offt = 0;
-
- while (uuid_len + 1 < str_len) {
- /* We are skipping potential '-' after uuid_offt == 4, 6, 8, 10
- * which means
- * (uuid_offt >> 1) == 2, 3, 4, 5,
- * which in turn means
- * (uuid_offt >> 1) - 2 <= 3
- * since it is always >= 0, because uuid_offt is unsigned */
- if (((uuid_offt >> 1) - 2) <= 3 && str[uuid_len] == '-') {
- // skip dashes after 4th, 6th, 8th and 10th positions
- uuid_len += 1;
- continue;
- }
-
- if (isxdigit(str[uuid_len]) && isxdigit(str[uuid_len + 1])) {
- // got hex digit, scan another byte to uuid, increment uuid_offt
- sscanf (str + uuid_len, "%2hhx", uuid->data + uuid_offt);
- uuid_len += 2;
- uuid_offt += 1;
- if (sizeof (uuid->data) == uuid_offt)
- return uuid_len;
- }
- else {
- break;
- }
- }
-
- *uuid = WSREP_UUID_UNDEFINED;
- return -EINVAL;
-}
-
-/*!
- * Write UUID to string
- * @return length of UUID string representation or -EMSGSIZE if string is too
- * short
- */
-int
-wsrep_uuid_print (const wsrep_uuid_t* uuid, char* str, size_t str_len)
-{
- if (str_len > 36) {
- const unsigned char* u = uuid->data;
- return snprintf(str, str_len, "%02x%02x%02x%02x-%02x%02x-%02x%02x-"
- "%02x%02x-%02x%02x%02x%02x%02x%02x",
- u[ 0], u[ 1], u[ 2], u[ 3], u[ 4], u[ 5], u[ 6], u[ 7],
- u[ 8], u[ 9], u[10], u[11], u[12], u[13], u[14], u[15]);
- }
- else {
- return -EMSGSIZE;
- }
-}